Diffstat (limited to 'arch')
507 files changed, 13730 insertions, 7521 deletions
diff --git a/arch/alpha/include/asm/fcntl.h b/arch/alpha/include/asm/fcntl.h
index 70145cbb21cb..1b71ca70c9f6 100644
--- a/arch/alpha/include/asm/fcntl.h
+++ b/arch/alpha/include/asm/fcntl.h
@@ -31,6 +31,8 @@
31 | #define __O_SYNC 020000000 | 31 | #define __O_SYNC 020000000 |
32 | #define O_SYNC (__O_SYNC|O_DSYNC) | 32 | #define O_SYNC (__O_SYNC|O_DSYNC) |
33 | 33 | ||
34 | #define O_PATH 040000000 | ||
35 | |||
34 | #define F_GETLK 7 | 36 | #define F_GETLK 7 |
35 | #define F_SETLK 8 | 37 | #define F_SETLK 8 |
36 | #define F_SETLKW 9 | 38 | #define F_SETLKW 9 |
diff --git a/arch/alpha/include/asm/futex.h b/arch/alpha/include/asm/futex.h
index 945de222ab91..e8a761aee088 100644
--- a/arch/alpha/include/asm/futex.h
+++ b/arch/alpha/include/asm/futex.h
@@ -29,7 +29,7 @@
29 | : "r" (uaddr), "r"(oparg) \ | 29 | : "r" (uaddr), "r"(oparg) \ |
30 | : "memory") | 30 | : "memory") |
31 | 31 | ||
32 | static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | 32 | static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) |
33 | { | 33 | { |
34 | int op = (encoded_op >> 28) & 7; | 34 | int op = (encoded_op >> 28) & 7; |
35 | int cmp = (encoded_op >> 24) & 15; | 35 | int cmp = (encoded_op >> 24) & 15; |
@@ -39,7 +39,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
39 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) | 39 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) |
40 | oparg = 1 << oparg; | 40 | oparg = 1 << oparg; |
41 | 41 | ||
42 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | 42 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
43 | return -EFAULT; | 43 | return -EFAULT; |
44 | 44 | ||
45 | pagefault_disable(); | 45 | pagefault_disable(); |
@@ -81,21 +81,23 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
81 | } | 81 | } |
82 | 82 | ||
83 | static inline int | 83 | static inline int |
84 | futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | 84 | futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, |
85 | u32 oldval, u32 newval) | ||
85 | { | 86 | { |
86 | int prev, cmp; | 87 | int ret = 0, cmp; |
88 | u32 prev; | ||
87 | 89 | ||
88 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | 90 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
89 | return -EFAULT; | 91 | return -EFAULT; |
90 | 92 | ||
91 | __asm__ __volatile__ ( | 93 | __asm__ __volatile__ ( |
92 | __ASM_SMP_MB | 94 | __ASM_SMP_MB |
93 | "1: ldl_l %0,0(%2)\n" | 95 | "1: ldl_l %1,0(%3)\n" |
94 | " cmpeq %0,%3,%1\n" | 96 | " cmpeq %1,%4,%2\n" |
95 | " beq %1,3f\n" | 97 | " beq %2,3f\n" |
96 | " mov %4,%1\n" | 98 | " mov %5,%2\n" |
97 | "2: stl_c %1,0(%2)\n" | 99 | "2: stl_c %2,0(%3)\n" |
98 | " beq %1,4f\n" | 100 | " beq %2,4f\n" |
99 | "3: .subsection 2\n" | 101 | "3: .subsection 2\n" |
100 | "4: br 1b\n" | 102 | "4: br 1b\n" |
101 | " .previous\n" | 103 | " .previous\n" |
@@ -105,11 +107,12 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
105 | " .long 2b-.\n" | 107 | " .long 2b-.\n" |
106 | " lda $31,3b-2b(%0)\n" | 108 | " lda $31,3b-2b(%0)\n" |
107 | " .previous\n" | 109 | " .previous\n" |
108 | : "=&r"(prev), "=&r"(cmp) | 110 | : "+r"(ret), "=&r"(prev), "=&r"(cmp) |
109 | : "r"(uaddr), "r"((long)oldval), "r"(newval) | 111 | : "r"(uaddr), "r"((long)oldval), "r"(newval) |
110 | : "memory"); | 112 | : "memory"); |
111 | 113 | ||
112 | return prev; | 114 | *uval = prev; |
115 | return ret; | ||
113 | } | 116 | } |
114 | 117 | ||
115 | #endif /* __KERNEL__ */ | 118 | #endif /* __KERNEL__ */ |
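The two futex hunks above change the calling convention of futex_atomic_cmpxchg_inatomic(): the return value now carries only an error code, and the word actually read from user space comes back through the new *uval argument instead of being the return value. A minimal caller sketch under that convention follows; the wrapper function name and the -EAGAIN retry policy are illustrative, not taken from this diff.

/* Sketch only: how a caller uses the new signature shown above. */
static int example_cmpxchg_user_word(u32 __user *uaddr, u32 oldval, u32 newval)
{
	u32 curval;
	int err;

	/* Error code in the return value, old word via the first argument. */
	err = futex_atomic_cmpxchg_inatomic(&curval, uaddr, oldval, newval);
	if (err)
		return err;		/* e.g. -EFAULT on a faulting access */

	return (curval == oldval) ? 0 : -EAGAIN;	/* illustrative policy */
}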
diff --git a/arch/alpha/include/asm/ioctls.h b/arch/alpha/include/asm/ioctls.h
index 034b6cf5d9f3..80e1cee90f1f 100644
--- a/arch/alpha/include/asm/ioctls.h
+++ b/arch/alpha/include/asm/ioctls.h
@@ -94,6 +94,7 @@
94 | #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ | 94 | #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ |
95 | #define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */ | 95 | #define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */ |
96 | #define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */ | 96 | #define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */ |
97 | #define TIOCVHANGUP 0x5437 | ||
97 | 98 | ||
98 | #define TIOCSERCONFIG 0x5453 | 99 | #define TIOCSERCONFIG 0x5453 |
99 | #define TIOCSERGWILD 0x5454 | 100 | #define TIOCSERGWILD 0x5454 |
diff --git a/arch/alpha/include/asm/rwsem.h b/arch/alpha/include/asm/rwsem.h
index 1570c0b54336..a83bbea62c67 100644
--- a/arch/alpha/include/asm/rwsem.h
+++ b/arch/alpha/include/asm/rwsem.h
@@ -13,44 +13,13 @@
13 | #ifdef __KERNEL__ | 13 | #ifdef __KERNEL__ |
14 | 14 | ||
15 | #include <linux/compiler.h> | 15 | #include <linux/compiler.h> |
16 | #include <linux/list.h> | ||
17 | #include <linux/spinlock.h> | ||
18 | 16 | ||
19 | struct rwsem_waiter; | ||
20 | |||
21 | extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); | ||
22 | extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); | ||
23 | extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *); | ||
24 | extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); | ||
25 | |||
26 | /* | ||
27 | * the semaphore definition | ||
28 | */ | ||
29 | struct rw_semaphore { | ||
30 | long count; | ||
31 | #define RWSEM_UNLOCKED_VALUE 0x0000000000000000L | 17 | #define RWSEM_UNLOCKED_VALUE 0x0000000000000000L |
32 | #define RWSEM_ACTIVE_BIAS 0x0000000000000001L | 18 | #define RWSEM_ACTIVE_BIAS 0x0000000000000001L |
33 | #define RWSEM_ACTIVE_MASK 0x00000000ffffffffL | 19 | #define RWSEM_ACTIVE_MASK 0x00000000ffffffffL |
34 | #define RWSEM_WAITING_BIAS (-0x0000000100000000L) | 20 | #define RWSEM_WAITING_BIAS (-0x0000000100000000L) |
35 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS | 21 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS |
36 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) | 22 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) |
37 | spinlock_t wait_lock; | ||
38 | struct list_head wait_list; | ||
39 | }; | ||
40 | |||
41 | #define __RWSEM_INITIALIZER(name) \ | ||
42 | { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ | ||
43 | LIST_HEAD_INIT((name).wait_list) } | ||
44 | |||
45 | #define DECLARE_RWSEM(name) \ | ||
46 | struct rw_semaphore name = __RWSEM_INITIALIZER(name) | ||
47 | |||
48 | static inline void init_rwsem(struct rw_semaphore *sem) | ||
49 | { | ||
50 | sem->count = RWSEM_UNLOCKED_VALUE; | ||
51 | spin_lock_init(&sem->wait_lock); | ||
52 | INIT_LIST_HEAD(&sem->wait_list); | ||
53 | } | ||
54 | 23 | ||
55 | static inline void __down_read(struct rw_semaphore *sem) | 24 | static inline void __down_read(struct rw_semaphore *sem) |
56 | { | 25 | { |
@@ -250,10 +219,5 @@ static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
250 | #endif | 219 | #endif |
251 | } | 220 | } |
252 | 221 | ||
253 | static inline int rwsem_is_locked(struct rw_semaphore *sem) | ||
254 | { | ||
255 | return (sem->count != 0); | ||
256 | } | ||
257 | |||
258 | #endif /* __KERNEL__ */ | 222 | #endif /* __KERNEL__ */ |
259 | #endif /* _ALPHA_RWSEM_H */ | 223 | #endif /* _ALPHA_RWSEM_H */ |
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index fe698b5045e9..376f22130791 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -230,44 +230,24 @@ linux_to_osf_statfs(struct kstatfs *linux_stat, struct osf_statfs __user *osf_st
230 | return copy_to_user(osf_stat, &tmp_stat, bufsiz) ? -EFAULT : 0; | 230 | return copy_to_user(osf_stat, &tmp_stat, bufsiz) ? -EFAULT : 0; |
231 | } | 231 | } |
232 | 232 | ||
233 | static int | 233 | SYSCALL_DEFINE3(osf_statfs, const char __user *, pathname, |
234 | do_osf_statfs(struct path *path, struct osf_statfs __user *buffer, | 234 | struct osf_statfs __user *, buffer, unsigned long, bufsiz) |
235 | unsigned long bufsiz) | ||
236 | { | 235 | { |
237 | struct kstatfs linux_stat; | 236 | struct kstatfs linux_stat; |
238 | int error = vfs_statfs(path, &linux_stat); | 237 | int error = user_statfs(pathname, &linux_stat); |
239 | if (!error) | 238 | if (!error) |
240 | error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz); | 239 | error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz); |
241 | return error; | 240 | return error; |
242 | } | 241 | } |
243 | 242 | ||
244 | SYSCALL_DEFINE3(osf_statfs, const char __user *, pathname, | ||
245 | struct osf_statfs __user *, buffer, unsigned long, bufsiz) | ||
246 | { | ||
247 | struct path path; | ||
248 | int retval; | ||
249 | |||
250 | retval = user_path(pathname, &path); | ||
251 | if (!retval) { | ||
252 | retval = do_osf_statfs(&path, buffer, bufsiz); | ||
253 | path_put(&path); | ||
254 | } | ||
255 | return retval; | ||
256 | } | ||
257 | |||
258 | SYSCALL_DEFINE3(osf_fstatfs, unsigned long, fd, | 243 | SYSCALL_DEFINE3(osf_fstatfs, unsigned long, fd, |
259 | struct osf_statfs __user *, buffer, unsigned long, bufsiz) | 244 | struct osf_statfs __user *, buffer, unsigned long, bufsiz) |
260 | { | 245 | { |
261 | struct file *file; | 246 | struct kstatfs linux_stat; |
262 | int retval; | 247 | int error = fd_statfs(fd, &linux_stat); |
263 | 248 | if (!error) | |
264 | retval = -EBADF; | 249 | error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz); |
265 | file = fget(fd); | 250 | return error; |
266 | if (file) { | ||
267 | retval = do_osf_statfs(&file->f_path, buffer, bufsiz); | ||
268 | fput(file); | ||
269 | } | ||
270 | return retval; | ||
271 | } | 251 | } |
272 | 252 | ||
273 | /* | 253 | /* |
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index f6c108a3d673..8c13a0c77830 100644
--- a/arch/alpha/kernel/sys_titan.c
+++ b/arch/alpha/kernel/sys_titan.c
@@ -149,6 +149,7 @@ static int
149 | titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, | 149 | titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, |
150 | bool force) | 150 | bool force) |
151 | { | 151 | { |
152 | unsigned int irq = d->irq; | ||
152 | spin_lock(&titan_irq_lock); | 153 | spin_lock(&titan_irq_lock); |
153 | titan_cpu_set_irq_affinity(irq - 16, *affinity); | 154 | titan_cpu_set_irq_affinity(irq - 16, *affinity); |
154 | titan_update_irq_hw(titan_cached_irq_mask); | 155 | titan_update_irq_hw(titan_cached_irq_mask); |
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index c1f3e7cb82a4..a58e84f1a63b 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -159,7 +159,7 @@ void read_persistent_clock(struct timespec *ts)
159 | 159 | ||
160 | /* | 160 | /* |
161 | * timer_interrupt() needs to keep up the real-time clock, | 161 | * timer_interrupt() needs to keep up the real-time clock, |
162 | * as well as call the "do_timer()" routine every clocktick | 162 | * as well as call the "xtime_update()" routine every clocktick |
163 | */ | 163 | */ |
164 | irqreturn_t timer_interrupt(int irq, void *dev) | 164 | irqreturn_t timer_interrupt(int irq, void *dev) |
165 | { | 165 | { |
@@ -172,8 +172,6 @@ irqreturn_t timer_interrupt(int irq, void *dev)
172 | profile_tick(CPU_PROFILING); | 172 | profile_tick(CPU_PROFILING); |
173 | #endif | 173 | #endif |
174 | 174 | ||
175 | write_seqlock(&xtime_lock); | ||
176 | |||
177 | /* | 175 | /* |
178 | * Calculate how many ticks have passed since the last update, | 176 | * Calculate how many ticks have passed since the last update, |
179 | * including any previous partial leftover. Save any resulting | 177 | * including any previous partial leftover. Save any resulting |
@@ -187,9 +185,7 @@ irqreturn_t timer_interrupt(int irq, void *dev)
187 | nticks = delta >> FIX_SHIFT; | 185 | nticks = delta >> FIX_SHIFT; |
188 | 186 | ||
189 | if (nticks) | 187 | if (nticks) |
190 | do_timer(nticks); | 188 | xtime_update(nticks); |
191 | |||
192 | write_sequnlock(&xtime_lock); | ||
193 | 189 | ||
194 | if (test_irq_work_pending()) { | 190 | if (test_irq_work_pending()) { |
195 | clear_irq_work_pending(); | 191 | clear_irq_work_pending(); |
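This hunk drops the explicit xtime_lock seqlock handling from the Alpha timer interrupt and calls xtime_update(nticks) where do_timer(nticks) used to sit. A short sketch of what such a helper is assumed to look like; the body below is inferred from the locking this hunk removes, not code contained in this diff.

/* Assumed shape of the helper: it takes xtime_lock itself, so the
 * architecture timer handlers no longer touch the seqlock directly. */
void xtime_update(unsigned long ticks)
{
	write_seqlock(&xtime_lock);
	do_timer(ticks);
	write_sequnlock(&xtime_lock);
}

The same conversion repeats in the ARM timer code further down.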
diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S
index 003ef4c02585..433be2a24f31 100644
--- a/arch/alpha/kernel/vmlinux.lds.S
+++ b/arch/alpha/kernel/vmlinux.lds.S
@@ -1,5 +1,6 @@
1 | #include <asm-generic/vmlinux.lds.h> | 1 | #include <asm-generic/vmlinux.lds.h> |
2 | #include <asm/thread_info.h> | 2 | #include <asm/thread_info.h> |
3 | #include <asm/cache.h> | ||
3 | #include <asm/page.h> | 4 | #include <asm/page.h> |
4 | 5 | ||
5 | OUTPUT_FORMAT("elf64-alpha") | 6 | OUTPUT_FORMAT("elf64-alpha") |
@@ -38,7 +39,7 @@ SECTIONS
38 | __init_begin = ALIGN(PAGE_SIZE); | 39 | __init_begin = ALIGN(PAGE_SIZE); |
39 | INIT_TEXT_SECTION(PAGE_SIZE) | 40 | INIT_TEXT_SECTION(PAGE_SIZE) |
40 | INIT_DATA_SECTION(16) | 41 | INIT_DATA_SECTION(16) |
41 | PERCPU(PAGE_SIZE) | 42 | PERCPU(L1_CACHE_BYTES, PAGE_SIZE) |
42 | /* Align to THREAD_SIZE rather than PAGE_SIZE here so any padding page | 43 | /* Align to THREAD_SIZE rather than PAGE_SIZE here so any padding page |
43 | needed for the THREAD_SIZE aligned init_task gets freed after init */ | 44 | needed for the THREAD_SIZE aligned init_task gets freed after init */ |
44 | . = ALIGN(THREAD_SIZE); | 45 | . = ALIGN(THREAD_SIZE); |
@@ -46,7 +47,7 @@ SECTIONS
46 | /* Freed after init ends here */ | 47 | /* Freed after init ends here */ |
47 | 48 | ||
48 | _data = .; | 49 | _data = .; |
49 | RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE) | 50 | RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) |
50 | 51 | ||
51 | .got : { | 52 | .got : { |
52 | *(.got) | 53 | *(.got) |
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
new file mode 100644
index 000000000000..7a9267e5da55
--- /dev/null
+++ b/arch/arm/configs/tegra_defconfig
@@ -0,0 +1,123 @@
1 | CONFIG_EXPERIMENTAL=y | ||
2 | CONFIG_IKCONFIG=y | ||
3 | CONFIG_IKCONFIG_PROC=y | ||
4 | CONFIG_CGROUPS=y | ||
5 | CONFIG_CGROUP_DEBUG=y | ||
6 | CONFIG_CGROUP_FREEZER=y | ||
7 | CONFIG_CGROUP_CPUACCT=y | ||
8 | CONFIG_RESOURCE_COUNTERS=y | ||
9 | CONFIG_CGROUP_SCHED=y | ||
10 | CONFIG_RT_GROUP_SCHED=y | ||
11 | CONFIG_BLK_DEV_INITRD=y | ||
12 | CONFIG_EMBEDDED=y | ||
13 | # CONFIG_SYSCTL_SYSCALL is not set | ||
14 | # CONFIG_ELF_CORE is not set | ||
15 | CONFIG_SLAB=y | ||
16 | CONFIG_MODULES=y | ||
17 | CONFIG_MODULE_UNLOAD=y | ||
18 | CONFIG_MODULE_FORCE_UNLOAD=y | ||
19 | # CONFIG_BLK_DEV_BSG is not set | ||
20 | # CONFIG_IOSCHED_DEADLINE is not set | ||
21 | # CONFIG_IOSCHED_CFQ is not set | ||
22 | CONFIG_ARCH_TEGRA=y | ||
23 | CONFIG_MACH_HARMONY=y | ||
24 | CONFIG_TEGRA_DEBUG_UARTD=y | ||
25 | CONFIG_ARM_ERRATA_742230=y | ||
26 | CONFIG_NO_HZ=y | ||
27 | CONFIG_HIGH_RES_TIMERS=y | ||
28 | CONFIG_SMP=y | ||
29 | CONFIG_NR_CPUS=2 | ||
30 | CONFIG_PREEMPT=y | ||
31 | CONFIG_AEABI=y | ||
32 | # CONFIG_OABI_COMPAT is not set | ||
33 | CONFIG_HIGHMEM=y | ||
34 | CONFIG_ZBOOT_ROM_TEXT=0x0 | ||
35 | CONFIG_ZBOOT_ROM_BSS=0x0 | ||
36 | CONFIG_VFP=y | ||
37 | CONFIG_PM=y | ||
38 | CONFIG_NET=y | ||
39 | CONFIG_PACKET=y | ||
40 | CONFIG_UNIX=y | ||
41 | CONFIG_NET_KEY=y | ||
42 | CONFIG_INET=y | ||
43 | CONFIG_INET_ESP=y | ||
44 | # CONFIG_INET_XFRM_MODE_TUNNEL is not set | ||
45 | # CONFIG_INET_XFRM_MODE_BEET is not set | ||
46 | # CONFIG_INET_LRO is not set | ||
47 | # CONFIG_INET_DIAG is not set | ||
48 | CONFIG_IPV6=y | ||
49 | CONFIG_IPV6_PRIVACY=y | ||
50 | CONFIG_IPV6_ROUTER_PREF=y | ||
51 | CONFIG_IPV6_OPTIMISTIC_DAD=y | ||
52 | CONFIG_INET6_AH=y | ||
53 | CONFIG_INET6_ESP=y | ||
54 | CONFIG_INET6_IPCOMP=y | ||
55 | CONFIG_IPV6_MIP6=y | ||
56 | CONFIG_IPV6_TUNNEL=y | ||
57 | CONFIG_IPV6_MULTIPLE_TABLES=y | ||
58 | # CONFIG_WIRELESS is not set | ||
59 | # CONFIG_FIRMWARE_IN_KERNEL is not set | ||
60 | CONFIG_BLK_DEV_LOOP=y | ||
61 | CONFIG_MISC_DEVICES=y | ||
62 | CONFIG_AD525X_DPOT=y | ||
63 | CONFIG_AD525X_DPOT_I2C=y | ||
64 | CONFIG_ICS932S401=y | ||
65 | CONFIG_APDS9802ALS=y | ||
66 | CONFIG_ISL29003=y | ||
67 | CONFIG_NETDEVICES=y | ||
68 | CONFIG_DUMMY=y | ||
69 | # CONFIG_NETDEV_1000 is not set | ||
70 | # CONFIG_NETDEV_10000 is not set | ||
71 | # CONFIG_WLAN is not set | ||
72 | # CONFIG_INPUT is not set | ||
73 | # CONFIG_SERIO is not set | ||
74 | # CONFIG_VT is not set | ||
75 | # CONFIG_DEVKMEM is not set | ||
76 | CONFIG_SERIAL_8250=y | ||
77 | CONFIG_SERIAL_8250_CONSOLE=y | ||
78 | # CONFIG_LEGACY_PTYS is not set | ||
79 | # CONFIG_HW_RANDOM is not set | ||
80 | CONFIG_I2C=y | ||
81 | # CONFIG_HWMON is not set | ||
82 | # CONFIG_MFD_SUPPORT is not set | ||
83 | # CONFIG_USB_SUPPORT is not set | ||
84 | CONFIG_MMC=y | ||
85 | CONFIG_MMC_SDHCI=y | ||
86 | CONFIG_MMC_SDHCI_PLTFM=y | ||
87 | CONFIG_EXT2_FS=y | ||
88 | CONFIG_EXT2_FS_XATTR=y | ||
89 | CONFIG_EXT2_FS_POSIX_ACL=y | ||
90 | CONFIG_EXT2_FS_SECURITY=y | ||
91 | CONFIG_EXT3_FS=y | ||
92 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set | ||
93 | CONFIG_EXT3_FS_POSIX_ACL=y | ||
94 | CONFIG_EXT3_FS_SECURITY=y | ||
95 | # CONFIG_DNOTIFY is not set | ||
96 | CONFIG_VFAT_FS=y | ||
97 | CONFIG_TMPFS=y | ||
98 | CONFIG_NLS_CODEPAGE_437=y | ||
99 | CONFIG_NLS_ISO8859_1=y | ||
100 | CONFIG_PRINTK_TIME=y | ||
101 | CONFIG_MAGIC_SYSRQ=y | ||
102 | CONFIG_DEBUG_FS=y | ||
103 | CONFIG_DEBUG_KERNEL=y | ||
104 | CONFIG_DETECT_HUNG_TASK=y | ||
105 | CONFIG_SCHEDSTATS=y | ||
106 | CONFIG_TIMER_STATS=y | ||
107 | CONFIG_DEBUG_SLAB=y | ||
108 | # CONFIG_DEBUG_PREEMPT is not set | ||
109 | CONFIG_DEBUG_MUTEXES=y | ||
110 | CONFIG_DEBUG_SPINLOCK_SLEEP=y | ||
111 | CONFIG_DEBUG_INFO=y | ||
112 | CONFIG_DEBUG_VM=y | ||
113 | CONFIG_DEBUG_SG=y | ||
114 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set | ||
115 | CONFIG_DEBUG_LL=y | ||
116 | CONFIG_EARLY_PRINTK=y | ||
117 | CONFIG_CRYPTO_ECB=y | ||
118 | CONFIG_CRYPTO_AES=y | ||
119 | CONFIG_CRYPTO_ARC4=y | ||
120 | CONFIG_CRYPTO_TWOFISH=y | ||
121 | # CONFIG_CRYPTO_ANSI_CPRNG is not set | ||
122 | CONFIG_CRC_CCITT=y | ||
123 | CONFIG_CRC16=y | ||
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index b33fe7065b38..199a6b6de7f4 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -35,7 +35,7 @@
35 | : "cc", "memory") | 35 | : "cc", "memory") |
36 | 36 | ||
37 | static inline int | 37 | static inline int |
38 | futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | 38 | futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) |
39 | { | 39 | { |
40 | int op = (encoded_op >> 28) & 7; | 40 | int op = (encoded_op >> 28) & 7; |
41 | int cmp = (encoded_op >> 24) & 15; | 41 | int cmp = (encoded_op >> 24) & 15; |
@@ -46,7 +46,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
46 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) | 46 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) |
47 | oparg = 1 << oparg; | 47 | oparg = 1 << oparg; |
48 | 48 | ||
49 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | 49 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
50 | return -EFAULT; | 50 | return -EFAULT; |
51 | 51 | ||
52 | pagefault_disable(); /* implies preempt_disable() */ | 52 | pagefault_disable(); /* implies preempt_disable() */ |
@@ -88,36 +88,35 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
88 | } | 88 | } |
89 | 89 | ||
90 | static inline int | 90 | static inline int |
91 | futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | 91 | futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, |
92 | u32 oldval, u32 newval) | ||
92 | { | 93 | { |
93 | int val; | 94 | int ret = 0; |
95 | u32 val; | ||
94 | 96 | ||
95 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | 97 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
96 | return -EFAULT; | 98 | return -EFAULT; |
97 | 99 | ||
98 | pagefault_disable(); /* implies preempt_disable() */ | ||
99 | |||
100 | __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" | 100 | __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" |
101 | "1: " T(ldr) " %0, [%3]\n" | 101 | "1: " T(ldr) " %1, [%4]\n" |
102 | " teq %0, %1\n" | 102 | " teq %1, %2\n" |
103 | " it eq @ explicit IT needed for the 2b label\n" | 103 | " it eq @ explicit IT needed for the 2b label\n" |
104 | "2: " T(streq) " %2, [%3]\n" | 104 | "2: " T(streq) " %3, [%4]\n" |
105 | "3:\n" | 105 | "3:\n" |
106 | " .pushsection __ex_table,\"a\"\n" | 106 | " .pushsection __ex_table,\"a\"\n" |
107 | " .align 3\n" | 107 | " .align 3\n" |
108 | " .long 1b, 4f, 2b, 4f\n" | 108 | " .long 1b, 4f, 2b, 4f\n" |
109 | " .popsection\n" | 109 | " .popsection\n" |
110 | " .pushsection .fixup,\"ax\"\n" | 110 | " .pushsection .fixup,\"ax\"\n" |
111 | "4: mov %0, %4\n" | 111 | "4: mov %0, %5\n" |
112 | " b 3b\n" | 112 | " b 3b\n" |
113 | " .popsection" | 113 | " .popsection" |
114 | : "=&r" (val) | 114 | : "+r" (ret), "=&r" (val) |
115 | : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) | 115 | : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) |
116 | : "cc", "memory"); | 116 | : "cc", "memory"); |
117 | 117 | ||
118 | pagefault_enable(); /* subsumes preempt_enable() */ | 118 | *uval = val; |
119 | 119 | return ret; | |
120 | return val; | ||
121 | } | 120 | } |
122 | 121 | ||
123 | #endif /* !SMP */ | 122 | #endif /* !SMP */ |
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 3d76bf233734..1ff46cabc7ef 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -107,9 +107,7 @@ void timer_tick(void)
107 | { | 107 | { |
108 | profile_tick(CPU_PROFILING); | 108 | profile_tick(CPU_PROFILING); |
109 | do_leds(); | 109 | do_leds(); |
110 | write_seqlock(&xtime_lock); | 110 | xtime_update(1); |
111 | do_timer(1); | ||
112 | write_sequnlock(&xtime_lock); | ||
113 | #ifndef CONFIG_SMP | 111 | #ifndef CONFIG_SMP |
114 | update_process_times(user_mode(get_irq_regs())); | 112 | update_process_times(user_mode(get_irq_regs())); |
115 | #endif | 113 | #endif |
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index dfbb377e251d..b4348e62ef06 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -82,7 +82,7 @@ SECTIONS
82 | #endif | 82 | #endif |
83 | } | 83 | } |
84 | 84 | ||
85 | PERCPU(PAGE_SIZE) | 85 | PERCPU(32, PAGE_SIZE) |
86 | 86 | ||
87 | #ifndef CONFIG_XIP_KERNEL | 87 | #ifndef CONFIG_XIP_KERNEL |
88 | . = ALIGN(PAGE_SIZE); | 88 | . = ALIGN(PAGE_SIZE); |
diff --git a/arch/arm/mach-clps711x/include/mach/time.h b/arch/arm/mach-clps711x/include/mach/time.h
index 8fe283ccd1f3..61fef9129c6a 100644
--- a/arch/arm/mach-clps711x/include/mach/time.h
+++ b/arch/arm/mach-clps711x/include/mach/time.h
@@ -30,7 +30,7 @@ p720t_timer_interrupt(int irq, void *dev_id)
30 | { | 30 | { |
31 | struct pt_regs *regs = get_irq_regs(); | 31 | struct pt_regs *regs = get_irq_regs(); |
32 | do_leds(); | 32 | do_leds(); |
33 | do_timer(1); | 33 | xtime_update(1); |
34 | #ifndef CONFIG_SMP | 34 | #ifndef CONFIG_SMP |
35 | update_process_times(user_mode(regs)); | 35 | update_process_times(user_mode(regs)); |
36 | #endif | 36 | #endif |
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 1c0c2b02d870..64dc4176407b 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -229,7 +229,7 @@ usbfs-$(CONFIG_ARCH_OMAP_OTG) := usb-fs.o
229 | obj-y += $(usbfs-m) $(usbfs-y) | 229 | obj-y += $(usbfs-m) $(usbfs-y) |
230 | obj-y += usb-musb.o | 230 | obj-y += usb-musb.o |
231 | obj-$(CONFIG_MACH_OMAP2_TUSB6010) += usb-tusb6010.o | 231 | obj-$(CONFIG_MACH_OMAP2_TUSB6010) += usb-tusb6010.o |
232 | obj-y += usb-ehci.o | 232 | obj-y += usb-host.o |
233 | 233 | ||
234 | onenand-$(CONFIG_MTD_ONENAND_OMAP2) := gpmc-onenand.o | 234 | onenand-$(CONFIG_MTD_ONENAND_OMAP2) := gpmc-onenand.o |
235 | obj-y += $(onenand-m) $(onenand-y) | 235 | obj-y += $(onenand-m) $(onenand-y) |
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index d4e41ef86aa5..7542ba59f2b8 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -653,11 +653,11 @@ static void enable_board_wakeup_source(void)
653 | OMAP_WAKEUP_EN | OMAP_PIN_INPUT_PULLUP); | 653 | OMAP_WAKEUP_EN | OMAP_PIN_INPUT_PULLUP); |
654 | } | 654 | } |
655 | 655 | ||
656 | static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { | 656 | static const struct usbhs_omap_board_data usbhs_bdata __initconst = { |
657 | 657 | ||
658 | .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, | 658 | .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, |
659 | .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, | 659 | .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, |
660 | .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, | 660 | .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, |
661 | 661 | ||
662 | .phy_reset = true, | 662 | .phy_reset = true, |
663 | .reset_gpio_port[0] = 57, | 663 | .reset_gpio_port[0] = 57, |
@@ -816,7 +816,7 @@ static void __init omap_3430sdp_init(void)
816 | board_flash_init(sdp_flash_partitions, chip_sel_3430); | 816 | board_flash_init(sdp_flash_partitions, chip_sel_3430); |
817 | sdp3430_display_init(); | 817 | sdp3430_display_init(); |
818 | enable_board_wakeup_source(); | 818 | enable_board_wakeup_source(); |
819 | usb_ehci_init(&ehci_pdata); | 819 | usbhs_init(&usbhs_bdata); |
820 | } | 820 | } |
821 | 821 | ||
822 | MACHINE_START(OMAP_3430SDP, "OMAP3430 3430SDP board") | 822 | MACHINE_START(OMAP_3430SDP, "OMAP3430 3430SDP board") |
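This board file, like the other OMAP2 boards below, replaces the EHCI-only platform data (struct ehci_hcd_omap_platform_data registered via usb_ehci_init()) with the consolidated USB host data (struct usbhs_omap_board_data registered via usbhs_init()). The recurring shape of the conversion is sketched here; the GPIO number and the board_init() wrapper are placeholders, and the real per-port values appear in each board's hunk.

static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
	.port_mode[0]		= OMAP_EHCI_PORT_MODE_PHY,
	.port_mode[1]		= OMAP_USBHS_PORT_MODE_UNUSED,
	.port_mode[2]		= OMAP_USBHS_PORT_MODE_UNUSED,

	.phy_reset		= true,
	.reset_gpio_port[0]	= 57,		/* placeholder; board specific */
	.reset_gpio_port[1]	= -EINVAL,
	.reset_gpio_port[2]	= -EINVAL,
};

static void __init board_init(void)		/* placeholder wrapper */
{
	/* ... other board setup ... */
	usbhs_init(&usbhs_bdata);	/* replaces usb_ehci_init(&ehci_pdata) */
}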
diff --git a/arch/arm/mach-omap2/board-3630sdp.c b/arch/arm/mach-omap2/board-3630sdp.c
index 62645640f5e4..deed2db32c53 100644
--- a/arch/arm/mach-omap2/board-3630sdp.c
+++ b/arch/arm/mach-omap2/board-3630sdp.c
@@ -54,11 +54,11 @@ static void enable_board_wakeup_source(void)
54 | OMAP_WAKEUP_EN | OMAP_PIN_INPUT_PULLUP); | 54 | OMAP_WAKEUP_EN | OMAP_PIN_INPUT_PULLUP); |
55 | } | 55 | } |
56 | 56 | ||
57 | static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { | 57 | static const struct usbhs_omap_board_data usbhs_bdata __initconst = { |
58 | 58 | ||
59 | .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, | 59 | .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, |
60 | .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, | 60 | .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, |
61 | .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, | 61 | .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, |
62 | 62 | ||
63 | .phy_reset = true, | 63 | .phy_reset = true, |
64 | .reset_gpio_port[0] = 126, | 64 | .reset_gpio_port[0] = 126, |
@@ -211,7 +211,7 @@ static void __init omap_sdp_init(void)
211 | board_smc91x_init(); | 211 | board_smc91x_init(); |
212 | board_flash_init(sdp_flash_partitions, chip_sel_sdp); | 212 | board_flash_init(sdp_flash_partitions, chip_sel_sdp); |
213 | enable_board_wakeup_source(); | 213 | enable_board_wakeup_source(); |
214 | usb_ehci_init(&ehci_pdata); | 214 | usbhs_init(&usbhs_bdata); |
215 | } | 215 | } |
216 | 216 | ||
217 | MACHINE_START(OMAP_3630SDP, "OMAP 3630SDP board") | 217 | MACHINE_START(OMAP_3630SDP, "OMAP 3630SDP board") |
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index 07d1b20b1148..f603f3b04cb8 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -44,7 +44,6 @@
44 | #define ETH_KS8851_IRQ 34 | 44 | #define ETH_KS8851_IRQ 34 |
45 | #define ETH_KS8851_POWER_ON 48 | 45 | #define ETH_KS8851_POWER_ON 48 |
46 | #define ETH_KS8851_QUART 138 | 46 | #define ETH_KS8851_QUART 138 |
47 | #define OMAP4SDP_MDM_PWR_EN_GPIO 157 | ||
48 | #define OMAP4_SFH7741_SENSOR_OUTPUT_GPIO 184 | 47 | #define OMAP4_SFH7741_SENSOR_OUTPUT_GPIO 184 |
49 | #define OMAP4_SFH7741_ENABLE_GPIO 188 | 48 | #define OMAP4_SFH7741_ENABLE_GPIO 188 |
50 | 49 | ||
@@ -251,16 +250,6 @@ static void __init omap_4430sdp_init_irq(void)
251 | gic_init_irq(); | 250 | gic_init_irq(); |
252 | } | 251 | } |
253 | 252 | ||
254 | static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { | ||
255 | .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, | ||
256 | .port_mode[1] = EHCI_HCD_OMAP_MODE_UNKNOWN, | ||
257 | .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, | ||
258 | .phy_reset = false, | ||
259 | .reset_gpio_port[0] = -EINVAL, | ||
260 | .reset_gpio_port[1] = -EINVAL, | ||
261 | .reset_gpio_port[2] = -EINVAL, | ||
262 | }; | ||
263 | |||
264 | static struct omap_musb_board_data musb_board_data = { | 253 | static struct omap_musb_board_data musb_board_data = { |
265 | .interface_type = MUSB_INTERFACE_UTMI, | 254 | .interface_type = MUSB_INTERFACE_UTMI, |
266 | .mode = MUSB_OTG, | 255 | .mode = MUSB_OTG, |
@@ -272,6 +261,7 @@ static struct twl4030_usb_data omap4_usbphy_data = {
272 | .phy_exit = omap4430_phy_exit, | 261 | .phy_exit = omap4430_phy_exit, |
273 | .phy_power = omap4430_phy_power, | 262 | .phy_power = omap4430_phy_power, |
274 | .phy_set_clock = omap4430_phy_set_clk, | 263 | .phy_set_clock = omap4430_phy_set_clk, |
264 | .phy_suspend = omap4430_phy_suspend, | ||
275 | }; | 265 | }; |
276 | 266 | ||
277 | static struct omap2_hsmmc_info mmc[] = { | 267 | static struct omap2_hsmmc_info mmc[] = { |
@@ -576,14 +566,6 @@ static void __init omap_4430sdp_init(void)
576 | omap_serial_init(); | 566 | omap_serial_init(); |
577 | omap4_twl6030_hsmmc_init(mmc); | 567 | omap4_twl6030_hsmmc_init(mmc); |
578 | 568 | ||
579 | /* Power on the ULPI PHY */ | ||
580 | status = gpio_request(OMAP4SDP_MDM_PWR_EN_GPIO, "USBB1 PHY VMDM_3V3"); | ||
581 | if (status) | ||
582 | pr_err("%s: Could not get USBB1 PHY GPIO\n", __func__); | ||
583 | else | ||
584 | gpio_direction_output(OMAP4SDP_MDM_PWR_EN_GPIO, 1); | ||
585 | |||
586 | usb_ehci_init(&ehci_pdata); | ||
587 | usb_musb_init(&musb_board_data); | 569 | usb_musb_init(&musb_board_data); |
588 | 570 | ||
589 | status = omap_ethernet_init(); | 571 | status = omap_ethernet_init(); |
diff --git a/arch/arm/mach-omap2/board-am3517crane.c b/arch/arm/mach-omap2/board-am3517crane.c
index 71acb5ab281c..e3a194f6b13f 100644
--- a/arch/arm/mach-omap2/board-am3517crane.c
+++ b/arch/arm/mach-omap2/board-am3517crane.c
@@ -59,10 +59,10 @@ static void __init am3517_crane_init_irq(void)
59 | omap_init_irq(); | 59 | omap_init_irq(); |
60 | } | 60 | } |
61 | 61 | ||
62 | static struct ehci_hcd_omap_platform_data ehci_pdata __initdata = { | 62 | static struct usbhs_omap_board_data usbhs_bdata __initdata = { |
63 | .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, | 63 | .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, |
64 | .port_mode[1] = EHCI_HCD_OMAP_MODE_UNKNOWN, | 64 | .port_mode[1] = OMAP_USBHS_PORT_MODE_UNUSED, |
65 | .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, | 65 | .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, |
66 | 66 | ||
67 | .phy_reset = true, | 67 | .phy_reset = true, |
68 | .reset_gpio_port[0] = GPIO_USB_NRESET, | 68 | .reset_gpio_port[0] = GPIO_USB_NRESET, |
@@ -103,7 +103,7 @@ static void __init am3517_crane_init(void)
103 | return; | 103 | return; |
104 | } | 104 | } |
105 | 105 | ||
106 | usb_ehci_init(&ehci_pdata); | 106 | usbhs_init(&usbhs_bdata); |
107 | } | 107 | } |
108 | 108 | ||
109 | MACHINE_START(CRANEBOARD, "AM3517/05 CRANEBOARD") | 109 | MACHINE_START(CRANEBOARD, "AM3517/05 CRANEBOARD") |
diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c
index 10d60b7743cf..913538ad17d8 100644
--- a/arch/arm/mach-omap2/board-am3517evm.c
+++ b/arch/arm/mach-omap2/board-am3517evm.c
@@ -430,15 +430,15 @@ static __init void am3517_evm_musb_init(void)
430 | usb_musb_init(&musb_board_data); | 430 | usb_musb_init(&musb_board_data); |
431 | } | 431 | } |
432 | 432 | ||
433 | static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { | 433 | static const struct usbhs_omap_board_data usbhs_bdata __initconst = { |
434 | .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, | 434 | .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, |
435 | #if defined(CONFIG_PANEL_SHARP_LQ043T1DG01) || \ | 435 | #if defined(CONFIG_PANEL_SHARP_LQ043T1DG01) || \ |
436 | defined(CONFIG_PANEL_SHARP_LQ043T1DG01_MODULE) | 436 | defined(CONFIG_PANEL_SHARP_LQ043T1DG01_MODULE) |
437 | .port_mode[1] = EHCI_HCD_OMAP_MODE_UNKNOWN, | 437 | .port_mode[1] = OMAP_USBHS_PORT_MODE_UNUSED, |
438 | #else | 438 | #else |
439 | .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, | 439 | .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, |
440 | #endif | 440 | #endif |
441 | .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, | 441 | .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, |
442 | 442 | ||
443 | .phy_reset = true, | 443 | .phy_reset = true, |
444 | .reset_gpio_port[0] = 57, | 444 | .reset_gpio_port[0] = 57, |
@@ -502,7 +502,7 @@ static void __init am3517_evm_init(void)
502 | 502 | ||
503 | /* Configure GPIO for EHCI port */ | 503 | /* Configure GPIO for EHCI port */ |
504 | omap_mux_init_gpio(57, OMAP_PIN_OUTPUT); | 504 | omap_mux_init_gpio(57, OMAP_PIN_OUTPUT); |
505 | usb_ehci_init(&ehci_pdata); | 505 | usbhs_init(&usbhs_bdata); |
506 | am3517_evm_hecc_init(&am3517_evm_hecc_pdata); | 506 | am3517_evm_hecc_init(&am3517_evm_hecc_pdata); |
507 | /* DSS */ | 507 | /* DSS */ |
508 | am3517_evm_display_init(); | 508 | am3517_evm_display_init(); |
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
index dac141610666..9be7289cbb56 100644
--- a/arch/arm/mach-omap2/board-cm-t35.c
+++ b/arch/arm/mach-omap2/board-cm-t35.c
@@ -605,10 +605,10 @@ static struct omap2_hsmmc_info mmc[] = {
605 | {} /* Terminator */ | 605 | {} /* Terminator */ |
606 | }; | 606 | }; |
607 | 607 | ||
608 | static struct ehci_hcd_omap_platform_data ehci_pdata __initdata = { | 608 | static struct usbhs_omap_board_data usbhs_bdata __initdata = { |
609 | .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, | 609 | .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, |
610 | .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, | 610 | .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, |
611 | .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, | 611 | .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, |
612 | 612 | ||
613 | .phy_reset = true, | 613 | .phy_reset = true, |
614 | .reset_gpio_port[0] = OMAP_MAX_GPIO_LINES + 6, | 614 | .reset_gpio_port[0] = OMAP_MAX_GPIO_LINES + 6, |
@@ -810,7 +810,7 @@ static void __init cm_t35_init(void)
810 | cm_t35_init_display(); | 810 | cm_t35_init_display(); |
811 | 811 | ||
812 | usb_musb_init(&musb_board_data); | 812 | usb_musb_init(&musb_board_data); |
813 | usb_ehci_init(&ehci_pdata); | 813 | usbhs_init(&usbhs_bdata); |
814 | } | 814 | } |
815 | 815 | ||
816 | MACHINE_START(CM_T35, "Compulab CM-T35") | 816 | MACHINE_START(CM_T35, "Compulab CM-T35") |
diff --git a/arch/arm/mach-omap2/board-cm-t3517.c b/arch/arm/mach-omap2/board-cm-t3517.c
index 8f9a64d650ee..8e18dc76b11e 100644
--- a/arch/arm/mach-omap2/board-cm-t3517.c
+++ b/arch/arm/mach-omap2/board-cm-t3517.c
@@ -167,10 +167,10 @@ static inline void cm_t3517_init_rtc(void) {}
167 | #define HSUSB2_RESET_GPIO (147) | 167 | #define HSUSB2_RESET_GPIO (147) |
168 | #define USB_HUB_RESET_GPIO (152) | 168 | #define USB_HUB_RESET_GPIO (152) |
169 | 169 | ||
170 | static struct ehci_hcd_omap_platform_data cm_t3517_ehci_pdata __initdata = { | 170 | static struct usbhs_omap_board_data cm_t3517_ehci_pdata __initdata = { |
171 | .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, | 171 | .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, |
172 | .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, | 172 | .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, |
173 | .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, | 173 | .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, |
174 | 174 | ||
175 | .phy_reset = true, | 175 | .phy_reset = true, |
176 | .reset_gpio_port[0] = HSUSB1_RESET_GPIO, | 176 | .reset_gpio_port[0] = HSUSB1_RESET_GPIO, |
@@ -192,7 +192,7 @@ static int cm_t3517_init_usbh(void)
192 | msleep(1); | 192 | msleep(1); |
193 | } | 193 | } |
194 | 194 | ||
195 | usb_ehci_init(&cm_t3517_ehci_pdata); | 195 | usbhs_init(&cm_t3517_ehci_pdata); |
196 | 196 | ||
197 | return 0; | 197 | return 0; |
198 | } | 198 | } |
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index 9a2a31e011ce..bc0141b98694 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -620,11 +620,11 @@ static struct omap_musb_board_data musb_board_data = {
620 | .power = 100, | 620 | .power = 100, |
621 | }; | 621 | }; |
622 | 622 | ||
623 | static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { | 623 | static const struct usbhs_omap_board_data usbhs_bdata __initconst = { |
624 | 624 | ||
625 | .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, | 625 | .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, |
626 | .port_mode[1] = EHCI_HCD_OMAP_MODE_UNKNOWN, | 626 | .port_mode[1] = OMAP_USBHS_PORT_MODE_UNUSED, |
627 | .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, | 627 | .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, |
628 | 628 | ||
629 | .phy_reset = true, | 629 | .phy_reset = true, |
630 | .reset_gpio_port[0] = -EINVAL, | 630 | .reset_gpio_port[0] = -EINVAL, |
@@ -803,7 +803,7 @@ static void __init devkit8000_init(void)
803 | devkit8000_ads7846_init(); | 803 | devkit8000_ads7846_init(); |
804 | 804 | ||
805 | usb_musb_init(&musb_board_data); | 805 | usb_musb_init(&musb_board_data); |
806 | usb_ehci_init(&ehci_pdata); | 806 | usbhs_init(&usbhs_bdata); |
807 | devkit8000_flash_init(); | 807 | devkit8000_flash_init(); |
808 | 808 | ||
809 | /* Ensure SDRC pins are mux'd for self-refresh */ | 809 | /* Ensure SDRC pins are mux'd for self-refresh */ |
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index 3be85a1f55f4..f9f534419311 100644
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ b/arch/arm/mach-omap2/board-igep0020.c
@@ -627,10 +627,10 @@ static struct omap_musb_board_data musb_board_data = {
627 | .power = 100, | 627 | .power = 100, |
628 | }; | 628 | }; |
629 | 629 | ||
630 | static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { | 630 | static const struct usbhs_omap_board_data usbhs_bdata __initconst = { |
631 | .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, | 631 | .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, |
632 | .port_mode[1] = EHCI_HCD_OMAP_MODE_UNKNOWN, | 632 | .port_mode[1] = OMAP_USBHS_PORT_MODE_UNUSED, |
633 | .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, | 633 | .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, |
634 | 634 | ||
635 | .phy_reset = true, | 635 | .phy_reset = true, |
636 | .reset_gpio_port[0] = IGEP2_GPIO_USBH_NRESET, | 636 | .reset_gpio_port[0] = IGEP2_GPIO_USBH_NRESET, |
@@ -699,7 +699,7 @@ static void __init igep2_init(void)
699 | platform_add_devices(igep2_devices, ARRAY_SIZE(igep2_devices)); | 699 | platform_add_devices(igep2_devices, ARRAY_SIZE(igep2_devices)); |
700 | omap_serial_init(); | 700 | omap_serial_init(); |
701 | usb_musb_init(&musb_board_data); | 701 | usb_musb_init(&musb_board_data); |
702 | usb_ehci_init(&ehci_pdata); | 702 | usbhs_init(&usbhs_bdata); |
703 | 703 | ||
704 | igep2_flash_init(); | 704 | igep2_flash_init(); |
705 | igep2_leds_init(); | 705 | igep2_leds_init(); |
diff --git a/arch/arm/mach-omap2/board-igep0030.c b/arch/arm/mach-omap2/board-igep0030.c
index 4dc62a9b9cb2..579fc2d2525f 100644
--- a/arch/arm/mach-omap2/board-igep0030.c
+++ b/arch/arm/mach-omap2/board-igep0030.c
@@ -408,10 +408,10 @@ static void __init igep3_wifi_bt_init(void)
408 | void __init igep3_wifi_bt_init(void) {} | 408 | void __init igep3_wifi_bt_init(void) {} |
409 | #endif | 409 | #endif |
410 | 410 | ||
411 | static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { | 411 | static const struct usbhs_omap_board_data usbhs_bdata __initconst = { |
412 | .port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN, | 412 | .port_mode[0] = OMAP_USBHS_PORT_MODE_UNUSED, |
413 | .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, | 413 | .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, |
414 | .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, | 414 | .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, |
415 | 415 | ||
416 | .phy_reset = true, | 416 | .phy_reset = true, |
417 | .reset_gpio_port[0] = -EINVAL, | 417 | .reset_gpio_port[0] = -EINVAL, |
@@ -435,7 +435,7 @@ static void __init igep3_init(void)
435 | platform_add_devices(igep3_devices, ARRAY_SIZE(igep3_devices)); | 435 | platform_add_devices(igep3_devices, ARRAY_SIZE(igep3_devices)); |
436 | omap_serial_init(); | 436 | omap_serial_init(); |
437 | usb_musb_init(&musb_board_data); | 437 | usb_musb_init(&musb_board_data); |
438 | usb_ehci_init(&ehci_pdata); | 438 | usbhs_init(&usbhs_bdata); |
439 | 439 | ||
440 | igep3_flash_init(); | 440 | igep3_flash_init(); |
441 | igep3_leds_init(); | 441 | igep3_leds_init(); |
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 46d814ab5656..f0963b6e4627 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -586,11 +586,11 @@ static void __init omap3beagle_flash_init(void)
586 | } | 586 | } |
587 | } | 587 | } |
588 | 588 | ||
589 | static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { | 589 | static const struct usbhs_omap_board_data usbhs_bdata __initconst = { |
590 | 590 | ||
591 | .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, | 591 | .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, |
592 | .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, | 592 | .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, |
593 | .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, | 593 | .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, |
594 | 594 | ||
595 | .phy_reset = true, | 595 | .phy_reset = true, |
596 | .reset_gpio_port[0] = -EINVAL, | 596 | .reset_gpio_port[0] = -EINVAL, |
@@ -625,7 +625,7 @@ static void __init omap3_beagle_init(void)
625 | gpio_direction_output(170, true); | 625 | gpio_direction_output(170, true); |
626 | 626 | ||
627 | usb_musb_init(&musb_board_data); | 627 | usb_musb_init(&musb_board_data); |
628 | usb_ehci_init(&ehci_pdata); | 628 | usbhs_init(&usbhs_bdata); |
629 | omap3beagle_flash_init(); | 629 | omap3beagle_flash_init(); |
630 | 630 | ||
631 | /* Ensure SDRC pins are mux'd for self-refresh */ | 631 | /* Ensure SDRC pins are mux'd for self-refresh */ |
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index 323c3809ce39..38a2d91790c0 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -638,11 +638,11 @@ static struct platform_device *omap3_evm_devices[] __initdata = {
638 | &omap3_evm_dss_device, | 638 | &omap3_evm_dss_device, |
639 | }; | 639 | }; |
640 | 640 | ||
641 | static struct ehci_hcd_omap_platform_data ehci_pdata __initdata = { | 641 | static struct usbhs_omap_board_data usbhs_bdata __initdata = { |
642 | 642 | ||
643 | .port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN, | 643 | .port_mode[0] = OMAP_USBHS_PORT_MODE_UNUSED, |
644 | .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, | 644 | .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, |
645 | .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, | 645 | .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, |
646 | 646 | ||
647 | .phy_reset = true, | 647 | .phy_reset = true, |
648 | /* PHY reset GPIO will be runtime programmed based on EVM version */ | 648 | /* PHY reset GPIO will be runtime programmed based on EVM version */ |
@@ -700,7 +700,7 @@ static void __init omap3_evm_init(void)
700 | 700 | ||
701 | /* setup EHCI phy reset config */ | 701 | /* setup EHCI phy reset config */ |
702 | omap_mux_init_gpio(21, OMAP_PIN_INPUT_PULLUP); | 702 | omap_mux_init_gpio(21, OMAP_PIN_INPUT_PULLUP); |
703 | ehci_pdata.reset_gpio_port[1] = 21; | 703 | usbhs_bdata.reset_gpio_port[1] = 21; |
704 | 704 | ||
705 | /* EVM REV >= E can supply 500mA with EXTVBUS programming */ | 705 | /* EVM REV >= E can supply 500mA with EXTVBUS programming */ |
706 | musb_board_data.power = 500; | 706 | musb_board_data.power = 500; |
@@ -708,10 +708,10 @@
708 | } else { | 708 | } else { |
709 | /* setup EHCI phy reset on MDC */ | 709 | /* setup EHCI phy reset on MDC */ |
710 | omap_mux_init_gpio(135, OMAP_PIN_OUTPUT); | 710 | omap_mux_init_gpio(135, OMAP_PIN_OUTPUT); |
711 | ehci_pdata.reset_gpio_port[1] = 135; | 711 | usbhs_bdata.reset_gpio_port[1] = 135; |
712 | } | 712 | } |
713 | usb_musb_init(&musb_board_data); | 713 | usb_musb_init(&musb_board_data); |
714 | usb_ehci_init(&ehci_pdata); | 714 | usbhs_init(&usbhs_bdata); |
715 | ads7846_dev_init(); | 715 | ads7846_dev_init(); |
716 | omap3evm_init_smsc911x(); | 716 | omap3evm_init_smsc911x(); |
717 | omap3_evm_display_init(); | 717 | omap3_evm_display_init(); |
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index 0b34beded11f..aa05f2e46a61 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -681,11 +681,11 @@ static struct platform_device *omap3pandora_devices[] __initdata = {
681 | &pandora_vwlan_device, | 681 | &pandora_vwlan_device, |
682 | }; | 682 | }; |
683 | 683 | ||
684 | static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { | 684 | static const struct usbhs_omap_board_data usbhs_bdata __initconst = { |
685 | 685 | ||
686 | .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, | 686 | .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, |
687 | .port_mode[1] = EHCI_HCD_OMAP_MODE_UNKNOWN, | 687 | .port_mode[1] = OMAP_USBHS_PORT_MODE_UNUSED, |
688 | .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, | 688 | .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, |
689 | 689 | ||
690 | .phy_reset = true, | 690 | .phy_reset = true, |
691 | .reset_gpio_port[0] = 16, | 691 | .reset_gpio_port[0] = 16, |
@@ -716,7 +716,7 @@ static void __init omap3pandora_init(void)
716 | spi_register_board_info(omap3pandora_spi_board_info, | 716 | spi_register_board_info(omap3pandora_spi_board_info, |
717 | ARRAY_SIZE(omap3pandora_spi_board_info)); | 717 | ARRAY_SIZE(omap3pandora_spi_board_info)); |
718 | omap3pandora_ads7846_init(); | 718 | omap3pandora_ads7846_init(); |
719 | usb_ehci_init(&ehci_pdata); | 719 | usbhs_init(&usbhs_bdata); |
720 | usb_musb_init(&musb_board_data); | 720 | usb_musb_init(&musb_board_data); |
721 | gpmc_nand_init(&pandora_nand_data); | 721 | gpmc_nand_init(&pandora_nand_data); |
722 | 722 | ||
diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c
index 2a2dad447e86..f6c87787cd4f 100644
--- a/arch/arm/mach-omap2/board-omap3stalker.c
+++ b/arch/arm/mach-omap2/board-omap3stalker.c
@@ -608,10 +608,10 @@ static struct platform_device *omap3_stalker_devices[] __initdata = {
608 | &keys_gpio, | 608 | &keys_gpio, |
609 | }; | 609 | }; |
610 | 610 | ||
611 | static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { | 611 | static struct usbhs_omap_board_data usbhs_bdata __initconst = { |
612 | .port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN, | 612 | .port_mode[0] = OMAP_USBHS_PORT_MODE_UNUSED, |
613 | .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, | 613 | .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, |
614 | .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, | 614 | .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, |
615 | 615 | ||
616 | .phy_reset = true, | 616 | .phy_reset = true, |
617 | .reset_gpio_port[0] = -EINVAL, | 617 | .reset_gpio_port[0] = -EINVAL, |
@@ -649,7 +649,7 @@ static void __init omap3_stalker_init(void)
649 | 649 | ||
650 | omap_serial_init(); | 650 | omap_serial_init(); |
651 | usb_musb_init(&musb_board_data); | 651 | usb_musb_init(&musb_board_data); |
652 | usb_ehci_init(&ehci_pdata); | 652 | usbhs_init(&usbhs_bdata); |
653 | ads7846_dev_init(); | 653 | ads7846_dev_init(); |
654 | 654 | ||
655 | omap_mux_init_gpio(21, OMAP_PIN_OUTPUT); | 655 | omap_mux_init_gpio(21, OMAP_PIN_OUTPUT); |
diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c
index db1f74fe6c4f..84cfddb19a74 100644
--- a/arch/arm/mach-omap2/board-omap3touchbook.c
+++ b/arch/arm/mach-omap2/board-omap3touchbook.c
@@ -468,11 +468,11 @@ static void __init omap3touchbook_flash_init(void)
468 | } | 468 | } |
469 | } | 469 | } |
470 | 470 | ||
471 | static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { | 471 | static const struct usbhs_omap_board_data usbhs_bdata __initconst = { |
472 | 472 | ||
473 | .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, | 473 | .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, |
474 | .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, | 474 | .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, |
475 | .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, | 475 | .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, |
476 | 476 | ||
477 | .phy_reset = true, | 477 | .phy_reset = true, |
478 | .reset_gpio_port[0] = -EINVAL, | 478 | .reset_gpio_port[0] = -EINVAL, |
@@ -527,7 +527,7 @@ static void __init omap3_touchbook_init(void)
527 | ARRAY_SIZE(omap3_ads7846_spi_board_info)); | 527 | ARRAY_SIZE(omap3_ads7846_spi_board_info)); |
528 | omap3_ads7846_init(); | 528 | omap3_ads7846_init(); |
529 | usb_musb_init(&musb_board_data); | 529 | usb_musb_init(&musb_board_data); |
530 | usb_ehci_init(&ehci_pdata); | 530 | usbhs_init(&usbhs_bdata); |
531 | omap3touchbook_flash_init(); | 531 | omap3touchbook_flash_init(); |
532 | 532 | ||
533 | /* Ensure SDRC pins are mux'd for self-refresh */ | 533 | /* Ensure SDRC pins are mux'd for self-refresh */ |
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index e944025d5ef8..ed61c1f5d5e6 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -83,10 +83,10 @@ static void __init omap4_panda_init_irq(void)
83 | gic_init_irq(); | 83 | gic_init_irq(); |
84 | } | 84 | } |
85 | 85 | ||
86 | static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { | 86 | static const struct usbhs_omap_board_data usbhs_bdata __initconst = { |
87 | .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, | 87 | .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, |
88 | .port_mode[1] = EHCI_HCD_OMAP_MODE_UNKNOWN, | 88 | .port_mode[1] = OMAP_USBHS_PORT_MODE_UNUSED, |
89 | .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, | 89 | .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, |
90 | .phy_reset = false, | 90 | .phy_reset = false, |
91 | .reset_gpio_port[0] = -EINVAL, | 91 | .reset_gpio_port[0] = -EINVAL, |
92 | .reset_gpio_port[1] = -EINVAL, | 92 | .reset_gpio_port[1] = -EINVAL, |
@@ -128,7 +128,7 @@ static void __init omap4_ehci_init(void)
128 | gpio_set_value(GPIO_HUB_NRESET, 0); | 128 | gpio_set_value(GPIO_HUB_NRESET, 0); |
129 | gpio_set_value(GPIO_HUB_NRESET, 1); | 129 | gpio_set_value(GPIO_HUB_NRESET, 1); |
130 | 130 | ||
131 | usb_ehci_init(&ehci_pdata); | 131 | usbhs_init(&usbhs_bdata); |
132 | 132 | ||
133 | /* enable power to hub */ | 133 | /* enable power to hub */ |
134 | gpio_set_value(GPIO_HUB_POWER, 1); | 134 | gpio_set_value(GPIO_HUB_POWER, 1); |
@@ -153,6 +153,7 @@ static struct twl4030_usb_data omap4_usbphy_data = {
153 | .phy_exit = omap4430_phy_exit, | 153 | .phy_exit = omap4430_phy_exit, |
154 | .phy_power = omap4430_phy_power, | 154 | .phy_power = omap4430_phy_power, |
155 | .phy_set_clock = omap4430_phy_set_clk, | 155 | .phy_set_clock = omap4430_phy_set_clk, |
156 | .phy_suspend = omap4430_phy_suspend, | ||
156 | }; | 157 | }; |
157 | 158 | ||
158 | static struct omap2_hsmmc_info mmc[] = { | 159 | static struct omap2_hsmmc_info mmc[] = { |
diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c
index cb26e5d8268d..08770ccec0f3 100644
--- a/arch/arm/mach-omap2/board-overo.c
+++ b/arch/arm/mach-omap2/board-overo.c
@@ -423,10 +423,10 @@ static struct platform_device *overo_devices[] __initdata = {
423 | &overo_lcd_device, | 423 | &overo_lcd_device, |
424 | }; | 424 | }; |
425 | 425 | ||
426 | static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { | 426 | static const struct usbhs_omap_board_data usbhs_bdata __initconst = { |
427 | .port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN, | 427 | .port_mode[0] = OMAP_USBHS_PORT_MODE_UNUSED, |
428 | .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, | 428 | .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, |
429 | .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, | 429 | .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, |
430 | 430 | ||
431 | .phy_reset = true, | 431 | .phy_reset = true, |
432 | .reset_gpio_port[0] = -EINVAL, | 432 | .reset_gpio_port[0] = -EINVAL, |
@@ -454,7 +454,7 @@ static void __init overo_init(void)
454 | omap_serial_init(); | 454 | omap_serial_init(); |
455 | overo_flash_init(); | 455 | overo_flash_init(); |
456 | usb_musb_init(&musb_board_data); | 456 | usb_musb_init(&musb_board_data); |
457 | usb_ehci_init(&ehci_pdata); | 457 | usbhs_init(&usbhs_bdata); |
458 | overo_ads7846_init(); | 458 | overo_ads7846_init(); |
459 | overo_init_smsc911x(); | 459 | overo_init_smsc911x(); |
460 | 460 | ||
diff --git a/arch/arm/mach-omap2/board-zoom.c b/arch/arm/mach-omap2/board-zoom.c
index e26754c24ee8..1dd195afa396 100644
--- a/arch/arm/mach-omap2/board-zoom.c
+++ b/arch/arm/mach-omap2/board-zoom.c
@@ -106,10 +106,10 @@ static struct mtd_partition zoom_nand_partitions[] = {
106 | }, | 106 | }, |
107 | }; | 107 | }; |
108 | 108 | ||
109 | static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { | 109 | static const struct usbhs_omap_board_data usbhs_bdata __initconst = { |
110 | .port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN, | 110 | .port_mode[0] = OMAP_USBHS_PORT_MODE_UNUSED, |
111 | .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, | 111 | .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, |
112 | .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, | 112 | .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, |
113 | .phy_reset = true, | 113 | .phy_reset = true, |
114 | .reset_gpio_port[0] = -EINVAL, | 114 | .reset_gpio_port[0] = -EINVAL, |
115 | .reset_gpio_port[1] = ZOOM3_EHCI_RESET_GPIO, | 115 | .reset_gpio_port[1] = ZOOM3_EHCI_RESET_GPIO, |
@@ -123,7 +123,7 @@ static void __init omap_zoom_init(void)
123 | } else if (machine_is_omap_zoom3()) { | 123 | } else if (machine_is_omap_zoom3()) { |
124 | omap3_mux_init(board_mux, OMAP_PACKAGE_CBP); | 124 | omap3_mux_init(board_mux, OMAP_PACKAGE_CBP); |
125 | omap_mux_init_gpio(ZOOM3_EHCI_RESET_GPIO, OMAP_PIN_OUTPUT); | 125 | omap_mux_init_gpio(ZOOM3_EHCI_RESET_GPIO, OMAP_PIN_OUTPUT); |
126 | usb_ehci_init(&ehci_pdata); | 126 | usbhs_init(&usbhs_bdata); |
127 | } | 127 | } |
128 | 128 | ||
129 | board_nand_init(zoom_nand_partitions, | 129 | board_nand_init(zoom_nand_partitions, |
diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c
index 403a4a1d3f9c..fbb1e30a73dc 100644
--- a/arch/arm/mach-omap2/clock3xxx_data.c
+++ b/arch/arm/mach-omap2/clock3xxx_data.c
@@ -3286,7 +3286,7 @@ static struct omap_clk omap3xxx_clks[] = {
3286 | CLK(NULL, "cpefuse_fck", &cpefuse_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), | 3286 | CLK(NULL, "cpefuse_fck", &cpefuse_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), |
3287 | CLK(NULL, "ts_fck", &ts_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), | 3287 | CLK(NULL, "ts_fck", &ts_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), |
3288 | CLK(NULL, "usbtll_fck", &usbtll_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), | 3288 | CLK(NULL, "usbtll_fck", &usbtll_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), |
3289 | CLK("ehci-omap.0", "usbtll_fck", &usbtll_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), | 3289 | CLK("usbhs-omap.0", "usbtll_fck", &usbtll_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), |
3290 | CLK("omap-mcbsp.1", "prcm_fck", &core_96m_fck, CK_3XXX), | 3290 | CLK("omap-mcbsp.1", "prcm_fck", &core_96m_fck, CK_3XXX), |
3291 | CLK("omap-mcbsp.5", "prcm_fck", &core_96m_fck, CK_3XXX), | 3291 | CLK("omap-mcbsp.5", "prcm_fck", &core_96m_fck, CK_3XXX), |
3292 | CLK(NULL, "core_96m_fck", &core_96m_fck, CK_3XXX), | 3292 | CLK(NULL, "core_96m_fck", &core_96m_fck, CK_3XXX), |
@@ -3322,7 +3322,7 @@ static struct omap_clk omap3xxx_clks[] = {
3322 | CLK(NULL, "pka_ick", &pka_ick, CK_34XX | CK_36XX), | 3322 | CLK(NULL, "pka_ick", &pka_ick, CK_34XX | CK_36XX), |
3323 | CLK(NULL, "core_l4_ick", &core_l4_ick, CK_3XXX), | 3323 | CLK(NULL, "core_l4_ick", &core_l4_ick, CK_3XXX), |
3324 | CLK(NULL, "usbtll_ick", &usbtll_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), | 3324 | CLK(NULL, "usbtll_ick", &usbtll_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), |
3325 | CLK("ehci-omap.0", "usbtll_ick", &usbtll_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), | 3325 | CLK("usbhs-omap.0", "usbtll_ick", &usbtll_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), |
3326 | CLK("mmci-omap-hs.2", "ick", &mmchs3_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), | 3326 | CLK("mmci-omap-hs.2", "ick", &mmchs3_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), |
3327 | CLK(NULL, "icr_ick", &icr_ick, CK_34XX | CK_36XX), | 3327 | CLK(NULL, "icr_ick", &icr_ick, CK_34XX | CK_36XX), |
3328 | CLK("omap-aes", "ick", &aes2_ick, CK_34XX | CK_36XX), | 3328 | CLK("omap-aes", "ick", &aes2_ick, CK_34XX | CK_36XX), |
@@ -3368,11 +3368,20 @@ static struct omap_clk omap3xxx_clks[] = { | |||
3368 | CLK(NULL, "cam_ick", &cam_ick, CK_34XX | CK_36XX), | 3368 | CLK(NULL, "cam_ick", &cam_ick, CK_34XX | CK_36XX), |
3369 | CLK(NULL, "csi2_96m_fck", &csi2_96m_fck, CK_34XX | CK_36XX), | 3369 | CLK(NULL, "csi2_96m_fck", &csi2_96m_fck, CK_34XX | CK_36XX), |
3370 | CLK(NULL, "usbhost_120m_fck", &usbhost_120m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), | 3370 | CLK(NULL, "usbhost_120m_fck", &usbhost_120m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), |
3371 | CLK("ehci-omap.0", "hs_fck", &usbhost_120m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), | 3371 | CLK("usbhs-omap.0", "hs_fck", &usbhost_120m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), |
3372 | CLK(NULL, "usbhost_48m_fck", &usbhost_48m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), | 3372 | CLK(NULL, "usbhost_48m_fck", &usbhost_48m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), |
3373 | CLK("ehci-omap.0", "fs_fck", &usbhost_48m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), | 3373 | CLK("usbhs-omap.0", "fs_fck", &usbhost_48m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), |
3374 | CLK(NULL, "usbhost_ick", &usbhost_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), | 3374 | CLK(NULL, "usbhost_ick", &usbhost_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), |
3375 | CLK("ehci-omap.0", "usbhost_ick", &usbhost_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), | 3375 | CLK("usbhs-omap.0", "usbhost_ick", &usbhost_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX), |
3376 | CLK("usbhs-omap.0", "utmi_p1_gfclk", &dummy_ck, CK_3XXX), | ||
3377 | CLK("usbhs-omap.0", "utmi_p2_gfclk", &dummy_ck, CK_3XXX), | ||
3378 | CLK("usbhs-omap.0", "xclk60mhsp1_ck", &dummy_ck, CK_3XXX), | ||
3379 | CLK("usbhs-omap.0", "xclk60mhsp2_ck", &dummy_ck, CK_3XXX), | ||
3380 | CLK("usbhs-omap.0", "usb_host_hs_utmi_p1_clk", &dummy_ck, CK_3XXX), | ||
3381 | CLK("usbhs-omap.0", "usb_host_hs_utmi_p2_clk", &dummy_ck, CK_3XXX), | ||
3382 | CLK("usbhs-omap.0", "usb_tll_hs_usb_ch0_clk", &dummy_ck, CK_3XXX), | ||
3383 | CLK("usbhs-omap.0", "usb_tll_hs_usb_ch1_clk", &dummy_ck, CK_3XXX), | ||
3384 | CLK("usbhs-omap.0", "init_60m_fclk", &dummy_ck, CK_3XXX), | ||
3376 | CLK(NULL, "usim_fck", &usim_fck, CK_3430ES2PLUS | CK_36XX), | 3385 | CLK(NULL, "usim_fck", &usim_fck, CK_3430ES2PLUS | CK_36XX), |
3377 | CLK(NULL, "gpt1_fck", &gpt1_fck, CK_3XXX), | 3386 | CLK(NULL, "gpt1_fck", &gpt1_fck, CK_3XXX), |
3378 | CLK(NULL, "wkup_32k_fck", &wkup_32k_fck, CK_3XXX), | 3387 | CLK(NULL, "wkup_32k_fck", &wkup_32k_fck, CK_3XXX), |
diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c index de9ec8ddd2ae..46fd3f674cac 100644 --- a/arch/arm/mach-omap2/clock44xx_data.c +++ b/arch/arm/mach-omap2/clock44xx_data.c | |||
@@ -3197,7 +3197,7 @@ static struct omap_clk omap44xx_clks[] = { | |||
3197 | CLK(NULL, "uart3_fck", &uart3_fck, CK_443X), | 3197 | CLK(NULL, "uart3_fck", &uart3_fck, CK_443X), |
3198 | CLK(NULL, "uart4_fck", &uart4_fck, CK_443X), | 3198 | CLK(NULL, "uart4_fck", &uart4_fck, CK_443X), |
3199 | CLK(NULL, "usb_host_fs_fck", &usb_host_fs_fck, CK_443X), | 3199 | CLK(NULL, "usb_host_fs_fck", &usb_host_fs_fck, CK_443X), |
3200 | CLK("ehci-omap.0", "fs_fck", &usb_host_fs_fck, CK_443X), | 3200 | CLK("usbhs-omap.0", "fs_fck", &usb_host_fs_fck, CK_443X), |
3201 | CLK(NULL, "utmi_p1_gfclk", &utmi_p1_gfclk, CK_443X), | 3201 | CLK(NULL, "utmi_p1_gfclk", &utmi_p1_gfclk, CK_443X), |
3202 | CLK(NULL, "usb_host_hs_utmi_p1_clk", &usb_host_hs_utmi_p1_clk, CK_443X), | 3202 | CLK(NULL, "usb_host_hs_utmi_p1_clk", &usb_host_hs_utmi_p1_clk, CK_443X), |
3203 | CLK(NULL, "utmi_p2_gfclk", &utmi_p2_gfclk, CK_443X), | 3203 | CLK(NULL, "utmi_p2_gfclk", &utmi_p2_gfclk, CK_443X), |
@@ -3209,8 +3209,8 @@ static struct omap_clk omap44xx_clks[] = { | |||
3209 | CLK(NULL, "usb_host_hs_hsic480m_p2_clk", &usb_host_hs_hsic480m_p2_clk, CK_443X), | 3209 | CLK(NULL, "usb_host_hs_hsic480m_p2_clk", &usb_host_hs_hsic480m_p2_clk, CK_443X), |
3210 | CLK(NULL, "usb_host_hs_func48mclk", &usb_host_hs_func48mclk, CK_443X), | 3210 | CLK(NULL, "usb_host_hs_func48mclk", &usb_host_hs_func48mclk, CK_443X), |
3211 | CLK(NULL, "usb_host_hs_fck", &usb_host_hs_fck, CK_443X), | 3211 | CLK(NULL, "usb_host_hs_fck", &usb_host_hs_fck, CK_443X), |
3212 | CLK("ehci-omap.0", "hs_fck", &usb_host_hs_fck, CK_443X), | 3212 | CLK("usbhs-omap.0", "hs_fck", &usb_host_hs_fck, CK_443X), |
3213 | CLK("ehci-omap.0", "usbhost_ick", &dummy_ck, CK_443X), | 3213 | CLK("usbhs-omap.0", "usbhost_ick", &dummy_ck, CK_443X), |
3214 | CLK(NULL, "otg_60m_gfclk", &otg_60m_gfclk, CK_443X), | 3214 | CLK(NULL, "otg_60m_gfclk", &otg_60m_gfclk, CK_443X), |
3215 | CLK(NULL, "usb_otg_hs_xclk", &usb_otg_hs_xclk, CK_443X), | 3215 | CLK(NULL, "usb_otg_hs_xclk", &usb_otg_hs_xclk, CK_443X), |
3216 | CLK("musb-omap2430", "ick", &usb_otg_hs_ick, CK_443X), | 3216 | CLK("musb-omap2430", "ick", &usb_otg_hs_ick, CK_443X), |
@@ -3219,8 +3219,8 @@ static struct omap_clk omap44xx_clks[] = { | |||
3219 | CLK(NULL, "usb_tll_hs_usb_ch0_clk", &usb_tll_hs_usb_ch0_clk, CK_443X), | 3219 | CLK(NULL, "usb_tll_hs_usb_ch0_clk", &usb_tll_hs_usb_ch0_clk, CK_443X), |
3220 | CLK(NULL, "usb_tll_hs_usb_ch1_clk", &usb_tll_hs_usb_ch1_clk, CK_443X), | 3220 | CLK(NULL, "usb_tll_hs_usb_ch1_clk", &usb_tll_hs_usb_ch1_clk, CK_443X), |
3221 | CLK(NULL, "usb_tll_hs_ick", &usb_tll_hs_ick, CK_443X), | 3221 | CLK(NULL, "usb_tll_hs_ick", &usb_tll_hs_ick, CK_443X), |
3222 | CLK("ehci-omap.0", "usbtll_ick", &usb_tll_hs_ick, CK_443X), | 3222 | CLK("usbhs-omap.0", "usbtll_ick", &usb_tll_hs_ick, CK_443X), |
3223 | CLK("ehci-omap.0", "usbtll_fck", &dummy_ck, CK_443X), | 3223 | CLK("usbhs-omap.0", "usbtll_fck", &dummy_ck, CK_443X), |
3224 | CLK(NULL, "usim_ck", &usim_ck, CK_443X), | 3224 | CLK(NULL, "usim_ck", &usim_ck, CK_443X), |
3225 | CLK(NULL, "usim_fclk", &usim_fclk, CK_443X), | 3225 | CLK(NULL, "usim_fclk", &usim_fclk, CK_443X), |
3226 | CLK(NULL, "usim_fck", &usim_fck, CK_443X), | 3226 | CLK(NULL, "usim_fck", &usim_fck, CK_443X), |
diff --git a/arch/arm/mach-omap2/omap_phy_internal.c b/arch/arm/mach-omap2/omap_phy_internal.c index 745252c60e32..ebe33df708bd 100644 --- a/arch/arm/mach-omap2/omap_phy_internal.c +++ b/arch/arm/mach-omap2/omap_phy_internal.c | |||
@@ -43,6 +43,7 @@ | |||
43 | 43 | ||
44 | static struct clk *phyclk, *clk48m, *clk32k; | 44 | static struct clk *phyclk, *clk48m, *clk32k; |
45 | static void __iomem *ctrl_base; | 45 | static void __iomem *ctrl_base; |
46 | static int usbotghs_control; | ||
46 | 47 | ||
47 | int omap4430_phy_init(struct device *dev) | 48 | int omap4430_phy_init(struct device *dev) |
48 | { | 49 | { |
@@ -103,13 +104,6 @@ int omap4430_phy_set_clk(struct device *dev, int on) | |||
103 | int omap4430_phy_power(struct device *dev, int ID, int on) | 104 | int omap4430_phy_power(struct device *dev, int ID, int on) |
104 | { | 105 | { |
105 | if (on) { | 106 | if (on) { |
106 | /* enabled the clocks */ | ||
107 | omap4430_phy_set_clk(dev, 1); | ||
108 | /* power on the phy */ | ||
109 | if (__raw_readl(ctrl_base + CONTROL_DEV_CONF) & PHY_PD) { | ||
110 | __raw_writel(~PHY_PD, ctrl_base + CONTROL_DEV_CONF); | ||
111 | mdelay(200); | ||
112 | } | ||
113 | if (ID) | 107 | if (ID) |
114 | /* enable VBUS valid, IDDIG ground */ | 108 | /* enable VBUS valid, IDDIG ground */ |
115 | __raw_writel(AVALID | VBUSVALID, ctrl_base + | 109 | __raw_writel(AVALID | VBUSVALID, ctrl_base + |
@@ -125,10 +119,31 @@ int omap4430_phy_power(struct device *dev, int ID, int on) | |||
125 | /* Enable session END and IDDIG to high impedance. */ | 119 | /* Enable session END and IDDIG to high impedance. */ |
126 | __raw_writel(SESSEND | IDDIG, ctrl_base + | 120 | __raw_writel(SESSEND | IDDIG, ctrl_base + |
127 | USBOTGHS_CONTROL); | 121 | USBOTGHS_CONTROL); |
122 | } | ||
123 | return 0; | ||
124 | } | ||
125 | |||
126 | int omap4430_phy_suspend(struct device *dev, int suspend) | ||
127 | { | ||
128 | if (suspend) { | ||
128 | /* Disable the clocks */ | 129 | /* Disable the clocks */ |
129 | omap4430_phy_set_clk(dev, 0); | 130 | omap4430_phy_set_clk(dev, 0); |
130 | /* Power down the phy */ | 131 | /* Power down the phy */ |
131 | __raw_writel(PHY_PD, ctrl_base + CONTROL_DEV_CONF); | 132 | __raw_writel(PHY_PD, ctrl_base + CONTROL_DEV_CONF); |
133 | |||
134 | /* save the context */ | ||
135 | usbotghs_control = __raw_readl(ctrl_base + USBOTGHS_CONTROL); | ||
136 | } else { | ||
137 | /* Enable the internal phy clocks */ | ||
138 | omap4430_phy_set_clk(dev, 1); | ||
139 | /* power on the phy */ | ||
140 | if (__raw_readl(ctrl_base + CONTROL_DEV_CONF) & PHY_PD) { | ||
141 | __raw_writel(~PHY_PD, ctrl_base + CONTROL_DEV_CONF); | ||
142 | mdelay(200); | ||
143 | } | ||
144 | |||
145 | /* restore the context */ | ||
146 | __raw_writel(usbotghs_control, ctrl_base + USBOTGHS_CONTROL); | ||
132 | } | 147 | } |
133 | 148 | ||
134 | return 0; | 149 | return 0; |
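The PHY power-up path moves out of omap4430_phy_power() into the new omap4430_phy_suspend(), which additionally saves and restores USBOTGHS_CONTROL across the power-down. The caller is not part of this excerpt; a hedged sketch of how a runtime-PM user such as the OMAP4 MUSB glue would be expected to use it (wrapper names are illustrative):

    /* Illustrative only: suspend gates the clocks, powers the PHY down and
     * saves context; resume reverses this and restores USBOTGHS_CONTROL. */
    static int musb_glue_runtime_suspend(struct device *dev)
    {
            return omap4430_phy_suspend(dev, 1);
    }

    static int musb_glue_runtime_resume(struct device *dev)
    {
            return omap4430_phy_suspend(dev, 0);
    }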
diff --git a/arch/arm/mach-omap2/usb-ehci.c b/arch/arm/mach-omap2/usb-host.c index 25eeadabc39b..89ae29847c59 100644 --- a/arch/arm/mach-omap2/usb-ehci.c +++ b/arch/arm/mach-omap2/usb-host.c | |||
@@ -1,14 +1,15 @@ | |||
1 | /* | 1 | /* |
2 | * linux/arch/arm/mach-omap2/usb-ehci.c | 2 | * usb-host.c - OMAP USB Host |
3 | * | 3 | * |
4 | * This file will contain the board specific details for the | 4 | * This file will contain the board specific details for the |
5 | * Synopsys EHCI host controller on OMAP3430 | 5 | * Synopsys EHCI/OHCI host controller on OMAP3430 and onwards |
6 | * | 6 | * |
7 | * Copyright (C) 2007 Texas Instruments | 7 | * Copyright (C) 2007-2011 Texas Instruments |
8 | * Author: Vikram Pandita <vikram.pandita@ti.com> | 8 | * Author: Vikram Pandita <vikram.pandita@ti.com> |
9 | * Author: Keshava Munegowda <keshava_mgowda@ti.com> | ||
9 | * | 10 | * |
10 | * Generalization by: | 11 | * Generalization by: |
11 | * Felipe Balbi <felipe.balbi@nokia.com> | 12 | * Felipe Balbi <balbi@ti.com> |
12 | * | 13 | * |
13 | * This program is free software; you can redistribute it and/or modify | 14 | * This program is free software; you can redistribute it and/or modify |
14 | * it under the terms of the GNU General Public License version 2 as | 15 | * it under the terms of the GNU General Public License version 2 as |
@@ -19,7 +20,7 @@ | |||
19 | #include <linux/errno.h> | 20 | #include <linux/errno.h> |
20 | #include <linux/delay.h> | 21 | #include <linux/delay.h> |
21 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
22 | #include <linux/clk.h> | 23 | #include <linux/slab.h> |
23 | #include <linux/dma-mapping.h> | 24 | #include <linux/dma-mapping.h> |
24 | 25 | ||
25 | #include <asm/io.h> | 26 | #include <asm/io.h> |
@@ -30,44 +31,56 @@ | |||
30 | 31 | ||
31 | #include "mux.h" | 32 | #include "mux.h" |
32 | 33 | ||
33 | #if defined(CONFIG_USB_EHCI_HCD) || defined(CONFIG_USB_EHCI_HCD_MODULE) | 34 | #ifdef CONFIG_MFD_OMAP_USB_HOST |
34 | 35 | ||
35 | static struct resource ehci_resources[] = { | 36 | #define OMAP_USBHS_DEVICE "usbhs-omap" |
37 | |||
38 | static struct resource usbhs_resources[] = { | ||
39 | { | ||
40 | .name = "uhh", | ||
41 | .flags = IORESOURCE_MEM, | ||
42 | }, | ||
36 | { | 43 | { |
44 | .name = "tll", | ||
37 | .flags = IORESOURCE_MEM, | 45 | .flags = IORESOURCE_MEM, |
38 | }, | 46 | }, |
39 | { | 47 | { |
48 | .name = "ehci", | ||
40 | .flags = IORESOURCE_MEM, | 49 | .flags = IORESOURCE_MEM, |
41 | }, | 50 | }, |
42 | { | 51 | { |
52 | .name = "ehci-irq", | ||
53 | .flags = IORESOURCE_IRQ, | ||
54 | }, | ||
55 | { | ||
56 | .name = "ohci", | ||
43 | .flags = IORESOURCE_MEM, | 57 | .flags = IORESOURCE_MEM, |
44 | }, | 58 | }, |
45 | { /* general IRQ */ | 59 | { |
46 | .flags = IORESOURCE_IRQ, | 60 | .name = "ohci-irq", |
61 | .flags = IORESOURCE_IRQ, | ||
47 | } | 62 | } |
48 | }; | 63 | }; |
49 | 64 | ||
50 | static u64 ehci_dmamask = ~(u32)0; | 65 | static struct platform_device usbhs_device = { |
51 | static struct platform_device ehci_device = { | 66 | .name = OMAP_USBHS_DEVICE, |
52 | .name = "ehci-omap", | 67 | .id = 0, |
53 | .id = 0, | 68 | .num_resources = ARRAY_SIZE(usbhs_resources), |
54 | .dev = { | 69 | .resource = usbhs_resources, |
55 | .dma_mask = &ehci_dmamask, | ||
56 | .coherent_dma_mask = 0xffffffff, | ||
57 | .platform_data = NULL, | ||
58 | }, | ||
59 | .num_resources = ARRAY_SIZE(ehci_resources), | ||
60 | .resource = ehci_resources, | ||
61 | }; | 70 | }; |
62 | 71 | ||
72 | static struct usbhs_omap_platform_data usbhs_data; | ||
73 | static struct ehci_hcd_omap_platform_data ehci_data; | ||
74 | static struct ohci_hcd_omap_platform_data ohci_data; | ||
75 | |||
63 | /* MUX settings for EHCI pins */ | 76 | /* MUX settings for EHCI pins */ |
64 | /* | 77 | /* |
65 | * setup_ehci_io_mux - initialize IO pad mux for USBHOST | 78 | * setup_ehci_io_mux - initialize IO pad mux for USBHOST |
66 | */ | 79 | */ |
67 | static void setup_ehci_io_mux(const enum ehci_hcd_omap_mode *port_mode) | 80 | static void setup_ehci_io_mux(const enum usbhs_omap_port_mode *port_mode) |
68 | { | 81 | { |
69 | switch (port_mode[0]) { | 82 | switch (port_mode[0]) { |
70 | case EHCI_HCD_OMAP_MODE_PHY: | 83 | case OMAP_EHCI_PORT_MODE_PHY: |
71 | omap_mux_init_signal("hsusb1_stp", OMAP_PIN_OUTPUT); | 84 | omap_mux_init_signal("hsusb1_stp", OMAP_PIN_OUTPUT); |
72 | omap_mux_init_signal("hsusb1_clk", OMAP_PIN_OUTPUT); | 85 | omap_mux_init_signal("hsusb1_clk", OMAP_PIN_OUTPUT); |
73 | omap_mux_init_signal("hsusb1_dir", OMAP_PIN_INPUT_PULLDOWN); | 86 | omap_mux_init_signal("hsusb1_dir", OMAP_PIN_INPUT_PULLDOWN); |
@@ -81,7 +94,7 @@ static void setup_ehci_io_mux(const enum ehci_hcd_omap_mode *port_mode) | |||
81 | omap_mux_init_signal("hsusb1_data6", OMAP_PIN_INPUT_PULLDOWN); | 94 | omap_mux_init_signal("hsusb1_data6", OMAP_PIN_INPUT_PULLDOWN); |
82 | omap_mux_init_signal("hsusb1_data7", OMAP_PIN_INPUT_PULLDOWN); | 95 | omap_mux_init_signal("hsusb1_data7", OMAP_PIN_INPUT_PULLDOWN); |
83 | break; | 96 | break; |
84 | case EHCI_HCD_OMAP_MODE_TLL: | 97 | case OMAP_EHCI_PORT_MODE_TLL: |
85 | omap_mux_init_signal("hsusb1_tll_stp", | 98 | omap_mux_init_signal("hsusb1_tll_stp", |
86 | OMAP_PIN_INPUT_PULLUP); | 99 | OMAP_PIN_INPUT_PULLUP); |
87 | omap_mux_init_signal("hsusb1_tll_clk", | 100 | omap_mux_init_signal("hsusb1_tll_clk", |
@@ -107,14 +120,14 @@ static void setup_ehci_io_mux(const enum ehci_hcd_omap_mode *port_mode) | |||
107 | omap_mux_init_signal("hsusb1_tll_data7", | 120 | omap_mux_init_signal("hsusb1_tll_data7", |
108 | OMAP_PIN_INPUT_PULLDOWN); | 121 | OMAP_PIN_INPUT_PULLDOWN); |
109 | break; | 122 | break; |
110 | case EHCI_HCD_OMAP_MODE_UNKNOWN: | 123 | case OMAP_USBHS_PORT_MODE_UNUSED: |
111 | /* FALLTHROUGH */ | 124 | /* FALLTHROUGH */ |
112 | default: | 125 | default: |
113 | break; | 126 | break; |
114 | } | 127 | } |
115 | 128 | ||
116 | switch (port_mode[1]) { | 129 | switch (port_mode[1]) { |
117 | case EHCI_HCD_OMAP_MODE_PHY: | 130 | case OMAP_EHCI_PORT_MODE_PHY: |
118 | omap_mux_init_signal("hsusb2_stp", OMAP_PIN_OUTPUT); | 131 | omap_mux_init_signal("hsusb2_stp", OMAP_PIN_OUTPUT); |
119 | omap_mux_init_signal("hsusb2_clk", OMAP_PIN_OUTPUT); | 132 | omap_mux_init_signal("hsusb2_clk", OMAP_PIN_OUTPUT); |
120 | omap_mux_init_signal("hsusb2_dir", OMAP_PIN_INPUT_PULLDOWN); | 133 | omap_mux_init_signal("hsusb2_dir", OMAP_PIN_INPUT_PULLDOWN); |
@@ -136,7 +149,7 @@ static void setup_ehci_io_mux(const enum ehci_hcd_omap_mode *port_mode) | |||
136 | omap_mux_init_signal("hsusb2_data7", | 149 | omap_mux_init_signal("hsusb2_data7", |
137 | OMAP_PIN_INPUT_PULLDOWN); | 150 | OMAP_PIN_INPUT_PULLDOWN); |
138 | break; | 151 | break; |
139 | case EHCI_HCD_OMAP_MODE_TLL: | 152 | case OMAP_EHCI_PORT_MODE_TLL: |
140 | omap_mux_init_signal("hsusb2_tll_stp", | 153 | omap_mux_init_signal("hsusb2_tll_stp", |
141 | OMAP_PIN_INPUT_PULLUP); | 154 | OMAP_PIN_INPUT_PULLUP); |
142 | omap_mux_init_signal("hsusb2_tll_clk", | 155 | omap_mux_init_signal("hsusb2_tll_clk", |
@@ -162,17 +175,17 @@ static void setup_ehci_io_mux(const enum ehci_hcd_omap_mode *port_mode) | |||
162 | omap_mux_init_signal("hsusb2_tll_data7", | 175 | omap_mux_init_signal("hsusb2_tll_data7", |
163 | OMAP_PIN_INPUT_PULLDOWN); | 176 | OMAP_PIN_INPUT_PULLDOWN); |
164 | break; | 177 | break; |
165 | case EHCI_HCD_OMAP_MODE_UNKNOWN: | 178 | case OMAP_USBHS_PORT_MODE_UNUSED: |
166 | /* FALLTHROUGH */ | 179 | /* FALLTHROUGH */ |
167 | default: | 180 | default: |
168 | break; | 181 | break; |
169 | } | 182 | } |
170 | 183 | ||
171 | switch (port_mode[2]) { | 184 | switch (port_mode[2]) { |
172 | case EHCI_HCD_OMAP_MODE_PHY: | 185 | case OMAP_EHCI_PORT_MODE_PHY: |
173 | printk(KERN_WARNING "Port3 can't be used in PHY mode\n"); | 186 | printk(KERN_WARNING "Port3 can't be used in PHY mode\n"); |
174 | break; | 187 | break; |
175 | case EHCI_HCD_OMAP_MODE_TLL: | 188 | case OMAP_EHCI_PORT_MODE_TLL: |
176 | omap_mux_init_signal("hsusb3_tll_stp", | 189 | omap_mux_init_signal("hsusb3_tll_stp", |
177 | OMAP_PIN_INPUT_PULLUP); | 190 | OMAP_PIN_INPUT_PULLUP); |
178 | omap_mux_init_signal("hsusb3_tll_clk", | 191 | omap_mux_init_signal("hsusb3_tll_clk", |
@@ -198,7 +211,7 @@ static void setup_ehci_io_mux(const enum ehci_hcd_omap_mode *port_mode) | |||
198 | omap_mux_init_signal("hsusb3_tll_data7", | 211 | omap_mux_init_signal("hsusb3_tll_data7", |
199 | OMAP_PIN_INPUT_PULLDOWN); | 212 | OMAP_PIN_INPUT_PULLDOWN); |
200 | break; | 213 | break; |
201 | case EHCI_HCD_OMAP_MODE_UNKNOWN: | 214 | case OMAP_USBHS_PORT_MODE_UNUSED: |
202 | /* FALLTHROUGH */ | 215 | /* FALLTHROUGH */ |
203 | default: | 216 | default: |
204 | break; | 217 | break; |
@@ -207,10 +220,10 @@ static void setup_ehci_io_mux(const enum ehci_hcd_omap_mode *port_mode) | |||
207 | return; | 220 | return; |
208 | } | 221 | } |
209 | 222 | ||
210 | static void setup_4430ehci_io_mux(const enum ehci_hcd_omap_mode *port_mode) | 223 | static void setup_4430ehci_io_mux(const enum usbhs_omap_port_mode *port_mode) |
211 | { | 224 | { |
212 | switch (port_mode[0]) { | 225 | switch (port_mode[0]) { |
213 | case EHCI_HCD_OMAP_MODE_PHY: | 226 | case OMAP_EHCI_PORT_MODE_PHY: |
214 | omap_mux_init_signal("usbb1_ulpiphy_stp", | 227 | omap_mux_init_signal("usbb1_ulpiphy_stp", |
215 | OMAP_PIN_OUTPUT); | 228 | OMAP_PIN_OUTPUT); |
216 | omap_mux_init_signal("usbb1_ulpiphy_clk", | 229 | omap_mux_init_signal("usbb1_ulpiphy_clk", |
@@ -236,7 +249,7 @@ static void setup_4430ehci_io_mux(const enum ehci_hcd_omap_mode *port_mode) | |||
236 | omap_mux_init_signal("usbb1_ulpiphy_dat7", | 249 | omap_mux_init_signal("usbb1_ulpiphy_dat7", |
237 | OMAP_PIN_INPUT_PULLDOWN); | 250 | OMAP_PIN_INPUT_PULLDOWN); |
238 | break; | 251 | break; |
239 | case EHCI_HCD_OMAP_MODE_TLL: | 252 | case OMAP_EHCI_PORT_MODE_TLL: |
240 | omap_mux_init_signal("usbb1_ulpitll_stp", | 253 | omap_mux_init_signal("usbb1_ulpitll_stp", |
241 | OMAP_PIN_INPUT_PULLUP); | 254 | OMAP_PIN_INPUT_PULLUP); |
242 | omap_mux_init_signal("usbb1_ulpitll_clk", | 255 | omap_mux_init_signal("usbb1_ulpitll_clk", |
@@ -262,12 +275,12 @@ static void setup_4430ehci_io_mux(const enum ehci_hcd_omap_mode *port_mode) | |||
262 | omap_mux_init_signal("usbb1_ulpitll_dat7", | 275 | omap_mux_init_signal("usbb1_ulpitll_dat7", |
263 | OMAP_PIN_INPUT_PULLDOWN); | 276 | OMAP_PIN_INPUT_PULLDOWN); |
264 | break; | 277 | break; |
265 | case EHCI_HCD_OMAP_MODE_UNKNOWN: | 278 | case OMAP_USBHS_PORT_MODE_UNUSED: |
266 | default: | 279 | default: |
267 | break; | 280 | break; |
268 | } | 281 | } |
269 | switch (port_mode[1]) { | 282 | switch (port_mode[1]) { |
270 | case EHCI_HCD_OMAP_MODE_PHY: | 283 | case OMAP_EHCI_PORT_MODE_PHY: |
271 | omap_mux_init_signal("usbb2_ulpiphy_stp", | 284 | omap_mux_init_signal("usbb2_ulpiphy_stp", |
272 | OMAP_PIN_OUTPUT); | 285 | OMAP_PIN_OUTPUT); |
273 | omap_mux_init_signal("usbb2_ulpiphy_clk", | 286 | omap_mux_init_signal("usbb2_ulpiphy_clk", |
@@ -293,7 +306,7 @@ static void setup_4430ehci_io_mux(const enum ehci_hcd_omap_mode *port_mode) | |||
293 | omap_mux_init_signal("usbb2_ulpiphy_dat7", | 306 | omap_mux_init_signal("usbb2_ulpiphy_dat7", |
294 | OMAP_PIN_INPUT_PULLDOWN); | 307 | OMAP_PIN_INPUT_PULLDOWN); |
295 | break; | 308 | break; |
296 | case EHCI_HCD_OMAP_MODE_TLL: | 309 | case OMAP_EHCI_PORT_MODE_TLL: |
297 | omap_mux_init_signal("usbb2_ulpitll_stp", | 310 | omap_mux_init_signal("usbb2_ulpitll_stp", |
298 | OMAP_PIN_INPUT_PULLUP); | 311 | OMAP_PIN_INPUT_PULLUP); |
299 | omap_mux_init_signal("usbb2_ulpitll_clk", | 312 | omap_mux_init_signal("usbb2_ulpitll_clk", |
@@ -319,90 +332,13 @@ static void setup_4430ehci_io_mux(const enum ehci_hcd_omap_mode *port_mode) | |||
319 | omap_mux_init_signal("usbb2_ulpitll_dat7", | 332 | omap_mux_init_signal("usbb2_ulpitll_dat7", |
320 | OMAP_PIN_INPUT_PULLDOWN); | 333 | OMAP_PIN_INPUT_PULLDOWN); |
321 | break; | 334 | break; |
322 | case EHCI_HCD_OMAP_MODE_UNKNOWN: | 335 | case OMAP_USBHS_PORT_MODE_UNUSED: |
323 | default: | 336 | default: |
324 | break; | 337 | break; |
325 | } | 338 | } |
326 | } | 339 | } |
327 | 340 | ||
328 | void __init usb_ehci_init(const struct ehci_hcd_omap_platform_data *pdata) | 341 | static void setup_ohci_io_mux(const enum usbhs_omap_port_mode *port_mode) |
329 | { | ||
330 | platform_device_add_data(&ehci_device, pdata, sizeof(*pdata)); | ||
331 | |||
332 | /* Setup Pin IO MUX for EHCI */ | ||
333 | if (cpu_is_omap34xx()) { | ||
334 | ehci_resources[0].start = OMAP34XX_EHCI_BASE; | ||
335 | ehci_resources[0].end = OMAP34XX_EHCI_BASE + SZ_1K - 1; | ||
336 | ehci_resources[1].start = OMAP34XX_UHH_CONFIG_BASE; | ||
337 | ehci_resources[1].end = OMAP34XX_UHH_CONFIG_BASE + SZ_1K - 1; | ||
338 | ehci_resources[2].start = OMAP34XX_USBTLL_BASE; | ||
339 | ehci_resources[2].end = OMAP34XX_USBTLL_BASE + SZ_4K - 1; | ||
340 | ehci_resources[3].start = INT_34XX_EHCI_IRQ; | ||
341 | setup_ehci_io_mux(pdata->port_mode); | ||
342 | } else if (cpu_is_omap44xx()) { | ||
343 | ehci_resources[0].start = OMAP44XX_HSUSB_EHCI_BASE; | ||
344 | ehci_resources[0].end = OMAP44XX_HSUSB_EHCI_BASE + SZ_1K - 1; | ||
345 | ehci_resources[1].start = OMAP44XX_UHH_CONFIG_BASE; | ||
346 | ehci_resources[1].end = OMAP44XX_UHH_CONFIG_BASE + SZ_2K - 1; | ||
347 | ehci_resources[2].start = OMAP44XX_USBTLL_BASE; | ||
348 | ehci_resources[2].end = OMAP44XX_USBTLL_BASE + SZ_4K - 1; | ||
349 | ehci_resources[3].start = OMAP44XX_IRQ_EHCI; | ||
350 | setup_4430ehci_io_mux(pdata->port_mode); | ||
351 | } | ||
352 | |||
353 | if (platform_device_register(&ehci_device) < 0) { | ||
354 | printk(KERN_ERR "Unable to register HS-USB (EHCI) device\n"); | ||
355 | return; | ||
356 | } | ||
357 | } | ||
358 | |||
359 | #else | ||
360 | |||
361 | void __init usb_ehci_init(const struct ehci_hcd_omap_platform_data *pdata) | ||
362 | |||
363 | { | ||
364 | } | ||
365 | |||
366 | #endif /* CONFIG_USB_EHCI_HCD */ | ||
367 | |||
368 | #if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) | ||
369 | |||
370 | static struct resource ohci_resources[] = { | ||
371 | { | ||
372 | .start = OMAP34XX_OHCI_BASE, | ||
373 | .end = OMAP34XX_OHCI_BASE + SZ_1K - 1, | ||
374 | .flags = IORESOURCE_MEM, | ||
375 | }, | ||
376 | { | ||
377 | .start = OMAP34XX_UHH_CONFIG_BASE, | ||
378 | .end = OMAP34XX_UHH_CONFIG_BASE + SZ_1K - 1, | ||
379 | .flags = IORESOURCE_MEM, | ||
380 | }, | ||
381 | { | ||
382 | .start = OMAP34XX_USBTLL_BASE, | ||
383 | .end = OMAP34XX_USBTLL_BASE + SZ_4K - 1, | ||
384 | .flags = IORESOURCE_MEM, | ||
385 | }, | ||
386 | { /* general IRQ */ | ||
387 | .start = INT_34XX_OHCI_IRQ, | ||
388 | .flags = IORESOURCE_IRQ, | ||
389 | } | ||
390 | }; | ||
391 | |||
392 | static u64 ohci_dmamask = DMA_BIT_MASK(32); | ||
393 | |||
394 | static struct platform_device ohci_device = { | ||
395 | .name = "ohci-omap3", | ||
396 | .id = 0, | ||
397 | .dev = { | ||
398 | .dma_mask = &ohci_dmamask, | ||
399 | .coherent_dma_mask = 0xffffffff, | ||
400 | }, | ||
401 | .num_resources = ARRAY_SIZE(ohci_resources), | ||
402 | .resource = ohci_resources, | ||
403 | }; | ||
404 | |||
405 | static void setup_ohci_io_mux(const enum ohci_omap3_port_mode *port_mode) | ||
406 | { | 342 | { |
407 | switch (port_mode[0]) { | 343 | switch (port_mode[0]) { |
408 | case OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0: | 344 | case OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0: |
@@ -430,7 +366,7 @@ static void setup_ohci_io_mux(const enum ohci_omap3_port_mode *port_mode) | |||
430 | omap_mux_init_signal("mm1_txdat", | 366 | omap_mux_init_signal("mm1_txdat", |
431 | OMAP_PIN_INPUT_PULLDOWN); | 367 | OMAP_PIN_INPUT_PULLDOWN); |
432 | break; | 368 | break; |
433 | case OMAP_OHCI_PORT_MODE_UNUSED: | 369 | case OMAP_USBHS_PORT_MODE_UNUSED: |
434 | /* FALLTHROUGH */ | 370 | /* FALLTHROUGH */ |
435 | default: | 371 | default: |
436 | break; | 372 | break; |
@@ -461,7 +397,7 @@ static void setup_ohci_io_mux(const enum ohci_omap3_port_mode *port_mode) | |||
461 | omap_mux_init_signal("mm2_txdat", | 397 | omap_mux_init_signal("mm2_txdat", |
462 | OMAP_PIN_INPUT_PULLDOWN); | 398 | OMAP_PIN_INPUT_PULLDOWN); |
463 | break; | 399 | break; |
464 | case OMAP_OHCI_PORT_MODE_UNUSED: | 400 | case OMAP_USBHS_PORT_MODE_UNUSED: |
465 | /* FALLTHROUGH */ | 401 | /* FALLTHROUGH */ |
466 | default: | 402 | default: |
467 | break; | 403 | break; |
@@ -492,31 +428,147 @@ static void setup_ohci_io_mux(const enum ohci_omap3_port_mode *port_mode) | |||
492 | omap_mux_init_signal("mm3_txdat", | 428 | omap_mux_init_signal("mm3_txdat", |
493 | OMAP_PIN_INPUT_PULLDOWN); | 429 | OMAP_PIN_INPUT_PULLDOWN); |
494 | break; | 430 | break; |
495 | case OMAP_OHCI_PORT_MODE_UNUSED: | 431 | case OMAP_USBHS_PORT_MODE_UNUSED: |
496 | /* FALLTHROUGH */ | 432 | /* FALLTHROUGH */ |
497 | default: | 433 | default: |
498 | break; | 434 | break; |
499 | } | 435 | } |
500 | } | 436 | } |
501 | 437 | ||
502 | void __init usb_ohci_init(const struct ohci_hcd_omap_platform_data *pdata) | 438 | static void setup_4430ohci_io_mux(const enum usbhs_omap_port_mode *port_mode) |
503 | { | 439 | { |
504 | platform_device_add_data(&ohci_device, pdata, sizeof(*pdata)); | 440 | switch (port_mode[0]) { |
441 | case OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0: | ||
442 | case OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM: | ||
443 | case OMAP_OHCI_PORT_MODE_TLL_6PIN_DATSE0: | ||
444 | case OMAP_OHCI_PORT_MODE_TLL_6PIN_DPDM: | ||
445 | omap_mux_init_signal("usbb1_mm_rxdp", | ||
446 | OMAP_PIN_INPUT_PULLDOWN); | ||
447 | omap_mux_init_signal("usbb1_mm_rxdm", | ||
448 | OMAP_PIN_INPUT_PULLDOWN); | ||
505 | 449 | ||
506 | /* Setup Pin IO MUX for OHCI */ | 450 | case OMAP_OHCI_PORT_MODE_PHY_4PIN_DPDM: |
507 | if (cpu_is_omap34xx()) | 451 | case OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM: |
452 | omap_mux_init_signal("usbb1_mm_rxrcv", | ||
453 | OMAP_PIN_INPUT_PULLDOWN); | ||
454 | |||
455 | case OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0: | ||
456 | case OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0: | ||
457 | omap_mux_init_signal("usbb1_mm_txen", | ||
458 | OMAP_PIN_INPUT_PULLDOWN); | ||
459 | |||
460 | |||
461 | case OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0: | ||
462 | case OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM: | ||
463 | omap_mux_init_signal("usbb1_mm_txdat", | ||
464 | OMAP_PIN_INPUT_PULLDOWN); | ||
465 | omap_mux_init_signal("usbb1_mm_txse0", | ||
466 | OMAP_PIN_INPUT_PULLDOWN); | ||
467 | break; | ||
468 | |||
469 | case OMAP_USBHS_PORT_MODE_UNUSED: | ||
470 | default: | ||
471 | break; | ||
472 | } | ||
473 | |||
474 | switch (port_mode[1]) { | ||
475 | case OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0: | ||
476 | case OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM: | ||
477 | case OMAP_OHCI_PORT_MODE_TLL_6PIN_DATSE0: | ||
478 | case OMAP_OHCI_PORT_MODE_TLL_6PIN_DPDM: | ||
479 | omap_mux_init_signal("usbb2_mm_rxdp", | ||
480 | OMAP_PIN_INPUT_PULLDOWN); | ||
481 | omap_mux_init_signal("usbb2_mm_rxdm", | ||
482 | OMAP_PIN_INPUT_PULLDOWN); | ||
483 | |||
484 | case OMAP_OHCI_PORT_MODE_PHY_4PIN_DPDM: | ||
485 | case OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM: | ||
486 | omap_mux_init_signal("usbb2_mm_rxrcv", | ||
487 | OMAP_PIN_INPUT_PULLDOWN); | ||
488 | |||
489 | case OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0: | ||
490 | case OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0: | ||
491 | omap_mux_init_signal("usbb2_mm_txen", | ||
492 | OMAP_PIN_INPUT_PULLDOWN); | ||
493 | |||
494 | |||
495 | case OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0: | ||
496 | case OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM: | ||
497 | omap_mux_init_signal("usbb2_mm_txdat", | ||
498 | OMAP_PIN_INPUT_PULLDOWN); | ||
499 | omap_mux_init_signal("usbb2_mm_txse0", | ||
500 | OMAP_PIN_INPUT_PULLDOWN); | ||
501 | break; | ||
502 | |||
503 | case OMAP_USBHS_PORT_MODE_UNUSED: | ||
504 | default: | ||
505 | break; | ||
506 | } | ||
507 | } | ||
508 | |||
509 | void __init usbhs_init(const struct usbhs_omap_board_data *pdata) | ||
510 | { | ||
511 | int i; | ||
512 | |||
513 | for (i = 0; i < OMAP3_HS_USB_PORTS; i++) { | ||
514 | usbhs_data.port_mode[i] = pdata->port_mode[i]; | ||
515 | ohci_data.port_mode[i] = pdata->port_mode[i]; | ||
516 | ehci_data.port_mode[i] = pdata->port_mode[i]; | ||
517 | ehci_data.reset_gpio_port[i] = pdata->reset_gpio_port[i]; | ||
518 | ehci_data.regulator[i] = pdata->regulator[i]; | ||
519 | } | ||
520 | ehci_data.phy_reset = pdata->phy_reset; | ||
521 | ohci_data.es2_compatibility = pdata->es2_compatibility; | ||
522 | usbhs_data.ehci_data = &ehci_data; | ||
523 | usbhs_data.ohci_data = &ohci_data; | ||
524 | |||
525 | if (cpu_is_omap34xx()) { | ||
526 | usbhs_resources[0].start = OMAP34XX_UHH_CONFIG_BASE; | ||
527 | usbhs_resources[0].end = OMAP34XX_UHH_CONFIG_BASE + SZ_1K - 1; | ||
528 | usbhs_resources[1].start = OMAP34XX_USBTLL_BASE; | ||
529 | usbhs_resources[1].end = OMAP34XX_USBTLL_BASE + SZ_4K - 1; | ||
530 | usbhs_resources[2].start = OMAP34XX_EHCI_BASE; | ||
531 | usbhs_resources[2].end = OMAP34XX_EHCI_BASE + SZ_1K - 1; | ||
532 | usbhs_resources[3].start = INT_34XX_EHCI_IRQ; | ||
533 | usbhs_resources[4].start = OMAP34XX_OHCI_BASE; | ||
534 | usbhs_resources[4].end = OMAP34XX_OHCI_BASE + SZ_1K - 1; | ||
535 | usbhs_resources[5].start = INT_34XX_OHCI_IRQ; | ||
536 | setup_ehci_io_mux(pdata->port_mode); | ||
508 | setup_ohci_io_mux(pdata->port_mode); | 537 | setup_ohci_io_mux(pdata->port_mode); |
538 | } else if (cpu_is_omap44xx()) { | ||
539 | usbhs_resources[0].start = OMAP44XX_UHH_CONFIG_BASE; | ||
540 | usbhs_resources[0].end = OMAP44XX_UHH_CONFIG_BASE + SZ_1K - 1; | ||
541 | usbhs_resources[1].start = OMAP44XX_USBTLL_BASE; | ||
542 | usbhs_resources[1].end = OMAP44XX_USBTLL_BASE + SZ_4K - 1; | ||
543 | usbhs_resources[2].start = OMAP44XX_HSUSB_EHCI_BASE; | ||
544 | usbhs_resources[2].end = OMAP44XX_HSUSB_EHCI_BASE + SZ_1K - 1; | ||
545 | usbhs_resources[3].start = OMAP44XX_IRQ_EHCI; | ||
546 | usbhs_resources[4].start = OMAP44XX_HSUSB_OHCI_BASE; | ||
547 | usbhs_resources[4].end = OMAP44XX_HSUSB_OHCI_BASE + SZ_1K - 1; | ||
548 | usbhs_resources[5].start = OMAP44XX_IRQ_OHCI; | ||
549 | setup_4430ehci_io_mux(pdata->port_mode); | ||
550 | setup_4430ohci_io_mux(pdata->port_mode); | ||
551 | } | ||
509 | 552 | ||
510 | if (platform_device_register(&ohci_device) < 0) { | 553 | if (platform_device_add_data(&usbhs_device, |
511 | pr_err("Unable to register FS-USB (OHCI) device\n"); | 554 | &usbhs_data, sizeof(usbhs_data)) < 0) { |
512 | return; | 555 | printk(KERN_ERR "USBHS platform_device_add_data failed\n"); |
556 | goto init_end; | ||
513 | } | 557 | } |
558 | |||
559 | if (platform_device_register(&usbhs_device) < 0) | ||
560 | printk(KERN_ERR "USBHS platform_device_register failed\n"); | ||
561 | |||
562 | init_end: | ||
563 | return; | ||
514 | } | 564 | } |
515 | 565 | ||
516 | #else | 566 | #else |
517 | 567 | ||
518 | void __init usb_ohci_init(const struct ohci_hcd_omap_platform_data *pdata) | 568 | void __init usbhs_init(const struct usbhs_omap_board_data *pdata) |
519 | { | 569 | { |
520 | } | 570 | } |
521 | 571 | ||
522 | #endif /* CONFIG_USB_OHCI_HCD */ | 572 | #endif |
573 | |||
574 | |||
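usbhs_init() now fills in six named resources and registers a single "usbhs-omap" device carrying combined EHCI and OHCI platform data, instead of registering separate ehci-omap and ohci-omap3 devices. The consuming MFD driver is outside this patch; a minimal sketch, assuming it retrieves the regions by name:

    #include <linux/io.h>
    #include <linux/ioport.h>
    #include <linux/platform_device.h>

    /* Illustrative only: named-resource lookup on the "usbhs-omap" device.
     * "tll", "ehci"/"ehci-irq" and "ohci"/"ohci-irq" follow the same pattern. */
    static void __iomem *usbhs_map_uhh(struct platform_device *pdev)
    {
            struct resource *res;

            res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "uhh");
            if (!res)
                    return NULL;
            return ioremap(res->start, resource_size(res));
    }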
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c index 5298949d4b11..241fc94b4116 100644 --- a/arch/arm/mach-omap2/usb-musb.c +++ b/arch/arm/mach-omap2/usb-musb.c | |||
@@ -214,6 +214,10 @@ void __init usb_musb_init(struct omap_musb_board_data *board_data) | |||
214 | 214 | ||
215 | if (platform_device_register(&musb_device) < 0) | 215 | if (platform_device_register(&musb_device) < 0) |
216 | printk(KERN_ERR "Unable to register HS-USB (MUSB) device\n"); | 216 | printk(KERN_ERR "Unable to register HS-USB (MUSB) device\n"); |
217 | |||
218 | if (cpu_is_omap44xx()) | ||
219 | omap4430_phy_init(dev); | ||
220 | |||
217 | } | 221 | } |
218 | 222 | ||
219 | #else | 223 | #else |
diff --git a/arch/arm/mach-s3c2410/mach-h1940.c b/arch/arm/mach-s3c2410/mach-h1940.c index 1a81fe12ccd7..1e93f176c1de 100644 --- a/arch/arm/mach-s3c2410/mach-h1940.c +++ b/arch/arm/mach-s3c2410/mach-h1940.c | |||
@@ -162,29 +162,10 @@ struct gpio_chip h1940_latch_gpiochip = { | |||
162 | .get = h1940_gpiolib_latch_get, | 162 | .get = h1940_gpiolib_latch_get, |
163 | }; | 163 | }; |
164 | 164 | ||
165 | static void h1940_udc_pullup(enum s3c2410_udc_cmd_e cmd) | ||
166 | { | ||
167 | printk(KERN_DEBUG "udc: pullup(%d)\n",cmd); | ||
168 | |||
169 | switch (cmd) | ||
170 | { | ||
171 | case S3C2410_UDC_P_ENABLE : | ||
172 | gpio_set_value(H1940_LATCH_USB_DP, 1); | ||
173 | break; | ||
174 | case S3C2410_UDC_P_DISABLE : | ||
175 | gpio_set_value(H1940_LATCH_USB_DP, 0); | ||
176 | break; | ||
177 | case S3C2410_UDC_P_RESET : | ||
178 | break; | ||
179 | default: | ||
180 | break; | ||
181 | } | ||
182 | } | ||
183 | |||
184 | static struct s3c2410_udc_mach_info h1940_udc_cfg __initdata = { | 165 | static struct s3c2410_udc_mach_info h1940_udc_cfg __initdata = { |
185 | .udc_command = h1940_udc_pullup, | ||
186 | .vbus_pin = S3C2410_GPG(5), | 166 | .vbus_pin = S3C2410_GPG(5), |
187 | .vbus_pin_inverted = 1, | 167 | .vbus_pin_inverted = 1, |
168 | .pullup_pin = H1940_LATCH_USB_DP, | ||
188 | }; | 169 | }; |
189 | 170 | ||
190 | static struct s3c2410_ts_mach_info h1940_ts_cfg __initdata = { | 171 | static struct s3c2410_ts_mach_info h1940_ts_cfg __initdata = { |
@@ -475,9 +456,6 @@ static void __init h1940_init(void) | |||
475 | gpio_direction_output(H1940_LATCH_LCD_P4, 0); | 456 | gpio_direction_output(H1940_LATCH_LCD_P4, 0); |
476 | gpio_direction_output(H1940_LATCH_MAX1698_nSHUTDOWN, 0); | 457 | gpio_direction_output(H1940_LATCH_MAX1698_nSHUTDOWN, 0); |
477 | 458 | ||
478 | gpio_request(H1940_LATCH_USB_DP, "USB pullup"); | ||
479 | gpio_direction_output(H1940_LATCH_USB_DP, 0); | ||
480 | |||
481 | gpio_request(H1940_LATCH_SD_POWER, "SD power"); | 459 | gpio_request(H1940_LATCH_SD_POWER, "SD power"); |
482 | gpio_direction_output(H1940_LATCH_SD_POWER, 0); | 460 | gpio_direction_output(H1940_LATCH_SD_POWER, 0); |
483 | 461 | ||
diff --git a/arch/arm/mach-s3c2410/mach-n30.c b/arch/arm/mach-s3c2410/mach-n30.c index 271b9aa6d40a..66f44440d5d3 100644 --- a/arch/arm/mach-s3c2410/mach-n30.c +++ b/arch/arm/mach-s3c2410/mach-n30.c | |||
@@ -84,26 +84,10 @@ static struct s3c2410_uartcfg n30_uartcfgs[] = { | |||
84 | }, | 84 | }, |
85 | }; | 85 | }; |
86 | 86 | ||
87 | static void n30_udc_pullup(enum s3c2410_udc_cmd_e cmd) | ||
88 | { | ||
89 | switch (cmd) { | ||
90 | case S3C2410_UDC_P_ENABLE : | ||
91 | gpio_set_value(S3C2410_GPB(3), 1); | ||
92 | break; | ||
93 | case S3C2410_UDC_P_DISABLE : | ||
94 | gpio_set_value(S3C2410_GPB(3), 0); | ||
95 | break; | ||
96 | case S3C2410_UDC_P_RESET : | ||
97 | break; | ||
98 | default: | ||
99 | break; | ||
100 | } | ||
101 | } | ||
102 | |||
103 | static struct s3c2410_udc_mach_info n30_udc_cfg __initdata = { | 87 | static struct s3c2410_udc_mach_info n30_udc_cfg __initdata = { |
104 | .udc_command = n30_udc_pullup, | ||
105 | .vbus_pin = S3C2410_GPG(1), | 88 | .vbus_pin = S3C2410_GPG(1), |
106 | .vbus_pin_inverted = 0, | 89 | .vbus_pin_inverted = 0, |
90 | .pullup_pin = S3C2410_GPB(3), | ||
107 | }; | 91 | }; |
108 | 92 | ||
109 | static struct gpio_keys_button n30_buttons[] = { | 93 | static struct gpio_keys_button n30_buttons[] = { |
@@ -596,9 +580,6 @@ static void __init n30_init(void) | |||
596 | 580 | ||
597 | platform_add_devices(n35_devices, ARRAY_SIZE(n35_devices)); | 581 | platform_add_devices(n35_devices, ARRAY_SIZE(n35_devices)); |
598 | } | 582 | } |
599 | |||
600 | WARN_ON(gpio_request(S3C2410_GPB(3), "udc pup")); | ||
601 | gpio_direction_output(S3C2410_GPB(3), 0); | ||
602 | } | 583 | } |
603 | 584 | ||
604 | MACHINE_START(N30, "Acer-N30") | 585 | MACHINE_START(N30, "Acer-N30") |
diff --git a/arch/arm/mach-s3c2412/mach-smdk2413.c b/arch/arm/mach-s3c2412/mach-smdk2413.c index 8e5758bdd666..834cfb61bcfe 100644 --- a/arch/arm/mach-s3c2412/mach-smdk2413.c +++ b/arch/arm/mach-s3c2412/mach-smdk2413.c | |||
@@ -78,28 +78,9 @@ static struct s3c2410_uartcfg smdk2413_uartcfgs[] __initdata = { | |||
78 | } | 78 | } |
79 | }; | 79 | }; |
80 | 80 | ||
81 | static void smdk2413_udc_pullup(enum s3c2410_udc_cmd_e cmd) | ||
82 | { | ||
83 | printk(KERN_DEBUG "udc: pullup(%d)\n",cmd); | ||
84 | |||
85 | switch (cmd) | ||
86 | { | ||
87 | case S3C2410_UDC_P_ENABLE : | ||
88 | gpio_set_value(S3C2410_GPF(2), 1); | ||
89 | break; | ||
90 | case S3C2410_UDC_P_DISABLE : | ||
91 | gpio_set_value(S3C2410_GPF(2), 0); | ||
92 | break; | ||
93 | case S3C2410_UDC_P_RESET : | ||
94 | break; | ||
95 | default: | ||
96 | break; | ||
97 | } | ||
98 | } | ||
99 | |||
100 | 81 | ||
101 | static struct s3c2410_udc_mach_info smdk2413_udc_cfg __initdata = { | 82 | static struct s3c2410_udc_mach_info smdk2413_udc_cfg __initdata = { |
102 | .udc_command = smdk2413_udc_pullup, | 83 | .pullup_pin = S3C2410_GPF(2), |
103 | }; | 84 | }; |
104 | 85 | ||
105 | 86 | ||
@@ -133,9 +114,6 @@ static void __init smdk2413_machine_init(void) | |||
133 | { /* Turn off suspend on both USB ports, and switch the | 114 | { /* Turn off suspend on both USB ports, and switch the |
134 | * selectable USB port to USB device mode. */ | 115 | * selectable USB port to USB device mode. */ |
135 | 116 | ||
136 | WARN_ON(gpio_request(S3C2410_GPF(2), "udc pull")); | ||
137 | gpio_direction_output(S3C2410_GPF(2), 0); | ||
138 | |||
139 | s3c2410_modify_misccr(S3C2410_MISCCR_USBHOST | | 117 | s3c2410_modify_misccr(S3C2410_MISCCR_USBHOST | |
140 | S3C2410_MISCCR_USBSUSPND0 | | 118 | S3C2410_MISCCR_USBSUSPND0 | |
141 | S3C2410_MISCCR_USBSUSPND1, 0x0); | 119 | S3C2410_MISCCR_USBSUSPND1, 0x0); |
diff --git a/arch/arm/mach-s3c2440/mach-gta02.c b/arch/arm/mach-s3c2440/mach-gta02.c index 9f2c14ec7181..37405d9abe32 100644 --- a/arch/arm/mach-s3c2440/mach-gta02.c +++ b/arch/arm/mach-s3c2440/mach-gta02.c | |||
@@ -455,28 +455,10 @@ static struct s3c2410_platform_nand __initdata gta02_nand_info = { | |||
455 | }; | 455 | }; |
456 | 456 | ||
457 | 457 | ||
458 | static void gta02_udc_command(enum s3c2410_udc_cmd_e cmd) | ||
459 | { | ||
460 | switch (cmd) { | ||
461 | case S3C2410_UDC_P_ENABLE: | ||
462 | pr_debug("%s S3C2410_UDC_P_ENABLE\n", __func__); | ||
463 | gpio_direction_output(GTA02_GPIO_USB_PULLUP, 1); | ||
464 | break; | ||
465 | case S3C2410_UDC_P_DISABLE: | ||
466 | pr_debug("%s S3C2410_UDC_P_DISABLE\n", __func__); | ||
467 | gpio_direction_output(GTA02_GPIO_USB_PULLUP, 0); | ||
468 | break; | ||
469 | case S3C2410_UDC_P_RESET: | ||
470 | pr_debug("%s S3C2410_UDC_P_RESET\n", __func__); | ||
471 | /* FIXME: Do something here. */ | ||
472 | } | ||
473 | } | ||
474 | |||
475 | /* Get PMU to set USB current limit accordingly. */ | 458 | /* Get PMU to set USB current limit accordingly. */ |
476 | static struct s3c2410_udc_mach_info gta02_udc_cfg = { | 459 | static struct s3c2410_udc_mach_info gta02_udc_cfg __initdata = { |
477 | .vbus_draw = gta02_udc_vbus_draw, | 460 | .vbus_draw = gta02_udc_vbus_draw, |
478 | .udc_command = gta02_udc_command, | 461 | .pullup_pin = GTA02_GPIO_USB_PULLUP, |
479 | |||
480 | }; | 462 | }; |
481 | 463 | ||
482 | /* USB */ | 464 | /* USB */ |
diff --git a/arch/arm/mach-s3c2440/mach-mini2440.c b/arch/arm/mach-s3c2440/mach-mini2440.c index f62bb4c793bd..d80f129bca94 100644 --- a/arch/arm/mach-s3c2440/mach-mini2440.c +++ b/arch/arm/mach-s3c2440/mach-mini2440.c | |||
@@ -97,26 +97,8 @@ static struct s3c2410_uartcfg mini2440_uartcfgs[] __initdata = { | |||
97 | 97 | ||
98 | /* USB device UDC support */ | 98 | /* USB device UDC support */ |
99 | 99 | ||
100 | static void mini2440_udc_pullup(enum s3c2410_udc_cmd_e cmd) | ||
101 | { | ||
102 | pr_debug("udc: pullup(%d)\n", cmd); | ||
103 | |||
104 | switch (cmd) { | ||
105 | case S3C2410_UDC_P_ENABLE : | ||
106 | gpio_set_value(S3C2410_GPC(5), 1); | ||
107 | break; | ||
108 | case S3C2410_UDC_P_DISABLE : | ||
109 | gpio_set_value(S3C2410_GPC(5), 0); | ||
110 | break; | ||
111 | case S3C2410_UDC_P_RESET : | ||
112 | break; | ||
113 | default: | ||
114 | break; | ||
115 | } | ||
116 | } | ||
117 | |||
118 | static struct s3c2410_udc_mach_info mini2440_udc_cfg __initdata = { | 100 | static struct s3c2410_udc_mach_info mini2440_udc_cfg __initdata = { |
119 | .udc_command = mini2440_udc_pullup, | 101 | .pullup_pin = S3C2410_GPC(5), |
120 | }; | 102 | }; |
121 | 103 | ||
122 | 104 | ||
@@ -644,10 +626,6 @@ static void __init mini2440_init(void) | |||
644 | s3c2410_gpio_setpin(S3C2410_GPB(1), 0); | 626 | s3c2410_gpio_setpin(S3C2410_GPB(1), 0); |
645 | s3c_gpio_cfgpin(S3C2410_GPB(1), S3C2410_GPIO_INPUT); | 627 | s3c_gpio_cfgpin(S3C2410_GPB(1), S3C2410_GPIO_INPUT); |
646 | 628 | ||
647 | /* Make sure the D+ pullup pin is output */ | ||
648 | WARN_ON(gpio_request(S3C2410_GPC(5), "udc pup")); | ||
649 | gpio_direction_output(S3C2410_GPC(5), 0); | ||
650 | |||
651 | /* mark the key as input, without pullups (there is one on the board) */ | 629 | /* mark the key as input, without pullups (there is one on the board) */ |
652 | for (i = 0; i < ARRAY_SIZE(mini2440_buttons); i++) { | 630 | for (i = 0; i < ARRAY_SIZE(mini2440_buttons); i++) { |
653 | s3c_gpio_setpull(mini2440_buttons[i].gpio, S3C_GPIO_PULL_UP); | 631 | s3c_gpio_setpull(mini2440_buttons[i].gpio, S3C_GPIO_PULL_UP); |
diff --git a/arch/arm/mach-s3c2440/mach-rx1950.c b/arch/arm/mach-s3c2440/mach-rx1950.c index eab6ae50683c..86bbc233b31c 100644 --- a/arch/arm/mach-s3c2440/mach-rx1950.c +++ b/arch/arm/mach-s3c2440/mach-rx1950.c | |||
@@ -566,26 +566,10 @@ static struct s3c2410_platform_nand rx1950_nand_info = { | |||
566 | .sets = rx1950_nand_sets, | 566 | .sets = rx1950_nand_sets, |
567 | }; | 567 | }; |
568 | 568 | ||
569 | static void rx1950_udc_pullup(enum s3c2410_udc_cmd_e cmd) | ||
570 | { | ||
571 | switch (cmd) { | ||
572 | case S3C2410_UDC_P_ENABLE: | ||
573 | gpio_direction_output(S3C2410_GPJ(5), 1); | ||
574 | break; | ||
575 | case S3C2410_UDC_P_DISABLE: | ||
576 | gpio_direction_output(S3C2410_GPJ(5), 0); | ||
577 | break; | ||
578 | case S3C2410_UDC_P_RESET: | ||
579 | break; | ||
580 | default: | ||
581 | break; | ||
582 | } | ||
583 | } | ||
584 | |||
585 | static struct s3c2410_udc_mach_info rx1950_udc_cfg __initdata = { | 569 | static struct s3c2410_udc_mach_info rx1950_udc_cfg __initdata = { |
586 | .udc_command = rx1950_udc_pullup, | ||
587 | .vbus_pin = S3C2410_GPG(5), | 570 | .vbus_pin = S3C2410_GPG(5), |
588 | .vbus_pin_inverted = 1, | 571 | .vbus_pin_inverted = 1, |
572 | .pullup_pin = S3C2410_GPJ(5), | ||
589 | }; | 573 | }; |
590 | 574 | ||
591 | static struct s3c2410_ts_mach_info rx1950_ts_cfg __initdata = { | 575 | static struct s3c2410_ts_mach_info rx1950_ts_cfg __initdata = { |
@@ -750,9 +734,6 @@ static void __init rx1950_init_machine(void) | |||
750 | S3C2410_MISCCR_USBSUSPND0 | | 734 | S3C2410_MISCCR_USBSUSPND0 | |
751 | S3C2410_MISCCR_USBSUSPND1, 0x0); | 735 | S3C2410_MISCCR_USBSUSPND1, 0x0); |
752 | 736 | ||
753 | WARN_ON(gpio_request(S3C2410_GPJ(5), "UDC pullup")); | ||
754 | gpio_direction_output(S3C2410_GPJ(5), 0); | ||
755 | |||
756 | /* mmc power is disabled by default */ | 737 | /* mmc power is disabled by default */ |
757 | WARN_ON(gpio_request(S3C2410_GPJ(1), "MMC power")); | 738 | WARN_ON(gpio_request(S3C2410_GPJ(1), "MMC power")); |
758 | gpio_direction_output(S3C2410_GPJ(1), 0); | 739 | gpio_direction_output(S3C2410_GPJ(1), 0); |
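Each of the S3C24xx board files above drops its hand-rolled udc_command() callback and the matching gpio_request()/gpio_direction_output() setup, and instead passes the D+ pull-up GPIO as .pullup_pin in s3c2410_udc_mach_info. The driver-side handling is not part of this diff; a hedged sketch of what the UDC driver is presumably expected to do with it:

    /* Illustrative only: generic pull-up control keyed off .pullup_pin,
     * replacing the per-board callbacks removed above. */
    static void s3c2410_udc_gpio_pullup(struct s3c2410_udc_mach_info *info,
                                        enum s3c2410_udc_cmd_e cmd)
    {
            if (!gpio_is_valid(info->pullup_pin))
                    return;

            switch (cmd) {
            case S3C2410_UDC_P_ENABLE:
                    gpio_set_value(info->pullup_pin, 1);
                    break;
            case S3C2410_UDC_P_DISABLE:
                    gpio_set_value(info->pullup_pin, 0);
                    break;
            default:
                    break;
            }
    }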
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig index acd9552f8ada..622a9ec1ff08 100644 --- a/arch/arm/mach-tegra/Kconfig +++ b/arch/arm/mach-tegra/Kconfig | |||
@@ -10,6 +10,9 @@ config ARCH_TEGRA_2x_SOC | |||
10 | select CPU_V7 | 10 | select CPU_V7 |
11 | select ARM_GIC | 11 | select ARM_GIC |
12 | select ARCH_REQUIRE_GPIOLIB | 12 | select ARCH_REQUIRE_GPIOLIB |
13 | select USB_ARCH_HAS_EHCI if USB_SUPPORT | ||
14 | select USB_ULPI if USB_SUPPORT | ||
15 | select USB_ULPI_VIEWPORT if USB_SUPPORT | ||
13 | help | 16 | help |
14 | Support for NVIDIA Tegra AP20 and T20 processors, based on the | 17 | Support for NVIDIA Tegra AP20 and T20 processors, based on the |
15 | ARM CortexA9MP CPU and the ARM PL310 L2 cache controller | 18 | ARM CortexA9MP CPU and the ARM PL310 L2 cache controller |
@@ -27,6 +30,31 @@ config MACH_HARMONY | |||
27 | help | 30 | help |
28 | Support for nVidia Harmony development platform | 31 | Support for nVidia Harmony development platform |
29 | 32 | ||
33 | config MACH_KAEN | ||
34 | bool "Kaen board" | ||
35 | select MACH_SEABOARD | ||
36 | help | ||
37 | Support for the Kaen version of Seaboard | ||
38 | |||
39 | config MACH_SEABOARD | ||
40 | bool "Seaboard board" | ||
41 | help | ||
42 | Support for nVidia Seaboard development platform. It will | ||
43 | also be included for some of the derivative boards that | ||
44 | have large similarities with the seaboard design. | ||
45 | |||
46 | config MACH_TRIMSLICE | ||
47 | bool "TrimSlice board" | ||
48 | select TEGRA_PCI | ||
49 | help | ||
50 | Support for CompuLab TrimSlice platform | ||
51 | |||
52 | config MACH_WARIO | ||
53 | bool "Wario board" | ||
54 | select MACH_SEABOARD | ||
55 | help | ||
56 | Support for the Wario version of Seaboard | ||
57 | |||
30 | choice | 58 | choice |
31 | prompt "Low-level debug console UART" | 59 | prompt "Low-level debug console UART" |
32 | default TEGRA_DEBUG_UART_NONE | 60 | default TEGRA_DEBUG_UART_NONE |
@@ -58,4 +86,7 @@ config TEGRA_SYSTEM_DMA | |||
58 | Adds system DMA functionality for NVIDIA Tegra SoCs, used by | 86 | Adds system DMA functionality for NVIDIA Tegra SoCs, used by |
59 | several Tegra device drivers | 87 | several Tegra device drivers |
60 | 88 | ||
89 | config TEGRA_EMC_SCALING_ENABLE | ||
90 | bool "Enable scaling the memory frequency" | ||
91 | |||
61 | endif | 92 | endif |
diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile index cdbc68e4c0ca..9f7a7e1e0c38 100644 --- a/arch/arm/mach-tegra/Makefile +++ b/arch/arm/mach-tegra/Makefile | |||
@@ -1,21 +1,30 @@ | |||
1 | obj-y += common.o | 1 | obj-y += common.o |
2 | obj-y += devices.o | ||
2 | obj-y += io.o | 3 | obj-y += io.o |
3 | obj-y += irq.o legacy_irq.o | 4 | obj-y += irq.o legacy_irq.o |
4 | obj-y += clock.o | 5 | obj-y += clock.o |
5 | obj-y += timer.o | 6 | obj-y += timer.o |
6 | obj-y += gpio.o | 7 | obj-y += gpio.o |
7 | obj-y += pinmux.o | 8 | obj-y += pinmux.o |
9 | obj-y += powergate.o | ||
8 | obj-y += fuse.o | 10 | obj-y += fuse.o |
9 | obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clock.o | 11 | obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clock.o |
10 | obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_clocks.o | 12 | obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_clocks.o |
11 | obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_dvfs.o | 13 | obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_emc.o |
12 | obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += pinmux-t2-tables.o | 14 | obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += pinmux-t2-tables.o |
13 | obj-$(CONFIG_SMP) += platsmp.o localtimer.o headsmp.o | 15 | obj-$(CONFIG_SMP) += platsmp.o localtimer.o headsmp.o |
14 | obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o | 16 | obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o |
15 | obj-$(CONFIG_TEGRA_SYSTEM_DMA) += dma.o | 17 | obj-$(CONFIG_TEGRA_SYSTEM_DMA) += dma.o |
16 | obj-$(CONFIG_CPU_FREQ) += cpu-tegra.o | 18 | obj-$(CONFIG_CPU_FREQ) += cpu-tegra.o |
17 | obj-$(CONFIG_TEGRA_PCI) += pcie.o | 19 | obj-$(CONFIG_TEGRA_PCI) += pcie.o |
20 | obj-$(CONFIG_USB_SUPPORT) += usb_phy.o | ||
18 | 21 | ||
19 | obj-${CONFIG_MACH_HARMONY} += board-harmony.o | 22 | obj-${CONFIG_MACH_HARMONY} += board-harmony.o |
20 | obj-${CONFIG_MACH_HARMONY} += board-harmony-pinmux.o | 23 | obj-${CONFIG_MACH_HARMONY} += board-harmony-pinmux.o |
21 | obj-${CONFIG_MACH_HARMONY} += board-harmony-pcie.o | 24 | obj-${CONFIG_MACH_HARMONY} += board-harmony-pcie.o |
25 | |||
26 | obj-${CONFIG_MACH_SEABOARD} += board-seaboard.o | ||
27 | obj-${CONFIG_MACH_SEABOARD} += board-seaboard-pinmux.o | ||
28 | |||
29 | obj-${CONFIG_MACH_TRIMSLICE} += board-trimslice.o | ||
30 | obj-${CONFIG_MACH_TRIMSLICE} += board-trimslice-pinmux.o | ||
diff --git a/arch/arm/mach-tegra/board-harmony-pinmux.c b/arch/arm/mach-tegra/board-harmony-pinmux.c index 50b15d500cac..98368d947be3 100644 --- a/arch/arm/mach-tegra/board-harmony-pinmux.c +++ b/arch/arm/mach-tegra/board-harmony-pinmux.c | |||
@@ -15,8 +15,10 @@ | |||
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/gpio.h> | ||
18 | #include <mach/pinmux.h> | 19 | #include <mach/pinmux.h> |
19 | 20 | ||
21 | #include "gpio-names.h" | ||
20 | #include "board-harmony.h" | 22 | #include "board-harmony.h" |
21 | 23 | ||
22 | static struct tegra_pingroup_config harmony_pinmux[] = { | 24 | static struct tegra_pingroup_config harmony_pinmux[] = { |
@@ -34,10 +36,10 @@ static struct tegra_pingroup_config harmony_pinmux[] = { | |||
34 | {TEGRA_PINGROUP_DAP3, TEGRA_MUX_DAP3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | 36 | {TEGRA_PINGROUP_DAP3, TEGRA_MUX_DAP3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, |
35 | {TEGRA_PINGROUP_DAP4, TEGRA_MUX_DAP4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | 37 | {TEGRA_PINGROUP_DAP4, TEGRA_MUX_DAP4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, |
36 | {TEGRA_PINGROUP_DDC, TEGRA_MUX_I2C2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, | 38 | {TEGRA_PINGROUP_DDC, TEGRA_MUX_I2C2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, |
37 | {TEGRA_PINGROUP_DTA, TEGRA_MUX_SDIO2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | 39 | {TEGRA_PINGROUP_DTA, TEGRA_MUX_SDIO2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, |
38 | {TEGRA_PINGROUP_DTB, TEGRA_MUX_RSVD1, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | 40 | {TEGRA_PINGROUP_DTB, TEGRA_MUX_RSVD1, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, |
39 | {TEGRA_PINGROUP_DTC, TEGRA_MUX_RSVD1, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | 41 | {TEGRA_PINGROUP_DTC, TEGRA_MUX_RSVD1, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, |
40 | {TEGRA_PINGROUP_DTD, TEGRA_MUX_SDIO2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | 42 | {TEGRA_PINGROUP_DTD, TEGRA_MUX_SDIO2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, |
41 | {TEGRA_PINGROUP_DTE, TEGRA_MUX_RSVD1, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | 43 | {TEGRA_PINGROUP_DTE, TEGRA_MUX_RSVD1, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, |
42 | {TEGRA_PINGROUP_DTF, TEGRA_MUX_I2C3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | 44 | {TEGRA_PINGROUP_DTF, TEGRA_MUX_I2C3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, |
43 | {TEGRA_PINGROUP_GMA, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | 45 | {TEGRA_PINGROUP_GMA, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, |
@@ -138,7 +140,18 @@ static struct tegra_pingroup_config harmony_pinmux[] = { | |||
138 | {TEGRA_PINGROUP_XM2D, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | 140 | {TEGRA_PINGROUP_XM2D, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, |
139 | }; | 141 | }; |
140 | 142 | ||
143 | static struct tegra_gpio_table gpio_table[] = { | ||
144 | { .gpio = TEGRA_GPIO_PI5, .enable = true }, /* mmc2 cd */ | ||
145 | { .gpio = TEGRA_GPIO_PH1, .enable = true }, /* mmc2 wp */ | ||
146 | { .gpio = TEGRA_GPIO_PT3, .enable = true }, /* mmc2 pwr */ | ||
147 | { .gpio = TEGRA_GPIO_PH2, .enable = true }, /* mmc4 cd */ | ||
148 | { .gpio = TEGRA_GPIO_PH3, .enable = true }, /* mmc4 wp */ | ||
149 | { .gpio = TEGRA_GPIO_PI6, .enable = true }, /* mmc4 pwr */ | ||
150 | }; | ||
151 | |||
141 | void harmony_pinmux_init(void) | 152 | void harmony_pinmux_init(void) |
142 | { | 153 | { |
143 | tegra_pinmux_config_table(harmony_pinmux, ARRAY_SIZE(harmony_pinmux)); | 154 | tegra_pinmux_config_table(harmony_pinmux, ARRAY_SIZE(harmony_pinmux)); |
155 | |||
156 | tegra_gpio_config(gpio_table, ARRAY_SIZE(gpio_table)); | ||
144 | } | 157 | } |
diff --git a/arch/arm/mach-tegra/board-harmony.c b/arch/arm/mach-tegra/board-harmony.c index b9dbdb1289d0..49224e936eb4 100644 --- a/arch/arm/mach-tegra/board-harmony.c +++ b/arch/arm/mach-tegra/board-harmony.c | |||
@@ -30,35 +30,13 @@ | |||
30 | 30 | ||
31 | #include <mach/iomap.h> | 31 | #include <mach/iomap.h> |
32 | #include <mach/irqs.h> | 32 | #include <mach/irqs.h> |
33 | #include <mach/sdhci.h> | ||
33 | 34 | ||
34 | #include "board.h" | 35 | #include "board.h" |
35 | #include "board-harmony.h" | 36 | #include "board-harmony.h" |
36 | #include "clock.h" | 37 | #include "clock.h" |
37 | 38 | #include "devices.h" | |
38 | /* NVidia bootloader tags */ | 39 | #include "gpio-names.h" |
39 | #define ATAG_NVIDIA 0x41000801 | ||
40 | |||
41 | #define ATAG_NVIDIA_RM 0x1 | ||
42 | #define ATAG_NVIDIA_DISPLAY 0x2 | ||
43 | #define ATAG_NVIDIA_FRAMEBUFFER 0x3 | ||
44 | #define ATAG_NVIDIA_CHIPSHMOO 0x4 | ||
45 | #define ATAG_NVIDIA_CHIPSHMOOPHYS 0x5 | ||
46 | #define ATAG_NVIDIA_PRESERVED_MEM_0 0x10000 | ||
47 | #define ATAG_NVIDIA_PRESERVED_MEM_N 2 | ||
48 | #define ATAG_NVIDIA_FORCE_32 0x7fffffff | ||
49 | |||
50 | struct tag_tegra { | ||
51 | __u32 bootarg_key; | ||
52 | __u32 bootarg_len; | ||
53 | char bootarg[1]; | ||
54 | }; | ||
55 | |||
56 | static int __init parse_tag_nvidia(const struct tag *tag) | ||
57 | { | ||
58 | |||
59 | return 0; | ||
60 | } | ||
61 | __tagtable(ATAG_NVIDIA, parse_tag_nvidia); | ||
62 | 40 | ||
63 | static struct plat_serial8250_port debug_uart_platform_data[] = { | 41 | static struct plat_serial8250_port debug_uart_platform_data[] = { |
64 | { | 42 | { |
@@ -84,6 +62,9 @@ static struct platform_device debug_uart = { | |||
84 | 62 | ||
85 | static struct platform_device *harmony_devices[] __initdata = { | 63 | static struct platform_device *harmony_devices[] __initdata = { |
86 | &debug_uart, | 64 | &debug_uart, |
65 | &tegra_sdhci_device1, | ||
66 | &tegra_sdhci_device2, | ||
67 | &tegra_sdhci_device4, | ||
87 | }; | 68 | }; |
88 | 69 | ||
89 | static void __init tegra_harmony_fixup(struct machine_desc *desc, | 70 | static void __init tegra_harmony_fixup(struct machine_desc *desc, |
@@ -102,22 +83,45 @@ static __initdata struct tegra_clk_init_table harmony_clk_init_table[] = { | |||
102 | { NULL, NULL, 0, 0}, | 83 | { NULL, NULL, 0, 0}, |
103 | }; | 84 | }; |
104 | 85 | ||
86 | |||
87 | static struct tegra_sdhci_platform_data sdhci_pdata1 = { | ||
88 | .cd_gpio = -1, | ||
89 | .wp_gpio = -1, | ||
90 | .power_gpio = -1, | ||
91 | }; | ||
92 | |||
93 | static struct tegra_sdhci_platform_data sdhci_pdata2 = { | ||
94 | .cd_gpio = TEGRA_GPIO_PI5, | ||
95 | .wp_gpio = TEGRA_GPIO_PH1, | ||
96 | .power_gpio = TEGRA_GPIO_PT3, | ||
97 | }; | ||
98 | |||
99 | static struct tegra_sdhci_platform_data sdhci_pdata4 = { | ||
100 | .cd_gpio = TEGRA_GPIO_PH2, | ||
101 | .wp_gpio = TEGRA_GPIO_PH3, | ||
102 | .power_gpio = TEGRA_GPIO_PI6, | ||
103 | .is_8bit = 1, | ||
104 | }; | ||
105 | |||
105 | static void __init tegra_harmony_init(void) | 106 | static void __init tegra_harmony_init(void) |
106 | { | 107 | { |
107 | tegra_common_init(); | ||
108 | |||
109 | tegra_clk_init_from_table(harmony_clk_init_table); | 108 | tegra_clk_init_from_table(harmony_clk_init_table); |
110 | 109 | ||
111 | harmony_pinmux_init(); | 110 | harmony_pinmux_init(); |
112 | 111 | ||
112 | tegra_sdhci_device1.dev.platform_data = &sdhci_pdata1; | ||
113 | tegra_sdhci_device2.dev.platform_data = &sdhci_pdata2; | ||
114 | tegra_sdhci_device4.dev.platform_data = &sdhci_pdata4; | ||
115 | |||
113 | platform_add_devices(harmony_devices, ARRAY_SIZE(harmony_devices)); | 116 | platform_add_devices(harmony_devices, ARRAY_SIZE(harmony_devices)); |
114 | } | 117 | } |
115 | 118 | ||
116 | MACHINE_START(HARMONY, "harmony") | 119 | MACHINE_START(HARMONY, "harmony") |
117 | .boot_params = 0x00000100, | 120 | .boot_params = 0x00000100, |
118 | .fixup = tegra_harmony_fixup, | 121 | .fixup = tegra_harmony_fixup, |
119 | .init_irq = tegra_init_irq, | ||
120 | .init_machine = tegra_harmony_init, | ||
121 | .map_io = tegra_map_common_io, | 122 | .map_io = tegra_map_common_io, |
123 | .init_early = tegra_init_early, | ||
124 | .init_irq = tegra_init_irq, | ||
122 | .timer = &tegra_timer, | 125 | .timer = &tegra_timer, |
126 | .init_machine = tegra_harmony_init, | ||
123 | MACHINE_END | 127 | MACHINE_END |
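Harmony now attaches per-controller GPIO data to the three Tegra SDHCI devices through the new <mach/sdhci.h>. That header is not part of this hunk; the layout implied by the sdhci_pdata1/2/4 initialisers above is roughly the following, with types assumed:

    struct tegra_sdhci_platform_data {
            int     cd_gpio;        /* card detect, -1 when not wired */
            int     wp_gpio;        /* write protect, -1 when not wired */
            int     power_gpio;     /* slot power enable, -1 when not wired */
            int     is_8bit;        /* nonzero: 8-bit data bus (mmc4 here) */
    };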
diff --git a/arch/arm/mach-tegra/board-seaboard-pinmux.c b/arch/arm/mach-tegra/board-seaboard-pinmux.c new file mode 100644 index 000000000000..2d6ad83ed4b2 --- /dev/null +++ b/arch/arm/mach-tegra/board-seaboard-pinmux.c | |||
@@ -0,0 +1,179 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010 NVIDIA Corporation | ||
3 | * | ||
4 | * This software is licensed under the terms of the GNU General Public | ||
5 | * License version 2, as published by the Free Software Foundation, and | ||
6 | * may be copied, distributed, and modified under those terms. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/gpio.h> | ||
18 | |||
19 | #include <mach/pinmux.h> | ||
20 | #include <mach/pinmux-t2.h> | ||
21 | |||
22 | #include "gpio-names.h" | ||
23 | #include "board-seaboard.h" | ||
24 | |||
25 | #define DEFAULT_DRIVE(_name) \ | ||
26 | { \ | ||
27 | .pingroup = TEGRA_DRIVE_PINGROUP_##_name, \ | ||
28 | .hsm = TEGRA_HSM_DISABLE, \ | ||
29 | .schmitt = TEGRA_SCHMITT_ENABLE, \ | ||
30 | .drive = TEGRA_DRIVE_DIV_1, \ | ||
31 | .pull_down = TEGRA_PULL_31, \ | ||
32 | .pull_up = TEGRA_PULL_31, \ | ||
33 | .slew_rising = TEGRA_SLEW_SLOWEST, \ | ||
34 | .slew_falling = TEGRA_SLEW_SLOWEST, \ | ||
35 | } | ||
36 | |||
37 | static __initdata struct tegra_drive_pingroup_config seaboard_drive_pinmux[] = { | ||
38 | DEFAULT_DRIVE(SDIO1), | ||
39 | }; | ||
40 | |||
41 | static __initdata struct tegra_pingroup_config seaboard_pinmux[] = { | ||
42 | {TEGRA_PINGROUP_ATA, TEGRA_MUX_IDE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
43 | {TEGRA_PINGROUP_ATB, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
44 | {TEGRA_PINGROUP_ATC, TEGRA_MUX_NAND, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
45 | {TEGRA_PINGROUP_ATD, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
46 | {TEGRA_PINGROUP_ATE, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
47 | {TEGRA_PINGROUP_CDEV1, TEGRA_MUX_PLLA_OUT, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
48 | {TEGRA_PINGROUP_CDEV2, TEGRA_MUX_PLLP_OUT4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
49 | {TEGRA_PINGROUP_CRTP, TEGRA_MUX_CRT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
50 | {TEGRA_PINGROUP_CSUS, TEGRA_MUX_VI_SENSOR_CLK, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
51 | {TEGRA_PINGROUP_DAP1, TEGRA_MUX_DAP1, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
52 | {TEGRA_PINGROUP_DAP2, TEGRA_MUX_DAP2, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
53 | {TEGRA_PINGROUP_DAP3, TEGRA_MUX_DAP3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
54 | {TEGRA_PINGROUP_DAP4, TEGRA_MUX_DAP4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
55 | {TEGRA_PINGROUP_DDC, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
56 | {TEGRA_PINGROUP_DTA, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, | ||
57 | {TEGRA_PINGROUP_DTB, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, | ||
58 | {TEGRA_PINGROUP_DTC, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, | ||
59 | {TEGRA_PINGROUP_DTD, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, | ||
60 | {TEGRA_PINGROUP_DTE, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE}, | ||
61 | {TEGRA_PINGROUP_DTF, TEGRA_MUX_I2C3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
62 | {TEGRA_PINGROUP_GMA, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
63 | {TEGRA_PINGROUP_GMB, TEGRA_MUX_GMI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
64 | {TEGRA_PINGROUP_GMC, TEGRA_MUX_UARTD, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
65 | {TEGRA_PINGROUP_GMD, TEGRA_MUX_SFLASH, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
66 | {TEGRA_PINGROUP_GME, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
67 | {TEGRA_PINGROUP_GPU, TEGRA_MUX_PWM, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
68 | {TEGRA_PINGROUP_GPU7, TEGRA_MUX_RTCK, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
69 | {TEGRA_PINGROUP_GPV, TEGRA_MUX_PCIE, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
70 | {TEGRA_PINGROUP_HDINT, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
71 | {TEGRA_PINGROUP_I2CP, TEGRA_MUX_I2C, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
72 | {TEGRA_PINGROUP_IRRX, TEGRA_MUX_UARTB, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
73 | {TEGRA_PINGROUP_IRTX, TEGRA_MUX_UARTB, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
74 | {TEGRA_PINGROUP_KBCA, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, | ||
75 | {TEGRA_PINGROUP_KBCB, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, | ||
76 | {TEGRA_PINGROUP_KBCC, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, | ||
77 | {TEGRA_PINGROUP_KBCD, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, | ||
78 | {TEGRA_PINGROUP_KBCE, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, | ||
79 | {TEGRA_PINGROUP_KBCF, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, | ||
80 | {TEGRA_PINGROUP_LCSN, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
81 | {TEGRA_PINGROUP_LD0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
82 | {TEGRA_PINGROUP_LD1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
83 | {TEGRA_PINGROUP_LD10, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
84 | {TEGRA_PINGROUP_LD11, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
85 | {TEGRA_PINGROUP_LD12, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
86 | {TEGRA_PINGROUP_LD13, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
87 | {TEGRA_PINGROUP_LD14, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
88 | {TEGRA_PINGROUP_LD15, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
89 | {TEGRA_PINGROUP_LD16, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
90 | {TEGRA_PINGROUP_LD17, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
91 | {TEGRA_PINGROUP_LD2, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
92 | {TEGRA_PINGROUP_LD3, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
93 | {TEGRA_PINGROUP_LD4, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
94 | {TEGRA_PINGROUP_LD5, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
95 | {TEGRA_PINGROUP_LD6, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
96 | {TEGRA_PINGROUP_LD7, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
97 | {TEGRA_PINGROUP_LD8, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
98 | {TEGRA_PINGROUP_LD9, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
99 | {TEGRA_PINGROUP_LDC, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
100 | {TEGRA_PINGROUP_LDI, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
101 | {TEGRA_PINGROUP_LHP0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
102 | {TEGRA_PINGROUP_LHP1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
103 | {TEGRA_PINGROUP_LHP2, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
104 | {TEGRA_PINGROUP_LHS, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
105 | {TEGRA_PINGROUP_LM0, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
106 | {TEGRA_PINGROUP_LM1, TEGRA_MUX_CRT, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
107 | {TEGRA_PINGROUP_LPP, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
108 | {TEGRA_PINGROUP_LPW0, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
109 | {TEGRA_PINGROUP_LPW1, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
110 | {TEGRA_PINGROUP_LPW2, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
111 | {TEGRA_PINGROUP_LSC0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
112 | {TEGRA_PINGROUP_LSC1, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
113 | {TEGRA_PINGROUP_LSCK, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
114 | {TEGRA_PINGROUP_LSDA, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
115 | {TEGRA_PINGROUP_LSDI, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
116 | {TEGRA_PINGROUP_LSPI, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
117 | {TEGRA_PINGROUP_LVP0, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
118 | {TEGRA_PINGROUP_LVP1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
119 | {TEGRA_PINGROUP_LVS, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
120 | {TEGRA_PINGROUP_OWC, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
121 | {TEGRA_PINGROUP_PMC, TEGRA_MUX_PWR_ON, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
122 | {TEGRA_PINGROUP_PTA, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
123 | {TEGRA_PINGROUP_RM, TEGRA_MUX_I2C, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
124 | {TEGRA_PINGROUP_SDB, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
125 | {TEGRA_PINGROUP_SDC, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
126 | {TEGRA_PINGROUP_SDD, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
127 | {TEGRA_PINGROUP_SDIO1, TEGRA_MUX_SDIO1, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, | ||
128 | {TEGRA_PINGROUP_SLXA, TEGRA_MUX_PCIE, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
129 | {TEGRA_PINGROUP_SLXC, TEGRA_MUX_SPDIF, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
130 | {TEGRA_PINGROUP_SLXD, TEGRA_MUX_SPDIF, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
131 | {TEGRA_PINGROUP_SLXK, TEGRA_MUX_PCIE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
132 | {TEGRA_PINGROUP_SPDI, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
133 | {TEGRA_PINGROUP_SPDO, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
134 | {TEGRA_PINGROUP_SPIA, TEGRA_MUX_GMI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
135 | {TEGRA_PINGROUP_SPIB, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
136 | {TEGRA_PINGROUP_SPIC, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
137 | {TEGRA_PINGROUP_SPID, TEGRA_MUX_SPI1, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
138 | {TEGRA_PINGROUP_SPIE, TEGRA_MUX_SPI1, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
139 | {TEGRA_PINGROUP_SPIF, TEGRA_MUX_SPI1, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE}, | ||
140 | {TEGRA_PINGROUP_SPIG, TEGRA_MUX_SPI2_ALT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
141 | {TEGRA_PINGROUP_SPIH, TEGRA_MUX_SPI2_ALT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
142 | {TEGRA_PINGROUP_UAA, TEGRA_MUX_ULPI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, | ||
143 | {TEGRA_PINGROUP_UAB, TEGRA_MUX_ULPI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, | ||
144 | {TEGRA_PINGROUP_UAC, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
145 | {TEGRA_PINGROUP_UAD, TEGRA_MUX_IRDA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
146 | {TEGRA_PINGROUP_UCA, TEGRA_MUX_UARTC, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
147 | {TEGRA_PINGROUP_UCB, TEGRA_MUX_UARTC, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
148 | {TEGRA_PINGROUP_UDA, TEGRA_MUX_ULPI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
149 | {TEGRA_PINGROUP_CK32, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
150 | {TEGRA_PINGROUP_DDRC, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
151 | {TEGRA_PINGROUP_PMCA, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
152 | {TEGRA_PINGROUP_PMCB, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
153 | {TEGRA_PINGROUP_PMCC, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
154 | {TEGRA_PINGROUP_PMCD, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
155 | {TEGRA_PINGROUP_PMCE, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
156 | {TEGRA_PINGROUP_XM2C, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
157 | {TEGRA_PINGROUP_XM2D, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
158 | }; | ||
159 | |||
160 | |||
161 | |||
162 | |||
163 | static struct tegra_gpio_table gpio_table[] = { | ||
164 | { .gpio = TEGRA_GPIO_PI5, .enable = true }, /* mmc2 cd */ | ||
165 | { .gpio = TEGRA_GPIO_PH1, .enable = true }, /* mmc2 wp */ | ||
166 | { .gpio = TEGRA_GPIO_PI6, .enable = true }, /* mmc2 pwr */ | ||
167 | { .gpio = TEGRA_GPIO_LIDSWITCH, .enable = true }, /* lid switch */ | ||
168 | { .gpio = TEGRA_GPIO_POWERKEY, .enable = true }, /* power key */ | ||
169 | }; | ||
170 | |||
171 | void __init seaboard_pinmux_init(void) | ||
172 | { | ||
173 | tegra_pinmux_config_table(seaboard_pinmux, ARRAY_SIZE(seaboard_pinmux)); | ||
174 | |||
175 | tegra_drive_pinmux_config_table(seaboard_drive_pinmux, | ||
176 | ARRAY_SIZE(seaboard_drive_pinmux)); | ||
177 | |||
178 | tegra_gpio_config(gpio_table, ARRAY_SIZE(gpio_table)); | ||
179 | } | ||
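Each seaboard_pinmux[] entry is a positional {pingroup, mux function, pull, tristate} tuple, and seaboard_pinmux_init() pushes the whole table plus the drive-strength and GPIO tables to the pinmux core in one pass. A hypothetical late override (not in this commit) would reuse the same helper with a small table rather than editing the master list:

/* Sketch: tristate the IrDA UART pins on a variant without the IR port. */
static __initdata struct tegra_pingroup_config example_irda_off[] = {
	{TEGRA_PINGROUP_IRRX, TEGRA_MUX_UARTB, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
	{TEGRA_PINGROUP_IRTX, TEGRA_MUX_UARTB, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
};

static void __init example_irda_off_init(void)
{
	tegra_pinmux_config_table(example_irda_off, ARRAY_SIZE(example_irda_off));
}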
diff --git a/arch/arm/mach-tegra/board-seaboard.c b/arch/arm/mach-tegra/board-seaboard.c new file mode 100644 index 000000000000..6ca9e61f6cd0 --- /dev/null +++ b/arch/arm/mach-tegra/board-seaboard.c | |||
@@ -0,0 +1,196 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2010, 2011 NVIDIA Corporation. | ||
3 | * Copyright (C) 2010, 2011 Google, Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/platform_device.h> | ||
20 | #include <linux/serial_8250.h> | ||
21 | #include <linux/delay.h> | ||
22 | #include <linux/input.h> | ||
23 | #include <linux/io.h> | ||
24 | #include <linux/gpio_keys.h> | ||
25 | |||
26 | #include <mach/iomap.h> | ||
27 | #include <mach/irqs.h> | ||
28 | #include <mach/sdhci.h> | ||
29 | |||
30 | #include <asm/mach-types.h> | ||
31 | #include <asm/mach/arch.h> | ||
32 | |||
33 | #include "board.h" | ||
34 | #include "board-seaboard.h" | ||
35 | #include "clock.h" | ||
36 | #include "devices.h" | ||
37 | #include "gpio-names.h" | ||
38 | |||
39 | static struct plat_serial8250_port debug_uart_platform_data[] = { | ||
40 | { | ||
41 | /* Memory and IRQ filled in before registration */ | ||
42 | .flags = UPF_BOOT_AUTOCONF, | ||
43 | .iotype = UPIO_MEM, | ||
44 | .regshift = 2, | ||
45 | .uartclk = 216000000, | ||
46 | }, { | ||
47 | .flags = 0, | ||
48 | } | ||
49 | }; | ||
50 | |||
51 | static struct platform_device debug_uart = { | ||
52 | .name = "serial8250", | ||
53 | .id = PLAT8250_DEV_PLATFORM, | ||
54 | .dev = { | ||
55 | .platform_data = debug_uart_platform_data, | ||
56 | }, | ||
57 | }; | ||
58 | |||
59 | static __initdata struct tegra_clk_init_table seaboard_clk_init_table[] = { | ||
60 | /* name parent rate enabled */ | ||
61 | { "uartb", "pll_p", 216000000, true}, | ||
62 | { "uartd", "pll_p", 216000000, true}, | ||
63 | { NULL, NULL, 0, 0}, | ||
64 | }; | ||
65 | |||
66 | static struct gpio_keys_button seaboard_gpio_keys_buttons[] = { | ||
67 | { | ||
68 | .code = SW_LID, | ||
69 | .gpio = TEGRA_GPIO_LIDSWITCH, | ||
70 | .active_low = 0, | ||
71 | .desc = "Lid", | ||
72 | .type = EV_SW, | ||
73 | .wakeup = 1, | ||
74 | .debounce_interval = 1, | ||
75 | }, | ||
76 | { | ||
77 | .code = KEY_POWER, | ||
78 | .gpio = TEGRA_GPIO_POWERKEY, | ||
79 | .active_low = 1, | ||
80 | .desc = "Power", | ||
81 | .type = EV_KEY, | ||
82 | .wakeup = 1, | ||
83 | }, | ||
84 | }; | ||
85 | |||
86 | static struct gpio_keys_platform_data seaboard_gpio_keys = { | ||
87 | .buttons = seaboard_gpio_keys_buttons, | ||
88 | .nbuttons = ARRAY_SIZE(seaboard_gpio_keys_buttons), | ||
89 | }; | ||
90 | |||
91 | static struct platform_device seaboard_gpio_keys_device = { | ||
92 | .name = "gpio-keys", | ||
93 | .id = -1, | ||
94 | .dev = { | ||
95 | .platform_data = &seaboard_gpio_keys, | ||
96 | } | ||
97 | }; | ||
98 | |||
99 | static struct tegra_sdhci_platform_data sdhci_pdata1 = { | ||
100 | .cd_gpio = -1, | ||
101 | .wp_gpio = -1, | ||
102 | .power_gpio = -1, | ||
103 | }; | ||
104 | |||
105 | static struct tegra_sdhci_platform_data sdhci_pdata3 = { | ||
106 | .cd_gpio = TEGRA_GPIO_PI5, | ||
107 | .wp_gpio = TEGRA_GPIO_PH1, | ||
108 | .power_gpio = TEGRA_GPIO_PI6, | ||
109 | }; | ||
110 | |||
111 | static struct tegra_sdhci_platform_data sdhci_pdata4 = { | ||
112 | .cd_gpio = -1, | ||
113 | .wp_gpio = -1, | ||
114 | .power_gpio = -1, | ||
115 | .is_8bit = 1, | ||
116 | }; | ||
117 | |||
118 | static struct platform_device *seaboard_devices[] __initdata = { | ||
119 | &debug_uart, | ||
120 | &tegra_pmu_device, | ||
121 | &tegra_sdhci_device1, | ||
122 | &tegra_sdhci_device3, | ||
123 | &tegra_sdhci_device4, | ||
124 | &seaboard_gpio_keys_device, | ||
125 | }; | ||
126 | |||
127 | static void __init __tegra_seaboard_init(void) | ||
128 | { | ||
129 | seaboard_pinmux_init(); | ||
130 | |||
131 | tegra_clk_init_from_table(seaboard_clk_init_table); | ||
132 | |||
133 | tegra_sdhci_device1.dev.platform_data = &sdhci_pdata1; | ||
134 | tegra_sdhci_device3.dev.platform_data = &sdhci_pdata3; | ||
135 | tegra_sdhci_device4.dev.platform_data = &sdhci_pdata4; | ||
136 | |||
137 | platform_add_devices(seaboard_devices, ARRAY_SIZE(seaboard_devices)); | ||
138 | } | ||
139 | |||
140 | static void __init tegra_seaboard_init(void) | ||
141 | { | ||
142 | /* Seaboard uses UARTD for the debug port. */ | ||
143 | debug_uart_platform_data[0].membase = IO_ADDRESS(TEGRA_UARTD_BASE); | ||
144 | debug_uart_platform_data[0].mapbase = TEGRA_UARTD_BASE; | ||
145 | debug_uart_platform_data[0].irq = INT_UARTD; | ||
146 | |||
147 | __tegra_seaboard_init(); | ||
148 | } | ||
149 | |||
150 | static void __init tegra_kaen_init(void) | ||
151 | { | ||
152 | /* Kaen uses UARTB for the debug port. */ | ||
153 | debug_uart_platform_data[0].membase = IO_ADDRESS(TEGRA_UARTB_BASE); | ||
154 | debug_uart_platform_data[0].mapbase = TEGRA_UARTB_BASE; | ||
155 | debug_uart_platform_data[0].irq = INT_UARTB; | ||
156 | |||
157 | __tegra_seaboard_init(); | ||
158 | } | ||
159 | |||
160 | static void __init tegra_wario_init(void) | ||
161 | { | ||
162 | /* Wario uses UARTB for the debug port. */ | ||
163 | debug_uart_platform_data[0].membase = IO_ADDRESS(TEGRA_UARTB_BASE); | ||
164 | debug_uart_platform_data[0].mapbase = TEGRA_UARTB_BASE; | ||
165 | debug_uart_platform_data[0].irq = INT_UARTB; | ||
166 | |||
167 | __tegra_seaboard_init(); | ||
168 | } | ||
169 | |||
170 | |||
171 | MACHINE_START(SEABOARD, "seaboard") | ||
172 | .boot_params = 0x00000100, | ||
173 | .map_io = tegra_map_common_io, | ||
174 | .init_early = tegra_init_early, | ||
175 | .init_irq = tegra_init_irq, | ||
176 | .timer = &tegra_timer, | ||
177 | .init_machine = tegra_seaboard_init, | ||
178 | MACHINE_END | ||
179 | |||
180 | MACHINE_START(KAEN, "kaen") | ||
181 | .boot_params = 0x00000100, | ||
182 | .map_io = tegra_map_common_io, | ||
183 | .init_early = tegra_init_early, | ||
184 | .init_irq = tegra_init_irq, | ||
185 | .timer = &tegra_timer, | ||
186 | .init_machine = tegra_kaen_init, | ||
187 | MACHINE_END | ||
188 | |||
189 | MACHINE_START(WARIO, "wario") | ||
190 | .boot_params = 0x00000100, | ||
191 | .map_io = tegra_map_common_io, | ||
192 | .init_early = tegra_init_early, | ||
193 | .init_irq = tegra_init_irq, | ||
194 | .timer = &tegra_timer, | ||
195 | .init_machine = tegra_wario_init, | ||
196 | MACHINE_END | ||
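Seaboard, Kaen and Wario share all of __tegra_seaboard_init(); the per-machine entry points differ only in which UART the common 8250 platform data is pointed at before the shared path runs. A fourth variant would follow the same shape (hypothetical, for illustration only):

static void __init tegra_example_variant_init(void)
{
	/* pick the debug UART for this board, then reuse the common init */
	debug_uart_platform_data[0].membase = IO_ADDRESS(TEGRA_UARTD_BASE);
	debug_uart_platform_data[0].mapbase = TEGRA_UARTD_BASE;
	debug_uart_platform_data[0].irq = INT_UARTD;

	__tegra_seaboard_init();
}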
diff --git a/arch/arm/mach-tegra/board-seaboard.h b/arch/arm/mach-tegra/board-seaboard.h new file mode 100644 index 000000000000..a098e3599731 --- /dev/null +++ b/arch/arm/mach-tegra/board-seaboard.h | |||
@@ -0,0 +1,38 @@ | |||
1 | /* | ||
2 | * arch/arm/mach-tegra/board-seaboard.h | ||
3 | * | ||
4 | * Copyright (C) 2010 Google, Inc. | ||
5 | * | ||
6 | * This software is licensed under the terms of the GNU General Public | ||
7 | * License version 2, as published by the Free Software Foundation, and | ||
8 | * may be copied, distributed, and modified under those terms. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #ifndef _MACH_TEGRA_BOARD_SEABOARD_H | ||
18 | #define _MACH_TEGRA_BOARD_SEABOARD_H | ||
19 | |||
20 | #define TEGRA_GPIO_LIDSWITCH TEGRA_GPIO_PC7 | ||
21 | #define TEGRA_GPIO_USB1 TEGRA_GPIO_PD0 | ||
22 | #define TEGRA_GPIO_POWERKEY TEGRA_GPIO_PV2 | ||
23 | #define TEGRA_GPIO_BACKLIGHT TEGRA_GPIO_PD4 | ||
24 | #define TEGRA_GPIO_LVDS_SHUTDOWN TEGRA_GPIO_PB2 | ||
25 | #define TEGRA_GPIO_BACKLIGHT_PWM TEGRA_GPIO_PU5 | ||
26 | #define TEGRA_GPIO_BACKLIGHT_VDD TEGRA_GPIO_PW0 | ||
27 | #define TEGRA_GPIO_EN_VDD_PNL TEGRA_GPIO_PC6 | ||
28 | #define TEGRA_GPIO_MAGNETOMETER TEGRA_GPIO_PN5 | ||
29 | #define TEGRA_GPIO_ISL29018_IRQ TEGRA_GPIO_PZ2 | ||
30 | #define TEGRA_GPIO_AC_ONLINE TEGRA_GPIO_PV3 | ||
31 | |||
32 | #define TPS_GPIO_BASE TEGRA_NR_GPIOS | ||
33 | |||
34 | #define TPS_GPIO_WWAN_PWR (TPS_GPIO_BASE + 2) | ||
35 | |||
36 | void seaboard_pinmux_init(void); | ||
37 | |||
38 | #endif | ||
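TPS_GPIO_BASE places the PMIC expander's pins directly after the SoC's own GPIO range, so TPS_GPIO_WWAN_PWR is simply TEGRA_NR_GPIOS + 2 and can be driven through ordinary gpiolib calls. A sketch of that use (illustrative only, not part of this commit):

static int __init example_enable_wwan(void)
{
	int err;

	err = gpio_request(TPS_GPIO_WWAN_PWR, "wwan_pwr");
	if (err)
		return err;

	/* the expander pin behaves like any on-SoC GPIO to consumers */
	return gpio_direction_output(TPS_GPIO_WWAN_PWR, 1);
}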
diff --git a/arch/arm/mach-tegra/board-trimslice-pinmux.c b/arch/arm/mach-tegra/board-trimslice-pinmux.c new file mode 100644 index 000000000000..6d4fc9f7f1fb --- /dev/null +++ b/arch/arm/mach-tegra/board-trimslice-pinmux.c | |||
@@ -0,0 +1,145 @@ | |||
1 | /* | ||
2 | * arch/arm/mach-tegra/board-trimslice-pinmux.c | ||
3 | * | ||
4 | * Copyright (C) 2011 CompuLab, Ltd. | ||
5 | * | ||
6 | * This software is licensed under the terms of the GNU General Public | ||
7 | * License version 2, as published by the Free Software Foundation, and | ||
8 | * may be copied, distributed, and modified under those terms. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <mach/pinmux.h> | ||
20 | |||
21 | #include "board-trimslice.h" | ||
22 | |||
23 | static __initdata struct tegra_pingroup_config trimslice_pinmux[] = { | ||
24 | {TEGRA_PINGROUP_ATA, TEGRA_MUX_IDE, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
25 | {TEGRA_PINGROUP_ATB, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
26 | {TEGRA_PINGROUP_ATC, TEGRA_MUX_NAND, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
27 | {TEGRA_PINGROUP_ATD, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
28 | {TEGRA_PINGROUP_ATE, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
29 | {TEGRA_PINGROUP_CDEV1, TEGRA_MUX_OSC, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
30 | {TEGRA_PINGROUP_CDEV2, TEGRA_MUX_PLLP_OUT4, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE}, | ||
31 | {TEGRA_PINGROUP_CRTP, TEGRA_MUX_CRT, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
32 | {TEGRA_PINGROUP_CSUS, TEGRA_MUX_VI_SENSOR_CLK, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE}, | ||
33 | {TEGRA_PINGROUP_DAP1, TEGRA_MUX_DAP1, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
34 | {TEGRA_PINGROUP_DAP2, TEGRA_MUX_DAP2, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
35 | {TEGRA_PINGROUP_DAP3, TEGRA_MUX_DAP3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
36 | {TEGRA_PINGROUP_DAP4, TEGRA_MUX_DAP4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
37 | {TEGRA_PINGROUP_DDC, TEGRA_MUX_I2C2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, | ||
38 | {TEGRA_PINGROUP_DTA, TEGRA_MUX_VI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
39 | {TEGRA_PINGROUP_DTB, TEGRA_MUX_VI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
40 | {TEGRA_PINGROUP_DTC, TEGRA_MUX_VI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
41 | {TEGRA_PINGROUP_DTD, TEGRA_MUX_VI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
42 | {TEGRA_PINGROUP_DTE, TEGRA_MUX_VI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
43 | {TEGRA_PINGROUP_DTF, TEGRA_MUX_I2C3, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, | ||
44 | {TEGRA_PINGROUP_GMA, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
45 | {TEGRA_PINGROUP_GMB, TEGRA_MUX_NAND, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
46 | {TEGRA_PINGROUP_GMC, TEGRA_MUX_SFLASH, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
47 | {TEGRA_PINGROUP_GMD, TEGRA_MUX_SFLASH, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
48 | {TEGRA_PINGROUP_GME, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
49 | {TEGRA_PINGROUP_GPU, TEGRA_MUX_UARTA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
50 | {TEGRA_PINGROUP_GPU7, TEGRA_MUX_RTCK, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
51 | {TEGRA_PINGROUP_GPV, TEGRA_MUX_PCIE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
52 | {TEGRA_PINGROUP_HDINT, TEGRA_MUX_HDMI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
53 | {TEGRA_PINGROUP_I2CP, TEGRA_MUX_I2C, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
54 | {TEGRA_PINGROUP_IRRX, TEGRA_MUX_UARTB, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
55 | {TEGRA_PINGROUP_IRTX, TEGRA_MUX_UARTB, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
56 | {TEGRA_PINGROUP_KBCA, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
57 | {TEGRA_PINGROUP_KBCB, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
58 | {TEGRA_PINGROUP_KBCC, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
59 | {TEGRA_PINGROUP_KBCD, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
60 | {TEGRA_PINGROUP_KBCE, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
61 | {TEGRA_PINGROUP_KBCF, TEGRA_MUX_KBC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
62 | {TEGRA_PINGROUP_LCSN, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
63 | {TEGRA_PINGROUP_LD0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, | ||
64 | {TEGRA_PINGROUP_LD1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, | ||
65 | {TEGRA_PINGROUP_LD2, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, | ||
66 | {TEGRA_PINGROUP_LD3, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, | ||
67 | {TEGRA_PINGROUP_LD4, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, | ||
68 | {TEGRA_PINGROUP_LD5, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, | ||
69 | {TEGRA_PINGROUP_LD6, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, | ||
70 | {TEGRA_PINGROUP_LD7, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, | ||
71 | {TEGRA_PINGROUP_LD8, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, | ||
72 | {TEGRA_PINGROUP_LD9, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, | ||
73 | {TEGRA_PINGROUP_LD10, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, | ||
74 | {TEGRA_PINGROUP_LD11, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, | ||
75 | {TEGRA_PINGROUP_LD12, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, | ||
76 | {TEGRA_PINGROUP_LD13, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, | ||
77 | {TEGRA_PINGROUP_LD14, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, | ||
78 | {TEGRA_PINGROUP_LD15, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, | ||
79 | {TEGRA_PINGROUP_LD16, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, | ||
80 | {TEGRA_PINGROUP_LD17, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, | ||
81 | {TEGRA_PINGROUP_LDC, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
82 | {TEGRA_PINGROUP_LDI, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, | ||
83 | {TEGRA_PINGROUP_LHP0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, | ||
84 | {TEGRA_PINGROUP_LHP1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, | ||
85 | {TEGRA_PINGROUP_LHP2, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, | ||
86 | {TEGRA_PINGROUP_LHS, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, | ||
87 | {TEGRA_PINGROUP_LM0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, | ||
88 | {TEGRA_PINGROUP_LM1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
89 | {TEGRA_PINGROUP_LPP, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, | ||
90 | {TEGRA_PINGROUP_LPW0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, | ||
91 | {TEGRA_PINGROUP_LPW1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
92 | {TEGRA_PINGROUP_LPW2, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, | ||
93 | {TEGRA_PINGROUP_LSC0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, | ||
94 | {TEGRA_PINGROUP_LSC1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
95 | {TEGRA_PINGROUP_LSCK, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
96 | {TEGRA_PINGROUP_LSDA, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
97 | {TEGRA_PINGROUP_LSDI, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
98 | {TEGRA_PINGROUP_LSPI, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, | ||
99 | {TEGRA_PINGROUP_LVP0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
100 | {TEGRA_PINGROUP_LVP1, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL}, | ||
101 | {TEGRA_PINGROUP_LVS, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, | ||
102 | {TEGRA_PINGROUP_OWC, TEGRA_MUX_RSVD2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
103 | {TEGRA_PINGROUP_PMC, TEGRA_MUX_PWR_ON, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
104 | {TEGRA_PINGROUP_PTA, TEGRA_MUX_RSVD3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
105 | {TEGRA_PINGROUP_RM, TEGRA_MUX_I2C, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, | ||
106 | {TEGRA_PINGROUP_SDB, TEGRA_MUX_PWM, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
107 | {TEGRA_PINGROUP_SDC, TEGRA_MUX_PWM, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, | ||
108 | {TEGRA_PINGROUP_SDD, TEGRA_MUX_PWM, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL}, | ||
109 | {TEGRA_PINGROUP_SDIO1, TEGRA_MUX_SDIO1, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
110 | {TEGRA_PINGROUP_SLXA, TEGRA_MUX_PCIE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
111 | {TEGRA_PINGROUP_SLXC, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
112 | {TEGRA_PINGROUP_SLXD, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
113 | {TEGRA_PINGROUP_SLXK, TEGRA_MUX_PCIE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
114 | {TEGRA_PINGROUP_SPDI, TEGRA_MUX_SPDIF, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
115 | {TEGRA_PINGROUP_SPDO, TEGRA_MUX_SPDIF, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
116 | {TEGRA_PINGROUP_SPIA, TEGRA_MUX_SPI2, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE}, | ||
117 | {TEGRA_PINGROUP_SPIB, TEGRA_MUX_SPI2, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE}, | ||
118 | {TEGRA_PINGROUP_SPIC, TEGRA_MUX_SPI2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
119 | {TEGRA_PINGROUP_SPID, TEGRA_MUX_SPI1, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE}, | ||
120 | {TEGRA_PINGROUP_SPIE, TEGRA_MUX_SPI1, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
121 | {TEGRA_PINGROUP_SPIF, TEGRA_MUX_SPI1, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE}, | ||
122 | {TEGRA_PINGROUP_SPIG, TEGRA_MUX_SPI2_ALT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
123 | {TEGRA_PINGROUP_SPIH, TEGRA_MUX_SPI2_ALT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
124 | {TEGRA_PINGROUP_UAA, TEGRA_MUX_ULPI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
125 | {TEGRA_PINGROUP_UAB, TEGRA_MUX_ULPI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
126 | {TEGRA_PINGROUP_UAC, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
127 | {TEGRA_PINGROUP_UAD, TEGRA_MUX_IRDA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
128 | {TEGRA_PINGROUP_UCA, TEGRA_MUX_UARTC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
129 | {TEGRA_PINGROUP_UCB, TEGRA_MUX_UARTC, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE}, | ||
130 | {TEGRA_PINGROUP_UDA, TEGRA_MUX_ULPI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE}, | ||
131 | {TEGRA_PINGROUP_CK32, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
132 | {TEGRA_PINGROUP_DDRC, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
133 | {TEGRA_PINGROUP_PMCA, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
134 | {TEGRA_PINGROUP_PMCB, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
135 | {TEGRA_PINGROUP_PMCC, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
136 | {TEGRA_PINGROUP_PMCD, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
137 | {TEGRA_PINGROUP_PMCE, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
138 | {TEGRA_PINGROUP_XM2C, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
139 | {TEGRA_PINGROUP_XM2D, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL}, | ||
140 | }; | ||
141 | |||
142 | void __init trimslice_pinmux_init(void) | ||
143 | { | ||
144 | tegra_pinmux_config_table(trimslice_pinmux, ARRAY_SIZE(trimslice_pinmux)); | ||
145 | } | ||
diff --git a/arch/arm/mach-tegra/board-trimslice.c b/arch/arm/mach-tegra/board-trimslice.c new file mode 100644 index 000000000000..7be7d4acd02f --- /dev/null +++ b/arch/arm/mach-tegra/board-trimslice.c | |||
@@ -0,0 +1,106 @@ | |||
1 | /* | ||
2 | * arch/arm/mach-tegra/board-trimslice.c | ||
3 | * | ||
4 | * Copyright (C) 2011 CompuLab, Ltd. | ||
5 | * Author: Mike Rapoport <mike@compulab.co.il> | ||
6 | * | ||
7 | * Based on board-harmony.c | ||
8 | * Copyright (C) 2010 Google, Inc. | ||
9 | * | ||
10 | * This software is licensed under the terms of the GNU General Public | ||
11 | * License version 2, as published by the Free Software Foundation, and | ||
12 | * may be copied, distributed, and modified under those terms. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | */ | ||
20 | |||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/platform_device.h> | ||
24 | #include <linux/serial_8250.h> | ||
25 | #include <linux/io.h> | ||
26 | |||
27 | #include <asm/mach-types.h> | ||
28 | #include <asm/mach/arch.h> | ||
29 | #include <asm/setup.h> | ||
30 | |||
31 | #include <mach/iomap.h> | ||
32 | |||
33 | #include "board.h" | ||
34 | #include "clock.h" | ||
35 | |||
36 | #include "board-trimslice.h" | ||
37 | |||
38 | static struct plat_serial8250_port debug_uart_platform_data[] = { | ||
39 | { | ||
40 | .membase = IO_ADDRESS(TEGRA_UARTA_BASE), | ||
41 | .mapbase = TEGRA_UARTA_BASE, | ||
42 | .irq = INT_UARTA, | ||
43 | .flags = UPF_BOOT_AUTOCONF, | ||
44 | .iotype = UPIO_MEM, | ||
45 | .regshift = 2, | ||
46 | .uartclk = 216000000, | ||
47 | }, { | ||
48 | .flags = 0 | ||
49 | } | ||
50 | }; | ||
51 | |||
52 | static struct platform_device debug_uart = { | ||
53 | .name = "serial8250", | ||
54 | .id = PLAT8250_DEV_PLATFORM, | ||
55 | .dev = { | ||
56 | .platform_data = debug_uart_platform_data, | ||
57 | }, | ||
58 | }; | ||
59 | |||
60 | static struct platform_device *trimslice_devices[] __initdata = { | ||
61 | &debug_uart, | ||
62 | }; | ||
63 | |||
64 | static void __init tegra_trimslice_fixup(struct machine_desc *desc, | ||
65 | struct tag *tags, char **cmdline, struct meminfo *mi) | ||
66 | { | ||
67 | mi->nr_banks = 2; | ||
68 | mi->bank[0].start = PHYS_OFFSET; | ||
69 | mi->bank[0].size = 448 * SZ_1M; | ||
70 | mi->bank[1].start = SZ_512M; | ||
71 | mi->bank[1].size = SZ_512M; | ||
72 | } | ||
73 | |||
74 | static __initdata struct tegra_clk_init_table trimslice_clk_init_table[] = { | ||
75 | /* name parent rate enabled */ | ||
76 | { "uarta", "pll_p", 216000000, true }, | ||
77 | { NULL, NULL, 0, 0}, | ||
78 | }; | ||
79 | |||
80 | static int __init tegra_trimslice_pci_init(void) | ||
81 | { | ||
82 | if (!machine_is_trimslice()) | ||
83 | return 0; | ||
84 | |||
85 | return tegra_pcie_init(true, true); | ||
86 | } | ||
87 | subsys_initcall(tegra_trimslice_pci_init); | ||
88 | |||
89 | static void __init tegra_trimslice_init(void) | ||
90 | { | ||
91 | tegra_clk_init_from_table(trimslice_clk_init_table); | ||
92 | |||
93 | trimslice_pinmux_init(); | ||
94 | |||
95 | platform_add_devices(trimslice_devices, ARRAY_SIZE(trimslice_devices)); | ||
96 | } | ||
97 | |||
98 | MACHINE_START(TRIMSLICE, "trimslice") | ||
99 | .boot_params = 0x00000100, | ||
100 | .fixup = tegra_trimslice_fixup, | ||
101 | .map_io = tegra_map_common_io, | ||
102 | .init_early = tegra_init_early, | ||
103 | .init_irq = tegra_init_irq, | ||
104 | .timer = &tegra_timer, | ||
105 | .init_machine = tegra_trimslice_init, | ||
106 | MACHINE_END | ||
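TrimSlice's fixup describes 1 GB of RAM with a hole: bank 0 is 448 MB starting at PHYS_OFFSET and bank 1 is the 512 MB that begins at the 512 MB mark. PCIe is then brought up from a subsys_initcall guarded by machine_is_trimslice(), so the same multi-board kernel leaves the controller alone everywhere else. Any other board wanting PCIe in the same image would use the identical guard (sketch only; Harmony's port configuration is assumed here, not taken from this commit):

static int __init example_harmony_pci_init(void)
{
	if (!machine_is_harmony())
		return 0;

	return tegra_pcie_init(true, true);
}
subsys_initcall(example_harmony_pci_init);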
diff --git a/arch/arm/mach-tegra/tegra2_dvfs.h b/arch/arm/mach-tegra/board-trimslice.h index f8c1adba96a6..16ec0f0d3bb1 100644 --- a/arch/arm/mach-tegra/tegra2_dvfs.h +++ b/arch/arm/mach-tegra/board-trimslice.h | |||
@@ -1,10 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * arch/arm/mach-tegra/tegra2_dvfs.h | 2 | * arch/arm/mach-tegra/board-trimslice.h |
3 | * | 3 | * |
4 | * Copyright (C) 2010 Google, Inc. | 4 | * Copyright (C) 2011 CompuLab, Ltd. |
5 | * | ||
6 | * Author: | ||
7 | * Colin Cross <ccross@google.com> | ||
8 | * | 5 | * |
9 | * This software is licensed under the terms of the GNU General Public | 6 | * This software is licensed under the terms of the GNU General Public |
10 | * License version 2, as published by the Free Software Foundation, and | 7 | * License version 2, as published by the Free Software Foundation, and |
@@ -17,4 +14,9 @@ | |||
17 | * | 14 | * |
18 | */ | 15 | */ |
19 | 16 | ||
20 | extern struct dvfs tegra_dvfs_virtual_cpu_dvfs; | 17 | #ifndef _MACH_TEGRA_BOARD_TRIMSLICE_H |
18 | #define _MACH_TEGRA_BOARD_TRIMSLICE_H | ||
19 | |||
20 | void trimslice_pinmux_init(void); | ||
21 | |||
22 | #endif | ||
diff --git a/arch/arm/mach-tegra/board.h b/arch/arm/mach-tegra/board.h index 0de565ca37c5..1d14df7eb7de 100644 --- a/arch/arm/mach-tegra/board.h +++ b/arch/arm/mach-tegra/board.h | |||
@@ -23,7 +23,9 @@ | |||
23 | 23 | ||
24 | #include <linux/types.h> | 24 | #include <linux/types.h> |
25 | 25 | ||
26 | void __init tegra_common_init(void); | 26 | void tegra_assert_system_reset(char mode, const char *cmd); |
27 | |||
28 | void __init tegra_init_early(void); | ||
27 | void __init tegra_map_common_io(void); | 29 | void __init tegra_map_common_io(void); |
28 | void __init tegra_init_irq(void); | 30 | void __init tegra_init_irq(void); |
29 | void __init tegra_init_clock(void); | 31 | void __init tegra_init_clock(void); |
diff --git a/arch/arm/mach-tegra/clock.c b/arch/arm/mach-tegra/clock.c index 77948e0f4909..e028320ab423 100644 --- a/arch/arm/mach-tegra/clock.c +++ b/arch/arm/mach-tegra/clock.c | |||
@@ -18,238 +18,177 @@ | |||
18 | 18 | ||
19 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
20 | #include <linux/clk.h> | 20 | #include <linux/clk.h> |
21 | #include <linux/list.h> | 21 | #include <linux/clkdev.h> |
22 | #include <linux/debugfs.h> | ||
23 | #include <linux/delay.h> | ||
22 | #include <linux/init.h> | 24 | #include <linux/init.h> |
25 | #include <linux/list.h> | ||
23 | #include <linux/module.h> | 26 | #include <linux/module.h> |
24 | #include <linux/debugfs.h> | 27 | #include <linux/sched.h> |
25 | #include <linux/slab.h> | ||
26 | #include <linux/seq_file.h> | 28 | #include <linux/seq_file.h> |
27 | #include <linux/regulator/consumer.h> | 29 | #include <linux/slab.h> |
28 | #include <linux/clkdev.h> | 30 | |
31 | #include <mach/clk.h> | ||
29 | 32 | ||
30 | #include "clock.h" | ||
31 | #include "board.h" | 33 | #include "board.h" |
32 | #include "fuse.h" | 34 | #include "clock.h" |
33 | 35 | ||
36 | /* | ||
37 | * Locking: | ||
38 | * | ||
39 | * Each struct clk has a spinlock. | ||
40 | * | ||
41 | * To avoid AB-BA locking problems, locks must always be traversed from child | ||
42 | * clock to parent clock. For example, when enabling a clock, the clock's lock | ||
43 | * is taken, and then clk_enable is called on the parent, which takes the | ||
44 | * parent clock's lock. There is one exception to this ordering: when dumping | ||
45 | * the clock tree through debugfs. In this case, clk_lock_all is called, | ||
46 | * which attempts to iterate through the entire list of clocks and take every | ||
47 | * clock lock. If any call to spin_trylock fails, all locked clocks are | ||
48 | * unlocked, and the process is retried. When all the locks are held, | ||
49 | * the only clock operation that can be called is clk_get_rate_all_locked. | ||
50 | * | ||
51 | * Within a single clock, no clock operation can call another clock operation | ||
52 | * on itself, except for clk_get_rate_locked and clk_set_rate_locked. Any | ||
53 | * clock operation can call any other clock operation on any of its possible | ||
54 | * parents. | ||
55 | * | ||
56 | * An additional mutex, clock_list_lock, is used to protect the list of all | ||
57 | * clocks. | ||
58 | * | ||
59 | * The clock operations must lock internally to protect against | ||
60 | * read-modify-write on registers that are shared by multiple clocks | ||
61 | */ | ||
62 | static DEFINE_MUTEX(clock_list_lock); | ||
34 | static LIST_HEAD(clocks); | 63 | static LIST_HEAD(clocks); |
35 | 64 | ||
36 | static DEFINE_SPINLOCK(clock_lock); | ||
37 | static DEFINE_MUTEX(dvfs_lock); | ||
38 | |||
39 | static int clk_is_dvfs(struct clk *c) | ||
40 | { | ||
41 | return (c->dvfs != NULL); | ||
42 | }; | ||
43 | |||
44 | static int dvfs_set_rate(struct dvfs *d, unsigned long rate) | ||
45 | { | ||
46 | struct dvfs_table *t; | ||
47 | |||
48 | if (d->table == NULL) | ||
49 | return -ENODEV; | ||
50 | |||
51 | for (t = d->table; t->rate != 0; t++) { | ||
52 | if (rate <= t->rate) { | ||
53 | if (!d->reg) | ||
54 | return 0; | ||
55 | |||
56 | return regulator_set_voltage(d->reg, | ||
57 | t->millivolts * 1000, | ||
58 | d->max_millivolts * 1000); | ||
59 | } | ||
60 | } | ||
61 | |||
62 | return -EINVAL; | ||
63 | } | ||
64 | |||
65 | static void dvfs_init(struct clk *c) | ||
66 | { | ||
67 | int process_id; | ||
68 | int i; | ||
69 | struct dvfs_table *table; | ||
70 | |||
71 | process_id = c->dvfs->cpu ? tegra_core_process_id() : | ||
72 | tegra_cpu_process_id(); | ||
73 | |||
74 | for (i = 0; i < c->dvfs->process_id_table_length; i++) | ||
75 | if (process_id == c->dvfs->process_id_table[i].process_id) | ||
76 | c->dvfs->table = c->dvfs->process_id_table[i].table; | ||
77 | |||
78 | if (c->dvfs->table == NULL) { | ||
79 | pr_err("Failed to find dvfs table for clock %s process %d\n", | ||
80 | c->name, process_id); | ||
81 | return; | ||
82 | } | ||
83 | |||
84 | c->dvfs->max_millivolts = 0; | ||
85 | for (table = c->dvfs->table; table->rate != 0; table++) | ||
86 | if (c->dvfs->max_millivolts < table->millivolts) | ||
87 | c->dvfs->max_millivolts = table->millivolts; | ||
88 | |||
89 | c->dvfs->reg = regulator_get(NULL, c->dvfs->reg_id); | ||
90 | |||
91 | if (IS_ERR(c->dvfs->reg)) { | ||
92 | pr_err("Failed to get regulator %s for clock %s\n", | ||
93 | c->dvfs->reg_id, c->name); | ||
94 | c->dvfs->reg = NULL; | ||
95 | return; | ||
96 | } | ||
97 | |||
98 | if (c->refcnt > 0) | ||
99 | dvfs_set_rate(c->dvfs, c->rate); | ||
100 | } | ||
101 | |||
102 | struct clk *tegra_get_clock_by_name(const char *name) | 65 | struct clk *tegra_get_clock_by_name(const char *name) |
103 | { | 66 | { |
104 | struct clk *c; | 67 | struct clk *c; |
105 | struct clk *ret = NULL; | 68 | struct clk *ret = NULL; |
106 | unsigned long flags; | 69 | mutex_lock(&clock_list_lock); |
107 | spin_lock_irqsave(&clock_lock, flags); | ||
108 | list_for_each_entry(c, &clocks, node) { | 70 | list_for_each_entry(c, &clocks, node) { |
109 | if (strcmp(c->name, name) == 0) { | 71 | if (strcmp(c->name, name) == 0) { |
110 | ret = c; | 72 | ret = c; |
111 | break; | 73 | break; |
112 | } | 74 | } |
113 | } | 75 | } |
114 | spin_unlock_irqrestore(&clock_lock, flags); | 76 | mutex_unlock(&clock_list_lock); |
115 | return ret; | 77 | return ret; |
116 | } | 78 | } |
117 | 79 | ||
118 | static void clk_recalculate_rate(struct clk *c) | 80 | /* Must be called with c->spinlock held */ |
81 | static unsigned long clk_predict_rate_from_parent(struct clk *c, struct clk *p) | ||
119 | { | 82 | { |
120 | u64 rate; | 83 | u64 rate; |
121 | 84 | ||
122 | if (!c->parent) | 85 | rate = clk_get_rate(p); |
123 | return; | ||
124 | |||
125 | rate = c->parent->rate; | ||
126 | 86 | ||
127 | if (c->mul != 0 && c->div != 0) { | 87 | if (c->mul != 0 && c->div != 0) { |
128 | rate = rate * c->mul; | 88 | rate *= c->mul; |
89 | rate += c->div - 1; /* round up */ | ||
129 | do_div(rate, c->div); | 90 | do_div(rate, c->div); |
130 | } | 91 | } |
131 | 92 | ||
132 | if (rate > c->max_rate) | 93 | return rate; |
133 | pr_warn("clocks: Set clock %s to rate %llu, max is %lu\n", | ||
134 | c->name, rate, c->max_rate); | ||
135 | |||
136 | c->rate = rate; | ||
137 | } | 94 | } |
138 | 95 | ||
139 | int clk_reparent(struct clk *c, struct clk *parent) | 96 | /* Must be called with c->spinlock held */ |
97 | unsigned long clk_get_rate_locked(struct clk *c) | ||
140 | { | 98 | { |
141 | pr_debug("%s: %s\n", __func__, c->name); | 99 | unsigned long rate; |
142 | c->parent = parent; | ||
143 | list_del(&c->sibling); | ||
144 | list_add_tail(&c->sibling, &parent->children); | ||
145 | return 0; | ||
146 | } | ||
147 | 100 | ||
148 | static void propagate_rate(struct clk *c) | 101 | if (c->parent) |
149 | { | 102 | rate = clk_predict_rate_from_parent(c, c->parent); |
150 | struct clk *clkp; | 103 | else |
151 | pr_debug("%s: %s\n", __func__, c->name); | 104 | rate = c->rate; |
152 | list_for_each_entry(clkp, &c->children, sibling) { | 105 | |
153 | pr_debug(" %s\n", clkp->name); | 106 | return rate; |
154 | clk_recalculate_rate(clkp); | ||
155 | propagate_rate(clkp); | ||
156 | } | ||
157 | } | 107 | } |
158 | 108 | ||
159 | void clk_init(struct clk *c) | 109 | unsigned long clk_get_rate(struct clk *c) |
160 | { | 110 | { |
161 | unsigned long flags; | 111 | unsigned long flags; |
112 | unsigned long rate; | ||
113 | |||
114 | spin_lock_irqsave(&c->spinlock, flags); | ||
162 | 115 | ||
163 | pr_debug("%s: %s\n", __func__, c->name); | 116 | rate = clk_get_rate_locked(c); |
164 | 117 | ||
165 | spin_lock_irqsave(&clock_lock, flags); | 118 | spin_unlock_irqrestore(&c->spinlock, flags); |
166 | 119 | ||
167 | INIT_LIST_HEAD(&c->children); | 120 | return rate; |
168 | INIT_LIST_HEAD(&c->sibling); | 121 | } |
122 | EXPORT_SYMBOL(clk_get_rate); | ||
123 | |||
124 | int clk_reparent(struct clk *c, struct clk *parent) | ||
125 | { | ||
126 | c->parent = parent; | ||
127 | return 0; | ||
128 | } | ||
129 | |||
130 | void clk_init(struct clk *c) | ||
131 | { | ||
132 | spin_lock_init(&c->spinlock); | ||
169 | 133 | ||
170 | if (c->ops && c->ops->init) | 134 | if (c->ops && c->ops->init) |
171 | c->ops->init(c); | 135 | c->ops->init(c); |
172 | 136 | ||
173 | clk_recalculate_rate(c); | 137 | if (!c->ops || !c->ops->enable) { |
138 | c->refcnt++; | ||
139 | c->set = true; | ||
140 | if (c->parent) | ||
141 | c->state = c->parent->state; | ||
142 | else | ||
143 | c->state = ON; | ||
144 | } | ||
174 | 145 | ||
146 | mutex_lock(&clock_list_lock); | ||
175 | list_add(&c->node, &clocks); | 147 | list_add(&c->node, &clocks); |
176 | 148 | mutex_unlock(&clock_list_lock); | |
177 | if (c->parent) | ||
178 | list_add_tail(&c->sibling, &c->parent->children); | ||
179 | |||
180 | spin_unlock_irqrestore(&clock_lock, flags); | ||
181 | } | 149 | } |
182 | 150 | ||
183 | int clk_enable_locked(struct clk *c) | 151 | int clk_enable(struct clk *c) |
184 | { | 152 | { |
185 | int ret; | 153 | int ret = 0; |
186 | pr_debug("%s: %s\n", __func__, c->name); | 154 | unsigned long flags; |
155 | |||
156 | spin_lock_irqsave(&c->spinlock, flags); | ||
157 | |||
187 | if (c->refcnt == 0) { | 158 | if (c->refcnt == 0) { |
188 | if (c->parent) { | 159 | if (c->parent) { |
189 | ret = clk_enable_locked(c->parent); | 160 | ret = clk_enable(c->parent); |
190 | if (ret) | 161 | if (ret) |
191 | return ret; | 162 | goto out; |
192 | } | 163 | } |
193 | 164 | ||
194 | if (c->ops && c->ops->enable) { | 165 | if (c->ops && c->ops->enable) { |
195 | ret = c->ops->enable(c); | 166 | ret = c->ops->enable(c); |
196 | if (ret) { | 167 | if (ret) { |
197 | if (c->parent) | 168 | if (c->parent) |
198 | clk_disable_locked(c->parent); | 169 | clk_disable(c->parent); |
199 | return ret; | 170 | goto out; |
200 | } | 171 | } |
201 | c->state = ON; | 172 | c->state = ON; |
202 | #ifdef CONFIG_DEBUG_FS | 173 | c->set = true; |
203 | c->set = 1; | ||
204 | #endif | ||
205 | } | 174 | } |
206 | } | 175 | } |
207 | c->refcnt++; | 176 | c->refcnt++; |
208 | 177 | out: | |
209 | return 0; | 178 | spin_unlock_irqrestore(&c->spinlock, flags); |
210 | } | ||
211 | |||
212 | int clk_enable_cansleep(struct clk *c) | ||
213 | { | ||
214 | int ret; | ||
215 | unsigned long flags; | ||
216 | |||
217 | mutex_lock(&dvfs_lock); | ||
218 | |||
219 | if (clk_is_dvfs(c) && c->refcnt > 0) | ||
220 | dvfs_set_rate(c->dvfs, c->rate); | ||
221 | |||
222 | spin_lock_irqsave(&clock_lock, flags); | ||
223 | ret = clk_enable_locked(c); | ||
224 | spin_unlock_irqrestore(&clock_lock, flags); | ||
225 | |||
226 | mutex_unlock(&dvfs_lock); | ||
227 | |||
228 | return ret; | 179 | return ret; |
229 | } | 180 | } |
230 | EXPORT_SYMBOL(clk_enable_cansleep); | 181 | EXPORT_SYMBOL(clk_enable); |
231 | 182 | ||
232 | int clk_enable(struct clk *c) | 183 | void clk_disable(struct clk *c) |
233 | { | 184 | { |
234 | int ret; | ||
235 | unsigned long flags; | 185 | unsigned long flags; |
236 | 186 | ||
237 | if (clk_is_dvfs(c)) | 187 | spin_lock_irqsave(&c->spinlock, flags); |
238 | BUG(); | ||
239 | |||
240 | spin_lock_irqsave(&clock_lock, flags); | ||
241 | ret = clk_enable_locked(c); | ||
242 | spin_unlock_irqrestore(&clock_lock, flags); | ||
243 | |||
244 | return ret; | ||
245 | } | ||
246 | EXPORT_SYMBOL(clk_enable); | ||
247 | 188 | ||
248 | void clk_disable_locked(struct clk *c) | ||
249 | { | ||
250 | pr_debug("%s: %s\n", __func__, c->name); | ||
251 | if (c->refcnt == 0) { | 189 | if (c->refcnt == 0) { |
252 | WARN(1, "Attempting to disable clock %s with refcnt 0", c->name); | 190 | WARN(1, "Attempting to disable clock %s with refcnt 0", c->name); |
191 | spin_unlock_irqrestore(&c->spinlock, flags); | ||
253 | return; | 192 | return; |
254 | } | 193 | } |
255 | if (c->refcnt == 1) { | 194 | if (c->refcnt == 1) { |
@@ -257,71 +196,39 @@ void clk_disable_locked(struct clk *c) | |||
257 | c->ops->disable(c); | 196 | c->ops->disable(c); |
258 | 197 | ||
259 | if (c->parent) | 198 | if (c->parent) |
260 | clk_disable_locked(c->parent); | 199 | clk_disable(c->parent); |
261 | 200 | ||
262 | c->state = OFF; | 201 | c->state = OFF; |
263 | } | 202 | } |
264 | c->refcnt--; | 203 | c->refcnt--; |
265 | } | ||
266 | |||
267 | void clk_disable_cansleep(struct clk *c) | ||
268 | { | ||
269 | unsigned long flags; | ||
270 | |||
271 | mutex_lock(&dvfs_lock); | ||
272 | |||
273 | spin_lock_irqsave(&clock_lock, flags); | ||
274 | clk_disable_locked(c); | ||
275 | spin_unlock_irqrestore(&clock_lock, flags); | ||
276 | 204 | ||
277 | if (clk_is_dvfs(c) && c->refcnt == 0) | 205 | spin_unlock_irqrestore(&c->spinlock, flags); |
278 | dvfs_set_rate(c->dvfs, c->rate); | ||
279 | |||
280 | mutex_unlock(&dvfs_lock); | ||
281 | } | ||
282 | EXPORT_SYMBOL(clk_disable_cansleep); | ||
283 | |||
284 | void clk_disable(struct clk *c) | ||
285 | { | ||
286 | unsigned long flags; | ||
287 | |||
288 | if (clk_is_dvfs(c)) | ||
289 | BUG(); | ||
290 | |||
291 | spin_lock_irqsave(&clock_lock, flags); | ||
292 | clk_disable_locked(c); | ||
293 | spin_unlock_irqrestore(&clock_lock, flags); | ||
294 | } | 206 | } |
295 | EXPORT_SYMBOL(clk_disable); | 207 | EXPORT_SYMBOL(clk_disable); |
296 | 208 | ||
297 | int clk_set_parent_locked(struct clk *c, struct clk *parent) | 209 | int clk_set_parent(struct clk *c, struct clk *parent) |
298 | { | 210 | { |
299 | int ret; | 211 | int ret; |
212 | unsigned long flags; | ||
213 | unsigned long new_rate; | ||
214 | unsigned long old_rate; | ||
300 | 215 | ||
301 | pr_debug("%s: %s\n", __func__, c->name); | 216 | spin_lock_irqsave(&c->spinlock, flags); |
302 | 217 | ||
303 | if (!c->ops || !c->ops->set_parent) | 218 | if (!c->ops || !c->ops->set_parent) { |
304 | return -ENOSYS; | 219 | ret = -ENOSYS; |
220 | goto out; | ||
221 | } | ||
305 | 222 | ||
306 | ret = c->ops->set_parent(c, parent); | 223 | new_rate = clk_predict_rate_from_parent(c, parent); |
224 | old_rate = clk_get_rate_locked(c); | ||
307 | 225 | ||
226 | ret = c->ops->set_parent(c, parent); | ||
308 | if (ret) | 227 | if (ret) |
309 | return ret; | 228 | goto out; |
310 | |||
311 | clk_recalculate_rate(c); | ||
312 | |||
313 | propagate_rate(c); | ||
314 | |||
315 | return 0; | ||
316 | } | ||
317 | 229 | ||
318 | int clk_set_parent(struct clk *c, struct clk *parent) | 230 | out: |
319 | { | 231 | spin_unlock_irqrestore(&c->spinlock, flags); |
320 | int ret; | ||
321 | unsigned long flags; | ||
322 | spin_lock_irqsave(&clock_lock, flags); | ||
323 | ret = clk_set_parent_locked(c, parent); | ||
324 | spin_unlock_irqrestore(&clock_lock, flags); | ||
325 | return ret; | 232 | return ret; |
326 | } | 233 | } |
327 | EXPORT_SYMBOL(clk_set_parent); | 234 | EXPORT_SYMBOL(clk_set_parent); |
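From a consumer's point of view the new per-clock spinlocks change nothing: clk_enable() takes the child's lock and then recurses into the parent, so the child-to-parent ordering described in the header comment is handled entirely inside the framework. A driver-side sketch (illustration only, not from this commit):

#include <linux/clk.h>
#include <linux/err.h>

static int example_start_uart_clock(struct device *dev)
{
	struct clk *clk;
	int err;

	clk = clk_get(dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	err = clk_set_rate(clk, 216000000);
	if (!err)
		/* takes clk's spinlock, then the parent's, per the locking notes */
		err = clk_enable(clk);

	if (err)
		clk_put(clk);

	return err;
}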
@@ -334,100 +241,86 @@ EXPORT_SYMBOL(clk_get_parent); | |||
334 | 241 | ||
335 | int clk_set_rate_locked(struct clk *c, unsigned long rate) | 242 | int clk_set_rate_locked(struct clk *c, unsigned long rate) |
336 | { | 243 | { |
337 | int ret; | 244 | long new_rate; |
338 | |||
339 | if (rate > c->max_rate) | ||
340 | rate = c->max_rate; | ||
341 | 245 | ||
342 | if (!c->ops || !c->ops->set_rate) | 246 | if (!c->ops || !c->ops->set_rate) |
343 | return -ENOSYS; | 247 | return -ENOSYS; |
344 | 248 | ||
345 | ret = c->ops->set_rate(c, rate); | 249 | if (rate > c->max_rate) |
346 | 250 | rate = c->max_rate; | |
347 | if (ret) | ||
348 | return ret; | ||
349 | |||
350 | clk_recalculate_rate(c); | ||
351 | |||
352 | propagate_rate(c); | ||
353 | |||
354 | return 0; | ||
355 | } | ||
356 | |||
357 | int clk_set_rate_cansleep(struct clk *c, unsigned long rate) | ||
358 | { | ||
359 | int ret = 0; | ||
360 | unsigned long flags; | ||
361 | |||
362 | pr_debug("%s: %s\n", __func__, c->name); | ||
363 | |||
364 | mutex_lock(&dvfs_lock); | ||
365 | |||
366 | if (rate > c->rate) | ||
367 | ret = dvfs_set_rate(c->dvfs, rate); | ||
368 | if (ret) | ||
369 | goto out; | ||
370 | 251 | ||
371 | spin_lock_irqsave(&clock_lock, flags); | 252 | if (c->ops && c->ops->round_rate) { |
372 | ret = clk_set_rate_locked(c, rate); | 253 | new_rate = c->ops->round_rate(c, rate); |
373 | spin_unlock_irqrestore(&clock_lock, flags); | ||
374 | 254 | ||
375 | if (ret) | 255 | if (new_rate < 0) |
376 | goto out; | 256 | return new_rate; |
377 | 257 | ||
378 | ret = dvfs_set_rate(c->dvfs, rate); | 258 | rate = new_rate; |
259 | } | ||
379 | 260 | ||
380 | out: | 261 | return c->ops->set_rate(c, rate); |
381 | mutex_unlock(&dvfs_lock); | ||
382 | return ret; | ||
383 | } | 262 | } |
384 | EXPORT_SYMBOL(clk_set_rate_cansleep); | ||
385 | 263 | ||
386 | int clk_set_rate(struct clk *c, unsigned long rate) | 264 | int clk_set_rate(struct clk *c, unsigned long rate) |
387 | { | 265 | { |
388 | int ret = 0; | 266 | int ret; |
389 | unsigned long flags; | 267 | unsigned long flags; |
390 | 268 | ||
391 | pr_debug("%s: %s\n", __func__, c->name); | 269 | spin_lock_irqsave(&c->spinlock, flags); |
392 | |||
393 | if (clk_is_dvfs(c)) | ||
394 | BUG(); | ||
395 | 270 | ||
396 | spin_lock_irqsave(&clock_lock, flags); | ||
397 | ret = clk_set_rate_locked(c, rate); | 271 | ret = clk_set_rate_locked(c, rate); |
398 | spin_unlock_irqrestore(&clock_lock, flags); | 272 | |
273 | spin_unlock_irqrestore(&c->spinlock, flags); | ||
399 | 274 | ||
400 | return ret; | 275 | return ret; |
401 | } | 276 | } |
402 | EXPORT_SYMBOL(clk_set_rate); | 277 | EXPORT_SYMBOL(clk_set_rate); |
403 | 278 | ||
404 | unsigned long clk_get_rate(struct clk *c) | ||
405 | { | ||
406 | unsigned long flags; | ||
407 | unsigned long ret; | ||
408 | |||
409 | spin_lock_irqsave(&clock_lock, flags); | ||
410 | 279 | ||
411 | pr_debug("%s: %s\n", __func__, c->name); | 280 | /* Must be called with clocks lock and all individual clock locks held */ |
281 | unsigned long clk_get_rate_all_locked(struct clk *c) | ||
282 | { | ||
283 | u64 rate; | ||
284 | int mul = 1; | ||
285 | int div = 1; | ||
286 | struct clk *p = c; | ||
287 | |||
288 | while (p) { | ||
289 | c = p; | ||
290 | if (c->mul != 0 && c->div != 0) { | ||
291 | mul *= c->mul; | ||
292 | div *= c->div; | ||
293 | } | ||
294 | p = c->parent; | ||
295 | } | ||
412 | 296 | ||
413 | ret = c->rate; | 297 | rate = c->rate; |
298 | rate *= mul; | ||
299 | do_div(rate, div); | ||
414 | 300 | ||
415 | spin_unlock_irqrestore(&clock_lock, flags); | 301 | return rate; |
416 | return ret; | ||
417 | } | 302 | } |
418 | EXPORT_SYMBOL(clk_get_rate); | ||
419 | 303 | ||
420 | long clk_round_rate(struct clk *c, unsigned long rate) | 304 | long clk_round_rate(struct clk *c, unsigned long rate) |
421 | { | 305 | { |
422 | pr_debug("%s: %s\n", __func__, c->name); | 306 | unsigned long flags; |
307 | long ret; | ||
423 | 308 | ||
424 | if (!c->ops || !c->ops->round_rate) | 309 | spin_lock_irqsave(&c->spinlock, flags); |
425 | return -ENOSYS; | 310 | |
311 | if (!c->ops || !c->ops->round_rate) { | ||
312 | ret = -ENOSYS; | ||
313 | goto out; | ||
314 | } | ||
426 | 315 | ||
427 | if (rate > c->max_rate) | 316 | if (rate > c->max_rate) |
428 | rate = c->max_rate; | 317 | rate = c->max_rate; |
429 | 318 | ||
430 | return c->ops->round_rate(c, rate); | 319 | ret = c->ops->round_rate(c, rate); |
320 | |||
321 | out: | ||
322 | spin_unlock_irqrestore(&c->spinlock, flags); | ||
323 | return ret; | ||
431 | } | 324 | } |
432 | EXPORT_SYMBOL(clk_round_rate); | 325 | EXPORT_SYMBOL(clk_round_rate); |
433 | 326 | ||
@@ -509,31 +402,90 @@ void __init tegra_init_clock(void) | |||
509 | tegra2_init_clocks(); | 402 | tegra2_init_clocks(); |
510 | } | 403 | } |
511 | 404 | ||
512 | int __init tegra_init_dvfs(void) | 405 | /* |
406 | * The SDMMC controllers have extra bits in the clock source register that | ||
407 | * adjust the delay between the clock and data to compensate for delays | ||
408 | * on the PCB. | ||
409 | */ | ||
410 | void tegra_sdmmc_tap_delay(struct clk *c, int delay) | ||
513 | { | 411 | { |
514 | struct clk *c, *safe; | 412 | unsigned long flags; |
413 | |||
414 | spin_lock_irqsave(&c->spinlock, flags); | ||
415 | tegra2_sdmmc_tap_delay(c, delay); | ||
416 | spin_unlock_irqrestore(&c->spinlock, flags); | ||
417 | } | ||
515 | 418 | ||
516 | mutex_lock(&dvfs_lock); | 419 | #ifdef CONFIG_DEBUG_FS |
517 | 420 | ||
518 | list_for_each_entry_safe(c, safe, &clocks, node) | 421 | static int __clk_lock_all_spinlocks(void) |
519 | if (c->dvfs) | 422 | { |
520 | dvfs_init(c); | 423 | struct clk *c; |
521 | 424 | ||
522 | mutex_unlock(&dvfs_lock); | 425 | list_for_each_entry(c, &clocks, node) |
426 | if (!spin_trylock(&c->spinlock)) | ||
427 | goto unlock_spinlocks; | ||
523 | 428 | ||
524 | return 0; | 429 | return 0; |
430 | |||
431 | unlock_spinlocks: | ||
432 | list_for_each_entry_continue_reverse(c, &clocks, node) | ||
433 | spin_unlock(&c->spinlock); | ||
434 | |||
435 | return -EAGAIN; | ||
525 | } | 436 | } |
526 | 437 | ||
527 | late_initcall(tegra_init_dvfs); | 438 | static void __clk_unlock_all_spinlocks(void) |
439 | { | ||
440 | struct clk *c; | ||
441 | |||
442 | list_for_each_entry_reverse(c, &clocks, node) | ||
443 | spin_unlock(&c->spinlock); | ||
444 | } | ||
445 | |||
446 | /* | ||
447 | * This function retries until it can take all locks, and may take | ||
448 | * an arbitrarily long time to complete. | ||
449 | * Must be called with irqs enabled, returns with irqs disabled | ||
450 | * Must be called with clock_list_lock held | ||
451 | */ | ||
452 | static void clk_lock_all(void) | ||
453 | { | ||
454 | int ret; | ||
455 | retry: | ||
456 | local_irq_disable(); | ||
457 | |||
458 | ret = __clk_lock_all_spinlocks(); | ||
459 | if (ret) | ||
460 | goto failed_spinlocks; | ||
461 | |||
462 | /* All locks taken successfully, return */ | ||
463 | return; | ||
464 | |||
465 | failed_spinlocks: | ||
466 | local_irq_enable(); | ||
467 | yield(); | ||
468 | goto retry; | ||
469 | } | ||
470 | |||
471 | /* | ||
472 | * Unlocks all clocks after a clk_lock_all | ||
473 | * Must be called with irqs disabled, returns with irqs enabled | ||
474 | * Must be called with clock_list_lock held | ||
475 | */ | ||
476 | static void clk_unlock_all(void) | ||
477 | { | ||
478 | __clk_unlock_all_spinlocks(); | ||
479 | |||
480 | local_irq_enable(); | ||
481 | } | ||
528 | 482 | ||
529 | #ifdef CONFIG_DEBUG_FS | ||
530 | static struct dentry *clk_debugfs_root; | 483 | static struct dentry *clk_debugfs_root; |
531 | 484 | ||
532 | 485 | ||
533 | static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level) | 486 | static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level) |
534 | { | 487 | { |
535 | struct clk *child; | 488 | struct clk *child; |
536 | struct clk *safe; | ||
537 | const char *state = "uninit"; | 489 | const char *state = "uninit"; |
538 | char div[8] = {0}; | 490 | char div[8] = {0}; |
539 | 491 | ||
@@ -564,8 +516,12 @@ static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level) | |||
564 | c->rate > c->max_rate ? '!' : ' ', | 516 | c->rate > c->max_rate ? '!' : ' ', |
565 | !c->set ? '*' : ' ', | 517 | !c->set ? '*' : ' ', |
566 | 30 - level * 3, c->name, | 518 | 30 - level * 3, c->name, |
567 | state, c->refcnt, div, c->rate); | 519 | state, c->refcnt, div, clk_get_rate_all_locked(c)); |
568 | list_for_each_entry_safe(child, safe, &c->children, sibling) { | 520 | |
521 | list_for_each_entry(child, &clocks, node) { | ||
522 | if (child->parent != c) | ||
523 | continue; | ||
524 | |||
569 | clock_tree_show_one(s, child, level + 1); | 525 | clock_tree_show_one(s, child, level + 1); |
570 | } | 526 | } |
571 | } | 527 | } |
@@ -573,14 +529,20 @@ static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level) | |||
573 | static int clock_tree_show(struct seq_file *s, void *data) | 529 | static int clock_tree_show(struct seq_file *s, void *data) |
574 | { | 530 | { |
575 | struct clk *c; | 531 | struct clk *c; |
576 | unsigned long flags; | ||
577 | seq_printf(s, " clock state ref div rate\n"); | 532 | seq_printf(s, " clock state ref div rate\n"); |
578 | seq_printf(s, "--------------------------------------------------------------\n"); | 533 | seq_printf(s, "--------------------------------------------------------------\n"); |
579 | spin_lock_irqsave(&clock_lock, flags); | 534 | |
535 | mutex_lock(&clock_list_lock); | ||
536 | |||
537 | clk_lock_all(); | ||
538 | |||
580 | list_for_each_entry(c, &clocks, node) | 539 | list_for_each_entry(c, &clocks, node) |
581 | if (c->parent == NULL) | 540 | if (c->parent == NULL) |
582 | clock_tree_show_one(s, c, 0); | 541 | clock_tree_show_one(s, c, 0); |
583 | spin_unlock_irqrestore(&clock_lock, flags); | 542 | |
543 | clk_unlock_all(); | ||
544 | |||
545 | mutex_unlock(&clock_list_lock); | ||
584 | return 0; | 546 | return 0; |
585 | } | 547 | } |
586 | 548 | ||
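The debugfs path above cannot take the per-clock spinlocks in any fixed order, so clk_lock_all() trylocks every clock, drops whatever it already holds on the first contended lock, re-enables interrupts, yields, and retries until it wins them all. Below is a self-contained userspace sketch of the same try-lock-all-or-back-off pattern, using pthread spinlocks in place of the kernel primitives; the names and the fixed-size lock array are illustrative and not part of the patch.

/*
 * Minimal userspace sketch of the back-off pattern used by clk_lock_all()
 * above.  The kernel code uses spin_trylock()/yield() with local irqs
 * disabled; this sketch uses pthread spinlocks and sched_yield() instead.
 * Build with: cc -o lockall lockall.c -lpthread
 */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define NLOCKS 8
static pthread_spinlock_t locks[NLOCKS];

/* Try to take every lock; on failure release the ones already held. */
static int try_lock_all(void)
{
	int i;

	for (i = 0; i < NLOCKS; i++) {
		if (pthread_spin_trylock(&locks[i]) != 0) {
			while (--i >= 0)
				pthread_spin_unlock(&locks[i]);
			return -1;	/* caller backs off and retries */
		}
	}
	return 0;
}

static void lock_all(void)
{
	while (try_lock_all() != 0)
		sched_yield();		/* analogous to yield() in clk_lock_all() */
}

static void unlock_all(void)
{
	int i;

	for (i = NLOCKS - 1; i >= 0; i--)
		pthread_spin_unlock(&locks[i]);
}

int main(void)
{
	int i;

	for (i = 0; i < NLOCKS; i++)
		pthread_spin_init(&locks[i], PTHREAD_PROCESS_PRIVATE);

	lock_all();
	printf("all %d locks held\n", NLOCKS);
	unlock_all();
	return 0;
}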
diff --git a/arch/arm/mach-tegra/clock.h b/arch/arm/mach-tegra/clock.h index 083a4cfc6cf0..688316abc64e 100644 --- a/arch/arm/mach-tegra/clock.h +++ b/arch/arm/mach-tegra/clock.h | |||
@@ -20,8 +20,9 @@ | |||
20 | #ifndef __MACH_TEGRA_CLOCK_H | 20 | #ifndef __MACH_TEGRA_CLOCK_H |
21 | #define __MACH_TEGRA_CLOCK_H | 21 | #define __MACH_TEGRA_CLOCK_H |
22 | 22 | ||
23 | #include <linux/list.h> | ||
24 | #include <linux/clkdev.h> | 23 | #include <linux/clkdev.h> |
24 | #include <linux/list.h> | ||
25 | #include <linux/spinlock.h> | ||
25 | 26 | ||
26 | #define DIV_BUS (1 << 0) | 27 | #define DIV_BUS (1 << 0) |
27 | #define DIV_U71 (1 << 1) | 28 | #define DIV_U71 (1 << 1) |
@@ -41,36 +42,13 @@ | |||
41 | #define ENABLE_ON_INIT (1 << 28) | 42 | #define ENABLE_ON_INIT (1 << 28) |
42 | 43 | ||
43 | struct clk; | 44 | struct clk; |
44 | struct regulator; | ||
45 | |||
46 | struct dvfs_table { | ||
47 | unsigned long rate; | ||
48 | int millivolts; | ||
49 | }; | ||
50 | |||
51 | struct dvfs_process_id_table { | ||
52 | int process_id; | ||
53 | struct dvfs_table *table; | ||
54 | }; | ||
55 | |||
56 | |||
57 | struct dvfs { | ||
58 | struct regulator *reg; | ||
59 | struct dvfs_table *table; | ||
60 | int max_millivolts; | ||
61 | |||
62 | int process_id_table_length; | ||
63 | const char *reg_id; | ||
64 | bool cpu; | ||
65 | struct dvfs_process_id_table process_id_table[]; | ||
66 | }; | ||
67 | 45 | ||
68 | struct clk_mux_sel { | 46 | struct clk_mux_sel { |
69 | struct clk *input; | 47 | struct clk *input; |
70 | u32 value; | 48 | u32 value; |
71 | }; | 49 | }; |
72 | 50 | ||
73 | struct clk_pll_table { | 51 | struct clk_pll_freq_table { |
74 | unsigned long input_rate; | 52 | unsigned long input_rate; |
75 | unsigned long output_rate; | 53 | unsigned long output_rate; |
76 | u16 n; | 54 | u16 n; |
@@ -86,6 +64,7 @@ struct clk_ops { | |||
86 | int (*set_parent)(struct clk *, struct clk *); | 64 | int (*set_parent)(struct clk *, struct clk *); |
87 | int (*set_rate)(struct clk *, unsigned long); | 65 | int (*set_rate)(struct clk *, unsigned long); |
88 | long (*round_rate)(struct clk *, unsigned long); | 66 | long (*round_rate)(struct clk *, unsigned long); |
67 | void (*reset)(struct clk *, bool); | ||
89 | }; | 68 | }; |
90 | 69 | ||
91 | enum clk_state { | 70 | enum clk_state { |
@@ -96,55 +75,64 @@ enum clk_state { | |||
96 | 75 | ||
97 | struct clk { | 76 | struct clk { |
98 | /* node for master clocks list */ | 77 | /* node for master clocks list */ |
99 | struct list_head node; | 78 | struct list_head node; /* node for list of all clocks */ |
100 | struct list_head children; /* list of children */ | 79 | struct clk_lookup lookup; |
101 | struct list_head sibling; /* node for children */ | 80 | |
102 | #ifdef CONFIG_DEBUG_FS | ||
103 | struct dentry *dent; | ||
104 | struct dentry *parent_dent; | ||
105 | #endif | ||
106 | struct clk_ops *ops; | ||
107 | struct clk *parent; | ||
108 | struct clk_lookup lookup; | ||
109 | unsigned long rate; | ||
110 | unsigned long max_rate; | ||
111 | u32 flags; | ||
112 | u32 refcnt; | ||
113 | const char *name; | ||
114 | u32 reg; | ||
115 | u32 reg_shift; | ||
116 | unsigned int clk_num; | ||
117 | enum clk_state state; | ||
118 | #ifdef CONFIG_DEBUG_FS | 81 | #ifdef CONFIG_DEBUG_FS |
119 | bool set; | 82 | struct dentry *dent; |
120 | #endif | 83 | #endif |
84 | bool set; | ||
85 | struct clk_ops *ops; | ||
86 | unsigned long rate; | ||
87 | unsigned long max_rate; | ||
88 | unsigned long min_rate; | ||
89 | u32 flags; | ||
90 | const char *name; | ||
91 | |||
92 | u32 refcnt; | ||
93 | enum clk_state state; | ||
94 | struct clk *parent; | ||
95 | u32 div; | ||
96 | u32 mul; | ||
121 | 97 | ||
122 | /* PLL */ | ||
123 | unsigned long input_min; | ||
124 | unsigned long input_max; | ||
125 | unsigned long cf_min; | ||
126 | unsigned long cf_max; | ||
127 | unsigned long vco_min; | ||
128 | unsigned long vco_max; | ||
129 | const struct clk_pll_table *pll_table; | ||
130 | |||
131 | /* DIV */ | ||
132 | u32 div; | ||
133 | u32 mul; | ||
134 | |||
135 | /* MUX */ | ||
136 | const struct clk_mux_sel *inputs; | 98 | const struct clk_mux_sel *inputs; |
137 | u32 sel; | 99 | u32 reg; |
138 | u32 reg_mask; | 100 | u32 reg_shift; |
139 | |||
140 | /* Virtual cpu clock */ | ||
141 | struct clk *main; | ||
142 | struct clk *backup; | ||
143 | 101 | ||
144 | struct dvfs *dvfs; | 102 | struct list_head shared_bus_list; |
103 | |||
104 | union { | ||
105 | struct { | ||
106 | unsigned int clk_num; | ||
107 | } periph; | ||
108 | struct { | ||
109 | unsigned long input_min; | ||
110 | unsigned long input_max; | ||
111 | unsigned long cf_min; | ||
112 | unsigned long cf_max; | ||
113 | unsigned long vco_min; | ||
114 | unsigned long vco_max; | ||
115 | const struct clk_pll_freq_table *freq_table; | ||
116 | int lock_delay; | ||
117 | } pll; | ||
118 | struct { | ||
119 | u32 sel; | ||
120 | u32 reg_mask; | ||
121 | } mux; | ||
122 | struct { | ||
123 | struct clk *main; | ||
124 | struct clk *backup; | ||
125 | } cpu; | ||
126 | struct { | ||
127 | struct list_head node; | ||
128 | bool enabled; | ||
129 | unsigned long rate; | ||
130 | } shared_bus_user; | ||
131 | } u; | ||
132 | |||
133 | spinlock_t spinlock; | ||
145 | }; | 134 | }; |
146 | 135 | ||
147 | |||
148 | struct clk_duplicate { | 136 | struct clk_duplicate { |
149 | const char *name; | 137 | const char *name; |
150 | struct clk_lookup lookup; | 138 | struct clk_lookup lookup; |
@@ -163,11 +151,10 @@ void tegra2_periph_reset_assert(struct clk *c); | |||
163 | void clk_init(struct clk *clk); | 151 | void clk_init(struct clk *clk); |
164 | struct clk *tegra_get_clock_by_name(const char *name); | 152 | struct clk *tegra_get_clock_by_name(const char *name); |
165 | unsigned long clk_measure_input_freq(void); | 153 | unsigned long clk_measure_input_freq(void); |
166 | void clk_disable_locked(struct clk *c); | ||
167 | int clk_enable_locked(struct clk *c); | ||
168 | int clk_set_parent_locked(struct clk *c, struct clk *parent); | ||
169 | int clk_set_rate_locked(struct clk *c, unsigned long rate); | ||
170 | int clk_reparent(struct clk *c, struct clk *parent); | 154 | int clk_reparent(struct clk *c, struct clk *parent); |
171 | void tegra_clk_init_from_table(struct tegra_clk_init_table *table); | 155 | void tegra_clk_init_from_table(struct tegra_clk_init_table *table); |
156 | unsigned long clk_get_rate_locked(struct clk *c); | ||
157 | int clk_set_rate_locked(struct clk *c, unsigned long rate); | ||
158 | void tegra2_sdmmc_tap_delay(struct clk *c, int delay); | ||
172 | 159 | ||
173 | #endif | 160 | #endif |
diff --git a/arch/arm/mach-tegra/common.c b/arch/arm/mach-tegra/common.c index 7c91e2b9d643..d5e3f89b05af 100644 --- a/arch/arm/mach-tegra/common.c +++ b/arch/arm/mach-tegra/common.c | |||
@@ -25,12 +25,25 @@ | |||
25 | #include <asm/hardware/cache-l2x0.h> | 25 | #include <asm/hardware/cache-l2x0.h> |
26 | 26 | ||
27 | #include <mach/iomap.h> | 27 | #include <mach/iomap.h> |
28 | #include <mach/dma.h> | 28 | #include <mach/system.h> |
29 | 29 | ||
30 | #include "board.h" | 30 | #include "board.h" |
31 | #include "clock.h" | 31 | #include "clock.h" |
32 | #include "fuse.h" | 32 | #include "fuse.h" |
33 | 33 | ||
34 | void (*arch_reset)(char mode, const char *cmd) = tegra_assert_system_reset; | ||
35 | |||
36 | void tegra_assert_system_reset(char mode, const char *cmd) | ||
37 | { | ||
38 | void __iomem *reset = IO_ADDRESS(TEGRA_CLK_RESET_BASE + 0x04); | ||
39 | u32 reg; | ||
40 | |||
41 | /* use *_relaxed to avoid spinlock since caches are off */ | ||
42 | reg = readl_relaxed(reset); | ||
43 | reg |= 0x04; | ||
44 | writel_relaxed(reg, reset); | ||
45 | } | ||
46 | |||
34 | static __initdata struct tegra_clk_init_table common_clk_init_table[] = { | 47 | static __initdata struct tegra_clk_init_table common_clk_init_table[] = { |
35 | /* name parent rate enabled */ | 48 | /* name parent rate enabled */ |
36 | { "clk_m", NULL, 0, true }, | 49 | { "clk_m", NULL, 0, true }, |
@@ -42,6 +55,9 @@ static __initdata struct tegra_clk_init_table common_clk_init_table[] = { | |||
42 | { "sclk", "pll_p_out4", 108000000, true }, | 55 | { "sclk", "pll_p_out4", 108000000, true }, |
43 | { "hclk", "sclk", 108000000, true }, | 56 | { "hclk", "sclk", 108000000, true }, |
44 | { "pclk", "hclk", 54000000, true }, | 57 | { "pclk", "hclk", 54000000, true }, |
58 | { "csite", NULL, 0, true }, | ||
59 | { "emc", NULL, 0, true }, | ||
60 | { "cpu", NULL, 0, true }, | ||
45 | { NULL, NULL, 0, 0}, | 61 | { NULL, NULL, 0, 0}, |
46 | }; | 62 | }; |
47 | 63 | ||
@@ -50,21 +66,18 @@ void __init tegra_init_cache(void) | |||
50 | #ifdef CONFIG_CACHE_L2X0 | 66 | #ifdef CONFIG_CACHE_L2X0 |
51 | void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000; | 67 | void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000; |
52 | 68 | ||
53 | writel(0x331, p + L2X0_TAG_LATENCY_CTRL); | 69 | writel_relaxed(0x331, p + L2X0_TAG_LATENCY_CTRL); |
54 | writel(0x441, p + L2X0_DATA_LATENCY_CTRL); | 70 | writel_relaxed(0x441, p + L2X0_DATA_LATENCY_CTRL); |
55 | 71 | ||
56 | l2x0_init(p, 0x6C080001, 0x8200c3fe); | 72 | l2x0_init(p, 0x6C080001, 0x8200c3fe); |
57 | #endif | 73 | #endif |
58 | 74 | ||
59 | } | 75 | } |
60 | 76 | ||
61 | void __init tegra_common_init(void) | 77 | void __init tegra_init_early(void) |
62 | { | 78 | { |
63 | tegra_init_fuse(); | 79 | tegra_init_fuse(); |
64 | tegra_init_clock(); | 80 | tegra_init_clock(); |
65 | tegra_clk_init_from_table(common_clk_init_table); | 81 | tegra_clk_init_from_table(common_clk_init_table); |
66 | tegra_init_cache(); | 82 | tegra_init_cache(); |
67 | #ifdef CONFIG_TEGRA_SYSTEM_DMA | ||
68 | tegra_dma_init(); | ||
69 | #endif | ||
70 | } | 83 | } |
diff --git a/arch/arm/mach-tegra/cpu-tegra.c b/arch/arm/mach-tegra/cpu-tegra.c index fea5719c7072..0e1016a827ac 100644 --- a/arch/arm/mach-tegra/cpu-tegra.c +++ b/arch/arm/mach-tegra/cpu-tegra.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/err.h> | 28 | #include <linux/err.h> |
29 | #include <linux/clk.h> | 29 | #include <linux/clk.h> |
30 | #include <linux/io.h> | 30 | #include <linux/io.h> |
31 | #include <linux/suspend.h> | ||
31 | 32 | ||
32 | #include <asm/system.h> | 33 | #include <asm/system.h> |
33 | 34 | ||
@@ -36,21 +37,25 @@ | |||
36 | 37 | ||
37 | /* Frequency table index must be sequential starting at 0 */ | 38 | /* Frequency table index must be sequential starting at 0 */ |
38 | static struct cpufreq_frequency_table freq_table[] = { | 39 | static struct cpufreq_frequency_table freq_table[] = { |
39 | { 0, 312000 }, | 40 | { 0, 216000 }, |
40 | { 1, 456000 }, | 41 | { 1, 312000 }, |
41 | { 2, 608000 }, | 42 | { 2, 456000 }, |
42 | { 3, 760000 }, | 43 | { 3, 608000 }, |
43 | { 4, 816000 }, | 44 | { 4, 760000 }, |
44 | { 5, 912000 }, | 45 | { 5, 816000 }, |
45 | { 6, 1000000 }, | 46 | { 6, 912000 }, |
46 | { 7, CPUFREQ_TABLE_END }, | 47 | { 7, 1000000 }, |
48 | { 8, CPUFREQ_TABLE_END }, | ||
47 | }; | 49 | }; |
48 | 50 | ||
49 | #define NUM_CPUS 2 | 51 | #define NUM_CPUS 2 |
50 | 52 | ||
51 | static struct clk *cpu_clk; | 53 | static struct clk *cpu_clk; |
54 | static struct clk *emc_clk; | ||
52 | 55 | ||
53 | static unsigned long target_cpu_speed[NUM_CPUS]; | 56 | static unsigned long target_cpu_speed[NUM_CPUS]; |
57 | static DEFINE_MUTEX(tegra_cpu_lock); | ||
58 | static bool is_suspended; | ||
54 | 59 | ||
55 | int tegra_verify_speed(struct cpufreq_policy *policy) | 60 | int tegra_verify_speed(struct cpufreq_policy *policy) |
56 | { | 61 | { |
@@ -68,22 +73,28 @@ unsigned int tegra_getspeed(unsigned int cpu) | |||
68 | return rate; | 73 | return rate; |
69 | } | 74 | } |
70 | 75 | ||
71 | static int tegra_update_cpu_speed(void) | 76 | static int tegra_update_cpu_speed(unsigned long rate) |
72 | { | 77 | { |
73 | int i; | ||
74 | unsigned long rate = 0; | ||
75 | int ret = 0; | 78 | int ret = 0; |
76 | struct cpufreq_freqs freqs; | 79 | struct cpufreq_freqs freqs; |
77 | 80 | ||
78 | for_each_online_cpu(i) | ||
79 | rate = max(rate, target_cpu_speed[i]); | ||
80 | |||
81 | freqs.old = tegra_getspeed(0); | 81 | freqs.old = tegra_getspeed(0); |
82 | freqs.new = rate; | 82 | freqs.new = rate; |
83 | 83 | ||
84 | if (freqs.old == freqs.new) | 84 | if (freqs.old == freqs.new) |
85 | return ret; | 85 | return ret; |
86 | 86 | ||
87 | /* | ||
88 | * Vote on memory bus frequency based on cpu frequency | ||
89 | * This sets the minimum frequency; the display or AVP may request higher | ||
90 | */ | ||
91 | if (rate >= 816000) | ||
92 | clk_set_rate(emc_clk, 600000000); /* cpu 816 MHz, emc max */ | ||
93 | else if (rate >= 456000) | ||
94 | clk_set_rate(emc_clk, 300000000); /* cpu 456 MHz, emc 150 MHz */ | ||
95 | else | ||
96 | clk_set_rate(emc_clk, 100000000); /* emc 50 MHz */ | ||
97 | |||
87 | for_each_online_cpu(freqs.cpu) | 98 | for_each_online_cpu(freqs.cpu) |
88 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | 99 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); |
89 | 100 | ||
@@ -92,7 +103,7 @@ static int tegra_update_cpu_speed(void) | |||
92 | freqs.old, freqs.new); | 103 | freqs.old, freqs.new); |
93 | #endif | 104 | #endif |
94 | 105 | ||
95 | ret = clk_set_rate_cansleep(cpu_clk, freqs.new * 1000); | 106 | ret = clk_set_rate(cpu_clk, freqs.new * 1000); |
96 | if (ret) { | 107 | if (ret) { |
97 | pr_err("cpu-tegra: Failed to set cpu frequency to %d kHz\n", | 108 | pr_err("cpu-tegra: Failed to set cpu frequency to %d kHz\n", |
98 | freqs.new); | 109 | freqs.new); |
@@ -105,12 +116,30 @@ static int tegra_update_cpu_speed(void) | |||
105 | return 0; | 116 | return 0; |
106 | } | 117 | } |
107 | 118 | ||
119 | static unsigned long tegra_cpu_highest_speed(void) | ||
120 | { | ||
121 | unsigned long rate = 0; | ||
122 | int i; | ||
123 | |||
124 | for_each_online_cpu(i) | ||
125 | rate = max(rate, target_cpu_speed[i]); | ||
126 | return rate; | ||
127 | } | ||
128 | |||
108 | static int tegra_target(struct cpufreq_policy *policy, | 129 | static int tegra_target(struct cpufreq_policy *policy, |
109 | unsigned int target_freq, | 130 | unsigned int target_freq, |
110 | unsigned int relation) | 131 | unsigned int relation) |
111 | { | 132 | { |
112 | int idx; | 133 | int idx; |
113 | unsigned int freq; | 134 | unsigned int freq; |
135 | int ret = 0; | ||
136 | |||
137 | mutex_lock(&tegra_cpu_lock); | ||
138 | |||
139 | if (is_suspended) { | ||
140 | ret = -EBUSY; | ||
141 | goto out; | ||
142 | } | ||
114 | 143 | ||
115 | cpufreq_frequency_table_target(policy, freq_table, target_freq, | 144 | cpufreq_frequency_table_target(policy, freq_table, target_freq, |
116 | relation, &idx); | 145 | relation, &idx); |
@@ -119,9 +148,34 @@ static int tegra_target(struct cpufreq_policy *policy, | |||
119 | 148 | ||
120 | target_cpu_speed[policy->cpu] = freq; | 149 | target_cpu_speed[policy->cpu] = freq; |
121 | 150 | ||
122 | return tegra_update_cpu_speed(); | 151 | ret = tegra_update_cpu_speed(tegra_cpu_highest_speed()); |
152 | |||
153 | out: | ||
154 | mutex_unlock(&tegra_cpu_lock); | ||
155 | return ret; | ||
123 | } | 156 | } |
124 | 157 | ||
158 | static int tegra_pm_notify(struct notifier_block *nb, unsigned long event, | ||
159 | void *dummy) | ||
160 | { | ||
161 | mutex_lock(&tegra_cpu_lock); | ||
162 | if (event == PM_SUSPEND_PREPARE) { | ||
163 | is_suspended = true; | ||
164 | pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n", | ||
165 | freq_table[0].frequency); | ||
166 | tegra_update_cpu_speed(freq_table[0].frequency); | ||
167 | } else if (event == PM_POST_SUSPEND) { | ||
168 | is_suspended = false; | ||
169 | } | ||
170 | mutex_unlock(&tegra_cpu_lock); | ||
171 | |||
172 | return NOTIFY_OK; | ||
173 | } | ||
174 | |||
175 | static struct notifier_block tegra_cpu_pm_notifier = { | ||
176 | .notifier_call = tegra_pm_notify, | ||
177 | }; | ||
178 | |||
125 | static int tegra_cpu_init(struct cpufreq_policy *policy) | 179 | static int tegra_cpu_init(struct cpufreq_policy *policy) |
126 | { | 180 | { |
127 | if (policy->cpu >= NUM_CPUS) | 181 | if (policy->cpu >= NUM_CPUS) |
@@ -131,6 +185,15 @@ static int tegra_cpu_init(struct cpufreq_policy *policy) | |||
131 | if (IS_ERR(cpu_clk)) | 185 | if (IS_ERR(cpu_clk)) |
132 | return PTR_ERR(cpu_clk); | 186 | return PTR_ERR(cpu_clk); |
133 | 187 | ||
188 | emc_clk = clk_get_sys("cpu", "emc"); | ||
189 | if (IS_ERR(emc_clk)) { | ||
190 | clk_put(cpu_clk); | ||
191 | return PTR_ERR(emc_clk); | ||
192 | } | ||
193 | |||
194 | clk_enable(emc_clk); | ||
195 | clk_enable(cpu_clk); | ||
196 | |||
134 | cpufreq_frequency_table_cpuinfo(policy, freq_table); | 197 | cpufreq_frequency_table_cpuinfo(policy, freq_table); |
135 | cpufreq_frequency_table_get_attr(freq_table, policy->cpu); | 198 | cpufreq_frequency_table_get_attr(freq_table, policy->cpu); |
136 | policy->cur = tegra_getspeed(policy->cpu); | 199 | policy->cur = tegra_getspeed(policy->cpu); |
@@ -142,12 +205,17 @@ static int tegra_cpu_init(struct cpufreq_policy *policy) | |||
142 | policy->shared_type = CPUFREQ_SHARED_TYPE_ALL; | 205 | policy->shared_type = CPUFREQ_SHARED_TYPE_ALL; |
143 | cpumask_copy(policy->related_cpus, cpu_possible_mask); | 206 | cpumask_copy(policy->related_cpus, cpu_possible_mask); |
144 | 207 | ||
208 | if (policy->cpu == 0) | ||
209 | register_pm_notifier(&tegra_cpu_pm_notifier); | ||
210 | |||
145 | return 0; | 211 | return 0; |
146 | } | 212 | } |
147 | 213 | ||
148 | static int tegra_cpu_exit(struct cpufreq_policy *policy) | 214 | static int tegra_cpu_exit(struct cpufreq_policy *policy) |
149 | { | 215 | { |
150 | cpufreq_frequency_table_cpuinfo(policy, freq_table); | 216 | cpufreq_frequency_table_cpuinfo(policy, freq_table); |
217 | clk_disable(emc_clk); | ||
218 | clk_put(emc_clk); | ||
151 | clk_put(cpu_clk); | 219 | clk_put(cpu_clk); |
152 | return 0; | 220 | return 0; |
153 | } | 221 | } |
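Before changing the CPU clock, tegra_update_cpu_speed() above votes a minimum EMC (memory bus) rate derived from the target CPU frequency. The following small helper restates those thresholds with a trivial test; it is purely illustrative and not part of the patch.

#include <stdio.h>

/* Restates the EMC-vote thresholds from tegra_update_cpu_speed() above.
 * cpu_khz is the target CPU frequency in kHz; the return value is the EMC
 * rate in Hz that would be requested via clk_set_rate(emc_clk, ...).
 * Purely illustrative; not part of the patch. */
static unsigned long emc_rate_for_cpu(unsigned long cpu_khz)
{
	if (cpu_khz >= 816000)
		return 600000000;	/* cpu >= 816 MHz: emc max */
	else if (cpu_khz >= 456000)
		return 300000000;	/* cpu >= 456 MHz */
	else
		return 100000000;	/* lower cpu rates */
}

int main(void)
{
	unsigned long rates[] = { 216000, 456000, 816000, 1000000 };
	size_t i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("cpu %lu kHz -> emc %lu Hz\n",
		       rates[i], emc_rate_for_cpu(rates[i]));
	return 0;
}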
diff --git a/arch/arm/mach-tegra/devices.c b/arch/arm/mach-tegra/devices.c new file mode 100644 index 000000000000..682e6d33108c --- /dev/null +++ b/arch/arm/mach-tegra/devices.c | |||
@@ -0,0 +1,505 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010,2011 Google, Inc. | ||
3 | * | ||
4 | * Author: | ||
5 | * Colin Cross <ccross@android.com> | ||
6 | * Erik Gilling <ccross@android.com> | ||
7 | * | ||
8 | * This software is licensed under the terms of the GNU General Public | ||
9 | * License version 2, as published by the Free Software Foundation, and | ||
10 | * may be copied, distributed, and modified under those terms. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | */ | ||
18 | |||
19 | |||
20 | #include <linux/resource.h> | ||
21 | #include <linux/platform_device.h> | ||
22 | #include <linux/dma-mapping.h> | ||
23 | #include <linux/fsl_devices.h> | ||
24 | #include <linux/serial_8250.h> | ||
25 | #include <asm/pmu.h> | ||
26 | #include <mach/irqs.h> | ||
27 | #include <mach/iomap.h> | ||
28 | #include <mach/dma.h> | ||
29 | |||
30 | static struct resource i2c_resource1[] = { | ||
31 | [0] = { | ||
32 | .start = INT_I2C, | ||
33 | .end = INT_I2C, | ||
34 | .flags = IORESOURCE_IRQ, | ||
35 | }, | ||
36 | [1] = { | ||
37 | .start = TEGRA_I2C_BASE, | ||
38 | .end = TEGRA_I2C_BASE + TEGRA_I2C_SIZE-1, | ||
39 | .flags = IORESOURCE_MEM, | ||
40 | }, | ||
41 | }; | ||
42 | |||
43 | static struct resource i2c_resource2[] = { | ||
44 | [0] = { | ||
45 | .start = INT_I2C2, | ||
46 | .end = INT_I2C2, | ||
47 | .flags = IORESOURCE_IRQ, | ||
48 | }, | ||
49 | [1] = { | ||
50 | .start = TEGRA_I2C2_BASE, | ||
51 | .end = TEGRA_I2C2_BASE + TEGRA_I2C2_SIZE-1, | ||
52 | .flags = IORESOURCE_MEM, | ||
53 | }, | ||
54 | }; | ||
55 | |||
56 | static struct resource i2c_resource3[] = { | ||
57 | [0] = { | ||
58 | .start = INT_I2C3, | ||
59 | .end = INT_I2C3, | ||
60 | .flags = IORESOURCE_IRQ, | ||
61 | }, | ||
62 | [1] = { | ||
63 | .start = TEGRA_I2C3_BASE, | ||
64 | .end = TEGRA_I2C3_BASE + TEGRA_I2C3_SIZE-1, | ||
65 | .flags = IORESOURCE_MEM, | ||
66 | }, | ||
67 | }; | ||
68 | |||
69 | static struct resource i2c_resource4[] = { | ||
70 | [0] = { | ||
71 | .start = INT_DVC, | ||
72 | .end = INT_DVC, | ||
73 | .flags = IORESOURCE_IRQ, | ||
74 | }, | ||
75 | [1] = { | ||
76 | .start = TEGRA_DVC_BASE, | ||
77 | .end = TEGRA_DVC_BASE + TEGRA_DVC_SIZE-1, | ||
78 | .flags = IORESOURCE_MEM, | ||
79 | }, | ||
80 | }; | ||
81 | |||
82 | struct platform_device tegra_i2c_device1 = { | ||
83 | .name = "tegra-i2c", | ||
84 | .id = 0, | ||
85 | .resource = i2c_resource1, | ||
86 | .num_resources = ARRAY_SIZE(i2c_resource1), | ||
87 | .dev = { | ||
88 | .platform_data = 0, | ||
89 | }, | ||
90 | }; | ||
91 | |||
92 | struct platform_device tegra_i2c_device2 = { | ||
93 | .name = "tegra-i2c", | ||
94 | .id = 1, | ||
95 | .resource = i2c_resource2, | ||
96 | .num_resources = ARRAY_SIZE(i2c_resource2), | ||
97 | .dev = { | ||
98 | .platform_data = 0, | ||
99 | }, | ||
100 | }; | ||
101 | |||
102 | struct platform_device tegra_i2c_device3 = { | ||
103 | .name = "tegra-i2c", | ||
104 | .id = 2, | ||
105 | .resource = i2c_resource3, | ||
106 | .num_resources = ARRAY_SIZE(i2c_resource3), | ||
107 | .dev = { | ||
108 | .platform_data = 0, | ||
109 | }, | ||
110 | }; | ||
111 | |||
112 | struct platform_device tegra_i2c_device4 = { | ||
113 | .name = "tegra-i2c", | ||
114 | .id = 3, | ||
115 | .resource = i2c_resource4, | ||
116 | .num_resources = ARRAY_SIZE(i2c_resource4), | ||
117 | .dev = { | ||
118 | .platform_data = 0, | ||
119 | }, | ||
120 | }; | ||
121 | |||
122 | static struct resource spi_resource1[] = { | ||
123 | [0] = { | ||
124 | .start = INT_S_LINK1, | ||
125 | .end = INT_S_LINK1, | ||
126 | .flags = IORESOURCE_IRQ, | ||
127 | }, | ||
128 | [1] = { | ||
129 | .start = TEGRA_SPI1_BASE, | ||
130 | .end = TEGRA_SPI1_BASE + TEGRA_SPI1_SIZE-1, | ||
131 | .flags = IORESOURCE_MEM, | ||
132 | }, | ||
133 | }; | ||
134 | |||
135 | static struct resource spi_resource2[] = { | ||
136 | [0] = { | ||
137 | .start = INT_SPI_2, | ||
138 | .end = INT_SPI_2, | ||
139 | .flags = IORESOURCE_IRQ, | ||
140 | }, | ||
141 | [1] = { | ||
142 | .start = TEGRA_SPI2_BASE, | ||
143 | .end = TEGRA_SPI2_BASE + TEGRA_SPI2_SIZE-1, | ||
144 | .flags = IORESOURCE_MEM, | ||
145 | }, | ||
146 | }; | ||
147 | |||
148 | static struct resource spi_resource3[] = { | ||
149 | [0] = { | ||
150 | .start = INT_SPI_3, | ||
151 | .end = INT_SPI_3, | ||
152 | .flags = IORESOURCE_IRQ, | ||
153 | }, | ||
154 | [1] = { | ||
155 | .start = TEGRA_SPI3_BASE, | ||
156 | .end = TEGRA_SPI3_BASE + TEGRA_SPI3_SIZE-1, | ||
157 | .flags = IORESOURCE_MEM, | ||
158 | }, | ||
159 | }; | ||
160 | |||
161 | static struct resource spi_resource4[] = { | ||
162 | [0] = { | ||
163 | .start = INT_SPI_4, | ||
164 | .end = INT_SPI_4, | ||
165 | .flags = IORESOURCE_IRQ, | ||
166 | }, | ||
167 | [1] = { | ||
168 | .start = TEGRA_SPI4_BASE, | ||
169 | .end = TEGRA_SPI4_BASE + TEGRA_SPI4_SIZE-1, | ||
170 | .flags = IORESOURCE_MEM, | ||
171 | }, | ||
172 | }; | ||
173 | |||
174 | struct platform_device tegra_spi_device1 = { | ||
175 | .name = "spi_tegra", | ||
176 | .id = 0, | ||
177 | .resource = spi_resource1, | ||
178 | .num_resources = ARRAY_SIZE(spi_resource1), | ||
179 | .dev = { | ||
180 | .coherent_dma_mask = 0xffffffff, | ||
181 | }, | ||
182 | }; | ||
183 | |||
184 | struct platform_device tegra_spi_device2 = { | ||
185 | .name = "spi_tegra", | ||
186 | .id = 1, | ||
187 | .resource = spi_resource2, | ||
188 | .num_resources = ARRAY_SIZE(spi_resource2), | ||
189 | .dev = { | ||
190 | .coherent_dma_mask = 0xffffffff, | ||
191 | }, | ||
192 | }; | ||
193 | |||
194 | struct platform_device tegra_spi_device3 = { | ||
195 | .name = "spi_tegra", | ||
196 | .id = 2, | ||
197 | .resource = spi_resource3, | ||
198 | .num_resources = ARRAY_SIZE(spi_resource3), | ||
199 | .dev = { | ||
200 | .coherent_dma_mask = 0xffffffff, | ||
201 | }, | ||
202 | }; | ||
203 | |||
204 | struct platform_device tegra_spi_device4 = { | ||
205 | .name = "spi_tegra", | ||
206 | .id = 3, | ||
207 | .resource = spi_resource4, | ||
208 | .num_resources = ARRAY_SIZE(spi_resource4), | ||
209 | .dev = { | ||
210 | .coherent_dma_mask = 0xffffffff, | ||
211 | }, | ||
212 | }; | ||
213 | |||
214 | |||
215 | static struct resource sdhci_resource1[] = { | ||
216 | [0] = { | ||
217 | .start = INT_SDMMC1, | ||
218 | .end = INT_SDMMC1, | ||
219 | .flags = IORESOURCE_IRQ, | ||
220 | }, | ||
221 | [1] = { | ||
222 | .start = TEGRA_SDMMC1_BASE, | ||
223 | .end = TEGRA_SDMMC1_BASE + TEGRA_SDMMC1_SIZE-1, | ||
224 | .flags = IORESOURCE_MEM, | ||
225 | }, | ||
226 | }; | ||
227 | |||
228 | static struct resource sdhci_resource2[] = { | ||
229 | [0] = { | ||
230 | .start = INT_SDMMC2, | ||
231 | .end = INT_SDMMC2, | ||
232 | .flags = IORESOURCE_IRQ, | ||
233 | }, | ||
234 | [1] = { | ||
235 | .start = TEGRA_SDMMC2_BASE, | ||
236 | .end = TEGRA_SDMMC2_BASE + TEGRA_SDMMC2_SIZE-1, | ||
237 | .flags = IORESOURCE_MEM, | ||
238 | }, | ||
239 | }; | ||
240 | |||
241 | static struct resource sdhci_resource3[] = { | ||
242 | [0] = { | ||
243 | .start = INT_SDMMC3, | ||
244 | .end = INT_SDMMC3, | ||
245 | .flags = IORESOURCE_IRQ, | ||
246 | }, | ||
247 | [1] = { | ||
248 | .start = TEGRA_SDMMC3_BASE, | ||
249 | .end = TEGRA_SDMMC3_BASE + TEGRA_SDMMC3_SIZE-1, | ||
250 | .flags = IORESOURCE_MEM, | ||
251 | }, | ||
252 | }; | ||
253 | |||
254 | static struct resource sdhci_resource4[] = { | ||
255 | [0] = { | ||
256 | .start = INT_SDMMC4, | ||
257 | .end = INT_SDMMC4, | ||
258 | .flags = IORESOURCE_IRQ, | ||
259 | }, | ||
260 | [1] = { | ||
261 | .start = TEGRA_SDMMC4_BASE, | ||
262 | .end = TEGRA_SDMMC4_BASE + TEGRA_SDMMC4_SIZE-1, | ||
263 | .flags = IORESOURCE_MEM, | ||
264 | }, | ||
265 | }; | ||
266 | |||
267 | /* board files should fill in platform_data and register the devices themselves. | ||
268 | * See board-harmony.c for an example | ||
269 | */ | ||
270 | struct platform_device tegra_sdhci_device1 = { | ||
271 | .name = "sdhci-tegra", | ||
272 | .id = 0, | ||
273 | .resource = sdhci_resource1, | ||
274 | .num_resources = ARRAY_SIZE(sdhci_resource1), | ||
275 | }; | ||
276 | |||
277 | struct platform_device tegra_sdhci_device2 = { | ||
278 | .name = "sdhci-tegra", | ||
279 | .id = 1, | ||
280 | .resource = sdhci_resource2, | ||
281 | .num_resources = ARRAY_SIZE(sdhci_resource2), | ||
282 | }; | ||
283 | |||
284 | struct platform_device tegra_sdhci_device3 = { | ||
285 | .name = "sdhci-tegra", | ||
286 | .id = 2, | ||
287 | .resource = sdhci_resource3, | ||
288 | .num_resources = ARRAY_SIZE(sdhci_resource3), | ||
289 | }; | ||
290 | |||
291 | struct platform_device tegra_sdhci_device4 = { | ||
292 | .name = "sdhci-tegra", | ||
293 | .id = 3, | ||
294 | .resource = sdhci_resource4, | ||
295 | .num_resources = ARRAY_SIZE(sdhci_resource4), | ||
296 | }; | ||
297 | |||
298 | static struct resource tegra_usb1_resources[] = { | ||
299 | [0] = { | ||
300 | .start = TEGRA_USB_BASE, | ||
301 | .end = TEGRA_USB_BASE + TEGRA_USB_SIZE - 1, | ||
302 | .flags = IORESOURCE_MEM, | ||
303 | }, | ||
304 | [1] = { | ||
305 | .start = INT_USB, | ||
306 | .end = INT_USB, | ||
307 | .flags = IORESOURCE_IRQ, | ||
308 | }, | ||
309 | }; | ||
310 | |||
311 | static struct resource tegra_usb2_resources[] = { | ||
312 | [0] = { | ||
313 | .start = TEGRA_USB2_BASE, | ||
314 | .end = TEGRA_USB2_BASE + TEGRA_USB2_SIZE - 1, | ||
315 | .flags = IORESOURCE_MEM, | ||
316 | }, | ||
317 | [1] = { | ||
318 | .start = INT_USB2, | ||
319 | .end = INT_USB2, | ||
320 | .flags = IORESOURCE_IRQ, | ||
321 | }, | ||
322 | }; | ||
323 | |||
324 | static struct resource tegra_usb3_resources[] = { | ||
325 | [0] = { | ||
326 | .start = TEGRA_USB3_BASE, | ||
327 | .end = TEGRA_USB3_BASE + TEGRA_USB3_SIZE - 1, | ||
328 | .flags = IORESOURCE_MEM, | ||
329 | }, | ||
330 | [1] = { | ||
331 | .start = INT_USB3, | ||
332 | .end = INT_USB3, | ||
333 | .flags = IORESOURCE_IRQ, | ||
334 | }, | ||
335 | }; | ||
336 | |||
337 | static u64 tegra_ehci_dmamask = DMA_BIT_MASK(32); | ||
338 | |||
339 | struct platform_device tegra_ehci1_device = { | ||
340 | .name = "tegra-ehci", | ||
341 | .id = 0, | ||
342 | .dev = { | ||
343 | .dma_mask = &tegra_ehci_dmamask, | ||
344 | .coherent_dma_mask = DMA_BIT_MASK(32), | ||
345 | }, | ||
346 | .resource = tegra_usb1_resources, | ||
347 | .num_resources = ARRAY_SIZE(tegra_usb1_resources), | ||
348 | }; | ||
349 | |||
350 | struct platform_device tegra_ehci2_device = { | ||
351 | .name = "tegra-ehci", | ||
352 | .id = 1, | ||
353 | .dev = { | ||
354 | .dma_mask = &tegra_ehci_dmamask, | ||
355 | .coherent_dma_mask = DMA_BIT_MASK(32), | ||
356 | }, | ||
357 | .resource = tegra_usb2_resources, | ||
358 | .num_resources = ARRAY_SIZE(tegra_usb2_resources), | ||
359 | }; | ||
360 | |||
361 | struct platform_device tegra_ehci3_device = { | ||
362 | .name = "tegra-ehci", | ||
363 | .id = 2, | ||
364 | .dev = { | ||
365 | .dma_mask = &tegra_ehci_dmamask, | ||
366 | .coherent_dma_mask = DMA_BIT_MASK(32), | ||
367 | }, | ||
368 | .resource = tegra_usb3_resources, | ||
369 | .num_resources = ARRAY_SIZE(tegra_usb3_resources), | ||
370 | }; | ||
371 | |||
372 | static struct resource tegra_pmu_resources[] = { | ||
373 | [0] = { | ||
374 | .start = INT_CPU0_PMU_INTR, | ||
375 | .end = INT_CPU0_PMU_INTR, | ||
376 | .flags = IORESOURCE_IRQ, | ||
377 | }, | ||
378 | [1] = { | ||
379 | .start = INT_CPU1_PMU_INTR, | ||
380 | .end = INT_CPU1_PMU_INTR, | ||
381 | .flags = IORESOURCE_IRQ, | ||
382 | }, | ||
383 | }; | ||
384 | |||
385 | struct platform_device tegra_pmu_device = { | ||
386 | .name = "arm-pmu", | ||
387 | .id = ARM_PMU_DEVICE_CPU, | ||
388 | .num_resources = ARRAY_SIZE(tegra_pmu_resources), | ||
389 | .resource = tegra_pmu_resources, | ||
390 | }; | ||
391 | |||
392 | static struct resource tegra_uarta_resources[] = { | ||
393 | [0] = { | ||
394 | .start = TEGRA_UARTA_BASE, | ||
395 | .end = TEGRA_UARTA_BASE + TEGRA_UARTA_SIZE - 1, | ||
396 | .flags = IORESOURCE_MEM, | ||
397 | }, | ||
398 | [1] = { | ||
399 | .start = INT_UARTA, | ||
400 | .end = INT_UARTA, | ||
401 | .flags = IORESOURCE_IRQ, | ||
402 | }, | ||
403 | }; | ||
404 | |||
405 | static struct resource tegra_uartb_resources[] = { | ||
406 | [0] = { | ||
407 | .start = TEGRA_UARTB_BASE, | ||
408 | .end = TEGRA_UARTB_BASE + TEGRA_UARTB_SIZE - 1, | ||
409 | .flags = IORESOURCE_MEM, | ||
410 | }, | ||
411 | [1] = { | ||
412 | .start = INT_UARTB, | ||
413 | .end = INT_UARTB, | ||
414 | .flags = IORESOURCE_IRQ, | ||
415 | }, | ||
416 | }; | ||
417 | |||
418 | static struct resource tegra_uartc_resources[] = { | ||
419 | [0] = { | ||
420 | .start = TEGRA_UARTC_BASE, | ||
421 | .end = TEGRA_UARTC_BASE + TEGRA_UARTC_SIZE - 1, | ||
422 | .flags = IORESOURCE_MEM, | ||
423 | }, | ||
424 | [1] = { | ||
425 | .start = INT_UARTC, | ||
426 | .end = INT_UARTC, | ||
427 | .flags = IORESOURCE_IRQ, | ||
428 | }, | ||
429 | }; | ||
430 | |||
431 | static struct resource tegra_uartd_resources[] = { | ||
432 | [0] = { | ||
433 | .start = TEGRA_UARTD_BASE, | ||
434 | .end = TEGRA_UARTD_BASE + TEGRA_UARTD_SIZE - 1, | ||
435 | .flags = IORESOURCE_MEM, | ||
436 | }, | ||
437 | [1] = { | ||
438 | .start = INT_UARTD, | ||
439 | .end = INT_UARTD, | ||
440 | .flags = IORESOURCE_IRQ, | ||
441 | }, | ||
442 | }; | ||
443 | |||
444 | static struct resource tegra_uarte_resources[] = { | ||
445 | [0] = { | ||
446 | .start = TEGRA_UARTE_BASE, | ||
447 | .end = TEGRA_UARTE_BASE + TEGRA_UARTE_SIZE - 1, | ||
448 | .flags = IORESOURCE_MEM, | ||
449 | }, | ||
450 | [1] = { | ||
451 | .start = INT_UARTE, | ||
452 | .end = INT_UARTE, | ||
453 | .flags = IORESOURCE_IRQ, | ||
454 | }, | ||
455 | }; | ||
456 | |||
457 | struct platform_device tegra_uarta_device = { | ||
458 | .name = "tegra_uart", | ||
459 | .id = 0, | ||
460 | .num_resources = ARRAY_SIZE(tegra_uarta_resources), | ||
461 | .resource = tegra_uarta_resources, | ||
462 | .dev = { | ||
463 | .coherent_dma_mask = DMA_BIT_MASK(32), | ||
464 | }, | ||
465 | }; | ||
466 | |||
467 | struct platform_device tegra_uartb_device = { | ||
468 | .name = "tegra_uart", | ||
469 | .id = 1, | ||
470 | .num_resources = ARRAY_SIZE(tegra_uartb_resources), | ||
471 | .resource = tegra_uartb_resources, | ||
472 | .dev = { | ||
473 | .coherent_dma_mask = DMA_BIT_MASK(32), | ||
474 | }, | ||
475 | }; | ||
476 | |||
477 | struct platform_device tegra_uartc_device = { | ||
478 | .name = "tegra_uart", | ||
479 | .id = 2, | ||
480 | .num_resources = ARRAY_SIZE(tegra_uartc_resources), | ||
481 | .resource = tegra_uartc_resources, | ||
482 | .dev = { | ||
483 | .coherent_dma_mask = DMA_BIT_MASK(32), | ||
484 | }, | ||
485 | }; | ||
486 | |||
487 | struct platform_device tegra_uartd_device = { | ||
488 | .name = "tegra_uart", | ||
489 | .id = 3, | ||
490 | .num_resources = ARRAY_SIZE(tegra_uartd_resources), | ||
491 | .resource = tegra_uartd_resources, | ||
492 | .dev = { | ||
493 | .coherent_dma_mask = DMA_BIT_MASK(32), | ||
494 | }, | ||
495 | }; | ||
496 | |||
497 | struct platform_device tegra_uarte_device = { | ||
498 | .name = "tegra_uart", | ||
499 | .id = 4, | ||
500 | .num_resources = ARRAY_SIZE(tegra_uarte_resources), | ||
501 | .resource = tegra_uarte_resources, | ||
502 | .dev = { | ||
503 | .coherent_dma_mask = DMA_BIT_MASK(32), | ||
504 | }, | ||
505 | }; | ||
diff --git a/arch/arm/mach-tegra/devices.h b/arch/arm/mach-tegra/devices.h new file mode 100644 index 000000000000..888810c37ee9 --- /dev/null +++ b/arch/arm/mach-tegra/devices.h | |||
@@ -0,0 +1,46 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010,2011 Google, Inc. | ||
3 | * | ||
4 | * Author: | ||
5 | * Colin Cross <ccross@android.com> | ||
6 | * Erik Gilling <ccross@android.com> | ||
7 | * | ||
8 | * This software is licensed under the terms of the GNU General Public | ||
9 | * License version 2, as published by the Free Software Foundation, and | ||
10 | * may be copied, distributed, and modified under those terms. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | */ | ||
18 | |||
19 | #ifndef __MACH_TEGRA_DEVICES_H | ||
20 | #define __MACH_TEGRA_DEVICES_H | ||
21 | |||
22 | #include <linux/platform_device.h> | ||
23 | |||
24 | extern struct platform_device tegra_sdhci_device1; | ||
25 | extern struct platform_device tegra_sdhci_device2; | ||
26 | extern struct platform_device tegra_sdhci_device3; | ||
27 | extern struct platform_device tegra_sdhci_device4; | ||
28 | extern struct platform_device tegra_i2c_device1; | ||
29 | extern struct platform_device tegra_i2c_device2; | ||
30 | extern struct platform_device tegra_i2c_device3; | ||
31 | extern struct platform_device tegra_i2c_device4; | ||
32 | extern struct platform_device tegra_spi_device1; | ||
33 | extern struct platform_device tegra_spi_device2; | ||
34 | extern struct platform_device tegra_spi_device3; | ||
35 | extern struct platform_device tegra_spi_device4; | ||
36 | extern struct platform_device tegra_ehci1_device; | ||
37 | extern struct platform_device tegra_ehci2_device; | ||
38 | extern struct platform_device tegra_ehci3_device; | ||
39 | extern struct platform_device tegra_uarta_device; | ||
40 | extern struct platform_device tegra_uartb_device; | ||
41 | extern struct platform_device tegra_uartc_device; | ||
42 | extern struct platform_device tegra_uartd_device; | ||
43 | extern struct platform_device tegra_uarte_device; | ||
44 | extern struct platform_device tegra_pmu_device; | ||
45 | |||
46 | #endif | ||
diff --git a/arch/arm/mach-tegra/dma.c b/arch/arm/mach-tegra/dma.c index edda6ec5e925..e945ae28ee77 100644 --- a/arch/arm/mach-tegra/dma.c +++ b/arch/arm/mach-tegra/dma.c | |||
@@ -27,9 +27,11 @@ | |||
27 | #include <linux/err.h> | 27 | #include <linux/err.h> |
28 | #include <linux/irq.h> | 28 | #include <linux/irq.h> |
29 | #include <linux/delay.h> | 29 | #include <linux/delay.h> |
30 | #include <linux/clk.h> | ||
30 | #include <mach/dma.h> | 31 | #include <mach/dma.h> |
31 | #include <mach/irqs.h> | 32 | #include <mach/irqs.h> |
32 | #include <mach/iomap.h> | 33 | #include <mach/iomap.h> |
34 | #include <mach/suspend.h> | ||
33 | 35 | ||
34 | #define APB_DMA_GEN 0x000 | 36 | #define APB_DMA_GEN 0x000 |
35 | #define GEN_ENABLE (1<<31) | 37 | #define GEN_ENABLE (1<<31) |
@@ -120,17 +122,14 @@ struct tegra_dma_channel { | |||
120 | void __iomem *addr; | 122 | void __iomem *addr; |
121 | int mode; | 123 | int mode; |
122 | int irq; | 124 | int irq; |
123 | 125 | int req_transfer_count; | |
124 | /* Register shadow */ | ||
125 | u32 csr; | ||
126 | u32 ahb_seq; | ||
127 | u32 ahb_ptr; | ||
128 | u32 apb_seq; | ||
129 | u32 apb_ptr; | ||
130 | }; | 126 | }; |
131 | 127 | ||
132 | #define NV_DMA_MAX_CHANNELS 32 | 128 | #define NV_DMA_MAX_CHANNELS 32 |
133 | 129 | ||
130 | static bool tegra_dma_initialized; | ||
131 | static DEFINE_MUTEX(tegra_dma_lock); | ||
132 | |||
134 | static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS); | 133 | static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS); |
135 | static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS]; | 134 | static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS]; |
136 | 135 | ||
@@ -138,7 +137,6 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch, | |||
138 | struct tegra_dma_req *req); | 137 | struct tegra_dma_req *req); |
139 | static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch, | 138 | static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch, |
140 | struct tegra_dma_req *req); | 139 | struct tegra_dma_req *req); |
141 | static void tegra_dma_init_hw(struct tegra_dma_channel *ch); | ||
142 | static void tegra_dma_stop(struct tegra_dma_channel *ch); | 140 | static void tegra_dma_stop(struct tegra_dma_channel *ch); |
143 | 141 | ||
144 | void tegra_dma_flush(struct tegra_dma_channel *ch) | 142 | void tegra_dma_flush(struct tegra_dma_channel *ch) |
@@ -150,6 +148,9 @@ void tegra_dma_dequeue(struct tegra_dma_channel *ch) | |||
150 | { | 148 | { |
151 | struct tegra_dma_req *req; | 149 | struct tegra_dma_req *req; |
152 | 150 | ||
151 | if (tegra_dma_is_empty(ch)) | ||
152 | return; | ||
153 | |||
153 | req = list_entry(ch->list.next, typeof(*req), node); | 154 | req = list_entry(ch->list.next, typeof(*req), node); |
154 | 155 | ||
155 | tegra_dma_dequeue_req(ch, req); | 156 | tegra_dma_dequeue_req(ch, req); |
@@ -158,10 +159,10 @@ void tegra_dma_dequeue(struct tegra_dma_channel *ch) | |||
158 | 159 | ||
159 | void tegra_dma_stop(struct tegra_dma_channel *ch) | 160 | void tegra_dma_stop(struct tegra_dma_channel *ch) |
160 | { | 161 | { |
161 | unsigned int csr; | 162 | u32 csr; |
162 | unsigned int status; | 163 | u32 status; |
163 | 164 | ||
164 | csr = ch->csr; | 165 | csr = readl(ch->addr + APB_DMA_CHAN_CSR); |
165 | csr &= ~CSR_IE_EOC; | 166 | csr &= ~CSR_IE_EOC; |
166 | writel(csr, ch->addr + APB_DMA_CHAN_CSR); | 167 | writel(csr, ch->addr + APB_DMA_CHAN_CSR); |
167 | 168 | ||
@@ -175,19 +176,16 @@ void tegra_dma_stop(struct tegra_dma_channel *ch) | |||
175 | 176 | ||
176 | int tegra_dma_cancel(struct tegra_dma_channel *ch) | 177 | int tegra_dma_cancel(struct tegra_dma_channel *ch) |
177 | { | 178 | { |
178 | unsigned int csr; | 179 | u32 csr; |
179 | unsigned long irq_flags; | 180 | unsigned long irq_flags; |
180 | 181 | ||
181 | spin_lock_irqsave(&ch->lock, irq_flags); | 182 | spin_lock_irqsave(&ch->lock, irq_flags); |
182 | while (!list_empty(&ch->list)) | 183 | while (!list_empty(&ch->list)) |
183 | list_del(ch->list.next); | 184 | list_del(ch->list.next); |
184 | 185 | ||
185 | csr = ch->csr; | 186 | csr = readl(ch->addr + APB_DMA_CHAN_CSR); |
186 | csr &= ~CSR_REQ_SEL_MASK; | 187 | csr &= ~CSR_REQ_SEL_MASK; |
187 | csr |= CSR_REQ_SEL_INVALID; | 188 | csr |= CSR_REQ_SEL_INVALID; |
188 | |||
189 | /* Set the enable as that is not shadowed */ | ||
190 | csr |= CSR_ENB; | ||
191 | writel(csr, ch->addr + APB_DMA_CHAN_CSR); | 189 | writel(csr, ch->addr + APB_DMA_CHAN_CSR); |
192 | 190 | ||
193 | tegra_dma_stop(ch); | 191 | tegra_dma_stop(ch); |
@@ -229,18 +227,15 @@ int tegra_dma_dequeue_req(struct tegra_dma_channel *ch, | |||
229 | * - Finally stop or program the DMA to the next buffer in the | 227 | * - Finally stop or program the DMA to the next buffer in the |
230 | * list. | 228 | * list. |
231 | */ | 229 | */ |
232 | csr = ch->csr; | 230 | csr = readl(ch->addr + APB_DMA_CHAN_CSR); |
233 | csr &= ~CSR_REQ_SEL_MASK; | 231 | csr &= ~CSR_REQ_SEL_MASK; |
234 | csr |= CSR_REQ_SEL_INVALID; | 232 | csr |= CSR_REQ_SEL_INVALID; |
235 | |||
236 | /* Set the enable as that is not shadowed */ | ||
237 | csr |= CSR_ENB; | ||
238 | writel(csr, ch->addr + APB_DMA_CHAN_CSR); | 233 | writel(csr, ch->addr + APB_DMA_CHAN_CSR); |
239 | 234 | ||
240 | /* Get the transfer count */ | 235 | /* Get the transfer count */ |
241 | status = readl(ch->addr + APB_DMA_CHAN_STA); | 236 | status = readl(ch->addr + APB_DMA_CHAN_STA); |
242 | to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT; | 237 | to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT; |
243 | req_transfer_count = (ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT; | 238 | req_transfer_count = ch->req_transfer_count; |
244 | req_transfer_count += 1; | 239 | req_transfer_count += 1; |
245 | to_transfer += 1; | 240 | to_transfer += 1; |
246 | 241 | ||
@@ -318,6 +313,7 @@ int tegra_dma_enqueue_req(struct tegra_dma_channel *ch, | |||
318 | struct tegra_dma_req *req) | 313 | struct tegra_dma_req *req) |
319 | { | 314 | { |
320 | unsigned long irq_flags; | 315 | unsigned long irq_flags; |
316 | struct tegra_dma_req *_req; | ||
321 | int start_dma = 0; | 317 | int start_dma = 0; |
322 | 318 | ||
323 | if (req->size > NV_DMA_MAX_TRASFER_SIZE || | 319 | if (req->size > NV_DMA_MAX_TRASFER_SIZE || |
@@ -328,6 +324,13 @@ int tegra_dma_enqueue_req(struct tegra_dma_channel *ch, | |||
328 | 324 | ||
329 | spin_lock_irqsave(&ch->lock, irq_flags); | 325 | spin_lock_irqsave(&ch->lock, irq_flags); |
330 | 326 | ||
327 | list_for_each_entry(_req, &ch->list, node) { | ||
328 | if (req == _req) { | ||
329 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
330 | return -EEXIST; | ||
331 | } | ||
332 | } | ||
333 | |||
331 | req->bytes_transferred = 0; | 334 | req->bytes_transferred = 0; |
332 | req->status = 0; | 335 | req->status = 0; |
333 | req->buffer_status = 0; | 336 | req->buffer_status = 0; |
@@ -348,7 +351,12 @@ EXPORT_SYMBOL(tegra_dma_enqueue_req); | |||
348 | struct tegra_dma_channel *tegra_dma_allocate_channel(int mode) | 351 | struct tegra_dma_channel *tegra_dma_allocate_channel(int mode) |
349 | { | 352 | { |
350 | int channel; | 353 | int channel; |
351 | struct tegra_dma_channel *ch; | 354 | struct tegra_dma_channel *ch = NULL; |
355 | |||
356 | if (WARN_ON(!tegra_dma_initialized)) | ||
357 | return NULL; | ||
358 | |||
359 | mutex_lock(&tegra_dma_lock); | ||
352 | 360 | ||
353 | /* first channel is the shared channel */ | 361 | /* first channel is the shared channel */ |
354 | if (mode & TEGRA_DMA_SHARED) { | 362 | if (mode & TEGRA_DMA_SHARED) { |
@@ -357,11 +365,14 @@ struct tegra_dma_channel *tegra_dma_allocate_channel(int mode) | |||
357 | channel = find_first_zero_bit(channel_usage, | 365 | channel = find_first_zero_bit(channel_usage, |
358 | ARRAY_SIZE(dma_channels)); | 366 | ARRAY_SIZE(dma_channels)); |
359 | if (channel >= ARRAY_SIZE(dma_channels)) | 367 | if (channel >= ARRAY_SIZE(dma_channels)) |
360 | return NULL; | 368 | goto out; |
361 | } | 369 | } |
362 | __set_bit(channel, channel_usage); | 370 | __set_bit(channel, channel_usage); |
363 | ch = &dma_channels[channel]; | 371 | ch = &dma_channels[channel]; |
364 | ch->mode = mode; | 372 | ch->mode = mode; |
373 | |||
374 | out: | ||
375 | mutex_unlock(&tegra_dma_lock); | ||
365 | return ch; | 376 | return ch; |
366 | } | 377 | } |
367 | EXPORT_SYMBOL(tegra_dma_allocate_channel); | 378 | EXPORT_SYMBOL(tegra_dma_allocate_channel); |
@@ -371,22 +382,27 @@ void tegra_dma_free_channel(struct tegra_dma_channel *ch) | |||
371 | if (ch->mode & TEGRA_DMA_SHARED) | 382 | if (ch->mode & TEGRA_DMA_SHARED) |
372 | return; | 383 | return; |
373 | tegra_dma_cancel(ch); | 384 | tegra_dma_cancel(ch); |
385 | mutex_lock(&tegra_dma_lock); | ||
374 | __clear_bit(ch->id, channel_usage); | 386 | __clear_bit(ch->id, channel_usage); |
387 | mutex_unlock(&tegra_dma_lock); | ||
375 | } | 388 | } |
376 | EXPORT_SYMBOL(tegra_dma_free_channel); | 389 | EXPORT_SYMBOL(tegra_dma_free_channel); |
377 | 390 | ||
378 | static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch, | 391 | static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch, |
379 | struct tegra_dma_req *req) | 392 | struct tegra_dma_req *req) |
380 | { | 393 | { |
394 | u32 apb_ptr; | ||
395 | u32 ahb_ptr; | ||
396 | |||
381 | if (req->to_memory) { | 397 | if (req->to_memory) { |
382 | ch->apb_ptr = req->source_addr; | 398 | apb_ptr = req->source_addr; |
383 | ch->ahb_ptr = req->dest_addr; | 399 | ahb_ptr = req->dest_addr; |
384 | } else { | 400 | } else { |
385 | ch->apb_ptr = req->dest_addr; | 401 | apb_ptr = req->dest_addr; |
386 | ch->ahb_ptr = req->source_addr; | 402 | ahb_ptr = req->source_addr; |
387 | } | 403 | } |
388 | writel(ch->apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR); | 404 | writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR); |
389 | writel(ch->ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR); | 405 | writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR); |
390 | 406 | ||
391 | req->status = TEGRA_DMA_REQ_INFLIGHT; | 407 | req->status = TEGRA_DMA_REQ_INFLIGHT; |
392 | return; | 408 | return; |
@@ -400,38 +416,39 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch, | |||
400 | int ahb_bus_width; | 416 | int ahb_bus_width; |
401 | int apb_bus_width; | 417 | int apb_bus_width; |
402 | int index; | 418 | int index; |
403 | unsigned long csr; | ||
404 | 419 | ||
420 | u32 ahb_seq; | ||
421 | u32 apb_seq; | ||
422 | u32 ahb_ptr; | ||
423 | u32 apb_ptr; | ||
424 | u32 csr; | ||
425 | |||
426 | csr = CSR_IE_EOC | CSR_FLOW; | ||
427 | ahb_seq = AHB_SEQ_INTR_ENB | AHB_SEQ_BURST_1; | ||
428 | apb_seq = 0; | ||
405 | 429 | ||
406 | ch->csr |= CSR_FLOW; | 430 | csr |= req->req_sel << CSR_REQ_SEL_SHIFT; |
407 | ch->csr &= ~CSR_REQ_SEL_MASK; | ||
408 | ch->csr |= req->req_sel << CSR_REQ_SEL_SHIFT; | ||
409 | ch->ahb_seq &= ~AHB_SEQ_BURST_MASK; | ||
410 | ch->ahb_seq |= AHB_SEQ_BURST_1; | ||
411 | 431 | ||
412 | /* One shot mode is always single buffered, | 432 | /* One shot mode is always single buffered, |
413 | * continuous mode is always double buffered | 433 | * continuous mode is always double buffered |
414 | * */ | 434 | * */ |
415 | if (ch->mode & TEGRA_DMA_MODE_ONESHOT) { | 435 | if (ch->mode & TEGRA_DMA_MODE_ONESHOT) { |
416 | ch->csr |= CSR_ONCE; | 436 | csr |= CSR_ONCE; |
417 | ch->ahb_seq &= ~AHB_SEQ_DBL_BUF; | 437 | ch->req_transfer_count = (req->size >> 2) - 1; |
418 | ch->csr &= ~CSR_WCOUNT_MASK; | ||
419 | ch->csr |= ((req->size>>2) - 1) << CSR_WCOUNT_SHIFT; | ||
420 | } else { | 438 | } else { |
421 | ch->csr &= ~CSR_ONCE; | 439 | ahb_seq |= AHB_SEQ_DBL_BUF; |
422 | ch->ahb_seq |= AHB_SEQ_DBL_BUF; | ||
423 | 440 | ||
424 | /* In double buffered mode, we set the size to half the | 441 | /* In double buffered mode, we set the size to half the |
425 | * requested size and interrupt when half the buffer | 442 | * requested size and interrupt when half the buffer |
426 | * is full */ | 443 | * is full */ |
427 | ch->csr &= ~CSR_WCOUNT_MASK; | 444 | ch->req_transfer_count = (req->size >> 3) - 1; |
428 | ch->csr |= ((req->size>>3) - 1) << CSR_WCOUNT_SHIFT; | ||
429 | } | 445 | } |
430 | 446 | ||
447 | csr |= ch->req_transfer_count << CSR_WCOUNT_SHIFT; | ||
448 | |||
431 | if (req->to_memory) { | 449 | if (req->to_memory) { |
432 | ch->csr &= ~CSR_DIR; | 450 | apb_ptr = req->source_addr; |
433 | ch->apb_ptr = req->source_addr; | 451 | ahb_ptr = req->dest_addr; |
434 | ch->ahb_ptr = req->dest_addr; | ||
435 | 452 | ||
436 | apb_addr_wrap = req->source_wrap; | 453 | apb_addr_wrap = req->source_wrap; |
437 | ahb_addr_wrap = req->dest_wrap; | 454 | ahb_addr_wrap = req->dest_wrap; |
@@ -439,9 +456,9 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch, | |||
439 | ahb_bus_width = req->dest_bus_width; | 456 | ahb_bus_width = req->dest_bus_width; |
440 | 457 | ||
441 | } else { | 458 | } else { |
442 | ch->csr |= CSR_DIR; | 459 | csr |= CSR_DIR; |
443 | ch->apb_ptr = req->dest_addr; | 460 | apb_ptr = req->dest_addr; |
444 | ch->ahb_ptr = req->source_addr; | 461 | ahb_ptr = req->source_addr; |
445 | 462 | ||
446 | apb_addr_wrap = req->dest_wrap; | 463 | apb_addr_wrap = req->dest_wrap; |
447 | ahb_addr_wrap = req->source_wrap; | 464 | ahb_addr_wrap = req->source_wrap; |
@@ -460,8 +477,7 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch, | |||
460 | index++; | 477 | index++; |
461 | } while (index < ARRAY_SIZE(apb_addr_wrap_table)); | 478 | } while (index < ARRAY_SIZE(apb_addr_wrap_table)); |
462 | BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table)); | 479 | BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table)); |
463 | ch->apb_seq &= ~APB_SEQ_WRAP_MASK; | 480 | apb_seq |= index << APB_SEQ_WRAP_SHIFT; |
464 | ch->apb_seq |= index << APB_SEQ_WRAP_SHIFT; | ||
465 | 481 | ||
466 | /* set address wrap for AHB size */ | 482 | /* set address wrap for AHB size */ |
467 | index = 0; | 483 | index = 0; |
@@ -471,55 +487,42 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch, | |||
471 | index++; | 487 | index++; |
472 | } while (index < ARRAY_SIZE(ahb_addr_wrap_table)); | 488 | } while (index < ARRAY_SIZE(ahb_addr_wrap_table)); |
473 | BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table)); | 489 | BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table)); |
474 | ch->ahb_seq &= ~AHB_SEQ_WRAP_MASK; | 490 | ahb_seq |= index << AHB_SEQ_WRAP_SHIFT; |
475 | ch->ahb_seq |= index << AHB_SEQ_WRAP_SHIFT; | ||
476 | 491 | ||
477 | for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) { | 492 | for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) { |
478 | if (bus_width_table[index] == ahb_bus_width) | 493 | if (bus_width_table[index] == ahb_bus_width) |
479 | break; | 494 | break; |
480 | } | 495 | } |
481 | BUG_ON(index == ARRAY_SIZE(bus_width_table)); | 496 | BUG_ON(index == ARRAY_SIZE(bus_width_table)); |
482 | ch->ahb_seq &= ~AHB_SEQ_BUS_WIDTH_MASK; | 497 | ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT; |
483 | ch->ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT; | ||
484 | 498 | ||
485 | for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) { | 499 | for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) { |
486 | if (bus_width_table[index] == apb_bus_width) | 500 | if (bus_width_table[index] == apb_bus_width) |
487 | break; | 501 | break; |
488 | } | 502 | } |
489 | BUG_ON(index == ARRAY_SIZE(bus_width_table)); | 503 | BUG_ON(index == ARRAY_SIZE(bus_width_table)); |
490 | ch->apb_seq &= ~APB_SEQ_BUS_WIDTH_MASK; | 504 | apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT; |
491 | ch->apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT; | ||
492 | |||
493 | ch->csr |= CSR_IE_EOC; | ||
494 | 505 | ||
495 | /* update hw registers with the shadow */ | 506 | writel(csr, ch->addr + APB_DMA_CHAN_CSR); |
496 | writel(ch->csr, ch->addr + APB_DMA_CHAN_CSR); | 507 | writel(apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ); |
497 | writel(ch->apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ); | 508 | writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR); |
498 | writel(ch->apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR); | 509 | writel(ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ); |
499 | writel(ch->ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ); | 510 | writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR); |
500 | writel(ch->ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR); | ||
501 | 511 | ||
502 | csr = ch->csr | CSR_ENB; | 512 | csr |= CSR_ENB; |
503 | writel(csr, ch->addr + APB_DMA_CHAN_CSR); | 513 | writel(csr, ch->addr + APB_DMA_CHAN_CSR); |
504 | 514 | ||
505 | req->status = TEGRA_DMA_REQ_INFLIGHT; | 515 | req->status = TEGRA_DMA_REQ_INFLIGHT; |
506 | } | 516 | } |
507 | 517 | ||
508 | static void tegra_dma_init_hw(struct tegra_dma_channel *ch) | ||
509 | { | ||
510 | /* One shot with an interrupt to CPU after transfer */ | ||
511 | ch->csr = CSR_ONCE | CSR_IE_EOC; | ||
512 | ch->ahb_seq = AHB_SEQ_BUS_WIDTH_32 | AHB_SEQ_INTR_ENB; | ||
513 | ch->apb_seq = APB_SEQ_BUS_WIDTH_32 | 1 << APB_SEQ_WRAP_SHIFT; | ||
514 | } | ||
515 | |||
516 | static void handle_oneshot_dma(struct tegra_dma_channel *ch) | 518 | static void handle_oneshot_dma(struct tegra_dma_channel *ch) |
517 | { | 519 | { |
518 | struct tegra_dma_req *req; | 520 | struct tegra_dma_req *req; |
521 | unsigned long irq_flags; | ||
519 | 522 | ||
520 | spin_lock(&ch->lock); | 523 | spin_lock_irqsave(&ch->lock, irq_flags); |
521 | if (list_empty(&ch->list)) { | 524 | if (list_empty(&ch->list)) { |
522 | spin_unlock(&ch->lock); | 525 | spin_unlock_irqrestore(&ch->lock, irq_flags); |
523 | return; | 526 | return; |
524 | } | 527 | } |
525 | 528 | ||
@@ -527,8 +530,7 @@ static void handle_oneshot_dma(struct tegra_dma_channel *ch) | |||
527 | if (req) { | 530 | if (req) { |
528 | int bytes_transferred; | 531 | int bytes_transferred; |
529 | 532 | ||
530 | bytes_transferred = | 533 | bytes_transferred = ch->req_transfer_count; |
531 | (ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT; | ||
532 | bytes_transferred += 1; | 534 | bytes_transferred += 1; |
533 | bytes_transferred <<= 2; | 535 | bytes_transferred <<= 2; |
534 | 536 | ||
@@ -536,12 +538,12 @@ static void handle_oneshot_dma(struct tegra_dma_channel *ch) | |||
536 | req->bytes_transferred = bytes_transferred; | 538 | req->bytes_transferred = bytes_transferred; |
537 | req->status = TEGRA_DMA_REQ_SUCCESS; | 539 | req->status = TEGRA_DMA_REQ_SUCCESS; |
538 | 540 | ||
539 | spin_unlock(&ch->lock); | 541 | spin_unlock_irqrestore(&ch->lock, irq_flags); |
540 | /* Callback should be called without any lock */ | 542 | /* Callback should be called without any lock */ |
541 | pr_debug("%s: transferred %d bytes\n", __func__, | 543 | pr_debug("%s: transferred %d bytes\n", __func__, |
542 | req->bytes_transferred); | 544 | req->bytes_transferred); |
543 | req->complete(req); | 545 | req->complete(req); |
544 | spin_lock(&ch->lock); | 546 | spin_lock_irqsave(&ch->lock, irq_flags); |
545 | } | 547 | } |
546 | 548 | ||
547 | if (!list_empty(&ch->list)) { | 549 | if (!list_empty(&ch->list)) { |
@@ -551,22 +553,55 @@ static void handle_oneshot_dma(struct tegra_dma_channel *ch) | |||
551 | if (req->status != TEGRA_DMA_REQ_INFLIGHT) | 553 | if (req->status != TEGRA_DMA_REQ_INFLIGHT) |
552 | tegra_dma_update_hw(ch, req); | 554 | tegra_dma_update_hw(ch, req); |
553 | } | 555 | } |
554 | spin_unlock(&ch->lock); | 556 | spin_unlock_irqrestore(&ch->lock, irq_flags); |
555 | } | 557 | } |
556 | 558 | ||
557 | static void handle_continuous_dma(struct tegra_dma_channel *ch) | 559 | static void handle_continuous_dma(struct tegra_dma_channel *ch) |
558 | { | 560 | { |
559 | struct tegra_dma_req *req; | 561 | struct tegra_dma_req *req; |
562 | unsigned long irq_flags; | ||
560 | 563 | ||
561 | spin_lock(&ch->lock); | 564 | spin_lock_irqsave(&ch->lock, irq_flags); |
562 | if (list_empty(&ch->list)) { | 565 | if (list_empty(&ch->list)) { |
563 | spin_unlock(&ch->lock); | 566 | spin_unlock_irqrestore(&ch->lock, irq_flags); |
564 | return; | 567 | return; |
565 | } | 568 | } |
566 | 569 | ||
567 | req = list_entry(ch->list.next, typeof(*req), node); | 570 | req = list_entry(ch->list.next, typeof(*req), node); |
568 | if (req) { | 571 | if (req) { |
569 | if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) { | 572 | if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) { |
573 | bool is_dma_ping_complete; | ||
574 | is_dma_ping_complete = (readl(ch->addr + APB_DMA_CHAN_STA) | ||
575 | & STA_PING_PONG) ? true : false; | ||
576 | if (req->to_memory) | ||
577 | is_dma_ping_complete = !is_dma_ping_complete; | ||
578 | /* Out of sync - Release current buffer */ | ||
579 | if (!is_dma_ping_complete) { | ||
580 | int bytes_transferred; | ||
581 | |||
582 | bytes_transferred = ch->req_transfer_count; | ||
583 | bytes_transferred += 1; | ||
584 | bytes_transferred <<= 3; | ||
585 | req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL; | ||
586 | req->bytes_transferred = bytes_transferred; | ||
587 | req->status = TEGRA_DMA_REQ_SUCCESS; | ||
588 | tegra_dma_stop(ch); | ||
589 | |||
590 | if (!list_is_last(&req->node, &ch->list)) { | ||
591 | struct tegra_dma_req *next_req; | ||
592 | |||
593 | next_req = list_entry(req->node.next, | ||
594 | typeof(*next_req), node); | ||
595 | tegra_dma_update_hw(ch, next_req); | ||
596 | } | ||
597 | |||
598 | list_del(&req->node); | ||
599 | |||
600 | /* DMA lock is NOT held when callback is called */ | ||
601 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
602 | req->complete(req); | ||
603 | return; | ||
604 | } | ||
570 | /* Load the next request into the hardware, if available | 605 | /* Load the next request into the hardware, if available |
571 | * */ | 606 | * */ |
572 | if (!list_is_last(&req->node, &ch->list)) { | 607 | if (!list_is_last(&req->node, &ch->list)) { |
@@ -579,7 +614,7 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch) | |||
579 | req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL; | 614 | req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL; |
580 | req->status = TEGRA_DMA_REQ_SUCCESS; | 615 | req->status = TEGRA_DMA_REQ_SUCCESS; |
581 | /* DMA lock is NOT held when callback is called */ | 616 | /* DMA lock is NOT held when callback is called */ |
582 | spin_unlock(&ch->lock); | 617 | spin_unlock_irqrestore(&ch->lock, irq_flags); |
583 | if (likely(req->threshold)) | 618 | if (likely(req->threshold)) |
584 | req->threshold(req); | 619 | req->threshold(req); |
585 | return; | 620 | return; |
@@ -590,8 +625,7 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch) | |||
590 | * the second interrupt */ | 625 | * the second interrupt */ |
591 | int bytes_transferred; | 626 | int bytes_transferred; |
592 | 627 | ||
593 | bytes_transferred = | 628 | bytes_transferred = ch->req_transfer_count; |
594 | (ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT; | ||
595 | bytes_transferred += 1; | 629 | bytes_transferred += 1; |
596 | bytes_transferred <<= 3; | 630 | bytes_transferred <<= 3; |
597 | 631 | ||
@@ -601,7 +635,7 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch) | |||
601 | list_del(&req->node); | 635 | list_del(&req->node); |
602 | 636 | ||
603 | /* DMA lock is NOT held when callback is called */ | 637 | /* DMA lock is NOT held when callback is called */ |
604 | spin_unlock(&ch->lock); | 638 | spin_unlock_irqrestore(&ch->lock, irq_flags); |
605 | req->complete(req); | 639 | req->complete(req); |
606 | return; | 640 | return; |
607 | 641 | ||
@@ -609,7 +643,7 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch) | |||
609 | BUG(); | 643 | BUG(); |
610 | } | 644 | } |
611 | } | 645 | } |
612 | spin_unlock(&ch->lock); | 646 | spin_unlock_irqrestore(&ch->lock, irq_flags); |
613 | } | 647 | } |
614 | 648 | ||
615 | static irqreturn_t dma_isr(int irq, void *data) | 649 | static irqreturn_t dma_isr(int irq, void *data) |
@@ -646,6 +680,21 @@ int __init tegra_dma_init(void) | |||
646 | int i; | 680 | int i; |
647 | unsigned int irq; | 681 | unsigned int irq; |
648 | void __iomem *addr; | 682 | void __iomem *addr; |
683 | struct clk *c; | ||
684 | |||
685 | bitmap_fill(channel_usage, NV_DMA_MAX_CHANNELS); | ||
686 | |||
687 | c = clk_get_sys("tegra-dma", NULL); | ||
688 | if (IS_ERR(c)) { | ||
689 | pr_err("Unable to get clock for APB DMA\n"); | ||
690 | ret = PTR_ERR(c); | ||
691 | goto fail; | ||
692 | } | ||
693 | ret = clk_enable(c); | ||
694 | if (ret != 0) { | ||
695 | pr_err("Unable to enable clock for APB DMA\n"); | ||
696 | goto fail; | ||
697 | } | ||
649 | 698 | ||
650 | addr = IO_ADDRESS(TEGRA_APB_DMA_BASE); | 699 | addr = IO_ADDRESS(TEGRA_APB_DMA_BASE); |
651 | writel(GEN_ENABLE, addr + APB_DMA_GEN); | 700 | writel(GEN_ENABLE, addr + APB_DMA_GEN); |
@@ -653,18 +702,9 @@ int __init tegra_dma_init(void) | |||
653 | writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX), | 702 | writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX), |
654 | addr + APB_DMA_IRQ_MASK_SET); | 703 | addr + APB_DMA_IRQ_MASK_SET); |
655 | 704 | ||
656 | memset(channel_usage, 0, sizeof(channel_usage)); | ||
657 | memset(dma_channels, 0, sizeof(dma_channels)); | ||
658 | |||
659 | /* Reserve all the channels we are not supposed to touch */ | ||
660 | for (i = 0; i < TEGRA_SYSTEM_DMA_CH_MIN; i++) | ||
661 | __set_bit(i, channel_usage); | ||
662 | |||
663 | for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) { | 705 | for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) { |
664 | struct tegra_dma_channel *ch = &dma_channels[i]; | 706 | struct tegra_dma_channel *ch = &dma_channels[i]; |
665 | 707 | ||
666 | __clear_bit(i, channel_usage); | ||
667 | |||
668 | ch->id = i; | 708 | ch->id = i; |
669 | snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i); | 709 | snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i); |
670 | 710 | ||
@@ -673,7 +713,6 @@ int __init tegra_dma_init(void) | |||
673 | 713 | ||
674 | spin_lock_init(&ch->lock); | 714 | spin_lock_init(&ch->lock); |
675 | INIT_LIST_HEAD(&ch->list); | 715 | INIT_LIST_HEAD(&ch->list); |
676 | tegra_dma_init_hw(ch); | ||
677 | 716 | ||
678 | irq = INT_APB_DMA_CH0 + i; | 717 | irq = INT_APB_DMA_CH0 + i; |
679 | ret = request_threaded_irq(irq, dma_isr, dma_thread_fn, 0, | 718 | ret = request_threaded_irq(irq, dma_isr, dma_thread_fn, 0, |
@@ -684,14 +723,15 @@ int __init tegra_dma_init(void) | |||
684 | goto fail; | 723 | goto fail; |
685 | } | 724 | } |
686 | ch->irq = irq; | 725 | ch->irq = irq; |
726 | |||
727 | __clear_bit(i, channel_usage); | ||
687 | } | 728 | } |
688 | /* mark the shared channel allocated */ | 729 | /* mark the shared channel allocated */ |
689 | __set_bit(TEGRA_SYSTEM_DMA_CH_MIN, channel_usage); | 730 | __set_bit(TEGRA_SYSTEM_DMA_CH_MIN, channel_usage); |
690 | 731 | ||
691 | for (i = TEGRA_SYSTEM_DMA_CH_MAX+1; i < NV_DMA_MAX_CHANNELS; i++) | 732 | tegra_dma_initialized = true; |
692 | __set_bit(i, channel_usage); | ||
693 | 733 | ||
694 | return ret; | 734 | return 0; |
695 | fail: | 735 | fail: |
696 | writel(0, addr + APB_DMA_GEN); | 736 | writel(0, addr + APB_DMA_GEN); |
697 | for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) { | 737 | for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) { |
@@ -701,6 +741,7 @@ fail: | |||
701 | } | 741 | } |
702 | return ret; | 742 | return ret; |
703 | } | 743 | } |
744 | postcore_initcall(tegra_dma_init); | ||
704 | 745 | ||
705 | #ifdef CONFIG_PM | 746 | #ifdef CONFIG_PM |
706 | static u32 apb_dma[5*TEGRA_SYSTEM_DMA_CH_NR + 3]; | 747 | static u32 apb_dma[5*TEGRA_SYSTEM_DMA_CH_NR + 3]; |
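
The one-shot and continuous handlers above recover req->bytes_transferred from ch->req_transfer_count, which tegra_dma_update_hw programs as (size >> 2) - 1 for one-shot transfers and (size >> 3) - 1 for double-buffered continuous transfers. A minimal sketch of that arithmetic, assuming a hypothetical req_size that is a multiple of 8 bytes:

    /* Sketch only: req_size is an illustrative request size in bytes. */
    static unsigned long example_wcount(unsigned long req_size, int oneshot)
    {
            /* The CSR word count holds (32-bit words per interrupt) - 1:
             * one-shot moves the whole buffer per interrupt,
             * continuous mode interrupts every half buffer. */
            unsigned long count = oneshot ? (req_size >> 2) - 1
                                          : (req_size >> 3) - 1;

            /* The completion paths invert this to report bytes:
             * (count + 1) << 2 (one-shot) or (count + 1) << 3 (continuous),
             * which both come back to req_size. */
            return count;
    }
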
diff --git a/arch/arm/mach-tegra/gpio.c b/arch/arm/mach-tegra/gpio.c index ad8048801513..12090a2cf3e0 100644 --- a/arch/arm/mach-tegra/gpio.c +++ b/arch/arm/mach-tegra/gpio.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/gpio.h> | 25 | #include <linux/gpio.h> |
26 | 26 | ||
27 | #include <mach/iomap.h> | 27 | #include <mach/iomap.h> |
28 | #include <mach/suspend.h> | ||
28 | 29 | ||
29 | #define GPIO_BANK(x) ((x) >> 5) | 30 | #define GPIO_BANK(x) ((x) >> 5) |
30 | #define GPIO_PORT(x) (((x) >> 3) & 0x3) | 31 | #define GPIO_PORT(x) (((x) >> 3) & 0x3) |
@@ -380,6 +381,20 @@ static int __init tegra_gpio_init(void) | |||
380 | 381 | ||
381 | postcore_initcall(tegra_gpio_init); | 382 | postcore_initcall(tegra_gpio_init); |
382 | 383 | ||
384 | void __init tegra_gpio_config(struct tegra_gpio_table *table, int num) | ||
385 | { | ||
386 | int i; | ||
387 | |||
388 | for (i = 0; i < num; i++) { | ||
389 | int gpio = table[i].gpio; | ||
390 | |||
391 | if (table[i].enable) | ||
392 | tegra_gpio_enable(gpio); | ||
393 | else | ||
394 | tegra_gpio_disable(gpio); | ||
395 | } | ||
396 | } | ||
397 | |||
383 | #ifdef CONFIG_DEBUG_FS | 398 | #ifdef CONFIG_DEBUG_FS |
384 | 399 | ||
385 | #include <linux/debugfs.h> | 400 | #include <linux/debugfs.h> |
diff --git a/arch/arm/mach-tegra/include/mach/clk.h b/arch/arm/mach-tegra/include/mach/clk.h index a217f68ba57c..c8baf8f80d23 100644 --- a/arch/arm/mach-tegra/include/mach/clk.h +++ b/arch/arm/mach-tegra/include/mach/clk.h | |||
@@ -25,9 +25,7 @@ struct clk; | |||
25 | void tegra_periph_reset_deassert(struct clk *c); | 25 | void tegra_periph_reset_deassert(struct clk *c); |
26 | void tegra_periph_reset_assert(struct clk *c); | 26 | void tegra_periph_reset_assert(struct clk *c); |
27 | 27 | ||
28 | int clk_enable_cansleep(struct clk *clk); | 28 | unsigned long clk_get_rate_all_locked(struct clk *c); |
29 | void clk_disable_cansleep(struct clk *clk); | 29 | void tegra_sdmmc_tap_delay(struct clk *c, int delay); |
30 | int clk_set_rate_cansleep(struct clk *clk, unsigned long rate); | ||
31 | int clk_set_parent_cansleep(struct clk *clk, struct clk *parent); | ||
32 | 30 | ||
33 | #endif | 31 | #endif |
diff --git a/arch/arm/mach-tegra/include/mach/debug-macro.S b/arch/arm/mach-tegra/include/mach/debug-macro.S index a0e7c12868bd..e0ebe65c1657 100644 --- a/arch/arm/mach-tegra/include/mach/debug-macro.S +++ b/arch/arm/mach-tegra/include/mach/debug-macro.S | |||
@@ -19,30 +19,15 @@ | |||
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <mach/io.h> | 21 | #include <mach/io.h> |
22 | #include <mach/iomap.h> | ||
22 | 23 | ||
23 | .macro addruart, rp, rv | 24 | .macro addruart, rp, rv |
24 | ldr \rp, =IO_APB_PHYS @ physical | 25 | ldr \rp, =IO_APB_PHYS @ physical |
25 | ldr \rv, =IO_APB_VIRT @ virtual | 26 | ldr \rv, =IO_APB_VIRT @ virtual |
26 | #if defined(CONFIG_TEGRA_DEBUG_UART_NONE) | 27 | orr \rp, \rp, #(TEGRA_DEBUG_UART_BASE & 0xFF) |
27 | #error "A debug UART must be selected in the kernel config to use DEBUG_LL" | 28 | orr \rp, \rp, #(TEGRA_DEBUG_UART_BASE & 0xFF00) |
28 | #elif defined(CONFIG_TEGRA_DEBUG_UARTA) | 29 | orr \rv, \rv, #(TEGRA_DEBUG_UART_BASE & 0xFF) |
29 | orr \rp, \rp, #0x6000 | 30 | orr \rv, \rv, #(TEGRA_DEBUG_UART_BASE & 0xFF00) |
30 | orr \rv, \rv, #0x6000 | ||
31 | #elif defined(CONFIG_TEGRA_DEBUG_UARTB) | ||
32 | orr \rp, \rp, #0x6000 | ||
33 | orr \rp, \rp, #0x40 | ||
34 | orr \rv, \rv, #0x6000 | ||
35 | orr \rv, \rv, #0x40 | ||
36 | #elif defined(CONFIG_TEGRA_DEBUG_UARTC) | ||
37 | orr \rp, \rp, #0x6200 | ||
38 | orr \rv, \rv, #0x6200 | ||
39 | #elif defined(CONFIG_TEGRA_DEBUG_UARTD) | ||
40 | orr \rp, \rp, #0x6300 | ||
41 | orr \rv, \rv, #0x6300 | ||
42 | #elif defined(CONFIG_TEGRA_DEBUG_UARTE) | ||
43 | orr \rp, \rp, #0x6400 | ||
44 | orr \rv, \rv, #0x6400 | ||
45 | #endif | ||
46 | .endm | 31 | .endm |
47 | 32 | ||
48 | #define UART_SHIFT 2 | 33 | #define UART_SHIFT 2 |
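
The simplified addruart macro above relies on the debug UART sitting inside the APB aperture, so only the low 16 bits of TEGRA_DEBUG_UART_BASE (selected in iomap.h later in this patch) are OR-ed into the physical and virtual APB bases. A hedged C restatement of the same address arithmetic; the IO_APB_* and UART values below are illustrative, not taken from this patch:

    /* Illustrative values only; the real constants live in <mach/io.h>. */
    #define EXAMPLE_IO_APB_PHYS        0x70000000UL
    #define EXAMPLE_IO_APB_VIRT        0xfe300000UL
    #define EXAMPLE_DEBUG_UART_BASE    0x70006300UL   /* e.g. UARTD */

    static unsigned long example_debug_uart_phys(void)
    {
            return EXAMPLE_IO_APB_PHYS
                    | (EXAMPLE_DEBUG_UART_BASE & 0xff)
                    | (EXAMPLE_DEBUG_UART_BASE & 0xff00);  /* 0x70006300 */
    }

    static unsigned long example_debug_uart_virt(void)
    {
            return EXAMPLE_IO_APB_VIRT
                    | (EXAMPLE_DEBUG_UART_BASE & 0xff)
                    | (EXAMPLE_DEBUG_UART_BASE & 0xff00);  /* 0xfe306300 */
    }
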
diff --git a/arch/arm/mach-tegra/include/mach/gpio.h b/arch/arm/mach-tegra/include/mach/gpio.h index e31f486d69a2..196f114dc241 100644 --- a/arch/arm/mach-tegra/include/mach/gpio.h +++ b/arch/arm/mach-tegra/include/mach/gpio.h | |||
@@ -20,6 +20,7 @@ | |||
20 | #ifndef __MACH_TEGRA_GPIO_H | 20 | #ifndef __MACH_TEGRA_GPIO_H |
21 | #define __MACH_TEGRA_GPIO_H | 21 | #define __MACH_TEGRA_GPIO_H |
22 | 22 | ||
23 | #include <linux/init.h> | ||
23 | #include <mach/irqs.h> | 24 | #include <mach/irqs.h> |
24 | 25 | ||
25 | #define TEGRA_NR_GPIOS INT_GPIO_NR | 26 | #define TEGRA_NR_GPIOS INT_GPIO_NR |
@@ -31,7 +32,7 @@ | |||
31 | #define gpio_cansleep __gpio_cansleep | 32 | #define gpio_cansleep __gpio_cansleep |
32 | 33 | ||
33 | #define TEGRA_GPIO_TO_IRQ(gpio) (INT_GPIO_BASE + (gpio)) | 34 | #define TEGRA_GPIO_TO_IRQ(gpio) (INT_GPIO_BASE + (gpio)) |
34 | #define TEGRA_IRQ_TO_GPIO(irq) ((gpio) - INT_GPIO_BASE) | 35 | #define TEGRA_IRQ_TO_GPIO(irq) ((irq) - INT_GPIO_BASE) |
35 | 36 | ||
36 | static inline int gpio_to_irq(unsigned int gpio) | 37 | static inline int gpio_to_irq(unsigned int gpio) |
37 | { | 38 | { |
@@ -47,6 +48,12 @@ static inline int irq_to_gpio(unsigned int irq) | |||
47 | return -EINVAL; | 48 | return -EINVAL; |
48 | } | 49 | } |
49 | 50 | ||
51 | struct tegra_gpio_table { | ||
52 | int gpio; /* GPIO number */ | ||
53 | bool enable; /* Configure as GPIO at init? */ | ||
54 | }; | ||
55 | |||
56 | void tegra_gpio_config(struct tegra_gpio_table *table, int num); | ||
50 | void tegra_gpio_enable(int gpio); | 57 | void tegra_gpio_enable(int gpio); |
51 | void tegra_gpio_disable(int gpio); | 58 | void tegra_gpio_disable(int gpio); |
52 | 59 | ||
diff --git a/arch/arm/mach-tegra/include/mach/harmony_audio.h b/arch/arm/mach-tegra/include/mach/harmony_audio.h new file mode 100644 index 000000000000..af086500ab7d --- /dev/null +++ b/arch/arm/mach-tegra/include/mach/harmony_audio.h | |||
@@ -0,0 +1,22 @@ | |||
1 | /* | ||
2 | * arch/arm/mach-tegra/include/mach/harmony_audio.h | ||
3 | * | ||
4 | * Copyright 2011 NVIDIA, Inc. | ||
5 | * | ||
6 | * This software is licensed under the terms of the GNU General Public | ||
7 | * License version 2, as published by the Free Software Foundation, and | ||
8 | * may be copied, distributed, and modified under those terms. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | struct harmony_audio_platform_data { | ||
18 | int gpio_spkr_en; | ||
19 | int gpio_hp_det; | ||
20 | int gpio_int_mic_en; | ||
21 | int gpio_ext_mic_en; | ||
22 | }; | ||
diff --git a/arch/arm/mach-tegra/include/mach/iomap.h b/arch/arm/mach-tegra/include/mach/iomap.h index 44a4f4bcf91f..691cdabd69cf 100644 --- a/arch/arm/mach-tegra/include/mach/iomap.h +++ b/arch/arm/mach-tegra/include/mach/iomap.h | |||
@@ -26,6 +26,9 @@ | |||
26 | #define TEGRA_IRAM_BASE 0x40000000 | 26 | #define TEGRA_IRAM_BASE 0x40000000 |
27 | #define TEGRA_IRAM_SIZE SZ_256K | 27 | #define TEGRA_IRAM_SIZE SZ_256K |
28 | 28 | ||
29 | #define TEGRA_HOST1X_BASE 0x50000000 | ||
30 | #define TEGRA_HOST1X_SIZE 0x24000 | ||
31 | |||
29 | #define TEGRA_ARM_PERIF_BASE 0x50040000 | 32 | #define TEGRA_ARM_PERIF_BASE 0x50040000 |
30 | #define TEGRA_ARM_PERIF_SIZE SZ_8K | 33 | #define TEGRA_ARM_PERIF_SIZE SZ_8K |
31 | 34 | ||
@@ -35,12 +38,30 @@ | |||
35 | #define TEGRA_ARM_INT_DIST_BASE 0x50041000 | 38 | #define TEGRA_ARM_INT_DIST_BASE 0x50041000 |
36 | #define TEGRA_ARM_INT_DIST_SIZE SZ_4K | 39 | #define TEGRA_ARM_INT_DIST_SIZE SZ_4K |
37 | 40 | ||
41 | #define TEGRA_MPE_BASE 0x54040000 | ||
42 | #define TEGRA_MPE_SIZE SZ_256K | ||
43 | |||
44 | #define TEGRA_VI_BASE 0x54080000 | ||
45 | #define TEGRA_VI_SIZE SZ_256K | ||
46 | |||
47 | #define TEGRA_ISP_BASE 0x54100000 | ||
48 | #define TEGRA_ISP_SIZE SZ_256K | ||
49 | |||
38 | #define TEGRA_DISPLAY_BASE 0x54200000 | 50 | #define TEGRA_DISPLAY_BASE 0x54200000 |
39 | #define TEGRA_DISPLAY_SIZE SZ_256K | 51 | #define TEGRA_DISPLAY_SIZE SZ_256K |
40 | 52 | ||
41 | #define TEGRA_DISPLAY2_BASE 0x54240000 | 53 | #define TEGRA_DISPLAY2_BASE 0x54240000 |
42 | #define TEGRA_DISPLAY2_SIZE SZ_256K | 54 | #define TEGRA_DISPLAY2_SIZE SZ_256K |
43 | 55 | ||
56 | #define TEGRA_HDMI_BASE 0x54280000 | ||
57 | #define TEGRA_HDMI_SIZE SZ_256K | ||
58 | |||
59 | #define TEGRA_GART_BASE 0x58000000 | ||
60 | #define TEGRA_GART_SIZE SZ_32M | ||
61 | |||
62 | #define TEGRA_RES_SEMA_BASE 0x60001000 | ||
63 | #define TEGRA_RES_SEMA_SIZE SZ_4K | ||
64 | |||
44 | #define TEGRA_PRIMARY_ICTLR_BASE 0x60004000 | 65 | #define TEGRA_PRIMARY_ICTLR_BASE 0x60004000 |
45 | #define TEGRA_PRIMARY_ICTLR_SIZE SZ_64 | 66 | #define TEGRA_PRIMARY_ICTLR_SIZE SZ_64 |
46 | 67 | ||
@@ -140,6 +161,18 @@ | |||
140 | #define TEGRA_PWFM_BASE 0x7000A000 | 161 | #define TEGRA_PWFM_BASE 0x7000A000 |
141 | #define TEGRA_PWFM_SIZE SZ_256 | 162 | #define TEGRA_PWFM_SIZE SZ_256 |
142 | 163 | ||
164 | #define TEGRA_PWFM0_BASE 0x7000A000 | ||
165 | #define TEGRA_PWFM0_SIZE 4 | ||
166 | |||
167 | #define TEGRA_PWFM1_BASE 0x7000A010 | ||
168 | #define TEGRA_PWFM1_SIZE 4 | ||
169 | |||
170 | #define TEGRA_PWFM2_BASE 0x7000A020 | ||
171 | #define TEGRA_PWFM2_SIZE 4 | ||
172 | |||
173 | #define TEGRA_PWFM3_BASE 0x7000A030 | ||
174 | #define TEGRA_PWFM3_SIZE 4 | ||
175 | |||
143 | #define TEGRA_MIPI_BASE 0x7000B000 | 176 | #define TEGRA_MIPI_BASE 0x7000B000 |
144 | #define TEGRA_MIPI_SIZE SZ_256 | 177 | #define TEGRA_MIPI_SIZE SZ_256 |
145 | 178 | ||
@@ -221,4 +254,18 @@ | |||
221 | #define TEGRA_SDMMC4_BASE 0xC8000600 | 254 | #define TEGRA_SDMMC4_BASE 0xC8000600 |
222 | #define TEGRA_SDMMC4_SIZE SZ_512 | 255 | #define TEGRA_SDMMC4_SIZE SZ_512 |
223 | 256 | ||
257 | #if defined(CONFIG_TEGRA_DEBUG_UART_NONE) | ||
258 | # define TEGRA_DEBUG_UART_BASE 0 | ||
259 | #elif defined(CONFIG_TEGRA_DEBUG_UARTA) | ||
260 | # define TEGRA_DEBUG_UART_BASE TEGRA_UARTA_BASE | ||
261 | #elif defined(CONFIG_TEGRA_DEBUG_UARTB) | ||
262 | # define TEGRA_DEBUG_UART_BASE TEGRA_UARTB_BASE | ||
263 | #elif defined(CONFIG_TEGRA_DEBUG_UARTC) | ||
264 | # define TEGRA_DEBUG_UART_BASE TEGRA_UARTC_BASE | ||
265 | #elif defined(CONFIG_TEGRA_DEBUG_UARTD) | ||
266 | # define TEGRA_DEBUG_UART_BASE TEGRA_UARTD_BASE | ||
267 | #elif defined(CONFIG_TEGRA_DEBUG_UARTE) | ||
268 | # define TEGRA_DEBUG_UART_BASE TEGRA_UARTE_BASE | ||
269 | #endif | ||
270 | |||
224 | #endif | 271 | #endif |
diff --git a/arch/arm/mach-tegra/include/mach/irqs.h b/arch/arm/mach-tegra/include/mach/irqs.h index 71bbf3422953..73265af4dda3 100644 --- a/arch/arm/mach-tegra/include/mach/irqs.h +++ b/arch/arm/mach-tegra/include/mach/irqs.h | |||
@@ -88,7 +88,7 @@ | |||
88 | #define INT_SYS_STATS_MON (INT_SEC_BASE + 22) | 88 | #define INT_SYS_STATS_MON (INT_SEC_BASE + 22) |
89 | #define INT_GPIO5 (INT_SEC_BASE + 23) | 89 | #define INT_GPIO5 (INT_SEC_BASE + 23) |
90 | #define INT_CPU0_PMU_INTR (INT_SEC_BASE + 24) | 90 | #define INT_CPU0_PMU_INTR (INT_SEC_BASE + 24) |
91 | #define INT_CPU2_PMU_INTR (INT_SEC_BASE + 25) | 91 | #define INT_CPU1_PMU_INTR (INT_SEC_BASE + 25) |
92 | #define INT_SEC_RES_26 (INT_SEC_BASE + 26) | 92 | #define INT_SEC_RES_26 (INT_SEC_BASE + 26) |
93 | #define INT_S_LINK1 (INT_SEC_BASE + 27) | 93 | #define INT_S_LINK1 (INT_SEC_BASE + 27) |
94 | #define INT_APB_DMA_COP (INT_SEC_BASE + 28) | 94 | #define INT_APB_DMA_COP (INT_SEC_BASE + 28) |
@@ -166,10 +166,18 @@ | |||
166 | #define INT_QUAD_RES_30 (INT_QUAD_BASE + 30) | 166 | #define INT_QUAD_RES_30 (INT_QUAD_BASE + 30) |
167 | #define INT_QUAD_RES_31 (INT_QUAD_BASE + 31) | 167 | #define INT_QUAD_RES_31 (INT_QUAD_BASE + 31) |
168 | 168 | ||
169 | #define INT_GPIO_BASE (INT_QUAD_BASE + 32) | 169 | #define INT_MAIN_NR (INT_QUAD_BASE + 32 - INT_PRI_BASE) |
170 | |||
171 | #define INT_GPIO_BASE (INT_PRI_BASE + INT_MAIN_NR) | ||
172 | |||
170 | #define INT_GPIO_NR (28 * 8) | 173 | #define INT_GPIO_NR (28 * 8) |
171 | 174 | ||
172 | #define NR_IRQS (INT_GPIO_BASE + INT_GPIO_NR) | 175 | #define TEGRA_NR_IRQS (INT_GPIO_BASE + INT_GPIO_NR) |
176 | |||
177 | #define INT_BOARD_BASE TEGRA_NR_IRQS | ||
178 | #define NR_BOARD_IRQS 32 | ||
179 | |||
180 | #define NR_IRQS (INT_BOARD_BASE + NR_BOARD_IRQS) | ||
173 | #endif | 181 | #endif |
174 | 182 | ||
175 | #endif | 183 | #endif |
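
The reworked irqs.h above layers the IRQ space as main (legacy controller) interrupts, then GPIO interrupts, then a new 32-entry block reserved for board IRQs. Assuming INT_PRI_BASE is 32 and the four 32-input controllers are contiguous (so INT_QUAD_BASE is 128), the resulting numbers work out as below; these concrete values are inferred, not part of this patch:

    INT_MAIN_NR    = (128 + 32) - 32   = 128
    INT_GPIO_BASE  = 32 + 128          = 160
    TEGRA_NR_IRQS  = 160 + 28 * 8      = 384
    INT_BOARD_BASE = 384, NR_BOARD_IRQS = 32
    NR_IRQS        = 384 + 32          = 416
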
diff --git a/arch/arm/mach-tegra/include/mach/legacy_irq.h b/arch/arm/mach-tegra/include/mach/legacy_irq.h index db1eb3dd04c8..d898c0e3d905 100644 --- a/arch/arm/mach-tegra/include/mach/legacy_irq.h +++ b/arch/arm/mach-tegra/include/mach/legacy_irq.h | |||
@@ -27,5 +27,9 @@ int tegra_legacy_force_irq_status(unsigned int irq); | |||
27 | void tegra_legacy_select_fiq(unsigned int irq, bool fiq); | 27 | void tegra_legacy_select_fiq(unsigned int irq, bool fiq); |
28 | unsigned long tegra_legacy_vfiq(int nr); | 28 | unsigned long tegra_legacy_vfiq(int nr); |
29 | unsigned long tegra_legacy_class(int nr); | 29 | unsigned long tegra_legacy_class(int nr); |
30 | int tegra_legacy_irq_set_wake(int irq, int enable); | ||
31 | void tegra_legacy_irq_set_lp1_wake_mask(void); | ||
32 | void tegra_legacy_irq_restore_mask(void); | ||
33 | void tegra_init_legacy_irq(void); | ||
30 | 34 | ||
31 | #endif | 35 | #endif |
diff --git a/arch/arm/mach-tegra/include/mach/pinmux-t2.h b/arch/arm/mach-tegra/include/mach/pinmux-t2.h index e5b9d740f973..4c2626347263 100644 --- a/arch/arm/mach-tegra/include/mach/pinmux-t2.h +++ b/arch/arm/mach-tegra/include/mach/pinmux-t2.h | |||
@@ -167,6 +167,16 @@ enum tegra_drive_pingroup { | |||
167 | TEGRA_DRIVE_PINGROUP_XM2D, | 167 | TEGRA_DRIVE_PINGROUP_XM2D, |
168 | TEGRA_DRIVE_PINGROUP_XM2CLK, | 168 | TEGRA_DRIVE_PINGROUP_XM2CLK, |
169 | TEGRA_DRIVE_PINGROUP_MEMCOMP, | 169 | TEGRA_DRIVE_PINGROUP_MEMCOMP, |
170 | TEGRA_DRIVE_PINGROUP_SDIO1, | ||
171 | TEGRA_DRIVE_PINGROUP_CRT, | ||
172 | TEGRA_DRIVE_PINGROUP_DDC, | ||
173 | TEGRA_DRIVE_PINGROUP_GMA, | ||
174 | TEGRA_DRIVE_PINGROUP_GMB, | ||
175 | TEGRA_DRIVE_PINGROUP_GMC, | ||
176 | TEGRA_DRIVE_PINGROUP_GMD, | ||
177 | TEGRA_DRIVE_PINGROUP_GME, | ||
178 | TEGRA_DRIVE_PINGROUP_OWR, | ||
179 | TEGRA_DRIVE_PINGROUP_UAD, | ||
170 | TEGRA_MAX_DRIVE_PINGROUP, | 180 | TEGRA_MAX_DRIVE_PINGROUP, |
171 | }; | 181 | }; |
172 | 182 | ||
diff --git a/arch/arm/mach-tegra/include/mach/powergate.h b/arch/arm/mach-tegra/include/mach/powergate.h new file mode 100644 index 000000000000..401d1b725291 --- /dev/null +++ b/arch/arm/mach-tegra/include/mach/powergate.h | |||
@@ -0,0 +1,40 @@ | |||
1 | /* | ||
2 | * arch/arm/mach-tegra/include/mach/powergate.h | ||
3 | * | ||
4 | * Copyright (c) 2010 Google, Inc | ||
5 | * | ||
6 | * Author: | ||
7 | * Colin Cross <ccross@google.com> | ||
8 | * | ||
9 | * This software is licensed under the terms of the GNU General Public | ||
10 | * License version 2, as published by the Free Software Foundation, and | ||
11 | * may be copied, distributed, and modified under those terms. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #ifndef _MACH_TEGRA_POWERGATE_H_ | ||
21 | #define _MACH_TEGRA_POWERGATE_H_ | ||
22 | |||
23 | #define TEGRA_POWERGATE_CPU 0 | ||
24 | #define TEGRA_POWERGATE_3D 1 | ||
25 | #define TEGRA_POWERGATE_VENC 2 | ||
26 | #define TEGRA_POWERGATE_PCIE 3 | ||
27 | #define TEGRA_POWERGATE_VDEC 4 | ||
28 | #define TEGRA_POWERGATE_L2 5 | ||
29 | #define TEGRA_POWERGATE_MPE 6 | ||
30 | #define TEGRA_NUM_POWERGATE 7 | ||
31 | |||
32 | int tegra_powergate_power_on(int id); | ||
33 | int tegra_powergate_power_off(int id); | ||
34 | bool tegra_powergate_is_powered(int id); | ||
35 | int tegra_powergate_remove_clamping(int id); | ||
36 | |||
37 | /* Must be called with clk disabled, and returns with clk enabled */ | ||
38 | int tegra_powergate_sequence_power_up(int id, struct clk *clk); | ||
39 | |||
40 | #endif /* _MACH_TEGRA_POWERGATE_H_ */ | ||
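
Per the comment above, tegra_powergate_sequence_power_up() expects the module clock to be disabled on entry and leaves it enabled on return. A hedged sketch of a consumer; the clock name and error handling are illustrative, not mandated by this header:

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <mach/powergate.h>

    static int example_power_up_3d(void)
    {
            struct clk *clk = clk_get_sys("3d", NULL);  /* assumed clock name */
            int err;

            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            /* clk is still disabled here; returns with clk enabled. */
            err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D, clk);
            if (err)
                    clk_put(clk);
            return err;
    }
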
diff --git a/arch/arm/mach-tegra/include/mach/suspend.h b/arch/arm/mach-tegra/include/mach/suspend.h new file mode 100644 index 000000000000..5af8715d2e1e --- /dev/null +++ b/arch/arm/mach-tegra/include/mach/suspend.h | |||
@@ -0,0 +1,38 @@ | |||
1 | /* | ||
2 | * arch/arm/mach-tegra/include/mach/suspend.h | ||
3 | * | ||
4 | * Copyright (C) 2010 Google, Inc. | ||
5 | * | ||
6 | * Author: | ||
7 | * Colin Cross <ccross@google.com> | ||
8 | * | ||
9 | * This software is licensed under the terms of the GNU General Public | ||
10 | * License version 2, as published by the Free Software Foundation, and | ||
11 | * may be copied, distributed, and modified under those terms. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | |||
21 | #ifndef _MACH_TEGRA_SUSPEND_H_ | ||
22 | #define _MACH_TEGRA_SUSPEND_H_ | ||
23 | |||
24 | void tegra_pinmux_suspend(void); | ||
25 | void tegra_irq_suspend(void); | ||
26 | void tegra_gpio_suspend(void); | ||
27 | void tegra_clk_suspend(void); | ||
28 | void tegra_dma_suspend(void); | ||
29 | void tegra_timer_suspend(void); | ||
30 | |||
31 | void tegra_pinmux_resume(void); | ||
32 | void tegra_irq_resume(void); | ||
33 | void tegra_gpio_resume(void); | ||
34 | void tegra_clk_resume(void); | ||
35 | void tegra_dma_resume(void); | ||
36 | void tegra_timer_resume(void); | ||
37 | |||
38 | #endif /* _MACH_TEGRA_SUSPEND_H_ */ | ||
diff --git a/arch/arm/mach-tegra/include/mach/system.h b/arch/arm/mach-tegra/include/mach/system.h index 84d5d46113f7..d0183d876c3b 100644 --- a/arch/arm/mach-tegra/include/mach/system.h +++ b/arch/arm/mach-tegra/include/mach/system.h | |||
@@ -24,16 +24,10 @@ | |||
24 | #include <mach/hardware.h> | 24 | #include <mach/hardware.h> |
25 | #include <mach/iomap.h> | 25 | #include <mach/iomap.h> |
26 | 26 | ||
27 | static inline void arch_idle(void) | 27 | extern void (*arch_reset)(char mode, const char *cmd); |
28 | { | ||
29 | } | ||
30 | 28 | ||
31 | static inline void arch_reset(char mode, const char *cmd) | 29 | static inline void arch_idle(void) |
32 | { | 30 | { |
33 | void __iomem *reset = IO_ADDRESS(TEGRA_CLK_RESET_BASE + 0x04); | ||
34 | u32 reg = readl(reset); | ||
35 | reg |= 0x04; | ||
36 | writel(reg, reset); | ||
37 | } | 31 | } |
38 | 32 | ||
39 | #endif | 33 | #endif |
diff --git a/arch/arm/mach-tegra/include/mach/uncompress.h b/arch/arm/mach-tegra/include/mach/uncompress.h index 6c4dd815abd7..4e8323770c79 100644 --- a/arch/arm/mach-tegra/include/mach/uncompress.h +++ b/arch/arm/mach-tegra/include/mach/uncompress.h | |||
@@ -26,23 +26,9 @@ | |||
26 | 26 | ||
27 | #include <mach/iomap.h> | 27 | #include <mach/iomap.h> |
28 | 28 | ||
29 | #if defined(CONFIG_TEGRA_DEBUG_UARTA) | ||
30 | #define DEBUG_UART_BASE TEGRA_UARTA_BASE | ||
31 | #elif defined(CONFIG_TEGRA_DEBUG_UARTB) | ||
32 | #define DEBUG_UART_BASE TEGRA_UARTB_BASE | ||
33 | #elif defined(CONFIG_TEGRA_DEBUG_UARTC) | ||
34 | #define DEBUG_UART_BASE TEGRA_UARTC_BASE | ||
35 | #elif defined(CONFIG_TEGRA_DEBUG_UARTD) | ||
36 | #define DEBUG_UART_BASE TEGRA_UARTD_BASE | ||
37 | #elif defined(CONFIG_TEGRA_DEBUG_UARTE) | ||
38 | #define DEBUG_UART_BASE TEGRA_UARTE_BASE | ||
39 | #else | ||
40 | #define DEBUG_UART_BASE NULL | ||
41 | #endif | ||
42 | |||
43 | static void putc(int c) | 29 | static void putc(int c) |
44 | { | 30 | { |
45 | volatile u8 *uart = (volatile u8 *)DEBUG_UART_BASE; | 31 | volatile u8 *uart = (volatile u8 *)TEGRA_DEBUG_UART_BASE; |
46 | int shift = 2; | 32 | int shift = 2; |
47 | 33 | ||
48 | if (uart == NULL) | 34 | if (uart == NULL) |
@@ -59,7 +45,7 @@ static inline void flush(void) | |||
59 | 45 | ||
60 | static inline void arch_decomp_setup(void) | 46 | static inline void arch_decomp_setup(void) |
61 | { | 47 | { |
62 | volatile u8 *uart = (volatile u8 *)DEBUG_UART_BASE; | 48 | volatile u8 *uart = (volatile u8 *)TEGRA_DEBUG_UART_BASE; |
63 | int shift = 2; | 49 | int shift = 2; |
64 | 50 | ||
65 | if (uart == NULL) | 51 | if (uart == NULL) |
diff --git a/arch/arm/mach-tegra/include/mach/usb_phy.h b/arch/arm/mach-tegra/include/mach/usb_phy.h new file mode 100644 index 000000000000..d4b8f9e298a8 --- /dev/null +++ b/arch/arm/mach-tegra/include/mach/usb_phy.h | |||
@@ -0,0 +1,86 @@ | |||
1 | /* | ||
2 | * arch/arm/mach-tegra/include/mach/usb_phy.h | ||
3 | * | ||
4 | * Copyright (C) 2010 Google, Inc. | ||
5 | * | ||
6 | * This software is licensed under the terms of the GNU General Public | ||
7 | * License version 2, as published by the Free Software Foundation, and | ||
8 | * may be copied, distributed, and modified under those terms. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #ifndef __MACH_USB_PHY_H | ||
18 | #define __MACH_USB_PHY_H | ||
19 | |||
20 | #include <linux/clk.h> | ||
21 | #include <linux/usb/otg.h> | ||
22 | |||
23 | struct tegra_utmip_config { | ||
24 | u8 hssync_start_delay; | ||
25 | u8 elastic_limit; | ||
26 | u8 idle_wait_delay; | ||
27 | u8 term_range_adj; | ||
28 | u8 xcvr_setup; | ||
29 | u8 xcvr_lsfslew; | ||
30 | u8 xcvr_lsrslew; | ||
31 | }; | ||
32 | |||
33 | struct tegra_ulpi_config { | ||
34 | int reset_gpio; | ||
35 | const char *clk; | ||
36 | }; | ||
37 | |||
38 | enum tegra_usb_phy_port_speed { | ||
39 | TEGRA_USB_PHY_PORT_SPEED_FULL = 0, | ||
40 | TEGRA_USB_PHY_PORT_SPEED_LOW, | ||
41 | TEGRA_USB_PHY_PORT_SPEED_HIGH, | ||
42 | }; | ||
43 | |||
44 | enum tegra_usb_phy_mode { | ||
45 | TEGRA_USB_PHY_MODE_DEVICE, | ||
46 | TEGRA_USB_PHY_MODE_HOST, | ||
47 | }; | ||
48 | |||
49 | struct tegra_xtal_freq; | ||
50 | |||
51 | struct tegra_usb_phy { | ||
52 | int instance; | ||
53 | const struct tegra_xtal_freq *freq; | ||
54 | void __iomem *regs; | ||
55 | void __iomem *pad_regs; | ||
56 | struct clk *clk; | ||
57 | struct clk *pll_u; | ||
58 | struct clk *pad_clk; | ||
59 | enum tegra_usb_phy_mode mode; | ||
60 | void *config; | ||
61 | struct otg_transceiver *ulpi; | ||
62 | }; | ||
63 | |||
64 | struct tegra_usb_phy *tegra_usb_phy_open(int instance, void __iomem *regs, | ||
65 | void *config, enum tegra_usb_phy_mode phy_mode); | ||
66 | |||
67 | int tegra_usb_phy_power_on(struct tegra_usb_phy *phy); | ||
68 | |||
69 | void tegra_usb_phy_clk_disable(struct tegra_usb_phy *phy); | ||
70 | |||
71 | void tegra_usb_phy_clk_enable(struct tegra_usb_phy *phy); | ||
72 | |||
73 | void tegra_usb_phy_power_off(struct tegra_usb_phy *phy); | ||
74 | |||
75 | void tegra_usb_phy_preresume(struct tegra_usb_phy *phy); | ||
76 | |||
77 | void tegra_usb_phy_postresume(struct tegra_usb_phy *phy); | ||
78 | |||
79 | void tegra_ehci_phy_restore_start(struct tegra_usb_phy *phy, | ||
80 | enum tegra_usb_phy_port_speed port_speed); | ||
81 | |||
82 | void tegra_ehci_phy_restore_end(struct tegra_usb_phy *phy); | ||
83 | |||
84 | void tegra_usb_phy_close(struct tegra_usb_phy *phy); | ||
85 | |||
86 | #endif /* __MACH_USB_PHY_H */ | ||
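
The usb_phy.h interface above is consumed by the EHCI/gadget glue elsewhere in this series. A hedged sketch of the expected call sequence for a host-mode UTMI port; the instance number and the assumption that tegra_usb_phy_open() returns an ERR_PTR on failure are illustrative:

    #include <linux/err.h>
    #include <linux/io.h>
    #include <mach/usb_phy.h>

    static struct tegra_utmip_config example_utmi_config;  /* zeroed: defaults */

    static int example_phy_bringup(void __iomem *regs)
    {
            struct tegra_usb_phy *phy;
            int err;

            phy = tegra_usb_phy_open(0, regs, &example_utmi_config,
                                     TEGRA_USB_PHY_MODE_HOST);
            if (IS_ERR(phy))
                    return PTR_ERR(phy);

            err = tegra_usb_phy_power_on(phy);
            if (err) {
                    tegra_usb_phy_close(phy);
                    return err;
            }

            /* ... controller runs here ... */

            tegra_usb_phy_power_off(phy);
            tegra_usb_phy_close(phy);
            return 0;
    }
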
diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c index 17c74d21077c..dfbc219ea492 100644 --- a/arch/arm/mach-tegra/irq.c +++ b/arch/arm/mach-tegra/irq.c | |||
@@ -18,6 +18,7 @@ | |||
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
21 | #include <linux/delay.h> | ||
21 | #include <linux/init.h> | 22 | #include <linux/init.h> |
22 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
23 | #include <linux/irq.h> | 24 | #include <linux/irq.h> |
@@ -26,73 +27,119 @@ | |||
26 | #include <asm/hardware/gic.h> | 27 | #include <asm/hardware/gic.h> |
27 | 28 | ||
28 | #include <mach/iomap.h> | 29 | #include <mach/iomap.h> |
30 | #include <mach/legacy_irq.h> | ||
31 | #include <mach/suspend.h> | ||
29 | 32 | ||
30 | #include "board.h" | 33 | #include "board.h" |
31 | 34 | ||
32 | #define INT_SYS_NR (INT_GPIO_BASE - INT_PRI_BASE) | 35 | #define PMC_CTRL 0x0 |
33 | #define INT_SYS_SZ (INT_SEC_BASE - INT_PRI_BASE) | 36 | #define PMC_CTRL_LATCH_WAKEUPS (1 << 5) |
34 | #define PPI_NR ((INT_SYS_NR+INT_SYS_SZ-1)/INT_SYS_SZ) | 37 | #define PMC_WAKE_MASK 0xc |
38 | #define PMC_WAKE_LEVEL 0x10 | ||
39 | #define PMC_WAKE_STATUS 0x14 | ||
40 | #define PMC_SW_WAKE_STATUS 0x18 | ||
41 | #define PMC_DPD_SAMPLE 0x20 | ||
35 | 42 | ||
36 | #define APBDMA_IRQ_STA_CPU 0x14 | 43 | static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE); |
37 | #define APBDMA_IRQ_MASK_SET 0x20 | ||
38 | #define APBDMA_IRQ_MASK_CLR 0x24 | ||
39 | 44 | ||
40 | #define ICTLR_CPU_IER 0x20 | 45 | static u32 tegra_lp0_wake_enb; |
41 | #define ICTLR_CPU_IER_SET 0x24 | 46 | static u32 tegra_lp0_wake_level; |
42 | #define ICTLR_CPU_IER_CLR 0x28 | 47 | static u32 tegra_lp0_wake_level_any; |
43 | #define ICTLR_CPU_IEP_CLASS 0x2c | ||
44 | #define ICTLR_COP_IER 0x30 | ||
45 | #define ICTLR_COP_IER_SET 0x34 | ||
46 | #define ICTLR_COP_IER_CLR 0x38 | ||
47 | #define ICTLR_COP_IEP_CLASS 0x3c | ||
48 | 48 | ||
49 | static void (*tegra_gic_mask_irq)(struct irq_data *d); | 49 | static void (*tegra_gic_mask_irq)(struct irq_data *d); |
50 | static void (*tegra_gic_unmask_irq)(struct irq_data *d); | 50 | static void (*tegra_gic_unmask_irq)(struct irq_data *d); |
51 | static void (*tegra_gic_ack_irq)(struct irq_data *d); | ||
51 | 52 | ||
52 | #define irq_to_ictlr(irq) (((irq) - 32) >> 5) | 53 | /* ensures that sufficient time has passed for a register write to |
53 | static void __iomem *tegra_ictlr_base = IO_ADDRESS(TEGRA_PRIMARY_ICTLR_BASE); | 54 | * serialize into the 32KHz domain */ |
54 | #define ictlr_to_virt(ictlr) (tegra_ictlr_base + (ictlr) * 0x100) | 55 | static void pmc_32kwritel(u32 val, unsigned long offs) |
56 | { | ||
57 | writel(val, pmc + offs); | ||
58 | udelay(130); | ||
59 | } | ||
60 | |||
61 | int tegra_set_lp1_wake(int irq, int enable) | ||
62 | { | ||
63 | return tegra_legacy_irq_set_wake(irq, enable); | ||
64 | } | ||
65 | |||
66 | void tegra_set_lp0_wake_pads(u32 wake_enb, u32 wake_level, u32 wake_any) | ||
67 | { | ||
68 | u32 temp; | ||
69 | u32 status; | ||
70 | u32 lvl; | ||
71 | |||
72 | wake_level &= wake_enb; | ||
73 | wake_any &= wake_enb; | ||
74 | |||
75 | wake_level |= (tegra_lp0_wake_level & tegra_lp0_wake_enb); | ||
76 | wake_any |= (tegra_lp0_wake_level_any & tegra_lp0_wake_enb); | ||
77 | |||
78 | wake_enb |= tegra_lp0_wake_enb; | ||
79 | |||
80 | pmc_32kwritel(0, PMC_SW_WAKE_STATUS); | ||
81 | temp = readl(pmc + PMC_CTRL); | ||
82 | temp |= PMC_CTRL_LATCH_WAKEUPS; | ||
83 | pmc_32kwritel(temp, PMC_CTRL); | ||
84 | temp &= ~PMC_CTRL_LATCH_WAKEUPS; | ||
85 | pmc_32kwritel(temp, PMC_CTRL); | ||
86 | status = readl(pmc + PMC_SW_WAKE_STATUS); | ||
87 | lvl = readl(pmc + PMC_WAKE_LEVEL); | ||
88 | |||
89 | /* flip the wakeup trigger for any-edge triggered pads | ||
90 | * which are currently asserting as wakeups */ | ||
91 | lvl ^= status; | ||
92 | lvl &= wake_any; | ||
93 | |||
94 | wake_level |= lvl; | ||
95 | |||
96 | writel(wake_level, pmc + PMC_WAKE_LEVEL); | ||
97 | /* Enable DPD sample to trigger sampling of pad data and the direction | ||
98 | * in which each pad will be driven during LP0 mode */ | ||
99 | writel(0x1, pmc + PMC_DPD_SAMPLE); | ||
100 | |||
101 | writel(wake_enb, pmc + PMC_WAKE_MASK); | ||
102 | } | ||
55 | 103 | ||
56 | static void tegra_mask(struct irq_data *d) | 104 | static void tegra_mask(struct irq_data *d) |
57 | { | 105 | { |
58 | void __iomem *addr = ictlr_to_virt(irq_to_ictlr(d->irq)); | ||
59 | tegra_gic_mask_irq(d); | 106 | tegra_gic_mask_irq(d); |
60 | writel(1 << (d->irq & 31), addr+ICTLR_CPU_IER_CLR); | 107 | tegra_legacy_mask_irq(d->irq); |
61 | } | 108 | } |
62 | 109 | ||
63 | static void tegra_unmask(struct irq_data *d) | 110 | static void tegra_unmask(struct irq_data *d) |
64 | { | 111 | { |
65 | void __iomem *addr = ictlr_to_virt(irq_to_ictlr(d->irq)); | ||
66 | tegra_gic_unmask_irq(d); | 112 | tegra_gic_unmask_irq(d); |
67 | writel(1<<(d->irq&31), addr+ICTLR_CPU_IER_SET); | 113 | tegra_legacy_unmask_irq(d->irq); |
68 | } | 114 | } |
69 | 115 | ||
70 | #ifdef CONFIG_PM | 116 | static void tegra_ack(struct irq_data *d) |
117 | { | ||
118 | tegra_legacy_force_irq_clr(d->irq); | ||
119 | tegra_gic_ack_irq(d); | ||
120 | } | ||
71 | 121 | ||
72 | static int tegra_set_wake(struct irq_data *d, unsigned int on) | 122 | static int tegra_retrigger(struct irq_data *d) |
73 | { | 123 | { |
74 | return 0; | 124 | tegra_legacy_force_irq_set(d->irq); |
125 | return 1; | ||
75 | } | 126 | } |
76 | #endif | ||
77 | 127 | ||
78 | static struct irq_chip tegra_irq = { | 128 | static struct irq_chip tegra_irq = { |
79 | .name = "PPI", | 129 | .name = "PPI", |
80 | .irq_mask = tegra_mask, | 130 | .irq_ack = tegra_ack, |
81 | .irq_unmask = tegra_unmask, | 131 | .irq_mask = tegra_mask, |
82 | #ifdef CONFIG_PM | 132 | .irq_unmask = tegra_unmask, |
83 | .irq_set_wake = tegra_set_wake, | 133 | .irq_retrigger = tegra_retrigger, |
84 | #endif | ||
85 | }; | 134 | }; |
86 | 135 | ||
87 | void __init tegra_init_irq(void) | 136 | void __init tegra_init_irq(void) |
88 | { | 137 | { |
89 | struct irq_chip *gic; | 138 | struct irq_chip *gic; |
90 | unsigned int i; | 139 | unsigned int i; |
140 | int irq; | ||
91 | 141 | ||
92 | for (i = 0; i < PPI_NR; i++) { | 142 | tegra_init_legacy_irq(); |
93 | writel(~0, ictlr_to_virt(i) + ICTLR_CPU_IER_CLR); | ||
94 | writel(0, ictlr_to_virt(i) + ICTLR_CPU_IEP_CLASS); | ||
95 | } | ||
96 | 143 | ||
97 | gic_init(0, 29, IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE), | 144 | gic_init(0, 29, IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE), |
98 | IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x100)); | 145 | IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x100)); |
@@ -100,72 +147,15 @@ void __init tegra_init_irq(void) | |||
100 | gic = get_irq_chip(29); | 147 | gic = get_irq_chip(29); |
101 | tegra_gic_unmask_irq = gic->irq_unmask; | 148 | tegra_gic_unmask_irq = gic->irq_unmask; |
102 | tegra_gic_mask_irq = gic->irq_mask; | 149 | tegra_gic_mask_irq = gic->irq_mask; |
103 | tegra_irq.irq_ack = gic->irq_ack; | 150 | tegra_gic_ack_irq = gic->irq_ack; |
104 | #ifdef CONFIG_SMP | 151 | #ifdef CONFIG_SMP |
105 | tegra_irq.irq_set_affinity = gic->irq_set_affinity; | 152 | tegra_irq.irq_set_affinity = gic->irq_set_affinity; |
106 | #endif | 153 | #endif |
107 | 154 | ||
108 | for (i = INT_PRI_BASE; i < INT_GPIO_BASE; i++) { | 155 | for (i = 0; i < INT_MAIN_NR; i++) { |
109 | set_irq_chip(i, &tegra_irq); | 156 | irq = INT_PRI_BASE + i; |
110 | set_irq_handler(i, handle_level_irq); | 157 | set_irq_chip(irq, &tegra_irq); |
111 | set_irq_flags(i, IRQF_VALID); | 158 | set_irq_handler(irq, handle_level_irq); |
159 | set_irq_flags(irq, IRQF_VALID); | ||
112 | } | 160 | } |
113 | } | 161 | } |
114 | |||
115 | #ifdef CONFIG_PM | ||
116 | static u32 cop_ier[PPI_NR]; | ||
117 | static u32 cpu_ier[PPI_NR]; | ||
118 | static u32 cpu_iep[PPI_NR]; | ||
119 | |||
120 | void tegra_irq_suspend(void) | ||
121 | { | ||
122 | unsigned long flags; | ||
123 | int i; | ||
124 | |||
125 | for (i = INT_PRI_BASE; i < INT_GPIO_BASE; i++) { | ||
126 | struct irq_desc *desc = irq_to_desc(i); | ||
127 | if (!desc) | ||
128 | continue; | ||
129 | if (desc->status & IRQ_WAKEUP) { | ||
130 | pr_debug("irq %d is wakeup\n", i); | ||
131 | continue; | ||
132 | } | ||
133 | disable_irq(i); | ||
134 | } | ||
135 | |||
136 | local_irq_save(flags); | ||
137 | for (i = 0; i < PPI_NR; i++) { | ||
138 | void __iomem *ictlr = ictlr_to_virt(i); | ||
139 | cpu_ier[i] = readl(ictlr + ICTLR_CPU_IER); | ||
140 | cpu_iep[i] = readl(ictlr + ICTLR_CPU_IEP_CLASS); | ||
141 | cop_ier[i] = readl(ictlr + ICTLR_COP_IER); | ||
142 | writel(~0, ictlr + ICTLR_COP_IER_CLR); | ||
143 | } | ||
144 | local_irq_restore(flags); | ||
145 | } | ||
146 | |||
147 | void tegra_irq_resume(void) | ||
148 | { | ||
149 | unsigned long flags; | ||
150 | int i; | ||
151 | |||
152 | local_irq_save(flags); | ||
153 | for (i = 0; i < PPI_NR; i++) { | ||
154 | void __iomem *ictlr = ictlr_to_virt(i); | ||
155 | writel(cpu_iep[i], ictlr + ICTLR_CPU_IEP_CLASS); | ||
156 | writel(~0ul, ictlr + ICTLR_CPU_IER_CLR); | ||
157 | writel(cpu_ier[i], ictlr + ICTLR_CPU_IER_SET); | ||
158 | writel(0, ictlr + ICTLR_COP_IEP_CLASS); | ||
159 | writel(~0ul, ictlr + ICTLR_COP_IER_CLR); | ||
160 | writel(cop_ier[i], ictlr + ICTLR_COP_IER_SET); | ||
161 | } | ||
162 | local_irq_restore(flags); | ||
163 | |||
164 | for (i = INT_PRI_BASE; i < INT_GPIO_BASE; i++) { | ||
165 | struct irq_desc *desc = irq_to_desc(i); | ||
166 | if (!desc || (desc->status & IRQ_WAKEUP)) | ||
167 | continue; | ||
168 | enable_irq(i); | ||
169 | } | ||
170 | } | ||
171 | #endif | ||
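
tegra_set_lp0_wake_pads() above flips the programmed wake level for any-edge pads that are currently latched as asserted, so they will wake on the opposite edge. A small worked sketch of that bit manipulation with made-up masks:

    #include <linux/types.h>

    static u32 example_lp0_wake_level(void)
    {
            /* Illustrative values only; bit n corresponds to wake pad n. */
            u32 wake_any   = 0x0c;  /* pads 2 and 3 are any-edge sources    */
            u32 status     = 0x08;  /* pad 3 latched as currently asserted  */
            u32 lvl        = 0x04;  /* levels read from PMC_WAKE_LEVEL      */
            u32 wake_level = 0x01;  /* pad 0 wakes on a high level          */

            lvl ^= status;          /* 0x0c: level differs from status      */
            lvl &= wake_any;        /* 0x0c: restricted to any-edge pads    */
            wake_level |= lvl;      /* 0x0d: those pads wake on the other edge */
            return wake_level;
    }
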
diff --git a/arch/arm/mach-tegra/legacy_irq.c b/arch/arm/mach-tegra/legacy_irq.c index 7cc8601c19ff..38eb719a4f53 100644 --- a/arch/arm/mach-tegra/legacy_irq.c +++ b/arch/arm/mach-tegra/legacy_irq.c | |||
@@ -18,17 +18,30 @@ | |||
18 | #include <linux/io.h> | 18 | #include <linux/io.h> |
19 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
20 | #include <mach/iomap.h> | 20 | #include <mach/iomap.h> |
21 | #include <mach/irqs.h> | ||
21 | #include <mach/legacy_irq.h> | 22 | #include <mach/legacy_irq.h> |
22 | 23 | ||
23 | #define ICTLR_CPU_IER 0x20 | 24 | #define INT_SYS_NR (INT_GPIO_BASE - INT_PRI_BASE) |
24 | #define ICTLR_CPU_IER_SET 0x24 | 25 | #define INT_SYS_SZ (INT_SEC_BASE - INT_PRI_BASE) |
25 | #define ICTLR_CPU_IER_CLR 0x28 | 26 | #define PPI_NR ((INT_SYS_NR+INT_SYS_SZ-1)/INT_SYS_SZ) |
26 | #define ICTLR_CPU_IEP_CLASS 0x2C | 27 | |
27 | #define ICTLR_CPU_IEP_VFIQ 0x08 | 28 | #define ICTLR_CPU_IEP_VFIQ 0x08 |
28 | #define ICTLR_CPU_IEP_FIR 0x14 | 29 | #define ICTLR_CPU_IEP_FIR 0x14 |
29 | #define ICTLR_CPU_IEP_FIR_SET 0x18 | 30 | #define ICTLR_CPU_IEP_FIR_SET 0x18 |
30 | #define ICTLR_CPU_IEP_FIR_CLR 0x1c | 31 | #define ICTLR_CPU_IEP_FIR_CLR 0x1c |
31 | 32 | ||
33 | #define ICTLR_CPU_IER 0x20 | ||
34 | #define ICTLR_CPU_IER_SET 0x24 | ||
35 | #define ICTLR_CPU_IER_CLR 0x28 | ||
36 | #define ICTLR_CPU_IEP_CLASS 0x2C | ||
37 | |||
38 | #define ICTLR_COP_IER 0x30 | ||
39 | #define ICTLR_COP_IER_SET 0x34 | ||
40 | #define ICTLR_COP_IER_CLR 0x38 | ||
41 | #define ICTLR_COP_IEP_CLASS 0x3c | ||
42 | |||
43 | #define NUM_ICTLRS 4 | ||
44 | |||
32 | static void __iomem *ictlr_reg_base[] = { | 45 | static void __iomem *ictlr_reg_base[] = { |
33 | IO_ADDRESS(TEGRA_PRIMARY_ICTLR_BASE), | 46 | IO_ADDRESS(TEGRA_PRIMARY_ICTLR_BASE), |
34 | IO_ADDRESS(TEGRA_SECONDARY_ICTLR_BASE), | 47 | IO_ADDRESS(TEGRA_SECONDARY_ICTLR_BASE), |
@@ -36,6 +49,9 @@ static void __iomem *ictlr_reg_base[] = { | |||
36 | IO_ADDRESS(TEGRA_QUATERNARY_ICTLR_BASE), | 49 | IO_ADDRESS(TEGRA_QUATERNARY_ICTLR_BASE), |
37 | }; | 50 | }; |
38 | 51 | ||
52 | static u32 tegra_legacy_wake_mask[4]; | ||
53 | static u32 tegra_legacy_saved_mask[4]; | ||
54 | |||
39 | /* When going into deep sleep, the CPU is powered down, taking the GIC with it | 55 | /* When going into deep sleep, the CPU is powered down, taking the GIC with it |
40 | In order to wake, the wake interrupts need to be enabled in the legacy | 56 | In order to wake, the wake interrupts need to be enabled in the legacy |
41 | interrupt controller. */ | 57 | interrupt controller. */ |
@@ -112,3 +128,88 @@ unsigned long tegra_legacy_class(int nr) | |||
112 | base = ictlr_reg_base[nr]; | 128 | base = ictlr_reg_base[nr]; |
113 | return readl(base + ICTLR_CPU_IEP_CLASS); | 129 | return readl(base + ICTLR_CPU_IEP_CLASS); |
114 | } | 130 | } |
131 | |||
132 | int tegra_legacy_irq_set_wake(int irq, int enable) | ||
133 | { | ||
134 | irq -= 32; | ||
135 | if (enable) | ||
136 | tegra_legacy_wake_mask[irq >> 5] |= 1 << (irq & 31); | ||
137 | else | ||
138 | tegra_legacy_wake_mask[irq >> 5] &= ~(1 << (irq & 31)); | ||
139 | |||
140 | return 0; | ||
141 | } | ||
142 | |||
143 | void tegra_legacy_irq_set_lp1_wake_mask(void) | ||
144 | { | ||
145 | void __iomem *base; | ||
146 | int i; | ||
147 | |||
148 | for (i = 0; i < NUM_ICTLRS; i++) { | ||
149 | base = ictlr_reg_base[i]; | ||
150 | tegra_legacy_saved_mask[i] = readl(base + ICTLR_CPU_IER); | ||
151 | writel(tegra_legacy_wake_mask[i], base + ICTLR_CPU_IER); | ||
152 | } | ||
153 | } | ||
154 | |||
155 | void tegra_legacy_irq_restore_mask(void) | ||
156 | { | ||
157 | void __iomem *base; | ||
158 | int i; | ||
159 | |||
160 | for (i = 0; i < NUM_ICTLRS; i++) { | ||
161 | base = ictlr_reg_base[i]; | ||
162 | writel(tegra_legacy_saved_mask[i], base + ICTLR_CPU_IER); | ||
163 | } | ||
164 | } | ||
165 | |||
166 | void tegra_init_legacy_irq(void) | ||
167 | { | ||
168 | int i; | ||
169 | |||
170 | for (i = 0; i < NUM_ICTLRS; i++) { | ||
171 | void __iomem *ictlr = ictlr_reg_base[i]; | ||
172 | writel(~0, ictlr + ICTLR_CPU_IER_CLR); | ||
173 | writel(0, ictlr + ICTLR_CPU_IEP_CLASS); | ||
174 | } | ||
175 | } | ||
176 | |||
177 | #ifdef CONFIG_PM | ||
178 | static u32 cop_ier[NUM_ICTLRS]; | ||
179 | static u32 cpu_ier[NUM_ICTLRS]; | ||
180 | static u32 cpu_iep[NUM_ICTLRS]; | ||
181 | |||
182 | void tegra_irq_suspend(void) | ||
183 | { | ||
184 | unsigned long flags; | ||
185 | int i; | ||
186 | |||
187 | local_irq_save(flags); | ||
188 | for (i = 0; i < NUM_ICTLRS; i++) { | ||
189 | void __iomem *ictlr = ictlr_reg_base[i]; | ||
190 | cpu_ier[i] = readl(ictlr + ICTLR_CPU_IER); | ||
191 | cpu_iep[i] = readl(ictlr + ICTLR_CPU_IEP_CLASS); | ||
192 | cop_ier[i] = readl(ictlr + ICTLR_COP_IER); | ||
193 | writel(~0, ictlr + ICTLR_COP_IER_CLR); | ||
194 | } | ||
195 | local_irq_restore(flags); | ||
196 | } | ||
197 | |||
198 | void tegra_irq_resume(void) | ||
199 | { | ||
200 | unsigned long flags; | ||
201 | int i; | ||
202 | |||
203 | local_irq_save(flags); | ||
204 | for (i = 0; i < NUM_ICTLRS; i++) { | ||
205 | void __iomem *ictlr = ictlr_reg_base[i]; | ||
206 | writel(cpu_iep[i], ictlr + ICTLR_CPU_IEP_CLASS); | ||
207 | writel(~0ul, ictlr + ICTLR_CPU_IER_CLR); | ||
208 | writel(cpu_ier[i], ictlr + ICTLR_CPU_IER_SET); | ||
209 | writel(0, ictlr + ICTLR_COP_IEP_CLASS); | ||
210 | writel(~0ul, ictlr + ICTLR_COP_IER_CLR); | ||
211 | writel(cop_ier[i], ictlr + ICTLR_COP_IER_SET); | ||
212 | } | ||
213 | local_irq_restore(flags); | ||
214 | } | ||
215 | #endif | ||
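
tegra_legacy_irq_set_lp1_wake_mask() and tegra_legacy_irq_restore_mask() are meant to bracket the LP1 suspend path: only interrupts previously flagged through tegra_legacy_irq_set_wake() stay enabled in the legacy controllers while the GIC is powered down. A hedged sketch of the calling pattern; the IRQ number is hypothetical:

    #include <mach/legacy_irq.h>

    #define EXAMPLE_WAKE_IRQ 68   /* hypothetical main-controller IRQ (>= 32) */

    static void example_lp1_enter(void)
    {
            tegra_legacy_irq_set_wake(EXAMPLE_WAKE_IRQ, 1);
            tegra_legacy_irq_set_lp1_wake_mask();
            /* ... drop into LP1; only the wake mask is left enabled ... */
    }

    static void example_lp1_exit(void)
    {
            tegra_legacy_irq_restore_mask();
    }
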
diff --git a/arch/arm/mach-tegra/pcie.c b/arch/arm/mach-tegra/pcie.c index 53f5fa37014a..2941212b853c 100644 --- a/arch/arm/mach-tegra/pcie.c +++ b/arch/arm/mach-tegra/pcie.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <mach/pinmux.h> | 39 | #include <mach/pinmux.h> |
40 | #include <mach/iomap.h> | 40 | #include <mach/iomap.h> |
41 | #include <mach/clk.h> | 41 | #include <mach/clk.h> |
42 | #include <mach/powergate.h> | ||
42 | 43 | ||
43 | /* register definitions */ | 44 | /* register definitions */ |
44 | #define AFI_OFFSET 0x3800 | 45 | #define AFI_OFFSET 0x3800 |
@@ -682,24 +683,41 @@ static void tegra_pcie_xclk_clamp(bool clamp) | |||
682 | pmc_writel(reg, PMC_SCRATCH42); | 683 | pmc_writel(reg, PMC_SCRATCH42); |
683 | } | 684 | } |
684 | 685 | ||
685 | static int tegra_pcie_power_on(void) | 686 | static void tegra_pcie_power_off(void) |
686 | { | 687 | { |
687 | tegra_pcie_xclk_clamp(true); | ||
688 | tegra_periph_reset_assert(tegra_pcie.pcie_xclk); | 688 | tegra_periph_reset_assert(tegra_pcie.pcie_xclk); |
689 | tegra_pcie_xclk_clamp(false); | 689 | tegra_periph_reset_assert(tegra_pcie.afi_clk); |
690 | tegra_periph_reset_assert(tegra_pcie.pex_clk); | ||
690 | 691 | ||
691 | clk_enable(tegra_pcie.afi_clk); | 692 | tegra_powergate_power_off(TEGRA_POWERGATE_PCIE); |
692 | clk_enable(tegra_pcie.pex_clk); | 693 | tegra_pcie_xclk_clamp(true); |
693 | return clk_enable(tegra_pcie.pll_e); | ||
694 | } | 694 | } |
695 | 695 | ||
696 | static void tegra_pcie_power_off(void) | 696 | static int tegra_pcie_power_regate(void) |
697 | { | 697 | { |
698 | int err; | ||
699 | |||
700 | tegra_pcie_power_off(); | ||
701 | |||
702 | tegra_pcie_xclk_clamp(true); | ||
703 | |||
698 | tegra_periph_reset_assert(tegra_pcie.pcie_xclk); | 704 | tegra_periph_reset_assert(tegra_pcie.pcie_xclk); |
699 | tegra_periph_reset_assert(tegra_pcie.afi_clk); | 705 | tegra_periph_reset_assert(tegra_pcie.afi_clk); |
700 | tegra_periph_reset_assert(tegra_pcie.pex_clk); | ||
701 | 706 | ||
702 | tegra_pcie_xclk_clamp(true); | 707 | err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE, |
708 | tegra_pcie.pex_clk); | ||
709 | if (err) { | ||
710 | pr_err("PCIE: powerup sequence failed: %d\n", err); | ||
711 | return err; | ||
712 | } | ||
713 | |||
714 | tegra_periph_reset_deassert(tegra_pcie.afi_clk); | ||
715 | |||
716 | tegra_pcie_xclk_clamp(false); | ||
717 | |||
718 | clk_enable(tegra_pcie.afi_clk); | ||
719 | clk_enable(tegra_pcie.pex_clk); | ||
720 | return clk_enable(tegra_pcie.pll_e); | ||
703 | } | 721 | } |
704 | 722 | ||
705 | static int tegra_pcie_clocks_get(void) | 723 | static int tegra_pcie_clocks_get(void) |
@@ -759,7 +777,7 @@ static int __init tegra_pcie_get_resources(void) | |||
759 | return err; | 777 | return err; |
760 | } | 778 | } |
761 | 779 | ||
762 | err = tegra_pcie_power_on(); | 780 | err = tegra_pcie_power_regate(); |
763 | if (err) { | 781 | if (err) { |
764 | pr_err("PCIE: failed to power up: %d\n", err); | 782 | pr_err("PCIE: failed to power up: %d\n", err); |
765 | goto err_pwr_on; | 783 | goto err_pwr_on; |
diff --git a/arch/arm/mach-tegra/pinmux-t2-tables.c b/arch/arm/mach-tegra/pinmux-t2-tables.c index a6ea34e782dc..a475367befa3 100644 --- a/arch/arm/mach-tegra/pinmux-t2-tables.c +++ b/arch/arm/mach-tegra/pinmux-t2-tables.c | |||
@@ -29,6 +29,7 @@ | |||
29 | 29 | ||
30 | #include <mach/iomap.h> | 30 | #include <mach/iomap.h> |
31 | #include <mach/pinmux.h> | 31 | #include <mach/pinmux.h> |
32 | #include <mach/suspend.h> | ||
32 | 33 | ||
33 | #define DRIVE_PINGROUP(pg_name, r) \ | 34 | #define DRIVE_PINGROUP(pg_name, r) \ |
34 | [TEGRA_DRIVE_PINGROUP_ ## pg_name] = { \ | 35 | [TEGRA_DRIVE_PINGROUP_ ## pg_name] = { \ |
@@ -65,6 +66,16 @@ const struct tegra_drive_pingroup_desc tegra_soc_drive_pingroups[TEGRA_MAX_DRIVE | |||
65 | DRIVE_PINGROUP(XM2D, 0x8cc), | 66 | DRIVE_PINGROUP(XM2D, 0x8cc), |
66 | DRIVE_PINGROUP(XM2CLK, 0x8d0), | 67 | DRIVE_PINGROUP(XM2CLK, 0x8d0), |
67 | DRIVE_PINGROUP(MEMCOMP, 0x8d4), | 68 | DRIVE_PINGROUP(MEMCOMP, 0x8d4), |
69 | DRIVE_PINGROUP(SDIO1, 0x8e0), | ||
70 | DRIVE_PINGROUP(CRT, 0x8ec), | ||
71 | DRIVE_PINGROUP(DDC, 0x8f0), | ||
72 | DRIVE_PINGROUP(GMA, 0x8f4), | ||
73 | DRIVE_PINGROUP(GMB, 0x8f8), | ||
74 | DRIVE_PINGROUP(GMC, 0x8fc), | ||
75 | DRIVE_PINGROUP(GMD, 0x900), | ||
76 | DRIVE_PINGROUP(GME, 0x904), | ||
77 | DRIVE_PINGROUP(OWR, 0x908), | ||
78 | DRIVE_PINGROUP(UAD, 0x90c), | ||
68 | }; | 79 | }; |
69 | 80 | ||
70 | #define PINGROUP(pg_name, vdd, f0, f1, f2, f3, f_safe, \ | 81 | #define PINGROUP(pg_name, vdd, f0, f1, f2, f3, f_safe, \ |
@@ -216,7 +227,8 @@ const struct tegra_pingroup_desc tegra_soc_pingroups[TEGRA_MAX_PINGROUP] = { | |||
216 | #define PULLUPDOWN_REG_NUM 5 | 227 | #define PULLUPDOWN_REG_NUM 5 |
217 | 228 | ||
218 | static u32 pinmux_reg[TRISTATE_REG_NUM + PIN_MUX_CTL_REG_NUM + | 229 | static u32 pinmux_reg[TRISTATE_REG_NUM + PIN_MUX_CTL_REG_NUM + |
219 | PULLUPDOWN_REG_NUM]; | 230 | PULLUPDOWN_REG_NUM + |
231 | ARRAY_SIZE(tegra_soc_drive_pingroups)]; | ||
220 | 232 | ||
221 | static inline unsigned long pg_readl(unsigned long offset) | 233 | static inline unsigned long pg_readl(unsigned long offset) |
222 | { | 234 | { |
@@ -233,14 +245,17 @@ void tegra_pinmux_suspend(void) | |||
233 | unsigned int i; | 245 | unsigned int i; |
234 | u32 *ctx = pinmux_reg; | 246 | u32 *ctx = pinmux_reg; |
235 | 247 | ||
236 | for (i = 0; i < TRISTATE_REG_NUM; i++) | ||
237 | *ctx++ = pg_readl(TRISTATE_REG_A + i*4); | ||
238 | |||
239 | for (i = 0; i < PIN_MUX_CTL_REG_NUM; i++) | 248 | for (i = 0; i < PIN_MUX_CTL_REG_NUM; i++) |
240 | *ctx++ = pg_readl(PIN_MUX_CTL_REG_A + i*4); | 249 | *ctx++ = pg_readl(PIN_MUX_CTL_REG_A + i*4); |
241 | 250 | ||
242 | for (i = 0; i < PULLUPDOWN_REG_NUM; i++) | 251 | for (i = 0; i < PULLUPDOWN_REG_NUM; i++) |
243 | *ctx++ = pg_readl(PULLUPDOWN_REG_A + i*4); | 252 | *ctx++ = pg_readl(PULLUPDOWN_REG_A + i*4); |
253 | |||
254 | for (i = 0; i < TRISTATE_REG_NUM; i++) | ||
255 | *ctx++ = pg_readl(TRISTATE_REG_A + i*4); | ||
256 | |||
257 | for (i = 0; i < ARRAY_SIZE(tegra_soc_drive_pingroups); i++) | ||
258 | *ctx++ = pg_readl(tegra_soc_drive_pingroups[i].reg); | ||
244 | } | 259 | } |
245 | 260 | ||
246 | void tegra_pinmux_resume(void) | 261 | void tegra_pinmux_resume(void) |
@@ -256,5 +271,8 @@ void tegra_pinmux_resume(void) | |||
256 | 271 | ||
257 | for (i = 0; i < TRISTATE_REG_NUM; i++) | 272 | for (i = 0; i < TRISTATE_REG_NUM; i++) |
258 | pg_writel(*ctx++, TRISTATE_REG_A + i*4); | 273 | pg_writel(*ctx++, TRISTATE_REG_A + i*4); |
274 | |||
275 | for (i = 0; i < ARRAY_SIZE(tegra_soc_drive_pingroups); i++) | ||
276 | pg_writel(*ctx++, tegra_soc_drive_pingroups[i].reg); | ||
259 | } | 277 | } |
260 | #endif | 278 | #endif |
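
The suspend/resume pair above only works if the save order, the restore order, and the size of pinmux_reg all describe the same register list; moving the tristate save after the mux and pull-up/down saves presumably lines the save order up with the order the resume path already uses, and the drive-pingroup registers are simply appended to both. A compressed sketch of that invariant, using placeholder register names rather than the real Tegra map:

/* Sketch only: context buffer sized for exactly the registers that the
 * suspend loop saves; resume must replay them in the same order. */
static u32 ctx[MUX_NR + PULL_NR + TRI_NR + DRIVE_NR];	/* placeholder counts */

static void ctx_save(void)
{
	u32 *p = ctx;
	unsigned int i;

	for (i = 0; i < MUX_NR; i++)
		*p++ = pg_readl(MUX_BASE + i * 4);
	for (i = 0; i < PULL_NR; i++)
		*p++ = pg_readl(PULL_BASE + i * 4);
	for (i = 0; i < TRI_NR; i++)
		*p++ = pg_readl(TRI_BASE + i * 4);
	for (i = 0; i < DRIVE_NR; i++)
		*p++ = pg_readl(drive_reg[i]);	/* one register per drive pingroup */
}
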
diff --git a/arch/arm/mach-tegra/powergate.c b/arch/arm/mach-tegra/powergate.c new file mode 100644 index 000000000000..3cee9aa1f2c8 --- /dev/null +++ b/arch/arm/mach-tegra/powergate.c | |||
@@ -0,0 +1,212 @@ | |||
1 | /* | ||
2 | * arch/arm/mach-tegra/powergate.c | ||
3 | * | ||
4 | * Copyright (c) 2010 Google, Inc | ||
5 | * | ||
6 | * Author: | ||
7 | * Colin Cross <ccross@google.com> | ||
8 | * | ||
9 | * This software is licensed under the terms of the GNU General Public | ||
10 | * License version 2, as published by the Free Software Foundation, and | ||
11 | * may be copied, distributed, and modified under those terms. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/clk.h> | ||
22 | #include <linux/debugfs.h> | ||
23 | #include <linux/delay.h> | ||
24 | #include <linux/err.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/io.h> | ||
27 | #include <linux/seq_file.h> | ||
28 | #include <linux/spinlock.h> | ||
29 | |||
30 | #include <mach/clk.h> | ||
31 | #include <mach/iomap.h> | ||
32 | #include <mach/powergate.h> | ||
33 | |||
34 | #define PWRGATE_TOGGLE 0x30 | ||
35 | #define PWRGATE_TOGGLE_START (1 << 8) | ||
36 | |||
37 | #define REMOVE_CLAMPING 0x34 | ||
38 | |||
39 | #define PWRGATE_STATUS 0x38 | ||
40 | |||
41 | static DEFINE_SPINLOCK(tegra_powergate_lock); | ||
42 | |||
43 | static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE); | ||
44 | |||
45 | static u32 pmc_read(unsigned long reg) | ||
46 | { | ||
47 | return readl(pmc + reg); | ||
48 | } | ||
49 | |||
50 | static void pmc_write(u32 val, unsigned long reg) | ||
51 | { | ||
52 | writel(val, pmc + reg); | ||
53 | } | ||
54 | |||
55 | static int tegra_powergate_set(int id, bool new_state) | ||
56 | { | ||
57 | bool status; | ||
58 | unsigned long flags; | ||
59 | |||
60 | spin_lock_irqsave(&tegra_powergate_lock, flags); | ||
61 | |||
62 | status = pmc_read(PWRGATE_STATUS) & (1 << id); | ||
63 | |||
64 | if (status == new_state) { | ||
65 | spin_unlock_irqrestore(&tegra_powergate_lock, flags); | ||
66 | return -EINVAL; | ||
67 | } | ||
68 | |||
69 | pmc_write(PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE); | ||
70 | |||
71 | spin_unlock_irqrestore(&tegra_powergate_lock, flags); | ||
72 | |||
73 | return 0; | ||
74 | } | ||
75 | |||
76 | int tegra_powergate_power_on(int id) | ||
77 | { | ||
78 | if (id < 0 || id >= TEGRA_NUM_POWERGATE) | ||
79 | return -EINVAL; | ||
80 | |||
81 | return tegra_powergate_set(id, true); | ||
82 | } | ||
83 | |||
84 | int tegra_powergate_power_off(int id) | ||
85 | { | ||
86 | if (id < 0 || id >= TEGRA_NUM_POWERGATE) | ||
87 | return -EINVAL; | ||
88 | |||
89 | return tegra_powergate_set(id, false); | ||
90 | } | ||
91 | |||
92 | bool tegra_powergate_is_powered(int id) | ||
93 | { | ||
94 | u32 status; | ||
95 | |||
96 | if (id < 0 || id >= TEGRA_NUM_POWERGATE) | ||
97 | return -EINVAL; | ||
98 | |||
99 | status = pmc_read(PWRGATE_STATUS) & (1 << id); | ||
100 | return !!status; | ||
101 | } | ||
102 | |||
103 | int tegra_powergate_remove_clamping(int id) | ||
104 | { | ||
105 | u32 mask; | ||
106 | |||
107 | if (id < 0 || id >= TEGRA_NUM_POWERGATE) | ||
108 | return -EINVAL; | ||
109 | |||
110 | /* | ||
111 | * Tegra 2 has a bug where PCIE and VDE clamping masks are | ||
112 | * swapped relative to the partition ids | ||
113 | */ | ||
114 | if (id == TEGRA_POWERGATE_VDEC) | ||
115 | mask = (1 << TEGRA_POWERGATE_PCIE); | ||
116 | else if (id == TEGRA_POWERGATE_PCIE) | ||
117 | mask = (1 << TEGRA_POWERGATE_VDEC); | ||
118 | else | ||
119 | mask = (1 << id); | ||
120 | |||
121 | pmc_write(mask, REMOVE_CLAMPING); | ||
122 | |||
123 | return 0; | ||
124 | } | ||
125 | |||
126 | /* Must be called with clk disabled, and returns with clk enabled */ | ||
127 | int tegra_powergate_sequence_power_up(int id, struct clk *clk) | ||
128 | { | ||
129 | int ret; | ||
130 | |||
131 | tegra_periph_reset_assert(clk); | ||
132 | |||
133 | ret = tegra_powergate_power_on(id); | ||
134 | if (ret) | ||
135 | goto err_power; | ||
136 | |||
137 | ret = clk_enable(clk); | ||
138 | if (ret) | ||
139 | goto err_clk; | ||
140 | |||
141 | udelay(10); | ||
142 | |||
143 | ret = tegra_powergate_remove_clamping(id); | ||
144 | if (ret) | ||
145 | goto err_clamp; | ||
146 | |||
147 | udelay(10); | ||
148 | tegra_periph_reset_deassert(clk); | ||
149 | |||
150 | return 0; | ||
151 | |||
152 | err_clamp: | ||
153 | clk_disable(clk); | ||
154 | err_clk: | ||
155 | tegra_powergate_power_off(id); | ||
156 | err_power: | ||
157 | return ret; | ||
158 | } | ||
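
A driver that owns a powergated partition and its module clock is expected to call this helper instead of open-coding the reset/ungate/unclamp dance; the PCIe change earlier in this commit does exactly that. A minimal, hedged sketch of a caller:

/* Sketch: power up the PCIe partition and leave pex_clk running.
 * On failure the helper leaves the partition gated and the clock off. */
static int example_power_up_pcie(struct clk *pex_clk)
{
	int err;

	err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE, pex_clk);
	if (err) {
		pr_err("PCIE: powerup sequence failed: %d\n", err);
		return err;
	}

	/* partition is unclamped and out of reset, pex_clk is enabled */
	return 0;
}
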
159 | |||
160 | #ifdef CONFIG_DEBUG_FS | ||
161 | |||
162 | static const char * const powergate_name[] = { | ||
163 | [TEGRA_POWERGATE_CPU] = "cpu", | ||
164 | [TEGRA_POWERGATE_3D] = "3d", | ||
165 | [TEGRA_POWERGATE_VENC] = "venc", | ||
166 | [TEGRA_POWERGATE_VDEC] = "vdec", | ||
167 | [TEGRA_POWERGATE_PCIE] = "pcie", | ||
168 | [TEGRA_POWERGATE_L2] = "l2", | ||
169 | [TEGRA_POWERGATE_MPE] = "mpe", | ||
170 | }; | ||
171 | |||
172 | static int powergate_show(struct seq_file *s, void *data) | ||
173 | { | ||
174 | int i; | ||
175 | |||
176 | seq_printf(s, " powergate powered\n"); | ||
177 | seq_printf(s, "------------------\n"); | ||
178 | |||
179 | for (i = 0; i < TEGRA_NUM_POWERGATE; i++) | ||
180 | seq_printf(s, " %9s %7s\n", powergate_name[i], | ||
181 | tegra_powergate_is_powered(i) ? "yes" : "no"); | ||
182 | return 0; | ||
183 | } | ||
184 | |||
185 | static int powergate_open(struct inode *inode, struct file *file) | ||
186 | { | ||
187 | return single_open(file, powergate_show, inode->i_private); | ||
188 | } | ||
189 | |||
190 | static const struct file_operations powergate_fops = { | ||
191 | .open = powergate_open, | ||
192 | .read = seq_read, | ||
193 | .llseek = seq_lseek, | ||
194 | .release = single_release, | ||
195 | }; | ||
196 | |||
197 | static int __init powergate_debugfs_init(void) | ||
198 | { | ||
199 | struct dentry *d; | ||
200 | int err = -ENOMEM; | ||
201 | |||
202 | d = debugfs_create_file("powergate", S_IRUGO, NULL, NULL, | ||
203 | &powergate_fops); | ||
204 | if (!d) | ||
205 | return -ENOMEM; | ||
206 | |||
207 | return err; | ||
208 | } | ||
209 | |||
210 | late_initcall(powergate_debugfs_init); | ||
211 | |||
212 | #endif | ||
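
With CONFIG_DEBUG_FS enabled, the file is created at the debugfs root, so reading /sys/kernel/debug/powergate (with debugfs mounted in the usual place) produces output roughly like the following; the row order and the yes/no values are illustrative only and depend on the running board:

 powergate powered
------------------
       cpu     yes
        3d      no
       ...     ...
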
diff --git a/arch/arm/mach-tegra/tegra2_clocks.c b/arch/arm/mach-tegra/tegra2_clocks.c index f0dae6d8ba52..6d7c4eea4dcb 100644 --- a/arch/arm/mach-tegra/tegra2_clocks.c +++ b/arch/arm/mach-tegra/tegra2_clocks.c | |||
@@ -23,14 +23,15 @@ | |||
23 | #include <linux/spinlock.h> | 23 | #include <linux/spinlock.h> |
24 | #include <linux/delay.h> | 24 | #include <linux/delay.h> |
25 | #include <linux/io.h> | 25 | #include <linux/io.h> |
26 | #include <linux/hrtimer.h> | ||
27 | #include <linux/clkdev.h> | 26 | #include <linux/clkdev.h> |
27 | #include <linux/clk.h> | ||
28 | 28 | ||
29 | #include <mach/iomap.h> | 29 | #include <mach/iomap.h> |
30 | #include <mach/suspend.h> | ||
30 | 31 | ||
31 | #include "clock.h" | 32 | #include "clock.h" |
32 | #include "fuse.h" | 33 | #include "fuse.h" |
33 | #include "tegra2_dvfs.h" | 34 | #include "tegra2_emc.h" |
34 | 35 | ||
35 | #define RST_DEVICES 0x004 | 36 | #define RST_DEVICES 0x004 |
36 | #define RST_DEVICES_SET 0x300 | 37 | #define RST_DEVICES_SET 0x300 |
@@ -51,7 +52,7 @@ | |||
51 | #define OSC_CTRL_OSC_FREQ_19_2MHZ (1<<30) | 52 | #define OSC_CTRL_OSC_FREQ_19_2MHZ (1<<30) |
52 | #define OSC_CTRL_OSC_FREQ_12MHZ (2<<30) | 53 | #define OSC_CTRL_OSC_FREQ_12MHZ (2<<30) |
53 | #define OSC_CTRL_OSC_FREQ_26MHZ (3<<30) | 54 | #define OSC_CTRL_OSC_FREQ_26MHZ (3<<30) |
54 | #define OSC_CTRL_MASK 0x3f2 | 55 | #define OSC_CTRL_MASK (0x3f2 | OSC_CTRL_OSC_FREQ_MASK) |
55 | 56 | ||
56 | #define OSC_FREQ_DET 0x58 | 57 | #define OSC_FREQ_DET 0x58 |
57 | #define OSC_FREQ_DET_TRIG (1<<31) | 58 | #define OSC_FREQ_DET_TRIG (1<<31) |
@@ -73,12 +74,15 @@ | |||
73 | #define PERIPH_CLK_SOURCE_DIVU16_MASK 0xFFFF | 74 | #define PERIPH_CLK_SOURCE_DIVU16_MASK 0xFFFF |
74 | #define PERIPH_CLK_SOURCE_DIV_SHIFT 0 | 75 | #define PERIPH_CLK_SOURCE_DIV_SHIFT 0 |
75 | 76 | ||
77 | #define SDMMC_CLK_INT_FB_SEL (1 << 23) | ||
78 | #define SDMMC_CLK_INT_FB_DLY_SHIFT 16 | ||
79 | #define SDMMC_CLK_INT_FB_DLY_MASK (0xF << SDMMC_CLK_INT_FB_DLY_SHIFT) | ||
80 | |||
76 | #define PLL_BASE 0x0 | 81 | #define PLL_BASE 0x0 |
77 | #define PLL_BASE_BYPASS (1<<31) | 82 | #define PLL_BASE_BYPASS (1<<31) |
78 | #define PLL_BASE_ENABLE (1<<30) | 83 | #define PLL_BASE_ENABLE (1<<30) |
79 | #define PLL_BASE_REF_ENABLE (1<<29) | 84 | #define PLL_BASE_REF_ENABLE (1<<29) |
80 | #define PLL_BASE_OVERRIDE (1<<28) | 85 | #define PLL_BASE_OVERRIDE (1<<28) |
81 | #define PLL_BASE_LOCK (1<<27) | ||
82 | #define PLL_BASE_DIVP_MASK (0x7<<20) | 86 | #define PLL_BASE_DIVP_MASK (0x7<<20) |
83 | #define PLL_BASE_DIVP_SHIFT 20 | 87 | #define PLL_BASE_DIVP_SHIFT 20 |
84 | #define PLL_BASE_DIVN_MASK (0x3FF<<8) | 88 | #define PLL_BASE_DIVN_MASK (0x3FF<<8) |
@@ -93,7 +97,6 @@ | |||
93 | #define PLL_OUT_RESET_DISABLE (1<<0) | 97 | #define PLL_OUT_RESET_DISABLE (1<<0) |
94 | 98 | ||
95 | #define PLL_MISC(c) (((c)->flags & PLL_ALT_MISC_REG) ? 0x4 : 0xc) | 99 | #define PLL_MISC(c) (((c)->flags & PLL_ALT_MISC_REG) ? 0x4 : 0xc) |
96 | #define PLL_MISC_LOCK_ENABLE(c) (((c)->flags & PLLU) ? (1<<22) : (1<<18)) | ||
97 | 100 | ||
98 | #define PLL_MISC_DCCON_SHIFT 20 | 101 | #define PLL_MISC_DCCON_SHIFT 20 |
99 | #define PLL_MISC_CPCON_SHIFT 8 | 102 | #define PLL_MISC_CPCON_SHIFT 8 |
@@ -111,9 +114,9 @@ | |||
111 | 114 | ||
112 | #define PLLE_MISC_READY (1 << 15) | 115 | #define PLLE_MISC_READY (1 << 15) |
113 | 116 | ||
114 | #define PERIPH_CLK_TO_ENB_REG(c) ((c->clk_num / 32) * 4) | 117 | #define PERIPH_CLK_TO_ENB_REG(c) ((c->u.periph.clk_num / 32) * 4) |
115 | #define PERIPH_CLK_TO_ENB_SET_REG(c) ((c->clk_num / 32) * 8) | 118 | #define PERIPH_CLK_TO_ENB_SET_REG(c) ((c->u.periph.clk_num / 32) * 8) |
116 | #define PERIPH_CLK_TO_ENB_BIT(c) (1 << (c->clk_num % 32)) | 119 | #define PERIPH_CLK_TO_ENB_BIT(c) (1 << (c->u.periph.clk_num % 32)) |
117 | 120 | ||
118 | #define SUPER_CLK_MUX 0x00 | 121 | #define SUPER_CLK_MUX 0x00 |
119 | #define SUPER_STATE_SHIFT 28 | 122 | #define SUPER_STATE_SHIFT 28 |
@@ -134,12 +137,42 @@ | |||
134 | #define BUS_CLK_DISABLE (1<<3) | 137 | #define BUS_CLK_DISABLE (1<<3) |
135 | #define BUS_CLK_DIV_MASK 0x3 | 138 | #define BUS_CLK_DIV_MASK 0x3 |
136 | 139 | ||
140 | #define PMC_CTRL 0x0 | ||
141 | #define PMC_CTRL_BLINK_ENB (1 << 7) | ||
142 | |||
143 | #define PMC_DPD_PADS_ORIDE 0x1c | ||
144 | #define PMC_DPD_PADS_ORIDE_BLINK_ENB (1 << 20) | ||
145 | |||
146 | #define PMC_BLINK_TIMER_DATA_ON_SHIFT 0 | ||
147 | #define PMC_BLINK_TIMER_DATA_ON_MASK 0x7fff | ||
148 | #define PMC_BLINK_TIMER_ENB (1 << 15) | ||
149 | #define PMC_BLINK_TIMER_DATA_OFF_SHIFT 16 | ||
150 | #define PMC_BLINK_TIMER_DATA_OFF_MASK 0xffff | ||
151 | |||
137 | static void __iomem *reg_clk_base = IO_ADDRESS(TEGRA_CLK_RESET_BASE); | 152 | static void __iomem *reg_clk_base = IO_ADDRESS(TEGRA_CLK_RESET_BASE); |
153 | static void __iomem *reg_pmc_base = IO_ADDRESS(TEGRA_PMC_BASE); | ||
154 | |||
155 | /* | ||
156 | * Some clocks share a register with other clocks. Any clock op that | ||
157 | * non-atomically modifies a register used by another clock must lock | ||
158 | * clock_register_lock first. | ||
159 | */ | ||
160 | static DEFINE_SPINLOCK(clock_register_lock); | ||
161 | |||
162 | /* | ||
163 | * Some peripheral clocks share an enable bit, so refcount the enable bits | ||
164 | * in registers CLK_ENABLE_L, CLK_ENABLE_H, and CLK_ENABLE_U | ||
165 | */ | ||
166 | static int tegra_periph_clk_enable_refcount[3 * 32]; | ||
138 | 167 | ||
139 | #define clk_writel(value, reg) \ | 168 | #define clk_writel(value, reg) \ |
140 | __raw_writel(value, (u32)reg_clk_base + (reg)) | 169 | __raw_writel(value, (u32)reg_clk_base + (reg)) |
141 | #define clk_readl(reg) \ | 170 | #define clk_readl(reg) \ |
142 | __raw_readl((u32)reg_clk_base + (reg)) | 171 | __raw_readl((u32)reg_clk_base + (reg)) |
172 | #define pmc_writel(value, reg) \ | ||
173 | __raw_writel(value, (u32)reg_pmc_base + (reg)) | ||
174 | #define pmc_readl(reg) \ | ||
175 | __raw_readl((u32)reg_pmc_base + (reg)) | ||
143 | 176 | ||
144 | unsigned long clk_measure_input_freq(void) | 177 | unsigned long clk_measure_input_freq(void) |
145 | { | 178 | { |
@@ -245,6 +278,18 @@ static struct clk_ops tegra_clk_m_ops = { | |||
245 | .disable = tegra2_clk_m_disable, | 278 | .disable = tegra2_clk_m_disable, |
246 | }; | 279 | }; |
247 | 280 | ||
281 | void tegra2_periph_reset_assert(struct clk *c) | ||
282 | { | ||
283 | BUG_ON(!c->ops->reset); | ||
284 | c->ops->reset(c, true); | ||
285 | } | ||
286 | |||
287 | void tegra2_periph_reset_deassert(struct clk *c) | ||
288 | { | ||
289 | BUG_ON(!c->ops->reset); | ||
290 | c->ops->reset(c, false); | ||
291 | } | ||
292 | |||
248 | /* super clock functions */ | 293 | /* super clock functions */ |
249 | /* "super clocks" on tegra have two-stage muxes and a clock skipping | 294 | /* "super clocks" on tegra have two-stage muxes and a clock skipping |
250 | * super divider. We will ignore the clock skipping divider, since we | 295 | * super divider. We will ignore the clock skipping divider, since we |
@@ -303,12 +348,12 @@ static int tegra2_super_clk_set_parent(struct clk *c, struct clk *p) | |||
303 | val |= sel->value << shift; | 348 | val |= sel->value << shift; |
304 | 349 | ||
305 | if (c->refcnt) | 350 | if (c->refcnt) |
306 | clk_enable_locked(p); | 351 | clk_enable(p); |
307 | 352 | ||
308 | clk_writel(val, c->reg); | 353 | clk_writel(val, c->reg); |
309 | 354 | ||
310 | if (c->refcnt && c->parent) | 355 | if (c->refcnt && c->parent) |
311 | clk_disable_locked(c->parent); | 356 | clk_disable(c->parent); |
312 | 357 | ||
313 | clk_reparent(c, p); | 358 | clk_reparent(c, p); |
314 | return 0; | 359 | return 0; |
@@ -317,11 +362,24 @@ static int tegra2_super_clk_set_parent(struct clk *c, struct clk *p) | |||
317 | return -EINVAL; | 362 | return -EINVAL; |
318 | } | 363 | } |
319 | 364 | ||
365 | /* | ||
366 | * Super clocks have "clock skippers" instead of dividers. Dividing using | ||
367 | * a clock skipper does not allow the voltage to be scaled down, so instead | ||
368 | * adjust the rate of the parent clock. This requires that the parent of a | ||
369 | * super clock have no other children, otherwise the rate will change | ||
370 | * underneath the other children. | ||
371 | */ | ||
372 | static int tegra2_super_clk_set_rate(struct clk *c, unsigned long rate) | ||
373 | { | ||
374 | return clk_set_rate(c->parent, rate); | ||
375 | } | ||
376 | |||
320 | static struct clk_ops tegra_super_ops = { | 377 | static struct clk_ops tegra_super_ops = { |
321 | .init = tegra2_super_clk_init, | 378 | .init = tegra2_super_clk_init, |
322 | .enable = tegra2_super_clk_enable, | 379 | .enable = tegra2_super_clk_enable, |
323 | .disable = tegra2_super_clk_disable, | 380 | .disable = tegra2_super_clk_disable, |
324 | .set_parent = tegra2_super_clk_set_parent, | 381 | .set_parent = tegra2_super_clk_set_parent, |
382 | .set_rate = tegra2_super_clk_set_rate, | ||
325 | }; | 383 | }; |
326 | 384 | ||
327 | /* virtual cpu clock functions */ | 385 | /* virtual cpu clock functions */ |
@@ -351,25 +409,36 @@ static void tegra2_cpu_clk_disable(struct clk *c) | |||
351 | static int tegra2_cpu_clk_set_rate(struct clk *c, unsigned long rate) | 409 | static int tegra2_cpu_clk_set_rate(struct clk *c, unsigned long rate) |
352 | { | 410 | { |
353 | int ret; | 411 | int ret; |
354 | ret = clk_set_parent_locked(c->parent, c->backup); | 412 | /* |
413 | * Take an extra reference to the main pll so it doesn't turn | ||
414 | * off when we move the cpu off of it | ||
415 | */ | ||
416 | clk_enable(c->u.cpu.main); | ||
417 | |||
418 | ret = clk_set_parent(c->parent, c->u.cpu.backup); | ||
355 | if (ret) { | 419 | if (ret) { |
356 | pr_err("Failed to switch cpu to clock %s\n", c->backup->name); | 420 | pr_err("Failed to switch cpu to clock %s\n", c->u.cpu.backup->name); |
357 | return ret; | 421 | goto out; |
358 | } | 422 | } |
359 | 423 | ||
360 | ret = clk_set_rate_locked(c->main, rate); | 424 | if (rate == clk_get_rate(c->u.cpu.backup)) |
425 | goto out; | ||
426 | |||
427 | ret = clk_set_rate(c->u.cpu.main, rate); | ||
361 | if (ret) { | 428 | if (ret) { |
362 | pr_err("Failed to change cpu pll to %lu\n", rate); | 429 | pr_err("Failed to change cpu pll to %lu\n", rate); |
363 | return ret; | 430 | goto out; |
364 | } | 431 | } |
365 | 432 | ||
366 | ret = clk_set_parent_locked(c->parent, c->main); | 433 | ret = clk_set_parent(c->parent, c->u.cpu.main); |
367 | if (ret) { | 434 | if (ret) { |
368 | pr_err("Failed to switch cpu to clock %s\n", c->main->name); | 435 | pr_err("Failed to switch cpu to clock %s\n", c->u.cpu.main->name); |
369 | return ret; | 436 | goto out; |
370 | } | 437 | } |
371 | 438 | ||
372 | return 0; | 439 | out: |
440 | clk_disable(c->u.cpu.main); | ||
441 | return ret; | ||
373 | } | 442 | } |
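
From a consumer's point of view (for example a cpufreq driver), all of this reparenting is hidden behind the normal clk API; a hedged sketch of a caller, assuming the virtual CPU clock can be looked up under the con_id "cpu":

/* Sketch: raise or lower the CPU frequency. The set_rate op above
 * transparently parks the CPU on the backup clock while the main PLL
 * is retuned, then switches back. The "cpu" lookup name is assumed. */
static int example_set_cpu_rate(unsigned long target_hz)
{
	struct clk *cpu = clk_get_sys(NULL, "cpu");

	if (IS_ERR(cpu))
		return PTR_ERR(cpu);

	return clk_set_rate(cpu, target_hz);
}
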
374 | 443 | ||
375 | static struct clk_ops tegra_cpu_ops = { | 444 | static struct clk_ops tegra_cpu_ops = { |
@@ -379,6 +448,20 @@ static struct clk_ops tegra_cpu_ops = { | |||
379 | .set_rate = tegra2_cpu_clk_set_rate, | 448 | .set_rate = tegra2_cpu_clk_set_rate, |
380 | }; | 449 | }; |
381 | 450 | ||
451 | /* virtual cop clock functions. Used to acquire the fake 'cop' clock to | ||
452 | * reset the COP block (i.e. AVP) */ | ||
453 | static void tegra2_cop_clk_reset(struct clk *c, bool assert) | ||
454 | { | ||
455 | unsigned long reg = assert ? RST_DEVICES_SET : RST_DEVICES_CLR; | ||
456 | |||
457 | pr_debug("%s %s\n", __func__, assert ? "assert" : "deassert"); | ||
458 | clk_writel(1 << 1, reg); | ||
459 | } | ||
460 | |||
461 | static struct clk_ops tegra_cop_ops = { | ||
462 | .reset = tegra2_cop_clk_reset, | ||
463 | }; | ||
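
The only purpose of this fake clock is to hand callers a handle for resetting the AVP; a hedged usage sketch (the "cop" con_id is assumed from the clock table, which is not part of this hunk):

/* Sketch: hold the AVP (COP) in reset through the fake "cop" clock. */
static void example_hold_avp_in_reset(void)
{
	struct clk *cop = clk_get_sys(NULL, "cop");	/* assumed lookup name */

	if (!IS_ERR(cop))
		tegra_periph_reset_assert(cop);	/* writes bit 1 of RST_DEVICES_SET */
}
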
464 | |||
382 | /* bus clock functions */ | 465 | /* bus clock functions */ |
383 | static void tegra2_bus_clk_init(struct clk *c) | 466 | static void tegra2_bus_clk_init(struct clk *c) |
384 | { | 467 | { |
@@ -390,24 +473,45 @@ static void tegra2_bus_clk_init(struct clk *c) | |||
390 | 473 | ||
391 | static int tegra2_bus_clk_enable(struct clk *c) | 474 | static int tegra2_bus_clk_enable(struct clk *c) |
392 | { | 475 | { |
393 | u32 val = clk_readl(c->reg); | 476 | u32 val; |
477 | unsigned long flags; | ||
478 | |||
479 | spin_lock_irqsave(&clock_register_lock, flags); | ||
480 | |||
481 | val = clk_readl(c->reg); | ||
394 | val &= ~(BUS_CLK_DISABLE << c->reg_shift); | 482 | val &= ~(BUS_CLK_DISABLE << c->reg_shift); |
395 | clk_writel(val, c->reg); | 483 | clk_writel(val, c->reg); |
484 | |||
485 | spin_unlock_irqrestore(&clock_register_lock, flags); | ||
486 | |||
396 | return 0; | 487 | return 0; |
397 | } | 488 | } |
398 | 489 | ||
399 | static void tegra2_bus_clk_disable(struct clk *c) | 490 | static void tegra2_bus_clk_disable(struct clk *c) |
400 | { | 491 | { |
401 | u32 val = clk_readl(c->reg); | 492 | u32 val; |
493 | unsigned long flags; | ||
494 | |||
495 | spin_lock_irqsave(&clock_register_lock, flags); | ||
496 | |||
497 | val = clk_readl(c->reg); | ||
402 | val |= BUS_CLK_DISABLE << c->reg_shift; | 498 | val |= BUS_CLK_DISABLE << c->reg_shift; |
403 | clk_writel(val, c->reg); | 499 | clk_writel(val, c->reg); |
500 | |||
501 | spin_unlock_irqrestore(&clock_register_lock, flags); | ||
404 | } | 502 | } |
405 | 503 | ||
406 | static int tegra2_bus_clk_set_rate(struct clk *c, unsigned long rate) | 504 | static int tegra2_bus_clk_set_rate(struct clk *c, unsigned long rate) |
407 | { | 505 | { |
408 | u32 val = clk_readl(c->reg); | 506 | u32 val; |
409 | unsigned long parent_rate = c->parent->rate; | 507 | unsigned long parent_rate = clk_get_rate(c->parent); |
508 | unsigned long flags; | ||
509 | int ret = -EINVAL; | ||
410 | int i; | 510 | int i; |
511 | |||
512 | spin_lock_irqsave(&clock_register_lock, flags); | ||
513 | |||
514 | val = clk_readl(c->reg); | ||
411 | for (i = 1; i <= 4; i++) { | 515 | for (i = 1; i <= 4; i++) { |
412 | if (rate == parent_rate / i) { | 516 | if (rate == parent_rate / i) { |
413 | val &= ~(BUS_CLK_DIV_MASK << c->reg_shift); | 517 | val &= ~(BUS_CLK_DIV_MASK << c->reg_shift); |
@@ -415,10 +519,14 @@ static int tegra2_bus_clk_set_rate(struct clk *c, unsigned long rate) | |||
415 | clk_writel(val, c->reg); | 519 | clk_writel(val, c->reg); |
416 | c->div = i; | 520 | c->div = i; |
417 | c->mul = 1; | 521 | c->mul = 1; |
418 | return 0; | 522 | ret = 0; |
523 | break; | ||
419 | } | 524 | } |
420 | } | 525 | } |
421 | return -EINVAL; | 526 | |
527 | spin_unlock_irqrestore(&clock_register_lock, flags); | ||
528 | |||
529 | return ret; | ||
422 | } | 530 | } |
423 | 531 | ||
424 | static struct clk_ops tegra_bus_ops = { | 532 | static struct clk_ops tegra_bus_ops = { |
@@ -428,24 +536,96 @@ static struct clk_ops tegra_bus_ops = { | |||
428 | .set_rate = tegra2_bus_clk_set_rate, | 536 | .set_rate = tegra2_bus_clk_set_rate, |
429 | }; | 537 | }; |
430 | 538 | ||
431 | /* PLL Functions */ | 539 | /* Blink output functions */ |
432 | static int tegra2_pll_clk_wait_for_lock(struct clk *c) | 540 | |
541 | static void tegra2_blink_clk_init(struct clk *c) | ||
433 | { | 542 | { |
434 | ktime_t before; | 543 | u32 val; |
435 | 544 | ||
436 | before = ktime_get(); | 545 | val = pmc_readl(PMC_CTRL); |
546 | c->state = (val & PMC_CTRL_BLINK_ENB) ? ON : OFF; | ||
547 | c->mul = 1; | ||
548 | val = pmc_readl(c->reg); | ||
549 | |||
550 | if (val & PMC_BLINK_TIMER_ENB) { | ||
551 | unsigned int on_off; | ||
552 | |||
553 | on_off = (val >> PMC_BLINK_TIMER_DATA_ON_SHIFT) & | ||
554 | PMC_BLINK_TIMER_DATA_ON_MASK; | ||
555 | val >>= PMC_BLINK_TIMER_DATA_OFF_SHIFT; | ||
556 | val &= PMC_BLINK_TIMER_DATA_OFF_MASK; | ||
557 | on_off += val; | ||
558 | /* each tick in the blink timer is 4 32KHz clocks */ | ||
559 | c->div = on_off * 4; | ||
560 | } else { | ||
561 | c->div = 1; | ||
562 | } | ||
563 | } | ||
437 | 564 | ||
438 | while (!(clk_readl(c->reg + PLL_BASE) & PLL_BASE_LOCK)) { | 565 | static int tegra2_blink_clk_enable(struct clk *c) |
439 | if (ktime_us_delta(ktime_get(), before) > 5000) { | 566 | { |
440 | pr_err("Timed out waiting for lock bit on pll %s", | 567 | u32 val; |
441 | c->name); | 568 | |
442 | return -1; | 569 | val = pmc_readl(PMC_DPD_PADS_ORIDE); |
443 | } | 570 | pmc_writel(val | PMC_DPD_PADS_ORIDE_BLINK_ENB, PMC_DPD_PADS_ORIDE); |
571 | |||
572 | val = pmc_readl(PMC_CTRL); | ||
573 | pmc_writel(val | PMC_CTRL_BLINK_ENB, PMC_CTRL); | ||
574 | |||
575 | return 0; | ||
576 | } | ||
577 | |||
578 | static void tegra2_blink_clk_disable(struct clk *c) | ||
579 | { | ||
580 | u32 val; | ||
581 | |||
582 | val = pmc_readl(PMC_CTRL); | ||
583 | pmc_writel(val & ~PMC_CTRL_BLINK_ENB, PMC_CTRL); | ||
584 | |||
585 | val = pmc_readl(PMC_DPD_PADS_ORIDE); | ||
586 | pmc_writel(val & ~PMC_DPD_PADS_ORIDE_BLINK_ENB, PMC_DPD_PADS_ORIDE); | ||
587 | } | ||
588 | |||
589 | static int tegra2_blink_clk_set_rate(struct clk *c, unsigned long rate) | ||
590 | { | ||
591 | unsigned long parent_rate = clk_get_rate(c->parent); | ||
592 | if (rate >= parent_rate) { | ||
593 | c->div = 1; | ||
594 | pmc_writel(0, c->reg); | ||
595 | } else { | ||
596 | unsigned int on_off; | ||
597 | u32 val; | ||
598 | |||
599 | on_off = DIV_ROUND_UP(parent_rate / 8, rate); | ||
600 | c->div = on_off * 8; | ||
601 | |||
602 | val = (on_off & PMC_BLINK_TIMER_DATA_ON_MASK) << | ||
603 | PMC_BLINK_TIMER_DATA_ON_SHIFT; | ||
604 | on_off &= PMC_BLINK_TIMER_DATA_OFF_MASK; | ||
605 | on_off <<= PMC_BLINK_TIMER_DATA_OFF_SHIFT; | ||
606 | val |= on_off; | ||
607 | val |= PMC_BLINK_TIMER_ENB; | ||
608 | pmc_writel(val, c->reg); | ||
444 | } | 609 | } |
445 | 610 | ||
446 | return 0; | 611 | return 0; |
447 | } | 612 | } |
448 | 613 | ||
614 | static struct clk_ops tegra_blink_clk_ops = { | ||
615 | .init = &tegra2_blink_clk_init, | ||
616 | .enable = &tegra2_blink_clk_enable, | ||
617 | .disable = &tegra2_blink_clk_disable, | ||
618 | .set_rate = &tegra2_blink_clk_set_rate, | ||
619 | }; | ||
620 | |||
621 | /* PLL Functions */ | ||
622 | static int tegra2_pll_clk_wait_for_lock(struct clk *c) | ||
623 | { | ||
624 | udelay(c->u.pll.lock_delay); | ||
625 | |||
626 | return 0; | ||
627 | } | ||
628 | |||
449 | static void tegra2_pll_clk_init(struct clk *c) | 629 | static void tegra2_pll_clk_init(struct clk *c) |
450 | { | 630 | { |
451 | u32 val = clk_readl(c->reg + PLL_BASE); | 631 | u32 val = clk_readl(c->reg + PLL_BASE); |
@@ -479,10 +659,6 @@ static int tegra2_pll_clk_enable(struct clk *c) | |||
479 | val |= PLL_BASE_ENABLE; | 659 | val |= PLL_BASE_ENABLE; |
480 | clk_writel(val, c->reg + PLL_BASE); | 660 | clk_writel(val, c->reg + PLL_BASE); |
481 | 661 | ||
482 | val = clk_readl(c->reg + PLL_MISC(c)); | ||
483 | val |= PLL_MISC_LOCK_ENABLE(c); | ||
484 | clk_writel(val, c->reg + PLL_MISC(c)); | ||
485 | |||
486 | tegra2_pll_clk_wait_for_lock(c); | 662 | tegra2_pll_clk_wait_for_lock(c); |
487 | 663 | ||
488 | return 0; | 664 | return 0; |
@@ -502,13 +678,12 @@ static int tegra2_pll_clk_set_rate(struct clk *c, unsigned long rate) | |||
502 | { | 678 | { |
503 | u32 val; | 679 | u32 val; |
504 | unsigned long input_rate; | 680 | unsigned long input_rate; |
505 | const struct clk_pll_table *sel; | 681 | const struct clk_pll_freq_table *sel; |
506 | 682 | ||
507 | pr_debug("%s: %s %lu\n", __func__, c->name, rate); | 683 | pr_debug("%s: %s %lu\n", __func__, c->name, rate); |
508 | BUG_ON(c->refcnt != 0); | ||
509 | 684 | ||
510 | input_rate = c->parent->rate; | 685 | input_rate = clk_get_rate(c->parent); |
511 | for (sel = c->pll_table; sel->input_rate != 0; sel++) { | 686 | for (sel = c->u.pll.freq_table; sel->input_rate != 0; sel++) { |
512 | if (sel->input_rate == input_rate && sel->output_rate == rate) { | 687 | if (sel->input_rate == input_rate && sel->output_rate == rate) { |
513 | c->mul = sel->n; | 688 | c->mul = sel->n; |
514 | c->div = sel->m * sel->p; | 689 | c->div = sel->m * sel->p; |
@@ -620,9 +795,11 @@ static int tegra2_pll_div_clk_enable(struct clk *c) | |||
620 | { | 795 | { |
621 | u32 val; | 796 | u32 val; |
622 | u32 new_val; | 797 | u32 new_val; |
798 | unsigned long flags; | ||
623 | 799 | ||
624 | pr_debug("%s: %s\n", __func__, c->name); | 800 | pr_debug("%s: %s\n", __func__, c->name); |
625 | if (c->flags & DIV_U71) { | 801 | if (c->flags & DIV_U71) { |
802 | spin_lock_irqsave(&clock_register_lock, flags); | ||
626 | val = clk_readl(c->reg); | 803 | val = clk_readl(c->reg); |
627 | new_val = val >> c->reg_shift; | 804 | new_val = val >> c->reg_shift; |
628 | new_val &= 0xFFFF; | 805 | new_val &= 0xFFFF; |
@@ -632,12 +809,15 @@ static int tegra2_pll_div_clk_enable(struct clk *c) | |||
632 | val &= ~(0xFFFF << c->reg_shift); | 809 | val &= ~(0xFFFF << c->reg_shift); |
633 | val |= new_val << c->reg_shift; | 810 | val |= new_val << c->reg_shift; |
634 | clk_writel(val, c->reg); | 811 | clk_writel(val, c->reg); |
812 | spin_unlock_irqrestore(&clock_register_lock, flags); | ||
635 | return 0; | 813 | return 0; |
636 | } else if (c->flags & DIV_2) { | 814 | } else if (c->flags & DIV_2) { |
637 | BUG_ON(!(c->flags & PLLD)); | 815 | BUG_ON(!(c->flags & PLLD)); |
816 | spin_lock_irqsave(&clock_register_lock, flags); | ||
638 | val = clk_readl(c->reg); | 817 | val = clk_readl(c->reg); |
639 | val &= ~PLLD_MISC_DIV_RST; | 818 | val &= ~PLLD_MISC_DIV_RST; |
640 | clk_writel(val, c->reg); | 819 | clk_writel(val, c->reg); |
820 | spin_unlock_irqrestore(&clock_register_lock, flags); | ||
641 | return 0; | 821 | return 0; |
642 | } | 822 | } |
643 | return -EINVAL; | 823 | return -EINVAL; |
@@ -647,9 +827,11 @@ static void tegra2_pll_div_clk_disable(struct clk *c) | |||
647 | { | 827 | { |
648 | u32 val; | 828 | u32 val; |
649 | u32 new_val; | 829 | u32 new_val; |
830 | unsigned long flags; | ||
650 | 831 | ||
651 | pr_debug("%s: %s\n", __func__, c->name); | 832 | pr_debug("%s: %s\n", __func__, c->name); |
652 | if (c->flags & DIV_U71) { | 833 | if (c->flags & DIV_U71) { |
834 | spin_lock_irqsave(&clock_register_lock, flags); | ||
653 | val = clk_readl(c->reg); | 835 | val = clk_readl(c->reg); |
654 | new_val = val >> c->reg_shift; | 836 | new_val = val >> c->reg_shift; |
655 | new_val &= 0xFFFF; | 837 | new_val &= 0xFFFF; |
@@ -659,11 +841,14 @@ static void tegra2_pll_div_clk_disable(struct clk *c) | |||
659 | val &= ~(0xFFFF << c->reg_shift); | 841 | val &= ~(0xFFFF << c->reg_shift); |
660 | val |= new_val << c->reg_shift; | 842 | val |= new_val << c->reg_shift; |
661 | clk_writel(val, c->reg); | 843 | clk_writel(val, c->reg); |
844 | spin_unlock_irqrestore(&clock_register_lock, flags); | ||
662 | } else if (c->flags & DIV_2) { | 845 | } else if (c->flags & DIV_2) { |
663 | BUG_ON(!(c->flags & PLLD)); | 846 | BUG_ON(!(c->flags & PLLD)); |
847 | spin_lock_irqsave(&clock_register_lock, flags); | ||
664 | val = clk_readl(c->reg); | 848 | val = clk_readl(c->reg); |
665 | val |= PLLD_MISC_DIV_RST; | 849 | val |= PLLD_MISC_DIV_RST; |
666 | clk_writel(val, c->reg); | 850 | clk_writel(val, c->reg); |
851 | spin_unlock_irqrestore(&clock_register_lock, flags); | ||
667 | } | 852 | } |
668 | } | 853 | } |
669 | 854 | ||
@@ -672,10 +857,14 @@ static int tegra2_pll_div_clk_set_rate(struct clk *c, unsigned long rate) | |||
672 | u32 val; | 857 | u32 val; |
673 | u32 new_val; | 858 | u32 new_val; |
674 | int divider_u71; | 859 | int divider_u71; |
860 | unsigned long parent_rate = clk_get_rate(c->parent); | ||
861 | unsigned long flags; | ||
862 | |||
675 | pr_debug("%s: %s %lu\n", __func__, c->name, rate); | 863 | pr_debug("%s: %s %lu\n", __func__, c->name, rate); |
676 | if (c->flags & DIV_U71) { | 864 | if (c->flags & DIV_U71) { |
677 | divider_u71 = clk_div71_get_divider(c->parent->rate, rate); | 865 | divider_u71 = clk_div71_get_divider(parent_rate, rate); |
678 | if (divider_u71 >= 0) { | 866 | if (divider_u71 >= 0) { |
867 | spin_lock_irqsave(&clock_register_lock, flags); | ||
679 | val = clk_readl(c->reg); | 868 | val = clk_readl(c->reg); |
680 | new_val = val >> c->reg_shift; | 869 | new_val = val >> c->reg_shift; |
681 | new_val &= 0xFFFF; | 870 | new_val &= 0xFFFF; |
@@ -689,10 +878,11 @@ static int tegra2_pll_div_clk_set_rate(struct clk *c, unsigned long rate) | |||
689 | clk_writel(val, c->reg); | 878 | clk_writel(val, c->reg); |
690 | c->div = divider_u71 + 2; | 879 | c->div = divider_u71 + 2; |
691 | c->mul = 2; | 880 | c->mul = 2; |
881 | spin_unlock_irqrestore(&clock_register_lock, flags); | ||
692 | return 0; | 882 | return 0; |
693 | } | 883 | } |
694 | } else if (c->flags & DIV_2) { | 884 | } else if (c->flags & DIV_2) { |
695 | if (c->parent->rate == rate * 2) | 885 | if (parent_rate == rate * 2) |
696 | return 0; | 886 | return 0; |
697 | } | 887 | } |
698 | return -EINVAL; | 888 | return -EINVAL; |
@@ -701,15 +891,16 @@ static int tegra2_pll_div_clk_set_rate(struct clk *c, unsigned long rate) | |||
701 | static long tegra2_pll_div_clk_round_rate(struct clk *c, unsigned long rate) | 891 | static long tegra2_pll_div_clk_round_rate(struct clk *c, unsigned long rate) |
702 | { | 892 | { |
703 | int divider; | 893 | int divider; |
894 | unsigned long parent_rate = clk_get_rate(c->parent); | ||
704 | pr_debug("%s: %s %lu\n", __func__, c->name, rate); | 895 | pr_debug("%s: %s %lu\n", __func__, c->name, rate); |
705 | 896 | ||
706 | if (c->flags & DIV_U71) { | 897 | if (c->flags & DIV_U71) { |
707 | divider = clk_div71_get_divider(c->parent->rate, rate); | 898 | divider = clk_div71_get_divider(parent_rate, rate); |
708 | if (divider < 0) | 899 | if (divider < 0) |
709 | return divider; | 900 | return divider; |
710 | return c->parent->rate * 2 / (divider + 2); | 901 | return DIV_ROUND_UP(parent_rate * 2, divider + 2); |
711 | } else if (c->flags & DIV_2) { | 902 | } else if (c->flags & DIV_2) { |
712 | return c->parent->rate / 2; | 903 | return DIV_ROUND_UP(parent_rate, 2); |
713 | } | 904 | } |
714 | return -EINVAL; | 905 | return -EINVAL; |
715 | } | 906 | } |
@@ -755,9 +946,14 @@ static void tegra2_periph_clk_init(struct clk *c) | |||
755 | } | 946 | } |
756 | 947 | ||
757 | c->state = ON; | 948 | c->state = ON; |
949 | |||
950 | if (!c->u.periph.clk_num) | ||
951 | return; | ||
952 | |||
758 | if (!(clk_readl(CLK_OUT_ENB + PERIPH_CLK_TO_ENB_REG(c)) & | 953 | if (!(clk_readl(CLK_OUT_ENB + PERIPH_CLK_TO_ENB_REG(c)) & |
759 | PERIPH_CLK_TO_ENB_BIT(c))) | 954 | PERIPH_CLK_TO_ENB_BIT(c))) |
760 | c->state = OFF; | 955 | c->state = OFF; |
956 | |||
761 | if (!(c->flags & PERIPH_NO_RESET)) | 957 | if (!(c->flags & PERIPH_NO_RESET)) |
762 | if (clk_readl(RST_DEVICES + PERIPH_CLK_TO_ENB_REG(c)) & | 958 | if (clk_readl(RST_DEVICES + PERIPH_CLK_TO_ENB_REG(c)) & |
763 | PERIPH_CLK_TO_ENB_BIT(c)) | 959 | PERIPH_CLK_TO_ENB_BIT(c)) |
@@ -767,8 +963,20 @@ static void tegra2_periph_clk_init(struct clk *c) | |||
767 | static int tegra2_periph_clk_enable(struct clk *c) | 963 | static int tegra2_periph_clk_enable(struct clk *c) |
768 | { | 964 | { |
769 | u32 val; | 965 | u32 val; |
966 | unsigned long flags; | ||
967 | int refcount; | ||
770 | pr_debug("%s on clock %s\n", __func__, c->name); | 968 | pr_debug("%s on clock %s\n", __func__, c->name); |
771 | 969 | ||
970 | if (!c->u.periph.clk_num) | ||
971 | return 0; | ||
972 | |||
973 | spin_lock_irqsave(&clock_register_lock, flags); | ||
974 | |||
975 | refcount = tegra_periph_clk_enable_refcount[c->u.periph.clk_num]++; | ||
976 | |||
977 | if (refcount > 1) | ||
978 | goto out; | ||
979 | |||
772 | clk_writel(PERIPH_CLK_TO_ENB_BIT(c), | 980 | clk_writel(PERIPH_CLK_TO_ENB_BIT(c), |
773 | CLK_OUT_ENB_SET + PERIPH_CLK_TO_ENB_SET_REG(c)); | 981 | CLK_OUT_ENB_SET + PERIPH_CLK_TO_ENB_SET_REG(c)); |
774 | if (!(c->flags & PERIPH_NO_RESET) && !(c->flags & PERIPH_MANUAL_RESET)) | 982 | if (!(c->flags & PERIPH_NO_RESET) && !(c->flags & PERIPH_MANUAL_RESET)) |
@@ -781,34 +989,48 @@ static int tegra2_periph_clk_enable(struct clk *c) | |||
781 | val |= 0x3 << 24; | 989 | val |= 0x3 << 24; |
782 | clk_writel(val, c->reg); | 990 | clk_writel(val, c->reg); |
783 | } | 991 | } |
992 | |||
993 | out: | ||
994 | spin_unlock_irqrestore(&clock_register_lock, flags); | ||
995 | |||
784 | return 0; | 996 | return 0; |
785 | } | 997 | } |
786 | 998 | ||
787 | static void tegra2_periph_clk_disable(struct clk *c) | 999 | static void tegra2_periph_clk_disable(struct clk *c) |
788 | { | 1000 | { |
1001 | unsigned long flags; | ||
1002 | |||
789 | pr_debug("%s on clock %s\n", __func__, c->name); | 1003 | pr_debug("%s on clock %s\n", __func__, c->name); |
790 | 1004 | ||
791 | clk_writel(PERIPH_CLK_TO_ENB_BIT(c), | 1005 | if (!c->u.periph.clk_num) |
792 | CLK_OUT_ENB_CLR + PERIPH_CLK_TO_ENB_SET_REG(c)); | 1006 | return; |
793 | } | ||
794 | 1007 | ||
795 | void tegra2_periph_reset_deassert(struct clk *c) | 1008 | spin_lock_irqsave(&clock_register_lock, flags); |
796 | { | 1009 | |
797 | pr_debug("%s on clock %s\n", __func__, c->name); | 1010 | if (c->refcnt) |
798 | if (!(c->flags & PERIPH_NO_RESET)) | 1011 | tegra_periph_clk_enable_refcount[c->u.periph.clk_num]--; |
1012 | |||
1013 | if (tegra_periph_clk_enable_refcount[c->u.periph.clk_num] == 0) | ||
799 | clk_writel(PERIPH_CLK_TO_ENB_BIT(c), | 1014 | clk_writel(PERIPH_CLK_TO_ENB_BIT(c), |
800 | RST_DEVICES_CLR + PERIPH_CLK_TO_ENB_SET_REG(c)); | 1015 | CLK_OUT_ENB_CLR + PERIPH_CLK_TO_ENB_SET_REG(c)); |
1016 | |||
1017 | spin_unlock_irqrestore(&clock_register_lock, flags); | ||
801 | } | 1018 | } |
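
Because several peripheral clocks can share one clk_num, the hardware enable bit is only touched on the 0-to-1 and 1-to-0 transitions of the per-bit refcount. A self-contained toy model of that behaviour (plain C, nothing Tegra-specific, and deliberately ignoring the locking shown above):

#include <stdio.h>

/* Toy model: two "clocks" sharing one hardware enable bit. The bit is
 * set on the first enable and cleared only when the last user disables. */
static int refcount[32];
static unsigned int enb_reg;		/* stands in for CLK_OUT_ENB */

static void periph_enable(int clk_num)
{
	if (refcount[clk_num]++ == 0)
		enb_reg |= 1u << clk_num;
}

static void periph_disable(int clk_num)
{
	if (--refcount[clk_num] == 0)
		enb_reg &= ~(1u << clk_num);
}

int main(void)
{
	periph_enable(7);	/* first user: bit 7 is set */
	periph_enable(7);	/* second user: register untouched */
	periph_disable(7);	/* one user left: bit stays set */
	printf("after one disable:  %#x\n", enb_reg);	/* prints 0x80 */
	periph_disable(7);	/* last user gone: bit cleared */
	printf("after last disable: %#x\n", enb_reg);	/* prints 0 */
	return 0;
}
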
802 | 1019 | ||
803 | void tegra2_periph_reset_assert(struct clk *c) | 1020 | static void tegra2_periph_clk_reset(struct clk *c, bool assert) |
804 | { | 1021 | { |
805 | pr_debug("%s on clock %s\n", __func__, c->name); | 1022 | unsigned long base = assert ? RST_DEVICES_SET : RST_DEVICES_CLR; |
1023 | |||
1024 | pr_debug("%s %s on clock %s\n", __func__, | ||
1025 | assert ? "assert" : "deassert", c->name); | ||
1026 | |||
1027 | BUG_ON(!c->u.periph.clk_num); | ||
1028 | |||
806 | if (!(c->flags & PERIPH_NO_RESET)) | 1029 | if (!(c->flags & PERIPH_NO_RESET)) |
807 | clk_writel(PERIPH_CLK_TO_ENB_BIT(c), | 1030 | clk_writel(PERIPH_CLK_TO_ENB_BIT(c), |
808 | RST_DEVICES_SET + PERIPH_CLK_TO_ENB_SET_REG(c)); | 1031 | base + PERIPH_CLK_TO_ENB_SET_REG(c)); |
809 | } | 1032 | } |
810 | 1033 | ||
811 | |||
812 | static int tegra2_periph_clk_set_parent(struct clk *c, struct clk *p) | 1034 | static int tegra2_periph_clk_set_parent(struct clk *c, struct clk *p) |
813 | { | 1035 | { |
814 | u32 val; | 1036 | u32 val; |
@@ -821,12 +1043,12 @@ static int tegra2_periph_clk_set_parent(struct clk *c, struct clk *p) | |||
821 | val |= (sel->value) << PERIPH_CLK_SOURCE_SHIFT; | 1043 | val |= (sel->value) << PERIPH_CLK_SOURCE_SHIFT; |
822 | 1044 | ||
823 | if (c->refcnt) | 1045 | if (c->refcnt) |
824 | clk_enable_locked(p); | 1046 | clk_enable(p); |
825 | 1047 | ||
826 | clk_writel(val, c->reg); | 1048 | clk_writel(val, c->reg); |
827 | 1049 | ||
828 | if (c->refcnt && c->parent) | 1050 | if (c->refcnt && c->parent) |
829 | clk_disable_locked(c->parent); | 1051 | clk_disable(c->parent); |
830 | 1052 | ||
831 | clk_reparent(c, p); | 1053 | clk_reparent(c, p); |
832 | return 0; | 1054 | return 0; |
@@ -840,9 +1062,10 @@ static int tegra2_periph_clk_set_rate(struct clk *c, unsigned long rate) | |||
840 | { | 1062 | { |
841 | u32 val; | 1063 | u32 val; |
842 | int divider; | 1064 | int divider; |
843 | pr_debug("%s: %lu\n", __func__, rate); | 1065 | unsigned long parent_rate = clk_get_rate(c->parent); |
1066 | |||
844 | if (c->flags & DIV_U71) { | 1067 | if (c->flags & DIV_U71) { |
845 | divider = clk_div71_get_divider(c->parent->rate, rate); | 1068 | divider = clk_div71_get_divider(parent_rate, rate); |
846 | if (divider >= 0) { | 1069 | if (divider >= 0) { |
847 | val = clk_readl(c->reg); | 1070 | val = clk_readl(c->reg); |
848 | val &= ~PERIPH_CLK_SOURCE_DIVU71_MASK; | 1071 | val &= ~PERIPH_CLK_SOURCE_DIVU71_MASK; |
@@ -853,7 +1076,7 @@ static int tegra2_periph_clk_set_rate(struct clk *c, unsigned long rate) | |||
853 | return 0; | 1076 | return 0; |
854 | } | 1077 | } |
855 | } else if (c->flags & DIV_U16) { | 1078 | } else if (c->flags & DIV_U16) { |
856 | divider = clk_div16_get_divider(c->parent->rate, rate); | 1079 | divider = clk_div16_get_divider(parent_rate, rate); |
857 | if (divider >= 0) { | 1080 | if (divider >= 0) { |
858 | val = clk_readl(c->reg); | 1081 | val = clk_readl(c->reg); |
859 | val &= ~PERIPH_CLK_SOURCE_DIVU16_MASK; | 1082 | val &= ~PERIPH_CLK_SOURCE_DIVU16_MASK; |
@@ -863,7 +1086,7 @@ static int tegra2_periph_clk_set_rate(struct clk *c, unsigned long rate) | |||
863 | c->mul = 1; | 1086 | c->mul = 1; |
864 | return 0; | 1087 | return 0; |
865 | } | 1088 | } |
866 | } else if (c->parent->rate <= rate) { | 1089 | } else if (parent_rate <= rate) { |
867 | c->div = 1; | 1090 | c->div = 1; |
868 | c->mul = 1; | 1091 | c->mul = 1; |
869 | return 0; | 1092 | return 0; |
@@ -875,19 +1098,20 @@ static long tegra2_periph_clk_round_rate(struct clk *c, | |||
875 | unsigned long rate) | 1098 | unsigned long rate) |
876 | { | 1099 | { |
877 | int divider; | 1100 | int divider; |
1101 | unsigned long parent_rate = clk_get_rate(c->parent); | ||
878 | pr_debug("%s: %s %lu\n", __func__, c->name, rate); | 1102 | pr_debug("%s: %s %lu\n", __func__, c->name, rate); |
879 | 1103 | ||
880 | if (c->flags & DIV_U71) { | 1104 | if (c->flags & DIV_U71) { |
881 | divider = clk_div71_get_divider(c->parent->rate, rate); | 1105 | divider = clk_div71_get_divider(parent_rate, rate); |
882 | if (divider < 0) | 1106 | if (divider < 0) |
883 | return divider; | 1107 | return divider; |
884 | 1108 | ||
885 | return c->parent->rate * 2 / (divider + 2); | 1109 | return DIV_ROUND_UP(parent_rate * 2, divider + 2); |
886 | } else if (c->flags & DIV_U16) { | 1110 | } else if (c->flags & DIV_U16) { |
887 | divider = clk_div16_get_divider(c->parent->rate, rate); | 1111 | divider = clk_div16_get_divider(parent_rate, rate); |
888 | if (divider < 0) | 1112 | if (divider < 0) |
889 | return divider; | 1113 | return divider; |
890 | return c->parent->rate / (divider + 1); | 1114 | return DIV_ROUND_UP(parent_rate, divider + 1); |
891 | } | 1115 | } |
892 | return -EINVAL; | 1116 | return -EINVAL; |
893 | } | 1117 | } |
@@ -899,6 +1123,71 @@ static struct clk_ops tegra_periph_clk_ops = { | |||
899 | .set_parent = &tegra2_periph_clk_set_parent, | 1123 | .set_parent = &tegra2_periph_clk_set_parent, |
900 | .set_rate = &tegra2_periph_clk_set_rate, | 1124 | .set_rate = &tegra2_periph_clk_set_rate, |
901 | .round_rate = &tegra2_periph_clk_round_rate, | 1125 | .round_rate = &tegra2_periph_clk_round_rate, |
1126 | .reset = &tegra2_periph_clk_reset, | ||
1127 | }; | ||
1128 | |||
1129 | /* The SDMMC controllers have extra bits in the clock source register that | ||
1130 | * adjust the delay between the clock and data to compensate for delays | ||
1131 | * on the PCB. */ | ||
1132 | void tegra2_sdmmc_tap_delay(struct clk *c, int delay) | ||
1133 | { | ||
1134 | u32 reg; | ||
1135 | |||
1136 | delay = clamp(delay, 0, 15); | ||
1137 | reg = clk_readl(c->reg); | ||
1138 | reg &= ~SDMMC_CLK_INT_FB_DLY_MASK; | ||
1139 | reg |= SDMMC_CLK_INT_FB_SEL; | ||
1140 | reg |= delay << SDMMC_CLK_INT_FB_DLY_SHIFT; | ||
1141 | clk_writel(reg, c->reg); | ||
1142 | } | ||
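
Board or SDHCI platform code is expected to call this after looking up the controller's clock; a hedged sketch (both the clock handle and the delay value here are placeholders to be tuned per board):

/* Sketch: apply a board-tuned tap delay to one SD/MMC controller clock.
 * Values outside 0..15 are clamped by the helper. */
static void example_set_sdmmc_tap(struct clk *sdmmc_clk)
{
	tegra2_sdmmc_tap_delay(sdmmc_clk, 9);	/* 9 is an arbitrary example */
}
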
1143 | |||
1144 | /* External memory controller clock ops */ | ||
1145 | static void tegra2_emc_clk_init(struct clk *c) | ||
1146 | { | ||
1147 | tegra2_periph_clk_init(c); | ||
1148 | c->max_rate = clk_get_rate_locked(c); | ||
1149 | } | ||
1150 | |||
1151 | static long tegra2_emc_clk_round_rate(struct clk *c, unsigned long rate) | ||
1152 | { | ||
1153 | long new_rate = rate; | ||
1154 | |||
1155 | new_rate = tegra_emc_round_rate(new_rate); | ||
1156 | if (new_rate < 0) | ||
1157 | return c->max_rate; | ||
1158 | |||
1159 | BUG_ON(new_rate != tegra2_periph_clk_round_rate(c, new_rate)); | ||
1160 | |||
1161 | return new_rate; | ||
1162 | } | ||
1163 | |||
1164 | static int tegra2_emc_clk_set_rate(struct clk *c, unsigned long rate) | ||
1165 | { | ||
1166 | int ret; | ||
1167 | /* | ||
1168 | * The Tegra2 memory controller has an interlock with the clock | ||
1169 | * block that allows memory shadowed registers to be updated, | ||
1170 | * and then transfer them to the main registers at the same | ||
1171 | * time as the clock update without glitches. | ||
1172 | */ | ||
1173 | ret = tegra_emc_set_rate(rate); | ||
1174 | if (ret < 0) | ||
1175 | return ret; | ||
1176 | |||
1177 | ret = tegra2_periph_clk_set_rate(c, rate); | ||
1178 | udelay(1); | ||
1179 | |||
1180 | return ret; | ||
1181 | } | ||
1182 | |||
1183 | static struct clk_ops tegra_emc_clk_ops = { | ||
1184 | .init = &tegra2_emc_clk_init, | ||
1185 | .enable = &tegra2_periph_clk_enable, | ||
1186 | .disable = &tegra2_periph_clk_disable, | ||
1187 | .set_parent = &tegra2_periph_clk_set_parent, | ||
1188 | .set_rate = &tegra2_emc_clk_set_rate, | ||
1189 | .round_rate = &tegra2_emc_clk_round_rate, | ||
1190 | .reset = &tegra2_periph_clk_reset, | ||
902 | }; | 1191 | }; |
903 | 1192 | ||
904 | /* Clock doubler ops */ | 1193 | /* Clock doubler ops */ |
@@ -907,6 +1196,10 @@ static void tegra2_clk_double_init(struct clk *c) | |||
907 | c->mul = 2; | 1196 | c->mul = 2; |
908 | c->div = 1; | 1197 | c->div = 1; |
909 | c->state = ON; | 1198 | c->state = ON; |
1199 | |||
1200 | if (!c->u.periph.clk_num) | ||
1201 | return; | ||
1202 | |||
910 | if (!(clk_readl(CLK_OUT_ENB + PERIPH_CLK_TO_ENB_REG(c)) & | 1203 | if (!(clk_readl(CLK_OUT_ENB + PERIPH_CLK_TO_ENB_REG(c)) & |
911 | PERIPH_CLK_TO_ENB_BIT(c))) | 1204 | PERIPH_CLK_TO_ENB_BIT(c))) |
912 | c->state = OFF; | 1205 | c->state = OFF; |
@@ -914,7 +1207,7 @@ static void tegra2_clk_double_init(struct clk *c) | |||
914 | 1207 | ||
915 | static int tegra2_clk_double_set_rate(struct clk *c, unsigned long rate) | 1208 | static int tegra2_clk_double_set_rate(struct clk *c, unsigned long rate) |
916 | { | 1209 | { |
917 | if (rate != 2 * c->parent->rate) | 1210 | if (rate != 2 * clk_get_rate(c->parent)) |
918 | return -EINVAL; | 1211 | return -EINVAL; |
919 | c->mul = 2; | 1212 | c->mul = 2; |
920 | c->div = 1; | 1213 | c->div = 1; |
@@ -928,6 +1221,7 @@ static struct clk_ops tegra_clk_double_ops = { | |||
928 | .set_rate = &tegra2_clk_double_set_rate, | 1221 | .set_rate = &tegra2_clk_double_set_rate, |
929 | }; | 1222 | }; |
930 | 1223 | ||
1224 | /* Audio sync clock ops */ | ||
931 | static void tegra2_audio_sync_clk_init(struct clk *c) | 1225 | static void tegra2_audio_sync_clk_init(struct clk *c) |
932 | { | 1226 | { |
933 | int source; | 1227 | int source; |
@@ -964,12 +1258,12 @@ static int tegra2_audio_sync_clk_set_parent(struct clk *c, struct clk *p) | |||
964 | val |= sel->value; | 1258 | val |= sel->value; |
965 | 1259 | ||
966 | if (c->refcnt) | 1260 | if (c->refcnt) |
967 | clk_enable_locked(p); | 1261 | clk_enable(p); |
968 | 1262 | ||
969 | clk_writel(val, c->reg); | 1263 | clk_writel(val, c->reg); |
970 | 1264 | ||
971 | if (c->refcnt && c->parent) | 1265 | if (c->refcnt && c->parent) |
972 | clk_disable_locked(c->parent); | 1266 | clk_disable(c->parent); |
973 | 1267 | ||
974 | clk_reparent(c, p); | 1268 | clk_reparent(c, p); |
975 | return 0; | 1269 | return 0; |
@@ -979,33 +1273,153 @@ static int tegra2_audio_sync_clk_set_parent(struct clk *c, struct clk *p) | |||
979 | return -EINVAL; | 1273 | return -EINVAL; |
980 | } | 1274 | } |
981 | 1275 | ||
982 | static int tegra2_audio_sync_clk_set_rate(struct clk *c, unsigned long rate) | ||
983 | { | ||
984 | unsigned long parent_rate; | ||
985 | if (!c->parent) { | ||
986 | pr_err("%s: clock has no parent\n", __func__); | ||
987 | return -EINVAL; | ||
988 | } | ||
989 | parent_rate = c->parent->rate; | ||
990 | if (rate != parent_rate) { | ||
991 | pr_err("%s: %s/%ld differs from parent %s/%ld\n", | ||
992 | __func__, | ||
993 | c->name, rate, | ||
994 | c->parent->name, parent_rate); | ||
995 | return -EINVAL; | ||
996 | } | ||
997 | c->rate = parent_rate; | ||
998 | return 0; | ||
999 | } | ||
1000 | |||
1001 | static struct clk_ops tegra_audio_sync_clk_ops = { | 1276 | static struct clk_ops tegra_audio_sync_clk_ops = { |
1002 | .init = tegra2_audio_sync_clk_init, | 1277 | .init = tegra2_audio_sync_clk_init, |
1003 | .enable = tegra2_audio_sync_clk_enable, | 1278 | .enable = tegra2_audio_sync_clk_enable, |
1004 | .disable = tegra2_audio_sync_clk_disable, | 1279 | .disable = tegra2_audio_sync_clk_disable, |
1005 | .set_rate = tegra2_audio_sync_clk_set_rate, | ||
1006 | .set_parent = tegra2_audio_sync_clk_set_parent, | 1280 | .set_parent = tegra2_audio_sync_clk_set_parent, |
1007 | }; | 1281 | }; |
1008 | 1282 | ||
1283 | /* cdev1 and cdev2 (dap_mclk1 and dap_mclk2) ops */ | ||
1284 | |||
1285 | static void tegra2_cdev_clk_init(struct clk *c) | ||
1286 | { | ||
1287 | /* We could un-tristate the cdev1 or cdev2 pingroup here; this is | ||
1288 | * currently done in the pinmux code. */ | ||
1289 | c->state = ON; | ||
1290 | |||
1291 | BUG_ON(!c->u.periph.clk_num); | ||
1292 | |||
1293 | if (!(clk_readl(CLK_OUT_ENB + PERIPH_CLK_TO_ENB_REG(c)) & | ||
1294 | PERIPH_CLK_TO_ENB_BIT(c))) | ||
1295 | c->state = OFF; | ||
1296 | } | ||
1297 | |||
1298 | static int tegra2_cdev_clk_enable(struct clk *c) | ||
1299 | { | ||
1300 | BUG_ON(!c->u.periph.clk_num); | ||
1301 | |||
1302 | clk_writel(PERIPH_CLK_TO_ENB_BIT(c), | ||
1303 | CLK_OUT_ENB_SET + PERIPH_CLK_TO_ENB_SET_REG(c)); | ||
1304 | return 0; | ||
1305 | } | ||
1306 | |||
1307 | static void tegra2_cdev_clk_disable(struct clk *c) | ||
1308 | { | ||
1309 | BUG_ON(!c->u.periph.clk_num); | ||
1310 | |||
1311 | clk_writel(PERIPH_CLK_TO_ENB_BIT(c), | ||
1312 | CLK_OUT_ENB_CLR + PERIPH_CLK_TO_ENB_SET_REG(c)); | ||
1313 | } | ||
1314 | |||
1315 | static struct clk_ops tegra_cdev_clk_ops = { | ||
1316 | .init = &tegra2_cdev_clk_init, | ||
1317 | .enable = &tegra2_cdev_clk_enable, | ||
1318 | .disable = &tegra2_cdev_clk_disable, | ||
1319 | }; | ||
1320 | |||
1321 | /* shared bus ops */ | ||
1322 | /* | ||
1323 | * Some clocks may have multiple downstream users that need to request a | ||
1324 | * higher clock rate. Shared bus clocks provide a unique shared_bus_user | ||
1325 | * clock to each user. The frequency of the bus is set to the highest | ||
1326 | * enabled shared_bus_user clock, with a minimum value set by the | ||
1327 | * shared bus. | ||
1328 | */ | ||
1329 | static int tegra_clk_shared_bus_update(struct clk *bus) | ||
1330 | { | ||
1331 | struct clk *c; | ||
1332 | unsigned long rate = bus->min_rate; | ||
1333 | |||
1334 | list_for_each_entry(c, &bus->shared_bus_list, u.shared_bus_user.node) | ||
1335 | if (c->u.shared_bus_user.enabled) | ||
1336 | rate = max(c->u.shared_bus_user.rate, rate); | ||
1337 | |||
1338 | if (rate == clk_get_rate_locked(bus)) | ||
1339 | return 0; | ||
1340 | |||
1341 | return clk_set_rate_locked(bus, rate); | ||
1342 | }; | ||
1343 | |||
1344 | static void tegra_clk_shared_bus_init(struct clk *c) | ||
1345 | { | ||
1346 | unsigned long flags; | ||
1347 | |||
1348 | c->max_rate = c->parent->max_rate; | ||
1349 | c->u.shared_bus_user.rate = c->parent->max_rate; | ||
1350 | c->state = OFF; | ||
1351 | c->set = true; | ||
1352 | |||
1353 | spin_lock_irqsave(&c->parent->spinlock, flags); | ||
1354 | |||
1355 | list_add_tail(&c->u.shared_bus_user.node, | ||
1356 | &c->parent->shared_bus_list); | ||
1357 | |||
1358 | spin_unlock_irqrestore(&c->parent->spinlock, flags); | ||
1359 | } | ||
1360 | |||
1361 | static int tegra_clk_shared_bus_set_rate(struct clk *c, unsigned long rate) | ||
1362 | { | ||
1363 | unsigned long flags; | ||
1364 | int ret; | ||
1365 | |||
1366 | rate = clk_round_rate(c->parent, rate); | ||
1367 | if (rate < 0) | ||
1368 | return rate; | ||
1369 | |||
1370 | spin_lock_irqsave(&c->parent->spinlock, flags); | ||
1371 | |||
1372 | c->u.shared_bus_user.rate = rate; | ||
1373 | ret = tegra_clk_shared_bus_update(c->parent); | ||
1374 | |||
1375 | spin_unlock_irqrestore(&c->parent->spinlock, flags); | ||
1376 | |||
1377 | return ret; | ||
1378 | } | ||
1379 | |||
1380 | static long tegra_clk_shared_bus_round_rate(struct clk *c, unsigned long rate) | ||
1381 | { | ||
1382 | return clk_round_rate(c->parent, rate); | ||
1383 | } | ||
1384 | |||
1385 | static int tegra_clk_shared_bus_enable(struct clk *c) | ||
1386 | { | ||
1387 | unsigned long flags; | ||
1388 | int ret; | ||
1389 | |||
1390 | spin_lock_irqsave(&c->parent->spinlock, flags); | ||
1391 | |||
1392 | c->u.shared_bus_user.enabled = true; | ||
1393 | ret = tegra_clk_shared_bus_update(c->parent); | ||
1394 | |||
1395 | spin_unlock_irqrestore(&c->parent->spinlock, flags); | ||
1396 | |||
1397 | return ret; | ||
1398 | } | ||
1399 | |||
1400 | static void tegra_clk_shared_bus_disable(struct clk *c) | ||
1401 | { | ||
1402 | unsigned long flags; | ||
1403 | int ret; | ||
1404 | |||
1405 | spin_lock_irqsave(&c->parent->spinlock, flags); | ||
1406 | |||
1407 | c->u.shared_bus_user.enabled = false; | ||
1408 | ret = tegra_clk_shared_bus_update(c->parent); | ||
1409 | WARN_ON_ONCE(ret); | ||
1410 | |||
1411 | spin_unlock_irqrestore(&c->parent->spinlock, flags); | ||
1412 | } | ||
1413 | |||
1414 | static struct clk_ops tegra_clk_shared_bus_ops = { | ||
1415 | .init = tegra_clk_shared_bus_init, | ||
1416 | .enable = tegra_clk_shared_bus_enable, | ||
1417 | .disable = tegra_clk_shared_bus_disable, | ||
1418 | .set_rate = tegra_clk_shared_bus_set_rate, | ||
1419 | .round_rate = tegra_clk_shared_bus_round_rate, | ||
1420 | }; | ||
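
The net effect is that each user only states its own requirement and the bus runs at the highest enabled request, never below the bus's min_rate. A hedged sketch with two hypothetical shared_bus_user clocks hanging off the same bus:

/* Sketch: two users of a shared bus clock. The handles are assumed to be
 * shared_bus_user children of one bus; the bus settles at
 * max(min_rate, highest enabled user request). */
static void example_shared_bus_users(struct clk *user_a, struct clk *user_b)
{
	clk_set_rate(user_a, 300000000);	/* record a 300 MHz request */
	clk_set_rate(user_b, 150000000);	/* record a 150 MHz request */

	clk_enable(user_a);			/* bus raised to at least 300 MHz */
	clk_enable(user_b);			/* no change, 300 MHz still wins */

	clk_disable(user_a);			/* bus may drop back to 150 MHz */
}
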
1421 | |||
1422 | |||
1009 | /* Clock definitions */ | 1423 | /* Clock definitions */ |
1010 | static struct clk tegra_clk_32k = { | 1424 | static struct clk tegra_clk_32k = { |
1011 | .name = "clk_32k", | 1425 | .name = "clk_32k", |
@@ -1014,7 +1428,7 @@ static struct clk tegra_clk_32k = { | |||
1014 | .max_rate = 32768, | 1428 | .max_rate = 32768, |
1015 | }; | 1429 | }; |
1016 | 1430 | ||
1017 | static struct clk_pll_table tegra_pll_s_table[] = { | 1431 | static struct clk_pll_freq_table tegra_pll_s_freq_table[] = { |
1018 | {32768, 12000000, 366, 1, 1, 0}, | 1432 | {32768, 12000000, 366, 1, 1, 0}, |
1019 | {32768, 13000000, 397, 1, 1, 0}, | 1433 | {32768, 13000000, 397, 1, 1, 0}, |
1020 | {32768, 19200000, 586, 1, 1, 0}, | 1434 | {32768, 19200000, 586, 1, 1, 0}, |
@@ -1026,16 +1440,19 @@ static struct clk tegra_pll_s = { | |||
1026 | .name = "pll_s", | 1440 | .name = "pll_s", |
1027 | .flags = PLL_ALT_MISC_REG, | 1441 | .flags = PLL_ALT_MISC_REG, |
1028 | .ops = &tegra_pll_ops, | 1442 | .ops = &tegra_pll_ops, |
1029 | .reg = 0xf0, | ||
1030 | .input_min = 32768, | ||
1031 | .input_max = 32768, | ||
1032 | .parent = &tegra_clk_32k, | 1443 | .parent = &tegra_clk_32k, |
1033 | .cf_min = 0, /* FIXME */ | ||
1034 | .cf_max = 0, /* FIXME */ | ||
1035 | .vco_min = 12000000, | ||
1036 | .vco_max = 26000000, | ||
1037 | .pll_table = tegra_pll_s_table, | ||
1038 | .max_rate = 26000000, | 1444 | .max_rate = 26000000, |
1445 | .reg = 0xf0, | ||
1446 | .u.pll = { | ||
1447 | .input_min = 32768, | ||
1448 | .input_max = 32768, | ||
1449 | .cf_min = 0, /* FIXME */ | ||
1450 | .cf_max = 0, /* FIXME */ | ||
1451 | .vco_min = 12000000, | ||
1452 | .vco_max = 26000000, | ||
1453 | .freq_table = tegra_pll_s_freq_table, | ||
1454 | .lock_delay = 300, | ||
1455 | }, | ||
1039 | }; | 1456 | }; |
1040 | 1457 | ||
1041 | static struct clk_mux_sel tegra_clk_m_sel[] = { | 1458 | static struct clk_mux_sel tegra_clk_m_sel[] = { |
@@ -1043,18 +1460,18 @@ static struct clk_mux_sel tegra_clk_m_sel[] = { | |||
1043 | { .input = &tegra_pll_s, .value = 1}, | 1460 | { .input = &tegra_pll_s, .value = 1}, |
1044 | { 0, 0}, | 1461 | { 0, 0}, |
1045 | }; | 1462 | }; |
1463 | |||
1046 | static struct clk tegra_clk_m = { | 1464 | static struct clk tegra_clk_m = { |
1047 | .name = "clk_m", | 1465 | .name = "clk_m", |
1048 | .flags = ENABLE_ON_INIT, | 1466 | .flags = ENABLE_ON_INIT, |
1049 | .ops = &tegra_clk_m_ops, | 1467 | .ops = &tegra_clk_m_ops, |
1050 | .inputs = tegra_clk_m_sel, | 1468 | .inputs = tegra_clk_m_sel, |
1051 | .reg = 0x1fc, | 1469 | .reg = 0x1fc, |
1052 | .reg_mask = (1<<28), | ||
1053 | .reg_shift = 28, | 1470 | .reg_shift = 28, |
1054 | .max_rate = 26000000, | 1471 | .max_rate = 26000000, |
1055 | }; | 1472 | }; |
1056 | 1473 | ||
1057 | static struct clk_pll_table tegra_pll_c_table[] = { | 1474 | static struct clk_pll_freq_table tegra_pll_c_freq_table[] = { |
1058 | { 0, 0, 0, 0, 0, 0 }, | 1475 | { 0, 0, 0, 0, 0, 0 }, |
1059 | }; | 1476 | }; |
1060 | 1477 | ||
@@ -1063,15 +1480,18 @@ static struct clk tegra_pll_c = { | |||
1063 | .flags = PLL_HAS_CPCON, | 1480 | .flags = PLL_HAS_CPCON, |
1064 | .ops = &tegra_pll_ops, | 1481 | .ops = &tegra_pll_ops, |
1065 | .reg = 0x80, | 1482 | .reg = 0x80, |
1066 | .input_min = 2000000, | ||
1067 | .input_max = 31000000, | ||
1068 | .parent = &tegra_clk_m, | 1483 | .parent = &tegra_clk_m, |
1069 | .cf_min = 1000000, | ||
1070 | .cf_max = 6000000, | ||
1071 | .vco_min = 20000000, | ||
1072 | .vco_max = 1400000000, | ||
1073 | .pll_table = tegra_pll_c_table, | ||
1074 | .max_rate = 600000000, | 1484 | .max_rate = 600000000, |
1485 | .u.pll = { | ||
1486 | .input_min = 2000000, | ||
1487 | .input_max = 31000000, | ||
1488 | .cf_min = 1000000, | ||
1489 | .cf_max = 6000000, | ||
1490 | .vco_min = 20000000, | ||
1491 | .vco_max = 1400000000, | ||
1492 | .freq_table = tegra_pll_c_freq_table, | ||
1493 | .lock_delay = 300, | ||
1494 | }, | ||
1075 | }; | 1495 | }; |
1076 | 1496 | ||
1077 | static struct clk tegra_pll_c_out1 = { | 1497 | static struct clk tegra_pll_c_out1 = { |
@@ -1084,7 +1504,7 @@ static struct clk tegra_pll_c_out1 = { | |||
1084 | .max_rate = 600000000, | 1504 | .max_rate = 600000000, |
1085 | }; | 1505 | }; |
1086 | 1506 | ||
1087 | static struct clk_pll_table tegra_pll_m_table[] = { | 1507 | static struct clk_pll_freq_table tegra_pll_m_freq_table[] = { |
1088 | { 12000000, 666000000, 666, 12, 1, 8}, | 1508 | { 12000000, 666000000, 666, 12, 1, 8}, |
1089 | { 13000000, 666000000, 666, 13, 1, 8}, | 1509 | { 13000000, 666000000, 666, 13, 1, 8}, |
1090 | { 19200000, 666000000, 555, 16, 1, 8}, | 1510 | { 19200000, 666000000, 555, 16, 1, 8}, |
@@ -1101,15 +1521,18 @@ static struct clk tegra_pll_m = { | |||
1101 | .flags = PLL_HAS_CPCON, | 1521 | .flags = PLL_HAS_CPCON, |
1102 | .ops = &tegra_pll_ops, | 1522 | .ops = &tegra_pll_ops, |
1103 | .reg = 0x90, | 1523 | .reg = 0x90, |
1104 | .input_min = 2000000, | ||
1105 | .input_max = 31000000, | ||
1106 | .parent = &tegra_clk_m, | 1524 | .parent = &tegra_clk_m, |
1107 | .cf_min = 1000000, | ||
1108 | .cf_max = 6000000, | ||
1109 | .vco_min = 20000000, | ||
1110 | .vco_max = 1200000000, | ||
1111 | .pll_table = tegra_pll_m_table, | ||
1112 | .max_rate = 800000000, | 1525 | .max_rate = 800000000, |
1526 | .u.pll = { | ||
1527 | .input_min = 2000000, | ||
1528 | .input_max = 31000000, | ||
1529 | .cf_min = 1000000, | ||
1530 | .cf_max = 6000000, | ||
1531 | .vco_min = 20000000, | ||
1532 | .vco_max = 1200000000, | ||
1533 | .freq_table = tegra_pll_m_freq_table, | ||
1534 | .lock_delay = 300, | ||
1535 | }, | ||
1113 | }; | 1536 | }; |
1114 | 1537 | ||
1115 | static struct clk tegra_pll_m_out1 = { | 1538 | static struct clk tegra_pll_m_out1 = { |
@@ -1122,7 +1545,7 @@ static struct clk tegra_pll_m_out1 = { | |||
1122 | .max_rate = 600000000, | 1545 | .max_rate = 600000000, |
1123 | }; | 1546 | }; |
1124 | 1547 | ||
1125 | static struct clk_pll_table tegra_pll_p_table[] = { | 1548 | static struct clk_pll_freq_table tegra_pll_p_freq_table[] = { |
1126 | { 12000000, 216000000, 432, 12, 2, 8}, | 1549 | { 12000000, 216000000, 432, 12, 2, 8}, |
1127 | { 13000000, 216000000, 432, 13, 2, 8}, | 1550 | { 13000000, 216000000, 432, 13, 2, 8}, |
1128 | { 19200000, 216000000, 90, 4, 2, 1}, | 1551 | { 19200000, 216000000, 90, 4, 2, 1}, |
@@ -1139,15 +1562,18 @@ static struct clk tegra_pll_p = { | |||
1139 | .flags = ENABLE_ON_INIT | PLL_FIXED | PLL_HAS_CPCON, | 1562 | .flags = ENABLE_ON_INIT | PLL_FIXED | PLL_HAS_CPCON, |
1140 | .ops = &tegra_pll_ops, | 1563 | .ops = &tegra_pll_ops, |
1141 | .reg = 0xa0, | 1564 | .reg = 0xa0, |
1142 | .input_min = 2000000, | ||
1143 | .input_max = 31000000, | ||
1144 | .parent = &tegra_clk_m, | 1565 | .parent = &tegra_clk_m, |
1145 | .cf_min = 1000000, | ||
1146 | .cf_max = 6000000, | ||
1147 | .vco_min = 20000000, | ||
1148 | .vco_max = 1400000000, | ||
1149 | .pll_table = tegra_pll_p_table, | ||
1150 | .max_rate = 432000000, | 1566 | .max_rate = 432000000, |
1567 | .u.pll = { | ||
1568 | .input_min = 2000000, | ||
1569 | .input_max = 31000000, | ||
1570 | .cf_min = 1000000, | ||
1571 | .cf_max = 6000000, | ||
1572 | .vco_min = 20000000, | ||
1573 | .vco_max = 1400000000, | ||
1574 | .freq_table = tegra_pll_p_freq_table, | ||
1575 | .lock_delay = 300, | ||
1576 | }, | ||
1151 | }; | 1577 | }; |
1152 | 1578 | ||
1153 | static struct clk tegra_pll_p_out1 = { | 1579 | static struct clk tegra_pll_p_out1 = { |
@@ -1190,11 +1616,9 @@ static struct clk tegra_pll_p_out4 = { | |||
1190 | .max_rate = 432000000, | 1616 | .max_rate = 432000000, |
1191 | }; | 1617 | }; |
1192 | 1618 | ||
1193 | static struct clk_pll_table tegra_pll_a_table[] = { | 1619 | static struct clk_pll_freq_table tegra_pll_a_freq_table[] = { |
1194 | { 28800000, 56448000, 49, 25, 1, 1}, | 1620 | { 28800000, 56448000, 49, 25, 1, 1}, |
1195 | { 28800000, 73728000, 64, 25, 1, 1}, | 1621 | { 28800000, 73728000, 64, 25, 1, 1}, |
1196 | { 28800000, 11289600, 49, 25, 1, 1}, | ||
1197 | { 28800000, 12288000, 64, 25, 1, 1}, | ||
1198 | { 28800000, 24000000, 5, 6, 1, 1}, | 1622 | { 28800000, 24000000, 5, 6, 1, 1}, |
1199 | { 0, 0, 0, 0, 0, 0 }, | 1623 | { 0, 0, 0, 0, 0, 0 }, |
1200 | }; | 1624 | }; |
@@ -1204,15 +1628,18 @@ static struct clk tegra_pll_a = { | |||
1204 | .flags = PLL_HAS_CPCON, | 1628 | .flags = PLL_HAS_CPCON, |
1205 | .ops = &tegra_pll_ops, | 1629 | .ops = &tegra_pll_ops, |
1206 | .reg = 0xb0, | 1630 | .reg = 0xb0, |
1207 | .input_min = 2000000, | ||
1208 | .input_max = 31000000, | ||
1209 | .parent = &tegra_pll_p_out1, | 1631 | .parent = &tegra_pll_p_out1, |
1210 | .cf_min = 1000000, | 1632 | .max_rate = 73728000, |
1211 | .cf_max = 6000000, | 1633 | .u.pll = { |
1212 | .vco_min = 20000000, | 1634 | .input_min = 2000000, |
1213 | .vco_max = 1400000000, | 1635 | .input_max = 31000000, |
1214 | .pll_table = tegra_pll_a_table, | 1636 | .cf_min = 1000000, |
1215 | .max_rate = 56448000, | 1637 | .cf_max = 6000000, |
1638 | .vco_min = 20000000, | ||
1639 | .vco_max = 1400000000, | ||
1640 | .freq_table = tegra_pll_a_freq_table, | ||
1641 | .lock_delay = 300, | ||
1642 | }, | ||
1216 | }; | 1643 | }; |
1217 | 1644 | ||
1218 | static struct clk tegra_pll_a_out0 = { | 1645 | static struct clk tegra_pll_a_out0 = { |
@@ -1222,14 +1649,25 @@ static struct clk tegra_pll_a_out0 = { | |||
1222 | .parent = &tegra_pll_a, | 1649 | .parent = &tegra_pll_a, |
1223 | .reg = 0xb4, | 1650 | .reg = 0xb4, |
1224 | .reg_shift = 0, | 1651 | .reg_shift = 0, |
1225 | .max_rate = 56448000, | 1652 | .max_rate = 73728000, |
1226 | }; | 1653 | }; |
1227 | 1654 | ||
1228 | static struct clk_pll_table tegra_pll_d_table[] = { | 1655 | static struct clk_pll_freq_table tegra_pll_d_freq_table[] = { |
1656 | { 12000000, 216000000, 216, 12, 1, 4}, | ||
1657 | { 13000000, 216000000, 216, 13, 1, 4}, | ||
1658 | { 19200000, 216000000, 135, 12, 1, 3}, | ||
1659 | { 26000000, 216000000, 216, 26, 1, 4}, | ||
1660 | |||
1661 | { 12000000, 594000000, 594, 12, 1, 8}, | ||
1662 | { 13000000, 594000000, 594, 13, 1, 8}, | ||
1663 | { 19200000, 594000000, 495, 16, 1, 8}, | ||
1664 | { 26000000, 594000000, 594, 26, 1, 8}, | ||
1665 | |||
1229 | { 12000000, 1000000000, 1000, 12, 1, 12}, | 1666 | { 12000000, 1000000000, 1000, 12, 1, 12}, |
1230 | { 13000000, 1000000000, 1000, 13, 1, 12}, | 1667 | { 13000000, 1000000000, 1000, 13, 1, 12}, |
1231 | { 19200000, 1000000000, 625, 12, 1, 8}, | 1668 | { 19200000, 1000000000, 625, 12, 1, 8}, |
1232 | { 26000000, 1000000000, 1000, 26, 1, 12}, | 1669 | { 26000000, 1000000000, 1000, 26, 1, 12}, |
1670 | |||
1233 | { 0, 0, 0, 0, 0, 0 }, | 1671 | { 0, 0, 0, 0, 0, 0 }, |
1234 | }; | 1672 | }; |
1235 | 1673 | ||
@@ -1238,15 +1676,18 @@ static struct clk tegra_pll_d = { | |||
1238 | .flags = PLL_HAS_CPCON | PLLD, | 1676 | .flags = PLL_HAS_CPCON | PLLD, |
1239 | .ops = &tegra_pll_ops, | 1677 | .ops = &tegra_pll_ops, |
1240 | .reg = 0xd0, | 1678 | .reg = 0xd0, |
1241 | .input_min = 2000000, | ||
1242 | .input_max = 40000000, | ||
1243 | .parent = &tegra_clk_m, | 1679 | .parent = &tegra_clk_m, |
1244 | .cf_min = 1000000, | ||
1245 | .cf_max = 6000000, | ||
1246 | .vco_min = 40000000, | ||
1247 | .vco_max = 1000000000, | ||
1248 | .pll_table = tegra_pll_d_table, | ||
1249 | .max_rate = 1000000000, | 1680 | .max_rate = 1000000000, |
1681 | .u.pll = { | ||
1682 | .input_min = 2000000, | ||
1683 | .input_max = 40000000, | ||
1684 | .cf_min = 1000000, | ||
1685 | .cf_max = 6000000, | ||
1686 | .vco_min = 40000000, | ||
1687 | .vco_max = 1000000000, | ||
1688 | .freq_table = tegra_pll_d_freq_table, | ||
1689 | .lock_delay = 1000, | ||
1690 | }, | ||
1250 | }; | 1691 | }; |
1251 | 1692 | ||
1252 | static struct clk tegra_pll_d_out0 = { | 1693 | static struct clk tegra_pll_d_out0 = { |
@@ -1257,7 +1698,7 @@ static struct clk tegra_pll_d_out0 = { | |||
1257 | .max_rate = 500000000, | 1698 | .max_rate = 500000000, |
1258 | }; | 1699 | }; |
1259 | 1700 | ||
1260 | static struct clk_pll_table tegra_pll_u_table[] = { | 1701 | static struct clk_pll_freq_table tegra_pll_u_freq_table[] = { |
1261 | { 12000000, 480000000, 960, 12, 2, 0}, | 1702 | { 12000000, 480000000, 960, 12, 2, 0}, |
1262 | { 13000000, 480000000, 960, 13, 2, 0}, | 1703 | { 13000000, 480000000, 960, 13, 2, 0}, |
1263 | { 19200000, 480000000, 200, 4, 2, 0}, | 1704 | { 19200000, 480000000, 200, 4, 2, 0}, |
@@ -1270,18 +1711,21 @@ static struct clk tegra_pll_u = { | |||
1270 | .flags = PLLU, | 1711 | .flags = PLLU, |
1271 | .ops = &tegra_pll_ops, | 1712 | .ops = &tegra_pll_ops, |
1272 | .reg = 0xc0, | 1713 | .reg = 0xc0, |
1273 | .input_min = 2000000, | ||
1274 | .input_max = 40000000, | ||
1275 | .parent = &tegra_clk_m, | 1714 | .parent = &tegra_clk_m, |
1276 | .cf_min = 1000000, | ||
1277 | .cf_max = 6000000, | ||
1278 | .vco_min = 480000000, | ||
1279 | .vco_max = 960000000, | ||
1280 | .pll_table = tegra_pll_u_table, | ||
1281 | .max_rate = 480000000, | 1715 | .max_rate = 480000000, |
1282 | }; | 1716 | .u.pll = { |
1283 | 1717 | .input_min = 2000000, | |
1284 | static struct clk_pll_table tegra_pll_x_table[] = { | 1718 | .input_max = 40000000, |
1719 | .cf_min = 1000000, | ||
1720 | .cf_max = 6000000, | ||
1721 | .vco_min = 480000000, | ||
1722 | .vco_max = 960000000, | ||
1723 | .freq_table = tegra_pll_u_freq_table, | ||
1724 | .lock_delay = 1000, | ||
1725 | }, | ||
1726 | }; | ||
1727 | |||
1728 | static struct clk_pll_freq_table tegra_pll_x_freq_table[] = { | ||
1285 | /* 1 GHz */ | 1729 | /* 1 GHz */ |
1286 | { 12000000, 1000000000, 1000, 12, 1, 12}, | 1730 | { 12000000, 1000000000, 1000, 12, 1, 12}, |
1287 | { 13000000, 1000000000, 1000, 13, 1, 12}, | 1731 | { 13000000, 1000000000, 1000, 13, 1, 12}, |
@@ -1307,10 +1751,10 @@ static struct clk_pll_table tegra_pll_x_table[] = { | |||
1307 | { 26000000, 760000000, 760, 26, 1, 12}, | 1751 | { 26000000, 760000000, 760, 26, 1, 12}, |
1308 | 1752 | ||
1309 | /* 608 MHz */ | 1753 | /* 608 MHz */ |
1310 | { 12000000, 608000000, 760, 12, 1, 12}, | 1754 | { 12000000, 608000000, 608, 12, 1, 12}, |
1311 | { 13000000, 608000000, 760, 13, 1, 12}, | 1755 | { 13000000, 608000000, 608, 13, 1, 12}, |
1312 | { 19200000, 608000000, 380, 12, 1, 8}, | 1756 | { 19200000, 608000000, 380, 12, 1, 8}, |
1313 | { 26000000, 608000000, 760, 26, 1, 12}, | 1757 | { 26000000, 608000000, 608, 26, 1, 12}, |
1314 | 1758 | ||
1315 | /* 456 MHz */ | 1759 | /* 456 MHz */ |
1316 | { 12000000, 456000000, 456, 12, 1, 12}, | 1760 | { 12000000, 456000000, 456, 12, 1, 12}, |
@@ -1332,18 +1776,21 @@ static struct clk tegra_pll_x = { | |||
1332 | .flags = PLL_HAS_CPCON | PLL_ALT_MISC_REG, | 1776 | .flags = PLL_HAS_CPCON | PLL_ALT_MISC_REG, |
1333 | .ops = &tegra_pllx_ops, | 1777 | .ops = &tegra_pllx_ops, |
1334 | .reg = 0xe0, | 1778 | .reg = 0xe0, |
1335 | .input_min = 2000000, | ||
1336 | .input_max = 31000000, | ||
1337 | .parent = &tegra_clk_m, | 1779 | .parent = &tegra_clk_m, |
1338 | .cf_min = 1000000, | ||
1339 | .cf_max = 6000000, | ||
1340 | .vco_min = 20000000, | ||
1341 | .vco_max = 1200000000, | ||
1342 | .pll_table = tegra_pll_x_table, | ||
1343 | .max_rate = 1000000000, | 1780 | .max_rate = 1000000000, |
1344 | }; | 1781 | .u.pll = { |
1345 | 1782 | .input_min = 2000000, | |
1346 | static struct clk_pll_table tegra_pll_e_table[] = { | 1783 | .input_max = 31000000, |
1784 | .cf_min = 1000000, | ||
1785 | .cf_max = 6000000, | ||
1786 | .vco_min = 20000000, | ||
1787 | .vco_max = 1200000000, | ||
1788 | .freq_table = tegra_pll_x_freq_table, | ||
1789 | .lock_delay = 300, | ||
1790 | }, | ||
1791 | }; | ||
1792 | |||
1793 | static struct clk_pll_freq_table tegra_pll_e_freq_table[] = { | ||
1347 | { 12000000, 100000000, 200, 24, 1, 0 }, | 1794 | { 12000000, 100000000, 200, 24, 1, 0 }, |
1348 | { 0, 0, 0, 0, 0, 0 }, | 1795 | { 0, 0, 0, 0, 0, 0 }, |
1349 | }; | 1796 | }; |
@@ -1352,23 +1799,49 @@ static struct clk tegra_pll_e = { | |||
1352 | .name = "pll_e", | 1799 | .name = "pll_e", |
1353 | .flags = PLL_ALT_MISC_REG, | 1800 | .flags = PLL_ALT_MISC_REG, |
1354 | .ops = &tegra_plle_ops, | 1801 | .ops = &tegra_plle_ops, |
1355 | .input_min = 12000000, | ||
1356 | .input_max = 12000000, | ||
1357 | .max_rate = 100000000, | ||
1358 | .parent = &tegra_clk_m, | 1802 | .parent = &tegra_clk_m, |
1359 | .reg = 0xe8, | 1803 | .reg = 0xe8, |
1360 | .pll_table = tegra_pll_e_table, | 1804 | .max_rate = 100000000, |
1805 | .u.pll = { | ||
1806 | .input_min = 12000000, | ||
1807 | .input_max = 12000000, | ||
1808 | .freq_table = tegra_pll_e_freq_table, | ||
1809 | }, | ||
1361 | }; | 1810 | }; |
1362 | 1811 | ||
1363 | static struct clk tegra_clk_d = { | 1812 | static struct clk tegra_clk_d = { |
1364 | .name = "clk_d", | 1813 | .name = "clk_d", |
1365 | .flags = PERIPH_NO_RESET, | 1814 | .flags = PERIPH_NO_RESET, |
1366 | .ops = &tegra_clk_double_ops, | 1815 | .ops = &tegra_clk_double_ops, |
1367 | .clk_num = 90, | ||
1368 | .reg = 0x34, | 1816 | .reg = 0x34, |
1369 | .reg_shift = 12, | 1817 | .reg_shift = 12, |
1370 | .parent = &tegra_clk_m, | 1818 | .parent = &tegra_clk_m, |
1371 | .max_rate = 52000000, | 1819 | .max_rate = 52000000, |
1820 | .u.periph = { | ||
1821 | .clk_num = 90, | ||
1822 | }, | ||
1823 | }; | ||
1824 | |||
1825 | /* dap_mclk1, belongs to the cdev1 pingroup. */ | ||
1826 | static struct clk tegra_clk_cdev1 = { | ||
1827 | .name = "cdev1", | ||
1828 | .ops = &tegra_cdev_clk_ops, | ||
1829 | .rate = 26000000, | ||
1830 | .max_rate = 26000000, | ||
1831 | .u.periph = { | ||
1832 | .clk_num = 94, | ||
1833 | }, | ||
1834 | }; | ||
1835 | |||
1836 | /* dap_mclk2, belongs to the cdev2 pingroup. */ | ||
1837 | static struct clk tegra_clk_cdev2 = { | ||
1838 | .name = "cdev2", | ||
1839 | .ops = &tegra_cdev_clk_ops, | ||
1840 | .rate = 26000000, | ||
1841 | .max_rate = 26000000, | ||
1842 | .u.periph = { | ||
1843 | .clk_num = 93, | ||
1844 | }, | ||
1372 | }; | 1845 | }; |
1373 | 1846 | ||
1374 | /* initialized before peripheral clocks */ | 1847 | /* initialized before peripheral clocks */ |
@@ -1394,7 +1867,7 @@ static struct clk tegra_clk_audio = { | |||
1394 | .name = "audio", | 1867 | .name = "audio", |
1395 | .inputs = mux_audio_sync_clk, | 1868 | .inputs = mux_audio_sync_clk, |
1396 | .reg = 0x38, | 1869 | .reg = 0x38, |
1397 | .max_rate = 24000000, | 1870 | .max_rate = 73728000, |
1398 | .ops = &tegra_audio_sync_clk_ops | 1871 | .ops = &tegra_audio_sync_clk_ops |
1399 | }; | 1872 | }; |
1400 | 1873 | ||
@@ -1403,10 +1876,12 @@ static struct clk tegra_clk_audio_2x = { | |||
1403 | .flags = PERIPH_NO_RESET, | 1876 | .flags = PERIPH_NO_RESET, |
1404 | .max_rate = 48000000, | 1877 | .max_rate = 48000000, |
1405 | .ops = &tegra_clk_double_ops, | 1878 | .ops = &tegra_clk_double_ops, |
1406 | .clk_num = 89, | ||
1407 | .reg = 0x34, | 1879 | .reg = 0x34, |
1408 | .reg_shift = 8, | 1880 | .reg_shift = 8, |
1409 | .parent = &tegra_clk_audio, | 1881 | .parent = &tegra_clk_audio, |
1882 | .u.periph = { | ||
1883 | .clk_num = 89, | ||
1884 | }, | ||
1410 | }; | 1885 | }; |
1411 | 1886 | ||
1412 | struct clk_lookup tegra_audio_clk_lookups[] = { | 1887 | struct clk_lookup tegra_audio_clk_lookups[] = { |
@@ -1478,17 +1953,26 @@ static struct clk tegra_clk_sclk = { | |||
1478 | .inputs = mux_sclk, | 1953 | .inputs = mux_sclk, |
1479 | .reg = 0x28, | 1954 | .reg = 0x28, |
1480 | .ops = &tegra_super_ops, | 1955 | .ops = &tegra_super_ops, |
1481 | .max_rate = 600000000, | 1956 | .max_rate = 240000000, |
1957 | .min_rate = 120000000, | ||
1482 | }; | 1958 | }; |
1483 | 1959 | ||
1484 | static struct clk tegra_clk_virtual_cpu = { | 1960 | static struct clk tegra_clk_virtual_cpu = { |
1485 | .name = "cpu", | 1961 | .name = "cpu", |
1486 | .parent = &tegra_clk_cclk, | 1962 | .parent = &tegra_clk_cclk, |
1487 | .main = &tegra_pll_x, | ||
1488 | .backup = &tegra_clk_m, | ||
1489 | .ops = &tegra_cpu_ops, | 1963 | .ops = &tegra_cpu_ops, |
1490 | .max_rate = 1000000000, | 1964 | .max_rate = 1000000000, |
1491 | .dvfs = &tegra_dvfs_virtual_cpu_dvfs, | 1965 | .u.cpu = { |
1966 | .main = &tegra_pll_x, | ||
1967 | .backup = &tegra_pll_p, | ||
1968 | }, | ||
1969 | }; | ||
1970 | |||
1971 | static struct clk tegra_clk_cop = { | ||
1972 | .name = "cop", | ||
1973 | .parent = &tegra_clk_sclk, | ||
1974 | .ops = &tegra_cop_ops, | ||
1975 | .max_rate = 240000000, | ||
1492 | }; | 1976 | }; |
1493 | 1977 | ||
1494 | static struct clk tegra_clk_hclk = { | 1978 | static struct clk tegra_clk_hclk = { |
@@ -1508,7 +1992,15 @@ static struct clk tegra_clk_pclk = { | |||
1508 | .reg = 0x30, | 1992 | .reg = 0x30, |
1509 | .reg_shift = 0, | 1993 | .reg_shift = 0, |
1510 | .ops = &tegra_bus_ops, | 1994 | .ops = &tegra_bus_ops, |
1511 | .max_rate = 108000000, | 1995 | .max_rate = 120000000, |
1996 | }; | ||
1997 | |||
1998 | static struct clk tegra_clk_blink = { | ||
1999 | .name = "blink", | ||
2000 | .parent = &tegra_clk_32k, | ||
2001 | .reg = 0x40, | ||
2002 | .ops = &tegra_blink_clk_ops, | ||
2003 | .max_rate = 32768, | ||
1512 | }; | 2004 | }; |
1513 | 2005 | ||
1514 | static struct clk_mux_sel mux_pllm_pllc_pllp_plla[] = { | 2006 | static struct clk_mux_sel mux_pllm_pllc_pllp_plla[] = { |
@@ -1587,6 +2079,23 @@ static struct clk_mux_sel mux_clk_32k[] = { | |||
1587 | { 0, 0}, | 2079 | { 0, 0}, |
1588 | }; | 2080 | }; |
1589 | 2081 | ||
2082 | static struct clk_mux_sel mux_pclk[] = { | ||
2083 | { .input = &tegra_clk_pclk, .value = 0}, | ||
2084 | { 0, 0}, | ||
2085 | }; | ||
2086 | |||
2087 | static struct clk tegra_clk_emc = { | ||
2088 | .name = "emc", | ||
2089 | .ops = &tegra_emc_clk_ops, | ||
2090 | .reg = 0x19c, | ||
2091 | .max_rate = 800000000, | ||
2092 | .inputs = mux_pllm_pllc_pllp_clkm, | ||
2093 | .flags = MUX | DIV_U71 | PERIPH_EMC_ENB, | ||
2094 | .u.periph = { | ||
2095 | .clk_num = 57, | ||
2096 | }, | ||
2097 | }; | ||
2098 | |||
1590 | #define PERIPH_CLK(_name, _dev, _con, _clk_num, _reg, _max, _inputs, _flags) \ | 2099 | #define PERIPH_CLK(_name, _dev, _con, _clk_num, _reg, _max, _inputs, _flags) \ |
1591 | { \ | 2100 | { \ |
1592 | .name = _name, \ | 2101 | .name = _name, \ |
@@ -1595,19 +2104,32 @@ static struct clk_mux_sel mux_clk_32k[] = { | |||
1595 | .con_id = _con, \ | 2104 | .con_id = _con, \ |
1596 | }, \ | 2105 | }, \ |
1597 | .ops = &tegra_periph_clk_ops, \ | 2106 | .ops = &tegra_periph_clk_ops, \ |
1598 | .clk_num = _clk_num, \ | ||
1599 | .reg = _reg, \ | 2107 | .reg = _reg, \ |
1600 | .inputs = _inputs, \ | 2108 | .inputs = _inputs, \ |
1601 | .flags = _flags, \ | 2109 | .flags = _flags, \ |
1602 | .max_rate = _max, \ | 2110 | .max_rate = _max, \ |
2111 | .u.periph = { \ | ||
2112 | .clk_num = _clk_num, \ | ||
2113 | }, \ | ||
2114 | } | ||
2115 | |||
2116 | #define SHARED_CLK(_name, _dev, _con, _parent) \ | ||
2117 | { \ | ||
2118 | .name = _name, \ | ||
2119 | .lookup = { \ | ||
2120 | .dev_id = _dev, \ | ||
2121 | .con_id = _con, \ | ||
2122 | }, \ | ||
2123 | .ops = &tegra_clk_shared_bus_ops, \ | ||
2124 | .parent = _parent, \ | ||
1603 | } | 2125 | } |
1604 | 2126 | ||
1605 | struct clk tegra_periph_clks[] = { | 2127 | struct clk tegra_list_clks[] = { |
2128 | PERIPH_CLK("apbdma", "tegra-dma", NULL, 34, 0, 108000000, mux_pclk, 0), | ||
1606 | PERIPH_CLK("rtc", "rtc-tegra", NULL, 4, 0, 32768, mux_clk_32k, PERIPH_NO_RESET), | 2129 | PERIPH_CLK("rtc", "rtc-tegra", NULL, 4, 0, 32768, mux_clk_32k, PERIPH_NO_RESET), |
1607 | PERIPH_CLK("timer", "timer", NULL, 5, 0, 26000000, mux_clk_m, 0), | 2130 | PERIPH_CLK("timer", "timer", NULL, 5, 0, 26000000, mux_clk_m, 0), |
1608 | PERIPH_CLK("i2s1", "i2s.0", NULL, 11, 0x100, 26000000, mux_pllaout0_audio2x_pllp_clkm, MUX | DIV_U71), | 2131 | PERIPH_CLK("i2s1", "tegra-i2s.0", NULL, 11, 0x100, 26000000, mux_pllaout0_audio2x_pllp_clkm, MUX | DIV_U71), |
1609 | PERIPH_CLK("i2s2", "i2s.1", NULL, 18, 0x104, 26000000, mux_pllaout0_audio2x_pllp_clkm, MUX | DIV_U71), | 2132 | PERIPH_CLK("i2s2", "tegra-i2s.1", NULL, 18, 0x104, 26000000, mux_pllaout0_audio2x_pllp_clkm, MUX | DIV_U71), |
1610 | /* FIXME: spdif has 2 clocks but 1 enable */ | ||
1611 | PERIPH_CLK("spdif_out", "spdif_out", NULL, 10, 0x108, 100000000, mux_pllaout0_audio2x_pllp_clkm, MUX | DIV_U71), | 2133 | PERIPH_CLK("spdif_out", "spdif_out", NULL, 10, 0x108, 100000000, mux_pllaout0_audio2x_pllp_clkm, MUX | DIV_U71), |
1612 | PERIPH_CLK("spdif_in", "spdif_in", NULL, 10, 0x10c, 100000000, mux_pllp_pllc_pllm, MUX | DIV_U71), | 2134 | PERIPH_CLK("spdif_in", "spdif_in", NULL, 10, 0x10c, 100000000, mux_pllp_pllc_pllm, MUX | DIV_U71), |
1613 | PERIPH_CLK("pwm", "pwm", NULL, 17, 0x110, 432000000, mux_pllp_pllc_audio_clkm_clk32, MUX | DIV_U71), | 2135 | PERIPH_CLK("pwm", "pwm", NULL, 17, 0x110, 432000000, mux_pllp_pllc_audio_clkm_clk32, MUX | DIV_U71), |
@@ -1620,13 +2142,15 @@ struct clk tegra_periph_clks[] = { | |||
1620 | PERIPH_CLK("sbc4", "spi_tegra.3", NULL, 68, 0x1b4, 160000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), | 2142 | PERIPH_CLK("sbc4", "spi_tegra.3", NULL, 68, 0x1b4, 160000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), |
1621 | PERIPH_CLK("ide", "ide", NULL, 25, 0x144, 100000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* requires min voltage */ | 2143 | PERIPH_CLK("ide", "ide", NULL, 25, 0x144, 100000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* requires min voltage */ |
1622 | PERIPH_CLK("ndflash", "tegra_nand", NULL, 13, 0x160, 164000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */ | 2144 | PERIPH_CLK("ndflash", "tegra_nand", NULL, 13, 0x160, 164000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */ |
1623 | /* FIXME: vfir shares an enable with uartb */ | ||
1624 | PERIPH_CLK("vfir", "vfir", NULL, 7, 0x168, 72000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), | 2145 | PERIPH_CLK("vfir", "vfir", NULL, 7, 0x168, 72000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), |
1625 | PERIPH_CLK("sdmmc1", "sdhci-tegra.0", NULL, 14, 0x150, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */ | 2146 | PERIPH_CLK("sdmmc1", "sdhci-tegra.0", NULL, 14, 0x150, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */ |
1626 | PERIPH_CLK("sdmmc2", "sdhci-tegra.1", NULL, 9, 0x154, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */ | 2147 | PERIPH_CLK("sdmmc2", "sdhci-tegra.1", NULL, 9, 0x154, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */ |
1627 | PERIPH_CLK("sdmmc3", "sdhci-tegra.2", NULL, 69, 0x1bc, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */ | 2148 | PERIPH_CLK("sdmmc3", "sdhci-tegra.2", NULL, 69, 0x1bc, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */ |
1628 | PERIPH_CLK("sdmmc4", "sdhci-tegra.3", NULL, 15, 0x160, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */ | 2149 | PERIPH_CLK("sdmmc4", "sdhci-tegra.3", NULL, 15, 0x164, 52000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage */ |
1629 | PERIPH_CLK("vde", "vde", NULL, 61, 0x1c8, 250000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage and process_id */ | 2150 | PERIPH_CLK("vcp", "tegra-avp", "vcp", 29, 0, 250000000, mux_clk_m, 0), |
2151 | PERIPH_CLK("bsea", "tegra-avp", "bsea", 62, 0, 250000000, mux_clk_m, 0), | ||
2152 | PERIPH_CLK("bsev", "tegra-aes", "bsev", 63, 0, 250000000, mux_clk_m, 0), | ||
2153 | PERIPH_CLK("vde", "tegra-avp", "vde", 61, 0x1c8, 250000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* scales with voltage and process_id */ | ||
1630 | PERIPH_CLK("csite", "csite", NULL, 73, 0x1d4, 144000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* max rate ??? */ | 2154 | PERIPH_CLK("csite", "csite", NULL, 73, 0x1d4, 144000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), /* max rate ??? */ |
1631 | /* FIXME: what is la? */ | 2155 | /* FIXME: what is la? */ |
1632 | PERIPH_CLK("la", "la", NULL, 76, 0x1f8, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), | 2156 | PERIPH_CLK("la", "la", NULL, 76, 0x1f8, 26000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71), |
@@ -1641,37 +2165,46 @@ struct clk tegra_periph_clks[] = { | |||
1641 | PERIPH_CLK("i2c2_i2c", "tegra-i2c.1", "i2c", 0, 0, 72000000, mux_pllp_out3, 0), | 2165 | PERIPH_CLK("i2c2_i2c", "tegra-i2c.1", "i2c", 0, 0, 72000000, mux_pllp_out3, 0), |
1642 | PERIPH_CLK("i2c3_i2c", "tegra-i2c.2", "i2c", 0, 0, 72000000, mux_pllp_out3, 0), | 2166 | PERIPH_CLK("i2c3_i2c", "tegra-i2c.2", "i2c", 0, 0, 72000000, mux_pllp_out3, 0), |
1643 | PERIPH_CLK("dvc_i2c", "tegra-i2c.3", "i2c", 0, 0, 72000000, mux_pllp_out3, 0), | 2167 | PERIPH_CLK("dvc_i2c", "tegra-i2c.3", "i2c", 0, 0, 72000000, mux_pllp_out3, 0), |
1644 | PERIPH_CLK("uarta", "uart.0", NULL, 6, 0x178, 216000000, mux_pllp_pllc_pllm_clkm, MUX), | 2168 | PERIPH_CLK("uarta", "uart.0", NULL, 6, 0x178, 600000000, mux_pllp_pllc_pllm_clkm, MUX), |
1645 | PERIPH_CLK("uartb", "uart.1", NULL, 7, 0x17c, 216000000, mux_pllp_pllc_pllm_clkm, MUX), | 2169 | PERIPH_CLK("uartb", "uart.1", NULL, 7, 0x17c, 600000000, mux_pllp_pllc_pllm_clkm, MUX), |
1646 | PERIPH_CLK("uartc", "uart.2", NULL, 55, 0x1a0, 216000000, mux_pllp_pllc_pllm_clkm, MUX), | 2170 | PERIPH_CLK("uartc", "uart.2", NULL, 55, 0x1a0, 600000000, mux_pllp_pllc_pllm_clkm, MUX), |
1647 | PERIPH_CLK("uartd", "uart.3", NULL, 65, 0x1c0, 216000000, mux_pllp_pllc_pllm_clkm, MUX), | 2171 | PERIPH_CLK("uartd", "uart.3", NULL, 65, 0x1c0, 600000000, mux_pllp_pllc_pllm_clkm, MUX), |
1648 | PERIPH_CLK("uarte", "uart.4", NULL, 66, 0x1c4, 216000000, mux_pllp_pllc_pllm_clkm, MUX), | 2172 | PERIPH_CLK("uarte", "uart.4", NULL, 66, 0x1c4, 600000000, mux_pllp_pllc_pllm_clkm, MUX), |
1649 | PERIPH_CLK("3d", "3d", NULL, 24, 0x158, 300000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | PERIPH_MANUAL_RESET), /* scales with voltage and process_id */ | 2173 | PERIPH_CLK("3d", "3d", NULL, 24, 0x158, 300000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | PERIPH_MANUAL_RESET), /* scales with voltage and process_id */ |
1650 | PERIPH_CLK("2d", "2d", NULL, 21, 0x15c, 300000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */ | 2174 | PERIPH_CLK("2d", "2d", NULL, 21, 0x15c, 300000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */ |
1651 | /* FIXME: vi and vi_sensor share an enable */ | 2175 | PERIPH_CLK("vi", "tegra_camera", "vi", 20, 0x148, 150000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */ |
1652 | PERIPH_CLK("vi", "vi", NULL, 20, 0x148, 150000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */ | 2176 | PERIPH_CLK("vi_sensor", "tegra_camera", "vi_sensor", 20, 0x1a8, 150000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | PERIPH_NO_RESET), /* scales with voltage and process_id */ |
1653 | PERIPH_CLK("vi_sensor", "vi_sensor", NULL, 20, 0x1a8, 150000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | PERIPH_NO_RESET), /* scales with voltage and process_id */ | ||
1654 | PERIPH_CLK("epp", "epp", NULL, 19, 0x16c, 300000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */ | 2177 | PERIPH_CLK("epp", "epp", NULL, 19, 0x16c, 300000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */ |
1655 | PERIPH_CLK("mpe", "mpe", NULL, 60, 0x170, 250000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */ | 2178 | PERIPH_CLK("mpe", "mpe", NULL, 60, 0x170, 250000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */ |
1656 | PERIPH_CLK("host1x", "host1x", NULL, 28, 0x180, 166000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */ | 2179 | PERIPH_CLK("host1x", "host1x", NULL, 28, 0x180, 166000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */ |
1657 | /* FIXME: cve and tvo share an enable */ | ||
1658 | PERIPH_CLK("cve", "cve", NULL, 49, 0x140, 250000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */ | 2180 | PERIPH_CLK("cve", "cve", NULL, 49, 0x140, 250000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */ |
1659 | PERIPH_CLK("tvo", "tvo", NULL, 49, 0x188, 250000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */ | 2181 | PERIPH_CLK("tvo", "tvo", NULL, 49, 0x188, 250000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */ |
1660 | PERIPH_CLK("hdmi", "hdmi", NULL, 51, 0x18c, 148500000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */ | 2182 | PERIPH_CLK("hdmi", "hdmi", NULL, 51, 0x18c, 600000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */ |
1661 | PERIPH_CLK("tvdac", "tvdac", NULL, 53, 0x194, 250000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */ | 2183 | PERIPH_CLK("tvdac", "tvdac", NULL, 53, 0x194, 250000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* requires min voltage */ |
1662 | PERIPH_CLK("disp1", "tegrafb.0", NULL, 27, 0x138, 190000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* scales with voltage and process_id */ | 2184 | PERIPH_CLK("disp1", "tegradc.0", NULL, 27, 0x138, 600000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* scales with voltage and process_id */ |
1663 | PERIPH_CLK("disp2", "tegrafb.1", NULL, 26, 0x13c, 190000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* scales with voltage and process_id */ | 2185 | PERIPH_CLK("disp2", "tegradc.1", NULL, 26, 0x13c, 600000000, mux_pllp_plld_pllc_clkm, MUX | DIV_U71), /* scales with voltage and process_id */ |
1664 | PERIPH_CLK("usbd", "fsl-tegra-udc", NULL, 22, 0, 480000000, mux_clk_m, 0), /* requires min voltage */ | 2186 | PERIPH_CLK("usbd", "fsl-tegra-udc", NULL, 22, 0, 480000000, mux_clk_m, 0), /* requires min voltage */ |
1665 | PERIPH_CLK("usb2", "tegra-ehci.1", NULL, 58, 0, 480000000, mux_clk_m, 0), /* requires min voltage */ | 2187 | PERIPH_CLK("usb2", "tegra-ehci.1", NULL, 58, 0, 480000000, mux_clk_m, 0), /* requires min voltage */ |
1666 | PERIPH_CLK("usb3", "tegra-ehci.2", NULL, 59, 0, 480000000, mux_clk_m, 0), /* requires min voltage */ | 2188 | PERIPH_CLK("usb3", "tegra-ehci.2", NULL, 59, 0, 480000000, mux_clk_m, 0), /* requires min voltage */ |
1667 | PERIPH_CLK("emc", "emc", NULL, 57, 0x19c, 800000000, mux_pllm_pllc_pllp_clkm, MUX | DIV_U71 | PERIPH_EMC_ENB), | ||
1668 | PERIPH_CLK("dsi", "dsi", NULL, 48, 0, 500000000, mux_plld, 0), /* scales with voltage */ | 2189 | PERIPH_CLK("dsi", "dsi", NULL, 48, 0, 500000000, mux_plld, 0), /* scales with voltage */ |
1669 | PERIPH_CLK("csi", "csi", NULL, 52, 0, 72000000, mux_pllp_out3, 0), | 2190 | PERIPH_CLK("csi", "tegra_camera", "csi", 52, 0, 72000000, mux_pllp_out3, 0), |
1670 | PERIPH_CLK("isp", "isp", NULL, 23, 0, 150000000, mux_clk_m, 0), /* same frequency as VI */ | 2191 | PERIPH_CLK("isp", "tegra_camera", "isp", 23, 0, 150000000, mux_clk_m, 0), /* same frequency as VI */ |
1671 | PERIPH_CLK("csus", "csus", NULL, 92, 0, 150000000, mux_clk_m, PERIPH_NO_RESET), | 2192 | PERIPH_CLK("csus", "tegra_camera", "csus", 92, 0, 150000000, mux_clk_m, PERIPH_NO_RESET), |
1672 | PERIPH_CLK("pex", NULL, "pex", 70, 0, 26000000, mux_clk_m, PERIPH_MANUAL_RESET), | 2193 | PERIPH_CLK("pex", NULL, "pex", 70, 0, 26000000, mux_clk_m, PERIPH_MANUAL_RESET), |
1673 | PERIPH_CLK("afi", NULL, "afi", 72, 0, 26000000, mux_clk_m, PERIPH_MANUAL_RESET), | 2194 | PERIPH_CLK("afi", NULL, "afi", 72, 0, 26000000, mux_clk_m, PERIPH_MANUAL_RESET), |
1674 | PERIPH_CLK("pcie_xclk", NULL, "pcie_xclk", 74, 0, 26000000, mux_clk_m, PERIPH_MANUAL_RESET), | 2195 | PERIPH_CLK("pcie_xclk", NULL, "pcie_xclk", 74, 0, 26000000, mux_clk_m, PERIPH_MANUAL_RESET), |
2196 | |||
2197 | SHARED_CLK("avp.sclk", "tegra-avp", "sclk", &tegra_clk_sclk), | ||
2198 | SHARED_CLK("avp.emc", "tegra-avp", "emc", &tegra_clk_emc), | ||
2199 | SHARED_CLK("cpu.emc", "cpu", "emc", &tegra_clk_emc), | ||
2200 | SHARED_CLK("disp1.emc", "tegradc.0", "emc", &tegra_clk_emc), | ||
2201 | SHARED_CLK("disp2.emc", "tegradc.1", "emc", &tegra_clk_emc), | ||
2202 | SHARED_CLK("hdmi.emc", "hdmi", "emc", &tegra_clk_emc), | ||
2203 | SHARED_CLK("host.emc", "tegra_grhost", "emc", &tegra_clk_emc), | ||
2204 | SHARED_CLK("usbd.emc", "fsl-tegra-udc", "emc", &tegra_clk_emc), | ||
2205 | SHARED_CLK("usb1.emc", "tegra-ehci.0", "emc", &tegra_clk_emc), | ||
2206 | SHARED_CLK("usb2.emc", "tegra-ehci.1", "emc", &tegra_clk_emc), | ||
2207 | SHARED_CLK("usb3.emc", "tegra-ehci.2", "emc", &tegra_clk_emc), | ||
1675 | }; | 2208 | }; |
1676 | 2209 | ||
1677 | #define CLK_DUPLICATE(_name, _dev, _con) \ | 2210 | #define CLK_DUPLICATE(_name, _dev, _con) \ |
@@ -1693,9 +2226,22 @@ struct clk_duplicate tegra_clk_duplicates[] = { | |||
1693 | CLK_DUPLICATE("uartc", "tegra_uart.2", NULL), | 2226 | CLK_DUPLICATE("uartc", "tegra_uart.2", NULL), |
1694 | CLK_DUPLICATE("uartd", "tegra_uart.3", NULL), | 2227 | CLK_DUPLICATE("uartd", "tegra_uart.3", NULL), |
1695 | CLK_DUPLICATE("uarte", "tegra_uart.4", NULL), | 2228 | CLK_DUPLICATE("uarte", "tegra_uart.4", NULL), |
1696 | CLK_DUPLICATE("host1x", "tegrafb.0", "host1x"), | 2229 | CLK_DUPLICATE("usbd", "utmip-pad", NULL), |
1697 | CLK_DUPLICATE("host1x", "tegrafb.1", "host1x"), | ||
1698 | CLK_DUPLICATE("usbd", "tegra-ehci.0", NULL), | 2230 | CLK_DUPLICATE("usbd", "tegra-ehci.0", NULL), |
2231 | CLK_DUPLICATE("usbd", "tegra-otg", NULL), | ||
2232 | CLK_DUPLICATE("hdmi", "tegradc.0", "hdmi"), | ||
2233 | CLK_DUPLICATE("hdmi", "tegradc.1", "hdmi"), | ||
2234 | CLK_DUPLICATE("pwm", "tegra_pwm.0", NULL), | ||
2235 | CLK_DUPLICATE("pwm", "tegra_pwm.1", NULL), | ||
2236 | CLK_DUPLICATE("pwm", "tegra_pwm.2", NULL), | ||
2237 | CLK_DUPLICATE("pwm", "tegra_pwm.3", NULL), | ||
2238 | CLK_DUPLICATE("host1x", "tegra_grhost", "host1x"), | ||
2239 | CLK_DUPLICATE("2d", "tegra_grhost", "gr2d"), | ||
2240 | CLK_DUPLICATE("3d", "tegra_grhost", "gr3d"), | ||
2241 | CLK_DUPLICATE("epp", "tegra_grhost", "epp"), | ||
2242 | CLK_DUPLICATE("mpe", "tegra_grhost", "mpe"), | ||
2243 | CLK_DUPLICATE("cop", "tegra-avp", "cop"), | ||
2244 | CLK_DUPLICATE("vde", "tegra-aes", "vde"), | ||
1699 | }; | 2245 | }; |
1700 | 2246 | ||
1701 | #define CLK(dev, con, ck) \ | 2247 | #define CLK(dev, con, ck) \ |
@@ -1705,68 +2251,70 @@ struct clk_duplicate tegra_clk_duplicates[] = { | |||
1705 | .clk = ck, \ | 2251 | .clk = ck, \ |
1706 | } | 2252 | } |
1707 | 2253 | ||
1708 | struct clk_lookup tegra_clk_lookups[] = { | 2254 | struct clk *tegra_ptr_clks[] = { |
1709 | /* external root sources */ | 2255 | &tegra_clk_32k, |
1710 | CLK(NULL, "32k_clk", &tegra_clk_32k), | 2256 | &tegra_pll_s, |
1711 | CLK(NULL, "pll_s", &tegra_pll_s), | 2257 | &tegra_clk_m, |
1712 | CLK(NULL, "clk_m", &tegra_clk_m), | 2258 | &tegra_pll_m, |
1713 | CLK(NULL, "pll_m", &tegra_pll_m), | 2259 | &tegra_pll_m_out1, |
1714 | CLK(NULL, "pll_m_out1", &tegra_pll_m_out1), | 2260 | &tegra_pll_c, |
1715 | CLK(NULL, "pll_c", &tegra_pll_c), | 2261 | &tegra_pll_c_out1, |
1716 | CLK(NULL, "pll_c_out1", &tegra_pll_c_out1), | 2262 | &tegra_pll_p, |
1717 | CLK(NULL, "pll_p", &tegra_pll_p), | 2263 | &tegra_pll_p_out1, |
1718 | CLK(NULL, "pll_p_out1", &tegra_pll_p_out1), | 2264 | &tegra_pll_p_out2, |
1719 | CLK(NULL, "pll_p_out2", &tegra_pll_p_out2), | 2265 | &tegra_pll_p_out3, |
1720 | CLK(NULL, "pll_p_out3", &tegra_pll_p_out3), | 2266 | &tegra_pll_p_out4, |
1721 | CLK(NULL, "pll_p_out4", &tegra_pll_p_out4), | 2267 | &tegra_pll_a, |
1722 | CLK(NULL, "pll_a", &tegra_pll_a), | 2268 | &tegra_pll_a_out0, |
1723 | CLK(NULL, "pll_a_out0", &tegra_pll_a_out0), | 2269 | &tegra_pll_d, |
1724 | CLK(NULL, "pll_d", &tegra_pll_d), | 2270 | &tegra_pll_d_out0, |
1725 | CLK(NULL, "pll_d_out0", &tegra_pll_d_out0), | 2271 | &tegra_pll_u, |
1726 | CLK(NULL, "pll_u", &tegra_pll_u), | 2272 | &tegra_pll_x, |
1727 | CLK(NULL, "pll_x", &tegra_pll_x), | 2273 | &tegra_pll_e, |
1728 | CLK(NULL, "pll_e", &tegra_pll_e), | 2274 | &tegra_clk_cclk, |
1729 | CLK(NULL, "cclk", &tegra_clk_cclk), | 2275 | &tegra_clk_sclk, |
1730 | CLK(NULL, "sclk", &tegra_clk_sclk), | 2276 | &tegra_clk_hclk, |
1731 | CLK(NULL, "hclk", &tegra_clk_hclk), | 2277 | &tegra_clk_pclk, |
1732 | CLK(NULL, "pclk", &tegra_clk_pclk), | 2278 | &tegra_clk_d, |
1733 | CLK(NULL, "clk_d", &tegra_clk_d), | 2279 | &tegra_clk_cdev1, |
1734 | CLK(NULL, "cpu", &tegra_clk_virtual_cpu), | 2280 | &tegra_clk_cdev2, |
1735 | }; | 2281 | &tegra_clk_virtual_cpu, |
2282 | &tegra_clk_blink, | ||
2283 | &tegra_clk_cop, | ||
2284 | &tegra_clk_emc, | ||
2285 | }; | ||
2286 | |||
2287 | static void tegra2_init_one_clock(struct clk *c) | ||
2288 | { | ||
2289 | clk_init(c); | ||
2290 | INIT_LIST_HEAD(&c->shared_bus_list); | ||
2291 | if (!c->lookup.dev_id && !c->lookup.con_id) | ||
2292 | c->lookup.con_id = c->name; | ||
2293 | c->lookup.clk = c; | ||
2294 | clkdev_add(&c->lookup); | ||
2295 | } | ||
1736 | 2296 | ||
1737 | void __init tegra2_init_clocks(void) | 2297 | void __init tegra2_init_clocks(void) |
1738 | { | 2298 | { |
1739 | int i; | 2299 | int i; |
1740 | struct clk_lookup *cl; | ||
1741 | struct clk *c; | 2300 | struct clk *c; |
1742 | struct clk_duplicate *cd; | ||
1743 | |||
1744 | for (i = 0; i < ARRAY_SIZE(tegra_clk_lookups); i++) { | ||
1745 | cl = &tegra_clk_lookups[i]; | ||
1746 | clk_init(cl->clk); | ||
1747 | clkdev_add(cl); | ||
1748 | } | ||
1749 | 2301 | ||
1750 | for (i = 0; i < ARRAY_SIZE(tegra_periph_clks); i++) { | 2302 | for (i = 0; i < ARRAY_SIZE(tegra_ptr_clks); i++) |
1751 | c = &tegra_periph_clks[i]; | 2303 | tegra2_init_one_clock(tegra_ptr_clks[i]); |
1752 | cl = &c->lookup; | ||
1753 | cl->clk = c; | ||
1754 | 2304 | ||
1755 | clk_init(cl->clk); | 2305 | for (i = 0; i < ARRAY_SIZE(tegra_list_clks); i++) |
1756 | clkdev_add(cl); | 2306 | tegra2_init_one_clock(&tegra_list_clks[i]); |
1757 | } | ||
1758 | 2307 | ||
1759 | for (i = 0; i < ARRAY_SIZE(tegra_clk_duplicates); i++) { | 2308 | for (i = 0; i < ARRAY_SIZE(tegra_clk_duplicates); i++) { |
1760 | cd = &tegra_clk_duplicates[i]; | 2309 | c = tegra_get_clock_by_name(tegra_clk_duplicates[i].name); |
1761 | c = tegra_get_clock_by_name(cd->name); | 2310 | if (!c) { |
1762 | if (c) { | ||
1763 | cl = &cd->lookup; | ||
1764 | cl->clk = c; | ||
1765 | clkdev_add(cl); | ||
1766 | } else { | ||
1767 | pr_err("%s: Unknown duplicate clock %s\n", __func__, | 2311 | pr_err("%s: Unknown duplicate clock %s\n", __func__, |
1768 | cd->name); | 2312 | tegra_clk_duplicates[i].name); |
2313 | continue; | ||
1769 | } | 2314 | } |
2315 | |||
2316 | tegra_clk_duplicates[i].lookup.clk = c; | ||
2317 | clkdev_add(&tegra_clk_duplicates[i].lookup); | ||
1770 | } | 2318 | } |
1771 | 2319 | ||
1772 | init_audio_sync_clock_mux(); | 2320 | init_audio_sync_clock_mux(); |
@@ -1774,7 +2322,7 @@ void __init tegra2_init_clocks(void) | |||
1774 | 2322 | ||
1775 | #ifdef CONFIG_PM | 2323 | #ifdef CONFIG_PM |
1776 | static u32 clk_rst_suspend[RST_DEVICES_NUM + CLK_OUT_ENB_NUM + | 2324 | static u32 clk_rst_suspend[RST_DEVICES_NUM + CLK_OUT_ENB_NUM + |
1777 | PERIPH_CLK_SOURCE_NUM + 3]; | 2325 | PERIPH_CLK_SOURCE_NUM + 22]; |
1778 | 2326 | ||
1779 | void tegra_clk_suspend(void) | 2327 | void tegra_clk_suspend(void) |
1780 | { | 2328 | { |
@@ -1782,6 +2330,29 @@ void tegra_clk_suspend(void) | |||
1782 | u32 *ctx = clk_rst_suspend; | 2330 | u32 *ctx = clk_rst_suspend; |
1783 | 2331 | ||
1784 | *ctx++ = clk_readl(OSC_CTRL) & OSC_CTRL_MASK; | 2332 | *ctx++ = clk_readl(OSC_CTRL) & OSC_CTRL_MASK; |
2333 | *ctx++ = clk_readl(tegra_pll_c.reg + PLL_BASE); | ||
2334 | *ctx++ = clk_readl(tegra_pll_c.reg + PLL_MISC(&tegra_pll_c)); | ||
2335 | *ctx++ = clk_readl(tegra_pll_a.reg + PLL_BASE); | ||
2336 | *ctx++ = clk_readl(tegra_pll_a.reg + PLL_MISC(&tegra_pll_a)); | ||
2337 | *ctx++ = clk_readl(tegra_pll_s.reg + PLL_BASE); | ||
2338 | *ctx++ = clk_readl(tegra_pll_s.reg + PLL_MISC(&tegra_pll_s)); | ||
2339 | *ctx++ = clk_readl(tegra_pll_d.reg + PLL_BASE); | ||
2340 | *ctx++ = clk_readl(tegra_pll_d.reg + PLL_MISC(&tegra_pll_d)); | ||
2341 | *ctx++ = clk_readl(tegra_pll_u.reg + PLL_BASE); | ||
2342 | *ctx++ = clk_readl(tegra_pll_u.reg + PLL_MISC(&tegra_pll_u)); | ||
2343 | |||
2344 | *ctx++ = clk_readl(tegra_pll_m_out1.reg); | ||
2345 | *ctx++ = clk_readl(tegra_pll_a_out0.reg); | ||
2346 | *ctx++ = clk_readl(tegra_pll_c_out1.reg); | ||
2347 | |||
2348 | *ctx++ = clk_readl(tegra_clk_cclk.reg); | ||
2349 | *ctx++ = clk_readl(tegra_clk_cclk.reg + SUPER_CLK_DIVIDER); | ||
2350 | |||
2351 | *ctx++ = clk_readl(tegra_clk_sclk.reg); | ||
2352 | *ctx++ = clk_readl(tegra_clk_sclk.reg + SUPER_CLK_DIVIDER); | ||
2353 | *ctx++ = clk_readl(tegra_clk_pclk.reg); | ||
2354 | |||
2355 | *ctx++ = clk_readl(tegra_clk_audio.reg); | ||
1785 | 2356 | ||
1786 | for (off = PERIPH_CLK_SOURCE_I2S1; off <= PERIPH_CLK_SOURCE_OSC; | 2357 | for (off = PERIPH_CLK_SOURCE_I2S1; off <= PERIPH_CLK_SOURCE_OSC; |
1787 | off += 4) { | 2358 | off += 4) { |
@@ -1800,6 +2371,8 @@ void tegra_clk_suspend(void) | |||
1800 | 2371 | ||
1801 | *ctx++ = clk_readl(MISC_CLK_ENB); | 2372 | *ctx++ = clk_readl(MISC_CLK_ENB); |
1802 | *ctx++ = clk_readl(CLK_MASK_ARM); | 2373 | *ctx++ = clk_readl(CLK_MASK_ARM); |
2374 | |||
2375 | BUG_ON(ctx - clk_rst_suspend != ARRAY_SIZE(clk_rst_suspend)); | ||
1803 | } | 2376 | } |
1804 | 2377 | ||
1805 | void tegra_clk_resume(void) | 2378 | void tegra_clk_resume(void) |
@@ -1812,6 +2385,31 @@ void tegra_clk_resume(void) | |||
1812 | val |= *ctx++; | 2385 | val |= *ctx++; |
1813 | clk_writel(val, OSC_CTRL); | 2386 | clk_writel(val, OSC_CTRL); |
1814 | 2387 | ||
2388 | clk_writel(*ctx++, tegra_pll_c.reg + PLL_BASE); | ||
2389 | clk_writel(*ctx++, tegra_pll_c.reg + PLL_MISC(&tegra_pll_c)); | ||
2390 | clk_writel(*ctx++, tegra_pll_a.reg + PLL_BASE); | ||
2391 | clk_writel(*ctx++, tegra_pll_a.reg + PLL_MISC(&tegra_pll_a)); | ||
2392 | clk_writel(*ctx++, tegra_pll_s.reg + PLL_BASE); | ||
2393 | clk_writel(*ctx++, tegra_pll_s.reg + PLL_MISC(&tegra_pll_s)); | ||
2394 | clk_writel(*ctx++, tegra_pll_d.reg + PLL_BASE); | ||
2395 | clk_writel(*ctx++, tegra_pll_d.reg + PLL_MISC(&tegra_pll_d)); | ||
2396 | clk_writel(*ctx++, tegra_pll_u.reg + PLL_BASE); | ||
2397 | clk_writel(*ctx++, tegra_pll_u.reg + PLL_MISC(&tegra_pll_u)); | ||
2398 | udelay(1000); | ||
2399 | |||
2400 | clk_writel(*ctx++, tegra_pll_m_out1.reg); | ||
2401 | clk_writel(*ctx++, tegra_pll_a_out0.reg); | ||
2402 | clk_writel(*ctx++, tegra_pll_c_out1.reg); | ||
2403 | |||
2404 | clk_writel(*ctx++, tegra_clk_cclk.reg); | ||
2405 | clk_writel(*ctx++, tegra_clk_cclk.reg + SUPER_CLK_DIVIDER); | ||
2406 | |||
2407 | clk_writel(*ctx++, tegra_clk_sclk.reg); | ||
2408 | clk_writel(*ctx++, tegra_clk_sclk.reg + SUPER_CLK_DIVIDER); | ||
2409 | clk_writel(*ctx++, tegra_clk_pclk.reg); | ||
2410 | |||
2411 | clk_writel(*ctx++, tegra_clk_audio.reg); | ||
2412 | |||
1815 | /* enable all clocks before configuring clock sources */ | 2413 | /* enable all clocks before configuring clock sources */ |
1816 | clk_writel(0xbffffff9ul, CLK_OUT_ENB); | 2414 | clk_writel(0xbffffff9ul, CLK_OUT_ENB); |
1817 | clk_writel(0xfefffff7ul, CLK_OUT_ENB + 4); | 2415 | clk_writel(0xfefffff7ul, CLK_OUT_ENB + 4); |
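
Editor's note: the clk_rst_suspend[] context above grows from "+ 3" to "+ 22" extra words, and the new BUG_ON() exists to keep that size in lockstep with the longer save sequence (the restore path walks the same layout). A minimal standalone sketch of this pattern, with placeholder register values and a userspace assert() standing in for BUG_ON():

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_SAVED_REGS 3

    /* Stand-ins for the clock controller registers saved at suspend. */
    static uint32_t fake_regs[NUM_SAVED_REGS] = { 0x11, 0x22, 0x33 };
    static uint32_t ctx_buf[NUM_SAVED_REGS];

    int main(void)
    {
        uint32_t *ctx = ctx_buf;

        *ctx++ = fake_regs[0];      /* e.g. a PLL base register */
        *ctx++ = fake_regs[1];      /* e.g. a PLL misc register */
        *ctx++ = fake_regs[2];      /* e.g. an output divider */

        /* Mirrors the BUG_ON() in tegra_clk_suspend(): the walking pointer
         * must land exactly at the end of the context array, so a missing
         * or extra save line is caught the first time suspend runs. */
        assert(ctx - ctx_buf == NUM_SAVED_REGS);

        printf("saved %d words\n", NUM_SAVED_REGS);
        return 0;
    }
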
diff --git a/arch/arm/mach-tegra/tegra2_dvfs.c b/arch/arm/mach-tegra/tegra2_dvfs.c deleted file mode 100644 index 5529c238dd77..000000000000 --- a/arch/arm/mach-tegra/tegra2_dvfs.c +++ /dev/null | |||
@@ -1,86 +0,0 @@ | |||
1 | /* | ||
2 | * arch/arm/mach-tegra/tegra2_dvfs.c | ||
3 | * | ||
4 | * Copyright (C) 2010 Google, Inc. | ||
5 | * | ||
6 | * Author: | ||
7 | * Colin Cross <ccross@google.com> | ||
8 | * | ||
9 | * This software is licensed under the terms of the GNU General Public | ||
10 | * License version 2, as published by the Free Software Foundation, and | ||
11 | * may be copied, distributed, and modified under those terms. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #include <linux/kernel.h> | ||
21 | |||
22 | #include "clock.h" | ||
23 | #include "tegra2_dvfs.h" | ||
24 | |||
25 | static struct dvfs_table virtual_cpu_process_0[] = { | ||
26 | {314000000, 750}, | ||
27 | {456000000, 825}, | ||
28 | {608000000, 900}, | ||
29 | {760000000, 975}, | ||
30 | {817000000, 1000}, | ||
31 | {912000000, 1050}, | ||
32 | {1000000000, 1100}, | ||
33 | {0, 0}, | ||
34 | }; | ||
35 | |||
36 | static struct dvfs_table virtual_cpu_process_1[] = { | ||
37 | {314000000, 750}, | ||
38 | {456000000, 825}, | ||
39 | {618000000, 900}, | ||
40 | {770000000, 975}, | ||
41 | {827000000, 1000}, | ||
42 | {922000000, 1050}, | ||
43 | {1000000000, 1100}, | ||
44 | {0, 0}, | ||
45 | }; | ||
46 | |||
47 | static struct dvfs_table virtual_cpu_process_2[] = { | ||
48 | {494000000, 750}, | ||
49 | {675000000, 825}, | ||
50 | {817000000, 875}, | ||
51 | {922000000, 925}, | ||
52 | {1000000000, 975}, | ||
53 | {0, 0}, | ||
54 | }; | ||
55 | |||
56 | static struct dvfs_table virtual_cpu_process_3[] = { | ||
57 | {730000000, 750}, | ||
58 | {760000000, 775}, | ||
59 | {845000000, 800}, | ||
60 | {1000000000, 875}, | ||
61 | {0, 0}, | ||
62 | }; | ||
63 | |||
64 | struct dvfs tegra_dvfs_virtual_cpu_dvfs = { | ||
65 | .reg_id = "vdd_cpu", | ||
66 | .process_id_table = { | ||
67 | { | ||
68 | .process_id = 0, | ||
69 | .table = virtual_cpu_process_0, | ||
70 | }, | ||
71 | { | ||
72 | .process_id = 1, | ||
73 | .table = virtual_cpu_process_1, | ||
74 | }, | ||
75 | { | ||
76 | .process_id = 2, | ||
77 | .table = virtual_cpu_process_2, | ||
78 | }, | ||
79 | { | ||
80 | .process_id = 3, | ||
81 | .table = virtual_cpu_process_3, | ||
82 | }, | ||
83 | }, | ||
84 | .process_id_table_length = 4, | ||
85 | .cpu = 1, | ||
86 | }; | ||
diff --git a/arch/arm/mach-tegra/tegra2_emc.c b/arch/arm/mach-tegra/tegra2_emc.c new file mode 100644 index 000000000000..0f7ae6e90b55 --- /dev/null +++ b/arch/arm/mach-tegra/tegra2_emc.c | |||
@@ -0,0 +1,178 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2011 Google, Inc. | ||
3 | * | ||
4 | * Author: | ||
5 | * Colin Cross <ccross@android.com> | ||
6 | * | ||
7 | * This software is licensed under the terms of the GNU General Public | ||
8 | * License version 2, as published by the Free Software Foundation, and | ||
9 | * may be copied, distributed, and modified under those terms. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/clk.h> | ||
20 | #include <linux/err.h> | ||
21 | #include <linux/io.h> | ||
22 | #include <linux/module.h> | ||
23 | |||
24 | #include <mach/iomap.h> | ||
25 | |||
26 | #include "tegra2_emc.h" | ||
27 | |||
28 | #ifdef CONFIG_TEGRA_EMC_SCALING_ENABLE | ||
29 | static bool emc_enable = true; | ||
30 | #else | ||
31 | static bool emc_enable; | ||
32 | #endif | ||
33 | module_param(emc_enable, bool, 0644); | ||
34 | |||
35 | static void __iomem *emc = IO_ADDRESS(TEGRA_EMC_BASE); | ||
36 | static const struct tegra_emc_table *tegra_emc_table; | ||
37 | static int tegra_emc_table_size; | ||
38 | |||
39 | static inline void emc_writel(u32 val, unsigned long addr) | ||
40 | { | ||
41 | writel(val, emc + addr); | ||
42 | } | ||
43 | |||
44 | static inline u32 emc_readl(unsigned long addr) | ||
45 | { | ||
46 | return readl(emc + addr); | ||
47 | } | ||
48 | |||
49 | static const unsigned long emc_reg_addr[TEGRA_EMC_NUM_REGS] = { | ||
50 | 0x2c, /* RC */ | ||
51 | 0x30, /* RFC */ | ||
52 | 0x34, /* RAS */ | ||
53 | 0x38, /* RP */ | ||
54 | 0x3c, /* R2W */ | ||
55 | 0x40, /* W2R */ | ||
56 | 0x44, /* R2P */ | ||
57 | 0x48, /* W2P */ | ||
58 | 0x4c, /* RD_RCD */ | ||
59 | 0x50, /* WR_RCD */ | ||
60 | 0x54, /* RRD */ | ||
61 | 0x58, /* REXT */ | ||
62 | 0x5c, /* WDV */ | ||
63 | 0x60, /* QUSE */ | ||
64 | 0x64, /* QRST */ | ||
65 | 0x68, /* QSAFE */ | ||
66 | 0x6c, /* RDV */ | ||
67 | 0x70, /* REFRESH */ | ||
68 | 0x74, /* BURST_REFRESH_NUM */ | ||
69 | 0x78, /* PDEX2WR */ | ||
70 | 0x7c, /* PDEX2RD */ | ||
71 | 0x80, /* PCHG2PDEN */ | ||
72 | 0x84, /* ACT2PDEN */ | ||
73 | 0x88, /* AR2PDEN */ | ||
74 | 0x8c, /* RW2PDEN */ | ||
75 | 0x90, /* TXSR */ | ||
76 | 0x94, /* TCKE */ | ||
77 | 0x98, /* TFAW */ | ||
78 | 0x9c, /* TRPAB */ | ||
79 | 0xa0, /* TCLKSTABLE */ | ||
80 | 0xa4, /* TCLKSTOP */ | ||
81 | 0xa8, /* TREFBW */ | ||
82 | 0xac, /* QUSE_EXTRA */ | ||
83 | 0x114, /* FBIO_CFG6 */ | ||
84 | 0xb0, /* ODT_WRITE */ | ||
85 | 0xb4, /* ODT_READ */ | ||
86 | 0x104, /* FBIO_CFG5 */ | ||
87 | 0x2bc, /* CFG_DIG_DLL */ | ||
88 | 0x2c0, /* DLL_XFORM_DQS */ | ||
89 | 0x2c4, /* DLL_XFORM_QUSE */ | ||
90 | 0x2e0, /* ZCAL_REF_CNT */ | ||
91 | 0x2e4, /* ZCAL_WAIT_CNT */ | ||
92 | 0x2a8, /* AUTO_CAL_INTERVAL */ | ||
93 | 0x2d0, /* CFG_CLKTRIM_0 */ | ||
94 | 0x2d4, /* CFG_CLKTRIM_1 */ | ||
95 | 0x2d8, /* CFG_CLKTRIM_2 */ | ||
96 | }; | ||
97 | |||
98 | /* Select the closest EMC rate that is not below the requested rate */ | ||
99 | long tegra_emc_round_rate(unsigned long rate) | ||
100 | { | ||
101 | int i; | ||
102 | int best = -1; | ||
103 | unsigned long distance = ULONG_MAX; | ||
104 | |||
105 | if (!tegra_emc_table) | ||
106 | return -EINVAL; | ||
107 | |||
108 | if (!emc_enable) | ||
109 | return -EINVAL; | ||
110 | |||
111 | pr_debug("%s: %lu\n", __func__, rate); | ||
112 | |||
113 | /* | ||
114 | * The EMC clock rate is twice the bus rate, and the bus rate is | ||
115 | * measured in kHz | ||
116 | */ | ||
117 | rate = rate / 2 / 1000; | ||
118 | |||
119 | for (i = 0; i < tegra_emc_table_size; i++) { | ||
120 | if (tegra_emc_table[i].rate >= rate && | ||
121 | (tegra_emc_table[i].rate - rate) < distance) { | ||
122 | distance = tegra_emc_table[i].rate - rate; | ||
123 | best = i; | ||
124 | } | ||
125 | } | ||
126 | |||
127 | if (best < 0) | ||
128 | return -EINVAL; | ||
129 | |||
130 | pr_debug("%s: using %lu\n", __func__, tegra_emc_table[best].rate); | ||
131 | |||
132 | return tegra_emc_table[best].rate * 2 * 1000; | ||
133 | } | ||
134 | |||
135 | /* | ||
136 | * The EMC registers have shadow registers. When the EMC clock is updated | ||
137 | * in the clock controller, the shadow registers are copied to the active | ||
138 | * registers, allowing glitchless memory bus frequency changes. | ||
139 | * This function updates the shadow registers for a new clock frequency, | ||
140 | * and relies on the clock lock on the emc clock to avoid races between | ||
141 | * multiple frequency changes | ||
142 | */ | ||
143 | int tegra_emc_set_rate(unsigned long rate) | ||
144 | { | ||
145 | int i; | ||
146 | int j; | ||
147 | |||
148 | if (!tegra_emc_table) | ||
149 | return -EINVAL; | ||
150 | |||
151 | /* | ||
152 | * The EMC clock rate is twice the bus rate, and the bus rate is | ||
153 | * measured in kHz | ||
154 | */ | ||
155 | rate = rate / 2 / 1000; | ||
156 | |||
157 | for (i = 0; i < tegra_emc_table_size; i++) | ||
158 | if (tegra_emc_table[i].rate == rate) | ||
159 | break; | ||
160 | |||
161 | if (i >= tegra_emc_table_size) | ||
162 | return -EINVAL; | ||
163 | |||
164 | pr_debug("%s: setting to %lu\n", __func__, rate); | ||
165 | |||
166 | for (j = 0; j < TEGRA_EMC_NUM_REGS; j++) | ||
167 | emc_writel(tegra_emc_table[i].regs[j], emc_reg_addr[j]); | ||
168 | |||
169 | emc_readl(tegra_emc_table[i].regs[TEGRA_EMC_NUM_REGS - 1]); | ||
170 | |||
171 | return 0; | ||
172 | } | ||
173 | |||
174 | void tegra_init_emc(const struct tegra_emc_table *table, int table_size) | ||
175 | { | ||
176 | tegra_emc_table = table; | ||
177 | tegra_emc_table_size = table_size; | ||
178 | } | ||
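
Editor's note: as the comments in tegra2_emc.c state, the table is keyed by bus rate in kHz while callers pass the EMC clock rate in Hz (twice the bus rate), and round_rate() picks the closest entry at or above the request. A standalone sketch of that conversion with made-up table values, not real Tegra DRAM timings:

    #include <limits.h>
    #include <stdio.h>

    #define TOY_TABLE_SIZE 3

    /* Made-up bus rates in kHz; real boards supply measured DRAM timings. */
    static const unsigned long toy_table_khz[TOY_TABLE_SIZE] = {
        150000, 166500, 300000
    };

    /* Same rounding rule as tegra_emc_round_rate(): the EMC clock runs at
     * twice the bus rate, and the table stores bus rates in kHz. */
    static long toy_round_rate(unsigned long rate_hz)
    {
        unsigned long rate = rate_hz / 2 / 1000;    /* EMC Hz -> bus kHz */
        unsigned long distance = ULONG_MAX;
        int best = -1;
        int i;

        for (i = 0; i < TOY_TABLE_SIZE; i++) {
            if (toy_table_khz[i] >= rate &&
                toy_table_khz[i] - rate < distance) {
                distance = toy_table_khz[i] - rate;
                best = i;
            }
        }
        if (best < 0)
            return -1;                              /* nothing high enough */
        return (long)(toy_table_khz[best] * 2 * 1000); /* bus kHz -> EMC Hz */
    }

    int main(void)
    {
        /* 333 MHz -> 166500 kHz bus -> exact hit -> prints 333000000 */
        printf("%ld\n", toy_round_rate(333000000));
        /* 200 MHz -> 100000 kHz bus -> rounds up to 150000 -> 300000000 */
        printf("%ld\n", toy_round_rate(200000000));
        return 0;
    }
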
diff --git a/arch/arm/mach-tegra/tegra2_emc.h b/arch/arm/mach-tegra/tegra2_emc.h new file mode 100644 index 000000000000..19f08cb31603 --- /dev/null +++ b/arch/arm/mach-tegra/tegra2_emc.h | |||
@@ -0,0 +1,27 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2011 Google, Inc. | ||
3 | * | ||
4 | * Author: | ||
5 | * Colin Cross <ccross@android.com> | ||
6 | * | ||
7 | * This software is licensed under the terms of the GNU General Public | ||
8 | * License version 2, as published by the Free Software Foundation, and | ||
9 | * may be copied, distributed, and modified under those terms. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | #define TEGRA_EMC_NUM_REGS 46 | ||
19 | |||
20 | struct tegra_emc_table { | ||
21 | unsigned long rate; | ||
22 | u32 regs[TEGRA_EMC_NUM_REGS]; | ||
23 | }; | ||
24 | |||
25 | int tegra_emc_set_rate(unsigned long rate); | ||
26 | long tegra_emc_round_rate(unsigned long rate); | ||
27 | void tegra_init_emc(const struct tegra_emc_table *table, int table_size); | ||
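
Editor's note: the header above is the whole board-facing API; a board registers its DRAM timing table once and the emc clock consults it on every rate change. A hypothetical board hook is sketched below; the names and rates are placeholders, and a real table fills in all TEGRA_EMC_NUM_REGS timing values for each supported rate:

    /* Hypothetical board file; call board_emc_init() from the board's
     * machine init code before the emc clock is first scaled. */
    #include <linux/init.h>
    #include <linux/kernel.h>

    #include "tegra2_emc.h"

    static const struct tegra_emc_table board_emc_tables[] = {
        /* .regs would carry the full set of measured timing values for
         * the board's DRAM at this bus rate; zeros are placeholders. */
        { .rate = 166500, .regs = { 0 } },
        { .rate = 333000, .regs = { 0 } },
    };

    static void __init board_emc_init(void)
    {
        tegra_init_emc(board_emc_tables, ARRAY_SIZE(board_emc_tables));
    }
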
diff --git a/arch/arm/mach-tegra/timer.c b/arch/arm/mach-tegra/timer.c index 7b8ad1f98f44..0fcb1eb4214d 100644 --- a/arch/arm/mach-tegra/timer.c +++ b/arch/arm/mach-tegra/timer.c | |||
@@ -18,6 +18,7 @@ | |||
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | #include <linux/err.h> | ||
21 | #include <linux/sched.h> | 22 | #include <linux/sched.h> |
22 | #include <linux/time.h> | 23 | #include <linux/time.h> |
23 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
@@ -33,10 +34,15 @@ | |||
33 | 34 | ||
34 | #include <mach/iomap.h> | 35 | #include <mach/iomap.h> |
35 | #include <mach/irqs.h> | 36 | #include <mach/irqs.h> |
37 | #include <mach/suspend.h> | ||
36 | 38 | ||
37 | #include "board.h" | 39 | #include "board.h" |
38 | #include "clock.h" | 40 | #include "clock.h" |
39 | 41 | ||
42 | #define RTC_SECONDS 0x08 | ||
43 | #define RTC_SHADOW_SECONDS 0x0c | ||
44 | #define RTC_MILLISECONDS 0x10 | ||
45 | |||
40 | #define TIMERUS_CNTR_1US 0x10 | 46 | #define TIMERUS_CNTR_1US 0x10 |
41 | #define TIMERUS_USEC_CFG 0x14 | 47 | #define TIMERUS_USEC_CFG 0x14 |
42 | #define TIMERUS_CNTR_FREEZE 0x4c | 48 | #define TIMERUS_CNTR_FREEZE 0x4c |
@@ -49,9 +55,11 @@ | |||
49 | #define TIMER_PTV 0x0 | 55 | #define TIMER_PTV 0x0 |
50 | #define TIMER_PCR 0x4 | 56 | #define TIMER_PCR 0x4 |
51 | 57 | ||
52 | struct tegra_timer; | ||
53 | |||
54 | static void __iomem *timer_reg_base = IO_ADDRESS(TEGRA_TMR1_BASE); | 58 | static void __iomem *timer_reg_base = IO_ADDRESS(TEGRA_TMR1_BASE); |
59 | static void __iomem *rtc_base = IO_ADDRESS(TEGRA_RTC_BASE); | ||
60 | |||
61 | static struct timespec persistent_ts; | ||
62 | static u64 persistent_ms, last_persistent_ms; | ||
55 | 63 | ||
56 | #define timer_writel(value, reg) \ | 64 | #define timer_writel(value, reg) \ |
57 | __raw_writel(value, (u32)timer_reg_base + (reg)) | 65 | __raw_writel(value, (u32)timer_reg_base + (reg)) |
@@ -132,6 +140,42 @@ static void notrace tegra_update_sched_clock(void) | |||
132 | update_sched_clock(&cd, cyc, (u32)~0); | 140 | update_sched_clock(&cd, cyc, (u32)~0); |
133 | } | 141 | } |
134 | 142 | ||
143 | /* | ||
144 | * tegra_rtc_read_ms - Reads the Tegra RTC registers | ||
145 | * Care must be taken that this function is not called while the | ||
146 | * tegra_rtc driver could be executing to avoid race conditions | ||
147 | * on the RTC shadow register | ||
148 | */ | ||
149 | u64 tegra_rtc_read_ms(void) | ||
150 | { | ||
151 | u32 ms = readl(rtc_base + RTC_MILLISECONDS); | ||
152 | u32 s = readl(rtc_base + RTC_SHADOW_SECONDS); | ||
153 | return (u64)s * MSEC_PER_SEC + ms; | ||
154 | } | ||
155 | |||
156 | /* | ||
157 | * read_persistent_clock - Return time from a persistent clock. | ||
158 | * | ||
159 | * Reads the time from a source which isn't disabled during PM, the | ||
160 | * 32k sync timer. Converts the time elapsed since the last read into | ||
161 | * nsecs and adds it to a monotonically increasing timespec. | ||
162 | * Care must be taken that this function is not called while the | ||
163 | * tegra_rtc driver could be executing to avoid race conditions | ||
164 | * on the RTC shadow register | ||
165 | */ | ||
166 | void read_persistent_clock(struct timespec *ts) | ||
167 | { | ||
168 | u64 delta; | ||
169 | struct timespec *tsp = &persistent_ts; | ||
170 | |||
171 | last_persistent_ms = persistent_ms; | ||
172 | persistent_ms = tegra_rtc_read_ms(); | ||
173 | delta = persistent_ms - last_persistent_ms; | ||
174 | |||
175 | timespec_add_ns(tsp, delta * NSEC_PER_MSEC); | ||
176 | *ts = *tsp; | ||
177 | } | ||
178 | |||
135 | static irqreturn_t tegra_timer_interrupt(int irq, void *dev_id) | 179 | static irqreturn_t tegra_timer_interrupt(int irq, void *dev_id) |
136 | { | 180 | { |
137 | struct clock_event_device *evt = (struct clock_event_device *)dev_id; | 181 | struct clock_event_device *evt = (struct clock_event_device *)dev_id; |
@@ -150,9 +194,22 @@ static struct irqaction tegra_timer_irq = { | |||
150 | 194 | ||
151 | static void __init tegra_init_timer(void) | 195 | static void __init tegra_init_timer(void) |
152 | { | 196 | { |
197 | struct clk *clk; | ||
153 | unsigned long rate = clk_measure_input_freq(); | 198 | unsigned long rate = clk_measure_input_freq(); |
154 | int ret; | 199 | int ret; |
155 | 200 | ||
201 | clk = clk_get_sys("timer", NULL); | ||
202 | BUG_ON(IS_ERR(clk)); | ||
203 | clk_enable(clk); | ||
204 | |||
205 | /* | ||
206 |  * The RTC registers are used by read_persistent_clock(), so keep | ||
207 |  * the RTC clock enabled. | ||
208 | */ | ||
209 | clk = clk_get_sys("rtc-tegra", NULL); | ||
210 | BUG_ON(IS_ERR(clk)); | ||
211 | clk_enable(clk); | ||
212 | |||
156 | #ifdef CONFIG_HAVE_ARM_TWD | 213 | #ifdef CONFIG_HAVE_ARM_TWD |
157 | twd_base = IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x600); | 214 | twd_base = IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x600); |
158 | #endif | 215 | #endif |
@@ -196,10 +253,22 @@ static void __init tegra_init_timer(void) | |||
196 | tegra_clockevent.cpumask = cpu_all_mask; | 253 | tegra_clockevent.cpumask = cpu_all_mask; |
197 | tegra_clockevent.irq = tegra_timer_irq.irq; | 254 | tegra_clockevent.irq = tegra_timer_irq.irq; |
198 | clockevents_register_device(&tegra_clockevent); | 255 | clockevents_register_device(&tegra_clockevent); |
199 | |||
200 | return; | ||
201 | } | 256 | } |
202 | 257 | ||
203 | struct sys_timer tegra_timer = { | 258 | struct sys_timer tegra_timer = { |
204 | .init = tegra_init_timer, | 259 | .init = tegra_init_timer, |
205 | }; | 260 | }; |
261 | |||
262 | #ifdef CONFIG_PM | ||
263 | static u32 usec_config; | ||
264 | |||
265 | void tegra_timer_suspend(void) | ||
266 | { | ||
267 | usec_config = timer_readl(TIMERUS_USEC_CFG); | ||
268 | } | ||
269 | |||
270 | void tegra_timer_resume(void) | ||
271 | { | ||
272 | timer_writel(usec_config, TIMERUS_USEC_CFG); | ||
273 | } | ||
274 | #endif | ||
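The interesting part of the timer.c change is read_persistent_clock(): it keeps a monotonically increasing timespec and, on every call, folds in only the milliseconds that elapsed on the RTC since the previous call, via timespec_add_ns(). A stand-alone user-space sketch of that accumulation follows; the names and the fake RTC source are hypothetical, and the timespec arithmetic is a simplified stand-in for timespec_add_ns().

#include <stdint.h>
#include <stdio.h>

struct ts { int64_t sec; int64_t nsec; };

static uint64_t last_ms, now_ms;
static struct ts persistent;

/* Stand-in for tegra_rtc_read_ms(): pretend 1.5 s pass per call. */
static uint64_t rtc_read_ms(void)
{
	static uint64_t fake;
	fake += 1500;
	return fake;
}

/* Same shape as read_persistent_clock() above. */
static void read_persistent(struct ts *out)
{
	uint64_t delta;

	last_ms = now_ms;
	now_ms  = rtc_read_ms();
	delta   = now_ms - last_ms;            /* ms since the previous read */

	persistent.nsec += delta * 1000000ULL; /* timespec_add_ns(), simplified */
	persistent.sec  += persistent.nsec / 1000000000ULL;
	persistent.nsec %= 1000000000ULL;

	*out = persistent;
}

int main(void)
{
	struct ts t;

	read_persistent(&t);
	read_persistent(&t);
	printf("%lld.%09lld\n", (long long)t.sec, (long long)t.nsec);
	return 0;
}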
diff --git a/arch/arm/mach-tegra/usb_phy.c b/arch/arm/mach-tegra/usb_phy.c new file mode 100644 index 000000000000..88081bb3ec52 --- /dev/null +++ b/arch/arm/mach-tegra/usb_phy.c | |||
@@ -0,0 +1,795 @@ | |||
1 | /* | ||
2 | * arch/arm/mach-tegra/usb_phy.c | ||
3 | * | ||
4 | * Copyright (C) 2010 Google, Inc. | ||
5 | * | ||
6 | * Author: | ||
7 | * Erik Gilling <konkers@google.com> | ||
8 | * Benoit Goby <benoit@android.com> | ||
9 | * | ||
10 | * This software is licensed under the terms of the GNU General Public | ||
11 | * License version 2, as published by the Free Software Foundation, and | ||
12 | * may be copied, distributed, and modified under those terms. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | */ | ||
20 | |||
21 | #include <linux/resource.h> | ||
22 | #include <linux/delay.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/err.h> | ||
25 | #include <linux/platform_device.h> | ||
26 | #include <linux/io.h> | ||
27 | #include <linux/gpio.h> | ||
28 | #include <linux/usb/otg.h> | ||
29 | #include <linux/usb/ulpi.h> | ||
30 | #include <asm/mach-types.h> | ||
31 | #include <mach/usb_phy.h> | ||
32 | #include <mach/iomap.h> | ||
33 | |||
34 | #define ULPI_VIEWPORT 0x170 | ||
35 | |||
36 | #define USB_PORTSC1 0x184 | ||
37 | #define USB_PORTSC1_PTS(x) (((x) & 0x3) << 30) | ||
38 | #define USB_PORTSC1_PSPD(x) (((x) & 0x3) << 26) | ||
39 | #define USB_PORTSC1_PHCD (1 << 23) | ||
40 | #define USB_PORTSC1_WKOC (1 << 22) | ||
41 | #define USB_PORTSC1_WKDS (1 << 21) | ||
42 | #define USB_PORTSC1_WKCN (1 << 20) | ||
43 | #define USB_PORTSC1_PTC(x) (((x) & 0xf) << 16) | ||
44 | #define USB_PORTSC1_PP (1 << 12) | ||
45 | #define USB_PORTSC1_SUSP (1 << 7) | ||
46 | #define USB_PORTSC1_PE (1 << 2) | ||
47 | #define USB_PORTSC1_CCS (1 << 0) | ||
48 | |||
49 | #define USB_SUSP_CTRL 0x400 | ||
50 | #define USB_WAKE_ON_CNNT_EN_DEV (1 << 3) | ||
51 | #define USB_WAKE_ON_DISCON_EN_DEV (1 << 4) | ||
52 | #define USB_SUSP_CLR (1 << 5) | ||
53 | #define USB_PHY_CLK_VALID (1 << 7) | ||
54 | #define UTMIP_RESET (1 << 11) | ||
55 | #define UHSIC_RESET (1 << 11) | ||
56 | #define UTMIP_PHY_ENABLE (1 << 12) | ||
57 | #define ULPI_PHY_ENABLE (1 << 13) | ||
58 | #define USB_SUSP_SET (1 << 14) | ||
59 | #define USB_WAKEUP_DEBOUNCE_COUNT(x) (((x) & 0x7) << 16) | ||
60 | |||
61 | #define USB1_LEGACY_CTRL 0x410 | ||
62 | #define USB1_NO_LEGACY_MODE (1 << 0) | ||
63 | #define USB1_VBUS_SENSE_CTL_MASK (3 << 1) | ||
64 | #define USB1_VBUS_SENSE_CTL_VBUS_WAKEUP (0 << 1) | ||
65 | #define USB1_VBUS_SENSE_CTL_AB_SESS_VLD_OR_VBUS_WAKEUP \ | ||
66 | (1 << 1) | ||
67 | #define USB1_VBUS_SENSE_CTL_AB_SESS_VLD (2 << 1) | ||
68 | #define USB1_VBUS_SENSE_CTL_A_SESS_VLD (3 << 1) | ||
69 | |||
70 | #define ULPI_TIMING_CTRL_0 0x424 | ||
71 | #define ULPI_OUTPUT_PINMUX_BYP (1 << 10) | ||
72 | #define ULPI_CLKOUT_PINMUX_BYP (1 << 11) | ||
73 | |||
74 | #define ULPI_TIMING_CTRL_1 0x428 | ||
75 | #define ULPI_DATA_TRIMMER_LOAD (1 << 0) | ||
76 | #define ULPI_DATA_TRIMMER_SEL(x) (((x) & 0x7) << 1) | ||
77 | #define ULPI_STPDIRNXT_TRIMMER_LOAD (1 << 16) | ||
78 | #define ULPI_STPDIRNXT_TRIMMER_SEL(x) (((x) & 0x7) << 17) | ||
79 | #define ULPI_DIR_TRIMMER_LOAD (1 << 24) | ||
80 | #define ULPI_DIR_TRIMMER_SEL(x) (((x) & 0x7) << 25) | ||
81 | |||
82 | #define UTMIP_PLL_CFG1 0x804 | ||
83 | #define UTMIP_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0) | ||
84 | #define UTMIP_PLLU_ENABLE_DLY_COUNT(x) (((x) & 0x1f) << 27) | ||
85 | |||
86 | #define UTMIP_XCVR_CFG0 0x808 | ||
87 | #define UTMIP_XCVR_SETUP(x) (((x) & 0xf) << 0) | ||
88 | #define UTMIP_XCVR_LSRSLEW(x) (((x) & 0x3) << 8) | ||
89 | #define UTMIP_XCVR_LSFSLEW(x) (((x) & 0x3) << 10) | ||
90 | #define UTMIP_FORCE_PD_POWERDOWN (1 << 14) | ||
91 | #define UTMIP_FORCE_PD2_POWERDOWN (1 << 16) | ||
92 | #define UTMIP_FORCE_PDZI_POWERDOWN (1 << 18) | ||
93 | #define UTMIP_XCVR_HSSLEW_MSB(x) (((x) & 0x7f) << 25) | ||
94 | |||
95 | #define UTMIP_BIAS_CFG0 0x80c | ||
96 | #define UTMIP_OTGPD (1 << 11) | ||
97 | #define UTMIP_BIASPD (1 << 10) | ||
98 | |||
99 | #define UTMIP_HSRX_CFG0 0x810 | ||
100 | #define UTMIP_ELASTIC_LIMIT(x) (((x) & 0x1f) << 10) | ||
101 | #define UTMIP_IDLE_WAIT(x) (((x) & 0x1f) << 15) | ||
102 | |||
103 | #define UTMIP_HSRX_CFG1 0x814 | ||
104 | #define UTMIP_HS_SYNC_START_DLY(x) (((x) & 0x1f) << 1) | ||
105 | |||
106 | #define UTMIP_TX_CFG0 0x820 | ||
107 | #define UTMIP_FS_PREABMLE_J (1 << 19) | ||
108 | #define UTMIP_HS_DISCON_DISABLE (1 << 8) | ||
109 | |||
110 | #define UTMIP_MISC_CFG0 0x824 | ||
111 | #define UTMIP_DPDM_OBSERVE (1 << 26) | ||
112 | #define UTMIP_DPDM_OBSERVE_SEL(x) (((x) & 0xf) << 27) | ||
113 | #define UTMIP_DPDM_OBSERVE_SEL_FS_J UTMIP_DPDM_OBSERVE_SEL(0xf) | ||
114 | #define UTMIP_DPDM_OBSERVE_SEL_FS_K UTMIP_DPDM_OBSERVE_SEL(0xe) | ||
115 | #define UTMIP_DPDM_OBSERVE_SEL_FS_SE1 UTMIP_DPDM_OBSERVE_SEL(0xd) | ||
116 | #define UTMIP_DPDM_OBSERVE_SEL_FS_SE0 UTMIP_DPDM_OBSERVE_SEL(0xc) | ||
117 | #define UTMIP_SUSPEND_EXIT_ON_EDGE (1 << 22) | ||
118 | |||
119 | #define UTMIP_MISC_CFG1 0x828 | ||
120 | #define UTMIP_PLL_ACTIVE_DLY_COUNT(x) (((x) & 0x1f) << 18) | ||
121 | #define UTMIP_PLLU_STABLE_COUNT(x) (((x) & 0xfff) << 6) | ||
122 | |||
123 | #define UTMIP_DEBOUNCE_CFG0 0x82c | ||
124 | #define UTMIP_BIAS_DEBOUNCE_A(x) (((x) & 0xffff) << 0) | ||
125 | |||
126 | #define UTMIP_BAT_CHRG_CFG0 0x830 | ||
127 | #define UTMIP_PD_CHRG (1 << 0) | ||
128 | |||
129 | #define UTMIP_SPARE_CFG0 0x834 | ||
130 | #define FUSE_SETUP_SEL (1 << 3) | ||
131 | |||
132 | #define UTMIP_XCVR_CFG1 0x838 | ||
133 | #define UTMIP_FORCE_PDDISC_POWERDOWN (1 << 0) | ||
134 | #define UTMIP_FORCE_PDCHRP_POWERDOWN (1 << 2) | ||
135 | #define UTMIP_FORCE_PDDR_POWERDOWN (1 << 4) | ||
136 | #define UTMIP_XCVR_TERM_RANGE_ADJ(x) (((x) & 0xf) << 18) | ||
137 | |||
138 | #define UTMIP_BIAS_CFG1 0x83c | ||
139 | #define UTMIP_BIAS_PDTRK_COUNT(x) (((x) & 0x1f) << 3) | ||
140 | |||
141 | static DEFINE_SPINLOCK(utmip_pad_lock); | ||
142 | static int utmip_pad_count; | ||
143 | |||
144 | struct tegra_xtal_freq { | ||
145 | int freq; | ||
146 | u8 enable_delay; | ||
147 | u8 stable_count; | ||
148 | u8 active_delay; | ||
149 | u8 xtal_freq_count; | ||
150 | u16 debounce; | ||
151 | }; | ||
152 | |||
153 | static const struct tegra_xtal_freq tegra_freq_table[] = { | ||
154 | { | ||
155 | .freq = 12000000, | ||
156 | .enable_delay = 0x02, | ||
157 | .stable_count = 0x2F, | ||
158 | .active_delay = 0x04, | ||
159 | .xtal_freq_count = 0x76, | ||
160 | .debounce = 0x7530, | ||
161 | }, | ||
162 | { | ||
163 | .freq = 13000000, | ||
164 | .enable_delay = 0x02, | ||
165 | .stable_count = 0x33, | ||
166 | .active_delay = 0x05, | ||
167 | .xtal_freq_count = 0x7F, | ||
168 | .debounce = 0x7EF4, | ||
169 | }, | ||
170 | { | ||
171 | .freq = 19200000, | ||
172 | .enable_delay = 0x03, | ||
173 | .stable_count = 0x4B, | ||
174 | .active_delay = 0x06, | ||
175 | .xtal_freq_count = 0xBB, | ||
176 | .debounce = 0xBB80, | ||
177 | }, | ||
178 | { | ||
179 | .freq = 26000000, | ||
180 | .enable_delay = 0x04, | ||
181 | .stable_count = 0x66, | ||
182 | .active_delay = 0x09, | ||
183 | .xtal_freq_count = 0xFE, | ||
184 | .debounce = 0xFDE8, | ||
185 | }, | ||
186 | }; | ||
187 | |||
188 | static struct tegra_utmip_config utmip_default[] = { | ||
189 | [0] = { | ||
190 | .hssync_start_delay = 9, | ||
191 | .idle_wait_delay = 17, | ||
192 | .elastic_limit = 16, | ||
193 | .term_range_adj = 6, | ||
194 | .xcvr_setup = 9, | ||
195 | .xcvr_lsfslew = 1, | ||
196 | .xcvr_lsrslew = 1, | ||
197 | }, | ||
198 | [2] = { | ||
199 | .hssync_start_delay = 9, | ||
200 | .idle_wait_delay = 17, | ||
201 | .elastic_limit = 16, | ||
202 | .term_range_adj = 6, | ||
203 | .xcvr_setup = 9, | ||
204 | .xcvr_lsfslew = 2, | ||
205 | .xcvr_lsrslew = 2, | ||
206 | }, | ||
207 | }; | ||
208 | |||
209 | static inline bool phy_is_ulpi(struct tegra_usb_phy *phy) | ||
210 | { | ||
211 | return (phy->instance == 1); | ||
212 | } | ||
213 | |||
214 | static int utmip_pad_open(struct tegra_usb_phy *phy) | ||
215 | { | ||
216 | phy->pad_clk = clk_get_sys("utmip-pad", NULL); | ||
217 | if (IS_ERR(phy->pad_clk)) { | ||
218 | pr_err("%s: can't get utmip pad clock\n", __func__); | ||
219 | return PTR_ERR(phy->pad_clk); | ||
220 | } | ||
221 | |||
222 | if (phy->instance == 0) { | ||
223 | phy->pad_regs = phy->regs; | ||
224 | } else { | ||
225 | phy->pad_regs = ioremap(TEGRA_USB_BASE, TEGRA_USB_SIZE); | ||
226 | if (!phy->pad_regs) { | ||
227 | pr_err("%s: can't remap usb registers\n", __func__); | ||
228 | clk_put(phy->pad_clk); | ||
229 | return -ENOMEM; | ||
230 | } | ||
231 | } | ||
232 | return 0; | ||
233 | } | ||
234 | |||
235 | static void utmip_pad_close(struct tegra_usb_phy *phy) | ||
236 | { | ||
237 | if (phy->instance != 0) | ||
238 | iounmap(phy->pad_regs); | ||
239 | clk_put(phy->pad_clk); | ||
240 | } | ||
241 | |||
242 | static void utmip_pad_power_on(struct tegra_usb_phy *phy) | ||
243 | { | ||
244 | unsigned long val, flags; | ||
245 | void __iomem *base = phy->pad_regs; | ||
246 | |||
247 | clk_enable(phy->pad_clk); | ||
248 | |||
249 | spin_lock_irqsave(&utmip_pad_lock, flags); | ||
250 | |||
251 | if (utmip_pad_count++ == 0) { | ||
252 | val = readl(base + UTMIP_BIAS_CFG0); | ||
253 | val &= ~(UTMIP_OTGPD | UTMIP_BIASPD); | ||
254 | writel(val, base + UTMIP_BIAS_CFG0); | ||
255 | } | ||
256 | |||
257 | spin_unlock_irqrestore(&utmip_pad_lock, flags); | ||
258 | |||
259 | clk_disable(phy->pad_clk); | ||
260 | } | ||
261 | |||
262 | static int utmip_pad_power_off(struct tegra_usb_phy *phy) | ||
263 | { | ||
264 | unsigned long val, flags; | ||
265 | void __iomem *base = phy->pad_regs; | ||
266 | |||
267 | if (!utmip_pad_count) { | ||
268 | pr_err("%s: utmip pad already powered off\n", __func__); | ||
269 | return -EINVAL; | ||
270 | } | ||
271 | |||
272 | clk_enable(phy->pad_clk); | ||
273 | |||
274 | spin_lock_irqsave(&utmip_pad_lock, flags); | ||
275 | |||
276 | if (--utmip_pad_count == 0) { | ||
277 | val = readl(base + UTMIP_BIAS_CFG0); | ||
278 | val |= UTMIP_OTGPD | UTMIP_BIASPD; | ||
279 | writel(val, base + UTMIP_BIAS_CFG0); | ||
280 | } | ||
281 | |||
282 | spin_unlock_irqrestore(&utmip_pad_lock, flags); | ||
283 | |||
284 | clk_disable(phy->pad_clk); | ||
285 | |||
286 | return 0; | ||
287 | } | ||
288 | |||
289 | static int utmi_wait_register(void __iomem *reg, u32 mask, u32 result) | ||
290 | { | ||
291 | unsigned long timeout = 2000; | ||
292 | do { | ||
293 | if ((readl(reg) & mask) == result) | ||
294 | return 0; | ||
295 | udelay(1); | ||
296 | timeout--; | ||
297 | } while (timeout); | ||
298 | return -1; | ||
299 | } | ||
300 | |||
301 | static void utmi_phy_clk_disable(struct tegra_usb_phy *phy) | ||
302 | { | ||
303 | unsigned long val; | ||
304 | void __iomem *base = phy->regs; | ||
305 | |||
306 | if (phy->instance == 0) { | ||
307 | val = readl(base + USB_SUSP_CTRL); | ||
308 | val |= USB_SUSP_SET; | ||
309 | writel(val, base + USB_SUSP_CTRL); | ||
310 | |||
311 | udelay(10); | ||
312 | |||
313 | val = readl(base + USB_SUSP_CTRL); | ||
314 | val &= ~USB_SUSP_SET; | ||
315 | writel(val, base + USB_SUSP_CTRL); | ||
316 | } | ||
317 | |||
318 | if (phy->instance == 2) { | ||
319 | val = readl(base + USB_PORTSC1); | ||
320 | val |= USB_PORTSC1_PHCD; | ||
321 | writel(val, base + USB_PORTSC1); | ||
322 | } | ||
323 | |||
324 | if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, 0) < 0) | ||
325 | pr_err("%s: timeout waiting for phy to stabilize\n", __func__); | ||
326 | } | ||
327 | |||
328 | static void utmi_phy_clk_enable(struct tegra_usb_phy *phy) | ||
329 | { | ||
330 | unsigned long val; | ||
331 | void __iomem *base = phy->regs; | ||
332 | |||
333 | if (phy->instance == 0) { | ||
334 | val = readl(base + USB_SUSP_CTRL); | ||
335 | val |= USB_SUSP_CLR; | ||
336 | writel(val, base + USB_SUSP_CTRL); | ||
337 | |||
338 | udelay(10); | ||
339 | |||
340 | val = readl(base + USB_SUSP_CTRL); | ||
341 | val &= ~USB_SUSP_CLR; | ||
342 | writel(val, base + USB_SUSP_CTRL); | ||
343 | } | ||
344 | |||
345 | if (phy->instance == 2) { | ||
346 | val = readl(base + USB_PORTSC1); | ||
347 | val &= ~USB_PORTSC1_PHCD; | ||
348 | writel(val, base + USB_PORTSC1); | ||
349 | } | ||
350 | |||
351 | if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, | ||
352 | USB_PHY_CLK_VALID)) | ||
353 | pr_err("%s: timeout waiting for phy to stabilize\n", __func__); | ||
354 | } | ||
355 | |||
356 | static int utmi_phy_power_on(struct tegra_usb_phy *phy) | ||
357 | { | ||
358 | unsigned long val; | ||
359 | void __iomem *base = phy->regs; | ||
360 | struct tegra_utmip_config *config = phy->config; | ||
361 | |||
362 | val = readl(base + USB_SUSP_CTRL); | ||
363 | val |= UTMIP_RESET; | ||
364 | writel(val, base + USB_SUSP_CTRL); | ||
365 | |||
366 | if (phy->instance == 0) { | ||
367 | val = readl(base + USB1_LEGACY_CTRL); | ||
368 | val |= USB1_NO_LEGACY_MODE; | ||
369 | writel(val, base + USB1_LEGACY_CTRL); | ||
370 | } | ||
371 | |||
372 | val = readl(base + UTMIP_TX_CFG0); | ||
373 | val &= ~UTMIP_FS_PREABMLE_J; | ||
374 | writel(val, base + UTMIP_TX_CFG0); | ||
375 | |||
376 | val = readl(base + UTMIP_HSRX_CFG0); | ||
377 | val &= ~(UTMIP_IDLE_WAIT(~0) | UTMIP_ELASTIC_LIMIT(~0)); | ||
378 | val |= UTMIP_IDLE_WAIT(config->idle_wait_delay); | ||
379 | val |= UTMIP_ELASTIC_LIMIT(config->elastic_limit); | ||
380 | writel(val, base + UTMIP_HSRX_CFG0); | ||
381 | |||
382 | val = readl(base + UTMIP_HSRX_CFG1); | ||
383 | val &= ~UTMIP_HS_SYNC_START_DLY(~0); | ||
384 | val |= UTMIP_HS_SYNC_START_DLY(config->hssync_start_delay); | ||
385 | writel(val, base + UTMIP_HSRX_CFG1); | ||
386 | |||
387 | val = readl(base + UTMIP_DEBOUNCE_CFG0); | ||
388 | val &= ~UTMIP_BIAS_DEBOUNCE_A(~0); | ||
389 | val |= UTMIP_BIAS_DEBOUNCE_A(phy->freq->debounce); | ||
390 | writel(val, base + UTMIP_DEBOUNCE_CFG0); | ||
391 | |||
392 | val = readl(base + UTMIP_MISC_CFG0); | ||
393 | val &= ~UTMIP_SUSPEND_EXIT_ON_EDGE; | ||
394 | writel(val, base + UTMIP_MISC_CFG0); | ||
395 | |||
396 | val = readl(base + UTMIP_MISC_CFG1); | ||
397 | val &= ~(UTMIP_PLL_ACTIVE_DLY_COUNT(~0) | UTMIP_PLLU_STABLE_COUNT(~0)); | ||
398 | val |= UTMIP_PLL_ACTIVE_DLY_COUNT(phy->freq->active_delay) | | ||
399 | UTMIP_PLLU_STABLE_COUNT(phy->freq->stable_count); | ||
400 | writel(val, base + UTMIP_MISC_CFG1); | ||
401 | |||
402 | val = readl(base + UTMIP_PLL_CFG1); | ||
403 | val &= ~(UTMIP_XTAL_FREQ_COUNT(~0) | UTMIP_PLLU_ENABLE_DLY_COUNT(~0)); | ||
404 | val |= UTMIP_XTAL_FREQ_COUNT(phy->freq->xtal_freq_count) | | ||
405 | UTMIP_PLLU_ENABLE_DLY_COUNT(phy->freq->enable_delay); | ||
406 | writel(val, base + UTMIP_PLL_CFG1); | ||
407 | |||
408 | if (phy->mode == TEGRA_USB_PHY_MODE_DEVICE) { | ||
409 | val = readl(base + USB_SUSP_CTRL); | ||
410 | val &= ~(USB_WAKE_ON_CNNT_EN_DEV | USB_WAKE_ON_DISCON_EN_DEV); | ||
411 | writel(val, base + USB_SUSP_CTRL); | ||
412 | } | ||
413 | |||
414 | utmip_pad_power_on(phy); | ||
415 | |||
416 | val = readl(base + UTMIP_XCVR_CFG0); | ||
417 | val &= ~(UTMIP_FORCE_PD_POWERDOWN | UTMIP_FORCE_PD2_POWERDOWN | | ||
418 | UTMIP_FORCE_PDZI_POWERDOWN | UTMIP_XCVR_SETUP(~0) | | ||
419 | UTMIP_XCVR_LSFSLEW(~0) | UTMIP_XCVR_LSRSLEW(~0) | | ||
420 | UTMIP_XCVR_HSSLEW_MSB(~0)); | ||
421 | val |= UTMIP_XCVR_SETUP(config->xcvr_setup); | ||
422 | val |= UTMIP_XCVR_LSFSLEW(config->xcvr_lsfslew); | ||
423 | val |= UTMIP_XCVR_LSRSLEW(config->xcvr_lsrslew); | ||
424 | writel(val, base + UTMIP_XCVR_CFG0); | ||
425 | |||
426 | val = readl(base + UTMIP_XCVR_CFG1); | ||
427 | val &= ~(UTMIP_FORCE_PDDISC_POWERDOWN | UTMIP_FORCE_PDCHRP_POWERDOWN | | ||
428 | UTMIP_FORCE_PDDR_POWERDOWN | UTMIP_XCVR_TERM_RANGE_ADJ(~0)); | ||
429 | val |= UTMIP_XCVR_TERM_RANGE_ADJ(config->term_range_adj); | ||
430 | writel(val, base + UTMIP_XCVR_CFG1); | ||
431 | |||
432 | val = readl(base + UTMIP_BAT_CHRG_CFG0); | ||
433 | val &= ~UTMIP_PD_CHRG; | ||
434 | writel(val, base + UTMIP_BAT_CHRG_CFG0); | ||
435 | |||
436 | val = readl(base + UTMIP_BIAS_CFG1); | ||
437 | val &= ~UTMIP_BIAS_PDTRK_COUNT(~0); | ||
438 | val |= UTMIP_BIAS_PDTRK_COUNT(0x5); | ||
439 | writel(val, base + UTMIP_BIAS_CFG1); | ||
440 | |||
441 | if (phy->instance == 0) { | ||
442 | val = readl(base + UTMIP_SPARE_CFG0); | ||
443 | if (phy->mode == TEGRA_USB_PHY_MODE_DEVICE) | ||
444 | val &= ~FUSE_SETUP_SEL; | ||
445 | else | ||
446 | val |= FUSE_SETUP_SEL; | ||
447 | writel(val, base + UTMIP_SPARE_CFG0); | ||
448 | } | ||
449 | |||
450 | if (phy->instance == 2) { | ||
451 | val = readl(base + USB_SUSP_CTRL); | ||
452 | val |= UTMIP_PHY_ENABLE; | ||
453 | writel(val, base + USB_SUSP_CTRL); | ||
454 | } | ||
455 | |||
456 | val = readl(base + USB_SUSP_CTRL); | ||
457 | val &= ~UTMIP_RESET; | ||
458 | writel(val, base + USB_SUSP_CTRL); | ||
459 | |||
460 | if (phy->instance == 0) { | ||
461 | val = readl(base + USB1_LEGACY_CTRL); | ||
462 | val &= ~USB1_VBUS_SENSE_CTL_MASK; | ||
463 | val |= USB1_VBUS_SENSE_CTL_A_SESS_VLD; | ||
464 | writel(val, base + USB1_LEGACY_CTRL); | ||
465 | |||
466 | val = readl(base + USB_SUSP_CTRL); | ||
467 | val &= ~USB_SUSP_SET; | ||
468 | writel(val, base + USB_SUSP_CTRL); | ||
469 | } | ||
470 | |||
471 | utmi_phy_clk_enable(phy); | ||
472 | |||
473 | if (phy->instance == 2) { | ||
474 | val = readl(base + USB_PORTSC1); | ||
475 | val &= ~USB_PORTSC1_PTS(~0); | ||
476 | writel(val, base + USB_PORTSC1); | ||
477 | } | ||
478 | |||
479 | return 0; | ||
480 | } | ||
481 | |||
482 | static void utmi_phy_power_off(struct tegra_usb_phy *phy) | ||
483 | { | ||
484 | unsigned long val; | ||
485 | void __iomem *base = phy->regs; | ||
486 | |||
487 | utmi_phy_clk_disable(phy); | ||
488 | |||
489 | if (phy->mode == TEGRA_USB_PHY_MODE_DEVICE) { | ||
490 | val = readl(base + USB_SUSP_CTRL); | ||
491 | val &= ~USB_WAKEUP_DEBOUNCE_COUNT(~0); | ||
492 | val |= USB_WAKE_ON_CNNT_EN_DEV | USB_WAKEUP_DEBOUNCE_COUNT(5); | ||
493 | writel(val, base + USB_SUSP_CTRL); | ||
494 | } | ||
495 | |||
496 | val = readl(base + USB_SUSP_CTRL); | ||
497 | val |= UTMIP_RESET; | ||
498 | writel(val, base + USB_SUSP_CTRL); | ||
499 | |||
500 | val = readl(base + UTMIP_BAT_CHRG_CFG0); | ||
501 | val |= UTMIP_PD_CHRG; | ||
502 | writel(val, base + UTMIP_BAT_CHRG_CFG0); | ||
503 | |||
504 | val = readl(base + UTMIP_XCVR_CFG0); | ||
505 | val |= UTMIP_FORCE_PD_POWERDOWN | UTMIP_FORCE_PD2_POWERDOWN | | ||
506 | UTMIP_FORCE_PDZI_POWERDOWN; | ||
507 | writel(val, base + UTMIP_XCVR_CFG0); | ||
508 | |||
509 | val = readl(base + UTMIP_XCVR_CFG1); | ||
510 | val |= UTMIP_FORCE_PDDISC_POWERDOWN | UTMIP_FORCE_PDCHRP_POWERDOWN | | ||
511 | UTMIP_FORCE_PDDR_POWERDOWN; | ||
512 | writel(val, base + UTMIP_XCVR_CFG1); | ||
513 | |||
514 | utmip_pad_power_off(phy); | ||
515 | } | ||
516 | |||
517 | static void utmi_phy_preresume(struct tegra_usb_phy *phy) | ||
518 | { | ||
519 | unsigned long val; | ||
520 | void __iomem *base = phy->regs; | ||
521 | |||
522 | val = readl(base + UTMIP_TX_CFG0); | ||
523 | val |= UTMIP_HS_DISCON_DISABLE; | ||
524 | writel(val, base + UTMIP_TX_CFG0); | ||
525 | } | ||
526 | |||
527 | static void utmi_phy_postresume(struct tegra_usb_phy *phy) | ||
528 | { | ||
529 | unsigned long val; | ||
530 | void __iomem *base = phy->regs; | ||
531 | |||
532 | val = readl(base + UTMIP_TX_CFG0); | ||
533 | val &= ~UTMIP_HS_DISCON_DISABLE; | ||
534 | writel(val, base + UTMIP_TX_CFG0); | ||
535 | } | ||
536 | |||
537 | static void utmi_phy_restore_start(struct tegra_usb_phy *phy, | ||
538 | enum tegra_usb_phy_port_speed port_speed) | ||
539 | { | ||
540 | unsigned long val; | ||
541 | void __iomem *base = phy->regs; | ||
542 | |||
543 | val = readl(base + UTMIP_MISC_CFG0); | ||
544 | val &= ~UTMIP_DPDM_OBSERVE_SEL(~0); | ||
545 | if (port_speed == TEGRA_USB_PHY_PORT_SPEED_LOW) | ||
546 | val |= UTMIP_DPDM_OBSERVE_SEL_FS_K; | ||
547 | else | ||
548 | val |= UTMIP_DPDM_OBSERVE_SEL_FS_J; | ||
549 | writel(val, base + UTMIP_MISC_CFG0); | ||
550 | udelay(1); | ||
551 | |||
552 | val = readl(base + UTMIP_MISC_CFG0); | ||
553 | val |= UTMIP_DPDM_OBSERVE; | ||
554 | writel(val, base + UTMIP_MISC_CFG0); | ||
555 | udelay(10); | ||
556 | } | ||
557 | |||
558 | static void utmi_phy_restore_end(struct tegra_usb_phy *phy) | ||
559 | { | ||
560 | unsigned long val; | ||
561 | void __iomem *base = phy->regs; | ||
562 | |||
563 | val = readl(base + UTMIP_MISC_CFG0); | ||
564 | val &= ~UTMIP_DPDM_OBSERVE; | ||
565 | writel(val, base + UTMIP_MISC_CFG0); | ||
566 | udelay(10); | ||
567 | } | ||
568 | |||
569 | static int ulpi_phy_power_on(struct tegra_usb_phy *phy) | ||
570 | { | ||
571 | int ret; | ||
572 | unsigned long val; | ||
573 | void __iomem *base = phy->regs; | ||
574 | struct tegra_ulpi_config *config = phy->config; | ||
575 | |||
576 | gpio_direction_output(config->reset_gpio, 0); | ||
577 | msleep(5); | ||
578 | gpio_direction_output(config->reset_gpio, 1); | ||
579 | |||
580 | clk_enable(phy->clk); | ||
581 | msleep(1); | ||
582 | |||
583 | val = readl(base + USB_SUSP_CTRL); | ||
584 | val |= UHSIC_RESET; | ||
585 | writel(val, base + USB_SUSP_CTRL); | ||
586 | |||
587 | val = readl(base + ULPI_TIMING_CTRL_0); | ||
588 | val |= ULPI_OUTPUT_PINMUX_BYP | ULPI_CLKOUT_PINMUX_BYP; | ||
589 | writel(val, base + ULPI_TIMING_CTRL_0); | ||
590 | |||
591 | val = readl(base + USB_SUSP_CTRL); | ||
592 | val |= ULPI_PHY_ENABLE; | ||
593 | writel(val, base + USB_SUSP_CTRL); | ||
594 | |||
595 | val = 0; | ||
596 | writel(val, base + ULPI_TIMING_CTRL_1); | ||
597 | |||
598 | val |= ULPI_DATA_TRIMMER_SEL(4); | ||
599 | val |= ULPI_STPDIRNXT_TRIMMER_SEL(4); | ||
600 | val |= ULPI_DIR_TRIMMER_SEL(4); | ||
601 | writel(val, base + ULPI_TIMING_CTRL_1); | ||
602 | udelay(10); | ||
603 | |||
604 | val |= ULPI_DATA_TRIMMER_LOAD; | ||
605 | val |= ULPI_STPDIRNXT_TRIMMER_LOAD; | ||
606 | val |= ULPI_DIR_TRIMMER_LOAD; | ||
607 | writel(val, base + ULPI_TIMING_CTRL_1); | ||
608 | |||
609 | /* Fix VbusInvalid due to floating VBUS */ | ||
610 | ret = otg_io_write(phy->ulpi, 0x40, 0x08); | ||
611 | if (ret) { | ||
612 | pr_err("%s: ulpi write failed\n", __func__); | ||
613 | return ret; | ||
614 | } | ||
615 | |||
616 | ret = otg_io_write(phy->ulpi, 0x80, 0x0B); | ||
617 | if (ret) { | ||
618 | pr_err("%s: ulpi write failed\n", __func__); | ||
619 | return ret; | ||
620 | } | ||
621 | |||
622 | val = readl(base + USB_PORTSC1); | ||
623 | val |= USB_PORTSC1_WKOC | USB_PORTSC1_WKDS | USB_PORTSC1_WKCN; | ||
624 | writel(val, base + USB_PORTSC1); | ||
625 | |||
626 | val = readl(base + USB_SUSP_CTRL); | ||
627 | val |= USB_SUSP_CLR; | ||
628 | writel(val, base + USB_SUSP_CTRL); | ||
629 | udelay(100); | ||
630 | |||
631 | val = readl(base + USB_SUSP_CTRL); | ||
632 | val &= ~USB_SUSP_CLR; | ||
633 | writel(val, base + USB_SUSP_CTRL); | ||
634 | |||
635 | return 0; | ||
636 | } | ||
637 | |||
638 | static void ulpi_phy_power_off(struct tegra_usb_phy *phy) | ||
639 | { | ||
640 | unsigned long val; | ||
641 | void __iomem *base = phy->regs; | ||
642 | struct tegra_ulpi_config *config = phy->config; | ||
643 | |||
644 | /* Clear WKCN/WKDS/WKOC wake-on events that can cause the USB | ||
645 |  * Controller to immediately bring the ULPI PHY out of low-power mode. | ||
646 | */ | ||
647 | val = readl(base + USB_PORTSC1); | ||
648 | val &= ~(USB_PORTSC1_WKOC | USB_PORTSC1_WKDS | USB_PORTSC1_WKCN); | ||
649 | writel(val, base + USB_PORTSC1); | ||
650 | |||
651 | gpio_direction_output(config->reset_gpio, 0); | ||
652 | clk_disable(phy->clk); | ||
653 | } | ||
654 | |||
655 | struct tegra_usb_phy *tegra_usb_phy_open(int instance, void __iomem *regs, | ||
656 | void *config, enum tegra_usb_phy_mode phy_mode) | ||
657 | { | ||
658 | struct tegra_usb_phy *phy; | ||
659 | struct tegra_ulpi_config *ulpi_config; | ||
660 | unsigned long parent_rate; | ||
661 | int i; | ||
662 | int err; | ||
663 | |||
664 | phy = kzalloc(sizeof(struct tegra_usb_phy), GFP_KERNEL); /* zeroed so phy->freq starts NULL */ | ||
665 | if (!phy) | ||
666 | return ERR_PTR(-ENOMEM); | ||
667 | |||
668 | phy->instance = instance; | ||
669 | phy->regs = regs; | ||
670 | phy->config = config; | ||
671 | phy->mode = phy_mode; | ||
672 | |||
673 | if (!phy->config) { | ||
674 | if (phy_is_ulpi(phy)) { | ||
675 | pr_err("%s: ulpi phy configuration missing\n", __func__); | ||
676 | err = -EINVAL; | ||
677 | goto err0; | ||
678 | } else { | ||
679 | phy->config = &utmip_default[instance]; | ||
680 | } | ||
681 | } | ||
682 | |||
683 | phy->pll_u = clk_get_sys(NULL, "pll_u"); | ||
684 | if (IS_ERR(phy->pll_u)) { | ||
685 | pr_err("Can't get pll_u clock\n"); | ||
686 | err = PTR_ERR(phy->pll_u); | ||
687 | goto err0; | ||
688 | } | ||
689 | clk_enable(phy->pll_u); | ||
690 | |||
691 | parent_rate = clk_get_rate(clk_get_parent(phy->pll_u)); | ||
692 | for (i = 0; i < ARRAY_SIZE(tegra_freq_table); i++) { | ||
693 | if (tegra_freq_table[i].freq == parent_rate) { | ||
694 | phy->freq = &tegra_freq_table[i]; | ||
695 | break; | ||
696 | } | ||
697 | } | ||
698 | if (!phy->freq) { | ||
699 | pr_err("invalid pll_u parent rate %lu\n", parent_rate); | ||
700 | err = -EINVAL; | ||
701 | goto err1; | ||
702 | } | ||
703 | |||
704 | if (phy_is_ulpi(phy)) { | ||
705 | ulpi_config = config; | ||
706 | phy->clk = clk_get_sys(NULL, ulpi_config->clk); | ||
707 | if (IS_ERR(phy->clk)) { | ||
708 | pr_err("%s: can't get ulpi clock\n", __func__); | ||
709 | err = -ENXIO; | ||
710 | goto err1; | ||
711 | } | ||
712 | tegra_gpio_enable(ulpi_config->reset_gpio); | ||
713 | gpio_request(ulpi_config->reset_gpio, "ulpi_phy_reset_b"); | ||
714 | gpio_direction_output(ulpi_config->reset_gpio, 0); | ||
715 | phy->ulpi = otg_ulpi_create(&ulpi_viewport_access_ops, 0); | ||
716 | phy->ulpi->io_priv = regs + ULPI_VIEWPORT; | ||
717 | } else { | ||
718 | err = utmip_pad_open(phy); | ||
719 | if (err < 0) | ||
720 | goto err1; | ||
721 | } | ||
722 | |||
723 | return phy; | ||
724 | |||
725 | err1: | ||
726 | clk_disable(phy->pll_u); | ||
727 | clk_put(phy->pll_u); | ||
728 | err0: | ||
729 | kfree(phy); | ||
730 | return ERR_PTR(err); | ||
731 | } | ||
732 | |||
733 | int tegra_usb_phy_power_on(struct tegra_usb_phy *phy) | ||
734 | { | ||
735 | if (phy_is_ulpi(phy)) | ||
736 | return ulpi_phy_power_on(phy); | ||
737 | else | ||
738 | return utmi_phy_power_on(phy); | ||
739 | } | ||
740 | |||
741 | void tegra_usb_phy_power_off(struct tegra_usb_phy *phy) | ||
742 | { | ||
743 | if (phy_is_ulpi(phy)) | ||
744 | ulpi_phy_power_off(phy); | ||
745 | else | ||
746 | utmi_phy_power_off(phy); | ||
747 | } | ||
748 | |||
749 | void tegra_usb_phy_preresume(struct tegra_usb_phy *phy) | ||
750 | { | ||
751 | if (!phy_is_ulpi(phy)) | ||
752 | utmi_phy_preresume(phy); | ||
753 | } | ||
754 | |||
755 | void tegra_usb_phy_postresume(struct tegra_usb_phy *phy) | ||
756 | { | ||
757 | if (!phy_is_ulpi(phy)) | ||
758 | utmi_phy_postresume(phy); | ||
759 | } | ||
760 | |||
761 | void tegra_ehci_phy_restore_start(struct tegra_usb_phy *phy, | ||
762 | enum tegra_usb_phy_port_speed port_speed) | ||
763 | { | ||
764 | if (!phy_is_ulpi(phy)) | ||
765 | utmi_phy_restore_start(phy, port_speed); | ||
766 | } | ||
767 | |||
768 | void tegra_ehci_phy_restore_end(struct tegra_usb_phy *phy) | ||
769 | { | ||
770 | if (!phy_is_ulpi(phy)) | ||
771 | utmi_phy_restore_end(phy); | ||
772 | } | ||
773 | |||
774 | void tegra_usb_phy_clk_disable(struct tegra_usb_phy *phy) | ||
775 | { | ||
776 | if (!phy_is_ulpi(phy)) | ||
777 | utmi_phy_clk_disable(phy); | ||
778 | } | ||
779 | |||
780 | void tegra_usb_phy_clk_enable(struct tegra_usb_phy *phy) | ||
781 | { | ||
782 | if (!phy_is_ulpi(phy)) | ||
783 | utmi_phy_clk_enable(phy); | ||
784 | } | ||
785 | |||
786 | void tegra_usb_phy_close(struct tegra_usb_phy *phy) | ||
787 | { | ||
788 | if (phy_is_ulpi(phy)) | ||
789 | clk_put(phy->clk); | ||
790 | else | ||
791 | utmip_pad_close(phy); | ||
792 | clk_disable(phy->pll_u); | ||
793 | clk_put(phy->pll_u); | ||
794 | kfree(phy); | ||
795 | } | ||
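The exported entry points at the bottom of usb_phy.c form a small open / power_on / power_off / close API. The EHCI glue that consumes it is not part of this hunk, so the following is only a hypothetical sketch of a caller: TEGRA_USB_PHY_MODE_HOST is assumed to exist alongside the _DEVICE mode visible above, and error handling is trimmed.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <mach/usb_phy.h>

/* Hypothetical bring-up path in a host-controller glue driver. */
static int example_ehci_phy_bringup(struct platform_device *pdev,
				    void __iomem *regs,
				    struct tegra_utmip_config *cfg)
{
	struct tegra_usb_phy *phy;
	int err;

	/* Instances 0/2 are UTMIP and take 'cfg'; instance 1 would need a ULPI config. */
	phy = tegra_usb_phy_open(pdev->id, regs, cfg,
				 TEGRA_USB_PHY_MODE_HOST);	/* assumed mode value */
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	err = tegra_usb_phy_power_on(phy);
	if (err) {
		tegra_usb_phy_close(phy);
		return err;
	}

	platform_set_drvdata(pdev, phy);
	return 0;
}

static void example_ehci_phy_teardown(struct platform_device *pdev)
{
	struct tegra_usb_phy *phy = platform_get_drvdata(pdev);

	tegra_usb_phy_power_off(phy);
	tegra_usb_phy_close(phy);
}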
diff --git a/arch/arm/plat-omap/include/plat/usb.h b/arch/arm/plat-omap/include/plat/usb.h index 450a332f1009..fe449f1a1c14 100644 --- a/arch/arm/plat-omap/include/plat/usb.h +++ b/arch/arm/plat-omap/include/plat/usb.h | |||
@@ -7,15 +7,12 @@ | |||
7 | #include <plat/board.h> | 7 | #include <plat/board.h> |
8 | 8 | ||
9 | #define OMAP3_HS_USB_PORTS 3 | 9 | #define OMAP3_HS_USB_PORTS 3 |
10 | enum ehci_hcd_omap_mode { | ||
11 | EHCI_HCD_OMAP_MODE_UNKNOWN, | ||
12 | EHCI_HCD_OMAP_MODE_PHY, | ||
13 | EHCI_HCD_OMAP_MODE_TLL, | ||
14 | EHCI_HCD_OMAP_MODE_HSIC, | ||
15 | }; | ||
16 | 10 | ||
17 | enum ohci_omap3_port_mode { | 11 | enum usbhs_omap_port_mode { |
18 | OMAP_OHCI_PORT_MODE_UNUSED, | 12 | OMAP_USBHS_PORT_MODE_UNUSED, |
13 | OMAP_EHCI_PORT_MODE_PHY, | ||
14 | OMAP_EHCI_PORT_MODE_TLL, | ||
15 | OMAP_EHCI_PORT_MODE_HSIC, | ||
19 | OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0, | 16 | OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0, |
20 | OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM, | 17 | OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM, |
21 | OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0, | 18 | OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0, |
@@ -25,24 +22,45 @@ enum ohci_omap3_port_mode { | |||
25 | OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0, | 22 | OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0, |
26 | OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM, | 23 | OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM, |
27 | OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0, | 24 | OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0, |
28 | OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM, | 25 | OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM |
29 | }; | 26 | }; |
30 | 27 | ||
31 | struct ehci_hcd_omap_platform_data { | 28 | struct usbhs_omap_board_data { |
32 | enum ehci_hcd_omap_mode port_mode[OMAP3_HS_USB_PORTS]; | 29 | enum usbhs_omap_port_mode port_mode[OMAP3_HS_USB_PORTS]; |
33 | unsigned phy_reset:1; | ||
34 | 30 | ||
35 | /* have to be valid if phy_reset is true and portx is in phy mode */ | 31 | /* have to be valid if phy_reset is true and portx is in phy mode */ |
36 | int reset_gpio_port[OMAP3_HS_USB_PORTS]; | 32 | int reset_gpio_port[OMAP3_HS_USB_PORTS]; |
33 | |||
34 | /* Set this to true for ES2.x silicon */ | ||
35 | unsigned es2_compatibility:1; | ||
36 | |||
37 | unsigned phy_reset:1; | ||
38 | |||
39 | /* | ||
40 | * Regulators for USB PHYs. | ||
41 | * Each PHY can have a separate regulator. | ||
42 | */ | ||
43 | struct regulator *regulator[OMAP3_HS_USB_PORTS]; | ||
37 | }; | 44 | }; |
38 | 45 | ||
39 | struct ohci_hcd_omap_platform_data { | 46 | struct ehci_hcd_omap_platform_data { |
40 | enum ohci_omap3_port_mode port_mode[OMAP3_HS_USB_PORTS]; | 47 | enum usbhs_omap_port_mode port_mode[OMAP3_HS_USB_PORTS]; |
48 | int reset_gpio_port[OMAP3_HS_USB_PORTS]; | ||
49 | struct regulator *regulator[OMAP3_HS_USB_PORTS]; | ||
50 | unsigned phy_reset:1; | ||
51 | }; | ||
41 | 52 | ||
42 | /* Set this to true for ES2.x silicon */ | 53 | struct ohci_hcd_omap_platform_data { |
54 | enum usbhs_omap_port_mode port_mode[OMAP3_HS_USB_PORTS]; | ||
43 | unsigned es2_compatibility:1; | 55 | unsigned es2_compatibility:1; |
44 | }; | 56 | }; |
45 | 57 | ||
58 | struct usbhs_omap_platform_data { | ||
59 | enum usbhs_omap_port_mode port_mode[OMAP3_HS_USB_PORTS]; | ||
60 | |||
61 | struct ehci_hcd_omap_platform_data *ehci_data; | ||
62 | struct ohci_hcd_omap_platform_data *ohci_data; | ||
63 | }; | ||
46 | /*-------------------------------------------------------------------------*/ | 64 | /*-------------------------------------------------------------------------*/ |
47 | 65 | ||
48 | #define OMAP1_OTG_BASE 0xfffb0400 | 66 | #define OMAP1_OTG_BASE 0xfffb0400 |
@@ -80,18 +98,18 @@ enum musb_interface {MUSB_INTERFACE_ULPI, MUSB_INTERFACE_UTMI}; | |||
80 | 98 | ||
81 | extern void usb_musb_init(struct omap_musb_board_data *board_data); | 99 | extern void usb_musb_init(struct omap_musb_board_data *board_data); |
82 | 100 | ||
83 | extern void usb_ehci_init(const struct ehci_hcd_omap_platform_data *pdata); | 101 | extern void usbhs_init(const struct usbhs_omap_board_data *pdata); |
84 | 102 | ||
85 | extern void usb_ohci_init(const struct ohci_hcd_omap_platform_data *pdata); | 103 | extern int omap_usbhs_enable(struct device *dev); |
104 | extern void omap_usbhs_disable(struct device *dev); | ||
86 | 105 | ||
87 | extern int omap4430_phy_power(struct device *dev, int ID, int on); | 106 | extern int omap4430_phy_power(struct device *dev, int ID, int on); |
88 | extern int omap4430_phy_set_clk(struct device *dev, int on); | 107 | extern int omap4430_phy_set_clk(struct device *dev, int on); |
89 | extern int omap4430_phy_init(struct device *dev); | 108 | extern int omap4430_phy_init(struct device *dev); |
90 | extern int omap4430_phy_exit(struct device *dev); | 109 | extern int omap4430_phy_exit(struct device *dev); |
91 | 110 | extern int omap4430_phy_suspend(struct device *dev, int suspend); | |
92 | #endif | 111 | #endif |
93 | 112 | ||
94 | |||
95 | /* | 113 | /* |
96 | * FIXME correct answer depends on hmc_mode, | 114 | * FIXME correct answer depends on hmc_mode, |
97 | * as does (on omap1) any nonzero value for config->otg port number | 115 | * as does (on omap1) any nonzero value for config->otg port number |
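The header change folds the separate EHCI/OHCI mode enums into a single usbhs_omap_port_mode and introduces usbhs_omap_board_data plus usbhs_init(). A hypothetical board-file usage of the new types is sketched below; the port assignments and GPIO number are illustrative only, not taken from any real board file.

#include <linux/errno.h>
#include <linux/init.h>
#include <plat/usb.h>

static const struct usbhs_omap_board_data example_usbhs_bdata = {
	.port_mode[0]		= OMAP_EHCI_PORT_MODE_PHY,
	.port_mode[1]		= OMAP_USBHS_PORT_MODE_UNUSED,
	.port_mode[2]		= OMAP_USBHS_PORT_MODE_UNUSED,

	.phy_reset		= 1,
	.reset_gpio_port[0]	= 57,		/* illustrative GPIO */
	.reset_gpio_port[1]	= -EINVAL,	/* unused ports */
	.reset_gpio_port[2]	= -EINVAL,
};

static void __init example_board_usb_init(void)
{
	/* Replaces the old usb_ehci_init()/usb_ohci_init() calls. */
	usbhs_init(&example_usbhs_bdata);
}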
diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c index 49d3208793e5..69ddc9f76c13 100644 --- a/arch/arm/plat-omap/mailbox.c +++ b/arch/arm/plat-omap/mailbox.c | |||
@@ -32,7 +32,6 @@ | |||
32 | 32 | ||
33 | #include <plat/mailbox.h> | 33 | #include <plat/mailbox.h> |
34 | 34 | ||
35 | static struct workqueue_struct *mboxd; | ||
36 | static struct omap_mbox **mboxes; | 35 | static struct omap_mbox **mboxes; |
37 | 36 | ||
38 | static int mbox_configured; | 37 | static int mbox_configured; |
@@ -197,7 +196,7 @@ static void __mbox_rx_interrupt(struct omap_mbox *mbox) | |||
197 | /* no more messages in the fifo. clear IRQ source. */ | 196 | /* no more messages in the fifo. clear IRQ source. */ |
198 | ack_mbox_irq(mbox, IRQ_RX); | 197 | ack_mbox_irq(mbox, IRQ_RX); |
199 | nomem: | 198 | nomem: |
200 | queue_work(mboxd, &mbox->rxq->work); | 199 | schedule_work(&mbox->rxq->work); |
201 | } | 200 | } |
202 | 201 | ||
203 | static irqreturn_t mbox_interrupt(int irq, void *p) | 202 | static irqreturn_t mbox_interrupt(int irq, void *p) |
@@ -307,7 +306,7 @@ static void omap_mbox_fini(struct omap_mbox *mbox) | |||
307 | if (!--mbox->use_count) { | 306 | if (!--mbox->use_count) { |
308 | free_irq(mbox->irq, mbox); | 307 | free_irq(mbox->irq, mbox); |
309 | tasklet_kill(&mbox->txq->tasklet); | 308 | tasklet_kill(&mbox->txq->tasklet); |
310 | flush_work(&mbox->rxq->work); | 309 | flush_work_sync(&mbox->rxq->work); |
311 | mbox_queue_free(mbox->txq); | 310 | mbox_queue_free(mbox->txq); |
312 | mbox_queue_free(mbox->rxq); | 311 | mbox_queue_free(mbox->rxq); |
313 | } | 312 | } |
@@ -409,10 +408,6 @@ static int __init omap_mbox_init(void) | |||
409 | if (err) | 408 | if (err) |
410 | return err; | 409 | return err; |
411 | 410 | ||
412 | mboxd = create_workqueue("mboxd"); | ||
413 | if (!mboxd) | ||
414 | return -ENOMEM; | ||
415 | |||
416 | /* kfifo size sanity check: alignment and minimal size */ | 411 | /* kfifo size sanity check: alignment and minimal size */ |
417 | mbox_kfifo_size = ALIGN(mbox_kfifo_size, sizeof(mbox_msg_t)); | 412 | mbox_kfifo_size = ALIGN(mbox_kfifo_size, sizeof(mbox_msg_t)); |
418 | mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size, | 413 | mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size, |
@@ -424,7 +419,6 @@ subsys_initcall(omap_mbox_init); | |||
424 | 419 | ||
425 | static void __exit omap_mbox_exit(void) | 420 | static void __exit omap_mbox_exit(void) |
426 | { | 421 | { |
427 | destroy_workqueue(mboxd); | ||
428 | class_unregister(&omap_mbox_class); | 422 | class_unregister(&omap_mbox_class); |
429 | } | 423 | } |
430 | module_exit(omap_mbox_exit); | 424 | module_exit(omap_mbox_exit); |
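The mailbox change drops the driver-private "mboxd" workqueue and relies on the shared system workqueue; the only behavioural requirement is that teardown now uses flush_work_sync() to wait for the work item. A sketch of that general pattern, outside the mailbox code itself, with illustrative names:

#include <linux/workqueue.h>

static struct work_struct rx_work;

static void rx_work_fn(struct work_struct *work)
{
	/* drain the receive fifo, deliver messages, ... */
}

static void rx_setup(void)
{
	INIT_WORK(&rx_work, rx_work_fn);	/* unchanged by the conversion */
}

/* Bottom half: was queue_work(mboxd, &rx_work) on a private queue. */
static void rx_bottom_half(void)
{
	schedule_work(&rx_work);		/* system workqueue */
}

/* Teardown: was flush_work(); the private queue also needed destroy_workqueue(). */
static void rx_teardown(void)
{
	flush_work_sync(&rx_work);
}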
diff --git a/arch/arm/plat-s3c24xx/include/plat/udc.h b/arch/arm/plat-s3c24xx/include/plat/udc.h index 546bb4008f49..80457c6414aa 100644 --- a/arch/arm/plat-s3c24xx/include/plat/udc.h +++ b/arch/arm/plat-s3c24xx/include/plat/udc.h | |||
@@ -27,6 +27,10 @@ enum s3c2410_udc_cmd_e { | |||
27 | struct s3c2410_udc_mach_info { | 27 | struct s3c2410_udc_mach_info { |
28 | void (*udc_command)(enum s3c2410_udc_cmd_e); | 28 | void (*udc_command)(enum s3c2410_udc_cmd_e); |
29 | void (*vbus_draw)(unsigned int ma); | 29 | void (*vbus_draw)(unsigned int ma); |
30 | |||
31 | unsigned int pullup_pin; | ||
32 | unsigned int pullup_pin_inverted; | ||
33 | |||
30 | unsigned int vbus_pin; | 34 | unsigned int vbus_pin; |
31 | unsigned char vbus_pin_inverted; | 35 | unsigned char vbus_pin_inverted; |
32 | }; | 36 | }; |
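The two new fields let a board describe a GPIO-controlled D+ pull-up instead of (or in addition to) the udc_command() callback. A hypothetical machine-file snippet using them; the GPIO numbers and the S3C2410_GPxx() helpers are assumptions, not taken from a real board.

#include <linux/init.h>
#include <plat/udc.h>

static struct s3c2410_udc_mach_info example_udc_cfg __initdata = {
	.pullup_pin		= S3C2410_GPB(9),	/* assumed pull-up GPIO */
	.pullup_pin_inverted	= 0,
	.vbus_pin		= S3C2410_GPG(1),	/* assumed VBUS sense GPIO */
	.vbus_pin_inverted	= 0,
};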
diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c index c9113619029f..8d73724c0092 100644 --- a/arch/blackfin/kernel/time.c +++ b/arch/blackfin/kernel/time.c | |||
@@ -114,16 +114,14 @@ u32 arch_gettimeoffset(void) | |||
114 | 114 | ||
115 | /* | 115 | /* |
116 | * timer_interrupt() needs to keep up the real-time clock, | 116 | * timer_interrupt() needs to keep up the real-time clock, |
117 | * as well as call the "do_timer()" routine every clocktick | 117 | * as well as call the "xtime_update()" routine every clocktick |
118 | */ | 118 | */ |
119 | #ifdef CONFIG_CORE_TIMER_IRQ_L1 | 119 | #ifdef CONFIG_CORE_TIMER_IRQ_L1 |
120 | __attribute__((l1_text)) | 120 | __attribute__((l1_text)) |
121 | #endif | 121 | #endif |
122 | irqreturn_t timer_interrupt(int irq, void *dummy) | 122 | irqreturn_t timer_interrupt(int irq, void *dummy) |
123 | { | 123 | { |
124 | write_seqlock(&xtime_lock); | 124 | xtime_update(1); |
125 | do_timer(1); | ||
126 | write_sequnlock(&xtime_lock); | ||
127 | 125 | ||
128 | #ifdef CONFIG_IPIPE | 126 | #ifdef CONFIG_IPIPE |
129 | update_root_process_times(get_irq_regs()); | 127 | update_root_process_times(get_irq_regs()); |
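The same conversion recurs in the cris, frv and h8300 hunks below: the handler no longer takes xtime_lock and calls do_timer() itself, it just calls xtime_update(), which does the locking internally. The resulting handler shape, sketched once:

static irqreturn_t example_timer_interrupt(int irq, void *dev_id)
{
	profile_tick(CPU_PROFILING);

	/*
	 * was:	write_seqlock(&xtime_lock);
	 *	do_timer(1);
	 *	write_sequnlock(&xtime_lock);
	 */
	xtime_update(1);

	update_process_times(user_mode(get_irq_regs()));
	return IRQ_HANDLED;
}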
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S index 4122678529c0..c40d07f708e8 100644 --- a/arch/blackfin/kernel/vmlinux.lds.S +++ b/arch/blackfin/kernel/vmlinux.lds.S | |||
@@ -136,7 +136,7 @@ SECTIONS | |||
136 | 136 | ||
137 | . = ALIGN(16); | 137 | . = ALIGN(16); |
138 | INIT_DATA_SECTION(16) | 138 | INIT_DATA_SECTION(16) |
139 | PERCPU(4) | 139 | PERCPU(32, 4) |
140 | 140 | ||
141 | .exit.data : | 141 | .exit.data : |
142 | { | 142 | { |
diff --git a/arch/cris/arch-v10/kernel/time.c b/arch/cris/arch-v10/kernel/time.c index 00eb36f8debf..20c85b5dc7d0 100644 --- a/arch/cris/arch-v10/kernel/time.c +++ b/arch/cris/arch-v10/kernel/time.c | |||
@@ -140,7 +140,7 @@ stop_watchdog(void) | |||
140 | 140 | ||
141 | /* | 141 | /* |
142 | * timer_interrupt() needs to keep up the real-time clock, | 142 | * timer_interrupt() needs to keep up the real-time clock, |
143 | * as well as call the "do_timer()" routine every clocktick | 143 | * as well as call the "xtime_update()" routine every clocktick |
144 | */ | 144 | */ |
145 | 145 | ||
146 | //static unsigned short myjiff; /* used by our debug routine print_timestamp */ | 146 | //static unsigned short myjiff; /* used by our debug routine print_timestamp */ |
@@ -176,7 +176,7 @@ timer_interrupt(int irq, void *dev_id) | |||
176 | 176 | ||
177 | /* call the real timer interrupt handler */ | 177 | /* call the real timer interrupt handler */ |
178 | 178 | ||
179 | do_timer(1); | 179 | xtime_update(1); |
180 | 180 | ||
181 | cris_do_profile(regs); /* Save profiling information */ | 181 | cris_do_profile(regs); /* Save profiling information */ |
182 | return IRQ_HANDLED; | 182 | return IRQ_HANDLED; |
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c index 84fed3b4b079..4c9e3e1ba5d1 100644 --- a/arch/cris/arch-v32/kernel/smp.c +++ b/arch/cris/arch-v32/kernel/smp.c | |||
@@ -26,7 +26,9 @@ | |||
26 | #define FLUSH_ALL (void*)0xffffffff | 26 | #define FLUSH_ALL (void*)0xffffffff |
27 | 27 | ||
28 | /* Vector of locks used for various atomic operations */ | 28 | /* Vector of locks used for various atomic operations */ |
29 | spinlock_t cris_atomic_locks[] = { [0 ... LOCK_COUNT - 1] = SPIN_LOCK_UNLOCKED}; | 29 | spinlock_t cris_atomic_locks[] = { |
30 | [0 ... LOCK_COUNT - 1] = __SPIN_LOCK_UNLOCKED(cris_atomic_locks) | ||
31 | }; | ||
30 | 32 | ||
31 | /* CPU masks */ | 33 | /* CPU masks */ |
32 | cpumask_t phys_cpu_present_map = CPU_MASK_NONE; | 34 | cpumask_t phys_cpu_present_map = CPU_MASK_NONE; |
diff --git a/arch/cris/arch-v32/kernel/time.c b/arch/cris/arch-v32/kernel/time.c index a545211e999d..bb978ede8985 100644 --- a/arch/cris/arch-v32/kernel/time.c +++ b/arch/cris/arch-v32/kernel/time.c | |||
@@ -183,7 +183,7 @@ void handle_watchdog_bite(struct pt_regs *regs) | |||
183 | 183 | ||
184 | /* | 184 | /* |
185 | * timer_interrupt() needs to keep up the real-time clock, | 185 | * timer_interrupt() needs to keep up the real-time clock, |
186 | * as well as call the "do_timer()" routine every clocktick. | 186 | * as well as call the "xtime_update()" routine every clocktick. |
187 | */ | 187 | */ |
188 | extern void cris_do_profile(struct pt_regs *regs); | 188 | extern void cris_do_profile(struct pt_regs *regs); |
189 | 189 | ||
@@ -216,9 +216,7 @@ static inline irqreturn_t timer_interrupt(int irq, void *dev_id) | |||
216 | return IRQ_HANDLED; | 216 | return IRQ_HANDLED; |
217 | 217 | ||
218 | /* Call the real timer interrupt handler */ | 218 | /* Call the real timer interrupt handler */ |
219 | write_seqlock(&xtime_lock); | 219 | xtime_update(1); |
220 | do_timer(1); | ||
221 | write_sequnlock(&xtime_lock); | ||
222 | return IRQ_HANDLED; | 220 | return IRQ_HANDLED; |
223 | } | 221 | } |
224 | 222 | ||
diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S index c49be845f96a..728bbd9e7d4c 100644 --- a/arch/cris/kernel/vmlinux.lds.S +++ b/arch/cris/kernel/vmlinux.lds.S | |||
@@ -102,7 +102,7 @@ SECTIONS | |||
102 | #endif | 102 | #endif |
103 | __vmlinux_end = .; /* Last address of the physical file. */ | 103 | __vmlinux_end = .; /* Last address of the physical file. */ |
104 | #ifdef CONFIG_ETRAX_ARCH_V32 | 104 | #ifdef CONFIG_ETRAX_ARCH_V32 |
105 | PERCPU(PAGE_SIZE) | 105 | PERCPU(32, PAGE_SIZE) |
106 | 106 | ||
107 | .init.ramfs : { | 107 | .init.ramfs : { |
108 | INIT_RAM_FS | 108 | INIT_RAM_FS |
diff --git a/arch/frv/include/asm/futex.h b/arch/frv/include/asm/futex.h index 08b3d1da3583..4bea27f50a7a 100644 --- a/arch/frv/include/asm/futex.h +++ b/arch/frv/include/asm/futex.h | |||
@@ -7,10 +7,11 @@ | |||
7 | #include <asm/errno.h> | 7 | #include <asm/errno.h> |
8 | #include <asm/uaccess.h> | 8 | #include <asm/uaccess.h> |
9 | 9 | ||
10 | extern int futex_atomic_op_inuser(int encoded_op, int __user *uaddr); | 10 | extern int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr); |
11 | 11 | ||
12 | static inline int | 12 | static inline int |
13 | futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | 13 | futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, |
14 | u32 oldval, u32 newval) | ||
14 | { | 15 | { |
15 | return -ENOSYS; | 16 | return -ENOSYS; |
16 | } | 17 | } |
diff --git a/arch/frv/kernel/futex.c b/arch/frv/kernel/futex.c index 14f64b054c7e..d155ca9e5098 100644 --- a/arch/frv/kernel/futex.c +++ b/arch/frv/kernel/futex.c | |||
@@ -18,7 +18,7 @@ | |||
18 | * the various futex operations; MMU fault checking is ignored under no-MMU | 18 | * the various futex operations; MMU fault checking is ignored under no-MMU |
19 | * conditions | 19 | * conditions |
20 | */ | 20 | */ |
21 | static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, int *_oldval) | 21 | static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr, int *_oldval) |
22 | { | 22 | { |
23 | int oldval, ret; | 23 | int oldval, ret; |
24 | 24 | ||
@@ -50,7 +50,7 @@ static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, int *_o | |||
50 | return ret; | 50 | return ret; |
51 | } | 51 | } |
52 | 52 | ||
53 | static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, int *_oldval) | 53 | static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr, int *_oldval) |
54 | { | 54 | { |
55 | int oldval, ret; | 55 | int oldval, ret; |
56 | 56 | ||
@@ -83,7 +83,7 @@ static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, int *_o | |||
83 | return ret; | 83 | return ret; |
84 | } | 84 | } |
85 | 85 | ||
86 | static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, int *_oldval) | 86 | static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr, int *_oldval) |
87 | { | 87 | { |
88 | int oldval, ret; | 88 | int oldval, ret; |
89 | 89 | ||
@@ -116,7 +116,7 @@ static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, int *_ol | |||
116 | return ret; | 116 | return ret; |
117 | } | 117 | } |
118 | 118 | ||
119 | static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, int *_oldval) | 119 | static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr, int *_oldval) |
120 | { | 120 | { |
121 | int oldval, ret; | 121 | int oldval, ret; |
122 | 122 | ||
@@ -149,7 +149,7 @@ static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, int *_o | |||
149 | return ret; | 149 | return ret; |
150 | } | 150 | } |
151 | 151 | ||
152 | static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, int *_oldval) | 152 | static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr, int *_oldval) |
153 | { | 153 | { |
154 | int oldval, ret; | 154 | int oldval, ret; |
155 | 155 | ||
@@ -186,7 +186,7 @@ static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, int *_o | |||
186 | /* | 186 | /* |
187 | * do the futex operations | 187 | * do the futex operations |
188 | */ | 188 | */ |
189 | int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | 189 | int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) |
190 | { | 190 | { |
191 | int op = (encoded_op >> 28) & 7; | 191 | int op = (encoded_op >> 28) & 7; |
192 | int cmp = (encoded_op >> 24) & 15; | 192 | int cmp = (encoded_op >> 24) & 15; |
@@ -197,7 +197,7 @@ int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | |||
197 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) | 197 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) |
198 | oparg = 1 << oparg; | 198 | oparg = 1 << oparg; |
199 | 199 | ||
200 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | 200 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
201 | return -EFAULT; | 201 | return -EFAULT; |
202 | 202 | ||
203 | pagefault_disable(); | 203 | pagefault_disable(); |
diff --git a/arch/frv/kernel/time.c b/arch/frv/kernel/time.c index 0ddbbae83cb2..b457de496b70 100644 --- a/arch/frv/kernel/time.c +++ b/arch/frv/kernel/time.c | |||
@@ -50,21 +50,13 @@ static struct irqaction timer_irq = { | |||
50 | 50 | ||
51 | /* | 51 | /* |
52 | * timer_interrupt() needs to keep up the real-time clock, | 52 | * timer_interrupt() needs to keep up the real-time clock, |
53 | * as well as call the "do_timer()" routine every clocktick | 53 | * as well as call the "xtime_update()" routine every clocktick |
54 | */ | 54 | */ |
55 | static irqreturn_t timer_interrupt(int irq, void *dummy) | 55 | static irqreturn_t timer_interrupt(int irq, void *dummy) |
56 | { | 56 | { |
57 | profile_tick(CPU_PROFILING); | 57 | profile_tick(CPU_PROFILING); |
58 | /* | ||
59 | * Here we are in the timer irq handler. We just have irqs locally | ||
60 | * disabled but we don't know if the timer_bh is running on the other | ||
61 | * CPU. We need to avoid to SMP race with it. NOTE: we don't need | ||
62 | * the irq version of write_lock because as just said we have irq | ||
63 | * locally disabled. -arca | ||
64 | */ | ||
65 | write_seqlock(&xtime_lock); | ||
66 | 58 | ||
67 | do_timer(1); | 59 | xtime_update(1); |
68 | 60 | ||
69 | #ifdef CONFIG_HEARTBEAT | 61 | #ifdef CONFIG_HEARTBEAT |
70 | static unsigned short n; | 62 | static unsigned short n; |
@@ -72,8 +64,6 @@ static irqreturn_t timer_interrupt(int irq, void *dummy) | |||
72 | __set_LEDS(n); | 64 | __set_LEDS(n); |
73 | #endif /* CONFIG_HEARTBEAT */ | 65 | #endif /* CONFIG_HEARTBEAT */ |
74 | 66 | ||
75 | write_sequnlock(&xtime_lock); | ||
76 | |||
77 | update_process_times(user_mode(get_irq_regs())); | 67 | update_process_times(user_mode(get_irq_regs())); |
78 | 68 | ||
79 | return IRQ_HANDLED; | 69 | return IRQ_HANDLED; |
diff --git a/arch/frv/kernel/vmlinux.lds.S b/arch/frv/kernel/vmlinux.lds.S index 8b973f3cc90e..0daae8af5787 100644 --- a/arch/frv/kernel/vmlinux.lds.S +++ b/arch/frv/kernel/vmlinux.lds.S | |||
@@ -37,7 +37,7 @@ SECTIONS | |||
37 | _einittext = .; | 37 | _einittext = .; |
38 | 38 | ||
39 | INIT_DATA_SECTION(8) | 39 | INIT_DATA_SECTION(8) |
40 | PERCPU(4096) | 40 | PERCPU(L1_CACHE_BYTES, 4096) |
41 | 41 | ||
42 | . = ALIGN(PAGE_SIZE); | 42 | . = ALIGN(PAGE_SIZE); |
43 | __init_end = .; | 43 | __init_end = .; |
diff --git a/arch/h8300/kernel/time.c b/arch/h8300/kernel/time.c index 165005aff9df..32263a138aa6 100644 --- a/arch/h8300/kernel/time.c +++ b/arch/h8300/kernel/time.c | |||
@@ -35,9 +35,7 @@ void h8300_timer_tick(void) | |||
35 | { | 35 | { |
36 | if (current->pid) | 36 | if (current->pid) |
37 | profile_tick(CPU_PROFILING); | 37 | profile_tick(CPU_PROFILING); |
38 | write_seqlock(&xtime_lock); | 38 | xtime_update(1); |
39 | do_timer(1); | ||
40 | write_sequnlock(&xtime_lock); | ||
41 | update_process_times(user_mode(get_irq_regs())); | 39 | update_process_times(user_mode(get_irq_regs())); |
42 | } | 40 | } |
43 | 41 | ||
diff --git a/arch/h8300/kernel/timer/timer8.c b/arch/h8300/kernel/timer/timer8.c index 3946c0fa8374..7a1533fad47d 100644 --- a/arch/h8300/kernel/timer/timer8.c +++ b/arch/h8300/kernel/timer/timer8.c | |||
@@ -61,7 +61,7 @@ | |||
61 | 61 | ||
62 | /* | 62 | /* |
63 | * timer_interrupt() needs to keep up the real-time clock, | 63 | * timer_interrupt() needs to keep up the real-time clock, |
64 | * as well as call the "do_timer()" routine every clocktick | 64 | * as well as call the "xtime_update()" routine every clocktick |
65 | */ | 65 | */ |
66 | 66 | ||
67 | static irqreturn_t timer_interrupt(int irq, void *dev_id) | 67 | static irqreturn_t timer_interrupt(int irq, void *dev_id) |
diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig index 3ded8fe62759..1d7bca0a396d 100644 --- a/arch/ia64/configs/generic_defconfig +++ b/arch/ia64/configs/generic_defconfig | |||
@@ -233,3 +233,4 @@ CONFIG_CRYPTO_PCBC=m | |||
233 | CONFIG_CRYPTO_MD5=y | 233 | CONFIG_CRYPTO_MD5=y |
234 | # CONFIG_CRYPTO_ANSI_CPRNG is not set | 234 | # CONFIG_CRYPTO_ANSI_CPRNG is not set |
235 | CONFIG_CRC_T10DIF=y | 235 | CONFIG_CRC_T10DIF=y |
236 | CONFIG_MISC_DEVICES=y | ||
diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig index 3a98b2dd58ac..b11fa880e4b6 100644 --- a/arch/ia64/configs/gensparse_defconfig +++ b/arch/ia64/configs/gensparse_defconfig | |||
@@ -208,3 +208,4 @@ CONFIG_MAGIC_SYSRQ=y | |||
208 | CONFIG_DEBUG_KERNEL=y | 208 | CONFIG_DEBUG_KERNEL=y |
209 | CONFIG_DEBUG_MUTEXES=y | 209 | CONFIG_DEBUG_MUTEXES=y |
210 | CONFIG_CRYPTO_MD5=y | 210 | CONFIG_CRYPTO_MD5=y |
211 | CONFIG_MISC_DEVICES=y | ||
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c index 13633da0d3de..bff0824cf8a4 100644 --- a/arch/ia64/hp/sim/simserial.c +++ b/arch/ia64/hp/sim/simserial.c | |||
@@ -390,8 +390,7 @@ static void rs_unthrottle(struct tty_struct * tty) | |||
390 | } | 390 | } |
391 | 391 | ||
392 | 392 | ||
393 | static int rs_ioctl(struct tty_struct *tty, struct file * file, | 393 | static int rs_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) |
394 | unsigned int cmd, unsigned long arg) | ||
395 | { | 394 | { |
396 | if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && | 395 | if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && |
397 | (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) && | 396 | (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) && |
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h index a2e7368a0150..4336d080b241 100644 --- a/arch/ia64/include/asm/dma-mapping.h +++ b/arch/ia64/include/asm/dma-mapping.h | |||
@@ -12,6 +12,8 @@ | |||
12 | 12 | ||
13 | #define ARCH_HAS_DMA_GET_REQUIRED_MASK | 13 | #define ARCH_HAS_DMA_GET_REQUIRED_MASK |
14 | 14 | ||
15 | #define DMA_ERROR_CODE 0 | ||
16 | |||
15 | extern struct dma_map_ops *dma_ops; | 17 | extern struct dma_map_ops *dma_ops; |
16 | extern struct ia64_machine_vector ia64_mv; | 18 | extern struct ia64_machine_vector ia64_mv; |
17 | extern void set_iommu_machvec(void); | 19 | extern void set_iommu_machvec(void); |
diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h index c7f0f062239c..8428525ddb22 100644 --- a/arch/ia64/include/asm/futex.h +++ b/arch/ia64/include/asm/futex.h | |||
@@ -46,7 +46,7 @@ do { \ | |||
46 | } while (0) | 46 | } while (0) |
47 | 47 | ||
48 | static inline int | 48 | static inline int |
49 | futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | 49 | futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) |
50 | { | 50 | { |
51 | int op = (encoded_op >> 28) & 7; | 51 | int op = (encoded_op >> 28) & 7; |
52 | int cmp = (encoded_op >> 24) & 15; | 52 | int cmp = (encoded_op >> 24) & 15; |
@@ -56,7 +56,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | |||
56 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) | 56 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) |
57 | oparg = 1 << oparg; | 57 | oparg = 1 << oparg; |
58 | 58 | ||
59 | if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) | 59 | if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) |
60 | return -EFAULT; | 60 | return -EFAULT; |
61 | 61 | ||
62 | pagefault_disable(); | 62 | pagefault_disable(); |
@@ -100,23 +100,26 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | |||
100 | } | 100 | } |
101 | 101 | ||
102 | static inline int | 102 | static inline int |
103 | futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | 103 | futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, |
104 | u32 oldval, u32 newval) | ||
104 | { | 105 | { |
105 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | 106 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
106 | return -EFAULT; | 107 | return -EFAULT; |
107 | 108 | ||
108 | { | 109 | { |
109 | register unsigned long r8 __asm ("r8"); | 110 | register unsigned long r8 __asm ("r8") = 0; |
111 | unsigned long prev; | ||
110 | __asm__ __volatile__( | 112 | __asm__ __volatile__( |
111 | " mf;; \n" | 113 | " mf;; \n" |
112 | " mov ar.ccv=%3;; \n" | 114 | " mov ar.ccv=%3;; \n" |
113 | "[1:] cmpxchg4.acq %0=[%1],%2,ar.ccv \n" | 115 | "[1:] cmpxchg4.acq %0=[%1],%2,ar.ccv \n" |
114 | " .xdata4 \"__ex_table\", 1b-., 2f-. \n" | 116 | " .xdata4 \"__ex_table\", 1b-., 2f-. \n" |
115 | "[2:]" | 117 | "[2:]" |
116 | : "=r" (r8) | 118 | : "=r" (prev) |
117 | : "r" (uaddr), "r" (newval), | 119 | : "r" (uaddr), "r" (newval), |
118 | "rO" ((long) (unsigned) oldval) | 120 | "rO" ((long) (unsigned) oldval) |
119 | : "memory"); | 121 | : "memory"); |
122 | *uval = prev; | ||
120 | return r8; | 123 | return r8; |
121 | } | 124 | } |
122 | } | 125 | } |
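
The interface change above is easier to see outside the assembler: the fault status now comes back as the return value, while the value actually observed at uaddr is handed back through *uval. A user-space model of that calling convention, using an ordinary pointer and a GCC builtin in place of the __user address and the cmpxchg4.acq sequence (so it models the convention only, not the exception-table fault handling):

    #include <stdint.h>
    #include <stdio.h>

    static int cmpxchg_inatomic(uint32_t *uval, uint32_t *uaddr,
                                uint32_t oldval, uint32_t newval)
    {
        uint32_t expected = oldval;

        /* On failure 'expected' is rewritten with the value actually found. */
        __atomic_compare_exchange_n(uaddr, &expected, newval, 0,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);

        *uval = expected;   /* observed value travels back through *uval */
        return 0;           /* 0 = no fault; the real helper can return -EFAULT */
    }

    int main(void)
    {
        uint32_t word = 5, seen;
        int ret = cmpxchg_inatomic(&seen, &word, 5, 7);

        if (ret == 0 && seen == 5)
            printf("compare-and-swap hit, word is now %u\n", word);
        return 0;
    }
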
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h index 215d5454c7d3..3027e7516d85 100644 --- a/arch/ia64/include/asm/rwsem.h +++ b/arch/ia64/include/asm/rwsem.h | |||
@@ -25,20 +25,8 @@ | |||
25 | #error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead." | 25 | #error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead." |
26 | #endif | 26 | #endif |
27 | 27 | ||
28 | #include <linux/list.h> | ||
29 | #include <linux/spinlock.h> | ||
30 | |||
31 | #include <asm/intrinsics.h> | 28 | #include <asm/intrinsics.h> |
32 | 29 | ||
33 | /* | ||
34 | * the semaphore definition | ||
35 | */ | ||
36 | struct rw_semaphore { | ||
37 | signed long count; | ||
38 | spinlock_t wait_lock; | ||
39 | struct list_head wait_list; | ||
40 | }; | ||
41 | |||
42 | #define RWSEM_UNLOCKED_VALUE __IA64_UL_CONST(0x0000000000000000) | 30 | #define RWSEM_UNLOCKED_VALUE __IA64_UL_CONST(0x0000000000000000) |
43 | #define RWSEM_ACTIVE_BIAS (1L) | 31 | #define RWSEM_ACTIVE_BIAS (1L) |
44 | #define RWSEM_ACTIVE_MASK (0xffffffffL) | 32 | #define RWSEM_ACTIVE_MASK (0xffffffffL) |
@@ -46,26 +34,6 @@ struct rw_semaphore { | |||
46 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS | 34 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS |
47 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) | 35 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) |
48 | 36 | ||
49 | #define __RWSEM_INITIALIZER(name) \ | ||
50 | { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \ | ||
51 | LIST_HEAD_INIT((name).wait_list) } | ||
52 | |||
53 | #define DECLARE_RWSEM(name) \ | ||
54 | struct rw_semaphore name = __RWSEM_INITIALIZER(name) | ||
55 | |||
56 | extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); | ||
57 | extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); | ||
58 | extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem); | ||
59 | extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); | ||
60 | |||
61 | static inline void | ||
62 | init_rwsem (struct rw_semaphore *sem) | ||
63 | { | ||
64 | sem->count = RWSEM_UNLOCKED_VALUE; | ||
65 | spin_lock_init(&sem->wait_lock); | ||
66 | INIT_LIST_HEAD(&sem->wait_list); | ||
67 | } | ||
68 | |||
69 | /* | 37 | /* |
70 | * lock for reading | 38 | * lock for reading |
71 | */ | 39 | */ |
@@ -174,9 +142,4 @@ __downgrade_write (struct rw_semaphore *sem) | |||
174 | #define rwsem_atomic_add(delta, sem) atomic64_add(delta, (atomic64_t *)(&(sem)->count)) | 142 | #define rwsem_atomic_add(delta, sem) atomic64_add(delta, (atomic64_t *)(&(sem)->count)) |
175 | #define rwsem_atomic_update(delta, sem) atomic64_add_return(delta, (atomic64_t *)(&(sem)->count)) | 143 | #define rwsem_atomic_update(delta, sem) atomic64_add_return(delta, (atomic64_t *)(&(sem)->count)) |
176 | 144 | ||
177 | static inline int rwsem_is_locked(struct rw_semaphore *sem) | ||
178 | { | ||
179 | return (sem->count != 0); | ||
180 | } | ||
181 | |||
182 | #endif /* _ASM_IA64_RWSEM_H */ | 145 | #endif /* _ASM_IA64_RWSEM_H */ |
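
With struct rw_semaphore, the initializers and rwsem_is_locked() now provided by the generic <linux/rwsem.h>, the arch header keeps only the count encoding. A stand-alone model of how those bias values pack readers and the writer into one signed count (RWSEM_WAITING_BIAS is not visible in this hunk; the value below is an assumption consistent with the other constants, and a 64-bit long is assumed, as on ia64):

    #include <stdio.h>

    #define RWSEM_UNLOCKED_VALUE     0L
    #define RWSEM_ACTIVE_BIAS        (1L)
    #define RWSEM_ACTIVE_MASK        (0xffffffffL)
    #define RWSEM_WAITING_BIAS       (-RWSEM_ACTIVE_MASK - 1)      /* assumed */
    #define RWSEM_ACTIVE_READ_BIAS   RWSEM_ACTIVE_BIAS
    #define RWSEM_ACTIVE_WRITE_BIAS  (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

    int main(void)
    {
        long count = RWSEM_UNLOCKED_VALUE;

        count += RWSEM_ACTIVE_READ_BIAS;             /* down_read()  */
        printf("one reader: count=%ld active=%ld\n",
               count, count & RWSEM_ACTIVE_MASK);
        count -= RWSEM_ACTIVE_READ_BIAS;             /* up_read()    */

        count += RWSEM_ACTIVE_WRITE_BIAS;            /* down_write() */
        printf("one writer: count=%ld locked=%s\n",
               count, count != 0 ? "yes" : "no");
        return 0;
    }
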
diff --git a/arch/ia64/include/asm/xen/hypercall.h b/arch/ia64/include/asm/xen/hypercall.h index 96fc62366aa4..ed28bcd5bb85 100644 --- a/arch/ia64/include/asm/xen/hypercall.h +++ b/arch/ia64/include/asm/xen/hypercall.h | |||
@@ -107,7 +107,7 @@ extern unsigned long __hypercall(unsigned long a1, unsigned long a2, | |||
107 | static inline int | 107 | static inline int |
108 | xencomm_arch_hypercall_sched_op(int cmd, struct xencomm_handle *arg) | 108 | xencomm_arch_hypercall_sched_op(int cmd, struct xencomm_handle *arg) |
109 | { | 109 | { |
110 | return _hypercall2(int, sched_op_new, cmd, arg); | 110 | return _hypercall2(int, sched_op, cmd, arg); |
111 | } | 111 | } |
112 | 112 | ||
113 | static inline long | 113 | static inline long |
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 1753f6a30d55..80d50b83d419 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c | |||
@@ -582,6 +582,8 @@ out: | |||
582 | /* Get the CPE error record and log it */ | 582 | /* Get the CPE error record and log it */ |
583 | ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE); | 583 | ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE); |
584 | 584 | ||
585 | local_irq_disable(); | ||
586 | |||
585 | return IRQ_HANDLED; | 587 | return IRQ_HANDLED; |
586 | } | 588 | } |
587 | 589 | ||
@@ -1859,7 +1861,8 @@ ia64_mca_cpu_init(void *cpu_data) | |||
1859 | data = mca_bootmem(); | 1861 | data = mca_bootmem(); |
1860 | first_time = 0; | 1862 | first_time = 0; |
1861 | } else | 1863 | } else |
1862 | data = __get_free_pages(GFP_KERNEL, get_order(sz)); | 1864 | data = (void *)__get_free_pages(GFP_KERNEL, |
1865 | get_order(sz)); | ||
1863 | if (!data) | 1866 | if (!data) |
1864 | panic("Could not allocate MCA memory for cpu %d\n", | 1867 | panic("Could not allocate MCA memory for cpu %d\n", |
1865 | cpu); | 1868 | cpu); |
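
The added cast matters because __get_free_pages() returns the page address as an unsigned long, not a pointer, so assigning it straight to a pointer draws a compiler warning. A minimal stand-alone illustration with a malloc-backed stand-in for the allocator:

    #include <stdio.h>
    #include <stdlib.h>

    /* Same return type as __get_free_pages(): an address as unsigned long. */
    static unsigned long fake_get_free_pages(unsigned int order)
    {
        return (unsigned long)malloc(4096UL << order);
    }

    int main(void)
    {
        /* Without the (void *) cast this assignment is exactly the kind of
         * warning the hunk above silences. */
        void *data = (void *)fake_get_free_pages(0);

        printf("allocated at %p\n", data);
        free(data);
        return 0;
    }
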
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 9702fa92489e..156ad803d5b7 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c | |||
@@ -190,19 +190,10 @@ timer_interrupt (int irq, void *dev_id) | |||
190 | 190 | ||
191 | new_itm += local_cpu_data->itm_delta; | 191 | new_itm += local_cpu_data->itm_delta; |
192 | 192 | ||
193 | if (smp_processor_id() == time_keeper_id) { | 193 | if (smp_processor_id() == time_keeper_id) |
194 | /* | 194 | xtime_update(1); |
195 | * Here we are in the timer irq handler. We have irqs locally | 195 | |
196 | * disabled, but we don't know if the timer_bh is running on | 196 | local_cpu_data->itm_next = new_itm; |
197 | * another CPU. We need to avoid to SMP race by acquiring the | ||
198 | * xtime_lock. | ||
199 | */ | ||
200 | write_seqlock(&xtime_lock); | ||
201 | do_timer(1); | ||
202 | local_cpu_data->itm_next = new_itm; | ||
203 | write_sequnlock(&xtime_lock); | ||
204 | } else | ||
205 | local_cpu_data->itm_next = new_itm; | ||
206 | 197 | ||
207 | if (time_after(new_itm, ia64_get_itc())) | 198 | if (time_after(new_itm, ia64_get_itc())) |
208 | break; | 199 | break; |
@@ -222,7 +213,7 @@ skip_process_time_accounting: | |||
222 | * comfort, we increase the safety margin by | 213 | * comfort, we increase the safety margin by |
223 | * intentionally dropping the next tick(s). We do NOT | 214 | * intentionally dropping the next tick(s). We do NOT |
224 | * update itm.next because that would force us to call | 215 | * update itm.next because that would force us to call |
225 | * do_timer() which in turn would let our clock run | 216 | * xtime_update() which in turn would let our clock run |
226 | * too fast (with the potentially devastating effect | 217 | * too fast (with the potentially devastating effect |
227 | * of losing monotony of time). | 218 | * of losing monotony of time). |
228 | */ | 219 | */ |
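
The pattern above recurs throughout this series: the open-coded write_seqlock(&xtime_lock) / do_timer() / write_sequnlock() block is replaced by xtime_update(), which takes the lock itself, so architecture timer handlers no longer touch xtime_lock at all. A toy user-space model of the refactoring (a mutex stands in for the seqlock and do_timer() is reduced to a tick counter; this is not the kernel/time implementation):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t xtime_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long jiffies;

    static void do_timer(unsigned long ticks)   /* stand-in for the core helper */
    {
        jiffies += ticks;
    }

    /* One wrapper owns the locking, so every timer_interrupt() can simply
     * call xtime_update(nticks) instead of repeating lock/do_timer/unlock. */
    static void xtime_update(unsigned long ticks)
    {
        pthread_mutex_lock(&xtime_lock);
        do_timer(ticks);
        pthread_mutex_unlock(&xtime_lock);
    }

    int main(void)
    {
        xtime_update(1);
        printf("jiffies = %lu\n", jiffies);
        return 0;
    }
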
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index 5a4d044dcb1c..787de4a77d82 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S | |||
@@ -198,7 +198,7 @@ SECTIONS { | |||
198 | 198 | ||
199 | /* Per-cpu data: */ | 199 | /* Per-cpu data: */ |
200 | . = ALIGN(PERCPU_PAGE_SIZE); | 200 | . = ALIGN(PERCPU_PAGE_SIZE); |
201 | PERCPU_VADDR(PERCPU_ADDR, :percpu) | 201 | PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu) |
202 | __phys_per_cpu_start = __per_cpu_load; | 202 | __phys_per_cpu_start = __per_cpu_load; |
203 | /* | 203 | /* |
204 | * ensure percpu data fits | 204 | * ensure percpu data fits |
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c index dbc4cbecb5ed..77db0b514fa4 100644 --- a/arch/ia64/sn/kernel/setup.c +++ b/arch/ia64/sn/kernel/setup.c | |||
@@ -592,7 +592,7 @@ void __cpuinit sn_cpu_init(void) | |||
592 | /* | 592 | /* |
593 | * Don't check status. The SAL call is not supported on all PROMs | 593 | * Don't check status. The SAL call is not supported on all PROMs |
594 | * but a failure is harmless. | 594 | * but a failure is harmless. |
595 | * Architechtuallly, cpu_init is always called twice on cpu 0. We | 595 | * Architecturally, cpu_init is always called twice on cpu 0. We |
596 | * should set cpu_number on cpu 0 once. | 596 | * should set cpu_number on cpu 0 once. |
597 | */ | 597 | */ |
598 | if (cpuid == 0) { | 598 | if (cpuid == 0) { |
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c index 4d4536e3b6f3..9c271be9919a 100644 --- a/arch/ia64/sn/pci/tioca_provider.c +++ b/arch/ia64/sn/pci/tioca_provider.c | |||
@@ -509,7 +509,7 @@ tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir) | |||
509 | * use the GART mapped mode. | 509 | * use the GART mapped mode. |
510 | */ | 510 | */ |
511 | static u64 | 511 | static u64 |
512 | tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags) | 512 | tioca_dma_map(struct pci_dev *pdev, unsigned long paddr, size_t byte_count, int dma_flags) |
513 | { | 513 | { |
514 | u64 mapaddr; | 514 | u64 mapaddr; |
515 | 515 | ||
diff --git a/arch/ia64/xen/suspend.c b/arch/ia64/xen/suspend.c index fd66b048c6fa..419c8620945a 100644 --- a/arch/ia64/xen/suspend.c +++ b/arch/ia64/xen/suspend.c | |||
@@ -37,19 +37,14 @@ xen_mm_unpin_all(void) | |||
37 | /* nothing */ | 37 | /* nothing */ |
38 | } | 38 | } |
39 | 39 | ||
40 | void xen_pre_device_suspend(void) | ||
41 | { | ||
42 | /* nothing */ | ||
43 | } | ||
44 | |||
45 | void | 40 | void |
46 | xen_pre_suspend() | 41 | xen_arch_pre_suspend() |
47 | { | 42 | { |
48 | /* nothing */ | 43 | /* nothing */ |
49 | } | 44 | } |
50 | 45 | ||
51 | void | 46 | void |
52 | xen_post_suspend(int suspend_cancelled) | 47 | xen_arch_post_suspend(int suspend_cancelled) |
53 | { | 48 | { |
54 | if (suspend_cancelled) | 49 | if (suspend_cancelled) |
55 | return; | 50 | return; |
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c index c1c544513e8d..1f8244a78bee 100644 --- a/arch/ia64/xen/time.c +++ b/arch/ia64/xen/time.c | |||
@@ -139,14 +139,11 @@ consider_steal_time(unsigned long new_itm) | |||
139 | run_posix_cpu_timers(p); | 139 | run_posix_cpu_timers(p); |
140 | delta_itm += local_cpu_data->itm_delta * (stolen + blocked); | 140 | delta_itm += local_cpu_data->itm_delta * (stolen + blocked); |
141 | 141 | ||
142 | if (cpu == time_keeper_id) { | 142 | if (cpu == time_keeper_id) |
143 | write_seqlock(&xtime_lock); | 143 | xtime_update(stolen + blocked); |
144 | do_timer(stolen + blocked); | 144 | |
145 | local_cpu_data->itm_next = delta_itm + new_itm; | 145 | local_cpu_data->itm_next = delta_itm + new_itm; |
146 | write_sequnlock(&xtime_lock); | 146 | |
147 | } else { | ||
148 | local_cpu_data->itm_next = delta_itm + new_itm; | ||
149 | } | ||
150 | per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen; | 147 | per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen; |
151 | per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked; | 148 | per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked; |
152 | } | 149 | } |
diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c index bda86820bffd..84dd04048db9 100644 --- a/arch/m32r/kernel/time.c +++ b/arch/m32r/kernel/time.c | |||
@@ -107,15 +107,14 @@ u32 arch_gettimeoffset(void) | |||
107 | 107 | ||
108 | /* | 108 | /* |
109 | * timer_interrupt() needs to keep up the real-time clock, | 109 | * timer_interrupt() needs to keep up the real-time clock, |
110 | * as well as call the "do_timer()" routine every clocktick | 110 | * as well as call the "xtime_update()" routine every clocktick |
111 | */ | 111 | */ |
112 | static irqreturn_t timer_interrupt(int irq, void *dev_id) | 112 | static irqreturn_t timer_interrupt(int irq, void *dev_id) |
113 | { | 113 | { |
114 | #ifndef CONFIG_SMP | 114 | #ifndef CONFIG_SMP |
115 | profile_tick(CPU_PROFILING); | 115 | profile_tick(CPU_PROFILING); |
116 | #endif | 116 | #endif |
117 | /* XXX FIXME. Uh, the xtime_lock should be held here, no? */ | 117 | xtime_update(1); |
118 | do_timer(1); | ||
119 | 118 | ||
120 | #ifndef CONFIG_SMP | 119 | #ifndef CONFIG_SMP |
121 | update_process_times(user_mode(get_irq_regs())); | 120 | update_process_times(user_mode(get_irq_regs())); |
diff --git a/arch/m32r/kernel/vmlinux.lds.S b/arch/m32r/kernel/vmlinux.lds.S index 7da94eaa082b..c194d64cdbb9 100644 --- a/arch/m32r/kernel/vmlinux.lds.S +++ b/arch/m32r/kernel/vmlinux.lds.S | |||
@@ -53,7 +53,7 @@ SECTIONS | |||
53 | __init_begin = .; | 53 | __init_begin = .; |
54 | INIT_TEXT_SECTION(PAGE_SIZE) | 54 | INIT_TEXT_SECTION(PAGE_SIZE) |
55 | INIT_DATA_SECTION(16) | 55 | INIT_DATA_SECTION(16) |
56 | PERCPU(PAGE_SIZE) | 56 | PERCPU(32, PAGE_SIZE) |
57 | . = ALIGN(PAGE_SIZE); | 57 | . = ALIGN(PAGE_SIZE); |
58 | __init_end = .; | 58 | __init_end = .; |
59 | /* freed after init ends here */ | 59 | /* freed after init ends here */ |
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index bc9271b85759..a85e251c411f 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig | |||
@@ -554,14 +554,6 @@ config MVME147_SCC | |||
554 | This is the driver for the serial ports on the Motorola MVME147 | 554 | This is the driver for the serial ports on the Motorola MVME147 |
555 | boards. Everyone using one of these boards should say Y here. | 555 | boards. Everyone using one of these boards should say Y here. |
556 | 556 | ||
557 | config SERIAL167 | ||
558 | bool "CD2401 support for MVME166/7 serial ports" | ||
559 | depends on MVME16x | ||
560 | help | ||
561 | This is the driver for the serial ports on the Motorola MVME166, | ||
562 | 167, and 172 boards. Everyone using one of these boards should say | ||
563 | Y here. | ||
564 | |||
565 | config MVME162_SCC | 557 | config MVME162_SCC |
566 | bool "SCC support for MVME162 serial ports" | 558 | bool "SCC support for MVME162 serial ports" |
567 | depends on MVME16x && BROKEN | 559 | depends on MVME16x && BROKEN |
diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c index 9fe6fefb5e14..1edd95095cb4 100644 --- a/arch/m68k/bvme6000/config.c +++ b/arch/m68k/bvme6000/config.c | |||
@@ -45,8 +45,8 @@ extern int bvme6000_set_clock_mmss (unsigned long); | |||
45 | extern void bvme6000_reset (void); | 45 | extern void bvme6000_reset (void); |
46 | void bvme6000_set_vectors (void); | 46 | void bvme6000_set_vectors (void); |
47 | 47 | ||
48 | /* Save tick handler routine pointer, will point to do_timer() in | 48 | /* Save tick handler routine pointer, will point to xtime_update() in |
49 | * kernel/sched.c, called via bvme6000_process_int() */ | 49 | * kernel/time/timekeeping.c, called via bvme6000_process_int() */ |
50 | 50 | ||
51 | static irq_handler_t tick_handler; | 51 | static irq_handler_t tick_handler; |
52 | 52 | ||
diff --git a/arch/m68k/include/asm/coldfire.h b/arch/m68k/include/asm/coldfire.h index 213028cbe110..c94557b91448 100644 --- a/arch/m68k/include/asm/coldfire.h +++ b/arch/m68k/include/asm/coldfire.h | |||
@@ -14,39 +14,35 @@ | |||
14 | 14 | ||
15 | 15 | ||
16 | /* | 16 | /* |
17 | * Define master clock frequency. This is essentially done at config | 17 | * Define master clock frequency. This is done at config time now. |
18 | * time now. No point enumerating dozens of possible clock options | 18 | * No point enumerating dozens of possible clock options here. And |
19 | * here. Also the peripheral clock (bus clock) divide ratio is set | 19 | * in any case new boards come along from time to time that have yet |
20 | * at config time too. | 20 | * another different clocking frequency. |
21 | */ | 21 | */ |
22 | #ifdef CONFIG_CLOCK_SET | 22 | #ifdef CONFIG_CLOCK_SET |
23 | #define MCF_CLK CONFIG_CLOCK_FREQ | 23 | #define MCF_CLK CONFIG_CLOCK_FREQ |
24 | #define MCF_BUSCLK (CONFIG_CLOCK_FREQ / CONFIG_CLOCK_DIV) | ||
25 | #else | 24 | #else |
26 | #error "Don't know what your ColdFire CPU clock frequency is??" | 25 | #error "Don't know what your ColdFire CPU clock frequency is??" |
27 | #endif | 26 | #endif |
28 | 27 | ||
29 | /* | 28 | /* |
30 | * Define the processor support peripherals base address. | 29 | * Define the processor internal peripherals base address. |
31 | * This is generally setup by the boards start up code. | 30 | * |
31 | * The majority of ColdFire parts use an MBAR register to set | ||
32 | * the base address. Some have an IPSBAR register instead, and it | ||
33 | * has slightly different rules on its size and alignment. Some | ||
34 | * parts have fixed addresses and the internal peripherals cannot | ||
35 | * be relocated in the CPU address space. | ||
36 | * | ||
37 | * The value of MBAR or IPSBAR is config time selectable, we no | ||
38 | * longer hard define it here. No MBAR or IPSBAR will be defined if | ||
39 | * this part has a fixed peripheral address map. | ||
32 | */ | 40 | */ |
33 | #define MCF_MBAR 0x10000000 | 41 | #ifdef CONFIG_MBAR |
34 | #define MCF_MBAR2 0x80000000 | 42 | #define MCF_MBAR CONFIG_MBAR |
35 | #if defined(CONFIG_M54xx) | ||
36 | #define MCF_IPSBAR MCF_MBAR | ||
37 | #elif defined(CONFIG_M520x) | ||
38 | #define MCF_IPSBAR 0xFC000000 | ||
39 | #else | ||
40 | #define MCF_IPSBAR 0x40000000 | ||
41 | #endif | 43 | #endif |
42 | 44 | #ifdef CONFIG_IPSBAR | |
43 | #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ | 45 | #define MCF_IPSBAR CONFIG_IPSBAR |
44 | defined(CONFIG_M520x) | ||
45 | #undef MCF_MBAR | ||
46 | #define MCF_MBAR MCF_IPSBAR | ||
47 | #elif defined(CONFIG_M532x) | ||
48 | #undef MCF_MBAR | ||
49 | #define MCF_MBAR 0x00000000 | ||
50 | #endif | 46 | #endif |
51 | 47 | ||
52 | /****************************************************************************/ | 48 | /****************************************************************************/ |
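
Because MCF_MBAR/MCF_IPSBAR are now taken from Kconfig instead of the per-CPU #ifdef block that used to live here, the per-SoC headers changed below compose absolute peripheral addresses as base plus offset. A small illustration of that composition; the 0x10000000 base and the 0x1c0 UART offset are made-up example values, not taken from this patch:

    #include <stdint.h>
    #include <stdio.h>

    #define CONFIG_MBAR    0x10000000u              /* would come from Kconfig */
    #define MCF_MBAR       CONFIG_MBAR
    #define MCFUART_BASE1  (MCF_MBAR + 0x1c0)       /* absolute: base + offset */

    /* Typical access helper for such a memory-mapped register (only
     * meaningful on the target hardware, shown here for the idiom). */
    static inline uint8_t mcf_readb(uint32_t addr)
    {
        return *(volatile uint8_t *)(uintptr_t)addr;
    }

    int main(void)
    {
        printf("UART1 register block at 0x%08x\n", (unsigned)MCFUART_BASE1);
        (void)mcf_readb;    /* not called on a host; it would fault */
        return 0;
    }
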
diff --git a/arch/m68k/include/asm/m5206sim.h b/arch/m68k/include/asm/m5206sim.h index 561b03b5ddf8..9015eadd5c00 100644 --- a/arch/m68k/include/asm/m5206sim.h +++ b/arch/m68k/include/asm/m5206sim.h | |||
@@ -14,6 +14,7 @@ | |||
14 | 14 | ||
15 | #define CPU_NAME "COLDFIRE(m5206)" | 15 | #define CPU_NAME "COLDFIRE(m5206)" |
16 | #define CPU_INSTR_PER_JIFFY 3 | 16 | #define CPU_INSTR_PER_JIFFY 3 |
17 | #define MCF_BUSCLK MCF_CLK | ||
17 | 18 | ||
18 | #include <asm/m52xxacr.h> | 19 | #include <asm/m52xxacr.h> |
19 | 20 | ||
@@ -48,14 +49,14 @@ | |||
48 | #define MCFSIM_SWIVR 0x42 /* SW Watchdog intr reg (r/w) */ | 49 | #define MCFSIM_SWIVR 0x42 /* SW Watchdog intr reg (r/w) */ |
49 | #define MCFSIM_SWSR 0x43 /* SW Watchdog service (r/w) */ | 50 | #define MCFSIM_SWSR 0x43 /* SW Watchdog service (r/w) */ |
50 | 51 | ||
51 | #define MCFSIM_DCRR 0x46 /* DRAM Refresh reg (r/w) */ | 52 | #define MCFSIM_DCRR (MCF_MBAR + 0x46) /* DRAM Refresh reg (r/w) */ |
52 | #define MCFSIM_DCTR 0x4a /* DRAM Timing reg (r/w) */ | 53 | #define MCFSIM_DCTR (MCF_MBAR + 0x4a) /* DRAM Timing reg (r/w) */ |
53 | #define MCFSIM_DAR0 0x4c /* DRAM 0 Address reg(r/w) */ | 54 | #define MCFSIM_DAR0 (MCF_MBAR + 0x4c) /* DRAM 0 Address reg(r/w) */ |
54 | #define MCFSIM_DMR0 0x50 /* DRAM 0 Mask reg (r/w) */ | 55 | #define MCFSIM_DMR0 (MCF_MBAR + 0x50) /* DRAM 0 Mask reg (r/w) */ |
55 | #define MCFSIM_DCR0 0x57 /* DRAM 0 Control reg (r/w) */ | 56 | #define MCFSIM_DCR0 (MCF_MBAR + 0x57) /* DRAM 0 Control reg (r/w) */ |
56 | #define MCFSIM_DAR1 0x58 /* DRAM 1 Address reg (r/w) */ | 57 | #define MCFSIM_DAR1 (MCF_MBAR + 0x58) /* DRAM 1 Address reg (r/w) */ |
57 | #define MCFSIM_DMR1 0x5c /* DRAM 1 Mask reg (r/w) */ | 58 | #define MCFSIM_DMR1 (MCF_MBAR + 0x5c) /* DRAM 1 Mask reg (r/w) */ |
58 | #define MCFSIM_DCR1 0x63 /* DRAM 1 Control reg (r/w) */ | 59 | #define MCFSIM_DCR1 (MCF_MBAR + 0x63) /* DRAM 1 Control reg (r/w) */ |
59 | 60 | ||
60 | #define MCFSIM_CSAR0 0x64 /* CS 0 Address 0 reg (r/w) */ | 61 | #define MCFSIM_CSAR0 0x64 /* CS 0 Address 0 reg (r/w) */ |
61 | #define MCFSIM_CSMR0 0x68 /* CS 0 Mask 0 reg (r/w) */ | 62 | #define MCFSIM_CSMR0 0x68 /* CS 0 Mask 0 reg (r/w) */ |
@@ -89,9 +90,15 @@ | |||
89 | #define MCFSIM_PAR 0xcb /* Pin Assignment reg (r/w) */ | 90 | #define MCFSIM_PAR 0xcb /* Pin Assignment reg (r/w) */ |
90 | #endif | 91 | #endif |
91 | 92 | ||
93 | #define MCFTIMER_BASE1 (MCF_MBAR + 0x100) /* Base of TIMER1 */ | ||
94 | #define MCFTIMER_BASE2 (MCF_MBAR + 0x120) /* Base of TIMER2 */ | ||
95 | |||
92 | #define MCFSIM_PADDR (MCF_MBAR + 0x1c5) /* Parallel Direction (r/w) */ | 96 | #define MCFSIM_PADDR (MCF_MBAR + 0x1c5) /* Parallel Direction (r/w) */ |
93 | #define MCFSIM_PADAT (MCF_MBAR + 0x1c9) /* Parallel Port Value (r/w) */ | 97 | #define MCFSIM_PADAT (MCF_MBAR + 0x1c9) /* Parallel Port Value (r/w) */ |
94 | 98 | ||
99 | #define MCFDMA_BASE0 (MCF_MBAR + 0x200) /* Base address DMA 0 */ | ||
100 | #define MCFDMA_BASE1 (MCF_MBAR + 0x240) /* Base address DMA 1 */ | ||
101 | |||
95 | #if defined(CONFIG_NETtel) | 102 | #if defined(CONFIG_NETtel) |
96 | #define MCFUART_BASE1 0x180 /* Base address of UART1 */ | 103 | #define MCFUART_BASE1 0x180 /* Base address of UART1 */ |
97 | #define MCFUART_BASE2 0x140 /* Base address of UART2 */ | 104 | #define MCFUART_BASE2 0x140 /* Base address of UART2 */ |
diff --git a/arch/m68k/include/asm/m520xsim.h b/arch/m68k/include/asm/m520xsim.h index 88ed8239fe4e..55d5a4c5fe0b 100644 --- a/arch/m68k/include/asm/m520xsim.h +++ b/arch/m68k/include/asm/m520xsim.h | |||
@@ -13,13 +13,14 @@ | |||
13 | 13 | ||
14 | #define CPU_NAME "COLDFIRE(m520x)" | 14 | #define CPU_NAME "COLDFIRE(m520x)" |
15 | #define CPU_INSTR_PER_JIFFY 3 | 15 | #define CPU_INSTR_PER_JIFFY 3 |
16 | #define MCF_BUSCLK (MCF_CLK / 2) | ||
16 | 17 | ||
17 | #include <asm/m52xxacr.h> | 18 | #include <asm/m52xxacr.h> |
18 | 19 | ||
19 | /* | 20 | /* |
20 | * Define the 520x SIM register set addresses. | 21 | * Define the 520x SIM register set addresses. |
21 | */ | 22 | */ |
22 | #define MCFICM_INTC0 0x48000 /* Base for Interrupt Ctrl 0 */ | 23 | #define MCFICM_INTC0 0xFC048000 /* Base for Interrupt Ctrl 0 */ |
23 | #define MCFINTC_IPRH 0x00 /* Interrupt pending 32-63 */ | 24 | #define MCFINTC_IPRH 0x00 /* Interrupt pending 32-63 */ |
24 | #define MCFINTC_IPRL 0x04 /* Interrupt pending 1-31 */ | 25 | #define MCFINTC_IPRL 0x04 /* Interrupt pending 1-31 */ |
25 | #define MCFINTC_IMRH 0x08 /* Interrupt mask 32-63 */ | 26 | #define MCFINTC_IMRH 0x08 /* Interrupt mask 32-63 */ |
@@ -35,9 +36,9 @@ | |||
35 | * address to the SIMR and CIMR registers (not offsets into IPSBAR). | 36 | * address to the SIMR and CIMR registers (not offsets into IPSBAR). |
36 | * The 520x family only has a single INTC unit. | 37 | * The 520x family only has a single INTC unit. |
37 | */ | 38 | */ |
38 | #define MCFINTC0_SIMR (MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_SIMR) | 39 | #define MCFINTC0_SIMR (MCFICM_INTC0 + MCFINTC_SIMR) |
39 | #define MCFINTC0_CIMR (MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_CIMR) | 40 | #define MCFINTC0_CIMR (MCFICM_INTC0 + MCFINTC_CIMR) |
40 | #define MCFINTC0_ICR0 (MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_ICR0) | 41 | #define MCFINTC0_ICR0 (MCFICM_INTC0 + MCFINTC_ICR0) |
41 | #define MCFINTC1_SIMR (0) | 42 | #define MCFINTC1_SIMR (0) |
42 | #define MCFINTC1_CIMR (0) | 43 | #define MCFINTC1_CIMR (0) |
43 | #define MCFINTC1_ICR0 (0) | 44 | #define MCFINTC1_ICR0 (0) |
@@ -52,19 +53,22 @@ | |||
52 | /* | 53 | /* |
53 | * SDRAM configuration registers. | 54 | * SDRAM configuration registers. |
54 | */ | 55 | */ |
55 | #define MCFSIM_SDMR 0x000a8000 /* SDRAM Mode/Extended Mode Register */ | 56 | #define MCFSIM_SDMR 0xFC0a8000 /* SDRAM Mode/Extended Mode Register */ |
56 | #define MCFSIM_SDCR 0x000a8004 /* SDRAM Control Register */ | 57 | #define MCFSIM_SDCR 0xFC0a8004 /* SDRAM Control Register */ |
57 | #define MCFSIM_SDCFG1 0x000a8008 /* SDRAM Configuration Register 1 */ | 58 | #define MCFSIM_SDCFG1 0xFC0a8008 /* SDRAM Configuration Register 1 */ |
58 | #define MCFSIM_SDCFG2 0x000a800c /* SDRAM Configuration Register 2 */ | 59 | #define MCFSIM_SDCFG2 0xFC0a800c /* SDRAM Configuration Register 2 */ |
59 | #define MCFSIM_SDCS0 0x000a8110 /* SDRAM Chip Select 0 Configuration */ | 60 | #define MCFSIM_SDCS0 0xFC0a8110 /* SDRAM Chip Select 0 Configuration */ |
60 | #define MCFSIM_SDCS1 0x000a8114 /* SDRAM Chip Select 1 Configuration */ | 61 | #define MCFSIM_SDCS1 0xFC0a8114 /* SDRAM Chip Select 1 Configuration */ |
61 | 62 | ||
62 | /* | 63 | /* |
63 | * EPORT and GPIO registers. | 64 | * EPORT and GPIO registers. |
64 | */ | 65 | */ |
66 | #define MCFEPORT_EPPAR 0xFC088000 | ||
65 | #define MCFEPORT_EPDDR 0xFC088002 | 67 | #define MCFEPORT_EPDDR 0xFC088002 |
68 | #define MCFEPORT_EPIER 0xFC088003 | ||
66 | #define MCFEPORT_EPDR 0xFC088004 | 69 | #define MCFEPORT_EPDR 0xFC088004 |
67 | #define MCFEPORT_EPPDR 0xFC088005 | 70 | #define MCFEPORT_EPPDR 0xFC088005 |
71 | #define MCFEPORT_EPFR 0xFC088006 | ||
68 | 72 | ||
69 | #define MCFGPIO_PODR_BUSCTL 0xFC0A4000 | 73 | #define MCFGPIO_PODR_BUSCTL 0xFC0A4000 |
70 | #define MCFGPIO_PODR_BE 0xFC0A4001 | 74 | #define MCFGPIO_PODR_BE 0xFC0A4001 |
@@ -119,10 +123,10 @@ | |||
119 | #define MCFGPIO_IRQ_MAX 8 | 123 | #define MCFGPIO_IRQ_MAX 8 |
120 | #define MCFGPIO_IRQ_VECBASE MCFINT_VECBASE | 124 | #define MCFGPIO_IRQ_VECBASE MCFINT_VECBASE |
121 | 125 | ||
122 | #define MCF_GPIO_PAR_UART (0xA4036) | 126 | #define MCF_GPIO_PAR_UART 0xFC0A4036 |
123 | #define MCF_GPIO_PAR_FECI2C (0xA4033) | 127 | #define MCF_GPIO_PAR_FECI2C 0xFC0A4033 |
124 | #define MCF_GPIO_PAR_QSPI (0xA4034) | 128 | #define MCF_GPIO_PAR_QSPI 0xFC0A4034 |
125 | #define MCF_GPIO_PAR_FEC (0xA4038) | 129 | #define MCF_GPIO_PAR_FEC 0xFC0A4038 |
126 | 130 | ||
127 | #define MCF_GPIO_PAR_UART_PAR_URXD0 (0x0001) | 131 | #define MCF_GPIO_PAR_UART_PAR_URXD0 (0x0001) |
128 | #define MCF_GPIO_PAR_UART_PAR_UTXD0 (0x0002) | 132 | #define MCF_GPIO_PAR_UART_PAR_UTXD0 (0x0002) |
@@ -134,11 +138,23 @@ | |||
134 | #define MCF_GPIO_PAR_FECI2C_PAR_SCL_UTXD2 (0x04) | 138 | #define MCF_GPIO_PAR_FECI2C_PAR_SCL_UTXD2 (0x04) |
135 | 139 | ||
136 | /* | 140 | /* |
141 | * PIT timer module. | ||
142 | */ | ||
143 | #define MCFPIT_BASE1 0xFC080000 /* Base address of TIMER1 */ | ||
144 | #define MCFPIT_BASE2 0xFC084000 /* Base address of TIMER2 */ | ||
145 | |||
146 | /* | ||
137 | * UART module. | 147 | * UART module. |
138 | */ | 148 | */ |
139 | #define MCFUART_BASE1 0x60000 /* Base address of UART1 */ | 149 | #define MCFUART_BASE1 0xFC060000 /* Base address of UART1 */ |
140 | #define MCFUART_BASE2 0x64000 /* Base address of UART2 */ | 150 | #define MCFUART_BASE2 0xFC064000 /* Base address of UART2 */ |
141 | #define MCFUART_BASE3 0x68000 /* Base address of UART3 */ | 151 | #define MCFUART_BASE3 0xFC068000 /* Base address of UART3 */ |
152 | |||
153 | /* | ||
154 | * FEC module. | ||
155 | */ | ||
156 | #define MCFFEC_BASE 0xFC030000 /* Base of FEC ethernet */ | ||
157 | #define MCFFEC_SIZE 0x800 /* Register set size */ | ||
142 | 158 | ||
143 | /* | 159 | /* |
144 | * Reset Control Unit. | 160 | * Reset Control Unit. |
diff --git a/arch/m68k/include/asm/m523xsim.h b/arch/m68k/include/asm/m523xsim.h index 4ad7a00257a8..8996df62ede4 100644 --- a/arch/m68k/include/asm/m523xsim.h +++ b/arch/m68k/include/asm/m523xsim.h | |||
@@ -13,14 +13,16 @@ | |||
13 | 13 | ||
14 | #define CPU_NAME "COLDFIRE(m523x)" | 14 | #define CPU_NAME "COLDFIRE(m523x)" |
15 | #define CPU_INSTR_PER_JIFFY 3 | 15 | #define CPU_INSTR_PER_JIFFY 3 |
16 | #define MCF_BUSCLK (MCF_CLK / 2) | ||
16 | 17 | ||
17 | #include <asm/m52xxacr.h> | 18 | #include <asm/m52xxacr.h> |
18 | 19 | ||
19 | /* | 20 | /* |
20 | * Define the 523x SIM register set addresses. | 21 | * Define the 523x SIM register set addresses. |
21 | */ | 22 | */ |
22 | #define MCFICM_INTC0 0x0c00 /* Base for Interrupt Ctrl 0 */ | 23 | #define MCFICM_INTC0 (MCF_IPSBAR + 0x0c00) /* Base for Interrupt Ctrl 0 */ |
23 | #define MCFICM_INTC1 0x0d00 /* Base for Interrupt Ctrl 0 */ | 24 | #define MCFICM_INTC1 (MCF_IPSBAR + 0x0d00) /* Base for Interrupt Ctrl 0 */ |
25 | |||
24 | #define MCFINTC_IPRH 0x00 /* Interrupt pending 32-63 */ | 26 | #define MCFINTC_IPRH 0x00 /* Interrupt pending 32-63 */ |
25 | #define MCFINTC_IPRL 0x04 /* Interrupt pending 1-31 */ | 27 | #define MCFINTC_IPRL 0x04 /* Interrupt pending 1-31 */ |
26 | #define MCFINTC_IMRH 0x08 /* Interrupt mask 32-63 */ | 28 | #define MCFINTC_IMRH 0x08 /* Interrupt mask 32-63 */ |
@@ -39,11 +41,11 @@ | |||
39 | /* | 41 | /* |
40 | * SDRAM configuration registers. | 42 | * SDRAM configuration registers. |
41 | */ | 43 | */ |
42 | #define MCFSIM_DCR 0x44 /* SDRAM control */ | 44 | #define MCFSIM_DCR (MCF_IPSBAR + 0x44) /* Control */ |
43 | #define MCFSIM_DACR0 0x48 /* SDRAM base address 0 */ | 45 | #define MCFSIM_DACR0 (MCF_IPSBAR + 0x48) /* Base address 0 */ |
44 | #define MCFSIM_DMR0 0x4c /* SDRAM address mask 0 */ | 46 | #define MCFSIM_DMR0 (MCF_IPSBAR + 0x4c) /* Address mask 0 */ |
45 | #define MCFSIM_DACR1 0x50 /* SDRAM base address 1 */ | 47 | #define MCFSIM_DACR1 (MCF_IPSBAR + 0x50) /* Base address 1 */ |
46 | #define MCFSIM_DMR1 0x54 /* SDRAM address mask 1 */ | 48 | #define MCFSIM_DMR1 (MCF_IPSBAR + 0x54) /* Address mask 1 */ |
47 | 49 | ||
48 | /* | 50 | /* |
49 | * Reset Control Unit (relative to IPSBAR). | 51 | * Reset Control Unit (relative to IPSBAR). |
@@ -57,10 +59,19 @@ | |||
57 | /* | 59 | /* |
58 | * UART module. | 60 | * UART module. |
59 | */ | 61 | */ |
60 | #define MCFUART_BASE1 0x200 /* Base address of UART1 */ | 62 | #define MCFUART_BASE1 (MCF_IPSBAR + 0x200) |
61 | #define MCFUART_BASE2 0x240 /* Base address of UART2 */ | 63 | #define MCFUART_BASE2 (MCF_IPSBAR + 0x240) |
62 | #define MCFUART_BASE3 0x280 /* Base address of UART3 */ | 64 | #define MCFUART_BASE3 (MCF_IPSBAR + 0x280) |
65 | |||
66 | /* | ||
67 | * FEC ethernet module. | ||
68 | */ | ||
69 | #define MCFFEC_BASE (MCF_IPSBAR + 0x1000) | ||
70 | #define MCFFEC_SIZE 0x800 | ||
63 | 71 | ||
72 | /* | ||
73 | * GPIO module. | ||
74 | */ | ||
64 | #define MCFGPIO_PODR_ADDR (MCF_IPSBAR + 0x100000) | 75 | #define MCFGPIO_PODR_ADDR (MCF_IPSBAR + 0x100000) |
65 | #define MCFGPIO_PODR_DATAH (MCF_IPSBAR + 0x100001) | 76 | #define MCFGPIO_PODR_DATAH (MCF_IPSBAR + 0x100001) |
66 | #define MCFGPIO_PODR_DATAL (MCF_IPSBAR + 0x100002) | 77 | #define MCFGPIO_PODR_DATAL (MCF_IPSBAR + 0x100002) |
@@ -118,12 +129,22 @@ | |||
118 | #define MCFGPIO_PCLRR_ETPU (MCF_IPSBAR + 0x10003C) | 129 | #define MCFGPIO_PCLRR_ETPU (MCF_IPSBAR + 0x10003C) |
119 | 130 | ||
120 | /* | 131 | /* |
121 | * EPort | 132 | * PIT timer base addresses. |
122 | */ | 133 | */ |
134 | #define MCFPIT_BASE1 (MCF_IPSBAR + 0x150000) | ||
135 | #define MCFPIT_BASE2 (MCF_IPSBAR + 0x160000) | ||
136 | #define MCFPIT_BASE3 (MCF_IPSBAR + 0x170000) | ||
137 | #define MCFPIT_BASE4 (MCF_IPSBAR + 0x180000) | ||
123 | 138 | ||
139 | /* | ||
140 | * EPort | ||
141 | */ | ||
142 | #define MCFEPORT_EPPAR (MCF_IPSBAR + 0x130000) | ||
124 | #define MCFEPORT_EPDDR (MCF_IPSBAR + 0x130002) | 143 | #define MCFEPORT_EPDDR (MCF_IPSBAR + 0x130002) |
144 | #define MCFEPORT_EPIER (MCF_IPSBAR + 0x130003) | ||
125 | #define MCFEPORT_EPDR (MCF_IPSBAR + 0x130004) | 145 | #define MCFEPORT_EPDR (MCF_IPSBAR + 0x130004) |
126 | #define MCFEPORT_EPPDR (MCF_IPSBAR + 0x130005) | 146 | #define MCFEPORT_EPPDR (MCF_IPSBAR + 0x130005) |
147 | #define MCFEPORT_EPFR (MCF_IPSBAR + 0x130006) | ||
127 | 148 | ||
128 | /* | 149 | /* |
129 | * Generic GPIO support | 150 | * Generic GPIO support |
@@ -143,5 +164,14 @@ | |||
143 | */ | 164 | */ |
144 | #define MCFGPIO_PAR_QSPI (MCF_IPSBAR + 0x10004A) | 165 | #define MCFGPIO_PAR_QSPI (MCF_IPSBAR + 0x10004A) |
145 | #define MCFGPIO_PAR_TIMER (MCF_IPSBAR + 0x10004C) | 166 | #define MCFGPIO_PAR_TIMER (MCF_IPSBAR + 0x10004C) |
167 | |||
168 | /* | ||
169 | * DMA unit base addresses. | ||
170 | */ | ||
171 | #define MCFDMA_BASE0 (MCF_IPSBAR + 0x100) | ||
172 | #define MCFDMA_BASE1 (MCF_IPSBAR + 0x140) | ||
173 | #define MCFDMA_BASE2 (MCF_IPSBAR + 0x180) | ||
174 | #define MCFDMA_BASE3 (MCF_IPSBAR + 0x1C0) | ||
175 | |||
146 | /****************************************************************************/ | 176 | /****************************************************************************/ |
147 | #endif /* m523xsim_h */ | 177 | #endif /* m523xsim_h */ |
diff --git a/arch/m68k/include/asm/m5249sim.h b/arch/m68k/include/asm/m5249sim.h index 4908b118f2fd..805714ca8d7d 100644 --- a/arch/m68k/include/asm/m5249sim.h +++ b/arch/m68k/include/asm/m5249sim.h | |||
@@ -13,10 +13,16 @@ | |||
13 | 13 | ||
14 | #define CPU_NAME "COLDFIRE(m5249)" | 14 | #define CPU_NAME "COLDFIRE(m5249)" |
15 | #define CPU_INSTR_PER_JIFFY 3 | 15 | #define CPU_INSTR_PER_JIFFY 3 |
16 | #define MCF_BUSCLK (MCF_CLK / 2) | ||
16 | 17 | ||
17 | #include <asm/m52xxacr.h> | 18 | #include <asm/m52xxacr.h> |
18 | 19 | ||
19 | /* | 20 | /* |
21 | * The 5249 has a second MBAR region, define its address. | ||
22 | */ | ||
23 | #define MCF_MBAR2 0x80000000 | ||
24 | |||
25 | /* | ||
20 | * Define the 5249 SIM register set addresses. | 26 | * Define the 5249 SIM register set addresses. |
21 | */ | 27 | */ |
22 | #define MCFSIM_RSR 0x00 /* Reset Status reg (r/w) */ | 28 | #define MCFSIM_RSR 0x00 /* Reset Status reg (r/w) */ |
@@ -55,11 +61,17 @@ | |||
55 | #define MCFSIM_CSMR3 0xa8 /* CS 3 Mask reg (r/w) */ | 61 | #define MCFSIM_CSMR3 0xa8 /* CS 3 Mask reg (r/w) */ |
56 | #define MCFSIM_CSCR3 0xae /* CS 3 Control reg (r/w) */ | 62 | #define MCFSIM_CSCR3 0xae /* CS 3 Control reg (r/w) */ |
57 | 63 | ||
58 | #define MCFSIM_DCR 0x100 /* DRAM Control reg (r/w) */ | 64 | #define MCFSIM_DCR (MCF_MBAR + 0x100) /* DRAM Control */ |
59 | #define MCFSIM_DACR0 0x108 /* DRAM 0 Addr and Ctrl (r/w) */ | 65 | #define MCFSIM_DACR0 (MCF_MBAR + 0x108) /* DRAM 0 Addr/Ctrl */ |
60 | #define MCFSIM_DMR0 0x10c /* DRAM 0 Mask reg (r/w) */ | 66 | #define MCFSIM_DMR0 (MCF_MBAR + 0x10c) /* DRAM 0 Mask */ |
61 | #define MCFSIM_DACR1 0x110 /* DRAM 1 Addr and Ctrl (r/w) */ | 67 | #define MCFSIM_DACR1 (MCF_MBAR + 0x110) /* DRAM 1 Addr/Ctrl */ |
62 | #define MCFSIM_DMR1 0x114 /* DRAM 1 Mask reg (r/w) */ | 68 | #define MCFSIM_DMR1 (MCF_MBAR + 0x114) /* DRAM 1 Mask */ |
69 | |||
70 | /* | ||
71 | * Timer module. | ||
72 | */ | ||
73 | #define MCFTIMER_BASE1 (MCF_MBAR + 0x140) /* Base of TIMER1 */ | ||
74 | #define MCFTIMER_BASE2 (MCF_MBAR + 0x180) /* Base of TIMER2 */ | ||
63 | 75 | ||
64 | /* | 76 | /* |
65 | * UART module. | 77 | * UART module. |
@@ -68,6 +80,14 @@ | |||
68 | #define MCFUART_BASE2 0x200 /* Base address of UART2 */ | 80 | #define MCFUART_BASE2 0x200 /* Base address of UART2 */ |
69 | 81 | ||
70 | /* | 82 | /* |
83 | * DMA unit base addresses. | ||
84 | */ | ||
85 | #define MCFDMA_BASE0 (MCF_MBAR + 0x300) /* Base address DMA 0 */ | ||
86 | #define MCFDMA_BASE1 (MCF_MBAR + 0x340) /* Base address DMA 1 */ | ||
87 | #define MCFDMA_BASE2 (MCF_MBAR + 0x380) /* Base address DMA 2 */ | ||
88 | #define MCFDMA_BASE3 (MCF_MBAR + 0x3C0) /* Base address DMA 3 */ | ||
89 | |||
90 | /* | ||
71 | * Some symbol defines for the above... | 91 | * Some symbol defines for the above... |
72 | */ | 92 | */ |
73 | #define MCFSIM_SWDICR MCFSIM_ICR0 /* Watchdog timer ICR */ | 93 | #define MCFSIM_SWDICR MCFSIM_ICR0 /* Watchdog timer ICR */ |
diff --git a/arch/m68k/include/asm/m5272sim.h b/arch/m68k/include/asm/m5272sim.h index b7cc50abc831..759c2b07a994 100644 --- a/arch/m68k/include/asm/m5272sim.h +++ b/arch/m68k/include/asm/m5272sim.h | |||
@@ -14,6 +14,7 @@ | |||
14 | 14 | ||
15 | #define CPU_NAME "COLDFIRE(m5272)" | 15 | #define CPU_NAME "COLDFIRE(m5272)" |
16 | #define CPU_INSTR_PER_JIFFY 3 | 16 | #define CPU_INSTR_PER_JIFFY 3 |
17 | #define MCF_BUSCLK MCF_CLK | ||
17 | 18 | ||
18 | #include <asm/m52xxacr.h> | 19 | #include <asm/m52xxacr.h> |
19 | 20 | ||
@@ -80,6 +81,13 @@ | |||
80 | #define MCFSIM_PCDAT (MCF_MBAR + 0x96) /* Port C Data (r/w) */ | 81 | #define MCFSIM_PCDAT (MCF_MBAR + 0x96) /* Port C Data (r/w) */ |
81 | #define MCFSIM_PDCNT (MCF_MBAR + 0x98) /* Port D Control (r/w) */ | 82 | #define MCFSIM_PDCNT (MCF_MBAR + 0x98) /* Port D Control (r/w) */ |
82 | 83 | ||
84 | #define MCFDMA_BASE0 (MCF_MBAR + 0xe0) /* Base address DMA 0 */ | ||
85 | |||
86 | #define MCFTIMER_BASE1 (MCF_MBAR + 0x200) /* Base address TIMER1 */ | ||
87 | #define MCFTIMER_BASE2 (MCF_MBAR + 0x220) /* Base address TIMER2 */ | ||
88 | #define MCFTIMER_BASE3 (MCF_MBAR + 0x240) /* Base address TIMER4 */ | ||
89 | #define MCFTIMER_BASE4 (MCF_MBAR + 0x260) /* Base address TIMER3 */ | ||
90 | |||
83 | /* | 91 | /* |
84 | * Define system peripheral IRQ usage. | 92 | * Define system peripheral IRQ usage. |
85 | */ | 93 | */ |
diff --git a/arch/m68k/include/asm/m527xsim.h b/arch/m68k/include/asm/m527xsim.h index e8042e8bc003..74855a66c050 100644 --- a/arch/m68k/include/asm/m527xsim.h +++ b/arch/m68k/include/asm/m527xsim.h | |||
@@ -13,14 +13,16 @@ | |||
13 | 13 | ||
14 | #define CPU_NAME "COLDFIRE(m527x)" | 14 | #define CPU_NAME "COLDFIRE(m527x)" |
15 | #define CPU_INSTR_PER_JIFFY 3 | 15 | #define CPU_INSTR_PER_JIFFY 3 |
16 | #define MCF_BUSCLK (MCF_CLK / 2) | ||
16 | 17 | ||
17 | #include <asm/m52xxacr.h> | 18 | #include <asm/m52xxacr.h> |
18 | 19 | ||
19 | /* | 20 | /* |
20 | * Define the 5270/5271 SIM register set addresses. | 21 | * Define the 5270/5271 SIM register set addresses. |
21 | */ | 22 | */ |
22 | #define MCFICM_INTC0 0x0c00 /* Base for Interrupt Ctrl 0 */ | 23 | #define MCFICM_INTC0 (MCF_IPSBAR + 0x0c00) /* Base for Interrupt Ctrl 0 */ |
23 | #define MCFICM_INTC1 0x0d00 /* Base for Interrupt Ctrl 1 */ | 24 | #define MCFICM_INTC1 (MCF_IPSBAR + 0x0d00) /* Base for Interrupt Ctrl 1 */ |
25 | |||
24 | #define MCFINTC_IPRH 0x00 /* Interrupt pending 32-63 */ | 26 | #define MCFINTC_IPRH 0x00 /* Interrupt pending 32-63 */ |
25 | #define MCFINTC_IPRL 0x04 /* Interrupt pending 1-31 */ | 27 | #define MCFINTC_IPRL 0x04 /* Interrupt pending 1-31 */ |
26 | #define MCFINTC_IMRH 0x08 /* Interrupt mask 32-63 */ | 28 | #define MCFINTC_IMRH 0x08 /* Interrupt mask 32-63 */ |
@@ -42,29 +44,45 @@ | |||
42 | * SDRAM configuration registers. | 44 | * SDRAM configuration registers. |
43 | */ | 45 | */ |
44 | #ifdef CONFIG_M5271 | 46 | #ifdef CONFIG_M5271 |
45 | #define MCFSIM_DCR 0x40 /* SDRAM control */ | 47 | #define MCFSIM_DCR (MCF_IPSBAR + 0x40) /* Control */ |
46 | #define MCFSIM_DACR0 0x48 /* SDRAM base address 0 */ | 48 | #define MCFSIM_DACR0 (MCF_IPSBAR + 0x48) /* Base address 0 */ |
47 | #define MCFSIM_DMR0 0x4c /* SDRAM address mask 0 */ | 49 | #define MCFSIM_DMR0 (MCF_IPSBAR + 0x4c) /* Address mask 0 */ |
48 | #define MCFSIM_DACR1 0x50 /* SDRAM base address 1 */ | 50 | #define MCFSIM_DACR1 (MCF_IPSBAR + 0x50) /* Base address 1 */ |
49 | #define MCFSIM_DMR1 0x54 /* SDRAM address mask 1 */ | 51 | #define MCFSIM_DMR1 (MCF_IPSBAR + 0x54) /* Address mask 1 */ |
50 | #endif | 52 | #endif |
51 | #ifdef CONFIG_M5275 | 53 | #ifdef CONFIG_M5275 |
52 | #define MCFSIM_DMR 0x40 /* SDRAM mode */ | 54 | #define MCFSIM_DMR (MCF_IPSBAR + 0x40) /* Mode */ |
53 | #define MCFSIM_DCR 0x44 /* SDRAM control */ | 55 | #define MCFSIM_DCR (MCF_IPSBAR + 0x44) /* Control */ |
54 | #define MCFSIM_DCFG1 0x48 /* SDRAM configuration 1 */ | 56 | #define MCFSIM_DCFG1 (MCF_IPSBAR + 0x48) /* Configuration 1 */ |
55 | #define MCFSIM_DCFG2 0x4c /* SDRAM configuration 2 */ | 57 | #define MCFSIM_DCFG2 (MCF_IPSBAR + 0x4c) /* Configuration 2 */ |
56 | #define MCFSIM_DBAR0 0x50 /* SDRAM base address 0 */ | 58 | #define MCFSIM_DBAR0 (MCF_IPSBAR + 0x50) /* Base address 0 */ |
57 | #define MCFSIM_DMR0 0x54 /* SDRAM address mask 0 */ | 59 | #define MCFSIM_DMR0 (MCF_IPSBAR + 0x54) /* Address mask 0 */ |
58 | #define MCFSIM_DBAR1 0x58 /* SDRAM base address 1 */ | 60 | #define MCFSIM_DBAR1 (MCF_IPSBAR + 0x58) /* Base address 1 */ |
59 | #define MCFSIM_DMR1 0x5c /* SDRAM address mask 1 */ | 61 | #define MCFSIM_DMR1 (MCF_IPSBAR + 0x5c) /* Address mask 1 */ |
60 | #endif | 62 | #endif |
61 | 63 | ||
62 | /* | 64 | /* |
65 | * DMA unit base addresses. | ||
66 | */ | ||
67 | #define MCFDMA_BASE0 (MCF_IPSBAR + 0x100) | ||
68 | #define MCFDMA_BASE1 (MCF_IPSBAR + 0x140) | ||
69 | #define MCFDMA_BASE2 (MCF_IPSBAR + 0x180) | ||
70 | #define MCFDMA_BASE3 (MCF_IPSBAR + 0x1C0) | ||
71 | |||
72 | /* | ||
63 | * UART module. | 73 | * UART module. |
64 | */ | 74 | */ |
65 | #define MCFUART_BASE1 0x200 /* Base address of UART1 */ | 75 | #define MCFUART_BASE1 (MCF_IPSBAR + 0x200) |
66 | #define MCFUART_BASE2 0x240 /* Base address of UART2 */ | 76 | #define MCFUART_BASE2 (MCF_IPSBAR + 0x240) |
67 | #define MCFUART_BASE3 0x280 /* Base address of UART3 */ | 77 | #define MCFUART_BASE3 (MCF_IPSBAR + 0x280) |
78 | |||
79 | /* | ||
80 | * FEC ethernet module. | ||
81 | */ | ||
82 | #define MCFFEC_BASE0 (MCF_IPSBAR + 0x1000) | ||
83 | #define MCFFEC_SIZE0 0x800 | ||
84 | #define MCFFEC_BASE1 (MCF_IPSBAR + 0x1800) | ||
85 | #define MCFFEC_SIZE1 0x800 | ||
68 | 86 | ||
69 | #ifdef CONFIG_M5271 | 87 | #ifdef CONFIG_M5271 |
70 | #define MCFGPIO_PODR_ADDR (MCF_IPSBAR + 0x100000) | 88 | #define MCFGPIO_PODR_ADDR (MCF_IPSBAR + 0x100000) |
@@ -231,14 +249,22 @@ | |||
231 | #endif | 249 | #endif |
232 | 250 | ||
233 | /* | 251 | /* |
234 | * EPort | 252 | * PIT timer base addresses. |
235 | */ | 253 | */ |
254 | #define MCFPIT_BASE1 (MCF_IPSBAR + 0x150000) | ||
255 | #define MCFPIT_BASE2 (MCF_IPSBAR + 0x160000) | ||
256 | #define MCFPIT_BASE3 (MCF_IPSBAR + 0x170000) | ||
257 | #define MCFPIT_BASE4 (MCF_IPSBAR + 0x180000) | ||
236 | 258 | ||
259 | /* | ||
260 | * EPort | ||
261 | */ | ||
262 | #define MCFEPORT_EPPAR (MCF_IPSBAR + 0x130000) | ||
237 | #define MCFEPORT_EPDDR (MCF_IPSBAR + 0x130002) | 263 | #define MCFEPORT_EPDDR (MCF_IPSBAR + 0x130002) |
264 | #define MCFEPORT_EPIER (MCF_IPSBAR + 0x130003) | ||
238 | #define MCFEPORT_EPDR (MCF_IPSBAR + 0x130004) | 265 | #define MCFEPORT_EPDR (MCF_IPSBAR + 0x130004) |
239 | #define MCFEPORT_EPPDR (MCF_IPSBAR + 0x130005) | 266 | #define MCFEPORT_EPPDR (MCF_IPSBAR + 0x130005) |
240 | 267 | #define MCFEPORT_EPFR (MCF_IPSBAR + 0x130006) | |
241 | |||
242 | 268 | ||
243 | /* | 269 | /* |
244 | * GPIO pins setups to enable the UARTs. | 270 | * GPIO pins setups to enable the UARTs. |
diff --git a/arch/m68k/include/asm/m528xsim.h b/arch/m68k/include/asm/m528xsim.h index a6d2f4d9aaa0..d798bd5df56c 100644 --- a/arch/m68k/include/asm/m528xsim.h +++ b/arch/m68k/include/asm/m528xsim.h | |||
@@ -13,14 +13,16 @@ | |||
13 | 13 | ||
14 | #define CPU_NAME "COLDFIRE(m528x)" | 14 | #define CPU_NAME "COLDFIRE(m528x)" |
15 | #define CPU_INSTR_PER_JIFFY 3 | 15 | #define CPU_INSTR_PER_JIFFY 3 |
16 | #define MCF_BUSCLK MCF_CLK | ||
16 | 17 | ||
17 | #include <asm/m52xxacr.h> | 18 | #include <asm/m52xxacr.h> |
18 | 19 | ||
19 | /* | 20 | /* |
20 | * Define the 5280/5282 SIM register set addresses. | 21 | * Define the 5280/5282 SIM register set addresses. |
21 | */ | 22 | */ |
22 | #define MCFICM_INTC0 0x0c00 /* Base for Interrupt Ctrl 0 */ | 23 | #define MCFICM_INTC0 (MCF_IPSBAR + 0x0c00) /* Base for Interrupt Ctrl 0 */ |
23 | #define MCFICM_INTC1 0x0d00 /* Base for Interrupt Ctrl 0 */ | 24 | #define MCFICM_INTC1 (MCF_IPSBAR + 0x0d00) /* Base for Interrupt Ctrl 0 */ |
25 | |||
24 | #define MCFINTC_IPRH 0x00 /* Interrupt pending 32-63 */ | 26 | #define MCFINTC_IPRH 0x00 /* Interrupt pending 32-63 */ |
25 | #define MCFINTC_IPRL 0x04 /* Interrupt pending 1-31 */ | 27 | #define MCFINTC_IPRL 0x04 /* Interrupt pending 1-31 */ |
26 | #define MCFINTC_IMRH 0x08 /* Interrupt mask 32-63 */ | 28 | #define MCFINTC_IMRH 0x08 /* Interrupt mask 32-63 */ |
@@ -39,18 +41,32 @@ | |||
39 | /* | 41 | /* |
40 | * SDRAM configuration registers. | 42 | * SDRAM configuration registers. |
41 | */ | 43 | */ |
42 | #define MCFSIM_DCR 0x44 /* SDRAM control */ | 44 | #define MCFSIM_DCR (MCF_IPSBAR + 0x00000044) /* Control */ |
43 | #define MCFSIM_DACR0 0x48 /* SDRAM base address 0 */ | 45 | #define MCFSIM_DACR0 (MCF_IPSBAR + 0x00000048) /* Base address 0 */ |
44 | #define MCFSIM_DMR0 0x4c /* SDRAM address mask 0 */ | 46 | #define MCFSIM_DMR0 (MCF_IPSBAR + 0x0000004c) /* Address mask 0 */ |
45 | #define MCFSIM_DACR1 0x50 /* SDRAM base address 1 */ | 47 | #define MCFSIM_DACR1 (MCF_IPSBAR + 0x00000050) /* Base address 1 */ |
46 | #define MCFSIM_DMR1 0x54 /* SDRAM address mask 1 */ | 48 | #define MCFSIM_DMR1 (MCF_IPSBAR + 0x00000054) /* Address mask 1 */ |
49 | |||
50 | /* | ||
51 | * DMA unit base addresses. | ||
52 | */ | ||
53 | #define MCFDMA_BASE0 (MCF_IPSBAR + 0x00000100) | ||
54 | #define MCFDMA_BASE1 (MCF_IPSBAR + 0x00000140) | ||
55 | #define MCFDMA_BASE2 (MCF_IPSBAR + 0x00000180) | ||
56 | #define MCFDMA_BASE3 (MCF_IPSBAR + 0x000001C0) | ||
47 | 57 | ||
48 | /* | 58 | /* |
49 | * UART module. | 59 | * UART module. |
50 | */ | 60 | */ |
51 | #define MCFUART_BASE1 0x200 /* Base address of UART1 */ | 61 | #define MCFUART_BASE1 (MCF_IPSBAR + 0x00000200) |
52 | #define MCFUART_BASE2 0x240 /* Base address of UART2 */ | 62 | #define MCFUART_BASE2 (MCF_IPSBAR + 0x00000240) |
53 | #define MCFUART_BASE3 0x280 /* Base address of UART3 */ | 63 | #define MCFUART_BASE3 (MCF_IPSBAR + 0x00000280) |
64 | |||
65 | /* | ||
66 | * FEC ethernet module. | ||
67 | */ | ||
68 | #define MCFFEC_BASE (MCF_IPSBAR + 0x00001000) | ||
69 | #define MCFFEC_SIZE 0x800 | ||
54 | 70 | ||
55 | /* | 71 | /* |
56 | * GPIO registers | 72 | * GPIO registers |
@@ -163,6 +179,14 @@ | |||
163 | #define MCFGPIO_PUAPAR (MCF_IPSBAR + 0x0010005C) | 179 | #define MCFGPIO_PUAPAR (MCF_IPSBAR + 0x0010005C) |
164 | 180 | ||
165 | /* | 181 | /* |
182 | * PIT timer base addresses. | ||
183 | */ | ||
184 | #define MCFPIT_BASE1 (MCF_IPSBAR + 0x00150000) | ||
185 | #define MCFPIT_BASE2 (MCF_IPSBAR + 0x00160000) | ||
186 | #define MCFPIT_BASE3 (MCF_IPSBAR + 0x00170000) | ||
187 | #define MCFPIT_BASE4 (MCF_IPSBAR + 0x00180000) | ||
188 | |||
189 | /* | ||
166 | * Edge Port registers | 190 | * Edge Port registers |
167 | */ | 191 | */ |
168 | #define MCFEPORT_EPPAR (MCF_IPSBAR + 0x00130000) | 192 | #define MCFEPORT_EPPAR (MCF_IPSBAR + 0x00130000) |
diff --git a/arch/m68k/include/asm/m5307sim.h b/arch/m68k/include/asm/m5307sim.h index 0bf57397e7a9..4c94c01f36c4 100644 --- a/arch/m68k/include/asm/m5307sim.h +++ b/arch/m68k/include/asm/m5307sim.h | |||
@@ -16,6 +16,7 @@ | |||
16 | 16 | ||
17 | #define CPU_NAME "COLDFIRE(m5307)" | 17 | #define CPU_NAME "COLDFIRE(m5307)" |
18 | #define CPU_INSTR_PER_JIFFY 3 | 18 | #define CPU_INSTR_PER_JIFFY 3 |
19 | #define MCF_BUSCLK (MCF_CLK / 2) | ||
19 | 20 | ||
20 | #include <asm/m53xxacr.h> | 21 | #include <asm/m53xxacr.h> |
21 | 22 | ||
@@ -89,16 +90,30 @@ | |||
89 | #define MCFSIM_CSCR7 0xde /* CS 7 Control reg (r/w) */ | 90 | #define MCFSIM_CSCR7 0xde /* CS 7 Control reg (r/w) */ |
90 | #endif /* CONFIG_OLDMASK */ | 91 | #endif /* CONFIG_OLDMASK */ |
91 | 92 | ||
92 | #define MCFSIM_DCR 0x100 /* DRAM Control reg (r/w) */ | 93 | #define MCFSIM_DCR (MCF_MBAR + 0x100) /* DRAM Control */ |
93 | #define MCFSIM_DACR0 0x108 /* DRAM 0 Addr and Ctrl (r/w) */ | 94 | #define MCFSIM_DACR0 (MCF_MBAR + 0x108) /* DRAM Addr/Ctrl 0 */ |
94 | #define MCFSIM_DMR0 0x10c /* DRAM 0 Mask reg (r/w) */ | 95 | #define MCFSIM_DMR0 (MCF_MBAR + 0x10c) /* DRAM Mask 0 */ |
95 | #define MCFSIM_DACR1 0x110 /* DRAM 1 Addr and Ctrl (r/w) */ | 96 | #define MCFSIM_DACR1 (MCF_MBAR + 0x110) /* DRAM Addr/Ctrl 1 */ |
96 | #define MCFSIM_DMR1 0x114 /* DRAM 1 Mask reg (r/w) */ | 97 | #define MCFSIM_DMR1 (MCF_MBAR + 0x114) /* DRAM Mask 1 */ |
98 | |||
99 | /* | ||
100 | * Timer module. | ||
101 | */ | ||
102 | #define MCFTIMER_BASE1 (MCF_MBAR + 0x140) /* Base of TIMER1 */ | ||
103 | #define MCFTIMER_BASE2 (MCF_MBAR + 0x180) /* Base of TIMER2 */ | ||
97 | 104 | ||
98 | #define MCFSIM_PADDR (MCF_MBAR + 0x244) | 105 | #define MCFSIM_PADDR (MCF_MBAR + 0x244) |
99 | #define MCFSIM_PADAT (MCF_MBAR + 0x248) | 106 | #define MCFSIM_PADAT (MCF_MBAR + 0x248) |
100 | 107 | ||
101 | /* | 108 | /* |
109 | * DMA unit base addresses. | ||
110 | */ | ||
111 | #define MCFDMA_BASE0 (MCF_MBAR + 0x300) /* Base address DMA 0 */ | ||
112 | #define MCFDMA_BASE1 (MCF_MBAR + 0x340) /* Base address DMA 1 */ | ||
113 | #define MCFDMA_BASE2 (MCF_MBAR + 0x380) /* Base address DMA 2 */ | ||
114 | #define MCFDMA_BASE3 (MCF_MBAR + 0x3C0) /* Base address DMA 3 */ | ||
115 | |||
116 | /* | ||
102 | * UART module. | 117 | * UART module. |
103 | */ | 118 | */ |
104 | #if defined(CONFIG_NETtel) || defined(CONFIG_SECUREEDGEMP3) | 119 | #if defined(CONFIG_NETtel) || defined(CONFIG_SECUREEDGEMP3) |
diff --git a/arch/m68k/include/asm/m532xsim.h b/arch/m68k/include/asm/m532xsim.h index e6470f8ca324..ba4cc784f574 100644 --- a/arch/m68k/include/asm/m532xsim.h +++ b/arch/m68k/include/asm/m532xsim.h | |||
@@ -11,6 +11,7 @@ | |||
11 | 11 | ||
12 | #define CPU_NAME "COLDFIRE(m532x)" | 12 | #define CPU_NAME "COLDFIRE(m532x)" |
13 | #define CPU_INSTR_PER_JIFFY 3 | 13 | #define CPU_INSTR_PER_JIFFY 3 |
14 | #define MCF_BUSCLK (MCF_CLK / 3) | ||
14 | 15 | ||
15 | #include <asm/m53xxacr.h> | 16 | #include <asm/m53xxacr.h> |
16 | 17 | ||
@@ -85,6 +86,14 @@ | |||
85 | #define MCFUART_BASE2 0xFC064000 /* Base address of UART2 */ | 86 | #define MCFUART_BASE2 0xFC064000 /* Base address of UART2 */ |
86 | #define MCFUART_BASE3 0xFC068000 /* Base address of UART3 */ | 87 | #define MCFUART_BASE3 0xFC068000 /* Base address of UART3 */ |
87 | 88 | ||
89 | /* | ||
90 | * Timer module. | ||
91 | */ | ||
92 | #define MCFTIMER_BASE1 0xFC070000 /* Base address of TIMER1 */ | ||
93 | #define MCFTIMER_BASE2 0xFC074000 /* Base address of TIMER2 */ | ||
94 | #define MCFTIMER_BASE3 0xFC078000 /* Base address of TIMER3 */ | ||
95 | #define MCFTIMER_BASE4 0xFC07C000 /* Base address of TIMER4 */ | ||
96 | |||
88 | /********************************************************************* | 97 | /********************************************************************* |
89 | * | 98 | * |
90 | * Reset Controller Module | 99 | * Reset Controller Module |
diff --git a/arch/m68k/include/asm/m5407sim.h b/arch/m68k/include/asm/m5407sim.h index 75f5c28a551d..762c58c89050 100644 --- a/arch/m68k/include/asm/m5407sim.h +++ b/arch/m68k/include/asm/m5407sim.h | |||
@@ -16,6 +16,7 @@ | |||
16 | 16 | ||
17 | #define CPU_NAME "COLDFIRE(m5407)" | 17 | #define CPU_NAME "COLDFIRE(m5407)" |
18 | #define CPU_INSTR_PER_JIFFY 3 | 18 | #define CPU_INSTR_PER_JIFFY 3 |
19 | #define MCF_BUSCLK (MCF_CLK / 2) | ||
19 | 20 | ||
20 | #include <asm/m54xxacr.h> | 21 | #include <asm/m54xxacr.h> |
21 | 22 | ||
@@ -72,11 +73,17 @@ | |||
72 | #define MCFSIM_CSMR7 0xd8 /* CS 7 Mask reg (r/w) */ | 73 | #define MCFSIM_CSMR7 0xd8 /* CS 7 Mask reg (r/w) */ |
73 | #define MCFSIM_CSCR7 0xde /* CS 7 Control reg (r/w) */ | 74 | #define MCFSIM_CSCR7 0xde /* CS 7 Control reg (r/w) */ |
74 | 75 | ||
75 | #define MCFSIM_DCR 0x100 /* DRAM Control reg (r/w) */ | 76 | #define MCFSIM_DCR (MCF_MBAR + 0x100) /* DRAM Control */ |
76 | #define MCFSIM_DACR0 0x108 /* DRAM 0 Addr and Ctrl (r/w) */ | 77 | #define MCFSIM_DACR0 (MCF_MBAR + 0x108) /* DRAM 0 Addr/Ctrl */ |
77 | #define MCFSIM_DMR0 0x10c /* DRAM 0 Mask reg (r/w) */ | 78 | #define MCFSIM_DMR0 (MCF_MBAR + 0x10c) /* DRAM 0 Mask */ |
78 | #define MCFSIM_DACR1 0x110 /* DRAM 1 Addr and Ctrl (r/w) */ | 79 | #define MCFSIM_DACR1 (MCF_MBAR + 0x110) /* DRAM 1 Addr/Ctrl */ |
79 | #define MCFSIM_DMR1 0x114 /* DRAM 1 Mask reg (r/w) */ | 80 | #define MCFSIM_DMR1 (MCF_MBAR + 0x114) /* DRAM 1 Mask */ |
81 | |||
82 | /* | ||
83 | * Timer module. | ||
84 | */ | ||
85 | #define MCFTIMER_BASE1 (MCF_MBAR + 0x140) /* Base of TIMER1 */ | ||
86 | #define MCFTIMER_BASE2 (MCF_MBAR + 0x180) /* Base of TIMER2 */ | ||
80 | 87 | ||
81 | #define MCFUART_BASE1 0x1c0 /* Base address of UART1 */ | 88 | #define MCFUART_BASE1 0x1c0 /* Base address of UART1 */ |
82 | #define MCFUART_BASE2 0x200 /* Base address of UART2 */ | 89 | #define MCFUART_BASE2 0x200 /* Base address of UART2 */ |
@@ -85,6 +92,14 @@ | |||
85 | #define MCFSIM_PADAT (MCF_MBAR + 0x248) | 92 | #define MCFSIM_PADAT (MCF_MBAR + 0x248) |
86 | 93 | ||
87 | /* | 94 | /* |
95 | * DMA unit base addresses. | ||
96 | */ | ||
97 | #define MCFDMA_BASE0 (MCF_MBAR + 0x300) /* Base address DMA 0 */ | ||
98 | #define MCFDMA_BASE1 (MCF_MBAR + 0x340) /* Base address DMA 1 */ | ||
99 | #define MCFDMA_BASE2 (MCF_MBAR + 0x380) /* Base address DMA 2 */ | ||
100 | #define MCFDMA_BASE3 (MCF_MBAR + 0x3C0) /* Base address DMA 3 */ | ||
101 | |||
102 | /* | ||
88 | * Generic GPIO support | 103 | * Generic GPIO support |
89 | */ | 104 | */ |
90 | #define MCFGPIO_PIN_MAX 16 | 105 | #define MCFGPIO_PIN_MAX 16 |
diff --git a/arch/m68k/include/asm/m54xxsim.h b/arch/m68k/include/asm/m54xxsim.h index 462ae5328441..1ed8bfb02772 100644 --- a/arch/m68k/include/asm/m54xxsim.h +++ b/arch/m68k/include/asm/m54xxsim.h | |||
@@ -7,6 +7,7 @@ | |||
7 | 7 | ||
8 | #define CPU_NAME "COLDFIRE(m54xx)" | 8 | #define CPU_NAME "COLDFIRE(m54xx)" |
9 | #define CPU_INSTR_PER_JIFFY 2 | 9 | #define CPU_INSTR_PER_JIFFY 2 |
10 | #define MCF_BUSCLK (MCF_CLK / 2) | ||
10 | 11 | ||
11 | #include <asm/m54xxacr.h> | 12 | #include <asm/m54xxacr.h> |
12 | 13 | ||
@@ -15,7 +16,8 @@ | |||
15 | /* | 16 | /* |
16 | * Interrupt Controller Registers | 17 | * Interrupt Controller Registers |
17 | */ | 18 | */ |
18 | #define MCFICM_INTC0 0x0700 /* Base for Interrupt Ctrl 0 */ | 19 | #define MCFICM_INTC0 (MCF_MBAR + 0x700) /* Base for Interrupt Ctrl 0 */ |
20 | |||
19 | #define MCFINTC_IPRH 0x00 /* Interrupt pending 32-63 */ | 21 | #define MCFINTC_IPRH 0x00 /* Interrupt pending 32-63 */ |
20 | #define MCFINTC_IPRL 0x04 /* Interrupt pending 1-31 */ | 22 | #define MCFINTC_IPRL 0x04 /* Interrupt pending 1-31 */ |
21 | #define MCFINTC_IMRH 0x08 /* Interrupt mask 32-63 */ | 23 | #define MCFINTC_IMRH 0x08 /* Interrupt mask 32-63 */ |
@@ -48,6 +50,16 @@ | |||
48 | #define MCFGPIO_IRQ_VECBASE -1 | 50 | #define MCFGPIO_IRQ_VECBASE -1 |
49 | 51 | ||
50 | /* | 52 | /* |
53 | * EDGE Port support. | ||
54 | */ | ||
55 | #define MCFEPORT_EPPAR (MCF_MBAR + 0xf00) /* Pin assignment */ | ||
56 | #define MCFEPORT_EPDDR (MCF_MBAR + 0xf04) /* Data direction */ | ||
57 | #define MCFEPORT_EPIER (MCF_MBAR + 0xf05) /* Interrupt enable */ | ||
58 | #define MCFEPORT_EPDR (MCF_MBAR + 0xf08) /* Port data (w) */ | ||
59 | #define MCFEPORT_EPPDR (MCF_MBAR + 0xf09) /* Port data (r) */ | ||
60 | #define MCFEPORT_EPFR (MCF_MBAR + 0xf0c) /* Flags */ | ||
61 | |||
62 | /* | ||
51 | * Some PSC related definitions | 63 | * Some PSC related definitions |
52 | */ | 64 | */ |
53 | #define MCF_PAR_PSC(x) (0x000A4F-((x)&0x3)) | 65 | #define MCF_PAR_PSC(x) (0x000A4F-((x)&0x3)) |
diff --git a/arch/m68k/include/asm/mcfdma.h b/arch/m68k/include/asm/mcfdma.h index 705c52c79cd8..10bc7e391c14 100644 --- a/arch/m68k/include/asm/mcfdma.h +++ b/arch/m68k/include/asm/mcfdma.h | |||
@@ -11,29 +11,6 @@ | |||
11 | #define mcfdma_h | 11 | #define mcfdma_h |
12 | /****************************************************************************/ | 12 | /****************************************************************************/ |
13 | 13 | ||
14 | |||
15 | /* | ||
16 | * Get address specific defines for this Coldfire member. | ||
17 | */ | ||
18 | #if defined(CONFIG_M5206) || defined(CONFIG_M5206e) | ||
19 | #define MCFDMA_BASE0 0x200 /* Base address of DMA 0 */ | ||
20 | #define MCFDMA_BASE1 0x240 /* Base address of DMA 1 */ | ||
21 | #elif defined(CONFIG_M5272) | ||
22 | #define MCFDMA_BASE0 0x0e0 /* Base address of DMA 0 */ | ||
23 | #elif defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) | ||
24 | /* These are relative to the IPSBAR, not MBAR */ | ||
25 | #define MCFDMA_BASE0 0x100 /* Base address of DMA 0 */ | ||
26 | #define MCFDMA_BASE1 0x140 /* Base address of DMA 1 */ | ||
27 | #define MCFDMA_BASE2 0x180 /* Base address of DMA 2 */ | ||
28 | #define MCFDMA_BASE3 0x1C0 /* Base address of DMA 3 */ | ||
29 | #elif defined(CONFIG_M5249) || defined(CONFIG_M5307) || defined(CONFIG_M5407) | ||
30 | #define MCFDMA_BASE0 0x300 /* Base address of DMA 0 */ | ||
31 | #define MCFDMA_BASE1 0x340 /* Base address of DMA 1 */ | ||
32 | #define MCFDMA_BASE2 0x380 /* Base address of DMA 2 */ | ||
33 | #define MCFDMA_BASE3 0x3C0 /* Base address of DMA 3 */ | ||
34 | #endif | ||
35 | |||
36 | |||
37 | #if !defined(CONFIG_M5272) | 14 | #if !defined(CONFIG_M5272) |
38 | 15 | ||
39 | /* | 16 | /* |
diff --git a/arch/m68k/include/asm/mcfpit.h b/arch/m68k/include/asm/mcfpit.h index f570cf64fd29..9fd321ca0725 100644 --- a/arch/m68k/include/asm/mcfpit.h +++ b/arch/m68k/include/asm/mcfpit.h | |||
@@ -11,22 +11,8 @@ | |||
11 | #define mcfpit_h | 11 | #define mcfpit_h |
12 | /****************************************************************************/ | 12 | /****************************************************************************/ |
13 | 13 | ||
14 | |||
15 | /* | ||
16 | * Get address specific defines for the 5270/5271, 5280/5282, and 5208. | ||
17 | */ | ||
18 | #if defined(CONFIG_M520x) | ||
19 | #define MCFPIT_BASE1 0x00080000 /* Base address of TIMER1 */ | ||
20 | #define MCFPIT_BASE2 0x00084000 /* Base address of TIMER2 */ | ||
21 | #else | ||
22 | #define MCFPIT_BASE1 0x00150000 /* Base address of TIMER1 */ | ||
23 | #define MCFPIT_BASE2 0x00160000 /* Base address of TIMER2 */ | ||
24 | #define MCFPIT_BASE3 0x00170000 /* Base address of TIMER3 */ | ||
25 | #define MCFPIT_BASE4 0x00180000 /* Base address of TIMER4 */ | ||
26 | #endif | ||
27 | |||
28 | /* | 14 | /* |
29 | * Define the PIT timer register set addresses. | 15 | * Define the PIT timer register address offsets. |
30 | */ | 16 | */ |
31 | #define MCFPIT_PCSR 0x0 /* PIT control register */ | 17 | #define MCFPIT_PCSR 0x0 /* PIT control register */ |
32 | #define MCFPIT_PMR 0x2 /* PIT modulus register */ | 18 | #define MCFPIT_PMR 0x2 /* PIT modulus register */ |
diff --git a/arch/m68k/include/asm/mcftimer.h b/arch/m68k/include/asm/mcftimer.h index 0f90f6d2227a..92b276fe8240 100644 --- a/arch/m68k/include/asm/mcftimer.h +++ b/arch/m68k/include/asm/mcftimer.h | |||
@@ -12,29 +12,6 @@ | |||
12 | #define mcftimer_h | 12 | #define mcftimer_h |
13 | /****************************************************************************/ | 13 | /****************************************************************************/ |
14 | 14 | ||
15 | |||
16 | /* | ||
17 | * Get address specific defines for this ColdFire member. | ||
18 | */ | ||
19 | #if defined(CONFIG_M5206) || defined(CONFIG_M5206e) | ||
20 | #define MCFTIMER_BASE1 0x100 /* Base address of TIMER1 */ | ||
21 | #define MCFTIMER_BASE2 0x120 /* Base address of TIMER2 */ | ||
22 | #elif defined(CONFIG_M5272) | ||
23 | #define MCFTIMER_BASE1 0x200 /* Base address of TIMER1 */ | ||
24 | #define MCFTIMER_BASE2 0x220 /* Base address of TIMER2 */ | ||
25 | #define MCFTIMER_BASE3 0x240 /* Base address of TIMER4 */ | ||
26 | #define MCFTIMER_BASE4 0x260 /* Base address of TIMER3 */ | ||
27 | #elif defined(CONFIG_M5249) || defined(CONFIG_M5307) || defined(CONFIG_M5407) | ||
28 | #define MCFTIMER_BASE1 0x140 /* Base address of TIMER1 */ | ||
29 | #define MCFTIMER_BASE2 0x180 /* Base address of TIMER2 */ | ||
30 | #elif defined(CONFIG_M532x) | ||
31 | #define MCFTIMER_BASE1 0xfc070000 /* Base address of TIMER1 */ | ||
32 | #define MCFTIMER_BASE2 0xfc074000 /* Base address of TIMER2 */ | ||
33 | #define MCFTIMER_BASE3 0xfc078000 /* Base address of TIMER3 */ | ||
34 | #define MCFTIMER_BASE4 0xfc07c000 /* Base address of TIMER4 */ | ||
35 | #endif | ||
36 | |||
37 | |||
38 | /* | 15 | /* |
39 | * Define the TIMER register set addresses. | 16 | * Define the TIMER register set addresses. |
40 | */ | 17 | */ |
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c index 06438dac08ff..18b34ee5db3b 100644 --- a/arch/m68k/kernel/time.c +++ b/arch/m68k/kernel/time.c | |||
@@ -37,11 +37,11 @@ static inline int set_rtc_mmss(unsigned long nowtime) | |||
37 | 37 | ||
38 | /* | 38 | /* |
39 | * timer_interrupt() needs to keep up the real-time clock, | 39 | * timer_interrupt() needs to keep up the real-time clock, |
40 | * as well as call the "do_timer()" routine every clocktick | 40 | * as well as call the "xtime_update()" routine every clocktick |
41 | */ | 41 | */ |
42 | static irqreturn_t timer_interrupt(int irq, void *dummy) | 42 | static irqreturn_t timer_interrupt(int irq, void *dummy) |
43 | { | 43 | { |
44 | do_timer(1); | 44 | xtime_update(1); |
45 | update_process_times(user_mode(get_irq_regs())); | 45 | update_process_times(user_mode(get_irq_regs())); |
46 | profile_tick(CPU_PROFILING); | 46 | profile_tick(CPU_PROFILING); |
47 | 47 | ||
diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c index 100baaa692a1..6cb9c3a9b6c9 100644 --- a/arch/m68k/mvme147/config.c +++ b/arch/m68k/mvme147/config.c | |||
@@ -46,8 +46,8 @@ extern void mvme147_reset (void); | |||
46 | 46 | ||
47 | static int bcd2int (unsigned char b); | 47 | static int bcd2int (unsigned char b); |
48 | 48 | ||
49 | /* Save tick handler routine pointer, will point to do_timer() in | 49 | /* Save tick handler routine pointer, will point to xtime_update() in |
50 | * kernel/sched.c, called via mvme147_process_int() */ | 50 | * kernel/time/timekeeping.c, called via mvme147_process_int() */ |
51 | 51 | ||
52 | irq_handler_t tick_handler; | 52 | irq_handler_t tick_handler; |
53 | 53 | ||
diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c index 11edf61cc2c4..0b28e2621653 100644 --- a/arch/m68k/mvme16x/config.c +++ b/arch/m68k/mvme16x/config.c | |||
@@ -51,8 +51,8 @@ extern void mvme16x_reset (void); | |||
51 | 51 | ||
52 | int bcd2int (unsigned char b); | 52 | int bcd2int (unsigned char b); |
53 | 53 | ||
54 | /* Save tick handler routine pointer, will point to do_timer() in | 54 | /* Save tick handler routine pointer, will point to xtime_update() in |
55 | * kernel/sched.c, called via mvme16x_process_int() */ | 55 | * kernel/time/timekeeping.c, called via mvme16x_process_int() */ |
56 | 56 | ||
57 | static irq_handler_t tick_handler; | 57 | static irq_handler_t tick_handler; |
58 | 58 | ||
diff --git a/arch/m68k/sun3/sun3ints.c b/arch/m68k/sun3/sun3ints.c index 2d9e21bd313a..6464ad3ae3e6 100644 --- a/arch/m68k/sun3/sun3ints.c +++ b/arch/m68k/sun3/sun3ints.c | |||
@@ -66,7 +66,7 @@ static irqreturn_t sun3_int5(int irq, void *dev_id) | |||
66 | #ifdef CONFIG_SUN3 | 66 | #ifdef CONFIG_SUN3 |
67 | intersil_clear(); | 67 | intersil_clear(); |
68 | #endif | 68 | #endif |
69 | do_timer(1); | 69 | xtime_update(1); |
70 | update_process_times(user_mode(get_irq_regs())); | 70 | update_process_times(user_mode(get_irq_regs())); |
71 | if (!(kstat_cpu(0).irqs[irq] % 20)) | 71 | if (!(kstat_cpu(0).irqs[irq] % 20)) |
72 | sun3_leds(led_pattern[(kstat_cpu(0).irqs[irq] % 160) / 20]); | 72 | sun3_leds(led_pattern[(kstat_cpu(0).irqs[irq] % 160) / 20]); |
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig index 8b9dacaa0f6e..b5424cf948e6 100644 --- a/arch/m68knommu/Kconfig +++ b/arch/m68knommu/Kconfig | |||
@@ -3,6 +3,7 @@ config M68K | |||
3 | default y | 3 | default y |
4 | select HAVE_IDE | 4 | select HAVE_IDE |
5 | select HAVE_GENERIC_HARDIRQS | 5 | select HAVE_GENERIC_HARDIRQS |
6 | select GENERIC_HARDIRQS_NO_DEPRECATED | ||
6 | 7 | ||
7 | config MMU | 8 | config MMU |
8 | bool | 9 | bool |
@@ -78,6 +79,12 @@ config HAVE_CACHE_SPLIT | |||
78 | config HAVE_CACHE_CB | 79 | config HAVE_CACHE_CB |
79 | bool | 80 | bool |
80 | 81 | ||
82 | config HAVE_MBAR | ||
83 | bool | ||
84 | |||
85 | config HAVE_IPSBAR | ||
86 | bool | ||
87 | |||
81 | source "init/Kconfig" | 88 | source "init/Kconfig" |
82 | 89 | ||
83 | source "kernel/Kconfig.freezer" | 90 | source "kernel/Kconfig.freezer" |
@@ -111,12 +118,14 @@ config M68360 | |||
111 | config M5206 | 118 | config M5206 |
112 | bool "MCF5206" | 119 | bool "MCF5206" |
113 | select COLDFIRE_SW_A7 | 120 | select COLDFIRE_SW_A7 |
121 | select HAVE_MBAR | ||
114 | help | 122 | help |
115 | Motorola ColdFire 5206 processor support. | 123 | Motorola ColdFire 5206 processor support. |
116 | 124 | ||
117 | config M5206e | 125 | config M5206e |
118 | bool "MCF5206e" | 126 | bool "MCF5206e" |
119 | select COLDFIRE_SW_A7 | 127 | select COLDFIRE_SW_A7 |
128 | select HAVE_MBAR | ||
120 | help | 129 | help |
121 | Motorola ColdFire 5206e processor support. | 130 | Motorola ColdFire 5206e processor support. |
122 | 131 | ||
@@ -131,30 +140,35 @@ config M523x | |||
131 | bool "MCF523x" | 140 | bool "MCF523x" |
132 | select GENERIC_CLOCKEVENTS | 141 | select GENERIC_CLOCKEVENTS |
133 | select HAVE_CACHE_SPLIT | 142 | select HAVE_CACHE_SPLIT |
143 | select HAVE_IPSBAR | ||
134 | help | 144 | help |
135 | Freescale Coldfire 5230/1/2/4/5 processor support | 145 | Freescale Coldfire 5230/1/2/4/5 processor support |
136 | 146 | ||
137 | config M5249 | 147 | config M5249 |
138 | bool "MCF5249" | 148 | bool "MCF5249" |
139 | select COLDFIRE_SW_A7 | 149 | select COLDFIRE_SW_A7 |
150 | select HAVE_MBAR | ||
140 | help | 151 | help |
141 | Motorola ColdFire 5249 processor support. | 152 | Motorola ColdFire 5249 processor support. |
142 | 153 | ||
143 | config M5271 | 154 | config M5271 |
144 | bool "MCF5271" | 155 | bool "MCF5271" |
145 | select HAVE_CACHE_SPLIT | 156 | select HAVE_CACHE_SPLIT |
157 | select HAVE_IPSBAR | ||
146 | help | 158 | help |
147 | Freescale (Motorola) ColdFire 5270/5271 processor support. | 159 | Freescale (Motorola) ColdFire 5270/5271 processor support. |
148 | 160 | ||
149 | config M5272 | 161 | config M5272 |
150 | bool "MCF5272" | 162 | bool "MCF5272" |
151 | select COLDFIRE_SW_A7 | 163 | select COLDFIRE_SW_A7 |
164 | select HAVE_MBAR | ||
152 | help | 165 | help |
153 | Motorola ColdFire 5272 processor support. | 166 | Motorola ColdFire 5272 processor support. |
154 | 167 | ||
155 | config M5275 | 168 | config M5275 |
156 | bool "MCF5275" | 169 | bool "MCF5275" |
157 | select HAVE_CACHE_SPLIT | 170 | select HAVE_CACHE_SPLIT |
171 | select HAVE_IPSBAR | ||
158 | help | 172 | help |
159 | Freescale (Motorola) ColdFire 5274/5275 processor support. | 173 | Freescale (Motorola) ColdFire 5274/5275 processor support. |
160 | 174 | ||
@@ -162,6 +176,7 @@ config M528x | |||
162 | bool "MCF528x" | 176 | bool "MCF528x" |
163 | select GENERIC_CLOCKEVENTS | 177 | select GENERIC_CLOCKEVENTS |
164 | select HAVE_CACHE_SPLIT | 178 | select HAVE_CACHE_SPLIT |
179 | select HAVE_IPSBAR | ||
165 | help | 180 | help |
166 | Motorola ColdFire 5280/5282 processor support. | 181 | Motorola ColdFire 5280/5282 processor support. |
167 | 182 | ||
@@ -169,6 +184,7 @@ config M5307 | |||
169 | bool "MCF5307" | 184 | bool "MCF5307" |
170 | select COLDFIRE_SW_A7 | 185 | select COLDFIRE_SW_A7 |
171 | select HAVE_CACHE_CB | 186 | select HAVE_CACHE_CB |
187 | select HAVE_MBAR | ||
172 | help | 188 | help |
173 | Motorola ColdFire 5307 processor support. | 189 | Motorola ColdFire 5307 processor support. |
174 | 190 | ||
@@ -182,18 +198,21 @@ config M5407 | |||
182 | bool "MCF5407" | 198 | bool "MCF5407" |
183 | select COLDFIRE_SW_A7 | 199 | select COLDFIRE_SW_A7 |
184 | select HAVE_CACHE_CB | 200 | select HAVE_CACHE_CB |
201 | select HAVE_MBAR | ||
185 | help | 202 | help |
186 | Motorola ColdFire 5407 processor support. | 203 | Motorola ColdFire 5407 processor support. |
187 | 204 | ||
188 | config M547x | 205 | config M547x |
189 | bool "MCF547x" | 206 | bool "MCF547x" |
190 | select HAVE_CACHE_CB | 207 | select HAVE_CACHE_CB |
208 | select HAVE_MBAR | ||
191 | help | 209 | help |
192 | Freescale ColdFire 5470/5471/5472/5473/5474/5475 processor support. | 210 | Freescale ColdFire 5470/5471/5472/5473/5474/5475 processor support. |
193 | 211 | ||
194 | config M548x | 212 | config M548x |
195 | bool "MCF548x" | 213 | bool "MCF548x" |
196 | select HAVE_CACHE_CB | 214 | select HAVE_CACHE_CB |
215 | select HAVE_MBAR | ||
197 | help | 216 | help |
198 | Freescale ColdFire 5480/5481/5482/5483/5484/5485 processor support. | 217 | Freescale ColdFire 5480/5481/5482/5483/5484/5485 processor support. |
199 | 218 | ||
@@ -241,17 +260,6 @@ config CLOCK_FREQ | |||
241 | if it is fitted (there are some exceptions). This value will be | 260 | if it is fitted (there are some exceptions). This value will be |
242 | specific to the exact CPU that you are using. | 261 | specific to the exact CPU that you are using. |
243 | 262 | ||
244 | config CLOCK_DIV | ||
245 | int "Set the core/bus clock divide ratio" | ||
246 | default "1" | ||
247 | depends on CLOCK_SET | ||
248 | help | ||
249 | On many SoC style CPUs the master CPU clock is also used to drive | ||
250 | on-chip peripherals. The clock that is distributed to these | ||
251 | peripherals is sometimes a fixed ratio of the master clock | ||
252 | frequency. If so then set this to the divider ratio of the | ||
253 | master clock to the peripheral clock. If not sure then select 1. | ||
254 | |||
255 | config OLDMASK | 263 | config OLDMASK |
256 | bool "Old mask 5307 (1H55J) silicon" | 264 | bool "Old mask 5307 (1H55J) silicon" |
257 | depends on M5307 | 265 | depends on M5307 |
@@ -500,6 +508,12 @@ config M5407C3 | |||
500 | help | 508 | help |
501 | Support for the Motorola M5407C3 board. | 509 | Support for the Motorola M5407C3 board. |
502 | 510 | ||
511 | config FIREBEE | ||
512 | bool "FireBee board support" | ||
513 | depends on M547x | ||
514 | help | ||
515 | Support for the FireBee ColdFire 5475-based board. | ||

516 | |||
503 | config CLEOPATRA | 517 | config CLEOPATRA |
504 | bool "Feith CLEOPATRA board support" | 518 | bool "Feith CLEOPATRA board support" |
505 | depends on (M5307 || M5407) | 519 | depends on (M5307 || M5407) |
@@ -649,6 +663,28 @@ config VECTORBASE | |||
649 | platforms this address is programmed into the VBR register, thus | 663 | platforms this address is programmed into the VBR register, thus |
650 | actually setting the address to use. | 664 | actually setting the address to use. |
651 | 665 | ||
666 | config MBAR | ||
667 | hex "Address of the MBAR (internal peripherals)" | ||
668 | default "0x10000000" | ||
669 | depends on HAVE_MBAR | ||
670 | help | ||
671 | Define the address of the internal system peripherals. This value | ||
672 | is set in the processor's MBAR register. This is generally set up by | ||
673 | the boot loader, and will not be written by the kernel. Most | ||
674 | ColdFire boards use the default 0x10000000 value, so if unsure then | ||
675 | use this. | ||
676 | |||
677 | config IPSBAR | ||
678 | hex "Address of the IPSBAR (internal peripherals)" | ||
679 | default "0x40000000" | ||
680 | depends on HAVE_IPSBAR | ||
681 | help | ||
682 | Define the address of the internal system peripherals. This value | ||
683 | is set in the processor's IPSBAR register. This is generally set up by | ||
684 | the boot loader, and will not be written by the kernel. Most | ||
685 | ColdFire boards use the default 0x40000000 value, so if unsure then | ||
686 | use this. | ||
687 | |||
652 | config KERNELBASE | 688 | config KERNELBASE |
653 | hex "Address of the base of kernel code" | 689 | hex "Address of the base of kernel code" |
654 | default "0x400" | 690 | default "0x400" |
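The new CONFIG_MBAR and CONFIG_IPSBAR options only become useful once a header turns them into the MCF_MBAR/MCF_IPSBAR constants used by the platform code below. A hedged sketch of that glue (the header name and exact form are assumptions, not shown in this hunk):

	/* e.g. in <asm/coldfire.h>: pick up the Kconfig-provided
	 * peripheral base addresses for MBAR/IPSBAR parts. */
	#ifdef CONFIG_MBAR
	#define MCF_MBAR	CONFIG_MBAR
	#endif
	#ifdef CONFIG_IPSBAR
	#define MCF_IPSBAR	CONFIG_IPSBAR
	#endif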
diff --git a/arch/m68knommu/kernel/irq.c b/arch/m68knommu/kernel/irq.c index c9cac36d4422..c7dd48f37bee 100644 --- a/arch/m68knommu/kernel/irq.c +++ b/arch/m68knommu/kernel/irq.c | |||
@@ -38,11 +38,13 @@ int show_interrupts(struct seq_file *p, void *v) | |||
38 | seq_puts(p, " CPU0\n"); | 38 | seq_puts(p, " CPU0\n"); |
39 | 39 | ||
40 | if (irq < NR_IRQS) { | 40 | if (irq < NR_IRQS) { |
41 | ap = irq_desc[irq].action; | 41 | struct irq_desc *desc = irq_to_desc(irq); |
42 | |||
43 | ap = desc->action; | ||
42 | if (ap) { | 44 | if (ap) { |
43 | seq_printf(p, "%3d: ", irq); | 45 | seq_printf(p, "%3d: ", irq); |
44 | seq_printf(p, "%10u ", kstat_irqs(irq)); | 46 | seq_printf(p, "%10u ", kstat_irqs(irq)); |
45 | seq_printf(p, "%14s ", irq_desc[irq].chip->name); | 47 | seq_printf(p, "%14s ", get_irq_desc_chip(desc)->name); |
46 | 48 | ||
47 | seq_printf(p, "%s", ap->name); | 49 | seq_printf(p, "%s", ap->name); |
48 | for (ap = ap->next; ap; ap = ap->next) | 50 | for (ap = ap->next; ap; ap = ap->next) |
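With GENERIC_HARDIRQS_NO_DEPRECATED selected, descriptors are no longer reached through the irq_desc[] array directly; the accessor-based pattern the hunk above converts to looks roughly like this (a sketch, with the surrounding seq_file plumbing assumed):

	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && desc->action)
		seq_printf(p, "%3d: %10u %14s %s\n", irq,
			   kstat_irqs(irq), get_irq_desc_chip(desc)->name,
			   desc->action->name);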
diff --git a/arch/m68knommu/kernel/time.c b/arch/m68knommu/kernel/time.c index d6ac2a43453c..6623909f70e6 100644 --- a/arch/m68knommu/kernel/time.c +++ b/arch/m68knommu/kernel/time.c | |||
@@ -36,7 +36,7 @@ static inline int set_rtc_mmss(unsigned long nowtime) | |||
36 | #ifndef CONFIG_GENERIC_CLOCKEVENTS | 36 | #ifndef CONFIG_GENERIC_CLOCKEVENTS |
37 | /* | 37 | /* |
38 | * timer_interrupt() needs to keep up the real-time clock, | 38 | * timer_interrupt() needs to keep up the real-time clock, |
39 | * as well as call the "do_timer()" routine every clocktick | 39 | * as well as call the "xtime_update()" routine every clocktick |
40 | */ | 40 | */ |
41 | irqreturn_t arch_timer_interrupt(int irq, void *dummy) | 41 | irqreturn_t arch_timer_interrupt(int irq, void *dummy) |
42 | { | 42 | { |
@@ -44,11 +44,7 @@ irqreturn_t arch_timer_interrupt(int irq, void *dummy) | |||
44 | if (current->pid) | 44 | if (current->pid) |
45 | profile_tick(CPU_PROFILING); | 45 | profile_tick(CPU_PROFILING); |
46 | 46 | ||
47 | write_seqlock(&xtime_lock); | 47 | xtime_update(1); |
48 | |||
49 | do_timer(1); | ||
50 | |||
51 | write_sequnlock(&xtime_lock); | ||
52 | 48 | ||
53 | update_process_times(user_mode(get_irq_regs())); | 49 | update_process_times(user_mode(get_irq_regs())); |
54 | 50 | ||
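The hunk above makes the relationship explicit: the open-coded xtime_lock/do_timer() sequence is what xtime_update() now wraps for the arch timer handlers. Roughly (a sketch of the assumed core helper, not code from this diff):

	/* in kernel/time/timekeeping.c, approximately */
	void xtime_update(unsigned long ticks)
	{
		write_seqlock(&xtime_lock);
		do_timer(ticks);
		write_sequnlock(&xtime_lock);
	}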
diff --git a/arch/m68knommu/platform/5206/gpio.c b/arch/m68knommu/platform/5206/gpio.c index 60f779ce1651..b9ab4a120f28 100644 --- a/arch/m68knommu/platform/5206/gpio.c +++ b/arch/m68knommu/platform/5206/gpio.c | |||
@@ -32,9 +32,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
32 | .set = mcf_gpio_set_value, | 32 | .set = mcf_gpio_set_value, |
33 | .ngpio = 8, | 33 | .ngpio = 8, |
34 | }, | 34 | }, |
35 | .pddr = MCFSIM_PADDR, | 35 | .pddr = (void __iomem *) MCFSIM_PADDR, |
36 | .podr = MCFSIM_PADAT, | 36 | .podr = (void __iomem *) MCFSIM_PADAT, |
37 | .ppdr = MCFSIM_PADAT, | 37 | .ppdr = (void __iomem *) MCFSIM_PADAT, |
38 | }, | 38 | }, |
39 | }; | 39 | }; |
40 | 40 | ||
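The repeated (void __iomem *) casts in these gpio.c hunks follow from the register fields being declared as I/O pointers while the MCFSIM_*/MCFGPIO_* defines expand to plain integer addresses. A sketch of the assumed structure layout (field names as used in the initializers; the declaration itself is not part of this diff):

	struct mcf_gpio_chip {
		struct gpio_chip gpio_chip;
		void __iomem *pddr;	/* data direction */
		void __iomem *podr;	/* output data */
		void __iomem *ppdr;	/* pin/input data */
		void __iomem *setr;	/* set register, where present */
		void __iomem *clrr;	/* clear register, where present */
	};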
diff --git a/arch/m68knommu/platform/5206e/gpio.c b/arch/m68knommu/platform/5206e/gpio.c index 60f779ce1651..b9ab4a120f28 100644 --- a/arch/m68knommu/platform/5206e/gpio.c +++ b/arch/m68knommu/platform/5206e/gpio.c | |||
@@ -32,9 +32,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
32 | .set = mcf_gpio_set_value, | 32 | .set = mcf_gpio_set_value, |
33 | .ngpio = 8, | 33 | .ngpio = 8, |
34 | }, | 34 | }, |
35 | .pddr = MCFSIM_PADDR, | 35 | .pddr = (void __iomem *) MCFSIM_PADDR, |
36 | .podr = MCFSIM_PADAT, | 36 | .podr = (void __iomem *) MCFSIM_PADAT, |
37 | .ppdr = MCFSIM_PADAT, | 37 | .ppdr = (void __iomem *) MCFSIM_PADAT, |
38 | }, | 38 | }, |
39 | }; | 39 | }; |
40 | 40 | ||
diff --git a/arch/m68knommu/platform/520x/config.c b/arch/m68knommu/platform/520x/config.c index 71d2ba474c63..621238f1a219 100644 --- a/arch/m68knommu/platform/520x/config.c +++ b/arch/m68knommu/platform/520x/config.c | |||
@@ -27,15 +27,15 @@ | |||
27 | 27 | ||
28 | static struct mcf_platform_uart m520x_uart_platform[] = { | 28 | static struct mcf_platform_uart m520x_uart_platform[] = { |
29 | { | 29 | { |
30 | .mapbase = MCF_MBAR + MCFUART_BASE1, | 30 | .mapbase = MCFUART_BASE1, |
31 | .irq = MCFINT_VECBASE + MCFINT_UART0, | 31 | .irq = MCFINT_VECBASE + MCFINT_UART0, |
32 | }, | 32 | }, |
33 | { | 33 | { |
34 | .mapbase = MCF_MBAR + MCFUART_BASE2, | 34 | .mapbase = MCFUART_BASE2, |
35 | .irq = MCFINT_VECBASE + MCFINT_UART1, | 35 | .irq = MCFINT_VECBASE + MCFINT_UART1, |
36 | }, | 36 | }, |
37 | { | 37 | { |
38 | .mapbase = MCF_MBAR + MCFUART_BASE3, | 38 | .mapbase = MCFUART_BASE3, |
39 | .irq = MCFINT_VECBASE + MCFINT_UART2, | 39 | .irq = MCFINT_VECBASE + MCFINT_UART2, |
40 | }, | 40 | }, |
41 | { }, | 41 | { }, |
@@ -49,8 +49,8 @@ static struct platform_device m520x_uart = { | |||
49 | 49 | ||
50 | static struct resource m520x_fec_resources[] = { | 50 | static struct resource m520x_fec_resources[] = { |
51 | { | 51 | { |
52 | .start = MCF_MBAR + 0x30000, | 52 | .start = MCFFEC_BASE, |
53 | .end = MCF_MBAR + 0x30000 + 0x7ff, | 53 | .end = MCFFEC_BASE + MCFFEC_SIZE - 1, |
54 | .flags = IORESOURCE_MEM, | 54 | .flags = IORESOURCE_MEM, |
55 | }, | 55 | }, |
56 | { | 56 | { |
@@ -208,11 +208,11 @@ static void __init m520x_qspi_init(void) | |||
208 | { | 208 | { |
209 | u16 par; | 209 | u16 par; |
210 | /* setup Port QS for QSPI with gpio CS control */ | 210 | /* setup Port QS for QSPI with gpio CS control */ |
211 | writeb(0x3f, MCF_IPSBAR + MCF_GPIO_PAR_QSPI); | 211 | writeb(0x3f, MCF_GPIO_PAR_QSPI); |
212 | /* make U1CTS and U2RTS gpio for cs_control */ | 212 | /* make U1CTS and U2RTS gpio for cs_control */ |
213 | par = readw(MCF_IPSBAR + MCF_GPIO_PAR_UART); | 213 | par = readw(MCF_GPIO_PAR_UART); |
214 | par &= 0x00ff; | 214 | par &= 0x00ff; |
215 | writew(par, MCF_IPSBAR + MCF_GPIO_PAR_UART); | 215 | writew(par, MCF_GPIO_PAR_UART); |
216 | } | 216 | } |
217 | #endif /* defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE) */ | 217 | #endif /* defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE) */ |
218 | 218 | ||
@@ -234,23 +234,23 @@ static void __init m520x_uart_init_line(int line, int irq) | |||
234 | 234 | ||
235 | switch (line) { | 235 | switch (line) { |
236 | case 0: | 236 | case 0: |
237 | par = readw(MCF_IPSBAR + MCF_GPIO_PAR_UART); | 237 | par = readw(MCF_GPIO_PAR_UART); |
238 | par |= MCF_GPIO_PAR_UART_PAR_UTXD0 | | 238 | par |= MCF_GPIO_PAR_UART_PAR_UTXD0 | |
239 | MCF_GPIO_PAR_UART_PAR_URXD0; | 239 | MCF_GPIO_PAR_UART_PAR_URXD0; |
240 | writew(par, MCF_IPSBAR + MCF_GPIO_PAR_UART); | 240 | writew(par, MCF_GPIO_PAR_UART); |
241 | break; | 241 | break; |
242 | case 1: | 242 | case 1: |
243 | par = readw(MCF_IPSBAR + MCF_GPIO_PAR_UART); | 243 | par = readw(MCF_GPIO_PAR_UART); |
244 | par |= MCF_GPIO_PAR_UART_PAR_UTXD1 | | 244 | par |= MCF_GPIO_PAR_UART_PAR_UTXD1 | |
245 | MCF_GPIO_PAR_UART_PAR_URXD1; | 245 | MCF_GPIO_PAR_UART_PAR_URXD1; |
246 | writew(par, MCF_IPSBAR + MCF_GPIO_PAR_UART); | 246 | writew(par, MCF_GPIO_PAR_UART); |
247 | break; | 247 | break; |
248 | case 2: | 248 | case 2: |
249 | par2 = readb(MCF_IPSBAR + MCF_GPIO_PAR_FECI2C); | 249 | par2 = readb(MCF_GPIO_PAR_FECI2C); |
250 | par2 &= ~0x0F; | 250 | par2 &= ~0x0F; |
251 | par2 |= MCF_GPIO_PAR_FECI2C_PAR_SCL_UTXD2 | | 251 | par2 |= MCF_GPIO_PAR_FECI2C_PAR_SCL_UTXD2 | |
252 | MCF_GPIO_PAR_FECI2C_PAR_SDA_URXD2; | 252 | MCF_GPIO_PAR_FECI2C_PAR_SDA_URXD2; |
253 | writeb(par2, MCF_IPSBAR + MCF_GPIO_PAR_FECI2C); | 253 | writeb(par2, MCF_GPIO_PAR_FECI2C); |
254 | break; | 254 | break; |
255 | } | 255 | } |
256 | } | 256 | } |
@@ -271,11 +271,11 @@ static void __init m520x_fec_init(void) | |||
271 | u8 v; | 271 | u8 v; |
272 | 272 | ||
273 | /* Set multi-function pins to ethernet mode */ | 273 | /* Set multi-function pins to ethernet mode */ |
274 | v = readb(MCF_IPSBAR + MCF_GPIO_PAR_FEC); | 274 | v = readb(MCF_GPIO_PAR_FEC); |
275 | writeb(v | 0xf0, MCF_IPSBAR + MCF_GPIO_PAR_FEC); | 275 | writeb(v | 0xf0, MCF_GPIO_PAR_FEC); |
276 | 276 | ||
277 | v = readb(MCF_IPSBAR + MCF_GPIO_PAR_FECI2C); | 277 | v = readb(MCF_GPIO_PAR_FECI2C); |
278 | writeb(v | 0x0f, MCF_IPSBAR + MCF_GPIO_PAR_FECI2C); | 278 | writeb(v | 0x0f, MCF_GPIO_PAR_FECI2C); |
279 | } | 279 | } |
280 | 280 | ||
281 | /***************************************************************************/ | 281 | /***************************************************************************/ |
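The FEC resource conversion above relies on the usual inclusive-end arithmetic for struct resource: a region of MCFFEC_SIZE bytes starting at MCFFEC_BASE ends at MCFFEC_BASE + MCFFEC_SIZE - 1. A hedged sketch of the defines this presumes (values chosen to match what the old open-coded resource covered on the 520x):

	/* assumed platform header definitions, not shown in this diff */
	#define MCFFEC_BASE	(MCF_MBAR + 0x30000)
	#define MCFFEC_SIZE	0x800
	/* .end = MCFFEC_BASE + MCFFEC_SIZE - 1 then lands on the same
	 * MCF_MBAR + 0x30000 + 0x7ff the removed lines spelled out. */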
diff --git a/arch/m68knommu/platform/520x/gpio.c b/arch/m68knommu/platform/520x/gpio.c index 15b5bb62a698..d757328563d1 100644 --- a/arch/m68knommu/platform/520x/gpio.c +++ b/arch/m68knommu/platform/520x/gpio.c | |||
@@ -32,9 +32,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
32 | .set = mcf_gpio_set_value, | 32 | .set = mcf_gpio_set_value, |
33 | .ngpio = 8, | 33 | .ngpio = 8, |
34 | }, | 34 | }, |
35 | .pddr = MCFEPORT_EPDDR, | 35 | .pddr = (void __iomem *) MCFEPORT_EPDDR, |
36 | .podr = MCFEPORT_EPDR, | 36 | .podr = (void __iomem *) MCFEPORT_EPDR, |
37 | .ppdr = MCFEPORT_EPPDR, | 37 | .ppdr = (void __iomem *) MCFEPORT_EPPDR, |
38 | }, | 38 | }, |
39 | { | 39 | { |
40 | .gpio_chip = { | 40 | .gpio_chip = { |
@@ -48,11 +48,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
48 | .base = 8, | 48 | .base = 8, |
49 | .ngpio = 4, | 49 | .ngpio = 4, |
50 | }, | 50 | }, |
51 | .pddr = MCFGPIO_PDDR_BUSCTL, | 51 | .pddr = (void __iomem *) MCFGPIO_PDDR_BUSCTL, |
52 | .podr = MCFGPIO_PODR_BUSCTL, | 52 | .podr = (void __iomem *) MCFGPIO_PODR_BUSCTL, |
53 | .ppdr = MCFGPIO_PPDSDR_BUSCTL, | 53 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_BUSCTL, |
54 | .setr = MCFGPIO_PPDSDR_BUSCTL, | 54 | .setr = (void __iomem *) MCFGPIO_PPDSDR_BUSCTL, |
55 | .clrr = MCFGPIO_PCLRR_BUSCTL, | 55 | .clrr = (void __iomem *) MCFGPIO_PCLRR_BUSCTL, |
56 | }, | 56 | }, |
57 | { | 57 | { |
58 | .gpio_chip = { | 58 | .gpio_chip = { |
@@ -66,11 +66,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
66 | .base = 16, | 66 | .base = 16, |
67 | .ngpio = 4, | 67 | .ngpio = 4, |
68 | }, | 68 | }, |
69 | .pddr = MCFGPIO_PDDR_BE, | 69 | .pddr = (void __iomem *) MCFGPIO_PDDR_BE, |
70 | .podr = MCFGPIO_PODR_BE, | 70 | .podr = (void __iomem *) MCFGPIO_PODR_BE, |
71 | .ppdr = MCFGPIO_PPDSDR_BE, | 71 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_BE, |
72 | .setr = MCFGPIO_PPDSDR_BE, | 72 | .setr = (void __iomem *) MCFGPIO_PPDSDR_BE, |
73 | .clrr = MCFGPIO_PCLRR_BE, | 73 | .clrr = (void __iomem *) MCFGPIO_PCLRR_BE, |
74 | }, | 74 | }, |
75 | { | 75 | { |
76 | .gpio_chip = { | 76 | .gpio_chip = { |
@@ -84,11 +84,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
84 | .base = 25, | 84 | .base = 25, |
85 | .ngpio = 3, | 85 | .ngpio = 3, |
86 | }, | 86 | }, |
87 | .pddr = MCFGPIO_PDDR_CS, | 87 | .pddr = (void __iomem *) MCFGPIO_PDDR_CS, |
88 | .podr = MCFGPIO_PODR_CS, | 88 | .podr = (void __iomem *) MCFGPIO_PODR_CS, |
89 | .ppdr = MCFGPIO_PPDSDR_CS, | 89 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_CS, |
90 | .setr = MCFGPIO_PPDSDR_CS, | 90 | .setr = (void __iomem *) MCFGPIO_PPDSDR_CS, |
91 | .clrr = MCFGPIO_PCLRR_CS, | 91 | .clrr = (void __iomem *) MCFGPIO_PCLRR_CS, |
92 | }, | 92 | }, |
93 | { | 93 | { |
94 | .gpio_chip = { | 94 | .gpio_chip = { |
@@ -102,11 +102,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
102 | .base = 32, | 102 | .base = 32, |
103 | .ngpio = 4, | 103 | .ngpio = 4, |
104 | }, | 104 | }, |
105 | .pddr = MCFGPIO_PDDR_FECI2C, | 105 | .pddr = (void __iomem *) MCFGPIO_PDDR_FECI2C, |
106 | .podr = MCFGPIO_PODR_FECI2C, | 106 | .podr = (void __iomem *) MCFGPIO_PODR_FECI2C, |
107 | .ppdr = MCFGPIO_PPDSDR_FECI2C, | 107 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_FECI2C, |
108 | .setr = MCFGPIO_PPDSDR_FECI2C, | 108 | .setr = (void __iomem *) MCFGPIO_PPDSDR_FECI2C, |
109 | .clrr = MCFGPIO_PCLRR_FECI2C, | 109 | .clrr = (void __iomem *) MCFGPIO_PCLRR_FECI2C, |
110 | }, | 110 | }, |
111 | { | 111 | { |
112 | .gpio_chip = { | 112 | .gpio_chip = { |
@@ -120,11 +120,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
120 | .base = 40, | 120 | .base = 40, |
121 | .ngpio = 4, | 121 | .ngpio = 4, |
122 | }, | 122 | }, |
123 | .pddr = MCFGPIO_PDDR_QSPI, | 123 | .pddr = (void __iomem *) MCFGPIO_PDDR_QSPI, |
124 | .podr = MCFGPIO_PODR_QSPI, | 124 | .podr = (void __iomem *) MCFGPIO_PODR_QSPI, |
125 | .ppdr = MCFGPIO_PPDSDR_QSPI, | 125 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_QSPI, |
126 | .setr = MCFGPIO_PPDSDR_QSPI, | 126 | .setr = (void __iomem *) MCFGPIO_PPDSDR_QSPI, |
127 | .clrr = MCFGPIO_PCLRR_QSPI, | 127 | .clrr = (void __iomem *) MCFGPIO_PCLRR_QSPI, |
128 | }, | 128 | }, |
129 | { | 129 | { |
130 | .gpio_chip = { | 130 | .gpio_chip = { |
@@ -138,11 +138,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
138 | .base = 48, | 138 | .base = 48, |
139 | .ngpio = 4, | 139 | .ngpio = 4, |
140 | }, | 140 | }, |
141 | .pddr = MCFGPIO_PDDR_TIMER, | 141 | .pddr = (void __iomem *) MCFGPIO_PDDR_TIMER, |
142 | .podr = MCFGPIO_PODR_TIMER, | 142 | .podr = (void __iomem *) MCFGPIO_PODR_TIMER, |
143 | .ppdr = MCFGPIO_PPDSDR_TIMER, | 143 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_TIMER, |
144 | .setr = MCFGPIO_PPDSDR_TIMER, | 144 | .setr = (void __iomem *) MCFGPIO_PPDSDR_TIMER, |
145 | .clrr = MCFGPIO_PCLRR_TIMER, | 145 | .clrr = (void __iomem *) MCFGPIO_PCLRR_TIMER, |
146 | }, | 146 | }, |
147 | { | 147 | { |
148 | .gpio_chip = { | 148 | .gpio_chip = { |
@@ -156,11 +156,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
156 | .base = 56, | 156 | .base = 56, |
157 | .ngpio = 8, | 157 | .ngpio = 8, |
158 | }, | 158 | }, |
159 | .pddr = MCFGPIO_PDDR_UART, | 159 | .pddr = (void __iomem *) MCFGPIO_PDDR_UART, |
160 | .podr = MCFGPIO_PODR_UART, | 160 | .podr = (void __iomem *) MCFGPIO_PODR_UART, |
161 | .ppdr = MCFGPIO_PPDSDR_UART, | 161 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_UART, |
162 | .setr = MCFGPIO_PPDSDR_UART, | 162 | .setr = (void __iomem *) MCFGPIO_PPDSDR_UART, |
163 | .clrr = MCFGPIO_PCLRR_UART, | 163 | .clrr = (void __iomem *) MCFGPIO_PCLRR_UART, |
164 | }, | 164 | }, |
165 | { | 165 | { |
166 | .gpio_chip = { | 166 | .gpio_chip = { |
@@ -174,11 +174,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
174 | .base = 64, | 174 | .base = 64, |
175 | .ngpio = 8, | 175 | .ngpio = 8, |
176 | }, | 176 | }, |
177 | .pddr = MCFGPIO_PDDR_FECH, | 177 | .pddr = (void __iomem *) MCFGPIO_PDDR_FECH, |
178 | .podr = MCFGPIO_PODR_FECH, | 178 | .podr = (void __iomem *) MCFGPIO_PODR_FECH, |
179 | .ppdr = MCFGPIO_PPDSDR_FECH, | 179 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_FECH, |
180 | .setr = MCFGPIO_PPDSDR_FECH, | 180 | .setr = (void __iomem *) MCFGPIO_PPDSDR_FECH, |
181 | .clrr = MCFGPIO_PCLRR_FECH, | 181 | .clrr = (void __iomem *) MCFGPIO_PCLRR_FECH, |
182 | }, | 182 | }, |
183 | { | 183 | { |
184 | .gpio_chip = { | 184 | .gpio_chip = { |
@@ -192,11 +192,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
192 | .base = 72, | 192 | .base = 72, |
193 | .ngpio = 8, | 193 | .ngpio = 8, |
194 | }, | 194 | }, |
195 | .pddr = MCFGPIO_PDDR_FECL, | 195 | .pddr = (void __iomem *) MCFGPIO_PDDR_FECL, |
196 | .podr = MCFGPIO_PODR_FECL, | 196 | .podr = (void __iomem *) MCFGPIO_PODR_FECL, |
197 | .ppdr = MCFGPIO_PPDSDR_FECL, | 197 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_FECL, |
198 | .setr = MCFGPIO_PPDSDR_FECL, | 198 | .setr = (void __iomem *) MCFGPIO_PPDSDR_FECL, |
199 | .clrr = MCFGPIO_PCLRR_FECL, | 199 | .clrr = (void __iomem *) MCFGPIO_PCLRR_FECL, |
200 | }, | 200 | }, |
201 | }; | 201 | }; |
202 | 202 | ||
diff --git a/arch/m68knommu/platform/523x/config.c b/arch/m68knommu/platform/523x/config.c index 8980f6d7715a..418a76feb1e3 100644 --- a/arch/m68knommu/platform/523x/config.c +++ b/arch/m68knommu/platform/523x/config.c | |||
@@ -28,15 +28,15 @@ | |||
28 | 28 | ||
29 | static struct mcf_platform_uart m523x_uart_platform[] = { | 29 | static struct mcf_platform_uart m523x_uart_platform[] = { |
30 | { | 30 | { |
31 | .mapbase = MCF_MBAR + MCFUART_BASE1, | 31 | .mapbase = MCFUART_BASE1, |
32 | .irq = MCFINT_VECBASE + MCFINT_UART0, | 32 | .irq = MCFINT_VECBASE + MCFINT_UART0, |
33 | }, | 33 | }, |
34 | { | 34 | { |
35 | .mapbase = MCF_MBAR + MCFUART_BASE2, | 35 | .mapbase = MCFUART_BASE2, |
36 | .irq = MCFINT_VECBASE + MCFINT_UART0 + 1, | 36 | .irq = MCFINT_VECBASE + MCFINT_UART0 + 1, |
37 | }, | 37 | }, |
38 | { | 38 | { |
39 | .mapbase = MCF_MBAR + MCFUART_BASE3, | 39 | .mapbase = MCFUART_BASE3, |
40 | .irq = MCFINT_VECBASE + MCFINT_UART0 + 2, | 40 | .irq = MCFINT_VECBASE + MCFINT_UART0 + 2, |
41 | }, | 41 | }, |
42 | { }, | 42 | { }, |
@@ -50,8 +50,8 @@ static struct platform_device m523x_uart = { | |||
50 | 50 | ||
51 | static struct resource m523x_fec_resources[] = { | 51 | static struct resource m523x_fec_resources[] = { |
52 | { | 52 | { |
53 | .start = MCF_MBAR + 0x1000, | 53 | .start = MCFFEC_BASE, |
54 | .end = MCF_MBAR + 0x1000 + 0x7ff, | 54 | .end = MCFFEC_BASE + MCFFEC_SIZE - 1, |
55 | .flags = IORESOURCE_MEM, | 55 | .flags = IORESOURCE_MEM, |
56 | }, | 56 | }, |
57 | { | 57 | { |
diff --git a/arch/m68knommu/platform/523x/gpio.c b/arch/m68knommu/platform/523x/gpio.c index a8842dc27839..327ebf142c8e 100644 --- a/arch/m68knommu/platform/523x/gpio.c +++ b/arch/m68knommu/platform/523x/gpio.c | |||
@@ -33,9 +33,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
33 | .base = 1, | 33 | .base = 1, |
34 | .ngpio = 7, | 34 | .ngpio = 7, |
35 | }, | 35 | }, |
36 | .pddr = MCFEPORT_EPDDR, | 36 | .pddr = (void __iomem *) MCFEPORT_EPDDR, |
37 | .podr = MCFEPORT_EPDR, | 37 | .podr = (void __iomem *) MCFEPORT_EPDR, |
38 | .ppdr = MCFEPORT_EPPDR, | 38 | .ppdr = (void __iomem *) MCFEPORT_EPPDR, |
39 | }, | 39 | }, |
40 | { | 40 | { |
41 | .gpio_chip = { | 41 | .gpio_chip = { |
@@ -49,11 +49,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
49 | .base = 13, | 49 | .base = 13, |
50 | .ngpio = 3, | 50 | .ngpio = 3, |
51 | }, | 51 | }, |
52 | .pddr = MCFGPIO_PDDR_ADDR, | 52 | .pddr = (void __iomem *) MCFGPIO_PDDR_ADDR, |
53 | .podr = MCFGPIO_PODR_ADDR, | 53 | .podr = (void __iomem *) MCFGPIO_PODR_ADDR, |
54 | .ppdr = MCFGPIO_PPDSDR_ADDR, | 54 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_ADDR, |
55 | .setr = MCFGPIO_PPDSDR_ADDR, | 55 | .setr = (void __iomem *) MCFGPIO_PPDSDR_ADDR, |
56 | .clrr = MCFGPIO_PCLRR_ADDR, | 56 | .clrr = (void __iomem *) MCFGPIO_PCLRR_ADDR, |
57 | }, | 57 | }, |
58 | { | 58 | { |
59 | .gpio_chip = { | 59 | .gpio_chip = { |
@@ -67,11 +67,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
67 | .base = 16, | 67 | .base = 16, |
68 | .ngpio = 8, | 68 | .ngpio = 8, |
69 | }, | 69 | }, |
70 | .pddr = MCFGPIO_PDDR_DATAH, | 70 | .pddr = (void __iomem *) MCFGPIO_PDDR_DATAH, |
71 | .podr = MCFGPIO_PODR_DATAH, | 71 | .podr = (void __iomem *) MCFGPIO_PODR_DATAH, |
72 | .ppdr = MCFGPIO_PPDSDR_DATAH, | 72 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_DATAH, |
73 | .setr = MCFGPIO_PPDSDR_DATAH, | 73 | .setr = (void __iomem *) MCFGPIO_PPDSDR_DATAH, |
74 | .clrr = MCFGPIO_PCLRR_DATAH, | 74 | .clrr = (void __iomem *) MCFGPIO_PCLRR_DATAH, |
75 | }, | 75 | }, |
76 | { | 76 | { |
77 | .gpio_chip = { | 77 | .gpio_chip = { |
@@ -85,11 +85,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
85 | .base = 24, | 85 | .base = 24, |
86 | .ngpio = 8, | 86 | .ngpio = 8, |
87 | }, | 87 | }, |
88 | .pddr = MCFGPIO_PDDR_DATAL, | 88 | .pddr = (void __iomem *) MCFGPIO_PDDR_DATAL, |
89 | .podr = MCFGPIO_PODR_DATAL, | 89 | .podr = (void __iomem *) MCFGPIO_PODR_DATAL, |
90 | .ppdr = MCFGPIO_PPDSDR_DATAL, | 90 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_DATAL, |
91 | .setr = MCFGPIO_PPDSDR_DATAL, | 91 | .setr = (void __iomem *) MCFGPIO_PPDSDR_DATAL, |
92 | .clrr = MCFGPIO_PCLRR_DATAL, | 92 | .clrr = (void __iomem *) MCFGPIO_PCLRR_DATAL, |
93 | }, | 93 | }, |
94 | { | 94 | { |
95 | .gpio_chip = { | 95 | .gpio_chip = { |
@@ -103,11 +103,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
103 | .base = 32, | 103 | .base = 32, |
104 | .ngpio = 8, | 104 | .ngpio = 8, |
105 | }, | 105 | }, |
106 | .pddr = MCFGPIO_PDDR_BUSCTL, | 106 | .pddr = (void __iomem *) MCFGPIO_PDDR_BUSCTL, |
107 | .podr = MCFGPIO_PODR_BUSCTL, | 107 | .podr = (void __iomem *) MCFGPIO_PODR_BUSCTL, |
108 | .ppdr = MCFGPIO_PPDSDR_BUSCTL, | 108 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_BUSCTL, |
109 | .setr = MCFGPIO_PPDSDR_BUSCTL, | 109 | .setr = (void __iomem *) MCFGPIO_PPDSDR_BUSCTL, |
110 | .clrr = MCFGPIO_PCLRR_BUSCTL, | 110 | .clrr = (void __iomem *) MCFGPIO_PCLRR_BUSCTL, |
111 | }, | 111 | }, |
112 | { | 112 | { |
113 | .gpio_chip = { | 113 | .gpio_chip = { |
@@ -121,11 +121,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
121 | .base = 40, | 121 | .base = 40, |
122 | .ngpio = 4, | 122 | .ngpio = 4, |
123 | }, | 123 | }, |
124 | .pddr = MCFGPIO_PDDR_BS, | 124 | .pddr = (void __iomem *) MCFGPIO_PDDR_BS, |
125 | .podr = MCFGPIO_PODR_BS, | 125 | .podr = (void __iomem *) MCFGPIO_PODR_BS, |
126 | .ppdr = MCFGPIO_PPDSDR_BS, | 126 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_BS, |
127 | .setr = MCFGPIO_PPDSDR_BS, | 127 | .setr = (void __iomem *) MCFGPIO_PPDSDR_BS, |
128 | .clrr = MCFGPIO_PCLRR_BS, | 128 | .clrr = (void __iomem *) MCFGPIO_PCLRR_BS, |
129 | }, | 129 | }, |
130 | { | 130 | { |
131 | .gpio_chip = { | 131 | .gpio_chip = { |
@@ -139,11 +139,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
139 | .base = 49, | 139 | .base = 49, |
140 | .ngpio = 7, | 140 | .ngpio = 7, |
141 | }, | 141 | }, |
142 | .pddr = MCFGPIO_PDDR_CS, | 142 | .pddr = (void __iomem *) MCFGPIO_PDDR_CS, |
143 | .podr = MCFGPIO_PODR_CS, | 143 | .podr = (void __iomem *) MCFGPIO_PODR_CS, |
144 | .ppdr = MCFGPIO_PPDSDR_CS, | 144 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_CS, |
145 | .setr = MCFGPIO_PPDSDR_CS, | 145 | .setr = (void __iomem *) MCFGPIO_PPDSDR_CS, |
146 | .clrr = MCFGPIO_PCLRR_CS, | 146 | .clrr = (void __iomem *) MCFGPIO_PCLRR_CS, |
147 | }, | 147 | }, |
148 | { | 148 | { |
149 | .gpio_chip = { | 149 | .gpio_chip = { |
@@ -157,11 +157,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
157 | .base = 56, | 157 | .base = 56, |
158 | .ngpio = 6, | 158 | .ngpio = 6, |
159 | }, | 159 | }, |
160 | .pddr = MCFGPIO_PDDR_SDRAM, | 160 | .pddr = (void __iomem *) MCFGPIO_PDDR_SDRAM, |
161 | .podr = MCFGPIO_PODR_SDRAM, | 161 | .podr = (void __iomem *) MCFGPIO_PODR_SDRAM, |
162 | .ppdr = MCFGPIO_PPDSDR_SDRAM, | 162 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_SDRAM, |
163 | .setr = MCFGPIO_PPDSDR_SDRAM, | 163 | .setr = (void __iomem *) MCFGPIO_PPDSDR_SDRAM, |
164 | .clrr = MCFGPIO_PCLRR_SDRAM, | 164 | .clrr = (void __iomem *) MCFGPIO_PCLRR_SDRAM, |
165 | }, | 165 | }, |
166 | { | 166 | { |
167 | .gpio_chip = { | 167 | .gpio_chip = { |
@@ -175,11 +175,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
175 | .base = 64, | 175 | .base = 64, |
176 | .ngpio = 4, | 176 | .ngpio = 4, |
177 | }, | 177 | }, |
178 | .pddr = MCFGPIO_PDDR_FECI2C, | 178 | .pddr = (void __iomem *) MCFGPIO_PDDR_FECI2C, |
179 | .podr = MCFGPIO_PODR_FECI2C, | 179 | .podr = (void __iomem *) MCFGPIO_PODR_FECI2C, |
180 | .ppdr = MCFGPIO_PPDSDR_FECI2C, | 180 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_FECI2C, |
181 | .setr = MCFGPIO_PPDSDR_FECI2C, | 181 | .setr = (void __iomem *) MCFGPIO_PPDSDR_FECI2C, |
182 | .clrr = MCFGPIO_PCLRR_FECI2C, | 182 | .clrr = (void __iomem *) MCFGPIO_PCLRR_FECI2C, |
183 | }, | 183 | }, |
184 | { | 184 | { |
185 | .gpio_chip = { | 185 | .gpio_chip = { |
@@ -193,11 +193,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
193 | .base = 72, | 193 | .base = 72, |
194 | .ngpio = 2, | 194 | .ngpio = 2, |
195 | }, | 195 | }, |
196 | .pddr = MCFGPIO_PDDR_UARTH, | 196 | .pddr = (void __iomem *) MCFGPIO_PDDR_UARTH, |
197 | .podr = MCFGPIO_PODR_UARTH, | 197 | .podr = (void __iomem *) MCFGPIO_PODR_UARTH, |
198 | .ppdr = MCFGPIO_PPDSDR_UARTH, | 198 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_UARTH, |
199 | .setr = MCFGPIO_PPDSDR_UARTH, | 199 | .setr = (void __iomem *) MCFGPIO_PPDSDR_UARTH, |
200 | .clrr = MCFGPIO_PCLRR_UARTH, | 200 | .clrr = (void __iomem *) MCFGPIO_PCLRR_UARTH, |
201 | }, | 201 | }, |
202 | { | 202 | { |
203 | .gpio_chip = { | 203 | .gpio_chip = { |
@@ -211,11 +211,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
211 | .base = 80, | 211 | .base = 80, |
212 | .ngpio = 8, | 212 | .ngpio = 8, |
213 | }, | 213 | }, |
214 | .pddr = MCFGPIO_PDDR_UARTL, | 214 | .pddr = (void __iomem *) MCFGPIO_PDDR_UARTL, |
215 | .podr = MCFGPIO_PODR_UARTL, | 215 | .podr = (void __iomem *) MCFGPIO_PODR_UARTL, |
216 | .ppdr = MCFGPIO_PPDSDR_UARTL, | 216 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_UARTL, |
217 | .setr = MCFGPIO_PPDSDR_UARTL, | 217 | .setr = (void __iomem *) MCFGPIO_PPDSDR_UARTL, |
218 | .clrr = MCFGPIO_PCLRR_UARTL, | 218 | .clrr = (void __iomem *) MCFGPIO_PCLRR_UARTL, |
219 | }, | 219 | }, |
220 | { | 220 | { |
221 | .gpio_chip = { | 221 | .gpio_chip = { |
@@ -229,11 +229,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
229 | .base = 88, | 229 | .base = 88, |
230 | .ngpio = 5, | 230 | .ngpio = 5, |
231 | }, | 231 | }, |
232 | .pddr = MCFGPIO_PDDR_QSPI, | 232 | .pddr = (void __iomem *) MCFGPIO_PDDR_QSPI, |
233 | .podr = MCFGPIO_PODR_QSPI, | 233 | .podr = (void __iomem *) MCFGPIO_PODR_QSPI, |
234 | .ppdr = MCFGPIO_PPDSDR_QSPI, | 234 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_QSPI, |
235 | .setr = MCFGPIO_PPDSDR_QSPI, | 235 | .setr = (void __iomem *) MCFGPIO_PPDSDR_QSPI, |
236 | .clrr = MCFGPIO_PCLRR_QSPI, | 236 | .clrr = (void __iomem *) MCFGPIO_PCLRR_QSPI, |
237 | }, | 237 | }, |
238 | { | 238 | { |
239 | .gpio_chip = { | 239 | .gpio_chip = { |
@@ -247,11 +247,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
247 | .base = 96, | 247 | .base = 96, |
248 | .ngpio = 8, | 248 | .ngpio = 8, |
249 | }, | 249 | }, |
250 | .pddr = MCFGPIO_PDDR_TIMER, | 250 | .pddr = (void __iomem *) MCFGPIO_PDDR_TIMER, |
251 | .podr = MCFGPIO_PODR_TIMER, | 251 | .podr = (void __iomem *) MCFGPIO_PODR_TIMER, |
252 | .ppdr = MCFGPIO_PPDSDR_TIMER, | 252 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_TIMER, |
253 | .setr = MCFGPIO_PPDSDR_TIMER, | 253 | .setr = (void __iomem *) MCFGPIO_PPDSDR_TIMER, |
254 | .clrr = MCFGPIO_PCLRR_TIMER, | 254 | .clrr = (void __iomem *) MCFGPIO_PCLRR_TIMER, |
255 | }, | 255 | }, |
256 | { | 256 | { |
257 | .gpio_chip = { | 257 | .gpio_chip = { |
@@ -265,11 +265,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
265 | .base = 104, | 265 | .base = 104, |
266 | .ngpio = 3, | 266 | .ngpio = 3, |
267 | }, | 267 | }, |
268 | .pddr = MCFGPIO_PDDR_ETPU, | 268 | .pddr = (void __iomem *) MCFGPIO_PDDR_ETPU, |
269 | .podr = MCFGPIO_PODR_ETPU, | 269 | .podr = (void __iomem *) MCFGPIO_PODR_ETPU, |
270 | .ppdr = MCFGPIO_PPDSDR_ETPU, | 270 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_ETPU, |
271 | .setr = MCFGPIO_PPDSDR_ETPU, | 271 | .setr = (void __iomem *) MCFGPIO_PPDSDR_ETPU, |
272 | .clrr = MCFGPIO_PCLRR_ETPU, | 272 | .clrr = (void __iomem *) MCFGPIO_PCLRR_ETPU, |
273 | }, | 273 | }, |
274 | }; | 274 | }; |
275 | 275 | ||
diff --git a/arch/m68knommu/platform/5249/gpio.c b/arch/m68knommu/platform/5249/gpio.c index c611eab8b3b6..2b56c6ef65bf 100644 --- a/arch/m68knommu/platform/5249/gpio.c +++ b/arch/m68knommu/platform/5249/gpio.c | |||
@@ -32,9 +32,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
32 | .set = mcf_gpio_set_value, | 32 | .set = mcf_gpio_set_value, |
33 | .ngpio = 32, | 33 | .ngpio = 32, |
34 | }, | 34 | }, |
35 | .pddr = MCFSIM2_GPIOENABLE, | 35 | .pddr = (void __iomem *) MCFSIM2_GPIOENABLE, |
36 | .podr = MCFSIM2_GPIOWRITE, | 36 | .podr = (void __iomem *) MCFSIM2_GPIOWRITE, |
37 | .ppdr = MCFSIM2_GPIOREAD, | 37 | .ppdr = (void __iomem *) MCFSIM2_GPIOREAD, |
38 | }, | 38 | }, |
39 | { | 39 | { |
40 | .gpio_chip = { | 40 | .gpio_chip = { |
@@ -48,9 +48,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
48 | .base = 32, | 48 | .base = 32, |
49 | .ngpio = 32, | 49 | .ngpio = 32, |
50 | }, | 50 | }, |
51 | .pddr = MCFSIM2_GPIO1ENABLE, | 51 | .pddr = (void __iomem *) MCFSIM2_GPIO1ENABLE, |
52 | .podr = MCFSIM2_GPIO1WRITE, | 52 | .podr = (void __iomem *) MCFSIM2_GPIO1WRITE, |
53 | .ppdr = MCFSIM2_GPIO1READ, | 53 | .ppdr = (void __iomem *) MCFSIM2_GPIO1READ, |
54 | }, | 54 | }, |
55 | }; | 55 | }; |
56 | 56 | ||
diff --git a/arch/m68knommu/platform/5249/intc2.c b/arch/m68knommu/platform/5249/intc2.c index c5151f846591..8f4b63e17366 100644 --- a/arch/m68knommu/platform/5249/intc2.c +++ b/arch/m68knommu/platform/5249/intc2.c | |||
@@ -17,32 +17,32 @@ | |||
17 | #include <asm/coldfire.h> | 17 | #include <asm/coldfire.h> |
18 | #include <asm/mcfsim.h> | 18 | #include <asm/mcfsim.h> |
19 | 19 | ||
20 | static void intc2_irq_gpio_mask(unsigned int irq) | 20 | static void intc2_irq_gpio_mask(struct irq_data *d) |
21 | { | 21 | { |
22 | u32 imr; | 22 | u32 imr; |
23 | imr = readl(MCF_MBAR2 + MCFSIM2_GPIOINTENABLE); | 23 | imr = readl(MCF_MBAR2 + MCFSIM2_GPIOINTENABLE); |
24 | imr &= ~(0x1 << (irq - MCFINTC2_GPIOIRQ0)); | 24 | imr &= ~(0x1 << (d->irq - MCFINTC2_GPIOIRQ0)); |
25 | writel(imr, MCF_MBAR2 + MCFSIM2_GPIOINTENABLE); | 25 | writel(imr, MCF_MBAR2 + MCFSIM2_GPIOINTENABLE); |
26 | } | 26 | } |
27 | 27 | ||
28 | static void intc2_irq_gpio_unmask(unsigned int irq) | 28 | static void intc2_irq_gpio_unmask(struct irq_data *d) |
29 | { | 29 | { |
30 | u32 imr; | 30 | u32 imr; |
31 | imr = readl(MCF_MBAR2 + MCFSIM2_GPIOINTENABLE); | 31 | imr = readl(MCF_MBAR2 + MCFSIM2_GPIOINTENABLE); |
32 | imr |= (0x1 << (irq - MCFINTC2_GPIOIRQ0)); | 32 | imr |= (0x1 << (d->irq - MCFINTC2_GPIOIRQ0)); |
33 | writel(imr, MCF_MBAR2 + MCFSIM2_GPIOINTENABLE); | 33 | writel(imr, MCF_MBAR2 + MCFSIM2_GPIOINTENABLE); |
34 | } | 34 | } |
35 | 35 | ||
36 | static void intc2_irq_gpio_ack(unsigned int irq) | 36 | static void intc2_irq_gpio_ack(struct irq_data *d) |
37 | { | 37 | { |
38 | writel(0x1 << (irq - MCFINTC2_GPIOIRQ0), MCF_MBAR2 + MCFSIM2_GPIOINTCLEAR); | 38 | writel(0x1 << (d->irq - MCFINTC2_GPIOIRQ0), MCF_MBAR2 + MCFSIM2_GPIOINTCLEAR); |
39 | } | 39 | } |
40 | 40 | ||
41 | static struct irq_chip intc2_irq_gpio_chip = { | 41 | static struct irq_chip intc2_irq_gpio_chip = { |
42 | .name = "CF-INTC2", | 42 | .name = "CF-INTC2", |
43 | .mask = intc2_irq_gpio_mask, | 43 | .irq_mask = intc2_irq_gpio_mask, |
44 | .unmask = intc2_irq_gpio_unmask, | 44 | .irq_unmask = intc2_irq_gpio_unmask, |
45 | .ack = intc2_irq_gpio_ack, | 45 | .irq_ack = intc2_irq_gpio_ack, |
46 | }; | 46 | }; |
47 | 47 | ||
48 | static int __init mcf_intc2_init(void) | 48 | static int __init mcf_intc2_init(void) |
@@ -51,7 +51,7 @@ static int __init mcf_intc2_init(void) | |||
51 | 51 | ||
52 | /* GPIO interrupt sources */ | 52 | /* GPIO interrupt sources */ |
53 | for (irq = MCFINTC2_GPIOIRQ0; (irq <= MCFINTC2_GPIOIRQ7); irq++) { | 53 | for (irq = MCFINTC2_GPIOIRQ0; (irq <= MCFINTC2_GPIOIRQ7); irq++) { |
54 | irq_desc[irq].chip = &intc2_irq_gpio_chip; | 54 | set_irq_chip(irq, &intc2_irq_gpio_chip); |
55 | set_irq_handler(irq, handle_edge_irq); | 55 | set_irq_handler(irq, handle_edge_irq); |
56 | } | 56 | } |
57 | 57 | ||
diff --git a/arch/m68knommu/platform/5272/gpio.c b/arch/m68knommu/platform/5272/gpio.c index 459db89a89cc..57ac10a5d7f7 100644 --- a/arch/m68knommu/platform/5272/gpio.c +++ b/arch/m68knommu/platform/5272/gpio.c | |||
@@ -32,9 +32,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
32 | .set = mcf_gpio_set_value, | 32 | .set = mcf_gpio_set_value, |
33 | .ngpio = 16, | 33 | .ngpio = 16, |
34 | }, | 34 | }, |
35 | .pddr = MCFSIM_PADDR, | 35 | .pddr = (void __iomem *) MCFSIM_PADDR, |
36 | .podr = MCFSIM_PADAT, | 36 | .podr = (void __iomem *) MCFSIM_PADAT, |
37 | .ppdr = MCFSIM_PADAT, | 37 | .ppdr = (void __iomem *) MCFSIM_PADAT, |
38 | }, | 38 | }, |
39 | { | 39 | { |
40 | .gpio_chip = { | 40 | .gpio_chip = { |
@@ -48,9 +48,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
48 | .base = 16, | 48 | .base = 16, |
49 | .ngpio = 16, | 49 | .ngpio = 16, |
50 | }, | 50 | }, |
51 | .pddr = MCFSIM_PBDDR, | 51 | .pddr = (void __iomem *) MCFSIM_PBDDR, |
52 | .podr = MCFSIM_PBDAT, | 52 | .podr = (void __iomem *) MCFSIM_PBDAT, |
53 | .ppdr = MCFSIM_PBDAT, | 53 | .ppdr = (void __iomem *) MCFSIM_PBDAT, |
54 | }, | 54 | }, |
55 | { | 55 | { |
56 | .gpio_chip = { | 56 | .gpio_chip = { |
@@ -64,9 +64,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
64 | .base = 32, | 64 | .base = 32, |
65 | .ngpio = 16, | 65 | .ngpio = 16, |
66 | }, | 66 | }, |
67 | .pddr = MCFSIM_PCDDR, | 67 | .pddr = (void __iomem *) MCFSIM_PCDDR, |
68 | .podr = MCFSIM_PCDAT, | 68 | .podr = (void __iomem *) MCFSIM_PCDAT, |
69 | .ppdr = MCFSIM_PCDAT, | 69 | .ppdr = (void __iomem *) MCFSIM_PCDAT, |
70 | }, | 70 | }, |
71 | }; | 71 | }; |
72 | 72 | ||
diff --git a/arch/m68knommu/platform/5272/intc.c b/arch/m68knommu/platform/5272/intc.c index 3cf681c177aa..969ff0a467c6 100644 --- a/arch/m68knommu/platform/5272/intc.c +++ b/arch/m68knommu/platform/5272/intc.c | |||
@@ -78,8 +78,10 @@ static struct irqmap intc_irqmap[MCFINT_VECMAX - MCFINT_VECBASE] = { | |||
78 | * an interrupt on this irq (for the external irqs). So this mask function | 78 | * an interrupt on this irq (for the external irqs). So this mask function |
79 | * is also an ack_mask function. | 79 | * is also an ack_mask function. |
80 | */ | 80 | */ |
81 | static void intc_irq_mask(unsigned int irq) | 81 | static void intc_irq_mask(struct irq_data *d) |
82 | { | 82 | { |
83 | unsigned int irq = d->irq; | ||
84 | |||
83 | if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX)) { | 85 | if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX)) { |
84 | u32 v; | 86 | u32 v; |
85 | irq -= MCFINT_VECBASE; | 87 | irq -= MCFINT_VECBASE; |
@@ -88,8 +90,10 @@ static void intc_irq_mask(unsigned int irq) | |||
88 | } | 90 | } |
89 | } | 91 | } |
90 | 92 | ||
91 | static void intc_irq_unmask(unsigned int irq) | 93 | static void intc_irq_unmask(struct irq_data *d) |
92 | { | 94 | { |
95 | unsigned int irq = d->irq; | ||
96 | |||
93 | if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX)) { | 97 | if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX)) { |
94 | u32 v; | 98 | u32 v; |
95 | irq -= MCFINT_VECBASE; | 99 | irq -= MCFINT_VECBASE; |
@@ -98,8 +102,10 @@ static void intc_irq_unmask(unsigned int irq) | |||
98 | } | 102 | } |
99 | } | 103 | } |
100 | 104 | ||
101 | static void intc_irq_ack(unsigned int irq) | 105 | static void intc_irq_ack(struct irq_data *d) |
102 | { | 106 | { |
107 | unsigned int irq = d->irq; | ||
108 | |||
103 | /* Only external interrupts are acked */ | 109 | /* Only external interrupts are acked */ |
104 | if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX)) { | 110 | if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX)) { |
105 | irq -= MCFINT_VECBASE; | 111 | irq -= MCFINT_VECBASE; |
@@ -113,8 +119,10 @@ static void intc_irq_ack(unsigned int irq) | |||
113 | } | 119 | } |
114 | } | 120 | } |
115 | 121 | ||
116 | static int intc_irq_set_type(unsigned int irq, unsigned int type) | 122 | static int intc_irq_set_type(struct irq_data *d, unsigned int type) |
117 | { | 123 | { |
124 | unsigned int irq = d->irq; | ||
125 | |||
118 | if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX)) { | 126 | if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX)) { |
119 | irq -= MCFINT_VECBASE; | 127 | irq -= MCFINT_VECBASE; |
120 | if (intc_irqmap[irq].ack) { | 128 | if (intc_irqmap[irq].ack) { |
@@ -137,20 +145,17 @@ static int intc_irq_set_type(unsigned int irq, unsigned int type) | |||
137 | */ | 145 | */ |
138 | static void intc_external_irq(unsigned int irq, struct irq_desc *desc) | 146 | static void intc_external_irq(unsigned int irq, struct irq_desc *desc) |
139 | { | 147 | { |
140 | kstat_incr_irqs_this_cpu(irq, desc); | 148 | get_irq_desc_chip(desc)->irq_ack(&desc->irq_data); |
141 | desc->status |= IRQ_INPROGRESS; | 149 | handle_simple_irq(irq, desc); |
142 | desc->chip->ack(irq); | ||
143 | handle_IRQ_event(irq, desc->action); | ||
144 | desc->status &= ~IRQ_INPROGRESS; | ||
145 | } | 150 | } |
146 | 151 | ||
147 | static struct irq_chip intc_irq_chip = { | 152 | static struct irq_chip intc_irq_chip = { |
148 | .name = "CF-INTC", | 153 | .name = "CF-INTC", |
149 | .mask = intc_irq_mask, | 154 | .irq_mask = intc_irq_mask, |
150 | .unmask = intc_irq_unmask, | 155 | .irq_unmask = intc_irq_unmask, |
151 | .mask_ack = intc_irq_mask, | 156 | .irq_mask_ack = intc_irq_mask, |
152 | .ack = intc_irq_ack, | 157 | .irq_ack = intc_irq_ack, |
153 | .set_type = intc_irq_set_type, | 158 | .irq_set_type = intc_irq_set_type, |
154 | }; | 159 | }; |
155 | 160 | ||
156 | void __init init_IRQ(void) | 161 | void __init init_IRQ(void) |
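With irq_set_type wired up through the new irq_data-based chip, board or driver code can pick the trigger polarity of an external IRQ line before requesting it. A hedged usage sketch (the IRQ number, handler and headers here are illustrative, not taken from this diff):

	#include <linux/init.h>
	#include <linux/interrupt.h>
	#include <linux/irq.h>
	#include <asm/mcfsim.h>

	static irqreturn_t my_handler(int irq, void *dev_id)
	{
		/* handle the external event */
		return IRQ_HANDLED;
	}

	static int __init my_board_irq_init(void)
	{
		int irq = MCFINT_VECBASE + 1;	/* illustrative external IRQ line */

		set_irq_type(irq, IRQ_TYPE_EDGE_FALLING);
		return request_irq(irq, my_handler, 0, "ext-int", NULL);
	}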
diff --git a/arch/m68knommu/platform/527x/config.c b/arch/m68knommu/platform/527x/config.c index 3d9c35c98b98..fa359593b613 100644 --- a/arch/m68knommu/platform/527x/config.c +++ b/arch/m68knommu/platform/527x/config.c | |||
@@ -28,15 +28,15 @@ | |||
28 | 28 | ||
29 | static struct mcf_platform_uart m527x_uart_platform[] = { | 29 | static struct mcf_platform_uart m527x_uart_platform[] = { |
30 | { | 30 | { |
31 | .mapbase = MCF_MBAR + MCFUART_BASE1, | 31 | .mapbase = MCFUART_BASE1, |
32 | .irq = MCFINT_VECBASE + MCFINT_UART0, | 32 | .irq = MCFINT_VECBASE + MCFINT_UART0, |
33 | }, | 33 | }, |
34 | { | 34 | { |
35 | .mapbase = MCF_MBAR + MCFUART_BASE2, | 35 | .mapbase = MCFUART_BASE2, |
36 | .irq = MCFINT_VECBASE + MCFINT_UART1, | 36 | .irq = MCFINT_VECBASE + MCFINT_UART1, |
37 | }, | 37 | }, |
38 | { | 38 | { |
39 | .mapbase = MCF_MBAR + MCFUART_BASE3, | 39 | .mapbase = MCFUART_BASE3, |
40 | .irq = MCFINT_VECBASE + MCFINT_UART2, | 40 | .irq = MCFINT_VECBASE + MCFINT_UART2, |
41 | }, | 41 | }, |
42 | { }, | 42 | { }, |
@@ -50,8 +50,8 @@ static struct platform_device m527x_uart = { | |||
50 | 50 | ||
51 | static struct resource m527x_fec0_resources[] = { | 51 | static struct resource m527x_fec0_resources[] = { |
52 | { | 52 | { |
53 | .start = MCF_MBAR + 0x1000, | 53 | .start = MCFFEC_BASE0, |
54 | .end = MCF_MBAR + 0x1000 + 0x7ff, | 54 | .end = MCFFEC_BASE0 + MCFFEC_SIZE0 - 1, |
55 | .flags = IORESOURCE_MEM, | 55 | .flags = IORESOURCE_MEM, |
56 | }, | 56 | }, |
57 | { | 57 | { |
@@ -73,8 +73,8 @@ static struct resource m527x_fec0_resources[] = { | |||
73 | 73 | ||
74 | static struct resource m527x_fec1_resources[] = { | 74 | static struct resource m527x_fec1_resources[] = { |
75 | { | 75 | { |
76 | .start = MCF_MBAR + 0x1800, | 76 | .start = MCFFEC_BASE1, |
77 | .end = MCF_MBAR + 0x1800 + 0x7ff, | 77 | .end = MCFFEC_BASE1 + MCFFEC_SIZE1 - 1, |
78 | .flags = IORESOURCE_MEM, | 78 | .flags = IORESOURCE_MEM, |
79 | }, | 79 | }, |
80 | { | 80 | { |
diff --git a/arch/m68knommu/platform/527x/gpio.c b/arch/m68knommu/platform/527x/gpio.c index 0b56e19db0f8..205da0aa0f2d 100644 --- a/arch/m68knommu/platform/527x/gpio.c +++ b/arch/m68knommu/platform/527x/gpio.c | |||
@@ -34,9 +34,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
34 | .base = 1, | 34 | .base = 1, |
35 | .ngpio = 7, | 35 | .ngpio = 7, |
36 | }, | 36 | }, |
37 | .pddr = MCFEPORT_EPDDR, | 37 | .pddr = (void __iomem *) MCFEPORT_EPDDR, |
38 | .podr = MCFEPORT_EPDR, | 38 | .podr = (void __iomem *) MCFEPORT_EPDR, |
39 | .ppdr = MCFEPORT_EPPDR, | 39 | .ppdr = (void __iomem *) MCFEPORT_EPPDR, |
40 | }, | 40 | }, |
41 | { | 41 | { |
42 | .gpio_chip = { | 42 | .gpio_chip = { |
@@ -50,11 +50,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
50 | .base = 13, | 50 | .base = 13, |
51 | .ngpio = 3, | 51 | .ngpio = 3, |
52 | }, | 52 | }, |
53 | .pddr = MCFGPIO_PDDR_ADDR, | 53 | .pddr = (void __iomem *) MCFGPIO_PDDR_ADDR, |
54 | .podr = MCFGPIO_PODR_ADDR, | 54 | .podr = (void __iomem *) MCFGPIO_PODR_ADDR, |
55 | .ppdr = MCFGPIO_PPDSDR_ADDR, | 55 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_ADDR, |
56 | .setr = MCFGPIO_PPDSDR_ADDR, | 56 | .setr = (void __iomem *) MCFGPIO_PPDSDR_ADDR, |
57 | .clrr = MCFGPIO_PCLRR_ADDR, | 57 | .clrr = (void __iomem *) MCFGPIO_PCLRR_ADDR, |
58 | }, | 58 | }, |
59 | { | 59 | { |
60 | .gpio_chip = { | 60 | .gpio_chip = { |
@@ -68,11 +68,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
68 | .base = 16, | 68 | .base = 16, |
69 | .ngpio = 8, | 69 | .ngpio = 8, |
70 | }, | 70 | }, |
71 | .pddr = MCFGPIO_PDDR_DATAH, | 71 | .pddr = (void __iomem *) MCFGPIO_PDDR_DATAH, |
72 | .podr = MCFGPIO_PODR_DATAH, | 72 | .podr = (void __iomem *) MCFGPIO_PODR_DATAH, |
73 | .ppdr = MCFGPIO_PPDSDR_DATAH, | 73 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_DATAH, |
74 | .setr = MCFGPIO_PPDSDR_DATAH, | 74 | .setr = (void __iomem *) MCFGPIO_PPDSDR_DATAH, |
75 | .clrr = MCFGPIO_PCLRR_DATAH, | 75 | .clrr = (void __iomem *) MCFGPIO_PCLRR_DATAH, |
76 | }, | 76 | }, |
77 | { | 77 | { |
78 | .gpio_chip = { | 78 | .gpio_chip = { |
@@ -86,11 +86,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
86 | .base = 24, | 86 | .base = 24, |
87 | .ngpio = 8, | 87 | .ngpio = 8, |
88 | }, | 88 | }, |
89 | .pddr = MCFGPIO_PDDR_DATAL, | 89 | .pddr = (void __iomem *) MCFGPIO_PDDR_DATAL, |
90 | .podr = MCFGPIO_PODR_DATAL, | 90 | .podr = (void __iomem *) MCFGPIO_PODR_DATAL, |
91 | .ppdr = MCFGPIO_PPDSDR_DATAL, | 91 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_DATAL, |
92 | .setr = MCFGPIO_PPDSDR_DATAL, | 92 | .setr = (void __iomem *) MCFGPIO_PPDSDR_DATAL, |
93 | .clrr = MCFGPIO_PCLRR_DATAL, | 93 | .clrr = (void __iomem *) MCFGPIO_PCLRR_DATAL, |
94 | }, | 94 | }, |
95 | { | 95 | { |
96 | .gpio_chip = { | 96 | .gpio_chip = { |
@@ -104,11 +104,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
104 | .base = 32, | 104 | .base = 32, |
105 | .ngpio = 8, | 105 | .ngpio = 8, |
106 | }, | 106 | }, |
107 | .pddr = MCFGPIO_PDDR_BUSCTL, | 107 | .pddr = (void __iomem *) MCFGPIO_PDDR_BUSCTL, |
108 | .podr = MCFGPIO_PODR_BUSCTL, | 108 | .podr = (void __iomem *) MCFGPIO_PODR_BUSCTL, |
109 | .ppdr = MCFGPIO_PPDSDR_BUSCTL, | 109 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_BUSCTL, |
110 | .setr = MCFGPIO_PPDSDR_BUSCTL, | 110 | .setr = (void __iomem *) MCFGPIO_PPDSDR_BUSCTL, |
111 | .clrr = MCFGPIO_PCLRR_BUSCTL, | 111 | .clrr = (void __iomem *) MCFGPIO_PCLRR_BUSCTL, |
112 | }, | 112 | }, |
113 | { | 113 | { |
114 | .gpio_chip = { | 114 | .gpio_chip = { |
@@ -122,11 +122,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
122 | .base = 40, | 122 | .base = 40, |
123 | .ngpio = 4, | 123 | .ngpio = 4, |
124 | }, | 124 | }, |
125 | .pddr = MCFGPIO_PDDR_BS, | 125 | .pddr = (void __iomem *) MCFGPIO_PDDR_BS, |
126 | .podr = MCFGPIO_PODR_BS, | 126 | .podr = (void __iomem *) MCFGPIO_PODR_BS, |
127 | .ppdr = MCFGPIO_PPDSDR_BS, | 127 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_BS, |
128 | .setr = MCFGPIO_PPDSDR_BS, | 128 | .setr = (void __iomem *) MCFGPIO_PPDSDR_BS, |
129 | .clrr = MCFGPIO_PCLRR_BS, | 129 | .clrr = (void __iomem *) MCFGPIO_PCLRR_BS, |
130 | }, | 130 | }, |
131 | { | 131 | { |
132 | .gpio_chip = { | 132 | .gpio_chip = { |
@@ -140,11 +140,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
140 | .base = 49, | 140 | .base = 49, |
141 | .ngpio = 7, | 141 | .ngpio = 7, |
142 | }, | 142 | }, |
143 | .pddr = MCFGPIO_PDDR_CS, | 143 | .pddr = (void __iomem *) MCFGPIO_PDDR_CS, |
144 | .podr = MCFGPIO_PODR_CS, | 144 | .podr = (void __iomem *) MCFGPIO_PODR_CS, |
145 | .ppdr = MCFGPIO_PPDSDR_CS, | 145 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_CS, |
146 | .setr = MCFGPIO_PPDSDR_CS, | 146 | .setr = (void __iomem *) MCFGPIO_PPDSDR_CS, |
147 | .clrr = MCFGPIO_PCLRR_CS, | 147 | .clrr = (void __iomem *) MCFGPIO_PCLRR_CS, |
148 | }, | 148 | }, |
149 | { | 149 | { |
150 | .gpio_chip = { | 150 | .gpio_chip = { |
@@ -158,11 +158,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
158 | .base = 56, | 158 | .base = 56, |
159 | .ngpio = 6, | 159 | .ngpio = 6, |
160 | }, | 160 | }, |
161 | .pddr = MCFGPIO_PDDR_SDRAM, | 161 | .pddr = (void __iomem *) MCFGPIO_PDDR_SDRAM, |
162 | .podr = MCFGPIO_PODR_SDRAM, | 162 | .podr = (void __iomem *) MCFGPIO_PODR_SDRAM, |
163 | .ppdr = MCFGPIO_PPDSDR_SDRAM, | 163 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_SDRAM, |
164 | .setr = MCFGPIO_PPDSDR_SDRAM, | 164 | .setr = (void __iomem *) MCFGPIO_PPDSDR_SDRAM, |
165 | .clrr = MCFGPIO_PCLRR_SDRAM, | 165 | .clrr = (void __iomem *) MCFGPIO_PCLRR_SDRAM, |
166 | }, | 166 | }, |
167 | { | 167 | { |
168 | .gpio_chip = { | 168 | .gpio_chip = { |
@@ -176,11 +176,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
176 | .base = 64, | 176 | .base = 64, |
177 | .ngpio = 4, | 177 | .ngpio = 4, |
178 | }, | 178 | }, |
179 | .pddr = MCFGPIO_PDDR_FECI2C, | 179 | .pddr = (void __iomem *) MCFGPIO_PDDR_FECI2C, |
180 | .podr = MCFGPIO_PODR_FECI2C, | 180 | .podr = (void __iomem *) MCFGPIO_PODR_FECI2C, |
181 | .ppdr = MCFGPIO_PPDSDR_FECI2C, | 181 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_FECI2C, |
182 | .setr = MCFGPIO_PPDSDR_FECI2C, | 182 | .setr = (void __iomem *) MCFGPIO_PPDSDR_FECI2C, |
183 | .clrr = MCFGPIO_PCLRR_FECI2C, | 183 | .clrr = (void __iomem *) MCFGPIO_PCLRR_FECI2C, |
184 | }, | 184 | }, |
185 | { | 185 | { |
186 | .gpio_chip = { | 186 | .gpio_chip = { |
@@ -194,11 +194,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
194 | .base = 72, | 194 | .base = 72, |
195 | .ngpio = 2, | 195 | .ngpio = 2, |
196 | }, | 196 | }, |
197 | .pddr = MCFGPIO_PDDR_UARTH, | 197 | .pddr = (void __iomem *) MCFGPIO_PDDR_UARTH, |
198 | .podr = MCFGPIO_PODR_UARTH, | 198 | .podr = (void __iomem *) MCFGPIO_PODR_UARTH, |
199 | .ppdr = MCFGPIO_PPDSDR_UARTH, | 199 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_UARTH, |
200 | .setr = MCFGPIO_PPDSDR_UARTH, | 200 | .setr = (void __iomem *) MCFGPIO_PPDSDR_UARTH, |
201 | .clrr = MCFGPIO_PCLRR_UARTH, | 201 | .clrr = (void __iomem *) MCFGPIO_PCLRR_UARTH, |
202 | }, | 202 | }, |
203 | { | 203 | { |
204 | .gpio_chip = { | 204 | .gpio_chip = { |
@@ -212,11 +212,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
212 | .base = 80, | 212 | .base = 80, |
213 | .ngpio = 8, | 213 | .ngpio = 8, |
214 | }, | 214 | }, |
215 | .pddr = MCFGPIO_PDDR_UARTL, | 215 | .pddr = (void __iomem *) MCFGPIO_PDDR_UARTL, |
216 | .podr = MCFGPIO_PODR_UARTL, | 216 | .podr = (void __iomem *) MCFGPIO_PODR_UARTL, |
217 | .ppdr = MCFGPIO_PPDSDR_UARTL, | 217 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_UARTL, |
218 | .setr = MCFGPIO_PPDSDR_UARTL, | 218 | .setr = (void __iomem *) MCFGPIO_PPDSDR_UARTL, |
219 | .clrr = MCFGPIO_PCLRR_UARTL, | 219 | .clrr = (void __iomem *) MCFGPIO_PCLRR_UARTL, |
220 | }, | 220 | }, |
221 | { | 221 | { |
222 | .gpio_chip = { | 222 | .gpio_chip = { |
@@ -230,11 +230,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
230 | .base = 88, | 230 | .base = 88, |
231 | .ngpio = 5, | 231 | .ngpio = 5, |
232 | }, | 232 | }, |
233 | .pddr = MCFGPIO_PDDR_QSPI, | 233 | .pddr = (void __iomem *) MCFGPIO_PDDR_QSPI, |
234 | .podr = MCFGPIO_PODR_QSPI, | 234 | .podr = (void __iomem *) MCFGPIO_PODR_QSPI, |
235 | .ppdr = MCFGPIO_PPDSDR_QSPI, | 235 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_QSPI, |
236 | .setr = MCFGPIO_PPDSDR_QSPI, | 236 | .setr = (void __iomem *) MCFGPIO_PPDSDR_QSPI, |
237 | .clrr = MCFGPIO_PCLRR_QSPI, | 237 | .clrr = (void __iomem *) MCFGPIO_PCLRR_QSPI, |
238 | }, | 238 | }, |
239 | { | 239 | { |
240 | .gpio_chip = { | 240 | .gpio_chip = { |
@@ -248,11 +248,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
248 | .base = 96, | 248 | .base = 96, |
249 | .ngpio = 8, | 249 | .ngpio = 8, |
250 | }, | 250 | }, |
251 | .pddr = MCFGPIO_PDDR_TIMER, | 251 | .pddr = (void __iomem *) MCFGPIO_PDDR_TIMER, |
252 | .podr = MCFGPIO_PODR_TIMER, | 252 | .podr = (void __iomem *) MCFGPIO_PODR_TIMER, |
253 | .ppdr = MCFGPIO_PPDSDR_TIMER, | 253 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_TIMER, |
254 | .setr = MCFGPIO_PPDSDR_TIMER, | 254 | .setr = (void __iomem *) MCFGPIO_PPDSDR_TIMER, |
255 | .clrr = MCFGPIO_PCLRR_TIMER, | 255 | .clrr = (void __iomem *) MCFGPIO_PCLRR_TIMER, |
256 | }, | 256 | }, |
257 | #elif defined(CONFIG_M5275) | 257 | #elif defined(CONFIG_M5275) |
258 | { | 258 | { |
@@ -267,9 +267,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
267 | .base = 1, | 267 | .base = 1, |
268 | .ngpio = 7, | 268 | .ngpio = 7, |
269 | }, | 269 | }, |
270 | .pddr = MCFEPORT_EPDDR, | 270 | .pddr = (void __iomem *) MCFEPORT_EPDDR, |
271 | .podr = MCFEPORT_EPDR, | 271 | .podr = (void __iomem *) MCFEPORT_EPDR, |
272 | .ppdr = MCFEPORT_EPPDR, | 272 | .ppdr = (void __iomem *) MCFEPORT_EPPDR, |
273 | }, | 273 | }, |
274 | { | 274 | { |
275 | .gpio_chip = { | 275 | .gpio_chip = { |
@@ -283,11 +283,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
283 | .base = 8, | 283 | .base = 8, |
284 | .ngpio = 8, | 284 | .ngpio = 8, |
285 | }, | 285 | }, |
286 | .pddr = MCFGPIO_PDDR_BUSCTL, | 286 | .pddr = (void __iomem *) MCFGPIO_PDDR_BUSCTL, |
287 | .podr = MCFGPIO_PODR_BUSCTL, | 287 | .podr = (void __iomem *) MCFGPIO_PODR_BUSCTL, |
288 | .ppdr = MCFGPIO_PPDSDR_BUSCTL, | 288 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_BUSCTL, |
289 | .setr = MCFGPIO_PPDSDR_BUSCTL, | 289 | .setr = (void __iomem *) MCFGPIO_PPDSDR_BUSCTL, |
290 | .clrr = MCFGPIO_PCLRR_BUSCTL, | 290 | .clrr = (void __iomem *) MCFGPIO_PCLRR_BUSCTL, |
291 | }, | 291 | }, |
292 | { | 292 | { |
293 | .gpio_chip = { | 293 | .gpio_chip = { |
@@ -301,11 +301,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
301 | .base = 21, | 301 | .base = 21, |
302 | .ngpio = 3, | 302 | .ngpio = 3, |
303 | }, | 303 | }, |
304 | .pddr = MCFGPIO_PDDR_ADDR, | 304 | .pddr = (void __iomem *) MCFGPIO_PDDR_ADDR, |
305 | .podr = MCFGPIO_PODR_ADDR, | 305 | .podr = (void __iomem *) MCFGPIO_PODR_ADDR, |
306 | .ppdr = MCFGPIO_PPDSDR_ADDR, | 306 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_ADDR, |
307 | .setr = MCFGPIO_PPDSDR_ADDR, | 307 | .setr = (void __iomem *) MCFGPIO_PPDSDR_ADDR, |
308 | .clrr = MCFGPIO_PCLRR_ADDR, | 308 | .clrr = (void __iomem *) MCFGPIO_PCLRR_ADDR, |
309 | }, | 309 | }, |
310 | { | 310 | { |
311 | .gpio_chip = { | 311 | .gpio_chip = { |
@@ -319,11 +319,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
319 | .base = 25, | 319 | .base = 25, |
320 | .ngpio = 7, | 320 | .ngpio = 7, |
321 | }, | 321 | }, |
322 | .pddr = MCFGPIO_PDDR_CS, | 322 | .pddr = (void __iomem *) MCFGPIO_PDDR_CS, |
323 | .podr = MCFGPIO_PODR_CS, | 323 | .podr = (void __iomem *) MCFGPIO_PODR_CS, |
324 | .ppdr = MCFGPIO_PPDSDR_CS, | 324 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_CS, |
325 | .setr = MCFGPIO_PPDSDR_CS, | 325 | .setr = (void __iomem *) MCFGPIO_PPDSDR_CS, |
326 | .clrr = MCFGPIO_PCLRR_CS, | 326 | .clrr = (void __iomem *) MCFGPIO_PCLRR_CS, |
327 | }, | 327 | }, |
328 | { | 328 | { |
329 | .gpio_chip = { | 329 | .gpio_chip = { |
@@ -337,11 +337,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
337 | .base = 32, | 337 | .base = 32, |
338 | .ngpio = 8, | 338 | .ngpio = 8, |
339 | }, | 339 | }, |
340 | .pddr = MCFGPIO_PDDR_FEC0H, | 340 | .pddr = (void __iomem *) MCFGPIO_PDDR_FEC0H, |
341 | .podr = MCFGPIO_PODR_FEC0H, | 341 | .podr = (void __iomem *) MCFGPIO_PODR_FEC0H, |
342 | .ppdr = MCFGPIO_PPDSDR_FEC0H, | 342 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_FEC0H, |
343 | .setr = MCFGPIO_PPDSDR_FEC0H, | 343 | .setr = (void __iomem *) MCFGPIO_PPDSDR_FEC0H, |
344 | .clrr = MCFGPIO_PCLRR_FEC0H, | 344 | .clrr = (void __iomem *) MCFGPIO_PCLRR_FEC0H, |
345 | }, | 345 | }, |
346 | { | 346 | { |
347 | .gpio_chip = { | 347 | .gpio_chip = { |
@@ -355,11 +355,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
355 | .base = 40, | 355 | .base = 40, |
356 | .ngpio = 8, | 356 | .ngpio = 8, |
357 | }, | 357 | }, |
358 | .pddr = MCFGPIO_PDDR_FEC0L, | 358 | .pddr = (void __iomem *) MCFGPIO_PDDR_FEC0L, |
359 | .podr = MCFGPIO_PODR_FEC0L, | 359 | .podr = (void __iomem *) MCFGPIO_PODR_FEC0L, |
360 | .ppdr = MCFGPIO_PPDSDR_FEC0L, | 360 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_FEC0L, |
361 | .setr = MCFGPIO_PPDSDR_FEC0L, | 361 | .setr = (void __iomem *) MCFGPIO_PPDSDR_FEC0L, |
362 | .clrr = MCFGPIO_PCLRR_FEC0L, | 362 | .clrr = (void __iomem *) MCFGPIO_PCLRR_FEC0L, |
363 | }, | 363 | }, |
364 | { | 364 | { |
365 | .gpio_chip = { | 365 | .gpio_chip = { |
@@ -373,11 +373,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
373 | .base = 48, | 373 | .base = 48, |
374 | .ngpio = 6, | 374 | .ngpio = 6, |
375 | }, | 375 | }, |
376 | .pddr = MCFGPIO_PDDR_FECI2C, | 376 | .pddr = (void __iomem *) MCFGPIO_PDDR_FECI2C, |
377 | .podr = MCFGPIO_PODR_FECI2C, | 377 | .podr = (void __iomem *) MCFGPIO_PODR_FECI2C, |
378 | .ppdr = MCFGPIO_PPDSDR_FECI2C, | 378 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_FECI2C, |
379 | .setr = MCFGPIO_PPDSDR_FECI2C, | 379 | .setr = (void __iomem *) MCFGPIO_PPDSDR_FECI2C, |
380 | .clrr = MCFGPIO_PCLRR_FECI2C, | 380 | .clrr = (void __iomem *) MCFGPIO_PCLRR_FECI2C, |
381 | }, | 381 | }, |
382 | { | 382 | { |
383 | .gpio_chip = { | 383 | .gpio_chip = { |
@@ -391,11 +391,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
391 | .base = 56, | 391 | .base = 56, |
392 | .ngpio = 7, | 392 | .ngpio = 7, |
393 | }, | 393 | }, |
394 | .pddr = MCFGPIO_PDDR_QSPI, | 394 | .pddr = (void __iomem *) MCFGPIO_PDDR_QSPI, |
395 | .podr = MCFGPIO_PODR_QSPI, | 395 | .podr = (void __iomem *) MCFGPIO_PODR_QSPI, |
396 | .ppdr = MCFGPIO_PPDSDR_QSPI, | 396 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_QSPI, |
397 | .setr = MCFGPIO_PPDSDR_QSPI, | 397 | .setr = (void __iomem *) MCFGPIO_PPDSDR_QSPI, |
398 | .clrr = MCFGPIO_PCLRR_QSPI, | 398 | .clrr = (void __iomem *) MCFGPIO_PCLRR_QSPI, |
399 | }, | 399 | }, |
400 | { | 400 | { |
401 | .gpio_chip = { | 401 | .gpio_chip = { |
@@ -409,11 +409,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
409 | .base = 64, | 409 | .base = 64, |
410 | .ngpio = 8, | 410 | .ngpio = 8, |
411 | }, | 411 | }, |
412 | .pddr = MCFGPIO_PDDR_SDRAM, | 412 | .pddr = (void __iomem *) MCFGPIO_PDDR_SDRAM, |
413 | .podr = MCFGPIO_PODR_SDRAM, | 413 | .podr = (void __iomem *) MCFGPIO_PODR_SDRAM, |
414 | .ppdr = MCFGPIO_PPDSDR_SDRAM, | 414 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_SDRAM, |
415 | .setr = MCFGPIO_PPDSDR_SDRAM, | 415 | .setr = (void __iomem *) MCFGPIO_PPDSDR_SDRAM, |
416 | .clrr = MCFGPIO_PCLRR_SDRAM, | 416 | .clrr = (void __iomem *) MCFGPIO_PCLRR_SDRAM, |
417 | }, | 417 | }, |
418 | { | 418 | { |
419 | .gpio_chip = { | 419 | .gpio_chip = { |
@@ -427,11 +427,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
427 | .base = 72, | 427 | .base = 72, |
428 | .ngpio = 4, | 428 | .ngpio = 4, |
429 | }, | 429 | }, |
430 | .pddr = MCFGPIO_PDDR_TIMERH, | 430 | .pddr = (void __iomem *) MCFGPIO_PDDR_TIMERH, |
431 | .podr = MCFGPIO_PODR_TIMERH, | 431 | .podr = (void __iomem *) MCFGPIO_PODR_TIMERH, |
432 | .ppdr = MCFGPIO_PPDSDR_TIMERH, | 432 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_TIMERH, |
433 | .setr = MCFGPIO_PPDSDR_TIMERH, | 433 | .setr = (void __iomem *) MCFGPIO_PPDSDR_TIMERH, |
434 | .clrr = MCFGPIO_PCLRR_TIMERH, | 434 | .clrr = (void __iomem *) MCFGPIO_PCLRR_TIMERH, |
435 | }, | 435 | }, |
436 | { | 436 | { |
437 | .gpio_chip = { | 437 | .gpio_chip = { |
@@ -445,11 +445,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
445 | .base = 80, | 445 | .base = 80, |
446 | .ngpio = 4, | 446 | .ngpio = 4, |
447 | }, | 447 | }, |
448 | .pddr = MCFGPIO_PDDR_TIMERL, | 448 | .pddr = (void __iomem *) MCFGPIO_PDDR_TIMERL, |
449 | .podr = MCFGPIO_PODR_TIMERL, | 449 | .podr = (void __iomem *) MCFGPIO_PODR_TIMERL, |
450 | .ppdr = MCFGPIO_PPDSDR_TIMERL, | 450 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_TIMERL, |
451 | .setr = MCFGPIO_PPDSDR_TIMERL, | 451 | .setr = (void __iomem *) MCFGPIO_PPDSDR_TIMERL, |
452 | .clrr = MCFGPIO_PCLRR_TIMERL, | 452 | .clrr = (void __iomem *) MCFGPIO_PCLRR_TIMERL, |
453 | }, | 453 | }, |
454 | { | 454 | { |
455 | .gpio_chip = { | 455 | .gpio_chip = { |
@@ -463,11 +463,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
463 | .base = 88, | 463 | .base = 88, |
464 | .ngpio = 8, | 464 | .ngpio = 8, |
465 | }, | 465 | }, |
466 | .pddr = MCFGPIO_PDDR_UARTL, | 466 | .pddr = (void __iomem *) MCFGPIO_PDDR_UARTL, |
467 | .podr = MCFGPIO_PODR_UARTL, | 467 | .podr = (void __iomem *) MCFGPIO_PODR_UARTL, |
468 | .ppdr = MCFGPIO_PPDSDR_UARTL, | 468 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_UARTL, |
469 | .setr = MCFGPIO_PPDSDR_UARTL, | 469 | .setr = (void __iomem *) MCFGPIO_PPDSDR_UARTL, |
470 | .clrr = MCFGPIO_PCLRR_UARTL, | 470 | .clrr = (void __iomem *) MCFGPIO_PCLRR_UARTL, |
471 | }, | 471 | }, |
472 | { | 472 | { |
473 | .gpio_chip = { | 473 | .gpio_chip = { |
@@ -481,11 +481,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
481 | .base = 96, | 481 | .base = 96, |
482 | .ngpio = 8, | 482 | .ngpio = 8, |
483 | }, | 483 | }, |
484 | .pddr = MCFGPIO_PDDR_FEC1H, | 484 | .pddr = (void __iomem *) MCFGPIO_PDDR_FEC1H, |
485 | .podr = MCFGPIO_PODR_FEC1H, | 485 | .podr = (void __iomem *) MCFGPIO_PODR_FEC1H, |
486 | .ppdr = MCFGPIO_PPDSDR_FEC1H, | 486 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_FEC1H, |
487 | .setr = MCFGPIO_PPDSDR_FEC1H, | 487 | .setr = (void __iomem *) MCFGPIO_PPDSDR_FEC1H, |
488 | .clrr = MCFGPIO_PCLRR_FEC1H, | 488 | .clrr = (void __iomem *) MCFGPIO_PCLRR_FEC1H, |
489 | }, | 489 | }, |
490 | { | 490 | { |
491 | .gpio_chip = { | 491 | .gpio_chip = { |
@@ -499,11 +499,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
499 | .base = 104, | 499 | .base = 104, |
500 | .ngpio = 8, | 500 | .ngpio = 8, |
501 | }, | 501 | }, |
502 | .pddr = MCFGPIO_PDDR_FEC1L, | 502 | .pddr = (void __iomem *) MCFGPIO_PDDR_FEC1L, |
503 | .podr = MCFGPIO_PODR_FEC1L, | 503 | .podr = (void __iomem *) MCFGPIO_PODR_FEC1L, |
504 | .ppdr = MCFGPIO_PPDSDR_FEC1L, | 504 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_FEC1L, |
505 | .setr = MCFGPIO_PPDSDR_FEC1L, | 505 | .setr = (void __iomem *) MCFGPIO_PPDSDR_FEC1L, |
506 | .clrr = MCFGPIO_PCLRR_FEC1L, | 506 | .clrr = (void __iomem *) MCFGPIO_PCLRR_FEC1L, |
507 | }, | 507 | }, |
508 | { | 508 | { |
509 | .gpio_chip = { | 509 | .gpio_chip = { |
@@ -517,11 +517,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
517 | .base = 114, | 517 | .base = 114, |
518 | .ngpio = 2, | 518 | .ngpio = 2, |
519 | }, | 519 | }, |
520 | .pddr = MCFGPIO_PDDR_BS, | 520 | .pddr = (void __iomem *) MCFGPIO_PDDR_BS, |
521 | .podr = MCFGPIO_PODR_BS, | 521 | .podr = (void __iomem *) MCFGPIO_PODR_BS, |
522 | .ppdr = MCFGPIO_PPDSDR_BS, | 522 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_BS, |
523 | .setr = MCFGPIO_PPDSDR_BS, | 523 | .setr = (void __iomem *) MCFGPIO_PPDSDR_BS, |
524 | .clrr = MCFGPIO_PCLRR_BS, | 524 | .clrr = (void __iomem *) MCFGPIO_PCLRR_BS, |
525 | }, | 525 | }, |
526 | { | 526 | { |
527 | .gpio_chip = { | 527 | .gpio_chip = { |
@@ -535,11 +535,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
535 | .base = 121, | 535 | .base = 121, |
536 | .ngpio = 7, | 536 | .ngpio = 7, |
537 | }, | 537 | }, |
538 | .pddr = MCFGPIO_PDDR_IRQ, | 538 | .pddr = (void __iomem *) MCFGPIO_PDDR_IRQ, |
539 | .podr = MCFGPIO_PODR_IRQ, | 539 | .podr = (void __iomem *) MCFGPIO_PODR_IRQ, |
540 | .ppdr = MCFGPIO_PPDSDR_IRQ, | 540 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_IRQ, |
541 | .setr = MCFGPIO_PPDSDR_IRQ, | 541 | .setr = (void __iomem *) MCFGPIO_PPDSDR_IRQ, |
542 | .clrr = MCFGPIO_PCLRR_IRQ, | 542 | .clrr = (void __iomem *) MCFGPIO_PCLRR_IRQ, |
543 | }, | 543 | }, |
544 | { | 544 | { |
545 | .gpio_chip = { | 545 | .gpio_chip = { |
@@ -553,11 +553,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
553 | .base = 128, | 553 | .base = 128, |
554 | .ngpio = 1, | 554 | .ngpio = 1, |
555 | }, | 555 | }, |
556 | .pddr = MCFGPIO_PDDR_USBH, | 556 | .pddr = (void __iomem *) MCFGPIO_PDDR_USBH, |
557 | .podr = MCFGPIO_PODR_USBH, | 557 | .podr = (void __iomem *) MCFGPIO_PODR_USBH, |
558 | .ppdr = MCFGPIO_PPDSDR_USBH, | 558 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_USBH, |
559 | .setr = MCFGPIO_PPDSDR_USBH, | 559 | .setr = (void __iomem *) MCFGPIO_PPDSDR_USBH, |
560 | .clrr = MCFGPIO_PCLRR_USBH, | 560 | .clrr = (void __iomem *) MCFGPIO_PCLRR_USBH, |
561 | }, | 561 | }, |
562 | { | 562 | { |
563 | .gpio_chip = { | 563 | .gpio_chip = { |
@@ -571,11 +571,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
571 | .base = 136, | 571 | .base = 136, |
572 | .ngpio = 8, | 572 | .ngpio = 8, |
573 | }, | 573 | }, |
574 | .pddr = MCFGPIO_PDDR_USBL, | 574 | .pddr = (void __iomem *) MCFGPIO_PDDR_USBL, |
575 | .podr = MCFGPIO_PODR_USBL, | 575 | .podr = (void __iomem *) MCFGPIO_PODR_USBL, |
576 | .ppdr = MCFGPIO_PPDSDR_USBL, | 576 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_USBL, |
577 | .setr = MCFGPIO_PPDSDR_USBL, | 577 | .setr = (void __iomem *) MCFGPIO_PPDSDR_USBL, |
578 | .clrr = MCFGPIO_PCLRR_USBL, | 578 | .clrr = (void __iomem *) MCFGPIO_PCLRR_USBL, |
579 | }, | 579 | }, |
580 | { | 580 | { |
581 | .gpio_chip = { | 581 | .gpio_chip = { |
@@ -589,11 +589,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
589 | .base = 144, | 589 | .base = 144, |
590 | .ngpio = 4, | 590 | .ngpio = 4, |
591 | }, | 591 | }, |
592 | .pddr = MCFGPIO_PDDR_UARTH, | 592 | .pddr = (void __iomem *) MCFGPIO_PDDR_UARTH, |
593 | .podr = MCFGPIO_PODR_UARTH, | 593 | .podr = (void __iomem *) MCFGPIO_PODR_UARTH, |
594 | .ppdr = MCFGPIO_PPDSDR_UARTH, | 594 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_UARTH, |
595 | .setr = MCFGPIO_PPDSDR_UARTH, | 595 | .setr = (void __iomem *) MCFGPIO_PPDSDR_UARTH, |
596 | .clrr = MCFGPIO_PCLRR_UARTH, | 596 | .clrr = (void __iomem *) MCFGPIO_PCLRR_UARTH, |
597 | }, | 597 | }, |
598 | #endif | 598 | #endif |
599 | }; | 599 | }; |
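
Every hunk in the gpio.c files above makes the same change: the MCFGPIO_*/MCFEPORT_* register macros expand to plain addresses, so each initializer now casts them to void __iomem * to match the pointer-typed register fields of struct mcf_gpio_chip. The following is only a sketch of that pattern, with the struct reconstructed from the field names used in these initializers; the real definition lives in the m68knommu GPIO headers and may differ in detail.

#include <linux/kernel.h>
#include <linux/gpio.h>
#include <linux/io.h>

struct mcf_gpio_chip {
	struct gpio_chip gpio_chip;
	void __iomem *pddr;	/* pin data direction register */
	void __iomem *podr;	/* pin output data register */
	void __iomem *ppdr;	/* pin input (read) data register */
	void __iomem *setr;	/* set-output register, where the part has one */
	void __iomem *clrr;	/* clear-output register, where the part has one */
};

/*
 * With pointer-typed fields, a get_value callback can read the input
 * register through the usual MMIO accessors (illustrative only):
 */
static int mcf_gpio_get_value_sketch(struct gpio_chip *chip, unsigned offset)
{
	struct mcf_gpio_chip *mcf_chip =
		container_of(chip, struct mcf_gpio_chip, gpio_chip);

	return (__raw_readb(mcf_chip->ppdr) & (1 << offset)) ? 1 : 0;
}
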
diff --git a/arch/m68knommu/platform/528x/config.c b/arch/m68knommu/platform/528x/config.c index 76b743343bfa..ac39fc661219 100644 --- a/arch/m68knommu/platform/528x/config.c +++ b/arch/m68knommu/platform/528x/config.c | |||
@@ -29,15 +29,15 @@ | |||
29 | 29 | ||
30 | static struct mcf_platform_uart m528x_uart_platform[] = { | 30 | static struct mcf_platform_uart m528x_uart_platform[] = { |
31 | { | 31 | { |
32 | .mapbase = MCF_MBAR + MCFUART_BASE1, | 32 | .mapbase = MCFUART_BASE1, |
33 | .irq = MCFINT_VECBASE + MCFINT_UART0, | 33 | .irq = MCFINT_VECBASE + MCFINT_UART0, |
34 | }, | 34 | }, |
35 | { | 35 | { |
36 | .mapbase = MCF_MBAR + MCFUART_BASE2, | 36 | .mapbase = MCFUART_BASE2, |
37 | .irq = MCFINT_VECBASE + MCFINT_UART0 + 1, | 37 | .irq = MCFINT_VECBASE + MCFINT_UART0 + 1, |
38 | }, | 38 | }, |
39 | { | 39 | { |
40 | .mapbase = MCF_MBAR + MCFUART_BASE3, | 40 | .mapbase = MCFUART_BASE3, |
41 | .irq = MCFINT_VECBASE + MCFINT_UART0 + 2, | 41 | .irq = MCFINT_VECBASE + MCFINT_UART0 + 2, |
42 | }, | 42 | }, |
43 | { }, | 43 | { }, |
@@ -51,8 +51,8 @@ static struct platform_device m528x_uart = { | |||
51 | 51 | ||
52 | static struct resource m528x_fec_resources[] = { | 52 | static struct resource m528x_fec_resources[] = { |
53 | { | 53 | { |
54 | .start = MCF_MBAR + 0x1000, | 54 | .start = MCFFEC_BASE, |
55 | .end = MCF_MBAR + 0x1000 + 0x7ff, | 55 | .end = MCFFEC_BASE + MCFFEC_SIZE - 1, |
56 | .flags = IORESOURCE_MEM, | 56 | .flags = IORESOURCE_MEM, |
57 | }, | 57 | }, |
58 | { | 58 | { |
@@ -227,9 +227,9 @@ static void __init m528x_uart_init_line(int line, int irq) | |||
227 | 227 | ||
228 | /* make sure PUAPAR is set for UART0 and UART1 */ | 228 | /* make sure PUAPAR is set for UART0 and UART1 */ |
229 | if (line < 2) { | 229 | if (line < 2) { |
230 | port = readb(MCF_MBAR + MCF5282_GPIO_PUAPAR); | 230 | port = readb(MCF5282_GPIO_PUAPAR); |
231 | port |= (0x03 << (line * 2)); | 231 | port |= (0x03 << (line * 2)); |
232 | writeb(port, MCF_MBAR + MCF5282_GPIO_PUAPAR); | 232 | writeb(port, MCF5282_GPIO_PUAPAR); |
233 | } | 233 | } |
234 | } | 234 | } |
235 | 235 | ||
diff --git a/arch/m68knommu/platform/528x/gpio.c b/arch/m68knommu/platform/528x/gpio.c index eedaf0adbcd7..526db665d87e 100644 --- a/arch/m68knommu/platform/528x/gpio.c +++ b/arch/m68knommu/platform/528x/gpio.c | |||
@@ -33,9 +33,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
33 | .base = 1, | 33 | .base = 1, |
34 | .ngpio = 7, | 34 | .ngpio = 7, |
35 | }, | 35 | }, |
36 | .pddr = MCFEPORT_EPDDR, | 36 | .pddr = (void __iomem *)MCFEPORT_EPDDR, |
37 | .podr = MCFEPORT_EPDR, | 37 | .podr = (void __iomem *)MCFEPORT_EPDR, |
38 | .ppdr = MCFEPORT_EPPDR, | 38 | .ppdr = (void __iomem *)MCFEPORT_EPPDR, |
39 | }, | 39 | }, |
40 | { | 40 | { |
41 | .gpio_chip = { | 41 | .gpio_chip = { |
@@ -49,9 +49,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
49 | .base = 8, | 49 | .base = 8, |
50 | .ngpio = 4, | 50 | .ngpio = 4, |
51 | }, | 51 | }, |
52 | .pddr = MCFGPTA_GPTDDR, | 52 | .pddr = (void __iomem *)MCFGPTA_GPTDDR, |
53 | .podr = MCFGPTA_GPTPORT, | 53 | .podr = (void __iomem *)MCFGPTA_GPTPORT, |
54 | .ppdr = MCFGPTB_GPTPORT, | 54 | .ppdr = (void __iomem *)MCFGPTB_GPTPORT, |
55 | }, | 55 | }, |
56 | { | 56 | { |
57 | .gpio_chip = { | 57 | .gpio_chip = { |
@@ -65,9 +65,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
65 | .base = 16, | 65 | .base = 16, |
66 | .ngpio = 4, | 66 | .ngpio = 4, |
67 | }, | 67 | }, |
68 | .pddr = MCFGPTB_GPTDDR, | 68 | .pddr = (void __iomem *)MCFGPTB_GPTDDR, |
69 | .podr = MCFGPTB_GPTPORT, | 69 | .podr = (void __iomem *)MCFGPTB_GPTPORT, |
70 | .ppdr = MCFGPTB_GPTPORT, | 70 | .ppdr = (void __iomem *)MCFGPTB_GPTPORT, |
71 | }, | 71 | }, |
72 | { | 72 | { |
73 | .gpio_chip = { | 73 | .gpio_chip = { |
@@ -81,9 +81,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
81 | .base = 24, | 81 | .base = 24, |
82 | .ngpio = 4, | 82 | .ngpio = 4, |
83 | }, | 83 | }, |
84 | .pddr = MCFQADC_DDRQA, | 84 | .pddr = (void __iomem *)MCFQADC_DDRQA, |
85 | .podr = MCFQADC_PORTQA, | 85 | .podr = (void __iomem *)MCFQADC_PORTQA, |
86 | .ppdr = MCFQADC_PORTQA, | 86 | .ppdr = (void __iomem *)MCFQADC_PORTQA, |
87 | }, | 87 | }, |
88 | { | 88 | { |
89 | .gpio_chip = { | 89 | .gpio_chip = { |
@@ -97,9 +97,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
97 | .base = 32, | 97 | .base = 32, |
98 | .ngpio = 4, | 98 | .ngpio = 4, |
99 | }, | 99 | }, |
100 | .pddr = MCFQADC_DDRQB, | 100 | .pddr = (void __iomem *)MCFQADC_DDRQB, |
101 | .podr = MCFQADC_PORTQB, | 101 | .podr = (void __iomem *)MCFQADC_PORTQB, |
102 | .ppdr = MCFQADC_PORTQB, | 102 | .ppdr = (void __iomem *)MCFQADC_PORTQB, |
103 | }, | 103 | }, |
104 | { | 104 | { |
105 | .gpio_chip = { | 105 | .gpio_chip = { |
@@ -113,11 +113,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
113 | .base = 40, | 113 | .base = 40, |
114 | .ngpio = 8, | 114 | .ngpio = 8, |
115 | }, | 115 | }, |
116 | .pddr = MCFGPIO_DDRA, | 116 | .pddr = (void __iomem *)MCFGPIO_DDRA, |
117 | .podr = MCFGPIO_PORTA, | 117 | .podr = (void __iomem *)MCFGPIO_PORTA, |
118 | .ppdr = MCFGPIO_PORTAP, | 118 | .ppdr = (void __iomem *)MCFGPIO_PORTAP, |
119 | .setr = MCFGPIO_SETA, | 119 | .setr = (void __iomem *)MCFGPIO_SETA, |
120 | .clrr = MCFGPIO_CLRA, | 120 | .clrr = (void __iomem *)MCFGPIO_CLRA, |
121 | }, | 121 | }, |
122 | { | 122 | { |
123 | .gpio_chip = { | 123 | .gpio_chip = { |
@@ -131,11 +131,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
131 | .base = 48, | 131 | .base = 48, |
132 | .ngpio = 8, | 132 | .ngpio = 8, |
133 | }, | 133 | }, |
134 | .pddr = MCFGPIO_DDRB, | 134 | .pddr = (void __iomem *)MCFGPIO_DDRB, |
135 | .podr = MCFGPIO_PORTB, | 135 | .podr = (void __iomem *)MCFGPIO_PORTB, |
136 | .ppdr = MCFGPIO_PORTBP, | 136 | .ppdr = (void __iomem *)MCFGPIO_PORTBP, |
137 | .setr = MCFGPIO_SETB, | 137 | .setr = (void __iomem *)MCFGPIO_SETB, |
138 | .clrr = MCFGPIO_CLRB, | 138 | .clrr = (void __iomem *)MCFGPIO_CLRB, |
139 | }, | 139 | }, |
140 | { | 140 | { |
141 | .gpio_chip = { | 141 | .gpio_chip = { |
@@ -149,11 +149,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
149 | .base = 56, | 149 | .base = 56, |
150 | .ngpio = 8, | 150 | .ngpio = 8, |
151 | }, | 151 | }, |
152 | .pddr = MCFGPIO_DDRC, | 152 | .pddr = (void __iomem *)MCFGPIO_DDRC, |
153 | .podr = MCFGPIO_PORTC, | 153 | .podr = (void __iomem *)MCFGPIO_PORTC, |
154 | .ppdr = MCFGPIO_PORTCP, | 154 | .ppdr = (void __iomem *)MCFGPIO_PORTCP, |
155 | .setr = MCFGPIO_SETC, | 155 | .setr = (void __iomem *)MCFGPIO_SETC, |
156 | .clrr = MCFGPIO_CLRC, | 156 | .clrr = (void __iomem *)MCFGPIO_CLRC, |
157 | }, | 157 | }, |
158 | { | 158 | { |
159 | .gpio_chip = { | 159 | .gpio_chip = { |
@@ -167,11 +167,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
167 | .base = 64, | 167 | .base = 64, |
168 | .ngpio = 8, | 168 | .ngpio = 8, |
169 | }, | 169 | }, |
170 | .pddr = MCFGPIO_DDRD, | 170 | .pddr = (void __iomem *)MCFGPIO_DDRD, |
171 | .podr = MCFGPIO_PORTD, | 171 | .podr = (void __iomem *)MCFGPIO_PORTD, |
172 | .ppdr = MCFGPIO_PORTDP, | 172 | .ppdr = (void __iomem *)MCFGPIO_PORTDP, |
173 | .setr = MCFGPIO_SETD, | 173 | .setr = (void __iomem *)MCFGPIO_SETD, |
174 | .clrr = MCFGPIO_CLRD, | 174 | .clrr = (void __iomem *)MCFGPIO_CLRD, |
175 | }, | 175 | }, |
176 | { | 176 | { |
177 | .gpio_chip = { | 177 | .gpio_chip = { |
@@ -185,11 +185,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
185 | .base = 72, | 185 | .base = 72, |
186 | .ngpio = 8, | 186 | .ngpio = 8, |
187 | }, | 187 | }, |
188 | .pddr = MCFGPIO_DDRE, | 188 | .pddr = (void __iomem *)MCFGPIO_DDRE, |
189 | .podr = MCFGPIO_PORTE, | 189 | .podr = (void __iomem *)MCFGPIO_PORTE, |
190 | .ppdr = MCFGPIO_PORTEP, | 190 | .ppdr = (void __iomem *)MCFGPIO_PORTEP, |
191 | .setr = MCFGPIO_SETE, | 191 | .setr = (void __iomem *)MCFGPIO_SETE, |
192 | .clrr = MCFGPIO_CLRE, | 192 | .clrr = (void __iomem *)MCFGPIO_CLRE, |
193 | }, | 193 | }, |
194 | { | 194 | { |
195 | .gpio_chip = { | 195 | .gpio_chip = { |
@@ -203,11 +203,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
203 | .base = 80, | 203 | .base = 80, |
204 | .ngpio = 8, | 204 | .ngpio = 8, |
205 | }, | 205 | }, |
206 | .pddr = MCFGPIO_DDRF, | 206 | .pddr = (void __iomem *)MCFGPIO_DDRF, |
207 | .podr = MCFGPIO_PORTF, | 207 | .podr = (void __iomem *)MCFGPIO_PORTF, |
208 | .ppdr = MCFGPIO_PORTFP, | 208 | .ppdr = (void __iomem *)MCFGPIO_PORTFP, |
209 | .setr = MCFGPIO_SETF, | 209 | .setr = (void __iomem *)MCFGPIO_SETF, |
210 | .clrr = MCFGPIO_CLRF, | 210 | .clrr = (void __iomem *)MCFGPIO_CLRF, |
211 | }, | 211 | }, |
212 | { | 212 | { |
213 | .gpio_chip = { | 213 | .gpio_chip = { |
@@ -221,11 +221,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
221 | .base = 88, | 221 | .base = 88, |
222 | .ngpio = 8, | 222 | .ngpio = 8, |
223 | }, | 223 | }, |
224 | .pddr = MCFGPIO_DDRG, | 224 | .pddr = (void __iomem *)MCFGPIO_DDRG, |
225 | .podr = MCFGPIO_PORTG, | 225 | .podr = (void __iomem *)MCFGPIO_PORTG, |
226 | .ppdr = MCFGPIO_PORTGP, | 226 | .ppdr = (void __iomem *)MCFGPIO_PORTGP, |
227 | .setr = MCFGPIO_SETG, | 227 | .setr = (void __iomem *)MCFGPIO_SETG, |
228 | .clrr = MCFGPIO_CLRG, | 228 | .clrr = (void __iomem *)MCFGPIO_CLRG, |
229 | }, | 229 | }, |
230 | { | 230 | { |
231 | .gpio_chip = { | 231 | .gpio_chip = { |
@@ -239,11 +239,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
239 | .base = 96, | 239 | .base = 96, |
240 | .ngpio = 8, | 240 | .ngpio = 8, |
241 | }, | 241 | }, |
242 | .pddr = MCFGPIO_DDRH, | 242 | .pddr = (void __iomem *)MCFGPIO_DDRH, |
243 | .podr = MCFGPIO_PORTH, | 243 | .podr = (void __iomem *)MCFGPIO_PORTH, |
244 | .ppdr = MCFGPIO_PORTHP, | 244 | .ppdr = (void __iomem *)MCFGPIO_PORTHP, |
245 | .setr = MCFGPIO_SETH, | 245 | .setr = (void __iomem *)MCFGPIO_SETH, |
246 | .clrr = MCFGPIO_CLRH, | 246 | .clrr = (void __iomem *)MCFGPIO_CLRH, |
247 | }, | 247 | }, |
248 | { | 248 | { |
249 | .gpio_chip = { | 249 | .gpio_chip = { |
@@ -257,11 +257,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
257 | .base = 104, | 257 | .base = 104, |
258 | .ngpio = 8, | 258 | .ngpio = 8, |
259 | }, | 259 | }, |
260 | .pddr = MCFGPIO_DDRJ, | 260 | .pddr = (void __iomem *)MCFGPIO_DDRJ, |
261 | .podr = MCFGPIO_PORTJ, | 261 | .podr = (void __iomem *)MCFGPIO_PORTJ, |
262 | .ppdr = MCFGPIO_PORTJP, | 262 | .ppdr = (void __iomem *)MCFGPIO_PORTJP, |
263 | .setr = MCFGPIO_SETJ, | 263 | .setr = (void __iomem *)MCFGPIO_SETJ, |
264 | .clrr = MCFGPIO_CLRJ, | 264 | .clrr = (void __iomem *)MCFGPIO_CLRJ, |
265 | }, | 265 | }, |
266 | { | 266 | { |
267 | .gpio_chip = { | 267 | .gpio_chip = { |
@@ -275,11 +275,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
275 | .base = 112, | 275 | .base = 112, |
276 | .ngpio = 8, | 276 | .ngpio = 8, |
277 | }, | 277 | }, |
278 | .pddr = MCFGPIO_DDRDD, | 278 | .pddr = (void __iomem *)MCFGPIO_DDRDD, |
279 | .podr = MCFGPIO_PORTDD, | 279 | .podr = (void __iomem *)MCFGPIO_PORTDD, |
280 | .ppdr = MCFGPIO_PORTDDP, | 280 | .ppdr = (void __iomem *)MCFGPIO_PORTDDP, |
281 | .setr = MCFGPIO_SETDD, | 281 | .setr = (void __iomem *)MCFGPIO_SETDD, |
282 | .clrr = MCFGPIO_CLRDD, | 282 | .clrr = (void __iomem *)MCFGPIO_CLRDD, |
283 | }, | 283 | }, |
284 | { | 284 | { |
285 | .gpio_chip = { | 285 | .gpio_chip = { |
@@ -293,11 +293,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
293 | .base = 120, | 293 | .base = 120, |
294 | .ngpio = 8, | 294 | .ngpio = 8, |
295 | }, | 295 | }, |
296 | .pddr = MCFGPIO_DDREH, | 296 | .pddr = (void __iomem *)MCFGPIO_DDREH, |
297 | .podr = MCFGPIO_PORTEH, | 297 | .podr = (void __iomem *)MCFGPIO_PORTEH, |
298 | .ppdr = MCFGPIO_PORTEHP, | 298 | .ppdr = (void __iomem *)MCFGPIO_PORTEHP, |
299 | .setr = MCFGPIO_SETEH, | 299 | .setr = (void __iomem *)MCFGPIO_SETEH, |
300 | .clrr = MCFGPIO_CLREH, | 300 | .clrr = (void __iomem *)MCFGPIO_CLREH, |
301 | }, | 301 | }, |
302 | { | 302 | { |
303 | .gpio_chip = { | 303 | .gpio_chip = { |
@@ -311,11 +311,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
311 | .base = 128, | 311 | .base = 128, |
312 | .ngpio = 8, | 312 | .ngpio = 8, |
313 | }, | 313 | }, |
314 | .pddr = MCFGPIO_DDREL, | 314 | .pddr = (void __iomem *)MCFGPIO_DDREL, |
315 | .podr = MCFGPIO_PORTEL, | 315 | .podr = (void __iomem *)MCFGPIO_PORTEL, |
316 | .ppdr = MCFGPIO_PORTELP, | 316 | .ppdr = (void __iomem *)MCFGPIO_PORTELP, |
317 | .setr = MCFGPIO_SETEL, | 317 | .setr = (void __iomem *)MCFGPIO_SETEL, |
318 | .clrr = MCFGPIO_CLREL, | 318 | .clrr = (void __iomem *)MCFGPIO_CLREL, |
319 | }, | 319 | }, |
320 | { | 320 | { |
321 | .gpio_chip = { | 321 | .gpio_chip = { |
@@ -329,11 +329,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
329 | .base = 136, | 329 | .base = 136, |
330 | .ngpio = 6, | 330 | .ngpio = 6, |
331 | }, | 331 | }, |
332 | .pddr = MCFGPIO_DDRAS, | 332 | .pddr = (void __iomem *)MCFGPIO_DDRAS, |
333 | .podr = MCFGPIO_PORTAS, | 333 | .podr = (void __iomem *)MCFGPIO_PORTAS, |
334 | .ppdr = MCFGPIO_PORTASP, | 334 | .ppdr = (void __iomem *)MCFGPIO_PORTASP, |
335 | .setr = MCFGPIO_SETAS, | 335 | .setr = (void __iomem *)MCFGPIO_SETAS, |
336 | .clrr = MCFGPIO_CLRAS, | 336 | .clrr = (void __iomem *)MCFGPIO_CLRAS, |
337 | }, | 337 | }, |
338 | { | 338 | { |
339 | .gpio_chip = { | 339 | .gpio_chip = { |
@@ -347,11 +347,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
347 | .base = 144, | 347 | .base = 144, |
348 | .ngpio = 7, | 348 | .ngpio = 7, |
349 | }, | 349 | }, |
350 | .pddr = MCFGPIO_DDRQS, | 350 | .pddr = (void __iomem *)MCFGPIO_DDRQS, |
351 | .podr = MCFGPIO_PORTQS, | 351 | .podr = (void __iomem *)MCFGPIO_PORTQS, |
352 | .ppdr = MCFGPIO_PORTQSP, | 352 | .ppdr = (void __iomem *)MCFGPIO_PORTQSP, |
353 | .setr = MCFGPIO_SETQS, | 353 | .setr = (void __iomem *)MCFGPIO_SETQS, |
354 | .clrr = MCFGPIO_CLRQS, | 354 | .clrr = (void __iomem *)MCFGPIO_CLRQS, |
355 | }, | 355 | }, |
356 | { | 356 | { |
357 | .gpio_chip = { | 357 | .gpio_chip = { |
@@ -365,11 +365,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
365 | .base = 152, | 365 | .base = 152, |
366 | .ngpio = 6, | 366 | .ngpio = 6, |
367 | }, | 367 | }, |
368 | .pddr = MCFGPIO_DDRSD, | 368 | .pddr = (void __iomem *)MCFGPIO_DDRSD, |
369 | .podr = MCFGPIO_PORTSD, | 369 | .podr = (void __iomem *)MCFGPIO_PORTSD, |
370 | .ppdr = MCFGPIO_PORTSDP, | 370 | .ppdr = (void __iomem *)MCFGPIO_PORTSDP, |
371 | .setr = MCFGPIO_SETSD, | 371 | .setr = (void __iomem *)MCFGPIO_SETSD, |
372 | .clrr = MCFGPIO_CLRSD, | 372 | .clrr = (void __iomem *)MCFGPIO_CLRSD, |
373 | }, | 373 | }, |
374 | { | 374 | { |
375 | .gpio_chip = { | 375 | .gpio_chip = { |
@@ -383,11 +383,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
383 | .base = 160, | 383 | .base = 160, |
384 | .ngpio = 4, | 384 | .ngpio = 4, |
385 | }, | 385 | }, |
386 | .pddr = MCFGPIO_DDRTC, | 386 | .pddr = (void __iomem *)MCFGPIO_DDRTC, |
387 | .podr = MCFGPIO_PORTTC, | 387 | .podr = (void __iomem *)MCFGPIO_PORTTC, |
388 | .ppdr = MCFGPIO_PORTTCP, | 388 | .ppdr = (void __iomem *)MCFGPIO_PORTTCP, |
389 | .setr = MCFGPIO_SETTC, | 389 | .setr = (void __iomem *)MCFGPIO_SETTC, |
390 | .clrr = MCFGPIO_CLRTC, | 390 | .clrr = (void __iomem *)MCFGPIO_CLRTC, |
391 | }, | 391 | }, |
392 | { | 392 | { |
393 | .gpio_chip = { | 393 | .gpio_chip = { |
@@ -401,11 +401,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
401 | .base = 168, | 401 | .base = 168, |
402 | .ngpio = 4, | 402 | .ngpio = 4, |
403 | }, | 403 | }, |
404 | .pddr = MCFGPIO_DDRTD, | 404 | .pddr = (void __iomem *)MCFGPIO_DDRTD, |
405 | .podr = MCFGPIO_PORTTD, | 405 | .podr = (void __iomem *)MCFGPIO_PORTTD, |
406 | .ppdr = MCFGPIO_PORTTDP, | 406 | .ppdr = (void __iomem *)MCFGPIO_PORTTDP, |
407 | .setr = MCFGPIO_SETTD, | 407 | .setr = (void __iomem *)MCFGPIO_SETTD, |
408 | .clrr = MCFGPIO_CLRTD, | 408 | .clrr = (void __iomem *)MCFGPIO_CLRTD, |
409 | }, | 409 | }, |
410 | { | 410 | { |
411 | .gpio_chip = { | 411 | .gpio_chip = { |
@@ -419,11 +419,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
419 | .base = 176, | 419 | .base = 176, |
420 | .ngpio = 4, | 420 | .ngpio = 4, |
421 | }, | 421 | }, |
422 | .pddr = MCFGPIO_DDRUA, | 422 | .pddr = (void __iomem *)MCFGPIO_DDRUA, |
423 | .podr = MCFGPIO_PORTUA, | 423 | .podr = (void __iomem *)MCFGPIO_PORTUA, |
424 | .ppdr = MCFGPIO_PORTUAP, | 424 | .ppdr = (void __iomem *)MCFGPIO_PORTUAP, |
425 | .setr = MCFGPIO_SETUA, | 425 | .setr = (void __iomem *)MCFGPIO_SETUA, |
426 | .clrr = MCFGPIO_CLRUA, | 426 | .clrr = (void __iomem *)MCFGPIO_CLRUA, |
427 | }, | 427 | }, |
428 | }; | 428 | }; |
429 | 429 | ||
diff --git a/arch/m68knommu/platform/5307/gpio.c b/arch/m68knommu/platform/5307/gpio.c index 8da5880e4066..5850612b4a38 100644 --- a/arch/m68knommu/platform/5307/gpio.c +++ b/arch/m68knommu/platform/5307/gpio.c | |||
@@ -32,9 +32,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
32 | .set = mcf_gpio_set_value, | 32 | .set = mcf_gpio_set_value, |
33 | .ngpio = 16, | 33 | .ngpio = 16, |
34 | }, | 34 | }, |
35 | .pddr = MCFSIM_PADDR, | 35 | .pddr = (void __iomem *) MCFSIM_PADDR, |
36 | .podr = MCFSIM_PADAT, | 36 | .podr = (void __iomem *) MCFSIM_PADAT, |
37 | .ppdr = MCFSIM_PADAT, | 37 | .ppdr = (void __iomem *) MCFSIM_PADAT, |
38 | }, | 38 | }, |
39 | }; | 39 | }; |
40 | 40 | ||
diff --git a/arch/m68knommu/platform/532x/gpio.c b/arch/m68knommu/platform/532x/gpio.c index 184b77382c3d..212a85deac90 100644 --- a/arch/m68knommu/platform/532x/gpio.c +++ b/arch/m68knommu/platform/532x/gpio.c | |||
@@ -32,9 +32,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
32 | .set = mcf_gpio_set_value, | 32 | .set = mcf_gpio_set_value, |
33 | .ngpio = 8, | 33 | .ngpio = 8, |
34 | }, | 34 | }, |
35 | .pddr = MCFEPORT_EPDDR, | 35 | .pddr = (void __iomem *) MCFEPORT_EPDDR, |
36 | .podr = MCFEPORT_EPDR, | 36 | .podr = (void __iomem *) MCFEPORT_EPDR, |
37 | .ppdr = MCFEPORT_EPPDR, | 37 | .ppdr = (void __iomem *) MCFEPORT_EPPDR, |
38 | }, | 38 | }, |
39 | { | 39 | { |
40 | .gpio_chip = { | 40 | .gpio_chip = { |
@@ -48,11 +48,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
48 | .base = 8, | 48 | .base = 8, |
49 | .ngpio = 8, | 49 | .ngpio = 8, |
50 | }, | 50 | }, |
51 | .pddr = MCFGPIO_PDDR_FECH, | 51 | .pddr = (void __iomem *) MCFGPIO_PDDR_FECH, |
52 | .podr = MCFGPIO_PODR_FECH, | 52 | .podr = (void __iomem *) MCFGPIO_PODR_FECH, |
53 | .ppdr = MCFGPIO_PPDSDR_FECH, | 53 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_FECH, |
54 | .setr = MCFGPIO_PPDSDR_FECH, | 54 | .setr = (void __iomem *) MCFGPIO_PPDSDR_FECH, |
55 | .clrr = MCFGPIO_PCLRR_FECH, | 55 | .clrr = (void __iomem *) MCFGPIO_PCLRR_FECH, |
56 | }, | 56 | }, |
57 | { | 57 | { |
58 | .gpio_chip = { | 58 | .gpio_chip = { |
@@ -66,11 +66,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
66 | .base = 16, | 66 | .base = 16, |
67 | .ngpio = 8, | 67 | .ngpio = 8, |
68 | }, | 68 | }, |
69 | .pddr = MCFGPIO_PDDR_FECL, | 69 | .pddr = (void __iomem *) MCFGPIO_PDDR_FECL, |
70 | .podr = MCFGPIO_PODR_FECL, | 70 | .podr = (void __iomem *) MCFGPIO_PODR_FECL, |
71 | .ppdr = MCFGPIO_PPDSDR_FECL, | 71 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_FECL, |
72 | .setr = MCFGPIO_PPDSDR_FECL, | 72 | .setr = (void __iomem *) MCFGPIO_PPDSDR_FECL, |
73 | .clrr = MCFGPIO_PCLRR_FECL, | 73 | .clrr = (void __iomem *) MCFGPIO_PCLRR_FECL, |
74 | }, | 74 | }, |
75 | { | 75 | { |
76 | .gpio_chip = { | 76 | .gpio_chip = { |
@@ -84,11 +84,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
84 | .base = 24, | 84 | .base = 24, |
85 | .ngpio = 5, | 85 | .ngpio = 5, |
86 | }, | 86 | }, |
87 | .pddr = MCFGPIO_PDDR_SSI, | 87 | .pddr = (void __iomem *) MCFGPIO_PDDR_SSI, |
88 | .podr = MCFGPIO_PODR_SSI, | 88 | .podr = (void __iomem *) MCFGPIO_PODR_SSI, |
89 | .ppdr = MCFGPIO_PPDSDR_SSI, | 89 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_SSI, |
90 | .setr = MCFGPIO_PPDSDR_SSI, | 90 | .setr = (void __iomem *) MCFGPIO_PPDSDR_SSI, |
91 | .clrr = MCFGPIO_PCLRR_SSI, | 91 | .clrr = (void __iomem *) MCFGPIO_PCLRR_SSI, |
92 | }, | 92 | }, |
93 | { | 93 | { |
94 | .gpio_chip = { | 94 | .gpio_chip = { |
@@ -102,11 +102,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
102 | .base = 32, | 102 | .base = 32, |
103 | .ngpio = 4, | 103 | .ngpio = 4, |
104 | }, | 104 | }, |
105 | .pddr = MCFGPIO_PDDR_BUSCTL, | 105 | .pddr = (void __iomem *) MCFGPIO_PDDR_BUSCTL, |
106 | .podr = MCFGPIO_PODR_BUSCTL, | 106 | .podr = (void __iomem *) MCFGPIO_PODR_BUSCTL, |
107 | .ppdr = MCFGPIO_PPDSDR_BUSCTL, | 107 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_BUSCTL, |
108 | .setr = MCFGPIO_PPDSDR_BUSCTL, | 108 | .setr = (void __iomem *) MCFGPIO_PPDSDR_BUSCTL, |
109 | .clrr = MCFGPIO_PCLRR_BUSCTL, | 109 | .clrr = (void __iomem *) MCFGPIO_PCLRR_BUSCTL, |
110 | }, | 110 | }, |
111 | { | 111 | { |
112 | .gpio_chip = { | 112 | .gpio_chip = { |
@@ -120,11 +120,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
120 | .base = 40, | 120 | .base = 40, |
121 | .ngpio = 4, | 121 | .ngpio = 4, |
122 | }, | 122 | }, |
123 | .pddr = MCFGPIO_PDDR_BE, | 123 | .pddr = (void __iomem *) MCFGPIO_PDDR_BE, |
124 | .podr = MCFGPIO_PODR_BE, | 124 | .podr = (void __iomem *) MCFGPIO_PODR_BE, |
125 | .ppdr = MCFGPIO_PPDSDR_BE, | 125 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_BE, |
126 | .setr = MCFGPIO_PPDSDR_BE, | 126 | .setr = (void __iomem *) MCFGPIO_PPDSDR_BE, |
127 | .clrr = MCFGPIO_PCLRR_BE, | 127 | .clrr = (void __iomem *) MCFGPIO_PCLRR_BE, |
128 | }, | 128 | }, |
129 | { | 129 | { |
130 | .gpio_chip = { | 130 | .gpio_chip = { |
@@ -138,11 +138,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
138 | .base = 49, | 138 | .base = 49, |
139 | .ngpio = 5, | 139 | .ngpio = 5, |
140 | }, | 140 | }, |
141 | .pddr = MCFGPIO_PDDR_CS, | 141 | .pddr = (void __iomem *) MCFGPIO_PDDR_CS, |
142 | .podr = MCFGPIO_PODR_CS, | 142 | .podr = (void __iomem *) MCFGPIO_PODR_CS, |
143 | .ppdr = MCFGPIO_PPDSDR_CS, | 143 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_CS, |
144 | .setr = MCFGPIO_PPDSDR_CS, | 144 | .setr = (void __iomem *) MCFGPIO_PPDSDR_CS, |
145 | .clrr = MCFGPIO_PCLRR_CS, | 145 | .clrr = (void __iomem *) MCFGPIO_PCLRR_CS, |
146 | }, | 146 | }, |
147 | { | 147 | { |
148 | .gpio_chip = { | 148 | .gpio_chip = { |
@@ -156,11 +156,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
156 | .base = 58, | 156 | .base = 58, |
157 | .ngpio = 4, | 157 | .ngpio = 4, |
158 | }, | 158 | }, |
159 | .pddr = MCFGPIO_PDDR_PWM, | 159 | .pddr = (void __iomem *) MCFGPIO_PDDR_PWM, |
160 | .podr = MCFGPIO_PODR_PWM, | 160 | .podr = (void __iomem *) MCFGPIO_PODR_PWM, |
161 | .ppdr = MCFGPIO_PPDSDR_PWM, | 161 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_PWM, |
162 | .setr = MCFGPIO_PPDSDR_PWM, | 162 | .setr = (void __iomem *) MCFGPIO_PPDSDR_PWM, |
163 | .clrr = MCFGPIO_PCLRR_PWM, | 163 | .clrr = (void __iomem *) MCFGPIO_PCLRR_PWM, |
164 | }, | 164 | }, |
165 | { | 165 | { |
166 | .gpio_chip = { | 166 | .gpio_chip = { |
@@ -174,11 +174,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
174 | .base = 64, | 174 | .base = 64, |
175 | .ngpio = 4, | 175 | .ngpio = 4, |
176 | }, | 176 | }, |
177 | .pddr = MCFGPIO_PDDR_FECI2C, | 177 | .pddr = (void __iomem *) MCFGPIO_PDDR_FECI2C, |
178 | .podr = MCFGPIO_PODR_FECI2C, | 178 | .podr = (void __iomem *) MCFGPIO_PODR_FECI2C, |
179 | .ppdr = MCFGPIO_PPDSDR_FECI2C, | 179 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_FECI2C, |
180 | .setr = MCFGPIO_PPDSDR_FECI2C, | 180 | .setr = (void __iomem *) MCFGPIO_PPDSDR_FECI2C, |
181 | .clrr = MCFGPIO_PCLRR_FECI2C, | 181 | .clrr = (void __iomem *) MCFGPIO_PCLRR_FECI2C, |
182 | }, | 182 | }, |
183 | { | 183 | { |
184 | .gpio_chip = { | 184 | .gpio_chip = { |
@@ -192,11 +192,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
192 | .base = 72, | 192 | .base = 72, |
193 | .ngpio = 8, | 193 | .ngpio = 8, |
194 | }, | 194 | }, |
195 | .pddr = MCFGPIO_PDDR_UART, | 195 | .pddr = (void __iomem *) MCFGPIO_PDDR_UART, |
196 | .podr = MCFGPIO_PODR_UART, | 196 | .podr = (void __iomem *) MCFGPIO_PODR_UART, |
197 | .ppdr = MCFGPIO_PPDSDR_UART, | 197 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_UART, |
198 | .setr = MCFGPIO_PPDSDR_UART, | 198 | .setr = (void __iomem *) MCFGPIO_PPDSDR_UART, |
199 | .clrr = MCFGPIO_PCLRR_UART, | 199 | .clrr = (void __iomem *) MCFGPIO_PCLRR_UART, |
200 | }, | 200 | }, |
201 | { | 201 | { |
202 | .gpio_chip = { | 202 | .gpio_chip = { |
@@ -210,11 +210,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
210 | .base = 80, | 210 | .base = 80, |
211 | .ngpio = 6, | 211 | .ngpio = 6, |
212 | }, | 212 | }, |
213 | .pddr = MCFGPIO_PDDR_QSPI, | 213 | .pddr = (void __iomem *) MCFGPIO_PDDR_QSPI, |
214 | .podr = MCFGPIO_PODR_QSPI, | 214 | .podr = (void __iomem *) MCFGPIO_PODR_QSPI, |
215 | .ppdr = MCFGPIO_PPDSDR_QSPI, | 215 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_QSPI, |
216 | .setr = MCFGPIO_PPDSDR_QSPI, | 216 | .setr = (void __iomem *) MCFGPIO_PPDSDR_QSPI, |
217 | .clrr = MCFGPIO_PCLRR_QSPI, | 217 | .clrr = (void __iomem *) MCFGPIO_PCLRR_QSPI, |
218 | }, | 218 | }, |
219 | { | 219 | { |
220 | .gpio_chip = { | 220 | .gpio_chip = { |
@@ -228,11 +228,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
228 | .base = 88, | 228 | .base = 88, |
229 | .ngpio = 4, | 229 | .ngpio = 4, |
230 | }, | 230 | }, |
231 | .pddr = MCFGPIO_PDDR_TIMER, | 231 | .pddr = (void __iomem *) MCFGPIO_PDDR_TIMER, |
232 | .podr = MCFGPIO_PODR_TIMER, | 232 | .podr = (void __iomem *) MCFGPIO_PODR_TIMER, |
233 | .ppdr = MCFGPIO_PPDSDR_TIMER, | 233 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_TIMER, |
234 | .setr = MCFGPIO_PPDSDR_TIMER, | 234 | .setr = (void __iomem *) MCFGPIO_PPDSDR_TIMER, |
235 | .clrr = MCFGPIO_PCLRR_TIMER, | 235 | .clrr = (void __iomem *) MCFGPIO_PCLRR_TIMER, |
236 | }, | 236 | }, |
237 | { | 237 | { |
238 | .gpio_chip = { | 238 | .gpio_chip = { |
@@ -246,11 +246,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
246 | .base = 96, | 246 | .base = 96, |
247 | .ngpio = 2, | 247 | .ngpio = 2, |
248 | }, | 248 | }, |
249 | .pddr = MCFGPIO_PDDR_LCDDATAH, | 249 | .pddr = (void __iomem *) MCFGPIO_PDDR_LCDDATAH, |
250 | .podr = MCFGPIO_PODR_LCDDATAH, | 250 | .podr = (void __iomem *) MCFGPIO_PODR_LCDDATAH, |
251 | .ppdr = MCFGPIO_PPDSDR_LCDDATAH, | 251 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_LCDDATAH, |
252 | .setr = MCFGPIO_PPDSDR_LCDDATAH, | 252 | .setr = (void __iomem *) MCFGPIO_PPDSDR_LCDDATAH, |
253 | .clrr = MCFGPIO_PCLRR_LCDDATAH, | 253 | .clrr = (void __iomem *) MCFGPIO_PCLRR_LCDDATAH, |
254 | }, | 254 | }, |
255 | { | 255 | { |
256 | .gpio_chip = { | 256 | .gpio_chip = { |
@@ -264,11 +264,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
264 | .base = 104, | 264 | .base = 104, |
265 | .ngpio = 8, | 265 | .ngpio = 8, |
266 | }, | 266 | }, |
267 | .pddr = MCFGPIO_PDDR_LCDDATAM, | 267 | .pddr = (void __iomem *) MCFGPIO_PDDR_LCDDATAM, |
268 | .podr = MCFGPIO_PODR_LCDDATAM, | 268 | .podr = (void __iomem *) MCFGPIO_PODR_LCDDATAM, |
269 | .ppdr = MCFGPIO_PPDSDR_LCDDATAM, | 269 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_LCDDATAM, |
270 | .setr = MCFGPIO_PPDSDR_LCDDATAM, | 270 | .setr = (void __iomem *) MCFGPIO_PPDSDR_LCDDATAM, |
271 | .clrr = MCFGPIO_PCLRR_LCDDATAM, | 271 | .clrr = (void __iomem *) MCFGPIO_PCLRR_LCDDATAM, |
272 | }, | 272 | }, |
273 | { | 273 | { |
274 | .gpio_chip = { | 274 | .gpio_chip = { |
@@ -282,11 +282,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
282 | .base = 112, | 282 | .base = 112, |
283 | .ngpio = 8, | 283 | .ngpio = 8, |
284 | }, | 284 | }, |
285 | .pddr = MCFGPIO_PDDR_LCDDATAL, | 285 | .pddr = (void __iomem *) MCFGPIO_PDDR_LCDDATAL, |
286 | .podr = MCFGPIO_PODR_LCDDATAL, | 286 | .podr = (void __iomem *) MCFGPIO_PODR_LCDDATAL, |
287 | .ppdr = MCFGPIO_PPDSDR_LCDDATAL, | 287 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_LCDDATAL, |
288 | .setr = MCFGPIO_PPDSDR_LCDDATAL, | 288 | .setr = (void __iomem *) MCFGPIO_PPDSDR_LCDDATAL, |
289 | .clrr = MCFGPIO_PCLRR_LCDDATAL, | 289 | .clrr = (void __iomem *) MCFGPIO_PCLRR_LCDDATAL, |
290 | }, | 290 | }, |
291 | { | 291 | { |
292 | .gpio_chip = { | 292 | .gpio_chip = { |
@@ -300,11 +300,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
300 | .base = 120, | 300 | .base = 120, |
301 | .ngpio = 1, | 301 | .ngpio = 1, |
302 | }, | 302 | }, |
303 | .pddr = MCFGPIO_PDDR_LCDCTLH, | 303 | .pddr = (void __iomem *) MCFGPIO_PDDR_LCDCTLH, |
304 | .podr = MCFGPIO_PODR_LCDCTLH, | 304 | .podr = (void __iomem *) MCFGPIO_PODR_LCDCTLH, |
305 | .ppdr = MCFGPIO_PPDSDR_LCDCTLH, | 305 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_LCDCTLH, |
306 | .setr = MCFGPIO_PPDSDR_LCDCTLH, | 306 | .setr = (void __iomem *) MCFGPIO_PPDSDR_LCDCTLH, |
307 | .clrr = MCFGPIO_PCLRR_LCDCTLH, | 307 | .clrr = (void __iomem *) MCFGPIO_PCLRR_LCDCTLH, |
308 | }, | 308 | }, |
309 | { | 309 | { |
310 | .gpio_chip = { | 310 | .gpio_chip = { |
@@ -318,11 +318,11 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
318 | .base = 128, | 318 | .base = 128, |
319 | .ngpio = 8, | 319 | .ngpio = 8, |
320 | }, | 320 | }, |
321 | .pddr = MCFGPIO_PDDR_LCDCTLL, | 321 | .pddr = (void __iomem *) MCFGPIO_PDDR_LCDCTLL, |
322 | .podr = MCFGPIO_PODR_LCDCTLL, | 322 | .podr = (void __iomem *) MCFGPIO_PODR_LCDCTLL, |
323 | .ppdr = MCFGPIO_PPDSDR_LCDCTLL, | 323 | .ppdr = (void __iomem *) MCFGPIO_PPDSDR_LCDCTLL, |
324 | .setr = MCFGPIO_PPDSDR_LCDCTLL, | 324 | .setr = (void __iomem *) MCFGPIO_PPDSDR_LCDCTLL, |
325 | .clrr = MCFGPIO_PCLRR_LCDCTLL, | 325 | .clrr = (void __iomem *) MCFGPIO_PCLRR_LCDCTLL, |
326 | }, | 326 | }, |
327 | }; | 327 | }; |
328 | 328 | ||
diff --git a/arch/m68knommu/platform/5407/gpio.c b/arch/m68knommu/platform/5407/gpio.c index 8da5880e4066..5850612b4a38 100644 --- a/arch/m68knommu/platform/5407/gpio.c +++ b/arch/m68knommu/platform/5407/gpio.c | |||
@@ -32,9 +32,9 @@ static struct mcf_gpio_chip mcf_gpio_chips[] = { | |||
32 | .set = mcf_gpio_set_value, | 32 | .set = mcf_gpio_set_value, |
33 | .ngpio = 16, | 33 | .ngpio = 16, |
34 | }, | 34 | }, |
35 | .pddr = MCFSIM_PADDR, | 35 | .pddr = (void __iomem *) MCFSIM_PADDR, |
36 | .podr = MCFSIM_PADAT, | 36 | .podr = (void __iomem *) MCFSIM_PADAT, |
37 | .ppdr = MCFSIM_PADAT, | 37 | .ppdr = (void __iomem *) MCFSIM_PADAT, |
38 | }, | 38 | }, |
39 | }; | 39 | }; |
40 | 40 | ||
diff --git a/arch/m68knommu/platform/54xx/Makefile b/arch/m68knommu/platform/54xx/Makefile index e6035e7a2d3f..6cfd090ec3cd 100644 --- a/arch/m68knommu/platform/54xx/Makefile +++ b/arch/m68knommu/platform/54xx/Makefile | |||
@@ -15,4 +15,5 @@ | |||
15 | asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1 | 15 | asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1 |
16 | 16 | ||
17 | obj-y := config.o | 17 | obj-y := config.o |
18 | obj-$(CONFIG_FIREBEE) += firebee.o | ||
18 | 19 | ||
diff --git a/arch/m68knommu/platform/54xx/firebee.c b/arch/m68knommu/platform/54xx/firebee.c new file mode 100644 index 000000000000..46d50534f981 --- /dev/null +++ b/arch/m68knommu/platform/54xx/firebee.c | |||
@@ -0,0 +1,86 @@ | |||
1 | /***************************************************************************/ | ||
2 | |||
3 | /* | ||
4 | * firebee.c -- extra startup code support for the FireBee boards | ||
5 | * | ||
6 | * Copyright (C) 2011, Greg Ungerer (gerg@snapgear.com) | ||
7 | */ | ||
8 | |||
9 | /***************************************************************************/ | ||
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/io.h> | ||
14 | #include <linux/platform_device.h> | ||
15 | #include <linux/mtd/mtd.h> | ||
16 | #include <linux/mtd/partitions.h> | ||
17 | #include <linux/mtd/physmap.h> | ||
18 | #include <asm/coldfire.h> | ||
19 | #include <asm/mcfsim.h> | ||
20 | |||
21 | /***************************************************************************/ | ||
22 | |||
23 | /* | ||
24 | * 8MB of NOR flash fitted to the FireBee board. | ||
25 | */ | ||
26 | #define FLASH_PHYS_ADDR 0xe0000000 /* Physical address of flash */ | ||
27 | #define FLASH_PHYS_SIZE 0x00800000 /* Size of flash */ | ||
28 | |||
29 | #define PART_BOOT_START 0x00000000 /* Start at bottom of flash */ | ||
30 | #define PART_BOOT_SIZE 0x00040000 /* 256k in size */ | ||
31 | #define PART_IMAGE_START 0x00040000 /* Start after boot loader */ | ||
32 | #define PART_IMAGE_SIZE 0x006c0000 /* Most of flash */ | ||
33 | #define PART_FPGA_START 0x00700000 /* Start at offset 7MB */ | ||
34 | #define PART_FPGA_SIZE 0x00100000 /* 1MB in size */ | ||
35 | |||
36 | static struct mtd_partition firebee_flash_parts[] = { | ||
37 | { | ||
38 | .name = "dBUG", | ||
39 | .offset = PART_BOOT_START, | ||
40 | .size = PART_BOOT_SIZE, | ||
41 | }, | ||
42 | { | ||
43 | .name = "FPGA", | ||
44 | .offset = PART_FPGA_START, | ||
45 | .size = PART_FPGA_SIZE, | ||
46 | }, | ||
47 | { | ||
48 | .name = "image", | ||
49 | .offset = PART_IMAGE_START, | ||
50 | .size = PART_IMAGE_SIZE, | ||
51 | }, | ||
52 | }; | ||
53 | |||
54 | static struct physmap_flash_data firebee_flash_data = { | ||
55 | .width = 2, | ||
56 | .nr_parts = ARRAY_SIZE(firebee_flash_parts), | ||
57 | .parts = firebee_flash_parts, | ||
58 | }; | ||
59 | |||
60 | static struct resource firebee_flash_resource = { | ||
61 | .start = FLASH_PHYS_ADDR, | ||
62 | .end = FLASH_PHYS_ADDR + FLASH_PHYS_SIZE - 1, | ||
63 | .flags = IORESOURCE_MEM, | ||
64 | }; | ||
65 | |||
66 | static struct platform_device firebee_flash = { | ||
67 | .name = "physmap-flash", | ||
68 | .id = 0, | ||
69 | .dev = { | ||
70 | .platform_data = &firebee_flash_data, | ||
71 | }, | ||
72 | .num_resources = 1, | ||
73 | .resource = &firebee_flash_resource, | ||
74 | }; | ||
75 | |||
76 | /***************************************************************************/ | ||
77 | |||
78 | static int __init init_firebee(void) | ||
79 | { | ||
80 | platform_device_register(&firebee_flash); | ||
81 | return 0; | ||
82 | } | ||
83 | |||
84 | arch_initcall(init_firebee); | ||
85 | |||
86 | /***************************************************************************/ | ||
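
A quick sanity check on the FireBee flash layout defined above: 0x00040000 + 0x006c0000 + 0x00100000 = 0x00800000, so the three partitions exactly tile the 8MB device. If one wanted that invariant enforced at build time, a compile-time assertion inside init_firebee() would do it; this is an illustrative addition, not part of the patch.

static int __init init_firebee(void)
{
	/* The dBUG, image and FPGA partitions must cover the whole device. */
	BUILD_BUG_ON(PART_BOOT_SIZE + PART_IMAGE_SIZE + PART_FPGA_SIZE !=
		     FLASH_PHYS_SIZE);

	platform_device_register(&firebee_flash);
	return 0;
}
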
diff --git a/arch/m68knommu/platform/68328/ints.c b/arch/m68knommu/platform/68328/ints.c index 2a3af193ccd3..e5631831a200 100644 --- a/arch/m68knommu/platform/68328/ints.c +++ b/arch/m68knommu/platform/68328/ints.c | |||
@@ -135,20 +135,20 @@ void process_int(int vec, struct pt_regs *fp) | |||
135 | } | 135 | } |
136 | } | 136 | } |
137 | 137 | ||
138 | static void intc_irq_unmask(unsigned int irq) | 138 | static void intc_irq_unmask(struct irq_data *d) |
139 | { | 139 | { |
140 | IMR &= ~(1<<irq); | 140 | IMR &= ~(1 << d->irq); |
141 | } | 141 | } |
142 | 142 | ||
143 | static void intc_irq_mask(unsigned int irq) | 143 | static void intc_irq_mask(struct irq_data *d) |
144 | { | 144 | { |
145 | IMR |= (1<<irq); | 145 | IMR |= (1 << d->irq); |
146 | } | 146 | } |
147 | 147 | ||
148 | static struct irq_chip intc_irq_chip = { | 148 | static struct irq_chip intc_irq_chip = { |
149 | .name = "M68K-INTC", | 149 | .name = "M68K-INTC", |
150 | .mask = intc_irq_mask, | 150 | .irq_mask = intc_irq_mask, |
151 | .unmask = intc_irq_unmask, | 151 | .irq_unmask = intc_irq_unmask, |
152 | }; | 152 | }; |
153 | 153 | ||
154 | /* | 154 | /* |
diff --git a/arch/m68knommu/platform/68360/ints.c b/arch/m68knommu/platform/68360/ints.c index a29041c1a8a0..8de3feb568c6 100644 --- a/arch/m68knommu/platform/68360/ints.c +++ b/arch/m68knommu/platform/68360/ints.c | |||
@@ -37,26 +37,26 @@ extern void *_ramvec[]; | |||
37 | /* The number of spurious interrupts */ | 37 | /* The number of spurious interrupts */ |
38 | volatile unsigned int num_spurious; | 38 | volatile unsigned int num_spurious; |
39 | 39 | ||
40 | static void intc_irq_unmask(unsigned int irq) | 40 | static void intc_irq_unmask(struct irq_data *d) |
41 | { | 41 | { |
42 | pquicc->intr_cimr |= (1 << irq); | 42 | pquicc->intr_cimr |= (1 << d->irq); |
43 | } | 43 | } |
44 | 44 | ||
45 | static void intc_irq_mask(unsigned int irq) | 45 | static void intc_irq_mask(struct irq_data *d) |
46 | { | 46 | { |
47 | pquicc->intr_cimr &= ~(1 << irq); | 47 | pquicc->intr_cimr &= ~(1 << d->irq); |
48 | } | 48 | } |
49 | 49 | ||
50 | static void intc_irq_ack(unsigned int irq) | 50 | static void intc_irq_ack(struct irq_data *d) |
51 | { | 51 | { |
52 | pquicc->intr_cisr = (1 << irq); | 52 | pquicc->intr_cisr = (1 << d->irq); |
53 | } | 53 | } |
54 | 54 | ||
55 | static struct irq_chip intc_irq_chip = { | 55 | static struct irq_chip intc_irq_chip = { |
56 | .name = "M68K-INTC", | 56 | .name = "M68K-INTC", |
57 | .mask = intc_irq_mask, | 57 | .irq_mask = intc_irq_mask, |
58 | .unmask = intc_irq_unmask, | 58 | .irq_unmask = intc_irq_unmask, |
59 | .ack = intc_irq_ack, | 59 | .irq_ack = intc_irq_ack, |
60 | }; | 60 | }; |
61 | 61 | ||
62 | /* | 62 | /* |
diff --git a/arch/m68knommu/platform/coldfire/dma.c b/arch/m68knommu/platform/coldfire/dma.c index 2b30cf1b8f77..e88b95e2cc62 100644 --- a/arch/m68knommu/platform/coldfire/dma.c +++ b/arch/m68knommu/platform/coldfire/dma.c | |||
@@ -21,16 +21,16 @@ | |||
21 | */ | 21 | */ |
22 | unsigned int dma_base_addr[MAX_M68K_DMA_CHANNELS] = { | 22 | unsigned int dma_base_addr[MAX_M68K_DMA_CHANNELS] = { |
23 | #ifdef MCFDMA_BASE0 | 23 | #ifdef MCFDMA_BASE0 |
24 | MCF_MBAR + MCFDMA_BASE0, | 24 | MCFDMA_BASE0, |
25 | #endif | 25 | #endif |
26 | #ifdef MCFDMA_BASE1 | 26 | #ifdef MCFDMA_BASE1 |
27 | MCF_MBAR + MCFDMA_BASE1, | 27 | MCFDMA_BASE1, |
28 | #endif | 28 | #endif |
29 | #ifdef MCFDMA_BASE2 | 29 | #ifdef MCFDMA_BASE2 |
30 | MCF_MBAR + MCFDMA_BASE2, | 30 | MCFDMA_BASE2, |
31 | #endif | 31 | #endif |
32 | #ifdef MCFDMA_BASE3 | 32 | #ifdef MCFDMA_BASE3 |
33 | MCF_MBAR + MCFDMA_BASE3, | 33 | MCFDMA_BASE3, |
34 | #endif | 34 | #endif |
35 | }; | 35 | }; |
36 | 36 | ||
diff --git a/arch/m68knommu/platform/coldfire/head.S b/arch/m68knommu/platform/coldfire/head.S index d5977909ae5f..129bff4956b5 100644 --- a/arch/m68knommu/platform/coldfire/head.S +++ b/arch/m68knommu/platform/coldfire/head.S | |||
@@ -41,17 +41,17 @@ | |||
41 | * DRAM controller is quite different. | 41 | * DRAM controller is quite different. |
42 | */ | 42 | */ |
43 | .macro GET_MEM_SIZE | 43 | .macro GET_MEM_SIZE |
44 | movel MCF_MBAR+MCFSIM_DMR0,%d0 /* get mask for 1st bank */ | 44 | movel MCFSIM_DMR0,%d0 /* get mask for 1st bank */ |
45 | btst #0,%d0 /* check if region enabled */ | 45 | btst #0,%d0 /* check if region enabled */ |
46 | beq 1f | 46 | beq 1f |
47 | andl #0xfffc0000,%d0 | 47 | andl #0xfffc0000,%d0 |
48 | beq 1f | 48 | beq 1f |
49 | addl #0x00040000,%d0 /* convert mask to size */ | 49 | addl #0x00040000,%d0 /* convert mask to size */ |
50 | 1: | 50 | 1: |
51 | movel MCF_MBAR+MCFSIM_DMR1,%d1 /* get mask for 2nd bank */ | 51 | movel MCFSIM_DMR1,%d1 /* get mask for 2nd bank */ |
52 | btst #0,%d1 /* check if region enabled */ | 52 | btst #0,%d1 /* check if region enabled */ |
53 | beq 2f | 53 | beq 2f |
54 | andl #0xfffc0000, %d1 | 54 | andl #0xfffc0000,%d1 |
55 | beq 2f | 55 | beq 2f |
56 | addl #0x00040000,%d1 | 56 | addl #0x00040000,%d1 |
57 | addl %d1,%d0 /* total mem size in d0 */ | 57 | addl %d1,%d0 /* total mem size in d0 */ |
@@ -68,14 +68,14 @@ | |||
68 | #elif defined(CONFIG_M520x) | 68 | #elif defined(CONFIG_M520x) |
69 | .macro GET_MEM_SIZE | 69 | .macro GET_MEM_SIZE |
70 | clrl %d0 | 70 | clrl %d0 |
71 | movel MCF_MBAR+MCFSIM_SDCS0, %d2 /* Get SDRAM chip select 0 config */ | 71 | movel MCFSIM_SDCS0, %d2 /* Get SDRAM chip select 0 config */ |
72 | andl #0x1f, %d2 /* Get only the chip select size */ | 72 | andl #0x1f, %d2 /* Get only the chip select size */ |
73 | beq 3f /* Check if it is enabled */ | 73 | beq 3f /* Check if it is enabled */ |
74 | addql #1, %d2 /* Form exponent */ | 74 | addql #1, %d2 /* Form exponent */ |
75 | moveql #1, %d0 | 75 | moveql #1, %d0 |
76 | lsll %d2, %d0 /* 2 ^ exponent */ | 76 | lsll %d2, %d0 /* 2 ^ exponent */ |
77 | 3: | 77 | 3: |
78 | movel MCF_MBAR+MCFSIM_SDCS1, %d2 /* Get SDRAM chip select 1 config */ | 78 | movel MCFSIM_SDCS1, %d2 /* Get SDRAM chip select 1 config */ |
79 | andl #0x1f, %d2 /* Get only the chip select size */ | 79 | andl #0x1f, %d2 /* Get only the chip select size */ |
80 | beq 4f /* Check if it is enabled */ | 80 | beq 4f /* Check if it is enabled */ |
81 | addql #1, %d2 /* Form exponent */ | 81 | addql #1, %d2 /* Form exponent */ |
diff --git a/arch/m68knommu/platform/coldfire/intc-2.c b/arch/m68knommu/platform/coldfire/intc-2.c index 85daa2b3001a..2cbfbf035db9 100644 --- a/arch/m68knommu/platform/coldfire/intc-2.c +++ b/arch/m68knommu/platform/coldfire/intc-2.c | |||
@@ -7,7 +7,10 @@ | |||
7 | * family, the 5270, 5271, 5274, 5275, and the 528x family which have two such | 7 | * family, the 5270, 5271, 5274, 5275, and the 528x family which have two such |
8 | * controllers, and the 547x and 548x families which have only one of them. | 8 | * controllers, and the 547x and 548x families which have only one of them. |
9 | * | 9 | * |
10 | * (C) Copyright 2009, Greg Ungerer <gerg@snapgear.com> | 10 | * The 7 fixed external interrupts are part of the Edge Port unit of these |
11 | * ColdFire parts. They can be configured as level or edge triggered. | ||
12 | * | ||
13 | * (C) Copyright 2009-2011, Greg Ungerer <gerg@snapgear.com> | ||
11 | * | 14 | * |
12 | * This file is subject to the terms and conditions of the GNU General Public | 15 | * This file is subject to the terms and conditions of the GNU General Public |
13 | * License. See the file COPYING in the main directory of this archive | 16 | * License. See the file COPYING in the main directory of this archive |
@@ -31,11 +34,12 @@ | |||
31 | #define MCFSIM_ICR_PRI(p) (p) /* Priority p intr */ | 34 | #define MCFSIM_ICR_PRI(p) (p) /* Priority p intr */ |
32 | 35 | ||
33 | /* | 36 | /* |
34 | * Each vector needs a unique priority and level associated with it. | 37 | * The EDGE Port interrupts are the fixed 7 external interrupts. |
35 | * We don't really care so much what they are, we don't rely on the | 38 | * They need some special treatment, for example they need to be acked. |
36 | * traditional priority interrupt scheme of the m68k/ColdFire. | ||
37 | */ | 39 | */ |
38 | static u8 intc_intpri = MCFSIM_ICR_LEVEL(6) | MCFSIM_ICR_PRI(6); | 40 | #define EINT0 64 /* Is not actually used, but spot reserved for it */ |
41 | #define EINT1 65 /* EDGE Port interrupt 1 */ | ||
42 | #define EINT7 71 /* EDGE Port interrupt 7 */ | ||
39 | 43 | ||
40 | #ifdef MCFICM_INTC1 | 44 | #ifdef MCFICM_INTC1 |
41 | #define NR_VECS 128 | 45 | #define NR_VECS 128 |
@@ -43,66 +47,147 @@ static u8 intc_intpri = MCFSIM_ICR_LEVEL(6) | MCFSIM_ICR_PRI(6); | |||
43 | #define NR_VECS 64 | 47 | #define NR_VECS 64 |
44 | #endif | 48 | #endif |
45 | 49 | ||
46 | static void intc_irq_mask(unsigned int irq) | 50 | static void intc_irq_mask(struct irq_data *d) |
47 | { | 51 | { |
48 | if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECBASE + NR_VECS)) { | 52 | unsigned int irq = d->irq - MCFINT_VECBASE; |
49 | unsigned long imraddr; | 53 | unsigned long imraddr; |
50 | u32 val, imrbit; | 54 | u32 val, imrbit; |
51 | 55 | ||
52 | irq -= MCFINT_VECBASE; | ||
53 | imraddr = MCF_IPSBAR; | ||
54 | #ifdef MCFICM_INTC1 | 56 | #ifdef MCFICM_INTC1 |
55 | imraddr += (irq & 0x40) ? MCFICM_INTC1 : MCFICM_INTC0; | 57 | imraddr = (irq & 0x40) ? MCFICM_INTC1 : MCFICM_INTC0; |
56 | #else | 58 | #else |
57 | imraddr += MCFICM_INTC0; | 59 | imraddr = MCFICM_INTC0; |
58 | #endif | 60 | #endif |
59 | imraddr += (irq & 0x20) ? MCFINTC_IMRH : MCFINTC_IMRL; | 61 | imraddr += (irq & 0x20) ? MCFINTC_IMRH : MCFINTC_IMRL; |
60 | imrbit = 0x1 << (irq & 0x1f); | 62 | imrbit = 0x1 << (irq & 0x1f); |
61 | 63 | ||
62 | val = __raw_readl(imraddr); | 64 | val = __raw_readl(imraddr); |
63 | __raw_writel(val | imrbit, imraddr); | 65 | __raw_writel(val | imrbit, imraddr); |
64 | } | 66 | } |
67 | |||
68 | static void intc_irq_unmask(struct irq_data *d) | ||
69 | { | ||
70 | unsigned int irq = d->irq - MCFINT_VECBASE; | ||
71 | unsigned long imraddr; | ||
72 | u32 val, imrbit; | ||
73 | |||
74 | #ifdef MCFICM_INTC1 | ||
75 | imraddr = (irq & 0x40) ? MCFICM_INTC1 : MCFICM_INTC0; | ||
76 | #else | ||
77 | imraddr = MCFICM_INTC0; | ||
78 | #endif | ||
79 | imraddr += ((irq & 0x20) ? MCFINTC_IMRH : MCFINTC_IMRL); | ||
80 | imrbit = 0x1 << (irq & 0x1f); | ||
81 | |||
82 | /* Don't set the "maskall" bit! */ | ||
83 | if ((irq & 0x20) == 0) | ||
84 | imrbit |= 0x1; | ||
85 | |||
86 | val = __raw_readl(imraddr); | ||
87 | __raw_writel(val & ~imrbit, imraddr); | ||
88 | } | ||
89 | |||
90 | /* | ||
91 | * Only the external (or EDGE Port) interrupts need to be acknowledged | ||
92 | * here, as part of the IRQ handler. They only really need to be ack'ed | ||
93 | * if they are in edge triggered mode, but there is no harm in doing it | ||
94 | * for all types. | ||
95 | */ | ||
96 | static void intc_irq_ack(struct irq_data *d) | ||
97 | { | ||
98 | unsigned int irq = d->irq; | ||
99 | |||
100 | __raw_writeb(0x1 << (irq - EINT0), MCFEPORT_EPFR); | ||
65 | } | 101 | } |
66 | 102 | ||
67 | static void intc_irq_unmask(unsigned int irq) | 103 | /* |
104 | * Each vector needs a unique priority and level associated with it. | ||
105 | * We don't really care so much what they are, we don't rely on the | ||
106 | * traditional priority interrupt scheme of the m68k/ColdFire. This | ||
107 | * only needs to be set once for an interrupt, and we will never change | ||
108 | * these values once we have set them. | ||
109 | */ | ||
110 | static u8 intc_intpri = MCFSIM_ICR_LEVEL(6) | MCFSIM_ICR_PRI(6); | ||
111 | |||
112 | static unsigned int intc_irq_startup(struct irq_data *d) | ||
68 | { | 113 | { |
69 | if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECBASE + NR_VECS)) { | 114 | unsigned int irq = d->irq - MCFINT_VECBASE; |
70 | unsigned long intaddr, imraddr, icraddr; | 115 | unsigned long icraddr; |
71 | u32 val, imrbit; | ||
72 | 116 | ||
73 | irq -= MCFINT_VECBASE; | ||
74 | intaddr = MCF_IPSBAR; | ||
75 | #ifdef MCFICM_INTC1 | 117 | #ifdef MCFICM_INTC1 |
76 | intaddr += (irq & 0x40) ? MCFICM_INTC1 : MCFICM_INTC0; | 118 | icraddr = (irq & 0x40) ? MCFICM_INTC1 : MCFICM_INTC0; |
77 | #else | 119 | #else |
78 | intaddr += MCFICM_INTC0; | 120 | icraddr = MCFICM_INTC0; |
79 | #endif | 121 | #endif |
80 | imraddr = intaddr + ((irq & 0x20) ? MCFINTC_IMRH : MCFINTC_IMRL); | 122 | icraddr += MCFINTC_ICR0 + (irq & 0x3f); |
81 | icraddr = intaddr + MCFINTC_ICR0 + (irq & 0x3f); | 123 | if (__raw_readb(icraddr) == 0) |
82 | imrbit = 0x1 << (irq & 0x1f); | 124 | __raw_writeb(intc_intpri--, icraddr); |
83 | 125 | ||
84 | /* Don't set the "maskall" bit! */ | 126 | irq = d->irq; |
85 | if ((irq & 0x20) == 0) | 127 | if ((irq >= EINT1) && (irq <= EINT7)) { |
86 | imrbit |= 0x1; | 128 | u8 v; |
87 | 129 | ||
88 | if (__raw_readb(icraddr) == 0) | 130 | irq -= EINT0; |
89 | __raw_writeb(intc_intpri--, icraddr); | ||
90 | 131 | ||
91 | val = __raw_readl(imraddr); | 132 | /* Set EPORT line as input */ |
92 | __raw_writel(val & ~imrbit, imraddr); | 133 | v = __raw_readb(MCFEPORT_EPDDR); |
134 | __raw_writeb(v & ~(0x1 << irq), MCFEPORT_EPDDR); | ||
135 | |||
136 | /* Set EPORT line as interrupt source */ | ||
137 | v = __raw_readb(MCFEPORT_EPIER); | ||
138 | __raw_writeb(v | (0x1 << irq), MCFEPORT_EPIER); | ||
93 | } | 139 | } |
140 | |||
141 | intc_irq_unmask(d); | ||
142 | return 0; | ||
94 | } | 143 | } |
95 | 144 | ||
96 | static int intc_irq_set_type(unsigned int irq, unsigned int type) | 145 | static int intc_irq_set_type(struct irq_data *d, unsigned int type) |
97 | { | 146 | { |
147 | unsigned int irq = d->irq; | ||
148 | u16 pa, tb; | ||
149 | |||
150 | switch (type) { | ||
151 | case IRQ_TYPE_EDGE_RISING: | ||
152 | tb = 0x1; | ||
153 | break; | ||
154 | case IRQ_TYPE_EDGE_FALLING: | ||
155 | tb = 0x2; | ||
156 | break; | ||
157 | case IRQ_TYPE_EDGE_BOTH: | ||
158 | tb = 0x3; | ||
159 | break; | ||
160 | default: | ||
161 | /* Level triggered */ | ||
162 | tb = 0; | ||
163 | break; | ||
164 | } | ||
165 | |||
166 | if (tb) | ||
167 | set_irq_handler(irq, handle_edge_irq); | ||
168 | |||
169 | irq -= EINT0; | ||
170 | pa = __raw_readw(MCFEPORT_EPPAR); | ||
171 | pa = (pa & ~(0x3 << (irq * 2))) | (tb << (irq * 2)); | ||
172 | __raw_writew(pa, MCFEPORT_EPPAR); | ||
173 | |||
98 | return 0; | 174 | return 0; |
99 | } | 175 | } |
100 | 176 | ||
101 | static struct irq_chip intc_irq_chip = { | 177 | static struct irq_chip intc_irq_chip = { |
102 | .name = "CF-INTC", | 178 | .name = "CF-INTC", |
103 | .mask = intc_irq_mask, | 179 | .irq_startup = intc_irq_startup, |
104 | .unmask = intc_irq_unmask, | 180 | .irq_mask = intc_irq_mask, |
105 | .set_type = intc_irq_set_type, | 181 | .irq_unmask = intc_irq_unmask, |
182 | }; | ||
183 | |||
184 | static struct irq_chip intc_irq_chip_edge_port = { | ||
185 | .name = "CF-INTC-EP", | ||
186 | .irq_startup = intc_irq_startup, | ||
187 | .irq_mask = intc_irq_mask, | ||
188 | .irq_unmask = intc_irq_unmask, | ||
189 | .irq_ack = intc_irq_ack, | ||
190 | .irq_set_type = intc_irq_set_type, | ||
106 | }; | 191 | }; |
107 | 192 | ||
108 | void __init init_IRQ(void) | 193 | void __init init_IRQ(void) |
@@ -112,13 +197,16 @@ void __init init_IRQ(void) | |||
112 | init_vectors(); | 197 | init_vectors(); |
113 | 198 | ||
114 | /* Mask all interrupt sources */ | 199 | /* Mask all interrupt sources */ |
115 | __raw_writel(0x1, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRL); | 200 | __raw_writel(0x1, MCFICM_INTC0 + MCFINTC_IMRL); |
116 | #ifdef MCFICM_INTC1 | 201 | #ifdef MCFICM_INTC1 |
117 | __raw_writel(0x1, MCF_IPSBAR + MCFICM_INTC1 + MCFINTC_IMRL); | 202 | __raw_writel(0x1, MCFICM_INTC1 + MCFINTC_IMRL); |
118 | #endif | 203 | #endif |
119 | 204 | ||
120 | for (irq = 0; (irq < NR_IRQS); irq++) { | 205 | for (irq = MCFINT_VECBASE; (irq < MCFINT_VECBASE + NR_VECS); irq++) { |
121 | set_irq_chip(irq, &intc_irq_chip); | 206 | if ((irq >= EINT1) && (irq <= EINT7)) |
207 | set_irq_chip(irq, &intc_irq_chip_edge_port); | ||
208 | else | ||
209 | set_irq_chip(irq, &intc_irq_chip); | ||
122 | set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH); | 210 | set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH); |
123 | set_irq_handler(irq, handle_level_irq); | 211 | set_irq_handler(irq, handle_level_irq); |
124 | } | 212 | } |
diff --git a/arch/m68knommu/platform/coldfire/intc-simr.c b/arch/m68knommu/platform/coldfire/intc-simr.c index bb7048636140..e642b24ab729 100644 --- a/arch/m68knommu/platform/coldfire/intc-simr.c +++ b/arch/m68knommu/platform/coldfire/intc-simr.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Interrupt controller code for the ColdFire 5208, 5207 & 532x parts. | 4 | * Interrupt controller code for the ColdFire 5208, 5207 & 532x parts. |
5 | * | 5 | * |
6 | * (C) Copyright 2009, Greg Ungerer <gerg@snapgear.com> | 6 | * (C) Copyright 2009-2011, Greg Ungerer <gerg@snapgear.com> |
7 | * | 7 | * |
8 | * This file is subject to the terms and conditions of the GNU General Public | 8 | * This file is subject to the terms and conditions of the GNU General Public |
9 | * License. See the file COPYING in the main directory of this archive | 9 | * License. See the file COPYING in the main directory of this archive |
@@ -20,47 +20,156 @@ | |||
20 | #include <asm/mcfsim.h> | 20 | #include <asm/mcfsim.h> |
21 | #include <asm/traps.h> | 21 | #include <asm/traps.h> |
22 | 22 | ||
23 | static void intc_irq_mask(unsigned int irq) | 23 | /* |
24 | * The EDGE Port interrupts are the fixed 7 external interrupts. | ||
25 | * They need some special treatment, for example they need to be acked. | ||
26 | */ | ||
27 | #ifdef CONFIG_M520x | ||
28 | /* | ||
29 | * The 520x parts only support a limited range of these external | ||
30 | * interrupts, only 1, 4 and 7 (as interrupts 65, 66 and 67). | ||
31 | */ | ||
32 | #define EINT0 64 /* Is not actually used, but spot reserved for it */ | ||
33 | #define EINT1 65 /* EDGE Port interrupt 1 */ | ||
34 | #define EINT4 66 /* EDGE Port interrupt 4 */ | ||
35 | #define EINT7 67 /* EDGE Port interrupt 7 */ | ||
36 | |||
37 | static unsigned int irqebitmap[] = { 0, 1, 4, 7 }; | ||
38 | static inline unsigned int irq2ebit(unsigned int irq) | ||
24 | { | 39 | { |
25 | if (irq >= MCFINT_VECBASE) { | 40 | return irqebitmap[irq - EINT0]; |
26 | if (irq < MCFINT_VECBASE + 64) | 41 | } |
27 | __raw_writeb(irq - MCFINT_VECBASE, MCFINTC0_SIMR); | 42 | |
28 | else if ((irq < MCFINT_VECBASE + 128) && MCFINTC1_SIMR) | 43 | #else |
29 | __raw_writeb(irq - MCFINT_VECBASE - 64, MCFINTC1_SIMR); | 44 | |
30 | } | 45 | /* |
46 | * Most of the ColdFire parts with the EDGE Port module just have | ||
47 | * a straight direct mapping of the 7 external interrupts. Although | ||
48 | * there is a bit reserved for 0, it is not used. | ||
49 | */ | ||
50 | #define EINT0 64 /* Is not actually used, but spot reserved for it */ | ||
51 | #define EINT1 65 /* EDGE Port interrupt 1 */ | ||
52 | #define EINT7 71 /* EDGE Port interrupt 7 */ | ||
53 | |||
54 | static inline unsigned int irq2ebit(unsigned int irq) | ||
55 | { | ||
56 | return irq - EINT0; | ||
57 | } | ||
58 | |||
59 | #endif | ||
60 | |||
61 | /* | ||
62 | * There may be one or two interrupt control units, each with 64 | ||
63 | * interrupts. If there is no second unit then MCFINTC1_* defines | ||
64 | * will be 0 (and code for them optimized away). | ||
65 | */ | ||
66 | |||
67 | static void intc_irq_mask(struct irq_data *d) | ||
68 | { | ||
69 | unsigned int irq = d->irq - MCFINT_VECBASE; | ||
70 | |||
71 | if (MCFINTC1_SIMR && (irq > 64)) | ||
72 | __raw_writeb(irq - 64, MCFINTC1_SIMR); | ||
73 | else | ||
74 | __raw_writeb(irq, MCFINTC0_SIMR); | ||
31 | } | 75 | } |
32 | 76 | ||
33 | static void intc_irq_unmask(unsigned int irq) | 77 | static void intc_irq_unmask(struct irq_data *d) |
34 | { | 78 | { |
35 | if (irq >= MCFINT_VECBASE) { | 79 | unsigned int irq = d->irq - MCFINT_VECBASE; |
36 | if (irq < MCFINT_VECBASE + 64) | 80 | |
37 | __raw_writeb(irq - MCFINT_VECBASE, MCFINTC0_CIMR); | 81 | if (MCFINTC1_CIMR && (irq > 64)) |
38 | else if ((irq < MCFINT_VECBASE + 128) && MCFINTC1_CIMR) | 82 | __raw_writeb(irq - 64, MCFINTC1_CIMR); |
39 | __raw_writeb(irq - MCFINT_VECBASE - 64, MCFINTC1_CIMR); | 83 | else |
84 | __raw_writeb(irq, MCFINTC0_CIMR); | ||
85 | } | ||
86 | |||
87 | static void intc_irq_ack(struct irq_data *d) | ||
88 | { | ||
89 | unsigned int ebit = irq2ebit(d->irq); | ||
90 | |||
91 | __raw_writeb(0x1 << ebit, MCFEPORT_EPFR); | ||
92 | } | ||
93 | |||
94 | static unsigned int intc_irq_startup(struct irq_data *d) | ||
95 | { | ||
96 | unsigned int irq = d->irq; | ||
97 | |||
98 | if ((irq >= EINT1) && (irq <= EINT7)) { | ||
99 | unsigned int ebit = irq2ebit(irq); | ||
100 | u8 v; | ||
101 | |||
102 | /* Set EPORT line as input */ | ||
103 | v = __raw_readb(MCFEPORT_EPDDR); | ||
104 | __raw_writeb(v & ~(0x1 << ebit), MCFEPORT_EPDDR); | ||
105 | |||
106 | /* Set EPORT line as interrupt source */ | ||
107 | v = __raw_readb(MCFEPORT_EPIER); | ||
108 | __raw_writeb(v | (0x1 << ebit), MCFEPORT_EPIER); | ||
40 | } | 109 | } |
110 | |||
111 | irq -= MCFINT_VECBASE; | ||
112 | if (MCFINTC1_ICR0 && (irq > 64)) | ||
113 | __raw_writeb(5, MCFINTC1_ICR0 + irq - 64); | ||
114 | else | ||
115 | __raw_writeb(5, MCFINTC0_ICR0 + irq); | ||
116 | |||
117 | |||
118 | intc_irq_unmask(d); | ||
119 | return 0; | ||
41 | } | 120 | } |
42 | 121 | ||
43 | static int intc_irq_set_type(unsigned int irq, unsigned int type) | 122 | static int intc_irq_set_type(struct irq_data *d, unsigned int type) |
44 | { | 123 | { |
45 | if (irq >= MCFINT_VECBASE) { | 124 | unsigned int ebit, irq = d->irq; |
46 | if (irq < MCFINT_VECBASE + 64) | 125 | u16 pa, tb; |
47 | __raw_writeb(5, MCFINTC0_ICR0 + irq - MCFINT_VECBASE); | 126 | |
48 | else if ((irq < MCFINT_VECBASE) && MCFINTC1_ICR0) | 127 | switch (type) { |
49 | __raw_writeb(5, MCFINTC1_ICR0 + irq - MCFINT_VECBASE - 64); | 128 | case IRQ_TYPE_EDGE_RISING: |
129 | tb = 0x1; | ||
130 | break; | ||
131 | case IRQ_TYPE_EDGE_FALLING: | ||
132 | tb = 0x2; | ||
133 | break; | ||
134 | case IRQ_TYPE_EDGE_BOTH: | ||
135 | tb = 0x3; | ||
136 | break; | ||
137 | default: | ||
138 | /* Level triggered */ | ||
139 | tb = 0; | ||
140 | break; | ||
50 | } | 141 | } |
142 | |||
143 | if (tb) | ||
144 | set_irq_handler(irq, handle_edge_irq); | ||
145 | |||
146 | ebit = irq2ebit(irq) * 2; | ||
147 | pa = __raw_readw(MCFEPORT_EPPAR); | ||
148 | pa = (pa & ~(0x3 << ebit)) | (tb << ebit); | ||
149 | __raw_writew(pa, MCFEPORT_EPPAR); | ||
150 | |||
51 | return 0; | 151 | return 0; |
52 | } | 152 | } |
53 | 153 | ||
54 | static struct irq_chip intc_irq_chip = { | 154 | static struct irq_chip intc_irq_chip = { |
55 | .name = "CF-INTC", | 155 | .name = "CF-INTC", |
56 | .mask = intc_irq_mask, | 156 | .irq_startup = intc_irq_startup, |
57 | .unmask = intc_irq_unmask, | 157 | .irq_mask = intc_irq_mask, |
58 | .set_type = intc_irq_set_type, | 158 | .irq_unmask = intc_irq_unmask, |
159 | }; | ||
160 | |||
161 | static struct irq_chip intc_irq_chip_edge_port = { | ||
162 | .name = "CF-INTC-EP", | ||
163 | .irq_startup = intc_irq_startup, | ||
164 | .irq_mask = intc_irq_mask, | ||
165 | .irq_unmask = intc_irq_unmask, | ||
166 | .irq_ack = intc_irq_ack, | ||
167 | .irq_set_type = intc_irq_set_type, | ||
59 | }; | 168 | }; |
60 | 169 | ||
61 | void __init init_IRQ(void) | 170 | void __init init_IRQ(void) |
62 | { | 171 | { |
63 | int irq; | 172 | int irq, eirq; |
64 | 173 | ||
65 | init_vectors(); | 174 | init_vectors(); |
66 | 175 | ||
@@ -69,8 +178,12 @@ void __init init_IRQ(void) | |||
69 | if (MCFINTC1_SIMR) | 178 | if (MCFINTC1_SIMR) |
70 | __raw_writeb(0xff, MCFINTC1_SIMR); | 179 | __raw_writeb(0xff, MCFINTC1_SIMR); |
71 | 180 | ||
72 | for (irq = 0; (irq < NR_IRQS); irq++) { | 181 | eirq = MCFINT_VECBASE + 64 + (MCFINTC1_ICR0 ? 64 : 0); |
73 | set_irq_chip(irq, &intc_irq_chip); | 182 | for (irq = MCFINT_VECBASE; (irq < eirq); irq++) { |
183 | if ((irq >= EINT1) && (irq <= EINT7)) | ||
184 | set_irq_chip(irq, &intc_irq_chip_edge_port); | ||
185 | else | ||
186 | set_irq_chip(irq, &intc_irq_chip); | ||
74 | set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH); | 187 | set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH); |
75 | set_irq_handler(irq, handle_level_irq); | 188 | set_irq_handler(irq, handle_level_irq); |
76 | } | 189 | } |
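On the 520x only external lines 1, 4 and 7 exist and they sit on consecutive vectors, so irq2ebit() above is a table lookup rather than a plain subtraction. A short worked example using the defines from this hunk:

    /* vector 66 is the second external line on the 520x, EDGE Port bit 4 */
    unsigned int ebit = irq2ebit(66);             /* irqebitmap[66 - EINT0] == 4 */

    __raw_writeb(0x1 << ebit, MCFEPORT_EPFR);     /* ack it in the flag register */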
diff --git a/arch/m68knommu/platform/coldfire/intc.c b/arch/m68knommu/platform/coldfire/intc.c index 60d2fcbe182b..d648081a63f6 100644 --- a/arch/m68knommu/platform/coldfire/intc.c +++ b/arch/m68knommu/platform/coldfire/intc.c | |||
@@ -111,28 +111,28 @@ void mcf_autovector(int irq) | |||
111 | #endif | 111 | #endif |
112 | } | 112 | } |
113 | 113 | ||
114 | static void intc_irq_mask(unsigned int irq) | 114 | static void intc_irq_mask(struct irq_data *d) |
115 | { | 115 | { |
116 | if (mcf_irq2imr[irq]) | 116 | if (mcf_irq2imr[d->irq]) |
117 | mcf_setimr(mcf_irq2imr[irq]); | 117 | mcf_setimr(mcf_irq2imr[d->irq]); |
118 | } | 118 | } |
119 | 119 | ||
120 | static void intc_irq_unmask(unsigned int irq) | 120 | static void intc_irq_unmask(struct irq_data *d) |
121 | { | 121 | { |
122 | if (mcf_irq2imr[irq]) | 122 | if (mcf_irq2imr[d->irq]) |
123 | mcf_clrimr(mcf_irq2imr[irq]); | 123 | mcf_clrimr(mcf_irq2imr[d->irq]); |
124 | } | 124 | } |
125 | 125 | ||
126 | static int intc_irq_set_type(unsigned int irq, unsigned int type) | 126 | static int intc_irq_set_type(struct irq_data *d, unsigned int type) |
127 | { | 127 | { |
128 | return 0; | 128 | return 0; |
129 | } | 129 | } |
130 | 130 | ||
131 | static struct irq_chip intc_irq_chip = { | 131 | static struct irq_chip intc_irq_chip = { |
132 | .name = "CF-INTC", | 132 | .name = "CF-INTC", |
133 | .mask = intc_irq_mask, | 133 | .irq_mask = intc_irq_mask, |
134 | .unmask = intc_irq_unmask, | 134 | .irq_unmask = intc_irq_unmask, |
135 | .set_type = intc_irq_set_type, | 135 | .irq_set_type = intc_irq_set_type, |
136 | }; | 136 | }; |
137 | 137 | ||
138 | void __init init_IRQ(void) | 138 | void __init init_IRQ(void) |
diff --git a/arch/m68knommu/platform/coldfire/pit.c b/arch/m68knommu/platform/coldfire/pit.c index aebea19abd78..c2b980926bec 100644 --- a/arch/m68knommu/platform/coldfire/pit.c +++ b/arch/m68knommu/platform/coldfire/pit.c | |||
@@ -31,7 +31,7 @@ | |||
31 | * By default use timer1 as the system clock timer. | 31 | * By default use timer1 as the system clock timer. |
32 | */ | 32 | */ |
33 | #define FREQ ((MCF_CLK / 2) / 64) | 33 | #define FREQ ((MCF_CLK / 2) / 64) |
34 | #define TA(a) (MCF_IPSBAR + MCFPIT_BASE1 + (a)) | 34 | #define TA(a) (MCFPIT_BASE1 + (a)) |
35 | #define PIT_CYCLES_PER_JIFFY (FREQ / HZ) | 35 | #define PIT_CYCLES_PER_JIFFY (FREQ / HZ) |
36 | 36 | ||
37 | static u32 pit_cnt; | 37 | static u32 pit_cnt; |
diff --git a/arch/m68knommu/platform/coldfire/timers.c b/arch/m68knommu/platform/coldfire/timers.c index 2304d736c701..60242f65fea9 100644 --- a/arch/m68knommu/platform/coldfire/timers.c +++ b/arch/m68knommu/platform/coldfire/timers.c | |||
@@ -28,7 +28,7 @@ | |||
28 | * By default use timer1 as the system clock timer. | 28 | * By default use timer1 as the system clock timer. |
29 | */ | 29 | */ |
30 | #define FREQ (MCF_BUSCLK / 16) | 30 | #define FREQ (MCF_BUSCLK / 16) |
31 | #define TA(a) (MCF_MBAR + MCFTIMER_BASE1 + (a)) | 31 | #define TA(a) (MCFTIMER_BASE1 + (a)) |
32 | 32 | ||
33 | /* | 33 | /* |
34 | * These provide the underlying interrupt vector support. | 34 | * These provide the underlying interrupt vector support. |
@@ -126,7 +126,7 @@ void hw_timer_init(void) | |||
126 | /* | 126 | /* |
127 | * By default use timer2 as the profiler clock timer. | 127 | * By default use timer2 as the profiler clock timer. |
128 | */ | 128 | */ |
129 | #define PA(a) (MCF_MBAR + MCFTIMER_BASE2 + (a)) | 129 | #define PA(a) (MCFTIMER_BASE2 + (a)) |
130 | 130 | ||
131 | /* | 131 | /* |
132 | * Choose a reasonably fast profile timer. Make it an odd value to | 132 | * Choose a reasonably fast profile timer. Make it an odd value to |
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 31680032053e..922c4194c7bb 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig | |||
@@ -17,6 +17,7 @@ config MICROBLAZE | |||
17 | select OF_EARLY_FLATTREE | 17 | select OF_EARLY_FLATTREE |
18 | select HAVE_GENERIC_HARDIRQS | 18 | select HAVE_GENERIC_HARDIRQS |
19 | select GENERIC_IRQ_PROBE | 19 | select GENERIC_IRQ_PROBE |
20 | select GENERIC_HARDIRQS_NO_DEPRECATED | ||
20 | 21 | ||
21 | config SWAP | 22 | config SWAP |
22 | def_bool n | 23 | def_bool n |
@@ -183,6 +184,17 @@ config LOWMEM_SIZE | |||
183 | hex "Maximum low memory size (in bytes)" if LOWMEM_SIZE_BOOL | 184 | hex "Maximum low memory size (in bytes)" if LOWMEM_SIZE_BOOL |
184 | default "0x30000000" | 185 | default "0x30000000" |
185 | 186 | ||
187 | config MANUAL_RESET_VECTOR | ||
188 | hex "Microblaze reset vector address setup" | ||
189 | default "0x0" | ||
190 | help | ||
191 | Set this option to have the kernel override the CPU Reset vector. | ||
192 | If zero, no change will be made to the MicroBlaze reset vector at | ||
193 | address 0x0. | ||
194 | If non-zero, a jump instruction to this address will be written | ||
195 | to the reset vector at address 0x0. | ||
196 | If you are unsure, set it to the default value 0x0. | ||
197 | |||
186 | config KERNEL_START_BOOL | 198 | config KERNEL_START_BOOL |
187 | bool "Set custom kernel base address" | 199 | bool "Set custom kernel base address" |
188 | depends on ADVANCED_OPTIONS | 200 | depends on ADVANCED_OPTIONS |
@@ -247,7 +259,7 @@ endmenu | |||
247 | 259 | ||
248 | source "mm/Kconfig" | 260 | source "mm/Kconfig" |
249 | 261 | ||
250 | menu "Exectuable file formats" | 262 | menu "Executable file formats" |
251 | 263 | ||
252 | source "fs/Kconfig.binfmt" | 264 | source "fs/Kconfig.binfmt" |
253 | 265 | ||
diff --git a/arch/microblaze/include/asm/cacheflush.h b/arch/microblaze/include/asm/cacheflush.h index 7ebd955460d9..0f553bc009a0 100644 --- a/arch/microblaze/include/asm/cacheflush.h +++ b/arch/microblaze/include/asm/cacheflush.h | |||
@@ -84,12 +84,13 @@ do { \ | |||
84 | #define flush_dcache_mmap_lock(mapping) do { } while (0) | 84 | #define flush_dcache_mmap_lock(mapping) do { } while (0) |
85 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) | 85 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) |
86 | 86 | ||
87 | |||
88 | #define flush_cache_dup_mm(mm) do { } while (0) | 87 | #define flush_cache_dup_mm(mm) do { } while (0) |
89 | #define flush_cache_vmap(start, end) do { } while (0) | 88 | #define flush_cache_vmap(start, end) do { } while (0) |
90 | #define flush_cache_vunmap(start, end) do { } while (0) | 89 | #define flush_cache_vunmap(start, end) do { } while (0) |
91 | #define flush_cache_mm(mm) do { } while (0) | 90 | #define flush_cache_mm(mm) do { } while (0) |
92 | #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) | 91 | |
92 | #define flush_cache_page(vma, vmaddr, pfn) \ | ||
93 | flush_dcache_range(pfn << PAGE_SHIFT, (pfn << PAGE_SHIFT) + PAGE_SIZE); | ||
93 | 94 | ||
94 | /* MS: kgdb code use this macro, wrong len with FLASH */ | 95 | /* MS: kgdb code use this macro, wrong len with FLASH */ |
95 | #if 0 | 96 | #if 0 |
@@ -104,9 +105,13 @@ do { \ | |||
104 | #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ | 105 | #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ |
105 | do { \ | 106 | do { \ |
106 | u32 addr = virt_to_phys(dst); \ | 107 | u32 addr = virt_to_phys(dst); \ |
107 | invalidate_icache_range((unsigned) (addr), (unsigned) (addr) + (len));\ | ||
108 | memcpy((dst), (src), (len)); \ | 108 | memcpy((dst), (src), (len)); \ |
109 | flush_dcache_range((unsigned) (addr), (unsigned) (addr) + (len));\ | 109 | if (vma->vm_flags & VM_EXEC) { \ |
110 | invalidate_icache_range((unsigned) (addr), \ | ||
111 | (unsigned) (addr) + PAGE_SIZE); \ | ||
112 | flush_dcache_range((unsigned) (addr), \ | ||
113 | (unsigned) (addr) + PAGE_SIZE); \ | ||
114 | } \ | ||
110 | } while (0) | 115 | } while (0) |
111 | 116 | ||
112 | #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ | 117 | #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ |
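The copy_to_user_page() change above restricts the cache maintenance to executable mappings, the case that matters when, for example, a debugger patches instructions in another task; plain data copies skip it. A minimal sketch of the resulting flow (addr, dst, src and len as in the macro; the debugger scenario is illustrative, not from the patch):

    memcpy(dst, src, len);                        /* new bytes sit in the D-cache */
    if (vma->vm_flags & VM_EXEC) {
            /* make them visible to instruction fetch */
            invalidate_icache_range(addr, addr + PAGE_SIZE);
            flush_dcache_range(addr, addr + PAGE_SIZE);
    }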
diff --git a/arch/microblaze/include/asm/cpuinfo.h b/arch/microblaze/include/asm/cpuinfo.h index cd257537ae54..d8f013347a9e 100644 --- a/arch/microblaze/include/asm/cpuinfo.h +++ b/arch/microblaze/include/asm/cpuinfo.h | |||
@@ -96,8 +96,8 @@ void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu); | |||
96 | 96 | ||
97 | static inline unsigned int fcpu(struct device_node *cpu, char *n) | 97 | static inline unsigned int fcpu(struct device_node *cpu, char *n) |
98 | { | 98 | { |
99 | int *val; | 99 | const __be32 *val; |
100 | return (val = (int *) of_get_property(cpu, n, NULL)) ? | 100 | return (val = of_get_property(cpu, n, NULL)) ? |
101 | be32_to_cpup(val) : 0; | 101 | be32_to_cpup(val) : 0; |
102 | } | 102 | } |
103 | 103 | ||
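fcpu() above now types the property pointer as const __be32 *, since flattened device tree cells are stored big-endian and must go through be32_to_cpup() before use. A minimal usage sketch (the property name is only an example):

    const __be32 *val = of_get_property(cpu, "timebase-frequency", NULL);
    unsigned int freq = val ? be32_to_cpup(val) : 0;  /* cell converted to host order */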
diff --git a/arch/microblaze/include/asm/entry.h b/arch/microblaze/include/asm/entry.h index ec89f2ad0fe1..af0144b91b79 100644 --- a/arch/microblaze/include/asm/entry.h +++ b/arch/microblaze/include/asm/entry.h | |||
@@ -31,40 +31,4 @@ DECLARE_PER_CPU(unsigned int, R11_SAVE); /* Temp variable for entry */ | |||
31 | DECLARE_PER_CPU(unsigned int, CURRENT_SAVE); /* Saved current pointer */ | 31 | DECLARE_PER_CPU(unsigned int, CURRENT_SAVE); /* Saved current pointer */ |
32 | # endif /* __ASSEMBLY__ */ | 32 | # endif /* __ASSEMBLY__ */ |
33 | 33 | ||
34 | #ifndef CONFIG_MMU | ||
35 | |||
36 | /* noMMU hasn't any space for args */ | ||
37 | # define STATE_SAVE_ARG_SPACE (0) | ||
38 | |||
39 | #else /* CONFIG_MMU */ | ||
40 | |||
41 | /* If true, system calls save and restore all registers (except result | ||
42 | * registers, of course). If false, then `call clobbered' registers | ||
43 | * will not be preserved, on the theory that system calls are basically | ||
44 | * function calls anyway, and the caller should be able to deal with it. | ||
45 | * This is a security risk, of course, as `internal' values may leak out | ||
46 | * after a system call, but that certainly doesn't matter very much for | ||
47 | * a processor with no MMU protection! For a protected-mode kernel, it | ||
48 | * would be faster to just zero those registers before returning. | ||
49 | * | ||
50 | * I can not rely on the glibc implementation. If you turn it off make | ||
51 | * sure that r11/r12 is saved in user-space. --KAA | ||
52 | * | ||
53 | * These are special variables using by the kernel trap/interrupt code | ||
54 | * to save registers in, at a time when there are no spare registers we | ||
55 | * can use to do so, and we can't depend on the value of the stack | ||
56 | * pointer. This means that they must be within a signed 16-bit | ||
57 | * displacement of 0x00000000. | ||
58 | */ | ||
59 | |||
60 | /* A `state save frame' is a struct pt_regs preceded by some extra space | ||
61 | * suitable for a function call stack frame. */ | ||
62 | |||
63 | /* Amount of room on the stack reserved for arguments and to satisfy the | ||
64 | * C calling conventions, in addition to the space used by the struct | ||
65 | * pt_regs that actually holds saved values. */ | ||
66 | #define STATE_SAVE_ARG_SPACE (6*4) /* Up to six arguments */ | ||
67 | |||
68 | #endif /* CONFIG_MMU */ | ||
69 | |||
70 | #endif /* _ASM_MICROBLAZE_ENTRY_H */ | 34 | #endif /* _ASM_MICROBLAZE_ENTRY_H */ |
diff --git a/arch/microblaze/include/asm/exceptions.h b/arch/microblaze/include/asm/exceptions.h index 6479097b802b..e6a8ddea1dca 100644 --- a/arch/microblaze/include/asm/exceptions.h +++ b/arch/microblaze/include/asm/exceptions.h | |||
@@ -66,6 +66,9 @@ | |||
66 | asmlinkage void full_exception(struct pt_regs *regs, unsigned int type, | 66 | asmlinkage void full_exception(struct pt_regs *regs, unsigned int type, |
67 | int fsr, int addr); | 67 | int fsr, int addr); |
68 | 68 | ||
69 | asmlinkage void sw_exception(struct pt_regs *regs); | ||
70 | void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig); | ||
71 | |||
69 | void die(const char *str, struct pt_regs *fp, long err); | 72 | void die(const char *str, struct pt_regs *fp, long err); |
70 | void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr); | 73 | void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr); |
71 | 74 | ||
diff --git a/arch/microblaze/include/asm/futex.h b/arch/microblaze/include/asm/futex.h index ad3fd61b2fe7..b0526d2716fa 100644 --- a/arch/microblaze/include/asm/futex.h +++ b/arch/microblaze/include/asm/futex.h | |||
@@ -29,7 +29,7 @@ | |||
29 | }) | 29 | }) |
30 | 30 | ||
31 | static inline int | 31 | static inline int |
32 | futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | 32 | futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) |
33 | { | 33 | { |
34 | int op = (encoded_op >> 28) & 7; | 34 | int op = (encoded_op >> 28) & 7; |
35 | int cmp = (encoded_op >> 24) & 15; | 35 | int cmp = (encoded_op >> 24) & 15; |
@@ -39,7 +39,7 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | |||
39 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) | 39 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) |
40 | oparg = 1 << oparg; | 40 | oparg = 1 << oparg; |
41 | 41 | ||
42 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | 42 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
43 | return -EFAULT; | 43 | return -EFAULT; |
44 | 44 | ||
45 | pagefault_disable(); | 45 | pagefault_disable(); |
@@ -94,31 +94,34 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | |||
94 | } | 94 | } |
95 | 95 | ||
96 | static inline int | 96 | static inline int |
97 | futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | 97 | futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, |
98 | u32 oldval, u32 newval) | ||
98 | { | 99 | { |
99 | int prev, cmp; | 100 | int ret = 0, cmp; |
101 | u32 prev; | ||
100 | 102 | ||
101 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | 103 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
102 | return -EFAULT; | 104 | return -EFAULT; |
103 | 105 | ||
104 | __asm__ __volatile__ ("1: lwx %0, %2, r0; \ | 106 | __asm__ __volatile__ ("1: lwx %1, %3, r0; \ |
105 | cmp %1, %0, %3; \ | 107 | cmp %2, %1, %4; \ |
106 | beqi %1, 3f; \ | 108 | beqi %2, 3f; \ |
107 | 2: swx %4, %2, r0; \ | 109 | 2: swx %5, %3, r0; \ |
108 | addic %1, r0, 0; \ | 110 | addic %2, r0, 0; \ |
109 | bnei %1, 1b; \ | 111 | bnei %2, 1b; \ |
110 | 3: \ | 112 | 3: \ |
111 | .section .fixup,\"ax\"; \ | 113 | .section .fixup,\"ax\"; \ |
112 | 4: brid 3b; \ | 114 | 4: brid 3b; \ |
113 | addik %0, r0, %5; \ | 115 | addik %0, r0, %6; \ |
114 | .previous; \ | 116 | .previous; \ |
115 | .section __ex_table,\"a\"; \ | 117 | .section __ex_table,\"a\"; \ |
116 | .word 1b,4b,2b,4b; \ | 118 | .word 1b,4b,2b,4b; \ |
117 | .previous;" \ | 119 | .previous;" \ |
118 | : "=&r" (prev), "=&r"(cmp) \ | 120 | : "+r" (ret), "=&r" (prev), "=&r"(cmp) \ |
119 | : "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT)); | 121 | : "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT)); |
120 | 122 | ||
121 | return prev; | 123 | *uval = prev; |
124 | return ret; | ||
122 | } | 125 | } |
123 | 126 | ||
124 | #endif /* __KERNEL__ */ | 127 | #endif /* __KERNEL__ */ |
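The futex_atomic_cmpxchg_inatomic() conversion above separates the two results: the return value now carries only the fault status, and the word actually read from user space comes back through *uval. A minimal caller-side sketch under that convention (the -EAGAIN retry decision is illustrative, not from this patch):

    u32 curval;
    int ret;

    ret = futex_atomic_cmpxchg_inatomic(&curval, uaddr, oldval, newval);
    if (ret)
            return ret;                /* -EFAULT: the user access faulted       */
    if (curval != oldval)
            return -EAGAIN;            /* compare failed, word changed under us  */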
diff --git a/arch/microblaze/include/asm/irq.h b/arch/microblaze/include/asm/irq.h index ec5583d6111c..cc54187f3d38 100644 --- a/arch/microblaze/include/asm/irq.h +++ b/arch/microblaze/include/asm/irq.h | |||
@@ -12,8 +12,6 @@ | |||
12 | #define NR_IRQS 32 | 12 | #define NR_IRQS 32 |
13 | #include <asm-generic/irq.h> | 13 | #include <asm-generic/irq.h> |
14 | 14 | ||
15 | #include <linux/interrupt.h> | ||
16 | |||
17 | /* This type is the placeholder for a hardware interrupt number. It has to | 15 | /* This type is the placeholder for a hardware interrupt number. It has to |
18 | * be big enough to enclose whatever representation is used by a given | 16 | * be big enough to enclose whatever representation is used by a given |
19 | * platform. | 17 | * platform. |
diff --git a/arch/microblaze/include/asm/pci-bridge.h b/arch/microblaze/include/asm/pci-bridge.h index 0c68764ab547..10717669e0c2 100644 --- a/arch/microblaze/include/asm/pci-bridge.h +++ b/arch/microblaze/include/asm/pci-bridge.h | |||
@@ -104,11 +104,22 @@ struct pci_controller { | |||
104 | int global_number; /* PCI domain number */ | 104 | int global_number; /* PCI domain number */ |
105 | }; | 105 | }; |
106 | 106 | ||
107 | #ifdef CONFIG_PCI | ||
107 | static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus) | 108 | static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus) |
108 | { | 109 | { |
109 | return bus->sysdata; | 110 | return bus->sysdata; |
110 | } | 111 | } |
111 | 112 | ||
113 | static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) | ||
114 | { | ||
115 | struct pci_controller *host; | ||
116 | |||
117 | if (bus->self) | ||
118 | return pci_device_to_OF_node(bus->self); | ||
119 | host = pci_bus_to_host(bus); | ||
120 | return host ? host->dn : NULL; | ||
121 | } | ||
122 | |||
112 | static inline int isa_vaddr_is_ioport(void __iomem *address) | 123 | static inline int isa_vaddr_is_ioport(void __iomem *address) |
113 | { | 124 | { |
114 | /* No specific ISA handling on ppc32 at this stage, it | 125 | /* No specific ISA handling on ppc32 at this stage, it |
@@ -116,6 +127,7 @@ static inline int isa_vaddr_is_ioport(void __iomem *address) | |||
116 | */ | 127 | */ |
117 | return 0; | 128 | return 0; |
118 | } | 129 | } |
130 | #endif /* CONFIG_PCI */ | ||
119 | 131 | ||
120 | /* These are used for config access before all the PCI probing | 132 | /* These are used for config access before all the PCI probing |
121 | has been done. */ | 133 | has been done. */ |
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h index 885574a73f01..b2af42311a12 100644 --- a/arch/microblaze/include/asm/pgtable.h +++ b/arch/microblaze/include/asm/pgtable.h | |||
@@ -572,7 +572,7 @@ void __init *early_get_page(void); | |||
572 | 572 | ||
573 | extern unsigned long ioremap_bot, ioremap_base; | 573 | extern unsigned long ioremap_bot, ioremap_base; |
574 | 574 | ||
575 | void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle); | 575 | void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle); |
576 | void consistent_free(size_t size, void *vaddr); | 576 | void consistent_free(size_t size, void *vaddr); |
577 | void consistent_sync(void *vaddr, size_t size, int direction); | 577 | void consistent_sync(void *vaddr, size_t size, int direction); |
578 | void consistent_sync_page(struct page *page, unsigned long offset, | 578 | void consistent_sync_page(struct page *page, unsigned long offset, |
diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h index 8eeb09211ece..aed2a6be8e27 100644 --- a/arch/microblaze/include/asm/processor.h +++ b/arch/microblaze/include/asm/processor.h | |||
@@ -155,7 +155,7 @@ unsigned long get_wchan(struct task_struct *p); | |||
155 | # define task_regs(task) ((struct pt_regs *)task_tos(task) - 1) | 155 | # define task_regs(task) ((struct pt_regs *)task_tos(task) - 1) |
156 | 156 | ||
157 | # define task_pt_regs_plus_args(tsk) \ | 157 | # define task_pt_regs_plus_args(tsk) \ |
158 | (((void *)task_pt_regs(tsk)) - STATE_SAVE_ARG_SPACE) | 158 | ((void *)task_pt_regs(tsk)) |
159 | 159 | ||
160 | # define task_sp(task) (task_regs(task)->r1) | 160 | # define task_sp(task) (task_regs(task)->r1) |
161 | # define task_pc(task) (task_regs(task)->pc) | 161 | # define task_pc(task) (task_regs(task)->pc) |
diff --git a/arch/microblaze/include/asm/prom.h b/arch/microblaze/include/asm/prom.h index 2e72af078b05..d0890d36ef61 100644 --- a/arch/microblaze/include/asm/prom.h +++ b/arch/microblaze/include/asm/prom.h | |||
@@ -64,21 +64,6 @@ extern void kdump_move_device_tree(void); | |||
64 | /* CPU OF node matching */ | 64 | /* CPU OF node matching */ |
65 | struct device_node *of_get_cpu_node(int cpu, unsigned int *thread); | 65 | struct device_node *of_get_cpu_node(int cpu, unsigned int *thread); |
66 | 66 | ||
67 | /** | ||
68 | * of_irq_map_pci - Resolve the interrupt for a PCI device | ||
69 | * @pdev: the device whose interrupt is to be resolved | ||
70 | * @out_irq: structure of_irq filled by this function | ||
71 | * | ||
72 | * This function resolves the PCI interrupt for a given PCI device. If a | ||
73 | * device-node exists for a given pci_dev, it will use normal OF tree | ||
74 | * walking. If not, it will implement standard swizzling and walk up the | ||
75 | * PCI tree until an device-node is found, at which point it will finish | ||
76 | * resolving using the OF tree walking. | ||
77 | */ | ||
78 | struct pci_dev; | ||
79 | struct of_irq; | ||
80 | extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq); | ||
81 | |||
82 | #endif /* __ASSEMBLY__ */ | 67 | #endif /* __ASSEMBLY__ */ |
83 | #endif /* __KERNEL__ */ | 68 | #endif /* __KERNEL__ */ |
84 | 69 | ||
diff --git a/arch/microblaze/include/asm/ptrace.h b/arch/microblaze/include/asm/ptrace.h index d74dbfb92c04..d9b66304d5dd 100644 --- a/arch/microblaze/include/asm/ptrace.h +++ b/arch/microblaze/include/asm/ptrace.h | |||
@@ -66,13 +66,13 @@ void show_regs(struct pt_regs *); | |||
66 | #else /* __KERNEL__ */ | 66 | #else /* __KERNEL__ */ |
67 | 67 | ||
68 | /* pt_regs offsets used by gdbserver etc in ptrace syscalls */ | 68 | /* pt_regs offsets used by gdbserver etc in ptrace syscalls */ |
69 | #define PT_GPR(n) ((n) * sizeof(microblaze_reg_t)) | 69 | #define PT_GPR(n) ((n) * sizeof(microblaze_reg_t)) |
70 | #define PT_PC (32 * sizeof(microblaze_reg_t)) | 70 | #define PT_PC (32 * sizeof(microblaze_reg_t)) |
71 | #define PT_MSR (33 * sizeof(microblaze_reg_t)) | 71 | #define PT_MSR (33 * sizeof(microblaze_reg_t)) |
72 | #define PT_EAR (34 * sizeof(microblaze_reg_t)) | 72 | #define PT_EAR (34 * sizeof(microblaze_reg_t)) |
73 | #define PT_ESR (35 * sizeof(microblaze_reg_t)) | 73 | #define PT_ESR (35 * sizeof(microblaze_reg_t)) |
74 | #define PT_FSR (36 * sizeof(microblaze_reg_t)) | 74 | #define PT_FSR (36 * sizeof(microblaze_reg_t)) |
75 | #define PT_KERNEL_MODE (37 * sizeof(microblaze_reg_t)) | 75 | #define PT_KERNEL_MODE (37 * sizeof(microblaze_reg_t)) |
76 | 76 | ||
77 | #endif /* __KERNEL */ | 77 | #endif /* __KERNEL */ |
78 | 78 | ||
diff --git a/arch/microblaze/include/asm/syscall.h b/arch/microblaze/include/asm/syscall.h index 048dfcd8d89d..9bc431783105 100644 --- a/arch/microblaze/include/asm/syscall.h +++ b/arch/microblaze/include/asm/syscall.h | |||
@@ -96,4 +96,7 @@ static inline void syscall_set_arguments(struct task_struct *task, | |||
96 | microblaze_set_syscall_arg(regs, i++, *args++); | 96 | microblaze_set_syscall_arg(regs, i++, *args++); |
97 | } | 97 | } |
98 | 98 | ||
99 | asmlinkage long do_syscall_trace_enter(struct pt_regs *regs); | ||
100 | asmlinkage void do_syscall_trace_leave(struct pt_regs *regs); | ||
101 | |||
99 | #endif /* __ASM_MICROBLAZE_SYSCALL_H */ | 102 | #endif /* __ASM_MICROBLAZE_SYSCALL_H */ |
diff --git a/arch/microblaze/include/asm/syscalls.h b/arch/microblaze/include/asm/syscalls.h index 720761cc741f..27f2f4c0f39f 100644 --- a/arch/microblaze/include/asm/syscalls.h +++ b/arch/microblaze/include/asm/syscalls.h | |||
@@ -1,5 +1,13 @@ | |||
1 | #ifndef __ASM_MICROBLAZE_SYSCALLS_H | 1 | #ifndef __ASM_MICROBLAZE_SYSCALLS_H |
2 | 2 | ||
3 | asmlinkage long microblaze_vfork(struct pt_regs *regs); | ||
4 | asmlinkage long microblaze_clone(int flags, unsigned long stack, | ||
5 | struct pt_regs *regs); | ||
6 | asmlinkage long microblaze_execve(const char __user *filenamei, | ||
7 | const char __user *const __user *argv, | ||
8 | const char __user *const __user *envp, | ||
9 | struct pt_regs *regs); | ||
10 | |||
3 | asmlinkage long sys_clone(int flags, unsigned long stack, struct pt_regs *regs); | 11 | asmlinkage long sys_clone(int flags, unsigned long stack, struct pt_regs *regs); |
4 | #define sys_clone sys_clone | 12 | #define sys_clone sys_clone |
5 | 13 | ||
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index d840f4a2d3c9..5bb95a11880d 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h | |||
@@ -120,16 +120,16 @@ static inline unsigned long __must_check __clear_user(void __user *to, | |||
120 | { | 120 | { |
121 | /* normal memset with two words to __ex_table */ | 121 | /* normal memset with two words to __ex_table */ |
122 | __asm__ __volatile__ ( \ | 122 | __asm__ __volatile__ ( \ |
123 | "1: sb r0, %2, r0;" \ | 123 | "1: sb r0, %1, r0;" \ |
124 | " addik %0, %0, -1;" \ | 124 | " addik %0, %0, -1;" \ |
125 | " bneid %0, 1b;" \ | 125 | " bneid %0, 1b;" \ |
126 | " addik %2, %2, 1;" \ | 126 | " addik %1, %1, 1;" \ |
127 | "2: " \ | 127 | "2: " \ |
128 | __EX_TABLE_SECTION \ | 128 | __EX_TABLE_SECTION \ |
129 | ".word 1b,2b;" \ | 129 | ".word 1b,2b;" \ |
130 | ".previous;" \ | 130 | ".previous;" \ |
131 | : "=r"(n) \ | 131 | : "=r"(n), "=r"(to) \ |
132 | : "0"(n), "r"(to) | 132 | : "0"(n), "1"(to) |
133 | ); | 133 | ); |
134 | return n; | 134 | return n; |
135 | } | 135 | } |
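The __clear_user() fix above lists the to pointer as an output operand tied to its own input ("=r"(to) with "1"(to)) because the asm advances it; as a plain input the compiler would be free to assume the register still holds the original pointer afterwards. A minimal sketch of the same constraint pattern (the helper and its increment are only an example):

    static inline unsigned long advance4(unsigned long p)
    {
            /* p is read and modified inside the asm, so it is an output
             * ("=r") tied to its own input ("0"), not a plain input */
            __asm__ __volatile__ ("addik %0, %0, 4" : "=r"(p) : "0"(p));
            return p;
    }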
diff --git a/arch/microblaze/include/asm/unaligned.h b/arch/microblaze/include/asm/unaligned.h index 2b97cbe500e9..b162ed880495 100644 --- a/arch/microblaze/include/asm/unaligned.h +++ b/arch/microblaze/include/asm/unaligned.h | |||
@@ -12,18 +12,19 @@ | |||
12 | 12 | ||
13 | # ifdef __KERNEL__ | 13 | # ifdef __KERNEL__ |
14 | 14 | ||
15 | # include <linux/unaligned/be_byteshift.h> | ||
16 | # include <linux/unaligned/le_byteshift.h> | ||
17 | # include <linux/unaligned/generic.h> | ||
18 | |||
19 | |||
20 | # ifdef __MICROBLAZEEL__ | 15 | # ifdef __MICROBLAZEEL__ |
16 | # include <linux/unaligned/le_struct.h> | ||
17 | # include <linux/unaligned/be_byteshift.h> | ||
21 | # define get_unaligned __get_unaligned_le | 18 | # define get_unaligned __get_unaligned_le |
22 | # define put_unaligned __put_unaligned_le | 19 | # define put_unaligned __put_unaligned_le |
23 | # else | 20 | # else |
21 | # include <linux/unaligned/be_struct.h> | ||
22 | # include <linux/unaligned/le_byteshift.h> | ||
24 | # define get_unaligned __get_unaligned_be | 23 | # define get_unaligned __get_unaligned_be |
25 | # define put_unaligned __put_unaligned_be | 24 | # define put_unaligned __put_unaligned_be |
26 | # endif | 25 | # endif |
27 | 26 | ||
27 | # include <linux/unaligned/generic.h> | ||
28 | |||
28 | # endif /* __KERNEL__ */ | 29 | # endif /* __KERNEL__ */ |
29 | #endif /* _ASM_MICROBLAZE_UNALIGNED_H */ | 30 | #endif /* _ASM_MICROBLAZE_UNALIGNED_H */ |
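With the header selection above, get_unaligned()/put_unaligned() in the native byte order compile to packed-struct accesses, while the opposite order keeps the byte-shift helpers. A minimal usage sketch (buf is an arbitrary, possibly misaligned byte buffer):

    u16 v = get_unaligned((u16 *)&buf[1]);        /* safe even at an odd address */
    put_unaligned((u16)(v + 1), (u16 *)&buf[1]);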
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c index 109876e8d643..cf0afd90a2c0 100644 --- a/arch/microblaze/kernel/cpu/cache.c +++ b/arch/microblaze/kernel/cpu/cache.c | |||
@@ -519,7 +519,7 @@ static void __flush_dcache_range_wb(unsigned long start, unsigned long end) | |||
519 | struct scache *mbc; | 519 | struct scache *mbc; |
520 | 520 | ||
521 | /* new wb cache model */ | 521 | /* new wb cache model */ |
522 | const struct scache wb_msr = { | 522 | static const struct scache wb_msr = { |
523 | .ie = __enable_icache_msr, | 523 | .ie = __enable_icache_msr, |
524 | .id = __disable_icache_msr, | 524 | .id = __disable_icache_msr, |
525 | .ifl = __flush_icache_all_noirq, | 525 | .ifl = __flush_icache_all_noirq, |
@@ -535,7 +535,7 @@ const struct scache wb_msr = { | |||
535 | }; | 535 | }; |
536 | 536 | ||
537 | /* There is only difference in ie, id, de, dd functions */ | 537 | /* There is only difference in ie, id, de, dd functions */ |
538 | const struct scache wb_nomsr = { | 538 | static const struct scache wb_nomsr = { |
539 | .ie = __enable_icache_nomsr, | 539 | .ie = __enable_icache_nomsr, |
540 | .id = __disable_icache_nomsr, | 540 | .id = __disable_icache_nomsr, |
541 | .ifl = __flush_icache_all_noirq, | 541 | .ifl = __flush_icache_all_noirq, |
@@ -551,7 +551,7 @@ const struct scache wb_nomsr = { | |||
551 | }; | 551 | }; |
552 | 552 | ||
553 | /* Old wt cache model with disabling irq and turn off cache */ | 553 | /* Old wt cache model with disabling irq and turn off cache */ |
554 | const struct scache wt_msr = { | 554 | static const struct scache wt_msr = { |
555 | .ie = __enable_icache_msr, | 555 | .ie = __enable_icache_msr, |
556 | .id = __disable_icache_msr, | 556 | .id = __disable_icache_msr, |
557 | .ifl = __flush_icache_all_msr_irq, | 557 | .ifl = __flush_icache_all_msr_irq, |
@@ -566,7 +566,7 @@ const struct scache wt_msr = { | |||
566 | .dinr = __invalidate_dcache_range_msr_irq_wt, | 566 | .dinr = __invalidate_dcache_range_msr_irq_wt, |
567 | }; | 567 | }; |
568 | 568 | ||
569 | const struct scache wt_nomsr = { | 569 | static const struct scache wt_nomsr = { |
570 | .ie = __enable_icache_nomsr, | 570 | .ie = __enable_icache_nomsr, |
571 | .id = __disable_icache_nomsr, | 571 | .id = __disable_icache_nomsr, |
572 | .ifl = __flush_icache_all_nomsr_irq, | 572 | .ifl = __flush_icache_all_nomsr_irq, |
@@ -582,7 +582,7 @@ const struct scache wt_nomsr = { | |||
582 | }; | 582 | }; |
583 | 583 | ||
584 | /* New wt cache model for newer Microblaze versions */ | 584 | /* New wt cache model for newer Microblaze versions */ |
585 | const struct scache wt_msr_noirq = { | 585 | static const struct scache wt_msr_noirq = { |
586 | .ie = __enable_icache_msr, | 586 | .ie = __enable_icache_msr, |
587 | .id = __disable_icache_msr, | 587 | .id = __disable_icache_msr, |
588 | .ifl = __flush_icache_all_noirq, | 588 | .ifl = __flush_icache_all_noirq, |
@@ -597,7 +597,7 @@ const struct scache wt_msr_noirq = { | |||
597 | .dinr = __invalidate_dcache_range_nomsr_wt, | 597 | .dinr = __invalidate_dcache_range_nomsr_wt, |
598 | }; | 598 | }; |
599 | 599 | ||
600 | const struct scache wt_nomsr_noirq = { | 600 | static const struct scache wt_nomsr_noirq = { |
601 | .ie = __enable_icache_nomsr, | 601 | .ie = __enable_icache_nomsr, |
602 | .id = __disable_icache_nomsr, | 602 | .id = __disable_icache_nomsr, |
603 | .ifl = __flush_icache_all_noirq, | 603 | .ifl = __flush_icache_all_noirq, |
@@ -624,7 +624,7 @@ void microblaze_cache_init(void) | |||
624 | if (cpuinfo.dcache_wb) { | 624 | if (cpuinfo.dcache_wb) { |
625 | INFO("wb_msr"); | 625 | INFO("wb_msr"); |
626 | mbc = (struct scache *)&wb_msr; | 626 | mbc = (struct scache *)&wb_msr; |
627 | if (cpuinfo.ver_code < CPUVER_7_20_D) { | 627 | if (cpuinfo.ver_code <= CPUVER_7_20_D) { |
628 | /* MS: problem with signal handling - hw bug */ | 628 | /* MS: problem with signal handling - hw bug */ |
629 | INFO("WB won't work properly"); | 629 | INFO("WB won't work properly"); |
630 | } | 630 | } |
@@ -641,7 +641,7 @@ void microblaze_cache_init(void) | |||
641 | if (cpuinfo.dcache_wb) { | 641 | if (cpuinfo.dcache_wb) { |
642 | INFO("wb_nomsr"); | 642 | INFO("wb_nomsr"); |
643 | mbc = (struct scache *)&wb_nomsr; | 643 | mbc = (struct scache *)&wb_nomsr; |
644 | if (cpuinfo.ver_code < CPUVER_7_20_D) { | 644 | if (cpuinfo.ver_code <= CPUVER_7_20_D) { |
645 | /* MS: problem with signal handling - hw bug */ | 645 | /* MS: problem with signal handling - hw bug */ |
646 | INFO("WB won't work properly"); | 646 | INFO("WB won't work properly"); |
647 | } | 647 | } |
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c index 2c309fccf230..c1640c52711f 100644 --- a/arch/microblaze/kernel/cpu/cpuinfo.c +++ b/arch/microblaze/kernel/cpu/cpuinfo.c | |||
@@ -33,6 +33,7 @@ const struct cpu_ver_key cpu_ver_lookup[] = { | |||
33 | {"7.30.b", 0x11}, | 33 | {"7.30.b", 0x11}, |
34 | {"8.00.a", 0x12}, | 34 | {"8.00.a", 0x12}, |
35 | {"8.00.b", 0x13}, | 35 | {"8.00.b", 0x13}, |
36 | {"8.10.a", 0x14}, | ||
36 | {NULL, 0}, | 37 | {NULL, 0}, |
37 | }; | 38 | }; |
38 | 39 | ||
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c index 79c74659f204..393e6b2db688 100644 --- a/arch/microblaze/kernel/dma.c +++ b/arch/microblaze/kernel/dma.c | |||
@@ -26,6 +26,7 @@ static inline void __dma_sync_page(unsigned long paddr, unsigned long offset, | |||
26 | { | 26 | { |
27 | switch (direction) { | 27 | switch (direction) { |
28 | case DMA_TO_DEVICE: | 28 | case DMA_TO_DEVICE: |
29 | case DMA_BIDIRECTIONAL: | ||
29 | flush_dcache_range(paddr + offset, paddr + offset + size); | 30 | flush_dcache_range(paddr + offset, paddr + offset + size); |
30 | break; | 31 | break; |
31 | case DMA_FROM_DEVICE: | 32 | case DMA_FROM_DEVICE: |
diff --git a/arch/microblaze/kernel/entry-nommu.S b/arch/microblaze/kernel/entry-nommu.S index ca84368570b6..34b526f59b43 100644 --- a/arch/microblaze/kernel/entry-nommu.S +++ b/arch/microblaze/kernel/entry-nommu.S | |||
@@ -115,7 +115,7 @@ ENTRY(_interrupt) | |||
115 | /* restore r31 */ | 115 | /* restore r31 */ |
116 | lwi r31, r0, PER_CPU(CURRENT_SAVE) | 116 | lwi r31, r0, PER_CPU(CURRENT_SAVE) |
117 | /* prepare the link register, the argument and jump */ | 117 | /* prepare the link register, the argument and jump */ |
118 | la r15, r0, ret_from_intr - 8 | 118 | addik r15, r0, ret_from_intr - 8 |
119 | addk r6, r0, r15 | 119 | addk r6, r0, r15 |
120 | braid do_IRQ | 120 | braid do_IRQ |
121 | add r5, r0, r1 | 121 | add r5, r0, r1 |
@@ -283,7 +283,7 @@ ENTRY(_user_exception) | |||
283 | add r12, r12, r12 /* convert num -> ptr */ | 283 | add r12, r12, r12 /* convert num -> ptr */ |
284 | add r12, r12, r12 | 284 | add r12, r12, r12 |
285 | lwi r12, r12, sys_call_table /* Get function pointer */ | 285 | lwi r12, r12, sys_call_table /* Get function pointer */ |
286 | la r15, r0, ret_to_user-8 /* set return address */ | 286 | addik r15, r0, ret_to_user-8 /* set return address */ |
287 | bra r12 /* Make the system call. */ | 287 | bra r12 /* Make the system call. */ |
288 | bri 0 /* won't reach here */ | 288 | bri 0 /* won't reach here */ |
289 | 1: | 289 | 1: |
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S index 41c30cdb2704..ca15bc5c7449 100644 --- a/arch/microblaze/kernel/entry.S +++ b/arch/microblaze/kernel/entry.S | |||
@@ -33,11 +33,14 @@ | |||
33 | 33 | ||
34 | #undef DEBUG | 34 | #undef DEBUG |
35 | 35 | ||
36 | /* The size of a state save frame. */ | 36 | #ifdef DEBUG |
37 | #define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE) | 37 | /* Create space for syscalls counting. */ |
38 | 38 | .section .data | |
39 | /* The offset of the struct pt_regs in a `state save frame' on the stack. */ | 39 | .global syscall_debug_table |
40 | #define PTO STATE_SAVE_ARG_SPACE /* 24 the space for args */ | 40 | .align 4 |
41 | syscall_debug_table: | ||
42 | .space (__NR_syscalls * 4) | ||
43 | #endif /* DEBUG */ | ||
41 | 44 | ||
42 | #define C_ENTRY(name) .globl name; .align 4; name | 45 | #define C_ENTRY(name) .globl name; .align 4; name |
43 | 46 | ||
@@ -172,72 +175,72 @@ | |||
172 | 1: | 175 | 1: |
173 | 176 | ||
174 | #define SAVE_REGS \ | 177 | #define SAVE_REGS \ |
175 | swi r2, r1, PTO+PT_R2; /* Save SDA */ \ | 178 | swi r2, r1, PT_R2; /* Save SDA */ \ |
176 | swi r3, r1, PTO+PT_R3; \ | 179 | swi r3, r1, PT_R3; \ |
177 | swi r4, r1, PTO+PT_R4; \ | 180 | swi r4, r1, PT_R4; \ |
178 | swi r5, r1, PTO+PT_R5; \ | 181 | swi r5, r1, PT_R5; \ |
179 | swi r6, r1, PTO+PT_R6; \ | 182 | swi r6, r1, PT_R6; \ |
180 | swi r7, r1, PTO+PT_R7; \ | 183 | swi r7, r1, PT_R7; \ |
181 | swi r8, r1, PTO+PT_R8; \ | 184 | swi r8, r1, PT_R8; \ |
182 | swi r9, r1, PTO+PT_R9; \ | 185 | swi r9, r1, PT_R9; \ |
183 | swi r10, r1, PTO+PT_R10; \ | 186 | swi r10, r1, PT_R10; \ |
184 | swi r11, r1, PTO+PT_R11; /* save clobbered regs after rval */\ | 187 | swi r11, r1, PT_R11; /* save clobbered regs after rval */\ |
185 | swi r12, r1, PTO+PT_R12; \ | 188 | swi r12, r1, PT_R12; \ |
186 | swi r13, r1, PTO+PT_R13; /* Save SDA2 */ \ | 189 | swi r13, r1, PT_R13; /* Save SDA2 */ \ |
187 | swi r14, r1, PTO+PT_PC; /* PC, before IRQ/trap */ \ | 190 | swi r14, r1, PT_PC; /* PC, before IRQ/trap */ \ |
188 | swi r15, r1, PTO+PT_R15; /* Save LP */ \ | 191 | swi r15, r1, PT_R15; /* Save LP */ \ |
189 | swi r16, r1, PTO+PT_R16; \ | 192 | swi r16, r1, PT_R16; \ |
190 | swi r17, r1, PTO+PT_R17; \ | 193 | swi r17, r1, PT_R17; \ |
191 | swi r18, r1, PTO+PT_R18; /* Save asm scratch reg */ \ | 194 | swi r18, r1, PT_R18; /* Save asm scratch reg */ \ |
192 | swi r19, r1, PTO+PT_R19; \ | 195 | swi r19, r1, PT_R19; \ |
193 | swi r20, r1, PTO+PT_R20; \ | 196 | swi r20, r1, PT_R20; \ |
194 | swi r21, r1, PTO+PT_R21; \ | 197 | swi r21, r1, PT_R21; \ |
195 | swi r22, r1, PTO+PT_R22; \ | 198 | swi r22, r1, PT_R22; \ |
196 | swi r23, r1, PTO+PT_R23; \ | 199 | swi r23, r1, PT_R23; \ |
197 | swi r24, r1, PTO+PT_R24; \ | 200 | swi r24, r1, PT_R24; \ |
198 | swi r25, r1, PTO+PT_R25; \ | 201 | swi r25, r1, PT_R25; \ |
199 | swi r26, r1, PTO+PT_R26; \ | 202 | swi r26, r1, PT_R26; \ |
200 | swi r27, r1, PTO+PT_R27; \ | 203 | swi r27, r1, PT_R27; \ |
201 | swi r28, r1, PTO+PT_R28; \ | 204 | swi r28, r1, PT_R28; \ |
202 | swi r29, r1, PTO+PT_R29; \ | 205 | swi r29, r1, PT_R29; \ |
203 | swi r30, r1, PTO+PT_R30; \ | 206 | swi r30, r1, PT_R30; \ |
204 | swi r31, r1, PTO+PT_R31; /* Save current task reg */ \ | 207 | swi r31, r1, PT_R31; /* Save current task reg */ \ |
205 | mfs r11, rmsr; /* save MSR */ \ | 208 | mfs r11, rmsr; /* save MSR */ \ |
206 | swi r11, r1, PTO+PT_MSR; | 209 | swi r11, r1, PT_MSR; |
207 | 210 | ||
208 | #define RESTORE_REGS \ | 211 | #define RESTORE_REGS \ |
209 | lwi r11, r1, PTO+PT_MSR; \ | 212 | lwi r11, r1, PT_MSR; \ |
210 | mts rmsr , r11; \ | 213 | mts rmsr , r11; \ |
211 | lwi r2, r1, PTO+PT_R2; /* restore SDA */ \ | 214 | lwi r2, r1, PT_R2; /* restore SDA */ \ |
212 | lwi r3, r1, PTO+PT_R3; \ | 215 | lwi r3, r1, PT_R3; \ |
213 | lwi r4, r1, PTO+PT_R4; \ | 216 | lwi r4, r1, PT_R4; \ |
214 | lwi r5, r1, PTO+PT_R5; \ | 217 | lwi r5, r1, PT_R5; \ |
215 | lwi r6, r1, PTO+PT_R6; \ | 218 | lwi r6, r1, PT_R6; \ |
216 | lwi r7, r1, PTO+PT_R7; \ | 219 | lwi r7, r1, PT_R7; \ |
217 | lwi r8, r1, PTO+PT_R8; \ | 220 | lwi r8, r1, PT_R8; \ |
218 | lwi r9, r1, PTO+PT_R9; \ | 221 | lwi r9, r1, PT_R9; \ |
219 | lwi r10, r1, PTO+PT_R10; \ | 222 | lwi r10, r1, PT_R10; \ |
220 | lwi r11, r1, PTO+PT_R11; /* restore clobbered regs after rval */\ | 223 | lwi r11, r1, PT_R11; /* restore clobbered regs after rval */\ |
221 | lwi r12, r1, PTO+PT_R12; \ | 224 | lwi r12, r1, PT_R12; \ |
222 | lwi r13, r1, PTO+PT_R13; /* restore SDA2 */ \ | 225 | lwi r13, r1, PT_R13; /* restore SDA2 */ \ |
223 | lwi r14, r1, PTO+PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\ | 226 | lwi r14, r1, PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\ |
224 | lwi r15, r1, PTO+PT_R15; /* restore LP */ \ | 227 | lwi r15, r1, PT_R15; /* restore LP */ \ |
225 | lwi r16, r1, PTO+PT_R16; \ | 228 | lwi r16, r1, PT_R16; \ |
226 | lwi r17, r1, PTO+PT_R17; \ | 229 | lwi r17, r1, PT_R17; \ |
227 | lwi r18, r1, PTO+PT_R18; /* restore asm scratch reg */ \ | 230 | lwi r18, r1, PT_R18; /* restore asm scratch reg */ \ |
228 | lwi r19, r1, PTO+PT_R19; \ | 231 | lwi r19, r1, PT_R19; \ |
229 | lwi r20, r1, PTO+PT_R20; \ | 232 | lwi r20, r1, PT_R20; \ |
230 | lwi r21, r1, PTO+PT_R21; \ | 233 | lwi r21, r1, PT_R21; \ |
231 | lwi r22, r1, PTO+PT_R22; \ | 234 | lwi r22, r1, PT_R22; \ |
232 | lwi r23, r1, PTO+PT_R23; \ | 235 | lwi r23, r1, PT_R23; \ |
233 | lwi r24, r1, PTO+PT_R24; \ | 236 | lwi r24, r1, PT_R24; \ |
234 | lwi r25, r1, PTO+PT_R25; \ | 237 | lwi r25, r1, PT_R25; \ |
235 | lwi r26, r1, PTO+PT_R26; \ | 238 | lwi r26, r1, PT_R26; \ |
236 | lwi r27, r1, PTO+PT_R27; \ | 239 | lwi r27, r1, PT_R27; \ |
237 | lwi r28, r1, PTO+PT_R28; \ | 240 | lwi r28, r1, PT_R28; \ |
238 | lwi r29, r1, PTO+PT_R29; \ | 241 | lwi r29, r1, PT_R29; \ |
239 | lwi r30, r1, PTO+PT_R30; \ | 242 | lwi r30, r1, PT_R30; \ |
240 | lwi r31, r1, PTO+PT_R31; /* Restore cur task reg */ | 243 | lwi r31, r1, PT_R31; /* Restore cur task reg */ |
241 | 244 | ||
242 | #define SAVE_STATE \ | 245 | #define SAVE_STATE \ |
243 | swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */ \ | 246 | swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */ \ |
@@ -250,11 +253,11 @@ | |||
250 | lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); \ | 253 | lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); \ |
251 | /* FIXME: I can add these two lines to one */ \ | 254 | /* FIXME: I can add these two lines to one */ \ |
252 | /* tophys(r1,r1); */ \ | 255 | /* tophys(r1,r1); */ \ |
253 | /* addik r1, r1, -STATE_SAVE_SIZE; */ \ | 256 | /* addik r1, r1, -PT_SIZE; */ \ |
254 | addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; \ | 257 | addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \ |
255 | SAVE_REGS \ | 258 | SAVE_REGS \ |
256 | brid 2f; \ | 259 | brid 2f; \ |
257 | swi r1, r1, PTO+PT_MODE; \ | 260 | swi r1, r1, PT_MODE; \ |
258 | 1: /* User-mode state save. */ \ | 261 | 1: /* User-mode state save. */ \ |
259 | lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\ | 262 | lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\ |
260 | tophys(r1,r1); \ | 263 | tophys(r1,r1); \ |
@@ -262,12 +265,12 @@ | |||
262 | /* MS these three instructions can be added to one */ \ | 265 | /* MS these three instructions can be added to one */ \ |
263 | /* addik r1, r1, THREAD_SIZE; */ \ | 266 | /* addik r1, r1, THREAD_SIZE; */ \ |
264 | /* tophys(r1,r1); */ \ | 267 | /* tophys(r1,r1); */ \ |
265 | /* addik r1, r1, -STATE_SAVE_SIZE; */ \ | 268 | /* addik r1, r1, -PT_SIZE; */ \ |
266 | addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; \ | 269 | addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \ |
267 | SAVE_REGS \ | 270 | SAVE_REGS \ |
268 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \ | 271 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \ |
269 | swi r11, r1, PTO+PT_R1; /* Store user SP. */ \ | 272 | swi r11, r1, PT_R1; /* Store user SP. */ \ |
270 | swi r0, r1, PTO + PT_MODE; /* Was in user-mode. */ \ | 273 | swi r0, r1, PT_MODE; /* Was in user-mode. */ \ |
271 | /* MS: I am clearing UMS even in case when I come from kernel space */ \ | 274 | /* MS: I am clearing UMS even in case when I come from kernel space */ \ |
272 | clear_ums; \ | 275 | clear_ums; \ |
273 | 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); | 276 | 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); |
@@ -299,10 +302,10 @@ C_ENTRY(_user_exception): | |||
299 | lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/ | 302 | lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/ |
300 | tophys(r1,r1); | 303 | tophys(r1,r1); |
301 | 304 | ||
302 | addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */ | 305 | addik r1, r1, -PT_SIZE; /* Make room on the stack. */ |
303 | SAVE_REGS | 306 | SAVE_REGS |
304 | 307 | ||
305 | swi r1, r1, PTO + PT_MODE; /* pt_regs -> kernel mode */ | 308 | swi r1, r1, PT_MODE; /* pt_regs -> kernel mode */ |
306 | brid 2f; | 309 | brid 2f; |
307 | nop; /* Fill delay slot */ | 310 | nop; /* Fill delay slot */ |
308 | 311 | ||
@@ -315,18 +318,18 @@ C_ENTRY(_user_exception): | |||
315 | addik r1, r1, THREAD_SIZE; | 318 | addik r1, r1, THREAD_SIZE; |
316 | tophys(r1,r1); | 319 | tophys(r1,r1); |
317 | 320 | ||
318 | addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */ | 321 | addik r1, r1, -PT_SIZE; /* Make room on the stack. */ |
319 | SAVE_REGS | 322 | SAVE_REGS |
320 | swi r0, r1, PTO + PT_R3 | 323 | swi r0, r1, PT_R3 |
321 | swi r0, r1, PTO + PT_R4 | 324 | swi r0, r1, PT_R4 |
322 | 325 | ||
323 | swi r0, r1, PTO + PT_MODE; /* Was in user-mode. */ | 326 | swi r0, r1, PT_MODE; /* Was in user-mode. */ |
324 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); | 327 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); |
325 | swi r11, r1, PTO+PT_R1; /* Store user SP. */ | 328 | swi r11, r1, PT_R1; /* Store user SP. */ |
326 | clear_ums; | 329 | clear_ums; |
327 | 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); | 330 | 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); |
328 | /* Save away the syscall number. */ | 331 | /* Save away the syscall number. */ |
329 | swi r12, r1, PTO+PT_R0; | 332 | swi r12, r1, PT_R0; |
330 | tovirt(r1,r1) | 333 | tovirt(r1,r1) |
331 | 334 | ||
332 | /* where the trap should return need -8 to adjust for rtsd r15, 8*/ | 335 | /* where the trap should return need -8 to adjust for rtsd r15, 8*/ |
@@ -345,18 +348,18 @@ C_ENTRY(_user_exception): | |||
345 | beqi r11, 4f | 348 | beqi r11, 4f |
346 | 349 | ||
347 | addik r3, r0, -ENOSYS | 350 | addik r3, r0, -ENOSYS |
348 | swi r3, r1, PTO + PT_R3 | 351 | swi r3, r1, PT_R3 |
349 | brlid r15, do_syscall_trace_enter | 352 | brlid r15, do_syscall_trace_enter |
350 | addik r5, r1, PTO + PT_R0 | 353 | addik r5, r1, PT_R0 |
351 | 354 | ||
352 | # do_syscall_trace_enter returns the new syscall nr. | 355 | # do_syscall_trace_enter returns the new syscall nr. |
353 | addk r12, r0, r3 | 356 | addk r12, r0, r3 |
354 | lwi r5, r1, PTO+PT_R5; | 357 | lwi r5, r1, PT_R5; |
355 | lwi r6, r1, PTO+PT_R6; | 358 | lwi r6, r1, PT_R6; |
356 | lwi r7, r1, PTO+PT_R7; | 359 | lwi r7, r1, PT_R7; |
357 | lwi r8, r1, PTO+PT_R8; | 360 | lwi r8, r1, PT_R8; |
358 | lwi r9, r1, PTO+PT_R9; | 361 | lwi r9, r1, PT_R9; |
359 | lwi r10, r1, PTO+PT_R10; | 362 | lwi r10, r1, PT_R10; |
360 | 4: | 363 | 4: |
361 | /* Jump to the appropriate function for the system call number in r12 | 364 | /* Jump to the appropriate function for the system call number in r12 |
362 | * (r12 is not preserved), or return an error if r12 is not valid. | 365 | * (r12 is not preserved), or return an error if r12 is not valid. |
@@ -371,10 +374,14 @@ C_ENTRY(_user_exception): | |||
371 | add r12, r12, r12; | 374 | add r12, r12, r12; |
372 | 375 | ||
373 | #ifdef DEBUG | 376 | #ifdef DEBUG |
374 | /* Track syscalls and store them to r0_ram */ | 377 | /* Track syscalls and store them to syscall_debug_table */ |
375 | lwi r3, r12, 0x400 + r0_ram | 378 | /* The first syscall location stores total syscall number */ |
379 | lwi r3, r0, syscall_debug_table | ||
380 | addi r3, r3, 1 | ||
381 | swi r3, r0, syscall_debug_table | ||
382 | lwi r3, r12, syscall_debug_table | ||
376 | addi r3, r3, 1 | 383 | addi r3, r3, 1 |
377 | swi r3, r12, 0x400 + r0_ram | 384 | swi r3, r12, syscall_debug_table |
378 | #endif | 385 | #endif |
379 | 386 | ||
380 | # Find and jump into the syscall handler. | 387 | # Find and jump into the syscall handler. |
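With DEBUG enabled, the block above keeps simple syscall statistics: slot 0 of syscall_debug_table holds the running total, and the slot indexed by the (already word-scaled) syscall number in r12 holds the per-syscall count. A rough C equivalent, for illustration only (the helper name is hypothetical):

    extern unsigned int syscall_debug_table[__NR_syscalls];

    static void count_syscall(unsigned int nr)
    {
            syscall_debug_table[0]++;     /* total number of syscalls taken */
            syscall_debug_table[nr]++;    /* per-syscall-number counter */
    }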
@@ -391,10 +398,10 @@ C_ENTRY(_user_exception): | |||
391 | /* Entry point used to return from a syscall/trap */ | 398 | /* Entry point used to return from a syscall/trap */ |
392 | /* We re-enable BIP bit before state restore */ | 399 | /* We re-enable BIP bit before state restore */ |
393 | C_ENTRY(ret_from_trap): | 400 | C_ENTRY(ret_from_trap): |
394 | swi r3, r1, PTO + PT_R3 | 401 | swi r3, r1, PT_R3 |
395 | swi r4, r1, PTO + PT_R4 | 402 | swi r4, r1, PT_R4 |
396 | 403 | ||
397 | lwi r11, r1, PTO + PT_MODE; | 404 | lwi r11, r1, PT_MODE; |
398 | /* See if returning to kernel mode, if so, skip resched &c. */ | 405 | /* See if returning to kernel mode, if so, skip resched &c. */ |
399 | bnei r11, 2f; | 406 | bnei r11, 2f; |
400 | /* We're returning to user mode, so check for various conditions that | 407 | /* We're returning to user mode, so check for various conditions that |
@@ -406,7 +413,7 @@ C_ENTRY(ret_from_trap): | |||
406 | beqi r11, 1f | 413 | beqi r11, 1f |
407 | 414 | ||
408 | brlid r15, do_syscall_trace_leave | 415 | brlid r15, do_syscall_trace_leave |
409 | addik r5, r1, PTO + PT_R0 | 416 | addik r5, r1, PT_R0 |
410 | 1: | 417 | 1: |
411 | /* We're returning to user mode, so check for various conditions that | 418 | /* We're returning to user mode, so check for various conditions that |
412 | * trigger rescheduling. */ | 419 | * trigger rescheduling. */ |
@@ -426,7 +433,7 @@ C_ENTRY(ret_from_trap): | |||
426 | andi r11, r11, _TIF_SIGPENDING; | 433 | andi r11, r11, _TIF_SIGPENDING; |
427 | beqi r11, 1f; /* Signals to handle, handle them */ | 434 | beqi r11, 1f; /* Signals to handle, handle them */ |
428 | 435 | ||
429 | addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ | 436 | addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */ |
430 | addi r7, r0, 1; /* Arg 3: int in_syscall */ | 437 | addi r7, r0, 1; /* Arg 3: int in_syscall */ |
431 | bralid r15, do_signal; /* Handle any signals */ | 438 | bralid r15, do_signal; /* Handle any signals */ |
432 | add r6, r0, r0; /* Arg 2: sigset_t *oldset */ | 439 | add r6, r0, r0; /* Arg 2: sigset_t *oldset */ |
@@ -437,7 +444,7 @@ C_ENTRY(ret_from_trap): | |||
437 | VM_OFF; | 444 | VM_OFF; |
438 | tophys(r1,r1); | 445 | tophys(r1,r1); |
439 | RESTORE_REGS; | 446 | RESTORE_REGS; |
440 | addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */ | 447 | addik r1, r1, PT_SIZE /* Clean up stack space. */ |
441 | lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */ | 448 | lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */ |
442 | bri 6f; | 449 | bri 6f; |
443 | 450 | ||
@@ -446,7 +453,7 @@ C_ENTRY(ret_from_trap): | |||
446 | VM_OFF; | 453 | VM_OFF; |
447 | tophys(r1,r1); | 454 | tophys(r1,r1); |
448 | RESTORE_REGS; | 455 | RESTORE_REGS; |
449 | addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */ | 456 | addik r1, r1, PT_SIZE /* Clean up stack space. */ |
450 | tovirt(r1,r1); | 457 | tovirt(r1,r1); |
451 | 6: | 458 | 6: |
452 | TRAP_return: /* Make global symbol for debugging */ | 459 | TRAP_return: /* Make global symbol for debugging */ |
@@ -459,8 +466,8 @@ TRAP_return: /* Make global symbol for debugging */ | |||
459 | 466 | ||
460 | C_ENTRY(sys_fork_wrapper): | 467 | C_ENTRY(sys_fork_wrapper): |
461 | addi r5, r0, SIGCHLD /* Arg 0: flags */ | 468 | addi r5, r0, SIGCHLD /* Arg 0: flags */ |
462 | lwi r6, r1, PTO+PT_R1 /* Arg 1: child SP (use parent's) */ | 469 | lwi r6, r1, PT_R1 /* Arg 1: child SP (use parent's) */ |
463 | addik r7, r1, PTO /* Arg 2: parent context */ | 470 | addik r7, r1, 0 /* Arg 2: parent context */ |
464 | add r8. r0, r0 /* Arg 3: (unused) */ | 471 | add r8. r0, r0 /* Arg 3: (unused) */ |
465 | add r9, r0, r0; /* Arg 4: (unused) */ | 472 | add r9, r0, r0; /* Arg 4: (unused) */ |
466 | brid do_fork /* Do real work (tail-call) */ | 473 | brid do_fork /* Do real work (tail-call) */ |
@@ -480,12 +487,12 @@ C_ENTRY(ret_from_fork): | |||
480 | 487 | ||
481 | C_ENTRY(sys_vfork): | 488 | C_ENTRY(sys_vfork): |
482 | brid microblaze_vfork /* Do real work (tail-call) */ | 489 | brid microblaze_vfork /* Do real work (tail-call) */ |
483 | addik r5, r1, PTO | 490 | addik r5, r1, 0 |
484 | 491 | ||
485 | C_ENTRY(sys_clone): | 492 | C_ENTRY(sys_clone): |
486 | bnei r6, 1f; /* See if child SP arg (arg 1) is 0. */ | 493 | bnei r6, 1f; /* See if child SP arg (arg 1) is 0. */ |
487 | lwi r6, r1, PTO + PT_R1; /* If so, use parent's stack ptr */ | 494 | lwi r6, r1, PT_R1; /* If so, use parent's stack ptr */ |
488 | 1: addik r7, r1, PTO; /* Arg 2: parent context */ | 495 | 1: addik r7, r1, 0; /* Arg 2: parent context */ |
489 | add r8, r0, r0; /* Arg 3: (unused) */ | 496 | add r8, r0, r0; /* Arg 3: (unused) */ |
490 | add r9, r0, r0; /* Arg 4: (unused) */ | 497 | add r9, r0, r0; /* Arg 4: (unused) */ |
491 | brid do_fork /* Do real work (tail-call) */ | 498 | brid do_fork /* Do real work (tail-call) */ |
@@ -493,11 +500,11 @@ C_ENTRY(sys_clone): | |||
493 | 500 | ||
494 | C_ENTRY(sys_execve): | 501 | C_ENTRY(sys_execve): |
495 | brid microblaze_execve; /* Do real work (tail-call).*/ | 502 | brid microblaze_execve; /* Do real work (tail-call).*/ |
496 | addik r8, r1, PTO; /* add user context as 4th arg */ | 503 | addik r8, r1, 0; /* add user context as 4th arg */ |
497 | 504 | ||
498 | C_ENTRY(sys_rt_sigreturn_wrapper): | 505 | C_ENTRY(sys_rt_sigreturn_wrapper): |
499 | brid sys_rt_sigreturn /* Do real work */ | 506 | brid sys_rt_sigreturn /* Do real work */ |
500 | addik r5, r1, PTO; /* add user context as 1st arg */ | 507 | addik r5, r1, 0; /* add user context as 1st arg */ |
501 | 508 | ||
502 | /* | 509 | /* |
503 | * HW EXCEPTION routine start | 510 | * HW EXCEPTION routine start |
@@ -508,7 +515,7 @@ C_ENTRY(full_exception_trap): | |||
508 | addik r17, r17, -4 | 515 | addik r17, r17, -4 |
509 | SAVE_STATE /* Save registers */ | 516 | SAVE_STATE /* Save registers */ |
510 | /* PC, before IRQ/trap - this is one instruction above */ | 517 | /* PC, before IRQ/trap - this is one instruction above */ |
511 | swi r17, r1, PTO+PT_PC; | 518 | swi r17, r1, PT_PC; |
512 | tovirt(r1,r1) | 519 | tovirt(r1,r1) |
513 | /* FIXME this can be stored directly in PT_ESR reg. | 520 | /* FIXME this can be stored directly in PT_ESR reg. |
514 | * I tested it but there is a fault */ | 521 | * I tested it but there is a fault */ |
@@ -518,7 +525,7 @@ C_ENTRY(full_exception_trap): | |||
518 | mfs r7, rfsr; /* save FSR */ | 525 | mfs r7, rfsr; /* save FSR */ |
519 | mts rfsr, r0; /* Clear sticky fsr */ | 526 | mts rfsr, r0; /* Clear sticky fsr */ |
520 | rted r0, full_exception | 527 | rted r0, full_exception |
521 | addik r5, r1, PTO /* parameter struct pt_regs * regs */ | 528 | addik r5, r1, 0 /* parameter struct pt_regs * regs */ |
522 | 529 | ||
523 | /* | 530 | /* |
524 | * Unaligned data trap. | 531 | * Unaligned data trap. |
@@ -544,14 +551,14 @@ C_ENTRY(unaligned_data_trap): | |||
544 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); | 551 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); |
545 | SAVE_STATE /* Save registers.*/ | 552 | SAVE_STATE /* Save registers.*/ |
546 | /* PC, before IRQ/trap - this is one instruction above */ | 553 | /* PC, before IRQ/trap - this is one instruction above */ |
547 | swi r17, r1, PTO+PT_PC; | 554 | swi r17, r1, PT_PC; |
548 | tovirt(r1,r1) | 555 | tovirt(r1,r1) |
549 | /* where the trap should return need -8 to adjust for rtsd r15, 8 */ | 556 | /* where the trap should return need -8 to adjust for rtsd r15, 8 */ |
550 | addik r15, r0, ret_from_exc-8 | 557 | addik r15, r0, ret_from_exc-8 |
551 | mfs r3, resr /* ESR */ | 558 | mfs r3, resr /* ESR */ |
552 | mfs r4, rear /* EAR */ | 559 | mfs r4, rear /* EAR */ |
553 | rtbd r0, _unaligned_data_exception | 560 | rtbd r0, _unaligned_data_exception |
554 | addik r7, r1, PTO /* parameter struct pt_regs * regs */ | 561 | addik r7, r1, 0 /* parameter struct pt_regs * regs */ |
555 | 562 | ||
556 | /* | 563 | /* |
557 | * Page fault traps. | 564 | * Page fault traps. |
@@ -574,30 +581,30 @@ C_ENTRY(unaligned_data_trap): | |||
574 | C_ENTRY(page_fault_data_trap): | 581 | C_ENTRY(page_fault_data_trap): |
575 | SAVE_STATE /* Save registers.*/ | 582 | SAVE_STATE /* Save registers.*/ |
576 | /* PC, before IRQ/trap - this is one instruction above */ | 583 | /* PC, before IRQ/trap - this is one instruction above */ |
577 | swi r17, r1, PTO+PT_PC; | 584 | swi r17, r1, PT_PC; |
578 | tovirt(r1,r1) | 585 | tovirt(r1,r1) |
579 | /* where the trap should return need -8 to adjust for rtsd r15, 8 */ | 586 | /* where the trap should return need -8 to adjust for rtsd r15, 8 */ |
580 | addik r15, r0, ret_from_exc-8 | 587 | addik r15, r0, ret_from_exc-8 |
581 | mfs r6, rear /* parameter unsigned long address */ | 588 | mfs r6, rear /* parameter unsigned long address */ |
582 | mfs r7, resr /* parameter unsigned long error_code */ | 589 | mfs r7, resr /* parameter unsigned long error_code */ |
583 | rted r0, do_page_fault | 590 | rted r0, do_page_fault |
584 | addik r5, r1, PTO /* parameter struct pt_regs * regs */ | 591 | addik r5, r1, 0 /* parameter struct pt_regs * regs */ |
585 | 592 | ||
586 | C_ENTRY(page_fault_instr_trap): | 593 | C_ENTRY(page_fault_instr_trap): |
587 | SAVE_STATE /* Save registers.*/ | 594 | SAVE_STATE /* Save registers.*/ |
588 | /* PC, before IRQ/trap - this is one instruction above */ | 595 | /* PC, before IRQ/trap - this is one instruction above */ |
589 | swi r17, r1, PTO+PT_PC; | 596 | swi r17, r1, PT_PC; |
590 | tovirt(r1,r1) | 597 | tovirt(r1,r1) |
591 | /* where the trap should return need -8 to adjust for rtsd r15, 8 */ | 598 | /* where the trap should return need -8 to adjust for rtsd r15, 8 */ |
592 | addik r15, r0, ret_from_exc-8 | 599 | addik r15, r0, ret_from_exc-8 |
593 | mfs r6, rear /* parameter unsigned long address */ | 600 | mfs r6, rear /* parameter unsigned long address */ |
594 | ori r7, r0, 0 /* parameter unsigned long error_code */ | 601 | ori r7, r0, 0 /* parameter unsigned long error_code */ |
595 | rted r0, do_page_fault | 602 | rted r0, do_page_fault |
596 | addik r5, r1, PTO /* parameter struct pt_regs * regs */ | 603 | addik r5, r1, 0 /* parameter struct pt_regs * regs */ |
597 | 604 | ||
598 | /* Entry point used to return from an exception. */ | 605 | /* Entry point used to return from an exception. */ |
599 | C_ENTRY(ret_from_exc): | 606 | C_ENTRY(ret_from_exc): |
600 | lwi r11, r1, PTO + PT_MODE; | 607 | lwi r11, r1, PT_MODE; |
601 | bnei r11, 2f; /* See if returning to kernel mode, */ | 608 | bnei r11, 2f; /* See if returning to kernel mode, */ |
602 | /* ... if so, skip resched &c. */ | 609 | /* ... if so, skip resched &c. */ |
603 | 610 | ||
@@ -629,7 +636,7 @@ C_ENTRY(ret_from_exc): | |||
629 | * complete register state. Here we save anything not saved by | 636 | * complete register state. Here we save anything not saved by |
630 | * the normal entry sequence, so that it may be safely restored | 637 | * the normal entry sequence, so that it may be safely restored |
631 | * (in a possibly modified form) after do_signal returns. */ | 638 | * (in a possibly modified form) after do_signal returns. */ |
632 | addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ | 639 | addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */ |
633 | addi r7, r0, 0; /* Arg 3: int in_syscall */ | 640 | addi r7, r0, 0; /* Arg 3: int in_syscall */ |
634 | bralid r15, do_signal; /* Handle any signals */ | 641 | bralid r15, do_signal; /* Handle any signals */ |
635 | add r6, r0, r0; /* Arg 2: sigset_t *oldset */ | 642 | add r6, r0, r0; /* Arg 2: sigset_t *oldset */ |
@@ -641,7 +648,7 @@ C_ENTRY(ret_from_exc): | |||
641 | tophys(r1,r1); | 648 | tophys(r1,r1); |
642 | 649 | ||
643 | RESTORE_REGS; | 650 | RESTORE_REGS; |
644 | addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */ | 651 | addik r1, r1, PT_SIZE /* Clean up stack space. */ |
645 | 652 | ||
646 | lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */ | 653 | lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */ |
647 | bri 6f; | 654 | bri 6f; |
@@ -650,7 +657,7 @@ C_ENTRY(ret_from_exc): | |||
650 | VM_OFF; | 657 | VM_OFF; |
651 | tophys(r1,r1); | 658 | tophys(r1,r1); |
652 | RESTORE_REGS; | 659 | RESTORE_REGS; |
653 | addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */ | 660 | addik r1, r1, PT_SIZE /* Clean up stack space. */ |
654 | 661 | ||
655 | tovirt(r1,r1); | 662 | tovirt(r1,r1); |
656 | 6: | 663 | 6: |
@@ -683,10 +690,10 @@ C_ENTRY(_interrupt): | |||
683 | tophys(r1,r1); /* MS: I have in r1 physical address where stack is */ | 690 | tophys(r1,r1); /* MS: I have in r1 physical address where stack is */ |
684 | /* save registers */ | 691 | /* save registers */ |
685 | /* MS: Make room on the stack -> activation record */ | 692 | /* MS: Make room on the stack -> activation record */ |
686 | addik r1, r1, -STATE_SAVE_SIZE; | 693 | addik r1, r1, -PT_SIZE; |
687 | SAVE_REGS | 694 | SAVE_REGS |
688 | brid 2f; | 695 | brid 2f; |
689 | swi r1, r1, PTO + PT_MODE; /* 0 - user mode, 1 - kernel mode */ | 696 | swi r1, r1, PT_MODE; /* 0 - user mode, 1 - kernel mode */ |
690 | 1: | 697 | 1: |
691 | /* User-mode state save. */ | 698 | /* User-mode state save. */ |
692 | /* MS: get the saved current */ | 699 | /* MS: get the saved current */ |
@@ -696,23 +703,23 @@ C_ENTRY(_interrupt): | |||
696 | addik r1, r1, THREAD_SIZE; | 703 | addik r1, r1, THREAD_SIZE; |
697 | tophys(r1,r1); | 704 | tophys(r1,r1); |
698 | /* save registers */ | 705 | /* save registers */ |
699 | addik r1, r1, -STATE_SAVE_SIZE; | 706 | addik r1, r1, -PT_SIZE; |
700 | SAVE_REGS | 707 | SAVE_REGS |
701 | /* calculate mode */ | 708 | /* calculate mode */ |
702 | swi r0, r1, PTO + PT_MODE; | 709 | swi r0, r1, PT_MODE; |
703 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); | 710 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); |
704 | swi r11, r1, PTO+PT_R1; | 711 | swi r11, r1, PT_R1; |
705 | clear_ums; | 712 | clear_ums; |
706 | 2: | 713 | 2: |
707 | lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); | 714 | lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); |
708 | tovirt(r1,r1) | 715 | tovirt(r1,r1) |
709 | addik r15, r0, irq_call; | 716 | addik r15, r0, irq_call; |
710 | irq_call:rtbd r0, do_IRQ; | 717 | irq_call:rtbd r0, do_IRQ; |
711 | addik r5, r1, PTO; | 718 | addik r5, r1, 0; |
712 | 719 | ||
713 | /* MS: we are in virtual mode */ | 720 | /* MS: we are in virtual mode */ |
714 | ret_from_irq: | 721 | ret_from_irq: |
715 | lwi r11, r1, PTO + PT_MODE; | 722 | lwi r11, r1, PT_MODE; |
716 | bnei r11, 2f; | 723 | bnei r11, 2f; |
717 | 724 | ||
718 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; | 725 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; |
@@ -729,7 +736,7 @@ ret_from_irq: | |||
729 | beqid r11, no_intr_resched | 736 | beqid r11, no_intr_resched |
730 | /* Handle a signal return; Pending signals should be in r18. */ | 737 | /* Handle a signal return; Pending signals should be in r18. */ |
731 | addi r7, r0, 0; /* Arg 3: int in_syscall */ | 738 | addi r7, r0, 0; /* Arg 3: int in_syscall */ |
732 | addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ | 739 | addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */ |
733 | bralid r15, do_signal; /* Handle any signals */ | 740 | bralid r15, do_signal; /* Handle any signals */ |
734 | add r6, r0, r0; /* Arg 2: sigset_t *oldset */ | 741 | add r6, r0, r0; /* Arg 2: sigset_t *oldset */ |
735 | 742 | ||
@@ -741,7 +748,7 @@ no_intr_resched: | |||
741 | VM_OFF; | 748 | VM_OFF; |
742 | tophys(r1,r1); | 749 | tophys(r1,r1); |
743 | RESTORE_REGS | 750 | RESTORE_REGS |
744 | addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */ | 751 | addik r1, r1, PT_SIZE /* MS: Clean up stack space. */ |
745 | lwi r1, r1, PT_R1 - PT_SIZE; | 752 | lwi r1, r1, PT_R1 - PT_SIZE; |
746 | bri 6f; | 753 | bri 6f; |
747 | /* MS: Return to kernel state. */ | 754 | /* MS: Return to kernel state. */ |
@@ -769,7 +776,7 @@ restore: | |||
769 | VM_OFF /* MS: turn off MMU */ | 776 | VM_OFF /* MS: turn off MMU */ |
770 | tophys(r1,r1) | 777 | tophys(r1,r1) |
771 | RESTORE_REGS | 778 | RESTORE_REGS |
772 | addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */ | 779 | addik r1, r1, PT_SIZE /* MS: Clean up stack space. */ |
773 | tovirt(r1,r1); | 780 | tovirt(r1,r1); |
774 | 6: | 781 | 6: |
775 | IRQ_return: /* MS: Make global symbol for debugging */ | 782 | IRQ_return: /* MS: Make global symbol for debugging */ |
@@ -792,29 +799,29 @@ C_ENTRY(_debug_exception): | |||
792 | lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/ | 799 | lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/ |
793 | 800 | ||
794 | /* BIP bit is set on entry, no interrupts can occur */ | 801 | /* BIP bit is set on entry, no interrupts can occur */ |
795 | addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; | 802 | addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; |
796 | SAVE_REGS; | 803 | SAVE_REGS; |
797 | /* save all regs to pt_reg structure */ | 804 | /* save all regs to pt_reg structure */ |
798 | swi r0, r1, PTO+PT_R0; /* R0 must be saved too */ | 805 | swi r0, r1, PT_R0; /* R0 must be saved too */ |
799 | swi r14, r1, PTO+PT_R14 /* rewrite saved R14 value */ | 806 | swi r14, r1, PT_R14 /* rewrite saved R14 value */ |
800 | swi r16, r1, PTO+PT_PC; /* PC and r16 are the same */ | 807 | swi r16, r1, PT_PC; /* PC and r16 are the same */ |
801 | /* save special purpose registers to pt_regs */ | 808 | /* save special purpose registers to pt_regs */ |
802 | mfs r11, rear; | 809 | mfs r11, rear; |
803 | swi r11, r1, PTO+PT_EAR; | 810 | swi r11, r1, PT_EAR; |
804 | mfs r11, resr; | 811 | mfs r11, resr; |
805 | swi r11, r1, PTO+PT_ESR; | 812 | swi r11, r1, PT_ESR; |
806 | mfs r11, rfsr; | 813 | mfs r11, rfsr; |
807 | swi r11, r1, PTO+PT_FSR; | 814 | swi r11, r1, PT_FSR; |
808 | 815 | ||
809 | /* stack pointer is a physical address as it is decreased | 816 | /* stack pointer is a physical address as it is decreased |
810 | * by STATE_SAVE_SIZE but we need to get the correct R1 value */ | 817 | * by PT_SIZE but we need to get the correct R1 value */ |
811 | addik r11, r1, CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR + STATE_SAVE_SIZE; | 818 | addik r11, r1, CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR + PT_SIZE; |
812 | swi r11, r1, PTO+PT_R1 | 819 | swi r11, r1, PT_R1 |
813 | /* MS: r31 - current pointer isn't changed */ | 820 | /* MS: r31 - current pointer isn't changed */ |
814 | tovirt(r1,r1) | 821 | tovirt(r1,r1) |
815 | #ifdef CONFIG_KGDB | 822 | #ifdef CONFIG_KGDB |
816 | addi r5, r1, PTO /* pass pt_reg address as the first arg */ | 823 | addi r5, r1, 0 /* pass pt_reg address as the first arg */ |
817 | la r15, r0, dbtrap_call; /* return address */ | 824 | addik r15, r0, dbtrap_call; /* return address */ |
818 | rtbd r0, microblaze_kgdb_break | 825 | rtbd r0, microblaze_kgdb_break |
819 | nop; | 826 | nop; |
820 | #endif | 827 | #endif |
@@ -829,16 +836,16 @@ C_ENTRY(_debug_exception): | |||
829 | addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */ | 836 | addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */ |
830 | tophys(r1,r1); | 837 | tophys(r1,r1); |
831 | 838 | ||
832 | addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */ | 839 | addik r1, r1, -PT_SIZE; /* Make room on the stack. */ |
833 | SAVE_REGS; | 840 | SAVE_REGS; |
834 | swi r16, r1, PTO+PT_PC; /* Save LP */ | 841 | swi r16, r1, PT_PC; /* Save LP */ |
835 | swi r0, r1, PTO + PT_MODE; /* Was in user-mode. */ | 842 | swi r0, r1, PT_MODE; /* Was in user-mode. */ |
836 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); | 843 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); |
837 | swi r11, r1, PTO+PT_R1; /* Store user SP. */ | 844 | swi r11, r1, PT_R1; /* Store user SP. */ |
838 | lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); | 845 | lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); |
839 | tovirt(r1,r1) | 846 | tovirt(r1,r1) |
840 | set_vms; | 847 | set_vms; |
841 | addik r5, r1, PTO; | 848 | addik r5, r1, 0; |
842 | addik r15, r0, dbtrap_call; | 849 | addik r15, r0, dbtrap_call; |
843 | dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */ | 850 | dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */ |
844 | rtbd r0, sw_exception | 851 | rtbd r0, sw_exception |
@@ -846,7 +853,7 @@ dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */ | |||
846 | 853 | ||
847 | /* MS: The first instruction for the second part of the gdb/kgdb */ | 854 | /* MS: The first instruction for the second part of the gdb/kgdb */ |
848 | set_bip; /* Ints masked for state restore */ | 855 | set_bip; /* Ints masked for state restore */ |
849 | lwi r11, r1, PTO + PT_MODE; | 856 | lwi r11, r1, PT_MODE; |
850 | bnei r11, 2f; | 857 | bnei r11, 2f; |
851 | /* MS: Return to user space - gdb */ | 858 | /* MS: Return to user space - gdb */ |
852 | /* Get current task ptr into r11 */ | 859 | /* Get current task ptr into r11 */ |
@@ -865,7 +872,7 @@ dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */ | |||
865 | andi r11, r11, _TIF_SIGPENDING; | 872 | andi r11, r11, _TIF_SIGPENDING; |
866 | beqi r11, 1f; /* Signals to handle, handle them */ | 873 | beqi r11, 1f; /* Signals to handle, handle them */ |
867 | 874 | ||
868 | addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ | 875 | addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */ |
869 | addi r7, r0, 0; /* Arg 3: int in_syscall */ | 876 | addi r7, r0, 0; /* Arg 3: int in_syscall */ |
870 | bralid r15, do_signal; /* Handle any signals */ | 877 | bralid r15, do_signal; /* Handle any signals */ |
871 | add r6, r0, r0; /* Arg 2: sigset_t *oldset */ | 878 | add r6, r0, r0; /* Arg 2: sigset_t *oldset */ |
@@ -876,7 +883,7 @@ dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */ | |||
876 | tophys(r1,r1); | 883 | tophys(r1,r1); |
877 | /* MS: Restore all regs */ | 884 | /* MS: Restore all regs */ |
878 | RESTORE_REGS | 885 | RESTORE_REGS |
879 | addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space */ | 886 | addik r1, r1, PT_SIZE /* Clean up stack space */ |
880 | lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer */ | 887 | lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer */ |
881 | DBTRAP_return_user: /* MS: Make global symbol for debugging */ | 888 | DBTRAP_return_user: /* MS: Make global symbol for debugging */ |
882 | rtbd r16, 0; /* MS: Instructions to return from a debug trap */ | 889 | rtbd r16, 0; /* MS: Instructions to return from a debug trap */ |
@@ -887,9 +894,9 @@ DBTRAP_return_user: /* MS: Make global symbol for debugging */ | |||
887 | tophys(r1,r1); | 894 | tophys(r1,r1); |
888 | /* MS: Restore all regs */ | 895 | /* MS: Restore all regs */ |
889 | RESTORE_REGS | 896 | RESTORE_REGS |
890 | lwi r14, r1, PTO+PT_R14; | 897 | lwi r14, r1, PT_R14; |
891 | lwi r16, r1, PTO+PT_PC; | 898 | lwi r16, r1, PT_PC; |
892 | addik r1, r1, STATE_SAVE_SIZE; /* MS: Clean up stack space */ | 899 | addik r1, r1, PT_SIZE; /* MS: Clean up stack space */ |
893 | tovirt(r1,r1); | 900 | tovirt(r1,r1); |
894 | DBTRAP_return_kernel: /* MS: Make global symbol for debugging */ | 901 | DBTRAP_return_kernel: /* MS: Make global symbol for debugging */ |
895 | rtbd r16, 0; /* MS: Instructions to return from a debug trap */ | 902 | rtbd r16, 0; /* MS: Instructions to return from a debug trap */ |
@@ -981,20 +988,22 @@ ENTRY(_switch_to) | |||
981 | nop | 988 | nop |
982 | 989 | ||
983 | ENTRY(_reset) | 990 | ENTRY(_reset) |
984 | brai 0x70; /* Jump back to FS-boot */ | 991 | brai 0; /* Jump to reset vector */ |
985 | 992 | ||
986 | /* These are compiled and loaded into high memory, then | 993 | /* These are compiled and loaded into high memory, then |
987 | * copied into place in mach_early_setup */ | 994 | * copied into place in mach_early_setup */ |
988 | .section .init.ivt, "ax" | 995 | .section .init.ivt, "ax" |
996 | #if CONFIG_MANUAL_RESET_VECTOR | ||
989 | .org 0x0 | 997 | .org 0x0 |
990 | /* this is very important - here is the reset vector */ | 998 | brai CONFIG_MANUAL_RESET_VECTOR |
991 | /* in current MMU branch you don't care what is here - it is | 999 | #endif |
992 | * used from bootloader site - but this is correct for FS-BOOT */ | 1000 | .org 0x8 |
993 | brai 0x70 | ||
994 | nop | ||
995 | brai TOPHYS(_user_exception); /* syscall handler */ | 1001 | brai TOPHYS(_user_exception); /* syscall handler */ |
1002 | .org 0x10 | ||
996 | brai TOPHYS(_interrupt); /* Interrupt handler */ | 1003 | brai TOPHYS(_interrupt); /* Interrupt handler */ |
1004 | .org 0x18 | ||
997 | brai TOPHYS(_debug_exception); /* debug trap handler */ | 1005 | brai TOPHYS(_debug_exception); /* debug trap handler */ |
1006 | .org 0x20 | ||
998 | brai TOPHYS(_hw_exception_handler); /* HW exception handler */ | 1007 | brai TOPHYS(_hw_exception_handler); /* HW exception handler */ |
999 | 1008 | ||
1000 | .section .rodata,"a" | 1009 | .section .rodata,"a" |
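The interrupt vector table at the end of the file is now laid out with explicit .org directives instead of padding instructions, and the reset entry is only emitted when CONFIG_MANUAL_RESET_VECTOR is set (previously it always branched to the FS-BOOT address 0x70). The offsets used above, restated as a hypothetical C enum for reference (not part of the patch):

    enum microblaze_vector_offset {
            VEC_RESET        = 0x00,   /* emitted only for CONFIG_MANUAL_RESET_VECTOR */
            VEC_USER_EXC     = 0x08,   /* syscall trap -> _user_exception */
            VEC_INTERRUPT    = 0x10,   /*              -> _interrupt */
            VEC_DEBUG        = 0x18,   /* debug trap   -> _debug_exception */
            VEC_HW_EXCEPTION = 0x20,   /*              -> _hw_exception_handler */
    };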
diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c index a7fa6ae76d89..66fad2301221 100644 --- a/arch/microblaze/kernel/exceptions.c +++ b/arch/microblaze/kernel/exceptions.c | |||
@@ -50,7 +50,7 @@ void die(const char *str, struct pt_regs *fp, long err) | |||
50 | } | 50 | } |
51 | 51 | ||
52 | /* for user application debugging */ | 52 | /* for user application debugging */ |
53 | void sw_exception(struct pt_regs *regs) | 53 | asmlinkage void sw_exception(struct pt_regs *regs) |
54 | { | 54 | { |
55 | _exception(SIGTRAP, regs, TRAP_BRKPT, regs->r16); | 55 | _exception(SIGTRAP, regs, TRAP_BRKPT, regs->r16); |
56 | flush_dcache_range(regs->r16, regs->r16 + 0x4); | 56 | flush_dcache_range(regs->r16, regs->r16 + 0x4); |
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S index 778a5ce2e4fc..77320b8fc16a 100644 --- a/arch/microblaze/kernel/head.S +++ b/arch/microblaze/kernel/head.S | |||
@@ -39,7 +39,7 @@ | |||
39 | #include <asm/mmu.h> | 39 | #include <asm/mmu.h> |
40 | #include <asm/processor.h> | 40 | #include <asm/processor.h> |
41 | 41 | ||
42 | .data | 42 | .section .data |
43 | .global empty_zero_page | 43 | .global empty_zero_page |
44 | .align 12 | 44 | .align 12 |
45 | empty_zero_page: | 45 | empty_zero_page: |
@@ -50,6 +50,11 @@ swapper_pg_dir: | |||
50 | 50 | ||
51 | #endif /* CONFIG_MMU */ | 51 | #endif /* CONFIG_MMU */ |
52 | 52 | ||
53 | .section .rodata | ||
54 | .align 4 | ||
55 | endian_check: | ||
56 | .word 1 | ||
57 | |||
53 | __HEAD | 58 | __HEAD |
54 | ENTRY(_start) | 59 | ENTRY(_start) |
55 | #if CONFIG_KERNEL_BASE_ADDR == 0 | 60 | #if CONFIG_KERNEL_BASE_ADDR == 0 |
@@ -79,10 +84,7 @@ real_start: | |||
79 | /* Does r7 point to a valid FDT? Load HEADER magic number */ | 84 | /* Does r7 point to a valid FDT? Load HEADER magic number */ |
80 | /* Run time Big/Little endian platform */ | 85 | /* Run time Big/Little endian platform */ |
81 | /* Save 1 as word and load byte - 0 - BIG, 1 - LITTLE */ | 86 | /* Save 1 as word and load byte - 0 - BIG, 1 - LITTLE */ |
82 | addik r11, r0, 0x1 /* BIG/LITTLE checking value */ | 87 | lbui r11, r0, TOPHYS(endian_check) |
83 | /* __bss_start will be zeroed later - it is just temp location */ | ||
84 | swi r11, r0, TOPHYS(__bss_start) | ||
85 | lbui r11, r0, TOPHYS(__bss_start) | ||
86 | beqid r11, big_endian /* DO NOT break delay stop dependency */ | 88 | beqid r11, big_endian /* DO NOT break delay stop dependency */ |
87 | lw r11, r0, r7 /* Big endian load in delay slot */ | 89 | lw r11, r0, r7 /* Big endian load in delay slot */ |
88 | lwr r11, r0, r7 /* Little endian load */ | 90 | lwr r11, r0, r7 /* Little endian load */ |
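The run-time endianness probe no longer scribbles a test word into __bss_start; it reads the first byte of a word constant 1 placed in .rodata (endian_check) instead. An illustrative C version of the same trick (assumption: this is not the patch's code):

    static const unsigned int endian_check = 1;

    static int cpu_is_little_endian(void)
    {
            /* the low-order byte comes first in memory only on a little-endian CPU */
            return *(const unsigned char *)&endian_check == 1;
    }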
@@ -222,26 +224,26 @@ start_here: | |||
222 | #endif /* CONFIG_MMU */ | 224 | #endif /* CONFIG_MMU */ |
223 | 225 | ||
224 | /* Initialize small data anchors */ | 226 | /* Initialize small data anchors */ |
225 | la r13, r0, _KERNEL_SDA_BASE_ | 227 | addik r13, r0, _KERNEL_SDA_BASE_ |
226 | la r2, r0, _KERNEL_SDA2_BASE_ | 228 | addik r2, r0, _KERNEL_SDA2_BASE_ |
227 | 229 | ||
228 | /* Initialize stack pointer */ | 230 | /* Initialize stack pointer */ |
229 | la r1, r0, init_thread_union + THREAD_SIZE - 4 | 231 | addik r1, r0, init_thread_union + THREAD_SIZE - 4 |
230 | 232 | ||
231 | /* Initialize r31 with current task address */ | 233 | /* Initialize r31 with current task address */ |
232 | la r31, r0, init_task | 234 | addik r31, r0, init_task |
233 | 235 | ||
234 | /* | 236 | /* |
235 | * Call platform dependent initialize function. | 237 | * Call platform dependent initialize function. |
236 | * Please see $(ARCH)/mach-$(SUBARCH)/setup.c for | 238 | * Please see $(ARCH)/mach-$(SUBARCH)/setup.c for |
237 | * the function. | 239 | * the function. |
238 | */ | 240 | */ |
239 | la r9, r0, machine_early_init | 241 | addik r9, r0, machine_early_init |
240 | brald r15, r9 | 242 | brald r15, r9 |
241 | nop | 243 | nop |
242 | 244 | ||
243 | #ifndef CONFIG_MMU | 245 | #ifndef CONFIG_MMU |
244 | la r15, r0, machine_halt | 246 | addik r15, r0, machine_halt |
245 | braid start_kernel | 247 | braid start_kernel |
246 | nop | 248 | nop |
247 | #else | 249 | #else |
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S index 782680de3121..56572e923a83 100644 --- a/arch/microblaze/kernel/hw_exception_handler.S +++ b/arch/microblaze/kernel/hw_exception_handler.S | |||
@@ -77,6 +77,8 @@ | |||
77 | #include <asm/signal.h> | 77 | #include <asm/signal.h> |
78 | #include <asm/asm-offsets.h> | 78 | #include <asm/asm-offsets.h> |
79 | 79 | ||
80 | #undef DEBUG | ||
81 | |||
80 | /* Helpful Macros */ | 82 | /* Helpful Macros */ |
81 | #define NUM_TO_REG(num) r ## num | 83 | #define NUM_TO_REG(num) r ## num |
82 | 84 | ||
@@ -91,7 +93,7 @@ | |||
91 | lwi r6, r1, PT_R6; \ | 93 | lwi r6, r1, PT_R6; \ |
92 | lwi r11, r1, PT_R11; \ | 94 | lwi r11, r1, PT_R11; \ |
93 | lwi r31, r1, PT_R31; \ | 95 | lwi r31, r1, PT_R31; \ |
94 | lwi r1, r0, TOPHYS(r0_ram + 0); | 96 | lwi r1, r1, PT_R1; |
95 | #endif /* CONFIG_MMU */ | 97 | #endif /* CONFIG_MMU */ |
96 | 98 | ||
97 | #define LWREG_NOP \ | 99 | #define LWREG_NOP \ |
@@ -206,8 +208,8 @@ | |||
206 | * | . | | 208 | * | . | |
207 | * | . | | 209 | * | . | |
208 | * | 210 | * |
209 | * NO_MMU kernel uses the same r0_ram pointed space - look at vmlinux.lds.S | 211 | * MMU kernel uses the same 'pt_pool_space' pointed space |
210 | * which is used for storing register values - the old style was that values were | 212 | * which is used for storing register values - the noMMU style was that values were |
211 | * stored on the stack, but in case of a failure you lost the register information. | 213 | * stored on the stack, but in case of a failure you lost the register information. |
212 | * Currently you can see the register values in memory at a specific place. | 214 | * Currently you can see the register values in memory at a specific place. |
213 | * Compared with the previous solution, the speed should be the same. | 215 | * Compared with the previous solution, the speed should be the same. |
@@ -226,8 +228,22 @@ | |||
226 | */ | 228 | */ |
227 | 229 | ||
228 | /* wrappers to restore state before coming to entry.S */ | 230 | /* wrappers to restore state before coming to entry.S */ |
229 | |||
230 | #ifdef CONFIG_MMU | 231 | #ifdef CONFIG_MMU |
232 | .section .data | ||
233 | .align 4 | ||
234 | pt_pool_space: | ||
235 | .space PT_SIZE | ||
236 | |||
237 | #ifdef DEBUG | ||
238 | /* Create space for exception counting. */ | ||
239 | .section .data | ||
240 | .global exception_debug_table | ||
241 | .align 4 | ||
242 | exception_debug_table: | ||
243 | /* Look at the exception vector table. There are 32 exceptions * word size */ | ||
244 | .space (32 * 4) | ||
245 | #endif /* DEBUG */ | ||
246 | |||
231 | .section .rodata | 247 | .section .rodata |
232 | .align 4 | 248 | .align 4 |
233 | _MB_HW_ExceptionVectorTable: | 249 | _MB_HW_ExceptionVectorTable: |
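The MMU exception path previously parked register values in the r0_ram scratch area; it now reserves its own PT_SIZE block, pt_pool_space, in .data, plus (under DEBUG) a 32-word exception_debug_table with one counter per hardware exception vector. A rough C view of the storage reserved above (illustration only):

    static struct pt_regs pt_pool_space;            /* PT_SIZE bytes, accessed via TOPHYS() */
    #ifdef DEBUG
    static unsigned int exception_debug_table[32];  /* slot 0: total, slot N: vector N */
    #endif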
@@ -287,10 +303,10 @@ _hw_exception_handler: | |||
287 | #ifndef CONFIG_MMU | 303 | #ifndef CONFIG_MMU |
288 | addik r1, r1, -(EX_HANDLER_STACK_SIZ); /* Create stack frame */ | 304 | addik r1, r1, -(EX_HANDLER_STACK_SIZ); /* Create stack frame */ |
289 | #else | 305 | #else |
290 | swi r1, r0, TOPHYS(r0_ram + 0); /* GET_SP */ | 306 | swi r1, r0, TOPHYS(pt_pool_space + PT_R1); /* GET_SP */ |
291 | /* Save data to kernel memory. Here is the problem | 307 | /* Save data to kernel memory. Here is the problem |
292 | * when you come from user space */ | 308 | * when you come from user space */ |
293 | ori r1, r0, TOPHYS(r0_ram + 28); | 309 | ori r1, r0, TOPHYS(pt_pool_space); |
294 | #endif | 310 | #endif |
295 | swi r3, r1, PT_R3 | 311 | swi r3, r1, PT_R3 |
296 | swi r4, r1, PT_R4 | 312 | swi r4, r1, PT_R4 |
@@ -329,12 +345,12 @@ not_in_delay_slot: | |||
329 | 345 | ||
330 | #ifdef DEBUG | 346 | #ifdef DEBUG |
331 | /* counting which exception happen */ | 347 | /* counting which exception happen */ |
332 | lwi r5, r0, 0x200 + TOPHYS(r0_ram) | 348 | lwi r5, r0, TOPHYS(exception_debug_table) |
333 | addi r5, r5, 1 | 349 | addi r5, r5, 1 |
334 | swi r5, r0, 0x200 + TOPHYS(r0_ram) | 350 | swi r5, r0, TOPHYS(exception_debug_table) |
335 | lwi r5, r6, 0x200 + TOPHYS(r0_ram) | 351 | lwi r5, r6, TOPHYS(exception_debug_table) |
336 | addi r5, r5, 1 | 352 | addi r5, r5, 1 |
337 | swi r5, r6, 0x200 + TOPHYS(r0_ram) | 353 | swi r5, r6, TOPHYS(exception_debug_table) |
338 | #endif | 354 | #endif |
339 | /* end */ | 355 | /* end */ |
340 | /* Load the HW Exception vector */ | 356 | /* Load the HW Exception vector */ |
@@ -474,7 +490,7 @@ ex_lw_tail: | |||
474 | /* Get the destination register number into r5 */ | 490 | /* Get the destination register number into r5 */ |
475 | lbui r5, r0, TOPHYS(ex_reg_op); | 491 | lbui r5, r0, TOPHYS(ex_reg_op); |
476 | /* Form load_word jump table offset (lw_table + (8 * regnum)) */ | 492 | /* Form load_word jump table offset (lw_table + (8 * regnum)) */ |
477 | la r6, r0, TOPHYS(lw_table); | 493 | addik r6, r0, TOPHYS(lw_table); |
478 | addk r5, r5, r5; | 494 | addk r5, r5, r5; |
479 | addk r5, r5, r5; | 495 | addk r5, r5, r5; |
480 | addk r5, r5, r5; | 496 | addk r5, r5, r5; |
@@ -485,7 +501,7 @@ ex_sw: | |||
485 | /* Get the destination register number into r5 */ | 501 | /* Get the destination register number into r5 */ |
486 | lbui r5, r0, TOPHYS(ex_reg_op); | 502 | lbui r5, r0, TOPHYS(ex_reg_op); |
487 | /* Form store_word jump table offset (sw_table + (8 * regnum)) */ | 503 | /* Form store_word jump table offset (sw_table + (8 * regnum)) */ |
488 | la r6, r0, TOPHYS(sw_table); | 504 | addik r6, r0, TOPHYS(sw_table); |
489 | add r5, r5, r5; | 505 | add r5, r5, r5; |
490 | add r5, r5, r5; | 506 | add r5, r5, r5; |
491 | add r5, r5, r5; | 507 | add r5, r5, r5; |
@@ -896,7 +912,7 @@ ex_lw_vm: | |||
896 | beqid r6, ex_lhw_vm; | 912 | beqid r6, ex_lhw_vm; |
897 | load1: lbui r5, r4, 0; /* Exception address in r4 - delay slot */ | 913 | load1: lbui r5, r4, 0; /* Exception address in r4 - delay slot */ |
898 | /* Load a word, byte-by-byte from destination address and save it in tmp space*/ | 914 | /* Load a word, byte-by-byte from destination address and save it in tmp space*/ |
899 | la r6, r0, ex_tmp_data_loc_0; | 915 | addik r6, r0, ex_tmp_data_loc_0; |
900 | sbi r5, r6, 0; | 916 | sbi r5, r6, 0; |
901 | load2: lbui r5, r4, 1; | 917 | load2: lbui r5, r4, 1; |
902 | sbi r5, r6, 1; | 918 | sbi r5, r6, 1; |
@@ -910,7 +926,7 @@ load4: lbui r5, r4, 3; | |||
910 | ex_lhw_vm: | 926 | ex_lhw_vm: |
911 | /* Load a half-word, byte-by-byte from destination address and | 927 | /* Load a half-word, byte-by-byte from destination address and |
912 | * save it in tmp space */ | 928 | * save it in tmp space */ |
913 | la r6, r0, ex_tmp_data_loc_0; | 929 | addik r6, r0, ex_tmp_data_loc_0; |
914 | sbi r5, r6, 0; | 930 | sbi r5, r6, 0; |
915 | load5: lbui r5, r4, 1; | 931 | load5: lbui r5, r4, 1; |
916 | sbi r5, r6, 1; | 932 | sbi r5, r6, 1; |
@@ -926,7 +942,7 @@ ex_sw_vm: | |||
926 | addik r5, r8, sw_table_vm; | 942 | addik r5, r8, sw_table_vm; |
927 | bra r5; | 943 | bra r5; |
928 | ex_sw_tail_vm: | 944 | ex_sw_tail_vm: |
929 | la r5, r0, ex_tmp_data_loc_0; | 945 | addik r5, r0, ex_tmp_data_loc_0; |
930 | beqid r6, ex_shw_vm; | 946 | beqid r6, ex_shw_vm; |
931 | swi r3, r5, 0; /* Get the word - delay slot */ | 947 | swi r3, r5, 0; /* Get the word - delay slot */ |
932 | /* Store the word, byte-by-byte into destination address */ | 948 | /* Store the word, byte-by-byte into destination address */ |
@@ -969,7 +985,7 @@ ex_unaligned_fixup: | |||
969 | addik r7, r0, SIGSEGV | 985 | addik r7, r0, SIGSEGV |
970 | /* call bad_page_fault for finding aligned fixup, fixup address is saved | 986 | /* call bad_page_fault for finding aligned fixup, fixup address is saved |
971 | * in PT_PC which is used as return address from exception */ | 987 | * in PT_PC which is used as return address from exception */ |
972 | la r15, r0, ret_from_exc-8 /* setup return address */ | 988 | addik r15, r0, ret_from_exc-8 /* setup return address */ |
973 | brid bad_page_fault | 989 | brid bad_page_fault |
974 | nop | 990 | nop |
975 | 991 | ||
diff --git a/arch/microblaze/kernel/intc.c b/arch/microblaze/kernel/intc.c index d61ea33aff7c..e4661285118e 100644 --- a/arch/microblaze/kernel/intc.c +++ b/arch/microblaze/kernel/intc.c | |||
@@ -40,59 +40,46 @@ unsigned int nr_irq; | |||
40 | #define MER_ME (1<<0) | 40 | #define MER_ME (1<<0) |
41 | #define MER_HIE (1<<1) | 41 | #define MER_HIE (1<<1) |
42 | 42 | ||
43 | static void intc_enable_or_unmask(unsigned int irq) | 43 | static void intc_enable_or_unmask(struct irq_data *d) |
44 | { | 44 | { |
45 | unsigned long mask = 1 << irq; | 45 | unsigned long mask = 1 << d->irq; |
46 | pr_debug("enable_or_unmask: %d\n", irq); | 46 | pr_debug("enable_or_unmask: %d\n", d->irq); |
47 | out_be32(INTC_BASE + SIE, mask); | 47 | out_be32(INTC_BASE + SIE, mask); |
48 | 48 | ||
49 | /* ack level irqs because they can't be acked during | 49 | /* ack level irqs because they can't be acked during |
50 | * ack function since the handle_level_irq function | 50 | * ack function since the handle_level_irq function |
51 | * acks the irq before calling the interrupt handler | 51 | * acks the irq before calling the interrupt handler |
52 | */ | 52 | */ |
53 | if (irq_desc[irq].status & IRQ_LEVEL) | 53 | if (irq_to_desc(d->irq)->status & IRQ_LEVEL) |
54 | out_be32(INTC_BASE + IAR, mask); | 54 | out_be32(INTC_BASE + IAR, mask); |
55 | } | 55 | } |
56 | 56 | ||
57 | static void intc_disable_or_mask(unsigned int irq) | 57 | static void intc_disable_or_mask(struct irq_data *d) |
58 | { | 58 | { |
59 | pr_debug("disable: %d\n", irq); | 59 | pr_debug("disable: %d\n", d->irq); |
60 | out_be32(INTC_BASE + CIE, 1 << irq); | 60 | out_be32(INTC_BASE + CIE, 1 << d->irq); |
61 | } | 61 | } |
62 | 62 | ||
63 | static void intc_ack(unsigned int irq) | 63 | static void intc_ack(struct irq_data *d) |
64 | { | 64 | { |
65 | pr_debug("ack: %d\n", irq); | 65 | pr_debug("ack: %d\n", d->irq); |
66 | out_be32(INTC_BASE + IAR, 1 << irq); | 66 | out_be32(INTC_BASE + IAR, 1 << d->irq); |
67 | } | 67 | } |
68 | 68 | ||
69 | static void intc_mask_ack(unsigned int irq) | 69 | static void intc_mask_ack(struct irq_data *d) |
70 | { | 70 | { |
71 | unsigned long mask = 1 << irq; | 71 | unsigned long mask = 1 << d->irq; |
72 | pr_debug("disable_and_ack: %d\n", irq); | 72 | pr_debug("disable_and_ack: %d\n", d->irq); |
73 | out_be32(INTC_BASE + CIE, mask); | 73 | out_be32(INTC_BASE + CIE, mask); |
74 | out_be32(INTC_BASE + IAR, mask); | 74 | out_be32(INTC_BASE + IAR, mask); |
75 | } | 75 | } |
76 | 76 | ||
77 | static void intc_end(unsigned int irq) | ||
78 | { | ||
79 | unsigned long mask = 1 << irq; | ||
80 | pr_debug("end: %d\n", irq); | ||
81 | if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) { | ||
82 | out_be32(INTC_BASE + SIE, mask); | ||
83 | /* ack level sensitive intr */ | ||
84 | if (irq_desc[irq].status & IRQ_LEVEL) | ||
85 | out_be32(INTC_BASE + IAR, mask); | ||
86 | } | ||
87 | } | ||
88 | |||
89 | static struct irq_chip intc_dev = { | 77 | static struct irq_chip intc_dev = { |
90 | .name = "Xilinx INTC", | 78 | .name = "Xilinx INTC", |
91 | .unmask = intc_enable_or_unmask, | 79 | .irq_unmask = intc_enable_or_unmask, |
92 | .mask = intc_disable_or_mask, | 80 | .irq_mask = intc_disable_or_mask, |
93 | .ack = intc_ack, | 81 | .irq_ack = intc_ack, |
94 | .mask_ack = intc_mask_ack, | 82 | .irq_mask_ack = intc_mask_ack, |
95 | .end = intc_end, | ||
96 | }; | 83 | }; |
97 | 84 | ||
98 | unsigned int get_irq(struct pt_regs *regs) | 85 | unsigned int get_irq(struct pt_regs *regs) |
@@ -172,11 +159,11 @@ void __init init_IRQ(void) | |||
172 | if (intr_type & (0x00000001 << i)) { | 159 | if (intr_type & (0x00000001 << i)) { |
173 | set_irq_chip_and_handler_name(i, &intc_dev, | 160 | set_irq_chip_and_handler_name(i, &intc_dev, |
174 | handle_edge_irq, intc_dev.name); | 161 | handle_edge_irq, intc_dev.name); |
175 | irq_desc[i].status &= ~IRQ_LEVEL; | 162 | irq_clear_status_flags(i, IRQ_LEVEL); |
176 | } else { | 163 | } else { |
177 | set_irq_chip_and_handler_name(i, &intc_dev, | 164 | set_irq_chip_and_handler_name(i, &intc_dev, |
178 | handle_level_irq, intc_dev.name); | 165 | handle_level_irq, intc_dev.name); |
179 | irq_desc[i].status |= IRQ_LEVEL; | 166 | irq_set_status_flags(i, IRQ_LEVEL); |
180 | } | 167 | } |
181 | } | 168 | } |
182 | } | 169 | } |
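The controller is converted to the struct irq_data based irq_chip callbacks: .mask/.unmask/.ack/.mask_ack become .irq_mask/.irq_unmask/.irq_ack/.irq_mask_ack, the .end hook disappears because the generic flow handlers re-enable the line themselves, and the level/edge bookkeeping goes through irq_set_status_flags()/irq_clear_status_flags() instead of poking irq_desc[] directly. A minimal sketch of the signature change (the names below are illustrative, not from the patch):

    /* old style: callbacks received the bare irq number */
    static void example_mask_old(unsigned int irq) { /* ... */ }

    /* new style: callbacks receive struct irq_data and pull the number out of it */
    static void example_mask(struct irq_data *d)
    {
            unsigned int irq = d->irq;
            /* ... program the controller for 'irq' ... */
    }

    static struct irq_chip example_chip = {
            .name     = "example",
            .irq_mask = example_mask,
    };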
diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c index a9345fb4906a..098822413729 100644 --- a/arch/microblaze/kernel/irq.c +++ b/arch/microblaze/kernel/irq.c | |||
@@ -50,6 +50,7 @@ next_irq: | |||
50 | int show_interrupts(struct seq_file *p, void *v) | 50 | int show_interrupts(struct seq_file *p, void *v) |
51 | { | 51 | { |
52 | int i = *(loff_t *) v, j; | 52 | int i = *(loff_t *) v, j; |
53 | struct irq_desc *desc; | ||
53 | struct irqaction *action; | 54 | struct irqaction *action; |
54 | unsigned long flags; | 55 | unsigned long flags; |
55 | 56 | ||
@@ -61,8 +62,9 @@ int show_interrupts(struct seq_file *p, void *v) | |||
61 | } | 62 | } |
62 | 63 | ||
63 | if (i < nr_irq) { | 64 | if (i < nr_irq) { |
64 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); | 65 | desc = irq_to_desc(i); |
65 | action = irq_desc[i].action; | 66 | raw_spin_lock_irqsave(&desc->lock, flags); |
67 | action = desc->action; | ||
66 | if (!action) | 68 | if (!action) |
67 | goto skip; | 69 | goto skip; |
68 | seq_printf(p, "%3d: ", i); | 70 | seq_printf(p, "%3d: ", i); |
@@ -72,9 +74,9 @@ int show_interrupts(struct seq_file *p, void *v) | |||
72 | for_each_online_cpu(j) | 74 | for_each_online_cpu(j) |
73 | seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); | 75 | seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); |
74 | #endif | 76 | #endif |
75 | seq_printf(p, " %8s", irq_desc[i].status & | 77 | seq_printf(p, " %8s", desc->status & |
76 | IRQ_LEVEL ? "level" : "edge"); | 78 | IRQ_LEVEL ? "level" : "edge"); |
77 | seq_printf(p, " %8s", irq_desc[i].chip->name); | 79 | seq_printf(p, " %8s", desc->irq_data.chip->name); |
78 | seq_printf(p, " %s", action->name); | 80 | seq_printf(p, " %s", action->name); |
79 | 81 | ||
80 | for (action = action->next; action; action = action->next) | 82 | for (action = action->next; action; action = action->next) |
@@ -82,7 +84,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
82 | 84 | ||
83 | seq_putc(p, '\n'); | 85 | seq_putc(p, '\n'); |
84 | skip: | 86 | skip: |
85 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); | 87 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
86 | } | 88 | } |
87 | return 0; | 89 | return 0; |
88 | } | 90 | } |
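The /proc/interrupts output now looks descriptors up with irq_to_desc() rather than indexing the flat irq_desc[] array, and reads the chip name through desc->irq_data.chip. An illustrative helper built on the same accessors (hypothetical, not part of the patch):

    static const char *irq_flow_name(unsigned int irq)
    {
            struct irq_desc *desc = irq_to_desc(irq);

            return (desc->status & IRQ_LEVEL) ? "level" : "edge";
    }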
diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c index 5cb034174005..49faeb429599 100644 --- a/arch/microblaze/kernel/microblaze_ksyms.c +++ b/arch/microblaze/kernel/microblaze_ksyms.c | |||
@@ -24,6 +24,7 @@ | |||
24 | 24 | ||
25 | extern char *_ebss; | 25 | extern char *_ebss; |
26 | EXPORT_SYMBOL_GPL(_ebss); | 26 | EXPORT_SYMBOL_GPL(_ebss); |
27 | |||
27 | #ifdef CONFIG_FUNCTION_TRACER | 28 | #ifdef CONFIG_FUNCTION_TRACER |
28 | extern void _mcount(void); | 29 | extern void _mcount(void); |
29 | EXPORT_SYMBOL(_mcount); | 30 | EXPORT_SYMBOL(_mcount); |
@@ -45,3 +46,14 @@ EXPORT_SYMBOL(empty_zero_page); | |||
45 | #endif | 46 | #endif |
46 | 47 | ||
47 | EXPORT_SYMBOL(mbc); | 48 | EXPORT_SYMBOL(mbc); |
49 | |||
50 | extern void __divsi3(void); | ||
51 | EXPORT_SYMBOL(__divsi3); | ||
52 | extern void __modsi3(void); | ||
53 | EXPORT_SYMBOL(__modsi3); | ||
54 | extern void __mulsi3(void); | ||
55 | EXPORT_SYMBOL(__mulsi3); | ||
56 | extern void __udivsi3(void); | ||
57 | EXPORT_SYMBOL(__udivsi3); | ||
58 | extern void __umodsi3(void); | ||
59 | EXPORT_SYMBOL(__umodsi3); | ||
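The newly exported symbols are the libgcc-style soft divide/multiply/modulo helpers that gcc emits on MicroBlaze configurations without the corresponding hardware units; exporting them lets modules that simply use /, % or * link against the kernel. Illustrative module code (not from the patch):

    /* On a core without a hardware divider gcc lowers this '/' to a call
     * to __divsi3, so the symbol must be visible to modules. */
    static int average(int sum, int n)
    {
            return sum / n;         /* -> __divsi3(sum, n) */
    }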
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c index ba7c4b16ed35..968648a81c1e 100644 --- a/arch/microblaze/kernel/process.c +++ b/arch/microblaze/kernel/process.c | |||
@@ -159,7 +159,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, | |||
159 | } | 159 | } |
160 | 160 | ||
161 | /* FIXME STATE_SAVE_PT_OFFSET; */ | 161 | /* FIXME STATE_SAVE_PT_OFFSET; */ |
162 | ti->cpu_context.r1 = (unsigned long)childregs - STATE_SAVE_ARG_SPACE; | 162 | ti->cpu_context.r1 = (unsigned long)childregs; |
163 | /* we should consider the fact that childregs is a copy of the parent | 163 | /* we should consider the fact that childregs is a copy of the parent |
164 | * regs which were saved immediately after entering the kernel state | 164 | * regs which were saved immediately after entering the kernel state |
165 | * before enabling VM. This MSR will be restored in switch_to and | 165 | * before enabling VM. This MSR will be restored in switch_to and |
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c index bceaa5543e39..00ee90f08343 100644 --- a/arch/microblaze/kernel/prom.c +++ b/arch/microblaze/kernel/prom.c | |||
@@ -59,7 +59,7 @@ static int __init early_init_dt_scan_serial(unsigned long node, | |||
59 | { | 59 | { |
60 | unsigned long l; | 60 | unsigned long l; |
61 | char *p; | 61 | char *p; |
62 | int *addr; | 62 | const __be32 *addr; |
63 | 63 | ||
64 | pr_debug("search \"serial\", depth: %d, uname: %s\n", depth, uname); | 64 | pr_debug("search \"serial\", depth: %d, uname: %s\n", depth, uname); |
65 | 65 | ||
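Typing the property pointer as const __be32 * documents that flattened device-tree cells are stored big-endian and must be converted before use. A minimal, hypothetical consumer (be32_to_cpup() is the standard conversion helper):

    static unsigned long read_dt_cell(const __be32 *addr)
    {
            return be32_to_cpup(addr);      /* DT cells are big-endian in memory */
    }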
diff --git a/arch/microblaze/kernel/prom_parse.c b/arch/microblaze/kernel/prom_parse.c index 9ae24f4b882b..47187cc2cf00 100644 --- a/arch/microblaze/kernel/prom_parse.c +++ b/arch/microblaze/kernel/prom_parse.c | |||
@@ -2,88 +2,11 @@ | |||
2 | 2 | ||
3 | #include <linux/kernel.h> | 3 | #include <linux/kernel.h> |
4 | #include <linux/string.h> | 4 | #include <linux/string.h> |
5 | #include <linux/pci_regs.h> | ||
6 | #include <linux/module.h> | 5 | #include <linux/module.h> |
7 | #include <linux/ioport.h> | 6 | #include <linux/ioport.h> |
8 | #include <linux/etherdevice.h> | 7 | #include <linux/etherdevice.h> |
9 | #include <linux/of_address.h> | 8 | #include <linux/of_address.h> |
10 | #include <asm/prom.h> | 9 | #include <asm/prom.h> |
11 | #include <asm/pci-bridge.h> | ||
12 | |||
13 | #ifdef CONFIG_PCI | ||
14 | int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq) | ||
15 | { | ||
16 | struct device_node *dn, *ppnode; | ||
17 | struct pci_dev *ppdev; | ||
18 | u32 lspec; | ||
19 | u32 laddr[3]; | ||
20 | u8 pin; | ||
21 | int rc; | ||
22 | |||
23 | /* Check if we have a device node, if yes, fallback to standard OF | ||
24 | * parsing | ||
25 | */ | ||
26 | dn = pci_device_to_OF_node(pdev); | ||
27 | if (dn) | ||
28 | return of_irq_map_one(dn, 0, out_irq); | ||
29 | |||
30 | /* Ok, we don't, time to have fun. Let's start by building up an | ||
31 | * interrupt spec. we assume #interrupt-cells is 1, which is standard | ||
32 | * for PCI. If you do different, then don't use that routine. | ||
33 | */ | ||
34 | rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); | ||
35 | if (rc != 0) | ||
36 | return rc; | ||
37 | /* No pin, exit */ | ||
38 | if (pin == 0) | ||
39 | return -ENODEV; | ||
40 | |||
41 | /* Now we walk up the PCI tree */ | ||
42 | lspec = pin; | ||
43 | for (;;) { | ||
44 | /* Get the pci_dev of our parent */ | ||
45 | ppdev = pdev->bus->self; | ||
46 | |||
47 | /* Ouch, it's a host bridge... */ | ||
48 | if (ppdev == NULL) { | ||
49 | struct pci_controller *host; | ||
50 | host = pci_bus_to_host(pdev->bus); | ||
51 | ppnode = host ? host->dn : NULL; | ||
52 | /* No node for host bridge ? give up */ | ||
53 | if (ppnode == NULL) | ||
54 | return -EINVAL; | ||
55 | } else | ||
56 | /* We found a P2P bridge, check if it has a node */ | ||
57 | ppnode = pci_device_to_OF_node(ppdev); | ||
58 | |||
59 | /* Ok, we have found a parent with a device-node, hand over to | ||
60 | * the OF parsing code. | ||
61 | * We build a unit address from the linux device to be used for | ||
62 | * resolution. Note that we use the linux bus number which may | ||
63 | * not match your firmware bus numbering. | ||
64 | * Fortunately, in most cases, interrupt-map-mask doesn't | ||
65 | * include the bus number as part of the matching. | ||
66 | * You should still be careful about that though if you intend | ||
67 | * to rely on this function (you ship a firmware that doesn't | ||
68 | * create device nodes for all PCI devices). | ||
69 | */ | ||
70 | if (ppnode) | ||
71 | break; | ||
72 | |||
73 | /* We can only get here if we hit a P2P bridge with no node, | ||
74 | * let's do standard swizzling and try again | ||
75 | */ | ||
76 | lspec = pci_swizzle_interrupt_pin(pdev, lspec); | ||
77 | pdev = ppdev; | ||
78 | } | ||
79 | |||
80 | laddr[0] = (pdev->bus->number << 16) | ||
81 | | (pdev->devfn << 8); | ||
82 | laddr[1] = laddr[2] = 0; | ||
83 | return of_irq_map_raw(ppnode, &lspec, 1, laddr, out_irq); | ||
84 | } | ||
85 | EXPORT_SYMBOL_GPL(of_irq_map_pci); | ||
86 | #endif /* CONFIG_PCI */ | ||
87 | 10 | ||
88 | void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop, | 11 | void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop, |
89 | unsigned long *busno, unsigned long *phys, unsigned long *size) | 12 | unsigned long *busno, unsigned long *phys, unsigned long *size) |
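Note on the block removed above: the microblaze copy of of_irq_map_pci() duplicated generic OF interrupt-mapping code, and the only non-obvious step it performed was the standard INTx swizzle applied while walking up through P2P bridges that have no device node. A minimal sketch of that conventional swizzle rule, as a reading aid only (this is the textbook formula, not a quote of the kernel's pci_swizzle_interrupt_pin() body):

	/* Conventional PCI INTx swizzle: pin is 1..4 (INTA..INTD), slot is the
	 * device number on the bridge's secondary bus. Each transparent bridge
	 * level rotates the pin by the slot number. */
	static unsigned char intx_swizzle(unsigned char pin, unsigned int slot)
	{
		return (((pin - 1) + slot) % 4) + 1;
	}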
diff --git a/arch/microblaze/kernel/ptrace.c b/arch/microblaze/kernel/ptrace.c index 05ac8cc975d5..6a8e0cc5c57d 100644 --- a/arch/microblaze/kernel/ptrace.c +++ b/arch/microblaze/kernel/ptrace.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/uaccess.h> | 39 | #include <linux/uaccess.h> |
40 | #include <asm/asm-offsets.h> | 40 | #include <asm/asm-offsets.h> |
41 | #include <asm/cacheflush.h> | 41 | #include <asm/cacheflush.h> |
42 | #include <asm/syscall.h> | ||
42 | #include <asm/io.h> | 43 | #include <asm/io.h> |
43 | 44 | ||
44 | /* Returns the address where the register at REG_OFFS in P is stashed away. */ | 45 | /* Returns the address where the register at REG_OFFS in P is stashed away. */ |
@@ -123,7 +124,7 @@ long arch_ptrace(struct task_struct *child, long request, | |||
123 | rval = -EIO; | 124 | rval = -EIO; |
124 | 125 | ||
125 | if (rval == 0 && request == PTRACE_PEEKUSR) | 126 | if (rval == 0 && request == PTRACE_PEEKUSR) |
126 | rval = put_user(val, (unsigned long *)data); | 127 | rval = put_user(val, (unsigned long __user *)data); |
127 | break; | 128 | break; |
128 | default: | 129 | default: |
129 | rval = ptrace_request(child, request, addr, data); | 130 | rval = ptrace_request(child, request, addr, data); |
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c index 9312fbb37efd..8e2c09b7ff26 100644 --- a/arch/microblaze/kernel/setup.c +++ b/arch/microblaze/kernel/setup.c | |||
@@ -95,7 +95,8 @@ inline unsigned get_romfs_len(unsigned *addr) | |||
95 | void __init machine_early_init(const char *cmdline, unsigned int ram, | 95 | void __init machine_early_init(const char *cmdline, unsigned int ram, |
96 | unsigned int fdt, unsigned int msr) | 96 | unsigned int fdt, unsigned int msr) |
97 | { | 97 | { |
98 | unsigned long *src, *dst = (unsigned long *)0x0; | 98 | unsigned long *src, *dst; |
99 | unsigned int offset = 0; | ||
99 | 100 | ||
100 | /* If CONFIG_MTD_UCLINUX is defined, assume ROMFS is at the | 101 | /* If CONFIG_MTD_UCLINUX is defined, assume ROMFS is at the |
101 | * end of kernel. There are two position which we want to check. | 102 | * end of kernel. There are two position which we want to check. |
@@ -168,7 +169,14 @@ void __init machine_early_init(const char *cmdline, unsigned int ram, | |||
168 | "CPU have it %x\n", msr); | 169 | "CPU have it %x\n", msr); |
169 | #endif | 170 | #endif |
170 | 171 | ||
171 | for (src = __ivt_start; src < __ivt_end; src++, dst++) | 172 | /* Do not copy reset vectors. offset = 0x2 means skip the first |
173 | * two instructions. dst is a pointer to the MB vectors, which are placed | ||
174 | * in block RAM. If you want to copy the reset vector, set offset to 0x0 */ | ||
175 | #if !CONFIG_MANUAL_RESET_VECTOR | ||
176 | offset = 0x2; | ||
177 | #endif | ||
178 | dst = (unsigned long *) (offset * sizeof(u32)); | ||
179 | for (src = __ivt_start + offset; src < __ivt_end; src++, dst++) | ||
172 | *dst = *src; | 180 | *dst = *src; |
173 | 181 | ||
174 | /* Initialize global data */ | 182 | /* Initialize global data */ |
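To make the address arithmetic in the hunk above explicit: with the default offset of 2, copying starts at address 0x8, so the reset vector words at 0x0 and 0x4 (kept in block RAM) are never overwritten; with offset 0 the whole template, reset vector included, is copied to address 0x0 as before. A two-line illustration, assuming a 32-bit microblaze where sizeof(unsigned long) == sizeof(u32):

	unsigned int offset = 2;	/* stays 0 when CONFIG_MANUAL_RESET_VECTOR is set */
	unsigned long *dst = (unsigned long *)(offset * sizeof(u32));	/* == (void *)0x8 */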
diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c index d8d3bb396cd6..599671168980 100644 --- a/arch/microblaze/kernel/signal.c +++ b/arch/microblaze/kernel/signal.c | |||
@@ -93,7 +93,7 @@ static int restore_sigcontext(struct pt_regs *regs, | |||
93 | asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) | 93 | asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) |
94 | { | 94 | { |
95 | struct rt_sigframe __user *frame = | 95 | struct rt_sigframe __user *frame = |
96 | (struct rt_sigframe __user *)(regs->r1 + STATE_SAVE_ARG_SPACE); | 96 | (struct rt_sigframe __user *)(regs->r1); |
97 | 97 | ||
98 | sigset_t set; | 98 | sigset_t set; |
99 | int rval; | 99 | int rval; |
@@ -197,8 +197,8 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
197 | 197 | ||
198 | /* Create the ucontext. */ | 198 | /* Create the ucontext. */ |
199 | err |= __put_user(0, &frame->uc.uc_flags); | 199 | err |= __put_user(0, &frame->uc.uc_flags); |
200 | err |= __put_user(0, &frame->uc.uc_link); | 200 | err |= __put_user(NULL, &frame->uc.uc_link); |
201 | err |= __put_user((void *)current->sas_ss_sp, | 201 | err |= __put_user((void __user *)current->sas_ss_sp, |
202 | &frame->uc.uc_stack.ss_sp); | 202 | &frame->uc.uc_stack.ss_sp); |
203 | err |= __put_user(sas_ss_flags(regs->r1), | 203 | err |= __put_user(sas_ss_flags(regs->r1), |
204 | &frame->uc.uc_stack.ss_flags); | 204 | &frame->uc.uc_stack.ss_flags); |
@@ -247,7 +247,7 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
247 | goto give_sigsegv; | 247 | goto give_sigsegv; |
248 | 248 | ||
249 | /* Set up registers for signal handler */ | 249 | /* Set up registers for signal handler */ |
250 | regs->r1 = (unsigned long) frame - STATE_SAVE_ARG_SPACE; | 250 | regs->r1 = (unsigned long) frame; |
251 | 251 | ||
252 | /* Signal handler args: */ | 252 | /* Signal handler args: */ |
253 | regs->r5 = signal; /* arg 0: signum */ | 253 | regs->r5 = signal; /* arg 0: signum */ |
diff --git a/arch/microblaze/kernel/sys_microblaze.c b/arch/microblaze/kernel/sys_microblaze.c index 2250fe9d269b..e5b154f24f85 100644 --- a/arch/microblaze/kernel/sys_microblaze.c +++ b/arch/microblaze/kernel/sys_microblaze.c | |||
@@ -40,7 +40,8 @@ asmlinkage long microblaze_vfork(struct pt_regs *regs) | |||
40 | regs, 0, NULL, NULL); | 40 | regs, 0, NULL, NULL); |
41 | } | 41 | } |
42 | 42 | ||
43 | asmlinkage long microblaze_clone(int flags, unsigned long stack, struct pt_regs *regs) | 43 | asmlinkage long microblaze_clone(int flags, unsigned long stack, |
44 | struct pt_regs *regs) | ||
44 | { | 45 | { |
45 | if (!stack) | 46 | if (!stack) |
46 | stack = regs->r1; | 47 | stack = regs->r1; |
diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c index a5aa33db1df3..d8a214f11ac2 100644 --- a/arch/microblaze/kernel/timer.c +++ b/arch/microblaze/kernel/timer.c | |||
@@ -38,8 +38,8 @@ static unsigned int timer_baseaddr; | |||
38 | #define TIMER_BASE timer_baseaddr | 38 | #define TIMER_BASE timer_baseaddr |
39 | #endif | 39 | #endif |
40 | 40 | ||
41 | unsigned int freq_div_hz; | 41 | static unsigned int freq_div_hz; |
42 | unsigned int timer_clock_freq; | 42 | static unsigned int timer_clock_freq; |
43 | 43 | ||
44 | #define TCSR0 (0x00) | 44 | #define TCSR0 (0x00) |
45 | #define TLR0 (0x04) | 45 | #define TLR0 (0x04) |
@@ -202,7 +202,7 @@ static struct cyclecounter microblaze_cc = { | |||
202 | .shift = 8, | 202 | .shift = 8, |
203 | }; | 203 | }; |
204 | 204 | ||
205 | int __init init_microblaze_timecounter(void) | 205 | static int __init init_microblaze_timecounter(void) |
206 | { | 206 | { |
207 | microblaze_cc.mult = div_sc(timer_clock_freq, NSEC_PER_SEC, | 207 | microblaze_cc.mult = div_sc(timer_clock_freq, NSEC_PER_SEC, |
208 | microblaze_cc.shift); | 208 | microblaze_cc.shift); |
diff --git a/arch/microblaze/kernel/unwind.c b/arch/microblaze/kernel/unwind.c index fefac5c33586..9781a528cfc9 100644 --- a/arch/microblaze/kernel/unwind.c +++ b/arch/microblaze/kernel/unwind.c | |||
@@ -183,7 +183,7 @@ static inline void unwind_trap(struct task_struct *task, unsigned long pc, | |||
183 | * @trace : Where to store stack backtrace (PC values). | 183 | * @trace : Where to store stack backtrace (PC values). |
184 | * NULL == print backtrace to kernel log | 184 | * NULL == print backtrace to kernel log |
185 | */ | 185 | */ |
186 | void microblaze_unwind_inner(struct task_struct *task, | 186 | static void microblaze_unwind_inner(struct task_struct *task, |
187 | unsigned long pc, unsigned long fp, | 187 | unsigned long pc, unsigned long fp, |
188 | unsigned long leaf_return, | 188 | unsigned long leaf_return, |
189 | struct stack_trace *trace) | 189 | struct stack_trace *trace) |
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S index 3451bdec9f05..ac0e1a5d4782 100644 --- a/arch/microblaze/kernel/vmlinux.lds.S +++ b/arch/microblaze/kernel/vmlinux.lds.S | |||
@@ -70,11 +70,6 @@ SECTIONS { | |||
70 | RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE) | 70 | RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE) |
71 | _edata = . ; | 71 | _edata = . ; |
72 | 72 | ||
73 | /* Reserve some low RAM for r0 based memory references */ | ||
74 | . = ALIGN(0x4) ; | ||
75 | r0_ram = . ; | ||
76 | . = . + PAGE_SIZE; /* a page should be enough */ | ||
77 | |||
78 | /* Under the microblaze ABI, .sdata and .sbss must be contiguous */ | 73 | /* Under the microblaze ABI, .sdata and .sbss must be contiguous */ |
79 | . = ALIGN(8); | 74 | . = ALIGN(8); |
80 | .sdata : AT(ADDR(.sdata) - LOAD_OFFSET) { | 75 | .sdata : AT(ADDR(.sdata) - LOAD_OFFSET) { |
diff --git a/arch/microblaze/lib/muldi3.c b/arch/microblaze/lib/muldi3.c index d4860e154d29..0585bccb7fad 100644 --- a/arch/microblaze/lib/muldi3.c +++ b/arch/microblaze/lib/muldi3.c | |||
@@ -58,3 +58,4 @@ DWtype __muldi3(DWtype u, DWtype v) | |||
58 | 58 | ||
59 | return w.ll; | 59 | return w.ll; |
60 | } | 60 | } |
61 | EXPORT_SYMBOL(__muldi3); | ||
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c index 5a59dad62bd2..a1e2e18e0961 100644 --- a/arch/microblaze/mm/consistent.c +++ b/arch/microblaze/mm/consistent.c | |||
@@ -59,7 +59,7 @@ | |||
59 | * uncached region. This will no doubt cause big problems if memory allocated | 59 | * uncached region. This will no doubt cause big problems if memory allocated |
60 | * here is not also freed properly. -- JW | 60 | * here is not also freed properly. -- JW |
61 | */ | 61 | */ |
62 | void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle) | 62 | void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle) |
63 | { | 63 | { |
64 | unsigned long order, vaddr; | 64 | unsigned long order, vaddr; |
65 | void *ret; | 65 | void *ret; |
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c index 57bd2a09610c..ae97d2ccdc22 100644 --- a/arch/microblaze/mm/fault.c +++ b/arch/microblaze/mm/fault.c | |||
@@ -48,7 +48,7 @@ static int store_updates_sp(struct pt_regs *regs) | |||
48 | { | 48 | { |
49 | unsigned int inst; | 49 | unsigned int inst; |
50 | 50 | ||
51 | if (get_user(inst, (unsigned int *)regs->pc)) | 51 | if (get_user(inst, (unsigned int __user *)regs->pc)) |
52 | return 0; | 52 | return 0; |
53 | /* check for 1 in the rD field */ | 53 | /* check for 1 in the rD field */ |
54 | if (((inst >> 21) & 0x1f) != 1) | 54 | if (((inst >> 21) & 0x1f) != 1) |
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c index e363615d6798..1e01a1253631 100644 --- a/arch/microblaze/pci/pci-common.c +++ b/arch/microblaze/pci/pci-common.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
30 | #include <linux/of.h> | 30 | #include <linux/of.h> |
31 | #include <linux/of_address.h> | 31 | #include <linux/of_address.h> |
32 | #include <linux/of_pci.h> | ||
32 | 33 | ||
33 | #include <asm/processor.h> | 34 | #include <asm/processor.h> |
34 | #include <asm/io.h> | 35 | #include <asm/io.h> |
diff --git a/arch/microblaze/pci/pci_32.c b/arch/microblaze/pci/pci_32.c index 3c3d808d7ce0..92728a6cfd80 100644 --- a/arch/microblaze/pci/pci_32.c +++ b/arch/microblaze/pci/pci_32.c | |||
@@ -332,6 +332,7 @@ static void __devinit pcibios_scan_phb(struct pci_controller *hose) | |||
332 | hose->global_number); | 332 | hose->global_number); |
333 | return; | 333 | return; |
334 | } | 334 | } |
335 | bus->dev.of_node = of_node_get(node); | ||
335 | bus->secondary = hose->first_busno; | 336 | bus->secondary = hose->first_busno; |
336 | hose->bus = bus; | 337 | hose->bus = bus; |
337 | 338 | ||
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index f5ecc0566bc2..d88983516e26 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig | |||
@@ -4,6 +4,7 @@ config MIPS | |||
4 | select HAVE_GENERIC_DMA_COHERENT | 4 | select HAVE_GENERIC_DMA_COHERENT |
5 | select HAVE_IDE | 5 | select HAVE_IDE |
6 | select HAVE_OPROFILE | 6 | select HAVE_OPROFILE |
7 | select HAVE_IRQ_WORK | ||
7 | select HAVE_PERF_EVENTS | 8 | select HAVE_PERF_EVENTS |
8 | select PERF_USE_VMALLOC | 9 | select PERF_USE_VMALLOC |
9 | select HAVE_ARCH_KGDB | 10 | select HAVE_ARCH_KGDB |
@@ -208,6 +209,7 @@ config MACH_JZ4740 | |||
208 | select ARCH_REQUIRE_GPIOLIB | 209 | select ARCH_REQUIRE_GPIOLIB |
209 | select SYS_HAS_EARLY_PRINTK | 210 | select SYS_HAS_EARLY_PRINTK |
210 | select HAVE_PWM | 211 | select HAVE_PWM |
212 | select HAVE_CLK | ||
211 | 213 | ||
212 | config LASAT | 214 | config LASAT |
213 | bool "LASAT Networks platforms" | 215 | bool "LASAT Networks platforms" |
@@ -333,6 +335,8 @@ config PNX8550_STB810 | |||
333 | config PMC_MSP | 335 | config PMC_MSP |
334 | bool "PMC-Sierra MSP chipsets" | 336 | bool "PMC-Sierra MSP chipsets" |
335 | depends on EXPERIMENTAL | 337 | depends on EXPERIMENTAL |
338 | select CEVT_R4K | ||
339 | select CSRC_R4K | ||
336 | select DMA_NONCOHERENT | 340 | select DMA_NONCOHERENT |
337 | select SWAP_IO_SPACE | 341 | select SWAP_IO_SPACE |
338 | select NO_EXCEPT_FILL | 342 | select NO_EXCEPT_FILL |
diff --git a/arch/mips/alchemy/mtx-1/board_setup.c b/arch/mips/alchemy/mtx-1/board_setup.c index 6398fa95905c..40b84b991191 100644 --- a/arch/mips/alchemy/mtx-1/board_setup.c +++ b/arch/mips/alchemy/mtx-1/board_setup.c | |||
@@ -54,8 +54,8 @@ int mtx1_pci_idsel(unsigned int devsel, int assert); | |||
54 | 54 | ||
55 | static void mtx1_reset(char *c) | 55 | static void mtx1_reset(char *c) |
56 | { | 56 | { |
57 | /* Hit BCSR.SYSTEM_CONTROL[SW_RST] */ | 57 | /* Jump to the reset vector */ |
58 | au_writel(0x00000000, 0xAE00001C); | 58 | __asm__ __volatile__("jr\t%0"::"r"(0xbfc00000)); |
59 | } | 59 | } |
60 | 60 | ||
61 | static void mtx1_power_off(void) | 61 | static void mtx1_power_off(void) |
diff --git a/arch/mips/alchemy/mtx-1/platform.c b/arch/mips/alchemy/mtx-1/platform.c index e30e42add697..956f946218c5 100644 --- a/arch/mips/alchemy/mtx-1/platform.c +++ b/arch/mips/alchemy/mtx-1/platform.c | |||
@@ -28,6 +28,8 @@ | |||
28 | #include <linux/mtd/physmap.h> | 28 | #include <linux/mtd/physmap.h> |
29 | #include <mtd/mtd-abi.h> | 29 | #include <mtd/mtd-abi.h> |
30 | 30 | ||
31 | #include <asm/mach-au1x00/au1xxx_eth.h> | ||
32 | |||
31 | static struct gpio_keys_button mtx1_gpio_button[] = { | 33 | static struct gpio_keys_button mtx1_gpio_button[] = { |
32 | { | 34 | { |
33 | .gpio = 207, | 35 | .gpio = 207, |
@@ -140,10 +142,17 @@ static struct __initdata platform_device * mtx1_devs[] = { | |||
140 | &mtx1_mtd, | 142 | &mtx1_mtd, |
141 | }; | 143 | }; |
142 | 144 | ||
145 | static struct au1000_eth_platform_data mtx1_au1000_eth0_pdata = { | ||
146 | .phy_search_highest_addr = 1, | ||
147 | .phy1_search_mac0 = 1, | ||
148 | }; | ||
149 | |||
143 | static int __init mtx1_register_devices(void) | 150 | static int __init mtx1_register_devices(void) |
144 | { | 151 | { |
145 | int rc; | 152 | int rc; |
146 | 153 | ||
154 | au1xxx_override_eth_cfg(0, &mtx1_au1000_eth0_pdata); | ||
155 | |||
147 | rc = gpio_request(mtx1_gpio_button[0].gpio, | 156 | rc = gpio_request(mtx1_gpio_button[0].gpio, |
148 | mtx1_gpio_button[0].desc); | 157 | mtx1_gpio_button[0].desc); |
149 | if (rc < 0) { | 158 | if (rc < 0) { |
diff --git a/arch/mips/alchemy/xxs1500/board_setup.c b/arch/mips/alchemy/xxs1500/board_setup.c index b43c918925d3..80c521e5290d 100644 --- a/arch/mips/alchemy/xxs1500/board_setup.c +++ b/arch/mips/alchemy/xxs1500/board_setup.c | |||
@@ -36,8 +36,8 @@ | |||
36 | 36 | ||
37 | static void xxs1500_reset(char *c) | 37 | static void xxs1500_reset(char *c) |
38 | { | 38 | { |
39 | /* Hit BCSR.SYSTEM_CONTROL[SW_RST] */ | 39 | /* Jump to the reset vector */ |
40 | au_writel(0x00000000, 0xAE00001C); | 40 | __asm__ __volatile__("jr\t%0"::"r"(0xbfc00000)); |
41 | } | 41 | } |
42 | 42 | ||
43 | static void xxs1500_power_off(void) | 43 | static void xxs1500_power_off(void) |
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h index b9cce90346cf..6ebf1734b411 100644 --- a/arch/mips/include/asm/futex.h +++ b/arch/mips/include/asm/futex.h | |||
@@ -75,7 +75,7 @@ | |||
75 | } | 75 | } |
76 | 76 | ||
77 | static inline int | 77 | static inline int |
78 | futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | 78 | futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) |
79 | { | 79 | { |
80 | int op = (encoded_op >> 28) & 7; | 80 | int op = (encoded_op >> 28) & 7; |
81 | int cmp = (encoded_op >> 24) & 15; | 81 | int cmp = (encoded_op >> 24) & 15; |
@@ -85,7 +85,7 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | |||
85 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) | 85 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) |
86 | oparg = 1 << oparg; | 86 | oparg = 1 << oparg; |
87 | 87 | ||
88 | if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) | 88 | if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) |
89 | return -EFAULT; | 89 | return -EFAULT; |
90 | 90 | ||
91 | pagefault_disable(); | 91 | pagefault_disable(); |
@@ -132,11 +132,13 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | |||
132 | } | 132 | } |
133 | 133 | ||
134 | static inline int | 134 | static inline int |
135 | futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | 135 | futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, |
136 | u32 oldval, u32 newval) | ||
136 | { | 137 | { |
137 | int retval; | 138 | int ret = 0; |
139 | u32 val; | ||
138 | 140 | ||
139 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | 141 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
140 | return -EFAULT; | 142 | return -EFAULT; |
141 | 143 | ||
142 | if (cpu_has_llsc && R10000_LLSC_WAR) { | 144 | if (cpu_has_llsc && R10000_LLSC_WAR) { |
@@ -145,25 +147,25 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | |||
145 | " .set push \n" | 147 | " .set push \n" |
146 | " .set noat \n" | 148 | " .set noat \n" |
147 | " .set mips3 \n" | 149 | " .set mips3 \n" |
148 | "1: ll %0, %2 \n" | 150 | "1: ll %1, %3 \n" |
149 | " bne %0, %z3, 3f \n" | 151 | " bne %1, %z4, 3f \n" |
150 | " .set mips0 \n" | 152 | " .set mips0 \n" |
151 | " move $1, %z4 \n" | 153 | " move $1, %z5 \n" |
152 | " .set mips3 \n" | 154 | " .set mips3 \n" |
153 | "2: sc $1, %1 \n" | 155 | "2: sc $1, %2 \n" |
154 | " beqzl $1, 1b \n" | 156 | " beqzl $1, 1b \n" |
155 | __WEAK_LLSC_MB | 157 | __WEAK_LLSC_MB |
156 | "3: \n" | 158 | "3: \n" |
157 | " .set pop \n" | 159 | " .set pop \n" |
158 | " .section .fixup,\"ax\" \n" | 160 | " .section .fixup,\"ax\" \n" |
159 | "4: li %0, %5 \n" | 161 | "4: li %0, %6 \n" |
160 | " j 3b \n" | 162 | " j 3b \n" |
161 | " .previous \n" | 163 | " .previous \n" |
162 | " .section __ex_table,\"a\" \n" | 164 | " .section __ex_table,\"a\" \n" |
163 | " "__UA_ADDR "\t1b, 4b \n" | 165 | " "__UA_ADDR "\t1b, 4b \n" |
164 | " "__UA_ADDR "\t2b, 4b \n" | 166 | " "__UA_ADDR "\t2b, 4b \n" |
165 | " .previous \n" | 167 | " .previous \n" |
166 | : "=&r" (retval), "=R" (*uaddr) | 168 | : "+r" (ret), "=&r" (val), "=R" (*uaddr) |
167 | : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) | 169 | : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) |
168 | : "memory"); | 170 | : "memory"); |
169 | } else if (cpu_has_llsc) { | 171 | } else if (cpu_has_llsc) { |
@@ -172,31 +174,32 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | |||
172 | " .set push \n" | 174 | " .set push \n" |
173 | " .set noat \n" | 175 | " .set noat \n" |
174 | " .set mips3 \n" | 176 | " .set mips3 \n" |
175 | "1: ll %0, %2 \n" | 177 | "1: ll %1, %3 \n" |
176 | " bne %0, %z3, 3f \n" | 178 | " bne %1, %z4, 3f \n" |
177 | " .set mips0 \n" | 179 | " .set mips0 \n" |
178 | " move $1, %z4 \n" | 180 | " move $1, %z5 \n" |
179 | " .set mips3 \n" | 181 | " .set mips3 \n" |
180 | "2: sc $1, %1 \n" | 182 | "2: sc $1, %2 \n" |
181 | " beqz $1, 1b \n" | 183 | " beqz $1, 1b \n" |
182 | __WEAK_LLSC_MB | 184 | __WEAK_LLSC_MB |
183 | "3: \n" | 185 | "3: \n" |
184 | " .set pop \n" | 186 | " .set pop \n" |
185 | " .section .fixup,\"ax\" \n" | 187 | " .section .fixup,\"ax\" \n" |
186 | "4: li %0, %5 \n" | 188 | "4: li %0, %6 \n" |
187 | " j 3b \n" | 189 | " j 3b \n" |
188 | " .previous \n" | 190 | " .previous \n" |
189 | " .section __ex_table,\"a\" \n" | 191 | " .section __ex_table,\"a\" \n" |
190 | " "__UA_ADDR "\t1b, 4b \n" | 192 | " "__UA_ADDR "\t1b, 4b \n" |
191 | " "__UA_ADDR "\t2b, 4b \n" | 193 | " "__UA_ADDR "\t2b, 4b \n" |
192 | " .previous \n" | 194 | " .previous \n" |
193 | : "=&r" (retval), "=R" (*uaddr) | 195 | : "+r" (ret), "=&r" (val), "=R" (*uaddr) |
194 | : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) | 196 | : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) |
195 | : "memory"); | 197 | : "memory"); |
196 | } else | 198 | } else |
197 | return -ENOSYS; | 199 | return -ENOSYS; |
198 | 200 | ||
199 | return retval; | 201 | *uval = val; |
202 | return ret; | ||
200 | } | 203 | } |
201 | 204 | ||
202 | #endif | 205 | #endif |
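The signature change above follows the cross-architecture futex rework: futex_atomic_cmpxchg_inatomic() now reports success or failure through its return value and hands the value actually read from userspace back through the new uval pointer, instead of returning the old value directly. A hedged sketch of how a caller consumes the new convention (the helper name and the -EAGAIN policy are illustrative, not taken from this patch):

	/* Illustration of the new calling convention. */
	static int example_try_cmpxchg(u32 __user *uaddr, u32 expected, u32 newval)
	{
		u32 curval;
		int ret;

		ret = futex_atomic_cmpxchg_inatomic(&curval, uaddr, expected, newval);
		if (ret)
			return ret;		/* -EFAULT or -ENOSYS */
		if (curval != expected)
			return -EAGAIN;		/* raced; curval holds what was there */
		return 0;
	}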
diff --git a/arch/mips/include/asm/ioctls.h b/arch/mips/include/asm/ioctls.h index d967b8997626..92403c3d6007 100644 --- a/arch/mips/include/asm/ioctls.h +++ b/arch/mips/include/asm/ioctls.h | |||
@@ -85,6 +85,7 @@ | |||
85 | #define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */ | 85 | #define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */ |
86 | #define TIOCGDEV _IOR('T', 0x32, unsigned int) /* Get primary device node of /dev/console */ | 86 | #define TIOCGDEV _IOR('T', 0x32, unsigned int) /* Get primary device node of /dev/console */ |
87 | #define TIOCSIG _IOW('T', 0x36, int) /* Generate signal on Pty slave */ | 87 | #define TIOCSIG _IOW('T', 0x36, int) /* Generate signal on Pty slave */ |
88 | #define TIOCVHANGUP 0x5437 | ||
88 | 89 | ||
89 | /* I hope the range from 0x5480 on is free ... */ | 90 | /* I hope the range from 0x5480 on is free ... */ |
90 | #define TIOCSCTTY 0x5480 /* become controlling tty */ | 91 | #define TIOCSCTTY 0x5480 /* become controlling tty */ |
diff --git a/arch/mips/include/asm/perf_event.h b/arch/mips/include/asm/perf_event.h index e00007cf8162..d0c77496c728 100644 --- a/arch/mips/include/asm/perf_event.h +++ b/arch/mips/include/asm/perf_event.h | |||
@@ -11,15 +11,5 @@ | |||
11 | 11 | ||
12 | #ifndef __MIPS_PERF_EVENT_H__ | 12 | #ifndef __MIPS_PERF_EVENT_H__ |
13 | #define __MIPS_PERF_EVENT_H__ | 13 | #define __MIPS_PERF_EVENT_H__ |
14 | 14 | /* Leave it empty here. The file is required by linux/perf_event.h */ | |
15 | /* | ||
16 | * MIPS performance counters do not raise NMI upon overflow, a regular | ||
17 | * interrupt will be signaled. Hence we can do the pending perf event | ||
18 | * work at the tail of the irq handler. | ||
19 | */ | ||
20 | static inline void | ||
21 | set_perf_event_pending(void) | ||
22 | { | ||
23 | } | ||
24 | |||
25 | #endif /* __MIPS_PERF_EVENT_H__ */ | 15 | #endif /* __MIPS_PERF_EVENT_H__ */ |
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 5a84a1f11231..94ca2b018af7 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c | |||
@@ -17,29 +17,13 @@ | |||
17 | #include <asm/cacheflush.h> | 17 | #include <asm/cacheflush.h> |
18 | #include <asm/uasm.h> | 18 | #include <asm/uasm.h> |
19 | 19 | ||
20 | /* | 20 | #include <asm-generic/sections.h> |
21 | * If the Instruction Pointer is in module space (0xc0000000), return true; | ||
22 | * otherwise, it is in kernel space (0x80000000), return false. | ||
23 | * | ||
24 | * FIXME: This will not work when the kernel space and module space are the | ||
25 | * same. If they are the same, we need to modify scripts/recordmcount.pl, | ||
26 | * ftrace_make_nop/call() and the other related parts to ensure the | ||
27 | * enabling/disabling of the calling site to _mcount is right for both kernel | ||
28 | * and module. | ||
29 | */ | ||
30 | |||
31 | static inline int in_module(unsigned long ip) | ||
32 | { | ||
33 | return ip & 0x40000000; | ||
34 | } | ||
35 | 21 | ||
36 | #ifdef CONFIG_DYNAMIC_FTRACE | 22 | #ifdef CONFIG_DYNAMIC_FTRACE |
37 | 23 | ||
38 | #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ | 24 | #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ |
39 | #define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */ | 25 | #define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */ |
40 | 26 | ||
41 | #define INSN_B_1F_4 0x10000004 /* b 1f; offset = 4 */ | ||
42 | #define INSN_B_1F_5 0x10000005 /* b 1f; offset = 5 */ | ||
43 | #define INSN_NOP 0x00000000 /* nop */ | 27 | #define INSN_NOP 0x00000000 /* nop */ |
44 | #define INSN_JAL(addr) \ | 28 | #define INSN_JAL(addr) \ |
45 | ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK))) | 29 | ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK))) |
@@ -69,6 +53,20 @@ static inline void ftrace_dyn_arch_init_insns(void) | |||
69 | #endif | 53 | #endif |
70 | } | 54 | } |
71 | 55 | ||
56 | /* | ||
57 | * Check if the address is in kernel space | ||
58 | * | ||
59 | * Clone of core_kernel_text() from kernel/extable.c, but don't call | ||
60 | * init_kernel_text(), since ftrace doesn't trace functions in init sections. | ||
61 | */ | ||
62 | static inline int in_kernel_space(unsigned long ip) | ||
63 | { | ||
64 | if (ip >= (unsigned long)_stext && | ||
65 | ip <= (unsigned long)_etext) | ||
66 | return 1; | ||
67 | return 0; | ||
68 | } | ||
69 | |||
72 | static int ftrace_modify_code(unsigned long ip, unsigned int new_code) | 70 | static int ftrace_modify_code(unsigned long ip, unsigned int new_code) |
73 | { | 71 | { |
74 | int faulted; | 72 | int faulted; |
@@ -84,6 +82,42 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code) | |||
84 | return 0; | 82 | return 0; |
85 | } | 83 | } |
86 | 84 | ||
85 | /* | ||
86 | * The details about the calling site of mcount on MIPS | ||
87 | * | ||
88 | * 1. For kernel: | ||
89 | * | ||
90 | * move at, ra | ||
91 | * jal _mcount --> nop | ||
92 | * | ||
93 | * 2. For modules: | ||
94 | * | ||
95 | * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT | ||
96 | * | ||
97 | * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005) | ||
98 | * addiu v1, v1, low_16bit_of_mcount | ||
99 | * move at, ra | ||
100 | * move $12, ra_address | ||
101 | * jalr v1 | ||
102 | * sub sp, sp, 8 | ||
103 | * 1: offset = 5 instructions | ||
105 | * 2.2 For the other situations | ||
105 | * | ||
106 | * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004) | ||
107 | * addiu v1, v1, low_16bit_of_mcount | ||
108 | * move at, ra | ||
109 | * jalr v1 | ||
110 | * nop | move $12, ra_address | sub sp, sp, 8 | ||
111 | * 1: offset = 4 instructions | ||
112 | */ | ||
113 | |||
114 | #if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT) | ||
115 | #define MCOUNT_OFFSET_INSNS 5 | ||
116 | #else | ||
117 | #define MCOUNT_OFFSET_INSNS 4 | ||
118 | #endif | ||
119 | #define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS) | ||
120 | |||
87 | int ftrace_make_nop(struct module *mod, | 121 | int ftrace_make_nop(struct module *mod, |
88 | struct dyn_ftrace *rec, unsigned long addr) | 122 | struct dyn_ftrace *rec, unsigned long addr) |
89 | { | 123 | { |
@@ -91,39 +125,11 @@ int ftrace_make_nop(struct module *mod, | |||
91 | unsigned long ip = rec->ip; | 125 | unsigned long ip = rec->ip; |
92 | 126 | ||
93 | /* | 127 | /* |
94 | * We have compiled module with -mlong-calls, but compiled the kernel | 128 | * If ip is in kernel space, no long call, otherwise, long call is |
95 | * without it, we need to cope with them respectively. | 129 | * needed. |
96 | */ | 130 | */ |
97 | if (in_module(ip)) { | 131 | new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F; |
98 | #if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT) | 132 | |
99 | /* | ||
100 | * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005) | ||
101 | * addiu v1, v1, low_16bit_of_mcount | ||
102 | * move at, ra | ||
103 | * move $12, ra_address | ||
104 | * jalr v1 | ||
105 | * sub sp, sp, 8 | ||
106 | * 1: offset = 5 instructions | ||
107 | */ | ||
108 | new = INSN_B_1F_5; | ||
109 | #else | ||
110 | /* | ||
111 | * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004) | ||
112 | * addiu v1, v1, low_16bit_of_mcount | ||
113 | * move at, ra | ||
114 | * jalr v1 | ||
115 | * nop | move $12, ra_address | sub sp, sp, 8 | ||
116 | * 1: offset = 4 instructions | ||
117 | */ | ||
118 | new = INSN_B_1F_4; | ||
119 | #endif | ||
120 | } else { | ||
121 | /* | ||
122 | * move at, ra | ||
123 | * jal _mcount --> nop | ||
124 | */ | ||
125 | new = INSN_NOP; | ||
126 | } | ||
127 | return ftrace_modify_code(ip, new); | 133 | return ftrace_modify_code(ip, new); |
128 | } | 134 | } |
129 | 135 | ||
@@ -132,8 +138,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | |||
132 | unsigned int new; | 138 | unsigned int new; |
133 | unsigned long ip = rec->ip; | 139 | unsigned long ip = rec->ip; |
134 | 140 | ||
135 | /* ip, module: 0xc0000000, kernel: 0x80000000 */ | 141 | new = in_kernel_space(ip) ? insn_jal_ftrace_caller : |
136 | new = in_module(ip) ? insn_lui_v1_hi16_mcount : insn_jal_ftrace_caller; | 142 | insn_lui_v1_hi16_mcount; |
137 | 143 | ||
138 | return ftrace_modify_code(ip, new); | 144 | return ftrace_modify_code(ip, new); |
139 | } | 145 | } |
@@ -190,29 +196,25 @@ int ftrace_disable_ftrace_graph_caller(void) | |||
190 | #define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */ | 196 | #define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */ |
191 | #define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */ | 197 | #define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */ |
192 | 198 | ||
193 | unsigned long ftrace_get_parent_addr(unsigned long self_addr, | 199 | unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long |
194 | unsigned long parent, | 200 | old_parent_ra, unsigned long parent_ra_addr, unsigned long fp) |
195 | unsigned long parent_addr, | ||
196 | unsigned long fp) | ||
197 | { | 201 | { |
198 | unsigned long sp, ip, ra; | 202 | unsigned long sp, ip, tmp; |
199 | unsigned int code; | 203 | unsigned int code; |
200 | int faulted; | 204 | int faulted; |
201 | 205 | ||
202 | /* | 206 | /* |
203 | * For module, move the ip from calling site of mcount to the | 207 | * For module, move the ip from the return address after the |
204 | * instruction "lui v1, hi_16bit_of_mcount"(offset is 20), but for | 208 | * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for |
205 | * kernel, move to the instruction "move ra, at"(offset is 12) | 209 | * kernel, move after the instruction "move ra, at"(offset is 16) |
206 | */ | 210 | */ |
207 | ip = self_addr - (in_module(self_addr) ? 20 : 12); | 211 | ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24); |
208 | 212 | ||
209 | /* | 213 | /* |
210 | * search the text until finding the non-store instruction or "s{d,w} | 214 | * search the text until finding the non-store instruction or "s{d,w} |
211 | * ra, offset(sp)" instruction | 215 | * ra, offset(sp)" instruction |
212 | */ | 216 | */ |
213 | do { | 217 | do { |
214 | ip -= 4; | ||
215 | |||
216 | /* get the code at "ip": code = *(unsigned int *)ip; */ | 218 | /* get the code at "ip": code = *(unsigned int *)ip; */ |
217 | safe_load_code(code, ip, faulted); | 219 | safe_load_code(code, ip, faulted); |
218 | 220 | ||
@@ -224,18 +226,20 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr, | |||
224 | * store the ra on the stack | 226 | * store the ra on the stack |
225 | */ | 227 | */ |
226 | if ((code & S_R_SP) != S_R_SP) | 228 | if ((code & S_R_SP) != S_R_SP) |
227 | return parent_addr; | 229 | return parent_ra_addr; |
228 | 230 | ||
229 | } while (((code & S_RA_SP) != S_RA_SP)); | 231 | /* Move to the next instruction */ |
232 | ip -= 4; | ||
233 | } while ((code & S_RA_SP) != S_RA_SP); | ||
230 | 234 | ||
231 | sp = fp + (code & OFFSET_MASK); | 235 | sp = fp + (code & OFFSET_MASK); |
232 | 236 | ||
233 | /* ra = *(unsigned long *)sp; */ | 237 | /* tmp = *(unsigned long *)sp; */ |
234 | safe_load_stack(ra, sp, faulted); | 238 | safe_load_stack(tmp, sp, faulted); |
235 | if (unlikely(faulted)) | 239 | if (unlikely(faulted)) |
236 | return 0; | 240 | return 0; |
237 | 241 | ||
238 | if (ra == parent) | 242 | if (tmp == old_parent_ra) |
239 | return sp; | 243 | return sp; |
240 | return 0; | 244 | return 0; |
241 | } | 245 | } |
@@ -246,21 +250,21 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr, | |||
246 | * Hook the return address and push it in the stack of return addrs | 250 | * Hook the return address and push it in the stack of return addrs |
247 | * in current thread info. | 251 | * in current thread info. |
248 | */ | 252 | */ |
249 | void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, | 253 | void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra, |
250 | unsigned long fp) | 254 | unsigned long fp) |
251 | { | 255 | { |
252 | unsigned long old; | 256 | unsigned long old_parent_ra; |
253 | struct ftrace_graph_ent trace; | 257 | struct ftrace_graph_ent trace; |
254 | unsigned long return_hooker = (unsigned long) | 258 | unsigned long return_hooker = (unsigned long) |
255 | &return_to_handler; | 259 | &return_to_handler; |
256 | int faulted; | 260 | int faulted, insns; |
257 | 261 | ||
258 | if (unlikely(atomic_read(&current->tracing_graph_pause))) | 262 | if (unlikely(atomic_read(&current->tracing_graph_pause))) |
259 | return; | 263 | return; |
260 | 264 | ||
261 | /* | 265 | /* |
262 | * "parent" is the stack address saved the return address of the caller | 266 | * "parent_ra_addr" is the stack address saved the return address of |
263 | * of _mcount. | 267 | * the caller of _mcount. |
264 | * | 268 | * |
265 | * if the gcc < 4.5, a leaf function does not save the return address | 269 | * if the gcc < 4.5, a leaf function does not save the return address |
266 | * in the stack address, so, we "emulate" one in _mcount's stack space, | 270 | * in the stack address, so, we "emulate" one in _mcount's stack space, |
@@ -275,37 +279,44 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, | |||
275 | * do it in ftrace_graph_caller of mcount.S. | 279 | * do it in ftrace_graph_caller of mcount.S. |
276 | */ | 280 | */ |
277 | 281 | ||
278 | /* old = *parent; */ | 282 | /* old_parent_ra = *parent_ra_addr; */ |
279 | safe_load_stack(old, parent, faulted); | 283 | safe_load_stack(old_parent_ra, parent_ra_addr, faulted); |
280 | if (unlikely(faulted)) | 284 | if (unlikely(faulted)) |
281 | goto out; | 285 | goto out; |
282 | #ifndef KBUILD_MCOUNT_RA_ADDRESS | 286 | #ifndef KBUILD_MCOUNT_RA_ADDRESS |
283 | parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old, | 287 | parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra, |
284 | (unsigned long)parent, fp); | 288 | old_parent_ra, (unsigned long)parent_ra_addr, fp); |
285 | /* | 289 | /* |
286 | * If fails when getting the stack address of the non-leaf function's | 290 | * If fails when getting the stack address of the non-leaf function's |
287 | * ra, stop function graph tracer and return | 291 | * ra, stop function graph tracer and return |
288 | */ | 292 | */ |
289 | if (parent == 0) | 293 | if (parent_ra_addr == 0) |
290 | goto out; | 294 | goto out; |
291 | #endif | 295 | #endif |
292 | /* *parent = return_hooker; */ | 296 | /* *parent_ra_addr = return_hooker; */ |
293 | safe_store_stack(return_hooker, parent, faulted); | 297 | safe_store_stack(return_hooker, parent_ra_addr, faulted); |
294 | if (unlikely(faulted)) | 298 | if (unlikely(faulted)) |
295 | goto out; | 299 | goto out; |
296 | 300 | ||
297 | if (ftrace_push_return_trace(old, self_addr, &trace.depth, fp) == | 301 | if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp) |
298 | -EBUSY) { | 302 | == -EBUSY) { |
299 | *parent = old; | 303 | *parent_ra_addr = old_parent_ra; |
300 | return; | 304 | return; |
301 | } | 305 | } |
302 | 306 | ||
303 | trace.func = self_addr; | 307 | /* |
308 | * Get the recorded ip of the current mcount calling site in the | ||
309 | * __mcount_loc section, which will be used to filter the function | ||
310 | * entries configured through the tracing/set_graph_function interface. | ||
311 | */ | ||
312 | |||
313 | insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1; | ||
314 | trace.func = self_ra - (MCOUNT_INSN_SIZE * insns); | ||
304 | 315 | ||
305 | /* Only trace if the calling function expects to */ | 316 | /* Only trace if the calling function expects to */ |
306 | if (!ftrace_graph_entry(&trace)) { | 317 | if (!ftrace_graph_entry(&trace)) { |
307 | current->curr_ret_stack--; | 318 | current->curr_ret_stack--; |
308 | *parent = old; | 319 | *parent_ra_addr = old_parent_ra; |
309 | } | 320 | } |
310 | return; | 321 | return; |
311 | out: | 322 | out: |
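Two details of the rework above are easy to miss. First, call sites are now classified with in_kernel_space() instead of the fragile 0x40000000 address test. Second, the per-configuration INSN_B_1F_4 / INSN_B_1F_5 constants collapse into a single INSN_B_1F built from MCOUNT_OFFSET_INSNS. A small sketch of how that branch encoding comes together (0x10000000 is "beq $zero, $zero, 0", an unconditional branch; the low 16 bits hold the signed word offset measured from the delay slot):

	/* Sketch only: assemble a "branch over N instructions" word. */
	static unsigned int make_branch_over(unsigned int insns_to_skip)
	{
		return 0x10000000u | (insns_to_skip & 0xffff);
	}

	/* make_branch_over(4) == 0x10000004 and make_branch_over(5) == 0x10000005,
	 * matching the INSN_B_1F_4 / INSN_B_1F_5 constants this patch removes. */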
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c index 2b7f3f703b83..a8244854d3dc 100644 --- a/arch/mips/kernel/perf_event.c +++ b/arch/mips/kernel/perf_event.c | |||
@@ -161,41 +161,6 @@ mipspmu_event_set_period(struct perf_event *event, | |||
161 | return ret; | 161 | return ret; |
162 | } | 162 | } |
163 | 163 | ||
164 | static int mipspmu_enable(struct perf_event *event) | ||
165 | { | ||
166 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
167 | struct hw_perf_event *hwc = &event->hw; | ||
168 | int idx; | ||
169 | int err = 0; | ||
170 | |||
171 | /* To look for a free counter for this event. */ | ||
172 | idx = mipspmu->alloc_counter(cpuc, hwc); | ||
173 | if (idx < 0) { | ||
174 | err = idx; | ||
175 | goto out; | ||
176 | } | ||
177 | |||
178 | /* | ||
179 | * If there is an event in the counter we are going to use then | ||
180 | * make sure it is disabled. | ||
181 | */ | ||
182 | event->hw.idx = idx; | ||
183 | mipspmu->disable_event(idx); | ||
184 | cpuc->events[idx] = event; | ||
185 | |||
186 | /* Set the period for the event. */ | ||
187 | mipspmu_event_set_period(event, hwc, idx); | ||
188 | |||
189 | /* Enable the event. */ | ||
190 | mipspmu->enable_event(hwc, idx); | ||
191 | |||
192 | /* Propagate our changes to the userspace mapping. */ | ||
193 | perf_event_update_userpage(event); | ||
194 | |||
195 | out: | ||
196 | return err; | ||
197 | } | ||
198 | |||
199 | static void mipspmu_event_update(struct perf_event *event, | 164 | static void mipspmu_event_update(struct perf_event *event, |
200 | struct hw_perf_event *hwc, | 165 | struct hw_perf_event *hwc, |
201 | int idx) | 166 | int idx) |
@@ -204,7 +169,7 @@ static void mipspmu_event_update(struct perf_event *event, | |||
204 | unsigned long flags; | 169 | unsigned long flags; |
205 | int shift = 64 - TOTAL_BITS; | 170 | int shift = 64 - TOTAL_BITS; |
206 | s64 prev_raw_count, new_raw_count; | 171 | s64 prev_raw_count, new_raw_count; |
207 | s64 delta; | 172 | u64 delta; |
208 | 173 | ||
209 | again: | 174 | again: |
210 | prev_raw_count = local64_read(&hwc->prev_count); | 175 | prev_raw_count = local64_read(&hwc->prev_count); |
@@ -231,32 +196,90 @@ again: | |||
231 | return; | 196 | return; |
232 | } | 197 | } |
233 | 198 | ||
234 | static void mipspmu_disable(struct perf_event *event) | 199 | static void mipspmu_start(struct perf_event *event, int flags) |
200 | { | ||
201 | struct hw_perf_event *hwc = &event->hw; | ||
202 | |||
203 | if (!mipspmu) | ||
204 | return; | ||
205 | |||
206 | if (flags & PERF_EF_RELOAD) | ||
207 | WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); | ||
208 | |||
209 | hwc->state = 0; | ||
210 | |||
211 | /* Set the period for the event. */ | ||
212 | mipspmu_event_set_period(event, hwc, hwc->idx); | ||
213 | |||
214 | /* Enable the event. */ | ||
215 | mipspmu->enable_event(hwc, hwc->idx); | ||
216 | } | ||
217 | |||
218 | static void mipspmu_stop(struct perf_event *event, int flags) | ||
219 | { | ||
220 | struct hw_perf_event *hwc = &event->hw; | ||
221 | |||
222 | if (!mipspmu) | ||
223 | return; | ||
224 | |||
225 | if (!(hwc->state & PERF_HES_STOPPED)) { | ||
226 | /* We are working on a local event. */ | ||
227 | mipspmu->disable_event(hwc->idx); | ||
228 | barrier(); | ||
229 | mipspmu_event_update(event, hwc, hwc->idx); | ||
230 | hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; | ||
231 | } | ||
232 | } | ||
233 | |||
234 | static int mipspmu_add(struct perf_event *event, int flags) | ||
235 | { | 235 | { |
236 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 236 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
237 | struct hw_perf_event *hwc = &event->hw; | 237 | struct hw_perf_event *hwc = &event->hw; |
238 | int idx = hwc->idx; | 238 | int idx; |
239 | int err = 0; | ||
239 | 240 | ||
241 | perf_pmu_disable(event->pmu); | ||
240 | 242 | ||
241 | WARN_ON(idx < 0 || idx >= mipspmu->num_counters); | 243 | /* To look for a free counter for this event. */ |
244 | idx = mipspmu->alloc_counter(cpuc, hwc); | ||
245 | if (idx < 0) { | ||
246 | err = idx; | ||
247 | goto out; | ||
248 | } | ||
242 | 249 | ||
243 | /* We are working on a local event. */ | 250 | /* |
251 | * If there is an event in the counter we are going to use then | ||
252 | * make sure it is disabled. | ||
253 | */ | ||
254 | event->hw.idx = idx; | ||
244 | mipspmu->disable_event(idx); | 255 | mipspmu->disable_event(idx); |
256 | cpuc->events[idx] = event; | ||
245 | 257 | ||
246 | barrier(); | 258 | hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; |
247 | 259 | if (flags & PERF_EF_START) | |
248 | mipspmu_event_update(event, hwc, idx); | 260 | mipspmu_start(event, PERF_EF_RELOAD); |
249 | cpuc->events[idx] = NULL; | ||
250 | clear_bit(idx, cpuc->used_mask); | ||
251 | 261 | ||
262 | /* Propagate our changes to the userspace mapping. */ | ||
252 | perf_event_update_userpage(event); | 263 | perf_event_update_userpage(event); |
264 | |||
265 | out: | ||
266 | perf_pmu_enable(event->pmu); | ||
267 | return err; | ||
253 | } | 268 | } |
254 | 269 | ||
255 | static void mipspmu_unthrottle(struct perf_event *event) | 270 | static void mipspmu_del(struct perf_event *event, int flags) |
256 | { | 271 | { |
272 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
257 | struct hw_perf_event *hwc = &event->hw; | 273 | struct hw_perf_event *hwc = &event->hw; |
274 | int idx = hwc->idx; | ||
258 | 275 | ||
259 | mipspmu->enable_event(hwc, hwc->idx); | 276 | WARN_ON(idx < 0 || idx >= mipspmu->num_counters); |
277 | |||
278 | mipspmu_stop(event, PERF_EF_UPDATE); | ||
279 | cpuc->events[idx] = NULL; | ||
280 | clear_bit(idx, cpuc->used_mask); | ||
281 | |||
282 | perf_event_update_userpage(event); | ||
260 | } | 283 | } |
261 | 284 | ||
262 | static void mipspmu_read(struct perf_event *event) | 285 | static void mipspmu_read(struct perf_event *event) |
@@ -270,12 +293,17 @@ static void mipspmu_read(struct perf_event *event) | |||
270 | mipspmu_event_update(event, hwc, hwc->idx); | 293 | mipspmu_event_update(event, hwc, hwc->idx); |
271 | } | 294 | } |
272 | 295 | ||
273 | static struct pmu pmu = { | 296 | static void mipspmu_enable(struct pmu *pmu) |
274 | .enable = mipspmu_enable, | 297 | { |
275 | .disable = mipspmu_disable, | 298 | if (mipspmu) |
276 | .unthrottle = mipspmu_unthrottle, | 299 | mipspmu->start(); |
277 | .read = mipspmu_read, | 300 | } |
278 | }; | 301 | |
302 | static void mipspmu_disable(struct pmu *pmu) | ||
303 | { | ||
304 | if (mipspmu) | ||
305 | mipspmu->stop(); | ||
306 | } | ||
279 | 307 | ||
280 | static atomic_t active_events = ATOMIC_INIT(0); | 308 | static atomic_t active_events = ATOMIC_INIT(0); |
281 | static DEFINE_MUTEX(pmu_reserve_mutex); | 309 | static DEFINE_MUTEX(pmu_reserve_mutex); |
@@ -318,6 +346,82 @@ static void mipspmu_free_irq(void) | |||
318 | perf_irq = save_perf_irq; | 346 | perf_irq = save_perf_irq; |
319 | } | 347 | } |
320 | 348 | ||
349 | /* | ||
350 | * mipsxx/rm9000/loongson2 have different performance counters, they have | ||
351 | * specific low-level init routines. | ||
352 | */ | ||
353 | static void reset_counters(void *arg); | ||
354 | static int __hw_perf_event_init(struct perf_event *event); | ||
355 | |||
356 | static void hw_perf_event_destroy(struct perf_event *event) | ||
357 | { | ||
358 | if (atomic_dec_and_mutex_lock(&active_events, | ||
359 | &pmu_reserve_mutex)) { | ||
360 | /* | ||
361 | * We must not call the destroy function with interrupts | ||
362 | * disabled. | ||
363 | */ | ||
364 | on_each_cpu(reset_counters, | ||
365 | (void *)(long)mipspmu->num_counters, 1); | ||
366 | mipspmu_free_irq(); | ||
367 | mutex_unlock(&pmu_reserve_mutex); | ||
368 | } | ||
369 | } | ||
370 | |||
371 | static int mipspmu_event_init(struct perf_event *event) | ||
372 | { | ||
373 | int err = 0; | ||
374 | |||
375 | switch (event->attr.type) { | ||
376 | case PERF_TYPE_RAW: | ||
377 | case PERF_TYPE_HARDWARE: | ||
378 | case PERF_TYPE_HW_CACHE: | ||
379 | break; | ||
380 | |||
381 | default: | ||
382 | return -ENOENT; | ||
383 | } | ||
384 | |||
385 | if (!mipspmu || event->cpu >= nr_cpumask_bits || | ||
386 | (event->cpu >= 0 && !cpu_online(event->cpu))) | ||
387 | return -ENODEV; | ||
388 | |||
389 | if (!atomic_inc_not_zero(&active_events)) { | ||
390 | if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) { | ||
391 | atomic_dec(&active_events); | ||
392 | return -ENOSPC; | ||
393 | } | ||
394 | |||
395 | mutex_lock(&pmu_reserve_mutex); | ||
396 | if (atomic_read(&active_events) == 0) | ||
397 | err = mipspmu_get_irq(); | ||
398 | |||
399 | if (!err) | ||
400 | atomic_inc(&active_events); | ||
401 | mutex_unlock(&pmu_reserve_mutex); | ||
402 | } | ||
403 | |||
404 | if (err) | ||
405 | return err; | ||
406 | |||
407 | err = __hw_perf_event_init(event); | ||
408 | if (err) | ||
409 | hw_perf_event_destroy(event); | ||
410 | |||
411 | return err; | ||
412 | } | ||
413 | |||
414 | static struct pmu pmu = { | ||
415 | .pmu_enable = mipspmu_enable, | ||
416 | .pmu_disable = mipspmu_disable, | ||
417 | .event_init = mipspmu_event_init, | ||
418 | .add = mipspmu_add, | ||
419 | .del = mipspmu_del, | ||
420 | .start = mipspmu_start, | ||
421 | .stop = mipspmu_stop, | ||
422 | .read = mipspmu_read, | ||
423 | }; | ||
424 | |||
321 | static inline unsigned int | 425 | static inline unsigned int |
322 | mipspmu_perf_event_encode(const struct mips_perf_event *pev) | 426 | mipspmu_perf_event_encode(const struct mips_perf_event *pev) |
323 | { | 427 | { |
@@ -382,8 +486,9 @@ static int validate_event(struct cpu_hw_events *cpuc, | |||
382 | { | 486 | { |
383 | struct hw_perf_event fake_hwc = event->hw; | 487 | struct hw_perf_event fake_hwc = event->hw; |
384 | 488 | ||
385 | if (event->pmu && event->pmu != &pmu) | 489 | /* Allow mixed event group. So return 1 to pass validation. */ |
386 | return 0; | 490 | if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF) |
491 | return 1; | ||
387 | 492 | ||
388 | return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0; | 493 | return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0; |
389 | } | 494 | } |
@@ -409,73 +514,6 @@ static int validate_group(struct perf_event *event) | |||
409 | return 0; | 514 | return 0; |
410 | } | 515 | } |
411 | 516 | ||
412 | /* | ||
413 | * mipsxx/rm9000/loongson2 have different performance counters, they have | ||
414 | * specific low-level init routines. | ||
415 | */ | ||
416 | static void reset_counters(void *arg); | ||
417 | static int __hw_perf_event_init(struct perf_event *event); | ||
418 | |||
419 | static void hw_perf_event_destroy(struct perf_event *event) | ||
420 | { | ||
421 | if (atomic_dec_and_mutex_lock(&active_events, | ||
422 | &pmu_reserve_mutex)) { | ||
423 | /* | ||
424 | * We must not call the destroy function with interrupts | ||
425 | * disabled. | ||
426 | */ | ||
427 | on_each_cpu(reset_counters, | ||
428 | (void *)(long)mipspmu->num_counters, 1); | ||
429 | mipspmu_free_irq(); | ||
430 | mutex_unlock(&pmu_reserve_mutex); | ||
431 | } | ||
432 | } | ||
433 | |||
434 | const struct pmu *hw_perf_event_init(struct perf_event *event) | ||
435 | { | ||
436 | int err = 0; | ||
437 | |||
438 | if (!mipspmu || event->cpu >= nr_cpumask_bits || | ||
439 | (event->cpu >= 0 && !cpu_online(event->cpu))) | ||
440 | return ERR_PTR(-ENODEV); | ||
441 | |||
442 | if (!atomic_inc_not_zero(&active_events)) { | ||
443 | if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) { | ||
444 | atomic_dec(&active_events); | ||
445 | return ERR_PTR(-ENOSPC); | ||
446 | } | ||
447 | |||
448 | mutex_lock(&pmu_reserve_mutex); | ||
449 | if (atomic_read(&active_events) == 0) | ||
450 | err = mipspmu_get_irq(); | ||
451 | |||
452 | if (!err) | ||
453 | atomic_inc(&active_events); | ||
454 | mutex_unlock(&pmu_reserve_mutex); | ||
455 | } | ||
456 | |||
457 | if (err) | ||
458 | return ERR_PTR(err); | ||
459 | |||
460 | err = __hw_perf_event_init(event); | ||
461 | if (err) | ||
462 | hw_perf_event_destroy(event); | ||
463 | |||
464 | return err ? ERR_PTR(err) : &pmu; | ||
465 | } | ||
466 | |||
467 | void hw_perf_enable(void) | ||
468 | { | ||
469 | if (mipspmu) | ||
470 | mipspmu->start(); | ||
471 | } | ||
472 | |||
473 | void hw_perf_disable(void) | ||
474 | { | ||
475 | if (mipspmu) | ||
476 | mipspmu->stop(); | ||
477 | } | ||
478 | |||
479 | /* This is needed by specific irq handlers in perf_event_*.c */ | 517 | /* This is needed by specific irq handlers in perf_event_*.c */ |
480 | static void | 518 | static void |
481 | handle_associated_event(struct cpu_hw_events *cpuc, | 519 | handle_associated_event(struct cpu_hw_events *cpuc, |
@@ -496,21 +534,13 @@ handle_associated_event(struct cpu_hw_events *cpuc, | |||
496 | #include "perf_event_mipsxx.c" | 534 | #include "perf_event_mipsxx.c" |
497 | 535 | ||
498 | /* Callchain handling code. */ | 536 | /* Callchain handling code. */ |
499 | static inline void | ||
500 | callchain_store(struct perf_callchain_entry *entry, | ||
501 | u64 ip) | ||
502 | { | ||
503 | if (entry->nr < PERF_MAX_STACK_DEPTH) | ||
504 | entry->ip[entry->nr++] = ip; | ||
505 | } | ||
506 | 537 | ||
507 | /* | 538 | /* |
508 | * Leave userspace callchain empty for now. When we find a way to trace | 539 | * Leave userspace callchain empty for now. When we find a way to trace |
509 | * the user stack callchains, we add here. | 540 | * the user stack callchains, we add here. |
510 | */ | 541 | */ |
511 | static void | 542 | void perf_callchain_user(struct perf_callchain_entry *entry, |
512 | perf_callchain_user(struct pt_regs *regs, | 543 | struct pt_regs *regs) |
513 | struct perf_callchain_entry *entry) | ||
514 | { | 544 | { |
515 | } | 545 | } |
516 | 546 | ||
@@ -523,23 +553,21 @@ static void save_raw_perf_callchain(struct perf_callchain_entry *entry, | |||
523 | while (!kstack_end(sp)) { | 553 | while (!kstack_end(sp)) { |
524 | addr = *sp++; | 554 | addr = *sp++; |
525 | if (__kernel_text_address(addr)) { | 555 | if (__kernel_text_address(addr)) { |
526 | callchain_store(entry, addr); | 556 | perf_callchain_store(entry, addr); |
527 | if (entry->nr >= PERF_MAX_STACK_DEPTH) | 557 | if (entry->nr >= PERF_MAX_STACK_DEPTH) |
528 | break; | 558 | break; |
529 | } | 559 | } |
530 | } | 560 | } |
531 | } | 561 | } |
532 | 562 | ||
533 | static void | 563 | void perf_callchain_kernel(struct perf_callchain_entry *entry, |
534 | perf_callchain_kernel(struct pt_regs *regs, | 564 | struct pt_regs *regs) |
535 | struct perf_callchain_entry *entry) | ||
536 | { | 565 | { |
537 | unsigned long sp = regs->regs[29]; | 566 | unsigned long sp = regs->regs[29]; |
538 | #ifdef CONFIG_KALLSYMS | 567 | #ifdef CONFIG_KALLSYMS |
539 | unsigned long ra = regs->regs[31]; | 568 | unsigned long ra = regs->regs[31]; |
540 | unsigned long pc = regs->cp0_epc; | 569 | unsigned long pc = regs->cp0_epc; |
541 | 570 | ||
542 | callchain_store(entry, PERF_CONTEXT_KERNEL); | ||
543 | if (raw_show_trace || !__kernel_text_address(pc)) { | 571 | if (raw_show_trace || !__kernel_text_address(pc)) { |
544 | unsigned long stack_page = | 572 | unsigned long stack_page = |
545 | (unsigned long)task_stack_page(current); | 573 | (unsigned long)task_stack_page(current); |
@@ -549,53 +577,12 @@ perf_callchain_kernel(struct pt_regs *regs, | |||
549 | return; | 577 | return; |
550 | } | 578 | } |
551 | do { | 579 | do { |
552 | callchain_store(entry, pc); | 580 | perf_callchain_store(entry, pc); |
553 | if (entry->nr >= PERF_MAX_STACK_DEPTH) | 581 | if (entry->nr >= PERF_MAX_STACK_DEPTH) |
554 | break; | 582 | break; |
555 | pc = unwind_stack(current, &sp, pc, &ra); | 583 | pc = unwind_stack(current, &sp, pc, &ra); |
556 | } while (pc); | 584 | } while (pc); |
557 | #else | 585 | #else |
558 | callchain_store(entry, PERF_CONTEXT_KERNEL); | ||
559 | save_raw_perf_callchain(entry, sp); | 586 | save_raw_perf_callchain(entry, sp); |
560 | #endif | 587 | #endif |
561 | } | 588 | } |
562 | |||
563 | static void | ||
564 | perf_do_callchain(struct pt_regs *regs, | ||
565 | struct perf_callchain_entry *entry) | ||
566 | { | ||
567 | int is_user; | ||
568 | |||
569 | if (!regs) | ||
570 | return; | ||
571 | |||
572 | is_user = user_mode(regs); | ||
573 | |||
574 | if (!current || !current->pid) | ||
575 | return; | ||
576 | |||
577 | if (is_user && current->state != TASK_RUNNING) | ||
578 | return; | ||
579 | |||
580 | if (!is_user) { | ||
581 | perf_callchain_kernel(regs, entry); | ||
582 | if (current->mm) | ||
583 | regs = task_pt_regs(current); | ||
584 | else | ||
585 | regs = NULL; | ||
586 | } | ||
587 | if (regs) | ||
588 | perf_callchain_user(regs, entry); | ||
589 | } | ||
590 | |||
591 | static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry); | ||
592 | |||
593 | struct perf_callchain_entry * | ||
594 | perf_callchain(struct pt_regs *regs) | ||
595 | { | ||
596 | struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry); | ||
597 | |||
598 | entry->nr = 0; | ||
599 | perf_do_callchain(regs, entry); | ||
600 | return entry; | ||
601 | } | ||
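Most of this file's churn is the conversion from the old hw_perf_event_init()/hw_perf_enable()/hw_perf_disable() entry points to a self-describing struct pmu registered with perf_pmu_register(). As a reading aid, the same callback table from the hunk above, re-listed with the role each hook plays; the role notes are inferred from the function bodies above, not quoted from perf core documentation:

	static struct pmu pmu = {
		.pmu_enable	= mipspmu_enable,	/* turn all counters on */
		.pmu_disable	= mipspmu_disable,	/* turn all counters off */
		.event_init	= mipspmu_event_init,	/* validate event, grab the PMU irq */
		.add		= mipspmu_add,		/* bind the event to a free counter */
		.del		= mipspmu_del,		/* stop it and free the counter slot */
		.start		= mipspmu_start,	/* program the period, enable the counter */
		.stop		= mipspmu_stop,		/* disable the counter, fold the count */
		.read		= mipspmu_read,		/* sync the hardware count into the event */
	};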
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index 183e0d226669..d9a7db78ed62 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c | |||
@@ -696,7 +696,7 @@ static int mipsxx_pmu_handle_shared_irq(void) | |||
696 | * interrupt, not NMI. | 696 | * interrupt, not NMI. |
697 | */ | 697 | */ |
698 | if (handled == IRQ_HANDLED) | 698 | if (handled == IRQ_HANDLED) |
699 | perf_event_do_pending(); | 699 | irq_work_run(); |
700 | 700 | ||
701 | #ifdef CONFIG_MIPS_MT_SMP | 701 | #ifdef CONFIG_MIPS_MT_SMP |
702 | read_unlock(&pmuint_rwlock); | 702 | read_unlock(&pmuint_rwlock); |
@@ -1045,6 +1045,8 @@ init_hw_perf_events(void) | |||
1045 | "CPU, irq %d%s\n", mipspmu->name, counters, irq, | 1045 | "CPU, irq %d%s\n", mipspmu->name, counters, irq, |
1046 | irq < 0 ? " (share with timer interrupt)" : ""); | 1046 | irq < 0 ? " (share with timer interrupt)" : ""); |
1047 | 1047 | ||
1048 | perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); | ||
1049 | |||
1048 | return 0; | 1050 | return 0; |
1049 | } | 1051 | } |
1050 | early_initcall(init_hw_perf_events); | 1052 | early_initcall(init_hw_perf_events); |
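A brief illustration, under illustrative names, of the irq_work interface that replaces perf_event_do_pending() above: code running where wakeups are unsafe (a PMU overflow interrupt, possibly taken as an NMI) queues a struct irq_work, and irq_work_run() later executes the queued callbacks from ordinary interrupt context. The MIPS shared PMU interrupt is a normal IRQ, so the hunk above simply flushes the queue at the end of the handler.

#include <linux/irq_work.h>
#include <linux/kernel.h>

static void deferred_perf_wakeups(struct irq_work *work)
{
	pr_debug("running deferred perf wakeups\n");	/* toy callback */
}

static struct irq_work perf_pending = { .func = deferred_perf_wakeups };

static void pmu_overflow_path(void)
{
	irq_work_queue(&perf_pending);	/* called from the counter-overflow path */
}

static void pmu_irq_tail(void)
{
	irq_work_run();			/* runs deferred_perf_wakeups() and friends */
}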
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 5922342bca39..dbbe0ce48d89 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c | |||
@@ -84,7 +84,7 @@ static int protected_save_fp_context(struct sigcontext __user *sc) | |||
84 | 84 | ||
85 | static int protected_restore_fp_context(struct sigcontext __user *sc) | 85 | static int protected_restore_fp_context(struct sigcontext __user *sc) |
86 | { | 86 | { |
87 | int err, tmp; | 87 | int err, tmp __maybe_unused; |
88 | while (1) { | 88 | while (1) { |
89 | lock_fpu_owner(); | 89 | lock_fpu_owner(); |
90 | own_fpu_inatomic(0); | 90 | own_fpu_inatomic(0); |
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index a0ed0e052b2e..aae986613795 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c | |||
@@ -115,7 +115,7 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc) | |||
115 | 115 | ||
116 | static int protected_restore_fp_context32(struct sigcontext32 __user *sc) | 116 | static int protected_restore_fp_context32(struct sigcontext32 __user *sc) |
117 | { | 117 | { |
118 | int err, tmp; | 118 | int err, tmp __maybe_unused; |
119 | while (1) { | 119 | while (1) { |
120 | lock_fpu_owner(); | 120 | lock_fpu_owner(); |
121 | own_fpu_inatomic(0); | 121 | own_fpu_inatomic(0); |
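A short sketch of the pattern behind the two __maybe_unused hunks above: the annotated variables are assigned (for example by a __get_user()) but their value is never read afterwards in some configurations, which newer GCCs flag as "set but not used". __maybe_unused silences that warning without #ifdef clutter. The function below is illustrative only.

#include <linux/compiler.h>
#include <linux/uaccess.h>

static int maybe_unused_demo(const int __user *p)
{
	int err, tmp __maybe_unused;

	err = get_user(tmp, p);		/* tmp is assigned but never read again */
	return err;
}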
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 383aeb95cb49..32a256101082 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c | |||
@@ -193,6 +193,22 @@ void __devinit smp_prepare_boot_cpu(void) | |||
193 | */ | 193 | */ |
194 | static struct task_struct *cpu_idle_thread[NR_CPUS]; | 194 | static struct task_struct *cpu_idle_thread[NR_CPUS]; |
195 | 195 | ||
196 | struct create_idle { | ||
197 | struct work_struct work; | ||
198 | struct task_struct *idle; | ||
199 | struct completion done; | ||
200 | int cpu; | ||
201 | }; | ||
202 | |||
203 | static void __cpuinit do_fork_idle(struct work_struct *work) | ||
204 | { | ||
205 | struct create_idle *c_idle = | ||
206 | container_of(work, struct create_idle, work); | ||
207 | |||
208 | c_idle->idle = fork_idle(c_idle->cpu); | ||
209 | complete(&c_idle->done); | ||
210 | } | ||
211 | |||
196 | int __cpuinit __cpu_up(unsigned int cpu) | 212 | int __cpuinit __cpu_up(unsigned int cpu) |
197 | { | 213 | { |
198 | struct task_struct *idle; | 214 | struct task_struct *idle; |
@@ -203,8 +219,19 @@ int __cpuinit __cpu_up(unsigned int cpu) | |||
203 | * Linux can schedule processes on this slave. | 219 | * Linux can schedule processes on this slave. |
204 | */ | 220 | */ |
205 | if (!cpu_idle_thread[cpu]) { | 221 | if (!cpu_idle_thread[cpu]) { |
206 | idle = fork_idle(cpu); | 222 | /* |
207 | cpu_idle_thread[cpu] = idle; | 223 | * Schedule work item to avoid forking user task |
224 | * Ported from arch/x86/kernel/smpboot.c | ||
225 | */ | ||
226 | struct create_idle c_idle = { | ||
227 | .cpu = cpu, | ||
228 | .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), | ||
229 | }; | ||
230 | |||
231 | INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle); | ||
232 | schedule_work(&c_idle.work); | ||
233 | wait_for_completion(&c_idle.done); | ||
234 | idle = cpu_idle_thread[cpu] = c_idle.idle; | ||
208 | 235 | ||
209 | if (IS_ERR(idle)) | 236 | if (IS_ERR(idle)) |
210 | panic(KERN_ERR "Fork failed for CPU %d", cpu); | 237 | panic(KERN_ERR "Fork failed for CPU %d", cpu); |
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 1dc6edff45e0..58beabf50b3c 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c | |||
@@ -383,12 +383,11 @@ save_static_function(sys_sysmips); | |||
383 | static int __used noinline | 383 | static int __used noinline |
384 | _sys_sysmips(nabi_no_regargs struct pt_regs regs) | 384 | _sys_sysmips(nabi_no_regargs struct pt_regs regs) |
385 | { | 385 | { |
386 | long cmd, arg1, arg2, arg3; | 386 | long cmd, arg1, arg2; |
387 | 387 | ||
388 | cmd = regs.regs[4]; | 388 | cmd = regs.regs[4]; |
389 | arg1 = regs.regs[5]; | 389 | arg1 = regs.regs[5]; |
390 | arg2 = regs.regs[6]; | 390 | arg2 = regs.regs[6]; |
391 | arg3 = regs.regs[7]; | ||
392 | 391 | ||
393 | switch (cmd) { | 392 | switch (cmd) { |
394 | case MIPS_ATOMIC_SET: | 393 | case MIPS_ATOMIC_SET: |
@@ -405,7 +404,7 @@ _sys_sysmips(nabi_no_regargs struct pt_regs regs) | |||
405 | if (arg1 & 2) | 404 | if (arg1 & 2) |
406 | set_thread_flag(TIF_LOGADE); | 405 | set_thread_flag(TIF_LOGADE); |
407 | else | 406 | else |
408 | clear_thread_flag(TIF_FIXADE); | 407 | clear_thread_flag(TIF_LOGADE); |
409 | 408 | ||
410 | return 0; | 409 | return 0; |
411 | 410 | ||
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 570607b376b5..832afbb87588 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S | |||
@@ -115,7 +115,7 @@ SECTIONS | |||
115 | EXIT_DATA | 115 | EXIT_DATA |
116 | } | 116 | } |
117 | 117 | ||
118 | PERCPU(PAGE_SIZE) | 118 | PERCPU(1 << CONFIG_MIPS_L1_CACHE_SHIFT, PAGE_SIZE) |
119 | . = ALIGN(PAGE_SIZE); | 119 | . = ALIGN(PAGE_SIZE); |
120 | __init_end = .; | 120 | __init_end = .; |
121 | /* freed after init ends here */ | 121 | /* freed after init ends here */ |
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index 6a1fdfef8fde..ab52b7cf3b6b 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c | |||
@@ -148,9 +148,9 @@ struct { | |||
148 | spinlock_t tc_list_lock; | 148 | spinlock_t tc_list_lock; |
149 | struct list_head tc_list; /* Thread contexts */ | 149 | struct list_head tc_list; /* Thread contexts */ |
150 | } vpecontrol = { | 150 | } vpecontrol = { |
151 | .vpe_list_lock = SPIN_LOCK_UNLOCKED, | 151 | .vpe_list_lock = __SPIN_LOCK_UNLOCKED(vpe_list_lock), |
152 | .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list), | 152 | .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list), |
153 | .tc_list_lock = SPIN_LOCK_UNLOCKED, | 153 | .tc_list_lock = __SPIN_LOCK_UNLOCKED(tc_list_lock), |
154 | .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list) | 154 | .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list) |
155 | }; | 155 | }; |
156 | 156 | ||
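A minimal example of the replacement initializers used in the vpe.c hunk: SPIN_LOCK_UNLOCKED was removed because it cannot name the lock for lockdep, so statically allocated locks are now initialised with __SPIN_LOCK_UNLOCKED(name) when embedded in a struct, or DEFINE_SPINLOCK() when standalone. The struct below is a toy.

#include <linux/spinlock.h>
#include <linux/list.h>

static DEFINE_SPINLOCK(standalone_lock);	/* standalone static lock */

static struct {
	spinlock_t list_lock;
	struct list_head list;
} toy_control = {
	.list_lock = __SPIN_LOCK_UNLOCKED(toy_control.list_lock),
	.list	   = LIST_HEAD_INIT(toy_control.list),
};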
diff --git a/arch/mips/loongson/Kconfig b/arch/mips/loongson/Kconfig index 6e1b77fec7ea..aca93eed8779 100644 --- a/arch/mips/loongson/Kconfig +++ b/arch/mips/loongson/Kconfig | |||
@@ -1,6 +1,7 @@ | |||
1 | if MACH_LOONGSON | ||
2 | |||
1 | choice | 3 | choice |
2 | prompt "Machine Type" | 4 | prompt "Machine Type" |
3 | depends on MACH_LOONGSON | ||
4 | 5 | ||
5 | config LEMOTE_FULOONG2E | 6 | config LEMOTE_FULOONG2E |
6 | bool "Lemote Fuloong(2e) mini-PC" | 7 | bool "Lemote Fuloong(2e) mini-PC" |
@@ -87,3 +88,5 @@ config LOONGSON_UART_BASE | |||
87 | config LOONGSON_MC146818 | 88 | config LOONGSON_MC146818 |
88 | bool | 89 | bool |
89 | default n | 90 | default n |
91 | |||
92 | endif # MACH_LOONGSON | ||
diff --git a/arch/mips/loongson/common/cmdline.c b/arch/mips/loongson/common/cmdline.c index 1a06defc4f7f..353e1d2e41a5 100644 --- a/arch/mips/loongson/common/cmdline.c +++ b/arch/mips/loongson/common/cmdline.c | |||
@@ -44,10 +44,5 @@ void __init prom_init_cmdline(void) | |||
44 | strcat(arcs_cmdline, " "); | 44 | strcat(arcs_cmdline, " "); |
45 | } | 45 | } |
46 | 46 | ||
47 | if ((strstr(arcs_cmdline, "console=")) == NULL) | ||
48 | strcat(arcs_cmdline, " console=ttyS0,115200"); | ||
49 | if ((strstr(arcs_cmdline, "root=")) == NULL) | ||
50 | strcat(arcs_cmdline, " root=/dev/hda1"); | ||
51 | |||
52 | prom_init_machtype(); | 47 | prom_init_machtype(); |
53 | } | 48 | } |
diff --git a/arch/mips/loongson/common/machtype.c b/arch/mips/loongson/common/machtype.c index 81fbe6b73f91..2efd5d9dee27 100644 --- a/arch/mips/loongson/common/machtype.c +++ b/arch/mips/loongson/common/machtype.c | |||
@@ -41,7 +41,7 @@ void __weak __init mach_prom_init_machtype(void) | |||
41 | 41 | ||
42 | void __init prom_init_machtype(void) | 42 | void __init prom_init_machtype(void) |
43 | { | 43 | { |
44 | char *p, str[MACHTYPE_LEN]; | 44 | char *p, str[MACHTYPE_LEN + 1]; |
45 | int machtype = MACH_LEMOTE_FL2E; | 45 | int machtype = MACH_LEMOTE_FL2E; |
46 | 46 | ||
47 | mips_machtype = LOONGSON_MACHTYPE; | 47 | mips_machtype = LOONGSON_MACHTYPE; |
@@ -53,6 +53,7 @@ void __init prom_init_machtype(void) | |||
53 | } | 53 | } |
54 | p += strlen("machtype="); | 54 | p += strlen("machtype="); |
55 | strncpy(str, p, MACHTYPE_LEN); | 55 | strncpy(str, p, MACHTYPE_LEN); |
56 | str[MACHTYPE_LEN] = '\0'; | ||
56 | p = strstr(str, " "); | 57 | p = strstr(str, " "); |
57 | if (p) | 58 | if (p) |
58 | *p = '\0'; | 59 | *p = '\0'; |
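A small illustration of the bug class fixed above, with illustrative names: strncpy() does not NUL-terminate when the source is at least as long as the limit, so the destination needs one spare byte and an explicit terminator before any str*() call. BUF_LEN mirrors the role of MACHTYPE_LEN in the patch.

#include <linux/string.h>

#define BUF_LEN 50				/* illustrative limit */

static void copy_bounded(char *dst /* BUF_LEN + 1 bytes */, const char *src)
{
	strncpy(dst, src, BUF_LEN);		/* may copy BUF_LEN bytes, no NUL */
	dst[BUF_LEN] = '\0';			/* guarantee termination */
}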
diff --git a/arch/mips/math-emu/ieee754int.h b/arch/mips/math-emu/ieee754int.h index 2701d9500959..2a7d43f4f161 100644 --- a/arch/mips/math-emu/ieee754int.h +++ b/arch/mips/math-emu/ieee754int.h | |||
@@ -70,7 +70,7 @@ | |||
70 | 70 | ||
71 | 71 | ||
72 | #define COMPXSP \ | 72 | #define COMPXSP \ |
73 | unsigned xm; int xe; int xs; int xc | 73 | unsigned xm; int xe; int xs __maybe_unused; int xc |
74 | 74 | ||
75 | #define COMPYSP \ | 75 | #define COMPYSP \ |
76 | unsigned ym; int ye; int ys; int yc | 76 | unsigned ym; int ye; int ys; int yc |
@@ -104,7 +104,7 @@ | |||
104 | 104 | ||
105 | 105 | ||
106 | #define COMPXDP \ | 106 | #define COMPXDP \ |
107 | u64 xm; int xe; int xs; int xc | 107 | u64 xm; int xe; int xs __maybe_unused; int xc |
108 | 108 | ||
109 | #define COMPYDP \ | 109 | #define COMPYDP \ |
110 | u64 ym; int ye; int ys; int yc | 110 | u64 ym; int ye; int ys; int yc |
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 2efcbd24c82f..279599e9a779 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c | |||
@@ -324,7 +324,7 @@ int page_is_ram(unsigned long pagenr) | |||
324 | void __init paging_init(void) | 324 | void __init paging_init(void) |
325 | { | 325 | { |
326 | unsigned long max_zone_pfns[MAX_NR_ZONES]; | 326 | unsigned long max_zone_pfns[MAX_NR_ZONES]; |
327 | unsigned long lastpfn; | 327 | unsigned long lastpfn __maybe_unused; |
328 | 328 | ||
329 | pagetable_init(); | 329 | pagetable_init(); |
330 | 330 | ||
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 083d3412d0bc..04f9e17db9d0 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c | |||
@@ -109,6 +109,8 @@ static bool scratchpad_available(void) | |||
109 | static int scratchpad_offset(int i) | 109 | static int scratchpad_offset(int i) |
110 | { | 110 | { |
111 | BUG(); | 111 | BUG(); |
112 | /* Really unreachable, but evidently some versions of GCC want this. */ ||
113 | return 0; | ||
112 | } | 114 | } |
113 | #endif | 115 | #endif |
114 | /* | 116 | /* |
diff --git a/arch/mips/pci/ops-pmcmsp.c b/arch/mips/pci/ops-pmcmsp.c index b7c03d80c88c..68798f869c0f 100644 --- a/arch/mips/pci/ops-pmcmsp.c +++ b/arch/mips/pci/ops-pmcmsp.c | |||
@@ -308,7 +308,7 @@ static struct resource pci_mem_resource = { | |||
308 | * RETURNS: PCIBIOS_SUCCESSFUL - success | 308 | * RETURNS: PCIBIOS_SUCCESSFUL - success |
309 | * | 309 | * |
310 | ****************************************************************************/ | 310 | ****************************************************************************/ |
311 | static int bpci_interrupt(int irq, void *dev_id) | 311 | static irqreturn_t bpci_interrupt(int irq, void *dev_id) |
312 | { | 312 | { |
313 | struct msp_pci_regs *preg = (void *)PCI_BASE_REG; | 313 | struct msp_pci_regs *preg = (void *)PCI_BASE_REG; |
314 | unsigned int stat = preg->if_status; | 314 | unsigned int stat = preg->if_status; |
@@ -326,7 +326,7 @@ static int bpci_interrupt(int irq, void *dev_id) | |||
326 | /* write to clear all asserted interrupts */ | 326 | /* write to clear all asserted interrupts */ |
327 | preg->if_status = stat; | 327 | preg->if_status = stat; |
328 | 328 | ||
329 | return PCIBIOS_SUCCESSFUL; | 329 | return IRQ_HANDLED; |
330 | } | 330 | } |
331 | 331 | ||
332 | /***************************************************************************** | 332 | /***************************************************************************** |
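A toy handler restating the contract the hunk above restores: a handler registered with request_irq() must return irqreturn_t (IRQ_HANDLED or IRQ_NONE), not a PCI status code, otherwise the spurious-interrupt accounting is confused. This is not the PMC-Sierra code.

#include <linux/interrupt.h>

static irqreturn_t toy_interrupt(int irq, void *dev_id)
{
	if (!dev_id)			/* stand-in for "not our interrupt" */
		return IRQ_NONE;

	/* acknowledge/clear the device's interrupt source here */
	return IRQ_HANDLED;
}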
diff --git a/arch/mips/pmc-sierra/Kconfig b/arch/mips/pmc-sierra/Kconfig index c139988bb85d..8d798497c614 100644 --- a/arch/mips/pmc-sierra/Kconfig +++ b/arch/mips/pmc-sierra/Kconfig | |||
@@ -4,15 +4,11 @@ choice | |||
4 | 4 | ||
5 | config PMC_MSP4200_EVAL | 5 | config PMC_MSP4200_EVAL |
6 | bool "PMC-Sierra MSP4200 Eval Board" | 6 | bool "PMC-Sierra MSP4200 Eval Board" |
7 | select CEVT_R4K | ||
8 | select CSRC_R4K | ||
9 | select IRQ_MSP_SLP | 7 | select IRQ_MSP_SLP |
10 | select HW_HAS_PCI | 8 | select HW_HAS_PCI |
11 | 9 | ||
12 | config PMC_MSP4200_GW | 10 | config PMC_MSP4200_GW |
13 | bool "PMC-Sierra MSP4200 VoIP Gateway" | 11 | bool "PMC-Sierra MSP4200 VoIP Gateway" |
14 | select CEVT_R4K | ||
15 | select CSRC_R4K | ||
16 | select IRQ_MSP_SLP | 12 | select IRQ_MSP_SLP |
17 | select HW_HAS_PCI | 13 | select HW_HAS_PCI |
18 | 14 | ||
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_time.c b/arch/mips/pmc-sierra/msp71xx/msp_time.c index cca64e15f57f..01df84ce31e2 100644 --- a/arch/mips/pmc-sierra/msp71xx/msp_time.c +++ b/arch/mips/pmc-sierra/msp71xx/msp_time.c | |||
@@ -81,7 +81,7 @@ void __init plat_time_init(void) | |||
81 | mips_hpt_frequency = cpu_rate/2; | 81 | mips_hpt_frequency = cpu_rate/2; |
82 | } | 82 | } |
83 | 83 | ||
84 | unsigned int __init get_c0_compare_int(void) | 84 | unsigned int __cpuinit get_c0_compare_int(void) |
85 | { | 85 | { |
86 | return MSP_INT_VPE0_TIMER; | 86 | return MSP_INT_VPE0_TIMER; |
87 | } | 87 | } |
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h index 92d2f9298e38..9d773a639513 100644 --- a/arch/mn10300/include/asm/atomic.h +++ b/arch/mn10300/include/asm/atomic.h | |||
@@ -139,7 +139,7 @@ static inline unsigned long __cmpxchg(volatile unsigned long *m, | |||
139 | * Atomically reads the value of @v. Note that the guaranteed | 139 | * Atomically reads the value of @v. Note that the guaranteed |
140 | * useful range of an atomic_t is only 24 bits. | 140 | * useful range of an atomic_t is only 24 bits. |
141 | */ | 141 | */ |
142 | #define atomic_read(v) ((v)->counter) | 142 | #define atomic_read(v) (ACCESS_ONCE((v)->counter)) |
143 | 143 | ||
144 | /** | 144 | /** |
145 | * atomic_set - set atomic variable | 145 | * atomic_set - set atomic variable |
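A toy function showing what the ACCESS_ONCE() change buys: atomic_read() becomes a volatile load, so a polling loop re-reads the counter on every iteration instead of letting the compiler hoist the load and spin on a cached value.

#include <asm/atomic.h>
#include <asm/processor.h>

static void wait_for_nonzero(atomic_t *v)
{
	while (atomic_read(v) == 0)	/* now a fresh load each time around */
		cpu_relax();
}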
diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h index 679dee0bbd08..3d6e60dad9d9 100644 --- a/arch/mn10300/include/asm/uaccess.h +++ b/arch/mn10300/include/asm/uaccess.h | |||
@@ -160,9 +160,10 @@ struct __large_struct { unsigned long buf[100]; }; | |||
160 | 160 | ||
161 | #define __get_user_check(x, ptr, size) \ | 161 | #define __get_user_check(x, ptr, size) \ |
162 | ({ \ | 162 | ({ \ |
163 | const __typeof__(ptr) __guc_ptr = (ptr); \ | ||
163 | int _e; \ | 164 | int _e; \ |
164 | if (likely(__access_ok((unsigned long) (ptr), (size)))) \ | 165 | if (likely(__access_ok((unsigned long) __guc_ptr, (size)))) \ |
165 | _e = __get_user_nocheck((x), (ptr), (size)); \ | 166 | _e = __get_user_nocheck((x), __guc_ptr, (size)); \ |
166 | else { \ | 167 | else { \ |
167 | _e = -EFAULT; \ | 168 | _e = -EFAULT; \ |
168 | (x) = (__typeof__(x))0; \ | 169 | (x) = (__typeof__(x))0; \ |
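Two illustrative macros showing the hygiene issue fixed above: a function-like macro that expands its argument more than once also evaluates its side effects more than once. Capturing the argument in a typeof() local, as __get_user_check() now does with __guc_ptr, makes the expansion safe.

#define EVALS_TWICE(x)	((x) + (x))		/* passing p++ increments p twice */

#define EVALS_ONCE(x) ({			\
	typeof(x) __once = (x);			/* argument evaluated exactly once */ \
	__once + __once;			\
})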
diff --git a/arch/mn10300/kernel/time.c b/arch/mn10300/kernel/time.c index 75da468090b9..5b955000626d 100644 --- a/arch/mn10300/kernel/time.c +++ b/arch/mn10300/kernel/time.c | |||
@@ -104,8 +104,6 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id) | |||
104 | unsigned tsc, elapse; | 104 | unsigned tsc, elapse; |
105 | irqreturn_t ret; | 105 | irqreturn_t ret; |
106 | 106 | ||
107 | write_seqlock(&xtime_lock); | ||
108 | |||
109 | while (tsc = get_cycles(), | 107 | while (tsc = get_cycles(), |
110 | elapse = tsc - mn10300_last_tsc, /* time elapsed since last | 108 | elapse = tsc - mn10300_last_tsc, /* time elapsed since last |
111 | * tick */ | 109 | * tick */ |
@@ -114,11 +112,9 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id) | |||
114 | mn10300_last_tsc += MN10300_TSC_PER_HZ; | 112 | mn10300_last_tsc += MN10300_TSC_PER_HZ; |
115 | 113 | ||
116 | /* advance the kernel's time tracking system */ | 114 | /* advance the kernel's time tracking system */ |
117 | do_timer(1); | 115 | xtime_update(1); |
118 | } | 116 | } |
119 | 117 | ||
120 | write_sequnlock(&xtime_lock); | ||
121 | |||
122 | ret = local_timer_interrupt(); | 118 | ret = local_timer_interrupt(); |
123 | #ifdef CONFIG_SMP | 119 | #ifdef CONFIG_SMP |
124 | send_IPI_allbutself(LOCAL_TIMER_IPI); | 120 | send_IPI_allbutself(LOCAL_TIMER_IPI); |
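A sketch of the helper this hunk (and the parisc one further down) now relies on: xtime_update(), introduced in this series, takes xtime_lock itself and calls do_timer(), so architecture timer interrupts no longer open-code the seqlock. Roughly, and simplified from the generic timekeeping code of this era, the helper does:

#include <linux/seqlock.h>
#include <linux/sched.h>
#include <linux/time.h>

void xtime_update(unsigned long ticks)
{
	write_seqlock(&xtime_lock);
	do_timer(ticks);
	write_sequnlock(&xtime_lock);
}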
diff --git a/arch/mn10300/kernel/vmlinux.lds.S b/arch/mn10300/kernel/vmlinux.lds.S index febbeee7f2f5..968bcd2cb022 100644 --- a/arch/mn10300/kernel/vmlinux.lds.S +++ b/arch/mn10300/kernel/vmlinux.lds.S | |||
@@ -70,7 +70,7 @@ SECTIONS | |||
70 | .exit.text : { EXIT_TEXT; } | 70 | .exit.text : { EXIT_TEXT; } |
71 | .exit.data : { EXIT_DATA; } | 71 | .exit.data : { EXIT_DATA; } |
72 | 72 | ||
73 | PERCPU(PAGE_SIZE) | 73 | PERCPU(32, PAGE_SIZE) |
74 | . = ALIGN(PAGE_SIZE); | 74 | . = ALIGN(PAGE_SIZE); |
75 | __init_end = .; | 75 | __init_end = .; |
76 | /* freed after init ends here */ | 76 | /* freed after init ends here */ |
diff --git a/arch/mn10300/mm/cache-inv-icache.c b/arch/mn10300/mm/cache-inv-icache.c index a8933a60b2d4..a6b63dde603d 100644 --- a/arch/mn10300/mm/cache-inv-icache.c +++ b/arch/mn10300/mm/cache-inv-icache.c | |||
@@ -69,7 +69,7 @@ static void flush_icache_page_range(unsigned long start, unsigned long end) | |||
69 | 69 | ||
70 | /* invalidate the icache coverage on that region */ | 70 | /* invalidate the icache coverage on that region */ |
71 | mn10300_local_icache_inv_range2(addr + off, size); | 71 | mn10300_local_icache_inv_range2(addr + off, size); |
72 | smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end); | 72 | smp_cache_call(SMP_ICACHE_INV_RANGE, start, end); |
73 | } | 73 | } |
74 | 74 | ||
75 | /** | 75 | /** |
@@ -101,7 +101,7 @@ void flush_icache_range(unsigned long start, unsigned long end) | |||
101 | * directly */ | 101 | * directly */ |
102 | start_page = (start >= 0x80000000UL) ? start : 0x80000000UL; | 102 | start_page = (start >= 0x80000000UL) ? start : 0x80000000UL; |
103 | mn10300_icache_inv_range(start_page, end); | 103 | mn10300_icache_inv_range(start_page, end); |
104 | smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end); | 104 | smp_cache_call(SMP_ICACHE_INV_RANGE, start, end); |
105 | if (start_page == start) | 105 | if (start_page == start) |
106 | goto done; | 106 | goto done; |
107 | end = start_page; | 107 | end = start_page; |
diff --git a/arch/parisc/hpux/sys_hpux.c b/arch/parisc/hpux/sys_hpux.c index 30394081d9b6..6ab9580b0b00 100644 --- a/arch/parisc/hpux/sys_hpux.c +++ b/arch/parisc/hpux/sys_hpux.c | |||
@@ -185,26 +185,21 @@ struct hpux_statfs { | |||
185 | int16_t f_pad; | 185 | int16_t f_pad; |
186 | }; | 186 | }; |
187 | 187 | ||
188 | static int do_statfs_hpux(struct path *path, struct hpux_statfs *buf) | 188 | static int do_statfs_hpux(struct kstatfs *st, struct hpux_statfs __user *p) |
189 | { | 189 | { |
190 | struct kstatfs st; | 190 | struct hpux_statfs buf; |
191 | int retval; | 191 | memset(&buf, 0, sizeof(buf)); |
192 | 192 | buf.f_type = st->f_type; | |
193 | retval = vfs_statfs(path, &st); | 193 | buf.f_bsize = st->f_bsize; |
194 | if (retval) | 194 | buf.f_blocks = st->f_blocks; |
195 | return retval; | 195 | buf.f_bfree = st->f_bfree; |
196 | 196 | buf.f_bavail = st->f_bavail; | |
197 | memset(buf, 0, sizeof(*buf)); | 197 | buf.f_files = st->f_files; |
198 | buf->f_type = st.f_type; | 198 | buf.f_ffree = st->f_ffree; |
199 | buf->f_bsize = st.f_bsize; | 199 | buf.f_fsid[0] = st->f_fsid.val[0]; |
200 | buf->f_blocks = st.f_blocks; | 200 | buf.f_fsid[1] = st->f_fsid.val[1]; |
201 | buf->f_bfree = st.f_bfree; | 201 | if (copy_to_user(p, &buf, sizeof(buf))) |
202 | buf->f_bavail = st.f_bavail; | 202 | return -EFAULT; |
203 | buf->f_files = st.f_files; | ||
204 | buf->f_ffree = st.f_ffree; | ||
205 | buf->f_fsid[0] = st.f_fsid.val[0]; | ||
206 | buf->f_fsid[1] = st.f_fsid.val[1]; | ||
207 | |||
208 | return 0; | 203 | return 0; |
209 | } | 204 | } |
210 | 205 | ||
@@ -212,35 +207,19 @@ static int do_statfs_hpux(struct path *path, struct hpux_statfs *buf) | |||
212 | asmlinkage long hpux_statfs(const char __user *pathname, | 207 | asmlinkage long hpux_statfs(const char __user *pathname, |
213 | struct hpux_statfs __user *buf) | 208 | struct hpux_statfs __user *buf) |
214 | { | 209 | { |
215 | struct path path; | 210 | struct kstatfs st; |
216 | int error; | 211 | int error = user_statfs(pathname, &st); |
217 | 212 | if (!error) | |
218 | error = user_path(pathname, &path); | 213 | error = do_statfs_hpux(&st, buf); |
219 | if (!error) { | ||
220 | struct hpux_statfs tmp; | ||
221 | error = do_statfs_hpux(&path, &tmp); | ||
222 | if (!error && copy_to_user(buf, &tmp, sizeof(tmp))) | ||
223 | error = -EFAULT; | ||
224 | path_put(&path); | ||
225 | } | ||
226 | return error; | 214 | return error; |
227 | } | 215 | } |
228 | 216 | ||
229 | asmlinkage long hpux_fstatfs(unsigned int fd, struct hpux_statfs __user * buf) | 217 | asmlinkage long hpux_fstatfs(unsigned int fd, struct hpux_statfs __user * buf) |
230 | { | 218 | { |
231 | struct file *file; | 219 | struct kstatfs st; |
232 | struct hpux_statfs tmp; | 220 | int error = fd_statfs(fd, &st); |
233 | int error; | 221 | if (!error) |
234 | 222 | error = do_statfs_hpux(&st, buf); | |
235 | error = -EBADF; | ||
236 | file = fget(fd); | ||
237 | if (!file) | ||
238 | goto out; | ||
239 | error = do_statfs_hpux(&file->f_path, &tmp); | ||
240 | if (!error && copy_to_user(buf, &tmp, sizeof(tmp))) | ||
241 | error = -EFAULT; | ||
242 | fput(file); | ||
243 | out: | ||
244 | return error; | 223 | return error; |
245 | } | 224 | } |
246 | 225 | ||
diff --git a/arch/parisc/include/asm/fcntl.h b/arch/parisc/include/asm/fcntl.h index f357fc693c89..0304b92ccfea 100644 --- a/arch/parisc/include/asm/fcntl.h +++ b/arch/parisc/include/asm/fcntl.h | |||
@@ -19,6 +19,8 @@ | |||
19 | #define O_NOFOLLOW 000000200 /* don't follow links */ | 19 | #define O_NOFOLLOW 000000200 /* don't follow links */ |
20 | #define O_INVISIBLE 004000000 /* invisible I/O, for DMAPI/XDSM */ | 20 | #define O_INVISIBLE 004000000 /* invisible I/O, for DMAPI/XDSM */ |
21 | 21 | ||
22 | #define O_PATH 020000000 | ||
23 | |||
22 | #define F_GETLK64 8 | 24 | #define F_GETLK64 8 |
23 | #define F_SETLK64 9 | 25 | #define F_SETLK64 9 |
24 | #define F_SETLKW64 10 | 26 | #define F_SETLKW64 10 |
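A hedged userspace illustration of the O_PATH flag whose parisc value is defined above (each architecture picks a free bit in its own flag space): an O_PATH descriptor identifies a filesystem location without granting read or write access, and is mainly useful as a dirfd anchor for the *at() calls. The fallback define is only for C libraries that predate the flag; exact semantics are set by the VFS patches elsewhere in this series.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#ifndef O_PATH
#define O_PATH 010000000	/* generic value; parisc uses 020000000 */
#endif

int main(void)
{
	int dirfd, fd;

	dirfd = open("/etc", O_PATH | O_DIRECTORY);
	if (dirfd < 0) {
		perror("open(O_PATH)");
		return 1;
	}
	fd = openat(dirfd, "hostname", O_RDONLY);	/* dirfd anchors the lookup */
	if (fd >= 0)
		close(fd);
	close(dirfd);
	return 0;
}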
diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h index 0c705c3a55ef..67a33cc27ef2 100644 --- a/arch/parisc/include/asm/futex.h +++ b/arch/parisc/include/asm/futex.h | |||
@@ -8,7 +8,7 @@ | |||
8 | #include <asm/errno.h> | 8 | #include <asm/errno.h> |
9 | 9 | ||
10 | static inline int | 10 | static inline int |
11 | futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | 11 | futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) |
12 | { | 12 | { |
13 | int op = (encoded_op >> 28) & 7; | 13 | int op = (encoded_op >> 28) & 7; |
14 | int cmp = (encoded_op >> 24) & 15; | 14 | int cmp = (encoded_op >> 24) & 15; |
@@ -18,7 +18,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | |||
18 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) | 18 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) |
19 | oparg = 1 << oparg; | 19 | oparg = 1 << oparg; |
20 | 20 | ||
21 | if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) | 21 | if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) |
22 | return -EFAULT; | 22 | return -EFAULT; |
23 | 23 | ||
24 | pagefault_disable(); | 24 | pagefault_disable(); |
@@ -51,10 +51,10 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | |||
51 | 51 | ||
52 | /* Non-atomic version */ | 52 | /* Non-atomic version */ |
53 | static inline int | 53 | static inline int |
54 | futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | 54 | futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, |
55 | u32 oldval, u32 newval) | ||
55 | { | 56 | { |
56 | int err = 0; | 57 | u32 val; |
57 | int uval; | ||
58 | 58 | ||
59 | /* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is | 59 | /* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is |
60 | * our gateway page, and causes no end of trouble... | 60 | * our gateway page, and causes no end of trouble... |
@@ -62,15 +62,15 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | |||
62 | if (segment_eq(KERNEL_DS, get_fs()) && !uaddr) | 62 | if (segment_eq(KERNEL_DS, get_fs()) && !uaddr) |
63 | return -EFAULT; | 63 | return -EFAULT; |
64 | 64 | ||
65 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | 65 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
66 | return -EFAULT; | 66 | return -EFAULT; |
67 | 67 | ||
68 | err = get_user(uval, uaddr); | 68 | if (get_user(val, uaddr)) |
69 | if (err) return -EFAULT; | 69 | return -EFAULT; |
70 | if (uval == oldval) | 70 | if (val == oldval && put_user(newval, uaddr)) |
71 | err = put_user(newval, uaddr); | 71 | return -EFAULT; |
72 | if (err) return -EFAULT; | 72 | *uval = val; |
73 | return uval; | 73 | return 0; |
74 | } | 74 | } |
75 | 75 | ||
76 | #endif /*__KERNEL__*/ | 76 | #endif /*__KERNEL__*/ |
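A toy caller illustrating the interface change applied to every architecture in this series (alpha and powerpc get asm versions, parisc keeps the non-atomic C one): futex_atomic_cmpxchg_inatomic() now returns 0 or -EFAULT and reports the old user-space value through *uval, instead of folding both into a single return value. A caller in the style of kernel/futex.c then looks roughly like this:

#include <linux/errno.h>
#include <linux/types.h>
#include <asm/futex.h>

static int toy_futex_cmpxchg(u32 __user *uaddr, u32 expected, u32 newval)
{
	u32 curval;
	int err = futex_atomic_cmpxchg_inatomic(&curval, uaddr, expected, newval);

	if (err)
		return err;		/* -EFAULT: user page not accessible */
	if (curval != expected)
		return -EAGAIN;		/* raced with another updater */
	return 0;
}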
diff --git a/arch/parisc/include/asm/ioctls.h b/arch/parisc/include/asm/ioctls.h index 6ba80d03623a..054ec06f9e23 100644 --- a/arch/parisc/include/asm/ioctls.h +++ b/arch/parisc/include/asm/ioctls.h | |||
@@ -54,6 +54,7 @@ | |||
54 | #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ | 54 | #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ |
55 | #define TIOCGDEV _IOR('T',0x32, int) /* Get primary device node of /dev/console */ | 55 | #define TIOCGDEV _IOR('T',0x32, int) /* Get primary device node of /dev/console */ |
56 | #define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */ | 56 | #define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */ |
57 | #define TIOCVHANGUP 0x5437 | ||
57 | 58 | ||
58 | #define FIONCLEX 0x5450 /* these numbers need to be adjusted. */ | 59 | #define FIONCLEX 0x5450 /* these numbers need to be adjusted. */ |
59 | #define FIOCLEX 0x5451 | 60 | #define FIOCLEX 0x5451 |
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index 05511ccb61d2..45b7389d77aa 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c | |||
@@ -162,11 +162,8 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id) | |||
162 | update_process_times(user_mode(get_irq_regs())); | 162 | update_process_times(user_mode(get_irq_regs())); |
163 | } | 163 | } |
164 | 164 | ||
165 | if (cpu == 0) { | 165 | if (cpu == 0) |
166 | write_seqlock(&xtime_lock); | 166 | xtime_update(ticks_elapsed); |
167 | do_timer(ticks_elapsed); | ||
168 | write_sequnlock(&xtime_lock); | ||
169 | } | ||
170 | 167 | ||
171 | return IRQ_HANDLED; | 168 | return IRQ_HANDLED; |
172 | } | 169 | } |
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index d64a6bbec2aa..8f1e4efd143e 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S | |||
@@ -145,7 +145,7 @@ SECTIONS | |||
145 | EXIT_DATA | 145 | EXIT_DATA |
146 | } | 146 | } |
147 | 147 | ||
148 | PERCPU(PAGE_SIZE) | 148 | PERCPU(L1_CACHE_BYTES, PAGE_SIZE) |
149 | . = ALIGN(PAGE_SIZE); | 149 | . = ALIGN(PAGE_SIZE); |
150 | __init_end = .; | 150 | __init_end = .; |
151 | /* freed after init ends here */ | 151 | /* freed after init ends here */ |
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h index 7c589ef81fb0..c94e4a3fe2ef 100644 --- a/arch/powerpc/include/asm/futex.h +++ b/arch/powerpc/include/asm/futex.h | |||
@@ -30,7 +30,7 @@ | |||
30 | : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \ | 30 | : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \ |
31 | : "cr0", "memory") | 31 | : "cr0", "memory") |
32 | 32 | ||
33 | static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | 33 | static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) |
34 | { | 34 | { |
35 | int op = (encoded_op >> 28) & 7; | 35 | int op = (encoded_op >> 28) & 7; |
36 | int cmp = (encoded_op >> 24) & 15; | 36 | int cmp = (encoded_op >> 24) & 15; |
@@ -40,7 +40,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | |||
40 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) | 40 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) |
41 | oparg = 1 << oparg; | 41 | oparg = 1 << oparg; |
42 | 42 | ||
43 | if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) | 43 | if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) |
44 | return -EFAULT; | 44 | return -EFAULT; |
45 | 45 | ||
46 | pagefault_disable(); | 46 | pagefault_disable(); |
@@ -82,35 +82,38 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | |||
82 | } | 82 | } |
83 | 83 | ||
84 | static inline int | 84 | static inline int |
85 | futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | 85 | futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, |
86 | u32 oldval, u32 newval) | ||
86 | { | 87 | { |
87 | int prev; | 88 | int ret = 0; |
89 | u32 prev; | ||
88 | 90 | ||
89 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | 91 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
90 | return -EFAULT; | 92 | return -EFAULT; |
91 | 93 | ||
92 | __asm__ __volatile__ ( | 94 | __asm__ __volatile__ ( |
93 | PPC_RELEASE_BARRIER | 95 | PPC_RELEASE_BARRIER |
94 | "1: lwarx %0,0,%2 # futex_atomic_cmpxchg_inatomic\n\ | 96 | "1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\ |
95 | cmpw 0,%0,%3\n\ | 97 | cmpw 0,%1,%4\n\ |
96 | bne- 3f\n" | 98 | bne- 3f\n" |
97 | PPC405_ERR77(0,%2) | 99 | PPC405_ERR77(0,%3) |
98 | "2: stwcx. %4,0,%2\n\ | 100 | "2: stwcx. %5,0,%3\n\ |
99 | bne- 1b\n" | 101 | bne- 1b\n" |
100 | PPC_ACQUIRE_BARRIER | 102 | PPC_ACQUIRE_BARRIER |
101 | "3: .section .fixup,\"ax\"\n\ | 103 | "3: .section .fixup,\"ax\"\n\ |
102 | 4: li %0,%5\n\ | 104 | 4: li %0,%6\n\ |
103 | b 3b\n\ | 105 | b 3b\n\ |
104 | .previous\n\ | 106 | .previous\n\ |
105 | .section __ex_table,\"a\"\n\ | 107 | .section __ex_table,\"a\"\n\ |
106 | .align 3\n\ | 108 | .align 3\n\ |
107 | " PPC_LONG "1b,4b,2b,4b\n\ | 109 | " PPC_LONG "1b,4b,2b,4b\n\ |
108 | .previous" \ | 110 | .previous" \ |
109 | : "=&r" (prev), "+m" (*uaddr) | 111 | : "+r" (ret), "=&r" (prev), "+m" (*uaddr) |
110 | : "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT) | 112 | : "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT) |
111 | : "cc", "memory"); | 113 | : "cc", "memory"); |
112 | 114 | ||
113 | return prev; | 115 | *uval = prev; |
116 | return ret; | ||
114 | } | 117 | } |
115 | 118 | ||
116 | #endif /* __KERNEL__ */ | 119 | #endif /* __KERNEL__ */ |
diff --git a/arch/powerpc/include/asm/ioctls.h b/arch/powerpc/include/asm/ioctls.h index c7dc17cf84f1..e9b78870aaab 100644 --- a/arch/powerpc/include/asm/ioctls.h +++ b/arch/powerpc/include/asm/ioctls.h | |||
@@ -96,6 +96,7 @@ | |||
96 | #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ | 96 | #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ |
97 | #define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */ | 97 | #define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */ |
98 | #define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */ | 98 | #define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */ |
99 | #define TIOCVHANGUP 0x5437 | ||
99 | 100 | ||
100 | #define TIOCSERCONFIG 0x5453 | 101 | #define TIOCSERCONFIG 0x5453 |
101 | #define TIOCSERGWILD 0x5454 | 102 | #define TIOCSERGWILD 0x5454 |
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h index 380d48bacd16..26b8c807f8f1 100644 --- a/arch/powerpc/include/asm/lppaca.h +++ b/arch/powerpc/include/asm/lppaca.h | |||
@@ -33,9 +33,25 @@ | |||
33 | // | 33 | // |
34 | //---------------------------------------------------------------------------- | 34 | //---------------------------------------------------------------------------- |
35 | #include <linux/cache.h> | 35 | #include <linux/cache.h> |
36 | #include <linux/threads.h> | ||
36 | #include <asm/types.h> | 37 | #include <asm/types.h> |
37 | #include <asm/mmu.h> | 38 | #include <asm/mmu.h> |
38 | 39 | ||
40 | /* | ||
41 | * We only have to have statically allocated lppaca structs on | ||
42 | * legacy iSeries, which supports at most 64 cpus. | ||
43 | */ | ||
44 | #ifdef CONFIG_PPC_ISERIES | ||
45 | #if NR_CPUS < 64 | ||
46 | #define NR_LPPACAS NR_CPUS | ||
47 | #else | ||
48 | #define NR_LPPACAS 64 | ||
49 | #endif | ||
50 | #else /* not iSeries */ | ||
51 | #define NR_LPPACAS 1 | ||
52 | #endif | ||
53 | |||
54 | |||
39 | /* The Hypervisor barfs if the lppaca crosses a page boundary. A 1k | 55 | /* The Hypervisor barfs if the lppaca crosses a page boundary. A 1k |
40 | * alignment is sufficient to prevent this */ | 56 | * alignment is sufficient to prevent this */ |
41 | struct lppaca { | 57 | struct lppaca { |
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index 51e9e6f90d12..5e156e034fe2 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h | |||
@@ -164,13 +164,23 @@ extern void setup_indirect_pci(struct pci_controller* hose, | |||
164 | resource_size_t cfg_addr, | 164 | resource_size_t cfg_addr, |
165 | resource_size_t cfg_data, u32 flags); | 165 | resource_size_t cfg_data, u32 flags); |
166 | 166 | ||
167 | #ifndef CONFIG_PPC64 | ||
168 | |||
169 | static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus) | 167 | static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus) |
170 | { | 168 | { |
171 | return bus->sysdata; | 169 | return bus->sysdata; |
172 | } | 170 | } |
173 | 171 | ||
172 | #ifndef CONFIG_PPC64 | ||
173 | |||
174 | static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) | ||
175 | { | ||
176 | struct pci_controller *host; | ||
177 | |||
178 | if (bus->self) | ||
179 | return pci_device_to_OF_node(bus->self); | ||
180 | host = pci_bus_to_host(bus); | ||
181 | return host ? host->dn : NULL; | ||
182 | } | ||
183 | |||
174 | static inline int isa_vaddr_is_ioport(void __iomem *address) | 184 | static inline int isa_vaddr_is_ioport(void __iomem *address) |
175 | { | 185 | { |
176 | /* No specific ISA handling on ppc32 at this stage, it | 186 | /* No specific ISA handling on ppc32 at this stage, it |
@@ -218,19 +228,10 @@ extern void * update_dn_pci_info(struct device_node *dn, void *data); | |||
218 | 228 | ||
219 | /* Get a device_node from a pci_dev. This code must be fast except | 229 | /* Get a device_node from a pci_dev. This code must be fast except |
220 | * in the case where the sysdata is incorrect and needs to be fixed | 230 | * in the case where the sysdata is incorrect and needs to be fixed |
221 | * up (this will only happen once). | 231 | * up (this will only happen once). */ |
222 | * In this case the sysdata will have been inherited from a PCI host | ||
223 | * bridge or a PCI-PCI bridge further up the tree, so it will point | ||
224 | * to a valid struct pci_dn, just not the one we want. | ||
225 | */ | ||
226 | static inline struct device_node *pci_device_to_OF_node(struct pci_dev *dev) | 232 | static inline struct device_node *pci_device_to_OF_node(struct pci_dev *dev) |
227 | { | 233 | { |
228 | struct device_node *dn = dev->sysdata; | 234 | return dev->dev.of_node ? dev->dev.of_node : fetch_dev_dn(dev); |
229 | struct pci_dn *pdn = dn->data; | ||
230 | |||
231 | if (pdn && pdn->devfn == dev->devfn && pdn->busno == dev->bus->number) | ||
232 | return dn; /* fast path. sysdata is good */ | ||
233 | return fetch_dev_dn(dev); | ||
234 | } | 235 | } |
235 | 236 | ||
236 | static inline int pci_device_from_OF_node(struct device_node *np, | 237 | static inline int pci_device_from_OF_node(struct device_node *np, |
@@ -248,7 +249,7 @@ static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) | |||
248 | if (bus->self) | 249 | if (bus->self) |
249 | return pci_device_to_OF_node(bus->self); | 250 | return pci_device_to_OF_node(bus->self); |
250 | else | 251 | else |
251 | return bus->sysdata; /* Must be root bus (PHB) */ | 252 | return bus->dev.of_node; /* Must be root bus (PHB) */ |
252 | } | 253 | } |
253 | 254 | ||
254 | /** Find the bus corresponding to the indicated device node */ | 255 | /** Find the bus corresponding to the indicated device node */ |
@@ -260,14 +261,6 @@ extern void pcibios_remove_pci_devices(struct pci_bus *bus); | |||
260 | /** Discover new pci devices under this bus, and add them */ | 261 | /** Discover new pci devices under this bus, and add them */ |
261 | extern void pcibios_add_pci_devices(struct pci_bus *bus); | 262 | extern void pcibios_add_pci_devices(struct pci_bus *bus); |
262 | 263 | ||
263 | static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus) | ||
264 | { | ||
265 | struct device_node *busdn = bus->sysdata; | ||
266 | |||
267 | BUG_ON(busdn == NULL); | ||
268 | return PCI_DN(busdn)->phb; | ||
269 | } | ||
270 | |||
271 | 264 | ||
272 | extern void isa_bridge_find_early(struct pci_controller *hose); | 265 | extern void isa_bridge_find_early(struct pci_controller *hose); |
273 | 266 | ||
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index a20a9ad2258b..7d7790954e02 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h | |||
@@ -201,7 +201,7 @@ extern void pci_resource_to_user(const struct pci_dev *dev, int bar, | |||
201 | extern void pcibios_setup_bus_devices(struct pci_bus *bus); | 201 | extern void pcibios_setup_bus_devices(struct pci_bus *bus); |
202 | extern void pcibios_setup_bus_self(struct pci_bus *bus); | 202 | extern void pcibios_setup_bus_self(struct pci_bus *bus); |
203 | extern void pcibios_setup_phb_io_space(struct pci_controller *hose); | 203 | extern void pcibios_setup_phb_io_space(struct pci_controller *hose); |
204 | extern void pcibios_scan_phb(struct pci_controller *hose, void *sysdata); | 204 | extern void pcibios_scan_phb(struct pci_controller *hose); |
205 | 205 | ||
206 | #endif /* __KERNEL__ */ | 206 | #endif /* __KERNEL__ */ |
207 | #endif /* __ASM_POWERPC_PCI_H */ | 207 | #endif /* __ASM_POWERPC_PCI_H */ |
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h index d72757585595..c189aa5fe1f4 100644 --- a/arch/powerpc/include/asm/prom.h +++ b/arch/powerpc/include/asm/prom.h | |||
@@ -70,21 +70,6 @@ static inline int of_node_to_nid(struct device_node *device) { return 0; } | |||
70 | #endif | 70 | #endif |
71 | #define of_node_to_nid of_node_to_nid | 71 | #define of_node_to_nid of_node_to_nid |
72 | 72 | ||
73 | /** | ||
74 | * of_irq_map_pci - Resolve the interrupt for a PCI device | ||
75 | * @pdev: the device whose interrupt is to be resolved | ||
76 | * @out_irq: structure of_irq filled by this function | ||
77 | * | ||
78 | * This function resolves the PCI interrupt for a given PCI device. If a | ||
79 | * device-node exists for a given pci_dev, it will use normal OF tree | ||
80 | * walking. If not, it will implement standard swizzling and walk up the | ||
81 | * PCI tree until an device-node is found, at which point it will finish | ||
82 | * resolving using the OF tree walking. | ||
83 | */ | ||
84 | struct pci_dev; | ||
85 | struct of_irq; | ||
86 | extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq); | ||
87 | |||
88 | extern void of_instantiate_rtc(void); | 73 | extern void of_instantiate_rtc(void); |
89 | 74 | ||
90 | /* These includes are put at the bottom because they may contain things | 75 | /* These includes are put at the bottom because they may contain things |
diff --git a/arch/powerpc/include/asm/rwsem.h b/arch/powerpc/include/asm/rwsem.h index 8447d89fbe72..bb1e2cdeb9bf 100644 --- a/arch/powerpc/include/asm/rwsem.h +++ b/arch/powerpc/include/asm/rwsem.h | |||
@@ -13,11 +13,6 @@ | |||
13 | * by Paul Mackerras <paulus@samba.org>. | 13 | * by Paul Mackerras <paulus@samba.org>. |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <linux/list.h> | ||
17 | #include <linux/spinlock.h> | ||
18 | #include <asm/atomic.h> | ||
19 | #include <asm/system.h> | ||
20 | |||
21 | /* | 16 | /* |
22 | * the semaphore definition | 17 | * the semaphore definition |
23 | */ | 18 | */ |
@@ -33,47 +28,6 @@ | |||
33 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS | 28 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS |
34 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) | 29 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) |
35 | 30 | ||
36 | struct rw_semaphore { | ||
37 | long count; | ||
38 | spinlock_t wait_lock; | ||
39 | struct list_head wait_list; | ||
40 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
41 | struct lockdep_map dep_map; | ||
42 | #endif | ||
43 | }; | ||
44 | |||
45 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
46 | # define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } | ||
47 | #else | ||
48 | # define __RWSEM_DEP_MAP_INIT(lockname) | ||
49 | #endif | ||
50 | |||
51 | #define __RWSEM_INITIALIZER(name) \ | ||
52 | { \ | ||
53 | RWSEM_UNLOCKED_VALUE, \ | ||
54 | __SPIN_LOCK_UNLOCKED((name).wait_lock), \ | ||
55 | LIST_HEAD_INIT((name).wait_list) \ | ||
56 | __RWSEM_DEP_MAP_INIT(name) \ | ||
57 | } | ||
58 | |||
59 | #define DECLARE_RWSEM(name) \ | ||
60 | struct rw_semaphore name = __RWSEM_INITIALIZER(name) | ||
61 | |||
62 | extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); | ||
63 | extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); | ||
64 | extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem); | ||
65 | extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); | ||
66 | |||
67 | extern void __init_rwsem(struct rw_semaphore *sem, const char *name, | ||
68 | struct lock_class_key *key); | ||
69 | |||
70 | #define init_rwsem(sem) \ | ||
71 | do { \ | ||
72 | static struct lock_class_key __key; \ | ||
73 | \ | ||
74 | __init_rwsem((sem), #sem, &__key); \ | ||
75 | } while (0) | ||
76 | |||
77 | /* | 31 | /* |
78 | * lock for reading | 32 | * lock for reading |
79 | */ | 33 | */ |
@@ -174,10 +128,5 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem) | |||
174 | return atomic_long_add_return(delta, (atomic_long_t *)&sem->count); | 128 | return atomic_long_add_return(delta, (atomic_long_t *)&sem->count); |
175 | } | 129 | } |
176 | 130 | ||
177 | static inline int rwsem_is_locked(struct rw_semaphore *sem) | ||
178 | { | ||
179 | return sem->count != 0; | ||
180 | } | ||
181 | |||
182 | #endif /* __KERNEL__ */ | 131 | #endif /* __KERNEL__ */ |
183 | #endif /* _ASM_POWERPC_RWSEM_H */ | 132 | #endif /* _ASM_POWERPC_RWSEM_H */ |
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index f62efdfd1769..c00d4ca1ee15 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c | |||
@@ -201,13 +201,14 @@ int ibmebus_register_driver(struct of_platform_driver *drv) | |||
201 | /* If the driver uses devices that ibmebus doesn't know, add them */ | 201 | /* If the driver uses devices that ibmebus doesn't know, add them */ |
202 | ibmebus_create_devices(drv->driver.of_match_table); | 202 | ibmebus_create_devices(drv->driver.of_match_table); |
203 | 203 | ||
204 | return of_register_driver(drv, &ibmebus_bus_type); | 204 | drv->driver.bus = &ibmebus_bus_type; |
205 | return driver_register(&drv->driver); | ||
205 | } | 206 | } |
206 | EXPORT_SYMBOL(ibmebus_register_driver); | 207 | EXPORT_SYMBOL(ibmebus_register_driver); |
207 | 208 | ||
208 | void ibmebus_unregister_driver(struct of_platform_driver *drv) | 209 | void ibmebus_unregister_driver(struct of_platform_driver *drv) |
209 | { | 210 | { |
210 | of_unregister_driver(drv); | 211 | driver_unregister(&drv->driver); |
211 | } | 212 | } |
212 | EXPORT_SYMBOL(ibmebus_unregister_driver); | 213 | EXPORT_SYMBOL(ibmebus_unregister_driver); |
213 | 214 | ||
@@ -308,15 +309,410 @@ static ssize_t ibmebus_store_remove(struct bus_type *bus, | |||
308 | } | 309 | } |
309 | } | 310 | } |
310 | 311 | ||
312 | |||
311 | static struct bus_attribute ibmebus_bus_attrs[] = { | 313 | static struct bus_attribute ibmebus_bus_attrs[] = { |
312 | __ATTR(probe, S_IWUSR, NULL, ibmebus_store_probe), | 314 | __ATTR(probe, S_IWUSR, NULL, ibmebus_store_probe), |
313 | __ATTR(remove, S_IWUSR, NULL, ibmebus_store_remove), | 315 | __ATTR(remove, S_IWUSR, NULL, ibmebus_store_remove), |
314 | __ATTR_NULL | 316 | __ATTR_NULL |
315 | }; | 317 | }; |
316 | 318 | ||
319 | static int ibmebus_bus_bus_match(struct device *dev, struct device_driver *drv) | ||
320 | { | ||
321 | const struct of_device_id *matches = drv->of_match_table; | ||
322 | |||
323 | if (!matches) | ||
324 | return 0; | ||
325 | |||
326 | return of_match_device(matches, dev) != NULL; | ||
327 | } | ||
328 | |||
329 | static int ibmebus_bus_device_probe(struct device *dev) | ||
330 | { | ||
331 | int error = -ENODEV; | ||
332 | struct of_platform_driver *drv; | ||
333 | struct platform_device *of_dev; | ||
334 | const struct of_device_id *match; | ||
335 | |||
336 | drv = to_of_platform_driver(dev->driver); | ||
337 | of_dev = to_platform_device(dev); | ||
338 | |||
339 | if (!drv->probe) | ||
340 | return error; | ||
341 | |||
342 | of_dev_get(of_dev); | ||
343 | |||
344 | match = of_match_device(drv->driver.of_match_table, dev); | ||
345 | if (match) | ||
346 | error = drv->probe(of_dev, match); | ||
347 | if (error) | ||
348 | of_dev_put(of_dev); | ||
349 | |||
350 | return error; | ||
351 | } | ||
352 | |||
353 | static int ibmebus_bus_device_remove(struct device *dev) | ||
354 | { | ||
355 | struct platform_device *of_dev = to_platform_device(dev); | ||
356 | struct of_platform_driver *drv = to_of_platform_driver(dev->driver); | ||
357 | |||
358 | if (dev->driver && drv->remove) | ||
359 | drv->remove(of_dev); | ||
360 | return 0; | ||
361 | } | ||
362 | |||
363 | static void ibmebus_bus_device_shutdown(struct device *dev) | ||
364 | { | ||
365 | struct platform_device *of_dev = to_platform_device(dev); | ||
366 | struct of_platform_driver *drv = to_of_platform_driver(dev->driver); | ||
367 | |||
368 | if (dev->driver && drv->shutdown) | ||
369 | drv->shutdown(of_dev); | ||
370 | } | ||
371 | |||
372 | /* | ||
373 | * ibmebus_bus_device_attrs | ||
374 | */ | ||
375 | static ssize_t devspec_show(struct device *dev, | ||
376 | struct device_attribute *attr, char *buf) | ||
377 | { | ||
378 | struct platform_device *ofdev; | ||
379 | |||
380 | ofdev = to_platform_device(dev); | ||
381 | return sprintf(buf, "%s\n", ofdev->dev.of_node->full_name); | ||
382 | } | ||
383 | |||
384 | static ssize_t name_show(struct device *dev, | ||
385 | struct device_attribute *attr, char *buf) | ||
386 | { | ||
387 | struct platform_device *ofdev; | ||
388 | |||
389 | ofdev = to_platform_device(dev); | ||
390 | return sprintf(buf, "%s\n", ofdev->dev.of_node->name); | ||
391 | } | ||
392 | |||
393 | static ssize_t modalias_show(struct device *dev, | ||
394 | struct device_attribute *attr, char *buf) | ||
395 | { | ||
396 | ssize_t len = of_device_get_modalias(dev, buf, PAGE_SIZE - 2); | ||
397 | buf[len] = '\n'; | ||
398 | buf[len+1] = 0; | ||
399 | return len+1; | ||
400 | } | ||
401 | |||
402 | struct device_attribute ibmebus_bus_device_attrs[] = { | ||
403 | __ATTR_RO(devspec), | ||
404 | __ATTR_RO(name), | ||
405 | __ATTR_RO(modalias), | ||
406 | __ATTR_NULL | ||
407 | }; | ||
408 | |||
409 | #ifdef CONFIG_PM_SLEEP | ||
410 | static int ibmebus_bus_legacy_suspend(struct device *dev, pm_message_t mesg) | ||
411 | { | ||
412 | struct platform_device *of_dev = to_platform_device(dev); | ||
413 | struct of_platform_driver *drv = to_of_platform_driver(dev->driver); | ||
414 | int ret = 0; | ||
415 | |||
416 | if (dev->driver && drv->suspend) | ||
417 | ret = drv->suspend(of_dev, mesg); | ||
418 | return ret; | ||
419 | } | ||
420 | |||
421 | static int ibmebus_bus_legacy_resume(struct device *dev) | ||
422 | { | ||
423 | struct platform_device *of_dev = to_platform_device(dev); | ||
424 | struct of_platform_driver *drv = to_of_platform_driver(dev->driver); | ||
425 | int ret = 0; | ||
426 | |||
427 | if (dev->driver && drv->resume) | ||
428 | ret = drv->resume(of_dev); | ||
429 | return ret; | ||
430 | } | ||
431 | |||
432 | static int ibmebus_bus_pm_prepare(struct device *dev) | ||
433 | { | ||
434 | struct device_driver *drv = dev->driver; | ||
435 | int ret = 0; | ||
436 | |||
437 | if (drv && drv->pm && drv->pm->prepare) | ||
438 | ret = drv->pm->prepare(dev); | ||
439 | |||
440 | return ret; | ||
441 | } | ||
442 | |||
443 | static void ibmebus_bus_pm_complete(struct device *dev) | ||
444 | { | ||
445 | struct device_driver *drv = dev->driver; | ||
446 | |||
447 | if (drv && drv->pm && drv->pm->complete) | ||
448 | drv->pm->complete(dev); | ||
449 | } | ||
450 | |||
451 | #ifdef CONFIG_SUSPEND | ||
452 | |||
453 | static int ibmebus_bus_pm_suspend(struct device *dev) | ||
454 | { | ||
455 | struct device_driver *drv = dev->driver; | ||
456 | int ret = 0; | ||
457 | |||
458 | if (!drv) | ||
459 | return 0; | ||
460 | |||
461 | if (drv->pm) { | ||
462 | if (drv->pm->suspend) | ||
463 | ret = drv->pm->suspend(dev); | ||
464 | } else { | ||
465 | ret = ibmebus_bus_legacy_suspend(dev, PMSG_SUSPEND); | ||
466 | } | ||
467 | |||
468 | return ret; | ||
469 | } | ||
470 | |||
471 | static int ibmebus_bus_pm_suspend_noirq(struct device *dev) | ||
472 | { | ||
473 | struct device_driver *drv = dev->driver; | ||
474 | int ret = 0; | ||
475 | |||
476 | if (!drv) | ||
477 | return 0; | ||
478 | |||
479 | if (drv->pm) { | ||
480 | if (drv->pm->suspend_noirq) | ||
481 | ret = drv->pm->suspend_noirq(dev); | ||
482 | } | ||
483 | |||
484 | return ret; | ||
485 | } | ||
486 | |||
487 | static int ibmebus_bus_pm_resume(struct device *dev) | ||
488 | { | ||
489 | struct device_driver *drv = dev->driver; | ||
490 | int ret = 0; | ||
491 | |||
492 | if (!drv) | ||
493 | return 0; | ||
494 | |||
495 | if (drv->pm) { | ||
496 | if (drv->pm->resume) | ||
497 | ret = drv->pm->resume(dev); | ||
498 | } else { | ||
499 | ret = ibmebus_bus_legacy_resume(dev); | ||
500 | } | ||
501 | |||
502 | return ret; | ||
503 | } | ||
504 | |||
505 | static int ibmebus_bus_pm_resume_noirq(struct device *dev) | ||
506 | { | ||
507 | struct device_driver *drv = dev->driver; | ||
508 | int ret = 0; | ||
509 | |||
510 | if (!drv) | ||
511 | return 0; | ||
512 | |||
513 | if (drv->pm) { | ||
514 | if (drv->pm->resume_noirq) | ||
515 | ret = drv->pm->resume_noirq(dev); | ||
516 | } | ||
517 | |||
518 | return ret; | ||
519 | } | ||
520 | |||
521 | #else /* !CONFIG_SUSPEND */ | ||
522 | |||
523 | #define ibmebus_bus_pm_suspend NULL | ||
524 | #define ibmebus_bus_pm_resume NULL | ||
525 | #define ibmebus_bus_pm_suspend_noirq NULL | ||
526 | #define ibmebus_bus_pm_resume_noirq NULL | ||
527 | |||
528 | #endif /* !CONFIG_SUSPEND */ | ||
529 | |||
530 | #ifdef CONFIG_HIBERNATION | ||
531 | |||
532 | static int ibmebus_bus_pm_freeze(struct device *dev) | ||
533 | { | ||
534 | struct device_driver *drv = dev->driver; | ||
535 | int ret = 0; | ||
536 | |||
537 | if (!drv) | ||
538 | return 0; | ||
539 | |||
540 | if (drv->pm) { | ||
541 | if (drv->pm->freeze) | ||
542 | ret = drv->pm->freeze(dev); | ||
543 | } else { | ||
544 | ret = ibmebus_bus_legacy_suspend(dev, PMSG_FREEZE); | ||
545 | } | ||
546 | |||
547 | return ret; | ||
548 | } | ||
549 | |||
550 | static int ibmebus_bus_pm_freeze_noirq(struct device *dev) | ||
551 | { | ||
552 | struct device_driver *drv = dev->driver; | ||
553 | int ret = 0; | ||
554 | |||
555 | if (!drv) | ||
556 | return 0; | ||
557 | |||
558 | if (drv->pm) { | ||
559 | if (drv->pm->freeze_noirq) | ||
560 | ret = drv->pm->freeze_noirq(dev); | ||
561 | } | ||
562 | |||
563 | return ret; | ||
564 | } | ||
565 | |||
566 | static int ibmebus_bus_pm_thaw(struct device *dev) | ||
567 | { | ||
568 | struct device_driver *drv = dev->driver; | ||
569 | int ret = 0; | ||
570 | |||
571 | if (!drv) | ||
572 | return 0; | ||
573 | |||
574 | if (drv->pm) { | ||
575 | if (drv->pm->thaw) | ||
576 | ret = drv->pm->thaw(dev); | ||
577 | } else { | ||
578 | ret = ibmebus_bus_legacy_resume(dev); | ||
579 | } | ||
580 | |||
581 | return ret; | ||
582 | } | ||
583 | |||
584 | static int ibmebus_bus_pm_thaw_noirq(struct device *dev) | ||
585 | { | ||
586 | struct device_driver *drv = dev->driver; | ||
587 | int ret = 0; | ||
588 | |||
589 | if (!drv) | ||
590 | return 0; | ||
591 | |||
592 | if (drv->pm) { | ||
593 | if (drv->pm->thaw_noirq) | ||
594 | ret = drv->pm->thaw_noirq(dev); | ||
595 | } | ||
596 | |||
597 | return ret; | ||
598 | } | ||
599 | |||
600 | static int ibmebus_bus_pm_poweroff(struct device *dev) | ||
601 | { | ||
602 | struct device_driver *drv = dev->driver; | ||
603 | int ret = 0; | ||
604 | |||
605 | if (!drv) | ||
606 | return 0; | ||
607 | |||
608 | if (drv->pm) { | ||
609 | if (drv->pm->poweroff) | ||
610 | ret = drv->pm->poweroff(dev); | ||
611 | } else { | ||
612 | ret = ibmebus_bus_legacy_suspend(dev, PMSG_HIBERNATE); | ||
613 | } | ||
614 | |||
615 | return ret; | ||
616 | } | ||
617 | |||
618 | static int ibmebus_bus_pm_poweroff_noirq(struct device *dev) | ||
619 | { | ||
620 | struct device_driver *drv = dev->driver; | ||
621 | int ret = 0; | ||
622 | |||
623 | if (!drv) | ||
624 | return 0; | ||
625 | |||
626 | if (drv->pm) { | ||
627 | if (drv->pm->poweroff_noirq) | ||
628 | ret = drv->pm->poweroff_noirq(dev); | ||
629 | } | ||
630 | |||
631 | return ret; | ||
632 | } | ||
633 | |||
634 | static int ibmebus_bus_pm_restore(struct device *dev) | ||
635 | { | ||
636 | struct device_driver *drv = dev->driver; | ||
637 | int ret = 0; | ||
638 | |||
639 | if (!drv) | ||
640 | return 0; | ||
641 | |||
642 | if (drv->pm) { | ||
643 | if (drv->pm->restore) | ||
644 | ret = drv->pm->restore(dev); | ||
645 | } else { | ||
646 | ret = ibmebus_bus_legacy_resume(dev); | ||
647 | } | ||
648 | |||
649 | return ret; | ||
650 | } | ||
651 | |||
652 | static int ibmebus_bus_pm_restore_noirq(struct device *dev) | ||
653 | { | ||
654 | struct device_driver *drv = dev->driver; | ||
655 | int ret = 0; | ||
656 | |||
657 | if (!drv) | ||
658 | return 0; | ||
659 | |||
660 | if (drv->pm) { | ||
661 | if (drv->pm->restore_noirq) | ||
662 | ret = drv->pm->restore_noirq(dev); | ||
663 | } | ||
664 | |||
665 | return ret; | ||
666 | } | ||
667 | |||
668 | #else /* !CONFIG_HIBERNATION */ | ||
669 | |||
670 | #define ibmebus_bus_pm_freeze NULL | ||
671 | #define ibmebus_bus_pm_thaw NULL | ||
672 | #define ibmebus_bus_pm_poweroff NULL | ||
673 | #define ibmebus_bus_pm_restore NULL | ||
674 | #define ibmebus_bus_pm_freeze_noirq NULL | ||
675 | #define ibmebus_bus_pm_thaw_noirq NULL | ||
676 | #define ibmebus_bus_pm_poweroff_noirq NULL | ||
677 | #define ibmebus_bus_pm_restore_noirq NULL | ||
678 | |||
679 | #endif /* !CONFIG_HIBERNATION */ | ||
680 | |||
681 | static struct dev_pm_ops ibmebus_bus_dev_pm_ops = { | ||
682 | .prepare = ibmebus_bus_pm_prepare, | ||
683 | .complete = ibmebus_bus_pm_complete, | ||
684 | .suspend = ibmebus_bus_pm_suspend, | ||
685 | .resume = ibmebus_bus_pm_resume, | ||
686 | .freeze = ibmebus_bus_pm_freeze, | ||
687 | .thaw = ibmebus_bus_pm_thaw, | ||
688 | .poweroff = ibmebus_bus_pm_poweroff, | ||
689 | .restore = ibmebus_bus_pm_restore, | ||
690 | .suspend_noirq = ibmebus_bus_pm_suspend_noirq, | ||
691 | .resume_noirq = ibmebus_bus_pm_resume_noirq, | ||
692 | .freeze_noirq = ibmebus_bus_pm_freeze_noirq, | ||
693 | .thaw_noirq = ibmebus_bus_pm_thaw_noirq, | ||
694 | .poweroff_noirq = ibmebus_bus_pm_poweroff_noirq, | ||
695 | .restore_noirq = ibmebus_bus_pm_restore_noirq, | ||
696 | }; | ||
697 | |||
698 | #define IBMEBUS_BUS_PM_OPS_PTR (&ibmebus_bus_dev_pm_ops) | ||
699 | |||
700 | #else /* !CONFIG_PM_SLEEP */ | ||
701 | |||
702 | #define IBMEBUS_BUS_PM_OPS_PTR NULL | ||
703 | |||
704 | #endif /* !CONFIG_PM_SLEEP */ | ||
705 | |||
317 | struct bus_type ibmebus_bus_type = { | 706 | struct bus_type ibmebus_bus_type = { |
707 | .name = "ibmebus", | ||
318 | .uevent = of_device_uevent, | 708 | .uevent = of_device_uevent, |
319 | .bus_attrs = ibmebus_bus_attrs | 709 | .bus_attrs = ibmebus_bus_attrs, |
710 | .match = ibmebus_bus_bus_match, | ||
711 | .probe = ibmebus_bus_device_probe, | ||
712 | .remove = ibmebus_bus_device_remove, | ||
713 | .shutdown = ibmebus_bus_device_shutdown, | ||
714 | .dev_attrs = ibmebus_bus_device_attrs, | ||
715 | .pm = IBMEBUS_BUS_PM_OPS_PTR, | ||
320 | }; | 716 | }; |
321 | EXPORT_SYMBOL(ibmebus_bus_type); | 717 | EXPORT_SYMBOL(ibmebus_bus_type); |
322 | 718 | ||
@@ -326,7 +722,7 @@ static int __init ibmebus_bus_init(void) | |||
326 | 722 | ||
327 | printk(KERN_INFO "IBM eBus Device Driver\n"); | 723 | printk(KERN_INFO "IBM eBus Device Driver\n"); |
328 | 724 | ||
329 | err = of_bus_type_init(&ibmebus_bus_type, "ibmebus"); | 725 | err = bus_register(&ibmebus_bus_type); |
330 | if (err) { | 726 | if (err) { |
331 | printk(KERN_ERR "%s: failed to register IBM eBus.\n", | 727 | printk(KERN_ERR "%s: failed to register IBM eBus.\n", |
332 | __func__); | 728 | __func__); |
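The ibmebus hunk above fills in hibernation callbacks that prefer a driver's dev_pm_ops entry and otherwise fall back to the legacy suspend/resume path, then hangs the table off the bus through IBMEBUS_BUS_PM_OPS_PTR. A minimal sketch of that callback shape follows; the my_bus_* names and the legacy helpers are hypothetical stand-ins, not the real ibmebus code.

#include <linux/device.h>
#include <linux/pm.h>

/* Hypothetical legacy hooks standing in for the bus's old suspend/resume. */
static int my_bus_legacy_suspend(struct device *dev, pm_message_t state)
{
        return 0;       /* would invoke the driver's legacy ->suspend */
}

static int my_bus_legacy_resume(struct device *dev)
{
        return 0;       /* would invoke the driver's legacy ->resume */
}

static int my_bus_pm_freeze(struct device *dev)
{
        struct device_driver *drv = dev->driver;

        if (!drv)
                return 0;

        /* Prefer the driver's dev_pm_ops callback if it provides one... */
        if (drv->pm)
                return drv->pm->freeze ? drv->pm->freeze(dev) : 0;

        /* ...otherwise fall back to the legacy suspend entry point. */
        return my_bus_legacy_suspend(dev, PMSG_FREEZE);
}

static int my_bus_pm_restore(struct device *dev)
{
        struct device_driver *drv = dev->driver;

        if (!drv)
                return 0;

        if (drv->pm)
                return drv->pm->restore ? drv->pm->restore(dev) : 0;

        return my_bus_legacy_resume(dev);
}

static struct dev_pm_ops my_bus_pm_ops = {
        .freeze  = my_bus_pm_freeze,
        .restore = my_bus_pm_restore,
        /* thaw/poweroff and the *_noirq variants follow the same pattern */
};

Note that the *_noirq variants in the hunk skip the legacy fallback entirely, which is why only the regular freeze/thaw/poweroff/restore callbacks call the legacy helpers.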
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c index b2c363ef38ad..24582181b6ec 100644 --- a/arch/powerpc/kernel/of_platform.c +++ b/arch/powerpc/kernel/of_platform.c | |||
@@ -36,8 +36,7 @@ | |||
36 | * lacking some bits needed here. | 36 | * lacking some bits needed here. |
37 | */ | 37 | */ |
38 | 38 | ||
39 | static int __devinit of_pci_phb_probe(struct platform_device *dev, | 39 | static int __devinit of_pci_phb_probe(struct platform_device *dev) |
40 | const struct of_device_id *match) | ||
41 | { | 40 | { |
42 | struct pci_controller *phb; | 41 | struct pci_controller *phb; |
43 | 42 | ||
@@ -74,7 +73,7 @@ static int __devinit of_pci_phb_probe(struct platform_device *dev, | |||
74 | #endif /* CONFIG_EEH */ | 73 | #endif /* CONFIG_EEH */ |
75 | 74 | ||
76 | /* Scan the bus */ | 75 | /* Scan the bus */ |
77 | pcibios_scan_phb(phb, dev->dev.of_node); | 76 | pcibios_scan_phb(phb); |
78 | if (phb->bus == NULL) | 77 | if (phb->bus == NULL) |
79 | return -ENXIO; | 78 | return -ENXIO; |
80 | 79 | ||
@@ -104,7 +103,7 @@ static struct of_device_id of_pci_phb_ids[] = { | |||
104 | {} | 103 | {} |
105 | }; | 104 | }; |
106 | 105 | ||
107 | static struct of_platform_driver of_pci_phb_driver = { | 106 | static struct platform_driver of_pci_phb_driver = { |
108 | .probe = of_pci_phb_probe, | 107 | .probe = of_pci_phb_probe, |
109 | .driver = { | 108 | .driver = { |
110 | .name = "of-pci", | 109 | .name = "of-pci", |
@@ -115,7 +114,7 @@ static struct of_platform_driver of_pci_phb_driver = { | |||
115 | 114 | ||
116 | static __init int of_pci_phb_init(void) | 115 | static __init int of_pci_phb_init(void) |
117 | { | 116 | { |
118 | return of_register_platform_driver(&of_pci_phb_driver); | 117 | return platform_driver_register(&of_pci_phb_driver); |
119 | } | 118 | } |
120 | 119 | ||
121 | device_initcall(of_pci_phb_init); | 120 | device_initcall(of_pci_phb_init); |
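Many of the hunks that follow repeat one mechanical conversion: struct of_platform_driver becomes struct platform_driver, probe() loses its const struct of_device_id *match argument, and of_register_platform_driver()/of_unregister_platform_driver() become platform_driver_register()/platform_driver_unregister(). A hedged sketch of the converted shape, with made-up foo names and an illustrative compatible string:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

/* New-style probe: no of_device_id argument any more. */
static int foo_probe(struct platform_device *pdev)
{
        /* The matched device-tree node is still reachable via the device. */
        struct device_node *np = pdev->dev.of_node;

        return np ? 0 : -ENODEV;
}

static const struct of_device_id foo_match[] = {
        { .compatible = "acme,foo" },   /* hypothetical compatible string */
        { }
};

static struct platform_driver foo_driver = {
        .probe  = foo_probe,
        .driver = {
                .name           = "foo",
                .owner          = THIS_MODULE,
                .of_match_table = foo_match,
        },
};

static int __init foo_init(void)
{
        /* was: of_register_platform_driver(&foo_driver) */
        return platform_driver_register(&foo_driver);
}
device_initcall(foo_init);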
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index ebf9846f3c3b..f4adf89d7614 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c | |||
@@ -27,20 +27,6 @@ extern unsigned long __toc_start; | |||
27 | #ifdef CONFIG_PPC_BOOK3S | 27 | #ifdef CONFIG_PPC_BOOK3S |
28 | 28 | ||
29 | /* | 29 | /* |
30 | * We only have to have statically allocated lppaca structs on | ||
31 | * legacy iSeries, which supports at most 64 cpus. | ||
32 | */ | ||
33 | #ifdef CONFIG_PPC_ISERIES | ||
34 | #if NR_CPUS < 64 | ||
35 | #define NR_LPPACAS NR_CPUS | ||
36 | #else | ||
37 | #define NR_LPPACAS 64 | ||
38 | #endif | ||
39 | #else /* not iSeries */ | ||
40 | #define NR_LPPACAS 1 | ||
41 | #endif | ||
42 | |||
43 | /* | ||
44 | * The structure which the hypervisor knows about - this structure | 30 | * The structure which the hypervisor knows about - this structure |
45 | * should not cross a page boundary. The vpa_init/register_vpa call | 31 | * should not cross a page boundary. The vpa_init/register_vpa call |
46 | * is now known to fail if the lppaca structure crosses a page | 32 | * is now known to fail if the lppaca structure crosses a page |
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 10a44e68ef11..3cd85faa8ac6 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/bootmem.h> | 23 | #include <linux/bootmem.h> |
24 | #include <linux/of_address.h> | 24 | #include <linux/of_address.h> |
25 | #include <linux/of_pci.h> | ||
25 | #include <linux/mm.h> | 26 | #include <linux/mm.h> |
26 | #include <linux/list.h> | 27 | #include <linux/list.h> |
27 | #include <linux/syscalls.h> | 28 | #include <linux/syscalls.h> |
@@ -1687,13 +1688,8 @@ int early_find_capability(struct pci_controller *hose, int bus, int devfn, | |||
1687 | /** | 1688 | /** |
1688 | * pci_scan_phb - Given a pci_controller, setup and scan the PCI bus | 1689 | * pci_scan_phb - Given a pci_controller, setup and scan the PCI bus |
1689 | * @hose: Pointer to the PCI host controller instance structure | 1690 | * @hose: Pointer to the PCI host controller instance structure |
1690 | * @sysdata: value to use for sysdata pointer. ppc32 and ppc64 differ here | ||
1691 | * | ||
1692 | * Note: the 'data' pointer is a temporary measure. As 32 and 64 bit | ||
1693 | * pci code gets merged, this parameter should become unnecessary because | ||
1694 | * both will use the same value. | ||
1695 | */ | 1691 | */ |
1696 | void __devinit pcibios_scan_phb(struct pci_controller *hose, void *sysdata) | 1692 | void __devinit pcibios_scan_phb(struct pci_controller *hose) |
1697 | { | 1693 | { |
1698 | struct pci_bus *bus; | 1694 | struct pci_bus *bus; |
1699 | struct device_node *node = hose->dn; | 1695 | struct device_node *node = hose->dn; |
@@ -1703,13 +1699,13 @@ void __devinit pcibios_scan_phb(struct pci_controller *hose, void *sysdata) | |||
1703 | node ? node->full_name : "<NO NAME>"); | 1699 | node ? node->full_name : "<NO NAME>"); |
1704 | 1700 | ||
1705 | /* Create an empty bus for the toplevel */ | 1701 | /* Create an empty bus for the toplevel */ |
1706 | bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, | 1702 | bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, hose); |
1707 | sysdata); | ||
1708 | if (bus == NULL) { | 1703 | if (bus == NULL) { |
1709 | pr_err("Failed to create bus for PCI domain %04x\n", | 1704 | pr_err("Failed to create bus for PCI domain %04x\n", |
1710 | hose->global_number); | 1705 | hose->global_number); |
1711 | return; | 1706 | return; |
1712 | } | 1707 | } |
1708 | bus->dev.of_node = of_node_get(node); | ||
1713 | bus->secondary = hose->first_busno; | 1709 | bus->secondary = hose->first_busno; |
1714 | hose->bus = bus; | 1710 | hose->bus = bus; |
1715 | 1711 | ||
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index e7db5b48004a..bedb370459f2 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c | |||
@@ -381,7 +381,7 @@ static int __init pcibios_init(void) | |||
381 | if (pci_assign_all_buses) | 381 | if (pci_assign_all_buses) |
382 | hose->first_busno = next_busno; | 382 | hose->first_busno = next_busno; |
383 | hose->last_busno = 0xff; | 383 | hose->last_busno = 0xff; |
384 | pcibios_scan_phb(hose, hose); | 384 | pcibios_scan_phb(hose); |
385 | pci_bus_add_devices(hose->bus); | 385 | pci_bus_add_devices(hose->bus); |
386 | if (pci_assign_all_buses || next_busno <= hose->last_busno) | 386 | if (pci_assign_all_buses || next_busno <= hose->last_busno) |
387 | next_busno = hose->last_busno + pcibios_assign_bus_offset; | 387 | next_busno = hose->last_busno + pcibios_assign_bus_offset; |
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index 851577608a78..fc6452b6be9f 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c | |||
@@ -64,7 +64,7 @@ static int __init pcibios_init(void) | |||
64 | 64 | ||
65 | /* Scan all of the recorded PCI controllers. */ | 65 | /* Scan all of the recorded PCI controllers. */ |
66 | list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { | 66 | list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { |
67 | pcibios_scan_phb(hose, hose->dn); | 67 | pcibios_scan_phb(hose); |
68 | pci_bus_add_devices(hose->bus); | 68 | pci_bus_add_devices(hose->bus); |
69 | } | 69 | } |
70 | 70 | ||
@@ -242,10 +242,10 @@ long sys_pciconfig_iobase(long which, unsigned long in_bus, | |||
242 | break; | 242 | break; |
243 | bus = NULL; | 243 | bus = NULL; |
244 | } | 244 | } |
245 | if (bus == NULL || bus->sysdata == NULL) | 245 | if (bus == NULL || bus->dev.of_node == NULL) |
246 | return -ENODEV; | 246 | return -ENODEV; |
247 | 247 | ||
248 | hose_node = (struct device_node *)bus->sysdata; | 248 | hose_node = bus->dev.of_node; |
249 | hose = PCI_DN(hose_node)->phb; | 249 | hose = PCI_DN(hose_node)->phb; |
250 | 250 | ||
251 | switch (which) { | 251 | switch (which) { |
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c index d56b35ee7f74..29852688ceaa 100644 --- a/arch/powerpc/kernel/pci_dn.c +++ b/arch/powerpc/kernel/pci_dn.c | |||
@@ -161,7 +161,7 @@ static void *is_devfn_node(struct device_node *dn, void *data) | |||
161 | /* | 161 | /* |
162 | * This is the "slow" path for looking up a device_node from a | 162 | * This is the "slow" path for looking up a device_node from a |
163 | * pci_dev. It will hunt for the device under its parent's | 163 | * pci_dev. It will hunt for the device under its parent's |
164 | * phb and then update sysdata for a future fastpath. | 164 | * phb and then update of_node pointer. |
165 | * | 165 | * |
166 | * It may also do fixups on the actual device since this happens | 166 | * It may also do fixups on the actual device since this happens |
167 | * on the first read/write. | 167 | * on the first read/write. |
@@ -170,16 +170,19 @@ static void *is_devfn_node(struct device_node *dn, void *data) | |||
170 | * In this case it may probe for real hardware ("just in case") | 170 | * In this case it may probe for real hardware ("just in case") |
171 | * and add a device_node to the device tree if necessary. | 171 | * and add a device_node to the device tree if necessary. |
172 | * | 172 | * |
173 | * Is this function necessary anymore now that dev->dev.of_node is | ||
174 | * used to store the node pointer? | ||
175 | * | ||
173 | */ | 176 | */ |
174 | struct device_node *fetch_dev_dn(struct pci_dev *dev) | 177 | struct device_node *fetch_dev_dn(struct pci_dev *dev) |
175 | { | 178 | { |
176 | struct device_node *orig_dn = dev->sysdata; | 179 | struct device_node *orig_dn = dev->dev.of_node; |
177 | struct device_node *dn; | 180 | struct device_node *dn; |
178 | unsigned long searchval = (dev->bus->number << 8) | dev->devfn; | 181 | unsigned long searchval = (dev->bus->number << 8) | dev->devfn; |
179 | 182 | ||
180 | dn = traverse_pci_devices(orig_dn, is_devfn_node, (void *)searchval); | 183 | dn = traverse_pci_devices(orig_dn, is_devfn_node, (void *)searchval); |
181 | if (dn) | 184 | if (dn) |
182 | dev->sysdata = dn; | 185 | dev->dev.of_node = dn; |
183 | return dn; | 186 | return dn; |
184 | } | 187 | } |
185 | EXPORT_SYMBOL(fetch_dev_dn); | 188 | EXPORT_SYMBOL(fetch_dev_dn); |
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c index e751506323b4..1e89a72fd030 100644 --- a/arch/powerpc/kernel/pci_of_scan.c +++ b/arch/powerpc/kernel/pci_of_scan.c | |||
@@ -135,7 +135,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node, | |||
135 | pr_debug(" create device, devfn: %x, type: %s\n", devfn, type); | 135 | pr_debug(" create device, devfn: %x, type: %s\n", devfn, type); |
136 | 136 | ||
137 | dev->bus = bus; | 137 | dev->bus = bus; |
138 | dev->sysdata = node; | 138 | dev->dev.of_node = of_node_get(node); |
139 | dev->dev.parent = bus->bridge; | 139 | dev->dev.parent = bus->bridge; |
140 | dev->dev.bus = &pci_bus_type; | 140 | dev->dev.bus = &pci_bus_type; |
141 | dev->devfn = devfn; | 141 | dev->devfn = devfn; |
@@ -238,7 +238,7 @@ void __devinit of_scan_pci_bridge(struct device_node *node, | |||
238 | bus->primary = dev->bus->number; | 238 | bus->primary = dev->bus->number; |
239 | bus->subordinate = busrange[1]; | 239 | bus->subordinate = busrange[1]; |
240 | bus->bridge_ctl = 0; | 240 | bus->bridge_ctl = 0; |
241 | bus->sysdata = node; | 241 | bus->dev.of_node = of_node_get(node); |
242 | 242 | ||
243 | /* parse ranges property */ | 243 | /* parse ranges property */ |
244 | /* PCI #address-cells == 3 and #size-cells == 2 always */ | 244 | /* PCI #address-cells == 3 and #size-cells == 2 always */ |
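The pci_dn.c and pci_of_scan.c hunks above move the firmware node pointer out of the opaque sysdata fields and into the generic dev.of_node members of pci_dev and pci_bus, taking a reference with of_node_get() at creation time. A small caller-side sketch of the resulting lookup; pcidev_node() is an illustrative helper, not a kernel API:

#include <linux/of.h>
#include <linux/pci.h>

/* After the change the device-tree node hangs off the struct device
 * embedded in the pci_dev, so no cast through sysdata is needed. */
static struct device_node *pcidev_node(struct pci_dev *pdev)
{
        return pdev->dev.of_node;  /* was: (struct device_node *)pdev->sysdata */
}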
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c index c2b7a07cc3d3..47187cc2cf00 100644 --- a/arch/powerpc/kernel/prom_parse.c +++ b/arch/powerpc/kernel/prom_parse.c | |||
@@ -2,95 +2,11 @@ | |||
2 | 2 | ||
3 | #include <linux/kernel.h> | 3 | #include <linux/kernel.h> |
4 | #include <linux/string.h> | 4 | #include <linux/string.h> |
5 | #include <linux/pci_regs.h> | ||
6 | #include <linux/module.h> | 5 | #include <linux/module.h> |
7 | #include <linux/ioport.h> | 6 | #include <linux/ioport.h> |
8 | #include <linux/etherdevice.h> | 7 | #include <linux/etherdevice.h> |
9 | #include <linux/of_address.h> | 8 | #include <linux/of_address.h> |
10 | #include <asm/prom.h> | 9 | #include <asm/prom.h> |
11 | #include <asm/pci-bridge.h> | ||
12 | |||
13 | #ifdef CONFIG_PCI | ||
14 | int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq) | ||
15 | { | ||
16 | struct device_node *dn, *ppnode; | ||
17 | struct pci_dev *ppdev; | ||
18 | u32 lspec; | ||
19 | u32 laddr[3]; | ||
20 | u8 pin; | ||
21 | int rc; | ||
22 | |||
23 | /* Check if we have a device node, if yes, fallback to standard OF | ||
24 | * parsing | ||
25 | */ | ||
26 | dn = pci_device_to_OF_node(pdev); | ||
27 | if (dn) { | ||
28 | rc = of_irq_map_one(dn, 0, out_irq); | ||
29 | if (!rc) | ||
30 | return rc; | ||
31 | } | ||
32 | |||
33 | /* Ok, we don't, time to have fun. Let's start by building up an | ||
34 | * interrupt spec. we assume #interrupt-cells is 1, which is standard | ||
35 | * for PCI. If you do different, then don't use that routine. | ||
36 | */ | ||
37 | rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); | ||
38 | if (rc != 0) | ||
39 | return rc; | ||
40 | /* No pin, exit */ | ||
41 | if (pin == 0) | ||
42 | return -ENODEV; | ||
43 | |||
44 | /* Now we walk up the PCI tree */ | ||
45 | lspec = pin; | ||
46 | for (;;) { | ||
47 | /* Get the pci_dev of our parent */ | ||
48 | ppdev = pdev->bus->self; | ||
49 | |||
50 | /* Ouch, it's a host bridge... */ | ||
51 | if (ppdev == NULL) { | ||
52 | #ifdef CONFIG_PPC64 | ||
53 | ppnode = pci_bus_to_OF_node(pdev->bus); | ||
54 | #else | ||
55 | struct pci_controller *host; | ||
56 | host = pci_bus_to_host(pdev->bus); | ||
57 | ppnode = host ? host->dn : NULL; | ||
58 | #endif | ||
59 | /* No node for host bridge ? give up */ | ||
60 | if (ppnode == NULL) | ||
61 | return -EINVAL; | ||
62 | } else | ||
63 | /* We found a P2P bridge, check if it has a node */ | ||
64 | ppnode = pci_device_to_OF_node(ppdev); | ||
65 | |||
66 | /* Ok, we have found a parent with a device-node, hand over to | ||
67 | * the OF parsing code. | ||
68 | * We build a unit address from the linux device to be used for | ||
69 | * resolution. Note that we use the linux bus number which may | ||
70 | * not match your firmware bus numbering. | ||
71 | * Fortunately, in most cases, interrupt-map-mask doesn't include | ||
72 | * the bus number as part of the matching. | ||
73 | * You should still be careful about that though if you intend | ||
74 | * to rely on this function (you ship a firmware that doesn't | ||
75 | * create device nodes for all PCI devices). | ||
76 | */ | ||
77 | if (ppnode) | ||
78 | break; | ||
79 | |||
80 | /* We can only get here if we hit a P2P bridge with no node, | ||
81 | * let's do standard swizzling and try again | ||
82 | */ | ||
83 | lspec = pci_swizzle_interrupt_pin(pdev, lspec); | ||
84 | pdev = ppdev; | ||
85 | } | ||
86 | |||
87 | laddr[0] = (pdev->bus->number << 16) | ||
88 | | (pdev->devfn << 8); | ||
89 | laddr[1] = laddr[2] = 0; | ||
90 | return of_irq_map_raw(ppnode, &lspec, 1, laddr, out_irq); | ||
91 | } | ||
92 | EXPORT_SYMBOL_GPL(of_irq_map_pci); | ||
93 | #endif /* CONFIG_PCI */ | ||
94 | 10 | ||
95 | void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop, | 11 | void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop, |
96 | unsigned long *busno, unsigned long *phys, unsigned long *size) | 12 | unsigned long *busno, unsigned long *phys, unsigned long *size) |
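The prom_parse.c hunk deletes the powerpc copy of of_irq_map_pci(); together with the new #include <linux/of_pci.h> added to pci-common.c above, this suggests the helper now comes from generic OF/PCI code rather than arch/powerpc, though the destination is not visible in this section. From a caller's point of view nothing changes; a hedged usage sketch:

#include <linux/errno.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>   /* assumed home of the of_irq_map_pci() declaration */
#include <linux/pci.h>

static int map_pci_intx(struct pci_dev *pdev)
{
        struct of_irq oirq;

        /* Walk the device tree (or swizzle up through P2P bridges) to get
         * an interrupt specifier for this device's INTx pin. */
        if (of_irq_map_pci(pdev, &oirq))
                return -ENODEV;

        /* oirq.controller, oirq.specifier and oirq.size can now be handed
         * to the interrupt mapping code (irq_create_of_mapping() etc.). */
        return 0;
}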
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 8a0deefac08d..b9150f07d266 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S | |||
@@ -160,7 +160,7 @@ SECTIONS | |||
160 | INIT_RAM_FS | 160 | INIT_RAM_FS |
161 | } | 161 | } |
162 | 162 | ||
163 | PERCPU(PAGE_SIZE) | 163 | PERCPU(L1_CACHE_BYTES, PAGE_SIZE) |
164 | 164 | ||
165 | . = ALIGN(8); | 165 | . = ALIGN(8); |
166 | .machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) { | 166 | .machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) { |
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index fd4812329570..0dc95c0aa3be 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c | |||
@@ -1516,7 +1516,8 @@ int start_topology_update(void) | |||
1516 | { | 1516 | { |
1517 | int rc = 0; | 1517 | int rc = 0; |
1518 | 1518 | ||
1519 | if (firmware_has_feature(FW_FEATURE_VPHN) && | 1519 | /* Disabled until races with load balancing are fixed */ |
1520 | if (0 && firmware_has_feature(FW_FEATURE_VPHN) && | ||
1520 | get_lppaca()->shared_proc) { | 1521 | get_lppaca()->shared_proc) { |
1521 | vphn_enabled = 1; | 1522 | vphn_enabled = 1; |
1522 | setup_cpu_associativity_change_counters(); | 1523 | setup_cpu_associativity_change_counters(); |
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c b/arch/powerpc/platforms/52xx/mpc52xx_gpio.c index 0dad9a935eb5..1757d1db4b51 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_gpio.c | |||
@@ -147,8 +147,7 @@ mpc52xx_wkup_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) | |||
147 | return 0; | 147 | return 0; |
148 | } | 148 | } |
149 | 149 | ||
150 | static int __devinit mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev, | 150 | static int __devinit mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev) |
151 | const struct of_device_id *match) | ||
152 | { | 151 | { |
153 | struct mpc52xx_gpiochip *chip; | 152 | struct mpc52xx_gpiochip *chip; |
154 | struct mpc52xx_gpio_wkup __iomem *regs; | 153 | struct mpc52xx_gpio_wkup __iomem *regs; |
@@ -191,7 +190,7 @@ static const struct of_device_id mpc52xx_wkup_gpiochip_match[] = { | |||
191 | {} | 190 | {} |
192 | }; | 191 | }; |
193 | 192 | ||
194 | static struct of_platform_driver mpc52xx_wkup_gpiochip_driver = { | 193 | static struct platform_driver mpc52xx_wkup_gpiochip_driver = { |
195 | .driver = { | 194 | .driver = { |
196 | .name = "gpio_wkup", | 195 | .name = "gpio_wkup", |
197 | .owner = THIS_MODULE, | 196 | .owner = THIS_MODULE, |
@@ -310,8 +309,7 @@ mpc52xx_simple_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) | |||
310 | return 0; | 309 | return 0; |
311 | } | 310 | } |
312 | 311 | ||
313 | static int __devinit mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev, | 312 | static int __devinit mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev) |
314 | const struct of_device_id *match) | ||
315 | { | 313 | { |
316 | struct mpc52xx_gpiochip *chip; | 314 | struct mpc52xx_gpiochip *chip; |
317 | struct gpio_chip *gc; | 315 | struct gpio_chip *gc; |
@@ -349,7 +347,7 @@ static const struct of_device_id mpc52xx_simple_gpiochip_match[] = { | |||
349 | {} | 347 | {} |
350 | }; | 348 | }; |
351 | 349 | ||
352 | static struct of_platform_driver mpc52xx_simple_gpiochip_driver = { | 350 | static struct platform_driver mpc52xx_simple_gpiochip_driver = { |
353 | .driver = { | 351 | .driver = { |
354 | .name = "gpio", | 352 | .name = "gpio", |
355 | .owner = THIS_MODULE, | 353 | .owner = THIS_MODULE, |
@@ -361,10 +359,10 @@ static struct of_platform_driver mpc52xx_simple_gpiochip_driver = { | |||
361 | 359 | ||
362 | static int __init mpc52xx_gpio_init(void) | 360 | static int __init mpc52xx_gpio_init(void) |
363 | { | 361 | { |
364 | if (of_register_platform_driver(&mpc52xx_wkup_gpiochip_driver)) | 362 | if (platform_driver_register(&mpc52xx_wkup_gpiochip_driver)) |
365 | printk(KERN_ERR "Unable to register wakeup GPIO driver\n"); | 363 | printk(KERN_ERR "Unable to register wakeup GPIO driver\n"); |
366 | 364 | ||
367 | if (of_register_platform_driver(&mpc52xx_simple_gpiochip_driver)) | 365 | if (platform_driver_register(&mpc52xx_simple_gpiochip_driver)) |
368 | printk(KERN_ERR "Unable to register simple GPIO driver\n"); | 366 | printk(KERN_ERR "Unable to register simple GPIO driver\n"); |
369 | 367 | ||
370 | return 0; | 368 | return 0; |
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c index e0d703c7fdf7..859abf1c6d4b 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c | |||
@@ -721,8 +721,7 @@ static inline int mpc52xx_gpt_wdt_setup(struct mpc52xx_gpt_priv *gpt, | |||
721 | /* --------------------------------------------------------------------- | 721 | /* --------------------------------------------------------------------- |
722 | * of_platform bus binding code | 722 | * of_platform bus binding code |
723 | */ | 723 | */ |
724 | static int __devinit mpc52xx_gpt_probe(struct platform_device *ofdev, | 724 | static int __devinit mpc52xx_gpt_probe(struct platform_device *ofdev) |
725 | const struct of_device_id *match) | ||
726 | { | 725 | { |
727 | struct mpc52xx_gpt_priv *gpt; | 726 | struct mpc52xx_gpt_priv *gpt; |
728 | 727 | ||
@@ -781,7 +780,7 @@ static const struct of_device_id mpc52xx_gpt_match[] = { | |||
781 | {} | 780 | {} |
782 | }; | 781 | }; |
783 | 782 | ||
784 | static struct of_platform_driver mpc52xx_gpt_driver = { | 783 | static struct platform_driver mpc52xx_gpt_driver = { |
785 | .driver = { | 784 | .driver = { |
786 | .name = "mpc52xx-gpt", | 785 | .name = "mpc52xx-gpt", |
787 | .owner = THIS_MODULE, | 786 | .owner = THIS_MODULE, |
@@ -793,10 +792,7 @@ static struct of_platform_driver mpc52xx_gpt_driver = { | |||
793 | 792 | ||
794 | static int __init mpc52xx_gpt_init(void) | 793 | static int __init mpc52xx_gpt_init(void) |
795 | { | 794 | { |
796 | if (of_register_platform_driver(&mpc52xx_gpt_driver)) | 795 | return platform_driver_register(&mpc52xx_gpt_driver); |
797 | pr_err("error registering MPC52xx GPT driver\n"); | ||
798 | |||
799 | return 0; | ||
800 | } | 796 | } |
801 | 797 | ||
802 | /* Make sure GPIOs and IRQs get set up before anyone tries to use them */ | 798 | /* Make sure GPIOs and IRQs get set up before anyone tries to use them */ |
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c index f4ac213c89c0..6385d883cb8d 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c | |||
@@ -436,8 +436,7 @@ void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req) | |||
436 | } | 436 | } |
437 | EXPORT_SYMBOL(mpc52xx_lpbfifo_abort); | 437 | EXPORT_SYMBOL(mpc52xx_lpbfifo_abort); |
438 | 438 | ||
439 | static int __devinit mpc52xx_lpbfifo_probe(struct platform_device *op, | 439 | static int __devinit mpc52xx_lpbfifo_probe(struct platform_device *op) |
440 | const struct of_device_id *match) | ||
441 | { | 440 | { |
442 | struct resource res; | 441 | struct resource res; |
443 | int rc = -ENOMEM; | 442 | int rc = -ENOMEM; |
@@ -536,7 +535,7 @@ static struct of_device_id mpc52xx_lpbfifo_match[] __devinitconst = { | |||
536 | {}, | 535 | {}, |
537 | }; | 536 | }; |
538 | 537 | ||
539 | static struct of_platform_driver mpc52xx_lpbfifo_driver = { | 538 | static struct platform_driver mpc52xx_lpbfifo_driver = { |
540 | .driver = { | 539 | .driver = { |
541 | .name = "mpc52xx-lpbfifo", | 540 | .name = "mpc52xx-lpbfifo", |
542 | .owner = THIS_MODULE, | 541 | .owner = THIS_MODULE, |
@@ -551,14 +550,12 @@ static struct of_platform_driver mpc52xx_lpbfifo_driver = { | |||
551 | */ | 550 | */ |
552 | static int __init mpc52xx_lpbfifo_init(void) | 551 | static int __init mpc52xx_lpbfifo_init(void) |
553 | { | 552 | { |
554 | pr_debug("Registering LocalPlus bus FIFO driver\n"); | 553 | return platform_driver_register(&mpc52xx_lpbfifo_driver); |
555 | return of_register_platform_driver(&mpc52xx_lpbfifo_driver); | ||
556 | } | 554 | } |
557 | module_init(mpc52xx_lpbfifo_init); | 555 | module_init(mpc52xx_lpbfifo_init); |
558 | 556 | ||
559 | static void __exit mpc52xx_lpbfifo_exit(void) | 557 | static void __exit mpc52xx_lpbfifo_exit(void) |
560 | { | 558 | { |
561 | pr_debug("Unregistering LocalPlus bus FIFO driver\n"); | 559 | platform_driver_unregister(&mpc52xx_lpbfifo_driver); |
562 | of_unregister_platform_driver(&mpc52xx_lpbfifo_driver); | ||
563 | } | 560 | } |
564 | module_exit(mpc52xx_lpbfifo_exit); | 561 | module_exit(mpc52xx_lpbfifo_exit); |
diff --git a/arch/powerpc/platforms/82xx/ep8248e.c b/arch/powerpc/platforms/82xx/ep8248e.c index 1565e0446dc8..10ff526cd046 100644 --- a/arch/powerpc/platforms/82xx/ep8248e.c +++ b/arch/powerpc/platforms/82xx/ep8248e.c | |||
@@ -111,8 +111,7 @@ static struct mdiobb_ctrl ep8248e_mdio_ctrl = { | |||
111 | .ops = &ep8248e_mdio_ops, | 111 | .ops = &ep8248e_mdio_ops, |
112 | }; | 112 | }; |
113 | 113 | ||
114 | static int __devinit ep8248e_mdio_probe(struct platform_device *ofdev, | 114 | static int __devinit ep8248e_mdio_probe(struct platform_device *ofdev) |
115 | const struct of_device_id *match) | ||
116 | { | 115 | { |
117 | struct mii_bus *bus; | 116 | struct mii_bus *bus; |
118 | struct resource res; | 117 | struct resource res; |
@@ -167,7 +166,7 @@ static const struct of_device_id ep8248e_mdio_match[] = { | |||
167 | {}, | 166 | {}, |
168 | }; | 167 | }; |
169 | 168 | ||
170 | static struct of_platform_driver ep8248e_mdio_driver = { | 169 | static struct platform_driver ep8248e_mdio_driver = { |
171 | .driver = { | 170 | .driver = { |
172 | .name = "ep8248e-mdio-bitbang", | 171 | .name = "ep8248e-mdio-bitbang", |
173 | .owner = THIS_MODULE, | 172 | .owner = THIS_MODULE, |
@@ -308,7 +307,7 @@ static __initdata struct of_device_id of_bus_ids[] = { | |||
308 | static int __init declare_of_platform_devices(void) | 307 | static int __init declare_of_platform_devices(void) |
309 | { | 308 | { |
310 | of_platform_bus_probe(NULL, of_bus_ids, NULL); | 309 | of_platform_bus_probe(NULL, of_bus_ids, NULL); |
311 | of_register_platform_driver(&ep8248e_mdio_driver); | 310 | platform_driver_register(&ep8248e_mdio_driver); |
312 | 311 | ||
313 | return 0; | 312 | return 0; |
314 | } | 313 | } |
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c index fd4f2f2f19e6..188272934cfb 100644 --- a/arch/powerpc/platforms/83xx/suspend.c +++ b/arch/powerpc/platforms/83xx/suspend.c | |||
@@ -318,14 +318,18 @@ static const struct platform_suspend_ops mpc83xx_suspend_ops = { | |||
318 | .end = mpc83xx_suspend_end, | 318 | .end = mpc83xx_suspend_end, |
319 | }; | 319 | }; |
320 | 320 | ||
321 | static int pmc_probe(struct platform_device *ofdev, | 321 | static int pmc_probe(struct platform_device *ofdev) |
322 | const struct of_device_id *match) | ||
323 | { | 322 | { |
324 | struct device_node *np = ofdev->dev.of_node; | 323 | struct device_node *np = ofdev->dev.of_node; |
325 | struct resource res; | 324 | struct resource res; |
326 | struct pmc_type *type = match->data; | 325 | struct pmc_type *type; |
327 | int ret = 0; | 326 | int ret = 0; |
328 | 327 | ||
328 | if (!ofdev->dev.of_match) | ||
329 | return -EINVAL; | ||
330 | |||
331 | type = ofdev->dev.of_match->data; | ||
332 | |||
329 | if (!of_device_is_available(np)) | 333 | if (!of_device_is_available(np)) |
330 | return -ENODEV; | 334 | return -ENODEV; |
331 | 335 | ||
@@ -422,7 +426,7 @@ static struct of_device_id pmc_match[] = { | |||
422 | {} | 426 | {} |
423 | }; | 427 | }; |
424 | 428 | ||
425 | static struct of_platform_driver pmc_driver = { | 429 | static struct platform_driver pmc_driver = { |
426 | .driver = { | 430 | .driver = { |
427 | .name = "mpc83xx-pmc", | 431 | .name = "mpc83xx-pmc", |
428 | .owner = THIS_MODULE, | 432 | .owner = THIS_MODULE, |
@@ -434,7 +438,7 @@ static struct of_platform_driver pmc_driver = { | |||
434 | 438 | ||
435 | static int pmc_init(void) | 439 | static int pmc_init(void) |
436 | { | 440 | { |
437 | return of_register_platform_driver(&pmc_driver); | 441 | return platform_driver_register(&pmc_driver); |
438 | } | 442 | } |
439 | 443 | ||
440 | module_init(pmc_init); | 444 | module_init(pmc_init); |
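With the match argument gone from probe(), the mpc83xx pmc_probe() above (and fsl_of_msi_probe() further down) reads the matched of_device_id back through ofdev->dev.of_match and NULL-checks it before dereferencing ->data. A sketch of that pattern with hypothetical foo names; the dev.of_match field is taken as given from these hunks:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/platform_device.h>

struct foo_type {
        int flavour;            /* hypothetical per-compatible data */
};

static int foo_probe(struct platform_device *ofdev)
{
        const struct foo_type *type;

        if (!ofdev->dev.of_match)       /* no match data recorded: bail out */
                return -EINVAL;
        type = ofdev->dev.of_match->data;

        return type->flavour ? 0 : -ENODEV;
}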
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c index e3e379c6caa7..c35099af340e 100644 --- a/arch/powerpc/platforms/cell/axon_msi.c +++ b/arch/powerpc/platforms/cell/axon_msi.c | |||
@@ -328,7 +328,7 @@ static struct irq_host_ops msic_host_ops = { | |||
328 | .map = msic_host_map, | 328 | .map = msic_host_map, |
329 | }; | 329 | }; |
330 | 330 | ||
331 | static int axon_msi_shutdown(struct platform_device *device) | 331 | static void axon_msi_shutdown(struct platform_device *device) |
332 | { | 332 | { |
333 | struct axon_msic *msic = dev_get_drvdata(&device->dev); | 333 | struct axon_msic *msic = dev_get_drvdata(&device->dev); |
334 | u32 tmp; | 334 | u32 tmp; |
@@ -338,12 +338,9 @@ static int axon_msi_shutdown(struct platform_device *device) | |||
338 | tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG); | 338 | tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG); |
339 | tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE; | 339 | tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE; |
340 | msic_dcr_write(msic, MSIC_CTRL_REG, tmp); | 340 | msic_dcr_write(msic, MSIC_CTRL_REG, tmp); |
341 | |||
342 | return 0; | ||
343 | } | 341 | } |
344 | 342 | ||
345 | static int axon_msi_probe(struct platform_device *device, | 343 | static int axon_msi_probe(struct platform_device *device) |
346 | const struct of_device_id *device_id) | ||
347 | { | 344 | { |
348 | struct device_node *dn = device->dev.of_node; | 345 | struct device_node *dn = device->dev.of_node; |
349 | struct axon_msic *msic; | 346 | struct axon_msic *msic; |
@@ -446,7 +443,7 @@ static const struct of_device_id axon_msi_device_id[] = { | |||
446 | {} | 443 | {} |
447 | }; | 444 | }; |
448 | 445 | ||
449 | static struct of_platform_driver axon_msi_driver = { | 446 | static struct platform_driver axon_msi_driver = { |
450 | .probe = axon_msi_probe, | 447 | .probe = axon_msi_probe, |
451 | .shutdown = axon_msi_shutdown, | 448 | .shutdown = axon_msi_shutdown, |
452 | .driver = { | 449 | .driver = { |
@@ -458,7 +455,7 @@ static struct of_platform_driver axon_msi_driver = { | |||
458 | 455 | ||
459 | static int __init axon_msi_init(void) | 456 | static int __init axon_msi_init(void) |
460 | { | 457 | { |
461 | return of_register_platform_driver(&axon_msi_driver); | 458 | return platform_driver_register(&axon_msi_driver); |
462 | } | 459 | } |
463 | subsys_initcall(axon_msi_init); | 460 | subsys_initcall(axon_msi_init); |
464 | 461 | ||
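One detail of the conversion shows up only in the axon_msi hunk above: a platform_driver .shutdown callback returns void, so the converted axon_msi_shutdown() drops its trailing return 0. A tiny sketch with an invented bar driver:

#include <linux/platform_device.h>

static void bar_shutdown(struct platform_device *pdev)
{
        /* quiesce the hardware; shutdown has no status to report */
}

static struct platform_driver bar_driver = {
        .shutdown = bar_shutdown,
        .driver   = { .name = "bar" },
};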
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c index 187a7d32f86a..a3d2ce54ea2e 100644 --- a/arch/powerpc/platforms/cell/spufs/syscalls.c +++ b/arch/powerpc/platforms/cell/spufs/syscalls.c | |||
@@ -70,7 +70,7 @@ static long do_spu_create(const char __user *pathname, unsigned int flags, | |||
70 | if (!IS_ERR(tmp)) { | 70 | if (!IS_ERR(tmp)) { |
71 | struct nameidata nd; | 71 | struct nameidata nd; |
72 | 72 | ||
73 | ret = path_lookup(tmp, LOOKUP_PARENT, &nd); | 73 | ret = kern_path_parent(tmp, &nd); |
74 | if (!ret) { | 74 | if (!ret) { |
75 | nd.flags |= LOOKUP_OPEN | LOOKUP_CREATE; | 75 | nd.flags |= LOOKUP_OPEN | LOOKUP_CREATE; |
76 | ret = spufs_create(&nd, flags, mode, neighbor); | 76 | ret = spufs_create(&nd, flags, mode, neighbor); |
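The spufs hunk above swaps path_lookup(tmp, LOOKUP_PARENT, &nd) for kern_path_parent(tmp, &nd): the new helper resolves everything but the final path component into the nameidata. A caller-side sketch under that assumption; create_under_parent() is illustrative only:

#include <linux/namei.h>
#include <linux/path.h>

static int create_under_parent(const char *name)
{
        struct nameidata nd;
        int ret;

        ret = kern_path_parent(name, &nd);      /* parent directory in nd.path */
        if (ret)
                return ret;

        /* ...look up or create the final component under nd.path here... */

        path_put(&nd.path);
        return 0;
}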
diff --git a/arch/powerpc/platforms/iseries/dt.c b/arch/powerpc/platforms/iseries/dt.c index fdb7384c0c4f..f0491cc28900 100644 --- a/arch/powerpc/platforms/iseries/dt.c +++ b/arch/powerpc/platforms/iseries/dt.c | |||
@@ -242,8 +242,8 @@ static void __init dt_cpus(struct iseries_flat_dt *dt) | |||
242 | pft_size[0] = 0; /* NUMA CEC cookie, 0 for non NUMA */ | 242 | pft_size[0] = 0; /* NUMA CEC cookie, 0 for non NUMA */ |
243 | pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE); | 243 | pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE); |
244 | 244 | ||
245 | for (i = 0; i < NR_CPUS; i++) { | 245 | for (i = 0; i < NR_LPPACAS; i++) { |
246 | if (lppaca_of(i).dyn_proc_status >= 2) | 246 | if (lppaca[i].dyn_proc_status >= 2) |
247 | continue; | 247 | continue; |
248 | 248 | ||
249 | snprintf(p, 32 - (p - buf), "@%d", i); | 249 | snprintf(p, 32 - (p - buf), "@%d", i); |
@@ -251,7 +251,7 @@ static void __init dt_cpus(struct iseries_flat_dt *dt) | |||
251 | 251 | ||
252 | dt_prop_str(dt, "device_type", device_type_cpu); | 252 | dt_prop_str(dt, "device_type", device_type_cpu); |
253 | 253 | ||
254 | index = lppaca_of(i).dyn_hv_phys_proc_index; | 254 | index = lppaca[i].dyn_hv_phys_proc_index; |
255 | d = &xIoHriProcessorVpd[index]; | 255 | d = &xIoHriProcessorVpd[index]; |
256 | 256 | ||
257 | dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024); | 257 | dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024); |
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c index b0863410517f..2946ae10fbfd 100644 --- a/arch/powerpc/platforms/iseries/setup.c +++ b/arch/powerpc/platforms/iseries/setup.c | |||
@@ -680,6 +680,7 @@ void * __init iSeries_early_setup(void) | |||
680 | * on but calling this function multiple times is fine. | 680 | * on but calling this function multiple times is fine. |
681 | */ | 681 | */ |
682 | identify_cpu(0, mfspr(SPRN_PVR)); | 682 | identify_cpu(0, mfspr(SPRN_PVR)); |
683 | initialise_paca(&boot_paca, 0); | ||
683 | 684 | ||
684 | powerpc_firmware_features |= FW_FEATURE_ISERIES; | 685 | powerpc_firmware_features |= FW_FEATURE_ISERIES; |
685 | powerpc_firmware_features |= FW_FEATURE_LPAR; | 686 | powerpc_firmware_features |= FW_FEATURE_LPAR; |
diff --git a/arch/powerpc/platforms/pasemi/gpio_mdio.c b/arch/powerpc/platforms/pasemi/gpio_mdio.c index a5d907b5a4c2..9886296e08da 100644 --- a/arch/powerpc/platforms/pasemi/gpio_mdio.c +++ b/arch/powerpc/platforms/pasemi/gpio_mdio.c | |||
@@ -216,8 +216,7 @@ static int gpio_mdio_reset(struct mii_bus *bus) | |||
216 | } | 216 | } |
217 | 217 | ||
218 | 218 | ||
219 | static int __devinit gpio_mdio_probe(struct platform_device *ofdev, | 219 | static int __devinit gpio_mdio_probe(struct platform_device *ofdev) |
220 | const struct of_device_id *match) | ||
221 | { | 220 | { |
222 | struct device *dev = &ofdev->dev; | 221 | struct device *dev = &ofdev->dev; |
223 | struct device_node *np = ofdev->dev.of_node; | 222 | struct device_node *np = ofdev->dev.of_node; |
@@ -299,7 +298,7 @@ static struct of_device_id gpio_mdio_match[] = | |||
299 | }; | 298 | }; |
300 | MODULE_DEVICE_TABLE(of, gpio_mdio_match); | 299 | MODULE_DEVICE_TABLE(of, gpio_mdio_match); |
301 | 300 | ||
302 | static struct of_platform_driver gpio_mdio_driver = | 301 | static struct platform_driver gpio_mdio_driver = |
303 | { | 302 | { |
304 | .probe = gpio_mdio_probe, | 303 | .probe = gpio_mdio_probe, |
305 | .remove = gpio_mdio_remove, | 304 | .remove = gpio_mdio_remove, |
@@ -326,13 +325,13 @@ int gpio_mdio_init(void) | |||
326 | if (!gpio_regs) | 325 | if (!gpio_regs) |
327 | return -ENODEV; | 326 | return -ENODEV; |
328 | 327 | ||
329 | return of_register_platform_driver(&gpio_mdio_driver); | 328 | return platform_driver_register(&gpio_mdio_driver); |
330 | } | 329 | } |
331 | module_init(gpio_mdio_init); | 330 | module_init(gpio_mdio_init); |
332 | 331 | ||
333 | void gpio_mdio_exit(void) | 332 | void gpio_mdio_exit(void) |
334 | { | 333 | { |
335 | of_unregister_platform_driver(&gpio_mdio_driver); | 334 | platform_driver_unregister(&gpio_mdio_driver); |
336 | if (gpio_regs) | 335 | if (gpio_regs) |
337 | iounmap(gpio_regs); | 336 | iounmap(gpio_regs); |
338 | } | 337 | } |
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index bc8803664140..33867ec4a234 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c | |||
@@ -17,6 +17,54 @@ | |||
17 | #include <asm/pSeries_reconfig.h> | 17 | #include <asm/pSeries_reconfig.h> |
18 | #include <asm/sparsemem.h> | 18 | #include <asm/sparsemem.h> |
19 | 19 | ||
20 | static unsigned long get_memblock_size(void) | ||
21 | { | ||
22 | struct device_node *np; | ||
23 | unsigned int memblock_size = 0; | ||
24 | |||
25 | np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); | ||
26 | if (np) { | ||
27 | const unsigned long *size; | ||
28 | |||
29 | size = of_get_property(np, "ibm,lmb-size", NULL); | ||
30 | memblock_size = size ? *size : 0; | ||
31 | |||
32 | of_node_put(np); | ||
33 | } else { | ||
34 | unsigned int memzero_size = 0; | ||
35 | const unsigned int *regs; | ||
36 | |||
37 | np = of_find_node_by_path("/memory@0"); | ||
38 | if (np) { | ||
39 | regs = of_get_property(np, "reg", NULL); | ||
40 | memzero_size = regs ? regs[3] : 0; | ||
41 | of_node_put(np); | ||
42 | } | ||
43 | |||
44 | if (memzero_size) { | ||
45 | /* We now know the size of memory@0, use this to find | ||
46 | * the first memoryblock and get its size. | ||
47 | */ | ||
48 | char buf[64]; | ||
49 | |||
50 | sprintf(buf, "/memory@%x", memzero_size); | ||
51 | np = of_find_node_by_path(buf); | ||
52 | if (np) { | ||
53 | regs = of_get_property(np, "reg", NULL); | ||
54 | memblock_size = regs ? regs[3] : 0; | ||
55 | of_node_put(np); | ||
56 | } | ||
57 | } | ||
58 | } | ||
59 | |||
60 | return memblock_size; | ||
61 | } | ||
62 | |||
63 | unsigned long memory_block_size_bytes(void) | ||
64 | { | ||
65 | return get_memblock_size(); | ||
66 | } | ||
67 | |||
20 | static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size) | 68 | static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size) |
21 | { | 69 | { |
22 | unsigned long start, start_pfn; | 70 | unsigned long start, start_pfn; |
@@ -127,30 +175,22 @@ static int pseries_add_memory(struct device_node *np) | |||
127 | 175 | ||
128 | static int pseries_drconf_memory(unsigned long *base, unsigned int action) | 176 | static int pseries_drconf_memory(unsigned long *base, unsigned int action) |
129 | { | 177 | { |
130 | struct device_node *np; | 178 | unsigned long memblock_size; |
131 | const unsigned long *lmb_size; | ||
132 | int rc; | 179 | int rc; |
133 | 180 | ||
134 | np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); | 181 | memblock_size = get_memblock_size(); |
135 | if (!np) | 182 | if (!memblock_size) |
136 | return -EINVAL; | 183 | return -EINVAL; |
137 | 184 | ||
138 | lmb_size = of_get_property(np, "ibm,lmb-size", NULL); | ||
139 | if (!lmb_size) { | ||
140 | of_node_put(np); | ||
141 | return -EINVAL; | ||
142 | } | ||
143 | |||
144 | if (action == PSERIES_DRCONF_MEM_ADD) { | 185 | if (action == PSERIES_DRCONF_MEM_ADD) { |
145 | rc = memblock_add(*base, *lmb_size); | 186 | rc = memblock_add(*base, memblock_size); |
146 | rc = (rc < 0) ? -EINVAL : 0; | 187 | rc = (rc < 0) ? -EINVAL : 0; |
147 | } else if (action == PSERIES_DRCONF_MEM_REMOVE) { | 188 | } else if (action == PSERIES_DRCONF_MEM_REMOVE) { |
148 | rc = pseries_remove_memblock(*base, *lmb_size); | 189 | rc = pseries_remove_memblock(*base, memblock_size); |
149 | } else { | 190 | } else { |
150 | rc = -EINVAL; | 191 | rc = -EINVAL; |
151 | } | 192 | } |
152 | 193 | ||
153 | of_node_put(np); | ||
154 | return rc; | 194 | return rc; |
155 | } | 195 | } |
156 | 196 | ||
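The hotplug-memory.c hunk factors the LMB-size lookup into get_memblock_size() and exports it as memory_block_size_bytes(). The usual mechanism, assumed here rather than shown in this section, is that the driver core supplies a weak default for that function and an architecture's strong definition (like the pseries one above) overrides it at link time; a hedged sketch of such a default:

#include <linux/kernel.h>

/* Illustrative weak fallback only: real generic code would likely tie this
 * to the sparsemem section size rather than a literal. */
unsigned long __weak memory_block_size_bytes(void)
{
        return 16UL << 20;      /* 16 MB placeholder block size */
}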
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c index 5fcc92a12d3e..3bf4488aaec6 100644 --- a/arch/powerpc/platforms/pseries/pci_dlpar.c +++ b/arch/powerpc/platforms/pseries/pci_dlpar.c | |||
@@ -149,7 +149,7 @@ struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn) | |||
149 | if (dn->child) | 149 | if (dn->child) |
150 | eeh_add_device_tree_early(dn); | 150 | eeh_add_device_tree_early(dn); |
151 | 151 | ||
152 | pcibios_scan_phb(phb, dn); | 152 | pcibios_scan_phb(phb); |
153 | pcibios_finish_adding_to_bus(phb->bus); | 153 | pcibios_finish_adding_to_bus(phb->bus); |
154 | 154 | ||
155 | return phb; | 155 | return phb; |
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c index 2659a60bd7b8..27402c7d309d 100644 --- a/arch/powerpc/sysdev/axonram.c +++ b/arch/powerpc/sysdev/axonram.c | |||
@@ -172,10 +172,9 @@ static const struct block_device_operations axon_ram_devops = { | |||
172 | 172 | ||
173 | /** | 173 | /** |
174 | * axon_ram_probe - probe() method for platform driver | 174 | * axon_ram_probe - probe() method for platform driver |
175 | * @device, @device_id: see of_platform_driver method | 175 | * @device: see platform_driver method |
176 | */ | 176 | */ |
177 | static int axon_ram_probe(struct platform_device *device, | 177 | static int axon_ram_probe(struct platform_device *device) |
178 | const struct of_device_id *device_id) | ||
179 | { | 178 | { |
180 | static int axon_ram_bank_id = -1; | 179 | static int axon_ram_bank_id = -1; |
181 | struct axon_ram_bank *bank; | 180 | struct axon_ram_bank *bank; |
@@ -326,7 +325,7 @@ static struct of_device_id axon_ram_device_id[] = { | |||
326 | {} | 325 | {} |
327 | }; | 326 | }; |
328 | 327 | ||
329 | static struct of_platform_driver axon_ram_driver = { | 328 | static struct platform_driver axon_ram_driver = { |
330 | .probe = axon_ram_probe, | 329 | .probe = axon_ram_probe, |
331 | .remove = axon_ram_remove, | 330 | .remove = axon_ram_remove, |
332 | .driver = { | 331 | .driver = { |
@@ -350,7 +349,7 @@ axon_ram_init(void) | |||
350 | } | 349 | } |
351 | azfs_minor = 0; | 350 | azfs_minor = 0; |
352 | 351 | ||
353 | return of_register_platform_driver(&axon_ram_driver); | 352 | return platform_driver_register(&axon_ram_driver); |
354 | } | 353 | } |
355 | 354 | ||
356 | /** | 355 | /** |
@@ -359,7 +358,7 @@ axon_ram_init(void) | |||
359 | static void __exit | 358 | static void __exit |
360 | axon_ram_exit(void) | 359 | axon_ram_exit(void) |
361 | { | 360 | { |
362 | of_unregister_platform_driver(&axon_ram_driver); | 361 | platform_driver_unregister(&axon_ram_driver); |
363 | unregister_blkdev(azfs_major, AXON_RAM_DEVICE_NAME); | 362 | unregister_blkdev(azfs_major, AXON_RAM_DEVICE_NAME); |
364 | } | 363 | } |
365 | 364 | ||
diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.c b/arch/powerpc/sysdev/bestcomm/bestcomm.c index 650256115064..b3fbb271be87 100644 --- a/arch/powerpc/sysdev/bestcomm/bestcomm.c +++ b/arch/powerpc/sysdev/bestcomm/bestcomm.c | |||
@@ -365,8 +365,7 @@ bcom_engine_cleanup(void) | |||
365 | /* OF platform driver */ | 365 | /* OF platform driver */ |
366 | /* ======================================================================== */ | 366 | /* ======================================================================== */ |
367 | 367 | ||
368 | static int __devinit mpc52xx_bcom_probe(struct platform_device *op, | 368 | static int __devinit mpc52xx_bcom_probe(struct platform_device *op) |
369 | const struct of_device_id *match) | ||
370 | { | 369 | { |
371 | struct device_node *ofn_sram; | 370 | struct device_node *ofn_sram; |
372 | struct resource res_bcom; | 371 | struct resource res_bcom; |
@@ -492,7 +491,7 @@ static struct of_device_id mpc52xx_bcom_of_match[] = { | |||
492 | MODULE_DEVICE_TABLE(of, mpc52xx_bcom_of_match); | 491 | MODULE_DEVICE_TABLE(of, mpc52xx_bcom_of_match); |
493 | 492 | ||
494 | 493 | ||
495 | static struct of_platform_driver mpc52xx_bcom_of_platform_driver = { | 494 | static struct platform_driver mpc52xx_bcom_of_platform_driver = { |
496 | .probe = mpc52xx_bcom_probe, | 495 | .probe = mpc52xx_bcom_probe, |
497 | .remove = mpc52xx_bcom_remove, | 496 | .remove = mpc52xx_bcom_remove, |
498 | .driver = { | 497 | .driver = { |
@@ -510,13 +509,13 @@ static struct of_platform_driver mpc52xx_bcom_of_platform_driver = { | |||
510 | static int __init | 509 | static int __init |
511 | mpc52xx_bcom_init(void) | 510 | mpc52xx_bcom_init(void) |
512 | { | 511 | { |
513 | return of_register_platform_driver(&mpc52xx_bcom_of_platform_driver); | 512 | return platform_driver_register(&mpc52xx_bcom_of_platform_driver); |
514 | } | 513 | } |
515 | 514 | ||
516 | static void __exit | 515 | static void __exit |
517 | mpc52xx_bcom_exit(void) | 516 | mpc52xx_bcom_exit(void) |
518 | { | 517 | { |
519 | of_unregister_platform_driver(&mpc52xx_bcom_of_platform_driver); | 518 | platform_driver_unregister(&mpc52xx_bcom_of_platform_driver); |
520 | } | 519 | } |
521 | 520 | ||
522 | /* If we're not a module, we must make sure everything is setup before */ | 521 | /* If we're not a module, we must make sure everything is setup before */ |
diff --git a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c index cc8d6556d799..2b9f0c925326 100644 --- a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c +++ b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c | |||
@@ -71,8 +71,7 @@ static int __init get_offset_from_cmdline(char *str) | |||
71 | __setup("cache-sram-size=", get_size_from_cmdline); | 71 | __setup("cache-sram-size=", get_size_from_cmdline); |
72 | __setup("cache-sram-offset=", get_offset_from_cmdline); | 72 | __setup("cache-sram-offset=", get_offset_from_cmdline); |
73 | 73 | ||
74 | static int __devinit mpc85xx_l2ctlr_of_probe(struct platform_device *dev, | 74 | static int __devinit mpc85xx_l2ctlr_of_probe(struct platform_device *dev) |
75 | const struct of_device_id *match) | ||
76 | { | 75 | { |
77 | long rval; | 76 | long rval; |
78 | unsigned int rem; | 77 | unsigned int rem; |
@@ -204,7 +203,7 @@ static struct of_device_id mpc85xx_l2ctlr_of_match[] = { | |||
204 | {}, | 203 | {}, |
205 | }; | 204 | }; |
206 | 205 | ||
207 | static struct of_platform_driver mpc85xx_l2ctlr_of_platform_driver = { | 206 | static struct platform_driver mpc85xx_l2ctlr_of_platform_driver = { |
208 | .driver = { | 207 | .driver = { |
209 | .name = "fsl-l2ctlr", | 208 | .name = "fsl-l2ctlr", |
210 | .owner = THIS_MODULE, | 209 | .owner = THIS_MODULE, |
@@ -216,12 +215,12 @@ static struct of_platform_driver mpc85xx_l2ctlr_of_platform_driver = { | |||
216 | 215 | ||
217 | static __init int mpc85xx_l2ctlr_of_init(void) | 216 | static __init int mpc85xx_l2ctlr_of_init(void) |
218 | { | 217 | { |
219 | return of_register_platform_driver(&mpc85xx_l2ctlr_of_platform_driver); | 218 | return platform_driver_register(&mpc85xx_l2ctlr_of_platform_driver); |
220 | } | 219 | } |
221 | 220 | ||
222 | static void __exit mpc85xx_l2ctlr_of_exit(void) | 221 | static void __exit mpc85xx_l2ctlr_of_exit(void) |
223 | { | 222 | { |
224 | of_unregister_platform_driver(&mpc85xx_l2ctlr_of_platform_driver); | 223 | platform_driver_unregister(&mpc85xx_l2ctlr_of_platform_driver); |
225 | } | 224 | } |
226 | 225 | ||
227 | subsys_initcall(mpc85xx_l2ctlr_of_init); | 226 | subsys_initcall(mpc85xx_l2ctlr_of_init); |
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index 108d76fa8f1c..ee6a8a52ac71 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c | |||
@@ -273,8 +273,7 @@ static int fsl_of_msi_remove(struct platform_device *ofdev) | |||
273 | return 0; | 273 | return 0; |
274 | } | 274 | } |
275 | 275 | ||
276 | static int __devinit fsl_of_msi_probe(struct platform_device *dev, | 276 | static int __devinit fsl_of_msi_probe(struct platform_device *dev) |
277 | const struct of_device_id *match) | ||
278 | { | 277 | { |
279 | struct fsl_msi *msi; | 278 | struct fsl_msi *msi; |
280 | struct resource res; | 279 | struct resource res; |
@@ -282,11 +281,15 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev, | |||
282 | int rc; | 281 | int rc; |
283 | int virt_msir; | 282 | int virt_msir; |
284 | const u32 *p; | 283 | const u32 *p; |
285 | struct fsl_msi_feature *features = match->data; | 284 | struct fsl_msi_feature *features; |
286 | struct fsl_msi_cascade_data *cascade_data = NULL; | 285 | struct fsl_msi_cascade_data *cascade_data = NULL; |
287 | int len; | 286 | int len; |
288 | u32 offset; | 287 | u32 offset; |
289 | 288 | ||
289 | if (!dev->dev.of_match) | ||
290 | return -EINVAL; | ||
291 | features = dev->dev.of_match->data; | ||
292 | |||
290 | printk(KERN_DEBUG "Setting up Freescale MSI support\n"); | 293 | printk(KERN_DEBUG "Setting up Freescale MSI support\n"); |
291 | 294 | ||
292 | msi = kzalloc(sizeof(struct fsl_msi), GFP_KERNEL); | 295 | msi = kzalloc(sizeof(struct fsl_msi), GFP_KERNEL); |
@@ -411,7 +414,7 @@ static const struct of_device_id fsl_of_msi_ids[] = { | |||
411 | {} | 414 | {} |
412 | }; | 415 | }; |
413 | 416 | ||
414 | static struct of_platform_driver fsl_of_msi_driver = { | 417 | static struct platform_driver fsl_of_msi_driver = { |
415 | .driver = { | 418 | .driver = { |
416 | .name = "fsl-msi", | 419 | .name = "fsl-msi", |
417 | .owner = THIS_MODULE, | 420 | .owner = THIS_MODULE, |
@@ -423,7 +426,7 @@ static struct of_platform_driver fsl_of_msi_driver = { | |||
423 | 426 | ||
424 | static __init int fsl_of_msi_init(void) | 427 | static __init int fsl_of_msi_init(void) |
425 | { | 428 | { |
426 | return of_register_platform_driver(&fsl_of_msi_driver); | 429 | return platform_driver_register(&fsl_of_msi_driver); |
427 | } | 430 | } |
428 | 431 | ||
429 | subsys_initcall(fsl_of_msi_init); | 432 | subsys_initcall(fsl_of_msi_init); |
diff --git a/arch/powerpc/sysdev/fsl_pmc.c b/arch/powerpc/sysdev/fsl_pmc.c index e9381bfefb21..f122e8961d32 100644 --- a/arch/powerpc/sysdev/fsl_pmc.c +++ b/arch/powerpc/sysdev/fsl_pmc.c | |||
@@ -58,8 +58,7 @@ static const struct platform_suspend_ops pmc_suspend_ops = { | |||
58 | .enter = pmc_suspend_enter, | 58 | .enter = pmc_suspend_enter, |
59 | }; | 59 | }; |
60 | 60 | ||
61 | static int pmc_probe(struct platform_device *ofdev, | 61 | static int pmc_probe(struct platform_device *ofdev) |
62 | const struct of_device_id *id) | ||
63 | { | 62 | { |
64 | pmc_regs = of_iomap(ofdev->dev.of_node, 0); | 63 | pmc_regs = of_iomap(ofdev->dev.of_node, 0); |
65 | if (!pmc_regs) | 64 | if (!pmc_regs) |
@@ -76,7 +75,7 @@ static const struct of_device_id pmc_ids[] = { | |||
76 | { }, | 75 | { }, |
77 | }; | 76 | }; |
78 | 77 | ||
79 | static struct of_platform_driver pmc_driver = { | 78 | static struct platform_driver pmc_driver = { |
80 | .driver = { | 79 | .driver = { |
81 | .name = "fsl-pmc", | 80 | .name = "fsl-pmc", |
82 | .owner = THIS_MODULE, | 81 | .owner = THIS_MODULE, |
@@ -87,6 +86,6 @@ static struct of_platform_driver pmc_driver = { | |||
87 | 86 | ||
88 | static int __init pmc_init(void) | 87 | static int __init pmc_init(void) |
89 | { | 88 | { |
90 | return of_register_platform_driver(&pmc_driver); | 89 | return platform_driver_register(&pmc_driver); |
91 | } | 90 | } |
92 | device_initcall(pmc_init); | 91 | device_initcall(pmc_init); |
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index 8c6cab013278..3eff2c3a9ad5 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c | |||
@@ -1570,8 +1570,7 @@ err_ops: | |||
1570 | 1570 | ||
1571 | /* The probe function for RapidIO peer-to-peer network. | 1571 | /* The probe function for RapidIO peer-to-peer network. |
1572 | */ | 1572 | */ |
1573 | static int __devinit fsl_of_rio_rpn_probe(struct platform_device *dev, | 1573 | static int __devinit fsl_of_rio_rpn_probe(struct platform_device *dev) |
1574 | const struct of_device_id *match) | ||
1575 | { | 1574 | { |
1576 | int rc; | 1575 | int rc; |
1577 | printk(KERN_INFO "Setting up RapidIO peer-to-peer network %s\n", | 1576 | printk(KERN_INFO "Setting up RapidIO peer-to-peer network %s\n", |
@@ -1594,7 +1593,7 @@ static const struct of_device_id fsl_of_rio_rpn_ids[] = { | |||
1594 | {}, | 1593 | {}, |
1595 | }; | 1594 | }; |
1596 | 1595 | ||
1597 | static struct of_platform_driver fsl_of_rio_rpn_driver = { | 1596 | static struct platform_driver fsl_of_rio_rpn_driver = { |
1598 | .driver = { | 1597 | .driver = { |
1599 | .name = "fsl-of-rio", | 1598 | .name = "fsl-of-rio", |
1600 | .owner = THIS_MODULE, | 1599 | .owner = THIS_MODULE, |
@@ -1605,7 +1604,7 @@ static struct of_platform_driver fsl_of_rio_rpn_driver = { | |||
1605 | 1604 | ||
1606 | static __init int fsl_of_rio_rpn_init(void) | 1605 | static __init int fsl_of_rio_rpn_init(void) |
1607 | { | 1606 | { |
1608 | return of_register_platform_driver(&fsl_of_rio_rpn_driver); | 1607 | return platform_driver_register(&fsl_of_rio_rpn_driver); |
1609 | } | 1608 | } |
1610 | 1609 | ||
1611 | subsys_initcall(fsl_of_rio_rpn_init); | 1610 | subsys_initcall(fsl_of_rio_rpn_init); |
diff --git a/arch/powerpc/sysdev/pmi.c b/arch/powerpc/sysdev/pmi.c index 4260f368db52..8ce4fc3d9828 100644 --- a/arch/powerpc/sysdev/pmi.c +++ b/arch/powerpc/sysdev/pmi.c | |||
@@ -121,8 +121,7 @@ static void pmi_notify_handlers(struct work_struct *work) | |||
121 | spin_unlock(&data->handler_spinlock); | 121 | spin_unlock(&data->handler_spinlock); |
122 | } | 122 | } |
123 | 123 | ||
124 | static int pmi_of_probe(struct platform_device *dev, | 124 | static int pmi_of_probe(struct platform_device *dev) |
125 | const struct of_device_id *match) | ||
126 | { | 125 | { |
127 | struct device_node *np = dev->dev.of_node; | 126 | struct device_node *np = dev->dev.of_node; |
128 | int rc; | 127 | int rc; |
@@ -205,7 +204,7 @@ static int pmi_of_remove(struct platform_device *dev) | |||
205 | return 0; | 204 | return 0; |
206 | } | 205 | } |
207 | 206 | ||
208 | static struct of_platform_driver pmi_of_platform_driver = { | 207 | static struct platform_driver pmi_of_platform_driver = { |
209 | .probe = pmi_of_probe, | 208 | .probe = pmi_of_probe, |
210 | .remove = pmi_of_remove, | 209 | .remove = pmi_of_remove, |
211 | .driver = { | 210 | .driver = { |
@@ -217,13 +216,13 @@ static struct of_platform_driver pmi_of_platform_driver = { | |||
217 | 216 | ||
218 | static int __init pmi_module_init(void) | 217 | static int __init pmi_module_init(void) |
219 | { | 218 | { |
220 | return of_register_platform_driver(&pmi_of_platform_driver); | 219 | return platform_driver_register(&pmi_of_platform_driver); |
221 | } | 220 | } |
222 | module_init(pmi_module_init); | 221 | module_init(pmi_module_init); |
223 | 222 | ||
224 | static void __exit pmi_module_exit(void) | 223 | static void __exit pmi_module_exit(void) |
225 | { | 224 | { |
226 | of_unregister_platform_driver(&pmi_of_platform_driver); | 225 | platform_driver_unregister(&pmi_of_platform_driver); |
227 | } | 226 | } |
228 | module_exit(pmi_module_exit); | 227 | module_exit(pmi_module_exit); |
229 | 228 | ||
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c index 90020de4dcf2..904c6cbaf45b 100644 --- a/arch/powerpc/sysdev/qe_lib/qe.c +++ b/arch/powerpc/sysdev/qe_lib/qe.c | |||
@@ -659,8 +659,7 @@ static int qe_resume(struct platform_device *ofdev) | |||
659 | return 0; | 659 | return 0; |
660 | } | 660 | } |
661 | 661 | ||
662 | static int qe_probe(struct platform_device *ofdev, | 662 | static int qe_probe(struct platform_device *ofdev) |
663 | const struct of_device_id *id) | ||
664 | { | 663 | { |
665 | return 0; | 664 | return 0; |
666 | } | 665 | } |
@@ -670,7 +669,7 @@ static const struct of_device_id qe_ids[] = { | |||
670 | { }, | 669 | { }, |
671 | }; | 670 | }; |
672 | 671 | ||
673 | static struct of_platform_driver qe_driver = { | 672 | static struct platform_driver qe_driver = { |
674 | .driver = { | 673 | .driver = { |
675 | .name = "fsl-qe", | 674 | .name = "fsl-qe", |
676 | .owner = THIS_MODULE, | 675 | .owner = THIS_MODULE, |
@@ -682,7 +681,7 @@ static struct of_platform_driver qe_driver = { | |||
682 | 681 | ||
683 | static int __init qe_drv_init(void) | 682 | static int __init qe_drv_init(void) |
684 | { | 683 | { |
685 | return of_register_platform_driver(&qe_driver); | 684 | return platform_driver_register(&qe_driver); |
686 | } | 685 | } |
687 | device_initcall(qe_drv_init); | 686 | device_initcall(qe_drv_init); |
688 | #endif /* defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) */ | 687 | #endif /* defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) */ |
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h index 5c5d02de49e9..81cf36b691f1 100644 --- a/arch/s390/include/asm/futex.h +++ b/arch/s390/include/asm/futex.h | |||
@@ -7,7 +7,7 @@ | |||
7 | #include <linux/uaccess.h> | 7 | #include <linux/uaccess.h> |
8 | #include <asm/errno.h> | 8 | #include <asm/errno.h> |
9 | 9 | ||
10 | static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | 10 | static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) |
11 | { | 11 | { |
12 | int op = (encoded_op >> 28) & 7; | 12 | int op = (encoded_op >> 28) & 7; |
13 | int cmp = (encoded_op >> 24) & 15; | 13 | int cmp = (encoded_op >> 24) & 15; |
@@ -18,7 +18,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | |||
18 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) | 18 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) |
19 | oparg = 1 << oparg; | 19 | oparg = 1 << oparg; |
20 | 20 | ||
21 | if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) | 21 | if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) |
22 | return -EFAULT; | 22 | return -EFAULT; |
23 | 23 | ||
24 | pagefault_disable(); | 24 | pagefault_disable(); |
@@ -39,13 +39,13 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | |||
39 | return ret; | 39 | return ret; |
40 | } | 40 | } |
41 | 41 | ||
42 | static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, | 42 | static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, |
43 | int oldval, int newval) | 43 | u32 oldval, u32 newval) |
44 | { | 44 | { |
45 | if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) | 45 | if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) |
46 | return -EFAULT; | 46 | return -EFAULT; |
47 | 47 | ||
48 | return uaccess.futex_atomic_cmpxchg(uaddr, oldval, newval); | 48 | return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval); |
49 | } | 49 | } |
50 | 50 | ||
51 | #endif /* __KERNEL__ */ | 51 | #endif /* __KERNEL__ */ |
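The s390 futex hunks follow the common signature change in this series: futex_atomic_op_inuser() and futex_atomic_cmpxchg_inatomic() now take u32 user pointers, and the cmpxchg variant returns 0 or -EFAULT while handing the previously stored value back through the u32 *uval out-parameter. A hedged sketch of a caller under the new convention; the wrapper name probe_futex_value() is illustrative and not part of the patch.

#include <linux/errno.h>
#include <linux/uaccess.h>
#include <asm/futex.h>

static int probe_futex_value(u32 __user *uaddr, u32 expected, u32 replacement)
{
	u32 cur;
	int err;

	pagefault_disable();
	err = futex_atomic_cmpxchg_inatomic(&cur, uaddr, expected, replacement);
	pagefault_enable();

	if (err)                                /* 0 on success, -EFAULT on a faulting access */
		return err;
	return cur == expected ? 0 : -EAGAIN;   /* old value now arrives via *uval */
}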
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h index 423fdda2322d..d0eb4653cebd 100644 --- a/arch/s390/include/asm/rwsem.h +++ b/arch/s390/include/asm/rwsem.h | |||
@@ -43,29 +43,6 @@ | |||
43 | 43 | ||
44 | #ifdef __KERNEL__ | 44 | #ifdef __KERNEL__ |
45 | 45 | ||
46 | #include <linux/list.h> | ||
47 | #include <linux/spinlock.h> | ||
48 | |||
49 | struct rwsem_waiter; | ||
50 | |||
51 | extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *); | ||
52 | extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *); | ||
53 | extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *); | ||
54 | extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *); | ||
55 | extern struct rw_semaphore *rwsem_downgrade_write(struct rw_semaphore *); | ||
56 | |||
57 | /* | ||
58 | * the semaphore definition | ||
59 | */ | ||
60 | struct rw_semaphore { | ||
61 | signed long count; | ||
62 | spinlock_t wait_lock; | ||
63 | struct list_head wait_list; | ||
64 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
65 | struct lockdep_map dep_map; | ||
66 | #endif | ||
67 | }; | ||
68 | |||
69 | #ifndef __s390x__ | 46 | #ifndef __s390x__ |
70 | #define RWSEM_UNLOCKED_VALUE 0x00000000 | 47 | #define RWSEM_UNLOCKED_VALUE 0x00000000 |
71 | #define RWSEM_ACTIVE_BIAS 0x00000001 | 48 | #define RWSEM_ACTIVE_BIAS 0x00000001 |
@@ -81,41 +58,6 @@ struct rw_semaphore { | |||
81 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) | 58 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) |
82 | 59 | ||
83 | /* | 60 | /* |
84 | * initialisation | ||
85 | */ | ||
86 | |||
87 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
88 | # define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } | ||
89 | #else | ||
90 | # define __RWSEM_DEP_MAP_INIT(lockname) | ||
91 | #endif | ||
92 | |||
93 | #define __RWSEM_INITIALIZER(name) \ | ||
94 | { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait.lock), \ | ||
95 | LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) } | ||
96 | |||
97 | #define DECLARE_RWSEM(name) \ | ||
98 | struct rw_semaphore name = __RWSEM_INITIALIZER(name) | ||
99 | |||
100 | static inline void init_rwsem(struct rw_semaphore *sem) | ||
101 | { | ||
102 | sem->count = RWSEM_UNLOCKED_VALUE; | ||
103 | spin_lock_init(&sem->wait_lock); | ||
104 | INIT_LIST_HEAD(&sem->wait_list); | ||
105 | } | ||
106 | |||
107 | extern void __init_rwsem(struct rw_semaphore *sem, const char *name, | ||
108 | struct lock_class_key *key); | ||
109 | |||
110 | #define init_rwsem(sem) \ | ||
111 | do { \ | ||
112 | static struct lock_class_key __key; \ | ||
113 | \ | ||
114 | __init_rwsem((sem), #sem, &__key); \ | ||
115 | } while (0) | ||
116 | |||
117 | |||
118 | /* | ||
119 | * lock for reading | 61 | * lock for reading |
120 | */ | 62 | */ |
121 | static inline void __down_read(struct rw_semaphore *sem) | 63 | static inline void __down_read(struct rw_semaphore *sem) |
@@ -377,10 +319,5 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem) | |||
377 | return new; | 319 | return new; |
378 | } | 320 | } |
379 | 321 | ||
380 | static inline int rwsem_is_locked(struct rw_semaphore *sem) | ||
381 | { | ||
382 | return (sem->count != 0); | ||
383 | } | ||
384 | |||
385 | #endif /* __KERNEL__ */ | 322 | #endif /* __KERNEL__ */ |
386 | #endif /* _S390_RWSEM_H */ | 323 | #endif /* _S390_RWSEM_H */ |
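The rwsem hunk removes the arch-private struct rw_semaphore, its initializers and rwsem_is_locked(), keeping only the bias constants and the atomic fast paths; the removed pieces are presumably supplied by the generic rwsem code from now on. Callers are unaffected, as the hedged sketch below illustrates, assuming <linux/rwsem.h> provides the declarations.

#include <linux/rwsem.h>

static DECLARE_RWSEM(example_sem);              /* generic initializer */

static int example_is_busy(void)
{
	return rwsem_is_locked(&example_sem);   /* now a generic helper */
}

static void example_reader(void)
{
	down_read(&example_sem);
	/* read-side critical section */
	up_read(&example_sem);
}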
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index d6b1ed0ec52b..2d9ea11f919a 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h | |||
@@ -83,8 +83,8 @@ struct uaccess_ops { | |||
83 | size_t (*clear_user)(size_t, void __user *); | 83 | size_t (*clear_user)(size_t, void __user *); |
84 | size_t (*strnlen_user)(size_t, const char __user *); | 84 | size_t (*strnlen_user)(size_t, const char __user *); |
85 | size_t (*strncpy_from_user)(size_t, const char __user *, char *); | 85 | size_t (*strncpy_from_user)(size_t, const char __user *, char *); |
86 | int (*futex_atomic_op)(int op, int __user *, int oparg, int *old); | 86 | int (*futex_atomic_op)(int op, u32 __user *, int oparg, int *old); |
87 | int (*futex_atomic_cmpxchg)(int __user *, int old, int new); | 87 | int (*futex_atomic_cmpxchg)(u32 *, u32 __user *, u32 old, u32 new); |
88 | }; | 88 | }; |
89 | 89 | ||
90 | extern struct uaccess_ops uaccess; | 90 | extern struct uaccess_ops uaccess; |
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index a68ac10213b2..1bc18cdb525b 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S | |||
@@ -77,7 +77,7 @@ SECTIONS | |||
77 | . = ALIGN(PAGE_SIZE); | 77 | . = ALIGN(PAGE_SIZE); |
78 | INIT_DATA_SECTION(0x100) | 78 | INIT_DATA_SECTION(0x100) |
79 | 79 | ||
80 | PERCPU(PAGE_SIZE) | 80 | PERCPU(0x100, PAGE_SIZE) |
81 | . = ALIGN(PAGE_SIZE); | 81 | . = ALIGN(PAGE_SIZE); |
82 | __init_end = .; /* freed after init ends here */ | 82 | __init_end = .; /* freed after init ends here */ |
83 | 83 | ||
diff --git a/arch/s390/lib/uaccess.h b/arch/s390/lib/uaccess.h index 126011df14f1..1d2536cb630b 100644 --- a/arch/s390/lib/uaccess.h +++ b/arch/s390/lib/uaccess.h | |||
@@ -12,12 +12,12 @@ extern size_t copy_from_user_std(size_t, const void __user *, void *); | |||
12 | extern size_t copy_to_user_std(size_t, void __user *, const void *); | 12 | extern size_t copy_to_user_std(size_t, void __user *, const void *); |
13 | extern size_t strnlen_user_std(size_t, const char __user *); | 13 | extern size_t strnlen_user_std(size_t, const char __user *); |
14 | extern size_t strncpy_from_user_std(size_t, const char __user *, char *); | 14 | extern size_t strncpy_from_user_std(size_t, const char __user *, char *); |
15 | extern int futex_atomic_cmpxchg_std(int __user *, int, int); | 15 | extern int futex_atomic_cmpxchg_std(u32 *, u32 __user *, u32, u32); |
16 | extern int futex_atomic_op_std(int, int __user *, int, int *); | 16 | extern int futex_atomic_op_std(int, u32 __user *, int, int *); |
17 | 17 | ||
18 | extern size_t copy_from_user_pt(size_t, const void __user *, void *); | 18 | extern size_t copy_from_user_pt(size_t, const void __user *, void *); |
19 | extern size_t copy_to_user_pt(size_t, void __user *, const void *); | 19 | extern size_t copy_to_user_pt(size_t, void __user *, const void *); |
20 | extern int futex_atomic_op_pt(int, int __user *, int, int *); | 20 | extern int futex_atomic_op_pt(int, u32 __user *, int, int *); |
21 | extern int futex_atomic_cmpxchg_pt(int __user *, int, int); | 21 | extern int futex_atomic_cmpxchg_pt(u32 *, u32 __user *, u32, u32); |
22 | 22 | ||
23 | #endif /* __ARCH_S390_LIB_UACCESS_H */ | 23 | #endif /* __ARCH_S390_LIB_UACCESS_H */ |
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index 404f2de296dc..74833831417f 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c | |||
@@ -302,7 +302,7 @@ fault: | |||
302 | : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ | 302 | : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ |
303 | "m" (*uaddr) : "cc" ); | 303 | "m" (*uaddr) : "cc" ); |
304 | 304 | ||
305 | static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) | 305 | static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old) |
306 | { | 306 | { |
307 | int oldval = 0, newval, ret; | 307 | int oldval = 0, newval, ret; |
308 | 308 | ||
@@ -335,7 +335,7 @@ static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) | |||
335 | return ret; | 335 | return ret; |
336 | } | 336 | } |
337 | 337 | ||
338 | int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) | 338 | int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old) |
339 | { | 339 | { |
340 | int ret; | 340 | int ret; |
341 | 341 | ||
@@ -354,26 +354,29 @@ int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) | |||
354 | return ret; | 354 | return ret; |
355 | } | 355 | } |
356 | 356 | ||
357 | static int __futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval) | 357 | static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr, |
358 | u32 oldval, u32 newval) | ||
358 | { | 359 | { |
359 | int ret; | 360 | int ret; |
360 | 361 | ||
361 | asm volatile("0: cs %1,%4,0(%5)\n" | 362 | asm volatile("0: cs %1,%4,0(%5)\n" |
362 | "1: lr %0,%1\n" | 363 | "1: la %0,0\n" |
363 | "2:\n" | 364 | "2:\n" |
364 | EX_TABLE(0b,2b) EX_TABLE(1b,2b) | 365 | EX_TABLE(0b,2b) EX_TABLE(1b,2b) |
365 | : "=d" (ret), "+d" (oldval), "=m" (*uaddr) | 366 | : "=d" (ret), "+d" (oldval), "=m" (*uaddr) |
366 | : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) | 367 | : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) |
367 | : "cc", "memory" ); | 368 | : "cc", "memory" ); |
369 | *uval = oldval; | ||
368 | return ret; | 370 | return ret; |
369 | } | 371 | } |
370 | 372 | ||
371 | int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval) | 373 | int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr, |
374 | u32 oldval, u32 newval) | ||
372 | { | 375 | { |
373 | int ret; | 376 | int ret; |
374 | 377 | ||
375 | if (segment_eq(get_fs(), KERNEL_DS)) | 378 | if (segment_eq(get_fs(), KERNEL_DS)) |
376 | return __futex_atomic_cmpxchg_pt(uaddr, oldval, newval); | 379 | return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval); |
377 | spin_lock(&current->mm->page_table_lock); | 380 | spin_lock(&current->mm->page_table_lock); |
378 | uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr); | 381 | uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr); |
379 | if (!uaddr) { | 382 | if (!uaddr) { |
@@ -382,7 +385,7 @@ int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval) | |||
382 | } | 385 | } |
383 | get_page(virt_to_page(uaddr)); | 386 | get_page(virt_to_page(uaddr)); |
384 | spin_unlock(&current->mm->page_table_lock); | 387 | spin_unlock(&current->mm->page_table_lock); |
385 | ret = __futex_atomic_cmpxchg_pt(uaddr, oldval, newval); | 388 | ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval); |
386 | put_page(virt_to_page(uaddr)); | 389 | put_page(virt_to_page(uaddr)); |
387 | return ret; | 390 | return ret; |
388 | } | 391 | } |
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c index a6c4f7ed24a4..bb1a7eed42ce 100644 --- a/arch/s390/lib/uaccess_std.c +++ b/arch/s390/lib/uaccess_std.c | |||
@@ -255,7 +255,7 @@ size_t strncpy_from_user_std(size_t size, const char __user *src, char *dst) | |||
255 | : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ | 255 | : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ |
256 | "m" (*uaddr) : "cc"); | 256 | "m" (*uaddr) : "cc"); |
257 | 257 | ||
258 | int futex_atomic_op_std(int op, int __user *uaddr, int oparg, int *old) | 258 | int futex_atomic_op_std(int op, u32 __user *uaddr, int oparg, int *old) |
259 | { | 259 | { |
260 | int oldval = 0, newval, ret; | 260 | int oldval = 0, newval, ret; |
261 | 261 | ||
@@ -287,19 +287,21 @@ int futex_atomic_op_std(int op, int __user *uaddr, int oparg, int *old) | |||
287 | return ret; | 287 | return ret; |
288 | } | 288 | } |
289 | 289 | ||
290 | int futex_atomic_cmpxchg_std(int __user *uaddr, int oldval, int newval) | 290 | int futex_atomic_cmpxchg_std(u32 *uval, u32 __user *uaddr, |
291 | u32 oldval, u32 newval) | ||
291 | { | 292 | { |
292 | int ret; | 293 | int ret; |
293 | 294 | ||
294 | asm volatile( | 295 | asm volatile( |
295 | " sacf 256\n" | 296 | " sacf 256\n" |
296 | "0: cs %1,%4,0(%5)\n" | 297 | "0: cs %1,%4,0(%5)\n" |
297 | "1: lr %0,%1\n" | 298 | "1: la %0,0\n" |
298 | "2: sacf 0\n" | 299 | "2: sacf 0\n" |
299 | EX_TABLE(0b,2b) EX_TABLE(1b,2b) | 300 | EX_TABLE(0b,2b) EX_TABLE(1b,2b) |
300 | : "=d" (ret), "+d" (oldval), "=m" (*uaddr) | 301 | : "=d" (ret), "+d" (oldval), "=m" (*uaddr) |
301 | : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) | 302 | : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) |
302 | : "cc", "memory" ); | 303 | : "cc", "memory" ); |
304 | *uval = oldval; | ||
303 | return ret; | 305 | return ret; |
304 | } | 306 | } |
305 | 307 | ||
diff --git a/arch/sh/include/asm/futex-irq.h b/arch/sh/include/asm/futex-irq.h index a9f16a7f9aea..6cb9f193a95e 100644 --- a/arch/sh/include/asm/futex-irq.h +++ b/arch/sh/include/asm/futex-irq.h | |||
@@ -3,7 +3,7 @@ | |||
3 | 3 | ||
4 | #include <asm/system.h> | 4 | #include <asm/system.h> |
5 | 5 | ||
6 | static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, | 6 | static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr, |
7 | int *oldval) | 7 | int *oldval) |
8 | { | 8 | { |
9 | unsigned long flags; | 9 | unsigned long flags; |
@@ -20,7 +20,7 @@ static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, | |||
20 | return ret; | 20 | return ret; |
21 | } | 21 | } |
22 | 22 | ||
23 | static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, | 23 | static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr, |
24 | int *oldval) | 24 | int *oldval) |
25 | { | 25 | { |
26 | unsigned long flags; | 26 | unsigned long flags; |
@@ -37,7 +37,7 @@ static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, | |||
37 | return ret; | 37 | return ret; |
38 | } | 38 | } |
39 | 39 | ||
40 | static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, | 40 | static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr, |
41 | int *oldval) | 41 | int *oldval) |
42 | { | 42 | { |
43 | unsigned long flags; | 43 | unsigned long flags; |
@@ -54,7 +54,7 @@ static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, | |||
54 | return ret; | 54 | return ret; |
55 | } | 55 | } |
56 | 56 | ||
57 | static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, | 57 | static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr, |
58 | int *oldval) | 58 | int *oldval) |
59 | { | 59 | { |
60 | unsigned long flags; | 60 | unsigned long flags; |
@@ -71,7 +71,7 @@ static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, | |||
71 | return ret; | 71 | return ret; |
72 | } | 72 | } |
73 | 73 | ||
74 | static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, | 74 | static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr, |
75 | int *oldval) | 75 | int *oldval) |
76 | { | 76 | { |
77 | unsigned long flags; | 77 | unsigned long flags; |
@@ -88,11 +88,13 @@ static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, | |||
88 | return ret; | 88 | return ret; |
89 | } | 89 | } |
90 | 90 | ||
91 | static inline int atomic_futex_op_cmpxchg_inatomic(int __user *uaddr, | 91 | static inline int atomic_futex_op_cmpxchg_inatomic(u32 *uval, |
92 | int oldval, int newval) | 92 | u32 __user *uaddr, |
93 | u32 oldval, u32 newval) | ||
93 | { | 94 | { |
94 | unsigned long flags; | 95 | unsigned long flags; |
95 | int ret, prev = 0; | 96 | int ret; |
97 | u32 prev = 0; | ||
96 | 98 | ||
97 | local_irq_save(flags); | 99 | local_irq_save(flags); |
98 | 100 | ||
@@ -102,10 +104,8 @@ static inline int atomic_futex_op_cmpxchg_inatomic(int __user *uaddr, | |||
102 | 104 | ||
103 | local_irq_restore(flags); | 105 | local_irq_restore(flags); |
104 | 106 | ||
105 | if (ret) | 107 | *uval = prev; |
106 | return ret; | 108 | return ret; |
107 | |||
108 | return prev; | ||
109 | } | 109 | } |
110 | 110 | ||
111 | #endif /* __ASM_SH_FUTEX_IRQ_H */ | 111 | #endif /* __ASM_SH_FUTEX_IRQ_H */ |
diff --git a/arch/sh/include/asm/futex.h b/arch/sh/include/asm/futex.h index 68256ec5fa35..7be39a646fbd 100644 --- a/arch/sh/include/asm/futex.h +++ b/arch/sh/include/asm/futex.h | |||
@@ -10,7 +10,7 @@ | |||
10 | /* XXX: UP variants, fix for SH-4A and SMP.. */ | 10 | /* XXX: UP variants, fix for SH-4A and SMP.. */ |
11 | #include <asm/futex-irq.h> | 11 | #include <asm/futex-irq.h> |
12 | 12 | ||
13 | static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | 13 | static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) |
14 | { | 14 | { |
15 | int op = (encoded_op >> 28) & 7; | 15 | int op = (encoded_op >> 28) & 7; |
16 | int cmp = (encoded_op >> 24) & 15; | 16 | int cmp = (encoded_op >> 24) & 15; |
@@ -21,7 +21,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | |||
21 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) | 21 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) |
22 | oparg = 1 << oparg; | 22 | oparg = 1 << oparg; |
23 | 23 | ||
24 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | 24 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
25 | return -EFAULT; | 25 | return -EFAULT; |
26 | 26 | ||
27 | pagefault_disable(); | 27 | pagefault_disable(); |
@@ -65,12 +65,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | |||
65 | } | 65 | } |
66 | 66 | ||
67 | static inline int | 67 | static inline int |
68 | futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | 68 | futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, |
69 | u32 oldval, u32 newval) | ||
69 | { | 70 | { |
70 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | 71 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
71 | return -EFAULT; | 72 | return -EFAULT; |
72 | 73 | ||
73 | return atomic_futex_op_cmpxchg_inatomic(uaddr, oldval, newval); | 74 | return atomic_futex_op_cmpxchg_inatomic(uval, uaddr, oldval, newval); |
74 | } | 75 | } |
75 | 76 | ||
76 | #endif /* __KERNEL__ */ | 77 | #endif /* __KERNEL__ */ |
diff --git a/arch/sh/include/asm/ioctls.h b/arch/sh/include/asm/ioctls.h index 84e85a792638..a6769f352bf6 100644 --- a/arch/sh/include/asm/ioctls.h +++ b/arch/sh/include/asm/ioctls.h | |||
@@ -87,6 +87,7 @@ | |||
87 | #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ | 87 | #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ |
88 | #define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */ | 88 | #define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */ |
89 | #define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */ | 89 | #define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */ |
90 | #define TIOCVHANGUP _IO('T', 0x37) | ||
90 | 91 | ||
91 | #define TIOCSERCONFIG _IO('T', 83) /* 0x5453 */ | 92 | #define TIOCSERCONFIG _IO('T', 83) /* 0x5453 */ |
92 | #define TIOCSERGWILD _IOR('T', 84, int) /* 0x5454 */ | 93 | #define TIOCSERGWILD _IOR('T', 84, int) /* 0x5454 */ |
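TIOCVHANGUP, added above as _IO('T', 0x37), lets a sufficiently privileged caller hang up a tty through an ioctl on an fd for that tty rather than via vhangup() on the controlling terminal. A hedged userspace sketch; it assumes the definition is visible through <sys/ioctl.h> and keeps error handling minimal.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int vhangup_tty(const char *path)
{
	int fd = open(path, O_RDWR | O_NOCTTY);

	if (fd < 0)
		return -1;
	if (ioctl(fd, TIOCVHANGUP) < 0)   /* the new 'T', 0x37 ioctl */
		perror("TIOCVHANGUP");
	close(fd);
	return 0;
}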
diff --git a/arch/sh/include/asm/rwsem.h b/arch/sh/include/asm/rwsem.h index 06e2251a5e48..edab57265293 100644 --- a/arch/sh/include/asm/rwsem.h +++ b/arch/sh/include/asm/rwsem.h | |||
@@ -11,64 +11,13 @@ | |||
11 | #endif | 11 | #endif |
12 | 12 | ||
13 | #ifdef __KERNEL__ | 13 | #ifdef __KERNEL__ |
14 | #include <linux/list.h> | ||
15 | #include <linux/spinlock.h> | ||
16 | #include <asm/atomic.h> | ||
17 | #include <asm/system.h> | ||
18 | 14 | ||
19 | /* | ||
20 | * the semaphore definition | ||
21 | */ | ||
22 | struct rw_semaphore { | ||
23 | long count; | ||
24 | #define RWSEM_UNLOCKED_VALUE 0x00000000 | 15 | #define RWSEM_UNLOCKED_VALUE 0x00000000 |
25 | #define RWSEM_ACTIVE_BIAS 0x00000001 | 16 | #define RWSEM_ACTIVE_BIAS 0x00000001 |
26 | #define RWSEM_ACTIVE_MASK 0x0000ffff | 17 | #define RWSEM_ACTIVE_MASK 0x0000ffff |
27 | #define RWSEM_WAITING_BIAS (-0x00010000) | 18 | #define RWSEM_WAITING_BIAS (-0x00010000) |
28 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS | 19 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS |
29 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) | 20 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) |
30 | spinlock_t wait_lock; | ||
31 | struct list_head wait_list; | ||
32 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
33 | struct lockdep_map dep_map; | ||
34 | #endif | ||
35 | }; | ||
36 | |||
37 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
38 | # define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } | ||
39 | #else | ||
40 | # define __RWSEM_DEP_MAP_INIT(lockname) | ||
41 | #endif | ||
42 | |||
43 | #define __RWSEM_INITIALIZER(name) \ | ||
44 | { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \ | ||
45 | LIST_HEAD_INIT((name).wait_list) \ | ||
46 | __RWSEM_DEP_MAP_INIT(name) } | ||
47 | |||
48 | #define DECLARE_RWSEM(name) \ | ||
49 | struct rw_semaphore name = __RWSEM_INITIALIZER(name) | ||
50 | |||
51 | extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); | ||
52 | extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); | ||
53 | extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem); | ||
54 | extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); | ||
55 | |||
56 | extern void __init_rwsem(struct rw_semaphore *sem, const char *name, | ||
57 | struct lock_class_key *key); | ||
58 | |||
59 | #define init_rwsem(sem) \ | ||
60 | do { \ | ||
61 | static struct lock_class_key __key; \ | ||
62 | \ | ||
63 | __init_rwsem((sem), #sem, &__key); \ | ||
64 | } while (0) | ||
65 | |||
66 | static inline void init_rwsem(struct rw_semaphore *sem) | ||
67 | { | ||
68 | sem->count = RWSEM_UNLOCKED_VALUE; | ||
69 | spin_lock_init(&sem->wait_lock); | ||
70 | INIT_LIST_HEAD(&sem->wait_list); | ||
71 | } | ||
72 | 21 | ||
73 | /* | 22 | /* |
74 | * lock for reading | 23 | * lock for reading |
@@ -179,10 +128,5 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) | |||
179 | return atomic_add_return(delta, (atomic_t *)(&sem->count)); | 128 | return atomic_add_return(delta, (atomic_t *)(&sem->count)); |
180 | } | 129 | } |
181 | 130 | ||
182 | static inline int rwsem_is_locked(struct rw_semaphore *sem) | ||
183 | { | ||
184 | return (sem->count != 0); | ||
185 | } | ||
186 | |||
187 | #endif /* __KERNEL__ */ | 131 | #endif /* __KERNEL__ */ |
188 | #endif /* _ASM_SH_RWSEM_H */ | 132 | #endif /* _ASM_SH_RWSEM_H */ |
diff --git a/arch/sh/include/asm/sh_eth.h b/arch/sh/include/asm/sh_eth.h index f739061e2ee4..0f325da0f923 100644 --- a/arch/sh/include/asm/sh_eth.h +++ b/arch/sh/include/asm/sh_eth.h | |||
@@ -1,11 +1,21 @@ | |||
1 | #ifndef __ASM_SH_ETH_H__ | 1 | #ifndef __ASM_SH_ETH_H__ |
2 | #define __ASM_SH_ETH_H__ | 2 | #define __ASM_SH_ETH_H__ |
3 | 3 | ||
4 | #include <linux/phy.h> | ||
5 | |||
4 | enum {EDMAC_LITTLE_ENDIAN, EDMAC_BIG_ENDIAN}; | 6 | enum {EDMAC_LITTLE_ENDIAN, EDMAC_BIG_ENDIAN}; |
7 | enum { | ||
8 | SH_ETH_REG_GIGABIT, | ||
9 | SH_ETH_REG_FAST_SH4, | ||
10 | SH_ETH_REG_FAST_SH3_SH2 | ||
11 | }; | ||
5 | 12 | ||
6 | struct sh_eth_plat_data { | 13 | struct sh_eth_plat_data { |
7 | int phy; | 14 | int phy; |
8 | int edmac_endian; | 15 | int edmac_endian; |
16 | int register_type; | ||
17 | phy_interface_t phy_interface; | ||
18 | void (*set_mdio_gate)(unsigned long addr); | ||
9 | 19 | ||
10 | unsigned char mac_addr[6]; | 20 | unsigned char mac_addr[6]; |
11 | unsigned no_ether_link:1; | 21 | unsigned no_ether_link:1; |
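The sh_eth platform data grows a register_type selector, a phy_interface_t field and an optional set_mdio_gate() hook, which is why <linux/phy.h> is now included. An illustrative board-code sketch is below; the concrete values and the gating callback are hypothetical.

#include <linux/phy.h>
#include <asm/sh_eth.h>

static void board_set_mdio_gate(unsigned long addr)
{
	/* board-specific MDIO bus gating would go here */
}

static struct sh_eth_plat_data board_eth_pdata = {
	.phy           = 0x1f,
	.edmac_endian  = EDMAC_LITTLE_ENDIAN,
	.register_type = SH_ETH_REG_FAST_SH4,         /* new field */
	.phy_interface = PHY_INTERFACE_MODE_MII,      /* new field */
	.set_mdio_gate = board_set_mdio_gate,         /* new hook */
	.mac_addr      = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
};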
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S index 7f8a709c3ada..af4d46187a79 100644 --- a/arch/sh/kernel/vmlinux.lds.S +++ b/arch/sh/kernel/vmlinux.lds.S | |||
@@ -66,7 +66,7 @@ SECTIONS | |||
66 | __machvec_end = .; | 66 | __machvec_end = .; |
67 | } | 67 | } |
68 | 68 | ||
69 | PERCPU(PAGE_SIZE) | 69 | PERCPU(L1_CACHE_BYTES, PAGE_SIZE) |
70 | 70 | ||
71 | /* | 71 | /* |
72 | * .exit.text is discarded at runtime, not link time, to deal with | 72 | * .exit.text is discarded at runtime, not link time, to deal with |
diff --git a/arch/sparc/include/asm/fcntl.h b/arch/sparc/include/asm/fcntl.h index 38f37b333cc7..d0b83f66f356 100644 --- a/arch/sparc/include/asm/fcntl.h +++ b/arch/sparc/include/asm/fcntl.h | |||
@@ -34,6 +34,8 @@ | |||
34 | #define __O_SYNC 0x800000 | 34 | #define __O_SYNC 0x800000 |
35 | #define O_SYNC (__O_SYNC|O_DSYNC) | 35 | #define O_SYNC (__O_SYNC|O_DSYNC) |
36 | 36 | ||
37 | #define O_PATH 0x1000000 | ||
38 | |||
37 | #define F_GETOWN 5 /* for sockets. */ | 39 | #define F_GETOWN 5 /* for sockets. */ |
38 | #define F_SETOWN 6 /* for sockets. */ | 40 | #define F_SETOWN 6 /* for sockets. */ |
39 | #define F_GETLK 7 | 41 | #define F_GETLK 7 |
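O_PATH (0x1000000 here) opens an fd that identifies a filesystem location without granting read or write access, mainly useful as an anchor for the *at() system calls. A hedged userspace sketch, assuming a libc that exposes O_PATH (e.g. under _GNU_SOURCE):

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

static int stat_via_o_path(const char *dir, const char *name, struct stat *st)
{
	int dfd = open(dir, O_PATH | O_DIRECTORY);
	int ret;

	if (dfd < 0)
		return -1;
	ret = fstatat(dfd, name, st, 0);   /* the O_PATH fd serves only as the anchor */
	close(dfd);
	return ret;
}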
diff --git a/arch/sparc/include/asm/futex_64.h b/arch/sparc/include/asm/futex_64.h index 47f95839dc69..444e7bea23bc 100644 --- a/arch/sparc/include/asm/futex_64.h +++ b/arch/sparc/include/asm/futex_64.h | |||
@@ -30,7 +30,7 @@ | |||
30 | : "r" (uaddr), "r" (oparg), "i" (-EFAULT) \ | 30 | : "r" (uaddr), "r" (oparg), "i" (-EFAULT) \ |
31 | : "memory") | 31 | : "memory") |
32 | 32 | ||
33 | static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | 33 | static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) |
34 | { | 34 | { |
35 | int op = (encoded_op >> 28) & 7; | 35 | int op = (encoded_op >> 28) & 7; |
36 | int cmp = (encoded_op >> 24) & 15; | 36 | int cmp = (encoded_op >> 24) & 15; |
@@ -38,7 +38,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | |||
38 | int cmparg = (encoded_op << 20) >> 20; | 38 | int cmparg = (encoded_op << 20) >> 20; |
39 | int oldval = 0, ret, tem; | 39 | int oldval = 0, ret, tem; |
40 | 40 | ||
41 | if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))) | 41 | if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))) |
42 | return -EFAULT; | 42 | return -EFAULT; |
43 | if (unlikely((((unsigned long) uaddr) & 0x3UL))) | 43 | if (unlikely((((unsigned long) uaddr) & 0x3UL))) |
44 | return -EINVAL; | 44 | return -EINVAL; |
@@ -85,26 +85,30 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | |||
85 | } | 85 | } |
86 | 86 | ||
87 | static inline int | 87 | static inline int |
88 | futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | 88 | futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, |
89 | u32 oldval, u32 newval) | ||
89 | { | 90 | { |
91 | int ret = 0; | ||
92 | |||
90 | __asm__ __volatile__( | 93 | __asm__ __volatile__( |
91 | "\n1: casa [%3] %%asi, %2, %0\n" | 94 | "\n1: casa [%4] %%asi, %3, %1\n" |
92 | "2:\n" | 95 | "2:\n" |
93 | " .section .fixup,#alloc,#execinstr\n" | 96 | " .section .fixup,#alloc,#execinstr\n" |
94 | " .align 4\n" | 97 | " .align 4\n" |
95 | "3: sethi %%hi(2b), %0\n" | 98 | "3: sethi %%hi(2b), %0\n" |
96 | " jmpl %0 + %%lo(2b), %%g0\n" | 99 | " jmpl %0 + %%lo(2b), %%g0\n" |
97 | " mov %4, %0\n" | 100 | " mov %5, %0\n" |
98 | " .previous\n" | 101 | " .previous\n" |
99 | " .section __ex_table,\"a\"\n" | 102 | " .section __ex_table,\"a\"\n" |
100 | " .align 4\n" | 103 | " .align 4\n" |
101 | " .word 1b, 3b\n" | 104 | " .word 1b, 3b\n" |
102 | " .previous\n" | 105 | " .previous\n" |
103 | : "=r" (newval) | 106 | : "+r" (ret), "=r" (newval) |
104 | : "0" (newval), "r" (oldval), "r" (uaddr), "i" (-EFAULT) | 107 | : "1" (newval), "r" (oldval), "r" (uaddr), "i" (-EFAULT) |
105 | : "memory"); | 108 | : "memory"); |
106 | 109 | ||
107 | return newval; | 110 | *uval = newval; |
111 | return ret; | ||
108 | } | 112 | } |
109 | 113 | ||
110 | #endif /* !(_SPARC64_FUTEX_H) */ | 114 | #endif /* !(_SPARC64_FUTEX_H) */ |
diff --git a/arch/sparc/include/asm/ioctls.h b/arch/sparc/include/asm/ioctls.h index ed3807b96bb5..28d0c8b02cc3 100644 --- a/arch/sparc/include/asm/ioctls.h +++ b/arch/sparc/include/asm/ioctls.h | |||
@@ -20,6 +20,7 @@ | |||
20 | #define TCSETSW2 _IOW('T', 14, struct termios2) | 20 | #define TCSETSW2 _IOW('T', 14, struct termios2) |
21 | #define TCSETSF2 _IOW('T', 15, struct termios2) | 21 | #define TCSETSF2 _IOW('T', 15, struct termios2) |
22 | #define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */ | 22 | #define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */ |
23 | #define TIOCVHANGUP _IO('T', 0x37) | ||
23 | 24 | ||
24 | /* Note that all the ioctls that are not available in Linux have a | 25 | /* Note that all the ioctls that are not available in Linux have a |
25 | * double underscore on the front to: a) avoid some programs to | 26 | * double underscore on the front to: a) avoid some programs to |
diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h index aa4c82648d88..cb33608cc68f 100644 --- a/arch/sparc/include/asm/parport.h +++ b/arch/sparc/include/asm/parport.h | |||
@@ -103,7 +103,7 @@ static inline unsigned int get_dma_residue(unsigned int dmanr) | |||
103 | return ebus_dma_residue(&sparc_ebus_dmas[dmanr].info); | 103 | return ebus_dma_residue(&sparc_ebus_dmas[dmanr].info); |
104 | } | 104 | } |
105 | 105 | ||
106 | static int __devinit ecpp_probe(struct platform_device *op, const struct of_device_id *match) | 106 | static int __devinit ecpp_probe(struct platform_device *op) |
107 | { | 107 | { |
108 | unsigned long base = op->resource[0].start; | 108 | unsigned long base = op->resource[0].start; |
109 | unsigned long config = op->resource[1].start; | 109 | unsigned long config = op->resource[1].start; |
@@ -235,7 +235,7 @@ static const struct of_device_id ecpp_match[] = { | |||
235 | {}, | 235 | {}, |
236 | }; | 236 | }; |
237 | 237 | ||
238 | static struct of_platform_driver ecpp_driver = { | 238 | static struct platform_driver ecpp_driver = { |
239 | .driver = { | 239 | .driver = { |
240 | .name = "ecpp", | 240 | .name = "ecpp", |
241 | .owner = THIS_MODULE, | 241 | .owner = THIS_MODULE, |
@@ -247,7 +247,7 @@ static struct of_platform_driver ecpp_driver = { | |||
247 | 247 | ||
248 | static int parport_pc_find_nonpci_ports(int autoirq, int autodma) | 248 | static int parport_pc_find_nonpci_ports(int autoirq, int autodma) |
249 | { | 249 | { |
250 | return of_register_platform_driver(&ecpp_driver); | 250 | return platform_driver_register(&ecpp_driver); |
251 | } | 251 | } |
252 | 252 | ||
253 | #endif /* !(_ASM_SPARC64_PARPORT_H */ | 253 | #endif /* !(_ASM_SPARC64_PARPORT_H */ |
diff --git a/arch/sparc/include/asm/rwsem.h b/arch/sparc/include/asm/rwsem.h index a2b4302869bc..069bf4d663a1 100644 --- a/arch/sparc/include/asm/rwsem.h +++ b/arch/sparc/include/asm/rwsem.h | |||
@@ -13,53 +13,12 @@ | |||
13 | 13 | ||
14 | #ifdef __KERNEL__ | 14 | #ifdef __KERNEL__ |
15 | 15 | ||
16 | #include <linux/list.h> | ||
17 | #include <linux/spinlock.h> | ||
18 | |||
19 | struct rwsem_waiter; | ||
20 | |||
21 | struct rw_semaphore { | ||
22 | signed long count; | ||
23 | #define RWSEM_UNLOCKED_VALUE 0x00000000L | 16 | #define RWSEM_UNLOCKED_VALUE 0x00000000L |
24 | #define RWSEM_ACTIVE_BIAS 0x00000001L | 17 | #define RWSEM_ACTIVE_BIAS 0x00000001L |
25 | #define RWSEM_ACTIVE_MASK 0xffffffffL | 18 | #define RWSEM_ACTIVE_MASK 0xffffffffL |
26 | #define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1) | 19 | #define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1) |
27 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS | 20 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS |
28 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) | 21 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) |
29 | spinlock_t wait_lock; | ||
30 | struct list_head wait_list; | ||
31 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
32 | struct lockdep_map dep_map; | ||
33 | #endif | ||
34 | }; | ||
35 | |||
36 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
37 | # define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } | ||
38 | #else | ||
39 | # define __RWSEM_DEP_MAP_INIT(lockname) | ||
40 | #endif | ||
41 | |||
42 | #define __RWSEM_INITIALIZER(name) \ | ||
43 | { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \ | ||
44 | LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) } | ||
45 | |||
46 | #define DECLARE_RWSEM(name) \ | ||
47 | struct rw_semaphore name = __RWSEM_INITIALIZER(name) | ||
48 | |||
49 | extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); | ||
50 | extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); | ||
51 | extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem); | ||
52 | extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); | ||
53 | |||
54 | extern void __init_rwsem(struct rw_semaphore *sem, const char *name, | ||
55 | struct lock_class_key *key); | ||
56 | |||
57 | #define init_rwsem(sem) \ | ||
58 | do { \ | ||
59 | static struct lock_class_key __key; \ | ||
60 | \ | ||
61 | __init_rwsem((sem), #sem, &__key); \ | ||
62 | } while (0) | ||
63 | 22 | ||
64 | /* | 23 | /* |
65 | * lock for reading | 24 | * lock for reading |
@@ -160,11 +119,6 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem) | |||
160 | return atomic64_add_return(delta, (atomic64_t *)(&sem->count)); | 119 | return atomic64_add_return(delta, (atomic64_t *)(&sem->count)); |
161 | } | 120 | } |
162 | 121 | ||
163 | static inline int rwsem_is_locked(struct rw_semaphore *sem) | ||
164 | { | ||
165 | return (sem->count != 0); | ||
166 | } | ||
167 | |||
168 | #endif /* __KERNEL__ */ | 122 | #endif /* __KERNEL__ */ |
169 | 123 | ||
170 | #endif /* _SPARC64_RWSEM_H */ | 124 | #endif /* _SPARC64_RWSEM_H */ |
diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c index 52de4a9424e8..f679c57644d5 100644 --- a/arch/sparc/kernel/apc.c +++ b/arch/sparc/kernel/apc.c | |||
@@ -137,8 +137,7 @@ static const struct file_operations apc_fops = { | |||
137 | 137 | ||
138 | static struct miscdevice apc_miscdev = { APC_MINOR, APC_DEVNAME, &apc_fops }; | 138 | static struct miscdevice apc_miscdev = { APC_MINOR, APC_DEVNAME, &apc_fops }; |
139 | 139 | ||
140 | static int __devinit apc_probe(struct platform_device *op, | 140 | static int __devinit apc_probe(struct platform_device *op) |
141 | const struct of_device_id *match) | ||
142 | { | 141 | { |
143 | int err; | 142 | int err; |
144 | 143 | ||
@@ -174,7 +173,7 @@ static struct of_device_id __initdata apc_match[] = { | |||
174 | }; | 173 | }; |
175 | MODULE_DEVICE_TABLE(of, apc_match); | 174 | MODULE_DEVICE_TABLE(of, apc_match); |
176 | 175 | ||
177 | static struct of_platform_driver apc_driver = { | 176 | static struct platform_driver apc_driver = { |
178 | .driver = { | 177 | .driver = { |
179 | .name = "apc", | 178 | .name = "apc", |
180 | .owner = THIS_MODULE, | 179 | .owner = THIS_MODULE, |
@@ -185,7 +184,7 @@ static struct of_platform_driver apc_driver = { | |||
185 | 184 | ||
186 | static int __init apc_init(void) | 185 | static int __init apc_init(void) |
187 | { | 186 | { |
188 | return of_register_platform_driver(&apc_driver); | 187 | return platform_driver_register(&apc_driver); |
189 | } | 188 | } |
190 | 189 | ||
191 | /* This driver is not critical to the boot process | 190 | /* This driver is not critical to the boot process |
diff --git a/arch/sparc/kernel/auxio_64.c b/arch/sparc/kernel/auxio_64.c index 3efd3c5af6a9..2abace076c7d 100644 --- a/arch/sparc/kernel/auxio_64.c +++ b/arch/sparc/kernel/auxio_64.c | |||
@@ -102,8 +102,7 @@ static struct of_device_id __initdata auxio_match[] = { | |||
102 | 102 | ||
103 | MODULE_DEVICE_TABLE(of, auxio_match); | 103 | MODULE_DEVICE_TABLE(of, auxio_match); |
104 | 104 | ||
105 | static int __devinit auxio_probe(struct platform_device *dev, | 105 | static int __devinit auxio_probe(struct platform_device *dev) |
106 | const struct of_device_id *match) | ||
107 | { | 106 | { |
108 | struct device_node *dp = dev->dev.of_node; | 107 | struct device_node *dp = dev->dev.of_node; |
109 | unsigned long size; | 108 | unsigned long size; |
@@ -132,7 +131,7 @@ static int __devinit auxio_probe(struct platform_device *dev, | |||
132 | return 0; | 131 | return 0; |
133 | } | 132 | } |
134 | 133 | ||
135 | static struct of_platform_driver auxio_driver = { | 134 | static struct platform_driver auxio_driver = { |
136 | .probe = auxio_probe, | 135 | .probe = auxio_probe, |
137 | .driver = { | 136 | .driver = { |
138 | .name = "auxio", | 137 | .name = "auxio", |
@@ -143,7 +142,7 @@ static struct of_platform_driver auxio_driver = { | |||
143 | 142 | ||
144 | static int __init auxio_init(void) | 143 | static int __init auxio_init(void) |
145 | { | 144 | { |
146 | return of_register_platform_driver(&auxio_driver); | 145 | return platform_driver_register(&auxio_driver); |
147 | } | 146 | } |
148 | 147 | ||
149 | /* Must be after subsys_initcall() so that busses are probed. Must | 148 | /* Must be after subsys_initcall() so that busses are probed. Must |
diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c index cfa2624c5332..136d3718a74a 100644 --- a/arch/sparc/kernel/central.c +++ b/arch/sparc/kernel/central.c | |||
@@ -59,8 +59,7 @@ static int __devinit clock_board_calc_nslots(struct clock_board *p) | |||
59 | } | 59 | } |
60 | } | 60 | } |
61 | 61 | ||
62 | static int __devinit clock_board_probe(struct platform_device *op, | 62 | static int __devinit clock_board_probe(struct platform_device *op) |
63 | const struct of_device_id *match) | ||
64 | { | 63 | { |
65 | struct clock_board *p = kzalloc(sizeof(*p), GFP_KERNEL); | 64 | struct clock_board *p = kzalloc(sizeof(*p), GFP_KERNEL); |
66 | int err = -ENOMEM; | 65 | int err = -ENOMEM; |
@@ -148,7 +147,7 @@ static struct of_device_id __initdata clock_board_match[] = { | |||
148 | {}, | 147 | {}, |
149 | }; | 148 | }; |
150 | 149 | ||
151 | static struct of_platform_driver clock_board_driver = { | 150 | static struct platform_driver clock_board_driver = { |
152 | .probe = clock_board_probe, | 151 | .probe = clock_board_probe, |
153 | .driver = { | 152 | .driver = { |
154 | .name = "clock_board", | 153 | .name = "clock_board", |
@@ -157,8 +156,7 @@ static struct of_platform_driver clock_board_driver = { | |||
157 | }, | 156 | }, |
158 | }; | 157 | }; |
159 | 158 | ||
160 | static int __devinit fhc_probe(struct platform_device *op, | 159 | static int __devinit fhc_probe(struct platform_device *op) |
161 | const struct of_device_id *match) | ||
162 | { | 160 | { |
163 | struct fhc *p = kzalloc(sizeof(*p), GFP_KERNEL); | 161 | struct fhc *p = kzalloc(sizeof(*p), GFP_KERNEL); |
164 | int err = -ENOMEM; | 162 | int err = -ENOMEM; |
@@ -254,7 +252,7 @@ static struct of_device_id __initdata fhc_match[] = { | |||
254 | {}, | 252 | {}, |
255 | }; | 253 | }; |
256 | 254 | ||
257 | static struct of_platform_driver fhc_driver = { | 255 | static struct platform_driver fhc_driver = { |
258 | .probe = fhc_probe, | 256 | .probe = fhc_probe, |
259 | .driver = { | 257 | .driver = { |
260 | .name = "fhc", | 258 | .name = "fhc", |
@@ -265,8 +263,8 @@ static struct of_platform_driver fhc_driver = { | |||
265 | 263 | ||
266 | static int __init sunfire_init(void) | 264 | static int __init sunfire_init(void) |
267 | { | 265 | { |
268 | (void) of_register_platform_driver(&fhc_driver); | 266 | (void) platform_driver_register(&fhc_driver); |
269 | (void) of_register_platform_driver(&clock_board_driver); | 267 | (void) platform_driver_register(&clock_board_driver); |
270 | return 0; | 268 | return 0; |
271 | } | 269 | } |
272 | 270 | ||
diff --git a/arch/sparc/kernel/chmc.c b/arch/sparc/kernel/chmc.c index 08c466ebb32b..668c7be5d365 100644 --- a/arch/sparc/kernel/chmc.c +++ b/arch/sparc/kernel/chmc.c | |||
@@ -392,8 +392,7 @@ static void __devinit jbusmc_construct_dimm_groups(struct jbusmc *p, | |||
392 | } | 392 | } |
393 | } | 393 | } |
394 | 394 | ||
395 | static int __devinit jbusmc_probe(struct platform_device *op, | 395 | static int __devinit jbusmc_probe(struct platform_device *op) |
396 | const struct of_device_id *match) | ||
397 | { | 396 | { |
398 | const struct linux_prom64_registers *mem_regs; | 397 | const struct linux_prom64_registers *mem_regs; |
399 | struct device_node *mem_node; | 398 | struct device_node *mem_node; |
@@ -690,8 +689,7 @@ static void chmc_fetch_decode_regs(struct chmc *p) | |||
690 | chmc_read_mcreg(p, CHMCTRL_DECODE4)); | 689 | chmc_read_mcreg(p, CHMCTRL_DECODE4)); |
691 | } | 690 | } |
692 | 691 | ||
693 | static int __devinit chmc_probe(struct platform_device *op, | 692 | static int __devinit chmc_probe(struct platform_device *op) |
694 | const struct of_device_id *match) | ||
695 | { | 693 | { |
696 | struct device_node *dp = op->dev.of_node; | 694 | struct device_node *dp = op->dev.of_node; |
697 | unsigned long ver; | 695 | unsigned long ver; |
@@ -765,13 +763,12 @@ out_free: | |||
765 | goto out; | 763 | goto out; |
766 | } | 764 | } |
767 | 765 | ||
768 | static int __devinit us3mc_probe(struct platform_device *op, | 766 | static int __devinit us3mc_probe(struct platform_device *op) |
769 | const struct of_device_id *match) | ||
770 | { | 767 | { |
771 | if (mc_type == MC_TYPE_SAFARI) | 768 | if (mc_type == MC_TYPE_SAFARI) |
772 | return chmc_probe(op, match); | 769 | return chmc_probe(op); |
773 | else if (mc_type == MC_TYPE_JBUS) | 770 | else if (mc_type == MC_TYPE_JBUS) |
774 | return jbusmc_probe(op, match); | 771 | return jbusmc_probe(op); |
775 | return -ENODEV; | 772 | return -ENODEV; |
776 | } | 773 | } |
777 | 774 | ||
@@ -810,7 +807,7 @@ static const struct of_device_id us3mc_match[] = { | |||
810 | }; | 807 | }; |
811 | MODULE_DEVICE_TABLE(of, us3mc_match); | 808 | MODULE_DEVICE_TABLE(of, us3mc_match); |
812 | 809 | ||
813 | static struct of_platform_driver us3mc_driver = { | 810 | static struct platform_driver us3mc_driver = { |
814 | .driver = { | 811 | .driver = { |
815 | .name = "us3mc", | 812 | .name = "us3mc", |
816 | .owner = THIS_MODULE, | 813 | .owner = THIS_MODULE, |
@@ -848,7 +845,7 @@ static int __init us3mc_init(void) | |||
848 | ret = register_dimm_printer(us3mc_dimm_printer); | 845 | ret = register_dimm_printer(us3mc_dimm_printer); |
849 | 846 | ||
850 | if (!ret) { | 847 | if (!ret) { |
851 | ret = of_register_platform_driver(&us3mc_driver); | 848 | ret = platform_driver_register(&us3mc_driver); |
852 | if (ret) | 849 | if (ret) |
853 | unregister_dimm_printer(us3mc_dimm_printer); | 850 | unregister_dimm_printer(us3mc_dimm_printer); |
854 | } | 851 | } |
@@ -859,7 +856,7 @@ static void __exit us3mc_cleanup(void) | |||
859 | { | 856 | { |
860 | if (us3mc_platform()) { | 857 | if (us3mc_platform()) { |
861 | unregister_dimm_printer(us3mc_dimm_printer); | 858 | unregister_dimm_printer(us3mc_dimm_printer); |
862 | of_unregister_platform_driver(&us3mc_driver); | 859 | platform_driver_unregister(&us3mc_driver); |
863 | } | 860 | } |
864 | } | 861 | } |
865 | 862 | ||
diff --git a/arch/sparc/kernel/pci_fire.c b/arch/sparc/kernel/pci_fire.c index efb896d68754..be5e2441c6d7 100644 --- a/arch/sparc/kernel/pci_fire.c +++ b/arch/sparc/kernel/pci_fire.c | |||
@@ -455,8 +455,7 @@ static int __devinit pci_fire_pbm_init(struct pci_pbm_info *pbm, | |||
455 | return 0; | 455 | return 0; |
456 | } | 456 | } |
457 | 457 | ||
458 | static int __devinit fire_probe(struct platform_device *op, | 458 | static int __devinit fire_probe(struct platform_device *op) |
459 | const struct of_device_id *match) | ||
460 | { | 459 | { |
461 | struct device_node *dp = op->dev.of_node; | 460 | struct device_node *dp = op->dev.of_node; |
462 | struct pci_pbm_info *pbm; | 461 | struct pci_pbm_info *pbm; |
@@ -507,7 +506,7 @@ static struct of_device_id __initdata fire_match[] = { | |||
507 | {}, | 506 | {}, |
508 | }; | 507 | }; |
509 | 508 | ||
510 | static struct of_platform_driver fire_driver = { | 509 | static struct platform_driver fire_driver = { |
511 | .driver = { | 510 | .driver = { |
512 | .name = DRIVER_NAME, | 511 | .name = DRIVER_NAME, |
513 | .owner = THIS_MODULE, | 512 | .owner = THIS_MODULE, |
@@ -518,7 +517,7 @@ static struct of_platform_driver fire_driver = { | |||
518 | 517 | ||
519 | static int __init fire_init(void) | 518 | static int __init fire_init(void) |
520 | { | 519 | { |
521 | return of_register_platform_driver(&fire_driver); | 520 | return platform_driver_register(&fire_driver); |
522 | } | 521 | } |
523 | 522 | ||
524 | subsys_initcall(fire_init); | 523 | subsys_initcall(fire_init); |
diff --git a/arch/sparc/kernel/pci_psycho.c b/arch/sparc/kernel/pci_psycho.c index 22eab7cf3b11..56ee745064de 100644 --- a/arch/sparc/kernel/pci_psycho.c +++ b/arch/sparc/kernel/pci_psycho.c | |||
@@ -503,8 +503,7 @@ static struct pci_pbm_info * __devinit psycho_find_sibling(u32 upa_portid) | |||
503 | 503 | ||
504 | #define PSYCHO_CONFIGSPACE 0x001000000UL | 504 | #define PSYCHO_CONFIGSPACE 0x001000000UL |
505 | 505 | ||
506 | static int __devinit psycho_probe(struct platform_device *op, | 506 | static int __devinit psycho_probe(struct platform_device *op) |
507 | const struct of_device_id *match) | ||
508 | { | 507 | { |
509 | const struct linux_prom64_registers *pr_regs; | 508 | const struct linux_prom64_registers *pr_regs; |
510 | struct device_node *dp = op->dev.of_node; | 509 | struct device_node *dp = op->dev.of_node; |
@@ -601,7 +600,7 @@ static struct of_device_id __initdata psycho_match[] = { | |||
601 | {}, | 600 | {}, |
602 | }; | 601 | }; |
603 | 602 | ||
604 | static struct of_platform_driver psycho_driver = { | 603 | static struct platform_driver psycho_driver = { |
605 | .driver = { | 604 | .driver = { |
606 | .name = DRIVER_NAME, | 605 | .name = DRIVER_NAME, |
607 | .owner = THIS_MODULE, | 606 | .owner = THIS_MODULE, |
@@ -612,7 +611,7 @@ static struct of_platform_driver psycho_driver = { | |||
612 | 611 | ||
613 | static int __init psycho_init(void) | 612 | static int __init psycho_init(void) |
614 | { | 613 | { |
615 | return of_register_platform_driver(&psycho_driver); | 614 | return platform_driver_register(&psycho_driver); |
616 | } | 615 | } |
617 | 616 | ||
618 | subsys_initcall(psycho_init); | 617 | subsys_initcall(psycho_init); |
diff --git a/arch/sparc/kernel/pci_sabre.c b/arch/sparc/kernel/pci_sabre.c index 5c3f5ec4cabc..2857073342d2 100644 --- a/arch/sparc/kernel/pci_sabre.c +++ b/arch/sparc/kernel/pci_sabre.c | |||
@@ -452,8 +452,7 @@ static void __devinit sabre_pbm_init(struct pci_pbm_info *pbm, | |||
452 | sabre_scan_bus(pbm, &op->dev); | 452 | sabre_scan_bus(pbm, &op->dev); |
453 | } | 453 | } |
454 | 454 | ||
455 | static int __devinit sabre_probe(struct platform_device *op, | 455 | static int __devinit sabre_probe(struct platform_device *op) |
456 | const struct of_device_id *match) | ||
457 | { | 456 | { |
458 | const struct linux_prom64_registers *pr_regs; | 457 | const struct linux_prom64_registers *pr_regs; |
459 | struct device_node *dp = op->dev.of_node; | 458 | struct device_node *dp = op->dev.of_node; |
@@ -464,7 +463,7 @@ static int __devinit sabre_probe(struct platform_device *op, | |||
464 | const u32 *vdma; | 463 | const u32 *vdma; |
465 | u64 clear_irq; | 464 | u64 clear_irq; |
466 | 465 | ||
467 | hummingbird_p = (match->data != NULL); | 466 | hummingbird_p = op->dev.of_match && (op->dev.of_match->data != NULL); |
468 | if (!hummingbird_p) { | 467 | if (!hummingbird_p) { |
469 | struct device_node *cpu_dp; | 468 | struct device_node *cpu_dp; |
470 | 469 | ||
@@ -595,7 +594,7 @@ static struct of_device_id __initdata sabre_match[] = { | |||
595 | {}, | 594 | {}, |
596 | }; | 595 | }; |
597 | 596 | ||
598 | static struct of_platform_driver sabre_driver = { | 597 | static struct platform_driver sabre_driver = { |
599 | .driver = { | 598 | .driver = { |
600 | .name = DRIVER_NAME, | 599 | .name = DRIVER_NAME, |
601 | .owner = THIS_MODULE, | 600 | .owner = THIS_MODULE, |
@@ -606,7 +605,7 @@ static struct of_platform_driver sabre_driver = { | |||
606 | 605 | ||
607 | static int __init sabre_init(void) | 606 | static int __init sabre_init(void) |
608 | { | 607 | { |
609 | return of_register_platform_driver(&sabre_driver); | 608 | return platform_driver_register(&sabre_driver); |
610 | } | 609 | } |
611 | 610 | ||
612 | subsys_initcall(sabre_init); | 611 | subsys_initcall(sabre_init); |
diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c index 445a47a2fb3d..6783410ceb02 100644 --- a/arch/sparc/kernel/pci_schizo.c +++ b/arch/sparc/kernel/pci_schizo.c | |||
@@ -1460,10 +1460,11 @@ out_err: | |||
1460 | return err; | 1460 | return err; |
1461 | } | 1461 | } |
1462 | 1462 | ||
1463 | static int __devinit schizo_probe(struct platform_device *op, | 1463 | static int __devinit schizo_probe(struct platform_device *op) |
1464 | const struct of_device_id *match) | ||
1465 | { | 1464 | { |
1466 | return __schizo_init(op, (unsigned long) match->data); | 1465 | if (!op->dev.of_match) |
1466 | return -EINVAL; | ||
1467 | return __schizo_init(op, (unsigned long) op->dev.of_match->data); | ||
1467 | } | 1468 | } |
1468 | 1469 | ||
1469 | /* The ordering of this table is very important. Some Tomatillo | 1470 | /* The ordering of this table is very important. Some Tomatillo |
@@ -1490,7 +1491,7 @@ static struct of_device_id __initdata schizo_match[] = { | |||
1490 | {}, | 1491 | {}, |
1491 | }; | 1492 | }; |
1492 | 1493 | ||
1493 | static struct of_platform_driver schizo_driver = { | 1494 | static struct platform_driver schizo_driver = { |
1494 | .driver = { | 1495 | .driver = { |
1495 | .name = DRIVER_NAME, | 1496 | .name = DRIVER_NAME, |
1496 | .owner = THIS_MODULE, | 1497 | .owner = THIS_MODULE, |
@@ -1501,7 +1502,7 @@ static struct of_platform_driver schizo_driver = { | |||
1501 | 1502 | ||
1502 | static int __init schizo_init(void) | 1503 | static int __init schizo_init(void) |
1503 | { | 1504 | { |
1504 | return of_register_platform_driver(&schizo_driver); | 1505 | return platform_driver_register(&schizo_driver); |
1505 | } | 1506 | } |
1506 | 1507 | ||
1507 | subsys_initcall(schizo_init); | 1508 | subsys_initcall(schizo_init); |
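With the match argument gone from probe(), drivers that keyed behaviour off the matched of_device_id entry (sabre and schizo above) now pull it from the device itself via op->dev.of_match, guarding against a NULL pointer. A minimal sketch of that pattern; the bar_* names and the helper are illustrative, and later kernels would typically do the same lookup with of_match_device().

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int bar_init_chip(struct platform_device *op, unsigned long flavor)
{
	return 0;                        /* placeholder for chip-specific setup */
}

static int bar_probe(struct platform_device *op)
{
	if (!op->dev.of_match)           /* match data may legitimately be absent */
		return -EINVAL;
	return bar_init_chip(op, (unsigned long) op->dev.of_match->data);
}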
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index 743344aa6d8a..158cd739b263 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c | |||
@@ -918,8 +918,7 @@ static int __devinit pci_sun4v_pbm_init(struct pci_pbm_info *pbm, | |||
918 | return 0; | 918 | return 0; |
919 | } | 919 | } |
920 | 920 | ||
921 | static int __devinit pci_sun4v_probe(struct platform_device *op, | 921 | static int __devinit pci_sun4v_probe(struct platform_device *op) |
922 | const struct of_device_id *match) | ||
923 | { | 922 | { |
924 | const struct linux_prom64_registers *regs; | 923 | const struct linux_prom64_registers *regs; |
925 | static int hvapi_negotiated = 0; | 924 | static int hvapi_negotiated = 0; |
@@ -1008,7 +1007,7 @@ static struct of_device_id __initdata pci_sun4v_match[] = { | |||
1008 | {}, | 1007 | {}, |
1009 | }; | 1008 | }; |
1010 | 1009 | ||
1011 | static struct of_platform_driver pci_sun4v_driver = { | 1010 | static struct platform_driver pci_sun4v_driver = { |
1012 | .driver = { | 1011 | .driver = { |
1013 | .name = DRIVER_NAME, | 1012 | .name = DRIVER_NAME, |
1014 | .owner = THIS_MODULE, | 1013 | .owner = THIS_MODULE, |
@@ -1019,7 +1018,7 @@ static struct of_platform_driver pci_sun4v_driver = { | |||
1019 | 1018 | ||
1020 | static int __init pci_sun4v_init(void) | 1019 | static int __init pci_sun4v_init(void) |
1021 | { | 1020 | { |
1022 | return of_register_platform_driver(&pci_sun4v_driver); | 1021 | return platform_driver_register(&pci_sun4v_driver); |
1023 | } | 1022 | } |
1024 | 1023 | ||
1025 | subsys_initcall(pci_sun4v_init); | 1024 | subsys_initcall(pci_sun4v_init); |
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c index aeaa09a3c655..2cdc131b50ac 100644 --- a/arch/sparc/kernel/pcic.c +++ b/arch/sparc/kernel/pcic.c | |||
@@ -700,10 +700,8 @@ static void pcic_clear_clock_irq(void) | |||
700 | 700 | ||
701 | static irqreturn_t pcic_timer_handler (int irq, void *h) | 701 | static irqreturn_t pcic_timer_handler (int irq, void *h) |
702 | { | 702 | { |
703 | write_seqlock(&xtime_lock); /* Dummy, to show that we remember */ | ||
704 | pcic_clear_clock_irq(); | 703 | pcic_clear_clock_irq(); |
705 | do_timer(1); | 704 | xtime_update(1); |
706 | write_sequnlock(&xtime_lock); | ||
707 | #ifndef CONFIG_SMP | 705 | #ifndef CONFIG_SMP |
708 | update_process_times(user_mode(get_irq_regs())); | 706 | update_process_times(user_mode(get_irq_regs())); |
709 | #endif | 707 | #endif |
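The pcic timer handler (and timer_interrupt() in time_32.c below) drops the open-coded write_seqlock(&xtime_lock)/do_timer(1)/write_sequnlock() sequence in favour of xtime_update(1), which apparently handles that serialization internally. A minimal sketch of the resulting handler shape, with foo_clear_clock_irq() standing in for the hardware acknowledge:

#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <asm/irq_regs.h>

static void foo_clear_clock_irq(void)
{
	/* hypothetical: ack the timer interrupt in the chip */
}

static irqreturn_t foo_timer_interrupt(int irq, void *dev_id)
{
	foo_clear_clock_irq();
	xtime_update(1);        /* replaces seqlock + do_timer(1) */
#ifndef CONFIG_SMP
	update_process_times(user_mode(get_irq_regs()));
#endif
	return IRQ_HANDLED;
}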
diff --git a/arch/sparc/kernel/pmc.c b/arch/sparc/kernel/pmc.c index 94536a85f161..93d7b4465f8d 100644 --- a/arch/sparc/kernel/pmc.c +++ b/arch/sparc/kernel/pmc.c | |||
@@ -51,8 +51,7 @@ static void pmc_swift_idle(void) | |||
51 | #endif | 51 | #endif |
52 | } | 52 | } |
53 | 53 | ||
54 | static int __devinit pmc_probe(struct platform_device *op, | 54 | static int __devinit pmc_probe(struct platform_device *op) |
55 | const struct of_device_id *match) | ||
56 | { | 55 | { |
57 | regs = of_ioremap(&op->resource[0], 0, | 56 | regs = of_ioremap(&op->resource[0], 0, |
58 | resource_size(&op->resource[0]), PMC_OBPNAME); | 57 | resource_size(&op->resource[0]), PMC_OBPNAME); |
@@ -78,7 +77,7 @@ static struct of_device_id __initdata pmc_match[] = { | |||
78 | }; | 77 | }; |
79 | MODULE_DEVICE_TABLE(of, pmc_match); | 78 | MODULE_DEVICE_TABLE(of, pmc_match); |
80 | 79 | ||
81 | static struct of_platform_driver pmc_driver = { | 80 | static struct platform_driver pmc_driver = { |
82 | .driver = { | 81 | .driver = { |
83 | .name = "pmc", | 82 | .name = "pmc", |
84 | .owner = THIS_MODULE, | 83 | .owner = THIS_MODULE, |
@@ -89,7 +88,7 @@ static struct of_platform_driver pmc_driver = { | |||
89 | 88 | ||
90 | static int __init pmc_init(void) | 89 | static int __init pmc_init(void) |
91 | { | 90 | { |
92 | return of_register_platform_driver(&pmc_driver); | 91 | return platform_driver_register(&pmc_driver); |
93 | } | 92 | } |
94 | 93 | ||
95 | /* This driver is not critical to the boot process | 94 | /* This driver is not critical to the boot process |
diff --git a/arch/sparc/kernel/power.c b/arch/sparc/kernel/power.c index 2c59f4d387dd..cd725fe238b2 100644 --- a/arch/sparc/kernel/power.c +++ b/arch/sparc/kernel/power.c | |||
@@ -33,7 +33,7 @@ static int __devinit has_button_interrupt(unsigned int irq, struct device_node * | |||
33 | return 1; | 33 | return 1; |
34 | } | 34 | } |
35 | 35 | ||
36 | static int __devinit power_probe(struct platform_device *op, const struct of_device_id *match) | 36 | static int __devinit power_probe(struct platform_device *op) |
37 | { | 37 | { |
38 | struct resource *res = &op->resource[0]; | 38 | struct resource *res = &op->resource[0]; |
39 | unsigned int irq = op->archdata.irqs[0]; | 39 | unsigned int irq = op->archdata.irqs[0]; |
@@ -59,7 +59,7 @@ static struct of_device_id __initdata power_match[] = { | |||
59 | {}, | 59 | {}, |
60 | }; | 60 | }; |
61 | 61 | ||
62 | static struct of_platform_driver power_driver = { | 62 | static struct platform_driver power_driver = { |
63 | .probe = power_probe, | 63 | .probe = power_probe, |
64 | .driver = { | 64 | .driver = { |
65 | .name = "power", | 65 | .name = "power", |
@@ -70,7 +70,7 @@ static struct of_platform_driver power_driver = { | |||
70 | 70 | ||
71 | static int __init power_init(void) | 71 | static int __init power_init(void) |
72 | { | 72 | { |
73 | return of_register_platform_driver(&power_driver); | 73 | return platform_driver_register(&power_driver); |
74 | } | 74 | } |
75 | 75 | ||
76 | device_initcall(power_init); | 76 | device_initcall(power_init); |
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c index 9c743b1886ff..19ab42a932db 100644 --- a/arch/sparc/kernel/time_32.c +++ b/arch/sparc/kernel/time_32.c | |||
@@ -85,7 +85,7 @@ int update_persistent_clock(struct timespec now) | |||
85 | 85 | ||
86 | /* | 86 | /* |
87 | * timer_interrupt() needs to keep up the real-time clock, | 87 | * timer_interrupt() needs to keep up the real-time clock, |
88 | * as well as call the "do_timer()" routine every clocktick | 88 | * as well as call the "xtime_update()" routine every clocktick |
89 | */ | 89 | */ |
90 | 90 | ||
91 | #define TICK_SIZE (tick_nsec / 1000) | 91 | #define TICK_SIZE (tick_nsec / 1000) |
@@ -96,14 +96,9 @@ static irqreturn_t timer_interrupt(int dummy, void *dev_id) | |||
96 | profile_tick(CPU_PROFILING); | 96 | profile_tick(CPU_PROFILING); |
97 | #endif | 97 | #endif |
98 | 98 | ||
99 | /* Protect counter clear so that do_gettimeoffset works */ | ||
100 | write_seqlock(&xtime_lock); | ||
101 | |||
102 | clear_clock_irq(); | 99 | clear_clock_irq(); |
103 | 100 | ||
104 | do_timer(1); | 101 | xtime_update(1); |
105 | |||
106 | write_sequnlock(&xtime_lock); | ||
107 | 102 | ||
108 | #ifndef CONFIG_SMP | 103 | #ifndef CONFIG_SMP |
109 | update_process_times(user_mode(get_irq_regs())); | 104 | update_process_times(user_mode(get_irq_regs())); |
@@ -142,7 +137,7 @@ static struct platform_device m48t59_rtc = { | |||
142 | }, | 137 | }, |
143 | }; | 138 | }; |
144 | 139 | ||
145 | static int __devinit clock_probe(struct platform_device *op, const struct of_device_id *match) | 140 | static int __devinit clock_probe(struct platform_device *op) |
146 | { | 141 | { |
147 | struct device_node *dp = op->dev.of_node; | 142 | struct device_node *dp = op->dev.of_node; |
148 | const char *model = of_get_property(dp, "model", NULL); | 143 | const char *model = of_get_property(dp, "model", NULL); |
@@ -176,7 +171,7 @@ static struct of_device_id __initdata clock_match[] = { | |||
176 | {}, | 171 | {}, |
177 | }; | 172 | }; |
178 | 173 | ||
179 | static struct of_platform_driver clock_driver = { | 174 | static struct platform_driver clock_driver = { |
180 | .probe = clock_probe, | 175 | .probe = clock_probe, |
181 | .driver = { | 176 | .driver = { |
182 | .name = "rtc", | 177 | .name = "rtc", |
@@ -189,7 +184,7 @@ static struct of_platform_driver clock_driver = { | |||
189 | /* Probe for the mostek real time clock chip. */ | 184 | /* Probe for the mostek real time clock chip. */ |
190 | static int __init clock_init(void) | 185 | static int __init clock_init(void) |
191 | { | 186 | { |
192 | return of_register_platform_driver(&clock_driver); | 187 | return platform_driver_register(&clock_driver); |
193 | } | 188 | } |
194 | /* Must be after subsys_initcall() so that busses are probed. Must | 189 | /* Must be after subsys_initcall() so that busses are probed. Must |
195 | * be before device_initcall() because things like the RTC driver | 190 | * be before device_initcall() because things like the RTC driver |
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c index 3bc9c9979b92..e1862793a61d 100644 --- a/arch/sparc/kernel/time_64.c +++ b/arch/sparc/kernel/time_64.c | |||
@@ -419,7 +419,7 @@ static struct platform_device rtc_cmos_device = { | |||
419 | .num_resources = 1, | 419 | .num_resources = 1, |
420 | }; | 420 | }; |
421 | 421 | ||
422 | static int __devinit rtc_probe(struct platform_device *op, const struct of_device_id *match) | 422 | static int __devinit rtc_probe(struct platform_device *op) |
423 | { | 423 | { |
424 | struct resource *r; | 424 | struct resource *r; |
425 | 425 | ||
@@ -462,7 +462,7 @@ static struct of_device_id __initdata rtc_match[] = { | |||
462 | {}, | 462 | {}, |
463 | }; | 463 | }; |
464 | 464 | ||
465 | static struct of_platform_driver rtc_driver = { | 465 | static struct platform_driver rtc_driver = { |
466 | .probe = rtc_probe, | 466 | .probe = rtc_probe, |
467 | .driver = { | 467 | .driver = { |
468 | .name = "rtc", | 468 | .name = "rtc", |
@@ -477,7 +477,7 @@ static struct platform_device rtc_bq4802_device = { | |||
477 | .num_resources = 1, | 477 | .num_resources = 1, |
478 | }; | 478 | }; |
479 | 479 | ||
480 | static int __devinit bq4802_probe(struct platform_device *op, const struct of_device_id *match) | 480 | static int __devinit bq4802_probe(struct platform_device *op) |
481 | { | 481 | { |
482 | 482 | ||
483 | printk(KERN_INFO "%s: BQ4802 regs at 0x%llx\n", | 483 | printk(KERN_INFO "%s: BQ4802 regs at 0x%llx\n", |
@@ -495,7 +495,7 @@ static struct of_device_id __initdata bq4802_match[] = { | |||
495 | {}, | 495 | {}, |
496 | }; | 496 | }; |
497 | 497 | ||
498 | static struct of_platform_driver bq4802_driver = { | 498 | static struct platform_driver bq4802_driver = { |
499 | .probe = bq4802_probe, | 499 | .probe = bq4802_probe, |
500 | .driver = { | 500 | .driver = { |
501 | .name = "bq4802", | 501 | .name = "bq4802", |
@@ -534,7 +534,7 @@ static struct platform_device m48t59_rtc = { | |||
534 | }, | 534 | }, |
535 | }; | 535 | }; |
536 | 536 | ||
537 | static int __devinit mostek_probe(struct platform_device *op, const struct of_device_id *match) | 537 | static int __devinit mostek_probe(struct platform_device *op) |
538 | { | 538 | { |
539 | struct device_node *dp = op->dev.of_node; | 539 | struct device_node *dp = op->dev.of_node; |
540 | 540 | ||
@@ -559,7 +559,7 @@ static struct of_device_id __initdata mostek_match[] = { | |||
559 | {}, | 559 | {}, |
560 | }; | 560 | }; |
561 | 561 | ||
562 | static struct of_platform_driver mostek_driver = { | 562 | static struct platform_driver mostek_driver = { |
563 | .probe = mostek_probe, | 563 | .probe = mostek_probe, |
564 | .driver = { | 564 | .driver = { |
565 | .name = "mostek", | 565 | .name = "mostek", |
@@ -586,9 +586,9 @@ static int __init clock_init(void) | |||
586 | if (tlb_type == hypervisor) | 586 | if (tlb_type == hypervisor) |
587 | return platform_device_register(&rtc_sun4v_device); | 587 | return platform_device_register(&rtc_sun4v_device); |
588 | 588 | ||
589 | (void) of_register_platform_driver(&rtc_driver); | 589 | (void) platform_driver_register(&rtc_driver); |
590 | (void) of_register_platform_driver(&mostek_driver); | 590 | (void) platform_driver_register(&mostek_driver); |
591 | (void) of_register_platform_driver(&bq4802_driver); | 591 | (void) platform_driver_register(&bq4802_driver); |
592 | 592 | ||
593 | return 0; | 593 | return 0; |
594 | } | 594 | } |
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index 0c1e6783657f..92b557afe535 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S | |||
@@ -108,7 +108,7 @@ SECTIONS | |||
108 | __sun4v_2insn_patch_end = .; | 108 | __sun4v_2insn_patch_end = .; |
109 | } | 109 | } |
110 | 110 | ||
111 | PERCPU(PAGE_SIZE) | 111 | PERCPU(SMP_CACHE_BYTES, PAGE_SIZE) |
112 | 112 | ||
113 | . = ALIGN(PAGE_SIZE); | 113 | . = ALIGN(PAGE_SIZE); |
114 | __init_end = .; | 114 | __init_end = .; |
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c index cbddeb38ffda..d3c7a12ad879 100644 --- a/arch/sparc/lib/atomic32.c +++ b/arch/sparc/lib/atomic32.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #define ATOMIC_HASH(a) (&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)]) | 16 | #define ATOMIC_HASH(a) (&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)]) |
17 | 17 | ||
18 | spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = { | 18 | spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = { |
19 | [0 ... (ATOMIC_HASH_SIZE-1)] = SPIN_LOCK_UNLOCKED | 19 | [0 ... (ATOMIC_HASH_SIZE-1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash) |
20 | }; | 20 | }; |
21 | 21 | ||
22 | #else /* SMP */ | 22 | #else /* SMP */ |
diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h index fe0d10dcae57..d03ec124a598 100644 --- a/arch/tile/include/asm/futex.h +++ b/arch/tile/include/asm/futex.h | |||
@@ -29,16 +29,16 @@ | |||
29 | #include <linux/uaccess.h> | 29 | #include <linux/uaccess.h> |
30 | #include <linux/errno.h> | 30 | #include <linux/errno.h> |
31 | 31 | ||
32 | extern struct __get_user futex_set(int __user *v, int i); | 32 | extern struct __get_user futex_set(u32 __user *v, int i); |
33 | extern struct __get_user futex_add(int __user *v, int n); | 33 | extern struct __get_user futex_add(u32 __user *v, int n); |
34 | extern struct __get_user futex_or(int __user *v, int n); | 34 | extern struct __get_user futex_or(u32 __user *v, int n); |
35 | extern struct __get_user futex_andn(int __user *v, int n); | 35 | extern struct __get_user futex_andn(u32 __user *v, int n); |
36 | extern struct __get_user futex_cmpxchg(int __user *v, int o, int n); | 36 | extern struct __get_user futex_cmpxchg(u32 __user *v, int o, int n); |
37 | 37 | ||
38 | #ifndef __tilegx__ | 38 | #ifndef __tilegx__ |
39 | extern struct __get_user futex_xor(int __user *v, int n); | 39 | extern struct __get_user futex_xor(u32 __user *v, int n); |
40 | #else | 40 | #else |
41 | static inline struct __get_user futex_xor(int __user *uaddr, int n) | 41 | static inline struct __get_user futex_xor(u32 __user *uaddr, int n) |
42 | { | 42 | { |
43 | struct __get_user asm_ret = __get_user_4(uaddr); | 43 | struct __get_user asm_ret = __get_user_4(uaddr); |
44 | if (!asm_ret.err) { | 44 | if (!asm_ret.err) { |
@@ -53,7 +53,7 @@ static inline struct __get_user futex_xor(int __user *uaddr, int n) | |||
53 | } | 53 | } |
54 | #endif | 54 | #endif |
55 | 55 | ||
56 | static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | 56 | static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) |
57 | { | 57 | { |
58 | int op = (encoded_op >> 28) & 7; | 58 | int op = (encoded_op >> 28) & 7; |
59 | int cmp = (encoded_op >> 24) & 15; | 59 | int cmp = (encoded_op >> 24) & 15; |
@@ -65,7 +65,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | |||
65 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) | 65 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) |
66 | oparg = 1 << oparg; | 66 | oparg = 1 << oparg; |
67 | 67 | ||
68 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | 68 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
69 | return -EFAULT; | 69 | return -EFAULT; |
70 | 70 | ||
71 | pagefault_disable(); | 71 | pagefault_disable(); |
@@ -119,16 +119,17 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | |||
119 | return ret; | 119 | return ret; |
120 | } | 120 | } |
121 | 121 | ||
122 | static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, | 122 | static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, |
123 | int newval) | 123 | u32 oldval, u32 newval) |
124 | { | 124 | { |
125 | struct __get_user asm_ret; | 125 | struct __get_user asm_ret; |
126 | 126 | ||
127 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | 127 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
128 | return -EFAULT; | 128 | return -EFAULT; |
129 | 129 | ||
130 | asm_ret = futex_cmpxchg(uaddr, oldval, newval); | 130 | asm_ret = futex_cmpxchg(uaddr, oldval, newval); |
131 | return asm_ret.err ? asm_ret.err : asm_ret.val; | 131 | *uval = asm_ret.val; |
132 | return asm_ret.err; | ||
132 | } | 133 | } |
133 | 134 | ||
134 | #ifndef __tilegx__ | 135 | #ifndef __tilegx__ |
diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S index 25fdc0c1839a..c6ce378e0678 100644 --- a/arch/tile/kernel/vmlinux.lds.S +++ b/arch/tile/kernel/vmlinux.lds.S | |||
@@ -63,7 +63,7 @@ SECTIONS | |||
63 | *(.init.page) | 63 | *(.init.page) |
64 | } :data =0 | 64 | } :data =0 |
65 | INIT_DATA_SECTION(16) | 65 | INIT_DATA_SECTION(16) |
66 | PERCPU(PAGE_SIZE) | 66 | PERCPU(L2_CACHE_BYTES, PAGE_SIZE) |
67 | . = ALIGN(PAGE_SIZE); | 67 | . = ALIGN(PAGE_SIZE); |
68 | VMLINUX_SYMBOL(_einitdata) = .; | 68 | VMLINUX_SYMBOL(_einitdata) = .; |
69 | 69 | ||
diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common index e351e14b4339..1e78940218c0 100644 --- a/arch/um/Kconfig.common +++ b/arch/um/Kconfig.common | |||
@@ -7,6 +7,7 @@ config UML | |||
7 | bool | 7 | bool |
8 | default y | 8 | default y |
9 | select HAVE_GENERIC_HARDIRQS | 9 | select HAVE_GENERIC_HARDIRQS |
10 | select GENERIC_HARDIRQS_NO_DEPRECATED | ||
10 | 11 | ||
11 | config MMU | 12 | config MMU |
12 | bool | 13 | bool |
diff --git a/arch/um/Kconfig.x86 b/arch/um/Kconfig.x86 index 5ee328099c63..02fb017fed47 100644 --- a/arch/um/Kconfig.x86 +++ b/arch/um/Kconfig.x86 | |||
@@ -10,6 +10,8 @@ endmenu | |||
10 | 10 | ||
11 | config UML_X86 | 11 | config UML_X86 |
12 | def_bool y | 12 | def_bool y |
13 | select GENERIC_FIND_FIRST_BIT | ||
14 | select GENERIC_FIND_NEXT_BIT | ||
13 | 15 | ||
14 | config 64BIT | 16 | config 64BIT |
15 | bool | 17 | bool |
@@ -19,6 +21,9 @@ config X86_32 | |||
19 | def_bool !64BIT | 21 | def_bool !64BIT |
20 | select HAVE_AOUT | 22 | select HAVE_AOUT |
21 | 23 | ||
24 | config X86_64 | ||
25 | def_bool 64BIT | ||
26 | |||
22 | config RWSEM_XCHGADD_ALGORITHM | 27 | config RWSEM_XCHGADD_ALGORITHM |
23 | def_bool X86_XADD | 28 | def_bool X86_XADD |
24 | 29 | ||
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c index 975613b23dcf..c70e047eed72 100644 --- a/arch/um/drivers/mconsole_kern.c +++ b/arch/um/drivers/mconsole_kern.c | |||
@@ -124,35 +124,18 @@ void mconsole_log(struct mc_request *req) | |||
124 | #if 0 | 124 | #if 0 |
125 | void mconsole_proc(struct mc_request *req) | 125 | void mconsole_proc(struct mc_request *req) |
126 | { | 126 | { |
127 | struct nameidata nd; | ||
128 | struct vfsmount *mnt = current->nsproxy->pid_ns->proc_mnt; | 127 | struct vfsmount *mnt = current->nsproxy->pid_ns->proc_mnt; |
129 | struct file *file; | 128 | struct file *file; |
130 | int n, err; | 129 | int n; |
131 | char *ptr = req->request.data, *buf; | 130 | char *ptr = req->request.data, *buf; |
132 | mm_segment_t old_fs = get_fs(); | 131 | mm_segment_t old_fs = get_fs(); |
133 | 132 | ||
134 | ptr += strlen("proc"); | 133 | ptr += strlen("proc"); |
135 | ptr = skip_spaces(ptr); | 134 | ptr = skip_spaces(ptr); |
136 | 135 | ||
137 | err = vfs_path_lookup(mnt->mnt_root, mnt, ptr, LOOKUP_FOLLOW, &nd); | 136 | file = file_open_root(mnt->mnt_root, mnt, ptr, O_RDONLY); |
138 | if (err) { | ||
139 | mconsole_reply(req, "Failed to look up file", 1, 0); | ||
140 | goto out; | ||
141 | } | ||
142 | |||
143 | err = may_open(&nd.path, MAY_READ, O_RDONLY); | ||
144 | if (result) { | ||
145 | mconsole_reply(req, "Failed to open file", 1, 0); | ||
146 | path_put(&nd.path); | ||
147 | goto out; | ||
148 | } | ||
149 | |||
150 | file = dentry_open(nd.path.dentry, nd.path.mnt, O_RDONLY, | ||
151 | current_cred()); | ||
152 | err = PTR_ERR(file); | ||
153 | if (IS_ERR(file)) { | 137 | if (IS_ERR(file)) { |
154 | mconsole_reply(req, "Failed to open file", 1, 0); | 138 | mconsole_reply(req, "Failed to open file", 1, 0); |
155 | path_put(&nd.path); | ||
156 | goto out; | 139 | goto out; |
157 | } | 140 | } |
158 | 141 | ||
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index ba4a98ba39c0..620f5b70957d 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c | |||
@@ -185,7 +185,7 @@ struct ubd { | |||
185 | .no_cow = 0, \ | 185 | .no_cow = 0, \ |
186 | .shared = 0, \ | 186 | .shared = 0, \ |
187 | .cow = DEFAULT_COW, \ | 187 | .cow = DEFAULT_COW, \ |
188 | .lock = SPIN_LOCK_UNLOCKED, \ | 188 | .lock = __SPIN_LOCK_UNLOCKED(ubd_devs.lock), \ |
189 | .request = NULL, \ | 189 | .request = NULL, \ |
190 | .start_sg = 0, \ | 190 | .start_sg = 0, \ |
191 | .end_sg = 0, \ | 191 | .end_sg = 0, \ |
diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S index ac55b9efa1ce..34bede8aad4a 100644 --- a/arch/um/include/asm/common.lds.S +++ b/arch/um/include/asm/common.lds.S | |||
@@ -42,7 +42,7 @@ | |||
42 | INIT_SETUP(0) | 42 | INIT_SETUP(0) |
43 | } | 43 | } |
44 | 44 | ||
45 | PERCPU(32) | 45 | PERCPU(32, 32) |
46 | 46 | ||
47 | .initcall.init : { | 47 | .initcall.init : { |
48 | INIT_CALLS | 48 | INIT_CALLS |
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c index 3f0ac9e0c966..64cfea80cfe2 100644 --- a/arch/um/kernel/irq.c +++ b/arch/um/kernel/irq.c | |||
@@ -35,8 +35,10 @@ int show_interrupts(struct seq_file *p, void *v) | |||
35 | } | 35 | } |
36 | 36 | ||
37 | if (i < NR_IRQS) { | 37 | if (i < NR_IRQS) { |
38 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); | 38 | struct irq_desc *desc = irq_to_desc(i); |
39 | action = irq_desc[i].action; | 39 | |
40 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
41 | action = desc->action; | ||
40 | if (!action) | 42 | if (!action) |
41 | goto skip; | 43 | goto skip; |
42 | seq_printf(p, "%3d: ",i); | 44 | seq_printf(p, "%3d: ",i); |
@@ -46,7 +48,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
46 | for_each_online_cpu(j) | 48 | for_each_online_cpu(j) |
47 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | 49 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); |
48 | #endif | 50 | #endif |
49 | seq_printf(p, " %14s", irq_desc[i].chip->name); | 51 | seq_printf(p, " %14s", get_irq_desc_chip(desc)->name); |
50 | seq_printf(p, " %s", action->name); | 52 | seq_printf(p, " %s", action->name); |
51 | 53 | ||
52 | for (action=action->next; action; action = action->next) | 54 | for (action=action->next; action; action = action->next) |
@@ -54,7 +56,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
54 | 56 | ||
55 | seq_putc(p, '\n'); | 57 | seq_putc(p, '\n'); |
56 | skip: | 58 | skip: |
57 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); | 59 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
58 | } else if (i == NR_IRQS) | 60 | } else if (i == NR_IRQS) |
59 | seq_putc(p, '\n'); | 61 | seq_putc(p, '\n'); |
60 | 62 | ||
@@ -360,10 +362,10 @@ EXPORT_SYMBOL(um_request_irq); | |||
360 | EXPORT_SYMBOL(reactivate_fd); | 362 | EXPORT_SYMBOL(reactivate_fd); |
361 | 363 | ||
362 | /* | 364 | /* |
363 | * irq_chip must define (startup || enable) && | 365 | * irq_chip must define at least enable/disable and ack when |
364 | * (shutdown || disable) && end | 366 | * the edge handler is used. |
365 | */ | 367 | */ |
366 | static void dummy(unsigned int irq) | 368 | static void dummy(struct irq_data *d) |
367 | { | 369 | { |
368 | } | 370 | } |
369 | 371 | ||
@@ -371,20 +373,17 @@ static void dummy(unsigned int irq) | |||
371 | static struct irq_chip normal_irq_type = { | 373 | static struct irq_chip normal_irq_type = { |
372 | .name = "SIGIO", | 374 | .name = "SIGIO", |
373 | .release = free_irq_by_irq_and_dev, | 375 | .release = free_irq_by_irq_and_dev, |
374 | .disable = dummy, | 376 | .irq_disable = dummy, |
375 | .enable = dummy, | 377 | .irq_enable = dummy, |
376 | .ack = dummy, | 378 | .irq_ack = dummy, |
377 | .end = dummy | ||
378 | }; | 379 | }; |
379 | 380 | ||
380 | static struct irq_chip SIGVTALRM_irq_type = { | 381 | static struct irq_chip SIGVTALRM_irq_type = { |
381 | .name = "SIGVTALRM", | 382 | .name = "SIGVTALRM", |
382 | .release = free_irq_by_irq_and_dev, | 383 | .release = free_irq_by_irq_and_dev, |
383 | .shutdown = dummy, /* never called */ | 384 | .irq_disable = dummy, |
384 | .disable = dummy, | 385 | .irq_enable = dummy, |
385 | .enable = dummy, | 386 | .irq_ack = dummy, |
386 | .ack = dummy, | ||
387 | .end = dummy | ||
388 | }; | 387 | }; |
389 | 388 | ||
390 | void __init init_IRQ(void) | 389 | void __init init_IRQ(void) |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index d5ed94d30aad..e1f65c46bc93 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -64,8 +64,12 @@ config X86 | |||
64 | select HAVE_TEXT_POKE_SMP | 64 | select HAVE_TEXT_POKE_SMP |
65 | select HAVE_GENERIC_HARDIRQS | 65 | select HAVE_GENERIC_HARDIRQS |
66 | select HAVE_SPARSE_IRQ | 66 | select HAVE_SPARSE_IRQ |
67 | select GENERIC_FIND_FIRST_BIT | ||
68 | select GENERIC_FIND_NEXT_BIT | ||
67 | select GENERIC_IRQ_PROBE | 69 | select GENERIC_IRQ_PROBE |
68 | select GENERIC_PENDING_IRQ if SMP | 70 | select GENERIC_PENDING_IRQ if SMP |
71 | select GENERIC_IRQ_SHOW | ||
72 | select IRQ_FORCED_THREADING | ||
69 | select USE_GENERIC_SMP_HELPERS if SMP | 73 | select USE_GENERIC_SMP_HELPERS if SMP |
70 | 74 | ||
71 | config INSTRUCTION_DECODER | 75 | config INSTRUCTION_DECODER |
@@ -217,10 +221,6 @@ config X86_HT | |||
217 | def_bool y | 221 | def_bool y |
218 | depends on SMP | 222 | depends on SMP |
219 | 223 | ||
220 | config X86_TRAMPOLINE | ||
221 | def_bool y | ||
222 | depends on SMP || (64BIT && ACPI_SLEEP) | ||
223 | |||
224 | config X86_32_LAZY_GS | 224 | config X86_32_LAZY_GS |
225 | def_bool y | 225 | def_bool y |
226 | depends on X86_32 && !CC_STACKPROTECTOR | 226 | depends on X86_32 && !CC_STACKPROTECTOR |
@@ -382,6 +382,8 @@ config X86_INTEL_CE | |||
382 | depends on X86_32 | 382 | depends on X86_32 |
383 | depends on X86_EXTENDED_PLATFORM | 383 | depends on X86_EXTENDED_PLATFORM |
384 | select X86_REBOOTFIXUPS | 384 | select X86_REBOOTFIXUPS |
385 | select OF | ||
386 | select OF_EARLY_FLATTREE | ||
385 | ---help--- | 387 | ---help--- |
386 | Select for the Intel CE media processor (CE4100) SOC. | 388 | Select for the Intel CE media processor (CE4100) SOC. |
387 | This option compiles in support for the CE4100 SOC for settop | 389 | This option compiles in support for the CE4100 SOC for settop |
@@ -811,7 +813,7 @@ config X86_LOCAL_APIC | |||
811 | 813 | ||
812 | config X86_IO_APIC | 814 | config X86_IO_APIC |
813 | def_bool y | 815 | def_bool y |
814 | depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC | 816 | depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC |
815 | 817 | ||
816 | config X86_VISWS_APIC | 818 | config X86_VISWS_APIC |
817 | def_bool y | 819 | def_bool y |
@@ -1705,7 +1707,7 @@ config HAVE_ARCH_EARLY_PFN_TO_NID | |||
1705 | depends on NUMA | 1707 | depends on NUMA |
1706 | 1708 | ||
1707 | config USE_PERCPU_NUMA_NODE_ID | 1709 | config USE_PERCPU_NUMA_NODE_ID |
1708 | def_bool X86_64 | 1710 | def_bool y |
1709 | depends on NUMA | 1711 | depends on NUMA |
1710 | 1712 | ||
1711 | menu "Power management and ACPI options" | 1713 | menu "Power management and ACPI options" |
@@ -2066,9 +2068,10 @@ config SCx200HR_TIMER | |||
2066 | 2068 | ||
2067 | config OLPC | 2069 | config OLPC |
2068 | bool "One Laptop Per Child support" | 2070 | bool "One Laptop Per Child support" |
2071 | depends on !X86_PAE | ||
2069 | select GPIOLIB | 2072 | select GPIOLIB |
2070 | select OLPC_OPENFIRMWARE | 2073 | select OF |
2071 | depends on !X86_64 && !X86_PAE | 2074 | select OF_PROMTREE if PROC_DEVICETREE |
2072 | ---help--- | 2075 | ---help--- |
2073 | Add support for detecting the unique features of the OLPC | 2076 | Add support for detecting the unique features of the OLPC |
2074 | XO hardware. | 2077 | XO hardware. |
@@ -2079,21 +2082,6 @@ config OLPC_XO1 | |||
2079 | ---help--- | 2082 | ---help--- |
2080 | Add support for non-essential features of the OLPC XO-1 laptop. | 2083 | Add support for non-essential features of the OLPC XO-1 laptop. |
2081 | 2084 | ||
2082 | config OLPC_OPENFIRMWARE | ||
2083 | bool "Support for OLPC's Open Firmware" | ||
2084 | depends on !X86_64 && !X86_PAE | ||
2085 | default n | ||
2086 | select OF | ||
2087 | help | ||
2088 | This option adds support for the implementation of Open Firmware | ||
2089 | that is used on the OLPC XO-1 Children's Machine. | ||
2090 | If unsure, say N here. | ||
2091 | |||
2092 | config OLPC_OPENFIRMWARE_DT | ||
2093 | bool | ||
2094 | default y if OLPC_OPENFIRMWARE && PROC_DEVICETREE | ||
2095 | select OF_PROMTREE | ||
2096 | |||
2097 | endif # X86_32 | 2085 | endif # X86_32 |
2098 | 2086 | ||
2099 | config AMD_NB | 2087 | config AMD_NB |
@@ -2138,6 +2126,11 @@ config SYSVIPC_COMPAT | |||
2138 | def_bool y | 2126 | def_bool y |
2139 | depends on COMPAT && SYSVIPC | 2127 | depends on COMPAT && SYSVIPC |
2140 | 2128 | ||
2129 | config KEYS_COMPAT | ||
2130 | bool | ||
2131 | depends on COMPAT && KEYS | ||
2132 | default y | ||
2133 | |||
2141 | endmenu | 2134 | endmenu |
2142 | 2135 | ||
2143 | 2136 | ||
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index 283c5a6a03a6..ed47e6e1747f 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu | |||
@@ -294,11 +294,6 @@ config X86_GENERIC | |||
294 | 294 | ||
295 | endif | 295 | endif |
296 | 296 | ||
297 | config X86_CPU | ||
298 | def_bool y | ||
299 | select GENERIC_FIND_FIRST_BIT | ||
300 | select GENERIC_FIND_NEXT_BIT | ||
301 | |||
302 | # | 297 | # |
303 | # Define implied options from the CPU selection here | 298 | # Define implied options from the CPU selection here |
304 | config X86_INTERNODE_CACHE_SHIFT | 299 | config X86_INTERNODE_CACHE_SHIFT |
diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c index 646aa78ba5fd..46a823882437 100644 --- a/arch/x86/boot/compressed/mkpiggy.c +++ b/arch/x86/boot/compressed/mkpiggy.c | |||
@@ -62,7 +62,12 @@ int main(int argc, char *argv[]) | |||
62 | if (fseek(f, -4L, SEEK_END)) { | 62 | if (fseek(f, -4L, SEEK_END)) { |
63 | perror(argv[1]); | 63 | perror(argv[1]); |
64 | } | 64 | } |
65 | fread(&olen, sizeof olen, 1, f); | 65 | |
66 | if (fread(&olen, sizeof(olen), 1, f) != 1) { | ||
67 | perror(argv[1]); | ||
68 | return 1; | ||
69 | } | ||
70 | |||
66 | ilen = ftell(f); | 71 | ilen = ftell(f); |
67 | olen = getle32(&olen); | 72 | olen = getle32(&olen); |
68 | fclose(f); | 73 | fclose(f); |
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index e1e60c7d5813..e0e6340c8dad 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c | |||
@@ -873,22 +873,18 @@ rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len) | |||
873 | crypto_ablkcipher_clear_flags(ctr_tfm, ~0); | 873 | crypto_ablkcipher_clear_flags(ctr_tfm, ~0); |
874 | 874 | ||
875 | ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len); | 875 | ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len); |
876 | if (ret) { | 876 | if (ret) |
877 | crypto_free_ablkcipher(ctr_tfm); | 877 | goto out_free_ablkcipher; |
878 | return ret; | ||
879 | } | ||
880 | 878 | ||
879 | ret = -ENOMEM; | ||
881 | req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL); | 880 | req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL); |
882 | if (!req) { | 881 | if (!req) |
883 | crypto_free_ablkcipher(ctr_tfm); | 882 | goto out_free_ablkcipher; |
884 | return -EINVAL; | ||
885 | } | ||
886 | 883 | ||
887 | req_data = kmalloc(sizeof(*req_data), GFP_KERNEL); | 884 | req_data = kmalloc(sizeof(*req_data), GFP_KERNEL); |
888 | if (!req_data) { | 885 | if (!req_data) |
889 | crypto_free_ablkcipher(ctr_tfm); | 886 | goto out_free_request; |
890 | return -ENOMEM; | 887 | |
891 | } | ||
892 | memset(req_data->iv, 0, sizeof(req_data->iv)); | 888 | memset(req_data->iv, 0, sizeof(req_data->iv)); |
893 | 889 | ||
894 | /* Clear the data in the hash sub key container to zero.*/ | 890 | /* Clear the data in the hash sub key container to zero.*/ |
@@ -913,8 +909,10 @@ rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len) | |||
913 | if (!ret) | 909 | if (!ret) |
914 | ret = req_data->result.err; | 910 | ret = req_data->result.err; |
915 | } | 911 | } |
916 | ablkcipher_request_free(req); | ||
917 | kfree(req_data); | 912 | kfree(req_data); |
913 | out_free_request: | ||
914 | ablkcipher_request_free(req); | ||
915 | out_free_ablkcipher: | ||
918 | crypto_free_ablkcipher(ctr_tfm); | 916 | crypto_free_ablkcipher(ctr_tfm); |
919 | return ret; | 917 | return ret; |
920 | } | 918 | } |
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 518bb99c3394..430312ba6e3f 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S | |||
@@ -25,6 +25,8 @@ | |||
25 | #define sysretl_audit ia32_ret_from_sys_call | 25 | #define sysretl_audit ia32_ret_from_sys_call |
26 | #endif | 26 | #endif |
27 | 27 | ||
28 | .section .entry.text, "ax" | ||
29 | |||
28 | #define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8) | 30 | #define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8) |
29 | 31 | ||
30 | .macro IA32_ARG_FIXUP noebp=0 | 32 | .macro IA32_ARG_FIXUP noebp=0 |
@@ -126,26 +128,20 @@ ENTRY(ia32_sysenter_target) | |||
126 | */ | 128 | */ |
127 | ENABLE_INTERRUPTS(CLBR_NONE) | 129 | ENABLE_INTERRUPTS(CLBR_NONE) |
128 | movl %ebp,%ebp /* zero extension */ | 130 | movl %ebp,%ebp /* zero extension */ |
129 | pushq $__USER32_DS | 131 | pushq_cfi $__USER32_DS |
130 | CFI_ADJUST_CFA_OFFSET 8 | ||
131 | /*CFI_REL_OFFSET ss,0*/ | 132 | /*CFI_REL_OFFSET ss,0*/ |
132 | pushq %rbp | 133 | pushq_cfi %rbp |
133 | CFI_ADJUST_CFA_OFFSET 8 | ||
134 | CFI_REL_OFFSET rsp,0 | 134 | CFI_REL_OFFSET rsp,0 |
135 | pushfq | 135 | pushfq_cfi |
136 | CFI_ADJUST_CFA_OFFSET 8 | ||
137 | /*CFI_REL_OFFSET rflags,0*/ | 136 | /*CFI_REL_OFFSET rflags,0*/ |
138 | movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d | 137 | movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d |
139 | CFI_REGISTER rip,r10 | 138 | CFI_REGISTER rip,r10 |
140 | pushq $__USER32_CS | 139 | pushq_cfi $__USER32_CS |
141 | CFI_ADJUST_CFA_OFFSET 8 | ||
142 | /*CFI_REL_OFFSET cs,0*/ | 140 | /*CFI_REL_OFFSET cs,0*/ |
143 | movl %eax, %eax | 141 | movl %eax, %eax |
144 | pushq %r10 | 142 | pushq_cfi %r10 |
145 | CFI_ADJUST_CFA_OFFSET 8 | ||
146 | CFI_REL_OFFSET rip,0 | 143 | CFI_REL_OFFSET rip,0 |
147 | pushq %rax | 144 | pushq_cfi %rax |
148 | CFI_ADJUST_CFA_OFFSET 8 | ||
149 | cld | 145 | cld |
150 | SAVE_ARGS 0,0,1 | 146 | SAVE_ARGS 0,0,1 |
151 | /* no need to do an access_ok check here because rbp has been | 147 | /* no need to do an access_ok check here because rbp has been |
@@ -182,11 +178,9 @@ sysexit_from_sys_call: | |||
182 | xorq %r9,%r9 | 178 | xorq %r9,%r9 |
183 | xorq %r10,%r10 | 179 | xorq %r10,%r10 |
184 | xorq %r11,%r11 | 180 | xorq %r11,%r11 |
185 | popfq | 181 | popfq_cfi |
186 | CFI_ADJUST_CFA_OFFSET -8 | ||
187 | /*CFI_RESTORE rflags*/ | 182 | /*CFI_RESTORE rflags*/ |
188 | popq %rcx /* User %esp */ | 183 | popq_cfi %rcx /* User %esp */ |
189 | CFI_ADJUST_CFA_OFFSET -8 | ||
190 | CFI_REGISTER rsp,rcx | 184 | CFI_REGISTER rsp,rcx |
191 | TRACE_IRQS_ON | 185 | TRACE_IRQS_ON |
192 | ENABLE_INTERRUPTS_SYSEXIT32 | 186 | ENABLE_INTERRUPTS_SYSEXIT32 |
@@ -421,8 +415,7 @@ ENTRY(ia32_syscall) | |||
421 | */ | 415 | */ |
422 | ENABLE_INTERRUPTS(CLBR_NONE) | 416 | ENABLE_INTERRUPTS(CLBR_NONE) |
423 | movl %eax,%eax | 417 | movl %eax,%eax |
424 | pushq %rax | 418 | pushq_cfi %rax |
425 | CFI_ADJUST_CFA_OFFSET 8 | ||
426 | cld | 419 | cld |
427 | /* note the registers are not zero extended to the sf. | 420 | /* note the registers are not zero extended to the sf. |
428 | this could be a problem. */ | 421 | this could be a problem. */ |
@@ -851,4 +844,7 @@ ia32_sys_call_table: | |||
851 | .quad sys_fanotify_init | 844 | .quad sys_fanotify_init |
852 | .quad sys32_fanotify_mark | 845 | .quad sys32_fanotify_mark |
853 | .quad sys_prlimit64 /* 340 */ | 846 | .quad sys_prlimit64 /* 340 */ |
847 | .quad sys_name_to_handle_at | ||
848 | .quad compat_sys_open_by_handle_at | ||
849 | .quad compat_sys_clock_adjtime | ||
854 | ia32_syscall_end: | 850 | ia32_syscall_end: |
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 4ea15ca89b2b..448d73a371ba 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <asm/processor.h> | 29 | #include <asm/processor.h> |
30 | #include <asm/mmu.h> | 30 | #include <asm/mmu.h> |
31 | #include <asm/mpspec.h> | 31 | #include <asm/mpspec.h> |
32 | #include <asm/trampoline.h> | ||
32 | 33 | ||
33 | #define COMPILER_DEPENDENT_INT64 long long | 34 | #define COMPILER_DEPENDENT_INT64 long long |
34 | #define COMPILER_DEPENDENT_UINT64 unsigned long long | 35 | #define COMPILER_DEPENDENT_UINT64 unsigned long long |
@@ -117,7 +118,8 @@ static inline void acpi_disable_pci(void) | |||
117 | extern int acpi_save_state_mem(void); | 118 | extern int acpi_save_state_mem(void); |
118 | extern void acpi_restore_state_mem(void); | 119 | extern void acpi_restore_state_mem(void); |
119 | 120 | ||
120 | extern unsigned long acpi_wakeup_address; | 121 | extern const unsigned char acpi_wakeup_code[]; |
122 | #define acpi_wakeup_address (__pa(TRAMPOLINE_SYM(acpi_wakeup_code))) | ||
121 | 123 | ||
122 | /* early initialization routine */ | 124 | /* early initialization routine */ |
123 | extern void acpi_reserve_wakeup_memory(void); | 125 | extern void acpi_reserve_wakeup_memory(void); |
@@ -186,15 +188,7 @@ struct bootnode; | |||
186 | 188 | ||
187 | #ifdef CONFIG_ACPI_NUMA | 189 | #ifdef CONFIG_ACPI_NUMA |
188 | extern int acpi_numa; | 190 | extern int acpi_numa; |
189 | extern void acpi_get_nodes(struct bootnode *physnodes, unsigned long start, | 191 | extern int x86_acpi_numa_init(void); |
190 | unsigned long end); | ||
191 | extern int acpi_scan_nodes(unsigned long start, unsigned long end); | ||
192 | #define NR_NODE_MEMBLKS (MAX_NUMNODES*2) | ||
193 | |||
194 | #ifdef CONFIG_NUMA_EMU | ||
195 | extern void acpi_fake_nodes(const struct bootnode *fake_nodes, | ||
196 | int num_nodes); | ||
197 | #endif | ||
198 | #endif /* CONFIG_ACPI_NUMA */ | 192 | #endif /* CONFIG_ACPI_NUMA */ |
199 | 193 | ||
200 | #define acpi_unlazy_tlb(x) leave_mm(x) | 194 | #define acpi_unlazy_tlb(x) leave_mm(x) |
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h index 64dc82ee19f0..331682231bb4 100644 --- a/arch/x86/include/asm/amd_nb.h +++ b/arch/x86/include/asm/amd_nb.h | |||
@@ -9,23 +9,20 @@ struct amd_nb_bus_dev_range { | |||
9 | u8 dev_limit; | 9 | u8 dev_limit; |
10 | }; | 10 | }; |
11 | 11 | ||
12 | extern struct pci_device_id amd_nb_misc_ids[]; | 12 | extern const struct pci_device_id amd_nb_misc_ids[]; |
13 | extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[]; | 13 | extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[]; |
14 | struct bootnode; | 14 | struct bootnode; |
15 | 15 | ||
16 | extern int early_is_amd_nb(u32 value); | 16 | extern bool early_is_amd_nb(u32 value); |
17 | extern int amd_cache_northbridges(void); | 17 | extern int amd_cache_northbridges(void); |
18 | extern void amd_flush_garts(void); | 18 | extern void amd_flush_garts(void); |
19 | extern int amd_numa_init(unsigned long start_pfn, unsigned long end_pfn); | 19 | extern int amd_numa_init(void); |
20 | extern int amd_scan_nodes(void); | 20 | extern int amd_get_subcaches(int); |
21 | 21 | extern int amd_set_subcaches(int, int); | |
22 | #ifdef CONFIG_NUMA_EMU | ||
23 | extern void amd_fake_nodes(const struct bootnode *nodes, int nr_nodes); | ||
24 | extern void amd_get_nodes(struct bootnode *nodes); | ||
25 | #endif | ||
26 | 22 | ||
27 | struct amd_northbridge { | 23 | struct amd_northbridge { |
28 | struct pci_dev *misc; | 24 | struct pci_dev *misc; |
25 | struct pci_dev *link; | ||
29 | }; | 26 | }; |
30 | 27 | ||
31 | struct amd_northbridge_info { | 28 | struct amd_northbridge_info { |
@@ -35,17 +32,18 @@ struct amd_northbridge_info { | |||
35 | }; | 32 | }; |
36 | extern struct amd_northbridge_info amd_northbridges; | 33 | extern struct amd_northbridge_info amd_northbridges; |
37 | 34 | ||
38 | #define AMD_NB_GART 0x1 | 35 | #define AMD_NB_GART BIT(0) |
39 | #define AMD_NB_L3_INDEX_DISABLE 0x2 | 36 | #define AMD_NB_L3_INDEX_DISABLE BIT(1) |
37 | #define AMD_NB_L3_PARTITIONING BIT(2) | ||
40 | 38 | ||
41 | #ifdef CONFIG_AMD_NB | 39 | #ifdef CONFIG_AMD_NB |
42 | 40 | ||
43 | static inline int amd_nb_num(void) | 41 | static inline u16 amd_nb_num(void) |
44 | { | 42 | { |
45 | return amd_northbridges.num; | 43 | return amd_northbridges.num; |
46 | } | 44 | } |
47 | 45 | ||
48 | static inline int amd_nb_has_feature(int feature) | 46 | static inline bool amd_nb_has_feature(unsigned feature) |
49 | { | 47 | { |
50 | return ((amd_northbridges.flags & feature) == feature); | 48 | return ((amd_northbridges.flags & feature) == feature); |
51 | } | 49 | } |
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 3c896946f4cc..a279d98ea95e 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h | |||
@@ -220,7 +220,6 @@ extern void enable_IR_x2apic(void); | |||
220 | 220 | ||
221 | extern int get_physical_broadcast(void); | 221 | extern int get_physical_broadcast(void); |
222 | 222 | ||
223 | extern void apic_disable(void); | ||
224 | extern int lapic_get_maxlvt(void); | 223 | extern int lapic_get_maxlvt(void); |
225 | extern void clear_local_APIC(void); | 224 | extern void clear_local_APIC(void); |
226 | extern void connect_bsp_APIC(void); | 225 | extern void connect_bsp_APIC(void); |
@@ -228,7 +227,6 @@ extern void disconnect_bsp_APIC(int virt_wire_setup); | |||
228 | extern void disable_local_APIC(void); | 227 | extern void disable_local_APIC(void); |
229 | extern void lapic_shutdown(void); | 228 | extern void lapic_shutdown(void); |
230 | extern int verify_local_APIC(void); | 229 | extern int verify_local_APIC(void); |
231 | extern void cache_APIC_registers(void); | ||
232 | extern void sync_Arb_IDs(void); | 230 | extern void sync_Arb_IDs(void); |
233 | extern void init_bsp_APIC(void); | 231 | extern void init_bsp_APIC(void); |
234 | extern void setup_local_APIC(void); | 232 | extern void setup_local_APIC(void); |
@@ -239,8 +237,7 @@ void register_lapic_address(unsigned long address); | |||
239 | extern void setup_boot_APIC_clock(void); | 237 | extern void setup_boot_APIC_clock(void); |
240 | extern void setup_secondary_APIC_clock(void); | 238 | extern void setup_secondary_APIC_clock(void); |
241 | extern int APIC_init_uniprocessor(void); | 239 | extern int APIC_init_uniprocessor(void); |
242 | extern void enable_NMI_through_LVT0(void); | 240 | extern int apic_force_enable(unsigned long addr); |
243 | extern int apic_force_enable(void); | ||
244 | 241 | ||
245 | /* | 242 | /* |
246 | * On 32bit this is mach-xxx local | 243 | * On 32bit this is mach-xxx local |
@@ -261,7 +258,6 @@ static inline void lapic_shutdown(void) { } | |||
261 | #define local_apic_timer_c2_ok 1 | 258 | #define local_apic_timer_c2_ok 1 |
262 | static inline void init_apic_mappings(void) { } | 259 | static inline void init_apic_mappings(void) { } |
263 | static inline void disable_local_APIC(void) { } | 260 | static inline void disable_local_APIC(void) { } |
264 | static inline void apic_disable(void) { } | ||
265 | # define setup_boot_APIC_clock x86_init_noop | 261 | # define setup_boot_APIC_clock x86_init_noop |
266 | # define setup_secondary_APIC_clock x86_init_noop | 262 | # define setup_secondary_APIC_clock x86_init_noop |
267 | #endif /* !CONFIG_X86_LOCAL_APIC */ | 263 | #endif /* !CONFIG_X86_LOCAL_APIC */ |
@@ -307,8 +303,6 @@ struct apic { | |||
307 | 303 | ||
308 | void (*setup_apic_routing)(void); | 304 | void (*setup_apic_routing)(void); |
309 | int (*multi_timer_check)(int apic, int irq); | 305 | int (*multi_timer_check)(int apic, int irq); |
310 | int (*apicid_to_node)(int logical_apicid); | ||
311 | int (*cpu_to_logical_apicid)(int cpu); | ||
312 | int (*cpu_present_to_apicid)(int mps_cpu); | 306 | int (*cpu_present_to_apicid)(int mps_cpu); |
313 | void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap); | 307 | void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap); |
314 | void (*setup_portio_remap)(void); | 308 | void (*setup_portio_remap)(void); |
@@ -356,6 +350,23 @@ struct apic { | |||
356 | void (*icr_write)(u32 low, u32 high); | 350 | void (*icr_write)(u32 low, u32 high); |
357 | void (*wait_icr_idle)(void); | 351 | void (*wait_icr_idle)(void); |
358 | u32 (*safe_wait_icr_idle)(void); | 352 | u32 (*safe_wait_icr_idle)(void); |
353 | |||
354 | #ifdef CONFIG_X86_32 | ||
355 | /* | ||
356 | * Called very early during boot from get_smp_config(). It should | ||
357 | * return the logical apicid. x86_[bios]_cpu_to_apicid is | ||
358 | * initialized before this function is called. | ||
359 | * | ||
360 | * If logical apicid can't be determined that early, the function | ||
361 | * may return BAD_APICID. Logical apicid will be configured after | ||
362 | * init_apic_ldr() while bringing up CPUs. Note that NUMA affinity | ||
363 | * won't be applied properly during early boot in this case. | ||
364 | */ | ||
365 | int (*x86_32_early_logical_apicid)(int cpu); | ||
366 | |||
367 | /* determine CPU -> NUMA node mapping */ | ||
368 | int (*x86_32_numa_cpu_node)(int cpu); | ||
369 | #endif | ||
359 | }; | 370 | }; |
360 | 371 | ||
361 | /* | 372 | /* |
@@ -503,6 +514,11 @@ extern struct apic apic_noop; | |||
503 | 514 | ||
504 | extern struct apic apic_default; | 515 | extern struct apic apic_default; |
505 | 516 | ||
517 | static inline int noop_x86_32_early_logical_apicid(int cpu) | ||
518 | { | ||
519 | return BAD_APICID; | ||
520 | } | ||
521 | |||
506 | /* | 522 | /* |
507 | * Set up the logical destination ID. | 523 | * Set up the logical destination ID. |
508 | * | 524 | * |
@@ -522,7 +538,7 @@ static inline int default_phys_pkg_id(int cpuid_apic, int index_msb) | |||
522 | return cpuid_apic >> index_msb; | 538 | return cpuid_apic >> index_msb; |
523 | } | 539 | } |
524 | 540 | ||
525 | extern int default_apicid_to_node(int logical_apicid); | 541 | extern int default_x86_32_numa_cpu_node(int cpu); |
526 | 542 | ||
527 | #endif | 543 | #endif |
528 | 544 | ||
@@ -558,12 +574,6 @@ static inline void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_ma | |||
558 | *retmap = *phys_map; | 574 | *retmap = *phys_map; |
559 | } | 575 | } |
560 | 576 | ||
561 | /* Mapping from cpu number to logical apicid */ | ||
562 | static inline int default_cpu_to_logical_apicid(int cpu) | ||
563 | { | ||
564 | return 1 << cpu; | ||
565 | } | ||
566 | |||
567 | static inline int __default_cpu_present_to_apicid(int mps_cpu) | 577 | static inline int __default_cpu_present_to_apicid(int mps_cpu) |
568 | { | 578 | { |
569 | if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu)) | 579 | if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu)) |
@@ -596,8 +606,4 @@ extern int default_check_phys_apicid_present(int phys_apicid); | |||
596 | 606 | ||
597 | #endif /* CONFIG_X86_LOCAL_APIC */ | 607 | #endif /* CONFIG_X86_LOCAL_APIC */ |
598 | 608 | ||
599 | #ifdef CONFIG_X86_32 | ||
600 | extern u8 cpu_2_logical_apicid[NR_CPUS]; | ||
601 | #endif | ||
602 | |||
603 | #endif /* _ASM_X86_APIC_H */ | 609 | #endif /* _ASM_X86_APIC_H */ |
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h index 47a30ff8e517..d87988bacf3e 100644 --- a/arch/x86/include/asm/apicdef.h +++ b/arch/x86/include/asm/apicdef.h | |||
@@ -426,4 +426,16 @@ struct local_apic { | |||
426 | #else | 426 | #else |
427 | #define BAD_APICID 0xFFFFu | 427 | #define BAD_APICID 0xFFFFu |
428 | #endif | 428 | #endif |
429 | |||
430 | enum ioapic_irq_destination_types { | ||
431 | dest_Fixed = 0, | ||
432 | dest_LowestPrio = 1, | ||
433 | dest_SMI = 2, | ||
434 | dest__reserved_1 = 3, | ||
435 | dest_NMI = 4, | ||
436 | dest_INIT = 5, | ||
437 | dest__reserved_2 = 6, | ||
438 | dest_ExtINT = 7 | ||
439 | }; | ||
440 | |||
429 | #endif /* _ASM_X86_APICDEF_H */ | 441 | #endif /* _ASM_X86_APICDEF_H */ |
diff --git a/arch/x86/include/asm/bootparam.h b/arch/x86/include/asm/bootparam.h index c8bfe63a06de..e020d88ec02d 100644 --- a/arch/x86/include/asm/bootparam.h +++ b/arch/x86/include/asm/bootparam.h | |||
@@ -12,6 +12,7 @@ | |||
12 | /* setup data types */ | 12 | /* setup data types */ |
13 | #define SETUP_NONE 0 | 13 | #define SETUP_NONE 0 |
14 | #define SETUP_E820_EXT 1 | 14 | #define SETUP_E820_EXT 1 |
15 | #define SETUP_DTB 2 | ||
15 | 16 | ||
16 | /* extensible setup data list node */ | 17 | /* extensible setup data list node */ |
17 | struct setup_data { | 18 | struct setup_data { |
diff --git a/arch/x86/include/asm/ce4100.h b/arch/x86/include/asm/ce4100.h new file mode 100644 index 000000000000..e656ad8c0a2e --- /dev/null +++ b/arch/x86/include/asm/ce4100.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef _ASM_CE4100_H_ | ||
2 | #define _ASM_CE4100_H_ | ||
3 | |||
4 | int ce4100_pci_init(void); | ||
5 | |||
6 | #endif | ||
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 220e2ea08e80..91f3e087cf21 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h | |||
@@ -160,6 +160,7 @@ | |||
160 | #define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */ | 160 | #define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */ |
161 | #define X86_FEATURE_TBM (6*32+21) /* trailing bit manipulations */ | 161 | #define X86_FEATURE_TBM (6*32+21) /* trailing bit manipulations */ |
162 | #define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */ | 162 | #define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */ |
163 | #define X86_FEATURE_PERFCTR_CORE (6*32+23) /* core performance counter extensions */ | ||
163 | 164 | ||
164 | /* | 165 | /* |
165 | * Auxiliary flags: Linux defined - For features scattered in various | 166 | * Auxiliary flags: Linux defined - For features scattered in various |
@@ -279,6 +280,7 @@ extern const char * const x86_power_flags[32]; | |||
279 | #define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE) | 280 | #define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE) |
280 | #define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR) | 281 | #define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR) |
281 | #define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ) | 282 | #define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ) |
283 | #define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE) | ||
282 | 284 | ||
283 | #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) | 285 | #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) |
284 | # define cpu_has_invlpg 1 | 286 | # define cpu_has_invlpg 1 |
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h index e99d55d74df5..908b96957d88 100644 --- a/arch/x86/include/asm/e820.h +++ b/arch/x86/include/asm/e820.h | |||
@@ -96,7 +96,7 @@ extern void e820_setup_gap(void); | |||
96 | extern int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize, | 96 | extern int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize, |
97 | unsigned long start_addr, unsigned long long end_addr); | 97 | unsigned long start_addr, unsigned long long end_addr); |
98 | struct setup_data; | 98 | struct setup_data; |
99 | extern void parse_e820_ext(struct setup_data *data, unsigned long pa_data); | 99 | extern void parse_e820_ext(struct setup_data *data); |
100 | 100 | ||
101 | #if defined(CONFIG_X86_64) || \ | 101 | #if defined(CONFIG_X86_64) || \ |
102 | (defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION)) | 102 | (defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION)) |
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h index 57650ab4a5f5..1cd6d26a0a8d 100644 --- a/arch/x86/include/asm/entry_arch.h +++ b/arch/x86/include/asm/entry_arch.h | |||
@@ -16,10 +16,13 @@ BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR) | |||
16 | BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR) | 16 | BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR) |
17 | BUILD_INTERRUPT(reboot_interrupt,REBOOT_VECTOR) | 17 | BUILD_INTERRUPT(reboot_interrupt,REBOOT_VECTOR) |
18 | 18 | ||
19 | .irpc idx, "01234567" | 19 | .irp idx,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \ |
20 | 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 | ||
21 | .if NUM_INVALIDATE_TLB_VECTORS > \idx | ||
20 | BUILD_INTERRUPT3(invalidate_interrupt\idx, | 22 | BUILD_INTERRUPT3(invalidate_interrupt\idx, |
21 | (INVALIDATE_TLB_VECTOR_START)+\idx, | 23 | (INVALIDATE_TLB_VECTOR_START)+\idx, |
22 | smp_invalidate_interrupt) | 24 | smp_invalidate_interrupt) |
25 | .endif | ||
23 | .endr | 26 | .endr |
24 | #endif | 27 | #endif |
25 | 28 | ||
diff --git a/arch/x86/include/asm/frame.h b/arch/x86/include/asm/frame.h index 06850a7194e1..2c6fc9e62812 100644 --- a/arch/x86/include/asm/frame.h +++ b/arch/x86/include/asm/frame.h | |||
@@ -7,14 +7,12 @@ | |||
7 | frame pointer later */ | 7 | frame pointer later */ |
8 | #ifdef CONFIG_FRAME_POINTER | 8 | #ifdef CONFIG_FRAME_POINTER |
9 | .macro FRAME | 9 | .macro FRAME |
10 | pushl %ebp | 10 | pushl_cfi %ebp |
11 | CFI_ADJUST_CFA_OFFSET 4 | ||
12 | CFI_REL_OFFSET ebp,0 | 11 | CFI_REL_OFFSET ebp,0 |
13 | movl %esp,%ebp | 12 | movl %esp,%ebp |
14 | .endm | 13 | .endm |
15 | .macro ENDFRAME | 14 | .macro ENDFRAME |
16 | popl %ebp | 15 | popl_cfi %ebp |
17 | CFI_ADJUST_CFA_OFFSET -4 | ||
18 | CFI_RESTORE ebp | 16 | CFI_RESTORE ebp |
19 | .endm | 17 | .endm |
20 | #else | 18 | #else |
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h index 1f11ce44e956..d09bb03653f0 100644 --- a/arch/x86/include/asm/futex.h +++ b/arch/x86/include/asm/futex.h | |||
@@ -37,7 +37,7 @@ | |||
37 | "+m" (*uaddr), "=&r" (tem) \ | 37 | "+m" (*uaddr), "=&r" (tem) \ |
38 | : "r" (oparg), "i" (-EFAULT), "1" (0)) | 38 | : "r" (oparg), "i" (-EFAULT), "1" (0)) |
39 | 39 | ||
40 | static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | 40 | static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) |
41 | { | 41 | { |
42 | int op = (encoded_op >> 28) & 7; | 42 | int op = (encoded_op >> 28) & 7; |
43 | int cmp = (encoded_op >> 24) & 15; | 43 | int cmp = (encoded_op >> 24) & 15; |
@@ -48,7 +48,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | |||
48 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) | 48 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) |
49 | oparg = 1 << oparg; | 49 | oparg = 1 << oparg; |
50 | 50 | ||
51 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | 51 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
52 | return -EFAULT; | 52 | return -EFAULT; |
53 | 53 | ||
54 | #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP) | 54 | #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP) |
@@ -109,9 +109,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | |||
109 | return ret; | 109 | return ret; |
110 | } | 110 | } |
111 | 111 | ||
112 | static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, | 112 | static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, |
113 | int newval) | 113 | u32 oldval, u32 newval) |
114 | { | 114 | { |
115 | int ret = 0; | ||
115 | 116 | ||
116 | #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP) | 117 | #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP) |
117 | /* Real i386 machines have no cmpxchg instruction */ | 118 | /* Real i386 machines have no cmpxchg instruction */ |
@@ -119,21 +120,22 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, | |||
119 | return -ENOSYS; | 120 | return -ENOSYS; |
120 | #endif | 121 | #endif |
121 | 122 | ||
122 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | 123 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
123 | return -EFAULT; | 124 | return -EFAULT; |
124 | 125 | ||
125 | asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n" | 126 | asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" |
126 | "2:\t.section .fixup, \"ax\"\n" | 127 | "2:\t.section .fixup, \"ax\"\n" |
127 | "3:\tmov %2, %0\n" | 128 | "3:\tmov %3, %0\n" |
128 | "\tjmp 2b\n" | 129 | "\tjmp 2b\n" |
129 | "\t.previous\n" | 130 | "\t.previous\n" |
130 | _ASM_EXTABLE(1b, 3b) | 131 | _ASM_EXTABLE(1b, 3b) |
131 | : "=a" (oldval), "+m" (*uaddr) | 132 | : "+r" (ret), "=a" (oldval), "+m" (*uaddr) |
132 | : "i" (-EFAULT), "r" (newval), "0" (oldval) | 133 | : "i" (-EFAULT), "r" (newval), "1" (oldval) |
133 | : "memory" | 134 | : "memory" |
134 | ); | 135 | ); |
135 | 136 | ||
136 | return oldval; | 137 | *uval = oldval; |
138 | return ret; | ||
137 | } | 139 | } |
138 | 140 | ||
139 | #endif | 141 | #endif |
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 0274ec5a7e62..bb9efe8706e2 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h | |||
@@ -45,6 +45,30 @@ extern void invalidate_interrupt4(void); | |||
45 | extern void invalidate_interrupt5(void); | 45 | extern void invalidate_interrupt5(void); |
46 | extern void invalidate_interrupt6(void); | 46 | extern void invalidate_interrupt6(void); |
47 | extern void invalidate_interrupt7(void); | 47 | extern void invalidate_interrupt7(void); |
48 | extern void invalidate_interrupt8(void); | ||
49 | extern void invalidate_interrupt9(void); | ||
50 | extern void invalidate_interrupt10(void); | ||
51 | extern void invalidate_interrupt11(void); | ||
52 | extern void invalidate_interrupt12(void); | ||
53 | extern void invalidate_interrupt13(void); | ||
54 | extern void invalidate_interrupt14(void); | ||
55 | extern void invalidate_interrupt15(void); | ||
56 | extern void invalidate_interrupt16(void); | ||
57 | extern void invalidate_interrupt17(void); | ||
58 | extern void invalidate_interrupt18(void); | ||
59 | extern void invalidate_interrupt19(void); | ||
60 | extern void invalidate_interrupt20(void); | ||
61 | extern void invalidate_interrupt21(void); | ||
62 | extern void invalidate_interrupt22(void); | ||
63 | extern void invalidate_interrupt23(void); | ||
64 | extern void invalidate_interrupt24(void); | ||
65 | extern void invalidate_interrupt25(void); | ||
66 | extern void invalidate_interrupt26(void); | ||
67 | extern void invalidate_interrupt27(void); | ||
68 | extern void invalidate_interrupt28(void); | ||
69 | extern void invalidate_interrupt29(void); | ||
70 | extern void invalidate_interrupt30(void); | ||
71 | extern void invalidate_interrupt31(void); | ||
48 | 72 | ||
49 | extern void irq_move_cleanup_interrupt(void); | 73 | extern void irq_move_cleanup_interrupt(void); |
50 | extern void reboot_interrupt(void); | 74 | extern void reboot_interrupt(void); |
diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h index 36fb1a6a5109..8dbe353e41e1 100644 --- a/arch/x86/include/asm/init.h +++ b/arch/x86/include/asm/init.h | |||
@@ -11,8 +11,8 @@ kernel_physical_mapping_init(unsigned long start, | |||
11 | unsigned long page_size_mask); | 11 | unsigned long page_size_mask); |
12 | 12 | ||
13 | 13 | ||
14 | extern unsigned long __initdata e820_table_start; | 14 | extern unsigned long __initdata pgt_buf_start; |
15 | extern unsigned long __meminitdata e820_table_end; | 15 | extern unsigned long __meminitdata pgt_buf_end; |
16 | extern unsigned long __meminitdata e820_table_top; | 16 | extern unsigned long __meminitdata pgt_buf_top; |
17 | 17 | ||
18 | #endif /* _ASM_X86_INIT_32_H */ | 18 | #endif /* _ASM_X86_INIT_32_H */ |
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h index f327d386d6cc..c4bd267dfc50 100644 --- a/arch/x86/include/asm/io_apic.h +++ b/arch/x86/include/asm/io_apic.h | |||
@@ -63,17 +63,6 @@ union IO_APIC_reg_03 { | |||
63 | } __attribute__ ((packed)) bits; | 63 | } __attribute__ ((packed)) bits; |
64 | }; | 64 | }; |
65 | 65 | ||
66 | enum ioapic_irq_destination_types { | ||
67 | dest_Fixed = 0, | ||
68 | dest_LowestPrio = 1, | ||
69 | dest_SMI = 2, | ||
70 | dest__reserved_1 = 3, | ||
71 | dest_NMI = 4, | ||
72 | dest_INIT = 5, | ||
73 | dest__reserved_2 = 6, | ||
74 | dest_ExtINT = 7 | ||
75 | }; | ||
76 | |||
77 | struct IO_APIC_route_entry { | 66 | struct IO_APIC_route_entry { |
78 | __u32 vector : 8, | 67 | __u32 vector : 8, |
79 | delivery_mode : 3, /* 000: FIXED | 68 | delivery_mode : 3, /* 000: FIXED |
@@ -106,6 +95,10 @@ struct IR_IO_APIC_route_entry { | |||
106 | index : 15; | 95 | index : 15; |
107 | } __attribute__ ((packed)); | 96 | } __attribute__ ((packed)); |
108 | 97 | ||
98 | #define IOAPIC_AUTO -1 | ||
99 | #define IOAPIC_EDGE 0 | ||
100 | #define IOAPIC_LEVEL 1 | ||
101 | |||
109 | #ifdef CONFIG_X86_IO_APIC | 102 | #ifdef CONFIG_X86_IO_APIC |
110 | 103 | ||
111 | /* | 104 | /* |
@@ -150,11 +143,6 @@ extern int timer_through_8259; | |||
150 | #define io_apic_assign_pci_irqs \ | 143 | #define io_apic_assign_pci_irqs \ |
151 | (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs) | 144 | (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs) |
152 | 145 | ||
153 | extern u8 io_apic_unique_id(u8 id); | ||
154 | extern int io_apic_get_unique_id(int ioapic, int apic_id); | ||
155 | extern int io_apic_get_version(int ioapic); | ||
156 | extern int io_apic_get_redir_entries(int ioapic); | ||
157 | |||
158 | struct io_apic_irq_attr; | 146 | struct io_apic_irq_attr; |
159 | extern int io_apic_set_pci_routing(struct device *dev, int irq, | 147 | extern int io_apic_set_pci_routing(struct device *dev, int irq, |
160 | struct io_apic_irq_attr *irq_attr); | 148 | struct io_apic_irq_attr *irq_attr); |
@@ -162,6 +150,8 @@ void setup_IO_APIC_irq_extra(u32 gsi); | |||
162 | extern void ioapic_and_gsi_init(void); | 150 | extern void ioapic_and_gsi_init(void); |
163 | extern void ioapic_insert_resources(void); | 151 | extern void ioapic_insert_resources(void); |
164 | 152 | ||
153 | int io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr); | ||
154 | |||
165 | extern struct IO_APIC_route_entry **alloc_ioapic_entries(void); | 155 | extern struct IO_APIC_route_entry **alloc_ioapic_entries(void); |
166 | extern void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries); | 156 | extern void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries); |
167 | extern int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries); | 157 | extern int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries); |
@@ -186,6 +176,8 @@ extern void __init pre_init_apic_IRQ0(void); | |||
186 | 176 | ||
187 | extern void mp_save_irq(struct mpc_intsrc *m); | 177 | extern void mp_save_irq(struct mpc_intsrc *m); |
188 | 178 | ||
179 | extern void disable_ioapic_support(void); | ||
180 | |||
189 | #else /* !CONFIG_X86_IO_APIC */ | 181 | #else /* !CONFIG_X86_IO_APIC */ |
190 | 182 | ||
191 | #define io_apic_assign_pci_irqs 0 | 183 | #define io_apic_assign_pci_irqs 0 |
@@ -199,6 +191,26 @@ static inline int mp_find_ioapic(u32 gsi) { return 0; } | |||
199 | struct io_apic_irq_attr; | 191 | struct io_apic_irq_attr; |
200 | static inline int io_apic_set_pci_routing(struct device *dev, int irq, | 192 | static inline int io_apic_set_pci_routing(struct device *dev, int irq, |
201 | struct io_apic_irq_attr *irq_attr) { return 0; } | 193 | struct io_apic_irq_attr *irq_attr) { return 0; } |
194 | |||
195 | static inline struct IO_APIC_route_entry **alloc_ioapic_entries(void) | ||
196 | { | ||
197 | return NULL; | ||
198 | } | ||
199 | |||
200 | static inline void free_ioapic_entries(struct IO_APIC_route_entry **ent) { } | ||
201 | static inline int save_IO_APIC_setup(struct IO_APIC_route_entry **ent) | ||
202 | { | ||
203 | return -ENOMEM; | ||
204 | } | ||
205 | |||
206 | static inline void mask_IO_APIC_setup(struct IO_APIC_route_entry **ent) { } | ||
207 | static inline int restore_IO_APIC_setup(struct IO_APIC_route_entry **ent) | ||
208 | { | ||
209 | return -ENOMEM; | ||
210 | } | ||
211 | |||
212 | static inline void mp_save_irq(struct mpc_intsrc *m) { }; | ||
213 | static inline void disable_ioapic_support(void) { } | ||
202 | #endif | 214 | #endif |
203 | 215 | ||
204 | #endif /* _ASM_X86_IO_APIC_H */ | 216 | #endif /* _ASM_X86_IO_APIC_H */ |
diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h index 0b7228268a63..615fa9061b57 100644 --- a/arch/x86/include/asm/ipi.h +++ b/arch/x86/include/asm/ipi.h | |||
@@ -123,10 +123,6 @@ extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, | |||
123 | int vector); | 123 | int vector); |
124 | extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, | 124 | extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, |
125 | int vector); | 125 | int vector); |
126 | extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, | ||
127 | int vector); | ||
128 | extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, | ||
129 | int vector); | ||
130 | 126 | ||
131 | /* Avoid include hell */ | 127 | /* Avoid include hell */ |
132 | #define NMI_VECTOR 0x02 | 128 | #define NMI_VECTOR 0x02 |
@@ -150,6 +146,10 @@ static inline void __default_local_send_IPI_all(int vector) | |||
150 | } | 146 | } |
151 | 147 | ||
152 | #ifdef CONFIG_X86_32 | 148 | #ifdef CONFIG_X86_32 |
149 | extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, | ||
150 | int vector); | ||
151 | extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, | ||
152 | int vector); | ||
153 | extern void default_send_IPI_mask_logical(const struct cpumask *mask, | 153 | extern void default_send_IPI_mask_logical(const struct cpumask *mask, |
154 | int vector); | 154 | int vector); |
155 | extern void default_send_IPI_allbutself(int vector); | 155 | extern void default_send_IPI_allbutself(int vector); |
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h index c704b38c57a2..ba870bb6dd8e 100644 --- a/arch/x86/include/asm/irq.h +++ b/arch/x86/include/asm/irq.h | |||
@@ -10,9 +10,6 @@ | |||
10 | #include <asm/apicdef.h> | 10 | #include <asm/apicdef.h> |
11 | #include <asm/irq_vectors.h> | 11 | #include <asm/irq_vectors.h> |
12 | 12 | ||
13 | /* Even though we don't support this, supply it to appease OF */ | ||
14 | static inline void irq_dispose_mapping(unsigned int virq) { } | ||
15 | |||
16 | static inline int irq_canonicalize(int irq) | 13 | static inline int irq_canonicalize(int irq) |
17 | { | 14 | { |
18 | return ((irq == 2) ? 9 : irq); | 15 | return ((irq == 2) ? 9 : irq); |
diff --git a/arch/x86/include/asm/irq_controller.h b/arch/x86/include/asm/irq_controller.h new file mode 100644 index 000000000000..423bbbddf36d --- /dev/null +++ b/arch/x86/include/asm/irq_controller.h | |||
@@ -0,0 +1,12 @@ | |||
1 | #ifndef __IRQ_CONTROLLER__ | ||
2 | #define __IRQ_CONTROLLER__ | ||
3 | |||
4 | struct irq_domain { | ||
5 | int (*xlate)(struct irq_domain *h, const u32 *intspec, u32 intsize, | ||
6 | u32 *out_hwirq, u32 *out_type); | ||
7 | void *priv; | ||
8 | struct device_node *controller; | ||
9 | struct list_head l; | ||
10 | }; | ||
11 | |||
12 | #endif | ||
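Annotation (not part of the patch): the new irq_domain structure above carries only an xlate() callback, a private pointer, the OF controller node, and a list head. As an illustration of how a provider might fill in that callback — assuming a one-cell interrupt specifier; the function name and trigger type below are invented for the example:

	/* Hypothetical xlate hook for the struct irq_domain defined above. */
	static int example_xlate(struct irq_domain *h, const u32 *intspec,
				 u32 intsize, u32 *out_hwirq, u32 *out_type)
	{
		if (intsize < 1)
			return -EINVAL;
		*out_hwirq = intspec[0];		/* hardware IRQ number */
		*out_type = IRQ_TYPE_EDGE_RISING;	/* from <linux/irq.h> */
		return 0;
	}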
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 6af0894dafb4..6e976ee3b3ef 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h | |||
@@ -1,6 +1,7 @@ | |||
1 | #ifndef _ASM_X86_IRQ_VECTORS_H | 1 | #ifndef _ASM_X86_IRQ_VECTORS_H |
2 | #define _ASM_X86_IRQ_VECTORS_H | 2 | #define _ASM_X86_IRQ_VECTORS_H |
3 | 3 | ||
4 | #include <linux/threads.h> | ||
4 | /* | 5 | /* |
5 | * Linux IRQ vector layout. | 6 | * Linux IRQ vector layout. |
6 | * | 7 | * |
@@ -16,8 +17,8 @@ | |||
16 | * Vectors 0 ... 31 : system traps and exceptions - hardcoded events | 17 | * Vectors 0 ... 31 : system traps and exceptions - hardcoded events |
17 | * Vectors 32 ... 127 : device interrupts | 18 | * Vectors 32 ... 127 : device interrupts |
18 | * Vector 128 : legacy int80 syscall interface | 19 | * Vector 128 : legacy int80 syscall interface |
19 | * Vectors 129 ... 237 : device interrupts | 20 | * Vectors 129 ... INVALIDATE_TLB_VECTOR_START-1 : device interrupts |
20 | * Vectors 238 ... 255 : special interrupts | 21 | * Vectors INVALIDATE_TLB_VECTOR_START ... 255 : special interrupts |
21 | * | 22 | * |
22 | * 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table. | 23 | * 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table. |
23 | * | 24 | * |
@@ -96,37 +97,43 @@ | |||
96 | #define THRESHOLD_APIC_VECTOR 0xf9 | 97 | #define THRESHOLD_APIC_VECTOR 0xf9 |
97 | #define REBOOT_VECTOR 0xf8 | 98 | #define REBOOT_VECTOR 0xf8 |
98 | 99 | ||
99 | /* f0-f7 used for spreading out TLB flushes: */ | ||
100 | #define INVALIDATE_TLB_VECTOR_END 0xf7 | ||
101 | #define INVALIDATE_TLB_VECTOR_START 0xf0 | ||
102 | #define NUM_INVALIDATE_TLB_VECTORS 8 | ||
103 | |||
104 | /* | ||
105 | * Local APIC timer IRQ vector is on a different priority level, | ||
106 | * to work around the 'lost local interrupt if more than 2 IRQ | ||
107 | * sources per level' errata. | ||
108 | */ | ||
109 | #define LOCAL_TIMER_VECTOR 0xef | ||
110 | |||
111 | /* | 100 | /* |
112 | * Generic system vector for platform specific use | 101 | * Generic system vector for platform specific use |
113 | */ | 102 | */ |
114 | #define X86_PLATFORM_IPI_VECTOR 0xed | 103 | #define X86_PLATFORM_IPI_VECTOR 0xf7 |
115 | 104 | ||
116 | /* | 105 | /* |
117 | * IRQ work vector: | 106 | * IRQ work vector: |
118 | */ | 107 | */ |
119 | #define IRQ_WORK_VECTOR 0xec | 108 | #define IRQ_WORK_VECTOR 0xf6 |
120 | 109 | ||
121 | #define UV_BAU_MESSAGE 0xea | 110 | #define UV_BAU_MESSAGE 0xf5 |
122 | 111 | ||
123 | /* | 112 | /* |
124 | * Self IPI vector for machine checks | 113 | * Self IPI vector for machine checks |
125 | */ | 114 | */ |
126 | #define MCE_SELF_VECTOR 0xeb | 115 | #define MCE_SELF_VECTOR 0xf4 |
127 | 116 | ||
128 | /* Xen vector callback to receive events in a HVM domain */ | 117 | /* Xen vector callback to receive events in a HVM domain */ |
129 | #define XEN_HVM_EVTCHN_CALLBACK 0xe9 | 118 | #define XEN_HVM_EVTCHN_CALLBACK 0xf3 |
119 | |||
120 | /* | ||
121 | * Local APIC timer IRQ vector is on a different priority level, | ||
122 | * to work around the 'lost local interrupt if more than 2 IRQ | ||
123 | * sources per level' errata. | ||
124 | */ | ||
125 | #define LOCAL_TIMER_VECTOR 0xef | ||
126 | |||
127 | /* up to 32 vectors used for spreading out TLB flushes: */ | ||
128 | #if NR_CPUS <= 32 | ||
129 | # define NUM_INVALIDATE_TLB_VECTORS (NR_CPUS) | ||
130 | #else | ||
131 | # define NUM_INVALIDATE_TLB_VECTORS (32) | ||
132 | #endif | ||
133 | |||
134 | #define INVALIDATE_TLB_VECTOR_END (0xee) | ||
135 | #define INVALIDATE_TLB_VECTOR_START \ | ||
136 | (INVALIDATE_TLB_VECTOR_END-NUM_INVALIDATE_TLB_VECTORS+1) | ||
130 | 137 | ||
131 | #define NR_VECTORS 256 | 138 | #define NR_VECTORS 256 |
132 | 139 | ||
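Annotation: worked example of the new vector layout. With NR_CPUS = 8 the patch reserves NUM_INVALIDATE_TLB_VECTORS = 8, so INVALIDATE_TLB_VECTOR_START = 0xee - 8 + 1 = 0xe7 and the TLB-flush IPIs occupy 0xe7..0xee; with NR_CPUS >= 32 the range is capped at 32 vectors, 0xcf..0xee. The special vectors relocated to 0xf3..0xf7 stay above LOCAL_TIMER_VECTOR (0xef), which keeps the flush range contiguous just below the timer vector.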
diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h index ca242d35e873..518bbbb9ee59 100644 --- a/arch/x86/include/asm/kdebug.h +++ b/arch/x86/include/asm/kdebug.h | |||
@@ -13,7 +13,6 @@ enum die_val { | |||
13 | DIE_PANIC, | 13 | DIE_PANIC, |
14 | DIE_NMI, | 14 | DIE_NMI, |
15 | DIE_DIE, | 15 | DIE_DIE, |
16 | DIE_NMIWATCHDOG, | ||
17 | DIE_KERNELDEBUG, | 16 | DIE_KERNELDEBUG, |
18 | DIE_TRAP, | 17 | DIE_TRAP, |
19 | DIE_GPF, | 18 | DIE_GPF, |
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h index 0c90dd9f0505..9c7d95f6174b 100644 --- a/arch/x86/include/asm/mpspec.h +++ b/arch/x86/include/asm/mpspec.h | |||
@@ -25,7 +25,6 @@ extern int pic_mode; | |||
25 | #define MAX_IRQ_SOURCES 256 | 25 | #define MAX_IRQ_SOURCES 256 |
26 | 26 | ||
27 | extern unsigned int def_to_bigsmp; | 27 | extern unsigned int def_to_bigsmp; |
28 | extern u8 apicid_2_node[]; | ||
29 | 28 | ||
30 | #ifdef CONFIG_X86_NUMAQ | 29 | #ifdef CONFIG_X86_NUMAQ |
31 | extern int mp_bus_id_to_node[MAX_MP_BUSSES]; | 30 | extern int mp_bus_id_to_node[MAX_MP_BUSSES]; |
@@ -33,8 +32,6 @@ extern int mp_bus_id_to_local[MAX_MP_BUSSES]; | |||
33 | extern int quad_local_to_mp_bus_id [NR_CPUS/4][4]; | 32 | extern int quad_local_to_mp_bus_id [NR_CPUS/4][4]; |
34 | #endif | 33 | #endif |
35 | 34 | ||
36 | #define MAX_APICID 256 | ||
37 | |||
38 | #else /* CONFIG_X86_64: */ | 35 | #else /* CONFIG_X86_64: */ |
39 | 36 | ||
40 | #define MAX_MP_BUSSES 256 | 37 | #define MAX_MP_BUSSES 256 |
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 43a18c77676d..823d48223400 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h | |||
@@ -52,6 +52,9 @@ | |||
52 | #define MSR_IA32_MCG_STATUS 0x0000017a | 52 | #define MSR_IA32_MCG_STATUS 0x0000017a |
53 | #define MSR_IA32_MCG_CTL 0x0000017b | 53 | #define MSR_IA32_MCG_CTL 0x0000017b |
54 | 54 | ||
55 | #define MSR_OFFCORE_RSP_0 0x000001a6 | ||
56 | #define MSR_OFFCORE_RSP_1 0x000001a7 | ||
57 | |||
55 | #define MSR_IA32_PEBS_ENABLE 0x000003f1 | 58 | #define MSR_IA32_PEBS_ENABLE 0x000003f1 |
56 | #define MSR_IA32_DS_AREA 0x00000600 | 59 | #define MSR_IA32_DS_AREA 0x00000600 |
57 | #define MSR_IA32_PERF_CAPABILITIES 0x00000345 | 60 | #define MSR_IA32_PERF_CAPABILITIES 0x00000345 |
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h index c76f5b92b840..07f46016d3ff 100644 --- a/arch/x86/include/asm/nmi.h +++ b/arch/x86/include/asm/nmi.h | |||
@@ -7,7 +7,6 @@ | |||
7 | 7 | ||
8 | #ifdef CONFIG_X86_LOCAL_APIC | 8 | #ifdef CONFIG_X86_LOCAL_APIC |
9 | 9 | ||
10 | extern void die_nmi(char *str, struct pt_regs *regs, int do_panic); | ||
11 | extern int avail_to_resrv_perfctr_nmi_bit(unsigned int); | 10 | extern int avail_to_resrv_perfctr_nmi_bit(unsigned int); |
12 | extern int reserve_perfctr_nmi(unsigned int); | 11 | extern int reserve_perfctr_nmi(unsigned int); |
13 | extern void release_perfctr_nmi(unsigned int); | 12 | extern void release_perfctr_nmi(unsigned int); |
diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h index 27da400d3138..3d4dab43c994 100644 --- a/arch/x86/include/asm/numa.h +++ b/arch/x86/include/asm/numa.h | |||
@@ -1,5 +1,57 @@ | |||
1 | #ifndef _ASM_X86_NUMA_H | ||
2 | #define _ASM_X86_NUMA_H | ||
3 | |||
4 | #include <asm/topology.h> | ||
5 | #include <asm/apicdef.h> | ||
6 | |||
7 | #ifdef CONFIG_NUMA | ||
8 | |||
9 | #define NR_NODE_MEMBLKS (MAX_NUMNODES*2) | ||
10 | |||
11 | /* | ||
12 | * __apicid_to_node[] stores the raw mapping between physical apicid and | ||
13 | * node and is used to initialize cpu_to_node mapping. | ||
14 | * | ||
15 | * The mapping may be overridden by apic->numa_cpu_node() on 32bit and thus | ||
16 | * should be accessed by the accessors - set_apicid_to_node() and | ||
17 | * numa_cpu_node(). | ||
18 | */ | ||
19 | extern s16 __apicid_to_node[MAX_LOCAL_APIC]; | ||
20 | |||
21 | static inline void set_apicid_to_node(int apicid, s16 node) | ||
22 | { | ||
23 | __apicid_to_node[apicid] = node; | ||
24 | } | ||
25 | #else /* CONFIG_NUMA */ | ||
26 | static inline void set_apicid_to_node(int apicid, s16 node) | ||
27 | { | ||
28 | } | ||
29 | #endif /* CONFIG_NUMA */ | ||
30 | |||
1 | #ifdef CONFIG_X86_32 | 31 | #ifdef CONFIG_X86_32 |
2 | # include "numa_32.h" | 32 | # include "numa_32.h" |
3 | #else | 33 | #else |
4 | # include "numa_64.h" | 34 | # include "numa_64.h" |
5 | #endif | 35 | #endif |
36 | |||
37 | #ifdef CONFIG_NUMA | ||
38 | extern void __cpuinit numa_set_node(int cpu, int node); | ||
39 | extern void __cpuinit numa_clear_node(int cpu); | ||
40 | extern void __init numa_init_array(void); | ||
41 | extern void __init init_cpu_to_node(void); | ||
42 | extern void __cpuinit numa_add_cpu(int cpu); | ||
43 | extern void __cpuinit numa_remove_cpu(int cpu); | ||
44 | #else /* CONFIG_NUMA */ | ||
45 | static inline void numa_set_node(int cpu, int node) { } | ||
46 | static inline void numa_clear_node(int cpu) { } | ||
47 | static inline void numa_init_array(void) { } | ||
48 | static inline void init_cpu_to_node(void) { } | ||
49 | static inline void numa_add_cpu(int cpu) { } | ||
50 | static inline void numa_remove_cpu(int cpu) { } | ||
51 | #endif /* CONFIG_NUMA */ | ||
52 | |||
53 | #ifdef CONFIG_DEBUG_PER_CPU_MAPS | ||
54 | struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable); | ||
55 | #endif | ||
56 | |||
57 | #endif /* _ASM_X86_NUMA_H */ | ||
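Annotation: the comment block above describes __apicid_to_node[] as the raw apicid-to-node table behind cpu_to_node. Purely as an illustration (names invented, not from the patch), firmware-table parsing code records the mapping through the accessor, and the per-arch numa_cpu_node() declared in numa_32.h/numa_64.h below reads it back once CPUs are enumerated:

	/* Hypothetical sketch only. */
	static void __init example_record_srat_entry(int apicid, int node)
	{
		if (apicid >= 0 && apicid < MAX_LOCAL_APIC)
			set_apicid_to_node(apicid, node);
	}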
diff --git a/arch/x86/include/asm/numa_32.h b/arch/x86/include/asm/numa_32.h index b0ef2b449a9d..c6beed1ef103 100644 --- a/arch/x86/include/asm/numa_32.h +++ b/arch/x86/include/asm/numa_32.h | |||
@@ -4,7 +4,12 @@ | |||
4 | extern int numa_off; | 4 | extern int numa_off; |
5 | 5 | ||
6 | extern int pxm_to_nid(int pxm); | 6 | extern int pxm_to_nid(int pxm); |
7 | extern void numa_remove_cpu(int cpu); | 7 | |
8 | #ifdef CONFIG_NUMA | ||
9 | extern int __cpuinit numa_cpu_node(int cpu); | ||
10 | #else /* CONFIG_NUMA */ | ||
11 | static inline int numa_cpu_node(int cpu) { return NUMA_NO_NODE; } | ||
12 | #endif /* CONFIG_NUMA */ | ||
8 | 13 | ||
9 | #ifdef CONFIG_HIGHMEM | 14 | #ifdef CONFIG_HIGHMEM |
10 | extern void set_highmem_pages_init(void); | 15 | extern void set_highmem_pages_init(void); |
diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h index 0493be39607c..344eb1790b46 100644 --- a/arch/x86/include/asm/numa_64.h +++ b/arch/x86/include/asm/numa_64.h | |||
@@ -2,23 +2,16 @@ | |||
2 | #define _ASM_X86_NUMA_64_H | 2 | #define _ASM_X86_NUMA_64_H |
3 | 3 | ||
4 | #include <linux/nodemask.h> | 4 | #include <linux/nodemask.h> |
5 | #include <asm/apicdef.h> | ||
6 | 5 | ||
7 | struct bootnode { | 6 | struct bootnode { |
8 | u64 start; | 7 | u64 start; |
9 | u64 end; | 8 | u64 end; |
10 | }; | 9 | }; |
11 | 10 | ||
12 | extern int compute_hash_shift(struct bootnode *nodes, int numblks, | ||
13 | int *nodeids); | ||
14 | |||
15 | #define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT)) | 11 | #define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT)) |
16 | 12 | ||
17 | extern void numa_init_array(void); | ||
18 | extern int numa_off; | 13 | extern int numa_off; |
19 | 14 | ||
20 | extern s16 apicid_to_node[MAX_LOCAL_APIC]; | ||
21 | |||
22 | extern unsigned long numa_free_all_bootmem(void); | 15 | extern unsigned long numa_free_all_bootmem(void); |
23 | extern void setup_node_bootmem(int nodeid, unsigned long start, | 16 | extern void setup_node_bootmem(int nodeid, unsigned long start, |
24 | unsigned long end); | 17 | unsigned long end); |
@@ -31,11 +24,11 @@ extern void setup_node_bootmem(int nodeid, unsigned long start, | |||
31 | */ | 24 | */ |
32 | #define NODE_MIN_SIZE (4*1024*1024) | 25 | #define NODE_MIN_SIZE (4*1024*1024) |
33 | 26 | ||
34 | extern void __init init_cpu_to_node(void); | 27 | extern nodemask_t numa_nodes_parsed __initdata; |
35 | extern void __cpuinit numa_set_node(int cpu, int node); | 28 | |
36 | extern void __cpuinit numa_clear_node(int cpu); | 29 | extern int __cpuinit numa_cpu_node(int cpu); |
37 | extern void __cpuinit numa_add_cpu(int cpu); | 30 | extern int __init numa_add_memblk(int nodeid, u64 start, u64 end); |
38 | extern void __cpuinit numa_remove_cpu(int cpu); | 31 | extern void __init numa_set_distance(int from, int to, int distance); |
39 | 32 | ||
40 | #ifdef CONFIG_NUMA_EMU | 33 | #ifdef CONFIG_NUMA_EMU |
41 | #define FAKE_NODE_MIN_SIZE ((u64)32 << 20) | 34 | #define FAKE_NODE_MIN_SIZE ((u64)32 << 20) |
@@ -43,11 +36,7 @@ extern void __cpuinit numa_remove_cpu(int cpu); | |||
43 | void numa_emu_cmdline(char *); | 36 | void numa_emu_cmdline(char *); |
44 | #endif /* CONFIG_NUMA_EMU */ | 37 | #endif /* CONFIG_NUMA_EMU */ |
45 | #else | 38 | #else |
46 | static inline void init_cpu_to_node(void) { } | 39 | static inline int numa_cpu_node(int cpu) { return NUMA_NO_NODE; } |
47 | static inline void numa_set_node(int cpu, int node) { } | ||
48 | static inline void numa_clear_node(int cpu) { } | ||
49 | static inline void numa_add_cpu(int cpu, int node) { } | ||
50 | static inline void numa_remove_cpu(int cpu) { } | ||
51 | #endif | 40 | #endif |
52 | 41 | ||
53 | #endif /* _ASM_X86_NUMA_64_H */ | 42 | #endif /* _ASM_X86_NUMA_64_H */ |
diff --git a/arch/x86/include/asm/olpc_ofw.h b/arch/x86/include/asm/olpc_ofw.h index 641988efe063..c5d3a5abbb9f 100644 --- a/arch/x86/include/asm/olpc_ofw.h +++ b/arch/x86/include/asm/olpc_ofw.h | |||
@@ -6,7 +6,7 @@ | |||
6 | 6 | ||
7 | #define OLPC_OFW_SIG 0x2057464F /* aka "OFW " */ | 7 | #define OLPC_OFW_SIG 0x2057464F /* aka "OFW " */ |
8 | 8 | ||
9 | #ifdef CONFIG_OLPC_OPENFIRMWARE | 9 | #ifdef CONFIG_OLPC |
10 | 10 | ||
11 | extern bool olpc_ofw_is_installed(void); | 11 | extern bool olpc_ofw_is_installed(void); |
12 | 12 | ||
@@ -26,19 +26,15 @@ extern void setup_olpc_ofw_pgd(void); | |||
26 | /* check if OFW was detected during boot */ | 26 | /* check if OFW was detected during boot */ |
27 | extern bool olpc_ofw_present(void); | 27 | extern bool olpc_ofw_present(void); |
28 | 28 | ||
29 | #else /* !CONFIG_OLPC_OPENFIRMWARE */ | 29 | #else /* !CONFIG_OLPC */ |
30 | |||
31 | static inline bool olpc_ofw_is_installed(void) { return false; } | ||
32 | static inline void olpc_ofw_detect(void) { } | 30 | static inline void olpc_ofw_detect(void) { } |
33 | static inline void setup_olpc_ofw_pgd(void) { } | 31 | static inline void setup_olpc_ofw_pgd(void) { } |
34 | static inline bool olpc_ofw_present(void) { return false; } | 32 | #endif /* !CONFIG_OLPC */ |
35 | |||
36 | #endif /* !CONFIG_OLPC_OPENFIRMWARE */ | ||
37 | 33 | ||
38 | #ifdef CONFIG_OLPC_OPENFIRMWARE_DT | 34 | #ifdef CONFIG_OF_PROMTREE |
39 | extern void olpc_dt_build_devicetree(void); | 35 | extern void olpc_dt_build_devicetree(void); |
40 | #else | 36 | #else |
41 | static inline void olpc_dt_build_devicetree(void) { } | 37 | static inline void olpc_dt_build_devicetree(void) { } |
42 | #endif /* CONFIG_OLPC_OPENFIRMWARE_DT */ | 38 | #endif |
43 | 39 | ||
44 | #endif /* _ASM_X86_OLPC_OFW_H */ | 40 | #endif /* _ASM_X86_OLPC_OFW_H */ |
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index 1df66211fd1b..bce688d54c12 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define _ASM_X86_PAGE_DEFS_H | 2 | #define _ASM_X86_PAGE_DEFS_H |
3 | 3 | ||
4 | #include <linux/const.h> | 4 | #include <linux/const.h> |
5 | #include <linux/types.h> | ||
5 | 6 | ||
6 | /* PAGE_SHIFT determines the page size */ | 7 | /* PAGE_SHIFT determines the page size */ |
7 | #define PAGE_SHIFT 12 | 8 | #define PAGE_SHIFT 12 |
@@ -45,11 +46,15 @@ extern int devmem_is_allowed(unsigned long pagenr); | |||
45 | extern unsigned long max_low_pfn_mapped; | 46 | extern unsigned long max_low_pfn_mapped; |
46 | extern unsigned long max_pfn_mapped; | 47 | extern unsigned long max_pfn_mapped; |
47 | 48 | ||
49 | static inline phys_addr_t get_max_mapped(void) | ||
50 | { | ||
51 | return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT; | ||
52 | } | ||
53 | |||
48 | extern unsigned long init_memory_mapping(unsigned long start, | 54 | extern unsigned long init_memory_mapping(unsigned long start, |
49 | unsigned long end); | 55 | unsigned long end); |
50 | 56 | ||
51 | extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn, | 57 | extern void initmem_init(void); |
52 | int acpi, int k8); | ||
53 | extern void free_initmem(void); | 58 | extern void free_initmem(void); |
54 | 59 | ||
55 | #endif /* !__ASSEMBLY__ */ | 60 | #endif /* !__ASSEMBLY__ */ |
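Annotation: quick sanity check on the new helper. With PAGE_SHIFT = 12, a max_pfn_mapped of 0x100000 makes get_max_mapped() return (phys_addr_t)0x100000 << 12 = 0x100000000, i.e. the 4 GiB boundary; the phys_addr_t cast keeps the shift from overflowing a 32-bit unsigned long on PAE configurations.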
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 7e172955ee57..a09e1f052d84 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h | |||
@@ -451,6 +451,26 @@ do { \ | |||
451 | #define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) | 451 | #define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) |
452 | #endif /* !CONFIG_M386 */ | 452 | #endif /* !CONFIG_M386 */ |
453 | 453 | ||
454 | #ifdef CONFIG_X86_CMPXCHG64 | ||
455 | #define percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2) \ | ||
456 | ({ \ | ||
457 | char __ret; \ | ||
458 | typeof(o1) __o1 = o1; \ | ||
459 | typeof(o1) __n1 = n1; \ | ||
460 | typeof(o2) __o2 = o2; \ | ||
461 | typeof(o2) __n2 = n2; \ | ||
462 | typeof(o2) __dummy = n2; \ | ||
463 | asm volatile("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t" \ | ||
464 | : "=a"(__ret), "=m" (pcp1), "=d"(__dummy) \ | ||
465 | : "b"(__n1), "c"(__n2), "a"(__o1), "d"(__o2)); \ | ||
466 | __ret; \ | ||
467 | }) | ||
468 | |||
469 | #define __this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2) | ||
470 | #define this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2) | ||
471 | #define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2) | ||
472 | #endif /* CONFIG_X86_CMPXCHG64 */ | ||
473 | |||
454 | /* | 474 | /* |
455 | * Per cpu atomic 64 bit operations are only available under 64 bit. | 475 | * Per cpu atomic 64 bit operations are only available under 64 bit. |
456 | * 32 bit must fall back to generic operations. | 476 | * 32 bit must fall back to generic operations. |
@@ -480,6 +500,34 @@ do { \ | |||
480 | #define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) | 500 | #define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) |
481 | #define irqsafe_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval) | 501 | #define irqsafe_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval) |
482 | #define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) | 502 | #define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) |
503 | |||
504 | /* | ||
505 | * Pretty complex macro to generate cmpxchg16 instruction. The instruction | ||
506 | * is not supported on early AMD64 processors so we must be able to emulate | ||
507 | * it in software. The address used in the cmpxchg16 instruction must be | ||
508 | * aligned to a 16 byte boundary. | ||
509 | */ | ||
510 | #define percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2) \ | ||
511 | ({ \ | ||
512 | char __ret; \ | ||
513 | typeof(o1) __o1 = o1; \ | ||
514 | typeof(o1) __n1 = n1; \ | ||
515 | typeof(o2) __o2 = o2; \ | ||
516 | typeof(o2) __n2 = n2; \ | ||
517 | typeof(o2) __dummy; \ | ||
518 | alternative_io("call this_cpu_cmpxchg16b_emu\n\t" P6_NOP4, \ | ||
519 | "cmpxchg16b %%gs:(%%rsi)\n\tsetz %0\n\t", \ | ||
520 | X86_FEATURE_CX16, \ | ||
521 | ASM_OUTPUT2("=a"(__ret), "=d"(__dummy)), \ | ||
522 | "S" (&pcp1), "b"(__n1), "c"(__n2), \ | ||
523 | "a"(__o1), "d"(__o2)); \ | ||
524 | __ret; \ | ||
525 | }) | ||
526 | |||
527 | #define __this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2) | ||
528 | #define this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2) | ||
529 | #define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2) | ||
530 | |||
483 | #endif | 531 | #endif |
484 | 532 | ||
485 | /* This is not atomic against other CPUs -- CPU preemption needs to be off */ | 533 | /* This is not atomic against other CPUs -- CPU preemption needs to be off */ |
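Annotation: the cmpxchg8b/cmpxchg16b double-word helpers added above operate on two adjacent per-cpu words. A minimal usage sketch, assuming the generic this_cpu_cmpxchg_double() wrapper in <linux/percpu.h> dispatches to these _4/_8 variants; the structure and function names are invented for the example:

	struct example_pair {
		void		*ptr;	/* first word  */
		unsigned long	seq;	/* second word */
	} __aligned(2 * sizeof(void *));	/* cmpxchg16b needs 16-byte alignment */

	static DEFINE_PER_CPU(struct example_pair, example_pair);

	static bool example_try_update(void *old_ptr, unsigned long old_seq,
				       void *new_ptr, unsigned long new_seq)
	{
		/* Succeeds only if both words still hold the expected values. */
		return this_cpu_cmpxchg_double(example_pair.ptr, example_pair.seq,
					       old_ptr, old_seq,
					       new_ptr, new_seq);
	}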
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 45636cefa186..4c25ab48257b 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
@@ -94,10 +94,6 @@ struct cpuinfo_x86 { | |||
94 | int x86_cache_alignment; /* In bytes */ | 94 | int x86_cache_alignment; /* In bytes */ |
95 | int x86_power; | 95 | int x86_power; |
96 | unsigned long loops_per_jiffy; | 96 | unsigned long loops_per_jiffy; |
97 | #ifdef CONFIG_SMP | ||
98 | /* cpus sharing the last level cache: */ | ||
99 | cpumask_var_t llc_shared_map; | ||
100 | #endif | ||
101 | /* cpuid returned max cores value: */ | 97 | /* cpuid returned max cores value: */ |
102 | u16 x86_max_cores; | 98 | u16 x86_max_cores; |
103 | u16 apicid; | 99 | u16 apicid; |
diff --git a/arch/x86/include/asm/prom.h b/arch/x86/include/asm/prom.h index b4ec95f07518..971e0b46446e 100644 --- a/arch/x86/include/asm/prom.h +++ b/arch/x86/include/asm/prom.h | |||
@@ -1 +1,69 @@ | |||
1 | /* dummy prom.h; here to make linux/of.h's #includes happy */ | 1 | /* |
2 | * Definitions for Device tree / OpenFirmware handling on X86 | ||
3 | * | ||
4 | * based on arch/powerpc/include/asm/prom.h which is | ||
5 | * Copyright (C) 1996-2005 Paul Mackerras. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version | ||
10 | * 2 of the License, or (at your option) any later version. | ||
11 | */ | ||
12 | |||
13 | #ifndef _ASM_X86_PROM_H | ||
14 | #define _ASM_X86_PROM_H | ||
15 | #ifndef __ASSEMBLY__ | ||
16 | |||
17 | #include <linux/of.h> | ||
18 | #include <linux/types.h> | ||
19 | #include <linux/pci.h> | ||
20 | |||
21 | #include <asm/irq.h> | ||
22 | #include <asm/atomic.h> | ||
23 | #include <asm/setup.h> | ||
24 | #include <asm/irq_controller.h> | ||
25 | |||
26 | #ifdef CONFIG_OF | ||
27 | extern int of_ioapic; | ||
28 | extern u64 initial_dtb; | ||
29 | extern void add_dtb(u64 data); | ||
30 | extern void x86_add_irq_domains(void); | ||
31 | void __cpuinit x86_of_pci_init(void); | ||
32 | void x86_dtb_init(void); | ||
33 | |||
34 | static inline struct device_node *pci_device_to_OF_node(struct pci_dev *pdev) | ||
35 | { | ||
36 | return pdev ? pdev->dev.of_node : NULL; | ||
37 | } | ||
38 | |||
39 | static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) | ||
40 | { | ||
41 | return pci_device_to_OF_node(bus->self); | ||
42 | } | ||
43 | |||
44 | #else | ||
45 | static inline void add_dtb(u64 data) { } | ||
46 | static inline void x86_add_irq_domains(void) { } | ||
47 | static inline void x86_of_pci_init(void) { } | ||
48 | static inline void x86_dtb_init(void) { } | ||
49 | #define of_ioapic 0 | ||
50 | #endif | ||
51 | |||
52 | extern char cmd_line[COMMAND_LINE_SIZE]; | ||
53 | |||
54 | #define pci_address_to_pio pci_address_to_pio | ||
55 | unsigned long pci_address_to_pio(phys_addr_t addr); | ||
56 | |||
57 | /** | ||
58 | * irq_dispose_mapping - Unmap an interrupt | ||
59 | * @virq: linux virq number of the interrupt to unmap | ||
60 | * | ||
61 | * FIXME: We really should implement proper virq handling like power, | ||
62 | * but that's going to be major surgery. | ||
63 | */ | ||
64 | static inline void irq_dispose_mapping(unsigned int virq) { } | ||
65 | |||
66 | #define HAVE_ARCH_DEVTREE_FIXUPS | ||
67 | |||
68 | #endif /* __ASSEMBLY__ */ | ||
69 | #endif | ||
diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h index 562d4fd31ba8..3250e3d605d9 100644 --- a/arch/x86/include/asm/reboot.h +++ b/arch/x86/include/asm/reboot.h | |||
@@ -18,7 +18,10 @@ extern struct machine_ops machine_ops; | |||
18 | 18 | ||
19 | void native_machine_crash_shutdown(struct pt_regs *regs); | 19 | void native_machine_crash_shutdown(struct pt_regs *regs); |
20 | void native_machine_shutdown(void); | 20 | void native_machine_shutdown(void); |
21 | void machine_real_restart(const unsigned char *code, int length); | 21 | void machine_real_restart(unsigned int type); |
22 | /* These must match dispatch_table in reboot_32.S */ | ||
23 | #define MRR_BIOS 0 | ||
24 | #define MRR_APM 1 | ||
22 | 25 | ||
23 | typedef void (*nmi_shootdown_cb)(int, struct die_args*); | 26 | typedef void (*nmi_shootdown_cb)(int, struct die_args*); |
24 | void nmi_shootdown_cpus(nmi_shootdown_cb callback); | 27 | void nmi_shootdown_cpus(nmi_shootdown_cb callback); |
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h index d1e41b0f9b60..df4cd32b4cc6 100644 --- a/arch/x86/include/asm/rwsem.h +++ b/arch/x86/include/asm/rwsem.h | |||
@@ -37,26 +37,9 @@ | |||
37 | #endif | 37 | #endif |
38 | 38 | ||
39 | #ifdef __KERNEL__ | 39 | #ifdef __KERNEL__ |
40 | |||
41 | #include <linux/list.h> | ||
42 | #include <linux/spinlock.h> | ||
43 | #include <linux/lockdep.h> | ||
44 | #include <asm/asm.h> | 40 | #include <asm/asm.h> |
45 | 41 | ||
46 | struct rwsem_waiter; | ||
47 | |||
48 | extern asmregparm struct rw_semaphore * | ||
49 | rwsem_down_read_failed(struct rw_semaphore *sem); | ||
50 | extern asmregparm struct rw_semaphore * | ||
51 | rwsem_down_write_failed(struct rw_semaphore *sem); | ||
52 | extern asmregparm struct rw_semaphore * | ||
53 | rwsem_wake(struct rw_semaphore *); | ||
54 | extern asmregparm struct rw_semaphore * | ||
55 | rwsem_downgrade_wake(struct rw_semaphore *sem); | ||
56 | |||
57 | /* | 42 | /* |
58 | * the semaphore definition | ||
59 | * | ||
60 | * The bias values and the counter type limits the number of | 43 | * The bias values and the counter type limits the number of |
61 | * potential readers/writers to 32767 for 32 bits and 2147483647 | 44 | * potential readers/writers to 32767 for 32 bits and 2147483647 |
62 | * for 64 bits. | 45 | * for 64 bits. |
@@ -74,43 +57,6 @@ extern asmregparm struct rw_semaphore * | |||
74 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS | 57 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS |
75 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) | 58 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) |
76 | 59 | ||
77 | typedef signed long rwsem_count_t; | ||
78 | |||
79 | struct rw_semaphore { | ||
80 | rwsem_count_t count; | ||
81 | spinlock_t wait_lock; | ||
82 | struct list_head wait_list; | ||
83 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
84 | struct lockdep_map dep_map; | ||
85 | #endif | ||
86 | }; | ||
87 | |||
88 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
89 | # define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } | ||
90 | #else | ||
91 | # define __RWSEM_DEP_MAP_INIT(lockname) | ||
92 | #endif | ||
93 | |||
94 | |||
95 | #define __RWSEM_INITIALIZER(name) \ | ||
96 | { \ | ||
97 | RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \ | ||
98 | LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) \ | ||
99 | } | ||
100 | |||
101 | #define DECLARE_RWSEM(name) \ | ||
102 | struct rw_semaphore name = __RWSEM_INITIALIZER(name) | ||
103 | |||
104 | extern void __init_rwsem(struct rw_semaphore *sem, const char *name, | ||
105 | struct lock_class_key *key); | ||
106 | |||
107 | #define init_rwsem(sem) \ | ||
108 | do { \ | ||
109 | static struct lock_class_key __key; \ | ||
110 | \ | ||
111 | __init_rwsem((sem), #sem, &__key); \ | ||
112 | } while (0) | ||
113 | |||
114 | /* | 60 | /* |
115 | * lock for reading | 61 | * lock for reading |
116 | */ | 62 | */ |
@@ -133,7 +79,7 @@ static inline void __down_read(struct rw_semaphore *sem) | |||
133 | */ | 79 | */ |
134 | static inline int __down_read_trylock(struct rw_semaphore *sem) | 80 | static inline int __down_read_trylock(struct rw_semaphore *sem) |
135 | { | 81 | { |
136 | rwsem_count_t result, tmp; | 82 | long result, tmp; |
137 | asm volatile("# beginning __down_read_trylock\n\t" | 83 | asm volatile("# beginning __down_read_trylock\n\t" |
138 | " mov %0,%1\n\t" | 84 | " mov %0,%1\n\t" |
139 | "1:\n\t" | 85 | "1:\n\t" |
@@ -155,7 +101,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) | |||
155 | */ | 101 | */ |
156 | static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) | 102 | static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) |
157 | { | 103 | { |
158 | rwsem_count_t tmp; | 104 | long tmp; |
159 | asm volatile("# beginning down_write\n\t" | 105 | asm volatile("# beginning down_write\n\t" |
160 | LOCK_PREFIX " xadd %1,(%2)\n\t" | 106 | LOCK_PREFIX " xadd %1,(%2)\n\t" |
161 | /* adds 0xffff0001, returns the old value */ | 107 | /* adds 0xffff0001, returns the old value */ |
@@ -180,9 +126,8 @@ static inline void __down_write(struct rw_semaphore *sem) | |||
180 | */ | 126 | */ |
181 | static inline int __down_write_trylock(struct rw_semaphore *sem) | 127 | static inline int __down_write_trylock(struct rw_semaphore *sem) |
182 | { | 128 | { |
183 | rwsem_count_t ret = cmpxchg(&sem->count, | 129 | long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE, |
184 | RWSEM_UNLOCKED_VALUE, | 130 | RWSEM_ACTIVE_WRITE_BIAS); |
185 | RWSEM_ACTIVE_WRITE_BIAS); | ||
186 | if (ret == RWSEM_UNLOCKED_VALUE) | 131 | if (ret == RWSEM_UNLOCKED_VALUE) |
187 | return 1; | 132 | return 1; |
188 | return 0; | 133 | return 0; |
@@ -193,7 +138,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem) | |||
193 | */ | 138 | */ |
194 | static inline void __up_read(struct rw_semaphore *sem) | 139 | static inline void __up_read(struct rw_semaphore *sem) |
195 | { | 140 | { |
196 | rwsem_count_t tmp; | 141 | long tmp; |
197 | asm volatile("# beginning __up_read\n\t" | 142 | asm volatile("# beginning __up_read\n\t" |
198 | LOCK_PREFIX " xadd %1,(%2)\n\t" | 143 | LOCK_PREFIX " xadd %1,(%2)\n\t" |
199 | /* subtracts 1, returns the old value */ | 144 | /* subtracts 1, returns the old value */ |
@@ -211,7 +156,7 @@ static inline void __up_read(struct rw_semaphore *sem) | |||
211 | */ | 156 | */ |
212 | static inline void __up_write(struct rw_semaphore *sem) | 157 | static inline void __up_write(struct rw_semaphore *sem) |
213 | { | 158 | { |
214 | rwsem_count_t tmp; | 159 | long tmp; |
215 | asm volatile("# beginning __up_write\n\t" | 160 | asm volatile("# beginning __up_write\n\t" |
216 | LOCK_PREFIX " xadd %1,(%2)\n\t" | 161 | LOCK_PREFIX " xadd %1,(%2)\n\t" |
217 | /* subtracts 0xffff0001, returns the old value */ | 162 | /* subtracts 0xffff0001, returns the old value */ |
@@ -247,8 +192,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem) | |||
247 | /* | 192 | /* |
248 | * implement atomic add functionality | 193 | * implement atomic add functionality |
249 | */ | 194 | */ |
250 | static inline void rwsem_atomic_add(rwsem_count_t delta, | 195 | static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem) |
251 | struct rw_semaphore *sem) | ||
252 | { | 196 | { |
253 | asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0" | 197 | asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0" |
254 | : "+m" (sem->count) | 198 | : "+m" (sem->count) |
@@ -258,10 +202,9 @@ static inline void rwsem_atomic_add(rwsem_count_t delta, | |||
258 | /* | 202 | /* |
259 | * implement exchange and add functionality | 203 | * implement exchange and add functionality |
260 | */ | 204 | */ |
261 | static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta, | 205 | static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem) |
262 | struct rw_semaphore *sem) | ||
263 | { | 206 | { |
264 | rwsem_count_t tmp = delta; | 207 | long tmp = delta; |
265 | 208 | ||
266 | asm volatile(LOCK_PREFIX "xadd %0,%1" | 209 | asm volatile(LOCK_PREFIX "xadd %0,%1" |
267 | : "+r" (tmp), "+m" (sem->count) | 210 | : "+r" (tmp), "+m" (sem->count) |
@@ -270,10 +213,5 @@ static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta, | |||
270 | return tmp + delta; | 213 | return tmp + delta; |
271 | } | 214 | } |
272 | 215 | ||
273 | static inline int rwsem_is_locked(struct rw_semaphore *sem) | ||
274 | { | ||
275 | return (sem->count != 0); | ||
276 | } | ||
277 | |||
278 | #endif /* __KERNEL__ */ | 216 | #endif /* __KERNEL__ */ |
279 | #endif /* _ASM_X86_RWSEM_H */ | 217 | #endif /* _ASM_X86_RWSEM_H */ |
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h index 231f1c1d6607..cd84f7208f76 100644 --- a/arch/x86/include/asm/segment.h +++ b/arch/x86/include/asm/segment.h | |||
@@ -1,14 +1,16 @@ | |||
1 | #ifndef _ASM_X86_SEGMENT_H | 1 | #ifndef _ASM_X86_SEGMENT_H |
2 | #define _ASM_X86_SEGMENT_H | 2 | #define _ASM_X86_SEGMENT_H |
3 | 3 | ||
4 | #include <linux/const.h> | ||
5 | |||
4 | /* Constructor for a conventional segment GDT (or LDT) entry */ | 6 | /* Constructor for a conventional segment GDT (or LDT) entry */ |
5 | /* This is a macro so it can be used in initializers */ | 7 | /* This is a macro so it can be used in initializers */ |
6 | #define GDT_ENTRY(flags, base, limit) \ | 8 | #define GDT_ENTRY(flags, base, limit) \ |
7 | ((((base) & 0xff000000ULL) << (56-24)) | \ | 9 | ((((base) & _AC(0xff000000,ULL)) << (56-24)) | \ |
8 | (((flags) & 0x0000f0ffULL) << 40) | \ | 10 | (((flags) & _AC(0x0000f0ff,ULL)) << 40) | \ |
9 | (((limit) & 0x000f0000ULL) << (48-16)) | \ | 11 | (((limit) & _AC(0x000f0000,ULL)) << (48-16)) | \ |
10 | (((base) & 0x00ffffffULL) << 16) | \ | 12 | (((base) & _AC(0x00ffffff,ULL)) << 16) | \ |
11 | (((limit) & 0x0000ffffULL))) | 13 | (((limit) & _AC(0x0000ffff,ULL)))) |
12 | 14 | ||
13 | /* Simple and small GDT entries for booting only */ | 15 | /* Simple and small GDT entries for booting only */ |
14 | 16 | ||
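Annotation: switching the masks to _AC() lets GDT_ENTRY() be used from assembly as well as C without the ULL suffix tripping up the assembler. As a worked example, the familiar flat 4 GiB code descriptor GDT_ENTRY(0xc09b, 0, 0xfffff) still evaluates to 0x00cf9b000000ffff: flags 0xc09b land in bits 40-47 and 52-55, the limit's top nibble in bits 48-51, and the low 16 limit bits in bits 0-15, with a zero base contributing nothing.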
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 1f4695136776..73b11bc0ae6f 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h | |||
@@ -17,12 +17,24 @@ | |||
17 | #endif | 17 | #endif |
18 | #include <asm/thread_info.h> | 18 | #include <asm/thread_info.h> |
19 | #include <asm/cpumask.h> | 19 | #include <asm/cpumask.h> |
20 | #include <asm/cpufeature.h> | ||
20 | 21 | ||
21 | extern int smp_num_siblings; | 22 | extern int smp_num_siblings; |
22 | extern unsigned int num_processors; | 23 | extern unsigned int num_processors; |
23 | 24 | ||
25 | static inline bool cpu_has_ht_siblings(void) | ||
26 | { | ||
27 | bool has_siblings = false; | ||
28 | #ifdef CONFIG_SMP | ||
29 | has_siblings = cpu_has_ht && smp_num_siblings > 1; | ||
30 | #endif | ||
31 | return has_siblings; | ||
32 | } | ||
33 | |||
24 | DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map); | 34 | DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map); |
25 | DECLARE_PER_CPU(cpumask_var_t, cpu_core_map); | 35 | DECLARE_PER_CPU(cpumask_var_t, cpu_core_map); |
36 | /* cpus sharing the last level cache: */ | ||
37 | DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map); | ||
26 | DECLARE_PER_CPU(u16, cpu_llc_id); | 38 | DECLARE_PER_CPU(u16, cpu_llc_id); |
27 | DECLARE_PER_CPU(int, cpu_number); | 39 | DECLARE_PER_CPU(int, cpu_number); |
28 | 40 | ||
@@ -36,8 +48,16 @@ static inline struct cpumask *cpu_core_mask(int cpu) | |||
36 | return per_cpu(cpu_core_map, cpu); | 48 | return per_cpu(cpu_core_map, cpu); |
37 | } | 49 | } |
38 | 50 | ||
51 | static inline struct cpumask *cpu_llc_shared_mask(int cpu) | ||
52 | { | ||
53 | return per_cpu(cpu_llc_shared_map, cpu); | ||
54 | } | ||
55 | |||
39 | DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid); | 56 | DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid); |
40 | DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid); | 57 | DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid); |
58 | #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32) | ||
59 | DECLARE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid); | ||
60 | #endif | ||
41 | 61 | ||
42 | /* Static state in head.S used to set up a CPU */ | 62 | /* Static state in head.S used to set up a CPU */ |
43 | extern unsigned long stack_start; /* Initial stack pointer address */ | 63 | extern unsigned long stack_start; /* Initial stack pointer address */ |
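Annotation: with the LLC sibling mask now a standalone per-cpu cpumask rather than a field of cpuinfo_x86 (see the processor.h hunk above), callers reach it through cpu_llc_shared_mask(). Illustrative sketch only, function name invented:

	static void example_dump_llc_siblings(int cpu)
	{
		int sibling;

		for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
			pr_info("CPU%d shares its last level cache with CPU%d\n",
				cpu, sibling);
	}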
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h index 33ecc3ea8782..12569e691ce3 100644 --- a/arch/x86/include/asm/system.h +++ b/arch/x86/include/asm/system.h | |||
@@ -98,8 +98,6 @@ do { \ | |||
98 | */ | 98 | */ |
99 | #define HAVE_DISABLE_HLT | 99 | #define HAVE_DISABLE_HLT |
100 | #else | 100 | #else |
101 | #define __SAVE(reg, offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t" | ||
102 | #define __RESTORE(reg, offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t" | ||
103 | 101 | ||
104 | /* frame pointer must be last for get_wchan */ | 102 | /* frame pointer must be last for get_wchan */ |
105 | #define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t" | 103 | #define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t" |
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index 21899cc31e52..910a7084f7f2 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h | |||
@@ -47,21 +47,6 @@ | |||
47 | 47 | ||
48 | #include <asm/mpspec.h> | 48 | #include <asm/mpspec.h> |
49 | 49 | ||
50 | #ifdef CONFIG_X86_32 | ||
51 | |||
52 | /* Mappings between logical cpu number and node number */ | ||
53 | extern int cpu_to_node_map[]; | ||
54 | |||
55 | /* Returns the number of the node containing CPU 'cpu' */ | ||
56 | static inline int __cpu_to_node(int cpu) | ||
57 | { | ||
58 | return cpu_to_node_map[cpu]; | ||
59 | } | ||
60 | #define early_cpu_to_node __cpu_to_node | ||
61 | #define cpu_to_node __cpu_to_node | ||
62 | |||
63 | #else /* CONFIG_X86_64 */ | ||
64 | |||
65 | /* Mappings between logical cpu number and node number */ | 50 | /* Mappings between logical cpu number and node number */ |
66 | DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map); | 51 | DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map); |
67 | 52 | ||
@@ -84,8 +69,6 @@ static inline int early_cpu_to_node(int cpu) | |||
84 | 69 | ||
85 | #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ | 70 | #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ |
86 | 71 | ||
87 | #endif /* CONFIG_X86_64 */ | ||
88 | |||
89 | /* Mappings between node number and cpus on that node. */ | 72 | /* Mappings between node number and cpus on that node. */ |
90 | extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; | 73 | extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; |
91 | 74 | ||
@@ -155,7 +138,7 @@ extern unsigned long node_remap_size[]; | |||
155 | .balance_interval = 1, \ | 138 | .balance_interval = 1, \ |
156 | } | 139 | } |
157 | 140 | ||
158 | #ifdef CONFIG_X86_64_ACPI_NUMA | 141 | #ifdef CONFIG_X86_64 |
159 | extern int __node_distance(int, int); | 142 | extern int __node_distance(int, int); |
160 | #define node_distance(a, b) __node_distance(a, b) | 143 | #define node_distance(a, b) __node_distance(a, b) |
161 | #endif | 144 | #endif |
diff --git a/arch/x86/include/asm/trampoline.h b/arch/x86/include/asm/trampoline.h index f4500fb3b485..feca3118a73b 100644 --- a/arch/x86/include/asm/trampoline.h +++ b/arch/x86/include/asm/trampoline.h | |||
@@ -3,25 +3,36 @@ | |||
3 | 3 | ||
4 | #ifndef __ASSEMBLY__ | 4 | #ifndef __ASSEMBLY__ |
5 | 5 | ||
6 | #ifdef CONFIG_X86_TRAMPOLINE | 6 | #include <linux/types.h> |
7 | #include <asm/io.h> | ||
8 | |||
7 | /* | 9 | /* |
8 | * Trampoline 80x86 program as an array. | 10 | * Trampoline 80x86 program as an array. These are in the init rodata |
11 | * segment, but that's okay, because we only care about the relative | ||
12 | * addresses of the symbols. | ||
9 | */ | 13 | */ |
10 | extern const unsigned char trampoline_data []; | 14 | extern const unsigned char x86_trampoline_start []; |
11 | extern const unsigned char trampoline_end []; | 15 | extern const unsigned char x86_trampoline_end []; |
12 | extern unsigned char *trampoline_base; | 16 | extern unsigned char *x86_trampoline_base; |
13 | 17 | ||
14 | extern unsigned long init_rsp; | 18 | extern unsigned long init_rsp; |
15 | extern unsigned long initial_code; | 19 | extern unsigned long initial_code; |
16 | extern unsigned long initial_gs; | 20 | extern unsigned long initial_gs; |
17 | 21 | ||
18 | #define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE) | 22 | extern void __init setup_trampolines(void); |
23 | |||
24 | extern const unsigned char trampoline_data[]; | ||
25 | extern const unsigned char trampoline_status[]; | ||
26 | |||
27 | #define TRAMPOLINE_SYM(x) \ | ||
28 | ((void *)(x86_trampoline_base + \ | ||
29 | ((const unsigned char *)(x) - x86_trampoline_start))) | ||
19 | 30 | ||
20 | extern unsigned long setup_trampoline(void); | 31 | /* Address of the SMP trampoline */ |
21 | extern void __init reserve_trampoline_memory(void); | 32 | static inline unsigned long trampoline_address(void) |
22 | #else | 33 | { |
23 | static inline void reserve_trampoline_memory(void) {} | 34 | return virt_to_phys(TRAMPOLINE_SYM(trampoline_data)); |
24 | #endif /* CONFIG_X86_TRAMPOLINE */ | 35 | } |
25 | 36 | ||
26 | #endif /* __ASSEMBLY__ */ | 37 | #endif /* __ASSEMBLY__ */ |
27 | 38 | ||
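Annotation: the TRAMPOLINE_SYM() arithmetic above relocates a symbol from its link-time position inside the x86_trampoline_start..x86_trampoline_end section to the low-memory copy at x86_trampoline_base. If, say, trampoline_data sat 0x40 bytes into the section (a made-up offset for illustration), TRAMPOLINE_SYM(trampoline_data) would be x86_trampoline_base + 0x40, and trampoline_address() returns virt_to_phys() of that for use as the AP startup address.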
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h index b766a5e8ba0e..ffaf183c619a 100644 --- a/arch/x86/include/asm/unistd_32.h +++ b/arch/x86/include/asm/unistd_32.h | |||
@@ -346,10 +346,13 @@ | |||
346 | #define __NR_fanotify_init 338 | 346 | #define __NR_fanotify_init 338 |
347 | #define __NR_fanotify_mark 339 | 347 | #define __NR_fanotify_mark 339 |
348 | #define __NR_prlimit64 340 | 348 | #define __NR_prlimit64 340 |
349 | #define __NR_name_to_handle_at 341 | ||
350 | #define __NR_open_by_handle_at 342 | ||
351 | #define __NR_clock_adjtime 343 | ||
349 | 352 | ||
350 | #ifdef __KERNEL__ | 353 | #ifdef __KERNEL__ |
351 | 354 | ||
352 | #define NR_syscalls 341 | 355 | #define NR_syscalls 344 |
353 | 356 | ||
354 | #define __ARCH_WANT_IPC_PARSE_VERSION | 357 | #define __ARCH_WANT_IPC_PARSE_VERSION |
355 | #define __ARCH_WANT_OLD_READDIR | 358 | #define __ARCH_WANT_OLD_READDIR |
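Annotation: the numbering is consistent — three new entries (341-343) on top of the previous highest syscall number 340 bump NR_syscalls from 341 to 344.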
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h index 363e9b8a715b..5466bea670e7 100644 --- a/arch/x86/include/asm/unistd_64.h +++ b/arch/x86/include/asm/unistd_64.h | |||
@@ -669,6 +669,12 @@ __SYSCALL(__NR_fanotify_init, sys_fanotify_init) | |||
669 | __SYSCALL(__NR_fanotify_mark, sys_fanotify_mark) | 669 | __SYSCALL(__NR_fanotify_mark, sys_fanotify_mark) |
670 | #define __NR_prlimit64 302 | 670 | #define __NR_prlimit64 302 |
671 | __SYSCALL(__NR_prlimit64, sys_prlimit64) | 671 | __SYSCALL(__NR_prlimit64, sys_prlimit64) |
672 | #define __NR_name_to_handle_at 303 | ||
673 | __SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at) | ||
674 | #define __NR_open_by_handle_at 304 | ||
675 | __SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at) | ||
676 | #define __NR_clock_adjtime 305 | ||
677 | __SYSCALL(__NR_clock_adjtime, sys_clock_adjtime) | ||
672 | 678 | ||
673 | #ifndef __NO_STUBS | 679 | #ifndef __NO_STUBS |
674 | #define __ARCH_WANT_OLD_READDIR | 680 | #define __ARCH_WANT_OLD_READDIR |
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h index ce1d54c8a433..3e094af443c3 100644 --- a/arch/x86/include/asm/uv/uv_bau.h +++ b/arch/x86/include/asm/uv/uv_bau.h | |||
@@ -176,7 +176,7 @@ struct bau_msg_payload { | |||
176 | struct bau_msg_header { | 176 | struct bau_msg_header { |
177 | unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */ | 177 | unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */ |
178 | /* bits 5:0 */ | 178 | /* bits 5:0 */ |
179 | unsigned int base_dest_nodeid:15; /* nasid (pnode<<1) of */ | 179 | unsigned int base_dest_nodeid:15; /* nasid of the */ |
180 | /* bits 20:6 */ /* first bit in uvhub map */ | 180 | /* bits 20:6 */ /* first bit in uvhub map */ |
181 | unsigned int command:8; /* message type */ | 181 | unsigned int command:8; /* message type */ |
182 | /* bits 28:21 */ | 182 | /* bits 28:21 */ |
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index 64642ad019fb..643ebf2e2ad8 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h | |||
@@ -83,11 +83,13 @@ struct x86_init_paging { | |||
83 | * boot cpu | 83 | * boot cpu |
84 | * @tsc_pre_init: platform function called before TSC init | 84 | * @tsc_pre_init: platform function called before TSC init |
85 | * @timer_init: initialize the platform timer (default PIT/HPET) | 85 | * @timer_init: initialize the platform timer (default PIT/HPET) |
86 | * @wallclock_init: init the wallclock device | ||
86 | */ | 87 | */ |
87 | struct x86_init_timers { | 88 | struct x86_init_timers { |
88 | void (*setup_percpu_clockev)(void); | 89 | void (*setup_percpu_clockev)(void); |
89 | void (*tsc_pre_init)(void); | 90 | void (*tsc_pre_init)(void); |
90 | void (*timer_init)(void); | 91 | void (*timer_init)(void); |
92 | void (*wallclock_init)(void); | ||
91 | }; | 93 | }; |
92 | 94 | ||
93 | /** | 95 | /** |
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index a3c28ae4025b..8508bfe52296 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h | |||
@@ -287,7 +287,7 @@ HYPERVISOR_fpu_taskswitch(int set) | |||
287 | static inline int | 287 | static inline int |
288 | HYPERVISOR_sched_op(int cmd, void *arg) | 288 | HYPERVISOR_sched_op(int cmd, void *arg) |
289 | { | 289 | { |
290 | return _hypercall2(int, sched_op_new, cmd, arg); | 290 | return _hypercall2(int, sched_op, cmd, arg); |
291 | } | 291 | } |
292 | 292 | ||
293 | static inline long | 293 | static inline long |
@@ -422,10 +422,17 @@ HYPERVISOR_set_segment_base(int reg, unsigned long value) | |||
422 | #endif | 422 | #endif |
423 | 423 | ||
424 | static inline int | 424 | static inline int |
425 | HYPERVISOR_suspend(unsigned long srec) | 425 | HYPERVISOR_suspend(unsigned long start_info_mfn) |
426 | { | 426 | { |
427 | return _hypercall3(int, sched_op, SCHEDOP_shutdown, | 427 | struct sched_shutdown r = { .reason = SHUTDOWN_suspend }; |
428 | SHUTDOWN_suspend, srec); | 428 | |
429 | /* | ||
430 | * For a PV guest the tools require that the start_info mfn be | ||
431 | * present in rdx/edx when the hypercall is made. Per the | ||
432 | * hypercall calling convention this is the third hypercall | ||
433 | * argument, which is start_info_mfn here. | ||
434 | */ | ||
435 | return _hypercall3(int, sched_op, SCHEDOP_shutdown, &r, start_info_mfn); | ||
429 | } | 436 | } |
430 | 437 | ||
431 | static inline int | 438 | static inline int |
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index f25bdf238a33..c61934fbf22a 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h | |||
@@ -29,8 +29,10 @@ typedef struct xpaddr { | |||
29 | 29 | ||
30 | /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/ | 30 | /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/ |
31 | #define INVALID_P2M_ENTRY (~0UL) | 31 | #define INVALID_P2M_ENTRY (~0UL) |
32 | #define FOREIGN_FRAME_BIT (1UL<<31) | 32 | #define FOREIGN_FRAME_BIT (1UL<<(BITS_PER_LONG-1)) |
33 | #define IDENTITY_FRAME_BIT (1UL<<(BITS_PER_LONG-2)) | ||
33 | #define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT) | 34 | #define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT) |
35 | #define IDENTITY_FRAME(m) ((m) | IDENTITY_FRAME_BIT) | ||
34 | 36 | ||
35 | /* Maximum amount of memory we can handle in a domain in pages */ | 37 | /* Maximum amount of memory we can handle in a domain in pages */ |
36 | #define MAX_DOMAIN_PAGES \ | 38 | #define MAX_DOMAIN_PAGES \ |
@@ -41,12 +43,18 @@ extern unsigned int machine_to_phys_order; | |||
41 | 43 | ||
42 | extern unsigned long get_phys_to_machine(unsigned long pfn); | 44 | extern unsigned long get_phys_to_machine(unsigned long pfn); |
43 | extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn); | 45 | extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn); |
46 | extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); | ||
47 | extern unsigned long set_phys_range_identity(unsigned long pfn_s, | ||
48 | unsigned long pfn_e); | ||
44 | 49 | ||
45 | extern int m2p_add_override(unsigned long mfn, struct page *page); | 50 | extern int m2p_add_override(unsigned long mfn, struct page *page); |
46 | extern int m2p_remove_override(struct page *page); | 51 | extern int m2p_remove_override(struct page *page); |
47 | extern struct page *m2p_find_override(unsigned long mfn); | 52 | extern struct page *m2p_find_override(unsigned long mfn); |
48 | extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn); | 53 | extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn); |
49 | 54 | ||
55 | #ifdef CONFIG_XEN_DEBUG_FS | ||
56 | extern int p2m_dump_show(struct seq_file *m, void *v); | ||
57 | #endif | ||
50 | static inline unsigned long pfn_to_mfn(unsigned long pfn) | 58 | static inline unsigned long pfn_to_mfn(unsigned long pfn) |
51 | { | 59 | { |
52 | unsigned long mfn; | 60 | unsigned long mfn; |
@@ -57,7 +65,7 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn) | |||
57 | mfn = get_phys_to_machine(pfn); | 65 | mfn = get_phys_to_machine(pfn); |
58 | 66 | ||
59 | if (mfn != INVALID_P2M_ENTRY) | 67 | if (mfn != INVALID_P2M_ENTRY) |
60 | mfn &= ~FOREIGN_FRAME_BIT; | 68 | mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT); |
61 | 69 | ||
62 | return mfn; | 70 | return mfn; |
63 | } | 71 | } |
@@ -73,25 +81,44 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn) | |||
73 | static inline unsigned long mfn_to_pfn(unsigned long mfn) | 81 | static inline unsigned long mfn_to_pfn(unsigned long mfn) |
74 | { | 82 | { |
75 | unsigned long pfn; | 83 | unsigned long pfn; |
84 | int ret = 0; | ||
76 | 85 | ||
77 | if (xen_feature(XENFEAT_auto_translated_physmap)) | 86 | if (xen_feature(XENFEAT_auto_translated_physmap)) |
78 | return mfn; | 87 | return mfn; |
79 | 88 | ||
89 | if (unlikely((mfn >> machine_to_phys_order) != 0)) { | ||
90 | pfn = ~0; | ||
91 | goto try_override; | ||
92 | } | ||
80 | pfn = 0; | 93 | pfn = 0; |
81 | /* | 94 | /* |
82 | * The array access can fail (e.g., device space beyond end of RAM). | 95 | * The array access can fail (e.g., device space beyond end of RAM). |
83 | * In such cases it doesn't matter what we return (we return garbage), | 96 | * In such cases it doesn't matter what we return (we return garbage), |
84 | * but we must handle the fault without crashing! | 97 | * but we must handle the fault without crashing! |
85 | */ | 98 | */ |
86 | __get_user(pfn, &machine_to_phys_mapping[mfn]); | 99 | ret = __get_user(pfn, &machine_to_phys_mapping[mfn]); |
87 | 100 | try_override: | |
88 | /* | 101 | /* ret might be < 0 if there are no entries in the m2p for mfn */ |
89 | * If this appears to be a foreign mfn (because the pfn | 102 | if (ret < 0) |
90 | * doesn't map back to the mfn), then check the local override | 103 | pfn = ~0; |
91 | * table to see if there's a better pfn to use. | 104 | else if (get_phys_to_machine(pfn) != mfn) |
105 | /* | ||
106 | * If this appears to be a foreign mfn (because the pfn | ||
107 | * doesn't map back to the mfn), then check the local override | ||
108 | * table to see if there's a better pfn to use. | ||
109 | * | ||
110 | * m2p_find_override_pfn returns ~0 if it doesn't find anything. | ||
111 | */ | ||
112 | pfn = m2p_find_override_pfn(mfn, ~0); | ||
113 | |||
114 | /* | ||
115 | * pfn is ~0 if there are no entries in the m2p for mfn or if the | ||
116 | * entry doesn't map back to the mfn and m2p_override doesn't have a | ||
117 | * valid entry for it. | ||
92 | */ | 118 | */ |
93 | if (get_phys_to_machine(pfn) != mfn) | 119 | if (pfn == ~0 && |
94 | pfn = m2p_find_override_pfn(mfn, pfn); | 120 | get_phys_to_machine(mfn) == IDENTITY_FRAME(mfn)) |
121 | pfn = mfn; | ||
95 | 122 | ||
96 | return pfn; | 123 | return pfn; |
97 | } | 124 | } |
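The reworked mfn_to_pfn() above falls through three lookups: the m2p array (consulted only when the mfn is inside its range), the m2p_override table, and finally an identity mapping recorded in the p2m. A stand-alone C sketch of that decision order, with the kernel accessors replaced by stubs; the flag bit and table size below are assumptions for the example, not the real constants:

#include <stdio.h>

#define INVALID_P2M_ENTRY   (~0UL)
#define IDENTITY_FRAME_BIT  (1UL << 62)              /* placeholder flag bit */
#define IDENTITY_FRAME(m)   ((m) | IDENTITY_FRAME_BIT)

static unsigned long machine_to_phys_order = 40;     /* assumed m2p size */

/* Stubs standing in for the real p2m/m2p accessors. */
static unsigned long get_phys_to_machine(unsigned long pfn) { return INVALID_P2M_ENTRY; }
static int m2p_lookup(unsigned long mfn, unsigned long *pfn) { return -1; }
static unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long def) { return def; }

static unsigned long model_mfn_to_pfn(unsigned long mfn)
{
        unsigned long pfn = ~0UL;
        int ret = -1;

        /* 1. Only consult the m2p array when the mfn is inside its range. */
        if ((mfn >> machine_to_phys_order) == 0)
                ret = m2p_lookup(mfn, &pfn);

        /* 2. No entry, or the entry does not map back: try the override table. */
        if (ret < 0)
                pfn = ~0UL;
        else if (get_phys_to_machine(pfn) != mfn)
                pfn = m2p_find_override_pfn(mfn, ~0UL);

        /* 3. Still nothing: accept an identity-mapped frame recorded in the p2m. */
        if (pfn == ~0UL && get_phys_to_machine(mfn) == IDENTITY_FRAME(mfn))
                pfn = mfn;

        return pfn;
}

int main(void)
{
        printf("pfn = %#lx\n", model_mfn_to_pfn(0x1234));
        return 0;
}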
diff --git a/arch/x86/include/asm/xen/pci.h b/arch/x86/include/asm/xen/pci.h index 2329b3eaf8d3..aa8620989162 100644 --- a/arch/x86/include/asm/xen/pci.h +++ b/arch/x86/include/asm/xen/pci.h | |||
@@ -27,16 +27,16 @@ static inline void __init xen_setup_pirqs(void) | |||
27 | * its own functions. | 27 | * its own functions. |
28 | */ | 28 | */ |
29 | struct xen_pci_frontend_ops { | 29 | struct xen_pci_frontend_ops { |
30 | int (*enable_msi)(struct pci_dev *dev, int **vectors); | 30 | int (*enable_msi)(struct pci_dev *dev, int vectors[]); |
31 | void (*disable_msi)(struct pci_dev *dev); | 31 | void (*disable_msi)(struct pci_dev *dev); |
32 | int (*enable_msix)(struct pci_dev *dev, int **vectors, int nvec); | 32 | int (*enable_msix)(struct pci_dev *dev, int vectors[], int nvec); |
33 | void (*disable_msix)(struct pci_dev *dev); | 33 | void (*disable_msix)(struct pci_dev *dev); |
34 | }; | 34 | }; |
35 | 35 | ||
36 | extern struct xen_pci_frontend_ops *xen_pci_frontend; | 36 | extern struct xen_pci_frontend_ops *xen_pci_frontend; |
37 | 37 | ||
38 | static inline int xen_pci_frontend_enable_msi(struct pci_dev *dev, | 38 | static inline int xen_pci_frontend_enable_msi(struct pci_dev *dev, |
39 | int **vectors) | 39 | int vectors[]) |
40 | { | 40 | { |
41 | if (xen_pci_frontend && xen_pci_frontend->enable_msi) | 41 | if (xen_pci_frontend && xen_pci_frontend->enable_msi) |
42 | return xen_pci_frontend->enable_msi(dev, vectors); | 42 | return xen_pci_frontend->enable_msi(dev, vectors); |
@@ -48,7 +48,7 @@ static inline void xen_pci_frontend_disable_msi(struct pci_dev *dev) | |||
48 | xen_pci_frontend->disable_msi(dev); | 48 | xen_pci_frontend->disable_msi(dev); |
49 | } | 49 | } |
50 | static inline int xen_pci_frontend_enable_msix(struct pci_dev *dev, | 50 | static inline int xen_pci_frontend_enable_msix(struct pci_dev *dev, |
51 | int **vectors, int nvec) | 51 | int vectors[], int nvec) |
52 | { | 52 | { |
53 | if (xen_pci_frontend && xen_pci_frontend->enable_msix) | 53 | if (xen_pci_frontend && xen_pci_frontend->enable_msix) |
54 | return xen_pci_frontend->enable_msix(dev, vectors, nvec); | 54 | return xen_pci_frontend->enable_msix(dev, vectors, nvec); |
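The prototype change above (int **vectors becomes int vectors[]) means the frontend op fills a caller-owned array of vector numbers instead of handing back a new one through a double pointer. A minimal model of the new calling convention; the function name and IRQ numbers are invented for illustration:

#include <stdio.h>

/* Invented stand-in for the enable_msix op: write nvec vector numbers
 * into the array the caller passed in. */
static int fake_enable_msix(int vectors[], int nvec)
{
        for (int i = 0; i < nvec; i++)
                vectors[i] = 32 + i;            /* pretend IRQ numbers */
        return 0;
}

int main(void)
{
        int vectors[4];

        if (fake_enable_msix(vectors, 4) == 0)
                for (int i = 0; i < 4; i++)
                        printf("msix[%d] -> irq %d\n", i, vectors[i]);
        return 0;
}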
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 34244b2cd880..743642f1a36c 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile | |||
@@ -47,7 +47,7 @@ obj-y += tsc.o io_delay.o rtc.o | |||
47 | obj-y += pci-iommu_table.o | 47 | obj-y += pci-iommu_table.o |
48 | obj-y += resource.o | 48 | obj-y += resource.o |
49 | 49 | ||
50 | obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o | 50 | obj-y += trampoline.o trampoline_$(BITS).o |
51 | obj-y += process.o | 51 | obj-y += process.o |
52 | obj-y += i387.o xsave.o | 52 | obj-y += i387.o xsave.o |
53 | obj-y += ptrace.o | 53 | obj-y += ptrace.o |
@@ -59,6 +59,7 @@ obj-$(CONFIG_STACKTRACE) += stacktrace.o | |||
59 | obj-y += cpu/ | 59 | obj-y += cpu/ |
60 | obj-y += acpi/ | 60 | obj-y += acpi/ |
61 | obj-y += reboot.o | 61 | obj-y += reboot.o |
62 | obj-$(CONFIG_X86_32) += reboot_32.o | ||
62 | obj-$(CONFIG_MCA) += mca_32.o | 63 | obj-$(CONFIG_MCA) += mca_32.o |
63 | obj-$(CONFIG_X86_MSR) += msr.o | 64 | obj-$(CONFIG_X86_MSR) += msr.o |
64 | obj-$(CONFIG_X86_CPUID) += cpuid.o | 65 | obj-$(CONFIG_X86_CPUID) += cpuid.o |
@@ -66,10 +67,9 @@ obj-$(CONFIG_PCI) += early-quirks.o | |||
66 | apm-y := apm_32.o | 67 | apm-y := apm_32.o |
67 | obj-$(CONFIG_APM) += apm.o | 68 | obj-$(CONFIG_APM) += apm.o |
68 | obj-$(CONFIG_SMP) += smp.o | 69 | obj-$(CONFIG_SMP) += smp.o |
69 | obj-$(CONFIG_SMP) += smpboot.o tsc_sync.o | 70 | obj-$(CONFIG_SMP) += smpboot.o |
71 | obj-$(CONFIG_SMP) += tsc_sync.o | ||
70 | obj-$(CONFIG_SMP) += setup_percpu.o | 72 | obj-$(CONFIG_SMP) += setup_percpu.o |
71 | obj-$(CONFIG_X86_64_SMP) += tsc_sync.o | ||
72 | obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o | ||
73 | obj-$(CONFIG_X86_MPPARSE) += mpparse.o | 73 | obj-$(CONFIG_X86_MPPARSE) += mpparse.o |
74 | obj-y += apic/ | 74 | obj-y += apic/ |
75 | obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o | 75 | obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o |
@@ -109,6 +109,7 @@ obj-$(CONFIG_MICROCODE) += microcode.o | |||
109 | obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o | 109 | obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o |
110 | 110 | ||
111 | obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o | 111 | obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o |
112 | obj-$(CONFIG_OF) += devicetree.o | ||
112 | 113 | ||
113 | ### | 114 | ### |
114 | # 64 bit specific files | 115 | # 64 bit specific files |
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 3e6e2d68f761..9a966c579af5 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c | |||
@@ -595,14 +595,8 @@ static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) | |||
595 | nid = acpi_get_node(handle); | 595 | nid = acpi_get_node(handle); |
596 | if (nid == -1 || !node_online(nid)) | 596 | if (nid == -1 || !node_online(nid)) |
597 | return; | 597 | return; |
598 | #ifdef CONFIG_X86_64 | 598 | set_apicid_to_node(physid, nid); |
599 | apicid_to_node[physid] = nid; | ||
600 | numa_set_node(cpu, nid); | 599 | numa_set_node(cpu, nid); |
601 | #else /* CONFIG_X86_32 */ | ||
602 | apicid_2_node[physid] = nid; | ||
603 | cpu_to_node_map[cpu] = nid; | ||
604 | #endif | ||
605 | |||
606 | #endif | 600 | #endif |
607 | } | 601 | } |
608 | 602 | ||
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S index 28595d6df47c..ead21b663117 100644 --- a/arch/x86/kernel/acpi/realmode/wakeup.S +++ b/arch/x86/kernel/acpi/realmode/wakeup.S | |||
@@ -6,11 +6,17 @@ | |||
6 | #include <asm/page_types.h> | 6 | #include <asm/page_types.h> |
7 | #include <asm/pgtable_types.h> | 7 | #include <asm/pgtable_types.h> |
8 | #include <asm/processor-flags.h> | 8 | #include <asm/processor-flags.h> |
9 | #include "wakeup.h" | ||
9 | 10 | ||
10 | .code16 | 11 | .code16 |
11 | .section ".header", "a" | 12 | .section ".jump", "ax" |
13 | .globl _start | ||
14 | _start: | ||
15 | cli | ||
16 | jmp wakeup_code | ||
12 | 17 | ||
13 | /* This should match the structure in wakeup.h */ | 18 | /* This should match the structure in wakeup.h */ |
19 | .section ".header", "a" | ||
14 | .globl wakeup_header | 20 | .globl wakeup_header |
15 | wakeup_header: | 21 | wakeup_header: |
16 | video_mode: .short 0 /* Video mode number */ | 22 | video_mode: .short 0 /* Video mode number */ |
@@ -30,14 +36,11 @@ wakeup_jmp: .byte 0xea /* ljmpw */ | |||
30 | wakeup_jmp_off: .word 3f | 36 | wakeup_jmp_off: .word 3f |
31 | wakeup_jmp_seg: .word 0 | 37 | wakeup_jmp_seg: .word 0 |
32 | wakeup_gdt: .quad 0, 0, 0 | 38 | wakeup_gdt: .quad 0, 0, 0 |
33 | signature: .long 0x51ee1111 | 39 | signature: .long WAKEUP_HEADER_SIGNATURE |
34 | 40 | ||
35 | .text | 41 | .text |
36 | .globl _start | ||
37 | .code16 | 42 | .code16 |
38 | wakeup_code: | 43 | wakeup_code: |
39 | _start: | ||
40 | cli | ||
41 | cld | 44 | cld |
42 | 45 | ||
43 | /* Apparently some dimwit BIOS programmers don't know how to | 46 | /* Apparently some dimwit BIOS programmers don't know how to |
@@ -77,12 +80,12 @@ _start: | |||
77 | 80 | ||
78 | /* Check header signature... */ | 81 | /* Check header signature... */ |
79 | movl signature, %eax | 82 | movl signature, %eax |
80 | cmpl $0x51ee1111, %eax | 83 | cmpl $WAKEUP_HEADER_SIGNATURE, %eax |
81 | jne bogus_real_magic | 84 | jne bogus_real_magic |
82 | 85 | ||
83 | /* Check we really have everything... */ | 86 | /* Check we really have everything... */ |
84 | movl end_signature, %eax | 87 | movl end_signature, %eax |
85 | cmpl $0x65a22c82, %eax | 88 | cmpl $WAKEUP_END_SIGNATURE, %eax |
86 | jne bogus_real_magic | 89 | jne bogus_real_magic |
87 | 90 | ||
88 | /* Call the C code */ | 91 | /* Call the C code */ |
@@ -147,3 +150,7 @@ wakeup_heap: | |||
147 | wakeup_stack: | 150 | wakeup_stack: |
148 | .space 2048 | 151 | .space 2048 |
149 | wakeup_stack_end: | 152 | wakeup_stack_end: |
153 | |||
154 | .section ".signature","a" | ||
155 | end_signature: | ||
156 | .long WAKEUP_END_SIGNATURE | ||
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.h b/arch/x86/kernel/acpi/realmode/wakeup.h index 69d38d0b2b64..e1828c07e79c 100644 --- a/arch/x86/kernel/acpi/realmode/wakeup.h +++ b/arch/x86/kernel/acpi/realmode/wakeup.h | |||
@@ -35,7 +35,8 @@ struct wakeup_header { | |||
35 | extern struct wakeup_header wakeup_header; | 35 | extern struct wakeup_header wakeup_header; |
36 | #endif | 36 | #endif |
37 | 37 | ||
38 | #define HEADER_OFFSET 0x3f00 | 38 | #define WAKEUP_HEADER_OFFSET 8 |
39 | #define WAKEUP_SIZE 0x4000 | 39 | #define WAKEUP_HEADER_SIGNATURE 0x51ee1111 |
40 | #define WAKEUP_END_SIGNATURE 0x65a22c82 | ||
40 | 41 | ||
41 | #endif /* ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H */ | 42 | #endif /* ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H */ |
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.lds.S b/arch/x86/kernel/acpi/realmode/wakeup.lds.S index 060fff8f5c5b..d4f8010a5b1b 100644 --- a/arch/x86/kernel/acpi/realmode/wakeup.lds.S +++ b/arch/x86/kernel/acpi/realmode/wakeup.lds.S | |||
@@ -13,9 +13,19 @@ ENTRY(_start) | |||
13 | SECTIONS | 13 | SECTIONS |
14 | { | 14 | { |
15 | . = 0; | 15 | . = 0; |
16 | .jump : { | ||
17 | *(.jump) | ||
18 | } = 0x90909090 | ||
19 | |||
20 | . = WAKEUP_HEADER_OFFSET; | ||
21 | .header : { | ||
22 | *(.header) | ||
23 | } | ||
24 | |||
25 | . = ALIGN(16); | ||
16 | .text : { | 26 | .text : { |
17 | *(.text*) | 27 | *(.text*) |
18 | } | 28 | } = 0x90909090 |
19 | 29 | ||
20 | . = ALIGN(16); | 30 | . = ALIGN(16); |
21 | .rodata : { | 31 | .rodata : { |
@@ -33,11 +43,6 @@ SECTIONS | |||
33 | *(.data*) | 43 | *(.data*) |
34 | } | 44 | } |
35 | 45 | ||
36 | .signature : { | ||
37 | end_signature = .; | ||
38 | LONG(0x65a22c82) | ||
39 | } | ||
40 | |||
41 | . = ALIGN(16); | 46 | . = ALIGN(16); |
42 | .bss : { | 47 | .bss : { |
43 | __bss_start = .; | 48 | __bss_start = .; |
@@ -45,20 +50,13 @@ SECTIONS | |||
45 | __bss_end = .; | 50 | __bss_end = .; |
46 | } | 51 | } |
47 | 52 | ||
48 | . = HEADER_OFFSET; | 53 | .signature : { |
49 | .header : { | 54 | *(.signature) |
50 | *(.header) | ||
51 | } | 55 | } |
52 | 56 | ||
53 | . = ALIGN(16); | ||
54 | _end = .; | 57 | _end = .; |
55 | 58 | ||
56 | /DISCARD/ : { | 59 | /DISCARD/ : { |
57 | *(.note*) | 60 | *(.note*) |
58 | } | 61 | } |
59 | |||
60 | /* | ||
61 | * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility: | ||
62 | */ | ||
63 | . = ASSERT(_end <= WAKEUP_SIZE, "Wakeup too big!"); | ||
64 | } | 62 | } |
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index 68d1537b8c81..4572c58e66d5 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c | |||
@@ -18,12 +18,8 @@ | |||
18 | #include "realmode/wakeup.h" | 18 | #include "realmode/wakeup.h" |
19 | #include "sleep.h" | 19 | #include "sleep.h" |
20 | 20 | ||
21 | unsigned long acpi_wakeup_address; | ||
22 | unsigned long acpi_realmode_flags; | 21 | unsigned long acpi_realmode_flags; |
23 | 22 | ||
24 | /* address in low memory of the wakeup routine. */ | ||
25 | static unsigned long acpi_realmode; | ||
26 | |||
27 | #if defined(CONFIG_SMP) && defined(CONFIG_64BIT) | 23 | #if defined(CONFIG_SMP) && defined(CONFIG_64BIT) |
28 | static char temp_stack[4096]; | 24 | static char temp_stack[4096]; |
29 | #endif | 25 | #endif |
@@ -33,22 +29,17 @@ static char temp_stack[4096]; | |||
33 | * | 29 | * |
34 | * Create an identity mapped page table and copy the wakeup routine to | 30 | * Create an identity mapped page table and copy the wakeup routine to |
35 | * low memory. | 31 | * low memory. |
36 | * | ||
37 | * Note that this is too late to change acpi_wakeup_address. | ||
38 | */ | 32 | */ |
39 | int acpi_save_state_mem(void) | 33 | int acpi_save_state_mem(void) |
40 | { | 34 | { |
41 | struct wakeup_header *header; | 35 | struct wakeup_header *header; |
36 | /* address in low memory of the wakeup routine. */ | ||
37 | char *acpi_realmode; | ||
42 | 38 | ||
43 | if (!acpi_realmode) { | 39 | acpi_realmode = TRAMPOLINE_SYM(acpi_wakeup_code); |
44 | printk(KERN_ERR "Could not allocate memory during boot, " | ||
45 | "S3 disabled\n"); | ||
46 | return -ENOMEM; | ||
47 | } | ||
48 | memcpy((void *)acpi_realmode, &wakeup_code_start, WAKEUP_SIZE); | ||
49 | 40 | ||
50 | header = (struct wakeup_header *)(acpi_realmode + HEADER_OFFSET); | 41 | header = (struct wakeup_header *)(acpi_realmode + WAKEUP_HEADER_OFFSET); |
51 | if (header->signature != 0x51ee1111) { | 42 | if (header->signature != WAKEUP_HEADER_SIGNATURE) { |
52 | printk(KERN_ERR "wakeup header does not match\n"); | 43 | printk(KERN_ERR "wakeup header does not match\n"); |
53 | return -EINVAL; | 44 | return -EINVAL; |
54 | } | 45 | } |
@@ -68,9 +59,7 @@ int acpi_save_state_mem(void) | |||
68 | /* GDT[0]: GDT self-pointer */ | 59 | /* GDT[0]: GDT self-pointer */ |
69 | header->wakeup_gdt[0] = | 60 | header->wakeup_gdt[0] = |
70 | (u64)(sizeof(header->wakeup_gdt) - 1) + | 61 | (u64)(sizeof(header->wakeup_gdt) - 1) + |
71 | ((u64)(acpi_wakeup_address + | 62 | ((u64)__pa(&header->wakeup_gdt) << 16); |
72 | ((char *)&header->wakeup_gdt - (char *)acpi_realmode)) | ||
73 | << 16); | ||
74 | /* GDT[1]: big real mode-like code segment */ | 63 | /* GDT[1]: big real mode-like code segment */ |
75 | header->wakeup_gdt[1] = | 64 | header->wakeup_gdt[1] = |
76 | GDT_ENTRY(0x809b, acpi_wakeup_address, 0xfffff); | 65 | GDT_ENTRY(0x809b, acpi_wakeup_address, 0xfffff); |
@@ -96,7 +85,7 @@ int acpi_save_state_mem(void) | |||
96 | header->pmode_cr3 = (u32)__pa(&initial_page_table); | 85 | header->pmode_cr3 = (u32)__pa(&initial_page_table); |
97 | saved_magic = 0x12345678; | 86 | saved_magic = 0x12345678; |
98 | #else /* CONFIG_64BIT */ | 87 | #else /* CONFIG_64BIT */ |
99 | header->trampoline_segment = setup_trampoline() >> 4; | 88 | header->trampoline_segment = trampoline_address() >> 4; |
100 | #ifdef CONFIG_SMP | 89 | #ifdef CONFIG_SMP |
101 | stack_start = (unsigned long)temp_stack + sizeof(temp_stack); | 90 | stack_start = (unsigned long)temp_stack + sizeof(temp_stack); |
102 | early_gdt_descr.address = | 91 | early_gdt_descr.address = |
@@ -117,46 +106,6 @@ void acpi_restore_state_mem(void) | |||
117 | { | 106 | { |
118 | } | 107 | } |
119 | 108 | ||
120 | |||
121 | /** | ||
122 | * acpi_reserve_wakeup_memory - do _very_ early ACPI initialisation | ||
123 | * | ||
124 | * We allocate a page from the first 1MB of memory for the wakeup | ||
125 | * routine for when we come back from a sleep state. The | ||
126 | * runtime allocator allows specification of <16MB pages, but not | ||
127 | * <1MB pages. | ||
128 | */ | ||
129 | void __init acpi_reserve_wakeup_memory(void) | ||
130 | { | ||
131 | phys_addr_t mem; | ||
132 | |||
133 | if ((&wakeup_code_end - &wakeup_code_start) > WAKEUP_SIZE) { | ||
134 | printk(KERN_ERR | ||
135 | "ACPI: Wakeup code way too big, S3 disabled.\n"); | ||
136 | return; | ||
137 | } | ||
138 | |||
139 | mem = memblock_find_in_range(0, 1<<20, WAKEUP_SIZE, PAGE_SIZE); | ||
140 | |||
141 | if (mem == MEMBLOCK_ERROR) { | ||
142 | printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n"); | ||
143 | return; | ||
144 | } | ||
145 | acpi_realmode = (unsigned long) phys_to_virt(mem); | ||
146 | acpi_wakeup_address = mem; | ||
147 | memblock_x86_reserve_range(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP"); | ||
148 | } | ||
149 | |||
150 | int __init acpi_configure_wakeup_memory(void) | ||
151 | { | ||
152 | if (acpi_realmode) | ||
153 | set_memory_x(acpi_realmode, WAKEUP_SIZE >> PAGE_SHIFT); | ||
154 | |||
155 | return 0; | ||
156 | } | ||
157 | arch_initcall(acpi_configure_wakeup_memory); | ||
158 | |||
159 | |||
160 | static int __init acpi_sleep_setup(char *str) | 109 | static int __init acpi_sleep_setup(char *str) |
161 | { | 110 | { |
162 | while ((str != NULL) && (*str != '\0')) { | 111 | while ((str != NULL) && (*str != '\0')) { |
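The simplified wakeup_gdt[0] assignment above packs a GDT pseudo-descriptor by hand: the table limit goes in bits 0-15 and the 32-bit physical base starts at bit 16, so the first quadword can point at the table itself. A small sketch of that packing, using a made-up physical address:

#include <stdio.h>
#include <stdint.h>

/* Pack a GDT self-pointer the same way wakeup_gdt[0] does above:
 * limit (size - 1) in bits 0..15, physical base shifted to bit 16. */
static uint64_t gdt_self_pointer(uint32_t phys_base, uint32_t table_bytes)
{
        return (uint64_t)(table_bytes - 1) + ((uint64_t)phys_base << 16);
}

int main(void)
{
        /* three 8-byte entries at an invented low-memory address */
        uint64_t desc = gdt_self_pointer(0x9a000, 3 * 8);

        printf("gdt[0] = %#llx\n", (unsigned long long)desc);
        return 0;
}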
diff --git a/arch/x86/kernel/acpi/sleep.h b/arch/x86/kernel/acpi/sleep.h index adbcbaa6f1df..86ba1c87165b 100644 --- a/arch/x86/kernel/acpi/sleep.h +++ b/arch/x86/kernel/acpi/sleep.h | |||
@@ -4,13 +4,10 @@ | |||
4 | 4 | ||
5 | #include <asm/trampoline.h> | 5 | #include <asm/trampoline.h> |
6 | 6 | ||
7 | extern char wakeup_code_start, wakeup_code_end; | ||
8 | |||
9 | extern unsigned long saved_video_mode; | 7 | extern unsigned long saved_video_mode; |
10 | extern long saved_magic; | 8 | extern long saved_magic; |
11 | 9 | ||
12 | extern int wakeup_pmode_return; | 10 | extern int wakeup_pmode_return; |
13 | extern char swsusp_pg_dir[PAGE_SIZE]; | ||
14 | 11 | ||
15 | extern unsigned long acpi_copy_wakeup_routine(unsigned long); | 12 | extern unsigned long acpi_copy_wakeup_routine(unsigned long); |
16 | extern void wakeup_long64(void); | 13 | extern void wakeup_long64(void); |
diff --git a/arch/x86/kernel/acpi/wakeup_rm.S b/arch/x86/kernel/acpi/wakeup_rm.S index 6ff3b5730575..63b8ab524f2c 100644 --- a/arch/x86/kernel/acpi/wakeup_rm.S +++ b/arch/x86/kernel/acpi/wakeup_rm.S | |||
@@ -2,9 +2,11 @@ | |||
2 | * Wrapper script for the realmode binary as a transport object | 2 | * Wrapper script for the realmode binary as a transport object |
3 | * before copying to low memory. | 3 | * before copying to low memory. |
4 | */ | 4 | */ |
5 | .section ".rodata","a" | 5 | #include <asm/page_types.h> |
6 | .globl wakeup_code_start, wakeup_code_end | 6 | |
7 | wakeup_code_start: | 7 | .section ".x86_trampoline","a" |
8 | .balign PAGE_SIZE | ||
9 | .globl acpi_wakeup_code | ||
10 | acpi_wakeup_code: | ||
8 | .incbin "arch/x86/kernel/acpi/realmode/wakeup.bin" | 11 | .incbin "arch/x86/kernel/acpi/realmode/wakeup.bin" |
9 | wakeup_code_end: | 12 | .size acpi_wakeup_code, .-acpi_wakeup_code |
10 | .size wakeup_code_start, .-wakeup_code_start | ||
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 7038b95d363f..4db35544de73 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c | |||
@@ -620,7 +620,12 @@ static int __kprobes stop_machine_text_poke(void *data) | |||
620 | flush_icache_range((unsigned long)p->addr, | 620 | flush_icache_range((unsigned long)p->addr, |
621 | (unsigned long)p->addr + p->len); | 621 | (unsigned long)p->addr + p->len); |
622 | } | 622 | } |
623 | 623 | /* | |
624 | * Intel Architecture Software Developer's Manual section 7.1.3 specifies | ||
625 | * that a core serializing instruction such as "cpuid" should be | ||
626 | * executed on _each_ core before the new instruction is made visible. | ||
627 | */ | ||
628 | sync_core(); | ||
624 | return 0; | 629 | return 0; |
625 | } | 630 | } |
626 | 631 | ||
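The sync_core() added above relies on executing an architecturally serializing instruction such as CPUID on each core before the patched instruction is used. A user-space approximation of that barrier, illustrative only and not the kernel helper:

#include <stdio.h>

/* Issue CPUID purely for its serializing side effect; all outputs are
 * discarded. This mirrors what sync_core() boiled down to on x86. */
static inline void serialize_core(void)
{
        unsigned int eax = 0, ebx, ecx = 0, edx;

        __asm__ __volatile__("cpuid"
                             : "+a"(eax), "=b"(ebx), "+c"(ecx), "=d"(edx)
                             :
                             : "memory");
}

int main(void)
{
        serialize_core();
        puts("serialized");
        return 0;
}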
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 0a99f7198bc3..65634190ffd6 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c | |||
@@ -12,7 +12,7 @@ | |||
12 | 12 | ||
13 | static u32 *flush_words; | 13 | static u32 *flush_words; |
14 | 14 | ||
15 | struct pci_device_id amd_nb_misc_ids[] = { | 15 | const struct pci_device_id amd_nb_misc_ids[] = { |
16 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) }, | 16 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) }, |
17 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, | 17 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, |
18 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) }, | 18 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) }, |
@@ -20,6 +20,11 @@ struct pci_device_id amd_nb_misc_ids[] = { | |||
20 | }; | 20 | }; |
21 | EXPORT_SYMBOL(amd_nb_misc_ids); | 21 | EXPORT_SYMBOL(amd_nb_misc_ids); |
22 | 22 | ||
23 | static struct pci_device_id amd_nb_link_ids[] = { | ||
24 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_LINK) }, | ||
25 | {} | ||
26 | }; | ||
27 | |||
23 | const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = { | 28 | const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = { |
24 | { 0x00, 0x18, 0x20 }, | 29 | { 0x00, 0x18, 0x20 }, |
25 | { 0xff, 0x00, 0x20 }, | 30 | { 0xff, 0x00, 0x20 }, |
@@ -31,7 +36,7 @@ struct amd_northbridge_info amd_northbridges; | |||
31 | EXPORT_SYMBOL(amd_northbridges); | 36 | EXPORT_SYMBOL(amd_northbridges); |
32 | 37 | ||
33 | static struct pci_dev *next_northbridge(struct pci_dev *dev, | 38 | static struct pci_dev *next_northbridge(struct pci_dev *dev, |
34 | struct pci_device_id *ids) | 39 | const struct pci_device_id *ids) |
35 | { | 40 | { |
36 | do { | 41 | do { |
37 | dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev); | 42 | dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev); |
@@ -43,9 +48,9 @@ static struct pci_dev *next_northbridge(struct pci_dev *dev, | |||
43 | 48 | ||
44 | int amd_cache_northbridges(void) | 49 | int amd_cache_northbridges(void) |
45 | { | 50 | { |
46 | int i = 0; | 51 | u16 i = 0; |
47 | struct amd_northbridge *nb; | 52 | struct amd_northbridge *nb; |
48 | struct pci_dev *misc; | 53 | struct pci_dev *misc, *link; |
49 | 54 | ||
50 | if (amd_nb_num()) | 55 | if (amd_nb_num()) |
51 | return 0; | 56 | return 0; |
@@ -64,10 +69,12 @@ int amd_cache_northbridges(void) | |||
64 | amd_northbridges.nb = nb; | 69 | amd_northbridges.nb = nb; |
65 | amd_northbridges.num = i; | 70 | amd_northbridges.num = i; |
66 | 71 | ||
67 | misc = NULL; | 72 | link = misc = NULL; |
68 | for (i = 0; i != amd_nb_num(); i++) { | 73 | for (i = 0; i != amd_nb_num(); i++) { |
69 | node_to_amd_nb(i)->misc = misc = | 74 | node_to_amd_nb(i)->misc = misc = |
70 | next_northbridge(misc, amd_nb_misc_ids); | 75 | next_northbridge(misc, amd_nb_misc_ids); |
76 | node_to_amd_nb(i)->link = link = | ||
77 | next_northbridge(link, amd_nb_link_ids); | ||
71 | } | 78 | } |
72 | 79 | ||
73 | /* some CPU families (e.g. family 0x11) do not support GART */ | 80 | /* some CPU families (e.g. family 0x11) do not support GART */ |
@@ -85,26 +92,95 @@ int amd_cache_northbridges(void) | |||
85 | boot_cpu_data.x86_mask >= 0x1)) | 92 | boot_cpu_data.x86_mask >= 0x1)) |
86 | amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE; | 93 | amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE; |
87 | 94 | ||
95 | if (boot_cpu_data.x86 == 0x15) | ||
96 | amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE; | ||
97 | |||
98 | /* L3 cache partitioning is supported on family 0x15 */ | ||
99 | if (boot_cpu_data.x86 == 0x15) | ||
100 | amd_northbridges.flags |= AMD_NB_L3_PARTITIONING; | ||
101 | |||
88 | return 0; | 102 | return 0; |
89 | } | 103 | } |
90 | EXPORT_SYMBOL_GPL(amd_cache_northbridges); | 104 | EXPORT_SYMBOL_GPL(amd_cache_northbridges); |
91 | 105 | ||
92 | /* Ignores subdevice/subvendor but as far as I can figure out | 106 | /* |
93 | they're useless anyways */ | 107 | * Ignores subdevice/subvendor but as far as I can figure out |
94 | int __init early_is_amd_nb(u32 device) | 108 | * they're useless anyways |
109 | */ | ||
110 | bool __init early_is_amd_nb(u32 device) | ||
95 | { | 111 | { |
96 | struct pci_device_id *id; | 112 | const struct pci_device_id *id; |
97 | u32 vendor = device & 0xffff; | 113 | u32 vendor = device & 0xffff; |
114 | |||
98 | device >>= 16; | 115 | device >>= 16; |
99 | for (id = amd_nb_misc_ids; id->vendor; id++) | 116 | for (id = amd_nb_misc_ids; id->vendor; id++) |
100 | if (vendor == id->vendor && device == id->device) | 117 | if (vendor == id->vendor && device == id->device) |
101 | return 1; | 118 | return true; |
119 | return false; | ||
120 | } | ||
121 | |||
122 | int amd_get_subcaches(int cpu) | ||
123 | { | ||
124 | struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link; | ||
125 | unsigned int mask; | ||
126 | int cuid = 0; | ||
127 | |||
128 | if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) | ||
129 | return 0; | ||
130 | |||
131 | pci_read_config_dword(link, 0x1d4, &mask); | ||
132 | |||
133 | #ifdef CONFIG_SMP | ||
134 | cuid = cpu_data(cpu).compute_unit_id; | ||
135 | #endif | ||
136 | return (mask >> (4 * cuid)) & 0xf; | ||
137 | } | ||
138 | |||
139 | int amd_set_subcaches(int cpu, int mask) | ||
140 | { | ||
141 | static unsigned int reset, ban; | ||
142 | struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu)); | ||
143 | unsigned int reg; | ||
144 | int cuid = 0; | ||
145 | |||
146 | if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf) | ||
147 | return -EINVAL; | ||
148 | |||
149 | /* if necessary, collect reset state of L3 partitioning and BAN mode */ | ||
150 | if (reset == 0) { | ||
151 | pci_read_config_dword(nb->link, 0x1d4, &reset); | ||
152 | pci_read_config_dword(nb->misc, 0x1b8, &ban); | ||
153 | ban &= 0x180000; | ||
154 | } | ||
155 | |||
156 | /* deactivate BAN mode if any subcaches are to be disabled */ | ||
157 | if (mask != 0xf) { | ||
158 | pci_read_config_dword(nb->misc, 0x1b8, ®); | ||
159 | pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000); | ||
160 | } | ||
161 | |||
162 | #ifdef CONFIG_SMP | ||
163 | cuid = cpu_data(cpu).compute_unit_id; | ||
164 | #endif | ||
165 | mask <<= 4 * cuid; | ||
166 | mask |= (0xf ^ (1 << cuid)) << 26; | ||
167 | |||
168 | pci_write_config_dword(nb->link, 0x1d4, mask); | ||
169 | |||
170 | /* reset BAN mode if L3 partitioning returned to reset state */ | ||
171 | pci_read_config_dword(nb->link, 0x1d4, ®); | ||
172 | if (reg == reset) { | ||
173 | pci_read_config_dword(nb->misc, 0x1b8, ®); | ||
174 | reg &= ~0x180000; | ||
175 | pci_write_config_dword(nb->misc, 0x1b8, reg | ban); | ||
176 | } | ||
177 | |||
102 | return 0; | 178 | return 0; |
103 | } | 179 | } |
104 | 180 | ||
105 | int amd_cache_gart(void) | 181 | static int amd_cache_gart(void) |
106 | { | 182 | { |
107 | int i; | 183 | u16 i; |
108 | 184 | ||
109 | if (!amd_nb_has_feature(AMD_NB_GART)) | 185 | if (!amd_nb_has_feature(AMD_NB_GART)) |
110 | return 0; | 186 | return 0; |
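amd_get_subcaches() above extracts a per-compute-unit nibble from the northbridge link device's 0x1d4 config register: unit N owns bits 4*N through 4*N+3. A toy illustration of that slicing with an invented register value:

#include <stdio.h>

/* Model of the mask extraction in amd_get_subcaches(): each compute
 * unit's subcache mask is a 4-bit field at offset 4 * unit. */
static unsigned int subcache_mask(unsigned int reg, int compute_unit)
{
        return (reg >> (4 * compute_unit)) & 0xf;
}

int main(void)
{
        unsigned int reg = 0x0000f3f1;  /* pretend contents of the 0x1d4 register */

        for (int cu = 0; cu < 4; cu++)
                printf("compute unit %d -> subcaches 0x%x\n",
                       cu, subcache_mask(reg, cu));
        return 0;
}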
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c index 51d4e1663066..1293c709ee85 100644 --- a/arch/x86/kernel/apb_timer.c +++ b/arch/x86/kernel/apb_timer.c | |||
@@ -508,64 +508,12 @@ static int apbt_next_event(unsigned long delta, | |||
508 | return 0; | 508 | return 0; |
509 | } | 509 | } |
510 | 510 | ||
511 | /* | ||
512 | * APB timer clock is not in sync with pclk on Langwell, which translates to | ||
513 | * unreliable read value caused by sampling error. the error does not add up | ||
514 | * overtime and only happens when sampling a 0 as a 1 by mistake. so the time | ||
515 | * would go backwards. the following code is trying to prevent time traveling | ||
516 | * backwards. little bit paranoid. | ||
517 | */ | ||
518 | static cycle_t apbt_read_clocksource(struct clocksource *cs) | 511 | static cycle_t apbt_read_clocksource(struct clocksource *cs) |
519 | { | 512 | { |
520 | unsigned long t0, t1, t2; | 513 | unsigned long current_count; |
521 | static unsigned long last_read; | 514 | |
522 | 515 | current_count = apbt_readl(phy_cs_timer_id, APBTMR_N_CURRENT_VALUE); | |
523 | bad_count: | 516 | return (cycle_t)~current_count; |
524 | t1 = apbt_readl(phy_cs_timer_id, | ||
525 | APBTMR_N_CURRENT_VALUE); | ||
526 | t2 = apbt_readl(phy_cs_timer_id, | ||
527 | APBTMR_N_CURRENT_VALUE); | ||
528 | if (unlikely(t1 < t2)) { | ||
529 | pr_debug("APBT: read current count error %lx:%lx:%lx\n", | ||
530 | t1, t2, t2 - t1); | ||
531 | goto bad_count; | ||
532 | } | ||
533 | /* | ||
534 | * check against cached last read, makes sure time does not go back. | ||
535 | * it could be a normal rollover but we will do tripple check anyway | ||
536 | */ | ||
537 | if (unlikely(t2 > last_read)) { | ||
538 | /* check if we have a normal rollover */ | ||
539 | unsigned long raw_intr_status = | ||
540 | apbt_readl_reg(APBTMRS_RAW_INT_STATUS); | ||
541 | /* | ||
542 | * cs timer interrupt is masked but raw intr bit is set if | ||
543 | * rollover occurs. then we read EOI reg to clear it. | ||
544 | */ | ||
545 | if (raw_intr_status & (1 << phy_cs_timer_id)) { | ||
546 | apbt_readl(phy_cs_timer_id, APBTMR_N_EOI); | ||
547 | goto out; | ||
548 | } | ||
549 | pr_debug("APB CS going back %lx:%lx:%lx ", | ||
550 | t2, last_read, t2 - last_read); | ||
551 | bad_count_x3: | ||
552 | pr_debug("triple check enforced\n"); | ||
553 | t0 = apbt_readl(phy_cs_timer_id, | ||
554 | APBTMR_N_CURRENT_VALUE); | ||
555 | udelay(1); | ||
556 | t1 = apbt_readl(phy_cs_timer_id, | ||
557 | APBTMR_N_CURRENT_VALUE); | ||
558 | udelay(1); | ||
559 | t2 = apbt_readl(phy_cs_timer_id, | ||
560 | APBTMR_N_CURRENT_VALUE); | ||
561 | if ((t2 > t1) || (t1 > t0)) { | ||
562 | printk(KERN_ERR "Error: APB CS tripple check failed\n"); | ||
563 | goto bad_count_x3; | ||
564 | } | ||
565 | } | ||
566 | out: | ||
567 | last_read = t2; | ||
568 | return (cycle_t)~t2; | ||
569 | } | 517 | } |
570 | 518 | ||
571 | static int apbt_clocksource_register(void) | 519 | static int apbt_clocksource_register(void) |
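The trimmed apbt_read_clocksource() above works because the APB timer counts down; complementing the current value turns it into a monotonically increasing cycle count for the clocksource core. A small model with a fake down-counting register standing in for apbt_readl():

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_current_value = 0xfffffff0;

/* Stub for the hardware read: the counter decreases on every sample. */
static uint32_t apbt_read_current(void)
{
        return fake_current_value -= 16;
}

/* Same idea as the simplified read above: return the complement so the
 * reported cycle value only ever grows. */
static uint64_t read_clocksource(void)
{
        return (uint64_t)(uint32_t)~apbt_read_current();
}

int main(void)
{
        printf("%llu\n", (unsigned long long)read_clocksource());
        printf("%llu\n", (unsigned long long)read_clocksource());
        return 0;
}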
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index 5955a7800a96..7b1e8e10b89c 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c | |||
@@ -13,7 +13,7 @@ | |||
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/types.h> | 14 | #include <linux/types.h> |
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/bootmem.h> | 16 | #include <linux/memblock.h> |
17 | #include <linux/mmzone.h> | 17 | #include <linux/mmzone.h> |
18 | #include <linux/pci_ids.h> | 18 | #include <linux/pci_ids.h> |
19 | #include <linux/pci.h> | 19 | #include <linux/pci.h> |
@@ -57,7 +57,7 @@ static void __init insert_aperture_resource(u32 aper_base, u32 aper_size) | |||
57 | static u32 __init allocate_aperture(void) | 57 | static u32 __init allocate_aperture(void) |
58 | { | 58 | { |
59 | u32 aper_size; | 59 | u32 aper_size; |
60 | void *p; | 60 | unsigned long addr; |
61 | 61 | ||
62 | /* aper_size should <= 1G */ | 62 | /* aper_size should <= 1G */ |
63 | if (fallback_aper_order > 5) | 63 | if (fallback_aper_order > 5) |
@@ -83,27 +83,26 @@ static u32 __init allocate_aperture(void) | |||
83 | * so don't use 512M below as gart iommu, leave the space for kernel | 83 | * so don't use 512M below as gart iommu, leave the space for kernel |
84 | * code for safe | 84 | * code for safe |
85 | */ | 85 | */ |
86 | p = __alloc_bootmem_nopanic(aper_size, aper_size, 512ULL<<20); | 86 | addr = memblock_find_in_range(0, 1ULL<<32, aper_size, 512ULL<<20); |
87 | if (addr == MEMBLOCK_ERROR || addr + aper_size > 0xffffffff) { | ||
88 | printk(KERN_ERR | ||
89 | "Cannot allocate aperture memory hole (%lx,%uK)\n", | ||
90 | addr, aper_size>>10); | ||
91 | return 0; | ||
92 | } | ||
93 | memblock_x86_reserve_range(addr, addr + aper_size, "aperture64"); | ||
87 | /* | 94 | /* |
88 | * Kmemleak should not scan this block as it may not be mapped via the | 95 | * Kmemleak should not scan this block as it may not be mapped via the |
89 | * kernel direct mapping. | 96 | * kernel direct mapping. |
90 | */ | 97 | */ |
91 | kmemleak_ignore(p); | 98 | kmemleak_ignore(phys_to_virt(addr)); |
92 | if (!p || __pa(p)+aper_size > 0xffffffff) { | ||
93 | printk(KERN_ERR | ||
94 | "Cannot allocate aperture memory hole (%p,%uK)\n", | ||
95 | p, aper_size>>10); | ||
96 | if (p) | ||
97 | free_bootmem(__pa(p), aper_size); | ||
98 | return 0; | ||
99 | } | ||
100 | printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n", | 99 | printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n", |
101 | aper_size >> 10, __pa(p)); | 100 | aper_size >> 10, addr); |
102 | insert_aperture_resource((u32)__pa(p), aper_size); | 101 | insert_aperture_resource((u32)addr, aper_size); |
103 | register_nosave_region((u32)__pa(p) >> PAGE_SHIFT, | 102 | register_nosave_region(addr >> PAGE_SHIFT, |
104 | (u32)__pa(p+aper_size) >> PAGE_SHIFT); | 103 | (addr+aper_size) >> PAGE_SHIFT); |
105 | 104 | ||
106 | return (u32)__pa(p); | 105 | return (u32)addr; |
107 | } | 106 | } |
108 | 107 | ||
109 | 108 | ||
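The bootmem-to-memblock conversion above follows a fixed pattern: find a suitably aligned range below 4 GiB, bail out on failure or on a hole that would end above 4 GiB, then reserve the range. A toy model of that control flow with the memblock calls stubbed out:

#include <stdio.h>

#define TOY_MEMBLOCK_ERROR 0ULL

/* Stub standing in for memblock_find_in_range(); always "finds" 512 MiB. */
static unsigned long long toy_find_in_range(unsigned long long start,
                                            unsigned long long end,
                                            unsigned long long size,
                                            unsigned long long align)
{
        return 512ULL << 20;
}

static unsigned long long toy_allocate_aperture(unsigned long long aper_size)
{
        unsigned long long addr = toy_find_in_range(0, 1ULL << 32,
                                                    aper_size, 512ULL << 20);

        /* Same checks as the patched allocate_aperture(): allocation failed
         * or the hole would end above 4 GiB. */
        if (addr == TOY_MEMBLOCK_ERROR || addr + aper_size > 0xffffffffULL)
                return 0;

        /* memblock_x86_reserve_range(addr, addr + aper_size, ...) goes here. */
        return addr;
}

int main(void)
{
        printf("aperture at %#llx\n", toy_allocate_aperture(64ULL << 20));
        return 0;
}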
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 76b96d74978a..966673f44141 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #include <asm/i8259.h> | 43 | #include <asm/i8259.h> |
44 | #include <asm/proto.h> | 44 | #include <asm/proto.h> |
45 | #include <asm/apic.h> | 45 | #include <asm/apic.h> |
46 | #include <asm/io_apic.h> | ||
46 | #include <asm/desc.h> | 47 | #include <asm/desc.h> |
47 | #include <asm/hpet.h> | 48 | #include <asm/hpet.h> |
48 | #include <asm/idle.h> | 49 | #include <asm/idle.h> |
@@ -78,12 +79,21 @@ EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid); | |||
78 | EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid); | 79 | EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid); |
79 | 80 | ||
80 | #ifdef CONFIG_X86_32 | 81 | #ifdef CONFIG_X86_32 |
82 | |||
83 | /* | ||
84 | * On x86_32, the mapping between cpu and logical apicid may vary | ||
85 | * depending on apic in use. The following early percpu variable is | ||
86 | * used for the mapping. This is where the behaviors of x86_64 and 32 | ||
87 | * actually diverge. Let's keep it ugly for now. | ||
88 | */ | ||
89 | DEFINE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid, BAD_APICID); | ||
90 | |||
81 | /* | 91 | /* |
82 | * Knob to control our willingness to enable the local APIC. | 92 | * Knob to control our willingness to enable the local APIC. |
83 | * | 93 | * |
84 | * +1=force-enable | 94 | * +1=force-enable |
85 | */ | 95 | */ |
86 | static int force_enable_local_apic; | 96 | static int force_enable_local_apic __initdata; |
87 | /* | 97 | /* |
88 | * APIC command line parameters | 98 | * APIC command line parameters |
89 | */ | 99 | */ |
@@ -153,7 +163,7 @@ early_param("nox2apic", setup_nox2apic); | |||
153 | unsigned long mp_lapic_addr; | 163 | unsigned long mp_lapic_addr; |
154 | int disable_apic; | 164 | int disable_apic; |
155 | /* Disable local APIC timer from the kernel commandline or via dmi quirk */ | 165 | /* Disable local APIC timer from the kernel commandline or via dmi quirk */ |
156 | static int disable_apic_timer __cpuinitdata; | 166 | static int disable_apic_timer __initdata; |
157 | /* Local APIC timer works in C2 */ | 167 | /* Local APIC timer works in C2 */ |
158 | int local_apic_timer_c2_ok; | 168 | int local_apic_timer_c2_ok; |
159 | EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok); | 169 | EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok); |
@@ -177,29 +187,8 @@ static struct resource lapic_resource = { | |||
177 | 187 | ||
178 | static unsigned int calibration_result; | 188 | static unsigned int calibration_result; |
179 | 189 | ||
180 | static int lapic_next_event(unsigned long delta, | ||
181 | struct clock_event_device *evt); | ||
182 | static void lapic_timer_setup(enum clock_event_mode mode, | ||
183 | struct clock_event_device *evt); | ||
184 | static void lapic_timer_broadcast(const struct cpumask *mask); | ||
185 | static void apic_pm_activate(void); | 190 | static void apic_pm_activate(void); |
186 | 191 | ||
187 | /* | ||
188 | * The local apic timer can be used for any function which is CPU local. | ||
189 | */ | ||
190 | static struct clock_event_device lapic_clockevent = { | ||
191 | .name = "lapic", | ||
192 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | ||
193 | | CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY, | ||
194 | .shift = 32, | ||
195 | .set_mode = lapic_timer_setup, | ||
196 | .set_next_event = lapic_next_event, | ||
197 | .broadcast = lapic_timer_broadcast, | ||
198 | .rating = 100, | ||
199 | .irq = -1, | ||
200 | }; | ||
201 | static DEFINE_PER_CPU(struct clock_event_device, lapic_events); | ||
202 | |||
203 | static unsigned long apic_phys; | 192 | static unsigned long apic_phys; |
204 | 193 | ||
205 | /* | 194 | /* |
@@ -238,7 +227,7 @@ static int modern_apic(void) | |||
238 | * right after this call apic become NOOP driven | 227 | * right after this call apic become NOOP driven |
239 | * so apic->write/read doesn't do anything | 228 | * so apic->write/read doesn't do anything |
240 | */ | 229 | */ |
241 | void apic_disable(void) | 230 | static void __init apic_disable(void) |
242 | { | 231 | { |
243 | pr_info("APIC: switched to apic NOOP\n"); | 232 | pr_info("APIC: switched to apic NOOP\n"); |
244 | apic = &apic_noop; | 233 | apic = &apic_noop; |
@@ -282,23 +271,6 @@ u64 native_apic_icr_read(void) | |||
282 | return icr1 | ((u64)icr2 << 32); | 271 | return icr1 | ((u64)icr2 << 32); |
283 | } | 272 | } |
284 | 273 | ||
285 | /** | ||
286 | * enable_NMI_through_LVT0 - enable NMI through local vector table 0 | ||
287 | */ | ||
288 | void __cpuinit enable_NMI_through_LVT0(void) | ||
289 | { | ||
290 | unsigned int v; | ||
291 | |||
292 | /* unmask and set to NMI */ | ||
293 | v = APIC_DM_NMI; | ||
294 | |||
295 | /* Level triggered for 82489DX (32bit mode) */ | ||
296 | if (!lapic_is_integrated()) | ||
297 | v |= APIC_LVT_LEVEL_TRIGGER; | ||
298 | |||
299 | apic_write(APIC_LVT0, v); | ||
300 | } | ||
301 | |||
302 | #ifdef CONFIG_X86_32 | 274 | #ifdef CONFIG_X86_32 |
303 | /** | 275 | /** |
304 | * get_physical_broadcast - Get number of physical broadcast IDs | 276 | * get_physical_broadcast - Get number of physical broadcast IDs |
@@ -508,6 +480,23 @@ static void lapic_timer_broadcast(const struct cpumask *mask) | |||
508 | #endif | 480 | #endif |
509 | } | 481 | } |
510 | 482 | ||
483 | |||
484 | /* | ||
485 | * The local apic timer can be used for any function which is CPU local. | ||
486 | */ | ||
487 | static struct clock_event_device lapic_clockevent = { | ||
488 | .name = "lapic", | ||
489 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | ||
490 | | CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY, | ||
491 | .shift = 32, | ||
492 | .set_mode = lapic_timer_setup, | ||
493 | .set_next_event = lapic_next_event, | ||
494 | .broadcast = lapic_timer_broadcast, | ||
495 | .rating = 100, | ||
496 | .irq = -1, | ||
497 | }; | ||
498 | static DEFINE_PER_CPU(struct clock_event_device, lapic_events); | ||
499 | |||
511 | /* | 500 | /* |
512 | * Setup the local APIC timer for this CPU. Copy the initialized values | 501 | * Setup the local APIC timer for this CPU. Copy the initialized values |
513 | * of the boot CPU and register the clock event in the framework. | 502 | * of the boot CPU and register the clock event in the framework. |
@@ -1209,7 +1198,7 @@ void __cpuinit setup_local_APIC(void) | |||
1209 | rdtscll(tsc); | 1198 | rdtscll(tsc); |
1210 | 1199 | ||
1211 | if (disable_apic) { | 1200 | if (disable_apic) { |
1212 | arch_disable_smp_support(); | 1201 | disable_ioapic_support(); |
1213 | return; | 1202 | return; |
1214 | } | 1203 | } |
1215 | 1204 | ||
@@ -1237,6 +1226,19 @@ void __cpuinit setup_local_APIC(void) | |||
1237 | */ | 1226 | */ |
1238 | apic->init_apic_ldr(); | 1227 | apic->init_apic_ldr(); |
1239 | 1228 | ||
1229 | #ifdef CONFIG_X86_32 | ||
1230 | /* | ||
1231 | * APIC LDR is initialized. If logical_apicid mapping was | ||
1232 | * initialized during get_smp_config(), make sure it matches the | ||
1233 | * actual value. | ||
1234 | */ | ||
1235 | i = early_per_cpu(x86_cpu_to_logical_apicid, cpu); | ||
1236 | WARN_ON(i != BAD_APICID && i != logical_smp_processor_id()); | ||
1237 | /* always use the value from LDR */ | ||
1238 | early_per_cpu(x86_cpu_to_logical_apicid, cpu) = | ||
1239 | logical_smp_processor_id(); | ||
1240 | #endif | ||
1241 | |||
1240 | /* | 1242 | /* |
1241 | * Set Task Priority to 'accept all'. We never change this | 1243 | * Set Task Priority to 'accept all'. We never change this |
1242 | * later on. | 1244 | * later on. |
@@ -1448,7 +1450,7 @@ int __init enable_IR(void) | |||
1448 | void __init enable_IR_x2apic(void) | 1450 | void __init enable_IR_x2apic(void) |
1449 | { | 1451 | { |
1450 | unsigned long flags; | 1452 | unsigned long flags; |
1451 | struct IO_APIC_route_entry **ioapic_entries = NULL; | 1453 | struct IO_APIC_route_entry **ioapic_entries; |
1452 | int ret, x2apic_enabled = 0; | 1454 | int ret, x2apic_enabled = 0; |
1453 | int dmar_table_init_ret; | 1455 | int dmar_table_init_ret; |
1454 | 1456 | ||
@@ -1537,7 +1539,7 @@ static int __init detect_init_APIC(void) | |||
1537 | } | 1539 | } |
1538 | #else | 1540 | #else |
1539 | 1541 | ||
1540 | static int apic_verify(void) | 1542 | static int __init apic_verify(void) |
1541 | { | 1543 | { |
1542 | u32 features, h, l; | 1544 | u32 features, h, l; |
1543 | 1545 | ||
@@ -1562,7 +1564,7 @@ static int apic_verify(void) | |||
1562 | return 0; | 1564 | return 0; |
1563 | } | 1565 | } |
1564 | 1566 | ||
1565 | int apic_force_enable(void) | 1567 | int __init apic_force_enable(unsigned long addr) |
1566 | { | 1568 | { |
1567 | u32 h, l; | 1569 | u32 h, l; |
1568 | 1570 | ||
@@ -1578,7 +1580,7 @@ int apic_force_enable(void) | |||
1578 | if (!(l & MSR_IA32_APICBASE_ENABLE)) { | 1580 | if (!(l & MSR_IA32_APICBASE_ENABLE)) { |
1579 | pr_info("Local APIC disabled by BIOS -- reenabling.\n"); | 1581 | pr_info("Local APIC disabled by BIOS -- reenabling.\n"); |
1580 | l &= ~MSR_IA32_APICBASE_BASE; | 1582 | l &= ~MSR_IA32_APICBASE_BASE; |
1581 | l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE; | 1583 | l |= MSR_IA32_APICBASE_ENABLE | addr; |
1582 | wrmsr(MSR_IA32_APICBASE, l, h); | 1584 | wrmsr(MSR_IA32_APICBASE, l, h); |
1583 | enabled_via_apicbase = 1; | 1585 | enabled_via_apicbase = 1; |
1584 | } | 1586 | } |
@@ -1619,7 +1621,7 @@ static int __init detect_init_APIC(void) | |||
1619 | "you can enable it with \"lapic\"\n"); | 1621 | "you can enable it with \"lapic\"\n"); |
1620 | return -1; | 1622 | return -1; |
1621 | } | 1623 | } |
1622 | if (apic_force_enable()) | 1624 | if (apic_force_enable(APIC_DEFAULT_PHYS_BASE)) |
1623 | return -1; | 1625 | return -1; |
1624 | } else { | 1626 | } else { |
1625 | if (apic_verify()) | 1627 | if (apic_verify()) |
@@ -1930,17 +1932,6 @@ void __cpuinit generic_processor_info(int apicid, int version) | |||
1930 | { | 1932 | { |
1931 | int cpu; | 1933 | int cpu; |
1932 | 1934 | ||
1933 | /* | ||
1934 | * Validate version | ||
1935 | */ | ||
1936 | if (version == 0x0) { | ||
1937 | pr_warning("BIOS bug, APIC version is 0 for CPU#%d! " | ||
1938 | "fixing up to 0x10. (tell your hw vendor)\n", | ||
1939 | version); | ||
1940 | version = 0x10; | ||
1941 | } | ||
1942 | apic_version[apicid] = version; | ||
1943 | |||
1944 | if (num_processors >= nr_cpu_ids) { | 1935 | if (num_processors >= nr_cpu_ids) { |
1945 | int max = nr_cpu_ids; | 1936 | int max = nr_cpu_ids; |
1946 | int thiscpu = max + disabled_cpus; | 1937 | int thiscpu = max + disabled_cpus; |
@@ -1954,22 +1945,34 @@ void __cpuinit generic_processor_info(int apicid, int version) | |||
1954 | } | 1945 | } |
1955 | 1946 | ||
1956 | num_processors++; | 1947 | num_processors++; |
1957 | cpu = cpumask_next_zero(-1, cpu_present_mask); | ||
1958 | |||
1959 | if (version != apic_version[boot_cpu_physical_apicid]) | ||
1960 | WARN_ONCE(1, | ||
1961 | "ACPI: apic version mismatch, bootcpu: %x cpu %d: %x\n", | ||
1962 | apic_version[boot_cpu_physical_apicid], cpu, version); | ||
1963 | |||
1964 | physid_set(apicid, phys_cpu_present_map); | ||
1965 | if (apicid == boot_cpu_physical_apicid) { | 1948 | if (apicid == boot_cpu_physical_apicid) { |
1966 | /* | 1949 | /* |
1967 | * x86_bios_cpu_apicid is required to have processors listed | 1950 | * x86_bios_cpu_apicid is required to have processors listed |
1968 | * in same order as logical cpu numbers. Hence the first | 1951 | * in same order as logical cpu numbers. Hence the first |
1969 | * entry is BSP, and so on. | 1952 | * entry is BSP, and so on. |
1953 | * boot_cpu_init() already hold bit 0 in cpu_present_mask | ||
1954 | * for BSP. | ||
1970 | */ | 1955 | */ |
1971 | cpu = 0; | 1956 | cpu = 0; |
1957 | } else | ||
1958 | cpu = cpumask_next_zero(-1, cpu_present_mask); | ||
1959 | |||
1960 | /* | ||
1961 | * Validate version | ||
1962 | */ | ||
1963 | if (version == 0x0) { | ||
1964 | pr_warning("BIOS bug: APIC version is 0 for CPU %d/0x%x, fixing up to 0x10\n", | ||
1965 | cpu, apicid); | ||
1966 | version = 0x10; | ||
1972 | } | 1967 | } |
1968 | apic_version[apicid] = version; | ||
1969 | |||
1970 | if (version != apic_version[boot_cpu_physical_apicid]) { | ||
1971 | pr_warning("BIOS bug: APIC version mismatch, boot CPU: %x, CPU %d: version %x\n", | ||
1972 | apic_version[boot_cpu_physical_apicid], cpu, version); | ||
1973 | } | ||
1974 | |||
1975 | physid_set(apicid, phys_cpu_present_map); | ||
1973 | if (apicid > max_physical_apicid) | 1976 | if (apicid > max_physical_apicid) |
1974 | max_physical_apicid = apicid; | 1977 | max_physical_apicid = apicid; |
1975 | 1978 | ||
@@ -1977,7 +1980,10 @@ void __cpuinit generic_processor_info(int apicid, int version) | |||
1977 | early_per_cpu(x86_cpu_to_apicid, cpu) = apicid; | 1980 | early_per_cpu(x86_cpu_to_apicid, cpu) = apicid; |
1978 | early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid; | 1981 | early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid; |
1979 | #endif | 1982 | #endif |
1980 | 1983 | #ifdef CONFIG_X86_32 | |
1984 | early_per_cpu(x86_cpu_to_logical_apicid, cpu) = | ||
1985 | apic->x86_32_early_logical_apicid(cpu); | ||
1986 | #endif | ||
1981 | set_cpu_possible(cpu, true); | 1987 | set_cpu_possible(cpu, true); |
1982 | set_cpu_present(cpu, true); | 1988 | set_cpu_present(cpu, true); |
1983 | } | 1989 | } |
@@ -1998,10 +2004,14 @@ void default_init_apic_ldr(void) | |||
1998 | } | 2004 | } |
1999 | 2005 | ||
2000 | #ifdef CONFIG_X86_32 | 2006 | #ifdef CONFIG_X86_32 |
2001 | int default_apicid_to_node(int logical_apicid) | 2007 | int default_x86_32_numa_cpu_node(int cpu) |
2002 | { | 2008 | { |
2003 | #ifdef CONFIG_SMP | 2009 | #ifdef CONFIG_NUMA |
2004 | return apicid_2_node[hard_smp_processor_id()]; | 2010 | int apicid = early_per_cpu(x86_cpu_to_apicid, cpu); |
2011 | |||
2012 | if (apicid != BAD_APICID) | ||
2013 | return __apicid_to_node[apicid]; | ||
2014 | return NUMA_NO_NODE; | ||
2005 | #else | 2015 | #else |
2006 | return 0; | 2016 | return 0; |
2007 | #endif | 2017 | #endif |
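In the generic_processor_info() hunk above, the logical CPU number is now chosen before the version check, and the boot CPU always keeps slot 0 because boot_cpu_init() already marked it present. A reduced model of that assignment, with the cpumask helpers replaced by a plain array and a made-up BSP apicid:

#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS 8

static bool cpu_present[NR_CPUS];
static int boot_cpu_physical_apicid = 0x10;     /* made-up BSP apicid */

static int assign_cpu(int apicid)
{
        int cpu;

        if (apicid == boot_cpu_physical_apicid) {
                /* boot_cpu_init() already claimed slot 0 for the BSP */
                cpu = 0;
        } else {
                for (cpu = 0; cpu < NR_CPUS && cpu_present[cpu]; cpu++)
                        ;                       /* first free slot */
                if (cpu == NR_CPUS)
                        return -1;              /* out of cpu ids */
        }
        cpu_present[cpu] = true;
        return cpu;
}

int main(void)
{
        cpu_present[0] = true;                  /* BSP marked present at boot */
        printf("apicid 0x10 -> cpu %d\n", assign_cpu(0x10));
        printf("apicid 0x11 -> cpu %d\n", assign_cpu(0x11));
        printf("apicid 0x12 -> cpu %d\n", assign_cpu(0x12));
        return 0;
}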
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c index 09d3b17ce0c2..5652d31fe108 100644 --- a/arch/x86/kernel/apic/apic_flat_64.c +++ b/arch/x86/kernel/apic/apic_flat_64.c | |||
@@ -185,8 +185,6 @@ struct apic apic_flat = { | |||
185 | .ioapic_phys_id_map = NULL, | 185 | .ioapic_phys_id_map = NULL, |
186 | .setup_apic_routing = NULL, | 186 | .setup_apic_routing = NULL, |
187 | .multi_timer_check = NULL, | 187 | .multi_timer_check = NULL, |
188 | .apicid_to_node = NULL, | ||
189 | .cpu_to_logical_apicid = NULL, | ||
190 | .cpu_present_to_apicid = default_cpu_present_to_apicid, | 188 | .cpu_present_to_apicid = default_cpu_present_to_apicid, |
191 | .apicid_to_cpu_present = NULL, | 189 | .apicid_to_cpu_present = NULL, |
192 | .setup_portio_remap = NULL, | 190 | .setup_portio_remap = NULL, |
@@ -337,8 +335,6 @@ struct apic apic_physflat = { | |||
337 | .ioapic_phys_id_map = NULL, | 335 | .ioapic_phys_id_map = NULL, |
338 | .setup_apic_routing = NULL, | 336 | .setup_apic_routing = NULL, |
339 | .multi_timer_check = NULL, | 337 | .multi_timer_check = NULL, |
340 | .apicid_to_node = NULL, | ||
341 | .cpu_to_logical_apicid = NULL, | ||
342 | .cpu_present_to_apicid = default_cpu_present_to_apicid, | 338 | .cpu_present_to_apicid = default_cpu_present_to_apicid, |
343 | .apicid_to_cpu_present = NULL, | 339 | .apicid_to_cpu_present = NULL, |
344 | .setup_portio_remap = NULL, | 340 | .setup_portio_remap = NULL, |
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c index e31b9ffe25f5..f1baa2dc087a 100644 --- a/arch/x86/kernel/apic/apic_noop.c +++ b/arch/x86/kernel/apic/apic_noop.c | |||
@@ -54,11 +54,6 @@ static u64 noop_apic_icr_read(void) | |||
54 | return 0; | 54 | return 0; |
55 | } | 55 | } |
56 | 56 | ||
57 | static int noop_cpu_to_logical_apicid(int cpu) | ||
58 | { | ||
59 | return 0; | ||
60 | } | ||
61 | |||
62 | static int noop_phys_pkg_id(int cpuid_apic, int index_msb) | 57 | static int noop_phys_pkg_id(int cpuid_apic, int index_msb) |
63 | { | 58 | { |
64 | return 0; | 59 | return 0; |
@@ -113,12 +108,6 @@ static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask) | |||
113 | cpumask_set_cpu(cpu, retmask); | 108 | cpumask_set_cpu(cpu, retmask); |
114 | } | 109 | } |
115 | 110 | ||
116 | int noop_apicid_to_node(int logical_apicid) | ||
117 | { | ||
118 | /* we're always on node 0 */ | ||
119 | return 0; | ||
120 | } | ||
121 | |||
122 | static u32 noop_apic_read(u32 reg) | 111 | static u32 noop_apic_read(u32 reg) |
123 | { | 112 | { |
124 | WARN_ON_ONCE((cpu_has_apic && !disable_apic)); | 113 | WARN_ON_ONCE((cpu_has_apic && !disable_apic)); |
@@ -130,6 +119,14 @@ static void noop_apic_write(u32 reg, u32 v) | |||
130 | WARN_ON_ONCE(cpu_has_apic && !disable_apic); | 119 | WARN_ON_ONCE(cpu_has_apic && !disable_apic); |
131 | } | 120 | } |
132 | 121 | ||
122 | #ifdef CONFIG_X86_32 | ||
123 | static int noop_x86_32_numa_cpu_node(int cpu) | ||
124 | { | ||
125 | /* we're always on node 0 */ | ||
126 | return 0; | ||
127 | } | ||
128 | #endif | ||
129 | |||
133 | struct apic apic_noop = { | 130 | struct apic apic_noop = { |
134 | .name = "noop", | 131 | .name = "noop", |
135 | .probe = noop_probe, | 132 | .probe = noop_probe, |
@@ -153,9 +150,7 @@ struct apic apic_noop = { | |||
153 | .ioapic_phys_id_map = default_ioapic_phys_id_map, | 150 | .ioapic_phys_id_map = default_ioapic_phys_id_map, |
154 | .setup_apic_routing = NULL, | 151 | .setup_apic_routing = NULL, |
155 | .multi_timer_check = NULL, | 152 | .multi_timer_check = NULL, |
156 | .apicid_to_node = noop_apicid_to_node, | ||
157 | 153 | ||
158 | .cpu_to_logical_apicid = noop_cpu_to_logical_apicid, | ||
159 | .cpu_present_to_apicid = default_cpu_present_to_apicid, | 154 | .cpu_present_to_apicid = default_cpu_present_to_apicid, |
160 | .apicid_to_cpu_present = physid_set_mask_of_physid, | 155 | .apicid_to_cpu_present = physid_set_mask_of_physid, |
161 | 156 | ||
@@ -197,4 +192,9 @@ struct apic apic_noop = { | |||
197 | .icr_write = noop_apic_icr_write, | 192 | .icr_write = noop_apic_icr_write, |
198 | .wait_icr_idle = noop_apic_wait_icr_idle, | 193 | .wait_icr_idle = noop_apic_wait_icr_idle, |
199 | .safe_wait_icr_idle = noop_safe_apic_wait_icr_idle, | 194 | .safe_wait_icr_idle = noop_safe_apic_wait_icr_idle, |
195 | |||
196 | #ifdef CONFIG_X86_32 | ||
197 | .x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid, | ||
198 | .x86_32_numa_cpu_node = noop_x86_32_numa_cpu_node, | ||
199 | #endif | ||
200 | }; | 200 | }; |
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c index cb804c5091b9..541a2e431659 100644 --- a/arch/x86/kernel/apic/bigsmp_32.c +++ b/arch/x86/kernel/apic/bigsmp_32.c | |||
@@ -45,6 +45,12 @@ static unsigned long bigsmp_check_apicid_present(int bit) | |||
45 | return 1; | 45 | return 1; |
46 | } | 46 | } |
47 | 47 | ||
48 | static int bigsmp_early_logical_apicid(int cpu) | ||
49 | { | ||
50 | /* on bigsmp, logical apicid is the same as physical */ | ||
51 | return early_per_cpu(x86_cpu_to_apicid, cpu); | ||
52 | } | ||
53 | |||
48 | static inline unsigned long calculate_ldr(int cpu) | 54 | static inline unsigned long calculate_ldr(int cpu) |
49 | { | 55 | { |
50 | unsigned long val, id; | 56 | unsigned long val, id; |
@@ -80,11 +86,6 @@ static void bigsmp_setup_apic_routing(void) | |||
80 | nr_ioapics); | 86 | nr_ioapics); |
81 | } | 87 | } |
82 | 88 | ||
83 | static int bigsmp_apicid_to_node(int logical_apicid) | ||
84 | { | ||
85 | return apicid_2_node[hard_smp_processor_id()]; | ||
86 | } | ||
87 | |||
88 | static int bigsmp_cpu_present_to_apicid(int mps_cpu) | 89 | static int bigsmp_cpu_present_to_apicid(int mps_cpu) |
89 | { | 90 | { |
90 | if (mps_cpu < nr_cpu_ids) | 91 | if (mps_cpu < nr_cpu_ids) |
@@ -93,14 +94,6 @@ static int bigsmp_cpu_present_to_apicid(int mps_cpu) | |||
93 | return BAD_APICID; | 94 | return BAD_APICID; |
94 | } | 95 | } |
95 | 96 | ||
96 | /* Mapping from cpu number to logical apicid */ | ||
97 | static inline int bigsmp_cpu_to_logical_apicid(int cpu) | ||
98 | { | ||
99 | if (cpu >= nr_cpu_ids) | ||
100 | return BAD_APICID; | ||
101 | return cpu_physical_id(cpu); | ||
102 | } | ||
103 | |||
104 | static void bigsmp_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap) | 97 | static void bigsmp_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap) |
105 | { | 98 | { |
106 | /* For clustered we don't have a good way to do this yet - hack */ | 99 | /* For clustered we don't have a good way to do this yet - hack */ |
@@ -115,7 +108,11 @@ static int bigsmp_check_phys_apicid_present(int phys_apicid) | |||
115 | /* As we are using single CPU as destination, pick only one CPU here */ | 108 | /* As we are using single CPU as destination, pick only one CPU here */ |
116 | static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask) | 109 | static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask) |
117 | { | 110 | { |
118 | return bigsmp_cpu_to_logical_apicid(cpumask_first(cpumask)); | 111 | int cpu = cpumask_first(cpumask); |
112 | |||
113 | if (cpu < nr_cpu_ids) | ||
114 | return cpu_physical_id(cpu); | ||
115 | return BAD_APICID; | ||
119 | } | 116 | } |
120 | 117 | ||
121 | static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask, | 118 | static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask, |
@@ -129,9 +126,9 @@ static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask, | |||
129 | */ | 126 | */ |
130 | for_each_cpu_and(cpu, cpumask, andmask) { | 127 | for_each_cpu_and(cpu, cpumask, andmask) { |
131 | if (cpumask_test_cpu(cpu, cpu_online_mask)) | 128 | if (cpumask_test_cpu(cpu, cpu_online_mask)) |
132 | break; | 129 | return cpu_physical_id(cpu); |
133 | } | 130 | } |
134 | return bigsmp_cpu_to_logical_apicid(cpu); | 131 | return BAD_APICID; |
135 | } | 132 | } |
136 | 133 | ||
137 | static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb) | 134 | static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb) |
@@ -219,8 +216,6 @@ struct apic apic_bigsmp = { | |||
219 | .ioapic_phys_id_map = bigsmp_ioapic_phys_id_map, | 216 | .ioapic_phys_id_map = bigsmp_ioapic_phys_id_map, |
220 | .setup_apic_routing = bigsmp_setup_apic_routing, | 217 | .setup_apic_routing = bigsmp_setup_apic_routing, |
221 | .multi_timer_check = NULL, | 218 | .multi_timer_check = NULL, |
222 | .apicid_to_node = bigsmp_apicid_to_node, | ||
223 | .cpu_to_logical_apicid = bigsmp_cpu_to_logical_apicid, | ||
224 | .cpu_present_to_apicid = bigsmp_cpu_present_to_apicid, | 219 | .cpu_present_to_apicid = bigsmp_cpu_present_to_apicid, |
225 | .apicid_to_cpu_present = physid_set_mask_of_physid, | 220 | .apicid_to_cpu_present = physid_set_mask_of_physid, |
226 | .setup_portio_remap = NULL, | 221 | .setup_portio_remap = NULL, |
@@ -256,4 +251,7 @@ struct apic apic_bigsmp = { | |||
256 | .icr_write = native_apic_icr_write, | 251 | .icr_write = native_apic_icr_write, |
257 | .wait_icr_idle = native_apic_wait_icr_idle, | 252 | .wait_icr_idle = native_apic_wait_icr_idle, |
258 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, | 253 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, |
254 | |||
255 | .x86_32_early_logical_apicid = bigsmp_early_logical_apicid, | ||
256 | .x86_32_numa_cpu_node = default_x86_32_numa_cpu_node, | ||
259 | }; | 257 | }; |
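The bigsmp changes above drop the cpu_to_logical_apicid callback and instead return cpu_physical_id() of the first online CPU found in the intersection of the two masks, falling back to BAD_APICID. A compact model with the masks as plain bitfields and made-up online states and physical ids:

#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS     4
#define BAD_APICID  0xff        /* placeholder value for the example */

static bool cpu_online_flag[NR_CPUS]  = { true, false, true, true };
static int  cpu_phys_id[NR_CPUS]      = { 0x00, 0x02, 0x04, 0x06 };

/* Walk the AND of two cpu masks and return the physical id of the first
 * online cpu, mirroring the reworked bigsmp_cpu_mask_to_apicid_and(). */
static int mask_to_apicid_and(unsigned int mask_a, unsigned int mask_b)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                if ((mask_a & mask_b & (1u << cpu)) && cpu_online_flag[cpu])
                        return cpu_phys_id[cpu];
        return BAD_APICID;
}

int main(void)
{
        /* intersection is cpus {1,2}; cpu 1 is offline, so cpu 2 wins */
        printf("apicid %#x\n", mask_to_apicid_and(0xe, 0x6));
        return 0;
}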
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c index 8593582d8022..3e9de4854c5b 100644 --- a/arch/x86/kernel/apic/es7000_32.c +++ b/arch/x86/kernel/apic/es7000_32.c | |||
@@ -460,6 +460,12 @@ static unsigned long es7000_check_apicid_present(int bit) | |||
460 | return physid_isset(bit, phys_cpu_present_map); | 460 | return physid_isset(bit, phys_cpu_present_map); |
461 | } | 461 | } |
462 | 462 | ||
463 | static int es7000_early_logical_apicid(int cpu) | ||
464 | { | ||
465 | /* on es7000, logical apicid is the same as physical */ | ||
466 | return early_per_cpu(x86_bios_cpu_apicid, cpu); | ||
467 | } | ||
468 | |||
463 | static unsigned long calculate_ldr(int cpu) | 469 | static unsigned long calculate_ldr(int cpu) |
464 | { | 470 | { |
465 | unsigned long id = per_cpu(x86_bios_cpu_apicid, cpu); | 471 | unsigned long id = per_cpu(x86_bios_cpu_apicid, cpu); |
@@ -504,12 +510,11 @@ static void es7000_setup_apic_routing(void) | |||
504 | nr_ioapics, cpumask_bits(es7000_target_cpus())[0]); | 510 | nr_ioapics, cpumask_bits(es7000_target_cpus())[0]); |
505 | } | 511 | } |
506 | 512 | ||
507 | static int es7000_apicid_to_node(int logical_apicid) | 513 | static int es7000_numa_cpu_node(int cpu) |
508 | { | 514 | { |
509 | return 0; | 515 | return 0; |
510 | } | 516 | } |
511 | 517 | ||
512 | |||
513 | static int es7000_cpu_present_to_apicid(int mps_cpu) | 518 | static int es7000_cpu_present_to_apicid(int mps_cpu) |
514 | { | 519 | { |
515 | if (!mps_cpu) | 520 | if (!mps_cpu) |
@@ -528,18 +533,6 @@ static void es7000_apicid_to_cpu_present(int phys_apicid, physid_mask_t *retmap) | |||
528 | ++cpu_id; | 533 | ++cpu_id; |
529 | } | 534 | } |
530 | 535 | ||
531 | /* Mapping from cpu number to logical apicid */ | ||
532 | static int es7000_cpu_to_logical_apicid(int cpu) | ||
533 | { | ||
534 | #ifdef CONFIG_SMP | ||
535 | if (cpu >= nr_cpu_ids) | ||
536 | return BAD_APICID; | ||
537 | return cpu_2_logical_apicid[cpu]; | ||
538 | #else | ||
539 | return logical_smp_processor_id(); | ||
540 | #endif | ||
541 | } | ||
542 | |||
543 | static void es7000_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap) | 536 | static void es7000_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap) |
544 | { | 537 | { |
545 | /* For clustered we don't have a good way to do this yet - hack */ | 538 | /* For clustered we don't have a good way to do this yet - hack */ |
@@ -561,7 +554,7 @@ static unsigned int es7000_cpu_mask_to_apicid(const struct cpumask *cpumask) | |||
561 | * The cpus in the mask must all be on the apic cluster. | 554 | * The cpus in the mask must all be on the apic cluster. |
562 | */ | 555 | */ |
563 | for_each_cpu(cpu, cpumask) { | 556 | for_each_cpu(cpu, cpumask) { |
564 | int new_apicid = es7000_cpu_to_logical_apicid(cpu); | 557 | int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu); |
565 | 558 | ||
566 | if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { | 559 | if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { |
567 | WARN(1, "Not a valid mask!"); | 560 | WARN(1, "Not a valid mask!"); |
@@ -578,7 +571,7 @@ static unsigned int | |||
578 | es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask, | 571 | es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask, |
579 | const struct cpumask *andmask) | 572 | const struct cpumask *andmask) |
580 | { | 573 | { |
581 | int apicid = es7000_cpu_to_logical_apicid(0); | 574 | int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0); |
582 | cpumask_var_t cpumask; | 575 | cpumask_var_t cpumask; |
583 | 576 | ||
584 | if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) | 577 | if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) |
@@ -655,8 +648,6 @@ struct apic __refdata apic_es7000_cluster = { | |||
655 | .ioapic_phys_id_map = es7000_ioapic_phys_id_map, | 648 | .ioapic_phys_id_map = es7000_ioapic_phys_id_map, |
656 | .setup_apic_routing = es7000_setup_apic_routing, | 649 | .setup_apic_routing = es7000_setup_apic_routing, |
657 | .multi_timer_check = NULL, | 650 | .multi_timer_check = NULL, |
658 | .apicid_to_node = es7000_apicid_to_node, | ||
659 | .cpu_to_logical_apicid = es7000_cpu_to_logical_apicid, | ||
660 | .cpu_present_to_apicid = es7000_cpu_present_to_apicid, | 651 | .cpu_present_to_apicid = es7000_cpu_present_to_apicid, |
661 | .apicid_to_cpu_present = es7000_apicid_to_cpu_present, | 652 | .apicid_to_cpu_present = es7000_apicid_to_cpu_present, |
662 | .setup_portio_remap = NULL, | 653 | .setup_portio_remap = NULL, |
@@ -695,6 +686,9 @@ struct apic __refdata apic_es7000_cluster = { | |||
695 | .icr_write = native_apic_icr_write, | 686 | .icr_write = native_apic_icr_write, |
696 | .wait_icr_idle = native_apic_wait_icr_idle, | 687 | .wait_icr_idle = native_apic_wait_icr_idle, |
697 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, | 688 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, |
689 | |||
690 | .x86_32_early_logical_apicid = es7000_early_logical_apicid, | ||
691 | .x86_32_numa_cpu_node = es7000_numa_cpu_node, | ||
698 | }; | 692 | }; |
699 | 693 | ||
700 | struct apic __refdata apic_es7000 = { | 694 | struct apic __refdata apic_es7000 = { |
@@ -720,8 +714,6 @@ struct apic __refdata apic_es7000 = { | |||
720 | .ioapic_phys_id_map = es7000_ioapic_phys_id_map, | 714 | .ioapic_phys_id_map = es7000_ioapic_phys_id_map, |
721 | .setup_apic_routing = es7000_setup_apic_routing, | 715 | .setup_apic_routing = es7000_setup_apic_routing, |
722 | .multi_timer_check = NULL, | 716 | .multi_timer_check = NULL, |
723 | .apicid_to_node = es7000_apicid_to_node, | ||
724 | .cpu_to_logical_apicid = es7000_cpu_to_logical_apicid, | ||
725 | .cpu_present_to_apicid = es7000_cpu_present_to_apicid, | 717 | .cpu_present_to_apicid = es7000_cpu_present_to_apicid, |
726 | .apicid_to_cpu_present = es7000_apicid_to_cpu_present, | 718 | .apicid_to_cpu_present = es7000_apicid_to_cpu_present, |
727 | .setup_portio_remap = NULL, | 719 | .setup_portio_remap = NULL, |
@@ -758,4 +750,7 @@ struct apic __refdata apic_es7000 = { | |||
758 | .icr_write = native_apic_icr_write, | 750 | .icr_write = native_apic_icr_write, |
759 | .wait_icr_idle = native_apic_wait_icr_idle, | 751 | .wait_icr_idle = native_apic_wait_icr_idle, |
760 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, | 752 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, |
753 | |||
754 | .x86_32_early_logical_apicid = es7000_early_logical_apicid, | ||
755 | .x86_32_numa_cpu_node = es7000_numa_cpu_node, | ||
761 | }; | 756 | }; |
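
Throughout this series the driver-private cpu_2_logical_apicid[] array gives way to the generic x86_cpu_to_logical_apicid early per-cpu variable, populated via the new ->x86_32_early_logical_apicid() callback (on ES7000 the logical ID simply equals the BIOS physical ID). The reworked cluster check then reads that variable directly. A kernel-context sketch of the loop, mirroring the hunk above; the function name is a stand-in, and APIC_CLUSTER(), WARN(), BAD_APICID and early_per_cpu() come from the usual x86/apic headers.

```c
/* All CPUs in the mask must sit in one APIC cluster; every logical APIC ID
 * now comes from the generic early per-cpu array instead of a per-driver
 * cpu_2_logical_apicid[] table. */
static unsigned int cluster_apicid_for_mask(const struct cpumask *cpumask)
{
	unsigned int round = 0;
	int cpu, apicid = 0;

	for_each_cpu(cpu, cpumask) {
		int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);

		if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
			WARN(1, "Not a valid mask!");
			return BAD_APICID;
		}
		apicid |= new_apicid;
		round++;
	}
	return apicid;
}
```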
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c index 79fd43ca6f96..c4e557a1ebb6 100644 --- a/arch/x86/kernel/apic/hw_nmi.c +++ b/arch/x86/kernel/apic/hw_nmi.c | |||
@@ -83,7 +83,6 @@ arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self, | |||
83 | arch_spin_lock(&lock); | 83 | arch_spin_lock(&lock); |
84 | printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu); | 84 | printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu); |
85 | show_regs(regs); | 85 | show_regs(regs); |
86 | dump_stack(); | ||
87 | arch_spin_unlock(&lock); | 86 | arch_spin_unlock(&lock); |
88 | cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); | 87 | cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); |
89 | return NOTIFY_STOP; | 88 | return NOTIFY_STOP; |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index ca9e2a3545a9..4b5ebd26f565 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -108,7 +108,10 @@ DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); | |||
108 | 108 | ||
109 | int skip_ioapic_setup; | 109 | int skip_ioapic_setup; |
110 | 110 | ||
111 | void arch_disable_smp_support(void) | 111 | /** |
112 | * disable_ioapic_support() - disables ioapic support at runtime | ||
113 | */ | ||
114 | void disable_ioapic_support(void) | ||
112 | { | 115 | { |
113 | #ifdef CONFIG_PCI | 116 | #ifdef CONFIG_PCI |
114 | noioapicquirk = 1; | 117 | noioapicquirk = 1; |
@@ -120,11 +123,14 @@ void arch_disable_smp_support(void) | |||
120 | static int __init parse_noapic(char *str) | 123 | static int __init parse_noapic(char *str) |
121 | { | 124 | { |
122 | /* disable IO-APIC */ | 125 | /* disable IO-APIC */ |
123 | arch_disable_smp_support(); | 126 | disable_ioapic_support(); |
124 | return 0; | 127 | return 0; |
125 | } | 128 | } |
126 | early_param("noapic", parse_noapic); | 129 | early_param("noapic", parse_noapic); |
127 | 130 | ||
131 | static int io_apic_setup_irq_pin_once(unsigned int irq, int node, | ||
132 | struct io_apic_irq_attr *attr); | ||
133 | |||
128 | /* Will be called in mpparse/acpi/sfi codes for saving IRQ info */ | 134 | /* Will be called in mpparse/acpi/sfi codes for saving IRQ info */ |
129 | void mp_save_irq(struct mpc_intsrc *m) | 135 | void mp_save_irq(struct mpc_intsrc *m) |
130 | { | 136 | { |
@@ -181,7 +187,7 @@ int __init arch_early_irq_init(void) | |||
181 | irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs); | 187 | irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs); |
182 | 188 | ||
183 | for (i = 0; i < count; i++) { | 189 | for (i = 0; i < count; i++) { |
184 | set_irq_chip_data(i, &cfg[i]); | 190 | irq_set_chip_data(i, &cfg[i]); |
185 | zalloc_cpumask_var_node(&cfg[i].domain, GFP_KERNEL, node); | 191 | zalloc_cpumask_var_node(&cfg[i].domain, GFP_KERNEL, node); |
186 | zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_KERNEL, node); | 192 | zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_KERNEL, node); |
187 | /* | 193 | /* |
@@ -200,7 +206,7 @@ int __init arch_early_irq_init(void) | |||
200 | #ifdef CONFIG_SPARSE_IRQ | 206 | #ifdef CONFIG_SPARSE_IRQ |
201 | static struct irq_cfg *irq_cfg(unsigned int irq) | 207 | static struct irq_cfg *irq_cfg(unsigned int irq) |
202 | { | 208 | { |
203 | return get_irq_chip_data(irq); | 209 | return irq_get_chip_data(irq); |
204 | } | 210 | } |
205 | 211 | ||
206 | static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node) | 212 | static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node) |
@@ -226,7 +232,7 @@ static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) | |||
226 | { | 232 | { |
227 | if (!cfg) | 233 | if (!cfg) |
228 | return; | 234 | return; |
229 | set_irq_chip_data(at, NULL); | 235 | irq_set_chip_data(at, NULL); |
230 | free_cpumask_var(cfg->domain); | 236 | free_cpumask_var(cfg->domain); |
231 | free_cpumask_var(cfg->old_domain); | 237 | free_cpumask_var(cfg->old_domain); |
232 | kfree(cfg); | 238 | kfree(cfg); |
@@ -256,14 +262,14 @@ static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node) | |||
256 | if (res < 0) { | 262 | if (res < 0) { |
257 | if (res != -EEXIST) | 263 | if (res != -EEXIST) |
258 | return NULL; | 264 | return NULL; |
259 | cfg = get_irq_chip_data(at); | 265 | cfg = irq_get_chip_data(at); |
260 | if (cfg) | 266 | if (cfg) |
261 | return cfg; | 267 | return cfg; |
262 | } | 268 | } |
263 | 269 | ||
264 | cfg = alloc_irq_cfg(at, node); | 270 | cfg = alloc_irq_cfg(at, node); |
265 | if (cfg) | 271 | if (cfg) |
266 | set_irq_chip_data(at, cfg); | 272 | irq_set_chip_data(at, cfg); |
267 | else | 273 | else |
268 | irq_free_desc(at); | 274 | irq_free_desc(at); |
269 | return cfg; | 275 | return cfg; |
@@ -818,7 +824,7 @@ static int EISA_ELCR(unsigned int irq) | |||
818 | #define default_MCA_trigger(idx) (1) | 824 | #define default_MCA_trigger(idx) (1) |
819 | #define default_MCA_polarity(idx) default_ISA_polarity(idx) | 825 | #define default_MCA_polarity(idx) default_ISA_polarity(idx) |
820 | 826 | ||
821 | static int MPBIOS_polarity(int idx) | 827 | static int irq_polarity(int idx) |
822 | { | 828 | { |
823 | int bus = mp_irqs[idx].srcbus; | 829 | int bus = mp_irqs[idx].srcbus; |
824 | int polarity; | 830 | int polarity; |
@@ -860,7 +866,7 @@ static int MPBIOS_polarity(int idx) | |||
860 | return polarity; | 866 | return polarity; |
861 | } | 867 | } |
862 | 868 | ||
863 | static int MPBIOS_trigger(int idx) | 869 | static int irq_trigger(int idx) |
864 | { | 870 | { |
865 | int bus = mp_irqs[idx].srcbus; | 871 | int bus = mp_irqs[idx].srcbus; |
866 | int trigger; | 872 | int trigger; |
@@ -932,16 +938,6 @@ static int MPBIOS_trigger(int idx) | |||
932 | return trigger; | 938 | return trigger; |
933 | } | 939 | } |
934 | 940 | ||
935 | static inline int irq_polarity(int idx) | ||
936 | { | ||
937 | return MPBIOS_polarity(idx); | ||
938 | } | ||
939 | |||
940 | static inline int irq_trigger(int idx) | ||
941 | { | ||
942 | return MPBIOS_trigger(idx); | ||
943 | } | ||
944 | |||
945 | static int pin_2_irq(int idx, int apic, int pin) | 941 | static int pin_2_irq(int idx, int apic, int pin) |
946 | { | 942 | { |
947 | int irq; | 943 | int irq; |
@@ -1189,7 +1185,7 @@ void __setup_vector_irq(int cpu) | |||
1189 | raw_spin_lock(&vector_lock); | 1185 | raw_spin_lock(&vector_lock); |
1190 | /* Mark the inuse vectors */ | 1186 | /* Mark the inuse vectors */ |
1191 | for_each_active_irq(irq) { | 1187 | for_each_active_irq(irq) { |
1192 | cfg = get_irq_chip_data(irq); | 1188 | cfg = irq_get_chip_data(irq); |
1193 | if (!cfg) | 1189 | if (!cfg) |
1194 | continue; | 1190 | continue; |
1195 | /* | 1191 | /* |
@@ -1220,10 +1216,6 @@ void __setup_vector_irq(int cpu) | |||
1220 | static struct irq_chip ioapic_chip; | 1216 | static struct irq_chip ioapic_chip; |
1221 | static struct irq_chip ir_ioapic_chip; | 1217 | static struct irq_chip ir_ioapic_chip; |
1222 | 1218 | ||
1223 | #define IOAPIC_AUTO -1 | ||
1224 | #define IOAPIC_EDGE 0 | ||
1225 | #define IOAPIC_LEVEL 1 | ||
1226 | |||
1227 | #ifdef CONFIG_X86_32 | 1219 | #ifdef CONFIG_X86_32 |
1228 | static inline int IO_APIC_irq_trigger(int irq) | 1220 | static inline int IO_APIC_irq_trigger(int irq) |
1229 | { | 1221 | { |
@@ -1248,35 +1240,31 @@ static inline int IO_APIC_irq_trigger(int irq) | |||
1248 | } | 1240 | } |
1249 | #endif | 1241 | #endif |
1250 | 1242 | ||
1251 | static void ioapic_register_intr(unsigned int irq, unsigned long trigger) | 1243 | static void ioapic_register_intr(unsigned int irq, struct irq_cfg *cfg, |
1244 | unsigned long trigger) | ||
1252 | { | 1245 | { |
1246 | struct irq_chip *chip = &ioapic_chip; | ||
1247 | irq_flow_handler_t hdl; | ||
1248 | bool fasteoi; | ||
1253 | 1249 | ||
1254 | if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || | 1250 | if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || |
1255 | trigger == IOAPIC_LEVEL) | 1251 | trigger == IOAPIC_LEVEL) { |
1256 | irq_set_status_flags(irq, IRQ_LEVEL); | 1252 | irq_set_status_flags(irq, IRQ_LEVEL); |
1257 | else | 1253 | fasteoi = true; |
1254 | } else { | ||
1258 | irq_clear_status_flags(irq, IRQ_LEVEL); | 1255 | irq_clear_status_flags(irq, IRQ_LEVEL); |
1256 | fasteoi = false; | ||
1257 | } | ||
1259 | 1258 | ||
1260 | if (irq_remapped(get_irq_chip_data(irq))) { | 1259 | if (irq_remapped(cfg)) { |
1261 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); | 1260 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); |
1262 | if (trigger) | 1261 | chip = &ir_ioapic_chip; |
1263 | set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, | 1262 | fasteoi = trigger != 0; |
1264 | handle_fasteoi_irq, | ||
1265 | "fasteoi"); | ||
1266 | else | ||
1267 | set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, | ||
1268 | handle_edge_irq, "edge"); | ||
1269 | return; | ||
1270 | } | 1263 | } |
1271 | 1264 | ||
1272 | if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || | 1265 | hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq; |
1273 | trigger == IOAPIC_LEVEL) | 1266 | irq_set_chip_and_handler_name(irq, chip, hdl, |
1274 | set_irq_chip_and_handler_name(irq, &ioapic_chip, | 1267 | fasteoi ? "fasteoi" : "edge"); |
1275 | handle_fasteoi_irq, | ||
1276 | "fasteoi"); | ||
1277 | else | ||
1278 | set_irq_chip_and_handler_name(irq, &ioapic_chip, | ||
1279 | handle_edge_irq, "edge"); | ||
1280 | } | 1268 | } |
1281 | 1269 | ||
1282 | static int setup_ioapic_entry(int apic_id, int irq, | 1270 | static int setup_ioapic_entry(int apic_id, int irq, |
@@ -1374,7 +1362,7 @@ static void setup_ioapic_irq(int apic_id, int pin, unsigned int irq, | |||
1374 | return; | 1362 | return; |
1375 | } | 1363 | } |
1376 | 1364 | ||
1377 | ioapic_register_intr(irq, trigger); | 1365 | ioapic_register_intr(irq, cfg, trigger); |
1378 | if (irq < legacy_pic->nr_legacy_irqs) | 1366 | if (irq < legacy_pic->nr_legacy_irqs) |
1379 | legacy_pic->mask(irq); | 1367 | legacy_pic->mask(irq); |
1380 | 1368 | ||
@@ -1385,33 +1373,26 @@ static struct { | |||
1385 | DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1); | 1373 | DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1); |
1386 | } mp_ioapic_routing[MAX_IO_APICS]; | 1374 | } mp_ioapic_routing[MAX_IO_APICS]; |
1387 | 1375 | ||
1388 | static void __init setup_IO_APIC_irqs(void) | 1376 | static bool __init io_apic_pin_not_connected(int idx, int apic_id, int pin) |
1389 | { | 1377 | { |
1390 | int apic_id, pin, idx, irq, notcon = 0; | 1378 | if (idx != -1) |
1391 | int node = cpu_to_node(0); | 1379 | return false; |
1392 | struct irq_cfg *cfg; | ||
1393 | 1380 | ||
1394 | apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); | 1381 | apic_printk(APIC_VERBOSE, KERN_DEBUG " apic %d pin %d not connected\n", |
1382 | mp_ioapics[apic_id].apicid, pin); | ||
1383 | return true; | ||
1384 | } | ||
1385 | |||
1386 | static void __init __io_apic_setup_irqs(unsigned int apic_id) | ||
1387 | { | ||
1388 | int idx, node = cpu_to_node(0); | ||
1389 | struct io_apic_irq_attr attr; | ||
1390 | unsigned int pin, irq; | ||
1395 | 1391 | ||
1396 | for (apic_id = 0; apic_id < nr_ioapics; apic_id++) | ||
1397 | for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) { | 1392 | for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) { |
1398 | idx = find_irq_entry(apic_id, pin, mp_INT); | 1393 | idx = find_irq_entry(apic_id, pin, mp_INT); |
1399 | if (idx == -1) { | 1394 | if (io_apic_pin_not_connected(idx, apic_id, pin)) |
1400 | if (!notcon) { | ||
1401 | notcon = 1; | ||
1402 | apic_printk(APIC_VERBOSE, | ||
1403 | KERN_DEBUG " %d-%d", | ||
1404 | mp_ioapics[apic_id].apicid, pin); | ||
1405 | } else | ||
1406 | apic_printk(APIC_VERBOSE, " %d-%d", | ||
1407 | mp_ioapics[apic_id].apicid, pin); | ||
1408 | continue; | 1395 | continue; |
1409 | } | ||
1410 | if (notcon) { | ||
1411 | apic_printk(APIC_VERBOSE, | ||
1412 | " (apicid-pin) not connected\n"); | ||
1413 | notcon = 0; | ||
1414 | } | ||
1415 | 1396 | ||
1416 | irq = pin_2_irq(idx, apic_id, pin); | 1397 | irq = pin_2_irq(idx, apic_id, pin); |
1417 | 1398 | ||
@@ -1423,25 +1404,24 @@ static void __init setup_IO_APIC_irqs(void) | |||
1423 | * installed and if it returns 1: | 1404 | * installed and if it returns 1: |
1424 | */ | 1405 | */ |
1425 | if (apic->multi_timer_check && | 1406 | if (apic->multi_timer_check && |
1426 | apic->multi_timer_check(apic_id, irq)) | 1407 | apic->multi_timer_check(apic_id, irq)) |
1427 | continue; | 1408 | continue; |
1428 | 1409 | ||
1429 | cfg = alloc_irq_and_cfg_at(irq, node); | 1410 | set_io_apic_irq_attr(&attr, apic_id, pin, irq_trigger(idx), |
1430 | if (!cfg) | 1411 | irq_polarity(idx)); |
1431 | continue; | ||
1432 | 1412 | ||
1433 | add_pin_to_irq_node(cfg, node, apic_id, pin); | 1413 | io_apic_setup_irq_pin(irq, node, &attr); |
1434 | /* | ||
1435 | * don't mark it in pin_programmed, so later acpi could | ||
1436 | * set it correctly when irq < 16 | ||
1437 | */ | ||
1438 | setup_ioapic_irq(apic_id, pin, irq, cfg, irq_trigger(idx), | ||
1439 | irq_polarity(idx)); | ||
1440 | } | 1414 | } |
1415 | } | ||
1441 | 1416 | ||
1442 | if (notcon) | 1417 | static void __init setup_IO_APIC_irqs(void) |
1443 | apic_printk(APIC_VERBOSE, | 1418 | { |
1444 | " (apicid-pin) not connected\n"); | 1419 | unsigned int apic_id; |
1420 | |||
1421 | apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); | ||
1422 | |||
1423 | for (apic_id = 0; apic_id < nr_ioapics; apic_id++) | ||
1424 | __io_apic_setup_irqs(apic_id); | ||
1445 | } | 1425 | } |
1446 | 1426 | ||
1447 | /* | 1427 | /* |
@@ -1452,7 +1432,7 @@ static void __init setup_IO_APIC_irqs(void) | |||
1452 | void setup_IO_APIC_irq_extra(u32 gsi) | 1432 | void setup_IO_APIC_irq_extra(u32 gsi) |
1453 | { | 1433 | { |
1454 | int apic_id = 0, pin, idx, irq, node = cpu_to_node(0); | 1434 | int apic_id = 0, pin, idx, irq, node = cpu_to_node(0); |
1455 | struct irq_cfg *cfg; | 1435 | struct io_apic_irq_attr attr; |
1456 | 1436 | ||
1457 | /* | 1437 | /* |
1458 | * Convert 'gsi' to 'ioapic.pin'. | 1438 | * Convert 'gsi' to 'ioapic.pin'. |
@@ -1472,21 +1452,10 @@ void setup_IO_APIC_irq_extra(u32 gsi) | |||
1472 | if (apic_id == 0 || irq < NR_IRQS_LEGACY) | 1452 | if (apic_id == 0 || irq < NR_IRQS_LEGACY) |
1473 | return; | 1453 | return; |
1474 | 1454 | ||
1475 | cfg = alloc_irq_and_cfg_at(irq, node); | 1455 | set_io_apic_irq_attr(&attr, apic_id, pin, irq_trigger(idx), |
1476 | if (!cfg) | 1456 | irq_polarity(idx)); |
1477 | return; | ||
1478 | |||
1479 | add_pin_to_irq_node(cfg, node, apic_id, pin); | ||
1480 | |||
1481 | if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) { | ||
1482 | pr_debug("Pin %d-%d already programmed\n", | ||
1483 | mp_ioapics[apic_id].apicid, pin); | ||
1484 | return; | ||
1485 | } | ||
1486 | set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed); | ||
1487 | 1457 | ||
1488 | setup_ioapic_irq(apic_id, pin, irq, cfg, | 1458 | io_apic_setup_irq_pin_once(irq, node, &attr); |
1489 | irq_trigger(idx), irq_polarity(idx)); | ||
1490 | } | 1459 | } |
1491 | 1460 | ||
1492 | /* | 1461 | /* |
@@ -1518,7 +1487,8 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin, | |||
1518 | * The timer IRQ doesn't have to know that behind the | 1487 | * The timer IRQ doesn't have to know that behind the |
1519 | * scene we may have a 8259A-master in AEOI mode ... | 1488 | * scene we may have a 8259A-master in AEOI mode ... |
1520 | */ | 1489 | */ |
1521 | set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge"); | 1490 | irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, |
1491 | "edge"); | ||
1522 | 1492 | ||
1523 | /* | 1493 | /* |
1524 | * Add it to the IO-APIC irq-routing table: | 1494 | * Add it to the IO-APIC irq-routing table: |
@@ -1625,7 +1595,7 @@ __apicdebuginit(void) print_IO_APIC(void) | |||
1625 | for_each_active_irq(irq) { | 1595 | for_each_active_irq(irq) { |
1626 | struct irq_pin_list *entry; | 1596 | struct irq_pin_list *entry; |
1627 | 1597 | ||
1628 | cfg = get_irq_chip_data(irq); | 1598 | cfg = irq_get_chip_data(irq); |
1629 | if (!cfg) | 1599 | if (!cfg) |
1630 | continue; | 1600 | continue; |
1631 | entry = cfg->irq_2_pin; | 1601 | entry = cfg->irq_2_pin; |
@@ -2391,7 +2361,7 @@ static void irq_complete_move(struct irq_cfg *cfg) | |||
2391 | 2361 | ||
2392 | void irq_force_complete_move(int irq) | 2362 | void irq_force_complete_move(int irq) |
2393 | { | 2363 | { |
2394 | struct irq_cfg *cfg = get_irq_chip_data(irq); | 2364 | struct irq_cfg *cfg = irq_get_chip_data(irq); |
2395 | 2365 | ||
2396 | if (!cfg) | 2366 | if (!cfg) |
2397 | return; | 2367 | return; |
@@ -2405,7 +2375,7 @@ static inline void irq_complete_move(struct irq_cfg *cfg) { } | |||
2405 | static void ack_apic_edge(struct irq_data *data) | 2375 | static void ack_apic_edge(struct irq_data *data) |
2406 | { | 2376 | { |
2407 | irq_complete_move(data->chip_data); | 2377 | irq_complete_move(data->chip_data); |
2408 | move_native_irq(data->irq); | 2378 | irq_move_irq(data); |
2409 | ack_APIC_irq(); | 2379 | ack_APIC_irq(); |
2410 | } | 2380 | } |
2411 | 2381 | ||
@@ -2462,7 +2432,7 @@ static void ack_apic_level(struct irq_data *data) | |||
2462 | irq_complete_move(cfg); | 2432 | irq_complete_move(cfg); |
2463 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 2433 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
2464 | /* If we are moving the irq we need to mask it */ | 2434 | /* If we are moving the irq we need to mask it */ |
2465 | if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) { | 2435 | if (unlikely(irqd_is_setaffinity_pending(data))) { |
2466 | do_unmask_irq = 1; | 2436 | do_unmask_irq = 1; |
2467 | mask_ioapic(cfg); | 2437 | mask_ioapic(cfg); |
2468 | } | 2438 | } |
@@ -2551,7 +2521,7 @@ static void ack_apic_level(struct irq_data *data) | |||
2551 | * and you can go talk to the chipset vendor about it. | 2521 | * and you can go talk to the chipset vendor about it. |
2552 | */ | 2522 | */ |
2553 | if (!io_apic_level_ack_pending(cfg)) | 2523 | if (!io_apic_level_ack_pending(cfg)) |
2554 | move_masked_irq(irq); | 2524 | irq_move_masked_irq(data); |
2555 | unmask_ioapic(cfg); | 2525 | unmask_ioapic(cfg); |
2556 | } | 2526 | } |
2557 | } | 2527 | } |
@@ -2614,7 +2584,7 @@ static inline void init_IO_APIC_traps(void) | |||
2614 | * 0x80, because int 0x80 is hm, kind of importantish. ;) | 2584 | * 0x80, because int 0x80 is hm, kind of importantish. ;) |
2615 | */ | 2585 | */ |
2616 | for_each_active_irq(irq) { | 2586 | for_each_active_irq(irq) { |
2617 | cfg = get_irq_chip_data(irq); | 2587 | cfg = irq_get_chip_data(irq); |
2618 | if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { | 2588 | if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { |
2619 | /* | 2589 | /* |
2620 | * Hmm.. We don't have an entry for this, | 2590 | * Hmm.. We don't have an entry for this, |
@@ -2625,7 +2595,7 @@ static inline void init_IO_APIC_traps(void) | |||
2625 | legacy_pic->make_irq(irq); | 2595 | legacy_pic->make_irq(irq); |
2626 | else | 2596 | else |
2627 | /* Strange. Oh, well.. */ | 2597 | /* Strange. Oh, well.. */ |
2628 | set_irq_chip(irq, &no_irq_chip); | 2598 | irq_set_chip(irq, &no_irq_chip); |
2629 | } | 2599 | } |
2630 | } | 2600 | } |
2631 | } | 2601 | } |
@@ -2665,7 +2635,7 @@ static struct irq_chip lapic_chip __read_mostly = { | |||
2665 | static void lapic_register_intr(int irq) | 2635 | static void lapic_register_intr(int irq) |
2666 | { | 2636 | { |
2667 | irq_clear_status_flags(irq, IRQ_LEVEL); | 2637 | irq_clear_status_flags(irq, IRQ_LEVEL); |
2668 | set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, | 2638 | irq_set_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, |
2669 | "edge"); | 2639 | "edge"); |
2670 | } | 2640 | } |
2671 | 2641 | ||
@@ -2749,7 +2719,7 @@ int timer_through_8259 __initdata; | |||
2749 | */ | 2719 | */ |
2750 | static inline void __init check_timer(void) | 2720 | static inline void __init check_timer(void) |
2751 | { | 2721 | { |
2752 | struct irq_cfg *cfg = get_irq_chip_data(0); | 2722 | struct irq_cfg *cfg = irq_get_chip_data(0); |
2753 | int node = cpu_to_node(0); | 2723 | int node = cpu_to_node(0); |
2754 | int apic1, pin1, apic2, pin2; | 2724 | int apic1, pin1, apic2, pin2; |
2755 | unsigned long flags; | 2725 | unsigned long flags; |
@@ -3060,7 +3030,7 @@ unsigned int create_irq_nr(unsigned int from, int node) | |||
3060 | raw_spin_unlock_irqrestore(&vector_lock, flags); | 3030 | raw_spin_unlock_irqrestore(&vector_lock, flags); |
3061 | 3031 | ||
3062 | if (ret) { | 3032 | if (ret) { |
3063 | set_irq_chip_data(irq, cfg); | 3033 | irq_set_chip_data(irq, cfg); |
3064 | irq_clear_status_flags(irq, IRQ_NOREQUEST); | 3034 | irq_clear_status_flags(irq, IRQ_NOREQUEST); |
3065 | } else { | 3035 | } else { |
3066 | free_irq_at(irq, cfg); | 3036 | free_irq_at(irq, cfg); |
@@ -3085,7 +3055,7 @@ int create_irq(void) | |||
3085 | 3055 | ||
3086 | void destroy_irq(unsigned int irq) | 3056 | void destroy_irq(unsigned int irq) |
3087 | { | 3057 | { |
3088 | struct irq_cfg *cfg = get_irq_chip_data(irq); | 3058 | struct irq_cfg *cfg = irq_get_chip_data(irq); |
3089 | unsigned long flags; | 3059 | unsigned long flags; |
3090 | 3060 | ||
3091 | irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); | 3061 | irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); |
@@ -3119,7 +3089,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, | |||
3119 | 3089 | ||
3120 | dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); | 3090 | dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); |
3121 | 3091 | ||
3122 | if (irq_remapped(get_irq_chip_data(irq))) { | 3092 | if (irq_remapped(cfg)) { |
3123 | struct irte irte; | 3093 | struct irte irte; |
3124 | int ir_index; | 3094 | int ir_index; |
3125 | u16 sub_handle; | 3095 | u16 sub_handle; |
@@ -3291,6 +3261,7 @@ static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec) | |||
3291 | 3261 | ||
3292 | static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) | 3262 | static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) |
3293 | { | 3263 | { |
3264 | struct irq_chip *chip = &msi_chip; | ||
3294 | struct msi_msg msg; | 3265 | struct msi_msg msg; |
3295 | int ret; | 3266 | int ret; |
3296 | 3267 | ||
@@ -3298,14 +3269,15 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) | |||
3298 | if (ret < 0) | 3269 | if (ret < 0) |
3299 | return ret; | 3270 | return ret; |
3300 | 3271 | ||
3301 | set_irq_msi(irq, msidesc); | 3272 | irq_set_msi_desc(irq, msidesc); |
3302 | write_msi_msg(irq, &msg); | 3273 | write_msi_msg(irq, &msg); |
3303 | 3274 | ||
3304 | if (irq_remapped(get_irq_chip_data(irq))) { | 3275 | if (irq_remapped(irq_get_chip_data(irq))) { |
3305 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); | 3276 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); |
3306 | set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge"); | 3277 | chip = &msi_ir_chip; |
3307 | } else | 3278 | } |
3308 | set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge"); | 3279 | |
3280 | irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); | ||
3309 | 3281 | ||
3310 | dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq); | 3282 | dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq); |
3311 | 3283 | ||
@@ -3423,8 +3395,8 @@ int arch_setup_dmar_msi(unsigned int irq) | |||
3423 | if (ret < 0) | 3395 | if (ret < 0) |
3424 | return ret; | 3396 | return ret; |
3425 | dmar_msi_write(irq, &msg); | 3397 | dmar_msi_write(irq, &msg); |
3426 | set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq, | 3398 | irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq, |
3427 | "edge"); | 3399 | "edge"); |
3428 | return 0; | 3400 | return 0; |
3429 | } | 3401 | } |
3430 | #endif | 3402 | #endif |
@@ -3482,6 +3454,7 @@ static struct irq_chip hpet_msi_type = { | |||
3482 | 3454 | ||
3483 | int arch_setup_hpet_msi(unsigned int irq, unsigned int id) | 3455 | int arch_setup_hpet_msi(unsigned int irq, unsigned int id) |
3484 | { | 3456 | { |
3457 | struct irq_chip *chip = &hpet_msi_type; | ||
3485 | struct msi_msg msg; | 3458 | struct msi_msg msg; |
3486 | int ret; | 3459 | int ret; |
3487 | 3460 | ||
@@ -3501,15 +3474,12 @@ int arch_setup_hpet_msi(unsigned int irq, unsigned int id) | |||
3501 | if (ret < 0) | 3474 | if (ret < 0) |
3502 | return ret; | 3475 | return ret; |
3503 | 3476 | ||
3504 | hpet_msi_write(get_irq_data(irq), &msg); | 3477 | hpet_msi_write(irq_get_handler_data(irq), &msg); |
3505 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); | 3478 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); |
3506 | if (irq_remapped(get_irq_chip_data(irq))) | 3479 | if (irq_remapped(irq_get_chip_data(irq))) |
3507 | set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type, | 3480 | chip = &ir_hpet_msi_type; |
3508 | handle_edge_irq, "edge"); | ||
3509 | else | ||
3510 | set_irq_chip_and_handler_name(irq, &hpet_msi_type, | ||
3511 | handle_edge_irq, "edge"); | ||
3512 | 3481 | ||
3482 | irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); | ||
3513 | return 0; | 3483 | return 0; |
3514 | } | 3484 | } |
3515 | #endif | 3485 | #endif |
@@ -3596,7 +3566,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) | |||
3596 | 3566 | ||
3597 | write_ht_irq_msg(irq, &msg); | 3567 | write_ht_irq_msg(irq, &msg); |
3598 | 3568 | ||
3599 | set_irq_chip_and_handler_name(irq, &ht_irq_chip, | 3569 | irq_set_chip_and_handler_name(irq, &ht_irq_chip, |
3600 | handle_edge_irq, "edge"); | 3570 | handle_edge_irq, "edge"); |
3601 | 3571 | ||
3602 | dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq); | 3572 | dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq); |
@@ -3605,7 +3575,40 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) | |||
3605 | } | 3575 | } |
3606 | #endif /* CONFIG_HT_IRQ */ | 3576 | #endif /* CONFIG_HT_IRQ */ |
3607 | 3577 | ||
3608 | int __init io_apic_get_redir_entries (int ioapic) | 3578 | int |
3579 | io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr) | ||
3580 | { | ||
3581 | struct irq_cfg *cfg = alloc_irq_and_cfg_at(irq, node); | ||
3582 | int ret; | ||
3583 | |||
3584 | if (!cfg) | ||
3585 | return -EINVAL; | ||
3586 | ret = __add_pin_to_irq_node(cfg, node, attr->ioapic, attr->ioapic_pin); | ||
3587 | if (!ret) | ||
3588 | setup_ioapic_irq(attr->ioapic, attr->ioapic_pin, irq, cfg, | ||
3589 | attr->trigger, attr->polarity); | ||
3590 | return ret; | ||
3591 | } | ||
3592 | |||
3593 | static int io_apic_setup_irq_pin_once(unsigned int irq, int node, | ||
3594 | struct io_apic_irq_attr *attr) | ||
3595 | { | ||
3596 | unsigned int id = attr->ioapic, pin = attr->ioapic_pin; | ||
3597 | int ret; | ||
3598 | |||
3599 | /* Avoid redundant programming */ | ||
3600 | if (test_bit(pin, mp_ioapic_routing[id].pin_programmed)) { | ||
3601 | pr_debug("Pin %d-%d already programmed\n", | ||
3602 | mp_ioapics[id].apicid, pin); | ||
3603 | return 0; | ||
3604 | } | ||
3605 | ret = io_apic_setup_irq_pin(irq, node, attr); | ||
3606 | if (!ret) | ||
3607 | set_bit(pin, mp_ioapic_routing[id].pin_programmed); | ||
3608 | return ret; | ||
3609 | } | ||
3610 | |||
3611 | static int __init io_apic_get_redir_entries(int ioapic) | ||
3609 | { | 3612 | { |
3610 | union IO_APIC_reg_01 reg_01; | 3613 | union IO_APIC_reg_01 reg_01; |
3611 | unsigned long flags; | 3614 | unsigned long flags; |
@@ -3659,96 +3662,24 @@ int __init arch_probe_nr_irqs(void) | |||
3659 | } | 3662 | } |
3660 | #endif | 3663 | #endif |
3661 | 3664 | ||
3662 | static int __io_apic_set_pci_routing(struct device *dev, int irq, | 3665 | int io_apic_set_pci_routing(struct device *dev, int irq, |
3663 | struct io_apic_irq_attr *irq_attr) | 3666 | struct io_apic_irq_attr *irq_attr) |
3664 | { | 3667 | { |
3665 | struct irq_cfg *cfg; | ||
3666 | int node; | 3668 | int node; |
3667 | int ioapic, pin; | ||
3668 | int trigger, polarity; | ||
3669 | 3669 | ||
3670 | ioapic = irq_attr->ioapic; | ||
3671 | if (!IO_APIC_IRQ(irq)) { | 3670 | if (!IO_APIC_IRQ(irq)) { |
3672 | apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", | 3671 | apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", |
3673 | ioapic); | 3672 | irq_attr->ioapic); |
3674 | return -EINVAL; | 3673 | return -EINVAL; |
3675 | } | 3674 | } |
3676 | 3675 | ||
3677 | if (dev) | 3676 | node = dev ? dev_to_node(dev) : cpu_to_node(0); |
3678 | node = dev_to_node(dev); | ||
3679 | else | ||
3680 | node = cpu_to_node(0); | ||
3681 | |||
3682 | cfg = alloc_irq_and_cfg_at(irq, node); | ||
3683 | if (!cfg) | ||
3684 | return 0; | ||
3685 | |||
3686 | pin = irq_attr->ioapic_pin; | ||
3687 | trigger = irq_attr->trigger; | ||
3688 | polarity = irq_attr->polarity; | ||
3689 | 3677 | ||
3690 | /* | 3678 | return io_apic_setup_irq_pin_once(irq, node, irq_attr); |
3691 | * IRQs < 16 are already in the irq_2_pin[] map | ||
3692 | */ | ||
3693 | if (irq >= legacy_pic->nr_legacy_irqs) { | ||
3694 | if (__add_pin_to_irq_node(cfg, node, ioapic, pin)) { | ||
3695 | printk(KERN_INFO "can not add pin %d for irq %d\n", | ||
3696 | pin, irq); | ||
3697 | return 0; | ||
3698 | } | ||
3699 | } | ||
3700 | |||
3701 | setup_ioapic_irq(ioapic, pin, irq, cfg, trigger, polarity); | ||
3702 | |||
3703 | return 0; | ||
3704 | } | 3679 | } |
3705 | 3680 | ||
3706 | int io_apic_set_pci_routing(struct device *dev, int irq, | ||
3707 | struct io_apic_irq_attr *irq_attr) | ||
3708 | { | ||
3709 | int ioapic, pin; | ||
3710 | /* | ||
3711 | * Avoid pin reprogramming. PRTs typically include entries | ||
3712 | * with redundant pin->gsi mappings (but unique PCI devices); | ||
3713 | * we only program the IOAPIC on the first. | ||
3714 | */ | ||
3715 | ioapic = irq_attr->ioapic; | ||
3716 | pin = irq_attr->ioapic_pin; | ||
3717 | if (test_bit(pin, mp_ioapic_routing[ioapic].pin_programmed)) { | ||
3718 | pr_debug("Pin %d-%d already programmed\n", | ||
3719 | mp_ioapics[ioapic].apicid, pin); | ||
3720 | return 0; | ||
3721 | } | ||
3722 | set_bit(pin, mp_ioapic_routing[ioapic].pin_programmed); | ||
3723 | |||
3724 | return __io_apic_set_pci_routing(dev, irq, irq_attr); | ||
3725 | } | ||
3726 | |||
3727 | u8 __init io_apic_unique_id(u8 id) | ||
3728 | { | ||
3729 | #ifdef CONFIG_X86_32 | 3681 | #ifdef CONFIG_X86_32 |
3730 | if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && | 3682 | static int __init io_apic_get_unique_id(int ioapic, int apic_id) |
3731 | !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) | ||
3732 | return io_apic_get_unique_id(nr_ioapics, id); | ||
3733 | else | ||
3734 | return id; | ||
3735 | #else | ||
3736 | int i; | ||
3737 | DECLARE_BITMAP(used, 256); | ||
3738 | |||
3739 | bitmap_zero(used, 256); | ||
3740 | for (i = 0; i < nr_ioapics; i++) { | ||
3741 | struct mpc_ioapic *ia = &mp_ioapics[i]; | ||
3742 | __set_bit(ia->apicid, used); | ||
3743 | } | ||
3744 | if (!test_bit(id, used)) | ||
3745 | return id; | ||
3746 | return find_first_zero_bit(used, 256); | ||
3747 | #endif | ||
3748 | } | ||
3749 | |||
3750 | #ifdef CONFIG_X86_32 | ||
3751 | int __init io_apic_get_unique_id(int ioapic, int apic_id) | ||
3752 | { | 3683 | { |
3753 | union IO_APIC_reg_00 reg_00; | 3684 | union IO_APIC_reg_00 reg_00; |
3754 | static physid_mask_t apic_id_map = PHYSID_MASK_NONE; | 3685 | static physid_mask_t apic_id_map = PHYSID_MASK_NONE; |
@@ -3821,9 +3752,33 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id) | |||
3821 | 3752 | ||
3822 | return apic_id; | 3753 | return apic_id; |
3823 | } | 3754 | } |
3755 | |||
3756 | static u8 __init io_apic_unique_id(u8 id) | ||
3757 | { | ||
3758 | if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && | ||
3759 | !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) | ||
3760 | return io_apic_get_unique_id(nr_ioapics, id); | ||
3761 | else | ||
3762 | return id; | ||
3763 | } | ||
3764 | #else | ||
3765 | static u8 __init io_apic_unique_id(u8 id) | ||
3766 | { | ||
3767 | int i; | ||
3768 | DECLARE_BITMAP(used, 256); | ||
3769 | |||
3770 | bitmap_zero(used, 256); | ||
3771 | for (i = 0; i < nr_ioapics; i++) { | ||
3772 | struct mpc_ioapic *ia = &mp_ioapics[i]; | ||
3773 | __set_bit(ia->apicid, used); | ||
3774 | } | ||
3775 | if (!test_bit(id, used)) | ||
3776 | return id; | ||
3777 | return find_first_zero_bit(used, 256); | ||
3778 | } | ||
3824 | #endif | 3779 | #endif |
3825 | 3780 | ||
3826 | int __init io_apic_get_version(int ioapic) | 3781 | static int __init io_apic_get_version(int ioapic) |
3827 | { | 3782 | { |
3828 | union IO_APIC_reg_01 reg_01; | 3783 | union IO_APIC_reg_01 reg_01; |
3829 | unsigned long flags; | 3784 | unsigned long flags; |
@@ -3868,8 +3823,8 @@ int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity) | |||
3868 | void __init setup_ioapic_dest(void) | 3823 | void __init setup_ioapic_dest(void) |
3869 | { | 3824 | { |
3870 | int pin, ioapic, irq, irq_entry; | 3825 | int pin, ioapic, irq, irq_entry; |
3871 | struct irq_desc *desc; | ||
3872 | const struct cpumask *mask; | 3826 | const struct cpumask *mask; |
3827 | struct irq_data *idata; | ||
3873 | 3828 | ||
3874 | if (skip_ioapic_setup == 1) | 3829 | if (skip_ioapic_setup == 1) |
3875 | return; | 3830 | return; |
@@ -3884,21 +3839,20 @@ void __init setup_ioapic_dest(void) | |||
3884 | if ((ioapic > 0) && (irq > 16)) | 3839 | if ((ioapic > 0) && (irq > 16)) |
3885 | continue; | 3840 | continue; |
3886 | 3841 | ||
3887 | desc = irq_to_desc(irq); | 3842 | idata = irq_get_irq_data(irq); |
3888 | 3843 | ||
3889 | /* | 3844 | /* |
3890 | * Honour affinities which have been set in early boot | 3845 | * Honour affinities which have been set in early boot |
3891 | */ | 3846 | */ |
3892 | if (desc->status & | 3847 | if (!irqd_can_balance(idata) || irqd_affinity_was_set(idata)) |
3893 | (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) | 3848 | mask = idata->affinity; |
3894 | mask = desc->irq_data.affinity; | ||
3895 | else | 3849 | else |
3896 | mask = apic->target_cpus(); | 3850 | mask = apic->target_cpus(); |
3897 | 3851 | ||
3898 | if (intr_remapping_enabled) | 3852 | if (intr_remapping_enabled) |
3899 | ir_ioapic_set_affinity(&desc->irq_data, mask, false); | 3853 | ir_ioapic_set_affinity(idata, mask, false); |
3900 | else | 3854 | else |
3901 | ioapic_set_affinity(&desc->irq_data, mask, false); | 3855 | ioapic_set_affinity(idata, mask, false); |
3902 | } | 3856 | } |
3903 | 3857 | ||
3904 | } | 3858 | } |
@@ -4026,7 +3980,7 @@ int mp_find_ioapic_pin(int ioapic, u32 gsi) | |||
4026 | return gsi - mp_gsi_routing[ioapic].gsi_base; | 3980 | return gsi - mp_gsi_routing[ioapic].gsi_base; |
4027 | } | 3981 | } |
4028 | 3982 | ||
4029 | static int bad_ioapic(unsigned long address) | 3983 | static __init int bad_ioapic(unsigned long address) |
4030 | { | 3984 | { |
4031 | if (nr_ioapics >= MAX_IO_APICS) { | 3985 | if (nr_ioapics >= MAX_IO_APICS) { |
4032 | printk(KERN_WARNING "WARING: Max # of I/O APICs (%d) exceeded " | 3986 | printk(KERN_WARNING "WARING: Max # of I/O APICs (%d) exceeded " |
@@ -4086,20 +4040,16 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) | |||
4086 | /* Enable IOAPIC early just for system timer */ | 4040 | /* Enable IOAPIC early just for system timer */ |
4087 | void __init pre_init_apic_IRQ0(void) | 4041 | void __init pre_init_apic_IRQ0(void) |
4088 | { | 4042 | { |
4089 | struct irq_cfg *cfg; | 4043 | struct io_apic_irq_attr attr = { 0, 0, 0, 0 }; |
4090 | 4044 | ||
4091 | printk(KERN_INFO "Early APIC setup for system timer0\n"); | 4045 | printk(KERN_INFO "Early APIC setup for system timer0\n"); |
4092 | #ifndef CONFIG_SMP | 4046 | #ifndef CONFIG_SMP |
4093 | physid_set_mask_of_physid(boot_cpu_physical_apicid, | 4047 | physid_set_mask_of_physid(boot_cpu_physical_apicid, |
4094 | &phys_cpu_present_map); | 4048 | &phys_cpu_present_map); |
4095 | #endif | 4049 | #endif |
4096 | /* Make sure the irq descriptor is set up */ | ||
4097 | cfg = alloc_irq_and_cfg_at(0, 0); | ||
4098 | |||
4099 | setup_local_APIC(); | 4050 | setup_local_APIC(); |
4100 | 4051 | ||
4101 | add_pin_to_irq_node(cfg, 0, 0, 0); | 4052 | io_apic_setup_irq_pin(0, 0, &attr); |
4102 | set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge"); | 4053 | irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, |
4103 | 4054 | "edge"); | |
4104 | setup_ioapic_irq(0, 0, 0, cfg, 0, 0); | ||
4105 | } | 4055 | } |
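
The io_apic.c portion is mostly two things: a mechanical rename of the genirq accessors (get_irq_chip_data() -> irq_get_chip_data(), set_irq_chip_data() -> irq_set_chip_data(), set_irq_chip_and_handler_name() -> irq_set_chip_and_handler_name(), and so on), plus consolidation of the pin-programming paths into io_apic_setup_irq_pin() and io_apic_setup_irq_pin_once(). The latter keeps the old "program each IOAPIC pin at most once" guard that io_apic_set_pci_routing() used to open-code. The generic shape of that guard, with the names below standing in for the real io_apic.c symbols, looks roughly like this:

```c
#include <linux/bitops.h>	/* test_bit(), set_bit() */

/* Sketch of the program-once guard that io_apic_setup_irq_pin_once()
 * wraps around io_apic_setup_irq_pin(): the bitmap records pins already
 * routed, so PRT entries with redundant pin->gsi mappings do not
 * reprogram the hardware. */
static int setup_pin_once(unsigned long *pin_programmed, unsigned int pin,
			  int (*do_setup)(unsigned int pin))
{
	int ret;

	if (test_bit(pin, pin_programmed))
		return 0;		/* already routed, nothing to do */

	ret = do_setup(pin);
	if (!ret)
		set_bit(pin, pin_programmed);
	return ret;
}
```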
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c index 08385e090a6f..cce91bf26676 100644 --- a/arch/x86/kernel/apic/ipi.c +++ b/arch/x86/kernel/apic/ipi.c | |||
@@ -56,6 +56,8 @@ void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, | |||
56 | local_irq_restore(flags); | 56 | local_irq_restore(flags); |
57 | } | 57 | } |
58 | 58 | ||
59 | #ifdef CONFIG_X86_32 | ||
60 | |||
59 | void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, | 61 | void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, |
60 | int vector) | 62 | int vector) |
61 | { | 63 | { |
@@ -71,8 +73,8 @@ void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, | |||
71 | local_irq_save(flags); | 73 | local_irq_save(flags); |
72 | for_each_cpu(query_cpu, mask) | 74 | for_each_cpu(query_cpu, mask) |
73 | __default_send_IPI_dest_field( | 75 | __default_send_IPI_dest_field( |
74 | apic->cpu_to_logical_apicid(query_cpu), vector, | 76 | early_per_cpu(x86_cpu_to_logical_apicid, query_cpu), |
75 | apic->dest_logical); | 77 | vector, apic->dest_logical); |
76 | local_irq_restore(flags); | 78 | local_irq_restore(flags); |
77 | } | 79 | } |
78 | 80 | ||
@@ -90,14 +92,12 @@ void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, | |||
90 | if (query_cpu == this_cpu) | 92 | if (query_cpu == this_cpu) |
91 | continue; | 93 | continue; |
92 | __default_send_IPI_dest_field( | 94 | __default_send_IPI_dest_field( |
93 | apic->cpu_to_logical_apicid(query_cpu), vector, | 95 | early_per_cpu(x86_cpu_to_logical_apicid, query_cpu), |
94 | apic->dest_logical); | 96 | vector, apic->dest_logical); |
95 | } | 97 | } |
96 | local_irq_restore(flags); | 98 | local_irq_restore(flags); |
97 | } | 99 | } |
98 | 100 | ||
99 | #ifdef CONFIG_X86_32 | ||
100 | |||
101 | /* | 101 | /* |
102 | * This is only used on smaller machines. | 102 | * This is only used on smaller machines. |
103 | */ | 103 | */ |
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c index 960f26ab5c9f..6273eee5134b 100644 --- a/arch/x86/kernel/apic/numaq_32.c +++ b/arch/x86/kernel/apic/numaq_32.c | |||
@@ -373,13 +373,6 @@ static inline void numaq_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask | |||
373 | return physids_promote(0xFUL, retmap); | 373 | return physids_promote(0xFUL, retmap); |
374 | } | 374 | } |
375 | 375 | ||
376 | static inline int numaq_cpu_to_logical_apicid(int cpu) | ||
377 | { | ||
378 | if (cpu >= nr_cpu_ids) | ||
379 | return BAD_APICID; | ||
380 | return cpu_2_logical_apicid[cpu]; | ||
381 | } | ||
382 | |||
383 | /* | 376 | /* |
384 | * Supporting over 60 cpus on NUMA-Q requires a locality-dependent | 377 | * Supporting over 60 cpus on NUMA-Q requires a locality-dependent |
385 | * cpu to APIC ID relation to properly interact with the intelligent | 378 | * cpu to APIC ID relation to properly interact with the intelligent |
@@ -398,6 +391,15 @@ static inline int numaq_apicid_to_node(int logical_apicid) | |||
398 | return logical_apicid >> 4; | 391 | return logical_apicid >> 4; |
399 | } | 392 | } |
400 | 393 | ||
394 | static int numaq_numa_cpu_node(int cpu) | ||
395 | { | ||
396 | int logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu); | ||
397 | |||
398 | if (logical_apicid != BAD_APICID) | ||
399 | return numaq_apicid_to_node(logical_apicid); | ||
400 | return NUMA_NO_NODE; | ||
401 | } | ||
402 | |||
401 | static void numaq_apicid_to_cpu_present(int logical_apicid, physid_mask_t *retmap) | 403 | static void numaq_apicid_to_cpu_present(int logical_apicid, physid_mask_t *retmap) |
402 | { | 404 | { |
403 | int node = numaq_apicid_to_node(logical_apicid); | 405 | int node = numaq_apicid_to_node(logical_apicid); |
@@ -508,8 +510,6 @@ struct apic __refdata apic_numaq = { | |||
508 | .ioapic_phys_id_map = numaq_ioapic_phys_id_map, | 510 | .ioapic_phys_id_map = numaq_ioapic_phys_id_map, |
509 | .setup_apic_routing = numaq_setup_apic_routing, | 511 | .setup_apic_routing = numaq_setup_apic_routing, |
510 | .multi_timer_check = numaq_multi_timer_check, | 512 | .multi_timer_check = numaq_multi_timer_check, |
511 | .apicid_to_node = numaq_apicid_to_node, | ||
512 | .cpu_to_logical_apicid = numaq_cpu_to_logical_apicid, | ||
513 | .cpu_present_to_apicid = numaq_cpu_present_to_apicid, | 513 | .cpu_present_to_apicid = numaq_cpu_present_to_apicid, |
514 | .apicid_to_cpu_present = numaq_apicid_to_cpu_present, | 514 | .apicid_to_cpu_present = numaq_apicid_to_cpu_present, |
515 | .setup_portio_remap = numaq_setup_portio_remap, | 515 | .setup_portio_remap = numaq_setup_portio_remap, |
@@ -547,4 +547,7 @@ struct apic __refdata apic_numaq = { | |||
547 | .icr_write = native_apic_icr_write, | 547 | .icr_write = native_apic_icr_write, |
548 | .wait_icr_idle = native_apic_wait_icr_idle, | 548 | .wait_icr_idle = native_apic_wait_icr_idle, |
549 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, | 549 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, |
550 | |||
551 | .x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid, | ||
552 | .x86_32_numa_cpu_node = numaq_numa_cpu_node, | ||
550 | }; | 553 | }; |
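
On NUMA-Q the node number is carried in the upper nibble of the logical APIC ID, so the new numaq_numa_cpu_node() callback reduces to a shift once the early logical ID is known, falling back to NUMA_NO_NODE while that ID is still BAD_APICID. A sketch of the mapping, with a stand-in function name and approximate headers:

```c
#include <linux/numa.h>	/* NUMA_NO_NODE */
#include <asm/apic.h>	/* BAD_APICID */

/* NUMA-Q encodes the node number in bits 7:4 of the logical APIC ID. */
static int numaq_node_from_logical_apicid(int logical_apicid)
{
	if (logical_apicid == BAD_APICID)
		return NUMA_NO_NODE;
	return logical_apicid >> 4;
}
```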
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c index 99d2fe016084..fc84c7b61108 100644 --- a/arch/x86/kernel/apic/probe_32.c +++ b/arch/x86/kernel/apic/probe_32.c | |||
@@ -77,6 +77,11 @@ void __init default_setup_apic_routing(void) | |||
77 | apic->setup_apic_routing(); | 77 | apic->setup_apic_routing(); |
78 | } | 78 | } |
79 | 79 | ||
80 | static int default_x86_32_early_logical_apicid(int cpu) | ||
81 | { | ||
82 | return 1 << cpu; | ||
83 | } | ||
84 | |||
80 | static void setup_apic_flat_routing(void) | 85 | static void setup_apic_flat_routing(void) |
81 | { | 86 | { |
82 | #ifdef CONFIG_X86_IO_APIC | 87 | #ifdef CONFIG_X86_IO_APIC |
@@ -130,8 +135,6 @@ struct apic apic_default = { | |||
130 | .ioapic_phys_id_map = default_ioapic_phys_id_map, | 135 | .ioapic_phys_id_map = default_ioapic_phys_id_map, |
131 | .setup_apic_routing = setup_apic_flat_routing, | 136 | .setup_apic_routing = setup_apic_flat_routing, |
132 | .multi_timer_check = NULL, | 137 | .multi_timer_check = NULL, |
133 | .apicid_to_node = default_apicid_to_node, | ||
134 | .cpu_to_logical_apicid = default_cpu_to_logical_apicid, | ||
135 | .cpu_present_to_apicid = default_cpu_present_to_apicid, | 138 | .cpu_present_to_apicid = default_cpu_present_to_apicid, |
136 | .apicid_to_cpu_present = physid_set_mask_of_physid, | 139 | .apicid_to_cpu_present = physid_set_mask_of_physid, |
137 | .setup_portio_remap = NULL, | 140 | .setup_portio_remap = NULL, |
@@ -167,6 +170,9 @@ struct apic apic_default = { | |||
167 | .icr_write = native_apic_icr_write, | 170 | .icr_write = native_apic_icr_write, |
168 | .wait_icr_idle = native_apic_wait_icr_idle, | 171 | .wait_icr_idle = native_apic_wait_icr_idle, |
169 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, | 172 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, |
173 | |||
174 | .x86_32_early_logical_apicid = default_x86_32_early_logical_apicid, | ||
175 | .x86_32_numa_cpu_node = default_x86_32_numa_cpu_node, | ||
170 | }; | 176 | }; |
171 | 177 | ||
172 | extern struct apic apic_numaq; | 178 | extern struct apic apic_numaq; |
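
For the default (flat logical) driver the new early callback is trivial: the logical APIC ID is a one-hot encoding of the CPU number, which is also why flat logical delivery tops out at 8 CPUs — the logical destination field is 8 bits wide. A throwaway user-space illustration of the 1 << cpu mapping:

```c
#include <stdio.h>

int main(void)
{
	/* default_x86_32_early_logical_apicid(cpu) returns 1 << cpu:
	 * one bit per CPU, so at most 8 CPUs fit in the 8-bit field. */
	for (int cpu = 0; cpu < 8; cpu++)
		printf("cpu %d -> logical apicid 0x%02x\n", cpu, 1 << cpu);
	return 0;
}
```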
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c index 9b419263d90d..e4b8059b414a 100644 --- a/arch/x86/kernel/apic/summit_32.c +++ b/arch/x86/kernel/apic/summit_32.c | |||
@@ -194,11 +194,10 @@ static unsigned long summit_check_apicid_present(int bit) | |||
194 | return 1; | 194 | return 1; |
195 | } | 195 | } |
196 | 196 | ||
197 | static void summit_init_apic_ldr(void) | 197 | static int summit_early_logical_apicid(int cpu) |
198 | { | 198 | { |
199 | unsigned long val, id; | ||
200 | int count = 0; | 199 | int count = 0; |
201 | u8 my_id = (u8)hard_smp_processor_id(); | 200 | u8 my_id = early_per_cpu(x86_cpu_to_apicid, cpu); |
202 | u8 my_cluster = APIC_CLUSTER(my_id); | 201 | u8 my_cluster = APIC_CLUSTER(my_id); |
203 | #ifdef CONFIG_SMP | 202 | #ifdef CONFIG_SMP |
204 | u8 lid; | 203 | u8 lid; |
@@ -206,7 +205,7 @@ static void summit_init_apic_ldr(void) | |||
206 | 205 | ||
207 | /* Create logical APIC IDs by counting CPUs already in cluster. */ | 206 | /* Create logical APIC IDs by counting CPUs already in cluster. */ |
208 | for (count = 0, i = nr_cpu_ids; --i >= 0; ) { | 207 | for (count = 0, i = nr_cpu_ids; --i >= 0; ) { |
209 | lid = cpu_2_logical_apicid[i]; | 208 | lid = early_per_cpu(x86_cpu_to_logical_apicid, i); |
210 | if (lid != BAD_APICID && APIC_CLUSTER(lid) == my_cluster) | 209 | if (lid != BAD_APICID && APIC_CLUSTER(lid) == my_cluster) |
211 | ++count; | 210 | ++count; |
212 | } | 211 | } |
@@ -214,7 +213,15 @@ static void summit_init_apic_ldr(void) | |||
214 | /* We only have a 4 wide bitmap in cluster mode. If a deranged | 213 | /* We only have a 4 wide bitmap in cluster mode. If a deranged |
215 | * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */ | 214 | * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */ |
216 | BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT); | 215 | BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT); |
217 | id = my_cluster | (1UL << count); | 216 | return my_cluster | (1UL << count); |
217 | } | ||
218 | |||
219 | static void summit_init_apic_ldr(void) | ||
220 | { | ||
221 | int cpu = smp_processor_id(); | ||
222 | unsigned long id = early_per_cpu(x86_cpu_to_logical_apicid, cpu); | ||
223 | unsigned long val; | ||
224 | |||
218 | apic_write(APIC_DFR, SUMMIT_APIC_DFR_VALUE); | 225 | apic_write(APIC_DFR, SUMMIT_APIC_DFR_VALUE); |
219 | val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; | 226 | val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; |
220 | val |= SET_APIC_LOGICAL_ID(id); | 227 | val |= SET_APIC_LOGICAL_ID(id); |
@@ -232,27 +239,6 @@ static void summit_setup_apic_routing(void) | |||
232 | nr_ioapics); | 239 | nr_ioapics); |
233 | } | 240 | } |
234 | 241 | ||
235 | static int summit_apicid_to_node(int logical_apicid) | ||
236 | { | ||
237 | #ifdef CONFIG_SMP | ||
238 | return apicid_2_node[hard_smp_processor_id()]; | ||
239 | #else | ||
240 | return 0; | ||
241 | #endif | ||
242 | } | ||
243 | |||
244 | /* Mapping from cpu number to logical apicid */ | ||
245 | static inline int summit_cpu_to_logical_apicid(int cpu) | ||
246 | { | ||
247 | #ifdef CONFIG_SMP | ||
248 | if (cpu >= nr_cpu_ids) | ||
249 | return BAD_APICID; | ||
250 | return cpu_2_logical_apicid[cpu]; | ||
251 | #else | ||
252 | return logical_smp_processor_id(); | ||
253 | #endif | ||
254 | } | ||
255 | |||
256 | static int summit_cpu_present_to_apicid(int mps_cpu) | 242 | static int summit_cpu_present_to_apicid(int mps_cpu) |
257 | { | 243 | { |
258 | if (mps_cpu < nr_cpu_ids) | 244 | if (mps_cpu < nr_cpu_ids) |
@@ -286,7 +272,7 @@ static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask) | |||
286 | * The cpus in the mask must all be on the apic cluster. | 272 | * The cpus in the mask must all be on the apic cluster. |
287 | */ | 273 | */ |
288 | for_each_cpu(cpu, cpumask) { | 274 | for_each_cpu(cpu, cpumask) { |
289 | int new_apicid = summit_cpu_to_logical_apicid(cpu); | 275 | int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu); |
290 | 276 | ||
291 | if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { | 277 | if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { |
292 | printk("%s: Not a valid mask!\n", __func__); | 278 | printk("%s: Not a valid mask!\n", __func__); |
@@ -301,7 +287,7 @@ static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask) | |||
301 | static unsigned int summit_cpu_mask_to_apicid_and(const struct cpumask *inmask, | 287 | static unsigned int summit_cpu_mask_to_apicid_and(const struct cpumask *inmask, |
302 | const struct cpumask *andmask) | 288 | const struct cpumask *andmask) |
303 | { | 289 | { |
304 | int apicid = summit_cpu_to_logical_apicid(0); | 290 | int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0); |
305 | cpumask_var_t cpumask; | 291 | cpumask_var_t cpumask; |
306 | 292 | ||
307 | if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) | 293 | if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) |
@@ -528,8 +514,6 @@ struct apic apic_summit = { | |||
528 | .ioapic_phys_id_map = summit_ioapic_phys_id_map, | 514 | .ioapic_phys_id_map = summit_ioapic_phys_id_map, |
529 | .setup_apic_routing = summit_setup_apic_routing, | 515 | .setup_apic_routing = summit_setup_apic_routing, |
530 | .multi_timer_check = NULL, | 516 | .multi_timer_check = NULL, |
531 | .apicid_to_node = summit_apicid_to_node, | ||
532 | .cpu_to_logical_apicid = summit_cpu_to_logical_apicid, | ||
533 | .cpu_present_to_apicid = summit_cpu_present_to_apicid, | 517 | .cpu_present_to_apicid = summit_cpu_present_to_apicid, |
534 | .apicid_to_cpu_present = summit_apicid_to_cpu_present, | 518 | .apicid_to_cpu_present = summit_apicid_to_cpu_present, |
535 | .setup_portio_remap = NULL, | 519 | .setup_portio_remap = NULL, |
@@ -565,4 +549,7 @@ struct apic apic_summit = { | |||
565 | .icr_write = native_apic_icr_write, | 549 | .icr_write = native_apic_icr_write, |
566 | .wait_icr_idle = native_apic_wait_icr_idle, | 550 | .wait_icr_idle = native_apic_wait_icr_idle, |
567 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, | 551 | .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, |
552 | |||
553 | .x86_32_early_logical_apicid = summit_early_logical_apicid, | ||
554 | .x86_32_numa_cpu_node = default_x86_32_numa_cpu_node, | ||
568 | }; | 555 | }; |
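
Summit keeps its cluster-relative numbering, but the ID is now computed per CPU by summit_early_logical_apicid() rather than at init_apic_ldr() time: count how many CPUs already hold a logical ID in the same cluster, then claim the next bit of the 4-wide cluster bitmap. A kernel-context sketch of that counting step, mirroring the hunk above; the function name is a stand-in, the CONFIG_SMP conditional is omitted, and the cluster helpers and early per-cpu arrays are the usual x86 ones.

```c
/* Logical ID = cluster bits of the physical ID plus the next free one-hot
 * bit within that cluster; a cluster holds at most XAPIC_DEST_CPUS_SHIFT
 * CPUs, hence the BUG_ON. */
static int summit_style_early_logical_apicid(int cpu)
{
	int i, count = 0;
	u8 my_id = early_per_cpu(x86_cpu_to_apicid, cpu);
	u8 my_cluster = APIC_CLUSTER(my_id);

	for (i = nr_cpu_ids - 1; i >= 0; i--) {
		u8 lid = early_per_cpu(x86_cpu_to_logical_apicid, i);

		if (lid != BAD_APICID && APIC_CLUSTER(lid) == my_cluster)
			++count;
	}
	BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
	return my_cluster | (1UL << count);
}
```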
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index cf69c59f4910..90949bbd566d 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c | |||
@@ -206,8 +206,6 @@ struct apic apic_x2apic_cluster = { | |||
206 | .ioapic_phys_id_map = NULL, | 206 | .ioapic_phys_id_map = NULL, |
207 | .setup_apic_routing = NULL, | 207 | .setup_apic_routing = NULL, |
208 | .multi_timer_check = NULL, | 208 | .multi_timer_check = NULL, |
209 | .apicid_to_node = NULL, | ||
210 | .cpu_to_logical_apicid = NULL, | ||
211 | .cpu_present_to_apicid = default_cpu_present_to_apicid, | 209 | .cpu_present_to_apicid = default_cpu_present_to_apicid, |
212 | .apicid_to_cpu_present = NULL, | 210 | .apicid_to_cpu_present = NULL, |
213 | .setup_portio_remap = NULL, | 211 | .setup_portio_remap = NULL, |
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index 8972f38c5ced..c7e6d6645bf4 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c | |||
@@ -195,8 +195,6 @@ struct apic apic_x2apic_phys = { | |||
195 | .ioapic_phys_id_map = NULL, | 195 | .ioapic_phys_id_map = NULL, |
196 | .setup_apic_routing = NULL, | 196 | .setup_apic_routing = NULL, |
197 | .multi_timer_check = NULL, | 197 | .multi_timer_check = NULL, |
198 | .apicid_to_node = NULL, | ||
199 | .cpu_to_logical_apicid = NULL, | ||
200 | .cpu_present_to_apicid = default_cpu_present_to_apicid, | 198 | .cpu_present_to_apicid = default_cpu_present_to_apicid, |
201 | .apicid_to_cpu_present = NULL, | 199 | .apicid_to_cpu_present = NULL, |
202 | .setup_portio_remap = NULL, | 200 | .setup_portio_remap = NULL, |
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index bd16b58b8850..3c289281394c 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c | |||
@@ -338,8 +338,6 @@ struct apic __refdata apic_x2apic_uv_x = { | |||
338 | .ioapic_phys_id_map = NULL, | 338 | .ioapic_phys_id_map = NULL, |
339 | .setup_apic_routing = NULL, | 339 | .setup_apic_routing = NULL, |
340 | .multi_timer_check = NULL, | 340 | .multi_timer_check = NULL, |
341 | .apicid_to_node = NULL, | ||
342 | .cpu_to_logical_apicid = NULL, | ||
343 | .cpu_present_to_apicid = default_cpu_present_to_apicid, | 341 | .cpu_present_to_apicid = default_cpu_present_to_apicid, |
344 | .apicid_to_cpu_present = NULL, | 342 | .apicid_to_cpu_present = NULL, |
345 | .setup_portio_remap = NULL, | 343 | .setup_portio_remap = NULL, |
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 0e4f24c2a746..9079926a5b18 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c | |||
@@ -227,6 +227,7 @@ | |||
227 | #include <linux/suspend.h> | 227 | #include <linux/suspend.h> |
228 | #include <linux/kthread.h> | 228 | #include <linux/kthread.h> |
229 | #include <linux/jiffies.h> | 229 | #include <linux/jiffies.h> |
230 | #include <linux/acpi.h> | ||
230 | 231 | ||
231 | #include <asm/system.h> | 232 | #include <asm/system.h> |
232 | #include <asm/uaccess.h> | 233 | #include <asm/uaccess.h> |
@@ -975,20 +976,10 @@ recalc: | |||
975 | 976 | ||
976 | static void apm_power_off(void) | 977 | static void apm_power_off(void) |
977 | { | 978 | { |
978 | unsigned char po_bios_call[] = { | ||
979 | 0xb8, 0x00, 0x10, /* movw $0x1000,ax */ | ||
980 | 0x8e, 0xd0, /* movw ax,ss */ | ||
981 | 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */ | ||
982 | 0xb8, 0x07, 0x53, /* movw $0x5307,ax */ | ||
983 | 0xbb, 0x01, 0x00, /* movw $0x0001,bx */ | ||
984 | 0xb9, 0x03, 0x00, /* movw $0x0003,cx */ | ||
985 | 0xcd, 0x15 /* int $0x15 */ | ||
986 | }; | ||
987 | |||
988 | /* Some bioses don't like being called from CPU != 0 */ | 979 | /* Some bioses don't like being called from CPU != 0 */ |
989 | if (apm_info.realmode_power_off) { | 980 | if (apm_info.realmode_power_off) { |
990 | set_cpus_allowed_ptr(current, cpumask_of(0)); | 981 | set_cpus_allowed_ptr(current, cpumask_of(0)); |
991 | machine_real_restart(po_bios_call, sizeof(po_bios_call)); | 982 | machine_real_restart(MRR_APM); |
992 | } else { | 983 | } else { |
993 | (void)set_system_power_state(APM_STATE_OFF); | 984 | (void)set_system_power_state(APM_STATE_OFF); |
994 | } | 985 | } |
@@ -2331,12 +2322,11 @@ static int __init apm_init(void) | |||
2331 | apm_info.disabled = 1; | 2322 | apm_info.disabled = 1; |
2332 | return -ENODEV; | 2323 | return -ENODEV; |
2333 | } | 2324 | } |
2334 | if (pm_flags & PM_ACPI) { | 2325 | if (!acpi_disabled) { |
2335 | printk(KERN_NOTICE "apm: overridden by ACPI.\n"); | 2326 | printk(KERN_NOTICE "apm: overridden by ACPI.\n"); |
2336 | apm_info.disabled = 1; | 2327 | apm_info.disabled = 1; |
2337 | return -ENODEV; | 2328 | return -ENODEV; |
2338 | } | 2329 | } |
2339 | pm_flags |= PM_APM; | ||
2340 | 2330 | ||
2341 | /* | 2331 | /* |
2342 | * Set up the long jump entry point to the APM BIOS, which is called | 2332 | * Set up the long jump entry point to the APM BIOS, which is called |
@@ -2428,7 +2418,6 @@ static void __exit apm_exit(void) | |||
2428 | kthread_stop(kapmd_task); | 2418 | kthread_stop(kapmd_task); |
2429 | kapmd_task = NULL; | 2419 | kapmd_task = NULL; |
2430 | } | 2420 | } |
2431 | pm_flags &= ~PM_APM; | ||
2432 | } | 2421 | } |
2433 | 2422 | ||
2434 | module_init(apm_init); | 2423 | module_init(apm_init); |
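
The apm_32.c hunks above replace two pieces of glue: the open-coded real-mode power-off byte string becomes a call to machine_real_restart(MRR_APM), and the old pm_flags/PM_ACPI handshake becomes a direct test of acpi_disabled. A minimal sketch of the resulting probe pattern, with a hypothetical driver name (not part of this diff), looks like this:

    #include <linux/acpi.h>
    #include <linux/errno.h>
    #include <linux/kernel.h>

    /* Sketch only: a legacy power-management driver now defers to ACPI by
     * checking acpi_disabled directly; the pm_flags PM_APM/PM_ACPI
     * bookkeeping is gone. */
    static int __init legacy_pm_init(void)
    {
            if (!acpi_disabled) {
                    printk(KERN_NOTICE "legacy-pm: overridden by ACPI.\n");
                    return -ENODEV;
            }
            /* ... set up the BIOS entry point, as apm_init() does ... */
            return 0;
    }
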
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index cfa82c899f47..4f13fafc5264 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c | |||
@@ -1,5 +1,70 @@ | |||
1 | /* | ||
2 | * Generate definitions needed by assembly language modules. | ||
3 | * This code generates raw asm output which is post-processed to extract | ||
4 | * and format the required data. | ||
5 | */ | ||
6 | #define COMPILE_OFFSETS | ||
7 | |||
8 | #include <linux/crypto.h> | ||
9 | #include <linux/sched.h> | ||
10 | #include <linux/stddef.h> | ||
11 | #include <linux/hardirq.h> | ||
12 | #include <linux/suspend.h> | ||
13 | #include <linux/kbuild.h> | ||
14 | #include <asm/processor.h> | ||
15 | #include <asm/thread_info.h> | ||
16 | #include <asm/sigframe.h> | ||
17 | #include <asm/bootparam.h> | ||
18 | #include <asm/suspend.h> | ||
19 | |||
20 | #ifdef CONFIG_XEN | ||
21 | #include <xen/interface/xen.h> | ||
22 | #endif | ||
23 | |||
1 | #ifdef CONFIG_X86_32 | 24 | #ifdef CONFIG_X86_32 |
2 | # include "asm-offsets_32.c" | 25 | # include "asm-offsets_32.c" |
3 | #else | 26 | #else |
4 | # include "asm-offsets_64.c" | 27 | # include "asm-offsets_64.c" |
5 | #endif | 28 | #endif |
29 | |||
30 | void common(void) { | ||
31 | BLANK(); | ||
32 | OFFSET(TI_flags, thread_info, flags); | ||
33 | OFFSET(TI_status, thread_info, status); | ||
34 | OFFSET(TI_addr_limit, thread_info, addr_limit); | ||
35 | OFFSET(TI_preempt_count, thread_info, preempt_count); | ||
36 | |||
37 | BLANK(); | ||
38 | OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); | ||
39 | |||
40 | BLANK(); | ||
41 | OFFSET(pbe_address, pbe, address); | ||
42 | OFFSET(pbe_orig_address, pbe, orig_address); | ||
43 | OFFSET(pbe_next, pbe, next); | ||
44 | |||
45 | #ifdef CONFIG_PARAVIRT | ||
46 | BLANK(); | ||
47 | OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled); | ||
48 | OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops); | ||
49 | OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops); | ||
50 | OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable); | ||
51 | OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable); | ||
52 | OFFSET(PV_CPU_iret, pv_cpu_ops, iret); | ||
53 | OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit); | ||
54 | OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0); | ||
55 | OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2); | ||
56 | #endif | ||
57 | |||
58 | #ifdef CONFIG_XEN | ||
59 | BLANK(); | ||
60 | OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask); | ||
61 | OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending); | ||
62 | #endif | ||
63 | |||
64 | BLANK(); | ||
65 | OFFSET(BP_scratch, boot_params, scratch); | ||
66 | OFFSET(BP_loadflags, boot_params, hdr.loadflags); | ||
67 | OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch); | ||
68 | OFFSET(BP_version, boot_params, hdr.version); | ||
69 | OFFSET(BP_kernel_alignment, boot_params, hdr.kernel_alignment); | ||
70 | } | ||
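
The new common() body is built entirely from the kbuild helpers. For reference, OFFSET(), DEFINE() and BLANK() are defined along these lines in include/linux/kbuild.h of this era (reproduced here for context, not part of the patch): each macro emits a "->" marker into the compiler's assembly output, which Kbuild's sed rule turns into a #define in the generated asm-offsets.h.

    #include <linux/stddef.h>        /* offsetof() */

    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    #define BLANK() asm volatile("\n->" : : )

    #define OFFSET(sym, str, mem) \
            DEFINE(sym, offsetof(struct str, mem))

    /* e.g. OFFSET(TI_flags, thread_info, flags) leaves a line like
     *   ->TI_flags $8 offsetof(struct thread_info, flags)
     * in the .s output, which becomes "#define TI_flags 8" in
     * asm-offsets.h (the value 8 is only an example). */
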
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c index 1a4088dda37a..c29d631af6fc 100644 --- a/arch/x86/kernel/asm-offsets_32.c +++ b/arch/x86/kernel/asm-offsets_32.c | |||
@@ -1,26 +1,4 @@ | |||
1 | /* | ||
2 | * Generate definitions needed by assembly language modules. | ||
3 | * This code generates raw asm output which is post-processed | ||
4 | * to extract and format the required data. | ||
5 | */ | ||
6 | |||
7 | #include <linux/crypto.h> | ||
8 | #include <linux/sched.h> | ||
9 | #include <linux/signal.h> | ||
10 | #include <linux/personality.h> | ||
11 | #include <linux/suspend.h> | ||
12 | #include <linux/kbuild.h> | ||
13 | #include <asm/ucontext.h> | 1 | #include <asm/ucontext.h> |
14 | #include <asm/sigframe.h> | ||
15 | #include <asm/pgtable.h> | ||
16 | #include <asm/fixmap.h> | ||
17 | #include <asm/processor.h> | ||
18 | #include <asm/thread_info.h> | ||
19 | #include <asm/bootparam.h> | ||
20 | #include <asm/elf.h> | ||
21 | #include <asm/suspend.h> | ||
22 | |||
23 | #include <xen/interface/xen.h> | ||
24 | 2 | ||
25 | #include <linux/lguest.h> | 3 | #include <linux/lguest.h> |
26 | #include "../../../drivers/lguest/lg.h" | 4 | #include "../../../drivers/lguest/lg.h" |
@@ -51,21 +29,10 @@ void foo(void) | |||
51 | OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id); | 29 | OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id); |
52 | BLANK(); | 30 | BLANK(); |
53 | 31 | ||
54 | OFFSET(TI_task, thread_info, task); | ||
55 | OFFSET(TI_exec_domain, thread_info, exec_domain); | ||
56 | OFFSET(TI_flags, thread_info, flags); | ||
57 | OFFSET(TI_status, thread_info, status); | ||
58 | OFFSET(TI_preempt_count, thread_info, preempt_count); | ||
59 | OFFSET(TI_addr_limit, thread_info, addr_limit); | ||
60 | OFFSET(TI_restart_block, thread_info, restart_block); | ||
61 | OFFSET(TI_sysenter_return, thread_info, sysenter_return); | 32 | OFFSET(TI_sysenter_return, thread_info, sysenter_return); |
62 | OFFSET(TI_cpu, thread_info, cpu); | 33 | OFFSET(TI_cpu, thread_info, cpu); |
63 | BLANK(); | 34 | BLANK(); |
64 | 35 | ||
65 | OFFSET(GDS_size, desc_ptr, size); | ||
66 | OFFSET(GDS_address, desc_ptr, address); | ||
67 | BLANK(); | ||
68 | |||
69 | OFFSET(PT_EBX, pt_regs, bx); | 36 | OFFSET(PT_EBX, pt_regs, bx); |
70 | OFFSET(PT_ECX, pt_regs, cx); | 37 | OFFSET(PT_ECX, pt_regs, cx); |
71 | OFFSET(PT_EDX, pt_regs, dx); | 38 | OFFSET(PT_EDX, pt_regs, dx); |
@@ -85,42 +52,13 @@ void foo(void) | |||
85 | OFFSET(PT_OLDSS, pt_regs, ss); | 52 | OFFSET(PT_OLDSS, pt_regs, ss); |
86 | BLANK(); | 53 | BLANK(); |
87 | 54 | ||
88 | OFFSET(EXEC_DOMAIN_handler, exec_domain, handler); | ||
89 | OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe, uc.uc_mcontext); | 55 | OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe, uc.uc_mcontext); |
90 | BLANK(); | 56 | BLANK(); |
91 | 57 | ||
92 | OFFSET(pbe_address, pbe, address); | ||
93 | OFFSET(pbe_orig_address, pbe, orig_address); | ||
94 | OFFSET(pbe_next, pbe, next); | ||
95 | |||
96 | /* Offset from the sysenter stack to tss.sp0 */ | 58 | /* Offset from the sysenter stack to tss.sp0 */ |
97 | DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) - | 59 | DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) - |
98 | sizeof(struct tss_struct)); | 60 | sizeof(struct tss_struct)); |
99 | 61 | ||
100 | DEFINE(PAGE_SIZE_asm, PAGE_SIZE); | ||
101 | DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT); | ||
102 | DEFINE(THREAD_SIZE_asm, THREAD_SIZE); | ||
103 | |||
104 | OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); | ||
105 | |||
106 | #ifdef CONFIG_PARAVIRT | ||
107 | BLANK(); | ||
108 | OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled); | ||
109 | OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops); | ||
110 | OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops); | ||
111 | OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable); | ||
112 | OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable); | ||
113 | OFFSET(PV_CPU_iret, pv_cpu_ops, iret); | ||
114 | OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit); | ||
115 | OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0); | ||
116 | #endif | ||
117 | |||
118 | #ifdef CONFIG_XEN | ||
119 | BLANK(); | ||
120 | OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask); | ||
121 | OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending); | ||
122 | #endif | ||
123 | |||
124 | #if defined(CONFIG_LGUEST) || defined(CONFIG_LGUEST_GUEST) || defined(CONFIG_LGUEST_MODULE) | 62 | #if defined(CONFIG_LGUEST) || defined(CONFIG_LGUEST_GUEST) || defined(CONFIG_LGUEST_MODULE) |
125 | BLANK(); | 63 | BLANK(); |
126 | OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled); | 64 | OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled); |
@@ -139,11 +77,4 @@ void foo(void) | |||
139 | OFFSET(LGUEST_PAGES_regs_errcode, lguest_pages, regs.errcode); | 77 | OFFSET(LGUEST_PAGES_regs_errcode, lguest_pages, regs.errcode); |
140 | OFFSET(LGUEST_PAGES_regs, lguest_pages, regs); | 78 | OFFSET(LGUEST_PAGES_regs, lguest_pages, regs); |
141 | #endif | 79 | #endif |
142 | |||
143 | BLANK(); | ||
144 | OFFSET(BP_scratch, boot_params, scratch); | ||
145 | OFFSET(BP_loadflags, boot_params, hdr.loadflags); | ||
146 | OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch); | ||
147 | OFFSET(BP_version, boot_params, hdr.version); | ||
148 | OFFSET(BP_kernel_alignment, boot_params, hdr.kernel_alignment); | ||
149 | } | 80 | } |
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index 4a6aeedcd965..e72a1194af22 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c | |||
@@ -1,27 +1,4 @@ | |||
1 | /* | ||
2 | * Generate definitions needed by assembly language modules. | ||
3 | * This code generates raw asm output which is post-processed to extract | ||
4 | * and format the required data. | ||
5 | */ | ||
6 | #define COMPILE_OFFSETS | ||
7 | |||
8 | #include <linux/crypto.h> | ||
9 | #include <linux/sched.h> | ||
10 | #include <linux/stddef.h> | ||
11 | #include <linux/errno.h> | ||
12 | #include <linux/hardirq.h> | ||
13 | #include <linux/suspend.h> | ||
14 | #include <linux/kbuild.h> | ||
15 | #include <asm/processor.h> | ||
16 | #include <asm/segment.h> | ||
17 | #include <asm/thread_info.h> | ||
18 | #include <asm/ia32.h> | 1 | #include <asm/ia32.h> |
19 | #include <asm/bootparam.h> | ||
20 | #include <asm/suspend.h> | ||
21 | |||
22 | #include <xen/interface/xen.h> | ||
23 | |||
24 | #include <asm/sigframe.h> | ||
25 | 2 | ||
26 | #define __NO_STUBS 1 | 3 | #define __NO_STUBS 1 |
27 | #undef __SYSCALL | 4 | #undef __SYSCALL |
@@ -33,41 +10,19 @@ static char syscalls[] = { | |||
33 | 10 | ||
34 | int main(void) | 11 | int main(void) |
35 | { | 12 | { |
36 | #define ENTRY(entry) DEFINE(tsk_ ## entry, offsetof(struct task_struct, entry)) | ||
37 | ENTRY(state); | ||
38 | ENTRY(flags); | ||
39 | ENTRY(pid); | ||
40 | BLANK(); | ||
41 | #undef ENTRY | ||
42 | #define ENTRY(entry) DEFINE(TI_ ## entry, offsetof(struct thread_info, entry)) | ||
43 | ENTRY(flags); | ||
44 | ENTRY(addr_limit); | ||
45 | ENTRY(preempt_count); | ||
46 | ENTRY(status); | ||
47 | #ifdef CONFIG_IA32_EMULATION | ||
48 | ENTRY(sysenter_return); | ||
49 | #endif | ||
50 | BLANK(); | ||
51 | #undef ENTRY | ||
52 | #ifdef CONFIG_PARAVIRT | 13 | #ifdef CONFIG_PARAVIRT |
53 | BLANK(); | ||
54 | OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled); | ||
55 | OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops); | ||
56 | OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops); | ||
57 | OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable); | ||
58 | OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable); | ||
59 | OFFSET(PV_IRQ_adjust_exception_frame, pv_irq_ops, adjust_exception_frame); | 14 | OFFSET(PV_IRQ_adjust_exception_frame, pv_irq_ops, adjust_exception_frame); |
60 | OFFSET(PV_CPU_iret, pv_cpu_ops, iret); | ||
61 | OFFSET(PV_CPU_usergs_sysret32, pv_cpu_ops, usergs_sysret32); | 15 | OFFSET(PV_CPU_usergs_sysret32, pv_cpu_ops, usergs_sysret32); |
62 | OFFSET(PV_CPU_usergs_sysret64, pv_cpu_ops, usergs_sysret64); | 16 | OFFSET(PV_CPU_usergs_sysret64, pv_cpu_ops, usergs_sysret64); |
63 | OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit); | ||
64 | OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs); | 17 | OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs); |
65 | OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2); | 18 | BLANK(); |
66 | #endif | 19 | #endif |
67 | 20 | ||
68 | |||
69 | #ifdef CONFIG_IA32_EMULATION | 21 | #ifdef CONFIG_IA32_EMULATION |
70 | #define ENTRY(entry) DEFINE(IA32_SIGCONTEXT_ ## entry, offsetof(struct sigcontext_ia32, entry)) | 22 | OFFSET(TI_sysenter_return, thread_info, sysenter_return); |
23 | BLANK(); | ||
24 | |||
25 | #define ENTRY(entry) OFFSET(IA32_SIGCONTEXT_ ## entry, sigcontext_ia32, entry) | ||
71 | ENTRY(ax); | 26 | ENTRY(ax); |
72 | ENTRY(bx); | 27 | ENTRY(bx); |
73 | ENTRY(cx); | 28 | ENTRY(cx); |
@@ -79,15 +34,12 @@ int main(void) | |||
79 | ENTRY(ip); | 34 | ENTRY(ip); |
80 | BLANK(); | 35 | BLANK(); |
81 | #undef ENTRY | 36 | #undef ENTRY |
82 | DEFINE(IA32_RT_SIGFRAME_sigcontext, | 37 | |
83 | offsetof (struct rt_sigframe_ia32, uc.uc_mcontext)); | 38 | OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe_ia32, uc.uc_mcontext); |
84 | BLANK(); | 39 | BLANK(); |
85 | #endif | 40 | #endif |
86 | DEFINE(pbe_address, offsetof(struct pbe, address)); | 41 | |
87 | DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address)); | 42 | #define ENTRY(entry) OFFSET(pt_regs_ ## entry, pt_regs, entry) |
88 | DEFINE(pbe_next, offsetof(struct pbe, next)); | ||
89 | BLANK(); | ||
90 | #define ENTRY(entry) DEFINE(pt_regs_ ## entry, offsetof(struct pt_regs, entry)) | ||
91 | ENTRY(bx); | 43 | ENTRY(bx); |
92 | ENTRY(bx); | 44 | ENTRY(bx); |
93 | ENTRY(cx); | 45 | ENTRY(cx); |
@@ -107,7 +59,8 @@ int main(void) | |||
107 | ENTRY(flags); | 59 | ENTRY(flags); |
108 | BLANK(); | 60 | BLANK(); |
109 | #undef ENTRY | 61 | #undef ENTRY |
110 | #define ENTRY(entry) DEFINE(saved_context_ ## entry, offsetof(struct saved_context, entry)) | 62 | |
63 | #define ENTRY(entry) OFFSET(saved_context_ ## entry, saved_context, entry) | ||
111 | ENTRY(cr0); | 64 | ENTRY(cr0); |
112 | ENTRY(cr2); | 65 | ENTRY(cr2); |
113 | ENTRY(cr3); | 66 | ENTRY(cr3); |
@@ -115,26 +68,11 @@ int main(void) | |||
115 | ENTRY(cr8); | 68 | ENTRY(cr8); |
116 | BLANK(); | 69 | BLANK(); |
117 | #undef ENTRY | 70 | #undef ENTRY |
118 | DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist)); | ||
119 | BLANK(); | ||
120 | DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx)); | ||
121 | BLANK(); | ||
122 | DEFINE(__NR_syscall_max, sizeof(syscalls) - 1); | ||
123 | 71 | ||
72 | OFFSET(TSS_ist, tss_struct, x86_tss.ist); | ||
124 | BLANK(); | 73 | BLANK(); |
125 | OFFSET(BP_scratch, boot_params, scratch); | ||
126 | OFFSET(BP_loadflags, boot_params, hdr.loadflags); | ||
127 | OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch); | ||
128 | OFFSET(BP_version, boot_params, hdr.version); | ||
129 | OFFSET(BP_kernel_alignment, boot_params, hdr.kernel_alignment); | ||
130 | 74 | ||
131 | BLANK(); | 75 | DEFINE(__NR_syscall_max, sizeof(syscalls) - 1); |
132 | DEFINE(PAGE_SIZE_asm, PAGE_SIZE); | 76 | |
133 | #ifdef CONFIG_XEN | ||
134 | BLANK(); | ||
135 | OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask); | ||
136 | OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending); | ||
137 | #undef ENTRY | ||
138 | #endif | ||
139 | return 0; | 77 | return 0; |
140 | } | 78 | } |
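
The 64-bit file now spells its per-structure ENTRY() helpers with OFFSET() instead of open-coded DEFINE(offsetof(...)); both forms record the same number, as the equivalent expansions below show (illustration only, names invented):

    /* old style */
    #define ENTRY_OLD(e)  DEFINE(pt_regs_ ## e, offsetof(struct pt_regs, e))
    /* new style: same result via the OFFSET() wrapper */
    #define ENTRY_NEW(e)  OFFSET(pt_regs_ ## e, pt_regs, e)
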
diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c index 13a389179514..452932d34730 100644 --- a/arch/x86/kernel/check.c +++ b/arch/x86/kernel/check.c | |||
@@ -106,8 +106,8 @@ void __init setup_bios_corruption_check(void) | |||
106 | addr += size; | 106 | addr += size; |
107 | } | 107 | } |
108 | 108 | ||
109 | printk(KERN_INFO "Scanning %d areas for low memory corruption\n", | 109 | if (num_scan_areas) |
110 | num_scan_areas); | 110 | printk(KERN_INFO "Scanning %d areas for low memory corruption\n", num_scan_areas); |
111 | } | 111 | } |
112 | 112 | ||
113 | 113 | ||
@@ -143,12 +143,12 @@ static void check_corruption(struct work_struct *dummy) | |||
143 | { | 143 | { |
144 | check_for_bios_corruption(); | 144 | check_for_bios_corruption(); |
145 | schedule_delayed_work(&bios_check_work, | 145 | schedule_delayed_work(&bios_check_work, |
146 | round_jiffies_relative(corruption_check_period*HZ)); | 146 | round_jiffies_relative(corruption_check_period*HZ)); |
147 | } | 147 | } |
148 | 148 | ||
149 | static int start_periodic_check_for_corruption(void) | 149 | static int start_periodic_check_for_corruption(void) |
150 | { | 150 | { |
151 | if (!memory_corruption_check || corruption_check_period == 0) | 151 | if (!num_scan_areas || !memory_corruption_check || corruption_check_period == 0) |
152 | return 0; | 152 | return 0; |
153 | 153 | ||
154 | printk(KERN_INFO "Scanning for low memory corruption every %d seconds\n", | 154 | printk(KERN_INFO "Scanning for low memory corruption every %d seconds\n", |
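
Taken together, the two check.c hunks make a machine with no scannable low-memory areas completely quiet: the boot-time message is skipped and the periodic work item is never armed. A condensed sketch of the guard, with the rest of the function body elided:

    static int start_periodic_check_for_corruption(void)
    {
            /* nothing to scan, scanning disabled, or period zero: stay quiet */
            if (!num_scan_areas || !memory_corruption_check ||
                corruption_check_period == 0)
                    return 0;

            /* ... announce the interval and arm bios_check_work as before ... */
            return 0;
    }
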
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 7c7bedb83c5a..3ecece0217ef 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -233,18 +233,22 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c) | |||
233 | } | 233 | } |
234 | #endif | 234 | #endif |
235 | 235 | ||
236 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) | 236 | #ifdef CONFIG_NUMA |
237 | /* | ||
238 | * To workaround broken NUMA config. Read the comment in | ||
239 | * srat_detect_node(). | ||
240 | */ | ||
237 | static int __cpuinit nearby_node(int apicid) | 241 | static int __cpuinit nearby_node(int apicid) |
238 | { | 242 | { |
239 | int i, node; | 243 | int i, node; |
240 | 244 | ||
241 | for (i = apicid - 1; i >= 0; i--) { | 245 | for (i = apicid - 1; i >= 0; i--) { |
242 | node = apicid_to_node[i]; | 246 | node = __apicid_to_node[i]; |
243 | if (node != NUMA_NO_NODE && node_online(node)) | 247 | if (node != NUMA_NO_NODE && node_online(node)) |
244 | return node; | 248 | return node; |
245 | } | 249 | } |
246 | for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) { | 250 | for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) { |
247 | node = apicid_to_node[i]; | 251 | node = __apicid_to_node[i]; |
248 | if (node != NUMA_NO_NODE && node_online(node)) | 252 | if (node != NUMA_NO_NODE && node_online(node)) |
249 | return node; | 253 | return node; |
250 | } | 254 | } |
@@ -261,7 +265,7 @@ static int __cpuinit nearby_node(int apicid) | |||
261 | #ifdef CONFIG_X86_HT | 265 | #ifdef CONFIG_X86_HT |
262 | static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c) | 266 | static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c) |
263 | { | 267 | { |
264 | u32 nodes; | 268 | u32 nodes, cores_per_cu = 1; |
265 | u8 node_id; | 269 | u8 node_id; |
266 | int cpu = smp_processor_id(); | 270 | int cpu = smp_processor_id(); |
267 | 271 | ||
@@ -276,6 +280,7 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c) | |||
276 | /* get compute unit information */ | 280 | /* get compute unit information */ |
277 | smp_num_siblings = ((ebx >> 8) & 3) + 1; | 281 | smp_num_siblings = ((ebx >> 8) & 3) + 1; |
278 | c->compute_unit_id = ebx & 0xff; | 282 | c->compute_unit_id = ebx & 0xff; |
283 | cores_per_cu += ((ebx >> 8) & 3); | ||
279 | } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) { | 284 | } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) { |
280 | u64 value; | 285 | u64 value; |
281 | 286 | ||
@@ -288,15 +293,18 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c) | |||
288 | /* fixup multi-node processor information */ | 293 | /* fixup multi-node processor information */ |
289 | if (nodes > 1) { | 294 | if (nodes > 1) { |
290 | u32 cores_per_node; | 295 | u32 cores_per_node; |
296 | u32 cus_per_node; | ||
291 | 297 | ||
292 | set_cpu_cap(c, X86_FEATURE_AMD_DCM); | 298 | set_cpu_cap(c, X86_FEATURE_AMD_DCM); |
293 | cores_per_node = c->x86_max_cores / nodes; | 299 | cores_per_node = c->x86_max_cores / nodes; |
300 | cus_per_node = cores_per_node / cores_per_cu; | ||
294 | 301 | ||
295 | /* store NodeID, use llc_shared_map to store sibling info */ | 302 | /* store NodeID, use llc_shared_map to store sibling info */ |
296 | per_cpu(cpu_llc_id, cpu) = node_id; | 303 | per_cpu(cpu_llc_id, cpu) = node_id; |
297 | 304 | ||
298 | /* core id to be in range from 0 to (cores_per_node - 1) */ | 305 | /* core id has to be in the [0 .. cores_per_node - 1] range */ |
299 | c->cpu_core_id = c->cpu_core_id % cores_per_node; | 306 | c->cpu_core_id %= cores_per_node; |
307 | c->compute_unit_id %= cus_per_node; | ||
300 | } | 308 | } |
301 | } | 309 | } |
302 | #endif | 310 | #endif |
@@ -334,31 +342,40 @@ EXPORT_SYMBOL_GPL(amd_get_nb_id); | |||
334 | 342 | ||
335 | static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) | 343 | static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) |
336 | { | 344 | { |
337 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) | 345 | #ifdef CONFIG_NUMA |
338 | int cpu = smp_processor_id(); | 346 | int cpu = smp_processor_id(); |
339 | int node; | 347 | int node; |
340 | unsigned apicid = c->apicid; | 348 | unsigned apicid = c->apicid; |
341 | 349 | ||
342 | node = per_cpu(cpu_llc_id, cpu); | 350 | node = numa_cpu_node(cpu); |
351 | if (node == NUMA_NO_NODE) | ||
352 | node = per_cpu(cpu_llc_id, cpu); | ||
343 | 353 | ||
344 | if (apicid_to_node[apicid] != NUMA_NO_NODE) | ||
345 | node = apicid_to_node[apicid]; | ||
346 | if (!node_online(node)) { | 354 | if (!node_online(node)) { |
347 | /* Two possibilities here: | 355 | /* |
348 | - The CPU is missing memory and no node was created. | 356 | * Two possibilities here: |
349 | In that case try picking one from a nearby CPU | 357 | * |
350 | - The APIC IDs differ from the HyperTransport node IDs | 358 | * - The CPU is missing memory and no node was created. In |
351 | which the K8 northbridge parsing fills in. | 359 | * that case try picking one from a nearby CPU. |
352 | Assume they are all increased by a constant offset, | 360 | * |
353 | but in the same order as the HT nodeids. | 361 | * - The APIC IDs differ from the HyperTransport node IDs |
354 | If that doesn't result in a usable node fall back to the | 362 | * which the K8 northbridge parsing fills in. Assume |
355 | path for the previous case. */ | 363 | * they are all increased by a constant offset, but in |
356 | 364 | * the same order as the HT nodeids. If that doesn't | |
365 | * result in a usable node fall back to the path for the | ||
366 | * previous case. | ||
367 | * | ||
368 | * This workaround operates directly on the mapping between | ||
369 | * APIC ID and NUMA node, assuming certain relationship | ||
370 | * between APIC ID, HT node ID and NUMA topology. As going | ||
371 | * through CPU mapping may alter the outcome, directly | ||
372 | * access __apicid_to_node[]. | ||
373 | */ | ||
357 | int ht_nodeid = c->initial_apicid; | 374 | int ht_nodeid = c->initial_apicid; |
358 | 375 | ||
359 | if (ht_nodeid >= 0 && | 376 | if (ht_nodeid >= 0 && |
360 | apicid_to_node[ht_nodeid] != NUMA_NO_NODE) | 377 | __apicid_to_node[ht_nodeid] != NUMA_NO_NODE) |
361 | node = apicid_to_node[ht_nodeid]; | 378 | node = __apicid_to_node[ht_nodeid]; |
362 | /* Pick a nearby node */ | 379 | /* Pick a nearby node */ |
363 | if (!node_online(node)) | 380 | if (!node_online(node)) |
364 | node = nearby_node(apicid); | 381 | node = nearby_node(apicid); |
@@ -594,6 +611,10 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
594 | } | 611 | } |
595 | } | 612 | } |
596 | #endif | 613 | #endif |
614 | |||
615 | /* As a rule processors have APIC timer running in deep C states */ | ||
616 | if (c->x86 >= 0xf && !cpu_has_amd_erratum(amd_erratum_400)) | ||
617 | set_cpu_cap(c, X86_FEATURE_ARAT); | ||
597 | } | 618 | } |
598 | 619 | ||
599 | #ifdef CONFIG_X86_32 | 620 | #ifdef CONFIG_X86_32 |
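
A worked example of the new compute-unit fixup in amd_get_topology(), with invented numbers: on a hypothetical two-node part reporting x86_max_cores = 8 and two cores per compute unit, the node-local renumbering works out as follows.

    /* Invented example values, for illustration only. */
    static void example_amd_topology_fixup(void)
    {
            unsigned x86_max_cores = 8, nodes = 2, cores_per_cu = 2;
            unsigned cpu_core_id = 6, compute_unit_id = 3;    /* BIOS numbering */

            unsigned cores_per_node = x86_max_cores / nodes;          /* 4 */
            unsigned cus_per_node   = cores_per_node / cores_per_cu;  /* 2 */

            cpu_core_id     %= cores_per_node;  /* 6 -> 2, node-local core id */
            compute_unit_id %= cus_per_node;    /* 3 -> 1, node-local CU id   */
    }
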
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 1d59834396bd..e2ced0074a45 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -675,7 +675,7 @@ void __init early_cpu_init(void) | |||
675 | const struct cpu_dev *const *cdev; | 675 | const struct cpu_dev *const *cdev; |
676 | int count = 0; | 676 | int count = 0; |
677 | 677 | ||
678 | #ifdef PROCESSOR_SELECT | 678 | #ifdef CONFIG_PROCESSOR_SELECT |
679 | printk(KERN_INFO "KERNEL supported cpus:\n"); | 679 | printk(KERN_INFO "KERNEL supported cpus:\n"); |
680 | #endif | 680 | #endif |
681 | 681 | ||
@@ -687,7 +687,7 @@ void __init early_cpu_init(void) | |||
687 | cpu_devs[count] = cpudev; | 687 | cpu_devs[count] = cpudev; |
688 | count++; | 688 | count++; |
689 | 689 | ||
690 | #ifdef PROCESSOR_SELECT | 690 | #ifdef CONFIG_PROCESSOR_SELECT |
691 | { | 691 | { |
692 | unsigned int j; | 692 | unsigned int j; |
693 | 693 | ||
@@ -869,7 +869,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | |||
869 | 869 | ||
870 | select_idle_routine(c); | 870 | select_idle_routine(c); |
871 | 871 | ||
872 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) | 872 | #ifdef CONFIG_NUMA |
873 | numa_add_cpu(smp_processor_id()); | 873 | numa_add_cpu(smp_processor_id()); |
874 | #endif | 874 | #endif |
875 | } | 875 | } |
diff --git a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c index 4f6f679f2799..4a5a42b842ad 100644 --- a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c | |||
@@ -195,7 +195,7 @@ static unsigned int pcc_get_freq(unsigned int cpu) | |||
195 | cmd_incomplete: | 195 | cmd_incomplete: |
196 | iowrite16(0, &pcch_hdr->status); | 196 | iowrite16(0, &pcch_hdr->status); |
197 | spin_unlock(&pcc_lock); | 197 | spin_unlock(&pcc_lock); |
198 | return -EINVAL; | 198 | return 0; |
199 | } | 199 | } |
200 | 200 | ||
201 | static int pcc_cpufreq_target(struct cpufreq_policy *policy, | 201 | static int pcc_cpufreq_target(struct cpufreq_policy *policy, |
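
The pcc_get_freq() change matters because a cpufreq driver's ->get() hook returns an unsigned frequency in kHz; pushing -EINVAL through that type would be read by the core as an absurdly large frequency, whereas 0 is the conventional "unknown" value. A hypothetical call site (not part of this diff) shows the convention:

    /* hypothetical in-file caller; pcc_get_freq() keeps the ->get() signature */
    static void example_report_freq(struct cpufreq_policy *policy)
    {
            unsigned int freq = pcc_get_freq(policy->cpu);

            if (freq == 0)          /* the "cmd_incomplete" path above */
                    pr_debug("pcc: current frequency unknown\n");
            else
                    pr_debug("pcc: %u kHz\n", freq);
    }
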
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index d16c2c53d6bf..df86bc8c859d 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
@@ -276,14 +276,13 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) | |||
276 | 276 | ||
277 | static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) | 277 | static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) |
278 | { | 278 | { |
279 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) | 279 | #ifdef CONFIG_NUMA |
280 | unsigned node; | 280 | unsigned node; |
281 | int cpu = smp_processor_id(); | 281 | int cpu = smp_processor_id(); |
282 | int apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid; | ||
283 | 282 | ||
284 | /* Don't do the funky fallback heuristics the AMD version employs | 283 | /* Don't do the funky fallback heuristics the AMD version employs |
285 | for now. */ | 284 | for now. */ |
286 | node = apicid_to_node[apicid]; | 285 | node = numa_cpu_node(cpu); |
287 | if (node == NUMA_NO_NODE || !node_online(node)) { | 286 | if (node == NUMA_NO_NODE || !node_online(node)) { |
288 | /* reuse the value from init_cpu_to_node() */ | 287 | /* reuse the value from init_cpu_to_node() */ |
289 | node = cpu_to_node(cpu); | 288 | node = cpu_to_node(cpu); |
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index ec2c19a7b8ef..1ce1af2899df 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
@@ -304,8 +304,9 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, | |||
304 | 304 | ||
305 | struct _cache_attr { | 305 | struct _cache_attr { |
306 | struct attribute attr; | 306 | struct attribute attr; |
307 | ssize_t (*show)(struct _cpuid4_info *, char *); | 307 | ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int); |
308 | ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count); | 308 | ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count, |
309 | unsigned int); | ||
309 | }; | 310 | }; |
310 | 311 | ||
311 | #ifdef CONFIG_AMD_NB | 312 | #ifdef CONFIG_AMD_NB |
@@ -400,7 +401,8 @@ static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf, | |||
400 | 401 | ||
401 | #define SHOW_CACHE_DISABLE(slot) \ | 402 | #define SHOW_CACHE_DISABLE(slot) \ |
402 | static ssize_t \ | 403 | static ssize_t \ |
403 | show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf) \ | 404 | show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf, \ |
405 | unsigned int cpu) \ | ||
404 | { \ | 406 | { \ |
405 | return show_cache_disable(this_leaf, buf, slot); \ | 407 | return show_cache_disable(this_leaf, buf, slot); \ |
406 | } | 408 | } |
@@ -512,7 +514,8 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf, | |||
512 | #define STORE_CACHE_DISABLE(slot) \ | 514 | #define STORE_CACHE_DISABLE(slot) \ |
513 | static ssize_t \ | 515 | static ssize_t \ |
514 | store_cache_disable_##slot(struct _cpuid4_info *this_leaf, \ | 516 | store_cache_disable_##slot(struct _cpuid4_info *this_leaf, \ |
515 | const char *buf, size_t count) \ | 517 | const char *buf, size_t count, \ |
518 | unsigned int cpu) \ | ||
516 | { \ | 519 | { \ |
517 | return store_cache_disable(this_leaf, buf, count, slot); \ | 520 | return store_cache_disable(this_leaf, buf, count, slot); \ |
518 | } | 521 | } |
@@ -524,6 +527,39 @@ static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644, | |||
524 | static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644, | 527 | static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644, |
525 | show_cache_disable_1, store_cache_disable_1); | 528 | show_cache_disable_1, store_cache_disable_1); |
526 | 529 | ||
530 | static ssize_t | ||
531 | show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu) | ||
532 | { | ||
533 | if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) | ||
534 | return -EINVAL; | ||
535 | |||
536 | return sprintf(buf, "%x\n", amd_get_subcaches(cpu)); | ||
537 | } | ||
538 | |||
539 | static ssize_t | ||
540 | store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count, | ||
541 | unsigned int cpu) | ||
542 | { | ||
543 | unsigned long val; | ||
544 | |||
545 | if (!capable(CAP_SYS_ADMIN)) | ||
546 | return -EPERM; | ||
547 | |||
548 | if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) | ||
549 | return -EINVAL; | ||
550 | |||
551 | if (strict_strtoul(buf, 16, &val) < 0) | ||
552 | return -EINVAL; | ||
553 | |||
554 | if (amd_set_subcaches(cpu, val)) | ||
555 | return -EINVAL; | ||
556 | |||
557 | return count; | ||
558 | } | ||
559 | |||
560 | static struct _cache_attr subcaches = | ||
561 | __ATTR(subcaches, 0644, show_subcaches, store_subcaches); | ||
562 | |||
527 | #else /* CONFIG_AMD_NB */ | 563 | #else /* CONFIG_AMD_NB */ |
528 | #define amd_init_l3_cache(x, y) | 564 | #define amd_init_l3_cache(x, y) |
529 | #endif /* CONFIG_AMD_NB */ | 565 | #endif /* CONFIG_AMD_NB */ |
@@ -532,9 +568,9 @@ static int | |||
532 | __cpuinit cpuid4_cache_lookup_regs(int index, | 568 | __cpuinit cpuid4_cache_lookup_regs(int index, |
533 | struct _cpuid4_info_regs *this_leaf) | 569 | struct _cpuid4_info_regs *this_leaf) |
534 | { | 570 | { |
535 | union _cpuid4_leaf_eax eax; | 571 | union _cpuid4_leaf_eax eax; |
536 | union _cpuid4_leaf_ebx ebx; | 572 | union _cpuid4_leaf_ebx ebx; |
537 | union _cpuid4_leaf_ecx ecx; | 573 | union _cpuid4_leaf_ecx ecx; |
538 | unsigned edx; | 574 | unsigned edx; |
539 | 575 | ||
540 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { | 576 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { |
@@ -732,11 +768,11 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) | |||
732 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 768 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
733 | 769 | ||
734 | if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) { | 770 | if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) { |
735 | for_each_cpu(i, c->llc_shared_map) { | 771 | for_each_cpu(i, cpu_llc_shared_mask(cpu)) { |
736 | if (!per_cpu(ici_cpuid4_info, i)) | 772 | if (!per_cpu(ici_cpuid4_info, i)) |
737 | continue; | 773 | continue; |
738 | this_leaf = CPUID4_INFO_IDX(i, index); | 774 | this_leaf = CPUID4_INFO_IDX(i, index); |
739 | for_each_cpu(sibling, c->llc_shared_map) { | 775 | for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) { |
740 | if (!cpu_online(sibling)) | 776 | if (!cpu_online(sibling)) |
741 | continue; | 777 | continue; |
742 | set_bit(sibling, this_leaf->shared_cpu_map); | 778 | set_bit(sibling, this_leaf->shared_cpu_map); |
@@ -870,8 +906,8 @@ static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject); | |||
870 | #define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(ici_index_kobject, x))[y])) | 906 | #define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(ici_index_kobject, x))[y])) |
871 | 907 | ||
872 | #define show_one_plus(file_name, object, val) \ | 908 | #define show_one_plus(file_name, object, val) \ |
873 | static ssize_t show_##file_name \ | 909 | static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \ |
874 | (struct _cpuid4_info *this_leaf, char *buf) \ | 910 | unsigned int cpu) \ |
875 | { \ | 911 | { \ |
876 | return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \ | 912 | return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \ |
877 | } | 913 | } |
@@ -882,7 +918,8 @@ show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1); | |||
882 | show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1); | 918 | show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1); |
883 | show_one_plus(number_of_sets, ecx.split.number_of_sets, 1); | 919 | show_one_plus(number_of_sets, ecx.split.number_of_sets, 1); |
884 | 920 | ||
885 | static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf) | 921 | static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf, |
922 | unsigned int cpu) | ||
886 | { | 923 | { |
887 | return sprintf(buf, "%luK\n", this_leaf->size / 1024); | 924 | return sprintf(buf, "%luK\n", this_leaf->size / 1024); |
888 | } | 925 | } |
@@ -906,17 +943,20 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf, | |||
906 | return n; | 943 | return n; |
907 | } | 944 | } |
908 | 945 | ||
909 | static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf) | 946 | static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf, |
947 | unsigned int cpu) | ||
910 | { | 948 | { |
911 | return show_shared_cpu_map_func(leaf, 0, buf); | 949 | return show_shared_cpu_map_func(leaf, 0, buf); |
912 | } | 950 | } |
913 | 951 | ||
914 | static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf) | 952 | static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf, |
953 | unsigned int cpu) | ||
915 | { | 954 | { |
916 | return show_shared_cpu_map_func(leaf, 1, buf); | 955 | return show_shared_cpu_map_func(leaf, 1, buf); |
917 | } | 956 | } |
918 | 957 | ||
919 | static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) | 958 | static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf, |
959 | unsigned int cpu) | ||
920 | { | 960 | { |
921 | switch (this_leaf->eax.split.type) { | 961 | switch (this_leaf->eax.split.type) { |
922 | case CACHE_TYPE_DATA: | 962 | case CACHE_TYPE_DATA: |
@@ -974,6 +1014,9 @@ static struct attribute ** __cpuinit amd_l3_attrs(void) | |||
974 | if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) | 1014 | if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) |
975 | n += 2; | 1015 | n += 2; |
976 | 1016 | ||
1017 | if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) | ||
1018 | n += 1; | ||
1019 | |||
977 | attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL); | 1020 | attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL); |
978 | if (attrs == NULL) | 1021 | if (attrs == NULL) |
979 | return attrs = default_attrs; | 1022 | return attrs = default_attrs; |
@@ -986,6 +1029,9 @@ static struct attribute ** __cpuinit amd_l3_attrs(void) | |||
986 | attrs[n++] = &cache_disable_1.attr; | 1029 | attrs[n++] = &cache_disable_1.attr; |
987 | } | 1030 | } |
988 | 1031 | ||
1032 | if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) | ||
1033 | attrs[n++] = &subcaches.attr; | ||
1034 | |||
989 | return attrs; | 1035 | return attrs; |
990 | } | 1036 | } |
991 | #endif | 1037 | #endif |
@@ -998,7 +1044,7 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) | |||
998 | 1044 | ||
999 | ret = fattr->show ? | 1045 | ret = fattr->show ? |
1000 | fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), | 1046 | fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), |
1001 | buf) : | 1047 | buf, this_leaf->cpu) : |
1002 | 0; | 1048 | 0; |
1003 | return ret; | 1049 | return ret; |
1004 | } | 1050 | } |
@@ -1012,7 +1058,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr, | |||
1012 | 1058 | ||
1013 | ret = fattr->store ? | 1059 | ret = fattr->store ? |
1014 | fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), | 1060 | fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), |
1015 | buf, count) : | 1061 | buf, count, this_leaf->cpu) : |
1016 | 0; | 1062 | 0; |
1017 | return ret; | 1063 | return ret; |
1018 | } | 1064 | } |
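
With the owning CPU threaded through every show/store handler, per-node attributes such as the new "subcaches" file can reach the right northbridge. Simplified from the hunks above (kobject plumbing elided, helper name invented), a read of that sysfs file now flows roughly like this:

    /* sysfs read of .../cache/index3/subcaches, simplified sketch */
    static ssize_t example_read_subcaches(struct _index_kobject *this_leaf,
                                          char *buf)
    {
            struct _cpuid4_info *leaf =
                    CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index);

            /* the generic show() wrapper now passes this_leaf->cpu along */
            return show_subcaches(leaf, buf, this_leaf->cpu);
    }
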
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 5bf2fac52aca..167f97b5596e 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c | |||
@@ -527,15 +527,12 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
527 | int i, err = 0; | 527 | int i, err = 0; |
528 | struct threshold_bank *b = NULL; | 528 | struct threshold_bank *b = NULL; |
529 | char name[32]; | 529 | char name[32]; |
530 | #ifdef CONFIG_SMP | ||
531 | struct cpuinfo_x86 *c = &cpu_data(cpu); | ||
532 | #endif | ||
533 | 530 | ||
534 | sprintf(name, "threshold_bank%i", bank); | 531 | sprintf(name, "threshold_bank%i", bank); |
535 | 532 | ||
536 | #ifdef CONFIG_SMP | 533 | #ifdef CONFIG_SMP |
537 | if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */ | 534 | if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */ |
538 | i = cpumask_first(c->llc_shared_map); | 535 | i = cpumask_first(cpu_llc_shared_mask(cpu)); |
539 | 536 | ||
540 | /* first core not up yet */ | 537 | /* first core not up yet */ |
541 | if (cpu_data(i).cpu_core_id) | 538 | if (cpu_data(i).cpu_core_id) |
@@ -555,7 +552,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
555 | if (err) | 552 | if (err) |
556 | goto out; | 553 | goto out; |
557 | 554 | ||
558 | cpumask_copy(b->cpus, c->llc_shared_map); | 555 | cpumask_copy(b->cpus, cpu_llc_shared_mask(cpu)); |
559 | per_cpu(threshold_banks, cpu)[bank] = b; | 556 | per_cpu(threshold_banks, cpu)[bank] = b; |
560 | 557 | ||
561 | goto out; | 558 | goto out; |
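
cpu_llc_shared_mask() replaces the llc_shared_map member that used to live in struct cpuinfo_x86; elsewhere in this series it is a small per-cpu accessor, presumably shaped along these lines (shown for context, not taken from this hunk):

    /* presumed definitions in <asm/smp.h> */
    DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);

    static inline struct cpumask *cpu_llc_shared_mask(int cpu)
    {
            return per_cpu(cpu_llc_shared_map, cpu);
    }
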
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 9d977a2ea693..26604188aa49 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <asm/stacktrace.h> | 30 | #include <asm/stacktrace.h> |
31 | #include <asm/nmi.h> | 31 | #include <asm/nmi.h> |
32 | #include <asm/compat.h> | 32 | #include <asm/compat.h> |
33 | #include <asm/smp.h> | ||
33 | 34 | ||
34 | #if 0 | 35 | #if 0 |
35 | #undef wrmsrl | 36 | #undef wrmsrl |
@@ -93,6 +94,8 @@ struct amd_nb { | |||
93 | struct event_constraint event_constraints[X86_PMC_IDX_MAX]; | 94 | struct event_constraint event_constraints[X86_PMC_IDX_MAX]; |
94 | }; | 95 | }; |
95 | 96 | ||
97 | struct intel_percore; | ||
98 | |||
96 | #define MAX_LBR_ENTRIES 16 | 99 | #define MAX_LBR_ENTRIES 16 |
97 | 100 | ||
98 | struct cpu_hw_events { | 101 | struct cpu_hw_events { |
@@ -128,6 +131,13 @@ struct cpu_hw_events { | |||
128 | struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES]; | 131 | struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES]; |
129 | 132 | ||
130 | /* | 133 | /* |
134 | * Intel percore register state. | ||
135 | * Coordinate shared resources between HT threads. | ||
136 | */ | ||
137 | int percore_used; /* Used by this CPU? */ | ||
138 | struct intel_percore *per_core; | ||
139 | |||
140 | /* | ||
131 | * AMD specific bits | 141 | * AMD specific bits |
132 | */ | 142 | */ |
133 | struct amd_nb *amd_nb; | 143 | struct amd_nb *amd_nb; |
@@ -166,8 +176,10 @@ struct cpu_hw_events { | |||
166 | /* | 176 | /* |
167 | * Constraint on the Event code + UMask | 177 | * Constraint on the Event code + UMask |
168 | */ | 178 | */ |
169 | #define PEBS_EVENT_CONSTRAINT(c, n) \ | 179 | #define INTEL_UEVENT_CONSTRAINT(c, n) \ |
170 | EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK) | 180 | EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK) |
181 | #define PEBS_EVENT_CONSTRAINT(c, n) \ | ||
182 | INTEL_UEVENT_CONSTRAINT(c, n) | ||
171 | 183 | ||
172 | #define EVENT_CONSTRAINT_END \ | 184 | #define EVENT_CONSTRAINT_END \ |
173 | EVENT_CONSTRAINT(0, 0, 0) | 185 | EVENT_CONSTRAINT(0, 0, 0) |
@@ -175,6 +187,28 @@ struct cpu_hw_events { | |||
175 | #define for_each_event_constraint(e, c) \ | 187 | #define for_each_event_constraint(e, c) \ |
176 | for ((e) = (c); (e)->weight; (e)++) | 188 | for ((e) = (c); (e)->weight; (e)++) |
177 | 189 | ||
190 | /* | ||
191 | * Extra registers for specific events. | ||
192 | * Some events need large masks and require external MSRs. | ||
193 | * Define a mapping to these extra registers. | ||
194 | */ | ||
195 | struct extra_reg { | ||
196 | unsigned int event; | ||
197 | unsigned int msr; | ||
198 | u64 config_mask; | ||
199 | u64 valid_mask; | ||
200 | }; | ||
201 | |||
202 | #define EVENT_EXTRA_REG(e, ms, m, vm) { \ | ||
203 | .event = (e), \ | ||
204 | .msr = (ms), \ | ||
205 | .config_mask = (m), \ | ||
206 | .valid_mask = (vm), \ | ||
207 | } | ||
208 | #define INTEL_EVENT_EXTRA_REG(event, msr, vm) \ | ||
209 | EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm) | ||
210 | #define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0) | ||
211 | |||
178 | union perf_capabilities { | 212 | union perf_capabilities { |
179 | struct { | 213 | struct { |
180 | u64 lbr_format : 6; | 214 | u64 lbr_format : 6; |
@@ -219,6 +253,7 @@ struct x86_pmu { | |||
219 | void (*put_event_constraints)(struct cpu_hw_events *cpuc, | 253 | void (*put_event_constraints)(struct cpu_hw_events *cpuc, |
220 | struct perf_event *event); | 254 | struct perf_event *event); |
221 | struct event_constraint *event_constraints; | 255 | struct event_constraint *event_constraints; |
256 | struct event_constraint *percore_constraints; | ||
222 | void (*quirks)(void); | 257 | void (*quirks)(void); |
223 | int perfctr_second_write; | 258 | int perfctr_second_write; |
224 | 259 | ||
@@ -247,6 +282,11 @@ struct x86_pmu { | |||
247 | */ | 282 | */ |
248 | unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */ | 283 | unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */ |
249 | int lbr_nr; /* hardware stack size */ | 284 | int lbr_nr; /* hardware stack size */ |
285 | |||
286 | /* | ||
287 | * Extra registers for events | ||
288 | */ | ||
289 | struct extra_reg *extra_regs; | ||
250 | }; | 290 | }; |
251 | 291 | ||
252 | static struct x86_pmu x86_pmu __read_mostly; | 292 | static struct x86_pmu x86_pmu __read_mostly; |
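
To make the new extra_reg machinery concrete: the follow-on Intel patches fill x86_pmu.extra_regs with tables built from these macros, mapping events whose full mask does not fit in the event-select MSR to a companion MSR. The entry below is an assumption based on the OFFCORE_RESPONSE documentation; the real tables live in perf_event_intel.c.

    /* assumed example: OFFCORE_RESPONSE_0 (event 0xb7) needs MSR_OFFCORE_RSP_0 (0x1a6) */
    static struct extra_reg intel_nehalem_extra_regs[] __read_mostly = {
            INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff),
            EVENT_EXTRA_END
    };
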
@@ -271,6 +311,10 @@ static u64 __read_mostly hw_cache_event_ids | |||
271 | [PERF_COUNT_HW_CACHE_MAX] | 311 | [PERF_COUNT_HW_CACHE_MAX] |
272 | [PERF_COUNT_HW_CACHE_OP_MAX] | 312 | [PERF_COUNT_HW_CACHE_OP_MAX] |
273 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; | 313 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; |
314 | static u64 __read_mostly hw_cache_extra_regs | ||
315 | [PERF_COUNT_HW_CACHE_MAX] | ||
316 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
317 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; | ||
274 | 318 | ||
275 | /* | 319 | /* |
276 | * Propagate event elapsed time into the generic event. | 320 | * Propagate event elapsed time into the generic event. |
@@ -298,7 +342,7 @@ x86_perf_event_update(struct perf_event *event) | |||
298 | */ | 342 | */ |
299 | again: | 343 | again: |
300 | prev_raw_count = local64_read(&hwc->prev_count); | 344 | prev_raw_count = local64_read(&hwc->prev_count); |
301 | rdmsrl(hwc->event_base + idx, new_raw_count); | 345 | rdmsrl(hwc->event_base, new_raw_count); |
302 | 346 | ||
303 | if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, | 347 | if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, |
304 | new_raw_count) != prev_raw_count) | 348 | new_raw_count) != prev_raw_count) |
@@ -321,6 +365,49 @@ again: | |||
321 | return new_raw_count; | 365 | return new_raw_count; |
322 | } | 366 | } |
323 | 367 | ||
368 | /* using X86_FEATURE_PERFCTR_CORE to later implement ALTERNATIVE() here */ | ||
369 | static inline int x86_pmu_addr_offset(int index) | ||
370 | { | ||
371 | if (boot_cpu_has(X86_FEATURE_PERFCTR_CORE)) | ||
372 | return index << 1; | ||
373 | return index; | ||
374 | } | ||
375 | |||
376 | static inline unsigned int x86_pmu_config_addr(int index) | ||
377 | { | ||
378 | return x86_pmu.eventsel + x86_pmu_addr_offset(index); | ||
379 | } | ||
380 | |||
381 | static inline unsigned int x86_pmu_event_addr(int index) | ||
382 | { | ||
383 | return x86_pmu.perfctr + x86_pmu_addr_offset(index); | ||
384 | } | ||
385 | |||
386 | /* | ||
387 | * Find and validate any extra registers to set up. | ||
388 | */ | ||
389 | static int x86_pmu_extra_regs(u64 config, struct perf_event *event) | ||
390 | { | ||
391 | struct extra_reg *er; | ||
392 | |||
393 | event->hw.extra_reg = 0; | ||
394 | event->hw.extra_config = 0; | ||
395 | |||
396 | if (!x86_pmu.extra_regs) | ||
397 | return 0; | ||
398 | |||
399 | for (er = x86_pmu.extra_regs; er->msr; er++) { | ||
400 | if (er->event != (config & er->config_mask)) | ||
401 | continue; | ||
402 | if (event->attr.config1 & ~er->valid_mask) | ||
403 | return -EINVAL; | ||
404 | event->hw.extra_reg = er->msr; | ||
405 | event->hw.extra_config = event->attr.config1; | ||
406 | break; | ||
407 | } | ||
408 | return 0; | ||
409 | } | ||
410 | |||
324 | static atomic_t active_events; | 411 | static atomic_t active_events; |
325 | static DEFINE_MUTEX(pmc_reserve_mutex); | 412 | static DEFINE_MUTEX(pmc_reserve_mutex); |
326 | 413 | ||
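
x86_pmu_addr_offset() exists because AMD family 15h (X86_FEATURE_PERFCTR_CORE) interleaves its event-select and counter MSRs, while the older layouts keep them in two contiguous blocks. With the usual base MSRs (numbers assumed from the vendor manuals, not defined in this hunk), the helpers resolve as in the sketch below:

    static void example_fam15h_addressing(void)
    {
            /* For counter index 2 on a fam15h CPU, assuming bases
             * 0xc0010200 (eventsel) and 0xc0010201 (perfctr):
             *   config_addr = 0xc0010200 + (2 << 1) = 0xc0010204
             *   event_addr  = 0xc0010201 + (2 << 1) = 0xc0010205
             * On legacy layouts the offset is just the index itself,
             * e.g. 0xc0010000 + 2 and 0xc0010004 + 2. */
            unsigned int ctl = x86_pmu_config_addr(2);
            unsigned int ctr = x86_pmu_event_addr(2);

            (void)ctl;
            (void)ctr;
    }
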
@@ -331,12 +418,12 @@ static bool reserve_pmc_hardware(void) | |||
331 | int i; | 418 | int i; |
332 | 419 | ||
333 | for (i = 0; i < x86_pmu.num_counters; i++) { | 420 | for (i = 0; i < x86_pmu.num_counters; i++) { |
334 | if (!reserve_perfctr_nmi(x86_pmu.perfctr + i)) | 421 | if (!reserve_perfctr_nmi(x86_pmu_event_addr(i))) |
335 | goto perfctr_fail; | 422 | goto perfctr_fail; |
336 | } | 423 | } |
337 | 424 | ||
338 | for (i = 0; i < x86_pmu.num_counters; i++) { | 425 | for (i = 0; i < x86_pmu.num_counters; i++) { |
339 | if (!reserve_evntsel_nmi(x86_pmu.eventsel + i)) | 426 | if (!reserve_evntsel_nmi(x86_pmu_config_addr(i))) |
340 | goto eventsel_fail; | 427 | goto eventsel_fail; |
341 | } | 428 | } |
342 | 429 | ||
@@ -344,13 +431,13 @@ static bool reserve_pmc_hardware(void) | |||
344 | 431 | ||
345 | eventsel_fail: | 432 | eventsel_fail: |
346 | for (i--; i >= 0; i--) | 433 | for (i--; i >= 0; i--) |
347 | release_evntsel_nmi(x86_pmu.eventsel + i); | 434 | release_evntsel_nmi(x86_pmu_config_addr(i)); |
348 | 435 | ||
349 | i = x86_pmu.num_counters; | 436 | i = x86_pmu.num_counters; |
350 | 437 | ||
351 | perfctr_fail: | 438 | perfctr_fail: |
352 | for (i--; i >= 0; i--) | 439 | for (i--; i >= 0; i--) |
353 | release_perfctr_nmi(x86_pmu.perfctr + i); | 440 | release_perfctr_nmi(x86_pmu_event_addr(i)); |
354 | 441 | ||
355 | return false; | 442 | return false; |
356 | } | 443 | } |
@@ -360,8 +447,8 @@ static void release_pmc_hardware(void) | |||
360 | int i; | 447 | int i; |
361 | 448 | ||
362 | for (i = 0; i < x86_pmu.num_counters; i++) { | 449 | for (i = 0; i < x86_pmu.num_counters; i++) { |
363 | release_perfctr_nmi(x86_pmu.perfctr + i); | 450 | release_perfctr_nmi(x86_pmu_event_addr(i)); |
364 | release_evntsel_nmi(x86_pmu.eventsel + i); | 451 | release_evntsel_nmi(x86_pmu_config_addr(i)); |
365 | } | 452 | } |
366 | } | 453 | } |
367 | 454 | ||
@@ -382,7 +469,7 @@ static bool check_hw_exists(void) | |||
382 | * complain and bail. | 469 | * complain and bail. |
383 | */ | 470 | */ |
384 | for (i = 0; i < x86_pmu.num_counters; i++) { | 471 | for (i = 0; i < x86_pmu.num_counters; i++) { |
385 | reg = x86_pmu.eventsel + i; | 472 | reg = x86_pmu_config_addr(i); |
386 | ret = rdmsrl_safe(reg, &val); | 473 | ret = rdmsrl_safe(reg, &val); |
387 | if (ret) | 474 | if (ret) |
388 | goto msr_fail; | 475 | goto msr_fail; |
@@ -407,8 +494,8 @@ static bool check_hw_exists(void) | |||
407 | * that don't trap on the MSR access and always return 0s. | 494 | * that don't trap on the MSR access and always return 0s. |
408 | */ | 495 | */ |
409 | val = 0xabcdUL; | 496 | val = 0xabcdUL; |
410 | ret = checking_wrmsrl(x86_pmu.perfctr, val); | 497 | ret = checking_wrmsrl(x86_pmu_event_addr(0), val); |
411 | ret |= rdmsrl_safe(x86_pmu.perfctr, &val_new); | 498 | ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new); |
412 | if (ret || val != val_new) | 499 | if (ret || val != val_new) |
413 | goto msr_fail; | 500 | goto msr_fail; |
414 | 501 | ||
@@ -442,8 +529,9 @@ static inline int x86_pmu_initialized(void) | |||
442 | } | 529 | } |
443 | 530 | ||
444 | static inline int | 531 | static inline int |
445 | set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr) | 532 | set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event) |
446 | { | 533 | { |
534 | struct perf_event_attr *attr = &event->attr; | ||
447 | unsigned int cache_type, cache_op, cache_result; | 535 | unsigned int cache_type, cache_op, cache_result; |
448 | u64 config, val; | 536 | u64 config, val; |
449 | 537 | ||
@@ -470,8 +558,8 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr) | |||
470 | return -EINVAL; | 558 | return -EINVAL; |
471 | 559 | ||
472 | hwc->config |= val; | 560 | hwc->config |= val; |
473 | 561 | attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result]; | |
474 | return 0; | 562 | return x86_pmu_extra_regs(val, event); |
475 | } | 563 | } |
476 | 564 | ||
477 | static int x86_setup_perfctr(struct perf_event *event) | 565 | static int x86_setup_perfctr(struct perf_event *event) |
@@ -496,10 +584,10 @@ static int x86_setup_perfctr(struct perf_event *event) | |||
496 | } | 584 | } |
497 | 585 | ||
498 | if (attr->type == PERF_TYPE_RAW) | 586 | if (attr->type == PERF_TYPE_RAW) |
499 | return 0; | 587 | return x86_pmu_extra_regs(event->attr.config, event); |
500 | 588 | ||
501 | if (attr->type == PERF_TYPE_HW_CACHE) | 589 | if (attr->type == PERF_TYPE_HW_CACHE) |
502 | return set_ext_hw_attr(hwc, attr); | 590 | return set_ext_hw_attr(hwc, event); |
503 | 591 | ||
504 | if (attr->config >= x86_pmu.max_events) | 592 | if (attr->config >= x86_pmu.max_events) |
505 | return -EINVAL; | 593 | return -EINVAL; |
@@ -617,11 +705,11 @@ static void x86_pmu_disable_all(void) | |||
617 | 705 | ||
618 | if (!test_bit(idx, cpuc->active_mask)) | 706 | if (!test_bit(idx, cpuc->active_mask)) |
619 | continue; | 707 | continue; |
620 | rdmsrl(x86_pmu.eventsel + idx, val); | 708 | rdmsrl(x86_pmu_config_addr(idx), val); |
621 | if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE)) | 709 | if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE)) |
622 | continue; | 710 | continue; |
623 | val &= ~ARCH_PERFMON_EVENTSEL_ENABLE; | 711 | val &= ~ARCH_PERFMON_EVENTSEL_ENABLE; |
624 | wrmsrl(x86_pmu.eventsel + idx, val); | 712 | wrmsrl(x86_pmu_config_addr(idx), val); |
625 | } | 713 | } |
626 | } | 714 | } |
627 | 715 | ||
@@ -642,21 +730,26 @@ static void x86_pmu_disable(struct pmu *pmu) | |||
642 | x86_pmu.disable_all(); | 730 | x86_pmu.disable_all(); |
643 | } | 731 | } |
644 | 732 | ||
733 | static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, | ||
734 | u64 enable_mask) | ||
735 | { | ||
736 | if (hwc->extra_reg) | ||
737 | wrmsrl(hwc->extra_reg, hwc->extra_config); | ||
738 | wrmsrl(hwc->config_base, hwc->config | enable_mask); | ||
739 | } | ||
740 | |||
645 | static void x86_pmu_enable_all(int added) | 741 | static void x86_pmu_enable_all(int added) |
646 | { | 742 | { |
647 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 743 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
648 | int idx; | 744 | int idx; |
649 | 745 | ||
650 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | 746 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { |
651 | struct perf_event *event = cpuc->events[idx]; | 747 | struct hw_perf_event *hwc = &cpuc->events[idx]->hw; |
652 | u64 val; | ||
653 | 748 | ||
654 | if (!test_bit(idx, cpuc->active_mask)) | 749 | if (!test_bit(idx, cpuc->active_mask)) |
655 | continue; | 750 | continue; |
656 | 751 | ||
657 | val = event->hw.config; | 752 | __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE); |
658 | val |= ARCH_PERFMON_EVENTSEL_ENABLE; | ||
659 | wrmsrl(x86_pmu.eventsel + idx, val); | ||
660 | } | 753 | } |
661 | } | 754 | } |
662 | 755 | ||
@@ -821,15 +914,10 @@ static inline void x86_assign_hw_event(struct perf_event *event, | |||
821 | hwc->event_base = 0; | 914 | hwc->event_base = 0; |
822 | } else if (hwc->idx >= X86_PMC_IDX_FIXED) { | 915 | } else if (hwc->idx >= X86_PMC_IDX_FIXED) { |
823 | hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; | 916 | hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; |
824 | /* | 917 | hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0; |
825 | * We set it so that event_base + idx in wrmsr/rdmsr maps to | ||
826 | * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2: | ||
827 | */ | ||
828 | hwc->event_base = | ||
829 | MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED; | ||
830 | } else { | 918 | } else { |
831 | hwc->config_base = x86_pmu.eventsel; | 919 | hwc->config_base = x86_pmu_config_addr(hwc->idx); |
832 | hwc->event_base = x86_pmu.perfctr; | 920 | hwc->event_base = x86_pmu_event_addr(hwc->idx); |
833 | } | 921 | } |
834 | } | 922 | } |
835 | 923 | ||
@@ -915,17 +1003,11 @@ static void x86_pmu_enable(struct pmu *pmu) | |||
915 | x86_pmu.enable_all(added); | 1003 | x86_pmu.enable_all(added); |
916 | } | 1004 | } |
917 | 1005 | ||
918 | static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, | ||
919 | u64 enable_mask) | ||
920 | { | ||
921 | wrmsrl(hwc->config_base + hwc->idx, hwc->config | enable_mask); | ||
922 | } | ||
923 | |||
924 | static inline void x86_pmu_disable_event(struct perf_event *event) | 1006 | static inline void x86_pmu_disable_event(struct perf_event *event) |
925 | { | 1007 | { |
926 | struct hw_perf_event *hwc = &event->hw; | 1008 | struct hw_perf_event *hwc = &event->hw; |
927 | 1009 | ||
928 | wrmsrl(hwc->config_base + hwc->idx, hwc->config); | 1010 | wrmsrl(hwc->config_base, hwc->config); |
929 | } | 1011 | } |
930 | 1012 | ||
931 | static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); | 1013 | static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); |
@@ -978,7 +1060,7 @@ x86_perf_event_set_period(struct perf_event *event) | |||
978 | */ | 1060 | */ |
979 | local64_set(&hwc->prev_count, (u64)-left); | 1061 | local64_set(&hwc->prev_count, (u64)-left); |
980 | 1062 | ||
981 | wrmsrl(hwc->event_base + idx, (u64)(-left) & x86_pmu.cntval_mask); | 1063 | wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); |
982 | 1064 | ||
983 | /* | 1065 | /* |
984 | * Due to erratum on certain cpu we need | 1066 | * Due to erratum on certain cpu we need |
@@ -986,7 +1068,7 @@ x86_perf_event_set_period(struct perf_event *event) | |||
986 | * is updated properly | 1068 | * is updated properly |
987 | */ | 1069 | */ |
988 | if (x86_pmu.perfctr_second_write) { | 1070 | if (x86_pmu.perfctr_second_write) { |
989 | wrmsrl(hwc->event_base + idx, | 1071 | wrmsrl(hwc->event_base, |
990 | (u64)(-left) & x86_pmu.cntval_mask); | 1072 | (u64)(-left) & x86_pmu.cntval_mask); |
991 | } | 1073 | } |
992 | 1074 | ||
@@ -1113,8 +1195,8 @@ void perf_event_print_debug(void) | |||
1113 | pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask); | 1195 | pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask); |
1114 | 1196 | ||
1115 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | 1197 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { |
1116 | rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl); | 1198 | rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl); |
1117 | rdmsrl(x86_pmu.perfctr + idx, pmc_count); | 1199 | rdmsrl(x86_pmu_event_addr(idx), pmc_count); |
1118 | 1200 | ||
1119 | prev_left = per_cpu(pmc_prev_left[idx], cpu); | 1201 | prev_left = per_cpu(pmc_prev_left[idx], cpu); |
1120 | 1202 | ||
@@ -1389,7 +1471,7 @@ static void __init pmu_check_apic(void) | |||
1389 | pr_info("no hardware sampling interrupt available.\n"); | 1471 | pr_info("no hardware sampling interrupt available.\n"); |
1390 | } | 1472 | } |
1391 | 1473 | ||
1392 | int __init init_hw_perf_events(void) | 1474 | static int __init init_hw_perf_events(void) |
1393 | { | 1475 | { |
1394 | struct event_constraint *c; | 1476 | struct event_constraint *c; |
1395 | int err; | 1477 | int err; |
@@ -1608,7 +1690,7 @@ out: | |||
1608 | return ret; | 1690 | return ret; |
1609 | } | 1691 | } |
1610 | 1692 | ||
1611 | int x86_pmu_event_init(struct perf_event *event) | 1693 | static int x86_pmu_event_init(struct perf_event *event) |
1612 | { | 1694 | { |
1613 | struct pmu *tmp; | 1695 | struct pmu *tmp; |
1614 | int err; | 1696 | int err; |
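The hunks above stop adding hwc->idx at every rdmsrl()/wrmsrl() call site: x86_assign_hw_event() now stores the counter's final MSR address in config_base/event_base, and the remaining per-index address computation is funnelled through x86_pmu_config_addr()/x86_pmu_event_addr(). A minimal sketch of those helpers, assuming an offset function that doubles the index when the AMD core-extension MSR layout (interleaved MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR pairs) is present; the in-tree version may use an alternatives-patched sequence rather than a plain branch:

	static inline int x86_pmu_addr_offset(int index)
	{
		/*
		 * With the core performance counter extension (family 15h)
		 * the event-select and counter MSRs are laid out in pairs,
		 * so the per-counter stride is 2 instead of 1.
		 */
		if (cpu_has_perfctr_core)
			return index << 1;
		return index;
	}

	static inline unsigned int x86_pmu_config_addr(int index)
	{
		return x86_pmu.eventsel + x86_pmu_addr_offset(index);
	}

	static inline unsigned int x86_pmu_event_addr(int index)
	{
		return x86_pmu.perfctr + x86_pmu_addr_offset(index);
	}

With that in place, x86_pmu_disable_all(), perf_event_print_debug() and intel_pmu_reset() can address any counter layout without knowing the stride.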
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index 67e2202a6039..461f62bbd774 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c | |||
@@ -127,6 +127,11 @@ static int amd_pmu_hw_config(struct perf_event *event) | |||
127 | /* | 127 | /* |
128 | * AMD64 events are detected based on their event codes. | 128 | * AMD64 events are detected based on their event codes. |
129 | */ | 129 | */ |
130 | static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc) | ||
131 | { | ||
132 | return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff); | ||
133 | } | ||
134 | |||
130 | static inline int amd_is_nb_event(struct hw_perf_event *hwc) | 135 | static inline int amd_is_nb_event(struct hw_perf_event *hwc) |
131 | { | 136 | { |
132 | return (hwc->config & 0xe0) == 0xe0; | 137 | return (hwc->config & 0xe0) == 0xe0; |
@@ -385,13 +390,181 @@ static __initconst const struct x86_pmu amd_pmu = { | |||
385 | .cpu_dead = amd_pmu_cpu_dead, | 390 | .cpu_dead = amd_pmu_cpu_dead, |
386 | }; | 391 | }; |
387 | 392 | ||
393 | /* AMD Family 15h */ | ||
394 | |||
395 | #define AMD_EVENT_TYPE_MASK 0x000000F0ULL | ||
396 | |||
397 | #define AMD_EVENT_FP 0x00000000ULL ... 0x00000010ULL | ||
398 | #define AMD_EVENT_LS 0x00000020ULL ... 0x00000030ULL | ||
399 | #define AMD_EVENT_DC 0x00000040ULL ... 0x00000050ULL | ||
400 | #define AMD_EVENT_CU 0x00000060ULL ... 0x00000070ULL | ||
401 | #define AMD_EVENT_IC_DE 0x00000080ULL ... 0x00000090ULL | ||
402 | #define AMD_EVENT_EX_LS 0x000000C0ULL | ||
403 | #define AMD_EVENT_DE 0x000000D0ULL | ||
404 | #define AMD_EVENT_NB 0x000000E0ULL ... 0x000000F0ULL | ||
405 | |||
406 | /* | ||
407 | * AMD family 15h event code/PMC mappings: | ||
408 | * | ||
409 | * type = event_code & 0x0F0: | ||
410 | * | ||
411 | * 0x000 FP PERF_CTL[5:3] | ||
412 | * 0x010 FP PERF_CTL[5:3] | ||
413 | * 0x020 LS PERF_CTL[5:0] | ||
414 | * 0x030 LS PERF_CTL[5:0] | ||
415 | * 0x040 DC PERF_CTL[5:0] | ||
416 | * 0x050 DC PERF_CTL[5:0] | ||
417 | * 0x060 CU PERF_CTL[2:0] | ||
418 | * 0x070 CU PERF_CTL[2:0] | ||
419 | * 0x080 IC/DE PERF_CTL[2:0] | ||
420 | * 0x090 IC/DE PERF_CTL[2:0] | ||
421 | * 0x0A0 --- | ||
422 | * 0x0B0 --- | ||
423 | * 0x0C0 EX/LS PERF_CTL[5:0] | ||
424 | * 0x0D0 DE PERF_CTL[2:0] | ||
425 | * 0x0E0 NB NB_PERF_CTL[3:0] | ||
426 | * 0x0F0 NB NB_PERF_CTL[3:0] | ||
427 | * | ||
428 | * Exceptions: | ||
429 | * | ||
430 | * 0x003 FP PERF_CTL[3] | ||
431 | * 0x00B FP PERF_CTL[3] | ||
432 | * 0x00D FP PERF_CTL[3] | ||
433 | * 0x023 DE PERF_CTL[2:0] | ||
434 | * 0x02D LS PERF_CTL[3] | ||
435 | * 0x02E LS PERF_CTL[3,0] | ||
436 | * 0x043 CU PERF_CTL[2:0] | ||
437 | * 0x045 CU PERF_CTL[2:0] | ||
438 | * 0x046 CU PERF_CTL[2:0] | ||
439 | * 0x054 CU PERF_CTL[2:0] | ||
440 | * 0x055 CU PERF_CTL[2:0] | ||
441 | * 0x08F IC PERF_CTL[0] | ||
442 | * 0x187 DE PERF_CTL[0] | ||
443 | * 0x188 DE PERF_CTL[0] | ||
444 | * 0x0DB EX PERF_CTL[5:0] | ||
445 | * 0x0DC LS PERF_CTL[5:0] | ||
446 | * 0x0DD LS PERF_CTL[5:0] | ||
447 | * 0x0DE LS PERF_CTL[5:0] | ||
448 | * 0x0DF LS PERF_CTL[5:0] | ||
449 | * 0x1D6 EX PERF_CTL[5:0] | ||
450 | * 0x1D8 EX PERF_CTL[5:0] | ||
451 | */ | ||
452 | |||
453 | static struct event_constraint amd_f15_PMC0 = EVENT_CONSTRAINT(0, 0x01, 0); | ||
454 | static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0); | ||
455 | static struct event_constraint amd_f15_PMC3 = EVENT_CONSTRAINT(0, 0x08, 0); | ||
456 | static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT(0, 0x09, 0); | ||
457 | static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0); | ||
458 | static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0); | ||
459 | |||
460 | static struct event_constraint * | ||
461 | amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event) | ||
462 | { | ||
463 | unsigned int event_code = amd_get_event_code(&event->hw); | ||
464 | |||
465 | switch (event_code & AMD_EVENT_TYPE_MASK) { | ||
466 | case AMD_EVENT_FP: | ||
467 | switch (event_code) { | ||
468 | case 0x003: | ||
469 | case 0x00B: | ||
470 | case 0x00D: | ||
471 | return &amd_f15_PMC3; | ||
472 | default: | ||
473 | return &amd_f15_PMC53; | ||
474 | } | ||
475 | case AMD_EVENT_LS: | ||
476 | case AMD_EVENT_DC: | ||
477 | case AMD_EVENT_EX_LS: | ||
478 | switch (event_code) { | ||
479 | case 0x023: | ||
480 | case 0x043: | ||
481 | case 0x045: | ||
482 | case 0x046: | ||
483 | case 0x054: | ||
484 | case 0x055: | ||
485 | return &amd_f15_PMC20; | ||
486 | case 0x02D: | ||
487 | return &amd_f15_PMC3; | ||
488 | case 0x02E: | ||
489 | return &amd_f15_PMC30; | ||
490 | default: | ||
491 | return &amd_f15_PMC50; | ||
492 | } | ||
493 | case AMD_EVENT_CU: | ||
494 | case AMD_EVENT_IC_DE: | ||
495 | case AMD_EVENT_DE: | ||
496 | switch (event_code) { | ||
497 | case 0x08F: | ||
498 | case 0x187: | ||
499 | case 0x188: | ||
500 | return &amd_f15_PMC0; | ||
501 | case 0x0DB ... 0x0DF: | ||
502 | case 0x1D6: | ||
503 | case 0x1D8: | ||
504 | return &amd_f15_PMC50; | ||
505 | default: | ||
506 | return &amd_f15_PMC20; | ||
507 | } | ||
508 | case AMD_EVENT_NB: | ||
509 | /* not yet implemented */ | ||
510 | return &emptyconstraint; | ||
511 | default: | ||
512 | return &emptyconstraint; | ||
513 | } | ||
514 | } | ||
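As a standalone illustration of the mapping above (user-space code, illustrative config value): amd_get_event_code() folds the extended event-select bits [35:32] of the config word into bits [11:8] of the event code, and that code is what selects one of the constraint objects just defined.

	#include <stdio.h>

	/* Same bit folding as amd_get_event_code(), on a plain integer. */
	static unsigned int event_code(unsigned long long config)
	{
		return ((config >> 24) & 0x0f00) | (config & 0x00ff);
	}

	int main(void)
	{
		/* Event select 0x1D6 needs extended config bit 32. */
		unsigned long long config = (1ULL << 32) | 0xd6;

		/* Prints 1d6: an EX event, i.e. amd_f15_PMC50 (mask 0x3F, PERF_CTL[5:0]). */
		printf("%x\n", event_code(config));
		return 0;
	}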
515 | |||
516 | static __initconst const struct x86_pmu amd_pmu_f15h = { | ||
517 | .name = "AMD Family 15h", | ||
518 | .handle_irq = x86_pmu_handle_irq, | ||
519 | .disable_all = x86_pmu_disable_all, | ||
520 | .enable_all = x86_pmu_enable_all, | ||
521 | .enable = x86_pmu_enable_event, | ||
522 | .disable = x86_pmu_disable_event, | ||
523 | .hw_config = amd_pmu_hw_config, | ||
524 | .schedule_events = x86_schedule_events, | ||
525 | .eventsel = MSR_F15H_PERF_CTL, | ||
526 | .perfctr = MSR_F15H_PERF_CTR, | ||
527 | .event_map = amd_pmu_event_map, | ||
528 | .max_events = ARRAY_SIZE(amd_perfmon_event_map), | ||
529 | .num_counters = 6, | ||
530 | .cntval_bits = 48, | ||
531 | .cntval_mask = (1ULL << 48) - 1, | ||
532 | .apic = 1, | ||
533 | /* use highest bit to detect overflow */ | ||
534 | .max_period = (1ULL << 47) - 1, | ||
535 | .get_event_constraints = amd_get_event_constraints_f15h, | ||
536 | /* northbridge counters not yet implemented: */ | ||
537 | #if 0 | ||
538 | .put_event_constraints = amd_put_event_constraints, | ||
539 | |||
540 | .cpu_prepare = amd_pmu_cpu_prepare, | ||
541 | .cpu_starting = amd_pmu_cpu_starting, | ||
542 | .cpu_dead = amd_pmu_cpu_dead, | ||
543 | #endif | ||
544 | }; | ||
545 | |||
388 | static __init int amd_pmu_init(void) | 546 | static __init int amd_pmu_init(void) |
389 | { | 547 | { |
390 | /* Performance-monitoring supported from K7 and later: */ | 548 | /* Performance-monitoring supported from K7 and later: */ |
391 | if (boot_cpu_data.x86 < 6) | 549 | if (boot_cpu_data.x86 < 6) |
392 | return -ENODEV; | 550 | return -ENODEV; |
393 | 551 | ||
394 | x86_pmu = amd_pmu; | 552 | /* |
553 | * If core performance counter extensions exists, it must be | ||
554 | * family 15h, otherwise fail. See x86_pmu_addr_offset(). | ||
555 | */ | ||
556 | switch (boot_cpu_data.x86) { | ||
557 | case 0x15: | ||
558 | if (!cpu_has_perfctr_core) | ||
559 | return -ENODEV; | ||
560 | x86_pmu = amd_pmu_f15h; | ||
561 | break; | ||
562 | default: | ||
563 | if (cpu_has_perfctr_core) | ||
564 | return -ENODEV; | ||
565 | x86_pmu = amd_pmu; | ||
566 | break; | ||
567 | } | ||
395 | 568 | ||
396 | /* Events are common for all AMDs */ | 569 | /* Events are common for all AMDs */ |
397 | memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, | 570 | memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, |
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 008835c1d79c..8fc2b2cee1da 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -1,5 +1,27 @@ | |||
1 | #ifdef CONFIG_CPU_SUP_INTEL | 1 | #ifdef CONFIG_CPU_SUP_INTEL |
2 | 2 | ||
3 | #define MAX_EXTRA_REGS 2 | ||
4 | |||
5 | /* | ||
6 | * Per register state. | ||
7 | */ | ||
8 | struct er_account { | ||
9 | int ref; /* reference count */ | ||
10 | unsigned int extra_reg; /* extra MSR number */ | ||
11 | u64 extra_config; /* extra MSR config */ | ||
12 | }; | ||
13 | |||
14 | /* | ||
15 | * Per core state | ||
16 | * This is used to coordinate shared registers for HT threads. | ||
17 | */ | ||
18 | struct intel_percore { | ||
19 | raw_spinlock_t lock; /* protect structure */ | ||
20 | struct er_account regs[MAX_EXTRA_REGS]; | ||
21 | int refcnt; /* number of threads */ | ||
22 | unsigned core_id; | ||
23 | }; | ||
24 | |||
3 | /* | 25 | /* |
4 | * Intel PerfMon, used on Core and later. | 26 | * Intel PerfMon, used on Core and later. |
5 | */ | 27 | */ |
@@ -64,6 +86,18 @@ static struct event_constraint intel_nehalem_event_constraints[] = | |||
64 | EVENT_CONSTRAINT_END | 86 | EVENT_CONSTRAINT_END |
65 | }; | 87 | }; |
66 | 88 | ||
89 | static struct extra_reg intel_nehalem_extra_regs[] = | ||
90 | { | ||
91 | INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff), | ||
92 | EVENT_EXTRA_END | ||
93 | }; | ||
94 | |||
95 | static struct event_constraint intel_nehalem_percore_constraints[] = | ||
96 | { | ||
97 | INTEL_EVENT_CONSTRAINT(0xb7, 0), | ||
98 | EVENT_CONSTRAINT_END | ||
99 | }; | ||
100 | |||
67 | static struct event_constraint intel_westmere_event_constraints[] = | 101 | static struct event_constraint intel_westmere_event_constraints[] = |
68 | { | 102 | { |
69 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | 103 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ |
@@ -76,6 +110,33 @@ static struct event_constraint intel_westmere_event_constraints[] = | |||
76 | EVENT_CONSTRAINT_END | 110 | EVENT_CONSTRAINT_END |
77 | }; | 111 | }; |
78 | 112 | ||
113 | static struct event_constraint intel_snb_event_constraints[] = | ||
114 | { | ||
115 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | ||
116 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ | ||
117 | /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */ | ||
118 | INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */ | ||
119 | INTEL_EVENT_CONSTRAINT(0xb7, 0x1), /* OFF_CORE_RESPONSE_0 */ | ||
120 | INTEL_EVENT_CONSTRAINT(0xbb, 0x8), /* OFF_CORE_RESPONSE_1 */ | ||
121 | INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ | ||
122 | INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ | ||
123 | EVENT_CONSTRAINT_END | ||
124 | }; | ||
125 | |||
126 | static struct extra_reg intel_westmere_extra_regs[] = | ||
127 | { | ||
128 | INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff), | ||
129 | INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff), | ||
130 | EVENT_EXTRA_END | ||
131 | }; | ||
132 | |||
133 | static struct event_constraint intel_westmere_percore_constraints[] = | ||
134 | { | ||
135 | INTEL_EVENT_CONSTRAINT(0xb7, 0), | ||
136 | INTEL_EVENT_CONSTRAINT(0xbb, 0), | ||
137 | EVENT_CONSTRAINT_END | ||
138 | }; | ||
139 | |||
79 | static struct event_constraint intel_gen_event_constraints[] = | 140 | static struct event_constraint intel_gen_event_constraints[] = |
80 | { | 141 | { |
81 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | 142 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ |
@@ -89,6 +150,106 @@ static u64 intel_pmu_event_map(int hw_event) | |||
89 | return intel_perfmon_event_map[hw_event]; | 150 | return intel_perfmon_event_map[hw_event]; |
90 | } | 151 | } |
91 | 152 | ||
153 | static __initconst const u64 snb_hw_cache_event_ids | ||
154 | [PERF_COUNT_HW_CACHE_MAX] | ||
155 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
156 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | ||
157 | { | ||
158 | [ C(L1D) ] = { | ||
159 | [ C(OP_READ) ] = { | ||
160 | [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */ | ||
161 | [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */ | ||
162 | }, | ||
163 | [ C(OP_WRITE) ] = { | ||
164 | [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */ | ||
165 | [ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */ | ||
166 | }, | ||
167 | [ C(OP_PREFETCH) ] = { | ||
168 | [ C(RESULT_ACCESS) ] = 0x0, | ||
169 | [ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */ | ||
170 | }, | ||
171 | }, | ||
172 | [ C(L1I ) ] = { | ||
173 | [ C(OP_READ) ] = { | ||
174 | [ C(RESULT_ACCESS) ] = 0x0, | ||
175 | [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */ | ||
176 | }, | ||
177 | [ C(OP_WRITE) ] = { | ||
178 | [ C(RESULT_ACCESS) ] = -1, | ||
179 | [ C(RESULT_MISS) ] = -1, | ||
180 | }, | ||
181 | [ C(OP_PREFETCH) ] = { | ||
182 | [ C(RESULT_ACCESS) ] = 0x0, | ||
183 | [ C(RESULT_MISS) ] = 0x0, | ||
184 | }, | ||
185 | }, | ||
186 | [ C(LL ) ] = { | ||
187 | /* | ||
188 | * TBD: Need Off-core Response Performance Monitoring support | ||
189 | */ | ||
190 | [ C(OP_READ) ] = { | ||
191 | /* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */ | ||
192 | [ C(RESULT_ACCESS) ] = 0x01b7, | ||
193 | /* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */ | ||
194 | [ C(RESULT_MISS) ] = 0x01bb, | ||
195 | }, | ||
196 | [ C(OP_WRITE) ] = { | ||
197 | /* OFFCORE_RESPONSE_0.ANY_RFO.LOCAL_CACHE */ | ||
198 | [ C(RESULT_ACCESS) ] = 0x01b7, | ||
199 | /* OFFCORE_RESPONSE_1.ANY_RFO.ANY_LLC_MISS */ | ||
200 | [ C(RESULT_MISS) ] = 0x01bb, | ||
201 | }, | ||
202 | [ C(OP_PREFETCH) ] = { | ||
203 | /* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */ | ||
204 | [ C(RESULT_ACCESS) ] = 0x01b7, | ||
205 | /* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */ | ||
206 | [ C(RESULT_MISS) ] = 0x01bb, | ||
207 | }, | ||
208 | }, | ||
209 | [ C(DTLB) ] = { | ||
210 | [ C(OP_READ) ] = { | ||
211 | [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */ | ||
212 | [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */ | ||
213 | }, | ||
214 | [ C(OP_WRITE) ] = { | ||
215 | [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */ | ||
216 | [ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */ | ||
217 | }, | ||
218 | [ C(OP_PREFETCH) ] = { | ||
219 | [ C(RESULT_ACCESS) ] = 0x0, | ||
220 | [ C(RESULT_MISS) ] = 0x0, | ||
221 | }, | ||
222 | }, | ||
223 | [ C(ITLB) ] = { | ||
224 | [ C(OP_READ) ] = { | ||
225 | [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */ | ||
226 | [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */ | ||
227 | }, | ||
228 | [ C(OP_WRITE) ] = { | ||
229 | [ C(RESULT_ACCESS) ] = -1, | ||
230 | [ C(RESULT_MISS) ] = -1, | ||
231 | }, | ||
232 | [ C(OP_PREFETCH) ] = { | ||
233 | [ C(RESULT_ACCESS) ] = -1, | ||
234 | [ C(RESULT_MISS) ] = -1, | ||
235 | }, | ||
236 | }, | ||
237 | [ C(BPU ) ] = { | ||
238 | [ C(OP_READ) ] = { | ||
239 | [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */ | ||
240 | [ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */ | ||
241 | }, | ||
242 | [ C(OP_WRITE) ] = { | ||
243 | [ C(RESULT_ACCESS) ] = -1, | ||
244 | [ C(RESULT_MISS) ] = -1, | ||
245 | }, | ||
246 | [ C(OP_PREFETCH) ] = { | ||
247 | [ C(RESULT_ACCESS) ] = -1, | ||
248 | [ C(RESULT_MISS) ] = -1, | ||
249 | }, | ||
250 | }, | ||
251 | }; | ||
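The 0x0 and -1 entries in these hw_cache_event_ids tables are not event codes: 0 marks a generic cache event that simply is not wired up, and -1 marks an op/result combination the CPU cannot count at all. Roughly what the lookup in set_ext_hw_attr() does with them (simplified sketch, hypothetical helper name, error handling trimmed):

	static int cache_event_to_raw(u64 cfg, u64 *raw)
	{
		unsigned int type   =  cfg        & 0xff;	/* L1D, LL, DTLB, ...  */
		unsigned int op     = (cfg >>  8) & 0xff;	/* read/write/prefetch */
		unsigned int result = (cfg >> 16) & 0xff;	/* access/miss         */
		u64 val;

		if (type >= PERF_COUNT_HW_CACHE_MAX ||
		    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
		    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
			return -EINVAL;

		val = hw_cache_event_ids[type][op][result];
		if (val == 0)		/* nothing wired up for this generic event */
			return -ENOENT;
		if (val == -1)		/* combination not supported on this CPU */
			return -EINVAL;

		*raw = val;		/* becomes the event/umask bits of hwc->config */
		return 0;
	}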
252 | |||
92 | static __initconst const u64 westmere_hw_cache_event_ids | 253 | static __initconst const u64 westmere_hw_cache_event_ids |
93 | [PERF_COUNT_HW_CACHE_MAX] | 254 | [PERF_COUNT_HW_CACHE_MAX] |
94 | [PERF_COUNT_HW_CACHE_OP_MAX] | 255 | [PERF_COUNT_HW_CACHE_OP_MAX] |
@@ -124,16 +285,26 @@ static __initconst const u64 westmere_hw_cache_event_ids | |||
124 | }, | 285 | }, |
125 | [ C(LL ) ] = { | 286 | [ C(LL ) ] = { |
126 | [ C(OP_READ) ] = { | 287 | [ C(OP_READ) ] = { |
127 | [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */ | 288 | /* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */ |
128 | [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */ | 289 | [ C(RESULT_ACCESS) ] = 0x01b7, |
290 | /* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */ | ||
291 | [ C(RESULT_MISS) ] = 0x01bb, | ||
129 | }, | 292 | }, |
293 | /* | ||
294 | * Use RFO, not WRITEBACK, because a write miss would typically occur | ||
295 | * on RFO. | ||
296 | */ | ||
130 | [ C(OP_WRITE) ] = { | 297 | [ C(OP_WRITE) ] = { |
131 | [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */ | 298 | /* OFFCORE_RESPONSE_1.ANY_RFO.LOCAL_CACHE */ |
132 | [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */ | 299 | [ C(RESULT_ACCESS) ] = 0x01bb, |
300 | /* OFFCORE_RESPONSE_0.ANY_RFO.ANY_LLC_MISS */ | ||
301 | [ C(RESULT_MISS) ] = 0x01b7, | ||
133 | }, | 302 | }, |
134 | [ C(OP_PREFETCH) ] = { | 303 | [ C(OP_PREFETCH) ] = { |
135 | [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */ | 304 | /* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */ |
136 | [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */ | 305 | [ C(RESULT_ACCESS) ] = 0x01b7, |
306 | /* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */ | ||
307 | [ C(RESULT_MISS) ] = 0x01bb, | ||
137 | }, | 308 | }, |
138 | }, | 309 | }, |
139 | [ C(DTLB) ] = { | 310 | [ C(DTLB) ] = { |
@@ -180,6 +351,39 @@ static __initconst const u64 westmere_hw_cache_event_ids | |||
180 | }, | 351 | }, |
181 | }; | 352 | }; |
182 | 353 | ||
354 | /* | ||
355 | * OFFCORE_RESPONSE MSR bits (subset), See IA32 SDM Vol 3 30.6.1.3 | ||
356 | */ | ||
357 | |||
358 | #define DMND_DATA_RD (1 << 0) | ||
359 | #define DMND_RFO (1 << 1) | ||
360 | #define DMND_WB (1 << 3) | ||
361 | #define PF_DATA_RD (1 << 4) | ||
362 | #define PF_DATA_RFO (1 << 5) | ||
363 | #define RESP_UNCORE_HIT (1 << 8) | ||
364 | #define RESP_MISS (0xf600) /* non uncore hit */ | ||
365 | |||
366 | static __initconst const u64 nehalem_hw_cache_extra_regs | ||
367 | [PERF_COUNT_HW_CACHE_MAX] | ||
368 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
369 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | ||
370 | { | ||
371 | [ C(LL ) ] = { | ||
372 | [ C(OP_READ) ] = { | ||
373 | [ C(RESULT_ACCESS) ] = DMND_DATA_RD|RESP_UNCORE_HIT, | ||
374 | [ C(RESULT_MISS) ] = DMND_DATA_RD|RESP_MISS, | ||
375 | }, | ||
376 | [ C(OP_WRITE) ] = { | ||
377 | [ C(RESULT_ACCESS) ] = DMND_RFO|DMND_WB|RESP_UNCORE_HIT, | ||
378 | [ C(RESULT_MISS) ] = DMND_RFO|DMND_WB|RESP_MISS, | ||
379 | }, | ||
380 | [ C(OP_PREFETCH) ] = { | ||
381 | [ C(RESULT_ACCESS) ] = PF_DATA_RD|PF_DATA_RFO|RESP_UNCORE_HIT, | ||
382 | [ C(RESULT_MISS) ] = PF_DATA_RD|PF_DATA_RFO|RESP_MISS, | ||
383 | }, | ||
384 | } | ||
385 | }; | ||
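Taking one row of this table together with the bit definitions right above it: the LL read-miss entry is DMND_DATA_RD | RESP_MISS = (1 << 0) | 0xf600 = 0xf601. That value travels in hwc->extra_config, and the 0x01b7 entry in the event-id table is the matching OFFCORE_RESPONSE event select/umask. At enable time the ordering is the one shown earlier in __x86_pmu_enable_event(), illustrated here with the concrete numbers (not literal in-tree code):

	wrmsrl(hwc->extra_reg, 0xf601);		/* MSR_OFFCORE_RSP_0: DMND_DATA_RD|RESP_MISS */
	wrmsrl(hwc->config_base,		/* event 0xB7, umask 0x01, plus enable bit   */
	       hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);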
386 | |||
183 | static __initconst const u64 nehalem_hw_cache_event_ids | 387 | static __initconst const u64 nehalem_hw_cache_event_ids |
184 | [PERF_COUNT_HW_CACHE_MAX] | 388 | [PERF_COUNT_HW_CACHE_MAX] |
185 | [PERF_COUNT_HW_CACHE_OP_MAX] | 389 | [PERF_COUNT_HW_CACHE_OP_MAX] |
@@ -215,16 +419,26 @@ static __initconst const u64 nehalem_hw_cache_event_ids | |||
215 | }, | 419 | }, |
216 | [ C(LL ) ] = { | 420 | [ C(LL ) ] = { |
217 | [ C(OP_READ) ] = { | 421 | [ C(OP_READ) ] = { |
218 | [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */ | 422 | /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */ |
219 | [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */ | 423 | [ C(RESULT_ACCESS) ] = 0x01b7, |
424 | /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */ | ||
425 | [ C(RESULT_MISS) ] = 0x01b7, | ||
220 | }, | 426 | }, |
427 | /* | ||
428 | * Use RFO, not WRITEBACK, because a write miss would typically occur | ||
429 | * on RFO. | ||
430 | */ | ||
221 | [ C(OP_WRITE) ] = { | 431 | [ C(OP_WRITE) ] = { |
222 | [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */ | 432 | /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */ |
223 | [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */ | 433 | [ C(RESULT_ACCESS) ] = 0x01b7, |
434 | /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */ | ||
435 | [ C(RESULT_MISS) ] = 0x01b7, | ||
224 | }, | 436 | }, |
225 | [ C(OP_PREFETCH) ] = { | 437 | [ C(OP_PREFETCH) ] = { |
226 | [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */ | 438 | /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */ |
227 | [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */ | 439 | [ C(RESULT_ACCESS) ] = 0x01b7, |
440 | /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */ | ||
441 | [ C(RESULT_MISS) ] = 0x01b7, | ||
228 | }, | 442 | }, |
229 | }, | 443 | }, |
230 | [ C(DTLB) ] = { | 444 | [ C(DTLB) ] = { |
@@ -691,8 +905,8 @@ static void intel_pmu_reset(void) | |||
691 | printk("clearing PMU state on CPU#%d\n", smp_processor_id()); | 905 | printk("clearing PMU state on CPU#%d\n", smp_processor_id()); |
692 | 906 | ||
693 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | 907 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { |
694 | checking_wrmsrl(x86_pmu.eventsel + idx, 0ull); | 908 | checking_wrmsrl(x86_pmu_config_addr(idx), 0ull); |
695 | checking_wrmsrl(x86_pmu.perfctr + idx, 0ull); | 909 | checking_wrmsrl(x86_pmu_event_addr(idx), 0ull); |
696 | } | 910 | } |
697 | for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) | 911 | for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) |
698 | checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); | 912 | checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); |
@@ -794,6 +1008,67 @@ intel_bts_constraints(struct perf_event *event) | |||
794 | } | 1008 | } |
795 | 1009 | ||
796 | static struct event_constraint * | 1010 | static struct event_constraint * |
1011 | intel_percore_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) | ||
1012 | { | ||
1013 | struct hw_perf_event *hwc = &event->hw; | ||
1014 | unsigned int e = hwc->config & ARCH_PERFMON_EVENTSEL_EVENT; | ||
1015 | struct event_constraint *c; | ||
1016 | struct intel_percore *pc; | ||
1017 | struct er_account *era; | ||
1018 | int i; | ||
1019 | int free_slot; | ||
1020 | int found; | ||
1021 | |||
1022 | if (!x86_pmu.percore_constraints || hwc->extra_alloc) | ||
1023 | return NULL; | ||
1024 | |||
1025 | for (c = x86_pmu.percore_constraints; c->cmask; c++) { | ||
1026 | if (e != c->code) | ||
1027 | continue; | ||
1028 | |||
1029 | /* | ||
1030 | * Allocate resource per core. | ||
1031 | */ | ||
1032 | pc = cpuc->per_core; | ||
1033 | if (!pc) | ||
1034 | break; | ||
1035 | c = &emptyconstraint; | ||
1036 | raw_spin_lock(&pc->lock); | ||
1037 | free_slot = -1; | ||
1038 | found = 0; | ||
1039 | for (i = 0; i < MAX_EXTRA_REGS; i++) { | ||
1040 | era = &pc->regs[i]; | ||
1041 | if (era->ref > 0 && hwc->extra_reg == era->extra_reg) { | ||
1042 | /* Allow sharing same config */ | ||
1043 | if (hwc->extra_config == era->extra_config) { | ||
1044 | era->ref++; | ||
1045 | cpuc->percore_used = 1; | ||
1046 | hwc->extra_alloc = 1; | ||
1047 | c = NULL; | ||
1048 | } | ||
1049 | /* else conflict */ | ||
1050 | found = 1; | ||
1051 | break; | ||
1052 | } else if (era->ref == 0 && free_slot == -1) | ||
1053 | free_slot = i; | ||
1054 | } | ||
1055 | if (!found && free_slot != -1) { | ||
1056 | era = &pc->regs[free_slot]; | ||
1057 | era->ref = 1; | ||
1058 | era->extra_reg = hwc->extra_reg; | ||
1059 | era->extra_config = hwc->extra_config; | ||
1060 | cpuc->percore_used = 1; | ||
1061 | hwc->extra_alloc = 1; | ||
1062 | c = NULL; | ||
1063 | } | ||
1064 | raw_spin_unlock(&pc->lock); | ||
1065 | return c; | ||
1066 | } | ||
1067 | |||
1068 | return NULL; | ||
1069 | } | ||
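Stripped of the slot bookkeeping, the loop above implements one sharing rule per core-wide er_account slot; a compact restatement (sketch only, reusing the struct declared at the top of the file):

	/*
	 * A slot can serve the new request if it is free, or if it already
	 * holds the same extra MSR programmed with the identical value
	 * (two HT siblings measuring the same offcore response mask).
	 * The same MSR with a different value is a conflict, and the
	 * requester gets &emptyconstraint, i.e. it cannot be scheduled.
	 */
	static bool er_can_share(const struct er_account *era,
				 unsigned int extra_reg, u64 extra_config)
	{
		return era->ref == 0 ||
		       (era->extra_reg == extra_reg &&
			era->extra_config == extra_config);
	}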
1070 | |||
1071 | static struct event_constraint * | ||
797 | intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) | 1072 | intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) |
798 | { | 1073 | { |
799 | struct event_constraint *c; | 1074 | struct event_constraint *c; |
@@ -806,9 +1081,51 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event | |||
806 | if (c) | 1081 | if (c) |
807 | return c; | 1082 | return c; |
808 | 1083 | ||
1084 | c = intel_percore_constraints(cpuc, event); | ||
1085 | if (c) | ||
1086 | return c; | ||
1087 | |||
809 | return x86_get_event_constraints(cpuc, event); | 1088 | return x86_get_event_constraints(cpuc, event); |
810 | } | 1089 | } |
811 | 1090 | ||
1091 | static void intel_put_event_constraints(struct cpu_hw_events *cpuc, | ||
1092 | struct perf_event *event) | ||
1093 | { | ||
1094 | struct extra_reg *er; | ||
1095 | struct intel_percore *pc; | ||
1096 | struct er_account *era; | ||
1097 | struct hw_perf_event *hwc = &event->hw; | ||
1098 | int i, allref; | ||
1099 | |||
1100 | if (!cpuc->percore_used) | ||
1101 | return; | ||
1102 | |||
1103 | for (er = x86_pmu.extra_regs; er->msr; er++) { | ||
1104 | if (er->event != (hwc->config & er->config_mask)) | ||
1105 | continue; | ||
1106 | |||
1107 | pc = cpuc->per_core; | ||
1108 | raw_spin_lock(&pc->lock); | ||
1109 | for (i = 0; i < MAX_EXTRA_REGS; i++) { | ||
1110 | era = &pc->regs[i]; | ||
1111 | if (era->ref > 0 && | ||
1112 | era->extra_config == hwc->extra_config && | ||
1113 | era->extra_reg == er->msr) { | ||
1114 | era->ref--; | ||
1115 | hwc->extra_alloc = 0; | ||
1116 | break; | ||
1117 | } | ||
1118 | } | ||
1119 | allref = 0; | ||
1120 | for (i = 0; i < MAX_EXTRA_REGS; i++) | ||
1121 | allref += pc->regs[i].ref; | ||
1122 | if (allref == 0) | ||
1123 | cpuc->percore_used = 0; | ||
1124 | raw_spin_unlock(&pc->lock); | ||
1125 | break; | ||
1126 | } | ||
1127 | } | ||
1128 | |||
812 | static int intel_pmu_hw_config(struct perf_event *event) | 1129 | static int intel_pmu_hw_config(struct perf_event *event) |
813 | { | 1130 | { |
814 | int ret = x86_pmu_hw_config(event); | 1131 | int ret = x86_pmu_hw_config(event); |
@@ -880,20 +1197,67 @@ static __initconst const struct x86_pmu core_pmu = { | |||
880 | */ | 1197 | */ |
881 | .max_period = (1ULL << 31) - 1, | 1198 | .max_period = (1ULL << 31) - 1, |
882 | .get_event_constraints = intel_get_event_constraints, | 1199 | .get_event_constraints = intel_get_event_constraints, |
1200 | .put_event_constraints = intel_put_event_constraints, | ||
883 | .event_constraints = intel_core_event_constraints, | 1201 | .event_constraints = intel_core_event_constraints, |
884 | }; | 1202 | }; |
885 | 1203 | ||
1204 | static int intel_pmu_cpu_prepare(int cpu) | ||
1205 | { | ||
1206 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); | ||
1207 | |||
1208 | if (!cpu_has_ht_siblings()) | ||
1209 | return NOTIFY_OK; | ||
1210 | |||
1211 | cpuc->per_core = kzalloc_node(sizeof(struct intel_percore), | ||
1212 | GFP_KERNEL, cpu_to_node(cpu)); | ||
1213 | if (!cpuc->per_core) | ||
1214 | return NOTIFY_BAD; | ||
1215 | |||
1216 | raw_spin_lock_init(&cpuc->per_core->lock); | ||
1217 | cpuc->per_core->core_id = -1; | ||
1218 | return NOTIFY_OK; | ||
1219 | } | ||
1220 | |||
886 | static void intel_pmu_cpu_starting(int cpu) | 1221 | static void intel_pmu_cpu_starting(int cpu) |
887 | { | 1222 | { |
1223 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); | ||
1224 | int core_id = topology_core_id(cpu); | ||
1225 | int i; | ||
1226 | |||
888 | init_debug_store_on_cpu(cpu); | 1227 | init_debug_store_on_cpu(cpu); |
889 | /* | 1228 | /* |
890 | * Deal with CPUs that don't clear their LBRs on power-up. | 1229 | * Deal with CPUs that don't clear their LBRs on power-up. |
891 | */ | 1230 | */ |
892 | intel_pmu_lbr_reset(); | 1231 | intel_pmu_lbr_reset(); |
1232 | |||
1233 | if (!cpu_has_ht_siblings()) | ||
1234 | return; | ||
1235 | |||
1236 | for_each_cpu(i, topology_thread_cpumask(cpu)) { | ||
1237 | struct intel_percore *pc = per_cpu(cpu_hw_events, i).per_core; | ||
1238 | |||
1239 | if (pc && pc->core_id == core_id) { | ||
1240 | kfree(cpuc->per_core); | ||
1241 | cpuc->per_core = pc; | ||
1242 | break; | ||
1243 | } | ||
1244 | } | ||
1245 | |||
1246 | cpuc->per_core->core_id = core_id; | ||
1247 | cpuc->per_core->refcnt++; | ||
893 | } | 1248 | } |
894 | 1249 | ||
895 | static void intel_pmu_cpu_dying(int cpu) | 1250 | static void intel_pmu_cpu_dying(int cpu) |
896 | { | 1251 | { |
1252 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); | ||
1253 | struct intel_percore *pc = cpuc->per_core; | ||
1254 | |||
1255 | if (pc) { | ||
1256 | if (pc->core_id == -1 || --pc->refcnt == 0) | ||
1257 | kfree(pc); | ||
1258 | cpuc->per_core = NULL; | ||
1259 | } | ||
1260 | |||
897 | fini_debug_store_on_cpu(cpu); | 1261 | fini_debug_store_on_cpu(cpu); |
898 | } | 1262 | } |
899 | 1263 | ||
@@ -918,7 +1282,9 @@ static __initconst const struct x86_pmu intel_pmu = { | |||
918 | */ | 1282 | */ |
919 | .max_period = (1ULL << 31) - 1, | 1283 | .max_period = (1ULL << 31) - 1, |
920 | .get_event_constraints = intel_get_event_constraints, | 1284 | .get_event_constraints = intel_get_event_constraints, |
1285 | .put_event_constraints = intel_put_event_constraints, | ||
921 | 1286 | ||
1287 | .cpu_prepare = intel_pmu_cpu_prepare, | ||
922 | .cpu_starting = intel_pmu_cpu_starting, | 1288 | .cpu_starting = intel_pmu_cpu_starting, |
923 | .cpu_dying = intel_pmu_cpu_dying, | 1289 | .cpu_dying = intel_pmu_cpu_dying, |
924 | }; | 1290 | }; |
@@ -1024,6 +1390,7 @@ static __init int intel_pmu_init(void) | |||
1024 | intel_pmu_lbr_init_core(); | 1390 | intel_pmu_lbr_init_core(); |
1025 | 1391 | ||
1026 | x86_pmu.event_constraints = intel_core2_event_constraints; | 1392 | x86_pmu.event_constraints = intel_core2_event_constraints; |
1393 | x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints; | ||
1027 | pr_cont("Core2 events, "); | 1394 | pr_cont("Core2 events, "); |
1028 | break; | 1395 | break; |
1029 | 1396 | ||
@@ -1032,11 +1399,16 @@ static __init int intel_pmu_init(void) | |||
1032 | case 46: /* 45 nm nehalem-ex, "Beckton" */ | 1399 | case 46: /* 45 nm nehalem-ex, "Beckton" */ |
1033 | memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, | 1400 | memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, |
1034 | sizeof(hw_cache_event_ids)); | 1401 | sizeof(hw_cache_event_ids)); |
1402 | memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs, | ||
1403 | sizeof(hw_cache_extra_regs)); | ||
1035 | 1404 | ||
1036 | intel_pmu_lbr_init_nhm(); | 1405 | intel_pmu_lbr_init_nhm(); |
1037 | 1406 | ||
1038 | x86_pmu.event_constraints = intel_nehalem_event_constraints; | 1407 | x86_pmu.event_constraints = intel_nehalem_event_constraints; |
1408 | x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints; | ||
1409 | x86_pmu.percore_constraints = intel_nehalem_percore_constraints; | ||
1039 | x86_pmu.enable_all = intel_pmu_nhm_enable_all; | 1410 | x86_pmu.enable_all = intel_pmu_nhm_enable_all; |
1411 | x86_pmu.extra_regs = intel_nehalem_extra_regs; | ||
1040 | pr_cont("Nehalem events, "); | 1412 | pr_cont("Nehalem events, "); |
1041 | break; | 1413 | break; |
1042 | 1414 | ||
@@ -1047,6 +1419,7 @@ static __init int intel_pmu_init(void) | |||
1047 | intel_pmu_lbr_init_atom(); | 1419 | intel_pmu_lbr_init_atom(); |
1048 | 1420 | ||
1049 | x86_pmu.event_constraints = intel_gen_event_constraints; | 1421 | x86_pmu.event_constraints = intel_gen_event_constraints; |
1422 | x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints; | ||
1050 | pr_cont("Atom events, "); | 1423 | pr_cont("Atom events, "); |
1051 | break; | 1424 | break; |
1052 | 1425 | ||
@@ -1054,14 +1427,30 @@ static __init int intel_pmu_init(void) | |||
1054 | case 44: /* 32 nm nehalem, "Gulftown" */ | 1427 | case 44: /* 32 nm nehalem, "Gulftown" */ |
1055 | memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids, | 1428 | memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids, |
1056 | sizeof(hw_cache_event_ids)); | 1429 | sizeof(hw_cache_event_ids)); |
1430 | memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs, | ||
1431 | sizeof(hw_cache_extra_regs)); | ||
1057 | 1432 | ||
1058 | intel_pmu_lbr_init_nhm(); | 1433 | intel_pmu_lbr_init_nhm(); |
1059 | 1434 | ||
1060 | x86_pmu.event_constraints = intel_westmere_event_constraints; | 1435 | x86_pmu.event_constraints = intel_westmere_event_constraints; |
1436 | x86_pmu.percore_constraints = intel_westmere_percore_constraints; | ||
1061 | x86_pmu.enable_all = intel_pmu_nhm_enable_all; | 1437 | x86_pmu.enable_all = intel_pmu_nhm_enable_all; |
1438 | x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints; | ||
1439 | x86_pmu.extra_regs = intel_westmere_extra_regs; | ||
1062 | pr_cont("Westmere events, "); | 1440 | pr_cont("Westmere events, "); |
1063 | break; | 1441 | break; |
1064 | 1442 | ||
1443 | case 42: /* SandyBridge */ | ||
1444 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, | ||
1445 | sizeof(hw_cache_event_ids)); | ||
1446 | |||
1447 | intel_pmu_lbr_init_nhm(); | ||
1448 | |||
1449 | x86_pmu.event_constraints = intel_snb_event_constraints; | ||
1450 | x86_pmu.pebs_constraints = intel_snb_pebs_events; | ||
1451 | pr_cont("SandyBridge events, "); | ||
1452 | break; | ||
1453 | |||
1065 | default: | 1454 | default: |
1066 | /* | 1455 | /* |
1067 | * default constraints for v2 and up | 1456 | * default constraints for v2 and up |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index b7dcd9f2b8a0..b95c66ae4a2a 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c | |||
@@ -361,30 +361,88 @@ static int intel_pmu_drain_bts_buffer(void) | |||
361 | /* | 361 | /* |
362 | * PEBS | 362 | * PEBS |
363 | */ | 363 | */ |
364 | 364 | static struct event_constraint intel_core2_pebs_event_constraints[] = { | |
365 | static struct event_constraint intel_core_pebs_events[] = { | 365 | PEBS_EVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */ |
366 | PEBS_EVENT_CONSTRAINT(0x00c0, 0x1), /* INSTR_RETIRED.ANY */ | ||
367 | PEBS_EVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */ | 366 | PEBS_EVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */ |
368 | PEBS_EVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */ | 367 | PEBS_EVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */ |
369 | PEBS_EVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */ | 368 | PEBS_EVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */ |
370 | PEBS_EVENT_CONSTRAINT(0x01cb, 0x1), /* MEM_LOAD_RETIRED.L1D_MISS */ | 369 | INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ |
371 | PEBS_EVENT_CONSTRAINT(0x02cb, 0x1), /* MEM_LOAD_RETIRED.L1D_LINE_MISS */ | 370 | EVENT_CONSTRAINT_END |
372 | PEBS_EVENT_CONSTRAINT(0x04cb, 0x1), /* MEM_LOAD_RETIRED.L2_MISS */ | 371 | }; |
373 | PEBS_EVENT_CONSTRAINT(0x08cb, 0x1), /* MEM_LOAD_RETIRED.L2_LINE_MISS */ | 372 | |
374 | PEBS_EVENT_CONSTRAINT(0x10cb, 0x1), /* MEM_LOAD_RETIRED.DTLB_MISS */ | 373 | static struct event_constraint intel_atom_pebs_event_constraints[] = { |
374 | PEBS_EVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */ | ||
375 | PEBS_EVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */ | ||
376 | INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */ | ||
375 | EVENT_CONSTRAINT_END | 377 | EVENT_CONSTRAINT_END |
376 | }; | 378 | }; |
377 | 379 | ||
378 | static struct event_constraint intel_nehalem_pebs_events[] = { | 380 | static struct event_constraint intel_nehalem_pebs_event_constraints[] = { |
379 | PEBS_EVENT_CONSTRAINT(0x00c0, 0xf), /* INSTR_RETIRED.ANY */ | 381 | INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */ |
380 | PEBS_EVENT_CONSTRAINT(0xfec1, 0xf), /* X87_OPS_RETIRED.ANY */ | 382 | INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ |
381 | PEBS_EVENT_CONSTRAINT(0x00c5, 0xf), /* BR_INST_RETIRED.MISPRED */ | 383 | PEBS_EVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ |
382 | PEBS_EVENT_CONSTRAINT(0x1fc7, 0xf), /* SIMD_INST_RETURED.ANY */ | 384 | INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INST_RETIRED.ANY */ |
383 | PEBS_EVENT_CONSTRAINT(0x01cb, 0xf), /* MEM_LOAD_RETIRED.L1D_MISS */ | 385 | INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */ |
384 | PEBS_EVENT_CONSTRAINT(0x02cb, 0xf), /* MEM_LOAD_RETIRED.L1D_LINE_MISS */ | 386 | INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ |
385 | PEBS_EVENT_CONSTRAINT(0x04cb, 0xf), /* MEM_LOAD_RETIRED.L2_MISS */ | 387 | PEBS_EVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */ |
386 | PEBS_EVENT_CONSTRAINT(0x08cb, 0xf), /* MEM_LOAD_RETIRED.L2_LINE_MISS */ | 388 | INTEL_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */ |
387 | PEBS_EVENT_CONSTRAINT(0x10cb, 0xf), /* MEM_LOAD_RETIRED.DTLB_MISS */ | 389 | PEBS_EVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */ |
390 | INTEL_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */ | ||
391 | INTEL_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */ | ||
392 | EVENT_CONSTRAINT_END | ||
393 | }; | ||
394 | |||
395 | static struct event_constraint intel_westmere_pebs_event_constraints[] = { | ||
396 | INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */ | ||
397 | INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ | ||
398 | PEBS_EVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ | ||
399 | INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INSTR_RETIRED.* */ | ||
400 | INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */ | ||
401 | |||
402 | INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ | ||
403 | INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */ | ||
404 | INTEL_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */ | ||
405 | PEBS_EVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */ | ||
406 | INTEL_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */ | ||
407 | INTEL_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */ | ||
408 | EVENT_CONSTRAINT_END | ||
409 | }; | ||
410 | |||
411 | static struct event_constraint intel_snb_pebs_events[] = { | ||
412 | PEBS_EVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ | ||
413 | PEBS_EVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ | ||
414 | PEBS_EVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */ | ||
415 | PEBS_EVENT_CONSTRAINT(0x01c4, 0xf), /* BR_INST_RETIRED.CONDITIONAL */ | ||
416 | PEBS_EVENT_CONSTRAINT(0x02c4, 0xf), /* BR_INST_RETIRED.NEAR_CALL */ | ||
417 | PEBS_EVENT_CONSTRAINT(0x04c4, 0xf), /* BR_INST_RETIRED.ALL_BRANCHES */ | ||
418 | PEBS_EVENT_CONSTRAINT(0x08c4, 0xf), /* BR_INST_RETIRED.NEAR_RETURN */ | ||
419 | PEBS_EVENT_CONSTRAINT(0x10c4, 0xf), /* BR_INST_RETIRED.NOT_TAKEN */ | ||
420 | PEBS_EVENT_CONSTRAINT(0x20c4, 0xf), /* BR_INST_RETIRED.NEAR_TAKEN */ | ||
421 | PEBS_EVENT_CONSTRAINT(0x40c4, 0xf), /* BR_INST_RETIRED.FAR_BRANCH */ | ||
422 | PEBS_EVENT_CONSTRAINT(0x01c5, 0xf), /* BR_MISP_RETIRED.CONDITIONAL */ | ||
423 | PEBS_EVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */ | ||
424 | PEBS_EVENT_CONSTRAINT(0x04c5, 0xf), /* BR_MISP_RETIRED.ALL_BRANCHES */ | ||
425 | PEBS_EVENT_CONSTRAINT(0x10c5, 0xf), /* BR_MISP_RETIRED.NOT_TAKEN */ | ||
426 | PEBS_EVENT_CONSTRAINT(0x20c5, 0xf), /* BR_MISP_RETIRED.TAKEN */ | ||
427 | PEBS_EVENT_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ | ||
428 | PEBS_EVENT_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORE */ | ||
429 | PEBS_EVENT_CONSTRAINT(0x11d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_LOADS */ | ||
430 | PEBS_EVENT_CONSTRAINT(0x12d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_STORES */ | ||
431 | PEBS_EVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOP_RETIRED.LOCK_LOADS */ | ||
432 | PEBS_EVENT_CONSTRAINT(0x22d0, 0xf), /* MEM_UOP_RETIRED.LOCK_STORES */ | ||
433 | PEBS_EVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_LOADS */ | ||
434 | PEBS_EVENT_CONSTRAINT(0x42d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_STORES */ | ||
435 | PEBS_EVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOP_RETIRED.ANY_LOADS */ | ||
436 | PEBS_EVENT_CONSTRAINT(0x82d0, 0xf), /* MEM_UOP_RETIRED.ANY_STORES */ | ||
437 | PEBS_EVENT_CONSTRAINT(0x01d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.L1_HIT */ | ||
438 | PEBS_EVENT_CONSTRAINT(0x02d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.L2_HIT */ | ||
439 | PEBS_EVENT_CONSTRAINT(0x04d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.LLC_HIT */ | ||
440 | PEBS_EVENT_CONSTRAINT(0x40d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.HIT_LFB */ | ||
441 | PEBS_EVENT_CONSTRAINT(0x01d2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS */ | ||
442 | PEBS_EVENT_CONSTRAINT(0x02d2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT */ | ||
443 | PEBS_EVENT_CONSTRAINT(0x04d2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM */ | ||
444 | PEBS_EVENT_CONSTRAINT(0x08d2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE */ | ||
445 | PEBS_EVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */ | ||
388 | EVENT_CONSTRAINT_END | 446 | EVENT_CONSTRAINT_END |
389 | }; | 447 | }; |
390 | 448 | ||
@@ -695,20 +753,17 @@ static void intel_ds_init(void) | |||
695 | printk(KERN_CONT "PEBS fmt0%c, ", pebs_type); | 753 | printk(KERN_CONT "PEBS fmt0%c, ", pebs_type); |
696 | x86_pmu.pebs_record_size = sizeof(struct pebs_record_core); | 754 | x86_pmu.pebs_record_size = sizeof(struct pebs_record_core); |
697 | x86_pmu.drain_pebs = intel_pmu_drain_pebs_core; | 755 | x86_pmu.drain_pebs = intel_pmu_drain_pebs_core; |
698 | x86_pmu.pebs_constraints = intel_core_pebs_events; | ||
699 | break; | 756 | break; |
700 | 757 | ||
701 | case 1: | 758 | case 1: |
702 | printk(KERN_CONT "PEBS fmt1%c, ", pebs_type); | 759 | printk(KERN_CONT "PEBS fmt1%c, ", pebs_type); |
703 | x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm); | 760 | x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm); |
704 | x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm; | 761 | x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm; |
705 | x86_pmu.pebs_constraints = intel_nehalem_pebs_events; | ||
706 | break; | 762 | break; |
707 | 763 | ||
708 | default: | 764 | default: |
709 | printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type); | 765 | printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type); |
710 | x86_pmu.pebs = 0; | 766 | x86_pmu.pebs = 0; |
711 | break; | ||
712 | } | 767 | } |
713 | } | 768 | } |
714 | } | 769 | } |
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c index ff751a9f182b..3769ac822f96 100644 --- a/arch/x86/kernel/cpu/perf_event_p4.c +++ b/arch/x86/kernel/cpu/perf_event_p4.c | |||
@@ -764,9 +764,9 @@ static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc) | |||
764 | u64 v; | 764 | u64 v; |
765 | 765 | ||
766 | /* an official way for overflow indication */ | 766 | /* an official way for overflow indication */ |
767 | rdmsrl(hwc->config_base + hwc->idx, v); | 767 | rdmsrl(hwc->config_base, v); |
768 | if (v & P4_CCCR_OVF) { | 768 | if (v & P4_CCCR_OVF) { |
769 | wrmsrl(hwc->config_base + hwc->idx, v & ~P4_CCCR_OVF); | 769 | wrmsrl(hwc->config_base, v & ~P4_CCCR_OVF); |
770 | return 1; | 770 | return 1; |
771 | } | 771 | } |
772 | 772 | ||
@@ -815,7 +815,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event) | |||
815 | * state we need to clear P4_CCCR_OVF, otherwise interrupt get | 815 | * state we need to clear P4_CCCR_OVF, otherwise interrupt get |
816 | * asserted again and again | 816 | * asserted again and again |
817 | */ | 817 | */ |
818 | (void)checking_wrmsrl(hwc->config_base + hwc->idx, | 818 | (void)checking_wrmsrl(hwc->config_base, |
819 | (u64)(p4_config_unpack_cccr(hwc->config)) & | 819 | (u64)(p4_config_unpack_cccr(hwc->config)) & |
820 | ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED); | 820 | ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED); |
821 | } | 821 | } |
@@ -885,7 +885,7 @@ static void p4_pmu_enable_event(struct perf_event *event) | |||
885 | p4_pmu_enable_pebs(hwc->config); | 885 | p4_pmu_enable_pebs(hwc->config); |
886 | 886 | ||
887 | (void)checking_wrmsrl(escr_addr, escr_conf); | 887 | (void)checking_wrmsrl(escr_addr, escr_conf); |
888 | (void)checking_wrmsrl(hwc->config_base + hwc->idx, | 888 | (void)checking_wrmsrl(hwc->config_base, |
889 | (cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE); | 889 | (cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE); |
890 | } | 890 | } |
891 | 891 | ||
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c index 34ba07be2cda..20c097e33860 100644 --- a/arch/x86/kernel/cpu/perf_event_p6.c +++ b/arch/x86/kernel/cpu/perf_event_p6.c | |||
@@ -68,7 +68,7 @@ p6_pmu_disable_event(struct perf_event *event) | |||
68 | if (cpuc->enabled) | 68 | if (cpuc->enabled) |
69 | val |= ARCH_PERFMON_EVENTSEL_ENABLE; | 69 | val |= ARCH_PERFMON_EVENTSEL_ENABLE; |
70 | 70 | ||
71 | (void)checking_wrmsrl(hwc->config_base + hwc->idx, val); | 71 | (void)checking_wrmsrl(hwc->config_base, val); |
72 | } | 72 | } |
73 | 73 | ||
74 | static void p6_pmu_enable_event(struct perf_event *event) | 74 | static void p6_pmu_enable_event(struct perf_event *event) |
@@ -81,7 +81,7 @@ static void p6_pmu_enable_event(struct perf_event *event) | |||
81 | if (cpuc->enabled) | 81 | if (cpuc->enabled) |
82 | val |= ARCH_PERFMON_EVENTSEL_ENABLE; | 82 | val |= ARCH_PERFMON_EVENTSEL_ENABLE; |
83 | 83 | ||
84 | (void)checking_wrmsrl(hwc->config_base + hwc->idx, val); | 84 | (void)checking_wrmsrl(hwc->config_base, val); |
85 | } | 85 | } |
86 | 86 | ||
87 | static __initconst const struct x86_pmu p6_pmu = { | 87 | static __initconst const struct x86_pmu p6_pmu = { |
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index d5a236615501..966512b2cacf 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c | |||
@@ -46,6 +46,8 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr) | |||
46 | /* returns the bit offset of the performance counter register */ | 46 | /* returns the bit offset of the performance counter register */ |
47 | switch (boot_cpu_data.x86_vendor) { | 47 | switch (boot_cpu_data.x86_vendor) { |
48 | case X86_VENDOR_AMD: | 48 | case X86_VENDOR_AMD: |
49 | if (msr >= MSR_F15H_PERF_CTR) | ||
50 | return (msr - MSR_F15H_PERF_CTR) >> 1; | ||
49 | return msr - MSR_K7_PERFCTR0; | 51 | return msr - MSR_K7_PERFCTR0; |
50 | case X86_VENDOR_INTEL: | 52 | case X86_VENDOR_INTEL: |
51 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) | 53 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) |
@@ -70,6 +72,8 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr) | |||
70 | /* returns the bit offset of the event selection register */ | 72 | /* returns the bit offset of the event selection register */ |
71 | switch (boot_cpu_data.x86_vendor) { | 73 | switch (boot_cpu_data.x86_vendor) { |
72 | case X86_VENDOR_AMD: | 74 | case X86_VENDOR_AMD: |
75 | if (msr >= MSR_F15H_PERF_CTL) | ||
76 | return (msr - MSR_F15H_PERF_CTL) >> 1; | ||
73 | return msr - MSR_K7_EVNTSEL0; | 77 | return msr - MSR_K7_EVNTSEL0; |
74 | case X86_VENDOR_INTEL: | 78 | case X86_VENDOR_INTEL: |
75 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) | 79 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) |
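Both helpers shift by one because the family-15h MSRs come in control/counter pairs; with the usual msr-index.h values (assumed here: MSR_F15H_PERF_CTL = 0xc0010200, MSR_F15H_PERF_CTR = 0xc0010201) consecutive counters sit two MSRs apart, so halving the distance recovers the counter index. A standalone check:

	#include <stdio.h>

	#define MSR_F15H_PERF_CTR	0xc0010201	/* assumed value */

	int main(void)
	{
		unsigned int msr = MSR_F15H_PERF_CTR + 2 * 2;	/* third counter's MSR */

		/* Prints 2: the bit offset the watchdog computes for it. */
		printf("%u\n", (msr - MSR_F15H_PERF_CTR) >> 1);
		return 0;
	}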
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c new file mode 100644 index 000000000000..7a8cebc9ff29 --- /dev/null +++ b/arch/x86/kernel/devicetree.c | |||
@@ -0,0 +1,441 @@ | |||
1 | /* | ||
2 | * Architecture specific OF callbacks. | ||
3 | */ | ||
4 | #include <linux/bootmem.h> | ||
5 | #include <linux/io.h> | ||
6 | #include <linux/interrupt.h> | ||
7 | #include <linux/list.h> | ||
8 | #include <linux/of.h> | ||
9 | #include <linux/of_fdt.h> | ||
10 | #include <linux/of_address.h> | ||
11 | #include <linux/of_platform.h> | ||
12 | #include <linux/of_irq.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/pci.h> | ||
15 | #include <linux/of_pci.h> | ||
16 | |||
17 | #include <asm/hpet.h> | ||
18 | #include <asm/irq_controller.h> | ||
19 | #include <asm/apic.h> | ||
20 | #include <asm/pci_x86.h> | ||
21 | |||
22 | __initdata u64 initial_dtb; | ||
23 | char __initdata cmd_line[COMMAND_LINE_SIZE]; | ||
24 | static LIST_HEAD(irq_domains); | ||
25 | static DEFINE_RAW_SPINLOCK(big_irq_lock); | ||
26 | |||
27 | int __initdata of_ioapic; | ||
28 | |||
29 | #ifdef CONFIG_X86_IO_APIC | ||
30 | static void add_interrupt_host(struct irq_domain *ih) | ||
31 | { | ||
32 | unsigned long flags; | ||
33 | |||
34 | raw_spin_lock_irqsave(&big_irq_lock, flags); | ||
35 | list_add(&ih->l, &irq_domains); | ||
36 | raw_spin_unlock_irqrestore(&big_irq_lock, flags); | ||
37 | } | ||
38 | #endif | ||
39 | |||
40 | static struct irq_domain *get_ih_from_node(struct device_node *controller) | ||
41 | { | ||
42 | struct irq_domain *ih, *found = NULL; | ||
43 | unsigned long flags; | ||
44 | |||
45 | raw_spin_lock_irqsave(&big_irq_lock, flags); | ||
46 | list_for_each_entry(ih, &irq_domains, l) { | ||
47 | if (ih->controller == controller) { | ||
48 | found = ih; | ||
49 | break; | ||
50 | } | ||
51 | } | ||
52 | raw_spin_unlock_irqrestore(&big_irq_lock, flags); | ||
53 | return found; | ||
54 | } | ||
55 | |||
56 | unsigned int irq_create_of_mapping(struct device_node *controller, | ||
57 | const u32 *intspec, unsigned int intsize) | ||
58 | { | ||
59 | struct irq_domain *ih; | ||
60 | u32 virq, type; | ||
61 | int ret; | ||
62 | |||
63 | ih = get_ih_from_node(controller); | ||
64 | if (!ih) | ||
65 | return 0; | ||
66 | ret = ih->xlate(ih, intspec, intsize, &virq, &type); | ||
67 | if (ret) | ||
68 | return ret; | ||
69 | if (type == IRQ_TYPE_NONE) | ||
70 | return virq; | ||
71 | /* set the mask if it is different from current */ | ||
72 | if (type == (irq_to_desc(virq)->status & IRQF_TRIGGER_MASK)) | ||
73 | set_irq_type(virq, type); | ||
74 | return virq; | ||
75 | } | ||
76 | EXPORT_SYMBOL_GPL(irq_create_of_mapping); | ||
77 | |||
78 | unsigned long pci_address_to_pio(phys_addr_t address) | ||
79 | { | ||
80 | /* | ||
81 | * The ioport address can be directly used by inX / outX | ||
82 | */ | ||
83 | BUG_ON(address >= (1 << 16)); | ||
84 | return (unsigned long)address; | ||
85 | } | ||
86 | EXPORT_SYMBOL_GPL(pci_address_to_pio); | ||
87 | |||
88 | void __init early_init_dt_scan_chosen_arch(unsigned long node) | ||
89 | { | ||
90 | BUG(); | ||
91 | } | ||
92 | |||
93 | void __init early_init_dt_add_memory_arch(u64 base, u64 size) | ||
94 | { | ||
95 | BUG(); | ||
96 | } | ||
97 | |||
98 | void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) | ||
99 | { | ||
100 | return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS)); | ||
101 | } | ||
102 | |||
103 | void __init add_dtb(u64 data) | ||
104 | { | ||
105 | initial_dtb = data + offsetof(struct setup_data, data); | ||
106 | } | ||
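add_dtb() assumes the boot protocol hands over the flattened device tree as one entry in the setup_data list, with the blob starting right after the generic header; the offsetof() adjustment skips that header. Assumed layout (per the x86 boot protocol's struct setup_data):

	struct setup_data {
		__u64 next;	/* physical address of the next entry, 0 if last */
		__u32 type;	/* SETUP_DTB for a device-tree blob */
		__u32 len;	/* length of data[] */
		__u8  data[0];	/* here: the DTB itself, i.e. what initial_dtb points at */
	};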
107 | |||
108 | /* | ||
109 | * CE4100 ids. Will be moved to machine_device_initcall() once we have it. | ||
110 | */ | ||
111 | static struct of_device_id __initdata ce4100_ids[] = { | ||
112 | { .compatible = "intel,ce4100-cp", }, | ||
113 | { .compatible = "isa", }, | ||
114 | { .compatible = "pci", }, | ||
115 | {}, | ||
116 | }; | ||
117 | |||
118 | static int __init add_bus_probe(void) | ||
119 | { | ||
120 | if (!of_have_populated_dt()) | ||
121 | return 0; | ||
122 | |||
123 | return of_platform_bus_probe(NULL, ce4100_ids, NULL); | ||
124 | } | ||
125 | module_init(add_bus_probe); | ||
126 | |||
127 | #ifdef CONFIG_PCI | ||
128 | static int x86_of_pci_irq_enable(struct pci_dev *dev) | ||
129 | { | ||
130 | struct of_irq oirq; | ||
131 | u32 virq; | ||
132 | int ret; | ||
133 | u8 pin; | ||
134 | |||
135 | ret = pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); | ||
136 | if (ret) | ||
137 | return ret; | ||
138 | if (!pin) | ||
139 | return 0; | ||
140 | |||
141 | ret = of_irq_map_pci(dev, &oirq); | ||
142 | if (ret) | ||
143 | return ret; | ||
144 | |||
145 | virq = irq_create_of_mapping(oirq.controller, oirq.specifier, | ||
146 | oirq.size); | ||
147 | if (virq == 0) | ||
148 | return -EINVAL; | ||
149 | dev->irq = virq; | ||
150 | return 0; | ||
151 | } | ||
152 | |||
153 | static void x86_of_pci_irq_disable(struct pci_dev *dev) | ||
154 | { | ||
155 | } | ||
156 | |||
157 | void __cpuinit x86_of_pci_init(void) | ||
158 | { | ||
159 | struct device_node *np; | ||
160 | |||
161 | pcibios_enable_irq = x86_of_pci_irq_enable; | ||
162 | pcibios_disable_irq = x86_of_pci_irq_disable; | ||
163 | |||
164 | for_each_node_by_type(np, "pci") { | ||
165 | const void *prop; | ||
166 | struct pci_bus *bus; | ||
167 | unsigned int bus_min; | ||
168 | struct device_node *child; | ||
169 | |||
170 | prop = of_get_property(np, "bus-range", NULL); | ||
171 | if (!prop) | ||
172 | continue; | ||
173 | bus_min = be32_to_cpup(prop); | ||
174 | |||
175 | bus = pci_find_bus(0, bus_min); | ||
176 | if (!bus) { | ||
177 | printk(KERN_ERR "Can't find a node for bus %s.\n", | ||
178 | np->full_name); | ||
179 | continue; | ||
180 | } | ||
181 | |||
182 | if (bus->self) | ||
183 | bus->self->dev.of_node = np; | ||
184 | else | ||
185 | bus->dev.of_node = np; | ||
186 | |||
187 | for_each_child_of_node(np, child) { | ||
188 | struct pci_dev *dev; | ||
189 | u32 devfn; | ||
190 | |||
191 | prop = of_get_property(child, "reg", NULL); | ||
192 | if (!prop) | ||
193 | continue; | ||
194 | |||
195 | devfn = (be32_to_cpup(prop) >> 8) & 0xff; | ||
196 | dev = pci_get_slot(bus, devfn); | ||
197 | if (!dev) | ||
198 | continue; | ||
199 | dev->dev.of_node = child; | ||
200 | pci_dev_put(dev); | ||
201 | } | ||
202 | } | ||
203 | } | ||
204 | #endif | ||
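The devfn extraction above follows the standard OF PCI "reg" encoding, in which the first cell packs the bus number in bits [23:16], the device in [15:11] and the function in [10:8]. One worked cell value (illustrative):

	/*
	 * reg cell 0x00008800  ->  bus 0, device 17, function 0
	 *   devfn = (0x00008800 >> 8) & 0xff = 0x88
	 *   PCI_SLOT(0x88) = 0x88 >> 3 = 17,  PCI_FUNC(0x88) = 0x88 & 7 = 0
	 */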
205 | |||
206 | static void __init dtb_setup_hpet(void) | ||
207 | { | ||
208 | #ifdef CONFIG_HPET_TIMER | ||
209 | struct device_node *dn; | ||
210 | struct resource r; | ||
211 | int ret; | ||
212 | |||
213 | dn = of_find_compatible_node(NULL, NULL, "intel,ce4100-hpet"); | ||
214 | if (!dn) | ||
215 | return; | ||
216 | ret = of_address_to_resource(dn, 0, &r); | ||
217 | if (ret) { | ||
218 | WARN_ON(1); | ||
219 | return; | ||
220 | } | ||
221 | hpet_address = r.start; | ||
222 | #endif | ||
223 | } | ||
224 | |||
225 | static void __init dtb_lapic_setup(void) | ||
226 | { | ||
227 | #ifdef CONFIG_X86_LOCAL_APIC | ||
228 | struct device_node *dn; | ||
229 | struct resource r; | ||
230 | int ret; | ||
231 | |||
232 | dn = of_find_compatible_node(NULL, NULL, "intel,ce4100-lapic"); | ||
233 | if (!dn) | ||
234 | return; | ||
235 | |||
236 | ret = of_address_to_resource(dn, 0, &r); | ||
237 | if (WARN_ON(ret)) | ||
238 | return; | ||
239 | |||
240 | /* Did the boot loader setup the local APIC ? */ | ||
241 | if (!cpu_has_apic) { | ||
242 | if (apic_force_enable(r.start)) | ||
243 | return; | ||
244 | } | ||
245 | smp_found_config = 1; | ||
246 | pic_mode = 1; | ||
247 | register_lapic_address(r.start); | ||
248 | generic_processor_info(boot_cpu_physical_apicid, | ||
249 | GET_APIC_VERSION(apic_read(APIC_LVR))); | ||
250 | #endif | ||
251 | } | ||
252 | |||
253 | #ifdef CONFIG_X86_IO_APIC | ||
254 | static unsigned int ioapic_id; | ||
255 | |||
256 | static void __init dtb_add_ioapic(struct device_node *dn) | ||
257 | { | ||
258 | struct resource r; | ||
259 | int ret; | ||
260 | |||
261 | ret = of_address_to_resource(dn, 0, &r); | ||
262 | if (ret) { | ||
263 | printk(KERN_ERR "Can't obtain address from node %s.\n", | ||
264 | dn->full_name); | ||
265 | return; | ||
266 | } | ||
267 | mp_register_ioapic(++ioapic_id, r.start, gsi_top); | ||
268 | } | ||
269 | |||
270 | static void __init dtb_ioapic_setup(void) | ||
271 | { | ||
272 | struct device_node *dn; | ||
273 | |||
274 | for_each_compatible_node(dn, NULL, "intel,ce4100-ioapic") | ||
275 | dtb_add_ioapic(dn); | ||
276 | |||
277 | if (nr_ioapics) { | ||
278 | of_ioapic = 1; | ||
279 | return; | ||
280 | } | ||
281 | printk(KERN_ERR "Error: No information about IO-APIC in OF.\n"); | ||
282 | } | ||
283 | #else | ||
284 | static void __init dtb_ioapic_setup(void) {} | ||
285 | #endif | ||
286 | |||
287 | static void __init dtb_apic_setup(void) | ||
288 | { | ||
289 | dtb_lapic_setup(); | ||
290 | dtb_ioapic_setup(); | ||
291 | } | ||
292 | |||
293 | #ifdef CONFIG_OF_FLATTREE | ||
294 | static void __init x86_flattree_get_config(void) | ||
295 | { | ||
296 | u32 size, map_len; | ||
297 | void *new_dtb; | ||
298 | |||
299 | if (!initial_dtb) | ||
300 | return; | ||
301 | |||
302 | map_len = max(PAGE_SIZE - (initial_dtb & ~PAGE_MASK), | ||
303 | (u64)sizeof(struct boot_param_header)); | ||
304 | |||
305 | initial_boot_params = early_memremap(initial_dtb, map_len); | ||
306 | size = be32_to_cpu(initial_boot_params->totalsize); | ||
307 | if (map_len < size) { | ||
308 | early_iounmap(initial_boot_params, map_len); | ||
309 | initial_boot_params = early_memremap(initial_dtb, size); | ||
310 | map_len = size; | ||
311 | } | ||
312 | |||
313 | new_dtb = alloc_bootmem(size); | ||
314 | memcpy(new_dtb, initial_boot_params, size); | ||
315 | early_iounmap(initial_boot_params, map_len); | ||
316 | |||
317 | initial_boot_params = new_dtb; | ||
318 | |||
319 | /* root level address cells */ | ||
320 | of_scan_flat_dt(early_init_dt_scan_root, NULL); | ||
321 | |||
322 | unflatten_device_tree(); | ||
323 | } | ||
324 | #else | ||
325 | static inline void x86_flattree_get_config(void) { } | ||
326 | #endif | ||
327 | |||
328 | void __init x86_dtb_init(void) | ||
329 | { | ||
330 | x86_flattree_get_config(); | ||
331 | |||
332 | if (!of_have_populated_dt()) | ||
333 | return; | ||
334 | |||
335 | dtb_setup_hpet(); | ||
336 | dtb_apic_setup(); | ||
337 | } | ||
338 | |||
339 | #ifdef CONFIG_X86_IO_APIC | ||
340 | |||
341 | struct of_ioapic_type { | ||
342 | u32 out_type; | ||
343 | u32 trigger; | ||
344 | u32 polarity; | ||
345 | }; | ||
346 | |||
347 | static struct of_ioapic_type of_ioapic_type[] = | ||
348 | { | ||
349 | { | ||
350 | .out_type = IRQ_TYPE_EDGE_RISING, | ||
351 | .trigger = IOAPIC_EDGE, | ||
352 | .polarity = 1, | ||
353 | }, | ||
354 | { | ||
355 | .out_type = IRQ_TYPE_LEVEL_LOW, | ||
356 | .trigger = IOAPIC_LEVEL, | ||
357 | .polarity = 0, | ||
358 | }, | ||
359 | { | ||
360 | .out_type = IRQ_TYPE_LEVEL_HIGH, | ||
361 | .trigger = IOAPIC_LEVEL, | ||
362 | .polarity = 1, | ||
363 | }, | ||
364 | { | ||
365 | .out_type = IRQ_TYPE_EDGE_FALLING, | ||
366 | .trigger = IOAPIC_EDGE, | ||
367 | .polarity = 0, | ||
368 | }, | ||
369 | }; | ||
370 | |||
371 | static int ioapic_xlate(struct irq_domain *id, const u32 *intspec, u32 intsize, | ||
372 | u32 *out_hwirq, u32 *out_type) | ||
373 | { | ||
374 | struct io_apic_irq_attr attr; | ||
375 | struct of_ioapic_type *it; | ||
376 | u32 line, idx, type; | ||
377 | |||
378 | if (intsize < 2) | ||
379 | return -EINVAL; | ||
380 | |||
381 | line = *intspec; | ||
382 | idx = (u32) id->priv; | ||
383 | *out_hwirq = line + mp_gsi_routing[idx].gsi_base; | ||
384 | |||
385 | intspec++; | ||
386 | type = *intspec; | ||
387 | |||
388 | if (type >= ARRAY_SIZE(of_ioapic_type)) | ||
389 | return -EINVAL; | ||
390 | |||
391 | it = of_ioapic_type + type; | ||
392 | *out_type = it->out_type; | ||
393 | |||
394 | set_io_apic_irq_attr(&attr, idx, line, it->trigger, it->polarity); | ||
395 | |||
396 | return io_apic_setup_irq_pin(*out_hwirq, cpu_to_node(0), &attr); | ||
397 | } | ||
398 | |||
399 | static void __init ioapic_add_ofnode(struct device_node *np) | ||
400 | { | ||
401 | struct resource r; | ||
402 | int i, ret; | ||
403 | |||
404 | ret = of_address_to_resource(np, 0, &r); | ||
405 | if (ret) { | ||
406 | printk(KERN_ERR "Failed to obtain address for %s\n", | ||
407 | np->full_name); | ||
408 | return; | ||
409 | } | ||
410 | |||
411 | for (i = 0; i < nr_ioapics; i++) { | ||
412 | if (r.start == mp_ioapics[i].apicaddr) { | ||
413 | struct irq_domain *id; | ||
414 | |||
415 | id = kzalloc(sizeof(*id), GFP_KERNEL); | ||
416 | BUG_ON(!id); | ||
417 | id->controller = np; | ||
418 | id->xlate = ioapic_xlate; | ||
419 | id->priv = (void *)i; | ||
420 | add_interrupt_host(id); | ||
421 | return; | ||
422 | } | ||
423 | } | ||
424 | printk(KERN_ERR "IOxAPIC at %s is not registered.\n", np->full_name); | ||
425 | } | ||
426 | |||
427 | void __init x86_add_irq_domains(void) | ||
428 | { | ||
429 | struct device_node *dp; | ||
430 | |||
431 | if (!of_have_populated_dt()) | ||
432 | return; | ||
433 | |||
434 | for_each_node_with_property(dp, "interrupt-controller") { | ||
435 | if (of_device_is_compatible(dp, "intel,ce4100-ioapic")) | ||
436 | ioapic_add_ofnode(dp); | ||
437 | } | ||
438 | } | ||
439 | #else | ||
440 | void __init x86_add_irq_domains(void) { } | ||
441 | #endif | ||
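The x86_of_pci_init() loop in the new devicetree.c above pulls each child device's devfn out of the first cell of its "reg" property with the shift-and-mask used by the standard PCI device-tree binding (bus number in bits 23:16, device in 15:11, function in 10:8). A minimal user-space sketch of that decoding, with be32_read() standing in for the kernel's be32_to_cpup() and the cell value chosen purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's be32_to_cpup(): read one big-endian 32-bit cell. */
static uint32_t be32_read(const uint8_t *p)
{
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
        /* First cell of a PCI "reg" property; 0x0000b800 encodes bus 0,
         * device 0x17, function 0 under the bbbbbbbb dddddfff 00000000
         * layout (example value only). */
        const uint8_t cell[4] = { 0x00, 0x00, 0xb8, 0x00 };
        uint32_t v = be32_read(cell);
        unsigned devfn = (v >> 8) & 0xff;       /* same shift/mask as the patch */

        printf("bus %u, slot %u, func %u (devfn 0x%02x)\n",
               (unsigned)((v >> 16) & 0xff), devfn >> 3, devfn & 7, devfn);
        return 0;
}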
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index df20723a6a1b..220a1c11cfde 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c | |||
@@ -320,31 +320,6 @@ void die(const char *str, struct pt_regs *regs, long err) | |||
320 | oops_end(flags, regs, sig); | 320 | oops_end(flags, regs, sig); |
321 | } | 321 | } |
322 | 322 | ||
323 | void notrace __kprobes | ||
324 | die_nmi(char *str, struct pt_regs *regs, int do_panic) | ||
325 | { | ||
326 | unsigned long flags; | ||
327 | |||
328 | if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP) | ||
329 | return; | ||
330 | |||
331 | /* | ||
332 | * We are in trouble anyway, lets at least try | ||
333 | * to get a message out. | ||
334 | */ | ||
335 | flags = oops_begin(); | ||
336 | printk(KERN_EMERG "%s", str); | ||
337 | printk(" on CPU%d, ip %08lx, registers:\n", | ||
338 | smp_processor_id(), regs->ip); | ||
339 | show_registers(regs); | ||
340 | oops_end(flags, regs, 0); | ||
341 | if (do_panic || panic_on_oops) | ||
342 | panic("Non maskable interrupt"); | ||
343 | nmi_exit(); | ||
344 | local_irq_enable(); | ||
345 | do_exit(SIGBUS); | ||
346 | } | ||
347 | |||
348 | static int __init oops_setup(char *s) | 323 | static int __init oops_setup(char *s) |
349 | { | 324 | { |
350 | if (!s) | 325 | if (!s) |
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 294f26da0c0c..cdf5bfd9d4d5 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c | |||
@@ -667,21 +667,15 @@ __init void e820_setup_gap(void) | |||
667 | * boot_params.e820_map, others are passed via SETUP_E820_EXT node of | 667 | * boot_params.e820_map, others are passed via SETUP_E820_EXT node of |
668 | * linked list of struct setup_data, which is parsed here. | 668 | * linked list of struct setup_data, which is parsed here. |
669 | */ | 669 | */ |
670 | void __init parse_e820_ext(struct setup_data *sdata, unsigned long pa_data) | 670 | void __init parse_e820_ext(struct setup_data *sdata) |
671 | { | 671 | { |
672 | u32 map_len; | ||
673 | int entries; | 672 | int entries; |
674 | struct e820entry *extmap; | 673 | struct e820entry *extmap; |
675 | 674 | ||
676 | entries = sdata->len / sizeof(struct e820entry); | 675 | entries = sdata->len / sizeof(struct e820entry); |
677 | map_len = sdata->len + sizeof(struct setup_data); | ||
678 | if (map_len > PAGE_SIZE) | ||
679 | sdata = early_ioremap(pa_data, map_len); | ||
680 | extmap = (struct e820entry *)(sdata->data); | 676 | extmap = (struct e820entry *)(sdata->data); |
681 | __append_e820_map(extmap, entries); | 677 | __append_e820_map(extmap, entries); |
682 | sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); | 678 | sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); |
683 | if (map_len > PAGE_SIZE) | ||
684 | early_iounmap(sdata, map_len); | ||
685 | printk(KERN_INFO "extended physical RAM map:\n"); | 679 | printk(KERN_INFO "extended physical RAM map:\n"); |
686 | e820_print_map("extended"); | 680 | e820_print_map("extended"); |
687 | } | 681 | } |
@@ -847,15 +841,21 @@ static int __init parse_memopt(char *p) | |||
847 | if (!p) | 841 | if (!p) |
848 | return -EINVAL; | 842 | return -EINVAL; |
849 | 843 | ||
850 | #ifdef CONFIG_X86_32 | ||
851 | if (!strcmp(p, "nopentium")) { | 844 | if (!strcmp(p, "nopentium")) { |
845 | #ifdef CONFIG_X86_32 | ||
852 | setup_clear_cpu_cap(X86_FEATURE_PSE); | 846 | setup_clear_cpu_cap(X86_FEATURE_PSE); |
853 | return 0; | 847 | return 0; |
854 | } | 848 | #else |
849 | printk(KERN_WARNING "mem=nopentium ignored! (only supported on x86_32)\n"); | ||
850 | return -EINVAL; | ||
855 | #endif | 851 | #endif |
852 | } | ||
856 | 853 | ||
857 | userdef = 1; | 854 | userdef = 1; |
858 | mem_size = memparse(p, &p); | 855 | mem_size = memparse(p, &p); |
856 | /* don't remove all of memory when handling "mem={invalid}" param */ | ||
857 | if (mem_size == 0) | ||
858 | return -EINVAL; | ||
859 | e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1); | 859 | e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1); |
860 | 860 | ||
861 | return 0; | 861 | return 0; |
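The parse_memopt() hunk above now rejects "mem=" strings that parse to zero instead of trimming away all RAM. A rough user-space analogue of that guard, where memparse_like() is only a simplified stand-in for the kernel's memparse() (a number with an optional K/M/G suffix, not the real implementation):

#include <stdio.h>
#include <stdlib.h>

static unsigned long long memparse_like(const char *s, char **retptr)
{
        unsigned long long v = strtoull(s, retptr, 0);

        switch (**retptr) {
        case 'G': case 'g': v <<= 10; /* fall through */
        case 'M': case 'm': v <<= 10; /* fall through */
        case 'K': case 'k': v <<= 10; (*retptr)++; break;
        }
        return v;
}

int main(int argc, char **argv)
{
        const char *arg = argc > 1 ? argv[1] : "512M";
        char *end;
        unsigned long long mem_size = memparse_like(arg, &end);

        /* Mirror the patch: refuse "mem={invalid}" instead of removing
         * every byte above address 0 and losing all of memory. */
        if (mem_size == 0) {
                fprintf(stderr, "mem=%s rejected (parsed as 0)\n", arg);
                return 1;
        }
        printf("would keep RAM below %llu bytes\n", mem_size);
        return 0;
}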
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 9efbdcc56425..3755ef494390 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c | |||
@@ -159,7 +159,12 @@ static void __init ati_bugs_contd(int num, int slot, int func) | |||
159 | if (rev >= 0x40) | 159 | if (rev >= 0x40) |
160 | acpi_fix_pin2_polarity = 1; | 160 | acpi_fix_pin2_polarity = 1; |
161 | 161 | ||
162 | if (rev > 0x13) | 162 | /* |
163 | * SB600: revisions 0x11, 0x12, 0x13, 0x14, ... | ||
164 | * SB700: revisions 0x39, 0x3a, ... | ||
165 | * SB800: revisions 0x40, 0x41, ... | ||
166 | */ | ||
167 | if (rev >= 0x39) | ||
163 | return; | 168 | return; |
164 | 169 | ||
165 | if (acpi_use_timer_override) | 170 | if (acpi_use_timer_override) |
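The widened check above is easier to see with the revision ranges from the new comment spelled out: SB600 parts (0x11, 0x12, 0x13, 0x14, ...) keep the quirk, including rev 0x14 which the old "rev > 0x13" test wrongly skipped, while SB700 (0x39+) and SB800 (0x40+) still bail out early. A tiny illustrative sketch; needs_timer_override_quirk() is a made-up name for the gate only, and the SB600-specific handling further down in ati_bugs_contd() is not shown in this hunk:

#include <stdio.h>

static int needs_timer_override_quirk(unsigned char rev)
{
        /* SB600 is 0x11..0x14..., SB700 starts at 0x39, SB800 at 0x40;
         * only SB600 wants the handling below the check. */
        return rev < 0x39;
}

int main(void)
{
        unsigned char revs[] = { 0x13, 0x14, 0x39, 0x41 };

        for (unsigned i = 0; i < sizeof(revs); i++)
                printf("rev 0x%02x -> quirk %s\n", revs[i],
                       needs_timer_override_quirk(revs[i]) ? "applied" : "skipped");
        return 0;
}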
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index c8b4efad7ebb..5c1a91974918 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S | |||
@@ -65,6 +65,8 @@ | |||
65 | #define sysexit_audit syscall_exit_work | 65 | #define sysexit_audit syscall_exit_work |
66 | #endif | 66 | #endif |
67 | 67 | ||
68 | .section .entry.text, "ax" | ||
69 | |||
68 | /* | 70 | /* |
69 | * We use macros for low-level operations which need to be overridden | 71 | * We use macros for low-level operations which need to be overridden |
70 | * for paravirtualization. The following will never clobber any registers: | 72 | * for paravirtualization. The following will never clobber any registers: |
@@ -395,7 +397,7 @@ sysenter_past_esp: | |||
395 | * A tiny bit of offset fixup is necessary - 4*4 means the 4 words | 397 | * A tiny bit of offset fixup is necessary - 4*4 means the 4 words |
396 | * pushed above; +8 corresponds to copy_thread's esp0 setting. | 398 | * pushed above; +8 corresponds to copy_thread's esp0 setting. |
397 | */ | 399 | */ |
398 | pushl_cfi ((TI_sysenter_return)-THREAD_SIZE_asm+8+4*4)(%esp) | 400 | pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp) |
399 | CFI_REL_OFFSET eip, 0 | 401 | CFI_REL_OFFSET eip, 0 |
400 | 402 | ||
401 | pushl_cfi %eax | 403 | pushl_cfi %eax |
@@ -788,7 +790,7 @@ ENDPROC(ptregs_clone) | |||
788 | */ | 790 | */ |
789 | .section .init.rodata,"a" | 791 | .section .init.rodata,"a" |
790 | ENTRY(interrupt) | 792 | ENTRY(interrupt) |
791 | .text | 793 | .section .entry.text, "ax" |
792 | .p2align 5 | 794 | .p2align 5 |
793 | .p2align CONFIG_X86_L1_CACHE_SHIFT | 795 | .p2align CONFIG_X86_L1_CACHE_SHIFT |
794 | ENTRY(irq_entries_start) | 796 | ENTRY(irq_entries_start) |
@@ -807,7 +809,7 @@ vector=FIRST_EXTERNAL_VECTOR | |||
807 | .endif | 809 | .endif |
808 | .previous | 810 | .previous |
809 | .long 1b | 811 | .long 1b |
810 | .text | 812 | .section .entry.text, "ax" |
811 | vector=vector+1 | 813 | vector=vector+1 |
812 | .endif | 814 | .endif |
813 | .endr | 815 | .endr |
@@ -1409,11 +1411,10 @@ END(general_protection) | |||
1409 | #ifdef CONFIG_KVM_GUEST | 1411 | #ifdef CONFIG_KVM_GUEST |
1410 | ENTRY(async_page_fault) | 1412 | ENTRY(async_page_fault) |
1411 | RING0_EC_FRAME | 1413 | RING0_EC_FRAME |
1412 | pushl $do_async_page_fault | 1414 | pushl_cfi $do_async_page_fault |
1413 | CFI_ADJUST_CFA_OFFSET 4 | ||
1414 | jmp error_code | 1415 | jmp error_code |
1415 | CFI_ENDPROC | 1416 | CFI_ENDPROC |
1416 | END(apf_page_fault) | 1417 | END(async_page_fault) |
1417 | #endif | 1418 | #endif |
1418 | 1419 | ||
1419 | /* | 1420 | /* |
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index aed1ffbeb0c9..b72b4a6466a9 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -61,6 +61,8 @@ | |||
61 | #define __AUDIT_ARCH_LE 0x40000000 | 61 | #define __AUDIT_ARCH_LE 0x40000000 |
62 | 62 | ||
63 | .code64 | 63 | .code64 |
64 | .section .entry.text, "ax" | ||
65 | |||
64 | #ifdef CONFIG_FUNCTION_TRACER | 66 | #ifdef CONFIG_FUNCTION_TRACER |
65 | #ifdef CONFIG_DYNAMIC_FTRACE | 67 | #ifdef CONFIG_DYNAMIC_FTRACE |
66 | ENTRY(mcount) | 68 | ENTRY(mcount) |
@@ -744,7 +746,7 @@ END(stub_rt_sigreturn) | |||
744 | */ | 746 | */ |
745 | .section .init.rodata,"a" | 747 | .section .init.rodata,"a" |
746 | ENTRY(interrupt) | 748 | ENTRY(interrupt) |
747 | .text | 749 | .section .entry.text |
748 | .p2align 5 | 750 | .p2align 5 |
749 | .p2align CONFIG_X86_L1_CACHE_SHIFT | 751 | .p2align CONFIG_X86_L1_CACHE_SHIFT |
750 | ENTRY(irq_entries_start) | 752 | ENTRY(irq_entries_start) |
@@ -763,7 +765,7 @@ vector=FIRST_EXTERNAL_VECTOR | |||
763 | .endif | 765 | .endif |
764 | .previous | 766 | .previous |
765 | .quad 1b | 767 | .quad 1b |
766 | .text | 768 | .section .entry.text |
767 | vector=vector+1 | 769 | vector=vector+1 |
768 | .endif | 770 | .endif |
769 | .endr | 771 | .endr |
@@ -975,9 +977,12 @@ apicinterrupt X86_PLATFORM_IPI_VECTOR \ | |||
975 | x86_platform_ipi smp_x86_platform_ipi | 977 | x86_platform_ipi smp_x86_platform_ipi |
976 | 978 | ||
977 | #ifdef CONFIG_SMP | 979 | #ifdef CONFIG_SMP |
978 | .irpc idx, "01234567" | 980 | .irp idx,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \ |
981 | 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 | ||
982 | .if NUM_INVALIDATE_TLB_VECTORS > \idx | ||
979 | apicinterrupt (INVALIDATE_TLB_VECTOR_START)+\idx \ | 983 | apicinterrupt (INVALIDATE_TLB_VECTOR_START)+\idx \ |
980 | invalidate_interrupt\idx smp_invalidate_interrupt | 984 | invalidate_interrupt\idx smp_invalidate_interrupt |
985 | .endif | ||
981 | .endr | 986 | .endr |
982 | #endif | 987 | #endif |
983 | 988 | ||
@@ -1248,7 +1253,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs) | |||
1248 | decl PER_CPU_VAR(irq_count) | 1253 | decl PER_CPU_VAR(irq_count) |
1249 | jmp error_exit | 1254 | jmp error_exit |
1250 | CFI_ENDPROC | 1255 | CFI_ENDPROC |
1251 | END(do_hypervisor_callback) | 1256 | END(xen_do_hypervisor_callback) |
1252 | 1257 | ||
1253 | /* | 1258 | /* |
1254 | * Hypervisor uses this for application faults while it executes. | 1259 | * Hypervisor uses this for application faults while it executes. |
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 382eb2936d4d..a93742a57468 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c | |||
@@ -437,18 +437,19 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, | |||
437 | return; | 437 | return; |
438 | } | 438 | } |
439 | 439 | ||
440 | if (ftrace_push_return_trace(old, self_addr, &trace.depth, | ||
441 | frame_pointer) == -EBUSY) { | ||
442 | *parent = old; | ||
443 | return; | ||
444 | } | ||
445 | |||
446 | trace.func = self_addr; | 440 | trace.func = self_addr; |
441 | trace.depth = current->curr_ret_stack + 1; | ||
447 | 442 | ||
448 | /* Only trace if the calling function expects to */ | 443 | /* Only trace if the calling function expects to */ |
449 | if (!ftrace_graph_entry(&trace)) { | 444 | if (!ftrace_graph_entry(&trace)) { |
450 | current->curr_ret_stack--; | ||
451 | *parent = old; | 445 | *parent = old; |
446 | return; | ||
447 | } | ||
448 | |||
449 | if (ftrace_push_return_trace(old, self_addr, &trace.depth, | ||
450 | frame_pointer) == -EBUSY) { | ||
451 | *parent = old; | ||
452 | return; | ||
452 | } | 453 | } |
453 | } | 454 | } |
454 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 455 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
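The ftrace reordering above calls the graph entry hook before anything is pushed onto the task's return stack, so a rejected entry no longer needs the curr_ret_stack-- undo. A toy model of that ordering (the names and the fixed-size stack are illustrative only):

#include <stdio.h>
#include <string.h>

#define STACK_DEPTH 8

static unsigned long ret_stack[STACK_DEPTH];
static int curr_ret_stack = -1;

static int filter_accepts(const char *func)
{
        return strcmp(func, "ignored_func") != 0;   /* pretend entry filter */
}

static int trace_entry(const char *func, unsigned long retaddr)
{
        if (!filter_accepts(func))
                return 0;                       /* nothing was pushed, nothing to undo */
        if (curr_ret_stack + 1 >= STACK_DEPTH)
                return 0;                       /* the -EBUSY case in the kernel */
        ret_stack[++curr_ret_stack] = retaddr;  /* push only after both checks pass */
        return 1;
}

int main(void)
{
        printf("traced: %d\n", trace_entry("interesting_func", 0x1000));
        printf("traced: %d\n", trace_entry("ignored_func", 0x2000));
        printf("depth after both calls: %d\n", curr_ret_stack + 1);
        return 0;
}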
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index 7f138b3c3c52..d6d6bb361931 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c | |||
@@ -34,15 +34,6 @@ void __init i386_start_kernel(void) | |||
34 | { | 34 | { |
35 | memblock_init(); | 35 | memblock_init(); |
36 | 36 | ||
37 | #ifdef CONFIG_X86_TRAMPOLINE | ||
38 | /* | ||
39 | * But first pinch a few for the stack/trampoline stuff | ||
40 | * FIXME: Don't need the extra page at 4K, but need to fix | ||
41 | * trampoline before removing it. (see the GDT stuff) | ||
42 | */ | ||
43 | memblock_x86_reserve_range(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE"); | ||
44 | #endif | ||
45 | |||
46 | memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); | 37 | memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); |
47 | 38 | ||
48 | #ifdef CONFIG_BLK_DEV_INITRD | 39 | #ifdef CONFIG_BLK_DEV_INITRD |
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 767d6c43de37..ce0be7cd085e 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S | |||
@@ -73,7 +73,7 @@ MAPPING_BEYOND_END = PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT | |||
73 | */ | 73 | */ |
74 | KERNEL_PAGES = LOWMEM_PAGES | 74 | KERNEL_PAGES = LOWMEM_PAGES |
75 | 75 | ||
76 | INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm | 76 | INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE |
77 | RESERVE_BRK(pagetables, INIT_MAP_SIZE) | 77 | RESERVE_BRK(pagetables, INIT_MAP_SIZE) |
78 | 78 | ||
79 | /* | 79 | /* |
@@ -137,7 +137,7 @@ ENTRY(startup_32) | |||
137 | movsl | 137 | movsl |
138 | 1: | 138 | 1: |
139 | 139 | ||
140 | #ifdef CONFIG_OLPC_OPENFIRMWARE | 140 | #ifdef CONFIG_OLPC |
141 | /* save OFW's pgdir table for later use when calling into OFW */ | 141 | /* save OFW's pgdir table for later use when calling into OFW */ |
142 | movl %cr3, %eax | 142 | movl %cr3, %eax |
143 | movl %eax, pa(olpc_ofw_pgd) | 143 | movl %eax, pa(olpc_ofw_pgd) |
@@ -623,7 +623,7 @@ ENTRY(initial_code) | |||
623 | * BSS section | 623 | * BSS section |
624 | */ | 624 | */ |
625 | __PAGE_ALIGNED_BSS | 625 | __PAGE_ALIGNED_BSS |
626 | .align PAGE_SIZE_asm | 626 | .align PAGE_SIZE |
627 | #ifdef CONFIG_X86_PAE | 627 | #ifdef CONFIG_X86_PAE |
628 | initial_pg_pmd: | 628 | initial_pg_pmd: |
629 | .fill 1024*KPMDS,4,0 | 629 | .fill 1024*KPMDS,4,0 |
@@ -644,7 +644,7 @@ ENTRY(swapper_pg_dir) | |||
644 | #ifdef CONFIG_X86_PAE | 644 | #ifdef CONFIG_X86_PAE |
645 | __PAGE_ALIGNED_DATA | 645 | __PAGE_ALIGNED_DATA |
646 | /* Page-aligned for the benefit of paravirt? */ | 646 | /* Page-aligned for the benefit of paravirt? */ |
647 | .align PAGE_SIZE_asm | 647 | .align PAGE_SIZE |
648 | ENTRY(initial_page_table) | 648 | ENTRY(initial_page_table) |
649 | .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */ | 649 | .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */ |
650 | # if KPMDS == 3 | 650 | # if KPMDS == 3 |
@@ -662,7 +662,7 @@ ENTRY(initial_page_table) | |||
662 | # else | 662 | # else |
663 | # error "Kernel PMDs should be 1, 2 or 3" | 663 | # error "Kernel PMDs should be 1, 2 or 3" |
664 | # endif | 664 | # endif |
665 | .align PAGE_SIZE_asm /* needs to be page-sized too */ | 665 | .align PAGE_SIZE /* needs to be page-sized too */ |
666 | #endif | 666 | #endif |
667 | 667 | ||
668 | .data | 668 | .data |
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 239046bd447f..e11e39478a49 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S | |||
@@ -136,10 +136,9 @@ ident_complete: | |||
136 | /* Fixup phys_base */ | 136 | /* Fixup phys_base */ |
137 | addq %rbp, phys_base(%rip) | 137 | addq %rbp, phys_base(%rip) |
138 | 138 | ||
139 | #ifdef CONFIG_X86_TRAMPOLINE | 139 | /* Fixup trampoline */ |
140 | addq %rbp, trampoline_level4_pgt + 0(%rip) | 140 | addq %rbp, trampoline_level4_pgt + 0(%rip) |
141 | addq %rbp, trampoline_level4_pgt + (511*8)(%rip) | 141 | addq %rbp, trampoline_level4_pgt + (511*8)(%rip) |
142 | #endif | ||
143 | 142 | ||
144 | /* Due to ENTRY(), sometimes the empty space gets filled with | 143 | /* Due to ENTRY(), sometimes the empty space gets filled with |
145 | * zeros. Better take a jmp than relying on empty space being | 144 | * zeros. Better take a jmp than relying on empty space being |
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 4ff5968f12d2..bfe8f729e086 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c | |||
@@ -503,7 +503,7 @@ static int hpet_assign_irq(struct hpet_dev *dev) | |||
503 | if (!irq) | 503 | if (!irq) |
504 | return -EINVAL; | 504 | return -EINVAL; |
505 | 505 | ||
506 | set_irq_data(irq, dev); | 506 | irq_set_handler_data(irq, dev); |
507 | 507 | ||
508 | if (hpet_setup_msi_irq(irq)) | 508 | if (hpet_setup_msi_irq(irq)) |
509 | return -EINVAL; | 509 | return -EINVAL; |
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index 20757cb2efa3..d9ca749c123b 100644 --- a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c | |||
@@ -112,7 +112,7 @@ static void make_8259A_irq(unsigned int irq) | |||
112 | { | 112 | { |
113 | disable_irq_nosync(irq); | 113 | disable_irq_nosync(irq); |
114 | io_apic_irqs &= ~(1<<irq); | 114 | io_apic_irqs &= ~(1<<irq); |
115 | set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq, | 115 | irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq, |
116 | i8259A_chip.name); | 116 | i8259A_chip.name); |
117 | enable_irq(irq); | 117 | enable_irq(irq); |
118 | } | 118 | } |
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c index 8eec0ec59af2..8c968974253d 100644 --- a/arch/x86/kernel/ioport.c +++ b/arch/x86/kernel/ioport.c | |||
@@ -14,22 +14,9 @@ | |||
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | #include <linux/thread_info.h> | 15 | #include <linux/thread_info.h> |
16 | #include <linux/syscalls.h> | 16 | #include <linux/syscalls.h> |
17 | #include <linux/bitmap.h> | ||
17 | #include <asm/syscalls.h> | 18 | #include <asm/syscalls.h> |
18 | 19 | ||
19 | /* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */ | ||
20 | static void set_bitmap(unsigned long *bitmap, unsigned int base, | ||
21 | unsigned int extent, int new_value) | ||
22 | { | ||
23 | unsigned int i; | ||
24 | |||
25 | for (i = base; i < base + extent; i++) { | ||
26 | if (new_value) | ||
27 | __set_bit(i, bitmap); | ||
28 | else | ||
29 | __clear_bit(i, bitmap); | ||
30 | } | ||
31 | } | ||
32 | |||
33 | /* | 20 | /* |
34 | * this changes the io permissions bitmap in the current task. | 21 | * this changes the io permissions bitmap in the current task. |
35 | */ | 22 | */ |
@@ -69,7 +56,10 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on) | |||
69 | */ | 56 | */ |
70 | tss = &per_cpu(init_tss, get_cpu()); | 57 | tss = &per_cpu(init_tss, get_cpu()); |
71 | 58 | ||
72 | set_bitmap(t->io_bitmap_ptr, from, num, !turn_on); | 59 | if (turn_on) |
60 | bitmap_clear(t->io_bitmap_ptr, from, num); | ||
61 | else | ||
62 | bitmap_set(t->io_bitmap_ptr, from, num); | ||
73 | 63 | ||
74 | /* | 64 | /* |
75 | * Search for a (possibly new) maximum. This is simple and stupid, | 65 | * Search for a (possibly new) maximum. This is simple and stupid, |
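The ioport.c change swaps the open-coded set_bitmap() loop for the generic bitmap_set()/bitmap_clear() helpers; note the inversion, since a zero bit in the TSS I/O bitmap means the port is accessible. A self-contained sketch with slow bit-by-bit stand-ins for the two helpers (the real kernel helpers work a word at a time):

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static void bitmap_set_slow(unsigned long *map, unsigned start, unsigned len)
{
        for (unsigned i = start; i < start + len; i++)
                map[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
}

static void bitmap_clear_slow(unsigned long *map, unsigned start, unsigned len)
{
        for (unsigned i = start; i < start + len; i++)
                map[i / BITS_PER_LONG] &= ~(1UL << (i % BITS_PER_LONG));
}

int main(void)
{
        unsigned long io_bitmap[1024 / BITS_PER_LONG] = { 0 };

        /* ioperm(0x3f8, 8, 1): turning ports *on* clears bits, because a
         * zero bit in the I/O permission bitmap means "access allowed". */
        bitmap_set_slow(io_bitmap, 0, 1024);        /* start fully denied */
        bitmap_clear_slow(io_bitmap, 0x3f8, 8);     /* allow the 16550 range */

        printf("word holding port 0x3f8: %#lx\n",
               io_bitmap[0x3f8 / BITS_PER_LONG]);
        return 0;
}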
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 387b6a0c9e81..948a31eae75f 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c | |||
@@ -44,9 +44,9 @@ void ack_bad_irq(unsigned int irq) | |||
44 | 44 | ||
45 | #define irq_stats(x) (&per_cpu(irq_stat, x)) | 45 | #define irq_stats(x) (&per_cpu(irq_stat, x)) |
46 | /* | 46 | /* |
47 | * /proc/interrupts printing: | 47 | * /proc/interrupts printing for arch specific interrupts |
48 | */ | 48 | */ |
49 | static int show_other_interrupts(struct seq_file *p, int prec) | 49 | int arch_show_interrupts(struct seq_file *p, int prec) |
50 | { | 50 | { |
51 | int j; | 51 | int j; |
52 | 52 | ||
@@ -122,59 +122,6 @@ static int show_other_interrupts(struct seq_file *p, int prec) | |||
122 | return 0; | 122 | return 0; |
123 | } | 123 | } |
124 | 124 | ||
125 | int show_interrupts(struct seq_file *p, void *v) | ||
126 | { | ||
127 | unsigned long flags, any_count = 0; | ||
128 | int i = *(loff_t *) v, j, prec; | ||
129 | struct irqaction *action; | ||
130 | struct irq_desc *desc; | ||
131 | |||
132 | if (i > nr_irqs) | ||
133 | return 0; | ||
134 | |||
135 | for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec) | ||
136 | j *= 10; | ||
137 | |||
138 | if (i == nr_irqs) | ||
139 | return show_other_interrupts(p, prec); | ||
140 | |||
141 | /* print header */ | ||
142 | if (i == 0) { | ||
143 | seq_printf(p, "%*s", prec + 8, ""); | ||
144 | for_each_online_cpu(j) | ||
145 | seq_printf(p, "CPU%-8d", j); | ||
146 | seq_putc(p, '\n'); | ||
147 | } | ||
148 | |||
149 | desc = irq_to_desc(i); | ||
150 | if (!desc) | ||
151 | return 0; | ||
152 | |||
153 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
154 | for_each_online_cpu(j) | ||
155 | any_count |= kstat_irqs_cpu(i, j); | ||
156 | action = desc->action; | ||
157 | if (!action && !any_count) | ||
158 | goto out; | ||
159 | |||
160 | seq_printf(p, "%*d: ", prec, i); | ||
161 | for_each_online_cpu(j) | ||
162 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | ||
163 | seq_printf(p, " %8s", desc->irq_data.chip->name); | ||
164 | seq_printf(p, "-%-8s", desc->name); | ||
165 | |||
166 | if (action) { | ||
167 | seq_printf(p, " %s", action->name); | ||
168 | while ((action = action->next) != NULL) | ||
169 | seq_printf(p, ", %s", action->name); | ||
170 | } | ||
171 | |||
172 | seq_putc(p, '\n'); | ||
173 | out: | ||
174 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
175 | return 0; | ||
176 | } | ||
177 | |||
178 | /* | 125 | /* |
179 | * /proc/stat helpers | 126 | * /proc/stat helpers |
180 | */ | 127 | */ |
@@ -276,15 +223,6 @@ void smp_x86_platform_ipi(struct pt_regs *regs) | |||
276 | 223 | ||
277 | EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq); | 224 | EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq); |
278 | 225 | ||
279 | #ifdef CONFIG_OF | ||
280 | unsigned int irq_create_of_mapping(struct device_node *controller, | ||
281 | const u32 *intspec, unsigned int intsize) | ||
282 | { | ||
283 | return intspec[0]; | ||
284 | } | ||
285 | EXPORT_SYMBOL_GPL(irq_create_of_mapping); | ||
286 | #endif | ||
287 | |||
288 | #ifdef CONFIG_HOTPLUG_CPU | 226 | #ifdef CONFIG_HOTPLUG_CPU |
289 | /* A cpu has been removed from cpu_online_mask. Reset irq affinities. */ | 227 | /* A cpu has been removed from cpu_online_mask. Reset irq affinities. */ |
290 | void fixup_irqs(void) | 228 | void fixup_irqs(void) |
@@ -293,6 +231,7 @@ void fixup_irqs(void) | |||
293 | static int warned; | 231 | static int warned; |
294 | struct irq_desc *desc; | 232 | struct irq_desc *desc; |
295 | struct irq_data *data; | 233 | struct irq_data *data; |
234 | struct irq_chip *chip; | ||
296 | 235 | ||
297 | for_each_irq_desc(irq, desc) { | 236 | for_each_irq_desc(irq, desc) { |
298 | int break_affinity = 0; | 237 | int break_affinity = 0; |
@@ -307,10 +246,10 @@ void fixup_irqs(void) | |||
307 | /* interrupt's are disabled at this point */ | 246 | /* interrupt's are disabled at this point */ |
308 | raw_spin_lock(&desc->lock); | 247 | raw_spin_lock(&desc->lock); |
309 | 248 | ||
310 | data = &desc->irq_data; | 249 | data = irq_desc_get_irq_data(desc); |
311 | affinity = data->affinity; | 250 | affinity = data->affinity; |
312 | if (!irq_has_action(irq) || | 251 | if (!irq_has_action(irq) || |
313 | cpumask_equal(affinity, cpu_online_mask)) { | 252 | cpumask_subset(affinity, cpu_online_mask)) { |
314 | raw_spin_unlock(&desc->lock); | 253 | raw_spin_unlock(&desc->lock); |
315 | continue; | 254 | continue; |
316 | } | 255 | } |
@@ -327,16 +266,17 @@ void fixup_irqs(void) | |||
327 | affinity = cpu_all_mask; | 266 | affinity = cpu_all_mask; |
328 | } | 267 | } |
329 | 268 | ||
330 | if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_mask) | 269 | chip = irq_data_get_irq_chip(data); |
331 | data->chip->irq_mask(data); | 270 | if (!irqd_can_move_in_process_context(data) && chip->irq_mask) |
271 | chip->irq_mask(data); | ||
332 | 272 | ||
333 | if (data->chip->irq_set_affinity) | 273 | if (chip->irq_set_affinity) |
334 | data->chip->irq_set_affinity(data, affinity, true); | 274 | chip->irq_set_affinity(data, affinity, true); |
335 | else if (!(warned++)) | 275 | else if (!(warned++)) |
336 | set_affinity = 0; | 276 | set_affinity = 0; |
337 | 277 | ||
338 | if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_unmask) | 278 | if (!irqd_can_move_in_process_context(data) && chip->irq_unmask) |
339 | data->chip->irq_unmask(data); | 279 | chip->irq_unmask(data); |
340 | 280 | ||
341 | raw_spin_unlock(&desc->lock); | 281 | raw_spin_unlock(&desc->lock); |
342 | 282 | ||
@@ -368,10 +308,11 @@ void fixup_irqs(void) | |||
368 | irq = __this_cpu_read(vector_irq[vector]); | 308 | irq = __this_cpu_read(vector_irq[vector]); |
369 | 309 | ||
370 | desc = irq_to_desc(irq); | 310 | desc = irq_to_desc(irq); |
371 | data = &desc->irq_data; | 311 | data = irq_desc_get_irq_data(desc); |
312 | chip = irq_data_get_irq_chip(data); | ||
372 | raw_spin_lock(&desc->lock); | 313 | raw_spin_lock(&desc->lock); |
373 | if (data->chip->irq_retrigger) | 314 | if (chip->irq_retrigger) |
374 | data->chip->irq_retrigger(data); | 315 | chip->irq_retrigger(data); |
375 | raw_spin_unlock(&desc->lock); | 316 | raw_spin_unlock(&desc->lock); |
376 | } | 317 | } |
377 | } | 318 | } |
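In fixup_irqs() above, cpumask_equal() becomes cpumask_subset(): an IRQ whose affinity is a strict subset of the CPUs that remain online does not need its affinity broken either. A toy 8-bit mask makes the difference visible (real cpumasks are arbitrary-width bitmaps, of course):

#include <stdio.h>

typedef unsigned char mask_t;

static int mask_equal(mask_t a, mask_t b)  { return a == b; }
static int mask_subset(mask_t a, mask_t b) { return (a & ~b) == 0; }

int main(void)
{
        mask_t online   = 0x0f;   /* CPUs 0-3 still online */
        mask_t affinity = 0x03;   /* IRQ bound to CPUs 0-1 */

        /* old test: not equal, so the IRQ would be (needlessly) reworked */
        printf("equal:  skip=%d\n", mask_equal(affinity, online));
        /* new test: a proper subset of the online CPUs is left alone */
        printf("subset: skip=%d\n", mask_subset(affinity, online));
        return 0;
}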
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index c752e973958d..f470e4ef993e 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <asm/setup.h> | 25 | #include <asm/setup.h> |
26 | #include <asm/i8259.h> | 26 | #include <asm/i8259.h> |
27 | #include <asm/traps.h> | 27 | #include <asm/traps.h> |
28 | #include <asm/prom.h> | ||
28 | 29 | ||
29 | /* | 30 | /* |
30 | * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts: | 31 | * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts: |
@@ -71,6 +72,7 @@ static irqreturn_t math_error_irq(int cpl, void *dev_id) | |||
71 | static struct irqaction fpu_irq = { | 72 | static struct irqaction fpu_irq = { |
72 | .handler = math_error_irq, | 73 | .handler = math_error_irq, |
73 | .name = "fpu", | 74 | .name = "fpu", |
75 | .flags = IRQF_NO_THREAD, | ||
74 | }; | 76 | }; |
75 | #endif | 77 | #endif |
76 | 78 | ||
@@ -80,6 +82,7 @@ static struct irqaction fpu_irq = { | |||
80 | static struct irqaction irq2 = { | 82 | static struct irqaction irq2 = { |
81 | .handler = no_action, | 83 | .handler = no_action, |
82 | .name = "cascade", | 84 | .name = "cascade", |
85 | .flags = IRQF_NO_THREAD, | ||
83 | }; | 86 | }; |
84 | 87 | ||
85 | DEFINE_PER_CPU(vector_irq_t, vector_irq) = { | 88 | DEFINE_PER_CPU(vector_irq_t, vector_irq) = { |
@@ -110,7 +113,7 @@ void __init init_ISA_irqs(void) | |||
110 | legacy_pic->init(0); | 113 | legacy_pic->init(0); |
111 | 114 | ||
112 | for (i = 0; i < legacy_pic->nr_legacy_irqs; i++) | 115 | for (i = 0; i < legacy_pic->nr_legacy_irqs; i++) |
113 | set_irq_chip_and_handler_name(i, chip, handle_level_irq, name); | 116 | irq_set_chip_and_handler_name(i, chip, handle_level_irq, name); |
114 | } | 117 | } |
115 | 118 | ||
116 | void __init init_IRQ(void) | 119 | void __init init_IRQ(void) |
@@ -118,6 +121,12 @@ void __init init_IRQ(void) | |||
118 | int i; | 121 | int i; |
119 | 122 | ||
120 | /* | 123 | /* |
124 | * We probably need a better place for this, but it works for | ||
125 | * now ... | ||
126 | */ | ||
127 | x86_add_irq_domains(); | ||
128 | |||
129 | /* | ||
121 | * On cpu 0, Assign IRQ0_VECTOR..IRQ15_VECTOR's to IRQ 0..15. | 130 | * On cpu 0, Assign IRQ0_VECTOR..IRQ15_VECTOR's to IRQ 0..15. |
122 | * If these IRQ's are handled by legacy interrupt-controllers like PIC, | 131 | * If these IRQ's are handled by legacy interrupt-controllers like PIC, |
123 | * then this configuration will likely be static after the boot. If | 132 | * then this configuration will likely be static after the boot. If |
@@ -164,14 +173,77 @@ static void __init smp_intr_init(void) | |||
164 | alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt); | 173 | alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt); |
165 | 174 | ||
166 | /* IPIs for invalidation */ | 175 | /* IPIs for invalidation */ |
167 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0); | 176 | #define ALLOC_INVTLB_VEC(NR) \ |
168 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1); | 177 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+NR, \ |
169 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2); | 178 | invalidate_interrupt##NR) |
170 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3); | 179 | |
171 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4); | 180 | switch (NUM_INVALIDATE_TLB_VECTORS) { |
172 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5); | 181 | default: |
173 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6); | 182 | ALLOC_INVTLB_VEC(31); |
174 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7); | 183 | case 31: |
184 | ALLOC_INVTLB_VEC(30); | ||
185 | case 30: | ||
186 | ALLOC_INVTLB_VEC(29); | ||
187 | case 29: | ||
188 | ALLOC_INVTLB_VEC(28); | ||
189 | case 28: | ||
190 | ALLOC_INVTLB_VEC(27); | ||
191 | case 27: | ||
192 | ALLOC_INVTLB_VEC(26); | ||
193 | case 26: | ||
194 | ALLOC_INVTLB_VEC(25); | ||
195 | case 25: | ||
196 | ALLOC_INVTLB_VEC(24); | ||
197 | case 24: | ||
198 | ALLOC_INVTLB_VEC(23); | ||
199 | case 23: | ||
200 | ALLOC_INVTLB_VEC(22); | ||
201 | case 22: | ||
202 | ALLOC_INVTLB_VEC(21); | ||
203 | case 21: | ||
204 | ALLOC_INVTLB_VEC(20); | ||
205 | case 20: | ||
206 | ALLOC_INVTLB_VEC(19); | ||
207 | case 19: | ||
208 | ALLOC_INVTLB_VEC(18); | ||
209 | case 18: | ||
210 | ALLOC_INVTLB_VEC(17); | ||
211 | case 17: | ||
212 | ALLOC_INVTLB_VEC(16); | ||
213 | case 16: | ||
214 | ALLOC_INVTLB_VEC(15); | ||
215 | case 15: | ||
216 | ALLOC_INVTLB_VEC(14); | ||
217 | case 14: | ||
218 | ALLOC_INVTLB_VEC(13); | ||
219 | case 13: | ||
220 | ALLOC_INVTLB_VEC(12); | ||
221 | case 12: | ||
222 | ALLOC_INVTLB_VEC(11); | ||
223 | case 11: | ||
224 | ALLOC_INVTLB_VEC(10); | ||
225 | case 10: | ||
226 | ALLOC_INVTLB_VEC(9); | ||
227 | case 9: | ||
228 | ALLOC_INVTLB_VEC(8); | ||
229 | case 8: | ||
230 | ALLOC_INVTLB_VEC(7); | ||
231 | case 7: | ||
232 | ALLOC_INVTLB_VEC(6); | ||
233 | case 6: | ||
234 | ALLOC_INVTLB_VEC(5); | ||
235 | case 5: | ||
236 | ALLOC_INVTLB_VEC(4); | ||
237 | case 4: | ||
238 | ALLOC_INVTLB_VEC(3); | ||
239 | case 3: | ||
240 | ALLOC_INVTLB_VEC(2); | ||
241 | case 2: | ||
242 | ALLOC_INVTLB_VEC(1); | ||
243 | case 1: | ||
244 | ALLOC_INVTLB_VEC(0); | ||
245 | break; | ||
246 | } | ||
175 | 247 | ||
176 | /* IPI for generic function call */ | 248 | /* IPI for generic function call */ |
177 | alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); | 249 | alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); |
@@ -243,7 +315,7 @@ void __init native_init_IRQ(void) | |||
243 | set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]); | 315 | set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]); |
244 | } | 316 | } |
245 | 317 | ||
246 | if (!acpi_ioapic) | 318 | if (!acpi_ioapic && !of_ioapic) |
247 | setup_irq(2, &irq2); | 319 | setup_irq(2, &irq2); |
248 | 320 | ||
249 | #ifdef CONFIG_X86_32 | 321 | #ifdef CONFIG_X86_32 |
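The ALLOC_INVTLB_VEC() switch above relies on deliberate case fall-through so that exactly NUM_INVALIDATE_TLB_VECTORS gates are registered, counting down to vector 0. A reduced sketch of the same pattern with only a handful of cases; alloc_vec() merely prints instead of calling alloc_intr_gate():

#include <stdio.h>

#define NUM_VECTORS 3

static void alloc_vec(int nr)
{
        printf("alloc_intr_gate(START + %d)\n", nr);
}

int main(void)
{
        /* Entering at "case 3" registers vectors 2, 1 and 0 -- three gates,
         * just as the kernel ladder registers NUM_INVALIDATE_TLB_VECTORS. */
        switch (NUM_VECTORS) {
        default:
                alloc_vec(3);
                /* fall through */
        case 3:
                alloc_vec(2);
                /* fall through */
        case 2:
                alloc_vec(1);
                /* fall through */
        case 1:
                alloc_vec(0);
                break;
        }
        return 0;
}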
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index a4130005028a..7c64c420a9f6 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c | |||
@@ -533,15 +533,6 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd) | |||
533 | } | 533 | } |
534 | return NOTIFY_DONE; | 534 | return NOTIFY_DONE; |
535 | 535 | ||
536 | case DIE_NMIWATCHDOG: | ||
537 | if (atomic_read(&kgdb_active) != -1) { | ||
538 | /* KGDB CPU roundup: */ | ||
539 | kgdb_nmicallback(raw_smp_processor_id(), regs); | ||
540 | return NOTIFY_STOP; | ||
541 | } | ||
542 | /* Enter debugger: */ | ||
543 | break; | ||
544 | |||
545 | case DIE_DEBUG: | 536 | case DIE_DEBUG: |
546 | if (atomic_read(&kgdb_cpu_doing_single_step) != -1) { | 537 | if (atomic_read(&kgdb_cpu_doing_single_step) != -1) { |
547 | if (user_mode(regs)) | 538 | if (user_mode(regs)) |
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index d91c477b3f62..c969fd9d1566 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c | |||
@@ -1276,6 +1276,14 @@ static int __kprobes can_optimize(unsigned long paddr) | |||
1276 | if (!kallsyms_lookup_size_offset(paddr, &size, &offset)) | 1276 | if (!kallsyms_lookup_size_offset(paddr, &size, &offset)) |
1277 | return 0; | 1277 | return 0; |
1278 | 1278 | ||
1279 | /* | ||
1280 | * Do not optimize in the entry code due to the unstable | ||
1281 | * stack handling. | ||
1282 | */ | ||
1283 | if ((paddr >= (unsigned long )__entry_text_start) && | ||
1284 | (paddr < (unsigned long )__entry_text_end)) | ||
1285 | return 0; | ||
1286 | |||
1279 | /* Check there is enough space for a relative jump. */ | 1287 | /* Check there is enough space for a relative jump. */ |
1280 | if (size - offset < RELATIVEJUMP_SIZE) | 1288 | if (size - offset < RELATIVEJUMP_SIZE) |
1281 | return 0; | 1289 | return 0; |
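can_optimize() now refuses any probe address inside [__entry_text_start, __entry_text_end), the region delimiting the new .entry.text section introduced in the entry_*.S hunks above. A user-space sketch of the same range test, with a static buffer standing in for the linker-provided section bounds:

#include <stdio.h>

static const char entry_text[64];   /* stands in for the .entry.text section */

static int in_entry_text(const void *addr)
{
        const char *p = addr;

        /* start inclusive, end exclusive, exactly like the kprobes check */
        return p >= entry_text && p < entry_text + sizeof(entry_text);
}

int main(void)
{
        printf("start of range: %d\n", in_entry_text(entry_text));
        printf("inside range:   %d\n", in_entry_text(entry_text + 10));
        printf("one past end:   %d\n", in_entry_text(entry_text + sizeof(entry_text)));
        return 0;
}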
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index 0fe6d1a66c38..c5610384ab16 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c | |||
@@ -66,7 +66,6 @@ struct microcode_amd { | |||
66 | unsigned int mpb[0]; | 66 | unsigned int mpb[0]; |
67 | }; | 67 | }; |
68 | 68 | ||
69 | #define UCODE_MAX_SIZE 2048 | ||
70 | #define UCODE_CONTAINER_SECTION_HDR 8 | 69 | #define UCODE_CONTAINER_SECTION_HDR 8 |
71 | #define UCODE_CONTAINER_HEADER_SIZE 12 | 70 | #define UCODE_CONTAINER_HEADER_SIZE 12 |
72 | 71 | ||
@@ -77,20 +76,20 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig) | |||
77 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 76 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
78 | u32 dummy; | 77 | u32 dummy; |
79 | 78 | ||
80 | memset(csig, 0, sizeof(*csig)); | ||
81 | if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) { | 79 | if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) { |
82 | pr_warning("microcode: CPU%d: AMD CPU family 0x%x not " | 80 | pr_warning("CPU%d: family %d not supported\n", cpu, c->x86); |
83 | "supported\n", cpu, c->x86); | ||
84 | return -1; | 81 | return -1; |
85 | } | 82 | } |
83 | |||
86 | rdmsr(MSR_AMD64_PATCH_LEVEL, csig->rev, dummy); | 84 | rdmsr(MSR_AMD64_PATCH_LEVEL, csig->rev, dummy); |
87 | pr_info("CPU%d: patch_level=0x%x\n", cpu, csig->rev); | 85 | pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev); |
86 | |||
88 | return 0; | 87 | return 0; |
89 | } | 88 | } |
90 | 89 | ||
91 | static int get_matching_microcode(int cpu, void *mc, int rev) | 90 | static int get_matching_microcode(int cpu, struct microcode_header_amd *mc_hdr, |
91 | int rev) | ||
92 | { | 92 | { |
93 | struct microcode_header_amd *mc_header = mc; | ||
94 | unsigned int current_cpu_id; | 93 | unsigned int current_cpu_id; |
95 | u16 equiv_cpu_id = 0; | 94 | u16 equiv_cpu_id = 0; |
96 | unsigned int i = 0; | 95 | unsigned int i = 0; |
@@ -109,17 +108,17 @@ static int get_matching_microcode(int cpu, void *mc, int rev) | |||
109 | if (!equiv_cpu_id) | 108 | if (!equiv_cpu_id) |
110 | return 0; | 109 | return 0; |
111 | 110 | ||
112 | if (mc_header->processor_rev_id != equiv_cpu_id) | 111 | if (mc_hdr->processor_rev_id != equiv_cpu_id) |
113 | return 0; | 112 | return 0; |
114 | 113 | ||
115 | /* ucode might be chipset specific -- currently we don't support this */ | 114 | /* ucode might be chipset specific -- currently we don't support this */ |
116 | if (mc_header->nb_dev_id || mc_header->sb_dev_id) { | 115 | if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) { |
117 | pr_err("CPU%d: loading of chipset specific code not yet supported\n", | 116 | pr_err("CPU%d: chipset specific code not yet supported\n", |
118 | cpu); | 117 | cpu); |
119 | return 0; | 118 | return 0; |
120 | } | 119 | } |
121 | 120 | ||
122 | if (mc_header->patch_id <= rev) | 121 | if (mc_hdr->patch_id <= rev) |
123 | return 0; | 122 | return 0; |
124 | 123 | ||
125 | return 1; | 124 | return 1; |
@@ -144,71 +143,93 @@ static int apply_microcode_amd(int cpu) | |||
144 | 143 | ||
145 | /* check current patch id and patch's id for match */ | 144 | /* check current patch id and patch's id for match */ |
146 | if (rev != mc_amd->hdr.patch_id) { | 145 | if (rev != mc_amd->hdr.patch_id) { |
147 | pr_err("CPU%d: update failed (for patch_level=0x%x)\n", | 146 | pr_err("CPU%d: update failed for patch_level=0x%08x\n", |
148 | cpu, mc_amd->hdr.patch_id); | 147 | cpu, mc_amd->hdr.patch_id); |
149 | return -1; | 148 | return -1; |
150 | } | 149 | } |
151 | 150 | ||
152 | pr_info("CPU%d: updated (new patch_level=0x%x)\n", cpu, rev); | 151 | pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev); |
153 | uci->cpu_sig.rev = rev; | 152 | uci->cpu_sig.rev = rev; |
154 | 153 | ||
155 | return 0; | 154 | return 0; |
156 | } | 155 | } |
157 | 156 | ||
158 | static void * | 157 | static unsigned int verify_ucode_size(int cpu, const u8 *buf, unsigned int size) |
159 | get_next_ucode(const u8 *buf, unsigned int size, unsigned int *mc_size) | ||
160 | { | 158 | { |
161 | unsigned int total_size; | 159 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
162 | u8 section_hdr[UCODE_CONTAINER_SECTION_HDR]; | 160 | unsigned int max_size, actual_size; |
163 | void *mc; | 161 | |
162 | #define F1XH_MPB_MAX_SIZE 2048 | ||
163 | #define F14H_MPB_MAX_SIZE 1824 | ||
164 | #define F15H_MPB_MAX_SIZE 4096 | ||
165 | |||
166 | switch (c->x86) { | ||
167 | case 0x14: | ||
168 | max_size = F14H_MPB_MAX_SIZE; | ||
169 | break; | ||
170 | case 0x15: | ||
171 | max_size = F15H_MPB_MAX_SIZE; | ||
172 | break; | ||
173 | default: | ||
174 | max_size = F1XH_MPB_MAX_SIZE; | ||
175 | break; | ||
176 | } | ||
164 | 177 | ||
165 | get_ucode_data(section_hdr, buf, UCODE_CONTAINER_SECTION_HDR); | 178 | actual_size = buf[4] + (buf[5] << 8); |
166 | 179 | ||
167 | if (section_hdr[0] != UCODE_UCODE_TYPE) { | 180 | if (actual_size > size || actual_size > max_size) { |
168 | pr_err("error: invalid type field in container file section header\n"); | 181 | pr_err("section size mismatch\n"); |
169 | return NULL; | 182 | return 0; |
170 | } | 183 | } |
171 | 184 | ||
172 | total_size = (unsigned long) (section_hdr[4] + (section_hdr[5] << 8)); | 185 | return actual_size; |
186 | } | ||
173 | 187 | ||
174 | if (total_size > size || total_size > UCODE_MAX_SIZE) { | 188 | static struct microcode_header_amd * |
175 | pr_err("error: size mismatch\n"); | 189 | get_next_ucode(int cpu, const u8 *buf, unsigned int size, unsigned int *mc_size) |
176 | return NULL; | 190 | { |
191 | struct microcode_header_amd *mc = NULL; | ||
192 | unsigned int actual_size = 0; | ||
193 | |||
194 | if (buf[0] != UCODE_UCODE_TYPE) { | ||
195 | pr_err("invalid type field in container file section header\n"); | ||
196 | goto out; | ||
177 | } | 197 | } |
178 | 198 | ||
179 | mc = vzalloc(UCODE_MAX_SIZE); | 199 | actual_size = verify_ucode_size(cpu, buf, size); |
200 | if (!actual_size) | ||
201 | goto out; | ||
202 | |||
203 | mc = vzalloc(actual_size); | ||
180 | if (!mc) | 204 | if (!mc) |
181 | return NULL; | 205 | goto out; |
182 | 206 | ||
183 | get_ucode_data(mc, buf + UCODE_CONTAINER_SECTION_HDR, total_size); | 207 | get_ucode_data(mc, buf + UCODE_CONTAINER_SECTION_HDR, actual_size); |
184 | *mc_size = total_size + UCODE_CONTAINER_SECTION_HDR; | 208 | *mc_size = actual_size + UCODE_CONTAINER_SECTION_HDR; |
185 | 209 | ||
210 | out: | ||
186 | return mc; | 211 | return mc; |
187 | } | 212 | } |
188 | 213 | ||
189 | static int install_equiv_cpu_table(const u8 *buf) | 214 | static int install_equiv_cpu_table(const u8 *buf) |
190 | { | 215 | { |
191 | u8 *container_hdr[UCODE_CONTAINER_HEADER_SIZE]; | 216 | unsigned int *ibuf = (unsigned int *)buf; |
192 | unsigned int *buf_pos = (unsigned int *)container_hdr; | 217 | unsigned int type = ibuf[1]; |
193 | unsigned long size; | 218 | unsigned int size = ibuf[2]; |
194 | 219 | ||
195 | get_ucode_data(&container_hdr, buf, UCODE_CONTAINER_HEADER_SIZE); | 220 | if (type != UCODE_EQUIV_CPU_TABLE_TYPE || !size) { |
196 | 221 | pr_err("empty section/" | |
197 | size = buf_pos[2]; | 222 | "invalid type field in container file section header\n"); |
198 | 223 | return -EINVAL; | |
199 | if (buf_pos[1] != UCODE_EQUIV_CPU_TABLE_TYPE || !size) { | ||
200 | pr_err("error: invalid type field in container file section header\n"); | ||
201 | return 0; | ||
202 | } | 224 | } |
203 | 225 | ||
204 | equiv_cpu_table = vmalloc(size); | 226 | equiv_cpu_table = vmalloc(size); |
205 | if (!equiv_cpu_table) { | 227 | if (!equiv_cpu_table) { |
206 | pr_err("failed to allocate equivalent CPU table\n"); | 228 | pr_err("failed to allocate equivalent CPU table\n"); |
207 | return 0; | 229 | return -ENOMEM; |
208 | } | 230 | } |
209 | 231 | ||
210 | buf += UCODE_CONTAINER_HEADER_SIZE; | 232 | get_ucode_data(equiv_cpu_table, buf + UCODE_CONTAINER_HEADER_SIZE, size); |
211 | get_ucode_data(equiv_cpu_table, buf, size); | ||
212 | 233 | ||
213 | return size + UCODE_CONTAINER_HEADER_SIZE; /* add header length */ | 234 | return size + UCODE_CONTAINER_HEADER_SIZE; /* add header length */ |
214 | } | 235 | } |
@@ -223,16 +244,16 @@ static enum ucode_state | |||
223 | generic_load_microcode(int cpu, const u8 *data, size_t size) | 244 | generic_load_microcode(int cpu, const u8 *data, size_t size) |
224 | { | 245 | { |
225 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | 246 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; |
247 | struct microcode_header_amd *mc_hdr = NULL; | ||
248 | unsigned int mc_size, leftover; | ||
249 | int offset; | ||
226 | const u8 *ucode_ptr = data; | 250 | const u8 *ucode_ptr = data; |
227 | void *new_mc = NULL; | 251 | void *new_mc = NULL; |
228 | void *mc; | 252 | unsigned int new_rev = uci->cpu_sig.rev; |
229 | int new_rev = uci->cpu_sig.rev; | ||
230 | unsigned int leftover; | ||
231 | unsigned long offset; | ||
232 | enum ucode_state state = UCODE_OK; | 253 | enum ucode_state state = UCODE_OK; |
233 | 254 | ||
234 | offset = install_equiv_cpu_table(ucode_ptr); | 255 | offset = install_equiv_cpu_table(ucode_ptr); |
235 | if (!offset) { | 256 | if (offset < 0) { |
236 | pr_err("failed to create equivalent cpu table\n"); | 257 | pr_err("failed to create equivalent cpu table\n"); |
237 | return UCODE_ERROR; | 258 | return UCODE_ERROR; |
238 | } | 259 | } |
@@ -241,64 +262,65 @@ generic_load_microcode(int cpu, const u8 *data, size_t size) | |||
241 | leftover = size - offset; | 262 | leftover = size - offset; |
242 | 263 | ||
243 | while (leftover) { | 264 | while (leftover) { |
244 | unsigned int uninitialized_var(mc_size); | 265 | mc_hdr = get_next_ucode(cpu, ucode_ptr, leftover, &mc_size); |
245 | struct microcode_header_amd *mc_header; | 266 | if (!mc_hdr) |
246 | |||
247 | mc = get_next_ucode(ucode_ptr, leftover, &mc_size); | ||
248 | if (!mc) | ||
249 | break; | 267 | break; |
250 | 268 | ||
251 | mc_header = (struct microcode_header_amd *)mc; | 269 | if (get_matching_microcode(cpu, mc_hdr, new_rev)) { |
252 | if (get_matching_microcode(cpu, mc, new_rev)) { | ||
253 | vfree(new_mc); | 270 | vfree(new_mc); |
254 | new_rev = mc_header->patch_id; | 271 | new_rev = mc_hdr->patch_id; |
255 | new_mc = mc; | 272 | new_mc = mc_hdr; |
256 | } else | 273 | } else |
257 | vfree(mc); | 274 | vfree(mc_hdr); |
258 | 275 | ||
259 | ucode_ptr += mc_size; | 276 | ucode_ptr += mc_size; |
260 | leftover -= mc_size; | 277 | leftover -= mc_size; |
261 | } | 278 | } |
262 | 279 | ||
263 | if (new_mc) { | 280 | if (!new_mc) { |
264 | if (!leftover) { | ||
265 | vfree(uci->mc); | ||
266 | uci->mc = new_mc; | ||
267 | pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n", | ||
268 | cpu, new_rev, uci->cpu_sig.rev); | ||
269 | } else { | ||
270 | vfree(new_mc); | ||
271 | state = UCODE_ERROR; | ||
272 | } | ||
273 | } else | ||
274 | state = UCODE_NFOUND; | 281 | state = UCODE_NFOUND; |
282 | goto free_table; | ||
283 | } | ||
275 | 284 | ||
285 | if (!leftover) { | ||
286 | vfree(uci->mc); | ||
287 | uci->mc = new_mc; | ||
288 | pr_debug("CPU%d update ucode (0x%08x -> 0x%08x)\n", | ||
289 | cpu, uci->cpu_sig.rev, new_rev); | ||
290 | } else { | ||
291 | vfree(new_mc); | ||
292 | state = UCODE_ERROR; | ||
293 | } | ||
294 | |||
295 | free_table: | ||
276 | free_equiv_cpu_table(); | 296 | free_equiv_cpu_table(); |
277 | 297 | ||
278 | return state; | 298 | return state; |
279 | } | 299 | } |
280 | 300 | ||
281 | static enum ucode_state request_microcode_fw(int cpu, struct device *device) | 301 | static enum ucode_state request_microcode_amd(int cpu, struct device *device) |
282 | { | 302 | { |
283 | const char *fw_name = "amd-ucode/microcode_amd.bin"; | 303 | const char *fw_name = "amd-ucode/microcode_amd.bin"; |
284 | const struct firmware *firmware; | 304 | const struct firmware *fw; |
285 | enum ucode_state ret; | 305 | enum ucode_state ret = UCODE_NFOUND; |
286 | 306 | ||
287 | if (request_firmware(&firmware, fw_name, device)) { | 307 | if (request_firmware(&fw, fw_name, device)) { |
288 | printk(KERN_ERR "microcode: failed to load file %s\n", fw_name); | 308 | pr_err("failed to load file %s\n", fw_name); |
289 | return UCODE_NFOUND; | 309 | goto out; |
290 | } | 310 | } |
291 | 311 | ||
292 | if (*(u32 *)firmware->data != UCODE_MAGIC) { | 312 | ret = UCODE_ERROR; |
293 | pr_err("invalid UCODE_MAGIC (0x%08x)\n", | 313 | if (*(u32 *)fw->data != UCODE_MAGIC) { |
294 | *(u32 *)firmware->data); | 314 | pr_err("invalid magic value (0x%08x)\n", *(u32 *)fw->data); |
295 | return UCODE_ERROR; | 315 | goto fw_release; |
296 | } | 316 | } |
297 | 317 | ||
298 | ret = generic_load_microcode(cpu, firmware->data, firmware->size); | 318 | ret = generic_load_microcode(cpu, fw->data, fw->size); |
299 | 319 | ||
300 | release_firmware(firmware); | 320 | fw_release: |
321 | release_firmware(fw); | ||
301 | 322 | ||
323 | out: | ||
302 | return ret; | 324 | return ret; |
303 | } | 325 | } |
304 | 326 | ||
@@ -319,7 +341,7 @@ static void microcode_fini_cpu_amd(int cpu) | |||
319 | 341 | ||
320 | static struct microcode_ops microcode_amd_ops = { | 342 | static struct microcode_ops microcode_amd_ops = { |
321 | .request_microcode_user = request_microcode_user, | 343 | .request_microcode_user = request_microcode_user, |
322 | .request_microcode_fw = request_microcode_fw, | 344 | .request_microcode_fw = request_microcode_amd, |
323 | .collect_cpu_info = collect_cpu_info_amd, | 345 | .collect_cpu_info = collect_cpu_info_amd, |
324 | .apply_microcode = apply_microcode_amd, | 346 | .apply_microcode = apply_microcode_amd, |
325 | .microcode_fini_cpu = microcode_fini_cpu_amd, | 347 | .microcode_fini_cpu = microcode_fini_cpu_amd, |
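verify_ucode_size() above replaces the single UCODE_MAX_SIZE cap with per-family limits and reads the patch length from bytes 4-5 of the section, little endian. A standalone sketch of that check; the family limits are taken from the hunk, while the example header bytes are made up:

#include <stdio.h>
#include <stdint.h>

static unsigned int max_mpb_size(unsigned int family)
{
        switch (family) {
        case 0x14: return 1824;   /* F14H_MPB_MAX_SIZE */
        case 0x15: return 4096;   /* F15H_MPB_MAX_SIZE */
        default:   return 2048;   /* F1XH_MPB_MAX_SIZE */
        }
}

static unsigned int verify_size(unsigned int family, const uint8_t *buf,
                                unsigned int leftover)
{
        unsigned int actual = buf[4] + (buf[5] << 8);   /* little-endian length */

        if (actual > leftover || actual > max_mpb_size(family))
                return 0;               /* reject, as the kernel does */
        return actual;
}

int main(void)
{
        uint8_t hdr[8] = { 0, 0, 0, 0, 0x00, 0x0c, 0, 0 };  /* 0x0c00 = 3072 */

        printf("family 0x15: %u\n", verify_size(0x15, hdr, 4096)); /* 3072: fits */
        printf("family 0x10: %u\n", verify_size(0x10, hdr, 4096)); /* 0: over 2048 */
        return 0;
}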
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c index 1cca374a2bac..87af68e0e1e1 100644 --- a/arch/x86/kernel/microcode_core.c +++ b/arch/x86/kernel/microcode_core.c | |||
@@ -417,8 +417,10 @@ static int mc_sysdev_add(struct sys_device *sys_dev) | |||
417 | if (err) | 417 | if (err) |
418 | return err; | 418 | return err; |
419 | 419 | ||
420 | if (microcode_init_cpu(cpu) == UCODE_ERROR) | 420 | if (microcode_init_cpu(cpu) == UCODE_ERROR) { |
421 | err = -EINVAL; | 421 | sysfs_remove_group(&sys_dev->kobj, &mc_attr_group); |
422 | return -EINVAL; | ||
423 | } | ||
422 | 424 | ||
423 | return err; | 425 | return err; |
424 | } | 426 | } |
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index ff4554198981..99fa3adf0141 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -110,12 +110,9 @@ void show_regs_common(void) | |||
110 | init_utsname()->release, | 110 | init_utsname()->release, |
111 | (int)strcspn(init_utsname()->version, " "), | 111 | (int)strcspn(init_utsname()->version, " "), |
112 | init_utsname()->version); | 112 | init_utsname()->version); |
113 | printk(KERN_CONT " "); | 113 | printk(KERN_CONT " %s %s", vendor, product); |
114 | printk(KERN_CONT "%s %s", vendor, product); | 114 | if (board) |
115 | if (board) { | 115 | printk(KERN_CONT "/%s", board); |
116 | printk(KERN_CONT "/"); | ||
117 | printk(KERN_CONT "%s", board); | ||
118 | } | ||
119 | printk(KERN_CONT "\n"); | 116 | printk(KERN_CONT "\n"); |
120 | } | 117 | } |
121 | 118 | ||
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 715037caeb43..d3ce37edb54d 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -303,68 +303,16 @@ static int __init reboot_init(void) | |||
303 | } | 303 | } |
304 | core_initcall(reboot_init); | 304 | core_initcall(reboot_init); |
305 | 305 | ||
306 | /* The following code and data reboots the machine by switching to real | 306 | extern const unsigned char machine_real_restart_asm[]; |
307 | mode and jumping to the BIOS reset entry point, as if the CPU has | 307 | extern const u64 machine_real_restart_gdt[3]; |
308 | really been reset. The previous version asked the keyboard | ||
309 | controller to pulse the CPU reset line, which is more thorough, but | ||
310 | doesn't work with at least one type of 486 motherboard. It is easy | ||
311 | to stop this code working; hence the copious comments. */ | ||
312 | static const unsigned long long | ||
313 | real_mode_gdt_entries [3] = | ||
314 | { | ||
315 | 0x0000000000000000ULL, /* Null descriptor */ | ||
316 | 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */ | ||
317 | 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */ | ||
318 | }; | ||
319 | 308 | ||
320 | static const struct desc_ptr | 309 | void machine_real_restart(unsigned int type) |
321 | real_mode_gdt = { sizeof (real_mode_gdt_entries) - 1, (long)real_mode_gdt_entries }, | ||
322 | real_mode_idt = { 0x3ff, 0 }; | ||
323 | |||
324 | /* This is 16-bit protected mode code to disable paging and the cache, | ||
325 | switch to real mode and jump to the BIOS reset code. | ||
326 | |||
327 | The instruction that switches to real mode by writing to CR0 must be | ||
328 | followed immediately by a far jump instruction, which set CS to a | ||
329 | valid value for real mode, and flushes the prefetch queue to avoid | ||
330 | running instructions that have already been decoded in protected | ||
331 | mode. | ||
332 | |||
333 | Clears all the flags except ET, especially PG (paging), PE | ||
334 | (protected-mode enable) and TS (task switch for coprocessor state | ||
335 | save). Flushes the TLB after paging has been disabled. Sets CD and | ||
336 | NW, to disable the cache on a 486, and invalidates the cache. This | ||
337 | is more like the state of a 486 after reset. I don't know if | ||
338 | something else should be done for other chips. | ||
339 | |||
340 | More could be done here to set up the registers as if a CPU reset had | ||
341 | occurred; hopefully real BIOSs don't assume much. */ | ||
342 | static const unsigned char real_mode_switch [] = | ||
343 | { | ||
344 | 0x66, 0x0f, 0x20, 0xc0, /* movl %cr0,%eax */ | ||
345 | 0x66, 0x83, 0xe0, 0x11, /* andl $0x00000011,%eax */ | ||
346 | 0x66, 0x0d, 0x00, 0x00, 0x00, 0x60, /* orl $0x60000000,%eax */ | ||
347 | 0x66, 0x0f, 0x22, 0xc0, /* movl %eax,%cr0 */ | ||
348 | 0x66, 0x0f, 0x22, 0xd8, /* movl %eax,%cr3 */ | ||
349 | 0x66, 0x0f, 0x20, 0xc3, /* movl %cr0,%ebx */ | ||
350 | 0x66, 0x81, 0xe3, 0x00, 0x00, 0x00, 0x60, /* andl $0x60000000,%ebx */ | ||
351 | 0x74, 0x02, /* jz f */ | ||
352 | 0x0f, 0x09, /* wbinvd */ | ||
353 | 0x24, 0x10, /* f: andb $0x10,al */ | ||
354 | 0x66, 0x0f, 0x22, 0xc0 /* movl %eax,%cr0 */ | ||
355 | }; | ||
356 | static const unsigned char jump_to_bios [] = | ||
357 | { | 310 | { |
358 | 0xea, 0x00, 0x00, 0xff, 0xff /* ljmp $0xffff,$0x0000 */ | 311 | void *restart_va; |
359 | }; | 312 | unsigned long restart_pa; |
313 | void (*restart_lowmem)(unsigned int); | ||
314 | u64 *lowmem_gdt; | ||
360 | 315 | ||
361 | /* | ||
362 | * Switch to real mode and then execute the code | ||
363 | * specified by the code and length parameters. | ||
364 | * We assume that length will always be less than 100! | ||
365 | */ | ||
366 | void machine_real_restart(const unsigned char *code, int length) | ||
367 | { | ||
368 | local_irq_disable(); | 316 | local_irq_disable(); |
369 | 317 | ||
370 | /* Write zero to CMOS register number 0x0f, which the BIOS POST | 318 | /* Write zero to CMOS register number 0x0f, which the BIOS POST |
@@ -392,41 +340,23 @@ void machine_real_restart(const unsigned char *code, int length) | |||
392 | too. */ | 340 | too. */ |
393 | *((unsigned short *)0x472) = reboot_mode; | 341 | *((unsigned short *)0x472) = reboot_mode; |
394 | 342 | ||
395 | /* For the switch to real mode, copy some code to low memory. It has | 343 | /* Patch the GDT in the low memory trampoline */ |
396 | to be in the first 64k because it is running in 16-bit mode, and it | 344 | lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt); |
397 | has to have the same physical and virtual address, because it turns | 345 | |
398 | off paging. Copy it near the end of the first page, out of the way | 346 | restart_va = TRAMPOLINE_SYM(machine_real_restart_asm); |
399 | of BIOS variables. */ | 347 | restart_pa = virt_to_phys(restart_va); |
400 | memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100), | 348 | restart_lowmem = (void (*)(unsigned int))restart_pa; |
401 | real_mode_switch, sizeof (real_mode_switch)); | 349 | |
402 | memcpy((void *)(0x1000 - 100), code, length); | 350 | /* GDT[0]: GDT self-pointer */ |
403 | 351 | lowmem_gdt[0] = | |
404 | /* Set up the IDT for real mode. */ | 352 | (u64)(sizeof(machine_real_restart_gdt) - 1) + |
405 | load_idt(&real_mode_idt); | 353 | ((u64)virt_to_phys(lowmem_gdt) << 16); |
406 | 354 | /* GDT[1]: 64K real mode code segment */ | |
407 | /* Set up a GDT from which we can load segment descriptors for real | 355 | lowmem_gdt[1] = |
408 | mode. The GDT is not used in real mode; it is just needed here to | 356 | GDT_ENTRY(0x009b, restart_pa, 0xffff); |
409 | prepare the descriptors. */ | 357 | |
410 | load_gdt(&real_mode_gdt); | 358 | /* Jump to the identity-mapped low memory code */ |
411 | 359 | restart_lowmem(type); | |
412 | /* Load the data segment registers, and thus the descriptors ready for | ||
413 | real mode. The base address of each segment is 0x100, 16 times the | ||
414 | selector value being loaded here. This is so that the segment | ||
415 | registers don't have to be reloaded after switching to real mode: | ||
416 | the values are consistent for real mode operation already. */ | ||
417 | __asm__ __volatile__ ("movl $0x0010,%%eax\n" | ||
418 | "\tmovl %%eax,%%ds\n" | ||
419 | "\tmovl %%eax,%%es\n" | ||
420 | "\tmovl %%eax,%%fs\n" | ||
421 | "\tmovl %%eax,%%gs\n" | ||
422 | "\tmovl %%eax,%%ss" : : : "eax"); | ||
423 | |||
424 | /* Jump to the 16-bit code that we copied earlier. It disables paging | ||
425 | and the cache, switches to real mode, and jumps to the BIOS reset | ||
426 | entry point. */ | ||
427 | __asm__ __volatile__ ("ljmp $0x0008,%0" | ||
428 | : | ||
429 | : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100))); | ||
430 | } | 360 | } |
431 | #ifdef CONFIG_APM_MODULE | 361 | #ifdef CONFIG_APM_MODULE |
432 | EXPORT_SYMBOL(machine_real_restart); | 362 | EXPORT_SYMBOL(machine_real_restart); |
@@ -581,7 +511,7 @@ static void native_machine_emergency_restart(void) | |||
581 | 511 | ||
582 | #ifdef CONFIG_X86_32 | 512 | #ifdef CONFIG_X86_32 |
583 | case BOOT_BIOS: | 513 | case BOOT_BIOS: |
584 | machine_real_restart(jump_to_bios, sizeof(jump_to_bios)); | 514 | machine_real_restart(MRR_BIOS); |
585 | 515 | ||
586 | reboot_type = BOOT_KBD; | 516 | reboot_type = BOOT_KBD; |
587 | break; | 517 | break; |
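The rewritten machine_real_restart() above patches the low-memory GDT with GDT_ENTRY(0x009b, restart_pa, 0xffff), a 16-bit 64K code segment based at the trampoline's physical address. As a rough standalone sketch of that descriptor packing (not the kernel's own GDT_ENTRY macro; the gdt_entry() helper and the 0x9e000 example address are assumptions for illustration):

#include <stdint.h>
#include <stdio.h>

/* Pack a classic x86 segment descriptor: limit[15:0], base[23:0],
 * access byte + flags nibble + limit[19:16], base[31:24].  'flags' is
 * the 16-bit value carrying the access byte in bits 7:0 and the
 * granularity nibble in bits 15:12, as in GDT_ENTRY(0x009b, ...) above. */
static uint64_t gdt_entry(uint16_t flags, uint32_t base, uint32_t limit)
{
	return ((uint64_t)(base  & 0xff000000u) << 32) |
	       ((uint64_t)(flags & 0x0000f0ffu) << 40) |
	       ((uint64_t)(limit & 0x000f0000u) << 32) |
	       ((uint64_t)(base  & 0x00ffffffu) << 16) |
	       ((uint64_t)(limit & 0x0000ffffu));
}

int main(void)
{
	/* 16-bit 64K code segment at a hypothetical trampoline
	 * physical address of 0x9e000. */
	printf("%016llx\n",
	       (unsigned long long)gdt_entry(0x009b, 0x9e000, 0xffff));
	return 0;
}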
diff --git a/arch/x86/kernel/reboot_32.S b/arch/x86/kernel/reboot_32.S new file mode 100644 index 000000000000..29092b38d816 --- /dev/null +++ b/arch/x86/kernel/reboot_32.S | |||
@@ -0,0 +1,135 @@ | |||
1 | #include <linux/linkage.h> | ||
2 | #include <linux/init.h> | ||
3 | #include <asm/segment.h> | ||
4 | #include <asm/page_types.h> | ||
5 | |||
6 | /* | ||
7 | * The following code and data reboots the machine by switching to real | ||
8 | * mode and jumping to the BIOS reset entry point, as if the CPU has | ||
9 | * really been reset. The previous version asked the keyboard | ||
10 | * controller to pulse the CPU reset line, which is more thorough, but | ||
11 | * doesn't work with at least one type of 486 motherboard. It is easy | ||
12 | * to stop this code working; hence the copious comments. | ||
13 | * | ||
14 | * This code is called with the restart type (0 = BIOS, 1 = APM) in %eax. | ||
15 | */ | ||
16 | .section ".x86_trampoline","a" | ||
17 | .balign 16 | ||
18 | .code32 | ||
19 | ENTRY(machine_real_restart_asm) | ||
20 | r_base = . | ||
21 | /* Get our own relocated address */ | ||
22 | call 1f | ||
23 | 1: popl %ebx | ||
24 | subl $1b, %ebx | ||
25 | |||
26 | /* Compute the equivalent real-mode segment */ | ||
27 | movl %ebx, %ecx | ||
28 | shrl $4, %ecx | ||
29 | |||
30 | /* Patch post-real-mode segment jump */ | ||
31 | movw dispatch_table(%ebx,%eax,2),%ax | ||
32 | movw %ax, 101f(%ebx) | ||
33 | movw %cx, 102f(%ebx) | ||
34 | |||
35 | /* Set up the IDT for real mode. */ | ||
36 | lidtl machine_real_restart_idt(%ebx) | ||
37 | |||
38 | /* | ||
39 | * Set up a GDT from which we can load segment descriptors for real | ||
40 | * mode. The GDT is not used in real mode; it is just needed here to | ||
41 | * prepare the descriptors. | ||
42 | */ | ||
43 | lgdtl machine_real_restart_gdt(%ebx) | ||
44 | |||
45 | /* | ||
46 | * Load the data segment registers with 16-bit compatible values | ||
47 | */ | ||
48 | movl $16, %ecx | ||
49 | movl %ecx, %ds | ||
50 | movl %ecx, %es | ||
51 | movl %ecx, %fs | ||
52 | movl %ecx, %gs | ||
53 | movl %ecx, %ss | ||
54 | ljmpl $8, $1f - r_base | ||
55 | |||
56 | /* | ||
57 | * This is 16-bit protected mode code to disable paging and the cache, | ||
58 | * switch to real mode and jump to the BIOS reset code. | ||
59 | * | ||
60 | * The instruction that switches to real mode by writing to CR0 must be | ||
61 | * followed immediately by a far jump instruction, which sets CS to a | ||
62 | * valid value for real mode, and flushes the prefetch queue to avoid | ||
63 | * running instructions that have already been decoded in protected | ||
64 | * mode. | ||
65 | * | ||
66 | * Clears all the flags except ET, especially PG (paging), PE | ||
67 | * (protected-mode enable) and TS (task switch for coprocessor state | ||
68 | * save). Flushes the TLB after paging has been disabled. Sets CD and | ||
69 | * NW, to disable the cache on a 486, and invalidates the cache. This | ||
70 | * is more like the state of a 486 after reset. I don't know if | ||
71 | * something else should be done for other chips. | ||
72 | * | ||
73 | * More could be done here to set up the registers as if a CPU reset had | ||
74 | * occurred; hopefully real BIOSs don't assume much. This is not the | ||
75 | * actual BIOS entry point, anyway (that is at 0xfffffff0). | ||
76 | * | ||
77 | * Most of this work is probably excessive, but it is what is tested. | ||
78 | */ | ||
79 | .code16 | ||
80 | 1: | ||
81 | xorl %ecx, %ecx | ||
82 | movl %cr0, %eax | ||
83 | andl $0x00000011, %eax | ||
84 | orl $0x60000000, %eax | ||
85 | movl %eax, %cr0 | ||
86 | movl %ecx, %cr3 | ||
87 | movl %cr0, %edx | ||
88 | andl $0x60000000, %edx /* If no cache bits -> no wbinvd */ | ||
89 | jz 2f | ||
90 | wbinvd | ||
91 | 2: | ||
92 | andb $0x10, %al | ||
93 | movl %eax, %cr0 | ||
94 | .byte 0xea /* ljmpw */ | ||
95 | 101: .word 0 /* Offset */ | ||
96 | 102: .word 0 /* Segment */ | ||
97 | |||
98 | bios: | ||
99 | ljmpw $0xf000, $0xfff0 | ||
100 | |||
101 | apm: | ||
102 | movw $0x1000, %ax | ||
103 | movw %ax, %ss | ||
104 | movw $0xf000, %sp | ||
105 | movw $0x5307, %ax | ||
106 | movw $0x0001, %bx | ||
107 | movw $0x0003, %cx | ||
108 | int $0x15 | ||
109 | |||
110 | END(machine_real_restart_asm) | ||
111 | |||
112 | .balign 16 | ||
113 | /* These must match <asm/reboot.h> */ | ||
114 | dispatch_table: | ||
115 | .word bios - r_base | ||
116 | .word apm - r_base | ||
117 | END(dispatch_table) | ||
118 | |||
119 | .balign 16 | ||
120 | machine_real_restart_idt: | ||
121 | .word 0xffff /* Length - real mode default value */ | ||
122 | .long 0 /* Base - real mode default value */ | ||
123 | END(machine_real_restart_idt) | ||
124 | |||
125 | .balign 16 | ||
126 | ENTRY(machine_real_restart_gdt) | ||
127 | .quad 0 /* Self-pointer, filled in by PM code */ | ||
128 | .quad 0 /* 16-bit code segment, filled in by PM code */ | ||
129 | /* | ||
130 | * 16-bit data segment with the selector value 16 = 0x10 and | ||
131 | * base value 0x100; since this is consistent with real mode | ||
132 | * semantics we don't have to reload the segments once CR0.PE = 0. | ||
133 | */ | ||
134 | .quad GDT_ENTRY(0x0093, 0x100, 0xffff) | ||
135 | END(machine_real_restart_gdt) | ||
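machine_real_restart_asm above discovers its own relocated address (the call/pop/sub sequence), shifts it right by four to get a real-mode segment, and patches that segment plus a 16-bit offset from the dispatch table into the ljmpw. A minimal sketch of the segment:offset arithmetic for an address below 1 MiB; r_base_phys and entry_off are made-up example values, not taken from the kernel:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical physical address of the copied, page-aligned
	 * trampoline; it must sit below 1 MiB so real mode can reach it. */
	uint32_t r_base_phys = 0x9e000;
	uint32_t entry_off   = 0x62;          /* e.g. 'bios' - r_base */

	uint16_t segment = r_base_phys >> 4;  /* what the asm keeps in %cx */
	uint16_t offset  = entry_off;         /* patched into the ljmpw    */

	/* segment:offset resolves back to the same linear address */
	uint32_t linear = ((uint32_t)segment << 4) + offset;

	printf("%04x:%04x -> %05x\n",
	       (unsigned)segment, (unsigned)offset, (unsigned)linear);
	return 0;
}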
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c index 6f39cab052d5..3f2ad2640d85 100644 --- a/arch/x86/kernel/rtc.c +++ b/arch/x86/kernel/rtc.c | |||
@@ -6,6 +6,7 @@ | |||
6 | #include <linux/acpi.h> | 6 | #include <linux/acpi.h> |
7 | #include <linux/bcd.h> | 7 | #include <linux/bcd.h> |
8 | #include <linux/pnp.h> | 8 | #include <linux/pnp.h> |
9 | #include <linux/of.h> | ||
9 | 10 | ||
10 | #include <asm/vsyscall.h> | 11 | #include <asm/vsyscall.h> |
11 | #include <asm/x86_init.h> | 12 | #include <asm/x86_init.h> |
@@ -236,6 +237,8 @@ static __init int add_rtc_cmos(void) | |||
236 | } | 237 | } |
237 | } | 238 | } |
238 | #endif | 239 | #endif |
240 | if (of_have_populated_dt()) | ||
241 | return 0; | ||
239 | 242 | ||
240 | platform_device_register(&rtc_device); | 243 | platform_device_register(&rtc_device); |
241 | dev_info(&rtc_device.dev, | 244 | dev_info(&rtc_device.dev, |
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index d3cfe26c0252..9d43b28e0728 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -113,6 +113,7 @@ | |||
113 | #endif | 113 | #endif |
114 | #include <asm/mce.h> | 114 | #include <asm/mce.h> |
115 | #include <asm/alternative.h> | 115 | #include <asm/alternative.h> |
116 | #include <asm/prom.h> | ||
116 | 117 | ||
117 | /* | 118 | /* |
118 | * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries. | 119 | * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries. |
@@ -293,10 +294,32 @@ static void __init init_gbpages(void) | |||
293 | else | 294 | else |
294 | direct_gbpages = 0; | 295 | direct_gbpages = 0; |
295 | } | 296 | } |
297 | |||
298 | static void __init cleanup_highmap_brk_end(void) | ||
299 | { | ||
300 | pud_t *pud; | ||
301 | pmd_t *pmd; | ||
302 | |||
303 | mmu_cr4_features = read_cr4(); | ||
304 | |||
305 | /* | ||
306 | * _brk_end cannot change anymore, but it and _end may be | ||
307 | * located on different 2M pages. cleanup_highmap(), however, | ||
308 | * can only consider _end when it runs, so destroy any | ||
309 | * mappings beyond _brk_end here. | ||
310 | */ | ||
311 | pud = pud_offset(pgd_offset_k(_brk_end), _brk_end); | ||
312 | pmd = pmd_offset(pud, _brk_end - 1); | ||
313 | while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1)) | ||
314 | pmd_clear(pmd); | ||
315 | } | ||
296 | #else | 316 | #else |
297 | static inline void init_gbpages(void) | 317 | static inline void init_gbpages(void) |
298 | { | 318 | { |
299 | } | 319 | } |
320 | static inline void cleanup_highmap_brk_end(void) | ||
321 | { | ||
322 | } | ||
300 | #endif | 323 | #endif |
301 | 324 | ||
302 | static void __init reserve_brk(void) | 325 | static void __init reserve_brk(void) |
@@ -307,6 +330,8 @@ static void __init reserve_brk(void) | |||
307 | /* Mark brk area as locked down and no longer taking any | 330 | /* Mark brk area as locked down and no longer taking any |
308 | new allocations */ | 331 | new allocations */ |
309 | _brk_start = 0; | 332 | _brk_start = 0; |
333 | |||
334 | cleanup_highmap_brk_end(); | ||
310 | } | 335 | } |
311 | 336 | ||
312 | #ifdef CONFIG_BLK_DEV_INITRD | 337 | #ifdef CONFIG_BLK_DEV_INITRD |
@@ -429,16 +454,30 @@ static void __init parse_setup_data(void) | |||
429 | return; | 454 | return; |
430 | pa_data = boot_params.hdr.setup_data; | 455 | pa_data = boot_params.hdr.setup_data; |
431 | while (pa_data) { | 456 | while (pa_data) { |
432 | data = early_memremap(pa_data, PAGE_SIZE); | 457 | u32 data_len, map_len; |
458 | |||
459 | map_len = max(PAGE_SIZE - (pa_data & ~PAGE_MASK), | ||
460 | (u64)sizeof(struct setup_data)); | ||
461 | data = early_memremap(pa_data, map_len); | ||
462 | data_len = data->len + sizeof(struct setup_data); | ||
463 | if (data_len > map_len) { | ||
464 | early_iounmap(data, map_len); | ||
465 | data = early_memremap(pa_data, data_len); | ||
466 | map_len = data_len; | ||
467 | } | ||
468 | |||
433 | switch (data->type) { | 469 | switch (data->type) { |
434 | case SETUP_E820_EXT: | 470 | case SETUP_E820_EXT: |
435 | parse_e820_ext(data, pa_data); | 471 | parse_e820_ext(data); |
472 | break; | ||
473 | case SETUP_DTB: | ||
474 | add_dtb(pa_data); | ||
436 | break; | 475 | break; |
437 | default: | 476 | default: |
438 | break; | 477 | break; |
439 | } | 478 | } |
440 | pa_data = data->next; | 479 | pa_data = data->next; |
441 | early_iounmap(data, PAGE_SIZE); | 480 | early_iounmap(data, map_len); |
442 | } | 481 | } |
443 | } | 482 | } |
444 | 483 | ||
@@ -680,15 +719,6 @@ static int __init parse_reservelow(char *p) | |||
680 | 719 | ||
681 | early_param("reservelow", parse_reservelow); | 720 | early_param("reservelow", parse_reservelow); |
682 | 721 | ||
683 | static u64 __init get_max_mapped(void) | ||
684 | { | ||
685 | u64 end = max_pfn_mapped; | ||
686 | |||
687 | end <<= PAGE_SHIFT; | ||
688 | |||
689 | return end; | ||
690 | } | ||
691 | |||
692 | /* | 722 | /* |
693 | * Determine if we were loaded by an EFI loader. If so, then we have also been | 723 | * Determine if we were loaded by an EFI loader. If so, then we have also been |
694 | * passed the efi memmap, systab, etc., so we should use these data structures | 724 | * passed the efi memmap, systab, etc., so we should use these data structures |
@@ -704,8 +734,6 @@ static u64 __init get_max_mapped(void) | |||
704 | 734 | ||
705 | void __init setup_arch(char **cmdline_p) | 735 | void __init setup_arch(char **cmdline_p) |
706 | { | 736 | { |
707 | int acpi = 0; | ||
708 | int amd = 0; | ||
709 | unsigned long flags; | 737 | unsigned long flags; |
710 | 738 | ||
711 | #ifdef CONFIG_X86_32 | 739 | #ifdef CONFIG_X86_32 |
@@ -935,15 +963,8 @@ void __init setup_arch(char **cmdline_p) | |||
935 | printk(KERN_DEBUG "initial memory mapped : 0 - %08lx\n", | 963 | printk(KERN_DEBUG "initial memory mapped : 0 - %08lx\n", |
936 | max_pfn_mapped<<PAGE_SHIFT); | 964 | max_pfn_mapped<<PAGE_SHIFT); |
937 | 965 | ||
938 | reserve_trampoline_memory(); | 966 | setup_trampolines(); |
939 | 967 | ||
940 | #ifdef CONFIG_ACPI_SLEEP | ||
941 | /* | ||
942 | * Reserve low memory region for sleep support. | ||
943 | * even before init_memory_mapping | ||
944 | */ | ||
945 | acpi_reserve_wakeup_memory(); | ||
946 | #endif | ||
947 | init_gbpages(); | 968 | init_gbpages(); |
948 | 969 | ||
949 | /* max_pfn_mapped is updated here */ | 970 | /* max_pfn_mapped is updated here */ |
@@ -984,19 +1005,7 @@ void __init setup_arch(char **cmdline_p) | |||
984 | 1005 | ||
985 | early_acpi_boot_init(); | 1006 | early_acpi_boot_init(); |
986 | 1007 | ||
987 | #ifdef CONFIG_ACPI_NUMA | 1008 | initmem_init(); |
988 | /* | ||
989 | * Parse SRAT to discover nodes. | ||
990 | */ | ||
991 | acpi = acpi_numa_init(); | ||
992 | #endif | ||
993 | |||
994 | #ifdef CONFIG_AMD_NUMA | ||
995 | if (!acpi) | ||
996 | amd = !amd_numa_init(0, max_pfn); | ||
997 | #endif | ||
998 | |||
999 | initmem_init(0, max_pfn, acpi, amd); | ||
1000 | memblock_find_dma_reserve(); | 1009 | memblock_find_dma_reserve(); |
1001 | dma32_reserve_bootmem(); | 1010 | dma32_reserve_bootmem(); |
1002 | 1011 | ||
@@ -1029,8 +1038,8 @@ void __init setup_arch(char **cmdline_p) | |||
1029 | * Read APIC and some other early information from ACPI tables. | 1038 | * Read APIC and some other early information from ACPI tables. |
1030 | */ | 1039 | */ |
1031 | acpi_boot_init(); | 1040 | acpi_boot_init(); |
1032 | |||
1033 | sfi_init(); | 1041 | sfi_init(); |
1042 | x86_dtb_init(); | ||
1034 | 1043 | ||
1035 | /* | 1044 | /* |
1036 | * get boot-time SMP configuration: | 1045 | * get boot-time SMP configuration: |
@@ -1040,9 +1049,7 @@ void __init setup_arch(char **cmdline_p) | |||
1040 | 1049 | ||
1041 | prefill_possible_map(); | 1050 | prefill_possible_map(); |
1042 | 1051 | ||
1043 | #ifdef CONFIG_X86_64 | ||
1044 | init_cpu_to_node(); | 1052 | init_cpu_to_node(); |
1045 | #endif | ||
1046 | 1053 | ||
1047 | init_apic_mappings(); | 1054 | init_apic_mappings(); |
1048 | ioapic_and_gsi_init(); | 1055 | ioapic_and_gsi_init(); |
@@ -1066,6 +1073,8 @@ void __init setup_arch(char **cmdline_p) | |||
1066 | #endif | 1073 | #endif |
1067 | x86_init.oem.banner(); | 1074 | x86_init.oem.banner(); |
1068 | 1075 | ||
1076 | x86_init.timers.wallclock_init(); | ||
1077 | |||
1069 | mcheck_init(); | 1078 | mcheck_init(); |
1070 | 1079 | ||
1071 | local_irq_save(flags); | 1080 | local_irq_save(flags); |
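parse_setup_data() above now maps only enough of each setup_data record to read its header, then remaps with the full length when the payload extends past the first mapping. The same map-check-remap pattern, sketched with simplified stand-ins: map()/unmap() here are not the kernel's early_memremap()/early_iounmap(), and setup_data_hdr only mirrors the u64 next / u32 type / u32 len header.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct setup_data_hdr {
	uint64_t next;
	uint32_t type;
	uint32_t len;	/* payload length, not counting this header */
};

/* Fake "physical memory" and mapping helpers; the real functions create
 * and tear down temporary early fixmap mappings. */
static unsigned char phys_mem[4096];

static void *map(uint64_t pa, size_t len)
{
	(void)len;
	return phys_mem + pa;
}

static void unmap(void *va, size_t len)
{
	(void)va; (void)len;
}

int main(void)
{
	const size_t page_size = 256;	/* deliberately tiny */
	uint64_t pa = 16;

	/* Build one record whose payload crosses the first mapping. */
	struct setup_data_hdr hdr = { 0, 1, 400 };
	memcpy(phys_mem + pa, &hdr, sizeof(hdr));

	/* Map to the end of the page, but at least the header. */
	size_t map_len = page_size - (pa & (page_size - 1));
	if (map_len < sizeof(struct setup_data_hdr))
		map_len = sizeof(struct setup_data_hdr);

	struct setup_data_hdr *data = map(pa, map_len);
	size_t data_len = data->len + sizeof(*data);

	if (data_len > map_len) {	/* header fit, payload did not */
		unmap(data, map_len);
		data = map(pa, data_len);
		map_len = data_len;
	}

	printf("type %u, mapped %zu bytes\n", data->type, map_len);
	unmap(data, map_len);
	return 0;
}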
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 002b79685f73..71f4727da373 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c | |||
@@ -225,10 +225,15 @@ void __init setup_per_cpu_areas(void) | |||
225 | per_cpu(x86_bios_cpu_apicid, cpu) = | 225 | per_cpu(x86_bios_cpu_apicid, cpu) = |
226 | early_per_cpu_map(x86_bios_cpu_apicid, cpu); | 226 | early_per_cpu_map(x86_bios_cpu_apicid, cpu); |
227 | #endif | 227 | #endif |
228 | #ifdef CONFIG_X86_32 | ||
229 | per_cpu(x86_cpu_to_logical_apicid, cpu) = | ||
230 | early_per_cpu_map(x86_cpu_to_logical_apicid, cpu); | ||
231 | #endif | ||
228 | #ifdef CONFIG_X86_64 | 232 | #ifdef CONFIG_X86_64 |
229 | per_cpu(irq_stack_ptr, cpu) = | 233 | per_cpu(irq_stack_ptr, cpu) = |
230 | per_cpu(irq_stack_union.irq_stack, cpu) + | 234 | per_cpu(irq_stack_union.irq_stack, cpu) + |
231 | IRQ_STACK_SIZE - 64; | 235 | IRQ_STACK_SIZE - 64; |
236 | #endif | ||
232 | #ifdef CONFIG_NUMA | 237 | #ifdef CONFIG_NUMA |
233 | per_cpu(x86_cpu_to_node_map, cpu) = | 238 | per_cpu(x86_cpu_to_node_map, cpu) = |
234 | early_per_cpu_map(x86_cpu_to_node_map, cpu); | 239 | early_per_cpu_map(x86_cpu_to_node_map, cpu); |
@@ -242,7 +247,6 @@ void __init setup_per_cpu_areas(void) | |||
242 | */ | 247 | */ |
243 | set_cpu_numa_node(cpu, early_cpu_to_node(cpu)); | 248 | set_cpu_numa_node(cpu, early_cpu_to_node(cpu)); |
244 | #endif | 249 | #endif |
245 | #endif | ||
246 | /* | 250 | /* |
247 | * Up to this point, the boot CPU has been using .init.data | 251 | * Up to this point, the boot CPU has been using .init.data |
248 | * area. Reload any changed state for the boot CPU. | 252 | * area. Reload any changed state for the boot CPU. |
@@ -256,7 +260,10 @@ void __init setup_per_cpu_areas(void) | |||
256 | early_per_cpu_ptr(x86_cpu_to_apicid) = NULL; | 260 | early_per_cpu_ptr(x86_cpu_to_apicid) = NULL; |
257 | early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL; | 261 | early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL; |
258 | #endif | 262 | #endif |
259 | #if defined(CONFIG_X86_64) && defined(CONFIG_NUMA) | 263 | #ifdef CONFIG_X86_32 |
264 | early_per_cpu_ptr(x86_cpu_to_logical_apicid) = NULL; | ||
265 | #endif | ||
266 | #ifdef CONFIG_NUMA | ||
260 | early_per_cpu_ptr(x86_cpu_to_node_map) = NULL; | 267 | early_per_cpu_ptr(x86_cpu_to_node_map) = NULL; |
261 | #endif | 268 | #endif |
262 | 269 | ||
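setup_per_cpu_areas() copies each early_per_cpu map (now including x86_cpu_to_logical_apicid on 32-bit) into the real per-CPU area and then clears the early pointer, so later lookups go through the per-CPU copy. A sketch of that general pattern; all names here (cpu_to_widget and friends) are hypothetical, not kernel symbols:

#include <stdio.h>

#define NR_CPUS 4

/* A static boot-time table filled in before the per-CPU areas exist,
 * and the per-CPU storage it is eventually copied into. */
static int early_cpu_to_widget_map[NR_CPUS] = { 10, 11, 12, 13 };
static int *early_cpu_to_widget_ptr = early_cpu_to_widget_map;

static int per_cpu_cpu_to_widget[NR_CPUS];

static int cpu_to_widget(int cpu)
{
	/* Early in boot, read the static table; afterwards, the per-CPU copy. */
	if (early_cpu_to_widget_ptr)
		return early_cpu_to_widget_ptr[cpu];
	return per_cpu_cpu_to_widget[cpu];
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		per_cpu_cpu_to_widget[cpu] = early_cpu_to_widget_map[cpu];

	/* Retire the early table, as setup_per_cpu_areas() does by
	 * setting early_per_cpu_ptr(...) = NULL. */
	early_cpu_to_widget_ptr = NULL;

	printf("cpu1 -> %d\n", cpu_to_widget(1));
	return 0;
}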
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 08776a953487..c2871d3c71b6 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -64,6 +64,7 @@ | |||
64 | #include <asm/mtrr.h> | 64 | #include <asm/mtrr.h> |
65 | #include <asm/mwait.h> | 65 | #include <asm/mwait.h> |
66 | #include <asm/apic.h> | 66 | #include <asm/apic.h> |
67 | #include <asm/io_apic.h> | ||
67 | #include <asm/setup.h> | 68 | #include <asm/setup.h> |
68 | #include <asm/uv/uv.h> | 69 | #include <asm/uv/uv.h> |
69 | #include <linux/mc146818rtc.h> | 70 | #include <linux/mc146818rtc.h> |
@@ -71,10 +72,6 @@ | |||
71 | #include <asm/smpboot_hooks.h> | 72 | #include <asm/smpboot_hooks.h> |
72 | #include <asm/i8259.h> | 73 | #include <asm/i8259.h> |
73 | 74 | ||
74 | #ifdef CONFIG_X86_32 | ||
75 | u8 apicid_2_node[MAX_APICID]; | ||
76 | #endif | ||
77 | |||
78 | /* State of each CPU */ | 75 | /* State of each CPU */ |
79 | DEFINE_PER_CPU(int, cpu_state) = { 0 }; | 76 | DEFINE_PER_CPU(int, cpu_state) = { 0 }; |
80 | 77 | ||
@@ -130,68 +127,14 @@ EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); | |||
130 | DEFINE_PER_CPU(cpumask_var_t, cpu_core_map); | 127 | DEFINE_PER_CPU(cpumask_var_t, cpu_core_map); |
131 | EXPORT_PER_CPU_SYMBOL(cpu_core_map); | 128 | EXPORT_PER_CPU_SYMBOL(cpu_core_map); |
132 | 129 | ||
130 | DEFINE_PER_CPU(cpumask_var_t, cpu_llc_shared_map); | ||
131 | |||
133 | /* Per CPU bogomips and other parameters */ | 132 | /* Per CPU bogomips and other parameters */ |
134 | DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); | 133 | DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); |
135 | EXPORT_PER_CPU_SYMBOL(cpu_info); | 134 | EXPORT_PER_CPU_SYMBOL(cpu_info); |
136 | 135 | ||
137 | atomic_t init_deasserted; | 136 | atomic_t init_deasserted; |
138 | 137 | ||
139 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_32) | ||
140 | /* which node each logical CPU is on */ | ||
141 | int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 }; | ||
142 | EXPORT_SYMBOL(cpu_to_node_map); | ||
143 | |||
144 | /* set up a mapping between cpu and node. */ | ||
145 | static void map_cpu_to_node(int cpu, int node) | ||
146 | { | ||
147 | printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node); | ||
148 | cpumask_set_cpu(cpu, node_to_cpumask_map[node]); | ||
149 | cpu_to_node_map[cpu] = node; | ||
150 | } | ||
151 | |||
152 | /* undo a mapping between cpu and node. */ | ||
153 | static void unmap_cpu_to_node(int cpu) | ||
154 | { | ||
155 | int node; | ||
156 | |||
157 | printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu); | ||
158 | for (node = 0; node < MAX_NUMNODES; node++) | ||
159 | cpumask_clear_cpu(cpu, node_to_cpumask_map[node]); | ||
160 | cpu_to_node_map[cpu] = 0; | ||
161 | } | ||
162 | #else /* !(CONFIG_NUMA && CONFIG_X86_32) */ | ||
163 | #define map_cpu_to_node(cpu, node) ({}) | ||
164 | #define unmap_cpu_to_node(cpu) ({}) | ||
165 | #endif | ||
166 | |||
167 | #ifdef CONFIG_X86_32 | ||
168 | static int boot_cpu_logical_apicid; | ||
169 | |||
170 | u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly = | ||
171 | { [0 ... NR_CPUS-1] = BAD_APICID }; | ||
172 | |||
173 | static void map_cpu_to_logical_apicid(void) | ||
174 | { | ||
175 | int cpu = smp_processor_id(); | ||
176 | int apicid = logical_smp_processor_id(); | ||
177 | int node = apic->apicid_to_node(apicid); | ||
178 | |||
179 | if (!node_online(node)) | ||
180 | node = first_online_node; | ||
181 | |||
182 | cpu_2_logical_apicid[cpu] = apicid; | ||
183 | map_cpu_to_node(cpu, node); | ||
184 | } | ||
185 | |||
186 | void numa_remove_cpu(int cpu) | ||
187 | { | ||
188 | cpu_2_logical_apicid[cpu] = BAD_APICID; | ||
189 | unmap_cpu_to_node(cpu); | ||
190 | } | ||
191 | #else | ||
192 | #define map_cpu_to_logical_apicid() do {} while (0) | ||
193 | #endif | ||
194 | |||
195 | /* | 138 | /* |
196 | * Report back to the Boot Processor. | 139 | * Report back to the Boot Processor. |
197 | * Running on AP. | 140 | * Running on AP. |
@@ -259,7 +202,6 @@ static void __cpuinit smp_callin(void) | |||
259 | apic->smp_callin_clear_local_apic(); | 202 | apic->smp_callin_clear_local_apic(); |
260 | setup_local_APIC(); | 203 | setup_local_APIC(); |
261 | end_local_APIC_setup(); | 204 | end_local_APIC_setup(); |
262 | map_cpu_to_logical_apicid(); | ||
263 | 205 | ||
264 | /* | 206 | /* |
265 | * Need to setup vector mappings before we enable interrupts. | 207 | * Need to setup vector mappings before we enable interrupts. |
@@ -355,23 +297,6 @@ notrace static void __cpuinit start_secondary(void *unused) | |||
355 | cpu_idle(); | 297 | cpu_idle(); |
356 | } | 298 | } |
357 | 299 | ||
358 | #ifdef CONFIG_CPUMASK_OFFSTACK | ||
359 | /* In this case, llc_shared_map is a pointer to a cpumask. */ | ||
360 | static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst, | ||
361 | const struct cpuinfo_x86 *src) | ||
362 | { | ||
363 | struct cpumask *llc = dst->llc_shared_map; | ||
364 | *dst = *src; | ||
365 | dst->llc_shared_map = llc; | ||
366 | } | ||
367 | #else | ||
368 | static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst, | ||
369 | const struct cpuinfo_x86 *src) | ||
370 | { | ||
371 | *dst = *src; | ||
372 | } | ||
373 | #endif /* CONFIG_CPUMASK_OFFSTACK */ | ||
374 | |||
375 | /* | 300 | /* |
376 | * The bootstrap kernel entry code has set these up. Save them for | 301 | * The bootstrap kernel entry code has set these up. Save them for |
377 | * a given CPU | 302 | * a given CPU |
@@ -381,7 +306,7 @@ void __cpuinit smp_store_cpu_info(int id) | |||
381 | { | 306 | { |
382 | struct cpuinfo_x86 *c = &cpu_data(id); | 307 | struct cpuinfo_x86 *c = &cpu_data(id); |
383 | 308 | ||
384 | copy_cpuinfo_x86(c, &boot_cpu_data); | 309 | *c = boot_cpu_data; |
385 | c->cpu_index = id; | 310 | c->cpu_index = id; |
386 | if (id != 0) | 311 | if (id != 0) |
387 | identify_secondary_cpu(c); | 312 | identify_secondary_cpu(c); |
@@ -389,15 +314,12 @@ void __cpuinit smp_store_cpu_info(int id) | |||
389 | 314 | ||
390 | static void __cpuinit link_thread_siblings(int cpu1, int cpu2) | 315 | static void __cpuinit link_thread_siblings(int cpu1, int cpu2) |
391 | { | 316 | { |
392 | struct cpuinfo_x86 *c1 = &cpu_data(cpu1); | ||
393 | struct cpuinfo_x86 *c2 = &cpu_data(cpu2); | ||
394 | |||
395 | cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2)); | 317 | cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2)); |
396 | cpumask_set_cpu(cpu2, cpu_sibling_mask(cpu1)); | 318 | cpumask_set_cpu(cpu2, cpu_sibling_mask(cpu1)); |
397 | cpumask_set_cpu(cpu1, cpu_core_mask(cpu2)); | 319 | cpumask_set_cpu(cpu1, cpu_core_mask(cpu2)); |
398 | cpumask_set_cpu(cpu2, cpu_core_mask(cpu1)); | 320 | cpumask_set_cpu(cpu2, cpu_core_mask(cpu1)); |
399 | cpumask_set_cpu(cpu1, c2->llc_shared_map); | 321 | cpumask_set_cpu(cpu1, cpu_llc_shared_mask(cpu2)); |
400 | cpumask_set_cpu(cpu2, c1->llc_shared_map); | 322 | cpumask_set_cpu(cpu2, cpu_llc_shared_mask(cpu1)); |
401 | } | 323 | } |
402 | 324 | ||
403 | 325 | ||
@@ -414,6 +336,7 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
414 | 336 | ||
415 | if (cpu_has(c, X86_FEATURE_TOPOEXT)) { | 337 | if (cpu_has(c, X86_FEATURE_TOPOEXT)) { |
416 | if (c->phys_proc_id == o->phys_proc_id && | 338 | if (c->phys_proc_id == o->phys_proc_id && |
339 | per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i) && | ||
417 | c->compute_unit_id == o->compute_unit_id) | 340 | c->compute_unit_id == o->compute_unit_id) |
418 | link_thread_siblings(cpu, i); | 341 | link_thread_siblings(cpu, i); |
419 | } else if (c->phys_proc_id == o->phys_proc_id && | 342 | } else if (c->phys_proc_id == o->phys_proc_id && |
@@ -425,7 +348,7 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
425 | cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); | 348 | cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); |
426 | } | 349 | } |
427 | 350 | ||
428 | cpumask_set_cpu(cpu, c->llc_shared_map); | 351 | cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu)); |
429 | 352 | ||
430 | if (__this_cpu_read(cpu_info.x86_max_cores) == 1) { | 353 | if (__this_cpu_read(cpu_info.x86_max_cores) == 1) { |
431 | cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu)); | 354 | cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu)); |
@@ -436,8 +359,8 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
436 | for_each_cpu(i, cpu_sibling_setup_mask) { | 359 | for_each_cpu(i, cpu_sibling_setup_mask) { |
437 | if (per_cpu(cpu_llc_id, cpu) != BAD_APICID && | 360 | if (per_cpu(cpu_llc_id, cpu) != BAD_APICID && |
438 | per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { | 361 | per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { |
439 | cpumask_set_cpu(i, c->llc_shared_map); | 362 | cpumask_set_cpu(i, cpu_llc_shared_mask(cpu)); |
440 | cpumask_set_cpu(cpu, cpu_data(i).llc_shared_map); | 363 | cpumask_set_cpu(cpu, cpu_llc_shared_mask(i)); |
441 | } | 364 | } |
442 | if (c->phys_proc_id == cpu_data(i).phys_proc_id) { | 365 | if (c->phys_proc_id == cpu_data(i).phys_proc_id) { |
443 | cpumask_set_cpu(i, cpu_core_mask(cpu)); | 366 | cpumask_set_cpu(i, cpu_core_mask(cpu)); |
@@ -476,7 +399,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu) | |||
476 | !(cpu_has(c, X86_FEATURE_AMD_DCM))) | 399 | !(cpu_has(c, X86_FEATURE_AMD_DCM))) |
477 | return cpu_core_mask(cpu); | 400 | return cpu_core_mask(cpu); |
478 | else | 401 | else |
479 | return c->llc_shared_map; | 402 | return cpu_llc_shared_mask(cpu); |
480 | } | 403 | } |
481 | 404 | ||
482 | static void impress_friends(void) | 405 | static void impress_friends(void) |
@@ -788,7 +711,7 @@ do_rest: | |||
788 | stack_start = c_idle.idle->thread.sp; | 711 | stack_start = c_idle.idle->thread.sp; |
789 | 712 | ||
790 | /* start_ip had better be page-aligned! */ | 713 | /* start_ip had better be page-aligned! */ |
791 | start_ip = setup_trampoline(); | 714 | start_ip = trampoline_address(); |
792 | 715 | ||
793 | /* So we see what's up */ | 716 | /* So we see what's up */ |
794 | announce_cpu(cpu, apicid); | 717 | announce_cpu(cpu, apicid); |
@@ -798,6 +721,8 @@ do_rest: | |||
798 | * the targeted processor. | 721 | * the targeted processor. |
799 | */ | 722 | */ |
800 | 723 | ||
724 | printk(KERN_DEBUG "smpboot cpu %d: start_ip = %lx\n", cpu, start_ip); | ||
725 | |||
801 | atomic_set(&init_deasserted, 0); | 726 | atomic_set(&init_deasserted, 0); |
802 | 727 | ||
803 | if (get_uv_system_type() != UV_NON_UNIQUE_APIC) { | 728 | if (get_uv_system_type() != UV_NON_UNIQUE_APIC) { |
@@ -851,8 +776,8 @@ do_rest: | |||
851 | pr_debug("CPU%d: has booted.\n", cpu); | 776 | pr_debug("CPU%d: has booted.\n", cpu); |
852 | else { | 777 | else { |
853 | boot_error = 1; | 778 | boot_error = 1; |
854 | if (*((volatile unsigned char *)trampoline_base) | 779 | if (*(volatile u32 *)TRAMPOLINE_SYM(trampoline_status) |
855 | == 0xA5) | 780 | == 0xA5A5A5A5) |
856 | /* trampoline started but...? */ | 781 | /* trampoline started but...? */ |
857 | pr_err("CPU%d: Stuck ??\n", cpu); | 782 | pr_err("CPU%d: Stuck ??\n", cpu); |
858 | else | 783 | else |
@@ -878,7 +803,7 @@ do_rest: | |||
878 | } | 803 | } |
879 | 804 | ||
880 | /* mark "stuck" area as not stuck */ | 805 | /* mark "stuck" area as not stuck */ |
881 | *((volatile unsigned long *)trampoline_base) = 0; | 806 | *(volatile u32 *)TRAMPOLINE_SYM(trampoline_status) = 0; |
882 | 807 | ||
883 | if (get_uv_system_type() != UV_NON_UNIQUE_APIC) { | 808 | if (get_uv_system_type() != UV_NON_UNIQUE_APIC) { |
884 | /* | 809 | /* |
@@ -945,6 +870,14 @@ int __cpuinit native_cpu_up(unsigned int cpu) | |||
945 | return 0; | 870 | return 0; |
946 | } | 871 | } |
947 | 872 | ||
873 | /** | ||
874 | * arch_disable_smp_support() - disables SMP support for x86 at runtime | ||
875 | */ | ||
876 | void arch_disable_smp_support(void) | ||
877 | { | ||
878 | disable_ioapic_support(); | ||
879 | } | ||
880 | |||
948 | /* | 881 | /* |
949 | * Fall back to non SMP mode after errors. | 882 | * Fall back to non SMP mode after errors. |
950 | * | 883 | * |
@@ -960,7 +893,6 @@ static __init void disable_smp(void) | |||
960 | physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map); | 893 | physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map); |
961 | else | 894 | else |
962 | physid_set_mask_of_physid(0, &phys_cpu_present_map); | 895 | physid_set_mask_of_physid(0, &phys_cpu_present_map); |
963 | map_cpu_to_logical_apicid(); | ||
964 | cpumask_set_cpu(0, cpu_sibling_mask(0)); | 896 | cpumask_set_cpu(0, cpu_sibling_mask(0)); |
965 | cpumask_set_cpu(0, cpu_core_mask(0)); | 897 | cpumask_set_cpu(0, cpu_core_mask(0)); |
966 | } | 898 | } |
@@ -1045,7 +977,7 @@ static int __init smp_sanity_check(unsigned max_cpus) | |||
1045 | "(tell your hw vendor)\n"); | 977 | "(tell your hw vendor)\n"); |
1046 | } | 978 | } |
1047 | smpboot_clear_io_apic(); | 979 | smpboot_clear_io_apic(); |
1048 | arch_disable_smp_support(); | 980 | disable_ioapic_support(); |
1049 | return -1; | 981 | return -1; |
1050 | } | 982 | } |
1051 | 983 | ||
@@ -1089,21 +1021,19 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) | |||
1089 | 1021 | ||
1090 | preempt_disable(); | 1022 | preempt_disable(); |
1091 | smp_cpu_index_default(); | 1023 | smp_cpu_index_default(); |
1092 | memcpy(__this_cpu_ptr(&cpu_info), &boot_cpu_data, sizeof(cpu_info)); | 1024 | |
1093 | cpumask_copy(cpu_callin_mask, cpumask_of(0)); | ||
1094 | mb(); | ||
1095 | /* | 1025 | /* |
1096 | * Setup boot CPU information | 1026 | * Setup boot CPU information |
1097 | */ | 1027 | */ |
1098 | smp_store_cpu_info(0); /* Final full version of the data */ | 1028 | smp_store_cpu_info(0); /* Final full version of the data */ |
1099 | #ifdef CONFIG_X86_32 | 1029 | cpumask_copy(cpu_callin_mask, cpumask_of(0)); |
1100 | boot_cpu_logical_apicid = logical_smp_processor_id(); | 1030 | mb(); |
1101 | #endif | 1031 | |
1102 | current_thread_info()->cpu = 0; /* needed? */ | 1032 | current_thread_info()->cpu = 0; /* needed? */ |
1103 | for_each_possible_cpu(i) { | 1033 | for_each_possible_cpu(i) { |
1104 | zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL); | 1034 | zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL); |
1105 | zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL); | 1035 | zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL); |
1106 | zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL); | 1036 | zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL); |
1107 | } | 1037 | } |
1108 | set_cpu_sibling_map(0); | 1038 | set_cpu_sibling_map(0); |
1109 | 1039 | ||
@@ -1139,8 +1069,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) | |||
1139 | 1069 | ||
1140 | bsp_end_local_APIC_setup(); | 1070 | bsp_end_local_APIC_setup(); |
1141 | 1071 | ||
1142 | map_cpu_to_logical_apicid(); | ||
1143 | |||
1144 | if (apic->setup_portio_remap) | 1072 | if (apic->setup_portio_remap) |
1145 | apic->setup_portio_remap(); | 1073 | apic->setup_portio_remap(); |
1146 | 1074 | ||
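The boot CPU no longer reuses the first bytes of the trampoline as a "started" marker; the AP now writes 0xA5A5A5A5 into a dedicated trampoline_status word, which the BSP checks to tell "started but stuck" apart from "never started". A simplified single-process sketch of that handshake; ap_trampoline() and report_boot_error() are illustrative stand-ins, and a plain volatile write does not capture the real cross-CPU ordering requirements:

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t trampoline_status;

/* What the AP-side trampoline does first thing after starting. */
static void ap_trampoline(void)
{
	trampoline_status = 0xA5A5A5A5;
	/* ... switch to protected mode, call into the kernel ... */
}

/* What the BSP does when the AP fails to report in. */
static void report_boot_error(int cpu)
{
	if (trampoline_status == 0xA5A5A5A5)
		printf("CPU%d: Stuck ??\n", cpu);	 /* trampoline ran, AP hung  */
	else
		printf("CPU%d: Not responding.\n", cpu); /* never reached trampoline */

	/* Mark the "stuck" area as not stuck for the next attempt. */
	trampoline_status = 0;
}

int main(void)
{
	ap_trampoline();
	report_boot_error(1);
	return 0;
}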
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S index b35786dc9b8f..5f181742e8f9 100644 --- a/arch/x86/kernel/syscall_table_32.S +++ b/arch/x86/kernel/syscall_table_32.S | |||
@@ -340,3 +340,6 @@ ENTRY(sys_call_table) | |||
340 | .long sys_fanotify_init | 340 | .long sys_fanotify_init |
341 | .long sys_fanotify_mark | 341 | .long sys_fanotify_mark |
342 | .long sys_prlimit64 /* 340 */ | 342 | .long sys_prlimit64 /* 340 */ |
343 | .long sys_name_to_handle_at | ||
344 | .long sys_open_by_handle_at | ||
345 | .long sys_clock_adjtime | ||
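The three syscalls appended here land at numbers 341-343 on x86-32 (prlimit64 is 340). Until the C library grows wrappers they can be reached with syscall(2); a hedged example for clock_adjtime, where the 343 literal is taken from the table above and is valid only for a 32-bit x86 build on a kernel that includes this change:

#define _GNU_SOURCE
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <sys/timex.h>

/* Number from the 32-bit table above; other architectures differ, so a
 * real program should use __NR_clock_adjtime from its kernel headers. */
#define NR_clock_adjtime_32 343

int main(void)
{
	struct timex tx = { 0 };	/* modes == 0: read-only query */
	long state;

	state = syscall(NR_clock_adjtime_32, CLOCK_REALTIME, &tx);
	if (state < 0) {
		perror("clock_adjtime");
		return 1;
	}
	printf("freq offset: %ld, clock state: %ld\n", tx.freq, state);
	return 0;
}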
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c index a375616d77f7..a91ae7709b49 100644 --- a/arch/x86/kernel/trampoline.c +++ b/arch/x86/kernel/trampoline.c | |||
@@ -2,39 +2,41 @@ | |||
2 | #include <linux/memblock.h> | 2 | #include <linux/memblock.h> |
3 | 3 | ||
4 | #include <asm/trampoline.h> | 4 | #include <asm/trampoline.h> |
5 | #include <asm/cacheflush.h> | ||
5 | #include <asm/pgtable.h> | 6 | #include <asm/pgtable.h> |
6 | 7 | ||
7 | #if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP) | 8 | unsigned char *x86_trampoline_base; |
8 | #define __trampinit | ||
9 | #define __trampinitdata | ||
10 | #else | ||
11 | #define __trampinit __cpuinit | ||
12 | #define __trampinitdata __cpuinitdata | ||
13 | #endif | ||
14 | 9 | ||
15 | /* ready for x86_64 and x86 */ | 10 | void __init setup_trampolines(void) |
16 | unsigned char *__trampinitdata trampoline_base; | ||
17 | |||
18 | void __init reserve_trampoline_memory(void) | ||
19 | { | 11 | { |
20 | phys_addr_t mem; | 12 | phys_addr_t mem; |
13 | size_t size = PAGE_ALIGN(x86_trampoline_end - x86_trampoline_start); | ||
21 | 14 | ||
22 | /* Has to be in very low memory so we can execute real-mode AP code. */ | 15 | /* Has to be in very low memory so we can execute real-mode AP code. */ |
23 | mem = memblock_find_in_range(0, 1<<20, TRAMPOLINE_SIZE, PAGE_SIZE); | 16 | mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE); |
24 | if (mem == MEMBLOCK_ERROR) | 17 | if (mem == MEMBLOCK_ERROR) |
25 | panic("Cannot allocate trampoline\n"); | 18 | panic("Cannot allocate trampoline\n"); |
26 | 19 | ||
27 | trampoline_base = __va(mem); | 20 | x86_trampoline_base = __va(mem); |
28 | memblock_x86_reserve_range(mem, mem + TRAMPOLINE_SIZE, "TRAMPOLINE"); | 21 | memblock_x86_reserve_range(mem, mem + size, "TRAMPOLINE"); |
22 | |||
23 | printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n", | ||
24 | x86_trampoline_base, (unsigned long long)mem, size); | ||
25 | |||
26 | memcpy(x86_trampoline_base, x86_trampoline_start, size); | ||
29 | } | 27 | } |
30 | 28 | ||
31 | /* | 29 | /* |
32 | * Currently trivial. Write the real->protected mode | 30 | * setup_trampolines() gets called very early, to guarantee the |
33 | * bootstrap into the page concerned. The caller | 31 | * availability of low memory. This is before the proper kernel page |
34 | * has made sure it's suitably aligned. | 32 | * tables are set up, so we cannot set page permissions in that |
33 | * function. Thus, we use an arch_initcall instead. | ||
35 | */ | 34 | */ |
36 | unsigned long __trampinit setup_trampoline(void) | 35 | static int __init configure_trampolines(void) |
37 | { | 36 | { |
38 | memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE); | 37 | size_t size = PAGE_ALIGN(x86_trampoline_end - x86_trampoline_start); |
39 | return virt_to_phys(trampoline_base); | 38 | |
39 | set_memory_x((unsigned long)x86_trampoline_base, size >> PAGE_SHIFT); | ||
40 | return 0; | ||
40 | } | 41 | } |
42 | arch_initcall(configure_trampolines); | ||
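setup_trampolines() copies the whole .x86_trampoline section below 1 MiB; TRAMPOLINE_SYM() (used by the reboot and SMP-boot changes above) then has to translate a symbol's link-time address inside that section into its address within the low-memory copy. One plausible way to express that translation, as a sketch rather than the kernel's actual macro; trampoline_sym(), trampoline_section and low_mem_copy are invented stand-ins for the real x86_trampoline_start/_end and x86_trampoline_base:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

static unsigned char trampoline_section[64];	/* "kernel image" master copy */
static unsigned char low_mem_copy[64];		/* "below 1 MiB" runtime copy */

static void *trampoline_sym(const void *kernel_sym)
{
	size_t off = (const unsigned char *)kernel_sym - trampoline_section;
	return low_mem_copy + off;
}

int main(void)
{
	/* Pretend a status byte lives 16 bytes into the section. */
	unsigned char *kernel_status = trampoline_section + 16;

	memcpy(low_mem_copy, trampoline_section, sizeof(trampoline_section));

	unsigned char *low_status = trampoline_sym(kernel_status);
	*low_status = 0xA5;	/* patch the copy, not the kernel image */

	printf("offset %td patched in the low copy\n",
	       low_status - low_mem_copy);
	return 0;
}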
diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S index 8508237e8e43..451c0a7ef7fd 100644 --- a/arch/x86/kernel/trampoline_32.S +++ b/arch/x86/kernel/trampoline_32.S | |||
@@ -32,9 +32,11 @@ | |||
32 | #include <asm/segment.h> | 32 | #include <asm/segment.h> |
33 | #include <asm/page_types.h> | 33 | #include <asm/page_types.h> |
34 | 34 | ||
35 | /* We can free up trampoline after bootup if cpu hotplug is not supported. */ | 35 | #ifdef CONFIG_SMP |
36 | __CPUINITRODATA | 36 | |
37 | .code16 | 37 | .section ".x86_trampoline","a" |
38 | .balign PAGE_SIZE | ||
39 | .code16 | ||
38 | 40 | ||
39 | ENTRY(trampoline_data) | 41 | ENTRY(trampoline_data) |
40 | r_base = . | 42 | r_base = . |
@@ -44,7 +46,7 @@ r_base = . | |||
44 | 46 | ||
45 | cli # We should be safe anyway | 47 | cli # We should be safe anyway |
46 | 48 | ||
47 | movl $0xA5A5A5A5, trampoline_data - r_base | 49 | movl $0xA5A5A5A5, trampoline_status - r_base |
48 | # write marker so master knows we're running | 50 | # write marker so master knows we're running |
49 | 51 | ||
50 | /* GDT tables in non default location kernel can be beyond 16MB and | 52 | /* GDT tables in non default location kernel can be beyond 16MB and |
@@ -72,5 +74,10 @@ boot_idt_descr: | |||
72 | .word 0 # idt limit = 0 | 74 | .word 0 # idt limit = 0 |
73 | .long 0 # idt base = 0L | 75 | .long 0 # idt base = 0L |
74 | 76 | ||
77 | ENTRY(trampoline_status) | ||
78 | .long 0 | ||
79 | |||
75 | .globl trampoline_end | 80 | .globl trampoline_end |
76 | trampoline_end: | 81 | trampoline_end: |
82 | |||
83 | #endif /* CONFIG_SMP */ | ||
diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S index 075d130efcf9..09ff51799e96 100644 --- a/arch/x86/kernel/trampoline_64.S +++ b/arch/x86/kernel/trampoline_64.S | |||
@@ -32,13 +32,9 @@ | |||
32 | #include <asm/segment.h> | 32 | #include <asm/segment.h> |
33 | #include <asm/processor-flags.h> | 33 | #include <asm/processor-flags.h> |
34 | 34 | ||
35 | #ifdef CONFIG_ACPI_SLEEP | 35 | .section ".x86_trampoline","a" |
36 | .section .rodata, "a", @progbits | 36 | .balign PAGE_SIZE |
37 | #else | 37 | .code16 |
38 | /* We can free up the trampoline after bootup if cpu hotplug is not supported. */ | ||
39 | __CPUINITRODATA | ||
40 | #endif | ||
41 | .code16 | ||
42 | 38 | ||
43 | ENTRY(trampoline_data) | 39 | ENTRY(trampoline_data) |
44 | r_base = . | 40 | r_base = . |
@@ -50,7 +46,7 @@ r_base = . | |||
50 | mov %ax, %ss | 46 | mov %ax, %ss |
51 | 47 | ||
52 | 48 | ||
53 | movl $0xA5A5A5A5, trampoline_data - r_base | 49 | movl $0xA5A5A5A5, trampoline_status - r_base |
54 | # write marker so master knows we're running | 50 | # write marker so master knows we're running |
55 | 51 | ||
56 | # Setup stack | 52 | # Setup stack |
@@ -64,10 +60,13 @@ r_base = . | |||
64 | movzx %ax, %esi # Find the 32bit trampoline location | 60 | movzx %ax, %esi # Find the 32bit trampoline location |
65 | shll $4, %esi | 61 | shll $4, %esi |
66 | 62 | ||
67 | # Fixup the vectors | 63 | # Fixup the absolute vectors |
68 | addl %esi, startup_32_vector - r_base | 64 | leal (startup_32 - r_base)(%esi), %eax |
69 | addl %esi, startup_64_vector - r_base | 65 | movl %eax, startup_32_vector - r_base |
70 | addl %esi, tgdt + 2 - r_base # Fixup the gdt pointer | 66 | leal (startup_64 - r_base)(%esi), %eax |
67 | movl %eax, startup_64_vector - r_base | ||
68 | leal (tgdt - r_base)(%esi), %eax | ||
69 | movl %eax, (tgdt + 2 - r_base) | ||
71 | 70 | ||
72 | /* | 71 | /* |
73 | * GDT tables in non default location kernel can be beyond 16MB and | 72 | * GDT tables in non default location kernel can be beyond 16MB and |
@@ -129,6 +128,7 @@ no_longmode: | |||
129 | jmp no_longmode | 128 | jmp no_longmode |
130 | #include "verify_cpu.S" | 129 | #include "verify_cpu.S" |
131 | 130 | ||
131 | .balign 4 | ||
132 | # Careful these need to be in the same 64K segment as the above; | 132 | # Careful these need to be in the same 64K segment as the above; |
133 | tidt: | 133 | tidt: |
134 | .word 0 # idt limit = 0 | 134 | .word 0 # idt limit = 0 |
@@ -156,6 +156,10 @@ startup_64_vector: | |||
156 | .long startup_64 - r_base | 156 | .long startup_64 - r_base |
157 | .word __KERNEL_CS, 0 | 157 | .word __KERNEL_CS, 0 |
158 | 158 | ||
159 | .balign 4 | ||
160 | ENTRY(trampoline_status) | ||
161 | .long 0 | ||
162 | |||
159 | trampoline_stack: | 163 | trampoline_stack: |
160 | .org 0x1000 | 164 | .org 0x1000 |
161 | trampoline_stack_end: | 165 | trampoline_stack_end: |
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index bf4700755184..624a2016198e 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S | |||
@@ -105,6 +105,7 @@ SECTIONS | |||
105 | SCHED_TEXT | 105 | SCHED_TEXT |
106 | LOCK_TEXT | 106 | LOCK_TEXT |
107 | KPROBES_TEXT | 107 | KPROBES_TEXT |
108 | ENTRY_TEXT | ||
108 | IRQENTRY_TEXT | 109 | IRQENTRY_TEXT |
109 | *(.fixup) | 110 | *(.fixup) |
110 | *(.gnu.warning) | 111 | *(.gnu.warning) |
@@ -230,7 +231,7 @@ SECTIONS | |||
230 | * output PHDR, so the next output section - .init.text - should | 231 | * output PHDR, so the next output section - .init.text - should |
231 | * start another segment - init. | 232 | * start another segment - init. |
232 | */ | 233 | */ |
233 | PERCPU_VADDR(0, :percpu) | 234 | PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu) |
234 | #endif | 235 | #endif |
235 | 236 | ||
236 | INIT_TEXT_SECTION(PAGE_SIZE) | 237 | INIT_TEXT_SECTION(PAGE_SIZE) |
@@ -240,6 +241,18 @@ SECTIONS | |||
240 | 241 | ||
241 | INIT_DATA_SECTION(16) | 242 | INIT_DATA_SECTION(16) |
242 | 243 | ||
244 | /* | ||
245 | * Code and data for a variety of lowlevel trampolines, to be | ||
246 | * copied into base memory (< 1 MiB) during initialization. | ||
247 | * Since it is copied early, the main copy can be discarded | ||
248 | * afterwards. | ||
249 | */ | ||
250 | .x86_trampoline : AT(ADDR(.x86_trampoline) - LOAD_OFFSET) { | ||
251 | x86_trampoline_start = .; | ||
252 | *(.x86_trampoline) | ||
253 | x86_trampoline_end = .; | ||
254 | } | ||
255 | |||
243 | .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) { | 256 | .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) { |
244 | __x86_cpu_dev_start = .; | 257 | __x86_cpu_dev_start = .; |
245 | *(.x86_cpu_dev.init) | 258 | *(.x86_cpu_dev.init) |
@@ -291,6 +304,7 @@ SECTIONS | |||
291 | *(.iommu_table) | 304 | *(.iommu_table) |
292 | __iommu_table_end = .; | 305 | __iommu_table_end = .; |
293 | } | 306 | } |
307 | |||
294 | . = ALIGN(8); | 308 | . = ALIGN(8); |
295 | /* | 309 | /* |
296 | * .exit.text is discard at runtime, not link time, to deal with | 310 | * .exit.text is discard at runtime, not link time, to deal with |
@@ -305,7 +319,7 @@ SECTIONS | |||
305 | } | 319 | } |
306 | 320 | ||
307 | #if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP) | 321 | #if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP) |
308 | PERCPU(THREAD_SIZE) | 322 | PERCPU(INTERNODE_CACHE_BYTES, PAGE_SIZE) |
309 | #endif | 323 | #endif |
310 | 324 | ||
311 | . = ALIGN(PAGE_SIZE); | 325 | . = ALIGN(PAGE_SIZE); |
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c index 1b950d151e58..9796c2f3d074 100644 --- a/arch/x86/kernel/x8664_ksyms_64.c +++ b/arch/x86/kernel/x8664_ksyms_64.c | |||
@@ -52,6 +52,7 @@ extern void *__memcpy(void *, const void *, __kernel_size_t); | |||
52 | EXPORT_SYMBOL(memset); | 52 | EXPORT_SYMBOL(memset); |
53 | EXPORT_SYMBOL(memcpy); | 53 | EXPORT_SYMBOL(memcpy); |
54 | EXPORT_SYMBOL(__memcpy); | 54 | EXPORT_SYMBOL(__memcpy); |
55 | EXPORT_SYMBOL(memmove); | ||
55 | 56 | ||
56 | EXPORT_SYMBOL(empty_zero_page); | 57 | EXPORT_SYMBOL(empty_zero_page); |
57 | #ifndef CONFIG_PARAVIRT | 58 | #ifndef CONFIG_PARAVIRT |
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index ceb2911aa439..c11514e9128b 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c | |||
@@ -70,6 +70,7 @@ struct x86_init_ops x86_init __initdata = { | |||
70 | .setup_percpu_clockev = setup_boot_APIC_clock, | 70 | .setup_percpu_clockev = setup_boot_APIC_clock, |
71 | .tsc_pre_init = x86_init_noop, | 71 | .tsc_pre_init = x86_init_noop, |
72 | .timer_init = hpet_time_init, | 72 | .timer_init = hpet_time_init, |
73 | .wallclock_init = x86_init_noop, | ||
73 | }, | 74 | }, |
74 | 75 | ||
75 | .iommu = { | 76 | .iommu = { |
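x86_init gains a timers.wallclock_init hook, defaulted to x86_init_noop and called unconditionally from setup_arch() (see the setup.c hunk above), so platforms such as the new device-tree path can hook wall-clock setup without #ifdefs. The default-to-noop hook pattern, reduced to a standalone sketch; struct timer_init_ops, init_noop and the platform override are hypothetical miniatures of the real x86_init machinery:

#include <stdio.h>

/* Every slot gets a harmless default, so callers never need a NULL check. */
struct timer_init_ops {
	void (*timer_init)(void);
	void (*wallclock_init)(void);
};

static void init_noop(void) { }

static void default_timer_init(void)
{
	printf("default timer init\n");
}

static struct timer_init_ops timer_ops = {
	.timer_init     = default_timer_init,
	.wallclock_init = init_noop,	/* platforms may override this */
};

static void platform_wallclock_init(void)
{
	printf("platform wallclock init\n");
}

int main(void)
{
	/* A platform (e.g. a device-tree board setup) overrides the hook. */
	timer_ops.wallclock_init = platform_wallclock_init;

	timer_ops.timer_init();
	timer_ops.wallclock_init();	/* called unconditionally, as in setup_arch() */
	return 0;
}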
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index 1357d7cf4ec8..db932760ea82 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h | |||
@@ -62,21 +62,21 @@ TRACE_EVENT(kvm_hv_hypercall, | |||
62 | TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa), | 62 | TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa), |
63 | 63 | ||
64 | TP_STRUCT__entry( | 64 | TP_STRUCT__entry( |
65 | __field( __u16, code ) | ||
66 | __field( bool, fast ) | ||
67 | __field( __u16, rep_cnt ) | 65 | __field( __u16, rep_cnt ) |
68 | __field( __u16, rep_idx ) | 66 | __field( __u16, rep_idx ) |
69 | __field( __u64, ingpa ) | 67 | __field( __u64, ingpa ) |
70 | __field( __u64, outgpa ) | 68 | __field( __u64, outgpa ) |
69 | __field( __u16, code ) | ||
70 | __field( bool, fast ) | ||
71 | ), | 71 | ), |
72 | 72 | ||
73 | TP_fast_assign( | 73 | TP_fast_assign( |
74 | __entry->code = code; | ||
75 | __entry->fast = fast; | ||
76 | __entry->rep_cnt = rep_cnt; | 74 | __entry->rep_cnt = rep_cnt; |
77 | __entry->rep_idx = rep_idx; | 75 | __entry->rep_idx = rep_idx; |
78 | __entry->ingpa = ingpa; | 76 | __entry->ingpa = ingpa; |
79 | __entry->outgpa = outgpa; | 77 | __entry->outgpa = outgpa; |
78 | __entry->code = code; | ||
79 | __entry->fast = fast; | ||
80 | ), | 80 | ), |
81 | 81 | ||
82 | TP_printk("code 0x%x %s cnt 0x%x idx 0x%x in 0x%llx out 0x%llx", | 82 | TP_printk("code 0x%x %s cnt 0x%x idx 0x%x in 0x%llx out 0x%llx", |
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index eba687f0cc0c..b9ec1c74943c 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c | |||
@@ -847,7 +847,7 @@ static void __init lguest_init_IRQ(void) | |||
847 | void lguest_setup_irq(unsigned int irq) | 847 | void lguest_setup_irq(unsigned int irq) |
848 | { | 848 | { |
849 | irq_alloc_desc_at(irq, 0); | 849 | irq_alloc_desc_at(irq, 0); |
850 | set_irq_chip_and_handler_name(irq, &lguest_irq_controller, | 850 | irq_set_chip_and_handler_name(irq, &lguest_irq_controller, |
851 | handle_level_irq, "level"); | 851 | handle_level_irq, "level"); |
852 | } | 852 | } |
853 | 853 | ||
@@ -995,7 +995,7 @@ static void lguest_time_irq(unsigned int irq, struct irq_desc *desc) | |||
995 | static void lguest_time_init(void) | 995 | static void lguest_time_init(void) |
996 | { | 996 | { |
997 | /* Set up the timer interrupt (0) to go to our simple timer routine */ | 997 | /* Set up the timer interrupt (0) to go to our simple timer routine */ |
998 | set_irq_handler(0, lguest_time_irq); | 998 | irq_set_handler(0, lguest_time_irq); |
999 | 999 | ||
1000 | clocksource_register(&lguest_clock); | 1000 | clocksource_register(&lguest_clock); |
1001 | 1001 | ||
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index e10cf070ede0..f2479f19ddde 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile | |||
@@ -42,4 +42,5 @@ else | |||
42 | lib-y += memmove_64.o memset_64.o | 42 | lib-y += memmove_64.o memset_64.o |
43 | lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o | 43 | lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o |
44 | lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem_64.o | 44 | lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem_64.o |
45 | lib-y += cmpxchg16b_emu.o | ||
45 | endif | 46 | endif |
diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S index 2cda60a06e65..e8e7e0d06f42 100644 --- a/arch/x86/lib/atomic64_386_32.S +++ b/arch/x86/lib/atomic64_386_32.S | |||
@@ -15,14 +15,12 @@ | |||
15 | 15 | ||
16 | /* if you want SMP support, implement these with real spinlocks */ | 16 | /* if you want SMP support, implement these with real spinlocks */ |
17 | .macro LOCK reg | 17 | .macro LOCK reg |
18 | pushfl | 18 | pushfl_cfi |
19 | CFI_ADJUST_CFA_OFFSET 4 | ||
20 | cli | 19 | cli |
21 | .endm | 20 | .endm |
22 | 21 | ||
23 | .macro UNLOCK reg | 22 | .macro UNLOCK reg |
24 | popfl | 23 | popfl_cfi |
25 | CFI_ADJUST_CFA_OFFSET -4 | ||
26 | .endm | 24 | .endm |
27 | 25 | ||
28 | #define BEGIN(op) \ | 26 | #define BEGIN(op) \ |
diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S index 71e080de3352..391a083674b4 100644 --- a/arch/x86/lib/atomic64_cx8_32.S +++ b/arch/x86/lib/atomic64_cx8_32.S | |||
@@ -14,14 +14,12 @@ | |||
14 | #include <asm/dwarf2.h> | 14 | #include <asm/dwarf2.h> |
15 | 15 | ||
16 | .macro SAVE reg | 16 | .macro SAVE reg |
17 | pushl %\reg | 17 | pushl_cfi %\reg |
18 | CFI_ADJUST_CFA_OFFSET 4 | ||
19 | CFI_REL_OFFSET \reg, 0 | 18 | CFI_REL_OFFSET \reg, 0 |
20 | .endm | 19 | .endm |
21 | 20 | ||
22 | .macro RESTORE reg | 21 | .macro RESTORE reg |
23 | popl %\reg | 22 | popl_cfi %\reg |
24 | CFI_ADJUST_CFA_OFFSET -4 | ||
25 | CFI_RESTORE \reg | 23 | CFI_RESTORE \reg |
26 | .endm | 24 | .endm |
27 | 25 | ||
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S index adbccd0bbb78..78d16a554db0 100644 --- a/arch/x86/lib/checksum_32.S +++ b/arch/x86/lib/checksum_32.S | |||
@@ -50,11 +50,9 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum) | |||
50 | */ | 50 | */ |
51 | ENTRY(csum_partial) | 51 | ENTRY(csum_partial) |
52 | CFI_STARTPROC | 52 | CFI_STARTPROC |
53 | pushl %esi | 53 | pushl_cfi %esi |
54 | CFI_ADJUST_CFA_OFFSET 4 | ||
55 | CFI_REL_OFFSET esi, 0 | 54 | CFI_REL_OFFSET esi, 0 |
56 | pushl %ebx | 55 | pushl_cfi %ebx |
57 | CFI_ADJUST_CFA_OFFSET 4 | ||
58 | CFI_REL_OFFSET ebx, 0 | 56 | CFI_REL_OFFSET ebx, 0 |
59 | movl 20(%esp),%eax # Function arg: unsigned int sum | 57 | movl 20(%esp),%eax # Function arg: unsigned int sum |
60 | movl 16(%esp),%ecx # Function arg: int len | 58 | movl 16(%esp),%ecx # Function arg: int len |
@@ -132,11 +130,9 @@ ENTRY(csum_partial) | |||
132 | jz 8f | 130 | jz 8f |
133 | roll $8, %eax | 131 | roll $8, %eax |
134 | 8: | 132 | 8: |
135 | popl %ebx | 133 | popl_cfi %ebx |
136 | CFI_ADJUST_CFA_OFFSET -4 | ||
137 | CFI_RESTORE ebx | 134 | CFI_RESTORE ebx |
138 | popl %esi | 135 | popl_cfi %esi |
139 | CFI_ADJUST_CFA_OFFSET -4 | ||
140 | CFI_RESTORE esi | 136 | CFI_RESTORE esi |
141 | ret | 137 | ret |
142 | CFI_ENDPROC | 138 | CFI_ENDPROC |
@@ -148,11 +144,9 @@ ENDPROC(csum_partial) | |||
148 | 144 | ||
149 | ENTRY(csum_partial) | 145 | ENTRY(csum_partial) |
150 | CFI_STARTPROC | 146 | CFI_STARTPROC |
151 | pushl %esi | 147 | pushl_cfi %esi |
152 | CFI_ADJUST_CFA_OFFSET 4 | ||
153 | CFI_REL_OFFSET esi, 0 | 148 | CFI_REL_OFFSET esi, 0 |
154 | pushl %ebx | 149 | pushl_cfi %ebx |
155 | CFI_ADJUST_CFA_OFFSET 4 | ||
156 | CFI_REL_OFFSET ebx, 0 | 150 | CFI_REL_OFFSET ebx, 0 |
157 | movl 20(%esp),%eax # Function arg: unsigned int sum | 151 | movl 20(%esp),%eax # Function arg: unsigned int sum |
158 | movl 16(%esp),%ecx # Function arg: int len | 152 | movl 16(%esp),%ecx # Function arg: int len |
@@ -260,11 +254,9 @@ ENTRY(csum_partial) | |||
260 | jz 90f | 254 | jz 90f |
261 | roll $8, %eax | 255 | roll $8, %eax |
262 | 90: | 256 | 90: |
263 | popl %ebx | 257 | popl_cfi %ebx |
264 | CFI_ADJUST_CFA_OFFSET -4 | ||
265 | CFI_RESTORE ebx | 258 | CFI_RESTORE ebx |
266 | popl %esi | 259 | popl_cfi %esi |
267 | CFI_ADJUST_CFA_OFFSET -4 | ||
268 | CFI_RESTORE esi | 260 | CFI_RESTORE esi |
269 | ret | 261 | ret |
270 | CFI_ENDPROC | 262 | CFI_ENDPROC |
@@ -309,14 +301,11 @@ ENTRY(csum_partial_copy_generic) | |||
309 | CFI_STARTPROC | 301 | CFI_STARTPROC |
310 | subl $4,%esp | 302 | subl $4,%esp |
311 | CFI_ADJUST_CFA_OFFSET 4 | 303 | CFI_ADJUST_CFA_OFFSET 4 |
312 | pushl %edi | 304 | pushl_cfi %edi |
313 | CFI_ADJUST_CFA_OFFSET 4 | ||
314 | CFI_REL_OFFSET edi, 0 | 305 | CFI_REL_OFFSET edi, 0 |
315 | pushl %esi | 306 | pushl_cfi %esi |
316 | CFI_ADJUST_CFA_OFFSET 4 | ||
317 | CFI_REL_OFFSET esi, 0 | 307 | CFI_REL_OFFSET esi, 0 |
318 | pushl %ebx | 308 | pushl_cfi %ebx |
319 | CFI_ADJUST_CFA_OFFSET 4 | ||
320 | CFI_REL_OFFSET ebx, 0 | 309 | CFI_REL_OFFSET ebx, 0 |
321 | movl ARGBASE+16(%esp),%eax # sum | 310 | movl ARGBASE+16(%esp),%eax # sum |
322 | movl ARGBASE+12(%esp),%ecx # len | 311 | movl ARGBASE+12(%esp),%ecx # len |
@@ -426,17 +415,13 @@ DST( movb %cl, (%edi) ) | |||
426 | 415 | ||
427 | .previous | 416 | .previous |
428 | 417 | ||
429 | popl %ebx | 418 | popl_cfi %ebx |
430 | CFI_ADJUST_CFA_OFFSET -4 | ||
431 | CFI_RESTORE ebx | 419 | CFI_RESTORE ebx |
432 | popl %esi | 420 | popl_cfi %esi |
433 | CFI_ADJUST_CFA_OFFSET -4 | ||
434 | CFI_RESTORE esi | 421 | CFI_RESTORE esi |
435 | popl %edi | 422 | popl_cfi %edi |
436 | CFI_ADJUST_CFA_OFFSET -4 | ||
437 | CFI_RESTORE edi | 423 | CFI_RESTORE edi |
438 | popl %ecx # equivalent to addl $4,%esp | 424 | popl_cfi %ecx # equivalent to addl $4,%esp |
439 | CFI_ADJUST_CFA_OFFSET -4 | ||
440 | ret | 425 | ret |
441 | CFI_ENDPROC | 426 | CFI_ENDPROC |
442 | ENDPROC(csum_partial_copy_generic) | 427 | ENDPROC(csum_partial_copy_generic) |
@@ -459,14 +444,11 @@ ENDPROC(csum_partial_copy_generic) | |||
459 | 444 | ||
460 | ENTRY(csum_partial_copy_generic) | 445 | ENTRY(csum_partial_copy_generic) |
461 | CFI_STARTPROC | 446 | CFI_STARTPROC |
462 | pushl %ebx | 447 | pushl_cfi %ebx |
463 | CFI_ADJUST_CFA_OFFSET 4 | ||
464 | CFI_REL_OFFSET ebx, 0 | 448 | CFI_REL_OFFSET ebx, 0 |
465 | pushl %edi | 449 | pushl_cfi %edi |
466 | CFI_ADJUST_CFA_OFFSET 4 | ||
467 | CFI_REL_OFFSET edi, 0 | 450 | CFI_REL_OFFSET edi, 0 |
468 | pushl %esi | 451 | pushl_cfi %esi |
469 | CFI_ADJUST_CFA_OFFSET 4 | ||
470 | CFI_REL_OFFSET esi, 0 | 452 | CFI_REL_OFFSET esi, 0 |
471 | movl ARGBASE+4(%esp),%esi #src | 453 | movl ARGBASE+4(%esp),%esi #src |
472 | movl ARGBASE+8(%esp),%edi #dst | 454 | movl ARGBASE+8(%esp),%edi #dst |
@@ -527,14 +509,11 @@ DST( movb %dl, (%edi) ) | |||
527 | jmp 7b | 509 | jmp 7b |
528 | .previous | 510 | .previous |
529 | 511 | ||
530 | popl %esi | 512 | popl_cfi %esi |
531 | CFI_ADJUST_CFA_OFFSET -4 | ||
532 | CFI_RESTORE esi | 513 | CFI_RESTORE esi |
533 | popl %edi | 514 | popl_cfi %edi |
534 | CFI_ADJUST_CFA_OFFSET -4 | ||
535 | CFI_RESTORE edi | 515 | CFI_RESTORE edi |
536 | popl %ebx | 516 | popl_cfi %ebx |
537 | CFI_ADJUST_CFA_OFFSET -4 | ||
538 | CFI_RESTORE ebx | 517 | CFI_RESTORE ebx |
539 | ret | 518 | ret |
540 | CFI_ENDPROC | 519 | CFI_ENDPROC |
diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S new file mode 100644 index 000000000000..3e8b08a6de2b --- /dev/null +++ b/arch/x86/lib/cmpxchg16b_emu.S | |||
@@ -0,0 +1,59 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or | ||
3 | * modify it under the terms of the GNU General Public License | ||
4 | * as published by the Free Software Foundation; version 2 | ||
5 | * of the License. | ||
6 | * | ||
7 | */ | ||
8 | #include <linux/linkage.h> | ||
9 | #include <asm/alternative-asm.h> | ||
10 | #include <asm/frame.h> | ||
11 | #include <asm/dwarf2.h> | ||
12 | |||
13 | .text | ||
14 | |||
15 | /* | ||
16 | * Inputs: | ||
17 | * %rsi : memory location to compare | ||
18 | * %rax : low 64 bits of old value | ||
19 | * %rdx : high 64 bits of old value | ||
20 | * %rbx : low 64 bits of new value | ||
21 | * %rcx : high 64 bits of new value | ||
22 | * %al : Operation successful | ||
23 | */ | ||
24 | ENTRY(this_cpu_cmpxchg16b_emu) | ||
25 | CFI_STARTPROC | ||
26 | |||
27 | # | ||
28 | # Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not | ||
29 | # via the ZF. Caller will access %al to get result. | ||
30 | # | ||
31 | # Note that this is only useful for a cpuops operation, meaning that we | ||
32 | # do *not* have a fully atomic operation but just an operation that is | ||
33 | # *atomic* on a single cpu (as provided by the this_cpu_xx class of | ||
34 | # macros). | ||
35 | # | ||
36 | this_cpu_cmpxchg16b_emu: | ||
37 | pushf | ||
38 | cli | ||
39 | |||
40 | cmpq %gs:(%rsi), %rax | ||
41 | jne not_same | ||
42 | cmpq %gs:8(%rsi), %rdx | ||
43 | jne not_same | ||
44 | |||
45 | movq %rbx, %gs:(%rsi) | ||
46 | movq %rcx, %gs:8(%rsi) | ||
47 | |||
48 | popf | ||
49 | mov $1, %al | ||
50 | ret | ||
51 | |||
52 | not_same: | ||
53 | popf | ||
54 | xor %al,%al | ||
55 | ret | ||
56 | |||
57 | CFI_ENDPROC | ||
58 | |||
59 | ENDPROC(this_cpu_cmpxchg16b_emu) | ||
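For clarity, here is a rough C-level sketch of the semantics the new emulation above implements. The names below (pair128, this_cpu_cmpxchg16b_sketch) are illustrative and not from the kernel tree; the real routine gets its atomicity from the pushf/cli ... popf sequence rather than from any C construct, which is why it is only atomic with respect to the local cpu.

    /*
     * Illustrative sketch only -- not the kernel's code.  Interrupts are
     * assumed disabled around the body, mirroring pushf/cli ... popf.
     */
    struct pair128 {
            unsigned long lo;       /* low  64 bits: %rax (old) / %rbx (new) */
            unsigned long hi;       /* high 64 bits: %rdx (old) / %rcx (new) */
    };

    static int this_cpu_cmpxchg16b_sketch(struct pair128 *var,  /* %gs:(%rsi) */
                                          struct pair128 old,
                                          struct pair128 new_val)
    {
            int ok = 0;

            /* pushf; cli -- interrupts off, so nothing else on this cpu
             * can touch *var until we are done. */
            if (var->lo == old.lo && var->hi == old.hi) {
                    var->lo = new_val.lo;           /* movq %rbx, %gs:(%rsi)  */
                    var->hi = new_val.hi;           /* movq %rcx, %gs:8(%rsi) */
                    ok = 1;
            }
            /* popf -- restore the previous interrupt state */
            return ok;                              /* handed back in %al */
    }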
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S new file mode 100644 index 000000000000..0ecb8433e5a8 --- /dev/null +++ b/arch/x86/lib/memmove_64.S | |||
@@ -0,0 +1,197 @@ | |||
1 | /* | ||
2 | * Normally compiler builtins are used, but sometimes the compiler calls out | ||
3 | * of line code. Based on asm-i386/string.h. | ||
4 | * | ||
5 | * This assembly file is rewritten from the memmove_64.c file. | ||
6 | * - Copyright 2011 Fenghua Yu <fenghua.yu@intel.com> | ||
7 | */ | ||
8 | #define _STRING_C | ||
9 | #include <linux/linkage.h> | ||
10 | #include <asm/dwarf2.h> | ||
11 | |||
12 | #undef memmove | ||
13 | |||
14 | /* | ||
15 | * Implement memmove(). This can handle overlap between src and dst. | ||
16 | * | ||
17 | * Input: | ||
18 | * rdi: dest | ||
19 | * rsi: src | ||
20 | * rdx: count | ||
21 | * | ||
22 | * Output: | ||
23 | * rax: dest | ||
24 | */ | ||
25 | ENTRY(memmove) | ||
26 | CFI_STARTPROC | ||
27 | /* Handle copies of 32 bytes or more with the loop below */ | ||
28 | mov %rdi, %rax | ||
29 | cmp $0x20, %rdx | ||
30 | jb 1f | ||
31 | |||
32 | /* Decide forward/backward copy mode */ | ||
33 | cmp %rdi, %rsi | ||
34 | jb 2f | ||
35 | |||
36 | /* | ||
37 | * The movsq instruction has a high startup latency, so we | ||
38 | * handle small sizes with general-purpose registers instead. | ||
39 | */ | ||
40 | cmp $680, %rdx | ||
41 | jb 3f | ||
42 | /* | ||
43 | * The movsq instruction is only good for the aligned case. | ||
44 | */ | ||
45 | |||
46 | cmpb %dil, %sil | ||
47 | je 4f | ||
48 | 3: | ||
49 | sub $0x20, %rdx | ||
50 | /* | ||
51 | * We gobble 32 bytes forward in each loop. | ||
52 | */ | ||
53 | 5: | ||
54 | sub $0x20, %rdx | ||
55 | movq 0*8(%rsi), %r11 | ||
56 | movq 1*8(%rsi), %r10 | ||
57 | movq 2*8(%rsi), %r9 | ||
58 | movq 3*8(%rsi), %r8 | ||
59 | leaq 4*8(%rsi), %rsi | ||
60 | |||
61 | movq %r11, 0*8(%rdi) | ||
62 | movq %r10, 1*8(%rdi) | ||
63 | movq %r9, 2*8(%rdi) | ||
64 | movq %r8, 3*8(%rdi) | ||
65 | leaq 4*8(%rdi), %rdi | ||
66 | jae 5b | ||
67 | addq $0x20, %rdx | ||
68 | jmp 1f | ||
69 | /* | ||
70 | * Copy the data forward with movsq. | ||
71 | */ | ||
72 | .p2align 4 | ||
73 | 4: | ||
74 | movq %rdx, %rcx | ||
75 | movq -8(%rsi, %rdx), %r11 | ||
76 | lea -8(%rdi, %rdx), %r10 | ||
77 | shrq $3, %rcx | ||
78 | rep movsq | ||
79 | movq %r11, (%r10) | ||
80 | jmp 13f | ||
81 | /* | ||
82 | * Copy the data backward with movsq. | ||
83 | */ | ||
84 | .p2align 4 | ||
85 | 7: | ||
86 | movq %rdx, %rcx | ||
87 | movq (%rsi), %r11 | ||
88 | movq %rdi, %r10 | ||
89 | leaq -8(%rsi, %rdx), %rsi | ||
90 | leaq -8(%rdi, %rdx), %rdi | ||
91 | shrq $3, %rcx | ||
92 | std | ||
93 | rep movsq | ||
94 | cld | ||
95 | movq %r11, (%r10) | ||
96 | jmp 13f | ||
97 | |||
98 | /* | ||
99 | * Start to prepare for backward copy. | ||
100 | */ | ||
101 | .p2align 4 | ||
102 | 2: | ||
103 | cmp $680, %rdx | ||
104 | jb 6f | ||
105 | cmp %dil, %sil | ||
106 | je 7b | ||
107 | 6: | ||
108 | /* | ||
109 | * Calculate copy position to tail. | ||
110 | */ | ||
111 | addq %rdx, %rsi | ||
112 | addq %rdx, %rdi | ||
113 | subq $0x20, %rdx | ||
114 | /* | ||
115 | * We gobble 32 bytes backward in each loop. | ||
116 | */ | ||
117 | 8: | ||
118 | subq $0x20, %rdx | ||
119 | movq -1*8(%rsi), %r11 | ||
120 | movq -2*8(%rsi), %r10 | ||
121 | movq -3*8(%rsi), %r9 | ||
122 | movq -4*8(%rsi), %r8 | ||
123 | leaq -4*8(%rsi), %rsi | ||
124 | |||
125 | movq %r11, -1*8(%rdi) | ||
126 | movq %r10, -2*8(%rdi) | ||
127 | movq %r9, -3*8(%rdi) | ||
128 | movq %r8, -4*8(%rdi) | ||
129 | leaq -4*8(%rdi), %rdi | ||
130 | jae 8b | ||
131 | /* | ||
132 | * Calculate copy position to head. | ||
133 | */ | ||
134 | addq $0x20, %rdx | ||
135 | subq %rdx, %rsi | ||
136 | subq %rdx, %rdi | ||
137 | 1: | ||
138 | cmpq $16, %rdx | ||
139 | jb 9f | ||
140 | /* | ||
141 | * Move data from 16 bytes to 31 bytes. | ||
142 | */ | ||
143 | movq 0*8(%rsi), %r11 | ||
144 | movq 1*8(%rsi), %r10 | ||
145 | movq -2*8(%rsi, %rdx), %r9 | ||
146 | movq -1*8(%rsi, %rdx), %r8 | ||
147 | movq %r11, 0*8(%rdi) | ||
148 | movq %r10, 1*8(%rdi) | ||
149 | movq %r9, -2*8(%rdi, %rdx) | ||
150 | movq %r8, -1*8(%rdi, %rdx) | ||
151 | jmp 13f | ||
152 | .p2align 4 | ||
153 | 9: | ||
154 | cmpq $8, %rdx | ||
155 | jb 10f | ||
156 | /* | ||
157 | * Move data from 8 bytes to 15 bytes. | ||
158 | */ | ||
159 | movq 0*8(%rsi), %r11 | ||
160 | movq -1*8(%rsi, %rdx), %r10 | ||
161 | movq %r11, 0*8(%rdi) | ||
162 | movq %r10, -1*8(%rdi, %rdx) | ||
163 | jmp 13f | ||
164 | 10: | ||
165 | cmpq $4, %rdx | ||
166 | jb 11f | ||
167 | /* | ||
168 | * Move data from 4 bytes to 7 bytes. | ||
169 | */ | ||
170 | movl (%rsi), %r11d | ||
171 | movl -4(%rsi, %rdx), %r10d | ||
172 | movl %r11d, (%rdi) | ||
173 | movl %r10d, -4(%rdi, %rdx) | ||
174 | jmp 13f | ||
175 | 11: | ||
176 | cmp $2, %rdx | ||
177 | jb 12f | ||
178 | /* | ||
179 | * Move data from 2 bytes to 3 bytes. | ||
180 | */ | ||
181 | movw (%rsi), %r11w | ||
182 | movw -2(%rsi, %rdx), %r10w | ||
183 | movw %r11w, (%rdi) | ||
184 | movw %r10w, -2(%rdi, %rdx) | ||
185 | jmp 13f | ||
186 | 12: | ||
187 | cmp $1, %rdx | ||
188 | jb 13f | ||
189 | /* | ||
190 | * Move data for 1 byte. | ||
191 | */ | ||
192 | movb (%rsi), %r11b | ||
193 | movb %r11b, (%rdi) | ||
194 | 13: | ||
195 | retq | ||
196 | CFI_ENDPROC | ||
197 | ENDPROC(memmove) | ||
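The comments in the new file describe the strategy: copy forward when the regions do not overlap harmfully, backward otherwise; use rep movsq for copies of 680 bytes or more when the low address bytes of source and destination match, and 32-byte register loops plus small tail moves for everything else. Below is a minimal C sketch of just the forward/backward decision (ours, not the kernel's, with plain byte loops standing in for the optimized paths).

    #include <stddef.h>

    /* Sketch only: same overlap handling as the assembly above, without
     * the 32-byte unrolled loops or the rep-movsq fast path. */
    static void *memmove_sketch(void *dest, const void *src, size_t count)
    {
            unsigned char *d = dest;
            const unsigned char *s = src;

            if (s >= d) {
                    /* Forward copy is safe (src at or above dest). */
                    while (count--)
                            *d++ = *s++;
            } else {
                    /* dest overlaps the tail of src: copy backward. */
                    d += count;
                    s += count;
                    while (count--)
                            *--d = *--s;
            }
            return dest;
    }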
diff --git a/arch/x86/lib/memmove_64.c b/arch/x86/lib/memmove_64.c deleted file mode 100644 index 6d0f0ec41b34..000000000000 --- a/arch/x86/lib/memmove_64.c +++ /dev/null | |||
@@ -1,192 +0,0 @@ | |||
1 | /* Normally compiler builtins are used, but sometimes the compiler calls out | ||
2 | of line code. Based on asm-i386/string.h. | ||
3 | */ | ||
4 | #define _STRING_C | ||
5 | #include <linux/string.h> | ||
6 | #include <linux/module.h> | ||
7 | |||
8 | #undef memmove | ||
9 | void *memmove(void *dest, const void *src, size_t count) | ||
10 | { | ||
11 | unsigned long d0,d1,d2,d3,d4,d5,d6,d7; | ||
12 | char *ret; | ||
13 | |||
14 | __asm__ __volatile__( | ||
15 | /* Handle more 32bytes in loop */ | ||
16 | "mov %2, %3\n\t" | ||
17 | "cmp $0x20, %0\n\t" | ||
18 | "jb 1f\n\t" | ||
19 | |||
20 | /* Decide forward/backward copy mode */ | ||
21 | "cmp %2, %1\n\t" | ||
22 | "jb 2f\n\t" | ||
23 | |||
24 | /* | ||
25 | * movsq instruction have many startup latency | ||
26 | * so we handle small size by general register. | ||
27 | */ | ||
28 | "cmp $680, %0\n\t" | ||
29 | "jb 3f\n\t" | ||
30 | /* | ||
31 | * movsq instruction is only good for aligned case. | ||
32 | */ | ||
33 | "cmpb %%dil, %%sil\n\t" | ||
34 | "je 4f\n\t" | ||
35 | "3:\n\t" | ||
36 | "sub $0x20, %0\n\t" | ||
37 | /* | ||
38 | * We gobble 32byts forward in each loop. | ||
39 | */ | ||
40 | "5:\n\t" | ||
41 | "sub $0x20, %0\n\t" | ||
42 | "movq 0*8(%1), %4\n\t" | ||
43 | "movq 1*8(%1), %5\n\t" | ||
44 | "movq 2*8(%1), %6\n\t" | ||
45 | "movq 3*8(%1), %7\n\t" | ||
46 | "leaq 4*8(%1), %1\n\t" | ||
47 | |||
48 | "movq %4, 0*8(%2)\n\t" | ||
49 | "movq %5, 1*8(%2)\n\t" | ||
50 | "movq %6, 2*8(%2)\n\t" | ||
51 | "movq %7, 3*8(%2)\n\t" | ||
52 | "leaq 4*8(%2), %2\n\t" | ||
53 | "jae 5b\n\t" | ||
54 | "addq $0x20, %0\n\t" | ||
55 | "jmp 1f\n\t" | ||
56 | /* | ||
57 | * Handle data forward by movsq. | ||
58 | */ | ||
59 | ".p2align 4\n\t" | ||
60 | "4:\n\t" | ||
61 | "movq %0, %8\n\t" | ||
62 | "movq -8(%1, %0), %4\n\t" | ||
63 | "lea -8(%2, %0), %5\n\t" | ||
64 | "shrq $3, %8\n\t" | ||
65 | "rep movsq\n\t" | ||
66 | "movq %4, (%5)\n\t" | ||
67 | "jmp 13f\n\t" | ||
68 | /* | ||
69 | * Handle data backward by movsq. | ||
70 | */ | ||
71 | ".p2align 4\n\t" | ||
72 | "7:\n\t" | ||
73 | "movq %0, %8\n\t" | ||
74 | "movq (%1), %4\n\t" | ||
75 | "movq %2, %5\n\t" | ||
76 | "leaq -8(%1, %0), %1\n\t" | ||
77 | "leaq -8(%2, %0), %2\n\t" | ||
78 | "shrq $3, %8\n\t" | ||
79 | "std\n\t" | ||
80 | "rep movsq\n\t" | ||
81 | "cld\n\t" | ||
82 | "movq %4, (%5)\n\t" | ||
83 | "jmp 13f\n\t" | ||
84 | |||
85 | /* | ||
86 | * Start to prepare for backward copy. | ||
87 | */ | ||
88 | ".p2align 4\n\t" | ||
89 | "2:\n\t" | ||
90 | "cmp $680, %0\n\t" | ||
91 | "jb 6f \n\t" | ||
92 | "cmp %%dil, %%sil\n\t" | ||
93 | "je 7b \n\t" | ||
94 | "6:\n\t" | ||
95 | /* | ||
96 | * Calculate copy position to tail. | ||
97 | */ | ||
98 | "addq %0, %1\n\t" | ||
99 | "addq %0, %2\n\t" | ||
100 | "subq $0x20, %0\n\t" | ||
101 | /* | ||
102 | * We gobble 32byts backward in each loop. | ||
103 | */ | ||
104 | "8:\n\t" | ||
105 | "subq $0x20, %0\n\t" | ||
106 | "movq -1*8(%1), %4\n\t" | ||
107 | "movq -2*8(%1), %5\n\t" | ||
108 | "movq -3*8(%1), %6\n\t" | ||
109 | "movq -4*8(%1), %7\n\t" | ||
110 | "leaq -4*8(%1), %1\n\t" | ||
111 | |||
112 | "movq %4, -1*8(%2)\n\t" | ||
113 | "movq %5, -2*8(%2)\n\t" | ||
114 | "movq %6, -3*8(%2)\n\t" | ||
115 | "movq %7, -4*8(%2)\n\t" | ||
116 | "leaq -4*8(%2), %2\n\t" | ||
117 | "jae 8b\n\t" | ||
118 | /* | ||
119 | * Calculate copy position to head. | ||
120 | */ | ||
121 | "addq $0x20, %0\n\t" | ||
122 | "subq %0, %1\n\t" | ||
123 | "subq %0, %2\n\t" | ||
124 | "1:\n\t" | ||
125 | "cmpq $16, %0\n\t" | ||
126 | "jb 9f\n\t" | ||
127 | /* | ||
128 | * Move data from 16 bytes to 31 bytes. | ||
129 | */ | ||
130 | "movq 0*8(%1), %4\n\t" | ||
131 | "movq 1*8(%1), %5\n\t" | ||
132 | "movq -2*8(%1, %0), %6\n\t" | ||
133 | "movq -1*8(%1, %0), %7\n\t" | ||
134 | "movq %4, 0*8(%2)\n\t" | ||
135 | "movq %5, 1*8(%2)\n\t" | ||
136 | "movq %6, -2*8(%2, %0)\n\t" | ||
137 | "movq %7, -1*8(%2, %0)\n\t" | ||
138 | "jmp 13f\n\t" | ||
139 | ".p2align 4\n\t" | ||
140 | "9:\n\t" | ||
141 | "cmpq $8, %0\n\t" | ||
142 | "jb 10f\n\t" | ||
143 | /* | ||
144 | * Move data from 8 bytes to 15 bytes. | ||
145 | */ | ||
146 | "movq 0*8(%1), %4\n\t" | ||
147 | "movq -1*8(%1, %0), %5\n\t" | ||
148 | "movq %4, 0*8(%2)\n\t" | ||
149 | "movq %5, -1*8(%2, %0)\n\t" | ||
150 | "jmp 13f\n\t" | ||
151 | "10:\n\t" | ||
152 | "cmpq $4, %0\n\t" | ||
153 | "jb 11f\n\t" | ||
154 | /* | ||
155 | * Move data from 4 bytes to 7 bytes. | ||
156 | */ | ||
157 | "movl (%1), %4d\n\t" | ||
158 | "movl -4(%1, %0), %5d\n\t" | ||
159 | "movl %4d, (%2)\n\t" | ||
160 | "movl %5d, -4(%2, %0)\n\t" | ||
161 | "jmp 13f\n\t" | ||
162 | "11:\n\t" | ||
163 | "cmp $2, %0\n\t" | ||
164 | "jb 12f\n\t" | ||
165 | /* | ||
166 | * Move data from 2 bytes to 3 bytes. | ||
167 | */ | ||
168 | "movw (%1), %4w\n\t" | ||
169 | "movw -2(%1, %0), %5w\n\t" | ||
170 | "movw %4w, (%2)\n\t" | ||
171 | "movw %5w, -2(%2, %0)\n\t" | ||
172 | "jmp 13f\n\t" | ||
173 | "12:\n\t" | ||
174 | "cmp $1, %0\n\t" | ||
175 | "jb 13f\n\t" | ||
176 | /* | ||
177 | * Move data for 1 byte. | ||
178 | */ | ||
179 | "movb (%1), %4b\n\t" | ||
180 | "movb %4b, (%2)\n\t" | ||
181 | "13:\n\t" | ||
182 | : "=&d" (d0), "=&S" (d1), "=&D" (d2), "=&a" (ret) , | ||
183 | "=r"(d3), "=r"(d4), "=r"(d5), "=r"(d6), "=&c" (d7) | ||
184 | :"0" (count), | ||
185 | "1" (src), | ||
186 | "2" (dest) | ||
187 | :"memory"); | ||
188 | |||
189 | return ret; | ||
190 | |||
191 | } | ||
192 | EXPORT_SYMBOL(memmove); | ||
diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem_64.S index 41fcf00e49df..67743977398b 100644 --- a/arch/x86/lib/rwsem_64.S +++ b/arch/x86/lib/rwsem_64.S | |||
@@ -23,43 +23,50 @@ | |||
23 | #include <asm/dwarf2.h> | 23 | #include <asm/dwarf2.h> |
24 | 24 | ||
25 | #define save_common_regs \ | 25 | #define save_common_regs \ |
26 | pushq %rdi; \ | 26 | pushq_cfi %rdi; CFI_REL_OFFSET rdi, 0; \ |
27 | pushq %rsi; \ | 27 | pushq_cfi %rsi; CFI_REL_OFFSET rsi, 0; \ |
28 | pushq %rcx; \ | 28 | pushq_cfi %rcx; CFI_REL_OFFSET rcx, 0; \ |
29 | pushq %r8; \ | 29 | pushq_cfi %r8; CFI_REL_OFFSET r8, 0; \ |
30 | pushq %r9; \ | 30 | pushq_cfi %r9; CFI_REL_OFFSET r9, 0; \ |
31 | pushq %r10; \ | 31 | pushq_cfi %r10; CFI_REL_OFFSET r10, 0; \ |
32 | pushq %r11 | 32 | pushq_cfi %r11; CFI_REL_OFFSET r11, 0 |
33 | 33 | ||
34 | #define restore_common_regs \ | 34 | #define restore_common_regs \ |
35 | popq %r11; \ | 35 | popq_cfi %r11; CFI_RESTORE r11; \ |
36 | popq %r10; \ | 36 | popq_cfi %r10; CFI_RESTORE r10; \ |
37 | popq %r9; \ | 37 | popq_cfi %r9; CFI_RESTORE r9; \ |
38 | popq %r8; \ | 38 | popq_cfi %r8; CFI_RESTORE r8; \ |
39 | popq %rcx; \ | 39 | popq_cfi %rcx; CFI_RESTORE rcx; \ |
40 | popq %rsi; \ | 40 | popq_cfi %rsi; CFI_RESTORE rsi; \ |
41 | popq %rdi | 41 | popq_cfi %rdi; CFI_RESTORE rdi |
42 | 42 | ||
43 | /* Fix up special calling conventions */ | 43 | /* Fix up special calling conventions */ |
44 | ENTRY(call_rwsem_down_read_failed) | 44 | ENTRY(call_rwsem_down_read_failed) |
45 | CFI_STARTPROC | ||
45 | save_common_regs | 46 | save_common_regs |
46 | pushq %rdx | 47 | pushq_cfi %rdx |
48 | CFI_REL_OFFSET rdx, 0 | ||
47 | movq %rax,%rdi | 49 | movq %rax,%rdi |
48 | call rwsem_down_read_failed | 50 | call rwsem_down_read_failed |
49 | popq %rdx | 51 | popq_cfi %rdx |
52 | CFI_RESTORE rdx | ||
50 | restore_common_regs | 53 | restore_common_regs |
51 | ret | 54 | ret |
52 | ENDPROC(call_rwsem_down_read_failed) | 55 | CFI_ENDPROC |
56 | ENDPROC(call_rwsem_down_read_failed) | ||
53 | 57 | ||
54 | ENTRY(call_rwsem_down_write_failed) | 58 | ENTRY(call_rwsem_down_write_failed) |
59 | CFI_STARTPROC | ||
55 | save_common_regs | 60 | save_common_regs |
56 | movq %rax,%rdi | 61 | movq %rax,%rdi |
57 | call rwsem_down_write_failed | 62 | call rwsem_down_write_failed |
58 | restore_common_regs | 63 | restore_common_regs |
59 | ret | 64 | ret |
60 | ENDPROC(call_rwsem_down_write_failed) | 65 | CFI_ENDPROC |
66 | ENDPROC(call_rwsem_down_write_failed) | ||
61 | 67 | ||
62 | ENTRY(call_rwsem_wake) | 68 | ENTRY(call_rwsem_wake) |
69 | CFI_STARTPROC | ||
63 | decl %edx /* do nothing if still outstanding active readers */ | 70 | decl %edx /* do nothing if still outstanding active readers */ |
64 | jnz 1f | 71 | jnz 1f |
65 | save_common_regs | 72 | save_common_regs |
@@ -67,15 +74,20 @@ ENTRY(call_rwsem_wake) | |||
67 | call rwsem_wake | 74 | call rwsem_wake |
68 | restore_common_regs | 75 | restore_common_regs |
69 | 1: ret | 76 | 1: ret |
70 | ENDPROC(call_rwsem_wake) | 77 | CFI_ENDPROC |
78 | ENDPROC(call_rwsem_wake) | ||
71 | 79 | ||
72 | /* Fix up special calling conventions */ | 80 | /* Fix up special calling conventions */ |
73 | ENTRY(call_rwsem_downgrade_wake) | 81 | ENTRY(call_rwsem_downgrade_wake) |
82 | CFI_STARTPROC | ||
74 | save_common_regs | 83 | save_common_regs |
75 | pushq %rdx | 84 | pushq_cfi %rdx |
85 | CFI_REL_OFFSET rdx, 0 | ||
76 | movq %rax,%rdi | 86 | movq %rax,%rdi |
77 | call rwsem_downgrade_wake | 87 | call rwsem_downgrade_wake |
78 | popq %rdx | 88 | popq_cfi %rdx |
89 | CFI_RESTORE rdx | ||
79 | restore_common_regs | 90 | restore_common_regs |
80 | ret | 91 | ret |
81 | ENDPROC(call_rwsem_downgrade_wake) | 92 | CFI_ENDPROC |
93 | ENDPROC(call_rwsem_downgrade_wake) | ||
diff --git a/arch/x86/lib/semaphore_32.S b/arch/x86/lib/semaphore_32.S index 648fe4741782..06691daa4108 100644 --- a/arch/x86/lib/semaphore_32.S +++ b/arch/x86/lib/semaphore_32.S | |||
@@ -36,7 +36,7 @@ | |||
36 | */ | 36 | */ |
37 | #ifdef CONFIG_SMP | 37 | #ifdef CONFIG_SMP |
38 | ENTRY(__write_lock_failed) | 38 | ENTRY(__write_lock_failed) |
39 | CFI_STARTPROC simple | 39 | CFI_STARTPROC |
40 | FRAME | 40 | FRAME |
41 | 2: LOCK_PREFIX | 41 | 2: LOCK_PREFIX |
42 | addl $ RW_LOCK_BIAS,(%eax) | 42 | addl $ RW_LOCK_BIAS,(%eax) |
@@ -74,29 +74,23 @@ ENTRY(__read_lock_failed) | |||
74 | /* Fix up special calling conventions */ | 74 | /* Fix up special calling conventions */ |
75 | ENTRY(call_rwsem_down_read_failed) | 75 | ENTRY(call_rwsem_down_read_failed) |
76 | CFI_STARTPROC | 76 | CFI_STARTPROC |
77 | push %ecx | 77 | pushl_cfi %ecx |
78 | CFI_ADJUST_CFA_OFFSET 4 | ||
79 | CFI_REL_OFFSET ecx,0 | 78 | CFI_REL_OFFSET ecx,0 |
80 | push %edx | 79 | pushl_cfi %edx |
81 | CFI_ADJUST_CFA_OFFSET 4 | ||
82 | CFI_REL_OFFSET edx,0 | 80 | CFI_REL_OFFSET edx,0 |
83 | call rwsem_down_read_failed | 81 | call rwsem_down_read_failed |
84 | pop %edx | 82 | popl_cfi %edx |
85 | CFI_ADJUST_CFA_OFFSET -4 | 83 | popl_cfi %ecx |
86 | pop %ecx | ||
87 | CFI_ADJUST_CFA_OFFSET -4 | ||
88 | ret | 84 | ret |
89 | CFI_ENDPROC | 85 | CFI_ENDPROC |
90 | ENDPROC(call_rwsem_down_read_failed) | 86 | ENDPROC(call_rwsem_down_read_failed) |
91 | 87 | ||
92 | ENTRY(call_rwsem_down_write_failed) | 88 | ENTRY(call_rwsem_down_write_failed) |
93 | CFI_STARTPROC | 89 | CFI_STARTPROC |
94 | push %ecx | 90 | pushl_cfi %ecx |
95 | CFI_ADJUST_CFA_OFFSET 4 | ||
96 | CFI_REL_OFFSET ecx,0 | 91 | CFI_REL_OFFSET ecx,0 |
97 | calll rwsem_down_write_failed | 92 | calll rwsem_down_write_failed |
98 | pop %ecx | 93 | popl_cfi %ecx |
99 | CFI_ADJUST_CFA_OFFSET -4 | ||
100 | ret | 94 | ret |
101 | CFI_ENDPROC | 95 | CFI_ENDPROC |
102 | ENDPROC(call_rwsem_down_write_failed) | 96 | ENDPROC(call_rwsem_down_write_failed) |
@@ -105,12 +99,10 @@ ENTRY(call_rwsem_wake) | |||
105 | CFI_STARTPROC | 99 | CFI_STARTPROC |
106 | decw %dx /* do nothing if still outstanding active readers */ | 100 | decw %dx /* do nothing if still outstanding active readers */ |
107 | jnz 1f | 101 | jnz 1f |
108 | push %ecx | 102 | pushl_cfi %ecx |
109 | CFI_ADJUST_CFA_OFFSET 4 | ||
110 | CFI_REL_OFFSET ecx,0 | 103 | CFI_REL_OFFSET ecx,0 |
111 | call rwsem_wake | 104 | call rwsem_wake |
112 | pop %ecx | 105 | popl_cfi %ecx |
113 | CFI_ADJUST_CFA_OFFSET -4 | ||
114 | 1: ret | 106 | 1: ret |
115 | CFI_ENDPROC | 107 | CFI_ENDPROC |
116 | ENDPROC(call_rwsem_wake) | 108 | ENDPROC(call_rwsem_wake) |
@@ -118,17 +110,13 @@ ENTRY(call_rwsem_wake) | |||
118 | /* Fix up special calling conventions */ | 110 | /* Fix up special calling conventions */ |
119 | ENTRY(call_rwsem_downgrade_wake) | 111 | ENTRY(call_rwsem_downgrade_wake) |
120 | CFI_STARTPROC | 112 | CFI_STARTPROC |
121 | push %ecx | 113 | pushl_cfi %ecx |
122 | CFI_ADJUST_CFA_OFFSET 4 | ||
123 | CFI_REL_OFFSET ecx,0 | 114 | CFI_REL_OFFSET ecx,0 |
124 | push %edx | 115 | pushl_cfi %edx |
125 | CFI_ADJUST_CFA_OFFSET 4 | ||
126 | CFI_REL_OFFSET edx,0 | 116 | CFI_REL_OFFSET edx,0 |
127 | call rwsem_downgrade_wake | 117 | call rwsem_downgrade_wake |
128 | pop %edx | 118 | popl_cfi %edx |
129 | CFI_ADJUST_CFA_OFFSET -4 | 119 | popl_cfi %ecx |
130 | pop %ecx | ||
131 | CFI_ADJUST_CFA_OFFSET -4 | ||
132 | ret | 120 | ret |
133 | CFI_ENDPROC | 121 | CFI_ENDPROC |
134 | ENDPROC(call_rwsem_downgrade_wake) | 122 | ENDPROC(call_rwsem_downgrade_wake) |
diff --git a/arch/x86/lib/thunk_32.S b/arch/x86/lib/thunk_32.S index 650b11e00ecc..2930ae05d773 100644 --- a/arch/x86/lib/thunk_32.S +++ b/arch/x86/lib/thunk_32.S | |||
@@ -7,24 +7,6 @@ | |||
7 | 7 | ||
8 | #include <linux/linkage.h> | 8 | #include <linux/linkage.h> |
9 | 9 | ||
10 | #define ARCH_TRACE_IRQS_ON \ | ||
11 | pushl %eax; \ | ||
12 | pushl %ecx; \ | ||
13 | pushl %edx; \ | ||
14 | call trace_hardirqs_on; \ | ||
15 | popl %edx; \ | ||
16 | popl %ecx; \ | ||
17 | popl %eax; | ||
18 | |||
19 | #define ARCH_TRACE_IRQS_OFF \ | ||
20 | pushl %eax; \ | ||
21 | pushl %ecx; \ | ||
22 | pushl %edx; \ | ||
23 | call trace_hardirqs_off; \ | ||
24 | popl %edx; \ | ||
25 | popl %ecx; \ | ||
26 | popl %eax; | ||
27 | |||
28 | #ifdef CONFIG_TRACE_IRQFLAGS | 10 | #ifdef CONFIG_TRACE_IRQFLAGS |
29 | /* put return address in eax (arg1) */ | 11 | /* put return address in eax (arg1) */ |
30 | .macro thunk_ra name,func | 12 | .macro thunk_ra name,func |
diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S index bf9a7d5a5428..782b082c9ff7 100644 --- a/arch/x86/lib/thunk_64.S +++ b/arch/x86/lib/thunk_64.S | |||
@@ -22,26 +22,6 @@ | |||
22 | CFI_ENDPROC | 22 | CFI_ENDPROC |
23 | .endm | 23 | .endm |
24 | 24 | ||
25 | /* rdi: arg1 ... normal C conventions. rax is passed from C. */ | ||
26 | .macro thunk_retrax name,func | ||
27 | .globl \name | ||
28 | \name: | ||
29 | CFI_STARTPROC | ||
30 | SAVE_ARGS | ||
31 | call \func | ||
32 | jmp restore_norax | ||
33 | CFI_ENDPROC | ||
34 | .endm | ||
35 | |||
36 | |||
37 | .section .sched.text, "ax" | ||
38 | #ifdef CONFIG_RWSEM_XCHGADD_ALGORITHM | ||
39 | thunk rwsem_down_read_failed_thunk,rwsem_down_read_failed | ||
40 | thunk rwsem_down_write_failed_thunk,rwsem_down_write_failed | ||
41 | thunk rwsem_wake_thunk,rwsem_wake | ||
42 | thunk rwsem_downgrade_thunk,rwsem_downgrade_wake | ||
43 | #endif | ||
44 | |||
45 | #ifdef CONFIG_TRACE_IRQFLAGS | 25 | #ifdef CONFIG_TRACE_IRQFLAGS |
46 | /* put return address in rdi (arg1) */ | 26 | /* put return address in rdi (arg1) */ |
47 | .macro thunk_ra name,func | 27 | .macro thunk_ra name,func |
@@ -72,10 +52,3 @@ restore: | |||
72 | RESTORE_ARGS | 52 | RESTORE_ARGS |
73 | ret | 53 | ret |
74 | CFI_ENDPROC | 54 | CFI_ENDPROC |
75 | |||
76 | CFI_STARTPROC | ||
77 | SAVE_ARGS | ||
78 | restore_norax: | ||
79 | RESTORE_ARGS 1 | ||
80 | ret | ||
81 | CFI_ENDPROC | ||
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 09df2f9a3d69..3e608edf9958 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile | |||
@@ -25,6 +25,7 @@ obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o | |||
25 | obj-$(CONFIG_NUMA) += numa.o numa_$(BITS).o | 25 | obj-$(CONFIG_NUMA) += numa.o numa_$(BITS).o |
26 | obj-$(CONFIG_AMD_NUMA) += amdtopology_64.o | 26 | obj-$(CONFIG_AMD_NUMA) += amdtopology_64.o |
27 | obj-$(CONFIG_ACPI_NUMA) += srat_$(BITS).o | 27 | obj-$(CONFIG_ACPI_NUMA) += srat_$(BITS).o |
28 | obj-$(CONFIG_NUMA_EMU) += numa_emulation.o | ||
28 | 29 | ||
29 | obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o | 30 | obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o |
30 | 31 | ||
diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c index f21962c435ed..0919c26820d4 100644 --- a/arch/x86/mm/amdtopology_64.c +++ b/arch/x86/mm/amdtopology_64.c | |||
@@ -26,9 +26,7 @@ | |||
26 | #include <asm/apic.h> | 26 | #include <asm/apic.h> |
27 | #include <asm/amd_nb.h> | 27 | #include <asm/amd_nb.h> |
28 | 28 | ||
29 | static struct bootnode __initdata nodes[8]; | ||
30 | static unsigned char __initdata nodeids[8]; | 29 | static unsigned char __initdata nodeids[8]; |
31 | static nodemask_t __initdata nodes_parsed = NODE_MASK_NONE; | ||
32 | 30 | ||
33 | static __init int find_northbridge(void) | 31 | static __init int find_northbridge(void) |
34 | { | 32 | { |
@@ -51,7 +49,7 @@ static __init int find_northbridge(void) | |||
51 | return num; | 49 | return num; |
52 | } | 50 | } |
53 | 51 | ||
54 | return -1; | 52 | return -ENOENT; |
55 | } | 53 | } |
56 | 54 | ||
57 | static __init void early_get_boot_cpu_id(void) | 55 | static __init void early_get_boot_cpu_id(void) |
@@ -69,17 +67,18 @@ static __init void early_get_boot_cpu_id(void) | |||
69 | #endif | 67 | #endif |
70 | } | 68 | } |
71 | 69 | ||
72 | int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn) | 70 | int __init amd_numa_init(void) |
73 | { | 71 | { |
74 | unsigned long start = PFN_PHYS(start_pfn); | 72 | unsigned long start = PFN_PHYS(0); |
75 | unsigned long end = PFN_PHYS(end_pfn); | 73 | unsigned long end = PFN_PHYS(max_pfn); |
76 | unsigned numnodes; | 74 | unsigned numnodes; |
77 | unsigned long prevbase; | 75 | unsigned long prevbase; |
78 | int i, nb, found = 0; | 76 | int i, j, nb; |
79 | u32 nodeid, reg; | 77 | u32 nodeid, reg; |
78 | unsigned int bits, cores, apicid_base; | ||
80 | 79 | ||
81 | if (!early_pci_allowed()) | 80 | if (!early_pci_allowed()) |
82 | return -1; | 81 | return -EINVAL; |
83 | 82 | ||
84 | nb = find_northbridge(); | 83 | nb = find_northbridge(); |
85 | if (nb < 0) | 84 | if (nb < 0) |
@@ -90,7 +89,7 @@ int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn) | |||
90 | reg = read_pci_config(0, nb, 0, 0x60); | 89 | reg = read_pci_config(0, nb, 0, 0x60); |
91 | numnodes = ((reg >> 4) & 0xF) + 1; | 90 | numnodes = ((reg >> 4) & 0xF) + 1; |
92 | if (numnodes <= 1) | 91 | if (numnodes <= 1) |
93 | return -1; | 92 | return -ENOENT; |
94 | 93 | ||
95 | pr_info("Number of physical nodes %d\n", numnodes); | 94 | pr_info("Number of physical nodes %d\n", numnodes); |
96 | 95 | ||
@@ -121,9 +120,9 @@ int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn) | |||
121 | if ((base >> 8) & 3 || (limit >> 8) & 3) { | 120 | if ((base >> 8) & 3 || (limit >> 8) & 3) { |
122 | pr_err("Node %d using interleaving mode %lx/%lx\n", | 121 | pr_err("Node %d using interleaving mode %lx/%lx\n", |
123 | nodeid, (base >> 8) & 3, (limit >> 8) & 3); | 122 | nodeid, (base >> 8) & 3, (limit >> 8) & 3); |
124 | return -1; | 123 | return -EINVAL; |
125 | } | 124 | } |
126 | if (node_isset(nodeid, nodes_parsed)) { | 125 | if (node_isset(nodeid, numa_nodes_parsed)) { |
127 | pr_info("Node %d already present, skipping\n", | 126 | pr_info("Node %d already present, skipping\n", |
128 | nodeid); | 127 | nodeid); |
129 | continue; | 128 | continue; |
@@ -160,117 +159,28 @@ int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn) | |||
160 | if (prevbase > base) { | 159 | if (prevbase > base) { |
161 | pr_err("Node map not sorted %lx,%lx\n", | 160 | pr_err("Node map not sorted %lx,%lx\n", |
162 | prevbase, base); | 161 | prevbase, base); |
163 | return -1; | 162 | return -EINVAL; |
164 | } | 163 | } |
165 | 164 | ||
166 | pr_info("Node %d MemBase %016lx Limit %016lx\n", | 165 | pr_info("Node %d MemBase %016lx Limit %016lx\n", |
167 | nodeid, base, limit); | 166 | nodeid, base, limit); |
168 | 167 | ||
169 | found++; | ||
170 | |||
171 | nodes[nodeid].start = base; | ||
172 | nodes[nodeid].end = limit; | ||
173 | |||
174 | prevbase = base; | 168 | prevbase = base; |
175 | 169 | numa_add_memblk(nodeid, base, limit); | |
176 | node_set(nodeid, nodes_parsed); | 170 | node_set(nodeid, numa_nodes_parsed); |
177 | } | ||
178 | |||
179 | if (!found) | ||
180 | return -1; | ||
181 | return 0; | ||
182 | } | ||
183 | |||
184 | #ifdef CONFIG_NUMA_EMU | ||
185 | static s16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = { | ||
186 | [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE | ||
187 | }; | ||
188 | |||
189 | void __init amd_get_nodes(struct bootnode *physnodes) | ||
190 | { | ||
191 | int i; | ||
192 | |||
193 | for_each_node_mask(i, nodes_parsed) { | ||
194 | physnodes[i].start = nodes[i].start; | ||
195 | physnodes[i].end = nodes[i].end; | ||
196 | } | 171 | } |
197 | } | ||
198 | |||
199 | static int __init find_node_by_addr(unsigned long addr) | ||
200 | { | ||
201 | int ret = NUMA_NO_NODE; | ||
202 | int i; | ||
203 | |||
204 | for (i = 0; i < 8; i++) | ||
205 | if (addr >= nodes[i].start && addr < nodes[i].end) { | ||
206 | ret = i; | ||
207 | break; | ||
208 | } | ||
209 | return ret; | ||
210 | } | ||
211 | 172 | ||
212 | /* | 173 | if (!nodes_weight(numa_nodes_parsed)) |
213 | * For NUMA emulation, fake proximity domain (_PXM) to node id mappings must be | 174 | return -ENOENT; |
214 | * setup to represent the physical topology but reflect the emulated | ||
215 | * environment. For each emulated node, the real node which it appears on is | ||
216 | * found and a fake pxm to nid mapping is created which mirrors the actual | ||
217 | * locality. node_distance() then represents the correct distances between | ||
218 | * emulated nodes by using the fake acpi mappings to pxms. | ||
219 | */ | ||
220 | void __init amd_fake_nodes(const struct bootnode *nodes, int nr_nodes) | ||
221 | { | ||
222 | unsigned int bits; | ||
223 | unsigned int cores; | ||
224 | unsigned int apicid_base = 0; | ||
225 | int i; | ||
226 | 175 | ||
176 | /* | ||
177 | * We seem to have a valid NUMA configuration. Map apicids to nodes | ||
178 | * using the coreid bits from early_identify_cpu. | ||
179 | */ | ||
227 | bits = boot_cpu_data.x86_coreid_bits; | 180 | bits = boot_cpu_data.x86_coreid_bits; |
228 | cores = 1 << bits; | 181 | cores = 1 << bits; |
229 | early_get_boot_cpu_id(); | ||
230 | if (boot_cpu_physical_apicid > 0) | ||
231 | apicid_base = boot_cpu_physical_apicid; | ||
232 | |||
233 | for (i = 0; i < nr_nodes; i++) { | ||
234 | int index; | ||
235 | int nid; | ||
236 | int j; | ||
237 | |||
238 | nid = find_node_by_addr(nodes[i].start); | ||
239 | if (nid == NUMA_NO_NODE) | ||
240 | continue; | ||
241 | |||
242 | index = nodeids[nid] << bits; | ||
243 | if (fake_apicid_to_node[index + apicid_base] == NUMA_NO_NODE) | ||
244 | for (j = apicid_base; j < cores + apicid_base; j++) | ||
245 | fake_apicid_to_node[index + j] = i; | ||
246 | #ifdef CONFIG_ACPI_NUMA | ||
247 | __acpi_map_pxm_to_node(nid, i); | ||
248 | #endif | ||
249 | } | ||
250 | memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node)); | ||
251 | } | ||
252 | #endif /* CONFIG_NUMA_EMU */ | ||
253 | |||
254 | int __init amd_scan_nodes(void) | ||
255 | { | ||
256 | unsigned int bits; | ||
257 | unsigned int cores; | ||
258 | unsigned int apicid_base; | ||
259 | int i; | ||
260 | |||
261 | BUG_ON(nodes_empty(nodes_parsed)); | ||
262 | node_possible_map = nodes_parsed; | ||
263 | memnode_shift = compute_hash_shift(nodes, 8, NULL); | ||
264 | if (memnode_shift < 0) { | ||
265 | pr_err("No NUMA node hash function found. Contact maintainer\n"); | ||
266 | return -1; | ||
267 | } | ||
268 | pr_info("Using node hash shift of %d\n", memnode_shift); | ||
269 | |||
270 | /* use the coreid bits from early_identify_cpu */ | ||
271 | bits = boot_cpu_data.x86_coreid_bits; | ||
272 | cores = (1<<bits); | ||
273 | apicid_base = 0; | 182 | apicid_base = 0; |
183 | |||
274 | /* get the APIC ID of the BSP early for systems with apicid lifting */ | 184 | /* get the APIC ID of the BSP early for systems with apicid lifting */ |
275 | early_get_boot_cpu_id(); | 185 | early_get_boot_cpu_id(); |
276 | if (boot_cpu_physical_apicid > 0) { | 186 | if (boot_cpu_physical_apicid > 0) { |
@@ -278,17 +188,9 @@ int __init amd_scan_nodes(void) | |||
278 | apicid_base = boot_cpu_physical_apicid; | 188 | apicid_base = boot_cpu_physical_apicid; |
279 | } | 189 | } |
280 | 190 | ||
281 | for_each_node_mask(i, node_possible_map) { | 191 | for_each_node_mask(i, numa_nodes_parsed) |
282 | int j; | ||
283 | |||
284 | memblock_x86_register_active_regions(i, | ||
285 | nodes[i].start >> PAGE_SHIFT, | ||
286 | nodes[i].end >> PAGE_SHIFT); | ||
287 | for (j = apicid_base; j < cores + apicid_base; j++) | 192 | for (j = apicid_base; j < cores + apicid_base; j++) |
288 | apicid_to_node[(i << bits) + j] = i; | 193 | set_apicid_to_node((i << bits) + j, i); |
289 | setup_node_bootmem(i, nodes[i].start, nodes[i].end); | ||
290 | } | ||
291 | 194 | ||
292 | numa_init_array(); | ||
293 | return 0; | 195 | return 0; |
294 | } | 196 | } |
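The rewritten amd_numa_init() above maps apicids to nodes with set_apicid_to_node((i << bits) + j, i), where bits comes from boot_cpu_data.x86_coreid_bits and j runs over the cores of each node starting at the BSP's apicid. A small standalone illustration of that index arithmetic follows; the values chosen here (bits = 2, apicid_base = 0, two nodes) are just an example.

    #include <stdio.h>

    int main(void)
    {
            unsigned int bits = 2;                  /* x86_coreid_bits (example)   */
            unsigned int cores = 1u << bits;        /* cores per physical node     */
            unsigned int apicid_base = 0;           /* BSP apicid (apicid lifting) */
            unsigned int node, j;

            for (node = 0; node < 2; node++)
                    for (j = apicid_base; j < cores + apicid_base; j++)
                            printf("apicid %u -> node %u\n",
                                   (node << bits) + j, node);
            return 0;
    }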
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 7d90ceb882a4..20e3f8702d1e 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -229,15 +229,14 @@ void vmalloc_sync_all(void) | |||
229 | for (address = VMALLOC_START & PMD_MASK; | 229 | for (address = VMALLOC_START & PMD_MASK; |
230 | address >= TASK_SIZE && address < FIXADDR_TOP; | 230 | address >= TASK_SIZE && address < FIXADDR_TOP; |
231 | address += PMD_SIZE) { | 231 | address += PMD_SIZE) { |
232 | |||
233 | unsigned long flags; | ||
234 | struct page *page; | 232 | struct page *page; |
235 | 233 | ||
236 | spin_lock_irqsave(&pgd_lock, flags); | 234 | spin_lock(&pgd_lock); |
237 | list_for_each_entry(page, &pgd_list, lru) { | 235 | list_for_each_entry(page, &pgd_list, lru) { |
238 | spinlock_t *pgt_lock; | 236 | spinlock_t *pgt_lock; |
239 | pmd_t *ret; | 237 | pmd_t *ret; |
240 | 238 | ||
239 | /* the pgt_lock is only needed for Xen */ | ||
241 | pgt_lock = &pgd_page_get_mm(page)->page_table_lock; | 240 | pgt_lock = &pgd_page_get_mm(page)->page_table_lock; |
242 | 241 | ||
243 | spin_lock(pgt_lock); | 242 | spin_lock(pgt_lock); |
@@ -247,7 +246,7 @@ void vmalloc_sync_all(void) | |||
247 | if (!ret) | 246 | if (!ret) |
248 | break; | 247 | break; |
249 | } | 248 | } |
250 | spin_unlock_irqrestore(&pgd_lock, flags); | 249 | spin_unlock(&pgd_lock); |
251 | } | 250 | } |
252 | } | 251 | } |
253 | 252 | ||
@@ -828,6 +827,13 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, | |||
828 | unsigned long address, unsigned int fault) | 827 | unsigned long address, unsigned int fault) |
829 | { | 828 | { |
830 | if (fault & VM_FAULT_OOM) { | 829 | if (fault & VM_FAULT_OOM) { |
830 | /* Kernel mode? Handle exceptions or die: */ | ||
831 | if (!(error_code & PF_USER)) { | ||
832 | up_read(¤t->mm->mmap_sem); | ||
833 | no_context(regs, error_code, address); | ||
834 | return; | ||
835 | } | ||
836 | |||
831 | out_of_memory(regs, error_code, address); | 837 | out_of_memory(regs, error_code, address); |
832 | } else { | 838 | } else { |
833 | if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| | 839 | if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| |
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 947f42abe820..286d289b039b 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c | |||
@@ -18,9 +18,9 @@ | |||
18 | 18 | ||
19 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | 19 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); |
20 | 20 | ||
21 | unsigned long __initdata e820_table_start; | 21 | unsigned long __initdata pgt_buf_start; |
22 | unsigned long __meminitdata e820_table_end; | 22 | unsigned long __meminitdata pgt_buf_end; |
23 | unsigned long __meminitdata e820_table_top; | 23 | unsigned long __meminitdata pgt_buf_top; |
24 | 24 | ||
25 | int after_bootmem; | 25 | int after_bootmem; |
26 | 26 | ||
@@ -33,7 +33,7 @@ int direct_gbpages | |||
33 | static void __init find_early_table_space(unsigned long end, int use_pse, | 33 | static void __init find_early_table_space(unsigned long end, int use_pse, |
34 | int use_gbpages) | 34 | int use_gbpages) |
35 | { | 35 | { |
36 | unsigned long puds, pmds, ptes, tables, start; | 36 | unsigned long puds, pmds, ptes, tables, start = 0, good_end = end; |
37 | phys_addr_t base; | 37 | phys_addr_t base; |
38 | 38 | ||
39 | puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; | 39 | puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; |
@@ -65,29 +65,20 @@ static void __init find_early_table_space(unsigned long end, int use_pse, | |||
65 | #ifdef CONFIG_X86_32 | 65 | #ifdef CONFIG_X86_32 |
66 | /* for fixmap */ | 66 | /* for fixmap */ |
67 | tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE); | 67 | tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE); |
68 | #endif | ||
69 | 68 | ||
70 | /* | 69 | good_end = max_pfn_mapped << PAGE_SHIFT; |
71 | * RED-PEN putting page tables only on node 0 could | ||
72 | * cause a hotspot and fill up ZONE_DMA. The page tables | ||
73 | * need roughly 0.5KB per GB. | ||
74 | */ | ||
75 | #ifdef CONFIG_X86_32 | ||
76 | start = 0x7000; | ||
77 | #else | ||
78 | start = 0x8000; | ||
79 | #endif | 70 | #endif |
80 | base = memblock_find_in_range(start, max_pfn_mapped<<PAGE_SHIFT, | 71 | |
81 | tables, PAGE_SIZE); | 72 | base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE); |
82 | if (base == MEMBLOCK_ERROR) | 73 | if (base == MEMBLOCK_ERROR) |
83 | panic("Cannot find space for the kernel page tables"); | 74 | panic("Cannot find space for the kernel page tables"); |
84 | 75 | ||
85 | e820_table_start = base >> PAGE_SHIFT; | 76 | pgt_buf_start = base >> PAGE_SHIFT; |
86 | e820_table_end = e820_table_start; | 77 | pgt_buf_end = pgt_buf_start; |
87 | e820_table_top = e820_table_start + (tables >> PAGE_SHIFT); | 78 | pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT); |
88 | 79 | ||
89 | printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n", | 80 | printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n", |
90 | end, e820_table_start << PAGE_SHIFT, e820_table_top << PAGE_SHIFT); | 81 | end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT); |
91 | } | 82 | } |
92 | 83 | ||
93 | struct map_range { | 84 | struct map_range { |
@@ -279,30 +270,11 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, | |||
279 | load_cr3(swapper_pg_dir); | 270 | load_cr3(swapper_pg_dir); |
280 | #endif | 271 | #endif |
281 | 272 | ||
282 | #ifdef CONFIG_X86_64 | ||
283 | if (!after_bootmem && !start) { | ||
284 | pud_t *pud; | ||
285 | pmd_t *pmd; | ||
286 | |||
287 | mmu_cr4_features = read_cr4(); | ||
288 | |||
289 | /* | ||
290 | * _brk_end cannot change anymore, but it and _end may be | ||
291 | * located on different 2M pages. cleanup_highmap(), however, | ||
292 | * can only consider _end when it runs, so destroy any | ||
293 | * mappings beyond _brk_end here. | ||
294 | */ | ||
295 | pud = pud_offset(pgd_offset_k(_brk_end), _brk_end); | ||
296 | pmd = pmd_offset(pud, _brk_end - 1); | ||
297 | while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1)) | ||
298 | pmd_clear(pmd); | ||
299 | } | ||
300 | #endif | ||
301 | __flush_tlb_all(); | 273 | __flush_tlb_all(); |
302 | 274 | ||
303 | if (!after_bootmem && e820_table_end > e820_table_start) | 275 | if (!after_bootmem && pgt_buf_end > pgt_buf_start) |
304 | memblock_x86_reserve_range(e820_table_start << PAGE_SHIFT, | 276 | memblock_x86_reserve_range(pgt_buf_start << PAGE_SHIFT, |
305 | e820_table_end << PAGE_SHIFT, "PGTABLE"); | 277 | pgt_buf_end << PAGE_SHIFT, "PGTABLE"); |
306 | 278 | ||
307 | if (!after_bootmem) | 279 | if (!after_bootmem) |
308 | early_memtest(start, end); | 280 | early_memtest(start, end); |
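For reference, the estimate that find_early_table_space() feeds into memblock_find_in_range() is simply the worst-case number of PUD, PMD and PTE pages needed to map [0, end). A standalone sketch of that arithmetic for a 4 GB direct mapping, assuming no PSE or GB pages so every level is fully populated (constants are the usual x86-64 values; this is our illustration, not the kernel function):

    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define PAGE_SIZE       (1UL << PAGE_SHIFT)
    #define PMD_SHIFT       21
    #define PUD_SHIFT       30
    #define PMD_SIZE        (1UL << PMD_SHIFT)
    #define PUD_SIZE        (1UL << PUD_SHIFT)

    static unsigned long roundup_page(unsigned long x)
    {
            return (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
    }

    int main(void)
    {
            unsigned long end  = 4UL << 30;         /* map the first 4 GB */
            unsigned long puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
            unsigned long pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
            unsigned long ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
            unsigned long tables = roundup_page(puds * 8) +  /* 8 == sizeof(pud_t) */
                                   roundup_page(pmds * 8) +
                                   roundup_page(ptes * 8);

            printf("worst-case page-table buffer: %lu KiB\n", tables >> 10);
            return 0;
    }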
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index c821074b7f0b..73ad7ebd6e9c 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c | |||
@@ -62,10 +62,10 @@ bool __read_mostly __vmalloc_start_set = false; | |||
62 | 62 | ||
63 | static __init void *alloc_low_page(void) | 63 | static __init void *alloc_low_page(void) |
64 | { | 64 | { |
65 | unsigned long pfn = e820_table_end++; | 65 | unsigned long pfn = pgt_buf_end++; |
66 | void *adr; | 66 | void *adr; |
67 | 67 | ||
68 | if (pfn >= e820_table_top) | 68 | if (pfn >= pgt_buf_top) |
69 | panic("alloc_low_page: ran out of memory"); | 69 | panic("alloc_low_page: ran out of memory"); |
70 | 70 | ||
71 | adr = __va(pfn * PAGE_SIZE); | 71 | adr = __va(pfn * PAGE_SIZE); |
@@ -163,8 +163,8 @@ static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd, | |||
163 | if (pmd_idx_kmap_begin != pmd_idx_kmap_end | 163 | if (pmd_idx_kmap_begin != pmd_idx_kmap_end |
164 | && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin | 164 | && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin |
165 | && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end | 165 | && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end |
166 | && ((__pa(pte) >> PAGE_SHIFT) < e820_table_start | 166 | && ((__pa(pte) >> PAGE_SHIFT) < pgt_buf_start |
167 | || (__pa(pte) >> PAGE_SHIFT) >= e820_table_end)) { | 167 | || (__pa(pte) >> PAGE_SHIFT) >= pgt_buf_end)) { |
168 | pte_t *newpte; | 168 | pte_t *newpte; |
169 | int i; | 169 | int i; |
170 | 170 | ||
@@ -644,8 +644,7 @@ void __init find_low_pfn_range(void) | |||
644 | } | 644 | } |
645 | 645 | ||
646 | #ifndef CONFIG_NEED_MULTIPLE_NODES | 646 | #ifndef CONFIG_NEED_MULTIPLE_NODES |
647 | void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn, | 647 | void __init initmem_init(void) |
648 | int acpi, int k8) | ||
649 | { | 648 | { |
650 | #ifdef CONFIG_HIGHMEM | 649 | #ifdef CONFIG_HIGHMEM |
651 | highstart_pfn = highend_pfn = max_pfn; | 650 | highstart_pfn = highend_pfn = max_pfn; |
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 71a59296af80..0aa34669ed3f 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -51,6 +51,7 @@ | |||
51 | #include <asm/numa.h> | 51 | #include <asm/numa.h> |
52 | #include <asm/cacheflush.h> | 52 | #include <asm/cacheflush.h> |
53 | #include <asm/init.h> | 53 | #include <asm/init.h> |
54 | #include <asm/uv/uv.h> | ||
54 | 55 | ||
55 | static int __init parse_direct_gbpages_off(char *arg) | 56 | static int __init parse_direct_gbpages_off(char *arg) |
56 | { | 57 | { |
@@ -105,18 +106,18 @@ void sync_global_pgds(unsigned long start, unsigned long end) | |||
105 | 106 | ||
106 | for (address = start; address <= end; address += PGDIR_SIZE) { | 107 | for (address = start; address <= end; address += PGDIR_SIZE) { |
107 | const pgd_t *pgd_ref = pgd_offset_k(address); | 108 | const pgd_t *pgd_ref = pgd_offset_k(address); |
108 | unsigned long flags; | ||
109 | struct page *page; | 109 | struct page *page; |
110 | 110 | ||
111 | if (pgd_none(*pgd_ref)) | 111 | if (pgd_none(*pgd_ref)) |
112 | continue; | 112 | continue; |
113 | 113 | ||
114 | spin_lock_irqsave(&pgd_lock, flags); | 114 | spin_lock(&pgd_lock); |
115 | list_for_each_entry(page, &pgd_list, lru) { | 115 | list_for_each_entry(page, &pgd_list, lru) { |
116 | pgd_t *pgd; | 116 | pgd_t *pgd; |
117 | spinlock_t *pgt_lock; | 117 | spinlock_t *pgt_lock; |
118 | 118 | ||
119 | pgd = (pgd_t *)page_address(page) + pgd_index(address); | 119 | pgd = (pgd_t *)page_address(page) + pgd_index(address); |
120 | /* the pgt_lock is only needed for Xen */ | ||
120 | pgt_lock = &pgd_page_get_mm(page)->page_table_lock; | 121 | pgt_lock = &pgd_page_get_mm(page)->page_table_lock; |
121 | spin_lock(pgt_lock); | 122 | spin_lock(pgt_lock); |
122 | 123 | ||
@@ -128,7 +129,7 @@ void sync_global_pgds(unsigned long start, unsigned long end) | |||
128 | 129 | ||
129 | spin_unlock(pgt_lock); | 130 | spin_unlock(pgt_lock); |
130 | } | 131 | } |
131 | spin_unlock_irqrestore(&pgd_lock, flags); | 132 | spin_unlock(&pgd_lock); |
132 | } | 133 | } |
133 | } | 134 | } |
134 | 135 | ||
@@ -314,7 +315,7 @@ void __init cleanup_highmap(void) | |||
314 | 315 | ||
315 | static __ref void *alloc_low_page(unsigned long *phys) | 316 | static __ref void *alloc_low_page(unsigned long *phys) |
316 | { | 317 | { |
317 | unsigned long pfn = e820_table_end++; | 318 | unsigned long pfn = pgt_buf_end++; |
318 | void *adr; | 319 | void *adr; |
319 | 320 | ||
320 | if (after_bootmem) { | 321 | if (after_bootmem) { |
@@ -324,7 +325,7 @@ static __ref void *alloc_low_page(unsigned long *phys) | |||
324 | return adr; | 325 | return adr; |
325 | } | 326 | } |
326 | 327 | ||
327 | if (pfn >= e820_table_top) | 328 | if (pfn >= pgt_buf_top) |
328 | panic("alloc_low_page: ran out of memory"); | 329 | panic("alloc_low_page: ran out of memory"); |
329 | 330 | ||
330 | adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE); | 331 | adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE); |
@@ -333,12 +334,28 @@ static __ref void *alloc_low_page(unsigned long *phys) | |||
333 | return adr; | 334 | return adr; |
334 | } | 335 | } |
335 | 336 | ||
337 | static __ref void *map_low_page(void *virt) | ||
338 | { | ||
339 | void *adr; | ||
340 | unsigned long phys, left; | ||
341 | |||
342 | if (after_bootmem) | ||
343 | return virt; | ||
344 | |||
345 | phys = __pa(virt); | ||
346 | left = phys & (PAGE_SIZE - 1); | ||
347 | adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE); | ||
348 | adr = (void *)(((unsigned long)adr) | left); | ||
349 | |||
350 | return adr; | ||
351 | } | ||
352 | |||
336 | static __ref void unmap_low_page(void *adr) | 353 | static __ref void unmap_low_page(void *adr) |
337 | { | 354 | { |
338 | if (after_bootmem) | 355 | if (after_bootmem) |
339 | return; | 356 | return; |
340 | 357 | ||
341 | early_iounmap(adr, PAGE_SIZE); | 358 | early_iounmap((void *)((unsigned long)adr & PAGE_MASK), PAGE_SIZE); |
342 | } | 359 | } |
343 | 360 | ||
344 | static unsigned long __meminit | 361 | static unsigned long __meminit |
@@ -386,15 +403,6 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end, | |||
386 | } | 403 | } |
387 | 404 | ||
388 | static unsigned long __meminit | 405 | static unsigned long __meminit |
389 | phys_pte_update(pmd_t *pmd, unsigned long address, unsigned long end, | ||
390 | pgprot_t prot) | ||
391 | { | ||
392 | pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd); | ||
393 | |||
394 | return phys_pte_init(pte, address, end, prot); | ||
395 | } | ||
396 | |||
397 | static unsigned long __meminit | ||
398 | phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end, | 406 | phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end, |
399 | unsigned long page_size_mask, pgprot_t prot) | 407 | unsigned long page_size_mask, pgprot_t prot) |
400 | { | 408 | { |
@@ -420,8 +428,10 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end, | |||
420 | if (pmd_val(*pmd)) { | 428 | if (pmd_val(*pmd)) { |
421 | if (!pmd_large(*pmd)) { | 429 | if (!pmd_large(*pmd)) { |
422 | spin_lock(&init_mm.page_table_lock); | 430 | spin_lock(&init_mm.page_table_lock); |
423 | last_map_addr = phys_pte_update(pmd, address, | 431 | pte = map_low_page((pte_t *)pmd_page_vaddr(*pmd)); |
432 | last_map_addr = phys_pte_init(pte, address, | ||
424 | end, prot); | 433 | end, prot); |
434 | unmap_low_page(pte); | ||
425 | spin_unlock(&init_mm.page_table_lock); | 435 | spin_unlock(&init_mm.page_table_lock); |
426 | continue; | 436 | continue; |
427 | } | 437 | } |
@@ -468,18 +478,6 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end, | |||
468 | } | 478 | } |
469 | 479 | ||
470 | static unsigned long __meminit | 480 | static unsigned long __meminit |
471 | phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end, | ||
472 | unsigned long page_size_mask, pgprot_t prot) | ||
473 | { | ||
474 | pmd_t *pmd = pmd_offset(pud, 0); | ||
475 | unsigned long last_map_addr; | ||
476 | |||
477 | last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask, prot); | ||
478 | __flush_tlb_all(); | ||
479 | return last_map_addr; | ||
480 | } | ||
481 | |||
482 | static unsigned long __meminit | ||
483 | phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end, | 481 | phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end, |
484 | unsigned long page_size_mask) | 482 | unsigned long page_size_mask) |
485 | { | 483 | { |
@@ -504,8 +502,11 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end, | |||
504 | 502 | ||
505 | if (pud_val(*pud)) { | 503 | if (pud_val(*pud)) { |
506 | if (!pud_large(*pud)) { | 504 | if (!pud_large(*pud)) { |
507 | last_map_addr = phys_pmd_update(pud, addr, end, | 505 | pmd = map_low_page(pmd_offset(pud, 0)); |
506 | last_map_addr = phys_pmd_init(pmd, addr, end, | ||
508 | page_size_mask, prot); | 507 | page_size_mask, prot); |
508 | unmap_low_page(pmd); | ||
509 | __flush_tlb_all(); | ||
509 | continue; | 510 | continue; |
510 | } | 511 | } |
511 | /* | 512 | /* |
@@ -553,17 +554,6 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end, | |||
553 | return last_map_addr; | 554 | return last_map_addr; |
554 | } | 555 | } |
555 | 556 | ||
556 | static unsigned long __meminit | ||
557 | phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end, | ||
558 | unsigned long page_size_mask) | ||
559 | { | ||
560 | pud_t *pud; | ||
561 | |||
562 | pud = (pud_t *)pgd_page_vaddr(*pgd); | ||
563 | |||
564 | return phys_pud_init(pud, addr, end, page_size_mask); | ||
565 | } | ||
566 | |||
567 | unsigned long __meminit | 557 | unsigned long __meminit |
568 | kernel_physical_mapping_init(unsigned long start, | 558 | kernel_physical_mapping_init(unsigned long start, |
569 | unsigned long end, | 559 | unsigned long end, |
@@ -587,8 +577,10 @@ kernel_physical_mapping_init(unsigned long start, | |||
587 | next = end; | 577 | next = end; |
588 | 578 | ||
589 | if (pgd_val(*pgd)) { | 579 | if (pgd_val(*pgd)) { |
590 | last_map_addr = phys_pud_update(pgd, __pa(start), | 580 | pud = map_low_page((pud_t *)pgd_page_vaddr(*pgd)); |
581 | last_map_addr = phys_pud_init(pud, __pa(start), | ||
591 | __pa(end), page_size_mask); | 582 | __pa(end), page_size_mask); |
583 | unmap_low_page(pud); | ||
592 | continue; | 584 | continue; |
593 | } | 585 | } |
594 | 586 | ||
@@ -612,10 +604,9 @@ kernel_physical_mapping_init(unsigned long start, | |||
612 | } | 604 | } |
613 | 605 | ||
614 | #ifndef CONFIG_NUMA | 606 | #ifndef CONFIG_NUMA |
615 | void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn, | 607 | void __init initmem_init(void) |
616 | int acpi, int k8) | ||
617 | { | 608 | { |
618 | memblock_x86_register_active_regions(0, start_pfn, end_pfn); | 609 | memblock_x86_register_active_regions(0, 0, max_pfn); |
619 | } | 610 | } |
620 | #endif | 611 | #endif |
621 | 612 | ||
@@ -908,6 +899,19 @@ const char *arch_vma_name(struct vm_area_struct *vma) | |||
908 | return NULL; | 899 | return NULL; |
909 | } | 900 | } |
910 | 901 | ||
902 | #ifdef CONFIG_X86_UV | ||
903 | #define MIN_MEMORY_BLOCK_SIZE (1 << SECTION_SIZE_BITS) | ||
904 | |||
905 | unsigned long memory_block_size_bytes(void) | ||
906 | { | ||
907 | if (is_uv_system()) { | ||
908 | printk(KERN_INFO "UV: memory block size 2GB\n"); | ||
909 | return 2UL * 1024 * 1024 * 1024; | ||
910 | } | ||
911 | return MIN_MEMORY_BLOCK_SIZE; | ||
912 | } | ||
913 | #endif | ||
914 | |||
911 | #ifdef CONFIG_SPARSEMEM_VMEMMAP | 915 | #ifdef CONFIG_SPARSEMEM_VMEMMAP |
912 | /* | 916 | /* |
913 | * Initialise the sparsemem vmemmap using huge-pages at the PMD level. | 917 | * Initialise the sparsemem vmemmap using huge-pages at the PMD level. |
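One subtlety in the new map_low_page()/unmap_low_page() helpers earlier in this file: the physical address handed in need not be page aligned, so the helper remaps the containing page and ORs the in-page offset back into the returned pointer, and unmap_low_page() masks that offset off again before early_iounmap(). A tiny standalone illustration of that offset round trip (the addresses below are made up):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long phys = 0x12345678UL;              /* arbitrary, unaligned      */
            unsigned long left = phys & (PAGE_SIZE - 1);    /* offset within the page    */
            unsigned long base = phys & PAGE_MASK;          /* what would be remapped    */
            unsigned long virt = 0xffffc90000000000UL;      /* pretend mapping of 'base' */
            unsigned long adr  = virt | left;               /* pointer given to callers  */

            printf("page 0x%lx mapped, offset 0x%lx preserved\n", base, left);
            printf("unmap uses adr & PAGE_MASK = 0x%lx\n", adr & PAGE_MASK);
            return 0;
    }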
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index ebf6d7887a38..9559d360fde7 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c | |||
@@ -26,12 +26,50 @@ static __init int numa_setup(char *opt) | |||
26 | early_param("numa", numa_setup); | 26 | early_param("numa", numa_setup); |
27 | 27 | ||
28 | /* | 28 | /* |
29 | * Which logical CPUs are on which nodes | 29 | * apicid, cpu, node mappings |
30 | */ | 30 | */ |
31 | s16 __apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = { | ||
32 | [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE | ||
33 | }; | ||
34 | |||
31 | cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; | 35 | cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; |
32 | EXPORT_SYMBOL(node_to_cpumask_map); | 36 | EXPORT_SYMBOL(node_to_cpumask_map); |
33 | 37 | ||
34 | /* | 38 | /* |
39 | * Map cpu index to node index | ||
40 | */ | ||
41 | DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE); | ||
42 | EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map); | ||
43 | |||
44 | void __cpuinit numa_set_node(int cpu, int node) | ||
45 | { | ||
46 | int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map); | ||
47 | |||
48 | /* early setting, no percpu area yet */ | ||
49 | if (cpu_to_node_map) { | ||
50 | cpu_to_node_map[cpu] = node; | ||
51 | return; | ||
52 | } | ||
53 | |||
54 | #ifdef CONFIG_DEBUG_PER_CPU_MAPS | ||
55 | if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) { | ||
56 | printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu); | ||
57 | dump_stack(); | ||
58 | return; | ||
59 | } | ||
60 | #endif | ||
61 | per_cpu(x86_cpu_to_node_map, cpu) = node; | ||
62 | |||
63 | if (node != NUMA_NO_NODE) | ||
64 | set_cpu_numa_node(cpu, node); | ||
65 | } | ||
66 | |||
67 | void __cpuinit numa_clear_node(int cpu) | ||
68 | { | ||
69 | numa_set_node(cpu, NUMA_NO_NODE); | ||
70 | } | ||
71 | |||
72 | /* | ||
35 | * Allocate node_to_cpumask_map based on number of available nodes | 73 | * Allocate node_to_cpumask_map based on number of available nodes |
36 | * Requires node_possible_map to be valid. | 74 | * Requires node_possible_map to be valid. |
37 | * | 75 | * |
@@ -57,7 +95,174 @@ void __init setup_node_to_cpumask_map(void) | |||
57 | pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids); | 95 | pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids); |
58 | } | 96 | } |
59 | 97 | ||
60 | #ifdef CONFIG_DEBUG_PER_CPU_MAPS | 98 | /* |
99 | * There are unfortunately some poorly designed mainboards around that | ||
100 | * only connect memory to a single CPU. This breaks the 1:1 cpu->node | ||
101 | * mapping. To avoid this, fill in the mapping for all possible CPUs, | ||
102 | * as the number of CPUs is not known yet. We round robin the existing | ||
103 | * nodes. | ||
104 | */ | ||
105 | void __init numa_init_array(void) | ||
106 | { | ||
107 | int rr, i; | ||
108 | |||
109 | rr = first_node(node_online_map); | ||
110 | for (i = 0; i < nr_cpu_ids; i++) { | ||
111 | if (early_cpu_to_node(i) != NUMA_NO_NODE) | ||
112 | continue; | ||
113 | numa_set_node(i, rr); | ||
114 | rr = next_node(rr, node_online_map); | ||
115 | if (rr == MAX_NUMNODES) | ||
116 | rr = first_node(node_online_map); | ||
117 | } | ||
118 | } | ||
119 | |||
120 | static __init int find_near_online_node(int node) | ||
121 | { | ||
122 | int n, val; | ||
123 | int min_val = INT_MAX; | ||
124 | int best_node = -1; | ||
125 | |||
126 | for_each_online_node(n) { | ||
127 | val = node_distance(node, n); | ||
128 | |||
129 | if (val < min_val) { | ||
130 | min_val = val; | ||
131 | best_node = n; | ||
132 | } | ||
133 | } | ||
134 | |||
135 | return best_node; | ||
136 | } | ||
137 | |||
138 | /* | ||
139 | * Set up early cpu_to_node. | ||
140 | * | ||
141 | * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] | ||
142 | * and apicid_to_node[] tables have valid entries for a CPU. | ||
143 | * This means we skip cpu_to_node[] initialisation for NUMA | ||
144 | * emulation and the fake-node case (when running a kernel compiled | ||
145 | * for NUMA on a non-NUMA box), which is OK as cpu_to_node[] | ||
146 | * is already initialized in a round-robin manner by numa_init_array | ||
147 | * prior to this call, and that initialization is good enough | ||
148 | * for the fake NUMA cases. | ||
149 | * | ||
150 | * Called before the per_cpu areas are setup. | ||
151 | */ | ||
152 | void __init init_cpu_to_node(void) | ||
153 | { | ||
154 | int cpu; | ||
155 | u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid); | ||
156 | |||
157 | BUG_ON(cpu_to_apicid == NULL); | ||
158 | |||
159 | for_each_possible_cpu(cpu) { | ||
160 | int node = numa_cpu_node(cpu); | ||
161 | |||
162 | if (node == NUMA_NO_NODE) | ||
163 | continue; | ||
164 | if (!node_online(node)) | ||
165 | node = find_near_online_node(node); | ||
166 | numa_set_node(cpu, node); | ||
167 | } | ||
168 | } | ||
169 | |||
170 | #ifndef CONFIG_DEBUG_PER_CPU_MAPS | ||
171 | |||
172 | # ifndef CONFIG_NUMA_EMU | ||
173 | void __cpuinit numa_add_cpu(int cpu) | ||
174 | { | ||
175 | cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); | ||
176 | } | ||
177 | |||
178 | void __cpuinit numa_remove_cpu(int cpu) | ||
179 | { | ||
180 | cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); | ||
181 | } | ||
182 | # endif /* !CONFIG_NUMA_EMU */ | ||
183 | |||
184 | #else /* !CONFIG_DEBUG_PER_CPU_MAPS */ | ||
185 | |||
186 | int __cpu_to_node(int cpu) | ||
187 | { | ||
188 | if (early_per_cpu_ptr(x86_cpu_to_node_map)) { | ||
189 | printk(KERN_WARNING | ||
190 | "cpu_to_node(%d): usage too early!\n", cpu); | ||
191 | dump_stack(); | ||
192 | return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu]; | ||
193 | } | ||
194 | return per_cpu(x86_cpu_to_node_map, cpu); | ||
195 | } | ||
196 | EXPORT_SYMBOL(__cpu_to_node); | ||
197 | |||
198 | /* | ||
199 | * Same function as cpu_to_node() but used if called before the | ||
200 | * per_cpu areas are setup. | ||
201 | */ | ||
202 | int early_cpu_to_node(int cpu) | ||
203 | { | ||
204 | if (early_per_cpu_ptr(x86_cpu_to_node_map)) | ||
205 | return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu]; | ||
206 | |||
207 | if (!cpu_possible(cpu)) { | ||
208 | printk(KERN_WARNING | ||
209 | "early_cpu_to_node(%d): no per_cpu area!\n", cpu); | ||
210 | dump_stack(); | ||
211 | return NUMA_NO_NODE; | ||
212 | } | ||
213 | return per_cpu(x86_cpu_to_node_map, cpu); | ||
214 | } | ||
215 | |||
216 | struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable) | ||
217 | { | ||
218 | int node = early_cpu_to_node(cpu); | ||
219 | struct cpumask *mask; | ||
220 | char buf[64]; | ||
221 | |||
222 | if (node == NUMA_NO_NODE) { | ||
223 | /* early_cpu_to_node() already emits a warning and trace */ | ||
224 | return NULL; | ||
225 | } | ||
226 | mask = node_to_cpumask_map[node]; | ||
227 | if (!mask) { | ||
228 | pr_err("node_to_cpumask_map[%i] NULL\n", node); | ||
229 | dump_stack(); | ||
230 | return NULL; | ||
231 | } | ||
232 | |||
233 | cpulist_scnprintf(buf, sizeof(buf), mask); | ||
234 | printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n", | ||
235 | enable ? "numa_add_cpu" : "numa_remove_cpu", | ||
236 | cpu, node, buf); | ||
237 | return mask; | ||
238 | } | ||
239 | |||
240 | # ifndef CONFIG_NUMA_EMU | ||
241 | static void __cpuinit numa_set_cpumask(int cpu, int enable) | ||
242 | { | ||
243 | struct cpumask *mask; | ||
244 | |||
245 | mask = debug_cpumask_set_cpu(cpu, enable); | ||
246 | if (!mask) | ||
247 | return; | ||
248 | |||
249 | if (enable) | ||
250 | cpumask_set_cpu(cpu, mask); | ||
251 | else | ||
252 | cpumask_clear_cpu(cpu, mask); | ||
253 | } | ||
254 | |||
255 | void __cpuinit numa_add_cpu(int cpu) | ||
256 | { | ||
257 | numa_set_cpumask(cpu, 1); | ||
258 | } | ||
259 | |||
260 | void __cpuinit numa_remove_cpu(int cpu) | ||
261 | { | ||
262 | numa_set_cpumask(cpu, 0); | ||
263 | } | ||
264 | # endif /* !CONFIG_NUMA_EMU */ | ||
265 | |||
61 | /* | 266 | /* |
62 | * Returns a pointer to the bitmask of CPUs on Node 'node'. | 267 | * Returns a pointer to the bitmask of CPUs on Node 'node'. |
63 | */ | 268 | */ |
@@ -80,4 +285,5 @@ const struct cpumask *cpumask_of_node(int node) | |||
80 | return node_to_cpumask_map[node]; | 285 | return node_to_cpumask_map[node]; |
81 | } | 286 | } |
82 | EXPORT_SYMBOL(cpumask_of_node); | 287 | EXPORT_SYMBOL(cpumask_of_node); |
83 | #endif | 288 | |
289 | #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ | ||
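The round-robin fallback described in the numa_init_array() comment above is easy to model outside the kernel: every CPU that firmware left without a node is handed the next online node in turn. The sketch below is a plain userspace approximation of that policy; NR_CPUS, NR_NODES and the initial cpu_to_node contents are invented for the example and are not kernel values.

/* Userspace sketch of the numa_init_array() round-robin policy: any CPU
 * that has no node yet is assigned an online node in turn. */
#include <stdio.h>

#define NR_CPUS       8      /* illustrative sizes, not kernel constants */
#define NR_NODES      4
#define NUMA_NO_NODE  (-1)

int main(void)
{
	int online[NR_NODES] = { 1, 0, 1, 0 };     /* nodes 0 and 2 online */
	int cpu_to_node[NR_CPUS] = { 0, NUMA_NO_NODE, 2, NUMA_NO_NODE,
				     NUMA_NO_NODE, 0, NUMA_NO_NODE, 2 };
	int rr = 0, cpu;

	/* find the first online node, like first_node(node_online_map) */
	while (rr < NR_NODES && !online[rr])
		rr++;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (cpu_to_node[cpu] != NUMA_NO_NODE)
			continue;               /* firmware already told us */
		cpu_to_node[cpu] = rr;          /* numa_set_node(cpu, rr) */
		do {                            /* next online node, wrapping */
			rr = (rr + 1) % NR_NODES;
		} while (!online[rr]);
	}

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d -> node %d\n", cpu, cpu_to_node[cpu]);
	return 0;
}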
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index 84a3e4c9f277..bde3906420df 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c | |||
@@ -110,6 +110,12 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); | |||
110 | 110 | ||
111 | static unsigned long kva_start_pfn; | 111 | static unsigned long kva_start_pfn; |
112 | static unsigned long kva_pages; | 112 | static unsigned long kva_pages; |
113 | |||
114 | int __cpuinit numa_cpu_node(int cpu) | ||
115 | { | ||
116 | return apic->x86_32_numa_cpu_node(cpu); | ||
117 | } | ||
118 | |||
113 | /* | 119 | /* |
114 | * FLAT - support for basic PC memory model with discontig enabled, essentially | 120 | * FLAT - support for basic PC memory model with discontig enabled, essentially |
115 | * a single node with all available processors in it with a flat | 121 | * a single node with all available processors in it with a flat |
@@ -346,8 +352,7 @@ static void init_remap_allocator(int nid) | |||
346 | (ulong) node_remap_end_vaddr[nid]); | 352 | (ulong) node_remap_end_vaddr[nid]); |
347 | } | 353 | } |
348 | 354 | ||
349 | void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn, | 355 | void __init initmem_init(void) |
350 | int acpi, int k8) | ||
351 | { | 356 | { |
352 | int nid; | 357 | int nid; |
353 | long kva_target_pfn; | 358 | long kva_target_pfn; |
@@ -361,6 +366,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn, | |||
361 | */ | 366 | */ |
362 | 367 | ||
363 | get_memcfg_numa(); | 368 | get_memcfg_numa(); |
369 | numa_init_array(); | ||
364 | 370 | ||
365 | kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE); | 371 | kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE); |
366 | 372 | ||
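On 32-bit, the new numa_cpu_node() above defers to the APIC driver's x86_32_numa_cpu_node() callback; the 64-bit variant at the end of numa_64.c below resolves the CPU's APIC ID through __apicid_to_node[]. The following is a userspace sketch of that apicid-to-node lookup only; the table sizes and contents are made up for illustration.

/* Sketch of the apicid-to-node resolution done by numa_cpu_node():
 * look the CPU's APIC ID up in a per-APIC-ID node table, fall back to
 * NUMA_NO_NODE. Values are illustrative, not taken from real firmware. */
#include <stdio.h>

#define MAX_LOCAL_APIC 16
#define BAD_APICID     0xffff
#define NUMA_NO_NODE   (-1)

static int apicid_to_node[MAX_LOCAL_APIC];
static unsigned int cpu_to_apicid[] = { 0, 2, 4, BAD_APICID };

static int numa_cpu_node(int cpu)
{
	unsigned int apicid = cpu_to_apicid[cpu];

	if (apicid != BAD_APICID && apicid < MAX_LOCAL_APIC)
		return apicid_to_node[apicid];
	return NUMA_NO_NODE;
}

int main(void)
{
	int i;

	for (i = 0; i < MAX_LOCAL_APIC; i++)
		apicid_to_node[i] = NUMA_NO_NODE;
	apicid_to_node[0] = 0;          /* pretend SRAT mapped these */
	apicid_to_node[2] = 0;
	apicid_to_node[4] = 1;

	for (i = 0; i < 4; i++)
		printf("cpu %d -> node %d\n", i, numa_cpu_node(i));
	return 0;
}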
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 95ea1551eebc..9ec0f209a6a4 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c | |||
@@ -13,31 +13,30 @@ | |||
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/nodemask.h> | 14 | #include <linux/nodemask.h> |
15 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
16 | #include <linux/acpi.h> | ||
16 | 17 | ||
17 | #include <asm/e820.h> | 18 | #include <asm/e820.h> |
18 | #include <asm/proto.h> | 19 | #include <asm/proto.h> |
19 | #include <asm/dma.h> | 20 | #include <asm/dma.h> |
20 | #include <asm/numa.h> | ||
21 | #include <asm/acpi.h> | 21 | #include <asm/acpi.h> |
22 | #include <asm/amd_nb.h> | 22 | #include <asm/amd_nb.h> |
23 | 23 | ||
24 | #include "numa_internal.h" | ||
25 | |||
24 | struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; | 26 | struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; |
25 | EXPORT_SYMBOL(node_data); | 27 | EXPORT_SYMBOL(node_data); |
26 | 28 | ||
27 | struct memnode memnode; | 29 | nodemask_t numa_nodes_parsed __initdata; |
28 | 30 | ||
29 | s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = { | 31 | struct memnode memnode; |
30 | [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE | ||
31 | }; | ||
32 | 32 | ||
33 | static unsigned long __initdata nodemap_addr; | 33 | static unsigned long __initdata nodemap_addr; |
34 | static unsigned long __initdata nodemap_size; | 34 | static unsigned long __initdata nodemap_size; |
35 | 35 | ||
36 | /* | 36 | static struct numa_meminfo numa_meminfo __initdata; |
37 | * Map cpu index to node index | 37 | |
38 | */ | 38 | static int numa_distance_cnt; |
39 | DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE); | 39 | static u8 *numa_distance; |
40 | EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map); | ||
41 | 40 | ||
42 | /* | 41 | /* |
43 | * Given a shift value, try to populate memnodemap[] | 42 | * Given a shift value, try to populate memnodemap[] |
@@ -46,16 +45,15 @@ EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map); | |||
46 | * 0 if memnodmap[] too small (of shift too small) | 45 | * 0 if memnodmap[] too small (of shift too small) |
47 | * -1 if node overlap or lost ram (shift too big) | 46 | * -1 if node overlap or lost ram (shift too big) |
48 | */ | 47 | */ |
49 | static int __init populate_memnodemap(const struct bootnode *nodes, | 48 | static int __init populate_memnodemap(const struct numa_meminfo *mi, int shift) |
50 | int numnodes, int shift, int *nodeids) | ||
51 | { | 49 | { |
52 | unsigned long addr, end; | 50 | unsigned long addr, end; |
53 | int i, res = -1; | 51 | int i, res = -1; |
54 | 52 | ||
55 | memset(memnodemap, 0xff, sizeof(s16)*memnodemapsize); | 53 | memset(memnodemap, 0xff, sizeof(s16)*memnodemapsize); |
56 | for (i = 0; i < numnodes; i++) { | 54 | for (i = 0; i < mi->nr_blks; i++) { |
57 | addr = nodes[i].start; | 55 | addr = mi->blk[i].start; |
58 | end = nodes[i].end; | 56 | end = mi->blk[i].end; |
59 | if (addr >= end) | 57 | if (addr >= end) |
60 | continue; | 58 | continue; |
61 | if ((end >> shift) >= memnodemapsize) | 59 | if ((end >> shift) >= memnodemapsize) |
@@ -63,12 +61,7 @@ static int __init populate_memnodemap(const struct bootnode *nodes, | |||
63 | do { | 61 | do { |
64 | if (memnodemap[addr >> shift] != NUMA_NO_NODE) | 62 | if (memnodemap[addr >> shift] != NUMA_NO_NODE) |
65 | return -1; | 63 | return -1; |
66 | 64 | memnodemap[addr >> shift] = mi->blk[i].nid; | |
67 | if (!nodeids) | ||
68 | memnodemap[addr >> shift] = i; | ||
69 | else | ||
70 | memnodemap[addr >> shift] = nodeids[i]; | ||
71 | |||
72 | addr += (1UL << shift); | 65 | addr += (1UL << shift); |
73 | } while (addr < end); | 66 | } while (addr < end); |
74 | res = 1; | 67 | res = 1; |
@@ -86,7 +79,7 @@ static int __init allocate_cachealigned_memnodemap(void) | |||
86 | 79 | ||
87 | addr = 0x8000; | 80 | addr = 0x8000; |
88 | nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES); | 81 | nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES); |
89 | nodemap_addr = memblock_find_in_range(addr, max_pfn<<PAGE_SHIFT, | 82 | nodemap_addr = memblock_find_in_range(addr, get_max_mapped(), |
90 | nodemap_size, L1_CACHE_BYTES); | 83 | nodemap_size, L1_CACHE_BYTES); |
91 | if (nodemap_addr == MEMBLOCK_ERROR) { | 84 | if (nodemap_addr == MEMBLOCK_ERROR) { |
92 | printk(KERN_ERR | 85 | printk(KERN_ERR |
@@ -106,16 +99,15 @@ static int __init allocate_cachealigned_memnodemap(void) | |||
106 | * The LSB of all start and end addresses in the node map is the value of the | 99 | * The LSB of all start and end addresses in the node map is the value of the |
107 | * maximum possible shift. | 100 | * maximum possible shift. |
108 | */ | 101 | */ |
109 | static int __init extract_lsb_from_nodes(const struct bootnode *nodes, | 102 | static int __init extract_lsb_from_nodes(const struct numa_meminfo *mi) |
110 | int numnodes) | ||
111 | { | 103 | { |
112 | int i, nodes_used = 0; | 104 | int i, nodes_used = 0; |
113 | unsigned long start, end; | 105 | unsigned long start, end; |
114 | unsigned long bitfield = 0, memtop = 0; | 106 | unsigned long bitfield = 0, memtop = 0; |
115 | 107 | ||
116 | for (i = 0; i < numnodes; i++) { | 108 | for (i = 0; i < mi->nr_blks; i++) { |
117 | start = nodes[i].start; | 109 | start = mi->blk[i].start; |
118 | end = nodes[i].end; | 110 | end = mi->blk[i].end; |
119 | if (start >= end) | 111 | if (start >= end) |
120 | continue; | 112 | continue; |
121 | bitfield |= start; | 113 | bitfield |= start; |
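The shift that extract_lsb_from_nodes() computes is simply the position of the lowest set bit across the block start addresses, which bounds how coarse a memnodemap granule can be. Below is a simplified standalone sketch of that calculation (it uses the GCC __builtin_ctzll builtin, the block addresses are invented, and the real function additionally counts how many blocks it actually used before deriving memnodemapsize from the top of memory).

/* Sketch: derive the memnodemap hash shift from the lowest set bit common
 * to the node block start addresses. */
#include <stdio.h>

int main(void)
{
	unsigned long long start[] = { 0x000000000ULL, 0x080000000ULL,
				       0x100000000ULL };
	unsigned long long bitfield = 0, memtop = 0x180000000ULL, mapsize;
	int i, shift;

	for (i = 0; i < 3; i++)
		bitfield |= start[i];

	/* position of the lowest set bit = maximum possible shift */
	shift = bitfield ? __builtin_ctzll(bitfield) : 63;
	mapsize = (memtop >> shift) + 1;

	printf("hash shift = %d, memnodemap entries = %llu\n", shift, mapsize);
	return 0;
}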
@@ -131,18 +123,17 @@ static int __init extract_lsb_from_nodes(const struct bootnode *nodes, | |||
131 | return i; | 123 | return i; |
132 | } | 124 | } |
133 | 125 | ||
134 | int __init compute_hash_shift(struct bootnode *nodes, int numnodes, | 126 | static int __init compute_hash_shift(const struct numa_meminfo *mi) |
135 | int *nodeids) | ||
136 | { | 127 | { |
137 | int shift; | 128 | int shift; |
138 | 129 | ||
139 | shift = extract_lsb_from_nodes(nodes, numnodes); | 130 | shift = extract_lsb_from_nodes(mi); |
140 | if (allocate_cachealigned_memnodemap()) | 131 | if (allocate_cachealigned_memnodemap()) |
141 | return -1; | 132 | return -1; |
142 | printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n", | 133 | printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n", |
143 | shift); | 134 | shift); |
144 | 135 | ||
145 | if (populate_memnodemap(nodes, numnodes, shift, nodeids) != 1) { | 136 | if (populate_memnodemap(mi, shift) != 1) { |
146 | printk(KERN_INFO "Your memory is not aligned you need to " | 137 | printk(KERN_INFO "Your memory is not aligned you need to " |
147 | "rebuild your kernel with a bigger NODEMAPSIZE " | 138 | "rebuild your kernel with a bigger NODEMAPSIZE " |
148 | "shift=%d\n", shift); | 139 | "shift=%d\n", shift); |
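What compute_hash_shift() ultimately builds is a flat physical-address-to-node lookup: every (1 << shift)-sized chunk of the address space records the nid of the memblk that covers it, which is exactly what populate_memnodemap() fills in. A toy version over an invented block layout and shift:

/* Toy populate_memnodemap(): mark each (1 << shift) chunk with its node.
 * The block layout, shift and map size are invented for the example. */
#include <stdio.h>
#include <string.h>

#define MAPSIZE 16
#define NO_NODE (-1)

struct blk { unsigned long start, end; int nid; };

int main(void)
{
	/* two nodes, four 0x1000-sized chunks each */
	struct blk blks[] = {
		{ 0x0000, 0x4000, 0 },
		{ 0x4000, 0x8000, 1 },
	};
	int shift = 12, map[MAPSIZE], i;
	unsigned long addr;

	memset(map, 0xff, sizeof(map));           /* every entry = NO_NODE */
	for (i = 0; i < 2; i++)
		for (addr = blks[i].start; addr < blks[i].end;
		     addr += 1UL << shift)
			map[addr >> shift] = blks[i].nid;

	for (i = 0; i < MAPSIZE; i++)
		printf("chunk %2d -> node %d\n", i, map[i]);
	return 0;
}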
@@ -188,6 +179,63 @@ static void * __init early_node_mem(int nodeid, unsigned long start, | |||
188 | return NULL; | 179 | return NULL; |
189 | } | 180 | } |
190 | 181 | ||
182 | static int __init numa_add_memblk_to(int nid, u64 start, u64 end, | ||
183 | struct numa_meminfo *mi) | ||
184 | { | ||
185 | /* ignore zero length blks */ | ||
186 | if (start == end) | ||
187 | return 0; | ||
188 | |||
189 | /* whine about and ignore invalid blks */ | ||
190 | if (start > end || nid < 0 || nid >= MAX_NUMNODES) { | ||
191 | pr_warning("NUMA: Warning: invalid memblk node %d (%Lx-%Lx)\n", | ||
192 | nid, start, end); | ||
193 | return 0; | ||
194 | } | ||
195 | |||
196 | if (mi->nr_blks >= NR_NODE_MEMBLKS) { | ||
197 | pr_err("NUMA: too many memblk ranges\n"); | ||
198 | return -EINVAL; | ||
199 | } | ||
200 | |||
201 | mi->blk[mi->nr_blks].start = start; | ||
202 | mi->blk[mi->nr_blks].end = end; | ||
203 | mi->blk[mi->nr_blks].nid = nid; | ||
204 | mi->nr_blks++; | ||
205 | return 0; | ||
206 | } | ||
207 | |||
208 | /** | ||
209 | * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo | ||
210 | * @idx: Index of memblk to remove | ||
211 | * @mi: numa_meminfo to remove memblk from | ||
212 | * | ||
213 | * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and | ||
214 | * decrementing @mi->nr_blks. | ||
215 | */ | ||
216 | void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi) | ||
217 | { | ||
218 | mi->nr_blks--; | ||
219 | memmove(&mi->blk[idx], &mi->blk[idx + 1], | ||
220 | (mi->nr_blks - idx) * sizeof(mi->blk[0])); | ||
221 | } | ||
222 | |||
223 | /** | ||
224 | * numa_add_memblk - Add one numa_memblk to numa_meminfo | ||
225 | * @nid: NUMA node ID of the new memblk | ||
226 | * @start: Start address of the new memblk | ||
227 | * @end: End address of the new memblk | ||
228 | * | ||
229 | * Add a new memblk to the default numa_meminfo. | ||
230 | * | ||
231 | * RETURNS: | ||
232 | * 0 on success, -errno on failure. | ||
233 | */ | ||
234 | int __init numa_add_memblk(int nid, u64 start, u64 end) | ||
235 | { | ||
236 | return numa_add_memblk_to(nid, start, end, &numa_meminfo); | ||
237 | } | ||
238 | |||
191 | /* Initialize bootmem allocator for a node */ | 239 | /* Initialize bootmem allocator for a node */ |
192 | void __init | 240 | void __init |
193 | setup_node_bootmem(int nodeid, unsigned long start, unsigned long end) | 241 | setup_node_bootmem(int nodeid, unsigned long start, unsigned long end) |
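The numa_meminfo bookkeeping added above is a bounded array of (start, end, nid) blocks; its definition lives in numa_internal.h, which is not part of this hunk, so the struct layout and the NR_NODE_MEMBLKS value below are reconstructions for illustration of the add/remove helpers.

/* Sketch of the numa_meminfo bookkeeping: append a block, drop a block.
 * Struct layout and NR_NODE_MEMBLKS are assumptions for the example. */
#include <stdio.h>
#include <string.h>

#define NR_NODE_MEMBLKS 8

struct numa_memblk { unsigned long long start, end; int nid; };
struct numa_meminfo { int nr_blks; struct numa_memblk blk[NR_NODE_MEMBLKS]; };

static int add_memblk(struct numa_meminfo *mi, int nid,
		      unsigned long long start, unsigned long long end)
{
	if (start == end)
		return 0;                       /* ignore empty blocks */
	if (mi->nr_blks >= NR_NODE_MEMBLKS)
		return -1;                      /* table full */
	mi->blk[mi->nr_blks++] = (struct numa_memblk){ start, end, nid };
	return 0;
}

static void remove_memblk(struct numa_meminfo *mi, int idx)
{
	mi->nr_blks--;                          /* shift the tail down */
	memmove(&mi->blk[idx], &mi->blk[idx + 1],
		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
}

int main(void)
{
	struct numa_meminfo mi = { 0 };
	int i;

	add_memblk(&mi, 0, 0x0, 0x80000000ULL);
	add_memblk(&mi, 1, 0x80000000ULL, 0x100000000ULL);
	add_memblk(&mi, 1, 0x100000000ULL, 0x180000000ULL);
	remove_memblk(&mi, 1);                  /* drop the middle block */

	for (i = 0; i < mi.nr_blks; i++)
		printf("blk %d: node %d %llx-%llx\n", i, mi.blk[i].nid,
		       mi.blk[i].start, mi.blk[i].end);
	return 0;
}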
@@ -234,696 +282,386 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end) | |||
234 | node_set_online(nodeid); | 282 | node_set_online(nodeid); |
235 | } | 283 | } |
236 | 284 | ||
237 | /* | 285 | /** |
238 | * There are unfortunately some poorly designed mainboards around that | 286 | * numa_cleanup_meminfo - Cleanup a numa_meminfo |
239 | * only connect memory to a single CPU. This breaks the 1:1 cpu->node | 287 | * @mi: numa_meminfo to clean up |
240 | * mapping. To avoid this fill in the mapping for all possible CPUs, | 288 | * |
241 | * as the number of CPUs is not known yet. We round robin the existing | 289 | * Sanitize @mi by merging and removing unncessary memblks. Also check for |
242 | * nodes. | 290 | * conflicts and clear unused memblks. |
291 | * | ||
292 | * RETURNS: | ||
293 | * 0 on success, -errno on failure. | ||
243 | */ | 294 | */ |
244 | void __init numa_init_array(void) | 295 | int __init numa_cleanup_meminfo(struct numa_meminfo *mi) |
245 | { | 296 | { |
246 | int rr, i; | 297 | const u64 low = 0; |
298 | const u64 high = (u64)max_pfn << PAGE_SHIFT; | ||
299 | int i, j, k; | ||
247 | 300 | ||
248 | rr = first_node(node_online_map); | 301 | for (i = 0; i < mi->nr_blks; i++) { |
249 | for (i = 0; i < nr_cpu_ids; i++) { | 302 | struct numa_memblk *bi = &mi->blk[i]; |
250 | if (early_cpu_to_node(i) != NUMA_NO_NODE) | ||
251 | continue; | ||
252 | numa_set_node(i, rr); | ||
253 | rr = next_node(rr, node_online_map); | ||
254 | if (rr == MAX_NUMNODES) | ||
255 | rr = first_node(node_online_map); | ||
256 | } | ||
257 | } | ||
258 | |||
259 | #ifdef CONFIG_NUMA_EMU | ||
260 | /* Numa emulation */ | ||
261 | static struct bootnode nodes[MAX_NUMNODES] __initdata; | ||
262 | static struct bootnode physnodes[MAX_NUMNODES] __cpuinitdata; | ||
263 | static char *cmdline __initdata; | ||
264 | 303 | ||
265 | void __init numa_emu_cmdline(char *str) | 304 | /* make sure all blocks are inside the limits */ |
266 | { | 305 | bi->start = max(bi->start, low); |
267 | cmdline = str; | 306 | bi->end = min(bi->end, high); |
268 | } | ||
269 | 307 | ||
270 | static int __init setup_physnodes(unsigned long start, unsigned long end, | 308 | /* and there's no empty block */ |
271 | int acpi, int amd) | 309 | if (bi->start == bi->end) { |
272 | { | 310 | numa_remove_memblk_from(i--, mi); |
273 | int ret = 0; | ||
274 | int i; | ||
275 | |||
276 | memset(physnodes, 0, sizeof(physnodes)); | ||
277 | #ifdef CONFIG_ACPI_NUMA | ||
278 | if (acpi) | ||
279 | acpi_get_nodes(physnodes, start, end); | ||
280 | #endif | ||
281 | #ifdef CONFIG_AMD_NUMA | ||
282 | if (amd) | ||
283 | amd_get_nodes(physnodes); | ||
284 | #endif | ||
285 | /* | ||
286 | * Basic sanity checking on the physical node map: there may be errors | ||
287 | * if the SRAT or AMD code incorrectly reported the topology or the mem= | ||
288 | * kernel parameter is used. | ||
289 | */ | ||
290 | for (i = 0; i < MAX_NUMNODES; i++) { | ||
291 | if (physnodes[i].start == physnodes[i].end) | ||
292 | continue; | ||
293 | if (physnodes[i].start > end) { | ||
294 | physnodes[i].end = physnodes[i].start; | ||
295 | continue; | ||
296 | } | ||
297 | if (physnodes[i].end < start) { | ||
298 | physnodes[i].start = physnodes[i].end; | ||
299 | continue; | 311 | continue; |
300 | } | 312 | } |
301 | if (physnodes[i].start < start) | ||
302 | physnodes[i].start = start; | ||
303 | if (physnodes[i].end > end) | ||
304 | physnodes[i].end = end; | ||
305 | ret++; | ||
306 | } | ||
307 | |||
308 | /* | ||
309 | * If no physical topology was detected, a single node is faked to cover | ||
310 | * the entire address space. | ||
311 | */ | ||
312 | if (!ret) { | ||
313 | physnodes[ret].start = start; | ||
314 | physnodes[ret].end = end; | ||
315 | ret = 1; | ||
316 | } | ||
317 | return ret; | ||
318 | } | ||
319 | |||
320 | static void __init fake_physnodes(int acpi, int amd, int nr_nodes) | ||
321 | { | ||
322 | int i; | ||
323 | |||
324 | BUG_ON(acpi && amd); | ||
325 | #ifdef CONFIG_ACPI_NUMA | ||
326 | if (acpi) | ||
327 | acpi_fake_nodes(nodes, nr_nodes); | ||
328 | #endif | ||
329 | #ifdef CONFIG_AMD_NUMA | ||
330 | if (amd) | ||
331 | amd_fake_nodes(nodes, nr_nodes); | ||
332 | #endif | ||
333 | if (!acpi && !amd) | ||
334 | for (i = 0; i < nr_cpu_ids; i++) | ||
335 | numa_set_node(i, 0); | ||
336 | } | ||
337 | |||
338 | /* | ||
339 | * Setups up nid to range from addr to addr + size. If the end | ||
340 | * boundary is greater than max_addr, then max_addr is used instead. | ||
341 | * The return value is 0 if there is additional memory left for | ||
342 | * allocation past addr and -1 otherwise. addr is adjusted to be at | ||
343 | * the end of the node. | ||
344 | */ | ||
345 | static int __init setup_node_range(int nid, u64 *addr, u64 size, u64 max_addr) | ||
346 | { | ||
347 | int ret = 0; | ||
348 | nodes[nid].start = *addr; | ||
349 | *addr += size; | ||
350 | if (*addr >= max_addr) { | ||
351 | *addr = max_addr; | ||
352 | ret = -1; | ||
353 | } | ||
354 | nodes[nid].end = *addr; | ||
355 | node_set(nid, node_possible_map); | ||
356 | printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid, | ||
357 | nodes[nid].start, nodes[nid].end, | ||
358 | (nodes[nid].end - nodes[nid].start) >> 20); | ||
359 | return ret; | ||
360 | } | ||
361 | |||
362 | /* | ||
363 | * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr | ||
364 | * to max_addr. The return value is the number of nodes allocated. | ||
365 | */ | ||
366 | static int __init split_nodes_interleave(u64 addr, u64 max_addr, int nr_nodes) | ||
367 | { | ||
368 | nodemask_t physnode_mask = NODE_MASK_NONE; | ||
369 | u64 size; | ||
370 | int big; | ||
371 | int ret = 0; | ||
372 | int i; | ||
373 | |||
374 | if (nr_nodes <= 0) | ||
375 | return -1; | ||
376 | if (nr_nodes > MAX_NUMNODES) { | ||
377 | pr_info("numa=fake=%d too large, reducing to %d\n", | ||
378 | nr_nodes, MAX_NUMNODES); | ||
379 | nr_nodes = MAX_NUMNODES; | ||
380 | } | ||
381 | |||
382 | size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / nr_nodes; | ||
383 | /* | ||
384 | * Calculate the number of big nodes that can be allocated as a result | ||
385 | * of consolidating the remainder. | ||
386 | */ | ||
387 | big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) / | ||
388 | FAKE_NODE_MIN_SIZE; | ||
389 | |||
390 | size &= FAKE_NODE_MIN_HASH_MASK; | ||
391 | if (!size) { | ||
392 | pr_err("Not enough memory for each node. " | ||
393 | "NUMA emulation disabled.\n"); | ||
394 | return -1; | ||
395 | } | ||
396 | 313 | ||
397 | for (i = 0; i < MAX_NUMNODES; i++) | 314 | for (j = i + 1; j < mi->nr_blks; j++) { |
398 | if (physnodes[i].start != physnodes[i].end) | 315 | struct numa_memblk *bj = &mi->blk[j]; |
399 | node_set(i, physnode_mask); | 316 | unsigned long start, end; |
400 | |||
401 | /* | ||
402 | * Continue to fill physical nodes with fake nodes until there is no | ||
403 | * memory left on any of them. | ||
404 | */ | ||
405 | while (nodes_weight(physnode_mask)) { | ||
406 | for_each_node_mask(i, physnode_mask) { | ||
407 | u64 end = physnodes[i].start + size; | ||
408 | u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN); | ||
409 | |||
410 | if (ret < big) | ||
411 | end += FAKE_NODE_MIN_SIZE; | ||
412 | 317 | ||
413 | /* | 318 | /* |
414 | * Continue to add memory to this fake node if its | 319 | * See whether there are overlapping blocks. Whine |
415 | * non-reserved memory is less than the per-node size. | 320 | * about but allow overlaps of the same nid. They |
321 | * will be merged below. | ||
416 | */ | 322 | */ |
417 | while (end - physnodes[i].start - | 323 | if (bi->end > bj->start && bi->start < bj->end) { |
418 | memblock_x86_hole_size(physnodes[i].start, end) < size) { | 324 | if (bi->nid != bj->nid) { |
419 | end += FAKE_NODE_MIN_SIZE; | 325 | pr_err("NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n", |
420 | if (end > physnodes[i].end) { | 326 | bi->nid, bi->start, bi->end, |
421 | end = physnodes[i].end; | 327 | bj->nid, bj->start, bj->end); |
422 | break; | 328 | return -EINVAL; |
423 | } | 329 | } |
330 | pr_warning("NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n", | ||
331 | bi->nid, bi->start, bi->end, | ||
332 | bj->start, bj->end); | ||
424 | } | 333 | } |
425 | 334 | ||
426 | /* | 335 | /* |
427 | * If there won't be at least FAKE_NODE_MIN_SIZE of | 336 | * Join together blocks on the same node, holes |
428 | * non-reserved memory in ZONE_DMA32 for the next node, | 337 | * between which don't overlap with memory on other |
429 | * this one must extend to the boundary. | 338 | * nodes. |
430 | */ | 339 | */ |
431 | if (end < dma32_end && dma32_end - end - | 340 | if (bi->nid != bj->nid) |
432 | memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) | 341 | continue; |
433 | end = dma32_end; | 342 | start = max(min(bi->start, bj->start), low); |
434 | 343 | end = min(max(bi->end, bj->end), high); | |
435 | /* | 344 | for (k = 0; k < mi->nr_blks; k++) { |
436 | * If there won't be enough non-reserved memory for the | 345 | struct numa_memblk *bk = &mi->blk[k]; |
437 | * next node, this one must extend to the end of the | 346 | |
438 | * physical node. | 347 | if (bi->nid == bk->nid) |
439 | */ | 348 | continue; |
440 | if (physnodes[i].end - end - | 349 | if (start < bk->end && end > bk->start) |
441 | memblock_x86_hole_size(end, physnodes[i].end) < size) | 350 | break; |
442 | end = physnodes[i].end; | 351 | } |
443 | 352 | if (k < mi->nr_blks) | |
444 | /* | 353 | continue; |
445 | * Avoid allocating more nodes than requested, which can | 354 | printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%lx,%lx)\n", |
446 | * happen as a result of rounding down each node's size | 355 | bi->nid, bi->start, bi->end, bj->start, bj->end, |
447 | * to FAKE_NODE_MIN_SIZE. | 356 | start, end); |
448 | */ | 357 | bi->start = start; |
449 | if (nodes_weight(physnode_mask) + ret >= nr_nodes) | 358 | bi->end = end; |
450 | end = physnodes[i].end; | 359 | numa_remove_memblk_from(j--, mi); |
451 | |||
452 | if (setup_node_range(ret++, &physnodes[i].start, | ||
453 | end - physnodes[i].start, | ||
454 | physnodes[i].end) < 0) | ||
455 | node_clear(i, physnode_mask); | ||
456 | } | 360 | } |
457 | } | 361 | } |
458 | return ret; | ||
459 | } | ||
460 | 362 | ||
461 | /* | 363 | for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) { |
462 | * Returns the end address of a node so that there is at least `size' amount of | 364 | mi->blk[i].start = mi->blk[i].end = 0; |
463 | * non-reserved memory or `max_addr' is reached. | 365 | mi->blk[i].nid = NUMA_NO_NODE; |
464 | */ | ||
465 | static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size) | ||
466 | { | ||
467 | u64 end = start + size; | ||
468 | |||
469 | while (end - start - memblock_x86_hole_size(start, end) < size) { | ||
470 | end += FAKE_NODE_MIN_SIZE; | ||
471 | if (end > max_addr) { | ||
472 | end = max_addr; | ||
473 | break; | ||
474 | } | ||
475 | } | 366 | } |
476 | return end; | 367 | |
368 | return 0; | ||
477 | } | 369 | } |
478 | 370 | ||
479 | /* | 371 | /* |
480 | * Sets up fake nodes of `size' interleaved over physical nodes ranging from | 372 | * Set nodes, which have memory in @mi, in *@nodemask. |
481 | * `addr' to `max_addr'. The return value is the number of nodes allocated. | ||
482 | */ | 373 | */ |
483 | static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size) | 374 | static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask, |
375 | const struct numa_meminfo *mi) | ||
484 | { | 376 | { |
485 | nodemask_t physnode_mask = NODE_MASK_NONE; | ||
486 | u64 min_size; | ||
487 | int ret = 0; | ||
488 | int i; | 377 | int i; |
489 | 378 | ||
490 | if (!size) | 379 | for (i = 0; i < ARRAY_SIZE(mi->blk); i++) |
491 | return -1; | 380 | if (mi->blk[i].start != mi->blk[i].end && |
492 | /* | 381 | mi->blk[i].nid != NUMA_NO_NODE) |
493 | * The limit on emulated nodes is MAX_NUMNODES, so the size per node is | 382 | node_set(mi->blk[i].nid, *nodemask); |
494 | * increased accordingly if the requested size is too small. This | 383 | } |
495 | * creates a uniform distribution of node sizes across the entire | ||
496 | * machine (but not necessarily over physical nodes). | ||
497 | */ | ||
498 | min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / | ||
499 | MAX_NUMNODES; | ||
500 | min_size = max(min_size, FAKE_NODE_MIN_SIZE); | ||
501 | if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size) | ||
502 | min_size = (min_size + FAKE_NODE_MIN_SIZE) & | ||
503 | FAKE_NODE_MIN_HASH_MASK; | ||
504 | if (size < min_size) { | ||
505 | pr_err("Fake node size %LuMB too small, increasing to %LuMB\n", | ||
506 | size >> 20, min_size >> 20); | ||
507 | size = min_size; | ||
508 | } | ||
509 | size &= FAKE_NODE_MIN_HASH_MASK; | ||
510 | |||
511 | for (i = 0; i < MAX_NUMNODES; i++) | ||
512 | if (physnodes[i].start != physnodes[i].end) | ||
513 | node_set(i, physnode_mask); | ||
514 | /* | ||
515 | * Fill physical nodes with fake nodes of size until there is no memory | ||
516 | * left on any of them. | ||
517 | */ | ||
518 | while (nodes_weight(physnode_mask)) { | ||
519 | for_each_node_mask(i, physnode_mask) { | ||
520 | u64 dma32_end = MAX_DMA32_PFN << PAGE_SHIFT; | ||
521 | u64 end; | ||
522 | |||
523 | end = find_end_of_node(physnodes[i].start, | ||
524 | physnodes[i].end, size); | ||
525 | /* | ||
526 | * If there won't be at least FAKE_NODE_MIN_SIZE of | ||
527 | * non-reserved memory in ZONE_DMA32 for the next node, | ||
528 | * this one must extend to the boundary. | ||
529 | */ | ||
530 | if (end < dma32_end && dma32_end - end - | ||
531 | memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) | ||
532 | end = dma32_end; | ||
533 | 384 | ||
534 | /* | 385 | /** |
535 | * If there won't be enough non-reserved memory for the | 386 | * numa_reset_distance - Reset NUMA distance table |
536 | * next node, this one must extend to the end of the | 387 | * |
537 | * physical node. | 388 | * The current table is freed. The next numa_set_distance() call will |
538 | */ | 389 | * create a new one. |
539 | if (physnodes[i].end - end - | 390 | */ |
540 | memblock_x86_hole_size(end, physnodes[i].end) < size) | 391 | void __init numa_reset_distance(void) |
541 | end = physnodes[i].end; | 392 | { |
393 | size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]); | ||
542 | 394 | ||
543 | /* | 395 | /* numa_distance could be 1LU marking allocation failure, test cnt */ |
544 | * Setup the fake node that will be allocated as bootmem | 396 | if (numa_distance_cnt) |
545 | * later. If setup_node_range() returns non-zero, there | 397 | memblock_x86_free_range(__pa(numa_distance), |
546 | * is no more memory available on this physical node. | 398 | __pa(numa_distance) + size); |
547 | */ | 399 | numa_distance_cnt = 0; |
548 | if (setup_node_range(ret++, &physnodes[i].start, | 400 | numa_distance = NULL; /* enable table creation */ |
549 | end - physnodes[i].start, | ||
550 | physnodes[i].end) < 0) | ||
551 | node_clear(i, physnode_mask); | ||
552 | } | ||
553 | } | ||
554 | return ret; | ||
555 | } | 401 | } |
556 | 402 | ||
557 | /* | 403 | static int __init numa_alloc_distance(void) |
558 | * Sets up the system RAM area from start_pfn to last_pfn according to the | ||
559 | * numa=fake command-line option. | ||
560 | */ | ||
561 | static int __init numa_emulation(unsigned long start_pfn, | ||
562 | unsigned long last_pfn, int acpi, int amd) | ||
563 | { | 404 | { |
564 | u64 addr = start_pfn << PAGE_SHIFT; | 405 | nodemask_t nodes_parsed; |
565 | u64 max_addr = last_pfn << PAGE_SHIFT; | 406 | size_t size; |
566 | int num_nodes; | 407 | int i, j, cnt = 0; |
567 | int i; | 408 | u64 phys; |
568 | 409 | ||
569 | /* | 410 | /* size the new table and allocate it */ |
570 | * If the numa=fake command-line contains a 'M' or 'G', it represents | 411 | nodes_parsed = numa_nodes_parsed; |
571 | * the fixed node size. Otherwise, if it is just a single number N, | 412 | numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo); |
572 | * split the system RAM into N fake nodes. | ||
573 | */ | ||
574 | if (strchr(cmdline, 'M') || strchr(cmdline, 'G')) { | ||
575 | u64 size; | ||
576 | 413 | ||
577 | size = memparse(cmdline, &cmdline); | 414 | for_each_node_mask(i, nodes_parsed) |
578 | num_nodes = split_nodes_size_interleave(addr, max_addr, size); | 415 | cnt = i; |
579 | } else { | 416 | cnt++; |
580 | unsigned long n; | 417 | size = cnt * cnt * sizeof(numa_distance[0]); |
581 | 418 | ||
582 | n = simple_strtoul(cmdline, NULL, 0); | 419 | phys = memblock_find_in_range(0, (u64)max_pfn_mapped << PAGE_SHIFT, |
583 | num_nodes = split_nodes_interleave(addr, max_addr, n); | 420 | size, PAGE_SIZE); |
421 | if (phys == MEMBLOCK_ERROR) { | ||
422 | pr_warning("NUMA: Warning: can't allocate distance table!\n"); | ||
423 | /* don't retry until explicitly reset */ | ||
424 | numa_distance = (void *)1LU; | ||
425 | return -ENOMEM; | ||
584 | } | 426 | } |
427 | memblock_x86_reserve_range(phys, phys + size, "NUMA DIST"); | ||
585 | 428 | ||
586 | if (num_nodes < 0) | 429 | numa_distance = __va(phys); |
587 | return num_nodes; | 430 | numa_distance_cnt = cnt; |
588 | memnode_shift = compute_hash_shift(nodes, num_nodes, NULL); | 431 | |
589 | if (memnode_shift < 0) { | 432 | /* fill with the default distances */ |
590 | memnode_shift = 0; | 433 | for (i = 0; i < cnt; i++) |
591 | printk(KERN_ERR "No NUMA hash function found. NUMA emulation " | 434 | for (j = 0; j < cnt; j++) |
592 | "disabled.\n"); | 435 | numa_distance[i * cnt + j] = i == j ? |
593 | return -1; | 436 | LOCAL_DISTANCE : REMOTE_DISTANCE; |
594 | } | 437 | printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt); |
595 | 438 | ||
596 | /* | ||
597 | * We need to vacate all active ranges that may have been registered for | ||
598 | * the e820 memory map. | ||
599 | */ | ||
600 | remove_all_active_ranges(); | ||
601 | for_each_node_mask(i, node_possible_map) { | ||
602 | memblock_x86_register_active_regions(i, nodes[i].start >> PAGE_SHIFT, | ||
603 | nodes[i].end >> PAGE_SHIFT); | ||
604 | setup_node_bootmem(i, nodes[i].start, nodes[i].end); | ||
605 | } | ||
606 | setup_physnodes(addr, max_addr, acpi, amd); | ||
607 | fake_physnodes(acpi, amd, num_nodes); | ||
608 | numa_init_array(); | ||
609 | return 0; | 439 | return 0; |
610 | } | 440 | } |
611 | #endif /* CONFIG_NUMA_EMU */ | ||
612 | 441 | ||
613 | void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn, | 442 | /** |
614 | int acpi, int amd) | 443 | * numa_set_distance - Set NUMA distance from one NUMA to another |
444 | * @from: the 'from' node to set distance | ||
445 | * @to: the 'to' node to set distance | ||
446 | * @distance: NUMA distance | ||
447 | * | ||
448 | * Set the distance from node @from to @to to @distance. If the distance table | ||
449 | * doesn't exist, one which is large enough to accommodate all the currently | ||
450 | * known nodes will be created. | ||
451 | * | ||
452 | * If such a table cannot be allocated, a warning is printed and further | ||
453 | * calls are ignored until the distance table is reset with | ||
454 | * numa_reset_distance(). | ||
455 | * | ||
456 | * If @from or @to is higher than the highest known node at the time of | ||
457 | * table creation or @distance doesn't make sense, the call is ignored. | ||
458 | * This is to allow simplification of specific NUMA config implementations. | ||
459 | */ | ||
460 | void __init numa_set_distance(int from, int to, int distance) | ||
615 | { | 461 | { |
616 | int i; | 462 | if (!numa_distance && numa_alloc_distance() < 0) |
617 | |||
618 | nodes_clear(node_possible_map); | ||
619 | nodes_clear(node_online_map); | ||
620 | |||
621 | #ifdef CONFIG_NUMA_EMU | ||
622 | setup_physnodes(start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT, | ||
623 | acpi, amd); | ||
624 | if (cmdline && !numa_emulation(start_pfn, last_pfn, acpi, amd)) | ||
625 | return; | 463 | return; |
626 | setup_physnodes(start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT, | ||
627 | acpi, amd); | ||
628 | nodes_clear(node_possible_map); | ||
629 | nodes_clear(node_online_map); | ||
630 | #endif | ||
631 | 464 | ||
632 | #ifdef CONFIG_ACPI_NUMA | 465 | if (from >= numa_distance_cnt || to >= numa_distance_cnt) { |
633 | if (!numa_off && acpi && !acpi_scan_nodes(start_pfn << PAGE_SHIFT, | 466 | printk_once(KERN_DEBUG "NUMA: Debug: distance out of bound, from=%d to=%d distance=%d\n", |
634 | last_pfn << PAGE_SHIFT)) | 467 | from, to, distance); |
635 | return; | 468 | return; |
636 | nodes_clear(node_possible_map); | 469 | } |
637 | nodes_clear(node_online_map); | ||
638 | #endif | ||
639 | 470 | ||
640 | #ifdef CONFIG_AMD_NUMA | 471 | if ((u8)distance != distance || |
641 | if (!numa_off && amd && !amd_scan_nodes()) | 472 | (from == to && distance != LOCAL_DISTANCE)) { |
473 | pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n", | ||
474 | from, to, distance); | ||
642 | return; | 475 | return; |
643 | nodes_clear(node_possible_map); | 476 | } |
644 | nodes_clear(node_online_map); | ||
645 | #endif | ||
646 | printk(KERN_INFO "%s\n", | ||
647 | numa_off ? "NUMA turned off" : "No NUMA configuration found"); | ||
648 | 477 | ||
649 | printk(KERN_INFO "Faking a node at %016lx-%016lx\n", | 478 | numa_distance[from * numa_distance_cnt + to] = distance; |
650 | start_pfn << PAGE_SHIFT, | ||
651 | last_pfn << PAGE_SHIFT); | ||
652 | /* setup dummy node covering all memory */ | ||
653 | memnode_shift = 63; | ||
654 | memnodemap = memnode.embedded_map; | ||
655 | memnodemap[0] = 0; | ||
656 | node_set_online(0); | ||
657 | node_set(0, node_possible_map); | ||
658 | for (i = 0; i < nr_cpu_ids; i++) | ||
659 | numa_set_node(i, 0); | ||
660 | memblock_x86_register_active_regions(0, start_pfn, last_pfn); | ||
661 | setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT); | ||
662 | } | 479 | } |
663 | 480 | ||
664 | unsigned long __init numa_free_all_bootmem(void) | 481 | int __node_distance(int from, int to) |
665 | { | 482 | { |
666 | unsigned long pages = 0; | 483 | if (from >= numa_distance_cnt || to >= numa_distance_cnt) |
667 | int i; | 484 | return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE; |
485 | return numa_distance[from * numa_distance_cnt + to]; | ||
486 | } | ||
487 | EXPORT_SYMBOL(__node_distance); | ||
668 | 488 | ||
669 | for_each_online_node(i) | 489 | /* |
670 | pages += free_all_bootmem_node(NODE_DATA(i)); | 490 | * Sanity check to catch more bad NUMA configurations (they are amazingly |
491 | * common). Make sure the nodes cover all memory. | ||
492 | */ | ||
493 | static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi) | ||
494 | { | ||
495 | unsigned long numaram, e820ram; | ||
496 | int i; | ||
671 | 497 | ||
672 | pages += free_all_memory_core_early(MAX_NUMNODES); | 498 | numaram = 0; |
499 | for (i = 0; i < mi->nr_blks; i++) { | ||
500 | unsigned long s = mi->blk[i].start >> PAGE_SHIFT; | ||
501 | unsigned long e = mi->blk[i].end >> PAGE_SHIFT; | ||
502 | numaram += e - s; | ||
503 | numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e); | ||
504 | if ((long)numaram < 0) | ||
505 | numaram = 0; | ||
506 | } | ||
673 | 507 | ||
674 | return pages; | 508 | e820ram = max_pfn - (memblock_x86_hole_size(0, |
509 | max_pfn << PAGE_SHIFT) >> PAGE_SHIFT); | ||
510 | /* We seem to lose 3 pages somewhere. Allow 1M of slack. */ | ||
511 | if ((long)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) { | ||
512 | printk(KERN_ERR "NUMA: nodes only cover %luMB of your %luMB e820 RAM. Not used.\n", | ||
513 | (numaram << PAGE_SHIFT) >> 20, | ||
514 | (e820ram << PAGE_SHIFT) >> 20); | ||
515 | return false; | ||
516 | } | ||
517 | return true; | ||
675 | } | 518 | } |
676 | 519 | ||
677 | #ifdef CONFIG_NUMA | 520 | static int __init numa_register_memblks(struct numa_meminfo *mi) |
678 | |||
679 | static __init int find_near_online_node(int node) | ||
680 | { | 521 | { |
681 | int n, val; | 522 | int i, nid; |
682 | int min_val = INT_MAX; | ||
683 | int best_node = -1; | ||
684 | 523 | ||
685 | for_each_online_node(n) { | 524 | /* Account for nodes with cpus and no memory */ |
686 | val = node_distance(node, n); | 525 | node_possible_map = numa_nodes_parsed; |
526 | numa_nodemask_from_meminfo(&node_possible_map, mi); | ||
527 | if (WARN_ON(nodes_empty(node_possible_map))) | ||
528 | return -EINVAL; | ||
529 | |||
530 | memnode_shift = compute_hash_shift(mi); | ||
531 | if (memnode_shift < 0) { | ||
532 | printk(KERN_ERR "NUMA: No NUMA node hash function found. Contact maintainer\n"); | ||
533 | return -EINVAL; | ||
534 | } | ||
687 | 535 | ||
688 | if (val < min_val) { | 536 | for (i = 0; i < mi->nr_blks; i++) |
689 | min_val = val; | 537 | memblock_x86_register_active_regions(mi->blk[i].nid, |
690 | best_node = n; | 538 | mi->blk[i].start >> PAGE_SHIFT, |
539 | mi->blk[i].end >> PAGE_SHIFT); | ||
540 | |||
541 | /* for out of order entries */ | ||
542 | sort_node_map(); | ||
543 | if (!numa_meminfo_cover_memory(mi)) | ||
544 | return -EINVAL; | ||
545 | |||
546 | /* Finally register nodes. */ | ||
547 | for_each_node_mask(nid, node_possible_map) { | ||
548 | u64 start = (u64)max_pfn << PAGE_SHIFT; | ||
549 | u64 end = 0; | ||
550 | |||
551 | for (i = 0; i < mi->nr_blks; i++) { | ||
552 | if (nid != mi->blk[i].nid) | ||
553 | continue; | ||
554 | start = min(mi->blk[i].start, start); | ||
555 | end = max(mi->blk[i].end, end); | ||
691 | } | 556 | } |
557 | |||
558 | if (start < end) | ||
559 | setup_node_bootmem(nid, start, end); | ||
692 | } | 560 | } |
693 | 561 | ||
694 | return best_node; | 562 | return 0; |
695 | } | 563 | } |
696 | 564 | ||
697 | /* | 565 | /** |
698 | * Setup early cpu_to_node. | 566 | * dummy_numa_init - Fallback dummy NUMA init |
699 | * | 567 | * |
700 | * Populate cpu_to_node[] only if x86_cpu_to_apicid[], | 568 | * Used if there's no underlying NUMA architecture, NUMA initialization |
701 | * and apicid_to_node[] tables have valid entries for a CPU. | 569 | * fails, or NUMA is disabled on the command line. |
702 | * This means we skip cpu_to_node[] initialisation for NUMA | ||
703 | * emulation and faking node case (when running a kernel compiled | ||
704 | * for NUMA on a non NUMA box), which is OK as cpu_to_node[] | ||
705 | * is already initialized in a round robin manner at numa_init_array, | ||
706 | * prior to this call, and this initialization is good enough | ||
707 | * for the fake NUMA cases. | ||
708 | * | 570 | * |
709 | * Called before the per_cpu areas are setup. | 571 | * Must online at least one node and add memory blocks that cover all |
572 | * allowed memory. This function must not fail. | ||
710 | */ | 573 | */ |
711 | void __init init_cpu_to_node(void) | 574 | static int __init dummy_numa_init(void) |
712 | { | 575 | { |
713 | int cpu; | 576 | printk(KERN_INFO "%s\n", |
714 | u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid); | 577 | numa_off ? "NUMA turned off" : "No NUMA configuration found"); |
715 | 578 | printk(KERN_INFO "Faking a node at %016lx-%016lx\n", | |
716 | BUG_ON(cpu_to_apicid == NULL); | 579 | 0LU, max_pfn << PAGE_SHIFT); |
717 | 580 | ||
718 | for_each_possible_cpu(cpu) { | 581 | node_set(0, numa_nodes_parsed); |
719 | int node; | 582 | numa_add_memblk(0, 0, (u64)max_pfn << PAGE_SHIFT); |
720 | u16 apicid = cpu_to_apicid[cpu]; | ||
721 | 583 | ||
722 | if (apicid == BAD_APICID) | 584 | return 0; |
723 | continue; | ||
724 | node = apicid_to_node[apicid]; | ||
725 | if (node == NUMA_NO_NODE) | ||
726 | continue; | ||
727 | if (!node_online(node)) | ||
728 | node = find_near_online_node(node); | ||
729 | numa_set_node(cpu, node); | ||
730 | } | ||
731 | } | 585 | } |
732 | #endif | ||
733 | 586 | ||
734 | 587 | static int __init numa_init(int (*init_func)(void)) | |
735 | void __cpuinit numa_set_node(int cpu, int node) | ||
736 | { | 588 | { |
737 | int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map); | 589 | int i; |
738 | 590 | int ret; | |
739 | /* early setting, no percpu area yet */ | ||
740 | if (cpu_to_node_map) { | ||
741 | cpu_to_node_map[cpu] = node; | ||
742 | return; | ||
743 | } | ||
744 | |||
745 | #ifdef CONFIG_DEBUG_PER_CPU_MAPS | ||
746 | if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) { | ||
747 | printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu); | ||
748 | dump_stack(); | ||
749 | return; | ||
750 | } | ||
751 | #endif | ||
752 | per_cpu(x86_cpu_to_node_map, cpu) = node; | ||
753 | 591 | ||
754 | if (node != NUMA_NO_NODE) | 592 | for (i = 0; i < MAX_LOCAL_APIC; i++) |
755 | set_cpu_numa_node(cpu, node); | 593 | set_apicid_to_node(i, NUMA_NO_NODE); |
756 | } | ||
757 | 594 | ||
758 | void __cpuinit numa_clear_node(int cpu) | 595 | nodes_clear(numa_nodes_parsed); |
759 | { | 596 | nodes_clear(node_possible_map); |
760 | numa_set_node(cpu, NUMA_NO_NODE); | 597 | nodes_clear(node_online_map); |
761 | } | 598 | memset(&numa_meminfo, 0, sizeof(numa_meminfo)); |
599 | remove_all_active_ranges(); | ||
600 | numa_reset_distance(); | ||
762 | 601 | ||
763 | #ifndef CONFIG_DEBUG_PER_CPU_MAPS | 602 | ret = init_func(); |
603 | if (ret < 0) | ||
604 | return ret; | ||
605 | ret = numa_cleanup_meminfo(&numa_meminfo); | ||
606 | if (ret < 0) | ||
607 | return ret; | ||
764 | 608 | ||
765 | #ifndef CONFIG_NUMA_EMU | 609 | numa_emulation(&numa_meminfo, numa_distance_cnt); |
766 | void __cpuinit numa_add_cpu(int cpu) | ||
767 | { | ||
768 | cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); | ||
769 | } | ||
770 | 610 | ||
771 | void __cpuinit numa_remove_cpu(int cpu) | 611 | ret = numa_register_memblks(&numa_meminfo); |
772 | { | 612 | if (ret < 0) |
773 | cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); | 613 | return ret; |
774 | } | ||
775 | #else | ||
776 | void __cpuinit numa_add_cpu(int cpu) | ||
777 | { | ||
778 | unsigned long addr; | ||
779 | u16 apicid; | ||
780 | int physnid; | ||
781 | int nid = NUMA_NO_NODE; | ||
782 | 614 | ||
783 | apicid = early_per_cpu(x86_cpu_to_apicid, cpu); | 615 | for (i = 0; i < nr_cpu_ids; i++) { |
784 | if (apicid != BAD_APICID) | 616 | int nid = early_cpu_to_node(i); |
785 | nid = apicid_to_node[apicid]; | ||
786 | if (nid == NUMA_NO_NODE) | ||
787 | nid = early_cpu_to_node(cpu); | ||
788 | BUG_ON(nid == NUMA_NO_NODE || !node_online(nid)); | ||
789 | |||
790 | /* | ||
791 | * Use the starting address of the emulated node to find which physical | ||
792 | * node it is allocated on. | ||
793 | */ | ||
794 | addr = node_start_pfn(nid) << PAGE_SHIFT; | ||
795 | for (physnid = 0; physnid < MAX_NUMNODES; physnid++) | ||
796 | if (addr >= physnodes[physnid].start && | ||
797 | addr < physnodes[physnid].end) | ||
798 | break; | ||
799 | 617 | ||
800 | /* | 618 | if (nid == NUMA_NO_NODE) |
801 | * Map the cpu to each emulated node that is allocated on the physical | 619 | continue; |
802 | * node of the cpu's apic id. | 620 | if (!node_online(nid)) |
803 | */ | 621 | numa_clear_node(i); |
804 | for_each_online_node(nid) { | ||
805 | addr = node_start_pfn(nid) << PAGE_SHIFT; | ||
806 | if (addr >= physnodes[physnid].start && | ||
807 | addr < physnodes[physnid].end) | ||
808 | cpumask_set_cpu(cpu, node_to_cpumask_map[nid]); | ||
809 | } | 622 | } |
623 | numa_init_array(); | ||
624 | return 0; | ||
810 | } | 625 | } |
811 | 626 | ||
812 | void __cpuinit numa_remove_cpu(int cpu) | 627 | void __init initmem_init(void) |
813 | { | 628 | { |
814 | int i; | 629 | int ret; |
815 | 630 | ||
816 | for_each_online_node(i) | 631 | if (!numa_off) { |
817 | cpumask_clear_cpu(cpu, node_to_cpumask_map[i]); | 632 | #ifdef CONFIG_ACPI_NUMA |
818 | } | 633 | ret = numa_init(x86_acpi_numa_init); |
819 | #endif /* !CONFIG_NUMA_EMU */ | 634 | if (!ret) |
820 | 635 | return; | |
821 | #else /* CONFIG_DEBUG_PER_CPU_MAPS */ | 636 | #endif |
822 | static struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable) | 637 | #ifdef CONFIG_AMD_NUMA |
823 | { | 638 | ret = numa_init(amd_numa_init); |
824 | int node = early_cpu_to_node(cpu); | 639 | if (!ret) |
825 | struct cpumask *mask; | 640 | return; |
826 | char buf[64]; | 641 | #endif |
827 | |||
828 | mask = node_to_cpumask_map[node]; | ||
829 | if (!mask) { | ||
830 | pr_err("node_to_cpumask_map[%i] NULL\n", node); | ||
831 | dump_stack(); | ||
832 | return NULL; | ||
833 | } | 642 | } |
834 | 643 | ||
835 | cpulist_scnprintf(buf, sizeof(buf), mask); | 644 | numa_init(dummy_numa_init); |
836 | printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n", | ||
837 | enable ? "numa_add_cpu" : "numa_remove_cpu", | ||
838 | cpu, node, buf); | ||
839 | return mask; | ||
840 | } | 645 | } |
841 | 646 | ||
842 | /* | 647 | unsigned long __init numa_free_all_bootmem(void) |
843 | * --------- debug versions of the numa functions --------- | ||
844 | */ | ||
845 | #ifndef CONFIG_NUMA_EMU | ||
846 | static void __cpuinit numa_set_cpumask(int cpu, int enable) | ||
847 | { | ||
848 | struct cpumask *mask; | ||
849 | |||
850 | mask = debug_cpumask_set_cpu(cpu, enable); | ||
851 | if (!mask) | ||
852 | return; | ||
853 | |||
854 | if (enable) | ||
855 | cpumask_set_cpu(cpu, mask); | ||
856 | else | ||
857 | cpumask_clear_cpu(cpu, mask); | ||
858 | } | ||
859 | #else | ||
860 | static void __cpuinit numa_set_cpumask(int cpu, int enable) | ||
861 | { | 648 | { |
862 | int node = early_cpu_to_node(cpu); | 649 | unsigned long pages = 0; |
863 | struct cpumask *mask; | ||
864 | int i; | 650 | int i; |
865 | 651 | ||
866 | for_each_online_node(i) { | 652 | for_each_online_node(i) |
867 | unsigned long addr; | 653 | pages += free_all_bootmem_node(NODE_DATA(i)); |
868 | |||
869 | addr = node_start_pfn(i) << PAGE_SHIFT; | ||
870 | if (addr < physnodes[node].start || | ||
871 | addr >= physnodes[node].end) | ||
872 | continue; | ||
873 | mask = debug_cpumask_set_cpu(cpu, enable); | ||
874 | if (!mask) | ||
875 | return; | ||
876 | |||
877 | if (enable) | ||
878 | cpumask_set_cpu(cpu, mask); | ||
879 | else | ||
880 | cpumask_clear_cpu(cpu, mask); | ||
881 | } | ||
882 | } | ||
883 | #endif /* CONFIG_NUMA_EMU */ | ||
884 | 654 | ||
885 | void __cpuinit numa_add_cpu(int cpu) | 655 | pages += free_all_memory_core_early(MAX_NUMNODES); |
886 | { | ||
887 | numa_set_cpumask(cpu, 1); | ||
888 | } | ||
889 | 656 | ||
890 | void __cpuinit numa_remove_cpu(int cpu) | 657 | return pages; |
891 | { | ||
892 | numa_set_cpumask(cpu, 0); | ||
893 | } | 658 | } |
894 | 659 | ||
895 | int __cpu_to_node(int cpu) | 660 | int __cpuinit numa_cpu_node(int cpu) |
896 | { | 661 | { |
897 | if (early_per_cpu_ptr(x86_cpu_to_node_map)) { | 662 | int apicid = early_per_cpu(x86_cpu_to_apicid, cpu); |
898 | printk(KERN_WARNING | ||
899 | "cpu_to_node(%d): usage too early!\n", cpu); | ||
900 | dump_stack(); | ||
901 | return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu]; | ||
902 | } | ||
903 | return per_cpu(x86_cpu_to_node_map, cpu); | ||
904 | } | ||
905 | EXPORT_SYMBOL(__cpu_to_node); | ||
906 | 663 | ||
907 | /* | 664 | if (apicid != BAD_APICID) |
908 | * Same function as cpu_to_node() but used if called before the | 665 | return __apicid_to_node[apicid]; |
909 | * per_cpu areas are setup. | 666 | return NUMA_NO_NODE; |
910 | */ | ||
911 | int early_cpu_to_node(int cpu) | ||
912 | { | ||
913 | if (early_per_cpu_ptr(x86_cpu_to_node_map)) | ||
914 | return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu]; | ||
915 | |||
916 | if (!cpu_possible(cpu)) { | ||
917 | printk(KERN_WARNING | ||
918 | "early_cpu_to_node(%d): no per_cpu area!\n", cpu); | ||
919 | dump_stack(); | ||
920 | return NUMA_NO_NODE; | ||
921 | } | ||
922 | return per_cpu(x86_cpu_to_node_map, cpu); | ||
923 | } | 667 | } |
924 | |||
925 | /* | ||
926 | * --------- end of debug versions of the numa functions --------- | ||
927 | */ | ||
928 | |||
929 | #endif /* CONFIG_DEBUG_PER_CPU_MAPS */ | ||
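The new numa_distance table introduced above is a flat cnt * cnt byte array indexed as from * cnt + to, seeded with LOCAL_DISTANCE on the diagonal and REMOTE_DISTANCE elsewhere; __node_distance() falls back to those defaults for out-of-range nodes. A compact userspace model of that layout follows (10 and 20 are the conventional local/remote SLIT values, and the table size is fixed here only for the example).

/* Model of the numa_distance table: flat cnt*cnt array, from*cnt+to index,
 * defaults LOCAL_DISTANCE/REMOTE_DISTANCE, out-of-range reads fall back. */
#include <stdio.h>

#define LOCAL_DISTANCE  10
#define REMOTE_DISTANCE 20

static unsigned char dist[4 * 4];
static int cnt = 4;

static void set_distance(int from, int to, int d)
{
	if (from >= cnt || to >= cnt || (unsigned char)d != d)
		return;                         /* ignore bogus input */
	dist[from * cnt + to] = (unsigned char)d;
}

static int node_distance(int from, int to)
{
	if (from >= cnt || to >= cnt)
		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
	return dist[from * cnt + to];
}

int main(void)
{
	int i, j;

	/* fill with the default distances, as numa_alloc_distance() does */
	for (i = 0; i < cnt; i++)
		for (j = 0; j < cnt; j++)
			dist[i * cnt + j] = i == j ? LOCAL_DISTANCE
						   : REMOTE_DISTANCE;

	set_distance(0, 2, 40);                 /* e.g. one SLIT entry */
	set_distance(2, 0, 40);

	for (i = 0; i < cnt; i++, puts(""))
		for (j = 0; j < cnt; j++)
			printf("%3d", node_distance(i, j));
	return 0;
}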
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c new file mode 100644 index 000000000000..ad091e4cff17 --- /dev/null +++ b/arch/x86/mm/numa_emulation.c | |||
@@ -0,0 +1,494 @@ | |||
1 | /* | ||
2 | * NUMA emulation | ||
3 | */ | ||
4 | #include <linux/kernel.h> | ||
5 | #include <linux/errno.h> | ||
6 | #include <linux/topology.h> | ||
7 | #include <linux/memblock.h> | ||
8 | #include <asm/dma.h> | ||
9 | |||
10 | #include "numa_internal.h" | ||
11 | |||
12 | static int emu_nid_to_phys[MAX_NUMNODES] __cpuinitdata; | ||
13 | static char *emu_cmdline __initdata; | ||
14 | |||
15 | void __init numa_emu_cmdline(char *str) | ||
16 | { | ||
17 | emu_cmdline = str; | ||
18 | } | ||
19 | |||
20 | static int __init emu_find_memblk_by_nid(int nid, const struct numa_meminfo *mi) | ||
21 | { | ||
22 | int i; | ||
23 | |||
24 | for (i = 0; i < mi->nr_blks; i++) | ||
25 | if (mi->blk[i].nid == nid) | ||
26 | return i; | ||
27 | return -ENOENT; | ||
28 | } | ||
29 | |||
30 | /* | ||
31 | * Sets up node @nid by carving @size bytes off physical memblk @phys_blk of @pi | ||
32 | * and appending the result to @ei. The return value is -errno if something went wrong, 0 otherwise. | ||
33 | */ | ||
34 | static int __init emu_setup_memblk(struct numa_meminfo *ei, | ||
35 | struct numa_meminfo *pi, | ||
36 | int nid, int phys_blk, u64 size) | ||
37 | { | ||
38 | struct numa_memblk *eb = &ei->blk[ei->nr_blks]; | ||
39 | struct numa_memblk *pb = &pi->blk[phys_blk]; | ||
40 | |||
41 | if (ei->nr_blks >= NR_NODE_MEMBLKS) { | ||
42 | pr_err("NUMA: Too many emulated memblks, failing emulation\n"); | ||
43 | return -EINVAL; | ||
44 | } | ||
45 | |||
46 | ei->nr_blks++; | ||
47 | eb->start = pb->start; | ||
48 | eb->end = pb->start + size; | ||
49 | eb->nid = nid; | ||
50 | |||
51 | if (emu_nid_to_phys[nid] == NUMA_NO_NODE) | ||
52 | emu_nid_to_phys[nid] = pb->nid; | ||
53 | |||
54 | pb->start += size; | ||
55 | if (pb->start >= pb->end) { | ||
56 | WARN_ON_ONCE(pb->start > pb->end); | ||
57 | numa_remove_memblk_from(phys_blk, pi); | ||
58 | } | ||
59 | |||
60 | printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid, | ||
61 | eb->start, eb->end, (eb->end - eb->start) >> 20); | ||
62 | return 0; | ||
63 | } | ||
64 | |||
65 | /* | ||
66 | * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr | ||
67 | * to max_addr. The return value is the number of nodes allocated. | ||
68 | */ | ||
69 | static int __init split_nodes_interleave(struct numa_meminfo *ei, | ||
70 | struct numa_meminfo *pi, | ||
71 | u64 addr, u64 max_addr, int nr_nodes) | ||
72 | { | ||
73 | nodemask_t physnode_mask = NODE_MASK_NONE; | ||
74 | u64 size; | ||
75 | int big; | ||
76 | int nid = 0; | ||
77 | int i, ret; | ||
78 | |||
79 | if (nr_nodes <= 0) | ||
80 | return -1; | ||
81 | if (nr_nodes > MAX_NUMNODES) { | ||
82 | pr_info("numa=fake=%d too large, reducing to %d\n", | ||
83 | nr_nodes, MAX_NUMNODES); | ||
84 | nr_nodes = MAX_NUMNODES; | ||
85 | } | ||
86 | |||
87 | size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / nr_nodes; | ||
88 | /* | ||
89 | * Calculate the number of big nodes that can be allocated as a result | ||
90 | * of consolidating the remainder. | ||
91 | */ | ||
92 | big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) / | ||
93 | FAKE_NODE_MIN_SIZE; | ||
94 | |||
95 | size &= FAKE_NODE_MIN_HASH_MASK; | ||
96 | if (!size) { | ||
97 | pr_err("Not enough memory for each node. " | ||
98 | "NUMA emulation disabled.\n"); | ||
99 | return -1; | ||
100 | } | ||
101 | |||
102 | for (i = 0; i < pi->nr_blks; i++) | ||
103 | node_set(pi->blk[i].nid, physnode_mask); | ||
104 | |||
105 | /* | ||
106 | * Continue to fill physical nodes with fake nodes until there is no | ||
107 | * memory left on any of them. | ||
108 | */ | ||
109 | while (nodes_weight(physnode_mask)) { | ||
110 | for_each_node_mask(i, physnode_mask) { | ||
111 | u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN); | ||
112 | u64 start, limit, end; | ||
113 | int phys_blk; | ||
114 | |||
115 | phys_blk = emu_find_memblk_by_nid(i, pi); | ||
116 | if (phys_blk < 0) { | ||
117 | node_clear(i, physnode_mask); | ||
118 | continue; | ||
119 | } | ||
120 | start = pi->blk[phys_blk].start; | ||
121 | limit = pi->blk[phys_blk].end; | ||
122 | end = start + size; | ||
123 | |||
124 | if (nid < big) | ||
125 | end += FAKE_NODE_MIN_SIZE; | ||
126 | |||
127 | /* | ||
128 | * Continue to add memory to this fake node if its | ||
129 | * non-reserved memory is less than the per-node size. | ||
130 | */ | ||
131 | while (end - start - | ||
132 | memblock_x86_hole_size(start, end) < size) { | ||
133 | end += FAKE_NODE_MIN_SIZE; | ||
134 | if (end > limit) { | ||
135 | end = limit; | ||
136 | break; | ||
137 | } | ||
138 | } | ||
139 | |||
140 | /* | ||
141 | * If there won't be at least FAKE_NODE_MIN_SIZE of | ||
142 | * non-reserved memory in ZONE_DMA32 for the next node, | ||
143 | * this one must extend to the boundary. | ||
144 | */ | ||
145 | if (end < dma32_end && dma32_end - end - | ||
146 | memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) | ||
147 | end = dma32_end; | ||
148 | |||
149 | /* | ||
150 | * If there won't be enough non-reserved memory for the | ||
151 | * next node, this one must extend to the end of the | ||
152 | * physical node. | ||
153 | */ | ||
154 | if (limit - end - | ||
155 | memblock_x86_hole_size(end, limit) < size) | ||
156 | end = limit; | ||
157 | |||
158 | ret = emu_setup_memblk(ei, pi, nid++ % nr_nodes, | ||
159 | phys_blk, | ||
160 | min(end, limit) - start); | ||
161 | if (ret < 0) | ||
162 | return ret; | ||
163 | } | ||
164 | } | ||
165 | return 0; | ||
166 | } | ||
167 | |||
168 | /* | ||
169 | * Returns the end address of a node so that there is at least `size' amount of | ||
170 | * non-reserved memory or `max_addr' is reached. | ||
171 | */ | ||
172 | static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size) | ||
173 | { | ||
174 | u64 end = start + size; | ||
175 | |||
176 | while (end - start - memblock_x86_hole_size(start, end) < size) { | ||
177 | end += FAKE_NODE_MIN_SIZE; | ||
178 | if (end > max_addr) { | ||
179 | end = max_addr; | ||
180 | break; | ||
181 | } | ||
182 | } | ||
183 | return end; | ||
184 | } | ||
185 | |||
186 | /* | ||
187 | * Sets up fake nodes of `size' interleaved over physical nodes ranging from | ||
188 | * `addr' to `max_addr'. The return value is the number of nodes allocated. | ||
189 | */ | ||
190 | static int __init split_nodes_size_interleave(struct numa_meminfo *ei, | ||
191 | struct numa_meminfo *pi, | ||
192 | u64 addr, u64 max_addr, u64 size) | ||
193 | { | ||
194 | nodemask_t physnode_mask = NODE_MASK_NONE; | ||
195 | u64 min_size; | ||
196 | int nid = 0; | ||
197 | int i, ret; | ||
198 | |||
199 | if (!size) | ||
200 | return -1; | ||
201 | /* | ||
202 | * The limit on emulated nodes is MAX_NUMNODES, so the size per node is | ||
203 | * increased accordingly if the requested size is too small. This | ||
204 | * creates a uniform distribution of node sizes across the entire | ||
205 | * machine (but not necessarily over physical nodes). | ||
206 | */ | ||
207 | min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / | ||
208 | MAX_NUMNODES; | ||
209 | min_size = max(min_size, FAKE_NODE_MIN_SIZE); | ||
210 | if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size) | ||
211 | min_size = (min_size + FAKE_NODE_MIN_SIZE) & | ||
212 | FAKE_NODE_MIN_HASH_MASK; | ||
213 | if (size < min_size) { | ||
214 | pr_err("Fake node size %LuMB too small, increasing to %LuMB\n", | ||
215 | size >> 20, min_size >> 20); | ||
216 | size = min_size; | ||
217 | } | ||
218 | size &= FAKE_NODE_MIN_HASH_MASK; | ||
219 | |||
220 | for (i = 0; i < pi->nr_blks; i++) | ||
221 | node_set(pi->blk[i].nid, physnode_mask); | ||
222 | |||
223 | /* | ||
224 | * Fill physical nodes with fake nodes of size until there is no memory | ||
225 | * left on any of them. | ||
226 | */ | ||
227 | while (nodes_weight(physnode_mask)) { | ||
228 | for_each_node_mask(i, physnode_mask) { | ||
229 | u64 dma32_end = MAX_DMA32_PFN << PAGE_SHIFT; | ||
230 | u64 start, limit, end; | ||
231 | int phys_blk; | ||
232 | |||
233 | phys_blk = emu_find_memblk_by_nid(i, pi); | ||
234 | if (phys_blk < 0) { | ||
235 | node_clear(i, physnode_mask); | ||
236 | continue; | ||
237 | } | ||
238 | start = pi->blk[phys_blk].start; | ||
239 | limit = pi->blk[phys_blk].end; | ||
240 | |||
241 | end = find_end_of_node(start, limit, size); | ||
242 | /* | ||
243 | * If there won't be at least FAKE_NODE_MIN_SIZE of | ||
244 | * non-reserved memory in ZONE_DMA32 for the next node, | ||
245 | * this one must extend to the boundary. | ||
246 | */ | ||
247 | if (end < dma32_end && dma32_end - end - | ||
248 | memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) | ||
249 | end = dma32_end; | ||
250 | |||
251 | /* | ||
252 | * If there won't be enough non-reserved memory for the | ||
253 | * next node, this one must extend to the end of the | ||
254 | * physical node. | ||
255 | */ | ||
256 | if (limit - end - | ||
257 | memblock_x86_hole_size(end, limit) < size) | ||
258 | end = limit; | ||
259 | |||
260 | ret = emu_setup_memblk(ei, pi, nid++ % MAX_NUMNODES, | ||
261 | phys_blk, | ||
262 | min(end, limit) - start); | ||
263 | if (ret < 0) | ||
264 | return ret; | ||
265 | } | ||
266 | } | ||
267 | return 0; | ||
268 | } | ||
269 | |||
270 | /** | ||
271 | * numa_emulation - Emulate NUMA nodes | ||
272 | * @numa_meminfo: NUMA configuration to massage | ||
273 | * @numa_dist_cnt: The size of the physical NUMA distance table | ||
274 | * | ||
275 | * Emulate NUMA nodes according to the numa=fake kernel parameter. | ||
276 | * @numa_meminfo contains the physical memory configuration and is modified | ||
277 | * to reflect the emulated configuration on success. @numa_dist_cnt is | ||
278 | * used to determine the size of the physical distance table. | ||
279 | * | ||
280 | * On success, the following modifications are made. | ||
281 | * | ||
282 | * - @numa_meminfo is updated to reflect the emulated nodes. | ||
283 | * | ||
284 | * - __apicid_to_node[] is updated such that APIC IDs are mapped to the | ||
285 | * emulated nodes. | ||
286 | * | ||
287 | * - NUMA distance table is rebuilt to represent distances between emulated | ||
288 | * nodes. The distances are determined considering how emulated nodes | ||
289 | * are mapped to physical nodes and match the actual distances. | ||
290 | * | ||
291 | * - emu_nid_to_phys[] reflects how emulated nodes are mapped to physical | ||
292 | * nodes. This is used by numa_add_cpu() and numa_remove_cpu(). | ||
293 | * | ||
294 | * If emulation is not enabled or fails, emu_nid_to_phys[] is filled with | ||
295 | * identity mapping and no other modification is made. | ||
296 | */ | ||
297 | void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) | ||
298 | { | ||
299 | static struct numa_meminfo ei __initdata; | ||
300 | static struct numa_meminfo pi __initdata; | ||
301 | const u64 max_addr = max_pfn << PAGE_SHIFT; | ||
302 | u8 *phys_dist = NULL; | ||
303 | size_t phys_size = numa_dist_cnt * numa_dist_cnt * sizeof(phys_dist[0]); | ||
304 | int max_emu_nid, dfl_phys_nid; | ||
305 | int i, j, ret; | ||
306 | |||
307 | if (!emu_cmdline) | ||
308 | goto no_emu; | ||
309 | |||
310 | memset(&ei, 0, sizeof(ei)); | ||
311 | pi = *numa_meminfo; | ||
312 | |||
313 | for (i = 0; i < MAX_NUMNODES; i++) | ||
314 | emu_nid_to_phys[i] = NUMA_NO_NODE; | ||
315 | |||
316 | /* | ||
317 | * If the numa=fake command-line contains a 'M' or 'G', it represents | ||
318 | * the fixed node size. Otherwise, if it is just a single number N, | ||
319 | * split the system RAM into N fake nodes. | ||
320 | */ | ||
321 | if (strchr(emu_cmdline, 'M') || strchr(emu_cmdline, 'G')) { | ||
322 | u64 size; | ||
323 | |||
324 | size = memparse(emu_cmdline, &emu_cmdline); | ||
325 | ret = split_nodes_size_interleave(&ei, &pi, 0, max_addr, size); | ||
326 | } else { | ||
327 | unsigned long n; | ||
328 | |||
329 | n = simple_strtoul(emu_cmdline, NULL, 0); | ||
330 | ret = split_nodes_interleave(&ei, &pi, 0, max_addr, n); | ||
331 | } | ||
332 | |||
333 | if (ret < 0) | ||
334 | goto no_emu; | ||
335 | |||
336 | if (numa_cleanup_meminfo(&ei) < 0) { | ||
337 | pr_warning("NUMA: Warning: constructed meminfo invalid, disabling emulation\n"); | ||
338 | goto no_emu; | ||
339 | } | ||
340 | |||
341 | /* copy the physical distance table */ | ||
342 | if (numa_dist_cnt) { | ||
343 | u64 phys; | ||
344 | |||
345 | phys = memblock_find_in_range(0, | ||
346 | (u64)max_pfn_mapped << PAGE_SHIFT, | ||
347 | phys_size, PAGE_SIZE); | ||
348 | if (phys == MEMBLOCK_ERROR) { | ||
349 | pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n"); | ||
350 | goto no_emu; | ||
351 | } | ||
352 | memblock_x86_reserve_range(phys, phys + phys_size, "TMP NUMA DIST"); | ||
353 | phys_dist = __va(phys); | ||
354 | |||
355 | for (i = 0; i < numa_dist_cnt; i++) | ||
356 | for (j = 0; j < numa_dist_cnt; j++) | ||
357 | phys_dist[i * numa_dist_cnt + j] = | ||
358 | node_distance(i, j); | ||
359 | } | ||
360 | |||
361 | /* | ||
362 | * Determine the max emulated nid and the default phys nid to use | ||
363 | * for unmapped nodes. | ||
364 | */ | ||
365 | max_emu_nid = 0; | ||
366 | dfl_phys_nid = NUMA_NO_NODE; | ||
367 | for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) { | ||
368 | if (emu_nid_to_phys[i] != NUMA_NO_NODE) { | ||
369 | max_emu_nid = i; | ||
370 | if (dfl_phys_nid == NUMA_NO_NODE) | ||
371 | dfl_phys_nid = emu_nid_to_phys[i]; | ||
372 | } | ||
373 | } | ||
374 | if (dfl_phys_nid == NUMA_NO_NODE) { | ||
375 | pr_warning("NUMA: Warning: can't determine default physical node, disabling emulation\n"); | ||
376 | goto no_emu; | ||
377 | } | ||
378 | |||
379 | /* commit */ | ||
380 | *numa_meminfo = ei; | ||
381 | |||
382 | /* | ||
383 | * Transform __apicid_to_node table to use emulated nids by | ||
384 | * reverse-mapping phys_nid. The maps should always exist but fall | ||
385 | * back to zero just in case. | ||
386 | */ | ||
387 | for (i = 0; i < ARRAY_SIZE(__apicid_to_node); i++) { | ||
388 | if (__apicid_to_node[i] == NUMA_NO_NODE) | ||
389 | continue; | ||
390 | for (j = 0; j < ARRAY_SIZE(emu_nid_to_phys); j++) | ||
391 | if (__apicid_to_node[i] == emu_nid_to_phys[j]) | ||
392 | break; | ||
393 | __apicid_to_node[i] = j < ARRAY_SIZE(emu_nid_to_phys) ? j : 0; | ||
394 | } | ||
395 | |||
396 | /* make sure all emulated nodes are mapped to a physical node */ | ||
397 | for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) | ||
398 | if (emu_nid_to_phys[i] == NUMA_NO_NODE) | ||
399 | emu_nid_to_phys[i] = dfl_phys_nid; | ||
400 | |||
401 | /* transform distance table */ | ||
402 | numa_reset_distance(); | ||
403 | for (i = 0; i < max_emu_nid + 1; i++) { | ||
404 | for (j = 0; j < max_emu_nid + 1; j++) { | ||
405 | int physi = emu_nid_to_phys[i]; | ||
406 | int physj = emu_nid_to_phys[j]; | ||
407 | int dist; | ||
408 | |||
409 | if (physi >= numa_dist_cnt || physj >= numa_dist_cnt) | ||
410 | dist = physi == physj ? | ||
411 | LOCAL_DISTANCE : REMOTE_DISTANCE; | ||
412 | else | ||
413 | dist = phys_dist[physi * numa_dist_cnt + physj]; | ||
414 | |||
415 | numa_set_distance(i, j, dist); | ||
416 | } | ||
417 | } | ||
418 | |||
419 | /* free the copied physical distance table */ | ||
420 | if (phys_dist) | ||
421 | memblock_x86_free_range(__pa(phys_dist), __pa(phys_dist) + phys_size); | ||
422 | return; | ||
423 | |||
424 | no_emu: | ||
425 | /* No emulation. Build identity emu_nid_to_phys[] for numa_add_cpu() */ | ||
426 | for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) | ||
427 | emu_nid_to_phys[i] = i; | ||
428 | } | ||
429 | |||
430 | #ifndef CONFIG_DEBUG_PER_CPU_MAPS | ||
431 | void __cpuinit numa_add_cpu(int cpu) | ||
432 | { | ||
433 | int physnid, nid; | ||
434 | |||
435 | nid = early_cpu_to_node(cpu); | ||
436 | BUG_ON(nid == NUMA_NO_NODE || !node_online(nid)); | ||
437 | |||
438 | physnid = emu_nid_to_phys[nid]; | ||
439 | |||
440 | /* | ||
441 | * Map the cpu to each emulated node that is allocated on the physical | ||
442 | * node of the cpu's apic id. | ||
443 | */ | ||
444 | for_each_online_node(nid) | ||
445 | if (emu_nid_to_phys[nid] == physnid) | ||
446 | cpumask_set_cpu(cpu, node_to_cpumask_map[nid]); | ||
447 | } | ||
448 | |||
449 | void __cpuinit numa_remove_cpu(int cpu) | ||
450 | { | ||
451 | int i; | ||
452 | |||
453 | for_each_online_node(i) | ||
454 | cpumask_clear_cpu(cpu, node_to_cpumask_map[i]); | ||
455 | } | ||
456 | #else /* !CONFIG_DEBUG_PER_CPU_MAPS */ | ||
457 | static void __cpuinit numa_set_cpumask(int cpu, int enable) | ||
458 | { | ||
459 | struct cpumask *mask; | ||
460 | int nid, physnid, i; | ||
461 | |||
462 | nid = early_cpu_to_node(cpu); | ||
463 | if (nid == NUMA_NO_NODE) { | ||
464 | /* early_cpu_to_node() already emits a warning and trace */ | ||
465 | return; | ||
466 | } | ||
467 | |||
468 | physnid = emu_nid_to_phys[nid]; | ||
469 | |||
470 | for_each_online_node(i) { | ||
471 | if (emu_nid_to_phys[i] != physnid) | ||
472 | continue; | ||
473 | |||
474 | mask = debug_cpumask_set_cpu(cpu, enable); | ||
475 | if (!mask) | ||
476 | return; | ||
477 | |||
478 | if (enable) | ||
479 | cpumask_set_cpu(cpu, mask); | ||
480 | else | ||
481 | cpumask_clear_cpu(cpu, mask); | ||
482 | } | ||
483 | } | ||
484 | |||
485 | void __cpuinit numa_add_cpu(int cpu) | ||
486 | { | ||
487 | numa_set_cpumask(cpu, 1); | ||
488 | } | ||
489 | |||
490 | void __cpuinit numa_remove_cpu(int cpu) | ||
491 | { | ||
492 | numa_set_cpumask(cpu, 0); | ||
493 | } | ||
494 | #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ | ||
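
Aside (not part of the patch): the loop near the end of numa_emulation() above rebuilds the distance table by mapping every emulated node pair back to its physical pair, looking the pair up in the saved physical table, and falling back to LOCAL_DISTANCE/REMOTE_DISTANCE when a physical id lies outside that table. The standalone C sketch below models only that lookup; it is not kernel code, and the phys_dist values and the 2-physical/4-emulated layout are made-up assumptions for illustration.

#include <stdio.h>

#define LOCAL_DISTANCE  10
#define REMOTE_DISTANCE 20

int main(void)
{
        /* assumed example: 2 physical nodes, 4 emulated nodes (2 per phys) */
        const int phys_cnt = 2;
        const int phys_dist[2][2] = { { 10, 21 }, { 21, 10 } };
        const int emu_nid_to_phys[4] = { 0, 0, 1, 1 };

        for (int i = 0; i < 4; i++) {
                for (int j = 0; j < 4; j++) {
                        int pi = emu_nid_to_phys[i];
                        int pj = emu_nid_to_phys[j];
                        int dist;

                        /* same fallback rule as the kernel loop above */
                        if (pi >= phys_cnt || pj >= phys_cnt)
                                dist = (pi == pj) ? LOCAL_DISTANCE : REMOTE_DISTANCE;
                        else
                                dist = phys_dist[pi][pj];
                        printf("emu %d -> emu %d : %d\n", i, j, dist);
                }
        }
        return 0;
}

Running it prints LOCAL_DISTANCE for emulated nodes sharing a physical node and the saved physical distance otherwise, which is the behaviour the kerneldoc above describes.
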
diff --git a/arch/x86/mm/numa_internal.h b/arch/x86/mm/numa_internal.h new file mode 100644 index 000000000000..ef2d97377d7c --- /dev/null +++ b/arch/x86/mm/numa_internal.h | |||
@@ -0,0 +1,31 @@ | |||
1 | #ifndef __X86_MM_NUMA_INTERNAL_H | ||
2 | #define __X86_MM_NUMA_INTERNAL_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <asm/numa.h> | ||
6 | |||
7 | struct numa_memblk { | ||
8 | u64 start; | ||
9 | u64 end; | ||
10 | int nid; | ||
11 | }; | ||
12 | |||
13 | struct numa_meminfo { | ||
14 | int nr_blks; | ||
15 | struct numa_memblk blk[NR_NODE_MEMBLKS]; | ||
16 | }; | ||
17 | |||
18 | void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi); | ||
19 | int __init numa_cleanup_meminfo(struct numa_meminfo *mi); | ||
20 | void __init numa_reset_distance(void); | ||
21 | |||
22 | #ifdef CONFIG_NUMA_EMU | ||
23 | void __init numa_emulation(struct numa_meminfo *numa_meminfo, | ||
24 | int numa_dist_cnt); | ||
25 | #else | ||
26 | static inline void numa_emulation(struct numa_meminfo *numa_meminfo, | ||
27 | int numa_dist_cnt) | ||
28 | { } | ||
29 | #endif | ||
30 | |||
31 | #endif /* __X86_MM_NUMA_INTERNAL_H */ | ||
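
Aside (not part of the patch): the new numa_internal.h above declares the numa_memblk/numa_meminfo pair that the emulation and cleanup code iterates over. Below is a minimal user-space model of that layout; NR_NODE_MEMBLKS is assumed to be a small constant here, and add_memblk() is a hypothetical stand-in for the kernel's numa_add_memblk(), not its real implementation.

#include <stdio.h>

#define NR_NODE_MEMBLKS 16   /* assumed small constant for the example */

struct numa_memblk {
        unsigned long long start;
        unsigned long long end;
        int nid;
};

struct numa_meminfo {
        int nr_blks;
        struct numa_memblk blk[NR_NODE_MEMBLKS];
};

/* stand-in helper: append a block, rejecting a full table or empty range */
static int add_memblk(struct numa_meminfo *mi, int nid,
                      unsigned long long start, unsigned long long end)
{
        if (mi->nr_blks >= NR_NODE_MEMBLKS || start >= end)
                return -1;
        mi->blk[mi->nr_blks++] = (struct numa_memblk){ start, end, nid };
        return 0;
}

int main(void)
{
        struct numa_meminfo mi = { 0 };

        add_memblk(&mi, 0, 0x0ULL, 0x80000000ULL);         /* node 0: 0-2G  */
        add_memblk(&mi, 1, 0x80000000ULL, 0x100000000ULL); /* node 1: 2G-4G */

        for (int i = 0; i < mi.nr_blks; i++)
                printf("blk %d: nid %d [%#llx-%#llx)\n", i, mi.blk[i].nid,
                       mi.blk[i].start, mi.blk[i].end);
        return 0;
}
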
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index d343b3c81f3c..90825f2eb0f4 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
@@ -57,12 +57,10 @@ static unsigned long direct_pages_count[PG_LEVEL_NUM]; | |||
57 | 57 | ||
58 | void update_page_count(int level, unsigned long pages) | 58 | void update_page_count(int level, unsigned long pages) |
59 | { | 59 | { |
60 | unsigned long flags; | ||
61 | |||
62 | /* Protect against CPA */ | 60 | /* Protect against CPA */ |
63 | spin_lock_irqsave(&pgd_lock, flags); | 61 | spin_lock(&pgd_lock); |
64 | direct_pages_count[level] += pages; | 62 | direct_pages_count[level] += pages; |
65 | spin_unlock_irqrestore(&pgd_lock, flags); | 63 | spin_unlock(&pgd_lock); |
66 | } | 64 | } |
67 | 65 | ||
68 | static void split_page_count(int level) | 66 | static void split_page_count(int level) |
@@ -394,7 +392,7 @@ static int | |||
394 | try_preserve_large_page(pte_t *kpte, unsigned long address, | 392 | try_preserve_large_page(pte_t *kpte, unsigned long address, |
395 | struct cpa_data *cpa) | 393 | struct cpa_data *cpa) |
396 | { | 394 | { |
397 | unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn; | 395 | unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn; |
398 | pte_t new_pte, old_pte, *tmp; | 396 | pte_t new_pte, old_pte, *tmp; |
399 | pgprot_t old_prot, new_prot, req_prot; | 397 | pgprot_t old_prot, new_prot, req_prot; |
400 | int i, do_split = 1; | 398 | int i, do_split = 1; |
@@ -403,7 +401,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, | |||
403 | if (cpa->force_split) | 401 | if (cpa->force_split) |
404 | return 1; | 402 | return 1; |
405 | 403 | ||
406 | spin_lock_irqsave(&pgd_lock, flags); | 404 | spin_lock(&pgd_lock); |
407 | /* | 405 | /* |
408 | * Check for races, another CPU might have split this page | 406 | * Check for races, another CPU might have split this page |
409 | * up already: | 407 | * up already: |
@@ -498,14 +496,14 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, | |||
498 | } | 496 | } |
499 | 497 | ||
500 | out_unlock: | 498 | out_unlock: |
501 | spin_unlock_irqrestore(&pgd_lock, flags); | 499 | spin_unlock(&pgd_lock); |
502 | 500 | ||
503 | return do_split; | 501 | return do_split; |
504 | } | 502 | } |
505 | 503 | ||
506 | static int split_large_page(pte_t *kpte, unsigned long address) | 504 | static int split_large_page(pte_t *kpte, unsigned long address) |
507 | { | 505 | { |
508 | unsigned long flags, pfn, pfninc = 1; | 506 | unsigned long pfn, pfninc = 1; |
509 | unsigned int i, level; | 507 | unsigned int i, level; |
510 | pte_t *pbase, *tmp; | 508 | pte_t *pbase, *tmp; |
511 | pgprot_t ref_prot; | 509 | pgprot_t ref_prot; |
@@ -519,7 +517,7 @@ static int split_large_page(pte_t *kpte, unsigned long address) | |||
519 | if (!base) | 517 | if (!base) |
520 | return -ENOMEM; | 518 | return -ENOMEM; |
521 | 519 | ||
522 | spin_lock_irqsave(&pgd_lock, flags); | 520 | spin_lock(&pgd_lock); |
523 | /* | 521 | /* |
524 | * Check for races, another CPU might have split this page | 522 | * Check for races, another CPU might have split this page |
525 | * up for us already: | 523 | * up for us already: |
@@ -591,7 +589,7 @@ out_unlock: | |||
591 | */ | 589 | */ |
592 | if (base) | 590 | if (base) |
593 | __free_page(base); | 591 | __free_page(base); |
594 | spin_unlock_irqrestore(&pgd_lock, flags); | 592 | spin_unlock(&pgd_lock); |
595 | 593 | ||
596 | return 0; | 594 | return 0; |
597 | } | 595 | } |
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 500242d3c96d..0113d19c8aa6 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c | |||
@@ -121,14 +121,12 @@ static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) | |||
121 | 121 | ||
122 | static void pgd_dtor(pgd_t *pgd) | 122 | static void pgd_dtor(pgd_t *pgd) |
123 | { | 123 | { |
124 | unsigned long flags; /* can be called from interrupt context */ | ||
125 | |||
126 | if (SHARED_KERNEL_PMD) | 124 | if (SHARED_KERNEL_PMD) |
127 | return; | 125 | return; |
128 | 126 | ||
129 | spin_lock_irqsave(&pgd_lock, flags); | 127 | spin_lock(&pgd_lock); |
130 | pgd_list_del(pgd); | 128 | pgd_list_del(pgd); |
131 | spin_unlock_irqrestore(&pgd_lock, flags); | 129 | spin_unlock(&pgd_lock); |
132 | } | 130 | } |
133 | 131 | ||
134 | /* | 132 | /* |
@@ -260,7 +258,6 @@ pgd_t *pgd_alloc(struct mm_struct *mm) | |||
260 | { | 258 | { |
261 | pgd_t *pgd; | 259 | pgd_t *pgd; |
262 | pmd_t *pmds[PREALLOCATED_PMDS]; | 260 | pmd_t *pmds[PREALLOCATED_PMDS]; |
263 | unsigned long flags; | ||
264 | 261 | ||
265 | pgd = (pgd_t *)__get_free_page(PGALLOC_GFP); | 262 | pgd = (pgd_t *)__get_free_page(PGALLOC_GFP); |
266 | 263 | ||
@@ -280,12 +277,12 @@ pgd_t *pgd_alloc(struct mm_struct *mm) | |||
280 | * respect to anything walking the pgd_list, so that they | 277 | * respect to anything walking the pgd_list, so that they |
281 | * never see a partially populated pgd. | 278 | * never see a partially populated pgd. |
282 | */ | 279 | */ |
283 | spin_lock_irqsave(&pgd_lock, flags); | 280 | spin_lock(&pgd_lock); |
284 | 281 | ||
285 | pgd_ctor(mm, pgd); | 282 | pgd_ctor(mm, pgd); |
286 | pgd_prepopulate_pmd(mm, pgd, pmds); | 283 | pgd_prepopulate_pmd(mm, pgd, pmds); |
287 | 284 | ||
288 | spin_unlock_irqrestore(&pgd_lock, flags); | 285 | spin_unlock(&pgd_lock); |
289 | 286 | ||
290 | return pgd; | 287 | return pgd; |
291 | 288 | ||
diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c index ae96e7b8051d..48651c6f657d 100644 --- a/arch/x86/mm/srat_32.c +++ b/arch/x86/mm/srat_32.c | |||
@@ -57,7 +57,7 @@ struct node_memory_chunk_s { | |||
57 | static struct node_memory_chunk_s __initdata node_memory_chunk[MAXCHUNKS]; | 57 | static struct node_memory_chunk_s __initdata node_memory_chunk[MAXCHUNKS]; |
58 | 58 | ||
59 | static int __initdata num_memory_chunks; /* total number of memory chunks */ | 59 | static int __initdata num_memory_chunks; /* total number of memory chunks */ |
60 | static u8 __initdata apicid_to_pxm[MAX_APICID]; | 60 | static u8 __initdata apicid_to_pxm[MAX_LOCAL_APIC]; |
61 | 61 | ||
62 | int acpi_numa __initdata; | 62 | int acpi_numa __initdata; |
63 | 63 | ||
@@ -254,8 +254,8 @@ int __init get_memcfg_from_srat(void) | |||
254 | printk(KERN_DEBUG "Number of memory chunks in system = %d\n", | 254 | printk(KERN_DEBUG "Number of memory chunks in system = %d\n", |
255 | num_memory_chunks); | 255 | num_memory_chunks); |
256 | 256 | ||
257 | for (i = 0; i < MAX_APICID; i++) | 257 | for (i = 0; i < MAX_LOCAL_APIC; i++) |
258 | apicid_2_node[i] = pxm_to_node(apicid_to_pxm[i]); | 258 | set_apicid_to_node(i, pxm_to_node(apicid_to_pxm[i])); |
259 | 259 | ||
260 | for (j = 0; j < num_memory_chunks; j++){ | 260 | for (j = 0; j < num_memory_chunks; j++){ |
261 | struct node_memory_chunk_s * chunk = &node_memory_chunk[j]; | 261 | struct node_memory_chunk_s * chunk = &node_memory_chunk[j]; |
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index 603d285d1daa..8e9d3394f6d4 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c | |||
@@ -26,88 +26,34 @@ | |||
26 | 26 | ||
27 | int acpi_numa __initdata; | 27 | int acpi_numa __initdata; |
28 | 28 | ||
29 | static struct acpi_table_slit *acpi_slit; | ||
30 | |||
31 | static nodemask_t nodes_parsed __initdata; | ||
32 | static nodemask_t cpu_nodes_parsed __initdata; | ||
33 | static struct bootnode nodes[MAX_NUMNODES] __initdata; | ||
34 | static struct bootnode nodes_add[MAX_NUMNODES]; | 29 | static struct bootnode nodes_add[MAX_NUMNODES]; |
35 | 30 | ||
36 | static int num_node_memblks __initdata; | ||
37 | static struct bootnode node_memblk_range[NR_NODE_MEMBLKS] __initdata; | ||
38 | static int memblk_nodeid[NR_NODE_MEMBLKS] __initdata; | ||
39 | |||
40 | static __init int setup_node(int pxm) | 31 | static __init int setup_node(int pxm) |
41 | { | 32 | { |
42 | return acpi_map_pxm_to_node(pxm); | 33 | return acpi_map_pxm_to_node(pxm); |
43 | } | 34 | } |
44 | 35 | ||
45 | static __init int conflicting_memblks(unsigned long start, unsigned long end) | ||
46 | { | ||
47 | int i; | ||
48 | for (i = 0; i < num_node_memblks; i++) { | ||
49 | struct bootnode *nd = &node_memblk_range[i]; | ||
50 | if (nd->start == nd->end) | ||
51 | continue; | ||
52 | if (nd->end > start && nd->start < end) | ||
53 | return memblk_nodeid[i]; | ||
54 | if (nd->end == end && nd->start == start) | ||
55 | return memblk_nodeid[i]; | ||
56 | } | ||
57 | return -1; | ||
58 | } | ||
59 | |||
60 | static __init void cutoff_node(int i, unsigned long start, unsigned long end) | ||
61 | { | ||
62 | struct bootnode *nd = &nodes[i]; | ||
63 | |||
64 | if (nd->start < start) { | ||
65 | nd->start = start; | ||
66 | if (nd->end < nd->start) | ||
67 | nd->start = nd->end; | ||
68 | } | ||
69 | if (nd->end > end) { | ||
70 | nd->end = end; | ||
71 | if (nd->start > nd->end) | ||
72 | nd->start = nd->end; | ||
73 | } | ||
74 | } | ||
75 | |||
76 | static __init void bad_srat(void) | 36 | static __init void bad_srat(void) |
77 | { | 37 | { |
78 | int i; | ||
79 | printk(KERN_ERR "SRAT: SRAT not used.\n"); | 38 | printk(KERN_ERR "SRAT: SRAT not used.\n"); |
80 | acpi_numa = -1; | 39 | acpi_numa = -1; |
81 | for (i = 0; i < MAX_LOCAL_APIC; i++) | 40 | memset(nodes_add, 0, sizeof(nodes_add)); |
82 | apicid_to_node[i] = NUMA_NO_NODE; | ||
83 | for (i = 0; i < MAX_NUMNODES; i++) { | ||
84 | nodes[i].start = nodes[i].end = 0; | ||
85 | nodes_add[i].start = nodes_add[i].end = 0; | ||
86 | } | ||
87 | remove_all_active_ranges(); | ||
88 | } | 41 | } |
89 | 42 | ||
90 | static __init inline int srat_disabled(void) | 43 | static __init inline int srat_disabled(void) |
91 | { | 44 | { |
92 | return numa_off || acpi_numa < 0; | 45 | return acpi_numa < 0; |
93 | } | 46 | } |
94 | 47 | ||
95 | /* Callback for SLIT parsing */ | 48 | /* Callback for SLIT parsing */ |
96 | void __init acpi_numa_slit_init(struct acpi_table_slit *slit) | 49 | void __init acpi_numa_slit_init(struct acpi_table_slit *slit) |
97 | { | 50 | { |
98 | unsigned length; | 51 | int i, j; |
99 | unsigned long phys; | ||
100 | |||
101 | length = slit->header.length; | ||
102 | phys = memblock_find_in_range(0, max_pfn_mapped<<PAGE_SHIFT, length, | ||
103 | PAGE_SIZE); | ||
104 | |||
105 | if (phys == MEMBLOCK_ERROR) | ||
106 | panic(" Can not save slit!\n"); | ||
107 | 52 | ||
108 | acpi_slit = __va(phys); | 53 | for (i = 0; i < slit->locality_count; i++) |
109 | memcpy(acpi_slit, slit, length); | 54 | for (j = 0; j < slit->locality_count; j++) |
110 | memblock_x86_reserve_range(phys, phys + length, "ACPI SLIT"); | 55 | numa_set_distance(pxm_to_node(i), pxm_to_node(j), |
56 | slit->entry[slit->locality_count * i + j]); | ||
111 | } | 57 | } |
112 | 58 | ||
113 | /* Callback for Proximity Domain -> x2APIC mapping */ | 59 | /* Callback for Proximity Domain -> x2APIC mapping */ |
@@ -138,8 +84,8 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa) | |||
138 | printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u skipped apicid that is too big\n", pxm, apic_id, node); | 84 | printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u skipped apicid that is too big\n", pxm, apic_id, node); |
139 | return; | 85 | return; |
140 | } | 86 | } |
141 | apicid_to_node[apic_id] = node; | 87 | set_apicid_to_node(apic_id, node); |
142 | node_set(node, cpu_nodes_parsed); | 88 | node_set(node, numa_nodes_parsed); |
143 | acpi_numa = 1; | 89 | acpi_numa = 1; |
144 | printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u\n", | 90 | printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u\n", |
145 | pxm, apic_id, node); | 91 | pxm, apic_id, node); |
@@ -178,8 +124,8 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) | |||
178 | return; | 124 | return; |
179 | } | 125 | } |
180 | 126 | ||
181 | apicid_to_node[apic_id] = node; | 127 | set_apicid_to_node(apic_id, node); |
182 | node_set(node, cpu_nodes_parsed); | 128 | node_set(node, numa_nodes_parsed); |
183 | acpi_numa = 1; | 129 | acpi_numa = 1; |
184 | printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u\n", | 130 | printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u\n", |
185 | pxm, apic_id, node); | 131 | pxm, apic_id, node); |
@@ -241,7 +187,7 @@ update_nodes_add(int node, unsigned long start, unsigned long end) | |||
241 | } | 187 | } |
242 | 188 | ||
243 | if (changed) { | 189 | if (changed) { |
244 | node_set(node, cpu_nodes_parsed); | 190 | node_set(node, numa_nodes_parsed); |
245 | printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n", | 191 | printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n", |
246 | nd->start, nd->end); | 192 | nd->start, nd->end); |
247 | } | 193 | } |
@@ -251,10 +197,8 @@ update_nodes_add(int node, unsigned long start, unsigned long end) | |||
251 | void __init | 197 | void __init |
252 | acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) | 198 | acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) |
253 | { | 199 | { |
254 | struct bootnode *nd, oldnode; | ||
255 | unsigned long start, end; | 200 | unsigned long start, end; |
256 | int node, pxm; | 201 | int node, pxm; |
257 | int i; | ||
258 | 202 | ||
259 | if (srat_disabled()) | 203 | if (srat_disabled()) |
260 | return; | 204 | return; |
@@ -276,300 +220,31 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) | |||
276 | bad_srat(); | 220 | bad_srat(); |
277 | return; | 221 | return; |
278 | } | 222 | } |
279 | i = conflicting_memblks(start, end); | 223 | |
280 | if (i == node) { | 224 | if (numa_add_memblk(node, start, end) < 0) { |
281 | printk(KERN_WARNING | ||
282 | "SRAT: Warning: PXM %d (%lx-%lx) overlaps with itself (%Lx-%Lx)\n", | ||
283 | pxm, start, end, nodes[i].start, nodes[i].end); | ||
284 | } else if (i >= 0) { | ||
285 | printk(KERN_ERR | ||
286 | "SRAT: PXM %d (%lx-%lx) overlaps with PXM %d (%Lx-%Lx)\n", | ||
287 | pxm, start, end, node_to_pxm(i), | ||
288 | nodes[i].start, nodes[i].end); | ||
289 | bad_srat(); | 225 | bad_srat(); |
290 | return; | 226 | return; |
291 | } | 227 | } |
292 | nd = &nodes[node]; | ||
293 | oldnode = *nd; | ||
294 | if (!node_test_and_set(node, nodes_parsed)) { | ||
295 | nd->start = start; | ||
296 | nd->end = end; | ||
297 | } else { | ||
298 | if (start < nd->start) | ||
299 | nd->start = start; | ||
300 | if (nd->end < end) | ||
301 | nd->end = end; | ||
302 | } | ||
303 | 228 | ||
304 | printk(KERN_INFO "SRAT: Node %u PXM %u %lx-%lx\n", node, pxm, | 229 | printk(KERN_INFO "SRAT: Node %u PXM %u %lx-%lx\n", node, pxm, |
305 | start, end); | 230 | start, end); |
306 | 231 | ||
307 | if (ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) { | 232 | if (ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) |
308 | update_nodes_add(node, start, end); | 233 | update_nodes_add(node, start, end); |
309 | /* restore nodes[node] */ | ||
310 | *nd = oldnode; | ||
311 | if ((nd->start | nd->end) == 0) | ||
312 | node_clear(node, nodes_parsed); | ||
313 | } | ||
314 | |||
315 | node_memblk_range[num_node_memblks].start = start; | ||
316 | node_memblk_range[num_node_memblks].end = end; | ||
317 | memblk_nodeid[num_node_memblks] = node; | ||
318 | num_node_memblks++; | ||
319 | } | ||
320 | |||
321 | /* Sanity check to catch more bad SRATs (they are amazingly common). | ||
322 | Make sure the PXMs cover all memory. */ | ||
323 | static int __init nodes_cover_memory(const struct bootnode *nodes) | ||
324 | { | ||
325 | int i; | ||
326 | unsigned long pxmram, e820ram; | ||
327 | |||
328 | pxmram = 0; | ||
329 | for_each_node_mask(i, nodes_parsed) { | ||
330 | unsigned long s = nodes[i].start >> PAGE_SHIFT; | ||
331 | unsigned long e = nodes[i].end >> PAGE_SHIFT; | ||
332 | pxmram += e - s; | ||
333 | pxmram -= __absent_pages_in_range(i, s, e); | ||
334 | if ((long)pxmram < 0) | ||
335 | pxmram = 0; | ||
336 | } | ||
337 | |||
338 | e820ram = max_pfn - (memblock_x86_hole_size(0, max_pfn<<PAGE_SHIFT)>>PAGE_SHIFT); | ||
339 | /* We seem to lose 3 pages somewhere. Allow 1M of slack. */ | ||
340 | if ((long)(e820ram - pxmram) >= (1<<(20 - PAGE_SHIFT))) { | ||
341 | printk(KERN_ERR | ||
342 | "SRAT: PXMs only cover %luMB of your %luMB e820 RAM. Not used.\n", | ||
343 | (pxmram << PAGE_SHIFT) >> 20, | ||
344 | (e820ram << PAGE_SHIFT) >> 20); | ||
345 | return 0; | ||
346 | } | ||
347 | return 1; | ||
348 | } | 234 | } |
349 | 235 | ||
350 | void __init acpi_numa_arch_fixup(void) {} | 236 | void __init acpi_numa_arch_fixup(void) {} |
351 | 237 | ||
352 | #ifdef CONFIG_NUMA_EMU | 238 | int __init x86_acpi_numa_init(void) |
353 | void __init acpi_get_nodes(struct bootnode *physnodes, unsigned long start, | ||
354 | unsigned long end) | ||
355 | { | ||
356 | int i; | ||
357 | |||
358 | for_each_node_mask(i, nodes_parsed) { | ||
359 | cutoff_node(i, start, end); | ||
360 | physnodes[i].start = nodes[i].start; | ||
361 | physnodes[i].end = nodes[i].end; | ||
362 | } | ||
363 | } | ||
364 | #endif /* CONFIG_NUMA_EMU */ | ||
365 | |||
366 | /* Use the information discovered above to actually set up the nodes. */ | ||
367 | int __init acpi_scan_nodes(unsigned long start, unsigned long end) | ||
368 | { | 239 | { |
369 | int i; | 240 | int ret; |
370 | |||
371 | if (acpi_numa <= 0) | ||
372 | return -1; | ||
373 | |||
374 | /* First clean up the node list */ | ||
375 | for (i = 0; i < MAX_NUMNODES; i++) | ||
376 | cutoff_node(i, start, end); | ||
377 | |||
378 | /* | ||
379 | * Join together blocks on the same node, holes between | ||
380 | * which don't overlap with memory on other nodes. | ||
381 | */ | ||
382 | for (i = 0; i < num_node_memblks; ++i) { | ||
383 | int j, k; | ||
384 | |||
385 | for (j = i + 1; j < num_node_memblks; ++j) { | ||
386 | unsigned long start, end; | ||
387 | |||
388 | if (memblk_nodeid[i] != memblk_nodeid[j]) | ||
389 | continue; | ||
390 | start = min(node_memblk_range[i].end, | ||
391 | node_memblk_range[j].end); | ||
392 | end = max(node_memblk_range[i].start, | ||
393 | node_memblk_range[j].start); | ||
394 | for (k = 0; k < num_node_memblks; ++k) { | ||
395 | if (memblk_nodeid[i] == memblk_nodeid[k]) | ||
396 | continue; | ||
397 | if (start < node_memblk_range[k].end && | ||
398 | end > node_memblk_range[k].start) | ||
399 | break; | ||
400 | } | ||
401 | if (k < num_node_memblks) | ||
402 | continue; | ||
403 | start = min(node_memblk_range[i].start, | ||
404 | node_memblk_range[j].start); | ||
405 | end = max(node_memblk_range[i].end, | ||
406 | node_memblk_range[j].end); | ||
407 | printk(KERN_INFO "SRAT: Node %d " | ||
408 | "[%Lx,%Lx) + [%Lx,%Lx) -> [%lx,%lx)\n", | ||
409 | memblk_nodeid[i], | ||
410 | node_memblk_range[i].start, | ||
411 | node_memblk_range[i].end, | ||
412 | node_memblk_range[j].start, | ||
413 | node_memblk_range[j].end, | ||
414 | start, end); | ||
415 | node_memblk_range[i].start = start; | ||
416 | node_memblk_range[i].end = end; | ||
417 | k = --num_node_memblks - j; | ||
418 | memmove(memblk_nodeid + j, memblk_nodeid + j+1, | ||
419 | k * sizeof(*memblk_nodeid)); | ||
420 | memmove(node_memblk_range + j, node_memblk_range + j+1, | ||
421 | k * sizeof(*node_memblk_range)); | ||
422 | --j; | ||
423 | } | ||
424 | } | ||
425 | |||
426 | memnode_shift = compute_hash_shift(node_memblk_range, num_node_memblks, | ||
427 | memblk_nodeid); | ||
428 | if (memnode_shift < 0) { | ||
429 | printk(KERN_ERR | ||
430 | "SRAT: No NUMA node hash function found. Contact maintainer\n"); | ||
431 | bad_srat(); | ||
432 | return -1; | ||
433 | } | ||
434 | |||
435 | for (i = 0; i < num_node_memblks; i++) | ||
436 | memblock_x86_register_active_regions(memblk_nodeid[i], | ||
437 | node_memblk_range[i].start >> PAGE_SHIFT, | ||
438 | node_memblk_range[i].end >> PAGE_SHIFT); | ||
439 | |||
440 | /* for out of order entries in SRAT */ | ||
441 | sort_node_map(); | ||
442 | if (!nodes_cover_memory(nodes)) { | ||
443 | bad_srat(); | ||
444 | return -1; | ||
445 | } | ||
446 | 241 | ||
447 | /* Account for nodes with cpus and no memory */ | 242 | ret = acpi_numa_init(); |
448 | nodes_or(node_possible_map, nodes_parsed, cpu_nodes_parsed); | 243 | if (ret < 0) |
449 | 244 | return ret; | |
450 | /* Finally register nodes */ | 245 | return srat_disabled() ? -EINVAL : 0; |
451 | for_each_node_mask(i, node_possible_map) | ||
452 | setup_node_bootmem(i, nodes[i].start, nodes[i].end); | ||
453 | /* Try again in case setup_node_bootmem missed one due | ||
454 | to missing bootmem */ | ||
455 | for_each_node_mask(i, node_possible_map) | ||
456 | if (!node_online(i)) | ||
457 | setup_node_bootmem(i, nodes[i].start, nodes[i].end); | ||
458 | |||
459 | for (i = 0; i < nr_cpu_ids; i++) { | ||
460 | int node = early_cpu_to_node(i); | ||
461 | |||
462 | if (node == NUMA_NO_NODE) | ||
463 | continue; | ||
464 | if (!node_online(node)) | ||
465 | numa_clear_node(i); | ||
466 | } | ||
467 | numa_init_array(); | ||
468 | return 0; | ||
469 | } | ||
470 | |||
471 | #ifdef CONFIG_NUMA_EMU | ||
472 | static int fake_node_to_pxm_map[MAX_NUMNODES] __initdata = { | ||
473 | [0 ... MAX_NUMNODES-1] = PXM_INVAL | ||
474 | }; | ||
475 | static s16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = { | ||
476 | [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE | ||
477 | }; | ||
478 | static int __init find_node_by_addr(unsigned long addr) | ||
479 | { | ||
480 | int ret = NUMA_NO_NODE; | ||
481 | int i; | ||
482 | |||
483 | for_each_node_mask(i, nodes_parsed) { | ||
484 | /* | ||
485 | * Find the real node that this emulated node appears on. For | ||
486 | * the sake of simplicity, we only use a real node's starting | ||
487 | * address to determine which emulated node it appears on. | ||
488 | */ | ||
489 | if (addr >= nodes[i].start && addr < nodes[i].end) { | ||
490 | ret = i; | ||
491 | break; | ||
492 | } | ||
493 | } | ||
494 | return ret; | ||
495 | } | 246 | } |
496 | 247 | ||
497 | /* | ||
498 | * In NUMA emulation, we need to setup proximity domain (_PXM) to node ID | ||
499 | * mappings that respect the real ACPI topology but reflect our emulated | ||
500 | * environment. For each emulated node, we find which real node it appears on | ||
501 | * and create PXM to NID mappings for those fake nodes which mirror that | ||
502 | * locality. SLIT will now represent the correct distances between emulated | ||
503 | * nodes as a result of the real topology. | ||
504 | */ | ||
505 | void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes) | ||
506 | { | ||
507 | int i, j; | ||
508 | |||
509 | for (i = 0; i < num_nodes; i++) { | ||
510 | int nid, pxm; | ||
511 | |||
512 | nid = find_node_by_addr(fake_nodes[i].start); | ||
513 | if (nid == NUMA_NO_NODE) | ||
514 | continue; | ||
515 | pxm = node_to_pxm(nid); | ||
516 | if (pxm == PXM_INVAL) | ||
517 | continue; | ||
518 | fake_node_to_pxm_map[i] = pxm; | ||
519 | /* | ||
520 | * For each apicid_to_node mapping that exists for this real | ||
521 | * node, it must now point to the fake node ID. | ||
522 | */ | ||
523 | for (j = 0; j < MAX_LOCAL_APIC; j++) | ||
524 | if (apicid_to_node[j] == nid && | ||
525 | fake_apicid_to_node[j] == NUMA_NO_NODE) | ||
526 | fake_apicid_to_node[j] = i; | ||
527 | } | ||
528 | |||
529 | /* | ||
530 | * If there are apicid-to-node mappings for physical nodes that do not | ||
531 | * have a corresponding emulated node, it should default to a guaranteed | ||
532 | * value. | ||
533 | */ | ||
534 | for (i = 0; i < MAX_LOCAL_APIC; i++) | ||
535 | if (apicid_to_node[i] != NUMA_NO_NODE && | ||
536 | fake_apicid_to_node[i] == NUMA_NO_NODE) | ||
537 | fake_apicid_to_node[i] = 0; | ||
538 | |||
539 | for (i = 0; i < num_nodes; i++) | ||
540 | __acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i); | ||
541 | memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node)); | ||
542 | |||
543 | nodes_clear(nodes_parsed); | ||
544 | for (i = 0; i < num_nodes; i++) | ||
545 | if (fake_nodes[i].start != fake_nodes[i].end) | ||
546 | node_set(i, nodes_parsed); | ||
547 | } | ||
548 | |||
549 | static int null_slit_node_compare(int a, int b) | ||
550 | { | ||
551 | return node_to_pxm(a) == node_to_pxm(b); | ||
552 | } | ||
553 | #else | ||
554 | static int null_slit_node_compare(int a, int b) | ||
555 | { | ||
556 | return a == b; | ||
557 | } | ||
558 | #endif /* CONFIG_NUMA_EMU */ | ||
559 | |||
560 | int __node_distance(int a, int b) | ||
561 | { | ||
562 | int index; | ||
563 | |||
564 | if (!acpi_slit) | ||
565 | return null_slit_node_compare(a, b) ? LOCAL_DISTANCE : | ||
566 | REMOTE_DISTANCE; | ||
567 | index = acpi_slit->locality_count * node_to_pxm(a); | ||
568 | return acpi_slit->entry[index + node_to_pxm(b)]; | ||
569 | } | ||
570 | |||
571 | EXPORT_SYMBOL(__node_distance); | ||
572 | |||
573 | #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || defined(CONFIG_ACPI_HOTPLUG_MEMORY) | 248 | #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || defined(CONFIG_ACPI_HOTPLUG_MEMORY) |
574 | int memory_add_physaddr_to_nid(u64 start) | 249 | int memory_add_physaddr_to_nid(u64 start) |
575 | { | 250 | { |
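
Aside (not part of the patch): the rewritten acpi_numa_slit_init() above reads the ACPI SLIT as a flat locality_count x locality_count byte matrix, so the distance from locality i to j is entry[locality_count * i + j]. The tiny standalone sketch below shows only that indexing; the two-locality table and its distance values are made up for the example.

#include <stdio.h>

int main(void)
{
        /* n = 2 localities; row i, column j holds distance(i, j) */
        const unsigned char entry[] = {
                10, 21,
                21, 10,
        };
        const int n = 2;

        for (int i = 0; i < n; i++)
                for (int j = 0; j < n; j++)
                        printf("distance(%d,%d) = %u\n", i, j,
                               entry[n * i + j]);
        return 0;
}
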
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 6acc724d5d8f..d6c0418c3e47 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c | |||
@@ -179,12 +179,8 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask, | |||
179 | sender = this_cpu_read(tlb_vector_offset); | 179 | sender = this_cpu_read(tlb_vector_offset); |
180 | f = &flush_state[sender]; | 180 | f = &flush_state[sender]; |
181 | 181 | ||
182 | /* | 182 | if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS) |
183 | * Could avoid this lock when | 183 | raw_spin_lock(&f->tlbstate_lock); |
184 | * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is | ||
185 | * probably not worth checking this for a cache-hot lock. | ||
186 | */ | ||
187 | raw_spin_lock(&f->tlbstate_lock); | ||
188 | 184 | ||
189 | f->flush_mm = mm; | 185 | f->flush_mm = mm; |
190 | f->flush_va = va; | 186 | f->flush_va = va; |
@@ -202,7 +198,8 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask, | |||
202 | 198 | ||
203 | f->flush_mm = NULL; | 199 | f->flush_mm = NULL; |
204 | f->flush_va = 0; | 200 | f->flush_va = 0; |
205 | raw_spin_unlock(&f->tlbstate_lock); | 201 | if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS) |
202 | raw_spin_unlock(&f->tlbstate_lock); | ||
206 | } | 203 | } |
207 | 204 | ||
208 | void native_flush_tlb_others(const struct cpumask *cpumask, | 205 | void native_flush_tlb_others(const struct cpumask *cpumask, |
@@ -211,11 +208,10 @@ void native_flush_tlb_others(const struct cpumask *cpumask, | |||
211 | if (is_uv_system()) { | 208 | if (is_uv_system()) { |
212 | unsigned int cpu; | 209 | unsigned int cpu; |
213 | 210 | ||
214 | cpu = get_cpu(); | 211 | cpu = smp_processor_id(); |
215 | cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu); | 212 | cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu); |
216 | if (cpumask) | 213 | if (cpumask) |
217 | flush_tlb_others_ipi(cpumask, mm, va); | 214 | flush_tlb_others_ipi(cpumask, mm, va); |
218 | put_cpu(); | ||
219 | return; | 215 | return; |
220 | } | 216 | } |
221 | flush_tlb_others_ipi(cpumask, mm, va); | 217 | flush_tlb_others_ipi(cpumask, mm, va); |
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c index e27dffbbb1a7..026e4931d162 100644 --- a/arch/x86/pci/amd_bus.c +++ b/arch/x86/pci/amd_bus.c | |||
@@ -350,7 +350,7 @@ static int __init early_fill_mp_bus_info(void) | |||
350 | 350 | ||
351 | #define ENABLE_CF8_EXT_CFG (1ULL << 46) | 351 | #define ENABLE_CF8_EXT_CFG (1ULL << 46) |
352 | 352 | ||
353 | static void enable_pci_io_ecs(void *unused) | 353 | static void __cpuinit enable_pci_io_ecs(void *unused) |
354 | { | 354 | { |
355 | u64 reg; | 355 | u64 reg; |
356 | rdmsrl(MSR_AMD64_NB_CFG, reg); | 356 | rdmsrl(MSR_AMD64_NB_CFG, reg); |
diff --git a/arch/x86/pci/ce4100.c b/arch/x86/pci/ce4100.c index 85b68ef5e809..67858be4b52b 100644 --- a/arch/x86/pci/ce4100.c +++ b/arch/x86/pci/ce4100.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/pci.h> | 34 | #include <linux/pci.h> |
35 | #include <linux/init.h> | 35 | #include <linux/init.h> |
36 | 36 | ||
37 | #include <asm/ce4100.h> | ||
37 | #include <asm/pci_x86.h> | 38 | #include <asm/pci_x86.h> |
38 | 39 | ||
39 | struct sim_reg { | 40 | struct sim_reg { |
@@ -254,7 +255,7 @@ int bridge_read(unsigned int devfn, int reg, int len, u32 *value) | |||
254 | static int ce4100_conf_read(unsigned int seg, unsigned int bus, | 255 | static int ce4100_conf_read(unsigned int seg, unsigned int bus, |
255 | unsigned int devfn, int reg, int len, u32 *value) | 256 | unsigned int devfn, int reg, int len, u32 *value) |
256 | { | 257 | { |
257 | int i, retval = 1; | 258 | int i; |
258 | 259 | ||
259 | if (bus == 1) { | 260 | if (bus == 1) { |
260 | for (i = 0; i < ARRAY_SIZE(bus1_fixups); i++) { | 261 | for (i = 0; i < ARRAY_SIZE(bus1_fixups); i++) { |
@@ -306,10 +307,10 @@ struct pci_raw_ops ce4100_pci_conf = { | |||
306 | .write = ce4100_conf_write, | 307 | .write = ce4100_conf_write, |
307 | }; | 308 | }; |
308 | 309 | ||
309 | static int __init ce4100_pci_init(void) | 310 | int __init ce4100_pci_init(void) |
310 | { | 311 | { |
311 | init_sim_regs(); | 312 | init_sim_regs(); |
312 | raw_pci_ops = &ce4100_pci_conf; | 313 | raw_pci_ops = &ce4100_pci_conf; |
313 | return 0; | 314 | /* Indicate caller that it should invoke pci_legacy_init() */ |
315 | return 1; | ||
314 | } | 316 | } |
315 | subsys_initcall(ce4100_pci_init); | ||
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index 25cd4a07d09f..8c4085a95ef1 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c | |||
@@ -20,7 +20,8 @@ | |||
20 | #include <asm/xen/pci.h> | 20 | #include <asm/xen/pci.h> |
21 | 21 | ||
22 | #ifdef CONFIG_ACPI | 22 | #ifdef CONFIG_ACPI |
23 | static int xen_hvm_register_pirq(u32 gsi, int triggering) | 23 | static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi, |
24 | int trigger, int polarity) | ||
24 | { | 25 | { |
25 | int rc, irq; | 26 | int rc, irq; |
26 | struct physdev_map_pirq map_irq; | 27 | struct physdev_map_pirq map_irq; |
@@ -41,7 +42,7 @@ static int xen_hvm_register_pirq(u32 gsi, int triggering) | |||
41 | return -1; | 42 | return -1; |
42 | } | 43 | } |
43 | 44 | ||
44 | if (triggering == ACPI_EDGE_SENSITIVE) { | 45 | if (trigger == ACPI_EDGE_SENSITIVE) { |
45 | shareable = 0; | 46 | shareable = 0; |
46 | name = "ioapic-edge"; | 47 | name = "ioapic-edge"; |
47 | } else { | 48 | } else { |
@@ -55,12 +56,6 @@ static int xen_hvm_register_pirq(u32 gsi, int triggering) | |||
55 | 56 | ||
56 | return irq; | 57 | return irq; |
57 | } | 58 | } |
58 | |||
59 | static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi, | ||
60 | int trigger, int polarity) | ||
61 | { | ||
62 | return xen_hvm_register_pirq(gsi, trigger); | ||
63 | } | ||
64 | #endif | 59 | #endif |
65 | 60 | ||
66 | #if defined(CONFIG_PCI_MSI) | 61 | #if defined(CONFIG_PCI_MSI) |
@@ -91,7 +86,7 @@ static void xen_msi_compose_msg(struct pci_dev *pdev, unsigned int pirq, | |||
91 | 86 | ||
92 | static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | 87 | static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) |
93 | { | 88 | { |
94 | int irq, pirq, ret = 0; | 89 | int irq, pirq; |
95 | struct msi_desc *msidesc; | 90 | struct msi_desc *msidesc; |
96 | struct msi_msg msg; | 91 | struct msi_msg msg; |
97 | 92 | ||
@@ -99,39 +94,32 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | |||
99 | __read_msi_msg(msidesc, &msg); | 94 | __read_msi_msg(msidesc, &msg); |
100 | pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) | | 95 | pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) | |
101 | ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff); | 96 | ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff); |
102 | if (xen_irq_from_pirq(pirq) >= 0 && msg.data == XEN_PIRQ_MSI_DATA) { | 97 | if (msg.data != XEN_PIRQ_MSI_DATA || |
103 | xen_allocate_pirq_msi((type == PCI_CAP_ID_MSIX) ? | 98 | xen_irq_from_pirq(pirq) < 0) { |
104 | "msi-x" : "msi", &irq, &pirq, XEN_ALLOC_IRQ); | 99 | pirq = xen_allocate_pirq_msi(dev, msidesc); |
105 | if (irq < 0) | 100 | if (pirq < 0) |
106 | goto error; | 101 | goto error; |
107 | ret = set_irq_msi(irq, msidesc); | 102 | xen_msi_compose_msg(dev, pirq, &msg); |
108 | if (ret < 0) | 103 | __write_msi_msg(msidesc, &msg); |
109 | goto error_while; | 104 | dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq); |
110 | printk(KERN_DEBUG "xen: msi already setup: msi --> irq=%d" | 105 | } else { |
111 | " pirq=%d\n", irq, pirq); | 106 | dev_dbg(&dev->dev, |
112 | return 0; | 107 | "xen: msi already bound to pirq=%d\n", pirq); |
113 | } | 108 | } |
114 | xen_allocate_pirq_msi((type == PCI_CAP_ID_MSIX) ? | 109 | irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, 0, |
115 | "msi-x" : "msi", &irq, &pirq, (XEN_ALLOC_IRQ | XEN_ALLOC_PIRQ)); | 110 | (type == PCI_CAP_ID_MSIX) ? |
116 | if (irq < 0 || pirq < 0) | 111 | "msi-x" : "msi"); |
112 | if (irq < 0) | ||
117 | goto error; | 113 | goto error; |
118 | printk(KERN_DEBUG "xen: msi --> irq=%d, pirq=%d\n", irq, pirq); | 114 | dev_dbg(&dev->dev, |
119 | xen_msi_compose_msg(dev, pirq, &msg); | 115 | "xen: msi --> pirq=%d --> irq=%d\n", pirq, irq); |
120 | ret = set_irq_msi(irq, msidesc); | ||
121 | if (ret < 0) | ||
122 | goto error_while; | ||
123 | write_msi_msg(irq, &msg); | ||
124 | } | 116 | } |
125 | return 0; | 117 | return 0; |
126 | 118 | ||
127 | error_while: | ||
128 | unbind_from_irqhandler(irq, NULL); | ||
129 | error: | 119 | error: |
130 | if (ret == -ENODEV) | 120 | dev_err(&dev->dev, |
131 | dev_err(&dev->dev, "Xen PCI frontend has not registered" \ | 121 | "Xen PCI frontend has not registered MSI/MSI-X support!\n"); |
132 | " MSI/MSI-X support!\n"); | 122 | return -ENODEV; |
133 | |||
134 | return ret; | ||
135 | } | 123 | } |
136 | 124 | ||
137 | /* | 125 | /* |
@@ -150,35 +138,26 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | |||
150 | return -ENOMEM; | 138 | return -ENOMEM; |
151 | 139 | ||
152 | if (type == PCI_CAP_ID_MSIX) | 140 | if (type == PCI_CAP_ID_MSIX) |
153 | ret = xen_pci_frontend_enable_msix(dev, &v, nvec); | 141 | ret = xen_pci_frontend_enable_msix(dev, v, nvec); |
154 | else | 142 | else |
155 | ret = xen_pci_frontend_enable_msi(dev, &v); | 143 | ret = xen_pci_frontend_enable_msi(dev, v); |
156 | if (ret) | 144 | if (ret) |
157 | goto error; | 145 | goto error; |
158 | i = 0; | 146 | i = 0; |
159 | list_for_each_entry(msidesc, &dev->msi_list, list) { | 147 | list_for_each_entry(msidesc, &dev->msi_list, list) { |
160 | irq = xen_allocate_pirq(v[i], 0, /* not sharable */ | 148 | irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0, |
161 | (type == PCI_CAP_ID_MSIX) ? | 149 | (type == PCI_CAP_ID_MSIX) ? |
162 | "pcifront-msi-x" : "pcifront-msi"); | 150 | "pcifront-msi-x" : |
163 | if (irq < 0) { | 151 | "pcifront-msi"); |
164 | ret = -1; | 152 | if (irq < 0) |
165 | goto free; | 153 | goto free; |
166 | } | ||
167 | |||
168 | ret = set_irq_msi(irq, msidesc); | ||
169 | if (ret) | ||
170 | goto error_while; | ||
171 | i++; | 154 | i++; |
172 | } | 155 | } |
173 | kfree(v); | 156 | kfree(v); |
174 | return 0; | 157 | return 0; |
175 | 158 | ||
176 | error_while: | ||
177 | unbind_from_irqhandler(irq, NULL); | ||
178 | error: | 159 | error: |
179 | if (ret == -ENODEV) | 160 | dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n"); |
180 | dev_err(&dev->dev, "Xen PCI frontend has not registered" \ | ||
181 | " MSI/MSI-X support!\n"); | ||
182 | free: | 161 | free: |
183 | kfree(v); | 162 | kfree(v); |
184 | return ret; | 163 | return ret; |
@@ -193,6 +172,9 @@ static void xen_teardown_msi_irqs(struct pci_dev *dev) | |||
193 | xen_pci_frontend_disable_msix(dev); | 172 | xen_pci_frontend_disable_msix(dev); |
194 | else | 173 | else |
195 | xen_pci_frontend_disable_msi(dev); | 174 | xen_pci_frontend_disable_msi(dev); |
175 | |||
176 | /* Free the IRQ's and the msidesc using the generic code. */ | ||
177 | default_teardown_msi_irqs(dev); | ||
196 | } | 178 | } |
197 | 179 | ||
198 | static void xen_teardown_msi_irq(unsigned int irq) | 180 | static void xen_teardown_msi_irq(unsigned int irq) |
@@ -200,47 +182,82 @@ static void xen_teardown_msi_irq(unsigned int irq) | |||
200 | xen_destroy_irq(irq); | 182 | xen_destroy_irq(irq); |
201 | } | 183 | } |
202 | 184 | ||
185 | #ifdef CONFIG_XEN_DOM0 | ||
203 | static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | 186 | static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) |
204 | { | 187 | { |
205 | int irq, ret; | 188 | int ret = 0; |
206 | struct msi_desc *msidesc; | 189 | struct msi_desc *msidesc; |
207 | 190 | ||
208 | list_for_each_entry(msidesc, &dev->msi_list, list) { | 191 | list_for_each_entry(msidesc, &dev->msi_list, list) { |
209 | irq = xen_create_msi_irq(dev, msidesc, type); | 192 | struct physdev_map_pirq map_irq; |
210 | if (irq < 0) | ||
211 | return -1; | ||
212 | 193 | ||
213 | ret = set_irq_msi(irq, msidesc); | 194 | memset(&map_irq, 0, sizeof(map_irq)); |
214 | if (ret) | 195 | map_irq.domid = DOMID_SELF; |
215 | goto error; | 196 | map_irq.type = MAP_PIRQ_TYPE_MSI; |
216 | } | 197 | map_irq.index = -1; |
217 | return 0; | 198 | map_irq.pirq = -1; |
199 | map_irq.bus = dev->bus->number; | ||
200 | map_irq.devfn = dev->devfn; | ||
218 | 201 | ||
219 | error: | 202 | if (type == PCI_CAP_ID_MSIX) { |
220 | xen_destroy_irq(irq); | 203 | int pos; |
204 | u32 table_offset, bir; | ||
205 | |||
206 | pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); | ||
207 | |||
208 | pci_read_config_dword(dev, pos + PCI_MSIX_TABLE, | ||
209 | &table_offset); | ||
210 | bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK); | ||
211 | |||
212 | map_irq.table_base = pci_resource_start(dev, bir); | ||
213 | map_irq.entry_nr = msidesc->msi_attrib.entry_nr; | ||
214 | } | ||
215 | |||
216 | ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq); | ||
217 | if (ret) { | ||
218 | dev_warn(&dev->dev, "xen map irq failed %d\n", ret); | ||
219 | goto out; | ||
220 | } | ||
221 | |||
222 | ret = xen_bind_pirq_msi_to_irq(dev, msidesc, | ||
223 | map_irq.pirq, map_irq.index, | ||
224 | (type == PCI_CAP_ID_MSIX) ? | ||
225 | "msi-x" : "msi"); | ||
226 | if (ret < 0) | ||
227 | goto out; | ||
228 | } | ||
229 | ret = 0; | ||
230 | out: | ||
221 | return ret; | 231 | return ret; |
222 | } | 232 | } |
223 | #endif | 233 | #endif |
234 | #endif | ||
224 | 235 | ||
225 | static int xen_pcifront_enable_irq(struct pci_dev *dev) | 236 | static int xen_pcifront_enable_irq(struct pci_dev *dev) |
226 | { | 237 | { |
227 | int rc; | 238 | int rc; |
228 | int share = 1; | 239 | int share = 1; |
240 | u8 gsi; | ||
229 | 241 | ||
230 | dev_info(&dev->dev, "Xen PCI enabling IRQ: %d\n", dev->irq); | 242 | rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi); |
231 | 243 | if (rc < 0) { | |
232 | if (dev->irq < 0) | 244 | dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n", |
233 | return -EINVAL; | 245 | rc); |
246 | return rc; | ||
247 | } | ||
234 | 248 | ||
235 | if (dev->irq < NR_IRQS_LEGACY) | 249 | if (gsi < NR_IRQS_LEGACY) |
236 | share = 0; | 250 | share = 0; |
237 | 251 | ||
238 | rc = xen_allocate_pirq(dev->irq, share, "pcifront"); | 252 | rc = xen_allocate_pirq(gsi, share, "pcifront"); |
239 | if (rc < 0) { | 253 | if (rc < 0) { |
240 | dev_warn(&dev->dev, "Xen PCI IRQ: %d, failed to register:%d\n", | 254 | dev_warn(&dev->dev, "Xen PCI: failed to register GSI%d: %d\n", |
241 | dev->irq, rc); | 255 | gsi, rc); |
242 | return rc; | 256 | return rc; |
243 | } | 257 | } |
258 | |||
259 | dev->irq = rc; | ||
260 | dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq); | ||
244 | return 0; | 261 | return 0; |
245 | } | 262 | } |
246 | 263 | ||
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c index d2c0d51a7178..28071bb31db7 100644 --- a/arch/x86/platform/ce4100/ce4100.c +++ b/arch/x86/platform/ce4100/ce4100.c | |||
@@ -15,21 +15,20 @@ | |||
15 | #include <linux/serial_reg.h> | 15 | #include <linux/serial_reg.h> |
16 | #include <linux/serial_8250.h> | 16 | #include <linux/serial_8250.h> |
17 | 17 | ||
18 | #include <asm/ce4100.h> | ||
19 | #include <asm/prom.h> | ||
18 | #include <asm/setup.h> | 20 | #include <asm/setup.h> |
21 | #include <asm/i8259.h> | ||
19 | #include <asm/io.h> | 22 | #include <asm/io.h> |
23 | #include <asm/io_apic.h> | ||
20 | 24 | ||
21 | static int ce4100_i8042_detect(void) | 25 | static int ce4100_i8042_detect(void) |
22 | { | 26 | { |
23 | return 0; | 27 | return 0; |
24 | } | 28 | } |
25 | 29 | ||
26 | static void __init sdv_find_smp_config(void) | ||
27 | { | ||
28 | } | ||
29 | |||
30 | #ifdef CONFIG_SERIAL_8250 | 30 | #ifdef CONFIG_SERIAL_8250 |
31 | 31 | ||
32 | |||
33 | static unsigned int mem_serial_in(struct uart_port *p, int offset) | 32 | static unsigned int mem_serial_in(struct uart_port *p, int offset) |
34 | { | 33 | { |
35 | offset = offset << p->regshift; | 34 | offset = offset << p->regshift; |
@@ -118,6 +117,15 @@ static void __init sdv_arch_setup(void) | |||
118 | sdv_serial_fixup(); | 117 | sdv_serial_fixup(); |
119 | } | 118 | } |
120 | 119 | ||
120 | #ifdef CONFIG_X86_IO_APIC | ||
121 | static void __cpuinit sdv_pci_init(void) | ||
122 | { | ||
123 | x86_of_pci_init(); | ||
124 | /* We can't set this earlier, because we need to calibrate the timer */ | ||
125 | legacy_pic = &null_legacy_pic; | ||
126 | } | ||
127 | #endif | ||
128 | |||
121 | /* | 129 | /* |
122 | * CE4100 specific x86_init function overrides and early setup | 130 | * CE4100 specific x86_init function overrides and early setup |
123 | * calls. | 131 | * calls. |
@@ -128,5 +136,11 @@ void __init x86_ce4100_early_setup(void) | |||
128 | x86_platform.i8042_detect = ce4100_i8042_detect; | 136 | x86_platform.i8042_detect = ce4100_i8042_detect; |
129 | x86_init.resources.probe_roms = x86_init_noop; | 137 | x86_init.resources.probe_roms = x86_init_noop; |
130 | x86_init.mpparse.get_smp_config = x86_init_uint_noop; | 138 | x86_init.mpparse.get_smp_config = x86_init_uint_noop; |
131 | x86_init.mpparse.find_smp_config = sdv_find_smp_config; | 139 | x86_init.mpparse.find_smp_config = x86_init_noop; |
140 | x86_init.pci.init = ce4100_pci_init; | ||
141 | |||
142 | #ifdef CONFIG_X86_IO_APIC | ||
143 | x86_init.pci.init_irq = sdv_pci_init; | ||
144 | x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc_nocheck; | ||
145 | #endif | ||
132 | } | 146 | } |
diff --git a/arch/x86/platform/ce4100/falconfalls.dts b/arch/x86/platform/ce4100/falconfalls.dts new file mode 100644 index 000000000000..dc701ea58546 --- /dev/null +++ b/arch/x86/platform/ce4100/falconfalls.dts | |||
@@ -0,0 +1,428 @@ | |||
1 | /* | ||
2 | * CE4100 on Falcon Falls | ||
3 | * | ||
4 | * (c) Copyright 2010 Intel Corporation | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; version 2 of the License. | ||
9 | */ | ||
10 | /dts-v1/; | ||
11 | / { | ||
12 | model = "intel,falconfalls"; | ||
13 | compatible = "intel,falconfalls"; | ||
14 | #address-cells = <1>; | ||
15 | #size-cells = <1>; | ||
16 | |||
17 | cpus { | ||
18 | #address-cells = <1>; | ||
19 | #size-cells = <0>; | ||
20 | |||
21 | cpu@0 { | ||
22 | device_type = "cpu"; | ||
23 | compatible = "intel,ce4100"; | ||
24 | reg = <0>; | ||
25 | lapic = <&lapic0>; | ||
26 | }; | ||
27 | }; | ||
28 | |||
29 | soc@0 { | ||
30 | #address-cells = <1>; | ||
31 | #size-cells = <1>; | ||
32 | compatible = "intel,ce4100-cp"; | ||
33 | ranges; | ||
34 | |||
35 | ioapic1: interrupt-controller@fec00000 { | ||
36 | #interrupt-cells = <2>; | ||
37 | compatible = "intel,ce4100-ioapic"; | ||
38 | interrupt-controller; | ||
39 | reg = <0xfec00000 0x1000>; | ||
40 | }; | ||
41 | |||
42 | timer@fed00000 { | ||
43 | compatible = "intel,ce4100-hpet"; | ||
44 | reg = <0xfed00000 0x200>; | ||
45 | }; | ||
46 | |||
47 | lapic0: interrupt-controller@fee00000 { | ||
48 | compatible = "intel,ce4100-lapic"; | ||
49 | reg = <0xfee00000 0x1000>; | ||
50 | }; | ||
51 | |||
52 | pci@3fc { | ||
53 | #address-cells = <3>; | ||
54 | #size-cells = <2>; | ||
55 | compatible = "intel,ce4100-pci", "pci"; | ||
56 | device_type = "pci"; | ||
57 | bus-range = <0 0>; | ||
58 | ranges = <0x2000000 0 0xbffff000 0xbffff000 0 0x1000 | ||
59 | 0x2000000 0 0xdffe0000 0xdffe0000 0 0x1000 | ||
60 | 0x0000000 0 0x0 0x0 0 0x100>; | ||
61 | |||
62 | /* Secondary IO-APIC */ | ||
63 | ioapic2: interrupt-controller@0,1 { | ||
64 | #interrupt-cells = <2>; | ||
65 | compatible = "intel,ce4100-ioapic"; | ||
66 | interrupt-controller; | ||
67 | reg = <0x100 0x0 0x0 0x0 0x0>; | ||
68 | assigned-addresses = <0x02000000 0x0 0xbffff000 0x0 0x1000>; | ||
69 | }; | ||
70 | |||
71 | pci@1,0 { | ||
72 | #address-cells = <3>; | ||
73 | #size-cells = <2>; | ||
74 | compatible = "intel,ce4100-pci", "pci"; | ||
75 | device_type = "pci"; | ||
76 | bus-range = <1 1>; | ||
77 | ranges = <0x2000000 0 0xdffe0000 0x2000000 0 0xdffe0000 0 0x1000>; | ||
78 | |||
79 | interrupt-parent = <&ioapic2>; | ||
80 | |||
81 | display@2,0 { | ||
82 | compatible = "pci8086,2e5b.2", | ||
83 | "pci8086,2e5b", | ||
84 | "pciclass038000", | ||
85 | "pciclass0380"; | ||
86 | |||
87 | reg = <0x11000 0x0 0x0 0x0 0x0>; | ||
88 | interrupts = <0 1>; | ||
89 | }; | ||
90 | |||
91 | multimedia@3,0 { | ||
92 | compatible = "pci8086,2e5c.2", | ||
93 | "pci8086,2e5c", | ||
94 | "pciclass048000", | ||
95 | "pciclass0480"; | ||
96 | |||
97 | reg = <0x11800 0x0 0x0 0x0 0x0>; | ||
98 | interrupts = <2 1>; | ||
99 | }; | ||
100 | |||
101 | multimedia@4,0 { | ||
102 | compatible = "pci8086,2e5d.2", | ||
103 | "pci8086,2e5d", | ||
104 | "pciclass048000", | ||
105 | "pciclass0480"; | ||
106 | |||
107 | reg = <0x12000 0x0 0x0 0x0 0x0>; | ||
108 | interrupts = <4 1>; | ||
109 | }; | ||
110 | |||
111 | multimedia@4,1 { | ||
112 | compatible = "pci8086,2e5e.2", | ||
113 | "pci8086,2e5e", | ||
114 | "pciclass048000", | ||
115 | "pciclass0480"; | ||
116 | |||
117 | reg = <0x12100 0x0 0x0 0x0 0x0>; | ||
118 | interrupts = <5 1>; | ||
119 | }; | ||
120 | |||
121 | sound@6,0 { | ||
122 | compatible = "pci8086,2e5f.2", | ||
123 | "pci8086,2e5f", | ||
124 | "pciclass040100", | ||
125 | "pciclass0401"; | ||
126 | |||
127 | reg = <0x13000 0x0 0x0 0x0 0x0>; | ||
128 | interrupts = <6 1>; | ||
129 | }; | ||
130 | |||
131 | sound@6,1 { | ||
132 | compatible = "pci8086,2e5f.2", | ||
133 | "pci8086,2e5f", | ||
134 | "pciclass040100", | ||
135 | "pciclass0401"; | ||
136 | |||
137 | reg = <0x13100 0x0 0x0 0x0 0x0>; | ||
138 | interrupts = <7 1>; | ||
139 | }; | ||
140 | |||
141 | sound@6,2 { | ||
142 | compatible = "pci8086,2e60.2", | ||
143 | "pci8086,2e60", | ||
144 | "pciclass040100", | ||
145 | "pciclass0401"; | ||
146 | |||
147 | reg = <0x13200 0x0 0x0 0x0 0x0>; | ||
148 | interrupts = <8 1>; | ||
149 | }; | ||
150 | |||
151 | display@8,0 { | ||
152 | compatible = "pci8086,2e61.2", | ||
153 | "pci8086,2e61", | ||
154 | "pciclass038000", | ||
155 | "pciclass0380"; | ||
156 | |||
157 | reg = <0x14000 0x0 0x0 0x0 0x0>; | ||
158 | interrupts = <9 1>; | ||
159 | }; | ||
160 | |||
161 | display@8,1 { | ||
162 | compatible = "pci8086,2e62.2", | ||
163 | "pci8086,2e62", | ||
164 | "pciclass038000", | ||
165 | "pciclass0380"; | ||
166 | |||
167 | reg = <0x14100 0x0 0x0 0x0 0x0>; | ||
168 | interrupts = <10 1>; | ||
169 | }; | ||
170 | |||
171 | multimedia@8,2 { | ||
172 | compatible = "pci8086,2e63.2", | ||
173 | "pci8086,2e63", | ||
174 | "pciclass048000", | ||
175 | "pciclass0480"; | ||
176 | |||
177 | reg = <0x14200 0x0 0x0 0x0 0x0>; | ||
178 | interrupts = <11 1>; | ||
179 | }; | ||
180 | |||
181 | entertainment-encryption@9,0 { | ||
182 | compatible = "pci8086,2e64.2", | ||
183 | "pci8086,2e64", | ||
184 | "pciclass101000", | ||
185 | "pciclass1010"; | ||
186 | |||
187 | reg = <0x14800 0x0 0x0 0x0 0x0>; | ||
188 | interrupts = <12 1>; | ||
189 | }; | ||
190 | |||
191 | localbus@a,0 { | ||
192 | compatible = "pci8086,2e65.2", | ||
193 | "pci8086,2e65", | ||
194 | "pciclassff0000", | ||
195 | "pciclassff00"; | ||
196 | |||
197 | reg = <0x15000 0x0 0x0 0x0 0x0>; | ||
198 | }; | ||
199 | |||
200 | serial@b,0 { | ||
201 | compatible = "pci8086,2e66.2", | ||
202 | "pci8086,2e66", | ||
203 | "pciclass070003", | ||
204 | "pciclass0700"; | ||
205 | |||
206 | reg = <0x15800 0x0 0x0 0x0 0x0>; | ||
207 | interrupts = <14 1>; | ||
208 | }; | ||
209 | |||
210 | gpio@b,1 { | ||
211 | compatible = "pci8086,2e67.2", | ||
212 | "pci8086,2e67", | ||
213 | "pciclassff0000", | ||
214 | "pciclassff00"; | ||
215 | |||
216 | #gpio-cells = <2>; | ||
217 | reg = <0x15900 0x0 0x0 0x0 0x0>; | ||
218 | interrupts = <15 1>; | ||
219 | gpio-controller; | ||
220 | }; | ||
221 | |||
222 | i2c-controller@b,2 { | ||
223 | #address-cells = <2>; | ||
224 | #size-cells = <1>; | ||
225 | compatible = "pci8086,2e68.2", | ||
226 | "pci8086,2e68", | ||
227 | "pciclass,ff0000", | ||
228 | "pciclass,ff00"; | ||
229 | |||
230 | reg = <0x15a00 0x0 0x0 0x0 0x0>; | ||
231 | interrupts = <16 1>; | ||
232 | ranges = <0 0 0x02000000 0 0xdffe0500 0x100 | ||
233 | 1 0 0x02000000 0 0xdffe0600 0x100 | ||
234 | 2 0 0x02000000 0 0xdffe0700 0x100>; | ||
235 | |||
236 | i2c@0 { | ||
237 | #address-cells = <1>; | ||
238 | #size-cells = <0>; | ||
239 | compatible = "intel,ce4100-i2c-controller"; | ||
240 | reg = <0 0 0x100>; | ||
241 | }; | ||
242 | |||
243 | i2c@1 { | ||
244 | #address-cells = <1>; | ||
245 | #size-cells = <0>; | ||
246 | compatible = "intel,ce4100-i2c-controller"; | ||
247 | reg = <1 0 0x100>; | ||
248 | |||
249 | gpio@26 { | ||
250 | #gpio-cells = <2>; | ||
251 | compatible = "ti,pcf8575"; | ||
252 | reg = <0x26>; | ||
253 | gpio-controller; | ||
254 | }; | ||
255 | }; | ||
256 | |||
257 | i2c@2 { | ||
258 | #address-cells = <1>; | ||
259 | #size-cells = <0>; | ||
260 | compatible = "intel,ce4100-i2c-controller"; | ||
261 | reg = <2 0 0x100>; | ||
262 | |||
263 | gpio@26 { | ||
264 | #gpio-cells = <2>; | ||
265 | compatible = "ti,pcf8575"; | ||
266 | reg = <0x26>; | ||
267 | gpio-controller; | ||
268 | }; | ||
269 | }; | ||
270 | }; | ||
271 | |||
272 | smard-card@b,3 { | ||
273 | compatible = "pci8086,2e69.2", | ||
274 | "pci8086,2e69", | ||
275 | "pciclass070500", | ||
276 | "pciclass0705"; | ||
277 | |||
278 | reg = <0x15b00 0x0 0x0 0x0 0x0>; | ||
279 | interrupts = <15 1>; | ||
280 | }; | ||
281 | |||
282 | spi-controller@b,4 { | ||
283 | #address-cells = <1>; | ||
284 | #size-cells = <0>; | ||
285 | compatible = | ||
286 | "pci8086,2e6a.2", | ||
287 | "pci8086,2e6a", | ||
288 | "pciclass,ff0000", | ||
289 | "pciclass,ff00"; | ||
290 | |||
291 | reg = <0x15c00 0x0 0x0 0x0 0x0>; | ||
292 | interrupts = <15 1>; | ||
293 | |||
294 | dac@0 { | ||
295 | compatible = "ti,pcm1755"; | ||
296 | reg = <0>; | ||
297 | spi-max-frequency = <115200>; | ||
298 | }; | ||
299 | |||
300 | dac@1 { | ||
301 | compatible = "ti,pcm1609a"; | ||
302 | reg = <1>; | ||
303 | spi-max-frequency = <115200>; | ||
304 | }; | ||
305 | |||
306 | eeprom@2 { | ||
307 | compatible = "atmel,at93c46"; | ||
308 | reg = <2>; | ||
309 | spi-max-frequency = <115200>; | ||
310 | }; | ||
311 | }; | ||
312 | |||
313 | multimedia@b,7 { | ||
314 | compatible = "pci8086,2e6d.2", | ||
315 | "pci8086,2e6d", | ||
316 | "pciclassff0000", | ||
317 | "pciclassff00"; | ||
318 | |||
319 | reg = <0x15f00 0x0 0x0 0x0 0x0>; | ||
320 | }; | ||
321 | |||
322 | ethernet@c,0 { | ||
323 | compatible = "pci8086,2e6e.2", | ||
324 | "pci8086,2e6e", | ||
325 | "pciclass020000", | ||
326 | "pciclass0200"; | ||
327 | |||
328 | reg = <0x16000 0x0 0x0 0x0 0x0>; | ||
329 | interrupts = <21 1>; | ||
330 | }; | ||
331 | |||
332 | clock@c,1 { | ||
333 | compatible = "pci8086,2e6f.2", | ||
334 | "pci8086,2e6f", | ||
335 | "pciclassff0000", | ||
336 | "pciclassff00"; | ||
337 | |||
338 | reg = <0x16100 0x0 0x0 0x0 0x0>; | ||
339 | interrupts = <3 1>; | ||
340 | }; | ||
341 | |||
342 | usb@d,0 { | ||
343 | compatible = "pci8086,2e70.2", | ||
344 | "pci8086,2e70", | ||
345 | "pciclass0c0320", | ||
346 | "pciclass0c03"; | ||
347 | |||
348 | reg = <0x16800 0x0 0x0 0x0 0x0>; | ||
349 | interrupts = <22 3>; | ||
350 | }; | ||
351 | |||
352 | usb@d,1 { | ||
353 | compatible = "pci8086,2e70.2", | ||
354 | "pci8086,2e70", | ||
355 | "pciclass0c0320", | ||
356 | "pciclass0c03"; | ||
357 | |||
358 | reg = <0x16900 0x0 0x0 0x0 0x0>; | ||
359 | interrupts = <22 3>; | ||
360 | }; | ||
361 | |||
362 | sata@e,0 { | ||
363 | compatible = "pci8086,2e71.0", | ||
364 | "pci8086,2e71", | ||
365 | "pciclass010601", | ||
366 | "pciclass0106"; | ||
367 | |||
368 | reg = <0x17000 0x0 0x0 0x0 0x0>; | ||
369 | interrupts = <23 3>; | ||
370 | }; | ||
371 | |||
372 | flash@f,0 { | ||
373 | compatible = "pci8086,701.1", | ||
374 | "pci8086,701", | ||
375 | "pciclass050100", | ||
376 | "pciclass0501"; | ||
377 | |||
378 | reg = <0x17800 0x0 0x0 0x0 0x0>; | ||
379 | interrupts = <13 1>; | ||
380 | }; | ||
381 | |||
382 | entertainment-encryption@10,0 { | ||
383 | compatible = "pci8086,702.1", | ||
384 | "pci8086,702", | ||
385 | "pciclass101000", | ||
386 | "pciclass1010"; | ||
387 | |||
388 | reg = <0x18000 0x0 0x0 0x0 0x0>; | ||
389 | }; | ||
390 | |||
391 | co-processor@11,0 { | ||
392 | compatible = "pci8086,703.1", | ||
393 | "pci8086,703", | ||
394 | "pciclass0b4000", | ||
395 | "pciclass0b40"; | ||
396 | |||
397 | reg = <0x18800 0x0 0x0 0x0 0x0>; | ||
398 | interrupts = <1 1>; | ||
399 | }; | ||
400 | |||
401 | multimedia@12,0 { | ||
402 | compatible = "pci8086,704.0", | ||
403 | "pci8086,704", | ||
404 | "pciclass048000", | ||
405 | "pciclass0480"; | ||
406 | |||
407 | reg = <0x19000 0x0 0x0 0x0 0x0>; | ||
408 | }; | ||
409 | }; | ||
410 | |||
411 | isa@1f,0 { | ||
412 | #address-cells = <2>; | ||
413 | #size-cells = <1>; | ||
414 | compatible = "isa"; | ||
415 | ranges = <1 0 0 0 0 0x100>; | ||
416 | |||
417 | rtc@70 { | ||
418 | compatible = "intel,ce4100-rtc", "motorola,mc146818"; | ||
419 | interrupts = <8 3>; | ||
420 | interrupt-parent = <&ioapic1>; | ||
421 | ctrl-reg = <2>; | ||
422 | freq-reg = <0x26>; | ||
423 | reg = <1 0x70 2>; | ||
424 | }; | ||
425 | }; | ||
426 | }; | ||
427 | }; | ||
428 | }; | ||
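
The PCI child nodes in this device tree use the standard PCI bus binding for their unit addresses and reg properties: the first reg cell packs the bus, device and function numbers. A hedged standalone sketch of that decoding (field layout per the generic PCI binding, not CE4100-specific code); the sample values come from the nodes above.

#include <stdio.h>
#include <stdint.h>

/* Decode phys.hi of a PCI "reg" entry: npt000ss bbbbbbbb dddddfff rrrrrrrr */
static void decode(uint32_t phys_hi)
{
	unsigned bus = (phys_hi >> 16) & 0xff;
	unsigned dev = (phys_hi >> 11) & 0x1f;
	unsigned fn  = (phys_hi >>  8) & 0x07;

	printf("0x%08x -> bus %u, device %u, function %u\n",
	       phys_hi, bus, dev, fn);
}

int main(void)
{
	decode(0x11000);   /* display@2,0 on the secondary bus */
	decode(0x15a00);   /* i2c-controller@b,2 -> device 0xb, function 2 */
	decode(0x100);     /* ioapic2 at 0,1 on bus 0 */
	return 0;
}
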
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c index ea6529e93c6f..5c0207bf959b 100644 --- a/arch/x86/platform/mrst/mrst.c +++ b/arch/x86/platform/mrst/mrst.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <asm/apic.h> | 31 | #include <asm/apic.h> |
32 | #include <asm/io_apic.h> | 32 | #include <asm/io_apic.h> |
33 | #include <asm/mrst.h> | 33 | #include <asm/mrst.h> |
34 | #include <asm/mrst-vrtc.h> | ||
34 | #include <asm/io.h> | 35 | #include <asm/io.h> |
35 | #include <asm/i8259.h> | 36 | #include <asm/i8259.h> |
36 | #include <asm/intel_scu_ipc.h> | 37 | #include <asm/intel_scu_ipc.h> |
@@ -268,6 +269,7 @@ void __init x86_mrst_early_setup(void) | |||
268 | 269 | ||
269 | x86_platform.calibrate_tsc = mrst_calibrate_tsc; | 270 | x86_platform.calibrate_tsc = mrst_calibrate_tsc; |
270 | x86_platform.i8042_detect = mrst_i8042_detect; | 271 | x86_platform.i8042_detect = mrst_i8042_detect; |
272 | x86_init.timers.wallclock_init = mrst_rtc_init; | ||
271 | x86_init.pci.init = pci_mrst_init; | 273 | x86_init.pci.init = pci_mrst_init; |
272 | x86_init.pci.fixup_irqs = x86_init_noop; | 274 | x86_init.pci.fixup_irqs = x86_init_noop; |
273 | 275 | ||
diff --git a/arch/x86/platform/mrst/vrtc.c b/arch/x86/platform/mrst/vrtc.c index 32cd7edd71a0..04cf645feb92 100644 --- a/arch/x86/platform/mrst/vrtc.c +++ b/arch/x86/platform/mrst/vrtc.c | |||
@@ -100,22 +100,14 @@ int vrtc_set_mmss(unsigned long nowtime) | |||
100 | 100 | ||
101 | void __init mrst_rtc_init(void) | 101 | void __init mrst_rtc_init(void) |
102 | { | 102 | { |
103 | unsigned long rtc_paddr; | 103 | unsigned long vrtc_paddr = sfi_mrtc_array[0].phys_addr; |
104 | void __iomem *virt_base; | ||
105 | 104 | ||
106 | sfi_table_parse(SFI_SIG_MRTC, NULL, NULL, sfi_parse_mrtc); | 105 | sfi_table_parse(SFI_SIG_MRTC, NULL, NULL, sfi_parse_mrtc); |
107 | if (!sfi_mrtc_num) | 106 | if (!sfi_mrtc_num || !vrtc_paddr) |
108 | return; | 107 | return; |
109 | 108 | ||
110 | rtc_paddr = sfi_mrtc_array[0].phys_addr; | 109 | vrtc_virt_base = (void __iomem *)set_fixmap_offset_nocache(FIX_LNW_VRTC, |
111 | 110 | vrtc_paddr); | |
112 | /* vRTC's register address may not be page aligned */ | ||
113 | set_fixmap_nocache(FIX_LNW_VRTC, rtc_paddr); | ||
114 | |||
115 | virt_base = (void __iomem *)__fix_to_virt(FIX_LNW_VRTC); | ||
116 | virt_base += rtc_paddr & ~PAGE_MASK; | ||
117 | vrtc_virt_base = virt_base; | ||
118 | |||
119 | x86_platform.get_wallclock = vrtc_get_time; | 111 | x86_platform.get_wallclock = vrtc_get_time; |
120 | x86_platform.set_wallclock = vrtc_set_mmss; | 112 | x86_platform.set_wallclock = vrtc_set_mmss; |
121 | } | 113 | } |
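
The removed vrtc.c lines mapped the page-aligned address into the fixmap and then re-added the sub-page offset by hand, because (as the deleted comment notes) the vRTC register block need not be page aligned; set_fixmap_offset_nocache() folds both steps into one expression. A hedged standalone sketch of the same offset arithmetic, with invented example addresses and the usual 4 KiB page definitions assumed:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long rtc_paddr = 0xff103d40UL;   /* invented, deliberately not page aligned */
	unsigned long slot_va   = 0xffffd000UL;   /* invented VA of the FIX_LNW_VRTC slot */

	/* The two steps the old code did by hand, now hidden inside
	 * set_fixmap_offset_nocache(): map the aligned page, re-add the offset. */
	unsigned long page   = rtc_paddr & PAGE_MASK;
	unsigned long offset = rtc_paddr & ~PAGE_MASK;

	printf("phys 0x%lx -> page 0x%lx + offset 0x%lx -> virt 0x%lx\n",
	       rtc_paddr, page, offset, slot_va + offset);
	return 0;
}
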
diff --git a/arch/x86/platform/olpc/Makefile b/arch/x86/platform/olpc/Makefile index e797428b163b..c2a8cab65e5d 100644 --- a/arch/x86/platform/olpc/Makefile +++ b/arch/x86/platform/olpc/Makefile | |||
@@ -1,4 +1,4 @@ | |||
1 | obj-$(CONFIG_OLPC) += olpc.o | 1 | obj-$(CONFIG_OLPC) += olpc.o |
2 | obj-$(CONFIG_OLPC_XO1) += olpc-xo1.o | 2 | obj-$(CONFIG_OLPC_XO1) += olpc-xo1.o |
3 | obj-$(CONFIG_OLPC_OPENFIRMWARE) += olpc_ofw.o | 3 | obj-$(CONFIG_OLPC) += olpc_ofw.o |
4 | obj-$(CONFIG_OLPC_OPENFIRMWARE_DT) += olpc_dt.o | 4 | obj-$(CONFIG_OF_PROMTREE) += olpc_dt.o |
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index df58e9cad96a..a7b38d35c29a 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c | |||
@@ -1364,11 +1364,11 @@ uv_activation_descriptor_init(int node, int pnode) | |||
1364 | memset(bd2, 0, sizeof(struct bau_desc)); | 1364 | memset(bd2, 0, sizeof(struct bau_desc)); |
1365 | bd2->header.sw_ack_flag = 1; | 1365 | bd2->header.sw_ack_flag = 1; |
1366 | /* | 1366 | /* |
1367 | * base_dest_nodeid is the nasid (pnode<<1) of the first uvhub | 1367 | * base_dest_nodeid is the nasid of the first uvhub |
1368 | * in the partition. The bit map will indicate uvhub numbers, | 1368 | * in the partition. The bit map will indicate uvhub numbers, |
1369 | * which are 0-N in a partition. Pnodes are unique system-wide. | 1369 | * which are 0-N in a partition. Pnodes are unique system-wide. |
1370 | */ | 1370 | */ |
1371 | bd2->header.base_dest_nodeid = uv_partition_base_pnode << 1; | 1371 | bd2->header.base_dest_nodeid = UV_PNODE_TO_NASID(uv_partition_base_pnode); |
1372 | bd2->header.dest_subnodeid = 0x10; /* the LB */ | 1372 | bd2->header.dest_subnodeid = 0x10; /* the LB */ |
1373 | bd2->header.command = UV_NET_ENDPOINT_INTD; | 1373 | bd2->header.command = UV_NET_ENDPOINT_INTD; |
1374 | bd2->header.int_both = 1; | 1374 | bd2->header.int_both = 1; |
diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c index 7b24460917d5..374a05d8ad22 100644 --- a/arch/x86/platform/uv/uv_irq.c +++ b/arch/x86/platform/uv/uv_irq.c | |||
@@ -131,7 +131,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, | |||
131 | unsigned long mmr_offset, int limit) | 131 | unsigned long mmr_offset, int limit) |
132 | { | 132 | { |
133 | const struct cpumask *eligible_cpu = cpumask_of(cpu); | 133 | const struct cpumask *eligible_cpu = cpumask_of(cpu); |
134 | struct irq_cfg *cfg = get_irq_chip_data(irq); | 134 | struct irq_cfg *cfg = irq_get_chip_data(irq); |
135 | unsigned long mmr_value; | 135 | unsigned long mmr_value; |
136 | struct uv_IO_APIC_route_entry *entry; | 136 | struct uv_IO_APIC_route_entry *entry; |
137 | int mmr_pnode, err; | 137 | int mmr_pnode, err; |
@@ -148,7 +148,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, | |||
148 | else | 148 | else |
149 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); | 149 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); |
150 | 150 | ||
151 | set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq, | 151 | irq_set_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq, |
152 | irq_name); | 152 | irq_name); |
153 | 153 | ||
154 | mmr_value = 0; | 154 | mmr_value = 0; |
diff --git a/arch/x86/platform/visws/visws_quirks.c b/arch/x86/platform/visws/visws_quirks.c index 632037671746..fe4cf8294878 100644 --- a/arch/x86/platform/visws/visws_quirks.c +++ b/arch/x86/platform/visws/visws_quirks.c | |||
@@ -569,11 +569,13 @@ out_unlock: | |||
569 | static struct irqaction master_action = { | 569 | static struct irqaction master_action = { |
570 | .handler = piix4_master_intr, | 570 | .handler = piix4_master_intr, |
571 | .name = "PIIX4-8259", | 571 | .name = "PIIX4-8259", |
572 | .flags = IRQF_NO_THREAD, | ||
572 | }; | 573 | }; |
573 | 574 | ||
574 | static struct irqaction cascade_action = { | 575 | static struct irqaction cascade_action = { |
575 | .handler = no_action, | 576 | .handler = no_action, |
576 | .name = "cascade", | 577 | .name = "cascade", |
578 | .flags = IRQF_NO_THREAD, | ||
577 | }; | 579 | }; |
578 | 580 | ||
579 | static inline void set_piix4_virtual_irq_type(void) | 581 | static inline void set_piix4_virtual_irq_type(void) |
@@ -606,7 +608,7 @@ static void __init visws_pre_intr_init(void) | |||
606 | chip = &cobalt_irq_type; | 608 | chip = &cobalt_irq_type; |
607 | 609 | ||
608 | if (chip) | 610 | if (chip) |
609 | set_irq_chip(i, chip); | 611 | irq_set_chip(i, chip); |
610 | } | 612 | } |
611 | 613 | ||
612 | setup_irq(CO_IRQ_8259, &master_action); | 614 | setup_irq(CO_IRQ_8259, &master_action); |
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig index 5b54892e4bc3..1c7121ba18ff 100644 --- a/arch/x86/xen/Kconfig +++ b/arch/x86/xen/Kconfig | |||
@@ -38,7 +38,7 @@ config XEN_MAX_DOMAIN_MEMORY | |||
38 | 38 | ||
39 | config XEN_SAVE_RESTORE | 39 | config XEN_SAVE_RESTORE |
40 | bool | 40 | bool |
41 | depends on XEN && PM | 41 | depends on XEN |
42 | default y | 42 | default y |
43 | 43 | ||
44 | config XEN_DEBUG_FS | 44 | config XEN_DEBUG_FS |
@@ -48,3 +48,11 @@ config XEN_DEBUG_FS | |||
48 | help | 48 | help |
49 | Enable statistics output and various tuning options in debugfs. | 49 | Enable statistics output and various tuning options in debugfs. |
50 | Enabling this option may incur a significant performance overhead. | 50 | Enabling this option may incur a significant performance overhead. |
51 | |||
52 | config XEN_DEBUG | ||
53 | bool "Enable Xen debug checks" | ||
54 | depends on XEN | ||
55 | default n | ||
56 | help | ||
57 | Enable various WARN_ON checks in the Xen MMU code. | ||
58 | Enabling this option WILL incur a significant performance overhead. | ||
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 50542efe45fb..49dbd78ec3cb 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -1284,15 +1284,14 @@ static int init_hvm_pv_info(int *major, int *minor) | |||
1284 | 1284 | ||
1285 | xen_setup_features(); | 1285 | xen_setup_features(); |
1286 | 1286 | ||
1287 | pv_info = xen_info; | 1287 | pv_info.name = "Xen HVM"; |
1288 | pv_info.kernel_rpl = 0; | ||
1289 | 1288 | ||
1290 | xen_domain_type = XEN_HVM_DOMAIN; | 1289 | xen_domain_type = XEN_HVM_DOMAIN; |
1291 | 1290 | ||
1292 | return 0; | 1291 | return 0; |
1293 | } | 1292 | } |
1294 | 1293 | ||
1295 | void xen_hvm_init_shared_info(void) | 1294 | void __ref xen_hvm_init_shared_info(void) |
1296 | { | 1295 | { |
1297 | int cpu; | 1296 | int cpu; |
1298 | struct xen_add_to_physmap xatp; | 1297 | struct xen_add_to_physmap xatp; |
@@ -1331,6 +1330,8 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self, | |||
1331 | switch (action) { | 1330 | switch (action) { |
1332 | case CPU_UP_PREPARE: | 1331 | case CPU_UP_PREPARE: |
1333 | per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; | 1332 | per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; |
1333 | if (xen_have_vector_callback) | ||
1334 | xen_init_lock_cpu(cpu); | ||
1334 | break; | 1335 | break; |
1335 | default: | 1336 | default: |
1336 | break; | 1337 | break; |
@@ -1355,6 +1356,7 @@ static void __init xen_hvm_guest_init(void) | |||
1355 | 1356 | ||
1356 | if (xen_feature(XENFEAT_hvm_callback_vector)) | 1357 | if (xen_feature(XENFEAT_hvm_callback_vector)) |
1357 | xen_have_vector_callback = 1; | 1358 | xen_have_vector_callback = 1; |
1359 | xen_hvm_smp_init(); | ||
1358 | register_cpu_notifier(&xen_hvm_cpu_notifier); | 1360 | register_cpu_notifier(&xen_hvm_cpu_notifier); |
1359 | xen_unplug_emulated_devices(); | 1361 | xen_unplug_emulated_devices(); |
1360 | have_vcpu_info_placement = 0; | 1362 | have_vcpu_info_placement = 0; |
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 5e92b61ad574..3f6f3347aa17 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include <linux/module.h> | 46 | #include <linux/module.h> |
47 | #include <linux/gfp.h> | 47 | #include <linux/gfp.h> |
48 | #include <linux/memblock.h> | 48 | #include <linux/memblock.h> |
49 | #include <linux/seq_file.h> | ||
49 | 50 | ||
50 | #include <asm/pgtable.h> | 51 | #include <asm/pgtable.h> |
51 | #include <asm/tlbflush.h> | 52 | #include <asm/tlbflush.h> |
@@ -416,8 +417,12 @@ static pteval_t pte_pfn_to_mfn(pteval_t val) | |||
416 | if (val & _PAGE_PRESENT) { | 417 | if (val & _PAGE_PRESENT) { |
417 | unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; | 418 | unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; |
418 | pteval_t flags = val & PTE_FLAGS_MASK; | 419 | pteval_t flags = val & PTE_FLAGS_MASK; |
419 | unsigned long mfn = pfn_to_mfn(pfn); | 420 | unsigned long mfn; |
420 | 421 | ||
422 | if (!xen_feature(XENFEAT_auto_translated_physmap)) | ||
423 | mfn = get_phys_to_machine(pfn); | ||
424 | else | ||
425 | mfn = pfn; | ||
421 | /* | 426 | /* |
422 | * If there's no mfn for the pfn, then just create an | 427 | * If there's no mfn for the pfn, then just create an |
423 | * empty non-present pte. Unfortunately this loses | 428 | * empty non-present pte. Unfortunately this loses |
@@ -427,8 +432,18 @@ static pteval_t pte_pfn_to_mfn(pteval_t val) | |||
427 | if (unlikely(mfn == INVALID_P2M_ENTRY)) { | 432 | if (unlikely(mfn == INVALID_P2M_ENTRY)) { |
428 | mfn = 0; | 433 | mfn = 0; |
429 | flags = 0; | 434 | flags = 0; |
435 | } else { | ||
436 | /* | ||
437 | * It is paramount to do this test _after_ the | ||
438 | * INVALID_P2M_ENTRY check, as INVALID_P2M_ENTRY & | ||
439 | * IDENTITY_FRAME_BIT resolves to true. | ||
440 | */ | ||
441 | mfn &= ~FOREIGN_FRAME_BIT; | ||
442 | if (mfn & IDENTITY_FRAME_BIT) { | ||
443 | mfn &= ~IDENTITY_FRAME_BIT; | ||
444 | flags |= _PAGE_IOMAP; | ||
445 | } | ||
430 | } | 446 | } |
431 | |||
432 | val = ((pteval_t)mfn << PAGE_SHIFT) | flags; | 447 | val = ((pteval_t)mfn << PAGE_SHIFT) | flags; |
433 | } | 448 | } |
434 | 449 | ||
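
The ordering comment in this hunk matters because INVALID_P2M_ENTRY is the all-ones value, so it also has IDENTITY_FRAME_BIT set; testing the identity bit first would turn a missing entry into a bogus identity mapping. A hedged standalone demonstration (the bit definitions below restate what asm/xen/page.h is expected to provide, purely for the example):

#include <stdio.h>

/* Restated here only for the demo; assumed to mirror asm/xen/page.h. */
#define INVALID_P2M_ENTRY   (~0UL)
#define FOREIGN_FRAME_BIT   (1UL << (sizeof(unsigned long) * 8 - 1))
#define IDENTITY_FRAME_BIT  (1UL << (sizeof(unsigned long) * 8 - 2))

int main(void)
{
	unsigned long mfn = INVALID_P2M_ENTRY;   /* a "missing" p2m entry */

	/* Wrong order: the all-ones value already carries the identity bit. */
	if (mfn & IDENTITY_FRAME_BIT)
		printf("identity bit is set even for INVALID_P2M_ENTRY (0x%lx)\n", mfn);

	/* Right order, as in pte_pfn_to_mfn(): check for INVALID first. */
	if (mfn == INVALID_P2M_ENTRY)
		printf("treated as missing -> empty, non-present pte\n");
	else if ((mfn & ~FOREIGN_FRAME_BIT) & IDENTITY_FRAME_BIT)
		printf("identity frame -> strip the bit and set _PAGE_IOMAP\n");
	return 0;
}
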
@@ -532,6 +547,41 @@ pte_t xen_make_pte(pteval_t pte) | |||
532 | } | 547 | } |
533 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte); | 548 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte); |
534 | 549 | ||
550 | #ifdef CONFIG_XEN_DEBUG | ||
551 | pte_t xen_make_pte_debug(pteval_t pte) | ||
552 | { | ||
553 | phys_addr_t addr = (pte & PTE_PFN_MASK); | ||
554 | phys_addr_t other_addr; | ||
555 | bool io_page = false; | ||
556 | pte_t _pte; | ||
557 | |||
558 | if (pte & _PAGE_IOMAP) | ||
559 | io_page = true; | ||
560 | |||
561 | _pte = xen_make_pte(pte); | ||
562 | |||
563 | if (!addr) | ||
564 | return _pte; | ||
565 | |||
566 | if (io_page && | ||
567 | (xen_initial_domain() || addr >= ISA_END_ADDRESS)) { | ||
568 | other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT; | ||
569 | WARN(addr != other_addr, | ||
570 | "0x%lx is using VM_IO, but it is 0x%lx!\n", | ||
571 | (unsigned long)addr, (unsigned long)other_addr); | ||
572 | } else { | ||
573 | pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP; | ||
574 | other_addr = (_pte.pte & PTE_PFN_MASK); | ||
575 | WARN((addr == other_addr) && (!io_page) && (!iomap_set), | ||
576 | "0x%lx is missing VM_IO (and wasn't fixed)!\n", | ||
577 | (unsigned long)addr); | ||
578 | } | ||
579 | |||
580 | return _pte; | ||
581 | } | ||
582 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug); | ||
583 | #endif | ||
584 | |||
535 | pgd_t xen_make_pgd(pgdval_t pgd) | 585 | pgd_t xen_make_pgd(pgdval_t pgd) |
536 | { | 586 | { |
537 | pgd = pte_pfn_to_mfn(pgd); | 587 | pgd = pte_pfn_to_mfn(pgd); |
@@ -986,10 +1036,9 @@ static void xen_pgd_pin(struct mm_struct *mm) | |||
986 | */ | 1036 | */ |
987 | void xen_mm_pin_all(void) | 1037 | void xen_mm_pin_all(void) |
988 | { | 1038 | { |
989 | unsigned long flags; | ||
990 | struct page *page; | 1039 | struct page *page; |
991 | 1040 | ||
992 | spin_lock_irqsave(&pgd_lock, flags); | 1041 | spin_lock(&pgd_lock); |
993 | 1042 | ||
994 | list_for_each_entry(page, &pgd_list, lru) { | 1043 | list_for_each_entry(page, &pgd_list, lru) { |
995 | if (!PagePinned(page)) { | 1044 | if (!PagePinned(page)) { |
@@ -998,7 +1047,7 @@ void xen_mm_pin_all(void) | |||
998 | } | 1047 | } |
999 | } | 1048 | } |
1000 | 1049 | ||
1001 | spin_unlock_irqrestore(&pgd_lock, flags); | 1050 | spin_unlock(&pgd_lock); |
1002 | } | 1051 | } |
1003 | 1052 | ||
1004 | /* | 1053 | /* |
@@ -1099,10 +1148,9 @@ static void xen_pgd_unpin(struct mm_struct *mm) | |||
1099 | */ | 1148 | */ |
1100 | void xen_mm_unpin_all(void) | 1149 | void xen_mm_unpin_all(void) |
1101 | { | 1150 | { |
1102 | unsigned long flags; | ||
1103 | struct page *page; | 1151 | struct page *page; |
1104 | 1152 | ||
1105 | spin_lock_irqsave(&pgd_lock, flags); | 1153 | spin_lock(&pgd_lock); |
1106 | 1154 | ||
1107 | list_for_each_entry(page, &pgd_list, lru) { | 1155 | list_for_each_entry(page, &pgd_list, lru) { |
1108 | if (PageSavePinned(page)) { | 1156 | if (PageSavePinned(page)) { |
@@ -1112,7 +1160,7 @@ void xen_mm_unpin_all(void) | |||
1112 | } | 1160 | } |
1113 | } | 1161 | } |
1114 | 1162 | ||
1115 | spin_unlock_irqrestore(&pgd_lock, flags); | 1163 | spin_unlock(&pgd_lock); |
1116 | } | 1164 | } |
1117 | 1165 | ||
1118 | void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next) | 1166 | void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next) |
@@ -1443,7 +1491,7 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) | |||
1443 | * early_ioremap fixmap slot, make sure it is RO. | 1491 | * early_ioremap fixmap slot, make sure it is RO. |
1444 | */ | 1492 | */ |
1445 | if (!is_early_ioremap_ptep(ptep) && | 1493 | if (!is_early_ioremap_ptep(ptep) && |
1446 | pfn >= e820_table_start && pfn < e820_table_end) | 1494 | pfn >= pgt_buf_start && pfn < pgt_buf_end) |
1447 | pte = pte_wrprotect(pte); | 1495 | pte = pte_wrprotect(pte); |
1448 | 1496 | ||
1449 | return pte; | 1497 | return pte; |
@@ -1942,6 +1990,9 @@ __init void xen_ident_map_ISA(void) | |||
1942 | 1990 | ||
1943 | static __init void xen_post_allocator_init(void) | 1991 | static __init void xen_post_allocator_init(void) |
1944 | { | 1992 | { |
1993 | #ifdef CONFIG_XEN_DEBUG | ||
1994 | pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug); | ||
1995 | #endif | ||
1945 | pv_mmu_ops.set_pte = xen_set_pte; | 1996 | pv_mmu_ops.set_pte = xen_set_pte; |
1946 | pv_mmu_ops.set_pmd = xen_set_pmd; | 1997 | pv_mmu_ops.set_pmd = xen_set_pmd; |
1947 | pv_mmu_ops.set_pud = xen_set_pud; | 1998 | pv_mmu_ops.set_pud = xen_set_pud; |
@@ -2074,7 +2125,7 @@ static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order, | |||
2074 | in_frames[i] = virt_to_mfn(vaddr); | 2125 | in_frames[i] = virt_to_mfn(vaddr); |
2075 | 2126 | ||
2076 | MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0); | 2127 | MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0); |
2077 | set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY); | 2128 | __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY); |
2078 | 2129 | ||
2079 | if (out_frames) | 2130 | if (out_frames) |
2080 | out_frames[i] = virt_to_pfn(vaddr); | 2131 | out_frames[i] = virt_to_pfn(vaddr); |
@@ -2353,6 +2404,18 @@ EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range); | |||
2353 | 2404 | ||
2354 | #ifdef CONFIG_XEN_DEBUG_FS | 2405 | #ifdef CONFIG_XEN_DEBUG_FS |
2355 | 2406 | ||
2407 | static int p2m_dump_open(struct inode *inode, struct file *filp) | ||
2408 | { | ||
2409 | return single_open(filp, p2m_dump_show, NULL); | ||
2410 | } | ||
2411 | |||
2412 | static const struct file_operations p2m_dump_fops = { | ||
2413 | .open = p2m_dump_open, | ||
2414 | .read = seq_read, | ||
2415 | .llseek = seq_lseek, | ||
2416 | .release = single_release, | ||
2417 | }; | ||
2418 | |||
2356 | static struct dentry *d_mmu_debug; | 2419 | static struct dentry *d_mmu_debug; |
2357 | 2420 | ||
2358 | static int __init xen_mmu_debugfs(void) | 2421 | static int __init xen_mmu_debugfs(void) |
@@ -2408,6 +2471,7 @@ static int __init xen_mmu_debugfs(void) | |||
2408 | debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug, | 2471 | debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug, |
2409 | &mmu_stats.prot_commit_batched); | 2472 | &mmu_stats.prot_commit_batched); |
2410 | 2473 | ||
2474 | debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops); | ||
2411 | return 0; | 2475 | return 0; |
2412 | } | 2476 | } |
2413 | fs_initcall(xen_mmu_debugfs); | 2477 | fs_initcall(xen_mmu_debugfs); |
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index fd12d7ce7ff9..215a3ce61068 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c | |||
@@ -23,6 +23,129 @@ | |||
23 | * P2M_PER_PAGE depends on the architecture, as a mfn is always | 23 | * P2M_PER_PAGE depends on the architecture, as a mfn is always |
24 | * unsigned long (8 bytes on 64-bit, 4 bytes on 32), leading to | 24 | * unsigned long (8 bytes on 64-bit, 4 bytes on 32), leading to |
25 | * 512 and 1024 entries respectively. | 25 | * 512 and 1024 entries respectively. |
26 | * | ||
27 | * In short, these structures contain the Machine Frame Number (MFN) of the PFN. | ||
28 | * | ||
29 | * However, not all entries are filled with MFNs. Specifically, any leaf, | ||
30 | * top, or middle entry for which there is only a void entry is assumed | ||
31 | * to be "missing". So (for example) | ||
32 | * pfn_to_mfn(0x90909090)=INVALID_P2M_ENTRY. | ||
33 | * | ||
34 | * We also have the possibility of setting 1-1 mappings on certain regions, so | ||
35 | * that: | ||
36 | * pfn_to_mfn(0xc0000)=0xc0000 | ||
37 | * | ||
38 | * The benefit of this is, that we can assume for non-RAM regions (think | ||
39 | * PCI BARs, or ACPI spaces), we can create mappings easily b/c we | ||
40 | * get the PFN value to match the MFN. | ||
41 | * | ||
42 | * For this to work efficiently we have one new page p2m_identity and | ||
43 | * allocate (via reserve_brk) any other pages we need to cover the sides | ||
44 | * (1GB or 4MB boundary violations). All entries in p2m_identity are set to | ||
45 | * INVALID_P2M_ENTRY type (Xen toolstack only recognizes that and MFNs, | ||
46 | * no other fancy value). | ||
47 | * | ||
48 | * On lookup we spot that the entry points to p2m_identity and return the | ||
49 | * identity value instead of dereferencing and returning INVALID_P2M_ENTRY. | ||
50 | * If the entry points to an allocated page, we just proceed as before and | ||
51 | * return the PFN. If the PFN has IDENTITY_FRAME_BIT set we unmask that in | ||
52 | * appropriate functions (pfn_to_mfn). | ||
53 | * | ||
54 | * The reason for having the IDENTITY_FRAME_BIT instead of just returning the | ||
55 | * PFN is that we could find ourselves where pfn_to_mfn(pfn)==pfn for a | ||
56 | * non-identity pfn. To protect ourselves against this we elect to set (and get) the | ||
57 | * IDENTITY_FRAME_BIT on all identity mapped PFNs. | ||
58 | * | ||
59 | * This simplistic diagram is used to explain the more subtle piece of code. | ||
60 | * There is also a diagram of the P2M at the end that can help. | ||
61 | * Imagine your E820 looking as so: | ||
62 | * | ||
63 | * 1GB 2GB | ||
64 | * /-------------------+---------\/----\ /----------\ /---+-----\ | ||
65 | * | System RAM | Sys RAM ||ACPI| | reserved | | Sys RAM | | ||
66 | * \-------------------+---------/\----/ \----------/ \---+-----/ | ||
67 | * ^- 1029MB ^- 2001MB | ||
68 | * | ||
69 | * [1029MB = 263424 (0x40500), 2001MB = 512256 (0x7D100), | ||
70 | * 2048MB = 524288 (0x80000)] | ||
71 | * | ||
72 | * And dom0_mem=max:3GB,1GB is passed in to the guest, meaning memory past 1GB | ||
73 | * is actually not present (would have to kick the balloon driver to put it in). | ||
74 | * | ||
75 | * When we are told to set the PFNs for identity mapping (see patch: "xen/setup: | ||
76 | * Set identity mapping for non-RAM E820 and E820 gaps.") we pass in the start | ||
77 | * of the PFN and the end PFN (263424 and 512256 respectively). The first step | ||
78 | * is to reserve_brk a top leaf page if the p2m[1] is missing. The top leaf page | ||
79 | * covers 512^2 of page estate (1GB) and in case the start or end PFN is not | ||
80 | * aligned on 512^2*PAGE_SIZE (1GB) we loop on aligned 1GB PFNs from start pfn | ||
81 | * to end pfn. We reserve_brk top leaf pages if they are missing (means they | ||
82 | * point to p2m_mid_missing). | ||
83 | * | ||
84 | * With the E820 example above, 263424 is not 1GB aligned so we allocate a | ||
85 | * reserve_brk page which will cover the PFNs estate from 0x40000 to 0x80000. | ||
86 | * Each entry in the allocated page is "missing" (points to p2m_missing). | ||
87 | * | ||
88 | * Next stage is to determine if we need to do a more granular boundary check | ||
89 | * on the 4MB (or 2MB depending on architecture) off the start and end pfn's. | ||
90 | * We check if the start pfn and end pfn violate that boundary check, and if | ||
91 | * so reserve_brk a middle (p2m[x][y]) leaf page. This way we have a much finer | ||
92 | * granularity of setting which PFNs are missing and which ones are identity. | ||
93 | * In our example 263424 and 512256 both fail the check so we reserve_brk two | ||
94 | * pages. Populate them with INVALID_P2M_ENTRY (so they both have "missing" | ||
95 | * values) and assign them to p2m[1][2] and p2m[1][488] respectively. | ||
96 | * | ||
97 | * At this point we would at minimum reserve_brk one page, but could be up to | ||
98 | * three. Each call to set_phys_range_identity has at maximum a three page | ||
99 | * cost. If we were to query the P2M at this stage, all those entries from | ||
100 | * start PFN through end PFN (so 1029MB -> 2001MB) would return | ||
101 | * INVALID_P2M_ENTRY ("missing"). | ||
102 | * | ||
103 | * The next step is to walk from the start pfn to the end pfn setting | ||
104 | * the IDENTITY_FRAME_BIT on each PFN. This is done in set_phys_range_identity. | ||
105 | * If we find that the middle leaf is pointing to p2m_missing we can swap it | ||
106 | * over to p2m_identity - this way covering 4MB (or 2MB) PFN space. At this | ||
107 | * point we do not need to worry about boundary alignment (so no need to | ||
108 | * reserve_brk a middle page, figure out which PFNs are "missing" and which | ||
109 | * ones are identity), as that has been done earlier. If we find that the | ||
110 | * middle leaf is not occupied by p2m_identity or p2m_missing, we dereference | ||
111 | * that page (which covers 512 PFNs) and set the appropriate PFN with | ||
112 | * IDENTITY_FRAME_BIT. In our example 263424 and 512256 end up there, and we | ||
113 | * set from p2m[1][2][256->511] and p2m[1][488][0->256] with | ||
114 | * IDENTITY_FRAME_BIT set. | ||
115 | * | ||
116 | * All other regions that are void (or not filled) either point to p2m_missing | ||
117 | * (considered missing) or have the default value of INVALID_P2M_ENTRY (also | ||
118 | * considered missing). In our case, p2m[1][2][0->255] and p2m[1][488][257->511] | ||
119 | * contain the INVALID_P2M_ENTRY value and are considered "missing." | ||
120 | * | ||
121 | * This is what the p2m ends up looking (for the E820 above) with this | ||
122 | * fabulous drawing: | ||
123 | * | ||
124 | * p2m /--------------\ | ||
125 | * /-----\ | &mfn_list[0],| /-----------------\ | ||
126 | * | 0 |------>| &mfn_list[1],| /---------------\ | ~0, ~0, .. | | ||
127 | * |-----| | ..., ~0, ~0 | | ~0, ~0, [x]---+----->| IDENTITY [@256] | | ||
128 | * | 1 |---\ \--------------/ | [p2m_identity]+\ | IDENTITY [@257] | | ||
129 | * |-----| \ | [p2m_identity]+\\ | .... | | ||
130 | * | 2 |--\ \-------------------->| ... | \\ \----------------/ | ||
131 | * |-----| \ \---------------/ \\ | ||
132 | * | 3 |\ \ \\ p2m_identity | ||
133 | * |-----| \ \-------------------->/---------------\ /-----------------\ | ||
134 | * | .. +->+ | [p2m_identity]+-->| ~0, ~0, ~0, ... | | ||
135 | * \-----/ / | [p2m_identity]+-->| ..., ~0 | | ||
136 | * / /---------------\ | .... | \-----------------/ | ||
137 | * / | IDENTITY[@0] | /-+-[x], ~0, ~0.. | | ||
138 | * / | IDENTITY[@256]|<----/ \---------------/ | ||
139 | * / | ~0, ~0, .... | | ||
140 | * | \---------------/ | ||
141 | * | | ||
142 | * p2m_missing p2m_missing | ||
143 | * /------------------\ /------------\ | ||
144 | * | [p2m_mid_missing]+---->| ~0, ~0, ~0 | | ||
145 | * | [p2m_mid_missing]+---->| ..., ~0 | | ||
146 | * \------------------/ \------------/ | ||
147 | * | ||
148 | * where ~0 is INVALID_P2M_ENTRY. IDENTITY is (PFN | IDENTITY_BIT) | ||
26 | */ | 149 | */ |
27 | 150 | ||
28 | #include <linux/init.h> | 151 | #include <linux/init.h> |
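
For orientation, the P2M described above is a three-level table indexed by splitting the PFN: each leaf page holds P2M_PER_PAGE entries (512 on 64-bit, 1024 on 32-bit, hence the 2 MB/4 MB boundary talk), each middle page holds 512 pointers, and on 64-bit one top-level entry therefore spans 512*512 = 262144 PFNs, i.e. 1 GB. A hedged standalone sketch of the index split, using the two edge PFNs from the E820 example in the comment:

#include <stdio.h>

#define P2M_PER_PAGE      512UL   /* 64-bit: PAGE_SIZE / sizeof(unsigned long) */
#define P2M_MID_PER_PAGE  512UL

int main(void)
{
	/* The two edge PFNs from the E820 example in the comment above. */
	unsigned long pfns[] = { 263424UL /* ~1029MB */, 512256UL /* ~2001MB */ };
	int i;

	for (i = 0; i < 2; i++) {
		unsigned long pfn    = pfns[i];
		unsigned long idx    = pfn % P2M_PER_PAGE;
		unsigned long mididx = (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
		unsigned long topidx = pfn / (P2M_PER_PAGE * P2M_MID_PER_PAGE);

		printf("pfn %lu -> p2m[%lu][%lu][%lu]\n", pfn, topidx, mididx, idx);
	}
	/* Prints p2m[1][2][256] and p2m[1][488][256], matching the ranges the
	 * comment says get the IDENTITY_FRAME_BIT treatment. */
	return 0;
}
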
@@ -30,6 +153,7 @@ | |||
30 | #include <linux/list.h> | 153 | #include <linux/list.h> |
31 | #include <linux/hash.h> | 154 | #include <linux/hash.h> |
32 | #include <linux/sched.h> | 155 | #include <linux/sched.h> |
156 | #include <linux/seq_file.h> | ||
33 | 157 | ||
34 | #include <asm/cache.h> | 158 | #include <asm/cache.h> |
35 | #include <asm/setup.h> | 159 | #include <asm/setup.h> |
@@ -59,9 +183,15 @@ static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE); | |||
59 | static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE); | 183 | static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE); |
60 | static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE); | 184 | static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE); |
61 | 185 | ||
186 | static RESERVE_BRK_ARRAY(unsigned long, p2m_identity, P2M_PER_PAGE); | ||
187 | |||
62 | RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); | 188 | RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); |
63 | RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); | 189 | RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); |
64 | 190 | ||
191 | /* We might hit two boundary violations at the start and end, at max each | ||
192 | * boundary violation will require three middle nodes. */ | ||
193 | RESERVE_BRK(p2m_mid_identity, PAGE_SIZE * 2 * 3); | ||
194 | |||
65 | static inline unsigned p2m_top_index(unsigned long pfn) | 195 | static inline unsigned p2m_top_index(unsigned long pfn) |
66 | { | 196 | { |
67 | BUG_ON(pfn >= MAX_P2M_PFN); | 197 | BUG_ON(pfn >= MAX_P2M_PFN); |
@@ -136,7 +266,7 @@ static void p2m_init(unsigned long *p2m) | |||
136 | * - After resume we're called from within stop_machine, but the mfn | 266 | * - After resume we're called from within stop_machine, but the mfn |
137 | * tree should already be completely allocated. | 267 | * tree should already be completely allocated. |
138 | */ | 268 | */ |
139 | void xen_build_mfn_list_list(void) | 269 | void __ref xen_build_mfn_list_list(void) |
140 | { | 270 | { |
141 | unsigned long pfn; | 271 | unsigned long pfn; |
142 | 272 | ||
@@ -221,6 +351,9 @@ void __init xen_build_dynamic_phys_to_machine(void) | |||
221 | p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE); | 351 | p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE); |
222 | p2m_top_init(p2m_top); | 352 | p2m_top_init(p2m_top); |
223 | 353 | ||
354 | p2m_identity = extend_brk(PAGE_SIZE, PAGE_SIZE); | ||
355 | p2m_init(p2m_identity); | ||
356 | |||
224 | /* | 357 | /* |
225 | * The domain builder gives us a pre-constructed p2m array in | 358 | * The domain builder gives us a pre-constructed p2m array in |
226 | * mfn_list for all the pages initially given to us, so we just | 359 | * mfn_list for all the pages initially given to us, so we just |
@@ -266,6 +399,14 @@ unsigned long get_phys_to_machine(unsigned long pfn) | |||
266 | mididx = p2m_mid_index(pfn); | 399 | mididx = p2m_mid_index(pfn); |
267 | idx = p2m_index(pfn); | 400 | idx = p2m_index(pfn); |
268 | 401 | ||
402 | /* | ||
403 | * The INVALID_P2M_ENTRY is filled in both p2m_*identity | ||
404 | * and in p2m_*missing, so returning the INVALID_P2M_ENTRY | ||
405 | * would be wrong. | ||
406 | */ | ||
407 | if (p2m_top[topidx][mididx] == p2m_identity) | ||
408 | return IDENTITY_FRAME(pfn); | ||
409 | |||
269 | return p2m_top[topidx][mididx][idx]; | 410 | return p2m_top[topidx][mididx][idx]; |
270 | } | 411 | } |
271 | EXPORT_SYMBOL_GPL(get_phys_to_machine); | 412 | EXPORT_SYMBOL_GPL(get_phys_to_machine); |
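
With the shared p2m_identity leaf in place, get_phys_to_machine() can answer identity lookups without touching a per-PFN table: whenever the middle slot points at p2m_identity it returns IDENTITY_FRAME(pfn), and consumers such as pte_pfn_to_mfn() in the mmu.c hunk above strip the bit again. A hedged standalone sketch of that encode/decode round trip (the frame macros are restated locally and assumed to match asm/xen/page.h):

#include <stdio.h>
#include <assert.h>

/* Assumed to match the asm/xen/page.h definitions; restated for the example. */
#define IDENTITY_FRAME_BIT  (1UL << (sizeof(unsigned long) * 8 - 2))
#define IDENTITY_FRAME(m)   ((m) | IDENTITY_FRAME_BIT)

int main(void)
{
	unsigned long pfn = 0xc0000UL;              /* example from the comment block */
	unsigned long mfn = IDENTITY_FRAME(pfn);    /* what the identity fast path returns */

	assert(mfn != pfn);                         /* still distinguishable from plain RAM */
	assert((mfn & ~IDENTITY_FRAME_BIT) == pfn); /* consumers recover the PFN by masking */

	printf("pfn 0x%lx -> encoded 0x%lx -> decoded 0x%lx\n",
	       pfn, mfn, mfn & ~IDENTITY_FRAME_BIT);
	return 0;
}
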
@@ -335,9 +476,11 @@ static bool alloc_p2m(unsigned long pfn) | |||
335 | p2m_top_mfn_p[topidx] = mid_mfn; | 476 | p2m_top_mfn_p[topidx] = mid_mfn; |
336 | } | 477 | } |
337 | 478 | ||
338 | if (p2m_top[topidx][mididx] == p2m_missing) { | 479 | if (p2m_top[topidx][mididx] == p2m_identity || |
480 | p2m_top[topidx][mididx] == p2m_missing) { | ||
339 | /* p2m leaf page is missing */ | 481 | /* p2m leaf page is missing */ |
340 | unsigned long *p2m; | 482 | unsigned long *p2m; |
483 | unsigned long *p2m_orig = p2m_top[topidx][mididx]; | ||
341 | 484 | ||
342 | p2m = alloc_p2m_page(); | 485 | p2m = alloc_p2m_page(); |
343 | if (!p2m) | 486 | if (!p2m) |
@@ -345,7 +488,7 @@ static bool alloc_p2m(unsigned long pfn) | |||
345 | 488 | ||
346 | p2m_init(p2m); | 489 | p2m_init(p2m); |
347 | 490 | ||
348 | if (cmpxchg(&mid[mididx], p2m_missing, p2m) != p2m_missing) | 491 | if (cmpxchg(&mid[mididx], p2m_orig, p2m) != p2m_orig) |
349 | free_p2m_page(p2m); | 492 | free_p2m_page(p2m); |
350 | else | 493 | else |
351 | mid_mfn[mididx] = virt_to_mfn(p2m); | 494 | mid_mfn[mididx] = virt_to_mfn(p2m); |
@@ -354,11 +497,91 @@ static bool alloc_p2m(unsigned long pfn) | |||
354 | return true; | 497 | return true; |
355 | } | 498 | } |
356 | 499 | ||
500 | bool __early_alloc_p2m(unsigned long pfn) | ||
501 | { | ||
502 | unsigned topidx, mididx, idx; | ||
503 | |||
504 | topidx = p2m_top_index(pfn); | ||
505 | mididx = p2m_mid_index(pfn); | ||
506 | idx = p2m_index(pfn); | ||
507 | |||
508 | /* Pfff.. No boundary cross-over, let's get out. */ | ||
509 | if (!idx) | ||
510 | return false; | ||
511 | |||
512 | WARN(p2m_top[topidx][mididx] == p2m_identity, | ||
513 | "P2M[%d][%d] == IDENTITY, should be MISSING (or alloced)!\n", | ||
514 | topidx, mididx); | ||
515 | |||
516 | /* | ||
517 | * Could be done by xen_build_dynamic_phys_to_machine.. | ||
518 | */ | ||
519 | if (p2m_top[topidx][mididx] != p2m_missing) | ||
520 | return false; | ||
521 | |||
522 | /* Boundary cross-over for the edges: */ | ||
523 | if (idx) { | ||
524 | unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE); | ||
525 | |||
526 | p2m_init(p2m); | ||
527 | |||
528 | p2m_top[topidx][mididx] = p2m; | ||
529 | |||
530 | } | ||
531 | return idx != 0; | ||
532 | } | ||
533 | unsigned long set_phys_range_identity(unsigned long pfn_s, | ||
534 | unsigned long pfn_e) | ||
535 | { | ||
536 | unsigned long pfn; | ||
537 | |||
538 | if (unlikely(pfn_s >= MAX_P2M_PFN || pfn_e >= MAX_P2M_PFN)) | ||
539 | return 0; | ||
540 | |||
541 | if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) | ||
542 | return pfn_e - pfn_s; | ||
543 | |||
544 | if (pfn_s > pfn_e) | ||
545 | return 0; | ||
546 | |||
547 | for (pfn = (pfn_s & ~(P2M_MID_PER_PAGE * P2M_PER_PAGE - 1)); | ||
548 | pfn < ALIGN(pfn_e, (P2M_MID_PER_PAGE * P2M_PER_PAGE)); | ||
549 | pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE) | ||
550 | { | ||
551 | unsigned topidx = p2m_top_index(pfn); | ||
552 | if (p2m_top[topidx] == p2m_mid_missing) { | ||
553 | unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE); | ||
554 | |||
555 | p2m_mid_init(mid); | ||
556 | |||
557 | p2m_top[topidx] = mid; | ||
558 | } | ||
559 | } | ||
560 | |||
561 | __early_alloc_p2m(pfn_s); | ||
562 | __early_alloc_p2m(pfn_e); | ||
563 | |||
564 | for (pfn = pfn_s; pfn < pfn_e; pfn++) | ||
565 | if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn))) | ||
566 | break; | ||
567 | |||
568 | if (!WARN((pfn - pfn_s) != (pfn_e - pfn_s), | ||
569 | "Identity mapping failed. We are %ld short of 1-1 mappings!\n", | ||
570 | (pfn_e - pfn_s) - (pfn - pfn_s))) | ||
571 | printk(KERN_DEBUG "1-1 mapping on %lx->%lx\n", pfn_s, pfn); | ||
572 | |||
573 | return pfn - pfn_s; | ||
574 | } | ||
575 | |||
357 | /* Try to install p2m mapping; fail if intermediate bits missing */ | 576 | /* Try to install p2m mapping; fail if intermediate bits missing */ |
358 | bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) | 577 | bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) |
359 | { | 578 | { |
360 | unsigned topidx, mididx, idx; | 579 | unsigned topidx, mididx, idx; |
361 | 580 | ||
581 | if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) { | ||
582 | BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); | ||
583 | return true; | ||
584 | } | ||
362 | if (unlikely(pfn >= MAX_P2M_PFN)) { | 585 | if (unlikely(pfn >= MAX_P2M_PFN)) { |
363 | BUG_ON(mfn != INVALID_P2M_ENTRY); | 586 | BUG_ON(mfn != INVALID_P2M_ENTRY); |
364 | return true; | 587 | return true; |
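
set_phys_range_identity() above first guarantees a real middle page for every top-level slot the range touches (looping in whole top-level strides), lets __early_alloc_p2m() allocate leaves for the two possibly unaligned edges, and only then stamps IDENTITY_FRAME(pfn) into each entry through __set_phys_to_machine(). A hedged standalone sketch of the loop bounds and edge test, again using the example PFNs from the comment block:

#include <stdio.h>

#define P2M_PER_PAGE      512UL                              /* 64-bit */
#define P2M_MID_PER_PAGE  512UL
#define TOP_SPAN          (P2M_MID_PER_PAGE * P2M_PER_PAGE)  /* PFNs per top entry */
#define ALIGN_UP(x, a)    (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long pfn_s = 263424UL, pfn_e = 512256UL;  /* 1029MB..2001MB example */
	unsigned long pfn;

	/* Same bounds as the for-loop in set_phys_range_identity(): round the
	 * start down and the end up to whole top-level spans. */
	for (pfn = pfn_s & ~(TOP_SPAN - 1);
	     pfn < ALIGN_UP(pfn_e, TOP_SPAN);
	     pfn += TOP_SPAN)
		printf("ensure a middle page for top index %lu\n", pfn / TOP_SPAN);

	/* The edges then need their own leaf pages only if they are not
	 * leaf-aligned, which is what __early_alloc_p2m() checks via 'idx'. */
	printf("edge leaf at start: %s, at end: %s\n",
	       (pfn_s % P2M_PER_PAGE) ? "yes" : "no",
	       (pfn_e % P2M_PER_PAGE) ? "yes" : "no");
	return 0;
}
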
@@ -368,6 +591,21 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) | |||
368 | mididx = p2m_mid_index(pfn); | 591 | mididx = p2m_mid_index(pfn); |
369 | idx = p2m_index(pfn); | 592 | idx = p2m_index(pfn); |
370 | 593 | ||
594 | /* For sparse holes where the p2m leaf has a real PFN along with | ||
595 | * PCI holes, stick in the PFN as the MFN value. | ||
596 | */ | ||
597 | if (mfn != INVALID_P2M_ENTRY && (mfn & IDENTITY_FRAME_BIT)) { | ||
598 | if (p2m_top[topidx][mididx] == p2m_identity) | ||
599 | return true; | ||
600 | |||
601 | /* Swap over from MISSING to IDENTITY if needed. */ | ||
602 | if (p2m_top[topidx][mididx] == p2m_missing) { | ||
603 | WARN_ON(cmpxchg(&p2m_top[topidx][mididx], p2m_missing, | ||
604 | p2m_identity) != p2m_missing); | ||
605 | return true; | ||
606 | } | ||
607 | } | ||
608 | |||
371 | if (p2m_top[topidx][mididx] == p2m_missing) | 609 | if (p2m_top[topidx][mididx] == p2m_missing) |
372 | return mfn == INVALID_P2M_ENTRY; | 610 | return mfn == INVALID_P2M_ENTRY; |
373 | 611 | ||
@@ -378,11 +616,6 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) | |||
378 | 616 | ||
379 | bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) | 617 | bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) |
380 | { | 618 | { |
381 | if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) { | ||
382 | BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); | ||
383 | return true; | ||
384 | } | ||
385 | |||
386 | if (unlikely(!__set_phys_to_machine(pfn, mfn))) { | 619 | if (unlikely(!__set_phys_to_machine(pfn, mfn))) { |
387 | if (!alloc_p2m(pfn)) | 620 | if (!alloc_p2m(pfn)) |
388 | return false; | 621 | return false; |
@@ -421,7 +654,7 @@ int m2p_add_override(unsigned long mfn, struct page *page) | |||
421 | { | 654 | { |
422 | unsigned long flags; | 655 | unsigned long flags; |
423 | unsigned long pfn; | 656 | unsigned long pfn; |
424 | unsigned long address; | 657 | unsigned long uninitialized_var(address); |
425 | unsigned level; | 658 | unsigned level; |
426 | pte_t *ptep = NULL; | 659 | pte_t *ptep = NULL; |
427 | 660 | ||
@@ -455,7 +688,7 @@ int m2p_remove_override(struct page *page) | |||
455 | unsigned long flags; | 688 | unsigned long flags; |
456 | unsigned long mfn; | 689 | unsigned long mfn; |
457 | unsigned long pfn; | 690 | unsigned long pfn; |
458 | unsigned long address; | 691 | unsigned long uninitialized_var(address); |
459 | unsigned level; | 692 | unsigned level; |
460 | pte_t *ptep = NULL; | 693 | pte_t *ptep = NULL; |
461 | 694 | ||
@@ -520,3 +753,80 @@ unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn) | |||
520 | return ret; | 753 | return ret; |
521 | } | 754 | } |
522 | EXPORT_SYMBOL_GPL(m2p_find_override_pfn); | 755 | EXPORT_SYMBOL_GPL(m2p_find_override_pfn); |
756 | |||
757 | #ifdef CONFIG_XEN_DEBUG_FS | ||
758 | |||
759 | int p2m_dump_show(struct seq_file *m, void *v) | ||
760 | { | ||
761 | static const char * const level_name[] = { "top", "middle", | ||
762 | "entry", "abnormal" }; | ||
763 | static const char * const type_name[] = { "identity", "missing", | ||
764 | "pfn", "abnormal"}; | ||
765 | #define TYPE_IDENTITY 0 | ||
766 | #define TYPE_MISSING 1 | ||
767 | #define TYPE_PFN 2 | ||
768 | #define TYPE_UNKNOWN 3 | ||
769 | unsigned long pfn, prev_pfn_type = 0, prev_pfn_level = 0; | ||
770 | unsigned int uninitialized_var(prev_level); | ||
771 | unsigned int uninitialized_var(prev_type); | ||
772 | |||
773 | if (!p2m_top) | ||
774 | return 0; | ||
775 | |||
776 | for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn++) { | ||
777 | unsigned topidx = p2m_top_index(pfn); | ||
778 | unsigned mididx = p2m_mid_index(pfn); | ||
779 | unsigned idx = p2m_index(pfn); | ||
780 | unsigned lvl, type; | ||
781 | |||
782 | lvl = 4; | ||
783 | type = TYPE_UNKNOWN; | ||
784 | if (p2m_top[topidx] == p2m_mid_missing) { | ||
785 | lvl = 0; type = TYPE_MISSING; | ||
786 | } else if (p2m_top[topidx] == NULL) { | ||
787 | lvl = 0; type = TYPE_UNKNOWN; | ||
788 | } else if (p2m_top[topidx][mididx] == NULL) { | ||
789 | lvl = 1; type = TYPE_UNKNOWN; | ||
790 | } else if (p2m_top[topidx][mididx] == p2m_identity) { | ||
791 | lvl = 1; type = TYPE_IDENTITY; | ||
792 | } else if (p2m_top[topidx][mididx] == p2m_missing) { | ||
793 | lvl = 1; type = TYPE_MISSING; | ||
794 | } else if (p2m_top[topidx][mididx][idx] == 0) { | ||
795 | lvl = 2; type = TYPE_UNKNOWN; | ||
796 | } else if (p2m_top[topidx][mididx][idx] == IDENTITY_FRAME(pfn)) { | ||
797 | lvl = 2; type = TYPE_IDENTITY; | ||
798 | } else if (p2m_top[topidx][mididx][idx] == INVALID_P2M_ENTRY) { | ||
799 | lvl = 2; type = TYPE_MISSING; | ||
800 | } else if (p2m_top[topidx][mididx][idx] == pfn) { | ||
801 | lvl = 2; type = TYPE_PFN; | ||
802 | } else if (p2m_top[topidx][mididx][idx] != pfn) { | ||
803 | lvl = 2; type = TYPE_PFN; | ||
804 | } | ||
805 | if (pfn == 0) { | ||
806 | prev_level = lvl; | ||
807 | prev_type = type; | ||
808 | } | ||
809 | if (pfn == MAX_DOMAIN_PAGES-1) { | ||
810 | lvl = 3; | ||
811 | type = TYPE_UNKNOWN; | ||
812 | } | ||
813 | if (prev_type != type) { | ||
814 | seq_printf(m, " [0x%lx->0x%lx] %s\n", | ||
815 | prev_pfn_type, pfn, type_name[prev_type]); | ||
816 | prev_pfn_type = pfn; | ||
817 | prev_type = type; | ||
818 | } | ||
819 | if (prev_level != lvl) { | ||
820 | seq_printf(m, " [0x%lx->0x%lx] level %s\n", | ||
821 | prev_pfn_level, pfn, level_name[prev_level]); | ||
822 | prev_pfn_level = pfn; | ||
823 | prev_level = lvl; | ||
824 | } | ||
825 | } | ||
826 | return 0; | ||
827 | #undef TYPE_IDENTITY | ||
828 | #undef TYPE_MISSING | ||
829 | #undef TYPE_PFN | ||
830 | #undef TYPE_UNKNOWN | ||
831 | } | ||
832 | #endif | ||
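
Because p2m_dump_show() only emits a line when the type or level changes, the new debugfs file reads as a run-length summary of the P2M rather than one line per PFN. A purely hypothetical excerpt (the ranges and classifications below are invented for illustration) might look like:

 [0x0->0x100] identity
 [0x100->0x9fc00] pfn
 [0x9fc00->0xa0000] missing
 [0x0->0xa0000] level entry
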
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index a8a66a50d446..fa0269a99377 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c | |||
@@ -52,6 +52,8 @@ phys_addr_t xen_extra_mem_start, xen_extra_mem_size; | |||
52 | 52 | ||
53 | static __init void xen_add_extra_mem(unsigned long pages) | 53 | static __init void xen_add_extra_mem(unsigned long pages) |
54 | { | 54 | { |
55 | unsigned long pfn; | ||
56 | |||
55 | u64 size = (u64)pages * PAGE_SIZE; | 57 | u64 size = (u64)pages * PAGE_SIZE; |
56 | u64 extra_start = xen_extra_mem_start + xen_extra_mem_size; | 58 | u64 extra_start = xen_extra_mem_start + xen_extra_mem_size; |
57 | 59 | ||
@@ -66,6 +68,9 @@ static __init void xen_add_extra_mem(unsigned long pages) | |||
66 | xen_extra_mem_size += size; | 68 | xen_extra_mem_size += size; |
67 | 69 | ||
68 | xen_max_p2m_pfn = PFN_DOWN(extra_start + size); | 70 | xen_max_p2m_pfn = PFN_DOWN(extra_start + size); |
71 | |||
72 | for (pfn = PFN_DOWN(extra_start); pfn <= xen_max_p2m_pfn; pfn++) | ||
73 | __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); | ||
69 | } | 74 | } |
70 | 75 | ||
71 | static unsigned long __init xen_release_chunk(phys_addr_t start_addr, | 76 | static unsigned long __init xen_release_chunk(phys_addr_t start_addr, |
@@ -104,7 +109,7 @@ static unsigned long __init xen_release_chunk(phys_addr_t start_addr, | |||
104 | WARN(ret != 1, "Failed to release memory %lx-%lx err=%d\n", | 109 | WARN(ret != 1, "Failed to release memory %lx-%lx err=%d\n", |
105 | start, end, ret); | 110 | start, end, ret); |
106 | if (ret == 1) { | 111 | if (ret == 1) { |
107 | set_phys_to_machine(pfn, INVALID_P2M_ENTRY); | 112 | __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); |
108 | len++; | 113 | len++; |
109 | } | 114 | } |
110 | } | 115 | } |
@@ -138,12 +143,55 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn, | |||
138 | return released; | 143 | return released; |
139 | } | 144 | } |
140 | 145 | ||
146 | static unsigned long __init xen_set_identity(const struct e820entry *list, | ||
147 | ssize_t map_size) | ||
148 | { | ||
149 | phys_addr_t last = xen_initial_domain() ? 0 : ISA_END_ADDRESS; | ||
150 | phys_addr_t start_pci = last; | ||
151 | const struct e820entry *entry; | ||
152 | unsigned long identity = 0; | ||
153 | int i; | ||
154 | |||
155 | for (i = 0, entry = list; i < map_size; i++, entry++) { | ||
156 | phys_addr_t start = entry->addr; | ||
157 | phys_addr_t end = start + entry->size; | ||
158 | |||
159 | if (start < last) | ||
160 | start = last; | ||
161 | |||
162 | if (end <= start) | ||
163 | continue; | ||
164 | |||
165 | /* Skip over the 1MB region. */ | ||
166 | if (last > end) | ||
167 | continue; | ||
168 | |||
169 | if (entry->type == E820_RAM) { | ||
170 | if (start > start_pci) | ||
171 | identity += set_phys_range_identity( | ||
172 | PFN_UP(start_pci), PFN_DOWN(start)); | ||
173 | |||
174 | /* Without saving 'last' we would gobble RAM too | ||
175 | * at the end of the loop. */ | ||
176 | last = end; | ||
177 | start_pci = end; | ||
178 | continue; | ||
179 | } | ||
180 | start_pci = min(start, start_pci); | ||
181 | last = end; | ||
182 | } | ||
183 | if (last > start_pci) | ||
184 | identity += set_phys_range_identity( | ||
185 | PFN_UP(start_pci), PFN_DOWN(last)); | ||
186 | return identity; | ||
187 | } | ||
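
Running the E820 example from the p2m.c comment through xen_set_identity(): the RAM entries up to 1029 MB only advance 'last' and 'start_pci', the ACPI and reserved entries stretch the pending non-RAM span to 2001 MB, and the final RAM entry flushes it with set_phys_range_identity(PFN_UP(start_pci), PFN_DOWN(start)). With 4 KiB pages that is

  PFN_UP(1029 MB)   = 1029 * 1024 * 1024 / 4096 = 263424
  PFN_DOWN(2001 MB) = 2001 * 1024 * 1024 / 4096 = 512256

so PFNs 263424..512255 get the 1-1 treatment, matching the 0x40500 and 0x7D100 figures quoted in that comment.
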
141 | /** | 188 | /** |
142 | * machine_specific_memory_setup - Hook for machine specific memory setup. | 189 | * machine_specific_memory_setup - Hook for machine specific memory setup. |
143 | **/ | 190 | **/ |
144 | char * __init xen_memory_setup(void) | 191 | char * __init xen_memory_setup(void) |
145 | { | 192 | { |
146 | static struct e820entry map[E820MAX] __initdata; | 193 | static struct e820entry map[E820MAX] __initdata; |
194 | static struct e820entry map_raw[E820MAX] __initdata; | ||
147 | 195 | ||
148 | unsigned long max_pfn = xen_start_info->nr_pages; | 196 | unsigned long max_pfn = xen_start_info->nr_pages; |
149 | unsigned long long mem_end; | 197 | unsigned long long mem_end; |
@@ -151,6 +199,7 @@ char * __init xen_memory_setup(void) | |||
151 | struct xen_memory_map memmap; | 199 | struct xen_memory_map memmap; |
152 | unsigned long extra_pages = 0; | 200 | unsigned long extra_pages = 0; |
153 | unsigned long extra_limit; | 201 | unsigned long extra_limit; |
202 | unsigned long identity_pages = 0; | ||
154 | int i; | 203 | int i; |
155 | int op; | 204 | int op; |
156 | 205 | ||
@@ -176,6 +225,7 @@ char * __init xen_memory_setup(void) | |||
176 | } | 225 | } |
177 | BUG_ON(rc); | 226 | BUG_ON(rc); |
178 | 227 | ||
228 | memcpy(map_raw, map, sizeof(map)); | ||
179 | e820.nr_map = 0; | 229 | e820.nr_map = 0; |
180 | xen_extra_mem_start = mem_end; | 230 | xen_extra_mem_start = mem_end; |
181 | for (i = 0; i < memmap.nr_entries; i++) { | 231 | for (i = 0; i < memmap.nr_entries; i++) { |
@@ -194,6 +244,15 @@ char * __init xen_memory_setup(void) | |||
194 | end -= delta; | 244 | end -= delta; |
195 | 245 | ||
196 | extra_pages += PFN_DOWN(delta); | 246 | extra_pages += PFN_DOWN(delta); |
247 | /* | ||
248 | * Mark RAM below 4GB that is not for us as unusable. | ||
249 | * This prevents "System RAM" address space from being | ||
250 | * used as a potential resource for I/O addresses (happens | ||
251 | * when 'allocate_resource' is called). | ||
252 | */ | ||
253 | if (delta && | ||
254 | (xen_initial_domain() && end < 0x100000000ULL)) | ||
255 | e820_add_region(end, delta, E820_UNUSABLE); | ||
197 | } | 256 | } |
198 | 257 | ||
199 | if (map[i].size > 0 && end > xen_extra_mem_start) | 258 | if (map[i].size > 0 && end > xen_extra_mem_start) |
@@ -251,6 +310,13 @@ char * __init xen_memory_setup(void) | |||
251 | 310 | ||
252 | xen_add_extra_mem(extra_pages); | 311 | xen_add_extra_mem(extra_pages); |
253 | 312 | ||
313 | /* | ||
314 | * Set P2M for all non-RAM pages and E820 gaps to be identity | ||
315 | * type PFNs. We supply it with the non-sanitized version | ||
316 | * of the E820. | ||
317 | */ | ||
318 | identity_pages = xen_set_identity(map_raw, memmap.nr_entries); | ||
319 | printk(KERN_INFO "Set %ld page(s) to 1-1 mapping.\n", identity_pages); | ||
254 | return "Xen"; | 320 | return "Xen"; |
255 | } | 321 | } |
256 | 322 | ||
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 72a4c7959045..30612441ed99 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c | |||
@@ -509,3 +509,41 @@ void __init xen_smp_init(void) | |||
509 | xen_fill_possible_map(); | 509 | xen_fill_possible_map(); |
510 | xen_init_spinlocks(); | 510 | xen_init_spinlocks(); |
511 | } | 511 | } |
512 | |||
513 | static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus) | ||
514 | { | ||
515 | native_smp_prepare_cpus(max_cpus); | ||
516 | WARN_ON(xen_smp_intr_init(0)); | ||
517 | |||
518 | if (!xen_have_vector_callback) | ||
519 | return; | ||
520 | xen_init_lock_cpu(0); | ||
521 | xen_init_spinlocks(); | ||
522 | } | ||
523 | |||
524 | static int __cpuinit xen_hvm_cpu_up(unsigned int cpu) | ||
525 | { | ||
526 | int rc; | ||
527 | rc = native_cpu_up(cpu); | ||
528 | WARN_ON(xen_smp_intr_init(cpu)); | ||
529 | return rc; | ||
530 | } | ||
531 | |||
532 | static void xen_hvm_cpu_die(unsigned int cpu) | ||
533 | { | ||
534 | unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL); | ||
535 | unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL); | ||
536 | unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL); | ||
537 | unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL); | ||
538 | native_cpu_die(cpu); | ||
539 | } | ||
540 | |||
541 | void __init xen_hvm_smp_init(void) | ||
542 | { | ||
543 | smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus; | ||
544 | smp_ops.smp_send_reschedule = xen_smp_send_reschedule; | ||
545 | smp_ops.cpu_up = xen_hvm_cpu_up; | ||
546 | smp_ops.cpu_die = xen_hvm_cpu_die; | ||
547 | smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi; | ||
548 | smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi; | ||
549 | } | ||
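
The smp.c hunk above is the usual ops-table override: xen_hvm_smp_init() swaps Xen-HVM-aware wrappers into smp_ops, each of which does the native work and then the paravirt-specific extra step. A hedged, stand-alone illustration of that pattern (all demo_* names are made up; the real struct smp_ops has many more hooks):

#include <stdio.h>

struct demo_smp_ops {
	int  (*cpu_up)(unsigned int cpu);
	void (*cpu_die)(unsigned int cpu);
};

static int native_demo_cpu_up(unsigned int cpu)
{
	printf("native bring-up of CPU %u\n", cpu);
	return 0;
}

static void native_demo_cpu_die(unsigned int cpu)
{
	printf("native teardown of CPU %u\n", cpu);
}

/* Wrapper in the style of xen_hvm_cpu_up(): do the native work, then
 * the paravirt-specific extra step (here just a message). */
static int hvm_demo_cpu_up(unsigned int cpu)
{
	int rc = native_demo_cpu_up(cpu);
	printf("  + rebinding per-CPU event-channel IRQs for CPU %u\n", cpu);
	return rc;
}

static void hvm_demo_cpu_die(unsigned int cpu)
{
	printf("  + unbinding per-CPU event-channel IRQs for CPU %u\n", cpu);
	native_demo_cpu_die(cpu);
}

static struct demo_smp_ops demo_smp_ops = {
	.cpu_up  = native_demo_cpu_up,
	.cpu_die = native_demo_cpu_die,
};

static void demo_hvm_smp_init(void)    /* analogous to xen_hvm_smp_init() */
{
	demo_smp_ops.cpu_up  = hvm_demo_cpu_up;
	demo_smp_ops.cpu_die = hvm_demo_cpu_die;
}

int main(void)
{
	demo_hvm_smp_init();
	demo_smp_ops.cpu_up(1);
	demo_smp_ops.cpu_die(1);
	return 0;
}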
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c index 9bbd63a129b5..45329c8c226e 100644 --- a/arch/x86/xen/suspend.c +++ b/arch/x86/xen/suspend.c | |||
@@ -12,7 +12,7 @@ | |||
12 | #include "xen-ops.h" | 12 | #include "xen-ops.h" |
13 | #include "mmu.h" | 13 | #include "mmu.h" |
14 | 14 | ||
15 | void xen_pre_suspend(void) | 15 | void xen_arch_pre_suspend(void) |
16 | { | 16 | { |
17 | xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn); | 17 | xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn); |
18 | xen_start_info->console.domU.mfn = | 18 | xen_start_info->console.domU.mfn = |
@@ -26,8 +26,9 @@ void xen_pre_suspend(void) | |||
26 | BUG(); | 26 | BUG(); |
27 | } | 27 | } |
28 | 28 | ||
29 | void xen_hvm_post_suspend(int suspend_cancelled) | 29 | void xen_arch_hvm_post_suspend(int suspend_cancelled) |
30 | { | 30 | { |
31 | #ifdef CONFIG_XEN_PVHVM | ||
31 | int cpu; | 32 | int cpu; |
32 | xen_hvm_init_shared_info(); | 33 | xen_hvm_init_shared_info(); |
33 | xen_callback_vector(); | 34 | xen_callback_vector(); |
@@ -37,9 +38,10 @@ void xen_hvm_post_suspend(int suspend_cancelled) | |||
37 | xen_setup_runstate_info(cpu); | 38 | xen_setup_runstate_info(cpu); |
38 | } | 39 | } |
39 | } | 40 | } |
41 | #endif | ||
40 | } | 42 | } |
41 | 43 | ||
42 | void xen_post_suspend(int suspend_cancelled) | 44 | void xen_arch_post_suspend(int suspend_cancelled) |
43 | { | 45 | { |
44 | xen_build_mfn_list_list(); | 46 | xen_build_mfn_list_list(); |
45 | 47 | ||
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 067759e3d6a5..2e2d370a47b1 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c | |||
@@ -397,7 +397,9 @@ void xen_setup_timer(int cpu) | |||
397 | name = "<timer kasprintf failed>"; | 397 | name = "<timer kasprintf failed>"; |
398 | 398 | ||
399 | irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt, | 399 | irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt, |
400 | IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER, | 400 | IRQF_DISABLED|IRQF_PERCPU| |
401 | IRQF_NOBALANCING|IRQF_TIMER| | ||
402 | IRQF_FORCE_RESUME, | ||
401 | name, NULL); | 403 | name, NULL); |
402 | 404 | ||
403 | evt = &per_cpu(xen_clock_events, cpu); | 405 | evt = &per_cpu(xen_clock_events, cpu); |
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S index 1a5ff24e29c0..aaa7291c9259 100644 --- a/arch/x86/xen/xen-head.S +++ b/arch/x86/xen/xen-head.S | |||
@@ -28,9 +28,9 @@ ENTRY(startup_xen) | |||
28 | __FINIT | 28 | __FINIT |
29 | 29 | ||
30 | .pushsection .text | 30 | .pushsection .text |
31 | .align PAGE_SIZE_asm | 31 | .align PAGE_SIZE |
32 | ENTRY(hypercall_page) | 32 | ENTRY(hypercall_page) |
33 | .skip PAGE_SIZE_asm | 33 | .skip PAGE_SIZE |
34 | .popsection | 34 | .popsection |
35 | 35 | ||
36 | ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux") | 36 | ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux") |
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index 9d41bf985757..3112f55638c4 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h | |||
@@ -64,10 +64,12 @@ void xen_setup_vcpu_info_placement(void); | |||
64 | 64 | ||
65 | #ifdef CONFIG_SMP | 65 | #ifdef CONFIG_SMP |
66 | void xen_smp_init(void); | 66 | void xen_smp_init(void); |
67 | void __init xen_hvm_smp_init(void); | ||
67 | 68 | ||
68 | extern cpumask_var_t xen_cpu_initialized_map; | 69 | extern cpumask_var_t xen_cpu_initialized_map; |
69 | #else | 70 | #else |
70 | static inline void xen_smp_init(void) {} | 71 | static inline void xen_smp_init(void) {} |
72 | static inline void xen_hvm_smp_init(void) {} | ||
71 | #endif | 73 | #endif |
72 | 74 | ||
73 | #ifdef CONFIG_PARAVIRT_SPINLOCKS | 75 | #ifdef CONFIG_PARAVIRT_SPINLOCKS |
diff --git a/arch/xtensa/include/asm/ioctls.h b/arch/xtensa/include/asm/ioctls.h index ccf1800f0b0c..fd1d1369a407 100644 --- a/arch/xtensa/include/asm/ioctls.h +++ b/arch/xtensa/include/asm/ioctls.h | |||
@@ -100,6 +100,7 @@ | |||
100 | #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ | 100 | #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ |
101 | #define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */ | 101 | #define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */ |
102 | #define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */ | 102 | #define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */ |
103 | #define TIOCVHANGUP _IO('T', 0x37) | ||
103 | 104 | ||
104 | #define TIOCSERCONFIG _IO('T', 83) | 105 | #define TIOCSERCONFIG _IO('T', 83) |
105 | #define TIOCSERGWILD _IOR('T', 84, int) | 106 | #define TIOCSERGWILD _IOR('T', 84, int) |
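
A hedged user-space sketch of the TIOCVHANGUP ioctl wired up above: it hangs up the terminal behind an open file descriptor, much as vhangup() does for the caller's controlling tty, which is how init systems revoke stale logins. It needs CAP_SYS_ADMIN; the device path is only an example, and the fallback #define is there in case older libc headers lack the constant.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#ifndef TIOCVHANGUP                     /* older libc headers may lack it */
#define TIOCVHANGUP _IO('T', 0x37)
#endif

int main(void)
{
	int fd = open("/dev/tty2", O_RDWR | O_NOCTTY | O_CLOEXEC);
	if (fd < 0) {
		perror("open /dev/tty2");
		return 1;
	}
	if (ioctl(fd, TIOCVHANGUP) < 0) {   /* takes no argument */
		perror("TIOCVHANGUP");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}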
diff --git a/arch/xtensa/include/asm/rwsem.h b/arch/xtensa/include/asm/rwsem.h index e39edf5c86f2..249619e7e7f2 100644 --- a/arch/xtensa/include/asm/rwsem.h +++ b/arch/xtensa/include/asm/rwsem.h | |||
@@ -17,44 +17,12 @@ | |||
17 | #error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead." | 17 | #error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead." |
18 | #endif | 18 | #endif |
19 | 19 | ||
20 | #include <linux/list.h> | ||
21 | #include <linux/spinlock.h> | ||
22 | #include <asm/atomic.h> | ||
23 | #include <asm/system.h> | ||
24 | |||
25 | /* | ||
26 | * the semaphore definition | ||
27 | */ | ||
28 | struct rw_semaphore { | ||
29 | signed long count; | ||
30 | #define RWSEM_UNLOCKED_VALUE 0x00000000 | 20 | #define RWSEM_UNLOCKED_VALUE 0x00000000 |
31 | #define RWSEM_ACTIVE_BIAS 0x00000001 | 21 | #define RWSEM_ACTIVE_BIAS 0x00000001 |
32 | #define RWSEM_ACTIVE_MASK 0x0000ffff | 22 | #define RWSEM_ACTIVE_MASK 0x0000ffff |
33 | #define RWSEM_WAITING_BIAS (-0x00010000) | 23 | #define RWSEM_WAITING_BIAS (-0x00010000) |
34 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS | 24 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS |
35 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) | 25 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) |
36 | spinlock_t wait_lock; | ||
37 | struct list_head wait_list; | ||
38 | }; | ||
39 | |||
40 | #define __RWSEM_INITIALIZER(name) \ | ||
41 | { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ | ||
42 | LIST_HEAD_INIT((name).wait_list) } | ||
43 | |||
44 | #define DECLARE_RWSEM(name) \ | ||
45 | struct rw_semaphore name = __RWSEM_INITIALIZER(name) | ||
46 | |||
47 | extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); | ||
48 | extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); | ||
49 | extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem); | ||
50 | extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); | ||
51 | |||
52 | static inline void init_rwsem(struct rw_semaphore *sem) | ||
53 | { | ||
54 | sem->count = RWSEM_UNLOCKED_VALUE; | ||
55 | spin_lock_init(&sem->wait_lock); | ||
56 | INIT_LIST_HEAD(&sem->wait_list); | ||
57 | } | ||
58 | 26 | ||
59 | /* | 27 | /* |
60 | * lock for reading | 28 | * lock for reading |
@@ -160,9 +128,4 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) | |||
160 | return atomic_add_return(delta, (atomic_t *)(&sem->count)); | 128 | return atomic_add_return(delta, (atomic_t *)(&sem->count)); |
161 | } | 129 | } |
162 | 130 | ||
163 | static inline int rwsem_is_locked(struct rw_semaphore *sem) | ||
164 | { | ||
165 | return (sem->count != 0); | ||
166 | } | ||
167 | |||
168 | #endif /* _XTENSA_RWSEM_H */ | 131 | #endif /* _XTENSA_RWSEM_H */ |
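
The bias constants kept by the rwsem.h hunk above are the heart of the generic rwsem fast path the arch now shares: a single signed counter whose low 16 bits count active holders and which goes negative while a writer is involved. A stand-alone sketch of the arithmetic, using a plain long instead of the kernel's atomic ops:

#include <stdio.h>

#define RWSEM_UNLOCKED_VALUE      0x00000000L
#define RWSEM_ACTIVE_BIAS         0x00000001L
#define RWSEM_ACTIVE_MASK         0x0000ffffL
#define RWSEM_WAITING_BIAS        (-0x00010000L)
#define RWSEM_ACTIVE_READ_BIAS    RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS   (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

static void show(const char *what, long count)
{
	printf("%-18s count=%ld active=%ld writer_involved=%s\n",
	       what, count, count & RWSEM_ACTIVE_MASK,
	       count < 0 ? "yes" : "no");
}

int main(void)
{
	long count = RWSEM_UNLOCKED_VALUE;

	count += RWSEM_ACTIVE_READ_BIAS;   /* down_read() by reader A */
	show("one reader", count);

	count += RWSEM_ACTIVE_READ_BIAS;   /* down_read() by reader B */
	show("two readers", count);

	count -= RWSEM_ACTIVE_READ_BIAS;   /* up_read() twice */
	count -= RWSEM_ACTIVE_READ_BIAS;
	show("unlocked again", count);

	count += RWSEM_ACTIVE_WRITE_BIAS;  /* down_write() */
	show("writer holds it", count);

	return 0;
}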
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c index 19df764f6399..f3e5eb43f71c 100644 --- a/arch/xtensa/kernel/time.c +++ b/arch/xtensa/kernel/time.c | |||
@@ -96,16 +96,12 @@ again: | |||
96 | update_process_times(user_mode(get_irq_regs())); | 96 | update_process_times(user_mode(get_irq_regs())); |
97 | #endif | 97 | #endif |
98 | 98 | ||
99 | write_seqlock(&xtime_lock); | 99 | xtime_update(1); /* Linux handler in kernel/time/timekeeping */ |
100 | |||
101 | do_timer(1); /* Linux handler in kernel/timer.c */ | ||
102 | 100 | ||
103 | /* Note that writing CCOMPARE clears the interrupt. */ | 101 | /* Note that writing CCOMPARE clears the interrupt. */ |
104 | 102 | ||
105 | next += CCOUNT_PER_JIFFY; | 103 | next += CCOUNT_PER_JIFFY; |
106 | set_linux_timer(next); | 104 | set_linux_timer(next); |
107 | |||
108 | write_sequnlock(&xtime_lock); | ||
109 | } | 105 | } |
110 | 106 | ||
111 | /* Allow platform to do something useful (Wdog). */ | 107 | /* Allow platform to do something useful (Wdog). */ |
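
The xtensa time.c hunk above replaces the open-coded write_seqlock(&xtime_lock)/do_timer(1)/write_sequnlock() sequence with xtime_update(1), which takes the same seqlock internally. A toy, single-threaded model of the seqlock idea (all toy_* names are made up; the real code needs atomics and memory barriers):

#include <stdio.h>

struct toy_seqlock {
	unsigned seq;              /* odd while an update is in progress */
	unsigned long jiffies;     /* the protected data */
};

static void toy_timer_tick(struct toy_seqlock *t)   /* ~ xtime_update(1) */
{
	t->seq++;                  /* now odd: update in progress */
	t->jiffies++;              /* ~ do_timer(1) */
	t->seq++;                  /* even again: update complete */
}

static unsigned long toy_read_jiffies(const struct toy_seqlock *t)
{
	unsigned start;
	unsigned long val;

	do {
		while ((start = t->seq) & 1)
			;              /* writer active: wait */
		val = t->jiffies;      /* speculative read */
	} while (t->seq != start);     /* retry if a write slipped in */
	return val;
}

int main(void)
{
	struct toy_seqlock t = { 0, 0 };
	toy_timer_tick(&t);
	toy_timer_tick(&t);
	printf("jiffies=%lu seq=%u\n", toy_read_jiffies(&t), t.seq);
	return 0;
}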
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index 9b526154c9ba..a2820065927e 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S | |||
@@ -155,7 +155,7 @@ SECTIONS | |||
155 | INIT_RAM_FS | 155 | INIT_RAM_FS |
156 | } | 156 | } |
157 | 157 | ||
158 | PERCPU(PAGE_SIZE) | 158 | PERCPU(XCHAL_ICACHE_LINESIZE, PAGE_SIZE) |
159 | 159 | ||
160 | /* We need this dummy segment here */ | 160 | /* We need this dummy segment here */ |
161 | 161 | ||