291 files changed, 16246 insertions, 3579 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 30d44b78171a..47e7d8794fc6 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2034,6 +2034,9 @@ and is between 256 and 4096 characters. It is defined in the file
 
 	snd-ymfpci=	[HW,ALSA]
 
+	softlockup_panic=
+			[KNL] Should the soft-lockup detector generate panics.
+
 	sonypi.*=	[HW] Sony Programmable I/O Control Device driver
 			See Documentation/sonypi.txt
 
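Editorial aside (not part of the diff): besides the boot-time form softlockup_panic=1, the same knob is exposed at runtime as the kernel.softlockup_panic sysctl. A minimal userspace sketch in C, assuming the conventional /proc/sys mount point:

/* Sketch only: enable panic-on-soft-lockup at runtime via sysctl. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/softlockup_panic", "w");

	if (!f) {
		perror("softlockup_panic");
		return 1;
	}
	fputs("1\n", f);	/* 1 = panic when a soft lockup is detected */
	fclose(f);
	return 0;
}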
diff --git a/MAINTAINERS b/MAINTAINERS
index 0652ab384d51..5d8971c76a7f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -441,10 +441,7 @@ M: spyro@f2s.com
 S: Maintained
 
 ARM PRIMECELL MMCI PL180/1 DRIVER
-P: Russell King
-M: rmk@arm.linux.org.uk
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
-S: Maintained
+S: Orphan
 
 ARM/ADI ROADRUNNER MACHINE SUPPORT
 P: Lennert Buytenhek
@@ -483,11 +480,28 @@ M: kernel@wantstofly.org
 L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
 S: Maintained
 
+ARM/COMPULAB CM-X270/EM-X270 MACHINE SUPPORT
+P: Mike Rapoport
+M: mike@compulab.co.il
+L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+S: Maintained
+
 ARM/CORGI MACHINE SUPPORT
 P: Richard Purdie
 M: rpurdie@rpsys.net
 S: Maintained
 
+ARM/EZX SMARTPHONES (A780, A910, A1200, E680, ROKR E2 and ROKR E6)
+P: Daniel Ribeiro
+M: drwyrm@gmail.com
+P: Stefan Schmidt
+M: stefan@openezx.org
+P: Harald Welte
+M: laforge@openezx.org
+L: openezx-devel@lists.openezx.org (subscribers-only)
+W: http://www.openezx.org/
+S: Maintained
+
 ARM/GLOMATION GESBC9312SX MACHINE SUPPORT
 P: Lennert Buytenhek
 M: kernel@wantstofly.org
@@ -575,10 +589,18 @@ L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
 S: Maintained
 
 ARM/TOSA MACHINE SUPPORT
+P: Dmitry Baryshkov
+M: dbaryshkov@gmail.com
 P: Dirk Opfer
 M: dirk@opfer-online.de
 S: Maintained
 
+ARM/PALMTX SUPPORT
+P: Marek Vasut
+M: marek.vasut@gmail.com
+W: http://hackndev.com
+S: Maintained
+
 ARM/PLEB SUPPORT
 P: Peter Chubb
 M: pleb@gelato.unsw.edu.au
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c7ad324ddf2c..d048f6887d0b 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -12,6 +12,7 @@ config ARM
 	select RTC_LIB
 	select SYS_SUPPORTS_APM_EMULATION
 	select HAVE_OPROFILE
+	select HAVE_ARCH_KGDB
 	select HAVE_KPROBES if (!XIP_KERNEL)
 	select HAVE_KRETPROBES if (HAVE_KPROBES)
 	select HAVE_FTRACE if (!XIP_KERNEL)
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index c8e8f0ea59e1..0a8e1ff2af8a 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -627,7 +627,7 @@ __sa1111_probe(struct device *me, struct resource *mem, int irq)
 	if (!sachip)
 		return -ENOMEM;
 
-	sachip->clk = clk_get(me, "GPIO27_CLK");
+	sachip->clk = clk_get(me, "SA1111_CLK");
 	if (!sachip->clk) {
 		ret = PTR_ERR(sachip->clk);
 		goto err_free;
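For reference (not part of the commit): clk_get() normally reports failure with an ERR_PTR() cookie rather than NULL, so consumers usually test the result with IS_ERR(). A minimal sketch of that pattern; the "example" device and clock names are hypothetical:

/* Sketch of the common clk_get()/IS_ERR() consumer pattern. */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/device.h>

static int example_probe(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = clk_get(dev, "EXAMPLE_CLK");	/* look up the clock by consumer name */
	if (IS_ERR(clk))			/* failure is signalled via ERR_PTR() */
		return PTR_ERR(clk);

	ret = clk_enable(clk);			/* ungate the clock before touching the device */
	if (ret) {
		clk_put(clk);
		return ret;
	}

	/* ... use the device ... */

	clk_disable(clk);
	clk_put(clk);
	return 0;
}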
diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig
new file mode 100644
index 000000000000..2a84d557adc2
--- /dev/null
+++ b/arch/arm/configs/ezx_defconfig
@@ -0,0 +1,1614 @@
1 | # | ||
2 | # Automatically generated make config: don't edit | ||
3 | # Linux kernel version: 2.6.26-rc3 | ||
4 | # Mon Jul 7 17:52:21 2008 | ||
5 | # | ||
6 | CONFIG_ARM=y | ||
7 | CONFIG_HAVE_PWM=y | ||
8 | CONFIG_SYS_SUPPORTS_APM_EMULATION=y | ||
9 | CONFIG_GENERIC_GPIO=y | ||
10 | CONFIG_GENERIC_TIME=y | ||
11 | CONFIG_GENERIC_CLOCKEVENTS=y | ||
12 | CONFIG_MMU=y | ||
13 | # CONFIG_NO_IOPORT is not set | ||
14 | CONFIG_GENERIC_HARDIRQS=y | ||
15 | CONFIG_STACKTRACE_SUPPORT=y | ||
16 | CONFIG_LOCKDEP_SUPPORT=y | ||
17 | CONFIG_TRACE_IRQFLAGS_SUPPORT=y | ||
18 | CONFIG_HARDIRQS_SW_RESEND=y | ||
19 | CONFIG_GENERIC_IRQ_PROBE=y | ||
20 | CONFIG_RWSEM_GENERIC_SPINLOCK=y | ||
21 | # CONFIG_ARCH_HAS_ILOG2_U32 is not set | ||
22 | # CONFIG_ARCH_HAS_ILOG2_U64 is not set | ||
23 | CONFIG_GENERIC_HWEIGHT=y | ||
24 | CONFIG_GENERIC_CALIBRATE_DELAY=y | ||
25 | CONFIG_ARCH_SUPPORTS_AOUT=y | ||
26 | CONFIG_ZONE_DMA=y | ||
27 | CONFIG_ARCH_MTD_XIP=y | ||
28 | CONFIG_VECTORS_BASE=0xffff0000 | ||
29 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | ||
30 | |||
31 | # | ||
32 | # General setup | ||
33 | # | ||
34 | CONFIG_EXPERIMENTAL=y | ||
35 | CONFIG_BROKEN_ON_SMP=y | ||
36 | CONFIG_LOCK_KERNEL=y | ||
37 | CONFIG_INIT_ENV_ARG_LIMIT=32 | ||
38 | CONFIG_LOCALVERSION="-ezxdev" | ||
39 | # CONFIG_LOCALVERSION_AUTO is not set | ||
40 | CONFIG_SWAP=y | ||
41 | CONFIG_SYSVIPC=y | ||
42 | CONFIG_SYSVIPC_SYSCTL=y | ||
43 | # CONFIG_POSIX_MQUEUE is not set | ||
44 | # CONFIG_BSD_PROCESS_ACCT is not set | ||
45 | # CONFIG_TASKSTATS is not set | ||
46 | # CONFIG_AUDIT is not set | ||
47 | CONFIG_IKCONFIG=y | ||
48 | CONFIG_IKCONFIG_PROC=y | ||
49 | CONFIG_LOG_BUF_SHIFT=14 | ||
50 | # CONFIG_CGROUPS is not set | ||
51 | CONFIG_GROUP_SCHED=y | ||
52 | CONFIG_FAIR_GROUP_SCHED=y | ||
53 | # CONFIG_RT_GROUP_SCHED is not set | ||
54 | CONFIG_USER_SCHED=y | ||
55 | # CONFIG_CGROUP_SCHED is not set | ||
56 | CONFIG_SYSFS_DEPRECATED=y | ||
57 | CONFIG_SYSFS_DEPRECATED_V2=y | ||
58 | # CONFIG_RELAY is not set | ||
59 | # CONFIG_NAMESPACES is not set | ||
60 | # CONFIG_BLK_DEV_INITRD is not set | ||
61 | CONFIG_CC_OPTIMIZE_FOR_SIZE=y | ||
62 | CONFIG_SYSCTL=y | ||
63 | CONFIG_EMBEDDED=y | ||
64 | CONFIG_UID16=y | ||
65 | CONFIG_SYSCTL_SYSCALL=y | ||
66 | CONFIG_SYSCTL_SYSCALL_CHECK=y | ||
67 | CONFIG_KALLSYMS=y | ||
68 | # CONFIG_KALLSYMS_EXTRA_PASS is not set | ||
69 | CONFIG_HOTPLUG=y | ||
70 | CONFIG_PRINTK=y | ||
71 | CONFIG_BUG=y | ||
72 | CONFIG_ELF_CORE=y | ||
73 | # CONFIG_COMPAT_BRK is not set | ||
74 | CONFIG_BASE_FULL=y | ||
75 | CONFIG_FUTEX=y | ||
76 | CONFIG_ANON_INODES=y | ||
77 | CONFIG_EPOLL=y | ||
78 | CONFIG_SIGNALFD=y | ||
79 | CONFIG_TIMERFD=y | ||
80 | CONFIG_EVENTFD=y | ||
81 | CONFIG_SHMEM=y | ||
82 | CONFIG_VM_EVENT_COUNTERS=y | ||
83 | CONFIG_SLAB=y | ||
84 | # CONFIG_SLUB is not set | ||
85 | # CONFIG_SLOB is not set | ||
86 | # CONFIG_PROFILING is not set | ||
87 | # CONFIG_MARKERS is not set | ||
88 | CONFIG_HAVE_OPROFILE=y | ||
89 | # CONFIG_KPROBES is not set | ||
90 | CONFIG_HAVE_KPROBES=y | ||
91 | CONFIG_HAVE_KRETPROBES=y | ||
92 | # CONFIG_HAVE_DMA_ATTRS is not set | ||
93 | CONFIG_PROC_PAGE_MONITOR=y | ||
94 | CONFIG_SLABINFO=y | ||
95 | CONFIG_RT_MUTEXES=y | ||
96 | # CONFIG_TINY_SHMEM is not set | ||
97 | CONFIG_BASE_SMALL=0 | ||
98 | CONFIG_MODULES=y | ||
99 | # CONFIG_MODULE_FORCE_LOAD is not set | ||
100 | CONFIG_MODULE_UNLOAD=y | ||
101 | CONFIG_MODULE_FORCE_UNLOAD=y | ||
102 | CONFIG_MODVERSIONS=y | ||
103 | # CONFIG_MODULE_SRCVERSION_ALL is not set | ||
104 | CONFIG_KMOD=y | ||
105 | CONFIG_BLOCK=y | ||
106 | # CONFIG_LBD is not set | ||
107 | # CONFIG_BLK_DEV_IO_TRACE is not set | ||
108 | # CONFIG_LSF is not set | ||
109 | # CONFIG_BLK_DEV_BSG is not set | ||
110 | |||
111 | # | ||
112 | # IO Schedulers | ||
113 | # | ||
114 | CONFIG_IOSCHED_NOOP=y | ||
115 | # CONFIG_IOSCHED_AS is not set | ||
116 | CONFIG_IOSCHED_DEADLINE=y | ||
117 | # CONFIG_IOSCHED_CFQ is not set | ||
118 | # CONFIG_DEFAULT_AS is not set | ||
119 | CONFIG_DEFAULT_DEADLINE=y | ||
120 | # CONFIG_DEFAULT_CFQ is not set | ||
121 | # CONFIG_DEFAULT_NOOP is not set | ||
122 | CONFIG_DEFAULT_IOSCHED="deadline" | ||
123 | CONFIG_CLASSIC_RCU=y | ||
124 | |||
125 | # | ||
126 | # System Type | ||
127 | # | ||
128 | # CONFIG_ARCH_AAEC2000 is not set | ||
129 | # CONFIG_ARCH_INTEGRATOR is not set | ||
130 | # CONFIG_ARCH_REALVIEW is not set | ||
131 | # CONFIG_ARCH_VERSATILE is not set | ||
132 | # CONFIG_ARCH_AT91 is not set | ||
133 | # CONFIG_ARCH_CLPS7500 is not set | ||
134 | # CONFIG_ARCH_CLPS711X is not set | ||
135 | # CONFIG_ARCH_CO285 is not set | ||
136 | # CONFIG_ARCH_EBSA110 is not set | ||
137 | # CONFIG_ARCH_EP93XX is not set | ||
138 | # CONFIG_ARCH_FOOTBRIDGE is not set | ||
139 | # CONFIG_ARCH_NETX is not set | ||
140 | # CONFIG_ARCH_H720X is not set | ||
141 | # CONFIG_ARCH_IMX is not set | ||
142 | # CONFIG_ARCH_IOP13XX is not set | ||
143 | # CONFIG_ARCH_IOP32X is not set | ||
144 | # CONFIG_ARCH_IOP33X is not set | ||
145 | # CONFIG_ARCH_IXP23XX is not set | ||
146 | # CONFIG_ARCH_IXP2000 is not set | ||
147 | # CONFIG_ARCH_IXP4XX is not set | ||
148 | # CONFIG_ARCH_L7200 is not set | ||
149 | # CONFIG_ARCH_KS8695 is not set | ||
150 | # CONFIG_ARCH_NS9XXX is not set | ||
151 | # CONFIG_ARCH_MXC is not set | ||
152 | # CONFIG_ARCH_ORION5X is not set | ||
153 | # CONFIG_ARCH_PNX4008 is not set | ||
154 | CONFIG_ARCH_PXA=y | ||
155 | # CONFIG_ARCH_RPC is not set | ||
156 | # CONFIG_ARCH_SA1100 is not set | ||
157 | # CONFIG_ARCH_S3C2410 is not set | ||
158 | # CONFIG_ARCH_SHARK is not set | ||
159 | # CONFIG_ARCH_LH7A40X is not set | ||
160 | # CONFIG_ARCH_DAVINCI is not set | ||
161 | # CONFIG_ARCH_OMAP is not set | ||
162 | # CONFIG_ARCH_MSM7X00A is not set | ||
163 | |||
164 | # | ||
165 | # Intel PXA2xx/PXA3xx Implementations | ||
166 | # | ||
167 | # CONFIG_ARCH_GUMSTIX is not set | ||
168 | # CONFIG_ARCH_LUBBOCK is not set | ||
169 | # CONFIG_MACH_LOGICPD_PXA270 is not set | ||
170 | # CONFIG_MACH_MAINSTONE is not set | ||
171 | # CONFIG_ARCH_PXA_IDP is not set | ||
172 | # CONFIG_PXA_SHARPSL is not set | ||
173 | # CONFIG_ARCH_PXA_ESERIES is not set | ||
174 | # CONFIG_MACH_TRIZEPS4 is not set | ||
175 | # CONFIG_MACH_EM_X270 is not set | ||
176 | # CONFIG_MACH_COLIBRI is not set | ||
177 | # CONFIG_MACH_ZYLONITE is not set | ||
178 | # CONFIG_MACH_LITTLETON is not set | ||
179 | # CONFIG_MACH_ARMCORE is not set | ||
180 | # CONFIG_MACH_MAGICIAN is not set | ||
181 | # CONFIG_MACH_PCM027 is not set | ||
182 | CONFIG_PXA_EZX=y | ||
183 | CONFIG_MACH_EZX_A780=y | ||
184 | CONFIG_MACH_EZX_E680=y | ||
185 | CONFIG_MACH_EZX_A1200=y | ||
186 | CONFIG_MACH_EZX_A910=y | ||
187 | CONFIG_MACH_EZX_E6=y | ||
188 | CONFIG_MACH_EZX_E2=y | ||
189 | CONFIG_PXA27x=y | ||
190 | CONFIG_PXA_SSP=y | ||
191 | CONFIG_PXA_PWM=y | ||
192 | |||
193 | # | ||
194 | # Boot options | ||
195 | # | ||
196 | |||
197 | # | ||
198 | # Power management | ||
199 | # | ||
200 | |||
201 | # | ||
202 | # Processor Type | ||
203 | # | ||
204 | CONFIG_CPU_32=y | ||
205 | CONFIG_CPU_XSCALE=y | ||
206 | CONFIG_CPU_32v5=y | ||
207 | CONFIG_CPU_ABRT_EV5T=y | ||
208 | CONFIG_CPU_PABRT_NOIFAR=y | ||
209 | CONFIG_CPU_CACHE_VIVT=y | ||
210 | CONFIG_CPU_TLB_V4WBI=y | ||
211 | CONFIG_CPU_CP15=y | ||
212 | CONFIG_CPU_CP15_MMU=y | ||
213 | |||
214 | # | ||
215 | # Processor Features | ||
216 | # | ||
217 | CONFIG_ARM_THUMB=y | ||
218 | # CONFIG_CPU_DCACHE_DISABLE is not set | ||
219 | # CONFIG_OUTER_CACHE is not set | ||
220 | CONFIG_IWMMXT=y | ||
221 | CONFIG_XSCALE_PMU=y | ||
222 | |||
223 | # | ||
224 | # Bus support | ||
225 | # | ||
226 | # CONFIG_PCI_SYSCALL is not set | ||
227 | # CONFIG_ARCH_SUPPORTS_MSI is not set | ||
228 | # CONFIG_PCCARD is not set | ||
229 | |||
230 | # | ||
231 | # Kernel Features | ||
232 | # | ||
233 | CONFIG_TICK_ONESHOT=y | ||
234 | # CONFIG_NO_HZ is not set | ||
235 | CONFIG_HIGH_RES_TIMERS=y | ||
236 | CONFIG_GENERIC_CLOCKEVENTS_BUILD=y | ||
237 | CONFIG_PREEMPT=y | ||
238 | CONFIG_HZ=100 | ||
239 | CONFIG_AEABI=y | ||
240 | CONFIG_OABI_COMPAT=y | ||
241 | # CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set | ||
242 | CONFIG_SELECT_MEMORY_MODEL=y | ||
243 | CONFIG_FLATMEM_MANUAL=y | ||
244 | # CONFIG_DISCONTIGMEM_MANUAL is not set | ||
245 | # CONFIG_SPARSEMEM_MANUAL is not set | ||
246 | CONFIG_FLATMEM=y | ||
247 | CONFIG_FLAT_NODE_MEM_MAP=y | ||
248 | # CONFIG_SPARSEMEM_STATIC is not set | ||
249 | # CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set | ||
250 | CONFIG_PAGEFLAGS_EXTENDED=y | ||
251 | CONFIG_SPLIT_PTLOCK_CPUS=4096 | ||
252 | # CONFIG_RESOURCES_64BIT is not set | ||
253 | CONFIG_ZONE_DMA_FLAG=1 | ||
254 | CONFIG_BOUNCE=y | ||
255 | CONFIG_VIRT_TO_BUS=y | ||
256 | CONFIG_ALIGNMENT_TRAP=y | ||
257 | |||
258 | # | ||
259 | # Boot options | ||
260 | # | ||
261 | CONFIG_ZBOOT_ROM_TEXT=0x0 | ||
262 | CONFIG_ZBOOT_ROM_BSS=0x0 | ||
263 | CONFIG_CMDLINE="console=tty1 root=/dev/mmcblk0p2 rootfstype=ext2 rootdelay=1 ip=192.168.0.202:192.168.0.200:192.168.0.200:255.255.255.0 debug" | ||
264 | # CONFIG_XIP_KERNEL is not set | ||
265 | CONFIG_KEXEC=y | ||
266 | CONFIG_ATAGS_PROC=y | ||
267 | |||
268 | # | ||
269 | # CPU Frequency scaling | ||
270 | # | ||
271 | # CONFIG_CPU_FREQ is not set | ||
272 | |||
273 | # | ||
274 | # Floating point emulation | ||
275 | # | ||
276 | |||
277 | # | ||
278 | # At least one emulation must be selected | ||
279 | # | ||
280 | CONFIG_FPE_NWFPE=y | ||
281 | # CONFIG_FPE_NWFPE_XP is not set | ||
282 | # CONFIG_FPE_FASTFPE is not set | ||
283 | |||
284 | # | ||
285 | # Userspace binary formats | ||
286 | # | ||
287 | CONFIG_BINFMT_ELF=y | ||
288 | CONFIG_BINFMT_AOUT=m | ||
289 | CONFIG_BINFMT_MISC=m | ||
290 | |||
291 | # | ||
292 | # Power management options | ||
293 | # | ||
294 | CONFIG_PM=y | ||
295 | # CONFIG_PM_DEBUG is not set | ||
296 | CONFIG_PM_SLEEP=y | ||
297 | CONFIG_SUSPEND=y | ||
298 | CONFIG_SUSPEND_FREEZER=y | ||
299 | CONFIG_APM_EMULATION=y | ||
300 | CONFIG_ARCH_SUSPEND_POSSIBLE=y | ||
301 | |||
302 | # | ||
303 | # Networking | ||
304 | # | ||
305 | CONFIG_NET=y | ||
306 | |||
307 | # | ||
308 | # Networking options | ||
309 | # | ||
310 | CONFIG_PACKET=y | ||
311 | CONFIG_PACKET_MMAP=y | ||
312 | CONFIG_UNIX=y | ||
313 | CONFIG_XFRM=y | ||
314 | # CONFIG_XFRM_USER is not set | ||
315 | # CONFIG_XFRM_SUB_POLICY is not set | ||
316 | # CONFIG_XFRM_MIGRATE is not set | ||
317 | # CONFIG_XFRM_STATISTICS is not set | ||
318 | # CONFIG_NET_KEY is not set | ||
319 | CONFIG_INET=y | ||
320 | # CONFIG_IP_MULTICAST is not set | ||
321 | # CONFIG_IP_ADVANCED_ROUTER is not set | ||
322 | CONFIG_IP_FIB_HASH=y | ||
323 | CONFIG_IP_PNP=y | ||
324 | CONFIG_IP_PNP_DHCP=y | ||
325 | CONFIG_IP_PNP_BOOTP=y | ||
326 | CONFIG_IP_PNP_RARP=y | ||
327 | # CONFIG_NET_IPIP is not set | ||
328 | # CONFIG_NET_IPGRE is not set | ||
329 | # CONFIG_ARPD is not set | ||
330 | CONFIG_SYN_COOKIES=y | ||
331 | # CONFIG_INET_AH is not set | ||
332 | # CONFIG_INET_ESP is not set | ||
333 | # CONFIG_INET_IPCOMP is not set | ||
334 | # CONFIG_INET_XFRM_TUNNEL is not set | ||
335 | CONFIG_INET_TUNNEL=m | ||
336 | # CONFIG_INET_XFRM_MODE_TRANSPORT is not set | ||
337 | # CONFIG_INET_XFRM_MODE_TUNNEL is not set | ||
338 | # CONFIG_INET_XFRM_MODE_BEET is not set | ||
339 | # CONFIG_INET_LRO is not set | ||
340 | # CONFIG_INET_DIAG is not set | ||
341 | # CONFIG_TCP_CONG_ADVANCED is not set | ||
342 | CONFIG_TCP_CONG_CUBIC=y | ||
343 | CONFIG_DEFAULT_TCP_CONG="cubic" | ||
344 | # CONFIG_TCP_MD5SIG is not set | ||
345 | # CONFIG_IP_VS is not set | ||
346 | CONFIG_IPV6=m | ||
347 | # CONFIG_IPV6_PRIVACY is not set | ||
348 | # CONFIG_IPV6_ROUTER_PREF is not set | ||
349 | # CONFIG_IPV6_OPTIMISTIC_DAD is not set | ||
350 | CONFIG_INET6_AH=m | ||
351 | CONFIG_INET6_ESP=m | ||
352 | CONFIG_INET6_IPCOMP=m | ||
353 | CONFIG_IPV6_MIP6=m | ||
354 | CONFIG_INET6_XFRM_TUNNEL=m | ||
355 | CONFIG_INET6_TUNNEL=m | ||
356 | CONFIG_INET6_XFRM_MODE_TRANSPORT=m | ||
357 | CONFIG_INET6_XFRM_MODE_TUNNEL=m | ||
358 | CONFIG_INET6_XFRM_MODE_BEET=m | ||
359 | # CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set | ||
360 | CONFIG_IPV6_SIT=m | ||
361 | CONFIG_IPV6_NDISC_NODETYPE=y | ||
362 | CONFIG_IPV6_TUNNEL=m | ||
363 | CONFIG_IPV6_MULTIPLE_TABLES=y | ||
364 | CONFIG_IPV6_SUBTREES=y | ||
365 | # CONFIG_IPV6_MROUTE is not set | ||
366 | # CONFIG_NETWORK_SECMARK is not set | ||
367 | CONFIG_NETFILTER=y | ||
368 | # CONFIG_NETFILTER_DEBUG is not set | ||
369 | CONFIG_NETFILTER_ADVANCED=y | ||
370 | CONFIG_BRIDGE_NETFILTER=y | ||
371 | |||
372 | # | ||
373 | # Core Netfilter Configuration | ||
374 | # | ||
375 | CONFIG_NETFILTER_NETLINK=m | ||
376 | CONFIG_NETFILTER_NETLINK_QUEUE=m | ||
377 | CONFIG_NETFILTER_NETLINK_LOG=m | ||
378 | CONFIG_NF_CONNTRACK=m | ||
379 | CONFIG_NF_CT_ACCT=y | ||
380 | CONFIG_NF_CONNTRACK_MARK=y | ||
381 | CONFIG_NF_CONNTRACK_EVENTS=y | ||
382 | # CONFIG_NF_CT_PROTO_DCCP is not set | ||
383 | CONFIG_NF_CT_PROTO_GRE=m | ||
384 | CONFIG_NF_CT_PROTO_SCTP=m | ||
385 | CONFIG_NF_CT_PROTO_UDPLITE=m | ||
386 | CONFIG_NF_CONNTRACK_AMANDA=m | ||
387 | CONFIG_NF_CONNTRACK_FTP=m | ||
388 | CONFIG_NF_CONNTRACK_H323=m | ||
389 | CONFIG_NF_CONNTRACK_IRC=m | ||
390 | CONFIG_NF_CONNTRACK_NETBIOS_NS=m | ||
391 | CONFIG_NF_CONNTRACK_PPTP=m | ||
392 | CONFIG_NF_CONNTRACK_SANE=m | ||
393 | CONFIG_NF_CONNTRACK_SIP=m | ||
394 | CONFIG_NF_CONNTRACK_TFTP=m | ||
395 | CONFIG_NF_CT_NETLINK=m | ||
396 | CONFIG_NETFILTER_XTABLES=m | ||
397 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m | ||
398 | # CONFIG_NETFILTER_XT_TARGET_CONNMARK is not set | ||
399 | # CONFIG_NETFILTER_XT_TARGET_DSCP is not set | ||
400 | CONFIG_NETFILTER_XT_TARGET_MARK=m | ||
401 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m | ||
402 | CONFIG_NETFILTER_XT_TARGET_NFLOG=m | ||
403 | # CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set | ||
404 | # CONFIG_NETFILTER_XT_TARGET_RATEEST is not set | ||
405 | # CONFIG_NETFILTER_XT_TARGET_TRACE is not set | ||
406 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m | ||
407 | # CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set | ||
408 | CONFIG_NETFILTER_XT_MATCH_COMMENT=m | ||
409 | CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m | ||
410 | CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m | ||
411 | CONFIG_NETFILTER_XT_MATCH_CONNMARK=m | ||
412 | CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m | ||
413 | CONFIG_NETFILTER_XT_MATCH_DCCP=m | ||
414 | CONFIG_NETFILTER_XT_MATCH_DSCP=m | ||
415 | CONFIG_NETFILTER_XT_MATCH_ESP=m | ||
416 | CONFIG_NETFILTER_XT_MATCH_HELPER=m | ||
417 | # CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set | ||
418 | CONFIG_NETFILTER_XT_MATCH_LENGTH=m | ||
419 | CONFIG_NETFILTER_XT_MATCH_LIMIT=m | ||
420 | CONFIG_NETFILTER_XT_MATCH_MAC=m | ||
421 | CONFIG_NETFILTER_XT_MATCH_MARK=m | ||
422 | # CONFIG_NETFILTER_XT_MATCH_OWNER is not set | ||
423 | CONFIG_NETFILTER_XT_MATCH_POLICY=m | ||
424 | CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m | ||
425 | # CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set | ||
426 | CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m | ||
427 | CONFIG_NETFILTER_XT_MATCH_QUOTA=m | ||
428 | # CONFIG_NETFILTER_XT_MATCH_RATEEST is not set | ||
429 | CONFIG_NETFILTER_XT_MATCH_REALM=m | ||
430 | CONFIG_NETFILTER_XT_MATCH_SCTP=m | ||
431 | CONFIG_NETFILTER_XT_MATCH_STATE=m | ||
432 | CONFIG_NETFILTER_XT_MATCH_STATISTIC=m | ||
433 | CONFIG_NETFILTER_XT_MATCH_STRING=m | ||
434 | CONFIG_NETFILTER_XT_MATCH_TCPMSS=m | ||
435 | CONFIG_NETFILTER_XT_MATCH_TIME=m | ||
436 | CONFIG_NETFILTER_XT_MATCH_U32=m | ||
437 | CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m | ||
438 | |||
439 | # | ||
440 | # IP: Netfilter Configuration | ||
441 | # | ||
442 | CONFIG_NF_CONNTRACK_IPV4=m | ||
443 | CONFIG_NF_CONNTRACK_PROC_COMPAT=y | ||
444 | CONFIG_IP_NF_QUEUE=m | ||
445 | CONFIG_IP_NF_IPTABLES=m | ||
446 | CONFIG_IP_NF_MATCH_RECENT=m | ||
447 | CONFIG_IP_NF_MATCH_ECN=m | ||
448 | CONFIG_IP_NF_MATCH_AH=m | ||
449 | CONFIG_IP_NF_MATCH_TTL=m | ||
450 | CONFIG_IP_NF_MATCH_ADDRTYPE=m | ||
451 | CONFIG_IP_NF_FILTER=m | ||
452 | CONFIG_IP_NF_TARGET_REJECT=m | ||
453 | CONFIG_IP_NF_TARGET_LOG=m | ||
454 | CONFIG_IP_NF_TARGET_ULOG=m | ||
455 | CONFIG_NF_NAT=m | ||
456 | CONFIG_NF_NAT_NEEDED=y | ||
457 | CONFIG_IP_NF_TARGET_MASQUERADE=m | ||
458 | CONFIG_IP_NF_TARGET_REDIRECT=m | ||
459 | CONFIG_IP_NF_TARGET_NETMAP=m | ||
460 | CONFIG_NF_NAT_SNMP_BASIC=m | ||
461 | CONFIG_NF_NAT_PROTO_GRE=m | ||
462 | CONFIG_NF_NAT_PROTO_UDPLITE=m | ||
463 | CONFIG_NF_NAT_PROTO_SCTP=m | ||
464 | CONFIG_NF_NAT_FTP=m | ||
465 | CONFIG_NF_NAT_IRC=m | ||
466 | CONFIG_NF_NAT_TFTP=m | ||
467 | CONFIG_NF_NAT_AMANDA=m | ||
468 | CONFIG_NF_NAT_PPTP=m | ||
469 | CONFIG_NF_NAT_H323=m | ||
470 | CONFIG_NF_NAT_SIP=m | ||
471 | CONFIG_IP_NF_MANGLE=m | ||
472 | CONFIG_IP_NF_TARGET_ECN=m | ||
473 | CONFIG_IP_NF_TARGET_TTL=m | ||
474 | CONFIG_IP_NF_TARGET_CLUSTERIP=m | ||
475 | CONFIG_IP_NF_RAW=m | ||
476 | CONFIG_IP_NF_ARPTABLES=m | ||
477 | CONFIG_IP_NF_ARPFILTER=m | ||
478 | CONFIG_IP_NF_ARP_MANGLE=m | ||
479 | |||
480 | # | ||
481 | # IPv6: Netfilter Configuration | ||
482 | # | ||
483 | CONFIG_NF_CONNTRACK_IPV6=m | ||
484 | CONFIG_IP6_NF_QUEUE=m | ||
485 | CONFIG_IP6_NF_IPTABLES=m | ||
486 | CONFIG_IP6_NF_MATCH_RT=m | ||
487 | CONFIG_IP6_NF_MATCH_OPTS=m | ||
488 | CONFIG_IP6_NF_MATCH_FRAG=m | ||
489 | CONFIG_IP6_NF_MATCH_HL=m | ||
490 | CONFIG_IP6_NF_MATCH_IPV6HEADER=m | ||
491 | CONFIG_IP6_NF_MATCH_AH=m | ||
492 | CONFIG_IP6_NF_MATCH_MH=m | ||
493 | CONFIG_IP6_NF_MATCH_EUI64=m | ||
494 | CONFIG_IP6_NF_FILTER=m | ||
495 | CONFIG_IP6_NF_TARGET_LOG=m | ||
496 | CONFIG_IP6_NF_TARGET_REJECT=m | ||
497 | CONFIG_IP6_NF_MANGLE=m | ||
498 | CONFIG_IP6_NF_TARGET_HL=m | ||
499 | CONFIG_IP6_NF_RAW=m | ||
500 | |||
501 | # | ||
502 | # Bridge: Netfilter Configuration | ||
503 | # | ||
504 | # CONFIG_BRIDGE_NF_EBTABLES is not set | ||
505 | # CONFIG_IP_DCCP is not set | ||
506 | # CONFIG_IP_SCTP is not set | ||
507 | # CONFIG_TIPC is not set | ||
508 | # CONFIG_ATM is not set | ||
509 | CONFIG_BRIDGE=m | ||
510 | # CONFIG_VLAN_8021Q is not set | ||
511 | # CONFIG_DECNET is not set | ||
512 | CONFIG_LLC=m | ||
513 | # CONFIG_LLC2 is not set | ||
514 | # CONFIG_IPX is not set | ||
515 | # CONFIG_ATALK is not set | ||
516 | # CONFIG_X25 is not set | ||
517 | # CONFIG_LAPB is not set | ||
518 | # CONFIG_ECONET is not set | ||
519 | # CONFIG_WAN_ROUTER is not set | ||
520 | # CONFIG_NET_SCHED is not set | ||
521 | CONFIG_NET_CLS_ROUTE=y | ||
522 | CONFIG_NET_SCH_FIFO=y | ||
523 | |||
524 | # | ||
525 | # Network testing | ||
526 | # | ||
527 | # CONFIG_NET_PKTGEN is not set | ||
528 | # CONFIG_HAMRADIO is not set | ||
529 | # CONFIG_CAN is not set | ||
530 | # CONFIG_IRDA is not set | ||
531 | CONFIG_BT=y | ||
532 | CONFIG_BT_L2CAP=m | ||
533 | CONFIG_BT_SCO=y | ||
534 | CONFIG_BT_RFCOMM=m | ||
535 | CONFIG_BT_RFCOMM_TTY=y | ||
536 | CONFIG_BT_BNEP=m | ||
537 | CONFIG_BT_BNEP_MC_FILTER=y | ||
538 | CONFIG_BT_BNEP_PROTO_FILTER=y | ||
539 | CONFIG_BT_HIDP=m | ||
540 | |||
541 | # | ||
542 | # Bluetooth device drivers | ||
543 | # | ||
544 | # CONFIG_BT_HCIUSB is not set | ||
545 | # CONFIG_BT_HCIBTUSB is not set | ||
546 | # CONFIG_BT_HCIBTSDIO is not set | ||
547 | CONFIG_BT_HCIUART=y | ||
548 | CONFIG_BT_HCIUART_H4=y | ||
549 | # CONFIG_BT_HCIUART_BCSP is not set | ||
550 | # CONFIG_BT_HCIUART_LL is not set | ||
551 | # CONFIG_BT_HCIBCM203X is not set | ||
552 | # CONFIG_BT_HCIBPA10X is not set | ||
553 | # CONFIG_BT_HCIBFUSB is not set | ||
554 | # CONFIG_BT_HCIVHCI is not set | ||
555 | # CONFIG_AF_RXRPC is not set | ||
556 | CONFIG_FIB_RULES=y | ||
557 | |||
558 | # | ||
559 | # Wireless | ||
560 | # | ||
561 | CONFIG_CFG80211=m | ||
562 | CONFIG_NL80211=y | ||
563 | CONFIG_WIRELESS_EXT=y | ||
564 | CONFIG_MAC80211=m | ||
565 | |||
566 | # | ||
567 | # Rate control algorithm selection | ||
568 | # | ||
569 | CONFIG_MAC80211_RC_DEFAULT_PID=y | ||
570 | # CONFIG_MAC80211_RC_DEFAULT_NONE is not set | ||
571 | |||
572 | # | ||
573 | # Selecting 'y' for an algorithm will | ||
574 | # | ||
575 | |||
576 | # | ||
577 | # build the algorithm into mac80211. | ||
578 | # | ||
579 | CONFIG_MAC80211_RC_DEFAULT="pid" | ||
580 | CONFIG_MAC80211_RC_PID=y | ||
581 | # CONFIG_MAC80211_MESH is not set | ||
582 | CONFIG_MAC80211_LEDS=y | ||
583 | # CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT is not set | ||
584 | # CONFIG_MAC80211_DEBUG is not set | ||
585 | CONFIG_IEEE80211=m | ||
586 | # CONFIG_IEEE80211_DEBUG is not set | ||
587 | CONFIG_IEEE80211_CRYPT_WEP=m | ||
588 | CONFIG_IEEE80211_CRYPT_CCMP=m | ||
589 | CONFIG_IEEE80211_CRYPT_TKIP=m | ||
590 | # CONFIG_RFKILL is not set | ||
591 | # CONFIG_NET_9P is not set | ||
592 | |||
593 | # | ||
594 | # Device Drivers | ||
595 | # | ||
596 | |||
597 | # | ||
598 | # Generic Driver Options | ||
599 | # | ||
600 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | ||
601 | CONFIG_STANDALONE=y | ||
602 | CONFIG_PREVENT_FIRMWARE_BUILD=y | ||
603 | CONFIG_FW_LOADER=m | ||
604 | # CONFIG_SYS_HYPERVISOR is not set | ||
605 | CONFIG_CONNECTOR=m | ||
606 | CONFIG_MTD=y | ||
607 | # CONFIG_MTD_DEBUG is not set | ||
608 | # CONFIG_MTD_CONCAT is not set | ||
609 | CONFIG_MTD_PARTITIONS=y | ||
610 | # CONFIG_MTD_REDBOOT_PARTS is not set | ||
611 | # CONFIG_MTD_CMDLINE_PARTS is not set | ||
612 | # CONFIG_MTD_AFS_PARTS is not set | ||
613 | # CONFIG_MTD_AR7_PARTS is not set | ||
614 | |||
615 | # | ||
616 | # User Modules And Translation Layers | ||
617 | # | ||
618 | CONFIG_MTD_CHAR=y | ||
619 | # CONFIG_MTD_BLKDEVS is not set | ||
620 | # CONFIG_MTD_BLOCK is not set | ||
621 | # CONFIG_MTD_BLOCK_RO is not set | ||
622 | # CONFIG_FTL is not set | ||
623 | # CONFIG_NFTL is not set | ||
624 | # CONFIG_INFTL is not set | ||
625 | # CONFIG_RFD_FTL is not set | ||
626 | # CONFIG_SSFDC is not set | ||
627 | # CONFIG_MTD_OOPS is not set | ||
628 | |||
629 | # | ||
630 | # RAM/ROM/Flash chip drivers | ||
631 | # | ||
632 | CONFIG_MTD_CFI=y | ||
633 | # CONFIG_MTD_JEDECPROBE is not set | ||
634 | CONFIG_MTD_GEN_PROBE=y | ||
635 | CONFIG_MTD_CFI_ADV_OPTIONS=y | ||
636 | CONFIG_MTD_CFI_NOSWAP=y | ||
637 | # CONFIG_MTD_CFI_BE_BYTE_SWAP is not set | ||
638 | # CONFIG_MTD_CFI_LE_BYTE_SWAP is not set | ||
639 | CONFIG_MTD_CFI_GEOMETRY=y | ||
640 | # CONFIG_MTD_MAP_BANK_WIDTH_1 is not set | ||
641 | CONFIG_MTD_MAP_BANK_WIDTH_2=y | ||
642 | # CONFIG_MTD_MAP_BANK_WIDTH_4 is not set | ||
643 | # CONFIG_MTD_MAP_BANK_WIDTH_8 is not set | ||
644 | # CONFIG_MTD_MAP_BANK_WIDTH_16 is not set | ||
645 | # CONFIG_MTD_MAP_BANK_WIDTH_32 is not set | ||
646 | CONFIG_MTD_CFI_I1=y | ||
647 | # CONFIG_MTD_CFI_I2 is not set | ||
648 | # CONFIG_MTD_CFI_I4 is not set | ||
649 | # CONFIG_MTD_CFI_I8 is not set | ||
650 | # CONFIG_MTD_OTP is not set | ||
651 | CONFIG_MTD_CFI_INTELEXT=y | ||
652 | # CONFIG_MTD_CFI_AMDSTD is not set | ||
653 | # CONFIG_MTD_CFI_STAA is not set | ||
654 | CONFIG_MTD_CFI_UTIL=y | ||
655 | # CONFIG_MTD_RAM is not set | ||
656 | # CONFIG_MTD_ROM is not set | ||
657 | # CONFIG_MTD_ABSENT is not set | ||
658 | CONFIG_MTD_XIP=y | ||
659 | |||
660 | # | ||
661 | # Mapping drivers for chip access | ||
662 | # | ||
663 | # CONFIG_MTD_COMPLEX_MAPPINGS is not set | ||
664 | CONFIG_MTD_PHYSMAP=y | ||
665 | CONFIG_MTD_PHYSMAP_START=0x0 | ||
666 | CONFIG_MTD_PHYSMAP_LEN=0x0 | ||
667 | CONFIG_MTD_PHYSMAP_BANKWIDTH=2 | ||
668 | # CONFIG_MTD_PXA2XX is not set | ||
669 | # CONFIG_MTD_ARM_INTEGRATOR is not set | ||
670 | # CONFIG_MTD_SHARP_SL is not set | ||
671 | # CONFIG_MTD_PLATRAM is not set | ||
672 | |||
673 | # | ||
674 | # Self-contained MTD device drivers | ||
675 | # | ||
676 | # CONFIG_MTD_DATAFLASH is not set | ||
677 | # CONFIG_MTD_M25P80 is not set | ||
678 | # CONFIG_MTD_SLRAM is not set | ||
679 | # CONFIG_MTD_PHRAM is not set | ||
680 | # CONFIG_MTD_MTDRAM is not set | ||
681 | # CONFIG_MTD_BLOCK2MTD is not set | ||
682 | |||
683 | # | ||
684 | # Disk-On-Chip Device Drivers | ||
685 | # | ||
686 | # CONFIG_MTD_DOC2000 is not set | ||
687 | # CONFIG_MTD_DOC2001 is not set | ||
688 | # CONFIG_MTD_DOC2001PLUS is not set | ||
689 | # CONFIG_MTD_NAND is not set | ||
690 | # CONFIG_MTD_ONENAND is not set | ||
691 | |||
692 | # | ||
693 | # UBI - Unsorted block images | ||
694 | # | ||
695 | # CONFIG_MTD_UBI is not set | ||
696 | # CONFIG_PARPORT is not set | ||
697 | CONFIG_BLK_DEV=y | ||
698 | # CONFIG_BLK_DEV_COW_COMMON is not set | ||
699 | CONFIG_BLK_DEV_LOOP=m | ||
700 | CONFIG_BLK_DEV_CRYPTOLOOP=m | ||
701 | CONFIG_BLK_DEV_NBD=m | ||
702 | # CONFIG_BLK_DEV_UB is not set | ||
703 | CONFIG_BLK_DEV_RAM=m | ||
704 | CONFIG_BLK_DEV_RAM_COUNT=16 | ||
705 | CONFIG_BLK_DEV_RAM_SIZE=4096 | ||
706 | # CONFIG_BLK_DEV_XIP is not set | ||
707 | # CONFIG_CDROM_PKTCDVD is not set | ||
708 | # CONFIG_ATA_OVER_ETH is not set | ||
709 | CONFIG_MISC_DEVICES=y | ||
710 | # CONFIG_EEPROM_93CX6 is not set | ||
711 | # CONFIG_ENCLOSURE_SERVICES is not set | ||
712 | CONFIG_HAVE_IDE=y | ||
713 | # CONFIG_IDE is not set | ||
714 | |||
715 | # | ||
716 | # SCSI device support | ||
717 | # | ||
718 | # CONFIG_RAID_ATTRS is not set | ||
719 | # CONFIG_SCSI is not set | ||
720 | # CONFIG_SCSI_DMA is not set | ||
721 | # CONFIG_SCSI_NETLINK is not set | ||
722 | # CONFIG_ATA is not set | ||
723 | # CONFIG_MD is not set | ||
724 | CONFIG_NETDEVICES=y | ||
725 | # CONFIG_NETDEVICES_MULTIQUEUE is not set | ||
726 | CONFIG_DUMMY=y | ||
727 | # CONFIG_BONDING is not set | ||
728 | # CONFIG_MACVLAN is not set | ||
729 | # CONFIG_EQUALIZER is not set | ||
730 | # CONFIG_TUN is not set | ||
731 | # CONFIG_VETH is not set | ||
732 | # CONFIG_NET_ETHERNET is not set | ||
733 | # CONFIG_NETDEV_1000 is not set | ||
734 | # CONFIG_NETDEV_10000 is not set | ||
735 | |||
736 | # | ||
737 | # Wireless LAN | ||
738 | # | ||
739 | # CONFIG_WLAN_PRE80211 is not set | ||
740 | # CONFIG_WLAN_80211 is not set | ||
741 | # CONFIG_IWLWIFI_LEDS is not set | ||
742 | |||
743 | # | ||
744 | # USB Network Adapters | ||
745 | # | ||
746 | # CONFIG_USB_CATC is not set | ||
747 | # CONFIG_USB_KAWETH is not set | ||
748 | # CONFIG_USB_PEGASUS is not set | ||
749 | # CONFIG_USB_RTL8150 is not set | ||
750 | # CONFIG_USB_USBNET is not set | ||
751 | # CONFIG_WAN is not set | ||
752 | CONFIG_PPP=m | ||
753 | CONFIG_PPP_MULTILINK=y | ||
754 | CONFIG_PPP_FILTER=y | ||
755 | CONFIG_PPP_ASYNC=m | ||
756 | CONFIG_PPP_SYNC_TTY=m | ||
757 | CONFIG_PPP_DEFLATE=m | ||
758 | CONFIG_PPP_BSDCOMP=m | ||
759 | # CONFIG_PPP_MPPE is not set | ||
760 | # CONFIG_PPPOE is not set | ||
761 | # CONFIG_PPPOL2TP is not set | ||
762 | # CONFIG_SLIP is not set | ||
763 | CONFIG_SLHC=m | ||
764 | # CONFIG_NETCONSOLE is not set | ||
765 | # CONFIG_NETPOLL is not set | ||
766 | # CONFIG_NET_POLL_CONTROLLER is not set | ||
767 | # CONFIG_ISDN is not set | ||
768 | |||
769 | # | ||
770 | # Input device support | ||
771 | # | ||
772 | CONFIG_INPUT=y | ||
773 | # CONFIG_INPUT_FF_MEMLESS is not set | ||
774 | # CONFIG_INPUT_POLLDEV is not set | ||
775 | |||
776 | # | ||
777 | # Userland interfaces | ||
778 | # | ||
779 | # CONFIG_INPUT_MOUSEDEV is not set | ||
780 | # CONFIG_INPUT_JOYDEV is not set | ||
781 | CONFIG_INPUT_EVDEV=y | ||
782 | # CONFIG_INPUT_EVBUG is not set | ||
783 | # CONFIG_INPUT_APMPOWER is not set | ||
784 | |||
785 | # | ||
786 | # Input Device Drivers | ||
787 | # | ||
788 | CONFIG_INPUT_KEYBOARD=y | ||
789 | # CONFIG_KEYBOARD_ATKBD is not set | ||
790 | # CONFIG_KEYBOARD_SUNKBD is not set | ||
791 | # CONFIG_KEYBOARD_LKKBD is not set | ||
792 | # CONFIG_KEYBOARD_XTKBD is not set | ||
793 | # CONFIG_KEYBOARD_NEWTON is not set | ||
794 | # CONFIG_KEYBOARD_STOWAWAY is not set | ||
795 | CONFIG_KEYBOARD_PXA27x=y | ||
796 | CONFIG_KEYBOARD_GPIO=y | ||
797 | # CONFIG_INPUT_MOUSE is not set | ||
798 | # CONFIG_INPUT_JOYSTICK is not set | ||
799 | # CONFIG_INPUT_TABLET is not set | ||
800 | CONFIG_INPUT_TOUCHSCREEN=y | ||
801 | # CONFIG_TOUCHSCREEN_ADS7846 is not set | ||
802 | # CONFIG_TOUCHSCREEN_FUJITSU is not set | ||
803 | # CONFIG_TOUCHSCREEN_GUNZE is not set | ||
804 | # CONFIG_TOUCHSCREEN_ELO is not set | ||
805 | # CONFIG_TOUCHSCREEN_MTOUCH is not set | ||
806 | # CONFIG_TOUCHSCREEN_MK712 is not set | ||
807 | # CONFIG_TOUCHSCREEN_PENMOUNT is not set | ||
808 | # CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set | ||
809 | # CONFIG_TOUCHSCREEN_TOUCHWIN is not set | ||
810 | # CONFIG_TOUCHSCREEN_UCB1400 is not set | ||
811 | # CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set | ||
812 | CONFIG_TOUCHSCREEN_PCAP=y | ||
813 | CONFIG_INPUT_MISC=y | ||
814 | # CONFIG_INPUT_ATI_REMOTE is not set | ||
815 | # CONFIG_INPUT_ATI_REMOTE2 is not set | ||
816 | # CONFIG_INPUT_KEYSPAN_REMOTE is not set | ||
817 | # CONFIG_INPUT_POWERMATE is not set | ||
818 | # CONFIG_INPUT_YEALINK is not set | ||
819 | CONFIG_INPUT_UINPUT=y | ||
820 | |||
821 | # | ||
822 | # Hardware I/O ports | ||
823 | # | ||
824 | # CONFIG_SERIO is not set | ||
825 | # CONFIG_GAMEPORT is not set | ||
826 | |||
827 | # | ||
828 | # Character devices | ||
829 | # | ||
830 | CONFIG_VT=y | ||
831 | CONFIG_VT_CONSOLE=y | ||
832 | CONFIG_HW_CONSOLE=y | ||
833 | # CONFIG_VT_HW_CONSOLE_BINDING is not set | ||
834 | CONFIG_DEVKMEM=y | ||
835 | # CONFIG_SERIAL_NONSTANDARD is not set | ||
836 | |||
837 | # | ||
838 | # Serial drivers | ||
839 | # | ||
840 | # CONFIG_SERIAL_8250 is not set | ||
841 | |||
842 | # | ||
843 | # Non-8250 serial port support | ||
844 | # | ||
845 | CONFIG_SERIAL_PXA=y | ||
846 | CONFIG_SERIAL_PXA_CONSOLE=y | ||
847 | CONFIG_SERIAL_CORE=y | ||
848 | CONFIG_SERIAL_CORE_CONSOLE=y | ||
849 | CONFIG_UNIX98_PTYS=y | ||
850 | CONFIG_LEGACY_PTYS=y | ||
851 | CONFIG_LEGACY_PTY_COUNT=8 | ||
852 | # CONFIG_IPMI_HANDLER is not set | ||
853 | CONFIG_HW_RANDOM=y | ||
854 | # CONFIG_NVRAM is not set | ||
855 | # CONFIG_R3964 is not set | ||
856 | # CONFIG_RAW_DRIVER is not set | ||
857 | # CONFIG_TCG_TPM is not set | ||
858 | CONFIG_I2C=y | ||
859 | CONFIG_I2C_BOARDINFO=y | ||
860 | CONFIG_I2C_CHARDEV=y | ||
861 | |||
862 | # | ||
863 | # I2C Hardware Bus support | ||
864 | # | ||
865 | # CONFIG_I2C_GPIO is not set | ||
866 | CONFIG_I2C_PXA=y | ||
867 | # CONFIG_I2C_PXA_SLAVE is not set | ||
868 | # CONFIG_I2C_OCORES is not set | ||
869 | # CONFIG_I2C_PARPORT_LIGHT is not set | ||
870 | # CONFIG_I2C_SIMTEC is not set | ||
871 | # CONFIG_I2C_TAOS_EVM is not set | ||
872 | # CONFIG_I2C_STUB is not set | ||
873 | # CONFIG_I2C_TINY_USB is not set | ||
874 | # CONFIG_I2C_PCA_PLATFORM is not set | ||
875 | |||
876 | # | ||
877 | # Miscellaneous I2C Chip support | ||
878 | # | ||
879 | # CONFIG_DS1682 is not set | ||
880 | # CONFIG_SENSORS_EEPROM is not set | ||
881 | # CONFIG_SENSORS_PCF8574 is not set | ||
882 | # CONFIG_PCF8575 is not set | ||
883 | # CONFIG_SENSORS_PCF8591 is not set | ||
884 | # CONFIG_TPS65010 is not set | ||
885 | # CONFIG_SENSORS_MAX6875 is not set | ||
886 | # CONFIG_SENSORS_TSL2550 is not set | ||
887 | # CONFIG_I2C_DEBUG_CORE is not set | ||
888 | # CONFIG_I2C_DEBUG_ALGO is not set | ||
889 | # CONFIG_I2C_DEBUG_BUS is not set | ||
890 | # CONFIG_I2C_DEBUG_CHIP is not set | ||
891 | CONFIG_SPI=y | ||
892 | CONFIG_SPI_MASTER=y | ||
893 | |||
894 | # | ||
895 | # SPI Master Controller Drivers | ||
896 | # | ||
897 | # CONFIG_SPI_BITBANG is not set | ||
898 | CONFIG_SPI_PXA2XX=m | ||
899 | |||
900 | # | ||
901 | # SPI Protocol Masters | ||
902 | # | ||
903 | # CONFIG_SPI_AT25 is not set | ||
904 | # CONFIG_SPI_SPIDEV is not set | ||
905 | # CONFIG_SPI_TLE62X0 is not set | ||
906 | CONFIG_HAVE_GPIO_LIB=y | ||
907 | |||
908 | # | ||
909 | # GPIO Support | ||
910 | # | ||
911 | |||
912 | # | ||
913 | # I2C GPIO expanders: | ||
914 | # | ||
915 | # CONFIG_GPIO_PCA953X is not set | ||
916 | # CONFIG_GPIO_PCF857X is not set | ||
917 | |||
918 | # | ||
919 | # SPI GPIO expanders: | ||
920 | # | ||
921 | # CONFIG_GPIO_MCP23S08 is not set | ||
922 | # CONFIG_W1 is not set | ||
923 | # CONFIG_POWER_SUPPLY is not set | ||
924 | # CONFIG_HWMON is not set | ||
925 | # CONFIG_WATCHDOG is not set | ||
926 | |||
927 | # | ||
928 | # Sonics Silicon Backplane | ||
929 | # | ||
930 | CONFIG_SSB_POSSIBLE=y | ||
931 | # CONFIG_SSB is not set | ||
932 | |||
933 | # | ||
934 | # Multifunction device drivers | ||
935 | # | ||
936 | # CONFIG_MFD_CORE is not set | ||
937 | # CONFIG_MFD_SM501 is not set | ||
938 | # CONFIG_MFD_ASIC3 is not set | ||
939 | # CONFIG_HTC_EGPIO is not set | ||
940 | # CONFIG_HTC_PASIC3 is not set | ||
941 | # CONFIG_MFD_TC6393XB is not set | ||
942 | CONFIG_EZX_PCAP=y | ||
943 | |||
944 | # | ||
945 | # Multimedia devices | ||
946 | # | ||
947 | |||
948 | # | ||
949 | # Multimedia core support | ||
950 | # | ||
951 | CONFIG_VIDEO_DEV=m | ||
952 | CONFIG_VIDEO_V4L2_COMMON=m | ||
953 | CONFIG_VIDEO_ALLOW_V4L1=y | ||
954 | CONFIG_VIDEO_V4L1_COMPAT=y | ||
955 | # CONFIG_DVB_CORE is not set | ||
956 | CONFIG_VIDEO_MEDIA=m | ||
957 | |||
958 | # | ||
959 | # Multimedia drivers | ||
960 | # | ||
961 | # CONFIG_MEDIA_ATTACH is not set | ||
962 | CONFIG_MEDIA_TUNER=m | ||
963 | # CONFIG_MEDIA_TUNER_CUSTOMIZE is not set | ||
964 | CONFIG_MEDIA_TUNER_SIMPLE=m | ||
965 | CONFIG_MEDIA_TUNER_TDA8290=m | ||
966 | CONFIG_MEDIA_TUNER_TDA9887=m | ||
967 | CONFIG_MEDIA_TUNER_TEA5761=m | ||
968 | CONFIG_MEDIA_TUNER_TEA5767=m | ||
969 | CONFIG_MEDIA_TUNER_MT20XX=m | ||
970 | CONFIG_MEDIA_TUNER_XC2028=m | ||
971 | CONFIG_MEDIA_TUNER_XC5000=m | ||
972 | CONFIG_VIDEO_V4L2=m | ||
973 | CONFIG_VIDEO_V4L1=m | ||
974 | CONFIG_VIDEO_CAPTURE_DRIVERS=y | ||
975 | # CONFIG_VIDEO_ADV_DEBUG is not set | ||
976 | CONFIG_VIDEO_HELPER_CHIPS_AUTO=y | ||
977 | # CONFIG_VIDEO_VIVI is not set | ||
978 | # CONFIG_VIDEO_CPIA is not set | ||
979 | # CONFIG_VIDEO_CPIA2 is not set | ||
980 | # CONFIG_VIDEO_SAA5246A is not set | ||
981 | # CONFIG_VIDEO_SAA5249 is not set | ||
982 | # CONFIG_TUNER_3036 is not set | ||
983 | # CONFIG_V4L_USB_DRIVERS is not set | ||
984 | # CONFIG_SOC_CAMERA is not set | ||
985 | # CONFIG_VIDEO_PXA27x is not set | ||
986 | CONFIG_RADIO_ADAPTERS=y | ||
987 | # CONFIG_USB_DSBR is not set | ||
988 | # CONFIG_USB_SI470X is not set | ||
989 | # CONFIG_DAB is not set | ||
990 | |||
991 | # | ||
992 | # Graphics support | ||
993 | # | ||
994 | # CONFIG_VGASTATE is not set | ||
995 | # CONFIG_VIDEO_OUTPUT_CONTROL is not set | ||
996 | CONFIG_FB=y | ||
997 | # CONFIG_FIRMWARE_EDID is not set | ||
998 | # CONFIG_FB_DDC is not set | ||
999 | CONFIG_FB_CFB_FILLRECT=y | ||
1000 | CONFIG_FB_CFB_COPYAREA=y | ||
1001 | CONFIG_FB_CFB_IMAGEBLIT=y | ||
1002 | # CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set | ||
1003 | # CONFIG_FB_SYS_FILLRECT is not set | ||
1004 | # CONFIG_FB_SYS_COPYAREA is not set | ||
1005 | # CONFIG_FB_SYS_IMAGEBLIT is not set | ||
1006 | # CONFIG_FB_FOREIGN_ENDIAN is not set | ||
1007 | # CONFIG_FB_SYS_FOPS is not set | ||
1008 | # CONFIG_FB_SVGALIB is not set | ||
1009 | # CONFIG_FB_MACMODES is not set | ||
1010 | # CONFIG_FB_BACKLIGHT is not set | ||
1011 | # CONFIG_FB_MODE_HELPERS is not set | ||
1012 | # CONFIG_FB_TILEBLITTING is not set | ||
1013 | |||
1014 | # | ||
1015 | # Frame buffer hardware drivers | ||
1016 | # | ||
1017 | # CONFIG_FB_UVESA is not set | ||
1018 | # CONFIG_FB_S1D13XXX is not set | ||
1019 | CONFIG_FB_PXA=y | ||
1020 | # CONFIG_FB_PXA_SMARTPANEL is not set | ||
1021 | CONFIG_FB_PXA_PARAMETERS=y | ||
1022 | # CONFIG_FB_MBX is not set | ||
1023 | # CONFIG_FB_AM200EPD is not set | ||
1024 | # CONFIG_FB_VIRTUAL is not set | ||
1025 | CONFIG_BACKLIGHT_LCD_SUPPORT=y | ||
1026 | # CONFIG_LCD_CLASS_DEVICE is not set | ||
1027 | CONFIG_BACKLIGHT_CLASS_DEVICE=y | ||
1028 | # CONFIG_BACKLIGHT_CORGI is not set | ||
1029 | CONFIG_BACKLIGHT_PWM=y | ||
1030 | |||
1031 | # | ||
1032 | # Display device support | ||
1033 | # | ||
1034 | # CONFIG_DISPLAY_SUPPORT is not set | ||
1035 | |||
1036 | # | ||
1037 | # Console display driver support | ||
1038 | # | ||
1039 | # CONFIG_VGA_CONSOLE is not set | ||
1040 | CONFIG_DUMMY_CONSOLE=y | ||
1041 | CONFIG_FRAMEBUFFER_CONSOLE=y | ||
1042 | # CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set | ||
1043 | # CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set | ||
1044 | CONFIG_FONTS=y | ||
1045 | # CONFIG_FONT_8x8 is not set | ||
1046 | # CONFIG_FONT_8x16 is not set | ||
1047 | # CONFIG_FONT_6x11 is not set | ||
1048 | # CONFIG_FONT_7x14 is not set | ||
1049 | # CONFIG_FONT_PEARL_8x8 is not set | ||
1050 | # CONFIG_FONT_ACORN_8x8 is not set | ||
1051 | CONFIG_FONT_MINI_4x6=y | ||
1052 | # CONFIG_FONT_SUN8x16 is not set | ||
1053 | # CONFIG_FONT_SUN12x22 is not set | ||
1054 | # CONFIG_FONT_10x18 is not set | ||
1055 | # CONFIG_LOGO is not set | ||
1056 | |||
1057 | # | ||
1058 | # Sound | ||
1059 | # | ||
1060 | CONFIG_SOUND=y | ||
1061 | |||
1062 | # | ||
1063 | # Advanced Linux Sound Architecture | ||
1064 | # | ||
1065 | CONFIG_SND=y | ||
1066 | CONFIG_SND_TIMER=y | ||
1067 | CONFIG_SND_PCM=y | ||
1068 | # CONFIG_SND_SEQUENCER is not set | ||
1069 | CONFIG_SND_OSSEMUL=y | ||
1070 | CONFIG_SND_MIXER_OSS=y | ||
1071 | CONFIG_SND_PCM_OSS=y | ||
1072 | CONFIG_SND_PCM_OSS_PLUGINS=y | ||
1073 | # CONFIG_SND_DYNAMIC_MINORS is not set | ||
1074 | CONFIG_SND_SUPPORT_OLD_API=y | ||
1075 | CONFIG_SND_VERBOSE_PROCFS=y | ||
1076 | # CONFIG_SND_VERBOSE_PRINTK is not set | ||
1077 | # CONFIG_SND_DEBUG is not set | ||
1078 | |||
1079 | # | ||
1080 | # Generic devices | ||
1081 | # | ||
1082 | # CONFIG_SND_DUMMY is not set | ||
1083 | # CONFIG_SND_MTPAV is not set | ||
1084 | # CONFIG_SND_SERIAL_U16550 is not set | ||
1085 | # CONFIG_SND_MPU401 is not set | ||
1086 | |||
1087 | # | ||
1088 | # ALSA ARM devices | ||
1089 | # | ||
1090 | # CONFIG_SND_PXA2XX_AC97 is not set | ||
1091 | |||
1092 | # | ||
1093 | # SPI devices | ||
1094 | # | ||
1095 | |||
1096 | # | ||
1097 | # USB devices | ||
1098 | # | ||
1099 | # CONFIG_SND_USB_AUDIO is not set | ||
1100 | # CONFIG_SND_USB_CAIAQ is not set | ||
1101 | |||
1102 | # | ||
1103 | # System on Chip audio support | ||
1104 | # | ||
1105 | CONFIG_SND_SOC=y | ||
1106 | CONFIG_SND_PXA2XX_SOC=y | ||
1107 | |||
1108 | # | ||
1109 | # ALSA SoC audio for Freescale SOCs | ||
1110 | # | ||
1111 | |||
1112 | # | ||
1113 | # SoC Audio for the Texas Instruments OMAP | ||
1114 | # | ||
1115 | |||
1116 | # | ||
1117 | # Open Sound System | ||
1118 | # | ||
1119 | # CONFIG_SOUND_PRIME is not set | ||
1120 | CONFIG_HID_SUPPORT=y | ||
1121 | CONFIG_HID=y | ||
1122 | # CONFIG_HID_DEBUG is not set | ||
1123 | # CONFIG_HIDRAW is not set | ||
1124 | |||
1125 | # | ||
1126 | # USB Input Devices | ||
1127 | # | ||
1128 | # CONFIG_USB_HID is not set | ||
1129 | |||
1130 | # | ||
1131 | # USB HID Boot Protocol drivers | ||
1132 | # | ||
1133 | # CONFIG_USB_KBD is not set | ||
1134 | # CONFIG_USB_MOUSE is not set | ||
1135 | CONFIG_USB_SUPPORT=y | ||
1136 | CONFIG_USB_ARCH_HAS_HCD=y | ||
1137 | CONFIG_USB_ARCH_HAS_OHCI=y | ||
1138 | # CONFIG_USB_ARCH_HAS_EHCI is not set | ||
1139 | CONFIG_USB=y | ||
1140 | # CONFIG_USB_DEBUG is not set | ||
1141 | # CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set | ||
1142 | |||
1143 | # | ||
1144 | # Miscellaneous USB options | ||
1145 | # | ||
1146 | # CONFIG_USB_DEVICEFS is not set | ||
1147 | # CONFIG_USB_DEVICE_CLASS is not set | ||
1148 | # CONFIG_USB_DYNAMIC_MINORS is not set | ||
1149 | # CONFIG_USB_SUSPEND is not set | ||
1150 | # CONFIG_USB_OTG is not set | ||
1151 | # CONFIG_USB_OTG_WHITELIST is not set | ||
1152 | # CONFIG_USB_OTG_BLACKLIST_HUB is not set | ||
1153 | |||
1154 | # | ||
1155 | # USB Host Controller Drivers | ||
1156 | # | ||
1157 | # CONFIG_USB_C67X00_HCD is not set | ||
1158 | # CONFIG_USB_ISP116X_HCD is not set | ||
1159 | # CONFIG_USB_ISP1760_HCD is not set | ||
1160 | CONFIG_USB_OHCI_HCD=y | ||
1161 | # CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set | ||
1162 | # CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set | ||
1163 | CONFIG_USB_OHCI_LITTLE_ENDIAN=y | ||
1164 | # CONFIG_USB_SL811_HCD is not set | ||
1165 | # CONFIG_USB_R8A66597_HCD is not set | ||
1166 | |||
1167 | # | ||
1168 | # USB Device Class drivers | ||
1169 | # | ||
1170 | # CONFIG_USB_ACM is not set | ||
1171 | # CONFIG_USB_PRINTER is not set | ||
1172 | |||
1173 | # | ||
1174 | # NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' | ||
1175 | # | ||
1176 | |||
1177 | # | ||
1178 | # may also be needed; see USB_STORAGE Help for more information | ||
1179 | # | ||
1180 | # CONFIG_USB_LIBUSUAL is not set | ||
1181 | |||
1182 | # | ||
1183 | # USB Imaging devices | ||
1184 | # | ||
1185 | # CONFIG_USB_MDC800 is not set | ||
1186 | # CONFIG_USB_MON is not set | ||
1187 | |||
1188 | # | ||
1189 | # USB port drivers | ||
1190 | # | ||
1191 | # CONFIG_USB_SERIAL is not set | ||
1192 | |||
1193 | # | ||
1194 | # USB Miscellaneous drivers | ||
1195 | # | ||
1196 | # CONFIG_USB_EMI62 is not set | ||
1197 | # CONFIG_USB_EMI26 is not set | ||
1198 | # CONFIG_USB_ADUTUX is not set | ||
1199 | # CONFIG_USB_AUERSWALD is not set | ||
1200 | # CONFIG_USB_RIO500 is not set | ||
1201 | # CONFIG_USB_LEGOTOWER is not set | ||
1202 | # CONFIG_USB_LCD is not set | ||
1203 | # CONFIG_USB_BERRY_CHARGE is not set | ||
1204 | # CONFIG_USB_LED is not set | ||
1205 | # CONFIG_USB_CYPRESS_CY7C63 is not set | ||
1206 | # CONFIG_USB_CYTHERM is not set | ||
1207 | # CONFIG_USB_PHIDGET is not set | ||
1208 | # CONFIG_USB_IDMOUSE is not set | ||
1209 | # CONFIG_USB_FTDI_ELAN is not set | ||
1210 | # CONFIG_USB_APPLEDISPLAY is not set | ||
1211 | # CONFIG_USB_LD is not set | ||
1212 | # CONFIG_USB_TRANCEVIBRATOR is not set | ||
1213 | # CONFIG_USB_IOWARRIOR is not set | ||
1214 | CONFIG_USB_GADGET=y | ||
1215 | # CONFIG_USB_GADGET_DEBUG_FILES is not set | ||
1216 | CONFIG_USB_GADGET_SELECTED=y | ||
1217 | # CONFIG_USB_GADGET_AMD5536UDC is not set | ||
1218 | # CONFIG_USB_GADGET_ATMEL_USBA is not set | ||
1219 | # CONFIG_USB_GADGET_FSL_USB2 is not set | ||
1220 | # CONFIG_USB_GADGET_NET2280 is not set | ||
1221 | # CONFIG_USB_GADGET_PXA25X is not set | ||
1222 | # CONFIG_USB_GADGET_M66592 is not set | ||
1223 | CONFIG_USB_GADGET_PXA27X=y | ||
1224 | CONFIG_USB_PXA27X=y | ||
1225 | # CONFIG_USB_GADGET_GOKU is not set | ||
1226 | # CONFIG_USB_GADGET_LH7A40X is not set | ||
1227 | # CONFIG_USB_GADGET_OMAP is not set | ||
1228 | # CONFIG_USB_GADGET_S3C2410 is not set | ||
1229 | # CONFIG_USB_GADGET_AT91 is not set | ||
1230 | # CONFIG_USB_GADGET_DUMMY_HCD is not set | ||
1231 | # CONFIG_USB_GADGET_DUALSPEED is not set | ||
1232 | # CONFIG_USB_ZERO is not set | ||
1233 | CONFIG_USB_ETH=y | ||
1234 | # CONFIG_USB_ETH_RNDIS is not set | ||
1235 | # CONFIG_USB_GADGETFS is not set | ||
1236 | # CONFIG_USB_FILE_STORAGE is not set | ||
1237 | # CONFIG_USB_G_SERIAL is not set | ||
1238 | # CONFIG_USB_MIDI_GADGET is not set | ||
1239 | # CONFIG_USB_G_PRINTER is not set | ||
1240 | CONFIG_MMC=y | ||
1241 | # CONFIG_MMC_DEBUG is not set | ||
1242 | CONFIG_MMC_UNSAFE_RESUME=y | ||
1243 | |||
1244 | # | ||
1245 | # MMC/SD Card Drivers | ||
1246 | # | ||
1247 | CONFIG_MMC_BLOCK=y | ||
1248 | CONFIG_MMC_BLOCK_BOUNCE=y | ||
1249 | CONFIG_SDIO_UART=y | ||
1250 | |||
1251 | # | ||
1252 | # MMC/SD Host Controller Drivers | ||
1253 | # | ||
1254 | CONFIG_MMC_PXA=y | ||
1255 | # CONFIG_MMC_SPI is not set | ||
1256 | CONFIG_NEW_LEDS=y | ||
1257 | CONFIG_LEDS_CLASS=y | ||
1258 | |||
1259 | # | ||
1260 | # LED drivers | ||
1261 | # | ||
1262 | # CONFIG_LEDS_GPIO is not set | ||
1263 | |||
1264 | # | ||
1265 | # LED Triggers | ||
1266 | # | ||
1267 | CONFIG_LEDS_TRIGGERS=y | ||
1268 | CONFIG_LEDS_TRIGGER_TIMER=y | ||
1269 | CONFIG_LEDS_TRIGGER_HEARTBEAT=y | ||
1270 | # CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set | ||
1271 | CONFIG_RTC_LIB=y | ||
1272 | CONFIG_RTC_CLASS=y | ||
1273 | CONFIG_RTC_HCTOSYS=y | ||
1274 | CONFIG_RTC_HCTOSYS_DEVICE="rtc0" | ||
1275 | # CONFIG_RTC_DEBUG is not set | ||
1276 | |||
1277 | # | ||
1278 | # RTC interfaces | ||
1279 | # | ||
1280 | CONFIG_RTC_INTF_SYSFS=y | ||
1281 | CONFIG_RTC_INTF_PROC=y | ||
1282 | CONFIG_RTC_INTF_DEV=y | ||
1283 | # CONFIG_RTC_INTF_DEV_UIE_EMUL is not set | ||
1284 | # CONFIG_RTC_DRV_TEST is not set | ||
1285 | |||
1286 | # | ||
1287 | # I2C RTC drivers | ||
1288 | # | ||
1289 | # CONFIG_RTC_DRV_DS1307 is not set | ||
1290 | # CONFIG_RTC_DRV_DS1374 is not set | ||
1291 | # CONFIG_RTC_DRV_DS1672 is not set | ||
1292 | # CONFIG_RTC_DRV_MAX6900 is not set | ||
1293 | # CONFIG_RTC_DRV_RS5C372 is not set | ||
1294 | # CONFIG_RTC_DRV_ISL1208 is not set | ||
1295 | # CONFIG_RTC_DRV_X1205 is not set | ||
1296 | # CONFIG_RTC_DRV_PCF8563 is not set | ||
1297 | # CONFIG_RTC_DRV_PCF8583 is not set | ||
1298 | # CONFIG_RTC_DRV_M41T80 is not set | ||
1299 | # CONFIG_RTC_DRV_S35390A is not set | ||
1300 | |||
1301 | # | ||
1302 | # SPI RTC drivers | ||
1303 | # | ||
1304 | # CONFIG_RTC_DRV_MAX6902 is not set | ||
1305 | # CONFIG_RTC_DRV_R9701 is not set | ||
1306 | # CONFIG_RTC_DRV_RS5C348 is not set | ||
1307 | |||
1308 | # | ||
1309 | # Platform RTC drivers | ||
1310 | # | ||
1311 | # CONFIG_RTC_DRV_CMOS is not set | ||
1312 | # CONFIG_RTC_DRV_DS1511 is not set | ||
1313 | # CONFIG_RTC_DRV_DS1553 is not set | ||
1314 | # CONFIG_RTC_DRV_DS1742 is not set | ||
1315 | # CONFIG_RTC_DRV_STK17TA8 is not set | ||
1316 | # CONFIG_RTC_DRV_M48T86 is not set | ||
1317 | # CONFIG_RTC_DRV_M48T59 is not set | ||
1318 | # CONFIG_RTC_DRV_V3020 is not set | ||
1319 | |||
1320 | # | ||
1321 | # on-CPU RTC drivers | ||
1322 | # | ||
1323 | CONFIG_RTC_DRV_SA1100=m | ||
1324 | # CONFIG_UIO is not set | ||
1325 | |||
1326 | # | ||
1327 | # File systems | ||
1328 | # | ||
1329 | CONFIG_EXT2_FS=y | ||
1330 | # CONFIG_EXT2_FS_XATTR is not set | ||
1331 | # CONFIG_EXT2_FS_XIP is not set | ||
1332 | CONFIG_EXT3_FS=m | ||
1333 | CONFIG_EXT3_FS_XATTR=y | ||
1334 | # CONFIG_EXT3_FS_POSIX_ACL is not set | ||
1335 | # CONFIG_EXT3_FS_SECURITY is not set | ||
1336 | # CONFIG_EXT4DEV_FS is not set | ||
1337 | CONFIG_JBD=m | ||
1338 | CONFIG_FS_MBCACHE=y | ||
1339 | CONFIG_REISERFS_FS=m | ||
1340 | # CONFIG_REISERFS_CHECK is not set | ||
1341 | # CONFIG_REISERFS_PROC_INFO is not set | ||
1342 | CONFIG_REISERFS_FS_XATTR=y | ||
1343 | CONFIG_REISERFS_FS_POSIX_ACL=y | ||
1344 | CONFIG_REISERFS_FS_SECURITY=y | ||
1345 | # CONFIG_JFS_FS is not set | ||
1346 | CONFIG_FS_POSIX_ACL=y | ||
1347 | CONFIG_XFS_FS=m | ||
1348 | # CONFIG_XFS_QUOTA is not set | ||
1349 | # CONFIG_XFS_POSIX_ACL is not set | ||
1350 | # CONFIG_XFS_RT is not set | ||
1351 | # CONFIG_XFS_DEBUG is not set | ||
1352 | # CONFIG_OCFS2_FS is not set | ||
1353 | CONFIG_DNOTIFY=y | ||
1354 | CONFIG_INOTIFY=y | ||
1355 | CONFIG_INOTIFY_USER=y | ||
1356 | # CONFIG_QUOTA is not set | ||
1357 | CONFIG_AUTOFS_FS=y | ||
1358 | CONFIG_AUTOFS4_FS=y | ||
1359 | CONFIG_FUSE_FS=m | ||
1360 | |||
1361 | # | ||
1362 | # CD-ROM/DVD Filesystems | ||
1363 | # | ||
1364 | CONFIG_ISO9660_FS=m | ||
1365 | CONFIG_JOLIET=y | ||
1366 | CONFIG_ZISOFS=y | ||
1367 | # CONFIG_UDF_FS is not set | ||
1368 | |||
1369 | # | ||
1370 | # DOS/FAT/NT Filesystems | ||
1371 | # | ||
1372 | CONFIG_FAT_FS=m | ||
1373 | CONFIG_MSDOS_FS=m | ||
1374 | CONFIG_VFAT_FS=m | ||
1375 | CONFIG_FAT_DEFAULT_CODEPAGE=437 | ||
1376 | CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" | ||
1377 | # CONFIG_NTFS_FS is not set | ||
1378 | |||
1379 | # | ||
1380 | # Pseudo filesystems | ||
1381 | # | ||
1382 | CONFIG_PROC_FS=y | ||
1383 | CONFIG_PROC_SYSCTL=y | ||
1384 | CONFIG_SYSFS=y | ||
1385 | CONFIG_TMPFS=y | ||
1386 | # CONFIG_TMPFS_POSIX_ACL is not set | ||
1387 | # CONFIG_HUGETLB_PAGE is not set | ||
1388 | # CONFIG_CONFIGFS_FS is not set | ||
1389 | |||
1390 | # | ||
1391 | # Miscellaneous filesystems | ||
1392 | # | ||
1393 | # CONFIG_ADFS_FS is not set | ||
1394 | # CONFIG_AFFS_FS is not set | ||
1395 | # CONFIG_HFS_FS is not set | ||
1396 | # CONFIG_HFSPLUS_FS is not set | ||
1397 | # CONFIG_BEFS_FS is not set | ||
1398 | # CONFIG_BFS_FS is not set | ||
1399 | # CONFIG_EFS_FS is not set | ||
1400 | # CONFIG_JFFS2_FS is not set | ||
1401 | CONFIG_CRAMFS=m | ||
1402 | # CONFIG_VXFS_FS is not set | ||
1403 | # CONFIG_MINIX_FS is not set | ||
1404 | # CONFIG_HPFS_FS is not set | ||
1405 | # CONFIG_QNX4FS_FS is not set | ||
1406 | # CONFIG_ROMFS_FS is not set | ||
1407 | # CONFIG_SYSV_FS is not set | ||
1408 | # CONFIG_UFS_FS is not set | ||
1409 | CONFIG_NETWORK_FILESYSTEMS=y | ||
1410 | CONFIG_NFS_FS=y | ||
1411 | CONFIG_NFS_V3=y | ||
1412 | CONFIG_NFS_V3_ACL=y | ||
1413 | # CONFIG_NFS_V4 is not set | ||
1414 | CONFIG_NFSD=m | ||
1415 | CONFIG_NFSD_V2_ACL=y | ||
1416 | CONFIG_NFSD_V3=y | ||
1417 | CONFIG_NFSD_V3_ACL=y | ||
1418 | # CONFIG_NFSD_V4 is not set | ||
1419 | # CONFIG_ROOT_NFS is not set | ||
1420 | CONFIG_LOCKD=y | ||
1421 | CONFIG_LOCKD_V4=y | ||
1422 | CONFIG_EXPORTFS=m | ||
1423 | CONFIG_NFS_ACL_SUPPORT=y | ||
1424 | CONFIG_NFS_COMMON=y | ||
1425 | CONFIG_SUNRPC=y | ||
1426 | # CONFIG_SUNRPC_BIND34 is not set | ||
1427 | # CONFIG_RPCSEC_GSS_KRB5 is not set | ||
1428 | # CONFIG_RPCSEC_GSS_SPKM3 is not set | ||
1429 | CONFIG_SMB_FS=m | ||
1430 | # CONFIG_SMB_NLS_DEFAULT is not set | ||
1431 | CONFIG_CIFS=m | ||
1432 | CONFIG_CIFS_STATS=y | ||
1433 | # CONFIG_CIFS_STATS2 is not set | ||
1434 | CONFIG_CIFS_WEAK_PW_HASH=y | ||
1435 | CONFIG_CIFS_XATTR=y | ||
1436 | CONFIG_CIFS_POSIX=y | ||
1437 | # CONFIG_CIFS_DEBUG2 is not set | ||
1438 | # CONFIG_CIFS_EXPERIMENTAL is not set | ||
1439 | # CONFIG_NCP_FS is not set | ||
1440 | # CONFIG_CODA_FS is not set | ||
1441 | # CONFIG_AFS_FS is not set | ||
1442 | |||
1443 | # | ||
1444 | # Partition Types | ||
1445 | # | ||
1446 | # CONFIG_PARTITION_ADVANCED is not set | ||
1447 | CONFIG_MSDOS_PARTITION=y | ||
1448 | CONFIG_NLS=y | ||
1449 | CONFIG_NLS_DEFAULT="iso8859-1" | ||
1450 | CONFIG_NLS_CODEPAGE_437=m | ||
1451 | CONFIG_NLS_CODEPAGE_737=m | ||
1452 | CONFIG_NLS_CODEPAGE_775=m | ||
1453 | CONFIG_NLS_CODEPAGE_850=m | ||
1454 | CONFIG_NLS_CODEPAGE_852=m | ||
1455 | CONFIG_NLS_CODEPAGE_855=m | ||
1456 | CONFIG_NLS_CODEPAGE_857=m | ||
1457 | CONFIG_NLS_CODEPAGE_860=m | ||
1458 | CONFIG_NLS_CODEPAGE_861=m | ||
1459 | CONFIG_NLS_CODEPAGE_862=m | ||
1460 | CONFIG_NLS_CODEPAGE_863=m | ||
1461 | CONFIG_NLS_CODEPAGE_864=m | ||
1462 | CONFIG_NLS_CODEPAGE_865=m | ||
1463 | CONFIG_NLS_CODEPAGE_866=m | ||
1464 | CONFIG_NLS_CODEPAGE_869=m | ||
1465 | CONFIG_NLS_CODEPAGE_936=m | ||
1466 | CONFIG_NLS_CODEPAGE_950=m | ||
1467 | CONFIG_NLS_CODEPAGE_932=m | ||
1468 | CONFIG_NLS_CODEPAGE_949=m | ||
1469 | CONFIG_NLS_CODEPAGE_874=m | ||
1470 | CONFIG_NLS_ISO8859_8=m | ||
1471 | CONFIG_NLS_CODEPAGE_1250=m | ||
1472 | CONFIG_NLS_CODEPAGE_1251=m | ||
1473 | CONFIG_NLS_ASCII=m | ||
1474 | CONFIG_NLS_ISO8859_1=m | ||
1475 | CONFIG_NLS_ISO8859_2=m | ||
1476 | CONFIG_NLS_ISO8859_3=m | ||
1477 | CONFIG_NLS_ISO8859_4=m | ||
1478 | CONFIG_NLS_ISO8859_5=m | ||
1479 | CONFIG_NLS_ISO8859_6=m | ||
1480 | CONFIG_NLS_ISO8859_7=m | ||
1481 | CONFIG_NLS_ISO8859_9=m | ||
1482 | CONFIG_NLS_ISO8859_13=m | ||
1483 | CONFIG_NLS_ISO8859_14=m | ||
1484 | CONFIG_NLS_ISO8859_15=m | ||
1485 | CONFIG_NLS_KOI8_R=m | ||
1486 | CONFIG_NLS_KOI8_U=m | ||
1487 | CONFIG_NLS_UTF8=m | ||
1488 | # CONFIG_DLM is not set | ||
1489 | |||
1490 | # | ||
1491 | # Kernel hacking | ||
1492 | # | ||
1493 | # CONFIG_PRINTK_TIME is not set | ||
1494 | CONFIG_ENABLE_WARN_DEPRECATED=y | ||
1495 | # CONFIG_ENABLE_MUST_CHECK is not set | ||
1496 | CONFIG_FRAME_WARN=1024 | ||
1497 | # CONFIG_MAGIC_SYSRQ is not set | ||
1498 | # CONFIG_UNUSED_SYMBOLS is not set | ||
1499 | # CONFIG_DEBUG_FS is not set | ||
1500 | # CONFIG_HEADERS_CHECK is not set | ||
1501 | # CONFIG_DEBUG_KERNEL is not set | ||
1502 | # CONFIG_DEBUG_BUGVERBOSE is not set | ||
1503 | CONFIG_FRAME_POINTER=y | ||
1504 | # CONFIG_SAMPLES is not set | ||
1505 | # CONFIG_DEBUG_USER is not set | ||
1506 | |||
1507 | # | ||
1508 | # Security options | ||
1509 | # | ||
1510 | # CONFIG_KEYS is not set | ||
1511 | # CONFIG_SECURITY is not set | ||
1512 | # CONFIG_SECURITY_FILE_CAPABILITIES is not set | ||
1513 | CONFIG_CRYPTO=y | ||
1514 | |||
1515 | # | ||
1516 | # Crypto core or helper | ||
1517 | # | ||
1518 | CONFIG_CRYPTO_ALGAPI=m | ||
1519 | CONFIG_CRYPTO_AEAD=m | ||
1520 | CONFIG_CRYPTO_BLKCIPHER=m | ||
1521 | CONFIG_CRYPTO_HASH=m | ||
1522 | CONFIG_CRYPTO_MANAGER=m | ||
1523 | CONFIG_CRYPTO_GF128MUL=m | ||
1524 | CONFIG_CRYPTO_NULL=m | ||
1525 | CONFIG_CRYPTO_CRYPTD=m | ||
1526 | CONFIG_CRYPTO_AUTHENC=m | ||
1527 | CONFIG_CRYPTO_TEST=m | ||
1528 | |||
1529 | # | ||
1530 | # Authenticated Encryption with Associated Data | ||
1531 | # | ||
1532 | # CONFIG_CRYPTO_CCM is not set | ||
1533 | # CONFIG_CRYPTO_GCM is not set | ||
1534 | # CONFIG_CRYPTO_SEQIV is not set | ||
1535 | |||
1536 | # | ||
1537 | # Block modes | ||
1538 | # | ||
1539 | CONFIG_CRYPTO_CBC=m | ||
1540 | # CONFIG_CRYPTO_CTR is not set | ||
1541 | # CONFIG_CRYPTO_CTS is not set | ||
1542 | CONFIG_CRYPTO_ECB=m | ||
1543 | CONFIG_CRYPTO_LRW=m | ||
1544 | CONFIG_CRYPTO_PCBC=m | ||
1545 | CONFIG_CRYPTO_XTS=m | ||
1546 | |||
1547 | # | ||
1548 | # Hash modes | ||
1549 | # | ||
1550 | CONFIG_CRYPTO_HMAC=m | ||
1551 | CONFIG_CRYPTO_XCBC=m | ||
1552 | |||
1553 | # | ||
1554 | # Digest | ||
1555 | # | ||
1556 | CONFIG_CRYPTO_CRC32C=m | ||
1557 | CONFIG_CRYPTO_MD4=m | ||
1558 | CONFIG_CRYPTO_MD5=m | ||
1559 | CONFIG_CRYPTO_MICHAEL_MIC=m | ||
1560 | CONFIG_CRYPTO_SHA1=m | ||
1561 | CONFIG_CRYPTO_SHA256=m | ||
1562 | CONFIG_CRYPTO_SHA512=m | ||
1563 | CONFIG_CRYPTO_TGR192=m | ||
1564 | # CONFIG_CRYPTO_WP512 is not set | ||
1565 | |||
1566 | # | ||
1567 | # Ciphers | ||
1568 | # | ||
1569 | CONFIG_CRYPTO_AES=m | ||
1570 | # CONFIG_CRYPTO_ANUBIS is not set | ||
1571 | CONFIG_CRYPTO_ARC4=m | ||
1572 | CONFIG_CRYPTO_BLOWFISH=m | ||
1573 | # CONFIG_CRYPTO_CAMELLIA is not set | ||
1574 | CONFIG_CRYPTO_CAST5=m | ||
1575 | CONFIG_CRYPTO_CAST6=m | ||
1576 | CONFIG_CRYPTO_DES=m | ||
1577 | CONFIG_CRYPTO_FCRYPT=m | ||
1578 | CONFIG_CRYPTO_KHAZAD=m | ||
1579 | # CONFIG_CRYPTO_SALSA20 is not set | ||
1580 | CONFIG_CRYPTO_SEED=m | ||
1581 | CONFIG_CRYPTO_SERPENT=m | ||
1582 | CONFIG_CRYPTO_TEA=m | ||
1583 | CONFIG_CRYPTO_TWOFISH=m | ||
1584 | CONFIG_CRYPTO_TWOFISH_COMMON=m | ||
1585 | |||
1586 | # | ||
1587 | # Compression | ||
1588 | # | ||
1589 | CONFIG_CRYPTO_DEFLATE=m | ||
1590 | # CONFIG_CRYPTO_LZO is not set | ||
1591 | CONFIG_CRYPTO_HW=y | ||
1592 | |||
1593 | # | ||
1594 | # Library routines | ||
1595 | # | ||
1596 | CONFIG_BITREVERSE=y | ||
1597 | # CONFIG_GENERIC_FIND_FIRST_BIT is not set | ||
1598 | # CONFIG_GENERIC_FIND_NEXT_BIT is not set | ||
1599 | CONFIG_CRC_CCITT=m | ||
1600 | CONFIG_CRC16=m | ||
1601 | # CONFIG_CRC_ITU_T is not set | ||
1602 | CONFIG_CRC32=y | ||
1603 | # CONFIG_CRC7 is not set | ||
1604 | CONFIG_LIBCRC32C=m | ||
1605 | CONFIG_ZLIB_INFLATE=m | ||
1606 | CONFIG_ZLIB_DEFLATE=m | ||
1607 | CONFIG_TEXTSEARCH=y | ||
1608 | CONFIG_TEXTSEARCH_KMP=m | ||
1609 | CONFIG_TEXTSEARCH_BM=m | ||
1610 | CONFIG_TEXTSEARCH_FSM=m | ||
1611 | CONFIG_PLIST=y | ||
1612 | CONFIG_HAS_IOMEM=y | ||
1613 | CONFIG_HAS_IOPORT=y | ||
1614 | CONFIG_HAS_DMA=y | ||
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index eb9092ca8008..1d296fc8494e 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile | |||
@@ -28,6 +28,7 @@ obj-$(CONFIG_KPROBES) += kprobes.o kprobes-decode.o | |||
28 | obj-$(CONFIG_ATAGS_PROC) += atags.o | 28 | obj-$(CONFIG_ATAGS_PROC) += atags.o |
29 | obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o | 29 | obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o |
30 | obj-$(CONFIG_ARM_THUMBEE) += thumbee.o | 30 | obj-$(CONFIG_ARM_THUMBEE) += thumbee.o |
31 | obj-$(CONFIG_KGDB) += kgdb.o | ||
31 | 32 | ||
32 | obj-$(CONFIG_CRUNCH) += crunch.o crunch-bits.o | 33 | obj-$(CONFIG_CRUNCH) += crunch.o crunch-bits.o |
33 | AFLAGS_crunch-bits.o := -Wa,-mcpu=ep9312 | 34 | AFLAGS_crunch-bits.o := -Wa,-mcpu=ep9312 |
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c new file mode 100644 index 000000000000..aaffaecffcd1 --- /dev/null +++ b/arch/arm/kernel/kgdb.c | |||
@@ -0,0 +1,201 @@ | |||
1 | /* | ||
2 | * arch/arm/kernel/kgdb.c | ||
3 | * | ||
4 | * ARM KGDB support | ||
5 | * | ||
6 | * Copyright (c) 2002-2004 MontaVista Software, Inc | ||
7 | * Copyright (c) 2008 Wind River Systems, Inc. | ||
8 | * | ||
9 | * Authors: George Davis <davis_g@mvista.com> | ||
10 | * Deepak Saxena <dsaxena@plexity.net> | ||
11 | */ | ||
12 | #include <linux/kgdb.h> | ||
13 | #include <asm/traps.h> | ||
14 | |||
15 | /* Make a local copy of the registers passed into the handler (bletch) */ | ||
16 | void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs) | ||
17 | { | ||
18 | int regno; | ||
19 | |||
20 | /* Initialize all to zero. */ | ||
21 | for (regno = 0; regno < GDB_MAX_REGS; regno++) | ||
22 | gdb_regs[regno] = 0; | ||
23 | |||
24 | gdb_regs[_R0] = kernel_regs->ARM_r0; | ||
25 | gdb_regs[_R1] = kernel_regs->ARM_r1; | ||
26 | gdb_regs[_R2] = kernel_regs->ARM_r2; | ||
27 | gdb_regs[_R3] = kernel_regs->ARM_r3; | ||
28 | gdb_regs[_R4] = kernel_regs->ARM_r4; | ||
29 | gdb_regs[_R5] = kernel_regs->ARM_r5; | ||
30 | gdb_regs[_R6] = kernel_regs->ARM_r6; | ||
31 | gdb_regs[_R7] = kernel_regs->ARM_r7; | ||
32 | gdb_regs[_R8] = kernel_regs->ARM_r8; | ||
33 | gdb_regs[_R9] = kernel_regs->ARM_r9; | ||
34 | gdb_regs[_R10] = kernel_regs->ARM_r10; | ||
35 | gdb_regs[_FP] = kernel_regs->ARM_fp; | ||
36 | gdb_regs[_IP] = kernel_regs->ARM_ip; | ||
37 | gdb_regs[_SPT] = kernel_regs->ARM_sp; | ||
38 | gdb_regs[_LR] = kernel_regs->ARM_lr; | ||
39 | gdb_regs[_PC] = kernel_regs->ARM_pc; | ||
40 | gdb_regs[_CPSR] = kernel_regs->ARM_cpsr; | ||
41 | } | ||
42 | |||
43 | /* Copy local gdb registers back to kgdb regs, for later copy to kernel */ | ||
44 | void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs) | ||
45 | { | ||
46 | kernel_regs->ARM_r0 = gdb_regs[_R0]; | ||
47 | kernel_regs->ARM_r1 = gdb_regs[_R1]; | ||
48 | kernel_regs->ARM_r2 = gdb_regs[_R2]; | ||
49 | kernel_regs->ARM_r3 = gdb_regs[_R3]; | ||
50 | kernel_regs->ARM_r4 = gdb_regs[_R4]; | ||
51 | kernel_regs->ARM_r5 = gdb_regs[_R5]; | ||
52 | kernel_regs->ARM_r6 = gdb_regs[_R6]; | ||
53 | kernel_regs->ARM_r7 = gdb_regs[_R7]; | ||
54 | kernel_regs->ARM_r8 = gdb_regs[_R8]; | ||
55 | kernel_regs->ARM_r9 = gdb_regs[_R9]; | ||
56 | kernel_regs->ARM_r10 = gdb_regs[_R10]; | ||
57 | kernel_regs->ARM_fp = gdb_regs[_FP]; | ||
58 | kernel_regs->ARM_ip = gdb_regs[_IP]; | ||
59 | kernel_regs->ARM_sp = gdb_regs[_SPT]; | ||
60 | kernel_regs->ARM_lr = gdb_regs[_LR]; | ||
61 | kernel_regs->ARM_pc = gdb_regs[_PC]; | ||
62 | kernel_regs->ARM_cpsr = gdb_regs[_CPSR]; | ||
63 | } | ||
64 | |||
65 | void | ||
66 | sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) | ||
67 | { | ||
68 | struct pt_regs *thread_regs; | ||
69 | int regno; | ||
70 | |||
71 | /* Just making sure... */ | ||
72 | if (task == NULL) | ||
73 | return; | ||
74 | |||
75 | /* Initialize to zero */ | ||
76 | for (regno = 0; regno < GDB_MAX_REGS; regno++) | ||
77 | gdb_regs[regno] = 0; | ||
78 | |||
79 | /* Otherwise, we have only some registers from switch_to() */ | ||
80 | thread_regs = task_pt_regs(task); | ||
81 | gdb_regs[_R0] = thread_regs->ARM_r0; | ||
82 | gdb_regs[_R1] = thread_regs->ARM_r1; | ||
83 | gdb_regs[_R2] = thread_regs->ARM_r2; | ||
84 | gdb_regs[_R3] = thread_regs->ARM_r3; | ||
85 | gdb_regs[_R4] = thread_regs->ARM_r4; | ||
86 | gdb_regs[_R5] = thread_regs->ARM_r5; | ||
87 | gdb_regs[_R6] = thread_regs->ARM_r6; | ||
88 | gdb_regs[_R7] = thread_regs->ARM_r7; | ||
89 | gdb_regs[_R8] = thread_regs->ARM_r8; | ||
90 | gdb_regs[_R9] = thread_regs->ARM_r9; | ||
91 | gdb_regs[_R10] = thread_regs->ARM_r10; | ||
92 | gdb_regs[_FP] = thread_regs->ARM_fp; | ||
93 | gdb_regs[_IP] = thread_regs->ARM_ip; | ||
94 | gdb_regs[_SPT] = thread_regs->ARM_sp; | ||
95 | gdb_regs[_LR] = thread_regs->ARM_lr; | ||
96 | gdb_regs[_PC] = thread_regs->ARM_pc; | ||
97 | gdb_regs[_CPSR] = thread_regs->ARM_cpsr; | ||
98 | } | ||
99 | |||
100 | static int compiled_break; | ||
101 | |||
102 | int kgdb_arch_handle_exception(int exception_vector, int signo, | ||
103 | int err_code, char *remcom_in_buffer, | ||
104 | char *remcom_out_buffer, | ||
105 | struct pt_regs *linux_regs) | ||
106 | { | ||
107 | unsigned long addr; | ||
108 | char *ptr; | ||
109 | |||
110 | switch (remcom_in_buffer[0]) { | ||
111 | case 'D': | ||
112 | case 'k': | ||
113 | case 'c': | ||
114 | kgdb_contthread = NULL; | ||
115 | |||
116 | /* | ||
117 | * Try to read optional parameter, pc unchanged if no parm. | ||
118 | * If this was a compiled breakpoint, we need to move | ||
119 | * to the next instruction or we will just breakpoint | ||
120 | * over and over again. | ||
121 | */ | ||
122 | ptr = &remcom_in_buffer[1]; | ||
123 | if (kgdb_hex2long(&ptr, &addr)) | ||
124 | linux_regs->ARM_pc = addr; | ||
125 | else if (compiled_break == 1) | ||
126 | linux_regs->ARM_pc += 4; | ||
127 | |||
128 | compiled_break = 0; | ||
129 | |||
130 | return 0; | ||
131 | } | ||
132 | |||
133 | return -1; | ||
134 | } | ||
135 | |||
136 | static int kgdb_brk_fn(struct pt_regs *regs, unsigned int instr) | ||
137 | { | ||
138 | kgdb_handle_exception(1, SIGTRAP, 0, regs); | ||
139 | |||
140 | return 0; | ||
141 | } | ||
142 | |||
143 | static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int instr) | ||
144 | { | ||
145 | compiled_break = 1; | ||
146 | kgdb_handle_exception(1, SIGTRAP, 0, regs); | ||
147 | |||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | static struct undef_hook kgdb_brkpt_hook = { | ||
152 | .instr_mask = 0xffffffff, | ||
153 | .instr_val = KGDB_BREAKINST, | ||
154 | .fn = kgdb_brk_fn | ||
155 | }; | ||
156 | |||
157 | static struct undef_hook kgdb_compiled_brkpt_hook = { | ||
158 | .instr_mask = 0xffffffff, | ||
159 | .instr_val = KGDB_COMPILED_BREAK, | ||
160 | .fn = kgdb_compiled_brk_fn | ||
161 | }; | ||
162 | |||
163 | /** | ||
164 | * kgdb_arch_init - Perform any architecture specific initialization. | ||
165 | * | ||
166 | * This function will handle the initialization of any architecture | ||
167 | * specific callbacks. | ||
168 | */ | ||
169 | int kgdb_arch_init(void) | ||
170 | { | ||
171 | register_undef_hook(&kgdb_brkpt_hook); | ||
172 | register_undef_hook(&kgdb_compiled_brkpt_hook); | ||
173 | |||
174 | return 0; | ||
175 | } | ||
176 | |||
177 | /** | ||
178 | * kgdb_arch_exit - Perform any architecture specific uninitialization. | ||
179 | * | ||
180 | * This function will handle the uninitialization of any architecture | ||
181 | * specific callbacks, for dynamic registration and unregistration. | ||
182 | */ | ||
183 | void kgdb_arch_exit(void) | ||
184 | { | ||
185 | unregister_undef_hook(&kgdb_brkpt_hook); | ||
186 | unregister_undef_hook(&kgdb_compiled_brkpt_hook); | ||
187 | } | ||
188 | |||
189 | /* | ||
190 | * Register our undef instruction hooks with ARM undef core. | ||
191 | * We register a hook specifically looking for the KGDB break instruction | ||
192 | * and we handle the normal undef case within the do_undefinstr | ||
193 | * handler. | ||
194 | */ | ||
195 | struct kgdb_arch arch_kgdb_ops = { | ||
196 | #ifndef __ARMEB__ | ||
197 | .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7} | ||
198 | #else /* ! __ARMEB__ */ | ||
199 | .gdb_bpt_instr = {0xe7, 0xff, 0xde, 0xfe} | ||
200 | #endif | ||
201 | }; | ||
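
The "compiled break" handling above exists for breakpoints that are built into the kernel image rather than planted by gdb at run time; kgdb_arch_handle_exception() advances ARM_pc past the trapping instruction so a plain continue does not re-trap forever. A minimal sketch of how such a breakpoint is typically triggered follows; it relies only on the generic kgdb_breakpoint() helper from <linux/kgdb.h>, and the module name is purely illustrative.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kgdb.h>

/* Hypothetical test module: drop into KGDB at a known point. */
static int __init kgdb_hello_init(void)
{
	printk(KERN_INFO "kgdb-hello: stopping in the debugger\n");

	/*
	 * On ARM this expands to the KGDB_COMPILED_BREAK undefined
	 * instruction, so kgdb_compiled_brk_fn() above runs and sets
	 * compiled_break = 1 before entering the debugger.
	 */
	kgdb_breakpoint();

	return 0;
}

static void __exit kgdb_hello_exit(void)
{
}

module_init(kgdb_hello_init);
module_exit(kgdb_hello_exit);
MODULE_LICENSE("GPL");
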
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index b7b0720bc1bb..38f0e7940a13 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <asm/mach/arch.h> | 36 | #include <asm/mach/arch.h> |
37 | #include <asm/mach/irq.h> | 37 | #include <asm/mach/irq.h> |
38 | #include <asm/mach/time.h> | 38 | #include <asm/mach/time.h> |
39 | #include <asm/traps.h> | ||
39 | 40 | ||
40 | #include "compat.h" | 41 | #include "compat.h" |
41 | #include "atags.h" | 42 | #include "atags.h" |
@@ -853,6 +854,7 @@ void __init setup_arch(char **cmdline_p) | |||
853 | conswitchp = &dummy_con; | 854 | conswitchp = &dummy_con; |
854 | #endif | 855 | #endif |
855 | #endif | 856 | #endif |
857 | early_trap_init(); | ||
856 | } | 858 | } |
857 | 859 | ||
858 | 860 | ||
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 5595fdd75e82..7277aef83098 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c | |||
@@ -708,6 +708,11 @@ EXPORT_SYMBOL(abort); | |||
708 | 708 | ||
709 | void __init trap_init(void) | 709 | void __init trap_init(void) |
710 | { | 710 | { |
711 | return; | ||
712 | } | ||
713 | |||
714 | void __init early_trap_init(void) | ||
715 | { | ||
711 | unsigned long vectors = CONFIG_VECTORS_BASE; | 716 | unsigned long vectors = CONFIG_VECTORS_BASE; |
712 | extern char __stubs_start[], __stubs_end[]; | 717 | extern char __stubs_start[], __stubs_end[]; |
713 | extern char __vectors_start[], __vectors_end[]; | 718 | extern char __vectors_start[], __vectors_end[]; |
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig index 914bb33dab92..e8ee7ec9ff6d 100644 --- a/arch/arm/mach-pxa/Kconfig +++ b/arch/arm/mach-pxa/Kconfig | |||
@@ -16,18 +16,24 @@ config CPU_PXA310 | |||
16 | config CPU_PXA320 | 16 | config CPU_PXA320 |
17 | bool "PXA320 (codename Monahans-P)" | 17 | bool "PXA320 (codename Monahans-P)" |
18 | 18 | ||
19 | config CPU_PXA930 | ||
20 | bool "PXA930 (codename Tavor-P)" | ||
21 | |||
19 | endmenu | 22 | endmenu |
20 | 23 | ||
21 | endif | 24 | endif |
22 | 25 | ||
23 | menu "Select target boards" | ||
24 | |||
25 | config ARCH_GUMSTIX | 26 | config ARCH_GUMSTIX |
26 | bool "Gumstix XScale boards" | 27 | bool "Gumstix XScale boards" |
27 | help | 28 | help |
28 | Say Y here if you intend to run this kernel on a | 29 | Say Y here if you intend to run this kernel on a |
29 | Gumstix Full Function Miniature Computer. | 30 | Gumstix Full Function Miniature Computer. |
30 | 31 | ||
32 | config MACH_GUMSTIX_F | ||
33 | bool "Basix, Connex, ws-200ax, ws-400ax systems" | ||
34 | depends on ARCH_GUMSTIX | ||
35 | select PXA25x | ||
36 | |||
31 | config ARCH_LUBBOCK | 37 | config ARCH_LUBBOCK |
32 | bool "Intel DBPXA250 Development Platform" | 38 | bool "Intel DBPXA250 Development Platform" |
33 | select PXA25x | 39 | select PXA25x |
@@ -58,6 +64,57 @@ config PXA_SHARPSL | |||
58 | SL-C3000 (Spitz), SL-C3100 (Borzoi) or SL-C6000x (Tosa) | 64 | SL-C3000 (Spitz), SL-C3100 (Borzoi) or SL-C6000x (Tosa) |
59 | handheld computer. | 65 | handheld computer. |
60 | 66 | ||
67 | config MACH_POODLE | ||
68 | bool "Enable Sharp SL-5600 (Poodle) Support" | ||
69 | depends on PXA_SHARPSL | ||
70 | select PXA25x | ||
71 | select SHARP_LOCOMO | ||
72 | select PXA_SSP | ||
73 | |||
74 | config MACH_CORGI | ||
75 | bool "Enable Sharp SL-C700 (Corgi) Support" | ||
76 | depends on PXA_SHARPSL | ||
77 | select PXA25x | ||
78 | select PXA_SHARP_C7xx | ||
79 | |||
80 | config MACH_SHEPHERD | ||
81 | bool "Enable Sharp SL-C750 (Shepherd) Support" | ||
82 | depends on PXA_SHARPSL | ||
83 | select PXA25x | ||
84 | select PXA_SHARP_C7xx | ||
85 | |||
86 | config MACH_HUSKY | ||
87 | bool "Enable Sharp SL-C760 (Husky) Support" | ||
88 | depends on PXA_SHARPSL | ||
89 | select PXA25x | ||
90 | select PXA_SHARP_C7xx | ||
91 | |||
92 | config MACH_AKITA | ||
93 | bool "Enable Sharp SL-1000 (Akita) Support" | ||
94 | depends on PXA_SHARPSL | ||
95 | select PXA27x | ||
96 | select PXA_SHARP_Cxx00 | ||
97 | select MACH_SPITZ | ||
98 | select I2C | ||
99 | select I2C_PXA | ||
100 | |||
101 | config MACH_SPITZ | ||
102 | bool "Enable Sharp Zaurus SL-3000 (Spitz) Support" | ||
103 | depends on PXA_SHARPSL | ||
104 | select PXA27x | ||
105 | select PXA_SHARP_Cxx00 | ||
106 | |||
107 | config MACH_BORZOI | ||
108 | bool "Enable Sharp Zaurus SL-3100 (Borzoi) Support" | ||
109 | depends on PXA_SHARPSL | ||
110 | select PXA27x | ||
111 | select PXA_SHARP_Cxx00 | ||
112 | |||
113 | config MACH_TOSA | ||
114 | bool "Enable Sharp SL-6000x (Tosa) Support" | ||
115 | depends on PXA_SHARPSL | ||
116 | select PXA25x | ||
117 | |||
61 | config ARCH_PXA_ESERIES | 118 | config ARCH_PXA_ESERIES |
62 | bool "PXA based Toshiba e-series PDAs" | 119 | bool "PXA based Toshiba e-series PDAs" |
63 | select PXA25x | 120 | select PXA25x |
@@ -70,10 +127,19 @@ config MACH_E330 | |||
70 | Say Y here if you intend to run this kernel on a Toshiba | 127 | Say Y here if you intend to run this kernel on a Toshiba |
71 | e330 family PDA. | 128 | e330 family PDA. |
72 | 129 | ||
130 | config MACH_E350 | ||
131 | bool "Toshiba e350" | ||
132 | default y | ||
133 | depends on ARCH_PXA_ESERIES | ||
134 | help | ||
135 | Say Y here if you intend to run this kernel on a Toshiba | ||
136 | e350 family PDA. | ||
137 | |||
73 | config MACH_E740 | 138 | config MACH_E740 |
74 | bool "Toshiba e740" | 139 | bool "Toshiba e740" |
75 | default y | 140 | default y |
76 | depends on ARCH_PXA_ESERIES | 141 | depends on ARCH_PXA_ESERIES |
142 | select FB_W100 | ||
77 | help | 143 | help |
78 | Say Y here if you intend to run this kernel on a Toshiba | 144 | Say Y here if you intend to run this kernel on a Toshiba |
79 | e740 family PDA. | 145 | e740 family PDA. |
@@ -82,6 +148,7 @@ config MACH_E750 | |||
82 | bool "Toshiba e750" | 148 | bool "Toshiba e750" |
83 | default y | 149 | default y |
84 | depends on ARCH_PXA_ESERIES | 150 | depends on ARCH_PXA_ESERIES |
151 | select FB_W100 | ||
85 | help | 152 | help |
86 | Say Y here if you intend to run this kernel on a Toshiba | 153 | Say Y here if you intend to run this kernel on a Toshiba |
87 | e750 family PDA. | 154 | e750 family PDA. |
@@ -98,6 +165,7 @@ config MACH_E800 | |||
98 | bool "Toshiba e800" | 165 | bool "Toshiba e800" |
99 | default y | 166 | default y |
100 | depends on ARCH_PXA_ESERIES | 167 | depends on ARCH_PXA_ESERIES |
168 | select FB_W100 | ||
101 | help | 169 | help |
102 | Say Y here if you intend to run this kernel on a Toshiba | 170 | Say Y here if you intend to run this kernel on a Toshiba |
103 | e800 family PDA. | 171 | e800 family PDA. |
@@ -106,6 +174,10 @@ config MACH_TRIZEPS4 | |||
106 | bool "Keith und Koep Trizeps4 DIMM-Module" | 174 | bool "Keith und Koep Trizeps4 DIMM-Module" |
107 | select PXA27x | 175 | select PXA27x |
108 | 176 | ||
177 | config MACH_TRIZEPS4_CONXS | ||
178 | bool "ConXS Eval Board" | ||
179 | depends on MACH_TRIZEPS4 | ||
180 | |||
109 | config MACH_EM_X270 | 181 | config MACH_EM_X270 |
110 | bool "CompuLab EM-x270 platform" | 182 | bool "CompuLab EM-x270 platform" |
111 | select PXA27x | 183 | select PXA27x |
@@ -115,7 +187,7 @@ config MACH_COLIBRI | |||
115 | select PXA27x | 187 | select PXA27x |
116 | 188 | ||
117 | config MACH_ZYLONITE | 189 | config MACH_ZYLONITE |
118 | bool "PXA3xx Development Platform" | 190 | bool "PXA3xx Development Platform (aka Zylonite)" |
119 | select PXA3xx | 191 | select PXA3xx |
120 | select HAVE_PWM | 192 | select HAVE_PWM |
121 | 193 | ||
@@ -124,6 +196,16 @@ config MACH_LITTLETON | |||
124 | select PXA3xx | 196 | select PXA3xx |
125 | select PXA_SSP | 197 | select PXA_SSP |
126 | 198 | ||
199 | config MACH_TAVOREVB | ||
200 | bool "PXA930 Evaluation Board (aka TavorEVB)" | ||
201 | select PXA3xx | ||
202 | select PXA930 | ||
203 | |||
204 | config MACH_SAAR | ||
205 | bool "PXA930 Handheld Platform (aka SAAR)" | ||
206 | select PXA3xx | ||
207 | select PXA930 | ||
208 | |||
127 | config MACH_ARMCORE | 209 | config MACH_ARMCORE |
128 | bool "CompuLab CM-X270 modules" | 210 | bool "CompuLab CM-X270 modules" |
129 | select PXA27x | 211 | select PXA27x |
@@ -131,7 +213,6 @@ config MACH_ARMCORE | |||
131 | 213 | ||
132 | config MACH_MAGICIAN | 214 | config MACH_MAGICIAN |
133 | bool "Enable HTC Magician Support" | 215 | bool "Enable HTC Magician Support" |
134 | depends on ARCH_PXA | ||
135 | select PXA27x | 216 | select PXA27x |
136 | select IWMMXT | 217 | select IWMMXT |
137 | 218 | ||
@@ -139,18 +220,26 @@ config MACH_PCM027 | |||
139 | bool "Phytec phyCORE-PXA270 CPU module (PCM-027)" | 220 | bool "Phytec phyCORE-PXA270 CPU module (PCM-027)" |
140 | select PXA27x | 221 | select PXA27x |
141 | select IWMMXT | 222 | select IWMMXT |
223 | select PXA_SSP | ||
142 | 224 | ||
143 | endmenu | 225 | config ARCH_PXA_PALM |
226 | bool "PXA based Palm PDAs" | ||
227 | select HAVE_PWM | ||
144 | 228 | ||
145 | choice | 229 | config MACH_PALMTX |
146 | prompt "Used baseboard" | 230 | bool "Palm T|X" |
147 | depends on MACH_PCM027 | 231 | default y |
232 | depends on ARCH_PXA_PALM | ||
233 | select PXA27x | ||
234 | select IWMMXT | ||
235 | help | ||
236 | Say Y here if you intend to run this kernel on a Palm T|X | ||
237 | handheld computer. | ||
148 | 238 | ||
149 | config MACH_PCM990_BASEBOARD | 239 | config MACH_PCM990_BASEBOARD |
150 | bool "PHYTEC PCM-990 development board" | 240 | bool "PHYTEC PCM-990 development board" |
151 | select HAVE_PWM | 241 | select HAVE_PWM |
152 | 242 | depends on MACH_PCM027 | |
153 | endchoice | ||
154 | 243 | ||
155 | choice | 244 | choice |
156 | prompt "display on pcm990" | 245 | prompt "display on pcm990" |
@@ -167,88 +256,45 @@ config PCM990_DISPLAY_NONE | |||
167 | 256 | ||
168 | endchoice | 257 | endchoice |
169 | 258 | ||
170 | if ARCH_GUMSTIX | ||
171 | |||
172 | choice | ||
173 | prompt "Select target Gumstix board" | ||
174 | |||
175 | config MACH_GUMSTIX_F | ||
176 | bool "Basix, Connex, ws-200ax, ws-400ax systems" | ||
177 | select PXA25x | ||
178 | |||
179 | endchoice | ||
180 | |||
181 | endif | ||
182 | 259 | ||
260 | config PXA_EZX | ||
261 | bool "Motorola EZX Platform" | ||
262 | select PXA27x | ||
263 | select IWMMXT | ||
264 | select HAVE_PWM | ||
183 | 265 | ||
184 | if MACH_TRIZEPS4 | 266 | config MACH_EZX_A780 |
267 | bool "Motorola EZX A780" | ||
268 | default y | ||
269 | depends on PXA_EZX | ||
185 | 270 | ||
186 | choice | 271 | config MACH_EZX_E680 |
187 | prompt "Select base board for Trizeps 4 module" | 272 | bool "Motorola EZX E680" |
273 | default y | ||
274 | depends on PXA_EZX | ||
188 | 275 | ||
189 | config MACH_TRIZEPS4_CONXS | 276 | config MACH_EZX_A1200 |
190 | bool "ConXS Eval Board" | 277 | bool "Motorola EZX A1200" |
278 | default y | ||
279 | depends on PXA_EZX | ||
191 | 280 | ||
192 | config MACH_TRIZEPS4_ANY | 281 | config MACH_EZX_A910 |
193 | bool "another Board" | 282 | bool "Motorola EZX A910" |
283 | default y | ||
284 | depends on PXA_EZX | ||
194 | 285 | ||
195 | endchoice | 286 | config MACH_EZX_E6 |
287 | bool "Motorola EZX E6" | ||
288 | default y | ||
289 | depends on PXA_EZX | ||
196 | 290 | ||
197 | endif | 291 | config MACH_EZX_E2 |
292 | bool "Motorola EZX E2" | ||
293 | default y | ||
294 | depends on PXA_EZX | ||
198 | 295 | ||
199 | endmenu | 296 | endmenu |
200 | 297 | ||
201 | config MACH_POODLE | ||
202 | bool "Enable Sharp SL-5600 (Poodle) Support" | ||
203 | depends on PXA_SHARPSL | ||
204 | select PXA25x | ||
205 | select SHARP_LOCOMO | ||
206 | select PXA_SSP | ||
207 | |||
208 | config MACH_CORGI | ||
209 | bool "Enable Sharp SL-C700 (Corgi) Support" | ||
210 | depends on PXA_SHARPSL | ||
211 | select PXA25x | ||
212 | select PXA_SHARP_C7xx | ||
213 | |||
214 | config MACH_SHEPHERD | ||
215 | bool "Enable Sharp SL-C750 (Shepherd) Support" | ||
216 | depends on PXA_SHARPSL | ||
217 | select PXA25x | ||
218 | select PXA_SHARP_C7xx | ||
219 | |||
220 | config MACH_HUSKY | ||
221 | bool "Enable Sharp SL-C760 (Husky) Support" | ||
222 | depends on PXA_SHARPSL | ||
223 | select PXA25x | ||
224 | select PXA_SHARP_C7xx | ||
225 | |||
226 | config MACH_AKITA | ||
227 | bool "Enable Sharp SL-1000 (Akita) Support" | ||
228 | depends on PXA_SHARPSL | ||
229 | select PXA27x | ||
230 | select PXA_SHARP_Cxx00 | ||
231 | select MACH_SPITZ | ||
232 | select I2C | ||
233 | select I2C_PXA | ||
234 | |||
235 | config MACH_SPITZ | ||
236 | bool "Enable Sharp Zaurus SL-3000 (Spitz) Support" | ||
237 | depends on PXA_SHARPSL | ||
238 | select PXA27x | ||
239 | select PXA_SHARP_Cxx00 | ||
240 | |||
241 | config MACH_BORZOI | ||
242 | bool "Enable Sharp Zaurus SL-3100 (Borzoi) Support" | ||
243 | depends on PXA_SHARPSL | ||
244 | select PXA27x | ||
245 | select PXA_SHARP_Cxx00 | ||
246 | |||
247 | config MACH_TOSA | ||
248 | bool "Enable Sharp SL-6000x (Tosa) Support" | ||
249 | depends on PXA_SHARPSL | ||
250 | select PXA25x | ||
251 | |||
252 | config PXA25x | 298 | config PXA25x |
253 | bool | 299 | bool |
254 | help | 300 | help |
@@ -288,4 +334,13 @@ config PXA_PWM | |||
288 | default BACKLIGHT_PWM | 334 | default BACKLIGHT_PWM |
289 | help | 335 | help |
290 | Enable support for PXA2xx/PXA3xx PWM controllers | 336 | Enable support for PXA2xx/PXA3xx PWM controllers |
337 | |||
338 | config TOSA_BT | ||
339 | tristate "Control the state of built-in bluetooth chip on Sharp SL-6000" | ||
340 | depends on MACH_TOSA | ||
341 | select RFKILL | ||
342 | help | ||
343 | This is a simple driver that is able to control | ||
344 | the state of the built-in Bluetooth chip on Tosa. | ||
345 | |||
291 | endif | 346 | endif |
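
These Kconfig entries only decide which board files get compiled (see the Makefile hunk below); code that is shared between several PXA boards usually distinguishes them at run time with the machine_is_*() helpers generated from the ARM machine registry. A hedged sketch, assuming CONFIG_MACH_TOSA and the generated machine_is_tosa() helper; the quirk function itself is hypothetical.

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/mach-types.h>

static int __init pxa_board_quirks(void)
{
	/* machine_is_tosa() evaluates to 0 when CONFIG_MACH_TOSA is off */
	if (machine_is_tosa())
		printk(KERN_INFO "Applying Tosa-specific quirks\n");

	return 0;
}
arch_initcall(pxa_board_quirks);
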
diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile index c4dfbe87fc4e..99ecbe7f8506 100644 --- a/arch/arm/mach-pxa/Makefile +++ b/arch/arm/mach-pxa/Makefile | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | # Common support (must be linked before board specific support) | 5 | # Common support (must be linked before board specific support) |
6 | obj-y += clock.o devices.o generic.o irq.o dma.o \ | 6 | obj-y += clock.o devices.o generic.o irq.o dma.o \ |
7 | time.o gpio.o | 7 | time.o gpio.o reset.o |
8 | obj-$(CONFIG_PM) += pm.o sleep.o standby.o | 8 | obj-$(CONFIG_PM) += pm.o sleep.o standby.o |
9 | obj-$(CONFIG_CPU_FREQ) += cpu-pxa.o | 9 | obj-$(CONFIG_CPU_FREQ) += cpu-pxa.o |
10 | 10 | ||
@@ -18,6 +18,7 @@ obj-$(CONFIG_PXA27x) += mfp-pxa2xx.o pxa2xx.o pxa27x.o | |||
18 | obj-$(CONFIG_PXA3xx) += mfp-pxa3xx.o pxa3xx.o smemc.o | 18 | obj-$(CONFIG_PXA3xx) += mfp-pxa3xx.o pxa3xx.o smemc.o |
19 | obj-$(CONFIG_CPU_PXA300) += pxa300.o | 19 | obj-$(CONFIG_CPU_PXA300) += pxa300.o |
20 | obj-$(CONFIG_CPU_PXA320) += pxa320.o | 20 | obj-$(CONFIG_CPU_PXA320) += pxa320.o |
21 | obj-$(CONFIG_CPU_PXA930) += pxa930.o | ||
21 | 22 | ||
22 | # Specific board support | 23 | # Specific board support |
23 | obj-$(CONFIG_ARCH_GUMSTIX) += gumstix.o | 24 | obj-$(CONFIG_ARCH_GUMSTIX) += gumstix.o |
@@ -36,7 +37,12 @@ obj-$(CONFIG_MACH_PCM990_BASEBOARD) += pcm990-baseboard.o | |||
36 | obj-$(CONFIG_MACH_TOSA) += tosa.o | 37 | obj-$(CONFIG_MACH_TOSA) += tosa.o |
37 | obj-$(CONFIG_MACH_EM_X270) += em-x270.o | 38 | obj-$(CONFIG_MACH_EM_X270) += em-x270.o |
38 | obj-$(CONFIG_MACH_MAGICIAN) += magician.o | 39 | obj-$(CONFIG_MACH_MAGICIAN) += magician.o |
39 | obj-$(CONFIG_ARCH_PXA_ESERIES) += eseries.o | 40 | obj-$(CONFIG_ARCH_PXA_ESERIES) += eseries.o eseries_udc.o |
41 | obj-$(CONFIG_MACH_E740) += e740_lcd.o | ||
42 | obj-$(CONFIG_MACH_E750) += e750_lcd.o | ||
43 | obj-$(CONFIG_MACH_E400) += e400_lcd.o | ||
44 | obj-$(CONFIG_MACH_E800) += e800_lcd.o | ||
45 | obj-$(CONFIG_MACH_PALMTX) += palmtx.o | ||
40 | 46 | ||
41 | ifeq ($(CONFIG_MACH_ZYLONITE),y) | 47 | ifeq ($(CONFIG_MACH_ZYLONITE),y) |
42 | obj-y += zylonite.o | 48 | obj-y += zylonite.o |
@@ -44,8 +50,11 @@ ifeq ($(CONFIG_MACH_ZYLONITE),y) | |||
44 | obj-$(CONFIG_CPU_PXA320) += zylonite_pxa320.o | 50 | obj-$(CONFIG_CPU_PXA320) += zylonite_pxa320.o |
45 | endif | 51 | endif |
46 | obj-$(CONFIG_MACH_LITTLETON) += littleton.o | 52 | obj-$(CONFIG_MACH_LITTLETON) += littleton.o |
53 | obj-$(CONFIG_MACH_TAVOREVB) += tavorevb.o | ||
54 | obj-$(CONFIG_MACH_SAAR) += saar.o | ||
47 | 55 | ||
48 | obj-$(CONFIG_MACH_ARMCORE) += cm-x270.o | 56 | obj-$(CONFIG_MACH_ARMCORE) += cm-x270.o |
57 | obj-$(CONFIG_PXA_EZX) += ezx.o | ||
49 | 58 | ||
50 | # Support for blinky lights | 59 | # Support for blinky lights |
51 | led-y := leds.o | 60 | led-y := leds.o |
@@ -59,3 +68,5 @@ obj-$(CONFIG_LEDS) += $(led-y) | |||
59 | ifeq ($(CONFIG_PCI),y) | 68 | ifeq ($(CONFIG_PCI),y) |
60 | obj-$(CONFIG_MACH_ARMCORE) += cm-x270-pci.o | 69 | obj-$(CONFIG_MACH_ARMCORE) += cm-x270-pci.o |
61 | endif | 70 | endif |
71 | |||
72 | obj-$(CONFIG_TOSA_BT) += tosa-bt.o | ||
diff --git a/arch/arm/mach-pxa/clock.c b/arch/arm/mach-pxa/clock.c index b4d04955dcb0..630063ffa6fc 100644 --- a/arch/arm/mach-pxa/clock.c +++ b/arch/arm/mach-pxa/clock.c | |||
@@ -101,21 +101,6 @@ unsigned long clk_get_rate(struct clk *clk) | |||
101 | EXPORT_SYMBOL(clk_get_rate); | 101 | EXPORT_SYMBOL(clk_get_rate); |
102 | 102 | ||
103 | 103 | ||
104 | static void clk_gpio27_enable(struct clk *clk) | ||
105 | { | ||
106 | pxa_gpio_mode(GPIO11_3_6MHz_MD); | ||
107 | } | ||
108 | |||
109 | static void clk_gpio27_disable(struct clk *clk) | ||
110 | { | ||
111 | } | ||
112 | |||
113 | static const struct clkops clk_gpio27_ops = { | ||
114 | .enable = clk_gpio27_enable, | ||
115 | .disable = clk_gpio27_disable, | ||
116 | }; | ||
117 | |||
118 | |||
119 | void clk_cken_enable(struct clk *clk) | 104 | void clk_cken_enable(struct clk *clk) |
120 | { | 105 | { |
121 | CKEN |= 1 << clk->cken; | 106 | CKEN |= 1 << clk->cken; |
@@ -131,14 +116,6 @@ const struct clkops clk_cken_ops = { | |||
131 | .disable = clk_cken_disable, | 116 | .disable = clk_cken_disable, |
132 | }; | 117 | }; |
133 | 118 | ||
134 | static struct clk common_clks[] = { | ||
135 | { | ||
136 | .name = "GPIO27_CLK", | ||
137 | .ops = &clk_gpio27_ops, | ||
138 | .rate = 3686400, | ||
139 | }, | ||
140 | }; | ||
141 | |||
142 | void clks_register(struct clk *clks, size_t num) | 119 | void clks_register(struct clk *clks, size_t num) |
143 | { | 120 | { |
144 | int i; | 121 | int i; |
@@ -148,10 +125,3 @@ void clks_register(struct clk *clks, size_t num) | |||
148 | list_add(&clks[i].node, &clocks); | 125 | list_add(&clks[i].node, &clocks); |
149 | mutex_unlock(&clocks_mutex); | 126 | mutex_unlock(&clocks_mutex); |
150 | } | 127 | } |
151 | |||
152 | static int __init clk_init(void) | ||
153 | { | ||
154 | clks_register(common_clks, ARRAY_SIZE(common_clks)); | ||
155 | return 0; | ||
156 | } | ||
157 | arch_initcall(clk_init); | ||
diff --git a/arch/arm/mach-pxa/clock.h b/arch/arm/mach-pxa/clock.h index 83cbfaba485d..1ec8f9178aaf 100644 --- a/arch/arm/mach-pxa/clock.h +++ b/arch/arm/mach-pxa/clock.h | |||
@@ -47,9 +47,42 @@ struct clk { | |||
47 | .other = _other, \ | 47 | .other = _other, \ |
48 | } | 48 | } |
49 | 49 | ||
50 | #define INIT_CLK(_name, _ops, _rate, _delay, _dev) \ | ||
51 | { \ | ||
52 | .name = _name, \ | ||
53 | .dev = _dev, \ | ||
54 | .ops = _ops, \ | ||
55 | .rate = _rate, \ | ||
56 | .delay = _delay, \ | ||
57 | } | ||
58 | |||
50 | extern const struct clkops clk_cken_ops; | 59 | extern const struct clkops clk_cken_ops; |
51 | 60 | ||
52 | void clk_cken_enable(struct clk *clk); | 61 | void clk_cken_enable(struct clk *clk); |
53 | void clk_cken_disable(struct clk *clk); | 62 | void clk_cken_disable(struct clk *clk); |
54 | 63 | ||
64 | #ifdef CONFIG_PXA3xx | ||
65 | #define PXA3xx_CKEN(_name, _cken, _rate, _delay, _dev) \ | ||
66 | { \ | ||
67 | .name = _name, \ | ||
68 | .dev = _dev, \ | ||
69 | .ops = &clk_pxa3xx_cken_ops, \ | ||
70 | .rate = _rate, \ | ||
71 | .cken = CKEN_##_cken, \ | ||
72 | .delay = _delay, \ | ||
73 | } | ||
74 | |||
75 | #define PXA3xx_CK(_name, _cken, _ops, _dev) \ | ||
76 | { \ | ||
77 | .name = _name, \ | ||
78 | .dev = _dev, \ | ||
79 | .ops = _ops, \ | ||
80 | .cken = CKEN_##_cken, \ | ||
81 | } | ||
82 | |||
83 | extern const struct clkops clk_pxa3xx_cken_ops; | ||
84 | extern void clk_pxa3xx_cken_enable(struct clk *); | ||
85 | extern void clk_pxa3xx_cken_disable(struct clk *); | ||
86 | #endif | ||
87 | |||
55 | void clks_register(struct clk *clks, size_t num); | 88 | void clks_register(struct clk *clks, size_t num); |
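
With the board-specific GPIO27 clock dropped from clock.c, platform code is now expected to declare its own struct clk table and hand it to clks_register(), typically from an arch_initcall as the removed clk_init() did. A minimal sketch using the INIT_CLK() helper introduced above; the clock name, rate and ops are illustrative only.

#include <linux/init.h>
#include <linux/kernel.h>
#include "clock.h"	/* struct clk, struct clkops, INIT_CLK(), clks_register() */

static void board_clk_enable(struct clk *clk)
{
	/* board-specific gating (e.g. a GPIO alternate function) */
}

static void board_clk_disable(struct clk *clk)
{
}

static const struct clkops board_clk_ops = {
	.enable		= board_clk_enable,
	.disable	= board_clk_disable,
};

static struct clk board_clks[] = {
	/* 3.6864 MHz reference clock, no enable delay, no bound device */
	INIT_CLK("GPIO27_CLK", &board_clk_ops, 3686400, 0, NULL),
};

static int __init board_clk_init(void)
{
	clks_register(board_clks, ARRAY_SIZE(board_clks));
	return 0;
}
arch_initcall(board_clk_init);
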
diff --git a/arch/arm/mach-pxa/cm-x270-pci.c b/arch/arm/mach-pxa/cm-x270-pci.c index 319c9ff3ab9a..bcf0cde6ccc9 100644 --- a/arch/arm/mach-pxa/cm-x270-pci.c +++ b/arch/arm/mach-pxa/cm-x270-pci.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * | 5 | * |
6 | * Bits taken from various places. | 6 | * Bits taken from various places. |
7 | * | 7 | * |
8 | * Copyright (C) 2007 Compulab, Ltd. | 8 | * Copyright (C) 2007, 2008 Compulab, Ltd. |
9 | * Mike Rapoport <mike@compulab.co.il> | 9 | * Mike Rapoport <mike@compulab.co.il> |
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
@@ -19,16 +19,16 @@ | |||
19 | #include <linux/device.h> | 19 | #include <linux/device.h> |
20 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
21 | #include <linux/irq.h> | 21 | #include <linux/irq.h> |
22 | #include <linux/gpio.h> | ||
22 | 23 | ||
23 | #include <asm/mach/pci.h> | 24 | #include <asm/mach/pci.h> |
24 | #include <asm/arch/cm-x270.h> | ||
25 | #include <asm/arch/pxa-regs.h> | 25 | #include <asm/arch/pxa-regs.h> |
26 | #include <asm/arch/pxa2xx-gpio.h> | ||
27 | #include <asm/mach-types.h> | 26 | #include <asm/mach-types.h> |
28 | 27 | ||
29 | #include <asm/hardware/it8152.h> | 28 | #include <asm/hardware/it8152.h> |
30 | 29 | ||
31 | unsigned long it8152_base_address = CMX270_IT8152_VIRT; | 30 | unsigned long it8152_base_address; |
31 | static int cmx270_it8152_irq_gpio; | ||
32 | 32 | ||
33 | /* | 33 | /* |
34 | * Only first 64MB of memory can be accessed via PCI. | 34 | * Only first 64MB of memory can be accessed via PCI. |
@@ -42,7 +42,7 @@ void __init cmx270_pci_adjust_zones(int node, unsigned long *zone_size, | |||
42 | unsigned int sz = SZ_64M >> PAGE_SHIFT; | 42 | unsigned int sz = SZ_64M >> PAGE_SHIFT; |
43 | 43 | ||
44 | if (machine_is_armcore()) { | 44 | if (machine_is_armcore()) { |
45 | pr_info("Adjusting zones for CM-x270\n"); | 45 | pr_info("Adjusting zones for CM-X270\n"); |
46 | 46 | ||
47 | /* | 47 | /* |
48 | * Only adjust if > 64M on current system | 48 | * Only adjust if > 64M on current system |
@@ -60,19 +60,20 @@ void __init cmx270_pci_adjust_zones(int node, unsigned long *zone_size, | |||
60 | static void cmx270_it8152_irq_demux(unsigned int irq, struct irq_desc *desc) | 60 | static void cmx270_it8152_irq_demux(unsigned int irq, struct irq_desc *desc) |
61 | { | 61 | { |
62 | /* clear our parent irq */ | 62 | /* clear our parent irq */ |
63 | GEDR(GPIO_IT8152_IRQ) = GPIO_bit(GPIO_IT8152_IRQ); | 63 | GEDR(cmx270_it8152_irq_gpio) = GPIO_bit(cmx270_it8152_irq_gpio); |
64 | 64 | ||
65 | it8152_irq_demux(irq, desc); | 65 | it8152_irq_demux(irq, desc); |
66 | } | 66 | } |
67 | 67 | ||
68 | void __cmx270_pci_init_irq(void) | 68 | void __cmx270_pci_init_irq(int irq_gpio) |
69 | { | 69 | { |
70 | it8152_init_irq(); | 70 | it8152_init_irq(); |
71 | pxa_gpio_mode(IRQ_TO_GPIO(GPIO_IT8152_IRQ)); | ||
72 | set_irq_type(IRQ_GPIO(GPIO_IT8152_IRQ), IRQT_RISING); | ||
73 | 71 | ||
74 | set_irq_chained_handler(IRQ_GPIO(GPIO_IT8152_IRQ), | 72 | cmx270_it8152_irq_gpio = irq_gpio; |
75 | cmx270_it8152_irq_demux); | 73 | |
74 | set_irq_type(gpio_to_irq(irq_gpio), IRQT_RISING); | ||
75 | |||
76 | set_irq_chained_handler(gpio_to_irq(irq_gpio), cmx270_it8152_irq_demux); | ||
76 | } | 77 | } |
77 | 78 | ||
78 | #ifdef CONFIG_PM | 79 | #ifdef CONFIG_PM |
@@ -115,8 +116,8 @@ static int __init cmx270_pci_map_irq(struct pci_dev *dev, u8 slot, u8 pin) | |||
115 | 116 | ||
116 | /* | 117 | /* |
117 | Here comes the ugly part. The routing is baseboard specific, | 118 | Here comes the ugly part. The routing is baseboard specific, |
118 | but defining a platform for each possible base of CM-x270 is | 119 | but defining a platform for each possible base of CM-X270 is |
119 | unrealistic. Here we keep mapping for ATXBase and SB-x270. | 120 | unrealistic. Here we keep mapping for ATXBase and SB-X270. |
120 | */ | 121 | */ |
121 | /* ATXBASE PCI slot */ | 122 | /* ATXBASE PCI slot */ |
122 | if (slot == 7) | 123 | if (slot == 7) |
diff --git a/arch/arm/mach-pxa/cm-x270-pci.h b/arch/arm/mach-pxa/cm-x270-pci.h index ffe37b66f9a0..48f532f4cb51 100644 --- a/arch/arm/mach-pxa/cm-x270-pci.h +++ b/arch/arm/mach-pxa/cm-x270-pci.h | |||
@@ -1,13 +1,13 @@ | |||
1 | extern void __cmx270_pci_init_irq(void); | 1 | extern void __cmx270_pci_init_irq(int irq_gpio); |
2 | extern void __cmx270_pci_suspend(void); | 2 | extern void __cmx270_pci_suspend(void); |
3 | extern void __cmx270_pci_resume(void); | 3 | extern void __cmx270_pci_resume(void); |
4 | 4 | ||
5 | #ifdef CONFIG_PCI | 5 | #ifdef CONFIG_PCI |
6 | #define cmx270_pci_init_irq __cmx270_pci_init_irq | 6 | #define cmx270_pci_init_irq(x) __cmx270_pci_init_irq(x) |
7 | #define cmx270_pci_suspend __cmx270_pci_suspend | 7 | #define cmx270_pci_suspend(x) __cmx270_pci_suspend(x) |
8 | #define cmx270_pci_resume __cmx270_pci_resume | 8 | #define cmx270_pci_resume(x) __cmx270_pci_resume(x) |
9 | #else | 9 | #else |
10 | #define cmx270_pci_init_irq() do {} while (0) | 10 | #define cmx270_pci_init_irq(x) do {} while (0) |
11 | #define cmx270_pci_suspend() do {} while (0) | 11 | #define cmx270_pci_suspend(x) do {} while (0) |
12 | #define cmx270_pci_resume() do {} while (0) | 12 | #define cmx270_pci_resume(x) do {} while (0) |
13 | #endif | 13 | #endif |
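
The point of these function-like macros is that board code can keep unconditional call sites: with CONFIG_PCI enabled they forward to the real __cmx270_pci_* functions, otherwise they collapse into empty do {} while (0) statements. A hedged sketch of the calling side; the helper names are illustrative.

#include "cm-x270-pci.h"

/* Hypothetical board PM helpers; they become no-ops when CONFIG_PCI=n. */
static void board_enter_sleep(void)
{
	cmx270_pci_suspend();
}

static void board_leave_sleep(void)
{
	cmx270_pci_resume();
}
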
diff --git a/arch/arm/mach-pxa/cm-x270.c b/arch/arm/mach-pxa/cm-x270.c index 01b9964acec1..402e807eae54 100644 --- a/arch/arm/mach-pxa/cm-x270.c +++ b/arch/arm/mach-pxa/cm-x270.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * linux/arch/arm/mach-pxa/cm-x270.c | 2 | * linux/arch/arm/mach-pxa/cm-x270.c |
3 | * | 3 | * |
4 | * Copyright (C) 2007 CompuLab, Ltd. | 4 | * Copyright (C) 2007, 2008 CompuLab, Ltd. |
5 | * Mike Rapoport <mike@compulab.co.il> | 5 | * Mike Rapoport <mike@compulab.co.il> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
@@ -9,44 +9,156 @@ | |||
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/pm.h> | ||
14 | #include <linux/fb.h> | ||
15 | #include <linux/platform_device.h> | 12 | #include <linux/platform_device.h> |
16 | #include <linux/irq.h> | ||
17 | #include <linux/sysdev.h> | 13 | #include <linux/sysdev.h> |
18 | #include <linux/io.h> | 14 | #include <linux/irq.h> |
19 | #include <linux/delay.h> | 15 | #include <linux/gpio.h> |
20 | 16 | ||
21 | #include <linux/dm9000.h> | 17 | #include <linux/dm9000.h> |
22 | #include <linux/rtc-v3020.h> | 18 | #include <linux/rtc-v3020.h> |
23 | #include <linux/serial_8250.h> | ||
24 | |||
25 | #include <video/mbxfb.h> | 19 | #include <video/mbxfb.h> |
20 | #include <linux/leds.h> | ||
26 | 21 | ||
27 | #include <asm/mach/arch.h> | 22 | #include <asm/mach/arch.h> |
28 | #include <asm/mach-types.h> | 23 | #include <asm/mach-types.h> |
29 | #include <asm/mach/map.h> | 24 | #include <asm/mach/map.h> |
30 | 25 | ||
31 | #include <asm/arch/pxa-regs.h> | ||
32 | #include <asm/arch/pxa2xx-regs.h> | 26 | #include <asm/arch/pxa2xx-regs.h> |
33 | #include <asm/arch/pxa2xx-gpio.h> | 27 | #include <asm/arch/mfp-pxa27x.h> |
28 | #include <asm/arch/pxa-regs.h> | ||
34 | #include <asm/arch/audio.h> | 29 | #include <asm/arch/audio.h> |
35 | #include <asm/arch/pxafb.h> | 30 | #include <asm/arch/pxafb.h> |
36 | #include <asm/arch/ohci.h> | 31 | #include <asm/arch/ohci.h> |
37 | #include <asm/arch/mmc.h> | 32 | #include <asm/arch/mmc.h> |
38 | #include <asm/arch/bitfield.h> | 33 | #include <asm/arch/bitfield.h> |
39 | #include <asm/arch/cm-x270.h> | ||
40 | 34 | ||
41 | #include <asm/hardware/it8152.h> | 35 | #include <asm/hardware/it8152.h> |
42 | 36 | ||
43 | #include "generic.h" | 37 | #include "generic.h" |
44 | #include "cm-x270-pci.h" | 38 | #include "cm-x270-pci.h" |
45 | 39 | ||
40 | /* virtual addresses for statically mapped regions */ | ||
41 | #define CMX270_VIRT_BASE (0xe8000000) | ||
42 | #define CMX270_IT8152_VIRT (CMX270_VIRT_BASE) | ||
43 | |||
46 | #define RTC_PHYS_BASE (PXA_CS1_PHYS + (5 << 22)) | 44 | #define RTC_PHYS_BASE (PXA_CS1_PHYS + (5 << 22)) |
47 | #define DM9000_PHYS_BASE (PXA_CS1_PHYS + (6 << 22)) | 45 | #define DM9000_PHYS_BASE (PXA_CS1_PHYS + (6 << 22)) |
48 | 46 | ||
49 | static struct resource cmx270_dm9k_resource[] = { | 47 | /* GPIO IRQ usage */ |
48 | #define GPIO10_ETHIRQ (10) | ||
49 | #define GPIO22_IT8152_IRQ (22) | ||
50 | #define GPIO83_MMC_IRQ (83) | ||
51 | #define GPIO95_GFXIRQ (95) | ||
52 | |||
53 | #define CMX270_ETHIRQ IRQ_GPIO(GPIO10_ETHIRQ) | ||
54 | #define CMX270_IT8152_IRQ IRQ_GPIO(GPIO22_IT8152_IRQ) | ||
55 | #define CMX270_MMC_IRQ IRQ_GPIO(GPIO83_MMC_IRQ) | ||
56 | #define CMX270_GFXIRQ IRQ_GPIO(GPIO95_GFXIRQ) | ||
57 | |||
58 | /* MMC power enable */ | ||
59 | #define GPIO105_MMC_POWER (105) | ||
60 | |||
61 | static unsigned long cmx270_pin_config[] = { | ||
62 | /* AC'97 */ | ||
63 | GPIO28_AC97_BITCLK, | ||
64 | GPIO29_AC97_SDATA_IN_0, | ||
65 | GPIO30_AC97_SDATA_OUT, | ||
66 | GPIO31_AC97_SYNC, | ||
67 | GPIO98_AC97_SYSCLK, | ||
68 | GPIO113_AC97_nRESET, | ||
69 | |||
70 | /* BTUART */ | ||
71 | GPIO42_BTUART_RXD, | ||
72 | GPIO43_BTUART_TXD, | ||
73 | GPIO44_BTUART_CTS, | ||
74 | GPIO45_BTUART_RTS, | ||
75 | |||
76 | /* STUART */ | ||
77 | GPIO46_STUART_RXD, | ||
78 | GPIO47_STUART_TXD, | ||
79 | |||
80 | /* MCI controller */ | ||
81 | GPIO32_MMC_CLK, | ||
82 | GPIO112_MMC_CMD, | ||
83 | GPIO92_MMC_DAT_0, | ||
84 | GPIO109_MMC_DAT_1, | ||
85 | GPIO110_MMC_DAT_2, | ||
86 | GPIO111_MMC_DAT_3, | ||
87 | |||
88 | /* LCD */ | ||
89 | GPIO58_LCD_LDD_0, | ||
90 | GPIO59_LCD_LDD_1, | ||
91 | GPIO60_LCD_LDD_2, | ||
92 | GPIO61_LCD_LDD_3, | ||
93 | GPIO62_LCD_LDD_4, | ||
94 | GPIO63_LCD_LDD_5, | ||
95 | GPIO64_LCD_LDD_6, | ||
96 | GPIO65_LCD_LDD_7, | ||
97 | GPIO66_LCD_LDD_8, | ||
98 | GPIO67_LCD_LDD_9, | ||
99 | GPIO68_LCD_LDD_10, | ||
100 | GPIO69_LCD_LDD_11, | ||
101 | GPIO70_LCD_LDD_12, | ||
102 | GPIO71_LCD_LDD_13, | ||
103 | GPIO72_LCD_LDD_14, | ||
104 | GPIO73_LCD_LDD_15, | ||
105 | GPIO74_LCD_FCLK, | ||
106 | GPIO75_LCD_LCLK, | ||
107 | GPIO76_LCD_PCLK, | ||
108 | GPIO77_LCD_BIAS, | ||
109 | |||
110 | /* I2C */ | ||
111 | GPIO117_I2C_SCL, | ||
112 | GPIO118_I2C_SDA, | ||
113 | |||
114 | /* SSP1 */ | ||
115 | GPIO23_SSP1_SCLK, | ||
116 | GPIO24_SSP1_SFRM, | ||
117 | GPIO25_SSP1_TXD, | ||
118 | GPIO26_SSP1_RXD, | ||
119 | |||
120 | /* SSP2 */ | ||
121 | GPIO19_SSP2_SCLK, | ||
122 | GPIO14_SSP2_SFRM, | ||
123 | GPIO87_SSP2_TXD, | ||
124 | GPIO88_SSP2_RXD, | ||
125 | |||
126 | /* PC Card */ | ||
127 | GPIO48_nPOE, | ||
128 | GPIO49_nPWE, | ||
129 | GPIO50_nPIOR, | ||
130 | GPIO51_nPIOW, | ||
131 | GPIO85_nPCE_1, | ||
132 | GPIO54_nPCE_2, | ||
133 | GPIO55_nPREG, | ||
134 | GPIO56_nPWAIT, | ||
135 | GPIO57_nIOIS16, | ||
136 | |||
137 | /* SDRAM and local bus */ | ||
138 | GPIO15_nCS_1, | ||
139 | GPIO78_nCS_2, | ||
140 | GPIO79_nCS_3, | ||
141 | GPIO80_nCS_4, | ||
142 | GPIO33_nCS_5, | ||
143 | GPIO49_nPWE, | ||
144 | GPIO18_RDY, | ||
145 | |||
146 | /* GPIO */ | ||
147 | GPIO0_GPIO | WAKEUP_ON_EDGE_BOTH, | ||
148 | GPIO105_GPIO | MFP_LPM_DRIVE_HIGH, /* MMC/SD power */ | ||
149 | GPIO53_GPIO, /* PC card reset */ | ||
150 | |||
151 | /* NAND controls */ | ||
152 | GPIO11_GPIO | MFP_LPM_DRIVE_HIGH, /* NAND CE# */ | ||
153 | GPIO89_GPIO, /* NAND Ready/Busy */ | ||
154 | |||
155 | /* interrupts */ | ||
156 | GPIO10_GPIO, /* DM9000 interrupt */ | ||
157 | GPIO83_GPIO, /* MMC card detect */ | ||
158 | }; | ||
159 | |||
160 | #if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE) | ||
161 | static struct resource cmx270_dm9000_resource[] = { | ||
50 | [0] = { | 162 | [0] = { |
51 | .start = DM9000_PHYS_BASE, | 163 | .start = DM9000_PHYS_BASE, |
52 | .end = DM9000_PHYS_BASE + 4, | 164 | .end = DM9000_PHYS_BASE + 4, |
@@ -64,31 +176,45 @@ static struct resource cmx270_dm9k_resource[] = { | |||
64 | } | 176 | } |
65 | }; | 177 | }; |
66 | 178 | ||
67 | /* for the moment we limit ourselves to 32bit IO until some | 179 | static struct dm9000_plat_data cmx270_dm9000_platdata = { |
68 | * better IO routines can be written and tested | ||
69 | */ | ||
70 | static struct dm9000_plat_data cmx270_dm9k_platdata = { | ||
71 | .flags = DM9000_PLATF_32BITONLY, | 180 | .flags = DM9000_PLATF_32BITONLY, |
72 | }; | 181 | }; |
73 | 182 | ||
74 | /* Ethernet device */ | 183 | static struct platform_device cmx270_dm9000_device = { |
75 | static struct platform_device cmx270_device_dm9k = { | ||
76 | .name = "dm9000", | 184 | .name = "dm9000", |
77 | .id = 0, | 185 | .id = 0, |
78 | .num_resources = ARRAY_SIZE(cmx270_dm9k_resource), | 186 | .num_resources = ARRAY_SIZE(cmx270_dm9000_resource), |
79 | .resource = cmx270_dm9k_resource, | 187 | .resource = cmx270_dm9000_resource, |
80 | .dev = { | 188 | .dev = { |
81 | .platform_data = &cmx270_dm9k_platdata, | 189 | .platform_data = &cmx270_dm9000_platdata, |
82 | } | 190 | } |
83 | }; | 191 | }; |
84 | 192 | ||
85 | /* touchscreen controller */ | 193 | static void __init cmx270_init_dm9000(void) |
194 | { | ||
195 | platform_device_register(&cmx270_dm9000_device); | ||
196 | } | ||
197 | #else | ||
198 | static inline void cmx270_init_dm9000(void) {} | ||
199 | #endif | ||
200 | |||
201 | /* UCB1400 touchscreen controller */ | ||
202 | #if defined(CONFIG_TOUCHSCREEN_UCB1400) || defined(CONFIG_TOUCHSCREEN_UCB1400_MODULE) | ||
86 | static struct platform_device cmx270_ts_device = { | 203 | static struct platform_device cmx270_ts_device = { |
87 | .name = "ucb1400_ts", | 204 | .name = "ucb1400_ts", |
88 | .id = -1, | 205 | .id = -1, |
89 | }; | 206 | }; |
90 | 207 | ||
91 | /* RTC */ | 208 | static void __init cmx270_init_touchscreen(void) |
209 | { | ||
210 | platform_device_register(&cmx270_ts_device); | ||
211 | } | ||
212 | #else | ||
213 | static inline void cmx270_init_touchscreen(void) {} | ||
214 | #endif | ||
215 | |||
216 | /* V3020 RTC */ | ||
217 | #if defined(CONFIG_RTC_DRV_V3020) || defined(CONFIG_RTC_DRV_V3020_MODULE) | ||
92 | static struct resource cmx270_v3020_resource[] = { | 218 | static struct resource cmx270_v3020_resource[] = { |
93 | [0] = { | 219 | [0] = { |
94 | .start = RTC_PHYS_BASE, | 220 | .start = RTC_PHYS_BASE, |
@@ -111,28 +237,67 @@ static struct platform_device cmx270_rtc_device = { | |||
111 | } | 237 | } |
112 | }; | 238 | }; |
113 | 239 | ||
114 | /* | 240 | static void __init cmx270_init_rtc(void) |
115 | * CM-X270 LEDs | 241 | { |
116 | */ | 242 | platform_device_register(&cmx270_rtc_device); |
243 | } | ||
244 | #else | ||
245 | static inline void cmx270_init_rtc(void) {} | ||
246 | #endif | ||
247 | |||
248 | /* CM-X270 LEDs */ | ||
249 | #if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE) | ||
250 | static struct gpio_led cmx270_leds[] = { | ||
251 | [0] = { | ||
252 | .name = "cm-x270:red", | ||
253 | .default_trigger = "nand-disk", | ||
254 | .gpio = 93, | ||
255 | .active_low = 1, | ||
256 | }, | ||
257 | [1] = { | ||
258 | .name = "cm-x270:green", | ||
259 | .default_trigger = "heartbeat", | ||
260 | .gpio = 94, | ||
261 | .active_low = 1, | ||
262 | }, | ||
263 | }; | ||
264 | |||
265 | static struct gpio_led_platform_data cmx270_gpio_led_pdata = { | ||
266 | .num_leds = ARRAY_SIZE(cmx270_leds), | ||
267 | .leds = cmx270_leds, | ||
268 | }; | ||
269 | |||
117 | static struct platform_device cmx270_led_device = { | 270 | static struct platform_device cmx270_led_device = { |
118 | .name = "cm-x270-led", | 271 | .name = "leds-gpio", |
119 | .id = -1, | 272 | .id = -1, |
273 | .dev = { | ||
274 | .platform_data = &cmx270_gpio_led_pdata, | ||
275 | }, | ||
120 | }; | 276 | }; |
121 | 277 | ||
278 | static void __init cmx270_init_leds(void) | ||
279 | { | ||
280 | platform_device_register(&cmx270_led_device); | ||
281 | } | ||
282 | #else | ||
283 | static inline void cmx270_init_leds(void) {} | ||
284 | #endif | ||
285 | |||
122 | /* 2700G graphics */ | 286 | /* 2700G graphics */ |
287 | #if defined(CONFIG_FB_MBX) || defined(CONFIG_FB_MBX_MODULE) | ||
123 | static u64 fb_dma_mask = ~(u64)0; | 288 | static u64 fb_dma_mask = ~(u64)0; |
124 | 289 | ||
125 | static struct resource cmx270_2700G_resource[] = { | 290 | static struct resource cmx270_2700G_resource[] = { |
126 | /* frame buffer memory including ODFB and External SDRAM */ | 291 | /* frame buffer memory including ODFB and External SDRAM */ |
127 | [0] = { | 292 | [0] = { |
128 | .start = MARATHON_PHYS, | 293 | .start = PXA_CS2_PHYS, |
129 | .end = MARATHON_PHYS + 0x02000000, | 294 | .end = PXA_CS2_PHYS + 0x01ffffff, |
130 | .flags = IORESOURCE_MEM, | 295 | .flags = IORESOURCE_MEM, |
131 | }, | 296 | }, |
132 | /* Marathon registers */ | 297 | /* Marathon registers */ |
133 | [1] = { | 298 | [1] = { |
134 | .start = MARATHON_PHYS + 0x03fe0000, | 299 | .start = PXA_CS2_PHYS + 0x03fe0000, |
135 | .end = MARATHON_PHYS + 0x03ffffff, | 300 | .end = PXA_CS2_PHYS + 0x03ffffff, |
136 | .flags = IORESOURCE_MEM, | 301 | .flags = IORESOURCE_MEM, |
137 | }, | 302 | }, |
138 | }; | 303 | }; |
@@ -200,43 +365,15 @@ static struct platform_device cmx270_2700G = { | |||
200 | .id = -1, | 365 | .id = -1, |
201 | }; | 366 | }; |
202 | 367 | ||
203 | static u64 ata_dma_mask = ~(u64)0; | 368 | static void __init cmx270_init_2700G(void) |
204 | 369 | { | |
205 | static struct platform_device cmx270_ata = { | 370 | platform_device_register(&cmx270_2700G); |
206 | .name = "pata_cm_x270", | 371 | } |
207 | .id = -1, | 372 | #else |
208 | .dev = { | 373 | static inline void cmx270_init_2700G(void) {} |
209 | .dma_mask = &ata_dma_mask, | 374 | #endif |
210 | .coherent_dma_mask = 0xffffffff, | ||
211 | }, | ||
212 | }; | ||
213 | |||
214 | /* platform devices */ | ||
215 | static struct platform_device *platform_devices[] __initdata = { | ||
216 | &cmx270_device_dm9k, | ||
217 | &cmx270_rtc_device, | ||
218 | &cmx270_2700G, | ||
219 | &cmx270_led_device, | ||
220 | &cmx270_ts_device, | ||
221 | &cmx270_ata, | ||
222 | }; | ||
223 | |||
224 | /* Map PCI companion and IDE/General Purpose CS statically */ | ||
225 | static struct map_desc cmx270_io_desc[] __initdata = { | ||
226 | [0] = { /* IDE/general purpose space */ | ||
227 | .virtual = CMX270_IDE104_VIRT, | ||
228 | .pfn = __phys_to_pfn(CMX270_IDE104_PHYS), | ||
229 | .length = SZ_64M - SZ_8M, | ||
230 | .type = MT_DEVICE | ||
231 | }, | ||
232 | [1] = { /* PCI bridge */ | ||
233 | .virtual = CMX270_IT8152_VIRT, | ||
234 | .pfn = __phys_to_pfn(CMX270_IT8152_PHYS), | ||
235 | .length = SZ_64M, | ||
236 | .type = MT_DEVICE | ||
237 | }, | ||
238 | }; | ||
239 | 375 | ||
376 | #if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE) | ||
240 | /* | 377 | /* |
241 | Display definitions | 378 | Display definitions |
242 | keep these for backwards compatibility, although symbolic names (as | 379 | keep these for backwards compatibility, although symbolic names (as |
@@ -446,7 +583,16 @@ static int __init cmx270_set_display(char *str) | |||
446 | */ | 583 | */ |
447 | __setup("monitor=", cmx270_set_display); | 584 | __setup("monitor=", cmx270_set_display); |
448 | 585 | ||
586 | static void __init cmx270_init_display(void) | ||
587 | { | ||
588 | set_pxa_fb_info(cmx270_display); | ||
589 | } | ||
590 | #else | ||
591 | static inline void cmx270_init_display(void) {} | ||
592 | #endif | ||
593 | |||
449 | /* PXA27x OHCI controller setup */ | 594 | /* PXA27x OHCI controller setup */ |
595 | #if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) | ||
450 | static int cmx270_ohci_init(struct device *dev) | 596 | static int cmx270_ohci_init(struct device *dev) |
451 | { | 597 | { |
452 | /* Set the Power Control Polarity Low */ | 598 | /* Set the Power Control Polarity Low */ |
@@ -461,35 +607,37 @@ static struct pxaohci_platform_data cmx270_ohci_platform_data = { | |||
461 | .init = cmx270_ohci_init, | 607 | .init = cmx270_ohci_init, |
462 | }; | 608 | }; |
463 | 609 | ||
610 | static void __init cmx270_init_ohci(void) | ||
611 | { | ||
612 | pxa_set_ohci_info(&cmx270_ohci_platform_data); | ||
613 | } | ||
614 | #else | ||
615 | static inline void cmx270_init_ohci(void) {} | ||
616 | #endif | ||
464 | 617 | ||
618 | #if defined(CONFIG_MMC) || defined(CONFIG_MMC_MODULE) | ||
465 | static int cmx270_mci_init(struct device *dev, | 619 | static int cmx270_mci_init(struct device *dev, |
466 | irq_handler_t cmx270_detect_int, | 620 | irq_handler_t cmx270_detect_int, |
467 | void *data) | 621 | void *data) |
468 | { | 622 | { |
469 | int err; | 623 | int err; |
470 | 624 | ||
471 | /* | 625 | err = gpio_request(GPIO105_MMC_POWER, "MMC/SD power"); |
472 | * setup GPIO for PXA27x MMC controller | 626 | if (err) { |
473 | */ | 627 | dev_warn(dev, "power gpio unavailable\n"); |
474 | pxa_gpio_mode(GPIO32_MMCCLK_MD); | 628 | return err; |
475 | pxa_gpio_mode(GPIO112_MMCCMD_MD); | 629 | } |
476 | pxa_gpio_mode(GPIO92_MMCDAT0_MD); | ||
477 | pxa_gpio_mode(GPIO109_MMCDAT1_MD); | ||
478 | pxa_gpio_mode(GPIO110_MMCDAT2_MD); | ||
479 | pxa_gpio_mode(GPIO111_MMCDAT3_MD); | ||
480 | |||
481 | /* SB-X270 uses GPIO105 as SD power enable */ | ||
482 | pxa_gpio_mode(105 | GPIO_OUT); | ||
483 | 630 | ||
484 | /* card detect IRQ on GPIO 83 */ | 631 | gpio_direction_output(GPIO105_MMC_POWER, 0); |
485 | pxa_gpio_mode(IRQ_TO_GPIO(CMX270_MMC_IRQ)); | ||
486 | 632 | ||
487 | err = request_irq(CMX270_MMC_IRQ, cmx270_detect_int, | 633 | err = request_irq(CMX270_MMC_IRQ, cmx270_detect_int, |
488 | IRQF_DISABLED | IRQF_TRIGGER_FALLING, | 634 | IRQF_DISABLED | IRQF_TRIGGER_FALLING, |
489 | "MMC card detect", data); | 635 | "MMC card detect", data); |
490 | if (err) | 636 | if (err) { |
491 | printk(KERN_ERR "cmx270_mci_init: MMC/SD: can't" | 637 | gpio_free(GPIO105_MMC_POWER); |
492 | " request MMC card detect IRQ\n"); | 638 | dev_err(dev, "cmx270_mci_init: MMC/SD: can't" |
639 | " request MMC card detect IRQ\n"); | ||
640 | } | ||
493 | 641 | ||
494 | return err; | 642 | return err; |
495 | } | 643 | } |
@@ -499,17 +647,18 @@ static void cmx270_mci_setpower(struct device *dev, unsigned int vdd) | |||
499 | struct pxamci_platform_data *p_d = dev->platform_data; | 647 | struct pxamci_platform_data *p_d = dev->platform_data; |
500 | 648 | ||
501 | if ((1 << vdd) & p_d->ocr_mask) { | 649 | if ((1 << vdd) & p_d->ocr_mask) { |
502 | printk(KERN_DEBUG "%s: on\n", __func__); | 650 | dev_dbg(dev, "power on\n"); |
503 | GPCR(105) = GPIO_bit(105); | 651 | gpio_set_value(GPIO105_MMC_POWER, 0); |
504 | } else { | 652 | } else { |
505 | GPSR(105) = GPIO_bit(105); | 653 | gpio_set_value(GPIO105_MMC_POWER, 1); |
506 | printk(KERN_DEBUG "%s: off\n", __func__); | 654 | dev_dbg(dev, "power off\n"); |
507 | } | 655 | } |
508 | } | 656 | } |
509 | 657 | ||
510 | static void cmx270_mci_exit(struct device *dev, void *data) | 658 | static void cmx270_mci_exit(struct device *dev, void *data) |
511 | { | 659 | { |
512 | free_irq(CMX270_MMC_IRQ, data); | 660 | free_irq(CMX270_MMC_IRQ, data); |
661 | gpio_free(GPIO105_MMC_POWER); | ||
513 | } | 662 | } |
514 | 663 | ||
515 | static struct pxamci_platform_data cmx270_mci_platform_data = { | 664 | static struct pxamci_platform_data cmx270_mci_platform_data = { |
@@ -519,6 +668,14 @@ static struct pxamci_platform_data cmx270_mci_platform_data = { | |||
519 | .exit = cmx270_mci_exit, | 668 | .exit = cmx270_mci_exit, |
520 | }; | 669 | }; |
521 | 670 | ||
671 | static void __init cmx270_init_mmc(void) | ||
672 | { | ||
673 | pxa_set_mci_info(&cmx270_mci_platform_data); | ||
674 | } | ||
675 | #else | ||
676 | static inline void cmx270_init_mmc(void) {} | ||
677 | #endif | ||
678 | |||
522 | #ifdef CONFIG_PM | 679 | #ifdef CONFIG_PM |
523 | static unsigned long sleep_save_msc[10]; | 680 | static unsigned long sleep_save_msc[10]; |
524 | 681 | ||
@@ -580,53 +737,63 @@ static int __init cmx270_pm_init(void) | |||
580 | static int __init cmx270_pm_init(void) { return 0; } | 737 | static int __init cmx270_pm_init(void) { return 0; } |
581 | #endif | 738 | #endif |
582 | 739 | ||
583 | static void __init cmx270_init(void) | 740 | #if defined(CONFIG_SND_PXA2XX_AC97) || defined(CONFIG_SND_PXA2XX_AC97_MODULE) |
741 | static void __init cmx270_init_ac97(void) | ||
584 | { | 742 | { |
585 | cmx270_pm_init(); | ||
586 | |||
587 | set_pxa_fb_info(cmx270_display); | ||
588 | |||
589 | /* register CM-X270 platform devices */ | ||
590 | platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices)); | ||
591 | pxa_set_ac97_info(NULL); | 743 | pxa_set_ac97_info(NULL); |
744 | } | ||
745 | #else | ||
746 | static inline void cmx270_init_ac97(void) {} | ||
747 | #endif | ||
592 | 748 | ||
593 | /* set MCI and OHCI platform parameters */ | 749 | static void __init cmx270_init(void) |
594 | pxa_set_mci_info(&cmx270_mci_platform_data); | 750 | { |
595 | pxa_set_ohci_info(&cmx270_ohci_platform_data); | 751 | cmx270_pm_init(); |
596 | |||
597 | /* This enables the STUART */ | ||
598 | pxa_gpio_mode(GPIO46_STRXD_MD); | ||
599 | pxa_gpio_mode(GPIO47_STTXD_MD); | ||
600 | 752 | ||
601 | /* This enables the BTUART */ | 753 | pxa2xx_mfp_config(ARRAY_AND_SIZE(cmx270_pin_config)); |
602 | pxa_gpio_mode(GPIO42_BTRXD_MD); | 754 | |
603 | pxa_gpio_mode(GPIO43_BTTXD_MD); | 755 | cmx270_init_dm9000(); |
604 | pxa_gpio_mode(GPIO44_BTCTS_MD); | 756 | cmx270_init_rtc(); |
605 | pxa_gpio_mode(GPIO45_BTRTS_MD); | 757 | cmx270_init_display(); |
758 | cmx270_init_mmc(); | ||
759 | cmx270_init_ohci(); | ||
760 | cmx270_init_ac97(); | ||
761 | cmx270_init_touchscreen(); | ||
762 | cmx270_init_leds(); | ||
763 | cmx270_init_2700G(); | ||
606 | } | 764 | } |
607 | 765 | ||
608 | static void __init cmx270_init_irq(void) | 766 | static void __init cmx270_init_irq(void) |
609 | { | 767 | { |
610 | pxa27x_init_irq(); | 768 | pxa27x_init_irq(); |
611 | 769 | ||
770 | cmx270_pci_init_irq(GPIO22_IT8152_IRQ); | ||
771 | } | ||
612 | 772 | ||
613 | cmx270_pci_init_irq(); | 773 | #ifdef CONFIG_PCI |
774 | /* Map PCI companion statically */ | ||
775 | static struct map_desc cmx270_io_desc[] __initdata = { | ||
776 | [0] = { /* PCI bridge */ | ||
777 | .virtual = CMX270_IT8152_VIRT, | ||
778 | .pfn = __phys_to_pfn(PXA_CS4_PHYS), | ||
779 | .length = SZ_64M, | ||
780 | .type = MT_DEVICE | ||
781 | }, | ||
782 | }; | ||
614 | 783 | ||
615 | /* Setup interrupt for dm9000 */ | 784 | static void __init cmx270_map_io(void) |
616 | pxa_gpio_mode(IRQ_TO_GPIO(CMX270_ETHIRQ)); | 785 | { |
617 | set_irq_type(CMX270_ETHIRQ, IRQT_RISING); | 786 | pxa_map_io(); |
787 | iotable_init(cmx270_io_desc, ARRAY_SIZE(cmx270_io_desc)); | ||
618 | 788 | ||
619 | /* Setup interrupt for 2700G */ | 789 | it8152_base_address = CMX270_IT8152_VIRT; |
620 | pxa_gpio_mode(IRQ_TO_GPIO(CMX270_GFXIRQ)); | ||
621 | set_irq_type(CMX270_GFXIRQ, IRQT_FALLING); | ||
622 | } | 790 | } |
623 | 791 | #else | |
624 | static void __init cmx270_map_io(void) | 792 | static void __init cmx270_map_io(void) |
625 | { | 793 | { |
626 | pxa_map_io(); | 794 | pxa_map_io(); |
627 | iotable_init(cmx270_io_desc, ARRAY_SIZE(cmx270_io_desc)); | ||
628 | } | 795 | } |
629 | 796 | #endif | |
630 | 797 | ||
631 | MACHINE_START(ARMCORE, "Compulab CM-x270") | 798 | MACHINE_START(ARMCORE, "Compulab CM-x270") |
632 | .boot_params = 0xa0000100, | 799 | .boot_params = 0xa0000100, |
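
The cm-x270 hunks above replace direct GPSR/GPCR register writes with generic gpiolib calls and add a statically mapped window for the IT8152 PCI companion. A minimal sketch of the gpiolib request/configure/release cycle those calls imply (only GPIO105_MMC_POWER and its active-low sense come from the hunk; the helper names are illustrative):

#include <linux/gpio.h>

#define GPIO105_MMC_POWER       105

static int cmx270_mmc_power_setup(void)
{
        int err = gpio_request(GPIO105_MMC_POWER, "MMC power");
        if (err)
                return err;

        /* start with the card powered off; the line is active low here */
        return gpio_direction_output(GPIO105_MMC_POWER, 1);
}

static void cmx270_mmc_power_release(void)
{
        gpio_free(GPIO105_MMC_POWER);
}
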
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c index b37671b71886..e58504edb140 100644 --- a/arch/arm/mach-pxa/corgi.c +++ b/arch/arm/mach-pxa/corgi.c | |||
@@ -465,6 +465,7 @@ static void corgi_irda_transceiver_mode(struct device *dev, int mode) | |||
465 | GPSR(CORGI_GPIO_IR_ON) = GPIO_bit(CORGI_GPIO_IR_ON); | 465 | GPSR(CORGI_GPIO_IR_ON) = GPIO_bit(CORGI_GPIO_IR_ON); |
466 | else | 466 | else |
467 | GPCR(CORGI_GPIO_IR_ON) = GPIO_bit(CORGI_GPIO_IR_ON); | 467 | GPCR(CORGI_GPIO_IR_ON) = GPIO_bit(CORGI_GPIO_IR_ON); |
468 | pxa2xx_transceiver_mode(dev, mode); | ||
468 | } | 469 | } |
469 | 470 | ||
470 | static struct pxaficp_platform_data corgi_ficp_platform_data = { | 471 | static struct pxaficp_platform_data corgi_ficp_platform_data = { |
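
The corgi hunk above chains the board's IrDA hook into the new pxa2xx_transceiver_mode() helper, which handles the SIR/FIR pin setup common to all PXA boards. A rough sketch of the resulting shape, assuming the <asm/arch/irda.h> definitions of this kernel generation (BOARD_GPIO_IR_ON and its polarity are made up for illustration):

#include <linux/device.h>
#include <linux/gpio.h>
#include <asm/arch/irda.h>

#define BOARD_GPIO_IR_ON        40      /* hypothetical transceiver power GPIO */

static void board_irda_transceiver_mode(struct device *dev, int mode)
{
        /* board-specific part: power the transceiver up or down
         * (the active level depends on how the board is wired) */
        gpio_set_value(BOARD_GPIO_IR_ON, !(mode & IR_OFF));

        /* common part: let the PXA core reconfigure the SIR/FIR pins */
        pxa2xx_transceiver_mode(dev, mode);
}

static struct pxaficp_platform_data board_ficp_platform_data = {
        .transceiver_cap  = IR_SIRMODE | IR_OFF,
        .transceiver_mode = board_irda_transceiver_mode,
};
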
diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c index a6f2390ce662..84489dc51d81 100644 --- a/arch/arm/mach-pxa/devices.c +++ b/arch/arm/mach-pxa/devices.c | |||
@@ -13,8 +13,10 @@ | |||
13 | #include <asm/arch/mfp-pxa27x.h> | 13 | #include <asm/arch/mfp-pxa27x.h> |
14 | #include <asm/arch/ohci.h> | 14 | #include <asm/arch/ohci.h> |
15 | #include <asm/arch/pxa27x_keypad.h> | 15 | #include <asm/arch/pxa27x_keypad.h> |
16 | #include <asm/arch/pxa2xx_spi.h> | ||
16 | #include <asm/arch/camera.h> | 17 | #include <asm/arch/camera.h> |
17 | #include <asm/arch/audio.h> | 18 | #include <asm/arch/audio.h> |
19 | #include <asm/arch/pxa3xx_nand.h> | ||
18 | 20 | ||
19 | #include "devices.h" | 21 | #include "devices.h" |
20 | #include "generic.h" | 22 | #include "generic.h" |
@@ -830,4 +832,63 @@ void __init pxa3xx_set_mci3_info(struct pxamci_platform_data *info) | |||
830 | pxa_register_device(&pxa3xx_device_mci3, info); | 832 | pxa_register_device(&pxa3xx_device_mci3, info); |
831 | } | 833 | } |
832 | 834 | ||
835 | static struct resource pxa3xx_resources_nand[] = { | ||
836 | [0] = { | ||
837 | .start = 0x43100000, | ||
838 | .end = 0x43100053, | ||
839 | .flags = IORESOURCE_MEM, | ||
840 | }, | ||
841 | [1] = { | ||
842 | .start = IRQ_NAND, | ||
843 | .end = IRQ_NAND, | ||
844 | .flags = IORESOURCE_IRQ, | ||
845 | }, | ||
846 | [2] = { | ||
847 | /* DRCMR for Data DMA */ | ||
848 | .start = 97, | ||
849 | .end = 97, | ||
850 | .flags = IORESOURCE_DMA, | ||
851 | }, | ||
852 | [3] = { | ||
853 | /* DRCMR for Command DMA */ | ||
854 | .start = 99, | ||
855 | .end = 99, | ||
856 | .flags = IORESOURCE_DMA, | ||
857 | }, | ||
858 | }; | ||
859 | |||
860 | static u64 pxa3xx_nand_dma_mask = DMA_BIT_MASK(32); | ||
861 | |||
862 | struct platform_device pxa3xx_device_nand = { | ||
863 | .name = "pxa3xx-nand", | ||
864 | .id = -1, | ||
865 | .dev = { | ||
866 | .dma_mask = &pxa3xx_nand_dma_mask, | ||
867 | .coherent_dma_mask = DMA_BIT_MASK(32), | ||
868 | }, | ||
869 | .num_resources = ARRAY_SIZE(pxa3xx_resources_nand), | ||
870 | .resource = pxa3xx_resources_nand, | ||
871 | }; | ||
872 | |||
873 | void __init pxa3xx_set_nand_info(struct pxa3xx_nand_platform_data *info) | ||
874 | { | ||
875 | pxa_register_device(&pxa3xx_device_nand, info); | ||
876 | } | ||
833 | #endif /* CONFIG_PXA3xx */ | 877 | #endif /* CONFIG_PXA3xx */ |
878 | |||
879 | /* pxa2xx-spi platform-device ID equals respective SSP platform-device ID + 1. | ||
880 | * See comment in arch/arm/mach-pxa/ssp.c::ssp_probe() */ | ||
881 | void __init pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info) | ||
882 | { | ||
883 | struct platform_device *pd; | ||
884 | |||
885 | pd = platform_device_alloc("pxa2xx-spi", id); | ||
886 | if (pd == NULL) { | ||
887 | printk(KERN_ERR "pxa2xx-spi: failed to allocate device id %d\n", | ||
888 | id); | ||
889 | return; | ||
890 | } | ||
891 | |||
892 | pd->dev.platform_data = info; | ||
893 | platform_device_add(pd); | ||
894 | } | ||
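
pxa2xx_set_spi_info() above only instantiates the controller; a board file still has to describe the slaves hanging off that bus. A minimal sketch of how it might be used, assuming the helper is declared in <asm/arch/pxa2xx_spi.h> next to struct pxa2xx_spi_master (the slave device shown is purely illustrative):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spi/spi.h>
#include <asm/arch/pxa2xx_spi.h>

static struct pxa2xx_spi_master board_ssp_master_info = {
        .num_chipselect = 1,
};

static struct spi_board_info board_spi_devices[] __initdata = {
        {
                .modalias     = "ads7846",      /* example slave only */
                .max_speed_hz = 1200000,
                .bus_num      = 1,              /* must match the id passed below */
                .chip_select  = 0,
        },
};

static void __init board_init_spi(void)
{
        pxa2xx_set_spi_info(1, &board_ssp_master_info);
        spi_register_board_info(board_spi_devices, ARRAY_SIZE(board_spi_devices));
}
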
diff --git a/arch/arm/mach-pxa/devices.h b/arch/arm/mach-pxa/devices.h index b852eb18daa5..887c738f5911 100644 --- a/arch/arm/mach-pxa/devices.h +++ b/arch/arm/mach-pxa/devices.h | |||
@@ -31,4 +31,6 @@ extern struct platform_device pxa25x_device_pwm1; | |||
31 | extern struct platform_device pxa27x_device_pwm0; | 31 | extern struct platform_device pxa27x_device_pwm0; |
32 | extern struct platform_device pxa27x_device_pwm1; | 32 | extern struct platform_device pxa27x_device_pwm1; |
33 | 33 | ||
34 | extern struct platform_device pxa3xx_device_nand; | ||
35 | |||
34 | void __init pxa_register_device(struct platform_device *dev, void *data); | 36 | void __init pxa_register_device(struct platform_device *dev, void *data); |
diff --git a/arch/arm/mach-pxa/e400_lcd.c b/arch/arm/mach-pxa/e400_lcd.c new file mode 100644 index 000000000000..16c023630626 --- /dev/null +++ b/arch/arm/mach-pxa/e400_lcd.c | |||
@@ -0,0 +1,56 @@ | |||
1 | /* | ||
2 | * e400_lcd.c | ||
3 | * | ||
4 | * (c) 2005 Ian Molton <spyro@f2s.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/module.h> | ||
15 | |||
16 | #include <asm/mach-types.h> | ||
17 | #include <asm/arch/pxa-regs.h> | ||
18 | #include <asm/arch/pxafb.h> | ||
19 | |||
20 | static struct pxafb_mode_info e400_pxafb_mode_info = { | ||
21 | .pixclock = 140703, | ||
22 | .xres = 240, | ||
23 | .yres = 320, | ||
24 | .bpp = 16, | ||
25 | .hsync_len = 4, | ||
26 | .left_margin = 28, | ||
27 | .right_margin = 8, | ||
28 | .vsync_len = 3, | ||
29 | .upper_margin = 5, | ||
30 | .lower_margin = 6, | ||
31 | .sync = 0, | ||
32 | }; | ||
33 | |||
34 | static struct pxafb_mach_info e400_pxafb_mach_info = { | ||
35 | .modes = &e400_pxafb_mode_info, | ||
36 | .num_modes = 1, | ||
37 | .lccr0 = LCCR0_Color | LCCR0_Sngl | LCCR0_Act, | ||
38 | .lccr3 = 0, | ||
39 | .pxafb_backlight_power = NULL, | ||
40 | }; | ||
41 | |||
42 | static int __init e400_lcd_init(void) | ||
43 | { | ||
44 | if (!machine_is_e400()) | ||
45 | return -ENODEV; | ||
46 | |||
47 | set_pxa_fb_info(&e400_pxafb_mach_info); | ||
48 | return 0; | ||
49 | } | ||
50 | |||
51 | module_init(e400_lcd_init); | ||
52 | |||
53 | MODULE_AUTHOR("Ian Molton <spyro@f2s.com>"); | ||
54 | MODULE_DESCRIPTION("e400 lcd driver"); | ||
55 | MODULE_LICENSE("GPLv2"); | ||
56 | |||
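
For reference, the e400 mode above works out to roughly a 76 Hz refresh, assuming pixclock is (as usual for pxafb) the pixel-clock period in picoseconds: 10^12 / 140703 gives a pixel clock of about 7.1 MHz, the horizontal total is 240 + 28 + 8 + 4 = 280 clocks, the vertical total is 320 + 5 + 6 + 3 = 334 lines, and 7.1 MHz / (280 * 334) is approximately 76 Hz.
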
diff --git a/arch/arm/mach-pxa/e740_lcd.c b/arch/arm/mach-pxa/e740_lcd.c new file mode 100644 index 000000000000..26bd599af178 --- /dev/null +++ b/arch/arm/mach-pxa/e740_lcd.c | |||
@@ -0,0 +1,123 @@ | |||
1 | /* e740_lcd.c | ||
2 | * | ||
3 | * This file contains the definitions for the LCD timings and functions | ||
4 | * to control the LCD power / frontlighting via the w100fb driver. | ||
5 | * | ||
6 | * (c) 2005 Ian Molton <spyro@f2s.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/module.h> | ||
15 | #include <linux/device.h> | ||
16 | #include <linux/fb.h> | ||
17 | #include <linux/err.h> | ||
18 | #include <linux/platform_device.h> | ||
19 | |||
20 | #include <asm/mach-types.h> | ||
21 | |||
22 | #include <video/w100fb.h> | ||
23 | |||
24 | /* | ||
25 | **potential** shutdown routine - to be investigated | ||
26 | devmem2 0x0c010528 w 0xff3fff00 | ||
27 | devmem2 0x0c010190 w 0x7FFF8000 | ||
28 | devmem2 0x0c0101b0 w 0x00FF0000 | ||
29 | devmem2 0x0c01008c w 0x00000000 | ||
30 | devmem2 0x0c010080 w 0x000000bf | ||
31 | devmem2 0x0c010098 w 0x00000015 | ||
32 | devmem2 0x0c010088 w 0x4b000204 | ||
33 | devmem2 0x0c010098 w 0x0000001d | ||
34 | */ | ||
35 | |||
36 | static struct w100_gen_regs e740_lcd_regs = { | ||
37 | .lcd_format = 0x00008023, | ||
38 | .lcdd_cntl1 = 0x0f000000, | ||
39 | .lcdd_cntl2 = 0x0003ffff, | ||
40 | .genlcd_cntl1 = 0x00ffff03, | ||
41 | .genlcd_cntl2 = 0x003c0f03, | ||
42 | .genlcd_cntl3 = 0x000143aa, | ||
43 | }; | ||
44 | |||
45 | static struct w100_mode e740_lcd_mode = { | ||
46 | .xres = 240, | ||
47 | .yres = 320, | ||
48 | .left_margin = 20, | ||
49 | .right_margin = 28, | ||
50 | .upper_margin = 9, | ||
51 | .lower_margin = 8, | ||
52 | .crtc_ss = 0x80140013, | ||
53 | .crtc_ls = 0x81150110, | ||
54 | .crtc_gs = 0x80050005, | ||
55 | .crtc_vpos_gs = 0x000a0009, | ||
56 | .crtc_rev = 0x0040010a, | ||
57 | .crtc_dclk = 0xa906000a, | ||
58 | .crtc_gclk = 0x80050108, | ||
59 | .crtc_goe = 0x80050108, | ||
60 | .pll_freq = 57, | ||
61 | .pixclk_divider = 4, | ||
62 | .pixclk_divider_rotated = 4, | ||
63 | .pixclk_src = CLK_SRC_XTAL, | ||
64 | .sysclk_divider = 1, | ||
65 | .sysclk_src = CLK_SRC_PLL, | ||
66 | .crtc_ps1_active = 0x41060010, | ||
67 | }; | ||
68 | |||
69 | |||
70 | static struct w100_gpio_regs e740_w100_gpio_info = { | ||
71 | .init_data1 = 0x21002103, | ||
72 | .gpio_dir1 = 0xffffdeff, | ||
73 | .gpio_oe1 = 0x03c00643, | ||
74 | .init_data2 = 0x003f003f, | ||
75 | .gpio_dir2 = 0xffffffff, | ||
76 | .gpio_oe2 = 0x000000ff, | ||
77 | }; | ||
78 | |||
79 | static struct w100fb_mach_info e740_fb_info = { | ||
80 | .modelist = &e740_lcd_mode, | ||
81 | .num_modes = 1, | ||
82 | .regs = &e740_lcd_regs, | ||
83 | .gpio = &e740_w100_gpio_info, | ||
84 | .xtal_freq = 14318000, | ||
85 | .xtal_dbl = 1, | ||
86 | }; | ||
87 | |||
88 | static struct resource e740_fb_resources[] = { | ||
89 | [0] = { | ||
90 | .start = 0x0c000000, | ||
91 | .end = 0x0cffffff, | ||
92 | .flags = IORESOURCE_MEM, | ||
93 | }, | ||
94 | }; | ||
95 | |||
96 | /* ----------------------- device declarations -------------------------- */ | ||
97 | |||
98 | |||
99 | static struct platform_device e740_fb_device = { | ||
100 | .name = "w100fb", | ||
101 | .id = -1, | ||
102 | .dev = { | ||
103 | .platform_data = &e740_fb_info, | ||
104 | }, | ||
105 | .num_resources = ARRAY_SIZE(e740_fb_resources), | ||
106 | .resource = e740_fb_resources, | ||
107 | }; | ||
108 | |||
109 | static int e740_lcd_init(void) | ||
110 | { | ||
111 | int ret; | ||
112 | |||
113 | if (!machine_is_e740()) | ||
114 | return -ENODEV; | ||
115 | |||
116 | return platform_device_register(&e740_fb_device); | ||
117 | } | ||
118 | |||
119 | module_init(e740_lcd_init); | ||
120 | |||
121 | MODULE_AUTHOR("Ian Molton <spyro@f2s.com>"); | ||
122 | MODULE_DESCRIPTION("e740 lcd driver"); | ||
123 | MODULE_LICENSE("GPLv2"); | ||
diff --git a/arch/arm/mach-pxa/e750_lcd.c b/arch/arm/mach-pxa/e750_lcd.c new file mode 100644 index 000000000000..75edc3b5390f --- /dev/null +++ b/arch/arm/mach-pxa/e750_lcd.c | |||
@@ -0,0 +1,109 @@ | |||
1 | /* e750_lcd.c | ||
2 | * | ||
3 | * This file contains the definitions for the LCD timings and functions | ||
4 | * to control the LCD power / frontlighting via the w100fb driver. | ||
5 | * | ||
6 | * (c) 2005 Ian Molton <spyro@f2s.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/module.h> | ||
15 | #include <linux/device.h> | ||
16 | #include <linux/fb.h> | ||
17 | #include <linux/err.h> | ||
18 | #include <linux/platform_device.h> | ||
19 | |||
20 | #include <asm/mach-types.h> | ||
21 | |||
22 | #include <video/w100fb.h> | ||
23 | |||
24 | static struct w100_gen_regs e750_lcd_regs = { | ||
25 | .lcd_format = 0x00008003, | ||
26 | .lcdd_cntl1 = 0x00000000, | ||
27 | .lcdd_cntl2 = 0x0003ffff, | ||
28 | .genlcd_cntl1 = 0x00fff003, | ||
29 | .genlcd_cntl2 = 0x003c0f03, | ||
30 | .genlcd_cntl3 = 0x000143aa, | ||
31 | }; | ||
32 | |||
33 | static struct w100_mode e750_lcd_mode = { | ||
34 | .xres = 240, | ||
35 | .yres = 320, | ||
36 | .left_margin = 21, | ||
37 | .right_margin = 22, | ||
38 | .upper_margin = 5, | ||
39 | .lower_margin = 4, | ||
40 | .crtc_ss = 0x80150014, | ||
41 | .crtc_ls = 0x8014000d, | ||
42 | .crtc_gs = 0xc1000005, | ||
43 | .crtc_vpos_gs = 0x00020147, | ||
44 | .crtc_rev = 0x0040010a, | ||
45 | .crtc_dclk = 0xa1700030, | ||
46 | .crtc_gclk = 0x80cc0015, | ||
47 | .crtc_goe = 0x80cc0015, | ||
48 | .crtc_ps1_active = 0x61060017, | ||
49 | .pll_freq = 57, | ||
50 | .pixclk_divider = 4, | ||
51 | .pixclk_divider_rotated = 4, | ||
52 | .pixclk_src = CLK_SRC_XTAL, | ||
53 | .sysclk_divider = 1, | ||
54 | .sysclk_src = CLK_SRC_PLL, | ||
55 | }; | ||
56 | |||
57 | |||
58 | static struct w100_gpio_regs e750_w100_gpio_info = { | ||
59 | .init_data1 = 0x01192f1b, | ||
60 | .gpio_dir1 = 0xd5ffdeff, | ||
61 | .gpio_oe1 = 0x000020bf, | ||
62 | .init_data2 = 0x010f010f, | ||
63 | .gpio_dir2 = 0xffffffff, | ||
64 | .gpio_oe2 = 0x000001cf, | ||
65 | }; | ||
66 | |||
67 | static struct w100fb_mach_info e750_fb_info = { | ||
68 | .modelist = &e750_lcd_mode, | ||
69 | .num_modes = 1, | ||
70 | .regs = &e750_lcd_regs, | ||
71 | .gpio = &e750_w100_gpio_info, | ||
72 | .xtal_freq = 14318000, | ||
73 | .xtal_dbl = 1, | ||
74 | }; | ||
75 | |||
76 | static struct resource e750_fb_resources[] = { | ||
77 | [0] = { | ||
78 | .start = 0x0c000000, | ||
79 | .end = 0x0cffffff, | ||
80 | .flags = IORESOURCE_MEM, | ||
81 | }, | ||
82 | }; | ||
83 | |||
84 | /* ----------------------- device declarations -------------------------- */ | ||
85 | |||
86 | |||
87 | static struct platform_device e750_fb_device = { | ||
88 | .name = "w100fb", | ||
89 | .id = -1, | ||
90 | .dev = { | ||
91 | .platform_data = &e750_fb_info, | ||
92 | }, | ||
93 | .num_resources = ARRAY_SIZE(e750_fb_resources), | ||
94 | .resource = e750_fb_resources, | ||
95 | }; | ||
96 | |||
97 | static int e750_lcd_init(void) | ||
98 | { | ||
99 | if (!machine_is_e750()) | ||
100 | return -ENODEV; | ||
101 | |||
102 | return platform_device_register(&e750_fb_device); | ||
103 | } | ||
104 | |||
105 | module_init(e750_lcd_init); | ||
106 | |||
107 | MODULE_AUTHOR("Ian Molton <spyro@f2s.com>"); | ||
108 | MODULE_DESCRIPTION("e750 lcd driver"); | ||
109 | MODULE_LICENSE("GPLv2"); | ||
diff --git a/arch/arm/mach-pxa/e800_lcd.c b/arch/arm/mach-pxa/e800_lcd.c new file mode 100644 index 000000000000..e6aeab0ebc22 --- /dev/null +++ b/arch/arm/mach-pxa/e800_lcd.c | |||
@@ -0,0 +1,159 @@ | |||
1 | /* e800_lcd.c | ||
2 | * | ||
3 | * This file contains the definitions for the LCD timings and functions | ||
4 | * to control the LCD power / frontlighting via the w100fb driver. | ||
5 | * | ||
6 | * (c) 2005 Ian Molton <spyro@f2s.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/module.h> | ||
15 | #include <linux/device.h> | ||
16 | #include <linux/fb.h> | ||
17 | #include <linux/err.h> | ||
18 | #include <linux/platform_device.h> | ||
19 | |||
20 | #include <asm/mach-types.h> | ||
21 | |||
22 | #include <video/w100fb.h> | ||
23 | |||
24 | static struct w100_gen_regs e800_lcd_regs = { | ||
25 | .lcd_format = 0x00008003, | ||
26 | .lcdd_cntl1 = 0x02a00000, | ||
27 | .lcdd_cntl2 = 0x0003ffff, | ||
28 | .genlcd_cntl1 = 0x000ff2a3, | ||
29 | .genlcd_cntl2 = 0x000002a3, | ||
30 | .genlcd_cntl3 = 0x000102aa, | ||
31 | }; | ||
32 | |||
33 | static struct w100_mode e800_lcd_mode[2] = { | ||
34 | [0] = { | ||
35 | .xres = 480, | ||
36 | .yres = 640, | ||
37 | .left_margin = 52, | ||
38 | .right_margin = 148, | ||
39 | .upper_margin = 2, | ||
40 | .lower_margin = 6, | ||
41 | .crtc_ss = 0x80350034, | ||
42 | .crtc_ls = 0x802b0026, | ||
43 | .crtc_gs = 0x80160016, | ||
44 | .crtc_vpos_gs = 0x00020003, | ||
45 | .crtc_rev = 0x0040001d, | ||
46 | .crtc_dclk = 0xe0000000, | ||
47 | .crtc_gclk = 0x82a50049, | ||
48 | .crtc_goe = 0x80ee001c, | ||
49 | .crtc_ps1_active = 0x00000000, | ||
50 | .pll_freq = 128, | ||
51 | .pixclk_divider = 4, | ||
52 | .pixclk_divider_rotated = 6, | ||
53 | .pixclk_src = CLK_SRC_PLL, | ||
54 | .sysclk_divider = 0, | ||
55 | .sysclk_src = CLK_SRC_PLL, | ||
56 | }, | ||
57 | [1] = { | ||
58 | .xres = 240, | ||
59 | .yres = 320, | ||
60 | .left_margin = 15, | ||
61 | .right_margin = 88, | ||
62 | .upper_margin = 0, | ||
63 | .lower_margin = 7, | ||
64 | .crtc_ss = 0xd010000f, | ||
65 | .crtc_ls = 0x80070003, | ||
66 | .crtc_gs = 0x80000000, | ||
67 | .crtc_vpos_gs = 0x01460147, | ||
68 | .crtc_rev = 0x00400003, | ||
69 | .crtc_dclk = 0xa1700030, | ||
70 | .crtc_gclk = 0x814b0008, | ||
71 | .crtc_goe = 0x80cc0015, | ||
72 | .crtc_ps1_active = 0x00000000, | ||
73 | .pll_freq = 100, | ||
74 | .pixclk_divider = 6, /* Wince uses 14 which gives a 7MHz pclk. */ | ||
75 | .pixclk_divider_rotated = 6, /* we want a 14MHz one (much nicer to look at) */ | ||
76 | .pixclk_src = CLK_SRC_PLL, | ||
77 | .sysclk_divider = 0, | ||
78 | .sysclk_src = CLK_SRC_PLL, | ||
79 | } | ||
80 | }; | ||
81 | |||
82 | |||
83 | static struct w100_gpio_regs e800_w100_gpio_info = { | ||
84 | .init_data1 = 0xc13fc019, | ||
85 | .gpio_dir1 = 0x3e40df7f, | ||
86 | .gpio_oe1 = 0x003c3000, | ||
87 | .init_data2 = 0x00000000, | ||
88 | .gpio_dir2 = 0x00000000, | ||
89 | .gpio_oe2 = 0x00000000, | ||
90 | }; | ||
91 | |||
92 | static struct w100_mem_info e800_w100_mem_info = { | ||
93 | .ext_cntl = 0x09640011, | ||
94 | .sdram_mode_reg = 0x00600021, | ||
95 | .ext_timing_cntl = 0x10001545, | ||
96 | .io_cntl = 0x7ddd7333, | ||
97 | .size = 0x1fffff, | ||
98 | }; | ||
99 | |||
100 | static void e800_tg_change(struct w100fb_par *par) | ||
101 | { | ||
102 | unsigned long tmp; | ||
103 | |||
104 | tmp = w100fb_gpio_read(W100_GPIO_PORT_A); | ||
105 | if (par->mode->xres == 480) | ||
106 | tmp |= 0x100; | ||
107 | else | ||
108 | tmp &= ~0x100; | ||
109 | w100fb_gpio_write(W100_GPIO_PORT_A, tmp); | ||
110 | } | ||
111 | |||
112 | static struct w100_tg_info e800_tg_info = { | ||
113 | .change = e800_tg_change, | ||
114 | }; | ||
115 | |||
116 | static struct w100fb_mach_info e800_fb_info = { | ||
117 | .modelist = e800_lcd_mode, | ||
118 | .num_modes = 2, | ||
119 | .regs = &e800_lcd_regs, | ||
120 | .gpio = &e800_w100_gpio_info, | ||
121 | .mem = &e800_w100_mem_info, | ||
122 | .tg = &e800_tg_info, | ||
123 | .xtal_freq = 16000000, | ||
124 | }; | ||
125 | |||
126 | static struct resource e800_fb_resources[] = { | ||
127 | [0] = { | ||
128 | .start = 0x0c000000, | ||
129 | .end = 0x0cffffff, | ||
130 | .flags = IORESOURCE_MEM, | ||
131 | }, | ||
132 | }; | ||
133 | |||
134 | /* ----------------------- device declarations -------------------------- */ | ||
135 | |||
136 | |||
137 | static struct platform_device e800_fb_device = { | ||
138 | .name = "w100fb", | ||
139 | .id = -1, | ||
140 | .dev = { | ||
141 | .platform_data = &e800_fb_info, | ||
142 | }, | ||
143 | .num_resources = ARRAY_SIZE(e800_fb_resources), | ||
144 | .resource = e800_fb_resources, | ||
145 | }; | ||
146 | |||
147 | static int e800_lcd_init(void) | ||
148 | { | ||
149 | if (!machine_is_e800()) | ||
150 | return -ENODEV; | ||
151 | |||
152 | return platform_device_register(&e800_fb_device); | ||
153 | } | ||
154 | |||
155 | module_init(e800_lcd_init); | ||
156 | |||
157 | MODULE_AUTHOR("Ian Molton <spyro@f2s.com>"); | ||
158 | MODULE_DESCRIPTION("e800 lcd driver"); | ||
159 | MODULE_LICENSE("GPLv2"); | ||
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c index 1bf680749928..e5cc6ca63c75 100644 --- a/arch/arm/mach-pxa/em-x270.c +++ b/arch/arm/mach-pxa/em-x270.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Support for CompuLab EM-x270 platform | 2 | * Support for CompuLab EM-X270 platform |
3 | * | 3 | * |
4 | * Copyright (C) 2007 CompuLab, Ltd. | 4 | * Copyright (C) 2007, 2008 CompuLab, Ltd. |
5 | * Author: Mike Rapoport <mike@compulab.co.il> | 5 | * Author: Mike Rapoport <mike@compulab.co.il> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
@@ -14,31 +14,159 @@ | |||
14 | 14 | ||
15 | #include <linux/dm9000.h> | 15 | #include <linux/dm9000.h> |
16 | #include <linux/rtc-v3020.h> | 16 | #include <linux/rtc-v3020.h> |
17 | |||
18 | #include <linux/mtd/nand.h> | 17 | #include <linux/mtd/nand.h> |
19 | #include <linux/mtd/partitions.h> | 18 | #include <linux/mtd/partitions.h> |
19 | #include <linux/input.h> | ||
20 | #include <linux/gpio_keys.h> | ||
21 | #include <linux/gpio.h> | ||
20 | 22 | ||
21 | #include <asm/mach-types.h> | 23 | #include <asm/mach-types.h> |
22 | |||
23 | #include <asm/mach/arch.h> | 24 | #include <asm/mach/arch.h> |
24 | 25 | ||
26 | #include <asm/arch/mfp-pxa27x.h> | ||
25 | #include <asm/arch/pxa-regs.h> | 27 | #include <asm/arch/pxa-regs.h> |
26 | #include <asm/arch/pxa2xx-gpio.h> | ||
27 | #include <asm/arch/pxa27x-udc.h> | 28 | #include <asm/arch/pxa27x-udc.h> |
28 | #include <asm/arch/audio.h> | 29 | #include <asm/arch/audio.h> |
29 | #include <asm/arch/pxafb.h> | 30 | #include <asm/arch/pxafb.h> |
30 | #include <asm/arch/ohci.h> | 31 | #include <asm/arch/ohci.h> |
31 | #include <asm/arch/mmc.h> | 32 | #include <asm/arch/mmc.h> |
32 | #include <asm/arch/bitfield.h> | 33 | #include <asm/arch/pxa27x_keypad.h> |
33 | 34 | ||
34 | #include "generic.h" | 35 | #include "generic.h" |
35 | 36 | ||
36 | /* GPIO IRQ usage */ | 37 | /* GPIO IRQ usage */ |
37 | #define EM_X270_MMC_PD (105) | 38 | #define GPIO41_ETHIRQ (41) |
38 | #define EM_X270_ETHIRQ IRQ_GPIO(41) | 39 | #define GPIO13_MMC_CD (13) |
39 | #define EM_X270_MMC_IRQ IRQ_GPIO(13) | 40 | #define EM_X270_ETHIRQ IRQ_GPIO(GPIO41_ETHIRQ) |
41 | #define EM_X270_MMC_CD IRQ_GPIO(GPIO13_MMC_CD) | ||
42 | |||
43 | /* NAND control GPIOs */ | ||
44 | #define GPIO11_NAND_CS (11) | ||
45 | #define GPIO56_NAND_RB (56) | ||
46 | |||
47 | static unsigned long em_x270_pin_config[] = { | ||
48 | /* AC'97 */ | ||
49 | GPIO28_AC97_BITCLK, | ||
50 | GPIO29_AC97_SDATA_IN_0, | ||
51 | GPIO30_AC97_SDATA_OUT, | ||
52 | GPIO31_AC97_SYNC, | ||
53 | GPIO98_AC97_SYSCLK, | ||
54 | GPIO113_AC97_nRESET, | ||
55 | |||
56 | /* BTUART */ | ||
57 | GPIO42_BTUART_RXD, | ||
58 | GPIO43_BTUART_TXD, | ||
59 | GPIO44_BTUART_CTS, | ||
60 | GPIO45_BTUART_RTS, | ||
61 | |||
62 | /* STUART */ | ||
63 | GPIO46_STUART_RXD, | ||
64 | GPIO47_STUART_TXD, | ||
65 | |||
66 | /* MCI controller */ | ||
67 | GPIO32_MMC_CLK, | ||
68 | GPIO112_MMC_CMD, | ||
69 | GPIO92_MMC_DAT_0, | ||
70 | GPIO109_MMC_DAT_1, | ||
71 | GPIO110_MMC_DAT_2, | ||
72 | GPIO111_MMC_DAT_3, | ||
73 | |||
74 | /* LCD */ | ||
75 | GPIO58_LCD_LDD_0, | ||
76 | GPIO59_LCD_LDD_1, | ||
77 | GPIO60_LCD_LDD_2, | ||
78 | GPIO61_LCD_LDD_3, | ||
79 | GPIO62_LCD_LDD_4, | ||
80 | GPIO63_LCD_LDD_5, | ||
81 | GPIO64_LCD_LDD_6, | ||
82 | GPIO65_LCD_LDD_7, | ||
83 | GPIO66_LCD_LDD_8, | ||
84 | GPIO67_LCD_LDD_9, | ||
85 | GPIO68_LCD_LDD_10, | ||
86 | GPIO69_LCD_LDD_11, | ||
87 | GPIO70_LCD_LDD_12, | ||
88 | GPIO71_LCD_LDD_13, | ||
89 | GPIO72_LCD_LDD_14, | ||
90 | GPIO73_LCD_LDD_15, | ||
91 | GPIO74_LCD_FCLK, | ||
92 | GPIO75_LCD_LCLK, | ||
93 | GPIO76_LCD_PCLK, | ||
94 | GPIO77_LCD_BIAS, | ||
95 | |||
96 | /* QCI */ | ||
97 | GPIO84_CIF_FV, | ||
98 | GPIO25_CIF_LV, | ||
99 | GPIO53_CIF_MCLK, | ||
100 | GPIO54_CIF_PCLK, | ||
101 | GPIO81_CIF_DD_0, | ||
102 | GPIO55_CIF_DD_1, | ||
103 | GPIO51_CIF_DD_2, | ||
104 | GPIO50_CIF_DD_3, | ||
105 | GPIO52_CIF_DD_4, | ||
106 | GPIO48_CIF_DD_5, | ||
107 | GPIO17_CIF_DD_6, | ||
108 | GPIO12_CIF_DD_7, | ||
109 | |||
110 | /* I2C */ | ||
111 | GPIO117_I2C_SCL, | ||
112 | GPIO118_I2C_SDA, | ||
113 | |||
114 | /* Keypad */ | ||
115 | GPIO100_KP_MKIN_0 | WAKEUP_ON_LEVEL_HIGH, | ||
116 | GPIO101_KP_MKIN_1 | WAKEUP_ON_LEVEL_HIGH, | ||
117 | GPIO102_KP_MKIN_2 | WAKEUP_ON_LEVEL_HIGH, | ||
118 | GPIO34_KP_MKIN_3 | WAKEUP_ON_LEVEL_HIGH, | ||
119 | GPIO39_KP_MKIN_4 | WAKEUP_ON_LEVEL_HIGH, | ||
120 | GPIO99_KP_MKIN_5 | WAKEUP_ON_LEVEL_HIGH, | ||
121 | GPIO91_KP_MKIN_6 | WAKEUP_ON_LEVEL_HIGH, | ||
122 | GPIO36_KP_MKIN_7 | WAKEUP_ON_LEVEL_HIGH, | ||
123 | GPIO103_KP_MKOUT_0, | ||
124 | GPIO104_KP_MKOUT_1, | ||
125 | GPIO105_KP_MKOUT_2, | ||
126 | GPIO106_KP_MKOUT_3, | ||
127 | GPIO107_KP_MKOUT_4, | ||
128 | GPIO108_KP_MKOUT_5, | ||
129 | GPIO96_KP_MKOUT_6, | ||
130 | GPIO22_KP_MKOUT_7, | ||
131 | |||
132 | /* SSP1 */ | ||
133 | GPIO26_SSP1_RXD, | ||
134 | GPIO23_SSP1_SCLK, | ||
135 | GPIO24_SSP1_SFRM, | ||
136 | GPIO57_SSP1_TXD, | ||
137 | |||
138 | /* SSP2 */ | ||
139 | GPIO19_SSP2_SCLK, | ||
140 | GPIO14_SSP2_SFRM, | ||
141 | GPIO89_SSP2_TXD, | ||
142 | GPIO88_SSP2_RXD, | ||
143 | |||
144 | /* SDRAM and local bus */ | ||
145 | GPIO15_nCS_1, | ||
146 | GPIO78_nCS_2, | ||
147 | GPIO79_nCS_3, | ||
148 | GPIO80_nCS_4, | ||
149 | GPIO49_nPWE, | ||
150 | GPIO18_RDY, | ||
151 | |||
152 | /* GPIO */ | ||
153 | GPIO1_GPIO | WAKEUP_ON_EDGE_BOTH, | ||
154 | |||
155 | /* power controls */ | ||
156 | GPIO20_GPIO | MFP_LPM_DRIVE_LOW, /* GPRS_PWEN */ | ||
157 | GPIO115_GPIO | MFP_LPM_DRIVE_LOW, /* WLAN_PWEN */ | ||
158 | |||
159 | /* NAND controls */ | ||
160 | GPIO11_GPIO | MFP_LPM_DRIVE_HIGH, /* NAND CE# */ | ||
161 | GPIO56_GPIO, /* NAND Ready/Busy */ | ||
162 | |||
163 | /* interrupts */ | ||
164 | GPIO13_GPIO, /* MMC card detect */ | ||
165 | GPIO41_GPIO, /* DM9000 interrupt */ | ||
166 | }; | ||
40 | 167 | ||
41 | static struct resource em_x270_dm9k_resource[] = { | 168 | #if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE) |
169 | static struct resource em_x270_dm9000_resource[] = { | ||
42 | [0] = { | 170 | [0] = { |
43 | .start = PXA_CS2_PHYS, | 171 | .start = PXA_CS2_PHYS, |
44 | .end = PXA_CS2_PHYS + 3, | 172 | .end = PXA_CS2_PHYS + 3, |
@@ -56,32 +184,30 @@ static struct resource em_x270_dm9k_resource[] = { | |||
56 | } | 184 | } |
57 | }; | 185 | }; |
58 | 186 | ||
59 | /* for the moment we limit ourselves to 32bit IO until some | 187 | static struct dm9000_plat_data em_x270_dm9000_platdata = { |
60 | * better IO routines can be written and tested | ||
61 | */ | ||
62 | static struct dm9000_plat_data em_x270_dm9k_platdata = { | ||
63 | .flags = DM9000_PLATF_32BITONLY, | 188 | .flags = DM9000_PLATF_32BITONLY, |
64 | }; | 189 | }; |
65 | 190 | ||
66 | /* Ethernet device */ | 191 | static struct platform_device em_x270_dm9000 = { |
67 | static struct platform_device em_x270_dm9k = { | ||
68 | .name = "dm9000", | 192 | .name = "dm9000", |
69 | .id = 0, | 193 | .id = 0, |
70 | .num_resources = ARRAY_SIZE(em_x270_dm9k_resource), | 194 | .num_resources = ARRAY_SIZE(em_x270_dm9000_resource), |
71 | .resource = em_x270_dm9k_resource, | 195 | .resource = em_x270_dm9000_resource, |
72 | .dev = { | 196 | .dev = { |
73 | .platform_data = &em_x270_dm9k_platdata, | 197 | .platform_data = &em_x270_dm9000_platdata, |
74 | } | 198 | } |
75 | }; | 199 | }; |
76 | 200 | ||
77 | /* WM9712 touchscreen controller. Hopefully the driver will make it to | 201 | static void __init em_x270_init_dm9000(void) |
78 | * the mainstream sometime */ | 202 | { |
79 | static struct platform_device em_x270_ts = { | 203 | platform_device_register(&em_x270_dm9000); |
80 | .name = "wm97xx-ts", | 204 | } |
81 | .id = -1, | 205 | #else |
82 | }; | 206 | static inline void em_x270_init_dm9000(void) {} |
207 | #endif | ||
83 | 208 | ||
84 | /* RTC */ | 209 | /* V3020 RTC */ |
210 | #if defined(CONFIG_RTC_DRV_V3020) || defined(CONFIG_RTC_DRV_V3020_MODULE) | ||
85 | static struct resource em_x270_v3020_resource[] = { | 211 | static struct resource em_x270_v3020_resource[] = { |
86 | [0] = { | 212 | [0] = { |
87 | .start = PXA_CS4_PHYS, | 213 | .start = PXA_CS4_PHYS, |
@@ -104,20 +230,26 @@ static struct platform_device em_x270_rtc = { | |||
104 | } | 230 | } |
105 | }; | 231 | }; |
106 | 232 | ||
107 | /* NAND flash */ | 233 | static void __init em_x270_init_rtc(void) |
108 | #define GPIO_NAND_CS (11) | 234 | { |
109 | #define GPIO_NAND_RB (56) | 235 | platform_device_register(&em_x270_rtc); |
236 | } | ||
237 | #else | ||
238 | static inline void em_x270_init_rtc(void) {} | ||
239 | #endif | ||
110 | 240 | ||
241 | /* NAND flash */ | ||
242 | #if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE) | ||
111 | static inline void nand_cs_on(void) | 243 | static inline void nand_cs_on(void) |
112 | { | 244 | { |
113 | GPCR(GPIO_NAND_CS) = GPIO_bit(GPIO_NAND_CS); | 245 | gpio_set_value(GPIO11_NAND_CS, 0); |
114 | } | 246 | } |
115 | 247 | ||
116 | static void nand_cs_off(void) | 248 | static void nand_cs_off(void) |
117 | { | 249 | { |
118 | dsb(); | 250 | dsb(); |
119 | 251 | ||
120 | GPSR(GPIO_NAND_CS) = GPIO_bit(GPIO_NAND_CS); | 252 | gpio_set_value(GPIO11_NAND_CS, 1); |
121 | } | 253 | } |
122 | 254 | ||
123 | /* hardware specific access to control-lines */ | 255 | /* hardware specific access to control-lines */ |
@@ -157,7 +289,7 @@ static int em_x270_nand_device_ready(struct mtd_info *mtd) | |||
157 | { | 289 | { |
158 | dsb(); | 290 | dsb(); |
159 | 291 | ||
160 | return GPLR(GPIO_NAND_RB) & GPIO_bit(GPIO_NAND_RB); | 292 | return gpio_get_value(GPIO56_NAND_RB); |
161 | } | 293 | } |
162 | 294 | ||
163 | static struct mtd_partition em_x270_partition_info[] = { | 295 | static struct mtd_partition em_x270_partition_info[] = { |
@@ -210,16 +342,35 @@ static struct platform_device em_x270_nand = { | |||
210 | } | 342 | } |
211 | }; | 343 | }; |
212 | 344 | ||
213 | /* platform devices */ | 345 | static void __init em_x270_init_nand(void) |
214 | static struct platform_device *platform_devices[] __initdata = { | 346 | { |
215 | &em_x270_dm9k, | 347 | int err; |
216 | &em_x270_ts, | ||
217 | &em_x270_rtc, | ||
218 | &em_x270_nand, | ||
219 | }; | ||
220 | 348 | ||
349 | err = gpio_request(GPIO11_NAND_CS, "NAND CS"); | ||
350 | if (err) { | ||
351 | pr_warning("EM-X270: failed to request NAND CS gpio\n"); | ||
352 | return; | ||
353 | } | ||
354 | |||
355 | gpio_direction_output(GPIO11_NAND_CS, 1); | ||
356 | |||
357 | err = gpio_request(GPIO56_NAND_RB, "NAND R/B"); | ||
358 | if (err) { | ||
359 | pr_warning("EM-X270: failed to request NAND R/B gpio\n"); | ||
360 | gpio_free(GPIO11_NAND_CS); | ||
361 | return; | ||
362 | } | ||
363 | |||
364 | gpio_direction_input(GPIO56_NAND_RB); | ||
365 | |||
366 | platform_device_register(&em_x270_nand); | ||
367 | } | ||
368 | #else | ||
369 | static inline void em_x270_init_nand(void) {} | ||
370 | #endif | ||
221 | 371 | ||
222 | /* PXA27x OHCI controller setup */ | 372 | /* PXA27x OHCI controller setup */ |
373 | #if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) | ||
223 | static int em_x270_ohci_init(struct device *dev) | 374 | static int em_x270_ohci_init(struct device *dev) |
224 | { | 375 | { |
225 | /* Set the Power Control Polarity Low */ | 376 | /* Set the Power Control Polarity Low */ |
@@ -237,27 +388,23 @@ static struct pxaohci_platform_data em_x270_ohci_platform_data = { | |||
237 | .init = em_x270_ohci_init, | 388 | .init = em_x270_ohci_init, |
238 | }; | 389 | }; |
239 | 390 | ||
391 | static void __init em_x270_init_ohci(void) | ||
392 | { | ||
393 | pxa_set_ohci_info(&em_x270_ohci_platform_data); | ||
394 | } | ||
395 | #else | ||
396 | static inline void em_x270_init_ohci(void) {} | ||
397 | #endif | ||
240 | 398 | ||
399 | /* MCI controller setup */ | ||
400 | #if defined(CONFIG_MMC) || defined(CONFIG_MMC_MODULE) | ||
241 | static int em_x270_mci_init(struct device *dev, | 401 | static int em_x270_mci_init(struct device *dev, |
242 | irq_handler_t em_x270_detect_int, | 402 | irq_handler_t em_x270_detect_int, |
243 | void *data) | 403 | void *data) |
244 | { | 404 | { |
245 | int err; | 405 | int err = request_irq(EM_X270_MMC_CD, em_x270_detect_int, |
246 | 406 | IRQF_DISABLED | IRQF_TRIGGER_FALLING, | |
247 | /* setup GPIO for PXA27x MMC controller */ | 407 | "MMC card detect", data); |
248 | pxa_gpio_mode(GPIO32_MMCCLK_MD); | ||
249 | pxa_gpio_mode(GPIO112_MMCCMD_MD); | ||
250 | pxa_gpio_mode(GPIO92_MMCDAT0_MD); | ||
251 | pxa_gpio_mode(GPIO109_MMCDAT1_MD); | ||
252 | pxa_gpio_mode(GPIO110_MMCDAT2_MD); | ||
253 | pxa_gpio_mode(GPIO111_MMCDAT3_MD); | ||
254 | |||
255 | /* EM-X270 uses GPIO13 as SD power enable */ | ||
256 | pxa_gpio_mode(EM_X270_MMC_PD | GPIO_OUT); | ||
257 | |||
258 | err = request_irq(EM_X270_MMC_IRQ, em_x270_detect_int, | ||
259 | IRQF_DISABLED | IRQF_TRIGGER_FALLING, | ||
260 | "MMC card detect", data); | ||
261 | if (err) { | 408 | if (err) { |
262 | printk(KERN_ERR "%s: can't request MMC card detect IRQ: %d\n", | 409 | printk(KERN_ERR "%s: can't request MMC card detect IRQ: %d\n", |
263 | __func__, err); | 410 | __func__, err); |
@@ -279,7 +426,8 @@ static void em_x270_mci_setpower(struct device *dev, unsigned int vdd) | |||
279 | 426 | ||
280 | static void em_x270_mci_exit(struct device *dev, void *data) | 427 | static void em_x270_mci_exit(struct device *dev, void *data) |
281 | { | 428 | { |
282 | free_irq(EM_X270_MMC_IRQ, data); | 429 | int irq = gpio_to_irq(GPIO13_MMC_CD); |
430 | free_irq(irq, data); | ||
283 | } | 431 | } |
284 | 432 | ||
285 | static struct pxamci_platform_data em_x270_mci_platform_data = { | 433 | static struct pxamci_platform_data em_x270_mci_platform_data = { |
@@ -289,7 +437,16 @@ static struct pxamci_platform_data em_x270_mci_platform_data = { | |||
289 | .exit = em_x270_mci_exit, | 437 | .exit = em_x270_mci_exit, |
290 | }; | 438 | }; |
291 | 439 | ||
440 | static void __init em_x270_init_mmc(void) | ||
441 | { | ||
442 | pxa_set_mci_info(&em_x270_mci_platform_data); | ||
443 | } | ||
444 | #else | ||
445 | static inline void em_x270_init_mmc(void) {} | ||
446 | #endif | ||
447 | |||
292 | /* LCD 480x640 */ | 448 | /* LCD 480x640 */ |
449 | #if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE) | ||
293 | static struct pxafb_mode_info em_x270_lcd_mode = { | 450 | static struct pxafb_mode_info em_x270_lcd_mode = { |
294 | .pixclock = 50000, | 451 | .pixclock = 50000, |
295 | .bpp = 16, | 452 | .bpp = 16, |
@@ -307,40 +464,96 @@ static struct pxafb_mode_info em_x270_lcd_mode = { | |||
307 | static struct pxafb_mach_info em_x270_lcd = { | 464 | static struct pxafb_mach_info em_x270_lcd = { |
308 | .modes = &em_x270_lcd_mode, | 465 | .modes = &em_x270_lcd_mode, |
309 | .num_modes = 1, | 466 | .num_modes = 1, |
310 | .cmap_inverse = 0, | 467 | .lcd_conn = LCD_COLOR_TFT_16BPP, |
311 | .cmap_static = 0, | ||
312 | .lccr0 = LCCR0_PAS, | ||
313 | .lccr3 = LCCR3_PixClkDiv(0x01) | LCCR3_Acb(0xff), | ||
314 | }; | 468 | }; |
315 | 469 | static void __init em_x270_init_lcd(void) | |
316 | static void __init em_x270_init(void) | ||
317 | { | 470 | { |
318 | /* setup LCD */ | ||
319 | set_pxa_fb_info(&em_x270_lcd); | 471 | set_pxa_fb_info(&em_x270_lcd); |
472 | } | ||
473 | #else | ||
474 | static inline void em_x270_init_lcd(void) {} | ||
475 | #endif | ||
320 | 476 | ||
321 | /* register EM-X270 platform devices */ | 477 | #if defined(CONFIG_SND_PXA2XX_AC97) || defined(CONFIG_SND_PXA2XX_AC97_MODULE) |
322 | platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices)); | 478 | static void __init em_x270_init_ac97(void) |
479 | { | ||
323 | pxa_set_ac97_info(NULL); | 480 | pxa_set_ac97_info(NULL); |
481 | } | ||
482 | #else | ||
483 | static inline void em_x270_init_ac97(void) {} | ||
484 | #endif | ||
485 | |||
486 | #if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE) | ||
487 | static unsigned int em_x270_matrix_keys[] = { | ||
488 | KEY(0, 0, KEY_A), KEY(1, 0, KEY_UP), KEY(2, 1, KEY_B), | ||
489 | KEY(0, 2, KEY_LEFT), KEY(1, 1, KEY_ENTER), KEY(2, 0, KEY_RIGHT), | ||
490 | KEY(0, 1, KEY_C), KEY(1, 2, KEY_DOWN), KEY(2, 2, KEY_D), | ||
491 | }; | ||
324 | 492 | ||
325 | /* set MCI and OHCI platform parameters */ | 493 | struct pxa27x_keypad_platform_data em_x270_keypad_info = { |
326 | pxa_set_mci_info(&em_x270_mci_platform_data); | 494 | /* code map for the matrix keys */ |
327 | pxa_set_ohci_info(&em_x270_ohci_platform_data); | 495 | .matrix_key_rows = 3, |
496 | .matrix_key_cols = 3, | ||
497 | .matrix_key_map = em_x270_matrix_keys, | ||
498 | .matrix_key_map_size = ARRAY_SIZE(em_x270_matrix_keys), | ||
499 | }; | ||
500 | |||
501 | static void __init em_x270_init_keypad(void) | ||
502 | { | ||
503 | pxa_set_keypad_info(&em_x270_keypad_info); | ||
504 | } | ||
505 | #else | ||
506 | static inline void em_x270_init_keypad(void) {} | ||
507 | #endif | ||
328 | 508 | ||
329 | /* setup STUART GPIOs */ | 509 | #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) |
330 | pxa_gpio_mode(GPIO46_STRXD_MD); | 510 | static struct gpio_keys_button gpio_keys_button[] = { |
331 | pxa_gpio_mode(GPIO47_STTXD_MD); | 511 | [0] = { |
512 | .desc = "sleep/wakeup", | ||
513 | .code = KEY_SUSPEND, | ||
514 | .type = EV_PWR, | ||
515 | .gpio = 1, | ||
516 | .wakeup = 1, | ||
517 | }, | ||
518 | }; | ||
332 | 519 | ||
333 | /* setup BTUART GPIOs */ | 520 | static struct gpio_keys_platform_data em_x270_gpio_keys_data = { |
334 | pxa_gpio_mode(GPIO42_BTRXD_MD); | 521 | .buttons = gpio_keys_button, |
335 | pxa_gpio_mode(GPIO43_BTTXD_MD); | 522 | .nbuttons = 1, |
336 | pxa_gpio_mode(GPIO44_BTCTS_MD); | 523 | }; |
337 | pxa_gpio_mode(GPIO45_BTRTS_MD); | ||
338 | 524 | ||
339 | /* Setup interrupt for dm9000 */ | 525 | static struct platform_device em_x270_gpio_keys = { |
340 | set_irq_type(EM_X270_ETHIRQ, IRQT_RISING); | 526 | .name = "gpio-keys", |
527 | .id = -1, | ||
528 | .dev = { | ||
529 | .platform_data = &em_x270_gpio_keys_data, | ||
530 | }, | ||
531 | }; | ||
532 | |||
533 | static void __init em_x270_init_gpio_keys(void) | ||
534 | { | ||
535 | platform_device_register(&em_x270_gpio_keys); | ||
536 | } | ||
537 | #else | ||
538 | static inline void em_x270_init_gpio_keys(void) {} | ||
539 | #endif | ||
540 | |||
541 | static void __init em_x270_init(void) | ||
542 | { | ||
543 | pxa2xx_mfp_config(ARRAY_AND_SIZE(em_x270_pin_config)); | ||
544 | |||
545 | em_x270_init_dm9000(); | ||
546 | em_x270_init_rtc(); | ||
547 | em_x270_init_nand(); | ||
548 | em_x270_init_lcd(); | ||
549 | em_x270_init_mmc(); | ||
550 | em_x270_init_ohci(); | ||
551 | em_x270_init_keypad(); | ||
552 | em_x270_init_gpio_keys(); | ||
553 | em_x270_init_ac97(); | ||
341 | } | 554 | } |
342 | 555 | ||
343 | MACHINE_START(EM_X270, "Compulab EM-x270") | 556 | MACHINE_START(EM_X270, "Compulab EM-X270") |
344 | .boot_params = 0xa0000100, | 557 | .boot_params = 0xa0000100, |
345 | .phys_io = 0x40000000, | 558 | .phys_io = 0x40000000, |
346 | .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc, | 559 | .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc, |
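
The reworked em-x270 (and cm-x270) board files above split setup into one helper per peripheral, each compiled only when the matching driver is enabled and stubbed out otherwise so the init path stays flat. Stripped down, the idiom looks like this sketch (CONFIG_FOO and the device are placeholders, not real symbols):

#include <linux/init.h>
#include <linux/platform_device.h>

#if defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)
static struct platform_device board_foo_device = {
        .name   = "foo",
        .id     = -1,
};

static void __init board_init_foo(void)
{
        platform_device_register(&board_foo_device);
}
#else
/* no-op stub keeps the call site below unconditional */
static inline void board_init_foo(void) {}
#endif

static void __init board_init(void)
{
        board_init_foo();
}
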
diff --git a/arch/arm/mach-pxa/eseries.c b/arch/arm/mach-pxa/eseries.c index ee0ae93c876a..c29b7b21c11b 100644 --- a/arch/arm/mach-pxa/eseries.c +++ b/arch/arm/mach-pxa/eseries.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <asm/arch/hardware.h> | 17 | #include <asm/arch/hardware.h> |
18 | #include <asm/mach-types.h> | 18 | #include <asm/mach-types.h> |
19 | 19 | ||
20 | #include <generic.h> | 20 | #include "generic.h" |
21 | 21 | ||
22 | /* Only e800 has 128MB RAM */ | 22 | /* Only e800 has 128MB RAM */ |
23 | static void __init eseries_fixup(struct machine_desc *desc, | 23 | static void __init eseries_fixup(struct machine_desc *desc, |
@@ -47,6 +47,19 @@ MACHINE_START(E330, "Toshiba e330") | |||
47 | MACHINE_END | 47 | MACHINE_END |
48 | #endif | 48 | #endif |
49 | 49 | ||
50 | #ifdef CONFIG_MACH_E350 | ||
51 | MACHINE_START(E350, "Toshiba e350") | ||
52 | /* Maintainer: Ian Molton (spyro@f2s.com) */ | ||
53 | .phys_io = 0x40000000, | ||
54 | .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc, | ||
55 | .boot_params = 0xa0000100, | ||
56 | .map_io = pxa_map_io, | ||
57 | .init_irq = pxa25x_init_irq, | ||
58 | .fixup = eseries_fixup, | ||
59 | .timer = &pxa_timer, | ||
60 | MACHINE_END | ||
61 | #endif | ||
62 | |||
50 | #ifdef CONFIG_MACH_E740 | 63 | #ifdef CONFIG_MACH_E740 |
51 | MACHINE_START(E740, "Toshiba e740") | 64 | MACHINE_START(E740, "Toshiba e740") |
52 | /* Maintainer: Ian Molton (spyro@f2s.com) */ | 65 | /* Maintainer: Ian Molton (spyro@f2s.com) */ |
diff --git a/arch/arm/mach-pxa/eseries_udc.c b/arch/arm/mach-pxa/eseries_udc.c new file mode 100644 index 000000000000..362847a10998 --- /dev/null +++ b/arch/arm/mach-pxa/eseries_udc.c | |||
@@ -0,0 +1,57 @@ | |||
1 | /* | ||
2 | * UDC functions for the Toshiba e-series PDAs | ||
3 | * | ||
4 | * Copyright (c) Ian Molton 2003 | ||
5 | * | ||
6 | * This file is licensed under | ||
7 | * the terms of the GNU General Public License version 2. This program | ||
8 | * is licensed "as is" without any warranty of any kind, whether express | ||
9 | * or implied. | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/device.h> | ||
16 | |||
17 | #include <asm/arch/udc.h> | ||
18 | #include <asm/arch/eseries-gpio.h> | ||
19 | #include <asm/arch/hardware.h> | ||
20 | #include <asm/arch/pxa-regs.h> | ||
21 | #include <asm/mach/arch.h> | ||
22 | #include <asm/mach-types.h> | ||
23 | #include <asm/mach/map.h> | ||
24 | #include <asm/domain.h> | ||
25 | |||
26 | /* local PXA generic code */ | ||
27 | #include "generic.h" | ||
28 | |||
29 | static struct pxa2xx_udc_mach_info e7xx_udc_mach_info = { | ||
30 | .gpio_vbus = GPIO_E7XX_USB_DISC, | ||
31 | .gpio_pullup = GPIO_E7XX_USB_PULLUP, | ||
32 | .gpio_pullup_inverted = 1 | ||
33 | }; | ||
34 | |||
35 | static struct pxa2xx_udc_mach_info e800_udc_mach_info = { | ||
36 | .gpio_vbus = GPIO_E800_USB_DISC, | ||
37 | .gpio_pullup = GPIO_E800_USB_PULLUP, | ||
38 | .gpio_pullup_inverted = 1 | ||
39 | }; | ||
40 | |||
41 | static int __init eseries_udc_init(void) | ||
42 | { | ||
43 | if (machine_is_e330() || machine_is_e350() || | ||
44 | machine_is_e740() || machine_is_e750() || | ||
45 | machine_is_e400()) | ||
46 | pxa_set_udc_info(&e7xx_udc_mach_info); | ||
47 | else if (machine_is_e800()) | ||
48 | pxa_set_udc_info(&e800_udc_mach_info); | ||
49 | |||
50 | return 0; | ||
51 | } | ||
52 | |||
53 | module_init(eseries_udc_init); | ||
54 | |||
55 | MODULE_AUTHOR("Ian Molton <spyro@f2s.com>"); | ||
56 | MODULE_DESCRIPTION("eseries UDC support"); | ||
57 | MODULE_LICENSE("GPLv2"); | ||
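
The pxa2xx_udc_mach_info structures above name the VBUS-sense and D+ pull-up GPIOs, with gpio_pullup_inverted marking boards whose pull-up is driven active low. A sketch of how a controller driver could honour that flag (illustrative only, not the actual PXA UDC code):

#include <linux/gpio.h>
#include <asm/arch/udc.h>

static void board_udc_set_pullup(struct pxa2xx_udc_mach_info *info, int connect)
{
        int level = connect;

        if (info->gpio_pullup_inverted)
                level = !level;

        gpio_set_value(info->gpio_pullup, level);
}
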
diff --git a/arch/arm/mach-pxa/ezx.c b/arch/arm/mach-pxa/ezx.c new file mode 100644 index 000000000000..0143eed65398 --- /dev/null +++ b/arch/arm/mach-pxa/ezx.c | |||
@@ -0,0 +1,220 @@ | |||
1 | /* | ||
2 | * ezx.c - Common code for the EZX platform. | ||
3 | * | ||
4 | * Copyright (C) 2005-2006 Harald Welte <laforge@openezx.org>, | ||
5 | * 2007-2008 Daniel Ribeiro <drwyrm@gmail.com>, | ||
6 | * 2007-2008 Stefan Schmidt <stefan@datenfreihafen.org> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | #include <linux/delay.h> | ||
18 | #include <linux/pwm_backlight.h> | ||
19 | |||
20 | #include <asm/setup.h> | ||
21 | #include <asm/arch/pxafb.h> | ||
22 | #include <asm/arch/ohci.h> | ||
23 | #include <asm/arch/i2c.h> | ||
24 | |||
25 | #include <asm/arch/mfp-pxa27x.h> | ||
26 | #include <asm/arch/pxa-regs.h> | ||
27 | #include <asm/arch/pxa2xx-regs.h> | ||
28 | #include <asm/mach-types.h> | ||
29 | #include <asm/mach/arch.h> | ||
30 | |||
31 | #include "devices.h" | ||
32 | #include "generic.h" | ||
33 | |||
34 | static struct platform_pwm_backlight_data ezx_backlight_data = { | ||
35 | .pwm_id = 0, | ||
36 | .max_brightness = 1023, | ||
37 | .dft_brightness = 1023, | ||
38 | .pwm_period_ns = 78770, | ||
39 | }; | ||
40 | |||
41 | static struct platform_device ezx_backlight_device = { | ||
42 | .name = "pwm-backlight", | ||
43 | .dev = { | ||
44 | .parent = &pxa27x_device_pwm0.dev, | ||
45 | .platform_data = &ezx_backlight_data, | ||
46 | }, | ||
47 | }; | ||
48 | |||
49 | static struct pxafb_mode_info mode_ezx_old = { | ||
50 | .pixclock = 150000, | ||
51 | .xres = 240, | ||
52 | .yres = 320, | ||
53 | .bpp = 16, | ||
54 | .hsync_len = 10, | ||
55 | .left_margin = 20, | ||
56 | .right_margin = 10, | ||
57 | .vsync_len = 2, | ||
58 | .upper_margin = 3, | ||
59 | .lower_margin = 2, | ||
60 | .sync = 0, | ||
61 | }; | ||
62 | |||
63 | static struct pxafb_mach_info ezx_fb_info_1 = { | ||
64 | .modes = &mode_ezx_old, | ||
65 | .num_modes = 1, | ||
66 | .lcd_conn = LCD_COLOR_TFT_16BPP, | ||
67 | }; | ||
68 | |||
69 | static struct pxafb_mode_info mode_72r89803y01 = { | ||
70 | .pixclock = 192308, | ||
71 | .xres = 240, | ||
72 | .yres = 320, | ||
73 | .bpp = 32, | ||
74 | .depth = 18, | ||
75 | .hsync_len = 10, | ||
76 | .left_margin = 20, | ||
77 | .right_margin = 10, | ||
78 | .vsync_len = 2, | ||
79 | .upper_margin = 3, | ||
80 | .lower_margin = 2, | ||
81 | .sync = 0, | ||
82 | }; | ||
83 | |||
84 | static struct pxafb_mach_info ezx_fb_info_2 = { | ||
85 | .modes = &mode_72r89803y01, | ||
86 | .num_modes = 1, | ||
87 | .lcd_conn = LCD_COLOR_TFT_18BPP, | ||
88 | }; | ||
89 | |||
90 | static struct platform_device *devices[] __initdata = { | ||
91 | &ezx_backlight_device, | ||
92 | }; | ||
93 | |||
94 | static unsigned long ezx_pin_config[] __initdata = { | ||
95 | /* PWM backlight */ | ||
96 | GPIO16_PWM0_OUT, | ||
97 | |||
98 | /* BTUART */ | ||
99 | GPIO42_BTUART_RXD, | ||
100 | GPIO43_BTUART_TXD, | ||
101 | GPIO44_BTUART_CTS, | ||
102 | GPIO45_BTUART_RTS, | ||
103 | |||
104 | /* STUART */ | ||
105 | GPIO46_STUART_RXD, | ||
106 | GPIO47_STUART_TXD, | ||
107 | |||
108 | /* For A780 support (connected with Neptune GSM chip) */ | ||
109 | GPIO30_USB_P3_2, /* ICL_TXENB */ | ||
110 | GPIO31_USB_P3_6, /* ICL_VPOUT */ | ||
111 | GPIO90_USB_P3_5, /* ICL_VPIN */ | ||
112 | GPIO91_USB_P3_1, /* ICL_XRXD */ | ||
113 | GPIO56_USB_P3_4, /* ICL_VMOUT */ | ||
114 | GPIO113_USB_P3_3, /* /ICL_VMIN */ | ||
115 | }; | ||
116 | |||
117 | static void __init ezx_init(void) | ||
118 | { | ||
119 | pxa2xx_mfp_config(ARRAY_AND_SIZE(ezx_pin_config)); | ||
120 | pxa_set_i2c_info(NULL); | ||
121 | if (machine_is_ezx_a780() || machine_is_ezx_e680()) | ||
122 | set_pxa_fb_info(&ezx_fb_info_1); | ||
123 | else | ||
124 | set_pxa_fb_info(&ezx_fb_info_2); | ||
125 | |||
126 | platform_add_devices(devices, ARRAY_SIZE(devices)); | ||
127 | } | ||
128 | |||
129 | static void __init ezx_fixup(struct machine_desc *desc, struct tag *tags, | ||
130 | char **cmdline, struct meminfo *mi) | ||
131 | { | ||
132 | /* We have two ram chips. First one with 32MB at 0xA0000000 and a second | ||
133 | * 16MB one at 0xAC000000 | ||
134 | */ | ||
135 | mi->nr_banks = 2; | ||
136 | mi->bank[0].start = 0xa0000000; | ||
137 | mi->bank[0].node = 0; | ||
138 | mi->bank[0].size = (32*1024*1024); | ||
139 | mi->bank[1].start = 0xac000000; | ||
140 | mi->bank[1].node = 1; | ||
141 | mi->bank[1].size = (16*1024*1024); | ||
142 | } | ||
143 | |||
144 | #ifdef CONFIG_MACH_EZX_A780 | ||
145 | MACHINE_START(EZX_A780, "Motorola EZX A780") | ||
146 | .phys_io = 0x40000000, | ||
147 | .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc, | ||
148 | .fixup = ezx_fixup, | ||
149 | .boot_params = 0xa0000100, | ||
150 | .map_io = pxa_map_io, | ||
151 | .init_irq = pxa27x_init_irq, | ||
152 | .timer = &pxa_timer, | ||
153 | .init_machine = &ezx_init, | ||
154 | MACHINE_END | ||
155 | #endif | ||
156 | |||
157 | #ifdef CONFIG_MACH_EZX_E680 | ||
158 | MACHINE_START(EZX_E680, "Motorola EZX E680") | ||
159 | .phys_io = 0x40000000, | ||
160 | .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc, | ||
161 | .fixup = ezx_fixup, | ||
162 | .boot_params = 0xa0000100, | ||
163 | .map_io = pxa_map_io, | ||
164 | .init_irq = pxa27x_init_irq, | ||
165 | .timer = &pxa_timer, | ||
166 | .init_machine = &ezx_init, | ||
167 | MACHINE_END | ||
168 | #endif | ||
169 | |||
170 | #ifdef CONFIG_MACH_EZX_A1200 | ||
171 | MACHINE_START(EZX_A1200, "Motorola EZX A1200") | ||
172 | .phys_io = 0x40000000, | ||
173 | .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc, | ||
174 | .fixup = ezx_fixup, | ||
175 | .boot_params = 0xa0000100, | ||
176 | .map_io = pxa_map_io, | ||
177 | .init_irq = pxa27x_init_irq, | ||
178 | .timer = &pxa_timer, | ||
179 | .init_machine = &ezx_init, | ||
180 | MACHINE_END | ||
181 | #endif | ||
182 | |||
183 | #ifdef CONFIG_MACH_EZX_A910 | ||
184 | MACHINE_START(EZX_A910, "Motorola EZX A910") | ||
185 | .phys_io = 0x40000000, | ||
186 | .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc, | ||
187 | .fixup = ezx_fixup, | ||
188 | .boot_params = 0xa0000100, | ||
189 | .map_io = pxa_map_io, | ||
190 | .init_irq = pxa27x_init_irq, | ||
191 | .timer = &pxa_timer, | ||
192 | .init_machine = &ezx_init, | ||
193 | MACHINE_END | ||
194 | #endif | ||
195 | |||
196 | #ifdef CONFIG_MACH_EZX_E6 | ||
197 | MACHINE_START(EZX_E6, "Motorola EZX E6") | ||
198 | .phys_io = 0x40000000, | ||
199 | .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc, | ||
200 | .fixup = ezx_fixup, | ||
201 | .boot_params = 0xa0000100, | ||
202 | .map_io = pxa_map_io, | ||
203 | .init_irq = pxa27x_init_irq, | ||
204 | .timer = &pxa_timer, | ||
205 | .init_machine = &ezx_init, | ||
206 | MACHINE_END | ||
207 | #endif | ||
208 | |||
209 | #ifdef CONFIG_MACH_EZX_E2 | ||
210 | MACHINE_START(EZX_E2, "Motorola EZX E2") | ||
211 | .phys_io = 0x40000000, | ||
212 | .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc, | ||
213 | .fixup = ezx_fixup, | ||
214 | .boot_params = 0xa0000100, | ||
215 | .map_io = pxa_map_io, | ||
216 | .init_irq = pxa27x_init_irq, | ||
217 | .timer = &pxa_timer, | ||
218 | .init_machine = &ezx_init, | ||
219 | MACHINE_END | ||
220 | #endif | ||
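
Two numbers in the EZX code above are worth unpacking: pwm_period_ns = 78770 corresponds to a backlight PWM frequency of about 10^9 / 78770, roughly 12.7 kHz, with brightness programmed on a 0..1023 scale; and the fixup hands the kernel two discontiguous RAM banks, 32 MB at 0xa0000000 plus 16 MB at 0xac000000, 48 MB in total.
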
diff --git a/arch/arm/mach-pxa/littleton.c b/arch/arm/mach-pxa/littleton.c index 530654474bb2..dd759d03a9fd 100644 --- a/arch/arm/mach-pxa/littleton.c +++ b/arch/arm/mach-pxa/littleton.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/delay.h> | 20 | #include <linux/delay.h> |
21 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
22 | #include <linux/clk.h> | 22 | #include <linux/clk.h> |
23 | #include <linux/smc91x.h> | ||
23 | 24 | ||
24 | #include <asm/types.h> | 25 | #include <asm/types.h> |
25 | #include <asm/setup.h> | 26 | #include <asm/setup.h> |
@@ -38,6 +39,7 @@ | |||
38 | #include <asm/arch/pxafb.h> | 39 | #include <asm/arch/pxafb.h> |
39 | #include <asm/arch/ssp.h> | 40 | #include <asm/arch/ssp.h> |
40 | #include <asm/arch/pxa27x_keypad.h> | 41 | #include <asm/arch/pxa27x_keypad.h> |
42 | #include <asm/arch/pxa3xx_nand.h> | ||
41 | #include <asm/arch/littleton.h> | 43 | #include <asm/arch/littleton.h> |
42 | 44 | ||
43 | #include "generic.h" | 45 | #include "generic.h" |
@@ -101,18 +103,26 @@ static struct resource smc91x_resources[] = { | |||
101 | [1] = { | 103 | [1] = { |
102 | .start = IRQ_GPIO(mfp_to_gpio(MFP_PIN_GPIO90)), | 104 | .start = IRQ_GPIO(mfp_to_gpio(MFP_PIN_GPIO90)), |
103 | .end = IRQ_GPIO(mfp_to_gpio(MFP_PIN_GPIO90)), | 105 | .end = IRQ_GPIO(mfp_to_gpio(MFP_PIN_GPIO90)), |
104 | .flags = IORESOURCE_IRQ | IRQF_TRIGGER_FALLING, | 106 | .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE, |
105 | } | 107 | } |
106 | }; | 108 | }; |
107 | 109 | ||
110 | static struct smc91x_platdata littleton_smc91x_info = { | ||
111 | .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | | ||
112 | SMC91X_NOWAIT | SMC91X_USE_DMA, | ||
113 | }; | ||
114 | |||
108 | static struct platform_device smc91x_device = { | 115 | static struct platform_device smc91x_device = { |
109 | .name = "smc91x", | 116 | .name = "smc91x", |
110 | .id = 0, | 117 | .id = 0, |
111 | .num_resources = ARRAY_SIZE(smc91x_resources), | 118 | .num_resources = ARRAY_SIZE(smc91x_resources), |
112 | .resource = smc91x_resources, | 119 | .resource = smc91x_resources, |
120 | .dev = { | ||
121 | .platform_data = &littleton_smc91x_info, | ||
122 | }, | ||
113 | }; | 123 | }; |
114 | 124 | ||
115 | #if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULES) | 125 | #if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE) |
116 | /* use bit 30, 31 as the indicator of command parameter number */ | 126 | /* use bit 30, 31 as the indicator of command parameter number */ |
117 | #define CMD0(x) ((0x00000000) | ((x) << 9)) | 127 | #define CMD0(x) ((0x00000000) | ((x) << 9)) |
118 | #define CMD1(x, x1) ((0x40000000) | ((x) << 9) | 0x100 | (x1)) | 128 | #define CMD1(x, x1) ((0x40000000) | ((x) << 9) | 0x100 | (x1)) |
@@ -311,9 +321,9 @@ static void littleton_init_lcd(void) | |||
311 | } | 321 | } |
312 | #else | 322 | #else |
313 | static inline void littleton_init_lcd(void) {}; | 323 | static inline void littleton_init_lcd(void) {}; |
314 | #endif /* CONFIG_FB_PXA || CONFIG_FB_PXA_MODULES */ | 324 | #endif /* CONFIG_FB_PXA || CONFIG_FB_PXA_MODULE */ |
315 | 325 | ||
316 | #if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULES) | 326 | #if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE) |
317 | static unsigned int littleton_matrix_key_map[] = { | 327 | static unsigned int littleton_matrix_key_map[] = { |
318 | /* KEY(row, col, key_code) */ | 328 | /* KEY(row, col, key_code) */ |
319 | KEY(1, 3, KEY_0), KEY(0, 0, KEY_1), KEY(1, 0, KEY_2), KEY(2, 0, KEY_3), | 329 | KEY(1, 3, KEY_0), KEY(0, 0, KEY_1), KEY(1, 0, KEY_2), KEY(2, 0, KEY_3), |
@@ -361,6 +371,57 @@ static void __init littleton_init_keypad(void) | |||
361 | static inline void littleton_init_keypad(void) {} | 371 | static inline void littleton_init_keypad(void) {} |
362 | #endif | 372 | #endif |
363 | 373 | ||
374 | #if defined(CONFIG_MTD_NAND_PXA3xx) || defined(CONFIG_MTD_NAND_PXA3xx_MODULE) | ||
375 | static struct mtd_partition littleton_nand_partitions[] = { | ||
376 | [0] = { | ||
377 | .name = "Bootloader", | ||
378 | .offset = 0, | ||
379 | .size = 0x060000, | ||
380 | .mask_flags = MTD_WRITEABLE, /* force read-only */ | ||
381 | }, | ||
382 | [1] = { | ||
383 | .name = "Kernel", | ||
384 | .offset = 0x060000, | ||
385 | .size = 0x200000, | ||
386 | .mask_flags = MTD_WRITEABLE, /* force read-only */ | ||
387 | }, | ||
388 | [2] = { | ||
389 | .name = "Filesystem", | ||
390 | .offset = 0x0260000, | ||
391 | .size = 0x3000000, /* 48M - rootfs */ | ||
392 | }, | ||
393 | [3] = { | ||
394 | .name = "MassStorage", | ||
395 | .offset = 0x3260000, | ||
396 | .size = 0x3d40000, | ||
397 | }, | ||
398 | [4] = { | ||
399 | .name = "BBT", | ||
400 | .offset = 0x6FA0000, | ||
401 | .size = 0x80000, | ||
402 | .mask_flags = MTD_WRITEABLE, /* force read-only */ | ||
403 | }, | ||
404 | /* NOTE: we reserve some blocks at the end of the NAND flash for | ||
405 | * bad block management, and the max number of relocation blocks | ||
406 | * differs on different platforms. Please take care with it when | ||
407 | * defining the partition table. | ||
408 | */ | ||
409 | }; | ||
410 | |||
411 | static struct pxa3xx_nand_platform_data littleton_nand_info = { | ||
412 | .enable_arbiter = 1, | ||
413 | .parts = littleton_nand_partitions, | ||
414 | .nr_parts = ARRAY_SIZE(littleton_nand_partitions), | ||
415 | }; | ||
416 | |||
417 | static void __init littleton_init_nand(void) | ||
418 | { | ||
419 | pxa3xx_set_nand_info(&littleton_nand_info); | ||
420 | } | ||
421 | #else | ||
422 | static inline void littleton_init_nand(void) {} | ||
423 | #endif /* CONFIG_MTD_NAND_PXA3xx || CONFIG_MTD_NAND_PXA3xx_MODULE */ | ||
424 | |||
364 | static void __init littleton_init(void) | 425 | static void __init littleton_init(void) |
365 | { | 426 | { |
366 | /* initialize MFP configurations */ | 427 | /* initialize MFP configurations */ |
@@ -374,6 +435,7 @@ static void __init littleton_init(void) | |||
374 | 435 | ||
375 | littleton_init_lcd(); | 436 | littleton_init_lcd(); |
376 | littleton_init_keypad(); | 437 | littleton_init_keypad(); |
438 | littleton_init_nand(); | ||
377 | } | 439 | } |
378 | 440 | ||
379 | MACHINE_START(LITTLETON, "Marvell Form Factor Development Platform (aka Littleton)") | 441 | MACHINE_START(LITTLETON, "Marvell Form Factor Development Platform (aka Littleton)") |
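The littleton hunks above attach an smc91x_platdata (bus widths, no-wait, DMA) to the board's SMC91x device, correct the configuration guards to the singular _MODULE suffix, and register a pxa3xx NAND partition table via pxa3xx_set_nand_info(); the partitions tile contiguously (0x060000 + 0x200000 = 0x260000, 0x260000 + 0x3000000 = 0x3260000, 0x3260000 + 0x3d40000 = 0x6fa0000, and the BBT area ends at 0x7020000), leaving the final blocks free for bad-block relocation as the comment notes. As a hedged illustration, not the actual smc91x driver code, this is roughly how a platform driver picks such board-supplied data up in its probe routine:

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/smc91x.h>

/* illustrative consumer of the platform_data attached above */
static int example_probe(struct platform_device *pdev)
{
	struct smc91x_platdata *pdata = pdev->dev.platform_data;
	unsigned long flags = pdata ? pdata->flags
				    : (SMC91X_USE_16BIT | SMC91X_NOWAIT);

	pr_debug("example: smc91x flags %08lx\n", flags);
	/* configure bus width, wait states and DMA from the flags here */
	return 0;
}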
diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c index a3fae4139203..ac26423cd20c 100644 --- a/arch/arm/mach-pxa/lubbock.c +++ b/arch/arm/mach-pxa/lubbock.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
22 | #include <linux/mtd/mtd.h> | 22 | #include <linux/mtd/mtd.h> |
23 | #include <linux/mtd/partitions.h> | 23 | #include <linux/mtd/partitions.h> |
24 | #include <linux/smc91x.h> | ||
24 | 25 | ||
25 | #include <linux/spi/spi.h> | 26 | #include <linux/spi/spi.h> |
26 | #include <linux/spi/ads7846.h> | 27 | #include <linux/spi/ads7846.h> |
@@ -226,14 +227,6 @@ static struct pxa2xx_spi_master pxa_ssp_master_info = { | |||
226 | .num_chipselect = 0, | 227 | .num_chipselect = 0, |
227 | }; | 228 | }; |
228 | 229 | ||
229 | static struct platform_device pxa_ssp = { | ||
230 | .name = "pxa2xx-spi", | ||
231 | .id = 1, | ||
232 | .dev = { | ||
233 | .platform_data = &pxa_ssp_master_info, | ||
234 | }, | ||
235 | }; | ||
236 | |||
237 | static int lubbock_ads7846_pendown_state(void) | 230 | static int lubbock_ads7846_pendown_state(void) |
238 | { | 231 | { |
239 | /* TS_BUSY is bit 8 in LUB_MISC_RD, but pendown is irq-only */ | 232 | /* TS_BUSY is bit 8 in LUB_MISC_RD, but pendown is irq-only */ |
@@ -292,11 +285,18 @@ static struct resource smc91x_resources[] = { | |||
292 | }, | 285 | }, |
293 | }; | 286 | }; |
294 | 287 | ||
288 | static struct smc91x_platdata lubbock_smc91x_info = { | ||
289 | .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT | SMC91X_IO_SHIFT_2, | ||
290 | }; | ||
291 | |||
295 | static struct platform_device smc91x_device = { | 292 | static struct platform_device smc91x_device = { |
296 | .name = "smc91x", | 293 | .name = "smc91x", |
297 | .id = -1, | 294 | .id = -1, |
298 | .num_resources = ARRAY_SIZE(smc91x_resources), | 295 | .num_resources = ARRAY_SIZE(smc91x_resources), |
299 | .resource = smc91x_resources, | 296 | .resource = smc91x_resources, |
297 | .dev = { | ||
298 | .platform_data = &lubbock_smc91x_info, | ||
299 | }, | ||
300 | }; | 300 | }; |
301 | 301 | ||
302 | static struct resource flash_resources[] = { | 302 | static struct resource flash_resources[] = { |
@@ -367,7 +367,6 @@ static struct platform_device *devices[] __initdata = { | |||
367 | &smc91x_device, | 367 | &smc91x_device, |
368 | &lubbock_flash_device[0], | 368 | &lubbock_flash_device[0], |
369 | &lubbock_flash_device[1], | 369 | &lubbock_flash_device[1], |
370 | &pxa_ssp, | ||
371 | }; | 370 | }; |
372 | 371 | ||
373 | static struct pxafb_mode_info sharp_lm8v31_mode = { | 372 | static struct pxafb_mode_info sharp_lm8v31_mode = { |
@@ -471,6 +470,7 @@ static void lubbock_irda_transceiver_mode(struct device *dev, int mode) | |||
471 | } else if (mode & IR_FIRMODE) { | 470 | } else if (mode & IR_FIRMODE) { |
472 | LUB_MISC_WR |= 1 << 4; | 471 | LUB_MISC_WR |= 1 << 4; |
473 | } | 472 | } |
473 | pxa2xx_transceiver_mode(dev, mode); | ||
474 | local_irq_restore(flags); | 474 | local_irq_restore(flags); |
475 | } | 475 | } |
476 | 476 | ||
@@ -501,6 +501,7 @@ static void __init lubbock_init(void) | |||
501 | lubbock_flash_data[flashboot].name = "boot-rom"; | 501 | lubbock_flash_data[flashboot].name = "boot-rom"; |
502 | (void) platform_add_devices(devices, ARRAY_SIZE(devices)); | 502 | (void) platform_add_devices(devices, ARRAY_SIZE(devices)); |
503 | 503 | ||
504 | pxa2xx_set_spi_info(1, &pxa_ssp_master_info); | ||
504 | spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info)); | 505 | spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info)); |
505 | } | 506 | } |
506 | 507 | ||
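Besides gaining the same kind of smc91x platform data, lubbock drops its hand-rolled "pxa2xx-spi" platform_device in favour of pxa2xx_set_spi_info() and chains its IrDA hook into pxa2xx_transceiver_mode(). A minimal sketch of the new SPI registration pattern follows; it assumes pxa2xx_set_spi_info() is declared in <asm/arch/pxa2xx_spi.h> alongside struct pxa2xx_spi_master, and the slave entry shown is purely illustrative:

#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <asm/arch/pxa2xx_spi.h>

static struct pxa2xx_spi_master example_ssp_master = {
	.num_chipselect	= 1,
};

static struct spi_board_info example_spi_devices[] __initdata = {
	{
		.modalias	= "ads7846",
		.bus_num	= 1,		/* must match the id passed below */
		.chip_select	= 0,
		.max_speed_hz	= 120000,
	},
};

static void __init example_init_spi(void)
{
	pxa2xx_set_spi_info(1, &example_ssp_master);
	spi_register_board_info(example_spi_devices,
				ARRAY_SIZE(example_spi_devices));
}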
diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c index 01b2fa790217..c9d274f0048f 100644 --- a/arch/arm/mach-pxa/magician.c +++ b/arch/arm/mach-pxa/magician.c | |||
@@ -17,17 +17,15 @@ | |||
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
19 | #include <linux/delay.h> | 19 | #include <linux/delay.h> |
20 | #include <linux/gpio.h> | ||
20 | #include <linux/gpio_keys.h> | 21 | #include <linux/gpio_keys.h> |
21 | #include <linux/input.h> | 22 | #include <linux/input.h> |
22 | #include <linux/mfd/htc-egpio.h> | 23 | #include <linux/mfd/htc-egpio.h> |
23 | #include <linux/mfd/htc-pasic3.h> | 24 | #include <linux/mfd/htc-pasic3.h> |
24 | #include <linux/mtd/mtd.h> | ||
25 | #include <linux/mtd/map.h> | ||
26 | #include <linux/mtd/physmap.h> | 25 | #include <linux/mtd/physmap.h> |
27 | #include <linux/pda_power.h> | 26 | #include <linux/pda_power.h> |
28 | #include <linux/pwm_backlight.h> | 27 | #include <linux/pwm_backlight.h> |
29 | 28 | ||
30 | #include <asm/gpio.h> | ||
31 | #include <asm/hardware.h> | 29 | #include <asm/hardware.h> |
32 | #include <asm/mach-types.h> | 30 | #include <asm/mach-types.h> |
33 | #include <asm/mach/arch.h> | 31 | #include <asm/mach/arch.h> |
@@ -44,7 +42,7 @@ | |||
44 | #include "devices.h" | 42 | #include "devices.h" |
45 | #include "generic.h" | 43 | #include "generic.h" |
46 | 44 | ||
47 | static unsigned long magician_pin_config[] = { | 45 | static unsigned long magician_pin_config[] __initdata = { |
48 | 46 | ||
49 | /* SDRAM and Static Memory I/O Signals */ | 47 | /* SDRAM and Static Memory I/O Signals */ |
50 | GPIO20_nSDCS_2, | 48 | GPIO20_nSDCS_2, |
@@ -134,6 +132,7 @@ static unsigned long magician_pin_config[] = { | |||
134 | static void magician_irda_transceiver_mode(struct device *dev, int mode) | 132 | static void magician_irda_transceiver_mode(struct device *dev, int mode) |
135 | { | 133 | { |
136 | gpio_set_value(GPIO83_MAGICIAN_nIR_EN, mode & IR_OFF); | 134 | gpio_set_value(GPIO83_MAGICIAN_nIR_EN, mode & IR_OFF); |
135 | pxa2xx_transceiver_mode(dev, mode); | ||
137 | } | 136 | } |
138 | 137 | ||
139 | static struct pxaficp_platform_data magician_ficp_info = { | 138 | static struct pxaficp_platform_data magician_ficp_info = { |
@@ -399,6 +398,7 @@ static struct platform_pwm_backlight_data backlight_data = { | |||
399 | 398 | ||
400 | static struct platform_device backlight = { | 399 | static struct platform_device backlight = { |
401 | .name = "pwm-backlight", | 400 | .name = "pwm-backlight", |
401 | .id = -1, | ||
402 | .dev = { | 402 | .dev = { |
403 | .parent = &pxa27x_device_pwm0.dev, | 403 | .parent = &pxa27x_device_pwm0.dev, |
404 | .platform_data = &backlight_data, | 404 | .platform_data = &backlight_data, |
@@ -511,6 +511,37 @@ static struct platform_device pasic3 = { | |||
511 | * External power | 511 | * External power |
512 | */ | 512 | */ |
513 | 513 | ||
514 | static int power_supply_init(struct device *dev) | ||
515 | { | ||
516 | int ret; | ||
517 | |||
518 | ret = gpio_request(EGPIO_MAGICIAN_CABLE_STATE_AC, "CABLE_STATE_AC"); | ||
519 | if (ret) | ||
520 | goto err_cs_ac; | ||
521 | ret = gpio_request(EGPIO_MAGICIAN_CABLE_STATE_USB, "CABLE_STATE_USB"); | ||
522 | if (ret) | ||
523 | goto err_cs_usb; | ||
524 | ret = gpio_request(EGPIO_MAGICIAN_CHARGE_EN, "CHARGE_EN"); | ||
525 | if (ret) | ||
526 | goto err_chg_en; | ||
527 | ret = gpio_request(GPIO30_MAGICIAN_nCHARGE_EN, "nCHARGE_EN"); | ||
528 | if (!ret) | ||
529 | ret = gpio_direction_output(GPIO30_MAGICIAN_nCHARGE_EN, 0); | ||
530 | if (ret) | ||
531 | goto err_nchg_en; | ||
532 | |||
533 | return 0; | ||
534 | |||
535 | err_nchg_en: | ||
536 | gpio_free(EGPIO_MAGICIAN_CHARGE_EN); | ||
537 | err_chg_en: | ||
538 | gpio_free(EGPIO_MAGICIAN_CABLE_STATE_USB); | ||
539 | err_cs_usb: | ||
540 | gpio_free(EGPIO_MAGICIAN_CABLE_STATE_AC); | ||
541 | err_cs_ac: | ||
542 | return ret; | ||
543 | } | ||
544 | |||
514 | static int magician_is_ac_online(void) | 545 | static int magician_is_ac_online(void) |
515 | { | 546 | { |
516 | return gpio_get_value(EGPIO_MAGICIAN_CABLE_STATE_AC); | 547 | return gpio_get_value(EGPIO_MAGICIAN_CABLE_STATE_AC); |
@@ -527,14 +558,24 @@ static void magician_set_charge(int flags) | |||
527 | gpio_set_value(EGPIO_MAGICIAN_CHARGE_EN, flags); | 558 | gpio_set_value(EGPIO_MAGICIAN_CHARGE_EN, flags); |
528 | } | 559 | } |
529 | 560 | ||
561 | static void power_supply_exit(struct device *dev) | ||
562 | { | ||
563 | gpio_free(GPIO30_MAGICIAN_nCHARGE_EN); | ||
564 | gpio_free(EGPIO_MAGICIAN_CHARGE_EN); | ||
565 | gpio_free(EGPIO_MAGICIAN_CABLE_STATE_USB); | ||
566 | gpio_free(EGPIO_MAGICIAN_CABLE_STATE_AC); | ||
567 | } | ||
568 | |||
530 | static char *magician_supplicants[] = { | 569 | static char *magician_supplicants[] = { |
531 | "ds2760-battery.0", "backup-battery" | 570 | "ds2760-battery.0", "backup-battery" |
532 | }; | 571 | }; |
533 | 572 | ||
534 | static struct pda_power_pdata power_supply_info = { | 573 | static struct pda_power_pdata power_supply_info = { |
574 | .init = power_supply_init, | ||
535 | .is_ac_online = magician_is_ac_online, | 575 | .is_ac_online = magician_is_ac_online, |
536 | .is_usb_online = magician_is_usb_online, | 576 | .is_usb_online = magician_is_usb_online, |
537 | .set_charge = magician_set_charge, | 577 | .set_charge = magician_set_charge, |
578 | .exit = power_supply_exit, | ||
538 | .supplied_to = magician_supplicants, | 579 | .supplied_to = magician_supplicants, |
539 | .num_supplicants = ARRAY_SIZE(magician_supplicants), | 580 | .num_supplicants = ARRAY_SIZE(magician_supplicants), |
540 | }; | 581 | }; |
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c index f2e9e7c4da8e..851ec2d9b699 100644 --- a/arch/arm/mach-pxa/mainstone.c +++ b/arch/arm/mach-pxa/mainstone.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/input.h> | 26 | #include <linux/input.h> |
27 | #include <linux/gpio_keys.h> | 27 | #include <linux/gpio_keys.h> |
28 | #include <linux/pwm_backlight.h> | 28 | #include <linux/pwm_backlight.h> |
29 | #include <linux/smc91x.h> | ||
29 | 30 | ||
30 | #include <asm/types.h> | 31 | #include <asm/types.h> |
31 | #include <asm/setup.h> | 32 | #include <asm/setup.h> |
@@ -110,9 +111,9 @@ static unsigned long mainstone_pin_config[] = { | |||
110 | GPIO45_AC97_SYSCLK, | 111 | GPIO45_AC97_SYSCLK, |
111 | 112 | ||
112 | /* Keypad */ | 113 | /* Keypad */ |
113 | GPIO93_KP_DKIN_0 | WAKEUP_ON_LEVEL_HIGH, | 114 | GPIO93_KP_DKIN_0, |
114 | GPIO94_KP_DKIN_1 | WAKEUP_ON_LEVEL_HIGH, | 115 | GPIO94_KP_DKIN_1, |
115 | GPIO95_KP_DKIN_2 | WAKEUP_ON_LEVEL_HIGH, | 116 | GPIO95_KP_DKIN_2, |
116 | GPIO100_KP_MKIN_0 | WAKEUP_ON_LEVEL_HIGH, | 117 | GPIO100_KP_MKIN_0 | WAKEUP_ON_LEVEL_HIGH, |
117 | GPIO101_KP_MKIN_1 | WAKEUP_ON_LEVEL_HIGH, | 118 | GPIO101_KP_MKIN_1 | WAKEUP_ON_LEVEL_HIGH, |
118 | GPIO102_KP_MKIN_2 | WAKEUP_ON_LEVEL_HIGH, | 119 | GPIO102_KP_MKIN_2 | WAKEUP_ON_LEVEL_HIGH, |
@@ -240,11 +241,19 @@ static struct resource smc91x_resources[] = { | |||
240 | } | 241 | } |
241 | }; | 242 | }; |
242 | 243 | ||
244 | static struct smc91x_platdata mainstone_smc91x_info = { | ||
245 | .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT | | ||
246 | SMC91X_NOWAIT | SMC91X_USE_DMA, | ||
247 | }; | ||
248 | |||
243 | static struct platform_device smc91x_device = { | 249 | static struct platform_device smc91x_device = { |
244 | .name = "smc91x", | 250 | .name = "smc91x", |
245 | .id = 0, | 251 | .id = 0, |
246 | .num_resources = ARRAY_SIZE(smc91x_resources), | 252 | .num_resources = ARRAY_SIZE(smc91x_resources), |
247 | .resource = smc91x_resources, | 253 | .resource = smc91x_resources, |
254 | .dev = { | ||
255 | .platform_data = &mainstone_smc91x_info, | ||
256 | }, | ||
248 | }; | 257 | }; |
249 | 258 | ||
250 | static int mst_audio_startup(struct snd_pcm_substream *substream, void *priv) | 259 | static int mst_audio_startup(struct snd_pcm_substream *substream, void *priv) |
@@ -455,6 +464,7 @@ static void mainstone_irda_transceiver_mode(struct device *dev, int mode) | |||
455 | } else if (mode & IR_FIRMODE) { | 464 | } else if (mode & IR_FIRMODE) { |
456 | MST_MSCWR1 |= MST_MSCWR1_IRDA_FIR; | 465 | MST_MSCWR1 |= MST_MSCWR1_IRDA_FIR; |
457 | } | 466 | } |
467 | pxa2xx_transceiver_mode(dev, mode); | ||
458 | if (mode & IR_OFF) { | 468 | if (mode & IR_OFF) { |
459 | MST_MSCWR1 = (MST_MSCWR1 & ~MST_MSCWR1_IRDA_MASK) | MST_MSCWR1_IRDA_OFF; | 469 | MST_MSCWR1 = (MST_MSCWR1 & ~MST_MSCWR1_IRDA_MASK) | MST_MSCWR1_IRDA_OFF; |
460 | } else { | 470 | } else { |
@@ -513,7 +523,7 @@ static struct pxaohci_platform_data mainstone_ohci_platform_data = { | |||
513 | .init = mainstone_ohci_init, | 523 | .init = mainstone_ohci_init, |
514 | }; | 524 | }; |
515 | 525 | ||
516 | #if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULES) | 526 | #if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE) |
517 | static unsigned int mainstone_matrix_keys[] = { | 527 | static unsigned int mainstone_matrix_keys[] = { |
518 | KEY(0, 0, KEY_A), KEY(1, 0, KEY_B), KEY(2, 0, KEY_C), | 528 | KEY(0, 0, KEY_A), KEY(1, 0, KEY_B), KEY(2, 0, KEY_C), |
519 | KEY(3, 0, KEY_D), KEY(4, 0, KEY_E), KEY(5, 0, KEY_F), | 529 | KEY(3, 0, KEY_D), KEY(4, 0, KEY_E), KEY(5, 0, KEY_F), |
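Mainstone gets the same smc91x platform data treatment, drops WAKEUP_ON_LEVEL_HIGH from its three direct-key inputs while the matrix-key rows shown keep it, chains its IrDA hook into pxa2xx_transceiver_mode(), and corrects the keypad guard to the _MODULE spelling. The guard fix matters because Kconfig only ever defines the singular form:

/* Kconfig defines CONFIG_<SYM>_MODULE when <SYM>=m; a guard spelt with the
 * plural "_MODULES" suffix is never defined, so modular builds silently
 * lost the code it protected. */
#if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE)
/* keypad platform data present for both built-in and modular drivers */
#endif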
diff --git a/arch/arm/mach-pxa/mfp-pxa2xx.c b/arch/arm/mach-pxa/mfp-pxa2xx.c index d1cdb4ecb0b8..fd4545eab803 100644 --- a/arch/arm/mach-pxa/mfp-pxa2xx.c +++ b/arch/arm/mach-pxa/mfp-pxa2xx.c | |||
@@ -39,6 +39,28 @@ struct gpio_desc { | |||
39 | 39 | ||
40 | static struct gpio_desc gpio_desc[MFP_PIN_GPIO127 + 1]; | 40 | static struct gpio_desc gpio_desc[MFP_PIN_GPIO127 + 1]; |
41 | 41 | ||
42 | static int __mfp_config_lpm(unsigned gpio, unsigned long lpm) | ||
43 | { | ||
44 | unsigned mask = GPIO_bit(gpio); | ||
45 | |||
46 | /* low power state */ | ||
47 | switch (lpm) { | ||
48 | case MFP_LPM_DRIVE_HIGH: | ||
49 | PGSR(gpio) |= mask; | ||
50 | break; | ||
51 | case MFP_LPM_DRIVE_LOW: | ||
52 | PGSR(gpio) &= ~mask; | ||
53 | break; | ||
54 | case MFP_LPM_INPUT: | ||
55 | break; | ||
56 | default: | ||
57 | pr_warning("%s: invalid low power state for GPIO%d\n", | ||
58 | __func__, gpio); | ||
59 | return -EINVAL; | ||
60 | } | ||
61 | return 0; | ||
62 | } | ||
63 | |||
42 | static int __mfp_config_gpio(unsigned gpio, unsigned long c) | 64 | static int __mfp_config_gpio(unsigned gpio, unsigned long c) |
43 | { | 65 | { |
44 | unsigned long gafr, mask = GPIO_bit(gpio); | 66 | unsigned long gafr, mask = GPIO_bit(gpio); |
@@ -57,21 +79,8 @@ static int __mfp_config_gpio(unsigned gpio, unsigned long c) | |||
57 | else | 79 | else |
58 | GPDR(gpio) &= ~mask; | 80 | GPDR(gpio) &= ~mask; |
59 | 81 | ||
60 | /* low power state */ | 82 | if (__mfp_config_lpm(gpio, c & MFP_LPM_STATE_MASK)) |
61 | switch (c & MFP_LPM_STATE_MASK) { | ||
62 | case MFP_LPM_DRIVE_HIGH: | ||
63 | PGSR(gpio) |= mask; | ||
64 | break; | ||
65 | case MFP_LPM_DRIVE_LOW: | ||
66 | PGSR(gpio) &= ~mask; | ||
67 | break; | ||
68 | case MFP_LPM_INPUT: | ||
69 | break; | ||
70 | default: | ||
71 | pr_warning("%s: invalid low power state for GPIO%d\n", | ||
72 | __func__, gpio); | ||
73 | return -EINVAL; | 83 | return -EINVAL; |
74 | } | ||
75 | 84 | ||
76 | /* give early warning if MFP_LPM_CAN_WAKEUP is set on the | 85 | /* give early warning if MFP_LPM_CAN_WAKEUP is set on the |
77 | * configurations of those pins not able to wakeup | 86 | * configurations of those pins not able to wakeup |
@@ -91,6 +100,18 @@ static int __mfp_config_gpio(unsigned gpio, unsigned long c) | |||
91 | return 0; | 100 | return 0; |
92 | } | 101 | } |
93 | 102 | ||
103 | static inline int __mfp_validate(int mfp) | ||
104 | { | ||
105 | int gpio = mfp_to_gpio(mfp); | ||
106 | |||
107 | if ((mfp > MFP_PIN_GPIO127) || !gpio_desc[gpio].valid) { | ||
108 | pr_warning("%s: GPIO%d is invalid pin\n", __func__, gpio); | ||
109 | return -1; | ||
110 | } | ||
111 | |||
112 | return gpio; | ||
113 | } | ||
114 | |||
94 | void pxa2xx_mfp_config(unsigned long *mfp_cfgs, int num) | 115 | void pxa2xx_mfp_config(unsigned long *mfp_cfgs, int num) |
95 | { | 116 | { |
96 | unsigned long flags; | 117 | unsigned long flags; |
@@ -99,13 +120,9 @@ void pxa2xx_mfp_config(unsigned long *mfp_cfgs, int num) | |||
99 | 120 | ||
100 | for (i = 0, c = mfp_cfgs; i < num; i++, c++) { | 121 | for (i = 0, c = mfp_cfgs; i < num; i++, c++) { |
101 | 122 | ||
102 | gpio = mfp_to_gpio(MFP_PIN(*c)); | 123 | gpio = __mfp_validate(MFP_PIN(*c)); |
103 | 124 | if (gpio < 0) | |
104 | if (!gpio_desc[gpio].valid) { | ||
105 | pr_warning("%s: GPIO%d is invalid pin\n", | ||
106 | __func__, gpio); | ||
107 | continue; | 125 | continue; |
108 | } | ||
109 | 126 | ||
110 | local_irq_save(flags); | 127 | local_irq_save(flags); |
111 | 128 | ||
@@ -116,6 +133,20 @@ void pxa2xx_mfp_config(unsigned long *mfp_cfgs, int num) | |||
116 | } | 133 | } |
117 | } | 134 | } |
118 | 135 | ||
136 | void pxa2xx_mfp_set_lpm(int mfp, unsigned long lpm) | ||
137 | { | ||
138 | unsigned long flags; | ||
139 | int gpio; | ||
140 | |||
141 | gpio = __mfp_validate(mfp); | ||
142 | if (gpio < 0) | ||
143 | return; | ||
144 | |||
145 | local_irq_save(flags); | ||
146 | __mfp_config_lpm(gpio, lpm); | ||
147 | local_irq_restore(flags); | ||
148 | } | ||
149 | |||
119 | int gpio_set_wake(unsigned int gpio, unsigned int on) | 150 | int gpio_set_wake(unsigned int gpio, unsigned int on) |
120 | { | 151 | { |
121 | struct gpio_desc *d; | 152 | struct gpio_desc *d; |
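The mfp-pxa2xx.c refactor pulls the low-power-state switch out into __mfp_config_lpm(), adds a shared __mfp_validate() check, and exposes a new pxa2xx_mfp_set_lpm() helper so a single pin's sleep state can be changed after the boot-time table has been applied. A hedged usage sketch; the pin, the calling context and the header location are illustrative assumptions:

#include <asm/arch/mfp-pxa2xx.h>	/* assumed home of the new declaration */

/* e.g. from a board's suspend preparation: drive GPIO9 low during sleep
 * instead of whatever the initial MFP table selected for it */
static void example_prepare_suspend(void)
{
	pxa2xx_mfp_set_lpm(MFP_PIN_GPIO9, MFP_LPM_DRIVE_LOW);
}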
diff --git a/arch/arm/mach-pxa/palmtx.c b/arch/arm/mach-pxa/palmtx.c new file mode 100644 index 000000000000..408657a24f8c --- /dev/null +++ b/arch/arm/mach-pxa/palmtx.c | |||
@@ -0,0 +1,416 @@ | |||
1 | /* | ||
2 | * Hardware definitions for PalmTX | ||
3 | * | ||
4 | * Author: Marek Vasut <marek.vasut@gmail.com> | ||
5 | * | ||
6 | * Based on work of: | ||
7 | * Alex Osborne <ato@meshy.org> | ||
8 | * Cristiano P. <cristianop@users.sourceforge.net> | ||
9 | * Jan Herman <2hp@seznam.cz> | ||
10 | * Michal Hrusecky | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License version 2 as | ||
14 | * published by the Free Software Foundation. | ||
15 | * | ||
16 | * (find more info at www.hackndev.com) | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #include <linux/platform_device.h> | ||
21 | #include <linux/delay.h> | ||
22 | #include <linux/irq.h> | ||
23 | #include <linux/gpio_keys.h> | ||
24 | #include <linux/input.h> | ||
25 | #include <linux/pda_power.h> | ||
26 | #include <linux/pwm_backlight.h> | ||
27 | #include <linux/gpio.h> | ||
28 | |||
29 | #include <asm/mach-types.h> | ||
30 | #include <asm/mach/arch.h> | ||
31 | #include <asm/mach/map.h> | ||
32 | |||
33 | #include <asm/arch/audio.h> | ||
34 | #include <asm/arch/palmtx.h> | ||
35 | #include <asm/arch/mmc.h> | ||
36 | #include <asm/arch/pxafb.h> | ||
37 | #include <asm/arch/pxa-regs.h> | ||
38 | #include <asm/arch/mfp-pxa27x.h> | ||
39 | #include <asm/arch/irda.h> | ||
40 | #include <asm/arch/pxa27x_keypad.h> | ||
41 | #include <asm/arch/udc.h> | ||
42 | |||
43 | #include "generic.h" | ||
44 | #include "devices.h" | ||
45 | |||
46 | /****************************************************************************** | ||
47 | * Pin configuration | ||
48 | ******************************************************************************/ | ||
49 | static unsigned long palmtx_pin_config[] __initdata = { | ||
50 | /* MMC */ | ||
51 | GPIO32_MMC_CLK, | ||
52 | GPIO92_MMC_DAT_0, | ||
53 | GPIO109_MMC_DAT_1, | ||
54 | GPIO110_MMC_DAT_2, | ||
55 | GPIO111_MMC_DAT_3, | ||
56 | GPIO112_MMC_CMD, | ||
57 | |||
58 | /* AC97 */ | ||
59 | GPIO28_AC97_BITCLK, | ||
60 | GPIO29_AC97_SDATA_IN_0, | ||
61 | GPIO30_AC97_SDATA_OUT, | ||
62 | GPIO31_AC97_SYNC, | ||
63 | |||
64 | /* IrDA */ | ||
65 | GPIO46_FICP_RXD, | ||
66 | GPIO47_FICP_TXD, | ||
67 | |||
68 | /* PWM */ | ||
69 | GPIO16_PWM0_OUT, | ||
70 | |||
71 | /* USB */ | ||
72 | GPIO13_GPIO, | ||
73 | |||
74 | /* PCMCIA */ | ||
75 | GPIO48_nPOE, | ||
76 | GPIO49_nPWE, | ||
77 | GPIO50_nPIOR, | ||
78 | GPIO51_nPIOW, | ||
79 | GPIO85_nPCE_1, | ||
80 | GPIO54_nPCE_2, | ||
81 | GPIO79_PSKTSEL, | ||
82 | GPIO55_nPREG, | ||
83 | GPIO56_nPWAIT, | ||
84 | GPIO57_nIOIS16, | ||
85 | }; | ||
86 | |||
87 | /****************************************************************************** | ||
88 | * SD/MMC card controller | ||
89 | ******************************************************************************/ | ||
90 | static int palmtx_mci_init(struct device *dev, irq_handler_t palmtx_detect_int, | ||
91 | void *data) | ||
92 | { | ||
93 | int err = 0; | ||
94 | |||
95 | /* Setup an interrupt for detecting card insert/remove events */ | ||
96 | err = request_irq(IRQ_GPIO_PALMTX_SD_DETECT_N, palmtx_detect_int, | ||
97 | IRQF_DISABLED | IRQF_SAMPLE_RANDOM | | ||
98 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, | ||
99 | "SD/MMC card detect", data); | ||
100 | if (err) { | ||
101 | printk(KERN_ERR "%s: cannot request SD/MMC card detect IRQ\n", | ||
102 | __func__); | ||
103 | return err; | ||
104 | } | ||
105 | |||
106 | err = gpio_request(GPIO_NR_PALMTX_SD_POWER, "SD_POWER"); | ||
107 | if (err) | ||
108 | goto pwr_err; | ||
109 | |||
110 | err = gpio_request(GPIO_NR_PALMTX_SD_READONLY, "SD_READONLY"); | ||
111 | if (err) | ||
112 | goto ro_err; | ||
113 | |||
114 | printk(KERN_DEBUG "%s: irq registered\n", __func__); | ||
115 | |||
116 | return 0; | ||
117 | |||
118 | ro_err: | ||
119 | gpio_free(GPIO_NR_PALMTX_SD_POWER); | ||
120 | pwr_err: | ||
121 | free_irq(IRQ_GPIO_PALMTX_SD_DETECT_N, data); | ||
122 | return err; | ||
123 | } | ||
124 | |||
125 | static void palmtx_mci_exit(struct device *dev, void *data) | ||
126 | { | ||
127 | gpio_free(GPIO_NR_PALMTX_SD_READONLY); | ||
128 | gpio_free(GPIO_NR_PALMTX_SD_POWER); | ||
129 | free_irq(IRQ_GPIO_PALMTX_SD_DETECT_N, data); | ||
130 | } | ||
131 | |||
132 | static void palmtx_mci_power(struct device *dev, unsigned int vdd) | ||
133 | { | ||
134 | struct pxamci_platform_data *p_d = dev->platform_data; | ||
135 | gpio_set_value(GPIO_NR_PALMTX_SD_POWER, p_d->ocr_mask & (1 << vdd)); | ||
136 | } | ||
137 | |||
138 | static int palmtx_mci_get_ro(struct device *dev) | ||
139 | { | ||
140 | return gpio_get_value(GPIO_NR_PALMTX_SD_READONLY); | ||
141 | } | ||
142 | |||
143 | static struct pxamci_platform_data palmtx_mci_platform_data = { | ||
144 | .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, | ||
145 | .setpower = palmtx_mci_power, | ||
146 | .get_ro = palmtx_mci_get_ro, | ||
147 | .init = palmtx_mci_init, | ||
148 | .exit = palmtx_mci_exit, | ||
149 | }; | ||
150 | |||
151 | /****************************************************************************** | ||
152 | * GPIO keyboard | ||
153 | ******************************************************************************/ | ||
154 | static unsigned int palmtx_matrix_keys[] = { | ||
155 | KEY(0, 0, KEY_POWER), | ||
156 | KEY(0, 1, KEY_F1), | ||
157 | KEY(0, 2, KEY_ENTER), | ||
158 | |||
159 | KEY(1, 0, KEY_F2), | ||
160 | KEY(1, 1, KEY_F3), | ||
161 | KEY(1, 2, KEY_F4), | ||
162 | |||
163 | KEY(2, 0, KEY_UP), | ||
164 | KEY(2, 2, KEY_DOWN), | ||
165 | |||
166 | KEY(3, 0, KEY_RIGHT), | ||
167 | KEY(3, 2, KEY_LEFT), | ||
168 | |||
169 | }; | ||
170 | |||
171 | static struct pxa27x_keypad_platform_data palmtx_keypad_platform_data = { | ||
172 | .matrix_key_rows = 4, | ||
173 | .matrix_key_cols = 3, | ||
174 | .matrix_key_map = palmtx_matrix_keys, | ||
175 | .matrix_key_map_size = ARRAY_SIZE(palmtx_matrix_keys), | ||
176 | |||
177 | .debounce_interval = 30, | ||
178 | }; | ||
179 | |||
180 | /****************************************************************************** | ||
181 | * GPIO keys | ||
182 | ******************************************************************************/ | ||
183 | static struct gpio_keys_button palmtx_pxa_buttons[] = { | ||
184 | {KEY_F8, GPIO_NR_PALMTX_HOTSYNC_BUTTON_N, 1, "HotSync Button" }, | ||
185 | }; | ||
186 | |||
187 | static struct gpio_keys_platform_data palmtx_pxa_keys_data = { | ||
188 | .buttons = palmtx_pxa_buttons, | ||
189 | .nbuttons = ARRAY_SIZE(palmtx_pxa_buttons), | ||
190 | }; | ||
191 | |||
192 | static struct platform_device palmtx_pxa_keys = { | ||
193 | .name = "gpio-keys", | ||
194 | .id = -1, | ||
195 | .dev = { | ||
196 | .platform_data = &palmtx_pxa_keys_data, | ||
197 | }, | ||
198 | }; | ||
199 | |||
200 | /****************************************************************************** | ||
201 | * Backlight | ||
202 | ******************************************************************************/ | ||
203 | static int palmtx_backlight_init(struct device *dev) | ||
204 | { | ||
205 | int ret; | ||
206 | |||
207 | ret = gpio_request(GPIO_NR_PALMTX_BL_POWER, "BL POWER"); | ||
208 | if (ret) | ||
209 | goto err; | ||
210 | ret = gpio_request(GPIO_NR_PALMTX_LCD_POWER, "LCD POWER"); | ||
211 | if (ret) | ||
212 | goto err2; | ||
213 | |||
214 | return 0; | ||
215 | err2: | ||
216 | gpio_free(GPIO_NR_PALMTX_BL_POWER); | ||
217 | err: | ||
218 | return ret; | ||
219 | } | ||
220 | |||
221 | static int palmtx_backlight_notify(int brightness) | ||
222 | { | ||
223 | gpio_set_value(GPIO_NR_PALMTX_BL_POWER, brightness); | ||
224 | gpio_set_value(GPIO_NR_PALMTX_LCD_POWER, brightness); | ||
225 | return brightness; | ||
226 | } | ||
227 | |||
228 | static void palmtx_backlight_exit(struct device *dev) | ||
229 | { | ||
230 | gpio_free(GPIO_NR_PALMTX_BL_POWER); | ||
231 | gpio_free(GPIO_NR_PALMTX_LCD_POWER); | ||
232 | } | ||
233 | |||
234 | static struct platform_pwm_backlight_data palmtx_backlight_data = { | ||
235 | .pwm_id = 0, | ||
236 | .max_brightness = PALMTX_MAX_INTENSITY, | ||
237 | .dft_brightness = PALMTX_MAX_INTENSITY, | ||
238 | .pwm_period_ns = PALMTX_PERIOD_NS, | ||
239 | .init = palmtx_backlight_init, | ||
240 | .notify = palmtx_backlight_notify, | ||
241 | .exit = palmtx_backlight_exit, | ||
242 | }; | ||
243 | |||
244 | static struct platform_device palmtx_backlight = { | ||
245 | .name = "pwm-backlight", | ||
246 | .dev = { | ||
247 | .parent = &pxa27x_device_pwm0.dev, | ||
248 | .platform_data = &palmtx_backlight_data, | ||
249 | }, | ||
250 | }; | ||
251 | |||
252 | /****************************************************************************** | ||
253 | * IrDA | ||
254 | ******************************************************************************/ | ||
255 | static void palmtx_irda_transceiver_mode(struct device *dev, int mode) | ||
256 | { | ||
257 | gpio_set_value(GPIO_NR_PALMTX_IR_DISABLE, mode & IR_OFF); | ||
258 | pxa2xx_transceiver_mode(dev, mode); | ||
259 | } | ||
260 | |||
261 | static struct pxaficp_platform_data palmtx_ficp_platform_data = { | ||
262 | .transceiver_cap = IR_SIRMODE | IR_FIRMODE | IR_OFF, | ||
263 | .transceiver_mode = palmtx_irda_transceiver_mode, | ||
264 | }; | ||
265 | |||
266 | /****************************************************************************** | ||
267 | * UDC | ||
268 | ******************************************************************************/ | ||
269 | static void palmtx_udc_command(int cmd) | ||
270 | { | ||
271 | gpio_set_value(GPIO_NR_PALMTX_USB_POWER, !cmd); | ||
272 | udelay(50); | ||
273 | gpio_set_value(GPIO_NR_PALMTX_USB_PULLUP, !cmd); | ||
274 | } | ||
275 | |||
276 | static struct pxa2xx_udc_mach_info palmtx_udc_info __initdata = { | ||
277 | .gpio_vbus = GPIO_NR_PALMTX_USB_DETECT_N, | ||
278 | .gpio_vbus_inverted = 1, | ||
279 | .udc_command = palmtx_udc_command, | ||
280 | }; | ||
281 | |||
282 | /****************************************************************************** | ||
283 | * Power supply | ||
284 | ******************************************************************************/ | ||
285 | static int power_supply_init(struct device *dev) | ||
286 | { | ||
287 | int ret; | ||
288 | |||
289 | ret = gpio_request(GPIO_NR_PALMTX_POWER_DETECT, "CABLE_STATE_AC"); | ||
290 | if (ret) | ||
291 | goto err_cs_ac; | ||
292 | |||
293 | ret = gpio_request(GPIO_NR_PALMTX_USB_DETECT_N, "CABLE_STATE_USB"); | ||
294 | if (ret) | ||
295 | goto err_cs_usb; | ||
296 | |||
297 | return 0; | ||
298 | |||
299 | err_cs_usb: | ||
300 | gpio_free(GPIO_NR_PALMTX_POWER_DETECT); | ||
301 | err_cs_ac: | ||
302 | return ret; | ||
303 | } | ||
304 | |||
305 | static int palmtx_is_ac_online(void) | ||
306 | { | ||
307 | return gpio_get_value(GPIO_NR_PALMTX_POWER_DETECT); | ||
308 | } | ||
309 | |||
310 | static int palmtx_is_usb_online(void) | ||
311 | { | ||
312 | return !gpio_get_value(GPIO_NR_PALMTX_USB_DETECT_N); | ||
313 | } | ||
314 | |||
315 | static void power_supply_exit(struct device *dev) | ||
316 | { | ||
317 | gpio_free(GPIO_NR_PALMTX_USB_DETECT_N); | ||
318 | gpio_free(GPIO_NR_PALMTX_POWER_DETECT); | ||
319 | } | ||
320 | |||
321 | static char *palmtx_supplicants[] = { | ||
322 | "main-battery", | ||
323 | }; | ||
324 | |||
325 | static struct pda_power_pdata power_supply_info = { | ||
326 | .init = power_supply_init, | ||
327 | .is_ac_online = palmtx_is_ac_online, | ||
328 | .is_usb_online = palmtx_is_usb_online, | ||
329 | .exit = power_supply_exit, | ||
330 | .supplied_to = palmtx_supplicants, | ||
331 | .num_supplicants = ARRAY_SIZE(palmtx_supplicants), | ||
332 | }; | ||
333 | |||
334 | static struct platform_device power_supply = { | ||
335 | .name = "pda-power", | ||
336 | .id = -1, | ||
337 | .dev = { | ||
338 | .platform_data = &power_supply_info, | ||
339 | }, | ||
340 | }; | ||
341 | |||
342 | /****************************************************************************** | ||
343 | * Framebuffer | ||
344 | ******************************************************************************/ | ||
345 | static struct pxafb_mode_info palmtx_lcd_modes[] = { | ||
346 | { | ||
347 | .pixclock = 57692, | ||
348 | .xres = 320, | ||
349 | .yres = 480, | ||
350 | .bpp = 16, | ||
351 | |||
352 | .left_margin = 32, | ||
353 | .right_margin = 1, | ||
354 | .upper_margin = 7, | ||
355 | .lower_margin = 1, | ||
356 | |||
357 | .hsync_len = 4, | ||
358 | .vsync_len = 1, | ||
359 | }, | ||
360 | }; | ||
361 | |||
362 | static struct pxafb_mach_info palmtx_lcd_screen = { | ||
363 | .modes = palmtx_lcd_modes, | ||
364 | .num_modes = ARRAY_SIZE(palmtx_lcd_modes), | ||
365 | .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL, | ||
366 | }; | ||
367 | |||
368 | /****************************************************************************** | ||
369 | * Machine init | ||
370 | ******************************************************************************/ | ||
371 | static struct platform_device *devices[] __initdata = { | ||
372 | #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) | ||
373 | &palmtx_pxa_keys, | ||
374 | #endif | ||
375 | &palmtx_backlight, | ||
376 | &power_supply, | ||
377 | }; | ||
378 | |||
379 | static struct map_desc palmtx_io_desc[] __initdata = { | ||
380 | { | ||
381 | .virtual = PALMTX_PCMCIA_VIRT, | ||
382 | .pfn = __phys_to_pfn(PALMTX_PCMCIA_PHYS), | ||
383 | .length = PALMTX_PCMCIA_SIZE, | ||
384 | .type = MT_DEVICE | ||
385 | }, | ||
386 | }; | ||
387 | |||
388 | static void __init palmtx_map_io(void) | ||
389 | { | ||
390 | pxa_map_io(); | ||
391 | iotable_init(palmtx_io_desc, ARRAY_SIZE(palmtx_io_desc)); | ||
392 | } | ||
393 | |||
394 | static void __init palmtx_init(void) | ||
395 | { | ||
396 | pxa2xx_mfp_config(ARRAY_AND_SIZE(palmtx_pin_config)); | ||
397 | |||
398 | set_pxa_fb_info(&palmtx_lcd_screen); | ||
399 | pxa_set_mci_info(&palmtx_mci_platform_data); | ||
400 | pxa_set_udc_info(&palmtx_udc_info); | ||
401 | pxa_set_ac97_info(NULL); | ||
402 | pxa_set_ficp_info(&palmtx_ficp_platform_data); | ||
403 | pxa_set_keypad_info(&palmtx_keypad_platform_data); | ||
404 | |||
405 | platform_add_devices(devices, ARRAY_SIZE(devices)); | ||
406 | } | ||
407 | |||
408 | MACHINE_START(PALMTX, "Palm T|X") | ||
409 | .phys_io = PALMTX_PHYS_IO_START, | ||
410 | .io_pg_offst = io_p2v(0x40000000), | ||
411 | .boot_params = 0xa0000100, | ||
412 | .map_io = palmtx_map_io, | ||
413 | .init_irq = pxa27x_init_irq, | ||
414 | .timer = &pxa_timer, | ||
415 | .init_machine = palmtx_init | ||
416 | MACHINE_END | ||
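The new palmtx.c follows the usual mach-pxa board-file layout: an __initdata MFP table, MMC host hooks with paired init/exit, a keypad matrix, PWM backlight, IrDA, UDC, a pda-power supply and the framebuffer mode, all wired up in palmtx_init() and declared through MACHINE_START/MACHINE_END. The "main-battery" string in palmtx_supplicants names the consumer that pda-power notifies when a cable appears or disappears; as a hedged sketch (field names follow the power-supply API of this era, and the driver shown is invented for illustration), the other end of that link looks roughly like this:

#include <linux/power_supply.h>

static void example_external_power_changed(struct power_supply *psy)
{
	/* AC/USB presence changed: re-evaluate the charging state here */
}

static struct power_supply example_battery = {
	.name			= "main-battery",
	.type			= POWER_SUPPLY_TYPE_BATTERY,
	.external_power_changed	= example_external_power_changed,
};

/* registered from the battery driver's probe with
 * power_supply_register(parent_dev, &example_battery); */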
diff --git a/arch/arm/mach-pxa/pcm027.c b/arch/arm/mach-pxa/pcm027.c index 3b945eb0aee3..377f3be8ce57 100644 --- a/arch/arm/mach-pxa/pcm027.c +++ b/arch/arm/mach-pxa/pcm027.c | |||
@@ -24,7 +24,9 @@ | |||
24 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
25 | #include <linux/mtd/physmap.h> | 25 | #include <linux/mtd/physmap.h> |
26 | #include <linux/spi/spi.h> | 26 | #include <linux/spi/spi.h> |
27 | #include <linux/spi/max7301.h> | ||
27 | #include <linux/leds.h> | 28 | #include <linux/leds.h> |
29 | |||
28 | #include <asm/mach-types.h> | 30 | #include <asm/mach-types.h> |
29 | #include <asm/mach/arch.h> | 31 | #include <asm/mach/arch.h> |
30 | #include <asm/arch/hardware.h> | 32 | #include <asm/arch/hardware.h> |
@@ -108,6 +110,32 @@ static struct platform_device smc91x_device = { | |||
108 | .resource = smc91x_resources, | 110 | .resource = smc91x_resources, |
109 | }; | 111 | }; |
110 | 112 | ||
113 | /* | ||
114 | * SPI host and devices | ||
115 | */ | ||
116 | static struct pxa2xx_spi_master pxa_ssp_master_info = { | ||
117 | .num_chipselect = 1, | ||
118 | }; | ||
119 | |||
120 | static struct max7301_platform_data max7301_info = { | ||
121 | .base = -1, | ||
122 | }; | ||
123 | |||
124 | /* bus_num must match id in pxa2xx_set_spi_info() call */ | ||
125 | static struct spi_board_info spi_board_info[] __initdata = { | ||
126 | { | ||
127 | .modalias = "max7301", | ||
128 | .platform_data = &max7301_info, | ||
129 | .max_speed_hz = 13000000, | ||
130 | .bus_num = 1, | ||
131 | .chip_select = 0, | ||
132 | .mode = SPI_MODE_0, | ||
133 | }, | ||
134 | }; | ||
135 | |||
136 | /* | ||
137 | * NOR flash | ||
138 | */ | ||
111 | static struct physmap_flash_data pcm027_flash_data = { | 139 | static struct physmap_flash_data pcm027_flash_data = { |
112 | .width = 4, | 140 | .width = 4, |
113 | }; | 141 | }; |
@@ -190,6 +218,9 @@ static void __init pcm027_init(void) | |||
190 | #ifdef CONFIG_MACH_PCM990_BASEBOARD | 218 | #ifdef CONFIG_MACH_PCM990_BASEBOARD |
191 | pcm990_baseboard_init(); | 219 | pcm990_baseboard_init(); |
192 | #endif | 220 | #endif |
221 | |||
222 | pxa2xx_set_spi_info(1, &pxa_ssp_master_info); | ||
223 | spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info)); | ||
193 | } | 224 | } |
194 | 225 | ||
195 | static void __init pcm027_map_io(void) | 226 | static void __init pcm027_map_io(void) |
diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c index 5d87c7c866e4..30023b00e476 100644 --- a/arch/arm/mach-pxa/pcm990-baseboard.c +++ b/arch/arm/mach-pxa/pcm990-baseboard.c | |||
@@ -33,14 +33,30 @@ | |||
33 | #include <asm/arch/camera.h> | 33 | #include <asm/arch/camera.h> |
34 | #include <asm/mach/map.h> | 34 | #include <asm/mach/map.h> |
35 | #include <asm/arch/pxa-regs.h> | 35 | #include <asm/arch/pxa-regs.h> |
36 | #include <asm/arch/pxa2xx-gpio.h> | ||
37 | #include <asm/arch/audio.h> | 36 | #include <asm/arch/audio.h> |
38 | #include <asm/arch/mmc.h> | 37 | #include <asm/arch/mmc.h> |
39 | #include <asm/arch/ohci.h> | 38 | #include <asm/arch/ohci.h> |
40 | #include <asm/arch/pcm990_baseboard.h> | 39 | #include <asm/arch/pcm990_baseboard.h> |
41 | #include <asm/arch/pxafb.h> | 40 | #include <asm/arch/pxafb.h> |
41 | #include <asm/arch/mfp-pxa27x.h> | ||
42 | 42 | ||
43 | #include "devices.h" | 43 | #include "devices.h" |
44 | #include "generic.h" | ||
45 | |||
46 | static unsigned long pcm990_pin_config[] __initdata = { | ||
47 | /* MMC */ | ||
48 | GPIO32_MMC_CLK, | ||
49 | GPIO112_MMC_CMD, | ||
50 | GPIO92_MMC_DAT_0, | ||
51 | GPIO109_MMC_DAT_1, | ||
52 | GPIO110_MMC_DAT_2, | ||
53 | GPIO111_MMC_DAT_3, | ||
54 | /* USB */ | ||
55 | GPIO88_USBH1_PWR, | ||
56 | GPIO89_USBH1_PEN, | ||
57 | /* PWM0 */ | ||
58 | GPIO16_PWM0_OUT, | ||
59 | }; | ||
44 | 60 | ||
45 | /* | 61 | /* |
46 | * pcm990_lcd_power - control power supply to the LCD | 62 | * pcm990_lcd_power - control power supply to the LCD |
@@ -277,16 +293,6 @@ static int pcm990_mci_init(struct device *dev, irq_handler_t mci_detect_int, | |||
277 | { | 293 | { |
278 | int err; | 294 | int err; |
279 | 295 | ||
280 | /* | ||
281 | * enable GPIO for PXA27x MMC controller | ||
282 | */ | ||
283 | pxa_gpio_mode(GPIO32_MMCCLK_MD); | ||
284 | pxa_gpio_mode(GPIO112_MMCCMD_MD); | ||
285 | pxa_gpio_mode(GPIO92_MMCDAT0_MD); | ||
286 | pxa_gpio_mode(GPIO109_MMCDAT1_MD); | ||
287 | pxa_gpio_mode(GPIO110_MMCDAT2_MD); | ||
288 | pxa_gpio_mode(GPIO111_MMCDAT3_MD); | ||
289 | |||
290 | err = request_irq(PCM027_MMCDET_IRQ, mci_detect_int, IRQF_DISABLED, | 296 | err = request_irq(PCM027_MMCDET_IRQ, mci_detect_int, IRQF_DISABLED, |
291 | "MMC card detect", data); | 297 | "MMC card detect", data); |
292 | if (err) | 298 | if (err) |
@@ -333,8 +339,6 @@ static struct pxamci_platform_data pcm990_mci_platform_data = { | |||
333 | */ | 339 | */ |
334 | static int pcm990_ohci_init(struct device *dev) | 340 | static int pcm990_ohci_init(struct device *dev) |
335 | { | 341 | { |
336 | pxa_gpio_mode(PCM990_USB_OVERCURRENT); | ||
337 | pxa_gpio_mode(PCM990_USB_PWR_EN); | ||
338 | /* | 342 | /* |
339 | * disable USB port 2 and 3 | 343 | * disable USB port 2 and 3 |
340 | * power sense is active low | 344 | * power sense is active low |
@@ -361,23 +365,27 @@ static struct pxaohci_platform_data pcm990_ohci_platform_data = { | |||
361 | * PXA27x Camera specific stuff | 365 | * PXA27x Camera specific stuff |
362 | */ | 366 | */ |
363 | #if defined(CONFIG_VIDEO_PXA27x) || defined(CONFIG_VIDEO_PXA27x_MODULE) | 367 | #if defined(CONFIG_VIDEO_PXA27x) || defined(CONFIG_VIDEO_PXA27x_MODULE) |
368 | static unsigned long pcm990_camera_pin_config[] = { | ||
369 | /* CIF */ | ||
370 | GPIO98_CIF_DD_0, | ||
371 | GPIO105_CIF_DD_1, | ||
372 | GPIO104_CIF_DD_2, | ||
373 | GPIO103_CIF_DD_3, | ||
374 | GPIO95_CIF_DD_4, | ||
375 | GPIO94_CIF_DD_5, | ||
376 | GPIO93_CIF_DD_6, | ||
377 | GPIO108_CIF_DD_7, | ||
378 | GPIO107_CIF_DD_8, | ||
379 | GPIO106_CIF_DD_9, | ||
380 | GPIO42_CIF_MCLK, | ||
381 | GPIO45_CIF_PCLK, | ||
382 | GPIO43_CIF_FV, | ||
383 | GPIO44_CIF_LV, | ||
384 | }; | ||
385 | |||
364 | static int pcm990_pxacamera_init(struct device *dev) | 386 | static int pcm990_pxacamera_init(struct device *dev) |
365 | { | 387 | { |
366 | pxa_gpio_mode(GPIO98_CIF_DD_0_MD); | 388 | pxa2xx_mfp_config(ARRAY_AND_SIZE(pcm990_camera_pin_config)); |
367 | pxa_gpio_mode(GPIO105_CIF_DD_1_MD); | ||
368 | pxa_gpio_mode(GPIO104_CIF_DD_2_MD); | ||
369 | pxa_gpio_mode(GPIO103_CIF_DD_3_MD); | ||
370 | pxa_gpio_mode(GPIO95_CIF_DD_4_MD); | ||
371 | pxa_gpio_mode(GPIO94_CIF_DD_5_MD); | ||
372 | pxa_gpio_mode(GPIO93_CIF_DD_6_MD); | ||
373 | pxa_gpio_mode(GPIO108_CIF_DD_7_MD); | ||
374 | pxa_gpio_mode(GPIO107_CIF_DD_8_MD); | ||
375 | pxa_gpio_mode(GPIO106_CIF_DD_9_MD); | ||
376 | pxa_gpio_mode(GPIO42_CIF_MCLK_MD); | ||
377 | pxa_gpio_mode(GPIO45_CIF_PCLK_MD); | ||
378 | pxa_gpio_mode(GPIO43_CIF_FV_MD); | ||
379 | pxa_gpio_mode(GPIO44_CIF_LV_MD); | ||
380 | |||
381 | return 0; | 389 | return 0; |
382 | } | 390 | } |
383 | 391 | ||
@@ -449,8 +457,10 @@ static struct map_desc pcm990_io_desc[] __initdata = { | |||
449 | */ | 457 | */ |
450 | void __init pcm990_baseboard_init(void) | 458 | void __init pcm990_baseboard_init(void) |
451 | { | 459 | { |
460 | pxa2xx_mfp_config(ARRAY_AND_SIZE(pcm990_pin_config)); | ||
461 | |||
452 | /* register CPLD access */ | 462 | /* register CPLD access */ |
453 | iotable_init(pcm990_io_desc, ARRAY_SIZE(pcm990_io_desc)); | 463 | iotable_init(ARRAY_AND_SIZE(pcm990_io_desc)); |
454 | 464 | ||
455 | /* register CPLD's IRQ controller */ | 465 | /* register CPLD's IRQ controller */ |
456 | pcm990_init_irq(); | 466 | pcm990_init_irq(); |
@@ -458,7 +468,6 @@ void __init pcm990_baseboard_init(void) | |||
458 | #ifndef CONFIG_PCM990_DISPLAY_NONE | 468 | #ifndef CONFIG_PCM990_DISPLAY_NONE |
459 | set_pxa_fb_info(&pcm990_fbinfo); | 469 | set_pxa_fb_info(&pcm990_fbinfo); |
460 | #endif | 470 | #endif |
461 | pxa_gpio_mode(GPIO16_PWM0_MD); | ||
462 | platform_device_register(&pcm990_backlight_device); | 471 | platform_device_register(&pcm990_backlight_device); |
463 | 472 | ||
464 | /* MMC */ | 473 | /* MMC */ |
@@ -473,9 +482,8 @@ void __init pcm990_baseboard_init(void) | |||
473 | #if defined(CONFIG_VIDEO_PXA27x) || defined(CONFIG_VIDEO_PXA27x_MODULE) | 482 | #if defined(CONFIG_VIDEO_PXA27x) || defined(CONFIG_VIDEO_PXA27x_MODULE) |
474 | pxa_set_camera_info(&pcm990_pxacamera_platform_data); | 483 | pxa_set_camera_info(&pcm990_pxacamera_platform_data); |
475 | 484 | ||
476 | i2c_register_board_info(0, pcm990_i2c_devices, | 485 | i2c_register_board_info(0, ARRAY_AND_SIZE(pcm990_i2c_devices)); |
477 | ARRAY_SIZE(pcm990_i2c_devices)); | ||
478 | #endif | 486 | #endif |
479 | 487 | ||
480 | printk(KERN_INFO"PCM-990 Evaluation baseboard initialized\n"); | 488 | printk(KERN_INFO "PCM-990 Evaluation baseboard initialized\n"); |
481 | } | 489 | } |
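The pcm990 baseboard conversion replaces per-pin pxa_gpio_mode() calls with __initdata MFP tables handed to pxa2xx_mfp_config(), and switches several call sites to the ARRAY_AND_SIZE() shorthand. That macro is a mach-pxa local convenience from the generic.h included here; the definition below is reproduced from memory, so treat its exact form as an assumption:

/* assumed definition, from arch/arm/mach-pxa/generic.h */
#define ARRAY_AND_SIZE(x)	(x), ARRAY_SIZE(x)

/* so */
pxa2xx_mfp_config(ARRAY_AND_SIZE(pcm990_pin_config));
/* is shorthand for */
pxa2xx_mfp_config(pcm990_pin_config, ARRAY_SIZE(pcm990_pin_config));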
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c index f81c10cafd48..39612cfa0b4d 100644 --- a/arch/arm/mach-pxa/poodle.c +++ b/arch/arm/mach-pxa/poodle.c | |||
@@ -267,6 +267,7 @@ static void poodle_irda_transceiver_mode(struct device *dev, int mode) | |||
267 | } else { | 267 | } else { |
268 | GPCR(POODLE_GPIO_IR_ON) = GPIO_bit(POODLE_GPIO_IR_ON); | 268 | GPCR(POODLE_GPIO_IR_ON) = GPIO_bit(POODLE_GPIO_IR_ON); |
269 | } | 269 | } |
270 | pxa2xx_transceiver_mode(dev, mode); | ||
270 | } | 271 | } |
271 | 272 | ||
272 | static struct pxaficp_platform_data poodle_ficp_platform_data = { | 273 | static struct pxaficp_platform_data poodle_ficp_platform_data = { |
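poodle picks up the same one-line change as lubbock, magician, mainstone and palmtx: every board-specific IrDA transceiver hook now ends by calling the common pxa2xx handler, so SIR/FIR switching lives in one place. The pattern, with a placeholder enable GPIO and an assumed header location for the helper:

#include <linux/device.h>
#include <linux/gpio.h>
#include <asm/arch/irda.h>	/* IR_OFF, pxa2xx_transceiver_mode() (assumed) */

#define EXAMPLE_GPIO_IR_DISABLE	40	/* placeholder pin */

static void example_irda_transceiver_mode(struct device *dev, int mode)
{
	/* handle the board's own shutdown line ... */
	gpio_set_value(EXAMPLE_GPIO_IR_DISABLE, mode & IR_OFF);
	/* ... then let the common code switch the FICP between SIR and FIR */
	pxa2xx_transceiver_mode(dev, mode);
}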
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c index 4cd50e3005e9..c5b845b935bb 100644 --- a/arch/arm/mach-pxa/pxa25x.c +++ b/arch/arm/mach-pxa/pxa25x.c | |||
@@ -109,6 +109,52 @@ static const struct clkops clk_pxa25x_lcd_ops = { | |||
109 | .getrate = clk_pxa25x_lcd_getrate, | 109 | .getrate = clk_pxa25x_lcd_getrate, |
110 | }; | 110 | }; |
111 | 111 | ||
112 | static unsigned long gpio12_config_32k[] = { | ||
113 | GPIO12_32KHz, | ||
114 | }; | ||
115 | |||
116 | static unsigned long gpio12_config_gpio[] = { | ||
117 | GPIO12_GPIO, | ||
118 | }; | ||
119 | |||
120 | static void clk_gpio12_enable(struct clk *clk) | ||
121 | { | ||
122 | pxa2xx_mfp_config(gpio12_config_32k, 1); | ||
123 | } | ||
124 | |||
125 | static void clk_gpio12_disable(struct clk *clk) | ||
126 | { | ||
127 | pxa2xx_mfp_config(gpio12_config_gpio, 1); | ||
128 | } | ||
129 | |||
130 | static const struct clkops clk_pxa25x_gpio12_ops = { | ||
131 | .enable = clk_gpio12_enable, | ||
132 | .disable = clk_gpio12_disable, | ||
133 | }; | ||
134 | |||
135 | static unsigned long gpio11_config_3m6[] = { | ||
136 | GPIO11_3_6MHz, | ||
137 | }; | ||
138 | |||
139 | static unsigned long gpio11_config_gpio[] = { | ||
140 | GPIO11_GPIO, | ||
141 | }; | ||
142 | |||
143 | static void clk_gpio11_enable(struct clk *clk) | ||
144 | { | ||
145 | pxa2xx_mfp_config(gpio11_config_3m6, 1); | ||
146 | } | ||
147 | |||
148 | static void clk_gpio11_disable(struct clk *clk) | ||
149 | { | ||
150 | pxa2xx_mfp_config(gpio11_config_gpio, 1); | ||
151 | } | ||
152 | |||
153 | static const struct clkops clk_pxa25x_gpio11_ops = { | ||
154 | .enable = clk_gpio11_enable, | ||
155 | .disable = clk_gpio11_disable, | ||
156 | }; | ||
157 | |||
112 | /* | 158 | /* |
113 | * 3.6864MHz -> OST, GPIO, SSP, PWM, PLLs (95.842MHz, 147.456MHz) | 159 | * 3.6864MHz -> OST, GPIO, SSP, PWM, PLLs (95.842MHz, 147.456MHz) |
114 | * 95.842MHz -> MMC 19.169MHz, I2C 31.949MHz, FICP 47.923MHz, USB 47.923MHz | 160 | * 95.842MHz -> MMC 19.169MHz, I2C 31.949MHz, FICP 47.923MHz, USB 47.923MHz |
@@ -128,6 +174,8 @@ static struct clk pxa25x_clks[] = { | |||
128 | INIT_CKEN("UARTCLK", BTUART, 14745600, 1, &pxa_device_btuart.dev), | 174 | INIT_CKEN("UARTCLK", BTUART, 14745600, 1, &pxa_device_btuart.dev), |
129 | INIT_CKEN("UARTCLK", STUART, 14745600, 1, NULL), | 175 | INIT_CKEN("UARTCLK", STUART, 14745600, 1, NULL), |
130 | INIT_CKEN("UDCCLK", USB, 47923000, 5, &pxa25x_device_udc.dev), | 176 | INIT_CKEN("UDCCLK", USB, 47923000, 5, &pxa25x_device_udc.dev), |
177 | INIT_CLK("GPIO11_CLK", &clk_pxa25x_gpio11_ops, 3686400, 0, NULL), | ||
178 | INIT_CLK("GPIO12_CLK", &clk_pxa25x_gpio12_ops, 32768, 0, NULL), | ||
131 | INIT_CKEN("MMCCLK", MMC, 19169000, 0, &pxa_device_mci.dev), | 179 | INIT_CKEN("MMCCLK", MMC, 19169000, 0, &pxa_device_mci.dev), |
132 | INIT_CKEN("I2CCLK", I2C, 31949000, 0, &pxa_device_i2c.dev), | 180 | INIT_CKEN("I2CCLK", I2C, 31949000, 0, &pxa_device_i2c.dev), |
133 | 181 | ||
@@ -145,7 +193,10 @@ static struct clk pxa25x_clks[] = { | |||
145 | INIT_CKEN("FICPCLK", FICP, 47923000, 0, NULL), | 193 | INIT_CKEN("FICPCLK", FICP, 47923000, 0, NULL), |
146 | }; | 194 | }; |
147 | 195 | ||
148 | static struct clk gpio7_clk = INIT_CKOTHER("GPIO7_CK", &pxa25x_clks[4], NULL); | 196 | static struct clk pxa2xx_clk_aliases[] = { |
197 | INIT_CKOTHER("GPIO7_CLK", &pxa25x_clks[4], NULL), | ||
198 | INIT_CKOTHER("SA1111_CLK", &pxa25x_clks[5], NULL), | ||
199 | }; | ||
149 | 200 | ||
150 | #ifdef CONFIG_PM | 201 | #ifdef CONFIG_PM |
151 | 202 | ||
@@ -293,7 +344,7 @@ static int __init pxa25x_init(void) | |||
293 | int i, ret = 0; | 344 | int i, ret = 0; |
294 | 345 | ||
295 | /* Only add HWUART for PXA255/26x; PXA210/250/27x do not have it. */ | 346 | /* Only add HWUART for PXA255/26x; PXA210/250/27x do not have it. */ |
296 | if (cpu_is_pxa25x()) | 347 | if (cpu_is_pxa255()) |
297 | clks_register(&pxa25x_hwuart_clk, 1); | 348 | clks_register(&pxa25x_hwuart_clk, 1); |
298 | 349 | ||
299 | if (cpu_is_pxa21x() || cpu_is_pxa25x()) { | 350 | if (cpu_is_pxa21x() || cpu_is_pxa25x()) { |
@@ -317,10 +368,10 @@ static int __init pxa25x_init(void) | |||
317 | } | 368 | } |
318 | 369 | ||
319 | /* Only add HWUART for PXA255/26x; PXA210/250/27x do not have it. */ | 370 | /* Only add HWUART for PXA255/26x; PXA210/250/27x do not have it. */ |
320 | if (cpu_is_pxa25x()) | 371 | if (cpu_is_pxa255()) |
321 | ret = platform_device_register(&pxa_device_hwuart); | 372 | ret = platform_device_register(&pxa_device_hwuart); |
322 | 373 | ||
323 | clks_register(&gpio7_clk, 1); | 374 | clks_register(pxa2xx_clk_aliases, ARRAY_SIZE(pxa2xx_clk_aliases)); |
324 | 375 | ||
325 | return ret; | 376 | return ret; |
326 | } | 377 | } |
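On pxa25x the GPIO11 (3.6864 MHz) and GPIO12 (32.768 kHz) clock outputs are now modeled as clocks whose enable/disable simply reprograms the pin between its clock-output and plain-GPIO MFP settings, a SA1111_CLK alias joins GPIO7_CLK, and the HWUART checks are tightened from cpu_is_pxa25x() to cpu_is_pxa255(), matching the comment that only PXA255/26x have that UART. A hedged sketch of a consumer of one of the new clocks through the ordinary clk API; the device pointer and the minimal error handling are illustrative:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_enable_32k_output(struct device *dev)
{
	struct clk *clk = clk_get(dev, "GPIO12_CLK");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_enable(clk);	/* GPIO12 now drives the 32.768 kHz clock */
	return 0;
}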
diff --git a/arch/arm/mach-pxa/pxa300.c b/arch/arm/mach-pxa/pxa300.c index 0a0d3877f212..da92e9733886 100644 --- a/arch/arm/mach-pxa/pxa300.c +++ b/arch/arm/mach-pxa/pxa300.c | |||
@@ -15,10 +15,16 @@ | |||
15 | 15 | ||
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/platform_device.h> | ||
18 | 19 | ||
19 | #include <asm/hardware.h> | 20 | #include <asm/hardware.h> |
21 | #include <asm/arch/pxa3xx-regs.h> | ||
20 | #include <asm/arch/mfp-pxa300.h> | 22 | #include <asm/arch/mfp-pxa300.h> |
21 | 23 | ||
24 | #include "generic.h" | ||
25 | #include "devices.h" | ||
26 | #include "clock.h" | ||
27 | |||
22 | static struct pxa3xx_mfp_addr_map pxa300_mfp_addr_map[] __initdata = { | 28 | static struct pxa3xx_mfp_addr_map pxa300_mfp_addr_map[] __initdata = { |
23 | 29 | ||
24 | MFP_ADDR_X(GPIO0, GPIO2, 0x00b4), | 30 | MFP_ADDR_X(GPIO0, GPIO2, 0x00b4), |
@@ -79,15 +85,26 @@ static struct pxa3xx_mfp_addr_map pxa310_mfp_addr_map[] __initdata = { | |||
79 | MFP_ADDR_END, | 85 | MFP_ADDR_END, |
80 | }; | 86 | }; |
81 | 87 | ||
88 | static struct clk common_clks[] = { | ||
89 | PXA3xx_CKEN("NANDCLK", NAND, 156000000, 0, &pxa3xx_device_nand.dev), | ||
90 | }; | ||
91 | |||
92 | static struct clk pxa310_clks[] = { | ||
93 | PXA3xx_CKEN("MMCCLK", MMC3, 19500000, 0, &pxa3xx_device_mci3.dev), | ||
94 | }; | ||
95 | |||
82 | static int __init pxa300_init(void) | 96 | static int __init pxa300_init(void) |
83 | { | 97 | { |
84 | if (cpu_is_pxa300() || cpu_is_pxa310()) { | 98 | if (cpu_is_pxa300() || cpu_is_pxa310()) { |
85 | pxa3xx_init_mfp(); | 99 | pxa3xx_init_mfp(); |
86 | pxa3xx_mfp_init_addr(pxa300_mfp_addr_map); | 100 | pxa3xx_mfp_init_addr(pxa300_mfp_addr_map); |
101 | clks_register(ARRAY_AND_SIZE(common_clks)); | ||
87 | } | 102 | } |
88 | 103 | ||
89 | if (cpu_is_pxa310()) | 104 | if (cpu_is_pxa310()) { |
90 | pxa3xx_mfp_init_addr(pxa310_mfp_addr_map); | 105 | pxa3xx_mfp_init_addr(pxa310_mfp_addr_map); |
106 | clks_register(ARRAY_AND_SIZE(pxa310_clks)); | ||
107 | } | ||
91 | 108 | ||
92 | return 0; | 109 | return 0; |
93 | } | 110 | } |
diff --git a/arch/arm/mach-pxa/pxa320.c b/arch/arm/mach-pxa/pxa320.c index 74128eb8f8d0..c557c23a1efe 100644 --- a/arch/arm/mach-pxa/pxa320.c +++ b/arch/arm/mach-pxa/pxa320.c | |||
@@ -15,11 +15,17 @@ | |||
15 | 15 | ||
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/platform_device.h> | ||
18 | 19 | ||
19 | #include <asm/hardware.h> | 20 | #include <asm/hardware.h> |
20 | #include <asm/arch/mfp.h> | 21 | #include <asm/arch/mfp.h> |
22 | #include <asm/arch/pxa3xx-regs.h> | ||
21 | #include <asm/arch/mfp-pxa320.h> | 23 | #include <asm/arch/mfp-pxa320.h> |
22 | 24 | ||
25 | #include "generic.h" | ||
26 | #include "devices.h" | ||
27 | #include "clock.h" | ||
28 | |||
23 | static struct pxa3xx_mfp_addr_map pxa320_mfp_addr_map[] __initdata = { | 29 | static struct pxa3xx_mfp_addr_map pxa320_mfp_addr_map[] __initdata = { |
24 | 30 | ||
25 | MFP_ADDR_X(GPIO0, GPIO4, 0x0124), | 31 | MFP_ADDR_X(GPIO0, GPIO4, 0x0124), |
@@ -74,16 +80,17 @@ static struct pxa3xx_mfp_addr_map pxa320_mfp_addr_map[] __initdata = { | |||
74 | MFP_ADDR_END, | 80 | MFP_ADDR_END, |
75 | }; | 81 | }; |
76 | 82 | ||
77 | static void __init pxa320_init_mfp(void) | 83 | static struct clk pxa320_clks[] = { |
78 | { | 84 | PXA3xx_CKEN("NANDCLK", NAND, 104000000, 0, &pxa3xx_device_nand.dev), |
79 | pxa3xx_init_mfp(); | 85 | }; |
80 | pxa3xx_mfp_init_addr(pxa320_mfp_addr_map); | ||
81 | } | ||
82 | 86 | ||
83 | static int __init pxa320_init(void) | 87 | static int __init pxa320_init(void) |
84 | { | 88 | { |
85 | if (cpu_is_pxa320()) | 89 | if (cpu_is_pxa320()) { |
86 | pxa320_init_mfp(); | 90 | pxa3xx_init_mfp(); |
91 | pxa3xx_mfp_init_addr(pxa320_mfp_addr_map); | ||
92 | clks_register(ARRAY_AND_SIZE(pxa320_clks)); | ||
93 | } | ||
87 | 94 | ||
88 | return 0; | 95 | return 0; |
89 | } | 96 | } |
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c index 15685d2b8f8c..f491025a0c82 100644 --- a/arch/arm/mach-pxa/pxa3xx.c +++ b/arch/arm/mach-pxa/pxa3xx.c | |||
@@ -144,7 +144,7 @@ static unsigned long clk_pxa3xx_hsio_getrate(struct clk *clk) | |||
144 | return hsio_clk; | 144 | return hsio_clk; |
145 | } | 145 | } |
146 | 146 | ||
147 | static void clk_pxa3xx_cken_enable(struct clk *clk) | 147 | void clk_pxa3xx_cken_enable(struct clk *clk) |
148 | { | 148 | { |
149 | unsigned long mask = 1ul << (clk->cken & 0x1f); | 149 | unsigned long mask = 1ul << (clk->cken & 0x1f); |
150 | 150 | ||
@@ -154,7 +154,7 @@ static void clk_pxa3xx_cken_enable(struct clk *clk) | |||
154 | CKENB |= mask; | 154 | CKENB |= mask; |
155 | } | 155 | } |
156 | 156 | ||
157 | static void clk_pxa3xx_cken_disable(struct clk *clk) | 157 | void clk_pxa3xx_cken_disable(struct clk *clk) |
158 | { | 158 | { |
159 | unsigned long mask = 1ul << (clk->cken & 0x1f); | 159 | unsigned long mask = 1ul << (clk->cken & 0x1f); |
160 | 160 | ||
@@ -164,7 +164,7 @@ static void clk_pxa3xx_cken_disable(struct clk *clk) | |||
164 | CKENB &= ~mask; | 164 | CKENB &= ~mask; |
165 | } | 165 | } |
166 | 166 | ||
167 | static const struct clkops clk_pxa3xx_cken_ops = { | 167 | const struct clkops clk_pxa3xx_cken_ops = { |
168 | .enable = clk_pxa3xx_cken_enable, | 168 | .enable = clk_pxa3xx_cken_enable, |
169 | .disable = clk_pxa3xx_cken_disable, | 169 | .disable = clk_pxa3xx_cken_disable, |
170 | }; | 170 | }; |
@@ -196,24 +196,6 @@ static const struct clkops clk_pout_ops = { | |||
196 | .disable = clk_pout_disable, | 196 | .disable = clk_pout_disable, |
197 | }; | 197 | }; |
198 | 198 | ||
199 | #define PXA3xx_CKEN(_name, _cken, _rate, _delay, _dev) \ | ||
200 | { \ | ||
201 | .name = _name, \ | ||
202 | .dev = _dev, \ | ||
203 | .ops = &clk_pxa3xx_cken_ops, \ | ||
204 | .rate = _rate, \ | ||
205 | .cken = CKEN_##_cken, \ | ||
206 | .delay = _delay, \ | ||
207 | } | ||
208 | |||
209 | #define PXA3xx_CK(_name, _cken, _ops, _dev) \ | ||
210 | { \ | ||
211 | .name = _name, \ | ||
212 | .dev = _dev, \ | ||
213 | .ops = _ops, \ | ||
214 | .cken = CKEN_##_cken, \ | ||
215 | } | ||
216 | |||
217 | static struct clk pxa3xx_clks[] = { | 199 | static struct clk pxa3xx_clks[] = { |
218 | { | 200 | { |
219 | .name = "CLK_POUT", | 201 | .name = "CLK_POUT", |
@@ -244,7 +226,6 @@ static struct clk pxa3xx_clks[] = { | |||
244 | 226 | ||
245 | PXA3xx_CKEN("MMCCLK", MMC1, 19500000, 0, &pxa_device_mci.dev), | 227 | PXA3xx_CKEN("MMCCLK", MMC1, 19500000, 0, &pxa_device_mci.dev), |
246 | PXA3xx_CKEN("MMCCLK", MMC2, 19500000, 0, &pxa3xx_device_mci2.dev), | 228 | PXA3xx_CKEN("MMCCLK", MMC2, 19500000, 0, &pxa3xx_device_mci2.dev), |
247 | PXA3xx_CKEN("MMCCLK", MMC3, 19500000, 0, &pxa3xx_device_mci3.dev), | ||
248 | }; | 229 | }; |
249 | 230 | ||
250 | #ifdef CONFIG_PM | 231 | #ifdef CONFIG_PM |
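Across pxa300.c, pxa320.c and pxa3xx.c the per-SoC clocks move out of the common file: clk_pxa3xx_cken_enable/disable and clk_pxa3xx_cken_ops lose their static qualifiers, the PXA3xx_CKEN() initializer is removed from pxa3xx.c (presumably relocated to the shared clock.h next to the now-exported ops), and each SoC file registers its own additions, for instance the NAND clock at 156 MHz on PXA300/310 versus 104 MHz on PXA320, and the MMC3 clock only on PXA310. For reference, the macro body as it appears in the removed hunk, and the way an SoC-specific file uses it:

#define PXA3xx_CKEN(_name, _cken, _rate, _delay, _dev)	\
	{						\
		.name	= _name,			\
		.dev	= _dev,				\
		.ops	= &clk_pxa3xx_cken_ops,		\
		.rate	= _rate,			\
		.cken	= CKEN_##_cken,			\
		.delay	= _delay,			\
	}

/* which lets an SoC-specific file build its own table: */
static struct clk pxa320_clks[] = {
	PXA3xx_CKEN("NANDCLK", NAND, 104000000, 0, &pxa3xx_device_nand.dev),
};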
diff --git a/arch/arm/mach-pxa/pxa930.c b/arch/arm/mach-pxa/pxa930.c new file mode 100644 index 000000000000..9503897d049c --- /dev/null +++ b/arch/arm/mach-pxa/pxa930.c | |||
@@ -0,0 +1,190 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/mach-pxa/pxa930.c | ||
3 | * | ||
4 | * Code specific to PXA930 | ||
5 | * | ||
6 | * Copyright (C) 2007-2008 Marvell Internation Ltd. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include <linux/module.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/platform_device.h> | ||
16 | #include <linux/irq.h> | ||
17 | #include <linux/dma-mapping.h> | ||
18 | |||
19 | #include <asm/hardware.h> | ||
20 | #include <asm/arch/mfp-pxa930.h> | ||
21 | |||
22 | static struct pxa3xx_mfp_addr_map pxa930_mfp_addr_map[] __initdata = { | ||
23 | |||
24 | MFP_ADDR(GPIO0, 0x02e0), | ||
25 | MFP_ADDR(GPIO1, 0x02dc), | ||
26 | MFP_ADDR(GPIO2, 0x02e8), | ||
27 | MFP_ADDR(GPIO3, 0x02d8), | ||
28 | MFP_ADDR(GPIO4, 0x02e4), | ||
29 | MFP_ADDR(GPIO5, 0x02ec), | ||
30 | MFP_ADDR(GPIO6, 0x02f8), | ||
31 | MFP_ADDR(GPIO7, 0x02fc), | ||
32 | MFP_ADDR(GPIO8, 0x0300), | ||
33 | MFP_ADDR(GPIO9, 0x02d4), | ||
34 | MFP_ADDR(GPIO10, 0x02f4), | ||
35 | MFP_ADDR(GPIO11, 0x02f0), | ||
36 | MFP_ADDR(GPIO12, 0x0304), | ||
37 | MFP_ADDR(GPIO13, 0x0310), | ||
38 | MFP_ADDR(GPIO14, 0x0308), | ||
39 | MFP_ADDR(GPIO15, 0x030c), | ||
40 | MFP_ADDR(GPIO16, 0x04e8), | ||
41 | MFP_ADDR(GPIO17, 0x04f4), | ||
42 | MFP_ADDR(GPIO18, 0x04f8), | ||
43 | MFP_ADDR(GPIO19, 0x04fc), | ||
44 | MFP_ADDR(GPIO20, 0x0518), | ||
45 | MFP_ADDR(GPIO21, 0x051c), | ||
46 | MFP_ADDR(GPIO22, 0x04ec), | ||
47 | MFP_ADDR(GPIO23, 0x0500), | ||
48 | MFP_ADDR(GPIO24, 0x04f0), | ||
49 | MFP_ADDR(GPIO25, 0x0504), | ||
50 | MFP_ADDR(GPIO26, 0x0510), | ||
51 | MFP_ADDR(GPIO27, 0x0514), | ||
52 | MFP_ADDR(GPIO28, 0x0520), | ||
53 | MFP_ADDR(GPIO29, 0x0600), | ||
54 | MFP_ADDR(GPIO30, 0x0618), | ||
55 | MFP_ADDR(GPIO31, 0x0610), | ||
56 | MFP_ADDR(GPIO32, 0x060c), | ||
57 | MFP_ADDR(GPIO33, 0x061c), | ||
58 | MFP_ADDR(GPIO34, 0x0620), | ||
59 | MFP_ADDR(GPIO35, 0x0628), | ||
60 | MFP_ADDR(GPIO36, 0x062c), | ||
61 | MFP_ADDR(GPIO37, 0x0630), | ||
62 | MFP_ADDR(GPIO38, 0x0634), | ||
63 | MFP_ADDR(GPIO39, 0x0638), | ||
64 | MFP_ADDR(GPIO40, 0x063c), | ||
65 | MFP_ADDR(GPIO41, 0x0614), | ||
66 | MFP_ADDR(GPIO42, 0x0624), | ||
67 | MFP_ADDR(GPIO43, 0x0608), | ||
68 | MFP_ADDR(GPIO44, 0x0604), | ||
69 | MFP_ADDR(GPIO45, 0x050c), | ||
70 | MFP_ADDR(GPIO46, 0x0508), | ||
71 | MFP_ADDR(GPIO47, 0x02bc), | ||
72 | MFP_ADDR(GPIO48, 0x02b4), | ||
73 | MFP_ADDR(GPIO49, 0x02b8), | ||
74 | MFP_ADDR(GPIO50, 0x02c8), | ||
75 | MFP_ADDR(GPIO51, 0x02c0), | ||
76 | MFP_ADDR(GPIO52, 0x02c4), | ||
77 | MFP_ADDR(GPIO53, 0x02d0), | ||
78 | MFP_ADDR(GPIO54, 0x02cc), | ||
79 | MFP_ADDR(GPIO55, 0x029c), | ||
80 | MFP_ADDR(GPIO56, 0x02a0), | ||
81 | MFP_ADDR(GPIO57, 0x0294), | ||
82 | MFP_ADDR(GPIO58, 0x0298), | ||
83 | MFP_ADDR(GPIO59, 0x02a4), | ||
84 | MFP_ADDR(GPIO60, 0x02a8), | ||
85 | MFP_ADDR(GPIO61, 0x02b0), | ||
86 | MFP_ADDR(GPIO62, 0x02ac), | ||
87 | MFP_ADDR(GPIO63, 0x0640), | ||
88 | MFP_ADDR(GPIO64, 0x065c), | ||
89 | MFP_ADDR(GPIO65, 0x0648), | ||
90 | MFP_ADDR(GPIO66, 0x0644), | ||
91 | MFP_ADDR(GPIO67, 0x0674), | ||
92 | MFP_ADDR(GPIO68, 0x0658), | ||
93 | MFP_ADDR(GPIO69, 0x0654), | ||
94 | MFP_ADDR(GPIO70, 0x0660), | ||
95 | MFP_ADDR(GPIO71, 0x0668), | ||
96 | MFP_ADDR(GPIO72, 0x0664), | ||
97 | MFP_ADDR(GPIO73, 0x0650), | ||
98 | MFP_ADDR(GPIO74, 0x066c), | ||
99 | MFP_ADDR(GPIO75, 0x064c), | ||
100 | MFP_ADDR(GPIO76, 0x0670), | ||
101 | MFP_ADDR(GPIO77, 0x0678), | ||
102 | MFP_ADDR(GPIO78, 0x067c), | ||
103 | MFP_ADDR(GPIO79, 0x0694), | ||
104 | MFP_ADDR(GPIO80, 0x069c), | ||
105 | MFP_ADDR(GPIO81, 0x06a0), | ||
106 | MFP_ADDR(GPIO82, 0x06a4), | ||
107 | MFP_ADDR(GPIO83, 0x0698), | ||
108 | MFP_ADDR(GPIO84, 0x06bc), | ||
109 | MFP_ADDR(GPIO85, 0x06b4), | ||
110 | MFP_ADDR(GPIO86, 0x06b0), | ||
111 | MFP_ADDR(GPIO87, 0x06c0), | ||
112 | MFP_ADDR(GPIO88, 0x06c4), | ||
113 | MFP_ADDR(GPIO89, 0x06ac), | ||
114 | MFP_ADDR(GPIO90, 0x0680), | ||
115 | MFP_ADDR(GPIO91, 0x0684), | ||
116 | MFP_ADDR(GPIO92, 0x0688), | ||
117 | MFP_ADDR(GPIO93, 0x0690), | ||
118 | MFP_ADDR(GPIO94, 0x068c), | ||
119 | MFP_ADDR(GPIO95, 0x06a8), | ||
120 | MFP_ADDR(GPIO96, 0x06b8), | ||
121 | MFP_ADDR(GPIO97, 0x0410), | ||
122 | MFP_ADDR(GPIO98, 0x0418), | ||
123 | MFP_ADDR(GPIO99, 0x041c), | ||
124 | MFP_ADDR(GPIO100, 0x0414), | ||
125 | MFP_ADDR(GPIO101, 0x0408), | ||
126 | MFP_ADDR(GPIO102, 0x0324), | ||
127 | MFP_ADDR(GPIO103, 0x040c), | ||
128 | MFP_ADDR(GPIO104, 0x0400), | ||
129 | MFP_ADDR(GPIO105, 0x0328), | ||
130 | MFP_ADDR(GPIO106, 0x0404), | ||
131 | |||
132 | MFP_ADDR(nXCVREN, 0x0204), | ||
133 | MFP_ADDR(DF_CLE_nOE, 0x020c), | ||
134 | MFP_ADDR(DF_nADV1_ALE, 0x0218), | ||
135 | MFP_ADDR(DF_SCLK_E, 0x0214), | ||
136 | MFP_ADDR(DF_SCLK_S, 0x0210), | ||
137 | MFP_ADDR(nBE0, 0x021c), | ||
138 | MFP_ADDR(nBE1, 0x0220), | ||
139 | MFP_ADDR(DF_nADV2_ALE, 0x0224), | ||
140 | MFP_ADDR(DF_INT_RnB, 0x0228), | ||
141 | MFP_ADDR(DF_nCS0, 0x022c), | ||
142 | MFP_ADDR(DF_nCS1, 0x0230), | ||
143 | MFP_ADDR(nLUA, 0x0254), | ||
144 | MFP_ADDR(nLLA, 0x0258), | ||
145 | MFP_ADDR(DF_nWE, 0x0234), | ||
146 | MFP_ADDR(DF_nRE_nOE, 0x0238), | ||
147 | MFP_ADDR(DF_ADDR0, 0x024c), | ||
148 | MFP_ADDR(DF_ADDR1, 0x0250), | ||
149 | MFP_ADDR(DF_ADDR2, 0x025c), | ||
150 | MFP_ADDR(DF_ADDR3, 0x0260), | ||
151 | MFP_ADDR(DF_IO0, 0x023c), | ||
152 | MFP_ADDR(DF_IO1, 0x0240), | ||
153 | MFP_ADDR(DF_IO2, 0x0244), | ||
154 | MFP_ADDR(DF_IO3, 0x0248), | ||
155 | MFP_ADDR(DF_IO4, 0x0264), | ||
156 | MFP_ADDR(DF_IO5, 0x0268), | ||
157 | MFP_ADDR(DF_IO6, 0x026c), | ||
158 | MFP_ADDR(DF_IO7, 0x0270), | ||
159 | MFP_ADDR(DF_IO8, 0x0274), | ||
160 | MFP_ADDR(DF_IO9, 0x0278), | ||
161 | MFP_ADDR(DF_IO10, 0x027c), | ||
162 | MFP_ADDR(DF_IO11, 0x0280), | ||
163 | MFP_ADDR(DF_IO12, 0x0284), | ||
164 | MFP_ADDR(DF_IO13, 0x0288), | ||
165 | MFP_ADDR(DF_IO14, 0x028c), | ||
166 | MFP_ADDR(DF_IO15, 0x0290), | ||
167 | |||
168 | MFP_ADDR(GSIM_UIO, 0x0314), | ||
169 | MFP_ADDR(GSIM_UCLK, 0x0318), | ||
170 | MFP_ADDR(GSIM_UDET, 0x031c), | ||
171 | MFP_ADDR(GSIM_nURST, 0x0320), | ||
172 | |||
173 | MFP_ADDR(PMIC_INT, 0x06c8), | ||
174 | |||
175 | MFP_ADDR(RDY, 0x0200), | ||
176 | |||
177 | MFP_ADDR_END, | ||
178 | }; | ||
179 | |||
180 | static int __init pxa930_init(void) | ||
181 | { | ||
182 | if (cpu_is_pxa930()) { | ||
183 | pxa3xx_init_mfp(); | ||
184 | pxa3xx_mfp_init_addr(pxa930_mfp_addr_map); | ||
185 | } | ||
186 | |||
187 | return 0; | ||
188 | } | ||
189 | |||
190 | core_initcall(pxa930_init); | ||
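The address map above only ties pin names to MFPR register offsets; a board file still has to program the pins and claim any it uses as GPIOs. A minimal board-side sketch using the same helpers the SAAR and TavorEVB files below rely on (the board and pin choices here are hypothetical, and ARRAY_AND_SIZE() is the (array, size) helper pulled in via the mach-pxa "generic.h" include those board files use):

#include <linux/gpio.h>
#include <linux/init.h>
#include <asm/arch/mfp-pxa930.h>

#include "generic.h"                    /* ARRAY_AND_SIZE(), as in the board files */

/* Hypothetical board: one external-bus pin, one plain GPIO pin. */
static mfp_cfg_t example_mfp_cfg[] __initdata = {
        DF_nCS1_nCS3,                   /* external bus chip-select */
        GPIO97_GPIO,                    /* e.g. an ethernet IRQ line */
};

static void __init example_board_init(void)
{
        pxa3xx_mfp_config(ARRAY_AND_SIZE(example_mfp_cfg));

        /* once routed as a GPIO, the pin is claimed through gpiolib */
        if (gpio_request(mfp_to_gpio(MFP_PIN_GPIO97), "eth irq") == 0)
                gpio_direction_input(mfp_to_gpio(MFP_PIN_GPIO97));
}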
diff --git a/arch/arm/mach-pxa/reset.c b/arch/arm/mach-pxa/reset.c new file mode 100644 index 000000000000..9d39dea57ce2 --- /dev/null +++ b/arch/arm/mach-pxa/reset.c | |||
@@ -0,0 +1,96 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License version 2 as | ||
4 | * published by the Free Software Foundation. | ||
5 | */ | ||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/module.h> | ||
8 | #include <linux/delay.h> | ||
9 | #include <linux/gpio.h> | ||
10 | #include <asm/io.h> | ||
11 | #include <asm/proc-fns.h> | ||
12 | |||
13 | #include <asm/arch/pxa-regs.h> | ||
14 | #include <asm/arch/pxa2xx-regs.h> | ||
15 | |||
16 | static void do_hw_reset(void); | ||
17 | |||
18 | static int reset_gpio = -1; | ||
19 | |||
20 | int init_gpio_reset(int gpio) | ||
21 | { | ||
22 | int rc; | ||
23 | |||
24 | rc = gpio_request(gpio, "reset generator"); | ||
25 | if (rc) { | ||
26 | printk(KERN_ERR "Can't request reset_gpio\n"); | ||
27 | goto out; | ||
28 | } | ||
29 | |||
30 | rc = gpio_direction_input(gpio); | ||
31 | if (rc) { | ||
32 | printk(KERN_ERR "Can't configure reset_gpio for input\n"); | ||
33 | gpio_free(gpio); | ||
34 | goto out; | ||
35 | } | ||
36 | |||
37 | out: | ||
38 | if (!rc) | ||
39 | reset_gpio = gpio; | ||
40 | |||
41 | return rc; | ||
42 | } | ||
43 | |||
44 | /* | ||
45 | * Trigger GPIO reset. | ||
46 | * This covers various types of logic connecting gpio pin | ||
47 | * to RESET pins (nRESET or GPIO_RESET): | ||
48 | */ | ||
49 | static void do_gpio_reset(void) | ||
50 | { | ||
51 | BUG_ON(reset_gpio == -1); | ||
52 | |||
53 | /* drive it low */ | ||
54 | gpio_direction_output(reset_gpio, 0); | ||
55 | mdelay(2); | ||
56 | /* rising edge or drive high */ | ||
57 | gpio_set_value(reset_gpio, 1); | ||
58 | mdelay(2); | ||
59 | /* falling edge */ | ||
60 | gpio_set_value(reset_gpio, 0); | ||
61 | |||
62 | /* give it some time */ | ||
63 | mdelay(10); | ||
64 | |||
65 | WARN_ON(1); | ||
66 | /* fallback */ | ||
67 | do_hw_reset(); | ||
68 | } | ||
69 | |||
70 | static void do_hw_reset(void) | ||
71 | { | ||
72 | /* Initialize the watchdog and let it fire */ | ||
73 | OWER = OWER_WME; | ||
74 | OSSR = OSSR_M3; | ||
75 | OSMR3 = OSCR + 368640; /* ... in 100 ms */ | ||
76 | } | ||
77 | |||
78 | void arch_reset(char mode) | ||
79 | { | ||
80 | if (cpu_is_pxa2xx()) | ||
81 | RCSR = RCSR_HWR | RCSR_WDR | RCSR_SMR | RCSR_GPR; | ||
82 | |||
83 | switch (mode) { | ||
84 | case 's': | ||
85 | /* Jump into ROM at address 0 */ | ||
86 | cpu_reset(0); | ||
87 | break; | ||
88 | case 'h': | ||
89 | do_hw_reset(); | ||
90 | break; | ||
91 | case 'g': | ||
92 | do_gpio_reset(); | ||
93 | break; | ||
94 | } | ||
95 | } | ||
96 | |||
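reset.c lets a board register its reset GPIO once and then route both power-off and restart through arch_reset(); the Spitz and Tosa hunks later in this diff convert to exactly this pattern. A minimal sketch of the board side (the GPIO number is a placeholder, and the headers carrying the init_gpio_reset() and arm_machine_restart() declarations are assumptions):

#include <linux/init.h>
#include <linux/pm.h>                   /* pm_power_off */
#include <asm/system.h>                 /* arm_machine_restart(), header assumed */

#include "generic.h"                    /* init_gpio_reset(), header assumed */

#define EXAMPLE_GPIO_ON_RESET   89      /* placeholder reset line */

static void example_poweroff(void)
{
        /* 'g' makes arch_reset() pulse the registered GPIO; if the pulse
         * has no effect, do_hw_reset() fires the watchdog as a fallback. */
        arm_machine_restart('g');
}

static void __init example_init(void)
{
        init_gpio_reset(EXAMPLE_GPIO_ON_RESET);
        pm_power_off = example_poweroff;
}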
diff --git a/arch/arm/mach-pxa/saar.c b/arch/arm/mach-pxa/saar.c new file mode 100644 index 000000000000..d02bc6f8bb93 --- /dev/null +++ b/arch/arm/mach-pxa/saar.c | |||
@@ -0,0 +1,84 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/mach-pxa/saar.c | ||
3 | * | ||
4 | * Support for the Marvell PXA930 Handheld Platform (aka SAAR) | ||
5 | * | ||
6 | * Copyright (C) 2007-2008 Marvell International Ltd. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include <linux/module.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/interrupt.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/platform_device.h> | ||
18 | #include <linux/clk.h> | ||
19 | #include <linux/gpio.h> | ||
20 | #include <linux/smc91x.h> | ||
21 | |||
22 | #include <asm/mach-types.h> | ||
23 | #include <asm/mach/arch.h> | ||
24 | #include <asm/hardware.h> | ||
25 | #include <asm/arch/pxa3xx-regs.h> | ||
26 | #include <asm/arch/mfp-pxa930.h> | ||
27 | |||
28 | #include "devices.h" | ||
29 | #include "generic.h" | ||
30 | |||
31 | /* SAAR MFP configurations */ | ||
32 | static mfp_cfg_t saar_mfp_cfg[] __initdata = { | ||
33 | /* Ethernet */ | ||
34 | DF_nCS1_nCS3, | ||
35 | GPIO97_GPIO, | ||
36 | }; | ||
37 | |||
38 | #define SAAR_ETH_PHYS (0x14000000) | ||
39 | |||
40 | static struct resource smc91x_resources[] = { | ||
41 | [0] = { | ||
42 | .start = (SAAR_ETH_PHYS + 0x300), | ||
43 | .end = (SAAR_ETH_PHYS + 0xfffff), | ||
44 | .flags = IORESOURCE_MEM, | ||
45 | }, | ||
46 | [1] = { | ||
47 | .start = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO97)), | ||
48 | .end = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO97)), | ||
49 | .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, | ||
50 | } | ||
51 | }; | ||
52 | |||
53 | static struct smc91x_platdata saar_smc91x_info = { | ||
54 | .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT | SMC91X_USE_DMA, | ||
55 | }; | ||
56 | |||
57 | static struct platform_device smc91x_device = { | ||
58 | .name = "smc91x", | ||
59 | .id = 0, | ||
60 | .num_resources = ARRAY_SIZE(smc91x_resources), | ||
61 | .resource = smc91x_resources, | ||
62 | .dev = { | ||
63 | .platform_data = &saar_smc91x_info, | ||
64 | }, | ||
65 | }; | ||
66 | |||
67 | static void __init saar_init(void) | ||
68 | { | ||
69 | /* initialize MFP configurations */ | ||
70 | pxa3xx_mfp_config(ARRAY_AND_SIZE(saar_mfp_cfg)); | ||
71 | |||
72 | platform_device_register(&smc91x_device); | ||
73 | } | ||
74 | |||
75 | MACHINE_START(SAAR, "PXA930 Handheld Platform (aka SAAR)") | ||
76 | /* Maintainer: Eric Miao <eric.miao@marvell.com> */ | ||
77 | .phys_io = 0x40000000, | ||
78 | .boot_params = 0xa0000100, | ||
79 | .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc, | ||
80 | .map_io = pxa_map_io, | ||
81 | .init_irq = pxa3xx_init_irq, | ||
82 | .timer = &pxa_timer, | ||
83 | .init_machine = saar_init, | ||
84 | MACHINE_END | ||
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c index e7d0fcd9b43f..762249c03ded 100644 --- a/arch/arm/mach-pxa/spitz.c +++ b/arch/arm/mach-pxa/spitz.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <asm/arch/pxa-regs.h> | 38 | #include <asm/arch/pxa-regs.h> |
39 | #include <asm/arch/pxa2xx-regs.h> | 39 | #include <asm/arch/pxa2xx-regs.h> |
40 | #include <asm/arch/pxa2xx-gpio.h> | 40 | #include <asm/arch/pxa2xx-gpio.h> |
41 | #include <asm/arch/pxa27x-udc.h> | ||
41 | #include <asm/arch/irda.h> | 42 | #include <asm/arch/irda.h> |
42 | #include <asm/arch/mmc.h> | 43 | #include <asm/arch/mmc.h> |
43 | #include <asm/arch/ohci.h> | 44 | #include <asm/arch/ohci.h> |
@@ -450,6 +451,7 @@ static void spitz_irda_transceiver_mode(struct device *dev, int mode) | |||
450 | set_scoop_gpio(&spitzscoop2_device.dev, SPITZ_SCP2_IR_ON); | 451 | set_scoop_gpio(&spitzscoop2_device.dev, SPITZ_SCP2_IR_ON); |
451 | else | 452 | else |
452 | reset_scoop_gpio(&spitzscoop2_device.dev, SPITZ_SCP2_IR_ON); | 453 | reset_scoop_gpio(&spitzscoop2_device.dev, SPITZ_SCP2_IR_ON); |
454 | pxa2xx_transceiver_mode(dev, mode); | ||
453 | } | 455 | } |
454 | 456 | ||
455 | #ifdef CONFIG_MACH_AKITA | 457 | #ifdef CONFIG_MACH_AKITA |
@@ -459,6 +461,7 @@ static void akita_irda_transceiver_mode(struct device *dev, int mode) | |||
459 | akita_set_ioexp(&akitaioexp_device.dev, AKITA_IOEXP_IR_ON); | 461 | akita_set_ioexp(&akitaioexp_device.dev, AKITA_IOEXP_IR_ON); |
460 | else | 462 | else |
461 | akita_reset_ioexp(&akitaioexp_device.dev, AKITA_IOEXP_IR_ON); | 463 | akita_reset_ioexp(&akitaioexp_device.dev, AKITA_IOEXP_IR_ON); |
464 | pxa2xx_transceiver_mode(dev, mode); | ||
462 | } | 465 | } |
463 | #endif | 466 | #endif |
464 | 467 | ||
@@ -529,11 +532,7 @@ static struct platform_device *devices[] __initdata = { | |||
529 | 532 | ||
530 | static void spitz_poweroff(void) | 533 | static void spitz_poweroff(void) |
531 | { | 534 | { |
532 | pxa_gpio_mode(SPITZ_GPIO_ON_RESET | GPIO_OUT); | 535 | arm_machine_restart('g'); |
533 | GPSR(SPITZ_GPIO_ON_RESET) = GPIO_bit(SPITZ_GPIO_ON_RESET); | ||
534 | |||
535 | mdelay(1000); | ||
536 | arm_machine_restart('h'); | ||
537 | } | 536 | } |
538 | 537 | ||
539 | static void spitz_restart(char mode) | 538 | static void spitz_restart(char mode) |
@@ -547,6 +546,7 @@ static void spitz_restart(char mode) | |||
547 | 546 | ||
548 | static void __init common_init(void) | 547 | static void __init common_init(void) |
549 | { | 548 | { |
549 | init_gpio_reset(SPITZ_GPIO_ON_RESET); | ||
550 | pm_power_off = spitz_poweroff; | 550 | pm_power_off = spitz_poweroff; |
551 | arm_pm_restart = spitz_restart; | 551 | arm_pm_restart = spitz_restart; |
552 | 552 | ||
diff --git a/arch/arm/mach-pxa/ssp.c b/arch/arm/mach-pxa/ssp.c index 0bb31982fb6f..89f38683787e 100644 --- a/arch/arm/mach-pxa/ssp.c +++ b/arch/arm/mach-pxa/ssp.c | |||
@@ -14,13 +14,6 @@ | |||
14 | * IO-based SSP applications and allows easy port setup for DMA access. | 14 | * IO-based SSP applications and allows easy port setup for DMA access. |
15 | * | 15 | * |
16 | * Author: Liam Girdwood <liam.girdwood@wolfsonmicro.com> | 16 | * Author: Liam Girdwood <liam.girdwood@wolfsonmicro.com> |
17 | * | ||
18 | * Revision history: | ||
19 | * 22nd Aug 2003 Initial version. | ||
20 | * 20th Dec 2004 Added ssp_config for changing port config without | ||
21 | * closing the port. | ||
22 | * 4th Aug 2005 Added option to disable irq handler registration and | ||
23 | * cleaned up irq and clock detection. | ||
24 | */ | 17 | */ |
25 | 18 | ||
26 | #include <linux/module.h> | 19 | #include <linux/module.h> |
@@ -285,7 +278,7 @@ int ssp_init(struct ssp_dev *dev, u32 port, u32 init_flags) | |||
285 | goto out_region; | 278 | goto out_region; |
286 | dev->irq = ssp->irq; | 279 | dev->irq = ssp->irq; |
287 | } else | 280 | } else |
288 | dev->irq = 0; | 281 | dev->irq = NO_IRQ; |
289 | 282 | ||
290 | /* turn on SSP port clock */ | 283 | /* turn on SSP port clock */ |
291 | clk_enable(ssp->clk); | 284 | clk_enable(ssp->clk); |
@@ -306,7 +299,8 @@ void ssp_exit(struct ssp_dev *dev) | |||
306 | struct ssp_device *ssp = dev->ssp; | 299 | struct ssp_device *ssp = dev->ssp; |
307 | 300 | ||
308 | ssp_disable(dev); | 301 | ssp_disable(dev); |
309 | free_irq(dev->irq, dev); | 302 | if (dev->irq != NO_IRQ) |
303 | free_irq(dev->irq, dev); | ||
310 | clk_disable(ssp->clk); | 304 | clk_disable(ssp->clk); |
311 | ssp_free(ssp); | 305 | ssp_free(ssp); |
312 | } | 306 | } |
@@ -360,6 +354,7 @@ static int __devinit ssp_probe(struct platform_device *pdev, int type) | |||
360 | dev_err(&pdev->dev, "failed to allocate memory"); | 354 | dev_err(&pdev->dev, "failed to allocate memory"); |
361 | return -ENOMEM; | 355 | return -ENOMEM; |
362 | } | 356 | } |
357 | ssp->pdev = pdev; | ||
363 | 358 | ||
364 | ssp->clk = clk_get(&pdev->dev, "SSPCLK"); | 359 | ssp->clk = clk_get(&pdev->dev, "SSPCLK"); |
365 | if (IS_ERR(ssp->clk)) { | 360 | if (IS_ERR(ssp->clk)) { |
diff --git a/arch/arm/mach-pxa/tavorevb.c b/arch/arm/mach-pxa/tavorevb.c new file mode 100644 index 000000000000..ac283507e423 --- /dev/null +++ b/arch/arm/mach-pxa/tavorevb.c | |||
@@ -0,0 +1,84 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/mach-pxa/tavorevb.c | ||
3 | * | ||
4 | * Support for the Marvell PXA930 Evaluation Board | ||
5 | * | ||
6 | * Copyright (C) 2007-2008 Marvell International Ltd. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include <linux/module.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/interrupt.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/platform_device.h> | ||
18 | #include <linux/clk.h> | ||
19 | #include <linux/gpio.h> | ||
20 | #include <linux/smc91x.h> | ||
21 | |||
22 | #include <asm/mach-types.h> | ||
23 | #include <asm/mach/arch.h> | ||
24 | #include <asm/hardware.h> | ||
25 | #include <asm/arch/pxa3xx-regs.h> | ||
26 | #include <asm/arch/mfp-pxa930.h> | ||
27 | |||
28 | #include "devices.h" | ||
29 | #include "generic.h" | ||
30 | |||
31 | /* Tavor EVB MFP configurations */ | ||
32 | static mfp_cfg_t tavorevb_mfp_cfg[] __initdata = { | ||
33 | /* Ethernet */ | ||
34 | DF_nCS1_nCS3, | ||
35 | GPIO47_GPIO, | ||
36 | }; | ||
37 | |||
38 | #define TAVOREVB_ETH_PHYS (0x14000000) | ||
39 | |||
40 | static struct resource smc91x_resources[] = { | ||
41 | [0] = { | ||
42 | .start = (TAVOREVB_ETH_PHYS + 0x300), | ||
43 | .end = (TAVOREVB_ETH_PHYS + 0xfffff), | ||
44 | .flags = IORESOURCE_MEM, | ||
45 | }, | ||
46 | [1] = { | ||
47 | .start = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO47)), | ||
48 | .end = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO47)), | ||
49 | .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, | ||
50 | } | ||
51 | }; | ||
52 | |||
53 | static struct smc91x_platdata tavorevb_smc91x_info = { | ||
54 | .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT | SMC91X_USE_DMA, | ||
55 | }; | ||
56 | |||
57 | static struct platform_device smc91x_device = { | ||
58 | .name = "smc91x", | ||
59 | .id = 0, | ||
60 | .num_resources = ARRAY_SIZE(smc91x_resources), | ||
61 | .resource = smc91x_resources, | ||
62 | .dev = { | ||
63 | .platform_data = &tavorevb_smc91x_info, | ||
64 | }, | ||
65 | }; | ||
66 | |||
67 | static void __init tavorevb_init(void) | ||
68 | { | ||
69 | /* initialize MFP configurations */ | ||
70 | pxa3xx_mfp_config(ARRAY_AND_SIZE(tavorevb_mfp_cfg)); | ||
71 | |||
72 | platform_device_register(&smc91x_device); | ||
73 | } | ||
74 | |||
75 | MACHINE_START(TAVOREVB, "PXA930 Evaluation Board (aka TavorEVB)") | ||
76 | /* Maintainer: Eric Miao <eric.miao@marvell.com> */ | ||
77 | .phys_io = 0x40000000, | ||
78 | .boot_params = 0xa0000100, | ||
79 | .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc, | ||
80 | .map_io = pxa_map_io, | ||
81 | .init_irq = pxa3xx_init_irq, | ||
82 | .timer = &pxa_timer, | ||
83 | .init_machine = tavorevb_init, | ||
84 | MACHINE_END | ||
diff --git a/arch/arm/mach-pxa/tosa-bt.c b/arch/arm/mach-pxa/tosa-bt.c new file mode 100644 index 000000000000..7d8505466e54 --- /dev/null +++ b/arch/arm/mach-pxa/tosa-bt.c | |||
@@ -0,0 +1,150 @@ | |||
1 | /* | ||
2 | * Bluetooth built-in chip control | ||
3 | * | ||
4 | * Copyright (c) 2008 Dmitry Baryshkov | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/platform_device.h> | ||
15 | #include <linux/gpio.h> | ||
16 | #include <linux/delay.h> | ||
17 | #include <linux/rfkill.h> | ||
18 | |||
19 | #include <asm/arch/tosa_bt.h> | ||
20 | |||
21 | static void tosa_bt_on(struct tosa_bt_data *data) | ||
22 | { | ||
23 | gpio_set_value(data->gpio_reset, 0); | ||
24 | gpio_set_value(data->gpio_pwr, 1); | ||
25 | gpio_set_value(data->gpio_reset, 1); | ||
26 | mdelay(20); | ||
27 | gpio_set_value(data->gpio_reset, 0); | ||
28 | } | ||
29 | |||
30 | static void tosa_bt_off(struct tosa_bt_data *data) | ||
31 | { | ||
32 | gpio_set_value(data->gpio_reset, 1); | ||
33 | mdelay(10); | ||
34 | gpio_set_value(data->gpio_pwr, 0); | ||
35 | gpio_set_value(data->gpio_reset, 0); | ||
36 | } | ||
37 | |||
38 | static int tosa_bt_toggle_radio(void *data, enum rfkill_state state) | ||
39 | { | ||
40 | pr_info("BT_RADIO going: %s\n", | ||
41 | state == RFKILL_STATE_ON ? "on" : "off"); | ||
42 | |||
43 | if (state == RFKILL_STATE_ON) { | ||
44 | pr_info("TOSA_BT: going ON\n"); | ||
45 | tosa_bt_on(data); | ||
46 | } else { | ||
47 | pr_info("TOSA_BT: going OFF\n"); | ||
48 | tosa_bt_off(data); | ||
49 | } | ||
50 | return 0; | ||
51 | } | ||
52 | |||
53 | static int tosa_bt_probe(struct platform_device *dev) | ||
54 | { | ||
55 | int rc; | ||
56 | struct rfkill *rfk; | ||
57 | |||
58 | struct tosa_bt_data *data = dev->dev.platform_data; | ||
59 | |||
60 | rc = gpio_request(data->gpio_reset, "Bluetooth reset"); | ||
61 | if (rc) | ||
62 | goto err_reset; | ||
63 | rc = gpio_direction_output(data->gpio_reset, 0); | ||
64 | if (rc) | ||
65 | goto err_reset_dir; | ||
66 | rc = gpio_request(data->gpio_pwr, "Bluetooth power"); | ||
67 | if (rc) | ||
68 | goto err_pwr; | ||
69 | rc = gpio_direction_output(data->gpio_pwr, 0); | ||
70 | if (rc) | ||
71 | goto err_pwr_dir; | ||
72 | |||
73 | rfk = rfkill_allocate(&dev->dev, RFKILL_TYPE_BLUETOOTH); | ||
74 | if (!rfk) { | ||
75 | rc = -ENOMEM; | ||
76 | goto err_rfk_alloc; | ||
77 | } | ||
78 | |||
79 | rfk->name = "tosa-bt"; | ||
80 | rfk->toggle_radio = tosa_bt_toggle_radio; | ||
81 | rfk->data = data; | ||
82 | #ifdef CONFIG_RFKILL_LEDS | ||
83 | rfk->led_trigger.name = "tosa-bt"; | ||
84 | #endif | ||
85 | |||
86 | rc = rfkill_register(rfk); | ||
87 | if (rc) | ||
88 | goto err_rfkill; | ||
89 | |||
90 | platform_set_drvdata(dev, rfk); | ||
91 | |||
92 | return 0; | ||
93 | |||
94 | err_rfkill: | ||
95 | if (rfk) | ||
96 | rfkill_free(rfk); | ||
97 | rfk = NULL; | ||
98 | err_rfk_alloc: | ||
99 | tosa_bt_off(data); | ||
100 | err_pwr_dir: | ||
101 | gpio_free(data->gpio_pwr); | ||
102 | err_pwr: | ||
103 | err_reset_dir: | ||
104 | gpio_free(data->gpio_reset); | ||
105 | err_reset: | ||
106 | return rc; | ||
107 | } | ||
108 | |||
109 | static int __devexit tosa_bt_remove(struct platform_device *dev) | ||
110 | { | ||
111 | struct tosa_bt_data *data = dev->dev.platform_data; | ||
112 | struct rfkill *rfk = platform_get_drvdata(dev); | ||
113 | |||
114 | platform_set_drvdata(dev, NULL); | ||
115 | |||
116 | if (rfk) | ||
117 | rfkill_unregister(rfk); | ||
118 | rfk = NULL; | ||
119 | |||
120 | tosa_bt_off(data); | ||
121 | |||
122 | gpio_free(data->gpio_pwr); | ||
123 | gpio_free(data->gpio_reset); | ||
124 | |||
125 | return 0; | ||
126 | } | ||
127 | |||
128 | static struct platform_driver tosa_bt_driver = { | ||
129 | .probe = tosa_bt_probe, | ||
130 | .remove = __devexit_p(tosa_bt_remove), | ||
131 | |||
132 | .driver = { | ||
133 | .name = "tosa-bt", | ||
134 | .owner = THIS_MODULE, | ||
135 | }, | ||
136 | }; | ||
137 | |||
138 | |||
139 | static int __init tosa_bt_init(void) | ||
140 | { | ||
141 | return platform_driver_register(&tosa_bt_driver); | ||
142 | } | ||
143 | |||
144 | static void __exit tosa_bt_exit(void) | ||
145 | { | ||
146 | platform_driver_unregister(&tosa_bt_driver); | ||
147 | } | ||
148 | |||
149 | module_init(tosa_bt_init); | ||
150 | module_exit(tosa_bt_exit); | ||
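The driver above binds purely by the "tosa-bt" name and takes its two GPIO numbers from platform data; the tosa.c hunk later in this diff registers it exactly that way. A minimal registration sketch with placeholder GPIO numbers:

#include <linux/init.h>
#include <linux/platform_device.h>
#include <asm/arch/tosa_bt.h>

/* Placeholder numbers; tosa.c uses TOSA_GPIO_BT_PWR_EN and TOSA_GPIO_BT_RESET. */
static struct tosa_bt_data example_bt_data = {
        .gpio_pwr       = 22,
        .gpio_reset     = 23,
};

static struct platform_device example_bt_device = {
        .name                   = "tosa-bt",    /* must match tosa_bt_driver */
        .id                     = -1,
        .dev.platform_data      = &example_bt_data,
};

static int __init example_bt_register(void)
{
        return platform_device_register(&example_bt_device);
}
device_initcall(example_bt_register);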
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c index ab4a9f579913..fea17ce6b55f 100644 --- a/arch/arm/mach-pxa/tosa.c +++ b/arch/arm/mach-pxa/tosa.c | |||
@@ -18,30 +18,31 @@ | |||
18 | #include <linux/major.h> | 18 | #include <linux/major.h> |
19 | #include <linux/fs.h> | 19 | #include <linux/fs.h> |
20 | #include <linux/interrupt.h> | 20 | #include <linux/interrupt.h> |
21 | #include <linux/delay.h> | ||
22 | #include <linux/fb.h> | ||
21 | #include <linux/mmc/host.h> | 23 | #include <linux/mmc/host.h> |
24 | #include <linux/mfd/tc6393xb.h> | ||
25 | #include <linux/mfd/tmio.h> | ||
26 | #include <linux/mtd/nand.h> | ||
27 | #include <linux/mtd/partitions.h> | ||
22 | #include <linux/pm.h> | 28 | #include <linux/pm.h> |
23 | #include <linux/delay.h> | ||
24 | #include <linux/gpio_keys.h> | 29 | #include <linux/gpio_keys.h> |
25 | #include <linux/input.h> | 30 | #include <linux/input.h> |
26 | #include <linux/gpio.h> | 31 | #include <linux/gpio.h> |
32 | #include <linux/pda_power.h> | ||
33 | #include <linux/rfkill.h> | ||
27 | 34 | ||
28 | #include <asm/setup.h> | 35 | #include <asm/setup.h> |
29 | #include <asm/memory.h> | ||
30 | #include <asm/mach-types.h> | 36 | #include <asm/mach-types.h> |
31 | #include <asm/hardware.h> | ||
32 | #include <asm/irq.h> | ||
33 | #include <asm/system.h> | ||
34 | #include <asm/arch/pxa-regs.h> | ||
35 | #include <asm/arch/pxa2xx-regs.h> | 37 | #include <asm/arch/pxa2xx-regs.h> |
36 | #include <asm/arch/mfp-pxa25x.h> | 38 | #include <asm/arch/mfp-pxa25x.h> |
37 | #include <asm/arch/irda.h> | 39 | #include <asm/arch/irda.h> |
38 | #include <asm/arch/i2c.h> | 40 | #include <asm/arch/i2c.h> |
39 | #include <asm/arch/mmc.h> | 41 | #include <asm/arch/mmc.h> |
40 | #include <asm/arch/udc.h> | 42 | #include <asm/arch/udc.h> |
43 | #include <asm/arch/tosa_bt.h> | ||
41 | 44 | ||
42 | #include <asm/mach/arch.h> | 45 | #include <asm/mach/arch.h> |
43 | #include <asm/mach/map.h> | ||
44 | #include <asm/mach/irq.h> | ||
45 | #include <asm/arch/tosa.h> | 46 | #include <asm/arch/tosa.h> |
46 | 47 | ||
47 | #include <asm/hardware/scoop.h> | 48 | #include <asm/hardware/scoop.h> |
@@ -86,7 +87,7 @@ static unsigned long tosa_pin_config[] = { | |||
86 | GPIO6_MMC_CLK, | 87 | GPIO6_MMC_CLK, |
87 | GPIO8_MMC_CS0, | 88 | GPIO8_MMC_CS0, |
88 | GPIO9_GPIO, /* Detect */ | 89 | GPIO9_GPIO, /* Detect */ |
89 | // GPIO10 nSD_INT | 90 | GPIO10_GPIO, /* nSD_INT */ |
90 | 91 | ||
91 | /* CF */ | 92 | /* CF */ |
92 | GPIO13_GPIO, /* CD_IRQ */ | 93 | GPIO13_GPIO, /* CD_IRQ */ |
@@ -124,34 +125,34 @@ static unsigned long tosa_pin_config[] = { | |||
124 | GPIO44_BTUART_CTS, | 125 | GPIO44_BTUART_CTS, |
125 | GPIO45_BTUART_RTS, | 126 | GPIO45_BTUART_RTS, |
126 | 127 | ||
127 | /* IrDA */ | ||
128 | GPIO46_STUART_RXD, | ||
129 | GPIO47_STUART_TXD, | ||
130 | |||
131 | /* Keybd */ | 128 | /* Keybd */ |
132 | GPIO58_GPIO, | 129 | GPIO58_GPIO | MFP_LPM_DRIVE_LOW, |
133 | GPIO59_GPIO, | 130 | GPIO59_GPIO | MFP_LPM_DRIVE_LOW, |
134 | GPIO60_GPIO, | 131 | GPIO60_GPIO | MFP_LPM_DRIVE_LOW, |
135 | GPIO61_GPIO, | 132 | GPIO61_GPIO | MFP_LPM_DRIVE_LOW, |
136 | GPIO62_GPIO, | 133 | GPIO62_GPIO | MFP_LPM_DRIVE_LOW, |
137 | GPIO63_GPIO, | 134 | GPIO63_GPIO | MFP_LPM_DRIVE_LOW, |
138 | GPIO64_GPIO, | 135 | GPIO64_GPIO | MFP_LPM_DRIVE_LOW, |
139 | GPIO65_GPIO, | 136 | GPIO65_GPIO | MFP_LPM_DRIVE_LOW, |
140 | GPIO66_GPIO, | 137 | GPIO66_GPIO | MFP_LPM_DRIVE_LOW, |
141 | GPIO67_GPIO, | 138 | GPIO67_GPIO | MFP_LPM_DRIVE_LOW, |
142 | GPIO68_GPIO, | 139 | GPIO68_GPIO | MFP_LPM_DRIVE_LOW, |
143 | GPIO69_GPIO, | 140 | GPIO69_GPIO | MFP_LPM_DRIVE_LOW, |
144 | GPIO70_GPIO, | 141 | GPIO70_GPIO | MFP_LPM_DRIVE_LOW, |
145 | GPIO71_GPIO, | 142 | GPIO71_GPIO | MFP_LPM_DRIVE_LOW, |
146 | GPIO72_GPIO, | 143 | GPIO72_GPIO | MFP_LPM_DRIVE_LOW, |
147 | GPIO73_GPIO, | 144 | GPIO73_GPIO | MFP_LPM_DRIVE_LOW, |
148 | GPIO74_GPIO, | 145 | GPIO74_GPIO | MFP_LPM_DRIVE_LOW, |
149 | GPIO75_GPIO, | 146 | GPIO75_GPIO | MFP_LPM_DRIVE_LOW, |
150 | 147 | ||
151 | /* SPI */ | 148 | /* SPI */ |
152 | GPIO81_SSP2_CLK_OUT, | 149 | GPIO81_SSP2_CLK_OUT, |
153 | GPIO82_SSP2_FRM_OUT, | 150 | GPIO82_SSP2_FRM_OUT, |
154 | GPIO83_SSP2_TXD, | 151 | GPIO83_SSP2_TXD, |
152 | |||
153 | /* IrDA is managed in other way */ | ||
154 | GPIO46_GPIO, | ||
155 | GPIO47_GPIO, | ||
155 | }; | 156 | }; |
156 | 157 | ||
157 | /* | 158 | /* |
@@ -249,6 +250,15 @@ static int tosa_mci_init(struct device *dev, irq_handler_t tosa_detect_int, void | |||
249 | 250 | ||
250 | tosa_mci_platform_data.detect_delay = msecs_to_jiffies(250); | 251 | tosa_mci_platform_data.detect_delay = msecs_to_jiffies(250); |
251 | 252 | ||
253 | err = gpio_request(TOSA_GPIO_nSD_DETECT, "MMC/SD card detect"); | ||
254 | if (err) { | ||
255 | printk(KERN_ERR "tosa_mci_init: can't request nSD_DETECT gpio\n"); | ||
256 | goto err_gpio_detect; | ||
257 | } | ||
258 | err = gpio_direction_input(TOSA_GPIO_nSD_DETECT); | ||
259 | if (err) | ||
260 | goto err_gpio_detect_dir; | ||
261 | |||
252 | err = request_irq(TOSA_IRQ_GPIO_nSD_DETECT, tosa_detect_int, | 262 | err = request_irq(TOSA_IRQ_GPIO_nSD_DETECT, tosa_detect_int, |
253 | IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, | 263 | IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, |
254 | "MMC/SD card detect", data); | 264 | "MMC/SD card detect", data); |
@@ -257,7 +267,7 @@ static int tosa_mci_init(struct device *dev, irq_handler_t tosa_detect_int, void | |||
257 | goto err_irq; | 267 | goto err_irq; |
258 | } | 268 | } |
259 | 269 | ||
260 | err = gpio_request(TOSA_GPIO_SD_WP, "sd_wp"); | 270 | err = gpio_request(TOSA_GPIO_SD_WP, "SD Write Protect"); |
261 | if (err) { | 271 | if (err) { |
262 | printk(KERN_ERR "tosa_mci_init: can't request SD_WP gpio\n"); | 272 | printk(KERN_ERR "tosa_mci_init: can't request SD_WP gpio\n"); |
263 | goto err_gpio_wp; | 273 | goto err_gpio_wp; |
@@ -266,7 +276,7 @@ static int tosa_mci_init(struct device *dev, irq_handler_t tosa_detect_int, void | |||
266 | if (err) | 276 | if (err) |
267 | goto err_gpio_wp_dir; | 277 | goto err_gpio_wp_dir; |
268 | 278 | ||
269 | err = gpio_request(TOSA_GPIO_PWR_ON, "sd_pwr"); | 279 | err = gpio_request(TOSA_GPIO_PWR_ON, "SD Power"); |
270 | if (err) { | 280 | if (err) { |
271 | printk(KERN_ERR "tosa_mci_init: can't request SD_PWR gpio\n"); | 281 | printk(KERN_ERR "tosa_mci_init: can't request SD_PWR gpio\n"); |
272 | goto err_gpio_pwr; | 282 | goto err_gpio_pwr; |
@@ -275,8 +285,20 @@ static int tosa_mci_init(struct device *dev, irq_handler_t tosa_detect_int, void | |||
275 | if (err) | 285 | if (err) |
276 | goto err_gpio_pwr_dir; | 286 | goto err_gpio_pwr_dir; |
277 | 287 | ||
288 | err = gpio_request(TOSA_GPIO_nSD_INT, "SD Int"); | ||
289 | if (err) { | ||
290 | printk(KERN_ERR "tosa_mci_init: can't request nSD_INT gpio\n"); | ||
291 | goto err_gpio_int; | ||
292 | } | ||
293 | err = gpio_direction_input(TOSA_GPIO_nSD_INT); | ||
294 | if (err) | ||
295 | goto err_gpio_int_dir; | ||
296 | |||
278 | return 0; | 297 | return 0; |
279 | 298 | ||
299 | err_gpio_int_dir: | ||
300 | gpio_free(TOSA_GPIO_nSD_INT); | ||
301 | err_gpio_int: | ||
280 | err_gpio_pwr_dir: | 302 | err_gpio_pwr_dir: |
281 | gpio_free(TOSA_GPIO_PWR_ON); | 303 | gpio_free(TOSA_GPIO_PWR_ON); |
282 | err_gpio_pwr: | 304 | err_gpio_pwr: |
@@ -285,6 +307,9 @@ err_gpio_wp_dir: | |||
285 | err_gpio_wp: | 307 | err_gpio_wp: |
286 | free_irq(TOSA_IRQ_GPIO_nSD_DETECT, data); | 308 | free_irq(TOSA_IRQ_GPIO_nSD_DETECT, data); |
287 | err_irq: | 309 | err_irq: |
310 | err_gpio_detect_dir: | ||
311 | gpio_free(TOSA_GPIO_nSD_DETECT); | ||
312 | err_gpio_detect: | ||
288 | return err; | 313 | return err; |
289 | } | 314 | } |
290 | 315 | ||
@@ -306,9 +331,11 @@ static int tosa_mci_get_ro(struct device *dev) | |||
306 | 331 | ||
307 | static void tosa_mci_exit(struct device *dev, void *data) | 332 | static void tosa_mci_exit(struct device *dev, void *data) |
308 | { | 333 | { |
334 | gpio_free(TOSA_GPIO_nSD_INT); | ||
309 | gpio_free(TOSA_GPIO_PWR_ON); | 335 | gpio_free(TOSA_GPIO_PWR_ON); |
310 | gpio_free(TOSA_GPIO_SD_WP); | 336 | gpio_free(TOSA_GPIO_SD_WP); |
311 | free_irq(TOSA_IRQ_GPIO_nSD_DETECT, data); | 337 | free_irq(TOSA_IRQ_GPIO_nSD_DETECT, data); |
338 | gpio_free(TOSA_GPIO_nSD_DETECT); | ||
312 | } | 339 | } |
313 | 340 | ||
314 | static struct pxamci_platform_data tosa_mci_platform_data = { | 341 | static struct pxamci_platform_data tosa_mci_platform_data = { |
@@ -322,29 +349,55 @@ static struct pxamci_platform_data tosa_mci_platform_data = { | |||
322 | /* | 349 | /* |
323 | * Irda | 350 | * Irda |
324 | */ | 351 | */ |
352 | static void tosa_irda_transceiver_mode(struct device *dev, int mode) | ||
353 | { | ||
354 | if (mode & IR_OFF) { | ||
355 | gpio_set_value(TOSA_GPIO_IR_POWERDWN, 0); | ||
356 | pxa2xx_transceiver_mode(dev, mode); | ||
357 | gpio_direction_output(TOSA_GPIO_IRDA_TX, 0); | ||
358 | } else { | ||
359 | pxa2xx_transceiver_mode(dev, mode); | ||
360 | gpio_set_value(TOSA_GPIO_IR_POWERDWN, 1); | ||
361 | } | ||
362 | } | ||
363 | |||
325 | static int tosa_irda_startup(struct device *dev) | 364 | static int tosa_irda_startup(struct device *dev) |
326 | { | 365 | { |
327 | int ret; | 366 | int ret; |
328 | 367 | ||
368 | ret = gpio_request(TOSA_GPIO_IRDA_TX, "IrDA TX"); | ||
369 | if (ret) | ||
370 | goto err_tx; | ||
371 | ret = gpio_direction_output(TOSA_GPIO_IRDA_TX, 0); | ||
372 | if (ret) | ||
373 | goto err_tx_dir; | ||
374 | |||
329 | ret = gpio_request(TOSA_GPIO_IR_POWERDWN, "IrDA powerdown"); | 375 | ret = gpio_request(TOSA_GPIO_IR_POWERDWN, "IrDA powerdown"); |
330 | if (ret) | 376 | if (ret) |
331 | return ret; | 377 | goto err_pwr; |
332 | 378 | ||
333 | ret = gpio_direction_output(TOSA_GPIO_IR_POWERDWN, 0); | 379 | ret = gpio_direction_output(TOSA_GPIO_IR_POWERDWN, 0); |
334 | if (ret) | 380 | if (ret) |
335 | gpio_free(TOSA_GPIO_IR_POWERDWN); | 381 | goto err_pwr_dir; |
336 | 382 | ||
337 | return ret; | 383 | tosa_irda_transceiver_mode(dev, IR_SIRMODE | IR_OFF); |
338 | } | ||
339 | 384 | ||
340 | static void tosa_irda_shutdown(struct device *dev) | 385 | return 0; |
341 | { | 386 | |
387 | err_pwr_dir: | ||
342 | gpio_free(TOSA_GPIO_IR_POWERDWN); | 388 | gpio_free(TOSA_GPIO_IR_POWERDWN); |
389 | err_pwr: | ||
390 | err_tx_dir: | ||
391 | gpio_free(TOSA_GPIO_IRDA_TX); | ||
392 | err_tx: | ||
393 | return ret; | ||
343 | } | 394 | } |
344 | 395 | ||
345 | static void tosa_irda_transceiver_mode(struct device *dev, int mode) | 396 | static void tosa_irda_shutdown(struct device *dev) |
346 | { | 397 | { |
347 | gpio_set_value(TOSA_GPIO_IR_POWERDWN, !(mode & IR_OFF)); | 398 | tosa_irda_transceiver_mode(dev, IR_SIRMODE | IR_OFF); |
399 | gpio_free(TOSA_GPIO_IR_POWERDWN); | ||
400 | gpio_free(TOSA_GPIO_IRDA_TX); | ||
348 | } | 401 | } |
349 | 402 | ||
350 | static struct pxaficp_platform_data tosa_ficp_platform_data = { | 403 | static struct pxaficp_platform_data tosa_ficp_platform_data = { |
@@ -355,6 +408,70 @@ static struct pxaficp_platform_data tosa_ficp_platform_data = { | |||
355 | }; | 408 | }; |
356 | 409 | ||
357 | /* | 410 | /* |
411 | * Tosa AC IN | ||
412 | */ | ||
413 | static int tosa_power_init(struct device *dev) | ||
414 | { | ||
415 | int ret = gpio_request(TOSA_GPIO_AC_IN, "ac in"); | ||
416 | if (ret) | ||
417 | goto err_gpio_req; | ||
418 | |||
419 | ret = gpio_direction_input(TOSA_GPIO_AC_IN); | ||
420 | if (ret) | ||
421 | goto err_gpio_in; | ||
422 | |||
423 | return 0; | ||
424 | |||
425 | err_gpio_in: | ||
426 | gpio_free(TOSA_GPIO_AC_IN); | ||
427 | err_gpio_req: | ||
428 | return ret; | ||
429 | } | ||
430 | |||
431 | static void tosa_power_exit(struct device *dev) | ||
432 | { | ||
433 | gpio_free(TOSA_GPIO_AC_IN); | ||
434 | } | ||
435 | |||
436 | static int tosa_power_ac_online(void) | ||
437 | { | ||
438 | return gpio_get_value(TOSA_GPIO_AC_IN) == 0; | ||
439 | } | ||
440 | |||
441 | static char *tosa_ac_supplied_to[] = { | ||
442 | "main-battery", | ||
443 | "backup-battery", | ||
444 | "jacket-battery", | ||
445 | }; | ||
446 | |||
447 | static struct pda_power_pdata tosa_power_data = { | ||
448 | .init = tosa_power_init, | ||
449 | .is_ac_online = tosa_power_ac_online, | ||
450 | .exit = tosa_power_exit, | ||
451 | .supplied_to = tosa_ac_supplied_to, | ||
452 | .num_supplicants = ARRAY_SIZE(tosa_ac_supplied_to), | ||
453 | }; | ||
454 | |||
455 | static struct resource tosa_power_resource[] = { | ||
456 | { | ||
457 | .name = "ac", | ||
458 | .start = gpio_to_irq(TOSA_GPIO_AC_IN), | ||
459 | .end = gpio_to_irq(TOSA_GPIO_AC_IN), | ||
460 | .flags = IORESOURCE_IRQ | | ||
461 | IORESOURCE_IRQ_HIGHEDGE | | ||
462 | IORESOURCE_IRQ_LOWEDGE, | ||
463 | }, | ||
464 | }; | ||
465 | |||
466 | static struct platform_device tosa_power_device = { | ||
467 | .name = "pda-power", | ||
468 | .id = -1, | ||
469 | .dev.platform_data = &tosa_power_data, | ||
470 | .resource = tosa_power_resource, | ||
471 | .num_resources = ARRAY_SIZE(tosa_power_resource), | ||
472 | }; | ||
473 | |||
474 | /* | ||
358 | * Tosa Keyboard | 475 | * Tosa Keyboard |
359 | */ | 476 | */ |
360 | static struct platform_device tosakbd_device = { | 477 | static struct platform_device tosakbd_device = { |
@@ -439,7 +556,7 @@ static struct gpio_led tosa_gpio_leds[] = { | |||
439 | }, | 556 | }, |
440 | { | 557 | { |
441 | .name = "tosa:blue:bluetooth", | 558 | .name = "tosa:blue:bluetooth", |
442 | .default_trigger = "none", | 559 | .default_trigger = "tosa-bt", |
443 | .gpio = TOSA_GPIO_BT_LED, | 560 | .gpio = TOSA_GPIO_BT_LED, |
444 | }, | 561 | }, |
445 | }; | 562 | }; |
@@ -457,21 +574,184 @@ static struct platform_device tosaled_device = { | |||
457 | }, | 574 | }, |
458 | }; | 575 | }; |
459 | 576 | ||
577 | /* | ||
578 | * Toshiba Mobile IO Controller | ||
579 | */ | ||
580 | static struct resource tc6393xb_resources[] = { | ||
581 | [0] = { | ||
582 | .start = TOSA_LCDC_PHYS, | ||
583 | .end = TOSA_LCDC_PHYS + 0x3ffffff, | ||
584 | .flags = IORESOURCE_MEM, | ||
585 | }, | ||
586 | |||
587 | [1] = { | ||
588 | .start = TOSA_IRQ_GPIO_TC6393XB_INT, | ||
589 | .end = TOSA_IRQ_GPIO_TC6393XB_INT, | ||
590 | .flags = IORESOURCE_IRQ, | ||
591 | }, | ||
592 | }; | ||
593 | |||
594 | |||
595 | static int tosa_tc6393xb_enable(struct platform_device *dev) | ||
596 | { | ||
597 | int rc; | ||
598 | |||
599 | rc = gpio_request(TOSA_GPIO_TC6393XB_REST_IN, "tc6393xb #pclr"); | ||
600 | if (rc) | ||
601 | goto err_req_pclr; | ||
602 | rc = gpio_request(TOSA_GPIO_TC6393XB_SUSPEND, "tc6393xb #suspend"); | ||
603 | if (rc) | ||
604 | goto err_req_suspend; | ||
605 | rc = gpio_request(TOSA_GPIO_TC6393XB_L3V_ON, "l3v"); | ||
606 | if (rc) | ||
607 | goto err_req_l3v; | ||
608 | rc = gpio_direction_output(TOSA_GPIO_TC6393XB_L3V_ON, 0); | ||
609 | if (rc) | ||
610 | goto err_dir_l3v; | ||
611 | rc = gpio_direction_output(TOSA_GPIO_TC6393XB_SUSPEND, 0); | ||
612 | if (rc) | ||
613 | goto err_dir_suspend; | ||
614 | rc = gpio_direction_output(TOSA_GPIO_TC6393XB_REST_IN, 0); | ||
615 | if (rc) | ||
616 | goto err_dir_pclr; | ||
617 | |||
618 | mdelay(1); | ||
619 | |||
620 | gpio_set_value(TOSA_GPIO_TC6393XB_SUSPEND, 1); | ||
621 | |||
622 | mdelay(10); | ||
623 | |||
624 | gpio_set_value(TOSA_GPIO_TC6393XB_REST_IN, 1); | ||
625 | gpio_set_value(TOSA_GPIO_TC6393XB_L3V_ON, 1); | ||
626 | |||
627 | return 0; | ||
628 | err_dir_pclr: | ||
629 | err_dir_suspend: | ||
630 | err_dir_l3v: | ||
631 | gpio_free(TOSA_GPIO_TC6393XB_L3V_ON); | ||
632 | err_req_l3v: | ||
633 | gpio_free(TOSA_GPIO_TC6393XB_SUSPEND); | ||
634 | err_req_suspend: | ||
635 | gpio_free(TOSA_GPIO_TC6393XB_REST_IN); | ||
636 | err_req_pclr: | ||
637 | return rc; | ||
638 | } | ||
639 | |||
640 | static int tosa_tc6393xb_disable(struct platform_device *dev) | ||
641 | { | ||
642 | gpio_free(TOSA_GPIO_TC6393XB_L3V_ON); | ||
643 | gpio_free(TOSA_GPIO_TC6393XB_SUSPEND); | ||
644 | gpio_free(TOSA_GPIO_TC6393XB_REST_IN); | ||
645 | |||
646 | return 0; | ||
647 | } | ||
648 | |||
649 | static int tosa_tc6393xb_resume(struct platform_device *dev) | ||
650 | { | ||
651 | gpio_set_value(TOSA_GPIO_TC6393XB_SUSPEND, 1); | ||
652 | mdelay(10); | ||
653 | gpio_set_value(TOSA_GPIO_TC6393XB_L3V_ON, 1); | ||
654 | mdelay(10); | ||
655 | |||
656 | return 0; | ||
657 | } | ||
658 | |||
659 | static int tosa_tc6393xb_suspend(struct platform_device *dev) | ||
660 | { | ||
661 | gpio_set_value(TOSA_GPIO_TC6393XB_L3V_ON, 0); | ||
662 | gpio_set_value(TOSA_GPIO_TC6393XB_SUSPEND, 0); | ||
663 | return 0; | ||
664 | } | ||
665 | |||
666 | static struct mtd_partition tosa_nand_partition[] = { | ||
667 | { | ||
668 | .name = "smf", | ||
669 | .offset = 0, | ||
670 | .size = 7 * 1024 * 1024, | ||
671 | }, | ||
672 | { | ||
673 | .name = "root", | ||
674 | .offset = MTDPART_OFS_APPEND, | ||
675 | .size = 28 * 1024 * 1024, | ||
676 | }, | ||
677 | { | ||
678 | .name = "home", | ||
679 | .offset = MTDPART_OFS_APPEND, | ||
680 | .size = MTDPART_SIZ_FULL, | ||
681 | }, | ||
682 | }; | ||
683 | |||
684 | static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; | ||
685 | |||
686 | static struct nand_bbt_descr tosa_tc6393xb_nand_bbt = { | ||
687 | .options = 0, | ||
688 | .offs = 4, | ||
689 | .len = 2, | ||
690 | .pattern = scan_ff_pattern | ||
691 | }; | ||
692 | |||
693 | static struct tmio_nand_data tosa_tc6393xb_nand_config = { | ||
694 | .num_partitions = ARRAY_SIZE(tosa_nand_partition), | ||
695 | .partition = tosa_nand_partition, | ||
696 | .badblock_pattern = &tosa_tc6393xb_nand_bbt, | ||
697 | }; | ||
698 | |||
699 | static struct tc6393xb_platform_data tosa_tc6393xb_setup = { | ||
700 | .scr_pll2cr = 0x0cc1, | ||
701 | .scr_gper = 0x3300, | ||
702 | .scr_gpo_dsr = | ||
703 | TOSA_TC6393XB_GPIO_BIT(TOSA_GPIO_CARD_VCC_ON), | ||
704 | .scr_gpo_doecr = | ||
705 | TOSA_TC6393XB_GPIO_BIT(TOSA_GPIO_CARD_VCC_ON), | ||
706 | |||
707 | .irq_base = IRQ_BOARD_START, | ||
708 | .gpio_base = TOSA_TC6393XB_GPIO_BASE, | ||
709 | |||
710 | .enable = tosa_tc6393xb_enable, | ||
711 | .disable = tosa_tc6393xb_disable, | ||
712 | .suspend = tosa_tc6393xb_suspend, | ||
713 | .resume = tosa_tc6393xb_resume, | ||
714 | |||
715 | .nand_data = &tosa_tc6393xb_nand_config, | ||
716 | }; | ||
717 | |||
718 | |||
719 | static struct platform_device tc6393xb_device = { | ||
720 | .name = "tc6393xb", | ||
721 | .id = -1, | ||
722 | .dev = { | ||
723 | .platform_data = &tosa_tc6393xb_setup, | ||
724 | }, | ||
725 | .num_resources = ARRAY_SIZE(tc6393xb_resources), | ||
726 | .resource = tc6393xb_resources, | ||
727 | }; | ||
728 | |||
729 | static struct tosa_bt_data tosa_bt_data = { | ||
730 | .gpio_pwr = TOSA_GPIO_BT_PWR_EN, | ||
731 | .gpio_reset = TOSA_GPIO_BT_RESET, | ||
732 | }; | ||
733 | |||
734 | static struct platform_device tosa_bt_device = { | ||
735 | .name = "tosa-bt", | ||
736 | .id = -1, | ||
737 | .dev.platform_data = &tosa_bt_data, | ||
738 | }; | ||
739 | |||
740 | |||
460 | static struct platform_device *devices[] __initdata = { | 741 | static struct platform_device *devices[] __initdata = { |
461 | &tosascoop_device, | 742 | &tosascoop_device, |
462 | &tosascoop_jc_device, | 743 | &tosascoop_jc_device, |
744 | &tc6393xb_device, | ||
745 | &tosa_power_device, | ||
463 | &tosakbd_device, | 746 | &tosakbd_device, |
464 | &tosa_gpio_keys_device, | 747 | &tosa_gpio_keys_device, |
465 | &tosaled_device, | 748 | &tosaled_device, |
749 | &tosa_bt_device, | ||
466 | }; | 750 | }; |
467 | 751 | ||
468 | static void tosa_poweroff(void) | 752 | static void tosa_poweroff(void) |
469 | { | 753 | { |
470 | gpio_direction_output(TOSA_GPIO_ON_RESET, 0); | 754 | arm_machine_restart('g'); |
471 | gpio_set_value(TOSA_GPIO_ON_RESET, 1); | ||
472 | |||
473 | mdelay(1000); | ||
474 | arm_machine_restart('h'); | ||
475 | } | 755 | } |
476 | 756 | ||
477 | static void tosa_restart(char mode) | 757 | static void tosa_restart(char mode) |
@@ -485,10 +765,14 @@ static void tosa_restart(char mode) | |||
485 | 765 | ||
486 | static void __init tosa_init(void) | 766 | static void __init tosa_init(void) |
487 | { | 767 | { |
768 | int dummy; | ||
769 | |||
488 | pxa2xx_mfp_config(ARRAY_AND_SIZE(tosa_pin_config)); | 770 | pxa2xx_mfp_config(ARRAY_AND_SIZE(tosa_pin_config)); |
489 | gpio_set_wake(MFP_PIN_GPIO1, 1); | 771 | gpio_set_wake(MFP_PIN_GPIO1, 1); |
490 | /* We can't pass to gpio-keys since it will drop the Reset altfunc */ | 772 | /* We can't pass to gpio-keys since it will drop the Reset altfunc */ |
491 | 773 | ||
774 | init_gpio_reset(TOSA_GPIO_ON_RESET); | ||
775 | |||
492 | pm_power_off = tosa_poweroff; | 776 | pm_power_off = tosa_poweroff; |
493 | arm_pm_restart = tosa_restart; | 777 | arm_pm_restart = tosa_restart; |
494 | 778 | ||
@@ -497,6 +781,10 @@ static void __init tosa_init(void) | |||
497 | /* enable batt_fault */ | 781 | /* enable batt_fault */ |
498 | PMCR = 0x01; | 782 | PMCR = 0x01; |
499 | 783 | ||
784 | dummy = gpiochip_reserve(TOSA_SCOOP_GPIO_BASE, 12); | ||
785 | dummy = gpiochip_reserve(TOSA_SCOOP_JC_GPIO_BASE, 12); | ||
786 | dummy = gpiochip_reserve(TOSA_TC6393XB_GPIO_BASE, 16); | ||
787 | |||
500 | pxa_set_mci_info(&tosa_mci_platform_data); | 788 | pxa_set_mci_info(&tosa_mci_platform_data); |
501 | pxa_set_udc_info(&udc_info); | 789 | pxa_set_udc_info(&udc_info); |
502 | pxa_set_ficp_info(&tosa_ficp_platform_data); | 790 | pxa_set_ficp_info(&tosa_ficp_platform_data); |
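One detail in the tosa_init() hunk above worth spelling out: the gpiochip_reserve() calls pin down the GPIO number ranges that the scoop and TC6393xB expanders will occupy once their drivers probe, so no other chip can be handed those numbers in the meantime. A hedged sketch of the same idea (base and count are placeholders):

#include <linux/gpio.h>
#include <linux/init.h>

#define EXAMPLE_EXPANDER_GPIO_BASE      160     /* placeholder, above the SoC GPIOs */

static int __init example_reserve_expander_gpios(void)
{
        /* Reserve 16 numbers now; the expander's gpio_chip will register
         * at this base later, once its parent device has probed. */
        return gpiochip_reserve(EXAMPLE_EXPANDER_GPIO_BASE, 16);
}
postcore_initcall(example_reserve_expander_gpios);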
diff --git a/arch/arm/mach-pxa/trizeps4.c b/arch/arm/mach-pxa/trizeps4.c index 61e244023089..dee7bf36f013 100644 --- a/arch/arm/mach-pxa/trizeps4.c +++ b/arch/arm/mach-pxa/trizeps4.c | |||
@@ -254,6 +254,7 @@ static void board_irda_mode(struct device *dev, int mode) | |||
254 | /* Fast mode */ | 254 | /* Fast mode */ |
255 | trizeps_conxs_ircr |= ConXS_IRCR_MODE; | 255 | trizeps_conxs_ircr |= ConXS_IRCR_MODE; |
256 | } | 256 | } |
257 | pxa2xx_transceiver_mode(dev, mode); | ||
257 | if (mode & IR_OFF) { | 258 | if (mode & IR_OFF) { |
258 | trizeps_conxs_ircr |= ConXS_IRCR_SD; | 259 | trizeps_conxs_ircr |= ConXS_IRCR_SD; |
259 | } else { | 260 | } else { |
diff --git a/arch/arm/mach-pxa/zylonite.c b/arch/arm/mach-pxa/zylonite.c index 66b446ca273d..8fca6d890b7d 100644 --- a/arch/arm/mach-pxa/zylonite.c +++ b/arch/arm/mach-pxa/zylonite.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
21 | #include <linux/pwm_backlight.h> | 21 | #include <linux/pwm_backlight.h> |
22 | #include <linux/smc91x.h> | ||
22 | 23 | ||
23 | #include <asm/mach-types.h> | 24 | #include <asm/mach-types.h> |
24 | #include <asm/mach/arch.h> | 25 | #include <asm/mach/arch.h> |
@@ -29,6 +30,7 @@ | |||
29 | #include <asm/arch/zylonite.h> | 30 | #include <asm/arch/zylonite.h> |
30 | #include <asm/arch/mmc.h> | 31 | #include <asm/arch/mmc.h> |
31 | #include <asm/arch/pxa27x_keypad.h> | 32 | #include <asm/arch/pxa27x_keypad.h> |
33 | #include <asm/arch/pxa3xx_nand.h> | ||
32 | 34 | ||
33 | #include "devices.h" | 35 | #include "devices.h" |
34 | #include "generic.h" | 36 | #include "generic.h" |
@@ -37,6 +39,8 @@ | |||
37 | struct platform_mmc_slot zylonite_mmc_slot[MAX_SLOTS]; | 39 | struct platform_mmc_slot zylonite_mmc_slot[MAX_SLOTS]; |
38 | 40 | ||
39 | int gpio_eth_irq; | 41 | int gpio_eth_irq; |
42 | int gpio_debug_led1; | ||
43 | int gpio_debug_led2; | ||
40 | 44 | ||
41 | int wm9713_irq; | 45 | int wm9713_irq; |
42 | 46 | ||
@@ -56,13 +60,57 @@ static struct resource smc91x_resources[] = { | |||
56 | } | 60 | } |
57 | }; | 61 | }; |
58 | 62 | ||
63 | static struct smc91x_platdata zylonite_smc91x_info = { | ||
64 | .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | | ||
65 | SMC91X_NOWAIT | SMC91X_USE_DMA, | ||
66 | }; | ||
67 | |||
59 | static struct platform_device smc91x_device = { | 68 | static struct platform_device smc91x_device = { |
60 | .name = "smc91x", | 69 | .name = "smc91x", |
61 | .id = 0, | 70 | .id = 0, |
62 | .num_resources = ARRAY_SIZE(smc91x_resources), | 71 | .num_resources = ARRAY_SIZE(smc91x_resources), |
63 | .resource = smc91x_resources, | 72 | .resource = smc91x_resources, |
73 | .dev = { | ||
74 | .platform_data = &zylonite_smc91x_info, | ||
75 | }, | ||
76 | }; | ||
77 | |||
78 | #if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE) | ||
79 | static struct gpio_led zylonite_debug_leds[] = { | ||
80 | [0] = { | ||
81 | .name = "zylonite:yellow:1", | ||
82 | .default_trigger = "heartbeat", | ||
83 | }, | ||
84 | [1] = { | ||
85 | .name = "zylonite:yellow:2", | ||
86 | .default_trigger = "default-on", | ||
87 | }, | ||
64 | }; | 88 | }; |
65 | 89 | ||
90 | static struct gpio_led_platform_data zylonite_debug_leds_info = { | ||
91 | .leds = zylonite_debug_leds, | ||
92 | .num_leds = ARRAY_SIZE(zylonite_debug_leds), | ||
93 | }; | ||
94 | |||
95 | static struct platform_device zylonite_device_leds = { | ||
96 | .name = "leds-gpio", | ||
97 | .id = -1, | ||
98 | .dev = { | ||
99 | .platform_data = &zylonite_debug_leds_info, | ||
100 | } | ||
101 | }; | ||
102 | |||
103 | static void __init zylonite_init_leds(void) | ||
104 | { | ||
105 | zylonite_debug_leds[0].gpio = gpio_debug_led1; | ||
106 | zylonite_debug_leds[1].gpio = gpio_debug_led2; | ||
107 | |||
108 | platform_device_register(&zylonite_device_leds); | ||
109 | } | ||
110 | #else | ||
111 | static inline void zylonite_init_leds(void) {} | ||
112 | #endif | ||
113 | |||
66 | #if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE) | 114 | #if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE) |
67 | static struct platform_pwm_backlight_data zylonite_backlight_data = { | 115 | static struct platform_pwm_backlight_data zylonite_backlight_data = { |
68 | .pwm_id = 3, | 116 | .pwm_id = 3, |
@@ -259,7 +307,7 @@ static void __init zylonite_init_mmc(void) | |||
259 | static inline void zylonite_init_mmc(void) {} | 307 | static inline void zylonite_init_mmc(void) {} |
260 | #endif | 308 | #endif |
261 | 309 | ||
262 | #if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULES) | 310 | #if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE) |
263 | static unsigned int zylonite_matrix_key_map[] = { | 311 | static unsigned int zylonite_matrix_key_map[] = { |
264 | /* KEY(row, col, key_code) */ | 312 | /* KEY(row, col, key_code) */ |
265 | KEY(0, 0, KEY_A), KEY(0, 1, KEY_B), KEY(0, 2, KEY_C), KEY(0, 5, KEY_D), | 313 | KEY(0, 0, KEY_A), KEY(0, 1, KEY_B), KEY(0, 2, KEY_C), KEY(0, 5, KEY_D), |
@@ -324,6 +372,57 @@ static void __init zylonite_init_keypad(void) | |||
324 | static inline void zylonite_init_keypad(void) {} | 372 | static inline void zylonite_init_keypad(void) {} |
325 | #endif | 373 | #endif |
326 | 374 | ||
375 | #if defined(CONFIG_MTD_NAND_PXA3xx) || defined(CONFIG_MTD_NAND_PXA3xx_MODULE) | ||
376 | static struct mtd_partition zylonite_nand_partitions[] = { | ||
377 | [0] = { | ||
378 | .name = "Bootloader", | ||
379 | .offset = 0, | ||
380 | .size = 0x060000, | ||
381 | .mask_flags = MTD_WRITEABLE, /* force read-only */ | ||
382 | }, | ||
383 | [1] = { | ||
384 | .name = "Kernel", | ||
385 | .offset = 0x060000, | ||
386 | .size = 0x200000, | ||
387 | .mask_flags = MTD_WRITEABLE, /* force read-only */ | ||
388 | }, | ||
389 | [2] = { | ||
390 | .name = "Filesystem", | ||
391 | .offset = 0x0260000, | ||
392 | .size = 0x3000000, /* 48M - rootfs */ | ||
393 | }, | ||
394 | [3] = { | ||
395 | .name = "MassStorage", | ||
396 | .offset = 0x3260000, | ||
397 | .size = 0x3d40000, | ||
398 | }, | ||
399 | [4] = { | ||
400 | .name = "BBT", | ||
401 | .offset = 0x6FA0000, | ||
402 | .size = 0x80000, | ||
403 | .mask_flags = MTD_WRITEABLE, /* force read-only */ | ||
404 | }, | ||
405 | /* NOTE: we reserve some blocks at the end of the NAND flash for | ||
406 | * bad block management, and the max number of relocation blocks | ||
407 | * differs on different platforms. Please take care with it when | ||
408 | * defining the partition table. | ||
409 | */ | ||
410 | }; | ||
411 | |||
412 | static struct pxa3xx_nand_platform_data zylonite_nand_info = { | ||
413 | .enable_arbiter = 1, | ||
414 | .parts = zylonite_nand_partitions, | ||
415 | .nr_parts = ARRAY_SIZE(zylonite_nand_partitions), | ||
416 | }; | ||
417 | |||
418 | static void __init zylonite_init_nand(void) | ||
419 | { | ||
420 | pxa3xx_set_nand_info(&zylonite_nand_info); | ||
421 | } | ||
422 | #else | ||
423 | static inline void zylonite_init_nand(void) {} | ||
424 | #endif /* CONFIG_MTD_NAND_PXA3xx || CONFIG_MTD_NAND_PXA3xx_MODULE */ | ||
425 | |||
327 | static void __init zylonite_init(void) | 426 | static void __init zylonite_init(void) |
328 | { | 427 | { |
329 | /* board-processor specific initialization */ | 428 | /* board-processor specific initialization */ |
@@ -342,6 +441,8 @@ static void __init zylonite_init(void) | |||
342 | zylonite_init_lcd(); | 441 | zylonite_init_lcd(); |
343 | zylonite_init_mmc(); | 442 | zylonite_init_mmc(); |
344 | zylonite_init_keypad(); | 443 | zylonite_init_keypad(); |
444 | zylonite_init_nand(); | ||
445 | zylonite_init_leds(); | ||
345 | } | 446 | } |
346 | 447 | ||
347 | MACHINE_START(ZYLONITE, "PXA3xx Platform Development Kit (aka Zylonite)") | 448 | MACHINE_START(ZYLONITE, "PXA3xx Platform Development Kit (aka Zylonite)") |
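As a quick cross-check of the NAND table above (a sketch, not part of the patch): the first four partitions sit back to back and end exactly where the read-only BBT partition begins, and the BBT itself ends at 0x7020000, leaving the tail of the chip for the bad-block relocation the in-line comment mentions:

#include <linux/kernel.h>               /* BUILD_BUG_ON() */

static inline void zylonite_nand_layout_check(void)
{
        /* Bootloader + Kernel + Filesystem + MassStorage == BBT offset */
        BUILD_BUG_ON(0x060000 + 0x200000 + 0x3000000 + 0x3d40000 != 0x6fa0000);
        /* the BBT partition itself ends at 0x6fa0000 + 0x80000 */
        BUILD_BUG_ON(0x6fa0000 + 0x80000 != 0x7020000);
}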
diff --git a/arch/arm/mach-pxa/zylonite_pxa300.c b/arch/arm/mach-pxa/zylonite_pxa300.c index 6f7ae972b8db..b28d46e081d3 100644 --- a/arch/arm/mach-pxa/zylonite_pxa300.c +++ b/arch/arm/mach-pxa/zylonite_pxa300.c | |||
@@ -16,9 +16,12 @@ | |||
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/i2c.h> | ||
20 | #include <linux/i2c/pca953x.h> | ||
19 | 21 | ||
20 | #include <asm/gpio.h> | 22 | #include <asm/gpio.h> |
21 | #include <asm/arch/mfp-pxa300.h> | 23 | #include <asm/arch/mfp-pxa300.h> |
24 | #include <asm/arch/i2c.h> | ||
22 | #include <asm/arch/zylonite.h> | 25 | #include <asm/arch/zylonite.h> |
23 | 26 | ||
24 | #include "generic.h" | 27 | #include "generic.h" |
@@ -109,6 +112,10 @@ static mfp_cfg_t common_mfp_cfg[] __initdata = { | |||
109 | GPIO12_MMC2_DAT3, | 112 | GPIO12_MMC2_DAT3, |
110 | GPIO13_MMC2_CLK, | 113 | GPIO13_MMC2_CLK, |
111 | GPIO14_MMC2_CMD, | 114 | GPIO14_MMC2_CMD, |
115 | |||
116 | /* Standard I2C */ | ||
117 | GPIO21_I2C_SCL, | ||
118 | GPIO22_I2C_SDA, | ||
112 | }; | 119 | }; |
113 | 120 | ||
114 | static mfp_cfg_t pxa300_mfp_cfg[] __initdata = { | 121 | static mfp_cfg_t pxa300_mfp_cfg[] __initdata = { |
@@ -192,6 +199,39 @@ static void __init zylonite_detect_lcd_panel(void) | |||
192 | pxa3xx_mfp_write(lcd_detect_pins[i], mfpr_save[i]); | 199 | pxa3xx_mfp_write(lcd_detect_pins[i], mfpr_save[i]); |
193 | } | 200 | } |
194 | 201 | ||
202 | #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) | ||
203 | static struct pca953x_platform_data gpio_exp[] = { | ||
204 | [0] = { | ||
205 | .gpio_base = 128, | ||
206 | }, | ||
207 | [1] = { | ||
208 | .gpio_base = 144, | ||
209 | }, | ||
210 | }; | ||
211 | |||
212 | struct i2c_board_info zylonite_i2c_board_info[] = { | ||
213 | { | ||
214 | .type = "pca9539", | ||
215 | .addr = 0x74, | ||
216 | .platform_data = &gpio_exp[0], | ||
217 | .irq = IRQ_GPIO(18), | ||
218 | }, { | ||
219 | .type = "pca9539", | ||
220 | .addr = 0x75, | ||
221 | .platform_data = &gpio_exp[1], | ||
222 | .irq = IRQ_GPIO(19), | ||
223 | }, | ||
224 | }; | ||
225 | |||
226 | static void __init zylonite_init_i2c(void) | ||
227 | { | ||
228 | pxa_set_i2c_info(NULL); | ||
229 | i2c_register_board_info(0, ARRAY_AND_SIZE(zylonite_i2c_board_info)); | ||
230 | } | ||
231 | #else | ||
232 | static inline void zylonite_init_i2c(void) {} | ||
233 | #endif | ||
234 | |||
195 | void __init zylonite_pxa300_init(void) | 235 | void __init zylonite_pxa300_init(void) |
196 | { | 236 | { |
197 | if (cpu_is_pxa300() || cpu_is_pxa310()) { | 237 | if (cpu_is_pxa300() || cpu_is_pxa310()) { |
@@ -207,6 +247,8 @@ void __init zylonite_pxa300_init(void) | |||
207 | 247 | ||
208 | /* WM9713 IRQ */ | 248 | /* WM9713 IRQ */ |
209 | wm9713_irq = mfp_to_gpio(MFP_PIN_GPIO26); | 249 | wm9713_irq = mfp_to_gpio(MFP_PIN_GPIO26); |
250 | |||
251 | zylonite_init_i2c(); | ||
210 | } | 252 | } |
211 | 253 | ||
212 | if (cpu_is_pxa300()) { | 254 | if (cpu_is_pxa300()) { |
@@ -222,4 +264,8 @@ void __init zylonite_pxa300_init(void) | |||
222 | zylonite_mmc_slot[2].gpio_cd = EXT_GPIO(30); | 264 | zylonite_mmc_slot[2].gpio_cd = EXT_GPIO(30); |
223 | zylonite_mmc_slot[2].gpio_wp = EXT_GPIO(31); | 265 | zylonite_mmc_slot[2].gpio_wp = EXT_GPIO(31); |
224 | } | 266 | } |
267 | |||
268 | /* GPIOs for Debug LEDs */ | ||
269 | gpio_debug_led1 = EXT_GPIO(25); | ||
270 | gpio_debug_led2 = EXT_GPIO(26); | ||
225 | } | 271 | } |
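The two pca9539 expanders above register at gpio_base 128 and 144, which is what lets the EXT_GPIO() numbers used for the MMC slots and debug LEDs land on expander pins; EXT_GPIO(n) is assumed here to expand to (128 + n), matching the first expander's base. A small illustrative sketch under that assumption:

#include <linux/gpio.h>

/* Assumption: EXT_GPIO(x) == 128 + (x), so EXT_GPIO(25)/EXT_GPIO(26) hit
 * the first pca9539, while numbers 144..159 would address the second one. */
static void example_set_debug_led1(int on)
{
        int gpio = 128 + 25;            /* EXT_GPIO(25): debug LED 1 */

        if (gpio_request(gpio, "debug led 1") == 0) {
                gpio_direction_output(gpio, on);
                gpio_free(gpio);
        }
}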
diff --git a/arch/arm/mach-pxa/zylonite_pxa320.c b/arch/arm/mach-pxa/zylonite_pxa320.c index 2b4fc34919ac..2b7fba7a2921 100644 --- a/arch/arm/mach-pxa/zylonite_pxa320.c +++ b/arch/arm/mach-pxa/zylonite_pxa320.c | |||
@@ -116,6 +116,10 @@ static mfp_cfg_t mfp_cfg[] __initdata = { | |||
116 | GPIO27_MMC2_DAT3, | 116 | GPIO27_MMC2_DAT3, |
117 | GPIO28_MMC2_CLK, | 117 | GPIO28_MMC2_CLK, |
118 | GPIO29_MMC2_CMD, | 118 | GPIO29_MMC2_CMD, |
119 | |||
120 | /* Debug LEDs */ | ||
121 | GPIO1_2_GPIO | MFP_LPM_DRIVE_HIGH, | ||
122 | GPIO4_2_GPIO | MFP_LPM_DRIVE_HIGH, | ||
119 | }; | 123 | }; |
120 | 124 | ||
121 | #define NUM_LCD_DETECT_PINS 7 | 125 | #define NUM_LCD_DETECT_PINS 7 |
@@ -189,6 +193,8 @@ void __init zylonite_pxa320_init(void) | |||
189 | 193 | ||
190 | /* GPIO pin assignment */ | 194 | /* GPIO pin assignment */ |
191 | gpio_eth_irq = mfp_to_gpio(MFP_PIN_GPIO9); | 195 | gpio_eth_irq = mfp_to_gpio(MFP_PIN_GPIO9); |
196 | gpio_debug_led1 = mfp_to_gpio(MFP_PIN_GPIO1_2); | ||
197 | gpio_debug_led2 = mfp_to_gpio(MFP_PIN_GPIO4_2); | ||
192 | 198 | ||
193 | /* MMC card detect & write protect for controller 0 */ | 199 | /* MMC card detect & write protect for controller 0 */ |
194 | zylonite_mmc_slot[0].gpio_cd = mfp_to_gpio(MFP_PIN_GPIO1); | 200 | zylonite_mmc_slot[0].gpio_cd = mfp_to_gpio(MFP_PIN_GPIO1); |
diff --git a/arch/arm/mach-sa1100/clock.c b/arch/arm/mach-sa1100/clock.c index fc97fe57ee6f..b5809c51d13f 100644 --- a/arch/arm/mach-sa1100/clock.c +++ b/arch/arm/mach-sa1100/clock.c | |||
@@ -103,7 +103,7 @@ static void clk_gpio27_disable(void) | |||
103 | } | 103 | } |
104 | 104 | ||
105 | static struct clk clk_gpio27 = { | 105 | static struct clk clk_gpio27 = { |
106 | .name = "GPIO27_CLK", | 106 | .name = "SA1111_CLK", |
107 | .rate = 3686400, | 107 | .rate = 3686400, |
108 | .enable = clk_gpio27_enable, | 108 | .enable = clk_gpio27_enable, |
109 | .disable = clk_gpio27_disable, | 109 | .disable = clk_gpio27_disable, |
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index f64b92557b11..2e27a8c8372b 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile | |||
@@ -76,3 +76,5 @@ obj-$(CONFIG_CPU_V7) += proc-v7.o | |||
76 | 76 | ||
77 | obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o | 77 | obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o |
78 | obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o | 78 | obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o |
79 | obj-$(CONFIG_CACHE_XSC3L2) += cache-xsc3l2.o | ||
80 | |||
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types index 0be5630ff568..8b8f564c3aa2 100644 --- a/arch/arm/tools/mach-types +++ b/arch/arm/tools/mach-types | |||
@@ -12,7 +12,7 @@ | |||
12 | # | 12 | # |
13 | # http://www.arm.linux.org.uk/developer/machines/?action=new | 13 | # http://www.arm.linux.org.uk/developer/machines/?action=new |
14 | # | 14 | # |
15 | # Last update: Mon Jul 7 16:25:39 2008 | 15 | # Last update: Sun Jul 13 12:04:05 2008 |
16 | # | 16 | # |
17 | # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number | 17 | # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number |
18 | # | 18 | # |
@@ -1812,3 +1812,11 @@ jade MACH_JADE JADE 1821 | |||
1812 | ks8695_softplc MACH_KS8695_SOFTPLC KS8695_SOFTPLC 1822 | 1812 | ks8695_softplc MACH_KS8695_SOFTPLC KS8695_SOFTPLC 1822 |
1813 | gprisc4 MACH_GPRISC4 GPRISC4 1823 | 1813 | gprisc4 MACH_GPRISC4 GPRISC4 1823 |
1814 | stamp9260 MACH_STAMP9260 STAMP9260 1824 | 1814 | stamp9260 MACH_STAMP9260 STAMP9260 1824 |
1815 | smdk6430 MACH_SMDK6430 SMDK6430 1825 | ||
1816 | smdkc100 MACH_SMDKC100 SMDKC100 1826 | ||
1817 | tavorevb MACH_TAVOREVB TAVOREVB 1827 | ||
1818 | saar MACH_SAAR SAAR 1828 | ||
1819 | deister_eyecam MACH_DEISTER_EYECAM DEISTER_EYECAM 1829 | ||
1820 | at91sam9m10ek MACH_AT91SAM9M10EK AT91SAM9M10EK 1830 | ||
1821 | linkstation_produo MACH_LINKSTATION_PRODUO LINKSTATION_PRODUO 1831 | ||
1822 | hit_b0 MACH_HIT_B0 HIT_B0 1832 | ||
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c index 021d51217184..604f44f5dd16 100644 --- a/arch/avr32/mach-at32ap/at32ap700x.c +++ b/arch/avr32/mach-at32ap/at32ap700x.c | |||
@@ -7,6 +7,7 @@ | |||
7 | */ | 7 | */ |
8 | #include <linux/clk.h> | 8 | #include <linux/clk.h> |
9 | #include <linux/delay.h> | 9 | #include <linux/delay.h> |
10 | #include <linux/dw_dmac.h> | ||
10 | #include <linux/fb.h> | 11 | #include <linux/fb.h> |
11 | #include <linux/init.h> | 12 | #include <linux/init.h> |
12 | #include <linux/platform_device.h> | 13 | #include <linux/platform_device.h> |
@@ -594,6 +595,17 @@ static void __init genclk_init_parent(struct clk *clk) | |||
594 | clk->parent = parent; | 595 | clk->parent = parent; |
595 | } | 596 | } |
596 | 597 | ||
598 | static struct dw_dma_platform_data dw_dmac0_data = { | ||
599 | .nr_channels = 3, | ||
600 | }; | ||
601 | |||
602 | static struct resource dw_dmac0_resource[] = { | ||
603 | PBMEM(0xff200000), | ||
604 | IRQ(2), | ||
605 | }; | ||
606 | DEFINE_DEV_DATA(dw_dmac, 0); | ||
607 | DEV_CLK(hclk, dw_dmac0, hsb, 10); | ||
608 | |||
597 | /* -------------------------------------------------------------------- | 609 | /* -------------------------------------------------------------------- |
598 | * System peripherals | 610 | * System peripherals |
599 | * -------------------------------------------------------------------- */ | 611 | * -------------------------------------------------------------------- */ |
@@ -708,17 +720,6 @@ static struct clk pico_clk = { | |||
708 | .users = 1, | 720 | .users = 1, |
709 | }; | 721 | }; |
710 | 722 | ||
711 | static struct resource dmaca0_resource[] = { | ||
712 | { | ||
713 | .start = 0xff200000, | ||
714 | .end = 0xff20ffff, | ||
715 | .flags = IORESOURCE_MEM, | ||
716 | }, | ||
717 | IRQ(2), | ||
718 | }; | ||
719 | DEFINE_DEV(dmaca, 0); | ||
720 | DEV_CLK(hclk, dmaca0, hsb, 10); | ||
721 | |||
722 | /* -------------------------------------------------------------------- | 723 | /* -------------------------------------------------------------------- |
723 | * HMATRIX | 724 | * HMATRIX |
724 | * -------------------------------------------------------------------- */ | 725 | * -------------------------------------------------------------------- */ |
@@ -831,7 +832,7 @@ void __init at32_add_system_devices(void) | |||
831 | platform_device_register(&at32_eic0_device); | 832 | platform_device_register(&at32_eic0_device); |
832 | platform_device_register(&smc0_device); | 833 | platform_device_register(&smc0_device); |
833 | platform_device_register(&pdc_device); | 834 | platform_device_register(&pdc_device); |
834 | platform_device_register(&dmaca0_device); | 835 | platform_device_register(&dw_dmac0_device); |
835 | 836 | ||
836 | platform_device_register(&at32_tcb0_device); | 837 | platform_device_register(&at32_tcb0_device); |
837 | platform_device_register(&at32_tcb1_device); | 838 | platform_device_register(&at32_tcb1_device); |
@@ -2032,7 +2033,7 @@ struct clk *at32_clock_list[] = { | |||
2032 | &smc0_mck, | 2033 | &smc0_mck, |
2033 | &pdc_hclk, | 2034 | &pdc_hclk, |
2034 | &pdc_pclk, | 2035 | &pdc_pclk, |
2035 | &dmaca0_hclk, | 2036 | &dw_dmac0_hclk, |
2036 | &pico_clk, | 2037 | &pico_clk, |
2037 | &pio0_mck, | 2038 | &pio0_mck, |
2038 | &pio1_mck, | 2039 | &pio1_mck, |
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 4c22242b396f..737ebf9d12bb 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
@@ -112,6 +112,7 @@ config PPC | |||
112 | select HAVE_FTRACE | 112 | select HAVE_FTRACE |
113 | select HAVE_IDE | 113 | select HAVE_IDE |
114 | select HAVE_KPROBES | 114 | select HAVE_KPROBES |
115 | select HAVE_ARCH_KGDB | ||
115 | select HAVE_KRETPROBES | 116 | select HAVE_KRETPROBES |
116 | select HAVE_LMB | 117 | select HAVE_LMB |
117 | select HAVE_DMA_ATTRS if PPC64 | 118 | select HAVE_DMA_ATTRS if PPC64 |
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug index 2840ab69ef4e..8c8aadbe9563 100644 --- a/arch/powerpc/Kconfig.debug +++ b/arch/powerpc/Kconfig.debug | |||
@@ -41,22 +41,6 @@ config HCALL_STATS | |||
41 | This option will add a small amount of overhead to all hypervisor | 41 | This option will add a small amount of overhead to all hypervisor |
42 | calls. | 42 | calls. |
43 | 43 | ||
44 | config DEBUGGER | ||
45 | bool "Enable debugger hooks" | ||
46 | depends on DEBUG_KERNEL | ||
47 | help | ||
48 | Include in-kernel hooks for kernel debuggers. Unless you are | ||
49 | intending to debug the kernel, say N here. | ||
50 | |||
51 | config KGDB | ||
52 | bool "Include kgdb kernel debugger" | ||
53 | depends on DEBUGGER && (BROKEN || PPC_GEN550 || 4xx) | ||
54 | select DEBUG_INFO | ||
55 | help | ||
56 | Include in-kernel hooks for kgdb, the Linux kernel source level | ||
57 | debugger. See <http://kgdb.sourceforge.net/> for more information. | ||
58 | Unless you are intending to debug the kernel, say N here. | ||
59 | |||
60 | config CODE_PATCHING_SELFTEST | 44 | config CODE_PATCHING_SELFTEST |
61 | bool "Run self-tests of the code-patching code." | 45 | bool "Run self-tests of the code-patching code." |
62 | depends on DEBUG_KERNEL | 46 | depends on DEBUG_KERNEL |
@@ -67,36 +51,9 @@ config FTR_FIXUP_SELFTEST | |||
67 | depends on DEBUG_KERNEL | 51 | depends on DEBUG_KERNEL |
68 | default n | 52 | default n |
69 | 53 | ||
70 | choice | ||
71 | prompt "Serial Port" | ||
72 | depends on KGDB | ||
73 | default KGDB_TTYS1 | ||
74 | |||
75 | config KGDB_TTYS0 | ||
76 | bool "ttyS0" | ||
77 | |||
78 | config KGDB_TTYS1 | ||
79 | bool "ttyS1" | ||
80 | |||
81 | config KGDB_TTYS2 | ||
82 | bool "ttyS2" | ||
83 | |||
84 | config KGDB_TTYS3 | ||
85 | bool "ttyS3" | ||
86 | |||
87 | endchoice | ||
88 | |||
89 | config KGDB_CONSOLE | ||
90 | bool "Enable serial console thru kgdb port" | ||
91 | depends on KGDB && 8xx || CPM2 | ||
92 | help | ||
93 | If you enable this, all serial console messages will be sent | ||
94 | over the gdb stub. | ||
95 | If unsure, say N. | ||
96 | |||
97 | config XMON | 54 | config XMON |
98 | bool "Include xmon kernel debugger" | 55 | bool "Include xmon kernel debugger" |
99 | depends on DEBUGGER | 56 | depends on DEBUG_KERNEL |
100 | help | 57 | help |
101 | Include in-kernel hooks for the xmon kernel monitor/debugger. | 58 | Include in-kernel hooks for the xmon kernel monitor/debugger. |
102 | Unless you are intending to debug the kernel, say N here. | 59 | Unless you are intending to debug the kernel, say N here. |
@@ -126,6 +83,11 @@ config XMON_DISASSEMBLY | |||
126 | to say Y here, unless you're building for a memory-constrained | 83 | to say Y here, unless you're building for a memory-constrained |
127 | system. | 84 | system. |
128 | 85 | ||
86 | config DEBUGGER | ||
87 | bool | ||
88 | depends on KGDB || XMON | ||
89 | default y | ||
90 | |||
129 | config IRQSTACKS | 91 | config IRQSTACKS |
130 | bool "Use separate kernel stacks when processing interrupts" | 92 | bool "Use separate kernel stacks when processing interrupts" |
131 | help | 93 | help |
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index bf0b1fd0ec34..1a4094704b1f 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile | |||
@@ -74,6 +74,7 @@ obj-y += time.o prom.o traps.o setup-common.o \ | |||
74 | misc_$(CONFIG_WORD_SIZE).o | 74 | misc_$(CONFIG_WORD_SIZE).o |
75 | obj-$(CONFIG_PPC32) += entry_32.o setup_32.o | 75 | obj-$(CONFIG_PPC32) += entry_32.o setup_32.o |
76 | obj-$(CONFIG_PPC64) += dma_64.o iommu.o | 76 | obj-$(CONFIG_PPC64) += dma_64.o iommu.o |
77 | obj-$(CONFIG_KGDB) += kgdb.o | ||
77 | obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o | 78 | obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o |
78 | obj-$(CONFIG_MODULES) += ppc_ksyms.o | 79 | obj-$(CONFIG_MODULES) += ppc_ksyms.o |
79 | obj-$(CONFIG_BOOTX_TEXT) += btext.o | 80 | obj-$(CONFIG_BOOTX_TEXT) += btext.o |
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c new file mode 100644 index 000000000000..b4fdf2f2743c --- /dev/null +++ b/arch/powerpc/kernel/kgdb.c | |||
@@ -0,0 +1,410 @@ | |||
1 | /* | ||
2 | * PowerPC backend to the KGDB stub. | ||
3 | * | ||
4 | * 1998 (c) Michael AK Tesch (tesch@cs.wisc.edu) | ||
5 | * Copyright (C) 2003 Timesys Corporation. | ||
6 | * Copyright (C) 2004-2006 MontaVista Software, Inc. | ||
7 | * PPC64 Mods (C) 2005 Frank Rowand (frowand@mvista.com) | ||
8 | * PPC32 support restored by Vitaly Wool <vwool@ru.mvista.com> and | ||
9 | * Sergei Shtylyov <sshtylyov@ru.mvista.com> | ||
10 | * Copyright (C) 2007-2008 Wind River Systems, Inc. | ||
11 | * | ||
12 | * This file is licensed under the terms of the GNU General Public License | ||
13 | * version 2. This program is licensed "as is" without any warranty of any | ||
14 | * kind, whether express or implied. | ||
15 | */ | ||
16 | |||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/kgdb.h> | ||
20 | #include <linux/smp.h> | ||
21 | #include <linux/signal.h> | ||
22 | #include <linux/ptrace.h> | ||
23 | #include <asm/current.h> | ||
24 | #include <asm/processor.h> | ||
25 | #include <asm/machdep.h> | ||
26 | |||
27 | /* | ||
28 | * This table contains the mapping between PowerPC hardware trap types and | ||
29 | * signals, which are primarily what GDB understands. GDB and the kernel | ||
30 | * don't always agree on values, so we use constants taken from gdb-6.2. | ||
31 | */ | ||
32 | static struct hard_trap_info | ||
33 | { | ||
34 | unsigned int tt; /* Trap type code for powerpc */ | ||
35 | unsigned char signo; /* Signal that we map this trap into */ | ||
36 | } hard_trap_info[] = { | ||
37 | { 0x0100, 0x02 /* SIGINT */ }, /* system reset */ | ||
38 | { 0x0200, 0x0b /* SIGSEGV */ }, /* machine check */ | ||
39 | { 0x0300, 0x0b /* SIGSEGV */ }, /* data access */ | ||
40 | { 0x0400, 0x0b /* SIGSEGV */ }, /* instruction access */ | ||
41 | { 0x0500, 0x02 /* SIGINT */ }, /* external interrupt */ | ||
42 | { 0x0600, 0x0a /* SIGBUS */ }, /* alignment */ | ||
43 | { 0x0700, 0x05 /* SIGTRAP */ }, /* program check */ | ||
44 | { 0x0800, 0x08 /* SIGFPE */ }, /* fp unavailable */ | ||
45 | { 0x0900, 0x0e /* SIGALRM */ }, /* decrementer */ | ||
46 | { 0x0c00, 0x14 /* SIGCHLD */ }, /* system call */ | ||
47 | #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) | ||
48 | { 0x2002, 0x05 /* SIGTRAP */ }, /* debug */ | ||
49 | #if defined(CONFIG_FSL_BOOKE) | ||
50 | { 0x2010, 0x08 /* SIGFPE */ }, /* spe unavailable */ | ||
51 | { 0x2020, 0x08 /* SIGFPE */ }, /* spe unavailable */ | ||
52 | { 0x2030, 0x08 /* SIGFPE */ }, /* spe fp data */ | ||
53 | { 0x2040, 0x08 /* SIGFPE */ }, /* spe fp data */ | ||
54 | { 0x2050, 0x08 /* SIGFPE */ }, /* spe fp round */ | ||
55 | { 0x2060, 0x0e /* SIGILL */ }, /* performance monitor */ | ||
56 | { 0x2900, 0x08 /* SIGFPE */ }, /* apu unavailable */ | ||
57 | { 0x3100, 0x0e /* SIGALRM */ }, /* fixed interval timer */ | ||
58 | { 0x3200, 0x02 /* SIGINT */ }, /* watchdog */ | ||
59 | #else /* ! CONFIG_FSL_BOOKE */ | ||
60 | { 0x1000, 0x0e /* SIGALRM */ }, /* prog interval timer */ | ||
61 | { 0x1010, 0x0e /* SIGALRM */ }, /* fixed interval timer */ | ||
62 | { 0x1020, 0x02 /* SIGINT */ }, /* watchdog */ | ||
63 | { 0x2010, 0x08 /* SIGFPE */ }, /* fp unavailable */ | ||
64 | { 0x2020, 0x08 /* SIGFPE */ }, /* ap unavailable */ | ||
65 | #endif | ||
66 | #else /* ! (defined(CONFIG_40x) || defined(CONFIG_BOOKE)) */ | ||
67 | { 0x0d00, 0x05 /* SIGTRAP */ }, /* single-step */ | ||
68 | #if defined(CONFIG_8xx) | ||
69 | { 0x1000, 0x04 /* SIGILL */ }, /* software emulation */ | ||
70 | #else /* ! CONFIG_8xx */ | ||
71 | { 0x0f00, 0x04 /* SIGILL */ }, /* performance monitor */ | ||
72 | { 0x0f20, 0x08 /* SIGFPE */ }, /* altivec unavailable */ | ||
73 | { 0x1300, 0x05 /* SIGTRAP */ }, /* instruction address break */ | ||
74 | #if defined(CONFIG_PPC64) | ||
75 | { 0x1200, 0x05 /* SIGILL */ }, /* system error */ | ||
76 | { 0x1500, 0x04 /* SIGILL */ }, /* soft patch */ | ||
77 | { 0x1600, 0x04 /* SIGILL */ }, /* maintenance */ | ||
78 | { 0x1700, 0x08 /* SIGFPE */ }, /* altivec assist */ | ||
79 | { 0x1800, 0x04 /* SIGILL */ }, /* thermal */ | ||
80 | #else /* ! CONFIG_PPC64 */ | ||
81 | { 0x1400, 0x02 /* SIGINT */ }, /* SMI */ | ||
82 | { 0x1600, 0x08 /* SIGFPE */ }, /* altivec assist */ | ||
83 | { 0x1700, 0x04 /* SIGILL */ }, /* TAU */ | ||
84 | { 0x2000, 0x05 /* SIGTRAP */ }, /* run mode */ | ||
85 | #endif | ||
86 | #endif | ||
87 | #endif | ||
88 | { 0x0000, 0x00 } /* Must be last */ | ||
89 | }; | ||
90 | |||
91 | static int computeSignal(unsigned int tt) | ||
92 | { | ||
93 | struct hard_trap_info *ht; | ||
94 | |||
95 | for (ht = hard_trap_info; ht->tt && ht->signo; ht++) | ||
96 | if (ht->tt == tt) | ||
97 | return ht->signo; | ||
98 | |||
99 | return SIGHUP; /* default for things we don't know about */ | ||
100 | } | ||
101 | |||
102 | static int kgdb_call_nmi_hook(struct pt_regs *regs) | ||
103 | { | ||
104 | kgdb_nmicallback(raw_smp_processor_id(), regs); | ||
105 | return 0; | ||
106 | } | ||
107 | |||
108 | #ifdef CONFIG_SMP | ||
109 | void kgdb_roundup_cpus(unsigned long flags) | ||
110 | { | ||
111 | smp_send_debugger_break(MSG_ALL_BUT_SELF); | ||
112 | } | ||
113 | #endif | ||
114 | |||
115 | /* KGDB functions to use existing PowerPC64 hooks. */ | ||
116 | static int kgdb_debugger(struct pt_regs *regs) | ||
117 | { | ||
118 | return kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs); | ||
119 | } | ||
120 | |||
121 | static int kgdb_handle_breakpoint(struct pt_regs *regs) | ||
122 | { | ||
123 | if (user_mode(regs)) | ||
124 | return 0; | ||
125 | |||
126 | if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0) | ||
127 | return 0; | ||
128 | |||
129 | if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr)) | ||
130 | regs->nip += 4; | ||
131 | |||
132 | return 1; | ||
133 | } | ||
134 | |||
135 | static int kgdb_singlestep(struct pt_regs *regs) | ||
136 | { | ||
137 | struct thread_info *thread_info, *exception_thread_info; | ||
138 | |||
139 | if (user_mode(regs)) | ||
140 | return 0; | ||
141 | |||
142 | /* | ||
143 | * On Book E and perhaps other processors, singlestep is handled on | ||
144 | * the critical exception stack. This causes current_thread_info() | ||
145 | * to fail, since it locates the thread_info by masking off | ||
146 | * the low bits of the current stack pointer. We work around | ||
147 | * this issue by copying the thread_info from the kernel stack | ||
148 | * before calling kgdb_handle_exception, and copying it back | ||
149 | * afterwards. On most processors the copy is avoided since | ||
150 | * exception_thread_info == thread_info. | ||
151 | */ | ||
152 | thread_info = (struct thread_info *)(regs->gpr[1] & ~(THREAD_SIZE-1)); | ||
153 | exception_thread_info = current_thread_info(); | ||
154 | |||
155 | if (thread_info != exception_thread_info) | ||
156 | memcpy(exception_thread_info, thread_info, sizeof *thread_info); | ||
157 | |||
158 | kgdb_handle_exception(0, SIGTRAP, 0, regs); | ||
159 | |||
160 | if (thread_info != exception_thread_info) | ||
161 | memcpy(thread_info, exception_thread_info, sizeof *thread_info); | ||
162 | |||
163 | return 1; | ||
164 | } | ||
165 | |||
166 | static int kgdb_iabr_match(struct pt_regs *regs) | ||
167 | { | ||
168 | if (user_mode(regs)) | ||
169 | return 0; | ||
170 | |||
171 | if (kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs) != 0) | ||
172 | return 0; | ||
173 | return 1; | ||
174 | } | ||
175 | |||
176 | static int kgdb_dabr_match(struct pt_regs *regs) | ||
177 | { | ||
178 | if (user_mode(regs)) | ||
179 | return 0; | ||
180 | |||
181 | if (kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs) != 0) | ||
182 | return 0; | ||
183 | return 1; | ||
184 | } | ||
185 | |||
186 | #define PACK64(ptr, src) do { *(ptr++) = (src); } while (0) | ||
187 | |||
188 | #define PACK32(ptr, src) do { \ | ||
189 | u32 *ptr32; \ | ||
190 | ptr32 = (u32 *)ptr; \ | ||
191 | *(ptr32++) = (src); \ | ||
192 | ptr = (unsigned long *)ptr32; \ | ||
193 | } while (0) | ||
194 | |||
195 | |||
196 | void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) | ||
197 | { | ||
198 | unsigned long *ptr = gdb_regs; | ||
199 | int reg; | ||
200 | |||
201 | memset(gdb_regs, 0, NUMREGBYTES); | ||
202 | |||
203 | for (reg = 0; reg < 32; reg++) | ||
204 | PACK64(ptr, regs->gpr[reg]); | ||
205 | |||
206 | #ifdef CONFIG_FSL_BOOKE | ||
207 | #ifdef CONFIG_SPE | ||
208 | for (reg = 0; reg < 32; reg++) | ||
209 | PACK64(ptr, current->thread.evr[reg]); | ||
210 | #else | ||
211 | ptr += 32; | ||
212 | #endif | ||
213 | #else | ||
214 | /* fp registers not used by kernel, leave zero */ | ||
215 | ptr += 32 * 8 / sizeof(long); | ||
216 | #endif | ||
217 | |||
218 | PACK64(ptr, regs->nip); | ||
219 | PACK64(ptr, regs->msr); | ||
220 | PACK32(ptr, regs->ccr); | ||
221 | PACK64(ptr, regs->link); | ||
222 | PACK64(ptr, regs->ctr); | ||
223 | PACK32(ptr, regs->xer); | ||
224 | |||
225 | BUG_ON((unsigned long)ptr > | ||
226 | (unsigned long)(((void *)gdb_regs) + NUMREGBYTES)); | ||
227 | } | ||
228 | |||
229 | void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) | ||
230 | { | ||
231 | struct pt_regs *regs = (struct pt_regs *)(p->thread.ksp + | ||
232 | STACK_FRAME_OVERHEAD); | ||
233 | unsigned long *ptr = gdb_regs; | ||
234 | int reg; | ||
235 | |||
236 | memset(gdb_regs, 0, NUMREGBYTES); | ||
237 | |||
238 | /* Regs GPR0-2 */ | ||
239 | for (reg = 0; reg < 3; reg++) | ||
240 | PACK64(ptr, regs->gpr[reg]); | ||
241 | |||
242 | /* Regs GPR3-13 are caller saved, not in regs->gpr[] */ | ||
243 | ptr += 11; | ||
244 | |||
245 | /* Regs GPR14-31 */ | ||
246 | for (reg = 14; reg < 32; reg++) | ||
247 | PACK64(ptr, regs->gpr[reg]); | ||
248 | |||
249 | #ifdef CONFIG_FSL_BOOKE | ||
250 | #ifdef CONFIG_SPE | ||
251 | for (reg = 0; reg < 32; reg++) | ||
252 | PACK64(ptr, p->thread.evr[reg]); | ||
253 | #else | ||
254 | ptr += 32; | ||
255 | #endif | ||
256 | #else | ||
257 | /* fp registers not used by kernel, leave zero */ | ||
258 | ptr += 32 * 8 / sizeof(long); | ||
259 | #endif | ||
260 | |||
261 | PACK64(ptr, regs->nip); | ||
262 | PACK64(ptr, regs->msr); | ||
263 | PACK32(ptr, regs->ccr); | ||
264 | PACK64(ptr, regs->link); | ||
265 | PACK64(ptr, regs->ctr); | ||
266 | PACK32(ptr, regs->xer); | ||
267 | |||
268 | BUG_ON((unsigned long)ptr > | ||
269 | (unsigned long)(((void *)gdb_regs) + NUMREGBYTES)); | ||
270 | } | ||
271 | |||
272 | #define UNPACK64(dest, ptr) do { dest = *(ptr++); } while (0) | ||
273 | |||
274 | #define UNPACK32(dest, ptr) do { \ | ||
275 | u32 *ptr32; \ | ||
276 | ptr32 = (u32 *)ptr; \ | ||
277 | dest = *(ptr32++); \ | ||
278 | ptr = (unsigned long *)ptr32; \ | ||
279 | } while (0) | ||
280 | |||
281 | void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) | ||
282 | { | ||
283 | unsigned long *ptr = gdb_regs; | ||
284 | int reg; | ||
285 | #ifdef CONFIG_SPE | ||
286 | union { | ||
287 | u32 v32[2]; | ||
288 | u64 v64; | ||
289 | } acc; | ||
290 | #endif | ||
291 | |||
292 | for (reg = 0; reg < 32; reg++) | ||
293 | UNPACK64(regs->gpr[reg], ptr); | ||
294 | |||
295 | #ifdef CONFIG_FSL_BOOKE | ||
296 | #ifdef CONFIG_SPE | ||
297 | for (reg = 0; reg < 32; reg++) | ||
298 | UNPACK64(current->thread.evr[reg], ptr); | ||
299 | #else | ||
300 | ptr += 32; | ||
301 | #endif | ||
302 | #else | ||
303 | /* fp registers not used by kernel, leave zero */ | ||
304 | ptr += 32 * 8 / sizeof(int); | ||
305 | #endif | ||
306 | |||
307 | UNPACK64(regs->nip, ptr); | ||
308 | UNPACK64(regs->msr, ptr); | ||
309 | UNPACK32(regs->ccr, ptr); | ||
310 | UNPACK64(regs->link, ptr); | ||
311 | UNPACK64(regs->ctr, ptr); | ||
312 | UNPACK32(regs->xer, ptr); | ||
313 | |||
314 | BUG_ON((unsigned long)ptr > | ||
315 | (unsigned long)(((void *)gdb_regs) + NUMREGBYTES)); | ||
316 | } | ||
317 | |||
318 | /* | ||
319 | * This function does PowerPC-specific processing for interfacing to gdb. | ||
320 | */ | ||
321 | int kgdb_arch_handle_exception(int vector, int signo, int err_code, | ||
322 | char *remcom_in_buffer, char *remcom_out_buffer, | ||
323 | struct pt_regs *linux_regs) | ||
324 | { | ||
325 | char *ptr = &remcom_in_buffer[1]; | ||
326 | unsigned long addr; | ||
327 | |||
328 | switch (remcom_in_buffer[0]) { | ||
329 | /* | ||
330 | * sAA..AA Step one instruction from AA..AA | ||
331 | * This will return an error to gdb .. | ||
332 | */ | ||
333 | case 's': | ||
334 | case 'c': | ||
335 | /* handle the optional parameter */ | ||
336 | if (kgdb_hex2long(&ptr, &addr)) | ||
337 | linux_regs->nip = addr; | ||
338 | |||
339 | atomic_set(&kgdb_cpu_doing_single_step, -1); | ||
340 | /* set the trace bit if we're stepping */ | ||
341 | if (remcom_in_buffer[0] == 's') { | ||
342 | #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) | ||
343 | mtspr(SPRN_DBCR0, | ||
344 | mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM); | ||
345 | linux_regs->msr |= MSR_DE; | ||
346 | #else | ||
347 | linux_regs->msr |= MSR_SE; | ||
348 | #endif | ||
349 | kgdb_single_step = 1; | ||
350 | if (kgdb_contthread) | ||
351 | atomic_set(&kgdb_cpu_doing_single_step, | ||
352 | raw_smp_processor_id()); | ||
353 | } | ||
354 | return 0; | ||
355 | } | ||
356 | |||
357 | return -1; | ||
358 | } | ||
359 | |||
360 | /* | ||
361 | * Global data | ||
362 | */ | ||
363 | struct kgdb_arch arch_kgdb_ops = { | ||
364 | .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08}, | ||
365 | }; | ||
366 | |||
367 | static int kgdb_not_implemented(struct pt_regs *regs) | ||
368 | { | ||
369 | return 0; | ||
370 | } | ||
371 | |||
372 | static void *old__debugger_ipi; | ||
373 | static void *old__debugger; | ||
374 | static void *old__debugger_bpt; | ||
375 | static void *old__debugger_sstep; | ||
376 | static void *old__debugger_iabr_match; | ||
377 | static void *old__debugger_dabr_match; | ||
378 | static void *old__debugger_fault_handler; | ||
379 | |||
380 | int kgdb_arch_init(void) | ||
381 | { | ||
382 | old__debugger_ipi = __debugger_ipi; | ||
383 | old__debugger = __debugger; | ||
384 | old__debugger_bpt = __debugger_bpt; | ||
385 | old__debugger_sstep = __debugger_sstep; | ||
386 | old__debugger_iabr_match = __debugger_iabr_match; | ||
387 | old__debugger_dabr_match = __debugger_dabr_match; | ||
388 | old__debugger_fault_handler = __debugger_fault_handler; | ||
389 | |||
390 | __debugger_ipi = kgdb_call_nmi_hook; | ||
391 | __debugger = kgdb_debugger; | ||
392 | __debugger_bpt = kgdb_handle_breakpoint; | ||
393 | __debugger_sstep = kgdb_singlestep; | ||
394 | __debugger_iabr_match = kgdb_iabr_match; | ||
395 | __debugger_dabr_match = kgdb_dabr_match; | ||
396 | __debugger_fault_handler = kgdb_not_implemented; | ||
397 | |||
398 | return 0; | ||
399 | } | ||
400 | |||
401 | void kgdb_arch_exit(void) | ||
402 | { | ||
403 | __debugger_ipi = old__debugger_ipi; | ||
404 | __debugger = old__debugger; | ||
405 | __debugger_bpt = old__debugger_bpt; | ||
406 | __debugger_sstep = old__debugger_sstep; | ||
407 | __debugger_iabr_match = old__debugger_iabr_match; | ||
408 | __debugger_dabr_match = old__debugger_dabr_match; | ||
409 | __debugger_fault_handler = old__debugger_fault_handler; | ||
410 | } | ||
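
The new arch/powerpc/kernel/kgdb.c translates each PowerPC trap vector into the signal number gdb expects via the hard_trap_info table and computeSignal(). A minimal user-space sketch of that same lookup, with the table trimmed to a few classic (non-BookE) entries purely for illustration:

#include <stdio.h>
#include <signal.h>

/* Sketch of the trap-type -> signal mapping used by the stub above;
 * entries and values here are illustrative, not the full kernel table. */
struct hard_trap_info { unsigned int tt; unsigned char signo; };

static const struct hard_trap_info traps[] = {
	{ 0x0300, SIGSEGV },	/* data access */
	{ 0x0700, SIGTRAP },	/* program check */
	{ 0x0d00, SIGTRAP },	/* single-step */
	{ 0x0000, 0 },		/* must be last */
};

static int compute_signal(unsigned int tt)
{
	const struct hard_trap_info *ht;

	for (ht = traps; ht->tt && ht->signo; ht++)
		if (ht->tt == tt)
			return ht->signo;
	return SIGHUP;		/* unknown traps default to SIGHUP */
}

int main(void)
{
	printf("trap 0x0700 -> signal %d\n", compute_signal(0x0700));
	return 0;
}

At run time the stub is reached through the __debugger_* hooks installed by kgdb_arch_init(); a typical setup (assuming the generic kgdboc driver is built in) is a boot line along the lines of kgdboc=ttyS0,115200 kgdbwait, as described in the in-tree kgdb documentation.
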
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 4efebe88e64a..066e65c59b58 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c | |||
@@ -43,10 +43,6 @@ | |||
43 | 43 | ||
44 | #define DBG(fmt...) | 44 | #define DBG(fmt...) |
45 | 45 | ||
46 | #if defined CONFIG_KGDB | ||
47 | #include <asm/kgdb.h> | ||
48 | #endif | ||
49 | |||
50 | extern void bootx_init(unsigned long r4, unsigned long phys); | 46 | extern void bootx_init(unsigned long r4, unsigned long phys); |
51 | 47 | ||
52 | int boot_cpuid; | 48 | int boot_cpuid; |
@@ -302,18 +298,6 @@ void __init setup_arch(char **cmdline_p) | |||
302 | 298 | ||
303 | xmon_setup(); | 299 | xmon_setup(); |
304 | 300 | ||
305 | #if defined(CONFIG_KGDB) | ||
306 | if (ppc_md.kgdb_map_scc) | ||
307 | ppc_md.kgdb_map_scc(); | ||
308 | set_debug_traps(); | ||
309 | if (strstr(cmd_line, "gdb")) { | ||
310 | if (ppc_md.progress) | ||
311 | ppc_md.progress("setup_arch: kgdb breakpoint", 0x4000); | ||
312 | printk("kgdb breakpoint activated\n"); | ||
313 | breakpoint(); | ||
314 | } | ||
315 | #endif | ||
316 | |||
317 | /* | 301 | /* |
318 | * Set cache line size based on type of cpu as a default. | 302 | * Set cache line size based on type of cpu as a default. |
319 | * Systems with OF can look in the properties on the cpu node(s) | 303 | * Systems with OF can look in the properties on the cpu node(s) |
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index 00bd0166d07f..31635446901a 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c | |||
@@ -97,8 +97,6 @@ extern struct machdep_calls pmac_md; | |||
97 | int sccdbg; | 97 | int sccdbg; |
98 | #endif | 98 | #endif |
99 | 99 | ||
100 | extern void zs_kgdb_hook(int tty_num); | ||
101 | |||
102 | sys_ctrler_t sys_ctrler = SYS_CTRLER_UNKNOWN; | 100 | sys_ctrler_t sys_ctrler = SYS_CTRLER_UNKNOWN; |
103 | EXPORT_SYMBOL(sys_ctrler); | 101 | EXPORT_SYMBOL(sys_ctrler); |
104 | 102 | ||
@@ -329,10 +327,6 @@ static void __init pmac_setup_arch(void) | |||
329 | l2cr_init(); | 327 | l2cr_init(); |
330 | #endif /* CONFIG_PPC32 */ | 328 | #endif /* CONFIG_PPC32 */ |
331 | 329 | ||
332 | #ifdef CONFIG_KGDB | ||
333 | zs_kgdb_hook(0); | ||
334 | #endif | ||
335 | |||
336 | find_via_cuda(); | 330 | find_via_cuda(); |
337 | find_via_pmu(); | 331 | find_via_pmu(); |
338 | smu_init(); | 332 | smu_init(); |
diff --git a/arch/sh/boards/renesas/migor/setup.c b/arch/sh/boards/renesas/migor/setup.c index 01af44245b57..963c99322095 100644 --- a/arch/sh/boards/renesas/migor/setup.c +++ b/arch/sh/boards/renesas/migor/setup.c | |||
@@ -30,7 +30,6 @@ | |||
30 | 30 | ||
31 | static struct smc91x_platdata smc91x_info = { | 31 | static struct smc91x_platdata smc91x_info = { |
32 | .flags = SMC91X_USE_16BIT, | 32 | .flags = SMC91X_USE_16BIT, |
33 | .irq_flags = IRQF_TRIGGER_HIGH, | ||
34 | }; | 33 | }; |
35 | 34 | ||
36 | static struct resource smc91x_eth_resources[] = { | 35 | static struct resource smc91x_eth_resources[] = { |
@@ -42,7 +41,7 @@ static struct resource smc91x_eth_resources[] = { | |||
42 | }, | 41 | }, |
43 | [1] = { | 42 | [1] = { |
44 | .start = 32, /* IRQ0 */ | 43 | .start = 32, /* IRQ0 */ |
45 | .flags = IORESOURCE_IRQ, | 44 | .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, |
46 | }, | 45 | }, |
47 | }; | 46 | }; |
48 | 47 | ||
diff --git a/arch/um/include/init.h b/arch/um/include/init.h index b00a95741d41..37dd097c16c0 100644 --- a/arch/um/include/init.h +++ b/arch/um/include/init.h | |||
@@ -45,6 +45,8 @@ typedef void (*exitcall_t)(void); | |||
45 | # define __section(S) __attribute__ ((__section__(#S))) | 45 | # define __section(S) __attribute__ ((__section__(#S))) |
46 | #endif | 46 | #endif |
47 | 47 | ||
48 | #if __GNUC__ == 3 | ||
49 | |||
48 | #if __GNUC_MINOR__ >= 3 | 50 | #if __GNUC_MINOR__ >= 3 |
49 | # define __used __attribute__((__used__)) | 51 | # define __used __attribute__((__used__)) |
50 | #else | 52 | #else |
@@ -52,6 +54,12 @@ typedef void (*exitcall_t)(void); | |||
52 | #endif | 54 | #endif |
53 | 55 | ||
54 | #else | 56 | #else |
57 | #if __GNUC__ == 4 | ||
58 | # define __used __attribute__((__used__)) | ||
59 | #endif | ||
60 | #endif | ||
61 | |||
62 | #else | ||
55 | #include <linux/compiler.h> | 63 | #include <linux/compiler.h> |
56 | #endif | 64 | #endif |
57 | /* These are for everybody (although not all archs will actually | 65 | /* These are for everybody (although not all archs will actually |
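
The init.h hunk above extends the standalone (non-kernel-header) fallback so that GCC 4 builds also get __used defined as __attribute__((__used__)), which keeps otherwise-unreferenced init/exit call descriptors from being discarded by the compiler. A small standalone illustration of the attribute (GCC/Clang only; not taken from this file):

/* __used forces the compiler to emit a symbol even when nothing in the
 * translation unit references it. Sketch only, assuming a GCC-compatible
 * compiler. */
#define __used __attribute__((__used__))

static void __used keep_me(void)
{
	/* never called, but still present in the object file */
}

int main(void)
{
	return 0;
}
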
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 23d146ce676b..021d71bc69b5 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S | |||
@@ -15,6 +15,16 @@ | |||
15 | #include <asm/irqflags.h> | 15 | #include <asm/irqflags.h> |
16 | #include <linux/linkage.h> | 16 | #include <linux/linkage.h> |
17 | 17 | ||
18 | /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ | ||
19 | #include <linux/elf-em.h> | ||
20 | #define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE) | ||
21 | #define __AUDIT_ARCH_LE 0x40000000 | ||
22 | |||
23 | #ifndef CONFIG_AUDITSYSCALL | ||
24 | #define sysexit_audit int_ret_from_sys_call | ||
25 | #define sysretl_audit int_ret_from_sys_call | ||
26 | #endif | ||
27 | |||
18 | #define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8) | 28 | #define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8) |
19 | 29 | ||
20 | .macro IA32_ARG_FIXUP noebp=0 | 30 | .macro IA32_ARG_FIXUP noebp=0 |
@@ -148,13 +158,15 @@ ENTRY(ia32_sysenter_target) | |||
148 | ja ia32_badsys | 158 | ja ia32_badsys |
149 | sysenter_do_call: | 159 | sysenter_do_call: |
150 | IA32_ARG_FIXUP 1 | 160 | IA32_ARG_FIXUP 1 |
161 | sysenter_dispatch: | ||
151 | call *ia32_sys_call_table(,%rax,8) | 162 | call *ia32_sys_call_table(,%rax,8) |
152 | movq %rax,RAX-ARGOFFSET(%rsp) | 163 | movq %rax,RAX-ARGOFFSET(%rsp) |
153 | GET_THREAD_INFO(%r10) | 164 | GET_THREAD_INFO(%r10) |
154 | DISABLE_INTERRUPTS(CLBR_NONE) | 165 | DISABLE_INTERRUPTS(CLBR_NONE) |
155 | TRACE_IRQS_OFF | 166 | TRACE_IRQS_OFF |
156 | testl $_TIF_ALLWORK_MASK,TI_flags(%r10) | 167 | testl $_TIF_ALLWORK_MASK,TI_flags(%r10) |
157 | jnz int_ret_from_sys_call | 168 | jnz sysexit_audit |
169 | sysexit_from_sys_call: | ||
158 | andl $~TS_COMPAT,TI_status(%r10) | 170 | andl $~TS_COMPAT,TI_status(%r10) |
159 | /* clear IF, that popfq doesn't enable interrupts early */ | 171 | /* clear IF, that popfq doesn't enable interrupts early */ |
160 | andl $~0x200,EFLAGS-R11(%rsp) | 172 | andl $~0x200,EFLAGS-R11(%rsp) |
@@ -170,9 +182,63 @@ sysenter_do_call: | |||
170 | TRACE_IRQS_ON | 182 | TRACE_IRQS_ON |
171 | ENABLE_INTERRUPTS_SYSEXIT32 | 183 | ENABLE_INTERRUPTS_SYSEXIT32 |
172 | 184 | ||
173 | sysenter_tracesys: | 185 | #ifdef CONFIG_AUDITSYSCALL |
186 | .macro auditsys_entry_common | ||
187 | movl %esi,%r9d /* 6th arg: 4th syscall arg */ | ||
188 | movl %edx,%r8d /* 5th arg: 3rd syscall arg */ | ||
189 | /* (already in %ecx) 4th arg: 2nd syscall arg */ | ||
190 | movl %ebx,%edx /* 3rd arg: 1st syscall arg */ | ||
191 | movl %eax,%esi /* 2nd arg: syscall number */ | ||
192 | movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */ | ||
193 | call audit_syscall_entry | ||
194 | movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */ | ||
195 | cmpl $(IA32_NR_syscalls-1),%eax | ||
196 | ja ia32_badsys | ||
197 | movl %ebx,%edi /* reload 1st syscall arg */ | ||
198 | movl RCX-ARGOFFSET(%rsp),%esi /* reload 2nd syscall arg */ | ||
199 | movl RDX-ARGOFFSET(%rsp),%edx /* reload 3rd syscall arg */ | ||
200 | movl RSI-ARGOFFSET(%rsp),%ecx /* reload 4th syscall arg */ | ||
201 | movl RDI-ARGOFFSET(%rsp),%r8d /* reload 5th syscall arg */ | ||
202 | .endm | ||
203 | |||
204 | .macro auditsys_exit exit | ||
205 | testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10) | ||
206 | jnz int_ret_from_sys_call | ||
207 | TRACE_IRQS_ON | ||
208 | sti | ||
209 | movl %eax,%esi /* second arg, syscall return value */ | ||
210 | cmpl $0,%eax /* is it < 0? */ | ||
211 | setl %al /* 1 if so, 0 if not */ | ||
212 | movzbl %al,%edi /* zero-extend that into %edi */ | ||
213 | inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */ | ||
214 | call audit_syscall_exit | ||
215 | GET_THREAD_INFO(%r10) | ||
216 | movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */ | ||
217 | movl RBP-ARGOFFSET(%rsp),%ebp /* reload user register value */ | ||
218 | movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi | ||
219 | cli | ||
220 | TRACE_IRQS_OFF | ||
221 | testl %edi,TI_flags(%r10) | ||
222 | jnz int_with_check | ||
223 | jmp \exit | ||
224 | .endm | ||
225 | |||
226 | sysenter_auditsys: | ||
174 | CFI_RESTORE_STATE | 227 | CFI_RESTORE_STATE |
228 | auditsys_entry_common | ||
229 | movl %ebp,%r9d /* reload 6th syscall arg */ | ||
230 | jmp sysenter_dispatch | ||
231 | |||
232 | sysexit_audit: | ||
233 | auditsys_exit sysexit_from_sys_call | ||
234 | #endif | ||
235 | |||
236 | sysenter_tracesys: | ||
175 | xchgl %r9d,%ebp | 237 | xchgl %r9d,%ebp |
238 | #ifdef CONFIG_AUDITSYSCALL | ||
239 | testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10) | ||
240 | jz sysenter_auditsys | ||
241 | #endif | ||
176 | SAVE_REST | 242 | SAVE_REST |
177 | CLEAR_RREGS | 243 | CLEAR_RREGS |
178 | movq %r9,R9(%rsp) | 244 | movq %r9,R9(%rsp) |
@@ -252,13 +318,15 @@ cstar_do_call: | |||
252 | cmpl $IA32_NR_syscalls-1,%eax | 318 | cmpl $IA32_NR_syscalls-1,%eax |
253 | ja ia32_badsys | 319 | ja ia32_badsys |
254 | IA32_ARG_FIXUP 1 | 320 | IA32_ARG_FIXUP 1 |
321 | cstar_dispatch: | ||
255 | call *ia32_sys_call_table(,%rax,8) | 322 | call *ia32_sys_call_table(,%rax,8) |
256 | movq %rax,RAX-ARGOFFSET(%rsp) | 323 | movq %rax,RAX-ARGOFFSET(%rsp) |
257 | GET_THREAD_INFO(%r10) | 324 | GET_THREAD_INFO(%r10) |
258 | DISABLE_INTERRUPTS(CLBR_NONE) | 325 | DISABLE_INTERRUPTS(CLBR_NONE) |
259 | TRACE_IRQS_OFF | 326 | TRACE_IRQS_OFF |
260 | testl $_TIF_ALLWORK_MASK,TI_flags(%r10) | 327 | testl $_TIF_ALLWORK_MASK,TI_flags(%r10) |
261 | jnz int_ret_from_sys_call | 328 | jnz sysretl_audit |
329 | sysretl_from_sys_call: | ||
262 | andl $~TS_COMPAT,TI_status(%r10) | 330 | andl $~TS_COMPAT,TI_status(%r10) |
263 | RESTORE_ARGS 1,-ARG_SKIP,1,1,1 | 331 | RESTORE_ARGS 1,-ARG_SKIP,1,1,1 |
264 | movl RIP-ARGOFFSET(%rsp),%ecx | 332 | movl RIP-ARGOFFSET(%rsp),%ecx |
@@ -270,8 +338,23 @@ cstar_do_call: | |||
270 | CFI_RESTORE rsp | 338 | CFI_RESTORE rsp |
271 | USERGS_SYSRET32 | 339 | USERGS_SYSRET32 |
272 | 340 | ||
273 | cstar_tracesys: | 341 | #ifdef CONFIG_AUDITSYSCALL |
342 | cstar_auditsys: | ||
274 | CFI_RESTORE_STATE | 343 | CFI_RESTORE_STATE |
344 | movl %r9d,R9-ARGOFFSET(%rsp) /* register to be clobbered by call */ | ||
345 | auditsys_entry_common | ||
346 | movl R9-ARGOFFSET(%rsp),%r9d /* reload 6th syscall arg */ | ||
347 | jmp cstar_dispatch | ||
348 | |||
349 | sysretl_audit: | ||
350 | auditsys_exit sysretl_from_sys_call | ||
351 | #endif | ||
352 | |||
353 | cstar_tracesys: | ||
354 | #ifdef CONFIG_AUDITSYSCALL | ||
355 | testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10) | ||
356 | jz cstar_auditsys | ||
357 | #endif | ||
275 | xchgl %r9d,%ebp | 358 | xchgl %r9d,%ebp |
276 | SAVE_REST | 359 | SAVE_REST |
277 | CLEAR_RREGS | 360 | CLEAR_RREGS |
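
The sysenter/cstar changes above add an audit-only fast path: when TIF_SYSCALL_AUDIT is the sole syscall-entry work bit set, the entry code calls the audit hooks directly and skips the full tracesys/int_ret_from_sys_call slow path. A rough user-space sketch of that flag test follows; the TIF_* bit values are placeholders, not the real x86 definitions.

#include <stdio.h>

/* Placeholder flag bits chosen only to illustrate the mask logic in the
 * "testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT)" checks above. */
#define TIF_SYSCALL_AUDIT   (1u << 0)
#define TIF_SYSCALL_TRACE   (1u << 1)
#define TIF_SECCOMP         (1u << 2)
#define WORK_SYSCALL_ENTRY  (TIF_SYSCALL_AUDIT | TIF_SYSCALL_TRACE | TIF_SECCOMP)

static const char *entry_path(unsigned int ti_flags)
{
	/* Any work bit other than audit forces the slow path. */
	if (ti_flags & (WORK_SYSCALL_ENTRY & ~TIF_SYSCALL_AUDIT))
		return "tracesys (slow path)";
	return "auditsys (fast path)";
}

int main(void)
{
	printf("%s\n", entry_path(TIF_SYSCALL_AUDIT));
	printf("%s\n", entry_path(TIF_SYSCALL_AUDIT | TIF_SYSCALL_TRACE));
	return 0;
}
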
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index c2502eb9aa83..9220cf46aa10 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c | |||
@@ -73,6 +73,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu, | |||
73 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 73 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
74 | 74 | ||
75 | cpumask_t saved_mask; | 75 | cpumask_t saved_mask; |
76 | cpumask_of_cpu_ptr(new_mask, cpu); | ||
76 | int retval; | 77 | int retval; |
77 | unsigned int eax, ebx, ecx, edx; | 78 | unsigned int eax, ebx, ecx, edx; |
78 | unsigned int edx_part; | 79 | unsigned int edx_part; |
@@ -91,7 +92,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu, | |||
91 | 92 | ||
92 | /* Make sure we are running on right CPU */ | 93 | /* Make sure we are running on right CPU */ |
93 | saved_mask = current->cpus_allowed; | 94 | saved_mask = current->cpus_allowed; |
94 | retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); | 95 | retval = set_cpus_allowed_ptr(current, new_mask); |
95 | if (retval) | 96 | if (retval) |
96 | return -1; | 97 | return -1; |
97 | 98 | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index b0c8208df9fa..ff2fff56f0a8 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | |||
@@ -200,10 +200,12 @@ static void drv_read(struct drv_cmd *cmd) | |||
200 | static void drv_write(struct drv_cmd *cmd) | 200 | static void drv_write(struct drv_cmd *cmd) |
201 | { | 201 | { |
202 | cpumask_t saved_mask = current->cpus_allowed; | 202 | cpumask_t saved_mask = current->cpus_allowed; |
203 | cpumask_of_cpu_ptr_declare(cpu_mask); | ||
203 | unsigned int i; | 204 | unsigned int i; |
204 | 205 | ||
205 | for_each_cpu_mask(i, cmd->mask) { | 206 | for_each_cpu_mask_nr(i, cmd->mask) { |
206 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(i)); | 207 | cpumask_of_cpu_ptr_next(cpu_mask, i); |
208 | set_cpus_allowed_ptr(current, cpu_mask); | ||
207 | do_drv_write(cmd); | 209 | do_drv_write(cmd); |
208 | } | 210 | } |
209 | 211 | ||
@@ -267,11 +269,12 @@ static unsigned int get_measured_perf(unsigned int cpu) | |||
267 | } aperf_cur, mperf_cur; | 269 | } aperf_cur, mperf_cur; |
268 | 270 | ||
269 | cpumask_t saved_mask; | 271 | cpumask_t saved_mask; |
272 | cpumask_of_cpu_ptr(cpu_mask, cpu); | ||
270 | unsigned int perf_percent; | 273 | unsigned int perf_percent; |
271 | unsigned int retval; | 274 | unsigned int retval; |
272 | 275 | ||
273 | saved_mask = current->cpus_allowed; | 276 | saved_mask = current->cpus_allowed; |
274 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); | 277 | set_cpus_allowed_ptr(current, cpu_mask); |
275 | if (get_cpu() != cpu) { | 278 | if (get_cpu() != cpu) { |
276 | /* We were not able to run on requested processor */ | 279 | /* We were not able to run on requested processor */ |
277 | put_cpu(); | 280 | put_cpu(); |
@@ -337,6 +340,7 @@ static unsigned int get_measured_perf(unsigned int cpu) | |||
337 | 340 | ||
338 | static unsigned int get_cur_freq_on_cpu(unsigned int cpu) | 341 | static unsigned int get_cur_freq_on_cpu(unsigned int cpu) |
339 | { | 342 | { |
343 | cpumask_of_cpu_ptr(cpu_mask, cpu); | ||
340 | struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu); | 344 | struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu); |
341 | unsigned int freq; | 345 | unsigned int freq; |
342 | unsigned int cached_freq; | 346 | unsigned int cached_freq; |
@@ -349,7 +353,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu) | |||
349 | } | 353 | } |
350 | 354 | ||
351 | cached_freq = data->freq_table[data->acpi_data->state].frequency; | 355 | cached_freq = data->freq_table[data->acpi_data->state].frequency; |
352 | freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data); | 356 | freq = extract_freq(get_cur_val(cpu_mask), data); |
353 | if (freq != cached_freq) { | 357 | if (freq != cached_freq) { |
354 | /* | 358 | /* |
355 | * The dreaded BIOS frequency change behind our back. | 359 | * The dreaded BIOS frequency change behind our back. |
@@ -451,7 +455,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, | |||
451 | 455 | ||
452 | freqs.old = perf->states[perf->state].core_frequency * 1000; | 456 | freqs.old = perf->states[perf->state].core_frequency * 1000; |
453 | freqs.new = data->freq_table[next_state].frequency; | 457 | freqs.new = data->freq_table[next_state].frequency; |
454 | for_each_cpu_mask(i, cmd.mask) { | 458 | for_each_cpu_mask_nr(i, cmd.mask) { |
455 | freqs.cpu = i; | 459 | freqs.cpu = i; |
456 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | 460 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); |
457 | } | 461 | } |
@@ -466,7 +470,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, | |||
466 | } | 470 | } |
467 | } | 471 | } |
468 | 472 | ||
469 | for_each_cpu_mask(i, cmd.mask) { | 473 | for_each_cpu_mask_nr(i, cmd.mask) { |
470 | freqs.cpu = i; | 474 | freqs.cpu = i; |
471 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | 475 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); |
472 | } | 476 | } |
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c index 199e4e05e5dc..f1685fb91fbd 100644 --- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c +++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c | |||
@@ -122,7 +122,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy, | |||
122 | return 0; | 122 | return 0; |
123 | 123 | ||
124 | /* notifiers */ | 124 | /* notifiers */ |
125 | for_each_cpu_mask(i, policy->cpus) { | 125 | for_each_cpu_mask_nr(i, policy->cpus) { |
126 | freqs.cpu = i; | 126 | freqs.cpu = i; |
127 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | 127 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); |
128 | } | 128 | } |
@@ -130,11 +130,11 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy, | |||
130 | /* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software | 130 | /* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software |
131 | * Developer's Manual, Volume 3 | 131 | * Developer's Manual, Volume 3 |
132 | */ | 132 | */ |
133 | for_each_cpu_mask(i, policy->cpus) | 133 | for_each_cpu_mask_nr(i, policy->cpus) |
134 | cpufreq_p4_setdc(i, p4clockmod_table[newstate].index); | 134 | cpufreq_p4_setdc(i, p4clockmod_table[newstate].index); |
135 | 135 | ||
136 | /* notifiers */ | 136 | /* notifiers */ |
137 | for_each_cpu_mask(i, policy->cpus) { | 137 | for_each_cpu_mask_nr(i, policy->cpus) { |
138 | freqs.cpu = i; | 138 | freqs.cpu = i; |
139 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | 139 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); |
140 | } | 140 | } |
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index 206791eb46e3..53c7b6936973 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c | |||
@@ -479,11 +479,12 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvi | |||
479 | static int check_supported_cpu(unsigned int cpu) | 479 | static int check_supported_cpu(unsigned int cpu) |
480 | { | 480 | { |
481 | cpumask_t oldmask; | 481 | cpumask_t oldmask; |
482 | cpumask_of_cpu_ptr(cpu_mask, cpu); | ||
482 | u32 eax, ebx, ecx, edx; | 483 | u32 eax, ebx, ecx, edx; |
483 | unsigned int rc = 0; | 484 | unsigned int rc = 0; |
484 | 485 | ||
485 | oldmask = current->cpus_allowed; | 486 | oldmask = current->cpus_allowed; |
486 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); | 487 | set_cpus_allowed_ptr(current, cpu_mask); |
487 | 488 | ||
488 | if (smp_processor_id() != cpu) { | 489 | if (smp_processor_id() != cpu) { |
489 | printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu); | 490 | printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu); |
@@ -966,7 +967,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned i | |||
966 | freqs.old = find_khz_freq_from_fid(data->currfid); | 967 | freqs.old = find_khz_freq_from_fid(data->currfid); |
967 | freqs.new = find_khz_freq_from_fid(fid); | 968 | freqs.new = find_khz_freq_from_fid(fid); |
968 | 969 | ||
969 | for_each_cpu_mask(i, *(data->available_cores)) { | 970 | for_each_cpu_mask_nr(i, *(data->available_cores)) { |
970 | freqs.cpu = i; | 971 | freqs.cpu = i; |
971 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | 972 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); |
972 | } | 973 | } |
@@ -974,7 +975,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned i | |||
974 | res = transition_fid_vid(data, fid, vid); | 975 | res = transition_fid_vid(data, fid, vid); |
975 | freqs.new = find_khz_freq_from_fid(data->currfid); | 976 | freqs.new = find_khz_freq_from_fid(data->currfid); |
976 | 977 | ||
977 | for_each_cpu_mask(i, *(data->available_cores)) { | 978 | for_each_cpu_mask_nr(i, *(data->available_cores)) { |
978 | freqs.cpu = i; | 979 | freqs.cpu = i; |
979 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | 980 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); |
980 | } | 981 | } |
@@ -997,7 +998,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i | |||
997 | freqs.old = find_khz_freq_from_pstate(data->powernow_table, data->currpstate); | 998 | freqs.old = find_khz_freq_from_pstate(data->powernow_table, data->currpstate); |
998 | freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate); | 999 | freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate); |
999 | 1000 | ||
1000 | for_each_cpu_mask(i, *(data->available_cores)) { | 1001 | for_each_cpu_mask_nr(i, *(data->available_cores)) { |
1001 | freqs.cpu = i; | 1002 | freqs.cpu = i; |
1002 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | 1003 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); |
1003 | } | 1004 | } |
@@ -1005,7 +1006,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i | |||
1005 | res = transition_pstate(data, pstate); | 1006 | res = transition_pstate(data, pstate); |
1006 | freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate); | 1007 | freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate); |
1007 | 1008 | ||
1008 | for_each_cpu_mask(i, *(data->available_cores)) { | 1009 | for_each_cpu_mask_nr(i, *(data->available_cores)) { |
1009 | freqs.cpu = i; | 1010 | freqs.cpu = i; |
1010 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | 1011 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); |
1011 | } | 1012 | } |
@@ -1016,6 +1017,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i | |||
1016 | static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation) | 1017 | static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation) |
1017 | { | 1018 | { |
1018 | cpumask_t oldmask; | 1019 | cpumask_t oldmask; |
1020 | cpumask_of_cpu_ptr(cpu_mask, pol->cpu); | ||
1019 | struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); | 1021 | struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); |
1020 | u32 checkfid; | 1022 | u32 checkfid; |
1021 | u32 checkvid; | 1023 | u32 checkvid; |
@@ -1030,7 +1032,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi | |||
1030 | 1032 | ||
1031 | /* only run on specific CPU from here on */ | 1033 | /* only run on specific CPU from here on */ |
1032 | oldmask = current->cpus_allowed; | 1034 | oldmask = current->cpus_allowed; |
1033 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu)); | 1035 | set_cpus_allowed_ptr(current, cpu_mask); |
1034 | 1036 | ||
1035 | if (smp_processor_id() != pol->cpu) { | 1037 | if (smp_processor_id() != pol->cpu) { |
1036 | printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); | 1038 | printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); |
@@ -1105,6 +1107,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
1105 | { | 1107 | { |
1106 | struct powernow_k8_data *data; | 1108 | struct powernow_k8_data *data; |
1107 | cpumask_t oldmask; | 1109 | cpumask_t oldmask; |
1110 | cpumask_of_cpu_ptr_declare(newmask); | ||
1108 | int rc; | 1111 | int rc; |
1109 | 1112 | ||
1110 | if (!cpu_online(pol->cpu)) | 1113 | if (!cpu_online(pol->cpu)) |
@@ -1156,7 +1159,8 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
1156 | 1159 | ||
1157 | /* only run on specific CPU from here on */ | 1160 | /* only run on specific CPU from here on */ |
1158 | oldmask = current->cpus_allowed; | 1161 | oldmask = current->cpus_allowed; |
1159 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu)); | 1162 | cpumask_of_cpu_ptr_next(newmask, pol->cpu); |
1163 | set_cpus_allowed_ptr(current, newmask); | ||
1160 | 1164 | ||
1161 | if (smp_processor_id() != pol->cpu) { | 1165 | if (smp_processor_id() != pol->cpu) { |
1162 | printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); | 1166 | printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); |
@@ -1178,7 +1182,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
1178 | set_cpus_allowed_ptr(current, &oldmask); | 1182 | set_cpus_allowed_ptr(current, &oldmask); |
1179 | 1183 | ||
1180 | if (cpu_family == CPU_HW_PSTATE) | 1184 | if (cpu_family == CPU_HW_PSTATE) |
1181 | pol->cpus = cpumask_of_cpu(pol->cpu); | 1185 | pol->cpus = *newmask; |
1182 | else | 1186 | else |
1183 | pol->cpus = per_cpu(cpu_core_map, pol->cpu); | 1187 | pol->cpus = per_cpu(cpu_core_map, pol->cpu); |
1184 | data->available_cores = &(pol->cpus); | 1188 | data->available_cores = &(pol->cpus); |
@@ -1244,6 +1248,7 @@ static unsigned int powernowk8_get (unsigned int cpu) | |||
1244 | { | 1248 | { |
1245 | struct powernow_k8_data *data; | 1249 | struct powernow_k8_data *data; |
1246 | cpumask_t oldmask = current->cpus_allowed; | 1250 | cpumask_t oldmask = current->cpus_allowed; |
1251 | cpumask_of_cpu_ptr(newmask, cpu); | ||
1247 | unsigned int khz = 0; | 1252 | unsigned int khz = 0; |
1248 | unsigned int first; | 1253 | unsigned int first; |
1249 | 1254 | ||
@@ -1253,7 +1258,7 @@ static unsigned int powernowk8_get (unsigned int cpu) | |||
1253 | if (!data) | 1258 | if (!data) |
1254 | return -EINVAL; | 1259 | return -EINVAL; |
1255 | 1260 | ||
1256 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); | 1261 | set_cpus_allowed_ptr(current, newmask); |
1257 | if (smp_processor_id() != cpu) { | 1262 | if (smp_processor_id() != cpu) { |
1258 | printk(KERN_ERR PFX | 1263 | printk(KERN_ERR PFX |
1259 | "limiting to CPU %d failed in powernowk8_get\n", cpu); | 1264 | "limiting to CPU %d failed in powernowk8_get\n", cpu); |
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c index 908dd347c67e..ca2ac13b7af2 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c | |||
@@ -28,7 +28,8 @@ | |||
28 | #define PFX "speedstep-centrino: " | 28 | #define PFX "speedstep-centrino: " |
29 | #define MAINTAINER "cpufreq@lists.linux.org.uk" | 29 | #define MAINTAINER "cpufreq@lists.linux.org.uk" |
30 | 30 | ||
31 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg) | 31 | #define dprintk(msg...) \ |
32 | cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg) | ||
32 | 33 | ||
33 | #define INTEL_MSR_RANGE (0xffff) | 34 | #define INTEL_MSR_RANGE (0xffff) |
34 | 35 | ||
@@ -66,11 +67,12 @@ struct cpu_model | |||
66 | 67 | ||
67 | struct cpufreq_frequency_table *op_points; /* clock/voltage pairs */ | 68 | struct cpufreq_frequency_table *op_points; /* clock/voltage pairs */ |
68 | }; | 69 | }; |
69 | static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, const struct cpu_id *x); | 70 | static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, |
71 | const struct cpu_id *x); | ||
70 | 72 | ||
71 | /* Operating points for current CPU */ | 73 | /* Operating points for current CPU */ |
72 | static struct cpu_model *centrino_model[NR_CPUS]; | 74 | static DEFINE_PER_CPU(struct cpu_model *, centrino_model); |
73 | static const struct cpu_id *centrino_cpu[NR_CPUS]; | 75 | static DEFINE_PER_CPU(const struct cpu_id *, centrino_cpu); |
74 | 76 | ||
75 | static struct cpufreq_driver centrino_driver; | 77 | static struct cpufreq_driver centrino_driver; |
76 | 78 | ||
@@ -255,7 +257,7 @@ static int centrino_cpu_init_table(struct cpufreq_policy *policy) | |||
255 | return -ENOENT; | 257 | return -ENOENT; |
256 | } | 258 | } |
257 | 259 | ||
258 | centrino_model[policy->cpu] = model; | 260 | per_cpu(centrino_model, policy->cpu) = model; |
259 | 261 | ||
260 | dprintk("found \"%s\": max frequency: %dkHz\n", | 262 | dprintk("found \"%s\": max frequency: %dkHz\n", |
261 | model->model_name, model->max_freq); | 263 | model->model_name, model->max_freq); |
@@ -264,10 +266,14 @@ static int centrino_cpu_init_table(struct cpufreq_policy *policy) | |||
264 | } | 266 | } |
265 | 267 | ||
266 | #else | 268 | #else |
267 | static inline int centrino_cpu_init_table(struct cpufreq_policy *policy) { return -ENODEV; } | 269 | static inline int centrino_cpu_init_table(struct cpufreq_policy *policy) |
270 | { | ||
271 | return -ENODEV; | ||
272 | } | ||
268 | #endif /* CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE */ | 273 | #endif /* CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE */ |
269 | 274 | ||
270 | static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, const struct cpu_id *x) | 275 | static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, |
276 | const struct cpu_id *x) | ||
271 | { | 277 | { |
272 | if ((c->x86 == x->x86) && | 278 | if ((c->x86 == x->x86) && |
273 | (c->x86_model == x->x86_model) && | 279 | (c->x86_model == x->x86_model) && |
@@ -286,23 +292,28 @@ static unsigned extract_clock(unsigned msr, unsigned int cpu, int failsafe) | |||
286 | * for centrino, as some DSDTs are buggy. | 292 | * for centrino, as some DSDTs are buggy. |
287 | * Ideally, this can be done using the acpi_data structure. | 293 | * Ideally, this can be done using the acpi_data structure. |
288 | */ | 294 | */ |
289 | if ((centrino_cpu[cpu] == &cpu_ids[CPU_BANIAS]) || | 295 | if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) || |
290 | (centrino_cpu[cpu] == &cpu_ids[CPU_DOTHAN_A1]) || | 296 | (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_A1]) || |
291 | (centrino_cpu[cpu] == &cpu_ids[CPU_DOTHAN_B0])) { | 297 | (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_B0])) { |
292 | msr = (msr >> 8) & 0xff; | 298 | msr = (msr >> 8) & 0xff; |
293 | return msr * 100000; | 299 | return msr * 100000; |
294 | } | 300 | } |
295 | 301 | ||
296 | if ((!centrino_model[cpu]) || (!centrino_model[cpu]->op_points)) | 302 | if ((!per_cpu(centrino_model, cpu)) || |
303 | (!per_cpu(centrino_model, cpu)->op_points)) | ||
297 | return 0; | 304 | return 0; |
298 | 305 | ||
299 | msr &= 0xffff; | 306 | msr &= 0xffff; |
300 | for (i=0;centrino_model[cpu]->op_points[i].frequency != CPUFREQ_TABLE_END; i++) { | 307 | for (i = 0; |
301 | if (msr == centrino_model[cpu]->op_points[i].index) | 308 | per_cpu(centrino_model, cpu)->op_points[i].frequency |
302 | return centrino_model[cpu]->op_points[i].frequency; | 309 | != CPUFREQ_TABLE_END; |
310 | i++) { | ||
311 | if (msr == per_cpu(centrino_model, cpu)->op_points[i].index) | ||
312 | return per_cpu(centrino_model, cpu)-> | ||
313 | op_points[i].frequency; | ||
303 | } | 314 | } |
304 | if (failsafe) | 315 | if (failsafe) |
305 | return centrino_model[cpu]->op_points[i-1].frequency; | 316 | return per_cpu(centrino_model, cpu)->op_points[i-1].frequency; |
306 | else | 317 | else |
307 | return 0; | 318 | return 0; |
308 | } | 319 | } |
@@ -313,9 +324,10 @@ static unsigned int get_cur_freq(unsigned int cpu) | |||
313 | unsigned l, h; | 324 | unsigned l, h; |
314 | unsigned clock_freq; | 325 | unsigned clock_freq; |
315 | cpumask_t saved_mask; | 326 | cpumask_t saved_mask; |
327 | cpumask_of_cpu_ptr(new_mask, cpu); | ||
316 | 328 | ||
317 | saved_mask = current->cpus_allowed; | 329 | saved_mask = current->cpus_allowed; |
318 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); | 330 | set_cpus_allowed_ptr(current, new_mask); |
319 | if (smp_processor_id() != cpu) | 331 | if (smp_processor_id() != cpu) |
320 | return 0; | 332 | return 0; |
321 | 333 | ||
@@ -347,7 +359,8 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) | |||
347 | int i; | 359 | int i; |
348 | 360 | ||
349 | /* Only Intel makes Enhanced Speedstep-capable CPUs */ | 361 | /* Only Intel makes Enhanced Speedstep-capable CPUs */ |
350 | if (cpu->x86_vendor != X86_VENDOR_INTEL || !cpu_has(cpu, X86_FEATURE_EST)) | 362 | if (cpu->x86_vendor != X86_VENDOR_INTEL || |
363 | !cpu_has(cpu, X86_FEATURE_EST)) | ||
351 | return -ENODEV; | 364 | return -ENODEV; |
352 | 365 | ||
353 | if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) | 366 | if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) |
@@ -361,9 +374,9 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) | |||
361 | break; | 374 | break; |
362 | 375 | ||
363 | if (i != N_IDS) | 376 | if (i != N_IDS) |
364 | centrino_cpu[policy->cpu] = &cpu_ids[i]; | 377 | per_cpu(centrino_cpu, policy->cpu) = &cpu_ids[i]; |
365 | 378 | ||
366 | if (!centrino_cpu[policy->cpu]) { | 379 | if (!per_cpu(centrino_cpu, policy->cpu)) { |
367 | dprintk("found unsupported CPU with " | 380 | dprintk("found unsupported CPU with " |
368 | "Enhanced SpeedStep: send /proc/cpuinfo to " | 381 | "Enhanced SpeedStep: send /proc/cpuinfo to " |
369 | MAINTAINER "\n"); | 382 | MAINTAINER "\n"); |
@@ -386,23 +399,26 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) | |||
386 | /* check to see if it stuck */ | 399 | /* check to see if it stuck */ |
387 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); | 400 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); |
388 | if (!(l & (1<<16))) { | 401 | if (!(l & (1<<16))) { |
389 | printk(KERN_INFO PFX "couldn't enable Enhanced SpeedStep\n"); | 402 | printk(KERN_INFO PFX |
403 | "couldn't enable Enhanced SpeedStep\n"); | ||
390 | return -ENODEV; | 404 | return -ENODEV; |
391 | } | 405 | } |
392 | } | 406 | } |
393 | 407 | ||
394 | freq = get_cur_freq(policy->cpu); | 408 | freq = get_cur_freq(policy->cpu); |
395 | 409 | policy->cpuinfo.transition_latency = 10000; | |
396 | policy->cpuinfo.transition_latency = 10000; /* 10uS transition latency */ | 410 | /* 10uS transition latency */ |
397 | policy->cur = freq; | 411 | policy->cur = freq; |
398 | 412 | ||
399 | dprintk("centrino_cpu_init: cur=%dkHz\n", policy->cur); | 413 | dprintk("centrino_cpu_init: cur=%dkHz\n", policy->cur); |
400 | 414 | ||
401 | ret = cpufreq_frequency_table_cpuinfo(policy, centrino_model[policy->cpu]->op_points); | 415 | ret = cpufreq_frequency_table_cpuinfo(policy, |
416 | per_cpu(centrino_model, policy->cpu)->op_points); | ||
402 | if (ret) | 417 | if (ret) |
403 | return (ret); | 418 | return (ret); |
404 | 419 | ||
405 | cpufreq_frequency_table_get_attr(centrino_model[policy->cpu]->op_points, policy->cpu); | 420 | cpufreq_frequency_table_get_attr( |
421 | per_cpu(centrino_model, policy->cpu)->op_points, policy->cpu); | ||
406 | 422 | ||
407 | return 0; | 423 | return 0; |
408 | } | 424 | } |
@@ -411,12 +427,12 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy) | |||
411 | { | 427 | { |
412 | unsigned int cpu = policy->cpu; | 428 | unsigned int cpu = policy->cpu; |
413 | 429 | ||
414 | if (!centrino_model[cpu]) | 430 | if (!per_cpu(centrino_model, cpu)) |
415 | return -ENODEV; | 431 | return -ENODEV; |
416 | 432 | ||
417 | cpufreq_frequency_table_put_attr(cpu); | 433 | cpufreq_frequency_table_put_attr(cpu); |
418 | 434 | ||
419 | centrino_model[cpu] = NULL; | 435 | per_cpu(centrino_model, cpu) = NULL; |
420 | 436 | ||
421 | return 0; | 437 | return 0; |
422 | } | 438 | } |
@@ -430,17 +446,26 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy) | |||
430 | */ | 446 | */ |
431 | static int centrino_verify (struct cpufreq_policy *policy) | 447 | static int centrino_verify (struct cpufreq_policy *policy) |
432 | { | 448 | { |
433 | return cpufreq_frequency_table_verify(policy, centrino_model[policy->cpu]->op_points); | 449 | return cpufreq_frequency_table_verify(policy, |
450 | per_cpu(centrino_model, policy->cpu)->op_points); | ||
434 | } | 451 | } |
435 | 452 | ||
436 | /** | 453 | /** |
437 | * centrino_setpolicy - set a new CPUFreq policy | 454 | * centrino_setpolicy - set a new CPUFreq policy |
438 | * @policy: new policy | 455 | * @policy: new policy |
439 | * @target_freq: the target frequency | 456 | * @target_freq: the target frequency |
440 | * @relation: how that frequency relates to achieved frequency (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) | 457 | * @relation: how that frequency relates to achieved frequency |
458 | * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) | ||
441 | * | 459 | * |
442 | * Sets a new CPUFreq policy. | 460 | * Sets a new CPUFreq policy. |
443 | */ | 461 | */ |
462 | struct allmasks { | ||
463 | cpumask_t online_policy_cpus; | ||
464 | cpumask_t saved_mask; | ||
465 | cpumask_t set_mask; | ||
466 | cpumask_t covered_cpus; | ||
467 | }; | ||
468 | |||
444 | static int centrino_target (struct cpufreq_policy *policy, | 469 | static int centrino_target (struct cpufreq_policy *policy, |
445 | unsigned int target_freq, | 470 | unsigned int target_freq, |
446 | unsigned int relation) | 471 | unsigned int relation) |
@@ -448,48 +473,55 @@ static int centrino_target (struct cpufreq_policy *policy, | |||
448 | unsigned int newstate = 0; | 473 | unsigned int newstate = 0; |
449 | unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu; | 474 | unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu; |
450 | struct cpufreq_freqs freqs; | 475 | struct cpufreq_freqs freqs; |
451 | cpumask_t online_policy_cpus; | ||
452 | cpumask_t saved_mask; | ||
453 | cpumask_t set_mask; | ||
454 | cpumask_t covered_cpus; | ||
455 | int retval = 0; | 476 | int retval = 0; |
456 | unsigned int j, k, first_cpu, tmp; | 477 | unsigned int j, k, first_cpu, tmp; |
457 | 478 | CPUMASK_ALLOC(allmasks); | |
458 | if (unlikely(centrino_model[cpu] == NULL)) | 479 | CPUMASK_PTR(online_policy_cpus, allmasks); |
459 | return -ENODEV; | 480 | CPUMASK_PTR(saved_mask, allmasks); |
481 | CPUMASK_PTR(set_mask, allmasks); | ||
482 | CPUMASK_PTR(covered_cpus, allmasks); | ||
483 | |||
484 | if (unlikely(allmasks == NULL)) | ||
485 | return -ENOMEM; | ||
486 | |||
487 | if (unlikely(per_cpu(centrino_model, cpu) == NULL)) { | ||
488 | retval = -ENODEV; | ||
489 | goto out; | ||
490 | } | ||
460 | 491 | ||
461 | if (unlikely(cpufreq_frequency_table_target(policy, | 492 | if (unlikely(cpufreq_frequency_table_target(policy, |
462 | centrino_model[cpu]->op_points, | 493 | per_cpu(centrino_model, cpu)->op_points, |
463 | target_freq, | 494 | target_freq, |
464 | relation, | 495 | relation, |
465 | &newstate))) { | 496 | &newstate))) { |
466 | return -EINVAL; | 497 | retval = -EINVAL; |
498 | goto out; | ||
467 | } | 499 | } |
468 | 500 | ||
469 | #ifdef CONFIG_HOTPLUG_CPU | 501 | #ifdef CONFIG_HOTPLUG_CPU |
470 | /* cpufreq holds the hotplug lock, so we are safe from here on */ | 502 | /* cpufreq holds the hotplug lock, so we are safe from here on */ |
471 | cpus_and(online_policy_cpus, cpu_online_map, policy->cpus); | 503 | cpus_and(*online_policy_cpus, cpu_online_map, policy->cpus); |
472 | #else | 504 | #else |
473 | online_policy_cpus = policy->cpus; | 505 | *online_policy_cpus = policy->cpus; |
474 | #endif | 506 | #endif |
475 | 507 | ||
476 | saved_mask = current->cpus_allowed; | 508 | *saved_mask = current->cpus_allowed; |
477 | first_cpu = 1; | 509 | first_cpu = 1; |
478 | cpus_clear(covered_cpus); | 510 | cpus_clear(*covered_cpus); |
479 | for_each_cpu_mask(j, online_policy_cpus) { | 511 | for_each_cpu_mask_nr(j, *online_policy_cpus) { |
480 | /* | 512 | /* |
481 | * Support for SMP systems. | 513 | * Support for SMP systems. |
482 | * Make sure we are running on the CPU that wants to change freq | 514 | * Make sure we are running on the CPU that wants to change freq |
483 | */ | 515 | */ |
484 | cpus_clear(set_mask); | 516 | cpus_clear(*set_mask); |
485 | if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) | 517 | if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) |
486 | cpus_or(set_mask, set_mask, online_policy_cpus); | 518 | cpus_or(*set_mask, *set_mask, *online_policy_cpus); |
487 | else | 519 | else |
488 | cpu_set(j, set_mask); | 520 | cpu_set(j, *set_mask); |
489 | 521 | ||
490 | set_cpus_allowed_ptr(current, &set_mask); | 522 | set_cpus_allowed_ptr(current, set_mask); |
491 | preempt_disable(); | 523 | preempt_disable(); |
492 | if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) { | 524 | if (unlikely(!cpu_isset(smp_processor_id(), *set_mask))) { |
493 | dprintk("couldn't limit to CPUs in this domain\n"); | 525 | dprintk("couldn't limit to CPUs in this domain\n"); |
494 | retval = -EAGAIN; | 526 | retval = -EAGAIN; |
495 | if (first_cpu) { | 527 | if (first_cpu) { |
@@ -500,7 +532,7 @@ static int centrino_target (struct cpufreq_policy *policy, | |||
500 | break; | 532 | break; |
501 | } | 533 | } |
502 | 534 | ||
503 | msr = centrino_model[cpu]->op_points[newstate].index; | 535 | msr = per_cpu(centrino_model, cpu)->op_points[newstate].index; |
504 | 536 | ||
505 | if (first_cpu) { | 537 | if (first_cpu) { |
506 | rdmsr(MSR_IA32_PERF_CTL, oldmsr, h); | 538 | rdmsr(MSR_IA32_PERF_CTL, oldmsr, h); |
@@ -517,7 +549,7 @@ static int centrino_target (struct cpufreq_policy *policy, | |||
517 | dprintk("target=%dkHz old=%d new=%d msr=%04x\n", | 549 | dprintk("target=%dkHz old=%d new=%d msr=%04x\n", |
518 | target_freq, freqs.old, freqs.new, msr); | 550 | target_freq, freqs.old, freqs.new, msr); |
519 | 551 | ||
520 | for_each_cpu_mask(k, online_policy_cpus) { | 552 | for_each_cpu_mask_nr(k, *online_policy_cpus) { |
521 | freqs.cpu = k; | 553 | freqs.cpu = k; |
522 | cpufreq_notify_transition(&freqs, | 554 | cpufreq_notify_transition(&freqs, |
523 | CPUFREQ_PRECHANGE); | 555 | CPUFREQ_PRECHANGE); |
@@ -536,11 +568,11 @@ static int centrino_target (struct cpufreq_policy *policy, | |||
536 | break; | 568 | break; |
537 | } | 569 | } |
538 | 570 | ||
539 | cpu_set(j, covered_cpus); | 571 | cpu_set(j, *covered_cpus); |
540 | preempt_enable(); | 572 | preempt_enable(); |
541 | } | 573 | } |
542 | 574 | ||
543 | for_each_cpu_mask(k, online_policy_cpus) { | 575 | for_each_cpu_mask_nr(k, *online_policy_cpus) { |
544 | freqs.cpu = k; | 576 | freqs.cpu = k; |
545 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | 577 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); |
546 | } | 578 | } |
@@ -553,10 +585,12 @@ static int centrino_target (struct cpufreq_policy *policy, | |||
553 | * Best effort undo.. | 585 | * Best effort undo.. |
554 | */ | 586 | */ |
555 | 587 | ||
556 | if (!cpus_empty(covered_cpus)) { | 588 | if (!cpus_empty(*covered_cpus)) { |
557 | for_each_cpu_mask(j, covered_cpus) { | 589 | cpumask_of_cpu_ptr_declare(new_mask); |
558 | set_cpus_allowed_ptr(current, | 590 | |
559 | &cpumask_of_cpu(j)); | 591 | for_each_cpu_mask_nr(j, *covered_cpus) { |
592 | cpumask_of_cpu_ptr_next(new_mask, j); | ||
593 | set_cpus_allowed_ptr(current, new_mask); | ||
560 | wrmsr(MSR_IA32_PERF_CTL, oldmsr, h); | 594 | wrmsr(MSR_IA32_PERF_CTL, oldmsr, h); |
561 | } | 595 | } |
562 | } | 596 | } |
@@ -564,19 +598,22 @@ static int centrino_target (struct cpufreq_policy *policy, | |||
564 | tmp = freqs.new; | 598 | tmp = freqs.new; |
565 | freqs.new = freqs.old; | 599 | freqs.new = freqs.old; |
566 | freqs.old = tmp; | 600 | freqs.old = tmp; |
567 | for_each_cpu_mask(j, online_policy_cpus) { | 601 | for_each_cpu_mask_nr(j, *online_policy_cpus) { |
568 | freqs.cpu = j; | 602 | freqs.cpu = j; |
569 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | 603 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); |
570 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | 604 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); |
571 | } | 605 | } |
572 | } | 606 | } |
573 | set_cpus_allowed_ptr(current, &saved_mask); | 607 | set_cpus_allowed_ptr(current, saved_mask); |
574 | return 0; | 608 | retval = 0; |
609 | goto out; | ||
575 | 610 | ||
576 | migrate_end: | 611 | migrate_end: |
577 | preempt_enable(); | 612 | preempt_enable(); |
578 | set_cpus_allowed_ptr(current, &saved_mask); | 613 | set_cpus_allowed_ptr(current, saved_mask); |
579 | return 0; | 614 | out: |
615 | CPUMASK_FREE(allmasks); | ||
616 | return retval; | ||
580 | } | 617 | } |
581 | 618 | ||
582 | static struct freq_attr* centrino_attr[] = { | 619 | static struct freq_attr* centrino_attr[] = { |
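
Note on the CPUMASK_ALLOC()/CPUMASK_PTR()/CPUMASK_FREE() helpers used in centrino_target() above: the conversion exists to keep four cpumask_t variables off the kernel stack when NR_CPUS is large (with NR_CPUS=4096 a single cpumask_t is already 512 bytes). The sketch below approximates the 2.6.27-era definitions from <linux/cpumask.h>; the exact macros in the target tree may differ in detail, so treat it as an illustration rather than a quote.

	/* Sketch only -- approximate behaviour of the helpers, not the patched code.
	 * For large NR_CPUS the struct of masks is heap-allocated, which is why
	 * centrino_target() checks allmasks for NULL and ends with CPUMASK_FREE().
	 */
	#if NR_CPUS > BITS_PER_LONG
	#define CPUMASK_ALLOC(m)	struct m *m = kmalloc(sizeof(*m), GFP_KERNEL)
	#define CPUMASK_FREE(m)		kfree(m)
	#else
	#define CPUMASK_ALLOC(m)	struct m _m, *m = &_m	/* small masks stay on the stack */
	#define CPUMASK_FREE(m)
	#endif
	#define CPUMASK_PTR(v, m)	cpumask_t *v = &(m->v)	/* v must name a field of struct m */
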
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c index 1b50244b1fdf..2f3728dc24f6 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c | |||
@@ -244,7 +244,8 @@ static unsigned int _speedstep_get(const cpumask_t *cpus) | |||
244 | 244 | ||
245 | static unsigned int speedstep_get(unsigned int cpu) | 245 | static unsigned int speedstep_get(unsigned int cpu) |
246 | { | 246 | { |
247 | return _speedstep_get(&cpumask_of_cpu(cpu)); | 247 | cpumask_of_cpu_ptr(newmask, cpu); |
248 | return _speedstep_get(newmask); | ||
248 | } | 249 | } |
249 | 250 | ||
250 | /** | 251 | /** |
@@ -279,7 +280,7 @@ static int speedstep_target (struct cpufreq_policy *policy, | |||
279 | 280 | ||
280 | cpus_allowed = current->cpus_allowed; | 281 | cpus_allowed = current->cpus_allowed; |
281 | 282 | ||
282 | for_each_cpu_mask(i, policy->cpus) { | 283 | for_each_cpu_mask_nr(i, policy->cpus) { |
283 | freqs.cpu = i; | 284 | freqs.cpu = i; |
284 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | 285 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); |
285 | } | 286 | } |
@@ -292,7 +293,7 @@ static int speedstep_target (struct cpufreq_policy *policy, | |||
292 | /* allow to be run on all CPUs */ | 293 | /* allow to be run on all CPUs */ |
293 | set_cpus_allowed_ptr(current, &cpus_allowed); | 294 | set_cpus_allowed_ptr(current, &cpus_allowed); |
294 | 295 | ||
295 | for_each_cpu_mask(i, policy->cpus) { | 296 | for_each_cpu_mask_nr(i, policy->cpus) { |
296 | freqs.cpu = i; | 297 | freqs.cpu = i; |
297 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | 298 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); |
298 | } | 299 | } |
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index ff517f0b8cc4..650d40f7912b 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
@@ -489,7 +489,7 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) | |||
489 | int sibling; | 489 | int sibling; |
490 | 490 | ||
491 | this_leaf = CPUID4_INFO_IDX(cpu, index); | 491 | this_leaf = CPUID4_INFO_IDX(cpu, index); |
492 | for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) { | 492 | for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) { |
493 | sibling_leaf = CPUID4_INFO_IDX(sibling, index); | 493 | sibling_leaf = CPUID4_INFO_IDX(sibling, index); |
494 | cpu_clear(cpu, sibling_leaf->shared_cpu_map); | 494 | cpu_clear(cpu, sibling_leaf->shared_cpu_map); |
495 | } | 495 | } |
@@ -516,6 +516,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu) | |||
516 | unsigned long j; | 516 | unsigned long j; |
517 | int retval; | 517 | int retval; |
518 | cpumask_t oldmask; | 518 | cpumask_t oldmask; |
519 | cpumask_of_cpu_ptr(newmask, cpu); | ||
519 | 520 | ||
520 | if (num_cache_leaves == 0) | 521 | if (num_cache_leaves == 0) |
521 | return -ENOENT; | 522 | return -ENOENT; |
@@ -526,7 +527,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu) | |||
526 | return -ENOMEM; | 527 | return -ENOMEM; |
527 | 528 | ||
528 | oldmask = current->cpus_allowed; | 529 | oldmask = current->cpus_allowed; |
529 | retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); | 530 | retval = set_cpus_allowed_ptr(current, newmask); |
530 | if (retval) | 531 | if (retval) |
531 | goto out; | 532 | goto out; |
532 | 533 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c index 9ab65be82427..65a339678ece 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_64.c | |||
@@ -580,7 +580,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, | |||
580 | char __user *buf = ubuf; | 580 | char __user *buf = ubuf; |
581 | int i, err; | 581 | int i, err; |
582 | 582 | ||
583 | cpu_tsc = kmalloc(NR_CPUS * sizeof(long), GFP_KERNEL); | 583 | cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL); |
584 | if (!cpu_tsc) | 584 | if (!cpu_tsc) |
585 | return -ENOMEM; | 585 | return -ENOMEM; |
586 | 586 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c index 7c9a813e1193..88736cadbaa6 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c | |||
@@ -527,7 +527,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
527 | if (err) | 527 | if (err) |
528 | goto out_free; | 528 | goto out_free; |
529 | 529 | ||
530 | for_each_cpu_mask(i, b->cpus) { | 530 | for_each_cpu_mask_nr(i, b->cpus) { |
531 | if (i == cpu) | 531 | if (i == cpu) |
532 | continue; | 532 | continue; |
533 | 533 | ||
@@ -617,7 +617,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank) | |||
617 | #endif | 617 | #endif |
618 | 618 | ||
619 | /* remove all sibling symlinks before unregistering */ | 619 | /* remove all sibling symlinks before unregistering */ |
620 | for_each_cpu_mask(i, b->cpus) { | 620 | for_each_cpu_mask_nr(i, b->cpus) { |
621 | if (i == cpu) | 621 | if (i == cpu) |
622 | continue; | 622 | continue; |
623 | 623 | ||
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index 0d0d9057e7c0..a26c480b9491 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c | |||
@@ -160,7 +160,7 @@ static void *c_start(struct seq_file *m, loff_t *pos) | |||
160 | { | 160 | { |
161 | if (*pos == 0) /* just in case, cpu 0 is not the first */ | 161 | if (*pos == 0) /* just in case, cpu 0 is not the first */ |
162 | *pos = first_cpu(cpu_online_map); | 162 | *pos = first_cpu(cpu_online_map); |
163 | if ((*pos) < NR_CPUS && cpu_online(*pos)) | 163 | if ((*pos) < nr_cpu_ids && cpu_online(*pos)) |
164 | return &cpu_data(*pos); | 164 | return &cpu_data(*pos); |
165 | return NULL; | 165 | return NULL; |
166 | } | 166 | } |
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index cdfd94cc6b14..109792bc7cfa 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S | |||
@@ -54,6 +54,16 @@ | |||
54 | #include <asm/ftrace.h> | 54 | #include <asm/ftrace.h> |
55 | #include <asm/irq_vectors.h> | 55 | #include <asm/irq_vectors.h> |
56 | 56 | ||
57 | /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ | ||
58 | #include <linux/elf-em.h> | ||
59 | #define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE) | ||
60 | #define __AUDIT_ARCH_LE 0x40000000 | ||
61 | |||
62 | #ifndef CONFIG_AUDITSYSCALL | ||
63 | #define sysenter_audit syscall_trace_entry | ||
64 | #define sysexit_audit syscall_exit_work | ||
65 | #endif | ||
66 | |||
57 | /* | 67 | /* |
58 | * We use macros for low-level operations which need to be overridden | 68 | * We use macros for low-level operations which need to be overridden |
59 | * for paravirtualization. The following will never clobber any registers: | 69 | * for paravirtualization. The following will never clobber any registers: |
@@ -333,7 +343,8 @@ sysenter_past_esp: | |||
333 | 343 | ||
334 | /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */ | 344 | /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */ |
335 | testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp) | 345 | testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp) |
336 | jnz syscall_trace_entry | 346 | jnz sysenter_audit |
347 | sysenter_do_call: | ||
337 | cmpl $(nr_syscalls), %eax | 348 | cmpl $(nr_syscalls), %eax |
338 | jae syscall_badsys | 349 | jae syscall_badsys |
339 | call *sys_call_table(,%eax,4) | 350 | call *sys_call_table(,%eax,4) |
@@ -343,7 +354,8 @@ sysenter_past_esp: | |||
343 | TRACE_IRQS_OFF | 354 | TRACE_IRQS_OFF |
344 | movl TI_flags(%ebp), %ecx | 355 | movl TI_flags(%ebp), %ecx |
345 | testw $_TIF_ALLWORK_MASK, %cx | 356 | testw $_TIF_ALLWORK_MASK, %cx |
346 | jne syscall_exit_work | 357 | jne sysexit_audit |
358 | sysenter_exit: | ||
347 | /* if something modifies registers it must also disable sysexit */ | 359 | /* if something modifies registers it must also disable sysexit */ |
348 | movl PT_EIP(%esp), %edx | 360 | movl PT_EIP(%esp), %edx |
349 | movl PT_OLDESP(%esp), %ecx | 361 | movl PT_OLDESP(%esp), %ecx |
@@ -351,6 +363,45 @@ sysenter_past_esp: | |||
351 | TRACE_IRQS_ON | 363 | TRACE_IRQS_ON |
352 | 1: mov PT_FS(%esp), %fs | 364 | 1: mov PT_FS(%esp), %fs |
353 | ENABLE_INTERRUPTS_SYSEXIT | 365 | ENABLE_INTERRUPTS_SYSEXIT |
366 | |||
367 | #ifdef CONFIG_AUDITSYSCALL | ||
368 | sysenter_audit: | ||
369 | testw $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp) | ||
370 | jnz syscall_trace_entry | ||
371 | addl $4,%esp | ||
372 | CFI_ADJUST_CFA_OFFSET -4 | ||
373 | /* %esi already in 8(%esp) 6th arg: 4th syscall arg */ | ||
374 | /* %edx already in 4(%esp) 5th arg: 3rd syscall arg */ | ||
375 | /* %ecx already in 0(%esp) 4th arg: 2nd syscall arg */ | ||
376 | movl %ebx,%ecx /* 3rd arg: 1st syscall arg */ | ||
377 | movl %eax,%edx /* 2nd arg: syscall number */ | ||
378 | movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */ | ||
379 | call audit_syscall_entry | ||
380 | pushl %ebx | ||
381 | CFI_ADJUST_CFA_OFFSET 4 | ||
382 | movl PT_EAX(%esp),%eax /* reload syscall number */ | ||
383 | jmp sysenter_do_call | ||
384 | |||
385 | sysexit_audit: | ||
386 | testw $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %cx | ||
387 | jne syscall_exit_work | ||
388 | TRACE_IRQS_ON | ||
389 | ENABLE_INTERRUPTS(CLBR_ANY) | ||
390 | movl %eax,%edx /* second arg, syscall return value */ | ||
391 | cmpl $0,%eax /* is it < 0? */ | ||
392 | setl %al /* 1 if so, 0 if not */ | ||
393 | movzbl %al,%eax /* zero-extend that */ | ||
394 | inc %eax /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */ | ||
395 | call audit_syscall_exit | ||
396 | DISABLE_INTERRUPTS(CLBR_ANY) | ||
397 | TRACE_IRQS_OFF | ||
398 | movl TI_flags(%ebp), %ecx | ||
399 | testw $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %cx | ||
400 | jne syscall_exit_work | ||
401 | movl PT_EAX(%esp),%eax /* reload syscall return value */ | ||
402 | jmp sysenter_exit | ||
403 | #endif | ||
404 | |||
354 | CFI_ENDPROC | 405 | CFI_ENDPROC |
355 | .pushsection .fixup,"ax" | 406 | .pushsection .fixup,"ax" |
356 | 2: movl $0,PT_FS(%esp) | 407 | 2: movl $0,PT_FS(%esp) |
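
The sysenter_audit/sysexit_audit fast paths above (and the analogous auditsys/sysret_audit paths in the entry_64.S hunk that follows) marshal the audit calls directly in assembly. The cmpl/setl/movzbl/inc sequence that builds the first argument of audit_syscall_exit() is equivalent to the C sketch below; it assumes the audit_syscall_exit(valid, return_code) prototype of this kernel generation and is illustrative only, not part of the patch.

	/* Sketch: C equivalent of the setl/inc sequence in sysexit_audit. */
	long ret = regs->ax;				/* syscall return value */
	int valid = (ret < 0) ? AUDITSC_FAILURE		/* 2, as the inline comment notes */
			      : AUDITSC_SUCCESS;	/* 1 */
	audit_syscall_exit(valid, ret);
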
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 8410e26f4183..89434d439605 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -53,6 +53,12 @@ | |||
53 | #include <asm/paravirt.h> | 53 | #include <asm/paravirt.h> |
54 | #include <asm/ftrace.h> | 54 | #include <asm/ftrace.h> |
55 | 55 | ||
56 | /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ | ||
57 | #include <linux/elf-em.h> | ||
58 | #define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) | ||
59 | #define __AUDIT_ARCH_64BIT 0x80000000 | ||
60 | #define __AUDIT_ARCH_LE 0x40000000 | ||
61 | |||
56 | .code64 | 62 | .code64 |
57 | 63 | ||
58 | #ifdef CONFIG_FTRACE | 64 | #ifdef CONFIG_FTRACE |
@@ -351,6 +357,7 @@ ENTRY(system_call_after_swapgs) | |||
351 | GET_THREAD_INFO(%rcx) | 357 | GET_THREAD_INFO(%rcx) |
352 | testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx) | 358 | testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx) |
353 | jnz tracesys | 359 | jnz tracesys |
360 | system_call_fastpath: | ||
354 | cmpq $__NR_syscall_max,%rax | 361 | cmpq $__NR_syscall_max,%rax |
355 | ja badsys | 362 | ja badsys |
356 | movq %r10,%rcx | 363 | movq %r10,%rcx |
@@ -402,16 +409,16 @@ sysret_careful: | |||
402 | sysret_signal: | 409 | sysret_signal: |
403 | TRACE_IRQS_ON | 410 | TRACE_IRQS_ON |
404 | ENABLE_INTERRUPTS(CLBR_NONE) | 411 | ENABLE_INTERRUPTS(CLBR_NONE) |
405 | testl $_TIF_DO_NOTIFY_MASK,%edx | 412 | #ifdef CONFIG_AUDITSYSCALL |
406 | jz 1f | 413 | bt $TIF_SYSCALL_AUDIT,%edx |
407 | 414 | jc sysret_audit | |
408 | /* Really a signal */ | 415 | #endif |
409 | /* edx: work flags (arg3) */ | 416 | /* edx: work flags (arg3) */ |
410 | leaq do_notify_resume(%rip),%rax | 417 | leaq do_notify_resume(%rip),%rax |
411 | leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1 | 418 | leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1 |
412 | xorl %esi,%esi # oldset -> arg2 | 419 | xorl %esi,%esi # oldset -> arg2 |
413 | call ptregscall_common | 420 | call ptregscall_common |
414 | 1: movl $_TIF_WORK_MASK,%edi | 421 | movl $_TIF_WORK_MASK,%edi |
415 | /* Use IRET because user could have changed frame. This | 422 | /* Use IRET because user could have changed frame. This |
416 | works because ptregscall_common has called FIXUP_TOP_OF_STACK. */ | 423 | works because ptregscall_common has called FIXUP_TOP_OF_STACK. */ |
417 | DISABLE_INTERRUPTS(CLBR_NONE) | 424 | DISABLE_INTERRUPTS(CLBR_NONE) |
@@ -422,8 +429,45 @@ badsys: | |||
422 | movq $-ENOSYS,RAX-ARGOFFSET(%rsp) | 429 | movq $-ENOSYS,RAX-ARGOFFSET(%rsp) |
423 | jmp ret_from_sys_call | 430 | jmp ret_from_sys_call |
424 | 431 | ||
432 | #ifdef CONFIG_AUDITSYSCALL | ||
433 | /* | ||
434 | * Fast path for syscall audit without full syscall trace. | ||
435 | * We just call audit_syscall_entry() directly, and then | ||
436 | * jump back to the normal fast path. | ||
437 | */ | ||
438 | auditsys: | ||
439 | movq %r10,%r9 /* 6th arg: 4th syscall arg */ | ||
440 | movq %rdx,%r8 /* 5th arg: 3rd syscall arg */ | ||
441 | movq %rsi,%rcx /* 4th arg: 2nd syscall arg */ | ||
442 | movq %rdi,%rdx /* 3rd arg: 1st syscall arg */ | ||
443 | movq %rax,%rsi /* 2nd arg: syscall number */ | ||
444 | movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */ | ||
445 | call audit_syscall_entry | ||
446 | LOAD_ARGS 0 /* reload call-clobbered registers */ | ||
447 | jmp system_call_fastpath | ||
448 | |||
449 | /* | ||
450 | * Return fast path for syscall audit. Call audit_syscall_exit() | ||
451 | * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT | ||
452 | * masked off. | ||
453 | */ | ||
454 | sysret_audit: | ||
455 | movq %rax,%rsi /* second arg, syscall return value */ | ||
456 | cmpq $0,%rax /* is it < 0? */ | ||
457 | setl %al /* 1 if so, 0 if not */ | ||
458 | movzbl %al,%edi /* zero-extend that into %edi */ | ||
459 | inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */ | ||
460 | call audit_syscall_exit | ||
461 | movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi | ||
462 | jmp sysret_check | ||
463 | #endif /* CONFIG_AUDITSYSCALL */ | ||
464 | |||
425 | /* Do syscall tracing */ | 465 | /* Do syscall tracing */ |
426 | tracesys: | 466 | tracesys: |
467 | #ifdef CONFIG_AUDITSYSCALL | ||
468 | testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx) | ||
469 | jz auditsys | ||
470 | #endif | ||
427 | SAVE_REST | 471 | SAVE_REST |
428 | movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */ | 472 | movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */ |
429 | FIXUP_TOP_OF_STACK %rdi | 473 | FIXUP_TOP_OF_STACK %rdi |
@@ -448,6 +492,7 @@ tracesys: | |||
448 | * Has correct top of stack, but partial stack frame. | 492 | * Has correct top of stack, but partial stack frame. |
449 | */ | 493 | */ |
450 | .globl int_ret_from_sys_call | 494 | .globl int_ret_from_sys_call |
495 | .globl int_with_check | ||
451 | int_ret_from_sys_call: | 496 | int_ret_from_sys_call: |
452 | DISABLE_INTERRUPTS(CLBR_NONE) | 497 | DISABLE_INTERRUPTS(CLBR_NONE) |
453 | TRACE_IRQS_OFF | 498 | TRACE_IRQS_OFF |
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index 1a9c68845ee8..786548a62d38 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c | |||
@@ -168,7 +168,7 @@ static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask) | |||
168 | * May as well be the first. | 168 | * May as well be the first. |
169 | */ | 169 | */ |
170 | cpu = first_cpu(cpumask); | 170 | cpu = first_cpu(cpumask); |
171 | if ((unsigned)cpu < NR_CPUS) | 171 | if ((unsigned)cpu < nr_cpu_ids) |
172 | return per_cpu(x86_cpu_to_apicid, cpu); | 172 | return per_cpu(x86_cpu_to_apicid, cpu); |
173 | else | 173 | else |
174 | return BAD_APICID; | 174 | return BAD_APICID; |
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index 3c3929340692..2cfcbded888a 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c | |||
@@ -98,7 +98,7 @@ static void uv_send_IPI_mask(cpumask_t mask, int vector) | |||
98 | { | 98 | { |
99 | unsigned int cpu; | 99 | unsigned int cpu; |
100 | 100 | ||
101 | for (cpu = 0; cpu < NR_CPUS; ++cpu) | 101 | for_each_possible_cpu(cpu) |
102 | if (cpu_isset(cpu, mask)) | 102 | if (cpu_isset(cpu, mask)) |
103 | uv_send_IPI_one(cpu, vector); | 103 | uv_send_IPI_one(cpu, vector); |
104 | } | 104 | } |
@@ -132,7 +132,7 @@ static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask) | |||
132 | * May as well be the first. | 132 | * May as well be the first. |
133 | */ | 133 | */ |
134 | cpu = first_cpu(cpumask); | 134 | cpu = first_cpu(cpumask); |
135 | if ((unsigned)cpu < NR_CPUS) | 135 | if ((unsigned)cpu < nr_cpu_ids) |
136 | return per_cpu(x86_cpu_to_apicid, cpu); | 136 | return per_cpu(x86_cpu_to_apicid, cpu); |
137 | else | 137 | else |
138 | return BAD_APICID; | 138 | return BAD_APICID; |
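
The NR_CPUS -> nr_cpu_ids and for_each_possible_cpu() conversions in the genapic hunks above (and in mce_64.c and proc.c earlier) all bound loops and range checks by the number of CPU ids the running system can actually use instead of the compile-time maximum. A minimal illustration, not taken from the patch (do_ipi_work() is a hypothetical placeholder):

	/* Illustrative only: on a 4-way box built with NR_CPUS=4096, nr_cpu_ids is
	 * typically 4, so the second loop touches 4 ids instead of 4096.
	 */
	unsigned int cpu;

	for (cpu = 0; cpu < NR_CPUS; ++cpu)	/* old style: walks the compile-time maximum */
		do_ipi_work(cpu);

	for_each_possible_cpu(cpu)		/* new style: walks only CPUs that can exist */
		do_ipi_work(cpu);
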
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c index 64a46affd858..8269434d1707 100644 --- a/arch/x86/kernel/io_apic_64.c +++ b/arch/x86/kernel/io_apic_64.c | |||
@@ -732,7 +732,7 @@ static int __assign_irq_vector(int irq, cpumask_t mask) | |||
732 | return 0; | 732 | return 0; |
733 | } | 733 | } |
734 | 734 | ||
735 | for_each_cpu_mask(cpu, mask) { | 735 | for_each_cpu_mask_nr(cpu, mask) { |
736 | cpumask_t domain, new_mask; | 736 | cpumask_t domain, new_mask; |
737 | int new_cpu; | 737 | int new_cpu; |
738 | int vector, offset; | 738 | int vector, offset; |
@@ -753,7 +753,7 @@ next: | |||
753 | continue; | 753 | continue; |
754 | if (vector == IA32_SYSCALL_VECTOR) | 754 | if (vector == IA32_SYSCALL_VECTOR) |
755 | goto next; | 755 | goto next; |
756 | for_each_cpu_mask(new_cpu, new_mask) | 756 | for_each_cpu_mask_nr(new_cpu, new_mask) |
757 | if (per_cpu(vector_irq, new_cpu)[vector] != -1) | 757 | if (per_cpu(vector_irq, new_cpu)[vector] != -1) |
758 | goto next; | 758 | goto next; |
759 | /* Found one! */ | 759 | /* Found one! */ |
@@ -763,7 +763,7 @@ next: | |||
763 | cfg->move_in_progress = 1; | 763 | cfg->move_in_progress = 1; |
764 | cfg->old_domain = cfg->domain; | 764 | cfg->old_domain = cfg->domain; |
765 | } | 765 | } |
766 | for_each_cpu_mask(new_cpu, new_mask) | 766 | for_each_cpu_mask_nr(new_cpu, new_mask) |
767 | per_cpu(vector_irq, new_cpu)[vector] = irq; | 767 | per_cpu(vector_irq, new_cpu)[vector] = irq; |
768 | cfg->vector = vector; | 768 | cfg->vector = vector; |
769 | cfg->domain = domain; | 769 | cfg->domain = domain; |
@@ -795,7 +795,7 @@ static void __clear_irq_vector(int irq) | |||
795 | 795 | ||
796 | vector = cfg->vector; | 796 | vector = cfg->vector; |
797 | cpus_and(mask, cfg->domain, cpu_online_map); | 797 | cpus_and(mask, cfg->domain, cpu_online_map); |
798 | for_each_cpu_mask(cpu, mask) | 798 | for_each_cpu_mask_nr(cpu, mask) |
799 | per_cpu(vector_irq, cpu)[vector] = -1; | 799 | per_cpu(vector_irq, cpu)[vector] = -1; |
800 | 800 | ||
801 | cfg->vector = 0; | 801 | cfg->vector = 0; |
@@ -1373,12 +1373,10 @@ static unsigned int startup_ioapic_irq(unsigned int irq) | |||
1373 | static int ioapic_retrigger_irq(unsigned int irq) | 1373 | static int ioapic_retrigger_irq(unsigned int irq) |
1374 | { | 1374 | { |
1375 | struct irq_cfg *cfg = &irq_cfg[irq]; | 1375 | struct irq_cfg *cfg = &irq_cfg[irq]; |
1376 | cpumask_t mask; | ||
1377 | unsigned long flags; | 1376 | unsigned long flags; |
1378 | 1377 | ||
1379 | spin_lock_irqsave(&vector_lock, flags); | 1378 | spin_lock_irqsave(&vector_lock, flags); |
1380 | mask = cpumask_of_cpu(first_cpu(cfg->domain)); | 1379 | send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector); |
1381 | send_IPI_mask(mask, cfg->vector); | ||
1382 | spin_unlock_irqrestore(&vector_lock, flags); | 1380 | spin_unlock_irqrestore(&vector_lock, flags); |
1383 | 1381 | ||
1384 | return 1; | 1382 | return 1; |
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index a8449571858a..3fee2aa50f3f 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c | |||
@@ -62,12 +62,12 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload) | |||
62 | 62 | ||
63 | if (reload) { | 63 | if (reload) { |
64 | #ifdef CONFIG_SMP | 64 | #ifdef CONFIG_SMP |
65 | cpumask_t mask; | 65 | cpumask_of_cpu_ptr_declare(mask); |
66 | 66 | ||
67 | preempt_disable(); | 67 | preempt_disable(); |
68 | load_LDT(pc); | 68 | load_LDT(pc); |
69 | mask = cpumask_of_cpu(smp_processor_id()); | 69 | cpumask_of_cpu_ptr_next(mask, smp_processor_id()); |
70 | if (!cpus_equal(current->mm->cpu_vm_mask, mask)) | 70 | if (!cpus_equal(current->mm->cpu_vm_mask, *mask)) |
71 | smp_call_function(flush_ldt, current->mm, 1); | 71 | smp_call_function(flush_ldt, current->mm, 1); |
72 | preempt_enable(); | 72 | preempt_enable(); |
73 | #else | 73 | #else |
diff --git a/arch/x86/kernel/microcode.c b/arch/x86/kernel/microcode.c index fc4790638b69..6994c751590e 100644 --- a/arch/x86/kernel/microcode.c +++ b/arch/x86/kernel/microcode.c | |||
@@ -388,6 +388,7 @@ static int do_microcode_update (void) | |||
388 | void *new_mc = NULL; | 388 | void *new_mc = NULL; |
389 | int cpu; | 389 | int cpu; |
390 | cpumask_t old; | 390 | cpumask_t old; |
391 | cpumask_of_cpu_ptr_declare(newmask); | ||
391 | 392 | ||
392 | old = current->cpus_allowed; | 393 | old = current->cpus_allowed; |
393 | 394 | ||
@@ -404,7 +405,8 @@ static int do_microcode_update (void) | |||
404 | 405 | ||
405 | if (!uci->valid) | 406 | if (!uci->valid) |
406 | continue; | 407 | continue; |
407 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); | 408 | cpumask_of_cpu_ptr_next(newmask, cpu); |
409 | set_cpus_allowed_ptr(current, newmask); | ||
408 | error = get_maching_microcode(new_mc, cpu); | 410 | error = get_maching_microcode(new_mc, cpu); |
409 | if (error < 0) | 411 | if (error < 0) |
410 | goto out; | 412 | goto out; |
@@ -574,6 +576,7 @@ static int apply_microcode_check_cpu(int cpu) | |||
574 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 576 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
575 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | 577 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; |
576 | cpumask_t old; | 578 | cpumask_t old; |
579 | cpumask_of_cpu_ptr(newmask, cpu); | ||
577 | unsigned int val[2]; | 580 | unsigned int val[2]; |
578 | int err = 0; | 581 | int err = 0; |
579 | 582 | ||
@@ -582,7 +585,7 @@ static int apply_microcode_check_cpu(int cpu) | |||
582 | return 0; | 585 | return 0; |
583 | 586 | ||
584 | old = current->cpus_allowed; | 587 | old = current->cpus_allowed; |
585 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); | 588 | set_cpus_allowed_ptr(current, newmask); |
586 | 589 | ||
587 | /* Check if the microcode we have in memory matches the CPU */ | 590 | /* Check if the microcode we have in memory matches the CPU */ |
588 | if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 || | 591 | if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 || |
@@ -620,11 +623,12 @@ static int apply_microcode_check_cpu(int cpu) | |||
620 | static void microcode_init_cpu(int cpu, int resume) | 623 | static void microcode_init_cpu(int cpu, int resume) |
621 | { | 624 | { |
622 | cpumask_t old; | 625 | cpumask_t old; |
626 | cpumask_of_cpu_ptr(newmask, cpu); | ||
623 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | 627 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; |
624 | 628 | ||
625 | old = current->cpus_allowed; | 629 | old = current->cpus_allowed; |
626 | 630 | ||
627 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); | 631 | set_cpus_allowed_ptr(current, newmask); |
628 | mutex_lock(µcode_mutex); | 632 | mutex_lock(µcode_mutex); |
629 | collect_cpu_info(cpu); | 633 | collect_cpu_info(cpu); |
630 | if (uci->valid && system_state == SYSTEM_RUNNING && !resume) | 634 | if (uci->valid && system_state == SYSTEM_RUNNING && !resume) |
@@ -658,11 +662,12 @@ static ssize_t reload_store(struct sys_device *dev, | |||
658 | return -EINVAL; | 662 | return -EINVAL; |
659 | if (val == 1) { | 663 | if (val == 1) { |
660 | cpumask_t old; | 664 | cpumask_t old; |
665 | cpumask_of_cpu_ptr(newmask, cpu); | ||
661 | 666 | ||
662 | old = current->cpus_allowed; | 667 | old = current->cpus_allowed; |
663 | 668 | ||
664 | get_online_cpus(); | 669 | get_online_cpus(); |
665 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); | 670 | set_cpus_allowed_ptr(current, newmask); |
666 | 671 | ||
667 | mutex_lock(µcode_mutex); | 672 | mutex_lock(µcode_mutex); |
668 | if (uci->valid) | 673 | if (uci->valid) |
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 9dcf39c02972..06a9f643817e 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -411,24 +411,28 @@ void native_machine_shutdown(void) | |||
411 | { | 411 | { |
412 | /* Stop the cpus and apics */ | 412 | /* Stop the cpus and apics */ |
413 | #ifdef CONFIG_SMP | 413 | #ifdef CONFIG_SMP |
414 | int reboot_cpu_id; | ||
415 | 414 | ||
416 | /* The boot cpu is always logical cpu 0 */ | 415 | /* The boot cpu is always logical cpu 0 */ |
417 | reboot_cpu_id = 0; | 416 | int reboot_cpu_id = 0; |
417 | cpumask_of_cpu_ptr(newmask, reboot_cpu_id); | ||
418 | 418 | ||
419 | #ifdef CONFIG_X86_32 | 419 | #ifdef CONFIG_X86_32 |
420 | /* See if there has been given a command line override */ | 420 | /* See if there has been given a command line override */ |
421 | if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) && | 421 | if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) && |
422 | cpu_online(reboot_cpu)) | 422 | cpu_online(reboot_cpu)) { |
423 | reboot_cpu_id = reboot_cpu; | 423 | reboot_cpu_id = reboot_cpu; |
424 | cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id); | ||
425 | } | ||
424 | #endif | 426 | #endif |
425 | 427 | ||
426 | /* Make certain the cpu I'm about to reboot on is online */ | 428 | /* Make certain the cpu I'm about to reboot on is online */ |
427 | if (!cpu_online(reboot_cpu_id)) | 429 | if (!cpu_online(reboot_cpu_id)) { |
428 | reboot_cpu_id = smp_processor_id(); | 430 | reboot_cpu_id = smp_processor_id(); |
431 | cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id); | ||
432 | } | ||
429 | 433 | ||
430 | /* Make certain I only run on the appropriate processor */ | 434 | /* Make certain I only run on the appropriate processor */ |
431 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id)); | 435 | set_cpus_allowed_ptr(current, newmask); |
432 | 436 | ||
433 | /* O.K Now that I'm on the appropriate processor, | 437 | /* O.K Now that I'm on the appropriate processor, |
434 | * stop all of the others. | 438 | * stop all of the others. |
diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c index 07faaa5109cb..6fb5bcdd8933 100644 --- a/arch/x86/kernel/signal_32.c +++ b/arch/x86/kernel/signal_32.c | |||
@@ -661,8 +661,5 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) | |||
661 | if (thread_info_flags & _TIF_SIGPENDING) | 661 | if (thread_info_flags & _TIF_SIGPENDING) |
662 | do_signal(regs); | 662 | do_signal(regs); |
663 | 663 | ||
664 | if (thread_info_flags & _TIF_HRTICK_RESCHED) | ||
665 | hrtick_resched(); | ||
666 | |||
667 | clear_thread_flag(TIF_IRET); | 664 | clear_thread_flag(TIF_IRET); |
668 | } | 665 | } |
diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c index bf87684474f1..47c3d249e638 100644 --- a/arch/x86/kernel/signal_64.c +++ b/arch/x86/kernel/signal_64.c | |||
@@ -496,9 +496,6 @@ void do_notify_resume(struct pt_regs *regs, void *unused, | |||
496 | /* deal with pending signal delivery */ | 496 | /* deal with pending signal delivery */ |
497 | if (thread_info_flags & _TIF_SIGPENDING) | 497 | if (thread_info_flags & _TIF_SIGPENDING) |
498 | do_signal(regs); | 498 | do_signal(regs); |
499 | |||
500 | if (thread_info_flags & _TIF_HRTICK_RESCHED) | ||
501 | hrtick_resched(); | ||
502 | } | 499 | } |
503 | 500 | ||
504 | void signal_fault(struct pt_regs *regs, void __user *frame, char *where) | 501 | void signal_fault(struct pt_regs *regs, void __user *frame, char *where) |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 4b53a647bc0a..332512767f4f 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -438,7 +438,7 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
438 | cpu_set(cpu, cpu_sibling_setup_map); | 438 | cpu_set(cpu, cpu_sibling_setup_map); |
439 | 439 | ||
440 | if (smp_num_siblings > 1) { | 440 | if (smp_num_siblings > 1) { |
441 | for_each_cpu_mask(i, cpu_sibling_setup_map) { | 441 | for_each_cpu_mask_nr(i, cpu_sibling_setup_map) { |
442 | if (c->phys_proc_id == cpu_data(i).phys_proc_id && | 442 | if (c->phys_proc_id == cpu_data(i).phys_proc_id && |
443 | c->cpu_core_id == cpu_data(i).cpu_core_id) { | 443 | c->cpu_core_id == cpu_data(i).cpu_core_id) { |
444 | cpu_set(i, per_cpu(cpu_sibling_map, cpu)); | 444 | cpu_set(i, per_cpu(cpu_sibling_map, cpu)); |
@@ -461,7 +461,7 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
461 | return; | 461 | return; |
462 | } | 462 | } |
463 | 463 | ||
464 | for_each_cpu_mask(i, cpu_sibling_setup_map) { | 464 | for_each_cpu_mask_nr(i, cpu_sibling_setup_map) { |
465 | if (per_cpu(cpu_llc_id, cpu) != BAD_APICID && | 465 | if (per_cpu(cpu_llc_id, cpu) != BAD_APICID && |
466 | per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { | 466 | per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { |
467 | cpu_set(i, c->llc_shared_map); | 467 | cpu_set(i, c->llc_shared_map); |
@@ -1219,7 +1219,7 @@ static void remove_siblinginfo(int cpu) | |||
1219 | int sibling; | 1219 | int sibling; |
1220 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 1220 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
1221 | 1221 | ||
1222 | for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) { | 1222 | for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) { |
1223 | cpu_clear(cpu, per_cpu(cpu_core_map, sibling)); | 1223 | cpu_clear(cpu, per_cpu(cpu_core_map, sibling)); |
1224 | /*/ | 1224 | /*/ |
1225 | * last thread sibling in this cpu core going down | 1225 | * last thread sibling in this cpu core going down |
@@ -1228,7 +1228,7 @@ static void remove_siblinginfo(int cpu) | |||
1228 | cpu_data(sibling).booted_cores--; | 1228 | cpu_data(sibling).booted_cores--; |
1229 | } | 1229 | } |
1230 | 1230 | ||
1231 | for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu)) | 1231 | for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu)) |
1232 | cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling)); | 1232 | cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling)); |
1233 | cpus_clear(per_cpu(cpu_sibling_map, cpu)); | 1233 | cpus_clear(per_cpu(cpu_sibling_map, cpu)); |
1234 | cpus_clear(per_cpu(cpu_core_map, cpu)); | 1234 | cpus_clear(per_cpu(cpu_core_map, cpu)); |
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index e693812ac59a..d8faf79a0a1d 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c | |||
@@ -367,7 +367,7 @@ static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector) | |||
367 | 367 | ||
368 | cpus_and(mask, mask, cpu_online_map); | 368 | cpus_and(mask, mask, cpu_online_map); |
369 | 369 | ||
370 | for_each_cpu_mask(cpu, mask) | 370 | for_each_cpu_mask_nr(cpu, mask) |
371 | xen_send_IPI_one(cpu, vector); | 371 | xen_send_IPI_one(cpu, vector); |
372 | } | 372 | } |
373 | 373 | ||
@@ -378,7 +378,7 @@ static void xen_smp_send_call_function_ipi(cpumask_t mask) | |||
378 | xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR); | 378 | xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR); |
379 | 379 | ||
380 | /* Make sure other vcpus get a chance to run if they need to. */ | 380 | /* Make sure other vcpus get a chance to run if they need to. */ |
381 | for_each_cpu_mask(cpu, mask) { | 381 | for_each_cpu_mask_nr(cpu, mask) { |
382 | if (xen_vcpu_stolen(cpu)) { | 382 | if (xen_vcpu_stolen(cpu)) { |
383 | HYPERVISOR_sched_op(SCHEDOP_yield, 0); | 383 | HYPERVISOR_sched_op(SCHEDOP_yield, 0); |
384 | break; | 384 | break; |
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c index a5eda80e8427..ddccfb01c416 100644 --- a/crypto/async_tx/async_memcpy.c +++ b/crypto/async_tx/async_memcpy.c | |||
@@ -73,15 +73,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, | |||
73 | pr_debug("%s: (sync) len: %zu\n", __func__, len); | 73 | pr_debug("%s: (sync) len: %zu\n", __func__, len); |
74 | 74 | ||
75 | /* wait for any prerequisite operations */ | 75 | /* wait for any prerequisite operations */ |
76 | if (depend_tx) { | 76 | async_tx_quiesce(&depend_tx); |
77 | /* if ack is already set then we cannot be sure | ||
78 | * we are referring to the correct operation | ||
79 | */ | ||
80 | BUG_ON(async_tx_test_ack(depend_tx)); | ||
81 | if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR) | ||
82 | panic("%s: DMA_ERROR waiting for depend_tx\n", | ||
83 | __func__); | ||
84 | } | ||
85 | 77 | ||
86 | dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset; | 78 | dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset; |
87 | src_buf = kmap_atomic(src, KM_USER1) + src_offset; | 79 | src_buf = kmap_atomic(src, KM_USER1) + src_offset; |
@@ -91,7 +83,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, | |||
91 | kunmap_atomic(dest_buf, KM_USER0); | 83 | kunmap_atomic(dest_buf, KM_USER0); |
92 | kunmap_atomic(src_buf, KM_USER1); | 84 | kunmap_atomic(src_buf, KM_USER1); |
93 | 85 | ||
94 | async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param); | 86 | async_tx_sync_epilog(cb_fn, cb_param); |
95 | } | 87 | } |
96 | 88 | ||
97 | return tx; | 89 | return tx; |
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c index f5ff3906b035..5b5eb99bb244 100644 --- a/crypto/async_tx/async_memset.c +++ b/crypto/async_tx/async_memset.c | |||
@@ -72,19 +72,11 @@ async_memset(struct page *dest, int val, unsigned int offset, | |||
72 | dest_buf = (void *) (((char *) page_address(dest)) + offset); | 72 | dest_buf = (void *) (((char *) page_address(dest)) + offset); |
73 | 73 | ||
74 | /* wait for any prerequisite operations */ | 74 | /* wait for any prerequisite operations */ |
75 | if (depend_tx) { | 75 | async_tx_quiesce(&depend_tx); |
76 | /* if ack is already set then we cannot be sure | ||
77 | * we are referring to the correct operation | ||
78 | */ | ||
79 | BUG_ON(depend_tx->ack); | ||
80 | if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR) | ||
81 | panic("%s: DMA_ERROR waiting for depend_tx\n", | ||
82 | __func__); | ||
83 | } | ||
84 | 76 | ||
85 | memset(dest_buf, val, len); | 77 | memset(dest_buf, val, len); |
86 | 78 | ||
87 | async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param); | 79 | async_tx_sync_epilog(cb_fn, cb_param); |
88 | } | 80 | } |
89 | 81 | ||
90 | return tx; | 82 | return tx; |
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index 095c798d3170..85eaf7b1c531 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c | |||
@@ -295,7 +295,7 @@ dma_channel_add_remove(struct dma_client *client, | |||
295 | case DMA_RESOURCE_REMOVED: | 295 | case DMA_RESOURCE_REMOVED: |
296 | found = 0; | 296 | found = 0; |
297 | spin_lock_irqsave(&async_tx_lock, flags); | 297 | spin_lock_irqsave(&async_tx_lock, flags); |
298 | list_for_each_entry_rcu(ref, &async_tx_master_list, node) | 298 | list_for_each_entry(ref, &async_tx_master_list, node) |
299 | if (ref->chan == chan) { | 299 | if (ref->chan == chan) { |
300 | /* permit backing devices to go away */ | 300 | /* permit backing devices to go away */ |
301 | dma_chan_put(ref->chan); | 301 | dma_chan_put(ref->chan); |
@@ -608,23 +608,34 @@ async_trigger_callback(enum async_tx_flags flags, | |||
608 | pr_debug("%s: (sync)\n", __func__); | 608 | pr_debug("%s: (sync)\n", __func__); |
609 | 609 | ||
610 | /* wait for any prerequisite operations */ | 610 | /* wait for any prerequisite operations */ |
611 | if (depend_tx) { | 611 | async_tx_quiesce(&depend_tx); |
612 | /* if ack is already set then we cannot be sure | ||
613 | * we are referring to the correct operation | ||
614 | */ | ||
615 | BUG_ON(async_tx_test_ack(depend_tx)); | ||
616 | if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR) | ||
617 | panic("%s: DMA_ERROR waiting for depend_tx\n", | ||
618 | __func__); | ||
619 | } | ||
620 | 612 | ||
621 | async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param); | 613 | async_tx_sync_epilog(cb_fn, cb_param); |
622 | } | 614 | } |
623 | 615 | ||
624 | return tx; | 616 | return tx; |
625 | } | 617 | } |
626 | EXPORT_SYMBOL_GPL(async_trigger_callback); | 618 | EXPORT_SYMBOL_GPL(async_trigger_callback); |
627 | 619 | ||
620 | /** | ||
621 | * async_tx_quiesce - ensure tx is complete and freeable upon return | ||
622 | * @tx - transaction to quiesce | ||
623 | */ | ||
624 | void async_tx_quiesce(struct dma_async_tx_descriptor **tx) | ||
625 | { | ||
626 | if (*tx) { | ||
627 | /* if ack is already set then we cannot be sure | ||
628 | * we are referring to the correct operation | ||
629 | */ | ||
630 | BUG_ON(async_tx_test_ack(*tx)); | ||
631 | if (dma_wait_for_async_tx(*tx) == DMA_ERROR) | ||
632 | panic("DMA_ERROR waiting for transaction\n"); | ||
633 | async_tx_ack(*tx); | ||
634 | *tx = NULL; | ||
635 | } | ||
636 | } | ||
637 | EXPORT_SYMBOL_GPL(async_tx_quiesce); | ||
638 | |||
628 | module_init(async_tx_init); | 639 | module_init(async_tx_init); |
629 | module_exit(async_tx_exit); | 640 | module_exit(async_tx_exit); |
630 | 641 | ||
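
Caller-side, the async_tx_quiesce() helper exported above replaces the open-coded dependency wait that async_memcpy(), async_memset(), async_trigger_callback() and async_xor() each carried. Condensed from the hunks in this patch (no new API is introduced here):

	/* Before: every synchronous path open-coded the wait. */
	if (depend_tx) {
		BUG_ON(async_tx_test_ack(depend_tx));
		if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
			panic("%s: DMA_ERROR waiting for depend_tx\n", __func__);
	}

	/* After: one call waits, acks and clears the dependency pointer. */
	async_tx_quiesce(&depend_tx);
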
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index 3a0dddca5a10..65974c6d3d7a 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c | |||
@@ -35,74 +35,121 @@ | |||
35 | * when CONFIG_DMA_ENGINE=n | 35 | * when CONFIG_DMA_ENGINE=n |
36 | */ | 36 | */ |
37 | static __always_inline struct dma_async_tx_descriptor * | 37 | static __always_inline struct dma_async_tx_descriptor * |
38 | do_async_xor(struct dma_device *device, | 38 | do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, |
39 | struct dma_chan *chan, struct page *dest, struct page **src_list, | 39 | unsigned int offset, int src_cnt, size_t len, |
40 | unsigned int offset, unsigned int src_cnt, size_t len, | 40 | enum async_tx_flags flags, |
41 | enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx, | 41 | struct dma_async_tx_descriptor *depend_tx, |
42 | dma_async_tx_callback cb_fn, void *cb_param) | 42 | dma_async_tx_callback cb_fn, void *cb_param) |
43 | { | 43 | { |
44 | dma_addr_t dma_dest; | 44 | struct dma_device *dma = chan->device; |
45 | dma_addr_t *dma_src = (dma_addr_t *) src_list; | 45 | dma_addr_t *dma_src = (dma_addr_t *) src_list; |
46 | struct dma_async_tx_descriptor *tx; | 46 | struct dma_async_tx_descriptor *tx = NULL; |
47 | int src_off = 0; | ||
47 | int i; | 48 | int i; |
48 | unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0; | 49 | dma_async_tx_callback _cb_fn; |
49 | 50 | void *_cb_param; | |
50 | pr_debug("%s: len: %zu\n", __func__, len); | 51 | enum async_tx_flags async_flags; |
51 | 52 | enum dma_ctrl_flags dma_flags; | |
52 | dma_dest = dma_map_page(device->dev, dest, offset, len, | 53 | int xor_src_cnt; |
53 | DMA_FROM_DEVICE); | 54 | dma_addr_t dma_dest; |
54 | 55 | ||
56 | dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_FROM_DEVICE); | ||
55 | for (i = 0; i < src_cnt; i++) | 57 | for (i = 0; i < src_cnt; i++) |
56 | dma_src[i] = dma_map_page(device->dev, src_list[i], offset, | 58 | dma_src[i] = dma_map_page(dma->dev, src_list[i], offset, |
57 | len, DMA_TO_DEVICE); | 59 | len, DMA_TO_DEVICE); |
58 | 60 | ||
59 | /* Since we have clobbered the src_list we are committed | 61 | while (src_cnt) { |
60 | * to doing this asynchronously. Drivers force forward progress | 62 | async_flags = flags; |
61 | * in case they can not provide a descriptor | 63 | dma_flags = 0; |
62 | */ | 64 | xor_src_cnt = min(src_cnt, dma->max_xor); |
63 | tx = device->device_prep_dma_xor(chan, dma_dest, dma_src, src_cnt, len, | 65 | /* if we are submitting additional xors, leave the chain open, |
64 | dma_prep_flags); | 66 | * clear the callback parameters, and leave the destination |
65 | if (!tx) { | 67 | * buffer mapped |
66 | if (depend_tx) | 68 | */ |
67 | dma_wait_for_async_tx(depend_tx); | 69 | if (src_cnt > xor_src_cnt) { |
68 | 70 | async_flags &= ~ASYNC_TX_ACK; | |
69 | while (!tx) | 71 | dma_flags = DMA_COMPL_SKIP_DEST_UNMAP; |
70 | tx = device->device_prep_dma_xor(chan, dma_dest, | 72 | _cb_fn = NULL; |
71 | dma_src, src_cnt, len, | 73 | _cb_param = NULL; |
72 | dma_prep_flags); | 74 | } else { |
73 | } | 75 | _cb_fn = cb_fn; |
76 | _cb_param = cb_param; | ||
77 | } | ||
78 | if (_cb_fn) | ||
79 | dma_flags |= DMA_PREP_INTERRUPT; | ||
74 | 80 | ||
75 | async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); | 81 | /* Since we have clobbered the src_list we are committed |
82 | * to doing this asynchronously. Drivers force forward progress | ||
83 | * in case they can not provide a descriptor | ||
84 | */ | ||
85 | tx = dma->device_prep_dma_xor(chan, dma_dest, &dma_src[src_off], | ||
86 | xor_src_cnt, len, dma_flags); | ||
87 | |||
88 | if (unlikely(!tx)) | ||
89 | async_tx_quiesce(&depend_tx); | ||
90 | |||
91 | /* spin wait for the preceding transactions to complete */ | ||
92 | while (unlikely(!tx)) { | ||
93 | dma_async_issue_pending(chan); | ||
94 | tx = dma->device_prep_dma_xor(chan, dma_dest, | ||
95 | &dma_src[src_off], | ||
96 | xor_src_cnt, len, | ||
97 | dma_flags); | ||
98 | } | ||
99 | |||
100 | async_tx_submit(chan, tx, async_flags, depend_tx, _cb_fn, | ||
101 | _cb_param); | ||
102 | |||
103 | depend_tx = tx; | ||
104 | flags |= ASYNC_TX_DEP_ACK; | ||
105 | |||
106 | if (src_cnt > xor_src_cnt) { | ||
107 | /* drop completed sources */ | ||
108 | src_cnt -= xor_src_cnt; | ||
109 | src_off += xor_src_cnt; | ||
110 | |||
111 | /* use the intermediate result as a source */ | ||
112 | dma_src[--src_off] = dma_dest; | ||
113 | src_cnt++; | ||
114 | } else | ||
115 | break; | ||
116 | } | ||
76 | 117 | ||
77 | return tx; | 118 | return tx; |
78 | } | 119 | } |
79 | 120 | ||
80 | static void | 121 | static void |
81 | do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset, | 122 | do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset, |
82 | unsigned int src_cnt, size_t len, enum async_tx_flags flags, | 123 | int src_cnt, size_t len, enum async_tx_flags flags, |
83 | struct dma_async_tx_descriptor *depend_tx, | 124 | dma_async_tx_callback cb_fn, void *cb_param) |
84 | dma_async_tx_callback cb_fn, void *cb_param) | ||
85 | { | 125 | { |
86 | void *_dest; | ||
87 | int i; | 126 | int i; |
88 | 127 | int xor_src_cnt; | |
89 | pr_debug("%s: len: %zu\n", __func__, len); | 128 | int src_off = 0; |
129 | void *dest_buf; | ||
130 | void **srcs = (void **) src_list; | ||
90 | 131 | ||
91 | /* reuse the 'src_list' array to convert to buffer pointers */ | 132 | /* reuse the 'src_list' array to convert to buffer pointers */ |
92 | for (i = 0; i < src_cnt; i++) | 133 | for (i = 0; i < src_cnt; i++) |
93 | src_list[i] = (struct page *) | 134 | srcs[i] = page_address(src_list[i]) + offset; |
94 | (page_address(src_list[i]) + offset); | ||
95 | 135 | ||
96 | /* set destination address */ | 136 | /* set destination address */ |
97 | _dest = page_address(dest) + offset; | 137 | dest_buf = page_address(dest) + offset; |
98 | 138 | ||
99 | if (flags & ASYNC_TX_XOR_ZERO_DST) | 139 | if (flags & ASYNC_TX_XOR_ZERO_DST) |
100 | memset(_dest, 0, len); | 140 | memset(dest_buf, 0, len); |
101 | 141 | ||
102 | xor_blocks(src_cnt, len, _dest, | 142 | while (src_cnt > 0) { |
103 | (void **) src_list); | 143 | /* process up to 'MAX_XOR_BLOCKS' sources */ |
144 | xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS); | ||
145 | xor_blocks(xor_src_cnt, len, dest_buf, &srcs[src_off]); | ||
104 | 146 | ||
105 | async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param); | 147 | /* drop completed sources */ |
148 | src_cnt -= xor_src_cnt; | ||
149 | src_off += xor_src_cnt; | ||
150 | } | ||
151 | |||
152 | async_tx_sync_epilog(cb_fn, cb_param); | ||
106 | } | 153 | } |
107 | 154 | ||
108 | /** | 155 | /** |
@@ -132,106 +179,34 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset, | |||
132 | struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR, | 179 | struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR, |
133 | &dest, 1, src_list, | 180 | &dest, 1, src_list, |
134 | src_cnt, len); | 181 | src_cnt, len); |
135 | struct dma_device *device = chan ? chan->device : NULL; | ||
136 | struct dma_async_tx_descriptor *tx = NULL; | ||
137 | dma_async_tx_callback _cb_fn; | ||
138 | void *_cb_param; | ||
139 | unsigned long local_flags; | ||
140 | int xor_src_cnt; | ||
141 | int i = 0, src_off = 0; | ||
142 | |||
143 | BUG_ON(src_cnt <= 1); | 182 | BUG_ON(src_cnt <= 1); |
144 | 183 | ||
145 | while (src_cnt) { | 184 | if (chan) { |
146 | local_flags = flags; | 185 | /* run the xor asynchronously */ |
147 | if (device) { /* run the xor asynchronously */ | 186 | pr_debug("%s (async): len: %zu\n", __func__, len); |
148 | xor_src_cnt = min(src_cnt, device->max_xor); | ||
149 | /* if we are submitting additional xors | ||
150 | * only set the callback on the last transaction | ||
151 | */ | ||
152 | if (src_cnt > xor_src_cnt) { | ||
153 | local_flags &= ~ASYNC_TX_ACK; | ||
154 | _cb_fn = NULL; | ||
155 | _cb_param = NULL; | ||
156 | } else { | ||
157 | _cb_fn = cb_fn; | ||
158 | _cb_param = cb_param; | ||
159 | } | ||
160 | |||
161 | tx = do_async_xor(device, chan, dest, | ||
162 | &src_list[src_off], offset, | ||
163 | xor_src_cnt, len, local_flags, | ||
164 | depend_tx, _cb_fn, _cb_param); | ||
165 | } else { /* run the xor synchronously */ | ||
166 | /* in the sync case the dest is an implied source | ||
167 | * (assumes the dest is at the src_off index) | ||
168 | */ | ||
169 | if (flags & ASYNC_TX_XOR_DROP_DST) { | ||
170 | src_cnt--; | ||
171 | src_off++; | ||
172 | } | ||
173 | |||
174 | /* process up to 'MAX_XOR_BLOCKS' sources */ | ||
175 | xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS); | ||
176 | |||
177 | /* if we are submitting additional xors | ||
178 | * only set the callback on the last transaction | ||
179 | */ | ||
180 | if (src_cnt > xor_src_cnt) { | ||
181 | local_flags &= ~ASYNC_TX_ACK; | ||
182 | _cb_fn = NULL; | ||
183 | _cb_param = NULL; | ||
184 | } else { | ||
185 | _cb_fn = cb_fn; | ||
186 | _cb_param = cb_param; | ||
187 | } | ||
188 | |||
189 | /* wait for any prerequisite operations */ | ||
190 | if (depend_tx) { | ||
191 | /* if ack is already set then we cannot be sure | ||
192 | * we are referring to the correct operation | ||
193 | */ | ||
194 | BUG_ON(async_tx_test_ack(depend_tx)); | ||
195 | if (dma_wait_for_async_tx(depend_tx) == | ||
196 | DMA_ERROR) | ||
197 | panic("%s: DMA_ERROR waiting for " | ||
198 | "depend_tx\n", | ||
199 | __func__); | ||
200 | } | ||
201 | |||
202 | do_sync_xor(dest, &src_list[src_off], offset, | ||
203 | xor_src_cnt, len, local_flags, depend_tx, | ||
204 | _cb_fn, _cb_param); | ||
205 | } | ||
206 | 187 | ||
207 | /* the previous tx is hidden from the client, | 188 | return do_async_xor(chan, dest, src_list, offset, src_cnt, len, |
208 | * so ack it | 189 | flags, depend_tx, cb_fn, cb_param); |
209 | */ | 190 | } else { |
210 | if (i && depend_tx) | 191 | /* run the xor synchronously */ |
211 | async_tx_ack(depend_tx); | 192 | pr_debug("%s (sync): len: %zu\n", __func__, len); |
212 | 193 | ||
213 | depend_tx = tx; | 194 | /* in the sync case the dest is an implied source |
195 | * (assumes the dest is the first source) | ||
196 | */ | ||
197 | if (flags & ASYNC_TX_XOR_DROP_DST) { | ||
198 | src_cnt--; | ||
199 | src_list++; | ||
200 | } | ||
214 | 201 | ||
215 | if (src_cnt > xor_src_cnt) { | 202 | /* wait for any prerequisite operations */ |
216 | /* drop completed sources */ | 203 | async_tx_quiesce(&depend_tx); |
217 | src_cnt -= xor_src_cnt; | ||
218 | src_off += xor_src_cnt; | ||
219 | 204 | ||
220 | /* unconditionally preserve the destination */ | 205 | do_sync_xor(dest, src_list, offset, src_cnt, len, |
221 | flags &= ~ASYNC_TX_XOR_ZERO_DST; | 206 | flags, cb_fn, cb_param); |
222 | 207 | ||
223 | /* use the intermediate result a source, but remember | 208 | return NULL; |
224 | * it's dropped, because it's implied, in the sync case | ||
225 | */ | ||
226 | src_list[--src_off] = dest; | ||
227 | src_cnt++; | ||
228 | flags |= ASYNC_TX_XOR_DROP_DST; | ||
229 | } else | ||
230 | src_cnt = 0; | ||
231 | i++; | ||
232 | } | 209 | } |
233 | |||
234 | return tx; | ||
235 | } | 210 | } |
236 | EXPORT_SYMBOL_GPL(async_xor); | 211 | EXPORT_SYMBOL_GPL(async_xor); |
237 | 212 | ||
@@ -285,11 +260,11 @@ async_xor_zero_sum(struct page *dest, struct page **src_list, | |||
285 | tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt, | 260 | tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt, |
286 | len, result, | 261 | len, result, |
287 | dma_prep_flags); | 262 | dma_prep_flags); |
288 | if (!tx) { | 263 | if (unlikely(!tx)) { |
289 | if (depend_tx) | 264 | async_tx_quiesce(&depend_tx); |
290 | dma_wait_for_async_tx(depend_tx); | ||
291 | 265 | ||
292 | while (!tx) | 266 | while (!tx) |
267 | dma_async_issue_pending(chan); | ||
293 | tx = device->device_prep_dma_zero_sum(chan, | 268 | tx = device->device_prep_dma_zero_sum(chan, |
294 | dma_src, src_cnt, len, result, | 269 | dma_src, src_cnt, len, result, |
295 | dma_prep_flags); | 270 | dma_prep_flags); |
@@ -307,18 +282,11 @@ async_xor_zero_sum(struct page *dest, struct page **src_list, | |||
307 | tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags, | 282 | tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags, |
308 | depend_tx, NULL, NULL); | 283 | depend_tx, NULL, NULL); |
309 | 284 | ||
310 | if (tx) { | 285 | async_tx_quiesce(&tx); |
311 | if (dma_wait_for_async_tx(tx) == DMA_ERROR) | ||
312 | panic("%s: DMA_ERROR waiting for tx\n", | ||
313 | __func__); | ||
314 | async_tx_ack(tx); | ||
315 | } | ||
316 | 286 | ||
317 | *result = page_is_zero(dest, offset, len) ? 0 : 1; | 287 | *result = page_is_zero(dest, offset, len) ? 0 : 1; |
318 | 288 | ||
319 | tx = NULL; | 289 | async_tx_sync_epilog(cb_fn, cb_param); |
320 | |||
321 | async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param); | ||
322 | } | 290 | } |
323 | 291 | ||
324 | return tx; | 292 | return tx; |
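
The reworked async_xor() above folds an arbitrarily long source list in rounds: at most dma->max_xor (async) or MAX_XOR_BLOCKS (sync) sources are consumed per round, and the accumulated destination then stands in as an implied source for the next round. That loop shape can be reproduced in plain, self-contained userspace C; the sketch below is illustrative only, and MAX_CHUNK, xor_chunk() and xor_fold() are made-up names, not async_tx API.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define MAX_CHUNK 3	/* stand-in for dma->max_xor / MAX_XOR_BLOCKS */

/* XOR src_cnt source buffers of len bytes into dest (accumulating). */
static void xor_chunk(unsigned char *dest, unsigned char **srcs,
		      int src_cnt, size_t len)
{
	int i;
	size_t j;

	for (i = 0; i < src_cnt; i++)
		for (j = 0; j < len; j++)
			dest[j] ^= srcs[i][j];
}

/*
 * Fold any number of sources, MAX_CHUNK at a time.  Because dest is
 * accumulated in place, the partial result of one round is implicitly a
 * source of the next -- the same idea as re-inserting dma_dest into
 * dma_src[] in do_async_xor() above.
 */
static void xor_fold(unsigned char *dest, unsigned char **srcs,
		     int src_cnt, size_t len)
{
	int src_off = 0;

	memset(dest, 0, len);	/* ASYNC_TX_XOR_ZERO_DST behaviour */
	while (src_cnt > 0) {
		int n = src_cnt < MAX_CHUNK ? src_cnt : MAX_CHUNK;

		xor_chunk(dest, &srcs[src_off], n, len);
		src_cnt -= n;
		src_off += n;
	}
}

int main(void)
{
	unsigned char a[4] = { 1, 2, 3, 4 }, b[4] = { 4, 3, 2, 1 };
	unsigned char c[4] = { 5, 5, 5, 5 }, d[4] = { 0, 255, 0, 255 };
	unsigned char *srcs[4] = { a, b, c, d };
	unsigned char dest[4];

	xor_fold(dest, srcs, 4, sizeof(dest));
	printf("%02x %02x %02x %02x\n", dest[0], dest[1], dest[2], dest[3]);
	return 0;
}

Compiling and running this prints 00 fb 04 ff, the XOR of all four buffers, regardless of how small MAX_CHUNK is -- which is exactly the invariant the chunked kernel path preserves.
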
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index 0622ace05220..a2c3f9cfa549 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c | |||
@@ -827,6 +827,7 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr) | |||
827 | static int acpi_processor_get_throttling(struct acpi_processor *pr) | 827 | static int acpi_processor_get_throttling(struct acpi_processor *pr) |
828 | { | 828 | { |
829 | cpumask_t saved_mask; | 829 | cpumask_t saved_mask; |
830 | cpumask_of_cpu_ptr_declare(new_mask); | ||
830 | int ret; | 831 | int ret; |
831 | 832 | ||
832 | if (!pr) | 833 | if (!pr) |
@@ -838,7 +839,8 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr) | |||
838 | * Migrate task to the cpu pointed by pr. | 839 | * Migrate task to the cpu pointed by pr. |
839 | */ | 840 | */ |
840 | saved_mask = current->cpus_allowed; | 841 | saved_mask = current->cpus_allowed; |
841 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id)); | 842 | cpumask_of_cpu_ptr_next(new_mask, pr->id); |
843 | set_cpus_allowed_ptr(current, new_mask); | ||
842 | ret = pr->throttling.acpi_processor_get_throttling(pr); | 844 | ret = pr->throttling.acpi_processor_get_throttling(pr); |
843 | /* restore the previous state */ | 845 | /* restore the previous state */ |
844 | set_cpus_allowed_ptr(current, &saved_mask); | 846 | set_cpus_allowed_ptr(current, &saved_mask); |
@@ -987,6 +989,7 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr, | |||
987 | int acpi_processor_set_throttling(struct acpi_processor *pr, int state) | 989 | int acpi_processor_set_throttling(struct acpi_processor *pr, int state) |
988 | { | 990 | { |
989 | cpumask_t saved_mask; | 991 | cpumask_t saved_mask; |
992 | cpumask_of_cpu_ptr_declare(new_mask); | ||
990 | int ret = 0; | 993 | int ret = 0; |
991 | unsigned int i; | 994 | unsigned int i; |
992 | struct acpi_processor *match_pr; | 995 | struct acpi_processor *match_pr; |
@@ -1013,7 +1016,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state) | |||
1013 | * affected cpu in order to get one proper T-state. | 1016 | * affected cpu in order to get one proper T-state. |
1014 | * The notifier event is THROTTLING_PRECHANGE. | 1017 | * The notifier event is THROTTLING_PRECHANGE. |
1015 | */ | 1018 | */ |
1016 | for_each_cpu_mask(i, online_throttling_cpus) { | 1019 | for_each_cpu_mask_nr(i, online_throttling_cpus) { |
1017 | t_state.cpu = i; | 1020 | t_state.cpu = i; |
1018 | acpi_processor_throttling_notifier(THROTTLING_PRECHANGE, | 1021 | acpi_processor_throttling_notifier(THROTTLING_PRECHANGE, |
1019 | &t_state); | 1022 | &t_state); |
@@ -1025,7 +1028,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state) | |||
1025 | * it can be called only for the cpu pointed by pr. | 1028 | * it can be called only for the cpu pointed by pr. |
1026 | */ | 1029 | */ |
1027 | if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) { | 1030 | if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) { |
1028 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id)); | 1031 | cpumask_of_cpu_ptr_next(new_mask, pr->id); |
1032 | set_cpus_allowed_ptr(current, new_mask); | ||
1029 | ret = p_throttling->acpi_processor_set_throttling(pr, | 1033 | ret = p_throttling->acpi_processor_set_throttling(pr, |
1030 | t_state.target_state); | 1034 | t_state.target_state); |
1031 | } else { | 1035 | } else { |
@@ -1034,7 +1038,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state) | |||
1034 | * it is necessary to set T-state for every affected | 1038 | * it is necessary to set T-state for every affected |
1035 | * cpus. | 1039 | * cpus. |
1036 | */ | 1040 | */ |
1037 | for_each_cpu_mask(i, online_throttling_cpus) { | 1041 | for_each_cpu_mask_nr(i, online_throttling_cpus) { |
1038 | match_pr = per_cpu(processors, i); | 1042 | match_pr = per_cpu(processors, i); |
1039 | /* | 1043 | /* |
1040 | * If the pointer is invalid, we will report the | 1044 | * If the pointer is invalid, we will report the |
@@ -1056,7 +1060,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state) | |||
1056 | continue; | 1060 | continue; |
1057 | } | 1061 | } |
1058 | t_state.cpu = i; | 1062 | t_state.cpu = i; |
1059 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(i)); | 1063 | cpumask_of_cpu_ptr_next(new_mask, i); |
1064 | set_cpus_allowed_ptr(current, new_mask); | ||
1060 | ret = match_pr->throttling. | 1065 | ret = match_pr->throttling. |
1061 | acpi_processor_set_throttling( | 1066 | acpi_processor_set_throttling( |
1062 | match_pr, t_state.target_state); | 1067 | match_pr, t_state.target_state); |
@@ -1068,7 +1073,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state) | |||
1068 | * affected cpu to update the T-states. | 1073 | * affected cpu to update the T-states. |
1069 | * The notifier event is THROTTLING_POSTCHANGE | 1074 | * The notifier event is THROTTLING_POSTCHANGE |
1070 | */ | 1075 | */ |
1071 | for_each_cpu_mask(i, online_throttling_cpus) { | 1076 | for_each_cpu_mask_nr(i, online_throttling_cpus) { |
1072 | t_state.cpu = i; | 1077 | t_state.cpu = i; |
1073 | acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE, | 1078 | acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE, |
1074 | &t_state); | 1079 | &t_state); |
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 20537d507909..64f5d54f7edc 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c | |||
@@ -121,14 +121,14 @@ static ssize_t print_cpus_##type(struct sysdev_class *class, char *buf) \ | |||
121 | { \ | 121 | { \ |
122 | return print_cpus_map(buf, &cpu_##type##_map); \ | 122 | return print_cpus_map(buf, &cpu_##type##_map); \ |
123 | } \ | 123 | } \ |
124 | struct sysdev_class_attribute attr_##type##_map = \ | 124 | static struct sysdev_class_attribute attr_##type##_map = \ |
125 | _SYSDEV_CLASS_ATTR(type, 0444, print_cpus_##type, NULL) | 125 | _SYSDEV_CLASS_ATTR(type, 0444, print_cpus_##type, NULL) |
126 | 126 | ||
127 | print_cpus_func(online); | 127 | print_cpus_func(online); |
128 | print_cpus_func(possible); | 128 | print_cpus_func(possible); |
129 | print_cpus_func(present); | 129 | print_cpus_func(present); |
130 | 130 | ||
131 | struct sysdev_class_attribute *cpu_state_attr[] = { | 131 | static struct sysdev_class_attribute *cpu_state_attr[] = { |
132 | &attr_online_map, | 132 | &attr_online_map, |
133 | &attr_possible_map, | 133 | &attr_possible_map, |
134 | &attr_present_map, | 134 | &attr_present_map, |
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c index a22662b6a1a5..39f6357e3b5d 100644 --- a/drivers/char/nvram.c +++ b/drivers/char/nvram.c | |||
@@ -107,7 +107,6 @@ | |||
107 | #include <linux/init.h> | 107 | #include <linux/init.h> |
108 | #include <linux/proc_fs.h> | 108 | #include <linux/proc_fs.h> |
109 | #include <linux/spinlock.h> | 109 | #include <linux/spinlock.h> |
110 | #include <linux/smp_lock.h> | ||
111 | 110 | ||
112 | #include <asm/io.h> | 111 | #include <asm/io.h> |
113 | #include <asm/uaccess.h> | 112 | #include <asm/uaccess.h> |
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index 15e597d03002..fa48dba5ba5e 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c | |||
@@ -915,7 +915,7 @@ static void tty_reset_termios(struct tty_struct *tty) | |||
915 | * do_tty_hangup - actual handler for hangup events | 915 | * do_tty_hangup - actual handler for hangup events |
916 | * @work: tty device | 916 | * @work: tty device |
917 | * | 917 | * |
918 | k * This can be called by the "eventd" kernel thread. That is process | 918 | * This can be called by the "eventd" kernel thread. That is process |
919 | * synchronous but doesn't hold any locks, so we need to make sure we | 919 | * synchronous but doesn't hold any locks, so we need to make sure we |
920 | * have the appropriate locks for what we're doing. | 920 | * have the appropriate locks for what we're doing. |
921 | * | 921 | * |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index ee1df0d45e81..8d6a3ff02672 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -589,7 +589,7 @@ static ssize_t show_cpus(cpumask_t mask, char *buf) | |||
589 | ssize_t i = 0; | 589 | ssize_t i = 0; |
590 | unsigned int cpu; | 590 | unsigned int cpu; |
591 | 591 | ||
592 | for_each_cpu_mask(cpu, mask) { | 592 | for_each_cpu_mask_nr(cpu, mask) { |
593 | if (i) | 593 | if (i) |
594 | i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " "); | 594 | i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " "); |
595 | i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu); | 595 | i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu); |
@@ -835,7 +835,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) | |||
835 | } | 835 | } |
836 | #endif | 836 | #endif |
837 | 837 | ||
838 | for_each_cpu_mask(j, policy->cpus) { | 838 | for_each_cpu_mask_nr(j, policy->cpus) { |
839 | if (cpu == j) | 839 | if (cpu == j) |
840 | continue; | 840 | continue; |
841 | 841 | ||
@@ -898,14 +898,14 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) | |||
898 | } | 898 | } |
899 | 899 | ||
900 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | 900 | spin_lock_irqsave(&cpufreq_driver_lock, flags); |
901 | for_each_cpu_mask(j, policy->cpus) { | 901 | for_each_cpu_mask_nr(j, policy->cpus) { |
902 | per_cpu(cpufreq_cpu_data, j) = policy; | 902 | per_cpu(cpufreq_cpu_data, j) = policy; |
903 | per_cpu(policy_cpu, j) = policy->cpu; | 903 | per_cpu(policy_cpu, j) = policy->cpu; |
904 | } | 904 | } |
905 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | 905 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); |
906 | 906 | ||
907 | /* symlink affected CPUs */ | 907 | /* symlink affected CPUs */ |
908 | for_each_cpu_mask(j, policy->cpus) { | 908 | for_each_cpu_mask_nr(j, policy->cpus) { |
909 | if (j == cpu) | 909 | if (j == cpu) |
910 | continue; | 910 | continue; |
911 | if (!cpu_online(j)) | 911 | if (!cpu_online(j)) |
@@ -945,7 +945,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) | |||
945 | 945 | ||
946 | err_out_unregister: | 946 | err_out_unregister: |
947 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | 947 | spin_lock_irqsave(&cpufreq_driver_lock, flags); |
948 | for_each_cpu_mask(j, policy->cpus) | 948 | for_each_cpu_mask_nr(j, policy->cpus) |
949 | per_cpu(cpufreq_cpu_data, j) = NULL; | 949 | per_cpu(cpufreq_cpu_data, j) = NULL; |
950 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | 950 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); |
951 | 951 | ||
@@ -1028,7 +1028,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) | |||
1028 | * the sysfs links afterwards. | 1028 | * the sysfs links afterwards. |
1029 | */ | 1029 | */ |
1030 | if (unlikely(cpus_weight(data->cpus) > 1)) { | 1030 | if (unlikely(cpus_weight(data->cpus) > 1)) { |
1031 | for_each_cpu_mask(j, data->cpus) { | 1031 | for_each_cpu_mask_nr(j, data->cpus) { |
1032 | if (j == cpu) | 1032 | if (j == cpu) |
1033 | continue; | 1033 | continue; |
1034 | per_cpu(cpufreq_cpu_data, j) = NULL; | 1034 | per_cpu(cpufreq_cpu_data, j) = NULL; |
@@ -1038,7 +1038,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) | |||
1038 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | 1038 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); |
1039 | 1039 | ||
1040 | if (unlikely(cpus_weight(data->cpus) > 1)) { | 1040 | if (unlikely(cpus_weight(data->cpus) > 1)) { |
1041 | for_each_cpu_mask(j, data->cpus) { | 1041 | for_each_cpu_mask_nr(j, data->cpus) { |
1042 | if (j == cpu) | 1042 | if (j == cpu) |
1043 | continue; | 1043 | continue; |
1044 | dprintk("removing link for cpu %u\n", j); | 1044 | dprintk("removing link for cpu %u\n", j); |
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 5d3a04ba6ad2..fe565ee43757 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c | |||
@@ -497,7 +497,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, | |||
497 | return rc; | 497 | return rc; |
498 | } | 498 | } |
499 | 499 | ||
500 | for_each_cpu_mask(j, policy->cpus) { | 500 | for_each_cpu_mask_nr(j, policy->cpus) { |
501 | struct cpu_dbs_info_s *j_dbs_info; | 501 | struct cpu_dbs_info_s *j_dbs_info; |
502 | j_dbs_info = &per_cpu(cpu_dbs_info, j); | 502 | j_dbs_info = &per_cpu(cpu_dbs_info, j); |
503 | j_dbs_info->cur_policy = policy; | 503 | j_dbs_info->cur_policy = policy; |
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index d2af20dda382..33855cb3cf16 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c | |||
@@ -367,7 +367,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) | |||
367 | 367 | ||
368 | /* Get Idle Time */ | 368 | /* Get Idle Time */ |
369 | idle_ticks = UINT_MAX; | 369 | idle_ticks = UINT_MAX; |
370 | for_each_cpu_mask(j, policy->cpus) { | 370 | for_each_cpu_mask_nr(j, policy->cpus) { |
371 | cputime64_t total_idle_ticks; | 371 | cputime64_t total_idle_ticks; |
372 | unsigned int tmp_idle_ticks; | 372 | unsigned int tmp_idle_ticks; |
373 | struct cpu_dbs_info_s *j_dbs_info; | 373 | struct cpu_dbs_info_s *j_dbs_info; |
@@ -521,7 +521,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, | |||
521 | return rc; | 521 | return rc; |
522 | } | 522 | } |
523 | 523 | ||
524 | for_each_cpu_mask(j, policy->cpus) { | 524 | for_each_cpu_mask_nr(j, policy->cpus) { |
525 | struct cpu_dbs_info_s *j_dbs_info; | 525 | struct cpu_dbs_info_s *j_dbs_info; |
526 | j_dbs_info = &per_cpu(cpu_dbs_info, j); | 526 | j_dbs_info = &per_cpu(cpu_dbs_info, j); |
527 | j_dbs_info->cur_policy = policy; | 527 | j_dbs_info->cur_policy = policy; |
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c index cb2ac01a41a1..32244aa7cc0c 100644 --- a/drivers/cpufreq/cpufreq_userspace.c +++ b/drivers/cpufreq/cpufreq_userspace.c | |||
@@ -30,16 +30,18 @@ | |||
30 | /** | 30 | /** |
31 | * A few values needed by the userspace governor | 31 | * A few values needed by the userspace governor |
32 | */ | 32 | */ |
33 | static unsigned int cpu_max_freq[NR_CPUS]; | 33 | static DEFINE_PER_CPU(unsigned int, cpu_max_freq); |
34 | static unsigned int cpu_min_freq[NR_CPUS]; | 34 | static DEFINE_PER_CPU(unsigned int, cpu_min_freq); |
35 | static unsigned int cpu_cur_freq[NR_CPUS]; /* current CPU freq */ | 35 | static DEFINE_PER_CPU(unsigned int, cpu_cur_freq); /* current CPU freq */ |
36 | static unsigned int cpu_set_freq[NR_CPUS]; /* CPU freq desired by userspace */ | 36 | static DEFINE_PER_CPU(unsigned int, cpu_set_freq); /* CPU freq desired by |
37 | static unsigned int cpu_is_managed[NR_CPUS]; | 37 | userspace */ |
38 | static DEFINE_PER_CPU(unsigned int, cpu_is_managed); | ||
38 | 39 | ||
39 | static DEFINE_MUTEX (userspace_mutex); | 40 | static DEFINE_MUTEX (userspace_mutex); |
40 | static int cpus_using_userspace_governor; | 41 | static int cpus_using_userspace_governor; |
41 | 42 | ||
42 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg) | 43 | #define dprintk(msg...) \ |
44 | cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg) | ||
43 | 45 | ||
44 | /* keep track of frequency transitions */ | 46 | /* keep track of frequency transitions */ |
45 | static int | 47 | static int |
@@ -48,12 +50,12 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val, | |||
48 | { | 50 | { |
49 | struct cpufreq_freqs *freq = data; | 51 | struct cpufreq_freqs *freq = data; |
50 | 52 | ||
51 | if (!cpu_is_managed[freq->cpu]) | 53 | if (!per_cpu(cpu_is_managed, freq->cpu)) |
52 | return 0; | 54 | return 0; |
53 | 55 | ||
54 | dprintk("saving cpu_cur_freq of cpu %u to be %u kHz\n", | 56 | dprintk("saving cpu_cur_freq of cpu %u to be %u kHz\n", |
55 | freq->cpu, freq->new); | 57 | freq->cpu, freq->new); |
56 | cpu_cur_freq[freq->cpu] = freq->new; | 58 | per_cpu(cpu_cur_freq, freq->cpu) = freq->new; |
57 | 59 | ||
58 | return 0; | 60 | return 0; |
59 | } | 61 | } |
@@ -77,15 +79,15 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq) | |||
77 | dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); | 79 | dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); |
78 | 80 | ||
79 | mutex_lock(&userspace_mutex); | 81 | mutex_lock(&userspace_mutex); |
80 | if (!cpu_is_managed[policy->cpu]) | 82 | if (!per_cpu(cpu_is_managed, policy->cpu)) |
81 | goto err; | 83 | goto err; |
82 | 84 | ||
83 | cpu_set_freq[policy->cpu] = freq; | 85 | per_cpu(cpu_set_freq, policy->cpu) = freq; |
84 | 86 | ||
85 | if (freq < cpu_min_freq[policy->cpu]) | 87 | if (freq < per_cpu(cpu_min_freq, policy->cpu)) |
86 | freq = cpu_min_freq[policy->cpu]; | 88 | freq = per_cpu(cpu_min_freq, policy->cpu); |
87 | if (freq > cpu_max_freq[policy->cpu]) | 89 | if (freq > per_cpu(cpu_max_freq, policy->cpu)) |
88 | freq = cpu_max_freq[policy->cpu]; | 90 | freq = per_cpu(cpu_max_freq, policy->cpu); |
89 | 91 | ||
90 | /* | 92 | /* |
91 | * We're safe from concurrent calls to ->target() here | 93 | * We're safe from concurrent calls to ->target() here |
@@ -104,7 +106,7 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq) | |||
104 | 106 | ||
105 | static ssize_t show_speed(struct cpufreq_policy *policy, char *buf) | 107 | static ssize_t show_speed(struct cpufreq_policy *policy, char *buf) |
106 | { | 108 | { |
107 | return sprintf(buf, "%u\n", cpu_cur_freq[policy->cpu]); | 109 | return sprintf(buf, "%u\n", per_cpu(cpu_cur_freq, policy->cpu)); |
108 | } | 110 | } |
109 | 111 | ||
110 | static int cpufreq_governor_userspace(struct cpufreq_policy *policy, | 112 | static int cpufreq_governor_userspace(struct cpufreq_policy *policy, |
@@ -127,12 +129,17 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy, | |||
127 | } | 129 | } |
128 | cpus_using_userspace_governor++; | 130 | cpus_using_userspace_governor++; |
129 | 131 | ||
130 | cpu_is_managed[cpu] = 1; | 132 | per_cpu(cpu_is_managed, cpu) = 1; |
131 | cpu_min_freq[cpu] = policy->min; | 133 | per_cpu(cpu_min_freq, cpu) = policy->min; |
132 | cpu_max_freq[cpu] = policy->max; | 134 | per_cpu(cpu_max_freq, cpu) = policy->max; |
133 | cpu_cur_freq[cpu] = policy->cur; | 135 | per_cpu(cpu_cur_freq, cpu) = policy->cur; |
134 | cpu_set_freq[cpu] = policy->cur; | 136 | per_cpu(cpu_set_freq, cpu) = policy->cur; |
135 | dprintk("managing cpu %u started (%u - %u kHz, currently %u kHz)\n", cpu, cpu_min_freq[cpu], cpu_max_freq[cpu], cpu_cur_freq[cpu]); | 137 | dprintk("managing cpu %u started " |
138 | "(%u - %u kHz, currently %u kHz)\n", | ||
139 | cpu, | ||
140 | per_cpu(cpu_min_freq, cpu), | ||
141 | per_cpu(cpu_max_freq, cpu), | ||
142 | per_cpu(cpu_cur_freq, cpu)); | ||
136 | 143 | ||
137 | mutex_unlock(&userspace_mutex); | 144 | mutex_unlock(&userspace_mutex); |
138 | break; | 145 | break; |
@@ -145,34 +152,34 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy, | |||
145 | CPUFREQ_TRANSITION_NOTIFIER); | 152 | CPUFREQ_TRANSITION_NOTIFIER); |
146 | } | 153 | } |
147 | 154 | ||
148 | cpu_is_managed[cpu] = 0; | 155 | per_cpu(cpu_is_managed, cpu) = 0; |
149 | cpu_min_freq[cpu] = 0; | 156 | per_cpu(cpu_min_freq, cpu) = 0; |
150 | cpu_max_freq[cpu] = 0; | 157 | per_cpu(cpu_max_freq, cpu) = 0; |
151 | cpu_set_freq[cpu] = 0; | 158 | per_cpu(cpu_set_freq, cpu) = 0; |
152 | dprintk("managing cpu %u stopped\n", cpu); | 159 | dprintk("managing cpu %u stopped\n", cpu); |
153 | mutex_unlock(&userspace_mutex); | 160 | mutex_unlock(&userspace_mutex); |
154 | break; | 161 | break; |
155 | case CPUFREQ_GOV_LIMITS: | 162 | case CPUFREQ_GOV_LIMITS: |
156 | mutex_lock(&userspace_mutex); | 163 | mutex_lock(&userspace_mutex); |
157 | dprintk("limit event for cpu %u: %u - %u kHz," | 164 | dprintk("limit event for cpu %u: %u - %u kHz, " |
158 | "currently %u kHz, last set to %u kHz\n", | 165 | "currently %u kHz, last set to %u kHz\n", |
159 | cpu, policy->min, policy->max, | 166 | cpu, policy->min, policy->max, |
160 | cpu_cur_freq[cpu], cpu_set_freq[cpu]); | 167 | per_cpu(cpu_cur_freq, cpu), |
161 | if (policy->max < cpu_set_freq[cpu]) { | 168 | per_cpu(cpu_set_freq, cpu)); |
169 | if (policy->max < per_cpu(cpu_set_freq, cpu)) { | ||
162 | __cpufreq_driver_target(policy, policy->max, | 170 | __cpufreq_driver_target(policy, policy->max, |
163 | CPUFREQ_RELATION_H); | 171 | CPUFREQ_RELATION_H); |
164 | } | 172 | } else if (policy->min > per_cpu(cpu_set_freq, cpu)) { |
165 | else if (policy->min > cpu_set_freq[cpu]) { | ||
166 | __cpufreq_driver_target(policy, policy->min, | 173 | __cpufreq_driver_target(policy, policy->min, |
167 | CPUFREQ_RELATION_L); | 174 | CPUFREQ_RELATION_L); |
168 | } | 175 | } else { |
169 | else { | 176 | __cpufreq_driver_target(policy, |
170 | __cpufreq_driver_target(policy, cpu_set_freq[cpu], | 177 | per_cpu(cpu_set_freq, cpu), |
171 | CPUFREQ_RELATION_L); | 178 | CPUFREQ_RELATION_L); |
172 | } | 179 | } |
173 | cpu_min_freq[cpu] = policy->min; | 180 | per_cpu(cpu_min_freq, cpu) = policy->min; |
174 | cpu_max_freq[cpu] = policy->max; | 181 | per_cpu(cpu_max_freq, cpu) = policy->max; |
175 | cpu_cur_freq[cpu] = policy->cur; | 182 | per_cpu(cpu_cur_freq, cpu) = policy->cur; |
176 | mutex_unlock(&userspace_mutex); | 183 | mutex_unlock(&userspace_mutex); |
177 | break; | 184 | break; |
178 | } | 185 | } |
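
The userspace governor now keeps its bookkeeping in per-CPU variables instead of NR_CPUS-sized arrays. The conversion pattern itself is generic; here is a minimal sketch using only DEFINE_PER_CPU()/per_cpu() from <linux/percpu.h> (the example_* names are illustrative, not part of the driver):

#include <linux/percpu.h>

/* one counter per possible CPU instead of a static array[NR_CPUS] */
static DEFINE_PER_CPU(unsigned int, example_cur_freq);

static void example_save_freq(unsigned int cpu, unsigned int khz)
{
	/* per_cpu(var, cpu) replaces example_cur_freq[cpu] */
	per_cpu(example_cur_freq, cpu) = khz;
}

static unsigned int example_read_freq(unsigned int cpu)
{
	return per_cpu(example_cur_freq, cpu);
}

Compared with a static array[NR_CPUS], the per-CPU form scales with the CPUs that can actually exist in this boot and keeps each CPU's value in that CPU's own per-CPU area.
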
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c index bf5b92f86df7..ec249d2db633 100644 --- a/drivers/dca/dca-core.c +++ b/drivers/dca/dca-core.c | |||
@@ -28,13 +28,29 @@ | |||
28 | #include <linux/device.h> | 28 | #include <linux/device.h> |
29 | #include <linux/dca.h> | 29 | #include <linux/dca.h> |
30 | 30 | ||
31 | MODULE_LICENSE("GPL"); | 31 | #define DCA_VERSION "1.4" |
32 | 32 | ||
33 | /* For now we're assuming a single, global, DCA provider for the system. */ | 33 | MODULE_VERSION(DCA_VERSION); |
34 | MODULE_LICENSE("GPL"); | ||
35 | MODULE_AUTHOR("Intel Corporation"); | ||
34 | 36 | ||
35 | static DEFINE_SPINLOCK(dca_lock); | 37 | static DEFINE_SPINLOCK(dca_lock); |
36 | 38 | ||
37 | static struct dca_provider *global_dca = NULL; | 39 | static LIST_HEAD(dca_providers); |
40 | |||
41 | static struct dca_provider *dca_find_provider_by_dev(struct device *dev) | ||
42 | { | ||
43 | struct dca_provider *dca, *ret = NULL; | ||
44 | |||
45 | list_for_each_entry(dca, &dca_providers, node) { | ||
46 | if ((!dev) || (dca->ops->dev_managed(dca, dev))) { | ||
47 | ret = dca; | ||
48 | break; | ||
49 | } | ||
50 | } | ||
51 | |||
52 | return ret; | ||
53 | } | ||
38 | 54 | ||
39 | /** | 55 | /** |
40 | * dca_add_requester - add a dca client to the list | 56 | * dca_add_requester - add a dca client to the list |
@@ -42,25 +58,39 @@ static struct dca_provider *global_dca = NULL; | |||
42 | */ | 58 | */ |
43 | int dca_add_requester(struct device *dev) | 59 | int dca_add_requester(struct device *dev) |
44 | { | 60 | { |
45 | int err, slot; | 61 | struct dca_provider *dca; |
62 | int err, slot = -ENODEV; | ||
46 | 63 | ||
47 | if (!global_dca) | 64 | if (!dev) |
48 | return -ENODEV; | 65 | return -EFAULT; |
49 | 66 | ||
50 | spin_lock(&dca_lock); | 67 | spin_lock(&dca_lock); |
51 | slot = global_dca->ops->add_requester(global_dca, dev); | 68 | |
52 | spin_unlock(&dca_lock); | 69 | /* check if the requester has not been added already */ |
53 | if (slot < 0) | 70 | dca = dca_find_provider_by_dev(dev); |
71 | if (dca) { | ||
72 | spin_unlock(&dca_lock); | ||
73 | return -EEXIST; | ||
74 | } | ||
75 | |||
76 | list_for_each_entry(dca, &dca_providers, node) { | ||
77 | slot = dca->ops->add_requester(dca, dev); | ||
78 | if (slot >= 0) | ||
79 | break; | ||
80 | } | ||
81 | if (slot < 0) { | ||
82 | spin_unlock(&dca_lock); | ||
54 | return slot; | 83 | return slot; |
84 | } | ||
55 | 85 | ||
56 | err = dca_sysfs_add_req(global_dca, dev, slot); | 86 | err = dca_sysfs_add_req(dca, dev, slot); |
57 | if (err) { | 87 | if (err) { |
58 | spin_lock(&dca_lock); | 88 | dca->ops->remove_requester(dca, dev); |
59 | global_dca->ops->remove_requester(global_dca, dev); | ||
60 | spin_unlock(&dca_lock); | 89 | spin_unlock(&dca_lock); |
61 | return err; | 90 | return err; |
62 | } | 91 | } |
63 | 92 | ||
93 | spin_unlock(&dca_lock); | ||
64 | return 0; | 94 | return 0; |
65 | } | 95 | } |
66 | EXPORT_SYMBOL_GPL(dca_add_requester); | 96 | EXPORT_SYMBOL_GPL(dca_add_requester); |
@@ -71,30 +101,78 @@ EXPORT_SYMBOL_GPL(dca_add_requester); | |||
71 | */ | 101 | */ |
72 | int dca_remove_requester(struct device *dev) | 102 | int dca_remove_requester(struct device *dev) |
73 | { | 103 | { |
104 | struct dca_provider *dca; | ||
74 | int slot; | 105 | int slot; |
75 | if (!global_dca) | 106 | |
76 | return -ENODEV; | 107 | if (!dev) |
108 | return -EFAULT; | ||
77 | 109 | ||
78 | spin_lock(&dca_lock); | 110 | spin_lock(&dca_lock); |
79 | slot = global_dca->ops->remove_requester(global_dca, dev); | 111 | dca = dca_find_provider_by_dev(dev); |
80 | spin_unlock(&dca_lock); | 112 | if (!dca) { |
81 | if (slot < 0) | 113 | spin_unlock(&dca_lock); |
114 | return -ENODEV; | ||
115 | } | ||
116 | slot = dca->ops->remove_requester(dca, dev); | ||
117 | if (slot < 0) { | ||
118 | spin_unlock(&dca_lock); | ||
82 | return slot; | 119 | return slot; |
120 | } | ||
83 | 121 | ||
84 | dca_sysfs_remove_req(global_dca, slot); | 122 | dca_sysfs_remove_req(dca, slot); |
123 | |||
124 | spin_unlock(&dca_lock); | ||
85 | return 0; | 125 | return 0; |
86 | } | 126 | } |
87 | EXPORT_SYMBOL_GPL(dca_remove_requester); | 127 | EXPORT_SYMBOL_GPL(dca_remove_requester); |
88 | 128 | ||
89 | /** | 129 | /** |
90 | * dca_get_tag - return the dca tag for the given cpu | 130 | * dca_common_get_tag - return the dca tag (serves both new and old api) |
131 | * @dev - the device that wants dca service | ||
91 | * @cpu - the cpuid as returned by get_cpu() | 132 | * @cpu - the cpuid as returned by get_cpu() |
92 | */ | 133 | */ |
93 | u8 dca_get_tag(int cpu) | 134 | u8 dca_common_get_tag(struct device *dev, int cpu) |
94 | { | 135 | { |
95 | if (!global_dca) | 136 | struct dca_provider *dca; |
137 | u8 tag; | ||
138 | |||
139 | spin_lock(&dca_lock); | ||
140 | |||
141 | dca = dca_find_provider_by_dev(dev); | ||
142 | if (!dca) { | ||
143 | spin_unlock(&dca_lock); | ||
96 | return -ENODEV; | 144 | return -ENODEV; |
97 | return global_dca->ops->get_tag(global_dca, cpu); | 145 | } |
146 | tag = dca->ops->get_tag(dca, dev, cpu); | ||
147 | |||
148 | spin_unlock(&dca_lock); | ||
149 | return tag; | ||
150 | } | ||
151 | |||
152 | /** | ||
153 | * dca3_get_tag - return the dca tag to the requester device | ||
154 | * for the given cpu (new api) | ||
155 | * @dev - the device that wants dca service | ||
156 | * @cpu - the cpuid as returned by get_cpu() | ||
157 | */ | ||
158 | u8 dca3_get_tag(struct device *dev, int cpu) | ||
159 | { | ||
160 | if (!dev) | ||
161 | return -EFAULT; | ||
162 | |||
163 | return dca_common_get_tag(dev, cpu); | ||
164 | } | ||
165 | EXPORT_SYMBOL_GPL(dca3_get_tag); | ||
166 | |||
167 | /** | ||
168 | * dca_get_tag - return the dca tag for the given cpu (old api) | ||
169 | * @cpu - the cpuid as returned by get_cpu() | ||
170 | */ | ||
171 | u8 dca_get_tag(int cpu) | ||
172 | { | ||
173 | struct device *dev = NULL; | ||
174 | |||
175 | return dca_common_get_tag(dev, cpu); | ||
98 | } | 176 | } |
99 | EXPORT_SYMBOL_GPL(dca_get_tag); | 177 | EXPORT_SYMBOL_GPL(dca_get_tag); |
100 | 178 | ||
@@ -140,12 +218,10 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev) | |||
140 | { | 218 | { |
141 | int err; | 219 | int err; |
142 | 220 | ||
143 | if (global_dca) | ||
144 | return -EEXIST; | ||
145 | err = dca_sysfs_add_provider(dca, dev); | 221 | err = dca_sysfs_add_provider(dca, dev); |
146 | if (err) | 222 | if (err) |
147 | return err; | 223 | return err; |
148 | global_dca = dca; | 224 | list_add(&dca->node, &dca_providers); |
149 | blocking_notifier_call_chain(&dca_provider_chain, | 225 | blocking_notifier_call_chain(&dca_provider_chain, |
150 | DCA_PROVIDER_ADD, NULL); | 226 | DCA_PROVIDER_ADD, NULL); |
151 | return 0; | 227 | return 0; |
@@ -158,11 +234,9 @@ EXPORT_SYMBOL_GPL(register_dca_provider); | |||
158 | */ | 234 | */ |
159 | void unregister_dca_provider(struct dca_provider *dca) | 235 | void unregister_dca_provider(struct dca_provider *dca) |
160 | { | 236 | { |
161 | if (!global_dca) | ||
162 | return; | ||
163 | blocking_notifier_call_chain(&dca_provider_chain, | 237 | blocking_notifier_call_chain(&dca_provider_chain, |
164 | DCA_PROVIDER_REMOVE, NULL); | 238 | DCA_PROVIDER_REMOVE, NULL); |
165 | global_dca = NULL; | 239 | list_del(&dca->node); |
166 | dca_sysfs_remove_provider(dca); | 240 | dca_sysfs_remove_provider(dca); |
167 | } | 241 | } |
168 | EXPORT_SYMBOL_GPL(unregister_dca_provider); | 242 | EXPORT_SYMBOL_GPL(unregister_dca_provider); |
@@ -187,6 +261,7 @@ EXPORT_SYMBOL_GPL(dca_unregister_notify); | |||
187 | 261 | ||
188 | static int __init dca_init(void) | 262 | static int __init dca_init(void) |
189 | { | 263 | { |
264 | printk(KERN_ERR "dca service started, version %s\n", DCA_VERSION); | ||
190 | return dca_sysfs_init(); | 265 | return dca_sysfs_init(); |
191 | } | 266 | } |
192 | 267 | ||
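
With providers kept on a list, a requester driver attaches to whichever provider manages its device and then asks for tags through the new device-aware call. A minimal sketch using only the symbols exported above; the example_* helpers and their placement are illustrative, not taken from a real driver:

#include <linux/dca.h>
#include <linux/device.h>

/* Bind a requester to the DCA core; the core walks its provider list and
 * picks the provider whose dev_managed() claims this device. */
static int example_dca_setup(struct device *dev)
{
	int err;

	err = dca_add_requester(dev);
	if (err < 0)
		return err;	/* e.g. -ENODEV: no provider claims dev */
	return 0;
}

/* Tag to put in a descriptor so the data lands close to 'cpu'. */
static u8 example_dca_tag(struct device *dev, int cpu)
{
	return dca3_get_tag(dev, cpu);	/* dca_get_tag(cpu) is the legacy form */
}

static void example_dca_teardown(struct device *dev)
{
	dca_remove_requester(dev);
}

dca3_get_tag() lets the core pick the provider that actually manages the requesting device, whereas the legacy dca_get_tag() passes a NULL device and so falls back to the first registered provider (the !dev case in dca_find_provider_by_dev() above).
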
diff --git a/drivers/dca/dca-sysfs.c b/drivers/dca/dca-sysfs.c index 9a70377bfb34..7af4b403bd2d 100644 --- a/drivers/dca/dca-sysfs.c +++ b/drivers/dca/dca-sysfs.c | |||
@@ -13,10 +13,11 @@ static spinlock_t dca_idr_lock; | |||
13 | int dca_sysfs_add_req(struct dca_provider *dca, struct device *dev, int slot) | 13 | int dca_sysfs_add_req(struct dca_provider *dca, struct device *dev, int slot) |
14 | { | 14 | { |
15 | struct device *cd; | 15 | struct device *cd; |
16 | static int req_count; | ||
16 | 17 | ||
17 | cd = device_create_drvdata(dca_class, dca->cd, | 18 | cd = device_create_drvdata(dca_class, dca->cd, |
18 | MKDEV(0, slot + 1), NULL, | 19 | MKDEV(0, slot + 1), NULL, |
19 | "requester%d", slot); | 20 | "requester%d", req_count++); |
20 | if (IS_ERR(cd)) | 21 | if (IS_ERR(cd)) |
21 | return PTR_ERR(cd); | 22 | return PTR_ERR(cd); |
22 | return 0; | 23 | return 0; |
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 6239c3df30ac..cd303901eb5b 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
@@ -4,13 +4,14 @@ | |||
4 | 4 | ||
5 | menuconfig DMADEVICES | 5 | menuconfig DMADEVICES |
6 | bool "DMA Engine support" | 6 | bool "DMA Engine support" |
7 | depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX || PPC | 7 | depends on !HIGHMEM64G && HAS_DMA |
8 | depends on !HIGHMEM64G | ||
9 | help | 8 | help |
10 | DMA engines can do asynchronous data transfers without | 9 | DMA engines can do asynchronous data transfers without |
11 | involving the host CPU. Currently, this framework can be | 10 | involving the host CPU. Currently, this framework can be |
12 | used to offload memory copies in the network stack and | 11 | used to offload memory copies in the network stack and |
13 | RAID operations in the MD driver. | 12 | RAID operations in the MD driver. This menu only presents |
13 | DMA Device drivers supported by the configured arch; it may | ||
14 | be empty in some cases. | ||
14 | 15 | ||
15 | if DMADEVICES | 16 | if DMADEVICES |
16 | 17 | ||
@@ -37,6 +38,15 @@ config INTEL_IOP_ADMA | |||
37 | help | 38 | help |
38 | Enable support for the Intel(R) IOP Series RAID engines. | 39 | Enable support for the Intel(R) IOP Series RAID engines. |
39 | 40 | ||
41 | config DW_DMAC | ||
42 | tristate "Synopsys DesignWare AHB DMA support" | ||
43 | depends on AVR32 | ||
44 | select DMA_ENGINE | ||
45 | default y if CPU_AT32AP7000 | ||
46 | help | ||
47 | Support the Synopsys DesignWare AHB DMA controller. This | ||
48 | can be integrated in chips such as the Atmel AT32ap7000. | ||
49 | |||
40 | config FSL_DMA | 50 | config FSL_DMA |
41 | bool "Freescale MPC85xx/MPC83xx DMA support" | 51 | bool "Freescale MPC85xx/MPC83xx DMA support" |
42 | depends on PPC | 52 | depends on PPC |
@@ -46,6 +56,14 @@ config FSL_DMA | |||
46 | MPC8560/40, MPC8555, MPC8548 and MPC8641 processors. | 56 | MPC8560/40, MPC8555, MPC8548 and MPC8641 processors. |
47 | The MPC8349, MPC8360 is also supported. | 57 | The MPC8349, MPC8360 is also supported. |
48 | 58 | ||
59 | config MV_XOR | ||
60 | bool "Marvell XOR engine support" | ||
61 | depends on PLAT_ORION | ||
62 | select ASYNC_CORE | ||
63 | select DMA_ENGINE | ||
64 | ---help--- | ||
65 | Enable support for the Marvell XOR engine. | ||
66 | |||
49 | config DMA_ENGINE | 67 | config DMA_ENGINE |
50 | bool | 68 | bool |
51 | 69 | ||
@@ -55,10 +73,19 @@ comment "DMA Clients" | |||
55 | config NET_DMA | 73 | config NET_DMA |
56 | bool "Network: TCP receive copy offload" | 74 | bool "Network: TCP receive copy offload" |
57 | depends on DMA_ENGINE && NET | 75 | depends on DMA_ENGINE && NET |
76 | default (INTEL_IOATDMA || FSL_DMA) | ||
58 | help | 77 | help |
59 | This enables the use of DMA engines in the network stack to | 78 | This enables the use of DMA engines in the network stack to |
60 | offload receive copy-to-user operations, freeing CPU cycles. | 79 | offload receive copy-to-user operations, freeing CPU cycles. |
61 | Since this is the main user of the DMA engine, it should be enabled; | 80 | |
62 | say Y here. | 81 | Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise |
82 | say N. | ||
83 | |||
84 | config DMATEST | ||
85 | tristate "DMA Test client" | ||
86 | depends on DMA_ENGINE | ||
87 | help | ||
88 | Simple DMA test client. Say N unless you're debugging a | ||
89 | DMA Device driver. | ||
63 | 90 | ||
64 | endif | 91 | endif |
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index c8036d945902..14f59527d4f6 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile | |||
@@ -1,6 +1,9 @@ | |||
1 | obj-$(CONFIG_DMA_ENGINE) += dmaengine.o | 1 | obj-$(CONFIG_DMA_ENGINE) += dmaengine.o |
2 | obj-$(CONFIG_NET_DMA) += iovlock.o | 2 | obj-$(CONFIG_NET_DMA) += iovlock.o |
3 | obj-$(CONFIG_DMATEST) += dmatest.o | ||
3 | obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o | 4 | obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o |
4 | ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o | 5 | ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o |
5 | obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o | 6 | obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o |
6 | obj-$(CONFIG_FSL_DMA) += fsldma.o | 7 | obj-$(CONFIG_FSL_DMA) += fsldma.o |
8 | obj-$(CONFIG_MV_XOR) += mv_xor.o | ||
9 | obj-$(CONFIG_DW_DMAC) += dw_dmac.o | ||
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 97b329e76798..dc003a3a787d 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
@@ -169,12 +169,18 @@ static void dma_client_chan_alloc(struct dma_client *client) | |||
169 | enum dma_state_client ack; | 169 | enum dma_state_client ack; |
170 | 170 | ||
171 | /* Find a channel */ | 171 | /* Find a channel */ |
172 | list_for_each_entry(device, &dma_device_list, global_node) | 172 | list_for_each_entry(device, &dma_device_list, global_node) { |
173 | /* Does the client require a specific DMA controller? */ | ||
174 | if (client->slave && client->slave->dma_dev | ||
175 | && client->slave->dma_dev != device->dev) | ||
176 | continue; | ||
177 | |||
173 | list_for_each_entry(chan, &device->channels, device_node) { | 178 | list_for_each_entry(chan, &device->channels, device_node) { |
174 | if (!dma_chan_satisfies_mask(chan, client->cap_mask)) | 179 | if (!dma_chan_satisfies_mask(chan, client->cap_mask)) |
175 | continue; | 180 | continue; |
176 | 181 | ||
177 | desc = chan->device->device_alloc_chan_resources(chan); | 182 | desc = chan->device->device_alloc_chan_resources( |
183 | chan, client); | ||
178 | if (desc >= 0) { | 184 | if (desc >= 0) { |
179 | ack = client->event_callback(client, | 185 | ack = client->event_callback(client, |
180 | chan, | 186 | chan, |
@@ -183,12 +189,14 @@ static void dma_client_chan_alloc(struct dma_client *client) | |||
183 | /* we are done once this client rejects | 189 | /* we are done once this client rejects |
184 | * an available resource | 190 | * an available resource |
185 | */ | 191 | */ |
186 | if (ack == DMA_ACK) | 192 | if (ack == DMA_ACK) { |
187 | dma_chan_get(chan); | 193 | dma_chan_get(chan); |
188 | else if (ack == DMA_NAK) | 194 | chan->client_count++; |
195 | } else if (ack == DMA_NAK) | ||
189 | return; | 196 | return; |
190 | } | 197 | } |
191 | } | 198 | } |
199 | } | ||
192 | } | 200 | } |
193 | 201 | ||
194 | enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) | 202 | enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) |
@@ -272,8 +280,10 @@ static void dma_clients_notify_removed(struct dma_chan *chan) | |||
272 | /* client was holding resources for this channel so | 280 | /* client was holding resources for this channel so |
273 | * free it | 281 | * free it |
274 | */ | 282 | */ |
275 | if (ack == DMA_ACK) | 283 | if (ack == DMA_ACK) { |
276 | dma_chan_put(chan); | 284 | dma_chan_put(chan); |
285 | chan->client_count--; | ||
286 | } | ||
277 | } | 287 | } |
278 | 288 | ||
279 | mutex_unlock(&dma_list_mutex); | 289 | mutex_unlock(&dma_list_mutex); |
@@ -285,6 +295,10 @@ static void dma_clients_notify_removed(struct dma_chan *chan) | |||
285 | */ | 295 | */ |
286 | void dma_async_client_register(struct dma_client *client) | 296 | void dma_async_client_register(struct dma_client *client) |
287 | { | 297 | { |
298 | /* validate client data */ | ||
299 | BUG_ON(dma_has_cap(DMA_SLAVE, client->cap_mask) && | ||
300 | !client->slave); | ||
301 | |||
288 | mutex_lock(&dma_list_mutex); | 302 | mutex_lock(&dma_list_mutex); |
289 | list_add_tail(&client->global_node, &dma_client_list); | 303 | list_add_tail(&client->global_node, &dma_client_list); |
290 | mutex_unlock(&dma_list_mutex); | 304 | mutex_unlock(&dma_list_mutex); |
@@ -313,8 +327,10 @@ void dma_async_client_unregister(struct dma_client *client) | |||
313 | ack = client->event_callback(client, chan, | 327 | ack = client->event_callback(client, chan, |
314 | DMA_RESOURCE_REMOVED); | 328 | DMA_RESOURCE_REMOVED); |
315 | 329 | ||
316 | if (ack == DMA_ACK) | 330 | if (ack == DMA_ACK) { |
317 | dma_chan_put(chan); | 331 | dma_chan_put(chan); |
332 | chan->client_count--; | ||
333 | } | ||
318 | } | 334 | } |
319 | 335 | ||
320 | list_del(&client->global_node); | 336 | list_del(&client->global_node); |
@@ -359,6 +375,10 @@ int dma_async_device_register(struct dma_device *device) | |||
359 | !device->device_prep_dma_memset); | 375 | !device->device_prep_dma_memset); |
360 | BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) && | 376 | BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) && |
361 | !device->device_prep_dma_interrupt); | 377 | !device->device_prep_dma_interrupt); |
378 | BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && | ||
379 | !device->device_prep_slave_sg); | ||
380 | BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && | ||
381 | !device->device_terminate_all); | ||
362 | 382 | ||
363 | BUG_ON(!device->device_alloc_chan_resources); | 383 | BUG_ON(!device->device_alloc_chan_resources); |
364 | BUG_ON(!device->device_free_chan_resources); | 384 | BUG_ON(!device->device_free_chan_resources); |
@@ -378,7 +398,7 @@ int dma_async_device_register(struct dma_device *device) | |||
378 | 398 | ||
379 | chan->chan_id = chancnt++; | 399 | chan->chan_id = chancnt++; |
380 | chan->dev.class = &dma_devclass; | 400 | chan->dev.class = &dma_devclass; |
381 | chan->dev.parent = NULL; | 401 | chan->dev.parent = device->dev; |
382 | snprintf(chan->dev.bus_id, BUS_ID_SIZE, "dma%dchan%d", | 402 | snprintf(chan->dev.bus_id, BUS_ID_SIZE, "dma%dchan%d", |
383 | device->dev_id, chan->chan_id); | 403 | device->dev_id, chan->chan_id); |
384 | 404 | ||
@@ -394,6 +414,7 @@ int dma_async_device_register(struct dma_device *device) | |||
394 | kref_get(&device->refcount); | 414 | kref_get(&device->refcount); |
395 | kref_get(&device->refcount); | 415 | kref_get(&device->refcount); |
396 | kref_init(&chan->refcount); | 416 | kref_init(&chan->refcount); |
417 | chan->client_count = 0; | ||
397 | chan->slow_ref = 0; | 418 | chan->slow_ref = 0; |
398 | INIT_RCU_HEAD(&chan->rcu); | 419 | INIT_RCU_HEAD(&chan->rcu); |
399 | } | 420 | } |
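
A client of this interface declares the capabilities it needs, optionally pins itself to one controller through client->slave, and answers the event callback as channels appear and disappear; the core's client_count accounting above tracks the DMA_ACK answers. Below is a minimal sketch assuming the 2.6.26-era client API used elsewhere in this patch (dma_async_client_chan_request() and the DMA_RESOURCE_*/DMA_DUP values are taken from that API; the example_* names are illustrative):

#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/module.h>

static enum dma_state_client
example_event(struct dma_client *client, struct dma_chan *chan,
	      enum dma_state state)
{
	switch (state) {
	case DMA_RESOURCE_AVAILABLE:
		return DMA_ACK;	/* keep this channel; the core bumps client_count */
	case DMA_RESOURCE_REMOVED:
		return DMA_ACK;	/* we held it, let the core drop the reference */
	default:
		return DMA_DUP;	/* no interest in the other events here */
	}
}

static struct dma_client example_client = {
	.event_callback	= example_event,
};

static int __init example_init(void)
{
	dma_cap_set(DMA_MEMCPY, example_client.cap_mask);
	/* .slave stays NULL, so channels of any controller are offered;
	 * a DMA_SLAVE client must fill it in (see the BUG_ON above) */
	dma_async_client_register(&example_client);
	dma_async_client_chan_request(&example_client);
	return 0;
}

static void __exit example_exit(void)
{
	dma_async_client_unregister(&example_client);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
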
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c new file mode 100644 index 000000000000..a08d19704743 --- /dev/null +++ b/drivers/dma/dmatest.c | |||
@@ -0,0 +1,444 @@ | |||
1 | /* | ||
2 | * DMA Engine test module | ||
3 | * | ||
4 | * Copyright (C) 2007 Atmel Corporation | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | #include <linux/delay.h> | ||
11 | #include <linux/dmaengine.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/kthread.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/moduleparam.h> | ||
16 | #include <linux/random.h> | ||
17 | #include <linux/wait.h> | ||
18 | |||
19 | static unsigned int test_buf_size = 16384; | ||
20 | module_param(test_buf_size, uint, S_IRUGO); | ||
21 | MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer"); | ||
22 | |||
23 | static char test_channel[BUS_ID_SIZE]; | ||
24 | module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO); | ||
25 | MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)"); | ||
26 | |||
27 | static char test_device[BUS_ID_SIZE]; | ||
28 | module_param_string(device, test_device, sizeof(test_device), S_IRUGO); | ||
29 | MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)"); | ||
30 | |||
31 | static unsigned int threads_per_chan = 1; | ||
32 | module_param(threads_per_chan, uint, S_IRUGO); | ||
33 | MODULE_PARM_DESC(threads_per_chan, | ||
34 | "Number of threads to start per channel (default: 1)"); | ||
35 | |||
36 | static unsigned int max_channels; | ||
37 | module_param(max_channels, uint, S_IRUGO); | ||
38 | MODULE_PARM_DESC(max_channels, | ||
39 | "Maximum number of channels to use (default: all)"); | ||
40 | |||
41 | /* | ||
42 | * Initialization patterns. All bytes in the source buffer have bit 7 | ||
43 | * set, all bytes in the destination buffer have bit 7 cleared. | ||
44 | * | ||
45 | * Bit 6 is set for all bytes which are to be copied by the DMA | ||
46 | * engine. Bit 5 is set for all bytes which are to be overwritten by | ||
47 | * the DMA engine. | ||
48 | * | ||
49 | * The remaining bits are the inverse of a counter which increments by | ||
50 | * one for each byte address. | ||
51 | */ | ||
52 | #define PATTERN_SRC 0x80 | ||
53 | #define PATTERN_DST 0x00 | ||
54 | #define PATTERN_COPY 0x40 | ||
55 | #define PATTERN_OVERWRITE 0x20 | ||
56 | #define PATTERN_COUNT_MASK 0x1f | ||
57 | |||
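
Worked out per byte, the encoding above makes every location self-describing: bit 7 identifies the buffer, bits 6/5 mark the copied/overwritten window, and the low five bits hold an inverted counter of the byte's offset. The two helpers below restate what dmatest_verify() is later asked to expect for a transfer of 'len' bytes from src_off to dst_off; they are an illustrative re-derivation against the same #defines, not part of dmatest.c:

#define PATTERN_SRC		0x80
#define PATTERN_DST		0x00
#define PATTERN_COPY		0x40
#define PATTERN_OVERWRITE	0x20
#define PATTERN_COUNT_MASK	0x1f

/* Byte written into srcbuf[i] before a copy of 'len' bytes starting at 'off'. */
static unsigned char expected_src_byte(unsigned int i, unsigned int off,
				       unsigned int len)
{
	unsigned char base = PATTERN_SRC;

	if (i >= off && i < off + len)
		base |= PATTERN_COPY;	/* inside the window handed to the DMA engine */
	return base | (~i & PATTERN_COUNT_MASK);
}

/* Byte expected in dstbuf[i] after srcbuf[src_off..] was copied to dstbuf[dst_off..]. */
static unsigned char expected_dst_byte(unsigned int i, unsigned int src_off,
				       unsigned int dst_off, unsigned int len)
{
	if (i >= dst_off && i < dst_off + len)
		/* copied bytes carry the source pattern and the source counter */
		return PATTERN_SRC | PATTERN_COPY |
		       (~(src_off + (i - dst_off)) & PATTERN_COUNT_MASK);
	return PATTERN_DST | (~i & PATTERN_COUNT_MASK);	/* untouched: original dst fill */
}

For example, with src_off = 0 and dst_off = 8, the first copied destination byte expects 0x80 | 0x40 | (~0 & 0x1f) = 0xdf.
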
58 | struct dmatest_thread { | ||
59 | struct list_head node; | ||
60 | struct task_struct *task; | ||
61 | struct dma_chan *chan; | ||
62 | u8 *srcbuf; | ||
63 | u8 *dstbuf; | ||
64 | }; | ||
65 | |||
66 | struct dmatest_chan { | ||
67 | struct list_head node; | ||
68 | struct dma_chan *chan; | ||
69 | struct list_head threads; | ||
70 | }; | ||
71 | |||
72 | /* | ||
73 | * These are protected by dma_list_mutex since they're only used by | ||
74 | * the DMA client event callback | ||
75 | */ | ||
76 | static LIST_HEAD(dmatest_channels); | ||
77 | static unsigned int nr_channels; | ||
78 | |||
79 | static bool dmatest_match_channel(struct dma_chan *chan) | ||
80 | { | ||
81 | if (test_channel[0] == '\0') | ||
82 | return true; | ||
83 | return strcmp(chan->dev.bus_id, test_channel) == 0; | ||
84 | } | ||
85 | |||
86 | static bool dmatest_match_device(struct dma_device *device) | ||
87 | { | ||
88 | if (test_device[0] == '\0') | ||
89 | return true; | ||
90 | return strcmp(device->dev->bus_id, test_device) == 0; | ||
91 | } | ||
92 | |||
93 | static unsigned long dmatest_random(void) | ||
94 | { | ||
95 | unsigned long buf; | ||
96 | |||
97 | get_random_bytes(&buf, sizeof(buf)); | ||
98 | return buf; | ||
99 | } | ||
100 | |||
101 | static void dmatest_init_srcbuf(u8 *buf, unsigned int start, unsigned int len) | ||
102 | { | ||
103 | unsigned int i; | ||
104 | |||
105 | for (i = 0; i < start; i++) | ||
106 | buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); | ||
107 | for ( ; i < start + len; i++) | ||
108 | buf[i] = PATTERN_SRC | PATTERN_COPY | ||
109 | | (~i & PATTERN_COUNT_MASK); | ||
110 | for ( ; i < test_buf_size; i++) | ||
111 | buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); | ||
112 | } | ||
113 | |||
114 | static void dmatest_init_dstbuf(u8 *buf, unsigned int start, unsigned int len) | ||
115 | { | ||
116 | unsigned int i; | ||
117 | |||
118 | for (i = 0; i < start; i++) | ||
119 | buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); | ||
120 | for ( ; i < start + len; i++) | ||
121 | buf[i] = PATTERN_DST | PATTERN_OVERWRITE | ||
122 | | (~i & PATTERN_COUNT_MASK); | ||
123 | for ( ; i < test_buf_size; i++) | ||
124 | buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); | ||
125 | } | ||
126 | |||
127 | static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index, | ||
128 | unsigned int counter, bool is_srcbuf) | ||
129 | { | ||
130 | u8 diff = actual ^ pattern; | ||
131 | u8 expected = pattern | (~counter & PATTERN_COUNT_MASK); | ||
132 | const char *thread_name = current->comm; | ||
133 | |||
134 | if (is_srcbuf) | ||
135 | pr_warning("%s: srcbuf[0x%x] overwritten!" | ||
136 | " Expected %02x, got %02x\n", | ||
137 | thread_name, index, expected, actual); | ||
138 | else if ((pattern & PATTERN_COPY) | ||
139 | && (diff & (PATTERN_COPY | PATTERN_OVERWRITE))) | ||
140 | pr_warning("%s: dstbuf[0x%x] not copied!" | ||
141 | " Expected %02x, got %02x\n", | ||
142 | thread_name, index, expected, actual); | ||
143 | else if (diff & PATTERN_SRC) | ||
144 | pr_warning("%s: dstbuf[0x%x] was copied!" | ||
145 | " Expected %02x, got %02x\n", | ||
146 | thread_name, index, expected, actual); | ||
147 | else | ||
148 | pr_warning("%s: dstbuf[0x%x] mismatch!" | ||
149 | " Expected %02x, got %02x\n", | ||
150 | thread_name, index, expected, actual); | ||
151 | } | ||
152 | |||
153 | static unsigned int dmatest_verify(u8 *buf, unsigned int start, | ||
154 | unsigned int end, unsigned int counter, u8 pattern, | ||
155 | bool is_srcbuf) | ||
156 | { | ||
157 | unsigned int i; | ||
158 | unsigned int error_count = 0; | ||
159 | u8 actual; | ||
160 | |||
161 | for (i = start; i < end; i++) { | ||
162 | actual = buf[i]; | ||
163 | if (actual != (pattern | (~counter & PATTERN_COUNT_MASK))) { | ||
164 | if (error_count < 32) | ||
165 | dmatest_mismatch(actual, pattern, i, counter, | ||
166 | is_srcbuf); | ||
167 | error_count++; | ||
168 | } | ||
169 | counter++; | ||
170 | } | ||
171 | |||
172 | if (error_count > 32) | ||
173 | pr_warning("%s: %u errors suppressed\n", | ||
174 | current->comm, error_count - 32); | ||
175 | |||
176 | return error_count; | ||
177 | } | ||
178 | |||
179 | /* | ||
180 | * This function repeatedly tests DMA transfers of various lengths and | ||
181 | * offsets until it is told to exit by kthread_stop(). There may be | ||
182 | * multiple threads running this function in parallel for a single | ||
183 | * channel, and there may be multiple channels being tested in | ||
184 | * parallel. | ||
185 | * | ||
186 | * Before each test, the source and destination buffer is initialized | ||
187 | * with a known pattern. This pattern is different depending on | ||
188 | * whether it's in an area which is supposed to be copied or | ||
189 | * overwritten, and different in the source and destination buffers. | ||
190 | * So if the DMA engine doesn't copy exactly what we tell it to copy, | ||
191 | * we'll notice. | ||
192 | */ | ||
193 | static int dmatest_func(void *data) | ||
194 | { | ||
195 | struct dmatest_thread *thread = data; | ||
196 | struct dma_chan *chan; | ||
197 | const char *thread_name; | ||
198 | unsigned int src_off, dst_off, len; | ||
199 | unsigned int error_count; | ||
200 | unsigned int failed_tests = 0; | ||
201 | unsigned int total_tests = 0; | ||
202 | dma_cookie_t cookie; | ||
203 | enum dma_status status; | ||
204 | int ret; | ||
205 | |||
206 | thread_name = current->comm; | ||
207 | |||
208 | ret = -ENOMEM; | ||
209 | thread->srcbuf = kmalloc(test_buf_size, GFP_KERNEL); | ||
210 | if (!thread->srcbuf) | ||
211 | goto err_srcbuf; | ||
212 | thread->dstbuf = kmalloc(test_buf_size, GFP_KERNEL); | ||
213 | if (!thread->dstbuf) | ||
214 | goto err_dstbuf; | ||
215 | |||
216 | smp_rmb(); | ||
217 | chan = thread->chan; | ||
218 | dma_chan_get(chan); | ||
219 | |||
220 | while (!kthread_should_stop()) { | ||
221 | total_tests++; | ||
222 | |||
223 | len = dmatest_random() % test_buf_size + 1; | ||
224 | src_off = dmatest_random() % (test_buf_size - len + 1); | ||
225 | dst_off = dmatest_random() % (test_buf_size - len + 1); | ||
226 | |||
227 | dmatest_init_srcbuf(thread->srcbuf, src_off, len); | ||
228 | dmatest_init_dstbuf(thread->dstbuf, dst_off, len); | ||
229 | |||
230 | cookie = dma_async_memcpy_buf_to_buf(chan, | ||
231 | thread->dstbuf + dst_off, | ||
232 | thread->srcbuf + src_off, | ||
233 | len); | ||
234 | if (dma_submit_error(cookie)) { | ||
235 | pr_warning("%s: #%u: submit error %d with src_off=0x%x " | ||
236 | "dst_off=0x%x len=0x%x\n", | ||
237 | thread_name, total_tests - 1, cookie, | ||
238 | src_off, dst_off, len); | ||
239 | msleep(100); | ||
240 | failed_tests++; | ||
241 | continue; | ||
242 | } | ||
243 | dma_async_memcpy_issue_pending(chan); | ||
244 | |||
245 | do { | ||
246 | msleep(1); | ||
247 | status = dma_async_memcpy_complete( | ||
248 | chan, cookie, NULL, NULL); | ||
249 | } while (status == DMA_IN_PROGRESS); | ||
250 | |||
251 | if (status == DMA_ERROR) { | ||
252 | pr_warning("%s: #%u: error during copy\n", | ||
253 | thread_name, total_tests - 1); | ||
254 | failed_tests++; | ||
255 | continue; | ||
256 | } | ||
257 | |||
258 | error_count = 0; | ||
259 | |||
260 | pr_debug("%s: verifying source buffer...\n", thread_name); | ||
261 | error_count += dmatest_verify(thread->srcbuf, 0, src_off, | ||
262 | 0, PATTERN_SRC, true); | ||
263 | error_count += dmatest_verify(thread->srcbuf, src_off, | ||
264 | src_off + len, src_off, | ||
265 | PATTERN_SRC | PATTERN_COPY, true); | ||
266 | error_count += dmatest_verify(thread->srcbuf, src_off + len, | ||
267 | test_buf_size, src_off + len, | ||
268 | PATTERN_SRC, true); | ||
269 | |||
270 | pr_debug("%s: verifying dest buffer...\n", | ||
271 | thread_name); | ||
272 | error_count += dmatest_verify(thread->dstbuf, 0, dst_off, | ||
273 | 0, PATTERN_DST, false); | ||
274 | error_count += dmatest_verify(thread->dstbuf, dst_off, | ||
275 | dst_off + len, src_off, | ||
276 | PATTERN_SRC | PATTERN_COPY, false); | ||
277 | error_count += dmatest_verify(thread->dstbuf, dst_off + len, | ||
278 | test_buf_size, dst_off + len, | ||
279 | PATTERN_DST, false); | ||
280 | |||
281 | if (error_count) { | ||
282 | pr_warning("%s: #%u: %u errors with " | ||
283 | "src_off=0x%x dst_off=0x%x len=0x%x\n", | ||
284 | thread_name, total_tests - 1, error_count, | ||
285 | src_off, dst_off, len); | ||
286 | failed_tests++; | ||
287 | } else { | ||
288 | pr_debug("%s: #%u: No errors with " | ||
289 | "src_off=0x%x dst_off=0x%x len=0x%x\n", | ||
290 | thread_name, total_tests - 1, | ||
291 | src_off, dst_off, len); | ||
292 | } | ||
293 | } | ||
294 | |||
295 | ret = 0; | ||
296 | dma_chan_put(chan); | ||
297 | kfree(thread->dstbuf); | ||
298 | err_dstbuf: | ||
299 | kfree(thread->srcbuf); | ||
300 | err_srcbuf: | ||
301 | pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", | ||
302 | thread_name, total_tests, failed_tests, ret); | ||
303 | return ret; | ||
304 | } | ||
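
The block comment above dmatest_func() describes the pattern scheme in prose; the following user-space sketch (not part of the patch) walks the same three-region check that the destination-buffer verification performs. The fill values are invented for the example; the real PATTERN_* values are defined earlier in dmatest.c.

	#include <stdio.h>
	#include <string.h>

	#define BUF_SIZE	64
	#define DST_FILL	0xAAu	/* hypothetical "destination" pattern */
	#define SRC_FILL	0x55u	/* hypothetical "source/copied" pattern */

	/* Count bytes in [start, end) that do not match the expected pattern. */
	static unsigned int verify(const unsigned char *buf, unsigned int start,
				   unsigned int end, unsigned char pattern)
	{
		unsigned int i, errors = 0;

		for (i = start; i < end; i++)
			if (buf[i] != pattern)
				errors++;
		return errors;
	}

	int main(void)
	{
		unsigned char dst[BUF_SIZE];
		unsigned int dst_off = 10, len = 20, errors = 0;

		/* Pretend a DMA memcpy landed SRC_FILL bytes at dst + dst_off. */
		memset(dst, DST_FILL, sizeof(dst));
		memset(dst + dst_off, SRC_FILL, len);

		/* The same three regions dmatest_func() checks on the destination. */
		errors += verify(dst, 0, dst_off, DST_FILL);
		errors += verify(dst, dst_off, dst_off + len, SRC_FILL);
		errors += verify(dst, dst_off + len, BUF_SIZE, DST_FILL);

		printf("%u errors\n", errors);
		return errors ? 1 : 0;
	}
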
305 | |||
306 | static void dmatest_cleanup_channel(struct dmatest_chan *dtc) | ||
307 | { | ||
308 | struct dmatest_thread *thread; | ||
309 | struct dmatest_thread *_thread; | ||
310 | int ret; | ||
311 | |||
312 | list_for_each_entry_safe(thread, _thread, &dtc->threads, node) { | ||
313 | ret = kthread_stop(thread->task); | ||
314 | pr_debug("dmatest: thread %s exited with status %d\n", | ||
315 | thread->task->comm, ret); | ||
316 | list_del(&thread->node); | ||
317 | kfree(thread); | ||
318 | } | ||
319 | kfree(dtc); | ||
320 | } | ||
321 | |||
322 | static enum dma_state_client dmatest_add_channel(struct dma_chan *chan) | ||
323 | { | ||
324 | struct dmatest_chan *dtc; | ||
325 | struct dmatest_thread *thread; | ||
326 | unsigned int i; | ||
327 | |||
328 | dtc = kmalloc(sizeof(struct dmatest_chan), GFP_ATOMIC); | ||
329 | if (!dtc) { | ||
330 | pr_warning("dmatest: No memory for %s\n", chan->dev.bus_id); | ||
331 | return DMA_NAK; | ||
332 | } | ||
333 | |||
334 | dtc->chan = chan; | ||
335 | INIT_LIST_HEAD(&dtc->threads); | ||
336 | |||
337 | for (i = 0; i < threads_per_chan; i++) { | ||
338 | thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); | ||
339 | if (!thread) { | ||
340 | pr_warning("dmatest: No memory for %s-test%u\n", | ||
341 | chan->dev.bus_id, i); | ||
342 | break; | ||
343 | } | ||
344 | thread->chan = dtc->chan; | ||
345 | smp_wmb(); | ||
346 | thread->task = kthread_run(dmatest_func, thread, "%s-test%u", | ||
347 | chan->dev.bus_id, i); | ||
348 | if (IS_ERR(thread->task)) { | ||
349 | pr_warning("dmatest: Failed to run thread %s-test%u\n", | ||
350 | chan->dev.bus_id, i); | ||
351 | kfree(thread); | ||
352 | break; | ||
353 | } | ||
354 | |||
355 | /* srcbuf and dstbuf are allocated by the thread itself */ | ||
356 | |||
357 | list_add_tail(&thread->node, &dtc->threads); | ||
358 | } | ||
359 | |||
360 | pr_info("dmatest: Started %u threads using %s\n", i, chan->dev.bus_id); | ||
361 | |||
362 | list_add_tail(&dtc->node, &dmatest_channels); | ||
363 | nr_channels++; | ||
364 | |||
365 | return DMA_ACK; | ||
366 | } | ||
367 | |||
368 | static enum dma_state_client dmatest_remove_channel(struct dma_chan *chan) | ||
369 | { | ||
370 | struct dmatest_chan *dtc, *_dtc; | ||
371 | |||
372 | list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) { | ||
373 | if (dtc->chan == chan) { | ||
374 | list_del(&dtc->node); | ||
375 | dmatest_cleanup_channel(dtc); | ||
376 | pr_debug("dmatest: lost channel %s\n", | ||
377 | chan->dev.bus_id); | ||
378 | return DMA_ACK; | ||
379 | } | ||
380 | } | ||
381 | |||
382 | return DMA_DUP; | ||
383 | } | ||
384 | |||
385 | /* | ||
386 | * Start testing threads as new channels are assigned to us, and kill | ||
387 | * them when the channels go away. | ||
388 | * | ||
389 | * When we unregister the client, all channels are removed so this | ||
390 | * will also take care of cleaning things up when the module is | ||
391 | * unloaded. | ||
392 | */ | ||
393 | static enum dma_state_client | ||
394 | dmatest_event(struct dma_client *client, struct dma_chan *chan, | ||
395 | enum dma_state state) | ||
396 | { | ||
397 | enum dma_state_client ack = DMA_NAK; | ||
398 | |||
399 | switch (state) { | ||
400 | case DMA_RESOURCE_AVAILABLE: | ||
401 | if (!dmatest_match_channel(chan) | ||
402 | || !dmatest_match_device(chan->device)) | ||
403 | ack = DMA_DUP; | ||
404 | else if (max_channels && nr_channels >= max_channels) | ||
405 | ack = DMA_NAK; | ||
406 | else | ||
407 | ack = dmatest_add_channel(chan); | ||
408 | break; | ||
409 | |||
410 | case DMA_RESOURCE_REMOVED: | ||
411 | ack = dmatest_remove_channel(chan); | ||
412 | break; | ||
413 | |||
414 | default: | ||
415 | pr_info("dmatest: Unhandled event %u (%s)\n", | ||
416 | state, chan->dev.bus_id); | ||
417 | break; | ||
418 | } | ||
419 | |||
420 | return ack; | ||
421 | } | ||
422 | |||
423 | static struct dma_client dmatest_client = { | ||
424 | .event_callback = dmatest_event, | ||
425 | }; | ||
426 | |||
427 | static int __init dmatest_init(void) | ||
428 | { | ||
429 | dma_cap_set(DMA_MEMCPY, dmatest_client.cap_mask); | ||
430 | dma_async_client_register(&dmatest_client); | ||
431 | dma_async_client_chan_request(&dmatest_client); | ||
432 | |||
433 | return 0; | ||
434 | } | ||
435 | module_init(dmatest_init); | ||
436 | |||
437 | static void __exit dmatest_exit(void) | ||
438 | { | ||
439 | dma_async_client_unregister(&dmatest_client); | ||
440 | } | ||
441 | module_exit(dmatest_exit); | ||
442 | |||
443 | MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>"); | ||
444 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c new file mode 100644 index 000000000000..94df91771243 --- /dev/null +++ b/drivers/dma/dw_dmac.c | |||
@@ -0,0 +1,1122 @@ | |||
1 | /* | ||
2 | * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on | ||
3 | * AVR32 systems.) | ||
4 | * | ||
5 | * Copyright (C) 2007-2008 Atmel Corporation | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | #include <linux/clk.h> | ||
12 | #include <linux/delay.h> | ||
13 | #include <linux/dmaengine.h> | ||
14 | #include <linux/dma-mapping.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/interrupt.h> | ||
17 | #include <linux/io.h> | ||
18 | #include <linux/mm.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/platform_device.h> | ||
21 | #include <linux/slab.h> | ||
22 | |||
23 | #include "dw_dmac_regs.h" | ||
24 | |||
25 | /* | ||
26 | * This supports the Synopsys "DesignWare AHB Central DMA Controller", | ||
27 | * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all | ||
28 | * of which use ARM any more). See the "Databook" from Synopsys for | ||
29 | * information beyond what licensees probably provide. | ||
30 | * | ||
31 | * The driver has currently been tested only with the Atmel AT32AP7000, | ||
32 | * which does not support descriptor writeback. | ||
33 | */ | ||
34 | |||
35 | /* NOTE: DMS+SMS is system-specific. We should get this information | ||
36 | * from the platform code somehow. | ||
37 | */ | ||
38 | #define DWC_DEFAULT_CTLLO (DWC_CTLL_DST_MSIZE(0) \ | ||
39 | | DWC_CTLL_SRC_MSIZE(0) \ | ||
40 | | DWC_CTLL_DMS(0) \ | ||
41 | | DWC_CTLL_SMS(1) \ | ||
42 | | DWC_CTLL_LLP_D_EN \ | ||
43 | | DWC_CTLL_LLP_S_EN) | ||
44 | |||
45 | /* | ||
46 | * This is configuration-dependent and usually a funny size like 4095. | ||
47 | * Let's round it down to the nearest power of two. | ||
48 | * | ||
49 | * Note that this is a transfer count, i.e. if we transfer 32-bit | ||
50 | * words, we can do 8192 bytes per descriptor. | ||
51 | * | ||
52 | * This parameter is also system-specific. | ||
53 | */ | ||
54 | #define DWC_MAX_COUNT 2048U | ||
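
A quick arithmetic check of the comment above, as a stand-alone sketch rather than driver code: DWC_MAX_COUNT is a transfer count in source-width units, so the byte capacity of one descriptor scales with the element size chosen in dwc_prep_dma_memcpy().

	/*
	 * Stand-alone sketch, not driver code. src_width is the log2 element
	 * size used by dwc_prep_dma_memcpy(): 0 = bytes, 1 = halfwords,
	 * 2 = 32-bit words.
	 */
	#include <stdio.h>

	#define DWC_MAX_COUNT	2048U	/* repeated here so the sketch stands alone */

	int main(void)
	{
		unsigned int src_width;

		for (src_width = 0; src_width <= 2; src_width++)
			printf("width %u: up to %u bytes per descriptor\n",
			       src_width, DWC_MAX_COUNT << src_width);
		/* width 2 prints 8192, matching "8192 bytes per descriptor" above */
		return 0;
	}
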
55 | |||
56 | /* | ||
57 | * Number of descriptors to allocate for each channel. This should be | ||
58 | * made configurable somehow; preferably, the clients (at least the | ||
59 | * ones using slave transfers) should be able to give us a hint. | ||
60 | */ | ||
61 | #define NR_DESCS_PER_CHANNEL 64 | ||
62 | |||
63 | /*----------------------------------------------------------------------*/ | ||
64 | |||
65 | /* | ||
66 | * Because we're not relying on writeback from the controller (it may not | ||
67 | * even be configured into the core!) we don't need to use dma_pool. These | ||
68 | * descriptors -- and associated data -- are cacheable. We do need to make | ||
69 | * sure their dcache entries are written back before handing them off to | ||
70 | * the controller, though. | ||
71 | */ | ||
72 | |||
73 | static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) | ||
74 | { | ||
75 | return list_entry(dwc->active_list.next, struct dw_desc, desc_node); | ||
76 | } | ||
77 | |||
78 | static struct dw_desc *dwc_first_queued(struct dw_dma_chan *dwc) | ||
79 | { | ||
80 | return list_entry(dwc->queue.next, struct dw_desc, desc_node); | ||
81 | } | ||
82 | |||
83 | static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) | ||
84 | { | ||
85 | struct dw_desc *desc, *_desc; | ||
86 | struct dw_desc *ret = NULL; | ||
87 | unsigned int i = 0; | ||
88 | |||
89 | spin_lock_bh(&dwc->lock); | ||
90 | list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) { | ||
91 | if (async_tx_test_ack(&desc->txd)) { | ||
92 | list_del(&desc->desc_node); | ||
93 | ret = desc; | ||
94 | break; | ||
95 | } | ||
96 | dev_dbg(&dwc->chan.dev, "desc %p not ACKed\n", desc); | ||
97 | i++; | ||
98 | } | ||
99 | spin_unlock_bh(&dwc->lock); | ||
100 | |||
101 | dev_vdbg(&dwc->chan.dev, "scanned %u descriptors on freelist\n", i); | ||
102 | |||
103 | return ret; | ||
104 | } | ||
105 | |||
106 | static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc) | ||
107 | { | ||
108 | struct dw_desc *child; | ||
109 | |||
110 | list_for_each_entry(child, &desc->txd.tx_list, desc_node) | ||
111 | dma_sync_single_for_cpu(dwc->chan.dev.parent, | ||
112 | child->txd.phys, sizeof(child->lli), | ||
113 | DMA_TO_DEVICE); | ||
114 | dma_sync_single_for_cpu(dwc->chan.dev.parent, | ||
115 | desc->txd.phys, sizeof(desc->lli), | ||
116 | DMA_TO_DEVICE); | ||
117 | } | ||
118 | |||
119 | /* | ||
120 | * Move a descriptor, including any children, to the free list. | ||
121 | * `desc' must not be on any lists. | ||
122 | */ | ||
123 | static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) | ||
124 | { | ||
125 | if (desc) { | ||
126 | struct dw_desc *child; | ||
127 | |||
128 | dwc_sync_desc_for_cpu(dwc, desc); | ||
129 | |||
130 | spin_lock_bh(&dwc->lock); | ||
131 | list_for_each_entry(child, &desc->txd.tx_list, desc_node) | ||
132 | dev_vdbg(&dwc->chan.dev, | ||
133 | "moving child desc %p to freelist\n", | ||
134 | child); | ||
135 | list_splice_init(&desc->txd.tx_list, &dwc->free_list); | ||
136 | dev_vdbg(&dwc->chan.dev, "moving desc %p to freelist\n", desc); | ||
137 | list_add(&desc->desc_node, &dwc->free_list); | ||
138 | spin_unlock_bh(&dwc->lock); | ||
139 | } | ||
140 | } | ||
141 | |||
142 | /* Called with dwc->lock held and bh disabled */ | ||
143 | static dma_cookie_t | ||
144 | dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc) | ||
145 | { | ||
146 | dma_cookie_t cookie = dwc->chan.cookie; | ||
147 | |||
148 | if (++cookie < 0) | ||
149 | cookie = 1; | ||
150 | |||
151 | dwc->chan.cookie = cookie; | ||
152 | desc->txd.cookie = cookie; | ||
153 | |||
154 | return cookie; | ||
155 | } | ||
156 | |||
157 | /*----------------------------------------------------------------------*/ | ||
158 | |||
159 | /* Called with dwc->lock held and bh disabled */ | ||
160 | static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) | ||
161 | { | ||
162 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | ||
163 | |||
164 | /* ASSERT: channel is idle */ | ||
165 | if (dma_readl(dw, CH_EN) & dwc->mask) { | ||
166 | dev_err(&dwc->chan.dev, | ||
167 | "BUG: Attempted to start non-idle channel\n"); | ||
168 | dev_err(&dwc->chan.dev, | ||
169 | " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", | ||
170 | channel_readl(dwc, SAR), | ||
171 | channel_readl(dwc, DAR), | ||
172 | channel_readl(dwc, LLP), | ||
173 | channel_readl(dwc, CTL_HI), | ||
174 | channel_readl(dwc, CTL_LO)); | ||
175 | |||
176 | /* The tasklet will hopefully advance the queue... */ | ||
177 | return; | ||
178 | } | ||
179 | |||
180 | channel_writel(dwc, LLP, first->txd.phys); | ||
181 | channel_writel(dwc, CTL_LO, | ||
182 | DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); | ||
183 | channel_writel(dwc, CTL_HI, 0); | ||
184 | channel_set_bit(dw, CH_EN, dwc->mask); | ||
185 | } | ||
186 | |||
187 | /*----------------------------------------------------------------------*/ | ||
188 | |||
189 | static void | ||
190 | dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc) | ||
191 | { | ||
192 | dma_async_tx_callback callback; | ||
193 | void *param; | ||
194 | struct dma_async_tx_descriptor *txd = &desc->txd; | ||
195 | |||
196 | dev_vdbg(&dwc->chan.dev, "descriptor %u complete\n", txd->cookie); | ||
197 | |||
198 | dwc->completed = txd->cookie; | ||
199 | callback = txd->callback; | ||
200 | param = txd->callback_param; | ||
201 | |||
202 | dwc_sync_desc_for_cpu(dwc, desc); | ||
203 | list_splice_init(&txd->tx_list, &dwc->free_list); | ||
204 | list_move(&desc->desc_node, &dwc->free_list); | ||
205 | |||
206 | /* | ||
207 | * We use dma_unmap_page() regardless of how the buffers were | ||
208 | * mapped before they were submitted... | ||
209 | */ | ||
210 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) | ||
211 | dma_unmap_page(dwc->chan.dev.parent, desc->lli.dar, desc->len, | ||
212 | DMA_FROM_DEVICE); | ||
213 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) | ||
214 | dma_unmap_page(dwc->chan.dev.parent, desc->lli.sar, desc->len, | ||
215 | DMA_TO_DEVICE); | ||
216 | |||
217 | /* | ||
218 | * The API requires that no submissions are done from a | ||
219 | * callback, so we don't need to drop the lock here | ||
220 | */ | ||
221 | if (callback) | ||
222 | callback(param); | ||
223 | } | ||
224 | |||
225 | static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) | ||
226 | { | ||
227 | struct dw_desc *desc, *_desc; | ||
228 | LIST_HEAD(list); | ||
229 | |||
230 | if (dma_readl(dw, CH_EN) & dwc->mask) { | ||
231 | dev_err(&dwc->chan.dev, | ||
232 | "BUG: XFER bit set, but channel not idle!\n"); | ||
233 | |||
234 | /* Try to continue after resetting the channel... */ | ||
235 | channel_clear_bit(dw, CH_EN, dwc->mask); | ||
236 | while (dma_readl(dw, CH_EN) & dwc->mask) | ||
237 | cpu_relax(); | ||
238 | } | ||
239 | |||
240 | /* | ||
241 | * Submit queued descriptors ASAP, i.e. before we go through | ||
242 | * the completed ones. | ||
243 | */ | ||
244 | if (!list_empty(&dwc->queue)) | ||
245 | dwc_dostart(dwc, dwc_first_queued(dwc)); | ||
246 | list_splice_init(&dwc->active_list, &list); | ||
247 | list_splice_init(&dwc->queue, &dwc->active_list); | ||
248 | |||
249 | list_for_each_entry_safe(desc, _desc, &list, desc_node) | ||
250 | dwc_descriptor_complete(dwc, desc); | ||
251 | } | ||
252 | |||
253 | static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | ||
254 | { | ||
255 | dma_addr_t llp; | ||
256 | struct dw_desc *desc, *_desc; | ||
257 | struct dw_desc *child; | ||
258 | u32 status_xfer; | ||
259 | |||
260 | /* | ||
261 | * Clear block interrupt flag before scanning so that we don't | ||
262 | * miss any, and read LLP before RAW_XFER to ensure it is | ||
263 | * valid if we decide to scan the list. | ||
264 | */ | ||
265 | dma_writel(dw, CLEAR.BLOCK, dwc->mask); | ||
266 | llp = channel_readl(dwc, LLP); | ||
267 | status_xfer = dma_readl(dw, RAW.XFER); | ||
268 | |||
269 | if (status_xfer & dwc->mask) { | ||
270 | /* Everything we've submitted is done */ | ||
271 | dma_writel(dw, CLEAR.XFER, dwc->mask); | ||
272 | dwc_complete_all(dw, dwc); | ||
273 | return; | ||
274 | } | ||
275 | |||
276 | dev_vdbg(&dwc->chan.dev, "scan_descriptors: llp=0x%x\n", llp); | ||
277 | |||
278 | list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { | ||
279 | if (desc->lli.llp == llp) | ||
280 | /* This one is currently in progress */ | ||
281 | return; | ||
282 | |||
283 | list_for_each_entry(child, &desc->txd.tx_list, desc_node) | ||
284 | if (child->lli.llp == llp) | ||
285 | /* Currently in progress */ | ||
286 | return; | ||
287 | |||
288 | /* | ||
289 | * No descriptors so far seem to be in progress, i.e. | ||
290 | * this one must be done. | ||
291 | */ | ||
292 | dwc_descriptor_complete(dwc, desc); | ||
293 | } | ||
294 | |||
295 | dev_err(&dwc->chan.dev, | ||
296 | "BUG: All descriptors done, but channel not idle!\n"); | ||
297 | |||
298 | /* Try to continue after resetting the channel... */ | ||
299 | channel_clear_bit(dw, CH_EN, dwc->mask); | ||
300 | while (dma_readl(dw, CH_EN) & dwc->mask) | ||
301 | cpu_relax(); | ||
302 | |||
303 | if (!list_empty(&dwc->queue)) { | ||
304 | dwc_dostart(dwc, dwc_first_queued(dwc)); | ||
305 | list_splice_init(&dwc->queue, &dwc->active_list); | ||
306 | } | ||
307 | } | ||
308 | |||
309 | static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) | ||
310 | { | ||
311 | dev_printk(KERN_CRIT, &dwc->chan.dev, | ||
312 | " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", | ||
313 | lli->sar, lli->dar, lli->llp, | ||
314 | lli->ctlhi, lli->ctllo); | ||
315 | } | ||
316 | |||
317 | static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) | ||
318 | { | ||
319 | struct dw_desc *bad_desc; | ||
320 | struct dw_desc *child; | ||
321 | |||
322 | dwc_scan_descriptors(dw, dwc); | ||
323 | |||
324 | /* | ||
325 | * The descriptor currently at the head of the active list is | ||
326 | * borked. Since we don't have any way to report errors, we'll | ||
327 | * just have to scream loudly and try to carry on. | ||
328 | */ | ||
329 | bad_desc = dwc_first_active(dwc); | ||
330 | list_del_init(&bad_desc->desc_node); | ||
331 | list_splice_init(&dwc->queue, dwc->active_list.prev); | ||
332 | |||
333 | /* Clear the error flag and try to restart the controller */ | ||
334 | dma_writel(dw, CLEAR.ERROR, dwc->mask); | ||
335 | if (!list_empty(&dwc->active_list)) | ||
336 | dwc_dostart(dwc, dwc_first_active(dwc)); | ||
337 | |||
338 | /* | ||
339 | * KERN_CRIT may seem harsh, but since this only happens | ||
340 | * when someone submits a bad physical address in a | ||
341 | * descriptor, we should consider ourselves lucky that the | ||
342 | * controller flagged an error instead of scribbling over | ||
343 | * random memory locations. | ||
344 | */ | ||
345 | dev_printk(KERN_CRIT, &dwc->chan.dev, | ||
346 | "Bad descriptor submitted for DMA!\n"); | ||
347 | dev_printk(KERN_CRIT, &dwc->chan.dev, | ||
348 | " cookie: %d\n", bad_desc->txd.cookie); | ||
349 | dwc_dump_lli(dwc, &bad_desc->lli); | ||
350 | list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node) | ||
351 | dwc_dump_lli(dwc, &child->lli); | ||
352 | |||
353 | /* Pretend the descriptor completed successfully */ | ||
354 | dwc_descriptor_complete(dwc, bad_desc); | ||
355 | } | ||
356 | |||
357 | static void dw_dma_tasklet(unsigned long data) | ||
358 | { | ||
359 | struct dw_dma *dw = (struct dw_dma *)data; | ||
360 | struct dw_dma_chan *dwc; | ||
361 | u32 status_block; | ||
362 | u32 status_xfer; | ||
363 | u32 status_err; | ||
364 | int i; | ||
365 | |||
366 | status_block = dma_readl(dw, RAW.BLOCK); | ||
367 | status_xfer = dma_readl(dw, RAW.XFER); | ||
368 | status_err = dma_readl(dw, RAW.ERROR); | ||
369 | |||
370 | dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n", | ||
371 | status_block, status_err); | ||
372 | |||
373 | for (i = 0; i < dw->dma.chancnt; i++) { | ||
374 | dwc = &dw->chan[i]; | ||
375 | spin_lock(&dwc->lock); | ||
376 | if (status_err & (1 << i)) | ||
377 | dwc_handle_error(dw, dwc); | ||
378 | else if ((status_block | status_xfer) & (1 << i)) | ||
379 | dwc_scan_descriptors(dw, dwc); | ||
380 | spin_unlock(&dwc->lock); | ||
381 | } | ||
382 | |||
383 | /* | ||
384 | * Re-enable interrupts. Block Complete interrupts are only | ||
385 | * enabled if the INT_EN bit in the descriptor is set. This | ||
386 | * will trigger a scan before the whole list is done. | ||
387 | */ | ||
388 | channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); | ||
389 | channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask); | ||
390 | channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); | ||
391 | } | ||
392 | |||
393 | static irqreturn_t dw_dma_interrupt(int irq, void *dev_id) | ||
394 | { | ||
395 | struct dw_dma *dw = dev_id; | ||
396 | u32 status; | ||
397 | |||
398 | dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n", | ||
399 | dma_readl(dw, STATUS_INT)); | ||
400 | |||
401 | /* | ||
402 | * Just disable the interrupts. We'll turn them back on in the | ||
403 | * softirq handler. | ||
404 | */ | ||
405 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); | ||
406 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); | ||
407 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); | ||
408 | |||
409 | status = dma_readl(dw, STATUS_INT); | ||
410 | if (status) { | ||
411 | dev_err(dw->dma.dev, | ||
412 | "BUG: Unexpected interrupts pending: 0x%x\n", | ||
413 | status); | ||
414 | |||
415 | /* Try to recover */ | ||
416 | channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1); | ||
417 | channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1); | ||
418 | channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1); | ||
419 | channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1); | ||
420 | channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1); | ||
421 | } | ||
422 | |||
423 | tasklet_schedule(&dw->tasklet); | ||
424 | |||
425 | return IRQ_HANDLED; | ||
426 | } | ||
427 | |||
428 | /*----------------------------------------------------------------------*/ | ||
429 | |||
430 | static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) | ||
431 | { | ||
432 | struct dw_desc *desc = txd_to_dw_desc(tx); | ||
433 | struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan); | ||
434 | dma_cookie_t cookie; | ||
435 | |||
436 | spin_lock_bh(&dwc->lock); | ||
437 | cookie = dwc_assign_cookie(dwc, desc); | ||
438 | |||
439 | /* | ||
440 | * REVISIT: We should attempt to chain as many descriptors as | ||
441 | * possible, perhaps even appending to those already submitted | ||
442 | * for DMA. But this is hard to do in a race-free manner. | ||
443 | */ | ||
444 | if (list_empty(&dwc->active_list)) { | ||
445 | dev_vdbg(&tx->chan->dev, "tx_submit: started %u\n", | ||
446 | desc->txd.cookie); | ||
447 | dwc_dostart(dwc, desc); | ||
448 | list_add_tail(&desc->desc_node, &dwc->active_list); | ||
449 | } else { | ||
450 | dev_vdbg(&tx->chan->dev, "tx_submit: queued %u\n", | ||
451 | desc->txd.cookie); | ||
452 | |||
453 | list_add_tail(&desc->desc_node, &dwc->queue); | ||
454 | } | ||
455 | |||
456 | spin_unlock_bh(&dwc->lock); | ||
457 | |||
458 | return cookie; | ||
459 | } | ||
460 | |||
461 | static struct dma_async_tx_descriptor * | ||
462 | dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | ||
463 | size_t len, unsigned long flags) | ||
464 | { | ||
465 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | ||
466 | struct dw_desc *desc; | ||
467 | struct dw_desc *first; | ||
468 | struct dw_desc *prev; | ||
469 | size_t xfer_count; | ||
470 | size_t offset; | ||
471 | unsigned int src_width; | ||
472 | unsigned int dst_width; | ||
473 | u32 ctllo; | ||
474 | |||
475 | dev_vdbg(&chan->dev, "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n", | ||
476 | dest, src, len, flags); | ||
477 | |||
478 | if (unlikely(!len)) { | ||
479 | dev_dbg(&chan->dev, "prep_dma_memcpy: length is zero!\n"); | ||
480 | return NULL; | ||
481 | } | ||
482 | |||
483 | /* | ||
484 | * We can be a lot more clever here, but this should take care | ||
485 | * of the most common optimization. | ||
486 | */ | ||
487 | if (!((src | dest | len) & 3)) | ||
488 | src_width = dst_width = 2; | ||
489 | else if (!((src | dest | len) & 1)) | ||
490 | src_width = dst_width = 1; | ||
491 | else | ||
492 | src_width = dst_width = 0; | ||
493 | |||
494 | ctllo = DWC_DEFAULT_CTLLO | ||
495 | | DWC_CTLL_DST_WIDTH(dst_width) | ||
496 | | DWC_CTLL_SRC_WIDTH(src_width) | ||
497 | | DWC_CTLL_DST_INC | ||
498 | | DWC_CTLL_SRC_INC | ||
499 | | DWC_CTLL_FC_M2M; | ||
500 | prev = first = NULL; | ||
501 | |||
502 | for (offset = 0; offset < len; offset += xfer_count << src_width) { | ||
503 | xfer_count = min_t(size_t, (len - offset) >> src_width, | ||
504 | DWC_MAX_COUNT); | ||
505 | |||
506 | desc = dwc_desc_get(dwc); | ||
507 | if (!desc) | ||
508 | goto err_desc_get; | ||
509 | |||
510 | desc->lli.sar = src + offset; | ||
511 | desc->lli.dar = dest + offset; | ||
512 | desc->lli.ctllo = ctllo; | ||
513 | desc->lli.ctlhi = xfer_count; | ||
514 | |||
515 | if (!first) { | ||
516 | first = desc; | ||
517 | } else { | ||
518 | prev->lli.llp = desc->txd.phys; | ||
519 | dma_sync_single_for_device(chan->dev.parent, | ||
520 | prev->txd.phys, sizeof(prev->lli), | ||
521 | DMA_TO_DEVICE); | ||
522 | list_add_tail(&desc->desc_node, | ||
523 | &first->txd.tx_list); | ||
524 | } | ||
525 | prev = desc; | ||
526 | } | ||
527 | |||
528 | |||
529 | if (flags & DMA_PREP_INTERRUPT) | ||
530 | /* Trigger interrupt after last block */ | ||
531 | prev->lli.ctllo |= DWC_CTLL_INT_EN; | ||
532 | |||
533 | prev->lli.llp = 0; | ||
534 | dma_sync_single_for_device(chan->dev.parent, | ||
535 | prev->txd.phys, sizeof(prev->lli), | ||
536 | DMA_TO_DEVICE); | ||
537 | |||
538 | first->txd.flags = flags; | ||
539 | first->len = len; | ||
540 | |||
541 | return &first->txd; | ||
542 | |||
543 | err_desc_get: | ||
544 | dwc_desc_put(dwc, first); | ||
545 | return NULL; | ||
546 | } | ||
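
For context, this is roughly how a kernel client of this dmaengine generation could drive the memcpy path just defined. It is a hedged sketch, not code from the patch; sketch_memcpy() is a hypothetical name, the channel is assumed to have been handed out by the dmaengine core, and error handling is reduced to the minimum. It exercises the same operations dmatest.c reaches through the dma_async_memcpy_* wrappers: prepare, submit, issue pending, poll.

	#include <linux/dmaengine.h>
	#include <linux/delay.h>
	#include <linux/errno.h>

	static int sketch_memcpy(struct dma_chan *chan,
				 dma_addr_t dst, dma_addr_t src, size_t len)
	{
		struct dma_async_tx_descriptor *tx;
		dma_cookie_t cookie;

		/* Lands in dwc_prep_dma_memcpy(), which builds the LLI chain. */
		tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
							   DMA_PREP_INTERRUPT);
		if (!tx)
			return -ENOMEM;

		cookie = tx->tx_submit(tx);			/* dwc_tx_submit() */
		if (dma_submit_error(cookie))
			return -EIO;

		chan->device->device_issue_pending(chan);	/* dwc_issue_pending() */

		/* Poll for completion, much as dmatest does. */
		while (chan->device->device_is_tx_complete(chan, cookie, NULL, NULL)
				== DMA_IN_PROGRESS)
			msleep(1);

		return 0;
	}
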
547 | |||
548 | static struct dma_async_tx_descriptor * | ||
549 | dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | ||
550 | unsigned int sg_len, enum dma_data_direction direction, | ||
551 | unsigned long flags) | ||
552 | { | ||
553 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | ||
554 | struct dw_dma_slave *dws = dwc->dws; | ||
555 | struct dw_desc *prev; | ||
556 | struct dw_desc *first; | ||
557 | u32 ctllo; | ||
558 | dma_addr_t reg; | ||
559 | unsigned int reg_width; | ||
560 | unsigned int mem_width; | ||
561 | unsigned int i; | ||
562 | struct scatterlist *sg; | ||
563 | size_t total_len = 0; | ||
564 | |||
565 | dev_vdbg(&chan->dev, "prep_dma_slave\n"); | ||
566 | |||
567 | if (unlikely(!dws || !sg_len)) | ||
568 | return NULL; | ||
569 | |||
570 | reg_width = dws->slave.reg_width; | ||
571 | prev = first = NULL; | ||
572 | |||
573 | sg_len = dma_map_sg(chan->dev.parent, sgl, sg_len, direction); | ||
574 | |||
575 | switch (direction) { | ||
576 | case DMA_TO_DEVICE: | ||
577 | ctllo = (DWC_DEFAULT_CTLLO | ||
578 | | DWC_CTLL_DST_WIDTH(reg_width) | ||
579 | | DWC_CTLL_DST_FIX | ||
580 | | DWC_CTLL_SRC_INC | ||
581 | | DWC_CTLL_FC_M2P); | ||
582 | reg = dws->slave.tx_reg; | ||
583 | for_each_sg(sgl, sg, sg_len, i) { | ||
584 | struct dw_desc *desc; | ||
585 | u32 len; | ||
586 | u32 mem; | ||
587 | |||
588 | desc = dwc_desc_get(dwc); | ||
589 | if (!desc) { | ||
590 | dev_err(&chan->dev, | ||
591 | "not enough descriptors available\n"); | ||
592 | goto err_desc_get; | ||
593 | } | ||
594 | |||
595 | mem = sg_phys(sg); | ||
596 | len = sg_dma_len(sg); | ||
597 | mem_width = 2; | ||
598 | if (unlikely(mem & 3 || len & 3)) | ||
599 | mem_width = 0; | ||
600 | |||
601 | desc->lli.sar = mem; | ||
602 | desc->lli.dar = reg; | ||
603 | desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width); | ||
604 | desc->lli.ctlhi = len >> mem_width; | ||
605 | |||
606 | if (!first) { | ||
607 | first = desc; | ||
608 | } else { | ||
609 | prev->lli.llp = desc->txd.phys; | ||
610 | dma_sync_single_for_device(chan->dev.parent, | ||
611 | prev->txd.phys, | ||
612 | sizeof(prev->lli), | ||
613 | DMA_TO_DEVICE); | ||
614 | list_add_tail(&desc->desc_node, | ||
615 | &first->txd.tx_list); | ||
616 | } | ||
617 | prev = desc; | ||
618 | total_len += len; | ||
619 | } | ||
620 | break; | ||
621 | case DMA_FROM_DEVICE: | ||
622 | ctllo = (DWC_DEFAULT_CTLLO | ||
623 | | DWC_CTLL_SRC_WIDTH(reg_width) | ||
624 | | DWC_CTLL_DST_INC | ||
625 | | DWC_CTLL_SRC_FIX | ||
626 | | DWC_CTLL_FC_P2M); | ||
627 | |||
628 | reg = dws->slave.rx_reg; | ||
629 | for_each_sg(sgl, sg, sg_len, i) { | ||
630 | struct dw_desc *desc; | ||
631 | u32 len; | ||
632 | u32 mem; | ||
633 | |||
634 | desc = dwc_desc_get(dwc); | ||
635 | if (!desc) { | ||
636 | dev_err(&chan->dev, | ||
637 | "not enough descriptors available\n"); | ||
638 | goto err_desc_get; | ||
639 | } | ||
640 | |||
641 | mem = sg_phys(sg); | ||
642 | len = sg_dma_len(sg); | ||
643 | mem_width = 2; | ||
644 | if (unlikely(mem & 3 || len & 3)) | ||
645 | mem_width = 0; | ||
646 | |||
647 | desc->lli.sar = reg; | ||
648 | desc->lli.dar = mem; | ||
649 | desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width); | ||
650 | desc->lli.ctlhi = len >> reg_width; | ||
651 | |||
652 | if (!first) { | ||
653 | first = desc; | ||
654 | } else { | ||
655 | prev->lli.llp = desc->txd.phys; | ||
656 | dma_sync_single_for_device(chan->dev.parent, | ||
657 | prev->txd.phys, | ||
658 | sizeof(prev->lli), | ||
659 | DMA_TO_DEVICE); | ||
660 | list_add_tail(&desc->desc_node, | ||
661 | &first->txd.tx_list); | ||
662 | } | ||
663 | prev = desc; | ||
664 | total_len += len; | ||
665 | } | ||
666 | break; | ||
667 | default: | ||
668 | return NULL; | ||
669 | } | ||
670 | |||
671 | if (flags & DMA_PREP_INTERRUPT) | ||
672 | /* Trigger interrupt after last block */ | ||
673 | prev->lli.ctllo |= DWC_CTLL_INT_EN; | ||
674 | |||
675 | prev->lli.llp = 0; | ||
676 | dma_sync_single_for_device(chan->dev.parent, | ||
677 | prev->txd.phys, sizeof(prev->lli), | ||
678 | DMA_TO_DEVICE); | ||
679 | |||
680 | first->len = total_len; | ||
681 | |||
682 | return &first->txd; | ||
683 | |||
684 | err_desc_get: | ||
685 | dwc_desc_put(dwc, first); | ||
686 | return NULL; | ||
687 | } | ||
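
The slave path above depends on per-peripheral data that a board file must supply. The following is a hedged sketch of what such a dw_dma_slave definition might look like; only the field names are taken from how this driver dereferences the structure (slave.dma_dev, slave.tx_reg, slave.rx_reg, slave.reg_width, cfg_hi, cfg_lo), while the variable name, the register addresses and the cfg values are placeholders a real platform would fill in from its datasheet and from the platform bits declared in <linux/dw_dmac.h>.

	#include <linux/dw_dmac.h>

	static struct dw_dma_slave example_uart_dma_slave = {
		.slave = {
			.dma_dev	= NULL,	/* the DMA controller's struct device on a real board */
			.tx_reg		= 0xffe01000 + 0x1c,	/* placeholder peripheral TX FIFO address */
			.rx_reg		= 0xffe01000 + 0x18,	/* placeholder peripheral RX FIFO address */
			.reg_width	= 2,	/* encoded as in dwc_prep_slave_sg(): 2 == 32-bit */
		},
		.cfg_hi	= 0,	/* handshake interface selection, platform-specific */
		.cfg_lo	= 0,
	};
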
688 | |||
689 | static void dwc_terminate_all(struct dma_chan *chan) | ||
690 | { | ||
691 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | ||
692 | struct dw_dma *dw = to_dw_dma(chan->device); | ||
693 | struct dw_desc *desc, *_desc; | ||
694 | LIST_HEAD(list); | ||
695 | |||
696 | /* | ||
697 | * This is only called when something went wrong elsewhere, so | ||
698 | * we don't really care about the data. Just disable the | ||
699 | * channel. We still have to poll the channel enable bit due | ||
700 | * to AHB/HSB limitations. | ||
701 | */ | ||
702 | spin_lock_bh(&dwc->lock); | ||
703 | |||
704 | channel_clear_bit(dw, CH_EN, dwc->mask); | ||
705 | |||
706 | while (dma_readl(dw, CH_EN) & dwc->mask) | ||
707 | cpu_relax(); | ||
708 | |||
709 | /* active_list entries will end up before queued entries */ | ||
710 | list_splice_init(&dwc->queue, &list); | ||
711 | list_splice_init(&dwc->active_list, &list); | ||
712 | |||
713 | spin_unlock_bh(&dwc->lock); | ||
714 | |||
715 | /* Flush all pending and queued descriptors */ | ||
716 | list_for_each_entry_safe(desc, _desc, &list, desc_node) | ||
717 | dwc_descriptor_complete(dwc, desc); | ||
718 | } | ||
719 | |||
720 | static enum dma_status | ||
721 | dwc_is_tx_complete(struct dma_chan *chan, | ||
722 | dma_cookie_t cookie, | ||
723 | dma_cookie_t *done, dma_cookie_t *used) | ||
724 | { | ||
725 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | ||
726 | dma_cookie_t last_used; | ||
727 | dma_cookie_t last_complete; | ||
728 | int ret; | ||
729 | |||
730 | last_complete = dwc->completed; | ||
731 | last_used = chan->cookie; | ||
732 | |||
733 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
734 | if (ret != DMA_SUCCESS) { | ||
735 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); | ||
736 | |||
737 | last_complete = dwc->completed; | ||
738 | last_used = chan->cookie; | ||
739 | |||
740 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
741 | } | ||
742 | |||
743 | if (done) | ||
744 | *done = last_complete; | ||
745 | if (used) | ||
746 | *used = last_used; | ||
747 | |||
748 | return ret; | ||
749 | } | ||
750 | |||
751 | static void dwc_issue_pending(struct dma_chan *chan) | ||
752 | { | ||
753 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | ||
754 | |||
755 | spin_lock_bh(&dwc->lock); | ||
756 | if (!list_empty(&dwc->queue)) | ||
757 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); | ||
758 | spin_unlock_bh(&dwc->lock); | ||
759 | } | ||
760 | |||
761 | static int dwc_alloc_chan_resources(struct dma_chan *chan, | ||
762 | struct dma_client *client) | ||
763 | { | ||
764 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | ||
765 | struct dw_dma *dw = to_dw_dma(chan->device); | ||
766 | struct dw_desc *desc; | ||
767 | struct dma_slave *slave; | ||
768 | struct dw_dma_slave *dws; | ||
769 | int i; | ||
770 | u32 cfghi; | ||
771 | u32 cfglo; | ||
772 | |||
773 | dev_vdbg(&chan->dev, "alloc_chan_resources\n"); | ||
774 | |||
775 | /* Channels doing slave DMA can only handle one client. */ | ||
776 | if (dwc->dws || client->slave) { | ||
777 | if (chan->client_count) | ||
778 | return -EBUSY; | ||
779 | } | ||
780 | |||
781 | /* ASSERT: channel is idle */ | ||
782 | if (dma_readl(dw, CH_EN) & dwc->mask) { | ||
783 | dev_dbg(&chan->dev, "DMA channel not idle?\n"); | ||
784 | return -EIO; | ||
785 | } | ||
786 | |||
787 | dwc->completed = chan->cookie = 1; | ||
788 | |||
789 | cfghi = DWC_CFGH_FIFO_MODE; | ||
790 | cfglo = 0; | ||
791 | |||
792 | slave = client->slave; | ||
793 | if (slave) { | ||
794 | /* | ||
795 | * We need controller-specific data to set up slave | ||
796 | * transfers. | ||
797 | */ | ||
798 | BUG_ON(!slave->dma_dev || slave->dma_dev != dw->dma.dev); | ||
799 | |||
800 | dws = container_of(slave, struct dw_dma_slave, slave); | ||
801 | |||
802 | dwc->dws = dws; | ||
803 | cfghi = dws->cfg_hi; | ||
804 | cfglo = dws->cfg_lo; | ||
805 | } else { | ||
806 | dwc->dws = NULL; | ||
807 | } | ||
808 | |||
809 | channel_writel(dwc, CFG_LO, cfglo); | ||
810 | channel_writel(dwc, CFG_HI, cfghi); | ||
811 | |||
812 | /* | ||
813 | * NOTE: some controllers may have additional features that we | ||
814 | * need to initialize here, like "scatter-gather" (which | ||
815 | * doesn't mean what you think it means), and status writeback. | ||
816 | */ | ||
817 | |||
818 | spin_lock_bh(&dwc->lock); | ||
819 | i = dwc->descs_allocated; | ||
820 | while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) { | ||
821 | spin_unlock_bh(&dwc->lock); | ||
822 | |||
823 | desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL); | ||
824 | if (!desc) { | ||
825 | dev_info(&chan->dev, | ||
826 | "only allocated %d descriptors\n", i); | ||
827 | spin_lock_bh(&dwc->lock); | ||
828 | break; | ||
829 | } | ||
830 | |||
831 | dma_async_tx_descriptor_init(&desc->txd, chan); | ||
832 | desc->txd.tx_submit = dwc_tx_submit; | ||
833 | desc->txd.flags = DMA_CTRL_ACK; | ||
834 | INIT_LIST_HEAD(&desc->txd.tx_list); | ||
835 | desc->txd.phys = dma_map_single(chan->dev.parent, &desc->lli, | ||
836 | sizeof(desc->lli), DMA_TO_DEVICE); | ||
837 | dwc_desc_put(dwc, desc); | ||
838 | |||
839 | spin_lock_bh(&dwc->lock); | ||
840 | i = ++dwc->descs_allocated; | ||
841 | } | ||
842 | |||
843 | /* Enable interrupts */ | ||
844 | channel_set_bit(dw, MASK.XFER, dwc->mask); | ||
845 | channel_set_bit(dw, MASK.BLOCK, dwc->mask); | ||
846 | channel_set_bit(dw, MASK.ERROR, dwc->mask); | ||
847 | |||
848 | spin_unlock_bh(&dwc->lock); | ||
849 | |||
850 | dev_dbg(&chan->dev, | ||
851 | "alloc_chan_resources allocated %d descriptors\n", i); | ||
852 | |||
853 | return i; | ||
854 | } | ||
855 | |||
856 | static void dwc_free_chan_resources(struct dma_chan *chan) | ||
857 | { | ||
858 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | ||
859 | struct dw_dma *dw = to_dw_dma(chan->device); | ||
860 | struct dw_desc *desc, *_desc; | ||
861 | LIST_HEAD(list); | ||
862 | |||
863 | dev_dbg(&chan->dev, "free_chan_resources (descs allocated=%u)\n", | ||
864 | dwc->descs_allocated); | ||
865 | |||
866 | /* ASSERT: channel is idle */ | ||
867 | BUG_ON(!list_empty(&dwc->active_list)); | ||
868 | BUG_ON(!list_empty(&dwc->queue)); | ||
869 | BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask); | ||
870 | |||
871 | spin_lock_bh(&dwc->lock); | ||
872 | list_splice_init(&dwc->free_list, &list); | ||
873 | dwc->descs_allocated = 0; | ||
874 | dwc->dws = NULL; | ||
875 | |||
876 | /* Disable interrupts */ | ||
877 | channel_clear_bit(dw, MASK.XFER, dwc->mask); | ||
878 | channel_clear_bit(dw, MASK.BLOCK, dwc->mask); | ||
879 | channel_clear_bit(dw, MASK.ERROR, dwc->mask); | ||
880 | |||
881 | spin_unlock_bh(&dwc->lock); | ||
882 | |||
883 | list_for_each_entry_safe(desc, _desc, &list, desc_node) { | ||
884 | dev_vdbg(&chan->dev, " freeing descriptor %p\n", desc); | ||
885 | dma_unmap_single(chan->dev.parent, desc->txd.phys, | ||
886 | sizeof(desc->lli), DMA_TO_DEVICE); | ||
887 | kfree(desc); | ||
888 | } | ||
889 | |||
890 | dev_vdbg(&chan->dev, "free_chan_resources done\n"); | ||
891 | } | ||
892 | |||
893 | /*----------------------------------------------------------------------*/ | ||
894 | |||
895 | static void dw_dma_off(struct dw_dma *dw) | ||
896 | { | ||
897 | dma_writel(dw, CFG, 0); | ||
898 | |||
899 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); | ||
900 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); | ||
901 | channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); | ||
902 | channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); | ||
903 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); | ||
904 | |||
905 | while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) | ||
906 | cpu_relax(); | ||
907 | } | ||
908 | |||
909 | static int __init dw_probe(struct platform_device *pdev) | ||
910 | { | ||
911 | struct dw_dma_platform_data *pdata; | ||
912 | struct resource *io; | ||
913 | struct dw_dma *dw; | ||
914 | size_t size; | ||
915 | int irq; | ||
916 | int err; | ||
917 | int i; | ||
918 | |||
919 | pdata = pdev->dev.platform_data; | ||
920 | if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) | ||
921 | return -EINVAL; | ||
922 | |||
923 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
924 | if (!io) | ||
925 | return -EINVAL; | ||
926 | |||
927 | irq = platform_get_irq(pdev, 0); | ||
928 | if (irq < 0) | ||
929 | return irq; | ||
930 | |||
931 | size = sizeof(struct dw_dma); | ||
932 | size += pdata->nr_channels * sizeof(struct dw_dma_chan); | ||
933 | dw = kzalloc(size, GFP_KERNEL); | ||
934 | if (!dw) | ||
935 | return -ENOMEM; | ||
936 | |||
937 | if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) { | ||
938 | err = -EBUSY; | ||
939 | goto err_kfree; | ||
940 | } | ||
941 | |||
942 | memset(dw, 0, sizeof *dw); | ||
943 | |||
944 | dw->regs = ioremap(io->start, DW_REGLEN); | ||
945 | if (!dw->regs) { | ||
946 | err = -ENOMEM; | ||
947 | goto err_release_r; | ||
948 | } | ||
949 | |||
950 | dw->clk = clk_get(&pdev->dev, "hclk"); | ||
951 | if (IS_ERR(dw->clk)) { | ||
952 | err = PTR_ERR(dw->clk); | ||
953 | goto err_clk; | ||
954 | } | ||
955 | clk_enable(dw->clk); | ||
956 | |||
957 | /* force dma off, just in case */ | ||
958 | dw_dma_off(dw); | ||
959 | |||
960 | err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw); | ||
961 | if (err) | ||
962 | goto err_irq; | ||
963 | |||
964 | platform_set_drvdata(pdev, dw); | ||
965 | |||
966 | tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); | ||
967 | |||
968 | dw->all_chan_mask = (1 << pdata->nr_channels) - 1; | ||
969 | |||
970 | INIT_LIST_HEAD(&dw->dma.channels); | ||
971 | for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) { | ||
972 | struct dw_dma_chan *dwc = &dw->chan[i]; | ||
973 | |||
974 | dwc->chan.device = &dw->dma; | ||
975 | dwc->chan.cookie = dwc->completed = 1; | ||
976 | dwc->chan.chan_id = i; | ||
977 | list_add_tail(&dwc->chan.device_node, &dw->dma.channels); | ||
978 | |||
979 | dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; | ||
980 | spin_lock_init(&dwc->lock); | ||
981 | dwc->mask = 1 << i; | ||
982 | |||
983 | INIT_LIST_HEAD(&dwc->active_list); | ||
984 | INIT_LIST_HEAD(&dwc->queue); | ||
985 | INIT_LIST_HEAD(&dwc->free_list); | ||
986 | |||
987 | channel_clear_bit(dw, CH_EN, dwc->mask); | ||
988 | } | ||
989 | |||
990 | /* Clear/disable all interrupts on all channels. */ | ||
991 | dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); | ||
992 | dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask); | ||
993 | dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); | ||
994 | dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); | ||
995 | dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); | ||
996 | |||
997 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); | ||
998 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); | ||
999 | channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); | ||
1000 | channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); | ||
1001 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); | ||
1002 | |||
1003 | dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); | ||
1004 | dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); | ||
1005 | dw->dma.dev = &pdev->dev; | ||
1006 | dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; | ||
1007 | dw->dma.device_free_chan_resources = dwc_free_chan_resources; | ||
1008 | |||
1009 | dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; | ||
1010 | |||
1011 | dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; | ||
1012 | dw->dma.device_terminate_all = dwc_terminate_all; | ||
1013 | |||
1014 | dw->dma.device_is_tx_complete = dwc_is_tx_complete; | ||
1015 | dw->dma.device_issue_pending = dwc_issue_pending; | ||
1016 | |||
1017 | dma_writel(dw, CFG, DW_CFG_DMA_EN); | ||
1018 | |||
1019 | printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n", | ||
1020 | pdev->dev.bus_id, dw->dma.chancnt); | ||
1021 | |||
1022 | dma_async_device_register(&dw->dma); | ||
1023 | |||
1024 | return 0; | ||
1025 | |||
1026 | err_irq: | ||
1027 | clk_disable(dw->clk); | ||
1028 | clk_put(dw->clk); | ||
1029 | err_clk: | ||
1030 | iounmap(dw->regs); | ||
1031 | dw->regs = NULL; | ||
1032 | err_release_r: | ||
1033 | release_resource(io); | ||
1034 | err_kfree: | ||
1035 | kfree(dw); | ||
1036 | return err; | ||
1037 | } | ||
1038 | |||
1039 | static int __exit dw_remove(struct platform_device *pdev) | ||
1040 | { | ||
1041 | struct dw_dma *dw = platform_get_drvdata(pdev); | ||
1042 | struct dw_dma_chan *dwc, *_dwc; | ||
1043 | struct resource *io; | ||
1044 | |||
1045 | dw_dma_off(dw); | ||
1046 | dma_async_device_unregister(&dw->dma); | ||
1047 | |||
1048 | free_irq(platform_get_irq(pdev, 0), dw); | ||
1049 | tasklet_kill(&dw->tasklet); | ||
1050 | |||
1051 | list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, | ||
1052 | chan.device_node) { | ||
1053 | list_del(&dwc->chan.device_node); | ||
1054 | channel_clear_bit(dw, CH_EN, dwc->mask); | ||
1055 | } | ||
1056 | |||
1057 | clk_disable(dw->clk); | ||
1058 | clk_put(dw->clk); | ||
1059 | |||
1060 | iounmap(dw->regs); | ||
1061 | dw->regs = NULL; | ||
1062 | |||
1063 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1064 | release_mem_region(io->start, DW_REGLEN); | ||
1065 | |||
1066 | kfree(dw); | ||
1067 | |||
1068 | return 0; | ||
1069 | } | ||
1070 | |||
1071 | static void dw_shutdown(struct platform_device *pdev) | ||
1072 | { | ||
1073 | struct dw_dma *dw = platform_get_drvdata(pdev); | ||
1074 | |||
1075 | dw_dma_off(platform_get_drvdata(pdev)); | ||
1076 | clk_disable(dw->clk); | ||
1077 | } | ||
1078 | |||
1079 | static int dw_suspend_late(struct platform_device *pdev, pm_message_t mesg) | ||
1080 | { | ||
1081 | struct dw_dma *dw = platform_get_drvdata(pdev); | ||
1082 | |||
1083 | dw_dma_off(platform_get_drvdata(pdev)); | ||
1084 | clk_disable(dw->clk); | ||
1085 | return 0; | ||
1086 | } | ||
1087 | |||
1088 | static int dw_resume_early(struct platform_device *pdev) | ||
1089 | { | ||
1090 | struct dw_dma *dw = platform_get_drvdata(pdev); | ||
1091 | |||
1092 | clk_enable(dw->clk); | ||
1093 | dma_writel(dw, CFG, DW_CFG_DMA_EN); | ||
1094 | return 0; | ||
1095 | |||
1096 | } | ||
1097 | |||
1098 | static struct platform_driver dw_driver = { | ||
1099 | .remove = __exit_p(dw_remove), | ||
1100 | .shutdown = dw_shutdown, | ||
1101 | .suspend_late = dw_suspend_late, | ||
1102 | .resume_early = dw_resume_early, | ||
1103 | .driver = { | ||
1104 | .name = "dw_dmac", | ||
1105 | }, | ||
1106 | }; | ||
1107 | |||
1108 | static int __init dw_init(void) | ||
1109 | { | ||
1110 | return platform_driver_probe(&dw_driver, dw_probe); | ||
1111 | } | ||
1112 | module_init(dw_init); | ||
1113 | |||
1114 | static void __exit dw_exit(void) | ||
1115 | { | ||
1116 | platform_driver_unregister(&dw_driver); | ||
1117 | } | ||
1118 | module_exit(dw_exit); | ||
1119 | |||
1120 | MODULE_LICENSE("GPL v2"); | ||
1121 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver"); | ||
1122 | MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>"); | ||
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h new file mode 100644 index 000000000000..00fdd187bb0c --- /dev/null +++ b/drivers/dma/dw_dmac_regs.h | |||
@@ -0,0 +1,225 @@ | |||
1 | /* | ||
2 | * Driver for the Synopsys DesignWare AHB DMA Controller | ||
3 | * | ||
4 | * Copyright (C) 2005-2007 Atmel Corporation | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #include <linux/dw_dmac.h> | ||
12 | |||
13 | #define DW_DMA_MAX_NR_CHANNELS 8 | ||
14 | |||
15 | /* | ||
16 | * Redefine this macro to handle differences between 32- and 64-bit | ||
17 | * addressing, big vs. little endian, etc. | ||
18 | */ | ||
19 | #define DW_REG(name) u32 name; u32 __pad_##name | ||
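
To make the comment above concrete, here is a small illustration (not part of the header; struct dw_reg_demo is a hypothetical name) of what the 32-bit DW_REG() definition expands to and why it keeps each hardware register on the 8-byte stride the register map uses.

	#include <linux/types.h>
	#include "dw_dmac_regs.h"

	struct dw_reg_demo {
		DW_REG(SAR);	/* expands to: u32 SAR; u32 __pad_SAR; */
		DW_REG(DAR);	/* expands to: u32 DAR; u32 __pad_DAR; */
	};

	/*
	 * sizeof(struct dw_reg_demo) == 16: two registers, 8 bytes apart.
	 * A 64-bit or big-endian port could redefine DW_REG() (say, as a
	 * single u64, or as an accessor-friendly wrapper) without touching
	 * the structure definitions that follow.
	 */
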
20 | |||
21 | /* Hardware register definitions. */ | ||
22 | struct dw_dma_chan_regs { | ||
23 | DW_REG(SAR); /* Source Address Register */ | ||
24 | DW_REG(DAR); /* Destination Address Register */ | ||
25 | DW_REG(LLP); /* Linked List Pointer */ | ||
26 | u32 CTL_LO; /* Control Register Low */ | ||
27 | u32 CTL_HI; /* Control Register High */ | ||
28 | DW_REG(SSTAT); | ||
29 | DW_REG(DSTAT); | ||
30 | DW_REG(SSTATAR); | ||
31 | DW_REG(DSTATAR); | ||
32 | u32 CFG_LO; /* Configuration Register Low */ | ||
33 | u32 CFG_HI; /* Configuration Register High */ | ||
34 | DW_REG(SGR); | ||
35 | DW_REG(DSR); | ||
36 | }; | ||
37 | |||
38 | struct dw_dma_irq_regs { | ||
39 | DW_REG(XFER); | ||
40 | DW_REG(BLOCK); | ||
41 | DW_REG(SRC_TRAN); | ||
42 | DW_REG(DST_TRAN); | ||
43 | DW_REG(ERROR); | ||
44 | }; | ||
45 | |||
46 | struct dw_dma_regs { | ||
47 | /* per-channel registers */ | ||
48 | struct dw_dma_chan_regs CHAN[DW_DMA_MAX_NR_CHANNELS]; | ||
49 | |||
50 | /* irq handling */ | ||
51 | struct dw_dma_irq_regs RAW; /* r */ | ||
52 | struct dw_dma_irq_regs STATUS; /* r (raw & mask) */ | ||
53 | struct dw_dma_irq_regs MASK; /* rw (set = irq enabled) */ | ||
54 | struct dw_dma_irq_regs CLEAR; /* w (ack, affects "raw") */ | ||
55 | |||
56 | DW_REG(STATUS_INT); /* r */ | ||
57 | |||
58 | /* software handshaking */ | ||
59 | DW_REG(REQ_SRC); | ||
60 | DW_REG(REQ_DST); | ||
61 | DW_REG(SGL_REQ_SRC); | ||
62 | DW_REG(SGL_REQ_DST); | ||
63 | DW_REG(LAST_SRC); | ||
64 | DW_REG(LAST_DST); | ||
65 | |||
66 | /* miscellaneous */ | ||
67 | DW_REG(CFG); | ||
68 | DW_REG(CH_EN); | ||
69 | DW_REG(ID); | ||
70 | DW_REG(TEST); | ||
71 | |||
72 | /* optional encoded params, 0x3c8..0x3 */ | ||
73 | }; | ||
74 | |||
75 | /* Bitfields in CTL_LO */ | ||
76 | #define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? */ | ||
77 | #define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */ | ||
78 | #define DWC_CTLL_SRC_WIDTH(n) ((n)<<4) | ||
79 | #define DWC_CTLL_DST_INC (0<<7) /* DAR update/not */ | ||
80 | #define DWC_CTLL_DST_DEC (1<<7) | ||
81 | #define DWC_CTLL_DST_FIX (2<<7) | ||
82 | #define DWC_CTLL_SRC_INC (0<<9) /* SAR update/not */ | ||
83 | #define DWC_CTLL_SRC_DEC (1<<9) | ||
84 | #define DWC_CTLL_SRC_FIX (2<<9) | ||
85 | #define DWC_CTLL_DST_MSIZE(n) ((n)<<11) /* burst, #elements */ | ||
86 | #define DWC_CTLL_SRC_MSIZE(n) ((n)<<14) | ||
87 | #define DWC_CTLL_S_GATH_EN (1 << 17) /* src gather, !FIX */ | ||
88 | #define DWC_CTLL_D_SCAT_EN (1 << 18) /* dst scatter, !FIX */ | ||
89 | #define DWC_CTLL_FC_M2M (0 << 20) /* mem-to-mem */ | ||
90 | #define DWC_CTLL_FC_M2P (1 << 20) /* mem-to-periph */ | ||
91 | #define DWC_CTLL_FC_P2M (2 << 20) /* periph-to-mem */ | ||
92 | #define DWC_CTLL_FC_P2P (3 << 20) /* periph-to-periph */ | ||
93 | /* plus 4 transfer types for peripheral-as-flow-controller */ | ||
94 | #define DWC_CTLL_DMS(n) ((n)<<23) /* dst master select */ | ||
95 | #define DWC_CTLL_SMS(n) ((n)<<25) /* src master select */ | ||
96 | #define DWC_CTLL_LLP_D_EN (1 << 27) /* dest block chain */ | ||
97 | #define DWC_CTLL_LLP_S_EN (1 << 28) /* src block chain */ | ||
98 | |||
99 | /* Bitfields in CTL_HI */ | ||
100 | #define DWC_CTLH_DONE 0x00001000 | ||
101 | #define DWC_CTLH_BLOCK_TS_MASK 0x00000fff | ||
102 | |||
103 | /* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */ | ||
104 | #define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */ | ||
105 | #define DWC_CFGL_FIFO_EMPTY (1 << 9) /* pause xfer */ | ||
106 | #define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */ | ||
107 | #define DWC_CFGL_HS_SRC (1 << 11) /* handshake w/src */ | ||
108 | #define DWC_CFGL_MAX_BURST(x) ((x) << 20) | ||
109 | #define DWC_CFGL_RELOAD_SAR (1 << 30) | ||
110 | #define DWC_CFGL_RELOAD_DAR (1 << 31) | ||
111 | |||
112 | /* Bitfields in CFG_HI. Platform-configurable bits are in <linux/dw_dmac.h> */ | ||
113 | #define DWC_CFGH_DS_UPD_EN (1 << 5) | ||
114 | #define DWC_CFGH_SS_UPD_EN (1 << 6) | ||
115 | |||
116 | /* Bitfields in SGR */ | ||
117 | #define DWC_SGR_SGI(x) ((x) << 0) | ||
118 | #define DWC_SGR_SGC(x) ((x) << 20) | ||
119 | |||
120 | /* Bitfields in DSR */ | ||
121 | #define DWC_DSR_DSI(x) ((x) << 0) | ||
122 | #define DWC_DSR_DSC(x) ((x) << 20) | ||
123 | |||
124 | /* Bitfields in CFG */ | ||
125 | #define DW_CFG_DMA_EN (1 << 0) | ||
126 | |||
127 | #define DW_REGLEN 0x400 | ||
128 | |||
129 | struct dw_dma_chan { | ||
130 | struct dma_chan chan; | ||
131 | void __iomem *ch_regs; | ||
132 | u8 mask; | ||
133 | |||
134 | spinlock_t lock; | ||
135 | |||
136 | /* these other elements are all protected by lock */ | ||
137 | dma_cookie_t completed; | ||
138 | struct list_head active_list; | ||
139 | struct list_head queue; | ||
140 | struct list_head free_list; | ||
141 | |||
142 | struct dw_dma_slave *dws; | ||
143 | |||
144 | unsigned int descs_allocated; | ||
145 | }; | ||
146 | |||
147 | static inline struct dw_dma_chan_regs __iomem * | ||
148 | __dwc_regs(struct dw_dma_chan *dwc) | ||
149 | { | ||
150 | return dwc->ch_regs; | ||
151 | } | ||
152 | |||
153 | #define channel_readl(dwc, name) \ | ||
154 | __raw_readl(&(__dwc_regs(dwc)->name)) | ||
155 | #define channel_writel(dwc, name, val) \ | ||
156 | __raw_writel((val), &(__dwc_regs(dwc)->name)) | ||
157 | |||
158 | static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan) | ||
159 | { | ||
160 | return container_of(chan, struct dw_dma_chan, chan); | ||
161 | } | ||
162 | |||
163 | |||
164 | struct dw_dma { | ||
165 | struct dma_device dma; | ||
166 | void __iomem *regs; | ||
167 | struct tasklet_struct tasklet; | ||
168 | struct clk *clk; | ||
169 | |||
170 | u8 all_chan_mask; | ||
171 | |||
172 | struct dw_dma_chan chan[0]; | ||
173 | }; | ||
174 | |||
175 | static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw) | ||
176 | { | ||
177 | return dw->regs; | ||
178 | } | ||
179 | |||
180 | #define dma_readl(dw, name) \ | ||
181 | __raw_readl(&(__dw_regs(dw)->name)) | ||
182 | #define dma_writel(dw, name, val) \ | ||
183 | __raw_writel((val), &(__dw_regs(dw)->name)) | ||
184 | |||
185 | #define channel_set_bit(dw, reg, mask) \ | ||
186 | dma_writel(dw, reg, ((mask) << 8) | (mask)) | ||
187 | #define channel_clear_bit(dw, reg, mask) \ | ||
188 | dma_writel(dw, reg, ((mask) << 8) | 0) | ||
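
These two macros rely on the controller's write-enable convention: the upper byte of CH_EN and of the MASK registers selects which channel bits the write may touch, and the lower byte supplies the new value, so individual channels can be set or cleared without a read-modify-write cycle. The stand-alone sketch below (not driver code; reg_write() is a hypothetical model of the register) shows the effect.

	#include <stdio.h>

	/* Model of a write to a register with an upper-byte write-enable field. */
	static unsigned int reg_write(unsigned int old, unsigned int val)
	{
		unsigned int we = (val >> 8) & 0xff;	/* which channels to touch */

		return (old & ~we) | (val & we);	/* untouched channels keep their state */
	}

	int main(void)
	{
		unsigned int ch_en = 0x05;		/* channels 0 and 2 enabled */

		ch_en = reg_write(ch_en, (0x02 << 8) | 0x02);	/* channel_set_bit(..., 1 << 1) */
		ch_en = reg_write(ch_en, (0x04 << 8) | 0);	/* channel_clear_bit(..., 1 << 2) */
		printf("CH_EN = 0x%02x\n", ch_en);		/* prints 0x03 */
		return 0;
	}
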
189 | |||
190 | static inline struct dw_dma *to_dw_dma(struct dma_device *ddev) | ||
191 | { | ||
192 | return container_of(ddev, struct dw_dma, dma); | ||
193 | } | ||
194 | |||
195 | /* LLI == Linked List Item; a.k.a. DMA block descriptor */ | ||
196 | struct dw_lli { | ||
197 | /* values that are not changed by hardware */ | ||
198 | dma_addr_t sar; | ||
199 | dma_addr_t dar; | ||
200 | dma_addr_t llp; /* chain to next lli */ | ||
201 | u32 ctllo; | ||
202 | /* values that may get written back: */ | ||
203 | u32 ctlhi; | ||
204 | /* sstat and dstat can snapshot peripheral register state. | ||
205 | * silicon config may discard either or both... | ||
206 | */ | ||
207 | u32 sstat; | ||
208 | u32 dstat; | ||
209 | }; | ||
210 | |||
211 | struct dw_desc { | ||
212 | /* FIRST values the hardware uses */ | ||
213 | struct dw_lli lli; | ||
214 | |||
215 | /* THEN values for driver housekeeping */ | ||
216 | struct list_head desc_node; | ||
217 | struct dma_async_tx_descriptor txd; | ||
218 | size_t len; | ||
219 | }; | ||
220 | |||
221 | static inline struct dw_desc * | ||
222 | txd_to_dw_desc(struct dma_async_tx_descriptor *txd) | ||
223 | { | ||
224 | return container_of(txd, struct dw_desc, txd); | ||
225 | } | ||
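
Finally, a hedged sketch (not driver code; sketch_chain_two_blocks() is a hypothetical helper) of how two dw_lli blocks end up chained, mirroring what dwc_prep_dma_memcpy() builds for a 32-bit-aligned copy: the first block's llp holds the bus address of the second, the last block terminates the chain with llp == 0 and is the only one allowed to raise an interrupt. In the driver each lli lives inside a dw_desc, second_phys comes from dma_map_single() on that descriptor, and dwc_dostart() then points the channel's LLP register at the first block's bus address.

	#include <linux/types.h>
	#include "dw_dmac_regs.h"

	static void sketch_chain_two_blocks(struct dw_lli *first, struct dw_lli *second,
					    dma_addr_t second_phys, dma_addr_t src,
					    dma_addr_t dst, u32 words)
	{
		u32 ctllo = DWC_CTLL_DST_WIDTH(2) | DWC_CTLL_SRC_WIDTH(2)
				| DWC_CTLL_DST_INC | DWC_CTLL_SRC_INC
				| DWC_CTLL_FC_M2M
				| DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN;

		first->sar   = src;
		first->dar   = dst;
		first->ctlhi = words;			/* transfer count, not bytes */
		first->ctllo = ctllo;
		first->llp   = second_phys;		/* hardware fetches this block next */

		second->sar   = src + (words << 2);	/* 32-bit words: 4 bytes each */
		second->dar   = dst + (words << 2);
		second->ctlhi = words;
		second->ctllo = ctllo | DWC_CTLL_INT_EN; /* irq only after the last block */
		second->llp   = 0;			 /* end of chain */
	}
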
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 054eabffc185..c0059ca58340 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c | |||
@@ -366,7 +366,8 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor( | |||
366 | * | 366 | * |
367 | * Return - The number of descriptors allocated. | 367 | * Return - The number of descriptors allocated. |
368 | */ | 368 | */ |
369 | static int fsl_dma_alloc_chan_resources(struct dma_chan *chan) | 369 | static int fsl_dma_alloc_chan_resources(struct dma_chan *chan, |
370 | struct dma_client *client) | ||
370 | { | 371 | { |
371 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | 372 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); |
372 | LIST_HEAD(tmp_list); | 373 | LIST_HEAD(tmp_list); |
@@ -809,8 +810,7 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan) | |||
809 | if (!src) { | 810 | if (!src) { |
810 | dev_err(fsl_chan->dev, | 811 | dev_err(fsl_chan->dev, |
811 | "selftest: Cannot alloc memory for test!\n"); | 812 | "selftest: Cannot alloc memory for test!\n"); |
812 | err = -ENOMEM; | 813 | return -ENOMEM; |
813 | goto out; | ||
814 | } | 814 | } |
815 | 815 | ||
816 | dest = src + test_size; | 816 | dest = src + test_size; |
@@ -820,7 +820,7 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan) | |||
820 | 820 | ||
821 | chan = &fsl_chan->common; | 821 | chan = &fsl_chan->common; |
822 | 822 | ||
823 | if (fsl_dma_alloc_chan_resources(chan) < 1) { | 823 | if (fsl_dma_alloc_chan_resources(chan, NULL) < 1) { |
824 | dev_err(fsl_chan->dev, | 824 | dev_err(fsl_chan->dev, |
825 | "selftest: Cannot alloc resources for DMA\n"); | 825 | "selftest: Cannot alloc resources for DMA\n"); |
826 | err = -ENODEV; | 826 | err = -ENODEV; |
@@ -842,13 +842,13 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan) | |||
842 | if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) { | 842 | if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) { |
843 | dev_err(fsl_chan->dev, "selftest: Time out!\n"); | 843 | dev_err(fsl_chan->dev, "selftest: Time out!\n"); |
844 | err = -ENODEV; | 844 | err = -ENODEV; |
845 | goto out; | 845 | goto free_resources; |
846 | } | 846 | } |
847 | 847 | ||
848 | /* Test free and re-alloc channel resources */ | 848 | /* Test free and re-alloc channel resources */ |
849 | fsl_dma_free_chan_resources(chan); | 849 | fsl_dma_free_chan_resources(chan); |
850 | 850 | ||
851 | if (fsl_dma_alloc_chan_resources(chan) < 1) { | 851 | if (fsl_dma_alloc_chan_resources(chan, NULL) < 1) { |
852 | dev_err(fsl_chan->dev, | 852 | dev_err(fsl_chan->dev, |
853 | "selftest: Cannot alloc resources for DMA\n"); | 853 | "selftest: Cannot alloc resources for DMA\n"); |
854 | err = -ENODEV; | 854 | err = -ENODEV; |
@@ -927,8 +927,7 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev, | |||
927 | if (!new_fsl_chan) { | 927 | if (!new_fsl_chan) { |
928 | dev_err(&dev->dev, "No free memory for allocating " | 928 | dev_err(&dev->dev, "No free memory for allocating " |
929 | "dma channels!\n"); | 929 | "dma channels!\n"); |
930 | err = -ENOMEM; | 930 | return -ENOMEM; |
931 | goto err; | ||
932 | } | 931 | } |
933 | 932 | ||
934 | /* get dma channel register base */ | 933 | /* get dma channel register base */ |
@@ -936,7 +935,7 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev, | |||
936 | if (err) { | 935 | if (err) { |
937 | dev_err(&dev->dev, "Can't get %s property 'reg'\n", | 936 | dev_err(&dev->dev, "Can't get %s property 'reg'\n", |
938 | dev->node->full_name); | 937 | dev->node->full_name); |
939 | goto err; | 938 | goto err_no_reg; |
940 | } | 939 | } |
941 | 940 | ||
942 | new_fsl_chan->feature = *(u32 *)match->data; | 941 | new_fsl_chan->feature = *(u32 *)match->data; |
@@ -958,7 +957,7 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev, | |||
958 | dev_err(&dev->dev, "There is no %d channel!\n", | 957 | dev_err(&dev->dev, "There is no %d channel!\n", |
959 | new_fsl_chan->id); | 958 | new_fsl_chan->id); |
960 | err = -EINVAL; | 959 | err = -EINVAL; |
961 | goto err; | 960 | goto err_no_chan; |
962 | } | 961 | } |
963 | fdev->chan[new_fsl_chan->id] = new_fsl_chan; | 962 | fdev->chan[new_fsl_chan->id] = new_fsl_chan; |
964 | tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet, | 963 | tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet, |
@@ -997,23 +996,26 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev, | |||
997 | if (err) { | 996 | if (err) { |
998 | dev_err(&dev->dev, "DMA channel %s request_irq error " | 997 | dev_err(&dev->dev, "DMA channel %s request_irq error " |
999 | "with return %d\n", dev->node->full_name, err); | 998 | "with return %d\n", dev->node->full_name, err); |
1000 | goto err; | 999 | goto err_no_irq; |
1001 | } | 1000 | } |
1002 | } | 1001 | } |
1003 | 1002 | ||
1004 | err = fsl_dma_self_test(new_fsl_chan); | 1003 | err = fsl_dma_self_test(new_fsl_chan); |
1005 | if (err) | 1004 | if (err) |
1006 | goto err; | 1005 | goto err_self_test; |
1007 | 1006 | ||
1008 | dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id, | 1007 | dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id, |
1009 | match->compatible, new_fsl_chan->irq); | 1008 | match->compatible, new_fsl_chan->irq); |
1010 | 1009 | ||
1011 | return 0; | 1010 | return 0; |
1012 | err: | 1011 | |
1013 | dma_halt(new_fsl_chan); | 1012 | err_self_test: |
1014 | iounmap(new_fsl_chan->reg_base); | ||
1015 | free_irq(new_fsl_chan->irq, new_fsl_chan); | 1013 | free_irq(new_fsl_chan->irq, new_fsl_chan); |
1014 | err_no_irq: | ||
1016 | list_del(&new_fsl_chan->common.device_node); | 1015 | list_del(&new_fsl_chan->common.device_node); |
1016 | err_no_chan: | ||
1017 | iounmap(new_fsl_chan->reg_base); | ||
1018 | err_no_reg: | ||
1017 | kfree(new_fsl_chan); | 1019 | kfree(new_fsl_chan); |
1018 | return err; | 1020 | return err; |
1019 | } | 1021 | } |
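
The reworked probe above replaces the single catch-all err: label with ordered labels (err_self_test, err_no_irq, err_no_chan, err_no_reg), so each failure point unwinds only the resources that were actually acquired. A minimal sketch of that goto-ladder pattern, using invented acquire/release helpers rather than the driver's real calls, looks like this:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical acquire/release pairs standing in for ioremap/request_irq/... */
    static void *get_regs(void)    { return malloc(16); }
    static void  put_regs(void *p) { free(p); }
    static int   get_irq(void)     { return 0; }    /* 0 == success */
    static void  put_irq(void)     { }
    static int   self_test(void)   { return -1; }   /* pretend the self test fails */

    static int probe(void)
    {
            void *regs;
            int err;

            regs = get_regs();
            if (!regs)
                    return -1;              /* nothing acquired yet, plain return */

            err = get_irq();
            if (err)
                    goto err_no_irq;        /* only the regs need undoing */

            err = self_test();
            if (err)
                    goto err_self_test;     /* undo the irq, then the regs */

            return 0;

    err_self_test:
            put_irq();
    err_no_irq:
            put_regs(regs);
            return err;
    }

    int main(void)
    {
            printf("probe() = %d\n", probe());
            return 0;
    }
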
@@ -1054,8 +1056,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev, | |||
1054 | fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL); | 1056 | fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL); |
1055 | if (!fdev) { | 1057 | if (!fdev) { |
1056 | dev_err(&dev->dev, "No enough memory for 'priv'\n"); | 1058 | dev_err(&dev->dev, "No enough memory for 'priv'\n"); |
1057 | err = -ENOMEM; | 1059 | return -ENOMEM; |
1058 | goto err; | ||
1059 | } | 1060 | } |
1060 | fdev->dev = &dev->dev; | 1061 | fdev->dev = &dev->dev; |
1061 | INIT_LIST_HEAD(&fdev->common.channels); | 1062 | INIT_LIST_HEAD(&fdev->common.channels); |
@@ -1065,7 +1066,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev, | |||
1065 | if (err) { | 1066 | if (err) { |
1066 | dev_err(&dev->dev, "Can't get %s property 'reg'\n", | 1067 | dev_err(&dev->dev, "Can't get %s property 'reg'\n", |
1067 | dev->node->full_name); | 1068 | dev->node->full_name); |
1068 | goto err; | 1069 | goto err_no_reg; |
1069 | } | 1070 | } |
1070 | 1071 | ||
1071 | dev_info(&dev->dev, "Probe the Freescale DMA driver for %s " | 1072 | dev_info(&dev->dev, "Probe the Freescale DMA driver for %s " |
@@ -1103,6 +1104,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev, | |||
1103 | 1104 | ||
1104 | err: | 1105 | err: |
1105 | iounmap(fdev->reg_base); | 1106 | iounmap(fdev->reg_base); |
1107 | err_no_reg: | ||
1106 | kfree(fdev); | 1108 | kfree(fdev); |
1107 | return err; | 1109 | return err; |
1108 | } | 1110 | } |
diff --git a/drivers/dma/ioat.c b/drivers/dma/ioat.c index 16e0fd8facfb..9b16a3af9a0a 100644 --- a/drivers/dma/ioat.c +++ b/drivers/dma/ioat.c | |||
@@ -47,6 +47,16 @@ static struct pci_device_id ioat_pci_tbl[] = { | |||
47 | 47 | ||
48 | /* I/OAT v2 platforms */ | 48 | /* I/OAT v2 platforms */ |
49 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) }, | 49 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) }, |
50 | |||
51 | /* I/OAT v3 platforms */ | ||
52 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) }, | ||
53 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) }, | ||
54 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) }, | ||
55 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) }, | ||
56 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) }, | ||
57 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) }, | ||
58 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) }, | ||
59 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) }, | ||
50 | { 0, } | 60 | { 0, } |
51 | }; | 61 | }; |
52 | 62 | ||
@@ -83,6 +93,11 @@ static int ioat_setup_functionality(struct pci_dev *pdev, void __iomem *iobase) | |||
83 | if (device->dma && ioat_dca_enabled) | 93 | if (device->dma && ioat_dca_enabled) |
84 | device->dca = ioat2_dca_init(pdev, iobase); | 94 | device->dca = ioat2_dca_init(pdev, iobase); |
85 | break; | 95 | break; |
96 | case IOAT_VER_3_0: | ||
97 | device->dma = ioat_dma_probe(pdev, iobase); | ||
98 | if (device->dma && ioat_dca_enabled) | ||
99 | device->dca = ioat3_dca_init(pdev, iobase); | ||
100 | break; | ||
86 | default: | 101 | default: |
87 | err = -ENODEV; | 102 | err = -ENODEV; |
88 | break; | 103 | break; |
diff --git a/drivers/dma/ioat_dca.c b/drivers/dma/ioat_dca.c index 9e922760b7ff..6cf622da0286 100644 --- a/drivers/dma/ioat_dca.c +++ b/drivers/dma/ioat_dca.c | |||
@@ -37,12 +37,18 @@ | |||
37 | #include "ioatdma_registers.h" | 37 | #include "ioatdma_registers.h" |
38 | 38 | ||
39 | /* | 39 | /* |
40 | * Bit 16 of a tag map entry is the "valid" bit, if it is set then bits 0:15 | 40 | * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6 |
41 | * contain the bit number of the APIC ID to map into the DCA tag. If the valid | 41 | * contain the bit number of the APIC ID to map into the DCA tag. If the valid |
42 | * bit is not set, then the value must be 0 or 1 and defines the bit in the tag. | 42 | * bit is not set, then the value must be 0 or 1 and defines the bit in the tag. |
43 | */ | 43 | */ |
44 | #define DCA_TAG_MAP_VALID 0x80 | 44 | #define DCA_TAG_MAP_VALID 0x80 |
45 | 45 | ||
46 | #define DCA3_TAG_MAP_BIT_TO_INV 0x80 | ||
47 | #define DCA3_TAG_MAP_BIT_TO_SEL 0x40 | ||
48 | #define DCA3_TAG_MAP_LITERAL_VAL 0x1 | ||
49 | |||
50 | #define DCA_TAG_MAP_MASK 0xDF | ||
51 | |||
46 | /* | 52 | /* |
47 | * "Legacy" DCA systems do not implement the DCA register set in the | 53 | * "Legacy" DCA systems do not implement the DCA register set in the |
48 | * I/OAT device. Software needs direct support for their tag mappings. | 54 | * I/OAT device. Software needs direct support for their tag mappings. |
@@ -95,6 +101,7 @@ struct ioat_dca_slot { | |||
95 | }; | 101 | }; |
96 | 102 | ||
97 | #define IOAT_DCA_MAX_REQ 6 | 103 | #define IOAT_DCA_MAX_REQ 6 |
104 | #define IOAT3_DCA_MAX_REQ 2 | ||
98 | 105 | ||
99 | struct ioat_dca_priv { | 106 | struct ioat_dca_priv { |
100 | void __iomem *iobase; | 107 | void __iomem *iobase; |
@@ -171,7 +178,9 @@ static int ioat_dca_remove_requester(struct dca_provider *dca, | |||
171 | return -ENODEV; | 178 | return -ENODEV; |
172 | } | 179 | } |
173 | 180 | ||
174 | static u8 ioat_dca_get_tag(struct dca_provider *dca, int cpu) | 181 | static u8 ioat_dca_get_tag(struct dca_provider *dca, |
182 | struct device *dev, | ||
183 | int cpu) | ||
175 | { | 184 | { |
176 | struct ioat_dca_priv *ioatdca = dca_priv(dca); | 185 | struct ioat_dca_priv *ioatdca = dca_priv(dca); |
177 | int i, apic_id, bit, value; | 186 | int i, apic_id, bit, value; |
@@ -193,10 +202,26 @@ static u8 ioat_dca_get_tag(struct dca_provider *dca, int cpu) | |||
193 | return tag; | 202 | return tag; |
194 | } | 203 | } |
195 | 204 | ||
205 | static int ioat_dca_dev_managed(struct dca_provider *dca, | ||
206 | struct device *dev) | ||
207 | { | ||
208 | struct ioat_dca_priv *ioatdca = dca_priv(dca); | ||
209 | struct pci_dev *pdev; | ||
210 | int i; | ||
211 | |||
212 | pdev = to_pci_dev(dev); | ||
213 | for (i = 0; i < ioatdca->max_requesters; i++) { | ||
214 | if (ioatdca->req_slots[i].pdev == pdev) | ||
215 | return 1; | ||
216 | } | ||
217 | return 0; | ||
218 | } | ||
219 | |||
196 | static struct dca_ops ioat_dca_ops = { | 220 | static struct dca_ops ioat_dca_ops = { |
197 | .add_requester = ioat_dca_add_requester, | 221 | .add_requester = ioat_dca_add_requester, |
198 | .remove_requester = ioat_dca_remove_requester, | 222 | .remove_requester = ioat_dca_remove_requester, |
199 | .get_tag = ioat_dca_get_tag, | 223 | .get_tag = ioat_dca_get_tag, |
224 | .dev_managed = ioat_dca_dev_managed, | ||
200 | }; | 225 | }; |
201 | 226 | ||
202 | 227 | ||
@@ -207,6 +232,8 @@ struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase) | |||
207 | u8 *tag_map = NULL; | 232 | u8 *tag_map = NULL; |
208 | int i; | 233 | int i; |
209 | int err; | 234 | int err; |
235 | u8 version; | ||
236 | u8 max_requesters; | ||
210 | 237 | ||
211 | if (!system_has_dca_enabled(pdev)) | 238 | if (!system_has_dca_enabled(pdev)) |
212 | return NULL; | 239 | return NULL; |
@@ -237,15 +264,20 @@ struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase) | |||
237 | if (tag_map == NULL) | 264 | if (tag_map == NULL) |
238 | return NULL; | 265 | return NULL; |
239 | 266 | ||
267 | version = readb(iobase + IOAT_VER_OFFSET); | ||
268 | if (version == IOAT_VER_3_0) | ||
269 | max_requesters = IOAT3_DCA_MAX_REQ; | ||
270 | else | ||
271 | max_requesters = IOAT_DCA_MAX_REQ; | ||
272 | |||
240 | dca = alloc_dca_provider(&ioat_dca_ops, | 273 | dca = alloc_dca_provider(&ioat_dca_ops, |
241 | sizeof(*ioatdca) + | 274 | sizeof(*ioatdca) + |
242 | (sizeof(struct ioat_dca_slot) * IOAT_DCA_MAX_REQ)); | 275 | (sizeof(struct ioat_dca_slot) * max_requesters)); |
243 | if (!dca) | 276 | if (!dca) |
244 | return NULL; | 277 | return NULL; |
245 | 278 | ||
246 | ioatdca = dca_priv(dca); | 279 | ioatdca = dca_priv(dca); |
247 | ioatdca->max_requesters = IOAT_DCA_MAX_REQ; | 280 | ioatdca->max_requesters = max_requesters; |
248 | |||
249 | ioatdca->dca_base = iobase + 0x54; | 281 | ioatdca->dca_base = iobase + 0x54; |
250 | 282 | ||
251 | /* copy over the APIC ID to DCA tag mapping */ | 283 | /* copy over the APIC ID to DCA tag mapping */ |
@@ -323,11 +355,13 @@ static int ioat2_dca_remove_requester(struct dca_provider *dca, | |||
323 | return -ENODEV; | 355 | return -ENODEV; |
324 | } | 356 | } |
325 | 357 | ||
326 | static u8 ioat2_dca_get_tag(struct dca_provider *dca, int cpu) | 358 | static u8 ioat2_dca_get_tag(struct dca_provider *dca, |
359 | struct device *dev, | ||
360 | int cpu) | ||
327 | { | 361 | { |
328 | u8 tag; | 362 | u8 tag; |
329 | 363 | ||
330 | tag = ioat_dca_get_tag(dca, cpu); | 364 | tag = ioat_dca_get_tag(dca, dev, cpu); |
331 | tag = (~tag) & 0x1F; | 365 | tag = (~tag) & 0x1F; |
332 | return tag; | 366 | return tag; |
333 | } | 367 | } |
@@ -336,6 +370,7 @@ static struct dca_ops ioat2_dca_ops = { | |||
336 | .add_requester = ioat2_dca_add_requester, | 370 | .add_requester = ioat2_dca_add_requester, |
337 | .remove_requester = ioat2_dca_remove_requester, | 371 | .remove_requester = ioat2_dca_remove_requester, |
338 | .get_tag = ioat2_dca_get_tag, | 372 | .get_tag = ioat2_dca_get_tag, |
373 | .dev_managed = ioat_dca_dev_managed, | ||
339 | }; | 374 | }; |
340 | 375 | ||
341 | static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset) | 376 | static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset) |
@@ -425,3 +460,198 @@ struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase) | |||
425 | 460 | ||
426 | return dca; | 461 | return dca; |
427 | } | 462 | } |
463 | |||
464 | static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev) | ||
465 | { | ||
466 | struct ioat_dca_priv *ioatdca = dca_priv(dca); | ||
467 | struct pci_dev *pdev; | ||
468 | int i; | ||
469 | u16 id; | ||
470 | u16 global_req_table; | ||
471 | |||
472 | /* This implementation only supports PCI-Express */ | ||
473 | if (dev->bus != &pci_bus_type) | ||
474 | return -ENODEV; | ||
475 | pdev = to_pci_dev(dev); | ||
476 | id = dcaid_from_pcidev(pdev); | ||
477 | |||
478 | if (ioatdca->requester_count == ioatdca->max_requesters) | ||
479 | return -ENODEV; | ||
480 | |||
481 | for (i = 0; i < ioatdca->max_requesters; i++) { | ||
482 | if (ioatdca->req_slots[i].pdev == NULL) { | ||
483 | /* found an empty slot */ | ||
484 | ioatdca->requester_count++; | ||
485 | ioatdca->req_slots[i].pdev = pdev; | ||
486 | ioatdca->req_slots[i].rid = id; | ||
487 | global_req_table = | ||
488 | readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET); | ||
489 | writel(id | IOAT_DCA_GREQID_VALID, | ||
490 | ioatdca->iobase + global_req_table + (i * 4)); | ||
491 | return i; | ||
492 | } | ||
493 | } | ||
494 | /* Error, ioatdca->requester_count is out of whack */ | ||
495 | return -EFAULT; | ||
496 | } | ||
497 | |||
498 | static int ioat3_dca_remove_requester(struct dca_provider *dca, | ||
499 | struct device *dev) | ||
500 | { | ||
501 | struct ioat_dca_priv *ioatdca = dca_priv(dca); | ||
502 | struct pci_dev *pdev; | ||
503 | int i; | ||
504 | u16 global_req_table; | ||
505 | |||
506 | /* This implementation only supports PCI-Express */ | ||
507 | if (dev->bus != &pci_bus_type) | ||
508 | return -ENODEV; | ||
509 | pdev = to_pci_dev(dev); | ||
510 | |||
511 | for (i = 0; i < ioatdca->max_requesters; i++) { | ||
512 | if (ioatdca->req_slots[i].pdev == pdev) { | ||
513 | global_req_table = | ||
514 | readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET); | ||
515 | writel(0, ioatdca->iobase + global_req_table + (i * 4)); | ||
516 | ioatdca->req_slots[i].pdev = NULL; | ||
517 | ioatdca->req_slots[i].rid = 0; | ||
518 | ioatdca->requester_count--; | ||
519 | return i; | ||
520 | } | ||
521 | } | ||
522 | return -ENODEV; | ||
523 | } | ||
524 | |||
525 | static u8 ioat3_dca_get_tag(struct dca_provider *dca, | ||
526 | struct device *dev, | ||
527 | int cpu) | ||
528 | { | ||
529 | u8 tag; | ||
530 | |||
531 | struct ioat_dca_priv *ioatdca = dca_priv(dca); | ||
532 | int i, apic_id, bit, value; | ||
533 | u8 entry; | ||
534 | |||
535 | tag = 0; | ||
536 | apic_id = cpu_physical_id(cpu); | ||
537 | |||
538 | for (i = 0; i < IOAT_TAG_MAP_LEN; i++) { | ||
539 | entry = ioatdca->tag_map[i]; | ||
540 | if (entry & DCA3_TAG_MAP_BIT_TO_SEL) { | ||
541 | bit = entry & | ||
542 | ~(DCA3_TAG_MAP_BIT_TO_SEL | DCA3_TAG_MAP_BIT_TO_INV); | ||
543 | value = (apic_id & (1 << bit)) ? 1 : 0; | ||
544 | } else if (entry & DCA3_TAG_MAP_BIT_TO_INV) { | ||
545 | bit = entry & ~DCA3_TAG_MAP_BIT_TO_INV; | ||
546 | value = (apic_id & (1 << bit)) ? 0 : 1; | ||
547 | } else { | ||
548 | value = (entry & DCA3_TAG_MAP_LITERAL_VAL) ? 1 : 0; | ||
549 | } | ||
550 | tag |= (value << i); | ||
551 | } | ||
552 | |||
553 | return tag; | ||
554 | } | ||
555 | |||
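
ioat3_dca_get_tag() above assembles the 8-bit DCA tag one bit at a time: each tag-map byte either selects a bit of the target CPU's APIC ID, selects and inverts it, or contributes a literal 0/1. The standalone sketch below replays that decoding for a made-up APIC ID and tag map; the map contents are illustrative only, not values taken from real hardware.

    #include <stdio.h>
    #include <stdint.h>

    #define BIT_TO_INV   0x80   /* mirrors DCA3_TAG_MAP_BIT_TO_INV */
    #define BIT_TO_SEL   0x40   /* mirrors DCA3_TAG_MAP_BIT_TO_SEL */
    #define LITERAL_VAL  0x01   /* mirrors DCA3_TAG_MAP_LITERAL_VAL */

    static uint8_t make_tag(uint32_t apic_id, const uint8_t map[8])
    {
            uint8_t tag = 0;
            int i, bit, value;

            for (i = 0; i < 8; i++) {
                    uint8_t entry = map[i];

                    if (entry & BIT_TO_SEL) {
                            /* select bit 'bit' of the APIC ID */
                            bit = entry & ~(BIT_TO_SEL | BIT_TO_INV);
                            value = (apic_id >> bit) & 1;
                    } else if (entry & BIT_TO_INV) {
                            /* select and invert bit 'bit' of the APIC ID */
                            bit = entry & ~BIT_TO_INV;
                            value = !((apic_id >> bit) & 1);
                    } else {
                            /* literal 0 or 1 */
                            value = entry & LITERAL_VAL;
                    }
                    tag |= value << i;
            }
            return tag;
    }

    int main(void)
    {
            /* Invented tag map: low four tag bits track APIC ID bits 0-3,
             * bit 4 is inverted APIC ID bit 0, the rest are literal 0/1. */
            const uint8_t map[8] = { 0x40, 0x41, 0x42, 0x43, 0x80, 0x00, 0x01, 0x00 };

            printf("tag for APIC ID 0x05 = 0x%02x\n", make_tag(0x05, map));
            return 0;
    }
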
556 | static struct dca_ops ioat3_dca_ops = { | ||
557 | .add_requester = ioat3_dca_add_requester, | ||
558 | .remove_requester = ioat3_dca_remove_requester, | ||
559 | .get_tag = ioat3_dca_get_tag, | ||
560 | .dev_managed = ioat_dca_dev_managed, | ||
561 | }; | ||
562 | |||
563 | static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset) | ||
564 | { | ||
565 | int slots = 0; | ||
566 | u32 req; | ||
567 | u16 global_req_table; | ||
568 | |||
569 | global_req_table = readw(iobase + dca_offset + IOAT3_DCA_GREQID_OFFSET); | ||
570 | if (global_req_table == 0) | ||
571 | return 0; | ||
572 | |||
573 | do { | ||
574 | req = readl(iobase + global_req_table + (slots * sizeof(u32))); | ||
575 | slots++; | ||
576 | } while ((req & IOAT_DCA_GREQID_LASTID) == 0); | ||
577 | |||
578 | return slots; | ||
579 | } | ||
580 | |||
581 | struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase) | ||
582 | { | ||
583 | struct dca_provider *dca; | ||
584 | struct ioat_dca_priv *ioatdca; | ||
585 | int slots; | ||
586 | int i; | ||
587 | int err; | ||
588 | u16 dca_offset; | ||
589 | u16 csi_fsb_control; | ||
590 | u16 pcie_control; | ||
591 | u8 bit; | ||
592 | |||
593 | union { | ||
594 | u64 full; | ||
595 | struct { | ||
596 | u32 low; | ||
597 | u32 high; | ||
598 | }; | ||
599 | } tag_map; | ||
600 | |||
601 | if (!system_has_dca_enabled(pdev)) | ||
602 | return NULL; | ||
603 | |||
604 | dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET); | ||
605 | if (dca_offset == 0) | ||
606 | return NULL; | ||
607 | |||
608 | slots = ioat3_dca_count_dca_slots(iobase, dca_offset); | ||
609 | if (slots == 0) | ||
610 | return NULL; | ||
611 | |||
612 | dca = alloc_dca_provider(&ioat3_dca_ops, | ||
613 | sizeof(*ioatdca) | ||
614 | + (sizeof(struct ioat_dca_slot) * slots)); | ||
615 | if (!dca) | ||
616 | return NULL; | ||
617 | |||
618 | ioatdca = dca_priv(dca); | ||
619 | ioatdca->iobase = iobase; | ||
620 | ioatdca->dca_base = iobase + dca_offset; | ||
621 | ioatdca->max_requesters = slots; | ||
622 | |||
623 | /* some bios might not know to turn these on */ | ||
624 | csi_fsb_control = readw(ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET); | ||
625 | if ((csi_fsb_control & IOAT3_CSI_CONTROL_PREFETCH) == 0) { | ||
626 | csi_fsb_control |= IOAT3_CSI_CONTROL_PREFETCH; | ||
627 | writew(csi_fsb_control, | ||
628 | ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET); | ||
629 | } | ||
630 | pcie_control = readw(ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET); | ||
631 | if ((pcie_control & IOAT3_PCI_CONTROL_MEMWR) == 0) { | ||
632 | pcie_control |= IOAT3_PCI_CONTROL_MEMWR; | ||
633 | writew(pcie_control, | ||
634 | ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET); | ||
635 | } | ||
636 | |||
637 | |||
638 | /* TODO version, compatibility and configuration checks */ | ||
639 | |||
640 | /* copy out the APIC to DCA tag map */ | ||
641 | tag_map.low = | ||
642 | readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_LOW); | ||
643 | tag_map.high = | ||
644 | readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_HIGH); | ||
645 | for (i = 0; i < 8; i++) { | ||
646 | bit = tag_map.full >> (8 * i); | ||
647 | ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK; | ||
648 | } | ||
649 | |||
650 | err = register_dca_provider(dca, &pdev->dev); | ||
651 | if (err) { | ||
652 | free_dca_provider(dca); | ||
653 | return NULL; | ||
654 | } | ||
655 | |||
656 | return dca; | ||
657 | } | ||
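
ioat3_dca_init() above reads the APIC-ID-to-tag map as two 32-bit words, reassembles them through a union, and masks each byte with DCA_TAG_MAP_MASK before caching it. The sketch below shows the same low/high reassembly and per-byte masking with invented register values; it assumes a little-endian layout, as on the x86 systems I/OAT ships in.

    #include <stdio.h>
    #include <stdint.h>

    #define TAG_MAP_MASK 0xDF   /* mirrors DCA_TAG_MAP_MASK: drop bit 5 */

    int main(void)
    {
            /* Pretend these came from two 32-bit register reads. */
            uint32_t low  = 0x43424140;   /* invented values */
            uint32_t high = 0x00016080;

            union {
                    uint64_t full;
                    struct { uint32_t low, high; } half;   /* little-endian assumed */
            } tag_map;
            uint8_t map[8];
            int i;

            tag_map.half.low  = low;
            tag_map.half.high = high;

            for (i = 0; i < 8; i++) {
                    /* one tag-map byte per iteration, with the mask applied */
                    map[i] = (uint8_t)(tag_map.full >> (8 * i)) & TAG_MAP_MASK;
                    printf("map[%d] = 0x%02x\n", i, map[i]);
            }
            return 0;
    }
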
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c index 318e8a22d814..a52156e56886 100644 --- a/drivers/dma/ioat_dma.c +++ b/drivers/dma/ioat_dma.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/dmaengine.h> | 32 | #include <linux/dmaengine.h> |
33 | #include <linux/delay.h> | 33 | #include <linux/delay.h> |
34 | #include <linux/dma-mapping.h> | 34 | #include <linux/dma-mapping.h> |
35 | #include <linux/workqueue.h> | ||
35 | #include "ioatdma.h" | 36 | #include "ioatdma.h" |
36 | #include "ioatdma_registers.h" | 37 | #include "ioatdma_registers.h" |
37 | #include "ioatdma_hw.h" | 38 | #include "ioatdma_hw.h" |
@@ -41,11 +42,23 @@ | |||
41 | #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) | 42 | #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) |
42 | #define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx) | 43 | #define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx) |
43 | 44 | ||
45 | #define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80) | ||
44 | static int ioat_pending_level = 4; | 46 | static int ioat_pending_level = 4; |
45 | module_param(ioat_pending_level, int, 0644); | 47 | module_param(ioat_pending_level, int, 0644); |
46 | MODULE_PARM_DESC(ioat_pending_level, | 48 | MODULE_PARM_DESC(ioat_pending_level, |
47 | "high-water mark for pushing ioat descriptors (default: 4)"); | 49 | "high-water mark for pushing ioat descriptors (default: 4)"); |
48 | 50 | ||
51 | #define RESET_DELAY msecs_to_jiffies(100) | ||
52 | #define WATCHDOG_DELAY round_jiffies(msecs_to_jiffies(2000)) | ||
53 | static void ioat_dma_chan_reset_part2(struct work_struct *work); | ||
54 | static void ioat_dma_chan_watchdog(struct work_struct *work); | ||
55 | |||
56 | /* | ||
57 | * workaround for IOAT ver.3.0 null descriptor issue | ||
58 | * (channel returns error when size is 0) | ||
59 | */ | ||
60 | #define NULL_DESC_BUFFER_SIZE 1 | ||
61 | |||
49 | /* internal functions */ | 62 | /* internal functions */ |
50 | static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan); | 63 | static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan); |
51 | static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan); | 64 | static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan); |
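
The new chan_num() macro above recovers a channel number for diagnostics from the MMIO layout alone: per-channel register banks sit at consecutive 0x80-byte offsets from the device's register base, so dividing the offset by 0x80 yields the bank index. A trivial userspace illustration of that arithmetic, with an invented base address:

    #include <stdio.h>
    #include <stdint.h>

    #define BANK_STRIDE 0x80   /* per-channel MMIO bank size, as in chan_num() */

    int main(void)
    {
            /* Invented MMIO layout: device base plus one 0x80 bank per channel,
             * starting at +0x80 as in ioat_dma_enumerate_channels(). */
            uintptr_t dev_base = 0xfeb00000;
            int i;

            for (i = 0; i < 4; i++) {
                    uintptr_t chan_base = dev_base + BANK_STRIDE * (i + 1);

                    /* same arithmetic as chan_num(): offset / bank size */
                    printf("bank at +0x%03lx -> chan%lu\n",
                           (unsigned long)(chan_base - dev_base),
                           (unsigned long)((chan_base - dev_base) / BANK_STRIDE));
            }
            return 0;
    }
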
@@ -122,6 +135,38 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device) | |||
122 | int i; | 135 | int i; |
123 | struct ioat_dma_chan *ioat_chan; | 136 | struct ioat_dma_chan *ioat_chan; |
124 | 137 | ||
138 | /* | ||
139 | * IOAT ver.3 workarounds | ||
140 | */ | ||
141 | if (device->version == IOAT_VER_3_0) { | ||
142 | u32 chan_err_mask; | ||
143 | u16 dev_id; | ||
144 | u32 dmauncerrsts; | ||
145 | |||
146 | /* | ||
147 | * Write CHANERRMSK_INT with 3E07h to mask out the errors | ||
148 | * that can cause stability issues for IOAT ver.3 | ||
149 | */ | ||
150 | chan_err_mask = 0x3E07; | ||
151 | pci_write_config_dword(device->pdev, | ||
152 | IOAT_PCI_CHANERRMASK_INT_OFFSET, | ||
153 | chan_err_mask); | ||
154 | |||
155 | /* | ||
156 | * Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit | ||
157 | * (workaround for spurious config parity error after restart) | ||
158 | */ | ||
159 | pci_read_config_word(device->pdev, | ||
160 | IOAT_PCI_DEVICE_ID_OFFSET, | ||
161 | &dev_id); | ||
162 | if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) { | ||
163 | dmauncerrsts = 0x10; | ||
164 | pci_write_config_dword(device->pdev, | ||
165 | IOAT_PCI_DMAUNCERRSTS_OFFSET, | ||
166 | dmauncerrsts); | ||
167 | } | ||
168 | } | ||
169 | |||
125 | device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); | 170 | device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); |
126 | xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET); | 171 | xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET); |
127 | xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale)); | 172 | xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale)); |
@@ -137,6 +182,7 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device) | |||
137 | ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1)); | 182 | ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1)); |
138 | ioat_chan->xfercap = xfercap; | 183 | ioat_chan->xfercap = xfercap; |
139 | ioat_chan->desccount = 0; | 184 | ioat_chan->desccount = 0; |
185 | INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2); | ||
140 | if (ioat_chan->device->version != IOAT_VER_1_2) { | 186 | if (ioat_chan->device->version != IOAT_VER_1_2) { |
141 | writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | 187 | writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE |
142 | | IOAT_DMA_DCA_ANY_CPU, | 188 | | IOAT_DMA_DCA_ANY_CPU, |
@@ -175,7 +221,7 @@ static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan) | |||
175 | { | 221 | { |
176 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); | 222 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); |
177 | 223 | ||
178 | if (ioat_chan->pending != 0) { | 224 | if (ioat_chan->pending > 0) { |
179 | spin_lock_bh(&ioat_chan->desc_lock); | 225 | spin_lock_bh(&ioat_chan->desc_lock); |
180 | __ioat1_dma_memcpy_issue_pending(ioat_chan); | 226 | __ioat1_dma_memcpy_issue_pending(ioat_chan); |
181 | spin_unlock_bh(&ioat_chan->desc_lock); | 227 | spin_unlock_bh(&ioat_chan->desc_lock); |
@@ -194,13 +240,228 @@ static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan) | |||
194 | { | 240 | { |
195 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); | 241 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); |
196 | 242 | ||
197 | if (ioat_chan->pending != 0) { | 243 | if (ioat_chan->pending > 0) { |
198 | spin_lock_bh(&ioat_chan->desc_lock); | 244 | spin_lock_bh(&ioat_chan->desc_lock); |
199 | __ioat2_dma_memcpy_issue_pending(ioat_chan); | 245 | __ioat2_dma_memcpy_issue_pending(ioat_chan); |
200 | spin_unlock_bh(&ioat_chan->desc_lock); | 246 | spin_unlock_bh(&ioat_chan->desc_lock); |
201 | } | 247 | } |
202 | } | 248 | } |
203 | 249 | ||
250 | |||
251 | /** | ||
252 | * ioat_dma_chan_reset_part2 - reinit the channel after a reset | ||
253 | */ | ||
254 | static void ioat_dma_chan_reset_part2(struct work_struct *work) | ||
255 | { | ||
256 | struct ioat_dma_chan *ioat_chan = | ||
257 | container_of(work, struct ioat_dma_chan, work.work); | ||
258 | struct ioat_desc_sw *desc; | ||
259 | |||
260 | spin_lock_bh(&ioat_chan->cleanup_lock); | ||
261 | spin_lock_bh(&ioat_chan->desc_lock); | ||
262 | |||
263 | ioat_chan->completion_virt->low = 0; | ||
264 | ioat_chan->completion_virt->high = 0; | ||
265 | ioat_chan->pending = 0; | ||
266 | |||
267 | /* | ||
268 | * count the descriptors waiting, and be sure to do it | ||
269 | * right for both the CB1 line and the CB2 ring | ||
270 | */ | ||
271 | ioat_chan->dmacount = 0; | ||
272 | if (ioat_chan->used_desc.prev) { | ||
273 | desc = to_ioat_desc(ioat_chan->used_desc.prev); | ||
274 | do { | ||
275 | ioat_chan->dmacount++; | ||
276 | desc = to_ioat_desc(desc->node.next); | ||
277 | } while (&desc->node != ioat_chan->used_desc.next); | ||
278 | } | ||
279 | |||
280 | /* | ||
281 | * write the new starting descriptor address | ||
282 | * this puts channel engine into ARMED state | ||
283 | */ | ||
284 | desc = to_ioat_desc(ioat_chan->used_desc.prev); | ||
285 | switch (ioat_chan->device->version) { | ||
286 | case IOAT_VER_1_2: | ||
287 | writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, | ||
288 | ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); | ||
289 | writel(((u64) desc->async_tx.phys) >> 32, | ||
290 | ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); | ||
291 | |||
292 | writeb(IOAT_CHANCMD_START, ioat_chan->reg_base | ||
293 | + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); | ||
294 | break; | ||
295 | case IOAT_VER_2_0: | ||
296 | writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, | ||
297 | ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); | ||
298 | writel(((u64) desc->async_tx.phys) >> 32, | ||
299 | ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); | ||
300 | |||
301 | /* tell the engine to go with what's left to be done */ | ||
302 | writew(ioat_chan->dmacount, | ||
303 | ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); | ||
304 | |||
305 | break; | ||
306 | } | ||
307 | dev_err(&ioat_chan->device->pdev->dev, | ||
308 | "chan%d reset - %d descs waiting, %d total desc\n", | ||
309 | chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); | ||
310 | |||
311 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
312 | spin_unlock_bh(&ioat_chan->cleanup_lock); | ||
313 | } | ||
314 | |||
315 | /** | ||
316 | * ioat_dma_reset_channel - restart a channel | ||
317 | * @ioat_chan: IOAT DMA channel handle | ||
318 | */ | ||
319 | static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan) | ||
320 | { | ||
321 | u32 chansts, chanerr; | ||
322 | |||
323 | if (!ioat_chan->used_desc.prev) | ||
324 | return; | ||
325 | |||
326 | chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); | ||
327 | chansts = (ioat_chan->completion_virt->low | ||
328 | & IOAT_CHANSTS_DMA_TRANSFER_STATUS); | ||
329 | if (chanerr) { | ||
330 | dev_err(&ioat_chan->device->pdev->dev, | ||
331 | "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n", | ||
332 | chan_num(ioat_chan), chansts, chanerr); | ||
333 | writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); | ||
334 | } | ||
335 | |||
336 | /* | ||
337 | * whack it upside the head with a reset | ||
338 | * and wait for things to settle out. | ||
339 | * force the pending count to a really big negative | ||
340 | * to make sure no one forces an issue_pending | ||
341 | * while we're waiting. | ||
342 | */ | ||
343 | |||
344 | spin_lock_bh(&ioat_chan->desc_lock); | ||
345 | ioat_chan->pending = INT_MIN; | ||
346 | writeb(IOAT_CHANCMD_RESET, | ||
347 | ioat_chan->reg_base | ||
348 | + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); | ||
349 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
350 | |||
351 | /* schedule the 2nd half instead of sleeping a long time */ | ||
352 | schedule_delayed_work(&ioat_chan->work, RESET_DELAY); | ||
353 | } | ||
354 | |||
355 | /** | ||
356 | * ioat_dma_chan_watchdog - watch for stuck channels | ||
357 | */ | ||
358 | static void ioat_dma_chan_watchdog(struct work_struct *work) | ||
359 | { | ||
360 | struct ioatdma_device *device = | ||
361 | container_of(work, struct ioatdma_device, work.work); | ||
362 | struct ioat_dma_chan *ioat_chan; | ||
363 | int i; | ||
364 | |||
365 | union { | ||
366 | u64 full; | ||
367 | struct { | ||
368 | u32 low; | ||
369 | u32 high; | ||
370 | }; | ||
371 | } completion_hw; | ||
372 | unsigned long compl_desc_addr_hw; | ||
373 | |||
374 | for (i = 0; i < device->common.chancnt; i++) { | ||
375 | ioat_chan = ioat_lookup_chan_by_index(device, i); | ||
376 | |||
377 | if (ioat_chan->device->version == IOAT_VER_1_2 | ||
378 | /* have we started processing anything yet */ | ||
379 | && ioat_chan->last_completion | ||
380 | /* have we completed any since last watchdog cycle? */ | ||
381 | && (ioat_chan->last_completion == | ||
382 | ioat_chan->watchdog_completion) | ||
383 | /* is TCP stuck on one cookie since the last watchdog? */ | ||
384 | && (ioat_chan->watchdog_tcp_cookie == | ||
385 | ioat_chan->watchdog_last_tcp_cookie) | ||
386 | && (ioat_chan->watchdog_tcp_cookie != | ||
387 | ioat_chan->completed_cookie) | ||
388 | /* is there something in the chain to be processed? */ | ||
389 | /* CB1 chain always has at least the last one processed */ | ||
390 | && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next) | ||
391 | && ioat_chan->pending == 0) { | ||
392 | |||
393 | /* | ||
394 | * Read the completed descriptor address | ||
395 | * from the CHANSTS register. | ||
396 | * If it differs from the completion writeback, | ||
397 | * is non-zero, | ||
398 | * and has changed since the last watchdog pass, | ||
399 | * the channel is still making progress | ||
400 | * and only the completion writeback is stale: | ||
401 | * refresh the writeback | ||
402 | * with the actual CHANSTS value. | ||
403 | * Otherwise, | ||
404 | * assume the channel is stuck | ||
405 | * and try resetting it. | ||
406 | */ | ||
407 | |||
408 | completion_hw.low = readl(ioat_chan->reg_base + | ||
409 | IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version)); | ||
410 | completion_hw.high = readl(ioat_chan->reg_base + | ||
411 | IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version)); | ||
412 | #if (BITS_PER_LONG == 64) | ||
413 | compl_desc_addr_hw = | ||
414 | completion_hw.full | ||
415 | & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; | ||
416 | #else | ||
417 | compl_desc_addr_hw = | ||
418 | completion_hw.low & IOAT_LOW_COMPLETION_MASK; | ||
419 | #endif | ||
420 | |||
421 | if ((compl_desc_addr_hw != 0) | ||
422 | && (compl_desc_addr_hw != ioat_chan->watchdog_completion) | ||
423 | && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) { | ||
424 | ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw; | ||
425 | ioat_chan->completion_virt->low = completion_hw.low; | ||
426 | ioat_chan->completion_virt->high = completion_hw.high; | ||
427 | } else { | ||
428 | ioat_dma_reset_channel(ioat_chan); | ||
429 | ioat_chan->watchdog_completion = 0; | ||
430 | ioat_chan->last_compl_desc_addr_hw = 0; | ||
431 | } | ||
432 | |||
433 | /* | ||
434 | * For version 2.0: if there are descriptors yet to be processed | ||
435 | * and the last completion hasn't changed since the last watchdog | ||
436 | * pass, then if the pending count hasn't reached the pending | ||
437 | * level, issue the pending work to push the descriptors through; | ||
438 | * otherwise | ||
439 | * try resetting the channel. | ||
440 | */ | ||
441 | } else if (ioat_chan->device->version == IOAT_VER_2_0 | ||
442 | && ioat_chan->used_desc.prev | ||
443 | && ioat_chan->last_completion | ||
444 | && ioat_chan->last_completion == ioat_chan->watchdog_completion) { | ||
445 | |||
446 | if (ioat_chan->pending < ioat_pending_level) | ||
447 | ioat2_dma_memcpy_issue_pending(&ioat_chan->common); | ||
448 | else { | ||
449 | ioat_dma_reset_channel(ioat_chan); | ||
450 | ioat_chan->watchdog_completion = 0; | ||
451 | } | ||
452 | } else { | ||
453 | ioat_chan->last_compl_desc_addr_hw = 0; | ||
454 | ioat_chan->watchdog_completion | ||
455 | = ioat_chan->last_completion; | ||
456 | } | ||
457 | |||
458 | ioat_chan->watchdog_last_tcp_cookie = | ||
459 | ioat_chan->watchdog_tcp_cookie; | ||
460 | } | ||
461 | |||
462 | schedule_delayed_work(&device->work, WATCHDOG_DELAY); | ||
463 | } | ||
464 | |||
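
The CB1 (version 1.2) branch of the watchdog above distinguishes "completion writeback went stale" from "channel is stuck": when the completed-descriptor address read straight from CHANSTS is non-zero, differs from the writeback, and has moved since the last pass, only the writeback is refreshed; otherwise the channel is reset. Below is a compact restatement of that decision in plain C with an invented channel-state struct; it is a reading aid under those assumptions, not driver code.

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Invented snapshot of the fields the watchdog compares. */
    struct chan_state {
            uint64_t writeback_addr;   /* completion writeback in memory       */
            uint64_t chansts_addr;     /* completed addr read from CHANSTS     */
            uint64_t last_hw_addr;     /* CHANSTS value seen last watchdog run */
    };

    enum action { REFRESH_WRITEBACK, RESET_CHANNEL };

    static enum action watchdog_decide(const struct chan_state *s)
    {
            bool hw_moved = s->chansts_addr != 0 &&
                            s->chansts_addr != s->writeback_addr &&
                            s->chansts_addr != s->last_hw_addr;

            /* Hardware is still completing work: trust CHANSTS and refresh
             * the stale writeback.  Otherwise assume the channel is stuck. */
            return hw_moved ? REFRESH_WRITEBACK : RESET_CHANNEL;
    }

    int main(void)
    {
            struct chan_state stale = { 0x1000, 0x1400, 0x1200 };
            struct chan_state stuck = { 0x1000, 0x1000, 0x1000 };

            printf("stale writeback -> %s\n",
                   watchdog_decide(&stale) == REFRESH_WRITEBACK ? "refresh" : "reset");
            printf("stuck channel   -> %s\n",
                   watchdog_decide(&stuck) == REFRESH_WRITEBACK ? "refresh" : "reset");
            return 0;
    }
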
204 | static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) | 465 | static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) |
205 | { | 466 | { |
206 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan); | 467 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan); |
@@ -250,6 +511,13 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) | |||
250 | prev = new; | 511 | prev = new; |
251 | } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan))); | 512 | } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan))); |
252 | 513 | ||
514 | if (!new) { | ||
515 | dev_err(&ioat_chan->device->pdev->dev, | ||
516 | "tx submit failed\n"); | ||
517 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
518 | return -ENOMEM; | ||
519 | } | ||
520 | |||
253 | hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS; | 521 | hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS; |
254 | if (new->async_tx.callback) { | 522 | if (new->async_tx.callback) { |
255 | hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; | 523 | hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; |
@@ -335,7 +603,14 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) | |||
335 | desc_count++; | 603 | desc_count++; |
336 | } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan))); | 604 | } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan))); |
337 | 605 | ||
338 | hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS; | 606 | if (!new) { |
607 | dev_err(&ioat_chan->device->pdev->dev, | ||
608 | "tx submit failed\n"); | ||
609 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
610 | return -ENOMEM; | ||
611 | } | ||
612 | |||
613 | hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS; | ||
339 | if (new->async_tx.callback) { | 614 | if (new->async_tx.callback) { |
340 | hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; | 615 | hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; |
341 | if (first != new) { | 616 | if (first != new) { |
@@ -406,6 +681,7 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor( | |||
406 | desc_sw->async_tx.tx_submit = ioat1_tx_submit; | 681 | desc_sw->async_tx.tx_submit = ioat1_tx_submit; |
407 | break; | 682 | break; |
408 | case IOAT_VER_2_0: | 683 | case IOAT_VER_2_0: |
684 | case IOAT_VER_3_0: | ||
409 | desc_sw->async_tx.tx_submit = ioat2_tx_submit; | 685 | desc_sw->async_tx.tx_submit = ioat2_tx_submit; |
410 | break; | 686 | break; |
411 | } | 687 | } |
@@ -452,7 +728,8 @@ static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan) | |||
452 | * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors | 728 | * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors |
453 | * @chan: the channel to be filled out | 729 | * @chan: the channel to be filled out |
454 | */ | 730 | */ |
455 | static int ioat_dma_alloc_chan_resources(struct dma_chan *chan) | 731 | static int ioat_dma_alloc_chan_resources(struct dma_chan *chan, |
732 | struct dma_client *client) | ||
456 | { | 733 | { |
457 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); | 734 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); |
458 | struct ioat_desc_sw *desc; | 735 | struct ioat_desc_sw *desc; |
@@ -555,6 +832,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan) | |||
555 | } | 832 | } |
556 | break; | 833 | break; |
557 | case IOAT_VER_2_0: | 834 | case IOAT_VER_2_0: |
835 | case IOAT_VER_3_0: | ||
558 | list_for_each_entry_safe(desc, _desc, | 836 | list_for_each_entry_safe(desc, _desc, |
559 | ioat_chan->free_desc.next, node) { | 837 | ioat_chan->free_desc.next, node) { |
560 | list_del(&desc->node); | 838 | list_del(&desc->node); |
@@ -585,6 +863,10 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan) | |||
585 | ioat_chan->last_completion = ioat_chan->completion_addr = 0; | 863 | ioat_chan->last_completion = ioat_chan->completion_addr = 0; |
586 | ioat_chan->pending = 0; | 864 | ioat_chan->pending = 0; |
587 | ioat_chan->dmacount = 0; | 865 | ioat_chan->dmacount = 0; |
866 | ioat_chan->watchdog_completion = 0; | ||
867 | ioat_chan->last_compl_desc_addr_hw = 0; | ||
868 | ioat_chan->watchdog_tcp_cookie = | ||
869 | ioat_chan->watchdog_last_tcp_cookie = 0; | ||
588 | } | 870 | } |
589 | 871 | ||
590 | /** | 872 | /** |
@@ -640,7 +922,8 @@ ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) | |||
640 | 922 | ||
641 | /* set up the noop descriptor */ | 923 | /* set up the noop descriptor */ |
642 | noop_desc = to_ioat_desc(ioat_chan->used_desc.next); | 924 | noop_desc = to_ioat_desc(ioat_chan->used_desc.next); |
643 | noop_desc->hw->size = 0; | 925 | /* set size to non-zero value (channel returns error when size is 0) */ |
926 | noop_desc->hw->size = NULL_DESC_BUFFER_SIZE; | ||
644 | noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL; | 927 | noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL; |
645 | noop_desc->hw->src_addr = 0; | 928 | noop_desc->hw->src_addr = 0; |
646 | noop_desc->hw->dst_addr = 0; | 929 | noop_desc->hw->dst_addr = 0; |
@@ -690,6 +973,7 @@ static struct ioat_desc_sw *ioat_dma_get_next_descriptor( | |||
690 | return ioat1_dma_get_next_descriptor(ioat_chan); | 973 | return ioat1_dma_get_next_descriptor(ioat_chan); |
691 | break; | 974 | break; |
692 | case IOAT_VER_2_0: | 975 | case IOAT_VER_2_0: |
976 | case IOAT_VER_3_0: | ||
693 | return ioat2_dma_get_next_descriptor(ioat_chan); | 977 | return ioat2_dma_get_next_descriptor(ioat_chan); |
694 | break; | 978 | break; |
695 | } | 979 | } |
@@ -716,8 +1000,12 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy( | |||
716 | new->src = dma_src; | 1000 | new->src = dma_src; |
717 | new->async_tx.flags = flags; | 1001 | new->async_tx.flags = flags; |
718 | return &new->async_tx; | 1002 | return &new->async_tx; |
719 | } else | 1003 | } else { |
1004 | dev_err(&ioat_chan->device->pdev->dev, | ||
1005 | "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n", | ||
1006 | chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); | ||
720 | return NULL; | 1007 | return NULL; |
1008 | } | ||
721 | } | 1009 | } |
722 | 1010 | ||
723 | static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy( | 1011 | static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy( |
@@ -744,8 +1032,13 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy( | |||
744 | new->src = dma_src; | 1032 | new->src = dma_src; |
745 | new->async_tx.flags = flags; | 1033 | new->async_tx.flags = flags; |
746 | return &new->async_tx; | 1034 | return &new->async_tx; |
747 | } else | 1035 | } else { |
1036 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
1037 | dev_err(&ioat_chan->device->pdev->dev, | ||
1038 | "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n", | ||
1039 | chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); | ||
748 | return NULL; | 1040 | return NULL; |
1041 | } | ||
749 | } | 1042 | } |
750 | 1043 | ||
751 | static void ioat_dma_cleanup_tasklet(unsigned long data) | 1044 | static void ioat_dma_cleanup_tasklet(unsigned long data) |
@@ -756,6 +1049,27 @@ static void ioat_dma_cleanup_tasklet(unsigned long data) | |||
756 | chan->reg_base + IOAT_CHANCTRL_OFFSET); | 1049 | chan->reg_base + IOAT_CHANCTRL_OFFSET); |
757 | } | 1050 | } |
758 | 1051 | ||
1052 | static void | ||
1053 | ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc) | ||
1054 | { | ||
1055 | /* | ||
1056 | * yes we are unmapping both _page and _single | ||
1057 | * alloc'd regions with unmap_page. Is this | ||
1058 | * *really* that bad? | ||
1059 | */ | ||
1060 | if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) | ||
1061 | pci_unmap_page(ioat_chan->device->pdev, | ||
1062 | pci_unmap_addr(desc, dst), | ||
1063 | pci_unmap_len(desc, len), | ||
1064 | PCI_DMA_FROMDEVICE); | ||
1065 | |||
1066 | if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) | ||
1067 | pci_unmap_page(ioat_chan->device->pdev, | ||
1068 | pci_unmap_addr(desc, src), | ||
1069 | pci_unmap_len(desc, len), | ||
1070 | PCI_DMA_TODEVICE); | ||
1071 | } | ||
1072 | |||
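
Both the new ioat_dma_unmap() helper above and the iop-adma change later in this diff gate the source/destination unmapping on the DMA_COMPL_SKIP_SRC_UNMAP / DMA_COMPL_SKIP_DEST_UNMAP descriptor flags, so clients that manage their own mappings can opt out. A small userspace sketch of the flag test, with stand-in flag values and unmap stubs:

    #include <stdio.h>

    /* Stand-ins for the dmaengine completion-control flag bits. */
    #define SKIP_SRC_UNMAP   (1u << 0)
    #define SKIP_DEST_UNMAP  (1u << 1)

    static void unmap_dst(void) { puts("unmapping destination"); }
    static void unmap_src(void) { puts("unmapping source"); }

    static void complete_descriptor(unsigned int flags)
    {
            /* Only unmap what the submitter did not ask us to skip. */
            if (!(flags & SKIP_DEST_UNMAP))
                    unmap_dst();
            if (!(flags & SKIP_SRC_UNMAP))
                    unmap_src();
    }

    int main(void)
    {
            puts("default flags:");
            complete_descriptor(0);

            puts("client keeps its own source mapping:");
            complete_descriptor(SKIP_SRC_UNMAP);
            return 0;
    }
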
759 | /** | 1073 | /** |
760 | * ioat_dma_memcpy_cleanup - cleanup up finished descriptors | 1074 | * ioat_dma_memcpy_cleanup - cleanup up finished descriptors |
761 | * @chan: ioat channel to be cleaned up | 1075 | * @chan: ioat channel to be cleaned up |
@@ -799,11 +1113,27 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) | |||
799 | 1113 | ||
800 | if (phys_complete == ioat_chan->last_completion) { | 1114 | if (phys_complete == ioat_chan->last_completion) { |
801 | spin_unlock_bh(&ioat_chan->cleanup_lock); | 1115 | spin_unlock_bh(&ioat_chan->cleanup_lock); |
1116 | /* | ||
1117 | * perhaps we're stuck so hard that the watchdog can't go off? | ||
1118 | * try to catch it after 2 seconds | ||
1119 | */ | ||
1120 | if (ioat_chan->device->version != IOAT_VER_3_0) { | ||
1121 | if (time_after(jiffies, | ||
1122 | ioat_chan->last_completion_time + HZ*WATCHDOG_DELAY)) { | ||
1123 | ioat_dma_chan_watchdog(&(ioat_chan->device->work.work)); | ||
1124 | ioat_chan->last_completion_time = jiffies; | ||
1125 | } | ||
1126 | } | ||
802 | return; | 1127 | return; |
803 | } | 1128 | } |
1129 | ioat_chan->last_completion_time = jiffies; | ||
804 | 1130 | ||
805 | cookie = 0; | 1131 | cookie = 0; |
806 | spin_lock_bh(&ioat_chan->desc_lock); | 1132 | if (!spin_trylock_bh(&ioat_chan->desc_lock)) { |
1133 | spin_unlock_bh(&ioat_chan->cleanup_lock); | ||
1134 | return; | ||
1135 | } | ||
1136 | |||
807 | switch (ioat_chan->device->version) { | 1137 | switch (ioat_chan->device->version) { |
808 | case IOAT_VER_1_2: | 1138 | case IOAT_VER_1_2: |
809 | list_for_each_entry_safe(desc, _desc, | 1139 | list_for_each_entry_safe(desc, _desc, |
@@ -816,21 +1146,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) | |||
816 | */ | 1146 | */ |
817 | if (desc->async_tx.cookie) { | 1147 | if (desc->async_tx.cookie) { |
818 | cookie = desc->async_tx.cookie; | 1148 | cookie = desc->async_tx.cookie; |
819 | 1149 | ioat_dma_unmap(ioat_chan, desc); | |
820 | /* | ||
821 | * yes we are unmapping both _page and _single | ||
822 | * alloc'd regions with unmap_page. Is this | ||
823 | * *really* that bad? | ||
824 | */ | ||
825 | pci_unmap_page(ioat_chan->device->pdev, | ||
826 | pci_unmap_addr(desc, dst), | ||
827 | pci_unmap_len(desc, len), | ||
828 | PCI_DMA_FROMDEVICE); | ||
829 | pci_unmap_page(ioat_chan->device->pdev, | ||
830 | pci_unmap_addr(desc, src), | ||
831 | pci_unmap_len(desc, len), | ||
832 | PCI_DMA_TODEVICE); | ||
833 | |||
834 | if (desc->async_tx.callback) { | 1150 | if (desc->async_tx.callback) { |
835 | desc->async_tx.callback(desc->async_tx.callback_param); | 1151 | desc->async_tx.callback(desc->async_tx.callback_param); |
836 | desc->async_tx.callback = NULL; | 1152 | desc->async_tx.callback = NULL; |
@@ -862,6 +1178,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) | |||
862 | } | 1178 | } |
863 | break; | 1179 | break; |
864 | case IOAT_VER_2_0: | 1180 | case IOAT_VER_2_0: |
1181 | case IOAT_VER_3_0: | ||
865 | /* has some other thread already cleaned up? */ | 1182 |
866 | if (ioat_chan->used_desc.prev == NULL) | 1183 | if (ioat_chan->used_desc.prev == NULL) |
867 | break; | 1184 | break; |
@@ -889,16 +1206,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) | |||
889 | if (desc->async_tx.cookie) { | 1206 | if (desc->async_tx.cookie) { |
890 | cookie = desc->async_tx.cookie; | 1207 | cookie = desc->async_tx.cookie; |
891 | desc->async_tx.cookie = 0; | 1208 | desc->async_tx.cookie = 0; |
892 | 1209 | ioat_dma_unmap(ioat_chan, desc); | |
893 | pci_unmap_page(ioat_chan->device->pdev, | ||
894 | pci_unmap_addr(desc, dst), | ||
895 | pci_unmap_len(desc, len), | ||
896 | PCI_DMA_FROMDEVICE); | ||
897 | pci_unmap_page(ioat_chan->device->pdev, | ||
898 | pci_unmap_addr(desc, src), | ||
899 | pci_unmap_len(desc, len), | ||
900 | PCI_DMA_TODEVICE); | ||
901 | |||
902 | if (desc->async_tx.callback) { | 1210 | if (desc->async_tx.callback) { |
903 | desc->async_tx.callback(desc->async_tx.callback_param); | 1211 | desc->async_tx.callback(desc->async_tx.callback_param); |
904 | desc->async_tx.callback = NULL; | 1212 | desc->async_tx.callback = NULL; |
@@ -943,6 +1251,7 @@ static enum dma_status ioat_dma_is_complete(struct dma_chan *chan, | |||
943 | 1251 | ||
944 | last_used = chan->cookie; | 1252 | last_used = chan->cookie; |
945 | last_complete = ioat_chan->completed_cookie; | 1253 | last_complete = ioat_chan->completed_cookie; |
1254 | ioat_chan->watchdog_tcp_cookie = cookie; | ||
946 | 1255 | ||
947 | if (done) | 1256 | if (done) |
948 | *done = last_complete; | 1257 | *done = last_complete; |
@@ -973,10 +1282,19 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan) | |||
973 | spin_lock_bh(&ioat_chan->desc_lock); | 1282 | spin_lock_bh(&ioat_chan->desc_lock); |
974 | 1283 | ||
975 | desc = ioat_dma_get_next_descriptor(ioat_chan); | 1284 | desc = ioat_dma_get_next_descriptor(ioat_chan); |
1285 | |||
1286 | if (!desc) { | ||
1287 | dev_err(&ioat_chan->device->pdev->dev, | ||
1288 | "Unable to start null desc - get next desc failed\n"); | ||
1289 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
1290 | return; | ||
1291 | } | ||
1292 | |||
976 | desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL | 1293 | desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL |
977 | | IOAT_DMA_DESCRIPTOR_CTL_INT_GN | 1294 | | IOAT_DMA_DESCRIPTOR_CTL_INT_GN |
978 | | IOAT_DMA_DESCRIPTOR_CTL_CP_STS; | 1295 | | IOAT_DMA_DESCRIPTOR_CTL_CP_STS; |
979 | desc->hw->size = 0; | 1296 | /* set size to non-zero value (channel returns error when size is 0) */ |
1297 | desc->hw->size = NULL_DESC_BUFFER_SIZE; | ||
980 | desc->hw->src_addr = 0; | 1298 | desc->hw->src_addr = 0; |
981 | desc->hw->dst_addr = 0; | 1299 | desc->hw->dst_addr = 0; |
982 | async_tx_ack(&desc->async_tx); | 1300 | async_tx_ack(&desc->async_tx); |
@@ -994,6 +1312,7 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan) | |||
994 | + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); | 1312 | + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); |
995 | break; | 1313 | break; |
996 | case IOAT_VER_2_0: | 1314 | case IOAT_VER_2_0: |
1315 | case IOAT_VER_3_0: | ||
997 | writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, | 1316 | writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, |
998 | ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); | 1317 | ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); |
999 | writel(((u64) desc->async_tx.phys) >> 32, | 1318 | writel(((u64) desc->async_tx.phys) >> 32, |
@@ -1049,7 +1368,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device) | |||
1049 | dma_chan = container_of(device->common.channels.next, | 1368 | dma_chan = container_of(device->common.channels.next, |
1050 | struct dma_chan, | 1369 | struct dma_chan, |
1051 | device_node); | 1370 | device_node); |
1052 | if (device->common.device_alloc_chan_resources(dma_chan) < 1) { | 1371 | if (device->common.device_alloc_chan_resources(dma_chan, NULL) < 1) { |
1053 | dev_err(&device->pdev->dev, | 1372 | dev_err(&device->pdev->dev, |
1054 | "selftest cannot allocate chan resource\n"); | 1373 | "selftest cannot allocate chan resource\n"); |
1055 | err = -ENODEV; | 1374 | err = -ENODEV; |
@@ -1312,6 +1631,7 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, | |||
1312 | ioat1_dma_memcpy_issue_pending; | 1631 | ioat1_dma_memcpy_issue_pending; |
1313 | break; | 1632 | break; |
1314 | case IOAT_VER_2_0: | 1633 | case IOAT_VER_2_0: |
1634 | case IOAT_VER_3_0: | ||
1315 | device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy; | 1635 | device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy; |
1316 | device->common.device_issue_pending = | 1636 | device->common.device_issue_pending = |
1317 | ioat2_dma_memcpy_issue_pending; | 1637 | ioat2_dma_memcpy_issue_pending; |
@@ -1331,8 +1651,16 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, | |||
1331 | if (err) | 1651 | if (err) |
1332 | goto err_self_test; | 1652 | goto err_self_test; |
1333 | 1653 | ||
1654 | ioat_set_tcp_copy_break(device); | ||
1655 | |||
1334 | dma_async_device_register(&device->common); | 1656 | dma_async_device_register(&device->common); |
1335 | 1657 | ||
1658 | if (device->version != IOAT_VER_3_0) { | ||
1659 | INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog); | ||
1660 | schedule_delayed_work(&device->work, | ||
1661 | WATCHDOG_DELAY); | ||
1662 | } | ||
1663 | |||
1336 | return device; | 1664 | return device; |
1337 | 1665 | ||
1338 | err_self_test: | 1666 | err_self_test: |
@@ -1365,6 +1693,10 @@ void ioat_dma_remove(struct ioatdma_device *device) | |||
1365 | pci_release_regions(device->pdev); | 1693 | pci_release_regions(device->pdev); |
1366 | pci_disable_device(device->pdev); | 1694 | pci_disable_device(device->pdev); |
1367 | 1695 | ||
1696 | if (device->version != IOAT_VER_3_0) { | ||
1697 | cancel_delayed_work(&device->work); | ||
1698 | } | ||
1699 | |||
1368 | list_for_each_entry_safe(chan, _chan, | 1700 | list_for_each_entry_safe(chan, _chan, |
1369 | &device->common.channels, device_node) { | 1701 | &device->common.channels, device_node) { |
1370 | ioat_chan = to_ioat_chan(chan); | 1702 | ioat_chan = to_ioat_chan(chan); |
diff --git a/drivers/dma/ioatdma.h b/drivers/dma/ioatdma.h index f2c7fedbf009..a3306d0e1372 100644 --- a/drivers/dma/ioatdma.h +++ b/drivers/dma/ioatdma.h | |||
@@ -27,8 +27,9 @@ | |||
27 | #include <linux/dmapool.h> | 27 | #include <linux/dmapool.h> |
28 | #include <linux/cache.h> | 28 | #include <linux/cache.h> |
29 | #include <linux/pci_ids.h> | 29 | #include <linux/pci_ids.h> |
30 | #include <net/tcp.h> | ||
30 | 31 | ||
31 | #define IOAT_DMA_VERSION "2.04" | 32 | #define IOAT_DMA_VERSION "3.30" |
32 | 33 | ||
33 | enum ioat_interrupt { | 34 | enum ioat_interrupt { |
34 | none = 0, | 35 | none = 0, |
@@ -40,6 +41,7 @@ enum ioat_interrupt { | |||
40 | 41 | ||
41 | #define IOAT_LOW_COMPLETION_MASK 0xffffffc0 | 42 | #define IOAT_LOW_COMPLETION_MASK 0xffffffc0 |
42 | #define IOAT_DMA_DCA_ANY_CPU ~0 | 43 | #define IOAT_DMA_DCA_ANY_CPU ~0 |
44 | #define IOAT_WATCHDOG_PERIOD (2 * HZ) | ||
43 | 45 | ||
44 | 46 | ||
45 | /** | 47 | /** |
@@ -62,6 +64,7 @@ struct ioatdma_device { | |||
62 | struct dma_device common; | 64 | struct dma_device common; |
63 | u8 version; | 65 | u8 version; |
64 | enum ioat_interrupt irq_mode; | 66 | enum ioat_interrupt irq_mode; |
67 | struct delayed_work work; | ||
65 | struct msix_entry msix_entries[4]; | 68 | struct msix_entry msix_entries[4]; |
66 | struct ioat_dma_chan *idx[4]; | 69 | struct ioat_dma_chan *idx[4]; |
67 | }; | 70 | }; |
@@ -75,6 +78,7 @@ struct ioat_dma_chan { | |||
75 | 78 | ||
76 | dma_cookie_t completed_cookie; | 79 | dma_cookie_t completed_cookie; |
77 | unsigned long last_completion; | 80 | unsigned long last_completion; |
81 | unsigned long last_completion_time; | ||
78 | 82 | ||
79 | size_t xfercap; /* XFERCAP register value expanded out */ | 83 | size_t xfercap; /* XFERCAP register value expanded out */ |
80 | 84 | ||
@@ -82,6 +86,10 @@ struct ioat_dma_chan { | |||
82 | spinlock_t desc_lock; | 86 | spinlock_t desc_lock; |
83 | struct list_head free_desc; | 87 | struct list_head free_desc; |
84 | struct list_head used_desc; | 88 | struct list_head used_desc; |
89 | unsigned long watchdog_completion; | ||
90 | int watchdog_tcp_cookie; | ||
91 | u32 watchdog_last_tcp_cookie; | ||
92 | struct delayed_work work; | ||
85 | 93 | ||
86 | int pending; | 94 | int pending; |
87 | int dmacount; | 95 | int dmacount; |
@@ -98,6 +106,7 @@ struct ioat_dma_chan { | |||
98 | u32 high; | 106 | u32 high; |
99 | }; | 107 | }; |
100 | } *completion_virt; | 108 | } *completion_virt; |
109 | unsigned long last_compl_desc_addr_hw; | ||
101 | struct tasklet_struct cleanup_task; | 110 | struct tasklet_struct cleanup_task; |
102 | }; | 111 | }; |
103 | 112 | ||
@@ -121,17 +130,34 @@ struct ioat_desc_sw { | |||
121 | struct dma_async_tx_descriptor async_tx; | 130 | struct dma_async_tx_descriptor async_tx; |
122 | }; | 131 | }; |
123 | 132 | ||
133 | static inline void ioat_set_tcp_copy_break(struct ioatdma_device *dev) | ||
134 | { | ||
135 | #ifdef CONFIG_NET_DMA | ||
136 | switch (dev->version) { | ||
137 | case IOAT_VER_1_2: | ||
138 | case IOAT_VER_3_0: | ||
139 | sysctl_tcp_dma_copybreak = 4096; | ||
140 | break; | ||
141 | case IOAT_VER_2_0: | ||
142 | sysctl_tcp_dma_copybreak = 2048; | ||
143 | break; | ||
144 | } | ||
145 | #endif | ||
146 | } | ||
147 | |||
124 | #if defined(CONFIG_INTEL_IOATDMA) || defined(CONFIG_INTEL_IOATDMA_MODULE) | 148 | #if defined(CONFIG_INTEL_IOATDMA) || defined(CONFIG_INTEL_IOATDMA_MODULE) |
125 | struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, | 149 | struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, |
126 | void __iomem *iobase); | 150 | void __iomem *iobase); |
127 | void ioat_dma_remove(struct ioatdma_device *device); | 151 | void ioat_dma_remove(struct ioatdma_device *device); |
128 | struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); | 152 | struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); |
129 | struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); | 153 | struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); |
154 | struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); | ||
130 | #else | 155 | #else |
131 | #define ioat_dma_probe(pdev, iobase) NULL | 156 | #define ioat_dma_probe(pdev, iobase) NULL |
132 | #define ioat_dma_remove(device) do { } while (0) | 157 | #define ioat_dma_remove(device) do { } while (0) |
133 | #define ioat_dca_init(pdev, iobase) NULL | 158 | #define ioat_dca_init(pdev, iobase) NULL |
134 | #define ioat2_dca_init(pdev, iobase) NULL | 159 | #define ioat2_dca_init(pdev, iobase) NULL |
160 | #define ioat3_dca_init(pdev, iobase) NULL | ||
135 | #endif | 161 | #endif |
136 | 162 | ||
137 | #endif /* IOATDMA_H */ | 163 | #endif /* IOATDMA_H */ |
diff --git a/drivers/dma/ioatdma_hw.h b/drivers/dma/ioatdma_hw.h index dd470fa91d86..f1ae2c776f74 100644 --- a/drivers/dma/ioatdma_hw.h +++ b/drivers/dma/ioatdma_hw.h | |||
@@ -35,6 +35,7 @@ | |||
35 | #define IOAT_PCI_SID 0x8086 | 35 | #define IOAT_PCI_SID 0x8086 |
36 | #define IOAT_VER_1_2 0x12 /* Version 1.2 */ | 36 | #define IOAT_VER_1_2 0x12 /* Version 1.2 */ |
37 | #define IOAT_VER_2_0 0x20 /* Version 2.0 */ | 37 | #define IOAT_VER_2_0 0x20 /* Version 2.0 */ |
38 | #define IOAT_VER_3_0 0x30 /* Version 3.0 */ | ||
38 | 39 | ||
39 | struct ioat_dma_descriptor { | 40 | struct ioat_dma_descriptor { |
40 | uint32_t size; | 41 | uint32_t size; |
diff --git a/drivers/dma/ioatdma_registers.h b/drivers/dma/ioatdma_registers.h index 9832d7ebd931..827cb503cac6 100644 --- a/drivers/dma/ioatdma_registers.h +++ b/drivers/dma/ioatdma_registers.h | |||
@@ -25,6 +25,10 @@ | |||
25 | #define IOAT_PCI_DMACTRL_DMA_EN 0x00000001 | 25 | #define IOAT_PCI_DMACTRL_DMA_EN 0x00000001 |
26 | #define IOAT_PCI_DMACTRL_MSI_EN 0x00000002 | 26 | #define IOAT_PCI_DMACTRL_MSI_EN 0x00000002 |
27 | 27 | ||
28 | #define IOAT_PCI_DEVICE_ID_OFFSET 0x02 | ||
29 | #define IOAT_PCI_DMAUNCERRSTS_OFFSET 0x148 | ||
30 | #define IOAT_PCI_CHANERRMASK_INT_OFFSET 0x184 | ||
31 | |||
28 | /* MMIO Device Registers */ | 32 | /* MMIO Device Registers */ |
29 | #define IOAT_CHANCNT_OFFSET 0x00 /* 8-bit */ | 33 | #define IOAT_CHANCNT_OFFSET 0x00 /* 8-bit */ |
30 | 34 | ||
@@ -149,7 +153,23 @@ | |||
149 | #define IOAT_DCA_GREQID_VALID 0x20000000 | 153 | #define IOAT_DCA_GREQID_VALID 0x20000000 |
150 | #define IOAT_DCA_GREQID_LASTID 0x80000000 | 154 | #define IOAT_DCA_GREQID_LASTID 0x80000000 |
151 | 155 | ||
156 | #define IOAT3_CSI_CAPABILITY_OFFSET 0x08 | ||
157 | #define IOAT3_CSI_CAPABILITY_PREFETCH 0x1 | ||
158 | |||
159 | #define IOAT3_PCI_CAPABILITY_OFFSET 0x0A | ||
160 | #define IOAT3_PCI_CAPABILITY_MEMWR 0x1 | ||
161 | |||
162 | #define IOAT3_CSI_CONTROL_OFFSET 0x0C | ||
163 | #define IOAT3_CSI_CONTROL_PREFETCH 0x1 | ||
164 | |||
165 | #define IOAT3_PCI_CONTROL_OFFSET 0x0E | ||
166 | #define IOAT3_PCI_CONTROL_MEMWR 0x1 | ||
167 | |||
168 | #define IOAT3_APICID_TAG_MAP_OFFSET 0x10 | ||
169 | #define IOAT3_APICID_TAG_MAP_OFFSET_LOW 0x10 | ||
170 | #define IOAT3_APICID_TAG_MAP_OFFSET_HIGH 0x14 | ||
152 | 171 | ||
172 | #define IOAT3_DCA_GREQID_OFFSET 0x02 | ||
153 | 173 | ||
154 | #define IOAT1_CHAINADDR_OFFSET 0x0C /* 64-bit Descriptor Chain Address Register */ | 174 | #define IOAT1_CHAINADDR_OFFSET 0x0C /* 64-bit Descriptor Chain Address Register */ |
155 | #define IOAT2_CHAINADDR_OFFSET 0x10 /* 64-bit Descriptor Chain Address Register */ | 175 | #define IOAT2_CHAINADDR_OFFSET 0x10 /* 64-bit Descriptor Chain Address Register */ |
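The new IOAT3_* offsets above pair a capability register with a control register for CSI prefetch and PCI memory-write. A hedged, compile-standalone sketch of the probe-then-enable pattern they suggest: the offsets and bit values come from the diff, while the 16-bit register width and the mmio[] model are assumptions made only for illustration.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define IOAT3_CSI_CAPABILITY_OFFSET	0x08
#define IOAT3_CSI_CAPABILITY_PREFETCH	0x1
#define IOAT3_PCI_CAPABILITY_OFFSET	0x0A
#define IOAT3_PCI_CAPABILITY_MEMWR	0x1
#define IOAT3_CSI_CONTROL_OFFSET	0x0C
#define IOAT3_CSI_CONTROL_PREFETCH	0x1
#define IOAT3_PCI_CONTROL_OFFSET	0x0E
#define IOAT3_PCI_CONTROL_MEMWR		0x1

static uint8_t mmio[0x20];		/* stand-in for the mapped register block */

static uint16_t reg_read16(unsigned int off)
{
	uint16_t v;
	memcpy(&v, &mmio[off], sizeof(v));
	return v;
}

static void reg_write16(unsigned int off, uint16_t v)
{
	memcpy(&mmio[off], &v, sizeof(v));
}

int main(void)
{
	/* pretend the hardware advertises both capabilities */
	reg_write16(IOAT3_CSI_CAPABILITY_OFFSET, IOAT3_CSI_CAPABILITY_PREFETCH);
	reg_write16(IOAT3_PCI_CAPABILITY_OFFSET, IOAT3_PCI_CAPABILITY_MEMWR);

	/* enable only what the capability registers report */
	if (reg_read16(IOAT3_CSI_CAPABILITY_OFFSET) & IOAT3_CSI_CAPABILITY_PREFETCH)
		reg_write16(IOAT3_CSI_CONTROL_OFFSET,
			    reg_read16(IOAT3_CSI_CONTROL_OFFSET) |
			    IOAT3_CSI_CONTROL_PREFETCH);
	if (reg_read16(IOAT3_PCI_CAPABILITY_OFFSET) & IOAT3_PCI_CAPABILITY_MEMWR)
		reg_write16(IOAT3_PCI_CONTROL_OFFSET,
			    reg_read16(IOAT3_PCI_CONTROL_OFFSET) |
			    IOAT3_PCI_CONTROL_MEMWR);

	printf("csi control 0x%04x, pci control 0x%04x\n",
	       reg_read16(IOAT3_CSI_CONTROL_OFFSET),
	       reg_read16(IOAT3_PCI_CONTROL_OFFSET));
	return 0;
}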
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index 0ec0f431e6a1..85bfeba4d85e 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c | |||
@@ -82,17 +82,24 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, | |||
82 | struct device *dev = | 82 | struct device *dev = |
83 | &iop_chan->device->pdev->dev; | 83 | &iop_chan->device->pdev->dev; |
84 | u32 len = unmap->unmap_len; | 84 | u32 len = unmap->unmap_len; |
85 | u32 src_cnt = unmap->unmap_src_cnt; | 85 | enum dma_ctrl_flags flags = desc->async_tx.flags; |
86 | dma_addr_t addr = iop_desc_get_dest_addr(unmap, | 86 | u32 src_cnt; |
87 | iop_chan); | 87 | dma_addr_t addr; |
88 | 88 | ||
89 | dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE); | 89 | if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { |
90 | while (src_cnt--) { | 90 | addr = iop_desc_get_dest_addr(unmap, iop_chan); |
91 | addr = iop_desc_get_src_addr(unmap, | 91 | dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE); |
92 | iop_chan, | 92 | } |
93 | src_cnt); | 93 | |
94 | dma_unmap_page(dev, addr, len, | 94 | if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { |
95 | DMA_TO_DEVICE); | 95 | src_cnt = unmap->unmap_src_cnt; |
96 | while (src_cnt--) { | ||
97 | addr = iop_desc_get_src_addr(unmap, | ||
98 | iop_chan, | ||
99 | src_cnt); | ||
100 | dma_unmap_page(dev, addr, len, | ||
101 | DMA_TO_DEVICE); | ||
102 | } | ||
96 | } | 103 | } |
97 | desc->group_head = NULL; | 104 | desc->group_head = NULL; |
98 | } | 105 | } |
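The hunk above gates descriptor unmapping on the client's control flags, so a caller that manages its own DMA mappings can ask the driver to skip the unmap on completion. A compile-standalone sketch of that gating; the flag values below are illustrative bit positions, not the kernel's enum dma_ctrl_flags:

#include <stdio.h>

#define SKIP_SRC_UNMAP	(1u << 0)	/* stands in for DMA_COMPL_SKIP_SRC_UNMAP */
#define SKIP_DEST_UNMAP	(1u << 1)	/* stands in for DMA_COMPL_SKIP_DEST_UNMAP */

static void complete_tx(unsigned long flags, unsigned int src_cnt)
{
	if (!(flags & SKIP_DEST_UNMAP))
		printf("unmap dest (DMA_FROM_DEVICE)\n");

	if (!(flags & SKIP_SRC_UNMAP))
		while (src_cnt--)
			printf("unmap src[%u] (DMA_TO_DEVICE)\n", src_cnt);
}

int main(void)
{
	complete_tx(0, 2);				/* driver unmaps everything */
	complete_tx(SKIP_SRC_UNMAP | SKIP_DEST_UNMAP, 2); /* client keeps its mappings */
	return 0;
}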
@@ -366,8 +373,8 @@ retry: | |||
366 | if (!retry++) | 373 | if (!retry++) |
367 | goto retry; | 374 | goto retry; |
368 | 375 | ||
369 | /* try to free some slots if the allocation fails */ | 376 | /* perform direct reclaim if the allocation fails */ |
370 | tasklet_schedule(&iop_chan->irq_tasklet); | 377 | __iop_adma_slot_cleanup(iop_chan); |
371 | 378 | ||
372 | return NULL; | 379 | return NULL; |
373 | } | 380 | } |
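The failure path above now reclaims completed descriptors synchronously (direct reclaim) instead of only scheduling the cleanup tasklet and returning empty-handed. A standalone sketch of the two-pass search followed by in-line reclaim; the pool model and names are illustrative only:

#include <stdio.h>

#define NSLOTS 4

static int busy[NSLOTS] = { 1, 1, 1, 1 };	/* every slot still in flight */
static int last_used = 2;			/* where the last search stopped */

static int find_free(int start)
{
	int i;

	for (i = start; i < NSLOTS; i++)
		if (!busy[i])
			return i;
	return -1;
}

static void direct_reclaim(void)
{
	/* stands in for __iop_adma_slot_cleanup(): finished descriptors are
	 * completed in-line so their slots become reusable immediately */
	busy[0] = 0;
}

static int alloc_slot(void)
{
	int retry = 0, idx;

retry:
	idx = find_free(retry ? 0 : last_used);	/* second pass restarts at the head */
	if (idx >= 0) {
		busy[idx] = 1;
		return idx;
	}
	if (!retry++)
		goto retry;

	direct_reclaim();	/* previously: tasklet_schedule() and give up */
	return -1;
}

int main(void)
{
	printf("first attempt  -> slot %d\n", alloc_slot());	/* -1, but reclaims */
	printf("second attempt -> slot %d\n", alloc_slot());	/* 0 */
	return 0;
}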
@@ -443,8 +450,18 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
443 | static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan); | 450 | static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan); |
444 | static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan); | 451 | static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan); |
445 | 452 | ||
446 | /* returns the number of allocated descriptors */ | 453 | /** |
447 | static int iop_adma_alloc_chan_resources(struct dma_chan *chan) | 454 | * iop_adma_alloc_chan_resources - returns the number of allocated descriptors |
455 | * @chan: allocate descriptor resources for this channel | ||
456 | * @client: current client requesting the channel be ready for requests | ||
457 | * | ||
458 | * Note: We keep the slots for 1 operation on iop_chan->chain at all times. To | ||
459 | * avoid deadlock, via async_xor, num_descs_in_pool must at a minimum be | ||
460 | * greater than 2x the number of slots needed to satisfy a device->max_xor | ||
461 | * request. | ||
462 | */ | ||
463 | static int iop_adma_alloc_chan_resources(struct dma_chan *chan, | ||
464 | struct dma_client *client) | ||
448 | { | 465 | { |
449 | char *hw_desc; | 466 | char *hw_desc; |
450 | int idx; | 467 | int idx; |
@@ -838,7 +855,7 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device) | |||
838 | dma_chan = container_of(device->common.channels.next, | 855 | dma_chan = container_of(device->common.channels.next, |
839 | struct dma_chan, | 856 | struct dma_chan, |
840 | device_node); | 857 | device_node); |
841 | if (iop_adma_alloc_chan_resources(dma_chan) < 1) { | 858 | if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) { |
842 | err = -ENODEV; | 859 | err = -ENODEV; |
843 | goto out; | 860 | goto out; |
844 | } | 861 | } |
@@ -936,7 +953,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device) | |||
936 | dma_chan = container_of(device->common.channels.next, | 953 | dma_chan = container_of(device->common.channels.next, |
937 | struct dma_chan, | 954 | struct dma_chan, |
938 | device_node); | 955 | device_node); |
939 | if (iop_adma_alloc_chan_resources(dma_chan) < 1) { | 956 | if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) { |
940 | err = -ENODEV; | 957 | err = -ENODEV; |
941 | goto out; | 958 | goto out; |
942 | } | 959 | } |
@@ -1387,6 +1404,8 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan) | |||
1387 | spin_unlock_bh(&iop_chan->lock); | 1404 | spin_unlock_bh(&iop_chan->lock); |
1388 | } | 1405 | } |
1389 | 1406 | ||
1407 | MODULE_ALIAS("platform:iop-adma"); | ||
1408 | |||
1390 | static struct platform_driver iop_adma_driver = { | 1409 | static struct platform_driver iop_adma_driver = { |
1391 | .probe = iop_adma_probe, | 1410 | .probe = iop_adma_probe, |
1392 | .remove = iop_adma_remove, | 1411 | .remove = iop_adma_remove, |
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c new file mode 100644 index 000000000000..a4e4494663bf --- /dev/null +++ b/drivers/dma/mv_xor.c | |||
@@ -0,0 +1,1375 @@ | |||
1 | /* | ||
2 | * offload engine driver for the Marvell XOR engine | ||
3 | * Copyright (C) 2007, 2008, Marvell International Ltd. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., | ||
16 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
17 | */ | ||
18 | |||
19 | #include <linux/init.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/async_tx.h> | ||
22 | #include <linux/delay.h> | ||
23 | #include <linux/dma-mapping.h> | ||
24 | #include <linux/spinlock.h> | ||
25 | #include <linux/interrupt.h> | ||
26 | #include <linux/platform_device.h> | ||
27 | #include <linux/memory.h> | ||
28 | #include <asm/plat-orion/mv_xor.h> | ||
29 | #include "mv_xor.h" | ||
30 | |||
31 | static void mv_xor_issue_pending(struct dma_chan *chan); | ||
32 | |||
33 | #define to_mv_xor_chan(chan) \ | ||
34 | container_of(chan, struct mv_xor_chan, common) | ||
35 | |||
36 | #define to_mv_xor_device(dev) \ | ||
37 | container_of(dev, struct mv_xor_device, common) | ||
38 | |||
39 | #define to_mv_xor_slot(tx) \ | ||
40 | container_of(tx, struct mv_xor_desc_slot, async_tx) | ||
41 | |||
42 | static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags) | ||
43 | { | ||
44 | struct mv_xor_desc *hw_desc = desc->hw_desc; | ||
45 | |||
46 | hw_desc->status = (1 << 31); | ||
47 | hw_desc->phy_next_desc = 0; | ||
48 | hw_desc->desc_command = (1 << 31); | ||
49 | } | ||
50 | |||
51 | static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc) | ||
52 | { | ||
53 | struct mv_xor_desc *hw_desc = desc->hw_desc; | ||
54 | return hw_desc->phy_dest_addr; | ||
55 | } | ||
56 | |||
57 | static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc, | ||
58 | int src_idx) | ||
59 | { | ||
60 | struct mv_xor_desc *hw_desc = desc->hw_desc; | ||
61 | return hw_desc->phy_src_addr[src_idx]; | ||
62 | } | ||
63 | |||
64 | |||
65 | static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc, | ||
66 | u32 byte_count) | ||
67 | { | ||
68 | struct mv_xor_desc *hw_desc = desc->hw_desc; | ||
69 | hw_desc->byte_count = byte_count; | ||
70 | } | ||
71 | |||
72 | static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc, | ||
73 | u32 next_desc_addr) | ||
74 | { | ||
75 | struct mv_xor_desc *hw_desc = desc->hw_desc; | ||
76 | BUG_ON(hw_desc->phy_next_desc); | ||
77 | hw_desc->phy_next_desc = next_desc_addr; | ||
78 | } | ||
79 | |||
80 | static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc) | ||
81 | { | ||
82 | struct mv_xor_desc *hw_desc = desc->hw_desc; | ||
83 | hw_desc->phy_next_desc = 0; | ||
84 | } | ||
85 | |||
86 | static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val) | ||
87 | { | ||
88 | desc->value = val; | ||
89 | } | ||
90 | |||
91 | static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc, | ||
92 | dma_addr_t addr) | ||
93 | { | ||
94 | struct mv_xor_desc *hw_desc = desc->hw_desc; | ||
95 | hw_desc->phy_dest_addr = addr; | ||
96 | } | ||
97 | |||
98 | static int mv_chan_memset_slot_count(size_t len) | ||
99 | { | ||
100 | return 1; | ||
101 | } | ||
102 | |||
103 | #define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c) | ||
104 | |||
105 | static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc, | ||
106 | int index, dma_addr_t addr) | ||
107 | { | ||
108 | struct mv_xor_desc *hw_desc = desc->hw_desc; | ||
109 | hw_desc->phy_src_addr[index] = addr; | ||
110 | if (desc->type == DMA_XOR) | ||
111 | hw_desc->desc_command |= (1 << index); | ||
112 | } | ||
113 | |||
114 | static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan) | ||
115 | { | ||
116 | return __raw_readl(XOR_CURR_DESC(chan)); | ||
117 | } | ||
118 | |||
119 | static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan, | ||
120 | u32 next_desc_addr) | ||
121 | { | ||
122 | __raw_writel(next_desc_addr, XOR_NEXT_DESC(chan)); | ||
123 | } | ||
124 | |||
125 | static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr) | ||
126 | { | ||
127 | __raw_writel(desc_addr, XOR_DEST_POINTER(chan)); | ||
128 | } | ||
129 | |||
130 | static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size) | ||
131 | { | ||
132 | __raw_writel(block_size, XOR_BLOCK_SIZE(chan)); | ||
133 | } | ||
134 | |||
135 | static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value) | ||
136 | { | ||
137 | __raw_writel(value, XOR_INIT_VALUE_LOW(chan)); | ||
138 | __raw_writel(value, XOR_INIT_VALUE_HIGH(chan)); | ||
139 | } | ||
140 | |||
141 | static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan) | ||
142 | { | ||
143 | u32 val = __raw_readl(XOR_INTR_MASK(chan)); | ||
144 | val |= XOR_INTR_MASK_VALUE << (chan->idx * 16); | ||
145 | __raw_writel(val, XOR_INTR_MASK(chan)); | ||
146 | } | ||
147 | |||
148 | static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan) | ||
149 | { | ||
150 | u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan)); | ||
151 | intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF; | ||
152 | return intr_cause; | ||
153 | } | ||
154 | |||
155 | static int mv_is_err_intr(u32 intr_cause) | ||
156 | { | ||
157 | if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9))) | ||
158 | return 1; | ||
159 | |||
160 | return 0; | ||
161 | } | ||
162 | |||
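mv_chan_get_intr_cause() and mv_is_err_intr() above carve a shared cause register into per-channel 16-bit fields and treat bits 4..9 of a field as error conditions. A standalone sketch of that decoding; the sample register value is made up for illustration:

#include <stdio.h>
#include <stdint.h>

#define ERR_BITS	((1u << 4) | (1u << 5) | (1u << 6) | \
			 (1u << 7) | (1u << 8) | (1u << 9))

static uint32_t chan_intr_cause(uint32_t raw, int idx)
{
	return (raw >> (idx * 16)) & 0xFFFF;	/* one 16-bit field per channel */
}

int main(void)
{
	uint32_t raw = 0x00400002;	/* chan 1: bit 6 (error), chan 0: bit 1 (completion) */
	int idx;

	for (idx = 0; idx < 2; idx++) {
		uint32_t cause = chan_intr_cause(raw, idx);
		printf("chan %d cause 0x%04x%s\n", idx, cause,
		       (cause & ERR_BITS) ? " (error)" : "");
	}
	return 0;
}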
163 | static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) | ||
164 | { | ||
165 | u32 val = (1 << (1 + (chan->idx * 16))); | ||
166 | dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val); | ||
167 | __raw_writel(val, XOR_INTR_CAUSE(chan)); | ||
168 | } | ||
169 | |||
170 | static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan) | ||
171 | { | ||
172 | u32 val = 0xFFFF0000 >> (chan->idx * 16); | ||
173 | __raw_writel(val, XOR_INTR_CAUSE(chan)); | ||
174 | } | ||
175 | |||
176 | static int mv_can_chain(struct mv_xor_desc_slot *desc) | ||
177 | { | ||
178 | struct mv_xor_desc_slot *chain_old_tail = list_entry( | ||
179 | desc->chain_node.prev, struct mv_xor_desc_slot, chain_node); | ||
180 | |||
181 | if (chain_old_tail->type != desc->type) | ||
182 | return 0; | ||
183 | if (desc->type == DMA_MEMSET) | ||
184 | return 0; | ||
185 | |||
186 | return 1; | ||
187 | } | ||
188 | |||
189 | static void mv_set_mode(struct mv_xor_chan *chan, | ||
190 | enum dma_transaction_type type) | ||
191 | { | ||
192 | u32 op_mode; | ||
193 | u32 config = __raw_readl(XOR_CONFIG(chan)); | ||
194 | |||
195 | switch (type) { | ||
196 | case DMA_XOR: | ||
197 | op_mode = XOR_OPERATION_MODE_XOR; | ||
198 | break; | ||
199 | case DMA_MEMCPY: | ||
200 | op_mode = XOR_OPERATION_MODE_MEMCPY; | ||
201 | break; | ||
202 | case DMA_MEMSET: | ||
203 | op_mode = XOR_OPERATION_MODE_MEMSET; | ||
204 | break; | ||
205 | default: | ||
206 | dev_printk(KERN_ERR, chan->device->common.dev, | ||
207 | "error: unsupported operation %d.\n", | ||
208 | type); | ||
209 | BUG(); | ||
210 | return; | ||
211 | } | ||
212 | |||
213 | config &= ~0x7; | ||
214 | config |= op_mode; | ||
215 | __raw_writel(config, XOR_CONFIG(chan)); | ||
216 | chan->current_type = type; | ||
217 | } | ||
218 | |||
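mv_set_mode() above rewrites only the low three bits of the configuration register, preserving the rest of the hardware state. A standalone sketch of that read-modify-write; the mask and mode values are taken from the code, the register model and sample value are illustrative:

#include <stdio.h>
#include <stdint.h>

#define MODE_XOR	0	/* XOR_OPERATION_MODE_XOR */
#define MODE_MEMCPY	2	/* XOR_OPERATION_MODE_MEMCPY */
#define MODE_MEMSET	4	/* XOR_OPERATION_MODE_MEMSET */

static uint32_t xor_config = 0xabcd1230;	/* pretend hardware state */

static void set_mode(uint32_t op_mode)
{
	uint32_t config = xor_config;	/* __raw_readl(XOR_CONFIG(chan)) */

	config &= ~0x7u;		/* clear the operation-mode field */
	config |= op_mode;		/* select the new operation */
	xor_config = config;		/* __raw_writel(config, ...) */
}

int main(void)
{
	set_mode(MODE_MEMCPY);
	printf("config after memcpy mode: 0x%08x\n", xor_config);	/* 0xabcd1232 */
	set_mode(MODE_MEMSET);
	printf("config after memset mode: 0x%08x\n", xor_config);	/* 0xabcd1234 */
	return 0;
}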
219 | static void mv_chan_activate(struct mv_xor_chan *chan) | ||
220 | { | ||
221 | u32 activation; | ||
222 | |||
223 | dev_dbg(chan->device->common.dev, " activate chan.\n"); | ||
224 | activation = __raw_readl(XOR_ACTIVATION(chan)); | ||
225 | activation |= 0x1; | ||
226 | __raw_writel(activation, XOR_ACTIVATION(chan)); | ||
227 | } | ||
228 | |||
229 | static char mv_chan_is_busy(struct mv_xor_chan *chan) | ||
230 | { | ||
231 | u32 state = __raw_readl(XOR_ACTIVATION(chan)); | ||
232 | |||
233 | state = (state >> 4) & 0x3; | ||
234 | |||
235 | return (state == 1) ? 1 : 0; | ||
236 | } | ||
237 | |||
238 | static int mv_chan_xor_slot_count(size_t len, int src_cnt) | ||
239 | { | ||
240 | return 1; | ||
241 | } | ||
242 | |||
243 | /** | ||
244 | * mv_xor_free_slots - flags descriptor slots for reuse | ||
245 | * @slot: Slot to free | ||
246 | * Caller must hold &mv_chan->lock while calling this function | ||
247 | */ | ||
248 | static void mv_xor_free_slots(struct mv_xor_chan *mv_chan, | ||
249 | struct mv_xor_desc_slot *slot) | ||
250 | { | ||
251 | dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n", | ||
252 | __func__, __LINE__, slot); | ||
253 | |||
254 | slot->slots_per_op = 0; | ||
255 | |||
256 | } | ||
257 | |||
258 | /* | ||
259 | * mv_xor_start_new_chain - program the engine to operate on new chain headed by | ||
260 | * sw_desc | ||
261 | * Caller must hold &mv_chan->lock while calling this function | ||
262 | */ | ||
263 | static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan, | ||
264 | struct mv_xor_desc_slot *sw_desc) | ||
265 | { | ||
266 | dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n", | ||
267 | __func__, __LINE__, sw_desc); | ||
268 | if (sw_desc->type != mv_chan->current_type) | ||
269 | mv_set_mode(mv_chan, sw_desc->type); | ||
270 | |||
271 | if (sw_desc->type == DMA_MEMSET) { | ||
272 | /* for memset requests we need to program the engine, no | ||
273 | * descriptors used. | ||
274 | */ | ||
275 | struct mv_xor_desc *hw_desc = sw_desc->hw_desc; | ||
276 | mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr); | ||
277 | mv_chan_set_block_size(mv_chan, sw_desc->unmap_len); | ||
278 | mv_chan_set_value(mv_chan, sw_desc->value); | ||
279 | } else { | ||
280 | /* set the hardware chain */ | ||
281 | mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys); | ||
282 | } | ||
283 | mv_chan->pending += sw_desc->slot_cnt; | ||
284 | mv_xor_issue_pending(&mv_chan->common); | ||
285 | } | ||
286 | |||
287 | static dma_cookie_t | ||
288 | mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc, | ||
289 | struct mv_xor_chan *mv_chan, dma_cookie_t cookie) | ||
290 | { | ||
291 | BUG_ON(desc->async_tx.cookie < 0); | ||
292 | |||
293 | if (desc->async_tx.cookie > 0) { | ||
294 | cookie = desc->async_tx.cookie; | ||
295 | |||
296 | /* call the callback (must not sleep or submit new | ||
297 | * operations to this channel) | ||
298 | */ | ||
299 | if (desc->async_tx.callback) | ||
300 | desc->async_tx.callback( | ||
301 | desc->async_tx.callback_param); | ||
302 | |||
303 | /* unmap dma addresses | ||
304 | * (unmap_single vs unmap_page?) | ||
305 | */ | ||
306 | if (desc->group_head && desc->unmap_len) { | ||
307 | struct mv_xor_desc_slot *unmap = desc->group_head; | ||
308 | struct device *dev = | ||
309 | &mv_chan->device->pdev->dev; | ||
310 | u32 len = unmap->unmap_len; | ||
311 | enum dma_ctrl_flags flags = desc->async_tx.flags; | ||
312 | u32 src_cnt; | ||
313 | dma_addr_t addr; | ||
314 | |||
315 | if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
316 | addr = mv_desc_get_dest_addr(unmap); | ||
317 | dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE); | ||
318 | } | ||
319 | |||
320 | if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
321 | src_cnt = unmap->unmap_src_cnt; | ||
322 | while (src_cnt--) { | ||
323 | addr = mv_desc_get_src_addr(unmap, | ||
324 | src_cnt); | ||
325 | dma_unmap_page(dev, addr, len, | ||
326 | DMA_TO_DEVICE); | ||
327 | } | ||
328 | } | ||
329 | desc->group_head = NULL; | ||
330 | } | ||
331 | } | ||
332 | |||
333 | /* run dependent operations */ | ||
334 | async_tx_run_dependencies(&desc->async_tx); | ||
335 | |||
336 | return cookie; | ||
337 | } | ||
338 | |||
339 | static int | ||
340 | mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan) | ||
341 | { | ||
342 | struct mv_xor_desc_slot *iter, *_iter; | ||
343 | |||
344 | dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__); | ||
345 | list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, | ||
346 | completed_node) { | ||
347 | |||
348 | if (async_tx_test_ack(&iter->async_tx)) { | ||
349 | list_del(&iter->completed_node); | ||
350 | mv_xor_free_slots(mv_chan, iter); | ||
351 | } | ||
352 | } | ||
353 | return 0; | ||
354 | } | ||
355 | |||
356 | static int | ||
357 | mv_xor_clean_slot(struct mv_xor_desc_slot *desc, | ||
358 | struct mv_xor_chan *mv_chan) | ||
359 | { | ||
360 | dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n", | ||
361 | __func__, __LINE__, desc, desc->async_tx.flags); | ||
362 | list_del(&desc->chain_node); | ||
363 | /* the client is allowed to attach dependent operations | ||
364 | * until 'ack' is set | ||
365 | */ | ||
366 | if (!async_tx_test_ack(&desc->async_tx)) { | ||
367 | /* move this slot to the completed_slots */ | ||
368 | list_add_tail(&desc->completed_node, &mv_chan->completed_slots); | ||
369 | return 0; | ||
370 | } | ||
371 | |||
372 | mv_xor_free_slots(mv_chan, desc); | ||
373 | return 0; | ||
374 | } | ||
375 | |||
376 | static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan) | ||
377 | { | ||
378 | struct mv_xor_desc_slot *iter, *_iter; | ||
379 | dma_cookie_t cookie = 0; | ||
380 | int busy = mv_chan_is_busy(mv_chan); | ||
381 | u32 current_desc = mv_chan_get_current_desc(mv_chan); | ||
382 | int seen_current = 0; | ||
383 | |||
384 | dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__); | ||
385 | dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc); | ||
386 | mv_xor_clean_completed_slots(mv_chan); | ||
387 | |||
388 | /* free completed slots from the chain starting with | ||
389 | * the oldest descriptor | ||
390 | */ | ||
391 | |||
392 | list_for_each_entry_safe(iter, _iter, &mv_chan->chain, | ||
393 | chain_node) { | ||
394 | prefetch(_iter); | ||
395 | prefetch(&_iter->async_tx); | ||
396 | |||
397 | /* do not advance past the current descriptor loaded into the | ||
398 | * hardware channel, subsequent descriptors are either in | ||
399 | * process or have not been submitted | ||
400 | */ | ||
401 | if (seen_current) | ||
402 | break; | ||
403 | |||
404 | /* stop the search if we reach the current descriptor and the | ||
405 | * channel is busy | ||
406 | */ | ||
407 | if (iter->async_tx.phys == current_desc) { | ||
408 | seen_current = 1; | ||
409 | if (busy) | ||
410 | break; | ||
411 | } | ||
412 | |||
413 | cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie); | ||
414 | |||
415 | if (mv_xor_clean_slot(iter, mv_chan)) | ||
416 | break; | ||
417 | } | ||
418 | |||
419 | if ((busy == 0) && !list_empty(&mv_chan->chain)) { | ||
420 | struct mv_xor_desc_slot *chain_head; | ||
421 | chain_head = list_entry(mv_chan->chain.next, | ||
422 | struct mv_xor_desc_slot, | ||
423 | chain_node); | ||
424 | |||
425 | mv_xor_start_new_chain(mv_chan, chain_head); | ||
426 | } | ||
427 | |||
428 | if (cookie > 0) | ||
429 | mv_chan->completed_cookie = cookie; | ||
430 | } | ||
431 | |||
432 | static void | ||
433 | mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan) | ||
434 | { | ||
435 | spin_lock_bh(&mv_chan->lock); | ||
436 | __mv_xor_slot_cleanup(mv_chan); | ||
437 | spin_unlock_bh(&mv_chan->lock); | ||
438 | } | ||
439 | |||
440 | static void mv_xor_tasklet(unsigned long data) | ||
441 | { | ||
442 | struct mv_xor_chan *chan = (struct mv_xor_chan *) data; | ||
443 | __mv_xor_slot_cleanup(chan); | ||
444 | } | ||
445 | |||
446 | static struct mv_xor_desc_slot * | ||
447 | mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots, | ||
448 | int slots_per_op) | ||
449 | { | ||
450 | struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL; | ||
451 | LIST_HEAD(chain); | ||
452 | int slots_found, retry = 0; | ||
453 | |||
454 | /* start search from the last allocated descriptor | ||
455 | * if a contiguous allocation cannot be found, start searching | ||
456 | * from the beginning of the list | ||
457 | */ | ||
458 | retry: | ||
459 | slots_found = 0; | ||
460 | if (retry == 0) | ||
461 | iter = mv_chan->last_used; | ||
462 | else | ||
463 | iter = list_entry(&mv_chan->all_slots, | ||
464 | struct mv_xor_desc_slot, | ||
465 | slot_node); | ||
466 | |||
467 | list_for_each_entry_safe_continue( | ||
468 | iter, _iter, &mv_chan->all_slots, slot_node) { | ||
469 | prefetch(_iter); | ||
470 | prefetch(&_iter->async_tx); | ||
471 | if (iter->slots_per_op) { | ||
472 | /* give up after finding the first busy slot | ||
473 | * on the second pass through the list | ||
474 | */ | ||
475 | if (retry) | ||
476 | break; | ||
477 | |||
478 | slots_found = 0; | ||
479 | continue; | ||
480 | } | ||
481 | |||
482 | /* start the allocation if the slot is correctly aligned */ | ||
483 | if (!slots_found++) | ||
484 | alloc_start = iter; | ||
485 | |||
486 | if (slots_found == num_slots) { | ||
487 | struct mv_xor_desc_slot *alloc_tail = NULL; | ||
488 | struct mv_xor_desc_slot *last_used = NULL; | ||
489 | iter = alloc_start; | ||
490 | while (num_slots) { | ||
491 | int i; | ||
492 | |||
493 | /* pre-ack all but the last descriptor */ | ||
494 | async_tx_ack(&iter->async_tx); | ||
495 | |||
496 | list_add_tail(&iter->chain_node, &chain); | ||
497 | alloc_tail = iter; | ||
498 | iter->async_tx.cookie = 0; | ||
499 | iter->slot_cnt = num_slots; | ||
500 | iter->xor_check_result = NULL; | ||
501 | for (i = 0; i < slots_per_op; i++) { | ||
502 | iter->slots_per_op = slots_per_op - i; | ||
503 | last_used = iter; | ||
504 | iter = list_entry(iter->slot_node.next, | ||
505 | struct mv_xor_desc_slot, | ||
506 | slot_node); | ||
507 | } | ||
508 | num_slots -= slots_per_op; | ||
509 | } | ||
510 | alloc_tail->group_head = alloc_start; | ||
511 | alloc_tail->async_tx.cookie = -EBUSY; | ||
512 | list_splice(&chain, &alloc_tail->async_tx.tx_list); | ||
513 | mv_chan->last_used = last_used; | ||
514 | mv_desc_clear_next_desc(alloc_start); | ||
515 | mv_desc_clear_next_desc(alloc_tail); | ||
516 | return alloc_tail; | ||
517 | } | ||
518 | } | ||
519 | if (!retry++) | ||
520 | goto retry; | ||
521 | |||
522 | /* try to free some slots if the allocation fails */ | ||
523 | tasklet_schedule(&mv_chan->irq_tasklet); | ||
524 | |||
525 | return NULL; | ||
526 | } | ||
527 | |||
528 | static dma_cookie_t | ||
529 | mv_desc_assign_cookie(struct mv_xor_chan *mv_chan, | ||
530 | struct mv_xor_desc_slot *desc) | ||
531 | { | ||
532 | dma_cookie_t cookie = mv_chan->common.cookie; | ||
533 | |||
534 | if (++cookie < 0) | ||
535 | cookie = 1; | ||
536 | mv_chan->common.cookie = desc->async_tx.cookie = cookie; | ||
537 | return cookie; | ||
538 | } | ||
539 | |||
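mv_desc_assign_cookie() above hands out strictly positive cookies and wraps back to 1 on overflow, since zero means "not yet submitted" and negative values are error codes. The driver's "if (++cookie < 0)" test assumes wrap-around of a signed int; this sketch wraps explicitly so it stays well-defined C:

#include <stdio.h>
#include <limits.h>

typedef int dma_cookie_t;

static dma_cookie_t next_cookie(dma_cookie_t last)
{
	if (last >= INT_MAX || last < 0)
		return 1;	/* skip 0 and negative values, they are reserved */
	return last + 1;
}

int main(void)
{
	printf("%d -> %d\n", 41, next_cookie(41));		/* 42 */
	printf("%d -> %d\n", INT_MAX, next_cookie(INT_MAX));	/* 1 */
	return 0;
}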
540 | /************************ DMA engine API functions ****************************/ | ||
541 | static dma_cookie_t | ||
542 | mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) | ||
543 | { | ||
544 | struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx); | ||
545 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan); | ||
546 | struct mv_xor_desc_slot *grp_start, *old_chain_tail; | ||
547 | dma_cookie_t cookie; | ||
548 | int new_hw_chain = 1; | ||
549 | |||
550 | dev_dbg(mv_chan->device->common.dev, | ||
551 | "%s sw_desc %p: async_tx %p\n", | ||
552 | __func__, sw_desc, &sw_desc->async_tx); | ||
553 | |||
554 | grp_start = sw_desc->group_head; | ||
555 | |||
556 | spin_lock_bh(&mv_chan->lock); | ||
557 | cookie = mv_desc_assign_cookie(mv_chan, sw_desc); | ||
558 | |||
559 | if (list_empty(&mv_chan->chain)) | ||
560 | list_splice_init(&sw_desc->async_tx.tx_list, &mv_chan->chain); | ||
561 | else { | ||
562 | new_hw_chain = 0; | ||
563 | |||
564 | old_chain_tail = list_entry(mv_chan->chain.prev, | ||
565 | struct mv_xor_desc_slot, | ||
566 | chain_node); | ||
567 | list_splice_init(&grp_start->async_tx.tx_list, | ||
568 | &old_chain_tail->chain_node); | ||
569 | |||
570 | if (!mv_can_chain(grp_start)) | ||
571 | goto submit_done; | ||
572 | |||
573 | dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n", | ||
574 | old_chain_tail->async_tx.phys); | ||
575 | |||
576 | /* fix up the hardware chain */ | ||
577 | mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys); | ||
578 | |||
579 | /* if the channel is not busy */ | ||
580 | if (!mv_chan_is_busy(mv_chan)) { | ||
581 | u32 current_desc = mv_chan_get_current_desc(mv_chan); | ||
582 | /* | ||
583 | * and the current desc is the end of the chain before | ||
584 | * the append, then we need to start the channel | ||
585 | */ | ||
586 | if (current_desc == old_chain_tail->async_tx.phys) | ||
587 | new_hw_chain = 1; | ||
588 | } | ||
589 | } | ||
590 | |||
591 | if (new_hw_chain) | ||
592 | mv_xor_start_new_chain(mv_chan, grp_start); | ||
593 | |||
594 | submit_done: | ||
595 | spin_unlock_bh(&mv_chan->lock); | ||
596 | |||
597 | return cookie; | ||
598 | } | ||
599 | |||
600 | /* returns the number of allocated descriptors */ | ||
601 | static int mv_xor_alloc_chan_resources(struct dma_chan *chan, | ||
602 | struct dma_client *client) | ||
603 | { | ||
604 | char *hw_desc; | ||
605 | int idx; | ||
606 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); | ||
607 | struct mv_xor_desc_slot *slot = NULL; | ||
608 | struct mv_xor_platform_data *plat_data = | ||
609 | mv_chan->device->pdev->dev.platform_data; | ||
610 | int num_descs_in_pool = plat_data->pool_size/MV_XOR_SLOT_SIZE; | ||
611 | |||
612 | /* Allocate descriptor slots */ | ||
613 | idx = mv_chan->slots_allocated; | ||
614 | while (idx < num_descs_in_pool) { | ||
615 | slot = kzalloc(sizeof(*slot), GFP_KERNEL); | ||
616 | if (!slot) { | ||
617 | printk(KERN_INFO "MV XOR Channel only initialized" | ||
618 | " %d descriptor slots", idx); | ||
619 | break; | ||
620 | } | ||
621 | hw_desc = (char *) mv_chan->device->dma_desc_pool_virt; | ||
622 | slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE]; | ||
623 | |||
624 | dma_async_tx_descriptor_init(&slot->async_tx, chan); | ||
625 | slot->async_tx.tx_submit = mv_xor_tx_submit; | ||
626 | INIT_LIST_HEAD(&slot->chain_node); | ||
627 | INIT_LIST_HEAD(&slot->slot_node); | ||
628 | INIT_LIST_HEAD(&slot->async_tx.tx_list); | ||
629 | hw_desc = (char *) mv_chan->device->dma_desc_pool; | ||
630 | slot->async_tx.phys = | ||
631 | (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE]; | ||
632 | slot->idx = idx++; | ||
633 | |||
634 | spin_lock_bh(&mv_chan->lock); | ||
635 | mv_chan->slots_allocated = idx; | ||
636 | list_add_tail(&slot->slot_node, &mv_chan->all_slots); | ||
637 | spin_unlock_bh(&mv_chan->lock); | ||
638 | } | ||
639 | |||
640 | if (mv_chan->slots_allocated && !mv_chan->last_used) | ||
641 | mv_chan->last_used = list_entry(mv_chan->all_slots.next, | ||
642 | struct mv_xor_desc_slot, | ||
643 | slot_node); | ||
644 | |||
645 | dev_dbg(mv_chan->device->common.dev, | ||
646 | "allocated %d descriptor slots last_used: %p\n", | ||
647 | mv_chan->slots_allocated, mv_chan->last_used); | ||
648 | |||
649 | return mv_chan->slots_allocated ? : -ENOMEM; | ||
650 | } | ||
651 | |||
652 | static struct dma_async_tx_descriptor * | ||
653 | mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | ||
654 | size_t len, unsigned long flags) | ||
655 | { | ||
656 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); | ||
657 | struct mv_xor_desc_slot *sw_desc, *grp_start; | ||
658 | int slot_cnt; | ||
659 | |||
660 | dev_dbg(mv_chan->device->common.dev, | ||
661 | "%s dest: %x src %x len: %u flags: %ld\n", | ||
662 | __func__, dest, src, len, flags); | ||
663 | if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) | ||
664 | return NULL; | ||
665 | |||
666 | BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT)); | ||
667 | |||
668 | spin_lock_bh(&mv_chan->lock); | ||
669 | slot_cnt = mv_chan_memcpy_slot_count(len); | ||
670 | sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1); | ||
671 | if (sw_desc) { | ||
672 | sw_desc->type = DMA_MEMCPY; | ||
673 | sw_desc->async_tx.flags = flags; | ||
674 | grp_start = sw_desc->group_head; | ||
675 | mv_desc_init(grp_start, flags); | ||
676 | mv_desc_set_byte_count(grp_start, len); | ||
677 | mv_desc_set_dest_addr(sw_desc->group_head, dest); | ||
678 | mv_desc_set_src_addr(grp_start, 0, src); | ||
679 | sw_desc->unmap_src_cnt = 1; | ||
680 | sw_desc->unmap_len = len; | ||
681 | } | ||
682 | spin_unlock_bh(&mv_chan->lock); | ||
683 | |||
684 | dev_dbg(mv_chan->device->common.dev, | ||
685 | "%s sw_desc %p async_tx %p\n", | ||
686 | __func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0); | ||
687 | |||
688 | return sw_desc ? &sw_desc->async_tx : NULL; | ||
689 | } | ||
690 | |||
691 | static struct dma_async_tx_descriptor * | ||
692 | mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, | ||
693 | size_t len, unsigned long flags) | ||
694 | { | ||
695 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); | ||
696 | struct mv_xor_desc_slot *sw_desc, *grp_start; | ||
697 | int slot_cnt; | ||
698 | |||
699 | dev_dbg(mv_chan->device->common.dev, | ||
700 | "%s dest: %x len: %u flags: %ld\n", | ||
701 | __func__, dest, len, flags); | ||
702 | if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) | ||
703 | return NULL; | ||
704 | |||
705 | BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT)); | ||
706 | |||
707 | spin_lock_bh(&mv_chan->lock); | ||
708 | slot_cnt = mv_chan_memset_slot_count(len); | ||
709 | sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1); | ||
710 | if (sw_desc) { | ||
711 | sw_desc->type = DMA_MEMSET; | ||
712 | sw_desc->async_tx.flags = flags; | ||
713 | grp_start = sw_desc->group_head; | ||
714 | mv_desc_init(grp_start, flags); | ||
715 | mv_desc_set_byte_count(grp_start, len); | ||
716 | mv_desc_set_dest_addr(sw_desc->group_head, dest); | ||
717 | mv_desc_set_block_fill_val(grp_start, value); | ||
718 | sw_desc->unmap_src_cnt = 1; | ||
719 | sw_desc->unmap_len = len; | ||
720 | } | ||
721 | spin_unlock_bh(&mv_chan->lock); | ||
722 | dev_dbg(mv_chan->device->common.dev, | ||
723 | "%s sw_desc %p async_tx %p \n", | ||
724 | __func__, sw_desc, &sw_desc->async_tx); | ||
725 | return sw_desc ? &sw_desc->async_tx : NULL; | ||
726 | } | ||
727 | |||
728 | static struct dma_async_tx_descriptor * | ||
729 | mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, | ||
730 | unsigned int src_cnt, size_t len, unsigned long flags) | ||
731 | { | ||
732 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); | ||
733 | struct mv_xor_desc_slot *sw_desc, *grp_start; | ||
734 | int slot_cnt; | ||
735 | |||
736 | if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) | ||
737 | return NULL; | ||
738 | |||
739 | BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT)); | ||
740 | |||
741 | dev_dbg(mv_chan->device->common.dev, | ||
742 | "%s src_cnt: %d len: dest %x %u flags: %ld\n", | ||
743 | __func__, src_cnt, len, dest, flags); | ||
744 | |||
745 | spin_lock_bh(&mv_chan->lock); | ||
746 | slot_cnt = mv_chan_xor_slot_count(len, src_cnt); | ||
747 | sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1); | ||
748 | if (sw_desc) { | ||
749 | sw_desc->type = DMA_XOR; | ||
750 | sw_desc->async_tx.flags = flags; | ||
751 | grp_start = sw_desc->group_head; | ||
752 | mv_desc_init(grp_start, flags); | ||
753 | /* the byte count field is the same as in memcpy desc */ | ||
754 | mv_desc_set_byte_count(grp_start, len); | ||
755 | mv_desc_set_dest_addr(sw_desc->group_head, dest); | ||
756 | sw_desc->unmap_src_cnt = src_cnt; | ||
757 | sw_desc->unmap_len = len; | ||
758 | while (src_cnt--) | ||
759 | mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]); | ||
760 | } | ||
761 | spin_unlock_bh(&mv_chan->lock); | ||
762 | dev_dbg(mv_chan->device->common.dev, | ||
763 | "%s sw_desc %p async_tx %p \n", | ||
764 | __func__, sw_desc, &sw_desc->async_tx); | ||
765 | return sw_desc ? &sw_desc->async_tx : NULL; | ||
766 | } | ||
767 | |||
768 | static void mv_xor_free_chan_resources(struct dma_chan *chan) | ||
769 | { | ||
770 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); | ||
771 | struct mv_xor_desc_slot *iter, *_iter; | ||
772 | int in_use_descs = 0; | ||
773 | |||
774 | mv_xor_slot_cleanup(mv_chan); | ||
775 | |||
776 | spin_lock_bh(&mv_chan->lock); | ||
777 | list_for_each_entry_safe(iter, _iter, &mv_chan->chain, | ||
778 | chain_node) { | ||
779 | in_use_descs++; | ||
780 | list_del(&iter->chain_node); | ||
781 | } | ||
782 | list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, | ||
783 | completed_node) { | ||
784 | in_use_descs++; | ||
785 | list_del(&iter->completed_node); | ||
786 | } | ||
787 | list_for_each_entry_safe_reverse( | ||
788 | iter, _iter, &mv_chan->all_slots, slot_node) { | ||
789 | list_del(&iter->slot_node); | ||
790 | kfree(iter); | ||
791 | mv_chan->slots_allocated--; | ||
792 | } | ||
793 | mv_chan->last_used = NULL; | ||
794 | |||
795 | dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n", | ||
796 | __func__, mv_chan->slots_allocated); | ||
797 | spin_unlock_bh(&mv_chan->lock); | ||
798 | |||
799 | if (in_use_descs) | ||
800 | dev_err(mv_chan->device->common.dev, | ||
801 | "freeing %d in use descriptors!\n", in_use_descs); | ||
802 | } | ||
803 | |||
804 | /** | ||
805 | * mv_xor_is_complete - poll the status of an XOR transaction | ||
806 | * @chan: XOR channel handle | ||
807 | * @cookie: XOR transaction identifier | ||
808 | */ | ||
809 | static enum dma_status mv_xor_is_complete(struct dma_chan *chan, | ||
810 | dma_cookie_t cookie, | ||
811 | dma_cookie_t *done, | ||
812 | dma_cookie_t *used) | ||
813 | { | ||
814 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); | ||
815 | dma_cookie_t last_used; | ||
816 | dma_cookie_t last_complete; | ||
817 | enum dma_status ret; | ||
818 | |||
819 | last_used = chan->cookie; | ||
820 | last_complete = mv_chan->completed_cookie; | ||
821 | mv_chan->is_complete_cookie = cookie; | ||
822 | if (done) | ||
823 | *done = last_complete; | ||
824 | if (used) | ||
825 | *used = last_used; | ||
826 | |||
827 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
828 | if (ret == DMA_SUCCESS) { | ||
829 | mv_xor_clean_completed_slots(mv_chan); | ||
830 | return ret; | ||
831 | } | ||
832 | mv_xor_slot_cleanup(mv_chan); | ||
833 | |||
834 | last_used = chan->cookie; | ||
835 | last_complete = mv_chan->completed_cookie; | ||
836 | |||
837 | if (done) | ||
838 | *done = last_complete; | ||
839 | if (used) | ||
840 | *used = last_used; | ||
841 | |||
842 | return dma_async_is_complete(cookie, last_complete, last_used); | ||
843 | } | ||
844 | |||
845 | static void mv_dump_xor_regs(struct mv_xor_chan *chan) | ||
846 | { | ||
847 | u32 val; | ||
848 | |||
849 | val = __raw_readl(XOR_CONFIG(chan)); | ||
850 | dev_printk(KERN_ERR, chan->device->common.dev, | ||
851 | "config 0x%08x.\n", val); | ||
852 | |||
853 | val = __raw_readl(XOR_ACTIVATION(chan)); | ||
854 | dev_printk(KERN_ERR, chan->device->common.dev, | ||
855 | "activation 0x%08x.\n", val); | ||
856 | |||
857 | val = __raw_readl(XOR_INTR_CAUSE(chan)); | ||
858 | dev_printk(KERN_ERR, chan->device->common.dev, | ||
859 | "intr cause 0x%08x.\n", val); | ||
860 | |||
861 | val = __raw_readl(XOR_INTR_MASK(chan)); | ||
862 | dev_printk(KERN_ERR, chan->device->common.dev, | ||
863 | "intr mask 0x%08x.\n", val); | ||
864 | |||
865 | val = __raw_readl(XOR_ERROR_CAUSE(chan)); | ||
866 | dev_printk(KERN_ERR, chan->device->common.dev, | ||
867 | "error cause 0x%08x.\n", val); | ||
868 | |||
869 | val = __raw_readl(XOR_ERROR_ADDR(chan)); | ||
870 | dev_printk(KERN_ERR, chan->device->common.dev, | ||
871 | "error addr 0x%08x.\n", val); | ||
872 | } | ||
873 | |||
874 | static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan, | ||
875 | u32 intr_cause) | ||
876 | { | ||
877 | if (intr_cause & (1 << 4)) { | ||
878 | dev_dbg(chan->device->common.dev, | ||
879 | "ignore this error\n"); | ||
880 | return; | ||
881 | } | ||
882 | |||
883 | dev_printk(KERN_ERR, chan->device->common.dev, | ||
884 | "error on chan %d. intr cause 0x%08x.\n", | ||
885 | chan->idx, intr_cause); | ||
886 | |||
887 | mv_dump_xor_regs(chan); | ||
888 | BUG(); | ||
889 | } | ||
890 | |||
891 | static irqreturn_t mv_xor_interrupt_handler(int irq, void *data) | ||
892 | { | ||
893 | struct mv_xor_chan *chan = data; | ||
894 | u32 intr_cause = mv_chan_get_intr_cause(chan); | ||
895 | |||
896 | dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause); | ||
897 | |||
898 | if (mv_is_err_intr(intr_cause)) | ||
899 | mv_xor_err_interrupt_handler(chan, intr_cause); | ||
900 | |||
901 | tasklet_schedule(&chan->irq_tasklet); | ||
902 | |||
903 | mv_xor_device_clear_eoc_cause(chan); | ||
904 | |||
905 | return IRQ_HANDLED; | ||
906 | } | ||
907 | |||
908 | static void mv_xor_issue_pending(struct dma_chan *chan) | ||
909 | { | ||
910 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); | ||
911 | |||
912 | if (mv_chan->pending >= MV_XOR_THRESHOLD) { | ||
913 | mv_chan->pending = 0; | ||
914 | mv_chan_activate(mv_chan); | ||
915 | } | ||
916 | } | ||
917 | |||
918 | /* | ||
919 | * Perform a transaction to verify the HW works. | ||
920 | */ | ||
921 | #define MV_XOR_TEST_SIZE 2000 | ||
922 | |||
923 | static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device) | ||
924 | { | ||
925 | int i; | ||
926 | void *src, *dest; | ||
927 | dma_addr_t src_dma, dest_dma; | ||
928 | struct dma_chan *dma_chan; | ||
929 | dma_cookie_t cookie; | ||
930 | struct dma_async_tx_descriptor *tx; | ||
931 | int err = 0; | ||
932 | struct mv_xor_chan *mv_chan; | ||
933 | |||
934 | src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); | ||
935 | if (!src) | ||
936 | return -ENOMEM; | ||
937 | |||
938 | dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); | ||
939 | if (!dest) { | ||
940 | kfree(src); | ||
941 | return -ENOMEM; | ||
942 | } | ||
943 | |||
944 | /* Fill in src buffer */ | ||
945 | for (i = 0; i < MV_XOR_TEST_SIZE; i++) | ||
946 | ((u8 *) src)[i] = (u8)i; | ||
947 | |||
948 | /* Start copy, using first DMA channel */ | ||
949 | dma_chan = container_of(device->common.channels.next, | ||
950 | struct dma_chan, | ||
951 | device_node); | ||
952 | if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) { | ||
953 | err = -ENODEV; | ||
954 | goto out; | ||
955 | } | ||
956 | |||
957 | dest_dma = dma_map_single(dma_chan->device->dev, dest, | ||
958 | MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); | ||
959 | |||
960 | src_dma = dma_map_single(dma_chan->device->dev, src, | ||
961 | MV_XOR_TEST_SIZE, DMA_TO_DEVICE); | ||
962 | |||
963 | tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma, | ||
964 | MV_XOR_TEST_SIZE, 0); | ||
965 | cookie = mv_xor_tx_submit(tx); | ||
966 | mv_xor_issue_pending(dma_chan); | ||
967 | async_tx_ack(tx); | ||
968 | msleep(1); | ||
969 | |||
970 | if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) != | ||
971 | DMA_SUCCESS) { | ||
972 | dev_printk(KERN_ERR, dma_chan->device->dev, | ||
973 | "Self-test copy timed out, disabling\n"); | ||
974 | err = -ENODEV; | ||
975 | goto free_resources; | ||
976 | } | ||
977 | |||
978 | mv_chan = to_mv_xor_chan(dma_chan); | ||
979 | dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma, | ||
980 | MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); | ||
981 | if (memcmp(src, dest, MV_XOR_TEST_SIZE)) { | ||
982 | dev_printk(KERN_ERR, dma_chan->device->dev, | ||
983 | "Self-test copy failed compare, disabling\n"); | ||
984 | err = -ENODEV; | ||
985 | goto free_resources; | ||
986 | } | ||
987 | |||
988 | free_resources: | ||
989 | mv_xor_free_chan_resources(dma_chan); | ||
990 | out: | ||
991 | kfree(src); | ||
992 | kfree(dest); | ||
993 | return err; | ||
994 | } | ||
995 | |||
996 | #define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */ | ||
997 | static int __devinit | ||
998 | mv_xor_xor_self_test(struct mv_xor_device *device) | ||
999 | { | ||
1000 | int i, src_idx; | ||
1001 | struct page *dest; | ||
1002 | struct page *xor_srcs[MV_XOR_NUM_SRC_TEST]; | ||
1003 | dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST]; | ||
1004 | dma_addr_t dest_dma; | ||
1005 | struct dma_async_tx_descriptor *tx; | ||
1006 | struct dma_chan *dma_chan; | ||
1007 | dma_cookie_t cookie; | ||
1008 | u8 cmp_byte = 0; | ||
1009 | u32 cmp_word; | ||
1010 | int err = 0; | ||
1011 | struct mv_xor_chan *mv_chan; | ||
1012 | |||
1013 | for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { | ||
1014 | xor_srcs[src_idx] = alloc_page(GFP_KERNEL); | ||
1015 | if (!xor_srcs[src_idx]) | ||
1016 | while (src_idx--) { | ||
1017 | __free_page(xor_srcs[src_idx]); | ||
1018 | return -ENOMEM; | ||
1019 | } | ||
1020 | } | ||
1021 | |||
1022 | dest = alloc_page(GFP_KERNEL); | ||
1023 | if (!dest) | ||
1024 | while (src_idx--) { | ||
1025 | __free_page(xor_srcs[src_idx]); | ||
1026 | return -ENOMEM; | ||
1027 | } | ||
1028 | |||
1029 | /* Fill in src buffers */ | ||
1030 | for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { | ||
1031 | u8 *ptr = page_address(xor_srcs[src_idx]); | ||
1032 | for (i = 0; i < PAGE_SIZE; i++) | ||
1033 | ptr[i] = (1 << src_idx); | ||
1034 | } | ||
1035 | |||
1036 | for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) | ||
1037 | cmp_byte ^= (u8) (1 << src_idx); | ||
1038 | |||
1039 | cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | | ||
1040 | (cmp_byte << 8) | cmp_byte; | ||
1041 | |||
1042 | memset(page_address(dest), 0, PAGE_SIZE); | ||
1043 | |||
1044 | dma_chan = container_of(device->common.channels.next, | ||
1045 | struct dma_chan, | ||
1046 | device_node); | ||
1047 | if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) { | ||
1048 | err = -ENODEV; | ||
1049 | goto out; | ||
1050 | } | ||
1051 | |||
1052 | /* test xor */ | ||
1053 | dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE, | ||
1054 | DMA_FROM_DEVICE); | ||
1055 | |||
1056 | for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++) | ||
1057 | dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], | ||
1058 | 0, PAGE_SIZE, DMA_TO_DEVICE); | ||
1059 | |||
1060 | tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs, | ||
1061 | MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0); | ||
1062 | |||
1063 | cookie = mv_xor_tx_submit(tx); | ||
1064 | mv_xor_issue_pending(dma_chan); | ||
1065 | async_tx_ack(tx); | ||
1066 | msleep(8); | ||
1067 | |||
1068 | if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) != | ||
1069 | DMA_SUCCESS) { | ||
1070 | dev_printk(KERN_ERR, dma_chan->device->dev, | ||
1071 | "Self-test xor timed out, disabling\n"); | ||
1072 | err = -ENODEV; | ||
1073 | goto free_resources; | ||
1074 | } | ||
1075 | |||
1076 | mv_chan = to_mv_xor_chan(dma_chan); | ||
1077 | dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma, | ||
1078 | PAGE_SIZE, DMA_FROM_DEVICE); | ||
1079 | for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { | ||
1080 | u32 *ptr = page_address(dest); | ||
1081 | if (ptr[i] != cmp_word) { | ||
1082 | dev_printk(KERN_ERR, dma_chan->device->dev, | ||
1083 | "Self-test xor failed compare, disabling." | ||
1084 | " index %d, data %x, expected %x\n", i, | ||
1085 | ptr[i], cmp_word); | ||
1086 | err = -ENODEV; | ||
1087 | goto free_resources; | ||
1088 | } | ||
1089 | } | ||
1090 | |||
1091 | free_resources: | ||
1092 | mv_xor_free_chan_resources(dma_chan); | ||
1093 | out: | ||
1094 | src_idx = MV_XOR_NUM_SRC_TEST; | ||
1095 | while (src_idx--) | ||
1096 | __free_page(xor_srcs[src_idx]); | ||
1097 | __free_page(dest); | ||
1098 | return err; | ||
1099 | } | ||
1100 | |||
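The XOR self-test above fills source page i with the byte (1 << i) and then expects every 32-bit word of the destination to be that XOR'ed byte replicated four times. A quick standalone check of the expected value for the four-source case used here:

#include <stdio.h>
#include <stdint.h>

#define NUM_SRC 4	/* matches MV_XOR_NUM_SRC_TEST */

int main(void)
{
	uint8_t cmp_byte = 0;
	uint32_t cmp_word;
	int i;

	for (i = 0; i < NUM_SRC; i++)
		cmp_byte ^= (uint8_t)(1 << i);	/* 0x01 ^ 0x02 ^ 0x04 ^ 0x08 */

	cmp_word = ((uint32_t)cmp_byte << 24) | ((uint32_t)cmp_byte << 16) |
		   ((uint32_t)cmp_byte << 8) | cmp_byte;

	printf("cmp_byte 0x%02x cmp_word 0x%08x\n", cmp_byte, cmp_word);
	/* prints: cmp_byte 0x0f cmp_word 0x0f0f0f0f */
	return 0;
}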
1101 | static int __devexit mv_xor_remove(struct platform_device *dev) | ||
1102 | { | ||
1103 | struct mv_xor_device *device = platform_get_drvdata(dev); | ||
1104 | struct dma_chan *chan, *_chan; | ||
1105 | struct mv_xor_chan *mv_chan; | ||
1106 | struct mv_xor_platform_data *plat_data = dev->dev.platform_data; | ||
1107 | |||
1108 | dma_async_device_unregister(&device->common); | ||
1109 | |||
1110 | dma_free_coherent(&dev->dev, plat_data->pool_size, | ||
1111 | device->dma_desc_pool_virt, device->dma_desc_pool); | ||
1112 | |||
1113 | list_for_each_entry_safe(chan, _chan, &device->common.channels, | ||
1114 | device_node) { | ||
1115 | mv_chan = to_mv_xor_chan(chan); | ||
1116 | list_del(&chan->device_node); | ||
1117 | } | ||
1118 | |||
1119 | return 0; | ||
1120 | } | ||
1121 | |||
1122 | static int __devinit mv_xor_probe(struct platform_device *pdev) | ||
1123 | { | ||
1124 | int ret = 0; | ||
1125 | int irq; | ||
1126 | struct mv_xor_device *adev; | ||
1127 | struct mv_xor_chan *mv_chan; | ||
1128 | struct dma_device *dma_dev; | ||
1129 | struct mv_xor_platform_data *plat_data = pdev->dev.platform_data; | ||
1130 | |||
1131 | |||
1132 | adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL); | ||
1133 | if (!adev) | ||
1134 | return -ENOMEM; | ||
1135 | |||
1136 | dma_dev = &adev->common; | ||
1137 | |||
1138 | /* allocate coherent memory for hardware descriptors | ||
1139 | * note: writecombine gives slightly better performance, but | ||
1140 | * requires that we explicitly flush the writes | ||
1141 | */ | ||
1142 | adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev, | ||
1143 | plat_data->pool_size, | ||
1144 | &adev->dma_desc_pool, | ||
1145 | GFP_KERNEL); | ||
1146 | if (!adev->dma_desc_pool_virt) | ||
1147 | return -ENOMEM; | ||
1148 | |||
1149 | adev->id = plat_data->hw_id; | ||
1150 | |||
1151 | /* discover transaction capabilities from the platform data */ | ||
1152 | dma_dev->cap_mask = plat_data->cap_mask; | ||
1153 | adev->pdev = pdev; | ||
1154 | platform_set_drvdata(pdev, adev); | ||
1155 | |||
1156 | adev->shared = platform_get_drvdata(plat_data->shared); | ||
1157 | |||
1158 | INIT_LIST_HEAD(&dma_dev->channels); | ||
1159 | |||
1160 | /* set base routines */ | ||
1161 | dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources; | ||
1162 | dma_dev->device_free_chan_resources = mv_xor_free_chan_resources; | ||
1163 | dma_dev->device_is_tx_complete = mv_xor_is_complete; | ||
1164 | dma_dev->device_issue_pending = mv_xor_issue_pending; | ||
1165 | dma_dev->dev = &pdev->dev; | ||
1166 | |||
1167 | /* set prep routines based on capability */ | ||
1168 | if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) | ||
1169 | dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy; | ||
1170 | if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) | ||
1171 | dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset; | ||
1172 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { | ||
1173 | dma_dev->max_xor = 8; | ||
1174 | dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor; | ||
1175 | } | ||
1176 | |||
1177 | mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL); | ||
1178 | if (!mv_chan) { | ||
1179 | ret = -ENOMEM; | ||
1180 | goto err_free_dma; | ||
1181 | } | ||
1182 | mv_chan->device = adev; | ||
1183 | mv_chan->idx = plat_data->hw_id; | ||
1184 | mv_chan->mmr_base = adev->shared->xor_base; | ||
1185 | |||
1186 | if (!mv_chan->mmr_base) { | ||
1187 | ret = -ENOMEM; | ||
1188 | goto err_free_dma; | ||
1189 | } | ||
1190 | tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long) | ||
1191 | mv_chan); | ||
1192 | |||
1193 | /* clear errors before enabling interrupts */ | ||
1194 | mv_xor_device_clear_err_status(mv_chan); | ||
1195 | |||
1196 | irq = platform_get_irq(pdev, 0); | ||
1197 | if (irq < 0) { | ||
1198 | ret = irq; | ||
1199 | goto err_free_dma; | ||
1200 | } | ||
1201 | ret = devm_request_irq(&pdev->dev, irq, | ||
1202 | mv_xor_interrupt_handler, | ||
1203 | 0, dev_name(&pdev->dev), mv_chan); | ||
1204 | if (ret) | ||
1205 | goto err_free_dma; | ||
1206 | |||
1207 | mv_chan_unmask_interrupts(mv_chan); | ||
1208 | |||
1209 | mv_set_mode(mv_chan, DMA_MEMCPY); | ||
1210 | |||
1211 | spin_lock_init(&mv_chan->lock); | ||
1212 | INIT_LIST_HEAD(&mv_chan->chain); | ||
1213 | INIT_LIST_HEAD(&mv_chan->completed_slots); | ||
1214 | INIT_LIST_HEAD(&mv_chan->all_slots); | ||
1215 | INIT_RCU_HEAD(&mv_chan->common.rcu); | ||
1216 | mv_chan->common.device = dma_dev; | ||
1217 | |||
1218 | list_add_tail(&mv_chan->common.device_node, &dma_dev->channels); | ||
1219 | |||
1220 | if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { | ||
1221 | ret = mv_xor_memcpy_self_test(adev); | ||
1222 | dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret); | ||
1223 | if (ret) | ||
1224 | goto err_free_dma; | ||
1225 | } | ||
1226 | |||
1227 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { | ||
1228 | ret = mv_xor_xor_self_test(adev); | ||
1229 | dev_dbg(&pdev->dev, "xor self test returned %d\n", ret); | ||
1230 | if (ret) | ||
1231 | goto err_free_dma; | ||
1232 | } | ||
1233 | |||
1234 | dev_printk(KERN_INFO, &pdev->dev, "Marvell XOR: " | ||
1235 | "( %s%s%s%s)\n", | ||
1236 | dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", | ||
1237 | dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "", | ||
1238 | dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", | ||
1239 | dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); | ||
1240 | |||
1241 | dma_async_device_register(dma_dev); | ||
1242 | goto out; | ||
1243 | |||
1244 | err_free_dma: | ||
1245 | dma_free_coherent(&adev->pdev->dev, plat_data->pool_size, | ||
1246 | adev->dma_desc_pool_virt, adev->dma_desc_pool); | ||
1247 | out: | ||
1248 | return ret; | ||
1249 | } | ||
1250 | |||
1251 | static void | ||
1252 | mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp, | ||
1253 | struct mbus_dram_target_info *dram) | ||
1254 | { | ||
1255 | void __iomem *base = msp->xor_base; | ||
1256 | u32 win_enable = 0; | ||
1257 | int i; | ||
1258 | |||
1259 | for (i = 0; i < 8; i++) { | ||
1260 | writel(0, base + WINDOW_BASE(i)); | ||
1261 | writel(0, base + WINDOW_SIZE(i)); | ||
1262 | if (i < 4) | ||
1263 | writel(0, base + WINDOW_REMAP_HIGH(i)); | ||
1264 | } | ||
1265 | |||
1266 | for (i = 0; i < dram->num_cs; i++) { | ||
1267 | struct mbus_dram_window *cs = dram->cs + i; | ||
1268 | |||
1269 | writel((cs->base & 0xffff0000) | | ||
1270 | (cs->mbus_attr << 8) | | ||
1271 | dram->mbus_dram_target_id, base + WINDOW_BASE(i)); | ||
1272 | writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); | ||
1273 | |||
1274 | win_enable |= (1 << i); | ||
1275 | win_enable |= 3 << (16 + (2 * i)); | ||
1276 | } | ||
1277 | |||
1278 | writel(win_enable, base + WINDOW_BAR_ENABLE(0)); | ||
1279 | writel(win_enable, base + WINDOW_BAR_ENABLE(1)); | ||
1280 | } | ||
1281 | |||
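mv_xor_conf_mbus_windows() above builds a single enable word covering all address windows: bit i enables window i, and the two bits at 16 + 2*i form that window's access-control field (writing 0x3 appears to grant full access, though that reading is an assumption). A standalone sketch of the computation:

#include <stdio.h>
#include <stdint.h>

static uint32_t window_enable_bits(int num_cs)
{
	uint32_t win_enable = 0;
	int i;

	for (i = 0; i < num_cs; i++) {
		win_enable |= 1u << i;			/* enable window i */
		win_enable |= 3u << (16 + 2 * i);	/* access-control field */
	}
	return win_enable;
}

int main(void)
{
	printf("1 chip select  -> 0x%08x\n", window_enable_bits(1));	/* 0x00030001 */
	printf("2 chip selects -> 0x%08x\n", window_enable_bits(2));	/* 0x000f0003 */
	printf("4 chip selects -> 0x%08x\n", window_enable_bits(4));	/* 0x00ff000f */
	return 0;
}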
1282 | static struct platform_driver mv_xor_driver = { | ||
1283 | .probe = mv_xor_probe, | ||
1284 | .remove = mv_xor_remove, | ||
1285 | .driver = { | ||
1286 | .owner = THIS_MODULE, | ||
1287 | .name = MV_XOR_NAME, | ||
1288 | }, | ||
1289 | }; | ||
1290 | |||
1291 | static int mv_xor_shared_probe(struct platform_device *pdev) | ||
1292 | { | ||
1293 | struct mv_xor_platform_shared_data *msd = pdev->dev.platform_data; | ||
1294 | struct mv_xor_shared_private *msp; | ||
1295 | struct resource *res; | ||
1296 | |||
1297 | dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n"); | ||
1298 | |||
1299 | msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL); | ||
1300 | if (!msp) | ||
1301 | return -ENOMEM; | ||
1302 | |||
1303 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1304 | if (!res) | ||
1305 | return -ENODEV; | ||
1306 | |||
1307 | msp->xor_base = devm_ioremap(&pdev->dev, res->start, | ||
1308 | res->end - res->start + 1); | ||
1309 | if (!msp->xor_base) | ||
1310 | return -EBUSY; | ||
1311 | |||
1312 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
1313 | if (!res) | ||
1314 | return -ENODEV; | ||
1315 | |||
1316 | msp->xor_high_base = devm_ioremap(&pdev->dev, res->start, | ||
1317 | res->end - res->start + 1); | ||
1318 | if (!msp->xor_high_base) | ||
1319 | return -EBUSY; | ||
1320 | |||
1321 | platform_set_drvdata(pdev, msp); | ||
1322 | |||
1323 | /* | ||
1324 | * (Re-)program MBUS remapping windows if we are asked to. | ||
1325 | */ | ||
1326 | if (msd != NULL && msd->dram != NULL) | ||
1327 | mv_xor_conf_mbus_windows(msp, msd->dram); | ||
1328 | |||
1329 | return 0; | ||
1330 | } | ||
1331 | |||
1332 | static int mv_xor_shared_remove(struct platform_device *pdev) | ||
1333 | { | ||
1334 | return 0; | ||
1335 | } | ||
1336 | |||
1337 | static struct platform_driver mv_xor_shared_driver = { | ||
1338 | .probe = mv_xor_shared_probe, | ||
1339 | .remove = mv_xor_shared_remove, | ||
1340 | .driver = { | ||
1341 | .owner = THIS_MODULE, | ||
1342 | .name = MV_XOR_SHARED_NAME, | ||
1343 | }, | ||
1344 | }; | ||
1345 | |||
1346 | |||
1347 | static int __init mv_xor_init(void) | ||
1348 | { | ||
1349 | int rc; | ||
1350 | |||
1351 | rc = platform_driver_register(&mv_xor_shared_driver); | ||
1352 | if (!rc) { | ||
1353 | rc = platform_driver_register(&mv_xor_driver); | ||
1354 | if (rc) | ||
1355 | platform_driver_unregister(&mv_xor_shared_driver); | ||
1356 | } | ||
1357 | return rc; | ||
1358 | } | ||
1359 | module_init(mv_xor_init); | ||
1360 | |||
1361 | /* it's currently unsafe to unload this module */ | ||
1362 | #if 0 | ||
1363 | static void __exit mv_xor_exit(void) | ||
1364 | { | ||
1365 | platform_driver_unregister(&mv_xor_driver); | ||
1366 | platform_driver_unregister(&mv_xor_shared_driver); | ||
1367 | return; | ||
1368 | } | ||
1369 | |||
1370 | module_exit(mv_xor_exit); | ||
1371 | #endif | ||
1372 | |||
1373 | MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>"); | ||
1374 | MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine"); | ||
1375 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h new file mode 100644 index 000000000000..06cafe1ef521 --- /dev/null +++ b/drivers/dma/mv_xor.h | |||
@@ -0,0 +1,183 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2007, 2008, Marvell International Ltd. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms and conditions of the GNU General Public License, | ||
6 | * version 2, as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
11 | * for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program; if not, write to the Free Software Foundation, | ||
15 | * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
16 | */ | ||
17 | |||
18 | #ifndef MV_XOR_H | ||
19 | #define MV_XOR_H | ||
20 | |||
21 | #include <linux/types.h> | ||
22 | #include <linux/io.h> | ||
23 | #include <linux/dmaengine.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | |||
26 | #define USE_TIMER | ||
27 | #define MV_XOR_SLOT_SIZE 64 | ||
28 | #define MV_XOR_THRESHOLD 1 | ||
29 | |||
30 | #define XOR_OPERATION_MODE_XOR 0 | ||
31 | #define XOR_OPERATION_MODE_MEMCPY 2 | ||
32 | #define XOR_OPERATION_MODE_MEMSET 4 | ||
33 | |||
34 | #define XOR_CURR_DESC(chan) (chan->mmr_base + 0x210 + (chan->idx * 4)) | ||
35 | #define XOR_NEXT_DESC(chan) (chan->mmr_base + 0x200 + (chan->idx * 4)) | ||
36 | #define XOR_BYTE_COUNT(chan) (chan->mmr_base + 0x220 + (chan->idx * 4)) | ||
37 | #define XOR_DEST_POINTER(chan) (chan->mmr_base + 0x2B0 + (chan->idx * 4)) | ||
38 | #define XOR_BLOCK_SIZE(chan) (chan->mmr_base + 0x2C0 + (chan->idx * 4)) | ||
39 | #define XOR_INIT_VALUE_LOW(chan) (chan->mmr_base + 0x2E0) | ||
40 | #define XOR_INIT_VALUE_HIGH(chan) (chan->mmr_base + 0x2E4) | ||
41 | |||
42 | #define XOR_CONFIG(chan) (chan->mmr_base + 0x10 + (chan->idx * 4)) | ||
43 | #define XOR_ACTIVATION(chan) (chan->mmr_base + 0x20 + (chan->idx * 4)) | ||
44 | #define XOR_INTR_CAUSE(chan) (chan->mmr_base + 0x30) | ||
45 | #define XOR_INTR_MASK(chan) (chan->mmr_base + 0x40) | ||
46 | #define XOR_ERROR_CAUSE(chan) (chan->mmr_base + 0x50) | ||
47 | #define XOR_ERROR_ADDR(chan) (chan->mmr_base + 0x60) | ||
48 | #define XOR_INTR_MASK_VALUE 0x3F5 | ||
49 | |||
50 | #define WINDOW_BASE(w) (0x250 + ((w) << 2)) | ||
51 | #define WINDOW_SIZE(w) (0x270 + ((w) << 2)) | ||
52 | #define WINDOW_REMAP_HIGH(w) (0x290 + ((w) << 2)) | ||
53 | #define WINDOW_BAR_ENABLE(chan) (0x240 + ((chan) << 2)) | ||
54 | |||
55 | struct mv_xor_shared_private { | ||
56 | void __iomem *xor_base; | ||
57 | void __iomem *xor_high_base; | ||
58 | }; | ||
59 | |||
60 | |||
61 | /** | ||
62 | * struct mv_xor_device - internal representation of a XOR device | ||
63 | * @pdev: Platform device | ||
64 | * @id: HW XOR Device selector | ||
65 | * @dma_desc_pool: base of DMA descriptor region (DMA address) | ||
66 | * @dma_desc_pool_virt: base of DMA descriptor region (CPU address) | ||
67 | * @common: embedded struct dma_device | ||
68 | */ | ||
69 | struct mv_xor_device { | ||
70 | struct platform_device *pdev; | ||
71 | int id; | ||
72 | dma_addr_t dma_desc_pool; | ||
73 | void *dma_desc_pool_virt; | ||
74 | struct dma_device common; | ||
75 | struct mv_xor_shared_private *shared; | ||
76 | }; | ||
77 | |||
78 | /** | ||
79 | * struct mv_xor_chan - internal representation of a XOR channel | ||
80 | * @pending: allows batching of hardware operations | ||
81 | * @completed_cookie: identifier for the most recently completed operation | ||
82 | * @lock: serializes enqueue/dequeue operations to the descriptors pool | ||
83 | * @mmr_base: memory mapped register base | ||
84 | * @idx: the index of the xor channel | ||
85 | * @chain: device chain view of the descriptors | ||
86 | * @completed_slots: slots completed by HW that still need to be acked | ||
87 | * @device: parent device | ||
88 | * @common: common dmaengine channel object members | ||
89 | * @last_used: placeholder for allocation to continue from where it left off | ||
90 | * @all_slots: complete domain of slots usable by the channel | ||
91 | * @slots_allocated: records the actual size of the descriptor slot pool | ||
92 | * @irq_tasklet: bottom half where mv_xor_slot_cleanup runs | ||
93 | */ | ||
94 | struct mv_xor_chan { | ||
95 | int pending; | ||
96 | dma_cookie_t completed_cookie; | ||
97 | spinlock_t lock; /* protects the descriptor slot pool */ | ||
98 | void __iomem *mmr_base; | ||
99 | unsigned int idx; | ||
100 | enum dma_transaction_type current_type; | ||
101 | struct list_head chain; | ||
102 | struct list_head completed_slots; | ||
103 | struct mv_xor_device *device; | ||
104 | struct dma_chan common; | ||
105 | struct mv_xor_desc_slot *last_used; | ||
106 | struct list_head all_slots; | ||
107 | int slots_allocated; | ||
108 | struct tasklet_struct irq_tasklet; | ||
109 | #ifdef USE_TIMER | ||
110 | unsigned long cleanup_time; | ||
111 | u32 current_on_last_cleanup; | ||
112 | dma_cookie_t is_complete_cookie; | ||
113 | #endif | ||
114 | }; | ||
115 | |||
116 | /** | ||
117 | * struct mv_xor_desc_slot - software descriptor | ||
118 | * @slot_node: node on the mv_xor_chan.all_slots list | ||
119 | * @chain_node: node on the mv_xor_chan.chain list | ||
120 | * @completed_node: node on the mv_xor_chan.completed_slots list | ||
121 | * @hw_desc: virtual address of the hardware descriptor chain | ||
122 | * @phys: hardware address of the hardware descriptor chain | ||
123 | * @group_head: first operation in a transaction | ||
124 | * @slot_cnt: total slots used in a transaction (group of operations) | ||
125 | * @slots_per_op: number of slots per operation | ||
126 | * @idx: pool index | ||
127 | * @unmap_src_cnt: number of xor sources | ||
128 | * @unmap_len: transaction bytecount | ||
129 | * @async_tx: support for the async_tx api | ||
130 | * @group_list: list of slots that make up a multi-descriptor transaction | ||
131 | * for example transfer lengths larger than the supported hw max | ||
132 | * @xor_check_result: result of the zero-sum check | ||
133 | * @crc32_result: result of the CRC-32 calculation | ||
134 | */ | ||
135 | struct mv_xor_desc_slot { | ||
136 | struct list_head slot_node; | ||
137 | struct list_head chain_node; | ||
138 | struct list_head completed_node; | ||
139 | enum dma_transaction_type type; | ||
140 | void *hw_desc; | ||
141 | struct mv_xor_desc_slot *group_head; | ||
142 | u16 slot_cnt; | ||
143 | u16 slots_per_op; | ||
144 | u16 idx; | ||
145 | u16 unmap_src_cnt; | ||
146 | u32 value; | ||
147 | size_t unmap_len; | ||
148 | struct dma_async_tx_descriptor async_tx; | ||
149 | union { | ||
150 | u32 *xor_check_result; | ||
151 | u32 *crc32_result; | ||
152 | }; | ||
153 | #ifdef USE_TIMER | ||
154 | unsigned long arrival_time; | ||
155 | struct timer_list timeout; | ||
156 | #endif | ||
157 | }; | ||
158 | |||
159 | /* This structure describes the 64-byte hardware XOR descriptor */ | ||
160 | struct mv_xor_desc { | ||
161 | u32 status; /* descriptor execution status */ | ||
162 | u32 crc32_result; /* result of CRC-32 calculation */ | ||
163 | u32 desc_command; /* type of operation to be carried out */ | ||
164 | u32 phy_next_desc; /* next descriptor address pointer */ | ||
165 | u32 byte_count; /* size of src/dst blocks in bytes */ | ||
166 | u32 phy_dest_addr; /* destination block address */ | ||
167 | u32 phy_src_addr[8]; /* source block addresses */ | ||
168 | u32 reserved0; | ||
169 | u32 reserved1; | ||
170 | }; | ||
171 | |||
172 | #define to_mv_sw_desc(addr_hw_desc) \ | ||
173 | container_of(addr_hw_desc, struct mv_xor_desc_slot, hw_desc) | ||
174 | |||
175 | #define mv_hw_desc_slot_idx(hw_desc, idx) \ | ||
176 | ((void *)(((unsigned long)hw_desc) + ((idx) << 5))) | ||
177 | |||
178 | #define MV_XOR_MIN_BYTE_COUNT (128) | ||
179 | #define XOR_MAX_BYTE_COUNT ((16 * 1024 * 1024) - 1) | ||
180 | #define MV_XOR_MAX_BYTE_COUNT XOR_MAX_BYTE_COUNT | ||
181 | |||
182 | |||
183 | #endif | ||
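
The header above fixes the hardware descriptor at 64 bytes and bounds transfers to MV_XOR_MIN_BYTE_COUNT..MV_XOR_MAX_BYTE_COUNT. The standalone sketch below shows how a two-source XOR descriptor could be filled and chained; the field usage comes from the inline comments, while the desc_command encoding and the idea that a zero phy_next_desc ends the chain are assumptions made for illustration, not something this header states.

#include <stdint.h>

struct xor_hw_desc {				/* mirrors struct mv_xor_desc (64 bytes) */
	uint32_t status;			/* descriptor execution status */
	uint32_t crc32_result;			/* result of CRC-32 calculation */
	uint32_t desc_command;			/* type of operation to be carried out */
	uint32_t phy_next_desc;			/* next descriptor address pointer */
	uint32_t byte_count;			/* size of src/dst blocks in bytes */
	uint32_t phy_dest_addr;			/* destination block address */
	uint32_t phy_src_addr[8];		/* source block addresses */
	uint32_t reserved0;
	uint32_t reserved1;
};

#define XOR_MIN_BYTES	128u				/* MV_XOR_MIN_BYTE_COUNT */
#define XOR_MAX_BYTES	((16u * 1024 * 1024) - 1)	/* MV_XOR_MAX_BYTE_COUNT */

/* Fill one descriptor for dest = src[0] ^ src[1]; 'next' is the physical
 * address of the following descriptor (0 assumed to terminate the chain). */
static int xor_desc_fill(struct xor_hw_desc *d, uint32_t dest,
			 const uint32_t src[2], uint32_t len, uint32_t next)
{
	if (len < XOR_MIN_BYTES || len > XOR_MAX_BYTES)
		return -1;			/* outside the engine's limits */

	d->status = 0;
	d->desc_command = 0;			/* real XOR opcode bits are hardware specific */
	d->byte_count = len;
	d->phy_dest_addr = dest;
	d->phy_src_addr[0] = src[0];
	d->phy_src_addr[1] = src[1];
	d->phy_next_desc = next;
	return 0;
}
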
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c index 25918f7dfd0f..0b624e927a6f 100644 --- a/drivers/firmware/dcdbas.c +++ b/drivers/firmware/dcdbas.c | |||
@@ -254,6 +254,7 @@ static ssize_t host_control_on_shutdown_store(struct device *dev, | |||
254 | static int smi_request(struct smi_cmd *smi_cmd) | 254 | static int smi_request(struct smi_cmd *smi_cmd) |
255 | { | 255 | { |
256 | cpumask_t old_mask; | 256 | cpumask_t old_mask; |
257 | cpumask_of_cpu_ptr(new_mask, 0); | ||
257 | int ret = 0; | 258 | int ret = 0; |
258 | 259 | ||
259 | if (smi_cmd->magic != SMI_CMD_MAGIC) { | 260 | if (smi_cmd->magic != SMI_CMD_MAGIC) { |
@@ -264,7 +265,7 @@ static int smi_request(struct smi_cmd *smi_cmd) | |||
264 | 265 | ||
265 | /* SMI requires CPU 0 */ | 266 | /* SMI requires CPU 0 */ |
266 | old_mask = current->cpus_allowed; | 267 | old_mask = current->cpus_allowed; |
267 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(0)); | 268 | set_cpus_allowed_ptr(current, new_mask); |
268 | if (smp_processor_id() != 0) { | 269 | if (smp_processor_id() != 0) { |
269 | dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n", | 270 | dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n", |
270 | __func__); | 271 | __func__); |
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index f43d6d3cf2fa..426ac5add585 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
@@ -780,7 +780,7 @@ static __inline__ __u32 extract(__u8 *report, unsigned offset, unsigned n) | |||
780 | */ | 780 | */ |
781 | static __inline__ void implement(__u8 *report, unsigned offset, unsigned n, __u32 value) | 781 | static __inline__ void implement(__u8 *report, unsigned offset, unsigned n, __u32 value) |
782 | { | 782 | { |
783 | __le64 x; | 783 | u64 x; |
784 | u64 m = (1ULL << n) - 1; | 784 | u64 m = (1ULL << n) - 1; |
785 | 785 | ||
786 | if (n > 32) | 786 | if (n > 32) |
@@ -796,10 +796,10 @@ static __inline__ void implement(__u8 *report, unsigned offset, unsigned n, __u3 | |||
796 | report += offset >> 3; | 796 | report += offset >> 3; |
797 | offset &= 7; | 797 | offset &= 7; |
798 | 798 | ||
799 | x = get_unaligned((__le64 *)report); | 799 | x = get_unaligned_le64(report); |
800 | x &= cpu_to_le64(~(m << offset)); | 800 | x &= ~(m << offset); |
801 | x |= cpu_to_le64(((u64) value) << offset); | 801 | x |= ((u64)value) << offset; |
802 | put_unaligned(x, (__le64 *) report); | 802 | put_unaligned_le64(x, report); |
803 | } | 803 | } |
804 | 804 | ||
805 | /* | 805 | /* |
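
The implement() change drops the __le64 typing and pushes the byte-order handling into get_unaligned_le64()/put_unaligned_le64(), so the masking is done on a plain u64. Below is a standalone sketch of the same field insertion with the little-endian load/store written out; the helper names are made up and the n > 32 range check from the original is omitted.

#include <stdint.h>

static uint64_t load_le64(const uint8_t *p)
{
	uint64_t x = 0;
	unsigned int i;

	for (i = 0; i < 8; i++)
		x |= (uint64_t)p[i] << (8 * i);
	return x;
}

static void store_le64(uint8_t *p, uint64_t x)
{
	unsigned int i;

	for (i = 0; i < 8; i++)
		p[i] = (uint8_t)(x >> (8 * i));
}

/* Insert an n-bit 'value' at bit position 'offset' of a little-endian report. */
static void implement_le(uint8_t *report, unsigned int offset, unsigned int n,
			 uint32_t value)
{
	uint64_t m = (1ULL << n) - 1;
	uint64_t x;

	report += offset >> 3;			/* first byte that holds the field */
	offset &= 7;				/* bit offset inside that byte */

	x = load_le64(report);
	x &= ~(m << offset);			/* clear the old field */
	x |= (uint64_t)value << offset;		/* splice in the new value */
	store_le64(report, x);
}
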
diff --git a/drivers/hid/hid-input-quirks.c b/drivers/hid/hid-input-quirks.c index 4c2052c658f1..16feea014494 100644 --- a/drivers/hid/hid-input-quirks.c +++ b/drivers/hid/hid-input-quirks.c | |||
@@ -89,6 +89,29 @@ static int quirk_logitech_ultrax_remote(struct hid_usage *usage, struct input_de | |||
89 | return 1; | 89 | return 1; |
90 | } | 90 | } |
91 | 91 | ||
92 | static int quirk_gyration_remote(struct hid_usage *usage, struct input_dev *input, | ||
93 | unsigned long **bit, int *max) | ||
94 | { | ||
95 | if ((usage->hid & HID_USAGE_PAGE) != HID_UP_LOGIVENDOR) | ||
96 | return 0; | ||
97 | |||
98 | set_bit(EV_REP, input->evbit); | ||
99 | switch(usage->hid & HID_USAGE) { | ||
100 | /* Reported on Gyration MCE Remote */ | ||
101 | case 0x00d: map_key_clear(KEY_HOME); break; | ||
102 | case 0x024: map_key_clear(KEY_DVD); break; | ||
103 | case 0x025: map_key_clear(KEY_PVR); break; | ||
104 | case 0x046: map_key_clear(KEY_MEDIA); break; | ||
105 | case 0x047: map_key_clear(KEY_MP3); break; | ||
106 | case 0x049: map_key_clear(KEY_CAMERA); break; | ||
107 | case 0x04a: map_key_clear(KEY_VIDEO); break; | ||
108 | |||
109 | default: | ||
110 | return 0; | ||
111 | } | ||
112 | return 1; | ||
113 | } | ||
114 | |||
92 | static int quirk_chicony_tactical_pad(struct hid_usage *usage, struct input_dev *input, | 115 | static int quirk_chicony_tactical_pad(struct hid_usage *usage, struct input_dev *input, |
93 | unsigned long **bit, int *max) | 116 | unsigned long **bit, int *max) |
94 | { | 117 | { |
@@ -303,6 +326,9 @@ static int quirk_sunplus_wdesktop(struct hid_usage *usage, struct input_dev *inp | |||
303 | #define VENDOR_ID_EZKEY 0x0518 | 326 | #define VENDOR_ID_EZKEY 0x0518 |
304 | #define DEVICE_ID_BTC_8193 0x0002 | 327 | #define DEVICE_ID_BTC_8193 0x0002 |
305 | 328 | ||
329 | #define VENDOR_ID_GYRATION 0x0c16 | ||
330 | #define DEVICE_ID_GYRATION_REMOTE 0x0002 | ||
331 | |||
306 | #define VENDOR_ID_LOGITECH 0x046d | 332 | #define VENDOR_ID_LOGITECH 0x046d |
307 | #define DEVICE_ID_LOGITECH_RECEIVER 0xc101 | 333 | #define DEVICE_ID_LOGITECH_RECEIVER 0xc101 |
308 | #define DEVICE_ID_S510_RECEIVER 0xc50c | 334 | #define DEVICE_ID_S510_RECEIVER 0xc50c |
@@ -337,6 +363,8 @@ static const struct hid_input_blacklist { | |||
337 | 363 | ||
338 | { VENDOR_ID_EZKEY, DEVICE_ID_BTC_8193, quirk_btc_8193 }, | 364 | { VENDOR_ID_EZKEY, DEVICE_ID_BTC_8193, quirk_btc_8193 }, |
339 | 365 | ||
366 | { VENDOR_ID_GYRATION, DEVICE_ID_GYRATION_REMOTE, quirk_gyration_remote }, | ||
367 | |||
340 | { VENDOR_ID_LOGITECH, DEVICE_ID_LOGITECH_RECEIVER, quirk_logitech_ultrax_remote }, | 368 | { VENDOR_ID_LOGITECH, DEVICE_ID_LOGITECH_RECEIVER, quirk_logitech_ultrax_remote }, |
341 | { VENDOR_ID_LOGITECH, DEVICE_ID_S510_RECEIVER, quirk_logitech_wireless }, | 369 | { VENDOR_ID_LOGITECH, DEVICE_ID_S510_RECEIVER, quirk_logitech_wireless }, |
342 | { VENDOR_ID_LOGITECH, DEVICE_ID_S510_RECEIVER_2, quirk_logitech_wireless }, | 370 | { VENDOR_ID_LOGITECH, DEVICE_ID_S510_RECEIVER_2, quirk_logitech_wireless }, |
@@ -438,6 +466,18 @@ int hidinput_event_quirks(struct hid_device *hid, struct hid_field *field, struc | |||
438 | input_event(input, usage->type, REL_WHEEL, -value); | 466 | input_event(input, usage->type, REL_WHEEL, -value); |
439 | return 1; | 467 | return 1; |
440 | } | 468 | } |
469 | |||
470 | /* Gyration MCE remote "Sleep" key */ | ||
471 | if (hid->vendor == VENDOR_ID_GYRATION && | ||
472 | hid->product == DEVICE_ID_GYRATION_REMOTE && | ||
473 | (usage->hid & HID_USAGE_PAGE) == HID_UP_GENDESK && | ||
474 | (usage->hid & 0xff) == 0x82) { | ||
475 | input_event(input, usage->type, usage->code, 1); | ||
476 | input_sync(input); | ||
477 | input_event(input, usage->type, usage->code, 0); | ||
478 | input_sync(input); | ||
479 | return 1; | ||
480 | } | ||
441 | return 0; | 481 | return 0; |
442 | } | 482 | } |
443 | 483 | ||
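
Both additions above hook into the vendor/product keyed hid_input_blacklist table. The lookup itself is not part of these hunks, so the following is only a generic sketch of that table-driven dispatch, with invented names and a trivial handler.

#include <stdint.h>

struct quirk_entry {
	uint16_t vendor;
	uint16_t product;
	int (*handler)(void *ctx);	/* returns nonzero if the event was consumed */
};

static int gyration_like_handler(void *ctx)
{
	(void)ctx;			/* device-specific remapping would go here */
	return 1;
}

static const struct quirk_entry quirk_table[] = {
	{ 0x0c16, 0x0002, gyration_like_handler },	/* e.g. the Gyration remote */
	{ 0, 0, NULL },					/* terminator */
};

static int dispatch_quirk(uint16_t vendor, uint16_t product, void *ctx)
{
	const struct quirk_entry *q;

	for (q = quirk_table; q->handler; q++)
		if (q->vendor == vendor && q->product == product)
			return q->handler(ctx);
	return 0;	/* no quirk: fall through to generic handling */
}
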
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 5c52a20ad344..1b2e8dc3398d 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c | |||
@@ -100,6 +100,8 @@ static struct hidinput_key_translation apple_fn_keys[] = { | |||
100 | { KEY_F2, KEY_BRIGHTNESSUP, APPLE_FLAG_FKEY }, | 100 | { KEY_F2, KEY_BRIGHTNESSUP, APPLE_FLAG_FKEY }, |
101 | { KEY_F3, KEY_FN_F5, APPLE_FLAG_FKEY }, /* Exposé */ | 101 | { KEY_F3, KEY_FN_F5, APPLE_FLAG_FKEY }, /* Exposé */ |
102 | { KEY_F4, KEY_FN_F4, APPLE_FLAG_FKEY }, /* Dashboard */ | 102 | { KEY_F4, KEY_FN_F4, APPLE_FLAG_FKEY }, /* Dashboard */ |
103 | { KEY_F5, KEY_KBDILLUMDOWN, APPLE_FLAG_FKEY }, | ||
104 | { KEY_F6, KEY_KBDILLUMUP, APPLE_FLAG_FKEY }, | ||
103 | { KEY_F7, KEY_PREVIOUSSONG, APPLE_FLAG_FKEY }, | 105 | { KEY_F7, KEY_PREVIOUSSONG, APPLE_FLAG_FKEY }, |
104 | { KEY_F8, KEY_PLAYPAUSE, APPLE_FLAG_FKEY }, | 106 | { KEY_F8, KEY_PLAYPAUSE, APPLE_FLAG_FKEY }, |
105 | { KEY_F9, KEY_NEXTSONG, APPLE_FLAG_FKEY }, | 107 | { KEY_F9, KEY_NEXTSONG, APPLE_FLAG_FKEY }, |
@@ -612,6 +614,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel | |||
612 | case 0x0b6: map_key_clear(KEY_PREVIOUSSONG); break; | 614 | case 0x0b6: map_key_clear(KEY_PREVIOUSSONG); break; |
613 | case 0x0b7: map_key_clear(KEY_STOPCD); break; | 615 | case 0x0b7: map_key_clear(KEY_STOPCD); break; |
614 | case 0x0b8: map_key_clear(KEY_EJECTCD); break; | 616 | case 0x0b8: map_key_clear(KEY_EJECTCD); break; |
617 | case 0x0bc: map_key_clear(KEY_MEDIA_REPEAT); break; | ||
615 | 618 | ||
616 | case 0x0cd: map_key_clear(KEY_PLAYPAUSE); break; | 619 | case 0x0cd: map_key_clear(KEY_PLAYPAUSE); break; |
617 | case 0x0e0: map_abs_clear(ABS_VOLUME); break; | 620 | case 0x0e0: map_abs_clear(ABS_VOLUME); break; |
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index 0c6b4d4e7e27..c40f0403edaf 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c | |||
@@ -105,6 +105,7 @@ out: | |||
105 | static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) | 105 | static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) |
106 | { | 106 | { |
107 | unsigned int minor = iminor(file->f_path.dentry->d_inode); | 107 | unsigned int minor = iminor(file->f_path.dentry->d_inode); |
108 | /* FIXME: What stops hidraw_table going NULL */ | ||
108 | struct hid_device *dev = hidraw_table[minor]->hid; | 109 | struct hid_device *dev = hidraw_table[minor]->hid; |
109 | __u8 *buf; | 110 | __u8 *buf; |
110 | int ret = 0; | 111 | int ret = 0; |
@@ -211,38 +212,43 @@ static int hidraw_release(struct inode * inode, struct file * file) | |||
211 | kfree(list->hidraw); | 212 | kfree(list->hidraw); |
212 | } | 213 | } |
213 | 214 | ||
215 | kfree(list); | ||
216 | |||
214 | return 0; | 217 | return 0; |
215 | } | 218 | } |
216 | 219 | ||
217 | static int hidraw_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) | 220 | static long hidraw_ioctl(struct file *file, unsigned int cmd, |
221 | unsigned long arg) | ||
218 | { | 222 | { |
223 | struct inode *inode = file->f_path.dentry->d_inode; | ||
219 | unsigned int minor = iminor(inode); | 224 | unsigned int minor = iminor(inode); |
225 | long ret = 0; | ||
226 | /* FIXME: What stops hidraw_table going NULL */ | ||
220 | struct hidraw *dev = hidraw_table[minor]; | 227 | struct hidraw *dev = hidraw_table[minor]; |
221 | void __user *user_arg = (void __user*) arg; | 228 | void __user *user_arg = (void __user*) arg; |
222 | 229 | ||
230 | lock_kernel(); | ||
223 | switch (cmd) { | 231 | switch (cmd) { |
224 | case HIDIOCGRDESCSIZE: | 232 | case HIDIOCGRDESCSIZE: |
225 | if (put_user(dev->hid->rsize, (int __user *)arg)) | 233 | if (put_user(dev->hid->rsize, (int __user *)arg)) |
226 | return -EFAULT; | 234 | ret = -EFAULT; |
227 | return 0; | 235 | break; |
228 | 236 | ||
229 | case HIDIOCGRDESC: | 237 | case HIDIOCGRDESC: |
230 | { | 238 | { |
231 | __u32 len; | 239 | __u32 len; |
232 | 240 | ||
233 | if (get_user(len, (int __user *)arg)) | 241 | if (get_user(len, (int __user *)arg)) |
234 | return -EFAULT; | 242 | ret = -EFAULT; |
235 | 243 | else if (len > HID_MAX_DESCRIPTOR_SIZE - 1) | |
236 | if (len > HID_MAX_DESCRIPTOR_SIZE - 1) | 244 | ret = -EINVAL; |
237 | return -EINVAL; | 245 | else if (copy_to_user(user_arg + offsetof( |
238 | 246 | struct hidraw_report_descriptor, | |
239 | if (copy_to_user(user_arg + offsetof( | 247 | value[0]), |
240 | struct hidraw_report_descriptor, | 248 | dev->hid->rdesc, |
241 | value[0]), | 249 | min(dev->hid->rsize, len))) |
242 | dev->hid->rdesc, | 250 | ret = -EFAULT; |
243 | min(dev->hid->rsize, len))) | 251 | break; |
244 | return -EFAULT; | ||
245 | return 0; | ||
246 | } | 252 | } |
247 | case HIDIOCGRAWINFO: | 253 | case HIDIOCGRAWINFO: |
248 | { | 254 | { |
@@ -252,15 +258,13 @@ static int hidraw_ioctl(struct inode *inode, struct file *file, unsigned int cmd | |||
252 | dinfo.vendor = dev->hid->vendor; | 258 | dinfo.vendor = dev->hid->vendor; |
253 | dinfo.product = dev->hid->product; | 259 | dinfo.product = dev->hid->product; |
254 | if (copy_to_user(user_arg, &dinfo, sizeof(dinfo))) | 260 | if (copy_to_user(user_arg, &dinfo, sizeof(dinfo))) |
255 | return -EFAULT; | 261 | ret = -EFAULT; |
256 | 262 | break; | |
257 | return 0; | ||
258 | } | 263 | } |
259 | default: | 264 | default: |
260 | printk(KERN_EMERG "hidraw: unsupported ioctl() %x\n", | 265 | ret = -ENOTTY; |
261 | cmd); | ||
262 | } | 266 | } |
263 | return -EINVAL; | 267 | return ret; |
264 | } | 268 | } |
265 | 269 | ||
266 | static const struct file_operations hidraw_ops = { | 270 | static const struct file_operations hidraw_ops = { |
@@ -270,7 +274,7 @@ static const struct file_operations hidraw_ops = { | |||
270 | .poll = hidraw_poll, | 274 | .poll = hidraw_poll, |
271 | .open = hidraw_open, | 275 | .open = hidraw_open, |
272 | .release = hidraw_release, | 276 | .release = hidraw_release, |
273 | .ioctl = hidraw_ioctl, | 277 | .unlocked_ioctl = hidraw_ioctl, |
274 | }; | 278 | }; |
275 | 279 | ||
276 | void hidraw_report_event(struct hid_device *hid, u8 *data, int len) | 280 | void hidraw_report_event(struct hid_device *hid, u8 *data, int len) |
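
hidraw's move to .unlocked_ioctl keeps the old serialization by taking the BKL explicitly inside the handler and funnels every case through a single return. A minimal sketch of that shape for a hypothetical driver of the same era; FOO_IOC_RESET and the foo names are placeholders.

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/smp_lock.h>

#define FOO_IOC_RESET	_IO('f', 0x01)		/* hypothetical command */

static long foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	long ret = 0;

	lock_kernel();				/* BKL pushed down from the VFS */
	switch (cmd) {
	case FOO_IOC_RESET:
		/* device-specific work, still under the BKL */
		break;
	default:
		ret = -ENOTTY;			/* unknown ioctl */
	}
	unlock_kernel();
	return ret;
}

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= foo_ioctl,
};
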
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 1df832a8fcbc..61e78a4369b9 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c | |||
@@ -69,12 +69,18 @@ | |||
69 | #define USB_DEVICE_ID_APPLE_ALU_ANSI 0x0220 | 69 | #define USB_DEVICE_ID_APPLE_ALU_ANSI 0x0220 |
70 | #define USB_DEVICE_ID_APPLE_ALU_ISO 0x0221 | 70 | #define USB_DEVICE_ID_APPLE_ALU_ISO 0x0221 |
71 | #define USB_DEVICE_ID_APPLE_ALU_JIS 0x0222 | 71 | #define USB_DEVICE_ID_APPLE_ALU_JIS 0x0222 |
72 | #define USB_DEVICE_ID_APPLE_WELLSPRING_ANSI 0x0223 | ||
73 | #define USB_DEVICE_ID_APPLE_WELLSPRING_ISO 0x0224 | ||
74 | #define USB_DEVICE_ID_APPLE_WELLSPRING_JIS 0x0225 | ||
72 | #define USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI 0x0229 | 75 | #define USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI 0x0229 |
73 | #define USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO 0x022a | 76 | #define USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO 0x022a |
74 | #define USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS 0x022b | 77 | #define USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS 0x022b |
75 | #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI 0x022c | 78 | #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI 0x022c |
76 | #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO 0x022d | 79 | #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO 0x022d |
77 | #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS 0x022e | 80 | #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS 0x022e |
81 | #define USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI 0x0230 | ||
82 | #define USB_DEVICE_ID_APPLE_WELLSPRING2_ISO 0x0231 | ||
83 | #define USB_DEVICE_ID_APPLE_WELLSPRING2_JIS 0x0232 | ||
78 | #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a | 84 | #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a |
79 | #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b | 85 | #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b |
80 | #define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242 | 86 | #define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242 |
@@ -241,6 +247,8 @@ | |||
241 | #define USB_DEVICE_ID_LD_MACHINETEST 0x2040 | 247 | #define USB_DEVICE_ID_LD_MACHINETEST 0x2040 |
242 | 248 | ||
243 | #define USB_VENDOR_ID_LOGITECH 0x046d | 249 | #define USB_VENDOR_ID_LOGITECH 0x046d |
250 | #define USB_DEVICE_ID_LOGITECH_LX3 0xc044 | ||
251 | #define USB_DEVICE_ID_LOGITECH_V150 0xc047 | ||
244 | #define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101 | 252 | #define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101 |
245 | #define USB_DEVICE_ID_LOGITECH_HARMONY 0xc110 | 253 | #define USB_DEVICE_ID_LOGITECH_HARMONY 0xc110 |
246 | #define USB_DEVICE_ID_LOGITECH_HARMONY_2 0xc111 | 254 | #define USB_DEVICE_ID_LOGITECH_HARMONY_2 0xc111 |
@@ -314,6 +322,7 @@ | |||
314 | #define USB_DEVICE_ID_S510_RECEIVER_2 0xc517 | 322 | #define USB_DEVICE_ID_S510_RECEIVER_2 0xc517 |
315 | #define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500 0xc512 | 323 | #define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500 0xc512 |
316 | #define USB_DEVICE_ID_MX3000_RECEIVER 0xc513 | 324 | #define USB_DEVICE_ID_MX3000_RECEIVER 0xc513 |
325 | #define USB_DEVICE_ID_DINOVO_DESKTOP 0xc704 | ||
317 | #define USB_DEVICE_ID_DINOVO_EDGE 0xc714 | 326 | #define USB_DEVICE_ID_DINOVO_EDGE 0xc714 |
318 | #define USB_DEVICE_ID_DINOVO_MINI 0xc71f | 327 | #define USB_DEVICE_ID_DINOVO_MINI 0xc71f |
319 | 328 | ||
@@ -443,7 +452,8 @@ static const struct hid_blacklist { | |||
443 | { USB_VENDOR_ID_NEC, USB_DEVICE_ID_NEC_USB_GAME_PAD, HID_QUIRK_BADPAD }, | 452 | { USB_VENDOR_ID_NEC, USB_DEVICE_ID_NEC_USB_GAME_PAD, HID_QUIRK_BADPAD }, |
444 | { USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD, HID_QUIRK_BADPAD }, | 453 | { USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD, HID_QUIRK_BADPAD }, |
445 | { USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD, HID_QUIRK_BADPAD }, | 454 | { USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD, HID_QUIRK_BADPAD }, |
446 | 455 | ||
456 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP, HID_QUIRK_DUPLICATE_USAGES }, | ||
447 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE, HID_QUIRK_DUPLICATE_USAGES }, | 457 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE, HID_QUIRK_DUPLICATE_USAGES }, |
448 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI, HID_QUIRK_DUPLICATE_USAGES }, | 458 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI, HID_QUIRK_DUPLICATE_USAGES }, |
449 | 459 | ||
@@ -593,6 +603,8 @@ static const struct hid_blacklist { | |||
593 | 603 | ||
594 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD, HID_QUIRK_LOGITECH_IGNORE_DOUBLED_WHEEL | HID_QUIRK_LOGITECH_EXPANDED_KEYMAP }, | 604 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD, HID_QUIRK_LOGITECH_IGNORE_DOUBLED_WHEEL | HID_QUIRK_LOGITECH_EXPANDED_KEYMAP }, |
595 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500, HID_QUIRK_LOGITECH_IGNORE_DOUBLED_WHEEL | HID_QUIRK_LOGITECH_EXPANDED_KEYMAP }, | 605 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500, HID_QUIRK_LOGITECH_IGNORE_DOUBLED_WHEEL | HID_QUIRK_LOGITECH_EXPANDED_KEYMAP }, |
606 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_LX3, HID_QUIRK_INVERT_HWHEEL }, | ||
607 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_V150, HID_QUIRK_INVERT_HWHEEL }, | ||
596 | 608 | ||
597 | { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K, HID_QUIRK_MICROSOFT_KEYS }, | 609 | { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K, HID_QUIRK_MICROSOFT_KEYS }, |
598 | { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K, HID_QUIRK_MICROSOFT_KEYS }, | 610 | { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K, HID_QUIRK_MICROSOFT_KEYS }, |
@@ -642,6 +654,12 @@ static const struct hid_blacklist { | |||
642 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN }, | 654 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN }, |
643 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD }, | 655 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD }, |
644 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN }, | 656 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN }, |
657 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI, HID_QUIRK_APPLE_HAS_FN }, | ||
658 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD }, | ||
659 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS, HID_QUIRK_APPLE_HAS_FN }, | ||
660 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI, HID_QUIRK_APPLE_HAS_FN }, | ||
661 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD }, | ||
662 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS, HID_QUIRK_APPLE_HAS_FN }, | ||
645 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE }, | 663 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE }, |
646 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE }, | 664 | { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE }, |
647 | 665 | ||
@@ -1128,7 +1146,7 @@ static void usbhid_fixup_microsoft_descriptor(unsigned char *rdesc, int rsize) | |||
1128 | && rdesc[557] == 0x19 | 1146 | && rdesc[557] == 0x19 |
1129 | && rdesc[559] == 0x29) { | 1147 | && rdesc[559] == 0x29) { |
1130 | printk(KERN_INFO "Fixing up Microsoft Wireless Receiver Model 1028 report descriptor\n"); | 1148 | printk(KERN_INFO "Fixing up Microsoft Wireless Receiver Model 1028 report descriptor\n"); |
1131 | rdesc[284] = rdesc[304] = rdesc[558] = 0x35; | 1149 | rdesc[284] = rdesc[304] = rdesc[557] = 0x35; |
1132 | rdesc[352] = 0x36; | 1150 | rdesc[352] = 0x36; |
1133 | rdesc[286] = rdesc[355] = 0x46; | 1151 | rdesc[286] = rdesc[355] = 0x46; |
1134 | rdesc[306] = rdesc[559] = 0x45; | 1152 | rdesc[306] = rdesc[559] = 0x45; |
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index 95cc192bc7af..842e9edb888e 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c | |||
@@ -406,6 +406,7 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, | |||
406 | uref_multi = kmalloc(sizeof(struct hiddev_usage_ref_multi), GFP_KERNEL); | 406 | uref_multi = kmalloc(sizeof(struct hiddev_usage_ref_multi), GFP_KERNEL); |
407 | if (!uref_multi) | 407 | if (!uref_multi) |
408 | return -ENOMEM; | 408 | return -ENOMEM; |
409 | lock_kernel(); | ||
409 | uref = &uref_multi->uref; | 410 | uref = &uref_multi->uref; |
410 | if (cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) { | 411 | if (cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) { |
411 | if (copy_from_user(uref_multi, user_arg, | 412 | if (copy_from_user(uref_multi, user_arg, |
@@ -501,12 +502,15 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, | |||
501 | } | 502 | } |
502 | 503 | ||
503 | goodreturn: | 504 | goodreturn: |
505 | unlock_kernel(); | ||
504 | kfree(uref_multi); | 506 | kfree(uref_multi); |
505 | return 0; | 507 | return 0; |
506 | fault: | 508 | fault: |
509 | unlock_kernel(); | ||
507 | kfree(uref_multi); | 510 | kfree(uref_multi); |
508 | return -EFAULT; | 511 | return -EFAULT; |
509 | inval: | 512 | inval: |
513 | unlock_kernel(); | ||
510 | kfree(uref_multi); | 514 | kfree(uref_multi); |
511 | return -EINVAL; | 515 | return -EINVAL; |
512 | } | 516 | } |
@@ -540,7 +544,7 @@ static noinline int hiddev_ioctl_string(struct hiddev *hiddev, unsigned int cmd, | |||
540 | return len; | 544 | return len; |
541 | } | 545 | } |
542 | 546 | ||
543 | static int hiddev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) | 547 | static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
544 | { | 548 | { |
545 | struct hiddev_list *list = file->private_data; | 549 | struct hiddev_list *list = file->private_data; |
546 | struct hiddev *hiddev = list->hiddev; | 550 | struct hiddev *hiddev = list->hiddev; |
@@ -555,7 +559,10 @@ static int hiddev_ioctl(struct inode *inode, struct file *file, unsigned int cmd | |||
555 | struct usbhid_device *usbhid = hid->driver_data; | 559 | struct usbhid_device *usbhid = hid->driver_data; |
556 | void __user *user_arg = (void __user *)arg; | 560 | void __user *user_arg = (void __user *)arg; |
557 | int i; | 561 | int i; |
562 | |||
563 | /* Called without BKL by compat methods so no BKL taken */ | ||
558 | 564 | ||
565 | /* FIXME: Who or what stops this racing with a disconnect? */ | ||
559 | if (!hiddev->exist) | 566 | if (!hiddev->exist) |
560 | return -EIO; | 567 | return -EIO; |
561 | 568 | ||
@@ -756,8 +763,7 @@ static int hiddev_ioctl(struct inode *inode, struct file *file, unsigned int cmd | |||
756 | #ifdef CONFIG_COMPAT | 763 | #ifdef CONFIG_COMPAT |
757 | static long hiddev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | 764 | static long hiddev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
758 | { | 765 | { |
759 | struct inode *inode = file->f_path.dentry->d_inode; | 766 | return hiddev_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); |
760 | return hiddev_ioctl(inode, file, cmd, (unsigned long)compat_ptr(arg)); | ||
761 | } | 767 | } |
762 | #endif | 768 | #endif |
763 | 769 | ||
@@ -768,7 +774,7 @@ static const struct file_operations hiddev_fops = { | |||
768 | .poll = hiddev_poll, | 774 | .poll = hiddev_poll, |
769 | .open = hiddev_open, | 775 | .open = hiddev_open, |
770 | .release = hiddev_release, | 776 | .release = hiddev_release, |
771 | .ioctl = hiddev_ioctl, | 777 | .unlocked_ioctl = hiddev_ioctl, |
772 | .fasync = hiddev_fasync, | 778 | .fasync = hiddev_fasync, |
773 | #ifdef CONFIG_COMPAT | 779 | #ifdef CONFIG_COMPAT |
774 | .compat_ioctl = hiddev_compat_ioctl, | 780 | .compat_ioctl = hiddev_compat_ioctl, |
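
With the handler now taking (file, cmd, arg) directly, the compat path above shrinks to widening the 32-bit user pointer. The same shape for a hypothetical driver, assuming foo_ioctl() is its .unlocked_ioctl handler:

#include <linux/compat.h>
#include <linux/fs.h>

static long foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg);	/* .unlocked_ioctl */

#ifdef CONFIG_COMPAT
static long foo_compat_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	/* widen the 32-bit user pointer, then reuse the native handler */
	return foo_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif
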
diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c index 3cd46d2e53c1..0caaafe01843 100644 --- a/drivers/hid/usbhid/usbkbd.c +++ b/drivers/hid/usbhid/usbkbd.c | |||
@@ -43,7 +43,7 @@ MODULE_AUTHOR(DRIVER_AUTHOR); | |||
43 | MODULE_DESCRIPTION(DRIVER_DESC); | 43 | MODULE_DESCRIPTION(DRIVER_DESC); |
44 | MODULE_LICENSE(DRIVER_LICENSE); | 44 | MODULE_LICENSE(DRIVER_LICENSE); |
45 | 45 | ||
46 | static unsigned char usb_kbd_keycode[256] = { | 46 | static const unsigned char usb_kbd_keycode[256] = { |
47 | 0, 0, 0, 0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36, 37, 38, | 47 | 0, 0, 0, 0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36, 37, 38, |
48 | 50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45, 21, 44, 2, 3, | 48 | 50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45, 21, 44, 2, 3, |
49 | 4, 5, 6, 7, 8, 9, 10, 11, 28, 1, 14, 15, 57, 12, 13, 26, | 49 | 4, 5, 6, 7, 8, 9, 10, 11, 28, 1, 14, 15, 57, 12, 13, 26, |
@@ -233,14 +233,6 @@ static int usb_kbd_probe(struct usb_interface *iface, | |||
233 | if (!usb_endpoint_is_int_in(endpoint)) | 233 | if (!usb_endpoint_is_int_in(endpoint)) |
234 | return -ENODEV; | 234 | return -ENODEV; |
235 | 235 | ||
236 | #ifdef CONFIG_USB_HID | ||
237 | if (usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor), | ||
238 | le16_to_cpu(dev->descriptor.idProduct)) | ||
239 | & HID_QUIRK_IGNORE) { | ||
240 | return -ENODEV; | ||
241 | } | ||
242 | #endif | ||
243 | |||
244 | pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress); | 236 | pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress); |
245 | maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe)); | 237 | maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe)); |
246 | 238 | ||
diff --git a/drivers/hid/usbhid/usbmouse.c b/drivers/hid/usbhid/usbmouse.c index 703e9d0e8714..35689ef172cc 100644 --- a/drivers/hid/usbhid/usbmouse.c +++ b/drivers/hid/usbhid/usbmouse.c | |||
@@ -129,14 +129,6 @@ static int usb_mouse_probe(struct usb_interface *intf, const struct usb_device_i | |||
129 | if (!usb_endpoint_is_int_in(endpoint)) | 129 | if (!usb_endpoint_is_int_in(endpoint)) |
130 | return -ENODEV; | 130 | return -ENODEV; |
131 | 131 | ||
132 | #ifdef CONFIG_USB_HID | ||
133 | if (usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor), | ||
134 | le16_to_cpu(dev->descriptor.idProduct)) | ||
135 | & (HID_QUIRK_IGNORE|HID_QUIRK_IGNORE_MOUSE)) { | ||
136 | return -ENODEV; | ||
137 | } | ||
138 | #endif | ||
139 | |||
140 | pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress); | 132 | pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress); |
141 | maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe)); | 133 | maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe)); |
142 | 134 | ||
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig index 15b09b89588a..04d9c4d459d0 100644 --- a/drivers/ide/Kconfig +++ b/drivers/ide/Kconfig | |||
@@ -510,6 +510,7 @@ config BLK_DEV_TRIFLEX | |||
510 | 510 | ||
511 | config BLK_DEV_CY82C693 | 511 | config BLK_DEV_CY82C693 |
512 | tristate "CY82C693 chipset support" | 512 | tristate "CY82C693 chipset support" |
513 | depends on ALPHA | ||
513 | select IDE_TIMINGS | 514 | select IDE_TIMINGS |
514 | select BLK_DEV_IDEDMA_PCI | 515 | select BLK_DEV_IDEDMA_PCI |
515 | help | 516 | help |
@@ -548,6 +549,7 @@ config BLK_DEV_CS5535 | |||
548 | 549 | ||
549 | config BLK_DEV_HPT34X | 550 | config BLK_DEV_HPT34X |
550 | tristate "HPT34X chipset support" | 551 | tristate "HPT34X chipset support" |
552 | depends on BROKEN | ||
551 | select BLK_DEV_IDEDMA_PCI | 553 | select BLK_DEV_IDEDMA_PCI |
552 | help | 554 | help |
553 | This driver adds up to 4 more EIDE devices sharing a single | 555 | This driver adds up to 4 more EIDE devices sharing a single |
diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c index 52f58c885783..f575e8341aec 100644 --- a/drivers/ide/arm/icside.c +++ b/drivers/ide/arm/icside.c | |||
@@ -72,7 +72,7 @@ struct icside_state { | |||
72 | void __iomem *ioc_base; | 72 | void __iomem *ioc_base; |
73 | unsigned int sel; | 73 | unsigned int sel; |
74 | unsigned int type; | 74 | unsigned int type; |
75 | ide_hwif_t *hwif[2]; | 75 | struct ide_host *host; |
76 | }; | 76 | }; |
77 | 77 | ||
78 | #define ICS_TYPE_A3IN 0 | 78 | #define ICS_TYPE_A3IN 0 |
@@ -375,12 +375,14 @@ static int icside_dma_test_irq(ide_drive_t *drive) | |||
375 | 375 | ||
376 | static void icside_dma_timeout(ide_drive_t *drive) | 376 | static void icside_dma_timeout(ide_drive_t *drive) |
377 | { | 377 | { |
378 | ide_hwif_t *hwif = drive->hwif; | ||
379 | |||
378 | printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name); | 380 | printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name); |
379 | 381 | ||
380 | if (icside_dma_test_irq(drive)) | 382 | if (icside_dma_test_irq(drive)) |
381 | return; | 383 | return; |
382 | 384 | ||
383 | ide_dump_status(drive, "DMA timeout", ide_read_status(drive)); | 385 | ide_dump_status(drive, "DMA timeout", hwif->tp_ops->read_status(hwif)); |
384 | 386 | ||
385 | icside_dma_end(drive); | 387 | icside_dma_end(drive); |
386 | } | 388 | } |
@@ -440,10 +442,10 @@ static void icside_setup_ports(hw_regs_t *hw, void __iomem *base, | |||
440 | static int __init | 442 | static int __init |
441 | icside_register_v5(struct icside_state *state, struct expansion_card *ec) | 443 | icside_register_v5(struct icside_state *state, struct expansion_card *ec) |
442 | { | 444 | { |
443 | ide_hwif_t *hwif; | ||
444 | void __iomem *base; | 445 | void __iomem *base; |
445 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | 446 | struct ide_host *host; |
446 | hw_regs_t hw; | 447 | hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; |
448 | int ret; | ||
447 | 449 | ||
448 | base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0); | 450 | base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0); |
449 | if (!base) | 451 | if (!base) |
@@ -463,22 +465,23 @@ icside_register_v5(struct icside_state *state, struct expansion_card *ec) | |||
463 | 465 | ||
464 | icside_setup_ports(&hw, base, &icside_cardinfo_v5, ec); | 466 | icside_setup_ports(&hw, base, &icside_cardinfo_v5, ec); |
465 | 467 | ||
466 | hwif = ide_find_port(); | 468 | host = ide_host_alloc(NULL, hws); |
467 | if (!hwif) | 469 | if (host == NULL) |
468 | return -ENODEV; | 470 | return -ENODEV; |
469 | 471 | ||
470 | ide_init_port_hw(hwif, &hw); | 472 | state->host = host; |
471 | default_hwif_mmiops(hwif); | ||
472 | |||
473 | state->hwif[0] = hwif; | ||
474 | 473 | ||
475 | ecard_set_drvdata(ec, state); | 474 | ecard_set_drvdata(ec, state); |
476 | 475 | ||
477 | idx[0] = hwif->index; | 476 | ret = ide_host_register(host, NULL, hws); |
478 | 477 | if (ret) | |
479 | ide_device_add(idx, NULL); | 478 | goto err_free; |
480 | 479 | ||
481 | return 0; | 480 | return 0; |
481 | err_free: | ||
482 | ide_host_free(host); | ||
483 | ecard_set_drvdata(ec, NULL); | ||
484 | return ret; | ||
482 | } | 485 | } |
483 | 486 | ||
484 | static const struct ide_port_info icside_v6_port_info __initdata = { | 487 | static const struct ide_port_info icside_v6_port_info __initdata = { |
@@ -493,13 +496,12 @@ static const struct ide_port_info icside_v6_port_info __initdata = { | |||
493 | static int __init | 496 | static int __init |
494 | icside_register_v6(struct icside_state *state, struct expansion_card *ec) | 497 | icside_register_v6(struct icside_state *state, struct expansion_card *ec) |
495 | { | 498 | { |
496 | ide_hwif_t *hwif, *mate; | ||
497 | void __iomem *ioc_base, *easi_base; | 499 | void __iomem *ioc_base, *easi_base; |
500 | struct ide_host *host; | ||
498 | unsigned int sel = 0; | 501 | unsigned int sel = 0; |
499 | int ret; | 502 | int ret; |
500 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | 503 | hw_regs_t hw[2], *hws[] = { &hw[0], NULL, NULL, NULL }; |
501 | struct ide_port_info d = icside_v6_port_info; | 504 | struct ide_port_info d = icside_v6_port_info; |
502 | hw_regs_t hw[2]; | ||
503 | 505 | ||
504 | ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); | 506 | ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); |
505 | if (!ioc_base) { | 507 | if (!ioc_base) { |
@@ -538,28 +540,11 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec) | |||
538 | icside_setup_ports(&hw[0], easi_base, &icside_cardinfo_v6_1, ec); | 540 | icside_setup_ports(&hw[0], easi_base, &icside_cardinfo_v6_1, ec); |
539 | icside_setup_ports(&hw[1], easi_base, &icside_cardinfo_v6_2, ec); | 541 | icside_setup_ports(&hw[1], easi_base, &icside_cardinfo_v6_2, ec); |
540 | 542 | ||
541 | /* | 543 | host = ide_host_alloc(&d, hws); |
542 | * Find and register the interfaces. | 544 | if (host == NULL) |
543 | */ | ||
544 | hwif = ide_find_port(); | ||
545 | if (hwif == NULL) | ||
546 | return -ENODEV; | 545 | return -ENODEV; |
547 | 546 | ||
548 | ide_init_port_hw(hwif, &hw[0]); | 547 | state->host = host; |
549 | default_hwif_mmiops(hwif); | ||
550 | |||
551 | idx[0] = hwif->index; | ||
552 | |||
553 | mate = ide_find_port(); | ||
554 | if (mate) { | ||
555 | ide_init_port_hw(mate, &hw[1]); | ||
556 | default_hwif_mmiops(mate); | ||
557 | |||
558 | idx[1] = mate->index; | ||
559 | } | ||
560 | |||
561 | state->hwif[0] = hwif; | ||
562 | state->hwif[1] = mate; | ||
563 | 548 | ||
564 | ecard_set_drvdata(ec, state); | 549 | ecard_set_drvdata(ec, state); |
565 | 550 | ||
@@ -569,11 +554,17 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec) | |||
569 | d.dma_ops = NULL; | 554 | d.dma_ops = NULL; |
570 | } | 555 | } |
571 | 556 | ||
572 | ide_device_add(idx, &d); | 557 | ret = ide_host_register(host, NULL, hws); |
558 | if (ret) | ||
559 | goto err_free; | ||
573 | 560 | ||
574 | return 0; | 561 | return 0; |
575 | 562 | err_free: | |
576 | out: | 563 | ide_host_free(host); |
564 | if (d.dma_ops) | ||
565 | free_dma(ec->dma); | ||
566 | ecard_set_drvdata(ec, NULL); | ||
567 | out: | ||
577 | return ret; | 568 | return ret; |
578 | } | 569 | } |
579 | 570 | ||
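
The icside conversion shows the general shape of the new ide_host_alloc()/ide_host_register() bring-up that replaces ide_find_port()/ide_device_add(). A condensed sketch with placeholder names, mirroring the error unwinding in the hunk above; the one-call ide_host_add() used by the drivers further down effectively wraps this alloc-plus-register pair.

#include <linux/errno.h>
#include <linux/ide.h>

static const struct ide_port_info foo_port_info = {
	.host_flags	= IDE_HFLAG_NO_DMA,	/* placeholder flags */
};

static int foo_ide_attach(hw_regs_t *hw, struct ide_host **out)
{
	hw_regs_t *hws[] = { hw, NULL, NULL, NULL };
	struct ide_host *host;
	int ret;

	host = ide_host_alloc(&foo_port_info, hws);	/* allocate host + ports */
	if (host == NULL)
		return -ENODEV;

	ret = ide_host_register(host, NULL, hws);	/* probe and register them */
	if (ret) {
		ide_host_free(host);			/* unwind the allocation */
		return ret;
	}

	*out = host;	/* caller keeps this for ide_host_remove() at teardown */
	return 0;
}
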
diff --git a/drivers/ide/arm/ide_arm.c b/drivers/ide/arm/ide_arm.c index 2f311da4c963..176532ffae0e 100644 --- a/drivers/ide/arm/ide_arm.c +++ b/drivers/ide/arm/ide_arm.c | |||
@@ -28,10 +28,8 @@ | |||
28 | 28 | ||
29 | static int __init ide_arm_init(void) | 29 | static int __init ide_arm_init(void) |
30 | { | 30 | { |
31 | ide_hwif_t *hwif; | ||
32 | hw_regs_t hw; | ||
33 | unsigned long base = IDE_ARM_IO, ctl = IDE_ARM_IO + 0x206; | 31 | unsigned long base = IDE_ARM_IO, ctl = IDE_ARM_IO + 0x206; |
34 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | 32 | hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; |
35 | 33 | ||
36 | if (!request_region(base, 8, DRV_NAME)) { | 34 | if (!request_region(base, 8, DRV_NAME)) { |
37 | printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n", | 35 | printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n", |
@@ -51,15 +49,7 @@ static int __init ide_arm_init(void) | |||
51 | hw.irq = IDE_ARM_IRQ; | 49 | hw.irq = IDE_ARM_IRQ; |
52 | hw.chipset = ide_generic; | 50 | hw.chipset = ide_generic; |
53 | 51 | ||
54 | hwif = ide_find_port(); | 52 | return ide_host_add(NULL, hws, NULL); |
55 | if (hwif) { | ||
56 | ide_init_port_hw(hwif, &hw); | ||
57 | idx[0] = hwif->index; | ||
58 | |||
59 | ide_device_add(idx, NULL); | ||
60 | } | ||
61 | |||
62 | return 0; | ||
63 | } | 53 | } |
64 | 54 | ||
65 | module_init(ide_arm_init); | 55 | module_init(ide_arm_init); |
diff --git a/drivers/ide/arm/palm_bk3710.c b/drivers/ide/arm/palm_bk3710.c index c79b85b6e4a3..65bb4b8fd570 100644 --- a/drivers/ide/arm/palm_bk3710.c +++ b/drivers/ide/arm/palm_bk3710.c | |||
@@ -316,15 +316,14 @@ static u8 __devinit palm_bk3710_cable_detect(ide_hwif_t *hwif) | |||
316 | static int __devinit palm_bk3710_init_dma(ide_hwif_t *hwif, | 316 | static int __devinit palm_bk3710_init_dma(ide_hwif_t *hwif, |
317 | const struct ide_port_info *d) | 317 | const struct ide_port_info *d) |
318 | { | 318 | { |
319 | unsigned long base = | ||
320 | hwif->io_ports.data_addr - IDE_PALM_ATA_PRI_REG_OFFSET; | ||
321 | |||
322 | printk(KERN_INFO " %s: MMIO-DMA\n", hwif->name); | 319 | printk(KERN_INFO " %s: MMIO-DMA\n", hwif->name); |
323 | 320 | ||
324 | if (ide_allocate_dma_engine(hwif)) | 321 | if (ide_allocate_dma_engine(hwif)) |
325 | return -1; | 322 | return -1; |
326 | 323 | ||
327 | ide_setup_dma(hwif, base); | 324 | hwif->dma_base = hwif->io_ports.data_addr - IDE_PALM_ATA_PRI_REG_OFFSET; |
325 | |||
326 | hwif->dma_ops = &sff_dma_ops; | ||
328 | 327 | ||
329 | return 0; | 328 | return 0; |
330 | } | 329 | } |
@@ -348,11 +347,10 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev) | |||
348 | { | 347 | { |
349 | struct clk *clk; | 348 | struct clk *clk; |
350 | struct resource *mem, *irq; | 349 | struct resource *mem, *irq; |
351 | ide_hwif_t *hwif; | 350 | struct ide_host *host; |
352 | unsigned long base, rate; | 351 | unsigned long base, rate; |
353 | int i; | 352 | int i, rc; |
354 | hw_regs_t hw; | 353 | hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; |
355 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | ||
356 | 354 | ||
357 | clk = clk_get(NULL, "IDECLK"); | 355 | clk = clk_get(NULL, "IDECLK"); |
358 | if (IS_ERR(clk)) | 356 | if (IS_ERR(clk)) |
@@ -394,24 +392,14 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev) | |||
394 | hw.irq = irq->start; | 392 | hw.irq = irq->start; |
395 | hw.chipset = ide_palm3710; | 393 | hw.chipset = ide_palm3710; |
396 | 394 | ||
397 | hwif = ide_find_port(); | 395 | rc = ide_host_add(&palm_bk3710_port_info, hws, NULL); |
398 | if (hwif == NULL) | 396 | if (rc) |
399 | goto out; | 397 | goto out; |
400 | 398 | ||
401 | i = hwif->index; | ||
402 | |||
403 | ide_init_port_hw(hwif, &hw); | ||
404 | |||
405 | default_hwif_mmiops(hwif); | ||
406 | |||
407 | idx[0] = i; | ||
408 | |||
409 | ide_device_add(idx, &palm_bk3710_port_info); | ||
410 | |||
411 | return 0; | 399 | return 0; |
412 | out: | 400 | out: |
413 | printk(KERN_WARNING "Palm Chip BK3710 IDE Register Fail\n"); | 401 | printk(KERN_WARNING "Palm Chip BK3710 IDE Register Fail\n"); |
414 | return -ENODEV; | 402 | return rc; |
415 | } | 403 | } |
416 | 404 | ||
417 | /* work with hotplug and coldplug */ | 405 | /* work with hotplug and coldplug */ |
diff --git a/drivers/ide/arm/rapide.c b/drivers/ide/arm/rapide.c index 43057e0303c8..2bdd8b734afb 100644 --- a/drivers/ide/arm/rapide.c +++ b/drivers/ide/arm/rapide.c | |||
@@ -32,11 +32,10 @@ static void rapide_setup_ports(hw_regs_t *hw, void __iomem *base, | |||
32 | static int __devinit | 32 | static int __devinit |
33 | rapide_probe(struct expansion_card *ec, const struct ecard_id *id) | 33 | rapide_probe(struct expansion_card *ec, const struct ecard_id *id) |
34 | { | 34 | { |
35 | ide_hwif_t *hwif; | ||
36 | void __iomem *base; | 35 | void __iomem *base; |
36 | struct ide_host *host; | ||
37 | int ret; | 37 | int ret; |
38 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | 38 | hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; |
39 | hw_regs_t hw; | ||
40 | 39 | ||
41 | ret = ecard_request_resources(ec); | 40 | ret = ecard_request_resources(ec); |
42 | if (ret) | 41 | if (ret) |
@@ -53,20 +52,11 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id) | |||
53 | hw.chipset = ide_generic; | 52 | hw.chipset = ide_generic; |
54 | hw.dev = &ec->dev; | 53 | hw.dev = &ec->dev; |
55 | 54 | ||
56 | hwif = ide_find_port(); | 55 | ret = ide_host_add(&rapide_port_info, hws, &host); |
57 | if (hwif == NULL) { | 56 | if (ret) |
58 | ret = -ENOENT; | ||
59 | goto release; | 57 | goto release; |
60 | } | ||
61 | |||
62 | ide_init_port_hw(hwif, &hw); | ||
63 | default_hwif_mmiops(hwif); | ||
64 | |||
65 | idx[0] = hwif->index; | ||
66 | |||
67 | ide_device_add(idx, &rapide_port_info); | ||
68 | 58 | ||
69 | ecard_set_drvdata(ec, hwif); | 59 | ecard_set_drvdata(ec, host); |
70 | goto out; | 60 | goto out; |
71 | 61 | ||
72 | release: | 62 | release: |
@@ -77,11 +67,11 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id) | |||
77 | 67 | ||
78 | static void __devexit rapide_remove(struct expansion_card *ec) | 68 | static void __devexit rapide_remove(struct expansion_card *ec) |
79 | { | 69 | { |
80 | ide_hwif_t *hwif = ecard_get_drvdata(ec); | 70 | struct ide_host *host = ecard_get_drvdata(ec); |
81 | 71 | ||
82 | ecard_set_drvdata(ec, NULL); | 72 | ecard_set_drvdata(ec, NULL); |
83 | 73 | ||
84 | ide_unregister(hwif); | 74 | ide_host_remove(host); |
85 | 75 | ||
86 | ecard_release_resources(ec); | 76 | ecard_release_resources(ec); |
87 | } | 77 | } |
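
rapide also shows the matching teardown: the ide_host handle saved at probe time is all that remove() needs. A schematic probe/remove pair for a hypothetical platform device (names and resource setup are placeholders):

#include <linux/ide.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/string.h>

static int __devinit foo_probe(struct platform_device *pdev)
{
	hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
	struct ide_host *host;
	int ret;

	memset(&hw, 0, sizeof(hw));
	/* fill hw.io_ports / hw.irq / hw.chipset / hw.dev from pdev resources */

	ret = ide_host_add(NULL, hws, &host);	/* alloc + register in one call */
	if (ret)
		return ret;

	platform_set_drvdata(pdev, host);	/* keep the handle for removal */
	return 0;
}

static int __devexit foo_remove(struct platform_device *pdev)
{
	ide_host_remove(platform_get_drvdata(pdev));	/* unregisters and frees */
	return 0;
}
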
diff --git a/drivers/ide/h8300/ide-h8300.c b/drivers/ide/h8300/ide-h8300.c index 20fad6d542cc..bde7a585f198 100644 --- a/drivers/ide/h8300/ide-h8300.c +++ b/drivers/ide/h8300/ide-h8300.c | |||
@@ -100,6 +100,8 @@ static void h8300_tf_read(ide_drive_t *drive, ide_task_t *task) | |||
100 | /* be sure we're looking at the low order bits */ | 100 | /* be sure we're looking at the low order bits */ |
101 | outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr); | 101 | outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr); |
102 | 102 | ||
103 | if (task->tf_flags & IDE_TFLAG_IN_FEATURE) | ||
104 | tf->feature = inb(io_ports->feature_addr); | ||
103 | if (task->tf_flags & IDE_TFLAG_IN_NSECT) | 105 | if (task->tf_flags & IDE_TFLAG_IN_NSECT) |
104 | tf->nsect = inb(io_ports->nsect_addr); | 106 | tf->nsect = inb(io_ports->nsect_addr); |
105 | if (task->tf_flags & IDE_TFLAG_IN_LBAL) | 107 | if (task->tf_flags & IDE_TFLAG_IN_LBAL) |
@@ -153,6 +155,21 @@ static void h8300_output_data(ide_drive_t *drive, struct request *rq, | |||
153 | mm_outsw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2); | 155 | mm_outsw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2); |
154 | } | 156 | } |
155 | 157 | ||
158 | static const struct ide_tp_ops h8300_tp_ops = { | ||
159 | .exec_command = ide_exec_command, | ||
160 | .read_status = ide_read_status, | ||
161 | .read_altstatus = ide_read_altstatus, | ||
162 | .read_sff_dma_status = ide_read_sff_dma_status, | ||
163 | |||
164 | .set_irq = ide_set_irq, | ||
165 | |||
166 | .tf_load = h8300_tf_load, | ||
167 | .tf_read = h8300_tf_read, | ||
168 | |||
169 | .input_data = h8300_input_data, | ||
170 | .output_data = h8300_output_data, | ||
171 | }; | ||
172 | |||
156 | #define H8300_IDE_GAP (2) | 173 | #define H8300_IDE_GAP (2) |
157 | 174 | ||
158 | static inline void hw_setup(hw_regs_t *hw) | 175 | static inline void hw_setup(hw_regs_t *hw) |
@@ -167,27 +184,14 @@ static inline void hw_setup(hw_regs_t *hw) | |||
167 | hw->chipset = ide_generic; | 184 | hw->chipset = ide_generic; |
168 | } | 185 | } |
169 | 186 | ||
170 | static inline void hwif_setup(ide_hwif_t *hwif) | ||
171 | { | ||
172 | default_hwif_iops(hwif); | ||
173 | |||
174 | hwif->tf_load = h8300_tf_load; | ||
175 | hwif->tf_read = h8300_tf_read; | ||
176 | |||
177 | hwif->input_data = h8300_input_data; | ||
178 | hwif->output_data = h8300_output_data; | ||
179 | } | ||
180 | |||
181 | static const struct ide_port_info h8300_port_info = { | 187 | static const struct ide_port_info h8300_port_info = { |
188 | .tp_ops = &h8300_tp_ops, | ||
182 | .host_flags = IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_NO_DMA, | 189 | .host_flags = IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_NO_DMA, |
183 | }; | 190 | }; |
184 | 191 | ||
185 | static int __init h8300_ide_init(void) | 192 | static int __init h8300_ide_init(void) |
186 | { | 193 | { |
187 | hw_regs_t hw; | 194 | hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; |
188 | ide_hwif_t *hwif; | ||
189 | int index; | ||
190 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | ||
191 | 195 | ||
192 | printk(KERN_INFO DRV_NAME ": H8/300 generic IDE interface\n"); | 196 | printk(KERN_INFO DRV_NAME ": H8/300 generic IDE interface\n"); |
193 | 197 | ||
@@ -200,19 +204,7 @@ static int __init h8300_ide_init(void) | |||
200 | 204 | ||
201 | hw_setup(&hw); | 205 | hw_setup(&hw); |
202 | 206 | ||
203 | hwif = ide_find_port_slot(&h8300_port_info); | 207 | return ide_host_add(&h8300_port_info, hws, NULL); |
204 | if (hwif == NULL) | ||
205 | return -ENOENT; | ||
206 | |||
207 | index = hwif->index; | ||
208 | ide_init_port_hw(hwif, &hw); | ||
209 | hwif_setup(hwif); | ||
210 | |||
211 | idx[0] = index; | ||
212 | |||
213 | ide_device_add(idx, &h8300_port_info); | ||
214 | |||
215 | return 0; | ||
216 | 208 | ||
217 | out_busy: | 209 | out_busy: |
218 | printk(KERN_ERR "ide-h8300: IDE I/F resource already used.\n"); | 210 | printk(KERN_ERR "ide-h8300: IDE I/F resource already used.\n"); |
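
ide-h8300 is the template for the new const struct ide_tp_ops: a port overrides only the hooks it must and points .tp_ops at the table from its ide_port_info. A stripped-down sketch with placeholder hooks; the default ide_* helpers and the hook signatures follow the hunk above.

#include <linux/ide.h>

static void foo_tf_load(ide_drive_t *drive, ide_task_t *task)
{
	/* board-specific taskfile write would go here */
}

static void foo_tf_read(ide_drive_t *drive, ide_task_t *task)
{
	/* board-specific taskfile read would go here */
}

static void foo_input_data(ide_drive_t *drive, struct request *rq,
			   void *buf, unsigned int len)
{
	/* board-specific PIO data-in, e.g. memory-mapped insw */
}

static void foo_output_data(ide_drive_t *drive, struct request *rq,
			    void *buf, unsigned int len)
{
	/* board-specific PIO data-out */
}

static const struct ide_tp_ops foo_tp_ops = {
	.exec_command		= ide_exec_command,	/* stock helpers for the rest */
	.read_status		= ide_read_status,
	.read_altstatus		= ide_read_altstatus,
	.read_sff_dma_status	= ide_read_sff_dma_status,
	.set_irq		= ide_set_irq,
	.tf_load		= foo_tf_load,
	.tf_read		= foo_tf_read,
	.input_data		= foo_input_data,
	.output_data		= foo_output_data,
};

static const struct ide_port_info foo_port_info = {
	.tp_ops		= &foo_tp_ops,
	.host_flags	= IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_NO_DMA,
};
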
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c index 2802031de670..adf04f99cdeb 100644 --- a/drivers/ide/ide-atapi.c +++ b/drivers/ide/ide-atapi.c | |||
@@ -22,6 +22,8 @@ ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc, | |||
22 | void (*io_buffers)(ide_drive_t *, struct ide_atapi_pc *, unsigned, int)) | 22 | void (*io_buffers)(ide_drive_t *, struct ide_atapi_pc *, unsigned, int)) |
23 | { | 23 | { |
24 | ide_hwif_t *hwif = drive->hwif; | 24 | ide_hwif_t *hwif = drive->hwif; |
25 | struct request *rq = hwif->hwgroup->rq; | ||
26 | const struct ide_tp_ops *tp_ops = hwif->tp_ops; | ||
25 | xfer_func_t *xferfunc; | 27 | xfer_func_t *xferfunc; |
26 | unsigned int temp; | 28 | unsigned int temp; |
27 | u16 bcount; | 29 | u16 bcount; |
@@ -30,12 +32,12 @@ ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc, | |||
30 | debug_log("Enter %s - interrupt handler\n", __func__); | 32 | debug_log("Enter %s - interrupt handler\n", __func__); |
31 | 33 | ||
32 | if (pc->flags & PC_FLAG_TIMEDOUT) { | 34 | if (pc->flags & PC_FLAG_TIMEDOUT) { |
33 | pc->callback(drive); | 35 | drive->pc_callback(drive); |
34 | return ide_stopped; | 36 | return ide_stopped; |
35 | } | 37 | } |
36 | 38 | ||
37 | /* Clear the interrupt */ | 39 | /* Clear the interrupt */ |
38 | stat = ide_read_status(drive); | 40 | stat = tp_ops->read_status(hwif); |
39 | 41 | ||
40 | if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) { | 42 | if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) { |
41 | if (hwif->dma_ops->dma_end(drive) || | 43 | if (hwif->dma_ops->dma_end(drive) || |
@@ -63,8 +65,9 @@ ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc, | |||
63 | local_irq_enable_in_hardirq(); | 65 | local_irq_enable_in_hardirq(); |
64 | 66 | ||
65 | if (drive->media == ide_tape && !scsi && | 67 | if (drive->media == ide_tape && !scsi && |
66 | (stat & ERR_STAT) && pc->c[0] == REQUEST_SENSE) | 68 | (stat & ERR_STAT) && rq->cmd[0] == REQUEST_SENSE) |
67 | stat &= ~ERR_STAT; | 69 | stat &= ~ERR_STAT; |
70 | |||
68 | if ((stat & ERR_STAT) || (pc->flags & PC_FLAG_DMA_ERROR)) { | 71 | if ((stat & ERR_STAT) || (pc->flags & PC_FLAG_DMA_ERROR)) { |
69 | /* Error detected */ | 72 | /* Error detected */ |
70 | debug_log("%s: I/O error\n", drive->name); | 73 | debug_log("%s: I/O error\n", drive->name); |
@@ -75,16 +78,17 @@ ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc, | |||
75 | goto cmd_finished; | 78 | goto cmd_finished; |
76 | } | 79 | } |
77 | 80 | ||
78 | if (pc->c[0] == REQUEST_SENSE) { | 81 | if (rq->cmd[0] == REQUEST_SENSE) { |
79 | printk(KERN_ERR "%s: I/O error in request sense" | 82 | printk(KERN_ERR "%s: I/O error in request sense" |
80 | " command\n", drive->name); | 83 | " command\n", drive->name); |
81 | return ide_do_reset(drive); | 84 | return ide_do_reset(drive); |
82 | } | 85 | } |
83 | 86 | ||
84 | debug_log("[cmd %x]: check condition\n", pc->c[0]); | 87 | debug_log("[cmd %x]: check condition\n", rq->cmd[0]); |
85 | 88 | ||
86 | /* Retry operation */ | 89 | /* Retry operation */ |
87 | retry_pc(drive); | 90 | retry_pc(drive); |
91 | |||
88 | /* queued, but not started */ | 92 | /* queued, but not started */ |
89 | return ide_stopped; | 93 | return ide_stopped; |
90 | } | 94 | } |
@@ -95,8 +99,10 @@ cmd_finished: | |||
95 | dsc_handle(drive); | 99 | dsc_handle(drive); |
96 | return ide_stopped; | 100 | return ide_stopped; |
97 | } | 101 | } |
102 | |||
98 | /* Command finished - Call the callback function */ | 103 | /* Command finished - Call the callback function */ |
99 | pc->callback(drive); | 104 | drive->pc_callback(drive); |
105 | |||
100 | return ide_stopped; | 106 | return ide_stopped; |
101 | } | 107 | } |
102 | 108 | ||
@@ -107,16 +113,15 @@ cmd_finished: | |||
107 | ide_dma_off(drive); | 113 | ide_dma_off(drive); |
108 | return ide_do_reset(drive); | 114 | return ide_do_reset(drive); |
109 | } | 115 | } |
110 | /* Get the number of bytes to transfer on this interrupt. */ | ||
111 | bcount = (hwif->INB(hwif->io_ports.lbah_addr) << 8) | | ||
112 | hwif->INB(hwif->io_ports.lbam_addr); | ||
113 | 116 | ||
114 | ireason = hwif->INB(hwif->io_ports.nsect_addr); | 117 | /* Get the number of bytes to transfer on this interrupt. */ |
118 | ide_read_bcount_and_ireason(drive, &bcount, &ireason); | ||
115 | 119 | ||
116 | if (ireason & CD) { | 120 | if (ireason & CD) { |
117 | printk(KERN_ERR "%s: CoD != 0 in %s\n", drive->name, __func__); | 121 | printk(KERN_ERR "%s: CoD != 0 in %s\n", drive->name, __func__); |
118 | return ide_do_reset(drive); | 122 | return ide_do_reset(drive); |
119 | } | 123 | } |
124 | |||
120 | if (((ireason & IO) == IO) == !!(pc->flags & PC_FLAG_WRITING)) { | 125 | if (((ireason & IO) == IO) == !!(pc->flags & PC_FLAG_WRITING)) { |
121 | /* Hopefully, we will never get here */ | 126 | /* Hopefully, we will never get here */ |
122 | printk(KERN_ERR "%s: We wanted to %s, but the device wants us " | 127 | printk(KERN_ERR "%s: We wanted to %s, but the device wants us " |
@@ -125,6 +130,7 @@ cmd_finished: | |||
125 | (ireason & IO) ? "Read" : "Write"); | 130 | (ireason & IO) ? "Read" : "Write"); |
126 | return ide_do_reset(drive); | 131 | return ide_do_reset(drive); |
127 | } | 132 | } |
133 | |||
128 | if (!(pc->flags & PC_FLAG_WRITING)) { | 134 | if (!(pc->flags & PC_FLAG_WRITING)) { |
129 | /* Reading - Check that we have enough space */ | 135 | /* Reading - Check that we have enough space */ |
130 | temp = pc->xferred + bcount; | 136 | temp = pc->xferred + bcount; |
@@ -142,7 +148,7 @@ cmd_finished: | |||
142 | if (pc->sg) | 148 | if (pc->sg) |
143 | io_buffers(drive, pc, temp, 0); | 149 | io_buffers(drive, pc, temp, 0); |
144 | else | 150 | else |
145 | hwif->input_data(drive, NULL, | 151 | tp_ops->input_data(drive, NULL, |
146 | pc->cur_pos, temp); | 152 | pc->cur_pos, temp); |
147 | printk(KERN_ERR "%s: transferred %d of " | 153 | printk(KERN_ERR "%s: transferred %d of " |
148 | "%d bytes\n", | 154 | "%d bytes\n", |
@@ -159,9 +165,9 @@ cmd_finished: | |||
159 | debug_log("The device wants to send us more data than " | 165 | debug_log("The device wants to send us more data than " |
160 | "expected - allowing transfer\n"); | 166 | "expected - allowing transfer\n"); |
161 | } | 167 | } |
162 | xferfunc = hwif->input_data; | 168 | xferfunc = tp_ops->input_data; |
163 | } else | 169 | } else |
164 | xferfunc = hwif->output_data; | 170 | xferfunc = tp_ops->output_data; |
165 | 171 | ||
166 | if ((drive->media == ide_floppy && !scsi && !pc->buf) || | 172 | if ((drive->media == ide_floppy && !scsi && !pc->buf) || |
167 | (drive->media == ide_tape && !scsi && pc->bh) || | 173 | (drive->media == ide_tape && !scsi && pc->bh) || |
@@ -175,7 +181,7 @@ cmd_finished: | |||
175 | pc->cur_pos += bcount; | 181 | pc->cur_pos += bcount; |
176 | 182 | ||
177 | debug_log("[cmd %x] transferred %d bytes on that intr.\n", | 183 | debug_log("[cmd %x] transferred %d bytes on that intr.\n", |
178 | pc->c[0], bcount); | 184 | rq->cmd[0], bcount); |
179 | 185 | ||
180 | /* And set the interrupt handler again */ | 186 | /* And set the interrupt handler again */ |
181 | ide_set_handler(drive, handler, timeout, expiry); | 187 | ide_set_handler(drive, handler, timeout, expiry); |
@@ -183,16 +189,27 @@ cmd_finished: | |||
183 | } | 189 | } |
184 | EXPORT_SYMBOL_GPL(ide_pc_intr); | 190 | EXPORT_SYMBOL_GPL(ide_pc_intr); |
185 | 191 | ||
192 | static u8 ide_read_ireason(ide_drive_t *drive) | ||
193 | { | ||
194 | ide_task_t task; | ||
195 | |||
196 | memset(&task, 0, sizeof(task)); | ||
197 | task.tf_flags = IDE_TFLAG_IN_NSECT; | ||
198 | |||
199 | drive->hwif->tp_ops->tf_read(drive, &task); | ||
200 | |||
201 | return task.tf.nsect & 3; | ||
202 | } | ||
203 | |||
186 | static u8 ide_wait_ireason(ide_drive_t *drive, u8 ireason) | 204 | static u8 ide_wait_ireason(ide_drive_t *drive, u8 ireason) |
187 | { | 205 | { |
188 | ide_hwif_t *hwif = drive->hwif; | ||
189 | int retries = 100; | 206 | int retries = 100; |
190 | 207 | ||
191 | while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) { | 208 | while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) { |
192 | printk(KERN_ERR "%s: (IO,CoD) != (0,1) while issuing " | 209 | printk(KERN_ERR "%s: (IO,CoD) != (0,1) while issuing " |
193 | "a packet command, retrying\n", drive->name); | 210 | "a packet command, retrying\n", drive->name); |
194 | udelay(100); | 211 | udelay(100); |
195 | ireason = hwif->INB(hwif->io_ports.nsect_addr); | 212 | ireason = ide_read_ireason(drive); |
196 | if (retries == 0) { | 213 | if (retries == 0) { |
197 | printk(KERN_ERR "%s: (IO,CoD) != (0,1) while issuing " | 214 | printk(KERN_ERR "%s: (IO,CoD) != (0,1) while issuing " |
198 | "a packet command, ignoring\n", | 215 | "a packet command, ignoring\n", |
@@ -210,6 +227,7 @@ ide_startstop_t ide_transfer_pc(ide_drive_t *drive, struct ide_atapi_pc *pc, | |||
210 | ide_expiry_t *expiry) | 227 | ide_expiry_t *expiry) |
211 | { | 228 | { |
212 | ide_hwif_t *hwif = drive->hwif; | 229 | ide_hwif_t *hwif = drive->hwif; |
230 | struct request *rq = hwif->hwgroup->rq; | ||
213 | ide_startstop_t startstop; | 231 | ide_startstop_t startstop; |
214 | u8 ireason; | 232 | u8 ireason; |
215 | 233 | ||
@@ -219,7 +237,7 @@ ide_startstop_t ide_transfer_pc(ide_drive_t *drive, struct ide_atapi_pc *pc, | |||
219 | return startstop; | 237 | return startstop; |
220 | } | 238 | } |
221 | 239 | ||
222 | ireason = hwif->INB(hwif->io_ports.nsect_addr); | 240 | ireason = ide_read_ireason(drive); |
223 | if (drive->media == ide_tape && !drive->scsi) | 241 | if (drive->media == ide_tape && !drive->scsi) |
224 | ireason = ide_wait_ireason(drive, ireason); | 242 | ireason = ide_wait_ireason(drive, ireason); |
225 | 243 | ||
@@ -239,8 +257,8 @@ ide_startstop_t ide_transfer_pc(ide_drive_t *drive, struct ide_atapi_pc *pc, | |||
239 | } | 257 | } |
240 | 258 | ||
241 | /* Send the actual packet */ | 259 | /* Send the actual packet */ |
242 | if ((pc->flags & PC_FLAG_ZIP_DRIVE) == 0) | 260 | if ((drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) == 0) |
243 | hwif->output_data(drive, NULL, pc->c, 12); | 261 | hwif->tp_ops->output_data(drive, NULL, rq->cmd, 12); |
244 | 262 | ||
245 | return ide_started; | 263 | return ide_started; |
246 | } | 264 | } |
@@ -284,7 +302,7 @@ ide_startstop_t ide_issue_pc(ide_drive_t *drive, struct ide_atapi_pc *pc, | |||
284 | bcount, dma); | 302 | bcount, dma); |
285 | 303 | ||
286 | /* Issue the packet command */ | 304 | /* Issue the packet command */ |
287 | if (pc->flags & PC_FLAG_DRQ_INTERRUPT) { | 305 | if (drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT) { |
288 | ide_execute_command(drive, WIN_PACKETCMD, handler, | 306 | ide_execute_command(drive, WIN_PACKETCMD, handler, |
289 | timeout, NULL); | 307 | timeout, NULL); |
290 | return ide_started; | 308 | return ide_started; |
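The ide-atapi.c hunks above replace direct port reads of the LBA mid/high and sector-count registers (hwif->INB) with the ide_read_bcount_and_ireason() helper and the tp_ops taskfile accessors. A minimal sketch of what such a helper could look like, modeled on the ide_read_ireason() helper added in the same file, follows; the tf_flags used and the exact field layout are assumptions, since the real implementation lives elsewhere in the tree and is not part of this hunk.

static void example_read_bcount_and_ireason(ide_drive_t *drive,
					    u16 *bcount, u8 *ireason)
{
	ide_task_t task;

	memset(&task, 0, sizeof(task));
	task.tf_flags = IDE_TFLAG_IN_LBAH | IDE_TFLAG_IN_LBAM |
			IDE_TFLAG_IN_NSECT;

	drive->hwif->tp_ops->tf_read(drive, &task);

	/* the ATAPI byte count is split across the LBA mid/high registers */
	*bcount = (task.tf.lbah << 8) | task.tf.lbam;
	/* only the CoD and IO bits of the sector count register matter */
	*ireason = task.tf.nsect & 3;
}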
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 6e29dd532090..4e73aeee4053 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c | |||
@@ -85,10 +85,8 @@ static void ide_cd_put(struct cdrom_info *cd) | |||
85 | /* Mark that we've seen a media change and invalidate our internal buffers. */ | 85 | /* Mark that we've seen a media change and invalidate our internal buffers. */ |
86 | static void cdrom_saw_media_change(ide_drive_t *drive) | 86 | static void cdrom_saw_media_change(ide_drive_t *drive) |
87 | { | 87 | { |
88 | struct cdrom_info *cd = drive->driver_data; | 88 | drive->atapi_flags |= IDE_AFLAG_MEDIA_CHANGED; |
89 | 89 | drive->atapi_flags &= ~IDE_AFLAG_TOC_VALID; | |
90 | cd->cd_flags |= IDE_CD_FLAG_MEDIA_CHANGED; | ||
91 | cd->cd_flags &= ~IDE_CD_FLAG_TOC_VALID; | ||
92 | } | 90 | } |
93 | 91 | ||
94 | static int cdrom_log_sense(ide_drive_t *drive, struct request *rq, | 92 | static int cdrom_log_sense(ide_drive_t *drive, struct request *rq, |
@@ -280,11 +278,12 @@ static void ide_dump_status_no_sense(ide_drive_t *drive, const char *msg, u8 st) | |||
280 | */ | 278 | */ |
281 | static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret) | 279 | static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret) |
282 | { | 280 | { |
283 | struct request *rq = HWGROUP(drive)->rq; | 281 | ide_hwif_t *hwif = drive->hwif; |
282 | struct request *rq = hwif->hwgroup->rq; | ||
284 | int stat, err, sense_key; | 283 | int stat, err, sense_key; |
285 | 284 | ||
286 | /* check for errors */ | 285 | /* check for errors */ |
287 | stat = ide_read_status(drive); | 286 | stat = hwif->tp_ops->read_status(hwif); |
288 | 287 | ||
289 | if (stat_ret) | 288 | if (stat_ret) |
290 | *stat_ret = stat; | 289 | *stat_ret = stat; |
@@ -528,7 +527,7 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive, | |||
528 | ide_pktcmd_tf_load(drive, IDE_TFLAG_OUT_NSECT | IDE_TFLAG_OUT_LBAL, | 527 | ide_pktcmd_tf_load(drive, IDE_TFLAG_OUT_NSECT | IDE_TFLAG_OUT_LBAL, |
529 | xferlen, info->dma); | 528 | xferlen, info->dma); |
530 | 529 | ||
531 | if (info->cd_flags & IDE_CD_FLAG_DRQ_INTERRUPT) { | 530 | if (drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT) { |
532 | /* waiting for CDB interrupt, not DMA yet. */ | 531 | /* waiting for CDB interrupt, not DMA yet. */ |
533 | if (info->dma) | 532 | if (info->dma) |
534 | drive->waiting_for_dma = 0; | 533 | drive->waiting_for_dma = 0; |
@@ -560,7 +559,7 @@ static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive, | |||
560 | struct cdrom_info *info = drive->driver_data; | 559 | struct cdrom_info *info = drive->driver_data; |
561 | ide_startstop_t startstop; | 560 | ide_startstop_t startstop; |
562 | 561 | ||
563 | if (info->cd_flags & IDE_CD_FLAG_DRQ_INTERRUPT) { | 562 | if (drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT) { |
564 | /* | 563 | /* |
565 | * Here we should have been called after receiving an interrupt | 564 | * Here we should have been called after receiving an interrupt |
566 | * from the device. DRQ should now be set. | 565 | * from the device. DRQ should now be set. |
@@ -589,7 +588,7 @@ static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive, | |||
589 | cmd_len = ATAPI_MIN_CDB_BYTES; | 588 | cmd_len = ATAPI_MIN_CDB_BYTES; |
590 | 589 | ||
591 | /* send the command to the device */ | 590 | /* send the command to the device */ |
592 | hwif->output_data(drive, NULL, rq->cmd, cmd_len); | 591 | hwif->tp_ops->output_data(drive, NULL, rq->cmd, cmd_len); |
593 | 592 | ||
594 | /* start the DMA if need be */ | 593 | /* start the DMA if need be */ |
595 | if (info->dma) | 594 | if (info->dma) |
@@ -606,6 +605,8 @@ static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive, | |||
606 | static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq, | 605 | static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq, |
607 | int len, int ireason, int rw) | 606 | int len, int ireason, int rw) |
608 | { | 607 | { |
608 | ide_hwif_t *hwif = drive->hwif; | ||
609 | |||
609 | /* | 610 | /* |
610 | * ireason == 0: the drive wants to receive data from us | 611 | * ireason == 0: the drive wants to receive data from us |
611 | * ireason == 2: the drive is expecting to transfer data to us | 612 | * ireason == 2: the drive is expecting to transfer data to us |
@@ -624,7 +625,7 @@ static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq, | |||
624 | * Some drives (ASUS) seem to tell us that status info is | 625 | * Some drives (ASUS) seem to tell us that status info is |
625 | * available. Just get it and ignore. | 626 | * available. Just get it and ignore. |
626 | */ | 627 | */ |
627 | (void)ide_read_status(drive); | 628 | (void)hwif->tp_ops->read_status(hwif); |
628 | return 0; | 629 | return 0; |
629 | } else { | 630 | } else { |
630 | /* drive wants a command packet, or invalid ireason... */ | 631 | /* drive wants a command packet, or invalid ireason... */ |
@@ -645,20 +646,18 @@ static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq, | |||
645 | */ | 646 | */ |
646 | static int ide_cd_check_transfer_size(ide_drive_t *drive, int len) | 647 | static int ide_cd_check_transfer_size(ide_drive_t *drive, int len) |
647 | { | 648 | { |
648 | struct cdrom_info *cd = drive->driver_data; | ||
649 | |||
650 | if ((len % SECTOR_SIZE) == 0) | 649 | if ((len % SECTOR_SIZE) == 0) |
651 | return 0; | 650 | return 0; |
652 | 651 | ||
653 | printk(KERN_ERR "%s: %s: Bad transfer size %d\n", | 652 | printk(KERN_ERR "%s: %s: Bad transfer size %d\n", |
654 | drive->name, __func__, len); | 653 | drive->name, __func__, len); |
655 | 654 | ||
656 | if (cd->cd_flags & IDE_CD_FLAG_LIMIT_NFRAMES) | 655 | if (drive->atapi_flags & IDE_AFLAG_LIMIT_NFRAMES) |
657 | printk(KERN_ERR " This drive is not supported by " | 656 | printk(KERN_ERR " This drive is not supported by " |
658 | "this version of the driver\n"); | 657 | "this version of the driver\n"); |
659 | else { | 658 | else { |
660 | printk(KERN_ERR " Trying to limit transfer sizes\n"); | 659 | printk(KERN_ERR " Trying to limit transfer sizes\n"); |
661 | cd->cd_flags |= IDE_CD_FLAG_LIMIT_NFRAMES; | 660 | drive->atapi_flags |= IDE_AFLAG_LIMIT_NFRAMES; |
662 | } | 661 | } |
663 | 662 | ||
664 | return 1; | 663 | return 1; |
@@ -735,7 +734,7 @@ static ide_startstop_t cdrom_seek_intr(ide_drive_t *drive) | |||
735 | if (cdrom_decode_status(drive, 0, &stat)) | 734 | if (cdrom_decode_status(drive, 0, &stat)) |
736 | return ide_stopped; | 735 | return ide_stopped; |
737 | 736 | ||
738 | info->cd_flags |= IDE_CD_FLAG_SEEKING; | 737 | drive->atapi_flags |= IDE_AFLAG_SEEKING; |
739 | 738 | ||
740 | if (retry && time_after(jiffies, info->start_seek + IDECD_SEEK_TIMER)) { | 739 | if (retry && time_after(jiffies, info->start_seek + IDECD_SEEK_TIMER)) { |
741 | if (--retry == 0) | 740 | if (--retry == 0) |
@@ -892,10 +891,11 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) | |||
892 | struct request *rq = HWGROUP(drive)->rq; | 891 | struct request *rq = HWGROUP(drive)->rq; |
893 | xfer_func_t *xferfunc; | 892 | xfer_func_t *xferfunc; |
894 | ide_expiry_t *expiry = NULL; | 893 | ide_expiry_t *expiry = NULL; |
895 | int dma_error = 0, dma, stat, ireason, len, thislen, uptodate = 0; | 894 | int dma_error = 0, dma, stat, thislen, uptodate = 0; |
896 | int write = (rq_data_dir(rq) == WRITE) ? 1 : 0; | 895 | int write = (rq_data_dir(rq) == WRITE) ? 1 : 0; |
897 | unsigned int timeout; | 896 | unsigned int timeout; |
898 | u8 lowcyl, highcyl; | 897 | u16 len; |
898 | u8 ireason; | ||
899 | 899 | ||
900 | /* check for errors */ | 900 | /* check for errors */ |
901 | dma = info->dma; | 901 | dma = info->dma; |
@@ -923,12 +923,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) | |||
923 | goto end_request; | 923 | goto end_request; |
924 | } | 924 | } |
925 | 925 | ||
926 | /* ok we fall to pio :/ */ | 926 | ide_read_bcount_and_ireason(drive, &len, &ireason); |
927 | ireason = hwif->INB(hwif->io_ports.nsect_addr) & 0x3; | ||
928 | lowcyl = hwif->INB(hwif->io_ports.lbam_addr); | ||
929 | highcyl = hwif->INB(hwif->io_ports.lbah_addr); | ||
930 | |||
931 | len = lowcyl + (256 * highcyl); | ||
932 | 927 | ||
933 | thislen = blk_fs_request(rq) ? len : rq->data_len; | 928 | thislen = blk_fs_request(rq) ? len : rq->data_len; |
934 | if (thislen > len) | 929 | if (thislen > len) |
@@ -991,10 +986,10 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) | |||
991 | 986 | ||
992 | if (ireason == 0) { | 987 | if (ireason == 0) { |
993 | write = 1; | 988 | write = 1; |
994 | xferfunc = hwif->output_data; | 989 | xferfunc = hwif->tp_ops->output_data; |
995 | } else { | 990 | } else { |
996 | write = 0; | 991 | write = 0; |
997 | xferfunc = hwif->input_data; | 992 | xferfunc = hwif->tp_ops->input_data; |
998 | } | 993 | } |
999 | 994 | ||
1000 | /* transfer data */ | 995 | /* transfer data */ |
@@ -1198,9 +1193,10 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq, | |||
1198 | int xferlen; | 1193 | int xferlen; |
1199 | 1194 | ||
1200 | if (blk_fs_request(rq)) { | 1195 | if (blk_fs_request(rq)) { |
1201 | if (info->cd_flags & IDE_CD_FLAG_SEEKING) { | 1196 | if (drive->atapi_flags & IDE_AFLAG_SEEKING) { |
1197 | ide_hwif_t *hwif = drive->hwif; | ||
1202 | unsigned long elapsed = jiffies - info->start_seek; | 1198 | unsigned long elapsed = jiffies - info->start_seek; |
1203 | int stat = ide_read_status(drive); | 1199 | int stat = hwif->tp_ops->read_status(hwif); |
1204 | 1200 | ||
1205 | if ((stat & SEEK_STAT) != SEEK_STAT) { | 1201 | if ((stat & SEEK_STAT) != SEEK_STAT) { |
1206 | if (elapsed < IDECD_SEEK_TIMEOUT) { | 1202 | if (elapsed < IDECD_SEEK_TIMEOUT) { |
@@ -1211,7 +1207,7 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq, | |||
1211 | printk(KERN_ERR "%s: DSC timeout\n", | 1207 | printk(KERN_ERR "%s: DSC timeout\n", |
1212 | drive->name); | 1208 | drive->name); |
1213 | } | 1209 | } |
1214 | info->cd_flags &= ~IDE_CD_FLAG_SEEKING; | 1210 | drive->atapi_flags &= ~IDE_AFLAG_SEEKING; |
1215 | } | 1211 | } |
1216 | if (rq_data_dir(rq) == READ && | 1212 | if (rq_data_dir(rq) == READ && |
1217 | IDE_LARGE_SEEK(info->last_block, block, | 1213 | IDE_LARGE_SEEK(info->last_block, block, |
@@ -1288,7 +1284,7 @@ int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense) | |||
1288 | */ | 1284 | */ |
1289 | cmd[7] = cdi->sanyo_slot % 3; | 1285 | cmd[7] = cdi->sanyo_slot % 3; |
1290 | 1286 | ||
1291 | return ide_cd_queue_pc(drive, cmd, 0, NULL, 0, sense, 0, REQ_QUIET); | 1287 | return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, REQ_QUIET); |
1292 | } | 1288 | } |
1293 | 1289 | ||
1294 | static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity, | 1290 | static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity, |
@@ -1296,8 +1292,8 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity, | |||
1296 | struct request_sense *sense) | 1292 | struct request_sense *sense) |
1297 | { | 1293 | { |
1298 | struct { | 1294 | struct { |
1299 | __u32 lba; | 1295 | __be32 lba; |
1300 | __u32 blocklen; | 1296 | __be32 blocklen; |
1301 | } capbuf; | 1297 | } capbuf; |
1302 | 1298 | ||
1303 | int stat; | 1299 | int stat; |
@@ -1369,7 +1365,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense) | |||
1369 | */ | 1365 | */ |
1370 | (void) cdrom_check_status(drive, sense); | 1366 | (void) cdrom_check_status(drive, sense); |
1371 | 1367 | ||
1372 | if (info->cd_flags & IDE_CD_FLAG_TOC_VALID) | 1368 | if (drive->atapi_flags & IDE_AFLAG_TOC_VALID) |
1373 | return 0; | 1369 | return 0; |
1374 | 1370 | ||
1375 | /* try to get the total cdrom capacity and sector size */ | 1371 | /* try to get the total cdrom capacity and sector size */ |
@@ -1391,7 +1387,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense) | |||
1391 | if (stat) | 1387 | if (stat) |
1392 | return stat; | 1388 | return stat; |
1393 | 1389 | ||
1394 | if (info->cd_flags & IDE_CD_FLAG_TOCTRACKS_AS_BCD) { | 1390 | if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) { |
1395 | toc->hdr.first_track = BCD2BIN(toc->hdr.first_track); | 1391 | toc->hdr.first_track = BCD2BIN(toc->hdr.first_track); |
1396 | toc->hdr.last_track = BCD2BIN(toc->hdr.last_track); | 1392 | toc->hdr.last_track = BCD2BIN(toc->hdr.last_track); |
1397 | } | 1393 | } |
@@ -1432,7 +1428,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense) | |||
1432 | if (stat) | 1428 | if (stat) |
1433 | return stat; | 1429 | return stat; |
1434 | 1430 | ||
1435 | if (info->cd_flags & IDE_CD_FLAG_TOCTRACKS_AS_BCD) { | 1431 | if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) { |
1436 | toc->hdr.first_track = (u8)BIN2BCD(CDROM_LEADOUT); | 1432 | toc->hdr.first_track = (u8)BIN2BCD(CDROM_LEADOUT); |
1437 | toc->hdr.last_track = (u8)BIN2BCD(CDROM_LEADOUT); | 1433 | toc->hdr.last_track = (u8)BIN2BCD(CDROM_LEADOUT); |
1438 | } else { | 1434 | } else { |
@@ -1446,14 +1442,14 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense) | |||
1446 | 1442 | ||
1447 | toc->hdr.toc_length = be16_to_cpu(toc->hdr.toc_length); | 1443 | toc->hdr.toc_length = be16_to_cpu(toc->hdr.toc_length); |
1448 | 1444 | ||
1449 | if (info->cd_flags & IDE_CD_FLAG_TOCTRACKS_AS_BCD) { | 1445 | if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) { |
1450 | toc->hdr.first_track = BCD2BIN(toc->hdr.first_track); | 1446 | toc->hdr.first_track = BCD2BIN(toc->hdr.first_track); |
1451 | toc->hdr.last_track = BCD2BIN(toc->hdr.last_track); | 1447 | toc->hdr.last_track = BCD2BIN(toc->hdr.last_track); |
1452 | } | 1448 | } |
1453 | 1449 | ||
1454 | for (i = 0; i <= ntracks; i++) { | 1450 | for (i = 0; i <= ntracks; i++) { |
1455 | if (info->cd_flags & IDE_CD_FLAG_TOCADDR_AS_BCD) { | 1451 | if (drive->atapi_flags & IDE_AFLAG_TOCADDR_AS_BCD) { |
1456 | if (info->cd_flags & IDE_CD_FLAG_TOCTRACKS_AS_BCD) | 1452 | if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) |
1457 | toc->ent[i].track = BCD2BIN(toc->ent[i].track); | 1453 | toc->ent[i].track = BCD2BIN(toc->ent[i].track); |
1458 | msf_from_bcd(&toc->ent[i].addr.msf); | 1454 | msf_from_bcd(&toc->ent[i].addr.msf); |
1459 | } | 1455 | } |
@@ -1476,7 +1472,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense) | |||
1476 | toc->last_session_lba = msf_to_lba(0, 2, 0); /* 0m 2s 0f */ | 1472 | toc->last_session_lba = msf_to_lba(0, 2, 0); /* 0m 2s 0f */ |
1477 | } | 1473 | } |
1478 | 1474 | ||
1479 | if (info->cd_flags & IDE_CD_FLAG_TOCADDR_AS_BCD) { | 1475 | if (drive->atapi_flags & IDE_AFLAG_TOCADDR_AS_BCD) { |
1480 | /* re-read multisession information using MSF format */ | 1476 | /* re-read multisession information using MSF format */ |
1481 | stat = cdrom_read_tocentry(drive, 0, 1, 1, (char *)&ms_tmp, | 1477 | stat = cdrom_read_tocentry(drive, 0, 1, 1, (char *)&ms_tmp, |
1482 | sizeof(ms_tmp), sense); | 1478 | sizeof(ms_tmp), sense); |
@@ -1500,7 +1496,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense) | |||
1500 | } | 1496 | } |
1501 | 1497 | ||
1502 | /* Remember that we've read this stuff. */ | 1498 | /* Remember that we've read this stuff. */ |
1503 | info->cd_flags |= IDE_CD_FLAG_TOC_VALID; | 1499 | drive->atapi_flags |= IDE_AFLAG_TOC_VALID; |
1504 | 1500 | ||
1505 | return 0; | 1501 | return 0; |
1506 | } | 1502 | } |
@@ -1512,7 +1508,7 @@ int ide_cdrom_get_capabilities(ide_drive_t *drive, u8 *buf) | |||
1512 | struct packet_command cgc; | 1508 | struct packet_command cgc; |
1513 | int stat, attempts = 3, size = ATAPI_CAPABILITIES_PAGE_SIZE; | 1509 | int stat, attempts = 3, size = ATAPI_CAPABILITIES_PAGE_SIZE; |
1514 | 1510 | ||
1515 | if ((info->cd_flags & IDE_CD_FLAG_FULL_CAPS_PAGE) == 0) | 1511 | if ((drive->atapi_flags & IDE_AFLAG_FULL_CAPS_PAGE) == 0) |
1516 | size -= ATAPI_CAPABILITIES_PAGE_PAD_SIZE; | 1512 | size -= ATAPI_CAPABILITIES_PAGE_PAD_SIZE; |
1517 | 1513 | ||
1518 | init_cdrom_command(&cgc, buf, size, CGC_DATA_UNKNOWN); | 1514 | init_cdrom_command(&cgc, buf, size, CGC_DATA_UNKNOWN); |
@@ -1530,15 +1526,12 @@ void ide_cdrom_update_speed(ide_drive_t *drive, u8 *buf) | |||
1530 | struct cdrom_info *cd = drive->driver_data; | 1526 | struct cdrom_info *cd = drive->driver_data; |
1531 | u16 curspeed, maxspeed; | 1527 | u16 curspeed, maxspeed; |
1532 | 1528 | ||
1533 | curspeed = *(u16 *)&buf[8 + 14]; | 1529 | if (drive->atapi_flags & IDE_AFLAG_LE_SPEED_FIELDS) { |
1534 | maxspeed = *(u16 *)&buf[8 + 8]; | 1530 | curspeed = le16_to_cpup((__le16 *)&buf[8 + 14]); |
1535 | 1531 | maxspeed = le16_to_cpup((__le16 *)&buf[8 + 8]); | |
1536 | if (cd->cd_flags & IDE_CD_FLAG_LE_SPEED_FIELDS) { | ||
1537 | curspeed = le16_to_cpu(curspeed); | ||
1538 | maxspeed = le16_to_cpu(maxspeed); | ||
1539 | } else { | 1532 | } else { |
1540 | curspeed = be16_to_cpu(curspeed); | 1533 | curspeed = be16_to_cpup((__be16 *)&buf[8 + 14]); |
1541 | maxspeed = be16_to_cpu(maxspeed); | 1534 | maxspeed = be16_to_cpup((__be16 *)&buf[8 + 8]); |
1542 | } | 1535 | } |
1543 | 1536 | ||
1544 | cd->current_speed = (curspeed + (176/2)) / 176; | 1537 | cd->current_speed = (curspeed + (176/2)) / 176; |
@@ -1579,7 +1572,7 @@ static int ide_cdrom_register(ide_drive_t *drive, int nslots) | |||
1579 | devinfo->handle = drive; | 1572 | devinfo->handle = drive; |
1580 | strcpy(devinfo->name, drive->name); | 1573 | strcpy(devinfo->name, drive->name); |
1581 | 1574 | ||
1582 | if (info->cd_flags & IDE_CD_FLAG_NO_SPEED_SELECT) | 1575 | if (drive->atapi_flags & IDE_AFLAG_NO_SPEED_SELECT) |
1583 | devinfo->mask |= CDC_SELECT_SPEED; | 1576 | devinfo->mask |= CDC_SELECT_SPEED; |
1584 | 1577 | ||
1585 | devinfo->disk = info->disk; | 1578 | devinfo->disk = info->disk; |
@@ -1605,8 +1598,8 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive) | |||
1605 | return nslots; | 1598 | return nslots; |
1606 | } | 1599 | } |
1607 | 1600 | ||
1608 | if (cd->cd_flags & IDE_CD_FLAG_PRE_ATAPI12) { | 1601 | if (drive->atapi_flags & IDE_AFLAG_PRE_ATAPI12) { |
1609 | cd->cd_flags &= ~IDE_CD_FLAG_NO_EJECT; | 1602 | drive->atapi_flags &= ~IDE_AFLAG_NO_EJECT; |
1610 | cdi->mask &= ~CDC_PLAY_AUDIO; | 1603 | cdi->mask &= ~CDC_PLAY_AUDIO; |
1611 | return nslots; | 1604 | return nslots; |
1612 | } | 1605 | } |
@@ -1624,9 +1617,9 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive) | |||
1624 | return 0; | 1617 | return 0; |
1625 | 1618 | ||
1626 | if ((buf[8 + 6] & 0x01) == 0) | 1619 | if ((buf[8 + 6] & 0x01) == 0) |
1627 | cd->cd_flags |= IDE_CD_FLAG_NO_DOORLOCK; | 1620 | drive->atapi_flags |= IDE_AFLAG_NO_DOORLOCK; |
1628 | if (buf[8 + 6] & 0x08) | 1621 | if (buf[8 + 6] & 0x08) |
1629 | cd->cd_flags &= ~IDE_CD_FLAG_NO_EJECT; | 1622 | drive->atapi_flags &= ~IDE_AFLAG_NO_EJECT; |
1630 | if (buf[8 + 3] & 0x01) | 1623 | if (buf[8 + 3] & 0x01) |
1631 | cdi->mask &= ~CDC_CD_R; | 1624 | cdi->mask &= ~CDC_CD_R; |
1632 | if (buf[8 + 3] & 0x02) | 1625 | if (buf[8 + 3] & 0x02) |
@@ -1637,7 +1630,7 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive) | |||
1637 | cdi->mask &= ~(CDC_DVD_RAM | CDC_RAM); | 1630 | cdi->mask &= ~(CDC_DVD_RAM | CDC_RAM); |
1638 | if (buf[8 + 3] & 0x10) | 1631 | if (buf[8 + 3] & 0x10) |
1639 | cdi->mask &= ~CDC_DVD_R; | 1632 | cdi->mask &= ~CDC_DVD_R; |
1640 | if ((buf[8 + 4] & 0x01) || (cd->cd_flags & IDE_CD_FLAG_PLAY_AUDIO_OK)) | 1633 | if ((buf[8 + 4] & 0x01) || (drive->atapi_flags & IDE_AFLAG_PLAY_AUDIO_OK)) |
1641 | cdi->mask &= ~CDC_PLAY_AUDIO; | 1634 | cdi->mask &= ~CDC_PLAY_AUDIO; |
1642 | 1635 | ||
1643 | mechtype = buf[8 + 6] >> 5; | 1636 | mechtype = buf[8 + 6] >> 5; |
@@ -1679,7 +1672,7 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive) | |||
1679 | else | 1672 | else |
1680 | printk(KERN_CONT " drive"); | 1673 | printk(KERN_CONT " drive"); |
1681 | 1674 | ||
1682 | printk(KERN_CONT ", %dkB Cache\n", be16_to_cpu(*(u16 *)&buf[8 + 12])); | 1675 | printk(KERN_CONT ", %dkB Cache\n", be16_to_cpup((__be16 *)&buf[8 + 12])); |
1683 | 1676 | ||
1684 | return nslots; | 1677 | return nslots; |
1685 | } | 1678 | } |
@@ -1802,43 +1795,43 @@ static inline void ide_cdrom_add_settings(ide_drive_t *drive) { ; } | |||
1802 | 1795 | ||
1803 | static const struct cd_list_entry ide_cd_quirks_list[] = { | 1796 | static const struct cd_list_entry ide_cd_quirks_list[] = { |
1804 | /* Limit transfer size per interrupt. */ | 1797 | /* Limit transfer size per interrupt. */ |
1805 | { "SAMSUNG CD-ROM SCR-2430", NULL, IDE_CD_FLAG_LIMIT_NFRAMES }, | 1798 | { "SAMSUNG CD-ROM SCR-2430", NULL, IDE_AFLAG_LIMIT_NFRAMES }, |
1806 | { "SAMSUNG CD-ROM SCR-2432", NULL, IDE_CD_FLAG_LIMIT_NFRAMES }, | 1799 | { "SAMSUNG CD-ROM SCR-2432", NULL, IDE_AFLAG_LIMIT_NFRAMES }, |
1807 | /* SCR-3231 doesn't support the SET_CD_SPEED command. */ | 1800 | /* SCR-3231 doesn't support the SET_CD_SPEED command. */ |
1808 | { "SAMSUNG CD-ROM SCR-3231", NULL, IDE_CD_FLAG_NO_SPEED_SELECT }, | 1801 | { "SAMSUNG CD-ROM SCR-3231", NULL, IDE_AFLAG_NO_SPEED_SELECT }, |
1809 | /* Old NEC260 (not R) was released before ATAPI 1.2 spec. */ | 1802 | /* Old NEC260 (not R) was released before ATAPI 1.2 spec. */ |
1810 | { "NEC CD-ROM DRIVE:260", "1.01", IDE_CD_FLAG_TOCADDR_AS_BCD | | 1803 | { "NEC CD-ROM DRIVE:260", "1.01", IDE_AFLAG_TOCADDR_AS_BCD | |
1811 | IDE_CD_FLAG_PRE_ATAPI12, }, | 1804 | IDE_AFLAG_PRE_ATAPI12, }, |
1812 | /* Vertos 300, some versions of this drive like to talk BCD. */ | 1805 | /* Vertos 300, some versions of this drive like to talk BCD. */ |
1813 | { "V003S0DS", NULL, IDE_CD_FLAG_VERTOS_300_SSD, }, | 1806 | { "V003S0DS", NULL, IDE_AFLAG_VERTOS_300_SSD, }, |
1814 | /* Vertos 600 ESD. */ | 1807 | /* Vertos 600 ESD. */ |
1815 | { "V006E0DS", NULL, IDE_CD_FLAG_VERTOS_600_ESD, }, | 1808 | { "V006E0DS", NULL, IDE_AFLAG_VERTOS_600_ESD, }, |
1816 | /* | 1809 | /* |
1817 | * Sanyo 3 CD changer uses a non-standard command for CD changing | 1810 | * Sanyo 3 CD changer uses a non-standard command for CD changing |
1818 | * (by default standard ATAPI support for CD changers is used). | 1811 | * (by default standard ATAPI support for CD changers is used). |
1819 | */ | 1812 | */ |
1820 | { "CD-ROM CDR-C3 G", NULL, IDE_CD_FLAG_SANYO_3CD }, | 1813 | { "CD-ROM CDR-C3 G", NULL, IDE_AFLAG_SANYO_3CD }, |
1821 | { "CD-ROM CDR-C3G", NULL, IDE_CD_FLAG_SANYO_3CD }, | 1814 | { "CD-ROM CDR-C3G", NULL, IDE_AFLAG_SANYO_3CD }, |
1822 | { "CD-ROM CDR_C36", NULL, IDE_CD_FLAG_SANYO_3CD }, | 1815 | { "CD-ROM CDR_C36", NULL, IDE_AFLAG_SANYO_3CD }, |
1823 | /* Stingray 8X CD-ROM. */ | 1816 | /* Stingray 8X CD-ROM. */ |
1824 | { "STINGRAY 8422 IDE 8X CD-ROM 7-27-95", NULL, IDE_CD_FLAG_PRE_ATAPI12}, | 1817 | { "STINGRAY 8422 IDE 8X CD-ROM 7-27-95", NULL, IDE_AFLAG_PRE_ATAPI12 }, |
1825 | /* | 1818 | /* |
1826 | * ACER 50X CD-ROM and WPI 32X CD-ROM require the full spec length | 1819 | * ACER 50X CD-ROM and WPI 32X CD-ROM require the full spec length |
1827 | * mode sense page capabilities size, but older drives break. | 1820 | * mode sense page capabilities size, but older drives break. |
1828 | */ | 1821 | */ |
1829 | { "ATAPI CD ROM DRIVE 50X MAX", NULL, IDE_CD_FLAG_FULL_CAPS_PAGE }, | 1822 | { "ATAPI CD ROM DRIVE 50X MAX", NULL, IDE_AFLAG_FULL_CAPS_PAGE }, |
1830 | { "WPI CDS-32X", NULL, IDE_CD_FLAG_FULL_CAPS_PAGE }, | 1823 | { "WPI CDS-32X", NULL, IDE_AFLAG_FULL_CAPS_PAGE }, |
1831 | /* ACER/AOpen 24X CD-ROM has the speed fields byte-swapped. */ | 1824 | /* ACER/AOpen 24X CD-ROM has the speed fields byte-swapped. */ |
1832 | { "", "241N", IDE_CD_FLAG_LE_SPEED_FIELDS }, | 1825 | { "", "241N", IDE_AFLAG_LE_SPEED_FIELDS }, |
1833 | /* | 1826 | /* |
1834 | * Some drives used by Apple don't advertise audio play | 1827 | * Some drives used by Apple don't advertise audio play |
1835 | * but they do support reading TOC & audio data. | 1828 | * but they do support reading TOC & audio data. |
1836 | */ | 1829 | */ |
1837 | { "MATSHITADVD-ROM SR-8187", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, | 1830 | { "MATSHITADVD-ROM SR-8187", NULL, IDE_AFLAG_PLAY_AUDIO_OK }, |
1838 | { "MATSHITADVD-ROM SR-8186", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, | 1831 | { "MATSHITADVD-ROM SR-8186", NULL, IDE_AFLAG_PLAY_AUDIO_OK }, |
1839 | { "MATSHITADVD-ROM SR-8176", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, | 1832 | { "MATSHITADVD-ROM SR-8176", NULL, IDE_AFLAG_PLAY_AUDIO_OK }, |
1840 | { "MATSHITADVD-ROM SR-8174", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, | 1833 | { "MATSHITADVD-ROM SR-8174", NULL, IDE_AFLAG_PLAY_AUDIO_OK }, |
1841 | { "Optiarc DVD RW AD-5200A", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, | 1834 | { "Optiarc DVD RW AD-5200A", NULL, IDE_AFLAG_PLAY_AUDIO_OK }, |
1842 | { NULL, NULL, 0 } | 1835 | { NULL, NULL, 0 } |
1843 | }; | 1836 | }; |
1844 | 1837 | ||
@@ -1873,20 +1866,20 @@ static int ide_cdrom_setup(ide_drive_t *drive) | |||
1873 | 1866 | ||
1874 | drive->special.all = 0; | 1867 | drive->special.all = 0; |
1875 | 1868 | ||
1876 | cd->cd_flags = IDE_CD_FLAG_MEDIA_CHANGED | IDE_CD_FLAG_NO_EJECT | | 1869 | drive->atapi_flags = IDE_AFLAG_MEDIA_CHANGED | IDE_AFLAG_NO_EJECT | |
1877 | ide_cd_flags(id); | 1870 | ide_cd_flags(id); |
1878 | 1871 | ||
1879 | if ((id->config & 0x0060) == 0x20) | 1872 | if ((id->config & 0x0060) == 0x20) |
1880 | cd->cd_flags |= IDE_CD_FLAG_DRQ_INTERRUPT; | 1873 | drive->atapi_flags |= IDE_AFLAG_DRQ_INTERRUPT; |
1881 | 1874 | ||
1882 | if ((cd->cd_flags & IDE_CD_FLAG_VERTOS_300_SSD) && | 1875 | if ((drive->atapi_flags & IDE_AFLAG_VERTOS_300_SSD) && |
1883 | id->fw_rev[4] == '1' && id->fw_rev[6] <= '2') | 1876 | id->fw_rev[4] == '1' && id->fw_rev[6] <= '2') |
1884 | cd->cd_flags |= (IDE_CD_FLAG_TOCTRACKS_AS_BCD | | 1877 | drive->atapi_flags |= (IDE_AFLAG_TOCTRACKS_AS_BCD | |
1885 | IDE_CD_FLAG_TOCADDR_AS_BCD); | 1878 | IDE_AFLAG_TOCADDR_AS_BCD); |
1886 | else if ((cd->cd_flags & IDE_CD_FLAG_VERTOS_600_ESD) && | 1879 | else if ((drive->atapi_flags & IDE_AFLAG_VERTOS_600_ESD) && |
1887 | id->fw_rev[4] == '1' && id->fw_rev[6] <= '2') | 1880 | id->fw_rev[4] == '1' && id->fw_rev[6] <= '2') |
1888 | cd->cd_flags |= IDE_CD_FLAG_TOCTRACKS_AS_BCD; | 1881 | drive->atapi_flags |= IDE_AFLAG_TOCTRACKS_AS_BCD; |
1889 | else if (cd->cd_flags & IDE_CD_FLAG_SANYO_3CD) | 1882 | else if (drive->atapi_flags & IDE_AFLAG_SANYO_3CD) |
1890 | /* 3 => use CD in slot 0 */ | 1883 | /* 3 => use CD in slot 0 */ |
1891 | cdi->sanyo_slot = 3; | 1884 | cdi->sanyo_slot = 3; |
1892 | 1885 | ||
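The ide-cd.c hunks above also switch the capacity buffer and the speed fields to explicit endian types (__be32, plus __be16/__le16 decoded via be16_to_cpup()/le16_to_cpup()), which documents the on-the-wire byte order and keeps sparse endianness checking clean. A minimal, hedged sketch of the decode pattern is shown below; the struct and function names are illustrative and not taken from the driver.

struct example_cd_capacity {
	__be32 lba;		/* last LBA, big-endian as sent by the device */
	__be32 blocklen;	/* block length in bytes, big-endian */
};

static void example_decode_capacity(const struct example_cd_capacity *cap,
				    unsigned long *capacity,
				    unsigned long *blocklen)
{
	/* convert both fields from device (big-endian) to CPU byte order */
	*capacity = 1 + be32_to_cpu(cap->lba);
	*blocklen = be32_to_cpu(cap->blocklen);
}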
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h index fe0ea36e4124..61a4599b77db 100644 --- a/drivers/ide/ide-cd.h +++ b/drivers/ide/ide-cd.h | |||
@@ -27,42 +27,6 @@ | |||
27 | #define ATAPI_CAPABILITIES_PAGE_SIZE (8 + 20) | 27 | #define ATAPI_CAPABILITIES_PAGE_SIZE (8 + 20) |
28 | #define ATAPI_CAPABILITIES_PAGE_PAD_SIZE 4 | 28 | #define ATAPI_CAPABILITIES_PAGE_PAD_SIZE 4 |
29 | 29 | ||
30 | enum { | ||
31 | /* Device sends an interrupt when ready for a packet command. */ | ||
32 | IDE_CD_FLAG_DRQ_INTERRUPT = (1 << 0), | ||
33 | /* Drive cannot lock the door. */ | ||
34 | IDE_CD_FLAG_NO_DOORLOCK = (1 << 1), | ||
35 | /* Drive cannot eject the disc. */ | ||
36 | IDE_CD_FLAG_NO_EJECT = (1 << 2), | ||
37 | /* Drive is a pre ATAPI 1.2 drive. */ | ||
38 | IDE_CD_FLAG_PRE_ATAPI12 = (1 << 3), | ||
39 | /* TOC addresses are in BCD. */ | ||
40 | IDE_CD_FLAG_TOCADDR_AS_BCD = (1 << 4), | ||
41 | /* TOC track numbers are in BCD. */ | ||
42 | IDE_CD_FLAG_TOCTRACKS_AS_BCD = (1 << 5), | ||
43 | /* | ||
44 | * Drive does not provide data in multiples of SECTOR_SIZE | ||
45 | * when more than one interrupt is needed. | ||
46 | */ | ||
47 | IDE_CD_FLAG_LIMIT_NFRAMES = (1 << 6), | ||
48 | /* Seeking in progress. */ | ||
49 | IDE_CD_FLAG_SEEKING = (1 << 7), | ||
50 | /* Driver has noticed a media change. */ | ||
51 | IDE_CD_FLAG_MEDIA_CHANGED = (1 << 8), | ||
52 | /* Saved TOC information is current. */ | ||
53 | IDE_CD_FLAG_TOC_VALID = (1 << 9), | ||
54 | /* We think that the drive door is locked. */ | ||
55 | IDE_CD_FLAG_DOOR_LOCKED = (1 << 10), | ||
56 | /* SET_CD_SPEED command is unsupported. */ | ||
57 | IDE_CD_FLAG_NO_SPEED_SELECT = (1 << 11), | ||
58 | IDE_CD_FLAG_VERTOS_300_SSD = (1 << 12), | ||
59 | IDE_CD_FLAG_VERTOS_600_ESD = (1 << 13), | ||
60 | IDE_CD_FLAG_SANYO_3CD = (1 << 14), | ||
61 | IDE_CD_FLAG_FULL_CAPS_PAGE = (1 << 15), | ||
62 | IDE_CD_FLAG_PLAY_AUDIO_OK = (1 << 16), | ||
63 | IDE_CD_FLAG_LE_SPEED_FIELDS = (1 << 17), | ||
64 | }; | ||
65 | |||
66 | /* Structure of a MSF cdrom address. */ | 30 | /* Structure of a MSF cdrom address. */ |
67 | struct atapi_msf { | 31 | struct atapi_msf { |
68 | byte reserved; | 32 | byte reserved; |
@@ -128,8 +92,6 @@ struct cdrom_info { | |||
128 | unsigned long last_block; | 92 | unsigned long last_block; |
129 | unsigned long start_seek; | 93 | unsigned long start_seek; |
130 | 94 | ||
131 | unsigned int cd_flags; | ||
132 | |||
133 | u8 max_speed; /* Max speed of the drive. */ | 95 | u8 max_speed; /* Max speed of the drive. */ |
134 | u8 current_speed; /* Current speed of the drive. */ | 96 | u8 current_speed; /* Current speed of the drive. */ |
135 | 97 | ||
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c index 24d002addf73..74231b41f611 100644 --- a/drivers/ide/ide-cd_ioctl.c +++ b/drivers/ide/ide-cd_ioctl.c | |||
@@ -27,10 +27,9 @@ int ide_cdrom_open_real(struct cdrom_device_info *cdi, int purpose) | |||
27 | void ide_cdrom_release_real(struct cdrom_device_info *cdi) | 27 | void ide_cdrom_release_real(struct cdrom_device_info *cdi) |
28 | { | 28 | { |
29 | ide_drive_t *drive = cdi->handle; | 29 | ide_drive_t *drive = cdi->handle; |
30 | struct cdrom_info *cd = drive->driver_data; | ||
31 | 30 | ||
32 | if (!cdi->use_count) | 31 | if (!cdi->use_count) |
33 | cd->cd_flags &= ~IDE_CD_FLAG_TOC_VALID; | 32 | drive->atapi_flags &= ~IDE_AFLAG_TOC_VALID; |
34 | } | 33 | } |
35 | 34 | ||
36 | /* | 35 | /* |
@@ -83,13 +82,12 @@ int ide_cdrom_check_media_change_real(struct cdrom_device_info *cdi, | |||
83 | int slot_nr) | 82 | int slot_nr) |
84 | { | 83 | { |
85 | ide_drive_t *drive = cdi->handle; | 84 | ide_drive_t *drive = cdi->handle; |
86 | struct cdrom_info *cd = drive->driver_data; | ||
87 | int retval; | 85 | int retval; |
88 | 86 | ||
89 | if (slot_nr == CDSL_CURRENT) { | 87 | if (slot_nr == CDSL_CURRENT) { |
90 | (void) cdrom_check_status(drive, NULL); | 88 | (void) cdrom_check_status(drive, NULL); |
91 | retval = (cd->cd_flags & IDE_CD_FLAG_MEDIA_CHANGED) ? 1 : 0; | 89 | retval = (drive->atapi_flags & IDE_AFLAG_MEDIA_CHANGED) ? 1 : 0; |
92 | cd->cd_flags &= ~IDE_CD_FLAG_MEDIA_CHANGED; | 90 | drive->atapi_flags &= ~IDE_AFLAG_MEDIA_CHANGED; |
93 | return retval; | 91 | return retval; |
94 | } else { | 92 | } else { |
95 | return -EINVAL; | 93 | return -EINVAL; |
@@ -107,11 +105,11 @@ int cdrom_eject(ide_drive_t *drive, int ejectflag, | |||
107 | char loej = 0x02; | 105 | char loej = 0x02; |
108 | unsigned char cmd[BLK_MAX_CDB]; | 106 | unsigned char cmd[BLK_MAX_CDB]; |
109 | 107 | ||
110 | if ((cd->cd_flags & IDE_CD_FLAG_NO_EJECT) && !ejectflag) | 108 | if ((drive->atapi_flags & IDE_AFLAG_NO_EJECT) && !ejectflag) |
111 | return -EDRIVE_CANT_DO_THIS; | 109 | return -EDRIVE_CANT_DO_THIS; |
112 | 110 | ||
113 | /* reload fails on some drives, if the tray is locked */ | 111 | /* reload fails on some drives, if the tray is locked */ |
114 | if ((cd->cd_flags & IDE_CD_FLAG_DOOR_LOCKED) && ejectflag) | 112 | if ((drive->atapi_flags & IDE_AFLAG_DOOR_LOCKED) && ejectflag) |
115 | return 0; | 113 | return 0; |
116 | 114 | ||
117 | /* only tell drive to close tray if open, if it can do that */ | 115 | /* only tell drive to close tray if open, if it can do that */ |
@@ -123,7 +121,7 @@ int cdrom_eject(ide_drive_t *drive, int ejectflag, | |||
123 | cmd[0] = GPCMD_START_STOP_UNIT; | 121 | cmd[0] = GPCMD_START_STOP_UNIT; |
124 | cmd[4] = loej | (ejectflag != 0); | 122 | cmd[4] = loej | (ejectflag != 0); |
125 | 123 | ||
126 | return ide_cd_queue_pc(drive, cmd, 0, NULL, 0, sense, 0, 0); | 124 | return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, 0); |
127 | } | 125 | } |
128 | 126 | ||
129 | /* Lock the door if LOCKFLAG is nonzero; unlock it otherwise. */ | 127 | /* Lock the door if LOCKFLAG is nonzero; unlock it otherwise. */ |
@@ -131,7 +129,6 @@ static | |||
131 | int ide_cd_lockdoor(ide_drive_t *drive, int lockflag, | 129 | int ide_cd_lockdoor(ide_drive_t *drive, int lockflag, |
132 | struct request_sense *sense) | 130 | struct request_sense *sense) |
133 | { | 131 | { |
134 | struct cdrom_info *cd = drive->driver_data; | ||
135 | struct request_sense my_sense; | 132 | struct request_sense my_sense; |
136 | int stat; | 133 | int stat; |
137 | 134 | ||
@@ -139,7 +136,7 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag, | |||
139 | sense = &my_sense; | 136 | sense = &my_sense; |
140 | 137 | ||
141 | /* If the drive cannot lock the door, just pretend. */ | 138 | /* If the drive cannot lock the door, just pretend. */ |
142 | if (cd->cd_flags & IDE_CD_FLAG_NO_DOORLOCK) { | 139 | if (drive->atapi_flags & IDE_AFLAG_NO_DOORLOCK) { |
143 | stat = 0; | 140 | stat = 0; |
144 | } else { | 141 | } else { |
145 | unsigned char cmd[BLK_MAX_CDB]; | 142 | unsigned char cmd[BLK_MAX_CDB]; |
@@ -149,7 +146,7 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag, | |||
149 | cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL; | 146 | cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL; |
150 | cmd[4] = lockflag ? 1 : 0; | 147 | cmd[4] = lockflag ? 1 : 0; |
151 | 148 | ||
152 | stat = ide_cd_queue_pc(drive, cmd, 0, NULL, 0, | 149 | stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, |
153 | sense, 0, 0); | 150 | sense, 0, 0); |
154 | } | 151 | } |
155 | 152 | ||
@@ -160,7 +157,7 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag, | |||
160 | (sense->asc == 0x24 || sense->asc == 0x20)) { | 157 | (sense->asc == 0x24 || sense->asc == 0x20)) { |
161 | printk(KERN_ERR "%s: door locking not supported\n", | 158 | printk(KERN_ERR "%s: door locking not supported\n", |
162 | drive->name); | 159 | drive->name); |
163 | cd->cd_flags |= IDE_CD_FLAG_NO_DOORLOCK; | 160 | drive->atapi_flags |= IDE_AFLAG_NO_DOORLOCK; |
164 | stat = 0; | 161 | stat = 0; |
165 | } | 162 | } |
166 | 163 | ||
@@ -170,9 +167,9 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag, | |||
170 | 167 | ||
171 | if (stat == 0) { | 168 | if (stat == 0) { |
172 | if (lockflag) | 169 | if (lockflag) |
173 | cd->cd_flags |= IDE_CD_FLAG_DOOR_LOCKED; | 170 | drive->atapi_flags |= IDE_AFLAG_DOOR_LOCKED; |
174 | else | 171 | else |
175 | cd->cd_flags &= ~IDE_CD_FLAG_DOOR_LOCKED; | 172 | drive->atapi_flags &= ~IDE_AFLAG_DOOR_LOCKED; |
176 | } | 173 | } |
177 | 174 | ||
178 | return stat; | 175 | return stat; |
@@ -231,7 +228,7 @@ int ide_cdrom_select_speed(struct cdrom_device_info *cdi, int speed) | |||
231 | cmd[5] = speed & 0xff; | 228 | cmd[5] = speed & 0xff; |
232 | } | 229 | } |
233 | 230 | ||
234 | stat = ide_cd_queue_pc(drive, cmd, 0, NULL, 0, &sense, 0, 0); | 231 | stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, &sense, 0, 0); |
235 | 232 | ||
236 | if (!ide_cdrom_get_capabilities(drive, buf)) { | 233 | if (!ide_cdrom_get_capabilities(drive, buf)) { |
237 | ide_cdrom_update_speed(drive, buf); | 234 | ide_cdrom_update_speed(drive, buf); |
@@ -250,7 +247,7 @@ int ide_cdrom_get_last_session(struct cdrom_device_info *cdi, | |||
250 | struct request_sense sense; | 247 | struct request_sense sense; |
251 | int ret; | 248 | int ret; |
252 | 249 | ||
253 | if ((info->cd_flags & IDE_CD_FLAG_TOC_VALID) == 0 || !info->toc) { | 250 | if ((drive->atapi_flags & IDE_AFLAG_TOC_VALID) == 0 || !info->toc) { |
254 | ret = ide_cd_read_toc(drive, &sense); | 251 | ret = ide_cd_read_toc(drive, &sense); |
255 | if (ret) | 252 | if (ret) |
256 | return ret; | 253 | return ret; |
@@ -308,7 +305,7 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi) | |||
308 | * A reset will unlock the door. If it was previously locked, | 305 | * A reset will unlock the door. If it was previously locked, |
309 | * lock it again. | 306 | * lock it again. |
310 | */ | 307 | */ |
311 | if (cd->cd_flags & IDE_CD_FLAG_DOOR_LOCKED) | 308 | if (drive->atapi_flags & IDE_AFLAG_DOOR_LOCKED) |
312 | (void)ide_cd_lockdoor(drive, 1, &sense); | 309 | (void)ide_cd_lockdoor(drive, 1, &sense); |
313 | 310 | ||
314 | return ret; | 311 | return ret; |
@@ -324,7 +321,7 @@ static int ide_cd_get_toc_entry(ide_drive_t *drive, int track, | |||
324 | /* | 321 | /* |
325 | * don't serve cached data, if the toc isn't valid | 322 | * don't serve cached data, if the toc isn't valid |
326 | */ | 323 | */ |
327 | if ((info->cd_flags & IDE_CD_FLAG_TOC_VALID) == 0) | 324 | if ((drive->atapi_flags & IDE_AFLAG_TOC_VALID) == 0) |
328 | return -EINVAL; | 325 | return -EINVAL; |
329 | 326 | ||
330 | /* Check validity of requested track number. */ | 327 | /* Check validity of requested track number. */ |
@@ -374,7 +371,7 @@ static int ide_cd_fake_play_trkind(ide_drive_t *drive, void *arg) | |||
374 | lba_to_msf(lba_start, &cmd[3], &cmd[4], &cmd[5]); | 371 | lba_to_msf(lba_start, &cmd[3], &cmd[4], &cmd[5]); |
375 | lba_to_msf(lba_end - 1, &cmd[6], &cmd[7], &cmd[8]); | 372 | lba_to_msf(lba_end - 1, &cmd[6], &cmd[7], &cmd[8]); |
376 | 373 | ||
377 | return ide_cd_queue_pc(drive, cmd, 0, NULL, 0, &sense, 0, 0); | 374 | return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, &sense, 0, 0); |
378 | } | 375 | } |
379 | 376 | ||
380 | static int ide_cd_read_tochdr(ide_drive_t *drive, void *arg) | 377 | static int ide_cd_read_tochdr(ide_drive_t *drive, void *arg) |
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 3a2e80237c10..df5fe5756871 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c | |||
@@ -158,7 +158,7 @@ static void ide_tf_set_cmd(ide_drive_t *drive, ide_task_t *task, u8 dma) | |||
158 | write = (task->tf_flags & IDE_TFLAG_WRITE) ? 1 : 0; | 158 | write = (task->tf_flags & IDE_TFLAG_WRITE) ? 1 : 0; |
159 | 159 | ||
160 | if (dma) | 160 | if (dma) |
161 | index = drive->vdma ? 4 : 8; | 161 | index = 8; |
162 | else | 162 | else |
163 | index = drive->mult_count ? 0 : 4; | 163 | index = drive->mult_count ? 0 : 4; |
164 | 164 | ||
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c index 7ee44f86bc54..be99d463dcc7 100644 --- a/drivers/ide/ide-dma.c +++ b/drivers/ide/ide-dma.c | |||
@@ -100,10 +100,11 @@ static const struct drive_list_entry drive_blacklist [] = { | |||
100 | 100 | ||
101 | ide_startstop_t ide_dma_intr (ide_drive_t *drive) | 101 | ide_startstop_t ide_dma_intr (ide_drive_t *drive) |
102 | { | 102 | { |
103 | ide_hwif_t *hwif = drive->hwif; | ||
103 | u8 stat = 0, dma_stat = 0; | 104 | u8 stat = 0, dma_stat = 0; |
104 | 105 | ||
105 | dma_stat = drive->hwif->dma_ops->dma_end(drive); | 106 | dma_stat = hwif->dma_ops->dma_end(drive); |
106 | stat = ide_read_status(drive); | 107 | stat = hwif->tp_ops->read_status(hwif); |
107 | 108 | ||
108 | if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) { | 109 | if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) { |
109 | if (!dma_stat) { | 110 | if (!dma_stat) { |
@@ -334,7 +335,7 @@ static int config_drive_for_dma (ide_drive_t *drive) | |||
334 | static int dma_timer_expiry (ide_drive_t *drive) | 335 | static int dma_timer_expiry (ide_drive_t *drive) |
335 | { | 336 | { |
336 | ide_hwif_t *hwif = HWIF(drive); | 337 | ide_hwif_t *hwif = HWIF(drive); |
337 | u8 dma_stat = hwif->INB(hwif->dma_status); | 338 | u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif); |
338 | 339 | ||
339 | printk(KERN_WARNING "%s: dma_timer_expiry: dma status == 0x%02x\n", | 340 | printk(KERN_WARNING "%s: dma_timer_expiry: dma status == 0x%02x\n", |
340 | drive->name, dma_stat); | 341 | drive->name, dma_stat); |
@@ -369,14 +370,18 @@ void ide_dma_host_set(ide_drive_t *drive, int on) | |||
369 | { | 370 | { |
370 | ide_hwif_t *hwif = HWIF(drive); | 371 | ide_hwif_t *hwif = HWIF(drive); |
371 | u8 unit = (drive->select.b.unit & 0x01); | 372 | u8 unit = (drive->select.b.unit & 0x01); |
372 | u8 dma_stat = hwif->INB(hwif->dma_status); | 373 | u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif); |
373 | 374 | ||
374 | if (on) | 375 | if (on) |
375 | dma_stat |= (1 << (5 + unit)); | 376 | dma_stat |= (1 << (5 + unit)); |
376 | else | 377 | else |
377 | dma_stat &= ~(1 << (5 + unit)); | 378 | dma_stat &= ~(1 << (5 + unit)); |
378 | 379 | ||
379 | hwif->OUTB(dma_stat, hwif->dma_status); | 380 | if (hwif->host_flags & IDE_HFLAG_MMIO) |
381 | writeb(dma_stat, | ||
382 | (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS)); | ||
383 | else | ||
384 | outb(dma_stat, hwif->dma_base + ATA_DMA_STATUS); | ||
380 | } | 385 | } |
381 | 386 | ||
382 | EXPORT_SYMBOL_GPL(ide_dma_host_set); | 387 | EXPORT_SYMBOL_GPL(ide_dma_host_set); |
@@ -449,6 +454,7 @@ int ide_dma_setup(ide_drive_t *drive) | |||
449 | ide_hwif_t *hwif = drive->hwif; | 454 | ide_hwif_t *hwif = drive->hwif; |
450 | struct request *rq = HWGROUP(drive)->rq; | 455 | struct request *rq = HWGROUP(drive)->rq; |
451 | unsigned int reading; | 456 | unsigned int reading; |
457 | u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0; | ||
452 | u8 dma_stat; | 458 | u8 dma_stat; |
453 | 459 | ||
454 | if (rq_data_dir(rq)) | 460 | if (rq_data_dir(rq)) |
@@ -470,13 +476,21 @@ int ide_dma_setup(ide_drive_t *drive) | |||
470 | outl(hwif->dmatable_dma, hwif->dma_base + ATA_DMA_TABLE_OFS); | 476 | outl(hwif->dmatable_dma, hwif->dma_base + ATA_DMA_TABLE_OFS); |
471 | 477 | ||
472 | /* specify r/w */ | 478 | /* specify r/w */ |
473 | hwif->OUTB(reading, hwif->dma_command); | 479 | if (mmio) |
480 | writeb(reading, (void __iomem *)(hwif->dma_base + ATA_DMA_CMD)); | ||
481 | else | ||
482 | outb(reading, hwif->dma_base + ATA_DMA_CMD); | ||
474 | 483 | ||
475 | /* read dma_status for INTR & ERROR flags */ | 484 | /* read DMA status for INTR & ERROR flags */ |
476 | dma_stat = hwif->INB(hwif->dma_status); | 485 | dma_stat = hwif->tp_ops->read_sff_dma_status(hwif); |
477 | 486 | ||
478 | /* clear INTR & ERROR flags */ | 487 | /* clear INTR & ERROR flags */ |
479 | hwif->OUTB(dma_stat|6, hwif->dma_status); | 488 | if (mmio) |
489 | writeb(dma_stat | 6, | ||
490 | (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS)); | ||
491 | else | ||
492 | outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS); | ||
493 | |||
480 | drive->waiting_for_dma = 1; | 494 | drive->waiting_for_dma = 1; |
481 | return 0; | 495 | return 0; |
482 | } | 496 | } |
@@ -492,16 +506,24 @@ EXPORT_SYMBOL_GPL(ide_dma_exec_cmd); | |||
492 | 506 | ||
493 | void ide_dma_start(ide_drive_t *drive) | 507 | void ide_dma_start(ide_drive_t *drive) |
494 | { | 508 | { |
495 | ide_hwif_t *hwif = HWIF(drive); | 509 | ide_hwif_t *hwif = drive->hwif; |
496 | u8 dma_cmd = hwif->INB(hwif->dma_command); | 510 | u8 dma_cmd; |
497 | 511 | ||
498 | /* Note that this is done *after* the cmd has | 512 | /* Note that this is done *after* the cmd has |
499 | * been issued to the drive, as per the BM-IDE spec. | 513 | * been issued to the drive, as per the BM-IDE spec. |
500 | * The Promise Ultra33 doesn't work correctly when | 514 | * The Promise Ultra33 doesn't work correctly when |
501 | * we do this part before issuing the drive cmd. | 515 | * we do this part before issuing the drive cmd. |
502 | */ | 516 | */ |
503 | /* start DMA */ | 517 | if (hwif->host_flags & IDE_HFLAG_MMIO) { |
504 | hwif->OUTB(dma_cmd|1, hwif->dma_command); | 518 | dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD)); |
519 | /* start DMA */ | ||
520 | writeb(dma_cmd | 1, | ||
521 | (void __iomem *)(hwif->dma_base + ATA_DMA_CMD)); | ||
522 | } else { | ||
523 | dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD); | ||
524 | outb(dma_cmd | 1, hwif->dma_base + ATA_DMA_CMD); | ||
525 | } | ||
526 | |||
505 | hwif->dma = 1; | 527 | hwif->dma = 1; |
506 | wmb(); | 528 | wmb(); |
507 | } | 529 | } |
@@ -511,18 +533,33 @@ EXPORT_SYMBOL_GPL(ide_dma_start); | |||
511 | /* returns 1 on error, 0 otherwise */ | 533 | /* returns 1 on error, 0 otherwise */ |
512 | int __ide_dma_end (ide_drive_t *drive) | 534 | int __ide_dma_end (ide_drive_t *drive) |
513 | { | 535 | { |
514 | ide_hwif_t *hwif = HWIF(drive); | 536 | ide_hwif_t *hwif = drive->hwif; |
537 | u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0; | ||
515 | u8 dma_stat = 0, dma_cmd = 0; | 538 | u8 dma_stat = 0, dma_cmd = 0; |
516 | 539 | ||
517 | drive->waiting_for_dma = 0; | 540 | drive->waiting_for_dma = 0; |
518 | /* get dma_command mode */ | 541 | |
519 | dma_cmd = hwif->INB(hwif->dma_command); | 542 | if (mmio) { |
520 | /* stop DMA */ | 543 | /* get DMA command mode */ |
521 | hwif->OUTB(dma_cmd&~1, hwif->dma_command); | 544 | dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD)); |
545 | /* stop DMA */ | ||
546 | writeb(dma_cmd & ~1, | ||
547 | (void __iomem *)(hwif->dma_base + ATA_DMA_CMD)); | ||
548 | } else { | ||
549 | dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD); | ||
550 | outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD); | ||
551 | } | ||
552 | |||
522 | /* get DMA status */ | 553 | /* get DMA status */ |
523 | dma_stat = hwif->INB(hwif->dma_status); | 554 | dma_stat = hwif->tp_ops->read_sff_dma_status(hwif); |
524 | /* clear the INTR & ERROR bits */ | 555 | |
525 | hwif->OUTB(dma_stat|6, hwif->dma_status); | 556 | if (mmio) |
557 | /* clear the INTR & ERROR bits */ | ||
558 | writeb(dma_stat | 6, | ||
559 | (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS)); | ||
560 | else | ||
561 | outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS); | ||
562 | |||
526 | /* purge DMA mappings */ | 563 | /* purge DMA mappings */ |
527 | ide_destroy_dmatable(drive); | 564 | ide_destroy_dmatable(drive); |
528 | /* verify good DMA status */ | 565 | /* verify good DMA status */ |
@@ -537,7 +574,7 @@ EXPORT_SYMBOL(__ide_dma_end); | |||
537 | int ide_dma_test_irq(ide_drive_t *drive) | 574 | int ide_dma_test_irq(ide_drive_t *drive) |
538 | { | 575 | { |
539 | ide_hwif_t *hwif = HWIF(drive); | 576 | ide_hwif_t *hwif = HWIF(drive); |
540 | u8 dma_stat = hwif->INB(hwif->dma_status); | 577 | u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif); |
541 | 578 | ||
542 | /* return 1 if INTR asserted */ | 579 | /* return 1 if INTR asserted */ |
543 | if ((dma_stat & 4) == 4) | 580 | if ((dma_stat & 4) == 4) |
@@ -719,9 +756,8 @@ static int ide_tune_dma(ide_drive_t *drive) | |||
719 | static int ide_dma_check(ide_drive_t *drive) | 756 | static int ide_dma_check(ide_drive_t *drive) |
720 | { | 757 | { |
721 | ide_hwif_t *hwif = drive->hwif; | 758 | ide_hwif_t *hwif = drive->hwif; |
722 | int vdma = (hwif->host_flags & IDE_HFLAG_VDMA)? 1 : 0; | ||
723 | 759 | ||
724 | if (!vdma && ide_tune_dma(drive)) | 760 | if (ide_tune_dma(drive)) |
725 | return 0; | 761 | return 0; |
726 | 762 | ||
727 | /* TODO: always do PIO fallback */ | 763 | /* TODO: always do PIO fallback */ |
@@ -730,7 +766,7 @@ static int ide_dma_check(ide_drive_t *drive) | |||
730 | 766 | ||
731 | ide_set_max_pio(drive); | 767 | ide_set_max_pio(drive); |
732 | 768 | ||
733 | return vdma ? 0 : -1; | 769 | return -1; |
734 | } | 770 | } |
735 | 771 | ||
736 | int ide_id_dma_bug(ide_drive_t *drive) | 772 | int ide_id_dma_bug(ide_drive_t *drive) |
@@ -842,7 +878,7 @@ int ide_allocate_dma_engine(ide_hwif_t *hwif) | |||
842 | } | 878 | } |
843 | EXPORT_SYMBOL_GPL(ide_allocate_dma_engine); | 879 | EXPORT_SYMBOL_GPL(ide_allocate_dma_engine); |
844 | 880 | ||
845 | static const struct ide_dma_ops sff_dma_ops = { | 881 | const struct ide_dma_ops sff_dma_ops = { |
846 | .dma_host_set = ide_dma_host_set, | 882 | .dma_host_set = ide_dma_host_set, |
847 | .dma_setup = ide_dma_setup, | 883 | .dma_setup = ide_dma_setup, |
848 | .dma_exec_cmd = ide_dma_exec_cmd, | 884 | .dma_exec_cmd = ide_dma_exec_cmd, |
@@ -852,18 +888,5 @@ static const struct ide_dma_ops sff_dma_ops = { | |||
852 | .dma_timeout = ide_dma_timeout, | 888 | .dma_timeout = ide_dma_timeout, |
853 | .dma_lost_irq = ide_dma_lost_irq, | 889 | .dma_lost_irq = ide_dma_lost_irq, |
854 | }; | 890 | }; |
855 | 891 | EXPORT_SYMBOL_GPL(sff_dma_ops); | |
856 | void ide_setup_dma(ide_hwif_t *hwif, unsigned long base) | ||
857 | { | ||
858 | hwif->dma_base = base; | ||
859 | |||
860 | if (!hwif->dma_command) | ||
861 | hwif->dma_command = hwif->dma_base + 0; | ||
862 | if (!hwif->dma_status) | ||
863 | hwif->dma_status = hwif->dma_base + 2; | ||
864 | |||
865 | hwif->dma_ops = &sff_dma_ops; | ||
866 | } | ||
867 | |||
868 | EXPORT_SYMBOL_GPL(ide_setup_dma); | ||
869 | #endif /* CONFIG_BLK_DEV_IDEDMA_SFF */ | 892 | #endif /* CONFIG_BLK_DEV_IDEDMA_SFF */ |
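The ide-dma.c changes above drop the cached hwif->dma_command/dma_status addresses in favour of fixed offsets from hwif->dma_base (ATA_DMA_CMD, ATA_DMA_STATUS) and branch on IDE_HFLAG_MMIO to choose readb()/writeb() or inb()/outb(). The recurring access pattern could be factored as in the sketch below; these helpers are illustrative only and do not exist in the tree at this point.

static inline u8 example_dma_readb(ide_hwif_t *hwif, int offset)
{
	if (hwif->host_flags & IDE_HFLAG_MMIO)
		return readb((void __iomem *)(hwif->dma_base + offset));

	return inb(hwif->dma_base + offset);
}

static inline void example_dma_writeb(ide_hwif_t *hwif, int offset, u8 val)
{
	if (hwif->host_flags & IDE_HFLAG_MMIO)
		writeb(val, (void __iomem *)(hwif->dma_base + offset));
	else
		outb(val, hwif->dma_base + offset);
}

/* e.g. clearing the INTR and ERROR bits after reading the DMA status,
 * as done in ide_dma_setup() and __ide_dma_end() above
 */
static inline void example_dma_clear_irq(ide_hwif_t *hwif)
{
	u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);

	example_dma_writeb(hwif, ATA_DMA_STATUS, dma_stat | 6);
}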
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c index 011d72011cc4..3d8e6dd0f41e 100644 --- a/drivers/ide/ide-floppy.c +++ b/drivers/ide/ide-floppy.c | |||
@@ -125,26 +125,10 @@ typedef struct ide_floppy_obj { | |||
125 | int wp; | 125 | int wp; |
126 | /* Supports format progress report */ | 126 | /* Supports format progress report */ |
127 | int srfp; | 127 | int srfp; |
128 | /* Status/Action flags */ | ||
129 | unsigned long flags; | ||
130 | } idefloppy_floppy_t; | 128 | } idefloppy_floppy_t; |
131 | 129 | ||
132 | #define IDEFLOPPY_TICKS_DELAY HZ/20 /* default delay for ZIP 100 (50ms) */ | 130 | #define IDEFLOPPY_TICKS_DELAY HZ/20 /* default delay for ZIP 100 (50ms) */ |
133 | 131 | ||
134 | /* Floppy flag bits values. */ | ||
135 | enum { | ||
136 | /* DRQ interrupt device */ | ||
137 | IDEFLOPPY_FLAG_DRQ_INTERRUPT = (1 << 0), | ||
138 | /* Media may have changed */ | ||
139 | IDEFLOPPY_FLAG_MEDIA_CHANGED = (1 << 1), | ||
140 | /* Format in progress */ | ||
141 | IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS = (1 << 2), | ||
142 | /* Avoid commands not supported in Clik drive */ | ||
143 | IDEFLOPPY_FLAG_CLIK_DRIVE = (1 << 3), | ||
144 | /* Requires BH algorithm for packets */ | ||
145 | IDEFLOPPY_FLAG_ZIP_DRIVE = (1 << 4), | ||
146 | }; | ||
147 | |||
148 | /* Defines for the MODE SENSE command */ | 132 | /* Defines for the MODE SENSE command */ |
149 | #define MODE_SENSE_CURRENT 0x00 | 133 | #define MODE_SENSE_CURRENT 0x00 |
150 | #define MODE_SENSE_CHANGEABLE 0x01 | 134 | #define MODE_SENSE_CHANGEABLE 0x01 |
@@ -247,9 +231,9 @@ static void ide_floppy_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc, | |||
247 | 231 | ||
248 | data = bvec_kmap_irq(bvec, &flags); | 232 | data = bvec_kmap_irq(bvec, &flags); |
249 | if (direction) | 233 | if (direction) |
250 | hwif->output_data(drive, NULL, data, count); | 234 | hwif->tp_ops->output_data(drive, NULL, data, count); |
251 | else | 235 | else |
252 | hwif->input_data(drive, NULL, data, count); | 236 | hwif->tp_ops->input_data(drive, NULL, data, count); |
253 | bvec_kunmap_irq(data, &flags); | 237 | bvec_kunmap_irq(data, &flags); |
254 | 238 | ||
255 | bcount -= count; | 239 | bcount -= count; |
@@ -291,6 +275,7 @@ static void idefloppy_queue_pc_head(ide_drive_t *drive, struct ide_atapi_pc *pc, | |||
291 | rq->cmd_type = REQ_TYPE_SPECIAL; | 275 | rq->cmd_type = REQ_TYPE_SPECIAL; |
292 | rq->cmd_flags |= REQ_PREEMPT; | 276 | rq->cmd_flags |= REQ_PREEMPT; |
293 | rq->rq_disk = floppy->disk; | 277 | rq->rq_disk = floppy->disk; |
278 | memcpy(rq->cmd, pc->c, 12); | ||
294 | ide_do_drive_cmd(drive, rq); | 279 | ide_do_drive_cmd(drive, rq); |
295 | } | 280 | } |
296 | 281 | ||
@@ -354,7 +339,6 @@ static void idefloppy_init_pc(struct ide_atapi_pc *pc) | |||
354 | memset(pc, 0, sizeof(*pc)); | 339 | memset(pc, 0, sizeof(*pc)); |
355 | pc->buf = pc->pc_buf; | 340 | pc->buf = pc->pc_buf; |
356 | pc->buf_size = IDEFLOPPY_PC_BUFFER_SIZE; | 341 | pc->buf_size = IDEFLOPPY_PC_BUFFER_SIZE; |
357 | pc->callback = ide_floppy_callback; | ||
358 | } | 342 | } |
359 | 343 | ||
360 | static void idefloppy_create_request_sense_cmd(struct ide_atapi_pc *pc) | 344 | static void idefloppy_create_request_sense_cmd(struct ide_atapi_pc *pc) |
@@ -402,7 +386,7 @@ static int idefloppy_transfer_pc(ide_drive_t *drive) | |||
402 | idefloppy_floppy_t *floppy = drive->driver_data; | 386 | idefloppy_floppy_t *floppy = drive->driver_data; |
403 | 387 | ||
404 | /* Send the actual packet */ | 388 | /* Send the actual packet */ |
405 | drive->hwif->output_data(drive, NULL, floppy->pc->c, 12); | 389 | drive->hwif->tp_ops->output_data(drive, NULL, floppy->pc->c, 12); |
406 | 390 | ||
407 | /* Timeout for the packet command */ | 391 | /* Timeout for the packet command */ |
408 | return IDEFLOPPY_WAIT_CMD; | 392 | return IDEFLOPPY_WAIT_CMD; |
@@ -429,7 +413,7 @@ static ide_startstop_t idefloppy_start_pc_transfer(ide_drive_t *drive) | |||
429 | * 40 and 50msec work well. idefloppy_pc_intr will not be actually | 413 | * 40 and 50msec work well. idefloppy_pc_intr will not be actually |
430 | * used until after the packet is moved in about 50 msec. | 414 | * used until after the packet is moved in about 50 msec. |
431 | */ | 415 | */ |
432 | if (pc->flags & PC_FLAG_ZIP_DRIVE) { | 416 | if (drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) { |
433 | timeout = floppy->ticks; | 417 | timeout = floppy->ticks; |
434 | expiry = &idefloppy_transfer_pc; | 418 | expiry = &idefloppy_transfer_pc; |
435 | } else { | 419 | } else { |
@@ -474,7 +458,7 @@ static ide_startstop_t idefloppy_issue_pc(ide_drive_t *drive, | |||
474 | pc->error = IDEFLOPPY_ERROR_GENERAL; | 458 | pc->error = IDEFLOPPY_ERROR_GENERAL; |
475 | 459 | ||
476 | floppy->failed_pc = NULL; | 460 | floppy->failed_pc = NULL; |
477 | pc->callback(drive); | 461 | drive->pc_callback(drive); |
478 | return ide_stopped; | 462 | return ide_stopped; |
479 | } | 463 | } |
480 | 464 | ||
@@ -574,6 +558,8 @@ static void idefloppy_create_rw_cmd(idefloppy_floppy_t *floppy, | |||
574 | put_unaligned(cpu_to_be16(blocks), (unsigned short *)&pc->c[7]); | 558 | put_unaligned(cpu_to_be16(blocks), (unsigned short *)&pc->c[7]); |
575 | put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[2]); | 559 | put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[2]); |
576 | 560 | ||
561 | memcpy(rq->cmd, pc->c, 12); | ||
562 | |||
577 | pc->rq = rq; | 563 | pc->rq = rq; |
578 | pc->b_count = cmd == READ ? 0 : rq->bio->bi_size; | 564 | pc->b_count = cmd == READ ? 0 : rq->bio->bi_size; |
579 | if (rq->cmd_flags & REQ_RW) | 565 | if (rq->cmd_flags & REQ_RW) |
@@ -647,12 +633,6 @@ static ide_startstop_t idefloppy_do_request(ide_drive_t *drive, | |||
647 | return ide_stopped; | 633 | return ide_stopped; |
648 | } | 634 | } |
649 | 635 | ||
650 | if (floppy->flags & IDEFLOPPY_FLAG_DRQ_INTERRUPT) | ||
651 | pc->flags |= PC_FLAG_DRQ_INTERRUPT; | ||
652 | |||
653 | if (floppy->flags & IDEFLOPPY_FLAG_ZIP_DRIVE) | ||
654 | pc->flags |= PC_FLAG_ZIP_DRIVE; | ||
655 | |||
656 | pc->rq = rq; | 636 | pc->rq = rq; |
657 | 637 | ||
658 | return idefloppy_issue_pc(drive, pc); | 638 | return idefloppy_issue_pc(drive, pc); |
@@ -671,6 +651,7 @@ static int idefloppy_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc) | |||
671 | rq = blk_get_request(drive->queue, READ, __GFP_WAIT); | 651 | rq = blk_get_request(drive->queue, READ, __GFP_WAIT); |
672 | rq->buffer = (char *) pc; | 652 | rq->buffer = (char *) pc; |
673 | rq->cmd_type = REQ_TYPE_SPECIAL; | 653 | rq->cmd_type = REQ_TYPE_SPECIAL; |
654 | memcpy(rq->cmd, pc->c, 12); | ||
674 | error = blk_execute_rq(drive->queue, floppy->disk, rq, 0); | 655 | error = blk_execute_rq(drive->queue, floppy->disk, rq, 0); |
675 | blk_put_request(rq); | 656 | blk_put_request(rq); |
676 | 657 | ||
@@ -795,7 +776,7 @@ static int ide_floppy_get_capacity(ide_drive_t *drive) | |||
795 | switch (pc.buf[desc_start + 4] & 0x03) { | 776 | switch (pc.buf[desc_start + 4] & 0x03) { |
796 | /* Clik! drive returns this instead of CAPACITY_CURRENT */ | 777 | /* Clik! drive returns this instead of CAPACITY_CURRENT */ |
797 | case CAPACITY_UNFORMATTED: | 778 | case CAPACITY_UNFORMATTED: |
798 | if (!(floppy->flags & IDEFLOPPY_FLAG_CLIK_DRIVE)) | 779 | if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE)) |
799 | /* | 780 | /* |
800 | * If it is not a clik drive, break out | 781 | * If it is not a clik drive, break out |
801 | * (maintains previous driver behaviour) | 782 | * (maintains previous driver behaviour) |
@@ -841,7 +822,7 @@ static int ide_floppy_get_capacity(ide_drive_t *drive) | |||
841 | } | 822 | } |
842 | 823 | ||
843 | /* Clik! disk does not support get_flexible_disk_page */ | 824 | /* Clik! disk does not support get_flexible_disk_page */ |
844 | if (!(floppy->flags & IDEFLOPPY_FLAG_CLIK_DRIVE)) | 825 | if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE)) |
845 | (void) ide_floppy_get_flexible_disk_page(drive); | 826 | (void) ide_floppy_get_flexible_disk_page(drive); |
846 | 827 | ||
847 | set_capacity(floppy->disk, floppy->blocks * floppy->bs_factor); | 828 | set_capacity(floppy->disk, floppy->blocks * floppy->bs_factor); |
@@ -949,11 +930,12 @@ static int idefloppy_get_format_progress(ide_drive_t *drive, int __user *arg) | |||
949 | 930 | ||
950 | /* Else assume format_unit has finished, and we're at 0x10000 */ | 931 | /* Else assume format_unit has finished, and we're at 0x10000 */ |
951 | } else { | 932 | } else { |
933 | ide_hwif_t *hwif = drive->hwif; | ||
952 | unsigned long flags; | 934 | unsigned long flags; |
953 | u8 stat; | 935 | u8 stat; |
954 | 936 | ||
955 | local_irq_save(flags); | 937 | local_irq_save(flags); |
956 | stat = ide_read_status(drive); | 938 | stat = hwif->tp_ops->read_status(hwif); |
957 | local_irq_restore(flags); | 939 | local_irq_restore(flags); |
958 | 940 | ||
959 | progress_indication = ((stat & SEEK_STAT) == 0) ? 0 : 0x10000; | 941 | progress_indication = ((stat & SEEK_STAT) == 0) ? 0 : 0x10000; |
@@ -1039,9 +1021,10 @@ static void idefloppy_setup(ide_drive_t *drive, idefloppy_floppy_t *floppy) | |||
1039 | 1021 | ||
1040 | *((u16 *) &gcw) = drive->id->config; | 1022 | *((u16 *) &gcw) = drive->id->config; |
1041 | floppy->pc = floppy->pc_stack; | 1023 | floppy->pc = floppy->pc_stack; |
1024 | drive->pc_callback = ide_floppy_callback; | ||
1042 | 1025 | ||
1043 | if (((gcw[0] & 0x60) >> 5) == 1) | 1026 | if (((gcw[0] & 0x60) >> 5) == 1) |
1044 | floppy->flags |= IDEFLOPPY_FLAG_DRQ_INTERRUPT; | 1027 | drive->atapi_flags |= IDE_AFLAG_DRQ_INTERRUPT; |
1045 | /* | 1028 | /* |
1046 | * We used to check revisions here. At this point however I'm giving up. | 1029 | * We used to check revisions here. At this point however I'm giving up. |
1047 | * Just assume they are all broken, it's easier. | 1030 | * Just assume they are all broken, it's easier. |
@@ -1052,7 +1035,7 @@ static void idefloppy_setup(ide_drive_t *drive, idefloppy_floppy_t *floppy) | |||
1052 | * we'll leave the limitation below for the 2.2.x tree. | 1035 | * we'll leave the limitation below for the 2.2.x tree. |
1053 | */ | 1036 | */ |
1054 | if (!strncmp(drive->id->model, "IOMEGA ZIP 100 ATAPI", 20)) { | 1037 | if (!strncmp(drive->id->model, "IOMEGA ZIP 100 ATAPI", 20)) { |
1055 | floppy->flags |= IDEFLOPPY_FLAG_ZIP_DRIVE; | 1038 | drive->atapi_flags |= IDE_AFLAG_ZIP_DRIVE; |
1056 | /* This value will be visible in the /proc/ide/hdx/settings */ | 1039 | /* This value will be visible in the /proc/ide/hdx/settings */ |
1057 | floppy->ticks = IDEFLOPPY_TICKS_DELAY; | 1040 | floppy->ticks = IDEFLOPPY_TICKS_DELAY; |
1058 | blk_queue_max_sectors(drive->queue, 64); | 1041 | blk_queue_max_sectors(drive->queue, 64); |
@@ -1064,7 +1047,7 @@ static void idefloppy_setup(ide_drive_t *drive, idefloppy_floppy_t *floppy) | |||
1064 | */ | 1047 | */ |
1065 | if (strncmp(drive->id->model, "IOMEGA Clik!", 11) == 0) { | 1048 | if (strncmp(drive->id->model, "IOMEGA Clik!", 11) == 0) { |
1066 | blk_queue_max_sectors(drive->queue, 64); | 1049 | blk_queue_max_sectors(drive->queue, 64); |
1067 | floppy->flags |= IDEFLOPPY_FLAG_CLIK_DRIVE; | 1050 | drive->atapi_flags |= IDE_AFLAG_CLIK_DRIVE; |
1068 | } | 1051 | } |
1069 | 1052 | ||
1070 | (void) ide_floppy_get_capacity(drive); | 1053 | (void) ide_floppy_get_capacity(drive); |
@@ -1153,7 +1136,7 @@ static int idefloppy_open(struct inode *inode, struct file *filp) | |||
1153 | floppy->openers++; | 1136 | floppy->openers++; |
1154 | 1137 | ||
1155 | if (floppy->openers == 1) { | 1138 | if (floppy->openers == 1) { |
1156 | floppy->flags &= ~IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS; | 1139 | drive->atapi_flags &= ~IDE_AFLAG_FORMAT_IN_PROGRESS; |
1157 | /* Just in case */ | 1140 | /* Just in case */ |
1158 | 1141 | ||
1159 | idefloppy_init_pc(&pc); | 1142 | idefloppy_init_pc(&pc); |
@@ -1180,14 +1163,14 @@ static int idefloppy_open(struct inode *inode, struct file *filp) | |||
1180 | ret = -EROFS; | 1163 | ret = -EROFS; |
1181 | goto out_put_floppy; | 1164 | goto out_put_floppy; |
1182 | } | 1165 | } |
1183 | floppy->flags |= IDEFLOPPY_FLAG_MEDIA_CHANGED; | 1166 | drive->atapi_flags |= IDE_AFLAG_MEDIA_CHANGED; |
1184 | /* IOMEGA Clik! drives do not support lock/unlock commands */ | 1167 | /* IOMEGA Clik! drives do not support lock/unlock commands */ |
1185 | if (!(floppy->flags & IDEFLOPPY_FLAG_CLIK_DRIVE)) { | 1168 | if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE)) { |
1186 | idefloppy_create_prevent_cmd(&pc, 1); | 1169 | idefloppy_create_prevent_cmd(&pc, 1); |
1187 | (void) idefloppy_queue_pc_tail(drive, &pc); | 1170 | (void) idefloppy_queue_pc_tail(drive, &pc); |
1188 | } | 1171 | } |
1189 | check_disk_change(inode->i_bdev); | 1172 | check_disk_change(inode->i_bdev); |
1190 | } else if (floppy->flags & IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS) { | 1173 | } else if (drive->atapi_flags & IDE_AFLAG_FORMAT_IN_PROGRESS) { |
1191 | ret = -EBUSY; | 1174 | ret = -EBUSY; |
1192 | goto out_put_floppy; | 1175 | goto out_put_floppy; |
1193 | } | 1176 | } |
@@ -1210,12 +1193,12 @@ static int idefloppy_release(struct inode *inode, struct file *filp) | |||
1210 | 1193 | ||
1211 | if (floppy->openers == 1) { | 1194 | if (floppy->openers == 1) { |
1212 | /* IOMEGA Clik! drives do not support lock/unlock commands */ | 1195 | /* IOMEGA Clik! drives do not support lock/unlock commands */ |
1213 | if (!(floppy->flags & IDEFLOPPY_FLAG_CLIK_DRIVE)) { | 1196 | if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE)) { |
1214 | idefloppy_create_prevent_cmd(&pc, 0); | 1197 | idefloppy_create_prevent_cmd(&pc, 0); |
1215 | (void) idefloppy_queue_pc_tail(drive, &pc); | 1198 | (void) idefloppy_queue_pc_tail(drive, &pc); |
1216 | } | 1199 | } |
1217 | 1200 | ||
1218 | floppy->flags &= ~IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS; | 1201 | drive->atapi_flags &= ~IDE_AFLAG_FORMAT_IN_PROGRESS; |
1219 | } | 1202 | } |
1220 | 1203 | ||
1221 | floppy->openers--; | 1204 | floppy->openers--; |
@@ -1236,15 +1219,17 @@ static int idefloppy_getgeo(struct block_device *bdev, struct hd_geometry *geo) | |||
1236 | return 0; | 1219 | return 0; |
1237 | } | 1220 | } |
1238 | 1221 | ||
1239 | static int ide_floppy_lockdoor(idefloppy_floppy_t *floppy, | 1222 | static int ide_floppy_lockdoor(ide_drive_t *drive, struct ide_atapi_pc *pc, |
1240 | struct ide_atapi_pc *pc, unsigned long arg, unsigned int cmd) | 1223 | unsigned long arg, unsigned int cmd) |
1241 | { | 1224 | { |
1225 | idefloppy_floppy_t *floppy = drive->driver_data; | ||
1226 | |||
1242 | if (floppy->openers > 1) | 1227 | if (floppy->openers > 1) |
1243 | return -EBUSY; | 1228 | return -EBUSY; |
1244 | 1229 | ||
1245 | /* The IOMEGA Clik! Drive doesn't support this command - | 1230 | /* The IOMEGA Clik! Drive doesn't support this command - |
1246 | * no room for an eject mechanism */ | 1231 | * no room for an eject mechanism */ |
1247 | if (!(floppy->flags & IDEFLOPPY_FLAG_CLIK_DRIVE)) { | 1232 | if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE)) { |
1248 | int prevent = arg ? 1 : 0; | 1233 | int prevent = arg ? 1 : 0; |
1249 | 1234 | ||
1250 | if (cmd == CDROMEJECT) | 1235 | if (cmd == CDROMEJECT) |
@@ -1265,16 +1250,17 @@ static int ide_floppy_lockdoor(idefloppy_floppy_t *floppy, | |||
1265 | static int ide_floppy_format_unit(idefloppy_floppy_t *floppy, | 1250 | static int ide_floppy_format_unit(idefloppy_floppy_t *floppy, |
1266 | int __user *arg) | 1251 | int __user *arg) |
1267 | { | 1252 | { |
1268 | int blocks, length, flags, err = 0; | ||
1269 | struct ide_atapi_pc pc; | 1253 | struct ide_atapi_pc pc; |
1254 | ide_drive_t *drive = floppy->drive; | ||
1255 | int blocks, length, flags, err = 0; | ||
1270 | 1256 | ||
1271 | if (floppy->openers > 1) { | 1257 | if (floppy->openers > 1) { |
1272 | /* Don't format if someone is using the disk */ | 1258 | /* Don't format if someone is using the disk */ |
1273 | floppy->flags &= ~IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS; | 1259 | drive->atapi_flags &= ~IDE_AFLAG_FORMAT_IN_PROGRESS; |
1274 | return -EBUSY; | 1260 | return -EBUSY; |
1275 | } | 1261 | } |
1276 | 1262 | ||
1277 | floppy->flags |= IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS; | 1263 | drive->atapi_flags |= IDE_AFLAG_FORMAT_IN_PROGRESS; |
1278 | 1264 | ||
1279 | /* | 1265 | /* |
1280 | * Send ATAPI_FORMAT_UNIT to the drive. | 1266 | * Send ATAPI_FORMAT_UNIT to the drive. |
@@ -1298,15 +1284,15 @@ static int ide_floppy_format_unit(idefloppy_floppy_t *floppy, | |||
1298 | goto out; | 1284 | goto out; |
1299 | } | 1285 | } |
1300 | 1286 | ||
1301 | (void) idefloppy_get_sfrp_bit(floppy->drive); | 1287 | (void) idefloppy_get_sfrp_bit(drive); |
1302 | idefloppy_create_format_unit_cmd(&pc, blocks, length, flags); | 1288 | idefloppy_create_format_unit_cmd(&pc, blocks, length, flags); |
1303 | 1289 | ||
1304 | if (idefloppy_queue_pc_tail(floppy->drive, &pc)) | 1290 | if (idefloppy_queue_pc_tail(drive, &pc)) |
1305 | err = -EIO; | 1291 | err = -EIO; |
1306 | 1292 | ||
1307 | out: | 1293 | out: |
1308 | if (err) | 1294 | if (err) |
1309 | floppy->flags &= ~IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS; | 1295 | drive->atapi_flags &= ~IDE_AFLAG_FORMAT_IN_PROGRESS; |
1310 | return err; | 1296 | return err; |
1311 | } | 1297 | } |
1312 | 1298 | ||
@@ -1325,7 +1311,7 @@ static int idefloppy_ioctl(struct inode *inode, struct file *file, | |||
1325 | case CDROMEJECT: | 1311 | case CDROMEJECT: |
1326 | /* fall through */ | 1312 | /* fall through */ |
1327 | case CDROM_LOCKDOOR: | 1313 | case CDROM_LOCKDOOR: |
1328 | return ide_floppy_lockdoor(floppy, &pc, arg, cmd); | 1314 | return ide_floppy_lockdoor(drive, &pc, arg, cmd); |
1329 | case IDEFLOPPY_IOCTL_FORMAT_SUPPORTED: | 1315 | case IDEFLOPPY_IOCTL_FORMAT_SUPPORTED: |
1330 | return 0; | 1316 | return 0; |
1331 | case IDEFLOPPY_IOCTL_FORMAT_GET_CAPACITY: | 1317 | case IDEFLOPPY_IOCTL_FORMAT_GET_CAPACITY: |
@@ -1366,8 +1352,8 @@ static int idefloppy_media_changed(struct gendisk *disk) | |||
1366 | drive->attach = 0; | 1352 | drive->attach = 0; |
1367 | return 0; | 1353 | return 0; |
1368 | } | 1354 | } |
1369 | ret = !!(floppy->flags & IDEFLOPPY_FLAG_MEDIA_CHANGED); | 1355 | ret = !!(drive->atapi_flags & IDE_AFLAG_MEDIA_CHANGED); |
1370 | floppy->flags &= ~IDEFLOPPY_FLAG_MEDIA_CHANGED; | 1356 | drive->atapi_flags &= ~IDE_AFLAG_MEDIA_CHANGED; |
1371 | return ret; | 1357 | return ret; |
1372 | } | 1358 | } |
1373 | 1359 | ||
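The ide-floppy changes above fold the driver-private status bits into drive->atapi_flags and route all PIO through hwif->tp_ops. A small sketch of the resulting access pattern, assuming the IDE_AFLAG_* names used in the hunks; the helper functions themselves are hypothetical:

static inline int my_floppy_is_clik(ide_drive_t *drive)
{
	/* per-device quirk bits now live on ide_drive_t, not on the floppy object */
	return (drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE) != 0;
}

static inline void my_floppy_set_format(ide_drive_t *drive, int on)
{
	if (on)
		drive->atapi_flags |= IDE_AFLAG_FORMAT_IN_PROGRESS;
	else
		drive->atapi_flags &= ~IDE_AFLAG_FORMAT_IN_PROGRESS;
}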
diff --git a/drivers/ide/ide-generic.c b/drivers/ide/ide-generic.c index 2d92214096ab..31d98fec775f 100644 --- a/drivers/ide/ide-generic.c +++ b/drivers/ide/ide-generic.c | |||
@@ -28,29 +28,21 @@ MODULE_PARM_DESC(probe_mask, "probe mask for legacy ISA IDE ports"); | |||
28 | 28 | ||
29 | static ssize_t store_add(struct class *cls, const char *buf, size_t n) | 29 | static ssize_t store_add(struct class *cls, const char *buf, size_t n) |
30 | { | 30 | { |
31 | ide_hwif_t *hwif; | ||
32 | unsigned int base, ctl; | 31 | unsigned int base, ctl; |
33 | int irq; | 32 | int irq, rc; |
34 | hw_regs_t hw; | 33 | hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; |
35 | u8 idx[] = { 0xff, 0xff, 0xff, 0xff }; | ||
36 | 34 | ||
37 | if (sscanf(buf, "%x:%x:%d", &base, &ctl, &irq) != 3) | 35 | if (sscanf(buf, "%x:%x:%d", &base, &ctl, &irq) != 3) |
38 | return -EINVAL; | 36 | return -EINVAL; |
39 | 37 | ||
40 | hwif = ide_find_port(); | ||
41 | if (hwif == NULL) | ||
42 | return -ENOENT; | ||
43 | |||
44 | memset(&hw, 0, sizeof(hw)); | 38 | memset(&hw, 0, sizeof(hw)); |
45 | ide_std_init_ports(&hw, base, ctl); | 39 | ide_std_init_ports(&hw, base, ctl); |
46 | hw.irq = irq; | 40 | hw.irq = irq; |
47 | hw.chipset = ide_generic; | 41 | hw.chipset = ide_generic; |
48 | 42 | ||
49 | ide_init_port_hw(hwif, &hw); | 43 | rc = ide_host_add(NULL, hws, NULL); |
50 | 44 | if (rc) | |
51 | idx[0] = hwif->index; | 45 | return rc; |
52 | |||
53 | ide_device_add(idx, NULL); | ||
54 | 46 | ||
55 | return n; | 47 | return n; |
56 | }; | 48 | }; |
@@ -90,18 +82,18 @@ static int __init ide_generic_sysfs_init(void) | |||
90 | 82 | ||
91 | static int __init ide_generic_init(void) | 83 | static int __init ide_generic_init(void) |
92 | { | 84 | { |
93 | u8 idx[MAX_HWIFS]; | 85 | hw_regs_t hw[MAX_HWIFS], *hws[MAX_HWIFS]; |
94 | int i; | 86 | struct ide_host *host; |
87 | unsigned long io_addr; | ||
88 | int i, rc; | ||
95 | 89 | ||
96 | printk(KERN_INFO DRV_NAME ": please use \"probe_mask=0x3f\" module " | 90 | printk(KERN_INFO DRV_NAME ": please use \"probe_mask=0x3f\" module " |
97 | "parameter for probing all legacy ISA IDE ports\n"); | 91 | "parameter for probing all legacy ISA IDE ports\n"); |
98 | 92 | ||
99 | for (i = 0; i < MAX_HWIFS; i++) { | 93 | for (i = 0; i < MAX_HWIFS; i++) { |
100 | ide_hwif_t *hwif; | 94 | io_addr = ide_default_io_base(i); |
101 | unsigned long io_addr = ide_default_io_base(i); | ||
102 | hw_regs_t hw; | ||
103 | 95 | ||
104 | idx[i] = 0xff; | 96 | hws[i] = NULL; |
105 | 97 | ||
106 | if ((probe_mask & (1 << i)) && io_addr) { | 98 | if ((probe_mask & (1 << i)) && io_addr) { |
107 | if (!request_region(io_addr, 8, DRV_NAME)) { | 99 | if (!request_region(io_addr, 8, DRV_NAME)) { |
@@ -119,33 +111,42 @@ static int __init ide_generic_init(void) | |||
119 | continue; | 111 | continue; |
120 | } | 112 | } |
121 | 113 | ||
122 | /* | 114 | memset(&hw[i], 0, sizeof(hw[i])); |
123 | * Skip probing if the corresponding | 115 | ide_std_init_ports(&hw[i], io_addr, io_addr + 0x206); |
124 | * slot is already occupied. | 116 | hw[i].irq = ide_default_irq(io_addr); |
125 | */ | 117 | hw[i].chipset = ide_generic; |
126 | hwif = ide_find_port(); | ||
127 | if (hwif == NULL || hwif->index != i) { | ||
128 | idx[i] = 0xff; | ||
129 | continue; | ||
130 | } | ||
131 | |||
132 | memset(&hw, 0, sizeof(hw)); | ||
133 | ide_std_init_ports(&hw, io_addr, io_addr + 0x206); | ||
134 | hw.irq = ide_default_irq(io_addr); | ||
135 | hw.chipset = ide_generic; | ||
136 | ide_init_port_hw(hwif, &hw); | ||
137 | 118 | ||
138 | idx[i] = i; | 119 | hws[i] = &hw[i]; |
139 | } | 120 | } |
140 | } | 121 | } |
141 | 122 | ||
142 | ide_device_add_all(idx, NULL); | 123 | host = ide_host_alloc_all(NULL, hws); |
124 | if (host == NULL) { | ||
125 | rc = -ENOMEM; | ||
126 | goto err; | ||
127 | } | ||
128 | |||
129 | rc = ide_host_register(host, NULL, hws); | ||
130 | if (rc) | ||
131 | goto err_free; | ||
143 | 132 | ||
144 | if (ide_generic_sysfs_init()) | 133 | if (ide_generic_sysfs_init()) |
145 | printk(KERN_ERR DRV_NAME ": failed to create ide_generic " | 134 | printk(KERN_ERR DRV_NAME ": failed to create ide_generic " |
146 | "class\n"); | 135 | "class\n"); |
147 | 136 | ||
148 | return 0; | 137 | return 0; |
138 | err_free: | ||
139 | ide_host_free(host); | ||
140 | err: | ||
141 | for (i = 0; i < MAX_HWIFS; i++) { | ||
142 | if (hws[i] == NULL) | ||
143 | continue; | ||
144 | |||
145 | io_addr = hws[i]->io_ports.data_addr; | ||
146 | release_region(io_addr + 0x206, 1); | ||
147 | release_region(io_addr, 8); | ||
148 | } | ||
149 | return rc; | ||
149 | } | 150 | } |
150 | 151 | ||
151 | module_init(ide_generic_init); | 152 | module_init(ide_generic_init); |
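ide-generic now goes through the ide_host_* interface instead of grabbing a pre-allocated hwif. As a rough illustration of the one-shot path used in store_add() above, a hypothetical probe of a single legacy port might look like the following; the I/O addresses and IRQ are examples only, and resource reservation is omitted for brevity.

static int __init my_probe_one_port(void)
{
	hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };

	memset(&hw, 0, sizeof(hw));
	ide_std_init_ports(&hw, 0x1f0, 0x3f6);
	hw.irq = 14;
	hw.chipset = ide_generic;

	/* allocates the host, registers the port, or returns an errno */
	return ide_host_add(NULL, hws, NULL);
}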
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index 661b75a89d4d..a896a283f27f 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c | |||
@@ -330,7 +330,7 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err) | |||
330 | tf->error = err; | 330 | tf->error = err; |
331 | tf->status = stat; | 331 | tf->status = stat; |
332 | 332 | ||
333 | drive->hwif->tf_read(drive, task); | 333 | drive->hwif->tp_ops->tf_read(drive, task); |
334 | 334 | ||
335 | if (task->tf_flags & IDE_TFLAG_DYN) | 335 | if (task->tf_flags & IDE_TFLAG_DYN) |
336 | kfree(task); | 336 | kfree(task); |
@@ -381,8 +381,7 @@ static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8 | |||
381 | if (err == ABRT_ERR) { | 381 | if (err == ABRT_ERR) { |
382 | if (drive->select.b.lba && | 382 | if (drive->select.b.lba && |
383 | /* some newer drives don't support WIN_SPECIFY */ | 383 | /* some newer drives don't support WIN_SPECIFY */ |
384 | hwif->INB(hwif->io_ports.command_addr) == | 384 | hwif->tp_ops->read_status(hwif) == WIN_SPECIFY) |
385 | WIN_SPECIFY) | ||
386 | return ide_stopped; | 385 | return ide_stopped; |
387 | } else if ((err & BAD_CRC) == BAD_CRC) { | 386 | } else if ((err & BAD_CRC) == BAD_CRC) { |
388 | /* UDMA crc error, just retry the operation */ | 387 | /* UDMA crc error, just retry the operation */ |
@@ -408,7 +407,7 @@ static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8 | |||
408 | return ide_stopped; | 407 | return ide_stopped; |
409 | } | 408 | } |
410 | 409 | ||
411 | if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT)) | 410 | if (hwif->tp_ops->read_status(hwif) & (BUSY_STAT | DRQ_STAT)) |
412 | rq->errors |= ERROR_RESET; | 411 | rq->errors |= ERROR_RESET; |
413 | 412 | ||
414 | if ((rq->errors & ERROR_RESET) == ERROR_RESET) { | 413 | if ((rq->errors & ERROR_RESET) == ERROR_RESET) { |
@@ -435,10 +434,9 @@ static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u | |||
435 | /* add decoding error stuff */ | 434 | /* add decoding error stuff */ |
436 | } | 435 | } |
437 | 436 | ||
438 | if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT)) | 437 | if (hwif->tp_ops->read_status(hwif) & (BUSY_STAT | DRQ_STAT)) |
439 | /* force an abort */ | 438 | /* force an abort */ |
440 | hwif->OUTBSYNC(hwif, WIN_IDLEIMMEDIATE, | 439 | hwif->tp_ops->exec_command(hwif, WIN_IDLEIMMEDIATE); |
441 | hwif->io_ports.command_addr); | ||
442 | 440 | ||
443 | if (rq->errors >= ERROR_MAX) { | 441 | if (rq->errors >= ERROR_MAX) { |
444 | ide_kill_rq(drive, rq); | 442 | ide_kill_rq(drive, rq); |
@@ -712,7 +710,8 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive, | |||
712 | #ifdef DEBUG | 710 | #ifdef DEBUG |
713 | printk("%s: DRIVE_CMD (null)\n", drive->name); | 711 | printk("%s: DRIVE_CMD (null)\n", drive->name); |
714 | #endif | 712 | #endif |
715 | ide_end_drive_cmd(drive, ide_read_status(drive), ide_read_error(drive)); | 713 | ide_end_drive_cmd(drive, hwif->tp_ops->read_status(hwif), |
714 | ide_read_error(drive)); | ||
716 | 715 | ||
717 | return ide_stopped; | 716 | return ide_stopped; |
718 | } | 717 | } |
@@ -747,16 +746,17 @@ static void ide_check_pm_state(ide_drive_t *drive, struct request *rq) | |||
747 | * the bus may be broken enough to walk on our toes at this | 746 | * the bus may be broken enough to walk on our toes at this |
748 | * point. | 747 | * point. |
749 | */ | 748 | */ |
749 | ide_hwif_t *hwif = drive->hwif; | ||
750 | int rc; | 750 | int rc; |
751 | #ifdef DEBUG_PM | 751 | #ifdef DEBUG_PM |
752 | printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name); | 752 | printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name); |
753 | #endif | 753 | #endif |
754 | rc = ide_wait_not_busy(HWIF(drive), 35000); | 754 | rc = ide_wait_not_busy(hwif, 35000); |
755 | if (rc) | 755 | if (rc) |
756 | printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name); | 756 | printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name); |
757 | SELECT_DRIVE(drive); | 757 | SELECT_DRIVE(drive); |
758 | ide_set_irq(drive, 1); | 758 | hwif->tp_ops->set_irq(hwif, 1); |
759 | rc = ide_wait_not_busy(HWIF(drive), 100000); | 759 | rc = ide_wait_not_busy(hwif, 100000); |
760 | if (rc) | 760 | if (rc) |
761 | printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name); | 761 | printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name); |
762 | } | 762 | } |
@@ -1042,7 +1042,7 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq) | |||
1042 | * quirk_list may not like intr setups/cleanups | 1042 | * quirk_list may not like intr setups/cleanups |
1043 | */ | 1043 | */ |
1044 | if (drive->quirk_list != 1) | 1044 | if (drive->quirk_list != 1) |
1045 | ide_set_irq(drive, 0); | 1045 | hwif->tp_ops->set_irq(hwif, 0); |
1046 | } | 1046 | } |
1047 | hwgroup->hwif = hwif; | 1047 | hwgroup->hwif = hwif; |
1048 | hwgroup->drive = drive; | 1048 | hwgroup->drive = drive; |
@@ -1142,7 +1142,7 @@ static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) | |||
1142 | printk(KERN_WARNING "%s: DMA timeout error\n", drive->name); | 1142 | printk(KERN_WARNING "%s: DMA timeout error\n", drive->name); |
1143 | (void)hwif->dma_ops->dma_end(drive); | 1143 | (void)hwif->dma_ops->dma_end(drive); |
1144 | ret = ide_error(drive, "dma timeout error", | 1144 | ret = ide_error(drive, "dma timeout error", |
1145 | ide_read_status(drive)); | 1145 | hwif->tp_ops->read_status(hwif)); |
1146 | } else { | 1146 | } else { |
1147 | printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name); | 1147 | printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name); |
1148 | hwif->dma_ops->dma_timeout(drive); | 1148 | hwif->dma_ops->dma_timeout(drive); |
@@ -1267,7 +1267,7 @@ void ide_timer_expiry (unsigned long data) | |||
1267 | } else | 1267 | } else |
1268 | startstop = | 1268 | startstop = |
1269 | ide_error(drive, "irq timeout", | 1269 | ide_error(drive, "irq timeout", |
1270 | ide_read_status(drive)); | 1270 | hwif->tp_ops->read_status(hwif)); |
1271 | } | 1271 | } |
1272 | drive->service_time = jiffies - drive->service_start; | 1272 | drive->service_time = jiffies - drive->service_start; |
1273 | spin_lock_irq(&ide_lock); | 1273 | spin_lock_irq(&ide_lock); |
@@ -1323,7 +1323,8 @@ static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup) | |||
1323 | */ | 1323 | */ |
1324 | do { | 1324 | do { |
1325 | if (hwif->irq == irq) { | 1325 | if (hwif->irq == irq) { |
1326 | stat = hwif->INB(hwif->io_ports.status_addr); | 1326 | stat = hwif->tp_ops->read_status(hwif); |
1327 | |||
1327 | if (!OK_STAT(stat, READY_STAT, BAD_STAT)) { | 1328 | if (!OK_STAT(stat, READY_STAT, BAD_STAT)) { |
1328 | /* Try to not flood the console with msgs */ | 1329 | /* Try to not flood the console with msgs */ |
1329 | static unsigned long last_msgtime, count; | 1330 | static unsigned long last_msgtime, count; |
@@ -1413,7 +1414,7 @@ irqreturn_t ide_intr (int irq, void *dev_id) | |||
1413 | * Whack the status register, just in case | 1414 | * Whack the status register, just in case |
1414 | * we have a leftover pending IRQ. | 1415 | * we have a leftover pending IRQ. |
1415 | */ | 1416 | */ |
1416 | (void) hwif->INB(hwif->io_ports.status_addr); | 1417 | (void)hwif->tp_ops->read_status(hwif); |
1417 | #endif /* CONFIG_BLK_DEV_IDEPCI */ | 1418 | #endif /* CONFIG_BLK_DEV_IDEPCI */ |
1418 | } | 1419 | } |
1419 | spin_unlock_irqrestore(&ide_lock, flags); | 1420 | spin_unlock_irqrestore(&ide_lock, flags); |
@@ -1519,6 +1520,7 @@ EXPORT_SYMBOL(ide_do_drive_cmd); | |||
1519 | 1520 | ||
1520 | void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma) | 1521 | void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma) |
1521 | { | 1522 | { |
1523 | ide_hwif_t *hwif = drive->hwif; | ||
1522 | ide_task_t task; | 1524 | ide_task_t task; |
1523 | 1525 | ||
1524 | memset(&task, 0, sizeof(task)); | 1526 | memset(&task, 0, sizeof(task)); |
@@ -1529,9 +1531,9 @@ void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma) | |||
1529 | task.tf.lbah = (bcount >> 8) & 0xff; | 1531 | task.tf.lbah = (bcount >> 8) & 0xff; |
1530 | 1532 | ||
1531 | ide_tf_dump(drive->name, &task.tf); | 1533 | ide_tf_dump(drive->name, &task.tf); |
1532 | ide_set_irq(drive, 1); | 1534 | hwif->tp_ops->set_irq(hwif, 1); |
1533 | SELECT_MASK(drive, 0); | 1535 | SELECT_MASK(drive, 0); |
1534 | drive->hwif->tf_load(drive, &task); | 1536 | hwif->tp_ops->tf_load(drive, &task); |
1535 | } | 1537 | } |
1536 | 1538 | ||
1537 | EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load); | 1539 | EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load); |
@@ -1543,9 +1545,9 @@ void ide_pad_transfer(ide_drive_t *drive, int write, int len) | |||
1543 | 1545 | ||
1544 | while (len > 0) { | 1546 | while (len > 0) { |
1545 | if (write) | 1547 | if (write) |
1546 | hwif->output_data(drive, NULL, buf, min(4, len)); | 1548 | hwif->tp_ops->output_data(drive, NULL, buf, min(4, len)); |
1547 | else | 1549 | else |
1548 | hwif->input_data(drive, NULL, buf, min(4, len)); | 1550 | hwif->tp_ops->input_data(drive, NULL, buf, min(4, len)); |
1549 | len -= 4; | 1551 | len -= 4; |
1550 | } | 1552 | } |
1551 | } | 1553 | } |
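Throughout ide-io.c the core now reaches the status and taskfile registers through the hwif->tp_ops method table rather than per-hwif INB/OUTB/tf_* pointers. A minimal sketch of a caller written against the new interface; my_wait_ready() is hypothetical and only the tp_ops call and status macros come from this diff:

static int my_wait_ready(ide_hwif_t *hwif)
{
	u8 stat = hwif->tp_ops->read_status(hwif);

	if (stat & BUSY_STAT)
		return -EBUSY;

	return OK_STAT(stat, READY_STAT, BAD_STAT) ? 0 : -EIO;
}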
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c index 44aaec256a30..07da5fb9eaff 100644 --- a/drivers/ide/ide-iops.c +++ b/drivers/ide/ide-iops.c | |||
@@ -42,18 +42,6 @@ static void ide_outb (u8 val, unsigned long port) | |||
42 | outb(val, port); | 42 | outb(val, port); |
43 | } | 43 | } |
44 | 44 | ||
45 | static void ide_outbsync(ide_hwif_t *hwif, u8 addr, unsigned long port) | ||
46 | { | ||
47 | outb(addr, port); | ||
48 | } | ||
49 | |||
50 | void default_hwif_iops (ide_hwif_t *hwif) | ||
51 | { | ||
52 | hwif->OUTB = ide_outb; | ||
53 | hwif->OUTBSYNC = ide_outbsync; | ||
54 | hwif->INB = ide_inb; | ||
55 | } | ||
56 | |||
57 | /* | 45 | /* |
58 | * MMIO operations, typically used for SATA controllers | 46 | * MMIO operations, typically used for SATA controllers |
59 | */ | 47 | */ |
@@ -68,31 +56,19 @@ static void ide_mm_outb (u8 value, unsigned long port) | |||
68 | writeb(value, (void __iomem *) port); | 56 | writeb(value, (void __iomem *) port); |
69 | } | 57 | } |
70 | 58 | ||
71 | static void ide_mm_outbsync(ide_hwif_t *hwif, u8 value, unsigned long port) | ||
72 | { | ||
73 | writeb(value, (void __iomem *) port); | ||
74 | } | ||
75 | |||
76 | void default_hwif_mmiops (ide_hwif_t *hwif) | ||
77 | { | ||
78 | hwif->OUTB = ide_mm_outb; | ||
79 | /* Most systems will need to override OUTBSYNC, alas however | ||
80 | this one is controller specific! */ | ||
81 | hwif->OUTBSYNC = ide_mm_outbsync; | ||
82 | hwif->INB = ide_mm_inb; | ||
83 | } | ||
84 | |||
85 | EXPORT_SYMBOL(default_hwif_mmiops); | ||
86 | |||
87 | void SELECT_DRIVE (ide_drive_t *drive) | 59 | void SELECT_DRIVE (ide_drive_t *drive) |
88 | { | 60 | { |
89 | ide_hwif_t *hwif = drive->hwif; | 61 | ide_hwif_t *hwif = drive->hwif; |
90 | const struct ide_port_ops *port_ops = hwif->port_ops; | 62 | const struct ide_port_ops *port_ops = hwif->port_ops; |
63 | ide_task_t task; | ||
91 | 64 | ||
92 | if (port_ops && port_ops->selectproc) | 65 | if (port_ops && port_ops->selectproc) |
93 | port_ops->selectproc(drive); | 66 | port_ops->selectproc(drive); |
94 | 67 | ||
95 | hwif->OUTB(drive->select.all, hwif->io_ports.device_addr); | 68 | memset(&task, 0, sizeof(task)); |
69 | task.tf_flags = IDE_TFLAG_OUT_DEVICE; | ||
70 | |||
71 | drive->hwif->tp_ops->tf_load(drive, &task); | ||
96 | } | 72 | } |
97 | 73 | ||
98 | void SELECT_MASK(ide_drive_t *drive, int mask) | 74 | void SELECT_MASK(ide_drive_t *drive, int mask) |
@@ -103,7 +79,61 @@ void SELECT_MASK(ide_drive_t *drive, int mask) | |||
103 | port_ops->maskproc(drive, mask); | 79 | port_ops->maskproc(drive, mask); |
104 | } | 80 | } |
105 | 81 | ||
106 | static void ide_tf_load(ide_drive_t *drive, ide_task_t *task) | 82 | void ide_exec_command(ide_hwif_t *hwif, u8 cmd) |
83 | { | ||
84 | if (hwif->host_flags & IDE_HFLAG_MMIO) | ||
85 | writeb(cmd, (void __iomem *)hwif->io_ports.command_addr); | ||
86 | else | ||
87 | outb(cmd, hwif->io_ports.command_addr); | ||
88 | } | ||
89 | EXPORT_SYMBOL_GPL(ide_exec_command); | ||
90 | |||
91 | u8 ide_read_status(ide_hwif_t *hwif) | ||
92 | { | ||
93 | if (hwif->host_flags & IDE_HFLAG_MMIO) | ||
94 | return readb((void __iomem *)hwif->io_ports.status_addr); | ||
95 | else | ||
96 | return inb(hwif->io_ports.status_addr); | ||
97 | } | ||
98 | EXPORT_SYMBOL_GPL(ide_read_status); | ||
99 | |||
100 | u8 ide_read_altstatus(ide_hwif_t *hwif) | ||
101 | { | ||
102 | if (hwif->host_flags & IDE_HFLAG_MMIO) | ||
103 | return readb((void __iomem *)hwif->io_ports.ctl_addr); | ||
104 | else | ||
105 | return inb(hwif->io_ports.ctl_addr); | ||
106 | } | ||
107 | EXPORT_SYMBOL_GPL(ide_read_altstatus); | ||
108 | |||
109 | u8 ide_read_sff_dma_status(ide_hwif_t *hwif) | ||
110 | { | ||
111 | if (hwif->host_flags & IDE_HFLAG_MMIO) | ||
112 | return readb((void __iomem *)(hwif->dma_base + ATA_DMA_STATUS)); | ||
113 | else | ||
114 | return inb(hwif->dma_base + ATA_DMA_STATUS); | ||
115 | } | ||
116 | EXPORT_SYMBOL_GPL(ide_read_sff_dma_status); | ||
117 | |||
118 | void ide_set_irq(ide_hwif_t *hwif, int on) | ||
119 | { | ||
120 | u8 ctl = ATA_DEVCTL_OBS; | ||
121 | |||
122 | if (on == 4) { /* hack for SRST */ | ||
123 | ctl |= 4; | ||
124 | on &= ~4; | ||
125 | } | ||
126 | |||
127 | ctl |= on ? 0 : 2; | ||
128 | |||
129 | if (hwif->host_flags & IDE_HFLAG_MMIO) | ||
130 | writeb(ctl, (void __iomem *)hwif->io_ports.ctl_addr); | ||
131 | else | ||
132 | outb(ctl, hwif->io_ports.ctl_addr); | ||
133 | } | ||
134 | EXPORT_SYMBOL_GPL(ide_set_irq); | ||
135 | |||
136 | void ide_tf_load(ide_drive_t *drive, ide_task_t *task) | ||
107 | { | 137 | { |
108 | ide_hwif_t *hwif = drive->hwif; | 138 | ide_hwif_t *hwif = drive->hwif; |
109 | struct ide_io_ports *io_ports = &hwif->io_ports; | 139 | struct ide_io_ports *io_ports = &hwif->io_ports; |
@@ -155,8 +185,9 @@ static void ide_tf_load(ide_drive_t *drive, ide_task_t *task) | |||
155 | tf_outb((tf->device & HIHI) | drive->select.all, | 185 | tf_outb((tf->device & HIHI) | drive->select.all, |
156 | io_ports->device_addr); | 186 | io_ports->device_addr); |
157 | } | 187 | } |
188 | EXPORT_SYMBOL_GPL(ide_tf_load); | ||
158 | 189 | ||
159 | static void ide_tf_read(ide_drive_t *drive, ide_task_t *task) | 190 | void ide_tf_read(ide_drive_t *drive, ide_task_t *task) |
160 | { | 191 | { |
161 | ide_hwif_t *hwif = drive->hwif; | 192 | ide_hwif_t *hwif = drive->hwif; |
162 | struct ide_io_ports *io_ports = &hwif->io_ports; | 193 | struct ide_io_ports *io_ports = &hwif->io_ports; |
@@ -188,6 +219,8 @@ static void ide_tf_read(ide_drive_t *drive, ide_task_t *task) | |||
188 | /* be sure we're looking at the low order bits */ | 219 | /* be sure we're looking at the low order bits */ |
189 | tf_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr); | 220 | tf_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr); |
190 | 221 | ||
222 | if (task->tf_flags & IDE_TFLAG_IN_FEATURE) | ||
223 | tf->feature = tf_inb(io_ports->feature_addr); | ||
191 | if (task->tf_flags & IDE_TFLAG_IN_NSECT) | 224 | if (task->tf_flags & IDE_TFLAG_IN_NSECT) |
192 | tf->nsect = tf_inb(io_ports->nsect_addr); | 225 | tf->nsect = tf_inb(io_ports->nsect_addr); |
193 | if (task->tf_flags & IDE_TFLAG_IN_LBAL) | 226 | if (task->tf_flags & IDE_TFLAG_IN_LBAL) |
@@ -214,6 +247,7 @@ static void ide_tf_read(ide_drive_t *drive, ide_task_t *task) | |||
214 | tf->hob_lbah = tf_inb(io_ports->lbah_addr); | 247 | tf->hob_lbah = tf_inb(io_ports->lbah_addr); |
215 | } | 248 | } |
216 | } | 249 | } |
250 | EXPORT_SYMBOL_GPL(ide_tf_read); | ||
217 | 251 | ||
218 | /* | 252 | /* |
219 | * Some localbus EIDE interfaces require a special access sequence | 253 | * Some localbus EIDE interfaces require a special access sequence |
@@ -236,8 +270,8 @@ static void ata_vlb_sync(unsigned long port) | |||
236 | * so if an odd len is specified, be sure that there's at least one | 270 | * so if an odd len is specified, be sure that there's at least one |
237 | * extra byte allocated for the buffer. | 271 | * extra byte allocated for the buffer. |
238 | */ | 272 | */ |
239 | static void ata_input_data(ide_drive_t *drive, struct request *rq, | 273 | void ide_input_data(ide_drive_t *drive, struct request *rq, void *buf, |
240 | void *buf, unsigned int len) | 274 | unsigned int len) |
241 | { | 275 | { |
242 | ide_hwif_t *hwif = drive->hwif; | 276 | ide_hwif_t *hwif = drive->hwif; |
243 | struct ide_io_ports *io_ports = &hwif->io_ports; | 277 | struct ide_io_ports *io_ports = &hwif->io_ports; |
@@ -277,12 +311,13 @@ static void ata_input_data(ide_drive_t *drive, struct request *rq, | |||
277 | insw(data_addr, buf, len / 2); | 311 | insw(data_addr, buf, len / 2); |
278 | } | 312 | } |
279 | } | 313 | } |
314 | EXPORT_SYMBOL_GPL(ide_input_data); | ||
280 | 315 | ||
281 | /* | 316 | /* |
282 | * This is used for most PIO data transfers *to* the IDE interface | 317 | * This is used for most PIO data transfers *to* the IDE interface |
283 | */ | 318 | */ |
284 | static void ata_output_data(ide_drive_t *drive, struct request *rq, | 319 | void ide_output_data(ide_drive_t *drive, struct request *rq, void *buf, |
285 | void *buf, unsigned int len) | 320 | unsigned int len) |
286 | { | 321 | { |
287 | ide_hwif_t *hwif = drive->hwif; | 322 | ide_hwif_t *hwif = drive->hwif; |
288 | struct ide_io_ports *io_ports = &hwif->io_ports; | 323 | struct ide_io_ports *io_ports = &hwif->io_ports; |
@@ -320,15 +355,50 @@ static void ata_output_data(ide_drive_t *drive, struct request *rq, | |||
320 | outsw(data_addr, buf, len / 2); | 355 | outsw(data_addr, buf, len / 2); |
321 | } | 356 | } |
322 | } | 357 | } |
358 | EXPORT_SYMBOL_GPL(ide_output_data); | ||
359 | |||
360 | u8 ide_read_error(ide_drive_t *drive) | ||
361 | { | ||
362 | ide_task_t task; | ||
363 | |||
364 | memset(&task, 0, sizeof(task)); | ||
365 | task.tf_flags = IDE_TFLAG_IN_FEATURE; | ||
366 | |||
367 | drive->hwif->tp_ops->tf_read(drive, &task); | ||
368 | |||
369 | return task.tf.error; | ||
370 | } | ||
371 | EXPORT_SYMBOL_GPL(ide_read_error); | ||
323 | 372 | ||
324 | void default_hwif_transport(ide_hwif_t *hwif) | 373 | void ide_read_bcount_and_ireason(ide_drive_t *drive, u16 *bcount, u8 *ireason) |
325 | { | 374 | { |
326 | hwif->tf_load = ide_tf_load; | 375 | ide_task_t task; |
327 | hwif->tf_read = ide_tf_read; | 376 | |
377 | memset(&task, 0, sizeof(task)); | ||
378 | task.tf_flags = IDE_TFLAG_IN_LBAH | IDE_TFLAG_IN_LBAM | | ||
379 | IDE_TFLAG_IN_NSECT; | ||
328 | 380 | ||
329 | hwif->input_data = ata_input_data; | 381 | drive->hwif->tp_ops->tf_read(drive, &task); |
330 | hwif->output_data = ata_output_data; | 382 | |
383 | *bcount = (task.tf.lbah << 8) | task.tf.lbam; | ||
384 | *ireason = task.tf.nsect & 3; | ||
331 | } | 385 | } |
386 | EXPORT_SYMBOL_GPL(ide_read_bcount_and_ireason); | ||
387 | |||
388 | const struct ide_tp_ops default_tp_ops = { | ||
389 | .exec_command = ide_exec_command, | ||
390 | .read_status = ide_read_status, | ||
391 | .read_altstatus = ide_read_altstatus, | ||
392 | .read_sff_dma_status = ide_read_sff_dma_status, | ||
393 | |||
394 | .set_irq = ide_set_irq, | ||
395 | |||
396 | .tf_load = ide_tf_load, | ||
397 | .tf_read = ide_tf_read, | ||
398 | |||
399 | .input_data = ide_input_data, | ||
400 | .output_data = ide_output_data, | ||
401 | }; | ||
332 | 402 | ||
333 | void ide_fix_driveid (struct hd_driveid *id) | 403 | void ide_fix_driveid (struct hd_driveid *id) |
334 | { | 404 | { |
@@ -483,10 +553,10 @@ int drive_is_ready (ide_drive_t *drive) | |||
483 | * about possible isa-pnp and pci-pnp issues yet. | 553 | * about possible isa-pnp and pci-pnp issues yet. |
484 | */ | 554 | */ |
485 | if (hwif->io_ports.ctl_addr) | 555 | if (hwif->io_ports.ctl_addr) |
486 | stat = ide_read_altstatus(drive); | 556 | stat = hwif->tp_ops->read_altstatus(hwif); |
487 | else | 557 | else |
488 | /* Note: this may clear a pending IRQ!! */ | 558 | /* Note: this may clear a pending IRQ!! */ |
489 | stat = ide_read_status(drive); | 559 | stat = hwif->tp_ops->read_status(hwif); |
490 | 560 | ||
491 | if (stat & BUSY_STAT) | 561 | if (stat & BUSY_STAT) |
492 | /* drive busy: definitely not interrupting */ | 562 | /* drive busy: definitely not interrupting */ |
@@ -511,24 +581,26 @@ EXPORT_SYMBOL(drive_is_ready); | |||
511 | */ | 581 | */ |
512 | static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout, u8 *rstat) | 582 | static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout, u8 *rstat) |
513 | { | 583 | { |
584 | ide_hwif_t *hwif = drive->hwif; | ||
585 | const struct ide_tp_ops *tp_ops = hwif->tp_ops; | ||
514 | unsigned long flags; | 586 | unsigned long flags; |
515 | int i; | 587 | int i; |
516 | u8 stat; | 588 | u8 stat; |
517 | 589 | ||
518 | udelay(1); /* spec allows drive 400ns to assert "BUSY" */ | 590 | udelay(1); /* spec allows drive 400ns to assert "BUSY" */ |
519 | stat = ide_read_status(drive); | 591 | stat = tp_ops->read_status(hwif); |
520 | 592 | ||
521 | if (stat & BUSY_STAT) { | 593 | if (stat & BUSY_STAT) { |
522 | local_irq_set(flags); | 594 | local_irq_set(flags); |
523 | timeout += jiffies; | 595 | timeout += jiffies; |
524 | while ((stat = ide_read_status(drive)) & BUSY_STAT) { | 596 | while ((stat = tp_ops->read_status(hwif)) & BUSY_STAT) { |
525 | if (time_after(jiffies, timeout)) { | 597 | if (time_after(jiffies, timeout)) { |
526 | /* | 598 | /* |
527 | * One last read after the timeout in case | 599 | * One last read after the timeout in case |
528 | * heavy interrupt load made us not make any | 600 | * heavy interrupt load made us not make any |
529 | * progress during the timeout.. | 601 | * progress during the timeout.. |
530 | */ | 602 | */ |
531 | stat = ide_read_status(drive); | 603 | stat = tp_ops->read_status(hwif); |
532 | if (!(stat & BUSY_STAT)) | 604 | if (!(stat & BUSY_STAT)) |
533 | break; | 605 | break; |
534 | 606 | ||
@@ -548,7 +620,7 @@ static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, unsigned long ti | |||
548 | */ | 620 | */ |
549 | for (i = 0; i < 10; i++) { | 621 | for (i = 0; i < 10; i++) { |
550 | udelay(1); | 622 | udelay(1); |
551 | stat = ide_read_status(drive); | 623 | stat = tp_ops->read_status(hwif); |
552 | 624 | ||
553 | if (OK_STAT(stat, good, bad)) { | 625 | if (OK_STAT(stat, good, bad)) { |
554 | *rstat = stat; | 626 | *rstat = stat; |
@@ -674,6 +746,7 @@ no_80w: | |||
674 | int ide_driveid_update(ide_drive_t *drive) | 746 | int ide_driveid_update(ide_drive_t *drive) |
675 | { | 747 | { |
676 | ide_hwif_t *hwif = drive->hwif; | 748 | ide_hwif_t *hwif = drive->hwif; |
749 | const struct ide_tp_ops *tp_ops = hwif->tp_ops; | ||
677 | struct hd_driveid *id; | 750 | struct hd_driveid *id; |
678 | unsigned long timeout, flags; | 751 | unsigned long timeout, flags; |
679 | u8 stat; | 752 | u8 stat; |
@@ -684,9 +757,9 @@ int ide_driveid_update(ide_drive_t *drive) | |||
684 | */ | 757 | */ |
685 | 758 | ||
686 | SELECT_MASK(drive, 1); | 759 | SELECT_MASK(drive, 1); |
687 | ide_set_irq(drive, 0); | 760 | tp_ops->set_irq(hwif, 0); |
688 | msleep(50); | 761 | msleep(50); |
689 | hwif->OUTBSYNC(hwif, WIN_IDENTIFY, hwif->io_ports.command_addr); | 762 | tp_ops->exec_command(hwif, WIN_IDENTIFY); |
690 | timeout = jiffies + WAIT_WORSTCASE; | 763 | timeout = jiffies + WAIT_WORSTCASE; |
691 | do { | 764 | do { |
692 | if (time_after(jiffies, timeout)) { | 765 | if (time_after(jiffies, timeout)) { |
@@ -695,11 +768,11 @@ int ide_driveid_update(ide_drive_t *drive) | |||
695 | } | 768 | } |
696 | 769 | ||
697 | msleep(50); /* give drive a breather */ | 770 | msleep(50); /* give drive a breather */ |
698 | stat = ide_read_altstatus(drive); | 771 | stat = tp_ops->read_altstatus(hwif); |
699 | } while (stat & BUSY_STAT); | 772 | } while (stat & BUSY_STAT); |
700 | 773 | ||
701 | msleep(50); /* wait for IRQ and DRQ_STAT */ | 774 | msleep(50); /* wait for IRQ and DRQ_STAT */ |
702 | stat = ide_read_status(drive); | 775 | stat = tp_ops->read_status(hwif); |
703 | 776 | ||
704 | if (!OK_STAT(stat, DRQ_STAT, BAD_R_STAT)) { | 777 | if (!OK_STAT(stat, DRQ_STAT, BAD_R_STAT)) { |
705 | SELECT_MASK(drive, 0); | 778 | SELECT_MASK(drive, 0); |
@@ -713,8 +786,8 @@ int ide_driveid_update(ide_drive_t *drive) | |||
713 | local_irq_restore(flags); | 786 | local_irq_restore(flags); |
714 | return 0; | 787 | return 0; |
715 | } | 788 | } |
716 | hwif->input_data(drive, NULL, id, SECTOR_SIZE); | 789 | tp_ops->input_data(drive, NULL, id, SECTOR_SIZE); |
717 | (void)ide_read_status(drive); /* clear drive IRQ */ | 790 | (void)tp_ops->read_status(hwif); /* clear drive IRQ */ |
718 | local_irq_enable(); | 791 | local_irq_enable(); |
719 | local_irq_restore(flags); | 792 | local_irq_restore(flags); |
720 | ide_fix_driveid(id); | 793 | ide_fix_driveid(id); |
@@ -735,9 +808,10 @@ int ide_driveid_update(ide_drive_t *drive) | |||
735 | int ide_config_drive_speed(ide_drive_t *drive, u8 speed) | 808 | int ide_config_drive_speed(ide_drive_t *drive, u8 speed) |
736 | { | 809 | { |
737 | ide_hwif_t *hwif = drive->hwif; | 810 | ide_hwif_t *hwif = drive->hwif; |
738 | struct ide_io_ports *io_ports = &hwif->io_ports; | 811 | const struct ide_tp_ops *tp_ops = hwif->tp_ops; |
739 | int error = 0; | 812 | int error = 0; |
740 | u8 stat; | 813 | u8 stat; |
814 | ide_task_t task; | ||
741 | 815 | ||
742 | #ifdef CONFIG_BLK_DEV_IDEDMA | 816 | #ifdef CONFIG_BLK_DEV_IDEDMA |
743 | if (hwif->dma_ops) /* check if host supports DMA */ | 817 | if (hwif->dma_ops) /* check if host supports DMA */ |
@@ -770,12 +844,19 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed) | |||
770 | SELECT_DRIVE(drive); | 844 | SELECT_DRIVE(drive); |
771 | SELECT_MASK(drive, 0); | 845 | SELECT_MASK(drive, 0); |
772 | udelay(1); | 846 | udelay(1); |
773 | ide_set_irq(drive, 0); | 847 | tp_ops->set_irq(hwif, 0); |
774 | hwif->OUTB(speed, io_ports->nsect_addr); | 848 | |
775 | hwif->OUTB(SETFEATURES_XFER, io_ports->feature_addr); | 849 | memset(&task, 0, sizeof(task)); |
776 | hwif->OUTBSYNC(hwif, WIN_SETFEATURES, io_ports->command_addr); | 850 | task.tf_flags = IDE_TFLAG_OUT_FEATURE | IDE_TFLAG_OUT_NSECT; |
851 | task.tf.feature = SETFEATURES_XFER; | ||
852 | task.tf.nsect = speed; | ||
853 | |||
854 | tp_ops->tf_load(drive, &task); | ||
855 | |||
856 | tp_ops->exec_command(hwif, WIN_SETFEATURES); | ||
857 | |||
777 | if (drive->quirk_list == 2) | 858 | if (drive->quirk_list == 2) |
778 | ide_set_irq(drive, 1); | 859 | tp_ops->set_irq(hwif, 1); |
779 | 860 | ||
780 | error = __ide_wait_stat(drive, drive->ready_stat, | 861 | error = __ide_wait_stat(drive, drive->ready_stat, |
781 | BUSY_STAT|DRQ_STAT|ERR_STAT, | 862 | BUSY_STAT|DRQ_STAT|ERR_STAT, |
@@ -796,8 +877,7 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed) | |||
796 | 877 | ||
797 | skip: | 878 | skip: |
798 | #ifdef CONFIG_BLK_DEV_IDEDMA | 879 | #ifdef CONFIG_BLK_DEV_IDEDMA |
799 | if ((speed >= XFER_SW_DMA_0 || (hwif->host_flags & IDE_HFLAG_VDMA)) && | 880 | if (speed >= XFER_SW_DMA_0 && drive->using_dma) |
800 | drive->using_dma) | ||
801 | hwif->dma_ops->dma_host_set(drive, 1); | 881 | hwif->dma_ops->dma_host_set(drive, 1); |
802 | else if (hwif->dma_ops) /* check if host supports DMA */ | 882 | else if (hwif->dma_ops) /* check if host supports DMA */ |
803 | ide_dma_off_quietly(drive); | 883 | ide_dma_off_quietly(drive); |
@@ -881,7 +961,7 @@ void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler, | |||
881 | 961 | ||
882 | spin_lock_irqsave(&ide_lock, flags); | 962 | spin_lock_irqsave(&ide_lock, flags); |
883 | __ide_set_handler(drive, handler, timeout, expiry); | 963 | __ide_set_handler(drive, handler, timeout, expiry); |
884 | hwif->OUTBSYNC(hwif, cmd, hwif->io_ports.command_addr); | 964 | hwif->tp_ops->exec_command(hwif, cmd); |
885 | /* | 965 | /* |
886 | * Drive takes 400nS to respond, we must avoid the IRQ being | 966 | * Drive takes 400nS to respond, we must avoid the IRQ being |
887 | * serviced before that. | 967 | * serviced before that. |
@@ -899,7 +979,7 @@ void ide_execute_pkt_cmd(ide_drive_t *drive) | |||
899 | unsigned long flags; | 979 | unsigned long flags; |
900 | 980 | ||
901 | spin_lock_irqsave(&ide_lock, flags); | 981 | spin_lock_irqsave(&ide_lock, flags); |
902 | hwif->OUTBSYNC(hwif, WIN_PACKETCMD, hwif->io_ports.command_addr); | 982 | hwif->tp_ops->exec_command(hwif, WIN_PACKETCMD); |
903 | ndelay(400); | 983 | ndelay(400); |
904 | spin_unlock_irqrestore(&ide_lock, flags); | 984 | spin_unlock_irqrestore(&ide_lock, flags); |
905 | } | 985 | } |
@@ -924,12 +1004,13 @@ static ide_startstop_t do_reset1 (ide_drive_t *, int); | |||
924 | */ | 1004 | */ |
925 | static ide_startstop_t atapi_reset_pollfunc (ide_drive_t *drive) | 1005 | static ide_startstop_t atapi_reset_pollfunc (ide_drive_t *drive) |
926 | { | 1006 | { |
927 | ide_hwgroup_t *hwgroup = HWGROUP(drive); | 1007 | ide_hwif_t *hwif = drive->hwif; |
1008 | ide_hwgroup_t *hwgroup = hwif->hwgroup; | ||
928 | u8 stat; | 1009 | u8 stat; |
929 | 1010 | ||
930 | SELECT_DRIVE(drive); | 1011 | SELECT_DRIVE(drive); |
931 | udelay (10); | 1012 | udelay (10); |
932 | stat = ide_read_status(drive); | 1013 | stat = hwif->tp_ops->read_status(hwif); |
933 | 1014 | ||
934 | if (OK_STAT(stat, 0, BUSY_STAT)) | 1015 | if (OK_STAT(stat, 0, BUSY_STAT)) |
935 | printk("%s: ATAPI reset complete\n", drive->name); | 1016 | printk("%s: ATAPI reset complete\n", drive->name); |
@@ -975,7 +1056,7 @@ static ide_startstop_t reset_pollfunc (ide_drive_t *drive) | |||
975 | } | 1056 | } |
976 | } | 1057 | } |
977 | 1058 | ||
978 | tmp = ide_read_status(drive); | 1059 | tmp = hwif->tp_ops->read_status(hwif); |
979 | 1060 | ||
980 | if (!OK_STAT(tmp, 0, BUSY_STAT)) { | 1061 | if (!OK_STAT(tmp, 0, BUSY_STAT)) { |
981 | if (time_before(jiffies, hwgroup->poll_timeout)) { | 1062 | if (time_before(jiffies, hwgroup->poll_timeout)) { |
@@ -1089,8 +1170,8 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi) | |||
1089 | ide_hwif_t *hwif; | 1170 | ide_hwif_t *hwif; |
1090 | ide_hwgroup_t *hwgroup; | 1171 | ide_hwgroup_t *hwgroup; |
1091 | struct ide_io_ports *io_ports; | 1172 | struct ide_io_ports *io_ports; |
1173 | const struct ide_tp_ops *tp_ops; | ||
1092 | const struct ide_port_ops *port_ops; | 1174 | const struct ide_port_ops *port_ops; |
1093 | u8 ctl; | ||
1094 | 1175 | ||
1095 | spin_lock_irqsave(&ide_lock, flags); | 1176 | spin_lock_irqsave(&ide_lock, flags); |
1096 | hwif = HWIF(drive); | 1177 | hwif = HWIF(drive); |
@@ -1098,6 +1179,8 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi) | |||
1098 | 1179 | ||
1099 | io_ports = &hwif->io_ports; | 1180 | io_ports = &hwif->io_ports; |
1100 | 1181 | ||
1182 | tp_ops = hwif->tp_ops; | ||
1183 | |||
1101 | /* We must not reset with running handlers */ | 1184 | /* We must not reset with running handlers */ |
1102 | BUG_ON(hwgroup->handler != NULL); | 1185 | BUG_ON(hwgroup->handler != NULL); |
1103 | 1186 | ||
@@ -1106,7 +1189,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi) | |||
1106 | pre_reset(drive); | 1189 | pre_reset(drive); |
1107 | SELECT_DRIVE(drive); | 1190 | SELECT_DRIVE(drive); |
1108 | udelay (20); | 1191 | udelay (20); |
1109 | hwif->OUTBSYNC(hwif, WIN_SRST, io_ports->command_addr); | 1192 | tp_ops->exec_command(hwif, WIN_SRST); |
1110 | ndelay(400); | 1193 | ndelay(400); |
1111 | hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE; | 1194 | hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE; |
1112 | hwgroup->polling = 1; | 1195 | hwgroup->polling = 1; |
@@ -1135,16 +1218,15 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi) | |||
1135 | * immediate interrupt due to the edge transition it produces. | 1218 | * immediate interrupt due to the edge transition it produces. |
1136 | * This single interrupt gives us a "fast poll" for drives that | 1219 | * This single interrupt gives us a "fast poll" for drives that |
1137 | * recover from reset very quickly, saving us the first 50ms wait time. | 1220 | * recover from reset very quickly, saving us the first 50ms wait time. |
1221 | * | ||
1222 | * TODO: add ->softreset method and stop abusing ->set_irq | ||
1138 | */ | 1223 | */ |
1139 | /* set SRST and nIEN */ | 1224 | /* set SRST and nIEN */ |
1140 | hwif->OUTBSYNC(hwif, ATA_DEVCTL_OBS | 6, io_ports->ctl_addr); | 1225 | tp_ops->set_irq(hwif, 4); |
1141 | /* more than enough time */ | 1226 | /* more than enough time */ |
1142 | udelay(10); | 1227 | udelay(10); |
1143 | if (drive->quirk_list == 2) | 1228 | /* clear SRST, leave nIEN (unless device is on the quirk list) */ |
1144 | ctl = ATA_DEVCTL_OBS; /* clear SRST and nIEN */ | 1229 | tp_ops->set_irq(hwif, drive->quirk_list == 2); |
1145 | else | ||
1146 | ctl = ATA_DEVCTL_OBS | 2; /* clear SRST, leave nIEN */ | ||
1147 | hwif->OUTBSYNC(hwif, ctl, io_ports->ctl_addr); | ||
1148 | /* more than enough time */ | 1230 | /* more than enough time */ |
1149 | udelay(10); | 1231 | udelay(10); |
1150 | hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE; | 1232 | hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE; |
@@ -1189,7 +1271,7 @@ int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout) | |||
1189 | * about locking issues (2.5 work ?). | 1271 | * about locking issues (2.5 work ?). |
1190 | */ | 1272 | */ |
1191 | mdelay(1); | 1273 | mdelay(1); |
1192 | stat = hwif->INB(hwif->io_ports.status_addr); | 1274 | stat = hwif->tp_ops->read_status(hwif); |
1193 | if ((stat & BUSY_STAT) == 0) | 1275 | if ((stat & BUSY_STAT) == 0) |
1194 | return 0; | 1276 | return 0; |
1195 | /* | 1277 | /* |
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c index 13af72f09ec4..97fefabea8b8 100644 --- a/drivers/ide/ide-lib.c +++ b/drivers/ide/ide-lib.c | |||
@@ -266,22 +266,11 @@ int ide_set_xfer_rate(ide_drive_t *drive, u8 rate) | |||
266 | 266 | ||
267 | rate = ide_rate_filter(drive, rate); | 267 | rate = ide_rate_filter(drive, rate); |
268 | 268 | ||
269 | BUG_ON(rate < XFER_PIO_0); | ||
270 | |||
269 | if (rate >= XFER_PIO_0 && rate <= XFER_PIO_5) | 271 | if (rate >= XFER_PIO_0 && rate <= XFER_PIO_5) |
270 | return ide_set_pio_mode(drive, rate); | 272 | return ide_set_pio_mode(drive, rate); |
271 | 273 | ||
272 | /* | ||
273 | * TODO: transfer modes 0x00-0x07 passed from the user-space are | ||
274 | * currently handled here which needs fixing (please note that such | ||
275 | * case could happen iff the transfer mode has already been set on | ||
276 | * the device by ide-proc.c::set_xfer_rate()). | ||
277 | */ | ||
278 | if (rate < XFER_PIO_0) { | ||
279 | if (hwif->host_flags & IDE_HFLAG_ABUSE_SET_DMA_MODE) | ||
280 | return ide_set_dma_mode(drive, rate); | ||
281 | else | ||
282 | return ide_config_drive_speed(drive, rate); | ||
283 | } | ||
284 | |||
285 | return ide_set_dma_mode(drive, rate); | 274 | return ide_set_dma_mode(drive, rate); |
286 | } | 275 | } |
287 | 276 | ||
@@ -336,7 +325,7 @@ static void ide_dump_sector(ide_drive_t *drive) | |||
336 | else | 325 | else |
337 | task.tf_flags = IDE_TFLAG_IN_LBA | IDE_TFLAG_IN_DEVICE; | 326 | task.tf_flags = IDE_TFLAG_IN_LBA | IDE_TFLAG_IN_DEVICE; |
338 | 327 | ||
339 | drive->hwif->tf_read(drive, &task); | 328 | drive->hwif->tp_ops->tf_read(drive, &task); |
340 | 329 | ||
341 | if (lba48 || (tf->device & ATA_LBA)) | 330 | if (lba48 || (tf->device & ATA_LBA)) |
342 | printk(", LBAsect=%llu", | 331 | printk(", LBAsect=%llu", |
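After the ide_set_xfer_rate() cleanup, transfer modes below XFER_PIO_0 are no longer accepted and the new BUG_ON() documents that contract: callers must pass a real PIO or DMA mode. A trivial sketch of a legal call from a hypothetical caller:

static int my_force_udma2(ide_drive_t *drive)
{
	/* anything below XFER_PIO_0 would now trip the BUG_ON() above */
	return ide_set_xfer_rate(drive, XFER_UDMA_2);
}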
diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c index 03f2ef5470a3..bac9b392b689 100644 --- a/drivers/ide/ide-pnp.c +++ b/drivers/ide/ide-pnp.c | |||
@@ -29,9 +29,10 @@ static struct pnp_device_id idepnp_devices[] = { | |||
29 | 29 | ||
30 | static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) | 30 | static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) |
31 | { | 31 | { |
32 | hw_regs_t hw; | 32 | struct ide_host *host; |
33 | ide_hwif_t *hwif; | ||
34 | unsigned long base, ctl; | 33 | unsigned long base, ctl; |
34 | int rc; | ||
35 | hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; | ||
35 | 36 | ||
36 | printk(KERN_INFO DRV_NAME ": generic PnP IDE interface\n"); | 37 | printk(KERN_INFO DRV_NAME ": generic PnP IDE interface\n"); |
37 | 38 | ||
@@ -59,31 +60,25 @@ static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) | |||
59 | hw.irq = pnp_irq(dev, 0); | 60 | hw.irq = pnp_irq(dev, 0); |
60 | hw.chipset = ide_generic; | 61 | hw.chipset = ide_generic; |
61 | 62 | ||
62 | hwif = ide_find_port(); | 63 | rc = ide_host_add(NULL, hws, &host); |
63 | if (hwif) { | 64 | if (rc) |
64 | u8 index = hwif->index; | 65 | goto out; |
65 | u8 idx[4] = { index, 0xff, 0xff, 0xff }; | ||
66 | 66 | ||
67 | ide_init_port_hw(hwif, &hw); | 67 | pnp_set_drvdata(dev, host); |
68 | |||
69 | pnp_set_drvdata(dev, hwif); | ||
70 | |||
71 | ide_device_add(idx, NULL); | ||
72 | |||
73 | return 0; | ||
74 | } | ||
75 | 68 | ||
69 | return 0; | ||
70 | out: | ||
76 | release_region(ctl, 1); | 71 | release_region(ctl, 1); |
77 | release_region(base, 8); | 72 | release_region(base, 8); |
78 | 73 | ||
79 | return -1; | 74 | return rc; |
80 | } | 75 | } |
81 | 76 | ||
82 | static void idepnp_remove(struct pnp_dev *dev) | 77 | static void idepnp_remove(struct pnp_dev *dev) |
83 | { | 78 | { |
84 | ide_hwif_t *hwif = pnp_get_drvdata(dev); | 79 | struct ide_host *host = pnp_get_drvdata(dev); |
85 | 80 | ||
86 | ide_unregister(hwif); | 81 | ide_host_remove(host); |
87 | 82 | ||
88 | release_region(pnp_port_start(dev, 1), 1); | 83 | release_region(pnp_port_start(dev, 1), 1); |
89 | release_region(pnp_port_start(dev, 0), 8); | 84 | release_region(pnp_port_start(dev, 0), 8); |
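(The converted PnP driver shows the whole new-style host driver pattern: fill one hw_regs_t per port, put the pointers into a four-slot hws[] array with NULL marking unused ports, and let ide_host_add() allocate, register and probe everything, keeping only the struct ide_host cookie for removal. A hypothetical single-port probe along the same lines:)

	/* hypothetical caller, mirroring the ide-pnp conversion above */
	static int example_probe(unsigned long base, unsigned long ctl, int irq,
				 struct ide_host **hostp)
	{
		hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };

		memset(&hw, 0, sizeof(hw));
		ide_std_init_ports(&hw, base, ctl);
		hw.irq = irq;
		hw.chipset = ide_generic;

		/* allocates the host and hwifs, registers and probes the port */
		return ide_host_add(NULL, hws, hostp);
	}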
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 235ebdb29b28..4aa76c453755 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c | |||
@@ -39,8 +39,6 @@ | |||
39 | #include <asm/uaccess.h> | 39 | #include <asm/uaccess.h> |
40 | #include <asm/io.h> | 40 | #include <asm/io.h> |
41 | 41 | ||
42 | static ide_hwif_t ide_hwifs[MAX_HWIFS]; /* master data repository */ | ||
43 | |||
44 | /** | 42 | /** |
45 | * generic_id - add a generic drive id | 43 | * generic_id - add a generic drive id |
46 | * @drive: drive to make an ID block for | 44 | * @drive: drive to make an ID block for |
@@ -126,7 +124,7 @@ static inline void do_identify (ide_drive_t *drive, u8 cmd) | |||
126 | 124 | ||
127 | id = drive->id; | 125 | id = drive->id; |
128 | /* read 512 bytes of id info */ | 126 | /* read 512 bytes of id info */ |
129 | hwif->input_data(drive, NULL, id, SECTOR_SIZE); | 127 | hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE); |
130 | 128 | ||
131 | drive->id_read = 1; | 129 | drive->id_read = 1; |
132 | local_irq_enable(); | 130 | local_irq_enable(); |
@@ -267,6 +265,7 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd) | |||
267 | { | 265 | { |
268 | ide_hwif_t *hwif = HWIF(drive); | 266 | ide_hwif_t *hwif = HWIF(drive); |
269 | struct ide_io_ports *io_ports = &hwif->io_ports; | 267 | struct ide_io_ports *io_ports = &hwif->io_ports; |
268 | const struct ide_tp_ops *tp_ops = hwif->tp_ops; | ||
270 | int use_altstatus = 0, rc; | 269 | int use_altstatus = 0, rc; |
271 | unsigned long timeout; | 270 | unsigned long timeout; |
272 | u8 s = 0, a = 0; | 271 | u8 s = 0, a = 0; |
@@ -275,8 +274,8 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd) | |||
275 | msleep(50); | 274 | msleep(50); |
276 | 275 | ||
277 | if (io_ports->ctl_addr) { | 276 | if (io_ports->ctl_addr) { |
278 | a = ide_read_altstatus(drive); | 277 | a = tp_ops->read_altstatus(hwif); |
279 | s = ide_read_status(drive); | 278 | s = tp_ops->read_status(hwif); |
280 | if ((a ^ s) & ~INDEX_STAT) | 279 | if ((a ^ s) & ~INDEX_STAT) |
281 | /* ancient Seagate drives, broken interfaces */ | 280 | /* ancient Seagate drives, broken interfaces */ |
282 | printk(KERN_INFO "%s: probing with STATUS(0x%02x) " | 281 | printk(KERN_INFO "%s: probing with STATUS(0x%02x) " |
@@ -290,12 +289,18 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd) | |||
290 | /* set features register for atapi | 289 | /* set features register for atapi |
291 | * identify command to be sure of reply | 290 | * identify command to be sure of reply |
292 | */ | 291 | */ |
293 | if ((cmd == WIN_PIDENTIFY)) | 292 | if (cmd == WIN_PIDENTIFY) { |
294 | /* disable dma & overlap */ | 293 | ide_task_t task; |
295 | hwif->OUTB(0, io_ports->feature_addr); | 294 | |
295 | memset(&task, 0, sizeof(task)); | ||
296 | /* disable DMA & overlap */ | ||
297 | task.tf_flags = IDE_TFLAG_OUT_FEATURE; | ||
298 | |||
299 | tp_ops->tf_load(drive, &task); | ||
300 | } | ||
296 | 301 | ||
297 | /* ask drive for ID */ | 302 | /* ask drive for ID */ |
298 | hwif->OUTBSYNC(hwif, cmd, hwif->io_ports.command_addr); | 303 | tp_ops->exec_command(hwif, cmd); |
299 | 304 | ||
300 | timeout = ((cmd == WIN_IDENTIFY) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2; | 305 | timeout = ((cmd == WIN_IDENTIFY) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2; |
301 | timeout += jiffies; | 306 | timeout += jiffies; |
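(Loading the feature register through a zeroed ide_task_t with IDE_TFLAG_OUT_FEATURE writes 0x00 — DMA and overlap off — via whatever ->tf_load the host supplies, instead of poking io_ports->feature_addr directly. The same pattern as a stand-alone helper, with a hypothetical name:)

	static void example_clear_feature(ide_drive_t *drive)
	{
		ide_task_t task;

		memset(&task, 0, sizeof(task));
		task.tf_flags = IDE_TFLAG_OUT_FEATURE;	/* tf.feature stays 0x00 */

		drive->hwif->tp_ops->tf_load(drive, &task);
	}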
@@ -306,13 +311,13 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd) | |||
306 | } | 311 | } |
307 | /* give drive a breather */ | 312 | /* give drive a breather */ |
308 | msleep(50); | 313 | msleep(50); |
309 | s = use_altstatus ? ide_read_altstatus(drive) | 314 | s = use_altstatus ? tp_ops->read_altstatus(hwif) |
310 | : ide_read_status(drive); | 315 | : tp_ops->read_status(hwif); |
311 | } while (s & BUSY_STAT); | 316 | } while (s & BUSY_STAT); |
312 | 317 | ||
313 | /* wait for IRQ and DRQ_STAT */ | 318 | /* wait for IRQ and DRQ_STAT */ |
314 | msleep(50); | 319 | msleep(50); |
315 | s = ide_read_status(drive); | 320 | s = tp_ops->read_status(hwif); |
316 | 321 | ||
317 | if (OK_STAT(s, DRQ_STAT, BAD_R_STAT)) { | 322 | if (OK_STAT(s, DRQ_STAT, BAD_R_STAT)) { |
318 | unsigned long flags; | 323 | unsigned long flags; |
@@ -324,7 +329,7 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd) | |||
324 | /* drive responded with ID */ | 329 | /* drive responded with ID */ |
325 | rc = 0; | 330 | rc = 0; |
326 | /* clear drive IRQ */ | 331 | /* clear drive IRQ */ |
327 | (void)ide_read_status(drive); | 332 | (void)tp_ops->read_status(hwif); |
328 | local_irq_restore(flags); | 333 | local_irq_restore(flags); |
329 | } else { | 334 | } else { |
330 | /* drive refused ID */ | 335 | /* drive refused ID */ |
@@ -346,6 +351,7 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd) | |||
346 | static int try_to_identify (ide_drive_t *drive, u8 cmd) | 351 | static int try_to_identify (ide_drive_t *drive, u8 cmd) |
347 | { | 352 | { |
348 | ide_hwif_t *hwif = HWIF(drive); | 353 | ide_hwif_t *hwif = HWIF(drive); |
354 | const struct ide_tp_ops *tp_ops = hwif->tp_ops; | ||
349 | int retval; | 355 | int retval; |
350 | int autoprobe = 0; | 356 | int autoprobe = 0; |
351 | unsigned long cookie = 0; | 357 | unsigned long cookie = 0; |
@@ -361,7 +367,7 @@ static int try_to_identify (ide_drive_t *drive, u8 cmd) | |||
361 | autoprobe = 1; | 367 | autoprobe = 1; |
362 | cookie = probe_irq_on(); | 368 | cookie = probe_irq_on(); |
363 | } | 369 | } |
364 | ide_set_irq(drive, autoprobe); | 370 | tp_ops->set_irq(hwif, autoprobe); |
365 | } | 371 | } |
366 | 372 | ||
367 | retval = actual_try_to_identify(drive, cmd); | 373 | retval = actual_try_to_identify(drive, cmd); |
@@ -369,9 +375,9 @@ static int try_to_identify (ide_drive_t *drive, u8 cmd) | |||
369 | if (autoprobe) { | 375 | if (autoprobe) { |
370 | int irq; | 376 | int irq; |
371 | 377 | ||
372 | ide_set_irq(drive, 0); | 378 | tp_ops->set_irq(hwif, 0); |
373 | /* clear drive IRQ */ | 379 | /* clear drive IRQ */ |
374 | (void)ide_read_status(drive); | 380 | (void)tp_ops->read_status(hwif); |
375 | udelay(5); | 381 | udelay(5); |
376 | irq = probe_irq_off(cookie); | 382 | irq = probe_irq_off(cookie); |
377 | if (!hwif->irq) { | 383 | if (!hwif->irq) { |
@@ -396,7 +402,7 @@ static int ide_busy_sleep(ide_hwif_t *hwif) | |||
396 | 402 | ||
397 | do { | 403 | do { |
398 | msleep(50); | 404 | msleep(50); |
399 | stat = hwif->INB(hwif->io_ports.status_addr); | 405 | stat = hwif->tp_ops->read_status(hwif); |
400 | if ((stat & BUSY_STAT) == 0) | 406 | if ((stat & BUSY_STAT) == 0) |
401 | return 0; | 407 | return 0; |
402 | } while (time_before(jiffies, timeout)); | 408 | } while (time_before(jiffies, timeout)); |
@@ -404,6 +410,18 @@ static int ide_busy_sleep(ide_hwif_t *hwif) | |||
404 | return 1; | 410 | return 1; |
405 | } | 411 | } |
406 | 412 | ||
413 | static u8 ide_read_device(ide_drive_t *drive) | ||
414 | { | ||
415 | ide_task_t task; | ||
416 | |||
417 | memset(&task, 0, sizeof(task)); | ||
418 | task.tf_flags = IDE_TFLAG_IN_DEVICE; | ||
419 | |||
420 | drive->hwif->tp_ops->tf_read(drive, &task); | ||
421 | |||
422 | return task.tf.device; | ||
423 | } | ||
424 | |||
407 | /** | 425 | /** |
408 | * do_probe - probe an IDE device | 426 | * do_probe - probe an IDE device |
409 | * @drive: drive to probe | 427 | * @drive: drive to probe |
@@ -428,7 +446,7 @@ static int ide_busy_sleep(ide_hwif_t *hwif) | |||
428 | static int do_probe (ide_drive_t *drive, u8 cmd) | 446 | static int do_probe (ide_drive_t *drive, u8 cmd) |
429 | { | 447 | { |
430 | ide_hwif_t *hwif = HWIF(drive); | 448 | ide_hwif_t *hwif = HWIF(drive); |
431 | struct ide_io_ports *io_ports = &hwif->io_ports; | 449 | const struct ide_tp_ops *tp_ops = hwif->tp_ops; |
432 | int rc; | 450 | int rc; |
433 | u8 stat; | 451 | u8 stat; |
434 | 452 | ||
@@ -449,8 +467,8 @@ static int do_probe (ide_drive_t *drive, u8 cmd) | |||
449 | msleep(50); | 467 | msleep(50); |
450 | SELECT_DRIVE(drive); | 468 | SELECT_DRIVE(drive); |
451 | msleep(50); | 469 | msleep(50); |
452 | if (hwif->INB(io_ports->device_addr) != drive->select.all && | 470 | |
453 | !drive->present) { | 471 | if (ide_read_device(drive) != drive->select.all && !drive->present) { |
454 | if (drive->select.b.unit != 0) { | 472 | if (drive->select.b.unit != 0) { |
455 | /* exit with drive0 selected */ | 473 | /* exit with drive0 selected */ |
456 | SELECT_DRIVE(&hwif->drives[0]); | 474 | SELECT_DRIVE(&hwif->drives[0]); |
@@ -461,7 +479,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd) | |||
461 | return 3; | 479 | return 3; |
462 | } | 480 | } |
463 | 481 | ||
464 | stat = ide_read_status(drive); | 482 | stat = tp_ops->read_status(hwif); |
465 | 483 | ||
466 | if (OK_STAT(stat, READY_STAT, BUSY_STAT) || | 484 | if (OK_STAT(stat, READY_STAT, BUSY_STAT) || |
467 | drive->present || cmd == WIN_PIDENTIFY) { | 485 | drive->present || cmd == WIN_PIDENTIFY) { |
@@ -471,7 +489,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd) | |||
471 | rc = try_to_identify(drive,cmd); | 489 | rc = try_to_identify(drive,cmd); |
472 | } | 490 | } |
473 | 491 | ||
474 | stat = ide_read_status(drive); | 492 | stat = tp_ops->read_status(hwif); |
475 | 493 | ||
476 | if (stat == (BUSY_STAT | READY_STAT)) | 494 | if (stat == (BUSY_STAT | READY_STAT)) |
477 | return 4; | 495 | return 4; |
@@ -482,13 +500,13 @@ static int do_probe (ide_drive_t *drive, u8 cmd) | |||
482 | msleep(50); | 500 | msleep(50); |
483 | SELECT_DRIVE(drive); | 501 | SELECT_DRIVE(drive); |
484 | msleep(50); | 502 | msleep(50); |
485 | hwif->OUTBSYNC(hwif, WIN_SRST, io_ports->command_addr); | 503 | tp_ops->exec_command(hwif, WIN_SRST); |
486 | (void)ide_busy_sleep(hwif); | 504 | (void)ide_busy_sleep(hwif); |
487 | rc = try_to_identify(drive, cmd); | 505 | rc = try_to_identify(drive, cmd); |
488 | } | 506 | } |
489 | 507 | ||
490 | /* ensure drive IRQ is clear */ | 508 | /* ensure drive IRQ is clear */ |
491 | stat = ide_read_status(drive); | 509 | stat = tp_ops->read_status(hwif); |
492 | 510 | ||
493 | if (rc == 1) | 511 | if (rc == 1) |
494 | printk(KERN_ERR "%s: no response (status = 0x%02x)\n", | 512 | printk(KERN_ERR "%s: no response (status = 0x%02x)\n", |
@@ -502,7 +520,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd) | |||
502 | SELECT_DRIVE(&hwif->drives[0]); | 520 | SELECT_DRIVE(&hwif->drives[0]); |
503 | msleep(50); | 521 | msleep(50); |
504 | /* ensure drive irq is clear */ | 522 | /* ensure drive irq is clear */ |
505 | (void)ide_read_status(drive); | 523 | (void)tp_ops->read_status(hwif); |
506 | } | 524 | } |
507 | return rc; | 525 | return rc; |
508 | } | 526 | } |
@@ -513,12 +531,13 @@ static int do_probe (ide_drive_t *drive, u8 cmd) | |||
513 | static void enable_nest (ide_drive_t *drive) | 531 | static void enable_nest (ide_drive_t *drive) |
514 | { | 532 | { |
515 | ide_hwif_t *hwif = HWIF(drive); | 533 | ide_hwif_t *hwif = HWIF(drive); |
534 | const struct ide_tp_ops *tp_ops = hwif->tp_ops; | ||
516 | u8 stat; | 535 | u8 stat; |
517 | 536 | ||
518 | printk("%s: enabling %s -- ", hwif->name, drive->id->model); | 537 | printk("%s: enabling %s -- ", hwif->name, drive->id->model); |
519 | SELECT_DRIVE(drive); | 538 | SELECT_DRIVE(drive); |
520 | msleep(50); | 539 | msleep(50); |
521 | hwif->OUTBSYNC(hwif, EXABYTE_ENABLE_NEST, hwif->io_ports.command_addr); | 540 | tp_ops->exec_command(hwif, EXABYTE_ENABLE_NEST); |
522 | 541 | ||
523 | if (ide_busy_sleep(hwif)) { | 542 | if (ide_busy_sleep(hwif)) { |
524 | printk(KERN_CONT "failed (timeout)\n"); | 543 | printk(KERN_CONT "failed (timeout)\n"); |
@@ -527,7 +546,7 @@ static void enable_nest (ide_drive_t *drive) | |||
527 | 546 | ||
528 | msleep(50); | 547 | msleep(50); |
529 | 548 | ||
530 | stat = ide_read_status(drive); | 549 | stat = tp_ops->read_status(hwif); |
531 | 550 | ||
532 | if (!OK_STAT(stat, 0, BAD_STAT)) | 551 | if (!OK_STAT(stat, 0, BAD_STAT)) |
533 | printk(KERN_CONT "failed (status = 0x%02x)\n", stat); | 552 | printk(KERN_CONT "failed (status = 0x%02x)\n", stat); |
@@ -619,7 +638,7 @@ static inline u8 probe_for_drive (ide_drive_t *drive) | |||
619 | return drive->present; | 638 | return drive->present; |
620 | } | 639 | } |
621 | 640 | ||
622 | static void hwif_release_dev (struct device *dev) | 641 | static void hwif_release_dev(struct device *dev) |
623 | { | 642 | { |
624 | ide_hwif_t *hwif = container_of(dev, ide_hwif_t, gendev); | 643 | ide_hwif_t *hwif = container_of(dev, ide_hwif_t, gendev); |
625 | 644 | ||
@@ -709,7 +728,7 @@ static int ide_port_wait_ready(ide_hwif_t *hwif) | |||
709 | /* Ignore disks that we will not probe for later. */ | 728 | /* Ignore disks that we will not probe for later. */ |
710 | if (!drive->noprobe || drive->present) { | 729 | if (!drive->noprobe || drive->present) { |
711 | SELECT_DRIVE(drive); | 730 | SELECT_DRIVE(drive); |
712 | ide_set_irq(drive, 1); | 731 | hwif->tp_ops->set_irq(hwif, 1); |
713 | mdelay(2); | 732 | mdelay(2); |
714 | rc = ide_wait_not_busy(hwif, 35000); | 733 | rc = ide_wait_not_busy(hwif, 35000); |
715 | if (rc) | 734 | if (rc) |
@@ -971,6 +990,45 @@ static void ide_port_setup_devices(ide_hwif_t *hwif) | |||
971 | mutex_unlock(&ide_cfg_mtx); | 990 | mutex_unlock(&ide_cfg_mtx); |
972 | } | 991 | } |
973 | 992 | ||
993 | static ide_hwif_t *ide_ports[MAX_HWIFS]; | ||
994 | |||
995 | void ide_remove_port_from_hwgroup(ide_hwif_t *hwif) | ||
996 | { | ||
997 | ide_hwgroup_t *hwgroup = hwif->hwgroup; | ||
998 | |||
999 | ide_ports[hwif->index] = NULL; | ||
1000 | |||
1001 | spin_lock_irq(&ide_lock); | ||
1002 | /* | ||
1003 | * Remove us from the hwgroup, and free | ||
1004 | * the hwgroup if we were the only member | ||
1005 | */ | ||
1006 | if (hwif->next == hwif) { | ||
1007 | BUG_ON(hwgroup->hwif != hwif); | ||
1008 | kfree(hwgroup); | ||
1009 | } else { | ||
1010 | /* There is another interface in hwgroup. | ||
1011 | * Unlink us, and set hwgroup->drive and ->hwif to | ||
1012 | * something sane. | ||
1013 | */ | ||
1014 | ide_hwif_t *g = hwgroup->hwif; | ||
1015 | |||
1016 | while (g->next != hwif) | ||
1017 | g = g->next; | ||
1018 | g->next = hwif->next; | ||
1019 | if (hwgroup->hwif == hwif) { | ||
1020 | /* Choose a random hwif for hwgroup->hwif. ||
1021 | * It's guaranteed that there are no drives | ||
1022 | * left in the hwgroup. | ||
1023 | */ | ||
1024 | BUG_ON(hwgroup->drive != NULL); | ||
1025 | hwgroup->hwif = g; | ||
1026 | } | ||
1027 | BUG_ON(hwgroup->hwif == hwif); | ||
1028 | } | ||
1029 | spin_unlock_irq(&ide_lock); | ||
1030 | } | ||
1031 | |||
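(The unlink logic above depends on hwif->next forming a circular, singly linked list of all ports sharing the hwgroup. A short worked example with illustrative port names:)

	/*
	 * Ports A -> B -> C -> A share one hwgroup and hwgroup->hwif == B.
	 * Removing B:
	 *   - B->next != B, so the group itself survives;
	 *   - walk from hwgroup->hwif until g->next == B (g ends up at A)
	 *     and splice it out: A->next = C, leaving A -> C -> A;
	 *   - hwgroup->hwif == B, so repoint it at g (A); by now B has no
	 *     drives left, hence the BUG_ON(hwgroup->drive != NULL).
	 * Removing the last port (hwif->next == hwif) frees the hwgroup instead.
	 */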
974 | /* | 1032 | /* |
975 | * This routine sets up the irq for an ide interface, and creates a new | 1033 | * This routine sets up the irq for an ide interface, and creates a new |
976 | * hwgroup for the irq/hwif if none was previously assigned. | 1034 | * hwgroup for the irq/hwif if none was previously assigned. |
@@ -998,8 +1056,9 @@ static int init_irq (ide_hwif_t *hwif) | |||
998 | * Group up with any other hwifs that share our irq(s). | 1056 | * Group up with any other hwifs that share our irq(s). |
999 | */ | 1057 | */ |
1000 | for (index = 0; index < MAX_HWIFS; index++) { | 1058 | for (index = 0; index < MAX_HWIFS; index++) { |
1001 | ide_hwif_t *h = &ide_hwifs[index]; | 1059 | ide_hwif_t *h = ide_ports[index]; |
1002 | if (h->hwgroup) { /* scan only initialized hwif's */ | 1060 | |
1061 | if (h && h->hwgroup) { /* scan only initialized ports */ | ||
1003 | if (hwif->irq == h->irq) { | 1062 | if (hwif->irq == h->irq) { |
1004 | hwif->sharing_irq = h->sharing_irq = 1; | 1063 | hwif->sharing_irq = h->sharing_irq = 1; |
1005 | if (hwif->chipset != ide_pci || | 1064 | if (hwif->chipset != ide_pci || |
@@ -1053,6 +1112,8 @@ static int init_irq (ide_hwif_t *hwif) | |||
1053 | hwgroup->timer.data = (unsigned long) hwgroup; | 1112 | hwgroup->timer.data = (unsigned long) hwgroup; |
1054 | } | 1113 | } |
1055 | 1114 | ||
1115 | ide_ports[hwif->index] = hwif; | ||
1116 | |||
1056 | /* | 1117 | /* |
1057 | * Allocate the irq, if not already obtained for another hwif | 1118 | * Allocate the irq, if not already obtained for another hwif |
1058 | */ | 1119 | */ |
@@ -1066,8 +1127,7 @@ static int init_irq (ide_hwif_t *hwif) | |||
1066 | sa = IRQF_SHARED; | 1127 | sa = IRQF_SHARED; |
1067 | 1128 | ||
1068 | if (io_ports->ctl_addr) | 1129 | if (io_ports->ctl_addr) |
1069 | /* clear nIEN */ | 1130 | hwif->tp_ops->set_irq(hwif, 1); |
1070 | hwif->OUTBSYNC(hwif, ATA_DEVCTL_OBS, io_ports->ctl_addr); | ||
1071 | 1131 | ||
1072 | if (request_irq(hwif->irq,&ide_intr,sa,hwif->name,hwgroup)) | 1132 | if (request_irq(hwif->irq,&ide_intr,sa,hwif->name,hwgroup)) |
1073 | goto out_unlink; | 1133 | goto out_unlink; |
@@ -1345,6 +1405,9 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port, | |||
1345 | hwif->host_flags |= d->host_flags; | 1405 | hwif->host_flags |= d->host_flags; |
1346 | hwif->pio_mask = d->pio_mask; | 1406 | hwif->pio_mask = d->pio_mask; |
1347 | 1407 | ||
1408 | if (d->tp_ops) | ||
1409 | hwif->tp_ops = d->tp_ops; | ||
1410 | |||
1348 | /* ->set_pio_mode for DTC2278 is currently limited to port 0 */ | 1411 | /* ->set_pio_mode for DTC2278 is currently limited to port 0 */ |
1349 | if (hwif->chipset != ide_dtc2278 || hwif->channel == 0) | 1412 | if (hwif->chipset != ide_dtc2278 || hwif->channel == 0) |
1350 | hwif->port_ops = d->port_ops; | 1413 | hwif->port_ops = d->port_ops; |
@@ -1363,6 +1426,7 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port, | |||
1363 | 1426 | ||
1364 | if (rc < 0) { | 1427 | if (rc < 0) { |
1365 | printk(KERN_INFO "%s: DMA disabled\n", hwif->name); | 1428 | printk(KERN_INFO "%s: DMA disabled\n", hwif->name); |
1429 | hwif->dma_base = 0; | ||
1366 | hwif->swdma_mask = 0; | 1430 | hwif->swdma_mask = 0; |
1367 | hwif->mwdma_mask = 0; | 1431 | hwif->mwdma_mask = 0; |
1368 | hwif->ultra_mask = 0; | 1432 | hwif->ultra_mask = 0; |
@@ -1446,18 +1510,20 @@ static int ide_sysfs_register_port(ide_hwif_t *hwif) | |||
1446 | return rc; | 1510 | return rc; |
1447 | } | 1511 | } |
1448 | 1512 | ||
1513 | static unsigned int ide_indexes; | ||
1514 | |||
1449 | /** | 1515 | /** |
1450 | * ide_find_port_slot - find free ide_hwifs[] slot | 1516 | * ide_find_port_slot - find free port slot |
1451 | * @d: IDE port info | 1517 | * @d: IDE port info |
1452 | * | 1518 | * |
1453 | * Return the new hwif. If we are out of free slots return NULL. | 1519 | * Return the new port slot index or -ENOENT if we are out of free slots. |
1454 | */ | 1520 | */ |
1455 | 1521 | ||
1456 | ide_hwif_t *ide_find_port_slot(const struct ide_port_info *d) | 1522 | static int ide_find_port_slot(const struct ide_port_info *d) |
1457 | { | 1523 | { |
1458 | ide_hwif_t *hwif; | 1524 | int idx = -ENOENT; |
1459 | int i; | ||
1460 | u8 bootable = (d && (d->host_flags & IDE_HFLAG_NON_BOOTABLE)) ? 0 : 1; | 1525 | u8 bootable = (d && (d->host_flags & IDE_HFLAG_NON_BOOTABLE)) ? 0 : 1; |
1526 | u8 i = (d && (d->host_flags & IDE_HFLAG_QD_2ND_PORT)) ? 1 : 0; |
1461 | 1527 | ||
1462 | /* | 1528 | /* |
1463 | * Claim an unassigned slot. | 1529 | * Claim an unassigned slot. |
@@ -1469,51 +1535,106 @@ ide_hwif_t *ide_find_port_slot(const struct ide_port_info *d) | |||
1469 | * Unless there is a bootable card that does not use the standard | 1535 | * Unless there is a bootable card that does not use the standard |
1470 | * ports 0x1f0/0x170 (the ide0/ide1 defaults). | 1536 | * ports 0x1f0/0x170 (the ide0/ide1 defaults). |
1471 | */ | 1537 | */ |
1472 | if (bootable) { | 1538 | mutex_lock(&ide_cfg_mtx); |
1473 | i = (d && (d->host_flags & IDE_HFLAG_QD_2ND_PORT)) ? 1 : 0; | 1539 | if (MAX_HWIFS == 1) { |
1474 | 1540 | if (ide_indexes == 0 && i == 0) | |
1475 | for (; i < MAX_HWIFS; i++) { | 1541 | idx = 1; |
1476 | hwif = &ide_hwifs[i]; | ||
1477 | if (hwif->chipset == ide_unknown) | ||
1478 | goto out_found; | ||
1479 | } | ||
1480 | } else { | 1542 | } else { |
1481 | for (i = 2; i < MAX_HWIFS; i++) { | 1543 | if (bootable) { |
1482 | hwif = &ide_hwifs[i]; | 1544 | if ((ide_indexes | i) != (1 << MAX_HWIFS) - 1) |
1483 | if (hwif->chipset == ide_unknown) | 1545 | idx = ffz(ide_indexes | i); |
1484 | goto out_found; | 1546 | } else { |
1547 | if ((ide_indexes | 3) != (1 << MAX_HWIFS) - 1) | ||
1548 | idx = ffz(ide_indexes | 3); | ||
1549 | else if ((ide_indexes & 3) != 3) | ||
1550 | idx = ffz(ide_indexes); | ||
1485 | } | 1551 | } |
1486 | for (i = 0; i < 2 && i < MAX_HWIFS; i++) { | 1552 | } |
1487 | hwif = &ide_hwifs[i]; | 1553 | if (idx >= 0) |
1488 | if (hwif->chipset == ide_unknown) | 1554 | ide_indexes |= (1 << idx); |
1489 | goto out_found; | 1555 | mutex_unlock(&ide_cfg_mtx); |
1556 | |||
1557 | return idx; | ||
1558 | } | ||
1559 | |||
1560 | static void ide_free_port_slot(int idx) | ||
1561 | { | ||
1562 | mutex_lock(&ide_cfg_mtx); | ||
1563 | ide_indexes &= ~(1 << idx); | ||
1564 | mutex_unlock(&ide_cfg_mtx); | ||
1565 | } | ||
1566 | |||
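(ide_find_port_slot() now hands out indexes from the ide_indexes bitmap instead of scanning a static ide_hwifs[] array. A worked example of the bit arithmetic, assuming MAX_HWIFS == 4 for brevity:)

	/*
	 * ide_indexes == 0b0101  (slots 0 and 2 already claimed)
	 *
	 * bootable, i == 0:        ffz(0b0101 | 0) == 1  -> slot 1
	 * bootable, QD_2ND_PORT:   ffz(0b0101 | 1) == 1  -> slot 1
	 *                          (the |1 only forbids slot 0)
	 * non-bootable:            0b0101 | 3 == 0b0111 != 0b1111,
	 *                          ffz(0b0111) == 3      -> slot 3
	 *   slots 0 and 1 stay reserved for the legacy 0x1f0/0x170 ports and
	 *   are only handed out once every higher slot is taken.
	 */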
1567 | struct ide_host *ide_host_alloc_all(const struct ide_port_info *d, | ||
1568 | hw_regs_t **hws) | ||
1569 | { | ||
1570 | struct ide_host *host; | ||
1571 | int i; | ||
1572 | |||
1573 | host = kzalloc(sizeof(*host), GFP_KERNEL); | ||
1574 | if (host == NULL) | ||
1575 | return NULL; | ||
1576 | |||
1577 | for (i = 0; i < MAX_HWIFS; i++) { | ||
1578 | ide_hwif_t *hwif; | ||
1579 | int idx; | ||
1580 | |||
1581 | if (hws[i] == NULL) | ||
1582 | continue; | ||
1583 | |||
1584 | hwif = kzalloc(sizeof(*hwif), GFP_KERNEL); | ||
1585 | if (hwif == NULL) | ||
1586 | continue; | ||
1587 | |||
1588 | idx = ide_find_port_slot(d); | ||
1589 | if (idx < 0) { | ||
1590 | printk(KERN_ERR "%s: no free slot for interface\n", | ||
1591 | d ? d->name : "ide"); | ||
1592 | kfree(hwif); | ||
1593 | continue; | ||
1490 | } | 1594 | } |
1595 | |||
1596 | ide_init_port_data(hwif, idx); | ||
1597 | |||
1598 | host->ports[i] = hwif; | ||
1599 | host->n_ports++; | ||
1491 | } | 1600 | } |
1492 | 1601 | ||
1493 | printk(KERN_ERR "%s: no free slot for interface\n", | 1602 | if (host->n_ports == 0) { |
1494 | d ? d->name : "ide"); | 1603 | kfree(host); |
1604 | return NULL; | ||
1605 | } | ||
1495 | 1606 | ||
1496 | return NULL; | 1607 | return host; |
1608 | } | ||
1609 | EXPORT_SYMBOL_GPL(ide_host_alloc_all); | ||
1610 | |||
1611 | struct ide_host *ide_host_alloc(const struct ide_port_info *d, hw_regs_t **hws) | ||
1612 | { | ||
1613 | hw_regs_t *hws_all[MAX_HWIFS]; | ||
1614 | int i; | ||
1497 | 1615 | ||
1498 | out_found: | 1616 | for (i = 0; i < MAX_HWIFS; i++) |
1499 | ide_init_port_data(hwif, i); | 1617 | hws_all[i] = (i < 4) ? hws[i] : NULL; |
1500 | return hwif; | 1618 | |
1619 | return ide_host_alloc_all(d, hws_all); | ||
1501 | } | 1620 | } |
1502 | EXPORT_SYMBOL_GPL(ide_find_port_slot); | 1621 | EXPORT_SYMBOL_GPL(ide_host_alloc); |
1503 | 1622 | ||
1504 | int ide_device_add_all(u8 *idx, const struct ide_port_info *d) | 1623 | int ide_host_register(struct ide_host *host, const struct ide_port_info *d, |
1624 | hw_regs_t **hws) | ||
1505 | { | 1625 | { |
1506 | ide_hwif_t *hwif, *mate = NULL; | 1626 | ide_hwif_t *hwif, *mate = NULL; |
1507 | int i, rc = 0; | 1627 | int i, j = 0; |
1508 | 1628 | ||
1509 | for (i = 0; i < MAX_HWIFS; i++) { | 1629 | for (i = 0; i < MAX_HWIFS; i++) { |
1510 | if (idx[i] == 0xff) { | 1630 | hwif = host->ports[i]; |
1631 | |||
1632 | if (hwif == NULL) { | ||
1511 | mate = NULL; | 1633 | mate = NULL; |
1512 | continue; | 1634 | continue; |
1513 | } | 1635 | } |
1514 | 1636 | ||
1515 | hwif = &ide_hwifs[idx[i]]; | 1637 | ide_init_port_hw(hwif, hws[i]); |
1516 | |||
1517 | ide_port_apply_params(hwif); | 1638 | ide_port_apply_params(hwif); |
1518 | 1639 | ||
1519 | if (d == NULL) { | 1640 | if (d == NULL) { |
@@ -1534,10 +1655,10 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d) | |||
1534 | } | 1655 | } |
1535 | 1656 | ||
1536 | for (i = 0; i < MAX_HWIFS; i++) { | 1657 | for (i = 0; i < MAX_HWIFS; i++) { |
1537 | if (idx[i] == 0xff) | 1658 | hwif = host->ports[i]; |
1538 | continue; | ||
1539 | 1659 | ||
1540 | hwif = &ide_hwifs[idx[i]]; | 1660 | if (hwif == NULL) |
1661 | continue; | ||
1541 | 1662 | ||
1542 | if (ide_probe_port(hwif) == 0) | 1663 | if (ide_probe_port(hwif) == 0) |
1543 | hwif->present = 1; | 1664 | hwif->present = 1; |
@@ -1551,19 +1672,20 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d) | |||
1551 | } | 1672 | } |
1552 | 1673 | ||
1553 | for (i = 0; i < MAX_HWIFS; i++) { | 1674 | for (i = 0; i < MAX_HWIFS; i++) { |
1554 | if (idx[i] == 0xff) | 1675 | hwif = host->ports[i]; |
1555 | continue; | ||
1556 | 1676 | ||
1557 | hwif = &ide_hwifs[idx[i]]; | 1677 | if (hwif == NULL) |
1678 | continue; | ||
1558 | 1679 | ||
1559 | if (hwif_init(hwif) == 0) { | 1680 | if (hwif_init(hwif) == 0) { |
1560 | printk(KERN_INFO "%s: failed to initialize IDE " | 1681 | printk(KERN_INFO "%s: failed to initialize IDE " |
1561 | "interface\n", hwif->name); | 1682 | "interface\n", hwif->name); |
1562 | hwif->present = 0; | 1683 | hwif->present = 0; |
1563 | rc = -1; | ||
1564 | continue; | 1684 | continue; |
1565 | } | 1685 | } |
1566 | 1686 | ||
1687 | j++; | ||
1688 | |||
1567 | if (hwif->present) | 1689 | if (hwif->present) |
1568 | ide_port_setup_devices(hwif); | 1690 | ide_port_setup_devices(hwif); |
1569 | 1691 | ||
@@ -1574,10 +1696,10 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d) | |||
1574 | } | 1696 | } |
1575 | 1697 | ||
1576 | for (i = 0; i < MAX_HWIFS; i++) { | 1698 | for (i = 0; i < MAX_HWIFS; i++) { |
1577 | if (idx[i] == 0xff) | 1699 | hwif = host->ports[i]; |
1578 | continue; | ||
1579 | 1700 | ||
1580 | hwif = &ide_hwifs[idx[i]]; | 1701 | if (hwif == NULL) |
1702 | continue; | ||
1581 | 1703 | ||
1582 | if (hwif->chipset == ide_unknown) | 1704 | if (hwif->chipset == ide_unknown) |
1583 | hwif->chipset = ide_generic; | 1705 | hwif->chipset = ide_generic; |
@@ -1587,10 +1709,10 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d) | |||
1587 | } | 1709 | } |
1588 | 1710 | ||
1589 | for (i = 0; i < MAX_HWIFS; i++) { | 1711 | for (i = 0; i < MAX_HWIFS; i++) { |
1590 | if (idx[i] == 0xff) | 1712 | hwif = host->ports[i]; |
1591 | continue; | ||
1592 | 1713 | ||
1593 | hwif = &ide_hwifs[idx[i]]; | 1714 | if (hwif == NULL) |
1715 | continue; | ||
1594 | 1716 | ||
1595 | ide_sysfs_register_port(hwif); | 1717 | ide_sysfs_register_port(hwif); |
1596 | ide_proc_register_port(hwif); | 1718 | ide_proc_register_port(hwif); |
@@ -1599,21 +1721,64 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d) | |||
1599 | ide_proc_port_register_devices(hwif); | 1721 | ide_proc_port_register_devices(hwif); |
1600 | } | 1722 | } |
1601 | 1723 | ||
1602 | return rc; | 1724 | return j ? 0 : -1; |
1603 | } | 1725 | } |
1604 | EXPORT_SYMBOL_GPL(ide_device_add_all); | 1726 | EXPORT_SYMBOL_GPL(ide_host_register); |
1605 | 1727 | ||
1606 | int ide_device_add(u8 idx[4], const struct ide_port_info *d) | 1728 | int ide_host_add(const struct ide_port_info *d, hw_regs_t **hws, |
1729 | struct ide_host **hostp) | ||
1607 | { | 1730 | { |
1608 | u8 idx_all[MAX_HWIFS]; | 1731 | struct ide_host *host; |
1732 | int rc; | ||
1733 | |||
1734 | host = ide_host_alloc(d, hws); | ||
1735 | if (host == NULL) | ||
1736 | return -ENOMEM; | ||
1737 | |||
1738 | rc = ide_host_register(host, d, hws); | ||
1739 | if (rc) { | ||
1740 | ide_host_free(host); | ||
1741 | return rc; | ||
1742 | } | ||
1743 | |||
1744 | if (hostp) | ||
1745 | *hostp = host; | ||
1746 | |||
1747 | return 0; | ||
1748 | } | ||
1749 | EXPORT_SYMBOL_GPL(ide_host_add); | ||
1750 | |||
1751 | void ide_host_free(struct ide_host *host) | ||
1752 | { | ||
1753 | ide_hwif_t *hwif; | ||
1609 | int i; | 1754 | int i; |
1610 | 1755 | ||
1611 | for (i = 0; i < MAX_HWIFS; i++) | 1756 | for (i = 0; i < MAX_HWIFS; i++) { |
1612 | idx_all[i] = (i < 4) ? idx[i] : 0xff; | 1757 | hwif = host->ports[i]; |
1613 | 1758 | ||
1614 | return ide_device_add_all(idx_all, d); | 1759 | if (hwif == NULL) |
1760 | continue; | ||
1761 | |||
1762 | ide_free_port_slot(hwif->index); | ||
1763 | kfree(hwif); | ||
1764 | } | ||
1765 | |||
1766 | kfree(host); | ||
1615 | } | 1767 | } |
1616 | EXPORT_SYMBOL_GPL(ide_device_add); | 1768 | EXPORT_SYMBOL_GPL(ide_host_free); |
1769 | |||
1770 | void ide_host_remove(struct ide_host *host) | ||
1771 | { | ||
1772 | int i; | ||
1773 | |||
1774 | for (i = 0; i < MAX_HWIFS; i++) { | ||
1775 | if (host->ports[i]) | ||
1776 | ide_unregister(host->ports[i]); | ||
1777 | } | ||
1778 | |||
1779 | ide_host_free(host); | ||
1780 | } | ||
1781 | EXPORT_SYMBOL_GPL(ide_host_remove); | ||
1617 | 1782 | ||
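(Drivers that must touch the allocated hwifs before the devices are probed — extra host flags, private data, a custom ->tp_ops — are expected to use the two-step form that ide_host_add() wraps. A sketch with error handling abbreviated:)

	/* hypothetical caller of the two-step interface exported above */
	static int example_two_step(const struct ide_port_info *d, hw_regs_t **hws)
	{
		struct ide_host *host;
		int rc;

		host = ide_host_alloc(d, hws);		/* reserves slots, no probing yet */
		if (host == NULL)
			return -ENOMEM;

		/* ... adjust host->ports[i] here ... */

		rc = ide_host_register(host, d, hws);	/* probes and registers the ports */
		if (rc) {
			ide_host_free(host);		/* gives the slots and memory back */
			return rc;
		}

		return 0;
	}

(ide_host_add() above is exactly this sequence for callers with nothing to tweak in between.)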
1618 | void ide_port_scan(ide_hwif_t *hwif) | 1783 | void ide_port_scan(ide_hwif_t *hwif) |
1619 | { | 1784 | { |
@@ -1634,11 +1799,10 @@ void ide_port_scan(ide_hwif_t *hwif) | |||
1634 | } | 1799 | } |
1635 | EXPORT_SYMBOL_GPL(ide_port_scan); | 1800 | EXPORT_SYMBOL_GPL(ide_port_scan); |
1636 | 1801 | ||
1637 | static void ide_legacy_init_one(u8 *idx, hw_regs_t *hw, u8 port_no, | 1802 | static void ide_legacy_init_one(hw_regs_t **hws, hw_regs_t *hw, |
1638 | const struct ide_port_info *d, | 1803 | u8 port_no, const struct ide_port_info *d, |
1639 | unsigned long config) | 1804 | unsigned long config) |
1640 | { | 1805 | { |
1641 | ide_hwif_t *hwif; | ||
1642 | unsigned long base, ctl; | 1806 | unsigned long base, ctl; |
1643 | int irq; | 1807 | int irq; |
1644 | 1808 | ||
@@ -1668,33 +1832,25 @@ static void ide_legacy_init_one(u8 *idx, hw_regs_t *hw, u8 port_no, | |||
1668 | ide_std_init_ports(hw, base, ctl); | 1832 | ide_std_init_ports(hw, base, ctl); |
1669 | hw->irq = irq; | 1833 | hw->irq = irq; |
1670 | hw->chipset = d->chipset; | 1834 | hw->chipset = d->chipset; |
1835 | hw->config = config; | ||
1671 | 1836 | ||
1672 | hwif = ide_find_port_slot(d); | 1837 | hws[port_no] = hw; |
1673 | if (hwif) { | ||
1674 | ide_init_port_hw(hwif, hw); | ||
1675 | if (config) | ||
1676 | hwif->config_data = config; | ||
1677 | idx[port_no] = hwif->index; | ||
1678 | } | ||
1679 | } | 1838 | } |
1680 | 1839 | ||
1681 | int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config) | 1840 | int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config) |
1682 | { | 1841 | { |
1683 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | 1842 | hw_regs_t hw[2], *hws[] = { NULL, NULL, NULL, NULL }; |
1684 | hw_regs_t hw[2]; | ||
1685 | 1843 | ||
1686 | memset(&hw, 0, sizeof(hw)); | 1844 | memset(&hw, 0, sizeof(hw)); |
1687 | 1845 | ||
1688 | if ((d->host_flags & IDE_HFLAG_QD_2ND_PORT) == 0) | 1846 | if ((d->host_flags & IDE_HFLAG_QD_2ND_PORT) == 0) |
1689 | ide_legacy_init_one(idx, &hw[0], 0, d, config); | 1847 | ide_legacy_init_one(hws, &hw[0], 0, d, config); |
1690 | ide_legacy_init_one(idx, &hw[1], 1, d, config); | 1848 | ide_legacy_init_one(hws, &hw[1], 1, d, config); |
1691 | 1849 | ||
1692 | if (idx[0] == 0xff && idx[1] == 0xff && | 1850 | if (hws[0] == NULL && hws[1] == NULL && |
1693 | (d->host_flags & IDE_HFLAG_SINGLE)) | 1851 | (d->host_flags & IDE_HFLAG_SINGLE)) |
1694 | return -ENOENT; | 1852 | return -ENOENT; |
1695 | 1853 | ||
1696 | ide_device_add(idx, d); | 1854 | return ide_host_add(d, hws, NULL); |
1697 | |||
1698 | return 0; | ||
1699 | } | 1855 | } |
1700 | EXPORT_SYMBOL_GPL(ide_legacy_device_add); | 1856 | EXPORT_SYMBOL_GPL(ide_legacy_device_add); |
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c index 8af88bf0969b..151c91e933da 100644 --- a/drivers/ide/ide-proc.c +++ b/drivers/ide/ide-proc.c | |||
@@ -345,7 +345,7 @@ static int set_xfer_rate (ide_drive_t *drive, int arg) | |||
345 | ide_task_t task; | 345 | ide_task_t task; |
346 | int err; | 346 | int err; |
347 | 347 | ||
348 | if (arg < 0 || arg > 70) | 348 | if (arg < XFER_PIO_0 || arg > XFER_UDMA_6) |
349 | return -EINVAL; | 349 | return -EINVAL; |
350 | 350 | ||
351 | memset(&task, 0, sizeof(task)); | 351 | memset(&task, 0, sizeof(task)); |
@@ -357,7 +357,7 @@ static int set_xfer_rate (ide_drive_t *drive, int arg) | |||
357 | 357 | ||
358 | err = ide_no_data_taskfile(drive, &task); | 358 | err = ide_no_data_taskfile(drive, &task); |
359 | 359 | ||
360 | if (!err && arg) { | 360 | if (!err) { |
361 | ide_set_xfer_rate(drive, (u8) arg); | 361 | ide_set_xfer_rate(drive, (u8) arg); |
362 | ide_driveid_update(drive); | 362 | ide_driveid_update(drive); |
363 | } | 363 | } |
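(This tightened check is what allows the "modes 0x00-0x07 passed from user-space" special case to be dropped from ide_set_xfer_rate() in the ide-lib.c hunk above: only real transfer-mode IDs get through now. Numerically — the constants are the usual ATA set-transfer-mode values and are stated here as an assumption, not taken from this hunk:)

	/* old:  0 <= arg <= 70
	 * new:  XFER_PIO_0 (0x08 == 8) <= arg <= XFER_UDMA_6 (0x46 == 70)
	 * so the upper bound is unchanged, and the 0x00-0x07 range that
	 * ide-lib.c used to special-case can no longer reach ide_set_xfer_rate().
	 */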
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index 353dd11b9283..6962ca4891a1 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c | |||
@@ -195,23 +195,6 @@ enum { | |||
195 | #define IDETAPE_BLOCK_DESCRIPTOR 0 | 195 | #define IDETAPE_BLOCK_DESCRIPTOR 0 |
196 | #define IDETAPE_CAPABILITIES_PAGE 0x2a | 196 | #define IDETAPE_CAPABILITIES_PAGE 0x2a |
197 | 197 | ||
198 | /* Tape flag bits values. */ | ||
199 | enum { | ||
200 | IDETAPE_FLAG_IGNORE_DSC = (1 << 0), | ||
201 | /* 0 When the tape position is unknown */ | ||
202 | IDETAPE_FLAG_ADDRESS_VALID = (1 << 1), | ||
203 | /* Device already opened */ | ||
204 | IDETAPE_FLAG_BUSY = (1 << 2), | ||
205 | /* Attempt to auto-detect the current user block size */ | ||
206 | IDETAPE_FLAG_DETECT_BS = (1 << 3), | ||
207 | /* Currently on a filemark */ | ||
208 | IDETAPE_FLAG_FILEMARK = (1 << 4), | ||
209 | /* DRQ interrupt device */ | ||
210 | IDETAPE_FLAG_DRQ_INTERRUPT = (1 << 5), | ||
211 | /* 0 = no tape is loaded, so we don't rewind after ejecting */ | ||
212 | IDETAPE_FLAG_MEDIUM_PRESENT = (1 << 6), | ||
213 | }; | ||
214 | |||
215 | /* | 198 | /* |
216 | * Most of our global data which we need to save even as we leave the driver due | 199 | * Most of our global data which we need to save even as we leave the driver due |
217 | * to an interrupt or a timer event is stored in the struct defined below. | 200 | * to an interrupt or a timer event is stored in the struct defined below. |
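(The tape-private flag word removed here moves into the generic drive->atapi_flags, shared with the other ATAPI drivers. As used throughout the rest of this file the renaming is one-to-one; the IDE_AFLAG_* bits themselves are defined in <linux/ide.h> and are not part of this hunk:)

	/* old, per-tape (removed)        new, shared in drive->atapi_flags */
	/* IDETAPE_FLAG_IGNORE_DSC     -> IDE_AFLAG_IGNORE_DSC              */
	/* IDETAPE_FLAG_ADDRESS_VALID  -> IDE_AFLAG_ADDRESS_VALID           */
	/* IDETAPE_FLAG_BUSY           -> IDE_AFLAG_BUSY                    */
	/* IDETAPE_FLAG_DETECT_BS      -> IDE_AFLAG_DETECT_BS               */
	/* IDETAPE_FLAG_FILEMARK       -> IDE_AFLAG_FILEMARK                */
	/* IDETAPE_FLAG_DRQ_INTERRUPT  -> IDE_AFLAG_DRQ_INTERRUPT           */
	/* IDETAPE_FLAG_MEDIUM_PRESENT -> IDE_AFLAG_MEDIUM_PRESENT          */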
@@ -312,8 +295,6 @@ typedef struct ide_tape_obj { | |||
312 | /* Wasted space in each stage */ | 295 | /* Wasted space in each stage */ |
313 | int excess_bh_size; | 296 | int excess_bh_size; |
314 | 297 | ||
315 | /* Status/Action flags: long for set_bit */ | ||
316 | unsigned long flags; | ||
317 | /* protects the ide-tape queue */ | 298 | /* protects the ide-tape queue */ |
318 | spinlock_t lock; | 299 | spinlock_t lock; |
319 | 300 | ||
@@ -398,7 +379,7 @@ static void idetape_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc, | |||
398 | count = min( | 379 | count = min( |
399 | (unsigned int)(bh->b_size - atomic_read(&bh->b_count)), | 380 | (unsigned int)(bh->b_size - atomic_read(&bh->b_count)), |
400 | bcount); | 381 | bcount); |
401 | drive->hwif->input_data(drive, NULL, bh->b_data + | 382 | drive->hwif->tp_ops->input_data(drive, NULL, bh->b_data + |
402 | atomic_read(&bh->b_count), count); | 383 | atomic_read(&bh->b_count), count); |
403 | bcount -= count; | 384 | bcount -= count; |
404 | atomic_add(count, &bh->b_count); | 385 | atomic_add(count, &bh->b_count); |
@@ -424,7 +405,7 @@ static void idetape_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc, | |||
424 | return; | 405 | return; |
425 | } | 406 | } |
426 | count = min((unsigned int)pc->b_count, (unsigned int)bcount); | 407 | count = min((unsigned int)pc->b_count, (unsigned int)bcount); |
427 | drive->hwif->output_data(drive, NULL, pc->b_data, count); | 408 | drive->hwif->tp_ops->output_data(drive, NULL, pc->b_data, count); |
428 | bcount -= count; | 409 | bcount -= count; |
429 | pc->b_data += count; | 410 | pc->b_data += count; |
430 | pc->b_count -= count; | 411 | pc->b_count -= count; |
@@ -585,7 +566,6 @@ static void ide_tape_kfree_buffer(idetape_tape_t *tape) | |||
585 | bh = bh->b_reqnext; | 566 | bh = bh->b_reqnext; |
586 | kfree(prev_bh); | 567 | kfree(prev_bh); |
587 | } | 568 | } |
588 | kfree(tape->merge_bh); | ||
589 | } | 569 | } |
590 | 570 | ||
591 | static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects) | 571 | static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects) |
@@ -665,7 +645,7 @@ static void ide_tape_callback(ide_drive_t *drive) | |||
665 | if (readpos[0] & 0x4) { | 645 | if (readpos[0] & 0x4) { |
666 | printk(KERN_INFO "ide-tape: Block location is unknown" | 646 | printk(KERN_INFO "ide-tape: Block location is unknown" |
667 | "to the tape\n"); | 647 | "to the tape\n"); |
668 | clear_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags); | 648 | clear_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags); |
669 | uptodate = 0; | 649 | uptodate = 0; |
670 | } else { | 650 | } else { |
671 | debug_log(DBG_SENSE, "Block Location - %u\n", | 651 | debug_log(DBG_SENSE, "Block Location - %u\n", |
@@ -673,7 +653,7 @@ static void ide_tape_callback(ide_drive_t *drive) | |||
673 | 653 | ||
674 | tape->partition = readpos[1]; | 654 | tape->partition = readpos[1]; |
675 | tape->first_frame = be32_to_cpu(*(u32 *)&readpos[4]); | 655 | tape->first_frame = be32_to_cpu(*(u32 *)&readpos[4]); |
676 | set_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags); | 656 | set_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags); |
677 | } | 657 | } |
678 | } | 658 | } |
679 | 659 | ||
@@ -690,7 +670,6 @@ static void idetape_init_pc(struct ide_atapi_pc *pc) | |||
690 | pc->buf_size = IDETAPE_PC_BUFFER_SIZE; | 670 | pc->buf_size = IDETAPE_PC_BUFFER_SIZE; |
691 | pc->bh = NULL; | 671 | pc->bh = NULL; |
692 | pc->b_data = NULL; | 672 | pc->b_data = NULL; |
693 | pc->callback = ide_tape_callback; | ||
694 | } | 673 | } |
695 | 674 | ||
696 | static void idetape_create_request_sense_cmd(struct ide_atapi_pc *pc) | 675 | static void idetape_create_request_sense_cmd(struct ide_atapi_pc *pc) |
@@ -705,7 +684,7 @@ static void idetape_init_rq(struct request *rq, u8 cmd) | |||
705 | { | 684 | { |
706 | blk_rq_init(NULL, rq); | 685 | blk_rq_init(NULL, rq); |
707 | rq->cmd_type = REQ_TYPE_SPECIAL; | 686 | rq->cmd_type = REQ_TYPE_SPECIAL; |
708 | rq->cmd[0] = cmd; | 687 | rq->cmd[13] = cmd; |
709 | } | 688 | } |
710 | 689 | ||
711 | /* | 690 | /* |
@@ -732,6 +711,7 @@ static void idetape_queue_pc_head(ide_drive_t *drive, struct ide_atapi_pc *pc, | |||
732 | rq->cmd_flags |= REQ_PREEMPT; | 711 | rq->cmd_flags |= REQ_PREEMPT; |
733 | rq->buffer = (char *) pc; | 712 | rq->buffer = (char *) pc; |
734 | rq->rq_disk = tape->disk; | 713 | rq->rq_disk = tape->disk; |
714 | memcpy(rq->cmd, pc->c, 12); | ||
735 | ide_do_drive_cmd(drive, rq); | 715 | ide_do_drive_cmd(drive, rq); |
736 | } | 716 | } |
737 | 717 | ||
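(Together with the rq->cmd[0] -> rq->cmd[13] moves below, the memcpy() just added changes what an ide-tape request carries: the first twelve bytes of rq->cmd now hold the actual packet so generic code can inspect it, while the driver-private request-type bits move out of byte 0. A sketch of the assumed layout:)

	/*
	 * rq->cmd[] for an ide-tape REQ_TYPE_SPECIAL request (assumed):
	 *   [0..11]  copy of the packet command bytes, pc->c (memcpy above)
	 *   [13]     REQ_IDETAPE_PC1 / REQ_IDETAPE_PC2 / REQ_IDETAPE_READ /
	 *            REQ_IDETAPE_WRITE bits, now tested via rq->cmd[13]
	 */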
@@ -742,7 +722,6 @@ static void idetape_queue_pc_head(ide_drive_t *drive, struct ide_atapi_pc *pc, | |||
742 | */ | 722 | */ |
743 | static void idetape_retry_pc(ide_drive_t *drive) | 723 | static void idetape_retry_pc(ide_drive_t *drive) |
744 | { | 724 | { |
745 | idetape_tape_t *tape = drive->driver_data; | ||
746 | struct ide_atapi_pc *pc; | 725 | struct ide_atapi_pc *pc; |
747 | struct request *rq; | 726 | struct request *rq; |
748 | 727 | ||
@@ -750,7 +729,7 @@ static void idetape_retry_pc(ide_drive_t *drive) | |||
750 | pc = idetape_next_pc_storage(drive); | 729 | pc = idetape_next_pc_storage(drive); |
751 | rq = idetape_next_rq_storage(drive); | 730 | rq = idetape_next_rq_storage(drive); |
752 | idetape_create_request_sense_cmd(pc); | 731 | idetape_create_request_sense_cmd(pc); |
753 | set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags); | 732 | set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags); |
754 | idetape_queue_pc_head(drive, pc, rq); | 733 | idetape_queue_pc_head(drive, pc, rq); |
755 | } | 734 | } |
756 | 735 | ||
@@ -887,7 +866,7 @@ static ide_startstop_t idetape_issue_pc(ide_drive_t *drive, | |||
887 | pc->error = IDETAPE_ERROR_GENERAL; | 866 | pc->error = IDETAPE_ERROR_GENERAL; |
888 | } | 867 | } |
889 | tape->failed_pc = NULL; | 868 | tape->failed_pc = NULL; |
890 | pc->callback(drive); | 869 | drive->pc_callback(drive); |
891 | return ide_stopped; | 870 | return ide_stopped; |
892 | } | 871 | } |
893 | debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]); | 872 | debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]); |
@@ -927,11 +906,12 @@ static void idetape_create_mode_sense_cmd(struct ide_atapi_pc *pc, u8 page_code) | |||
927 | 906 | ||
928 | static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive) | 907 | static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive) |
929 | { | 908 | { |
909 | ide_hwif_t *hwif = drive->hwif; | ||
930 | idetape_tape_t *tape = drive->driver_data; | 910 | idetape_tape_t *tape = drive->driver_data; |
931 | struct ide_atapi_pc *pc = tape->pc; | 911 | struct ide_atapi_pc *pc = tape->pc; |
932 | u8 stat; | 912 | u8 stat; |
933 | 913 | ||
934 | stat = ide_read_status(drive); | 914 | stat = hwif->tp_ops->read_status(hwif); |
935 | 915 | ||
936 | if (stat & SEEK_STAT) { | 916 | if (stat & SEEK_STAT) { |
937 | if (stat & ERR_STAT) { | 917 | if (stat & ERR_STAT) { |
@@ -948,14 +928,17 @@ static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive) | |||
948 | pc->error = IDETAPE_ERROR_GENERAL; | 928 | pc->error = IDETAPE_ERROR_GENERAL; |
949 | tape->failed_pc = NULL; | 929 | tape->failed_pc = NULL; |
950 | } | 930 | } |
951 | pc->callback(drive); | 931 | drive->pc_callback(drive); |
952 | return ide_stopped; | 932 | return ide_stopped; |
953 | } | 933 | } |
954 | 934 | ||
955 | static void ide_tape_create_rw_cmd(idetape_tape_t *tape, | 935 | static void ide_tape_create_rw_cmd(idetape_tape_t *tape, |
956 | struct ide_atapi_pc *pc, unsigned int length, | 936 | struct ide_atapi_pc *pc, struct request *rq, |
957 | struct idetape_bh *bh, u8 opcode) | 937 | u8 opcode) |
958 | { | 938 | { |
939 | struct idetape_bh *bh = (struct idetape_bh *)rq->special; | ||
940 | unsigned int length = rq->current_nr_sectors; | ||
941 | |||
959 | idetape_init_pc(pc); | 942 | idetape_init_pc(pc); |
960 | put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]); | 943 | put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]); |
961 | pc->c[1] = 1; | 944 | pc->c[1] = 1; |
@@ -975,11 +958,14 @@ static void ide_tape_create_rw_cmd(idetape_tape_t *tape, | |||
975 | pc->b_data = bh->b_data; | 958 | pc->b_data = bh->b_data; |
976 | pc->b_count = atomic_read(&bh->b_count); | 959 | pc->b_count = atomic_read(&bh->b_count); |
977 | } | 960 | } |
961 | |||
962 | memcpy(rq->cmd, pc->c, 12); | ||
978 | } | 963 | } |
979 | 964 | ||
980 | static ide_startstop_t idetape_do_request(ide_drive_t *drive, | 965 | static ide_startstop_t idetape_do_request(ide_drive_t *drive, |
981 | struct request *rq, sector_t block) | 966 | struct request *rq, sector_t block) |
982 | { | 967 | { |
968 | ide_hwif_t *hwif = drive->hwif; | ||
983 | idetape_tape_t *tape = drive->driver_data; | 969 | idetape_tape_t *tape = drive->driver_data; |
984 | struct ide_atapi_pc *pc = NULL; | 970 | struct ide_atapi_pc *pc = NULL; |
985 | struct request *postponed_rq = tape->postponed_rq; | 971 | struct request *postponed_rq = tape->postponed_rq; |
@@ -1017,17 +1003,17 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive, | |||
1017 | * If the tape is still busy, postpone our request and service | 1003 | * If the tape is still busy, postpone our request and service |
1018 | * the other device meanwhile. | 1004 | * the other device meanwhile. |
1019 | */ | 1005 | */ |
1020 | stat = ide_read_status(drive); | 1006 | stat = hwif->tp_ops->read_status(hwif); |
1021 | 1007 | ||
1022 | if (!drive->dsc_overlap && !(rq->cmd[0] & REQ_IDETAPE_PC2)) | 1008 | if (!drive->dsc_overlap && !(rq->cmd[13] & REQ_IDETAPE_PC2)) |
1023 | set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags); | 1009 | set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags); |
1024 | 1010 | ||
1025 | if (drive->post_reset == 1) { | 1011 | if (drive->post_reset == 1) { |
1026 | set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags); | 1012 | set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags); |
1027 | drive->post_reset = 0; | 1013 | drive->post_reset = 0; |
1028 | } | 1014 | } |
1029 | 1015 | ||
1030 | if (!test_and_clear_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags) && | 1016 | if (!test_and_clear_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags) && |
1031 | (stat & SEEK_STAT) == 0) { | 1017 | (stat & SEEK_STAT) == 0) { |
1032 | if (postponed_rq == NULL) { | 1018 | if (postponed_rq == NULL) { |
1033 | tape->dsc_polling_start = jiffies; | 1019 | tape->dsc_polling_start = jiffies; |
@@ -1036,7 +1022,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive, | |||
1036 | } else if (time_after(jiffies, tape->dsc_timeout)) { | 1022 | } else if (time_after(jiffies, tape->dsc_timeout)) { |
1037 | printk(KERN_ERR "ide-tape: %s: DSC timeout\n", | 1023 | printk(KERN_ERR "ide-tape: %s: DSC timeout\n", |
1038 | tape->name); | 1024 | tape->name); |
1039 | if (rq->cmd[0] & REQ_IDETAPE_PC2) { | 1025 | if (rq->cmd[13] & REQ_IDETAPE_PC2) { |
1040 | idetape_media_access_finished(drive); | 1026 | idetape_media_access_finished(drive); |
1041 | return ide_stopped; | 1027 | return ide_stopped; |
1042 | } else { | 1028 | } else { |
@@ -1049,35 +1035,29 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive, | |||
1049 | idetape_postpone_request(drive); | 1035 | idetape_postpone_request(drive); |
1050 | return ide_stopped; | 1036 | return ide_stopped; |
1051 | } | 1037 | } |
1052 | if (rq->cmd[0] & REQ_IDETAPE_READ) { | 1038 | if (rq->cmd[13] & REQ_IDETAPE_READ) { |
1053 | pc = idetape_next_pc_storage(drive); | 1039 | pc = idetape_next_pc_storage(drive); |
1054 | ide_tape_create_rw_cmd(tape, pc, rq->current_nr_sectors, | 1040 | ide_tape_create_rw_cmd(tape, pc, rq, READ_6); |
1055 | (struct idetape_bh *)rq->special, | ||
1056 | READ_6); | ||
1057 | goto out; | 1041 | goto out; |
1058 | } | 1042 | } |
1059 | if (rq->cmd[0] & REQ_IDETAPE_WRITE) { | 1043 | if (rq->cmd[13] & REQ_IDETAPE_WRITE) { |
1060 | pc = idetape_next_pc_storage(drive); | 1044 | pc = idetape_next_pc_storage(drive); |
1061 | ide_tape_create_rw_cmd(tape, pc, rq->current_nr_sectors, | 1045 | ide_tape_create_rw_cmd(tape, pc, rq, WRITE_6); |
1062 | (struct idetape_bh *)rq->special, | ||
1063 | WRITE_6); | ||
1064 | goto out; | 1046 | goto out; |
1065 | } | 1047 | } |
1066 | if (rq->cmd[0] & REQ_IDETAPE_PC1) { | 1048 | if (rq->cmd[13] & REQ_IDETAPE_PC1) { |
1067 | pc = (struct ide_atapi_pc *) rq->buffer; | 1049 | pc = (struct ide_atapi_pc *) rq->buffer; |
1068 | rq->cmd[0] &= ~(REQ_IDETAPE_PC1); | 1050 | rq->cmd[13] &= ~(REQ_IDETAPE_PC1); |
1069 | rq->cmd[0] |= REQ_IDETAPE_PC2; | 1051 | rq->cmd[13] |= REQ_IDETAPE_PC2; |
1070 | goto out; | 1052 | goto out; |
1071 | } | 1053 | } |
1072 | if (rq->cmd[0] & REQ_IDETAPE_PC2) { | 1054 | if (rq->cmd[13] & REQ_IDETAPE_PC2) { |
1073 | idetape_media_access_finished(drive); | 1055 | idetape_media_access_finished(drive); |
1074 | return ide_stopped; | 1056 | return ide_stopped; |
1075 | } | 1057 | } |
1076 | BUG(); | 1058 | BUG(); |
1077 | out: | ||
1078 | if (test_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags)) | ||
1079 | pc->flags |= PC_FLAG_DRQ_INTERRUPT; | ||
1080 | 1059 | ||
1060 | out: | ||
1081 | return idetape_issue_pc(drive, pc); | 1061 | return idetape_issue_pc(drive, pc); |
1082 | } | 1062 | } |
1083 | 1063 | ||
@@ -1281,8 +1261,9 @@ static int idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc) | |||
1281 | 1261 | ||
1282 | rq = blk_get_request(drive->queue, READ, __GFP_WAIT); | 1262 | rq = blk_get_request(drive->queue, READ, __GFP_WAIT); |
1283 | rq->cmd_type = REQ_TYPE_SPECIAL; | 1263 | rq->cmd_type = REQ_TYPE_SPECIAL; |
1284 | rq->cmd[0] = REQ_IDETAPE_PC1; | 1264 | rq->cmd[13] = REQ_IDETAPE_PC1; |
1285 | rq->buffer = (char *)pc; | 1265 | rq->buffer = (char *)pc; |
1266 | memcpy(rq->cmd, pc->c, 12); | ||
1286 | error = blk_execute_rq(drive->queue, tape->disk, rq, 0); | 1267 | error = blk_execute_rq(drive->queue, tape->disk, rq, 0); |
1287 | blk_put_request(rq); | 1268 | blk_put_request(rq); |
1288 | return error; | 1269 | return error; |
@@ -1304,7 +1285,7 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout) | |||
1304 | int load_attempted = 0; | 1285 | int load_attempted = 0; |
1305 | 1286 | ||
1306 | /* Wait for the tape to become ready */ | 1287 | /* Wait for the tape to become ready */ |
1307 | set_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags); | 1288 | set_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags); |
1308 | timeout += jiffies; | 1289 | timeout += jiffies; |
1309 | while (time_before(jiffies, timeout)) { | 1290 | while (time_before(jiffies, timeout)) { |
1310 | idetape_create_test_unit_ready_cmd(&pc); | 1291 | idetape_create_test_unit_ready_cmd(&pc); |
@@ -1397,7 +1378,7 @@ static void __ide_tape_discard_merge_buffer(ide_drive_t *drive) | |||
1397 | if (tape->chrdev_dir != IDETAPE_DIR_READ) | 1378 | if (tape->chrdev_dir != IDETAPE_DIR_READ) |
1398 | return; | 1379 | return; |
1399 | 1380 | ||
1400 | clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags); | 1381 | clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags); |
1401 | tape->merge_bh_size = 0; | 1382 | tape->merge_bh_size = 0; |
1402 | if (tape->merge_bh != NULL) { | 1383 | if (tape->merge_bh != NULL) { |
1403 | ide_tape_kfree_buffer(tape); | 1384 | ide_tape_kfree_buffer(tape); |
@@ -1465,7 +1446,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks, | |||
1465 | 1446 | ||
1466 | rq = blk_get_request(drive->queue, READ, __GFP_WAIT); | 1447 | rq = blk_get_request(drive->queue, READ, __GFP_WAIT); |
1467 | rq->cmd_type = REQ_TYPE_SPECIAL; | 1448 | rq->cmd_type = REQ_TYPE_SPECIAL; |
1468 | rq->cmd[0] = cmd; | 1449 | rq->cmd[13] = cmd; |
1469 | rq->rq_disk = tape->disk; | 1450 | rq->rq_disk = tape->disk; |
1470 | rq->special = (void *)bh; | 1451 | rq->special = (void *)bh; |
1471 | rq->sector = tape->first_frame; | 1452 | rq->sector = tape->first_frame; |
@@ -1636,7 +1617,7 @@ static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks) | |||
1636 | debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks); | 1617 | debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks); |
1637 | 1618 | ||
1638 | /* If we are at a filemark, return a read length of 0 */ | 1619 | /* If we are at a filemark, return a read length of 0 */ |
1639 | if (test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) | 1620 | if (test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) |
1640 | return 0; | 1621 | return 0; |
1641 | 1622 | ||
1642 | idetape_init_read(drive); | 1623 | idetape_init_read(drive); |
@@ -1746,7 +1727,7 @@ static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op, | |||
1746 | 1727 | ||
1747 | if (tape->chrdev_dir == IDETAPE_DIR_READ) { | 1728 | if (tape->chrdev_dir == IDETAPE_DIR_READ) { |
1748 | tape->merge_bh_size = 0; | 1729 | tape->merge_bh_size = 0; |
1749 | if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) | 1730 | if (test_and_clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) |
1750 | ++count; | 1731 | ++count; |
1751 | ide_tape_discard_merge_buffer(drive, 0); | 1732 | ide_tape_discard_merge_buffer(drive, 0); |
1752 | } | 1733 | } |
@@ -1801,7 +1782,7 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf, | |||
1801 | debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count); | 1782 | debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count); |
1802 | 1783 | ||
1803 | if (tape->chrdev_dir != IDETAPE_DIR_READ) { | 1784 | if (tape->chrdev_dir != IDETAPE_DIR_READ) { |
1804 | if (test_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags)) | 1785 | if (test_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags)) |
1805 | if (count > tape->blk_size && | 1786 | if (count > tape->blk_size && |
1806 | (count % tape->blk_size) == 0) | 1787 | (count % tape->blk_size) == 0) |
1807 | tape->user_bs_factor = count / tape->blk_size; | 1788 | tape->user_bs_factor = count / tape->blk_size; |
@@ -1841,7 +1822,7 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf, | |||
1841 | tape->merge_bh_size = bytes_read-temp; | 1822 | tape->merge_bh_size = bytes_read-temp; |
1842 | } | 1823 | } |
1843 | finish: | 1824 | finish: |
1844 | if (!actually_read && test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) { | 1825 | if (!actually_read && test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) { |
1845 | debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name); | 1826 | debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name); |
1846 | 1827 | ||
1847 | idetape_space_over_filemarks(drive, MTFSF, 1); | 1828 | idetape_space_over_filemarks(drive, MTFSF, 1); |
@@ -2027,7 +2008,7 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count) | |||
2027 | !IDETAPE_LU_LOAD_MASK); | 2008 | !IDETAPE_LU_LOAD_MASK); |
2028 | retval = idetape_queue_pc_tail(drive, &pc); | 2009 | retval = idetape_queue_pc_tail(drive, &pc); |
2029 | if (!retval) | 2010 | if (!retval) |
2030 | clear_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags); | 2011 | clear_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags); |
2031 | return retval; | 2012 | return retval; |
2032 | case MTNOP: | 2013 | case MTNOP: |
2033 | ide_tape_discard_merge_buffer(drive, 0); | 2014 | ide_tape_discard_merge_buffer(drive, 0); |
@@ -2050,9 +2031,9 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count) | |||
2050 | mt_count % tape->blk_size) | 2031 | mt_count % tape->blk_size) |
2051 | return -EIO; | 2032 | return -EIO; |
2052 | tape->user_bs_factor = mt_count / tape->blk_size; | 2033 | tape->user_bs_factor = mt_count / tape->blk_size; |
2053 | clear_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags); | 2034 | clear_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags); |
2054 | } else | 2035 | } else |
2055 | set_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags); | 2036 | set_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags); |
2056 | return 0; | 2037 | return 0; |
2057 | case MTSEEK: | 2038 | case MTSEEK: |
2058 | ide_tape_discard_merge_buffer(drive, 0); | 2039 | ide_tape_discard_merge_buffer(drive, 0); |
@@ -2202,20 +2183,20 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp) | |||
2202 | 2183 | ||
2203 | filp->private_data = tape; | 2184 | filp->private_data = tape; |
2204 | 2185 | ||
2205 | if (test_and_set_bit(IDETAPE_FLAG_BUSY, &tape->flags)) { | 2186 | if (test_and_set_bit(IDE_AFLAG_BUSY, &drive->atapi_flags)) { |
2206 | retval = -EBUSY; | 2187 | retval = -EBUSY; |
2207 | goto out_put_tape; | 2188 | goto out_put_tape; |
2208 | } | 2189 | } |
2209 | 2190 | ||
2210 | retval = idetape_wait_ready(drive, 60 * HZ); | 2191 | retval = idetape_wait_ready(drive, 60 * HZ); |
2211 | if (retval) { | 2192 | if (retval) { |
2212 | clear_bit(IDETAPE_FLAG_BUSY, &tape->flags); | 2193 | clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags); |
2213 | printk(KERN_ERR "ide-tape: %s: drive not ready\n", tape->name); | 2194 | printk(KERN_ERR "ide-tape: %s: drive not ready\n", tape->name); |
2214 | goto out_put_tape; | 2195 | goto out_put_tape; |
2215 | } | 2196 | } |
2216 | 2197 | ||
2217 | idetape_read_position(drive); | 2198 | idetape_read_position(drive); |
2218 | if (!test_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags)) | 2199 | if (!test_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags)) |
2219 | (void)idetape_rewind_tape(drive); | 2200 | (void)idetape_rewind_tape(drive); |
2220 | 2201 | ||
2221 | /* Read block size and write protect status from drive. */ | 2202 | /* Read block size and write protect status from drive. */ |
@@ -2231,7 +2212,7 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp) | |||
2231 | if (tape->write_prot) { | 2212 | if (tape->write_prot) { |
2232 | if ((filp->f_flags & O_ACCMODE) == O_WRONLY || | 2213 | if ((filp->f_flags & O_ACCMODE) == O_WRONLY || |
2233 | (filp->f_flags & O_ACCMODE) == O_RDWR) { | 2214 | (filp->f_flags & O_ACCMODE) == O_RDWR) { |
2234 | clear_bit(IDETAPE_FLAG_BUSY, &tape->flags); | 2215 | clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags); |
2235 | retval = -EROFS; | 2216 | retval = -EROFS; |
2236 | goto out_put_tape; | 2217 | goto out_put_tape; |
2237 | } | 2218 | } |
@@ -2291,7 +2272,7 @@ static int idetape_chrdev_release(struct inode *inode, struct file *filp) | |||
2291 | ide_tape_discard_merge_buffer(drive, 1); | 2272 | ide_tape_discard_merge_buffer(drive, 1); |
2292 | } | 2273 | } |
2293 | 2274 | ||
2294 | if (minor < 128 && test_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags)) | 2275 | if (minor < 128 && test_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags)) |
2295 | (void) idetape_rewind_tape(drive); | 2276 | (void) idetape_rewind_tape(drive); |
2296 | if (tape->chrdev_dir == IDETAPE_DIR_NONE) { | 2277 | if (tape->chrdev_dir == IDETAPE_DIR_NONE) { |
2297 | if (tape->door_locked == DOOR_LOCKED) { | 2278 | if (tape->door_locked == DOOR_LOCKED) { |
@@ -2301,7 +2282,7 @@ static int idetape_chrdev_release(struct inode *inode, struct file *filp) | |||
2301 | } | 2282 | } |
2302 | } | 2283 | } |
2303 | } | 2284 | } |
2304 | clear_bit(IDETAPE_FLAG_BUSY, &tape->flags); | 2285 | clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags); |
2305 | ide_tape_put(tape); | 2286 | ide_tape_put(tape); |
2306 | unlock_kernel(); | 2287 | unlock_kernel(); |
2307 | return 0; | 2288 | return 0; |
@@ -2464,6 +2445,8 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor) | |||
2464 | u8 gcw[2]; | 2445 | u8 gcw[2]; |
2465 | u16 *ctl = (u16 *)&tape->caps[12]; | 2446 | u16 *ctl = (u16 *)&tape->caps[12]; |
2466 | 2447 | ||
2448 | drive->pc_callback = ide_tape_callback; | ||
2449 | |||
2467 | spin_lock_init(&tape->lock); | 2450 | spin_lock_init(&tape->lock); |
2468 | drive->dsc_overlap = 1; | 2451 | drive->dsc_overlap = 1; |
2469 | if (drive->hwif->host_flags & IDE_HFLAG_NO_DSC) { | 2452 | if (drive->hwif->host_flags & IDE_HFLAG_NO_DSC) { |
@@ -2484,7 +2467,7 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor) | |||
2484 | 2467 | ||
2485 | /* Command packet DRQ type */ | 2468 | /* Command packet DRQ type */ |
2486 | if (((gcw[0] & 0x60) >> 5) == 1) | 2469 | if (((gcw[0] & 0x60) >> 5) == 1) |
2487 | set_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags); | 2470 | set_bit(IDE_AFLAG_DRQ_INTERRUPT, &drive->atapi_flags); |
2488 | 2471 | ||
2489 | idetape_get_inquiry_results(drive); | 2472 | idetape_get_inquiry_results(drive); |
2490 | idetape_get_mode_sense_results(drive); | 2473 | idetape_get_mode_sense_results(drive); |
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c index 1fbdb746dc88..aeddbbd69e86 100644 --- a/drivers/ide/ide-taskfile.c +++ b/drivers/ide/ide-taskfile.c | |||
@@ -64,6 +64,7 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task) | |||
64 | ide_hwif_t *hwif = HWIF(drive); | 64 | ide_hwif_t *hwif = HWIF(drive); |
65 | struct ide_taskfile *tf = &task->tf; | 65 | struct ide_taskfile *tf = &task->tf; |
66 | ide_handler_t *handler = NULL; | 66 | ide_handler_t *handler = NULL; |
67 | const struct ide_tp_ops *tp_ops = hwif->tp_ops; | ||
67 | const struct ide_dma_ops *dma_ops = hwif->dma_ops; | 68 | const struct ide_dma_ops *dma_ops = hwif->dma_ops; |
68 | 69 | ||
69 | if (task->data_phase == TASKFILE_MULTI_IN || | 70 | if (task->data_phase == TASKFILE_MULTI_IN || |
@@ -80,15 +81,15 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task) | |||
80 | 81 | ||
81 | if ((task->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) { | 82 | if ((task->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) { |
82 | ide_tf_dump(drive->name, tf); | 83 | ide_tf_dump(drive->name, tf); |
83 | ide_set_irq(drive, 1); | 84 | tp_ops->set_irq(hwif, 1); |
84 | SELECT_MASK(drive, 0); | 85 | SELECT_MASK(drive, 0); |
85 | hwif->tf_load(drive, task); | 86 | tp_ops->tf_load(drive, task); |
86 | } | 87 | } |
87 | 88 | ||
88 | switch (task->data_phase) { | 89 | switch (task->data_phase) { |
89 | case TASKFILE_MULTI_OUT: | 90 | case TASKFILE_MULTI_OUT: |
90 | case TASKFILE_OUT: | 91 | case TASKFILE_OUT: |
91 | hwif->OUTBSYNC(hwif, tf->command, hwif->io_ports.command_addr); | 92 | tp_ops->exec_command(hwif, tf->command); |
92 | ndelay(400); /* FIXME */ | 93 | ndelay(400); /* FIXME */ |
93 | return pre_task_out_intr(drive, task->rq); | 94 | return pre_task_out_intr(drive, task->rq); |
94 | case TASKFILE_MULTI_IN: | 95 | case TASKFILE_MULTI_IN: |
@@ -124,7 +125,8 @@ EXPORT_SYMBOL_GPL(do_rw_taskfile); | |||
124 | */ | 125 | */ |
125 | static ide_startstop_t set_multmode_intr(ide_drive_t *drive) | 126 | static ide_startstop_t set_multmode_intr(ide_drive_t *drive) |
126 | { | 127 | { |
127 | u8 stat = ide_read_status(drive); | 128 | ide_hwif_t *hwif = drive->hwif; |
129 | u8 stat = hwif->tp_ops->read_status(hwif); | ||
128 | 130 | ||
129 | if (OK_STAT(stat, READY_STAT, BAD_STAT)) | 131 | if (OK_STAT(stat, READY_STAT, BAD_STAT)) |
130 | drive->mult_count = drive->mult_req; | 132 | drive->mult_count = drive->mult_req; |
@@ -141,11 +143,16 @@ static ide_startstop_t set_multmode_intr(ide_drive_t *drive) | |||
141 | */ | 143 | */ |
142 | static ide_startstop_t set_geometry_intr(ide_drive_t *drive) | 144 | static ide_startstop_t set_geometry_intr(ide_drive_t *drive) |
143 | { | 145 | { |
146 | ide_hwif_t *hwif = drive->hwif; | ||
144 | int retries = 5; | 147 | int retries = 5; |
145 | u8 stat; | 148 | u8 stat; |
146 | 149 | ||
147 | while (((stat = ide_read_status(drive)) & BUSY_STAT) && retries--) | 150 | while (1) { |
151 | stat = hwif->tp_ops->read_status(hwif); | ||
152 | if ((stat & BUSY_STAT) == 0 || retries-- == 0) | ||
153 | break; | ||
148 | udelay(10); | 154 | udelay(10); |
155 | }; | ||
149 | 156 | ||
150 | if (OK_STAT(stat, READY_STAT, BAD_STAT)) | 157 | if (OK_STAT(stat, READY_STAT, BAD_STAT)) |
151 | return ide_stopped; | 158 | return ide_stopped; |
@@ -162,7 +169,8 @@ static ide_startstop_t set_geometry_intr(ide_drive_t *drive) | |||
162 | */ | 169 | */ |
163 | static ide_startstop_t recal_intr(ide_drive_t *drive) | 170 | static ide_startstop_t recal_intr(ide_drive_t *drive) |
164 | { | 171 | { |
165 | u8 stat = ide_read_status(drive); | 172 | ide_hwif_t *hwif = drive->hwif; |
173 | u8 stat = hwif->tp_ops->read_status(hwif); | ||
166 | 174 | ||
167 | if (!OK_STAT(stat, READY_STAT, BAD_STAT)) | 175 | if (!OK_STAT(stat, READY_STAT, BAD_STAT)) |
168 | return ide_error(drive, "recal_intr", stat); | 176 | return ide_error(drive, "recal_intr", stat); |
@@ -174,11 +182,12 @@ static ide_startstop_t recal_intr(ide_drive_t *drive) | |||
174 | */ | 182 | */ |
175 | static ide_startstop_t task_no_data_intr(ide_drive_t *drive) | 183 | static ide_startstop_t task_no_data_intr(ide_drive_t *drive) |
176 | { | 184 | { |
177 | ide_task_t *args = HWGROUP(drive)->rq->special; | 185 | ide_hwif_t *hwif = drive->hwif; |
186 | ide_task_t *args = hwif->hwgroup->rq->special; | ||
178 | u8 stat; | 187 | u8 stat; |
179 | 188 | ||
180 | local_irq_enable_in_hardirq(); | 189 | local_irq_enable_in_hardirq(); |
181 | stat = ide_read_status(drive); | 190 | stat = hwif->tp_ops->read_status(hwif); |
182 | 191 | ||
183 | if (!OK_STAT(stat, READY_STAT, BAD_STAT)) | 192 | if (!OK_STAT(stat, READY_STAT, BAD_STAT)) |
184 | return ide_error(drive, "task_no_data_intr", stat); | 193 | return ide_error(drive, "task_no_data_intr", stat); |
@@ -192,6 +201,7 @@ static ide_startstop_t task_no_data_intr(ide_drive_t *drive) | |||
192 | 201 | ||
193 | static u8 wait_drive_not_busy(ide_drive_t *drive) | 202 | static u8 wait_drive_not_busy(ide_drive_t *drive) |
194 | { | 203 | { |
204 | ide_hwif_t *hwif = drive->hwif; | ||
195 | int retries; | 205 | int retries; |
196 | u8 stat; | 206 | u8 stat; |
197 | 207 | ||
@@ -200,7 +210,7 @@ static u8 wait_drive_not_busy(ide_drive_t *drive) | |||
200 | * take up to 6 ms on some ATAPI devices, so we will wait max 10 ms. | 210 | * take up to 6 ms on some ATAPI devices, so we will wait max 10 ms. |
201 | */ | 211 | */ |
202 | for (retries = 0; retries < 1000; retries++) { | 212 | for (retries = 0; retries < 1000; retries++) { |
203 | stat = ide_read_status(drive); | 213 | stat = hwif->tp_ops->read_status(hwif); |
204 | 214 | ||
205 | if (stat & BUSY_STAT) | 215 | if (stat & BUSY_STAT) |
206 | udelay(10); | 216 | udelay(10); |
@@ -255,9 +265,9 @@ static void ide_pio_sector(ide_drive_t *drive, struct request *rq, | |||
255 | 265 | ||
256 | /* do the actual data transfer */ | 266 | /* do the actual data transfer */ |
257 | if (write) | 267 | if (write) |
258 | hwif->output_data(drive, rq, buf, SECTOR_SIZE); | 268 | hwif->tp_ops->output_data(drive, rq, buf, SECTOR_SIZE); |
259 | else | 269 | else |
260 | hwif->input_data(drive, rq, buf, SECTOR_SIZE); | 270 | hwif->tp_ops->input_data(drive, rq, buf, SECTOR_SIZE); |
261 | 271 | ||
262 | kunmap_atomic(buf, KM_BIO_SRC_IRQ); | 272 | kunmap_atomic(buf, KM_BIO_SRC_IRQ); |
263 | #ifdef CONFIG_HIGHMEM | 273 | #ifdef CONFIG_HIGHMEM |
@@ -383,8 +393,8 @@ static ide_startstop_t task_in_unexpected(ide_drive_t *drive, struct request *rq | |||
383 | static ide_startstop_t task_in_intr(ide_drive_t *drive) | 393 | static ide_startstop_t task_in_intr(ide_drive_t *drive) |
384 | { | 394 | { |
385 | ide_hwif_t *hwif = drive->hwif; | 395 | ide_hwif_t *hwif = drive->hwif; |
386 | struct request *rq = HWGROUP(drive)->rq; | 396 | struct request *rq = hwif->hwgroup->rq; |
387 | u8 stat = ide_read_status(drive); | 397 | u8 stat = hwif->tp_ops->read_status(hwif); |
388 | 398 | ||
389 | /* Error? */ | 399 | /* Error? */ |
390 | if (stat & ERR_STAT) | 400 | if (stat & ERR_STAT) |
@@ -418,7 +428,7 @@ static ide_startstop_t task_out_intr (ide_drive_t *drive) | |||
418 | { | 428 | { |
419 | ide_hwif_t *hwif = drive->hwif; | 429 | ide_hwif_t *hwif = drive->hwif; |
420 | struct request *rq = HWGROUP(drive)->rq; | 430 | struct request *rq = HWGROUP(drive)->rq; |
421 | u8 stat = ide_read_status(drive); | 431 | u8 stat = hwif->tp_ops->read_status(hwif); |
422 | 432 | ||
423 | if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat)) | 433 | if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat)) |
424 | return task_error(drive, rq, __func__, stat); | 434 | return task_error(drive, rq, __func__, stat); |
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c index d4a6b102a772..60f0ca66aa93 100644 --- a/drivers/ide/ide.c +++ b/drivers/ide/ide.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 1994-1998 Linus Torvalds & authors (see below) | 2 | * Copyright (C) 1994-1998 Linus Torvalds & authors (see below) |
3 | * Copyrifht (C) 2003-2005, 2007 Bartlomiej Zolnierkiewicz | 3 | * Copyright (C) 2003-2005, 2007 Bartlomiej Zolnierkiewicz |
4 | */ | 4 | */ |
5 | 5 | ||
6 | /* | 6 | /* |
@@ -101,8 +101,7 @@ void ide_init_port_data(ide_hwif_t *hwif, unsigned int index) | |||
101 | 101 | ||
102 | init_completion(&hwif->gendev_rel_comp); | 102 | init_completion(&hwif->gendev_rel_comp); |
103 | 103 | ||
104 | default_hwif_iops(hwif); | 104 | hwif->tp_ops = &default_tp_ops; |
105 | default_hwif_transport(hwif); | ||
106 | 105 | ||
107 | ide_port_init_devices_data(hwif); | 106 | ide_port_init_devices_data(hwif); |
108 | } | 107 | } |
@@ -134,41 +133,6 @@ static void ide_port_init_devices_data(ide_hwif_t *hwif) | |||
134 | } | 133 | } |
135 | } | 134 | } |
136 | 135 | ||
137 | void ide_remove_port_from_hwgroup(ide_hwif_t *hwif) | ||
138 | { | ||
139 | ide_hwgroup_t *hwgroup = hwif->hwgroup; | ||
140 | |||
141 | spin_lock_irq(&ide_lock); | ||
142 | /* | ||
143 | * Remove us from the hwgroup, and free | ||
144 | * the hwgroup if we were the only member | ||
145 | */ | ||
146 | if (hwif->next == hwif) { | ||
147 | BUG_ON(hwgroup->hwif != hwif); | ||
148 | kfree(hwgroup); | ||
149 | } else { | ||
150 | /* There is another interface in hwgroup. | ||
151 | * Unlink us, and set hwgroup->drive and ->hwif to | ||
152 | * something sane. | ||
153 | */ | ||
154 | ide_hwif_t *g = hwgroup->hwif; | ||
155 | |||
156 | while (g->next != hwif) | ||
157 | g = g->next; | ||
158 | g->next = hwif->next; | ||
159 | if (hwgroup->hwif == hwif) { | ||
160 | /* Chose a random hwif for hwgroup->hwif. | ||
161 | * It's guaranteed that there are no drives | ||
162 | * left in the hwgroup. | ||
163 | */ | ||
164 | BUG_ON(hwgroup->drive != NULL); | ||
165 | hwgroup->hwif = g; | ||
166 | } | ||
167 | BUG_ON(hwgroup->hwif == hwif); | ||
168 | } | ||
169 | spin_unlock_irq(&ide_lock); | ||
170 | } | ||
171 | |||
172 | /* Called with ide_lock held. */ | 136 | /* Called with ide_lock held. */ |
173 | static void __ide_port_unregister_devices(ide_hwif_t *hwif) | 137 | static void __ide_port_unregister_devices(ide_hwif_t *hwif) |
174 | { | 138 | { |
@@ -269,16 +233,9 @@ void ide_unregister(ide_hwif_t *hwif) | |||
269 | if (hwif->dma_base) | 233 | if (hwif->dma_base) |
270 | ide_release_dma_engine(hwif); | 234 | ide_release_dma_engine(hwif); |
271 | 235 | ||
272 | spin_lock_irq(&ide_lock); | ||
273 | /* restore hwif data to pristine status */ | ||
274 | ide_init_port_data(hwif, hwif->index); | ||
275 | spin_unlock_irq(&ide_lock); | ||
276 | |||
277 | mutex_unlock(&ide_cfg_mtx); | 236 | mutex_unlock(&ide_cfg_mtx); |
278 | } | 237 | } |
279 | 238 | ||
280 | EXPORT_SYMBOL(ide_unregister); | ||
281 | |||
282 | void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw) | 239 | void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw) |
283 | { | 240 | { |
284 | memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports)); | 241 | memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports)); |
@@ -287,8 +244,8 @@ void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw) | |||
287 | hwif->dev = hw->dev; | 244 | hwif->dev = hw->dev; |
288 | hwif->gendev.parent = hw->parent ? hw->parent : hw->dev; | 245 | hwif->gendev.parent = hw->parent ? hw->parent : hw->dev; |
289 | hwif->ack_intr = hw->ack_intr; | 246 | hwif->ack_intr = hw->ack_intr; |
247 | hwif->config_data = hw->config; | ||
290 | } | 248 | } |
291 | EXPORT_SYMBOL_GPL(ide_init_port_hw); | ||
292 | 249 | ||
293 | /* | 250 | /* |
294 | * Locks for IDE setting functionality | 251 | * Locks for IDE setting functionality |
diff --git a/drivers/ide/legacy/buddha.c b/drivers/ide/legacy/buddha.c index 0497e7f85b09..7c2afa97f417 100644 --- a/drivers/ide/legacy/buddha.c +++ b/drivers/ide/legacy/buddha.c | |||
@@ -37,6 +37,8 @@ | |||
37 | #define CATWEASEL_NUM_HWIFS 3 | 37 | #define CATWEASEL_NUM_HWIFS 3 |
38 | #define XSURF_NUM_HWIFS 2 | 38 | #define XSURF_NUM_HWIFS 2 |
39 | 39 | ||
40 | #define MAX_NUM_HWIFS 3 | ||
41 | |||
40 | /* | 42 | /* |
41 | * Bases of the IDE interfaces (relative to the board address) | 43 | * Bases of the IDE interfaces (relative to the board address) |
42 | */ | 44 | */ |
@@ -148,18 +150,14 @@ static void __init buddha_setup_ports(hw_regs_t *hw, unsigned long base, | |||
148 | 150 | ||
149 | static int __init buddha_init(void) | 151 | static int __init buddha_init(void) |
150 | { | 152 | { |
151 | hw_regs_t hw; | ||
152 | ide_hwif_t *hwif; | ||
153 | int i; | ||
154 | |||
155 | struct zorro_dev *z = NULL; | 153 | struct zorro_dev *z = NULL; |
156 | u_long buddha_board = 0; | 154 | u_long buddha_board = 0; |
157 | BuddhaType type; | 155 | BuddhaType type; |
158 | int buddha_num_hwifs; | 156 | int buddha_num_hwifs, i; |
159 | 157 | ||
160 | while ((z = zorro_find_device(ZORRO_WILDCARD, z))) { | 158 | while ((z = zorro_find_device(ZORRO_WILDCARD, z))) { |
161 | unsigned long board; | 159 | unsigned long board; |
162 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | 160 | hw_regs_t hw[MAX_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL }; |
163 | 161 | ||
164 | if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA) { | 162 | if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA) { |
165 | buddha_num_hwifs = BUDDHA_NUM_HWIFS; | 163 | buddha_num_hwifs = BUDDHA_NUM_HWIFS; |
@@ -221,19 +219,13 @@ fail_base2: | |||
221 | ack_intr = xsurf_ack_intr; | 219 | ack_intr = xsurf_ack_intr; |
222 | } | 220 | } |
223 | 221 | ||
224 | buddha_setup_ports(&hw, base, ctl, irq_port, ack_intr); | 222 | buddha_setup_ports(&hw[i], base, ctl, irq_port, |
223 | ack_intr); | ||
225 | 224 | ||
226 | hwif = ide_find_port(); | 225 | hws[i] = &hw[i]; |
227 | if (hwif) { | ||
228 | u8 index = hwif->index; | ||
229 | |||
230 | ide_init_port_hw(hwif, &hw); | ||
231 | |||
232 | idx[i] = index; | ||
233 | } | ||
234 | } | 226 | } |
235 | 227 | ||
236 | ide_device_add(idx, NULL); | 228 | ide_host_add(NULL, hws, NULL); |
237 | } | 229 | } |
238 | 230 | ||
239 | return 0; | 231 | return 0; |
diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/legacy/falconide.c index 129a812bb57f..724f95073d80 100644 --- a/drivers/ide/legacy/falconide.c +++ b/drivers/ide/legacy/falconide.c | |||
@@ -66,6 +66,27 @@ static void falconide_output_data(ide_drive_t *drive, struct request *rq, | |||
66 | outsw_swapw(data_addr, buf, (len + 1) / 2); | 66 | outsw_swapw(data_addr, buf, (len + 1) / 2); |
67 | } | 67 | } |
68 | 68 | ||
69 | /* Atari has a byte-swapped IDE interface */ | ||
70 | static const struct ide_tp_ops falconide_tp_ops = { | ||
71 | .exec_command = ide_exec_command, | ||
72 | .read_status = ide_read_status, | ||
73 | .read_altstatus = ide_read_altstatus, | ||
74 | .read_sff_dma_status = ide_read_sff_dma_status, | ||
75 | |||
76 | .set_irq = ide_set_irq, | ||
77 | |||
78 | .tf_load = ide_tf_load, | ||
79 | .tf_read = ide_tf_read, | ||
80 | |||
81 | .input_data = falconide_input_data, | ||
82 | .output_data = falconide_output_data, | ||
83 | }; | ||
84 | |||
85 | static const struct ide_port_info falconide_port_info = { | ||
86 | .tp_ops = &falconide_tp_ops, | ||
87 | .host_flags = IDE_HFLAG_NO_DMA, | ||
88 | }; | ||
89 | |||
69 | static void __init falconide_setup_ports(hw_regs_t *hw) | 90 | static void __init falconide_setup_ports(hw_regs_t *hw) |
70 | { | 91 | { |
71 | int i; | 92 | int i; |
@@ -91,11 +112,12 @@ static void __init falconide_setup_ports(hw_regs_t *hw) | |||
91 | 112 | ||
92 | static int __init falconide_init(void) | 113 | static int __init falconide_init(void) |
93 | { | 114 | { |
94 | hw_regs_t hw; | 115 | struct ide_host *host; |
95 | ide_hwif_t *hwif; | 116 | hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; |
117 | int rc; | ||
96 | 118 | ||
97 | if (!MACH_IS_ATARI || !ATARIHW_PRESENT(IDE)) | 119 | if (!MACH_IS_ATARI || !ATARIHW_PRESENT(IDE)) |
98 | return 0; | 120 | return -ENODEV; |
99 | 121 | ||
100 | printk(KERN_INFO "ide: Falcon IDE controller\n"); | 122 | printk(KERN_INFO "ide: Falcon IDE controller\n"); |
101 | 123 | ||
@@ -106,23 +128,25 @@ static int __init falconide_init(void) | |||
106 | 128 | ||
107 | falconide_setup_ports(&hw); | 129 | falconide_setup_ports(&hw); |
108 | 130 | ||
109 | hwif = ide_find_port(); | 131 | host = ide_host_alloc(&falconide_port_info, hws); |
110 | if (hwif) { | 132 | if (host == NULL) { |
111 | u8 index = hwif->index; | 133 | rc = -ENOMEM; |
112 | u8 idx[4] = { index, 0xff, 0xff, 0xff }; | 134 | goto err; |
113 | 135 | } | |
114 | ide_init_port_hw(hwif, &hw); | ||
115 | 136 | ||
116 | /* Atari has a byte-swapped IDE interface */ | 137 | ide_get_lock(NULL, NULL); |
117 | hwif->input_data = falconide_input_data; | 138 | rc = ide_host_register(host, &falconide_port_info, hws); |
118 | hwif->output_data = falconide_output_data; | 139 | ide_release_lock(); |
119 | 140 | ||
120 | ide_get_lock(NULL, NULL); | 141 | if (rc) |
121 | ide_device_add(idx, NULL); | 142 | goto err_free; |
122 | ide_release_lock(); | ||
123 | } | ||
124 | 143 | ||
125 | return 0; | 144 | return 0; |
145 | err_free: | ||
146 | ide_host_free(host); | ||
147 | err: | ||
148 | release_mem_region(ATA_HD_BASE, 0x40); | ||
149 | return rc; | ||
126 | } | 150 | } |
127 | 151 | ||
128 | module_init(falconide_init); | 152 | module_init(falconide_init); |
diff --git a/drivers/ide/legacy/gayle.c b/drivers/ide/legacy/gayle.c index 7e74b20202df..dd5c467d8dd0 100644 --- a/drivers/ide/legacy/gayle.c +++ b/drivers/ide/legacy/gayle.c | |||
@@ -31,6 +31,8 @@ | |||
31 | #define GAYLE_BASE_4000 0xdd2020 /* A4000/A4000T */ | 31 | #define GAYLE_BASE_4000 0xdd2020 /* A4000/A4000T */ |
32 | #define GAYLE_BASE_1200 0xda0000 /* A1200/A600 and E-Matrix 530 */ | 32 | #define GAYLE_BASE_1200 0xda0000 /* A1200/A600 and E-Matrix 530 */ |
33 | 33 | ||
34 | #define GAYLE_IDEREG_SIZE 0x2000 | ||
35 | |||
34 | /* | 36 | /* |
35 | * Offsets from one of the above bases | 37 | * Offsets from one of the above bases |
36 | */ | 38 | */ |
@@ -56,13 +58,11 @@ | |||
56 | #define GAYLE_NUM_HWIFS 1 | 58 | #define GAYLE_NUM_HWIFS 1 |
57 | #define GAYLE_NUM_PROBE_HWIFS GAYLE_NUM_HWIFS | 59 | #define GAYLE_NUM_PROBE_HWIFS GAYLE_NUM_HWIFS |
58 | #define GAYLE_HAS_CONTROL_REG 1 | 60 | #define GAYLE_HAS_CONTROL_REG 1 |
59 | #define GAYLE_IDEREG_SIZE 0x2000 | ||
60 | #else /* CONFIG_BLK_DEV_IDEDOUBLER */ | 61 | #else /* CONFIG_BLK_DEV_IDEDOUBLER */ |
61 | #define GAYLE_NUM_HWIFS 2 | 62 | #define GAYLE_NUM_HWIFS 2 |
62 | #define GAYLE_NUM_PROBE_HWIFS (ide_doubler ? GAYLE_NUM_HWIFS : \ | 63 | #define GAYLE_NUM_PROBE_HWIFS (ide_doubler ? GAYLE_NUM_HWIFS : \ |
63 | GAYLE_NUM_HWIFS-1) | 64 | GAYLE_NUM_HWIFS-1) |
64 | #define GAYLE_HAS_CONTROL_REG (!ide_doubler) | 65 | #define GAYLE_HAS_CONTROL_REG (!ide_doubler) |
65 | #define GAYLE_IDEREG_SIZE (ide_doubler ? 0x1000 : 0x2000) | ||
66 | 66 | ||
67 | static int ide_doubler; | 67 | static int ide_doubler; |
68 | module_param_named(doubler, ide_doubler, bool, 0); | 68 | module_param_named(doubler, ide_doubler, bool, 0); |
@@ -124,8 +124,11 @@ static void __init gayle_setup_ports(hw_regs_t *hw, unsigned long base, | |||
124 | 124 | ||
125 | static int __init gayle_init(void) | 125 | static int __init gayle_init(void) |
126 | { | 126 | { |
127 | unsigned long phys_base, res_start, res_n; | ||
128 | unsigned long base, ctrlport, irqport; | ||
129 | ide_ack_intr_t *ack_intr; | ||
127 | int a4000, i; | 130 | int a4000, i; |
128 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | 131 | hw_regs_t hw[GAYLE_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL }; |
129 | 132 | ||
130 | if (!MACH_IS_AMIGA) | 133 | if (!MACH_IS_AMIGA) |
131 | return -ENODEV; | 134 | return -ENODEV; |
@@ -148,13 +151,6 @@ found: | |||
148 | #endif | 151 | #endif |
149 | ""); | 152 | ""); |
150 | 153 | ||
151 | for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++) { | ||
152 | unsigned long base, ctrlport, irqport; | ||
153 | ide_ack_intr_t *ack_intr; | ||
154 | hw_regs_t hw; | ||
155 | ide_hwif_t *hwif; | ||
156 | unsigned long phys_base, res_start, res_n; | ||
157 | |||
158 | if (a4000) { | 154 | if (a4000) { |
159 | phys_base = GAYLE_BASE_4000; | 155 | phys_base = GAYLE_BASE_4000; |
160 | irqport = (unsigned long)ZTWO_VADDR(GAYLE_IRQ_4000); | 156 | irqport = (unsigned long)ZTWO_VADDR(GAYLE_IRQ_4000); |
@@ -168,33 +164,22 @@ found: | |||
168 | * FIXME: we now have selectable modes between mmio v/s iomio | 164 | * FIXME: we now have selectable modes between mmio v/s iomio |
169 | */ | 165 | */ |
170 | 166 | ||
171 | phys_base += i*GAYLE_NEXT_PORT; | ||
172 | |||
173 | res_start = ((unsigned long)phys_base) & ~(GAYLE_NEXT_PORT-1); | 167 | res_start = ((unsigned long)phys_base) & ~(GAYLE_NEXT_PORT-1); |
174 | res_n = GAYLE_IDEREG_SIZE; | 168 | res_n = GAYLE_IDEREG_SIZE; |
175 | 169 | ||
176 | if (!request_mem_region(res_start, res_n, "IDE")) | 170 | if (!request_mem_region(res_start, res_n, "IDE")) |
177 | continue; | 171 | return -EBUSY; |
178 | 172 | ||
179 | base = (unsigned long)ZTWO_VADDR(phys_base); | 173 | for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++) { |
174 | base = (unsigned long)ZTWO_VADDR(phys_base + i * GAYLE_NEXT_PORT); | ||
180 | ctrlport = GAYLE_HAS_CONTROL_REG ? (base + GAYLE_CONTROL) : 0; | 175 | ctrlport = GAYLE_HAS_CONTROL_REG ? (base + GAYLE_CONTROL) : 0; |
181 | 176 | ||
182 | gayle_setup_ports(&hw, base, ctrlport, irqport, ack_intr); | 177 | gayle_setup_ports(&hw[i], base, ctrlport, irqport, ack_intr); |
183 | |||
184 | hwif = ide_find_port(); | ||
185 | if (hwif) { | ||
186 | u8 index = hwif->index; | ||
187 | 178 | ||
188 | ide_init_port_hw(hwif, &hw); | 179 | hws[i] = &hw[i]; |
189 | |||
190 | idx[i] = index; | ||
191 | } else | ||
192 | release_mem_region(res_start, res_n); | ||
193 | } | 180 | } |
194 | 181 | ||
195 | ide_device_add(idx, NULL); | 182 | return ide_host_add(NULL, hws, NULL); |
196 | |||
197 | return 0; | ||
198 | } | 183 | } |
199 | 184 | ||
200 | module_init(gayle_init); | 185 | module_init(gayle_init); |
diff --git a/drivers/ide/legacy/ide-4drives.c b/drivers/ide/legacy/ide-4drives.c index 89c8ff0a4d08..c76d55de6996 100644 --- a/drivers/ide/legacy/ide-4drives.c +++ b/drivers/ide/legacy/ide-4drives.c | |||
@@ -28,10 +28,8 @@ static const struct ide_port_info ide_4drives_port_info = { | |||
28 | 28 | ||
29 | static int __init ide_4drives_init(void) | 29 | static int __init ide_4drives_init(void) |
30 | { | 30 | { |
31 | ide_hwif_t *hwif, *mate; | ||
32 | unsigned long base = 0x1f0, ctl = 0x3f6; | 31 | unsigned long base = 0x1f0, ctl = 0x3f6; |
33 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | 32 | hw_regs_t hw, *hws[] = { &hw, &hw, NULL, NULL }; |
34 | hw_regs_t hw; | ||
35 | 33 | ||
36 | if (probe_4drives == 0) | 34 | if (probe_4drives == 0) |
37 | return -ENODEV; | 35 | return -ENODEV; |
@@ -55,21 +53,7 @@ static int __init ide_4drives_init(void) | |||
55 | hw.irq = 14; | 53 | hw.irq = 14; |
56 | hw.chipset = ide_4drives; | 54 | hw.chipset = ide_4drives; |
57 | 55 | ||
58 | hwif = ide_find_port(); | 56 | return ide_host_add(&ide_4drives_port_info, hws, NULL); |
59 | if (hwif) { | ||
60 | ide_init_port_hw(hwif, &hw); | ||
61 | idx[0] = hwif->index; | ||
62 | } | ||
63 | |||
64 | mate = ide_find_port(); | ||
65 | if (mate) { | ||
66 | ide_init_port_hw(mate, &hw); | ||
67 | idx[1] = mate->index; | ||
68 | } | ||
69 | |||
70 | ide_device_add(idx, &ide_4drives_port_info); | ||
71 | |||
72 | return 0; | ||
73 | } | 57 | } |
74 | 58 | ||
75 | module_init(ide_4drives_init); | 59 | module_init(ide_4drives_init); |
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c index 27b1e0b7ecb4..21bfac137844 100644 --- a/drivers/ide/legacy/ide-cs.c +++ b/drivers/ide/legacy/ide-cs.c | |||
@@ -74,7 +74,7 @@ INT_MODULE_PARM(pc_debug, 0); | |||
74 | 74 | ||
75 | typedef struct ide_info_t { | 75 | typedef struct ide_info_t { |
76 | struct pcmcia_device *p_dev; | 76 | struct pcmcia_device *p_dev; |
77 | ide_hwif_t *hwif; | 77 | struct ide_host *host; |
78 | int ndev; | 78 | int ndev; |
79 | dev_node_t node; | 79 | dev_node_t node; |
80 | } ide_info_t; | 80 | } ide_info_t; |
@@ -132,7 +132,7 @@ static int ide_probe(struct pcmcia_device *link) | |||
132 | static void ide_detach(struct pcmcia_device *link) | 132 | static void ide_detach(struct pcmcia_device *link) |
133 | { | 133 | { |
134 | ide_info_t *info = link->priv; | 134 | ide_info_t *info = link->priv; |
135 | ide_hwif_t *hwif = info->hwif; | 135 | ide_hwif_t *hwif = info->host->ports[0]; |
136 | unsigned long data_addr, ctl_addr; | 136 | unsigned long data_addr, ctl_addr; |
137 | 137 | ||
138 | DEBUG(0, "ide_detach(0x%p)\n", link); | 138 | DEBUG(0, "ide_detach(0x%p)\n", link); |
@@ -157,13 +157,13 @@ static const struct ide_port_info idecs_port_info = { | |||
157 | .host_flags = IDE_HFLAG_NO_DMA, | 157 | .host_flags = IDE_HFLAG_NO_DMA, |
158 | }; | 158 | }; |
159 | 159 | ||
160 | static ide_hwif_t *idecs_register(unsigned long io, unsigned long ctl, | 160 | static struct ide_host *idecs_register(unsigned long io, unsigned long ctl, |
161 | unsigned long irq, struct pcmcia_device *handle) | 161 | unsigned long irq, struct pcmcia_device *handle) |
162 | { | 162 | { |
163 | struct ide_host *host; | ||
163 | ide_hwif_t *hwif; | 164 | ide_hwif_t *hwif; |
164 | hw_regs_t hw; | 165 | int i, rc; |
165 | int i; | 166 | hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; |
166 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | ||
167 | 167 | ||
168 | if (!request_region(io, 8, DRV_NAME)) { | 168 | if (!request_region(io, 8, DRV_NAME)) { |
169 | printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n", | 169 | printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n", |
@@ -184,30 +184,24 @@ static ide_hwif_t *idecs_register(unsigned long io, unsigned long ctl, | |||
184 | hw.chipset = ide_pci; | 184 | hw.chipset = ide_pci; |
185 | hw.dev = &handle->dev; | 185 | hw.dev = &handle->dev; |
186 | 186 | ||
187 | hwif = ide_find_port(); | 187 | rc = ide_host_add(&idecs_port_info, hws, &host); |
188 | if (hwif == NULL) | 188 | if (rc) |
189 | goto out_release; | 189 | goto out_release; |
190 | 190 | ||
191 | i = hwif->index; | 191 | hwif = host->ports[0]; |
192 | |||
193 | ide_init_port_hw(hwif, &hw); | ||
194 | |||
195 | idx[0] = i; | ||
196 | |||
197 | ide_device_add(idx, &idecs_port_info); | ||
198 | 192 | ||
199 | if (hwif->present) | 193 | if (hwif->present) |
200 | return hwif; | 194 | return host; |
201 | 195 | ||
202 | /* retry registration in case device is still spinning up */ | 196 | /* retry registration in case device is still spinning up */ |
203 | for (i = 0; i < 10; i++) { | 197 | for (i = 0; i < 10; i++) { |
204 | msleep(100); | 198 | msleep(100); |
205 | ide_port_scan(hwif); | 199 | ide_port_scan(hwif); |
206 | if (hwif->present) | 200 | if (hwif->present) |
207 | return hwif; | 201 | return host; |
208 | } | 202 | } |
209 | 203 | ||
210 | return hwif; | 204 | return host; |
211 | 205 | ||
212 | out_release: | 206 | out_release: |
213 | release_region(ctl, 1); | 207 | release_region(ctl, 1); |
@@ -239,7 +233,7 @@ static int ide_config(struct pcmcia_device *link) | |||
239 | cistpl_cftable_entry_t *cfg; | 233 | cistpl_cftable_entry_t *cfg; |
240 | int pass, last_ret = 0, last_fn = 0, is_kme = 0; | 234 | int pass, last_ret = 0, last_fn = 0, is_kme = 0; |
241 | unsigned long io_base, ctl_base; | 235 | unsigned long io_base, ctl_base; |
242 | ide_hwif_t *hwif; | 236 | struct ide_host *host; |
243 | 237 | ||
244 | DEBUG(0, "ide_config(0x%p)\n", link); | 238 | DEBUG(0, "ide_config(0x%p)\n", link); |
245 | 239 | ||
@@ -334,21 +328,21 @@ static int ide_config(struct pcmcia_device *link) | |||
334 | if (is_kme) | 328 | if (is_kme) |
335 | outb(0x81, ctl_base+1); | 329 | outb(0x81, ctl_base+1); |
336 | 330 | ||
337 | hwif = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, link); | 331 | host = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, link); |
338 | if (hwif == NULL && link->io.NumPorts1 == 0x20) { | 332 | if (host == NULL && link->io.NumPorts1 == 0x20) { |
339 | outb(0x02, ctl_base + 0x10); | 333 | outb(0x02, ctl_base + 0x10); |
340 | hwif = idecs_register(io_base + 0x10, ctl_base + 0x10, | 334 | host = idecs_register(io_base + 0x10, ctl_base + 0x10, |
341 | link->irq.AssignedIRQ, link); | 335 | link->irq.AssignedIRQ, link); |
342 | } | 336 | } |
343 | 337 | ||
344 | if (hwif == NULL) | 338 | if (host == NULL) |
345 | goto failed; | 339 | goto failed; |
346 | 340 | ||
347 | info->ndev = 1; | 341 | info->ndev = 1; |
348 | sprintf(info->node.dev_name, "hd%c", 'a' + hwif->index * 2); | 342 | sprintf(info->node.dev_name, "hd%c", 'a' + host->ports[0]->index * 2); |
349 | info->node.major = hwif->major; | 343 | info->node.major = host->ports[0]->major; |
350 | info->node.minor = 0; | 344 | info->node.minor = 0; |
351 | info->hwif = hwif; | 345 | info->host = host; |
352 | link->dev_node = &info->node; | 346 | link->dev_node = &info->node; |
353 | printk(KERN_INFO "ide-cs: %s: Vpp = %d.%d\n", | 347 | printk(KERN_INFO "ide-cs: %s: Vpp = %d.%d\n", |
354 | info->node.dev_name, link->conf.Vpp / 10, link->conf.Vpp % 10); | 348 | info->node.dev_name, link->conf.Vpp / 10, link->conf.Vpp % 10); |
@@ -379,15 +373,15 @@ failed: | |||
379 | static void ide_release(struct pcmcia_device *link) | 373 | static void ide_release(struct pcmcia_device *link) |
380 | { | 374 | { |
381 | ide_info_t *info = link->priv; | 375 | ide_info_t *info = link->priv; |
382 | ide_hwif_t *hwif = info->hwif; | 376 | struct ide_host *host = info->host; |
383 | 377 | ||
384 | DEBUG(0, "ide_release(0x%p)\n", link); | 378 | DEBUG(0, "ide_release(0x%p)\n", link); |
385 | 379 | ||
386 | if (info->ndev) { | 380 | if (info->ndev) |
387 | /* FIXME: if this fails we need to queue the cleanup somehow | 381 | /* FIXME: if this fails we need to queue the cleanup somehow |
388 | -- need to investigate the required PCMCIA magic */ | 382 | -- need to investigate the required PCMCIA magic */ |
389 | ide_unregister(hwif); | 383 | ide_host_remove(host); |
390 | } | 384 | |
391 | info->ndev = 0; | 385 | info->ndev = 0; |
392 | 386 | ||
393 | pcmcia_disable_device(link); | 387 | pcmcia_disable_device(link); |
diff --git a/drivers/ide/legacy/ide_platform.c b/drivers/ide/legacy/ide_platform.c index a249562b34b5..051b4ab0f359 100644 --- a/drivers/ide/legacy/ide_platform.c +++ b/drivers/ide/legacy/ide_platform.c | |||
@@ -52,12 +52,10 @@ static int __devinit plat_ide_probe(struct platform_device *pdev) | |||
52 | { | 52 | { |
53 | struct resource *res_base, *res_alt, *res_irq; | 53 | struct resource *res_base, *res_alt, *res_irq; |
54 | void __iomem *base, *alt_base; | 54 | void __iomem *base, *alt_base; |
55 | ide_hwif_t *hwif; | ||
56 | struct pata_platform_info *pdata; | 55 | struct pata_platform_info *pdata; |
57 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | 56 | struct ide_host *host; |
58 | int ret = 0; | 57 | int ret = 0, mmio = 0; |
59 | int mmio = 0; | 58 | hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; |
60 | hw_regs_t hw; | ||
61 | struct ide_port_info d = platform_ide_port_info; | 59 | struct ide_port_info d = platform_ide_port_info; |
62 | 60 | ||
63 | pdata = pdev->dev.platform_data; | 61 | pdata = pdev->dev.platform_data; |
@@ -94,28 +92,18 @@ static int __devinit plat_ide_probe(struct platform_device *pdev) | |||
94 | res_alt->start, res_alt->end - res_alt->start + 1); | 92 | res_alt->start, res_alt->end - res_alt->start + 1); |
95 | } | 93 | } |
96 | 94 | ||
97 | hwif = ide_find_port(); | ||
98 | if (!hwif) { | ||
99 | ret = -ENODEV; | ||
100 | goto out; | ||
101 | } | ||
102 | |||
103 | memset(&hw, 0, sizeof(hw)); | 95 | memset(&hw, 0, sizeof(hw)); |
104 | plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start); | 96 | plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start); |
105 | hw.dev = &pdev->dev; | 97 | hw.dev = &pdev->dev; |
106 | 98 | ||
107 | ide_init_port_hw(hwif, &hw); | 99 | if (mmio) |
108 | |||
109 | if (mmio) { | ||
110 | d.host_flags |= IDE_HFLAG_MMIO; | 100 | d.host_flags |= IDE_HFLAG_MMIO; |
111 | default_hwif_mmiops(hwif); | ||
112 | } | ||
113 | 101 | ||
114 | idx[0] = hwif->index; | 102 | ret = ide_host_add(&d, hws, &host); |
115 | 103 | if (ret) | |
116 | ide_device_add(idx, &d); | 104 | goto out; |
117 | 105 | ||
118 | platform_set_drvdata(pdev, hwif); | 106 | platform_set_drvdata(pdev, host); |
119 | 107 | ||
120 | return 0; | 108 | return 0; |
121 | 109 | ||
@@ -125,9 +113,9 @@ out: | |||
125 | 113 | ||
126 | static int __devexit plat_ide_remove(struct platform_device *pdev) | 114 | static int __devexit plat_ide_remove(struct platform_device *pdev) |
127 | { | 115 | { |
128 | ide_hwif_t *hwif = pdev->dev.driver_data; | 116 | struct ide_host *host = pdev->dev.driver_data; |
129 | 117 | ||
130 | ide_unregister(hwif); | 118 | ide_host_remove(host); |
131 | 119 | ||
132 | return 0; | 120 | return 0; |
133 | } | 121 | } |
diff --git a/drivers/ide/legacy/macide.c b/drivers/ide/legacy/macide.c index 0a6195bcfeda..a0bb167980e7 100644 --- a/drivers/ide/legacy/macide.c +++ b/drivers/ide/legacy/macide.c | |||
@@ -91,11 +91,10 @@ static const char *mac_ide_name[] = | |||
91 | 91 | ||
92 | static int __init macide_init(void) | 92 | static int __init macide_init(void) |
93 | { | 93 | { |
94 | ide_hwif_t *hwif; | ||
95 | ide_ack_intr_t *ack_intr; | 94 | ide_ack_intr_t *ack_intr; |
96 | unsigned long base; | 95 | unsigned long base; |
97 | int irq; | 96 | int irq; |
98 | hw_regs_t hw; | 97 | hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; |
99 | 98 | ||
100 | if (!MACH_IS_MAC) | 99 | if (!MACH_IS_MAC) |
101 | return -ENODEV; | 100 | return -ENODEV; |
@@ -125,17 +124,7 @@ static int __init macide_init(void) | |||
125 | 124 | ||
126 | macide_setup_ports(&hw, base, irq, ack_intr); | 125 | macide_setup_ports(&hw, base, irq, ack_intr); |
127 | 126 | ||
128 | hwif = ide_find_port(); | 127 | return ide_host_add(NULL, hws, NULL); |
129 | if (hwif) { | ||
130 | u8 index = hwif->index; | ||
131 | u8 idx[4] = { index, 0xff, 0xff, 0xff }; | ||
132 | |||
133 | ide_init_port_hw(hwif, &hw); | ||
134 | |||
135 | ide_device_add(idx, NULL); | ||
136 | } | ||
137 | |||
138 | return 0; | ||
139 | } | 128 | } |
140 | 129 | ||
141 | module_init(macide_init); | 130 | module_init(macide_init); |
diff --git a/drivers/ide/legacy/q40ide.c b/drivers/ide/legacy/q40ide.c index 9c2b9d078f69..4abd8fc78197 100644 --- a/drivers/ide/legacy/q40ide.c +++ b/drivers/ide/legacy/q40ide.c | |||
@@ -96,6 +96,27 @@ static void q40ide_output_data(ide_drive_t *drive, struct request *rq, | |||
96 | outsw_swapw(data_addr, buf, (len + 1) / 2); | 96 | outsw_swapw(data_addr, buf, (len + 1) / 2); |
97 | } | 97 | } |
98 | 98 | ||
99 | /* Q40 has a byte-swapped IDE interface */ | ||
100 | static const struct ide_tp_ops q40ide_tp_ops = { | ||
101 | .exec_command = ide_exec_command, | ||
102 | .read_status = ide_read_status, | ||
103 | .read_altstatus = ide_read_altstatus, | ||
104 | .read_sff_dma_status = ide_read_sff_dma_status, | ||
105 | |||
106 | .set_irq = ide_set_irq, | ||
107 | |||
108 | .tf_load = ide_tf_load, | ||
109 | .tf_read = ide_tf_read, | ||
110 | |||
111 | .input_data = q40ide_input_data, | ||
112 | .output_data = q40ide_output_data, | ||
113 | }; | ||
114 | |||
115 | static const struct ide_port_info q40ide_port_info = { | ||
116 | .tp_ops = &q40ide_tp_ops, | ||
117 | .host_flags = IDE_HFLAG_NO_DMA, | ||
118 | }; | ||
119 | |||
99 | /* | 120 | /* |
100 | * the static array is needed to have the name reported in /proc/ioports, | 121 | * the static array is needed to have the name reported in /proc/ioports, |
101 | * hwif->name unfortunately isn't available yet | 122 | * hwif->name unfortunately isn't available yet |
@@ -111,9 +132,7 @@ static const char *q40_ide_names[Q40IDE_NUM_HWIFS]={ | |||
111 | static int __init q40ide_init(void) | 132 | static int __init q40ide_init(void) |
112 | { | 133 | { |
113 | int i; | 134 | int i; |
114 | ide_hwif_t *hwif; | 135 | hw_regs_t hw[Q40IDE_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL }; |
115 | const char *name; | ||
116 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | ||
117 | 136 | ||
118 | if (!MACH_IS_Q40) | 137 | if (!MACH_IS_Q40) |
119 | return -ENODEV; | 138 | return -ENODEV; |
@@ -121,9 +140,8 @@ static int __init q40ide_init(void) | |||
121 | printk(KERN_INFO "ide: Q40 IDE controller\n"); | 140 | printk(KERN_INFO "ide: Q40 IDE controller\n"); |
122 | 141 | ||
123 | for (i = 0; i < Q40IDE_NUM_HWIFS; i++) { | 142 | for (i = 0; i < Q40IDE_NUM_HWIFS; i++) { |
124 | hw_regs_t hw; | 143 | const char *name = q40_ide_names[i]; |
125 | 144 | ||
126 | name = q40_ide_names[i]; | ||
127 | if (!request_region(pcide_bases[i], 8, name)) { | 145 | if (!request_region(pcide_bases[i], 8, name)) { |
128 | printk("could not reserve ports %lx-%lx for %s\n", | 146 | printk("could not reserve ports %lx-%lx for %s\n", |
129 | pcide_bases[i],pcide_bases[i]+8,name); | 147 | pcide_bases[i],pcide_bases[i]+8,name); |
@@ -135,26 +153,13 @@ static int __init q40ide_init(void) | |||
135 | release_region(pcide_bases[i], 8); | 153 | release_region(pcide_bases[i], 8); |
136 | continue; | 154 | continue; |
137 | } | 155 | } |
138 | q40_ide_setup_ports(&hw, pcide_bases[i], | 156 | q40_ide_setup_ports(&hw[i], pcide_bases[i], NULL, |
139 | NULL, | ||
140 | // m68kide_iops, | ||
141 | q40ide_default_irq(pcide_bases[i])); | 157 | q40ide_default_irq(pcide_bases[i])); |
142 | 158 | ||
143 | hwif = ide_find_port(); | 159 | hws[i] = &hw[i]; |
144 | if (hwif) { | ||
145 | ide_init_port_hw(hwif, &hw); | ||
146 | |||
147 | /* Q40 has a byte-swapped IDE interface */ | ||
148 | hwif->input_data = q40ide_input_data; | ||
149 | hwif->output_data = q40ide_output_data; | ||
150 | |||
151 | idx[i] = hwif->index; | ||
152 | } | ||
153 | } | 160 | } |
154 | 161 | ||
155 | ide_device_add(idx, NULL); | 162 | return ide_host_add(&q40ide_port_info, hws, NULL); |
156 | |||
157 | return 0; | ||
158 | } | 163 | } |
159 | 164 | ||
160 | module_init(q40ide_init); | 165 | module_init(q40ide_init); |
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c index 48d57cae63c6..11b7f61aae40 100644 --- a/drivers/ide/mips/au1xxx-ide.c +++ b/drivers/ide/mips/au1xxx-ide.c | |||
@@ -519,6 +519,23 @@ static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif) | |||
519 | *ata_regs = ahwif->regbase + (14 << IDE_REG_SHIFT); | 519 | *ata_regs = ahwif->regbase + (14 << IDE_REG_SHIFT); |
520 | } | 520 | } |
521 | 521 | ||
522 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA | ||
523 | static const struct ide_tp_ops au1xxx_tp_ops = { | ||
524 | .exec_command = ide_exec_command, | ||
525 | .read_status = ide_read_status, | ||
526 | .read_altstatus = ide_read_altstatus, | ||
527 | .read_sff_dma_status = ide_read_sff_dma_status, | ||
528 | |||
529 | .set_irq = ide_set_irq, | ||
530 | |||
531 | .tf_load = ide_tf_load, | ||
532 | .tf_read = ide_tf_read, | ||
533 | |||
534 | .input_data = au1xxx_input_data, | ||
535 | .output_data = au1xxx_output_data, | ||
536 | }; | ||
537 | #endif | ||
538 | |||
522 | static const struct ide_port_ops au1xxx_port_ops = { | 539 | static const struct ide_port_ops au1xxx_port_ops = { |
523 | .set_pio_mode = au1xxx_set_pio_mode, | 540 | .set_pio_mode = au1xxx_set_pio_mode, |
524 | .set_dma_mode = auide_set_dma_mode, | 541 | .set_dma_mode = auide_set_dma_mode, |
@@ -526,6 +543,9 @@ static const struct ide_port_ops au1xxx_port_ops = { | |||
526 | 543 | ||
527 | static const struct ide_port_info au1xxx_port_info = { | 544 | static const struct ide_port_info au1xxx_port_info = { |
528 | .init_dma = auide_ddma_init, | 545 | .init_dma = auide_ddma_init, |
546 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA | ||
547 | .tp_ops = &au1xxx_tp_ops, | ||
548 | #endif | ||
529 | .port_ops = &au1xxx_port_ops, | 549 | .port_ops = &au1xxx_port_ops, |
530 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA | 550 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA |
531 | .dma_ops = &au1xxx_dma_ops, | 551 | .dma_ops = &au1xxx_dma_ops, |
@@ -543,11 +563,10 @@ static int au_ide_probe(struct device *dev) | |||
543 | { | 563 | { |
544 | struct platform_device *pdev = to_platform_device(dev); | 564 | struct platform_device *pdev = to_platform_device(dev); |
545 | _auide_hwif *ahwif = &auide_hwif; | 565 | _auide_hwif *ahwif = &auide_hwif; |
546 | ide_hwif_t *hwif; | ||
547 | struct resource *res; | 566 | struct resource *res; |
567 | struct ide_host *host; | ||
548 | int ret = 0; | 568 | int ret = 0; |
549 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | 569 | hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; |
550 | hw_regs_t hw; | ||
551 | 570 | ||
552 | #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA) | 571 | #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA) |
553 | char *mode = "MWDMA2"; | 572 | char *mode = "MWDMA2"; |
@@ -584,36 +603,19 @@ static int au_ide_probe(struct device *dev) | |||
584 | goto out; | 603 | goto out; |
585 | } | 604 | } |
586 | 605 | ||
587 | hwif = ide_find_port(); | ||
588 | if (hwif == NULL) { | ||
589 | ret = -ENOENT; | ||
590 | goto out; | ||
591 | } | ||
592 | |||
593 | memset(&hw, 0, sizeof(hw)); | 606 | memset(&hw, 0, sizeof(hw)); |
594 | auide_setup_ports(&hw, ahwif); | 607 | auide_setup_ports(&hw, ahwif); |
595 | hw.irq = ahwif->irq; | 608 | hw.irq = ahwif->irq; |
596 | hw.dev = dev; | 609 | hw.dev = dev; |
597 | hw.chipset = ide_au1xxx; | 610 | hw.chipset = ide_au1xxx; |
598 | 611 | ||
599 | ide_init_port_hw(hwif, &hw); | 612 | ret = ide_host_add(&au1xxx_port_info, hws, &host); |
600 | 613 | if (ret) | |
601 | /* If the user has selected DDMA assisted copies, | 614 | goto out; |
602 | then set up a few local I/O function entry points | ||
603 | */ | ||
604 | |||
605 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA | ||
606 | hwif->input_data = au1xxx_input_data; | ||
607 | hwif->output_data = au1xxx_output_data; | ||
608 | #endif | ||
609 | |||
610 | auide_hwif.hwif = hwif; | ||
611 | |||
612 | idx[0] = hwif->index; | ||
613 | 615 | ||
614 | ide_device_add(idx, &au1xxx_port_info); | 616 | auide_hwif.hwif = host->ports[0]; |
615 | 617 | ||
616 | dev_set_drvdata(dev, hwif); | 618 | dev_set_drvdata(dev, host); |
617 | 619 | ||
618 | printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode ); | 620 | printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode ); |
619 | 621 | ||
@@ -625,10 +627,10 @@ static int au_ide_remove(struct device *dev) | |||
625 | { | 627 | { |
626 | struct platform_device *pdev = to_platform_device(dev); | 628 | struct platform_device *pdev = to_platform_device(dev); |
627 | struct resource *res; | 629 | struct resource *res; |
628 | ide_hwif_t *hwif = dev_get_drvdata(dev); | 630 | struct ide_host *host = dev_get_drvdata(dev); |
629 | _auide_hwif *ahwif = &auide_hwif; | 631 | _auide_hwif *ahwif = &auide_hwif; |
630 | 632 | ||
631 | ide_unregister(hwif); | 633 | ide_host_remove(host); |
632 | 634 | ||
633 | iounmap((void *)ahwif->regbase); | 635 | iounmap((void *)ahwif->regbase); |
634 | 636 | ||
diff --git a/drivers/ide/mips/swarm.c b/drivers/ide/mips/swarm.c index 9f1212cc4aed..badf79fc9e3a 100644 --- a/drivers/ide/mips/swarm.c +++ b/drivers/ide/mips/swarm.c | |||
@@ -72,12 +72,11 @@ static const struct ide_port_info swarm_port_info = { | |||
72 | */ | 72 | */ |
73 | static int __devinit swarm_ide_probe(struct device *dev) | 73 | static int __devinit swarm_ide_probe(struct device *dev) |
74 | { | 74 | { |
75 | ide_hwif_t *hwif; | ||
76 | u8 __iomem *base; | 75 | u8 __iomem *base; |
76 | struct ide_host *host; | ||
77 | phys_t offset, size; | 77 | phys_t offset, size; |
78 | hw_regs_t hw; | 78 | int i, rc; |
79 | int i; | 79 | hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; |
80 | u8 idx[] = { 0xff, 0xff, 0xff, 0xff }; | ||
81 | 80 | ||
82 | if (!SIBYTE_HAVE_IDE) | 81 | if (!SIBYTE_HAVE_IDE) |
83 | return -ENODEV; | 82 | return -ENODEV; |
@@ -116,26 +115,17 @@ static int __devinit swarm_ide_probe(struct device *dev) | |||
116 | hw.irq = K_INT_GB_IDE; | 115 | hw.irq = K_INT_GB_IDE; |
117 | hw.chipset = ide_generic; | 116 | hw.chipset = ide_generic; |
118 | 117 | ||
119 | hwif = ide_find_port_slot(&swarm_port_info); | 118 | rc = ide_host_add(&swarm_port_info, hws, &host); |
120 | if (hwif == NULL) | 119 | if (rc) |
121 | goto err; | 120 | goto err; |
122 | 121 | ||
123 | ide_init_port_hw(hwif, &hw); | 122 | dev_set_drvdata(dev, host); |
124 | |||
125 | /* Setup MMIO ops. */ | ||
126 | default_hwif_mmiops(hwif); | ||
127 | |||
128 | idx[0] = hwif->index; | ||
129 | |||
130 | ide_device_add(idx, &swarm_port_info); | ||
131 | |||
132 | dev_set_drvdata(dev, hwif); | ||
133 | 123 | ||
134 | return 0; | 124 | return 0; |
135 | err: | 125 | err: |
136 | release_resource(&swarm_ide_resource); | 126 | release_resource(&swarm_ide_resource); |
137 | iounmap(base); | 127 | iounmap(base); |
138 | return -ENOMEM; | 128 | return rc; |
139 | } | 129 | } |
140 | 130 | ||
141 | static struct device_driver swarm_ide_driver = { | 131 | static struct device_driver swarm_ide_driver = { |
diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/pci/aec62xx.c index ae7a4329a581..fbc43e121e6b 100644 --- a/drivers/ide/pci/aec62xx.c +++ b/drivers/ide/pci/aec62xx.c | |||
@@ -195,7 +195,6 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = { | |||
195 | .host_flags = IDE_HFLAG_SERIALIZE | | 195 | .host_flags = IDE_HFLAG_SERIALIZE | |
196 | IDE_HFLAG_NO_ATAPI_DMA | | 196 | IDE_HFLAG_NO_ATAPI_DMA | |
197 | IDE_HFLAG_NO_DSC | | 197 | IDE_HFLAG_NO_DSC | |
198 | IDE_HFLAG_ABUSE_SET_DMA_MODE | | ||
199 | IDE_HFLAG_OFF_BOARD, | 198 | IDE_HFLAG_OFF_BOARD, |
200 | .pio_mask = ATA_PIO4, | 199 | .pio_mask = ATA_PIO4, |
201 | .mwdma_mask = ATA_MWDMA2, | 200 | .mwdma_mask = ATA_MWDMA2, |
@@ -205,7 +204,6 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = { | |||
205 | .init_chipset = init_chipset_aec62xx, | 204 | .init_chipset = init_chipset_aec62xx, |
206 | .port_ops = &atp86x_port_ops, | 205 | .port_ops = &atp86x_port_ops, |
207 | .host_flags = IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_NO_AUTODMA | | 206 | .host_flags = IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_NO_AUTODMA | |
208 | IDE_HFLAG_ABUSE_SET_DMA_MODE | | ||
209 | IDE_HFLAG_OFF_BOARD, | 207 | IDE_HFLAG_OFF_BOARD, |
210 | .pio_mask = ATA_PIO4, | 208 | .pio_mask = ATA_PIO4, |
211 | .mwdma_mask = ATA_MWDMA2, | 209 | .mwdma_mask = ATA_MWDMA2, |
@@ -216,7 +214,6 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = { | |||
216 | .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}}, | 214 | .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}}, |
217 | .port_ops = &atp86x_port_ops, | 215 | .port_ops = &atp86x_port_ops, |
218 | .host_flags = IDE_HFLAG_NO_ATAPI_DMA | | 216 | .host_flags = IDE_HFLAG_NO_ATAPI_DMA | |
219 | IDE_HFLAG_ABUSE_SET_DMA_MODE | | ||
220 | IDE_HFLAG_NON_BOOTABLE, | 217 | IDE_HFLAG_NON_BOOTABLE, |
221 | .pio_mask = ATA_PIO4, | 218 | .pio_mask = ATA_PIO4, |
222 | .mwdma_mask = ATA_MWDMA2, | 219 | .mwdma_mask = ATA_MWDMA2, |
@@ -226,7 +223,6 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = { | |||
226 | .init_chipset = init_chipset_aec62xx, | 223 | .init_chipset = init_chipset_aec62xx, |
227 | .port_ops = &atp86x_port_ops, | 224 | .port_ops = &atp86x_port_ops, |
228 | .host_flags = IDE_HFLAG_NO_ATAPI_DMA | | 225 | .host_flags = IDE_HFLAG_NO_ATAPI_DMA | |
229 | IDE_HFLAG_ABUSE_SET_DMA_MODE | | ||
230 | IDE_HFLAG_OFF_BOARD, | 226 | IDE_HFLAG_OFF_BOARD, |
231 | .pio_mask = ATA_PIO4, | 227 | .pio_mask = ATA_PIO4, |
232 | .mwdma_mask = ATA_MWDMA2, | 228 | .mwdma_mask = ATA_MWDMA2, |
@@ -237,7 +233,6 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = { | |||
237 | .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}}, | 233 | .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}}, |
238 | .port_ops = &atp86x_port_ops, | 234 | .port_ops = &atp86x_port_ops, |
239 | .host_flags = IDE_HFLAG_NO_ATAPI_DMA | | 235 | .host_flags = IDE_HFLAG_NO_ATAPI_DMA | |
240 | IDE_HFLAG_ABUSE_SET_DMA_MODE | | ||
241 | IDE_HFLAG_OFF_BOARD, | 236 | IDE_HFLAG_OFF_BOARD, |
242 | .pio_mask = ATA_PIO4, | 237 | .pio_mask = ATA_PIO4, |
243 | .mwdma_mask = ATA_MWDMA2, | 238 | .mwdma_mask = ATA_MWDMA2, |
diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c index 80d19c0eb780..5ef7817ac64f 100644 --- a/drivers/ide/pci/alim15x3.c +++ b/drivers/ide/pci/alim15x3.c | |||
@@ -471,7 +471,15 @@ static int __devinit init_dma_ali15x3(ide_hwif_t *hwif, | |||
471 | struct pci_dev *dev = to_pci_dev(hwif->dev); | 471 | struct pci_dev *dev = to_pci_dev(hwif->dev); |
472 | unsigned long base = ide_pci_dma_base(hwif, d); | 472 | unsigned long base = ide_pci_dma_base(hwif, d); |
473 | 473 | ||
474 | if (base == 0 || ide_pci_set_master(dev, d->name) < 0) | 474 | if (base == 0) |
475 | return -1; | ||
476 | |||
477 | hwif->dma_base = base; | ||
478 | |||
479 | if (ide_pci_check_simplex(hwif, d) < 0) | ||
480 | return -1; | ||
481 | |||
482 | if (ide_pci_set_master(dev, d->name) < 0) | ||
475 | return -1; | 483 | return -1; |
476 | 484 | ||
477 | if (!hwif->channel) | 485 | if (!hwif->channel) |
@@ -483,7 +491,7 @@ static int __devinit init_dma_ali15x3(ide_hwif_t *hwif, | |||
483 | if (ide_allocate_dma_engine(hwif)) | 491 | if (ide_allocate_dma_engine(hwif)) |
484 | return -1; | 492 | return -1; |
485 | 493 | ||
486 | ide_setup_dma(hwif, base); | 494 | hwif->dma_ops = &sff_dma_ops; |
487 | 495 | ||
488 | return 0; | 496 | return 0; |
489 | } | 497 | } |
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c index 0bfcdd0e77b3..ef7d971031ee 100644 --- a/drivers/ide/pci/amd74xx.c +++ b/drivers/ide/pci/amd74xx.c | |||
@@ -218,7 +218,6 @@ static const struct ide_port_ops amd_port_ops = { | |||
218 | 218 | ||
219 | #define IDE_HFLAGS_AMD \ | 219 | #define IDE_HFLAGS_AMD \ |
220 | (IDE_HFLAG_PIO_NO_BLACKLIST | \ | 220 | (IDE_HFLAG_PIO_NO_BLACKLIST | \ |
221 | IDE_HFLAG_ABUSE_SET_DMA_MODE | \ | ||
222 | IDE_HFLAG_POST_SET_MODE | \ | 221 | IDE_HFLAG_POST_SET_MODE | \ |
223 | IDE_HFLAG_IO_32BIT | \ | 222 | IDE_HFLAG_IO_32BIT | \ |
224 | IDE_HFLAG_UNMASK_IRQS) | 223 | IDE_HFLAG_UNMASK_IRQS) |
diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/pci/cmd640.c index 1ad1e23e3105..e6c62006ca1a 100644 --- a/drivers/ide/pci/cmd640.c +++ b/drivers/ide/pci/cmd640.c | |||
@@ -181,11 +181,6 @@ static u8 recovery_counts[4] = {16, 16, 16, 16}; /* Recovery count (encoded) */ | |||
181 | static DEFINE_SPINLOCK(cmd640_lock); | 181 | static DEFINE_SPINLOCK(cmd640_lock); |
182 | 182 | ||
183 | /* | 183 | /* |
184 | * These are initialized to point at the devices we control | ||
185 | */ | ||
186 | static ide_hwif_t *cmd_hwif0, *cmd_hwif1; | ||
187 | |||
188 | /* | ||
189 | * Interface to access cmd640x registers | 184 | * Interface to access cmd640x registers |
190 | */ | 185 | */ |
191 | static unsigned int cmd640_key; | 186 | static unsigned int cmd640_key; |
@@ -717,8 +712,7 @@ static int __init cmd640x_init(void) | |||
717 | int second_port_cmd640 = 0, rc; | 712 | int second_port_cmd640 = 0, rc; |
718 | const char *bus_type, *port2; | 713 | const char *bus_type, *port2; |
719 | u8 b, cfr; | 714 | u8 b, cfr; |
720 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | 715 | hw_regs_t hw[2], *hws[] = { NULL, NULL, NULL, NULL }; |
721 | hw_regs_t hw[2]; | ||
722 | 716 | ||
723 | if (cmd640_vlb && probe_for_cmd640_vlb()) { | 717 | if (cmd640_vlb && probe_for_cmd640_vlb()) { |
724 | bus_type = "VLB"; | 718 | bus_type = "VLB"; |
@@ -781,15 +775,10 @@ static int __init cmd640x_init(void) | |||
781 | printk(KERN_INFO "cmd640: buggy cmd640%c interface on %s, config=0x%02x" | 775 | printk(KERN_INFO "cmd640: buggy cmd640%c interface on %s, config=0x%02x" |
782 | "\n", 'a' + cmd640_chip_version - 1, bus_type, cfr); | 776 | "\n", 'a' + cmd640_chip_version - 1, bus_type, cfr); |
783 | 777 | ||
784 | cmd_hwif0 = ide_find_port(); | ||
785 | |||
786 | /* | 778 | /* |
787 | * Initialize data for primary port | 779 | * Initialize data for primary port |
788 | */ | 780 | */ |
789 | if (cmd_hwif0) { | 781 | hws[0] = &hw[0]; |
790 | ide_init_port_hw(cmd_hwif0, &hw[0]); | ||
791 | idx[0] = cmd_hwif0->index; | ||
792 | } | ||
793 | 782 | ||
794 | /* | 783 | /* |
795 | * Ensure compatibility by always using the slowest timings | 784 | * Ensure compatibility by always using the slowest timings |
@@ -829,13 +818,9 @@ static int __init cmd640x_init(void) | |||
829 | /* | 818 | /* |
830 | * Initialize data for secondary cmd640 port, if enabled | 819 | * Initialize data for secondary cmd640 port, if enabled |
831 | */ | 820 | */ |
832 | if (second_port_cmd640) { | 821 | if (second_port_cmd640) |
833 | cmd_hwif1 = ide_find_port(); | 822 | hws[1] = &hw[1]; |
834 | if (cmd_hwif1) { | 823 | |
835 | ide_init_port_hw(cmd_hwif1, &hw[1]); | ||
836 | idx[1] = cmd_hwif1->index; | ||
837 | } | ||
838 | } | ||
839 | printk(KERN_INFO "cmd640: %sserialized, secondary interface %s\n", | 824 | printk(KERN_INFO "cmd640: %sserialized, secondary interface %s\n", |
840 | second_port_cmd640 ? "" : "not ", port2); | 825 | second_port_cmd640 ? "" : "not ", port2); |
841 | 826 | ||
@@ -843,9 +828,7 @@ static int __init cmd640x_init(void) | |||
843 | cmd640_dump_regs(); | 828 | cmd640_dump_regs(); |
844 | #endif | 829 | #endif |
845 | 830 | ||
846 | ide_device_add(idx, &cmd640_port_info); | 831 | return ide_host_add(&cmd640_port_info, hws, NULL); |
847 | |||
848 | return 1; | ||
849 | } | 832 | } |
850 | 833 | ||
851 | module_param_named(probe_vlb, cmd640_vlb, bool, 0); | 834 | module_param_named(probe_vlb, cmd640_vlb, bool, 0); |
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c index cfa784bacf48..ce58bfcdb3c6 100644 --- a/drivers/ide/pci/cmd64x.c +++ b/drivers/ide/pci/cmd64x.c | |||
@@ -262,7 +262,7 @@ static int cmd648_dma_test_irq(ide_drive_t *drive) | |||
262 | unsigned long base = hwif->dma_base - (hwif->channel * 8); | 262 | unsigned long base = hwif->dma_base - (hwif->channel * 8); |
263 | u8 irq_mask = hwif->channel ? MRDMODE_INTR_CH1 : | 263 | u8 irq_mask = hwif->channel ? MRDMODE_INTR_CH1 : |
264 | MRDMODE_INTR_CH0; | 264 | MRDMODE_INTR_CH0; |
265 | u8 dma_stat = inb(hwif->dma_status); | 265 | u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS); |
266 | u8 mrdmode = inb(base + 1); | 266 | u8 mrdmode = inb(base + 1); |
267 | 267 | ||
268 | #ifdef DEBUG | 268 | #ifdef DEBUG |
@@ -286,7 +286,7 @@ static int cmd64x_dma_test_irq(ide_drive_t *drive) | |||
286 | int irq_reg = hwif->channel ? ARTTIM23 : CFR; | 286 | int irq_reg = hwif->channel ? ARTTIM23 : CFR; |
287 | u8 irq_mask = hwif->channel ? ARTTIM23_INTR_CH1 : | 287 | u8 irq_mask = hwif->channel ? ARTTIM23_INTR_CH1 : |
288 | CFR_INTR_CH0; | 288 | CFR_INTR_CH0; |
289 | u8 dma_stat = inb(hwif->dma_status); | 289 | u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS); |
290 | u8 irq_stat = 0; | 290 | u8 irq_stat = 0; |
291 | 291 | ||
292 | (void) pci_read_config_byte(dev, irq_reg, &irq_stat); | 292 | (void) pci_read_config_byte(dev, irq_reg, &irq_stat); |
@@ -317,13 +317,13 @@ static int cmd646_1_dma_end(ide_drive_t *drive) | |||
317 | 317 | ||
318 | drive->waiting_for_dma = 0; | 318 | drive->waiting_for_dma = 0; |
319 | /* get DMA status */ | 319 | /* get DMA status */ |
320 | dma_stat = inb(hwif->dma_status); | 320 | dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS); |
321 | /* read DMA command state */ | 321 | /* read DMA command state */ |
322 | dma_cmd = inb(hwif->dma_command); | 322 | dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD); |
323 | /* stop DMA */ | 323 | /* stop DMA */ |
324 | outb(dma_cmd & ~1, hwif->dma_command); | 324 | outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD); |
325 | /* clear the INTR & ERROR bits */ | 325 | /* clear the INTR & ERROR bits */ |
326 | outb(dma_stat | 6, hwif->dma_status); | 326 | outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS); |
327 | /* and free any DMA resources */ | 327 | /* and free any DMA resources */ |
328 | ide_destroy_dmatable(drive); | 328 | ide_destroy_dmatable(drive); |
329 | /* verify good DMA status */ | 329 | /* verify good DMA status */ |
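[Note] The cmd64x changes show the other substitution that repeats throughout this series: the cached hwif->dma_command and hwif->dma_status fields are gone, and the bus-master DMA registers are addressed as ATA_DMA_CMD / ATA_DMA_STATUS offsets from hwif->dma_base. A reduced sketch of a dma_end-style helper in the new style; the final "good status" test follows the usual SFF convention and is an assumption here, not copied from the hunk.

/* Sketch only: bmdma access via dma_base + fixed offsets. */
static int example_dma_end(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 dma_stat, dma_cmd;

	dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);	/* DMA status */
	dma_cmd  = inb(hwif->dma_base + ATA_DMA_CMD);		/* DMA command */
	outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);	/* stop DMA */
	outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);	/* clear INTR & ERROR */

	return (dma_stat & 7) != 4;	/* assumed SFF "good status" check */
}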
diff --git a/drivers/ide/pci/cs5520.c b/drivers/ide/pci/cs5520.c index 992b1cf8db69..b03d8ae947e6 100644 --- a/drivers/ide/pci/cs5520.c +++ b/drivers/ide/pci/cs5520.c | |||
@@ -62,8 +62,6 @@ static void cs5520_set_pio_mode(ide_drive_t *drive, const u8 pio) | |||
62 | struct pci_dev *pdev = to_pci_dev(hwif->dev); | 62 | struct pci_dev *pdev = to_pci_dev(hwif->dev); |
63 | int controller = drive->dn > 1 ? 1 : 0; | 63 | int controller = drive->dn > 1 ? 1 : 0; |
64 | 64 | ||
65 | /* FIXME: if DMA = 1 do we need to set the DMA bit here ? */ | ||
66 | |||
67 | /* 8bit CAT/CRT - 8bit command timing for channel */ | 65 | /* 8bit CAT/CRT - 8bit command timing for channel */ |
68 | pci_write_config_byte(pdev, 0x62 + controller, | 66 | pci_write_config_byte(pdev, 0x62 + controller, |
69 | (cs5520_pio_clocks[pio].recovery << 4) | | 67 | (cs5520_pio_clocks[pio].recovery << 4) | |
@@ -89,46 +87,17 @@ static void cs5520_set_dma_mode(ide_drive_t *drive, const u8 speed) | |||
89 | cs5520_set_pio_mode(drive, 0); | 87 | cs5520_set_pio_mode(drive, 0); |
90 | } | 88 | } |
91 | 89 | ||
92 | /* | ||
93 | * We wrap the DMA activate to set the vdma flag. This is needed | ||
94 | * so that the IDE DMA layer issues PIO not DMA commands over the | ||
95 | * DMA channel | ||
96 | * | ||
97 | * ATAPI is harder so disable it for now using IDE_HFLAG_NO_ATAPI_DMA | ||
98 | */ | ||
99 | |||
100 | static void cs5520_dma_host_set(ide_drive_t *drive, int on) | ||
101 | { | ||
102 | drive->vdma = on; | ||
103 | ide_dma_host_set(drive, on); | ||
104 | } | ||
105 | |||
106 | static const struct ide_port_ops cs5520_port_ops = { | 90 | static const struct ide_port_ops cs5520_port_ops = { |
107 | .set_pio_mode = cs5520_set_pio_mode, | 91 | .set_pio_mode = cs5520_set_pio_mode, |
108 | .set_dma_mode = cs5520_set_dma_mode, | 92 | .set_dma_mode = cs5520_set_dma_mode, |
109 | }; | 93 | }; |
110 | 94 | ||
111 | static const struct ide_dma_ops cs5520_dma_ops = { | ||
112 | .dma_host_set = cs5520_dma_host_set, | ||
113 | .dma_setup = ide_dma_setup, | ||
114 | .dma_exec_cmd = ide_dma_exec_cmd, | ||
115 | .dma_start = ide_dma_start, | ||
116 | .dma_end = __ide_dma_end, | ||
117 | .dma_test_irq = ide_dma_test_irq, | ||
118 | .dma_lost_irq = ide_dma_lost_irq, | ||
119 | .dma_timeout = ide_dma_timeout, | ||
120 | }; | ||
121 | |||
122 | /* FIXME: VDMA is disabled because it caused system hangs */ | ||
123 | #define DECLARE_CS_DEV(name_str) \ | 95 | #define DECLARE_CS_DEV(name_str) \ |
124 | { \ | 96 | { \ |
125 | .name = name_str, \ | 97 | .name = name_str, \ |
126 | .port_ops = &cs5520_port_ops, \ | 98 | .port_ops = &cs5520_port_ops, \ |
127 | .dma_ops = &cs5520_dma_ops, \ | ||
128 | .host_flags = IDE_HFLAG_ISA_PORTS | \ | 99 | .host_flags = IDE_HFLAG_ISA_PORTS | \ |
129 | IDE_HFLAG_CS5520 | \ | 100 | IDE_HFLAG_CS5520, \ |
130 | IDE_HFLAG_NO_ATAPI_DMA | \ | ||
131 | IDE_HFLAG_ABUSE_SET_DMA_MODE, \ | ||
132 | .pio_mask = ATA_PIO4, \ | 101 | .pio_mask = ATA_PIO4, \ |
133 | } | 102 | } |
134 | 103 | ||
@@ -146,7 +115,7 @@ static const struct ide_port_info cyrix_chipsets[] __devinitdata = { | |||
146 | static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id) | 115 | static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id) |
147 | { | 116 | { |
148 | const struct ide_port_info *d = &cyrix_chipsets[id->driver_data]; | 117 | const struct ide_port_info *d = &cyrix_chipsets[id->driver_data]; |
149 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | 118 | hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL }; |
150 | 119 | ||
151 | ide_setup_pci_noise(dev, d); | 120 | ide_setup_pci_noise(dev, d); |
152 | 121 | ||
@@ -168,11 +137,9 @@ static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_devic | |||
168 | * do all the device setup for us | 137 | * do all the device setup for us |
169 | */ | 138 | */ |
170 | 139 | ||
171 | ide_pci_setup_ports(dev, d, 14, &idx[0]); | 140 | ide_pci_setup_ports(dev, d, 14, &hw[0], &hws[0]); |
172 | |||
173 | ide_device_add(idx, d); | ||
174 | 141 | ||
175 | return 0; | 142 | return ide_host_add(d, hws, NULL); |
176 | } | 143 | } |
177 | 144 | ||
178 | static const struct pci_device_id cs5520_pci_tbl[] = { | 145 | static const struct pci_device_id cs5520_pci_tbl[] = { |
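[Note] For PCI hosts the same conversion runs through ide_pci_setup_ports(), which now takes the hw/hws arrays as its last two arguments instead of filling an idx[] table. Sketch of the resulting probe, modelled on the cs5520 hunk; example_init_one and example_port_info are placeholders, and 14 is the fixed legacy IRQ this particular chip uses.

static int __devinit example_init_one(struct pci_dev *dev,
				      const struct pci_device_id *id)
{
	const struct ide_port_info *d = &example_port_info;
	hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };

	/* describe both ports into hw[]/hws[] (14 = fixed legacy IRQ) */
	ide_pci_setup_ports(dev, d, 14, &hw[0], &hws[0]);

	return ide_host_add(d, hws, NULL);
}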
diff --git a/drivers/ide/pci/cs5535.c b/drivers/ide/pci/cs5535.c index dc97c48623f3..5404fe4f701d 100644 --- a/drivers/ide/pci/cs5535.c +++ b/drivers/ide/pci/cs5535.c | |||
@@ -171,8 +171,7 @@ static const struct ide_port_ops cs5535_port_ops = { | |||
171 | static const struct ide_port_info cs5535_chipset __devinitdata = { | 171 | static const struct ide_port_info cs5535_chipset __devinitdata = { |
172 | .name = "CS5535", | 172 | .name = "CS5535", |
173 | .port_ops = &cs5535_port_ops, | 173 | .port_ops = &cs5535_port_ops, |
174 | .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE | | 174 | .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE, |
175 | IDE_HFLAG_ABUSE_SET_DMA_MODE, | ||
176 | .pio_mask = ATA_PIO4, | 175 | .pio_mask = ATA_PIO4, |
177 | .mwdma_mask = ATA_MWDMA2, | 176 | .mwdma_mask = ATA_MWDMA2, |
178 | .udma_mask = ATA_UDMA4, | 177 | .udma_mask = ATA_UDMA4, |
diff --git a/drivers/ide/pci/delkin_cb.c b/drivers/ide/pci/delkin_cb.c index 0106e2a2df77..f84bfb4f600f 100644 --- a/drivers/ide/pci/delkin_cb.c +++ b/drivers/ide/pci/delkin_cb.c | |||
@@ -56,11 +56,10 @@ static const struct ide_port_info delkin_cb_port_info = { | |||
56 | static int __devinit | 56 | static int __devinit |
57 | delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id) | 57 | delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id) |
58 | { | 58 | { |
59 | struct ide_host *host; | ||
59 | unsigned long base; | 60 | unsigned long base; |
60 | hw_regs_t hw; | ||
61 | ide_hwif_t *hwif = NULL; | ||
62 | int i, rc; | 61 | int i, rc; |
63 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | 62 | hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; |
64 | 63 | ||
65 | rc = pci_enable_device(dev); | 64 | rc = pci_enable_device(dev); |
66 | if (rc) { | 65 | if (rc) { |
@@ -87,34 +86,26 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id) | |||
87 | hw.dev = &dev->dev; | 86 | hw.dev = &dev->dev; |
88 | hw.chipset = ide_pci; /* this enables IRQ sharing */ | 87 | hw.chipset = ide_pci; /* this enables IRQ sharing */ |
89 | 88 | ||
90 | hwif = ide_find_port(); | 89 | rc = ide_host_add(&delkin_cb_port_info, hws, &host); |
91 | if (hwif == NULL) | 90 | if (rc) |
92 | goto out_disable; | 91 | goto out_disable; |
93 | 92 | ||
94 | i = hwif->index; | 93 | pci_set_drvdata(dev, host); |
95 | |||
96 | ide_init_port_hw(hwif, &hw); | ||
97 | |||
98 | idx[0] = i; | ||
99 | |||
100 | ide_device_add(idx, &delkin_cb_port_info); | ||
101 | |||
102 | pci_set_drvdata(dev, hwif); | ||
103 | 94 | ||
104 | return 0; | 95 | return 0; |
105 | 96 | ||
106 | out_disable: | 97 | out_disable: |
107 | pci_release_regions(dev); | 98 | pci_release_regions(dev); |
108 | pci_disable_device(dev); | 99 | pci_disable_device(dev); |
109 | return -ENODEV; | 100 | return rc; |
110 | } | 101 | } |
111 | 102 | ||
112 | static void | 103 | static void |
113 | delkin_cb_remove (struct pci_dev *dev) | 104 | delkin_cb_remove (struct pci_dev *dev) |
114 | { | 105 | { |
115 | ide_hwif_t *hwif = pci_get_drvdata(dev); | 106 | struct ide_host *host = pci_get_drvdata(dev); |
116 | 107 | ||
117 | ide_unregister(hwif); | 108 | ide_host_remove(host); |
118 | 109 | ||
119 | pci_release_regions(dev); | 110 | pci_release_regions(dev); |
120 | pci_disable_device(dev); | 111 | pci_disable_device(dev); |
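[Note] delkin_cb is the clearest example of the new lifetime rules: probe stores the struct ide_host created by ide_host_add() in the PCI drvdata, and remove tears the whole host down with ide_host_remove() instead of unregistering a single hwif. A trimmed sketch of that pairing; the hw_regs_t setup and PCI resource handling from the real probe are elided, and the example_* names are placeholders.

static int __devinit example_probe(struct pci_dev *dev,
				   const struct pci_device_id *id)
{
	struct ide_host *host;
	hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
	int rc;

	/* ... enable the device and fill hw.io_ports / hw.irq / hw.dev ... */

	rc = ide_host_add(&example_port_info, hws, &host);
	if (rc)
		return rc;		/* propagate the errno, not -ENODEV */

	pci_set_drvdata(dev, host);
	return 0;
}

static void example_remove(struct pci_dev *dev)
{
	struct ide_host *host = pci_get_drvdata(dev);

	ide_host_remove(host);		/* replaces ide_unregister(hwif) */
}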
diff --git a/drivers/ide/pci/hpt34x.c b/drivers/ide/pci/hpt34x.c index 84c36c117194..9e1d1c4741da 100644 --- a/drivers/ide/pci/hpt34x.c +++ b/drivers/ide/pci/hpt34x.c | |||
@@ -123,7 +123,6 @@ static const struct ide_port_ops hpt34x_port_ops = { | |||
123 | #define IDE_HFLAGS_HPT34X \ | 123 | #define IDE_HFLAGS_HPT34X \ |
124 | (IDE_HFLAG_NO_ATAPI_DMA | \ | 124 | (IDE_HFLAG_NO_ATAPI_DMA | \ |
125 | IDE_HFLAG_NO_DSC | \ | 125 | IDE_HFLAG_NO_DSC | \ |
126 | IDE_HFLAG_ABUSE_SET_DMA_MODE | \ | ||
127 | IDE_HFLAG_NO_AUTODMA) | 126 | IDE_HFLAG_NO_AUTODMA) |
128 | 127 | ||
129 | static const struct ide_port_info hpt34x_chipsets[] __devinitdata = { | 128 | static const struct ide_port_info hpt34x_chipsets[] __devinitdata = { |
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c index 397c6cbe953c..1f1135ce7cd6 100644 --- a/drivers/ide/pci/hpt366.c +++ b/drivers/ide/pci/hpt366.c | |||
@@ -801,9 +801,9 @@ static void hpt370_irq_timeout(ide_drive_t *drive) | |||
801 | printk(KERN_DEBUG "%s: %d bytes in FIFO\n", drive->name, bfifo & 0x1ff); | 801 | printk(KERN_DEBUG "%s: %d bytes in FIFO\n", drive->name, bfifo & 0x1ff); |
802 | 802 | ||
803 | /* get DMA command mode */ | 803 | /* get DMA command mode */ |
804 | dma_cmd = inb(hwif->dma_command); | 804 | dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD); |
805 | /* stop DMA */ | 805 | /* stop DMA */ |
806 | outb(dma_cmd & ~0x1, hwif->dma_command); | 806 | outb(dma_cmd & ~0x1, hwif->dma_base + ATA_DMA_CMD); |
807 | hpt370_clear_engine(drive); | 807 | hpt370_clear_engine(drive); |
808 | } | 808 | } |
809 | 809 | ||
@@ -818,12 +818,12 @@ static void hpt370_dma_start(ide_drive_t *drive) | |||
818 | static int hpt370_dma_end(ide_drive_t *drive) | 818 | static int hpt370_dma_end(ide_drive_t *drive) |
819 | { | 819 | { |
820 | ide_hwif_t *hwif = HWIF(drive); | 820 | ide_hwif_t *hwif = HWIF(drive); |
821 | u8 dma_stat = inb(hwif->dma_status); | 821 | u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS); |
822 | 822 | ||
823 | if (dma_stat & 0x01) { | 823 | if (dma_stat & 0x01) { |
824 | /* wait a little */ | 824 | /* wait a little */ |
825 | udelay(20); | 825 | udelay(20); |
826 | dma_stat = inb(hwif->dma_status); | 826 | dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS); |
827 | if (dma_stat & 0x01) | 827 | if (dma_stat & 0x01) |
828 | hpt370_irq_timeout(drive); | 828 | hpt370_irq_timeout(drive); |
829 | } | 829 | } |
@@ -850,7 +850,7 @@ static int hpt374_dma_test_irq(ide_drive_t *drive) | |||
850 | return 0; | 850 | return 0; |
851 | } | 851 | } |
852 | 852 | ||
853 | dma_stat = inb(hwif->dma_status); | 853 | dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS); |
854 | /* return 1 if INTR asserted */ | 854 | /* return 1 if INTR asserted */ |
855 | if (dma_stat & 4) | 855 | if (dma_stat & 4) |
856 | return 1; | 856 | return 1; |
@@ -1320,7 +1320,15 @@ static int __devinit init_dma_hpt366(ide_hwif_t *hwif, | |||
1320 | unsigned long flags, base = ide_pci_dma_base(hwif, d); | 1320 | unsigned long flags, base = ide_pci_dma_base(hwif, d); |
1321 | u8 dma_old, dma_new, masterdma = 0, slavedma = 0; | 1321 | u8 dma_old, dma_new, masterdma = 0, slavedma = 0; |
1322 | 1322 | ||
1323 | if (base == 0 || ide_pci_set_master(dev, d->name) < 0) | 1323 | if (base == 0) |
1324 | return -1; | ||
1325 | |||
1326 | hwif->dma_base = base; | ||
1327 | |||
1328 | if (ide_pci_check_simplex(hwif, d) < 0) | ||
1329 | return -1; | ||
1330 | |||
1331 | if (ide_pci_set_master(dev, d->name) < 0) | ||
1324 | return -1; | 1332 | return -1; |
1325 | 1333 | ||
1326 | dma_old = inb(base + 2); | 1334 | dma_old = inb(base + 2); |
@@ -1346,7 +1354,7 @@ static int __devinit init_dma_hpt366(ide_hwif_t *hwif, | |||
1346 | if (ide_allocate_dma_engine(hwif)) | 1354 | if (ide_allocate_dma_engine(hwif)) |
1347 | return -1; | 1355 | return -1; |
1348 | 1356 | ||
1349 | ide_setup_dma(hwif, base); | 1357 | hwif->dma_ops = &sff_dma_ops; |
1350 | 1358 | ||
1351 | return 0; | 1359 | return 0; |
1352 | } | 1360 | } |
@@ -1401,7 +1409,6 @@ static int __devinit hpt36x_init(struct pci_dev *dev, struct pci_dev *dev2) | |||
1401 | 1409 | ||
1402 | #define IDE_HFLAGS_HPT3XX \ | 1410 | #define IDE_HFLAGS_HPT3XX \ |
1403 | (IDE_HFLAG_NO_ATAPI_DMA | \ | 1411 | (IDE_HFLAG_NO_ATAPI_DMA | \ |
1404 | IDE_HFLAG_ABUSE_SET_DMA_MODE | \ | ||
1405 | IDE_HFLAG_OFF_BOARD) | 1412 | IDE_HFLAG_OFF_BOARD) |
1406 | 1413 | ||
1407 | static const struct ide_port_ops hpt3xx_port_ops = { | 1414 | static const struct ide_port_ops hpt3xx_port_ops = { |
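[Note] The hpt366 hunk spells out the new division of labour for a custom ->init_dma hook: it must publish dma_base itself, run the simplex and bus-master checks that ide_setup_dma() used to do, and finish by pointing the port at the generic sff_dma_ops. Outline of that sequence with the chip-specific programming elided; example_init_dma is a placeholder name.

static int __devinit example_init_dma(ide_hwif_t *hwif,
				      const struct ide_port_info *d)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	unsigned long base = ide_pci_dma_base(hwif, d);

	if (base == 0)
		return -1;

	hwif->dma_base = base;			/* publish it early */

	if (ide_pci_check_simplex(hwif, d) < 0)	/* was done by ide_setup_dma() */
		return -1;

	if (ide_pci_set_master(dev, d->name) < 0)
		return -1;

	/* ... chip-specific DMA register setup goes here ... */

	if (ide_allocate_dma_engine(hwif))
		return -1;

	hwif->dma_ops = &sff_dma_ops;		/* replaces ide_setup_dma(hwif, base) */

	return 0;
}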
diff --git a/drivers/ide/pci/ns87415.c b/drivers/ide/pci/ns87415.c index 45ba71a7182f..5cd2b32ff0ef 100644 --- a/drivers/ide/pci/ns87415.c +++ b/drivers/ide/pci/ns87415.c | |||
@@ -28,10 +28,6 @@ | |||
28 | */ | 28 | */ |
29 | #include <asm/superio.h> | 29 | #include <asm/superio.h> |
30 | 30 | ||
31 | static unsigned long superio_ide_status[2]; | ||
32 | static unsigned long superio_ide_select[2]; | ||
33 | static unsigned long superio_ide_dma_status[2]; | ||
34 | |||
35 | #define SUPERIO_IDE_MAX_RETRIES 25 | 31 | #define SUPERIO_IDE_MAX_RETRIES 25 |
36 | 32 | ||
37 | /* Because of a defect in Super I/O, all reads of the PCI DMA status | 33 | /* Because of a defect in Super I/O, all reads of the PCI DMA status |
@@ -40,27 +36,28 @@ static unsigned long superio_ide_dma_status[2]; | |||
40 | */ | 36 | */ |
41 | static u8 superio_ide_inb (unsigned long port) | 37 | static u8 superio_ide_inb (unsigned long port) |
42 | { | 38 | { |
43 | if (port == superio_ide_status[0] || | 39 | u8 tmp; |
44 | port == superio_ide_status[1] || | 40 | int retries = SUPERIO_IDE_MAX_RETRIES; |
45 | port == superio_ide_select[0] || | ||
46 | port == superio_ide_select[1] || | ||
47 | port == superio_ide_dma_status[0] || | ||
48 | port == superio_ide_dma_status[1]) { | ||
49 | u8 tmp; | ||
50 | int retries = SUPERIO_IDE_MAX_RETRIES; | ||
51 | 41 | ||
52 | /* printk(" [ reading port 0x%x with retry ] ", port); */ | 42 | /* printk(" [ reading port 0x%x with retry ] ", port); */ |
53 | 43 | ||
54 | do { | 44 | do { |
55 | tmp = inb(port); | 45 | tmp = inb(port); |
56 | if (tmp == 0) | 46 | if (tmp == 0) |
57 | udelay(50); | 47 | udelay(50); |
58 | } while (tmp == 0 && retries-- > 0); | 48 | } while (tmp == 0 && retries-- > 0); |
59 | 49 | ||
60 | return tmp; | 50 | return tmp; |
61 | } | 51 | } |
62 | 52 | ||
63 | return inb(port); | 53 | static u8 superio_read_status(ide_hwif_t *hwif) |
54 | { | ||
55 | return superio_ide_inb(hwif->io_ports.status_addr); | ||
56 | } | ||
57 | |||
58 | static u8 superio_read_sff_dma_status(ide_hwif_t *hwif) | ||
59 | { | ||
60 | return superio_ide_inb(hwif->dma_base + ATA_DMA_STATUS); | ||
64 | } | 61 | } |
65 | 62 | ||
66 | static void superio_tf_read(ide_drive_t *drive, ide_task_t *task) | 63 | static void superio_tf_read(ide_drive_t *drive, ide_task_t *task) |
@@ -78,6 +75,8 @@ static void superio_tf_read(ide_drive_t *drive, ide_task_t *task) | |||
78 | /* be sure we're looking at the low order bits */ | 75 | /* be sure we're looking at the low order bits */ |
79 | outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr); | 76 | outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr); |
80 | 77 | ||
78 | if (task->tf_flags & IDE_TFLAG_IN_FEATURE) | ||
79 | tf->feature = inb(io_ports->feature_addr); | ||
81 | if (task->tf_flags & IDE_TFLAG_IN_NSECT) | 80 | if (task->tf_flags & IDE_TFLAG_IN_NSECT) |
82 | tf->nsect = inb(io_ports->nsect_addr); | 81 | tf->nsect = inb(io_ports->nsect_addr); |
83 | if (task->tf_flags & IDE_TFLAG_IN_LBAL) | 82 | if (task->tf_flags & IDE_TFLAG_IN_LBAL) |
@@ -105,36 +104,32 @@ static void superio_tf_read(ide_drive_t *drive, ide_task_t *task) | |||
105 | } | 104 | } |
106 | } | 105 | } |
107 | 106 | ||
108 | static void __devinit superio_ide_init_iops (struct hwif_s *hwif) | 107 | static const struct ide_tp_ops superio_tp_ops = { |
109 | { | 108 | .exec_command = ide_exec_command, |
110 | struct pci_dev *pdev = to_pci_dev(hwif->dev); | 109 | .read_status = superio_read_status, |
111 | u32 base, dmabase; | 110 | .read_altstatus = ide_read_altstatus, |
112 | u8 port = hwif->channel, tmp; | 111 | .read_sff_dma_status = superio_read_sff_dma_status, |
113 | 112 | ||
114 | base = pci_resource_start(pdev, port * 2) & ~3; | 113 | .set_irq = ide_set_irq, |
115 | dmabase = pci_resource_start(pdev, 4) & ~3; | ||
116 | |||
117 | superio_ide_status[port] = base + 7; | ||
118 | superio_ide_select[port] = base + 6; | ||
119 | superio_ide_dma_status[port] = dmabase + (!port ? 2 : 0xa); | ||
120 | |||
121 | /* Clear error/interrupt, enable dma */ | ||
122 | tmp = superio_ide_inb(superio_ide_dma_status[port]); | ||
123 | outb(tmp | 0x66, superio_ide_dma_status[port]); | ||
124 | 114 | ||
125 | hwif->tf_read = superio_tf_read; | 115 | .tf_load = ide_tf_load, |
116 | .tf_read = superio_tf_read, | ||
126 | 117 | ||
127 | /* We need to override inb to workaround a SuperIO errata */ | 118 | .input_data = ide_input_data, |
128 | hwif->INB = superio_ide_inb; | 119 | .output_data = ide_output_data, |
129 | } | 120 | }; |
130 | 121 | ||
131 | static void __devinit init_iops_ns87415(ide_hwif_t *hwif) | 122 | static void __devinit superio_init_iops(struct hwif_s *hwif) |
132 | { | 123 | { |
133 | struct pci_dev *dev = to_pci_dev(hwif->dev); | 124 | struct pci_dev *pdev = to_pci_dev(hwif->dev); |
125 | u32 dma_stat; | ||
126 | u8 port = hwif->channel, tmp; | ||
134 | 127 | ||
135 | if (PCI_SLOT(dev->devfn) == 0xE) | 128 | dma_stat = (pci_resource_start(pdev, 4) & ~3) + (!port ? 2 : 0xa); |
136 | /* Built-in - assume it's under superio. */ | 129 | |
137 | superio_ide_init_iops(hwif); | 130 | /* Clear error/interrupt, enable dma */ |
131 | tmp = superio_ide_inb(dma_stat); | ||
132 | outb(tmp | 0x66, dma_stat); | ||
138 | } | 133 | } |
139 | #endif | 134 | #endif |
140 | 135 | ||
@@ -200,14 +195,14 @@ static int ns87415_dma_end(ide_drive_t *drive) | |||
200 | u8 dma_stat = 0, dma_cmd = 0; | 195 | u8 dma_stat = 0, dma_cmd = 0; |
201 | 196 | ||
202 | drive->waiting_for_dma = 0; | 197 | drive->waiting_for_dma = 0; |
203 | dma_stat = hwif->INB(hwif->dma_status); | 198 | dma_stat = hwif->tp_ops->read_sff_dma_status(hwif); |
204 | /* get dma command mode */ | 199 | /* get DMA command mode */ |
205 | dma_cmd = hwif->INB(hwif->dma_command); | 200 | dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD); |
206 | /* stop DMA */ | 201 | /* stop DMA */ |
207 | outb(dma_cmd & ~1, hwif->dma_command); | 202 | outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD); |
208 | /* from ERRATA: clear the INTR & ERROR bits */ | 203 | /* from ERRATA: clear the INTR & ERROR bits */ |
209 | dma_cmd = hwif->INB(hwif->dma_command); | 204 | dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD); |
210 | outb(dma_cmd | 6, hwif->dma_command); | 205 | outb(dma_cmd | 6, hwif->dma_base + ATA_DMA_CMD); |
211 | /* and free any DMA resources */ | 206 | /* and free any DMA resources */ |
212 | ide_destroy_dmatable(drive); | 207 | ide_destroy_dmatable(drive); |
213 | /* verify good DMA status */ | 208 | /* verify good DMA status */ |
@@ -276,7 +271,7 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif) | |||
276 | outb(8, hwif->io_ports.ctl_addr); | 271 | outb(8, hwif->io_ports.ctl_addr); |
277 | do { | 272 | do { |
278 | udelay(50); | 273 | udelay(50); |
279 | stat = hwif->INB(hwif->io_ports.status_addr); | 274 | stat = hwif->tp_ops->read_status(hwif); |
280 | if (stat == 0xff) | 275 | if (stat == 0xff) |
281 | break; | 276 | break; |
282 | } while ((stat & BUSY_STAT) && --timeout); | 277 | } while ((stat & BUSY_STAT) && --timeout); |
@@ -291,7 +286,7 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif) | |||
291 | if (!hwif->dma_base) | 286 | if (!hwif->dma_base) |
292 | return; | 287 | return; |
293 | 288 | ||
294 | outb(0x60, hwif->dma_status); | 289 | outb(0x60, hwif->dma_base + ATA_DMA_STATUS); |
295 | } | 290 | } |
296 | 291 | ||
297 | static const struct ide_port_ops ns87415_port_ops = { | 292 | static const struct ide_port_ops ns87415_port_ops = { |
@@ -311,9 +306,6 @@ static const struct ide_dma_ops ns87415_dma_ops = { | |||
311 | 306 | ||
312 | static const struct ide_port_info ns87415_chipset __devinitdata = { | 307 | static const struct ide_port_info ns87415_chipset __devinitdata = { |
313 | .name = "NS87415", | 308 | .name = "NS87415", |
314 | #ifdef CONFIG_SUPERIO | ||
315 | .init_iops = init_iops_ns87415, | ||
316 | #endif | ||
317 | .init_hwif = init_hwif_ns87415, | 309 | .init_hwif = init_hwif_ns87415, |
318 | .port_ops = &ns87415_port_ops, | 310 | .port_ops = &ns87415_port_ops, |
319 | .dma_ops = &ns87415_dma_ops, | 311 | .dma_ops = &ns87415_dma_ops, |
@@ -323,7 +315,16 @@ static const struct ide_port_info ns87415_chipset __devinitdata = { | |||
323 | 315 | ||
324 | static int __devinit ns87415_init_one(struct pci_dev *dev, const struct pci_device_id *id) | 316 | static int __devinit ns87415_init_one(struct pci_dev *dev, const struct pci_device_id *id) |
325 | { | 317 | { |
326 | return ide_setup_pci_device(dev, &ns87415_chipset); | 318 | struct ide_port_info d = ns87415_chipset; |
319 | |||
320 | #ifdef CONFIG_SUPERIO | ||
321 | if (PCI_SLOT(dev->devfn) == 0xE) { | ||
322 | /* Built-in - assume it's under superio. */ | ||
323 | d.init_iops = superio_init_iops; | ||
324 | d.tp_ops = &superio_tp_ops; | ||
325 | } | ||
326 | #endif | ||
327 | return ide_setup_pci_device(dev, &d); | ||
327 | } | 328 | } |
328 | 329 | ||
329 | static const struct pci_device_id ns87415_pci_tbl[] = { | 330 | static const struct pci_device_id ns87415_pci_tbl[] = { |
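[Note] ns87415 introduces the other big interface of this series, struct ide_tp_ops: a driver overrides only the accessors it needs (here the two reads that want the Super I/O retry) and code elsewhere calls through hwif->tp_ops instead of the old per-port INB/OUTB hooks. A small sketch of the new calling convention, loosely based on the busy-wait loop in init_hwif_ns87415; example_wait_not_busy is a placeholder.

/* Sketch: poll status through the tp_ops method table. */
static u8 example_wait_not_busy(ide_hwif_t *hwif)
{
	int retries = 100;
	u8 stat;

	do {
		stat = hwif->tp_ops->read_status(hwif);	/* was hwif->INB(status_addr) */
		if (stat == 0xff || (stat & BUSY_STAT) == 0)
			break;
		udelay(50);
	} while (--retries > 0);

	return stat;
}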
diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pci/pdc202xx_old.c index fca89eda5c02..e54dc653b8c4 100644 --- a/drivers/ide/pci/pdc202xx_old.c +++ b/drivers/ide/pci/pdc202xx_old.c | |||
@@ -206,7 +206,7 @@ static int pdc202xx_dma_test_irq(ide_drive_t *drive) | |||
206 | { | 206 | { |
207 | ide_hwif_t *hwif = HWIF(drive); | 207 | ide_hwif_t *hwif = HWIF(drive); |
208 | unsigned long high_16 = hwif->extra_base - 16; | 208 | unsigned long high_16 = hwif->extra_base - 16; |
209 | u8 dma_stat = inb(hwif->dma_status); | 209 | u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS); |
210 | u8 sc1d = inb(high_16 + 0x001d); | 210 | u8 sc1d = inb(high_16 + 0x001d); |
211 | 211 | ||
212 | if (hwif->channel) { | 212 | if (hwif->channel) { |
@@ -312,7 +312,6 @@ static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev, | |||
312 | 312 | ||
313 | #define IDE_HFLAGS_PDC202XX \ | 313 | #define IDE_HFLAGS_PDC202XX \ |
314 | (IDE_HFLAG_ERROR_STOPS_FIFO | \ | 314 | (IDE_HFLAG_ERROR_STOPS_FIFO | \ |
315 | IDE_HFLAG_ABUSE_SET_DMA_MODE | \ | ||
316 | IDE_HFLAG_OFF_BOARD) | 315 | IDE_HFLAG_OFF_BOARD) |
317 | 316 | ||
318 | static const struct ide_port_ops pdc20246_port_ops = { | 317 | static const struct ide_port_ops pdc20246_port_ops = { |
diff --git a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c index f04738d14a6f..0ce41b4dddaf 100644 --- a/drivers/ide/pci/piix.c +++ b/drivers/ide/pci/piix.c | |||
@@ -227,9 +227,9 @@ static void piix_dma_clear_irq(ide_drive_t *drive) | |||
227 | u8 dma_stat; | 227 | u8 dma_stat; |
228 | 228 | ||
229 | /* clear the INTR & ERROR bits */ | 229 | /* clear the INTR & ERROR bits */ |
230 | dma_stat = inb(hwif->dma_status); | 230 | dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS); |
231 | /* Should we force the bit as well ? */ | 231 | /* Should we force the bit as well ? */ |
232 | outb(dma_stat, hwif->dma_status); | 232 | outb(dma_stat, hwif->dma_base + ATA_DMA_STATUS); |
233 | } | 233 | } |
234 | 234 | ||
235 | struct ich_laptop { | 235 | struct ich_laptop { |
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c index 789c66dfbde5..94a7ab864236 100644 --- a/drivers/ide/pci/scc_pata.c +++ b/drivers/ide/pci/scc_pata.c | |||
@@ -65,7 +65,7 @@ | |||
65 | 65 | ||
66 | static struct scc_ports { | 66 | static struct scc_ports { |
67 | unsigned long ctl, dma; | 67 | unsigned long ctl, dma; |
68 | ide_hwif_t *hwif; /* for removing port from system */ | 68 | struct ide_host *host; /* for removing port from system */ |
69 | } scc_ports[MAX_HWIFS]; | 69 | } scc_ports[MAX_HWIFS]; |
70 | 70 | ||
71 | /* PIO transfer mode table */ | 71 | /* PIO transfer mode table */ |
@@ -126,6 +126,46 @@ static u8 scc_ide_inb(unsigned long port) | |||
126 | return (u8)data; | 126 | return (u8)data; |
127 | } | 127 | } |
128 | 128 | ||
129 | static void scc_exec_command(ide_hwif_t *hwif, u8 cmd) | ||
130 | { | ||
131 | out_be32((void *)hwif->io_ports.command_addr, cmd); | ||
132 | eieio(); | ||
133 | in_be32((void *)(hwif->dma_base + 0x01c)); | ||
134 | eieio(); | ||
135 | } | ||
136 | |||
137 | static u8 scc_read_status(ide_hwif_t *hwif) | ||
138 | { | ||
139 | return (u8)in_be32((void *)hwif->io_ports.status_addr); | ||
140 | } | ||
141 | |||
142 | static u8 scc_read_altstatus(ide_hwif_t *hwif) | ||
143 | { | ||
144 | return (u8)in_be32((void *)hwif->io_ports.ctl_addr); | ||
145 | } | ||
146 | |||
147 | static u8 scc_read_sff_dma_status(ide_hwif_t *hwif) | ||
148 | { | ||
149 | return (u8)in_be32((void *)(hwif->dma_base + 4)); | ||
150 | } | ||
151 | |||
152 | static void scc_set_irq(ide_hwif_t *hwif, int on) | ||
153 | { | ||
154 | u8 ctl = ATA_DEVCTL_OBS; | ||
155 | |||
156 | if (on == 4) { /* hack for SRST */ | ||
157 | ctl |= 4; | ||
158 | on &= ~4; | ||
159 | } | ||
160 | |||
161 | ctl |= on ? 0 : 2; | ||
162 | |||
163 | out_be32((void *)hwif->io_ports.ctl_addr, ctl); | ||
164 | eieio(); | ||
165 | in_be32((void *)(hwif->dma_base + 0x01c)); | ||
166 | eieio(); | ||
167 | } | ||
168 | |||
129 | static void scc_ide_insw(unsigned long port, void *addr, u32 count) | 169 | static void scc_ide_insw(unsigned long port, void *addr, u32 count) |
130 | { | 170 | { |
131 | u16 *ptr = (u16 *)addr; | 171 | u16 *ptr = (u16 *)addr; |
@@ -148,14 +188,6 @@ static void scc_ide_outb(u8 addr, unsigned long port) | |||
148 | out_be32((void*)port, addr); | 188 | out_be32((void*)port, addr); |
149 | } | 189 | } |
150 | 190 | ||
151 | static void scc_ide_outbsync(ide_hwif_t *hwif, u8 addr, unsigned long port) | ||
152 | { | ||
153 | out_be32((void*)port, addr); | ||
154 | eieio(); | ||
155 | in_be32((void*)(hwif->dma_base + 0x01c)); | ||
156 | eieio(); | ||
157 | } | ||
158 | |||
159 | static void | 191 | static void |
160 | scc_ide_outsw(unsigned long port, void *addr, u32 count) | 192 | scc_ide_outsw(unsigned long port, void *addr, u32 count) |
161 | { | 193 | { |
@@ -261,14 +293,14 @@ static void scc_dma_host_set(ide_drive_t *drive, int on) | |||
261 | { | 293 | { |
262 | ide_hwif_t *hwif = drive->hwif; | 294 | ide_hwif_t *hwif = drive->hwif; |
263 | u8 unit = (drive->select.b.unit & 0x01); | 295 | u8 unit = (drive->select.b.unit & 0x01); |
264 | u8 dma_stat = scc_ide_inb(hwif->dma_status); | 296 | u8 dma_stat = scc_ide_inb(hwif->dma_base + 4); |
265 | 297 | ||
266 | if (on) | 298 | if (on) |
267 | dma_stat |= (1 << (5 + unit)); | 299 | dma_stat |= (1 << (5 + unit)); |
268 | else | 300 | else |
269 | dma_stat &= ~(1 << (5 + unit)); | 301 | dma_stat &= ~(1 << (5 + unit)); |
270 | 302 | ||
271 | scc_ide_outb(dma_stat, hwif->dma_status); | 303 | scc_ide_outb(dma_stat, hwif->dma_base + 4); |
272 | } | 304 | } |
273 | 305 | ||
274 | /** | 306 | /** |
@@ -304,13 +336,13 @@ static int scc_dma_setup(ide_drive_t *drive) | |||
304 | out_be32((void __iomem *)(hwif->dma_base + 8), hwif->dmatable_dma); | 336 | out_be32((void __iomem *)(hwif->dma_base + 8), hwif->dmatable_dma); |
305 | 337 | ||
306 | /* specify r/w */ | 338 | /* specify r/w */ |
307 | out_be32((void __iomem *)hwif->dma_command, reading); | 339 | out_be32((void __iomem *)hwif->dma_base, reading); |
308 | 340 | ||
309 | /* read dma_status for INTR & ERROR flags */ | 341 | /* read DMA status for INTR & ERROR flags */ |
310 | dma_stat = in_be32((void __iomem *)hwif->dma_status); | 342 | dma_stat = in_be32((void __iomem *)(hwif->dma_base + 4)); |
311 | 343 | ||
312 | /* clear INTR & ERROR flags */ | 344 | /* clear INTR & ERROR flags */ |
313 | out_be32((void __iomem *)hwif->dma_status, dma_stat|6); | 345 | out_be32((void __iomem *)(hwif->dma_base + 4), dma_stat | 6); |
314 | drive->waiting_for_dma = 1; | 346 | drive->waiting_for_dma = 1; |
315 | return 0; | 347 | return 0; |
316 | } | 348 | } |
@@ -318,10 +350,10 @@ static int scc_dma_setup(ide_drive_t *drive) | |||
318 | static void scc_dma_start(ide_drive_t *drive) | 350 | static void scc_dma_start(ide_drive_t *drive) |
319 | { | 351 | { |
320 | ide_hwif_t *hwif = drive->hwif; | 352 | ide_hwif_t *hwif = drive->hwif; |
321 | u8 dma_cmd = scc_ide_inb(hwif->dma_command); | 353 | u8 dma_cmd = scc_ide_inb(hwif->dma_base); |
322 | 354 | ||
323 | /* start DMA */ | 355 | /* start DMA */ |
324 | scc_ide_outb(dma_cmd | 1, hwif->dma_command); | 356 | scc_ide_outb(dma_cmd | 1, hwif->dma_base); |
325 | hwif->dma = 1; | 357 | hwif->dma = 1; |
326 | wmb(); | 358 | wmb(); |
327 | } | 359 | } |
@@ -333,13 +365,13 @@ static int __scc_dma_end(ide_drive_t *drive) | |||
333 | 365 | ||
334 | drive->waiting_for_dma = 0; | 366 | drive->waiting_for_dma = 0; |
335 | /* get DMA command mode */ | 367 | /* get DMA command mode */ |
336 | dma_cmd = scc_ide_inb(hwif->dma_command); | 368 | dma_cmd = scc_ide_inb(hwif->dma_base); |
337 | /* stop DMA */ | 369 | /* stop DMA */ |
338 | scc_ide_outb(dma_cmd & ~1, hwif->dma_command); | 370 | scc_ide_outb(dma_cmd & ~1, hwif->dma_base); |
339 | /* get DMA status */ | 371 | /* get DMA status */ |
340 | dma_stat = scc_ide_inb(hwif->dma_status); | 372 | dma_stat = scc_ide_inb(hwif->dma_base + 4); |
341 | /* clear the INTR & ERROR bits */ | 373 | /* clear the INTR & ERROR bits */ |
342 | scc_ide_outb(dma_stat | 6, hwif->dma_status); | 374 | scc_ide_outb(dma_stat | 6, hwif->dma_base + 4); |
343 | /* purge DMA mappings */ | 375 | /* purge DMA mappings */ |
344 | ide_destroy_dmatable(drive); | 376 | ide_destroy_dmatable(drive); |
345 | /* verify good DMA status */ | 377 | /* verify good DMA status */ |
@@ -359,6 +391,7 @@ static int __scc_dma_end(ide_drive_t *drive) | |||
359 | static int scc_dma_end(ide_drive_t *drive) | 391 | static int scc_dma_end(ide_drive_t *drive) |
360 | { | 392 | { |
361 | ide_hwif_t *hwif = HWIF(drive); | 393 | ide_hwif_t *hwif = HWIF(drive); |
394 | void __iomem *dma_base = (void __iomem *)hwif->dma_base; | ||
362 | unsigned long intsts_port = hwif->dma_base + 0x014; | 395 | unsigned long intsts_port = hwif->dma_base + 0x014; |
363 | u32 reg; | 396 | u32 reg; |
364 | int dma_stat, data_loss = 0; | 397 | int dma_stat, data_loss = 0; |
@@ -397,7 +430,7 @@ static int scc_dma_end(ide_drive_t *drive) | |||
397 | printk(KERN_WARNING "%s: SERROR\n", SCC_PATA_NAME); | 430 | printk(KERN_WARNING "%s: SERROR\n", SCC_PATA_NAME); |
398 | out_be32((void __iomem *)intsts_port, INTSTS_SERROR|INTSTS_BMSINT); | 431 | out_be32((void __iomem *)intsts_port, INTSTS_SERROR|INTSTS_BMSINT); |
399 | 432 | ||
400 | out_be32((void __iomem *)hwif->dma_command, in_be32((void __iomem *)hwif->dma_command) & ~QCHCD_IOS_SS); | 433 | out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS); |
401 | continue; | 434 | continue; |
402 | } | 435 | } |
403 | 436 | ||
@@ -412,7 +445,7 @@ static int scc_dma_end(ide_drive_t *drive) | |||
412 | 445 | ||
413 | out_be32((void __iomem *)intsts_port, INTSTS_PRERR|INTSTS_BMSINT); | 446 | out_be32((void __iomem *)intsts_port, INTSTS_PRERR|INTSTS_BMSINT); |
414 | 447 | ||
415 | out_be32((void __iomem *)hwif->dma_command, in_be32((void __iomem *)hwif->dma_command) & ~QCHCD_IOS_SS); | 448 | out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS); |
416 | continue; | 449 | continue; |
417 | } | 450 | } |
418 | 451 | ||
@@ -420,12 +453,12 @@ static int scc_dma_end(ide_drive_t *drive) | |||
420 | printk(KERN_WARNING "%s: Response Error\n", SCC_PATA_NAME); | 453 | printk(KERN_WARNING "%s: Response Error\n", SCC_PATA_NAME); |
421 | out_be32((void __iomem *)intsts_port, INTSTS_RERR|INTSTS_BMSINT); | 454 | out_be32((void __iomem *)intsts_port, INTSTS_RERR|INTSTS_BMSINT); |
422 | 455 | ||
423 | out_be32((void __iomem *)hwif->dma_command, in_be32((void __iomem *)hwif->dma_command) & ~QCHCD_IOS_SS); | 456 | out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS); |
424 | continue; | 457 | continue; |
425 | } | 458 | } |
426 | 459 | ||
427 | if (reg & INTSTS_ICERR) { | 460 | if (reg & INTSTS_ICERR) { |
428 | out_be32((void __iomem *)hwif->dma_command, in_be32((void __iomem *)hwif->dma_command) & ~QCHCD_IOS_SS); | 461 | out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS); |
429 | 462 | ||
430 | printk(KERN_WARNING "%s: Illegal Configuration\n", SCC_PATA_NAME); | 463 | printk(KERN_WARNING "%s: Illegal Configuration\n", SCC_PATA_NAME); |
431 | out_be32((void __iomem *)intsts_port, INTSTS_ICERR|INTSTS_BMSINT); | 464 | out_be32((void __iomem *)intsts_port, INTSTS_ICERR|INTSTS_BMSINT); |
@@ -553,14 +586,9 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev, | |||
553 | const struct ide_port_info *d) | 586 | const struct ide_port_info *d) |
554 | { | 587 | { |
555 | struct scc_ports *ports = pci_get_drvdata(dev); | 588 | struct scc_ports *ports = pci_get_drvdata(dev); |
556 | ide_hwif_t *hwif = NULL; | 589 | struct ide_host *host; |
557 | hw_regs_t hw; | 590 | hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; |
558 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | 591 | int i, rc; |
559 | int i; | ||
560 | |||
561 | hwif = ide_find_port_slot(d); | ||
562 | if (hwif == NULL) | ||
563 | return -ENOMEM; | ||
564 | 592 | ||
565 | memset(&hw, 0, sizeof(hw)); | 593 | memset(&hw, 0, sizeof(hw)); |
566 | for (i = 0; i <= 8; i++) | 594 | for (i = 0; i <= 8; i++) |
@@ -568,11 +596,12 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev, | |||
568 | hw.irq = dev->irq; | 596 | hw.irq = dev->irq; |
569 | hw.dev = &dev->dev; | 597 | hw.dev = &dev->dev; |
570 | hw.chipset = ide_pci; | 598 | hw.chipset = ide_pci; |
571 | ide_init_port_hw(hwif, &hw); | ||
572 | 599 | ||
573 | idx[0] = hwif->index; | 600 | rc = ide_host_add(d, hws, &host); |
601 | if (rc) | ||
602 | return rc; | ||
574 | 603 | ||
575 | ide_device_add(idx, d); | 604 | ports->host = host; |
576 | 605 | ||
577 | return 0; | 606 | return 0; |
578 | } | 607 | } |
@@ -701,6 +730,8 @@ static void scc_tf_read(ide_drive_t *drive, ide_task_t *task) | |||
701 | /* be sure we're looking at the low order bits */ | 730 | /* be sure we're looking at the low order bits */ |
702 | scc_ide_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr); | 731 | scc_ide_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr); |
703 | 732 | ||
733 | if (task->tf_flags & IDE_TFLAG_IN_FEATURE) | ||
734 | tf->feature = scc_ide_inb(io_ports->feature_addr); | ||
704 | if (task->tf_flags & IDE_TFLAG_IN_NSECT) | 735 | if (task->tf_flags & IDE_TFLAG_IN_NSECT) |
705 | tf->nsect = scc_ide_inb(io_ports->nsect_addr); | 736 | tf->nsect = scc_ide_inb(io_ports->nsect_addr); |
706 | if (task->tf_flags & IDE_TFLAG_IN_LBAL) | 737 | if (task->tf_flags & IDE_TFLAG_IN_LBAL) |
@@ -774,16 +805,6 @@ static void __devinit init_mmio_iops_scc(ide_hwif_t *hwif) | |||
774 | 805 | ||
775 | ide_set_hwifdata(hwif, ports); | 806 | ide_set_hwifdata(hwif, ports); |
776 | 807 | ||
777 | hwif->tf_load = scc_tf_load; | ||
778 | hwif->tf_read = scc_tf_read; | ||
779 | |||
780 | hwif->input_data = scc_input_data; | ||
781 | hwif->output_data = scc_output_data; | ||
782 | |||
783 | hwif->INB = scc_ide_inb; | ||
784 | hwif->OUTB = scc_ide_outb; | ||
785 | hwif->OUTBSYNC = scc_ide_outbsync; | ||
786 | |||
787 | hwif->dma_base = dma_base; | 808 | hwif->dma_base = dma_base; |
788 | hwif->config_data = ports->ctl; | 809 | hwif->config_data = ports->ctl; |
789 | } | 810 | } |
@@ -824,11 +845,6 @@ static void __devinit init_hwif_scc(ide_hwif_t *hwif) | |||
824 | { | 845 | { |
825 | struct scc_ports *ports = ide_get_hwifdata(hwif); | 846 | struct scc_ports *ports = ide_get_hwifdata(hwif); |
826 | 847 | ||
827 | ports->hwif = hwif; | ||
828 | |||
829 | hwif->dma_command = hwif->dma_base; | ||
830 | hwif->dma_status = hwif->dma_base + 0x04; | ||
831 | |||
832 | /* PTERADD */ | 848 | /* PTERADD */ |
833 | out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma); | 849 | out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma); |
834 | 850 | ||
@@ -838,6 +854,21 @@ static void __devinit init_hwif_scc(ide_hwif_t *hwif) | |||
838 | hwif->ultra_mask = ATA_UDMA5; /* 100MHz */ | 854 | hwif->ultra_mask = ATA_UDMA5; /* 100MHz */ |
839 | } | 855 | } |
840 | 856 | ||
857 | static const struct ide_tp_ops scc_tp_ops = { | ||
858 | .exec_command = scc_exec_command, | ||
859 | .read_status = scc_read_status, | ||
860 | .read_altstatus = scc_read_altstatus, | ||
861 | .read_sff_dma_status = scc_read_sff_dma_status, | ||
862 | |||
863 | .set_irq = scc_set_irq, | ||
864 | |||
865 | .tf_load = scc_tf_load, | ||
866 | .tf_read = scc_tf_read, | ||
867 | |||
868 | .input_data = scc_input_data, | ||
869 | .output_data = scc_output_data, | ||
870 | }; | ||
871 | |||
841 | static const struct ide_port_ops scc_port_ops = { | 872 | static const struct ide_port_ops scc_port_ops = { |
842 | .set_pio_mode = scc_set_pio_mode, | 873 | .set_pio_mode = scc_set_pio_mode, |
843 | .set_dma_mode = scc_set_dma_mode, | 874 | .set_dma_mode = scc_set_dma_mode, |
@@ -861,6 +892,7 @@ static const struct ide_dma_ops scc_dma_ops = { | |||
861 | .name = name_str, \ | 892 | .name = name_str, \ |
862 | .init_iops = init_iops_scc, \ | 893 | .init_iops = init_iops_scc, \ |
863 | .init_hwif = init_hwif_scc, \ | 894 | .init_hwif = init_hwif_scc, \ |
895 | .tp_ops = &scc_tp_ops, \ | ||
864 | .port_ops = &scc_port_ops, \ | 896 | .port_ops = &scc_port_ops, \ |
865 | .dma_ops = &scc_dma_ops, \ | 897 | .dma_ops = &scc_dma_ops, \ |
866 | .host_flags = IDE_HFLAG_SINGLE, \ | 898 | .host_flags = IDE_HFLAG_SINGLE, \ |
@@ -895,7 +927,8 @@ static int __devinit scc_init_one(struct pci_dev *dev, const struct pci_device_i | |||
895 | static void __devexit scc_remove(struct pci_dev *dev) | 927 | static void __devexit scc_remove(struct pci_dev *dev) |
896 | { | 928 | { |
897 | struct scc_ports *ports = pci_get_drvdata(dev); | 929 | struct scc_ports *ports = pci_get_drvdata(dev); |
898 | ide_hwif_t *hwif = ports->hwif; | 930 | struct ide_host *host = ports->host; |
931 | ide_hwif_t *hwif = host->ports[0]; | ||
899 | 932 | ||
900 | if (hwif->dmatable_cpu) { | 933 | if (hwif->dmatable_cpu) { |
901 | pci_free_consistent(dev, PRD_ENTRIES * PRD_BYTES, | 934 | pci_free_consistent(dev, PRD_ENTRIES * PRD_BYTES, |
@@ -903,7 +936,7 @@ static void __devexit scc_remove(struct pci_dev *dev) | |||
903 | hwif->dmatable_cpu = NULL; | 936 | hwif->dmatable_cpu = NULL; |
904 | } | 937 | } |
905 | 938 | ||
906 | ide_unregister(hwif); | 939 | ide_host_remove(host); |
907 | 940 | ||
908 | iounmap((void*)ports->dma); | 941 | iounmap((void*)ports->dma); |
909 | iounmap((void*)ports->ctl); | 942 | iounmap((void*)ports->ctl); |
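[Note] Both scc_pata here and pmac further down implement the new ->set_irq hook with the same small encoding: 0/1 toggles nIEN, and the magic value 4 is an explicit hack that additionally asserts SRST. The sketch below only restates that decoding in one place; how the byte reaches the control register (out_be32 plus a flushing read for scc, writeb for pmac) stays bus-specific, and example_devctl_value is a placeholder.

/* Sketch: compute the device-control byte the ->set_irq hook writes. */
static u8 example_devctl_value(int on)
{
	u8 ctl = ATA_DEVCTL_OBS;	/* obsolete bit, always set */

	if (on == 4) {			/* hack for SRST */
		ctl |= 4;		/* assert SRST */
		on &= ~4;
	}

	ctl |= on ? 0 : 2;		/* set nIEN when interrupts are off */

	return ctl;
}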
diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/pci/serverworks.c index a1fb20826a5b..127ccb45e261 100644 --- a/drivers/ide/pci/serverworks.c +++ b/drivers/ide/pci/serverworks.c | |||
@@ -349,9 +349,7 @@ static const struct ide_port_ops svwks_port_ops = { | |||
349 | .cable_detect = svwks_cable_detect, | 349 | .cable_detect = svwks_cable_detect, |
350 | }; | 350 | }; |
351 | 351 | ||
352 | #define IDE_HFLAGS_SVWKS \ | 352 | #define IDE_HFLAGS_SVWKS IDE_HFLAG_LEGACY_IRQS |
353 | (IDE_HFLAG_LEGACY_IRQS | \ | ||
354 | IDE_HFLAG_ABUSE_SET_DMA_MODE) | ||
355 | 353 | ||
356 | static const struct ide_port_info serverworks_chipsets[] __devinitdata = { | 354 | static const struct ide_port_info serverworks_chipsets[] __devinitdata = { |
357 | { /* 0 */ | 355 | { /* 0 */ |
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c index c79ff5b41088..42eef19a18f1 100644 --- a/drivers/ide/pci/sgiioc4.c +++ b/drivers/ide/pci/sgiioc4.c | |||
@@ -127,7 +127,7 @@ sgiioc4_checkirq(ide_hwif_t * hwif) | |||
127 | return 0; | 127 | return 0; |
128 | } | 128 | } |
129 | 129 | ||
130 | static u8 sgiioc4_INB(unsigned long); | 130 | static u8 sgiioc4_read_status(ide_hwif_t *); |
131 | 131 | ||
132 | static int | 132 | static int |
133 | sgiioc4_clearirq(ide_drive_t * drive) | 133 | sgiioc4_clearirq(ide_drive_t * drive) |
@@ -141,18 +141,19 @@ sgiioc4_clearirq(ide_drive_t * drive) | |||
141 | intr_reg = readl((void __iomem *)other_ir); | 141 | intr_reg = readl((void __iomem *)other_ir); |
142 | if (intr_reg & 0x03) { /* Valid IOC4-IDE interrupt */ | 142 | if (intr_reg & 0x03) { /* Valid IOC4-IDE interrupt */ |
143 | /* | 143 | /* |
144 | * Using sgiioc4_INB to read the Status register has a side | 144 | * Using sgiioc4_read_status to read the Status register has a |
145 | * effect of clearing the interrupt. The first read should | 145 | * side effect of clearing the interrupt. The first read should |
146 | * clear it if it is set. The second read should return | 146 | * clear it if it is set. The second read should return |
147 | * a "clear" status if it got cleared. If not, then spin | 147 | * a "clear" status if it got cleared. If not, then spin |
148 | * for a bit trying to clear it. | 148 | * for a bit trying to clear it. |
149 | */ | 149 | */ |
150 | u8 stat = sgiioc4_INB(io_ports->status_addr); | 150 | u8 stat = sgiioc4_read_status(hwif); |
151 | int count = 0; | 151 | int count = 0; |
152 | stat = sgiioc4_INB(io_ports->status_addr); | 152 | |
153 | stat = sgiioc4_read_status(hwif); | ||
153 | while ((stat & 0x80) && (count++ < 100)) { | 154 | while ((stat & 0x80) && (count++ < 100)) { |
154 | udelay(1); | 155 | udelay(1); |
155 | stat = sgiioc4_INB(io_ports->status_addr); | 156 | stat = sgiioc4_read_status(hwif); |
156 | } | 157 | } |
157 | 158 | ||
158 | if (intr_reg & 0x02) { | 159 | if (intr_reg & 0x02) { |
@@ -304,9 +305,9 @@ sgiioc4_dma_lost_irq(ide_drive_t * drive) | |||
304 | ide_dma_lost_irq(drive); | 305 | ide_dma_lost_irq(drive); |
305 | } | 306 | } |
306 | 307 | ||
307 | static u8 | 308 | static u8 sgiioc4_read_status(ide_hwif_t *hwif) |
308 | sgiioc4_INB(unsigned long port) | ||
309 | { | 309 | { |
310 | unsigned long port = hwif->io_ports.status_addr; | ||
310 | u8 reg = (u8) readb((void __iomem *) port); | 311 | u8 reg = (u8) readb((void __iomem *) port); |
311 | 312 | ||
312 | if ((port & 0xFFF) == 0x11C) { /* Status register of IOC4 */ | 313 | if ((port & 0xFFF) == 0x11C) { /* Status register of IOC4 */ |
@@ -549,6 +550,21 @@ static int sgiioc4_dma_setup(ide_drive_t *drive) | |||
549 | return 0; | 550 | return 0; |
550 | } | 551 | } |
551 | 552 | ||
553 | static const struct ide_tp_ops sgiioc4_tp_ops = { | ||
554 | .exec_command = ide_exec_command, | ||
555 | .read_status = sgiioc4_read_status, | ||
556 | .read_altstatus = ide_read_altstatus, | ||
557 | .read_sff_dma_status = ide_read_sff_dma_status, | ||
558 | |||
559 | .set_irq = ide_set_irq, | ||
560 | |||
561 | .tf_load = ide_tf_load, | ||
562 | .tf_read = ide_tf_read, | ||
563 | |||
564 | .input_data = ide_input_data, | ||
565 | .output_data = ide_output_data, | ||
566 | }; | ||
567 | |||
552 | static const struct ide_port_ops sgiioc4_port_ops = { | 568 | static const struct ide_port_ops sgiioc4_port_ops = { |
553 | .set_dma_mode = sgiioc4_set_dma_mode, | 569 | .set_dma_mode = sgiioc4_set_dma_mode, |
554 | /* reset DMA engine, clear IRQs */ | 570 | /* reset DMA engine, clear IRQs */ |
@@ -571,6 +587,7 @@ static const struct ide_port_info sgiioc4_port_info __devinitdata = { | |||
571 | .name = DRV_NAME, | 587 | .name = DRV_NAME, |
572 | .chipset = ide_pci, | 588 | .chipset = ide_pci, |
573 | .init_dma = ide_dma_sgiioc4, | 589 | .init_dma = ide_dma_sgiioc4, |
590 | .tp_ops = &sgiioc4_tp_ops, | ||
574 | .port_ops = &sgiioc4_port_ops, | 591 | .port_ops = &sgiioc4_port_ops, |
575 | .dma_ops = &sgiioc4_dma_ops, | 592 | .dma_ops = &sgiioc4_dma_ops, |
576 | .host_flags = IDE_HFLAG_MMIO, | 593 | .host_flags = IDE_HFLAG_MMIO, |
@@ -583,10 +600,10 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev) | |||
583 | unsigned long cmd_base, irqport; | 600 | unsigned long cmd_base, irqport; |
584 | unsigned long bar0, cmd_phys_base, ctl; | 601 | unsigned long bar0, cmd_phys_base, ctl; |
585 | void __iomem *virt_base; | 602 | void __iomem *virt_base; |
586 | ide_hwif_t *hwif; | 603 | struct ide_host *host; |
587 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | 604 | hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; |
588 | hw_regs_t hw; | ||
589 | struct ide_port_info d = sgiioc4_port_info; | 605 | struct ide_port_info d = sgiioc4_port_info; |
606 | int rc; | ||
590 | 607 | ||
591 | /* Get the CmdBlk and CtrlBlk Base Registers */ | 608 | /* Get the CmdBlk and CtrlBlk Base Registers */ |
592 | bar0 = pci_resource_start(dev, 0); | 609 | bar0 = pci_resource_start(dev, 0); |
@@ -618,30 +635,26 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev) | |||
618 | hw.chipset = ide_pci; | 635 | hw.chipset = ide_pci; |
619 | hw.dev = &dev->dev; | 636 | hw.dev = &dev->dev; |
620 | 637 | ||
621 | hwif = ide_find_port_slot(&d); | ||
622 | if (hwif == NULL) | ||
623 | goto err; | ||
624 | |||
625 | ide_init_port_hw(hwif, &hw); | ||
626 | |||
627 | /* The IOC4 uses MMIO rather than Port IO. */ | ||
628 | default_hwif_mmiops(hwif); | ||
629 | |||
630 | /* Initializing chipset IRQ Registers */ | 638 | /* Initializing chipset IRQ Registers */ |
631 | writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4)); | 639 | writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4)); |
632 | 640 | ||
633 | hwif->INB = &sgiioc4_INB; | 641 | host = ide_host_alloc(&d, hws); |
634 | 642 | if (host == NULL) { | |
635 | idx[0] = hwif->index; | 643 | rc = -ENOMEM; |
644 | goto err; | ||
645 | } | ||
636 | 646 | ||
637 | if (ide_device_add(idx, &d)) | 647 | rc = ide_host_register(host, &d, hws); |
638 | return -EIO; | 648 | if (rc) |
649 | goto err_free; | ||
639 | 650 | ||
640 | return 0; | 651 | return 0; |
652 | err_free: | ||
653 | ide_host_free(host); | ||
641 | err: | 654 | err: |
642 | release_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE); | 655 | release_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE); |
643 | iounmap(virt_base); | 656 | iounmap(virt_base); |
644 | return -ENOMEM; | 657 | return rc; |
645 | } | 658 | } |
646 | 659 | ||
647 | static unsigned int __devinit | 660 | static unsigned int __devinit |
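[Note] sgiioc4 uses the split form of registration because it already holds MMIO resources that must be released if registration fails: ide_host_alloc() first, ide_host_register() second, ide_host_free() on the error path. Minimal sketch of just that error handling; the resource setup around it is elided and example_setup is a placeholder.

static int example_setup(struct ide_port_info *d, hw_regs_t **hws)
{
	struct ide_host *host;
	int rc;

	host = ide_host_alloc(d, hws);
	if (host == NULL)
		return -ENOMEM;

	rc = ide_host_register(host, d, hws);
	if (rc) {
		ide_host_free(host);	/* undo only the allocation */
		return rc;
	}

	return 0;
}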
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c index 6e9d7655d89c..5965a35d94ae 100644 --- a/drivers/ide/pci/siimage.c +++ b/drivers/ide/pci/siimage.c | |||
@@ -334,7 +334,7 @@ static int siimage_io_dma_test_irq(ide_drive_t *drive) | |||
334 | unsigned long addr = siimage_selreg(hwif, 1); | 334 | unsigned long addr = siimage_selreg(hwif, 1); |
335 | 335 | ||
336 | /* return 1 if INTR asserted */ | 336 | /* return 1 if INTR asserted */ |
337 | if (hwif->INB(hwif->dma_status) & 4) | 337 | if (inb(hwif->dma_base + ATA_DMA_STATUS) & 4) |
338 | return 1; | 338 | return 1; |
339 | 339 | ||
340 | /* return 1 if Device INTR asserted */ | 340 | /* return 1 if Device INTR asserted */ |
@@ -382,7 +382,7 @@ static int siimage_mmio_dma_test_irq(ide_drive_t *drive) | |||
382 | } | 382 | } |
383 | 383 | ||
384 | /* return 1 if INTR asserted */ | 384 | /* return 1 if INTR asserted */ |
385 | if (readb((void __iomem *)hwif->dma_status) & 0x04) | 385 | if (readb((void __iomem *)(hwif->dma_base + ATA_DMA_STATUS)) & 4) |
386 | return 1; | 386 | return 1; |
387 | 387 | ||
388 | /* return 1 if Device INTR asserted */ | 388 | /* return 1 if Device INTR asserted */ |
@@ -601,7 +601,7 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif) | |||
601 | * Fill in the basic hwif bits | 601 | * Fill in the basic hwif bits |
602 | */ | 602 | */ |
603 | hwif->host_flags |= IDE_HFLAG_MMIO; | 603 | hwif->host_flags |= IDE_HFLAG_MMIO; |
604 | default_hwif_mmiops(hwif); | 604 | |
605 | hwif->hwif_data = addr; | 605 | hwif->hwif_data = addr; |
606 | 606 | ||
607 | /* | 607 | /* |
diff --git a/drivers/ide/pci/sl82c105.c b/drivers/ide/pci/sl82c105.c index 6efbde297174..f82a6502c1b7 100644 --- a/drivers/ide/pci/sl82c105.c +++ b/drivers/ide/pci/sl82c105.c | |||
@@ -157,9 +157,9 @@ static void sl82c105_dma_lost_irq(ide_drive_t *drive) | |||
157 | * Was DMA enabled? If so, disable it - we're resetting the | 157 | * Was DMA enabled? If so, disable it - we're resetting the |
158 | * host. The IDE layer will be handling the drive for us. | 158 | * host. The IDE layer will be handling the drive for us. |
159 | */ | 159 | */ |
160 | dma_cmd = inb(hwif->dma_command); | 160 | dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD); |
161 | if (dma_cmd & 1) { | 161 | if (dma_cmd & 1) { |
162 | outb(dma_cmd & ~1, hwif->dma_command); | 162 | outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD); |
163 | printk("sl82c105: DMA was enabled\n"); | 163 | printk("sl82c105: DMA was enabled\n"); |
164 | } | 164 | } |
165 | 165 | ||
diff --git a/drivers/ide/pci/tc86c001.c b/drivers/ide/pci/tc86c001.c index 9b4b27a4c711..477e19790102 100644 --- a/drivers/ide/pci/tc86c001.c +++ b/drivers/ide/pci/tc86c001.c | |||
@@ -63,7 +63,7 @@ static int tc86c001_timer_expiry(ide_drive_t *drive) | |||
63 | ide_hwif_t *hwif = HWIF(drive); | 63 | ide_hwif_t *hwif = HWIF(drive); |
64 | ide_expiry_t *expiry = ide_get_hwifdata(hwif); | 64 | ide_expiry_t *expiry = ide_get_hwifdata(hwif); |
65 | ide_hwgroup_t *hwgroup = HWGROUP(drive); | 65 | ide_hwgroup_t *hwgroup = HWGROUP(drive); |
66 | u8 dma_stat = inb(hwif->dma_status); | 66 | u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS); |
67 | 67 | ||
68 | /* Restore a higher level driver's expiry handler first. */ | 68 | /* Restore a higher level driver's expiry handler first. */ |
69 | hwgroup->expiry = expiry; | 69 | hwgroup->expiry = expiry; |
@@ -71,21 +71,24 @@ static int tc86c001_timer_expiry(ide_drive_t *drive) | |||
71 | if ((dma_stat & 5) == 1) { /* DMA active and no interrupt */ | 71 | if ((dma_stat & 5) == 1) { /* DMA active and no interrupt */ |
72 | unsigned long sc_base = hwif->config_data; | 72 | unsigned long sc_base = hwif->config_data; |
73 | unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04); | 73 | unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04); |
74 | u8 dma_cmd = inb(hwif->dma_command); | 74 | u8 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD); |
75 | 75 | ||
76 | printk(KERN_WARNING "%s: DMA interrupt possibly stuck, " | 76 | printk(KERN_WARNING "%s: DMA interrupt possibly stuck, " |
77 | "attempting recovery...\n", drive->name); | 77 | "attempting recovery...\n", drive->name); |
78 | 78 | ||
79 | /* Stop DMA */ | 79 | /* Stop DMA */ |
80 | outb(dma_cmd & ~0x01, hwif->dma_command); | 80 | outb(dma_cmd & ~0x01, hwif->dma_base + ATA_DMA_CMD); |
81 | 81 | ||
82 | /* Setup the dummy DMA transfer */ | 82 | /* Setup the dummy DMA transfer */ |
83 | outw(0, sc_base + 0x0a); /* Sector Count */ | 83 | outw(0, sc_base + 0x0a); /* Sector Count */ |
84 | outw(0, twcr_port); /* Transfer Word Count 1 or 2 */ | 84 | outw(0, twcr_port); /* Transfer Word Count 1 or 2 */ |
85 | 85 | ||
86 | /* Start the dummy DMA transfer */ | 86 | /* Start the dummy DMA transfer */ |
87 | outb(0x00, hwif->dma_command); /* clear R_OR_WCTR for write */ | 87 | |
88 | outb(0x01, hwif->dma_command); /* set START_STOPBM */ | 88 | /* clear R_OR_WCTR for write */ |
89 | outb(0x00, hwif->dma_base + ATA_DMA_CMD); | ||
90 | /* set START_STOPBM */ | ||
91 | outb(0x01, hwif->dma_base + ATA_DMA_CMD); | ||
89 | 92 | ||
90 | /* | 93 | /* |
91 | * If an interrupt was pending, it should come thru shortly. | 94 | * If an interrupt was pending, it should come thru shortly. |
@@ -203,8 +206,7 @@ static const struct ide_port_info tc86c001_chipset __devinitdata = { | |||
203 | .init_hwif = init_hwif_tc86c001, | 206 | .init_hwif = init_hwif_tc86c001, |
204 | .port_ops = &tc86c001_port_ops, | 207 | .port_ops = &tc86c001_port_ops, |
205 | .dma_ops = &tc86c001_dma_ops, | 208 | .dma_ops = &tc86c001_dma_ops, |
206 | .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_OFF_BOARD | | 209 | .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_OFF_BOARD, |
207 | IDE_HFLAG_ABUSE_SET_DMA_MODE, | ||
208 | .pio_mask = ATA_PIO4, | 210 | .pio_mask = ATA_PIO4, |
209 | .mwdma_mask = ATA_MWDMA2, | 211 | .mwdma_mask = ATA_MWDMA2, |
210 | .udma_mask = ATA_UDMA4, | 212 | .udma_mask = ATA_UDMA4, |
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c index e47384c70c40..09dc4803ef9d 100644 --- a/drivers/ide/pci/via82cxxx.c +++ b/drivers/ide/pci/via82cxxx.c | |||
@@ -425,7 +425,6 @@ static const struct ide_port_info via82cxxx_chipset __devinitdata = { | |||
425 | .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } }, | 425 | .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } }, |
426 | .port_ops = &via_port_ops, | 426 | .port_ops = &via_port_ops, |
427 | .host_flags = IDE_HFLAG_PIO_NO_BLACKLIST | | 427 | .host_flags = IDE_HFLAG_PIO_NO_BLACKLIST | |
428 | IDE_HFLAG_ABUSE_SET_DMA_MODE | | ||
429 | IDE_HFLAG_POST_SET_MODE | | 428 | IDE_HFLAG_POST_SET_MODE | |
430 | IDE_HFLAG_IO_32BIT, | 429 | IDE_HFLAG_IO_32BIT, |
431 | .pio_mask = ATA_PIO5, | 430 | .pio_mask = ATA_PIO5, |
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c index 93fb9067c043..c521bf6e1bf2 100644 --- a/drivers/ide/ppc/pmac.c +++ b/drivers/ide/ppc/pmac.c | |||
@@ -48,6 +48,8 @@ | |||
48 | #include <asm/mediabay.h> | 48 | #include <asm/mediabay.h> |
49 | #endif | 49 | #endif |
50 | 50 | ||
51 | #define DRV_NAME "ide-pmac" | ||
52 | |||
51 | #undef IDE_PMAC_DEBUG | 53 | #undef IDE_PMAC_DEBUG |
52 | 54 | ||
53 | #define DMA_WAIT_TIMEOUT 50 | 55 | #define DMA_WAIT_TIMEOUT 50 |
@@ -424,7 +426,9 @@ static void pmac_ide_kauai_selectproc(ide_drive_t *drive); | |||
424 | static void | 426 | static void |
425 | pmac_ide_selectproc(ide_drive_t *drive) | 427 | pmac_ide_selectproc(ide_drive_t *drive) |
426 | { | 428 | { |
427 | pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; | 429 | ide_hwif_t *hwif = drive->hwif; |
430 | pmac_ide_hwif_t *pmif = | ||
431 | (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); | ||
428 | 432 | ||
429 | if (pmif == NULL) | 433 | if (pmif == NULL) |
430 | return; | 434 | return; |
@@ -444,7 +448,9 @@ pmac_ide_selectproc(ide_drive_t *drive) | |||
444 | static void | 448 | static void |
445 | pmac_ide_kauai_selectproc(ide_drive_t *drive) | 449 | pmac_ide_kauai_selectproc(ide_drive_t *drive) |
446 | { | 450 | { |
447 | pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; | 451 | ide_hwif_t *hwif = drive->hwif; |
452 | pmac_ide_hwif_t *pmif = | ||
453 | (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); | ||
448 | 454 | ||
449 | if (pmif == NULL) | 455 | if (pmif == NULL) |
450 | return; | 456 | return; |
@@ -465,7 +471,9 @@ pmac_ide_kauai_selectproc(ide_drive_t *drive) | |||
465 | static void | 471 | static void |
466 | pmac_ide_do_update_timings(ide_drive_t *drive) | 472 | pmac_ide_do_update_timings(ide_drive_t *drive) |
467 | { | 473 | { |
468 | pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; | 474 | ide_hwif_t *hwif = drive->hwif; |
475 | pmac_ide_hwif_t *pmif = | ||
476 | (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); | ||
469 | 477 | ||
470 | if (pmif == NULL) | 478 | if (pmif == NULL) |
471 | return; | 479 | return; |
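[Note] From this point on, every pmac helper that used to pull its state out of hwif->hwif_data recovers it from the drvdata of the port's parent device instead. The one-line accessor below just names that pattern; example_pmif is a placeholder and pmac_ide_hwif_t is the driver's own private type.

/* Sketch: how the converted pmac helpers reach their private data. */
static inline pmac_ide_hwif_t *example_pmif(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;

	return (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
}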
@@ -478,12 +486,26 @@ pmac_ide_do_update_timings(ide_drive_t *drive) | |||
478 | pmac_ide_selectproc(drive); | 486 | pmac_ide_selectproc(drive); |
479 | } | 487 | } |
480 | 488 | ||
481 | static void pmac_outbsync(ide_hwif_t *hwif, u8 value, unsigned long port) | 489 | static void pmac_exec_command(ide_hwif_t *hwif, u8 cmd) |
482 | { | 490 | { |
483 | u32 tmp; | 491 | writeb(cmd, (void __iomem *)hwif->io_ports.command_addr); |
484 | 492 | (void)readl((void __iomem *)(hwif->io_ports.data_addr | |
485 | writeb(value, (void __iomem *) port); | 493 | + IDE_TIMING_CONFIG)); |
486 | tmp = readl((void __iomem *)(hwif->io_ports.data_addr | 494 | } |
495 | |||
496 | static void pmac_set_irq(ide_hwif_t *hwif, int on) | ||
497 | { | ||
498 | u8 ctl = ATA_DEVCTL_OBS; | ||
499 | |||
500 | if (on == 4) { /* hack for SRST */ | ||
501 | ctl |= 4; | ||
502 | on &= ~4; | ||
503 | } | ||
504 | |||
505 | ctl |= on ? 0 : 2; | ||
506 | |||
507 | writeb(ctl, (void __iomem *)hwif->io_ports.ctl_addr); | ||
508 | (void)readl((void __iomem *)(hwif->io_ports.data_addr | ||
487 | + IDE_TIMING_CONFIG)); | 509 | + IDE_TIMING_CONFIG)); |
488 | } | 510 | } |
489 | 511 | ||
@@ -493,11 +515,13 @@ static void pmac_outbsync(ide_hwif_t *hwif, u8 value, unsigned long port) | |||
493 | static void | 515 | static void |
494 | pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio) | 516 | pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio) |
495 | { | 517 | { |
518 | ide_hwif_t *hwif = drive->hwif; | ||
519 | pmac_ide_hwif_t *pmif = | ||
520 | (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); | ||
496 | struct ide_timing *tim = ide_timing_find_mode(XFER_PIO_0 + pio); | 521 | struct ide_timing *tim = ide_timing_find_mode(XFER_PIO_0 + pio); |
497 | u32 *timings, t; | 522 | u32 *timings, t; |
498 | unsigned accessTicks, recTicks; | 523 | unsigned accessTicks, recTicks; |
499 | unsigned accessTime, recTime; | 524 | unsigned accessTime, recTime; |
500 | pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; | ||
501 | unsigned int cycle_time; | 525 | unsigned int cycle_time; |
502 | 526 | ||
503 | if (pmif == NULL) | 527 | if (pmif == NULL) |
@@ -778,9 +802,11 @@ set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2, | |||
778 | 802 | ||
779 | static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed) | 803 | static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed) |
780 | { | 804 | { |
805 | ide_hwif_t *hwif = drive->hwif; | ||
806 | pmac_ide_hwif_t *pmif = | ||
807 | (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); | ||
781 | int unit = (drive->select.b.unit & 0x01); | 808 | int unit = (drive->select.b.unit & 0x01); |
782 | int ret = 0; | 809 | int ret = 0; |
783 | pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; | ||
784 | u32 *timings, *timings2, tl[2]; | 810 | u32 *timings, *timings2, tl[2]; |
785 | 811 | ||
786 | timings = &pmif->timings[unit]; | 812 | timings = &pmif->timings[unit]; |
@@ -852,11 +878,8 @@ sanitize_timings(pmac_ide_hwif_t *pmif) | |||
852 | /* Suspend call back, should be called after the child devices | 878 | /* Suspend call back, should be called after the child devices |
853 | * have actually been suspended | 879 | * have actually been suspended |
854 | */ | 880 | */ |
855 | static int | 881 | static int pmac_ide_do_suspend(pmac_ide_hwif_t *pmif) |
856 | pmac_ide_do_suspend(ide_hwif_t *hwif) | ||
857 | { | 882 | { |
858 | pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data; | ||
859 | |||
860 | /* We clear the timings */ | 883 | /* We clear the timings */ |
861 | pmif->timings[0] = 0; | 884 | pmif->timings[0] = 0; |
862 | pmif->timings[1] = 0; | 885 | pmif->timings[1] = 0; |
@@ -884,11 +907,8 @@ pmac_ide_do_suspend(ide_hwif_t *hwif) | |||
884 | /* Resume call back, should be called before the child devices | 907 | /* Resume call back, should be called before the child devices |
885 | * are resumed | 908 | * are resumed |
886 | */ | 909 | */ |
887 | static int | 910 | static int pmac_ide_do_resume(pmac_ide_hwif_t *pmif) |
888 | pmac_ide_do_resume(ide_hwif_t *hwif) | ||
889 | { | 911 | { |
890 | pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data; | ||
891 | |||
892 | /* Hard reset & re-enable controller (do we really need to reset ? -BenH) */ | 912 | /* Hard reset & re-enable controller (do we really need to reset ? -BenH) */ |
893 | if (!pmif->mediabay) { | 913 | if (!pmif->mediabay) { |
894 | ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 1); | 914 | ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 1); |
@@ -916,7 +936,8 @@ pmac_ide_do_resume(ide_hwif_t *hwif) | |||
916 | 936 | ||
917 | static u8 pmac_ide_cable_detect(ide_hwif_t *hwif) | 937 | static u8 pmac_ide_cable_detect(ide_hwif_t *hwif) |
918 | { | 938 | { |
919 | pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)ide_get_hwifdata(hwif); | 939 | pmac_ide_hwif_t *pmif = |
940 | (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); | ||
920 | struct device_node *np = pmif->node; | 941 | struct device_node *np = pmif->node; |
921 | const char *cable = of_get_property(np, "cable-type", NULL); | 942 | const char *cable = of_get_property(np, "cable-type", NULL); |
922 | 943 | ||
@@ -936,7 +957,40 @@ static u8 pmac_ide_cable_detect(ide_hwif_t *hwif) | |||
936 | return ATA_CBL_PATA40; | 957 | return ATA_CBL_PATA40; |
937 | } | 958 | } |
938 | 959 | ||
960 | static void pmac_ide_init_dev(ide_drive_t *drive) | ||
961 | { | ||
962 | ide_hwif_t *hwif = drive->hwif; | ||
963 | pmac_ide_hwif_t *pmif = | ||
964 | (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); | ||
965 | |||
966 | if (pmif->mediabay) { | ||
967 | #ifdef CONFIG_PMAC_MEDIABAY | ||
968 | if (check_media_bay_by_base(pmif->regbase, MB_CD) == 0) { | ||
969 | drive->noprobe = 0; | ||
970 | return; | ||
971 | } | ||
972 | #endif | ||
973 | drive->noprobe = 1; | ||
974 | } | ||
975 | } | ||
976 | |||
977 | static const struct ide_tp_ops pmac_tp_ops = { | ||
978 | .exec_command = pmac_exec_command, | ||
979 | .read_status = ide_read_status, | ||
980 | .read_altstatus = ide_read_altstatus, | ||
981 | .read_sff_dma_status = ide_read_sff_dma_status, | ||
982 | |||
983 | .set_irq = pmac_set_irq, | ||
984 | |||
985 | .tf_load = ide_tf_load, | ||
986 | .tf_read = ide_tf_read, | ||
987 | |||
988 | .input_data = ide_input_data, | ||
989 | .output_data = ide_output_data, | ||
990 | }; | ||
991 | |||
939 | static const struct ide_port_ops pmac_ide_ata6_port_ops = { | 992 | static const struct ide_port_ops pmac_ide_ata6_port_ops = { |
993 | .init_dev = pmac_ide_init_dev, | ||
940 | .set_pio_mode = pmac_ide_set_pio_mode, | 994 | .set_pio_mode = pmac_ide_set_pio_mode, |
941 | .set_dma_mode = pmac_ide_set_dma_mode, | 995 | .set_dma_mode = pmac_ide_set_dma_mode, |
942 | .selectproc = pmac_ide_kauai_selectproc, | 996 | .selectproc = pmac_ide_kauai_selectproc, |
@@ -944,6 +998,7 @@ static const struct ide_port_ops pmac_ide_ata6_port_ops = { | |||
944 | }; | 998 | }; |
945 | 999 | ||
946 | static const struct ide_port_ops pmac_ide_ata4_port_ops = { | 1000 | static const struct ide_port_ops pmac_ide_ata4_port_ops = { |
1001 | .init_dev = pmac_ide_init_dev, | ||
947 | .set_pio_mode = pmac_ide_set_pio_mode, | 1002 | .set_pio_mode = pmac_ide_set_pio_mode, |
948 | .set_dma_mode = pmac_ide_set_dma_mode, | 1003 | .set_dma_mode = pmac_ide_set_dma_mode, |
949 | .selectproc = pmac_ide_selectproc, | 1004 | .selectproc = pmac_ide_selectproc, |
@@ -951,6 +1006,7 @@ static const struct ide_port_ops pmac_ide_ata4_port_ops = { | |||
951 | }; | 1006 | }; |
952 | 1007 | ||
953 | static const struct ide_port_ops pmac_ide_port_ops = { | 1008 | static const struct ide_port_ops pmac_ide_port_ops = { |
1009 | .init_dev = pmac_ide_init_dev, | ||
954 | .set_pio_mode = pmac_ide_set_pio_mode, | 1010 | .set_pio_mode = pmac_ide_set_pio_mode, |
955 | .set_dma_mode = pmac_ide_set_dma_mode, | 1011 | .set_dma_mode = pmac_ide_set_dma_mode, |
956 | .selectproc = pmac_ide_selectproc, | 1012 | .selectproc = pmac_ide_selectproc, |
@@ -959,12 +1015,14 @@ static const struct ide_port_ops pmac_ide_port_ops = { | |||
959 | static const struct ide_dma_ops pmac_dma_ops; | 1015 | static const struct ide_dma_ops pmac_dma_ops; |
960 | 1016 | ||
961 | static const struct ide_port_info pmac_port_info = { | 1017 | static const struct ide_port_info pmac_port_info = { |
1018 | .name = DRV_NAME, | ||
962 | .init_dma = pmac_ide_init_dma, | 1019 | .init_dma = pmac_ide_init_dma, |
963 | .chipset = ide_pmac, | 1020 | .chipset = ide_pmac, |
1021 | .tp_ops = &pmac_tp_ops, | ||
1022 | .port_ops = &pmac_ide_port_ops, | ||
964 | #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC | 1023 | #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC |
965 | .dma_ops = &pmac_dma_ops, | 1024 | .dma_ops = &pmac_dma_ops, |
966 | #endif | 1025 | #endif |
967 | .port_ops = &pmac_ide_port_ops, | ||
968 | .host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA | | 1026 | .host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA | |
969 | IDE_HFLAG_POST_SET_MODE | | 1027 | IDE_HFLAG_POST_SET_MODE | |
970 | IDE_HFLAG_MMIO | | 1028 | IDE_HFLAG_MMIO | |
@@ -977,13 +1035,15 @@ static const struct ide_port_info pmac_port_info = { | |||
977 | * Setup, register & probe an IDE channel driven by this driver, this is | 1035 | * Setup, register & probe an IDE channel driven by this driver, this is |
978 | * called by one of the 2 probe functions (macio or PCI). | 1036 | * called by one of the 2 probe functions (macio or PCI). |
979 | */ | 1037 | */ |
980 | static int __devinit | 1038 | static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif, hw_regs_t *hw) |
981 | pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif, hw_regs_t *hw) | ||
982 | { | 1039 | { |
983 | struct device_node *np = pmif->node; | 1040 | struct device_node *np = pmif->node; |
984 | const int *bidp; | 1041 | const int *bidp; |
985 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | 1042 | struct ide_host *host; |
1043 | ide_hwif_t *hwif; | ||
1044 | hw_regs_t *hws[] = { hw, NULL, NULL, NULL }; | ||
986 | struct ide_port_info d = pmac_port_info; | 1045 | struct ide_port_info d = pmac_port_info; |
1046 | int rc; | ||
987 | 1047 | ||
988 | pmif->broken_dma = pmif->broken_dma_warn = 0; | 1048 | pmif->broken_dma = pmif->broken_dma_warn = 0; |
989 | if (of_device_is_compatible(np, "shasta-ata")) { | 1049 | if (of_device_is_compatible(np, "shasta-ata")) { |
@@ -1054,31 +1114,16 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif, hw_regs_t *hw) | |||
1054 | msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY)); | 1114 | msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY)); |
1055 | } | 1115 | } |
1056 | 1116 | ||
1057 | /* Setup MMIO ops */ | 1117 | printk(KERN_INFO DRV_NAME ": Found Apple %s controller (%s), " |
1058 | default_hwif_mmiops(hwif); | 1118 | "bus ID %d%s, irq %d\n", model_name[pmif->kind], |
1059 | hwif->OUTBSYNC = pmac_outbsync; | 1119 | pmif->mdev ? "macio" : "PCI", pmif->aapl_bus_id, |
1120 | pmif->mediabay ? " (mediabay)" : "", hw->irq); | ||
1060 | 1121 | ||
1061 | hwif->hwif_data = pmif; | 1122 | rc = ide_host_add(&d, hws, &host); |
1062 | ide_init_port_hw(hwif, hw); | 1123 | if (rc) |
1124 | return rc; | ||
1063 | 1125 | ||
1064 | printk(KERN_INFO "ide%d: Found Apple %s controller, bus ID %d%s, irq %d\n", | 1126 | hwif = host->ports[0]; |
1065 | hwif->index, model_name[pmif->kind], pmif->aapl_bus_id, | ||
1066 | pmif->mediabay ? " (mediabay)" : "", hwif->irq); | ||
1067 | |||
1068 | if (pmif->mediabay) { | ||
1069 | #ifdef CONFIG_PMAC_MEDIABAY | ||
1070 | if (check_media_bay_by_base(pmif->regbase, MB_CD)) { | ||
1071 | #else | ||
1072 | if (1) { | ||
1073 | #endif | ||
1074 | hwif->drives[0].noprobe = 1; | ||
1075 | hwif->drives[1].noprobe = 1; | ||
1076 | } | ||
1077 | } | ||
1078 | |||
1079 | idx[0] = hwif->index; | ||
1080 | |||
1081 | ide_device_add(idx, &d); | ||
1082 | 1127 | ||
1083 | return 0; | 1128 | return 0; |
1084 | } | 1129 | } |
@@ -1101,7 +1146,6 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match) | |||
1101 | { | 1146 | { |
1102 | void __iomem *base; | 1147 | void __iomem *base; |
1103 | unsigned long regbase; | 1148 | unsigned long regbase; |
1104 | ide_hwif_t *hwif; | ||
1105 | pmac_ide_hwif_t *pmif; | 1149 | pmac_ide_hwif_t *pmif; |
1106 | int irq, rc; | 1150 | int irq, rc; |
1107 | hw_regs_t hw; | 1151 | hw_regs_t hw; |
@@ -1110,14 +1154,6 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match) | |||
1110 | if (pmif == NULL) | 1154 | if (pmif == NULL) |
1111 | return -ENOMEM; | 1155 | return -ENOMEM; |
1112 | 1156 | ||
1113 | hwif = ide_find_port(); | ||
1114 | if (hwif == NULL) { | ||
1115 | printk(KERN_ERR "ide-pmac: MacIO interface attach with no slot\n"); | ||
1116 | printk(KERN_ERR " %s\n", mdev->ofdev.node->full_name); | ||
1117 | rc = -ENODEV; | ||
1118 | goto out_free_pmif; | ||
1119 | } | ||
1120 | |||
1121 | if (macio_resource_count(mdev) == 0) { | 1157 | if (macio_resource_count(mdev) == 0) { |
1122 | printk(KERN_WARNING "ide-pmac: no address for %s\n", | 1158 | printk(KERN_WARNING "ide-pmac: no address for %s\n", |
1123 | mdev->ofdev.node->full_name); | 1159 | mdev->ofdev.node->full_name); |
@@ -1164,7 +1200,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match) | |||
1164 | } else | 1200 | } else |
1165 | pmif->dma_regs = NULL; | 1201 | pmif->dma_regs = NULL; |
1166 | #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */ | 1202 | #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */ |
1167 | dev_set_drvdata(&mdev->ofdev.dev, hwif); | 1203 | dev_set_drvdata(&mdev->ofdev.dev, pmif); |
1168 | 1204 | ||
1169 | memset(&hw, 0, sizeof(hw)); | 1205 | memset(&hw, 0, sizeof(hw)); |
1170 | pmac_ide_init_ports(&hw, pmif->regbase); | 1206 | pmac_ide_init_ports(&hw, pmif->regbase); |
@@ -1172,7 +1208,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match) | |||
1172 | hw.dev = &mdev->bus->pdev->dev; | 1208 | hw.dev = &mdev->bus->pdev->dev; |
1173 | hw.parent = &mdev->ofdev.dev; | 1209 | hw.parent = &mdev->ofdev.dev; |
1174 | 1210 | ||
1175 | rc = pmac_ide_setup_device(pmif, hwif, &hw); | 1211 | rc = pmac_ide_setup_device(pmif, &hw); |
1176 | if (rc != 0) { | 1212 | if (rc != 0) { |
1177 | /* The interface is released to the common IDE layer */ | 1213 | /* The interface is released to the common IDE layer */ |
1178 | dev_set_drvdata(&mdev->ofdev.dev, NULL); | 1214 | dev_set_drvdata(&mdev->ofdev.dev, NULL); |
@@ -1195,12 +1231,13 @@ out_free_pmif: | |||
1195 | static int | 1231 | static int |
1196 | pmac_ide_macio_suspend(struct macio_dev *mdev, pm_message_t mesg) | 1232 | pmac_ide_macio_suspend(struct macio_dev *mdev, pm_message_t mesg) |
1197 | { | 1233 | { |
1198 | ide_hwif_t *hwif = (ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev); | 1234 | pmac_ide_hwif_t *pmif = |
1199 | int rc = 0; | 1235 | (pmac_ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev); |
1236 | int rc = 0; | ||
1200 | 1237 | ||
1201 | if (mesg.event != mdev->ofdev.dev.power.power_state.event | 1238 | if (mesg.event != mdev->ofdev.dev.power.power_state.event |
1202 | && (mesg.event & PM_EVENT_SLEEP)) { | 1239 | && (mesg.event & PM_EVENT_SLEEP)) { |
1203 | rc = pmac_ide_do_suspend(hwif); | 1240 | rc = pmac_ide_do_suspend(pmif); |
1204 | if (rc == 0) | 1241 | if (rc == 0) |
1205 | mdev->ofdev.dev.power.power_state = mesg; | 1242 | mdev->ofdev.dev.power.power_state = mesg; |
1206 | } | 1243 | } |
@@ -1211,11 +1248,12 @@ pmac_ide_macio_suspend(struct macio_dev *mdev, pm_message_t mesg) | |||
1211 | static int | 1248 | static int |
1212 | pmac_ide_macio_resume(struct macio_dev *mdev) | 1249 | pmac_ide_macio_resume(struct macio_dev *mdev) |
1213 | { | 1250 | { |
1214 | ide_hwif_t *hwif = (ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev); | 1251 | pmac_ide_hwif_t *pmif = |
1215 | int rc = 0; | 1252 | (pmac_ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev); |
1216 | 1253 | int rc = 0; | |
1254 | |||
1217 | if (mdev->ofdev.dev.power.power_state.event != PM_EVENT_ON) { | 1255 | if (mdev->ofdev.dev.power.power_state.event != PM_EVENT_ON) { |
1218 | rc = pmac_ide_do_resume(hwif); | 1256 | rc = pmac_ide_do_resume(pmif); |
1219 | if (rc == 0) | 1257 | if (rc == 0) |
1220 | mdev->ofdev.dev.power.power_state = PMSG_ON; | 1258 | mdev->ofdev.dev.power.power_state = PMSG_ON; |
1221 | } | 1259 | } |
@@ -1229,7 +1267,6 @@ pmac_ide_macio_resume(struct macio_dev *mdev) | |||
1229 | static int __devinit | 1267 | static int __devinit |
1230 | pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id) | 1268 | pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id) |
1231 | { | 1269 | { |
1232 | ide_hwif_t *hwif; | ||
1233 | struct device_node *np; | 1270 | struct device_node *np; |
1234 | pmac_ide_hwif_t *pmif; | 1271 | pmac_ide_hwif_t *pmif; |
1235 | void __iomem *base; | 1272 | void __iomem *base; |
@@ -1247,14 +1284,6 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1247 | if (pmif == NULL) | 1284 | if (pmif == NULL) |
1248 | return -ENOMEM; | 1285 | return -ENOMEM; |
1249 | 1286 | ||
1250 | hwif = ide_find_port(); | ||
1251 | if (hwif == NULL) { | ||
1252 | printk(KERN_ERR "ide-pmac: PCI interface attach with no slot\n"); | ||
1253 | printk(KERN_ERR " %s\n", np->full_name); | ||
1254 | rc = -ENODEV; | ||
1255 | goto out_free_pmif; | ||
1256 | } | ||
1257 | |||
1258 | if (pci_enable_device(pdev)) { | 1287 | if (pci_enable_device(pdev)) { |
1259 | printk(KERN_WARNING "ide-pmac: Can't enable PCI device for " | 1288 | printk(KERN_WARNING "ide-pmac: Can't enable PCI device for " |
1260 | "%s\n", np->full_name); | 1289 | "%s\n", np->full_name); |
@@ -1284,14 +1313,14 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1284 | pmif->kauai_fcr = base; | 1313 | pmif->kauai_fcr = base; |
1285 | pmif->irq = pdev->irq; | 1314 | pmif->irq = pdev->irq; |
1286 | 1315 | ||
1287 | pci_set_drvdata(pdev, hwif); | 1316 | pci_set_drvdata(pdev, pmif); |
1288 | 1317 | ||
1289 | memset(&hw, 0, sizeof(hw)); | 1318 | memset(&hw, 0, sizeof(hw)); |
1290 | pmac_ide_init_ports(&hw, pmif->regbase); | 1319 | pmac_ide_init_ports(&hw, pmif->regbase); |
1291 | hw.irq = pdev->irq; | 1320 | hw.irq = pdev->irq; |
1292 | hw.dev = &pdev->dev; | 1321 | hw.dev = &pdev->dev; |
1293 | 1322 | ||
1294 | rc = pmac_ide_setup_device(pmif, hwif, &hw); | 1323 | rc = pmac_ide_setup_device(pmif, &hw); |
1295 | if (rc != 0) { | 1324 | if (rc != 0) { |
1296 | /* The interface is released to the common IDE layer */ | 1325 | /* The interface is released to the common IDE layer */ |
1297 | pci_set_drvdata(pdev, NULL); | 1326 | pci_set_drvdata(pdev, NULL); |
@@ -1310,12 +1339,12 @@ out_free_pmif: | |||
1310 | static int | 1339 | static int |
1311 | pmac_ide_pci_suspend(struct pci_dev *pdev, pm_message_t mesg) | 1340 | pmac_ide_pci_suspend(struct pci_dev *pdev, pm_message_t mesg) |
1312 | { | 1341 | { |
1313 | ide_hwif_t *hwif = (ide_hwif_t *)pci_get_drvdata(pdev); | 1342 | pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)pci_get_drvdata(pdev); |
1314 | int rc = 0; | 1343 | int rc = 0; |
1315 | 1344 | ||
1316 | if (mesg.event != pdev->dev.power.power_state.event | 1345 | if (mesg.event != pdev->dev.power.power_state.event |
1317 | && (mesg.event & PM_EVENT_SLEEP)) { | 1346 | && (mesg.event & PM_EVENT_SLEEP)) { |
1318 | rc = pmac_ide_do_suspend(hwif); | 1347 | rc = pmac_ide_do_suspend(pmif); |
1319 | if (rc == 0) | 1348 | if (rc == 0) |
1320 | pdev->dev.power.power_state = mesg; | 1349 | pdev->dev.power.power_state = mesg; |
1321 | } | 1350 | } |
@@ -1326,11 +1355,11 @@ pmac_ide_pci_suspend(struct pci_dev *pdev, pm_message_t mesg) | |||
1326 | static int | 1355 | static int |
1327 | pmac_ide_pci_resume(struct pci_dev *pdev) | 1356 | pmac_ide_pci_resume(struct pci_dev *pdev) |
1328 | { | 1357 | { |
1329 | ide_hwif_t *hwif = (ide_hwif_t *)pci_get_drvdata(pdev); | 1358 | pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)pci_get_drvdata(pdev); |
1330 | int rc = 0; | 1359 | int rc = 0; |
1331 | 1360 | ||
1332 | if (pdev->dev.power.power_state.event != PM_EVENT_ON) { | 1361 | if (pdev->dev.power.power_state.event != PM_EVENT_ON) { |
1333 | rc = pmac_ide_do_resume(hwif); | 1362 | rc = pmac_ide_do_resume(pmif); |
1334 | if (rc == 0) | 1363 | if (rc == 0) |
1335 | pdev->dev.power.power_state = PMSG_ON; | 1364 | pdev->dev.power.power_state = PMSG_ON; |
1336 | } | 1365 | } |
@@ -1421,10 +1450,11 @@ out: | |||
1421 | static int | 1450 | static int |
1422 | pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq) | 1451 | pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq) |
1423 | { | 1452 | { |
1453 | ide_hwif_t *hwif = drive->hwif; | ||
1454 | pmac_ide_hwif_t *pmif = | ||
1455 | (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); | ||
1424 | struct dbdma_cmd *table; | 1456 | struct dbdma_cmd *table; |
1425 | int i, count = 0; | 1457 | int i, count = 0; |
1426 | ide_hwif_t *hwif = HWIF(drive); | ||
1427 | pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data; | ||
1428 | volatile struct dbdma_regs __iomem *dma = pmif->dma_regs; | 1458 | volatile struct dbdma_regs __iomem *dma = pmif->dma_regs; |
1429 | struct scatterlist *sg; | 1459 | struct scatterlist *sg; |
1430 | int wr = (rq_data_dir(rq) == WRITE); | 1460 | int wr = (rq_data_dir(rq) == WRITE); |
@@ -1520,7 +1550,8 @@ static int | |||
1520 | pmac_ide_dma_setup(ide_drive_t *drive) | 1550 | pmac_ide_dma_setup(ide_drive_t *drive) |
1521 | { | 1551 | { |
1522 | ide_hwif_t *hwif = HWIF(drive); | 1552 | ide_hwif_t *hwif = HWIF(drive); |
1523 | pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data; | 1553 | pmac_ide_hwif_t *pmif = |
1554 | (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); | ||
1524 | struct request *rq = HWGROUP(drive)->rq; | 1555 | struct request *rq = HWGROUP(drive)->rq; |
1525 | u8 unit = (drive->select.b.unit & 0x01); | 1556 | u8 unit = (drive->select.b.unit & 0x01); |
1526 | u8 ata4; | 1557 | u8 ata4; |
@@ -1560,7 +1591,9 @@ pmac_ide_dma_exec_cmd(ide_drive_t *drive, u8 command) | |||
1560 | static void | 1591 | static void |
1561 | pmac_ide_dma_start(ide_drive_t *drive) | 1592 | pmac_ide_dma_start(ide_drive_t *drive) |
1562 | { | 1593 | { |
1563 | pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; | 1594 | ide_hwif_t *hwif = drive->hwif; |
1595 | pmac_ide_hwif_t *pmif = | ||
1596 | (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); | ||
1564 | volatile struct dbdma_regs __iomem *dma; | 1597 | volatile struct dbdma_regs __iomem *dma; |
1565 | 1598 | ||
1566 | dma = pmif->dma_regs; | 1599 | dma = pmif->dma_regs; |
@@ -1576,7 +1609,9 @@ pmac_ide_dma_start(ide_drive_t *drive) | |||
1576 | static int | 1609 | static int |
1577 | pmac_ide_dma_end (ide_drive_t *drive) | 1610 | pmac_ide_dma_end (ide_drive_t *drive) |
1578 | { | 1611 | { |
1579 | pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; | 1612 | ide_hwif_t *hwif = drive->hwif; |
1613 | pmac_ide_hwif_t *pmif = | ||
1614 | (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); | ||
1580 | volatile struct dbdma_regs __iomem *dma; | 1615 | volatile struct dbdma_regs __iomem *dma; |
1581 | u32 dstat; | 1616 | u32 dstat; |
1582 | 1617 | ||
@@ -1604,7 +1639,9 @@ pmac_ide_dma_end (ide_drive_t *drive) | |||
1604 | static int | 1639 | static int |
1605 | pmac_ide_dma_test_irq (ide_drive_t *drive) | 1640 | pmac_ide_dma_test_irq (ide_drive_t *drive) |
1606 | { | 1641 | { |
1607 | pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; | 1642 | ide_hwif_t *hwif = drive->hwif; |
1643 | pmac_ide_hwif_t *pmif = | ||
1644 | (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); | ||
1608 | volatile struct dbdma_regs __iomem *dma; | 1645 | volatile struct dbdma_regs __iomem *dma; |
1609 | unsigned long status, timeout; | 1646 | unsigned long status, timeout; |
1610 | 1647 | ||
@@ -1664,7 +1701,9 @@ static void pmac_ide_dma_host_set(ide_drive_t *drive, int on) | |||
1664 | static void | 1701 | static void |
1665 | pmac_ide_dma_lost_irq (ide_drive_t *drive) | 1702 | pmac_ide_dma_lost_irq (ide_drive_t *drive) |
1666 | { | 1703 | { |
1667 | pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; | 1704 | ide_hwif_t *hwif = drive->hwif; |
1705 | pmac_ide_hwif_t *pmif = | ||
1706 | (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); | ||
1668 | volatile struct dbdma_regs __iomem *dma; | 1707 | volatile struct dbdma_regs __iomem *dma; |
1669 | unsigned long status; | 1708 | unsigned long status; |
1670 | 1709 | ||
@@ -1694,7 +1733,8 @@ static const struct ide_dma_ops pmac_dma_ops = { | |||
1694 | static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif, | 1733 | static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif, |
1695 | const struct ide_port_info *d) | 1734 | const struct ide_port_info *d) |
1696 | { | 1735 | { |
1697 | pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data; | 1736 | pmac_ide_hwif_t *pmif = |
1737 | (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); | ||
1698 | struct pci_dev *dev = to_pci_dev(hwif->dev); | 1738 | struct pci_dev *dev = to_pci_dev(hwif->dev); |
1699 | 1739 | ||
1700 | /* We won't need pci_dev if we switch to generic consistent | 1740 | /* We won't need pci_dev if we switch to generic consistent |
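
The recurring change across the pmac.c hunks above is the retirement of hwif->hwif_data as the per-port private pointer: each method now re-derives its pmac_ide_hwif_t from the driver data of the hwif's parent device, which the macio and PCI attach paths store via dev_set_drvdata()/pci_set_drvdata(). A minimal sketch of that accessor pattern as it would sit inside pmac.c (the helper name is illustrative and not part of the patch):

	/* Sketch only: look up the port's private data through the parent
	 * device instead of the removed hwif->hwif_data field. */
	static pmac_ide_hwif_t *pmif_from_drive(ide_drive_t *drive)
	{
		ide_hwif_t *hwif = drive->hwif;

		return (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
	}
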
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c index 65fc08b6b6d0..b15cad58dc81 100644 --- a/drivers/ide/setup-pci.c +++ b/drivers/ide/setup-pci.c | |||
@@ -73,15 +73,12 @@ static void ide_pci_clear_simplex(unsigned long dma_base, const char *name) | |||
73 | * @d: IDE port info | 73 | * @d: IDE port info |
74 | * | 74 | * |
75 | * Fetch the DMA Bus-Master-I/O-Base-Address (BMIBA) from PCI space. | 75 | * Fetch the DMA Bus-Master-I/O-Base-Address (BMIBA) from PCI space. |
76 | * Where a device has a partner that is already in DMA mode we check | ||
77 | * and enforce IDE simplex rules. | ||
78 | */ | 76 | */ |
79 | 77 | ||
80 | unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d) | 78 | unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d) |
81 | { | 79 | { |
82 | struct pci_dev *dev = to_pci_dev(hwif->dev); | 80 | struct pci_dev *dev = to_pci_dev(hwif->dev); |
83 | unsigned long dma_base = 0; | 81 | unsigned long dma_base = 0; |
84 | u8 dma_stat = 0; | ||
85 | 82 | ||
86 | if (hwif->host_flags & IDE_HFLAG_MMIO) | 83 | if (hwif->host_flags & IDE_HFLAG_MMIO) |
87 | return hwif->dma_base; | 84 | return hwif->dma_base; |
@@ -102,11 +99,19 @@ unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d) | |||
102 | if (hwif->channel) | 99 | if (hwif->channel) |
103 | dma_base += 8; | 100 | dma_base += 8; |
104 | 101 | ||
105 | if (d->host_flags & IDE_HFLAG_CS5520) | 102 | return dma_base; |
103 | } | ||
104 | EXPORT_SYMBOL_GPL(ide_pci_dma_base); | ||
105 | |||
106 | int ide_pci_check_simplex(ide_hwif_t *hwif, const struct ide_port_info *d) | ||
107 | { | ||
108 | u8 dma_stat; | ||
109 | |||
110 | if (d->host_flags & (IDE_HFLAG_MMIO | IDE_HFLAG_CS5520)) | ||
106 | goto out; | 111 | goto out; |
107 | 112 | ||
108 | if (d->host_flags & IDE_HFLAG_CLEAR_SIMPLEX) { | 113 | if (d->host_flags & IDE_HFLAG_CLEAR_SIMPLEX) { |
109 | ide_pci_clear_simplex(dma_base, d->name); | 114 | ide_pci_clear_simplex(hwif->dma_base, d->name); |
110 | goto out; | 115 | goto out; |
111 | } | 116 | } |
112 | 117 | ||
@@ -120,15 +125,15 @@ unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d) | |||
120 | * we tune the drive then try to grab DMA ownership if we want to be | 125 | * we tune the drive then try to grab DMA ownership if we want to be |
121 | * the DMA end. This has to become dynamic to handle hot-plug. | 126 | * the DMA end. This has to become dynamic to handle hot-plug. |
122 | */ | 127 | */ |
123 | dma_stat = hwif->INB(dma_base + 2); | 128 | dma_stat = hwif->tp_ops->read_sff_dma_status(hwif); |
124 | if ((dma_stat & 0x80) && hwif->mate && hwif->mate->dma_base) { | 129 | if ((dma_stat & 0x80) && hwif->mate && hwif->mate->dma_base) { |
125 | printk(KERN_INFO "%s: simplex device: DMA disabled\n", d->name); | 130 | printk(KERN_INFO "%s: simplex device: DMA disabled\n", d->name); |
126 | dma_base = 0; | 131 | return -1; |
127 | } | 132 | } |
128 | out: | 133 | out: |
129 | return dma_base; | 134 | return 0; |
130 | } | 135 | } |
131 | EXPORT_SYMBOL_GPL(ide_pci_dma_base); | 136 | EXPORT_SYMBOL_GPL(ide_pci_check_simplex); |
132 | 137 | ||
133 | /* | 138 | /* |
134 | * Set up BM-DMA capability (PnP BIOS should have done this) | 139 | * Set up BM-DMA capability (PnP BIOS should have done this) |
@@ -284,33 +289,31 @@ static int ide_pci_check_iomem(struct pci_dev *dev, const struct ide_port_info * | |||
284 | } | 289 | } |
285 | 290 | ||
286 | /** | 291 | /** |
287 | * ide_hwif_configure - configure an IDE interface | 292 | * ide_hw_configure - configure a hw_regs_t instance |
288 | * @dev: PCI device holding interface | 293 | * @dev: PCI device holding interface |
289 | * @d: IDE port info | 294 | * @d: IDE port info |
290 | * @port: port number | 295 | * @port: port number |
291 | * @irq: PCI IRQ | 296 | * @irq: PCI IRQ |
297 | * @hw: hw_regs_t instance corresponding to this port | ||
292 | * | 298 | * |
293 | * Perform the initial set up for the hardware interface structure. This | 299 | * Perform the initial set up for the hardware interface structure. This |
294 | * is done per interface port rather than per PCI device. There may be | 300 | * is done per interface port rather than per PCI device. There may be |
295 | * more than one port per device. | 301 | * more than one port per device. |
296 | * | 302 | * |
297 | * Returns the new hardware interface structure, or NULL on a failure | 303 | * Returns zero on success or an error code. |
298 | */ | 304 | */ |
299 | 305 | ||
300 | static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev, | 306 | static int ide_hw_configure(struct pci_dev *dev, const struct ide_port_info *d, |
301 | const struct ide_port_info *d, | 307 | unsigned int port, int irq, hw_regs_t *hw) |
302 | unsigned int port, int irq) | ||
303 | { | 308 | { |
304 | unsigned long ctl = 0, base = 0; | 309 | unsigned long ctl = 0, base = 0; |
305 | ide_hwif_t *hwif; | ||
306 | struct hw_regs_s hw; | ||
307 | 310 | ||
308 | if ((d->host_flags & IDE_HFLAG_ISA_PORTS) == 0) { | 311 | if ((d->host_flags & IDE_HFLAG_ISA_PORTS) == 0) { |
309 | if (ide_pci_check_iomem(dev, d, 2 * port) || | 312 | if (ide_pci_check_iomem(dev, d, 2 * port) || |
310 | ide_pci_check_iomem(dev, d, 2 * port + 1)) { | 313 | ide_pci_check_iomem(dev, d, 2 * port + 1)) { |
311 | printk(KERN_ERR "%s: I/O baseregs (BIOS) are reported " | 314 | printk(KERN_ERR "%s: I/O baseregs (BIOS) are reported " |
312 | "as MEM for port %d!\n", d->name, port); | 315 | "as MEM for port %d!\n", d->name, port); |
313 | return NULL; | 316 | return -EINVAL; |
314 | } | 317 | } |
315 | 318 | ||
316 | ctl = pci_resource_start(dev, 2*port+1); | 319 | ctl = pci_resource_start(dev, 2*port+1); |
@@ -324,22 +327,16 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev, | |||
324 | if (!base || !ctl) { | 327 | if (!base || !ctl) { |
325 | printk(KERN_ERR "%s: bad PCI BARs for port %d, skipping\n", | 328 | printk(KERN_ERR "%s: bad PCI BARs for port %d, skipping\n", |
326 | d->name, port); | 329 | d->name, port); |
327 | return NULL; | 330 | return -EINVAL; |
328 | } | 331 | } |
329 | 332 | ||
330 | hwif = ide_find_port_slot(d); | 333 | memset(hw, 0, sizeof(*hw)); |
331 | if (hwif == NULL) | 334 | hw->irq = irq; |
332 | return NULL; | 335 | hw->dev = &dev->dev; |
333 | 336 | hw->chipset = d->chipset ? d->chipset : ide_pci; | |
334 | memset(&hw, 0, sizeof(hw)); | 337 | ide_std_init_ports(hw, base, ctl | 2); |
335 | hw.irq = irq; | ||
336 | hw.dev = &dev->dev; | ||
337 | hw.chipset = d->chipset ? d->chipset : ide_pci; | ||
338 | ide_std_init_ports(&hw, base, ctl | 2); | ||
339 | |||
340 | ide_init_port_hw(hwif, &hw); | ||
341 | 338 | ||
342 | return hwif; | 339 | return 0; |
343 | } | 340 | } |
344 | 341 | ||
345 | #ifdef CONFIG_BLK_DEV_IDEDMA_PCI | 342 | #ifdef CONFIG_BLK_DEV_IDEDMA_PCI |
@@ -362,7 +359,15 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d) | |||
362 | (dev->class & 0x80))) { | 359 | (dev->class & 0x80))) { |
363 | unsigned long base = ide_pci_dma_base(hwif, d); | 360 | unsigned long base = ide_pci_dma_base(hwif, d); |
364 | 361 | ||
365 | if (base == 0 || ide_pci_set_master(dev, d->name) < 0) | 362 | if (base == 0) |
363 | return -1; | ||
364 | |||
365 | hwif->dma_base = base; | ||
366 | |||
367 | if (ide_pci_check_simplex(hwif, d) < 0) | ||
368 | return -1; | ||
369 | |||
370 | if (ide_pci_set_master(dev, d->name) < 0) | ||
366 | return -1; | 371 | return -1; |
367 | 372 | ||
368 | if (hwif->host_flags & IDE_HFLAG_MMIO) | 373 | if (hwif->host_flags & IDE_HFLAG_MMIO) |
@@ -376,7 +381,7 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d) | |||
376 | if (ide_allocate_dma_engine(hwif)) | 381 | if (ide_allocate_dma_engine(hwif)) |
377 | return -1; | 382 | return -1; |
378 | 383 | ||
379 | ide_setup_dma(hwif, base); | 384 | hwif->dma_ops = &sff_dma_ops; |
380 | } | 385 | } |
381 | 386 | ||
382 | return 0; | 387 | return 0; |
@@ -429,7 +434,8 @@ out: | |||
429 | * @dev: PCI device | 434 | * @dev: PCI device |
430 | * @d: IDE port info | 435 | * @d: IDE port info |
431 | * @pciirq: IRQ line | 436 | * @pciirq: IRQ line |
432 | * @idx: ATA index table to update | 437 | * @hw: hw_regs_t instances corresponding to this PCI IDE device |
438 | * @hws: hw_regs_t pointers table to update | ||
433 | * | 439 | * |
434 | * Scan the interfaces attached to this device and do any | 440 | * Scan the interfaces attached to this device and do any |
435 | * necessary per port setup. Attach the devices and ask the | 441 | * necessary per port setup. Attach the devices and ask the |
@@ -440,10 +446,10 @@ out: | |||
440 | * where the chipset setup is not the default PCI IDE one. | 446 | * where the chipset setup is not the default PCI IDE one. |
441 | */ | 447 | */ |
442 | 448 | ||
443 | void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d, int pciirq, u8 *idx) | 449 | void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d, |
450 | int pciirq, hw_regs_t *hw, hw_regs_t **hws) | ||
444 | { | 451 | { |
445 | int channels = (d->host_flags & IDE_HFLAG_SINGLE) ? 1 : 2, port; | 452 | int channels = (d->host_flags & IDE_HFLAG_SINGLE) ? 1 : 2, port; |
446 | ide_hwif_t *hwif; | ||
447 | u8 tmp; | 453 | u8 tmp; |
448 | 454 | ||
449 | /* | 455 | /* |
@@ -459,11 +465,10 @@ void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d, int | |||
459 | continue; /* port not enabled */ | 465 | continue; /* port not enabled */ |
460 | } | 466 | } |
461 | 467 | ||
462 | hwif = ide_hwif_configure(dev, d, port, pciirq); | 468 | if (ide_hw_configure(dev, d, port, pciirq, hw + port)) |
463 | if (hwif == NULL) | ||
464 | continue; | 469 | continue; |
465 | 470 | ||
466 | *(idx + port) = hwif->index; | 471 | *(hws + port) = hw + port; |
467 | } | 472 | } |
468 | } | 473 | } |
469 | EXPORT_SYMBOL_GPL(ide_pci_setup_ports); | 474 | EXPORT_SYMBOL_GPL(ide_pci_setup_ports); |
@@ -480,7 +485,7 @@ EXPORT_SYMBOL_GPL(ide_pci_setup_ports); | |||
480 | */ | 485 | */ |
481 | static int do_ide_setup_pci_device(struct pci_dev *dev, | 486 | static int do_ide_setup_pci_device(struct pci_dev *dev, |
482 | const struct ide_port_info *d, | 487 | const struct ide_port_info *d, |
483 | u8 *idx, u8 noisy) | 488 | u8 noisy) |
484 | { | 489 | { |
485 | int tried_config = 0; | 490 | int tried_config = 0; |
486 | int pciirq, ret; | 491 | int pciirq, ret; |
@@ -529,22 +534,24 @@ static int do_ide_setup_pci_device(struct pci_dev *dev, | |||
529 | d->name, pciirq); | 534 | d->name, pciirq); |
530 | } | 535 | } |
531 | 536 | ||
532 | /* FIXME: silent failure can happen */ | 537 | ret = pciirq; |
533 | |||
534 | ide_pci_setup_ports(dev, d, pciirq, idx); | ||
535 | out: | 538 | out: |
536 | return ret; | 539 | return ret; |
537 | } | 540 | } |
538 | 541 | ||
539 | int ide_setup_pci_device(struct pci_dev *dev, const struct ide_port_info *d) | 542 | int ide_setup_pci_device(struct pci_dev *dev, const struct ide_port_info *d) |
540 | { | 543 | { |
541 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | 544 | hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL }; |
542 | int ret; | 545 | int ret; |
543 | 546 | ||
544 | ret = do_ide_setup_pci_device(dev, d, &idx[0], 1); | 547 | ret = do_ide_setup_pci_device(dev, d, 1); |
548 | |||
549 | if (ret >= 0) { | ||
550 | /* FIXME: silent failure can happen */ | ||
551 | ide_pci_setup_ports(dev, d, ret, &hw[0], &hws[0]); | ||
545 | 552 | ||
546 | if (ret >= 0) | 553 | ret = ide_host_add(d, hws, NULL); |
547 | ide_device_add(idx, d); | 554 | } |
548 | 555 | ||
549 | return ret; | 556 | return ret; |
550 | } | 557 | } |
@@ -555,19 +562,23 @@ int ide_setup_pci_devices(struct pci_dev *dev1, struct pci_dev *dev2, | |||
555 | { | 562 | { |
556 | struct pci_dev *pdev[] = { dev1, dev2 }; | 563 | struct pci_dev *pdev[] = { dev1, dev2 }; |
557 | int ret, i; | 564 | int ret, i; |
558 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | 565 | hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL }; |
559 | 566 | ||
560 | for (i = 0; i < 2; i++) { | 567 | for (i = 0; i < 2; i++) { |
561 | ret = do_ide_setup_pci_device(pdev[i], d, &idx[i*2], !i); | 568 | ret = do_ide_setup_pci_device(pdev[i], d, !i); |
569 | |||
562 | /* | 570 | /* |
563 | * FIXME: Mom, mom, they stole me the helper function to undo | 571 | * FIXME: Mom, mom, they stole me the helper function to undo |
564 | * do_ide_setup_pci_device() on the first device! | 572 | * do_ide_setup_pci_device() on the first device! |
565 | */ | 573 | */ |
566 | if (ret < 0) | 574 | if (ret < 0) |
567 | goto out; | 575 | goto out; |
576 | |||
577 | /* FIXME: silent failure can happen */ | ||
578 | ide_pci_setup_ports(pdev[i], d, ret, &hw[i*2], &hws[i*2]); | ||
568 | } | 579 | } |
569 | 580 | ||
570 | ide_device_add(idx, d); | 581 | ret = ide_host_add(d, hws, NULL); |
571 | out: | 582 | out: |
572 | return ret; | 583 | return ret; |
573 | } | 584 | } |
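
The setup-pci.c changes above replace the old idx[] index table with explicit hw_regs_t handling: do_ide_setup_pci_device() now only probes the IRQ, ide_pci_setup_ports() fills a caller-provided hw_regs_t array, and ide_host_add() performs the registration, while DMA setup is split into ide_pci_dma_base() plus the new ide_pci_check_simplex(). A trimmed sketch of the single-device flow, restating ide_setup_pci_device() from the hunk above:

	/* Sketch of the new registration sequence (error handling trimmed). */
	hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };
	int ret = do_ide_setup_pci_device(dev, d, 1);	/* returns the PCI IRQ */

	if (ret >= 0) {
		ide_pci_setup_ports(dev, d, ret, &hw[0], &hws[0]);
		ret = ide_host_add(d, hws, NULL);
	}
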
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c index 0792d930c481..7a64aa9b51b6 100644 --- a/drivers/infiniband/hw/ehca/ehca_irq.c +++ b/drivers/infiniband/hw/ehca/ehca_irq.c | |||
@@ -646,8 +646,8 @@ static inline int find_next_online_cpu(struct ehca_comp_pool *pool) | |||
646 | ehca_dmp(&cpu_online_map, sizeof(cpumask_t), ""); | 646 | ehca_dmp(&cpu_online_map, sizeof(cpumask_t), ""); |
647 | 647 | ||
648 | spin_lock_irqsave(&pool->last_cpu_lock, flags); | 648 | spin_lock_irqsave(&pool->last_cpu_lock, flags); |
649 | cpu = next_cpu(pool->last_cpu, cpu_online_map); | 649 | cpu = next_cpu_nr(pool->last_cpu, cpu_online_map); |
650 | if (cpu == NR_CPUS) | 650 | if (cpu >= nr_cpu_ids) |
651 | cpu = first_cpu(cpu_online_map); | 651 | cpu = first_cpu(cpu_online_map); |
652 | pool->last_cpu = cpu; | 652 | pool->last_cpu = cpu; |
653 | spin_unlock_irqrestore(&pool->last_cpu_lock, flags); | 653 | spin_unlock_irqrestore(&pool->last_cpu_lock, flags); |
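
The ehca hunk above is a mechanical switch to the newer cpumask helpers: next_cpu_nr() bounded by nr_cpu_ids replaces next_cpu() compared against NR_CPUS, keeping the same wrap-around round-robin selection. Read in one piece, the pattern is (a restatement of the lines above, not new behaviour):

	/* Pick the next online CPU after last_cpu, wrapping to the first one. */
	cpu = next_cpu_nr(pool->last_cpu, cpu_online_map);
	if (cpu >= nr_cpu_ids)
		cpu = first_cpu(cpu_online_map);
	pool->last_cpu = cpu;
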
diff --git a/drivers/input/keyboard/tosakbd.c b/drivers/input/keyboard/tosakbd.c index 94e444b4ee15..b12b7ee4b6aa 100644 --- a/drivers/input/keyboard/tosakbd.c +++ b/drivers/input/keyboard/tosakbd.c | |||
@@ -215,8 +215,6 @@ static int tosakbd_suspend(struct platform_device *dev, pm_message_t state) | |||
215 | unsigned long flags; | 215 | unsigned long flags; |
216 | 216 | ||
217 | spin_lock_irqsave(&tosakbd->lock, flags); | 217 | spin_lock_irqsave(&tosakbd->lock, flags); |
218 | PGSR1 = (PGSR1 & ~TOSA_GPIO_LOW_STROBE_BIT); | ||
219 | PGSR2 = (PGSR2 & ~TOSA_GPIO_HIGH_STROBE_BIT); | ||
220 | tosakbd->suspended = 1; | 218 | tosakbd->suspended = 1; |
221 | spin_unlock_irqrestore(&tosakbd->lock, flags); | 219 | spin_unlock_irqrestore(&tosakbd->lock, flags); |
222 | 220 | ||
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 260bade0a5ec..9f93c29fed35 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig | |||
@@ -5,6 +5,10 @@ | |||
5 | menu "Multifunction device drivers" | 5 | menu "Multifunction device drivers" |
6 | depends on HAS_IOMEM | 6 | depends on HAS_IOMEM |
7 | 7 | ||
8 | config MFD_CORE | ||
9 | tristate | ||
10 | default n | ||
11 | |||
8 | config MFD_SM501 | 12 | config MFD_SM501 |
9 | tristate "Support for Silicon Motion SM501" | 13 | tristate "Support for Silicon Motion SM501" |
10 | ---help--- | 14 | ---help--- |
@@ -38,6 +42,13 @@ config HTC_PASIC3 | |||
38 | HTC Magician devices, respectively. Actual functionality is | 42 | HTC Magician devices, respectively. Actual functionality is |
39 | handled by the leds-pasic3 and ds1wm drivers. | 43 | handled by the leds-pasic3 and ds1wm drivers. |
40 | 44 | ||
45 | config MFD_TC6393XB | ||
46 | bool "Support Toshiba TC6393XB" | ||
47 | depends on HAVE_GPIO_LIB | ||
48 | select MFD_CORE | ||
49 | help | ||
50 | Support for Toshiba Mobile IO Controller TC6393XB | ||
51 | |||
41 | endmenu | 52 | endmenu |
42 | 53 | ||
43 | menu "Multimedia Capabilities Port drivers" | 54 | menu "Multimedia Capabilities Port drivers" |
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index eef4e26807df..33daa2f45dd8 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile | |||
@@ -8,6 +8,10 @@ obj-$(CONFIG_MFD_ASIC3) += asic3.o | |||
8 | obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o | 8 | obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o |
9 | obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o | 9 | obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o |
10 | 10 | ||
11 | obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o | ||
12 | |||
13 | obj-$(CONFIG_MFD_CORE) += mfd-core.o | ||
14 | |||
11 | obj-$(CONFIG_MCP) += mcp-core.o | 15 | obj-$(CONFIG_MCP) += mcp-core.o |
12 | obj-$(CONFIG_MCP_SA11X0) += mcp-sa11x0.o | 16 | obj-$(CONFIG_MCP_SA11X0) += mcp-sa11x0.o |
13 | obj-$(CONFIG_MCP_UCB1200) += ucb1x00-core.o | 17 | obj-$(CONFIG_MCP_UCB1200) += ucb1x00-core.o |
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c new file mode 100644 index 000000000000..d7d88ce053a6 --- /dev/null +++ b/drivers/mfd/mfd-core.c | |||
@@ -0,0 +1,114 @@ | |||
1 | /* | ||
2 | * drivers/mfd/mfd-core.c | ||
3 | * | ||
4 | * core MFD support | ||
5 | * Copyright (c) 2006 Ian Molton | ||
6 | * Copyright (c) 2007,2008 Dmitry Baryshkov | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/platform_device.h> | ||
16 | #include <linux/mfd/core.h> | ||
17 | |||
18 | static int mfd_add_device(struct platform_device *parent, | ||
19 | const struct mfd_cell *cell, | ||
20 | struct resource *mem_base, | ||
21 | int irq_base) | ||
22 | { | ||
23 | struct resource res[cell->num_resources]; | ||
24 | struct platform_device *pdev; | ||
25 | int ret = -ENOMEM; | ||
26 | int r; | ||
27 | |||
28 | pdev = platform_device_alloc(cell->name, parent->id); | ||
29 | if (!pdev) | ||
30 | goto fail_alloc; | ||
31 | |||
32 | pdev->dev.parent = &parent->dev; | ||
33 | |||
34 | ret = platform_device_add_data(pdev, | ||
35 | cell, sizeof(struct mfd_cell)); | ||
36 | if (ret) | ||
37 | goto fail_device; | ||
38 | |||
39 | memzero(res, sizeof(res)); | ||
40 | for (r = 0; r < cell->num_resources; r++) { | ||
41 | res[r].name = cell->resources[r].name; | ||
42 | res[r].flags = cell->resources[r].flags; | ||
43 | |||
44 | /* Find out base to use */ | ||
45 | if (cell->resources[r].flags & IORESOURCE_MEM) { | ||
46 | res[r].parent = mem_base; | ||
47 | res[r].start = mem_base->start + | ||
48 | cell->resources[r].start; | ||
49 | res[r].end = mem_base->start + | ||
50 | cell->resources[r].end; | ||
51 | } else if (cell->resources[r].flags & IORESOURCE_IRQ) { | ||
52 | res[r].start = irq_base + | ||
53 | cell->resources[r].start; | ||
54 | res[r].end = irq_base + | ||
55 | cell->resources[r].end; | ||
56 | } else { | ||
57 | res[r].parent = cell->resources[r].parent; | ||
58 | res[r].start = cell->resources[r].start; | ||
59 | res[r].end = cell->resources[r].end; | ||
60 | } | ||
61 | } | ||
62 | |||
63 | platform_device_add_resources(pdev, res, cell->num_resources); | ||
64 | |||
65 | ret = platform_device_add(pdev); | ||
66 | if (ret) | ||
67 | goto fail_device; | ||
68 | |||
69 | return 0; | ||
70 | |||
71 | /* platform_device_del(pdev); */ | ||
72 | fail_device: | ||
73 | platform_device_put(pdev); | ||
74 | fail_alloc: | ||
75 | return ret; | ||
76 | } | ||
77 | |||
78 | int mfd_add_devices( | ||
79 | struct platform_device *parent, | ||
80 | const struct mfd_cell *cells, int n_devs, | ||
81 | struct resource *mem_base, | ||
82 | int irq_base) | ||
83 | { | ||
84 | int i; | ||
85 | int ret = 0; | ||
86 | |||
87 | for (i = 0; i < n_devs; i++) { | ||
88 | ret = mfd_add_device(parent, cells + i, mem_base, irq_base); | ||
89 | if (ret) | ||
90 | break; | ||
91 | } | ||
92 | |||
93 | if (ret) | ||
94 | mfd_remove_devices(parent); | ||
95 | |||
96 | return ret; | ||
97 | } | ||
98 | EXPORT_SYMBOL(mfd_add_devices); | ||
99 | |||
100 | static int mfd_remove_devices_fn(struct device *dev, void *unused) | ||
101 | { | ||
102 | platform_device_unregister( | ||
103 | container_of(dev, struct platform_device, dev)); | ||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | void mfd_remove_devices(struct platform_device *parent) | ||
108 | { | ||
109 | device_for_each_child(&parent->dev, NULL, mfd_remove_devices_fn); | ||
110 | } | ||
111 | EXPORT_SYMBOL(mfd_remove_devices); | ||
112 | |||
113 | MODULE_LICENSE("GPL"); | ||
114 | MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov"); | ||
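
The new mfd-core.c above gives multifunction chip drivers a generic way to register their sub-devices: the parent describes each one as an mfd_cell (name, resources, optional enable hook), and mfd_add_devices() clones the resources, offsetting IORESOURCE_MEM entries by mem_base->start and IORESOURCE_IRQ entries by irq_base, before adding one platform device per cell. A minimal, hypothetical usage sketch under those assumptions (all names here are illustrative; the real consumer in this series is the TC6393XB driver below):

	#include <linux/kernel.h>
	#include <linux/ioport.h>
	#include <linux/platform_device.h>
	#include <linux/mfd/core.h>

	/* Resource offsets are relative to the parent's memory window and
	 * IRQ base, exactly as mfd_add_device() above rewrites them. */
	static struct resource example_resources[] = {
		{ .start = 0x100, .end = 0x1ff, .flags = IORESOURCE_MEM },
		{ .start = 0,     .end = 0,     .flags = IORESOURCE_IRQ },
	};

	static struct mfd_cell example_cells[] = {
		{
			.name		= "example-subdev",
			.num_resources	= ARRAY_SIZE(example_resources),
			.resources	= example_resources,
		},
	};

	static int example_probe(struct platform_device *dev)
	{
		struct resource *mem =
			platform_get_resource(dev, IORESOURCE_MEM, 0);
		int irq_base = platform_get_irq(dev, 0); /* simplification */

		if (!mem || irq_base < 0)
			return -ENODEV;

		return mfd_add_devices(dev, example_cells,
				       ARRAY_SIZE(example_cells),
				       mem, irq_base);
	}

A failure partway through mfd_add_devices() unregisters any cells already added, so the caller only has to undo its own setup.
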
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c new file mode 100644 index 000000000000..2d87501b6fd4 --- /dev/null +++ b/drivers/mfd/tc6393xb.c | |||
@@ -0,0 +1,600 @@ | |||
1 | /* | ||
2 | * Toshiba TC6393XB SoC support | ||
3 | * | ||
4 | * Copyright(c) 2005-2006 Chris Humbert | ||
5 | * Copyright(c) 2005 Dirk Opfer | ||
6 | * Copyright(c) 2005 Ian Molton <spyro@f2s.com> | ||
7 | * Copyright(c) 2007 Dmitry Baryshkov | ||
8 | * | ||
9 | * Based on code written by Sharp/Lineo for 2.4 kernels | ||
10 | * Based on locomo.c | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License version 2 as | ||
14 | * published by the Free Software Foundation. | ||
15 | */ | ||
16 | |||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/io.h> | ||
20 | #include <linux/irq.h> | ||
21 | #include <linux/platform_device.h> | ||
22 | #include <linux/fb.h> | ||
23 | #include <linux/clk.h> | ||
24 | #include <linux/mfd/core.h> | ||
25 | #include <linux/mfd/tmio.h> | ||
26 | #include <linux/mfd/tc6393xb.h> | ||
27 | #include <linux/gpio.h> | ||
28 | |||
29 | #define SCR_REVID 0x08 /* b Revision ID */ | ||
30 | #define SCR_ISR 0x50 /* b Interrupt Status */ | ||
31 | #define SCR_IMR 0x52 /* b Interrupt Mask */ | ||
32 | #define SCR_IRR 0x54 /* b Interrupt Routing */ | ||
33 | #define SCR_GPER 0x60 /* w GP Enable */ | ||
34 | #define SCR_GPI_SR(i) (0x64 + (i)) /* b3 GPI Status */ | ||
35 | #define SCR_GPI_IMR(i) (0x68 + (i)) /* b3 GPI INT Mask */ | ||
36 | #define SCR_GPI_EDER(i) (0x6c + (i)) /* b3 GPI Edge Detect Enable */ | ||
37 | #define SCR_GPI_LIR(i) (0x70 + (i)) /* b3 GPI Level Invert */ | ||
38 | #define SCR_GPO_DSR(i) (0x78 + (i)) /* b3 GPO Data Set */ | ||
39 | #define SCR_GPO_DOECR(i) (0x7c + (i)) /* b3 GPO Data OE Control */ | ||
40 | #define SCR_GP_IARCR(i) (0x80 + (i)) /* b3 GP Internal Active Register Control */ | ||
41 | #define SCR_GP_IARLCR(i) (0x84 + (i)) /* b3 GP INTERNAL Active Register Level Control */ | ||
42 | #define SCR_GPI_BCR(i) (0x88 + (i)) /* b3 GPI Buffer Control */ | ||
43 | #define SCR_GPA_IARCR 0x8c /* w GPa Internal Active Register Control */ | ||
44 | #define SCR_GPA_IARLCR 0x90 /* w GPa Internal Active Register Level Control */ | ||
45 | #define SCR_GPA_BCR 0x94 /* w GPa Buffer Control */ | ||
46 | #define SCR_CCR 0x98 /* w Clock Control */ | ||
47 | #define SCR_PLL2CR 0x9a /* w PLL2 Control */ | ||
48 | #define SCR_PLL1CR 0x9c /* l PLL1 Control */ | ||
49 | #define SCR_DIARCR 0xa0 /* b Device Internal Active Register Control */ | ||
50 | #define SCR_DBOCR 0xa1 /* b Device Buffer Off Control */ | ||
51 | #define SCR_FER 0xe0 /* b Function Enable */ | ||
52 | #define SCR_MCR 0xe4 /* w Mode Control */ | ||
53 | #define SCR_CONFIG 0xfc /* b Configuration Control */ | ||
54 | #define SCR_DEBUG 0xff /* b Debug */ | ||
55 | |||
56 | #define SCR_CCR_CK32K BIT(0) | ||
57 | #define SCR_CCR_USBCK BIT(1) | ||
58 | #define SCR_CCR_UNK1 BIT(4) | ||
59 | #define SCR_CCR_MCLK_MASK (7 << 8) | ||
60 | #define SCR_CCR_MCLK_OFF (0 << 8) | ||
61 | #define SCR_CCR_MCLK_12 (1 << 8) | ||
62 | #define SCR_CCR_MCLK_24 (2 << 8) | ||
63 | #define SCR_CCR_MCLK_48 (3 << 8) | ||
64 | #define SCR_CCR_HCLK_MASK (3 << 12) | ||
65 | #define SCR_CCR_HCLK_24 (0 << 12) | ||
66 | #define SCR_CCR_HCLK_48 (1 << 12) | ||
67 | |||
68 | #define SCR_FER_USBEN BIT(0) /* USB host enable */ | ||
69 | #define SCR_FER_LCDCVEN BIT(1) /* polysilicon TFT enable */ | ||
70 | #define SCR_FER_SLCDEN BIT(2) /* SLCD enable */ | ||
71 | |||
72 | #define SCR_MCR_RDY_MASK (3 << 0) | ||
73 | #define SCR_MCR_RDY_OPENDRAIN (0 << 0) | ||
74 | #define SCR_MCR_RDY_TRISTATE (1 << 0) | ||
75 | #define SCR_MCR_RDY_PUSHPULL (2 << 0) | ||
76 | #define SCR_MCR_RDY_UNK BIT(2) | ||
77 | #define SCR_MCR_RDY_EN BIT(3) | ||
78 | #define SCR_MCR_INT_MASK (3 << 4) | ||
79 | #define SCR_MCR_INT_OPENDRAIN (0 << 4) | ||
80 | #define SCR_MCR_INT_TRISTATE (1 << 4) | ||
81 | #define SCR_MCR_INT_PUSHPULL (2 << 4) | ||
82 | #define SCR_MCR_INT_UNK BIT(6) | ||
83 | #define SCR_MCR_INT_EN BIT(7) | ||
84 | /* bits 8 - 16 are unknown */ | ||
85 | |||
86 | #define TC_GPIO_BIT(i) (1 << (i & 0x7)) | ||
87 | |||
88 | /*--------------------------------------------------------------------------*/ | ||
89 | |||
90 | struct tc6393xb { | ||
91 | void __iomem *scr; | ||
92 | |||
93 | struct gpio_chip gpio; | ||
94 | |||
95 | struct clk *clk; /* 3,6 Mhz */ | ||
96 | |||
97 | spinlock_t lock; /* protects RMW cycles */ | ||
98 | |||
99 | struct { | ||
100 | u8 fer; | ||
101 | u16 ccr; | ||
102 | u8 gpi_bcr[3]; | ||
103 | u8 gpo_dsr[3]; | ||
104 | u8 gpo_doecr[3]; | ||
105 | } suspend_state; | ||
106 | |||
107 | struct resource rscr; | ||
108 | struct resource *iomem; | ||
109 | int irq; | ||
110 | int irq_base; | ||
111 | }; | ||
112 | |||
113 | enum { | ||
114 | TC6393XB_CELL_NAND, | ||
115 | }; | ||
116 | |||
117 | /*--------------------------------------------------------------------------*/ | ||
118 | |||
119 | static int tc6393xb_nand_enable(struct platform_device *nand) | ||
120 | { | ||
121 | struct platform_device *dev = to_platform_device(nand->dev.parent); | ||
122 | struct tc6393xb *tc6393xb = platform_get_drvdata(dev); | ||
123 | unsigned long flags; | ||
124 | |||
125 | spin_lock_irqsave(&tc6393xb->lock, flags); | ||
126 | |||
127 | /* SMD buffer on */ | ||
128 | dev_dbg(&dev->dev, "SMD buffer on\n"); | ||
129 | iowrite8(0xff, tc6393xb->scr + SCR_GPI_BCR(1)); | ||
130 | |||
131 | spin_unlock_irqrestore(&tc6393xb->lock, flags); | ||
132 | |||
133 | return 0; | ||
134 | } | ||
135 | |||
136 | static struct resource __devinitdata tc6393xb_nand_resources[] = { | ||
137 | { | ||
138 | .name = TMIO_NAND_CONFIG, | ||
139 | .start = 0x0100, | ||
140 | .end = 0x01ff, | ||
141 | .flags = IORESOURCE_MEM, | ||
142 | }, | ||
143 | { | ||
144 | .name = TMIO_NAND_CONTROL, | ||
145 | .start = 0x1000, | ||
146 | .end = 0x1007, | ||
147 | .flags = IORESOURCE_MEM, | ||
148 | }, | ||
149 | { | ||
150 | .name = TMIO_NAND_IRQ, | ||
151 | .start = IRQ_TC6393_NAND, | ||
152 | .end = IRQ_TC6393_NAND, | ||
153 | .flags = IORESOURCE_IRQ, | ||
154 | }, | ||
155 | }; | ||
156 | |||
157 | static struct mfd_cell __devinitdata tc6393xb_cells[] = { | ||
158 | [TC6393XB_CELL_NAND] = { | ||
159 | .name = "tmio-nand", | ||
160 | .enable = tc6393xb_nand_enable, | ||
161 | .num_resources = ARRAY_SIZE(tc6393xb_nand_resources), | ||
162 | .resources = tc6393xb_nand_resources, | ||
163 | }, | ||
164 | }; | ||
165 | |||
166 | /*--------------------------------------------------------------------------*/ | ||
167 | |||
168 | static int tc6393xb_gpio_get(struct gpio_chip *chip, | ||
169 | unsigned offset) | ||
170 | { | ||
171 | struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio); | ||
172 | |||
173 | /* XXX: does dsr also represent inputs? */ | ||
174 | return ioread8(tc6393xb->scr + SCR_GPO_DSR(offset / 8)) | ||
175 | & TC_GPIO_BIT(offset); | ||
176 | } | ||
177 | |||
178 | static void __tc6393xb_gpio_set(struct gpio_chip *chip, | ||
179 | unsigned offset, int value) | ||
180 | { | ||
181 | struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio); | ||
182 | u8 dsr; | ||
183 | |||
184 | dsr = ioread8(tc6393xb->scr + SCR_GPO_DSR(offset / 8)); | ||
185 | if (value) | ||
186 | dsr |= TC_GPIO_BIT(offset); | ||
187 | else | ||
188 | dsr &= ~TC_GPIO_BIT(offset); | ||
189 | |||
190 | iowrite8(dsr, tc6393xb->scr + SCR_GPO_DSR(offset / 8)); | ||
191 | } | ||
192 | |||
193 | static void tc6393xb_gpio_set(struct gpio_chip *chip, | ||
194 | unsigned offset, int value) | ||
195 | { | ||
196 | struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio); | ||
197 | unsigned long flags; | ||
198 | |||
199 | spin_lock_irqsave(&tc6393xb->lock, flags); | ||
200 | |||
201 | __tc6393xb_gpio_set(chip, offset, value); | ||
202 | |||
203 | spin_unlock_irqrestore(&tc6393xb->lock, flags); | ||
204 | } | ||
205 | |||
206 | static int tc6393xb_gpio_direction_input(struct gpio_chip *chip, | ||
207 | unsigned offset) | ||
208 | { | ||
209 | struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio); | ||
210 | unsigned long flags; | ||
211 | u8 doecr; | ||
212 | |||
213 | spin_lock_irqsave(&tc6393xb->lock, flags); | ||
214 | |||
215 | doecr = ioread8(tc6393xb->scr + SCR_GPO_DOECR(offset / 8)); | ||
216 | doecr &= ~TC_GPIO_BIT(offset); | ||
217 | iowrite8(doecr, tc6393xb->scr + SCR_GPO_DOECR(offset / 8)); | ||
218 | |||
219 | spin_unlock_irqrestore(&tc6393xb->lock, flags); | ||
220 | |||
221 | return 0; | ||
222 | } | ||
223 | |||
224 | static int tc6393xb_gpio_direction_output(struct gpio_chip *chip, | ||
225 | unsigned offset, int value) | ||
226 | { | ||
227 | struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio); | ||
228 | unsigned long flags; | ||
229 | u8 doecr; | ||
230 | |||
231 | spin_lock_irqsave(&tc6393xb->lock, flags); | ||
232 | |||
233 | __tc6393xb_gpio_set(chip, offset, value); | ||
234 | |||
235 | doecr = ioread8(tc6393xb->scr + SCR_GPO_DOECR(offset / 8)); | ||
236 | doecr |= TC_GPIO_BIT(offset); | ||
237 | iowrite8(doecr, tc6393xb->scr + SCR_GPO_DOECR(offset / 8)); | ||
238 | |||
239 | spin_unlock_irqrestore(&tc6393xb->lock, flags); | ||
240 | |||
241 | return 0; | ||
242 | } | ||
243 | |||
244 | static int tc6393xb_register_gpio(struct tc6393xb *tc6393xb, int gpio_base) | ||
245 | { | ||
246 | tc6393xb->gpio.label = "tc6393xb"; | ||
247 | tc6393xb->gpio.base = gpio_base; | ||
248 | tc6393xb->gpio.ngpio = 16; | ||
249 | tc6393xb->gpio.set = tc6393xb_gpio_set; | ||
250 | tc6393xb->gpio.get = tc6393xb_gpio_get; | ||
251 | tc6393xb->gpio.direction_input = tc6393xb_gpio_direction_input; | ||
252 | tc6393xb->gpio.direction_output = tc6393xb_gpio_direction_output; | ||
253 | |||
254 | return gpiochip_add(&tc6393xb->gpio); | ||
255 | } | ||
256 | |||
257 | /*--------------------------------------------------------------------------*/ | ||
258 | |||
259 | static void | ||
260 | tc6393xb_irq(unsigned int irq, struct irq_desc *desc) | ||
261 | { | ||
262 | struct tc6393xb *tc6393xb = get_irq_data(irq); | ||
263 | unsigned int isr; | ||
264 | unsigned int i, irq_base; | ||
265 | |||
266 | irq_base = tc6393xb->irq_base; | ||
267 | |||
268 | while ((isr = ioread8(tc6393xb->scr + SCR_ISR) & | ||
269 | ~ioread8(tc6393xb->scr + SCR_IMR))) | ||
270 | for (i = 0; i < TC6393XB_NR_IRQS; i++) { | ||
271 | if (isr & (1 << i)) | ||
272 | generic_handle_irq(irq_base + i); | ||
273 | } | ||
274 | } | ||
275 | |||
276 | static void tc6393xb_irq_ack(unsigned int irq) | ||
277 | { | ||
278 | } | ||
279 | |||
280 | static void tc6393xb_irq_mask(unsigned int irq) | ||
281 | { | ||
282 | struct tc6393xb *tc6393xb = get_irq_chip_data(irq); | ||
283 | unsigned long flags; | ||
284 | u8 imr; | ||
285 | |||
286 | spin_lock_irqsave(&tc6393xb->lock, flags); | ||
287 | imr = ioread8(tc6393xb->scr + SCR_IMR); | ||
288 | imr |= 1 << (irq - tc6393xb->irq_base); | ||
289 | iowrite8(imr, tc6393xb->scr + SCR_IMR); | ||
290 | spin_unlock_irqrestore(&tc6393xb->lock, flags); | ||
291 | } | ||
292 | |||
293 | static void tc6393xb_irq_unmask(unsigned int irq) | ||
294 | { | ||
295 | struct tc6393xb *tc6393xb = get_irq_chip_data(irq); | ||
296 | unsigned long flags; | ||
297 | u8 imr; | ||
298 | |||
299 | spin_lock_irqsave(&tc6393xb->lock, flags); | ||
300 | imr = ioread8(tc6393xb->scr + SCR_IMR); | ||
301 | imr &= ~(1 << (irq - tc6393xb->irq_base)); | ||
302 | iowrite8(imr, tc6393xb->scr + SCR_IMR); | ||
303 | spin_unlock_irqrestore(&tc6393xb->lock, flags); | ||
304 | } | ||
305 | |||
306 | static struct irq_chip tc6393xb_chip = { | ||
307 | .name = "tc6393xb", | ||
308 | .ack = tc6393xb_irq_ack, | ||
309 | .mask = tc6393xb_irq_mask, | ||
310 | .unmask = tc6393xb_irq_unmask, | ||
311 | }; | ||
312 | |||
313 | static void tc6393xb_attach_irq(struct platform_device *dev) | ||
314 | { | ||
315 | struct tc6393xb *tc6393xb = platform_get_drvdata(dev); | ||
316 | unsigned int irq, irq_base; | ||
317 | |||
318 | irq_base = tc6393xb->irq_base; | ||
319 | |||
320 | for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) { | ||
321 | set_irq_chip(irq, &tc6393xb_chip); | ||
322 | set_irq_chip_data(irq, tc6393xb); | ||
323 | set_irq_handler(irq, handle_edge_irq); | ||
324 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | ||
325 | } | ||
326 | |||
327 | set_irq_type(tc6393xb->irq, IRQT_FALLING); | ||
328 | set_irq_data(tc6393xb->irq, tc6393xb); | ||
329 | set_irq_chained_handler(tc6393xb->irq, tc6393xb_irq); | ||
330 | } | ||
331 | |||
332 | static void tc6393xb_detach_irq(struct platform_device *dev) | ||
333 | { | ||
334 | struct tc6393xb *tc6393xb = platform_get_drvdata(dev); | ||
335 | unsigned int irq, irq_base; | ||
336 | |||
337 | set_irq_chained_handler(tc6393xb->irq, NULL); | ||
338 | set_irq_data(tc6393xb->irq, NULL); | ||
339 | |||
340 | irq_base = tc6393xb->irq_base; | ||
341 | |||
342 | for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) { | ||
343 | set_irq_flags(irq, 0); | ||
344 | set_irq_chip(irq, NULL); | ||
345 | set_irq_chip_data(irq, NULL); | ||
346 | } | ||
347 | } | ||
348 | |||
349 | /*--------------------------------------------------------------------------*/ | ||
350 | |||
351 | static int tc6393xb_hw_init(struct platform_device *dev) | ||
352 | { | ||
353 | struct tc6393xb_platform_data *tcpd = dev->dev.platform_data; | ||
354 | struct tc6393xb *tc6393xb = platform_get_drvdata(dev); | ||
355 | int i; | ||
356 | |||
357 | iowrite8(tc6393xb->suspend_state.fer, tc6393xb->scr + SCR_FER); | ||
358 | iowrite16(tcpd->scr_pll2cr, tc6393xb->scr + SCR_PLL2CR); | ||
359 | iowrite16(tc6393xb->suspend_state.ccr, tc6393xb->scr + SCR_CCR); | ||
360 | iowrite16(SCR_MCR_RDY_OPENDRAIN | SCR_MCR_RDY_UNK | SCR_MCR_RDY_EN | | ||
361 | SCR_MCR_INT_OPENDRAIN | SCR_MCR_INT_UNK | SCR_MCR_INT_EN | | ||
362 | BIT(15), tc6393xb->scr + SCR_MCR); | ||
363 | iowrite16(tcpd->scr_gper, tc6393xb->scr + SCR_GPER); | ||
364 | iowrite8(0, tc6393xb->scr + SCR_IRR); | ||
365 | iowrite8(0xbf, tc6393xb->scr + SCR_IMR); | ||
366 | |||
367 | for (i = 0; i < 3; i++) { | ||
368 | iowrite8(tc6393xb->suspend_state.gpo_dsr[i], | ||
369 | tc6393xb->scr + SCR_GPO_DSR(i)); | ||
370 | iowrite8(tc6393xb->suspend_state.gpo_doecr[i], | ||
371 | tc6393xb->scr + SCR_GPO_DOECR(i)); | ||
372 | iowrite8(tc6393xb->suspend_state.gpi_bcr[i], | ||
373 | tc6393xb->scr + SCR_GPI_BCR(i)); | ||
374 | } | ||
375 | |||
376 | return 0; | ||
377 | } | ||
378 | |||
379 | static int __devinit tc6393xb_probe(struct platform_device *dev) | ||
380 | { | ||
381 | struct tc6393xb_platform_data *tcpd = dev->dev.platform_data; | ||
382 | struct tc6393xb *tc6393xb; | ||
383 | struct resource *iomem; | ||
384 | struct resource *rscr; | ||
385 | int retval, temp; | ||
386 | int i; | ||
387 | |||
388 | iomem = platform_get_resource(dev, IORESOURCE_MEM, 0); | ||
389 | if (!iomem) | ||
390 | return -EINVAL; | ||
391 | |||
392 | tc6393xb = kzalloc(sizeof *tc6393xb, GFP_KERNEL); | ||
393 | if (!tc6393xb) { | ||
394 | retval = -ENOMEM; | ||
395 | goto err_kzalloc; | ||
396 | } | ||
397 | |||
398 | spin_lock_init(&tc6393xb->lock); | ||
399 | |||
400 | platform_set_drvdata(dev, tc6393xb); | ||
401 | tc6393xb->iomem = iomem; | ||
402 | tc6393xb->irq = platform_get_irq(dev, 0); | ||
403 | tc6393xb->irq_base = tcpd->irq_base; | ||
404 | |||
405 | tc6393xb->clk = clk_get(&dev->dev, "GPIO27_CLK" /* "CK3P6MI" */); | ||
406 | if (IS_ERR(tc6393xb->clk)) { | ||
407 | retval = PTR_ERR(tc6393xb->clk); | ||
408 | goto err_clk_get; | ||
409 | } | ||
410 | |||
411 | rscr = &tc6393xb->rscr; | ||
412 | rscr->name = "tc6393xb-core"; | ||
413 | rscr->start = iomem->start; | ||
414 | rscr->end = iomem->start + 0xff; | ||
415 | rscr->flags = IORESOURCE_MEM; | ||
416 | |||
417 | retval = request_resource(iomem, rscr); | ||
418 | if (retval) | ||
419 | goto err_request_scr; | ||
420 | |||
421 | tc6393xb->scr = ioremap(rscr->start, rscr->end - rscr->start + 1); | ||
422 | if (!tc6393xb->scr) { | ||
423 | retval = -ENOMEM; | ||
424 | goto err_ioremap; | ||
425 | } | ||
426 | |||
427 | retval = clk_enable(tc6393xb->clk); | ||
428 | if (retval) | ||
429 | goto err_clk_enable; | ||
430 | |||
431 | retval = tcpd->enable(dev); | ||
432 | if (retval) | ||
433 | goto err_enable; | ||
434 | |||
435 | tc6393xb->suspend_state.fer = 0; | ||
436 | for (i = 0; i < 3; i++) { | ||
437 | tc6393xb->suspend_state.gpo_dsr[i] = | ||
438 | (tcpd->scr_gpo_dsr >> (8 * i)) & 0xff; | ||
439 | tc6393xb->suspend_state.gpo_doecr[i] = | ||
440 | (tcpd->scr_gpo_doecr >> (8 * i)) & 0xff; | ||
441 | } | ||
442 | /* | ||
443 | * It may be necessary to change this back to | ||
444 | * platform-dependent code | ||
445 | */ | ||
446 | tc6393xb->suspend_state.ccr = SCR_CCR_UNK1 | | ||
447 | SCR_CCR_HCLK_48; | ||
448 | |||
449 | retval = tc6393xb_hw_init(dev); | ||
450 | if (retval) | ||
451 | goto err_hw_init; | ||
452 | |||
453 | printk(KERN_INFO "Toshiba tc6393xb revision %d at 0x%08lx, irq %d\n", | ||
454 | ioread8(tc6393xb->scr + SCR_REVID), | ||
455 | (unsigned long) iomem->start, tc6393xb->irq); | ||
456 | |||
457 | tc6393xb->gpio.base = -1; | ||
458 | |||
459 | if (tcpd->gpio_base >= 0) { | ||
460 | retval = tc6393xb_register_gpio(tc6393xb, tcpd->gpio_base); | ||
461 | if (retval) | ||
462 | goto err_gpio_add; | ||
463 | } | ||
464 | |||
465 | if (tc6393xb->irq) | ||
466 | tc6393xb_attach_irq(dev); | ||
467 | |||
468 | tc6393xb_cells[TC6393XB_CELL_NAND].driver_data = tcpd->nand_data; | ||
469 | |||
470 | retval = mfd_add_devices(dev, | ||
471 | tc6393xb_cells, ARRAY_SIZE(tc6393xb_cells), | ||
472 | iomem, tcpd->irq_base); | ||
473 | if (!retval) | ||
474 | return 0; | ||
475 | |||
476 | if (tc6393xb->irq) | ||
477 | tc6393xb_detach_irq(dev); | ||
478 | |||
479 | err_gpio_add: | ||
480 | if (tc6393xb->gpio.base != -1) | ||
481 | temp = gpiochip_remove(&tc6393xb->gpio); | ||
482 | err_hw_init: | ||
483 | tcpd->disable(dev); | ||
484 | err_enable: | ||
485 | clk_disable(tc6393xb->clk); | ||
486 | err_clk_enable: | ||
487 | iounmap(tc6393xb->scr); | ||
488 | err_ioremap: | ||
489 | release_resource(&tc6393xb->rscr); | ||
490 | err_request_scr: | ||
491 | clk_put(tc6393xb->clk); | ||
492 | err_clk_get: | ||
493 | kfree(tc6393xb); | ||
494 | err_kzalloc: | ||
495 | return retval; | ||
496 | } | ||
497 | |||
498 | static int __devexit tc6393xb_remove(struct platform_device *dev) | ||
499 | { | ||
500 | struct tc6393xb_platform_data *tcpd = dev->dev.platform_data; | ||
501 | struct tc6393xb *tc6393xb = platform_get_drvdata(dev); | ||
502 | int ret; | ||
503 | |||
504 | mfd_remove_devices(dev); | ||
505 | |||
506 | if (tc6393xb->irq) | ||
507 | tc6393xb_detach_irq(dev); | ||
508 | |||
509 | if (tc6393xb->gpio.base != -1) { | ||
510 | ret = gpiochip_remove(&tc6393xb->gpio); | ||
511 | if (ret) { | ||
512 | dev_err(&dev->dev, "Can't remove gpio chip: %d\n", ret); | ||
513 | return ret; | ||
514 | } | ||
515 | } | ||
516 | |||
517 | ret = tcpd->disable(dev); | ||
518 | |||
519 | clk_disable(tc6393xb->clk); | ||
520 | |||
521 | iounmap(tc6393xb->scr); | ||
522 | |||
523 | release_resource(&tc6393xb->rscr); | ||
524 | |||
525 | platform_set_drvdata(dev, NULL); | ||
526 | |||
527 | clk_put(tc6393xb->clk); | ||
528 | |||
529 | kfree(tc6393xb); | ||
530 | |||
531 | return ret; | ||
532 | } | ||
533 | |||
534 | #ifdef CONFIG_PM | ||
535 | static int tc6393xb_suspend(struct platform_device *dev, pm_message_t state) | ||
536 | { | ||
537 | struct tc6393xb_platform_data *tcpd = dev->dev.platform_data; | ||
538 | struct tc6393xb *tc6393xb = platform_get_drvdata(dev); | ||
539 | int i; | ||
540 | |||
541 | |||
542 | tc6393xb->suspend_state.ccr = ioread16(tc6393xb->scr + SCR_CCR); | ||
543 | tc6393xb->suspend_state.fer = ioread8(tc6393xb->scr + SCR_FER); | ||
544 | |||
545 | for (i = 0; i < 3; i++) { | ||
546 | tc6393xb->suspend_state.gpo_dsr[i] = | ||
547 | ioread8(tc6393xb->scr + SCR_GPO_DSR(i)); | ||
548 | tc6393xb->suspend_state.gpo_doecr[i] = | ||
549 | ioread8(tc6393xb->scr + SCR_GPO_DOECR(i)); | ||
550 | tc6393xb->suspend_state.gpi_bcr[i] = | ||
551 | ioread8(tc6393xb->scr + SCR_GPI_BCR(i)); | ||
552 | } | ||
553 | |||
554 | return tcpd->suspend(dev); | ||
555 | } | ||
556 | |||
557 | static int tc6393xb_resume(struct platform_device *dev) | ||
558 | { | ||
559 | struct tc6393xb_platform_data *tcpd = dev->dev.platform_data; | ||
560 | int ret = tcpd->resume(dev); | ||
561 | |||
562 | if (ret) | ||
563 | return ret; | ||
564 | |||
565 | return tc6393xb_hw_init(dev); | ||
566 | } | ||
567 | #else | ||
568 | #define tc6393xb_suspend NULL | ||
569 | #define tc6393xb_resume NULL | ||
570 | #endif | ||
571 | |||
572 | static struct platform_driver tc6393xb_driver = { | ||
573 | .probe = tc6393xb_probe, | ||
574 | .remove = __devexit_p(tc6393xb_remove), | ||
575 | .suspend = tc6393xb_suspend, | ||
576 | .resume = tc6393xb_resume, | ||
577 | |||
578 | .driver = { | ||
579 | .name = "tc6393xb", | ||
580 | .owner = THIS_MODULE, | ||
581 | }, | ||
582 | }; | ||
583 | |||
584 | static int __init tc6393xb_init(void) | ||
585 | { | ||
586 | return platform_driver_register(&tc6393xb_driver); | ||
587 | } | ||
588 | |||
589 | static void __exit tc6393xb_exit(void) | ||
590 | { | ||
591 | platform_driver_unregister(&tc6393xb_driver); | ||
592 | } | ||
593 | |||
594 | subsys_initcall(tc6393xb_init); | ||
595 | module_exit(tc6393xb_exit); | ||
596 | |||
597 | MODULE_LICENSE("GPL"); | ||
598 | MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov and Dirk Opfer"); | ||
599 | MODULE_DESCRIPTION("tc6393xb Toshiba Mobile IO Controller"); | ||
600 | MODULE_ALIAS("platform:tc6393xb"); | ||
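The MFD core above is driven entirely by board-supplied platform data: the probe reads tcpd->enable/disable, irq_base, gpio_base and the SCR seed values. A rough sketch of the board-side wiring follows; only the field names come from the driver above, while the header path, resource addresses and register values are assumptions for illustration.

/* Hypothetical board-support sketch; every value below is illustrative. */
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/mfd/tc6393xb.h>		/* header path assumed */

static int board_tc6393xb_enable(struct platform_device *dev)
{
	/* raise the chip's power/reset lines here */
	return 0;
}

static int board_tc6393xb_disable(struct platform_device *dev)
{
	return 0;
}

static struct tc6393xb_platform_data board_tc6393xb_data = {
	.scr_pll2cr	= 0x0cc1,	/* illustrative */
	.scr_gper	= 0,
	.irq_base	= 96,		/* first virtual IRQ handed to the sub-cells */
	.gpio_base	= 160,		/* a negative value skips gpiochip_add() */
	.enable		= board_tc6393xb_enable,
	.disable	= board_tc6393xb_disable,
	/* .suspend/.resume/.nand_data also exist; omitted in this sketch */
};

static struct resource board_tc6393xb_resources[] = {
	{ .start = 0x10000000, .end = 0x10001fff, .flags = IORESOURCE_MEM },
	{ .start = 42,         .end = 42,         .flags = IORESOURCE_IRQ },
};

static struct platform_device board_tc6393xb_device = {
	.name		= "tc6393xb",
	.id		= -1,
	.dev		= { .platform_data = &board_tc6393xb_data },
	.num_resources	= ARRAY_SIZE(board_tc6393xb_resources),
	.resource	= board_tc6393xb_resources,
};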
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c index 08256ed0d9a6..579b01ff82d4 100644 --- a/drivers/misc/sgi-xp/xpc_main.c +++ b/drivers/misc/sgi-xp/xpc_main.c | |||
@@ -229,10 +229,11 @@ xpc_hb_checker(void *ignore) | |||
229 | int last_IRQ_count = 0; | 229 | int last_IRQ_count = 0; |
230 | int new_IRQ_count; | 230 | int new_IRQ_count; |
231 | int force_IRQ = 0; | 231 | int force_IRQ = 0; |
232 | cpumask_of_cpu_ptr(cpumask, XPC_HB_CHECK_CPU); | ||
232 | 233 | ||
233 | /* this thread was marked active by xpc_hb_init() */ | 234 | /* this thread was marked active by xpc_hb_init() */ |
234 | 235 | ||
235 | set_cpus_allowed(current, cpumask_of_cpu(XPC_HB_CHECK_CPU)); | 236 | set_cpus_allowed_ptr(current, cpumask); |
236 | 237 | ||
237 | /* set our heartbeating to other partitions into motion */ | 238 | /* set our heartbeating to other partitions into motion */ |
238 | xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ); | 239 | xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ); |
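The hunk above is part of the set_cpus_allowed_ptr() conversion: the new call takes a pointer instead of a whole cpumask_t by value. A minimal illustration of pinning the calling kthread; the helper name is made up, and note it still builds the mask on the stack, which is exactly what the cpumask_of_cpu_ptr() form above avoids on very large NR_CPUS configurations.

#include <linux/cpumask.h>
#include <linux/sched.h>

/* Illustrative helper: restrict the calling thread to one CPU. */
static int pin_current_to_cpu(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);	/* stack copy, fine for a sketch */

	return set_cpus_allowed_ptr(current, &mask);
}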
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c index d6b9b486417c..a067fe436301 100644 --- a/drivers/mmc/card/mmc_test.c +++ b/drivers/mmc/card/mmc_test.c | |||
@@ -21,13 +21,17 @@ | |||
21 | #define RESULT_UNSUP_HOST 2 | 21 | #define RESULT_UNSUP_HOST 2 |
22 | #define RESULT_UNSUP_CARD 3 | 22 | #define RESULT_UNSUP_CARD 3 |
23 | 23 | ||
24 | #define BUFFER_SIZE (PAGE_SIZE * 4) | 24 | #define BUFFER_ORDER 2 |
25 | #define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER) | ||
25 | 26 | ||
26 | struct mmc_test_card { | 27 | struct mmc_test_card { |
27 | struct mmc_card *card; | 28 | struct mmc_card *card; |
28 | 29 | ||
29 | u8 scratch[BUFFER_SIZE]; | 30 | u8 scratch[BUFFER_SIZE]; |
30 | u8 *buffer; | 31 | u8 *buffer; |
32 | #ifdef CONFIG_HIGHMEM | ||
33 | struct page *highmem; | ||
34 | #endif | ||
31 | }; | 35 | }; |
32 | 36 | ||
33 | /*******************************************************************/ | 37 | /*******************************************************************/ |
@@ -384,14 +388,16 @@ static int mmc_test_transfer(struct mmc_test_card *test, | |||
384 | int ret, i; | 388 | int ret, i; |
385 | unsigned long flags; | 389 | unsigned long flags; |
386 | 390 | ||
391 | BUG_ON(blocks * blksz > BUFFER_SIZE); | ||
392 | |||
387 | if (write) { | 393 | if (write) { |
388 | for (i = 0;i < blocks * blksz;i++) | 394 | for (i = 0;i < blocks * blksz;i++) |
389 | test->scratch[i] = i; | 395 | test->scratch[i] = i; |
390 | } else { | 396 | } else { |
391 | memset(test->scratch, 0, BUFFER_SIZE); | 397 | memset(test->scratch, 0, blocks * blksz); |
392 | } | 398 | } |
393 | local_irq_save(flags); | 399 | local_irq_save(flags); |
394 | sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE); | 400 | sg_copy_from_buffer(sg, sg_len, test->scratch, blocks * blksz); |
395 | local_irq_restore(flags); | 401 | local_irq_restore(flags); |
396 | 402 | ||
397 | ret = mmc_test_set_blksize(test, blksz); | 403 | ret = mmc_test_set_blksize(test, blksz); |
@@ -438,7 +444,7 @@ static int mmc_test_transfer(struct mmc_test_card *test, | |||
438 | } | 444 | } |
439 | } else { | 445 | } else { |
440 | local_irq_save(flags); | 446 | local_irq_save(flags); |
441 | sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE); | 447 | sg_copy_to_buffer(sg, sg_len, test->scratch, blocks * blksz); |
442 | local_irq_restore(flags); | 448 | local_irq_restore(flags); |
443 | for (i = 0;i < blocks * blksz;i++) { | 449 | for (i = 0;i < blocks * blksz;i++) { |
444 | if (test->scratch[i] != (u8)i) | 450 | if (test->scratch[i] != (u8)i) |
@@ -799,6 +805,157 @@ static int mmc_test_multi_xfersize_read(struct mmc_test_card *test) | |||
799 | return 0; | 805 | return 0; |
800 | } | 806 | } |
801 | 807 | ||
808 | static int mmc_test_bigsg_write(struct mmc_test_card *test) | ||
809 | { | ||
810 | int ret; | ||
811 | unsigned int size; | ||
812 | struct scatterlist sg; | ||
813 | |||
814 | if (test->card->host->max_blk_count == 1) | ||
815 | return RESULT_UNSUP_HOST; | ||
816 | |||
817 | size = PAGE_SIZE * 2; | ||
818 | size = min(size, test->card->host->max_req_size); | ||
819 | size = min(size, test->card->host->max_seg_size); | ||
820 | size = min(size, test->card->host->max_blk_count * 512); | ||
821 | |||
822 | memset(test->buffer, 0, BUFFER_SIZE); | ||
823 | |||
824 | if (size < 1024) | ||
825 | return RESULT_UNSUP_HOST; | ||
826 | |||
827 | sg_init_table(&sg, 1); | ||
828 | sg_init_one(&sg, test->buffer, BUFFER_SIZE); | ||
829 | |||
830 | ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1); | ||
831 | if (ret) | ||
832 | return ret; | ||
833 | |||
834 | return 0; | ||
835 | } | ||
836 | |||
837 | static int mmc_test_bigsg_read(struct mmc_test_card *test) | ||
838 | { | ||
839 | int ret, i; | ||
840 | unsigned int size; | ||
841 | struct scatterlist sg; | ||
842 | |||
843 | if (test->card->host->max_blk_count == 1) | ||
844 | return RESULT_UNSUP_HOST; | ||
845 | |||
846 | size = PAGE_SIZE * 2; | ||
847 | size = min(size, test->card->host->max_req_size); | ||
848 | size = min(size, test->card->host->max_seg_size); | ||
849 | size = min(size, test->card->host->max_blk_count * 512); | ||
850 | |||
851 | if (size < 1024) | ||
852 | return RESULT_UNSUP_HOST; | ||
853 | |||
854 | memset(test->buffer, 0xCD, BUFFER_SIZE); | ||
855 | |||
856 | sg_init_table(&sg, 1); | ||
857 | sg_init_one(&sg, test->buffer, BUFFER_SIZE); | ||
858 | ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0); | ||
859 | if (ret) | ||
860 | return ret; | ||
861 | |||
862 | /* mmc_test_transfer() doesn't check for read overflows */ | ||
863 | for (i = size;i < BUFFER_SIZE;i++) { | ||
864 | if (test->buffer[i] != 0xCD) | ||
865 | return RESULT_FAIL; | ||
866 | } | ||
867 | |||
868 | return 0; | ||
869 | } | ||
870 | |||
871 | #ifdef CONFIG_HIGHMEM | ||
872 | |||
873 | static int mmc_test_write_high(struct mmc_test_card *test) | ||
874 | { | ||
875 | int ret; | ||
876 | struct scatterlist sg; | ||
877 | |||
878 | sg_init_table(&sg, 1); | ||
879 | sg_set_page(&sg, test->highmem, 512, 0); | ||
880 | |||
881 | ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); | ||
882 | if (ret) | ||
883 | return ret; | ||
884 | |||
885 | return 0; | ||
886 | } | ||
887 | |||
888 | static int mmc_test_read_high(struct mmc_test_card *test) | ||
889 | { | ||
890 | int ret; | ||
891 | struct scatterlist sg; | ||
892 | |||
893 | sg_init_table(&sg, 1); | ||
894 | sg_set_page(&sg, test->highmem, 512, 0); | ||
895 | |||
896 | ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); | ||
897 | if (ret) | ||
898 | return ret; | ||
899 | |||
900 | return 0; | ||
901 | } | ||
902 | |||
903 | static int mmc_test_multi_write_high(struct mmc_test_card *test) | ||
904 | { | ||
905 | int ret; | ||
906 | unsigned int size; | ||
907 | struct scatterlist sg; | ||
908 | |||
909 | if (test->card->host->max_blk_count == 1) | ||
910 | return RESULT_UNSUP_HOST; | ||
911 | |||
912 | size = PAGE_SIZE * 2; | ||
913 | size = min(size, test->card->host->max_req_size); | ||
914 | size = min(size, test->card->host->max_seg_size); | ||
915 | size = min(size, test->card->host->max_blk_count * 512); | ||
916 | |||
917 | if (size < 1024) | ||
918 | return RESULT_UNSUP_HOST; | ||
919 | |||
920 | sg_init_table(&sg, 1); | ||
921 | sg_set_page(&sg, test->highmem, size, 0); | ||
922 | |||
923 | ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1); | ||
924 | if (ret) | ||
925 | return ret; | ||
926 | |||
927 | return 0; | ||
928 | } | ||
929 | |||
930 | static int mmc_test_multi_read_high(struct mmc_test_card *test) | ||
931 | { | ||
932 | int ret; | ||
933 | unsigned int size; | ||
934 | struct scatterlist sg; | ||
935 | |||
936 | if (test->card->host->max_blk_count == 1) | ||
937 | return RESULT_UNSUP_HOST; | ||
938 | |||
939 | size = PAGE_SIZE * 2; | ||
940 | size = min(size, test->card->host->max_req_size); | ||
941 | size = min(size, test->card->host->max_seg_size); | ||
942 | size = min(size, test->card->host->max_blk_count * 512); | ||
943 | |||
944 | if (size < 1024) | ||
945 | return RESULT_UNSUP_HOST; | ||
946 | |||
947 | sg_init_table(&sg, 1); | ||
948 | sg_set_page(&sg, test->highmem, size, 0); | ||
949 | |||
950 | ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0); | ||
951 | if (ret) | ||
952 | return ret; | ||
953 | |||
954 | return 0; | ||
955 | } | ||
956 | |||
957 | #endif /* CONFIG_HIGHMEM */ | ||
958 | |||
802 | static const struct mmc_test_case mmc_test_cases[] = { | 959 | static const struct mmc_test_case mmc_test_cases[] = { |
803 | { | 960 | { |
804 | .name = "Basic write (no data verification)", | 961 | .name = "Basic write (no data verification)", |
@@ -913,6 +1070,53 @@ static const struct mmc_test_case mmc_test_cases[] = { | |||
913 | .name = "Correct xfer_size at read (midway failure)", | 1070 | .name = "Correct xfer_size at read (midway failure)", |
914 | .run = mmc_test_multi_xfersize_read, | 1071 | .run = mmc_test_multi_xfersize_read, |
915 | }, | 1072 | }, |
1073 | |||
1074 | { | ||
1075 | .name = "Over-sized SG list write", | ||
1076 | .prepare = mmc_test_prepare_write, | ||
1077 | .run = mmc_test_bigsg_write, | ||
1078 | .cleanup = mmc_test_cleanup, | ||
1079 | }, | ||
1080 | |||
1081 | { | ||
1082 | .name = "Over-sized SG list read", | ||
1083 | .prepare = mmc_test_prepare_read, | ||
1084 | .run = mmc_test_bigsg_read, | ||
1085 | .cleanup = mmc_test_cleanup, | ||
1086 | }, | ||
1087 | |||
1088 | #ifdef CONFIG_HIGHMEM | ||
1089 | |||
1090 | { | ||
1091 | .name = "Highmem write", | ||
1092 | .prepare = mmc_test_prepare_write, | ||
1093 | .run = mmc_test_write_high, | ||
1094 | .cleanup = mmc_test_cleanup, | ||
1095 | }, | ||
1096 | |||
1097 | { | ||
1098 | .name = "Highmem read", | ||
1099 | .prepare = mmc_test_prepare_read, | ||
1100 | .run = mmc_test_read_high, | ||
1101 | .cleanup = mmc_test_cleanup, | ||
1102 | }, | ||
1103 | |||
1104 | { | ||
1105 | .name = "Multi-block highmem write", | ||
1106 | .prepare = mmc_test_prepare_write, | ||
1107 | .run = mmc_test_multi_write_high, | ||
1108 | .cleanup = mmc_test_cleanup, | ||
1109 | }, | ||
1110 | |||
1111 | { | ||
1112 | .name = "Multi-block highmem read", | ||
1113 | .prepare = mmc_test_prepare_read, | ||
1114 | .run = mmc_test_multi_read_high, | ||
1115 | .cleanup = mmc_test_cleanup, | ||
1116 | }, | ||
1117 | |||
1118 | #endif /* CONFIG_HIGHMEM */ | ||
1119 | |||
916 | }; | 1120 | }; |
917 | 1121 | ||
918 | static struct mutex mmc_test_lock; | 1122 | static struct mutex mmc_test_lock; |
@@ -1014,12 +1218,23 @@ static ssize_t mmc_test_store(struct device *dev, | |||
1014 | test->card = card; | 1218 | test->card = card; |
1015 | 1219 | ||
1016 | test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL); | 1220 | test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL); |
1221 | #ifdef CONFIG_HIGHMEM | ||
1222 | test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER); | ||
1223 | #endif | ||
1224 | |||
1225 | #ifdef CONFIG_HIGHMEM | ||
1226 | if (test->buffer && test->highmem) { | ||
1227 | #else | ||
1017 | if (test->buffer) { | 1228 | if (test->buffer) { |
1229 | #endif | ||
1018 | mutex_lock(&mmc_test_lock); | 1230 | mutex_lock(&mmc_test_lock); |
1019 | mmc_test_run(test, testcase); | 1231 | mmc_test_run(test, testcase); |
1020 | mutex_unlock(&mmc_test_lock); | 1232 | mutex_unlock(&mmc_test_lock); |
1021 | } | 1233 | } |
1022 | 1234 | ||
1235 | #ifdef CONFIG_HIGHMEM | ||
1236 | __free_pages(test->highmem, BUFFER_ORDER); | ||
1237 | #endif | ||
1023 | kfree(test->buffer); | 1238 | kfree(test->buffer); |
1024 | kfree(test); | 1239 | kfree(test); |
1025 | 1240 | ||
@@ -1041,6 +1256,8 @@ static int mmc_test_probe(struct mmc_card *card) | |||
1041 | if (ret) | 1256 | if (ret) |
1042 | return ret; | 1257 | return ret; |
1043 | 1258 | ||
1259 | dev_info(&card->dev, "Card claimed for testing.\n"); | ||
1260 | |||
1044 | return 0; | 1261 | return 0; |
1045 | } | 1262 | } |
1046 | 1263 | ||
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index 7731ddefdc1b..3dee97e7d165 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c | |||
@@ -148,7 +148,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock | |||
148 | printk(KERN_WARNING "%s: unable to allocate " | 148 | printk(KERN_WARNING "%s: unable to allocate " |
149 | "bounce buffer\n", mmc_card_name(card)); | 149 | "bounce buffer\n", mmc_card_name(card)); |
150 | } else { | 150 | } else { |
151 | blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH); | 151 | blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY); |
152 | blk_queue_max_sectors(mq->queue, bouncesz / 512); | 152 | blk_queue_max_sectors(mq->queue, bouncesz / 512); |
153 | blk_queue_max_phys_segments(mq->queue, bouncesz / 512); | 153 | blk_queue_max_phys_segments(mq->queue, bouncesz / 512); |
154 | blk_queue_max_hw_segments(mq->queue, bouncesz / 512); | 154 | blk_queue_max_hw_segments(mq->queue, bouncesz / 512); |
@@ -290,55 +290,15 @@ void mmc_queue_resume(struct mmc_queue *mq) | |||
290 | } | 290 | } |
291 | } | 291 | } |
292 | 292 | ||
293 | static void copy_sg(struct scatterlist *dst, unsigned int dst_len, | 293 | /* |
294 | struct scatterlist *src, unsigned int src_len) | 294 | * Prepare the sg list(s) to be handed off to the host driver |
295 | { | 295 | */ |
296 | unsigned int chunk; | ||
297 | char *dst_buf, *src_buf; | ||
298 | unsigned int dst_size, src_size; | ||
299 | |||
300 | dst_buf = NULL; | ||
301 | src_buf = NULL; | ||
302 | dst_size = 0; | ||
303 | src_size = 0; | ||
304 | |||
305 | while (src_len) { | ||
306 | BUG_ON(dst_len == 0); | ||
307 | |||
308 | if (dst_size == 0) { | ||
309 | dst_buf = sg_virt(dst); | ||
310 | dst_size = dst->length; | ||
311 | } | ||
312 | |||
313 | if (src_size == 0) { | ||
314 | src_buf = sg_virt(src); | ||
315 | src_size = src->length; | ||
316 | } | ||
317 | |||
318 | chunk = min(dst_size, src_size); | ||
319 | |||
320 | memcpy(dst_buf, src_buf, chunk); | ||
321 | |||
322 | dst_buf += chunk; | ||
323 | src_buf += chunk; | ||
324 | dst_size -= chunk; | ||
325 | src_size -= chunk; | ||
326 | |||
327 | if (dst_size == 0) { | ||
328 | dst++; | ||
329 | dst_len--; | ||
330 | } | ||
331 | |||
332 | if (src_size == 0) { | ||
333 | src++; | ||
334 | src_len--; | ||
335 | } | ||
336 | } | ||
337 | } | ||
338 | |||
339 | unsigned int mmc_queue_map_sg(struct mmc_queue *mq) | 296 | unsigned int mmc_queue_map_sg(struct mmc_queue *mq) |
340 | { | 297 | { |
341 | unsigned int sg_len; | 298 | unsigned int sg_len; |
299 | size_t buflen; | ||
300 | struct scatterlist *sg; | ||
301 | int i; | ||
342 | 302 | ||
343 | if (!mq->bounce_buf) | 303 | if (!mq->bounce_buf) |
344 | return blk_rq_map_sg(mq->queue, mq->req, mq->sg); | 304 | return blk_rq_map_sg(mq->queue, mq->req, mq->sg); |
@@ -349,47 +309,52 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq) | |||
349 | 309 | ||
350 | mq->bounce_sg_len = sg_len; | 310 | mq->bounce_sg_len = sg_len; |
351 | 311 | ||
352 | /* | 312 | buflen = 0; |
353 | * Shortcut in the event we only get a single entry. | 313 | for_each_sg(mq->bounce_sg, sg, sg_len, i) |
354 | */ | 314 | buflen += sg->length; |
355 | if (sg_len == 1) { | ||
356 | memcpy(mq->sg, mq->bounce_sg, sizeof(struct scatterlist)); | ||
357 | return 1; | ||
358 | } | ||
359 | 315 | ||
360 | sg_init_one(mq->sg, mq->bounce_buf, 0); | 316 | sg_init_one(mq->sg, mq->bounce_buf, buflen); |
361 | |||
362 | while (sg_len) { | ||
363 | mq->sg[0].length += mq->bounce_sg[sg_len - 1].length; | ||
364 | sg_len--; | ||
365 | } | ||
366 | 317 | ||
367 | return 1; | 318 | return 1; |
368 | } | 319 | } |
369 | 320 | ||
321 | /* | ||
322 | * If writing, bounce the data to the buffer before the request | ||
323 | * is sent to the host driver | ||
324 | */ | ||
370 | void mmc_queue_bounce_pre(struct mmc_queue *mq) | 325 | void mmc_queue_bounce_pre(struct mmc_queue *mq) |
371 | { | 326 | { |
327 | unsigned long flags; | ||
328 | |||
372 | if (!mq->bounce_buf) | 329 | if (!mq->bounce_buf) |
373 | return; | 330 | return; |
374 | 331 | ||
375 | if (mq->bounce_sg_len == 1) | ||
376 | return; | ||
377 | if (rq_data_dir(mq->req) != WRITE) | 332 | if (rq_data_dir(mq->req) != WRITE) |
378 | return; | 333 | return; |
379 | 334 | ||
380 | copy_sg(mq->sg, 1, mq->bounce_sg, mq->bounce_sg_len); | 335 | local_irq_save(flags); |
336 | sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len, | ||
337 | mq->bounce_buf, mq->sg[0].length); | ||
338 | local_irq_restore(flags); | ||
381 | } | 339 | } |
382 | 340 | ||
341 | /* | ||
342 | * If reading, bounce the data from the buffer after the request | ||
343 | * has been handled by the host driver | ||
344 | */ | ||
383 | void mmc_queue_bounce_post(struct mmc_queue *mq) | 345 | void mmc_queue_bounce_post(struct mmc_queue *mq) |
384 | { | 346 | { |
347 | unsigned long flags; | ||
348 | |||
385 | if (!mq->bounce_buf) | 349 | if (!mq->bounce_buf) |
386 | return; | 350 | return; |
387 | 351 | ||
388 | if (mq->bounce_sg_len == 1) | ||
389 | return; | ||
390 | if (rq_data_dir(mq->req) != READ) | 352 | if (rq_data_dir(mq->req) != READ) |
391 | return; | 353 | return; |
392 | 354 | ||
393 | copy_sg(mq->bounce_sg, mq->bounce_sg_len, mq->sg, 1); | 355 | local_irq_save(flags); |
356 | sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len, | ||
357 | mq->bounce_buf, mq->sg[0].length); | ||
358 | local_irq_restore(flags); | ||
394 | } | 359 | } |
395 | 360 | ||
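The rewritten bounce path leans on sg_copy_to_buffer()/sg_copy_from_buffer(), which walk the sg list and memcpy into or out of a linear buffer, mapping highmem entries with kmap_atomic internally (hence the irq-disabled section around the calls). A small usage sketch, with the buffer allocation left to the caller:

#include <linux/irqflags.h>
#include <linux/scatterlist.h>

/* Illustrative: linearize an sg list into a bounce buffer and back again. */
static void bounce_round_trip(struct scatterlist *sg, unsigned int sg_len,
			      void *bounce, size_t buflen)
{
	unsigned long flags;

	local_irq_save(flags);				 /* kmap_atomic context */
	sg_copy_to_buffer(sg, sg_len, bounce, buflen);	 /* sg  -> buffer */
	sg_copy_from_buffer(sg, sg_len, bounce, buflen); /* buffer -> sg  */
	local_irq_restore(flags);
}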
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c index 3f15eb204895..99b20917cc0f 100644 --- a/drivers/mmc/host/au1xmmc.c +++ b/drivers/mmc/host/au1xmmc.c | |||
@@ -1043,7 +1043,7 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev) | |||
1043 | goto out6; | 1043 | goto out6; |
1044 | } | 1044 | } |
1045 | 1045 | ||
1046 | platform_set_drvdata(pdev, mmc); | 1046 | platform_set_drvdata(pdev, host); |
1047 | 1047 | ||
1048 | printk(KERN_INFO DRIVER_NAME ": MMC Controller %d set up at %8.8X" | 1048 | printk(KERN_INFO DRIVER_NAME ": MMC Controller %d set up at %8.8X" |
1049 | " (mode=%s)\n", pdev->id, host->iobase, | 1049 | " (mode=%s)\n", pdev->id, host->iobase, |
@@ -1087,13 +1087,10 @@ out0: | |||
1087 | 1087 | ||
1088 | static int __devexit au1xmmc_remove(struct platform_device *pdev) | 1088 | static int __devexit au1xmmc_remove(struct platform_device *pdev) |
1089 | { | 1089 | { |
1090 | struct mmc_host *mmc = platform_get_drvdata(pdev); | 1090 | struct au1xmmc_host *host = platform_get_drvdata(pdev); |
1091 | struct au1xmmc_host *host; | ||
1092 | |||
1093 | if (mmc) { | ||
1094 | host = mmc_priv(mmc); | ||
1095 | 1091 | ||
1096 | mmc_remove_host(mmc); | 1092 | if (host) { |
1093 | mmc_remove_host(host->mmc); | ||
1097 | 1094 | ||
1098 | #ifdef CONFIG_LEDS_CLASS | 1095 | #ifdef CONFIG_LEDS_CLASS |
1099 | if (host->platdata && host->platdata->led) | 1096 | if (host->platdata && host->platdata->led) |
@@ -1101,8 +1098,8 @@ static int __devexit au1xmmc_remove(struct platform_device *pdev) | |||
1101 | #endif | 1098 | #endif |
1102 | 1099 | ||
1103 | if (host->platdata && host->platdata->cd_setup && | 1100 | if (host->platdata && host->platdata->cd_setup && |
1104 | !(mmc->caps & MMC_CAP_NEEDS_POLL)) | 1101 | !(host->mmc->caps & MMC_CAP_NEEDS_POLL)) |
1105 | host->platdata->cd_setup(mmc, 0); | 1102 | host->platdata->cd_setup(host->mmc, 0); |
1106 | 1103 | ||
1107 | au_writel(0, HOST_ENABLE(host)); | 1104 | au_writel(0, HOST_ENABLE(host)); |
1108 | au_writel(0, HOST_CONFIG(host)); | 1105 | au_writel(0, HOST_CONFIG(host)); |
@@ -1122,16 +1119,49 @@ static int __devexit au1xmmc_remove(struct platform_device *pdev) | |||
1122 | release_resource(host->ioarea); | 1119 | release_resource(host->ioarea); |
1123 | kfree(host->ioarea); | 1120 | kfree(host->ioarea); |
1124 | 1121 | ||
1125 | mmc_free_host(mmc); | 1122 | mmc_free_host(host->mmc); |
1123 | platform_set_drvdata(pdev, NULL); | ||
1126 | } | 1124 | } |
1127 | return 0; | 1125 | return 0; |
1128 | } | 1126 | } |
1129 | 1127 | ||
1128 | #ifdef CONFIG_PM | ||
1129 | static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state) | ||
1130 | { | ||
1131 | struct au1xmmc_host *host = platform_get_drvdata(pdev); | ||
1132 | int ret; | ||
1133 | |||
1134 | ret = mmc_suspend_host(host->mmc, state); | ||
1135 | if (ret) | ||
1136 | return ret; | ||
1137 | |||
1138 | au_writel(0, HOST_CONFIG2(host)); | ||
1139 | au_writel(0, HOST_CONFIG(host)); | ||
1140 | au_writel(0xffffffff, HOST_STATUS(host)); | ||
1141 | au_writel(0, HOST_ENABLE(host)); | ||
1142 | au_sync(); | ||
1143 | |||
1144 | return 0; | ||
1145 | } | ||
1146 | |||
1147 | static int au1xmmc_resume(struct platform_device *pdev) | ||
1148 | { | ||
1149 | struct au1xmmc_host *host = platform_get_drvdata(pdev); | ||
1150 | |||
1151 | au1xmmc_reset_controller(host); | ||
1152 | |||
1153 | return mmc_resume_host(host->mmc); | ||
1154 | } | ||
1155 | #else | ||
1156 | #define au1xmmc_suspend NULL | ||
1157 | #define au1xmmc_resume NULL | ||
1158 | #endif | ||
1159 | |||
1130 | static struct platform_driver au1xmmc_driver = { | 1160 | static struct platform_driver au1xmmc_driver = { |
1131 | .probe = au1xmmc_probe, | 1161 | .probe = au1xmmc_probe, |
1132 | .remove = au1xmmc_remove, | 1162 | .remove = au1xmmc_remove, |
1133 | .suspend = NULL, | 1163 | .suspend = au1xmmc_suspend, |
1134 | .resume = NULL, | 1164 | .resume = au1xmmc_resume, |
1135 | .driver = { | 1165 | .driver = { |
1136 | .name = DRIVER_NAME, | 1166 | .name = DRIVER_NAME, |
1137 | .owner = THIS_MODULE, | 1167 | .owner = THIS_MODULE, |
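The new suspend/resume pair follows the common ordering for MMC host drivers: tell the MMC core to quiesce before touching the controller, and bring the controller back up before letting the core resume. Sketched below for a hypothetical host; the struct and the hardware steps are placeholders, only the mmc_suspend_host()/mmc_resume_host() calls mirror the driver above.

#include <linux/mmc/host.h>
#include <linux/platform_device.h>

struct myhost {				/* placeholder host structure */
	struct mmc_host *mmc;
};

static int myhost_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct myhost *host = platform_get_drvdata(pdev);
	int ret;

	ret = mmc_suspend_host(host->mmc, state);	/* quiesce the core first */
	if (ret)
		return ret;

	/* ...then mask interrupts and power down the controller... */
	return 0;
}

static int myhost_resume(struct platform_device *pdev)
{
	struct myhost *host = platform_get_drvdata(pdev);

	/* ...re-initialize the controller first... */
	return mmc_resume_host(host->mmc);		/* then restart the core */
}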
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index d39f59738866..a8e18fe53077 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c | |||
@@ -177,7 +177,7 @@ static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data) | |||
177 | if (dalgn) | 177 | if (dalgn) |
178 | DALGN |= (1 << host->dma); | 178 | DALGN |= (1 << host->dma); |
179 | else | 179 | else |
180 | DALGN &= (1 << host->dma); | 180 | DALGN &= ~(1 << host->dma); |
181 | DDADR(host->dma) = host->sg_dma; | 181 | DDADR(host->dma) = host->sg_dma; |
182 | DCSR(host->dma) = DCSR_RUN; | 182 | DCSR(host->dma) = DCSR_RUN; |
183 | } | 183 | } |
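The pxamci one-liner is the usual set/clear asymmetry: OR-ing a bit sets it, but clearing needs AND with the complement; AND-ing with the bit itself wipes every other channel's alignment flag instead. In miniature:

/* Illustrative only: setting vs. clearing a single bit in a register value. */
static unsigned long bit_demo(void)
{
	unsigned long reg = 0xf0;

	reg |=  (1 << 2);	/* set bit 2:   0xf0 -> 0xf4 */
	reg &= ~(1 << 2);	/* clear bit 2: 0xf4 -> 0xf0 */
	/* the old bug, reg &= (1 << 2), would have left only bit 2 set */

	return reg;		/* 0xf0 */
}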
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index 6a1e4994b724..be550c26da68 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c | |||
@@ -1331,21 +1331,30 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440) | |||
1331 | return ret; | 1331 | return ret; |
1332 | } | 1332 | } |
1333 | 1333 | ||
1334 | static void s3cmci_shutdown(struct platform_device *pdev) | ||
1335 | { | ||
1336 | struct mmc_host *mmc = platform_get_drvdata(pdev); | ||
1337 | struct s3cmci_host *host = mmc_priv(mmc); | ||
1338 | |||
1339 | if (host->irq_cd >= 0) | ||
1340 | free_irq(host->irq_cd, host); | ||
1341 | |||
1342 | mmc_remove_host(mmc); | ||
1343 | clk_disable(host->clk); | ||
1344 | } | ||
1345 | |||
1334 | static int __devexit s3cmci_remove(struct platform_device *pdev) | 1346 | static int __devexit s3cmci_remove(struct platform_device *pdev) |
1335 | { | 1347 | { |
1336 | struct mmc_host *mmc = platform_get_drvdata(pdev); | 1348 | struct mmc_host *mmc = platform_get_drvdata(pdev); |
1337 | struct s3cmci_host *host = mmc_priv(mmc); | 1349 | struct s3cmci_host *host = mmc_priv(mmc); |
1338 | 1350 | ||
1339 | mmc_remove_host(mmc); | 1351 | s3cmci_shutdown(pdev); |
1340 | 1352 | ||
1341 | clk_disable(host->clk); | ||
1342 | clk_put(host->clk); | 1353 | clk_put(host->clk); |
1343 | 1354 | ||
1344 | tasklet_disable(&host->pio_tasklet); | 1355 | tasklet_disable(&host->pio_tasklet); |
1345 | s3c2410_dma_free(S3CMCI_DMA, &s3cmci_dma_client); | 1356 | s3c2410_dma_free(S3CMCI_DMA, &s3cmci_dma_client); |
1346 | 1357 | ||
1347 | if (host->irq_cd >= 0) | ||
1348 | free_irq(host->irq_cd, host); | ||
1349 | free_irq(host->irq, host); | 1358 | free_irq(host->irq, host); |
1350 | 1359 | ||
1351 | iounmap(host->base); | 1360 | iounmap(host->base); |
@@ -1355,17 +1364,17 @@ static int __devexit s3cmci_remove(struct platform_device *pdev) | |||
1355 | return 0; | 1364 | return 0; |
1356 | } | 1365 | } |
1357 | 1366 | ||
1358 | static int __devinit s3cmci_probe_2410(struct platform_device *dev) | 1367 | static int __devinit s3cmci_2410_probe(struct platform_device *dev) |
1359 | { | 1368 | { |
1360 | return s3cmci_probe(dev, 0); | 1369 | return s3cmci_probe(dev, 0); |
1361 | } | 1370 | } |
1362 | 1371 | ||
1363 | static int __devinit s3cmci_probe_2412(struct platform_device *dev) | 1372 | static int __devinit s3cmci_2412_probe(struct platform_device *dev) |
1364 | { | 1373 | { |
1365 | return s3cmci_probe(dev, 1); | 1374 | return s3cmci_probe(dev, 1); |
1366 | } | 1375 | } |
1367 | 1376 | ||
1368 | static int __devinit s3cmci_probe_2440(struct platform_device *dev) | 1377 | static int __devinit s3cmci_2440_probe(struct platform_device *dev) |
1369 | { | 1378 | { |
1370 | return s3cmci_probe(dev, 1); | 1379 | return s3cmci_probe(dev, 1); |
1371 | } | 1380 | } |
@@ -1392,29 +1401,32 @@ static int s3cmci_resume(struct platform_device *dev) | |||
1392 | #endif /* CONFIG_PM */ | 1401 | #endif /* CONFIG_PM */ |
1393 | 1402 | ||
1394 | 1403 | ||
1395 | static struct platform_driver s3cmci_driver_2410 = { | 1404 | static struct platform_driver s3cmci_2410_driver = { |
1396 | .driver.name = "s3c2410-sdi", | 1405 | .driver.name = "s3c2410-sdi", |
1397 | .driver.owner = THIS_MODULE, | 1406 | .driver.owner = THIS_MODULE, |
1398 | .probe = s3cmci_probe_2410, | 1407 | .probe = s3cmci_2410_probe, |
1399 | .remove = __devexit_p(s3cmci_remove), | 1408 | .remove = __devexit_p(s3cmci_remove), |
1409 | .shutdown = s3cmci_shutdown, | ||
1400 | .suspend = s3cmci_suspend, | 1410 | .suspend = s3cmci_suspend, |
1401 | .resume = s3cmci_resume, | 1411 | .resume = s3cmci_resume, |
1402 | }; | 1412 | }; |
1403 | 1413 | ||
1404 | static struct platform_driver s3cmci_driver_2412 = { | 1414 | static struct platform_driver s3cmci_2412_driver = { |
1405 | .driver.name = "s3c2412-sdi", | 1415 | .driver.name = "s3c2412-sdi", |
1406 | .driver.owner = THIS_MODULE, | 1416 | .driver.owner = THIS_MODULE, |
1407 | .probe = s3cmci_probe_2412, | 1417 | .probe = s3cmci_2412_probe, |
1408 | .remove = __devexit_p(s3cmci_remove), | 1418 | .remove = __devexit_p(s3cmci_remove), |
1419 | .shutdown = s3cmci_shutdown, | ||
1409 | .suspend = s3cmci_suspend, | 1420 | .suspend = s3cmci_suspend, |
1410 | .resume = s3cmci_resume, | 1421 | .resume = s3cmci_resume, |
1411 | }; | 1422 | }; |
1412 | 1423 | ||
1413 | static struct platform_driver s3cmci_driver_2440 = { | 1424 | static struct platform_driver s3cmci_2440_driver = { |
1414 | .driver.name = "s3c2440-sdi", | 1425 | .driver.name = "s3c2440-sdi", |
1415 | .driver.owner = THIS_MODULE, | 1426 | .driver.owner = THIS_MODULE, |
1416 | .probe = s3cmci_probe_2440, | 1427 | .probe = s3cmci_2440_probe, |
1417 | .remove = __devexit_p(s3cmci_remove), | 1428 | .remove = __devexit_p(s3cmci_remove), |
1429 | .shutdown = s3cmci_shutdown, | ||
1418 | .suspend = s3cmci_suspend, | 1430 | .suspend = s3cmci_suspend, |
1419 | .resume = s3cmci_resume, | 1431 | .resume = s3cmci_resume, |
1420 | }; | 1432 | }; |
@@ -1422,17 +1434,17 @@ static struct platform_driver s3cmci_driver_2440 = { | |||
1422 | 1434 | ||
1423 | static int __init s3cmci_init(void) | 1435 | static int __init s3cmci_init(void) |
1424 | { | 1436 | { |
1425 | platform_driver_register(&s3cmci_driver_2410); | 1437 | platform_driver_register(&s3cmci_2410_driver); |
1426 | platform_driver_register(&s3cmci_driver_2412); | 1438 | platform_driver_register(&s3cmci_2412_driver); |
1427 | platform_driver_register(&s3cmci_driver_2440); | 1439 | platform_driver_register(&s3cmci_2440_driver); |
1428 | return 0; | 1440 | return 0; |
1429 | } | 1441 | } |
1430 | 1442 | ||
1431 | static void __exit s3cmci_exit(void) | 1443 | static void __exit s3cmci_exit(void) |
1432 | { | 1444 | { |
1433 | platform_driver_unregister(&s3cmci_driver_2410); | 1445 | platform_driver_unregister(&s3cmci_2410_driver); |
1434 | platform_driver_unregister(&s3cmci_driver_2412); | 1446 | platform_driver_unregister(&s3cmci_2412_driver); |
1435 | platform_driver_unregister(&s3cmci_driver_2440); | 1447 | platform_driver_unregister(&s3cmci_2440_driver); |
1436 | } | 1448 | } |
1437 | 1449 | ||
1438 | module_init(s3cmci_init); | 1450 | module_init(s3cmci_init); |
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 17701c3da733..c3a5db72ddd7 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c | |||
@@ -173,119 +173,95 @@ static void sdhci_led_control(struct led_classdev *led, | |||
173 | * * | 173 | * * |
174 | \*****************************************************************************/ | 174 | \*****************************************************************************/ |
175 | 175 | ||
176 | static inline char* sdhci_sg_to_buffer(struct sdhci_host* host) | ||
177 | { | ||
178 | return sg_virt(host->cur_sg); | ||
179 | } | ||
180 | |||
181 | static inline int sdhci_next_sg(struct sdhci_host* host) | ||
182 | { | ||
183 | /* | ||
184 | * Skip to next SG entry. | ||
185 | */ | ||
186 | host->cur_sg++; | ||
187 | host->num_sg--; | ||
188 | |||
189 | /* | ||
190 | * Any entries left? | ||
191 | */ | ||
192 | if (host->num_sg > 0) { | ||
193 | host->offset = 0; | ||
194 | host->remain = host->cur_sg->length; | ||
195 | } | ||
196 | |||
197 | return host->num_sg; | ||
198 | } | ||
199 | |||
200 | static void sdhci_read_block_pio(struct sdhci_host *host) | 176 | static void sdhci_read_block_pio(struct sdhci_host *host) |
201 | { | 177 | { |
202 | int blksize, chunk_remain; | 178 | unsigned long flags; |
203 | u32 data; | 179 | size_t blksize, len, chunk; |
204 | char *buffer; | 180 | u32 scratch; |
205 | int size; | 181 | u8 *buf; |
206 | 182 | ||
207 | DBG("PIO reading\n"); | 183 | DBG("PIO reading\n"); |
208 | 184 | ||
209 | blksize = host->data->blksz; | 185 | blksize = host->data->blksz; |
210 | chunk_remain = 0; | 186 | chunk = 0; |
211 | data = 0; | ||
212 | 187 | ||
213 | buffer = sdhci_sg_to_buffer(host) + host->offset; | 188 | local_irq_save(flags); |
214 | 189 | ||
215 | while (blksize) { | 190 | while (blksize) { |
216 | if (chunk_remain == 0) { | 191 | if (!sg_miter_next(&host->sg_miter)) |
217 | data = readl(host->ioaddr + SDHCI_BUFFER); | 192 | BUG(); |
218 | chunk_remain = min(blksize, 4); | ||
219 | } | ||
220 | 193 | ||
221 | size = min(host->remain, chunk_remain); | 194 | len = min(host->sg_miter.length, blksize); |
222 | 195 | ||
223 | chunk_remain -= size; | 196 | blksize -= len; |
224 | blksize -= size; | 197 | host->sg_miter.consumed = len; |
225 | host->offset += size; | ||
226 | host->remain -= size; | ||
227 | 198 | ||
228 | while (size) { | 199 | buf = host->sg_miter.addr; |
229 | *buffer = data & 0xFF; | ||
230 | buffer++; | ||
231 | data >>= 8; | ||
232 | size--; | ||
233 | } | ||
234 | 200 | ||
235 | if (host->remain == 0) { | 201 | while (len) { |
236 | if (sdhci_next_sg(host) == 0) { | 202 | if (chunk == 0) { |
237 | BUG_ON(blksize != 0); | 203 | scratch = readl(host->ioaddr + SDHCI_BUFFER); |
238 | return; | 204 | chunk = 4; |
239 | } | 205 | } |
240 | buffer = sdhci_sg_to_buffer(host); | 206 | |
207 | *buf = scratch & 0xFF; | ||
208 | |||
209 | buf++; | ||
210 | scratch >>= 8; | ||
211 | chunk--; | ||
212 | len--; | ||
241 | } | 213 | } |
242 | } | 214 | } |
215 | |||
216 | sg_miter_stop(&host->sg_miter); | ||
217 | |||
218 | local_irq_restore(flags); | ||
243 | } | 219 | } |
244 | 220 | ||
245 | static void sdhci_write_block_pio(struct sdhci_host *host) | 221 | static void sdhci_write_block_pio(struct sdhci_host *host) |
246 | { | 222 | { |
247 | int blksize, chunk_remain; | 223 | unsigned long flags; |
248 | u32 data; | 224 | size_t blksize, len, chunk; |
249 | char *buffer; | 225 | u32 scratch; |
250 | int bytes, size; | 226 | u8 *buf; |
251 | 227 | ||
252 | DBG("PIO writing\n"); | 228 | DBG("PIO writing\n"); |
253 | 229 | ||
254 | blksize = host->data->blksz; | 230 | blksize = host->data->blksz; |
255 | chunk_remain = 4; | 231 | chunk = 0; |
256 | data = 0; | 232 | scratch = 0; |
257 | 233 | ||
258 | bytes = 0; | 234 | local_irq_save(flags); |
259 | buffer = sdhci_sg_to_buffer(host) + host->offset; | ||
260 | 235 | ||
261 | while (blksize) { | 236 | while (blksize) { |
262 | size = min(host->remain, chunk_remain); | 237 | if (!sg_miter_next(&host->sg_miter)) |
263 | 238 | BUG(); | |
264 | chunk_remain -= size; | ||
265 | blksize -= size; | ||
266 | host->offset += size; | ||
267 | host->remain -= size; | ||
268 | |||
269 | while (size) { | ||
270 | data >>= 8; | ||
271 | data |= (u32)*buffer << 24; | ||
272 | buffer++; | ||
273 | size--; | ||
274 | } | ||
275 | 239 | ||
276 | if (chunk_remain == 0) { | 240 | len = min(host->sg_miter.length, blksize); |
277 | writel(data, host->ioaddr + SDHCI_BUFFER); | 241 | |
278 | chunk_remain = min(blksize, 4); | 242 | blksize -= len; |
279 | } | 243 | host->sg_miter.consumed = len; |
244 | |||
245 | buf = host->sg_miter.addr; | ||
280 | 246 | ||
281 | if (host->remain == 0) { | 247 | while (len) { |
282 | if (sdhci_next_sg(host) == 0) { | 248 | scratch |= (u32)*buf << (chunk * 8); |
283 | BUG_ON(blksize != 0); | 249 | |
284 | return; | 250 | buf++; |
251 | chunk++; | ||
252 | len--; | ||
253 | |||
254 | if ((chunk == 4) || ((len == 0) && (blksize == 0))) { | ||
255 | writel(scratch, host->ioaddr + SDHCI_BUFFER); | ||
256 | chunk = 0; | ||
257 | scratch = 0; | ||
285 | } | 258 | } |
286 | buffer = sdhci_sg_to_buffer(host); | ||
287 | } | 259 | } |
288 | } | 260 | } |
261 | |||
262 | sg_miter_stop(&host->sg_miter); | ||
263 | |||
264 | local_irq_restore(flags); | ||
289 | } | 265 | } |
290 | 266 | ||
291 | static void sdhci_transfer_pio(struct sdhci_host *host) | 267 | static void sdhci_transfer_pio(struct sdhci_host *host) |
@@ -294,7 +270,7 @@ static void sdhci_transfer_pio(struct sdhci_host *host) | |||
294 | 270 | ||
295 | BUG_ON(!host->data); | 271 | BUG_ON(!host->data); |
296 | 272 | ||
297 | if (host->num_sg == 0) | 273 | if (host->blocks == 0) |
298 | return; | 274 | return; |
299 | 275 | ||
300 | if (host->data->flags & MMC_DATA_READ) | 276 | if (host->data->flags & MMC_DATA_READ) |
@@ -308,7 +284,8 @@ static void sdhci_transfer_pio(struct sdhci_host *host) | |||
308 | else | 284 | else |
309 | sdhci_write_block_pio(host); | 285 | sdhci_write_block_pio(host); |
310 | 286 | ||
311 | if (host->num_sg == 0) | 287 | host->blocks--; |
288 | if (host->blocks == 0) | ||
312 | break; | 289 | break; |
313 | } | 290 | } |
314 | 291 | ||
@@ -389,6 +366,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, | |||
389 | if (offset) { | 366 | if (offset) { |
390 | if (data->flags & MMC_DATA_WRITE) { | 367 | if (data->flags & MMC_DATA_WRITE) { |
391 | buffer = sdhci_kmap_atomic(sg, &flags); | 368 | buffer = sdhci_kmap_atomic(sg, &flags); |
369 | WARN_ON(((long)buffer & ~PAGE_MASK) > (PAGE_SIZE - 3)); | ||
392 | memcpy(align, buffer, offset); | 370 | memcpy(align, buffer, offset); |
393 | sdhci_kunmap_atomic(buffer, &flags); | 371 | sdhci_kunmap_atomic(buffer, &flags); |
394 | } | 372 | } |
@@ -510,6 +488,7 @@ static void sdhci_adma_table_post(struct sdhci_host *host, | |||
510 | size = 4 - (sg_dma_address(sg) & 0x3); | 488 | size = 4 - (sg_dma_address(sg) & 0x3); |
511 | 489 | ||
512 | buffer = sdhci_kmap_atomic(sg, &flags); | 490 | buffer = sdhci_kmap_atomic(sg, &flags); |
491 | WARN_ON(((long)buffer & ~PAGE_MASK) > (PAGE_SIZE - 3)); | ||
513 | memcpy(buffer, align, size); | 492 | memcpy(buffer, align, size); |
514 | sdhci_kunmap_atomic(buffer, &flags); | 493 | sdhci_kunmap_atomic(buffer, &flags); |
515 | 494 | ||
@@ -687,7 +666,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) | |||
687 | WARN_ON(1); | 666 | WARN_ON(1); |
688 | host->flags &= ~SDHCI_USE_DMA; | 667 | host->flags &= ~SDHCI_USE_DMA; |
689 | } else { | 668 | } else { |
690 | WARN_ON(count != 1); | 669 | WARN_ON(sg_cnt != 1); |
691 | writel(sg_dma_address(data->sg), | 670 | writel(sg_dma_address(data->sg), |
692 | host->ioaddr + SDHCI_DMA_ADDRESS); | 671 | host->ioaddr + SDHCI_DMA_ADDRESS); |
693 | } | 672 | } |
@@ -711,11 +690,9 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) | |||
711 | } | 690 | } |
712 | 691 | ||
713 | if (!(host->flags & SDHCI_REQ_USE_DMA)) { | 692 | if (!(host->flags & SDHCI_REQ_USE_DMA)) { |
714 | host->cur_sg = data->sg; | 693 | sg_miter_start(&host->sg_miter, |
715 | host->num_sg = data->sg_len; | 694 | data->sg, data->sg_len, SG_MITER_ATOMIC); |
716 | 695 | host->blocks = data->blocks; | |
717 | host->offset = 0; | ||
718 | host->remain = host->cur_sg->length; | ||
719 | } | 696 | } |
720 | 697 | ||
721 | /* We do not handle DMA boundaries, so set it to max (512 KiB) */ | 698 | /* We do not handle DMA boundaries, so set it to max (512 KiB) */ |
@@ -1581,9 +1558,15 @@ int sdhci_add_host(struct sdhci_host *host) | |||
1581 | } | 1558 | } |
1582 | } | 1559 | } |
1583 | 1560 | ||
1584 | /* XXX: Hack to get MMC layer to avoid highmem */ | 1561 | /* |
1585 | if (!(host->flags & SDHCI_USE_DMA)) | 1562 | * If we use DMA, then it's up to the caller to set the DMA |
1586 | mmc_dev(host->mmc)->dma_mask = NULL; | 1563 | * mask. PIO never hands these buffers to the hardware, so set |
1564 | * a fully permissive mask to avoid needless block-layer bouncing. | ||
1565 | */ | ||
1566 | if (!(host->flags & SDHCI_USE_DMA)) { | ||
1567 | host->dma_mask = DMA_BIT_MASK(64); | ||
1568 | mmc_dev(host->mmc)->dma_mask = &host->dma_mask; | ||
1569 | } | ||
1587 | 1570 | ||
1588 | host->max_clk = | 1571 | host->max_clk = |
1589 | (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; | 1572 | (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; |
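The PIO rework above replaces the hand-rolled cur_sg/offset/remain bookkeeping with the sg_mapping_iter API, which maps one sg chunk at a time (including highmem pages) and tracks how much of it was consumed. A minimal walk-through of the start/next/stop cycle; the byte-counting body is only illustrative.

#include <linux/irqflags.h>
#include <linux/scatterlist.h>

/* Illustrative: iterate an sg list with sg_miter and total up its bytes. */
static size_t count_sg_bytes(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_mapping_iter miter;
	unsigned long flags;
	size_t total = 0;

	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);

	local_irq_save(flags);		/* SG_MITER_ATOMIC maps with kmap_atomic */
	while (sg_miter_next(&miter)) {
		/* miter.addr is mapped, miter.length bytes are valid here */
		total += miter.length;
		miter.consumed = miter.length;
	}
	sg_miter_stop(&miter);		/* unmaps and flushes the last chunk */
	local_irq_restore(flags);

	return total;
}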
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 5bb355281765..a06bf8b89343 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h | |||
@@ -212,6 +212,7 @@ struct sdhci_host { | |||
212 | 212 | ||
213 | /* Internal data */ | 213 | /* Internal data */ |
214 | struct mmc_host *mmc; /* MMC structure */ | 214 | struct mmc_host *mmc; /* MMC structure */ |
215 | u64 dma_mask; /* custom DMA mask */ | ||
215 | 216 | ||
216 | #ifdef CONFIG_LEDS_CLASS | 217 | #ifdef CONFIG_LEDS_CLASS |
217 | struct led_classdev led; /* LED control */ | 218 | struct led_classdev led; /* LED control */ |
@@ -238,10 +239,8 @@ struct sdhci_host { | |||
238 | struct mmc_data *data; /* Current data request */ | 239 | struct mmc_data *data; /* Current data request */ |
239 | unsigned int data_early:1; /* Data finished before cmd */ | 240 | unsigned int data_early:1; /* Data finished before cmd */ |
240 | 241 | ||
241 | struct scatterlist *cur_sg; /* We're working on this */ | 242 | struct sg_mapping_iter sg_miter; /* SG state for PIO */ |
242 | int num_sg; /* Entries left */ | 243 | unsigned int blocks; /* remaining PIO blocks */ |
243 | int offset; /* Offset into current sg */ | ||
244 | int remain; /* Bytes left in current */ | ||
245 | 244 | ||
246 | int sg_count; /* Mapped sg entries */ | 245 | int sg_count; /* Mapped sg entries */ |
247 | 246 | ||
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c index cb663ef245d5..fc8529bedfdf 100644 --- a/drivers/mtd/nand/cmx270_nand.c +++ b/drivers/mtd/nand/cmx270_nand.c | |||
@@ -20,9 +20,11 @@ | |||
20 | 20 | ||
21 | #include <linux/mtd/nand.h> | 21 | #include <linux/mtd/nand.h> |
22 | #include <linux/mtd/partitions.h> | 22 | #include <linux/mtd/partitions.h> |
23 | #include <linux/gpio.h> | ||
23 | 24 | ||
24 | #include <asm/io.h> | 25 | #include <asm/io.h> |
25 | #include <asm/irq.h> | 26 | #include <asm/irq.h> |
27 | #include <asm/mach-types.h> | ||
26 | 28 | ||
27 | #include <asm/arch/hardware.h> | 29 | #include <asm/arch/hardware.h> |
28 | #include <asm/arch/pxa-regs.h> | 30 | #include <asm/arch/pxa-regs.h> |
@@ -30,20 +32,6 @@ | |||
30 | #define GPIO_NAND_CS (11) | 32 | #define GPIO_NAND_CS (11) |
31 | #define GPIO_NAND_RB (89) | 33 | #define GPIO_NAND_RB (89) |
32 | 34 | ||
33 | /* This macro needed to ensure in-order operation of GPIO and local | ||
34 | * bus. Without both asm command and dummy uncached read there're | ||
35 | * states when NAND access is broken. I've looked for such macro(s) in | ||
36 | * include/asm-arm but found nothing approptiate. | ||
37 | * dmac_clean_range is close, but is makes cache invalidation | ||
38 | * unnecessary here and it cannot be used in module | ||
39 | */ | ||
40 | #define DRAIN_WB() \ | ||
41 | do { \ | ||
42 | unsigned char dummy; \ | ||
43 | asm volatile ("mcr p15, 0, r0, c7, c10, 4":::"r0"); \ | ||
44 | dummy=*((unsigned char*)UNCACHED_ADDR); \ | ||
45 | } while(0) | ||
46 | |||
47 | /* MTD structure for CM-X270 board */ | 35 | /* MTD structure for CM-X270 board */ |
48 | static struct mtd_info *cmx270_nand_mtd; | 36 | static struct mtd_info *cmx270_nand_mtd; |
49 | 37 | ||
@@ -103,14 +91,14 @@ static int cmx270_verify_buf(struct mtd_info *mtd, const u_char *buf, int len) | |||
103 | 91 | ||
104 | static inline void nand_cs_on(void) | 92 | static inline void nand_cs_on(void) |
105 | { | 93 | { |
106 | GPCR(GPIO_NAND_CS) = GPIO_bit(GPIO_NAND_CS); | 94 | gpio_set_value(GPIO_NAND_CS, 0); |
107 | } | 95 | } |
108 | 96 | ||
109 | static void nand_cs_off(void) | 97 | static void nand_cs_off(void) |
110 | { | 98 | { |
111 | DRAIN_WB(); | 99 | dsb(); |
112 | 100 | ||
113 | GPSR(GPIO_NAND_CS) = GPIO_bit(GPIO_NAND_CS); | 101 | gpio_set_value(GPIO_NAND_CS, 1); |
114 | } | 102 | } |
115 | 103 | ||
116 | /* | 104 | /* |
@@ -122,7 +110,7 @@ static void cmx270_hwcontrol(struct mtd_info *mtd, int dat, | |||
122 | struct nand_chip* this = mtd->priv; | 110 | struct nand_chip* this = mtd->priv; |
123 | unsigned int nandaddr = (unsigned int)this->IO_ADDR_W; | 111 | unsigned int nandaddr = (unsigned int)this->IO_ADDR_W; |
124 | 112 | ||
125 | DRAIN_WB(); | 113 | dsb(); |
126 | 114 | ||
127 | if (ctrl & NAND_CTRL_CHANGE) { | 115 | if (ctrl & NAND_CTRL_CHANGE) { |
128 | if ( ctrl & NAND_ALE ) | 116 | if ( ctrl & NAND_ALE ) |
@@ -139,12 +127,12 @@ static void cmx270_hwcontrol(struct mtd_info *mtd, int dat, | |||
139 | nand_cs_off(); | 127 | nand_cs_off(); |
140 | } | 128 | } |
141 | 129 | ||
142 | DRAIN_WB(); | 130 | dsb(); |
143 | this->IO_ADDR_W = (void __iomem*)nandaddr; | 131 | this->IO_ADDR_W = (void __iomem*)nandaddr; |
144 | if (dat != NAND_CMD_NONE) | 132 | if (dat != NAND_CMD_NONE) |
145 | writel((dat << 16), this->IO_ADDR_W); | 133 | writel((dat << 16), this->IO_ADDR_W); |
146 | 134 | ||
147 | DRAIN_WB(); | 135 | dsb(); |
148 | } | 136 | } |
149 | 137 | ||
150 | /* | 138 | /* |
@@ -152,9 +140,9 @@ static void cmx270_hwcontrol(struct mtd_info *mtd, int dat, | |||
152 | */ | 140 | */ |
153 | static int cmx270_device_ready(struct mtd_info *mtd) | 141 | static int cmx270_device_ready(struct mtd_info *mtd) |
154 | { | 142 | { |
155 | DRAIN_WB(); | 143 | dsb(); |
156 | 144 | ||
157 | return (GPLR(GPIO_NAND_RB) & GPIO_bit(GPIO_NAND_RB)); | 145 | return (gpio_get_value(GPIO_NAND_RB)); |
158 | } | 146 | } |
159 | 147 | ||
160 | /* | 148 | /* |
@@ -168,20 +156,40 @@ static int cmx270_init(void) | |||
168 | int mtd_parts_nb = 0; | 156 | int mtd_parts_nb = 0; |
169 | int ret; | 157 | int ret; |
170 | 158 | ||
159 | if (!machine_is_armcore()) | ||
160 | return -ENODEV; | ||
161 | |||
162 | ret = gpio_request(GPIO_NAND_CS, "NAND CS"); | ||
163 | if (ret) { | ||
164 | pr_warning("CM-X270: failed to request NAND CS gpio\n"); | ||
165 | return ret; | ||
166 | } | ||
167 | |||
168 | gpio_direction_output(GPIO_NAND_CS, 1); | ||
169 | |||
170 | ret = gpio_request(GPIO_NAND_RB, "NAND R/B"); | ||
171 | if (ret) { | ||
172 | pr_warning("CM-X270: failed to request NAND R/B gpio\n"); | ||
173 | goto err_gpio_request; | ||
174 | } | ||
175 | |||
176 | gpio_direction_input(GPIO_NAND_RB); | ||
177 | |||
171 | /* Allocate memory for MTD device structure and private data */ | 178 | /* Allocate memory for MTD device structure and private data */ |
172 | cmx270_nand_mtd = kzalloc(sizeof(struct mtd_info) + | 179 | cmx270_nand_mtd = kzalloc(sizeof(struct mtd_info) + |
173 | sizeof(struct nand_chip), | 180 | sizeof(struct nand_chip), |
174 | GFP_KERNEL); | 181 | GFP_KERNEL); |
175 | if (!cmx270_nand_mtd) { | 182 | if (!cmx270_nand_mtd) { |
176 | printk("Unable to allocate CM-X270 NAND MTD device structure.\n"); | 183 | pr_debug("Unable to allocate CM-X270 NAND MTD device structure.\n"); |
177 | return -ENOMEM; | 184 | ret = -ENOMEM; |
185 | goto err_kzalloc; | ||
178 | } | 186 | } |
179 | 187 | ||
180 | cmx270_nand_io = ioremap(PXA_CS1_PHYS, 12); | 188 | cmx270_nand_io = ioremap(PXA_CS1_PHYS, 12); |
181 | if (!cmx270_nand_io) { | 189 | if (!cmx270_nand_io) { |
182 | printk("Unable to ioremap NAND device\n"); | 190 | pr_debug("Unable to ioremap NAND device\n"); |
183 | ret = -EINVAL; | 191 | ret = -EINVAL; |
184 | goto err1; | 192 | goto err_ioremap; |
185 | } | 193 | } |
186 | 194 | ||
187 | /* Get pointer to private data */ | 195 | /* Get pointer to private data */ |
@@ -209,9 +217,9 @@ static int cmx270_init(void) | |||
209 | 217 | ||
210 | /* Scan to find existence of the device */ | 218 | /* Scan to find existence of the device */ |
211 | if (nand_scan (cmx270_nand_mtd, 1)) { | 219 | if (nand_scan (cmx270_nand_mtd, 1)) { |
212 | printk(KERN_NOTICE "No NAND device\n"); | 220 | pr_notice("No NAND device\n"); |
213 | ret = -ENXIO; | 221 | ret = -ENXIO; |
214 | goto err2; | 222 | goto err_scan; |
215 | } | 223 | } |
216 | 224 | ||
217 | #ifdef CONFIG_MTD_CMDLINE_PARTS | 225 | #ifdef CONFIG_MTD_CMDLINE_PARTS |
@@ -229,18 +237,22 @@ static int cmx270_init(void) | |||
229 | } | 237 | } |
230 | 238 | ||
231 | /* Register the partitions */ | 239 | /* Register the partitions */ |
232 | printk(KERN_NOTICE "Using %s partition definition\n", part_type); | 240 | pr_notice("Using %s partition definition\n", part_type); |
233 | ret = add_mtd_partitions(cmx270_nand_mtd, mtd_parts, mtd_parts_nb); | 241 | ret = add_mtd_partitions(cmx270_nand_mtd, mtd_parts, mtd_parts_nb); |
234 | if (ret) | 242 | if (ret) |
235 | goto err2; | 243 | goto err_scan; |
236 | 244 | ||
237 | /* Return happy */ | 245 | /* Return happy */ |
238 | return 0; | 246 | return 0; |
239 | 247 | ||
240 | err2: | 248 | err_scan: |
241 | iounmap(cmx270_nand_io); | 249 | iounmap(cmx270_nand_io); |
242 | err1: | 250 | err_ioremap: |
243 | kfree(cmx270_nand_mtd); | 251 | kfree(cmx270_nand_mtd); |
252 | err_kzalloc: | ||
253 | gpio_free(GPIO_NAND_RB); | ||
254 | err_gpio_request: | ||
255 | gpio_free(GPIO_NAND_CS); | ||
244 | 256 | ||
245 | return ret; | 257 | return ret; |
246 | 258 | ||
@@ -255,6 +267,9 @@ static void cmx270_cleanup(void) | |||
255 | /* Release resources, unregister device */ | 267 | /* Release resources, unregister device */ |
256 | nand_release(cmx270_nand_mtd); | 268 | nand_release(cmx270_nand_mtd); |
257 | 269 | ||
270 | gpio_free(GPIO_NAND_RB); | ||
271 | gpio_free(GPIO_NAND_CS); | ||
272 | |||
258 | iounmap(cmx270_nand_io); | 273 | iounmap(cmx270_nand_io); |
259 | 274 | ||
260 | /* Free the MTD device structure */ | 275 | /* Free the MTD device structure */ |
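The cmx270_nand hunks above replace direct GPLR/GPSR register pokes with gpiolib calls and thread the two new GPIO requests through the existing goto-based error unwind. A minimal, hedged sketch of that request/teardown pattern follows; the GPIO numbers and names are placeholders, not taken from the board:

    #include <linux/gpio.h>

    #define MY_GPIO_CS  11  /* hypothetical chip-select line */
    #define MY_GPIO_RB  89  /* hypothetical ready/busy line */

    static int my_nand_gpio_setup(void)
    {
            int ret;

            ret = gpio_request(MY_GPIO_CS, "NAND CS");
            if (ret)
                    return ret;
            gpio_direction_output(MY_GPIO_CS, 1);   /* start deasserted */

            ret = gpio_request(MY_GPIO_RB, "NAND R/B");
            if (ret)
                    goto err_free_cs;
            gpio_direction_input(MY_GPIO_RB);

            return 0;

    err_free_cs:
            gpio_free(MY_GPIO_CS);
            return ret;
    }

    static void my_nand_gpio_teardown(void)
    {
            /* release in reverse order of acquisition */
            gpio_free(MY_GPIO_RB);
            gpio_free(MY_GPIO_CS);
    }

The driver's new err_kzalloc/err_gpio_request labels unwind in exactly this reverse order, which is why the R/B line is freed before the chip select on the failure path.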
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c index f2051b209da2..2040965d7724 100644 --- a/drivers/net/smc91x.c +++ b/drivers/net/smc91x.c | |||
@@ -308,7 +308,7 @@ static void smc_reset(struct net_device *dev) | |||
308 | * can't handle it then there will be no recovery except for | 308 | * can't handle it then there will be no recovery except for |
309 | * a hard reset or power cycle | 309 | * a hard reset or power cycle |
310 | */ | 310 | */ |
311 | if (nowait) | 311 | if (lp->cfg.flags & SMC91X_NOWAIT) |
312 | cfg |= CONFIG_NO_WAIT; | 312 | cfg |= CONFIG_NO_WAIT; |
313 | 313 | ||
314 | /* | 314 | /* |
@@ -1939,8 +1939,11 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr, | |||
1939 | if (retval) | 1939 | if (retval) |
1940 | goto err_out; | 1940 | goto err_out; |
1941 | 1941 | ||
1942 | #ifdef SMC_USE_PXA_DMA | 1942 | #ifdef CONFIG_ARCH_PXA |
1943 | { | 1943 | # ifdef SMC_USE_PXA_DMA |
1944 | lp->cfg.flags |= SMC91X_USE_DMA; | ||
1945 | # endif | ||
1946 | if (lp->cfg.flags & SMC91X_USE_DMA) { | ||
1944 | int dma = pxa_request_dma(dev->name, DMA_PRIO_LOW, | 1947 | int dma = pxa_request_dma(dev->name, DMA_PRIO_LOW, |
1945 | smc_pxa_dma_irq, NULL); | 1948 | smc_pxa_dma_irq, NULL); |
1946 | if (dma >= 0) | 1949 | if (dma >= 0) |
@@ -1980,7 +1983,7 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr, | |||
1980 | } | 1983 | } |
1981 | 1984 | ||
1982 | err_out: | 1985 | err_out: |
1983 | #ifdef SMC_USE_PXA_DMA | 1986 | #ifdef CONFIG_ARCH_PXA |
1984 | if (retval && dev->dma != (unsigned char)-1) | 1987 | if (retval && dev->dma != (unsigned char)-1) |
1985 | pxa_free_dma(dev->dma); | 1988 | pxa_free_dma(dev->dma); |
1986 | #endif | 1989 | #endif |
@@ -2050,9 +2053,11 @@ static int smc_enable_device(struct platform_device *pdev) | |||
2050 | return 0; | 2053 | return 0; |
2051 | } | 2054 | } |
2052 | 2055 | ||
2053 | static int smc_request_attrib(struct platform_device *pdev) | 2056 | static int smc_request_attrib(struct platform_device *pdev, |
2057 | struct net_device *ndev) | ||
2054 | { | 2058 | { |
2055 | struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib"); | 2059 | struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib"); |
2060 | struct smc_local *lp = netdev_priv(ndev); | ||
2056 | 2061 | ||
2057 | if (!res) | 2062 | if (!res) |
2058 | return 0; | 2063 | return 0; |
@@ -2063,9 +2068,11 @@ static int smc_request_attrib(struct platform_device *pdev) | |||
2063 | return 0; | 2068 | return 0; |
2064 | } | 2069 | } |
2065 | 2070 | ||
2066 | static void smc_release_attrib(struct platform_device *pdev) | 2071 | static void smc_release_attrib(struct platform_device *pdev, |
2072 | struct net_device *ndev) | ||
2067 | { | 2073 | { |
2068 | struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib"); | 2074 | struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib"); |
2075 | struct smc_local *lp = netdev_priv(ndev); | ||
2069 | 2076 | ||
2070 | if (res) | 2077 | if (res) |
2071 | release_mem_region(res->start, ATTRIB_SIZE); | 2078 | release_mem_region(res->start, ATTRIB_SIZE); |
@@ -2123,27 +2130,14 @@ static int smc_drv_probe(struct platform_device *pdev) | |||
2123 | struct net_device *ndev; | 2130 | struct net_device *ndev; |
2124 | struct resource *res, *ires; | 2131 | struct resource *res, *ires; |
2125 | unsigned int __iomem *addr; | 2132 | unsigned int __iomem *addr; |
2133 | unsigned long irq_flags = SMC_IRQ_FLAGS; | ||
2126 | int ret; | 2134 | int ret; |
2127 | 2135 | ||
2128 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs"); | ||
2129 | if (!res) | ||
2130 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
2131 | if (!res) { | ||
2132 | ret = -ENODEV; | ||
2133 | goto out; | ||
2134 | } | ||
2135 | |||
2136 | |||
2137 | if (!request_mem_region(res->start, SMC_IO_EXTENT, CARDNAME)) { | ||
2138 | ret = -EBUSY; | ||
2139 | goto out; | ||
2140 | } | ||
2141 | |||
2142 | ndev = alloc_etherdev(sizeof(struct smc_local)); | 2136 | ndev = alloc_etherdev(sizeof(struct smc_local)); |
2143 | if (!ndev) { | 2137 | if (!ndev) { |
2144 | printk("%s: could not allocate device.\n", CARDNAME); | 2138 | printk("%s: could not allocate device.\n", CARDNAME); |
2145 | ret = -ENOMEM; | 2139 | ret = -ENOMEM; |
2146 | goto out_release_io; | 2140 | goto out; |
2147 | } | 2141 | } |
2148 | SET_NETDEV_DEV(ndev, &pdev->dev); | 2142 | SET_NETDEV_DEV(ndev, &pdev->dev); |
2149 | 2143 | ||
@@ -2152,37 +2146,47 @@ static int smc_drv_probe(struct platform_device *pdev) | |||
2152 | */ | 2146 | */ |
2153 | 2147 | ||
2154 | lp = netdev_priv(ndev); | 2148 | lp = netdev_priv(ndev); |
2155 | lp->cfg.irq_flags = SMC_IRQ_FLAGS; | ||
2156 | 2149 | ||
2157 | #ifdef SMC_DYNAMIC_BUS_CONFIG | 2150 | if (pd) { |
2158 | if (pd) | ||
2159 | memcpy(&lp->cfg, pd, sizeof(lp->cfg)); | 2151 | memcpy(&lp->cfg, pd, sizeof(lp->cfg)); |
2160 | else { | 2152 | lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags); |
2161 | lp->cfg.flags = SMC91X_USE_8BIT; | 2153 | } else { |
2162 | lp->cfg.flags |= SMC91X_USE_16BIT; | 2154 | lp->cfg.flags |= (SMC_CAN_USE_8BIT) ? SMC91X_USE_8BIT : 0; |
2163 | lp->cfg.flags |= SMC91X_USE_32BIT; | 2155 | lp->cfg.flags |= (SMC_CAN_USE_16BIT) ? SMC91X_USE_16BIT : 0; |
2156 | lp->cfg.flags |= (SMC_CAN_USE_32BIT) ? SMC91X_USE_32BIT : 0; | ||
2157 | lp->cfg.flags |= (nowait) ? SMC91X_NOWAIT : 0; | ||
2164 | } | 2158 | } |
2165 | 2159 | ||
2166 | lp->cfg.flags &= ~(SMC_CAN_USE_8BIT ? 0 : SMC91X_USE_8BIT); | ||
2167 | lp->cfg.flags &= ~(SMC_CAN_USE_16BIT ? 0 : SMC91X_USE_16BIT); | ||
2168 | lp->cfg.flags &= ~(SMC_CAN_USE_32BIT ? 0 : SMC91X_USE_32BIT); | ||
2169 | #endif | ||
2170 | |||
2171 | ndev->dma = (unsigned char)-1; | 2160 | ndev->dma = (unsigned char)-1; |
2172 | 2161 | ||
2162 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs"); | ||
2163 | if (!res) | ||
2164 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
2165 | if (!res) { | ||
2166 | ret = -ENODEV; | ||
2167 | goto out_free_netdev; | ||
2168 | } | ||
2169 | |||
2170 | |||
2171 | if (!request_mem_region(res->start, SMC_IO_EXTENT, CARDNAME)) { | ||
2172 | ret = -EBUSY; | ||
2173 | goto out_free_netdev; | ||
2174 | } | ||
2175 | |||
2173 | ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 2176 | ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); |
2174 | if (!ires) { | 2177 | if (!ires) { |
2175 | ret = -ENODEV; | 2178 | ret = -ENODEV; |
2176 | goto out_free_netdev; | 2179 | goto out_release_io; |
2177 | } | 2180 | } |
2178 | 2181 | ||
2179 | ndev->irq = ires->start; | 2182 | ndev->irq = ires->start; |
2180 | if (SMC_IRQ_FLAGS == -1) | ||
2181 | lp->cfg.irq_flags = ires->flags & IRQF_TRIGGER_MASK; | ||
2182 | 2183 | ||
2183 | ret = smc_request_attrib(pdev); | 2184 | if (ires->flags & IRQF_TRIGGER_MASK) |
2185 | irq_flags = ires->flags & IRQF_TRIGGER_MASK; | ||
2186 | |||
2187 | ret = smc_request_attrib(pdev, ndev); | ||
2184 | if (ret) | 2188 | if (ret) |
2185 | goto out_free_netdev; | 2189 | goto out_release_io; |
2186 | #if defined(CONFIG_SA1100_ASSABET) | 2190 | #if defined(CONFIG_SA1100_ASSABET) |
2187 | NCR_0 |= NCR_ENET_OSC_EN; | 2191 | NCR_0 |= NCR_ENET_OSC_EN; |
2188 | #endif | 2192 | #endif |
@@ -2197,7 +2201,7 @@ static int smc_drv_probe(struct platform_device *pdev) | |||
2197 | goto out_release_attrib; | 2201 | goto out_release_attrib; |
2198 | } | 2202 | } |
2199 | 2203 | ||
2200 | #ifdef SMC_USE_PXA_DMA | 2204 | #ifdef CONFIG_ARCH_PXA |
2201 | { | 2205 | { |
2202 | struct smc_local *lp = netdev_priv(ndev); | 2206 | struct smc_local *lp = netdev_priv(ndev); |
2203 | lp->device = &pdev->dev; | 2207 | lp->device = &pdev->dev; |
@@ -2205,7 +2209,7 @@ static int smc_drv_probe(struct platform_device *pdev) | |||
2205 | } | 2209 | } |
2206 | #endif | 2210 | #endif |
2207 | 2211 | ||
2208 | ret = smc_probe(ndev, addr, lp->cfg.irq_flags); | 2212 | ret = smc_probe(ndev, addr, irq_flags); |
2209 | if (ret != 0) | 2213 | if (ret != 0) |
2210 | goto out_iounmap; | 2214 | goto out_iounmap; |
2211 | 2215 | ||
@@ -2217,11 +2221,11 @@ static int smc_drv_probe(struct platform_device *pdev) | |||
2217 | platform_set_drvdata(pdev, NULL); | 2221 | platform_set_drvdata(pdev, NULL); |
2218 | iounmap(addr); | 2222 | iounmap(addr); |
2219 | out_release_attrib: | 2223 | out_release_attrib: |
2220 | smc_release_attrib(pdev); | 2224 | smc_release_attrib(pdev, ndev); |
2221 | out_free_netdev: | ||
2222 | free_netdev(ndev); | ||
2223 | out_release_io: | 2225 | out_release_io: |
2224 | release_mem_region(res->start, SMC_IO_EXTENT); | 2226 | release_mem_region(res->start, SMC_IO_EXTENT); |
2227 | out_free_netdev: | ||
2228 | free_netdev(ndev); | ||
2225 | out: | 2229 | out: |
2226 | printk("%s: not found (%d).\n", CARDNAME, ret); | 2230 | printk("%s: not found (%d).\n", CARDNAME, ret); |
2227 | 2231 | ||
@@ -2240,14 +2244,14 @@ static int smc_drv_remove(struct platform_device *pdev) | |||
2240 | 2244 | ||
2241 | free_irq(ndev->irq, ndev); | 2245 | free_irq(ndev->irq, ndev); |
2242 | 2246 | ||
2243 | #ifdef SMC_USE_PXA_DMA | 2247 | #ifdef CONFIG_ARCH_PXA |
2244 | if (ndev->dma != (unsigned char)-1) | 2248 | if (ndev->dma != (unsigned char)-1) |
2245 | pxa_free_dma(ndev->dma); | 2249 | pxa_free_dma(ndev->dma); |
2246 | #endif | 2250 | #endif |
2247 | iounmap(lp->base); | 2251 | iounmap(lp->base); |
2248 | 2252 | ||
2249 | smc_release_datacs(pdev,ndev); | 2253 | smc_release_datacs(pdev,ndev); |
2250 | smc_release_attrib(pdev); | 2254 | smc_release_attrib(pdev,ndev); |
2251 | 2255 | ||
2252 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs"); | 2256 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs"); |
2253 | if (!res) | 2257 | if (!res) |
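With SMC_DYNAMIC_BUS_CONFIG gone, smc_drv_probe() now takes bus width, wait-state and I/O-shift settings from struct smc91x_platdata at probe time instead of from compile-time defines. A hedged sketch of how a board file could supply that data; the addresses and IRQ number are placeholders, and only the .flags field of smc91x_platdata is assumed here:

    #include <linux/platform_device.h>
    #include <linux/smc91x.h>

    static struct smc91x_platdata my_smc91x_info = {
            /* 16-bit accesses only, no extra wait states */
            .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
    };

    static struct resource my_smc91x_resources[] = {
            [0] = {
                    .start = 0x0c000000,    /* placeholder chip-select base */
                    .end   = 0x0c0fffff,
                    .flags = IORESOURCE_MEM,
            },
            [1] = {
                    .start = 42,            /* placeholder IRQ */
                    .end   = 42,
                    .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
            },
    };

    static struct platform_device my_smc91x_device = {
            .name           = "smc91x",
            .id             = 0,
            .num_resources  = ARRAY_SIZE(my_smc91x_resources),
            .resource       = my_smc91x_resources,
            .dev            = {
                    .platform_data = &my_smc91x_info,
            },
    };

Boards that pass no platform data fall back to whatever the SMC_CAN_USE_* defines allow, as the else branch in the probe hunk above shows; the trigger bits on the IRQ resource feed the new irq_flags logic.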
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index 8606818653f8..22209b6f1405 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h | |||
@@ -40,23 +40,46 @@ | |||
40 | * Define your architecture specific bus configuration parameters here. | 40 | * Define your architecture specific bus configuration parameters here. |
41 | */ | 41 | */ |
42 | 42 | ||
43 | #if defined(CONFIG_ARCH_LUBBOCK) | 43 | #if defined(CONFIG_ARCH_LUBBOCK) ||\ |
44 | defined(CONFIG_MACH_MAINSTONE) ||\ | ||
45 | defined(CONFIG_MACH_ZYLONITE) ||\ | ||
46 | defined(CONFIG_MACH_LITTLETON) | ||
44 | 47 | ||
45 | /* We can only do 16-bit reads and writes in the static memory space. */ | 48 | #include <asm/mach-types.h> |
46 | #define SMC_CAN_USE_8BIT 0 | 49 | |
50 | /* Now the bus width is specified in the platform data | ||
51 | * pretend here to support all I/O access types | ||
52 | */ | ||
53 | #define SMC_CAN_USE_8BIT 1 | ||
47 | #define SMC_CAN_USE_16BIT 1 | 54 | #define SMC_CAN_USE_16BIT 1 |
48 | #define SMC_CAN_USE_32BIT 0 | 55 | #define SMC_CAN_USE_32BIT 1 |
49 | #define SMC_NOWAIT 1 | 56 | #define SMC_NOWAIT 1 |
50 | 57 | ||
51 | /* The first two address lines aren't connected... */ | 58 | #define SMC_IO_SHIFT (lp->io_shift) |
52 | #define SMC_IO_SHIFT 2 | ||
53 | 59 | ||
60 | #define SMC_inb(a, r) readb((a) + (r)) | ||
54 | #define SMC_inw(a, r) readw((a) + (r)) | 61 | #define SMC_inw(a, r) readw((a) + (r)) |
55 | #define SMC_outw(v, a, r) writew(v, (a) + (r)) | 62 | #define SMC_inl(a, r) readl((a) + (r)) |
63 | #define SMC_outb(v, a, r) writeb(v, (a) + (r)) | ||
64 | #define SMC_outl(v, a, r) writel(v, (a) + (r)) | ||
56 | #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) | 65 | #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) |
57 | #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) | 66 | #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) |
67 | #define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) | ||
68 | #define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) | ||
58 | #define SMC_IRQ_FLAGS (-1) /* from resource */ | 69 | #define SMC_IRQ_FLAGS (-1) /* from resource */ |
59 | 70 | ||
71 | /* We actually can't write halfwords properly if not word aligned */ | ||
72 | static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg) | ||
73 | { | ||
74 | if (machine_is_mainstone() && reg & 2) { | ||
75 | unsigned int v = val << 16; | ||
76 | v |= readl(ioaddr + (reg & ~2)) & 0xffff; | ||
77 | writel(v, ioaddr + (reg & ~2)); | ||
78 | } else { | ||
79 | writew(val, ioaddr + reg); | ||
80 | } | ||
81 | } | ||
82 | |||
60 | #elif defined(CONFIG_BLACKFIN) | 83 | #elif defined(CONFIG_BLACKFIN) |
61 | 84 | ||
62 | #define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH | 85 | #define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH |
@@ -195,7 +218,6 @@ | |||
195 | #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) | 218 | #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) |
196 | 219 | ||
197 | #elif defined(CONFIG_ARCH_INNOKOM) || \ | 220 | #elif defined(CONFIG_ARCH_INNOKOM) || \ |
198 | defined(CONFIG_MACH_MAINSTONE) || \ | ||
199 | defined(CONFIG_ARCH_PXA_IDP) || \ | 221 | defined(CONFIG_ARCH_PXA_IDP) || \ |
200 | defined(CONFIG_ARCH_RAMSES) || \ | 222 | defined(CONFIG_ARCH_RAMSES) || \ |
201 | defined(CONFIG_ARCH_PCM027) | 223 | defined(CONFIG_ARCH_PCM027) |
@@ -229,22 +251,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg) | |||
229 | } | 251 | } |
230 | } | 252 | } |
231 | 253 | ||
232 | #elif defined(CONFIG_MACH_ZYLONITE) | ||
233 | |||
234 | #define SMC_CAN_USE_8BIT 1 | ||
235 | #define SMC_CAN_USE_16BIT 1 | ||
236 | #define SMC_CAN_USE_32BIT 0 | ||
237 | #define SMC_IO_SHIFT 0 | ||
238 | #define SMC_NOWAIT 1 | ||
239 | #define SMC_USE_PXA_DMA 1 | ||
240 | #define SMC_inb(a, r) readb((a) + (r)) | ||
241 | #define SMC_inw(a, r) readw((a) + (r)) | ||
242 | #define SMC_insw(a, r, p, l) insw((a) + (r), p, l) | ||
243 | #define SMC_outsw(a, r, p, l) outsw((a) + (r), p, l) | ||
244 | #define SMC_outb(v, a, r) writeb(v, (a) + (r)) | ||
245 | #define SMC_outw(v, a, r) writew(v, (a) + (r)) | ||
246 | #define SMC_IRQ_FLAGS (-1) /* from resource */ | ||
247 | |||
248 | #elif defined(CONFIG_ARCH_OMAP) | 254 | #elif defined(CONFIG_ARCH_OMAP) |
249 | 255 | ||
250 | /* We can only do 16-bit reads and writes in the static memory space. */ | 256 | /* We can only do 16-bit reads and writes in the static memory space. */ |
@@ -454,7 +460,6 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r, | |||
454 | #define RPC_LSA_DEFAULT RPC_LED_100_10 | 460 | #define RPC_LSA_DEFAULT RPC_LED_100_10 |
455 | #define RPC_LSB_DEFAULT RPC_LED_TX_RX | 461 | #define RPC_LSB_DEFAULT RPC_LED_TX_RX |
456 | 462 | ||
457 | #define SMC_DYNAMIC_BUS_CONFIG | ||
458 | #endif | 463 | #endif |
459 | 464 | ||
460 | 465 | ||
@@ -493,7 +498,7 @@ struct smc_local { | |||
493 | 498 | ||
494 | spinlock_t lock; | 499 | spinlock_t lock; |
495 | 500 | ||
496 | #ifdef SMC_USE_PXA_DMA | 501 | #ifdef CONFIG_ARCH_PXA |
497 | /* DMA needs the physical address of the chip */ | 502 | /* DMA needs the physical address of the chip */ |
498 | u_long physaddr; | 503 | u_long physaddr; |
499 | struct device *device; | 504 | struct device *device; |
@@ -501,20 +506,17 @@ struct smc_local { | |||
501 | void __iomem *base; | 506 | void __iomem *base; |
502 | void __iomem *datacs; | 507 | void __iomem *datacs; |
503 | 508 | ||
509 | /* the low address lines on some platforms aren't connected... */ | ||
510 | int io_shift; | ||
511 | |||
504 | struct smc91x_platdata cfg; | 512 | struct smc91x_platdata cfg; |
505 | }; | 513 | }; |
506 | 514 | ||
507 | #ifdef SMC_DYNAMIC_BUS_CONFIG | 515 | #define SMC_8BIT(p) ((p)->cfg.flags & SMC91X_USE_8BIT) |
508 | #define SMC_8BIT(p) (((p)->cfg.flags & SMC91X_USE_8BIT) && SMC_CAN_USE_8BIT) | 516 | #define SMC_16BIT(p) ((p)->cfg.flags & SMC91X_USE_16BIT) |
509 | #define SMC_16BIT(p) (((p)->cfg.flags & SMC91X_USE_16BIT) && SMC_CAN_USE_16BIT) | 517 | #define SMC_32BIT(p) ((p)->cfg.flags & SMC91X_USE_32BIT) |
510 | #define SMC_32BIT(p) (((p)->cfg.flags & SMC91X_USE_32BIT) && SMC_CAN_USE_32BIT) | ||
511 | #else | ||
512 | #define SMC_8BIT(p) SMC_CAN_USE_8BIT | ||
513 | #define SMC_16BIT(p) SMC_CAN_USE_16BIT | ||
514 | #define SMC_32BIT(p) SMC_CAN_USE_32BIT | ||
515 | #endif | ||
516 | 518 | ||
517 | #ifdef SMC_USE_PXA_DMA | 519 | #ifdef CONFIG_ARCH_PXA |
518 | /* | 520 | /* |
519 | * Let's use the DMA engine on the XScale PXA2xx for RX packets. This is | 521 | * Let's use the DMA engine on the XScale PXA2xx for RX packets. This is |
520 | * always happening in irq context so no need to worry about races. TX is | 522 | * always happening in irq context so no need to worry about races. TX is |
@@ -608,7 +610,7 @@ smc_pxa_dma_irq(int dma, void *dummy) | |||
608 | { | 610 | { |
609 | DCSR(dma) = 0; | 611 | DCSR(dma) = 0; |
610 | } | 612 | } |
611 | #endif /* SMC_USE_PXA_DMA */ | 613 | #endif /* CONFIG_ARCH_PXA */ |
612 | 614 | ||
613 | 615 | ||
614 | /* | 616 | /* |
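In the header, SMC_IO_SHIFT for the PXA boards is no longer a hard-coded constant but reads lp->io_shift, which probe fills in from the platform data via SMC91X_IO_SHIFT(). The shift only scales register offsets for boards whose low address lines are not wired to the chip (Lubbock previously hard-coded 2 for exactly that reason). Purely as an illustrative sketch, not code from the driver:

    /* Illustrative only: with io_shift == 2, register 0x4 lands at
     * bus offset 0x10, compensating for unconnected A0/A1 lines.
     */
    static inline void __iomem *smc_reg_addr(void __iomem *base, int reg,
                                             int io_shift)
    {
            return base + (reg << io_shift);
    }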
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig index e45402adac3f..e0f884034c9f 100644 --- a/drivers/pcmcia/Kconfig +++ b/drivers/pcmcia/Kconfig | |||
@@ -219,7 +219,8 @@ config PCMCIA_SA1111 | |||
219 | config PCMCIA_PXA2XX | 219 | config PCMCIA_PXA2XX |
220 | tristate "PXA2xx support" | 220 | tristate "PXA2xx support" |
221 | depends on ARM && ARCH_PXA && PCMCIA | 221 | depends on ARM && ARCH_PXA && PCMCIA |
222 | depends on ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL || MACH_ARMCORE | 222 | depends on (ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL \ |
223 | || MACH_ARMCORE || ARCH_PXA_PALM) | ||
223 | help | 224 | help |
224 | Say Y here to include support for the PXA2xx PCMCIA controller | 225 | Say Y here to include support for the PXA2xx PCMCIA controller |
225 | 226 | ||
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile index 85c6cc931f97..269a9e913ba2 100644 --- a/drivers/pcmcia/Makefile +++ b/drivers/pcmcia/Makefile | |||
@@ -72,4 +72,5 @@ pxa2xx_cs-$(CONFIG_ARCH_LUBBOCK) += pxa2xx_lubbock.o sa1111_generic.o | |||
72 | pxa2xx_cs-$(CONFIG_MACH_MAINSTONE) += pxa2xx_mainstone.o | 72 | pxa2xx_cs-$(CONFIG_MACH_MAINSTONE) += pxa2xx_mainstone.o |
73 | pxa2xx_cs-$(CONFIG_PXA_SHARPSL) += pxa2xx_sharpsl.o | 73 | pxa2xx_cs-$(CONFIG_PXA_SHARPSL) += pxa2xx_sharpsl.o |
74 | pxa2xx_cs-$(CONFIG_MACH_ARMCORE) += pxa2xx_cm_x270.o | 74 | pxa2xx_cs-$(CONFIG_MACH_ARMCORE) += pxa2xx_cm_x270.o |
75 | pxa2xx_cs-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o | ||
75 | 76 | ||
diff --git a/drivers/pcmcia/pxa2xx_cm_x270.c b/drivers/pcmcia/pxa2xx_cm_x270.c index f123fce65f2e..bb95db7d2b76 100644 --- a/drivers/pcmcia/pxa2xx_cm_x270.c +++ b/drivers/pcmcia/pxa2xx_cm_x270.c | |||
@@ -5,83 +5,60 @@ | |||
5 | * it under the terms of the GNU General Public License version 2 as | 5 | * it under the terms of the GNU General Public License version 2 as |
6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
7 | * | 7 | * |
8 | * Compulab Ltd., 2003, 2007 | 8 | * Compulab Ltd., 2003, 2007, 2008 |
9 | * Mike Rapoport <mike@compulab.co.il> | 9 | * Mike Rapoport <mike@compulab.co.il> |
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/platform_device.h> | 13 | #include <linux/platform_device.h> |
16 | #include <linux/irq.h> | 14 | #include <linux/irq.h> |
17 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
16 | #include <linux/gpio.h> | ||
18 | 17 | ||
19 | #include <pcmcia/ss.h> | ||
20 | #include <asm/hardware.h> | ||
21 | #include <asm/mach-types.h> | 18 | #include <asm/mach-types.h> |
22 | |||
23 | #include <asm/arch/pxa-regs.h> | 19 | #include <asm/arch/pxa-regs.h> |
24 | #include <asm/arch/pxa2xx-gpio.h> | ||
25 | #include <asm/arch/cm-x270.h> | ||
26 | 20 | ||
27 | #include "soc_common.h" | 21 | #include "soc_common.h" |
28 | 22 | ||
23 | #define GPIO_PCMCIA_S0_CD_VALID (84) | ||
24 | #define GPIO_PCMCIA_S0_RDYINT (82) | ||
25 | #define GPIO_PCMCIA_RESET (53) | ||
26 | |||
27 | #define PCMCIA_S0_CD_VALID IRQ_GPIO(GPIO_PCMCIA_S0_CD_VALID) | ||
28 | #define PCMCIA_S0_RDYINT IRQ_GPIO(GPIO_PCMCIA_S0_RDYINT) | ||
29 | |||
30 | |||
29 | static struct pcmcia_irqs irqs[] = { | 31 | static struct pcmcia_irqs irqs[] = { |
30 | { 0, PCMCIA_S0_CD_VALID, "PCMCIA0 CD" }, | 32 | { 0, PCMCIA_S0_CD_VALID, "PCMCIA0 CD" }, |
31 | { 1, PCMCIA_S1_CD_VALID, "PCMCIA1 CD" }, | ||
32 | }; | 33 | }; |
33 | 34 | ||
34 | static int cmx270_pcmcia_hw_init(struct soc_pcmcia_socket *skt) | 35 | static int cmx270_pcmcia_hw_init(struct soc_pcmcia_socket *skt) |
35 | { | 36 | { |
36 | GPSR(GPIO48_nPOE) = GPIO_bit(GPIO48_nPOE) | | 37 | int ret = gpio_request(GPIO_PCMCIA_RESET, "PCCard reset"); |
37 | GPIO_bit(GPIO49_nPWE) | | 38 | if (ret) |
38 | GPIO_bit(GPIO50_nPIOR) | | 39 | return ret; |
39 | GPIO_bit(GPIO51_nPIOW) | | 40 | gpio_direction_output(GPIO_PCMCIA_RESET, 0); |
40 | GPIO_bit(GPIO85_nPCE_1) | | 41 | |
41 | GPIO_bit(GPIO54_nPCE_2); | 42 | skt->irq = PCMCIA_S0_RDYINT; |
42 | 43 | ret = soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); | |
43 | pxa_gpio_mode(GPIO48_nPOE_MD); | 44 | if (!ret) |
44 | pxa_gpio_mode(GPIO49_nPWE_MD); | 45 | gpio_free(GPIO_PCMCIA_RESET); |
45 | pxa_gpio_mode(GPIO50_nPIOR_MD); | 46 | |
46 | pxa_gpio_mode(GPIO51_nPIOW_MD); | 47 | return ret; |
47 | pxa_gpio_mode(GPIO85_nPCE_1_MD); | ||
48 | pxa_gpio_mode(GPIO54_nPCE_2_MD); | ||
49 | pxa_gpio_mode(GPIO55_nPREG_MD); | ||
50 | pxa_gpio_mode(GPIO56_nPWAIT_MD); | ||
51 | pxa_gpio_mode(GPIO57_nIOIS16_MD); | ||
52 | |||
53 | /* Reset signal */ | ||
54 | pxa_gpio_mode(GPIO53_nPCE_2 | GPIO_OUT); | ||
55 | GPCR(GPIO53_nPCE_2) = GPIO_bit(GPIO53_nPCE_2); | ||
56 | |||
57 | set_irq_type(PCMCIA_S0_CD_VALID, IRQ_TYPE_EDGE_BOTH); | ||
58 | set_irq_type(PCMCIA_S1_CD_VALID, IRQ_TYPE_EDGE_BOTH); | ||
59 | |||
60 | /* irq's for slots: */ | ||
61 | set_irq_type(PCMCIA_S0_RDYINT, IRQ_TYPE_EDGE_FALLING); | ||
62 | set_irq_type(PCMCIA_S1_RDYINT, IRQ_TYPE_EDGE_FALLING); | ||
63 | |||
64 | skt->irq = (skt->nr == 0) ? PCMCIA_S0_RDYINT : PCMCIA_S1_RDYINT; | ||
65 | return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); | ||
66 | } | 48 | } |
67 | 49 | ||
68 | static void cmx270_pcmcia_shutdown(struct soc_pcmcia_socket *skt) | 50 | static void cmx270_pcmcia_shutdown(struct soc_pcmcia_socket *skt) |
69 | { | 51 | { |
70 | soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs)); | 52 | soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs)); |
71 | 53 | gpio_free(GPIO_PCMCIA_RESET); | |
72 | set_irq_type(IRQ_TO_GPIO(PCMCIA_S0_CD_VALID), IRQ_TYPE_NONE); | ||
73 | set_irq_type(IRQ_TO_GPIO(PCMCIA_S1_CD_VALID), IRQ_TYPE_NONE); | ||
74 | |||
75 | set_irq_type(IRQ_TO_GPIO(PCMCIA_S0_RDYINT), IRQ_TYPE_NONE); | ||
76 | set_irq_type(IRQ_TO_GPIO(PCMCIA_S1_RDYINT), IRQ_TYPE_NONE); | ||
77 | } | 54 | } |
78 | 55 | ||
79 | 56 | ||
80 | static void cmx270_pcmcia_socket_state(struct soc_pcmcia_socket *skt, | 57 | static void cmx270_pcmcia_socket_state(struct soc_pcmcia_socket *skt, |
81 | struct pcmcia_state *state) | 58 | struct pcmcia_state *state) |
82 | { | 59 | { |
83 | state->detect = (PCC_DETECT(skt->nr) == 0) ? 1 : 0; | 60 | state->detect = (gpio_get_value(GPIO_PCMCIA_S0_CD_VALID) == 0) ? 1 : 0; |
84 | state->ready = (PCC_READY(skt->nr) == 0) ? 0 : 1; | 61 | state->ready = (gpio_get_value(GPIO_PCMCIA_S0_RDYINT) == 0) ? 0 : 1; |
85 | state->bvd1 = 1; | 62 | state->bvd1 = 1; |
86 | state->bvd2 = 1; | 63 | state->bvd2 = 1; |
87 | state->vs_3v = 0; | 64 | state->vs_3v = 0; |
@@ -93,32 +70,16 @@ static void cmx270_pcmcia_socket_state(struct soc_pcmcia_socket *skt, | |||
93 | static int cmx270_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, | 70 | static int cmx270_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, |
94 | const socket_state_t *state) | 71 | const socket_state_t *state) |
95 | { | 72 | { |
96 | GPSR(GPIO49_nPWE) = GPIO_bit(GPIO49_nPWE); | ||
97 | pxa_gpio_mode(GPIO49_nPWE | GPIO_OUT); | ||
98 | |||
99 | switch (skt->nr) { | 73 | switch (skt->nr) { |
100 | case 0: | 74 | case 0: |
101 | if (state->flags & SS_RESET) { | 75 | if (state->flags & SS_RESET) { |
102 | GPCR(GPIO49_nPWE) = GPIO_bit(GPIO49_nPWE); | 76 | gpio_set_value(GPIO_PCMCIA_RESET, 1); |
103 | GPSR(GPIO53_nPCE_2) = GPIO_bit(GPIO53_nPCE_2); | ||
104 | udelay(10); | ||
105 | GPCR(GPIO53_nPCE_2) = GPIO_bit(GPIO53_nPCE_2); | ||
106 | GPSR(GPIO49_nPWE) = GPIO_bit(GPIO49_nPWE); | ||
107 | } | ||
108 | break; | ||
109 | case 1: | ||
110 | if (state->flags & SS_RESET) { | ||
111 | GPCR(GPIO49_nPWE) = GPIO_bit(GPIO49_nPWE); | ||
112 | GPSR(GPIO53_nPCE_2) = GPIO_bit(GPIO53_nPCE_2); | ||
113 | udelay(10); | 77 | udelay(10); |
114 | GPCR(GPIO53_nPCE_2) = GPIO_bit(GPIO53_nPCE_2); | 78 | gpio_set_value(GPIO_PCMCIA_RESET, 0); |
115 | GPSR(GPIO49_nPWE) = GPIO_bit(GPIO49_nPWE); | ||
116 | } | 79 | } |
117 | break; | 80 | break; |
118 | } | 81 | } |
119 | 82 | ||
120 | pxa_gpio_mode(GPIO49_nPWE_MD); | ||
121 | |||
122 | return 0; | 83 | return 0; |
123 | } | 84 | } |
124 | 85 | ||
@@ -139,7 +100,7 @@ static struct pcmcia_low_level cmx270_pcmcia_ops __initdata = { | |||
139 | .configure_socket = cmx270_pcmcia_configure_socket, | 100 | .configure_socket = cmx270_pcmcia_configure_socket, |
140 | .socket_init = cmx270_pcmcia_socket_init, | 101 | .socket_init = cmx270_pcmcia_socket_init, |
141 | .socket_suspend = cmx270_pcmcia_socket_suspend, | 102 | .socket_suspend = cmx270_pcmcia_socket_suspend, |
142 | .nr = 2, | 103 | .nr = 1, |
143 | }; | 104 | }; |
144 | 105 | ||
145 | static struct platform_device *cmx270_pcmcia_device; | 106 | static struct platform_device *cmx270_pcmcia_device; |
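The cm-x270 socket code above collapses a sequence of raw GPSR/GPCR writes into a single GPIO reset pulse and drops the second slot (.nr goes from 2 to 1). A generic, hedged sketch of the pulse pattern; the helper name is invented, and the 10 µs hold time matches the driver:

    #include <linux/gpio.h>
    #include <linux/delay.h>

    /* Pulse an active-high reset line for a card slot. */
    static void my_slot_reset_pulse(unsigned int gpio_reset)
    {
            gpio_set_value(gpio_reset, 1);  /* assert reset */
            udelay(10);                     /* hold */
            gpio_set_value(gpio_reset, 0);  /* release */
    }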
diff --git a/drivers/pcmcia/pxa2xx_palmtx.c b/drivers/pcmcia/pxa2xx_palmtx.c new file mode 100644 index 000000000000..4abde190c1f5 --- /dev/null +++ b/drivers/pcmcia/pxa2xx_palmtx.c | |||
@@ -0,0 +1,118 @@ | |||
1 | /* | ||
2 | * linux/drivers/pcmcia/pxa2xx_palmtx.c | ||
3 | * | ||
4 | * Driver for Palm T|X PCMCIA | ||
5 | * | ||
6 | * Copyright (C) 2007-2008 Marek Vasut <marek.vasut@gmail.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/module.h> | ||
15 | #include <linux/platform_device.h> | ||
16 | |||
17 | #include <asm/mach-types.h> | ||
18 | |||
19 | #include <asm/arch/gpio.h> | ||
20 | #include <asm/arch/palmtx.h> | ||
21 | |||
22 | #include "soc_common.h" | ||
23 | |||
24 | static int palmtx_pcmcia_hw_init(struct soc_pcmcia_socket *skt) | ||
25 | { | ||
26 | skt->irq = IRQ_GPIO(GPIO_NR_PALMTX_PCMCIA_READY); | ||
27 | return 0; | ||
28 | } | ||
29 | |||
30 | static void palmtx_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) | ||
31 | { | ||
32 | } | ||
33 | |||
34 | static void palmtx_pcmcia_socket_state(struct soc_pcmcia_socket *skt, | ||
35 | struct pcmcia_state *state) | ||
36 | { | ||
37 | state->detect = 1; /* always inserted */ | ||
38 | state->ready = !!gpio_get_value(GPIO_NR_PALMTX_PCMCIA_READY); | ||
39 | state->bvd1 = 1; | ||
40 | state->bvd2 = 1; | ||
41 | state->wrprot = 0; | ||
42 | state->vs_3v = 1; | ||
43 | state->vs_Xv = 0; | ||
44 | } | ||
45 | |||
46 | static int | ||
47 | palmtx_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, | ||
48 | const socket_state_t *state) | ||
49 | { | ||
50 | gpio_set_value(GPIO_NR_PALMTX_PCMCIA_POWER1, 1); | ||
51 | gpio_set_value(GPIO_NR_PALMTX_PCMCIA_POWER2, 1); | ||
52 | gpio_set_value(GPIO_NR_PALMTX_PCMCIA_RESET, | ||
53 | !!(state->flags & SS_RESET)); | ||
54 | |||
55 | return 0; | ||
56 | } | ||
57 | |||
58 | static void palmtx_pcmcia_socket_init(struct soc_pcmcia_socket *skt) | ||
59 | { | ||
60 | } | ||
61 | |||
62 | static void palmtx_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt) | ||
63 | { | ||
64 | } | ||
65 | |||
66 | static struct pcmcia_low_level palmtx_pcmcia_ops = { | ||
67 | .owner = THIS_MODULE, | ||
68 | |||
69 | .first = 0, | ||
70 | .nr = 1, | ||
71 | |||
72 | .hw_init = palmtx_pcmcia_hw_init, | ||
73 | .hw_shutdown = palmtx_pcmcia_hw_shutdown, | ||
74 | |||
75 | .socket_state = palmtx_pcmcia_socket_state, | ||
76 | .configure_socket = palmtx_pcmcia_configure_socket, | ||
77 | |||
78 | .socket_init = palmtx_pcmcia_socket_init, | ||
79 | .socket_suspend = palmtx_pcmcia_socket_suspend, | ||
80 | }; | ||
81 | |||
82 | static struct platform_device *palmtx_pcmcia_device; | ||
83 | |||
84 | static int __init palmtx_pcmcia_init(void) | ||
85 | { | ||
86 | int ret; | ||
87 | |||
88 | if (!machine_is_palmtx()) | ||
89 | return -ENODEV; | ||
90 | |||
91 | palmtx_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); | ||
92 | if (!palmtx_pcmcia_device) | ||
93 | return -ENOMEM; | ||
94 | |||
95 | ret = platform_device_add_data(palmtx_pcmcia_device, &palmtx_pcmcia_ops, | ||
96 | sizeof(palmtx_pcmcia_ops)); | ||
97 | |||
98 | if (!ret) | ||
99 | ret = platform_device_add(palmtx_pcmcia_device); | ||
100 | |||
101 | if (ret) | ||
102 | platform_device_put(palmtx_pcmcia_device); | ||
103 | |||
104 | return ret; | ||
105 | } | ||
106 | |||
107 | static void __exit palmtx_pcmcia_exit(void) | ||
108 | { | ||
109 | platform_device_unregister(palmtx_pcmcia_device); | ||
110 | } | ||
111 | |||
112 | fs_initcall(palmtx_pcmcia_init); | ||
113 | module_exit(palmtx_pcmcia_exit); | ||
114 | |||
115 | MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>"); | ||
116 | MODULE_DESCRIPTION("PCMCIA support for Palm T|X"); | ||
117 | MODULE_ALIAS("platform:pxa2xx-pcmcia"); | ||
118 | MODULE_LICENSE("GPL"); | ||
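The new Palm T|X socket driver registers itself by allocating a "pxa2xx-pcmcia" platform device, attaching its pcmcia_low_level ops as platform data, then adding the device and dropping the reference if anything fails. A hedged, generic sketch of that alloc/add_data/add/put sequence (device name and payload are placeholders):

    #include <linux/platform_device.h>

    static int __init my_register_socket_pdev(const void *ops, size_t size)
    {
            struct platform_device *pdev;
            int ret;

            pdev = platform_device_alloc("my-soc-socket", -1);      /* hypothetical name */
            if (!pdev)
                    return -ENOMEM;

            ret = platform_device_add_data(pdev, ops, size);        /* copies the payload */
            if (!ret)
                    ret = platform_device_add(pdev);

            if (ret)
                    platform_device_put(pdev);      /* balances the alloc on failure */

            /* a real driver keeps pdev around so it can call
             * platform_device_unregister() at module exit, as above */
            return ret;
    }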
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig index 58c806e9c58a..4d17d384578d 100644 --- a/drivers/power/Kconfig +++ b/drivers/power/Kconfig | |||
@@ -49,4 +49,10 @@ config BATTERY_OLPC | |||
49 | help | 49 | help |
50 | Say Y to enable support for the battery on the OLPC laptop. | 50 | Say Y to enable support for the battery on the OLPC laptop. |
51 | 51 | ||
52 | config BATTERY_PALMTX | ||
53 | tristate "Palm T|X battery" | ||
54 | depends on MACH_PALMTX | ||
55 | help | ||
56 | Say Y to enable support for the battery in Palm T|X. | ||
57 | |||
52 | endif # POWER_SUPPLY | 58 | endif # POWER_SUPPLY |
diff --git a/drivers/power/Makefile b/drivers/power/Makefile index 6413ded5fe5f..6f43a54ee420 100644 --- a/drivers/power/Makefile +++ b/drivers/power/Makefile | |||
@@ -20,3 +20,4 @@ obj-$(CONFIG_APM_POWER) += apm_power.o | |||
20 | obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o | 20 | obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o |
21 | obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o | 21 | obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o |
22 | obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o | 22 | obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o |
23 | obj-$(CONFIG_BATTERY_PALMTX) += palmtx_battery.o | ||
diff --git a/drivers/power/palmtx_battery.c b/drivers/power/palmtx_battery.c new file mode 100644 index 000000000000..244bb273a637 --- /dev/null +++ b/drivers/power/palmtx_battery.c | |||
@@ -0,0 +1,198 @@ | |||
1 | /* | ||
2 | * linux/drivers/power/palmtx_battery.c | ||
3 | * | ||
4 | * Battery measurement code for Palm T|X Handheld computer | ||
5 | * | ||
6 | * based on tosa_battery.c | ||
7 | * | ||
8 | * Copyright (C) 2008 Marek Vasut <marek.vasut@gmail.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | */ | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/power_supply.h> | ||
18 | #include <linux/wm97xx.h> | ||
19 | #include <linux/delay.h> | ||
20 | #include <linux/spinlock.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/gpio.h> | ||
23 | |||
24 | #include <asm/mach-types.h> | ||
25 | #include <asm/arch/palmtx.h> | ||
26 | |||
27 | static DEFINE_MUTEX(bat_lock); | ||
28 | static struct work_struct bat_work; | ||
29 | struct mutex work_lock; | ||
30 | int bat_status = POWER_SUPPLY_STATUS_DISCHARGING; | ||
31 | |||
32 | static unsigned long palmtx_read_bat(struct power_supply *bat_ps) | ||
33 | { | ||
34 | return wm97xx_read_aux_adc(bat_ps->dev->parent->driver_data, | ||
35 | WM97XX_AUX_ID3) * 1000 / 414; | ||
36 | } | ||
37 | |||
38 | static unsigned long palmtx_read_temp(struct power_supply *bat_ps) | ||
39 | { | ||
40 | return wm97xx_read_aux_adc(bat_ps->dev->parent->driver_data, | ||
41 | WM97XX_AUX_ID2); | ||
42 | } | ||
43 | |||
44 | static int palmtx_bat_get_property(struct power_supply *bat_ps, | ||
45 | enum power_supply_property psp, | ||
46 | union power_supply_propval *val) | ||
47 | { | ||
48 | switch (psp) { | ||
49 | case POWER_SUPPLY_PROP_STATUS: | ||
50 | val->intval = bat_status; | ||
51 | break; | ||
52 | case POWER_SUPPLY_PROP_TECHNOLOGY: | ||
53 | val->intval = POWER_SUPPLY_TECHNOLOGY_LIPO; | ||
54 | break; | ||
55 | case POWER_SUPPLY_PROP_VOLTAGE_NOW: | ||
56 | val->intval = palmtx_read_bat(bat_ps); | ||
57 | break; | ||
58 | case POWER_SUPPLY_PROP_VOLTAGE_MAX: | ||
59 | case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: | ||
60 | val->intval = PALMTX_BAT_MAX_VOLTAGE; | ||
61 | break; | ||
62 | case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: | ||
63 | val->intval = PALMTX_BAT_MIN_VOLTAGE; | ||
64 | break; | ||
65 | case POWER_SUPPLY_PROP_TEMP: | ||
66 | val->intval = palmtx_read_temp(bat_ps); | ||
67 | break; | ||
68 | case POWER_SUPPLY_PROP_PRESENT: | ||
69 | val->intval = 1; | ||
70 | break; | ||
71 | default: | ||
72 | return -EINVAL; | ||
73 | } | ||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | static void palmtx_bat_external_power_changed(struct power_supply *bat_ps) | ||
78 | { | ||
79 | schedule_work(&bat_work); | ||
80 | } | ||
81 | |||
82 | static char *status_text[] = { | ||
83 | [POWER_SUPPLY_STATUS_UNKNOWN] = "Unknown", | ||
84 | [POWER_SUPPLY_STATUS_CHARGING] = "Charging", | ||
85 | [POWER_SUPPLY_STATUS_DISCHARGING] = "Discharging", | ||
86 | }; | ||
87 | |||
88 | static void palmtx_bat_update(struct power_supply *bat_ps) | ||
89 | { | ||
90 | int old_status = bat_status; | ||
91 | |||
92 | mutex_lock(&work_lock); | ||
93 | |||
94 | bat_status = gpio_get_value(GPIO_NR_PALMTX_POWER_DETECT) ? | ||
95 | POWER_SUPPLY_STATUS_CHARGING : | ||
96 | POWER_SUPPLY_STATUS_DISCHARGING; | ||
97 | |||
98 | if (old_status != bat_status) { | ||
99 | pr_debug("%s %s -> %s\n", bat_ps->name, | ||
100 | status_text[old_status], | ||
101 | status_text[bat_status]); | ||
102 | power_supply_changed(bat_ps); | ||
103 | } | ||
104 | |||
105 | mutex_unlock(&work_lock); | ||
106 | } | ||
107 | |||
108 | static enum power_supply_property palmtx_bat_main_props[] = { | ||
109 | POWER_SUPPLY_PROP_STATUS, | ||
110 | POWER_SUPPLY_PROP_TECHNOLOGY, | ||
111 | POWER_SUPPLY_PROP_VOLTAGE_NOW, | ||
112 | POWER_SUPPLY_PROP_VOLTAGE_MAX, | ||
113 | POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, | ||
114 | POWER_SUPPLY_PROP_TEMP, | ||
115 | POWER_SUPPLY_PROP_PRESENT, | ||
116 | }; | ||
117 | |||
118 | struct power_supply bat_ps = { | ||
119 | .name = "main-battery", | ||
120 | .type = POWER_SUPPLY_TYPE_BATTERY, | ||
121 | .properties = palmtx_bat_main_props, | ||
122 | .num_properties = ARRAY_SIZE(palmtx_bat_main_props), | ||
123 | .get_property = palmtx_bat_get_property, | ||
124 | .external_power_changed = palmtx_bat_external_power_changed, | ||
125 | .use_for_apm = 1, | ||
126 | }; | ||
127 | |||
128 | static void palmtx_bat_work(struct work_struct *work) | ||
129 | { | ||
130 | palmtx_bat_update(&bat_ps); | ||
131 | } | ||
132 | |||
133 | #ifdef CONFIG_PM | ||
134 | static int palmtx_bat_suspend(struct platform_device *dev, pm_message_t state) | ||
135 | { | ||
136 | flush_scheduled_work(); | ||
137 | return 0; | ||
138 | } | ||
139 | |||
140 | static int palmtx_bat_resume(struct platform_device *dev) | ||
141 | { | ||
142 | schedule_work(&bat_work); | ||
143 | return 0; | ||
144 | } | ||
145 | #else | ||
146 | #define palmtx_bat_suspend NULL | ||
147 | #define palmtx_bat_resume NULL | ||
148 | #endif | ||
149 | |||
150 | static int __devinit palmtx_bat_probe(struct platform_device *dev) | ||
151 | { | ||
152 | int ret = 0; | ||
153 | |||
154 | if (!machine_is_palmtx()) | ||
155 | return -ENODEV; | ||
156 | |||
157 | mutex_init(&work_lock); | ||
158 | |||
159 | INIT_WORK(&bat_work, palmtx_bat_work); | ||
160 | |||
161 | ret = power_supply_register(&dev->dev, &bat_ps); | ||
162 | if (!ret) | ||
163 | schedule_work(&bat_work); | ||
164 | |||
165 | return ret; | ||
166 | } | ||
167 | |||
168 | static int __devexit palmtx_bat_remove(struct platform_device *dev) | ||
169 | { | ||
170 | power_supply_unregister(&bat_ps); | ||
171 | return 0; | ||
172 | } | ||
173 | |||
174 | static struct platform_driver palmtx_bat_driver = { | ||
175 | .driver.name = "wm97xx-battery", | ||
176 | .driver.owner = THIS_MODULE, | ||
177 | .probe = palmtx_bat_probe, | ||
178 | .remove = __devexit_p(palmtx_bat_remove), | ||
179 | .suspend = palmtx_bat_suspend, | ||
180 | .resume = palmtx_bat_resume, | ||
181 | }; | ||
182 | |||
183 | static int __init palmtx_bat_init(void) | ||
184 | { | ||
185 | return platform_driver_register(&palmtx_bat_driver); | ||
186 | } | ||
187 | |||
188 | static void __exit palmtx_bat_exit(void) | ||
189 | { | ||
190 | platform_driver_unregister(&palmtx_bat_driver); | ||
191 | } | ||
192 | |||
193 | module_init(palmtx_bat_init); | ||
194 | module_exit(palmtx_bat_exit); | ||
195 | |||
196 | MODULE_LICENSE("GPL"); | ||
197 | MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>"); | ||
198 | MODULE_DESCRIPTION("Palm T|X battery driver"); | ||
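palmtx_read_bat() converts the raw WM97xx auxiliary-ADC reading to a voltage with the driver's * 1000 / 414 scaling. As a hedged, standalone illustration of that integer arithmetic (the scale factor comes from the driver; the sample reading is invented):

    #include <stdio.h>

    /* Same integer arithmetic as the driver: voltage = adc * 1000 / 414. */
    static unsigned long palmtx_adc_to_voltage(unsigned long adc)
    {
            return adc * 1000 / 414;
    }

    int main(void)
    {
            /* A hypothetical reading of 1740 maps to 4202, i.e. roughly a
             * full lithium-polymer cell if the result is in millivolts.
             */
            printf("%lu\n", palmtx_adc_to_voltage(1740));
            return 0;
    }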
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c index f843c1383a4b..538552495d48 100644 --- a/drivers/scsi/ide-scsi.c +++ b/drivers/scsi/ide-scsi.c | |||
@@ -84,7 +84,6 @@ typedef struct ide_scsi_obj { | |||
84 | struct Scsi_Host *host; | 84 | struct Scsi_Host *host; |
85 | 85 | ||
86 | struct ide_atapi_pc *pc; /* Current packet command */ | 86 | struct ide_atapi_pc *pc; /* Current packet command */ |
87 | unsigned long flags; /* Status/Action flags */ | ||
88 | unsigned long transform; /* SCSI cmd translation layer */ | 87 | unsigned long transform; /* SCSI cmd translation layer */ |
89 | unsigned long log; /* log flags */ | 88 | unsigned long log; /* log flags */ |
90 | } idescsi_scsi_t; | 89 | } idescsi_scsi_t; |
@@ -126,23 +125,14 @@ static inline idescsi_scsi_t *drive_to_idescsi(ide_drive_t *ide_drive) | |||
126 | } | 125 | } |
127 | 126 | ||
128 | /* | 127 | /* |
129 | * Per ATAPI device status bits. | ||
130 | */ | ||
131 | #define IDESCSI_DRQ_INTERRUPT 0 /* DRQ interrupt device */ | ||
132 | |||
133 | /* | ||
134 | * ide-scsi requests. | ||
135 | */ | ||
136 | #define IDESCSI_PC_RQ 90 | ||
137 | |||
138 | /* | ||
139 | * PIO data transfer routine using the scatter gather table. | 128 | * PIO data transfer routine using the scatter gather table. |
140 | */ | 129 | */ |
141 | static void ide_scsi_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc, | 130 | static void ide_scsi_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc, |
142 | unsigned int bcount, int write) | 131 | unsigned int bcount, int write) |
143 | { | 132 | { |
144 | ide_hwif_t *hwif = drive->hwif; | 133 | ide_hwif_t *hwif = drive->hwif; |
145 | xfer_func_t *xf = write ? hwif->output_data : hwif->input_data; | 134 | const struct ide_tp_ops *tp_ops = hwif->tp_ops; |
135 | xfer_func_t *xf = write ? tp_ops->output_data : tp_ops->input_data; | ||
146 | char *buf; | 136 | char *buf; |
147 | int count; | 137 | int count; |
148 | 138 | ||
@@ -228,7 +218,6 @@ static int idescsi_check_condition(ide_drive_t *drive, | |||
228 | rq->cmd_type = REQ_TYPE_SENSE; | 218 | rq->cmd_type = REQ_TYPE_SENSE; |
229 | rq->cmd_flags |= REQ_PREEMPT; | 219 | rq->cmd_flags |= REQ_PREEMPT; |
230 | pc->timeout = jiffies + WAIT_READY; | 220 | pc->timeout = jiffies + WAIT_READY; |
231 | pc->callback = ide_scsi_callback; | ||
232 | /* NOTE! Save the failed packet command in "rq->buffer" */ | 221 | /* NOTE! Save the failed packet command in "rq->buffer" */ |
233 | rq->buffer = (void *) failed_cmd->special; | 222 | rq->buffer = (void *) failed_cmd->special; |
234 | pc->scsi_cmd = ((struct ide_atapi_pc *) failed_cmd->special)->scsi_cmd; | 223 | pc->scsi_cmd = ((struct ide_atapi_pc *) failed_cmd->special)->scsi_cmd; |
@@ -237,6 +226,7 @@ static int idescsi_check_condition(ide_drive_t *drive, | |||
237 | ide_scsi_hex_dump(pc->c, 6); | 226 | ide_scsi_hex_dump(pc->c, 6); |
238 | } | 227 | } |
239 | rq->rq_disk = scsi->disk; | 228 | rq->rq_disk = scsi->disk; |
229 | memcpy(rq->cmd, pc->c, 12); | ||
240 | ide_do_drive_cmd(drive, rq); | 230 | ide_do_drive_cmd(drive, rq); |
241 | return 0; | 231 | return 0; |
242 | } | 232 | } |
@@ -246,10 +236,9 @@ idescsi_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err) | |||
246 | { | 236 | { |
247 | ide_hwif_t *hwif = drive->hwif; | 237 | ide_hwif_t *hwif = drive->hwif; |
248 | 238 | ||
249 | if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT)) | 239 | if (hwif->tp_ops->read_status(hwif) & (BUSY_STAT | DRQ_STAT)) |
250 | /* force an abort */ | 240 | /* force an abort */ |
251 | hwif->OUTBSYNC(hwif, WIN_IDLEIMMEDIATE, | 241 | hwif->tp_ops->exec_command(hwif, WIN_IDLEIMMEDIATE); |
252 | hwif->io_ports.command_addr); | ||
253 | 242 | ||
254 | rq->errors++; | 243 | rq->errors++; |
255 | 244 | ||
@@ -421,10 +410,6 @@ static ide_startstop_t idescsi_do_request (ide_drive_t *drive, struct request *r | |||
421 | 410 | ||
422 | if (blk_sense_request(rq) || blk_special_request(rq)) { | 411 | if (blk_sense_request(rq) || blk_special_request(rq)) { |
423 | struct ide_atapi_pc *pc = (struct ide_atapi_pc *)rq->special; | 412 | struct ide_atapi_pc *pc = (struct ide_atapi_pc *)rq->special; |
424 | idescsi_scsi_t *scsi = drive_to_idescsi(drive); | ||
425 | |||
426 | if (test_bit(IDESCSI_DRQ_INTERRUPT, &scsi->flags)) | ||
427 | pc->flags |= PC_FLAG_DRQ_INTERRUPT; | ||
428 | 413 | ||
429 | if (drive->using_dma && !idescsi_map_sg(drive, pc)) | 414 | if (drive->using_dma && !idescsi_map_sg(drive, pc)) |
430 | pc->flags |= PC_FLAG_DMA_OK; | 415 | pc->flags |= PC_FLAG_DMA_OK; |
@@ -460,11 +445,14 @@ static inline void idescsi_add_settings(ide_drive_t *drive) { ; } | |||
460 | static void idescsi_setup (ide_drive_t *drive, idescsi_scsi_t *scsi) | 445 | static void idescsi_setup (ide_drive_t *drive, idescsi_scsi_t *scsi) |
461 | { | 446 | { |
462 | if (drive->id && (drive->id->config & 0x0060) == 0x20) | 447 | if (drive->id && (drive->id->config & 0x0060) == 0x20) |
463 | set_bit (IDESCSI_DRQ_INTERRUPT, &scsi->flags); | 448 | set_bit(IDE_AFLAG_DRQ_INTERRUPT, &drive->atapi_flags); |
464 | clear_bit(IDESCSI_SG_TRANSFORM, &scsi->transform); | 449 | clear_bit(IDESCSI_SG_TRANSFORM, &scsi->transform); |
465 | #if IDESCSI_DEBUG_LOG | 450 | #if IDESCSI_DEBUG_LOG |
466 | set_bit(IDESCSI_LOG_CMD, &scsi->log); | 451 | set_bit(IDESCSI_LOG_CMD, &scsi->log); |
467 | #endif /* IDESCSI_DEBUG_LOG */ | 452 | #endif /* IDESCSI_DEBUG_LOG */ |
453 | |||
454 | drive->pc_callback = ide_scsi_callback; | ||
455 | |||
468 | idescsi_add_settings(drive); | 456 | idescsi_add_settings(drive); |
469 | } | 457 | } |
470 | 458 | ||
@@ -616,7 +604,6 @@ static int idescsi_queue (struct scsi_cmnd *cmd, | |||
616 | pc->scsi_cmd = cmd; | 604 | pc->scsi_cmd = cmd; |
617 | pc->done = done; | 605 | pc->done = done; |
618 | pc->timeout = jiffies + cmd->timeout_per_command; | 606 | pc->timeout = jiffies + cmd->timeout_per_command; |
619 | pc->callback = ide_scsi_callback; | ||
620 | 607 | ||
621 | if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) { | 608 | if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) { |
622 | printk ("ide-scsi: %s: que %lu, cmd = ", drive->name, cmd->serial_number); | 609 | printk ("ide-scsi: %s: que %lu, cmd = ", drive->name, cmd->serial_number); |
@@ -631,6 +618,7 @@ static int idescsi_queue (struct scsi_cmnd *cmd, | |||
631 | rq->special = (char *) pc; | 618 | rq->special = (char *) pc; |
632 | rq->cmd_type = REQ_TYPE_SPECIAL; | 619 | rq->cmd_type = REQ_TYPE_SPECIAL; |
633 | spin_unlock_irq(host->host_lock); | 620 | spin_unlock_irq(host->host_lock); |
621 | memcpy(rq->cmd, pc->c, 12); | ||
634 | blk_execute_rq_nowait(drive->queue, scsi->disk, rq, 0, NULL); | 622 | blk_execute_rq_nowait(drive->queue, scsi->disk, rq, 0, NULL); |
635 | spin_lock_irq(host->host_lock); | 623 | spin_lock_irq(host->host_lock); |
636 | return 0; | 624 | return 0; |
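Two related changes run through the ide-scsi hunks: the DRQ-interrupt bit moves from the driver-private flags word into drive->atapi_flags (IDE_AFLAG_DRQ_INTERRUPT), and all port accesses go through the new ide_tp_ops method table instead of inline helpers. A hedged fragment condensing the second pattern; 'hwif' is assumed to be a valid ide_hwif_t as in idescsi_atapi_error() above:

    const struct ide_tp_ops *tp_ops = hwif->tp_ops;

    if (tp_ops->read_status(hwif) & (BUSY_STAT | DRQ_STAT))
            tp_ops->exec_command(hwif, WIN_IDLEIMMEDIATE); /* force an abort */

The memcpy(rq->cmd, pc->c, 12) additions expose the 12-byte ATAPI packet to the block layer before the request is queued, in both the sense path and idescsi_queue().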
diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/cpm_uart/cpm_uart_core.c index 93e407ee08b9..1ff80de177db 100644 --- a/drivers/serial/cpm_uart/cpm_uart_core.c +++ b/drivers/serial/cpm_uart/cpm_uart_core.c | |||
@@ -201,6 +201,10 @@ static void cpm_uart_int_tx(struct uart_port *port) | |||
201 | cpm_uart_tx_pump(port); | 201 | cpm_uart_tx_pump(port); |
202 | } | 202 | } |
203 | 203 | ||
204 | #ifdef CONFIG_CONSOLE_POLL | ||
205 | static int serial_polled; | ||
206 | #endif | ||
207 | |||
204 | /* | 208 | /* |
205 | * Receive characters | 209 | * Receive characters |
206 | */ | 210 | */ |
@@ -222,6 +226,12 @@ static void cpm_uart_int_rx(struct uart_port *port) | |||
222 | */ | 226 | */ |
223 | bdp = pinfo->rx_cur; | 227 | bdp = pinfo->rx_cur; |
224 | for (;;) { | 228 | for (;;) { |
229 | #ifdef CONFIG_CONSOLE_POLL | ||
230 | if (unlikely(serial_polled)) { | ||
231 | serial_polled = 0; | ||
232 | return; | ||
233 | } | ||
234 | #endif | ||
225 | /* get status */ | 235 | /* get status */ |
226 | status = in_be16(&bdp->cbd_sc); | 236 | status = in_be16(&bdp->cbd_sc); |
227 | /* If this one is empty, return happy */ | 237 | /* If this one is empty, return happy */ |
@@ -253,7 +263,12 @@ static void cpm_uart_int_rx(struct uart_port *port) | |||
253 | goto handle_error; | 263 | goto handle_error; |
254 | if (uart_handle_sysrq_char(port, ch)) | 264 | if (uart_handle_sysrq_char(port, ch)) |
255 | continue; | 265 | continue; |
256 | 266 | #ifdef CONFIG_CONSOLE_POLL | |
267 | if (unlikely(serial_polled)) { | ||
268 | serial_polled = 0; | ||
269 | return; | ||
270 | } | ||
271 | #endif | ||
257 | error_return: | 272 | error_return: |
258 | tty_insert_flip_char(tty, ch, flg); | 273 | tty_insert_flip_char(tty, ch, flg); |
259 | 274 | ||
@@ -865,6 +880,80 @@ static void cpm_uart_config_port(struct uart_port *port, int flags) | |||
865 | cpm_uart_request_port(port); | 880 | cpm_uart_request_port(port); |
866 | } | 881 | } |
867 | } | 882 | } |
883 | |||
884 | #ifdef CONFIG_CONSOLE_POLL | ||
885 | /* Serial polling routines for writing and reading from the uart while | ||
886 | * in an interrupt or debug context. | ||
887 | */ | ||
888 | |||
889 | #define GDB_BUF_SIZE 512 /* power of 2, please */ | ||
890 | |||
891 | static char poll_buf[GDB_BUF_SIZE]; | ||
892 | static char *pollp; | ||
893 | static int poll_chars; | ||
894 | |||
895 | static int poll_wait_key(char *obuf, struct uart_cpm_port *pinfo) | ||
896 | { | ||
897 | u_char c, *cp; | ||
898 | volatile cbd_t *bdp; | ||
899 | int i; | ||
900 | |||
901 | /* Get the address of the host memory buffer. | ||
902 | */ | ||
903 | bdp = pinfo->rx_cur; | ||
904 | while (bdp->cbd_sc & BD_SC_EMPTY) | ||
905 | ; | ||
906 | |||
907 | /* If the buffer address is in the CPM DPRAM, don't | ||
908 | * convert it. | ||
909 | */ | ||
910 | cp = cpm2cpu_addr(bdp->cbd_bufaddr, pinfo); | ||
911 | |||
912 | if (obuf) { | ||
913 | i = c = bdp->cbd_datlen; | ||
914 | while (i-- > 0) | ||
915 | *obuf++ = *cp++; | ||
916 | } else | ||
917 | c = *cp; | ||
918 | bdp->cbd_sc &= ~(BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_OV | BD_SC_ID); | ||
919 | bdp->cbd_sc |= BD_SC_EMPTY; | ||
920 | |||
921 | if (bdp->cbd_sc & BD_SC_WRAP) | ||
922 | bdp = pinfo->rx_bd_base; | ||
923 | else | ||
924 | bdp++; | ||
925 | pinfo->rx_cur = (cbd_t *)bdp; | ||
926 | |||
927 | return (int)c; | ||
928 | } | ||
929 | |||
930 | static int cpm_get_poll_char(struct uart_port *port) | ||
931 | { | ||
932 | struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; | ||
933 | |||
934 | if (!serial_polled) { | ||
935 | serial_polled = 1; | ||
936 | poll_chars = 0; | ||
937 | } | ||
938 | if (poll_chars <= 0) { | ||
939 | poll_chars = poll_wait_key(poll_buf, pinfo); | ||
940 | pollp = poll_buf; | ||
941 | } | ||
942 | poll_chars--; | ||
943 | return *pollp++; | ||
944 | } | ||
945 | |||
946 | static void cpm_put_poll_char(struct uart_port *port, | ||
947 | unsigned char c) | ||
948 | { | ||
949 | struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; | ||
950 | static char ch[2]; | ||
951 | |||
952 | ch[0] = (char)c; | ||
953 | cpm_uart_early_write(pinfo->port.line, ch, 1); | ||
954 | } | ||
955 | #endif /* CONFIG_CONSOLE_POLL */ | ||
956 | |||
868 | static struct uart_ops cpm_uart_pops = { | 957 | static struct uart_ops cpm_uart_pops = { |
869 | .tx_empty = cpm_uart_tx_empty, | 958 | .tx_empty = cpm_uart_tx_empty, |
870 | .set_mctrl = cpm_uart_set_mctrl, | 959 | .set_mctrl = cpm_uart_set_mctrl, |
@@ -882,6 +971,10 @@ static struct uart_ops cpm_uart_pops = { | |||
882 | .request_port = cpm_uart_request_port, | 971 | .request_port = cpm_uart_request_port, |
883 | .config_port = cpm_uart_config_port, | 972 | .config_port = cpm_uart_config_port, |
884 | .verify_port = cpm_uart_verify_port, | 973 | .verify_port = cpm_uart_verify_port, |
974 | #ifdef CONFIG_CONSOLE_POLL | ||
975 | .poll_get_char = cpm_get_poll_char, | ||
976 | .poll_put_char = cpm_put_poll_char, | ||
977 | #endif | ||
885 | }; | 978 | }; |
886 | 979 | ||
887 | struct uart_cpm_port cpm_uart_ports[UART_NR]; | 980 | struct uart_cpm_port cpm_uart_ports[UART_NR]; |
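Both serial drivers in this series grow poll_get_char/poll_put_char so a debugger (typically KGDB through kgdboc) can drive the port with interrupts off. A hedged sketch of how a consumer uses those uart_ops hooks; the function is illustrative and not part of this patch:

    #include <linux/serial_core.h>

    /* Echo one character using only the CONFIG_CONSOLE_POLL hooks.
     * Assumes the port's uart_ops provide both callbacks.
     */
    static void poll_echo_once(struct uart_port *port)
    {
            /* both implementations added above block until a byte arrives */
            int c = port->ops->poll_get_char(port);

            port->ops->poll_put_char(port, c);
    }

The serial_polled flag set on the get path is what makes the regular RX interrupt handlers bail out early (the hunks near the top of both files), so polled and interrupt-driven reception do not fight over the same buffer descriptors.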
diff --git a/drivers/serial/mpsc.c b/drivers/serial/mpsc.c index c9f53e71f252..61d3ade5286c 100644 --- a/drivers/serial/mpsc.c +++ b/drivers/serial/mpsc.c | |||
@@ -921,6 +921,10 @@ static int mpsc_make_ready(struct mpsc_port_info *pi) | |||
921 | return 0; | 921 | return 0; |
922 | } | 922 | } |
923 | 923 | ||
924 | #ifdef CONFIG_CONSOLE_POLL | ||
925 | static int serial_polled; | ||
926 | #endif | ||
927 | |||
924 | /* | 928 | /* |
925 | ****************************************************************************** | 929 | ****************************************************************************** |
926 | * | 930 | * |
@@ -956,7 +960,12 @@ static int mpsc_rx_intr(struct mpsc_port_info *pi) | |||
956 | while (!((cmdstat = be32_to_cpu(rxre->cmdstat)) | 960 | while (!((cmdstat = be32_to_cpu(rxre->cmdstat)) |
957 | & SDMA_DESC_CMDSTAT_O)) { | 961 | & SDMA_DESC_CMDSTAT_O)) { |
958 | bytes_in = be16_to_cpu(rxre->bytecnt); | 962 | bytes_in = be16_to_cpu(rxre->bytecnt); |
959 | 963 | #ifdef CONFIG_CONSOLE_POLL | |
964 | if (unlikely(serial_polled)) { | ||
965 | serial_polled = 0; | ||
966 | return 0; | ||
967 | } | ||
968 | #endif | ||
960 | /* Following use of tty struct directly is deprecated */ | 969 | /* Following use of tty struct directly is deprecated */ |
961 | if (unlikely(tty_buffer_request_room(tty, bytes_in) | 970 | if (unlikely(tty_buffer_request_room(tty, bytes_in) |
962 | < bytes_in)) { | 971 | < bytes_in)) { |
@@ -1017,6 +1026,12 @@ static int mpsc_rx_intr(struct mpsc_port_info *pi) | |||
1017 | if (uart_handle_sysrq_char(&pi->port, *bp)) { | 1026 | if (uart_handle_sysrq_char(&pi->port, *bp)) { |
1018 | bp++; | 1027 | bp++; |
1019 | bytes_in--; | 1028 | bytes_in--; |
1029 | #ifdef CONFIG_CONSOLE_POLL | ||
1030 | if (unlikely(serial_polled)) { | ||
1031 | serial_polled = 0; | ||
1032 | return 0; | ||
1033 | } | ||
1034 | #endif | ||
1020 | goto next_frame; | 1035 | goto next_frame; |
1021 | } | 1036 | } |
1022 | 1037 | ||
@@ -1519,6 +1534,133 @@ static int mpsc_verify_port(struct uart_port *port, struct serial_struct *ser) | |||
1519 | 1534 | ||
1520 | return rc; | 1535 | return rc; |
1521 | } | 1536 | } |
1537 | #ifdef CONFIG_CONSOLE_POLL | ||
1538 | /* Serial polling routines for writing and reading from the uart while | ||
1539 | * in an interrupt or debug context. | ||
1540 | */ | ||
1541 | |||
1542 | static char poll_buf[2048]; | ||
1543 | static int poll_ptr; | ||
1544 | static int poll_cnt; | ||
1545 | static void mpsc_put_poll_char(struct uart_port *port, | ||
1546 | unsigned char c); | ||
1547 | |||
1548 | static int mpsc_get_poll_char(struct uart_port *port) | ||
1549 | { | ||
1550 | struct mpsc_port_info *pi = (struct mpsc_port_info *)port; | ||
1551 | struct mpsc_rx_desc *rxre; | ||
1552 | u32 cmdstat, bytes_in, i; | ||
1553 | u8 *bp; | ||
1554 | |||
1555 | if (!serial_polled) | ||
1556 | serial_polled = 1; | ||
1557 | |||
1558 | pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi->port.line); | ||
1559 | |||
1560 | if (poll_cnt) { | ||
1561 | poll_cnt--; | ||
1562 | return poll_buf[poll_ptr++]; | ||
1563 | } | ||
1564 | poll_ptr = 0; | ||
1565 | poll_cnt = 0; | ||
1566 | |||
1567 | while (poll_cnt == 0) { | ||
1568 | rxre = (struct mpsc_rx_desc *)(pi->rxr + | ||
1569 | (pi->rxr_posn*MPSC_RXRE_SIZE)); | ||
1570 | dma_cache_sync(pi->port.dev, (void *)rxre, | ||
1571 | MPSC_RXRE_SIZE, DMA_FROM_DEVICE); | ||
1572 | #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) | ||
1573 | if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ | ||
1574 | invalidate_dcache_range((ulong)rxre, | ||
1575 | (ulong)rxre + MPSC_RXRE_SIZE); | ||
1576 | #endif | ||
1577 | /* | ||
1578 | * Loop through Rx descriptors handling ones that have | ||
1579 | * been completed. | ||
1580 | */ | ||
1581 | while (poll_cnt == 0 && | ||
1582 | !((cmdstat = be32_to_cpu(rxre->cmdstat)) & | ||
1583 | SDMA_DESC_CMDSTAT_O)){ | ||
1584 | bytes_in = be16_to_cpu(rxre->bytecnt); | ||
1585 | bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE); | ||
1586 | dma_cache_sync(pi->port.dev, (void *) bp, | ||
1587 | MPSC_RXBE_SIZE, DMA_FROM_DEVICE); | ||
1588 | #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) | ||
1589 | if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ | ||
1590 | invalidate_dcache_range((ulong)bp, | ||
1591 | (ulong)bp + MPSC_RXBE_SIZE); | ||
1592 | #endif | ||
1593 | if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR | | ||
1594 | SDMA_DESC_CMDSTAT_FR | SDMA_DESC_CMDSTAT_OR))) && | ||
1595 | !(cmdstat & pi->port.ignore_status_mask)) { | ||
1596 | poll_buf[poll_cnt] = *bp; | ||
1597 | poll_cnt++; | ||
1598 | } else { | ||
1599 | for (i = 0; i < bytes_in; i++) { | ||
1600 | poll_buf[poll_cnt] = *bp++; | ||
1601 | poll_cnt++; | ||
1602 | } | ||
1603 | pi->port.icount.rx += bytes_in; | ||
1604 | } | ||
1605 | rxre->bytecnt = cpu_to_be16(0); | ||
1606 | wmb(); | ||
1607 | rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O | | ||
1608 | SDMA_DESC_CMDSTAT_EI | | ||
1609 | SDMA_DESC_CMDSTAT_F | | ||
1610 | SDMA_DESC_CMDSTAT_L); | ||
1611 | wmb(); | ||
1612 | dma_cache_sync(pi->port.dev, (void *)rxre, | ||
1613 | MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL); | ||
1614 | #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) | ||
1615 | if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ | ||
1616 | flush_dcache_range((ulong)rxre, | ||
1617 | (ulong)rxre + MPSC_RXRE_SIZE); | ||
1618 | #endif | ||
1619 | |||
1620 | /* Advance to next descriptor */ | ||
1621 | pi->rxr_posn = (pi->rxr_posn + 1) & | ||
1622 | (MPSC_RXR_ENTRIES - 1); | ||
1623 | rxre = (struct mpsc_rx_desc *)(pi->rxr + | ||
1624 | (pi->rxr_posn * MPSC_RXRE_SIZE)); | ||
1625 | dma_cache_sync(pi->port.dev, (void *)rxre, | ||
1626 | MPSC_RXRE_SIZE, DMA_FROM_DEVICE); | ||
1627 | #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) | ||
1628 | if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ | ||
1629 | invalidate_dcache_range((ulong)rxre, | ||
1630 | (ulong)rxre + MPSC_RXRE_SIZE); | ||
1631 | #endif | ||
1632 | } | ||
1633 | |||
1634 | /* Restart rx engine, if its stopped */ | ||
1635 | if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0) | ||
1636 | mpsc_start_rx(pi); | ||
1637 | } | ||
1638 | if (poll_cnt) { | ||
1639 | poll_cnt--; | ||
1640 | return poll_buf[poll_ptr++]; | ||
1641 | } | ||
1642 | |||
1643 | return 0; | ||
1644 | } | ||
1645 | |||
1646 | |||
1647 | static void mpsc_put_poll_char(struct uart_port *port, | ||
1648 | unsigned char c) | ||
1649 | { | ||
1650 | struct mpsc_port_info *pi = (struct mpsc_port_info *)port; | ||
1651 | u32 data; | ||
1652 | |||
1653 | data = readl(pi->mpsc_base + MPSC_MPCR); | ||
1654 | writeb(c, pi->mpsc_base + MPSC_CHR_1); | ||
1655 | mb(); | ||
1656 | data = readl(pi->mpsc_base + MPSC_CHR_2); | ||
1657 | data |= MPSC_CHR_2_TTCS; | ||
1658 | writel(data, pi->mpsc_base + MPSC_CHR_2); | ||
1659 | mb(); | ||
1660 | |||
1661 | while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_TTCS); | ||
1662 | } | ||
1663 | #endif | ||
1522 | 1664 | ||
1523 | static struct uart_ops mpsc_pops = { | 1665 | static struct uart_ops mpsc_pops = { |
1524 | .tx_empty = mpsc_tx_empty, | 1666 | .tx_empty = mpsc_tx_empty, |
@@ -1537,6 +1679,10 @@ static struct uart_ops mpsc_pops = { | |||
1537 | .request_port = mpsc_request_port, | 1679 | .request_port = mpsc_request_port, |
1538 | .config_port = mpsc_config_port, | 1680 | .config_port = mpsc_config_port, |
1539 | .verify_port = mpsc_verify_port, | 1681 | .verify_port = mpsc_verify_port, |
1682 | #ifdef CONFIG_CONSOLE_POLL | ||
1683 | .poll_get_char = mpsc_get_poll_char, | ||
1684 | .poll_put_char = mpsc_put_poll_char, | ||
1685 | #endif | ||
1540 | }; | 1686 | }; |
1541 | 1687 | ||
1542 | /* | 1688 | /* |
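The poll path added above walks a power-of-two Rx descriptor ring; as a stand-alone illustration of the wrap-around index arithmetic it relies on, here is a minimal user-space sketch (RXR_ENTRIES is an illustrative value, not the driver's MPSC_RXR_ENTRIES):

#include <stdio.h>

#define RXR_ENTRIES 32                      /* must be a power of two */

/* Advance a ring index with wrap-around, as the poll loop above does. */
static unsigned int ring_next(unsigned int posn)
{
        return (posn + 1) & (RXR_ENTRIES - 1);
}

int main(void)
{
        unsigned int posn = RXR_ENTRIES - 2;
        int i;

        for (i = 0; i < 4; i++) {
                printf("%u -> ", posn);
                posn = ring_next(posn);
        }
        printf("%u\n", posn);               /* 30 -> 31 -> 0 -> 1 -> 2 */
        return 0;
}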
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c index fbd6289977c8..8fb0066609bb 100644 --- a/drivers/usb/gadget/pxa25x_udc.c +++ b/drivers/usb/gadget/pxa25x_udc.c | |||
@@ -152,9 +152,10 @@ static int is_vbus_present(void) | |||
152 | static void pullup_off(void) | 152 | static void pullup_off(void) |
153 | { | 153 | { |
154 | struct pxa2xx_udc_mach_info *mach = the_controller->mach; | 154 | struct pxa2xx_udc_mach_info *mach = the_controller->mach; |
155 | int off_level = mach->gpio_pullup_inverted; | ||
155 | 156 | ||
156 | if (mach->gpio_pullup) | 157 | if (mach->gpio_pullup) |
157 | gpio_set_value(mach->gpio_pullup, 0); | 158 | gpio_set_value(mach->gpio_pullup, off_level); |
158 | else if (mach->udc_command) | 159 | else if (mach->udc_command) |
159 | mach->udc_command(PXA2XX_UDC_CMD_DISCONNECT); | 160 | mach->udc_command(PXA2XX_UDC_CMD_DISCONNECT); |
160 | } | 161 | } |
@@ -162,9 +163,10 @@ static void pullup_off(void) | |||
162 | static void pullup_on(void) | 163 | static void pullup_on(void) |
163 | { | 164 | { |
164 | struct pxa2xx_udc_mach_info *mach = the_controller->mach; | 165 | struct pxa2xx_udc_mach_info *mach = the_controller->mach; |
166 | int on_level = !mach->gpio_pullup_inverted; | ||
165 | 167 | ||
166 | if (mach->gpio_pullup) | 168 | if (mach->gpio_pullup) |
167 | gpio_set_value(mach->gpio_pullup, 1); | 169 | gpio_set_value(mach->gpio_pullup, on_level); |
168 | else if (mach->udc_command) | 170 | else if (mach->udc_command) |
169 | mach->udc_command(PXA2XX_UDC_CMD_CONNECT); | 171 | mach->udc_command(PXA2XX_UDC_CMD_CONNECT); |
170 | } | 172 | } |
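The pullup_on()/pullup_off() change derives the GPIO level from the new gpio_pullup_inverted flag; a minimal sketch of that truth table, with no kernel APIs involved:

#include <stdio.h>

/* Compute the GPIO level for "pullup on" and "pullup off" when the
 * board may wire the pullup line active-low (inverted). */
static int pullup_level(int want_on, int inverted)
{
        return want_on ? !inverted : inverted;
}

int main(void)
{
        int inverted;

        for (inverted = 0; inverted <= 1; inverted++)
                printf("inverted=%d: on->%d off->%d\n", inverted,
                       pullup_level(1, inverted), pullup_level(0, inverted));
        return 0;
}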
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c index d0746261c957..bb2514369507 100644 --- a/drivers/video/pxafb.c +++ b/drivers/video/pxafb.c | |||
@@ -227,6 +227,22 @@ static int pxafb_bpp_to_lccr3(struct fb_var_screeninfo *var) | |||
227 | case 4: ret = LCCR3_4BPP; break; | 227 | case 4: ret = LCCR3_4BPP; break; |
228 | case 8: ret = LCCR3_8BPP; break; | 228 | case 8: ret = LCCR3_8BPP; break; |
229 | case 16: ret = LCCR3_16BPP; break; | 229 | case 16: ret = LCCR3_16BPP; break; |
230 | case 24: | ||
231 | switch (var->red.length + var->green.length + | ||
232 | var->blue.length + var->transp.length) { | ||
233 | case 18: ret = LCCR3_18BPP_P | LCCR3_PDFOR_3; break; | ||
234 | case 19: ret = LCCR3_19BPP_P; break; | ||
235 | } | ||
236 | break; | ||
237 | case 32: | ||
238 | switch (var->red.length + var->green.length + | ||
239 | var->blue.length + var->transp.length) { | ||
240 | case 18: ret = LCCR3_18BPP | LCCR3_PDFOR_3; break; | ||
241 | case 19: ret = LCCR3_19BPP; break; | ||
242 | case 24: ret = LCCR3_24BPP | LCCR3_PDFOR_3; break; | ||
243 | case 25: ret = LCCR3_25BPP; break; | ||
244 | } | ||
245 | break; | ||
230 | } | 246 | } |
231 | return ret; | 247 | return ret; |
232 | } | 248 | } |
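The new 24/32-bpp cases select the LCCR3 pixel format from the summed component lengths; a hedged stand-alone sketch of the 32-bpp branch, using the LCCR3_* values from the regs-lcd.h hunk later in this series (LCCR3_PDFOR_3 is assumed to be (3 << 30), following the PDFOR_0/PDFOR_1 pattern shown there):

#include <stdio.h>

#define LCCR3_18BPP     (5 << 24)
#define LCCR3_19BPP     (7 << 24)
#define LCCR3_24BPP     ((1 << 29) | (1 << 24))
#define LCCR3_25BPP     ((1 << 29) | (2 << 24))
#define LCCR3_PDFOR_3   (3u << 30)          /* assumed encoding */

/* Pick the LCCR3 pixel-format bits for a 32-bpp framebuffer whose
 * visible depth is red + green + blue + transp bits. */
static unsigned int lccr3_for_depth32(int depth)
{
        switch (depth) {
        case 18: return LCCR3_18BPP | LCCR3_PDFOR_3;
        case 19: return LCCR3_19BPP;
        case 24: return LCCR3_24BPP | LCCR3_PDFOR_3;
        case 25: return LCCR3_25BPP;
        default: return 0;                  /* unsupported depth */
        }
}

int main(void)
{
        int depths[] = { 18, 19, 24, 25 };
        int i;

        for (i = 0; i < 4; i++)
                printf("depth %d -> LCCR3 0x%08x\n", depths[i],
                       lccr3_for_depth32(depths[i]));
        return 0;
}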
@@ -345,6 +361,41 @@ static int pxafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) | |||
345 | var->green.offset = 5; var->green.length = 6; | 361 | var->green.offset = 5; var->green.length = 6; |
346 | var->blue.offset = 0; var->blue.length = 5; | 362 | var->blue.offset = 0; var->blue.length = 5; |
347 | var->transp.offset = var->transp.length = 0; | 363 | var->transp.offset = var->transp.length = 0; |
364 | } else if (var->bits_per_pixel > 16) { | ||
365 | struct pxafb_mode_info *mode; | ||
366 | |||
367 | mode = pxafb_getmode(inf, var); | ||
368 | if (!mode) | ||
369 | return -EINVAL; | ||
370 | |||
371 | switch (mode->depth) { | ||
372 | case 18: /* RGB666 */ | ||
373 | var->transp.offset = var->transp.length = 0; | ||
374 | var->red.offset = 12; var->red.length = 6; | ||
375 | var->green.offset = 6; var->green.length = 6; | ||
376 | var->blue.offset = 0; var->blue.length = 6; | ||
377 | break; | ||
378 | case 19: /* RGBT666 */ | ||
379 | var->transp.offset = 18; var->transp.length = 1; | ||
380 | var->red.offset = 12; var->red.length = 6; | ||
381 | var->green.offset = 6; var->green.length = 6; | ||
382 | var->blue.offset = 0; var->blue.length = 6; | ||
383 | break; | ||
384 | case 24: /* RGB888 */ | ||
385 | var->transp.offset = var->transp.length = 0; | ||
386 | var->red.offset = 16; var->red.length = 8; | ||
387 | var->green.offset = 8; var->green.length = 8; | ||
388 | var->blue.offset = 0; var->blue.length = 8; | ||
389 | break; | ||
390 | case 25: /* RGBT888 */ | ||
391 | var->transp.offset = 24; var->transp.length = 1; | ||
392 | var->red.offset = 16; var->red.length = 8; | ||
393 | var->green.offset = 8; var->green.length = 8; | ||
394 | var->blue.offset = 0; var->blue.length = 8; | ||
395 | break; | ||
396 | default: | ||
397 | return -EINVAL; | ||
398 | } | ||
348 | } else { | 399 | } else { |
349 | var->red.offset = var->green.offset = 0; | 400 | var->red.offset = var->green.offset = 0; |
350 | var->blue.offset = var->transp.offset = 0; | 401 | var->blue.offset = var->transp.offset = 0; |
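As a cross-check of the RGBT888 layout chosen above (transp at bit 24, red at 16, green at 8, blue at 0), a tiny pixel-packing example:

#include <stdio.h>

/* Pack one RGBT888 pixel: T at bit 24, R at 16, G at 8, B at 0,
 * matching the offsets/lengths set in pxafb_check_var() above. */
static unsigned int pack_rgbt888(unsigned int t, unsigned int r,
                                 unsigned int g, unsigned int b)
{
        return ((t & 0x1) << 24) | ((r & 0xff) << 16) |
               ((g & 0xff) << 8) | (b & 0xff);
}

int main(void)
{
        printf("0x%08x\n", pack_rgbt888(1, 0x12, 0x34, 0x56)); /* 0x01123456 */
        return 0;
}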
@@ -376,7 +427,7 @@ static int pxafb_set_par(struct fb_info *info) | |||
376 | struct pxafb_info *fbi = (struct pxafb_info *)info; | 427 | struct pxafb_info *fbi = (struct pxafb_info *)info; |
377 | struct fb_var_screeninfo *var = &info->var; | 428 | struct fb_var_screeninfo *var = &info->var; |
378 | 429 | ||
379 | if (var->bits_per_pixel == 16) | 430 | if (var->bits_per_pixel >= 16) |
380 | fbi->fb.fix.visual = FB_VISUAL_TRUECOLOR; | 431 | fbi->fb.fix.visual = FB_VISUAL_TRUECOLOR; |
381 | else if (!fbi->cmap_static) | 432 | else if (!fbi->cmap_static) |
382 | fbi->fb.fix.visual = FB_VISUAL_PSEUDOCOLOR; | 433 | fbi->fb.fix.visual = FB_VISUAL_PSEUDOCOLOR; |
@@ -391,7 +442,7 @@ static int pxafb_set_par(struct fb_info *info) | |||
391 | 442 | ||
392 | fbi->fb.fix.line_length = var->xres_virtual * | 443 | fbi->fb.fix.line_length = var->xres_virtual * |
393 | var->bits_per_pixel / 8; | 444 | var->bits_per_pixel / 8; |
394 | if (var->bits_per_pixel == 16) | 445 | if (var->bits_per_pixel >= 16) |
395 | fbi->palette_size = 0; | 446 | fbi->palette_size = 0; |
396 | else | 447 | else |
397 | fbi->palette_size = var->bits_per_pixel == 1 ? | 448 | fbi->palette_size = var->bits_per_pixel == 1 ? |
@@ -404,7 +455,7 @@ static int pxafb_set_par(struct fb_info *info) | |||
404 | */ | 455 | */ |
405 | pxafb_set_truecolor(fbi->fb.fix.visual == FB_VISUAL_TRUECOLOR); | 456 | pxafb_set_truecolor(fbi->fb.fix.visual == FB_VISUAL_TRUECOLOR); |
406 | 457 | ||
407 | if (fbi->fb.var.bits_per_pixel == 16) | 458 | if (fbi->fb.var.bits_per_pixel >= 16) |
408 | fb_dealloc_cmap(&fbi->fb.cmap); | 459 | fb_dealloc_cmap(&fbi->fb.cmap); |
409 | else | 460 | else |
410 | fb_alloc_cmap(&fbi->fb.cmap, 1<<fbi->fb.var.bits_per_pixel, 0); | 461 | fb_alloc_cmap(&fbi->fb.cmap, 1<<fbi->fb.var.bits_per_pixel, 0); |
@@ -831,6 +882,8 @@ static int pxafb_activate_var(struct fb_var_screeninfo *var, | |||
831 | case 4: | 882 | case 4: |
832 | case 8: | 883 | case 8: |
833 | case 16: | 884 | case 16: |
885 | case 24: | ||
886 | case 32: | ||
834 | break; | 887 | break; |
835 | default: | 888 | default: |
836 | printk(KERN_ERR "%s: invalid bit depth %d\n", | 889 | printk(KERN_ERR "%s: invalid bit depth %d\n", |
@@ -968,6 +1021,11 @@ static void pxafb_setup_gpio(struct pxafb_info *fbi) | |||
968 | 1021 | ||
969 | for (gpio = 58; ldd_bits; gpio++, ldd_bits--) | 1022 | for (gpio = 58; ldd_bits; gpio++, ldd_bits--) |
970 | pxa_gpio_mode(gpio | GPIO_ALT_FN_2_OUT); | 1023 | pxa_gpio_mode(gpio | GPIO_ALT_FN_2_OUT); |
1024 | /* 18 bit interface */ | ||
1025 | if (fbi->fb.var.bits_per_pixel > 16) { | ||
1026 | pxa_gpio_mode(86 | GPIO_ALT_FN_2_OUT); | ||
1027 | pxa_gpio_mode(87 | GPIO_ALT_FN_2_OUT); | ||
1028 | } | ||
971 | pxa_gpio_mode(GPIO74_LCD_FCLK_MD); | 1029 | pxa_gpio_mode(GPIO74_LCD_FCLK_MD); |
972 | pxa_gpio_mode(GPIO75_LCD_LCLK_MD); | 1030 | pxa_gpio_mode(GPIO75_LCD_LCLK_MD); |
973 | pxa_gpio_mode(GPIO76_LCD_PCLK_MD); | 1031 | pxa_gpio_mode(GPIO76_LCD_PCLK_MD); |
diff --git a/include/asm-arm/arch-iop13xx/adma.h b/include/asm-arm/arch-iop13xx/adma.h index 90d14ee564f5..ef4f5da2029f 100644 --- a/include/asm-arm/arch-iop13xx/adma.h +++ b/include/asm-arm/arch-iop13xx/adma.h | |||
@@ -198,17 +198,13 @@ iop_chan_memset_slot_count(size_t len, int *slots_per_op) | |||
198 | static inline int | 198 | static inline int |
199 | iop_chan_xor_slot_count(size_t len, int src_cnt, int *slots_per_op) | 199 | iop_chan_xor_slot_count(size_t len, int src_cnt, int *slots_per_op) |
200 | { | 200 | { |
201 | int num_slots; | 201 | static const char slot_count_table[] = { 1, 2, 2, 2, |
202 | /* slots_to_find = 1 for basic descriptor + 1 per 4 sources above 1 | 202 | 2, 3, 3, 3, |
203 | * (1 source => 8 bytes) (1 slot => 32 bytes) | 203 | 3, 4, 4, 4, |
204 | */ | 204 | 4, 5, 5, 5, |
205 | num_slots = 1 + (((src_cnt - 1) << 3) >> 5); | 205 | }; |
206 | if (((src_cnt - 1) << 3) & 0x1f) | 206 | *slots_per_op = slot_count_table[src_cnt - 1]; |
207 | num_slots++; | 207 | return *slots_per_op; |
208 | |||
209 | *slots_per_op = num_slots; | ||
210 | |||
211 | return num_slots; | ||
212 | } | 208 | } |
213 | 209 | ||
214 | #define ADMA_MAX_BYTE_COUNT (16 * 1024 * 1024) | 210 | #define ADMA_MAX_BYTE_COUNT (16 * 1024 * 1024) |
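The lookup table above replaces the old slot-count arithmetic; a quick user-space check that the two agree over the 1..16 source range the table covers:

#include <stdio.h>

static const char slot_count_table[] = { 1, 2, 2, 2,
                                         2, 3, 3, 3,
                                         3, 4, 4, 4,
                                         4, 5, 5, 5, };

/* Old formula: 1 slot for the descriptor plus 1 per 4 sources above 1
 * (1 source = 8 bytes, 1 slot = 32 bytes), rounded up. */
static int old_slot_count(int src_cnt)
{
        int num_slots = 1 + (((src_cnt - 1) << 3) >> 5);

        if (((src_cnt - 1) << 3) & 0x1f)
                num_slots++;
        return num_slots;
}

int main(void)
{
        int src_cnt;

        for (src_cnt = 1; src_cnt <= 16; src_cnt++)
                printf("src_cnt=%2d old=%d table=%d\n", src_cnt,
                       old_slot_count(src_cnt), slot_count_table[src_cnt - 1]);
        return 0;
}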
diff --git a/include/asm-arm/arch-pxa/cm-x270.h b/include/asm-arm/arch-pxa/cm-x270.h deleted file mode 100644 index f8fac9e18009..000000000000 --- a/include/asm-arm/arch-pxa/cm-x270.h +++ /dev/null | |||
@@ -1,50 +0,0 @@ | |||
1 | /* | ||
2 | * linux/include/asm/arch-pxa/cm-x270.h | ||
3 | * | ||
4 | * Copyright Compulab Ltd., 2003, 2007 | ||
5 | * Mike Rapoport <mike@compulab.co.il> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | |||
13 | /* CM-x270 device physical addresses */ | ||
14 | #define CMX270_CS1_PHYS (PXA_CS1_PHYS) | ||
15 | #define MARATHON_PHYS (PXA_CS2_PHYS) | ||
16 | #define CMX270_IDE104_PHYS (PXA_CS3_PHYS) | ||
17 | #define CMX270_IT8152_PHYS (PXA_CS4_PHYS) | ||
18 | |||
19 | /* Statically mapped regions */ | ||
20 | #define CMX270_VIRT_BASE (0xe8000000) | ||
21 | #define CMX270_IT8152_VIRT (CMX270_VIRT_BASE) | ||
22 | #define CMX270_IDE104_VIRT (CMX270_IT8152_VIRT + SZ_64M) | ||
23 | |||
24 | /* GPIO related definitions */ | ||
25 | #define GPIO_IT8152_IRQ (22) | ||
26 | |||
27 | #define IRQ_GPIO_IT8152_IRQ IRQ_GPIO(GPIO_IT8152_IRQ) | ||
28 | #define PME_IRQ IRQ_GPIO(0) | ||
29 | #define CMX270_IDE_IRQ IRQ_GPIO(100) | ||
30 | #define CMX270_GPIRQ1 IRQ_GPIO(101) | ||
31 | #define CMX270_TOUCHIRQ IRQ_GPIO(96) | ||
32 | #define CMX270_ETHIRQ IRQ_GPIO(10) | ||
33 | #define CMX270_GFXIRQ IRQ_GPIO(95) | ||
34 | #define CMX270_NANDIRQ IRQ_GPIO(89) | ||
35 | #define CMX270_MMC_IRQ IRQ_GPIO(83) | ||
36 | |||
37 | /* PCMCIA related definitions */ | ||
38 | #define PCC_DETECT(x) (GPLR(84 - (x)) & GPIO_bit(84 - (x))) | ||
39 | #define PCC_READY(x) (GPLR(82 - (x)) & GPIO_bit(82 - (x))) | ||
40 | |||
41 | #define PCMCIA_S0_CD_VALID IRQ_GPIO(84) | ||
42 | #define PCMCIA_S0_CD_VALID_EDGE GPIO_BOTH_EDGES | ||
43 | |||
44 | #define PCMCIA_S1_CD_VALID IRQ_GPIO(83) | ||
45 | #define PCMCIA_S1_CD_VALID_EDGE GPIO_BOTH_EDGES | ||
46 | |||
47 | #define PCMCIA_S0_RDYINT IRQ_GPIO(82) | ||
48 | #define PCMCIA_S1_RDYINT IRQ_GPIO(81) | ||
49 | |||
50 | #define PCMCIA_RESET_GPIO 53 | ||
diff --git a/include/asm-arm/arch-pxa/eseries-gpio.h b/include/asm-arm/arch-pxa/eseries-gpio.h new file mode 100644 index 000000000000..4c90b1310270 --- /dev/null +++ b/include/asm-arm/arch-pxa/eseries-gpio.h | |||
@@ -0,0 +1,50 @@ | |||
1 | /* | ||
2 | * eseries-gpio.h | ||
3 | * | ||
4 | * Copyright (C) Ian Molton <spyro@f2s.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | /* e-series power button */ | ||
13 | #define GPIO_ESERIES_POWERBTN 0 | ||
14 | |||
15 | /* UDC GPIO definitions */ | ||
16 | #define GPIO_E7XX_USB_DISC 13 | ||
17 | #define GPIO_E7XX_USB_PULLUP 3 | ||
18 | |||
19 | #define GPIO_E800_USB_DISC 4 | ||
20 | #define GPIO_E800_USB_PULLUP 84 | ||
21 | |||
22 | /* e740 PCMCIA GPIO definitions */ | ||
23 | /* Note: PWR1 seems to be inverted */ | ||
24 | #define GPIO_E740_PCMCIA_CD0 8 | ||
25 | #define GPIO_E740_PCMCIA_CD1 44 | ||
26 | #define GPIO_E740_PCMCIA_RDY0 11 | ||
27 | #define GPIO_E740_PCMCIA_RDY1 6 | ||
28 | #define GPIO_E740_PCMCIA_RST0 27 | ||
29 | #define GPIO_E740_PCMCIA_RST1 24 | ||
30 | #define GPIO_E740_PCMCIA_PWR0 20 | ||
31 | #define GPIO_E740_PCMCIA_PWR1 23 | ||
32 | |||
33 | /* e750 PCMCIA GPIO definitions */ | ||
34 | #define GPIO_E750_PCMCIA_CD0 8 | ||
35 | #define GPIO_E750_PCMCIA_RDY0 12 | ||
36 | #define GPIO_E750_PCMCIA_RST0 27 | ||
37 | #define GPIO_E750_PCMCIA_PWR0 20 | ||
38 | |||
39 | /* e800 PCMCIA GPIO definitions */ | ||
40 | #define GPIO_E800_PCMCIA_RST0 69 | ||
41 | #define GPIO_E800_PCMCIA_RST1 72 | ||
42 | #define GPIO_E800_PCMCIA_PWR0 20 | ||
43 | #define GPIO_E800_PCMCIA_PWR1 73 | ||
44 | |||
45 | /* e7xx IrDA power control */ | ||
46 | #define GPIO_E7XX_IR_ON 38 | ||
47 | |||
48 | /* ASIC related GPIOs */ | ||
49 | #define GPIO_ESERIES_TMIO_IRQ 5 | ||
50 | #define GPIO_E800_ANGELX_IRQ 8 | ||
diff --git a/include/asm-arm/arch-pxa/eseries-irq.h b/include/asm-arm/arch-pxa/eseries-irq.h new file mode 100644 index 000000000000..f2a93d5e31d3 --- /dev/null +++ b/include/asm-arm/arch-pxa/eseries-irq.h | |||
@@ -0,0 +1,27 @@ | |||
1 | /* | ||
2 | * eseries-irq.h | ||
3 | * | ||
4 | * Copyright (C) Ian Molton <spyro@f2s.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #define ANGELX_IRQ_BASE (IRQ_BOARD_START+8) | ||
13 | #define IRQ_ANGELX(n) (ANGELX_IRQ_BASE + (n)) | ||
14 | |||
15 | #define ANGELX_RDY0_IRQ IRQ_ANGELX(0) | ||
16 | #define ANGELX_ST0_IRQ IRQ_ANGELX(1) | ||
17 | #define ANGELX_CD0_IRQ IRQ_ANGELX(2) | ||
18 | #define ANGELX_RDY1_IRQ IRQ_ANGELX(3) | ||
19 | #define ANGELX_ST1_IRQ IRQ_ANGELX(4) | ||
20 | #define ANGELX_CD1_IRQ IRQ_ANGELX(5) | ||
21 | |||
22 | #define TMIO_IRQ_BASE (IRQ_BOARD_START+0) | ||
23 | #define IRQ_TMIO(n) (TMIO_IRQ_BASE + (n)) | ||
24 | |||
25 | #define TMIO_SD_IRQ IRQ_TMIO(1) | ||
26 | #define TMIO_USB_IRQ IRQ_TMIO(2) | ||
27 | |||
diff --git a/include/asm-arm/arch-pxa/hardware.h b/include/asm-arm/arch-pxa/hardware.h index d9af6dabc899..979a45695d7d 100644 --- a/include/asm-arm/arch-pxa/hardware.h +++ b/include/asm-arm/arch-pxa/hardware.h | |||
@@ -69,6 +69,12 @@ | |||
69 | _id == 0x212; \ | 69 | _id == 0x212; \ |
70 | }) | 70 | }) |
71 | 71 | ||
72 | #define __cpu_is_pxa255(id) \ | ||
73 | ({ \ | ||
74 | unsigned int _id = (id) >> 4 & 0xfff; \ | ||
75 | _id == 0x2d0; \ | ||
76 | }) | ||
77 | |||
72 | #define __cpu_is_pxa25x(id) \ | 78 | #define __cpu_is_pxa25x(id) \ |
73 | ({ \ | 79 | ({ \ |
74 | unsigned int _id = (id) >> 4 & 0xfff; \ | 80 | unsigned int _id = (id) >> 4 & 0xfff; \ |
@@ -76,6 +82,7 @@ | |||
76 | }) | 82 | }) |
77 | #else | 83 | #else |
78 | #define __cpu_is_pxa21x(id) (0) | 84 | #define __cpu_is_pxa21x(id) (0) |
85 | #define __cpu_is_pxa255(id) (0) | ||
79 | #define __cpu_is_pxa25x(id) (0) | 86 | #define __cpu_is_pxa25x(id) (0) |
80 | #endif | 87 | #endif |
81 | 88 | ||
@@ -119,11 +126,26 @@ | |||
119 | #define __cpu_is_pxa320(id) (0) | 126 | #define __cpu_is_pxa320(id) (0) |
120 | #endif | 127 | #endif |
121 | 128 | ||
129 | #ifdef CONFIG_CPU_PXA930 | ||
130 | #define __cpu_is_pxa930(id) \ | ||
131 | ({ \ | ||
132 | unsigned int _id = (id) >> 4 & 0xfff; \ | ||
133 | _id == 0x683; \ | ||
134 | }) | ||
135 | #else | ||
136 | #define __cpu_is_pxa930(id) (0) | ||
137 | #endif | ||
138 | |||
122 | #define cpu_is_pxa21x() \ | 139 | #define cpu_is_pxa21x() \ |
123 | ({ \ | 140 | ({ \ |
124 | __cpu_is_pxa21x(read_cpuid_id()); \ | 141 | __cpu_is_pxa21x(read_cpuid_id()); \ |
125 | }) | 142 | }) |
126 | 143 | ||
144 | #define cpu_is_pxa255() \ | ||
145 | ({ \ | ||
146 | __cpu_is_pxa255(read_cpuid_id()); \ | ||
147 | }) | ||
148 | |||
127 | #define cpu_is_pxa25x() \ | 149 | #define cpu_is_pxa25x() \ |
128 | ({ \ | 150 | ({ \ |
129 | __cpu_is_pxa25x(read_cpuid_id()); \ | 151 | __cpu_is_pxa25x(read_cpuid_id()); \ |
@@ -149,6 +171,12 @@ | |||
149 | __cpu_is_pxa320(read_cpuid_id()); \ | 171 | __cpu_is_pxa320(read_cpuid_id()); \ |
150 | }) | 172 | }) |
151 | 173 | ||
174 | #define cpu_is_pxa930() \ | ||
175 | ({ \ | ||
176 | unsigned int id = read_cpuid(CPUID_ID); \ | ||
177 | __cpu_is_pxa930(id); \ | ||
178 | }) | ||
179 | |||
152 | /* | 180 | /* |
153 | * CPUID Core Generation Bit | 181 | * CPUID Core Generation Bit |
154 | * <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x | 182 | * <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x |
@@ -196,6 +224,11 @@ extern void pxa_gpio_set_value(unsigned gpio, int value); | |||
196 | */ | 224 | */ |
197 | extern unsigned int get_memclk_frequency_10khz(void); | 225 | extern unsigned int get_memclk_frequency_10khz(void); |
198 | 226 | ||
227 | /* | ||
228 | * register GPIO as reset generator | ||
229 | */ | ||
230 | extern int init_gpio_reset(int gpio); | ||
231 | |||
199 | #endif | 232 | #endif |
200 | 233 | ||
201 | #if defined(CONFIG_MACH_ARMCORE) && defined(CONFIG_PCI) | 234 | #if defined(CONFIG_MACH_ARMCORE) && defined(CONFIG_PCI) |
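The new __cpu_is_pxa255()/__cpu_is_pxa930() helpers compare bits [15:4] of the CPUID against a part number (0x2d0 and 0x683 respectively); a stand-alone sketch of that decoding, with purely illustrative ID values:

#include <stdio.h>

/* Bits [15:4] of the ARM CPUID hold the part number tested by the
 * __cpu_is_pxa*() macros above (0x2d0 = PXA255, 0x683 = PXA930). */
static unsigned int cpuid_part(unsigned int id)
{
        return (id >> 4) & 0xfff;
}

int main(void)
{
        /* Illustrative CPUID values, not taken from real silicon. */
        unsigned int ids[] = { 0x69052d06, 0x56056830 };
        int i;

        for (i = 0; i < 2; i++)
                printf("id=0x%08x part=0x%03x pxa255=%d pxa930=%d\n",
                       ids[i], cpuid_part(ids[i]),
                       cpuid_part(ids[i]) == 0x2d0,
                       cpuid_part(ids[i]) == 0x683);
        return 0;
}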
diff --git a/include/asm-arm/arch-pxa/irqs.h b/include/asm-arm/arch-pxa/irqs.h index b6c8fe377683..9413121b0ed9 100644 --- a/include/asm-arm/arch-pxa/irqs.h +++ b/include/asm-arm/arch-pxa/irqs.h | |||
@@ -180,10 +180,13 @@ | |||
180 | #define NR_IRQS (IRQ_LOCOMO_SPI_TEND + 1) | 180 | #define NR_IRQS (IRQ_LOCOMO_SPI_TEND + 1) |
181 | #elif defined(CONFIG_ARCH_LUBBOCK) || \ | 181 | #elif defined(CONFIG_ARCH_LUBBOCK) || \ |
182 | defined(CONFIG_MACH_LOGICPD_PXA270) || \ | 182 | defined(CONFIG_MACH_LOGICPD_PXA270) || \ |
183 | defined(CONFIG_MACH_TOSA) || \ | ||
183 | defined(CONFIG_MACH_MAINSTONE) || \ | 184 | defined(CONFIG_MACH_MAINSTONE) || \ |
184 | defined(CONFIG_MACH_PCM027) || \ | 185 | defined(CONFIG_MACH_PCM027) || \ |
185 | defined(CONFIG_MACH_MAGICIAN) | 186 | defined(CONFIG_MACH_MAGICIAN) |
186 | #define NR_IRQS (IRQ_BOARD_END) | 187 | #define NR_IRQS (IRQ_BOARD_END) |
188 | #elif defined(CONFIG_MACH_ZYLONITE) | ||
189 | #define NR_IRQS (IRQ_BOARD_START + 32) | ||
187 | #else | 190 | #else |
188 | #define NR_IRQS (IRQ_BOARD_START) | 191 | #define NR_IRQS (IRQ_BOARD_START) |
189 | #endif | 192 | #endif |
diff --git a/include/asm-arm/arch-pxa/mfp-pxa2xx.h b/include/asm-arm/arch-pxa/mfp-pxa2xx.h index db8d890d237c..8de1c0dae624 100644 --- a/include/asm-arm/arch-pxa/mfp-pxa2xx.h +++ b/include/asm-arm/arch-pxa/mfp-pxa2xx.h | |||
@@ -128,5 +128,6 @@ | |||
128 | #define GPIO84_GPIO MFP_CFG_IN(GPIO84, AF0) | 128 | #define GPIO84_GPIO MFP_CFG_IN(GPIO84, AF0) |
129 | 129 | ||
130 | extern void pxa2xx_mfp_config(unsigned long *mfp_cfgs, int num); | 130 | extern void pxa2xx_mfp_config(unsigned long *mfp_cfgs, int num); |
131 | extern void pxa2xx_mfp_set_lpm(int mfp, unsigned long lpm); | ||
131 | extern int gpio_set_wake(unsigned int gpio, unsigned int on); | 132 | extern int gpio_set_wake(unsigned int gpio, unsigned int on); |
132 | #endif /* __ASM_ARCH_MFP_PXA2XX_H */ | 133 | #endif /* __ASM_ARCH_MFP_PXA2XX_H */ |
diff --git a/include/asm-arm/arch-pxa/mfp-pxa930.h b/include/asm-arm/arch-pxa/mfp-pxa930.h new file mode 100644 index 000000000000..c4e945ab1923 --- /dev/null +++ b/include/asm-arm/arch-pxa/mfp-pxa930.h | |||
@@ -0,0 +1,491 @@ | |||
1 | /* | ||
2 | * linux/include/asm-arm/arch-pxa/mfp-pxa930.h | ||
3 | * | ||
4 | * PXA930 specific MFP configuration definitions | ||
5 | * | ||
6 | * Copyright (C) 2007-2008 Marvell International Ltd. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #ifndef __ASM_ARCH_MFP_PXA9xx_H | ||
14 | #define __ASM_ARCH_MFP_PXA9xx_H | ||
15 | |||
16 | #include <asm/arch/mfp.h> | ||
17 | #include <asm/arch/mfp-pxa3xx.h> | ||
18 | |||
19 | /* GPIO */ | ||
20 | #define GPIO46_GPIO MFP_CFG(GPIO46, AF0) | ||
21 | #define GPIO49_GPIO MFP_CFG(GPIO49, AF0) | ||
22 | #define GPIO50_GPIO MFP_CFG(GPIO50, AF0) | ||
23 | #define GPIO51_GPIO MFP_CFG(GPIO51, AF0) | ||
24 | #define GPIO52_GPIO MFP_CFG(GPIO52, AF0) | ||
25 | #define GPIO56_GPIO MFP_CFG(GPIO56, AF0) | ||
26 | #define GPIO58_GPIO MFP_CFG(GPIO58, AF0) | ||
27 | #define GPIO59_GPIO MFP_CFG(GPIO59, AF0) | ||
28 | #define GPIO60_GPIO MFP_CFG(GPIO60, AF0) | ||
29 | #define GPIO61_GPIO MFP_CFG(GPIO61, AF0) | ||
30 | #define GPIO62_GPIO MFP_CFG(GPIO62, AF0) | ||
31 | |||
32 | #define GSIM_UCLK_GPIO_79 MFP_CFG(GSIM_UCLK, AF0) | ||
33 | #define GSIM_UIO_GPIO_80 MFP_CFG(GSIM_UIO, AF0) | ||
34 | #define GSIM_nURST_GPIO_81 MFP_CFG(GSIM_nURST, AF0) | ||
35 | #define GSIM_UDET_GPIO_82 MFP_CFG(GSIM_UDET, AF0) | ||
36 | |||
37 | #define DF_IO15_GPIO_28 MFP_CFG(DF_IO15, AF0) | ||
38 | #define DF_IO14_GPIO_29 MFP_CFG(DF_IO14, AF0) | ||
39 | #define DF_IO13_GPIO_30 MFP_CFG(DF_IO13, AF0) | ||
40 | #define DF_IO12_GPIO_31 MFP_CFG(DF_IO12, AF0) | ||
41 | #define DF_IO11_GPIO_32 MFP_CFG(DF_IO11, AF0) | ||
42 | #define DF_IO10_GPIO_33 MFP_CFG(DF_IO10, AF0) | ||
43 | #define DF_IO9_GPIO_34 MFP_CFG(DF_IO9, AF0) | ||
44 | #define DF_IO8_GPIO_35 MFP_CFG(DF_IO8, AF0) | ||
45 | #define DF_IO7_GPIO_36 MFP_CFG(DF_IO7, AF0) | ||
46 | #define DF_IO6_GPIO_37 MFP_CFG(DF_IO6, AF0) | ||
47 | #define DF_IO5_GPIO_38 MFP_CFG(DF_IO5, AF0) | ||
48 | #define DF_IO4_GPIO_39 MFP_CFG(DF_IO4, AF0) | ||
49 | #define DF_IO3_GPIO_40 MFP_CFG(DF_IO3, AF0) | ||
50 | #define DF_IO2_GPIO_41 MFP_CFG(DF_IO2, AF0) | ||
51 | #define DF_IO1_GPIO_42 MFP_CFG(DF_IO1, AF0) | ||
52 | #define DF_IO0_GPIO_43 MFP_CFG(DF_IO0, AF0) | ||
53 | #define DF_nCS0_GPIO_44 MFP_CFG(DF_nCS0, AF0) | ||
54 | #define DF_nCS1_GPIO_45 MFP_CFG(DF_nCS1, AF0) | ||
55 | #define DF_nWE_GPIO_46 MFP_CFG(DF_nWE, AF0) | ||
56 | #define DF_nRE_nOE_GPIO_47 MFP_CFG(DF_nRE_nOE, AF0) | ||
57 | #define DF_CLE_nOE_GPIO_48 MFP_CFG(DF_CLE_nOE, AF0) | ||
58 | #define DF_nADV1_ALE_GPIO_49 MFP_CFG(DF_nADV1_ALE, AF0) | ||
59 | #define DF_nADV2_ALE_GPIO_50 MFP_CFG(DF_nADV2_ALE, AF0) | ||
60 | #define DF_INT_RnB_GPIO_51 MFP_CFG(DF_INT_RnB, AF0) | ||
61 | #define DF_SCLK_E_GPIO_52 MFP_CFG(DF_SCLK_E, AF0) | ||
62 | |||
63 | #define DF_ADDR0_GPIO_53 MFP_CFG(DF_ADDR0, AF0) | ||
64 | #define DF_ADDR1_GPIO_54 MFP_CFG(DF_ADDR1, AF0) | ||
65 | #define DF_ADDR2_GPIO_55 MFP_CFG(DF_ADDR2, AF0) | ||
66 | #define DF_ADDR3_GPIO_56 MFP_CFG(DF_ADDR3, AF0) | ||
67 | #define nXCVREN_GPIO_57 MFP_CFG(nXCVREN, AF0) | ||
68 | #define nLUA_GPIO_58 MFP_CFG(nLUA, AF0) | ||
69 | #define nLLA_GPIO_59 MFP_CFG(nLLA, AF0) | ||
70 | #define nBE0_GPIO_60 MFP_CFG(nBE0, AF0) | ||
71 | #define nBE1_GPIO_61 MFP_CFG(nBE1, AF0) | ||
72 | #define RDY_GPIO_62 MFP_CFG(RDY, AF0) | ||
73 | |||
74 | /* Chip Select */ | ||
75 | #define DF_nCS0_nCS2 MFP_CFG_LPM(DF_nCS0, AF3, PULL_HIGH) | ||
76 | #define DF_nCS1_nCS3 MFP_CFG_LPM(DF_nCS1, AF3, PULL_HIGH) | ||
77 | |||
78 | /* AC97 */ | ||
79 | #define GPIO83_BAC97_SYSCLK MFP_CFG(GPIO83, AF3) | ||
80 | #define GPIO84_BAC97_SDATA_IN0 MFP_CFG(GPIO84, AF3) | ||
81 | #define GPIO85_BAC97_BITCLK MFP_CFG(GPIO85, AF3) | ||
82 | #define GPIO86_BAC97_nRESET MFP_CFG(GPIO86, AF3) | ||
83 | #define GPIO87_BAC97_SYNC MFP_CFG(GPIO87, AF3) | ||
84 | #define GPIO88_BAC97_SDATA_OUT MFP_CFG(GPIO88, AF3) | ||
85 | |||
86 | /* I2C */ | ||
87 | #define GPIO39_CI2C_SCL MFP_CFG_LPM(GPIO39, AF3, PULL_HIGH) | ||
88 | #define GPIO40_CI2C_SDA MFP_CFG_LPM(GPIO40, AF3, PULL_HIGH) | ||
89 | |||
90 | #define GPIO51_CI2C_SCL MFP_CFG_LPM(GPIO51, AF3, PULL_HIGH) | ||
91 | #define GPIO52_CI2C_SDA MFP_CFG_LPM(GPIO52, AF3, PULL_HIGH) | ||
92 | |||
93 | #define GPIO63_CI2C_SCL MFP_CFG_LPM(GPIO63, AF4, PULL_HIGH) | ||
94 | #define GPIO64_CI2C_SDA MFP_CFG_LPM(GPIO64, AF4, PULL_HIGH) | ||
95 | |||
96 | #define GPIO77_CI2C_SCL MFP_CFG_LPM(GPIO77, AF2, PULL_HIGH) | ||
97 | #define GPIO78_CI2C_SDA MFP_CFG_LPM(GPIO78, AF2, PULL_HIGH) | ||
98 | |||
99 | #define GPIO89_CI2C_SCL MFP_CFG_LPM(GPIO89, AF1, PULL_HIGH) | ||
100 | #define GPIO90_CI2C_SDA MFP_CFG_LPM(GPIO90, AF1, PULL_HIGH) | ||
101 | |||
102 | #define GPIO95_CI2C_SCL MFP_CFG_LPM(GPIO95, AF1, PULL_HIGH) | ||
103 | #define GPIO96_CI2C_SDA MFP_CFG_LPM(GPIO96, AF1, PULL_HIGH) | ||
104 | |||
105 | #define GPIO97_CI2C_SCL MFP_CFG_LPM(GPIO97, AF3, PULL_HIGH) | ||
106 | #define GPIO98_CI2C_SDA MFP_CFG_LPM(GPIO98, AF3, PULL_HIGH) | ||
107 | |||
108 | /* QCI */ | ||
109 | #define GPIO63_CI_DD_9 MFP_CFG_LPM(GPIO63, AF1, PULL_LOW) | ||
110 | #define GPIO64_CI_DD_8 MFP_CFG_LPM(GPIO64, AF1, PULL_LOW) | ||
111 | #define GPIO65_CI_DD_7 MFP_CFG_LPM(GPIO65, AF1, PULL_LOW) | ||
112 | #define GPIO66_CI_DD_6 MFP_CFG_LPM(GPIO66, AF1, PULL_LOW) | ||
113 | #define GPIO67_CI_DD_5 MFP_CFG_LPM(GPIO67, AF1, PULL_LOW) | ||
114 | #define GPIO68_CI_DD_4 MFP_CFG_LPM(GPIO68, AF1, PULL_LOW) | ||
115 | #define GPIO69_CI_DD_3 MFP_CFG_LPM(GPIO69, AF1, PULL_LOW) | ||
116 | #define GPIO70_CI_DD_2 MFP_CFG_LPM(GPIO70, AF1, PULL_LOW) | ||
117 | #define GPIO71_CI_DD_1 MFP_CFG_LPM(GPIO71, AF1, PULL_LOW) | ||
118 | #define GPIO72_CI_DD_0 MFP_CFG_LPM(GPIO72, AF1, PULL_LOW) | ||
119 | #define GPIO73_CI_HSYNC MFP_CFG_LPM(GPIO73, AF1, PULL_LOW) | ||
120 | #define GPIO74_CI_VSYNC MFP_CFG_LPM(GPIO74, AF1, PULL_LOW) | ||
121 | #define GPIO75_CI_MCLK MFP_CFG_LPM(GPIO75, AF1, PULL_LOW) | ||
122 | #define GPIO76_CI_PCLK MFP_CFG_LPM(GPIO76, AF1, PULL_LOW) | ||
123 | |||
124 | /* KEYPAD */ | ||
125 | #define GPIO4_KP_DKIN_4 MFP_CFG_LPM(GPIO4, AF3, FLOAT) | ||
126 | #define GPIO5_KP_DKIN_5 MFP_CFG_LPM(GPIO5, AF3, FLOAT) | ||
127 | #define GPIO6_KP_DKIN_6 MFP_CFG_LPM(GPIO6, AF3, FLOAT) | ||
128 | #define GPIO7_KP_DKIN_7 MFP_CFG_LPM(GPIO7, AF3, FLOAT) | ||
129 | #define GPIO8_KP_DKIN_4 MFP_CFG_LPM(GPIO8, AF3, FLOAT) | ||
130 | #define GPIO9_KP_DKIN_5 MFP_CFG_LPM(GPIO9, AF3, FLOAT) | ||
131 | #define GPIO10_KP_DKIN_6 MFP_CFG_LPM(GPIO10, AF3, FLOAT) | ||
132 | #define GPIO11_KP_DKIN_7 MFP_CFG_LPM(GPIO11, AF3, FLOAT) | ||
133 | |||
134 | #define GPIO12_KP_DKIN_0 MFP_CFG_LPM(GPIO12, AF2, FLOAT) | ||
135 | #define GPIO13_KP_DKIN_1 MFP_CFG_LPM(GPIO13, AF2, FLOAT) | ||
136 | #define GPIO14_KP_DKIN_2 MFP_CFG_LPM(GPIO14, AF2, FLOAT) | ||
137 | #define GPIO15_KP_DKIN_3 MFP_CFG_LPM(GPIO15, AF2, FLOAT) | ||
138 | |||
139 | #define GPIO41_KP_DKIN_0 MFP_CFG_LPM(GPIO41, AF2, FLOAT) | ||
140 | #define GPIO42_KP_DKIN_1 MFP_CFG_LPM(GPIO42, AF2, FLOAT) | ||
141 | #define GPIO43_KP_DKIN_2 MFP_CFG_LPM(GPIO43, AF2, FLOAT) | ||
142 | #define GPIO44_KP_DKIN_3 MFP_CFG_LPM(GPIO44, AF2, FLOAT) | ||
143 | #define GPIO41_KP_DKIN_4 MFP_CFG_LPM(GPIO41, AF4, FLOAT) | ||
144 | #define GPIO42_KP_DKIN_5 MFP_CFG_LPM(GPIO42, AF4, FLOAT) | ||
145 | |||
146 | #define GPIO0_KP_MKIN_0 MFP_CFG_LPM(GPIO0, AF1, FLOAT) | ||
147 | #define GPIO2_KP_MKIN_1 MFP_CFG_LPM(GPIO2, AF1, FLOAT) | ||
148 | #define GPIO4_KP_MKIN_2 MFP_CFG_LPM(GPIO4, AF1, FLOAT) | ||
149 | #define GPIO6_KP_MKIN_3 MFP_CFG_LPM(GPIO6, AF1, FLOAT) | ||
150 | #define GPIO8_KP_MKIN_4 MFP_CFG_LPM(GPIO8, AF1, FLOAT) | ||
151 | #define GPIO10_KP_MKIN_5 MFP_CFG_LPM(GPIO10, AF1, FLOAT) | ||
152 | #define GPIO12_KP_MKIN_6 MFP_CFG_LPM(GPIO12, AF1, FLOAT) | ||
153 | #define GPIO14_KP_MKIN_7 MFP_CFG(GPIO14, AF1) | ||
154 | #define GPIO35_KP_MKIN_5 MFP_CFG(GPIO35, AF4) | ||
155 | |||
156 | #define GPIO1_KP_MKOUT_0 MFP_CFG_LPM(GPIO1, AF1, DRIVE_HIGH) | ||
157 | #define GPIO3_KP_MKOUT_1 MFP_CFG_LPM(GPIO3, AF1, DRIVE_HIGH) | ||
158 | #define GPIO5_KP_MKOUT_2 MFP_CFG_LPM(GPIO5, AF1, DRIVE_HIGH) | ||
159 | #define GPIO7_KP_MKOUT_3 MFP_CFG_LPM(GPIO7, AF1, DRIVE_HIGH) | ||
160 | #define GPIO9_KP_MKOUT_4 MFP_CFG_LPM(GPIO9, AF1, DRIVE_HIGH) | ||
161 | #define GPIO11_KP_MKOUT_5 MFP_CFG_LPM(GPIO11, AF1, DRIVE_HIGH) | ||
162 | #define GPIO13_KP_MKOUT_6 MFP_CFG_LPM(GPIO13, AF1, DRIVE_HIGH) | ||
163 | #define GPIO15_KP_MKOUT_7 MFP_CFG_LPM(GPIO15, AF1, DRIVE_HIGH) | ||
164 | #define GPIO36_KP_MKOUT_5 MFP_CFG_LPM(GPIO36, AF4, DRIVE_HIGH) | ||
165 | |||
166 | /* LCD */ | ||
167 | #define GPIO17_LCD_FCLK_RD MFP_CFG(GPIO17, AF1) | ||
168 | #define GPIO18_LCD_LCLK_A0 MFP_CFG(GPIO18, AF1) | ||
169 | #define GPIO19_LCD_PCLK_WR MFP_CFG(GPIO19, AF1) | ||
170 | #define GPIO20_LCD_BIAS MFP_CFG(GPIO20, AF1) | ||
171 | #define GPIO21_LCD_CS MFP_CFG(GPIO21, AF1) | ||
172 | #define GPIO22_LCD_CS2 MFP_CFG(GPIO22, AF2) | ||
173 | #define GPIO22_LCD_VSYNC MFP_CFG(GPIO22, AF1) | ||
174 | #define GPIO23_LCD_DD0 MFP_CFG(GPIO23, AF1) | ||
175 | #define GPIO24_LCD_DD1 MFP_CFG(GPIO24, AF1) | ||
176 | #define GPIO25_LCD_DD2 MFP_CFG(GPIO25, AF1) | ||
177 | #define GPIO26_LCD_DD3 MFP_CFG(GPIO26, AF1) | ||
178 | #define GPIO27_LCD_DD4 MFP_CFG(GPIO27, AF1) | ||
179 | #define GPIO28_LCD_DD5 MFP_CFG(GPIO28, AF1) | ||
180 | #define GPIO29_LCD_DD6 MFP_CFG(GPIO29, AF1) | ||
181 | #define GPIO30_LCD_DD7 MFP_CFG(GPIO30, AF1) | ||
182 | #define GPIO31_LCD_DD8 MFP_CFG(GPIO31, AF1) | ||
183 | #define GPIO32_LCD_DD9 MFP_CFG(GPIO32, AF1) | ||
184 | #define GPIO33_LCD_DD10 MFP_CFG(GPIO33, AF1) | ||
185 | #define GPIO34_LCD_DD11 MFP_CFG(GPIO34, AF1) | ||
186 | #define GPIO35_LCD_DD12 MFP_CFG(GPIO35, AF1) | ||
187 | #define GPIO36_LCD_DD13 MFP_CFG(GPIO36, AF1) | ||
188 | #define GPIO37_LCD_DD14 MFP_CFG(GPIO37, AF1) | ||
189 | #define GPIO38_LCD_DD15 MFP_CFG(GPIO38, AF1) | ||
190 | #define GPIO39_LCD_DD16 MFP_CFG(GPIO39, AF1) | ||
191 | #define GPIO40_LCD_DD17 MFP_CFG(GPIO40, AF1) | ||
192 | #define GPIO41_LCD_CS2 MFP_CFG(GPIO41, AF3) | ||
193 | #define GPIO42_LCD_VSYNC2 MFP_CFG(GPIO42, AF3) | ||
194 | #define GPIO44_LCD_DD7 MFP_CFG(GPIO44, AF1) | ||
195 | |||
196 | /* Mini-LCD */ | ||
197 | #define GPIO17_MLCD_FCLK MFP_CFG(GPIO17, AF3) | ||
198 | #define GPIO18_MLCD_LCLK MFP_CFG(GPIO18, AF3) | ||
199 | #define GPIO19_MLCD_PCLK MFP_CFG(GPIO19, AF3) | ||
200 | #define GPIO20_MLCD_BIAS MFP_CFG(GPIO20, AF3) | ||
201 | #define GPIO23_MLCD_DD0 MFP_CFG(GPIO23, AF3) | ||
202 | #define GPIO24_MLCD_DD1 MFP_CFG(GPIO24, AF3) | ||
203 | #define GPIO25_MLCD_DD2 MFP_CFG(GPIO25, AF3) | ||
204 | #define GPIO26_MLCD_DD3 MFP_CFG(GPIO26, AF3) | ||
205 | #define GPIO27_MLCD_DD4 MFP_CFG(GPIO27, AF3) | ||
206 | #define GPIO28_MLCD_DD5 MFP_CFG(GPIO28, AF3) | ||
207 | #define GPIO29_MLCD_DD6 MFP_CFG(GPIO29, AF3) | ||
208 | #define GPIO30_MLCD_DD7 MFP_CFG(GPIO30, AF3) | ||
209 | #define GPIO31_MLCD_DD8 MFP_CFG(GPIO31, AF3) | ||
210 | #define GPIO32_MLCD_DD9 MFP_CFG(GPIO32, AF3) | ||
211 | #define GPIO33_MLCD_DD10 MFP_CFG(GPIO33, AF3) | ||
212 | #define GPIO34_MLCD_DD11 MFP_CFG(GPIO34, AF3) | ||
213 | #define GPIO35_MLCD_DD12 MFP_CFG(GPIO35, AF3) | ||
214 | #define GPIO36_MLCD_DD13 MFP_CFG(GPIO36, AF3) | ||
215 | #define GPIO37_MLCD_DD14 MFP_CFG(GPIO37, AF3) | ||
216 | #define GPIO38_MLCD_DD15 MFP_CFG(GPIO38, AF3) | ||
217 | #define GPIO44_MLCD_DD7 MFP_CFG(GPIO44, AF5) | ||
218 | |||
219 | /* MMC1 */ | ||
220 | #define GPIO10_MMC1_DAT3 MFP_CFG(GPIO10, AF4) | ||
221 | #define GPIO11_MMC1_DAT2 MFP_CFG(GPIO11, AF4) | ||
222 | #define GPIO12_MMC1_DAT1 MFP_CFG(GPIO12, AF4) | ||
223 | #define GPIO13_MMC1_DAT0 MFP_CFG(GPIO13, AF4) | ||
224 | #define GPIO14_MMC1_CMD MFP_CFG(GPIO14, AF4) | ||
225 | #define GPIO15_MMC1_CLK MFP_CFG(GPIO15, AF4) | ||
226 | #define GPIO55_MMC1_CMD MFP_CFG(GPIO55, AF3) | ||
227 | #define GPIO56_MMC1_CLK MFP_CFG(GPIO56, AF3) | ||
228 | #define GPIO57_MMC1_DAT0 MFP_CFG(GPIO57, AF3) | ||
229 | #define GPIO58_MMC1_DAT1 MFP_CFG(GPIO58, AF3) | ||
230 | #define GPIO59_MMC1_DAT2 MFP_CFG(GPIO59, AF3) | ||
231 | #define GPIO60_MMC1_DAT3 MFP_CFG(GPIO60, AF3) | ||
232 | |||
233 | #define DF_ADDR0_MMC1_CLK MFP_CFG(DF_ADDR0, AF2) | ||
234 | #define DF_ADDR1_MMC1_CMD MFP_CFG(DF_ADDR1, AF2) | ||
235 | #define DF_ADDR2_MMC1_DAT0 MFP_CFG(DF_ADDR2, AF2) | ||
236 | #define DF_ADDR3_MMC1_DAT1 MFP_CFG(DF_ADDR3, AF3) | ||
237 | #define nXCVREN_MMC1_DAT2 MFP_CFG(nXCVREN, AF2) | ||
238 | |||
239 | /* MMC2 */ | ||
240 | #define GPIO31_MMC2_CMD MFP_CFG(GPIO31, AF7) | ||
241 | #define GPIO32_MMC2_CLK MFP_CFG(GPIO32, AF7) | ||
242 | #define GPIO33_MMC2_DAT0 MFP_CFG(GPIO33, AF7) | ||
243 | #define GPIO34_MMC2_DAT1 MFP_CFG(GPIO34, AF7) | ||
244 | #define GPIO35_MMC2_DAT2 MFP_CFG(GPIO35, AF7) | ||
245 | #define GPIO36_MMC2_DAT3 MFP_CFG(GPIO36, AF7) | ||
246 | |||
247 | #define GPIO101_MMC2_DAT3 MFP_CFG(GPIO101, AF1) | ||
248 | #define GPIO102_MMC2_DAT2 MFP_CFG(GPIO102, AF1) | ||
249 | #define GPIO103_MMC2_DAT1 MFP_CFG(GPIO103, AF1) | ||
250 | #define GPIO104_MMC2_DAT0 MFP_CFG(GPIO104, AF1) | ||
251 | #define GPIO105_MMC2_CMD MFP_CFG(GPIO105, AF1) | ||
252 | #define GPIO106_MMC2_CLK MFP_CFG(GPIO106, AF1) | ||
253 | |||
254 | #define DF_IO10_MMC2_DAT3 MFP_CFG(DF_IO10, AF3) | ||
255 | #define DF_IO11_MMC2_DAT2 MFP_CFG(DF_IO11, AF3) | ||
256 | #define DF_IO12_MMC2_DAT1 MFP_CFG(DF_IO12, AF3) | ||
257 | #define DF_IO13_MMC2_DAT0 MFP_CFG(DF_IO13, AF3) | ||
258 | #define DF_IO14_MMC2_CLK MFP_CFG(DF_IO14, AF3) | ||
259 | #define DF_IO15_MMC2_CMD MFP_CFG(DF_IO15, AF3) | ||
260 | |||
261 | /* BSSP1 */ | ||
262 | #define GPIO12_BSSP1_CLK MFP_CFG(GPIO12, AF3) | ||
263 | #define GPIO13_BSSP1_FRM MFP_CFG(GPIO13, AF3) | ||
264 | #define GPIO14_BSSP1_RXD MFP_CFG(GPIO14, AF3) | ||
265 | #define GPIO15_BSSP1_TXD MFP_CFG(GPIO15, AF3) | ||
266 | #define GPIO97_BSSP1_CLK MFP_CFG(GPIO97, AF5) | ||
267 | #define GPIO98_BSSP1_FRM MFP_CFG(GPIO98, AF5) | ||
268 | |||
269 | /* BSSP2 */ | ||
270 | #define GPIO84_BSSP2_SDATA_IN MFP_CFG(GPIO84, AF1) | ||
271 | #define GPIO85_BSSP2_BITCLK MFP_CFG(GPIO85, AF1) | ||
272 | #define GPIO86_BSSP2_SYSCLK MFP_CFG(GPIO86, AF1) | ||
273 | #define GPIO87_BSSP2_SYNC MFP_CFG(GPIO87, AF1) | ||
274 | #define GPIO88_BSSP2_DATA_OUT MFP_CFG(GPIO88, AF1) | ||
275 | #define GPIO86_BSSP2_SDATA_IN MFP_CFG(GPIO86, AF4) | ||
276 | |||
277 | /* BSSP3 */ | ||
278 | #define GPIO79_BSSP3_CLK MFP_CFG(GPIO79, AF1) | ||
279 | #define GPIO80_BSSP3_FRM MFP_CFG(GPIO80, AF1) | ||
280 | #define GPIO81_BSSP3_TXD MFP_CFG(GPIO81, AF1) | ||
281 | #define GPIO82_BSSP3_RXD MFP_CFG(GPIO82, AF1) | ||
282 | #define GPIO83_BSSP3_SYSCLK MFP_CFG(GPIO83, AF1) | ||
283 | |||
284 | /* BSSP4 */ | ||
285 | #define GPIO43_BSSP4_CLK MFP_CFG(GPIO43, AF4) | ||
286 | #define GPIO44_BSSP4_FRM MFP_CFG(GPIO44, AF4) | ||
287 | #define GPIO45_BSSP4_TXD MFP_CFG(GPIO45, AF4) | ||
288 | #define GPIO46_BSSP4_RXD MFP_CFG(GPIO46, AF4) | ||
289 | |||
290 | #define GPIO51_BSSP4_CLK MFP_CFG(GPIO51, AF4) | ||
291 | #define GPIO52_BSSP4_FRM MFP_CFG(GPIO52, AF4) | ||
292 | #define GPIO53_BSSP4_TXD MFP_CFG(GPIO53, AF4) | ||
293 | #define GPIO54_BSSP4_RXD MFP_CFG(GPIO54, AF4) | ||
294 | |||
295 | /* GSSP1 */ | ||
296 | #define GPIO79_GSSP1_CLK MFP_CFG(GPIO79, AF2) | ||
297 | #define GPIO80_GSSP1_FRM MFP_CFG(GPIO80, AF2) | ||
298 | #define GPIO81_GSSP1_TXD MFP_CFG(GPIO81, AF2) | ||
299 | #define GPIO82_GSSP1_RXD MFP_CFG(GPIO82, AF2) | ||
300 | #define GPIO83_GSSP1_SYSCLK MFP_CFG(GPIO83, AF2) | ||
301 | |||
302 | #define GPIO93_GSSP1_CLK MFP_CFG(GPIO93, AF4) | ||
303 | #define GPIO94_GSSP1_FRM MFP_CFG(GPIO94, AF4) | ||
304 | #define GPIO95_GSSP1_TXD MFP_CFG(GPIO95, AF4) | ||
305 | #define GPIO96_GSSP1_RXD MFP_CFG(GPIO96, AF4) | ||
306 | |||
307 | /* GSSP2 */ | ||
308 | #define GPIO47_GSSP2_CLK MFP_CFG(GPIO47, AF4) | ||
309 | #define GPIO48_GSSP2_FRM MFP_CFG(GPIO48, AF4) | ||
310 | #define GPIO49_GSSP2_RXD MFP_CFG(GPIO49, AF4) | ||
311 | #define GPIO50_GSSP2_TXD MFP_CFG(GPIO50, AF4) | ||
312 | |||
313 | #define GPIO69_GSSP2_CLK MFP_CFG(GPIO69, AF4) | ||
314 | #define GPIO70_GSSP2_FRM MFP_CFG(GPIO70, AF4) | ||
315 | #define GPIO71_GSSP2_RXD MFP_CFG(GPIO71, AF4) | ||
316 | #define GPIO72_GSSP2_TXD MFP_CFG(GPIO72, AF4) | ||
317 | |||
318 | #define GPIO84_GSSP2_RXD MFP_CFG(GPIO84, AF2) | ||
319 | #define GPIO85_GSSP2_CLK MFP_CFG(GPIO85, AF2) | ||
320 | #define GPIO86_GSSP2_SYSCLK MFP_CFG(GPIO86, AF2) | ||
321 | #define GPIO87_GSSP2_FRM MFP_CFG(GPIO87, AF2) | ||
322 | #define GPIO88_GSSP2_TXD MFP_CFG(GPIO88, AF2) | ||
323 | #define GPIO86_GSSP2_RXD MFP_CFG(GPIO86, AF5) | ||
324 | |||
325 | #define GPIO103_GSSP2_CLK MFP_CFG(GPIO103, AF2) | ||
326 | #define GPIO104_GSSP2_FRM MFP_CFG(GPIO104, AF2) | ||
327 | #define GPIO105_GSSP2_RXD MFP_CFG(GPIO105, AF2) | ||
328 | #define GPIO106_GSSP2_TXD MFP_CFG(GPIO106, AF2) | ||
329 | |||
330 | /* UART1 - FFUART */ | ||
331 | #define GPIO47_UART1_DSR_N MFP_CFG(GPIO47, AF1) | ||
332 | #define GPIO48_UART1_DTR_N MFP_CFG(GPIO48, AF1) | ||
333 | #define GPIO49_UART1_RI MFP_CFG(GPIO49, AF1) | ||
334 | #define GPIO50_UART1_DCD MFP_CFG(GPIO50, AF1) | ||
335 | #define GPIO51_UART1_CTS MFP_CFG(GPIO51, AF1) | ||
336 | #define GPIO52_UART1_RTS MFP_CFG(GPIO52, AF1) | ||
337 | #define GPIO53_UART1_RXD MFP_CFG(GPIO53, AF1) | ||
338 | #define GPIO54_UART1_TXD MFP_CFG(GPIO54, AF1) | ||
339 | |||
340 | #define GPIO63_UART1_TXD MFP_CFG(GPIO63, AF2) | ||
341 | #define GPIO64_UART1_RXD MFP_CFG(GPIO64, AF2) | ||
342 | #define GPIO65_UART1_DSR MFP_CFG(GPIO65, AF2) | ||
343 | #define GPIO66_UART1_DTR MFP_CFG(GPIO66, AF2) | ||
344 | #define GPIO67_UART1_RI MFP_CFG(GPIO67, AF2) | ||
345 | #define GPIO68_UART1_DCD MFP_CFG(GPIO68, AF2) | ||
346 | #define GPIO69_UART1_CTS MFP_CFG(GPIO69, AF2) | ||
347 | #define GPIO70_UART1_RTS MFP_CFG(GPIO70, AF2) | ||
348 | |||
349 | /* UART2 - BTUART */ | ||
350 | #define GPIO91_UART2_RXD MFP_CFG(GPIO91, AF1) | ||
351 | #define GPIO92_UART2_TXD MFP_CFG(GPIO92, AF1) | ||
352 | #define GPIO93_UART2_CTS MFP_CFG(GPIO93, AF1) | ||
353 | #define GPIO94_UART2_RTS MFP_CFG(GPIO94, AF1) | ||
354 | |||
355 | /* UART3 - STUART */ | ||
356 | #define GPIO43_UART3_RTS MFP_CFG(GPIO43, AF3) | ||
357 | #define GPIO44_UART3_CTS MFP_CFG(GPIO44, AF3) | ||
358 | #define GPIO45_UART3_RXD MFP_CFG(GPIO45, AF3) | ||
359 | #define GPIO46_UART3_TXD MFP_CFG(GPIO46, AF3) | ||
360 | |||
361 | #define GPIO75_UART3_RTS MFP_CFG(GPIO75, AF5) | ||
362 | #define GPIO76_UART3_CTS MFP_CFG(GPIO76, AF5) | ||
363 | #define GPIO77_UART3_TXD MFP_CFG(GPIO77, AF5) | ||
364 | #define GPIO78_UART3_RXD MFP_CFG(GPIO78, AF5) | ||
365 | |||
366 | /* DFI */ | ||
367 | #define DF_IO0_DF_IO0 MFP_CFG(DF_IO0, AF2) | ||
368 | #define DF_IO1_DF_IO1 MFP_CFG(DF_IO1, AF2) | ||
369 | #define DF_IO2_DF_IO2 MFP_CFG(DF_IO2, AF2) | ||
370 | #define DF_IO3_DF_IO3 MFP_CFG(DF_IO3, AF2) | ||
371 | #define DF_IO4_DF_IO4 MFP_CFG(DF_IO4, AF2) | ||
372 | #define DF_IO5_DF_IO5 MFP_CFG(DF_IO5, AF2) | ||
373 | #define DF_IO6_DF_IO6 MFP_CFG(DF_IO6, AF2) | ||
374 | #define DF_IO7_DF_IO7 MFP_CFG(DF_IO7, AF2) | ||
375 | #define DF_IO8_DF_IO8 MFP_CFG(DF_IO8, AF2) | ||
376 | #define DF_IO9_DF_IO9 MFP_CFG(DF_IO9, AF2) | ||
377 | #define DF_IO10_DF_IO10 MFP_CFG(DF_IO10, AF2) | ||
378 | #define DF_IO11_DF_IO11 MFP_CFG(DF_IO11, AF2) | ||
379 | #define DF_IO12_DF_IO12 MFP_CFG(DF_IO12, AF2) | ||
380 | #define DF_IO13_DF_IO13 MFP_CFG(DF_IO13, AF2) | ||
381 | #define DF_IO14_DF_IO14 MFP_CFG(DF_IO14, AF2) | ||
382 | #define DF_IO15_DF_IO15 MFP_CFG(DF_IO15, AF2) | ||
383 | #define DF_nADV1_ALE_DF_nADV1 MFP_CFG(DF_nADV1_ALE, AF2) | ||
384 | #define DF_nADV2_ALE_DF_nADV2 MFP_CFG(DF_nADV2_ALE, AF2) | ||
385 | #define DF_nCS0_DF_nCS0 MFP_CFG(DF_nCS0, AF2) | ||
386 | #define DF_nCS1_DF_nCS1 MFP_CFG(DF_nCS1, AF2) | ||
387 | #define DF_nRE_nOE_DF_nOE MFP_CFG(DF_nRE_nOE, AF2) | ||
388 | #define DF_nWE_DF_nWE MFP_CFG(DF_nWE, AF2) | ||
389 | |||
390 | /* DFI - NAND */ | ||
391 | #define DF_CLE_nOE_ND_CLE MFP_CFG_LPM(DF_CLE_nOE, AF1, PULL_HIGH) | ||
392 | #define DF_INT_RnB_ND_INT_RnB MFP_CFG_LPM(DF_INT_RnB, AF1, PULL_LOW) | ||
393 | #define DF_IO0_ND_IO0 MFP_CFG_LPM(DF_IO0, AF1, PULL_LOW) | ||
394 | #define DF_IO1_ND_IO1 MFP_CFG_LPM(DF_IO1, AF1, PULL_LOW) | ||
395 | #define DF_IO2_ND_IO2 MFP_CFG_LPM(DF_IO2, AF1, PULL_LOW) | ||
396 | #define DF_IO3_ND_IO3 MFP_CFG_LPM(DF_IO3, AF1, PULL_LOW) | ||
397 | #define DF_IO4_ND_IO4 MFP_CFG_LPM(DF_IO4, AF1, PULL_LOW) | ||
398 | #define DF_IO5_ND_IO5 MFP_CFG_LPM(DF_IO5, AF1, PULL_LOW) | ||
399 | #define DF_IO6_ND_IO6 MFP_CFG_LPM(DF_IO6, AF1, PULL_LOW) | ||
400 | #define DF_IO7_ND_IO7 MFP_CFG_LPM(DF_IO7, AF1, PULL_LOW) | ||
401 | #define DF_IO8_ND_IO8 MFP_CFG_LPM(DF_IO8, AF1, PULL_LOW) | ||
402 | #define DF_IO9_ND_IO9 MFP_CFG_LPM(DF_IO9, AF1, PULL_LOW) | ||
403 | #define DF_IO10_ND_IO10 MFP_CFG_LPM(DF_IO10, AF1, PULL_LOW) | ||
404 | #define DF_IO11_ND_IO11 MFP_CFG_LPM(DF_IO11, AF1, PULL_LOW) | ||
405 | #define DF_IO12_ND_IO12 MFP_CFG_LPM(DF_IO12, AF1, PULL_LOW) | ||
406 | #define DF_IO13_ND_IO13 MFP_CFG_LPM(DF_IO13, AF1, PULL_LOW) | ||
407 | #define DF_IO14_ND_IO14 MFP_CFG_LPM(DF_IO14, AF1, PULL_LOW) | ||
408 | #define DF_IO15_ND_IO15 MFP_CFG_LPM(DF_IO15, AF1, PULL_LOW) | ||
409 | #define DF_nADV1_ALE_ND_ALE MFP_CFG_LPM(DF_nADV1_ALE, AF1, PULL_HIGH) | ||
410 | #define DF_nADV2_ALE_ND_ALE MFP_CFG_LPM(DF_nADV2_ALE, AF1, PULL_HIGH) | ||
411 | #define DF_nADV2_ALE_nCS3 MFP_CFG_LPM(DF_nADV2_ALE, AF3, PULL_HIGH) | ||
412 | #define DF_nCS0_ND_nCS0 MFP_CFG_LPM(DF_nCS0, AF1, PULL_HIGH) | ||
413 | #define DF_nCS1_ND_nCS1 MFP_CFG_LPM(DF_nCS1, AF1, PULL_HIGH) | ||
414 | #define DF_nRE_nOE_ND_nRE MFP_CFG_LPM(DF_nRE_nOE, AF1, PULL_HIGH) | ||
415 | #define DF_nWE_ND_nWE MFP_CFG_LPM(DF_nWE, AF1, PULL_HIGH) | ||
416 | |||
417 | /* PWM */ | ||
418 | #define GPIO41_PWM0 MFP_CFG_LPM(GPIO41, AF1, PULL_LOW) | ||
419 | #define GPIO42_PWM1 MFP_CFG_LPM(GPIO42, AF1, PULL_LOW) | ||
420 | #define GPIO43_PWM3 MFP_CFG_LPM(GPIO43, AF1, PULL_LOW) | ||
421 | #define GPIO20_PWM0 MFP_CFG_LPM(GPIO20, AF2, PULL_LOW) | ||
422 | #define GPIO21_PWM2 MFP_CFG_LPM(GPIO21, AF3, PULL_LOW) | ||
423 | #define GPIO22_PWM3 MFP_CFG_LPM(GPIO22, AF3, PULL_LOW) | ||
424 | |||
425 | /* CIR */ | ||
426 | #define GPIO46_CIR_OUT MFP_CFG(GPIO46, AF1) | ||
427 | #define GPIO77_CIR_OUT MFP_CFG(GPIO77, AF3) | ||
428 | |||
429 | /* USB P2 */ | ||
430 | #define GPIO0_USB_P2_7 MFP_CFG(GPIO0, AF3) | ||
431 | #define GPIO15_USB_P2_7 MFP_CFG(GPIO15, AF5) | ||
432 | #define GPIO16_USB_P2_7 MFP_CFG(GPIO16, AF2) | ||
433 | #define GPIO48_USB_P2_7 MFP_CFG(GPIO48, AF7) | ||
434 | #define GPIO49_USB_P2_7 MFP_CFG(GPIO49, AF6) | ||
435 | #define DF_IO9_USB_P2_7 MFP_CFG(DF_IO9, AF3) | ||
436 | |||
437 | #define GPIO48_USB_P2_8 MFP_CFG(GPIO48, AF2) | ||
438 | #define GPIO50_USB_P2_7 MFP_CFG_X(GPIO50, AF2, DS02X, FLOAT) | ||
439 | #define GPIO51_USB_P2_5 MFP_CFG(GPIO51, AF2) | ||
440 | #define GPIO47_USB_P2_4 MFP_CFG(GPIO47, AF2) | ||
441 | #define GPIO53_USB_P2_3 MFP_CFG(GPIO53, AF2) | ||
442 | #define GPIO54_USB_P2_6 MFP_CFG(GPIO54, AF2) | ||
443 | #define GPIO49_USB_P2_2 MFP_CFG(GPIO49, AF2) | ||
444 | #define GPIO52_USB_P2_1 MFP_CFG(GPIO52, AF2) | ||
445 | |||
446 | #define GPIO63_USB_P2_8 MFP_CFG(GPIO63, AF3) | ||
447 | #define GPIO64_USB_P2_7 MFP_CFG(GPIO64, AF3) | ||
448 | #define GPIO65_USB_P2_6 MFP_CFG(GPIO65, AF3) | ||
449 | #define GPIO66_USG_P2_5 MFP_CFG(GPIO66, AF3) | ||
450 | #define GPIO67_USB_P2_4 MFP_CFG(GPIO67, AF3) | ||
451 | #define GPIO68_USB_P2_3 MFP_CFG(GPIO68, AF3) | ||
452 | #define GPIO69_USB_P2_2 MFP_CFG(GPIO69, AF3) | ||
453 | #define GPIO70_USB_P2_1 MFP_CFG(GPIO70, AF3) | ||
454 | |||
455 | /* ULPI */ | ||
456 | #define GPIO31_USB_ULPI_D0 MFP_CFG(GPIO31, AF4) | ||
457 | #define GPIO30_USB_ULPI_D1 MFP_CFG(GPIO30, AF7) | ||
458 | #define GPIO33_USB_ULPI_D2 MFP_CFG(GPIO33, AF5) | ||
459 | #define GPIO34_USB_ULPI_D3 MFP_CFG(GPIO34, AF5) | ||
460 | #define GPIO35_USB_ULPI_D4 MFP_CFG(GPIO35, AF5) | ||
461 | #define GPIO36_USB_ULPI_D5 MFP_CFG(GPIO36, AF5) | ||
462 | #define GPIO41_USB_ULPI_D6 MFP_CFG(GPIO41, AF5) | ||
463 | #define GPIO42_USB_ULPI_D7 MFP_CFG(GPIO42, AF5) | ||
464 | #define GPIO37_USB_ULPI_DIR MFP_CFG(GPIO37, AF4) | ||
465 | #define GPIO38_USB_ULPI_CLK MFP_CFG(GPIO38, AF4) | ||
466 | #define GPIO39_USB_ULPI_STP MFP_CFG(GPIO39, AF4) | ||
467 | #define GPIO40_USB_ULPI_NXT MFP_CFG(GPIO40, AF4) | ||
468 | |||
469 | #define GPIO3_CLK26MOUTDMD MFP_CFG(GPIO3, AF3) | ||
470 | #define GPIO40_CLK26MOUTDMD MFP_CFG(GPIO40, AF7) | ||
471 | #define GPIO94_CLK26MOUTDMD MFP_CFG(GPIO94, AF5) | ||
472 | #define GPIO104_CLK26MOUTDMD MFP_CFG(GPIO104, AF4) | ||
473 | #define DF_ADDR1_CLK26MOUTDMD MFP_CFG(DF_ADDR2, AF3) | ||
474 | #define DF_ADDR3_CLK26MOUTDMD MFP_CFG(DF_ADDR3, AF3) | ||
475 | |||
476 | #define GPIO14_CLK26MOUT MFP_CFG(GPIO14, AF5) | ||
477 | #define GPIO38_CLK26MOUT MFP_CFG(GPIO38, AF7) | ||
478 | #define GPIO92_CLK26MOUT MFP_CFG(GPIO92, AF5) | ||
479 | #define GPIO105_CLK26MOUT MFP_CFG(GPIO105, AF4) | ||
480 | |||
481 | #define GPIO2_CLK13MOUTDMD MFP_CFG(GPIO2, AF3) | ||
482 | #define GPIO39_CLK13MOUTDMD MFP_CFG(GPIO39, AF7) | ||
483 | #define GPIO50_CLK13MOUTDMD MFP_CFG(GPIO50, AF3) | ||
484 | #define GPIO93_CLK13MOUTDMD MFP_CFG(GPIO93, AF5) | ||
485 | #define GPIO103_CLK13MOUTDMD MFP_CFG(GPIO103, AF4) | ||
486 | #define DF_ADDR2_CLK13MOUTDMD MFP_CFG(DF_ADDR2, AF3) | ||
487 | |||
488 | /* 1 wire */ | ||
489 | #define GPIO95_OW_DQ_IN MFP_CFG(GPIO95, AF5) | ||
490 | |||
491 | #endif /* __ASM_ARCH_MFP_PXA9xx_H */ | ||
diff --git a/include/asm-arm/arch-pxa/mfp.h b/include/asm-arm/arch-pxa/mfp.h index 02f6157396d3..e7d58798da67 100644 --- a/include/asm-arm/arch-pxa/mfp.h +++ b/include/asm-arm/arch-pxa/mfp.h | |||
@@ -210,6 +210,14 @@ enum { | |||
210 | MFP_PIN_DF_IO14, | 210 | MFP_PIN_DF_IO14, |
211 | MFP_PIN_DF_IO15, | 211 | MFP_PIN_DF_IO15, |
212 | 212 | ||
213 | /* additional pins on PXA930 */ | ||
214 | MFP_PIN_GSIM_UIO, | ||
215 | MFP_PIN_GSIM_UCLK, | ||
216 | MFP_PIN_GSIM_UDET, | ||
217 | MFP_PIN_GSIM_nURST, | ||
218 | MFP_PIN_PMIC_INT, | ||
219 | MFP_PIN_RDY, | ||
220 | |||
213 | MFP_PIN_MAX, | 221 | MFP_PIN_MAX, |
214 | }; | 222 | }; |
215 | 223 | ||
diff --git a/include/asm-arm/arch-pxa/palmtx.h b/include/asm-arm/arch-pxa/palmtx.h new file mode 100644 index 000000000000..1e8bccbda510 --- /dev/null +++ b/include/asm-arm/arch-pxa/palmtx.h | |||
@@ -0,0 +1,106 @@ | |||
1 | /* | ||
2 | * GPIOs and interrupts for Palm T|X Handheld Computer | ||
3 | * | ||
4 | * Based on palmld-gpio.h by Alex Osborne | ||
5 | * | ||
6 | * Authors: Marek Vasut <marek.vasut@gmail.com> | ||
7 | * Cristiano P. <cristianop@users.sourceforge.net> | ||
8 | * Jan Herman <2hp@seznam.cz> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #ifndef _INCLUDE_PALMTX_H_ | ||
17 | #define _INCLUDE_PALMTX_H_ | ||
18 | |||
19 | /** GPIO definitions **/ | ||
20 | |||
21 | /* GPIOs */ | ||
22 | #define GPIO_NR_PALMTX_GPIO_RESET 1 | ||
23 | |||
24 | #define GPIO_NR_PALMTX_POWER_DETECT 12 /* 90 */ | ||
25 | #define GPIO_NR_PALMTX_HOTSYNC_BUTTON_N 10 | ||
26 | #define GPIO_NR_PALMTX_EARPHONE_DETECT 107 | ||
27 | |||
28 | /* SD/MMC */ | ||
29 | #define GPIO_NR_PALMTX_SD_DETECT_N 14 | ||
30 | #define GPIO_NR_PALMTX_SD_POWER 114 /* probably */ | ||
31 | #define GPIO_NR_PALMTX_SD_READONLY 115 /* probably */ | ||
32 | |||
33 | /* TOUCHSCREEN */ | ||
34 | #define GPIO_NR_PALMTX_WM9712_IRQ 27 | ||
35 | |||
36 | /* IrDA - disable GPIO connected to SD pin of transceiver (TFBS4710?) */ | ||
37 | #define GPIO_NR_PALMTX_IR_DISABLE 40 | ||
38 | |||
39 | /* USB */ | ||
40 | #define GPIO_NR_PALMTX_USB_DETECT_N 13 | ||
41 | #define GPIO_NR_PALMTX_USB_POWER 95 | ||
42 | #define GPIO_NR_PALMTX_USB_PULLUP 93 | ||
43 | |||
44 | /* LCD/BACKLIGHT */ | ||
45 | #define GPIO_NR_PALMTX_BL_POWER 84 | ||
46 | #define GPIO_NR_PALMTX_LCD_POWER 96 | ||
47 | |||
48 | /* LCD BORDER */ | ||
49 | #define GPIO_NR_PALMTX_BORDER_SWITCH 98 | ||
50 | #define GPIO_NR_PALMTX_BORDER_SELECT 22 | ||
51 | |||
52 | /* BLUETOOTH */ | ||
53 | #define GPIO_NR_PALMTX_BT_POWER 17 | ||
54 | #define GPIO_NR_PALMTX_BT_RESET 83 | ||
55 | |||
56 | /* PCMCIA (WiFi) */ | ||
57 | #define GPIO_NR_PALMTX_PCMCIA_POWER1 94 | ||
58 | #define GPIO_NR_PALMTX_PCMCIA_POWER2 108 | ||
59 | #define GPIO_NR_PALMTX_PCMCIA_RESET 79 | ||
60 | #define GPIO_NR_PALMTX_PCMCIA_READY 116 | ||
61 | |||
62 | /* NAND Flash ... this GPIO may be incorrect! */ | ||
63 | #define GPIO_NR_PALMTX_NAND_BUFFER_DIR 79 | ||
64 | |||
65 | /* INTERRUPTS */ | ||
66 | #define IRQ_GPIO_PALMTX_SD_DETECT_N IRQ_GPIO(GPIO_NR_PALMTX_SD_DETECT_N) | ||
67 | #define IRQ_GPIO_PALMTX_WM9712_IRQ IRQ_GPIO(GPIO_NR_PALMTX_WM9712_IRQ) | ||
68 | #define IRQ_GPIO_PALMTX_USB_DETECT IRQ_GPIO(GPIO_NR_PALMTX_USB_DETECT) | ||
69 | #define IRQ_GPIO_PALMTX_GPIO_RESET IRQ_GPIO(GPIO_NR_PALMTX_GPIO_RESET) | ||
70 | |||
71 | /** Initial values **/ | ||
72 | |||
73 | /* Various addresses */ | ||
74 | #define PALMTX_PCMCIA_PHYS 0x28000000 | ||
75 | #define PALMTX_PCMCIA_VIRT 0xf0000000 | ||
76 | #define PALMTX_PCMCIA_SIZE 0x100000 | ||
77 | |||
78 | #define PALMTX_PHYS_RAM_START 0xa0000000 | ||
79 | #define PALMTX_PHYS_IO_START 0x40000000 | ||
80 | |||
81 | #define PALMTX_PHYS_FLASH_START PXA_CS0_PHYS /* ChipSelect 0 */ | ||
82 | #define PALMTX_PHYS_NAND_START PXA_CS1_PHYS /* ChipSelect 1 */ | ||
83 | |||
84 | /* TOUCHSCREEN */ | ||
85 | #define AC97_LINK_FRAME 21 | ||
86 | |||
87 | |||
88 | /* BATTERY */ | ||
89 | #define PALMTX_BAT_MAX_VOLTAGE 4000 /* 4.00v current voltage */ | ||
90 | #define PALMTX_BAT_MIN_VOLTAGE 3550 /* 3.55v critical voltage */ | ||
91 | #define PALMTX_BAT_MAX_CURRENT 0 /* unknown */ | ||
92 | #define PALMTX_BAT_MIN_CURRENT 0 /* unknown */ | ||
93 | #define PALMTX_BAT_MAX_CHARGE 1 /* unknown */ | ||
94 | #define PALMTX_BAT_MIN_CHARGE 1 /* unknown */ | ||
95 | #define PALMTX_MAX_LIFE_MINS 360 /* battery life in minutes while powered on */ | ||
96 | |||
97 | #define PALMTX_BAT_MEASURE_DELAY (HZ * 1) | ||
98 | |||
99 | /* BACKLIGHT */ | ||
100 | #define PALMTX_MAX_INTENSITY 0xFE | ||
101 | #define PALMTX_DEFAULT_INTENSITY 0x7E | ||
102 | #define PALMTX_LIMIT_MASK 0x7F | ||
103 | #define PALMTX_PRESCALER 0x3F | ||
104 | #define PALMTX_PERIOD_NS 3500 | ||
105 | |||
106 | #endif | ||
diff --git a/include/asm-arm/arch-pxa/pxa27x-udc.h b/include/asm-arm/arch-pxa/pxa27x-udc.h index bc1cf7d0773a..ab1443f8bd89 100644 --- a/include/asm-arm/arch-pxa/pxa27x-udc.h +++ b/include/asm-arm/arch-pxa/pxa27x-udc.h | |||
@@ -97,7 +97,7 @@ | |||
97 | #define UP2OCR_IDON (1 << 10) /* OTG ID Read Enable */ | 97 | #define UP2OCR_IDON (1 << 10) /* OTG ID Read Enable */ |
98 | #define UP2OCR_HXS (1 << 16) /* Host Port 2 Transceiver Output Select */ | 98 | #define UP2OCR_HXS (1 << 16) /* Host Port 2 Transceiver Output Select */ |
99 | #define UP2OCR_HXOE (1 << 17) /* Host Port 2 Transceiver Output Enable */ | 99 | #define UP2OCR_HXOE (1 << 17) /* Host Port 2 Transceiver Output Enable */ |
100 | #define UP2OCR_SEOS (1 << 24) /* Single-Ended Output Select */ | 100 | #define UP2OCR_SEOS(x) ((x & 7) << 24) /* Single-Ended Output Select */ |
101 | 101 | ||
102 | #define UDCCSN(x) __REG2(0x40600100, (x) << 2) | 102 | #define UDCCSN(x) __REG2(0x40600100, (x) << 2) |
103 | #define UDCCSR0 __REG(0x40600100) /* UDC Control/Status register - Endpoint 0 */ | 103 | #define UDCCSR0 __REG(0x40600100) /* UDC Control/Status register - Endpoint 0 */ |
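UP2OCR_SEOS changes above from a single bit to a 3-bit field at bit 24; a small sketch of the field macro (with the argument fully parenthesised here for safety):

#include <stdio.h>

/* Single-Ended Output Select: a 3-bit field at bits [26:24], so the
 * macro masks the argument to 3 bits before shifting. */
#define UP2OCR_SEOS(x)  (((x) & 7) << 24)

int main(void)
{
        int sel;

        for (sel = 0; sel < 8; sel++)
                printf("SEOS(%d) = 0x%08x\n", sel, UP2OCR_SEOS(sel));
        return 0;
}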
diff --git a/include/asm-arm/arch-pxa/pxa2xx_spi.h b/include/asm-arm/arch-pxa/pxa2xx_spi.h index 3459fb26ce97..2206cb61a9f9 100644 --- a/include/asm-arm/arch-pxa/pxa2xx_spi.h +++ b/include/asm-arm/arch-pxa/pxa2xx_spi.h | |||
@@ -41,4 +41,6 @@ struct pxa2xx_spi_chip { | |||
41 | void (*cs_control)(u32 command); | 41 | void (*cs_control)(u32 command); |
42 | }; | 42 | }; |
43 | 43 | ||
44 | extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info); | ||
45 | |||
44 | #endif /*PXA2XX_SPI_H_*/ | 46 | #endif /*PXA2XX_SPI_H_*/ |
diff --git a/include/asm-arm/arch-pxa/pxa3xx_nand.h b/include/asm-arm/arch-pxa/pxa3xx_nand.h index 81a8937486cb..eb4b190b6657 100644 --- a/include/asm-arm/arch-pxa/pxa3xx_nand.h +++ b/include/asm-arm/arch-pxa/pxa3xx_nand.h | |||
@@ -15,4 +15,6 @@ struct pxa3xx_nand_platform_data { | |||
15 | struct mtd_partition *parts; | 15 | struct mtd_partition *parts; |
16 | unsigned int nr_parts; | 16 | unsigned int nr_parts; |
17 | }; | 17 | }; |
18 | |||
19 | extern void pxa3xx_set_nand_info(struct pxa3xx_nand_platform_data *info); | ||
18 | #endif /* __ASM_ARCH_PXA3XX_NAND_H */ | 20 | #endif /* __ASM_ARCH_PXA3XX_NAND_H */ |
diff --git a/include/asm-arm/arch-pxa/pxafb.h b/include/asm-arm/arch-pxa/pxafb.h index bbd22396841a..daf018d0c604 100644 --- a/include/asm-arm/arch-pxa/pxafb.h +++ b/include/asm-arm/arch-pxa/pxafb.h | |||
@@ -71,7 +71,8 @@ struct pxafb_mode_info { | |||
71 | 71 | ||
72 | u_char bpp; | 72 | u_char bpp; |
73 | u_int cmap_greyscale:1, | 73 | u_int cmap_greyscale:1, |
74 | unused:31; | 74 | depth:8, |
75 | unused:23; | ||
75 | 76 | ||
76 | /* Parallel Mode Timing */ | 77 | /* Parallel Mode Timing */ |
77 | u_char hsync_len; | 78 | u_char hsync_len; |
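The pxafb_mode_info change above carves an 8-bit depth field out of the previously unused bits, keeping the bit-field word at 1 + 8 + 23 = 32 bits; a minimal layout check (sizeof is typically 4 on the platforms in question, though bit-field packing is formally implementation-defined):

#include <stdio.h>

/* Mirror of the bit-field layout added above. */
struct mode_flags {
        unsigned int cmap_greyscale:1,
                     depth:8,
                     unused:23;
};

int main(void)
{
        struct mode_flags f = { .cmap_greyscale = 0, .depth = 24 };

        printf("sizeof = %zu, depth = %u\n", sizeof(f), f.depth);
        return 0;
}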
diff --git a/include/asm-arm/arch-pxa/regs-lcd.h b/include/asm-arm/arch-pxa/regs-lcd.h index 3ba464c913a5..820a189684a9 100644 --- a/include/asm-arm/arch-pxa/regs-lcd.h +++ b/include/asm-arm/arch-pxa/regs-lcd.h | |||
@@ -27,6 +27,12 @@ | |||
27 | #define LCCR3_4BPP (2 << 24) | 27 | #define LCCR3_4BPP (2 << 24) |
28 | #define LCCR3_8BPP (3 << 24) | 28 | #define LCCR3_8BPP (3 << 24) |
29 | #define LCCR3_16BPP (4 << 24) | 29 | #define LCCR3_16BPP (4 << 24) |
30 | #define LCCR3_18BPP (5 << 24) | ||
31 | #define LCCR3_18BPP_P (6 << 24) | ||
32 | #define LCCR3_19BPP (7 << 24) | ||
33 | #define LCCR3_19BPP_P (1 << 29) | ||
34 | #define LCCR3_24BPP ((1 << 29) | (1 << 24)) | ||
35 | #define LCCR3_25BPP ((1 << 29) | (2 << 24)) | ||
30 | 36 | ||
31 | #define LCCR3_PDFOR_0 (0 << 30) | 37 | #define LCCR3_PDFOR_0 (0 << 30) |
32 | #define LCCR3_PDFOR_1 (1 << 30) | 38 | #define LCCR3_PDFOR_1 (1 << 30) |
diff --git a/include/asm-arm/arch-pxa/regs-ssp.h b/include/asm-arm/arch-pxa/regs-ssp.h index 0255328c3c18..3c04cde2cf1f 100644 --- a/include/asm-arm/arch-pxa/regs-ssp.h +++ b/include/asm-arm/arch-pxa/regs-ssp.h | |||
@@ -20,6 +20,10 @@ | |||
20 | #define SSTSS (0x38) /* SSP Timeslot Status */ | 20 | #define SSTSS (0x38) /* SSP Timeslot Status */ |
21 | #define SSACD (0x3C) /* SSP Audio Clock Divider */ | 21 | #define SSACD (0x3C) /* SSP Audio Clock Divider */ |
22 | 22 | ||
23 | #if defined(CONFIG_PXA3xx) | ||
24 | #define SSACDD (0x40) /* SSP Audio Clock Dither Divider */ | ||
25 | #endif | ||
26 | |||
23 | /* Common PXA2xx bits first */ | 27 | /* Common PXA2xx bits first */ |
24 | #define SSCR0_DSS (0x0000000f) /* Data Size Select (mask) */ | 28 | #define SSCR0_DSS (0x0000000f) /* Data Size Select (mask) */ |
25 | #define SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..16] */ | 29 | #define SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..16] */ |
@@ -29,10 +33,12 @@ | |||
29 | #define SSCR0_National (0x2 << 4) /* National Microwire */ | 33 | #define SSCR0_National (0x2 << 4) /* National Microwire */ |
30 | #define SSCR0_ECS (1 << 6) /* External clock select */ | 34 | #define SSCR0_ECS (1 << 6) /* External clock select */ |
31 | #define SSCR0_SSE (1 << 7) /* Synchronous Serial Port Enable */ | 35 | #define SSCR0_SSE (1 << 7) /* Synchronous Serial Port Enable */ |
36 | |||
32 | #if defined(CONFIG_PXA25x) | 37 | #if defined(CONFIG_PXA25x) |
33 | #define SSCR0_SCR (0x0000ff00) /* Serial Clock Rate (mask) */ | 38 | #define SSCR0_SCR (0x0000ff00) /* Serial Clock Rate (mask) */ |
34 | #define SSCR0_SerClkDiv(x) ((((x) - 2)/2) << 8) /* Divisor [2..512] */ | 39 | #define SSCR0_SerClkDiv(x) ((((x) - 2)/2) << 8) /* Divisor [2..512] */ |
35 | #elif defined(CONFIG_PXA27x) | 40 | |
41 | #elif defined(CONFIG_PXA27x) || defined(CONFIG_PXA3xx) | ||
36 | #define SSCR0_SCR (0x000fff00) /* Serial Clock Rate (mask) */ | 42 | #define SSCR0_SCR (0x000fff00) /* Serial Clock Rate (mask) */ |
37 | #define SSCR0_SerClkDiv(x) (((x) - 1) << 8) /* Divisor [1..4096] */ | 43 | #define SSCR0_SerClkDiv(x) (((x) - 1) << 8) /* Divisor [1..4096] */ |
38 | #define SSCR0_EDSS (1 << 20) /* Extended data size select */ | 44 | #define SSCR0_EDSS (1 << 20) /* Extended data size select */ |
@@ -45,6 +51,10 @@ | |||
45 | #define SSCR0_MOD (1 << 31) /* Mode (normal or network) */ | 51 | #define SSCR0_MOD (1 << 31) /* Mode (normal or network) */ |
46 | #endif | 52 | #endif |
47 | 53 | ||
54 | #if defined(CONFIG_PXA3xx) | ||
55 | #define SSCR0_FPCKE (1 << 29) /* FIFO packing enable */ | ||
56 | #endif | ||
57 | |||
48 | #define SSCR1_RIE (1 << 0) /* Receive FIFO Interrupt Enable */ | 58 | #define SSCR1_RIE (1 << 0) /* Receive FIFO Interrupt Enable */ |
49 | #define SSCR1_TIE (1 << 1) /* Transmit FIFO Interrupt Enable */ | 59 | #define SSCR1_TIE (1 << 1) /* Transmit FIFO Interrupt Enable */ |
50 | #define SSCR1_LBM (1 << 2) /* Loop-Back Mode */ | 60 | #define SSCR1_LBM (1 << 2) /* Loop-Back Mode */ |
@@ -109,5 +119,9 @@ | |||
109 | #define SSACD_SCDB (1 << 3) /* SSPSYSCLK Divider Bypass */ | 119 | #define SSACD_SCDB (1 << 3) /* SSPSYSCLK Divider Bypass */ |
110 | #define SSACD_ACPS(x) ((x) << 4) /* Audio clock PLL select */ | 120 | #define SSACD_ACPS(x) ((x) << 4) /* Audio clock PLL select */ |
111 | #define SSACD_ACDS(x) ((x) << 0) /* Audio clock divider select */ | 121 | #define SSACD_ACDS(x) ((x) << 0) /* Audio clock divider select */ |
122 | #if defined(CONFIG_PXA3xx) | ||
123 | #define SSACD_SCDX8 (1 << 7) /* SYSCLK division ratio select */ | ||
124 | #endif | ||
125 | |||
112 | 126 | ||
113 | #endif /* __ASM_ARCH_REGS_SSP_H */ | 127 | #endif /* __ASM_ARCH_REGS_SSP_H */ |
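The widened #elif above means PXA3xx shares the PXA27x serial-clock-divider encoding: SSCR0_SerClkDiv(x) stores x - 1 in bits 8..19, for divisors 1..4096. As a quick sanity check of the macros (assuming a PXA27x/PXA3xx build), composing an SSCR0 value for 16-bit frames with a divide-by-4 bit clock looks like this:

/* PXA27x/PXA3xx encoding: SSCR0_DataSize(16) == 0xf,
 * SSCR0_SerClkDiv(4) == (3 << 8) == 0x300, SSCR0_SSE == (1 << 7). */
u32 sscr0 = SSCR0_DataSize(16) | SSCR0_SerClkDiv(4) | SSCR0_SSE;
/* sscr0 == 0x0000038f */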
diff --git a/include/asm-arm/arch-pxa/system.h b/include/asm-arm/arch-pxa/system.h index ba7e132de1b3..6956fc5235f8 100644 --- a/include/asm-arm/arch-pxa/system.h +++ b/include/asm-arm/arch-pxa/system.h | |||
@@ -21,19 +21,4 @@ static inline void arch_idle(void) | |||
21 | } | 21 | } |
22 | 22 | ||
23 | 23 | ||
24 | static inline void arch_reset(char mode) | 24 | void arch_reset(char mode); |
25 | { | ||
26 | if (cpu_is_pxa2xx()) | ||
27 | RCSR = RCSR_HWR | RCSR_WDR | RCSR_SMR | RCSR_GPR; | ||
28 | |||
29 | if (mode == 's') { | ||
30 | /* Jump into ROM at address 0 */ | ||
31 | cpu_reset(0); | ||
32 | } else { | ||
33 | /* Initialize the watchdog and let it fire */ | ||
34 | OWER = OWER_WME; | ||
35 | OSSR = OSSR_M3; | ||
36 | OSMR3 = OSCR + 368640; /* ... in 100 ms */ | ||
37 | } | ||
38 | } | ||
39 | |||
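With this change arch_reset() is only declared in the header; the definition moves out of line into PXA core code that is not part of this hunk. Reconstructed from the inline body removed above, the out-of-line version would look roughly like this:

/* Reconstruction of the removed inline body; the actual out-of-line
 * definition lives in arch code outside this hunk. */
void arch_reset(char mode)
{
        if (cpu_is_pxa2xx())
                RCSR = RCSR_HWR | RCSR_WDR | RCSR_SMR | RCSR_GPR;

        if (mode == 's') {
                /* Jump into ROM at address 0 */
                cpu_reset(0);
        } else {
                /* Initialize the watchdog and let it fire */
                OWER = OWER_WME;
                OSSR = OSSR_M3;
                OSMR3 = OSCR + 368640;  /* ... in 100 ms */
        }
}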
diff --git a/include/asm-arm/arch-pxa/tosa.h b/include/asm-arm/arch-pxa/tosa.h index c5b6fde6907c..a72803f0461b 100644 --- a/include/asm-arm/arch-pxa/tosa.h +++ b/include/asm-arm/arch-pxa/tosa.h | |||
@@ -25,21 +25,18 @@ | |||
25 | */ | 25 | */ |
26 | #define TOSA_SCOOP_GPIO_BASE NR_BUILTIN_GPIO | 26 | #define TOSA_SCOOP_GPIO_BASE NR_BUILTIN_GPIO |
27 | #define TOSA_SCOOP_PXA_VCORE1 SCOOP_GPCR_PA11 | 27 | #define TOSA_SCOOP_PXA_VCORE1 SCOOP_GPCR_PA11 |
28 | #define TOSA_SCOOP_TC6393_REST_IN SCOOP_GPCR_PA12 | 28 | #define TOSA_GPIO_TC6393XB_REST_IN (TOSA_SCOOP_GPIO_BASE + 1) |
29 | #define TOSA_GPIO_IR_POWERDWN (TOSA_SCOOP_GPIO_BASE + 2) | 29 | #define TOSA_GPIO_IR_POWERDWN (TOSA_SCOOP_GPIO_BASE + 2) |
30 | #define TOSA_GPIO_SD_WP (TOSA_SCOOP_GPIO_BASE + 3) | 30 | #define TOSA_GPIO_SD_WP (TOSA_SCOOP_GPIO_BASE + 3) |
31 | #define TOSA_GPIO_PWR_ON (TOSA_SCOOP_GPIO_BASE + 4) | 31 | #define TOSA_GPIO_PWR_ON (TOSA_SCOOP_GPIO_BASE + 4) |
32 | #define TOSA_SCOOP_AUD_PWR_ON SCOOP_GPCR_PA16 | 32 | #define TOSA_SCOOP_AUD_PWR_ON SCOOP_GPCR_PA16 |
33 | #define TOSA_SCOOP_BT_RESET SCOOP_GPCR_PA17 | 33 | #define TOSA_GPIO_BT_RESET (TOSA_SCOOP_GPIO_BASE + 6) |
34 | #define TOSA_SCOOP_BT_PWR_EN SCOOP_GPCR_PA18 | 34 | #define TOSA_GPIO_BT_PWR_EN (TOSA_SCOOP_GPIO_BASE + 7) |
35 | #define TOSA_SCOOP_AC_IN_OL SCOOP_GPCR_PA19 | 35 | #define TOSA_SCOOP_AC_IN_OL SCOOP_GPCR_PA19 |
36 | 36 | ||
37 | /* GPIO Direction 1 : output mode / 0:input mode */ | 37 | /* GPIO Direction 1 : output mode / 0:input mode */ |
38 | #define TOSA_SCOOP_IO_DIR ( TOSA_SCOOP_PXA_VCORE1 | TOSA_SCOOP_TC6393_REST_IN | \ | 38 | #define TOSA_SCOOP_IO_DIR (TOSA_SCOOP_PXA_VCORE1 | \ |
39 | TOSA_SCOOP_AUD_PWR_ON |\ | 39 | TOSA_SCOOP_AUD_PWR_ON) |
40 | TOSA_SCOOP_BT_RESET | TOSA_SCOOP_BT_PWR_EN ) | ||
41 | /* GPIO out put level when init 1: Hi */ | ||
42 | #define TOSA_SCOOP_IO_OUT ( TOSA_SCOOP_TC6393_REST_IN ) | ||
43 | 40 | ||
44 | /* | 41 | /* |
45 | * SCOOP2 jacket GPIOs | 42 | * SCOOP2 jacket GPIOs |
@@ -49,16 +46,34 @@ | |||
49 | #define TOSA_GPIO_NOTE_LED (TOSA_SCOOP_JC_GPIO_BASE + 1) | 46 | #define TOSA_GPIO_NOTE_LED (TOSA_SCOOP_JC_GPIO_BASE + 1) |
50 | #define TOSA_GPIO_CHRG_ERR_LED (TOSA_SCOOP_JC_GPIO_BASE + 2) | 47 | #define TOSA_GPIO_CHRG_ERR_LED (TOSA_SCOOP_JC_GPIO_BASE + 2) |
51 | #define TOSA_GPIO_USB_PULLUP (TOSA_SCOOP_JC_GPIO_BASE + 3) | 48 | #define TOSA_GPIO_USB_PULLUP (TOSA_SCOOP_JC_GPIO_BASE + 3) |
52 | #define TOSA_SCOOP_JC_TC6393_SUSPEND SCOOP_GPCR_PA15 | 49 | #define TOSA_GPIO_TC6393XB_SUSPEND (TOSA_SCOOP_JC_GPIO_BASE + 4) |
53 | #define TOSA_SCOOP_JC_TC3693_L3V_ON SCOOP_GPCR_PA16 | 50 | #define TOSA_GPIO_TC6393XB_L3V_ON (TOSA_SCOOP_JC_GPIO_BASE + 5) |
54 | #define TOSA_SCOOP_JC_WLAN_DETECT SCOOP_GPCR_PA17 | 51 | #define TOSA_SCOOP_JC_WLAN_DETECT SCOOP_GPCR_PA17 |
55 | #define TOSA_GPIO_WLAN_LED (TOSA_SCOOP_JC_GPIO_BASE + 7) | 52 | #define TOSA_GPIO_WLAN_LED (TOSA_SCOOP_JC_GPIO_BASE + 7) |
56 | #define TOSA_SCOOP_JC_CARD_LIMIT_SEL SCOOP_GPCR_PA19 | 53 | #define TOSA_SCOOP_JC_CARD_LIMIT_SEL SCOOP_GPCR_PA19 |
57 | 54 | ||
58 | /* GPIO Direction 1 : output mode / 0:input mode */ | 55 | /* GPIO Direction 1 : output mode / 0:input mode */ |
59 | #define TOSA_SCOOP_JC_IO_DIR ( \ | 56 | #define TOSA_SCOOP_JC_IO_DIR (TOSA_SCOOP_JC_CARD_LIMIT_SEL) |
60 | TOSA_SCOOP_JC_TC6393_SUSPEND | TOSA_SCOOP_JC_TC3693_L3V_ON | \ | 57 | |
61 | TOSA_SCOOP_JC_CARD_LIMIT_SEL ) | 58 | /* |
59 | * TC6393XB GPIOs | ||
60 | */ | ||
61 | #define TOSA_TC6393XB_GPIO_BASE (NR_BUILTIN_GPIO + 2 * 12) | ||
62 | #define TOSA_TC6393XB_GPIO(i) (TOSA_TC6393XB_GPIO_BASE + (i)) | ||
63 | #define TOSA_TC6393XB_GPIO_BIT(gpio) (1 << (gpio - TOSA_TC6393XB_GPIO_BASE)) | ||
64 | |||
65 | #define TOSA_GPIO_TG_ON (TOSA_TC6393XB_GPIO_BASE + 0) | ||
66 | #define TOSA_GPIO_L_MUTE (TOSA_TC6393XB_GPIO_BASE + 1) | ||
67 | #define TOSA_GPIO_BL_C20MA (TOSA_TC6393XB_GPIO_BASE + 3) | ||
68 | #define TOSA_GPIO_CARD_VCC_ON (TOSA_TC6393XB_GPIO_BASE + 4) | ||
69 | #define TOSA_GPIO_CHARGE_OFF (TOSA_TC6393XB_GPIO_BASE + 6) | ||
70 | #define TOSA_GPIO_CHARGE_OFF_JC (TOSA_TC6393XB_GPIO_BASE + 7) | ||
71 | #define TOSA_GPIO_BAT0_V_ON (TOSA_TC6393XB_GPIO_BASE + 9) | ||
72 | #define TOSA_GPIO_BAT1_V_ON (TOSA_TC6393XB_GPIO_BASE + 10) | ||
73 | #define TOSA_GPIO_BU_CHRG_ON (TOSA_TC6393XB_GPIO_BASE + 11) | ||
74 | #define TOSA_GPIO_BAT_SW_ON (TOSA_TC6393XB_GPIO_BASE + 12) | ||
75 | #define TOSA_GPIO_BAT0_TH_ON (TOSA_TC6393XB_GPIO_BASE + 14) | ||
76 | #define TOSA_GPIO_BAT1_TH_ON (TOSA_TC6393XB_GPIO_BASE + 15) | ||
62 | 77 | ||
63 | /* | 78 | /* |
64 | * Timing Generator | 79 | * Timing Generator |
@@ -84,13 +99,13 @@ | |||
84 | #define TOSA_GPIO_JACKET_DETECT (7) | 99 | #define TOSA_GPIO_JACKET_DETECT (7) |
85 | #define TOSA_GPIO_nSD_DETECT (9) | 100 | #define TOSA_GPIO_nSD_DETECT (9) |
86 | #define TOSA_GPIO_nSD_INT (10) | 101 | #define TOSA_GPIO_nSD_INT (10) |
87 | #define TOSA_GPIO_TC6393_CLK (11) | 102 | #define TOSA_GPIO_TC6393XB_CLK (11) |
88 | #define TOSA_GPIO_BAT1_CRG (12) | 103 | #define TOSA_GPIO_BAT1_CRG (12) |
89 | #define TOSA_GPIO_CF_CD (13) | 104 | #define TOSA_GPIO_CF_CD (13) |
90 | #define TOSA_GPIO_BAT0_CRG (14) | 105 | #define TOSA_GPIO_BAT0_CRG (14) |
91 | #define TOSA_GPIO_TC6393_INT (15) | 106 | #define TOSA_GPIO_TC6393XB_INT (15) |
92 | #define TOSA_GPIO_BAT0_LOW (17) | 107 | #define TOSA_GPIO_BAT0_LOW (17) |
93 | #define TOSA_GPIO_TC6393_RDY (18) | 108 | #define TOSA_GPIO_TC6393XB_RDY (18) |
94 | #define TOSA_GPIO_ON_RESET (19) | 109 | #define TOSA_GPIO_ON_RESET (19) |
95 | #define TOSA_GPIO_EAR_IN (20) | 110 | #define TOSA_GPIO_EAR_IN (20) |
96 | #define TOSA_GPIO_CF_IRQ (21) /* CF slot0 Ready */ | 111 | #define TOSA_GPIO_CF_IRQ (21) /* CF slot0 Ready */ |
@@ -99,6 +114,7 @@ | |||
99 | #define TOSA_GPIO_TP_INT (32) /* Touch Panel pen down interrupt */ | 114 | #define TOSA_GPIO_TP_INT (32) /* Touch Panel pen down interrupt */ |
100 | #define TOSA_GPIO_JC_CF_IRQ (36) /* CF slot1 Ready */ | 115 | #define TOSA_GPIO_JC_CF_IRQ (36) /* CF slot1 Ready */ |
101 | #define TOSA_GPIO_BAT_LOCKED (38) /* Battery locked */ | 116 | #define TOSA_GPIO_BAT_LOCKED (38) /* Battery locked */ |
117 | #define TOSA_GPIO_IRDA_TX (47) | ||
102 | #define TOSA_GPIO_TG_SPI_SCLK (81) | 118 | #define TOSA_GPIO_TG_SPI_SCLK (81) |
103 | #define TOSA_GPIO_TG_SPI_CS (82) | 119 | #define TOSA_GPIO_TG_SPI_CS (82) |
104 | #define TOSA_GPIO_TG_SPI_MOSI (83) | 120 | #define TOSA_GPIO_TG_SPI_MOSI (83) |
@@ -137,7 +153,7 @@ | |||
137 | #define TOSA_IRQ_GPIO_BAT1_CRG IRQ_GPIO(TOSA_GPIO_BAT1_CRG) | 153 | #define TOSA_IRQ_GPIO_BAT1_CRG IRQ_GPIO(TOSA_GPIO_BAT1_CRG) |
138 | #define TOSA_IRQ_GPIO_CF_CD IRQ_GPIO(TOSA_GPIO_CF_CD) | 154 | #define TOSA_IRQ_GPIO_CF_CD IRQ_GPIO(TOSA_GPIO_CF_CD) |
139 | #define TOSA_IRQ_GPIO_BAT0_CRG IRQ_GPIO(TOSA_GPIO_BAT0_CRG) | 155 | #define TOSA_IRQ_GPIO_BAT0_CRG IRQ_GPIO(TOSA_GPIO_BAT0_CRG) |
140 | #define TOSA_IRQ_GPIO_TC6393_INT IRQ_GPIO(TOSA_GPIO_TC6393_INT) | 156 | #define TOSA_IRQ_GPIO_TC6393XB_INT IRQ_GPIO(TOSA_GPIO_TC6393XB_INT) |
141 | #define TOSA_IRQ_GPIO_BAT0_LOW IRQ_GPIO(TOSA_GPIO_BAT0_LOW) | 157 | #define TOSA_IRQ_GPIO_BAT0_LOW IRQ_GPIO(TOSA_GPIO_BAT0_LOW) |
142 | #define TOSA_IRQ_GPIO_EAR_IN IRQ_GPIO(TOSA_GPIO_EAR_IN) | 158 | #define TOSA_IRQ_GPIO_EAR_IN IRQ_GPIO(TOSA_GPIO_EAR_IN) |
143 | #define TOSA_IRQ_GPIO_CF_IRQ IRQ_GPIO(TOSA_GPIO_CF_IRQ) | 159 | #define TOSA_IRQ_GPIO_CF_IRQ IRQ_GPIO(TOSA_GPIO_CF_IRQ) |
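The TC6393XB companion chip now gets its own GPIO range, starting 24 GPIOs above NR_BUILTIN_GPIO (the 2 * 12 offset leaves room for the two SCOOP GPIO banks). A worked example of the helper macros, using only the definitions above:

/* Worked example (values are relative to NR_BUILTIN_GPIO, whose
 * absolute value depends on the platform configuration):
 *
 *   TOSA_GPIO_CARD_VCC_ON == TOSA_TC6393XB_GPIO(4)
 *                         == NR_BUILTIN_GPIO + 24 + 4
 *   TOSA_TC6393XB_GPIO_BIT(TOSA_GPIO_CARD_VCC_ON) == (1 << 4) == 0x10
 */
unsigned long card_vcc_bit = TOSA_TC6393XB_GPIO_BIT(TOSA_GPIO_CARD_VCC_ON);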
diff --git a/include/asm-arm/arch-pxa/tosa_bt.h b/include/asm-arm/arch-pxa/tosa_bt.h new file mode 100644 index 000000000000..efc3c3d3b75d --- /dev/null +++ b/include/asm-arm/arch-pxa/tosa_bt.h | |||
@@ -0,0 +1,22 @@ | |||
1 | /* | ||
2 | * Tosa bluetooth built-in chip control. | ||
3 | * | ||
4 | * Later it may be shared with some other platforms. | ||
5 | * | ||
6 | * Copyright (c) 2008 Dmitry Baryshkov | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | */ | ||
13 | #ifndef TOSA_BT_H | ||
14 | #define TOSA_BT_H | ||
15 | |||
16 | struct tosa_bt_data { | ||
17 | int gpio_pwr; | ||
18 | int gpio_reset; | ||
19 | }; | ||
20 | |||
21 | #endif | ||
22 | |||
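struct tosa_bt_data is plain platform data: the two GPIO numbers the Bluetooth power/reset driver will toggle. A hedged sketch of how the Tosa board file might hand it over; the "tosa-bt" device name is an assumption for illustration, while the GPIO macros come from the tosa.h hunk above.

#include <linux/platform_device.h>
#include <asm/arch/tosa.h>
#include <asm/arch/tosa_bt.h>

/* Illustrative board-side wiring for the (not shown) Bluetooth driver. */
static struct tosa_bt_data tosa_bt_data = {
        .gpio_pwr       = TOSA_GPIO_BT_PWR_EN,
        .gpio_reset     = TOSA_GPIO_BT_RESET,
};

static struct platform_device tosa_bt_device = {
        .name   = "tosa-bt",            /* assumed driver name */
        .id     = -1,
        .dev    = {
                .platform_data  = &tosa_bt_data,
        },
};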
diff --git a/include/asm-arm/arch-pxa/uncompress.h b/include/asm-arm/arch-pxa/uncompress.h index dadf4c20b622..f4551269aaf2 100644 --- a/include/asm-arm/arch-pxa/uncompress.h +++ b/include/asm-arm/arch-pxa/uncompress.h | |||
@@ -11,11 +11,11 @@ | |||
11 | 11 | ||
12 | #include <linux/serial_reg.h> | 12 | #include <linux/serial_reg.h> |
13 | #include <asm/arch/pxa-regs.h> | 13 | #include <asm/arch/pxa-regs.h> |
14 | #include <asm/mach-types.h> | ||
14 | 15 | ||
15 | #define __REG(x) ((volatile unsigned long *)x) | 16 | #define __REG(x) ((volatile unsigned long *)x) |
16 | |||
17 | #define UART FFUART | ||
18 | 17 | ||
18 | static volatile unsigned long *UART = FFUART; | ||
19 | 19 | ||
20 | static inline void putc(char c) | 20 | static inline void putc(char c) |
21 | { | 21 | { |
@@ -33,8 +33,13 @@ static inline void flush(void) | |||
33 | { | 33 | { |
34 | } | 34 | } |
35 | 35 | ||
36 | static inline void arch_decomp_setup(void) | ||
37 | { | ||
38 | if (machine_is_littleton()) | ||
39 | UART = STUART; | ||
40 | } | ||
41 | |||
36 | /* | 42 | /* |
37 | * nothing to do | 43 | * nothing to do |
38 | */ | 44 | */ |
39 | #define arch_decomp_setup() | ||
40 | #define arch_decomp_wdog() | 45 | #define arch_decomp_wdog() |
diff --git a/include/asm-arm/arch-pxa/zylonite.h b/include/asm-arm/arch-pxa/zylonite.h index de577de8d18c..0d35ca04731e 100644 --- a/include/asm-arm/arch-pxa/zylonite.h +++ b/include/asm-arm/arch-pxa/zylonite.h | |||
@@ -16,6 +16,8 @@ struct platform_mmc_slot { | |||
16 | extern struct platform_mmc_slot zylonite_mmc_slot[]; | 16 | extern struct platform_mmc_slot zylonite_mmc_slot[]; |
17 | 17 | ||
18 | extern int gpio_eth_irq; | 18 | extern int gpio_eth_irq; |
19 | extern int gpio_debug_led1; | ||
20 | extern int gpio_debug_led2; | ||
19 | 21 | ||
20 | extern int wm9713_irq; | 22 | extern int wm9713_irq; |
21 | 23 | ||
diff --git a/include/asm-arm/hardware/iop3xx-adma.h b/include/asm-arm/hardware/iop3xx-adma.h index a32b86ac62aa..af64676650a2 100644 --- a/include/asm-arm/hardware/iop3xx-adma.h +++ b/include/asm-arm/hardware/iop3xx-adma.h | |||
@@ -260,7 +260,7 @@ static inline int iop_chan_memset_slot_count(size_t len, int *slots_per_op) | |||
260 | static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt, | 260 | static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt, |
261 | int *slots_per_op) | 261 | int *slots_per_op) |
262 | { | 262 | { |
263 | static const int slot_count_table[] = { 0, | 263 | static const char slot_count_table[] = { |
264 | 1, 1, 1, 1, /* 01 - 04 */ | 264 | 1, 1, 1, 1, /* 01 - 04 */ |
265 | 2, 2, 2, 2, /* 05 - 08 */ | 265 | 2, 2, 2, 2, /* 05 - 08 */ |
266 | 4, 4, 4, 4, /* 09 - 12 */ | 266 | 4, 4, 4, 4, /* 09 - 12 */ |
@@ -270,7 +270,7 @@ static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt, | |||
270 | 8, 8, 8, 8, /* 25 - 28 */ | 270 | 8, 8, 8, 8, /* 25 - 28 */ |
271 | 8, 8, 8, 8, /* 29 - 32 */ | 271 | 8, 8, 8, 8, /* 29 - 32 */ |
272 | }; | 272 | }; |
273 | *slots_per_op = slot_count_table[src_cnt]; | 273 | *slots_per_op = slot_count_table[src_cnt - 1]; |
274 | return *slots_per_op; | 274 | return *slots_per_op; |
275 | } | 275 | } |
276 | 276 | ||
diff --git a/include/asm-arm/kgdb.h b/include/asm-arm/kgdb.h new file mode 100644 index 000000000000..67af4b841984 --- /dev/null +++ b/include/asm-arm/kgdb.h | |||
@@ -0,0 +1,104 @@ | |||
1 | /* | ||
2 | * ARM KGDB support | ||
3 | * | ||
4 | * Author: Deepak Saxena <dsaxena@mvista.com> | ||
5 | * | ||
6 | * Copyright (C) 2002 MontaVista Software Inc. | ||
7 | * | ||
8 | */ | ||
9 | |||
10 | #ifndef __ARM_KGDB_H__ | ||
11 | #define __ARM_KGDB_H__ | ||
12 | |||
13 | #include <linux/ptrace.h> | ||
14 | |||
15 | /* | ||
16 | * GDB assumes that we're a user process being debugged, so | ||
17 | * it will send us an SWI command to write into memory as the | ||
18 | * debug trap. When an SWI occurs, the next instruction addr is | ||
19 | * placed into R14_svc before jumping to the vector trap. | ||
20 | * This doesn't work for kernel debugging as we are already in SVC | ||
21 | * mode, so we would lose the kernel's LR, which is a bad | ||
22 | * thing. | ||
23 | * | ||
24 | * By doing this as an undefined instruction trap, we force a mode | ||
25 | * switch from SVC to UND mode, allowing us to save full kernel state. | ||
26 | * | ||
27 | * We also define a KGDB_COMPILED_BREAK which can be used to compile | ||
28 | * in breakpoints. This is important for things like sysrq-G and for | ||
29 | * the initial breakpoint from trap_init(). | ||
30 | * | ||
31 | * Note to ARM HW designers: Add real trap support like SH && PPC to | ||
32 | * make our lives much much simpler. :) | ||
33 | */ | ||
34 | #define BREAK_INSTR_SIZE 4 | ||
35 | #define GDB_BREAKINST 0xef9f0001 | ||
36 | #define KGDB_BREAKINST 0xe7ffdefe | ||
37 | #define KGDB_COMPILED_BREAK 0xe7ffdeff | ||
38 | #define CACHE_FLUSH_IS_SAFE 1 | ||
39 | |||
40 | #ifndef __ASSEMBLY__ | ||
41 | |||
42 | static inline void arch_kgdb_breakpoint(void) | ||
43 | { | ||
44 | asm(".word 0xe7ffdeff"); | ||
45 | } | ||
46 | |||
47 | extern void kgdb_handle_bus_error(void); | ||
48 | extern int kgdb_fault_expected; | ||
49 | |||
50 | #endif /* !__ASSEMBLY__ */ | ||
51 | |||
52 | /* | ||
53 | * From Kevin Hilman: | ||
54 | * | ||
55 | * gdb is expecting the following registers layout. | ||
56 | * | ||
57 | * r0-r15: 1 long word each | ||
58 | * f0-f7: unused, 3 long words each !! | ||
59 | * fps: unused, 1 long word | ||
60 | * cpsr: 1 long word | ||
61 | * | ||
62 | * Even though f0-f7 and fps are not used, they need to be | ||
63 | * present in the registers sent for correct processing in | ||
64 | * the host-side gdb. | ||
65 | * | ||
66 | * In particular, it is crucial that CPSR is in the right place, | ||
67 | * otherwise gdb will not be able to correctly interpret stepping over | ||
68 | * conditional branches. | ||
69 | */ | ||
70 | #define _GP_REGS 16 | ||
71 | #define _FP_REGS 8 | ||
72 | #define _EXTRA_REGS 2 | ||
73 | #define GDB_MAX_REGS (_GP_REGS + (_FP_REGS * 3) + _EXTRA_REGS) | ||
74 | |||
75 | #define KGDB_MAX_NO_CPUS 1 | ||
76 | #define BUFMAX 400 | ||
77 | #define NUMREGBYTES (GDB_MAX_REGS << 2) | ||
78 | #define NUMCRITREGBYTES (32 << 2) | ||
79 | |||
80 | #define _R0 0 | ||
81 | #define _R1 1 | ||
82 | #define _R2 2 | ||
83 | #define _R3 3 | ||
84 | #define _R4 4 | ||
85 | #define _R5 5 | ||
86 | #define _R6 6 | ||
87 | #define _R7 7 | ||
88 | #define _R8 8 | ||
89 | #define _R9 9 | ||
90 | #define _R10 10 | ||
91 | #define _FP 11 | ||
92 | #define _IP 12 | ||
93 | #define _SPT 13 | ||
94 | #define _LR 14 | ||
95 | #define _PC 15 | ||
96 | #define _CPSR (GDB_MAX_REGS - 1) | ||
97 | |||
98 | /* | ||
99 | * So that we can denote the end of a frame for tracing, | ||
100 | * in the simple case: | ||
101 | */ | ||
102 | #define CFI_END_FRAME(func) __CFI_END_FRAME(_PC, _SPT, func) | ||
103 | |||
104 | #endif /* __ARM_KGDB_H__ */ | ||
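The comment from Kevin Hilman pins down the register image gdb expects: 16 GPRs, eight unused three-word FPA slots, fps, then cpsr, i.e. GDB_MAX_REGS = 16 + 8 * 3 + 2 = 42 words and NUMREGBYTES = 168. A minimal sketch of packing a struct pt_regs into that layout (the real packing lives in the arch/arm KGDB support code added elsewhere in this series, not in this header):

#include <linux/ptrace.h>
#include <asm/kgdb.h>

/* Sketch only: r0-r15 go in slots 0-15, the FPA slots and fps stay
 * zero, and cpsr lands in the final slot (_CPSR == 41). */
static void sketch_pack_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
        int i;

        for (i = 0; i < GDB_MAX_REGS; i++)
                gdb_regs[i] = 0;
        for (i = _R0; i <= _PC; i++)
                gdb_regs[i] = regs->uregs[i];
        gdb_regs[_CPSR] = regs->ARM_cpsr;
}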
diff --git a/include/asm-arm/mach/udc_pxa2xx.h b/include/asm-arm/mach/udc_pxa2xx.h index f9f3606986c2..9e5ed7c0f27f 100644 --- a/include/asm-arm/mach/udc_pxa2xx.h +++ b/include/asm-arm/mach/udc_pxa2xx.h | |||
@@ -23,6 +23,7 @@ struct pxa2xx_udc_mach_info { | |||
23 | */ | 23 | */ |
24 | bool gpio_vbus_inverted; | 24 | bool gpio_vbus_inverted; |
25 | u16 gpio_vbus; /* high == vbus present */ | 25 | u16 gpio_vbus; /* high == vbus present */ |
26 | bool gpio_pullup_inverted; | ||
26 | u16 gpio_pullup; /* high == pullup activated */ | 27 | u16 gpio_pullup; /* high == pullup activated */ |
27 | }; | 28 | }; |
28 | 29 | ||
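The new gpio_pullup_inverted flag mirrors gpio_vbus_inverted, so a board whose D+ pullup is enabled by driving its GPIO low can now describe that. A board-file sketch with made-up GPIO numbers; a real machine file would typically pass this to pxa_set_udc_info().

#include <asm/mach/udc_pxa2xx.h>

/* Hypothetical wiring: VBUS sensed on GPIO 41 (active high), D+ pullup
 * on GPIO 45, enabled by driving the line low. */
static struct pxa2xx_udc_mach_info example_udc_info __initdata = {
        .gpio_vbus              = 41,
        .gpio_vbus_inverted     = 0,
        .gpio_pullup            = 45,
        .gpio_pullup_inverted   = 1,
};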
diff --git a/include/asm-arm/plat-orion/mv_xor.h b/include/asm-arm/plat-orion/mv_xor.h new file mode 100644 index 000000000000..c349e8ff5cc0 --- /dev/null +++ b/include/asm-arm/plat-orion/mv_xor.h | |||
@@ -0,0 +1,28 @@ | |||
1 | /* | ||
2 | * Marvell XOR platform device data definition file. | ||
3 | */ | ||
4 | |||
5 | #ifndef __ASM_PLAT_ORION_MV_XOR_H | ||
6 | #define __ASM_PLAT_ORION_MV_XOR_H | ||
7 | |||
8 | #include <linux/dmaengine.h> | ||
9 | #include <linux/mbus.h> | ||
10 | |||
11 | #define MV_XOR_SHARED_NAME "mv_xor_shared" | ||
12 | #define MV_XOR_NAME "mv_xor" | ||
13 | |||
14 | struct mbus_dram_target_info; | ||
15 | |||
16 | struct mv_xor_platform_shared_data { | ||
17 | struct mbus_dram_target_info *dram; | ||
18 | }; | ||
19 | |||
20 | struct mv_xor_platform_data { | ||
21 | struct platform_device *shared; | ||
22 | int hw_id; | ||
23 | dma_cap_mask_t cap_mask; | ||
24 | size_t pool_size; | ||
25 | }; | ||
26 | |||
27 | |||
28 | #endif | ||
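These definitions are consumed by the new Marvell XOR dmaengine driver added elsewhere in this series: Orion platform code registers one MV_XOR_SHARED_NAME device per XOR unit and one MV_XOR_NAME device per channel, with the channel's platform data pointing back at the shared device. A hedged sketch follows; the IDs, capabilities and pool size are illustrative, and the shared device's mbus/dram data and MMIO resources are omitted.

#include <linux/init.h>
#include <linux/platform_device.h>
#include <asm/plat-orion/mv_xor.h>

static struct platform_device mv_xor_shared = {
        .name   = MV_XOR_SHARED_NAME,
        .id     = 0,
};

static struct mv_xor_platform_data mv_xor_chan0_data = {
        .shared         = &mv_xor_shared,
        .hw_id          = 0,
        .pool_size      = 4096,         /* descriptor pool, bytes */
};

static struct platform_device mv_xor_chan0 = {
        .name   = MV_XOR_NAME,
        .id     = 0,
        .dev    = {
                .platform_data  = &mv_xor_chan0_data,
        },
};

static void __init mv_xor_example_init(void)
{
        /* Advertise what this channel can do before registering it. */
        dma_cap_set(DMA_MEMCPY, mv_xor_chan0_data.cap_mask);
        dma_cap_set(DMA_XOR, mv_xor_chan0_data.cap_mask);
        platform_device_register(&mv_xor_shared);
        platform_device_register(&mv_xor_chan0);
}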
diff --git a/include/asm-arm/traps.h b/include/asm-arm/traps.h index f1541afcf85c..aa399aec568e 100644 --- a/include/asm-arm/traps.h +++ b/include/asm-arm/traps.h | |||
@@ -24,4 +24,6 @@ static inline int in_exception_text(unsigned long ptr) | |||
24 | ptr < (unsigned long)&__exception_text_end; | 24 | ptr < (unsigned long)&__exception_text_end; |
25 | } | 25 | } |
26 | 26 | ||
27 | extern void __init early_trap_init(void); | ||
28 | |||
27 | #endif | 29 | #endif |
diff --git a/include/asm-avr32/arch-at32ap/at32ap700x.h b/include/asm-avr32/arch-at32ap/at32ap700x.h index 31e48b0e7324..d18a3053be0d 100644 --- a/include/asm-avr32/arch-at32ap/at32ap700x.h +++ b/include/asm-avr32/arch-at32ap/at32ap700x.h | |||
@@ -30,4 +30,20 @@ | |||
30 | #define GPIO_PIN_PD(N) (GPIO_PIOD_BASE + (N)) | 30 | #define GPIO_PIN_PD(N) (GPIO_PIOD_BASE + (N)) |
31 | #define GPIO_PIN_PE(N) (GPIO_PIOE_BASE + (N)) | 31 | #define GPIO_PIN_PE(N) (GPIO_PIOE_BASE + (N)) |
32 | 32 | ||
33 | |||
34 | /* | ||
35 | * DMAC peripheral hardware handshaking interfaces, used with dw_dmac | ||
36 | */ | ||
37 | #define DMAC_MCI_RX 0 | ||
38 | #define DMAC_MCI_TX 1 | ||
39 | #define DMAC_DAC_TX 2 | ||
40 | #define DMAC_AC97_A_RX 3 | ||
41 | #define DMAC_AC97_A_TX 4 | ||
42 | #define DMAC_AC97_B_RX 5 | ||
43 | #define DMAC_AC97_B_TX 6 | ||
44 | #define DMAC_DMAREQ_0 7 | ||
45 | #define DMAC_DMAREQ_1 8 | ||
46 | #define DMAC_DMAREQ_2 9 | ||
47 | #define DMAC_DMAREQ_3 10 | ||
48 | |||
33 | #endif /* __ASM_ARCH_AT32AP700X_H__ */ | 49 | #endif /* __ASM_ARCH_AT32AP700X_H__ */ |
diff --git a/include/asm-powerpc/kgdb.h b/include/asm-powerpc/kgdb.h index b617dac82969..1399caf719ae 100644 --- a/include/asm-powerpc/kgdb.h +++ b/include/asm-powerpc/kgdb.h | |||
@@ -1,57 +1,65 @@ | |||
1 | /* | 1 | /* |
2 | * kgdb.h: Defines and declarations for serial line source level | 2 | * include/asm-powerpc/kgdb.h |
3 | * remote debugging of the Linux kernel using gdb. | ||
4 | * | 3 | * |
4 | * The PowerPC (32/64) specific defines / externs for KGDB. Based on | ||
5 | * the previous 32bit and 64bit specific files, which had the following | ||
6 | * copyrights: | ||
7 | * | ||
8 | * PPC64 Mods (C) 2005 Frank Rowand (frowand@mvista.com) | ||
9 | * PPC Mods (C) 2004 Tom Rini (trini@mvista.com) | ||
10 | * PPC Mods (C) 2003 John Whitney (john.whitney@timesys.com) | ||
5 | * PPC Mods (C) 1998 Michael Tesch (tesch@cs.wisc.edu) | 11 | * PPC Mods (C) 1998 Michael Tesch (tesch@cs.wisc.edu) |
6 | * | 12 | * |
13 | * | ||
7 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | 14 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) |
15 | * Author: Tom Rini <trini@kernel.crashing.org> | ||
16 | * | ||
17 | * 2006 (c) MontaVista Software, Inc. This file is licensed under | ||
18 | * the terms of the GNU General Public License version 2. This program | ||
19 | * is licensed "as is" without any warranty of any kind, whether express | ||
20 | * or implied. | ||
8 | */ | 21 | */ |
9 | #ifdef __KERNEL__ | 22 | #ifdef __KERNEL__ |
10 | #ifndef _PPC_KGDB_H | 23 | #ifndef __POWERPC_KGDB_H__ |
11 | #define _PPC_KGDB_H | 24 | #define __POWERPC_KGDB_H__ |
12 | 25 | ||
13 | #ifndef __ASSEMBLY__ | 26 | #ifndef __ASSEMBLY__ |
14 | 27 | ||
15 | /* Things specific to the gen550 backend. */ | 28 | #define BREAK_INSTR_SIZE 4 |
16 | struct uart_port; | 29 | #define BUFMAX ((NUMREGBYTES * 2) + 512) |
17 | 30 | #define OUTBUFMAX ((NUMREGBYTES * 2) + 512) | |
18 | extern void gen550_progress(char *, unsigned short); | 31 | static inline void arch_kgdb_breakpoint(void) |
19 | extern void gen550_kgdb_map_scc(void); | 32 | { |
20 | extern void gen550_init(int, struct uart_port *); | 33 | asm(".long 0x7d821008"); /* twge r2, r2 */ |
21 | 34 | } | |
22 | /* Things specific to the pmac backend. */ | 35 | #define CACHE_FLUSH_IS_SAFE 1 |
23 | extern void zs_kgdb_hook(int tty_num); | ||
24 | |||
25 | /* To init the kgdb engine. (called by serial hook)*/ | ||
26 | extern void set_debug_traps(void); | ||
27 | |||
28 | /* To enter the debugger explicitly. */ | ||
29 | extern void breakpoint(void); | ||
30 | |||
31 | /* For taking exceptions | ||
32 | * these are defined in traps.c | ||
33 | */ | ||
34 | extern int (*debugger)(struct pt_regs *regs); | ||
35 | extern int (*debugger_bpt)(struct pt_regs *regs); | ||
36 | extern int (*debugger_sstep)(struct pt_regs *regs); | ||
37 | extern int (*debugger_iabr_match)(struct pt_regs *regs); | ||
38 | extern int (*debugger_dabr_match)(struct pt_regs *regs); | ||
39 | extern void (*debugger_fault_handler)(struct pt_regs *regs); | ||
40 | |||
41 | /* What we bring to the party */ | ||
42 | int kgdb_bpt(struct pt_regs *regs); | ||
43 | int kgdb_sstep(struct pt_regs *regs); | ||
44 | void kgdb(struct pt_regs *regs); | ||
45 | int kgdb_iabr_match(struct pt_regs *regs); | ||
46 | int kgdb_dabr_match(struct pt_regs *regs); | ||
47 | 36 | ||
37 | /* The number of bytes of registers we have to save depends on a few | ||
38 | * things. For 64bit we default to not including vector registers and | ||
39 | * vector state registers. */ | ||
40 | #ifdef CONFIG_PPC64 | ||
48 | /* | 41 | /* |
49 | * external low-level support routines (ie macserial.c) | 42 | * 64 bit (8 byte) registers: |
43 | * 32 gpr, 32 fpr, nip, msr, link, ctr | ||
44 | * 32 bit (4 byte) registers: | ||
45 | * ccr, xer, fpscr | ||
50 | */ | 46 | */ |
51 | extern void kgdb_interruptible(int); /* control interrupts from serial */ | 47 | #define NUMREGBYTES ((68 * 8) + (3 * 4)) |
52 | extern void putDebugChar(char); /* write a single character */ | 48 | #define NUMCRITREGBYTES 184 |
53 | extern char getDebugChar(void); /* read and return a single char */ | 49 | #else /* CONFIG_PPC32 */ |
54 | 50 | /* On non-E500 family PPC32 we determine the size by picking the last | |
51 | * register we need, but on E500 we skip sections so we list what we | ||
52 | * need to store, and add it up. */ | ||
53 | #ifndef CONFIG_E500 | ||
54 | #define MAXREG (PT_FPSCR+1) | ||
55 | #else | ||
56 | /* 32 GPRs (8 bytes), nip, msr, ccr, link, ctr, xer, acc (8 bytes), spefscr*/ | ||
57 | #define MAXREG ((32*2)+6+2+1) | ||
58 | #endif | ||
59 | #define NUMREGBYTES (MAXREG * sizeof(int)) | ||
60 | /* CR/LR, R1, R2, R13-R31 inclusive. */ | ||
61 | #define NUMCRITREGBYTES (23 * sizeof(int)) | ||
62 | #endif /* 32/64 */ | ||
55 | #endif /* !(__ASSEMBLY__) */ | 63 | #endif /* !(__ASSEMBLY__) */ |
56 | #endif /* !(_PPC_KGDB_H) */ | 64 | #endif /* !__POWERPC_KGDB_H__ */ |
57 | #endif /* __KERNEL__ */ | 65 | #endif /* __KERNEL__ */ |
diff --git a/include/asm-x86/ipi.h b/include/asm-x86/ipi.h index 196d63c28aa4..bb1c09f7a76c 100644 --- a/include/asm-x86/ipi.h +++ b/include/asm-x86/ipi.h | |||
@@ -122,7 +122,7 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector) | |||
122 | * - mbligh | 122 | * - mbligh |
123 | */ | 123 | */ |
124 | local_irq_save(flags); | 124 | local_irq_save(flags); |
125 | for_each_cpu_mask(query_cpu, mask) { | 125 | for_each_cpu_mask_nr(query_cpu, mask) { |
126 | __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu), | 126 | __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu), |
127 | vector, APIC_DEST_PHYSICAL); | 127 | vector, APIC_DEST_PHYSICAL); |
128 | } | 128 | } |
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index 15cb82a44e89..5f58da401b43 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h | |||
@@ -134,7 +134,7 @@ extern __u32 cleared_cpu_caps[NCAPINTS]; | |||
134 | #ifdef CONFIG_SMP | 134 | #ifdef CONFIG_SMP |
135 | DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); | 135 | DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); |
136 | #define cpu_data(cpu) per_cpu(cpu_info, cpu) | 136 | #define cpu_data(cpu) per_cpu(cpu_info, cpu) |
137 | #define current_cpu_data cpu_data(smp_processor_id()) | 137 | #define current_cpu_data __get_cpu_var(cpu_info) |
138 | #else | 138 | #else |
139 | #define cpu_data(cpu) boot_cpu_data | 139 | #define cpu_data(cpu) boot_cpu_data |
140 | #define current_cpu_data boot_cpu_data | 140 | #define current_cpu_data boot_cpu_data |
diff --git a/include/asm-x86/thread_info.h b/include/asm-x86/thread_info.h index 0a8f27d31d0d..3f2de1050988 100644 --- a/include/asm-x86/thread_info.h +++ b/include/asm-x86/thread_info.h | |||
@@ -79,7 +79,6 @@ struct thread_info { | |||
79 | #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ | 79 | #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ |
80 | #define TIF_SECCOMP 8 /* secure computing */ | 80 | #define TIF_SECCOMP 8 /* secure computing */ |
81 | #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ | 81 | #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ |
82 | #define TIF_HRTICK_RESCHED 11 /* reprogram hrtick timer */ | ||
83 | #define TIF_NOTSC 16 /* TSC is not accessible in userland */ | 82 | #define TIF_NOTSC 16 /* TSC is not accessible in userland */ |
84 | #define TIF_IA32 17 /* 32bit process */ | 83 | #define TIF_IA32 17 /* 32bit process */ |
85 | #define TIF_FORK 18 /* ret_from_fork */ | 84 | #define TIF_FORK 18 /* ret_from_fork */ |
@@ -102,7 +101,6 @@ struct thread_info { | |||
102 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) | 101 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) |
103 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) | 102 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) |
104 | #define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY) | 103 | #define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY) |
105 | #define _TIF_HRTICK_RESCHED (1 << TIF_HRTICK_RESCHED) | ||
106 | #define _TIF_NOTSC (1 << TIF_NOTSC) | 104 | #define _TIF_NOTSC (1 << TIF_NOTSC) |
107 | #define _TIF_IA32 (1 << TIF_IA32) | 105 | #define _TIF_IA32 (1 << TIF_IA32) |
108 | #define _TIF_FORK (1 << TIF_FORK) | 106 | #define _TIF_FORK (1 << TIF_FORK) |
@@ -135,7 +133,7 @@ struct thread_info { | |||
135 | 133 | ||
136 | /* Only used for 64 bit */ | 134 | /* Only used for 64 bit */ |
137 | #define _TIF_DO_NOTIFY_MASK \ | 135 | #define _TIF_DO_NOTIFY_MASK \ |
138 | (_TIF_SIGPENDING|_TIF_MCE_NOTIFY|_TIF_HRTICK_RESCHED) | 136 | (_TIF_SIGPENDING|_TIF_MCE_NOTIFY) |
139 | 137 | ||
140 | /* flags to check in __switch_to() */ | 138 | /* flags to check in __switch_to() */ |
141 | #define _TIF_WORK_CTXSW \ | 139 | #define _TIF_WORK_CTXSW \ |
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index eb640f0acfac..0f50d4cc4360 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h | |||
@@ -101,21 +101,14 @@ async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, | |||
101 | 101 | ||
102 | /** | 102 | /** |
103 | * async_tx_sync_epilog - actions to take if an operation is run synchronously | 103 | * async_tx_sync_epilog - actions to take if an operation is run synchronously |
104 | * @flags: async_tx flags | ||
105 | * @depend_tx: transaction depends on depend_tx | ||
106 | * @cb_fn: function to call when the transaction completes | 104 | * @cb_fn: function to call when the transaction completes |
107 | * @cb_fn_param: parameter to pass to the callback routine | 105 | * @cb_fn_param: parameter to pass to the callback routine |
108 | */ | 106 | */ |
109 | static inline void | 107 | static inline void |
110 | async_tx_sync_epilog(unsigned long flags, | 108 | async_tx_sync_epilog(dma_async_tx_callback cb_fn, void *cb_fn_param) |
111 | struct dma_async_tx_descriptor *depend_tx, | ||
112 | dma_async_tx_callback cb_fn, void *cb_fn_param) | ||
113 | { | 109 | { |
114 | if (cb_fn) | 110 | if (cb_fn) |
115 | cb_fn(cb_fn_param); | 111 | cb_fn(cb_fn_param); |
116 | |||
117 | if (depend_tx && (flags & ASYNC_TX_DEP_ACK)) | ||
118 | async_tx_ack(depend_tx); | ||
119 | } | 112 | } |
120 | 113 | ||
121 | void | 114 | void |
@@ -152,4 +145,6 @@ struct dma_async_tx_descriptor * | |||
152 | async_trigger_callback(enum async_tx_flags flags, | 145 | async_trigger_callback(enum async_tx_flags flags, |
153 | struct dma_async_tx_descriptor *depend_tx, | 146 | struct dma_async_tx_descriptor *depend_tx, |
154 | dma_async_tx_callback cb_fn, void *cb_fn_param); | 147 | dma_async_tx_callback cb_fn, void *cb_fn_param); |
148 | |||
149 | void async_tx_quiesce(struct dma_async_tx_descriptor **tx); | ||
155 | #endif /* _ASYNC_TX_H_ */ | 150 | #endif /* _ASYNC_TX_H_ */ |
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index c24875bd9c5b..1b5c98e7fef7 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h | |||
@@ -17,6 +17,20 @@ | |||
17 | * For details of cpus_onto(), see bitmap_onto in lib/bitmap.c. | 17 | * For details of cpus_onto(), see bitmap_onto in lib/bitmap.c. |
18 | * For details of cpus_fold(), see bitmap_fold in lib/bitmap.c. | 18 | * For details of cpus_fold(), see bitmap_fold in lib/bitmap.c. |
19 | * | 19 | * |
20 | * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . | ||
21 | * Note: The alternate operations with the suffix "_nr" are used | ||
22 | * to limit the range of the loop to nr_cpu_ids instead of | ||
23 | * NR_CPUS when NR_CPUS > 64 for performance reasons. | ||
24 | * If NR_CPUS is <= 64 then most assembler bitmask | ||
25 | * operators execute faster with a constant range, so | ||
26 | * the operator will continue to use NR_CPUS. | ||
27 | * | ||
28 | * Another consideration is that nr_cpu_ids is initialized | ||
29 | * to NR_CPUS and isn't lowered until the possible cpus are | ||
30 | * discovered (including any disabled cpus). So early uses | ||
31 | * will span the entire range of NR_CPUS. | ||
32 | * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . | ||
33 | * | ||
20 | * The available cpumask operations are: | 34 | * The available cpumask operations are: |
21 | * | 35 | * |
22 | * void cpu_set(cpu, mask) turn on bit 'cpu' in mask | 36 | * void cpu_set(cpu, mask) turn on bit 'cpu' in mask |
@@ -38,18 +52,60 @@ | |||
38 | * int cpus_empty(mask) Is mask empty (no bits sets)? | 52 | * int cpus_empty(mask) Is mask empty (no bits sets)? |
39 | * int cpus_full(mask) Is mask full (all bits sets)? | 53 | * int cpus_full(mask) Is mask full (all bits sets)? |
40 | * int cpus_weight(mask) Hamming weight - number of set bits | 54 | * int cpus_weight(mask) Hamming weight - number of set bits |
55 | * int cpus_weight_nr(mask) Same using nr_cpu_ids instead of NR_CPUS | ||
41 | * | 56 | * |
42 | * void cpus_shift_right(dst, src, n) Shift right | 57 | * void cpus_shift_right(dst, src, n) Shift right |
43 | * void cpus_shift_left(dst, src, n) Shift left | 58 | * void cpus_shift_left(dst, src, n) Shift left |
44 | * | 59 | * |
45 | * int first_cpu(mask) Number lowest set bit, or NR_CPUS | 60 | * int first_cpu(mask) Number lowest set bit, or NR_CPUS |
46 | * int next_cpu(cpu, mask) Next cpu past 'cpu', or NR_CPUS | 61 | * int next_cpu(cpu, mask) Next cpu past 'cpu', or NR_CPUS |
62 | * int next_cpu_nr(cpu, mask) Next cpu past 'cpu', or nr_cpu_ids | ||
47 | * | 63 | * |
48 | * cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set | 64 | * cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set |
65 | *ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP | ||
66 | * cpumask_of_cpu_ptr_declare(v) Declares cpumask_t *v | ||
67 | * cpumask_of_cpu_ptr_next(v, cpu) Sets v = &cpumask_of_cpu_map[cpu] | ||
68 | * cpumask_of_cpu_ptr(v, cpu) Combines above two operations | ||
69 | *else | ||
70 | * cpumask_of_cpu_ptr_declare(v) Declares cpumask_t _v and *v = &_v | ||
71 | * cpumask_of_cpu_ptr_next(v, cpu) Sets _v = cpumask_of_cpu(cpu) | ||
72 | * cpumask_of_cpu_ptr(v, cpu) Combines above two operations | ||
73 | *endif | ||
49 | * CPU_MASK_ALL Initializer - all bits set | 74 | * CPU_MASK_ALL Initializer - all bits set |
50 | * CPU_MASK_NONE Initializer - no bits set | 75 | * CPU_MASK_NONE Initializer - no bits set |
51 | * unsigned long *cpus_addr(mask) Array of unsigned long's in mask | 76 | * unsigned long *cpus_addr(mask) Array of unsigned long's in mask |
52 | * | 77 | * |
78 | * CPUMASK_ALLOC kmalloc's a structure that is a composite of many cpumask_t | ||
79 | * variables, and CPUMASK_PTR provides pointers to each field. | ||
80 | * | ||
81 | * The structure should be defined something like this: | ||
82 | * struct my_cpumasks { | ||
83 | * cpumask_t mask1; | ||
84 | * cpumask_t mask2; | ||
85 | * }; | ||
86 | * | ||
87 | * Usage is then: | ||
88 | * CPUMASK_ALLOC(my_cpumasks); | ||
89 | * CPUMASK_PTR(mask1, my_cpumasks); | ||
90 | * CPUMASK_PTR(mask2, my_cpumasks); | ||
91 | * | ||
92 | * --- DO NOT reference cpumask_t pointers until this check --- | ||
93 | * if (my_cpumasks == NULL) | ||
94 | * "kmalloc failed"... | ||
95 | * | ||
96 | * References are now pointers to the cpumask_t variables (*mask1, ...) | ||
97 | * | ||
98 | *if NR_CPUS > BITS_PER_LONG | ||
99 | * CPUMASK_ALLOC(m) Declares and allocates struct m *m = | ||
100 | * kmalloc(sizeof(*m), GFP_KERNEL) | ||
101 | * CPUMASK_FREE(m) Macro for kfree(m) | ||
102 | *else | ||
103 | * CPUMASK_ALLOC(m) Declares struct m _m, *m = &_m | ||
104 | * CPUMASK_FREE(m) Nop | ||
105 | *endif | ||
106 | * CPUMASK_PTR(v, m) Declares cpumask_t *v = &(m->v) | ||
107 | * ------------------------------------------------------------------------ | ||
108 | * | ||
53 | * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing | 109 | * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing |
54 | * int cpumask_parse_user(ubuf, ulen, mask) Parse ascii string as cpumask | 110 | * int cpumask_parse_user(ubuf, ulen, mask) Parse ascii string as cpumask |
55 | * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing | 111 | * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing |
@@ -59,7 +115,8 @@ | |||
59 | * void cpus_onto(dst, orig, relmap) *dst = orig relative to relmap | 115 | * void cpus_onto(dst, orig, relmap) *dst = orig relative to relmap |
60 | * void cpus_fold(dst, orig, sz) dst bits = orig bits mod sz | 116 | * void cpus_fold(dst, orig, sz) dst bits = orig bits mod sz |
61 | * | 117 | * |
62 | * for_each_cpu_mask(cpu, mask) for-loop cpu over mask | 118 | * for_each_cpu_mask(cpu, mask) for-loop cpu over mask using NR_CPUS |
119 | * for_each_cpu_mask_nr(cpu, mask) for-loop cpu over mask using nr_cpu_ids | ||
63 | * | 120 | * |
64 | * int num_online_cpus() Number of online CPUs | 121 | * int num_online_cpus() Number of online CPUs |
65 | * int num_possible_cpus() Number of all possible CPUs | 122 | * int num_possible_cpus() Number of all possible CPUs |
@@ -216,23 +273,19 @@ static inline void __cpus_shift_left(cpumask_t *dstp, | |||
216 | bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); | 273 | bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); |
217 | } | 274 | } |
218 | 275 | ||
219 | #ifdef CONFIG_SMP | ||
220 | int __first_cpu(const cpumask_t *srcp); | ||
221 | #define first_cpu(src) __first_cpu(&(src)) | ||
222 | int __next_cpu(int n, const cpumask_t *srcp); | ||
223 | #define next_cpu(n, src) __next_cpu((n), &(src)) | ||
224 | #else | ||
225 | #define first_cpu(src) ({ (void)(src); 0; }) | ||
226 | #define next_cpu(n, src) ({ (void)(src); 1; }) | ||
227 | #endif | ||
228 | 276 | ||
229 | #ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP | 277 | #ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP |
230 | extern cpumask_t *cpumask_of_cpu_map; | 278 | extern cpumask_t *cpumask_of_cpu_map; |
231 | #define cpumask_of_cpu(cpu) (cpumask_of_cpu_map[cpu]) | 279 | #define cpumask_of_cpu(cpu) (cpumask_of_cpu_map[cpu]) |
232 | 280 | #define cpumask_of_cpu_ptr(v, cpu) \ | |
281 | const cpumask_t *v = &cpumask_of_cpu(cpu) | ||
282 | #define cpumask_of_cpu_ptr_declare(v) \ | ||
283 | const cpumask_t *v | ||
284 | #define cpumask_of_cpu_ptr_next(v, cpu) \ | ||
285 | v = &cpumask_of_cpu(cpu) | ||
233 | #else | 286 | #else |
234 | #define cpumask_of_cpu(cpu) \ | 287 | #define cpumask_of_cpu(cpu) \ |
235 | (*({ \ | 288 | ({ \ |
236 | typeof(_unused_cpumask_arg_) m; \ | 289 | typeof(_unused_cpumask_arg_) m; \ |
237 | if (sizeof(m) == sizeof(unsigned long)) { \ | 290 | if (sizeof(m) == sizeof(unsigned long)) { \ |
238 | m.bits[0] = 1UL<<(cpu); \ | 291 | m.bits[0] = 1UL<<(cpu); \ |
@@ -240,8 +293,16 @@ extern cpumask_t *cpumask_of_cpu_map; | |||
240 | cpus_clear(m); \ | 293 | cpus_clear(m); \ |
241 | cpu_set((cpu), m); \ | 294 | cpu_set((cpu), m); \ |
242 | } \ | 295 | } \ |
243 | &m; \ | 296 | m; \ |
244 | })) | 297 | }) |
298 | #define cpumask_of_cpu_ptr(v, cpu) \ | ||
299 | cpumask_t _##v = cpumask_of_cpu(cpu); \ | ||
300 | const cpumask_t *v = &_##v | ||
301 | #define cpumask_of_cpu_ptr_declare(v) \ | ||
302 | cpumask_t _##v; \ | ||
303 | const cpumask_t *v = &_##v | ||
304 | #define cpumask_of_cpu_ptr_next(v, cpu) \ | ||
305 | _##v = cpumask_of_cpu(cpu) | ||
245 | #endif | 306 | #endif |
246 | 307 | ||
247 | #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS) | 308 | #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS) |
@@ -281,6 +342,15 @@ extern cpumask_t cpu_mask_all; | |||
281 | 342 | ||
282 | #define cpus_addr(src) ((src).bits) | 343 | #define cpus_addr(src) ((src).bits) |
283 | 344 | ||
345 | #if NR_CPUS > BITS_PER_LONG | ||
346 | #define CPUMASK_ALLOC(m) struct m *m = kmalloc(sizeof(*m), GFP_KERNEL) | ||
347 | #define CPUMASK_FREE(m) kfree(m) | ||
348 | #else | ||
349 | #define CPUMASK_ALLOC(m) struct m _m, *m = &_m | ||
350 | #define CPUMASK_FREE(m) | ||
351 | #endif | ||
352 | #define CPUMASK_PTR(v, m) cpumask_t *v = &(m->v) | ||
353 | |||
284 | #define cpumask_scnprintf(buf, len, src) \ | 354 | #define cpumask_scnprintf(buf, len, src) \ |
285 | __cpumask_scnprintf((buf), (len), &(src), NR_CPUS) | 355 | __cpumask_scnprintf((buf), (len), &(src), NR_CPUS) |
286 | static inline int __cpumask_scnprintf(char *buf, int len, | 356 | static inline int __cpumask_scnprintf(char *buf, int len, |
@@ -343,29 +413,59 @@ static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp, | |||
343 | bitmap_fold(dstp->bits, origp->bits, sz, nbits); | 413 | bitmap_fold(dstp->bits, origp->bits, sz, nbits); |
344 | } | 414 | } |
345 | 415 | ||
346 | #if NR_CPUS > 1 | 416 | #if NR_CPUS == 1 |
347 | #define for_each_cpu_mask(cpu, mask) \ | 417 | |
348 | for ((cpu) = first_cpu(mask); \ | 418 | #define nr_cpu_ids 1 |
349 | (cpu) < NR_CPUS; \ | 419 | #define first_cpu(src) ({ (void)(src); 0; }) |
350 | (cpu) = next_cpu((cpu), (mask))) | 420 | #define next_cpu(n, src) ({ (void)(src); 1; }) |
351 | #else /* NR_CPUS == 1 */ | 421 | #define any_online_cpu(mask) 0 |
352 | #define for_each_cpu_mask(cpu, mask) \ | 422 | #define for_each_cpu_mask(cpu, mask) \ |
353 | for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) | 423 | for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) |
354 | #endif /* NR_CPUS */ | 424 | |
425 | #else /* NR_CPUS > 1 */ | ||
426 | |||
427 | extern int nr_cpu_ids; | ||
428 | int __first_cpu(const cpumask_t *srcp); | ||
429 | int __next_cpu(int n, const cpumask_t *srcp); | ||
430 | int __any_online_cpu(const cpumask_t *mask); | ||
431 | |||
432 | #define first_cpu(src) __first_cpu(&(src)) | ||
433 | #define next_cpu(n, src) __next_cpu((n), &(src)) | ||
434 | #define any_online_cpu(mask) __any_online_cpu(&(mask)) | ||
435 | #define for_each_cpu_mask(cpu, mask) \ | ||
436 | for ((cpu) = -1; \ | ||
437 | (cpu) = next_cpu((cpu), (mask)), \ | ||
438 | (cpu) < NR_CPUS; ) | ||
439 | #endif | ||
440 | |||
441 | #if NR_CPUS <= 64 | ||
355 | 442 | ||
356 | #define next_cpu_nr(n, src) next_cpu(n, src) | 443 | #define next_cpu_nr(n, src) next_cpu(n, src) |
357 | #define cpus_weight_nr(cpumask) cpus_weight(cpumask) | 444 | #define cpus_weight_nr(cpumask) cpus_weight(cpumask) |
358 | #define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask) | 445 | #define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask) |
359 | 446 | ||
447 | #else /* NR_CPUS > 64 */ | ||
448 | |||
449 | int __next_cpu_nr(int n, const cpumask_t *srcp); | ||
450 | #define next_cpu_nr(n, src) __next_cpu_nr((n), &(src)) | ||
451 | #define cpus_weight_nr(cpumask) __cpus_weight(&(cpumask), nr_cpu_ids) | ||
452 | #define for_each_cpu_mask_nr(cpu, mask) \ | ||
453 | for ((cpu) = -1; \ | ||
454 | (cpu) = next_cpu_nr((cpu), (mask)), \ | ||
455 | (cpu) < nr_cpu_ids; ) | ||
456 | |||
457 | #endif /* NR_CPUS > 64 */ | ||
458 | |||
360 | /* | 459 | /* |
361 | * The following particular system cpumasks and operations manage | 460 | * The following particular system cpumasks and operations manage |
362 | * possible, present and online cpus. Each of them is a fixed size | 461 | * possible, present, active and online cpus. Each of them is a fixed size |
363 | * bitmap of size NR_CPUS. | 462 | * bitmap of size NR_CPUS. |
364 | * | 463 | * |
365 | * #ifdef CONFIG_HOTPLUG_CPU | 464 | * #ifdef CONFIG_HOTPLUG_CPU |
366 | * cpu_possible_map - has bit 'cpu' set iff cpu is populatable | 465 | * cpu_possible_map - has bit 'cpu' set iff cpu is populatable |
367 | * cpu_present_map - has bit 'cpu' set iff cpu is populated | 466 | * cpu_present_map - has bit 'cpu' set iff cpu is populated |
368 | * cpu_online_map - has bit 'cpu' set iff cpu available to scheduler | 467 | * cpu_online_map - has bit 'cpu' set iff cpu available to scheduler |
468 | * cpu_active_map - has bit 'cpu' set iff cpu available to migration | ||
369 | * #else | 469 | * #else |
370 | * cpu_possible_map - has bit 'cpu' set iff cpu is populated | 470 | * cpu_possible_map - has bit 'cpu' set iff cpu is populated |
371 | * cpu_present_map - copy of cpu_possible_map | 471 | * cpu_present_map - copy of cpu_possible_map |
@@ -416,14 +516,16 @@ static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp, | |||
416 | extern cpumask_t cpu_possible_map; | 516 | extern cpumask_t cpu_possible_map; |
417 | extern cpumask_t cpu_online_map; | 517 | extern cpumask_t cpu_online_map; |
418 | extern cpumask_t cpu_present_map; | 518 | extern cpumask_t cpu_present_map; |
519 | extern cpumask_t cpu_active_map; | ||
419 | 520 | ||
420 | #if NR_CPUS > 1 | 521 | #if NR_CPUS > 1 |
421 | #define num_online_cpus() cpus_weight(cpu_online_map) | 522 | #define num_online_cpus() cpus_weight_nr(cpu_online_map) |
422 | #define num_possible_cpus() cpus_weight(cpu_possible_map) | 523 | #define num_possible_cpus() cpus_weight_nr(cpu_possible_map) |
423 | #define num_present_cpus() cpus_weight(cpu_present_map) | 524 | #define num_present_cpus() cpus_weight_nr(cpu_present_map) |
424 | #define cpu_online(cpu) cpu_isset((cpu), cpu_online_map) | 525 | #define cpu_online(cpu) cpu_isset((cpu), cpu_online_map) |
425 | #define cpu_possible(cpu) cpu_isset((cpu), cpu_possible_map) | 526 | #define cpu_possible(cpu) cpu_isset((cpu), cpu_possible_map) |
426 | #define cpu_present(cpu) cpu_isset((cpu), cpu_present_map) | 527 | #define cpu_present(cpu) cpu_isset((cpu), cpu_present_map) |
528 | #define cpu_active(cpu) cpu_isset((cpu), cpu_active_map) | ||
427 | #else | 529 | #else |
428 | #define num_online_cpus() 1 | 530 | #define num_online_cpus() 1 |
429 | #define num_possible_cpus() 1 | 531 | #define num_possible_cpus() 1 |
@@ -431,21 +533,13 @@ extern cpumask_t cpu_present_map; | |||
431 | #define cpu_online(cpu) ((cpu) == 0) | 533 | #define cpu_online(cpu) ((cpu) == 0) |
432 | #define cpu_possible(cpu) ((cpu) == 0) | 534 | #define cpu_possible(cpu) ((cpu) == 0) |
433 | #define cpu_present(cpu) ((cpu) == 0) | 535 | #define cpu_present(cpu) ((cpu) == 0) |
536 | #define cpu_active(cpu) ((cpu) == 0) | ||
434 | #endif | 537 | #endif |
435 | 538 | ||
436 | #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) | 539 | #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) |
437 | 540 | ||
438 | #ifdef CONFIG_SMP | 541 | #define for_each_possible_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_possible_map) |
439 | extern int nr_cpu_ids; | 542 | #define for_each_online_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_online_map) |
440 | #define any_online_cpu(mask) __any_online_cpu(&(mask)) | 543 | #define for_each_present_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_present_map) |
441 | int __any_online_cpu(const cpumask_t *mask); | ||
442 | #else | ||
443 | #define nr_cpu_ids 1 | ||
444 | #define any_online_cpu(mask) 0 | ||
445 | #endif | ||
446 | |||
447 | #define for_each_possible_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map) | ||
448 | #define for_each_online_cpu(cpu) for_each_cpu_mask((cpu), cpu_online_map) | ||
449 | #define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map) | ||
450 | 544 | ||
451 | #endif /* __LINUX_CPUMASK_H */ | 545 | #endif /* __LINUX_CPUMASK_H */ |
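The CPUMASK_ALLOC/CPUMASK_PTR helpers exist so code that needs several temporary cpumask_t variables does not put them on the stack when NR_CPUS is large; for small NR_CPUS they degrade to plain automatic variables and CPUMASK_FREE becomes a no-op. A small sketch combining them with the new for_each_cpu_mask_nr() iterator (the struct and function names are illustrative, following the usage pattern documented above):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct tmp_masks {
        cpumask_t covered;
        cpumask_t remaining;
};

/* Count the online CPUs; illustrative use of the allocation helpers. */
static int count_online(void)
{
        int cpu, n = 0;
        CPUMASK_ALLOC(tmp_masks);
        CPUMASK_PTR(covered, tmp_masks);
        CPUMASK_PTR(remaining, tmp_masks);

        if (tmp_masks == NULL)          /* only possible in the kmalloc case */
                return -ENOMEM;

        cpus_clear(*covered);
        cpus_andnot(*remaining, cpu_online_map, *covered);

        for_each_cpu_mask_nr(cpu, *remaining)
                n++;

        CPUMASK_FREE(tmp_masks);
        return n;
}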
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 038578362b47..e8f450c499b0 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h | |||
@@ -78,6 +78,8 @@ extern void cpuset_track_online_nodes(void); | |||
78 | 78 | ||
79 | extern int current_cpuset_is_being_rebound(void); | 79 | extern int current_cpuset_is_being_rebound(void); |
80 | 80 | ||
81 | extern void rebuild_sched_domains(void); | ||
82 | |||
81 | #else /* !CONFIG_CPUSETS */ | 83 | #else /* !CONFIG_CPUSETS */ |
82 | 84 | ||
83 | static inline int cpuset_init_early(void) { return 0; } | 85 | static inline int cpuset_init_early(void) { return 0; } |
@@ -156,6 +158,11 @@ static inline int current_cpuset_is_being_rebound(void) | |||
156 | return 0; | 158 | return 0; |
157 | } | 159 | } |
158 | 160 | ||
161 | static inline void rebuild_sched_domains(void) | ||
162 | { | ||
163 | partition_sched_domains(0, NULL, NULL); | ||
164 | } | ||
165 | |||
159 | #endif /* !CONFIG_CPUSETS */ | 166 | #endif /* !CONFIG_CPUSETS */ |
160 | 167 | ||
161 | #endif /* _LINUX_CPUSET_H */ | 168 | #endif /* _LINUX_CPUSET_H */ |
diff --git a/include/linux/dca.h b/include/linux/dca.h index af61cd1f37e9..b00a753eda53 100644 --- a/include/linux/dca.h +++ b/include/linux/dca.h | |||
@@ -10,6 +10,7 @@ void dca_unregister_notify(struct notifier_block *nb); | |||
10 | #define DCA_PROVIDER_REMOVE 0x0002 | 10 | #define DCA_PROVIDER_REMOVE 0x0002 |
11 | 11 | ||
12 | struct dca_provider { | 12 | struct dca_provider { |
13 | struct list_head node; | ||
13 | struct dca_ops *ops; | 14 | struct dca_ops *ops; |
14 | struct device *cd; | 15 | struct device *cd; |
15 | int id; | 16 | int id; |
@@ -18,7 +19,9 @@ struct dca_provider { | |||
18 | struct dca_ops { | 19 | struct dca_ops { |
19 | int (*add_requester) (struct dca_provider *, struct device *); | 20 | int (*add_requester) (struct dca_provider *, struct device *); |
20 | int (*remove_requester) (struct dca_provider *, struct device *); | 21 | int (*remove_requester) (struct dca_provider *, struct device *); |
21 | u8 (*get_tag) (struct dca_provider *, int cpu); | 22 | u8 (*get_tag) (struct dca_provider *, struct device *, |
23 | int cpu); | ||
24 | int (*dev_managed) (struct dca_provider *, struct device *); | ||
22 | }; | 25 | }; |
23 | 26 | ||
24 | struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size); | 27 | struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size); |
@@ -32,9 +35,11 @@ static inline void *dca_priv(struct dca_provider *dca) | |||
32 | } | 35 | } |
33 | 36 | ||
34 | /* Requester API */ | 37 | /* Requester API */ |
38 | #define DCA_GET_TAG_TWO_ARGS | ||
35 | int dca_add_requester(struct device *dev); | 39 | int dca_add_requester(struct device *dev); |
36 | int dca_remove_requester(struct device *dev); | 40 | int dca_remove_requester(struct device *dev); |
37 | u8 dca_get_tag(int cpu); | 41 | u8 dca_get_tag(int cpu); |
42 | u8 dca3_get_tag(struct device *dev, int cpu); | ||
38 | 43 | ||
39 | /* internal stuff */ | 44 | /* internal stuff */ |
40 | int __init dca_sysfs_init(void); | 45 | int __init dca_sysfs_init(void); |
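With get_tag() now receiving the requesting struct device, a DCA provider can hand out per-device tags, and requesters use dca3_get_tag() to ask for one. The DCA_GET_TAG_TWO_ARGS define lets driver code that also builds against older trees detect the new API at compile time; a hedged requester-side sketch:

#include <linux/dca.h>

/* 'dev' is the requester previously registered with dca_add_requester();
 * 'cpu' is the CPU the completion should be steered towards. */
static u8 example_get_dca_tag(struct device *dev, int cpu)
{
#ifdef DCA_GET_TAG_TWO_ARGS
        return dca3_get_tag(dev, cpu);  /* per-device tag lookup */
#else
        return dca_get_tag(cpu);        /* older, global lookup */
#endif
}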
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index d08a5c5eb928..adb0b084eb5a 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -89,10 +89,23 @@ enum dma_transaction_type { | |||
89 | DMA_MEMSET, | 89 | DMA_MEMSET, |
90 | DMA_MEMCPY_CRC32C, | 90 | DMA_MEMCPY_CRC32C, |
91 | DMA_INTERRUPT, | 91 | DMA_INTERRUPT, |
92 | DMA_SLAVE, | ||
92 | }; | 93 | }; |
93 | 94 | ||
94 | /* last transaction type for creation of the capabilities mask */ | 95 | /* last transaction type for creation of the capabilities mask */ |
95 | #define DMA_TX_TYPE_END (DMA_INTERRUPT + 1) | 96 | #define DMA_TX_TYPE_END (DMA_SLAVE + 1) |
97 | |||
98 | /** | ||
99 | * enum dma_slave_width - DMA slave register access width. | ||
100 | * @DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses | ||
101 | * @DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses | ||
102 | * @DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses | ||
103 | */ | ||
104 | enum dma_slave_width { | ||
105 | DMA_SLAVE_WIDTH_8BIT, | ||
106 | DMA_SLAVE_WIDTH_16BIT, | ||
107 | DMA_SLAVE_WIDTH_32BIT, | ||
108 | }; | ||
96 | 109 | ||
97 | /** | 110 | /** |
98 | * enum dma_ctrl_flags - DMA flags to augment operation preparation, | 111 | * enum dma_ctrl_flags - DMA flags to augment operation preparation, |
@@ -102,10 +115,14 @@ enum dma_transaction_type { | |||
102 | * @DMA_CTRL_ACK - the descriptor cannot be reused until the client | 115 | * @DMA_CTRL_ACK - the descriptor cannot be reused until the client |
103 | * acknowledges receipt, i.e. has has a chance to establish any | 116 | * acknowledges receipt, i.e. has has a chance to establish any |
104 | * dependency chains | 117 | * dependency chains |
118 | * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s) | ||
119 | * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s) | ||
105 | */ | 120 | */ |
106 | enum dma_ctrl_flags { | 121 | enum dma_ctrl_flags { |
107 | DMA_PREP_INTERRUPT = (1 << 0), | 122 | DMA_PREP_INTERRUPT = (1 << 0), |
108 | DMA_CTRL_ACK = (1 << 1), | 123 | DMA_CTRL_ACK = (1 << 1), |
124 | DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2), | ||
125 | DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3), | ||
109 | }; | 126 | }; |
110 | 127 | ||
111 | /** | 128 | /** |
@@ -115,6 +132,32 @@ enum dma_ctrl_flags { | |||
115 | typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; | 132 | typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; |
116 | 133 | ||
117 | /** | 134 | /** |
135 | * struct dma_slave - Information about a DMA slave | ||
136 | * @dev: device acting as DMA slave | ||
137 | * @dma_dev: required DMA master device. If non-NULL, the client can not be | ||
138 | * bound to other masters than this. | ||
139 | * @tx_reg: physical address of data register used for | ||
140 | * memory-to-peripheral transfers | ||
141 | * @rx_reg: physical address of data register used for | ||
142 | * peripheral-to-memory transfers | ||
143 | * @reg_width: peripheral register width | ||
144 | * | ||
145 | * If dma_dev is non-NULL, the client can not be bound to other DMA | ||
146 | * masters than the one corresponding to this device. The DMA master | ||
147 | * driver may use this to determine if there is controller-specific | ||
148 | * data wrapped around this struct. Drivers of platform code that sets | ||
149 | * the dma_dev field must therefore make sure to use an appropriate | ||
150 | * controller-specific dma slave structure wrapping this struct. | ||
151 | */ | ||
152 | struct dma_slave { | ||
153 | struct device *dev; | ||
154 | struct device *dma_dev; | ||
155 | dma_addr_t tx_reg; | ||
156 | dma_addr_t rx_reg; | ||
157 | enum dma_slave_width reg_width; | ||
158 | }; | ||
159 | |||
160 | /** | ||
118 | * struct dma_chan_percpu - the per-CPU part of struct dma_chan | 161 | * struct dma_chan_percpu - the per-CPU part of struct dma_chan |
119 | * @refcount: local_t used for open-coded "bigref" counting | 162 | * @refcount: local_t used for open-coded "bigref" counting |
120 | * @memcpy_count: transaction counter | 163 | * @memcpy_count: transaction counter |
@@ -139,6 +182,7 @@ struct dma_chan_percpu { | |||
139 | * @rcu: the DMA channel's RCU head | 182 | * @rcu: the DMA channel's RCU head |
140 | * @device_node: used to add this to the device chan list | 183 | * @device_node: used to add this to the device chan list |
141 | * @local: per-cpu pointer to a struct dma_chan_percpu | 184 | * @local: per-cpu pointer to a struct dma_chan_percpu |
185 | * @client_count: how many clients are using this channel | ||
142 | */ | 186 | */ |
143 | struct dma_chan { | 187 | struct dma_chan { |
144 | struct dma_device *device; | 188 | struct dma_device *device; |
@@ -154,6 +198,7 @@ struct dma_chan { | |||
154 | 198 | ||
155 | struct list_head device_node; | 199 | struct list_head device_node; |
156 | struct dma_chan_percpu *local; | 200 | struct dma_chan_percpu *local; |
201 | int client_count; | ||
157 | }; | 202 | }; |
158 | 203 | ||
159 | #define to_dma_chan(p) container_of(p, struct dma_chan, dev) | 204 | #define to_dma_chan(p) container_of(p, struct dma_chan, dev) |
@@ -202,11 +247,14 @@ typedef enum dma_state_client (*dma_event_callback) (struct dma_client *client, | |||
202 | * @event_callback: func ptr to call when something happens | 247 | * @event_callback: func ptr to call when something happens |
203 | * @cap_mask: only return channels that satisfy the requested capabilities | 248 | * @cap_mask: only return channels that satisfy the requested capabilities |
204 | * a value of zero corresponds to any capability | 249 | * a value of zero corresponds to any capability |
250 | * @slave: data for preparing slave transfer. Must be non-NULL iff the | ||
251 | * DMA_SLAVE capability is requested. | ||
205 | * @global_node: list_head for global dma_client_list | 252 | * @global_node: list_head for global dma_client_list |
206 | */ | 253 | */ |
207 | struct dma_client { | 254 | struct dma_client { |
208 | dma_event_callback event_callback; | 255 | dma_event_callback event_callback; |
209 | dma_cap_mask_t cap_mask; | 256 | dma_cap_mask_t cap_mask; |
257 | struct dma_slave *slave; | ||
210 | struct list_head global_node; | 258 | struct list_head global_node; |
211 | }; | 259 | }; |
212 | 260 | ||
@@ -263,6 +311,8 @@ struct dma_async_tx_descriptor { | |||
263 | * @device_prep_dma_zero_sum: prepares a zero_sum operation | 311 | * @device_prep_dma_zero_sum: prepares a zero_sum operation |
264 | * @device_prep_dma_memset: prepares a memset operation | 312 | * @device_prep_dma_memset: prepares a memset operation |
265 | * @device_prep_dma_interrupt: prepares an end of chain interrupt operation | 313 | * @device_prep_dma_interrupt: prepares an end of chain interrupt operation |
314 | * @device_prep_slave_sg: prepares a slave dma operation | ||
315 | * @device_terminate_all: terminate all pending operations | ||
266 | * @device_issue_pending: push pending transactions to hardware | 316 | * @device_issue_pending: push pending transactions to hardware |
267 | */ | 317 | */ |
268 | struct dma_device { | 318 | struct dma_device { |
@@ -279,7 +329,8 @@ struct dma_device { | |||
279 | int dev_id; | 329 | int dev_id; |
280 | struct device *dev; | 330 | struct device *dev; |
281 | 331 | ||
282 | int (*device_alloc_chan_resources)(struct dma_chan *chan); | 332 | int (*device_alloc_chan_resources)(struct dma_chan *chan, |
333 | struct dma_client *client); | ||
283 | void (*device_free_chan_resources)(struct dma_chan *chan); | 334 | void (*device_free_chan_resources)(struct dma_chan *chan); |
284 | 335 | ||
285 | struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( | 336 | struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( |
@@ -297,6 +348,12 @@ struct dma_device { | |||
297 | struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( | 348 | struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( |
298 | struct dma_chan *chan, unsigned long flags); | 349 | struct dma_chan *chan, unsigned long flags); |
299 | 350 | ||
351 | struct dma_async_tx_descriptor *(*device_prep_slave_sg)( | ||
352 | struct dma_chan *chan, struct scatterlist *sgl, | ||
353 | unsigned int sg_len, enum dma_data_direction direction, | ||
354 | unsigned long flags); | ||
355 | void (*device_terminate_all)(struct dma_chan *chan); | ||
356 | |||
300 | enum dma_status (*device_is_tx_complete)(struct dma_chan *chan, | 357 | enum dma_status (*device_is_tx_complete)(struct dma_chan *chan, |
301 | dma_cookie_t cookie, dma_cookie_t *last, | 358 | dma_cookie_t cookie, dma_cookie_t *last, |
302 | dma_cookie_t *used); | 359 | dma_cookie_t *used); |
@@ -318,16 +375,14 @@ dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan, | |||
318 | void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, | 375 | void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, |
319 | struct dma_chan *chan); | 376 | struct dma_chan *chan); |
320 | 377 | ||
321 | static inline void | 378 | static inline void async_tx_ack(struct dma_async_tx_descriptor *tx) |
322 | async_tx_ack(struct dma_async_tx_descriptor *tx) | ||
323 | { | 379 | { |
324 | tx->flags |= DMA_CTRL_ACK; | 380 | tx->flags |= DMA_CTRL_ACK; |
325 | } | 381 | } |
326 | 382 | ||
327 | static inline int | 383 | static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx) |
328 | async_tx_test_ack(struct dma_async_tx_descriptor *tx) | ||
329 | { | 384 | { |
330 | return tx->flags & DMA_CTRL_ACK; | 385 | return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK; |
331 | } | 386 | } |
332 | 387 | ||
333 | #define first_dma_cap(mask) __first_dma_cap(&(mask)) | 388 | #define first_dma_cap(mask) __first_dma_cap(&(mask)) |
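For illustration, a DMA client could use the slave hooks introduced above roughly as sketched below. The struct dma_slave and struct dma_client fields and the device_prep_slave_sg() signature come from this hunk; the peripheral FIFO addresses, the DMA_SLAVE_WIDTH_32BIT constant and the registration steps in the trailing comment are assumptions made for the example.

/* Hedged sketch of a slave-DMA client; values marked "assumed" are invented. */
#include <linux/dmaengine.h>

static struct dma_slave example_slave = {
	/* .dev and .dma_dev are normally filled in by platform code */
	.tx_reg    = 0xfff01000,		/* assumed peripheral TX FIFO */
	.rx_reg    = 0xfff01004,		/* assumed peripheral RX FIFO */
	.reg_width = DMA_SLAVE_WIDTH_32BIT,	/* assumed enum dma_slave_width name */
};

static enum dma_state_client example_event(struct dma_client *client,
					    struct dma_chan *chan,
					    enum dma_state state)
{
	/* take the first channel offered, let go when it is removed */
	return state == DMA_RESOURCE_AVAILABLE ? DMA_ACK : DMA_NAK;
}

static struct dma_client example_client = {
	.event_callback = example_event,
	.slave          = &example_slave,	/* required iff DMA_SLAVE is requested */
};

/* At init time the driver would do something like:
 *	dma_cap_set(DMA_SLAVE, example_client.cap_mask);
 *	dma_async_client_register(&example_client);
 * and, once handed a channel, build transfers with:
 *	chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 *					   DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
 */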
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h new file mode 100644 index 000000000000..04d217b442bf --- /dev/null +++ b/include/linux/dw_dmac.h | |||
@@ -0,0 +1,62 @@ | |||
1 | /* | ||
2 | * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on | ||
3 | * AVR32 systems.) | ||
4 | * | ||
5 | * Copyright (C) 2007 Atmel Corporation | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | #ifndef DW_DMAC_H | ||
12 | #define DW_DMAC_H | ||
13 | |||
14 | #include <linux/dmaengine.h> | ||
15 | |||
16 | /** | ||
17 | * struct dw_dma_platform_data - Controller configuration parameters | ||
18 | * @nr_channels: Number of channels supported by hardware (max 8) | ||
19 | */ | ||
20 | struct dw_dma_platform_data { | ||
21 | unsigned int nr_channels; | ||
22 | }; | ||
23 | |||
24 | /** | ||
25 | * struct dw_dma_slave - Controller-specific information about a slave | ||
26 | * @slave: Generic information about the slave | ||
27 | * @ctl_lo: Platform-specific initializer for the CTL_LO register | ||
28 | * @cfg_hi: Platform-specific initializer for the CFG_HI register | ||
29 | * @cfg_lo: Platform-specific initializer for the CFG_LO register | ||
30 | */ | ||
31 | struct dw_dma_slave { | ||
32 | struct dma_slave slave; | ||
33 | u32 cfg_hi; | ||
34 | u32 cfg_lo; | ||
35 | }; | ||
36 | |||
37 | /* Platform-configurable bits in CFG_HI */ | ||
38 | #define DWC_CFGH_FCMODE (1 << 0) | ||
39 | #define DWC_CFGH_FIFO_MODE (1 << 1) | ||
40 | #define DWC_CFGH_PROTCTL(x) ((x) << 2) | ||
41 | #define DWC_CFGH_SRC_PER(x) ((x) << 7) | ||
42 | #define DWC_CFGH_DST_PER(x) ((x) << 11) | ||
43 | |||
44 | /* Platform-configurable bits in CFG_LO */ | ||
45 | #define DWC_CFGL_PRIO(x) ((x) << 5) /* priority */ | ||
46 | #define DWC_CFGL_LOCK_CH_XFER (0 << 12) /* scope of LOCK_CH */ | ||
47 | #define DWC_CFGL_LOCK_CH_BLOCK (1 << 12) | ||
48 | #define DWC_CFGL_LOCK_CH_XACT (2 << 12) | ||
49 | #define DWC_CFGL_LOCK_BUS_XFER (0 << 14) /* scope of LOCK_BUS */ | ||
50 | #define DWC_CFGL_LOCK_BUS_BLOCK (1 << 14) | ||
51 | #define DWC_CFGL_LOCK_BUS_XACT (2 << 14) | ||
52 | #define DWC_CFGL_LOCK_CH (1 << 15) /* channel lockout */ | ||
53 | #define DWC_CFGL_LOCK_BUS (1 << 16) /* busmaster lockout */ | ||
54 | #define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */ | ||
55 | #define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */ | ||
56 | |||
57 | static inline struct dw_dma_slave *to_dw_dma_slave(struct dma_slave *slave) | ||
58 | { | ||
59 | return container_of(slave, struct dw_dma_slave, slave); | ||
60 | } | ||
61 | |||
62 | #endif /* DW_DMAC_H */ | ||
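As a usage sketch, board code might describe the controller and one slave like this; the channel count, handshake interface numbers and priority are invented example values, while the structures and DWC_* macros come from the header above.

#include <linux/dw_dmac.h>

static struct dw_dma_platform_data example_dw_pdata = {
	.nr_channels = 3,	/* example: use 3 of the up-to-8 channels */
};

static struct dw_dma_slave example_dw_slave = {
	/* .slave (generic part) is filled in as described in dmaengine.h */
	.cfg_hi = DWC_CFGH_SRC_PER(1) | DWC_CFGH_DST_PER(2),	/* assumed request lines */
	.cfg_lo = DWC_CFGL_PRIO(0),				/* assumed priority */
};

/* The controller driver recovers its private part from the generic struct: */
static u32 example_cfg_hi(struct dma_slave *slave)
{
	return to_dw_dma_slave(slave)->cfg_hi;
}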
diff --git a/include/linux/hid.h b/include/linux/hid.h index fe56b86f2c67..ac4e678a04ed 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h | |||
@@ -512,7 +512,7 @@ struct hid_descriptor { | |||
512 | 512 | ||
513 | /* Applications from HID Usage Tables 4/8/99 Version 1.1 */ | 513 | /* Applications from HID Usage Tables 4/8/99 Version 1.1 */ |
514 | /* We ignore a few input applications that are not widely used */ | 514 | /* We ignore a few input applications that are not widely used */ |
515 | #define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001)) | 515 | #define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || (a == 0x000d0002)) |
516 | 516 | ||
517 | /* HID core API */ | 517 | /* HID core API */ |
518 | 518 | ||
diff --git a/include/linux/ide.h b/include/linux/ide.h index 4726126f5a59..d67ccca2b964 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h | |||
@@ -178,6 +178,7 @@ typedef struct hw_regs_s { | |||
178 | ide_ack_intr_t *ack_intr; /* acknowledge interrupt */ | 178 | ide_ack_intr_t *ack_intr; /* acknowledge interrupt */ |
179 | hwif_chipset_t chipset; | 179 | hwif_chipset_t chipset; |
180 | struct device *dev, *parent; | 180 | struct device *dev, *parent; |
181 | unsigned long config; | ||
181 | } hw_regs_t; | 182 | } hw_regs_t; |
182 | 183 | ||
183 | void ide_init_port_data(struct hwif_s *, unsigned int); | 184 | void ide_init_port_data(struct hwif_s *, unsigned int); |
@@ -307,7 +308,65 @@ struct ide_acpi_drive_link; | |||
307 | struct ide_acpi_hwif_link; | 308 | struct ide_acpi_hwif_link; |
308 | #endif | 309 | #endif |
309 | 310 | ||
310 | typedef struct ide_drive_s { | 311 | /* ATAPI device flags */ |
312 | enum { | ||
313 | IDE_AFLAG_DRQ_INTERRUPT = (1 << 0), | ||
314 | IDE_AFLAG_MEDIA_CHANGED = (1 << 1), | ||
315 | |||
316 | /* ide-cd */ | ||
317 | /* Drive cannot lock the door. */ | ||
318 | IDE_AFLAG_NO_DOORLOCK = (1 << 2), | ||
319 | /* Drive cannot eject the disc. */ | ||
320 | IDE_AFLAG_NO_EJECT = (1 << 3), | ||
321 | /* Drive is a pre-ATAPI 1.2 drive. */ | ||
322 | IDE_AFLAG_PRE_ATAPI12 = (1 << 4), | ||
323 | /* TOC addresses are in BCD. */ | ||
324 | IDE_AFLAG_TOCADDR_AS_BCD = (1 << 5), | ||
325 | /* TOC track numbers are in BCD. */ | ||
326 | IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 6), | ||
327 | /* | ||
328 | * Drive does not provide data in multiples of SECTOR_SIZE | ||
329 | * when more than one interrupt is needed. | ||
330 | */ | ||
331 | IDE_AFLAG_LIMIT_NFRAMES = (1 << 7), | ||
332 | /* Seeking in progress. */ | ||
333 | IDE_AFLAG_SEEKING = (1 << 8), | ||
334 | /* Saved TOC information is current. */ | ||
335 | IDE_AFLAG_TOC_VALID = (1 << 9), | ||
336 | /* We think that the drive door is locked. */ | ||
337 | IDE_AFLAG_DOOR_LOCKED = (1 << 10), | ||
338 | /* SET_CD_SPEED command is unsupported. */ | ||
339 | IDE_AFLAG_NO_SPEED_SELECT = (1 << 11), | ||
340 | IDE_AFLAG_VERTOS_300_SSD = (1 << 12), | ||
341 | IDE_AFLAG_VERTOS_600_ESD = (1 << 13), | ||
342 | IDE_AFLAG_SANYO_3CD = (1 << 14), | ||
343 | IDE_AFLAG_FULL_CAPS_PAGE = (1 << 15), | ||
344 | IDE_AFLAG_PLAY_AUDIO_OK = (1 << 16), | ||
345 | IDE_AFLAG_LE_SPEED_FIELDS = (1 << 17), | ||
346 | |||
347 | /* ide-floppy */ | ||
348 | /* Format in progress */ | ||
349 | IDE_AFLAG_FORMAT_IN_PROGRESS = (1 << 18), | ||
350 | /* Avoid commands not supported in Clik drive */ | ||
351 | IDE_AFLAG_CLIK_DRIVE = (1 << 19), | ||
352 | /* Requires BH algorithm for packets */ | ||
353 | IDE_AFLAG_ZIP_DRIVE = (1 << 20), | ||
354 | |||
355 | /* ide-tape */ | ||
356 | IDE_AFLAG_IGNORE_DSC = (1 << 21), | ||
357 | /* 0 = the tape position is unknown */ | ||
358 | IDE_AFLAG_ADDRESS_VALID = (1 << 22), | ||
359 | /* Device already opened */ | ||
360 | IDE_AFLAG_BUSY = (1 << 23), | ||
361 | /* Attempt to auto-detect the current user block size */ | ||
362 | IDE_AFLAG_DETECT_BS = (1 << 24), | ||
363 | /* Currently on a filemark */ | ||
364 | IDE_AFLAG_FILEMARK = (1 << 25), | ||
365 | /* 0 = no tape is loaded, so we don't rewind after ejecting */ | ||
366 | IDE_AFLAG_MEDIUM_PRESENT = (1 << 26) | ||
367 | }; | ||
368 | |||
369 | struct ide_drive_s { | ||
311 | char name[4]; /* drive name, such as "hda" */ | 370 | char name[4]; /* drive name, such as "hda" */ |
312 | char driver_req[10]; /* requests specific driver */ | 371 | char driver_req[10]; /* requests specific driver */ |
313 | 372 | ||
@@ -355,7 +414,6 @@ typedef struct ide_drive_s { | |||
355 | unsigned nodma : 1; /* disallow DMA */ | 414 | unsigned nodma : 1; /* disallow DMA */ |
356 | unsigned remap_0_to_1 : 1; /* 0=noremap, 1=remap 0->1 (for EZDrive) */ | 415 | unsigned remap_0_to_1 : 1; /* 0=noremap, 1=remap 0->1 (for EZDrive) */ |
357 | unsigned blocked : 1; /* 1=powermanagment told us not to do anything, so sleep nicely */ | 416 | unsigned blocked : 1; /* 1=powermanagment told us not to do anything, so sleep nicely */ |
358 | unsigned vdma : 1; /* 1=doing PIO over DMA 0=doing normal DMA */ | ||
359 | unsigned scsi : 1; /* 0=default, 1=ide-scsi emulation */ | 417 | unsigned scsi : 1; /* 0=default, 1=ide-scsi emulation */ |
360 | unsigned sleeping : 1; /* 1=sleeping & sleep field valid */ | 418 | unsigned sleeping : 1; /* 1=sleeping & sleep field valid */ |
361 | unsigned post_reset : 1; | 419 | unsigned post_reset : 1; |
@@ -400,7 +458,14 @@ typedef struct ide_drive_s { | |||
400 | struct list_head list; | 458 | struct list_head list; |
401 | struct device gendev; | 459 | struct device gendev; |
402 | struct completion gendev_rel_comp; /* to deal with device release() */ | 460 | struct completion gendev_rel_comp; /* to deal with device release() */ |
403 | } ide_drive_t; | 461 | |
462 | /* callback for packet commands */ | ||
463 | void (*pc_callback)(struct ide_drive_s *); | ||
464 | |||
465 | unsigned long atapi_flags; | ||
466 | }; | ||
467 | |||
468 | typedef struct ide_drive_s ide_drive_t; | ||
404 | 469 | ||
405 | #define to_ide_device(dev)container_of(dev, ide_drive_t, gendev) | 470 | #define to_ide_device(dev)container_of(dev, ide_drive_t, gendev) |
406 | 471 | ||
@@ -408,8 +473,28 @@ typedef struct ide_drive_s { | |||
408 | ((1<<ide_pci)|(1<<ide_cmd646)|(1<<ide_ali14xx)) | 473 | ((1<<ide_pci)|(1<<ide_cmd646)|(1<<ide_ali14xx)) |
409 | #define IDE_CHIPSET_IS_PCI(c) ((IDE_CHIPSET_PCI_MASK >> (c)) & 1) | 474 | #define IDE_CHIPSET_IS_PCI(c) ((IDE_CHIPSET_PCI_MASK >> (c)) & 1) |
410 | 475 | ||
476 | struct ide_task_s; | ||
411 | struct ide_port_info; | 477 | struct ide_port_info; |
412 | 478 | ||
479 | struct ide_tp_ops { | ||
480 | void (*exec_command)(struct hwif_s *, u8); | ||
481 | u8 (*read_status)(struct hwif_s *); | ||
482 | u8 (*read_altstatus)(struct hwif_s *); | ||
483 | u8 (*read_sff_dma_status)(struct hwif_s *); | ||
484 | |||
485 | void (*set_irq)(struct hwif_s *, int); | ||
486 | |||
487 | void (*tf_load)(ide_drive_t *, struct ide_task_s *); | ||
488 | void (*tf_read)(ide_drive_t *, struct ide_task_s *); | ||
489 | |||
490 | void (*input_data)(ide_drive_t *, struct request *, void *, | ||
491 | unsigned int); | ||
492 | void (*output_data)(ide_drive_t *, struct request *, void *, | ||
493 | unsigned int); | ||
494 | }; | ||
495 | |||
496 | extern const struct ide_tp_ops default_tp_ops; | ||
497 | |||
413 | struct ide_port_ops { | 498 | struct ide_port_ops { |
414 | /* host specific initialization of a device */ | 499 | /* host specific initialization of a device */ |
415 | void (*init_dev)(ide_drive_t *); | 500 | void (*init_dev)(ide_drive_t *); |
@@ -447,8 +532,6 @@ struct ide_dma_ops { | |||
447 | void (*dma_timeout)(struct ide_drive_s *); | 532 | void (*dma_timeout)(struct ide_drive_s *); |
448 | }; | 533 | }; |
449 | 534 | ||
450 | struct ide_task_s; | ||
451 | |||
452 | typedef struct hwif_s { | 535 | typedef struct hwif_s { |
453 | struct hwif_s *next; /* for linked-list in ide_hwgroup_t */ | 536 | struct hwif_s *next; /* for linked-list in ide_hwgroup_t */ |
454 | struct hwif_s *mate; /* other hwif from same PCI chip */ | 537 | struct hwif_s *mate; /* other hwif from same PCI chip */ |
@@ -486,22 +569,12 @@ typedef struct hwif_s { | |||
486 | 569 | ||
487 | void (*rw_disk)(ide_drive_t *, struct request *); | 570 | void (*rw_disk)(ide_drive_t *, struct request *); |
488 | 571 | ||
572 | const struct ide_tp_ops *tp_ops; | ||
489 | const struct ide_port_ops *port_ops; | 573 | const struct ide_port_ops *port_ops; |
490 | const struct ide_dma_ops *dma_ops; | 574 | const struct ide_dma_ops *dma_ops; |
491 | 575 | ||
492 | void (*tf_load)(ide_drive_t *, struct ide_task_s *); | ||
493 | void (*tf_read)(ide_drive_t *, struct ide_task_s *); | ||
494 | |||
495 | void (*input_data)(ide_drive_t *, struct request *, void *, unsigned); | ||
496 | void (*output_data)(ide_drive_t *, struct request *, void *, unsigned); | ||
497 | |||
498 | void (*ide_dma_clear_irq)(ide_drive_t *drive); | 576 | void (*ide_dma_clear_irq)(ide_drive_t *drive); |
499 | 577 | ||
500 | void (*OUTB)(u8 addr, unsigned long port); | ||
501 | void (*OUTBSYNC)(struct hwif_s *hwif, u8 addr, unsigned long port); | ||
502 | |||
503 | u8 (*INB)(unsigned long port); | ||
504 | |||
505 | /* dma physical region descriptor table (cpu view) */ | 578 | /* dma physical region descriptor table (cpu view) */ |
506 | unsigned int *dmatable_cpu; | 579 | unsigned int *dmatable_cpu; |
507 | /* dma physical region descriptor table (dma view) */ | 580 | /* dma physical region descriptor table (dma view) */ |
@@ -524,8 +597,6 @@ typedef struct hwif_s { | |||
524 | int irq; /* our irq number */ | 597 | int irq; /* our irq number */ |
525 | 598 | ||
526 | unsigned long dma_base; /* base addr for dma ports */ | 599 | unsigned long dma_base; /* base addr for dma ports */ |
527 | unsigned long dma_command; /* dma command register */ | ||
528 | unsigned long dma_status; /* dma status register */ | ||
529 | 600 | ||
530 | unsigned long config_data; /* for use by chipset-specific code */ | 601 | unsigned long config_data; /* for use by chipset-specific code */ |
531 | unsigned long select_data; /* for use by chipset-specific code */ | 602 | unsigned long select_data; /* for use by chipset-specific code */ |
@@ -552,6 +623,11 @@ typedef struct hwif_s { | |||
552 | #endif | 623 | #endif |
553 | } ____cacheline_internodealigned_in_smp ide_hwif_t; | 624 | } ____cacheline_internodealigned_in_smp ide_hwif_t; |
554 | 625 | ||
626 | struct ide_host { | ||
627 | ide_hwif_t *ports[MAX_HWIFS]; | ||
628 | unsigned int n_ports; | ||
629 | }; | ||
630 | |||
555 | /* | 631 | /* |
556 | * internal ide interrupt handler type | 632 | * internal ide interrupt handler type |
557 | */ | 633 | */ |
@@ -611,8 +687,6 @@ enum { | |||
611 | PC_FLAG_WRITING = (1 << 6), | 687 | PC_FLAG_WRITING = (1 << 6), |
612 | /* command timed out */ | 688 | /* command timed out */ |
613 | PC_FLAG_TIMEDOUT = (1 << 7), | 689 | PC_FLAG_TIMEDOUT = (1 << 7), |
614 | PC_FLAG_ZIP_DRIVE = (1 << 8), | ||
615 | PC_FLAG_DRQ_INTERRUPT = (1 << 9), | ||
616 | }; | 690 | }; |
617 | 691 | ||
618 | struct ide_atapi_pc { | 692 | struct ide_atapi_pc { |
@@ -646,8 +720,6 @@ struct ide_atapi_pc { | |||
646 | */ | 720 | */ |
647 | u8 pc_buf[256]; | 721 | u8 pc_buf[256]; |
648 | 722 | ||
649 | void (*callback)(ide_drive_t *); | ||
650 | |||
651 | /* idetape only */ | 723 | /* idetape only */ |
652 | struct idetape_bh *bh; | 724 | struct idetape_bh *bh; |
653 | char *b_data; | 725 | char *b_data; |
@@ -807,13 +879,6 @@ int generic_ide_ioctl(ide_drive_t *, struct file *, struct block_device *, unsig | |||
807 | extern int ide_vlb_clk; | 879 | extern int ide_vlb_clk; |
808 | extern int ide_pci_clk; | 880 | extern int ide_pci_clk; |
809 | 881 | ||
810 | ide_hwif_t *ide_find_port_slot(const struct ide_port_info *); | ||
811 | |||
812 | static inline ide_hwif_t *ide_find_port(void) | ||
813 | { | ||
814 | return ide_find_port_slot(NULL); | ||
815 | } | ||
816 | |||
817 | extern int ide_end_request (ide_drive_t *drive, int uptodate, int nrsecs); | 882 | extern int ide_end_request (ide_drive_t *drive, int uptodate, int nrsecs); |
818 | int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq, | 883 | int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq, |
819 | int uptodate, int nr_sectors); | 884 | int uptodate, int nr_sectors); |
@@ -884,6 +949,7 @@ enum { | |||
884 | IDE_TFLAG_IN_HOB = IDE_TFLAG_IN_HOB_FEATURE | | 949 | IDE_TFLAG_IN_HOB = IDE_TFLAG_IN_HOB_FEATURE | |
885 | IDE_TFLAG_IN_HOB_NSECT | | 950 | IDE_TFLAG_IN_HOB_NSECT | |
886 | IDE_TFLAG_IN_HOB_LBA, | 951 | IDE_TFLAG_IN_HOB_LBA, |
952 | IDE_TFLAG_IN_FEATURE = (1 << 1), | ||
887 | IDE_TFLAG_IN_NSECT = (1 << 25), | 953 | IDE_TFLAG_IN_NSECT = (1 << 25), |
888 | IDE_TFLAG_IN_LBAL = (1 << 26), | 954 | IDE_TFLAG_IN_LBAL = (1 << 26), |
889 | IDE_TFLAG_IN_LBAM = (1 << 27), | 955 | IDE_TFLAG_IN_LBAM = (1 << 27), |
@@ -948,9 +1014,25 @@ typedef struct ide_task_s { | |||
948 | 1014 | ||
949 | void ide_tf_dump(const char *, struct ide_taskfile *); | 1015 | void ide_tf_dump(const char *, struct ide_taskfile *); |
950 | 1016 | ||
1017 | void ide_exec_command(ide_hwif_t *, u8); | ||
1018 | u8 ide_read_status(ide_hwif_t *); | ||
1019 | u8 ide_read_altstatus(ide_hwif_t *); | ||
1020 | u8 ide_read_sff_dma_status(ide_hwif_t *); | ||
1021 | |||
1022 | void ide_set_irq(ide_hwif_t *, int); | ||
1023 | |||
1024 | void ide_tf_load(ide_drive_t *, ide_task_t *); | ||
1025 | void ide_tf_read(ide_drive_t *, ide_task_t *); | ||
1026 | |||
1027 | void ide_input_data(ide_drive_t *, struct request *, void *, unsigned int); | ||
1028 | void ide_output_data(ide_drive_t *, struct request *, void *, unsigned int); | ||
1029 | |||
951 | extern void SELECT_DRIVE(ide_drive_t *); | 1030 | extern void SELECT_DRIVE(ide_drive_t *); |
952 | void SELECT_MASK(ide_drive_t *, int); | 1031 | void SELECT_MASK(ide_drive_t *, int); |
953 | 1032 | ||
1033 | u8 ide_read_error(ide_drive_t *); | ||
1034 | void ide_read_bcount_and_ireason(ide_drive_t *, u16 *, u8 *); | ||
1035 | |||
954 | extern int drive_is_ready(ide_drive_t *); | 1036 | extern int drive_is_ready(ide_drive_t *); |
955 | 1037 | ||
956 | void ide_pktcmd_tf_load(ide_drive_t *, u32, u16, u8); | 1038 | void ide_pktcmd_tf_load(ide_drive_t *, u32, u16, u8); |
@@ -1000,12 +1082,15 @@ extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *o | |||
1000 | #define ide_pci_register_driver(d) pci_register_driver(d) | 1082 | #define ide_pci_register_driver(d) pci_register_driver(d) |
1001 | #endif | 1083 | #endif |
1002 | 1084 | ||
1003 | void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, int, u8 *); | 1085 | void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, int, |
1086 | hw_regs_t *, hw_regs_t **); | ||
1004 | void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *); | 1087 | void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *); |
1005 | 1088 | ||
1006 | #ifdef CONFIG_BLK_DEV_IDEDMA_PCI | 1089 | #ifdef CONFIG_BLK_DEV_IDEDMA_PCI |
1007 | int ide_pci_set_master(struct pci_dev *, const char *); | 1090 | int ide_pci_set_master(struct pci_dev *, const char *); |
1008 | unsigned long ide_pci_dma_base(ide_hwif_t *, const struct ide_port_info *); | 1091 | unsigned long ide_pci_dma_base(ide_hwif_t *, const struct ide_port_info *); |
1092 | extern const struct ide_dma_ops sff_dma_ops; | ||
1093 | int ide_pci_check_simplex(ide_hwif_t *, const struct ide_port_info *); | ||
1009 | int ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *); | 1094 | int ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *); |
1010 | #else | 1095 | #else |
1011 | static inline int ide_hwif_setup_dma(ide_hwif_t *hwif, | 1096 | static inline int ide_hwif_setup_dma(ide_hwif_t *hwif, |
@@ -1015,10 +1100,6 @@ static inline int ide_hwif_setup_dma(ide_hwif_t *hwif, | |||
1015 | } | 1100 | } |
1016 | #endif | 1101 | #endif |
1017 | 1102 | ||
1018 | extern void default_hwif_iops(ide_hwif_t *); | ||
1019 | extern void default_hwif_mmiops(ide_hwif_t *); | ||
1020 | extern void default_hwif_transport(ide_hwif_t *); | ||
1021 | |||
1022 | typedef struct ide_pci_enablebit_s { | 1103 | typedef struct ide_pci_enablebit_s { |
1023 | u8 reg; /* byte pci reg holding the enable-bit */ | 1104 | u8 reg; /* byte pci reg holding the enable-bit */ |
1024 | u8 mask; /* mask to isolate the enable-bit */ | 1105 | u8 mask; /* mask to isolate the enable-bit */ |
@@ -1081,7 +1162,6 @@ enum { | |||
1081 | IDE_HFLAG_IO_32BIT = (1 << 24), | 1162 | IDE_HFLAG_IO_32BIT = (1 << 24), |
1082 | /* unmask IRQs */ | 1163 | /* unmask IRQs */ |
1083 | IDE_HFLAG_UNMASK_IRQS = (1 << 25), | 1164 | IDE_HFLAG_UNMASK_IRQS = (1 << 25), |
1084 | IDE_HFLAG_ABUSE_SET_DMA_MODE = (1 << 26), | ||
1085 | /* serialize ports if DMA is possible (for sl82c105) */ | 1165 | /* serialize ports if DMA is possible (for sl82c105) */ |
1086 | IDE_HFLAG_SERIALIZE_DMA = (1 << 27), | 1166 | IDE_HFLAG_SERIALIZE_DMA = (1 << 27), |
1087 | /* force host out of "simplex" mode */ | 1167 | /* force host out of "simplex" mode */ |
@@ -1092,8 +1172,6 @@ enum { | |||
1092 | IDE_HFLAG_NO_IO_32BIT = (1 << 30), | 1172 | IDE_HFLAG_NO_IO_32BIT = (1 << 30), |
1093 | /* never unmask IRQs */ | 1173 | /* never unmask IRQs */ |
1094 | IDE_HFLAG_NO_UNMASK_IRQS = (1 << 31), | 1174 | IDE_HFLAG_NO_UNMASK_IRQS = (1 << 31), |
1095 | /* host uses VDMA (disabled for now) */ | ||
1096 | IDE_HFLAG_VDMA = 0, | ||
1097 | }; | 1175 | }; |
1098 | 1176 | ||
1099 | #ifdef CONFIG_BLK_DEV_OFFBOARD | 1177 | #ifdef CONFIG_BLK_DEV_OFFBOARD |
@@ -1110,6 +1188,7 @@ struct ide_port_info { | |||
1110 | int (*init_dma)(ide_hwif_t *, | 1188 | int (*init_dma)(ide_hwif_t *, |
1111 | const struct ide_port_info *); | 1189 | const struct ide_port_info *); |
1112 | 1190 | ||
1191 | const struct ide_tp_ops *tp_ops; | ||
1113 | const struct ide_port_ops *port_ops; | 1192 | const struct ide_port_ops *port_ops; |
1114 | const struct ide_dma_ops *dma_ops; | 1193 | const struct ide_dma_ops *dma_ops; |
1115 | 1194 | ||
@@ -1163,7 +1242,6 @@ void ide_destroy_dmatable(ide_drive_t *); | |||
1163 | extern int ide_build_dmatable(ide_drive_t *, struct request *); | 1242 | extern int ide_build_dmatable(ide_drive_t *, struct request *); |
1164 | int ide_allocate_dma_engine(ide_hwif_t *); | 1243 | int ide_allocate_dma_engine(ide_hwif_t *); |
1165 | void ide_release_dma_engine(ide_hwif_t *); | 1244 | void ide_release_dma_engine(ide_hwif_t *); |
1166 | void ide_setup_dma(ide_hwif_t *, unsigned long); | ||
1167 | 1245 | ||
1168 | void ide_dma_host_set(ide_drive_t *, int); | 1246 | void ide_dma_host_set(ide_drive_t *, int); |
1169 | extern int ide_dma_setup(ide_drive_t *); | 1247 | extern int ide_dma_setup(ide_drive_t *); |
@@ -1217,8 +1295,14 @@ void ide_undecoded_slave(ide_drive_t *); | |||
1217 | 1295 | ||
1218 | void ide_port_apply_params(ide_hwif_t *); | 1296 | void ide_port_apply_params(ide_hwif_t *); |
1219 | 1297 | ||
1220 | int ide_device_add_all(u8 *idx, const struct ide_port_info *); | 1298 | struct ide_host *ide_host_alloc_all(const struct ide_port_info *, hw_regs_t **); |
1221 | int ide_device_add(u8 idx[4], const struct ide_port_info *); | 1299 | struct ide_host *ide_host_alloc(const struct ide_port_info *, hw_regs_t **); |
1300 | void ide_host_free(struct ide_host *); | ||
1301 | int ide_host_register(struct ide_host *, const struct ide_port_info *, | ||
1302 | hw_regs_t **); | ||
1303 | int ide_host_add(const struct ide_port_info *, hw_regs_t **, | ||
1304 | struct ide_host **); | ||
1305 | void ide_host_remove(struct ide_host *); | ||
1222 | int ide_legacy_device_add(const struct ide_port_info *, unsigned long); | 1306 | int ide_legacy_device_add(const struct ide_port_info *, unsigned long); |
1223 | void ide_port_unregister_devices(ide_hwif_t *); | 1307 | void ide_port_unregister_devices(ide_hwif_t *); |
1224 | void ide_port_scan(ide_hwif_t *); | 1308 | void ide_port_scan(ide_hwif_t *); |
@@ -1350,33 +1434,4 @@ static inline ide_drive_t *ide_get_paired_drive(ide_drive_t *drive) | |||
1350 | 1434 | ||
1351 | return &hwif->drives[(drive->dn ^ 1) & 1]; | 1435 | return &hwif->drives[(drive->dn ^ 1) & 1]; |
1352 | } | 1436 | } |
1353 | |||
1354 | static inline void ide_set_irq(ide_drive_t *drive, int on) | ||
1355 | { | ||
1356 | ide_hwif_t *hwif = drive->hwif; | ||
1357 | |||
1358 | hwif->OUTBSYNC(hwif, ATA_DEVCTL_OBS | (on ? 0 : 2), | ||
1359 | hwif->io_ports.ctl_addr); | ||
1360 | } | ||
1361 | |||
1362 | static inline u8 ide_read_status(ide_drive_t *drive) | ||
1363 | { | ||
1364 | ide_hwif_t *hwif = drive->hwif; | ||
1365 | |||
1366 | return hwif->INB(hwif->io_ports.status_addr); | ||
1367 | } | ||
1368 | |||
1369 | static inline u8 ide_read_altstatus(ide_drive_t *drive) | ||
1370 | { | ||
1371 | ide_hwif_t *hwif = drive->hwif; | ||
1372 | |||
1373 | return hwif->INB(hwif->io_ports.ctl_addr); | ||
1374 | } | ||
1375 | |||
1376 | static inline u8 ide_read_error(ide_drive_t *drive) | ||
1377 | { | ||
1378 | ide_hwif_t *hwif = drive->hwif; | ||
1379 | |||
1380 | return hwif->INB(hwif->io_ports.error_addr); | ||
1381 | } | ||
1382 | #endif /* _IDE_H */ | 1437 | #endif /* _IDE_H */ |
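A hedged sketch of how a host driver could use the new ide_host_* registration API declared above; the one-port hw_regs_t setup, the NULL-padded pointer array and the ide_std_init_ports() helper reflect what a typical caller is assumed to look like rather than any specific driver.

#include <linux/ide.h>
#include <linux/string.h>

static int example_ide_probe(unsigned long base, unsigned long ctl, int irq,
			     const struct ide_port_info *info)
{
	struct ide_host *host;
	hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };

	memset(&hw, 0, sizeof(hw));
	ide_std_init_ports(&hw, base, ctl);	/* legacy taskfile/ctl layout */
	hw.irq = irq;

	/* allocate, register and probe the port(s) in one step */
	return ide_host_add(info, hws, &host);
}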
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h new file mode 100644 index 000000000000..bb3dd0545928 --- /dev/null +++ b/include/linux/mfd/core.h | |||
@@ -0,0 +1,55 @@ | |||
1 | #ifndef MFD_CORE_H | ||
2 | #define MFD_CORE_H | ||
3 | /* | ||
4 | * drivers/mfd/mfd-core.h | ||
5 | * | ||
6 | * core MFD support | ||
7 | * Copyright (c) 2006 Ian Molton | ||
8 | * Copyright (c) 2007 Dmitry Baryshkov | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #include <linux/platform_device.h> | ||
17 | |||
18 | /* | ||
19 | * This struct describes the MFD part ("cell"). | ||
20 | * After registration, a copy of this structure becomes the platform data | ||
21 | * of the resulting platform_device. | ||
22 | */ | ||
23 | struct mfd_cell { | ||
24 | const char *name; | ||
25 | |||
26 | int (*enable)(struct platform_device *dev); | ||
27 | int (*disable)(struct platform_device *dev); | ||
28 | int (*suspend)(struct platform_device *dev); | ||
29 | int (*resume)(struct platform_device *dev); | ||
30 | |||
31 | void *driver_data; /* driver-specific data */ | ||
32 | |||
33 | /* | ||
34 | * These resources can be specified relative to the parent device. | ||
35 | * When accessing the device, use the resources from the platform device. | ||
36 | */ | ||
37 | int num_resources; | ||
38 | const struct resource *resources; | ||
39 | }; | ||
40 | |||
41 | static inline struct mfd_cell * | ||
42 | mfd_get_cell(struct platform_device *pdev) | ||
43 | { | ||
44 | return (struct mfd_cell *)pdev->dev.platform_data; | ||
45 | } | ||
46 | |||
47 | extern int mfd_add_devices( | ||
48 | struct platform_device *parent, | ||
49 | const struct mfd_cell *cells, int n_devs, | ||
50 | struct resource *mem_base, | ||
51 | int irq_base); | ||
52 | |||
53 | extern void mfd_remove_devices(struct platform_device *parent); | ||
54 | |||
55 | #endif | ||
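For illustration, a parent driver might describe its sub-devices with this API as sketched below; the cell name, resource layout and irq_base are placeholder assumptions, while struct mfd_cell and mfd_add_devices()/mfd_remove_devices() come from the header.

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/mfd/core.h>

/* Resources are given relative to the parent; mfd_add_devices() shifts them. */
static const struct resource example_nand_resources[] = {
	{ .start = 0x100, .end = 0x1ff, .flags = IORESOURCE_MEM },
	{ .start = 0,     .end = 0,     .flags = IORESOURCE_IRQ },
};

static const struct mfd_cell example_cells[] = {
	{
		.name		= "example-nand",	/* assumed child driver name */
		.num_resources	= ARRAY_SIZE(example_nand_resources),
		.resources	= example_nand_resources,
	},
};

static int example_parent_probe(struct platform_device *pdev)
{
	/* pdev->resource supplies the memory base; 100 is an assumed irq_base */
	return mfd_add_devices(pdev, example_cells, ARRAY_SIZE(example_cells),
			       pdev->resource, 100);
}

static int example_parent_remove(struct platform_device *pdev)
{
	mfd_remove_devices(pdev);
	return 0;
}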
diff --git a/include/linux/mfd/tc6393xb.h b/include/linux/mfd/tc6393xb.h new file mode 100644 index 000000000000..7cc824a58f7c --- /dev/null +++ b/include/linux/mfd/tc6393xb.h | |||
@@ -0,0 +1,49 @@ | |||
1 | /* | ||
2 | * Toshiba TC6393XB SoC support | ||
3 | * | ||
4 | * Copyright(c) 2005-2006 Chris Humbert | ||
5 | * Copyright(c) 2005 Dirk Opfer | ||
6 | * Copyright(c) 2005 Ian Molton <spyro@f2s.com> | ||
7 | * Copyright(c) 2007 Dmitry Baryshkov | ||
8 | * | ||
9 | * Based on code written by Sharp/Lineo for 2.4 kernels | ||
10 | * Based on locomo.c | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License version 2 as | ||
14 | * published by the Free Software Foundation. | ||
15 | */ | ||
16 | |||
17 | #ifndef TC6393XB_H | ||
18 | #define TC6393XB_H | ||
19 | |||
20 | /* The platform must also provide the CK3P6MI clock */ | ||
21 | struct tc6393xb_platform_data { | ||
22 | u16 scr_pll2cr; /* PLL2 Control */ | ||
23 | u16 scr_gper; /* GP Enable */ | ||
24 | u32 scr_gpo_doecr; /* GPO Data OE Control */ | ||
25 | u32 scr_gpo_dsr; /* GPO Data Set */ | ||
26 | |||
27 | int (*enable)(struct platform_device *dev); | ||
28 | int (*disable)(struct platform_device *dev); | ||
29 | int (*suspend)(struct platform_device *dev); | ||
30 | int (*resume)(struct platform_device *dev); | ||
31 | |||
32 | int irq_base; /* a base for cascaded irq */ | ||
33 | int gpio_base; | ||
34 | |||
35 | struct tmio_nand_data *nand_data; | ||
36 | }; | ||
37 | |||
38 | /* | ||
39 | * Relative to irq_base | ||
40 | */ | ||
41 | #define IRQ_TC6393_NAND 0 | ||
42 | #define IRQ_TC6393_MMC 1 | ||
43 | #define IRQ_TC6393_OHCI 2 | ||
44 | #define IRQ_TC6393_SERIAL 3 | ||
45 | #define IRQ_TC6393_FB 4 | ||
46 | |||
47 | #define TC6393XB_NR_IRQS 8 | ||
48 | |||
49 | #endif | ||
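A board file would typically instantiate this platform data roughly as follows; every value here (PLL setting, IRQ/GPIO bases, empty NAND data) is an illustrative assumption rather than a real machine description.

#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/mfd/tmio.h>
#include <linux/mfd/tc6393xb.h>

static struct tmio_nand_data example_nand_data = {
	/* .partition, .num_partitions, .badblock_pattern as the board requires */
};

static struct tc6393xb_platform_data example_tc6393xb_data = {
	.scr_pll2cr	= 0x0cc1,	/* assumed PLL2 control value */
	.scr_gper	= 0,
	.irq_base	= 100,		/* start of the 8 cascaded TC6393XB IRQs */
	.gpio_base	= 64,		/* assumed free GPIO range */
	.nand_data	= &example_nand_data,
	/* .enable/.disable/.suspend/.resume would hook board power control */
};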
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h new file mode 100644 index 000000000000..9438d8c9ac1c --- /dev/null +++ b/include/linux/mfd/tmio.h | |||
@@ -0,0 +1,17 @@ | |||
1 | #ifndef MFD_TMIO_H | ||
2 | #define MFD_TMIO_H | ||
3 | |||
4 | /* | ||
5 | * data for the NAND controller | ||
6 | */ | ||
7 | struct tmio_nand_data { | ||
8 | struct nand_bbt_descr *badblock_pattern; | ||
9 | struct mtd_partition *partition; | ||
10 | unsigned int num_partitions; | ||
11 | }; | ||
12 | |||
13 | #define TMIO_NAND_CONFIG "tmio-nand-config" | ||
14 | #define TMIO_NAND_CONTROL "tmio-nand-control" | ||
15 | #define TMIO_NAND_IRQ "tmio-nand" | ||
16 | |||
17 | #endif | ||
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index d8507eb394cf..119ae7b8f028 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -2371,6 +2371,14 @@ | |||
2371 | #define PCI_DEVICE_ID_INTEL_ICH9_7 0x2916 | 2371 | #define PCI_DEVICE_ID_INTEL_ICH9_7 0x2916 |
2372 | #define PCI_DEVICE_ID_INTEL_ICH9_8 0x2918 | 2372 | #define PCI_DEVICE_ID_INTEL_ICH9_8 0x2918 |
2373 | #define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340 | 2373 | #define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340 |
2374 | #define PCI_DEVICE_ID_INTEL_IOAT_TBG4 0x3429 | ||
2375 | #define PCI_DEVICE_ID_INTEL_IOAT_TBG5 0x342a | ||
2376 | #define PCI_DEVICE_ID_INTEL_IOAT_TBG6 0x342b | ||
2377 | #define PCI_DEVICE_ID_INTEL_IOAT_TBG7 0x342c | ||
2378 | #define PCI_DEVICE_ID_INTEL_IOAT_TBG0 0x3430 | ||
2379 | #define PCI_DEVICE_ID_INTEL_IOAT_TBG1 0x3431 | ||
2380 | #define PCI_DEVICE_ID_INTEL_IOAT_TBG2 0x3432 | ||
2381 | #define PCI_DEVICE_ID_INTEL_IOAT_TBG3 0x3433 | ||
2374 | #define PCI_DEVICE_ID_INTEL_82830_HB 0x3575 | 2382 | #define PCI_DEVICE_ID_INTEL_82830_HB 0x3575 |
2375 | #define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577 | 2383 | #define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577 |
2376 | #define PCI_DEVICE_ID_INTEL_82855GM_HB 0x3580 | 2384 | #define PCI_DEVICE_ID_INTEL_82855GM_HB 0x3580 |
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 71fc81360048..e5996984ddd0 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h | |||
@@ -224,4 +224,42 @@ size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents, | |||
224 | */ | 224 | */ |
225 | #define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist)) | 225 | #define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist)) |
226 | 226 | ||
227 | |||
228 | /* | ||
229 | * Mapping sg iterator | ||
230 | * | ||
231 | * Iterates over sg entries mapping page-by-page. On each successful | ||
232 | * iteration, @miter->page points to the mapped page and | ||
233 | * @miter->length bytes of data can be accessed at @miter->addr. As | ||
234 | * long as each iteration is enclosed between start and stop, the user | ||
235 | * is free to choose the control structure and when to stop. | ||
236 | * | ||
237 | * @miter->consumed is set to @miter->length on each iteration. It | ||
238 | * can be adjusted if the user can't consume all the bytes in one go. | ||
239 | * Also, a stopped iteration can be resumed by calling next on it. | ||
240 | * This is useful when iteration needs to release all resources and | ||
241 | * continue later (e.g. at the next interrupt). | ||
242 | */ | ||
243 | |||
244 | #define SG_MITER_ATOMIC (1 << 0) /* use kmap_atomic */ | ||
245 | |||
246 | struct sg_mapping_iter { | ||
247 | /* the following three fields can be accessed directly */ | ||
248 | struct page *page; /* currently mapped page */ | ||
249 | void *addr; /* pointer to the mapped area */ | ||
250 | size_t length; /* length of the mapped area */ | ||
251 | size_t consumed; /* number of consumed bytes */ | ||
252 | |||
253 | /* these are internal states, keep away */ | ||
254 | struct scatterlist *__sg; /* current entry */ | ||
255 | unsigned int __nents; /* nr of remaining entries */ | ||
256 | unsigned int __offset; /* offset within sg */ | ||
257 | unsigned int __flags; | ||
258 | }; | ||
259 | |||
260 | void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl, | ||
261 | unsigned int nents, unsigned int flags); | ||
262 | bool sg_miter_next(struct sg_mapping_iter *miter); | ||
263 | void sg_miter_stop(struct sg_mapping_iter *miter); | ||
264 | |||
227 | #endif /* _LINUX_SCATTERLIST_H */ | 265 | #endif /* _LINUX_SCATTERLIST_H */ |
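To make the iteration contract above concrete, here is a minimal usage sketch; the zero-filling payload is an invented example, while the API calls follow the declarations just added.

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/scatterlist.h>

/* Zero up to @len bytes spread across an sg list, page by page. */
static void example_sg_zero(struct scatterlist *sgl, unsigned int nents,
			    size_t len)
{
	struct sg_mapping_iter miter;

	/* SG_MITER_ATOMIC: pages are kmap_atomic()ed, so no sleeping inside */
	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);
	while (len && sg_miter_next(&miter)) {
		size_t chunk = min(miter.length, len);

		memset(miter.addr, 0, chunk);
		miter.consumed = chunk;	/* we may consume less than was mapped */
		len -= chunk;
	}
	sg_miter_stop(&miter);		/* always pair with sg_miter_start() */
}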
diff --git a/include/linux/sched.h b/include/linux/sched.h index 1941d8b5cf11..dc7e592c473a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -295,10 +295,11 @@ extern void softlockup_tick(void); | |||
295 | extern void spawn_softlockup_task(void); | 295 | extern void spawn_softlockup_task(void); |
296 | extern void touch_softlockup_watchdog(void); | 296 | extern void touch_softlockup_watchdog(void); |
297 | extern void touch_all_softlockup_watchdogs(void); | 297 | extern void touch_all_softlockup_watchdogs(void); |
298 | extern unsigned long softlockup_thresh; | 298 | extern unsigned int softlockup_panic; |
299 | extern unsigned long sysctl_hung_task_check_count; | 299 | extern unsigned long sysctl_hung_task_check_count; |
300 | extern unsigned long sysctl_hung_task_timeout_secs; | 300 | extern unsigned long sysctl_hung_task_timeout_secs; |
301 | extern unsigned long sysctl_hung_task_warnings; | 301 | extern unsigned long sysctl_hung_task_warnings; |
302 | extern int softlockup_thresh; | ||
302 | #else | 303 | #else |
303 | static inline void softlockup_tick(void) | 304 | static inline void softlockup_tick(void) |
304 | { | 305 | { |
@@ -824,7 +825,16 @@ extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, | |||
824 | struct sched_domain_attr *dattr_new); | 825 | struct sched_domain_attr *dattr_new); |
825 | extern int arch_reinit_sched_domains(void); | 826 | extern int arch_reinit_sched_domains(void); |
826 | 827 | ||
827 | #endif /* CONFIG_SMP */ | 828 | #else /* CONFIG_SMP */ |
829 | |||
830 | struct sched_domain_attr; | ||
831 | |||
832 | static inline void | ||
833 | partition_sched_domains(int ndoms_new, cpumask_t *doms_new, | ||
834 | struct sched_domain_attr *dattr_new) | ||
835 | { | ||
836 | } | ||
837 | #endif /* !CONFIG_SMP */ | ||
828 | 838 | ||
829 | struct io_context; /* See blkdev.h */ | 839 | struct io_context; /* See blkdev.h */ |
830 | #define NGROUPS_SMALL 32 | 840 | #define NGROUPS_SMALL 32 |
diff --git a/include/linux/smc91x.h b/include/linux/smc91x.h index 8e0556b8781c..3827b922ba1f 100644 --- a/include/linux/smc91x.h +++ b/include/linux/smc91x.h | |||
@@ -5,9 +5,19 @@ | |||
5 | #define SMC91X_USE_16BIT (1 << 1) | 5 | #define SMC91X_USE_16BIT (1 << 1) |
6 | #define SMC91X_USE_32BIT (1 << 2) | 6 | #define SMC91X_USE_32BIT (1 << 2) |
7 | 7 | ||
8 | #define SMC91X_NOWAIT (1 << 3) | ||
9 | |||
10 | /* two bits for IO_SHIFT, let's hope later designs will keep this sane */ | ||
11 | #define SMC91X_IO_SHIFT_0 (0 << 4) | ||
12 | #define SMC91X_IO_SHIFT_1 (1 << 4) | ||
13 | #define SMC91X_IO_SHIFT_2 (2 << 4) | ||
14 | #define SMC91X_IO_SHIFT_3 (3 << 4) | ||
15 | #define SMC91X_IO_SHIFT(x) (((x) >> 4) & 0x3) | ||
16 | |||
17 | #define SMC91X_USE_DMA (1 << 6) | ||
18 | |||
8 | struct smc91x_platdata { | 19 | struct smc91x_platdata { |
9 | unsigned long flags; | 20 | unsigned long flags; |
10 | unsigned long irq_flags; /* IRQF_... */ | ||
11 | }; | 21 | }; |
12 | 22 | ||
13 | #endif /* __SMC91X_H__ */ | 23 | #endif /* __SMC91X_H__ */ |
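A small assumed example of a board file using the new flag bits (the particular combination shown is illustrative only):

#include <linux/smc91x.h>

static struct smc91x_platdata example_smc91x_info = {
	/* 16-bit bus, no extra wait states, registers spaced 4 bytes apart */
	.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT | SMC91X_IO_SHIFT_2,
};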
diff --git a/init/main.c b/init/main.c index 756eca4b821a..2769dc031c62 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -415,6 +415,13 @@ static void __init smp_init(void) | |||
415 | { | 415 | { |
416 | unsigned int cpu; | 416 | unsigned int cpu; |
417 | 417 | ||
418 | /* | ||
419 | * Mark the boot CPU as active so tasks can be migrated to it. | ||
420 | * The remaining CPUs are handled by cpu_up()/cpu_down(). | ||
421 | */ | ||
422 | cpu = smp_processor_id(); | ||
423 | cpu_set(cpu, cpu_active_map); | ||
424 | |||
418 | /* FIXME: This should be done in userspace --RR */ | 425 | /* FIXME: This should be done in userspace --RR */ |
419 | for_each_present_cpu(cpu) { | 426 | for_each_present_cpu(cpu) { |
420 | if (num_online_cpus() >= setup_max_cpus) | 427 | if (num_online_cpus() >= setup_max_cpus) |
diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz index 526128a2e622..2a202a846757 100644 --- a/kernel/Kconfig.hz +++ b/kernel/Kconfig.hz | |||
@@ -55,4 +55,4 @@ config HZ | |||
55 | default 1000 if HZ_1000 | 55 | default 1000 if HZ_1000 |
56 | 56 | ||
57 | config SCHED_HRTICK | 57 | config SCHED_HRTICK |
58 | def_bool HIGH_RES_TIMERS && X86 | 58 | def_bool HIGH_RES_TIMERS |
diff --git a/kernel/Makefile b/kernel/Makefile index 985ddb7da4d0..15ab63ffe64d 100644 --- a/kernel/Makefile +++ b/kernel/Makefile | |||
@@ -11,6 +11,8 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \ | |||
11 | hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ | 11 | hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ |
12 | notifier.o ksysfs.o pm_qos_params.o sched_clock.o | 12 | notifier.o ksysfs.o pm_qos_params.o sched_clock.o |
13 | 13 | ||
14 | CFLAGS_REMOVE_sched.o = -mno-spe | ||
15 | |||
14 | ifdef CONFIG_FTRACE | 16 | ifdef CONFIG_FTRACE |
15 | # Do not trace debug files and internal ftrace files | 17 | # Do not trace debug files and internal ftrace files |
16 | CFLAGS_REMOVE_lockdep.o = -pg | 18 | CFLAGS_REMOVE_lockdep.o = -pg |
diff --git a/kernel/auditsc.c b/kernel/auditsc.c index c10e7aae04d7..4699950e65bd 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c | |||
@@ -1476,7 +1476,8 @@ void audit_syscall_entry(int arch, int major, | |||
1476 | struct audit_context *context = tsk->audit_context; | 1476 | struct audit_context *context = tsk->audit_context; |
1477 | enum audit_state state; | 1477 | enum audit_state state; |
1478 | 1478 | ||
1479 | BUG_ON(!context); | 1479 | if (unlikely(!context)) |
1480 | return; | ||
1480 | 1481 | ||
1481 | /* | 1482 | /* |
1482 | * This happens only on certain architectures that make system | 1483 | * This happens only on certain architectures that make system |
diff --git a/kernel/cpu.c b/kernel/cpu.c index cfb1d43ab801..2cc409ce0a8f 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c | |||
@@ -64,6 +64,8 @@ void __init cpu_hotplug_init(void) | |||
64 | cpu_hotplug.refcount = 0; | 64 | cpu_hotplug.refcount = 0; |
65 | } | 65 | } |
66 | 66 | ||
67 | cpumask_t cpu_active_map; | ||
68 | |||
67 | #ifdef CONFIG_HOTPLUG_CPU | 69 | #ifdef CONFIG_HOTPLUG_CPU |
68 | 70 | ||
69 | void get_online_cpus(void) | 71 | void get_online_cpus(void) |
@@ -291,11 +293,30 @@ int __ref cpu_down(unsigned int cpu) | |||
291 | int err = 0; | 293 | int err = 0; |
292 | 294 | ||
293 | cpu_maps_update_begin(); | 295 | cpu_maps_update_begin(); |
294 | if (cpu_hotplug_disabled) | 296 | |
297 | if (cpu_hotplug_disabled) { | ||
295 | err = -EBUSY; | 298 | err = -EBUSY; |
296 | else | 299 | goto out; |
297 | err = _cpu_down(cpu, 0); | 300 | } |
301 | |||
302 | cpu_clear(cpu, cpu_active_map); | ||
303 | |||
304 | /* | ||
305 | * Make sure all CPUs have rescheduled and are no longer | ||
306 | * using a stale version of cpu_active_map. | ||
307 | * This is not strictly necessary because the stop_machine() | ||
308 | * that we run further down already provides the required | ||
309 | * synchronization. But that is really a side effect and we do not | ||
310 | * want to depend on the innards of stop_machine() here. | ||
311 | */ | ||
312 | synchronize_sched(); | ||
313 | |||
314 | err = _cpu_down(cpu, 0); | ||
298 | 315 | ||
316 | if (cpu_online(cpu)) | ||
317 | cpu_set(cpu, cpu_active_map); | ||
318 | |||
319 | out: | ||
299 | cpu_maps_update_done(); | 320 | cpu_maps_update_done(); |
300 | return err; | 321 | return err; |
301 | } | 322 | } |
@@ -355,11 +376,18 @@ int __cpuinit cpu_up(unsigned int cpu) | |||
355 | } | 376 | } |
356 | 377 | ||
357 | cpu_maps_update_begin(); | 378 | cpu_maps_update_begin(); |
358 | if (cpu_hotplug_disabled) | 379 | |
380 | if (cpu_hotplug_disabled) { | ||
359 | err = -EBUSY; | 381 | err = -EBUSY; |
360 | else | 382 | goto out; |
361 | err = _cpu_up(cpu, 0); | 383 | } |
384 | |||
385 | err = _cpu_up(cpu, 0); | ||
362 | 386 | ||
387 | if (cpu_online(cpu)) | ||
388 | cpu_set(cpu, cpu_active_map); | ||
389 | |||
390 | out: | ||
363 | cpu_maps_update_done(); | 391 | cpu_maps_update_done(); |
364 | return err; | 392 | return err; |
365 | } | 393 | } |
@@ -413,7 +441,7 @@ void __ref enable_nonboot_cpus(void) | |||
413 | goto out; | 441 | goto out; |
414 | 442 | ||
415 | printk("Enabling non-boot CPUs ...\n"); | 443 | printk("Enabling non-boot CPUs ...\n"); |
416 | for_each_cpu_mask(cpu, frozen_cpus) { | 444 | for_each_cpu_mask_nr(cpu, frozen_cpus) { |
417 | error = _cpu_up(cpu, 1); | 445 | error = _cpu_up(cpu, 1); |
418 | if (!error) { | 446 | if (!error) { |
419 | printk("CPU%d is up\n", cpu); | 447 | printk("CPU%d is up\n", cpu); |
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index d2cc67dac8b1..d5738910c34c 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
@@ -564,7 +564,7 @@ update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c) | |||
564 | * partition_sched_domains(). | 564 | * partition_sched_domains(). |
565 | */ | 565 | */ |
566 | 566 | ||
567 | static void rebuild_sched_domains(void) | 567 | void rebuild_sched_domains(void) |
568 | { | 568 | { |
569 | struct kfifo *q; /* queue of cpusets to be scanned */ | 569 | struct kfifo *q; /* queue of cpusets to be scanned */ |
570 | struct cpuset *cp; /* scans q */ | 570 | struct cpuset *cp; /* scans q */ |
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 77a51be36010..3cfc0fefb5ee 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -217,6 +217,17 @@ void enable_irq(unsigned int irq) | |||
217 | } | 217 | } |
218 | EXPORT_SYMBOL(enable_irq); | 218 | EXPORT_SYMBOL(enable_irq); |
219 | 219 | ||
220 | int set_irq_wake_real(unsigned int irq, unsigned int on) | ||
221 | { | ||
222 | struct irq_desc *desc = irq_desc + irq; | ||
223 | int ret = -ENXIO; | ||
224 | |||
225 | if (desc->chip->set_wake) | ||
226 | ret = desc->chip->set_wake(irq, on); | ||
227 | |||
228 | return ret; | ||
229 | } | ||
230 | |||
220 | /** | 231 | /** |
221 | * set_irq_wake - control irq power management wakeup | 232 | * set_irq_wake - control irq power management wakeup |
222 | * @irq: interrupt to control | 233 | * @irq: interrupt to control |
@@ -233,30 +244,34 @@ int set_irq_wake(unsigned int irq, unsigned int on) | |||
233 | { | 244 | { |
234 | struct irq_desc *desc = irq_desc + irq; | 245 | struct irq_desc *desc = irq_desc + irq; |
235 | unsigned long flags; | 246 | unsigned long flags; |
236 | int ret = -ENXIO; | 247 | int ret = 0; |
237 | int (*set_wake)(unsigned, unsigned) = desc->chip->set_wake; | ||
238 | 248 | ||
239 | /* wakeup-capable irqs can be shared between drivers that | 249 | /* wakeup-capable irqs can be shared between drivers that |
240 | * don't need to have the same sleep mode behaviors. | 250 | * don't need to have the same sleep mode behaviors. |
241 | */ | 251 | */ |
242 | spin_lock_irqsave(&desc->lock, flags); | 252 | spin_lock_irqsave(&desc->lock, flags); |
243 | if (on) { | 253 | if (on) { |
244 | if (desc->wake_depth++ == 0) | 254 | if (desc->wake_depth++ == 0) { |
245 | desc->status |= IRQ_WAKEUP; | 255 | ret = set_irq_wake_real(irq, on); |
246 | else | 256 | if (ret) |
247 | set_wake = NULL; | 257 | desc->wake_depth = 0; |
258 | else | ||
259 | desc->status |= IRQ_WAKEUP; | ||
260 | } | ||
248 | } else { | 261 | } else { |
249 | if (desc->wake_depth == 0) { | 262 | if (desc->wake_depth == 0) { |
250 | printk(KERN_WARNING "Unbalanced IRQ %d " | 263 | printk(KERN_WARNING "Unbalanced IRQ %d " |
251 | "wake disable\n", irq); | 264 | "wake disable\n", irq); |
252 | WARN_ON(1); | 265 | WARN_ON(1); |
253 | } else if (--desc->wake_depth == 0) | 266 | } else if (--desc->wake_depth == 0) { |
254 | desc->status &= ~IRQ_WAKEUP; | 267 | ret = set_irq_wake_real(irq, on); |
255 | else | 268 | if (ret) |
256 | set_wake = NULL; | 269 | desc->wake_depth = 1; |
270 | else | ||
271 | desc->status &= ~IRQ_WAKEUP; | ||
272 | } | ||
257 | } | 273 | } |
258 | if (set_wake) | 274 | |
259 | ret = desc->chip->set_wake(irq, on); | ||
260 | spin_unlock_irqrestore(&desc->lock, flags); | 275 | spin_unlock_irqrestore(&desc->lock, flags); |
261 | return ret; | 276 | return ret; |
262 | } | 277 | } |
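The rollback added here is visible to drivers only through the return value; typical usage stays as sketched below (the IRQ number is an assumed example, and enable_irq_wake()/disable_irq_wake() are the usual wrappers around set_irq_wake()).

#include <linux/interrupt.h>

#define EXAMPLE_WAKE_IRQ	17	/* assumed wake-capable interrupt line */

static int example_suspend(void)
{
	/* If the chip rejects set_wake, wake_depth is rolled back to 0,
	 * so the disable below stays balanced even on failure. */
	return enable_irq_wake(EXAMPLE_WAKE_IRQ);
}

static void example_resume(void)
{
	disable_irq_wake(EXAMPLE_WAKE_IRQ);
}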
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c index 16eeeaa9d618..6f8696c502f4 100644 --- a/kernel/rcuclassic.c +++ b/kernel/rcuclassic.c | |||
@@ -106,7 +106,7 @@ static void force_quiescent_state(struct rcu_data *rdp, | |||
106 | */ | 106 | */ |
107 | cpus_and(cpumask, rcp->cpumask, cpu_online_map); | 107 | cpus_and(cpumask, rcp->cpumask, cpu_online_map); |
108 | cpu_clear(rdp->cpu, cpumask); | 108 | cpu_clear(rdp->cpu, cpumask); |
109 | for_each_cpu_mask(cpu, cpumask) | 109 | for_each_cpu_mask_nr(cpu, cpumask) |
110 | smp_send_reschedule(cpu); | 110 | smp_send_reschedule(cpu); |
111 | } | 111 | } |
112 | } | 112 | } |
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c index 6f62b77d93c4..27827931ca0d 100644 --- a/kernel/rcupreempt.c +++ b/kernel/rcupreempt.c | |||
@@ -756,7 +756,7 @@ rcu_try_flip_idle(void) | |||
756 | 756 | ||
757 | /* Now ask each CPU for acknowledgement of the flip. */ | 757 | /* Now ask each CPU for acknowledgement of the flip. */ |
758 | 758 | ||
759 | for_each_cpu_mask(cpu, rcu_cpu_online_map) { | 759 | for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) { |
760 | per_cpu(rcu_flip_flag, cpu) = rcu_flipped; | 760 | per_cpu(rcu_flip_flag, cpu) = rcu_flipped; |
761 | dyntick_save_progress_counter(cpu); | 761 | dyntick_save_progress_counter(cpu); |
762 | } | 762 | } |
@@ -774,7 +774,7 @@ rcu_try_flip_waitack(void) | |||
774 | int cpu; | 774 | int cpu; |
775 | 775 | ||
776 | RCU_TRACE_ME(rcupreempt_trace_try_flip_a1); | 776 | RCU_TRACE_ME(rcupreempt_trace_try_flip_a1); |
777 | for_each_cpu_mask(cpu, rcu_cpu_online_map) | 777 | for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) |
778 | if (rcu_try_flip_waitack_needed(cpu) && | 778 | if (rcu_try_flip_waitack_needed(cpu) && |
779 | per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) { | 779 | per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) { |
780 | RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1); | 780 | RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1); |
@@ -806,7 +806,7 @@ rcu_try_flip_waitzero(void) | |||
806 | /* Check to see if the sum of the "last" counters is zero. */ | 806 | /* Check to see if the sum of the "last" counters is zero. */ |
807 | 807 | ||
808 | RCU_TRACE_ME(rcupreempt_trace_try_flip_z1); | 808 | RCU_TRACE_ME(rcupreempt_trace_try_flip_z1); |
809 | for_each_cpu_mask(cpu, rcu_cpu_online_map) | 809 | for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) |
810 | sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx]; | 810 | sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx]; |
811 | if (sum != 0) { | 811 | if (sum != 0) { |
812 | RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1); | 812 | RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1); |
@@ -821,7 +821,7 @@ rcu_try_flip_waitzero(void) | |||
821 | smp_mb(); /* ^^^^^^^^^^^^ */ | 821 | smp_mb(); /* ^^^^^^^^^^^^ */ |
822 | 822 | ||
823 | /* Call for a memory barrier from each CPU. */ | 823 | /* Call for a memory barrier from each CPU. */ |
824 | for_each_cpu_mask(cpu, rcu_cpu_online_map) { | 824 | for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) { |
825 | per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed; | 825 | per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed; |
826 | dyntick_save_progress_counter(cpu); | 826 | dyntick_save_progress_counter(cpu); |
827 | } | 827 | } |
@@ -841,7 +841,7 @@ rcu_try_flip_waitmb(void) | |||
841 | int cpu; | 841 | int cpu; |
842 | 842 | ||
843 | RCU_TRACE_ME(rcupreempt_trace_try_flip_m1); | 843 | RCU_TRACE_ME(rcupreempt_trace_try_flip_m1); |
844 | for_each_cpu_mask(cpu, rcu_cpu_online_map) | 844 | for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) |
845 | if (rcu_try_flip_waitmb_needed(cpu) && | 845 | if (rcu_try_flip_waitmb_needed(cpu) && |
846 | per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) { | 846 | per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) { |
847 | RCU_TRACE_ME(rcupreempt_trace_try_flip_me1); | 847 | RCU_TRACE_ME(rcupreempt_trace_try_flip_me1); |
diff --git a/kernel/sched.c b/kernel/sched.c index b1104ea5d255..6acf749d3336 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -571,8 +571,10 @@ struct rq { | |||
571 | #endif | 571 | #endif |
572 | 572 | ||
573 | #ifdef CONFIG_SCHED_HRTICK | 573 | #ifdef CONFIG_SCHED_HRTICK |
574 | unsigned long hrtick_flags; | 574 | #ifdef CONFIG_SMP |
575 | ktime_t hrtick_expire; | 575 | int hrtick_csd_pending; |
576 | struct call_single_data hrtick_csd; | ||
577 | #endif | ||
576 | struct hrtimer hrtick_timer; | 578 | struct hrtimer hrtick_timer; |
577 | #endif | 579 | #endif |
578 | 580 | ||
@@ -983,13 +985,6 @@ static struct rq *this_rq_lock(void) | |||
983 | return rq; | 985 | return rq; |
984 | } | 986 | } |
985 | 987 | ||
986 | static void __resched_task(struct task_struct *p, int tif_bit); | ||
987 | |||
988 | static inline void resched_task(struct task_struct *p) | ||
989 | { | ||
990 | __resched_task(p, TIF_NEED_RESCHED); | ||
991 | } | ||
992 | |||
993 | #ifdef CONFIG_SCHED_HRTICK | 988 | #ifdef CONFIG_SCHED_HRTICK |
994 | /* | 989 | /* |
995 | * Use HR-timers to deliver accurate preemption points. | 990 | * Use HR-timers to deliver accurate preemption points. |
@@ -1001,25 +996,6 @@ static inline void resched_task(struct task_struct *p) | |||
1001 | * When we get rescheduled we reprogram the hrtick_timer outside of the | 996 | * When we get rescheduled we reprogram the hrtick_timer outside of the |
1002 | * rq->lock. | 997 | * rq->lock. |
1003 | */ | 998 | */ |
1004 | static inline void resched_hrt(struct task_struct *p) | ||
1005 | { | ||
1006 | __resched_task(p, TIF_HRTICK_RESCHED); | ||
1007 | } | ||
1008 | |||
1009 | static inline void resched_rq(struct rq *rq) | ||
1010 | { | ||
1011 | unsigned long flags; | ||
1012 | |||
1013 | spin_lock_irqsave(&rq->lock, flags); | ||
1014 | resched_task(rq->curr); | ||
1015 | spin_unlock_irqrestore(&rq->lock, flags); | ||
1016 | } | ||
1017 | |||
1018 | enum { | ||
1019 | HRTICK_SET, /* re-programm hrtick_timer */ | ||
1020 | HRTICK_RESET, /* not a new slice */ | ||
1021 | HRTICK_BLOCK, /* stop hrtick operations */ | ||
1022 | }; | ||
1023 | 999 | ||
1024 | /* | 1000 | /* |
1025 | * Use hrtick when: | 1001 | * Use hrtick when: |
@@ -1030,40 +1006,11 @@ static inline int hrtick_enabled(struct rq *rq) | |||
1030 | { | 1006 | { |
1031 | if (!sched_feat(HRTICK)) | 1007 | if (!sched_feat(HRTICK)) |
1032 | return 0; | 1008 | return 0; |
1033 | if (unlikely(test_bit(HRTICK_BLOCK, &rq->hrtick_flags))) | 1009 | if (!cpu_active(cpu_of(rq))) |
1034 | return 0; | 1010 | return 0; |
1035 | return hrtimer_is_hres_active(&rq->hrtick_timer); | 1011 | return hrtimer_is_hres_active(&rq->hrtick_timer); |
1036 | } | 1012 | } |
1037 | 1013 | ||
1038 | /* | ||
1039 | * Called to set the hrtick timer state. | ||
1040 | * | ||
1041 | * called with rq->lock held and irqs disabled | ||
1042 | */ | ||
1043 | static void hrtick_start(struct rq *rq, u64 delay, int reset) | ||
1044 | { | ||
1045 | assert_spin_locked(&rq->lock); | ||
1046 | |||
1047 | /* | ||
1048 | * preempt at: now + delay | ||
1049 | */ | ||
1050 | rq->hrtick_expire = | ||
1051 | ktime_add_ns(rq->hrtick_timer.base->get_time(), delay); | ||
1052 | /* | ||
1053 | * indicate we need to program the timer | ||
1054 | */ | ||
1055 | __set_bit(HRTICK_SET, &rq->hrtick_flags); | ||
1056 | if (reset) | ||
1057 | __set_bit(HRTICK_RESET, &rq->hrtick_flags); | ||
1058 | |||
1059 | /* | ||
1060 | * New slices are called from the schedule path and don't need a | ||
1061 | * forced reschedule. | ||
1062 | */ | ||
1063 | if (reset) | ||
1064 | resched_hrt(rq->curr); | ||
1065 | } | ||
1066 | |||
1067 | static void hrtick_clear(struct rq *rq) | 1014 | static void hrtick_clear(struct rq *rq) |
1068 | { | 1015 | { |
1069 | if (hrtimer_active(&rq->hrtick_timer)) | 1016 | if (hrtimer_active(&rq->hrtick_timer)) |
@@ -1071,32 +1018,6 @@ static void hrtick_clear(struct rq *rq) | |||
1071 | } | 1018 | } |
1072 | 1019 | ||
1073 | /* | 1020 | /* |
1074 | * Update the timer from the possible pending state. | ||
1075 | */ | ||
1076 | static void hrtick_set(struct rq *rq) | ||
1077 | { | ||
1078 | ktime_t time; | ||
1079 | int set, reset; | ||
1080 | unsigned long flags; | ||
1081 | |||
1082 | WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); | ||
1083 | |||
1084 | spin_lock_irqsave(&rq->lock, flags); | ||
1085 | set = __test_and_clear_bit(HRTICK_SET, &rq->hrtick_flags); | ||
1086 | reset = __test_and_clear_bit(HRTICK_RESET, &rq->hrtick_flags); | ||
1087 | time = rq->hrtick_expire; | ||
1088 | clear_thread_flag(TIF_HRTICK_RESCHED); | ||
1089 | spin_unlock_irqrestore(&rq->lock, flags); | ||
1090 | |||
1091 | if (set) { | ||
1092 | hrtimer_start(&rq->hrtick_timer, time, HRTIMER_MODE_ABS); | ||
1093 | if (reset && !hrtimer_active(&rq->hrtick_timer)) | ||
1094 | resched_rq(rq); | ||
1095 | } else | ||
1096 | hrtick_clear(rq); | ||
1097 | } | ||
1098 | |||
1099 | /* | ||
1100 | * High-resolution timer tick. | 1021 | * High-resolution timer tick. |
1101 | * Runs from hardirq context with interrupts disabled. | 1022 | * Runs from hardirq context with interrupts disabled. |
1102 | */ | 1023 | */ |
@@ -1115,27 +1036,37 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer) | |||
1115 | } | 1036 | } |
1116 | 1037 | ||
1117 | #ifdef CONFIG_SMP | 1038 | #ifdef CONFIG_SMP |
1118 | static void hotplug_hrtick_disable(int cpu) | 1039 | /* |
1040 | * called from hardirq (IPI) context | ||
1041 | */ | ||
1042 | static void __hrtick_start(void *arg) | ||
1119 | { | 1043 | { |
1120 | struct rq *rq = cpu_rq(cpu); | 1044 | struct rq *rq = arg; |
1121 | unsigned long flags; | ||
1122 | |||
1123 | spin_lock_irqsave(&rq->lock, flags); | ||
1124 | rq->hrtick_flags = 0; | ||
1125 | __set_bit(HRTICK_BLOCK, &rq->hrtick_flags); | ||
1126 | spin_unlock_irqrestore(&rq->lock, flags); | ||
1127 | 1045 | ||
1128 | hrtick_clear(rq); | 1046 | spin_lock(&rq->lock); |
1047 | hrtimer_restart(&rq->hrtick_timer); | ||
1048 | rq->hrtick_csd_pending = 0; | ||
1049 | spin_unlock(&rq->lock); | ||
1129 | } | 1050 | } |
1130 | 1051 | ||
1131 | static void hotplug_hrtick_enable(int cpu) | 1052 | /* |
1053 | * Called to set the hrtick timer state. | ||
1054 | * | ||
1055 | * called with rq->lock held and irqs disabled | ||
1056 | */ | ||
1057 | static void hrtick_start(struct rq *rq, u64 delay) | ||
1132 | { | 1058 | { |
1133 | struct rq *rq = cpu_rq(cpu); | 1059 | struct hrtimer *timer = &rq->hrtick_timer; |
1134 | unsigned long flags; | 1060 | ktime_t time = ktime_add_ns(timer->base->get_time(), delay); |
1135 | 1061 | ||
1136 | spin_lock_irqsave(&rq->lock, flags); | 1062 | timer->expires = time; |
1137 | __clear_bit(HRTICK_BLOCK, &rq->hrtick_flags); | 1063 | |
1138 | spin_unlock_irqrestore(&rq->lock, flags); | 1064 | if (rq == this_rq()) { |
1065 | hrtimer_restart(timer); | ||
1066 | } else if (!rq->hrtick_csd_pending) { | ||
1067 | __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd); | ||
1068 | rq->hrtick_csd_pending = 1; | ||
1069 | } | ||
1139 | } | 1070 | } |
1140 | 1071 | ||
1141 | static int | 1072 | static int |
@@ -1150,16 +1081,7 @@ hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
1150 | case CPU_DOWN_PREPARE_FROZEN: | 1081 | case CPU_DOWN_PREPARE_FROZEN: |
1151 | case CPU_DEAD: | 1082 | case CPU_DEAD: |
1152 | case CPU_DEAD_FROZEN: | 1083 | case CPU_DEAD_FROZEN: |
1153 | hotplug_hrtick_disable(cpu); | 1084 | hrtick_clear(cpu_rq(cpu)); |
1154 | return NOTIFY_OK; | ||
1155 | |||
1156 | case CPU_UP_PREPARE: | ||
1157 | case CPU_UP_PREPARE_FROZEN: | ||
1158 | case CPU_DOWN_FAILED: | ||
1159 | case CPU_DOWN_FAILED_FROZEN: | ||
1160 | case CPU_ONLINE: | ||
1161 | case CPU_ONLINE_FROZEN: | ||
1162 | hotplug_hrtick_enable(cpu); | ||
1163 | return NOTIFY_OK; | 1085 | return NOTIFY_OK; |
1164 | } | 1086 | } |
1165 | 1087 | ||
@@ -1170,46 +1092,45 @@ static void init_hrtick(void) | |||
1170 | { | 1092 | { |
1171 | hotcpu_notifier(hotplug_hrtick, 0); | 1093 | hotcpu_notifier(hotplug_hrtick, 0); |
1172 | } | 1094 | } |
1173 | #endif /* CONFIG_SMP */ | 1095 | #else |
1096 | /* | ||
1097 | * Called to set the hrtick timer state. | ||
1098 | * | ||
1099 | * called with rq->lock held and irqs disabled | ||
1100 | */ | ||
1101 | static void hrtick_start(struct rq *rq, u64 delay) | ||
1102 | { | ||
1103 | hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), HRTIMER_MODE_REL); | ||
1104 | } | ||
1174 | 1105 | ||
1175 | static void init_rq_hrtick(struct rq *rq) | 1106 | static void init_hrtick(void) |
1176 | { | 1107 | { |
1177 | rq->hrtick_flags = 0; | ||
1178 | hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
1179 | rq->hrtick_timer.function = hrtick; | ||
1180 | rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ; | ||
1181 | } | 1108 | } |
1109 | #endif /* CONFIG_SMP */ | ||
1182 | 1110 | ||
1183 | void hrtick_resched(void) | 1111 | static void init_rq_hrtick(struct rq *rq) |
1184 | { | 1112 | { |
1185 | struct rq *rq; | 1113 | #ifdef CONFIG_SMP |
1186 | unsigned long flags; | 1114 | rq->hrtick_csd_pending = 0; |
1187 | 1115 | ||
1188 | if (!test_thread_flag(TIF_HRTICK_RESCHED)) | 1116 | rq->hrtick_csd.flags = 0; |
1189 | return; | 1117 | rq->hrtick_csd.func = __hrtick_start; |
1118 | rq->hrtick_csd.info = rq; | ||
1119 | #endif | ||
1190 | 1120 | ||
1191 | local_irq_save(flags); | 1121 | hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
1192 | rq = cpu_rq(smp_processor_id()); | 1122 | rq->hrtick_timer.function = hrtick; |
1193 | hrtick_set(rq); | 1123 | rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ; |
1194 | local_irq_restore(flags); | ||
1195 | } | 1124 | } |
1196 | #else | 1125 | #else |
1197 | static inline void hrtick_clear(struct rq *rq) | 1126 | static inline void hrtick_clear(struct rq *rq) |
1198 | { | 1127 | { |
1199 | } | 1128 | } |
1200 | 1129 | ||
1201 | static inline void hrtick_set(struct rq *rq) | ||
1202 | { | ||
1203 | } | ||
1204 | |||
1205 | static inline void init_rq_hrtick(struct rq *rq) | 1130 | static inline void init_rq_hrtick(struct rq *rq) |
1206 | { | 1131 | { |
1207 | } | 1132 | } |
1208 | 1133 | ||
1209 | void hrtick_resched(void) | ||
1210 | { | ||
1211 | } | ||
1212 | |||
1213 | static inline void init_hrtick(void) | 1134 | static inline void init_hrtick(void) |
1214 | { | 1135 | { |
1215 | } | 1136 | } |
@@ -1228,16 +1149,16 @@ static inline void init_hrtick(void) | |||
1228 | #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) | 1149 | #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) |
1229 | #endif | 1150 | #endif |
1230 | 1151 | ||
1231 | static void __resched_task(struct task_struct *p, int tif_bit) | 1152 | static void resched_task(struct task_struct *p) |
1232 | { | 1153 | { |
1233 | int cpu; | 1154 | int cpu; |
1234 | 1155 | ||
1235 | assert_spin_locked(&task_rq(p)->lock); | 1156 | assert_spin_locked(&task_rq(p)->lock); |
1236 | 1157 | ||
1237 | if (unlikely(test_tsk_thread_flag(p, tif_bit))) | 1158 | if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED))) |
1238 | return; | 1159 | return; |
1239 | 1160 | ||
1240 | set_tsk_thread_flag(p, tif_bit); | 1161 | set_tsk_thread_flag(p, TIF_NEED_RESCHED); |
1241 | 1162 | ||
1242 | cpu = task_cpu(p); | 1163 | cpu = task_cpu(p); |
1243 | if (cpu == smp_processor_id()) | 1164 | if (cpu == smp_processor_id()) |
@@ -1303,10 +1224,10 @@ void wake_up_idle_cpu(int cpu) | |||
1303 | #endif /* CONFIG_NO_HZ */ | 1224 | #endif /* CONFIG_NO_HZ */ |
1304 | 1225 | ||
1305 | #else /* !CONFIG_SMP */ | 1226 | #else /* !CONFIG_SMP */ |
1306 | static void __resched_task(struct task_struct *p, int tif_bit) | 1227 | static void resched_task(struct task_struct *p) |
1307 | { | 1228 | { |
1308 | assert_spin_locked(&task_rq(p)->lock); | 1229 | assert_spin_locked(&task_rq(p)->lock); |
1309 | set_tsk_thread_flag(p, tif_bit); | 1230 | set_tsk_need_resched(p); |
1310 | } | 1231 | } |
1311 | #endif /* CONFIG_SMP */ | 1232 | #endif /* CONFIG_SMP */ |
1312 | 1233 | ||
@@ -2108,7 +2029,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) | |||
2108 | /* Tally up the load of all CPUs in the group */ | 2029 | /* Tally up the load of all CPUs in the group */ |
2109 | avg_load = 0; | 2030 | avg_load = 0; |
2110 | 2031 | ||
2111 | for_each_cpu_mask(i, group->cpumask) { | 2032 | for_each_cpu_mask_nr(i, group->cpumask) { |
2112 | /* Bias balancing toward cpus of our domain */ | 2033 | /* Bias balancing toward cpus of our domain */ |
2113 | if (local_group) | 2034 | if (local_group) |
2114 | load = source_load(i, load_idx); | 2035 | load = source_load(i, load_idx); |
@@ -2150,7 +2071,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu, | |||
2150 | /* Traverse only the allowed CPUs */ | 2071 | /* Traverse only the allowed CPUs */ |
2151 | cpus_and(*tmp, group->cpumask, p->cpus_allowed); | 2072 | cpus_and(*tmp, group->cpumask, p->cpus_allowed); |
2152 | 2073 | ||
2153 | for_each_cpu_mask(i, *tmp) { | 2074 | for_each_cpu_mask_nr(i, *tmp) { |
2154 | load = weighted_cpuload(i); | 2075 | load = weighted_cpuload(i); |
2155 | 2076 | ||
2156 | if (load < min_load || (load == min_load && i == this_cpu)) { | 2077 | if (load < min_load || (load == min_load && i == this_cpu)) { |
@@ -2881,7 +2802,7 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu) | |||
2881 | 2802 | ||
2882 | rq = task_rq_lock(p, &flags); | 2803 | rq = task_rq_lock(p, &flags); |
2883 | if (!cpu_isset(dest_cpu, p->cpus_allowed) | 2804 | if (!cpu_isset(dest_cpu, p->cpus_allowed) |
2884 | || unlikely(cpu_is_offline(dest_cpu))) | 2805 | || unlikely(!cpu_active(dest_cpu))) |
2885 | goto out; | 2806 | goto out; |
2886 | 2807 | ||
2887 | /* force the process onto the specified CPU */ | 2808 | /* force the process onto the specified CPU */ |
@@ -3168,7 +3089,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
3168 | max_cpu_load = 0; | 3089 | max_cpu_load = 0; |
3169 | min_cpu_load = ~0UL; | 3090 | min_cpu_load = ~0UL; |
3170 | 3091 | ||
3171 | for_each_cpu_mask(i, group->cpumask) { | 3092 | for_each_cpu_mask_nr(i, group->cpumask) { |
3172 | struct rq *rq; | 3093 | struct rq *rq; |
3173 | 3094 | ||
3174 | if (!cpu_isset(i, *cpus)) | 3095 | if (!cpu_isset(i, *cpus)) |
@@ -3447,7 +3368,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, | |||
3447 | unsigned long max_load = 0; | 3368 | unsigned long max_load = 0; |
3448 | int i; | 3369 | int i; |
3449 | 3370 | ||
3450 | for_each_cpu_mask(i, group->cpumask) { | 3371 | for_each_cpu_mask_nr(i, group->cpumask) { |
3451 | unsigned long wl; | 3372 | unsigned long wl; |
3452 | 3373 | ||
3453 | if (!cpu_isset(i, *cpus)) | 3374 | if (!cpu_isset(i, *cpus)) |
@@ -3849,7 +3770,7 @@ int select_nohz_load_balancer(int stop_tick) | |||
3849 | /* | 3770 | /* |
3850 | * If we are going offline and still the leader, give up! | 3771 | * If we are going offline and still the leader, give up! |
3851 | */ | 3772 | */ |
3852 | if (cpu_is_offline(cpu) && | 3773 | if (!cpu_active(cpu) && |
3853 | atomic_read(&nohz.load_balancer) == cpu) { | 3774 | atomic_read(&nohz.load_balancer) == cpu) { |
3854 | if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu) | 3775 | if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu) |
3855 | BUG(); | 3776 | BUG(); |
@@ -3989,7 +3910,7 @@ static void run_rebalance_domains(struct softirq_action *h) | |||
3989 | int balance_cpu; | 3910 | int balance_cpu; |
3990 | 3911 | ||
3991 | cpu_clear(this_cpu, cpus); | 3912 | cpu_clear(this_cpu, cpus); |
3992 | for_each_cpu_mask(balance_cpu, cpus) { | 3913 | for_each_cpu_mask_nr(balance_cpu, cpus) { |
3993 | /* | 3914 | /* |
3994 | * If this cpu gets work to do, stop the load balancing | 3915 | * If this cpu gets work to do, stop the load balancing |
3995 | * work being done for other cpus. Next load | 3916 | * work being done for other cpus. Next load |
@@ -4395,7 +4316,7 @@ asmlinkage void __sched schedule(void) | |||
4395 | struct task_struct *prev, *next; | 4316 | struct task_struct *prev, *next; |
4396 | unsigned long *switch_count; | 4317 | unsigned long *switch_count; |
4397 | struct rq *rq; | 4318 | struct rq *rq; |
4398 | int cpu, hrtick = sched_feat(HRTICK); | 4319 | int cpu; |
4399 | 4320 | ||
4400 | need_resched: | 4321 | need_resched: |
4401 | preempt_disable(); | 4322 | preempt_disable(); |
@@ -4410,7 +4331,7 @@ need_resched_nonpreemptible: | |||
4410 | 4331 | ||
4411 | schedule_debug(prev); | 4332 | schedule_debug(prev); |
4412 | 4333 | ||
4413 | if (hrtick) | 4334 | if (sched_feat(HRTICK)) |
4414 | hrtick_clear(rq); | 4335 | hrtick_clear(rq); |
4415 | 4336 | ||
4416 | /* | 4337 | /* |
@@ -4457,9 +4378,6 @@ need_resched_nonpreemptible: | |||
4457 | } else | 4378 | } else |
4458 | spin_unlock_irq(&rq->lock); | 4379 | spin_unlock_irq(&rq->lock); |
4459 | 4380 | ||
4460 | if (hrtick) | ||
4461 | hrtick_set(rq); | ||
4462 | |||
4463 | if (unlikely(reacquire_kernel_lock(current) < 0)) | 4381 | if (unlikely(reacquire_kernel_lock(current) < 0)) |
4464 | goto need_resched_nonpreemptible; | 4382 | goto need_resched_nonpreemptible; |
4465 | 4383 | ||
@@ -5876,7 +5794,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) | |||
5876 | struct rq *rq_dest, *rq_src; | 5794 | struct rq *rq_dest, *rq_src; |
5877 | int ret = 0, on_rq; | 5795 | int ret = 0, on_rq; |
5878 | 5796 | ||
5879 | if (unlikely(cpu_is_offline(dest_cpu))) | 5797 | if (unlikely(!cpu_active(dest_cpu))) |
5880 | return ret; | 5798 | return ret; |
5881 | 5799 | ||
5882 | rq_src = cpu_rq(src_cpu); | 5800 | rq_src = cpu_rq(src_cpu); |
@@ -6768,7 +6686,8 @@ static cpumask_t cpu_isolated_map = CPU_MASK_NONE; | |||
6768 | /* Setup the mask of cpus configured for isolated domains */ | 6686 | /* Setup the mask of cpus configured for isolated domains */ |
6769 | static int __init isolated_cpu_setup(char *str) | 6687 | static int __init isolated_cpu_setup(char *str) |
6770 | { | 6688 | { |
6771 | int ints[NR_CPUS], i; | 6689 | static int __initdata ints[NR_CPUS]; |
6690 | int i; | ||
6772 | 6691 | ||
6773 | str = get_options(str, ARRAY_SIZE(ints), ints); | 6692 | str = get_options(str, ARRAY_SIZE(ints), ints); |
6774 | cpus_clear(cpu_isolated_map); | 6693 | cpus_clear(cpu_isolated_map); |
@@ -6802,7 +6721,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map, | |||
6802 | 6721 | ||
6803 | cpus_clear(*covered); | 6722 | cpus_clear(*covered); |
6804 | 6723 | ||
6805 | for_each_cpu_mask(i, *span) { | 6724 | for_each_cpu_mask_nr(i, *span) { |
6806 | struct sched_group *sg; | 6725 | struct sched_group *sg; |
6807 | int group = group_fn(i, cpu_map, &sg, tmpmask); | 6726 | int group = group_fn(i, cpu_map, &sg, tmpmask); |
6808 | int j; | 6727 | int j; |
@@ -6813,7 +6732,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map, | |||
6813 | cpus_clear(sg->cpumask); | 6732 | cpus_clear(sg->cpumask); |
6814 | sg->__cpu_power = 0; | 6733 | sg->__cpu_power = 0; |
6815 | 6734 | ||
6816 | for_each_cpu_mask(j, *span) { | 6735 | for_each_cpu_mask_nr(j, *span) { |
6817 | if (group_fn(j, cpu_map, NULL, tmpmask) != group) | 6736 | if (group_fn(j, cpu_map, NULL, tmpmask) != group) |
6818 | continue; | 6737 | continue; |
6819 | 6738 | ||
@@ -7013,7 +6932,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head) | |||
7013 | if (!sg) | 6932 | if (!sg) |
7014 | return; | 6933 | return; |
7015 | do { | 6934 | do { |
7016 | for_each_cpu_mask(j, sg->cpumask) { | 6935 | for_each_cpu_mask_nr(j, sg->cpumask) { |
7017 | struct sched_domain *sd; | 6936 | struct sched_domain *sd; |
7018 | 6937 | ||
7019 | sd = &per_cpu(phys_domains, j); | 6938 | sd = &per_cpu(phys_domains, j); |
@@ -7038,7 +6957,7 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) | |||
7038 | { | 6957 | { |
7039 | int cpu, i; | 6958 | int cpu, i; |
7040 | 6959 | ||
7041 | for_each_cpu_mask(cpu, *cpu_map) { | 6960 | for_each_cpu_mask_nr(cpu, *cpu_map) { |
7042 | struct sched_group **sched_group_nodes | 6961 | struct sched_group **sched_group_nodes |
7043 | = sched_group_nodes_bycpu[cpu]; | 6962 | = sched_group_nodes_bycpu[cpu]; |
7044 | 6963 | ||
@@ -7277,7 +7196,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7277 | /* | 7196 | /* |
7278 | * Set up domains for cpus specified by the cpu_map. | 7197 | * Set up domains for cpus specified by the cpu_map. |
7279 | */ | 7198 | */ |
7280 | for_each_cpu_mask(i, *cpu_map) { | 7199 | for_each_cpu_mask_nr(i, *cpu_map) { |
7281 | struct sched_domain *sd = NULL, *p; | 7200 | struct sched_domain *sd = NULL, *p; |
7282 | SCHED_CPUMASK_VAR(nodemask, allmasks); | 7201 | SCHED_CPUMASK_VAR(nodemask, allmasks); |
7283 | 7202 | ||
@@ -7344,7 +7263,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7344 | 7263 | ||
7345 | #ifdef CONFIG_SCHED_SMT | 7264 | #ifdef CONFIG_SCHED_SMT |
7346 | /* Set up CPU (sibling) groups */ | 7265 | /* Set up CPU (sibling) groups */ |
7347 | for_each_cpu_mask(i, *cpu_map) { | 7266 | for_each_cpu_mask_nr(i, *cpu_map) { |
7348 | SCHED_CPUMASK_VAR(this_sibling_map, allmasks); | 7267 | SCHED_CPUMASK_VAR(this_sibling_map, allmasks); |
7349 | SCHED_CPUMASK_VAR(send_covered, allmasks); | 7268 | SCHED_CPUMASK_VAR(send_covered, allmasks); |
7350 | 7269 | ||
@@ -7361,7 +7280,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7361 | 7280 | ||
7362 | #ifdef CONFIG_SCHED_MC | 7281 | #ifdef CONFIG_SCHED_MC |
7363 | /* Set up multi-core groups */ | 7282 | /* Set up multi-core groups */ |
7364 | for_each_cpu_mask(i, *cpu_map) { | 7283 | for_each_cpu_mask_nr(i, *cpu_map) { |
7365 | SCHED_CPUMASK_VAR(this_core_map, allmasks); | 7284 | SCHED_CPUMASK_VAR(this_core_map, allmasks); |
7366 | SCHED_CPUMASK_VAR(send_covered, allmasks); | 7285 | SCHED_CPUMASK_VAR(send_covered, allmasks); |
7367 | 7286 | ||
@@ -7428,7 +7347,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7428 | goto error; | 7347 | goto error; |
7429 | } | 7348 | } |
7430 | sched_group_nodes[i] = sg; | 7349 | sched_group_nodes[i] = sg; |
7431 | for_each_cpu_mask(j, *nodemask) { | 7350 | for_each_cpu_mask_nr(j, *nodemask) { |
7432 | struct sched_domain *sd; | 7351 | struct sched_domain *sd; |
7433 | 7352 | ||
7434 | sd = &per_cpu(node_domains, j); | 7353 | sd = &per_cpu(node_domains, j); |
@@ -7474,21 +7393,21 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7474 | 7393 | ||
7475 | /* Calculate CPU power for physical packages and nodes */ | 7394 | /* Calculate CPU power for physical packages and nodes */ |
7476 | #ifdef CONFIG_SCHED_SMT | 7395 | #ifdef CONFIG_SCHED_SMT |
7477 | for_each_cpu_mask(i, *cpu_map) { | 7396 | for_each_cpu_mask_nr(i, *cpu_map) { |
7478 | struct sched_domain *sd = &per_cpu(cpu_domains, i); | 7397 | struct sched_domain *sd = &per_cpu(cpu_domains, i); |
7479 | 7398 | ||
7480 | init_sched_groups_power(i, sd); | 7399 | init_sched_groups_power(i, sd); |
7481 | } | 7400 | } |
7482 | #endif | 7401 | #endif |
7483 | #ifdef CONFIG_SCHED_MC | 7402 | #ifdef CONFIG_SCHED_MC |
7484 | for_each_cpu_mask(i, *cpu_map) { | 7403 | for_each_cpu_mask_nr(i, *cpu_map) { |
7485 | struct sched_domain *sd = &per_cpu(core_domains, i); | 7404 | struct sched_domain *sd = &per_cpu(core_domains, i); |
7486 | 7405 | ||
7487 | init_sched_groups_power(i, sd); | 7406 | init_sched_groups_power(i, sd); |
7488 | } | 7407 | } |
7489 | #endif | 7408 | #endif |
7490 | 7409 | ||
7491 | for_each_cpu_mask(i, *cpu_map) { | 7410 | for_each_cpu_mask_nr(i, *cpu_map) { |
7492 | struct sched_domain *sd = &per_cpu(phys_domains, i); | 7411 | struct sched_domain *sd = &per_cpu(phys_domains, i); |
7493 | 7412 | ||
7494 | init_sched_groups_power(i, sd); | 7413 | init_sched_groups_power(i, sd); |
@@ -7508,7 +7427,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7508 | #endif | 7427 | #endif |
7509 | 7428 | ||
7510 | /* Attach the domains */ | 7429 | /* Attach the domains */ |
7511 | for_each_cpu_mask(i, *cpu_map) { | 7430 | for_each_cpu_mask_nr(i, *cpu_map) { |
7512 | struct sched_domain *sd; | 7431 | struct sched_domain *sd; |
7513 | #ifdef CONFIG_SCHED_SMT | 7432 | #ifdef CONFIG_SCHED_SMT |
7514 | sd = &per_cpu(cpu_domains, i); | 7433 | sd = &per_cpu(cpu_domains, i); |
@@ -7553,18 +7472,6 @@ void __attribute__((weak)) arch_update_cpu_topology(void) | |||
7553 | } | 7472 | } |
7554 | 7473 | ||
7555 | /* | 7474 | /* |
7556 | * Free current domain masks. | ||
7557 | * Called after all cpus are attached to NULL domain. | ||
7558 | */ | ||
7559 | static void free_sched_domains(void) | ||
7560 | { | ||
7561 | ndoms_cur = 0; | ||
7562 | if (doms_cur != &fallback_doms) | ||
7563 | kfree(doms_cur); | ||
7564 | doms_cur = &fallback_doms; | ||
7565 | } | ||
7566 | |||
7567 | /* | ||
7568 | * Set up scheduler domains and groups. Callers must hold the hotplug lock. | 7475 | * Set up scheduler domains and groups. Callers must hold the hotplug lock. |
7569 | * For now this just excludes isolated cpus, but could be used to | 7476 | * For now this just excludes isolated cpus, but could be used to |
7570 | * exclude other special cases in the future. | 7477 | * exclude other special cases in the future. |
@@ -7603,7 +7510,7 @@ static void detach_destroy_domains(const cpumask_t *cpu_map) | |||
7603 | 7510 | ||
7604 | unregister_sched_domain_sysctl(); | 7511 | unregister_sched_domain_sysctl(); |
7605 | 7512 | ||
7606 | for_each_cpu_mask(i, *cpu_map) | 7513 | for_each_cpu_mask_nr(i, *cpu_map) |
7607 | cpu_attach_domain(NULL, &def_root_domain, i); | 7514 | cpu_attach_domain(NULL, &def_root_domain, i); |
7608 | synchronize_sched(); | 7515 | synchronize_sched(); |
7609 | arch_destroy_sched_domains(cpu_map, &tmpmask); | 7516 | arch_destroy_sched_domains(cpu_map, &tmpmask); |
@@ -7642,7 +7549,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, | |||
7642 | * ownership of it and will kfree it when done with it. If the caller | 7549 | * ownership of it and will kfree it when done with it. If the caller |
7643 | * failed the kmalloc call, then it can pass in doms_new == NULL, | 7550 | * failed the kmalloc call, then it can pass in doms_new == NULL, |
7644 | * and partition_sched_domains() will fallback to the single partition | 7551 | * and partition_sched_domains() will fallback to the single partition |
7645 | * 'fallback_doms'. | 7552 | * 'fallback_doms'; it also forces the domains to be rebuilt. |
7646 | * | 7553 | * |
7647 | * Call with hotplug lock held | 7554 | * Call with hotplug lock held |
7648 | */ | 7555 | */ |
@@ -7656,12 +7563,8 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, | |||
7656 | /* always unregister in case we don't destroy any domains */ | 7563 | /* always unregister in case we don't destroy any domains */ |
7657 | unregister_sched_domain_sysctl(); | 7564 | unregister_sched_domain_sysctl(); |
7658 | 7565 | ||
7659 | if (doms_new == NULL) { | 7566 | if (doms_new == NULL) |
7660 | ndoms_new = 1; | 7567 | ndoms_new = 0; |
7661 | doms_new = &fallback_doms; | ||
7662 | cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map); | ||
7663 | dattr_new = NULL; | ||
7664 | } | ||
7665 | 7568 | ||
7666 | /* Destroy deleted domains */ | 7569 | /* Destroy deleted domains */ |
7667 | for (i = 0; i < ndoms_cur; i++) { | 7570 | for (i = 0; i < ndoms_cur; i++) { |
@@ -7676,6 +7579,14 @@ match1: | |||
7676 | ; | 7579 | ; |
7677 | } | 7580 | } |
7678 | 7581 | ||
7582 | if (doms_new == NULL) { | ||
7583 | ndoms_cur = 0; | ||
7584 | ndoms_new = 1; | ||
7585 | doms_new = &fallback_doms; | ||
7586 | cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map); | ||
7587 | dattr_new = NULL; | ||
7588 | } | ||
7589 | |||
7679 | /* Build new domains */ | 7590 | /* Build new domains */ |
7680 | for (i = 0; i < ndoms_new; i++) { | 7591 | for (i = 0; i < ndoms_new; i++) { |
7681 | for (j = 0; j < ndoms_cur; j++) { | 7592 | for (j = 0; j < ndoms_cur; j++) { |
@@ -7706,17 +7617,10 @@ match2: | |||
7706 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | 7617 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) |
7707 | int arch_reinit_sched_domains(void) | 7618 | int arch_reinit_sched_domains(void) |
7708 | { | 7619 | { |
7709 | int err; | ||
7710 | |||
7711 | get_online_cpus(); | 7620 | get_online_cpus(); |
7712 | mutex_lock(&sched_domains_mutex); | 7621 | rebuild_sched_domains(); |
7713 | detach_destroy_domains(&cpu_online_map); | ||
7714 | free_sched_domains(); | ||
7715 | err = arch_init_sched_domains(&cpu_online_map); | ||
7716 | mutex_unlock(&sched_domains_mutex); | ||
7717 | put_online_cpus(); | 7622 | put_online_cpus(); |
7718 | 7623 | return 0; | |
7719 | return err; | ||
7720 | } | 7624 | } |
7721 | 7625 | ||
7722 | static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) | 7626 | static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) |
@@ -7786,59 +7690,49 @@ int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls) | |||
7786 | } | 7690 | } |
7787 | #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */ | 7691 | #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */ |
7788 | 7692 | ||
7693 | #ifndef CONFIG_CPUSETS | ||
7789 | /* | 7694 | /* |
7790 | * Force a reinitialization of the sched domains hierarchy. The domains | 7695 | * Add online and remove offline CPUs from the scheduler domains. |
7791 | * and groups cannot be updated in place without racing with the balancing | 7696 | * When cpusets are enabled they take over this function. |
7792 | * code, so we temporarily attach all running cpus to the NULL domain | ||
7793 | * which will prevent rebalancing while the sched domains are recalculated. | ||
7794 | */ | 7697 | */ |
7795 | static int update_sched_domains(struct notifier_block *nfb, | 7698 | static int update_sched_domains(struct notifier_block *nfb, |
7796 | unsigned long action, void *hcpu) | 7699 | unsigned long action, void *hcpu) |
7797 | { | 7700 | { |
7701 | switch (action) { | ||
7702 | case CPU_ONLINE: | ||
7703 | case CPU_ONLINE_FROZEN: | ||
7704 | case CPU_DEAD: | ||
7705 | case CPU_DEAD_FROZEN: | ||
7706 | partition_sched_domains(0, NULL, NULL); | ||
7707 | return NOTIFY_OK; | ||
7708 | |||
7709 | default: | ||
7710 | return NOTIFY_DONE; | ||
7711 | } | ||
7712 | } | ||
7713 | #endif | ||
7714 | |||
7715 | static int update_runtime(struct notifier_block *nfb, | ||
7716 | unsigned long action, void *hcpu) | ||
7717 | { | ||
7798 | int cpu = (int)(long)hcpu; | 7718 | int cpu = (int)(long)hcpu; |
7799 | 7719 | ||
7800 | switch (action) { | 7720 | switch (action) { |
7801 | case CPU_DOWN_PREPARE: | 7721 | case CPU_DOWN_PREPARE: |
7802 | case CPU_DOWN_PREPARE_FROZEN: | 7722 | case CPU_DOWN_PREPARE_FROZEN: |
7803 | disable_runtime(cpu_rq(cpu)); | 7723 | disable_runtime(cpu_rq(cpu)); |
7804 | /* fall-through */ | ||
7805 | case CPU_UP_PREPARE: | ||
7806 | case CPU_UP_PREPARE_FROZEN: | ||
7807 | detach_destroy_domains(&cpu_online_map); | ||
7808 | free_sched_domains(); | ||
7809 | return NOTIFY_OK; | 7724 | return NOTIFY_OK; |
7810 | 7725 | ||
7811 | |||
7812 | case CPU_DOWN_FAILED: | 7726 | case CPU_DOWN_FAILED: |
7813 | case CPU_DOWN_FAILED_FROZEN: | 7727 | case CPU_DOWN_FAILED_FROZEN: |
7814 | case CPU_ONLINE: | 7728 | case CPU_ONLINE: |
7815 | case CPU_ONLINE_FROZEN: | 7729 | case CPU_ONLINE_FROZEN: |
7816 | enable_runtime(cpu_rq(cpu)); | 7730 | enable_runtime(cpu_rq(cpu)); |
7817 | /* fall-through */ | 7731 | return NOTIFY_OK; |
7818 | case CPU_UP_CANCELED: | 7732 | |
7819 | case CPU_UP_CANCELED_FROZEN: | ||
7820 | case CPU_DEAD: | ||
7821 | case CPU_DEAD_FROZEN: | ||
7822 | /* | ||
7823 | * Fall through and re-initialise the domains. | ||
7824 | */ | ||
7825 | break; | ||
7826 | default: | 7733 | default: |
7827 | return NOTIFY_DONE; | 7734 | return NOTIFY_DONE; |
7828 | } | 7735 | } |
7829 | |||
7830 | #ifndef CONFIG_CPUSETS | ||
7831 | /* | ||
7832 | * Create default domain partitioning if cpusets are disabled. | ||
7833 | * Otherwise we let cpusets rebuild the domains based on the | ||
7834 | * current setup. | ||
7835 | */ | ||
7836 | |||
7837 | /* The hotplug lock is already held by cpu_up/cpu_down */ | ||
7838 | arch_init_sched_domains(&cpu_online_map); | ||
7839 | #endif | ||
7840 | |||
7841 | return NOTIFY_OK; | ||
7842 | } | 7736 | } |
7843 | 7737 | ||
7844 | void __init sched_init_smp(void) | 7738 | void __init sched_init_smp(void) |
@@ -7858,8 +7752,15 @@ void __init sched_init_smp(void) | |||
7858 | cpu_set(smp_processor_id(), non_isolated_cpus); | 7752 | cpu_set(smp_processor_id(), non_isolated_cpus); |
7859 | mutex_unlock(&sched_domains_mutex); | 7753 | mutex_unlock(&sched_domains_mutex); |
7860 | put_online_cpus(); | 7754 | put_online_cpus(); |
7755 | |||
7756 | #ifndef CONFIG_CPUSETS | ||
7861 | /* XXX: Theoretical race here - CPU may be hotplugged now */ | 7757 | /* XXX: Theoretical race here - CPU may be hotplugged now */ |
7862 | hotcpu_notifier(update_sched_domains, 0); | 7758 | hotcpu_notifier(update_sched_domains, 0); |
7759 | #endif | ||
7760 | |||
7761 | /* RT runtime code needs to handle some hotplug events */ | ||
7762 | hotcpu_notifier(update_runtime, 0); | ||
7763 | |||
7863 | init_hrtick(); | 7764 | init_hrtick(); |
7864 | 7765 | ||
7865 | /* Move init over to a non-isolated CPU */ | 7766 | /* Move init over to a non-isolated CPU */ |
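Note on the kernel/sched.c rework above: with the old notifier split into update_sched_domains() (domain rebuild, compiled only when cpusets are disabled) and update_runtime() (RT runtime bookkeeping), forcing a rebuild of the default domain partitioning reduces to a single partition_sched_domains(0, NULL, NULL) call. A minimal sketch of a caller outside the notifier path follows; rebuild_default_domains() is a made-up name used purely for illustration.

	/*
	 * Illustrative sketch only -- rebuild_default_domains() is not an
	 * in-tree function.  Passing doms_new == NULL tells
	 * partition_sched_domains() to destroy the current domains and
	 * rebuild the single fallback partition (cpu_online_map minus
	 * cpu_isolated_map), which is exactly what the new
	 * update_sched_domains() notifier does on CPU hotplug.
	 */
	static void rebuild_default_domains(void)
	{
		get_online_cpus();	/* documented: call with hotplug lock held */
		partition_sched_domains(0, NULL, NULL);
		put_online_cpus();
	}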
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index f2aa987027d6..cf2cd6ce4cb2 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
@@ -878,7 +878,6 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) | |||
878 | #ifdef CONFIG_SCHED_HRTICK | 878 | #ifdef CONFIG_SCHED_HRTICK |
879 | static void hrtick_start_fair(struct rq *rq, struct task_struct *p) | 879 | static void hrtick_start_fair(struct rq *rq, struct task_struct *p) |
880 | { | 880 | { |
881 | int requeue = rq->curr == p; | ||
882 | struct sched_entity *se = &p->se; | 881 | struct sched_entity *se = &p->se; |
883 | struct cfs_rq *cfs_rq = cfs_rq_of(se); | 882 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
884 | 883 | ||
@@ -899,10 +898,10 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p) | |||
899 | * Don't schedule slices shorter than 10000ns, that just | 898 | * Don't schedule slices shorter than 10000ns, that just |
900 | * doesn't make sense. Rely on vruntime for fairness. | 899 | * doesn't make sense. Rely on vruntime for fairness. |
901 | */ | 900 | */ |
902 | if (!requeue) | 901 | if (rq->curr != p) |
903 | delta = max(10000LL, delta); | 902 | delta = max(10000LL, delta); |
904 | 903 | ||
905 | hrtick_start(rq, delta, requeue); | 904 | hrtick_start(rq, delta); |
906 | } | 905 | } |
907 | } | 906 | } |
908 | #else /* !CONFIG_SCHED_HRTICK */ | 907 | #else /* !CONFIG_SCHED_HRTICK */ |
@@ -1004,6 +1003,8 @@ static void yield_task_fair(struct rq *rq) | |||
1004 | * not idle and an idle cpu is available. The span of cpus to | 1003 | * not idle and an idle cpu is available. The span of cpus to |
1005 | * search starts with cpus closest then further out as needed, | 1004 | * search starts with cpus closest then further out as needed, |
1006 | * so we always favor a closer, idle cpu. | 1005 | * so we always favor a closer, idle cpu. |
1006 | * Domains may include CPUs that are not usable for migration, | ||
1007 | * hence we need to mask them out (cpu_active_map). | ||
1007 | * | 1008 | * |
1008 | * Returns the CPU we should wake onto. | 1009 | * Returns the CPU we should wake onto. |
1009 | */ | 1010 | */ |
@@ -1031,7 +1032,8 @@ static int wake_idle(int cpu, struct task_struct *p) | |||
1031 | || ((sd->flags & SD_WAKE_IDLE_FAR) | 1032 | || ((sd->flags & SD_WAKE_IDLE_FAR) |
1032 | && !task_hot(p, task_rq(p)->clock, sd))) { | 1033 | && !task_hot(p, task_rq(p)->clock, sd))) { |
1033 | cpus_and(tmp, sd->span, p->cpus_allowed); | 1034 | cpus_and(tmp, sd->span, p->cpus_allowed); |
1034 | for_each_cpu_mask(i, tmp) { | 1035 | cpus_and(tmp, tmp, cpu_active_map); |
1036 | for_each_cpu_mask_nr(i, tmp) { | ||
1035 | if (idle_cpu(i)) { | 1037 | if (idle_cpu(i)) { |
1036 | if (i != task_cpu(p)) { | 1038 | if (i != task_cpu(p)) { |
1037 | schedstat_inc(p, | 1039 | schedstat_inc(p, |
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 47ceac9e8552..f85a76363eee 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c | |||
@@ -240,7 +240,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq) | |||
240 | 240 | ||
241 | spin_lock(&rt_b->rt_runtime_lock); | 241 | spin_lock(&rt_b->rt_runtime_lock); |
242 | rt_period = ktime_to_ns(rt_b->rt_period); | 242 | rt_period = ktime_to_ns(rt_b->rt_period); |
243 | for_each_cpu_mask(i, rd->span) { | 243 | for_each_cpu_mask_nr(i, rd->span) { |
244 | struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); | 244 | struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); |
245 | s64 diff; | 245 | s64 diff; |
246 | 246 | ||
@@ -505,7 +505,9 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) | |||
505 | rt_rq->rt_nr_running++; | 505 | rt_rq->rt_nr_running++; |
506 | #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED | 506 | #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED |
507 | if (rt_se_prio(rt_se) < rt_rq->highest_prio) { | 507 | if (rt_se_prio(rt_se) < rt_rq->highest_prio) { |
508 | #ifdef CONFIG_SMP | ||
508 | struct rq *rq = rq_of_rt_rq(rt_rq); | 509 | struct rq *rq = rq_of_rt_rq(rt_rq); |
510 | #endif | ||
509 | 511 | ||
510 | rt_rq->highest_prio = rt_se_prio(rt_se); | 512 | rt_rq->highest_prio = rt_se_prio(rt_se); |
511 | #ifdef CONFIG_SMP | 513 | #ifdef CONFIG_SMP |
@@ -599,11 +601,7 @@ static void __enqueue_rt_entity(struct sched_rt_entity *rt_se) | |||
599 | if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) | 601 | if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) |
600 | return; | 602 | return; |
601 | 603 | ||
602 | if (rt_se->nr_cpus_allowed == 1) | 604 | list_add_tail(&rt_se->run_list, queue); |
603 | list_add(&rt_se->run_list, queue); | ||
604 | else | ||
605 | list_add_tail(&rt_se->run_list, queue); | ||
606 | |||
607 | __set_bit(rt_se_prio(rt_se), array->bitmap); | 605 | __set_bit(rt_se_prio(rt_se), array->bitmap); |
608 | 606 | ||
609 | inc_rt_tasks(rt_se, rt_rq); | 607 | inc_rt_tasks(rt_se, rt_rq); |
@@ -688,32 +686,34 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep) | |||
688 | * Put task to the end of the run list without the overhead of dequeue | 686 | * Put task to the end of the run list without the overhead of dequeue |
689 | * followed by enqueue. | 687 | * followed by enqueue. |
690 | */ | 688 | */ |
691 | static | 689 | static void |
692 | void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se) | 690 | requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head) |
693 | { | 691 | { |
694 | struct rt_prio_array *array = &rt_rq->active; | ||
695 | |||
696 | if (on_rt_rq(rt_se)) { | 692 | if (on_rt_rq(rt_se)) { |
697 | list_del_init(&rt_se->run_list); | 693 | struct rt_prio_array *array = &rt_rq->active; |
698 | list_add_tail(&rt_se->run_list, | 694 | struct list_head *queue = array->queue + rt_se_prio(rt_se); |
699 | array->queue + rt_se_prio(rt_se)); | 695 | |
696 | if (head) | ||
697 | list_move(&rt_se->run_list, queue); | ||
698 | else | ||
699 | list_move_tail(&rt_se->run_list, queue); | ||
700 | } | 700 | } |
701 | } | 701 | } |
702 | 702 | ||
703 | static void requeue_task_rt(struct rq *rq, struct task_struct *p) | 703 | static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head) |
704 | { | 704 | { |
705 | struct sched_rt_entity *rt_se = &p->rt; | 705 | struct sched_rt_entity *rt_se = &p->rt; |
706 | struct rt_rq *rt_rq; | 706 | struct rt_rq *rt_rq; |
707 | 707 | ||
708 | for_each_sched_rt_entity(rt_se) { | 708 | for_each_sched_rt_entity(rt_se) { |
709 | rt_rq = rt_rq_of_se(rt_se); | 709 | rt_rq = rt_rq_of_se(rt_se); |
710 | requeue_rt_entity(rt_rq, rt_se); | 710 | requeue_rt_entity(rt_rq, rt_se, head); |
711 | } | 711 | } |
712 | } | 712 | } |
713 | 713 | ||
714 | static void yield_task_rt(struct rq *rq) | 714 | static void yield_task_rt(struct rq *rq) |
715 | { | 715 | { |
716 | requeue_task_rt(rq, rq->curr); | 716 | requeue_task_rt(rq, rq->curr, 0); |
717 | } | 717 | } |
718 | 718 | ||
719 | #ifdef CONFIG_SMP | 719 | #ifdef CONFIG_SMP |
@@ -753,6 +753,30 @@ static int select_task_rq_rt(struct task_struct *p, int sync) | |||
753 | */ | 753 | */ |
754 | return task_cpu(p); | 754 | return task_cpu(p); |
755 | } | 755 | } |
756 | |||
757 | static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) | ||
758 | { | ||
759 | cpumask_t mask; | ||
760 | |||
761 | if (rq->curr->rt.nr_cpus_allowed == 1) | ||
762 | return; | ||
763 | |||
764 | if (p->rt.nr_cpus_allowed != 1 | ||
765 | && cpupri_find(&rq->rd->cpupri, p, &mask)) | ||
766 | return; | ||
767 | |||
768 | if (!cpupri_find(&rq->rd->cpupri, rq->curr, &mask)) | ||
769 | return; | ||
770 | |||
771 | /* | ||
772 | * There appears to be other cpus that can accept | ||
773 | * current and none to run 'p', so let's reschedule | ||
774 | * to try and push current away: | ||
775 | */ | ||
776 | requeue_task_rt(rq, p, 1); | ||
777 | resched_task(rq->curr); | ||
778 | } | ||
779 | |||
756 | #endif /* CONFIG_SMP */ | 780 | #endif /* CONFIG_SMP */ |
757 | 781 | ||
758 | /* | 782 | /* |
@@ -778,18 +802,8 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p) | |||
778 | * to move current somewhere else, making room for our non-migratable | 802 | * to move current somewhere else, making room for our non-migratable |
779 | * task. | 803 | * task. |
780 | */ | 804 | */ |
781 | if((p->prio == rq->curr->prio) | 805 | if (p->prio == rq->curr->prio && !need_resched()) |
782 | && p->rt.nr_cpus_allowed == 1 | 806 | check_preempt_equal_prio(rq, p); |
783 | && rq->curr->rt.nr_cpus_allowed != 1) { | ||
784 | cpumask_t mask; | ||
785 | |||
786 | if (cpupri_find(&rq->rd->cpupri, rq->curr, &mask)) | ||
787 | /* | ||
788 | * There appears to be other cpus that can accept | ||
789 | * current, so lets reschedule to try and push it away | ||
790 | */ | ||
791 | resched_task(rq->curr); | ||
792 | } | ||
793 | #endif | 807 | #endif |
794 | } | 808 | } |
795 | 809 | ||
@@ -922,6 +936,13 @@ static int find_lowest_rq(struct task_struct *task) | |||
922 | return -1; /* No targets found */ | 936 | return -1; /* No targets found */ |
923 | 937 | ||
924 | /* | 938 | /* |
939 | * Only consider CPUs that are usable for migration. | ||
940 | * I guess we might want to change cpupri_find() to ignore those | ||
941 | * in the first place. | ||
942 | */ | ||
943 | cpus_and(*lowest_mask, *lowest_mask, cpu_active_map); | ||
944 | |||
945 | /* | ||
925 | * At this point we have built a mask of cpus representing the | 946 | * At this point we have built a mask of cpus representing the |
926 | * lowest priority tasks in the system. Now we want to elect | 947 | * lowest priority tasks in the system. Now we want to elect |
927 | * the best one based on our affinity and topology. | 948 | * the best one based on our affinity and topology. |
@@ -1107,7 +1128,7 @@ static int pull_rt_task(struct rq *this_rq) | |||
1107 | 1128 | ||
1108 | next = pick_next_task_rt(this_rq); | 1129 | next = pick_next_task_rt(this_rq); |
1109 | 1130 | ||
1110 | for_each_cpu_mask(cpu, this_rq->rd->rto_mask) { | 1131 | for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) { |
1111 | if (this_cpu == cpu) | 1132 | if (this_cpu == cpu) |
1112 | continue; | 1133 | continue; |
1113 | 1134 | ||
@@ -1415,7 +1436,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) | |||
1415 | * on the queue: | 1436 | * on the queue: |
1416 | */ | 1437 | */ |
1417 | if (p->rt.run_list.prev != p->rt.run_list.next) { | 1438 | if (p->rt.run_list.prev != p->rt.run_list.next) { |
1418 | requeue_task_rt(rq, p); | 1439 | requeue_task_rt(rq, p, 0); |
1419 | set_tsk_need_resched(p); | 1440 | set_tsk_need_resched(p); |
1420 | } | 1441 | } |
1421 | } | 1442 | } |
diff --git a/kernel/softlockup.c b/kernel/softlockup.c index a272d78185eb..7bd8d1aadd5d 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
14 | #include <linux/freezer.h> | 14 | #include <linux/freezer.h> |
15 | #include <linux/kthread.h> | 15 | #include <linux/kthread.h> |
16 | #include <linux/lockdep.h> | ||
16 | #include <linux/notifier.h> | 17 | #include <linux/notifier.h> |
17 | #include <linux/module.h> | 18 | #include <linux/module.h> |
18 | 19 | ||
@@ -25,7 +26,22 @@ static DEFINE_PER_CPU(unsigned long, print_timestamp); | |||
25 | static DEFINE_PER_CPU(struct task_struct *, watchdog_task); | 26 | static DEFINE_PER_CPU(struct task_struct *, watchdog_task); |
26 | 27 | ||
27 | static int __read_mostly did_panic; | 28 | static int __read_mostly did_panic; |
28 | unsigned long __read_mostly softlockup_thresh = 60; | 29 | int __read_mostly softlockup_thresh = 60; |
30 | |||
31 | /* | ||
32 | * Should we panic (and reboot, if panic_timeout= is set) when a | ||
33 | * soft-lockup occurs: | ||
34 | */ | ||
35 | unsigned int __read_mostly softlockup_panic = | ||
36 | CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE; | ||
37 | |||
38 | static int __init softlockup_panic_setup(char *str) | ||
39 | { | ||
40 | softlockup_panic = simple_strtoul(str, NULL, 0); | ||
41 | |||
42 | return 1; | ||
43 | } | ||
44 | __setup("softlockup_panic=", softlockup_panic_setup); | ||
29 | 45 | ||
30 | static int | 46 | static int |
31 | softlock_panic(struct notifier_block *this, unsigned long event, void *ptr) | 47 | softlock_panic(struct notifier_block *this, unsigned long event, void *ptr) |
@@ -84,6 +100,14 @@ void softlockup_tick(void) | |||
84 | struct pt_regs *regs = get_irq_regs(); | 100 | struct pt_regs *regs = get_irq_regs(); |
85 | unsigned long now; | 101 | unsigned long now; |
86 | 102 | ||
103 | /* Is detection switched off? */ | ||
104 | if (!per_cpu(watchdog_task, this_cpu) || softlockup_thresh <= 0) { | ||
105 | /* Be sure we don't false trigger if switched back on */ | ||
106 | if (touch_timestamp) | ||
107 | per_cpu(touch_timestamp, this_cpu) = 0; | ||
108 | return; | ||
109 | } | ||
110 | |||
87 | if (touch_timestamp == 0) { | 111 | if (touch_timestamp == 0) { |
88 | __touch_softlockup_watchdog(); | 112 | __touch_softlockup_watchdog(); |
89 | return; | 113 | return; |
@@ -92,11 +116,8 @@ void softlockup_tick(void) | |||
92 | print_timestamp = per_cpu(print_timestamp, this_cpu); | 116 | print_timestamp = per_cpu(print_timestamp, this_cpu); |
93 | 117 | ||
94 | /* report at most once a second */ | 118 | /* report at most once a second */ |
95 | if ((print_timestamp >= touch_timestamp && | 119 | if (print_timestamp == touch_timestamp || did_panic) |
96 | print_timestamp < (touch_timestamp + 1)) || | ||
97 | did_panic || !per_cpu(watchdog_task, this_cpu)) { | ||
98 | return; | 120 | return; |
99 | } | ||
100 | 121 | ||
101 | /* do not print during early bootup: */ | 122 | /* do not print during early bootup: */ |
102 | if (unlikely(system_state != SYSTEM_RUNNING)) { | 123 | if (unlikely(system_state != SYSTEM_RUNNING)) { |
@@ -106,8 +127,11 @@ void softlockup_tick(void) | |||
106 | 127 | ||
107 | now = get_timestamp(this_cpu); | 128 | now = get_timestamp(this_cpu); |
108 | 129 | ||
109 | /* Wake up the high-prio watchdog task every second: */ | 130 | /* |
110 | if (now > (touch_timestamp + 1)) | 131 | * Wake up the high-prio watchdog task twice per |
132 | * threshold timespan. | ||
133 | */ | ||
134 | if (now > touch_timestamp + softlockup_thresh/2) | ||
111 | wake_up_process(per_cpu(watchdog_task, this_cpu)); | 135 | wake_up_process(per_cpu(watchdog_task, this_cpu)); |
112 | 136 | ||
113 | /* Warn about unreasonable delays: */ | 137 | /* Warn about unreasonable delays: */ |
@@ -121,11 +145,15 @@ void softlockup_tick(void) | |||
121 | this_cpu, now - touch_timestamp, | 145 | this_cpu, now - touch_timestamp, |
122 | current->comm, task_pid_nr(current)); | 146 | current->comm, task_pid_nr(current)); |
123 | print_modules(); | 147 | print_modules(); |
148 | print_irqtrace_events(current); | ||
124 | if (regs) | 149 | if (regs) |
125 | show_regs(regs); | 150 | show_regs(regs); |
126 | else | 151 | else |
127 | dump_stack(); | 152 | dump_stack(); |
128 | spin_unlock(&print_lock); | 153 | spin_unlock(&print_lock); |
154 | |||
155 | if (softlockup_panic) | ||
156 | panic("softlockup: hung tasks"); | ||
129 | } | 157 | } |
130 | 158 | ||
131 | /* | 159 | /* |
@@ -178,6 +206,9 @@ static void check_hung_task(struct task_struct *t, unsigned long now) | |||
178 | 206 | ||
179 | t->last_switch_timestamp = now; | 207 | t->last_switch_timestamp = now; |
180 | touch_nmi_watchdog(); | 208 | touch_nmi_watchdog(); |
209 | |||
210 | if (softlockup_panic) | ||
211 | panic("softlockup: blocked tasks"); | ||
181 | } | 212 | } |
182 | 213 | ||
183 | /* | 214 | /* |
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index ba9b2054ecbd..738b411ff2d3 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c | |||
@@ -33,8 +33,9 @@ static int stopmachine(void *cpu) | |||
33 | { | 33 | { |
34 | int irqs_disabled = 0; | 34 | int irqs_disabled = 0; |
35 | int prepared = 0; | 35 | int prepared = 0; |
36 | cpumask_of_cpu_ptr(cpumask, (int)(long)cpu); | ||
36 | 37 | ||
37 | set_cpus_allowed_ptr(current, &cpumask_of_cpu((int)(long)cpu)); | 38 | set_cpus_allowed_ptr(current, cpumask); |
38 | 39 | ||
39 | /* Ack: we are alive */ | 40 | /* Ack: we are alive */ |
40 | smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */ | 41 | smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */ |
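The stop_machine() change above (and the tick-common and trace_sysprof hunks below) replaces &cpumask_of_cpu(cpu) with the cpumask_of_cpu_ptr() helpers, which hand the caller a const cpumask_t * so that no full cpumask has to be copied onto the stack when NR_CPUS is large. A minimal sketch of the pattern follows; pin_current_to() is a hypothetical wrapper name, the helpers themselves are the ones used in the hunks.

	/*
	 * Illustrative sketch only -- pin_current_to() is a made-up name.
	 * cpumask_of_cpu_ptr() declares and initialises a const cpumask_t *
	 * for 'cpu', avoiding an on-stack cpumask_t copy (512 bytes with
	 * NR_CPUS=4096).
	 */
	static int pin_current_to(int cpu)
	{
		cpumask_of_cpu_ptr(new_mask, cpu);

		return set_cpus_allowed_ptr(current, new_mask);
	}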
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index b859e6b5a767..2a7b9d88706b 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -88,12 +88,13 @@ extern int rcutorture_runnable; | |||
88 | #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ | 88 | #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ |
89 | 89 | ||
90 | /* Constants used for minimum and maximum */ | 90 | /* Constants used for minimum and maximum */ |
91 | #if defined(CONFIG_DETECT_SOFTLOCKUP) || defined(CONFIG_HIGHMEM) | 91 | #if defined(CONFIG_HIGHMEM) || defined(CONFIG_DETECT_SOFTLOCKUP) |
92 | static int one = 1; | 92 | static int one = 1; |
93 | #endif | 93 | #endif |
94 | 94 | ||
95 | #ifdef CONFIG_DETECT_SOFTLOCKUP | 95 | #ifdef CONFIG_DETECT_SOFTLOCKUP |
96 | static int sixty = 60; | 96 | static int sixty = 60; |
97 | static int neg_one = -1; | ||
97 | #endif | 98 | #endif |
98 | 99 | ||
99 | #ifdef CONFIG_MMU | 100 | #ifdef CONFIG_MMU |
@@ -739,13 +740,24 @@ static struct ctl_table kern_table[] = { | |||
739 | #ifdef CONFIG_DETECT_SOFTLOCKUP | 740 | #ifdef CONFIG_DETECT_SOFTLOCKUP |
740 | { | 741 | { |
741 | .ctl_name = CTL_UNNUMBERED, | 742 | .ctl_name = CTL_UNNUMBERED, |
743 | .procname = "softlockup_panic", | ||
744 | .data = &softlockup_panic, | ||
745 | .maxlen = sizeof(int), | ||
746 | .mode = 0644, | ||
747 | .proc_handler = &proc_dointvec_minmax, | ||
748 | .strategy = &sysctl_intvec, | ||
749 | .extra1 = &zero, | ||
750 | .extra2 = &one, | ||
751 | }, | ||
752 | { | ||
753 | .ctl_name = CTL_UNNUMBERED, | ||
742 | .procname = "softlockup_thresh", | 754 | .procname = "softlockup_thresh", |
743 | .data = &softlockup_thresh, | 755 | .data = &softlockup_thresh, |
744 | .maxlen = sizeof(unsigned long), | 756 | .maxlen = sizeof(int), |
745 | .mode = 0644, | 757 | .mode = 0644, |
746 | .proc_handler = &proc_doulongvec_minmax, | 758 | .proc_handler = &proc_dointvec_minmax, |
747 | .strategy = &sysctl_intvec, | 759 | .strategy = &sysctl_intvec, |
748 | .extra1 = &one, | 760 | .extra1 = &neg_one, |
749 | .extra2 = &sixty, | 761 | .extra2 = &sixty, |
750 | }, | 762 | }, |
751 | { | 763 | { |
diff --git a/kernel/taskstats.c b/kernel/taskstats.c index 4a23517169a6..06b17547f4e7 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c | |||
@@ -301,7 +301,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd) | |||
301 | return -EINVAL; | 301 | return -EINVAL; |
302 | 302 | ||
303 | if (isadd == REGISTER) { | 303 | if (isadd == REGISTER) { |
304 | for_each_cpu_mask(cpu, mask) { | 304 | for_each_cpu_mask_nr(cpu, mask) { |
305 | s = kmalloc_node(sizeof(struct listener), GFP_KERNEL, | 305 | s = kmalloc_node(sizeof(struct listener), GFP_KERNEL, |
306 | cpu_to_node(cpu)); | 306 | cpu_to_node(cpu)); |
307 | if (!s) | 307 | if (!s) |
@@ -320,7 +320,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd) | |||
320 | 320 | ||
321 | /* Deregister or cleanup */ | 321 | /* Deregister or cleanup */ |
322 | cleanup: | 322 | cleanup: |
323 | for_each_cpu_mask(cpu, mask) { | 323 | for_each_cpu_mask_nr(cpu, mask) { |
324 | listeners = &per_cpu(listener_array, cpu); | 324 | listeners = &per_cpu(listener_array, cpu); |
325 | down_write(&listeners->sem); | 325 | down_write(&listeners->sem); |
326 | list_for_each_entry_safe(s, tmp, &listeners->list, list) { | 326 | list_for_each_entry_safe(s, tmp, &listeners->list, list) { |
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index b1c2da81b050..093d4acf993b 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c | |||
@@ -145,9 +145,9 @@ static void clocksource_watchdog(unsigned long data) | |||
145 | * Cycle through CPUs to check if the CPUs stay | 145 | * Cycle through CPUs to check if the CPUs stay |
146 | * synchronized to each other. | 146 | * synchronized to each other. |
147 | */ | 147 | */ |
148 | int next_cpu = next_cpu(raw_smp_processor_id(), cpu_online_map); | 148 | int next_cpu = next_cpu_nr(raw_smp_processor_id(), cpu_online_map); |
149 | 149 | ||
150 | if (next_cpu >= NR_CPUS) | 150 | if (next_cpu >= nr_cpu_ids) |
151 | next_cpu = first_cpu(cpu_online_map); | 151 | next_cpu = first_cpu(cpu_online_map); |
152 | watchdog_timer.expires += WATCHDOG_INTERVAL; | 152 | watchdog_timer.expires += WATCHDOG_INTERVAL; |
153 | add_timer_on(&watchdog_timer, next_cpu); | 153 | add_timer_on(&watchdog_timer, next_cpu); |
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index f48d0f09d32f..31463d370b94 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
@@ -399,8 +399,7 @@ again: | |||
399 | mask = CPU_MASK_NONE; | 399 | mask = CPU_MASK_NONE; |
400 | now = ktime_get(); | 400 | now = ktime_get(); |
401 | /* Find all expired events */ | 401 | /* Find all expired events */ |
402 | for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS; | 402 | for_each_cpu_mask_nr(cpu, tick_broadcast_oneshot_mask) { |
403 | cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) { | ||
404 | td = &per_cpu(tick_cpu_device, cpu); | 403 | td = &per_cpu(tick_cpu_device, cpu); |
405 | if (td->evtdev->next_event.tv64 <= now.tv64) | 404 | if (td->evtdev->next_event.tv64 <= now.tv64) |
406 | cpu_set(cpu, mask); | 405 | cpu_set(cpu, mask); |
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 4f3886562b8c..bf43284d6855 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c | |||
@@ -135,7 +135,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast) | |||
135 | */ | 135 | */ |
136 | static void tick_setup_device(struct tick_device *td, | 136 | static void tick_setup_device(struct tick_device *td, |
137 | struct clock_event_device *newdev, int cpu, | 137 | struct clock_event_device *newdev, int cpu, |
138 | cpumask_t cpumask) | 138 | const cpumask_t *cpumask) |
139 | { | 139 | { |
140 | ktime_t next_event; | 140 | ktime_t next_event; |
141 | void (*handler)(struct clock_event_device *) = NULL; | 141 | void (*handler)(struct clock_event_device *) = NULL; |
@@ -169,8 +169,8 @@ static void tick_setup_device(struct tick_device *td, | |||
169 | * When the device is not per cpu, pin the interrupt to the | 169 | * When the device is not per cpu, pin the interrupt to the |
170 | * current cpu: | 170 | * current cpu: |
171 | */ | 171 | */ |
172 | if (!cpus_equal(newdev->cpumask, cpumask)) | 172 | if (!cpus_equal(newdev->cpumask, *cpumask)) |
173 | irq_set_affinity(newdev->irq, cpumask); | 173 | irq_set_affinity(newdev->irq, *cpumask); |
174 | 174 | ||
175 | /* | 175 | /* |
176 | * When global broadcasting is active, check if the current | 176 | * When global broadcasting is active, check if the current |
@@ -196,20 +196,20 @@ static int tick_check_new_device(struct clock_event_device *newdev) | |||
196 | struct tick_device *td; | 196 | struct tick_device *td; |
197 | int cpu, ret = NOTIFY_OK; | 197 | int cpu, ret = NOTIFY_OK; |
198 | unsigned long flags; | 198 | unsigned long flags; |
199 | cpumask_t cpumask; | 199 | cpumask_of_cpu_ptr_declare(cpumask); |
200 | 200 | ||
201 | spin_lock_irqsave(&tick_device_lock, flags); | 201 | spin_lock_irqsave(&tick_device_lock, flags); |
202 | 202 | ||
203 | cpu = smp_processor_id(); | 203 | cpu = smp_processor_id(); |
204 | cpumask_of_cpu_ptr_next(cpumask, cpu); | ||
204 | if (!cpu_isset(cpu, newdev->cpumask)) | 205 | if (!cpu_isset(cpu, newdev->cpumask)) |
205 | goto out_bc; | 206 | goto out_bc; |
206 | 207 | ||
207 | td = &per_cpu(tick_cpu_device, cpu); | 208 | td = &per_cpu(tick_cpu_device, cpu); |
208 | curdev = td->evtdev; | 209 | curdev = td->evtdev; |
209 | cpumask = cpumask_of_cpu(cpu); | ||
210 | 210 | ||
211 | /* cpu local device ? */ | 211 | /* cpu local device ? */ |
212 | if (!cpus_equal(newdev->cpumask, cpumask)) { | 212 | if (!cpus_equal(newdev->cpumask, *cpumask)) { |
213 | 213 | ||
214 | /* | 214 | /* |
215 | * If the cpu affinity of the device interrupt can not | 215 | * If the cpu affinity of the device interrupt can not |
@@ -222,7 +222,7 @@ static int tick_check_new_device(struct clock_event_device *newdev) | |||
222 | * If we have a cpu local device already, do not replace it | 222 | * If we have a cpu local device already, do not replace it |
223 | * by a non cpu local device | 223 | * by a non cpu local device |
224 | */ | 224 | */ |
225 | if (curdev && cpus_equal(curdev->cpumask, cpumask)) | 225 | if (curdev && cpus_equal(curdev->cpumask, *cpumask)) |
226 | goto out_bc; | 226 | goto out_bc; |
227 | } | 227 | } |
228 | 228 | ||
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index beef7ccdf842..942fc7c85283 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -140,8 +140,6 @@ void tick_nohz_update_jiffies(void) | |||
140 | if (!ts->tick_stopped) | 140 | if (!ts->tick_stopped) |
141 | return; | 141 | return; |
142 | 142 | ||
143 | touch_softlockup_watchdog(); | ||
144 | |||
145 | cpu_clear(cpu, nohz_cpu_mask); | 143 | cpu_clear(cpu, nohz_cpu_mask); |
146 | now = ktime_get(); | 144 | now = ktime_get(); |
147 | ts->idle_waketime = now; | 145 | ts->idle_waketime = now; |
@@ -149,6 +147,8 @@ void tick_nohz_update_jiffies(void) | |||
149 | local_irq_save(flags); | 147 | local_irq_save(flags); |
150 | tick_do_update_jiffies64(now); | 148 | tick_do_update_jiffies64(now); |
151 | local_irq_restore(flags); | 149 | local_irq_restore(flags); |
150 | |||
151 | touch_softlockup_watchdog(); | ||
152 | } | 152 | } |
153 | 153 | ||
154 | void tick_nohz_stop_idle(int cpu) | 154 | void tick_nohz_stop_idle(int cpu) |
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c index 2301e1e7c606..63528086337c 100644 --- a/kernel/trace/trace_sysprof.c +++ b/kernel/trace/trace_sysprof.c | |||
@@ -213,7 +213,9 @@ static void start_stack_timers(void) | |||
213 | int cpu; | 213 | int cpu; |
214 | 214 | ||
215 | for_each_online_cpu(cpu) { | 215 | for_each_online_cpu(cpu) { |
216 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); | 216 | cpumask_of_cpu_ptr(new_mask, cpu); |
217 | |||
218 | set_cpus_allowed_ptr(current, new_mask); | ||
217 | start_stack_timer(cpu); | 219 | start_stack_timer(cpu); |
218 | } | 220 | } |
219 | set_cpus_allowed_ptr(current, &saved_mask); | 221 | set_cpus_allowed_ptr(current, &saved_mask); |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index ce7799540c91..a6d36346d10a 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -397,7 +397,7 @@ void flush_workqueue(struct workqueue_struct *wq) | |||
397 | might_sleep(); | 397 | might_sleep(); |
398 | lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_); | 398 | lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_); |
399 | lock_release(&wq->lockdep_map, 1, _THIS_IP_); | 399 | lock_release(&wq->lockdep_map, 1, _THIS_IP_); |
400 | for_each_cpu_mask(cpu, *cpu_map) | 400 | for_each_cpu_mask_nr(cpu, *cpu_map) |
401 | flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); | 401 | flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); |
402 | } | 402 | } |
403 | EXPORT_SYMBOL_GPL(flush_workqueue); | 403 | EXPORT_SYMBOL_GPL(flush_workqueue); |
@@ -477,7 +477,7 @@ static void wait_on_work(struct work_struct *work) | |||
477 | wq = cwq->wq; | 477 | wq = cwq->wq; |
478 | cpu_map = wq_cpu_map(wq); | 478 | cpu_map = wq_cpu_map(wq); |
479 | 479 | ||
480 | for_each_cpu_mask(cpu, *cpu_map) | 480 | for_each_cpu_mask_nr(cpu, *cpu_map) |
481 | wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work); | 481 | wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work); |
482 | } | 482 | } |
483 | 483 | ||
@@ -813,7 +813,7 @@ void destroy_workqueue(struct workqueue_struct *wq) | |||
813 | list_del(&wq->list); | 813 | list_del(&wq->list); |
814 | spin_unlock(&workqueue_lock); | 814 | spin_unlock(&workqueue_lock); |
815 | 815 | ||
816 | for_each_cpu_mask(cpu, *cpu_map) | 816 | for_each_cpu_mask_nr(cpu, *cpu_map) |
817 | cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu)); | 817 | cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu)); |
818 | put_online_cpus(); | 818 | put_online_cpus(); |
819 | 819 | ||
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index ba106db5a65b..882c51048993 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -150,7 +150,7 @@ config DETECT_SOFTLOCKUP | |||
150 | help | 150 | help |
151 | Say Y here to enable the kernel to detect "soft lockups", | 151 | Say Y here to enable the kernel to detect "soft lockups", |
152 | which are bugs that cause the kernel to loop in kernel | 152 | which are bugs that cause the kernel to loop in kernel |
153 | mode for more than 10 seconds, without giving other tasks a | 153 | mode for more than 60 seconds, without giving other tasks a |
154 | chance to run. | 154 | chance to run. |
155 | 155 | ||
156 | When a soft-lockup is detected, the kernel will print the | 156 | When a soft-lockup is detected, the kernel will print the |
@@ -162,6 +162,30 @@ config DETECT_SOFTLOCKUP | |||
162 | can be detected via the NMI-watchdog, on platforms that | 162 | can be detected via the NMI-watchdog, on platforms that |
163 | support it.) | 163 | support it.) |
164 | 164 | ||
165 | config BOOTPARAM_SOFTLOCKUP_PANIC | ||
166 | bool "Panic (Reboot) On Soft Lockups" | ||
167 | depends on DETECT_SOFTLOCKUP | ||
168 | help | ||
169 | Say Y here to enable the kernel to panic on "soft lockups", | ||
170 | which are bugs that cause the kernel to loop in kernel | ||
171 | mode for more than 60 seconds, without giving other tasks a | ||
172 | chance to run. | ||
173 | |||
174 | The panic can be used in combination with panic_timeout, | ||
175 | to cause the system to reboot automatically after a | ||
176 | lockup has been detected. This feature is useful for | ||
177 | high-availability systems that have uptime guarantees and | ||
178 | where a lockup must be resolved ASAP. | ||
179 | |||
180 | Say N if unsure. | ||
181 | |||
182 | config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE | ||
183 | int | ||
184 | depends on DETECT_SOFTLOCKUP | ||
185 | range 0 1 | ||
186 | default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC | ||
187 | default 1 if BOOTPARAM_SOFTLOCKUP_PANIC | ||
188 | |||
165 | config SCHED_DEBUG | 189 | config SCHED_DEBUG |
166 | bool "Collect scheduler debugging info" | 190 | bool "Collect scheduler debugging info" |
167 | depends on DEBUG_KERNEL && PROC_FS | 191 | depends on DEBUG_KERNEL && PROC_FS |
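Editor's note: how the new Kconfig default reaches the detector is not shown in this excerpt. Below is a minimal sketch of the expected wiring in kernel/softlockup.c; only the CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE symbol comes from the hunk above, while the helper names (softlockup_panic_setup, softlockup_report), the softlockup_panic= hook and the message text are illustrative assumptions, not taken from this patch.

    #include <linux/init.h>
    #include <linux/kernel.h>

    /* Assumed wiring -- not part of the hunks shown in this patch excerpt. */
    static unsigned int softlockup_panic = CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

    static int __init softlockup_panic_setup(char *str)
    {
            softlockup_panic = simple_strtoul(str, NULL, 0);
            return 1;
    }
    __setup("softlockup_panic=", softlockup_panic_setup);

    /* Called from the watchdog tick once a CPU has been stuck too long. */
    static void softlockup_report(int this_cpu, unsigned long duration)
    {
            printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus!\n",
                   this_cpu, duration);
            if (softlockup_panic)
                    panic("softlockup: hung tasks");
    }

With BOOTPARAM_SOFTLOCKUP_PANIC=y the _VALUE symbol defaults to 1, so the panic fires without any command-line override; combined with panic_timeout that turns a lockup into an automatic reboot.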
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb index a5d4b1dac2a5..2cfd2721f7ed 100644 --- a/lib/Kconfig.kgdb +++ b/lib/Kconfig.kgdb | |||
@@ -1,7 +1,4 @@ | |||
1 | 1 | ||
2 | config HAVE_ARCH_KGDB_SHADOW_INFO | ||
3 | bool | ||
4 | |||
5 | config HAVE_ARCH_KGDB | 2 | config HAVE_ARCH_KGDB |
6 | bool | 3 | bool |
7 | 4 | ||
diff --git a/lib/cpumask.c b/lib/cpumask.c index bb4f76d3c3e7..5f97dc25ef9c 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c | |||
@@ -15,6 +15,15 @@ int __next_cpu(int n, const cpumask_t *srcp) | |||
15 | } | 15 | } |
16 | EXPORT_SYMBOL(__next_cpu); | 16 | EXPORT_SYMBOL(__next_cpu); |
17 | 17 | ||
18 | #if NR_CPUS > 64 | ||
19 | int __next_cpu_nr(int n, const cpumask_t *srcp) | ||
20 | { | ||
21 | return min_t(int, nr_cpu_ids, | ||
22 | find_next_bit(srcp->bits, nr_cpu_ids, n+1)); | ||
23 | } | ||
24 | EXPORT_SYMBOL(__next_cpu_nr); | ||
25 | #endif | ||
26 | |||
18 | int __any_online_cpu(const cpumask_t *mask) | 27 | int __any_online_cpu(const cpumask_t *mask) |
19 | { | 28 | { |
20 | int cpu; | 29 | int cpu; |
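Editor's note: the point of the _nr helpers is to bound iteration by the runtime nr_cpu_ids rather than the compile-time NR_CPUS, which matters once NR_CPUS grows past 64. A rough sketch of how the for_each_cpu_mask_nr() users elsewhere in this patch (workqueue, allocpercpu, vmstat, net) can sit on top of the __next_cpu_nr() helper added here is shown below; the authoritative macro lives in include/linux/cpumask.h and may differ in detail.

    /*
     * Approximate shape only -- call sites just need every set bit
     * below nr_cpu_ids to be visited exactly once.
     */
    #define for_each_cpu_mask_nr(cpu, mask)                         \
            for ((cpu) = -1;                                        \
                 (cpu) = __next_cpu_nr((cpu), &(mask)),             \
                 (cpu) < nr_cpu_ids; )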
diff --git a/lib/scatterlist.c b/lib/scatterlist.c index b80c21100d78..876ba6d5b670 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c | |||
@@ -295,6 +295,117 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask) | |||
295 | EXPORT_SYMBOL(sg_alloc_table); | 295 | EXPORT_SYMBOL(sg_alloc_table); |
296 | 296 | ||
297 | /** | 297 | /** |
298 | * sg_miter_start - start mapping iteration over a sg list | ||
299 | * @miter: sg mapping iter to be started | ||
300 | * @sgl: sg list to iterate over | ||
301 | * @nents: number of sg entries | ||
302 | * | ||
303 | * Description: | ||
304 | * Starts mapping iterator @miter. | ||
305 | * | ||
306 | * Context: | ||
307 | * Don't care. | ||
308 | */ | ||
309 | void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl, | ||
310 | unsigned int nents, unsigned int flags) | ||
311 | { | ||
312 | memset(miter, 0, sizeof(struct sg_mapping_iter)); | ||
313 | |||
314 | miter->__sg = sgl; | ||
315 | miter->__nents = nents; | ||
316 | miter->__offset = 0; | ||
317 | miter->__flags = flags; | ||
318 | } | ||
319 | EXPORT_SYMBOL(sg_miter_start); | ||
320 | |||
321 | /** | ||
322 | * sg_miter_next - proceed mapping iterator to the next mapping | ||
323 | * @miter: sg mapping iter to proceed | ||
324 | * | ||
325 | * Description: | ||
326 | * Proceeds @miter to the next mapping. @miter should have been | ||
327 | * started using sg_miter_start(). On successful return, | ||
328 | * @miter->page, @miter->addr and @miter->length point to the | ||
329 | * current mapping. | ||
330 | * | ||
331 | * Context: | ||
332 | * IRQ disabled if SG_MITER_ATOMIC. IRQ must stay disabled till | ||
333 | * @miter is stopped. May sleep if !SG_MITER_ATOMIC. | ||
334 | * | ||
335 | * Returns: | ||
336 | * true if @miter contains the next mapping. false if end of sg | ||
337 | * list is reached. | ||
338 | */ | ||
339 | bool sg_miter_next(struct sg_mapping_iter *miter) | ||
340 | { | ||
341 | unsigned int off, len; | ||
342 | |||
343 | /* check for end and drop resources from the last iteration */ | ||
344 | if (!miter->__nents) | ||
345 | return false; | ||
346 | |||
347 | sg_miter_stop(miter); | ||
348 | |||
349 | /* get to the next sg if necessary. __offset is adjusted by stop */ | ||
350 | if (miter->__offset == miter->__sg->length && --miter->__nents) { | ||
351 | miter->__sg = sg_next(miter->__sg); | ||
352 | miter->__offset = 0; | ||
353 | } | ||
354 | |||
355 | /* map the next page */ | ||
356 | off = miter->__sg->offset + miter->__offset; | ||
357 | len = miter->__sg->length - miter->__offset; | ||
358 | |||
359 | miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT); | ||
360 | off &= ~PAGE_MASK; | ||
361 | miter->length = min_t(unsigned int, len, PAGE_SIZE - off); | ||
362 | miter->consumed = miter->length; | ||
363 | |||
364 | if (miter->__flags & SG_MITER_ATOMIC) | ||
365 | miter->addr = kmap_atomic(miter->page, KM_BIO_SRC_IRQ) + off; | ||
366 | else | ||
367 | miter->addr = kmap(miter->page) + off; | ||
368 | |||
369 | return true; | ||
370 | } | ||
371 | EXPORT_SYMBOL(sg_miter_next); | ||
372 | |||
373 | /** | ||
374 | * sg_miter_stop - stop mapping iteration | ||
375 | * @miter: sg mapping iter to be stopped | ||
376 | * | ||
377 | * Description: | ||
378 | * Stops mapping iterator @miter. @miter should have been started | ||
379 | * using sg_miter_start(). A stopped iteration can be | ||
380 | * resumed by calling sg_miter_next() on it. This is useful when | ||
381 | * resources (kmap) need to be released during iteration. | ||
382 | * | ||
383 | * Context: | ||
384 | * IRQ disabled if SG_MITER_ATOMIC is set. Don't care otherwise. | ||
385 | */ | ||
386 | void sg_miter_stop(struct sg_mapping_iter *miter) | ||
387 | { | ||
388 | WARN_ON(miter->consumed > miter->length); | ||
389 | |||
390 | /* drop resources from the last iteration */ | ||
391 | if (miter->addr) { | ||
392 | miter->__offset += miter->consumed; | ||
393 | |||
394 | if (miter->__flags & SG_MITER_ATOMIC) { | ||
395 | WARN_ON(!irqs_disabled()); | ||
396 | kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ); | ||
397 | } else | ||
398 | kunmap(miter->addr); | ||
399 | |||
400 | miter->page = NULL; | ||
401 | miter->addr = NULL; | ||
402 | miter->length = 0; | ||
403 | miter->consumed = 0; | ||
404 | } | ||
405 | } | ||
406 | EXPORT_SYMBOL(sg_miter_stop); | ||
407 | |||
408 | /** | ||
298 | * sg_copy_buffer - Copy data between a linear buffer and an SG list | 409 | * sg_copy_buffer - Copy data between a linear buffer and an SG list |
299 | * @sgl: The SG list | 410 | * @sgl: The SG list |
300 | * @nents: Number of SG entries | 411 | * @nents: Number of SG entries |
@@ -309,56 +420,29 @@ EXPORT_SYMBOL(sg_alloc_table); | |||
309 | static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, | 420 | static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, |
310 | void *buf, size_t buflen, int to_buffer) | 421 | void *buf, size_t buflen, int to_buffer) |
311 | { | 422 | { |
312 | struct scatterlist *sg; | 423 | unsigned int offset = 0; |
313 | size_t buf_off = 0; | 424 | struct sg_mapping_iter miter; |
314 | int i; | 425 | |
315 | 426 | sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC); | |
316 | WARN_ON(!irqs_disabled()); | 427 | |
317 | 428 | while (sg_miter_next(&miter) && offset < buflen) { | |
318 | for_each_sg(sgl, sg, nents, i) { | 429 | unsigned int len; |
319 | struct page *page; | 430 | |
320 | int n = 0; | 431 | len = min(miter.length, buflen - offset); |
321 | unsigned int sg_off = sg->offset; | 432 | |
322 | unsigned int sg_copy = sg->length; | 433 | if (to_buffer) |
323 | 434 | memcpy(buf + offset, miter.addr, len); | |
324 | if (sg_copy > buflen) | 435 | else { |
325 | sg_copy = buflen; | 436 | memcpy(miter.addr, buf + offset, len); |
326 | buflen -= sg_copy; | 437 | flush_kernel_dcache_page(miter.page); |
327 | |||
328 | while (sg_copy > 0) { | ||
329 | unsigned int page_copy; | ||
330 | void *p; | ||
331 | |||
332 | page_copy = PAGE_SIZE - sg_off; | ||
333 | if (page_copy > sg_copy) | ||
334 | page_copy = sg_copy; | ||
335 | |||
336 | page = nth_page(sg_page(sg), n); | ||
337 | p = kmap_atomic(page, KM_BIO_SRC_IRQ); | ||
338 | |||
339 | if (to_buffer) | ||
340 | memcpy(buf + buf_off, p + sg_off, page_copy); | ||
341 | else { | ||
342 | memcpy(p + sg_off, buf + buf_off, page_copy); | ||
343 | flush_kernel_dcache_page(page); | ||
344 | } | ||
345 | |||
346 | kunmap_atomic(p, KM_BIO_SRC_IRQ); | ||
347 | |||
348 | buf_off += page_copy; | ||
349 | sg_off += page_copy; | ||
350 | if (sg_off == PAGE_SIZE) { | ||
351 | sg_off = 0; | ||
352 | n++; | ||
353 | } | ||
354 | sg_copy -= page_copy; | ||
355 | } | 438 | } |
356 | 439 | ||
357 | if (!buflen) | 440 | offset += len; |
358 | break; | ||
359 | } | 441 | } |
360 | 442 | ||
361 | return buf_off; | 443 | sg_miter_stop(&miter); |
444 | |||
445 | return offset; | ||
362 | } | 446 | } |
363 | 447 | ||
364 | /** | 448 | /** |
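Editor's note: the rewritten sg_copy_buffer() above doubles as the first user of the iterator. For reference, a self-contained usage sketch follows; the function name sg_zero_example() is hypothetical and the zeroing workload is chosen only to keep it short. Passing 0 instead of SG_MITER_ATOMIC selects the sleeping kmap() path, so it must run in process context.

    #include <linux/scatterlist.h>
    #include <linux/string.h>

    /* Hypothetical helper: clear every byte described by an sg list. */
    static size_t sg_zero_example(struct scatterlist *sgl, unsigned int nents)
    {
            struct sg_mapping_iter miter;
            size_t cleared = 0;

            sg_miter_start(&miter, sgl, nents, 0);  /* no SG_MITER_ATOMIC */

            while (sg_miter_next(&miter)) {
                    /* each step maps at most one page: miter.addr/miter.length */
                    memset(miter.addr, 0, miter.length);
                    cleared += miter.length;
            }

            sg_miter_stop(&miter);  /* drops the final kmap() */
            return cleared;
    }

Note that sg_miter_next() stops the previous mapping itself, so only the final sg_miter_stop() is needed outside the loop.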
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c index 3b4dc098181e..c4381d9516f6 100644 --- a/lib/smp_processor_id.c +++ b/lib/smp_processor_id.c | |||
@@ -11,7 +11,7 @@ notrace unsigned int debug_smp_processor_id(void) | |||
11 | { | 11 | { |
12 | unsigned long preempt_count = preempt_count(); | 12 | unsigned long preempt_count = preempt_count(); |
13 | int this_cpu = raw_smp_processor_id(); | 13 | int this_cpu = raw_smp_processor_id(); |
14 | cpumask_t this_mask; | 14 | cpumask_of_cpu_ptr_declare(this_mask); |
15 | 15 | ||
16 | if (likely(preempt_count)) | 16 | if (likely(preempt_count)) |
17 | goto out; | 17 | goto out; |
@@ -23,9 +23,9 @@ notrace unsigned int debug_smp_processor_id(void) | |||
23 | * Kernel threads bound to a single CPU can safely use | 23 | * Kernel threads bound to a single CPU can safely use |
24 | * smp_processor_id(): | 24 | * smp_processor_id(): |
25 | */ | 25 | */ |
26 | this_mask = cpumask_of_cpu(this_cpu); | 26 | cpumask_of_cpu_ptr_next(this_mask, this_cpu); |
27 | 27 | ||
28 | if (cpus_equal(current->cpus_allowed, this_mask)) | 28 | if (cpus_equal(current->cpus_allowed, *this_mask)) |
29 | goto out; | 29 | goto out; |
30 | 30 | ||
31 | /* | 31 | /* |
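Editor's note: debug_smp_processor_id() now goes through the cpumask_of_cpu_ptr helpers instead of copying a full cpumask_t onto the stack, the same pattern used in trace_sysprof.c and svc.c above. The expansion below is an illustrative assumption only (the real, config-dependent definitions live in include/linux/cpumask.h); the call sites rely on nothing more than ending up with a const cpumask_t * describing one CPU.

    /* Assumed shape, for a config that provides a precomputed mask table. */
    extern const cpumask_t *cpumask_of_cpu_map;     /* assumed table */

    #define cpumask_of_cpu_ptr_declare(v)   const cpumask_t *v
    #define cpumask_of_cpu_ptr_next(v, cpu) ((v) = &cpumask_of_cpu_map[(cpu)])
    #define cpumask_of_cpu_ptr(v, cpu)                              \
            cpumask_of_cpu_ptr_declare(v) = &cpumask_of_cpu_map[(cpu)]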
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c index 05f2b4009ccc..843364594e23 100644 --- a/mm/allocpercpu.c +++ b/mm/allocpercpu.c | |||
@@ -35,7 +35,7 @@ EXPORT_SYMBOL_GPL(percpu_depopulate); | |||
35 | void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask) | 35 | void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask) |
36 | { | 36 | { |
37 | int cpu; | 37 | int cpu; |
38 | for_each_cpu_mask(cpu, *mask) | 38 | for_each_cpu_mask_nr(cpu, *mask) |
39 | percpu_depopulate(__pdata, cpu); | 39 | percpu_depopulate(__pdata, cpu); |
40 | } | 40 | } |
41 | EXPORT_SYMBOL_GPL(__percpu_depopulate_mask); | 41 | EXPORT_SYMBOL_GPL(__percpu_depopulate_mask); |
@@ -86,7 +86,7 @@ int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp, | |||
86 | int cpu; | 86 | int cpu; |
87 | 87 | ||
88 | cpus_clear(populated); | 88 | cpus_clear(populated); |
89 | for_each_cpu_mask(cpu, *mask) | 89 | for_each_cpu_mask_nr(cpu, *mask) |
90 | if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) { | 90 | if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) { |
91 | __percpu_depopulate_mask(__pdata, &populated); | 91 | __percpu_depopulate_mask(__pdata, &populated); |
92 | return -ENOMEM; | 92 | return -ENOMEM; |
diff --git a/mm/vmstat.c b/mm/vmstat.c index db9eabb2c5b3..c3d4a781802f 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c | |||
@@ -26,7 +26,7 @@ static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask) | |||
26 | 26 | ||
27 | memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long)); | 27 | memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long)); |
28 | 28 | ||
29 | for_each_cpu_mask(cpu, *cpumask) { | 29 | for_each_cpu_mask_nr(cpu, *cpumask) { |
30 | struct vm_event_state *this = &per_cpu(vm_event_states, cpu); | 30 | struct vm_event_state *this = &per_cpu(vm_event_states, cpu); |
31 | 31 | ||
32 | for (i = 0; i < NR_VM_EVENT_ITEMS; i++) | 32 | for (i = 0; i < NR_VM_EVENT_ITEMS; i++) |
diff --git a/net/core/dev.c b/net/core/dev.c index 6bf217da9d8f..7463a2150b09 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -2398,7 +2398,7 @@ out: | |||
2398 | */ | 2398 | */ |
2399 | if (!cpus_empty(net_dma.channel_mask)) { | 2399 | if (!cpus_empty(net_dma.channel_mask)) { |
2400 | int chan_idx; | 2400 | int chan_idx; |
2401 | for_each_cpu_mask(chan_idx, net_dma.channel_mask) { | 2401 | for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) { |
2402 | struct dma_chan *chan = net_dma.channels[chan_idx]; | 2402 | struct dma_chan *chan = net_dma.channels[chan_idx]; |
2403 | if (chan) | 2403 | if (chan) |
2404 | dma_async_memcpy_issue_pending(chan); | 2404 | dma_async_memcpy_issue_pending(chan); |
@@ -4533,7 +4533,7 @@ static void net_dma_rebalance(struct net_dma *net_dma) | |||
4533 | i = 0; | 4533 | i = 0; |
4534 | cpu = first_cpu(cpu_online_map); | 4534 | cpu = first_cpu(cpu_online_map); |
4535 | 4535 | ||
4536 | for_each_cpu_mask(chan_idx, net_dma->channel_mask) { | 4536 | for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) { |
4537 | chan = net_dma->channels[chan_idx]; | 4537 | chan = net_dma->channels[chan_idx]; |
4538 | 4538 | ||
4539 | n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask)) | 4539 | n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask)) |
diff --git a/net/core/user_dma.c b/net/core/user_dma.c index c77aff9c6eb3..8c6b706963ff 100644 --- a/net/core/user_dma.c +++ b/net/core/user_dma.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #define NET_DMA_DEFAULT_COPYBREAK 4096 | 34 | #define NET_DMA_DEFAULT_COPYBREAK 4096 |
35 | 35 | ||
36 | int sysctl_tcp_dma_copybreak = NET_DMA_DEFAULT_COPYBREAK; | 36 | int sysctl_tcp_dma_copybreak = NET_DMA_DEFAULT_COPYBREAK; |
37 | EXPORT_SYMBOL(sysctl_tcp_dma_copybreak); | ||
37 | 38 | ||
38 | /** | 39 | /** |
39 | * dma_skb_copy_datagram_iovec - Copy a datagram to an iovec. | 40 | * dma_skb_copy_datagram_iovec - Copy a datagram to an iovec. |
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index 265b1b289a32..705959b31e24 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c | |||
@@ -497,7 +497,7 @@ static void iucv_setmask_up(void) | |||
497 | /* Disable all cpu but the first in cpu_irq_cpumask. */ | 497 | /* Disable all cpu but the first in cpu_irq_cpumask. */ |
498 | cpumask = iucv_irq_cpumask; | 498 | cpumask = iucv_irq_cpumask; |
499 | cpu_clear(first_cpu(iucv_irq_cpumask), cpumask); | 499 | cpu_clear(first_cpu(iucv_irq_cpumask), cpumask); |
500 | for_each_cpu_mask(cpu, cpumask) | 500 | for_each_cpu_mask_nr(cpu, cpumask) |
501 | smp_call_function_single(cpu, iucv_block_cpu, NULL, 1); | 501 | smp_call_function_single(cpu, iucv_block_cpu, NULL, 1); |
502 | } | 502 | } |
503 | 503 | ||
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 5a32cb7c4bb4..835d27413083 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c | |||
@@ -310,7 +310,8 @@ svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx) | |||
310 | switch (m->mode) { | 310 | switch (m->mode) { |
311 | case SVC_POOL_PERCPU: | 311 | case SVC_POOL_PERCPU: |
312 | { | 312 | { |
313 | set_cpus_allowed_ptr(task, &cpumask_of_cpu(node)); | 313 | cpumask_of_cpu_ptr(cpumask, node); |
314 | set_cpus_allowed_ptr(task, cpumask); | ||
314 | break; | 315 | break; |
315 | } | 316 | } |
316 | case SVC_POOL_PERNODE: | 317 | case SVC_POOL_PERNODE: |
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig index 12f6ac99b04c..9212c37a33b8 100644 --- a/sound/soc/pxa/Kconfig +++ b/sound/soc/pxa/Kconfig | |||
@@ -48,6 +48,7 @@ config SND_PXA2XX_SOC_POODLE | |||
48 | config SND_PXA2XX_SOC_TOSA | 48 | config SND_PXA2XX_SOC_TOSA |
49 | tristate "SoC AC97 Audio support for Tosa" | 49 | tristate "SoC AC97 Audio support for Tosa" |
50 | depends on SND_PXA2XX_SOC && MACH_TOSA | 50 | depends on SND_PXA2XX_SOC && MACH_TOSA |
51 | depends on MFD_TC6393XB | ||
51 | select SND_PXA2XX_SOC_AC97 | 52 | select SND_PXA2XX_SOC_AC97 |
52 | select SND_SOC_WM9712 | 53 | select SND_SOC_WM9712 |
53 | help | 54 | help |
diff --git a/sound/soc/pxa/tosa.c b/sound/soc/pxa/tosa.c index b6edb61a3a30..fe6cca9c9e76 100644 --- a/sound/soc/pxa/tosa.c +++ b/sound/soc/pxa/tosa.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/moduleparam.h> | 22 | #include <linux/moduleparam.h> |
23 | #include <linux/device.h> | 23 | #include <linux/device.h> |
24 | #include <linux/gpio.h> | ||
24 | 25 | ||
25 | #include <sound/core.h> | 26 | #include <sound/core.h> |
26 | #include <sound/pcm.h> | 27 | #include <sound/pcm.h> |
@@ -28,7 +29,7 @@ | |||
28 | #include <sound/soc-dapm.h> | 29 | #include <sound/soc-dapm.h> |
29 | 30 | ||
30 | #include <asm/mach-types.h> | 31 | #include <asm/mach-types.h> |
31 | #include <asm/hardware/tmio.h> | 32 | #include <asm/arch/tosa.h> |
32 | #include <asm/arch/pxa-regs.h> | 33 | #include <asm/arch/pxa-regs.h> |
33 | #include <asm/arch/hardware.h> | 34 | #include <asm/arch/hardware.h> |
34 | #include <asm/arch/audio.h> | 35 | #include <asm/arch/audio.h> |
@@ -137,10 +138,7 @@ static int tosa_set_spk(struct snd_kcontrol *kcontrol, | |||
137 | static int tosa_hp_event(struct snd_soc_dapm_widget *w, | 138 | static int tosa_hp_event(struct snd_soc_dapm_widget *w, |
138 | struct snd_kcontrol *k, int event) | 139 | struct snd_kcontrol *k, int event) |
139 | { | 140 | { |
140 | if (SND_SOC_DAPM_EVENT_ON(event)) | 141 | gpio_set_value(TOSA_GPIO_L_MUTE, SND_SOC_DAPM_EVENT_ON(event) ? 1 : 0); |
141 | set_tc6393_gpio(&tc6393_device.dev,TOSA_TC6393_L_MUTE); | ||
142 | else | ||
143 | reset_tc6393_gpio(&tc6393_device.dev,TOSA_TC6393_L_MUTE); | ||
144 | return 0; | 142 | return 0; |
145 | } | 143 | } |
146 | 144 | ||
@@ -254,16 +252,28 @@ static int __init tosa_init(void) | |||
254 | if (!machine_is_tosa()) | 252 | if (!machine_is_tosa()) |
255 | return -ENODEV; | 253 | return -ENODEV; |
256 | 254 | ||
255 | ret = gpio_request(TOSA_GPIO_L_MUTE, "Headphone Jack"); | ||
256 | if (ret) | ||
257 | return ret; | ||
258 | gpio_direction_output(TOSA_GPIO_L_MUTE, 0); | ||
259 | |||
257 | tosa_snd_device = platform_device_alloc("soc-audio", -1); | 260 | tosa_snd_device = platform_device_alloc("soc-audio", -1); |
258 | if (!tosa_snd_device) | 261 | if (!tosa_snd_device) { |
259 | return -ENOMEM; | 262 | ret = -ENOMEM; |
263 | goto err_alloc; | ||
264 | } | ||
260 | 265 | ||
261 | platform_set_drvdata(tosa_snd_device, &tosa_snd_devdata); | 266 | platform_set_drvdata(tosa_snd_device, &tosa_snd_devdata); |
262 | tosa_snd_devdata.dev = &tosa_snd_device->dev; | 267 | tosa_snd_devdata.dev = &tosa_snd_device->dev; |
263 | ret = platform_device_add(tosa_snd_device); | 268 | ret = platform_device_add(tosa_snd_device); |
264 | 269 | ||
265 | if (ret) | 270 | if (!ret) |
266 | platform_device_put(tosa_snd_device); | 271 | return 0; |
272 | |||
273 | platform_device_put(tosa_snd_device); | ||
274 | |||
275 | err_alloc: | ||
276 | gpio_free(TOSA_GPIO_L_MUTE); | ||
267 | 277 | ||
268 | return ret; | 278 | return ret; |
269 | } | 279 | } |
@@ -271,6 +281,7 @@ static int __init tosa_init(void) | |||
271 | static void __exit tosa_exit(void) | 281 | static void __exit tosa_exit(void) |
272 | { | 282 | { |
273 | platform_device_unregister(tosa_snd_device); | 283 | platform_device_unregister(tosa_snd_device); |
284 | gpio_free(TOSA_GPIO_L_MUTE); | ||
274 | } | 285 | } |
275 | 286 | ||
276 | module_init(tosa_init); | 287 | module_init(tosa_init); |