251 files changed, 2613 insertions, 1934 deletions
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 6a451f47d40f..c3b1430cf603 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -304,3 +304,15 @@ Why: The code says it was obsolete when it was written in 2001. | |||
304 | Who: Richard Purdie <rpurdie@rpsys.net> | 304 | Who: Richard Purdie <rpurdie@rpsys.net> |
305 | 305 | ||
306 | --------------------------- | 306 | --------------------------- |
307 | |||
308 | What: Wireless extensions over netlink (CONFIG_NET_WIRELESS_RTNETLINK) | ||
309 | When: with the merge of wireless-dev, 2.6.22 or later | ||
310 | Why: The option/code is | ||
311 | * not enabled on most kernels | ||
312 | * not required by any userspace tools (except an experimental one, | ||
313 | and even there only for some parts, others use ioctl) | ||
314 | * pointless since wext is no longer evolving and the ioctl | ||
315 | interface needs to be kept | ||
316 | Who: Johannes Berg <johannes@sipsolutions.net> | ||
317 | |||
318 | --------------------------- | ||
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 72af5de1effb..5484ab5efd4f 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -41,6 +41,7 @@ Table of Contents | |||
41 | 2.11 /proc/sys/fs/mqueue - POSIX message queues filesystem | 41 | 2.11 /proc/sys/fs/mqueue - POSIX message queues filesystem |
42 | 2.12 /proc/<pid>/oom_adj - Adjust the oom-killer score | 42 | 2.12 /proc/<pid>/oom_adj - Adjust the oom-killer score |
43 | 2.13 /proc/<pid>/oom_score - Display current oom-killer score | 43 | 2.13 /proc/<pid>/oom_score - Display current oom-killer score |
44 | 2.14 /proc/<pid>/io - Display the IO accounting fields | ||
44 | 45 | ||
45 | ------------------------------------------------------------------------------ | 46 | ------------------------------------------------------------------------------ |
46 | Preface | 47 | Preface |
@@ -1990,3 +1991,107 @@ need to recompile the kernel, or even to reboot the system. The files in the | |||
1990 | command to write value into these files, thereby changing the default settings | 1991 | command to write value into these files, thereby changing the default settings |
1991 | of the kernel. | 1992 | of the kernel. |
1992 | ------------------------------------------------------------------------------ | 1993 | ------------------------------------------------------------------------------ |
1994 | |||
1995 | 2.14 /proc/<pid>/io - Display the IO accounting fields | ||
1996 | ------------------------------------------------------- | ||
1997 | |||
1998 | This file contains IO statistics for each running process | ||
1999 | |||
2000 | Example | ||
2001 | ------- | ||
2002 | |||
2003 | test:/tmp # dd if=/dev/zero of=/tmp/test.dat & | ||
2004 | [1] 3828 | ||
2005 | |||
2006 | test:/tmp # cat /proc/3828/io | ||
2007 | rchar: 323934931 | ||
2008 | wchar: 323929600 | ||
2009 | syscr: 632687 | ||
2010 | syscw: 632675 | ||
2011 | read_bytes: 0 | ||
2012 | write_bytes: 323932160 | ||
2013 | cancelled_write_bytes: 0 | ||
2014 | |||
2015 | |||
2016 | Description | ||
2017 | ----------- | ||
2018 | |||
2019 | rchar | ||
2020 | ----- | ||
2021 | |||
2022 | I/O counter: chars read | ||
2023 | The number of bytes which this task has caused to be read from storage. This | ||
2024 | is simply the sum of bytes which this process passed to read() and pread(). | ||
2025 | It includes things like tty IO and it is unaffected by whether or not actual | ||
2026 | physical disk IO was required (the read might have been satisfied from | ||
2027 | pagecache) | ||
2028 | |||
2029 | |||
2030 | wchar | ||
2031 | ----- | ||
2032 | |||
2033 | I/O counter: chars written | ||
2034 | The number of bytes which this task has caused, or shall cause to be written | ||
2035 | to disk. Similar caveats apply here as with rchar. | ||
2036 | |||
2037 | |||
2038 | syscr | ||
2039 | ----- | ||
2040 | |||
2041 | I/O counter: read syscalls | ||
2042 | Attempt to count the number of read I/O operations, i.e. syscalls like read() | ||
2043 | and pread(). | ||
2044 | |||
2045 | |||
2046 | syscw | ||
2047 | ----- | ||
2048 | |||
2049 | I/O counter: write syscalls | ||
2050 | Attempt to count the number of write I/O operations, i.e. syscalls like | ||
2051 | write() and pwrite(). | ||
2052 | |||
2053 | |||
2054 | read_bytes | ||
2055 | ---------- | ||
2056 | |||
2057 | I/O counter: bytes read | ||
2058 | Attempt to count the number of bytes which this process really did cause to | ||
2059 | be fetched from the storage layer. Done at the submit_bio() level, so it is | ||
2060 | accurate for block-backed filesystems. <please add status regarding NFS and | ||
2061 | CIFS at a later time> | ||
2062 | |||
2063 | |||
2064 | write_bytes | ||
2065 | ----------- | ||
2066 | |||
2067 | I/O counter: bytes written | ||
2068 | Attempt to count the number of bytes which this process caused to be sent to | ||
2069 | the storage layer. This is done at page-dirtying time. | ||
2070 | |||
2071 | |||
2072 | cancelled_write_bytes | ||
2073 | --------------------- | ||
2074 | |||
2075 | The big inaccuracy here is truncate. If a process writes 1MB to a file and | ||
2076 | then deletes the file, it will in fact perform no writeout. But it will have | ||
2077 | been accounted as having caused 1MB of write. | ||
2078 | In other words: The number of bytes which this process caused to not happen, | ||
2079 | by truncating pagecache. A task can cause "negative" IO too. If this task | ||
2080 | truncates some dirty pagecache, some IO which another task has been accounted | ||
2081 | for (in its write_bytes) will not be happening. We _could_ just subtract that | ||
2082 | from the truncating task's write_bytes, but there is information loss in doing | ||
2083 | that. | ||
2084 | |||
2085 | |||
2086 | Note | ||
2087 | ---- | ||
2088 | |||
2089 | At its current implementation state, this is a bit racy on 32-bit machines: if | ||
2090 | process A reads process B's /proc/pid/io while process B is updating one of | ||
2091 | those 64-bit counters, process A could see an intermediate result. | ||
2092 | |||
2093 | |||
2094 | More information about this can be found within the taskstats documentation in | ||
2095 | Documentation/accounting. | ||
2096 | |||
2097 | ------------------------------------------------------------------------------ | ||
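(Editor's aside, not part of the patch: the fields above are plain "name: value" pairs, so they are easy to consume from user space. A minimal C sketch that reads a task's own counters might look like the following; the file path and field layout are exactly as documented above, everything else is illustrative.)

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/self/io", "r");
            char name[64];
            unsigned long long value;

            if (!f) {
                    /* e.g. kernel built without per-task IO accounting */
                    perror("fopen /proc/self/io");
                    return 1;
            }
            /* each line is "<counter>: <unsigned 64-bit value>" */
            while (fscanf(f, " %63[^:]: %llu", name, &value) == 2)
                    printf("%s = %llu\n", name, value);
            fclose(f);
            return 0;
    }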
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 757dd994b879..914119309ddb 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1686,6 +1686,22 @@ and is between 256 and 4096 characters. It is defined in the file | |||
1686 | stifb= [HW] | 1686 | stifb= [HW] |
1687 | Format: bpp:<bpp1>[:<bpp2>[:<bpp3>...]] | 1687 | Format: bpp:<bpp1>[:<bpp2>[:<bpp3>...]] |
1688 | 1688 | ||
1689 | sunrpc.pool_mode= | ||
1690 | [NFS] | ||
1691 | Control how the NFS server code allocates CPUs to | ||
1692 | service thread pools. Depending on how many NICs | ||
1693 | you have and where their interrupts are bound, this | ||
1694 | option will affect which CPUs will do NFS serving. | ||
1695 | Note: this parameter cannot be changed while the | ||
1696 | NFS server is running. | ||
1697 | |||
1698 | auto the server chooses an appropriate mode | ||
1699 | automatically using heuristics | ||
1700 | global a single global pool contains all CPUs | ||
1701 | percpu one pool for each CPU | ||
1702 | pernode one pool for each NUMA node (equivalent | ||
1703 | to global on non-NUMA machines) | ||
1704 | |||
1689 | swiotlb= [IA-64] Number of I/O TLB slabs | 1705 | swiotlb= [IA-64] Number of I/O TLB slabs |
1690 | 1706 | ||
1691 | switches= [HW,M68k] | 1707 | switches= [HW,M68k] |
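(Editor's aside, not part of the patch: like other parameters of built-in code, sunrpc.pool_mode is given on the kernel command line, for example

    root=/dev/sda1 ro sunrpc.pool_mode=pernode

which creates one nfsd thread pool per NUMA node. The root device above is illustrative; as the text notes, the mode cannot be changed while the NFS server is running.)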
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
index c30ff1bb2d10..db398a6441c1 100644
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -370,7 +370,9 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed. | |||
370 | mpu_port - 0x300,0x310,0x320,0x330 = legacy port, | 370 | mpu_port - 0x300,0x310,0x320,0x330 = legacy port, |
371 | 1 = integrated PCI port, | 371 | 1 = integrated PCI port, |
372 | 0 = disable (default) | 372 | 0 = disable (default) |
373 | fm_port - 0x388 (default), 0 = disable (default) | 373 | fm_port - 0x388 = legacy port, |
374 | 1 = integrated PCI port (default), | ||
375 | 0 = disable | ||
374 | soft_ac3 - Software-conversion of raw SPDIF packets (model 033 only) | 376 | soft_ac3 - Software-conversion of raw SPDIF packets (model 033 only) |
375 | (default = 1) | 377 | (default = 1) |
376 | joystick_port - Joystick port address (0 = disable, 1 = auto-detect) | 378 | joystick_port - Joystick port address (0 = disable, 1 = auto-detect) |
@@ -895,10 +897,16 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed. | |||
895 | can be adjusted. Appearing only when compiled with | 897 | can be adjusted. Appearing only when compiled with |
896 | $CONFIG_SND_DEBUG=y | 898 | $CONFIG_SND_DEBUG=y |
897 | 899 | ||
898 | STAC9200/9205/9220/9221/9254 | 900 | STAC9200/9205/9254 |
901 | ref Reference board | ||
902 | |||
903 | STAC9220/9221 | ||
899 | ref Reference board | 904 | ref Reference board |
900 | 3stack D945 3stack | 905 | 3stack D945 3stack |
901 | 5stack D945 5stack + SPDIF | 906 | 5stack D945 5stack + SPDIF |
907 | macmini Intel Mac Mini | ||
908 | macbook Intel Mac Book | ||
909 | macbook-pro Intel Mac Book Pro | ||
902 | 910 | ||
903 | STAC9202/9250/9251 | 911 | STAC9202/9250/9251 |
904 | ref Reference board, base config | 912 | ref Reference board, base config |
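(Editor's aside, not part of the patch: these model strings are passed through the driver's model option. Assuming the STAC92xx codecs sit behind the HD-audio controller driver, which is the usual case but an assumption here rather than something stated in the patch, a modprobe configuration line would look like

    options snd-hda-intel model=macmini
)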
diff --git a/MAINTAINERS b/MAINTAINERS
index 1dfba85ca7b5..9993b9009415 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2553,16 +2553,8 @@ L: i2c@lm-sensors.org | |||
2553 | S: Maintained | 2553 | S: Maintained |
2554 | 2554 | ||
2555 | PARALLEL PORT SUPPORT | 2555 | PARALLEL PORT SUPPORT |
2556 | P: Phil Blundell | ||
2557 | M: philb@gnu.org | ||
2558 | P: Tim Waugh | ||
2559 | M: tim@cyberelk.net | ||
2560 | P: David Campbell | ||
2561 | P: Andrea Arcangeli | ||
2562 | M: andrea@suse.de | ||
2563 | L: linux-parport@lists.infradead.org | 2556 | L: linux-parport@lists.infradead.org |
2564 | W: http://people.redhat.com/twaugh/parport/ | 2557 | S: Orphan |
2565 | S: Maintained | ||
2566 | 2558 | ||
2567 | PARIDE DRIVERS FOR PARALLEL PORT IDE DEVICES | 2559 | PARIDE DRIVERS FOR PARALLEL PORT IDE DEVICES |
2568 | P: Tim Waugh | 2560 | P: Tim Waugh |
diff --git a/Makefile b/Makefile
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 2 | 1 | VERSION = 2 |
2 | PATCHLEVEL = 6 | 2 | PATCHLEVEL = 6 |
3 | SUBLEVEL = 21 | 3 | SUBLEVEL = 21 |
4 | EXTRAVERSION = -rc2 | 4 | EXTRAVERSION = -rc3 |
5 | NAME = Homicidal Dwarf Hamster | 5 | NAME = Homicidal Dwarf Hamster |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index ac2ffdcfbbb4..e7baca29f3fb 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -21,6 +21,10 @@ config ARM | |||
21 | config SYS_SUPPORTS_APM_EMULATION | 21 | config SYS_SUPPORTS_APM_EMULATION |
22 | bool | 22 | bool |
23 | 23 | ||
24 | config GENERIC_GPIO | ||
25 | bool | ||
26 | default n | ||
27 | |||
24 | config GENERIC_TIME | 28 | config GENERIC_TIME |
25 | bool | 29 | bool |
26 | default n | 30 | default n |
@@ -163,6 +167,7 @@ config ARCH_VERSATILE | |||
163 | 167 | ||
164 | config ARCH_AT91 | 168 | config ARCH_AT91 |
165 | bool "Atmel AT91" | 169 | bool "Atmel AT91" |
170 | select GENERIC_GPIO | ||
166 | help | 171 | help |
167 | This enables support for systems based on the Atmel AT91RM9200 | 172 | This enables support for systems based on the Atmel AT91RM9200 |
168 | and AT91SAM9xxx processors. | 173 | and AT91SAM9xxx processors. |
@@ -304,6 +309,7 @@ config ARCH_PXA | |||
304 | bool "PXA2xx-based" | 309 | bool "PXA2xx-based" |
305 | depends on MMU | 310 | depends on MMU |
306 | select ARCH_MTD_XIP | 311 | select ARCH_MTD_XIP |
312 | select GENERIC_GPIO | ||
307 | select GENERIC_TIME | 313 | select GENERIC_TIME |
308 | help | 314 | help |
309 | Support for Intel's PXA2XX processor line. | 315 | Support for Intel's PXA2XX processor line. |
@@ -325,11 +331,13 @@ config ARCH_SA1100 | |||
325 | select ISA | 331 | select ISA |
326 | select ARCH_DISCONTIGMEM_ENABLE | 332 | select ARCH_DISCONTIGMEM_ENABLE |
327 | select ARCH_MTD_XIP | 333 | select ARCH_MTD_XIP |
334 | select GENERIC_GPIO | ||
328 | help | 335 | help |
329 | Support for StrongARM 11x0 based boards. | 336 | Support for StrongARM 11x0 based boards. |
330 | 337 | ||
331 | config ARCH_S3C2410 | 338 | config ARCH_S3C2410 |
332 | bool "Samsung S3C2410, S3C2412, S3C2413, S3C2440, S3C2442, S3C2443" | 339 | bool "Samsung S3C2410, S3C2412, S3C2413, S3C2440, S3C2442, S3C2443" |
340 | select GENERIC_GPIO | ||
333 | help | 341 | help |
334 | Samsung S3C2410X CPU based systems, such as the Simtec Electronics | 342 | Samsung S3C2410X CPU based systems, such as the Simtec Electronics |
335 | BAST (<http://www.simtec.co.uk/products/EB110ITX/>), the IPAQ 1940 or | 343 | BAST (<http://www.simtec.co.uk/products/EB110ITX/>), the IPAQ 1940 or |
@@ -354,6 +362,7 @@ config ARCH_LH7A40X | |||
354 | 362 | ||
355 | config ARCH_OMAP | 363 | config ARCH_OMAP |
356 | bool "TI OMAP" | 364 | bool "TI OMAP" |
365 | select GENERIC_GPIO | ||
357 | help | 366 | help |
358 | Support for TI's OMAP platform (OMAP1 and OMAP2). | 367 | Support for TI's OMAP platform (OMAP1 and OMAP2). |
359 | 368 | ||
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index bb059a4e1df9..ce4013aee59b 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -22,6 +22,10 @@ config AVR32 | |||
22 | config UID16 | 22 | config UID16 |
23 | bool | 23 | bool |
24 | 24 | ||
25 | config GENERIC_GPIO | ||
26 | bool | ||
27 | default y | ||
28 | |||
25 | config GENERIC_HARDIRQS | 29 | config GENERIC_HARDIRQS |
26 | bool | 30 | bool |
27 | default y | 31 | default y |
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 2f7672545fe9..27e8453274e6 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -220,11 +220,11 @@ config PARAVIRT | |||
220 | 220 | ||
221 | config VMI | 221 | config VMI |
222 | bool "VMI Paravirt-ops support" | 222 | bool "VMI Paravirt-ops support" |
223 | depends on PARAVIRT && !NO_HZ | 223 | depends on PARAVIRT |
224 | default y | ||
225 | help | 224 | help |
226 | VMI provides a paravirtualized interface to multiple hypervisors | 225 | VMI provides a paravirtualized interface to the VMware ESX server |
227 | include VMware ESX server and Xen by connecting to a ROM module | 226 | (it could be used by other hypervisors in theory too, but is not |
227 | at the moment), by linking the kernel to a GPL-ed ROM module | ||
228 | provided by the hypervisor. | 228 | provided by the hypervisor. |
229 | 229 | ||
230 | config ACPI_SRAT | 230 | config ACPI_SRAT |
@@ -893,7 +893,6 @@ config HOTPLUG_CPU | |||
893 | config COMPAT_VDSO | 893 | config COMPAT_VDSO |
894 | bool "Compat VDSO support" | 894 | bool "Compat VDSO support" |
895 | default y | 895 | default y |
896 | depends on !PARAVIRT | ||
897 | help | 896 | help |
898 | Map the VDSO to the predictable old-style address too. | 897 | Map the VDSO to the predictable old-style address too. |
899 | ---help--- | 898 | ---help--- |
@@ -1287,12 +1286,3 @@ config X86_TRAMPOLINE | |||
1287 | config KTIME_SCALAR | 1286 | config KTIME_SCALAR |
1288 | bool | 1287 | bool |
1289 | default y | 1288 | default y |
1290 | |||
1291 | config NO_IDLE_HZ | ||
1292 | bool | ||
1293 | depends on PARAVIRT | ||
1294 | default y | ||
1295 | help | ||
1296 | Switches the regular HZ timer off when the system is going idle. | ||
1297 | This helps a hypervisor detect that the Linux system is idle, | ||
1298 | reducing the overhead of idle systems. | ||
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 7a2c9cbdb511..2383bcf18c5d 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -493,8 +493,15 @@ void __init setup_boot_APIC_clock(void) | |||
493 | /* No broadcast on UP ! */ | 493 | /* No broadcast on UP ! */ |
494 | if (num_possible_cpus() == 1) | 494 | if (num_possible_cpus() == 1) |
495 | return; | 495 | return; |
496 | } else | 496 | } else { |
497 | lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY; | 497 | /* |
498 | * If nmi_watchdog is set to IO_APIC, we need the | ||
499 | * PIT/HPET going. Otherwise register lapic as a dummy | ||
500 | * device. | ||
501 | */ | ||
502 | if (nmi_watchdog != NMI_IO_APIC) | ||
503 | lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY; | ||
504 | } | ||
498 | 505 | ||
499 | /* Setup the lapic or request the broadcast */ | 506 | /* Setup the lapic or request the broadcast */ |
500 | setup_APIC_timer(); | 507 | setup_APIC_timer(); |
diff --git a/arch/i386/kernel/hpet.c b/arch/i386/kernel/hpet.c
index e1006b7acc9e..f3ab61ee7498 100644
--- a/arch/i386/kernel/hpet.c
+++ b/arch/i386/kernel/hpet.c
@@ -201,12 +201,30 @@ static int hpet_next_event(unsigned long delta, | |||
201 | } | 201 | } |
202 | 202 | ||
203 | /* | 203 | /* |
204 | * Clock source related code | ||
205 | */ | ||
206 | static cycle_t read_hpet(void) | ||
207 | { | ||
208 | return (cycle_t)hpet_readl(HPET_COUNTER); | ||
209 | } | ||
210 | |||
211 | static struct clocksource clocksource_hpet = { | ||
212 | .name = "hpet", | ||
213 | .rating = 250, | ||
214 | .read = read_hpet, | ||
215 | .mask = HPET_MASK, | ||
216 | .shift = HPET_SHIFT, | ||
217 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | ||
218 | }; | ||
219 | |||
220 | /* | ||
204 | * Try to setup the HPET timer | 221 | * Try to setup the HPET timer |
205 | */ | 222 | */ |
206 | int __init hpet_enable(void) | 223 | int __init hpet_enable(void) |
207 | { | 224 | { |
208 | unsigned long id; | 225 | unsigned long id; |
209 | uint64_t hpet_freq; | 226 | uint64_t hpet_freq; |
227 | u64 tmp; | ||
210 | 228 | ||
211 | if (!is_hpet_capable()) | 229 | if (!is_hpet_capable()) |
212 | return 0; | 230 | return 0; |
@@ -253,6 +271,25 @@ int __init hpet_enable(void) | |||
253 | /* Start the counter */ | 271 | /* Start the counter */ |
254 | hpet_start_counter(); | 272 | hpet_start_counter(); |
255 | 273 | ||
274 | /* Initialize and register HPET clocksource | ||
275 | * | ||
276 | * hpet period is in femto seconds per cycle | ||
277 | * so we need to convert this to ns/cyc units | ||
278 | * approximated by mult/2^shift | ||
279 | * | ||
280 | * fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift | ||
281 | * fsec/cyc * 1ns/1000000fsec * 2^shift = mult | ||
282 | * fsec/cyc * 2^shift * 1nsec/1000000fsec = mult | ||
283 | * (fsec/cyc << shift)/1000000 = mult | ||
284 | * (hpet_period << shift)/FSEC_PER_NSEC = mult | ||
285 | */ | ||
286 | tmp = (u64)hpet_period << HPET_SHIFT; | ||
287 | do_div(tmp, FSEC_PER_NSEC); | ||
288 | clocksource_hpet.mult = (u32)tmp; | ||
289 | |||
290 | clocksource_register(&clocksource_hpet); | ||
291 | |||
292 | |||
256 | if (id & HPET_ID_LEGSUP) { | 293 | if (id & HPET_ID_LEGSUP) { |
257 | hpet_enable_int(); | 294 | hpet_enable_int(); |
258 | hpet_reserve_platform_timers(id); | 295 | hpet_reserve_platform_timers(id); |
@@ -273,49 +310,6 @@ out_nohpet: | |||
273 | return 0; | 310 | return 0; |
274 | } | 311 | } |
275 | 312 | ||
276 | /* | ||
277 | * Clock source related code | ||
278 | */ | ||
279 | static cycle_t read_hpet(void) | ||
280 | { | ||
281 | return (cycle_t)hpet_readl(HPET_COUNTER); | ||
282 | } | ||
283 | |||
284 | static struct clocksource clocksource_hpet = { | ||
285 | .name = "hpet", | ||
286 | .rating = 250, | ||
287 | .read = read_hpet, | ||
288 | .mask = HPET_MASK, | ||
289 | .shift = HPET_SHIFT, | ||
290 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | ||
291 | }; | ||
292 | |||
293 | static int __init init_hpet_clocksource(void) | ||
294 | { | ||
295 | u64 tmp; | ||
296 | |||
297 | if (!hpet_virt_address) | ||
298 | return -ENODEV; | ||
299 | |||
300 | /* | ||
301 | * hpet period is in femto seconds per cycle | ||
302 | * so we need to convert this to ns/cyc units | ||
303 | * approximated by mult/2^shift | ||
304 | * | ||
305 | * fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift | ||
306 | * fsec/cyc * 1ns/1000000fsec * 2^shift = mult | ||
307 | * fsec/cyc * 2^shift * 1nsec/1000000fsec = mult | ||
308 | * (fsec/cyc << shift)/1000000 = mult | ||
309 | * (hpet_period << shift)/FSEC_PER_NSEC = mult | ||
310 | */ | ||
311 | tmp = (u64)hpet_period << HPET_SHIFT; | ||
312 | do_div(tmp, FSEC_PER_NSEC); | ||
313 | clocksource_hpet.mult = (u32)tmp; | ||
314 | |||
315 | return clocksource_register(&clocksource_hpet); | ||
316 | } | ||
317 | |||
318 | module_init(init_hpet_clocksource); | ||
319 | 313 | ||
320 | #ifdef CONFIG_HPET_EMULATE_RTC | 314 | #ifdef CONFIG_HPET_EMULATE_RTC |
321 | 315 | ||
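(Editor's aside, not part of the patch: the mult value computed above is later used by the clocksource core as ns = (cycles * mult) >> shift. A small self-contained sketch of the same conversion, with an illustrative HPET period of ~69.84 ns (a 14.318 MHz counter) and an assumed shift of 22:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t hpet_period = 69841279ULL;   /* fs per cycle, illustrative */
            unsigned int shift = 22;              /* assumed shift value */
            uint64_t mult = (hpet_period << shift) / 1000000ULL; /* fs per ns */
            uint64_t cycles = 14318;              /* ~1 ms at 14.318 MHz */

            printf("mult = %llu\n", (unsigned long long)mult);
            printf("%llu cycles -> %llu ns\n", (unsigned long long)cycles,
                   (unsigned long long)((cycles * mult) >> shift));
            return 0;
    }

The result is roughly 1,000,000 ns, i.e. the millisecond one would expect from 14318 cycles of a 14.318 MHz counter.)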
diff --git a/arch/i386/kernel/i8253.c b/arch/i386/kernel/i8253.c
index a6bc7bb38834..5cbb776b3089 100644
--- a/arch/i386/kernel/i8253.c
+++ b/arch/i386/kernel/i8253.c
@@ -195,4 +195,4 @@ static int __init init_pit_clocksource(void) | |||
195 | clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE, 20); | 195 | clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE, 20); |
196 | return clocksource_register(&clocksource_pit); | 196 | return clocksource_register(&clocksource_pit); |
197 | } | 197 | } |
198 | module_init(init_pit_clocksource); | 198 | arch_initcall(init_pit_clocksource); |
diff --git a/arch/i386/kernel/paravirt.c b/arch/i386/kernel/paravirt.c
index c156ecfa3872..2ec331e03fa9 100644
--- a/arch/i386/kernel/paravirt.c
+++ b/arch/i386/kernel/paravirt.c
@@ -32,6 +32,7 @@ | |||
32 | #include <asm/fixmap.h> | 32 | #include <asm/fixmap.h> |
33 | #include <asm/apic.h> | 33 | #include <asm/apic.h> |
34 | #include <asm/tlbflush.h> | 34 | #include <asm/tlbflush.h> |
35 | #include <asm/timer.h> | ||
35 | 36 | ||
36 | /* nop stub */ | 37 | /* nop stub */ |
37 | static void native_nop(void) | 38 | static void native_nop(void) |
@@ -493,7 +494,7 @@ struct paravirt_ops paravirt_ops = { | |||
493 | .memory_setup = machine_specific_memory_setup, | 494 | .memory_setup = machine_specific_memory_setup, |
494 | .get_wallclock = native_get_wallclock, | 495 | .get_wallclock = native_get_wallclock, |
495 | .set_wallclock = native_set_wallclock, | 496 | .set_wallclock = native_set_wallclock, |
496 | .time_init = time_init_hook, | 497 | .time_init = hpet_time_init, |
497 | .init_IRQ = native_init_IRQ, | 498 | .init_IRQ = native_init_IRQ, |
498 | 499 | ||
499 | .cpuid = native_cpuid, | 500 | .cpuid = native_cpuid, |
@@ -520,6 +521,8 @@ struct paravirt_ops paravirt_ops = { | |||
520 | .write_msr = native_write_msr, | 521 | .write_msr = native_write_msr, |
521 | .read_tsc = native_read_tsc, | 522 | .read_tsc = native_read_tsc, |
522 | .read_pmc = native_read_pmc, | 523 | .read_pmc = native_read_pmc, |
524 | .get_scheduled_cycles = native_read_tsc, | ||
525 | .get_cpu_khz = native_calculate_cpu_khz, | ||
523 | .load_tr_desc = native_load_tr_desc, | 526 | .load_tr_desc = native_load_tr_desc, |
524 | .set_ldt = native_set_ldt, | 527 | .set_ldt = native_set_ldt, |
525 | .load_gdt = native_load_gdt, | 528 | .load_gdt = native_load_gdt, |
@@ -535,7 +538,6 @@ struct paravirt_ops paravirt_ops = { | |||
535 | 538 | ||
536 | .set_iopl_mask = native_set_iopl_mask, | 539 | .set_iopl_mask = native_set_iopl_mask, |
537 | .io_delay = native_io_delay, | 540 | .io_delay = native_io_delay, |
538 | .const_udelay = __const_udelay, | ||
539 | 541 | ||
540 | #ifdef CONFIG_X86_LOCAL_APIC | 542 | #ifdef CONFIG_X86_LOCAL_APIC |
541 | .apic_write = native_apic_write, | 543 | .apic_write = native_apic_write, |
@@ -550,6 +552,8 @@ struct paravirt_ops paravirt_ops = { | |||
550 | .flush_tlb_kernel = native_flush_tlb_global, | 552 | .flush_tlb_kernel = native_flush_tlb_global, |
551 | .flush_tlb_single = native_flush_tlb_single, | 553 | .flush_tlb_single = native_flush_tlb_single, |
552 | 554 | ||
555 | .map_pt_hook = (void *)native_nop, | ||
556 | |||
553 | .alloc_pt = (void *)native_nop, | 557 | .alloc_pt = (void *)native_nop, |
554 | .alloc_pd = (void *)native_nop, | 558 | .alloc_pd = (void *)native_nop, |
555 | .alloc_pd_clone = (void *)native_nop, | 559 | .alloc_pd_clone = (void *)native_nop, |
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 122623dcc6e1..698c24fe482e 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -657,5 +657,4 @@ void __init setup_arch(char **cmdline_p) | |||
657 | conswitchp = &dummy_con; | 657 | conswitchp = &dummy_con; |
658 | #endif | 658 | #endif |
659 | #endif | 659 | #endif |
660 | tsc_init(); | ||
661 | } | 660 | } |
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 48bfcaa13ecc..4ff55e675576 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -33,11 +33,6 @@ | |||
33 | * Dave Jones : Report invalid combinations of Athlon CPUs. | 33 | * Dave Jones : Report invalid combinations of Athlon CPUs. |
34 | * Rusty Russell : Hacked into shape for new "hotplug" boot process. */ | 34 | * Rusty Russell : Hacked into shape for new "hotplug" boot process. */ |
35 | 35 | ||
36 | |||
37 | /* SMP boot always wants to use real time delay to allow sufficient time for | ||
38 | * the APs to come online */ | ||
39 | #define USE_REAL_TIME_DELAY | ||
40 | |||
41 | #include <linux/module.h> | 36 | #include <linux/module.h> |
42 | #include <linux/init.h> | 37 | #include <linux/init.h> |
43 | #include <linux/kernel.h> | 38 | #include <linux/kernel.h> |
@@ -50,6 +45,7 @@ | |||
50 | #include <linux/notifier.h> | 45 | #include <linux/notifier.h> |
51 | #include <linux/cpu.h> | 46 | #include <linux/cpu.h> |
52 | #include <linux/percpu.h> | 47 | #include <linux/percpu.h> |
48 | #include <linux/nmi.h> | ||
53 | 49 | ||
54 | #include <linux/delay.h> | 50 | #include <linux/delay.h> |
55 | #include <linux/mc146818rtc.h> | 51 | #include <linux/mc146818rtc.h> |
@@ -1283,8 +1279,9 @@ void __cpu_die(unsigned int cpu) | |||
1283 | 1279 | ||
1284 | int __cpuinit __cpu_up(unsigned int cpu) | 1280 | int __cpuinit __cpu_up(unsigned int cpu) |
1285 | { | 1281 | { |
1282 | unsigned long flags; | ||
1286 | #ifdef CONFIG_HOTPLUG_CPU | 1283 | #ifdef CONFIG_HOTPLUG_CPU |
1287 | int ret=0; | 1284 | int ret = 0; |
1288 | 1285 | ||
1289 | /* | 1286 | /* |
1290 | * We do warm boot only on cpus that had booted earlier | 1287 | * We do warm boot only on cpus that had booted earlier |
@@ -1302,23 +1299,25 @@ int __cpuinit __cpu_up(unsigned int cpu) | |||
1302 | /* In case one didn't come up */ | 1299 | /* In case one didn't come up */ |
1303 | if (!cpu_isset(cpu, cpu_callin_map)) { | 1300 | if (!cpu_isset(cpu, cpu_callin_map)) { |
1304 | printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu); | 1301 | printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu); |
1305 | local_irq_enable(); | ||
1306 | return -EIO; | 1302 | return -EIO; |
1307 | } | 1303 | } |
1308 | 1304 | ||
1309 | local_irq_enable(); | ||
1310 | |||
1311 | per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; | 1305 | per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; |
1312 | /* Unleash the CPU! */ | 1306 | /* Unleash the CPU! */ |
1313 | cpu_set(cpu, smp_commenced_mask); | 1307 | cpu_set(cpu, smp_commenced_mask); |
1314 | 1308 | ||
1315 | /* | 1309 | /* |
1316 | * Check TSC synchronization with the AP: | 1310 | * Check TSC synchronization with the AP (keep irqs disabled |
1311 | * while doing so): | ||
1317 | */ | 1312 | */ |
1313 | local_irq_save(flags); | ||
1318 | check_tsc_sync_source(cpu); | 1314 | check_tsc_sync_source(cpu); |
1315 | local_irq_restore(flags); | ||
1319 | 1316 | ||
1320 | while (!cpu_isset(cpu, cpu_online_map)) | 1317 | while (!cpu_isset(cpu, cpu_online_map)) { |
1321 | cpu_relax(); | 1318 | cpu_relax(); |
1319 | touch_nmi_watchdog(); | ||
1320 | } | ||
1322 | 1321 | ||
1323 | #ifdef CONFIG_X86_GENERICARCH | 1322 | #ifdef CONFIG_X86_GENERICARCH |
1324 | if (num_online_cpus() > 8 && genapic == &apic_default) | 1323 | if (num_online_cpus() > 8 && genapic == &apic_default) |
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index a5350059557a..94e5cb091104 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -262,14 +262,23 @@ void notify_arch_cmos_timer(void) | |||
262 | 262 | ||
263 | extern void (*late_time_init)(void); | 263 | extern void (*late_time_init)(void); |
264 | /* Duplicate of time_init() below, with hpet_enable part added */ | 264 | /* Duplicate of time_init() below, with hpet_enable part added */ |
265 | static void __init hpet_time_init(void) | 265 | void __init hpet_time_init(void) |
266 | { | 266 | { |
267 | if (!hpet_enable()) | 267 | if (!hpet_enable()) |
268 | setup_pit_timer(); | 268 | setup_pit_timer(); |
269 | do_time_init(); | 269 | time_init_hook(); |
270 | } | 270 | } |
271 | 271 | ||
272 | /* | ||
273 | * This is called directly from init code; we must delay timer setup in the | ||
274 | * HPET case as we can't make the decision to turn on HPET this early in the | ||
275 | * boot process. | ||
276 | * | ||
277 | * The chosen time_init function will usually be hpet_time_init, above, but | ||
278 | * in the case of virtual hardware, an alternative function may be substituted. | ||
279 | */ | ||
272 | void __init time_init(void) | 280 | void __init time_init(void) |
273 | { | 281 | { |
274 | late_time_init = hpet_time_init; | 282 | tsc_init(); |
283 | late_time_init = choose_time_init(); | ||
275 | } | 284 | } |
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index 3082a418635c..602660df455c 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -14,6 +14,7 @@ | |||
14 | #include <asm/delay.h> | 14 | #include <asm/delay.h> |
15 | #include <asm/tsc.h> | 15 | #include <asm/tsc.h> |
16 | #include <asm/io.h> | 16 | #include <asm/io.h> |
17 | #include <asm/timer.h> | ||
17 | 18 | ||
18 | #include "mach_timer.h" | 19 | #include "mach_timer.h" |
19 | 20 | ||
@@ -23,7 +24,6 @@ | |||
23 | * an extra value to store the TSC freq | 24 | * an extra value to store the TSC freq |
24 | */ | 25 | */ |
25 | unsigned int tsc_khz; | 26 | unsigned int tsc_khz; |
26 | unsigned long long (*custom_sched_clock)(void); | ||
27 | 27 | ||
28 | int tsc_disable; | 28 | int tsc_disable; |
29 | 29 | ||
@@ -102,9 +102,6 @@ unsigned long long sched_clock(void) | |||
102 | { | 102 | { |
103 | unsigned long long this_offset; | 103 | unsigned long long this_offset; |
104 | 104 | ||
105 | if (unlikely(custom_sched_clock)) | ||
106 | return (*custom_sched_clock)(); | ||
107 | |||
108 | /* | 105 | /* |
109 | * Fall back to jiffies if there's no TSC available: | 106 | * Fall back to jiffies if there's no TSC available: |
110 | */ | 107 | */ |
@@ -113,13 +110,13 @@ unsigned long long sched_clock(void) | |||
113 | return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ); | 110 | return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ); |
114 | 111 | ||
115 | /* read the Time Stamp Counter: */ | 112 | /* read the Time Stamp Counter: */ |
116 | rdtscll(this_offset); | 113 | get_scheduled_cycles(this_offset); |
117 | 114 | ||
118 | /* return the value in ns */ | 115 | /* return the value in ns */ |
119 | return cycles_2_ns(this_offset); | 116 | return cycles_2_ns(this_offset); |
120 | } | 117 | } |
121 | 118 | ||
122 | static unsigned long calculate_cpu_khz(void) | 119 | unsigned long native_calculate_cpu_khz(void) |
123 | { | 120 | { |
124 | unsigned long long start, end; | 121 | unsigned long long start, end; |
125 | unsigned long count; | 122 | unsigned long count; |
@@ -186,34 +183,6 @@ int recalibrate_cpu_khz(void) | |||
186 | 183 | ||
187 | EXPORT_SYMBOL(recalibrate_cpu_khz); | 184 | EXPORT_SYMBOL(recalibrate_cpu_khz); |
188 | 185 | ||
189 | void __init tsc_init(void) | ||
190 | { | ||
191 | if (!cpu_has_tsc || tsc_disable) | ||
192 | goto out_no_tsc; | ||
193 | |||
194 | cpu_khz = calculate_cpu_khz(); | ||
195 | tsc_khz = cpu_khz; | ||
196 | |||
197 | if (!cpu_khz) | ||
198 | goto out_no_tsc; | ||
199 | |||
200 | printk("Detected %lu.%03lu MHz processor.\n", | ||
201 | (unsigned long)cpu_khz / 1000, | ||
202 | (unsigned long)cpu_khz % 1000); | ||
203 | |||
204 | set_cyc2ns_scale(cpu_khz); | ||
205 | use_tsc_delay(); | ||
206 | return; | ||
207 | |||
208 | out_no_tsc: | ||
209 | /* | ||
210 | * Set the tsc_disable flag if there's no TSC support, this | ||
211 | * makes it a fast flag for the kernel to see whether it | ||
212 | * should be using the TSC. | ||
213 | */ | ||
214 | tsc_disable = 1; | ||
215 | } | ||
216 | |||
217 | #ifdef CONFIG_CPU_FREQ | 186 | #ifdef CONFIG_CPU_FREQ |
218 | 187 | ||
219 | /* | 188 | /* |
@@ -383,28 +352,47 @@ static void __init check_geode_tsc_reliable(void) | |||
383 | static inline void check_geode_tsc_reliable(void) { } | 352 | static inline void check_geode_tsc_reliable(void) { } |
384 | #endif | 353 | #endif |
385 | 354 | ||
386 | static int __init init_tsc_clocksource(void) | 355 | |
356 | void __init tsc_init(void) | ||
387 | { | 357 | { |
358 | if (!cpu_has_tsc || tsc_disable) | ||
359 | goto out_no_tsc; | ||
388 | 360 | ||
389 | if (cpu_has_tsc && tsc_khz && !tsc_disable) { | 361 | cpu_khz = calculate_cpu_khz(); |
390 | /* check blacklist */ | 362 | tsc_khz = cpu_khz; |
391 | dmi_check_system(bad_tsc_dmi_table); | ||
392 | 363 | ||
393 | unsynchronized_tsc(); | 364 | if (!cpu_khz) |
394 | check_geode_tsc_reliable(); | 365 | goto out_no_tsc; |
395 | current_tsc_khz = tsc_khz; | 366 | |
396 | clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz, | 367 | printk("Detected %lu.%03lu MHz processor.\n", |
397 | clocksource_tsc.shift); | 368 | (unsigned long)cpu_khz / 1000, |
398 | /* lower the rating if we already know its unstable: */ | 369 | (unsigned long)cpu_khz % 1000); |
399 | if (check_tsc_unstable()) { | 370 | |
400 | clocksource_tsc.rating = 0; | 371 | set_cyc2ns_scale(cpu_khz); |
401 | clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS; | 372 | use_tsc_delay(); |
402 | } | ||
403 | 373 | ||
404 | return clocksource_register(&clocksource_tsc); | 374 | /* Check and install the TSC clocksource */ |
375 | dmi_check_system(bad_tsc_dmi_table); | ||
376 | |||
377 | unsynchronized_tsc(); | ||
378 | check_geode_tsc_reliable(); | ||
379 | current_tsc_khz = tsc_khz; | ||
380 | clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz, | ||
381 | clocksource_tsc.shift); | ||
382 | /* lower the rating if we already know its unstable: */ | ||
383 | if (check_tsc_unstable()) { | ||
384 | clocksource_tsc.rating = 0; | ||
385 | clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS; | ||
405 | } | 386 | } |
387 | clocksource_register(&clocksource_tsc); | ||
406 | 388 | ||
407 | return 0; | 389 | return; |
408 | } | ||
409 | 390 | ||
410 | module_init(init_tsc_clocksource); | 391 | out_no_tsc: |
392 | /* | ||
393 | * Set the tsc_disable flag if there's no TSC support, this | ||
394 | * makes it a fast flag for the kernel to see whether it | ||
395 | * should be using the TSC. | ||
396 | */ | ||
397 | tsc_disable = 1; | ||
398 | } | ||
diff --git a/arch/i386/kernel/vmi.c b/arch/i386/kernel/vmi.c
index bb5a7abf949c..fbf45fa08320 100644
--- a/arch/i386/kernel/vmi.c
+++ b/arch/i386/kernel/vmi.c
@@ -35,6 +35,7 @@ | |||
35 | #include <asm/processor.h> | 35 | #include <asm/processor.h> |
36 | #include <asm/timer.h> | 36 | #include <asm/timer.h> |
37 | #include <asm/vmi_time.h> | 37 | #include <asm/vmi_time.h> |
38 | #include <asm/kmap_types.h> | ||
38 | 39 | ||
39 | /* Convenient for calling VMI functions indirectly in the ROM */ | 40 | /* Convenient for calling VMI functions indirectly in the ROM */ |
40 | typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void); | 41 | typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void); |
@@ -48,12 +49,13 @@ typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int); | |||
48 | 49 | ||
49 | static struct vrom_header *vmi_rom; | 50 | static struct vrom_header *vmi_rom; |
50 | static int license_gplok; | 51 | static int license_gplok; |
51 | static int disable_nodelay; | ||
52 | static int disable_pge; | 52 | static int disable_pge; |
53 | static int disable_pse; | 53 | static int disable_pse; |
54 | static int disable_sep; | 54 | static int disable_sep; |
55 | static int disable_tsc; | 55 | static int disable_tsc; |
56 | static int disable_mtrr; | 56 | static int disable_mtrr; |
57 | static int disable_noidle; | ||
58 | static int disable_vmi_timer; | ||
57 | 59 | ||
58 | /* Cached VMI operations */ | 60 | /* Cached VMI operations */ |
59 | struct { | 61 | struct { |
@@ -255,7 +257,6 @@ static void vmi_nop(void) | |||
255 | } | 257 | } |
256 | 258 | ||
257 | /* For NO_IDLE_HZ, we stop the clock when halting the kernel */ | 259 | /* For NO_IDLE_HZ, we stop the clock when halting the kernel */ |
258 | #ifdef CONFIG_NO_IDLE_HZ | ||
259 | static fastcall void vmi_safe_halt(void) | 260 | static fastcall void vmi_safe_halt(void) |
260 | { | 261 | { |
261 | int idle = vmi_stop_hz_timer(); | 262 | int idle = vmi_stop_hz_timer(); |
@@ -266,7 +267,6 @@ static fastcall void vmi_safe_halt(void) | |||
266 | local_irq_enable(); | 267 | local_irq_enable(); |
267 | } | 268 | } |
268 | } | 269 | } |
269 | #endif | ||
270 | 270 | ||
271 | #ifdef CONFIG_DEBUG_PAGE_TYPE | 271 | #ifdef CONFIG_DEBUG_PAGE_TYPE |
272 | 272 | ||
@@ -371,6 +371,24 @@ static void vmi_check_page_type(u32 pfn, int type) | |||
371 | #define vmi_check_page_type(p,t) do { } while (0) | 371 | #define vmi_check_page_type(p,t) do { } while (0) |
372 | #endif | 372 | #endif |
373 | 373 | ||
374 | static void vmi_map_pt_hook(int type, pte_t *va, u32 pfn) | ||
375 | { | ||
376 | /* | ||
377 | * Internally, the VMI ROM must map virtual addresses to physical | ||
378 | * addresses for processing MMU updates. By the time MMU updates | ||
379 | * are issued, this information is typically already lost. | ||
380 | * Fortunately, the VMI provides a cache of mapping slots for active | ||
381 | * page tables. | ||
382 | * | ||
383 | * We use slot zero for the linear mapping of physical memory, and | ||
384 | * in HIGHPTE kernels, slot 1 and 2 for KM_PTE0 and KM_PTE1. | ||
385 | * | ||
386 | * args: SLOT VA COUNT PFN | ||
387 | */ | ||
388 | BUG_ON(type != KM_PTE0 && type != KM_PTE1); | ||
389 | vmi_ops.set_linear_mapping((type - KM_PTE0)+1, (u32)va, 1, pfn); | ||
390 | } | ||
391 | |||
374 | static void vmi_allocate_pt(u32 pfn) | 392 | static void vmi_allocate_pt(u32 pfn) |
375 | { | 393 | { |
376 | vmi_set_page_type(pfn, VMI_PAGE_L1); | 394 | vmi_set_page_type(pfn, VMI_PAGE_L1); |
@@ -508,13 +526,14 @@ void vmi_pmd_clear(pmd_t *pmd) | |||
508 | #endif | 526 | #endif |
509 | 527 | ||
510 | #ifdef CONFIG_SMP | 528 | #ifdef CONFIG_SMP |
511 | struct vmi_ap_state ap; | ||
512 | extern void setup_pda(void); | 529 | extern void setup_pda(void); |
513 | 530 | ||
514 | static void __init /* XXX cpu hotplug */ | 531 | static void __devinit |
515 | vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip, | 532 | vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip, |
516 | unsigned long start_esp) | 533 | unsigned long start_esp) |
517 | { | 534 | { |
535 | struct vmi_ap_state ap; | ||
536 | |||
518 | /* Default everything to zero. This is fine for most GPRs. */ | 537 | /* Default everything to zero. This is fine for most GPRs. */ |
519 | memset(&ap, 0, sizeof(struct vmi_ap_state)); | 538 | memset(&ap, 0, sizeof(struct vmi_ap_state)); |
520 | 539 | ||
@@ -553,7 +572,7 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip, | |||
553 | /* Protected mode, paging, AM, WP, NE, MP. */ | 572 | /* Protected mode, paging, AM, WP, NE, MP. */ |
554 | ap.cr0 = 0x80050023; | 573 | ap.cr0 = 0x80050023; |
555 | ap.cr4 = mmu_cr4_features; | 574 | ap.cr4 = mmu_cr4_features; |
556 | vmi_ops.set_initial_ap_state(__pa(&ap), phys_apicid); | 575 | vmi_ops.set_initial_ap_state((u32)&ap, phys_apicid); |
557 | } | 576 | } |
558 | #endif | 577 | #endif |
559 | 578 | ||
@@ -645,12 +664,12 @@ static inline int __init probe_vmi_rom(void) | |||
645 | void vmi_bringup(void) | 664 | void vmi_bringup(void) |
646 | { | 665 | { |
647 | /* We must establish the lowmem mapping for MMU ops to work */ | 666 | /* We must establish the lowmem mapping for MMU ops to work */ |
648 | if (vmi_rom) | 667 | if (vmi_ops.set_linear_mapping) |
649 | vmi_ops.set_linear_mapping(0, __PAGE_OFFSET, max_low_pfn, 0); | 668 | vmi_ops.set_linear_mapping(0, __PAGE_OFFSET, max_low_pfn, 0); |
650 | } | 669 | } |
651 | 670 | ||
652 | /* | 671 | /* |
653 | * Return a pointer to the VMI function or a NOP stub | 672 | * Return a pointer to a VMI function or NULL if unimplemented |
654 | */ | 673 | */ |
655 | static void *vmi_get_function(int vmicall) | 674 | static void *vmi_get_function(int vmicall) |
656 | { | 675 | { |
@@ -661,12 +680,13 @@ static void *vmi_get_function(int vmicall) | |||
661 | if (rel->type == VMI_RELOCATION_CALL_REL) | 680 | if (rel->type == VMI_RELOCATION_CALL_REL) |
662 | return (void *)rel->eip; | 681 | return (void *)rel->eip; |
663 | else | 682 | else |
664 | return (void *)vmi_nop; | 683 | return NULL; |
665 | } | 684 | } |
666 | 685 | ||
667 | /* | 686 | /* |
668 | * Helper macro for making the VMI paravirt-ops fill code readable. | 687 | * Helper macro for making the VMI paravirt-ops fill code readable. |
669 | * For unimplemented operations, fall back to default. | 688 | * For unimplemented operations, fall back to default, unless nop |
689 | * is returned by the ROM. | ||
670 | */ | 690 | */ |
671 | #define para_fill(opname, vmicall) \ | 691 | #define para_fill(opname, vmicall) \ |
672 | do { \ | 692 | do { \ |
@@ -675,9 +695,29 @@ do { \ | |||
675 | if (rel->type != VMI_RELOCATION_NONE) { \ | 695 | if (rel->type != VMI_RELOCATION_NONE) { \ |
676 | BUG_ON(rel->type != VMI_RELOCATION_CALL_REL); \ | 696 | BUG_ON(rel->type != VMI_RELOCATION_CALL_REL); \ |
677 | paravirt_ops.opname = (void *)rel->eip; \ | 697 | paravirt_ops.opname = (void *)rel->eip; \ |
698 | } else if (rel->type == VMI_RELOCATION_NOP) \ | ||
699 | paravirt_ops.opname = (void *)vmi_nop; \ | ||
700 | } while (0) | ||
701 | |||
702 | /* | ||
703 | * Helper macro for making the VMI paravirt-ops fill code readable. | ||
704 | * For cached operations which do not match the VMI ROM ABI and must | ||
705 | * go through a translation stub. Ignore NOPs, since it is not clear | ||
706 | * a NOP VMI function corresponds to a NOP paravirt-op when the | ||
707 | * functions are not in 1-1 correspondence. | ||
708 | */ | ||
709 | #define para_wrap(opname, wrapper, cache, vmicall) \ | ||
710 | do { \ | ||
711 | reloc = call_vrom_long_func(vmi_rom, get_reloc, \ | ||
712 | VMI_CALL_##vmicall); \ | ||
713 | BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL); \ | ||
714 | if (rel->type == VMI_RELOCATION_CALL_REL) { \ | ||
715 | paravirt_ops.opname = wrapper; \ | ||
716 | vmi_ops.cache = (void *)rel->eip; \ | ||
678 | } \ | 717 | } \ |
679 | } while (0) | 718 | } while (0) |
680 | 719 | ||
720 | |||
681 | /* | 721 | /* |
682 | * Activate the VMI interface and switch into paravirtualized mode | 722 | * Activate the VMI interface and switch into paravirtualized mode |
683 | */ | 723 | */ |
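(Editor's aside, not part of the patch: for readability, here is what the new para_wrap() helper defined above expands to, in place, for one of the calls used later, para_wrap(load_tr_desc, vmi_set_tr, set_tr, SetTR). All names come straight from the diff:

    reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_SetTR);
    BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);
    if (rel->type == VMI_RELOCATION_CALL_REL) {
            paravirt_ops.load_tr_desc = vmi_set_tr;
            vmi_ops.set_tr = (void *)rel->eip;
    }

That is, the paravirt entry is only pointed at the wrapper when the ROM actually implements the call, and the cached vmi_ops slot records the ROM entry point the wrapper will invoke.)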
@@ -714,13 +754,8 @@ static inline int __init activate_vmi(void) | |||
714 | * rdpmc is not yet used in Linux | 754 | * rdpmc is not yet used in Linux |
715 | */ | 755 | */ |
716 | 756 | ||
717 | /* CPUID is special, so very special */ | 757 | /* CPUID is special, so very special it gets wrapped like a present */ |
718 | reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_CPUID); | 758 | para_wrap(cpuid, vmi_cpuid, cpuid, CPUID); |
719 | if (rel->type != VMI_RELOCATION_NONE) { | ||
720 | BUG_ON(rel->type != VMI_RELOCATION_CALL_REL); | ||
721 | vmi_ops.cpuid = (void *)rel->eip; | ||
722 | paravirt_ops.cpuid = vmi_cpuid; | ||
723 | } | ||
724 | 759 | ||
725 | para_fill(clts, CLTS); | 760 | para_fill(clts, CLTS); |
726 | para_fill(get_debugreg, GetDR); | 761 | para_fill(get_debugreg, GetDR); |
@@ -737,38 +772,26 @@ static inline int __init activate_vmi(void) | |||
737 | para_fill(restore_fl, SetInterruptMask); | 772 | para_fill(restore_fl, SetInterruptMask); |
738 | para_fill(irq_disable, DisableInterrupts); | 773 | para_fill(irq_disable, DisableInterrupts); |
739 | para_fill(irq_enable, EnableInterrupts); | 774 | para_fill(irq_enable, EnableInterrupts); |
775 | |||
740 | /* irq_save_disable !!! sheer pain */ | 776 | /* irq_save_disable !!! sheer pain */ |
741 | patch_offset(&irq_save_disable_callout[IRQ_PATCH_INT_MASK], | 777 | patch_offset(&irq_save_disable_callout[IRQ_PATCH_INT_MASK], |
742 | (char *)paravirt_ops.save_fl); | 778 | (char *)paravirt_ops.save_fl); |
743 | patch_offset(&irq_save_disable_callout[IRQ_PATCH_DISABLE], | 779 | patch_offset(&irq_save_disable_callout[IRQ_PATCH_DISABLE], |
744 | (char *)paravirt_ops.irq_disable); | 780 | (char *)paravirt_ops.irq_disable); |
745 | #ifndef CONFIG_NO_IDLE_HZ | 781 | |
746 | para_fill(safe_halt, Halt); | ||
747 | #else | ||
748 | vmi_ops.halt = vmi_get_function(VMI_CALL_Halt); | ||
749 | paravirt_ops.safe_halt = vmi_safe_halt; | ||
750 | #endif | ||
751 | para_fill(wbinvd, WBINVD); | 782 | para_fill(wbinvd, WBINVD); |
783 | para_fill(read_tsc, RDTSC); | ||
784 | |||
785 | /* The following we emulate with trap and emulate for now */ | ||
752 | /* paravirt_ops.read_msr = vmi_rdmsr */ | 786 | /* paravirt_ops.read_msr = vmi_rdmsr */ |
753 | /* paravirt_ops.write_msr = vmi_wrmsr */ | 787 | /* paravirt_ops.write_msr = vmi_wrmsr */ |
754 | para_fill(read_tsc, RDTSC); | ||
755 | /* paravirt_ops.rdpmc = vmi_rdpmc */ | 788 | /* paravirt_ops.rdpmc = vmi_rdpmc */ |
756 | 789 | ||
757 | /* TR interface doesn't pass TR value */ | 790 | /* TR interface doesn't pass TR value, wrap */ |
758 | reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_SetTR); | 791 | para_wrap(load_tr_desc, vmi_set_tr, set_tr, SetTR); |
759 | if (rel->type != VMI_RELOCATION_NONE) { | ||
760 | BUG_ON(rel->type != VMI_RELOCATION_CALL_REL); | ||
761 | vmi_ops.set_tr = (void *)rel->eip; | ||
762 | paravirt_ops.load_tr_desc = vmi_set_tr; | ||
763 | } | ||
764 | 792 | ||
765 | /* LDT is special, too */ | 793 | /* LDT is special, too */ |
766 | reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_SetLDT); | 794 | para_wrap(set_ldt, vmi_set_ldt, _set_ldt, SetLDT); |
767 | if (rel->type != VMI_RELOCATION_NONE) { | ||
768 | BUG_ON(rel->type != VMI_RELOCATION_CALL_REL); | ||
769 | vmi_ops._set_ldt = (void *)rel->eip; | ||
770 | paravirt_ops.set_ldt = vmi_set_ldt; | ||
771 | } | ||
772 | 795 | ||
773 | para_fill(load_gdt, SetGDT); | 796 | para_fill(load_gdt, SetGDT); |
774 | para_fill(load_idt, SetIDT); | 797 | para_fill(load_idt, SetIDT); |
@@ -779,28 +802,14 @@ static inline int __init activate_vmi(void) | |||
779 | para_fill(write_ldt_entry, WriteLDTEntry); | 802 | para_fill(write_ldt_entry, WriteLDTEntry); |
780 | para_fill(write_gdt_entry, WriteGDTEntry); | 803 | para_fill(write_gdt_entry, WriteGDTEntry); |
781 | para_fill(write_idt_entry, WriteIDTEntry); | 804 | para_fill(write_idt_entry, WriteIDTEntry); |
782 | reloc = call_vrom_long_func(vmi_rom, get_reloc, | 805 | para_wrap(load_esp0, vmi_load_esp0, set_kernel_stack, UpdateKernelStack); |
783 | VMI_CALL_UpdateKernelStack); | ||
784 | if (rel->type != VMI_RELOCATION_NONE) { | ||
785 | BUG_ON(rel->type != VMI_RELOCATION_CALL_REL); | ||
786 | vmi_ops.set_kernel_stack = (void *)rel->eip; | ||
787 | paravirt_ops.load_esp0 = vmi_load_esp0; | ||
788 | } | ||
789 | |||
790 | para_fill(set_iopl_mask, SetIOPLMask); | 806 | para_fill(set_iopl_mask, SetIOPLMask); |
791 | paravirt_ops.io_delay = (void *)vmi_nop; | 807 | para_fill(io_delay, IODelay); |
792 | if (!disable_nodelay) { | ||
793 | paravirt_ops.const_udelay = (void *)vmi_nop; | ||
794 | } | ||
795 | |||
796 | para_fill(set_lazy_mode, SetLazyMode); | 808 | para_fill(set_lazy_mode, SetLazyMode); |
797 | 809 | ||
798 | reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_FlushTLB); | 810 | /* user and kernel flush are just handled with different flags to FlushTLB */ |
799 | if (rel->type != VMI_RELOCATION_NONE) { | 811 | para_wrap(flush_tlb_user, vmi_flush_tlb_user, flush_tlb, FlushTLB); |
800 | vmi_ops.flush_tlb = (void *)rel->eip; | 812 | para_wrap(flush_tlb_kernel, vmi_flush_tlb_kernel, flush_tlb, FlushTLB); |
801 | paravirt_ops.flush_tlb_user = vmi_flush_tlb_user; | ||
802 | paravirt_ops.flush_tlb_kernel = vmi_flush_tlb_kernel; | ||
803 | } | ||
804 | para_fill(flush_tlb_single, InvalPage); | 813 | para_fill(flush_tlb_single, InvalPage); |
805 | 814 | ||
806 | /* | 815 | /* |
@@ -815,27 +824,40 @@ static inline int __init activate_vmi(void) | |||
815 | vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxE); | 824 | vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxE); |
816 | vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxE); | 825 | vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxE); |
817 | #endif | 826 | #endif |
818 | vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping); | ||
819 | vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage); | ||
820 | vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage); | ||
821 | 827 | ||
822 | paravirt_ops.alloc_pt = vmi_allocate_pt; | 828 | if (vmi_ops.set_pte) { |
823 | paravirt_ops.alloc_pd = vmi_allocate_pd; | 829 | paravirt_ops.set_pte = vmi_set_pte; |
824 | paravirt_ops.alloc_pd_clone = vmi_allocate_pd_clone; | 830 | paravirt_ops.set_pte_at = vmi_set_pte_at; |
825 | paravirt_ops.release_pt = vmi_release_pt; | 831 | paravirt_ops.set_pmd = vmi_set_pmd; |
826 | paravirt_ops.release_pd = vmi_release_pd; | ||
827 | paravirt_ops.set_pte = vmi_set_pte; | ||
828 | paravirt_ops.set_pte_at = vmi_set_pte_at; | ||
829 | paravirt_ops.set_pmd = vmi_set_pmd; | ||
830 | paravirt_ops.pte_update = vmi_update_pte; | ||
831 | paravirt_ops.pte_update_defer = vmi_update_pte_defer; | ||
832 | #ifdef CONFIG_X86_PAE | 832 | #ifdef CONFIG_X86_PAE |
833 | paravirt_ops.set_pte_atomic = vmi_set_pte_atomic; | 833 | paravirt_ops.set_pte_atomic = vmi_set_pte_atomic; |
834 | paravirt_ops.set_pte_present = vmi_set_pte_present; | 834 | paravirt_ops.set_pte_present = vmi_set_pte_present; |
835 | paravirt_ops.set_pud = vmi_set_pud; | 835 | paravirt_ops.set_pud = vmi_set_pud; |
836 | paravirt_ops.pte_clear = vmi_pte_clear; | 836 | paravirt_ops.pte_clear = vmi_pte_clear; |
837 | paravirt_ops.pmd_clear = vmi_pmd_clear; | 837 | paravirt_ops.pmd_clear = vmi_pmd_clear; |
838 | #endif | 838 | #endif |
839 | } | ||
840 | |||
841 | if (vmi_ops.update_pte) { | ||
842 | paravirt_ops.pte_update = vmi_update_pte; | ||
843 | paravirt_ops.pte_update_defer = vmi_update_pte_defer; | ||
844 | } | ||
845 | |||
846 | vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage); | ||
847 | if (vmi_ops.allocate_page) { | ||
848 | paravirt_ops.alloc_pt = vmi_allocate_pt; | ||
849 | paravirt_ops.alloc_pd = vmi_allocate_pd; | ||
850 | paravirt_ops.alloc_pd_clone = vmi_allocate_pd_clone; | ||
851 | } | ||
852 | |||
853 | vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage); | ||
854 | if (vmi_ops.release_page) { | ||
855 | paravirt_ops.release_pt = vmi_release_pt; | ||
856 | paravirt_ops.release_pd = vmi_release_pd; | ||
857 | } | ||
858 | para_wrap(map_pt_hook, vmi_map_pt_hook, set_linear_mapping, | ||
859 | SetLinearMapping); | ||
860 | |||
839 | /* | 861 | /* |
840 | * These MUST always be patched. Don't support indirect jumps | 862 | * These MUST always be patched. Don't support indirect jumps |
841 | * through these operations, as the VMI interface may use either | 863 | * through these operations, as the VMI interface may use either |
@@ -847,21 +869,20 @@ static inline int __init activate_vmi(void) | |||
847 | paravirt_ops.iret = (void *)0xbadbab0; | 869 | paravirt_ops.iret = (void *)0xbadbab0; |
848 | 870 | ||
849 | #ifdef CONFIG_SMP | 871 | #ifdef CONFIG_SMP |
850 | paravirt_ops.startup_ipi_hook = vmi_startup_ipi_hook; | 872 | para_wrap(startup_ipi_hook, vmi_startup_ipi_hook, set_initial_ap_state, SetInitialAPState); |
851 | vmi_ops.set_initial_ap_state = vmi_get_function(VMI_CALL_SetInitialAPState); | ||
852 | #endif | 873 | #endif |
853 | 874 | ||
854 | #ifdef CONFIG_X86_LOCAL_APIC | 875 | #ifdef CONFIG_X86_LOCAL_APIC |
855 | paravirt_ops.apic_read = vmi_get_function(VMI_CALL_APICRead); | 876 | para_fill(apic_read, APICRead); |
856 | paravirt_ops.apic_write = vmi_get_function(VMI_CALL_APICWrite); | 877 | para_fill(apic_write, APICWrite); |
857 | paravirt_ops.apic_write_atomic = vmi_get_function(VMI_CALL_APICWrite); | 878 | para_fill(apic_write_atomic, APICWrite); |
858 | #endif | 879 | #endif |
859 | 880 | ||
860 | /* | 881 | /* |
861 | * Check for VMI timer functionality by probing for a cycle frequency method | 882 | * Check for VMI timer functionality by probing for a cycle frequency method |
862 | */ | 883 | */ |
863 | reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_GetCycleFrequency); | 884 | reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_GetCycleFrequency); |
864 | if (rel->type != VMI_RELOCATION_NONE) { | 885 | if (!disable_vmi_timer && rel->type != VMI_RELOCATION_NONE) { |
865 | vmi_timer_ops.get_cycle_frequency = (void *)rel->eip; | 886 | vmi_timer_ops.get_cycle_frequency = (void *)rel->eip; |
866 | vmi_timer_ops.get_cycle_counter = | 887 | vmi_timer_ops.get_cycle_counter = |
867 | vmi_get_function(VMI_CALL_GetCycleCounter); | 888 | vmi_get_function(VMI_CALL_GetCycleCounter); |
@@ -879,9 +900,22 @@ static inline int __init activate_vmi(void) | |||
879 | paravirt_ops.setup_boot_clock = vmi_timer_setup_boot_alarm; | 900 | paravirt_ops.setup_boot_clock = vmi_timer_setup_boot_alarm; |
880 | paravirt_ops.setup_secondary_clock = vmi_timer_setup_secondary_alarm; | 901 | paravirt_ops.setup_secondary_clock = vmi_timer_setup_secondary_alarm; |
881 | #endif | 902 | #endif |
882 | custom_sched_clock = vmi_sched_clock; | 903 | paravirt_ops.get_scheduled_cycles = vmi_get_sched_cycles; |
904 | paravirt_ops.get_cpu_khz = vmi_cpu_khz; | ||
905 | |||
906 | /* We have true wallclock functions; disable CMOS clock sync */ | ||
907 | no_sync_cmos_clock = 1; | ||
908 | } else { | ||
909 | disable_noidle = 1; | ||
910 | disable_vmi_timer = 1; | ||
883 | } | 911 | } |
884 | 912 | ||
913 | /* No idle HZ mode only works if VMI timer and no idle is enabled */ | ||
914 | if (disable_noidle || disable_vmi_timer) | ||
915 | para_fill(safe_halt, Halt); | ||
916 | else | ||
917 | para_wrap(safe_halt, vmi_safe_halt, halt, Halt); | ||
918 | |||
885 | /* | 919 | /* |
886 | * Alternative instruction rewriting doesn't happen soon enough | 920 | * Alternative instruction rewriting doesn't happen soon enough |
887 | * to convert VMI_IRET to a call instead of a jump; so we have | 921 | * to convert VMI_IRET to a call instead of a jump; so we have |
@@ -914,7 +948,9 @@ void __init vmi_init(void) | |||
914 | 948 | ||
915 | local_irq_save(flags); | 949 | local_irq_save(flags); |
916 | activate_vmi(); | 950 | activate_vmi(); |
917 | #ifdef CONFIG_SMP | 951 | |
952 | #ifdef CONFIG_X86_IO_APIC | ||
953 | /* This is virtual hardware; timer routing is wired correctly */ | ||
918 | no_timer_check = 1; | 954 | no_timer_check = 1; |
919 | #endif | 955 | #endif |
920 | local_irq_restore(flags & X86_EFLAGS_IF); | 956 | local_irq_restore(flags & X86_EFLAGS_IF); |
@@ -925,9 +961,7 @@ static int __init parse_vmi(char *arg) | |||
925 | if (!arg) | 961 | if (!arg) |
926 | return -EINVAL; | 962 | return -EINVAL; |
927 | 963 | ||
928 | if (!strcmp(arg, "disable_nodelay")) | 964 | if (!strcmp(arg, "disable_pge")) { |
929 | disable_nodelay = 1; | ||
930 | else if (!strcmp(arg, "disable_pge")) { | ||
931 | clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability); | 965 | clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability); |
932 | disable_pge = 1; | 966 | disable_pge = 1; |
933 | } else if (!strcmp(arg, "disable_pse")) { | 967 | } else if (!strcmp(arg, "disable_pse")) { |
@@ -942,7 +976,11 @@ static int __init parse_vmi(char *arg) | |||
942 | } else if (!strcmp(arg, "disable_mtrr")) { | 976 | } else if (!strcmp(arg, "disable_mtrr")) { |
943 | clear_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability); | 977 | clear_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability); |
944 | disable_mtrr = 1; | 978 | disable_mtrr = 1; |
945 | } | 979 | } else if (!strcmp(arg, "disable_timer")) { |
980 | disable_vmi_timer = 1; | ||
981 | disable_noidle = 1; | ||
982 | } else if (!strcmp(arg, "disable_noidle")) | ||
983 | disable_noidle = 1; | ||
946 | return 0; | 984 | return 0; |
947 | } | 985 | } |
948 | 986 | ||
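(Editor's aside, not part of the patch: parse_vmi() handles the vmi= early boot parameter; the registration itself is outside the hunks shown, so the exact option name is an assumption here. With that assumption, the new switches are used as

    vmi=disable_timer      (do not use the VMI paravirt timer; fall back to the normal PIT/HPET path)
    vmi=disable_noidle     (keep the periodic tick even when the guest is idle)

and, as the code above shows, disable_timer also sets disable_noidle.)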
diff --git a/arch/i386/kernel/vmitime.c b/arch/i386/kernel/vmitime.c
index 76d2adcae5a3..9dfb17739b67 100644
--- a/arch/i386/kernel/vmitime.c
+++ b/arch/i386/kernel/vmitime.c
@@ -123,12 +123,10 @@ static struct clocksource clocksource_vmi = { | |||
123 | static irqreturn_t vmi_timer_interrupt(int irq, void *dev_id); | 123 | static irqreturn_t vmi_timer_interrupt(int irq, void *dev_id); |
124 | 124 | ||
125 | static struct irqaction vmi_timer_irq = { | 125 | static struct irqaction vmi_timer_irq = { |
126 | vmi_timer_interrupt, | 126 | .handler = vmi_timer_interrupt, |
127 | SA_INTERRUPT, | 127 | .flags = IRQF_DISABLED, |
128 | CPU_MASK_NONE, | 128 | .mask = CPU_MASK_NONE, |
129 | "VMI-alarm", | 129 | .name = "VMI-alarm", |
130 | NULL, | ||
131 | NULL | ||
132 | }; | 130 | }; |
133 | 131 | ||
134 | /* Alarm rate */ | 132 | /* Alarm rate */ |
@@ -153,13 +151,6 @@ static void vmi_get_wallclock_ts(struct timespec *ts) | |||
153 | ts->tv_sec = wallclock; | 151 | ts->tv_sec = wallclock; |
154 | } | 152 | } |
155 | 153 | ||
156 | static void update_xtime_from_wallclock(void) | ||
157 | { | ||
158 | struct timespec ts; | ||
159 | vmi_get_wallclock_ts(&ts); | ||
160 | do_settimeofday(&ts); | ||
161 | } | ||
162 | |||
163 | unsigned long vmi_get_wallclock(void) | 154 | unsigned long vmi_get_wallclock(void) |
164 | { | 155 | { |
165 | struct timespec ts; | 156 | struct timespec ts; |
@@ -172,11 +163,20 @@ int vmi_set_wallclock(unsigned long now) | |||
172 | return -1; | 163 | return -1; |
173 | } | 164 | } |
174 | 165 | ||
175 | unsigned long long vmi_sched_clock(void) | 166 | unsigned long long vmi_get_sched_cycles(void) |
176 | { | 167 | { |
177 | return read_available_cycles(); | 168 | return read_available_cycles(); |
178 | } | 169 | } |
179 | 170 | ||
171 | unsigned long vmi_cpu_khz(void) | ||
172 | { | ||
173 | unsigned long long khz; | ||
174 | |||
175 | khz = vmi_timer_ops.get_cycle_frequency(); | ||
176 | (void)do_div(khz, 1000); | ||
177 | return khz; | ||
178 | } | ||
179 | |||
180 | void __init vmi_time_init(void) | 180 | void __init vmi_time_init(void) |
181 | { | 181 | { |
182 | unsigned long long cycles_per_sec, cycles_per_msec; | 182 | unsigned long long cycles_per_sec, cycles_per_msec; |
@@ -188,25 +188,16 @@ void __init vmi_time_init(void) | |||
188 | set_intr_gate(LOCAL_TIMER_VECTOR, apic_vmi_timer_interrupt); | 188 | set_intr_gate(LOCAL_TIMER_VECTOR, apic_vmi_timer_interrupt); |
189 | #endif | 189 | #endif |
190 | 190 | ||
191 | no_sync_cmos_clock = 1; | ||
192 | |||
193 | vmi_get_wallclock_ts(&xtime); | ||
194 | set_normalized_timespec(&wall_to_monotonic, | ||
195 | -xtime.tv_sec, -xtime.tv_nsec); | ||
196 | |||
197 | real_cycles_accounted_system = read_real_cycles(); | 191 | real_cycles_accounted_system = read_real_cycles(); |
198 | update_xtime_from_wallclock(); | ||
199 | per_cpu(process_times_cycles_accounted_cpu, 0) = read_available_cycles(); | 192 | per_cpu(process_times_cycles_accounted_cpu, 0) = read_available_cycles(); |
200 | 193 | ||
201 | cycles_per_sec = vmi_timer_ops.get_cycle_frequency(); | 194 | cycles_per_sec = vmi_timer_ops.get_cycle_frequency(); |
202 | |||
203 | cycles_per_jiffy = cycles_per_sec; | 195 | cycles_per_jiffy = cycles_per_sec; |
204 | (void)do_div(cycles_per_jiffy, HZ); | 196 | (void)do_div(cycles_per_jiffy, HZ); |
205 | cycles_per_alarm = cycles_per_sec; | 197 | cycles_per_alarm = cycles_per_sec; |
206 | (void)do_div(cycles_per_alarm, alarm_hz); | 198 | (void)do_div(cycles_per_alarm, alarm_hz); |
207 | cycles_per_msec = cycles_per_sec; | 199 | cycles_per_msec = cycles_per_sec; |
208 | (void)do_div(cycles_per_msec, 1000); | 200 | (void)do_div(cycles_per_msec, 1000); |
209 | cpu_khz = cycles_per_msec; | ||
210 | 201 | ||
211 | printk(KERN_WARNING "VMI timer cycles/sec = %llu ; cycles/jiffy = %llu ;" | 202 | printk(KERN_WARNING "VMI timer cycles/sec = %llu ; cycles/jiffy = %llu ;" |
212 | "cycles/alarm = %llu\n", cycles_per_sec, cycles_per_jiffy, | 203 | "cycles/alarm = %llu\n", cycles_per_sec, cycles_per_jiffy, |
@@ -250,7 +241,7 @@ void __init vmi_timer_setup_boot_alarm(void) | |||
250 | 241 | ||
251 | /* Initialize the time accounting variables for an AP on an SMP system. | 242 | /* Initialize the time accounting variables for an AP on an SMP system. |
252 | * Also, set the local alarm for the AP. */ | 243 | * Also, set the local alarm for the AP. */ |
253 | void __init vmi_timer_setup_secondary_alarm(void) | 244 | void __devinit vmi_timer_setup_secondary_alarm(void) |
254 | { | 245 | { |
255 | int cpu = smp_processor_id(); | 246 | int cpu = smp_processor_id(); |
256 | 247 | ||
@@ -276,16 +267,13 @@ static void vmi_account_real_cycles(unsigned long long cur_real_cycles) | |||
276 | 267 | ||
277 | cycles_not_accounted = cur_real_cycles - real_cycles_accounted_system; | 268 | cycles_not_accounted = cur_real_cycles - real_cycles_accounted_system; |
278 | while (cycles_not_accounted >= cycles_per_jiffy) { | 269 | while (cycles_not_accounted >= cycles_per_jiffy) { |
279 | /* systems wide jiffies and wallclock. */ | 270 | /* systems wide jiffies. */ |
280 | do_timer(1); | 271 | do_timer(1); |
281 | 272 | ||
282 | cycles_not_accounted -= cycles_per_jiffy; | 273 | cycles_not_accounted -= cycles_per_jiffy; |
283 | real_cycles_accounted_system += cycles_per_jiffy; | 274 | real_cycles_accounted_system += cycles_per_jiffy; |
284 | } | 275 | } |
285 | 276 | ||
286 | if (vmi_timer_ops.wallclock_updated()) | ||
287 | update_xtime_from_wallclock(); | ||
288 | |||
289 | write_sequnlock(&xtime_lock); | 277 | write_sequnlock(&xtime_lock); |
290 | } | 278 | } |
291 | 279 | ||
@@ -380,7 +368,6 @@ int vmi_stop_hz_timer(void) | |||
380 | unsigned long seq, next; | 368 | unsigned long seq, next; |
381 | unsigned long long real_cycles_expiry; | 369 | unsigned long long real_cycles_expiry; |
382 | int cpu = smp_processor_id(); | 370 | int cpu = smp_processor_id(); |
383 | int idle; | ||
384 | 371 | ||
385 | BUG_ON(!irqs_disabled()); | 372 | BUG_ON(!irqs_disabled()); |
386 | if (sysctl_hz_timer != 0) | 373 | if (sysctl_hz_timer != 0) |
@@ -388,13 +375,13 @@ int vmi_stop_hz_timer(void) | |||
388 | 375 | ||
389 | cpu_set(cpu, nohz_cpu_mask); | 376 | cpu_set(cpu, nohz_cpu_mask); |
390 | smp_mb(); | 377 | smp_mb(); |
378 | |||
391 | if (rcu_needs_cpu(cpu) || local_softirq_pending() || | 379 | if (rcu_needs_cpu(cpu) || local_softirq_pending() || |
392 | (next = next_timer_interrupt(), time_before_eq(next, jiffies))) { | 380 | (next = next_timer_interrupt(), |
381 | time_before_eq(next, jiffies + HZ/CONFIG_VMI_ALARM_HZ))) { | ||
393 | cpu_clear(cpu, nohz_cpu_mask); | 382 | cpu_clear(cpu, nohz_cpu_mask); |
394 | next = jiffies; | 383 | return 0; |
395 | idle = 0; | 384 | } |
396 | } else | ||
397 | idle = 1; | ||
398 | 385 | ||
399 | /* Convert jiffies to the real cycle counter. */ | 386 | /* Convert jiffies to the real cycle counter. */ |
400 | do { | 387 | do { |
@@ -404,17 +391,13 @@ int vmi_stop_hz_timer(void) | |||
404 | } while (read_seqretry(&xtime_lock, seq)); | 391 | } while (read_seqretry(&xtime_lock, seq)); |
405 | 392 | ||
406 | /* This cpu is going idle. Disable the periodic alarm. */ | 393 | /* This cpu is going idle. Disable the periodic alarm. */ |
407 | if (idle) { | 394 | vmi_timer_ops.cancel_alarm(VMI_CYCLES_AVAILABLE); |
408 | vmi_timer_ops.cancel_alarm(VMI_CYCLES_AVAILABLE); | 395 | per_cpu(idle_start_jiffies, cpu) = jiffies; |
409 | per_cpu(idle_start_jiffies, cpu) = jiffies; | ||
410 | } | ||
411 | |||
412 | /* Set the real time alarm to expire at the next event. */ | 396 | /* Set the real time alarm to expire at the next event. */ |
413 | vmi_timer_ops.set_alarm( | 397 | vmi_timer_ops.set_alarm( |
414 | VMI_ALARM_WIRING | VMI_ALARM_IS_ONESHOT | VMI_CYCLES_REAL, | 398 | VMI_ALARM_WIRING | VMI_ALARM_IS_ONESHOT | VMI_CYCLES_REAL, |
415 | real_cycles_expiry, 0); | 399 | real_cycles_expiry, 0); |
416 | 400 | return 1; | |
417 | return idle; | ||
418 | } | 401 | } |
419 | 402 | ||
420 | static void vmi_reenable_hz_timer(int cpu) | 403 | static void vmi_reenable_hz_timer(int cpu) |
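
The vmitime.c hunk above converts the static struct irqaction from positional to designated initializers and swaps the obsolete SA_INTERRUPT flag for IRQF_DISABLED. A minimal sketch of that style, assuming a 2.6.20-era struct irqaction; unnamed members such as dev_id and next default to NULL, so they can simply be omitted rather than spelled out as trailing NULLs:

	#include <linux/interrupt.h>

	/* Illustrative only: mirrors the initializer style used in the
	 * patch above, with a placeholder handler and name. */
	static irqreturn_t example_alarm_interrupt(int irq, void *dev_id)
	{
		return IRQ_HANDLED;
	}

	static struct irqaction example_alarm_irq = {
		.handler = example_alarm_interrupt,
		.flags   = IRQF_DISABLED,
		.mask    = CPU_MASK_NONE,
		.name    = "example-alarm",
	};
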
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c index d430d36ae49d..0afb4fe7c35b 100644 --- a/arch/ia64/ia32/sys_ia32.c +++ b/arch/ia64/ia32/sys_ia32.c | |||
@@ -1267,6 +1267,10 @@ sys32_getdents (unsigned int fd, struct compat_dirent __user *dirent, unsigned i | |||
1267 | struct getdents32_callback buf; | 1267 | struct getdents32_callback buf; |
1268 | int error; | 1268 | int error; |
1269 | 1269 | ||
1270 | error = -EFAULT; | ||
1271 | if (!access_ok(VERIFY_WRITE, dirent, count)) | ||
1272 | goto out; | ||
1273 | |||
1270 | error = -EBADF; | 1274 | error = -EBADF; |
1271 | file = fget(fd); | 1275 | file = fget(fd); |
1272 | if (!file) | 1276 | if (!file) |
@@ -1283,10 +1287,10 @@ sys32_getdents (unsigned int fd, struct compat_dirent __user *dirent, unsigned i | |||
1283 | error = buf.error; | 1287 | error = buf.error; |
1284 | lastdirent = buf.previous; | 1288 | lastdirent = buf.previous; |
1285 | if (lastdirent) { | 1289 | if (lastdirent) { |
1286 | error = -EINVAL; | ||
1287 | if (put_user(file->f_pos, &lastdirent->d_off)) | 1290 | if (put_user(file->f_pos, &lastdirent->d_off)) |
1288 | goto out_putf; | 1291 | error = -EFAULT; |
1289 | error = count - buf.count; | 1292 | else |
1293 | error = count - buf.count; | ||
1290 | } | 1294 | } |
1291 | 1295 | ||
1292 | out_putf: | 1296 | out_putf: |
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c index 772ba6fe110f..4061593e5b17 100644 --- a/arch/ia64/kernel/efi.c +++ b/arch/ia64/kernel/efi.c | |||
@@ -21,6 +21,7 @@ | |||
21 | * Skip non-WB memory and ignore empty memory ranges. | 21 | * Skip non-WB memory and ignore empty memory ranges. |
22 | */ | 22 | */ |
23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
24 | #include <linux/bootmem.h> | ||
24 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
25 | #include <linux/init.h> | 26 | #include <linux/init.h> |
26 | #include <linux/types.h> | 27 | #include <linux/types.h> |
@@ -1009,6 +1010,11 @@ efi_memmap_init(unsigned long *s, unsigned long *e) | |||
1009 | } else | 1010 | } else |
1010 | ae = efi_md_end(md); | 1011 | ae = efi_md_end(md); |
1011 | 1012 | ||
1013 | #ifdef CONFIG_CRASH_DUMP | ||
1014 | /* saved_max_pfn should ignore max_addr= command line arg */ | ||
1015 | if (saved_max_pfn < (ae >> PAGE_SHIFT)) | ||
1016 | saved_max_pfn = (ae >> PAGE_SHIFT); | ||
1017 | #endif | ||
1012 | /* keep within max_addr= and min_addr= command line arg */ | 1018 | /* keep within max_addr= and min_addr= command line arg */ |
1013 | as = max(as, min_addr); | 1019 | as = max(as, min_addr); |
1014 | ae = min(ae, max_addr); | 1020 | ae = min(ae, max_addr); |
@@ -1177,3 +1183,33 @@ kdump_find_rsvd_region (unsigned long size, | |||
1177 | return ~0UL; | 1183 | return ~0UL; |
1178 | } | 1184 | } |
1179 | #endif | 1185 | #endif |
1186 | |||
1187 | #ifdef CONFIG_PROC_VMCORE | ||
1188 | /* find the size of the descriptor at a certain address */ | ||
1189 | unsigned long | ||
1190 | vmcore_find_descriptor_size (unsigned long address) | ||
1191 | { | ||
1192 | void *efi_map_start, *efi_map_end, *p; | ||
1193 | efi_memory_desc_t *md; | ||
1194 | u64 efi_desc_size; | ||
1195 | unsigned long ret = 0; | ||
1196 | |||
1197 | efi_map_start = __va(ia64_boot_param->efi_memmap); | ||
1198 | efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; | ||
1199 | efi_desc_size = ia64_boot_param->efi_memdesc_size; | ||
1200 | |||
1201 | for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { | ||
1202 | md = p; | ||
1203 | if (efi_wb(md) && md->type == EFI_LOADER_DATA | ||
1204 | && md->phys_addr == address) { | ||
1205 | ret = efi_md_size(md); | ||
1206 | break; | ||
1207 | } | ||
1208 | } | ||
1209 | |||
1210 | if (ret == 0) | ||
1211 | printk(KERN_WARNING "Cannot locate EFI vmcore descriptor\n"); | ||
1212 | |||
1213 | return ret; | ||
1214 | } | ||
1215 | #endif | ||
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 9ddf896a137a..abc7ad035886 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c | |||
@@ -2299,7 +2299,7 @@ pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long ad | |||
2299 | * allocate a sampling buffer and remaps it into the user address space of the task | 2299 | * allocate a sampling buffer and remaps it into the user address space of the task |
2300 | */ | 2300 | */ |
2301 | static int | 2301 | static int |
2302 | pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr) | 2302 | pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr) |
2303 | { | 2303 | { |
2304 | struct mm_struct *mm = task->mm; | 2304 | struct mm_struct *mm = task->mm; |
2305 | struct vm_area_struct *vma = NULL; | 2305 | struct vm_area_struct *vma = NULL; |
@@ -2349,6 +2349,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned lon | |||
2349 | * partially initialize the vma for the sampling buffer | 2349 | * partially initialize the vma for the sampling buffer |
2350 | */ | 2350 | */ |
2351 | vma->vm_mm = mm; | 2351 | vma->vm_mm = mm; |
2352 | vma->vm_file = filp; | ||
2352 | vma->vm_flags = VM_READ| VM_MAYREAD |VM_RESERVED; | 2353 | vma->vm_flags = VM_READ| VM_MAYREAD |VM_RESERVED; |
2353 | vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */ | 2354 | vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */ |
2354 | 2355 | ||
@@ -2387,6 +2388,8 @@ pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned lon | |||
2387 | goto error; | 2388 | goto error; |
2388 | } | 2389 | } |
2389 | 2390 | ||
2391 | get_file(filp); | ||
2392 | |||
2390 | /* | 2393 | /* |
2391 | * now insert the vma in the vm list for the process, must be | 2394 | * now insert the vma in the vm list for the process, must be |
2392 | * done with mmap lock held | 2395 | * done with mmap lock held |
@@ -2464,7 +2467,7 @@ pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx) | |||
2464 | } | 2467 | } |
2465 | 2468 | ||
2466 | static int | 2469 | static int |
2467 | pfm_setup_buffer_fmt(struct task_struct *task, pfm_context_t *ctx, unsigned int ctx_flags, | 2470 | pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned int ctx_flags, |
2468 | unsigned int cpu, pfarg_context_t *arg) | 2471 | unsigned int cpu, pfarg_context_t *arg) |
2469 | { | 2472 | { |
2470 | pfm_buffer_fmt_t *fmt = NULL; | 2473 | pfm_buffer_fmt_t *fmt = NULL; |
@@ -2505,7 +2508,7 @@ pfm_setup_buffer_fmt(struct task_struct *task, pfm_context_t *ctx, unsigned int | |||
2505 | /* | 2508 | /* |
2506 | * buffer is always remapped into the caller's address space | 2509 | * buffer is always remapped into the caller's address space |
2507 | */ | 2510 | */ |
2508 | ret = pfm_smpl_buffer_alloc(current, ctx, size, &uaddr); | 2511 | ret = pfm_smpl_buffer_alloc(current, filp, ctx, size, &uaddr); |
2509 | if (ret) goto error; | 2512 | if (ret) goto error; |
2510 | 2513 | ||
2511 | /* keep track of user address of buffer */ | 2514 | /* keep track of user address of buffer */ |
@@ -2716,7 +2719,7 @@ pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg | |||
2716 | * does the user want to sample? | 2719 | * does the user want to sample? |
2717 | */ | 2720 | */ |
2718 | if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) { | 2721 | if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) { |
2719 | ret = pfm_setup_buffer_fmt(current, ctx, ctx_flags, 0, req); | 2722 | ret = pfm_setup_buffer_fmt(current, filp, ctx, ctx_flags, 0, req); |
2720 | if (ret) goto buffer_error; | 2723 | if (ret) goto buffer_error; |
2721 | } | 2724 | } |
2722 | 2725 | ||
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 5fa09d141ab7..7d6fe65c93f4 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c | |||
@@ -251,6 +251,12 @@ reserve_memory (void) | |||
251 | } | 251 | } |
252 | #endif | 252 | #endif |
253 | 253 | ||
254 | #ifdef CONFIG_PROC_VMCORE | ||
255 | if (reserve_elfcorehdr(&rsvd_region[n].start, | ||
256 | &rsvd_region[n].end) == 0) | ||
257 | n++; | ||
258 | #endif | ||
259 | |||
254 | efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end); | 260 | efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end); |
255 | n++; | 261 | n++; |
256 | 262 | ||
@@ -453,6 +459,30 @@ static int __init parse_elfcorehdr(char *arg) | |||
453 | return 0; | 459 | return 0; |
454 | } | 460 | } |
455 | early_param("elfcorehdr", parse_elfcorehdr); | 461 | early_param("elfcorehdr", parse_elfcorehdr); |
462 | |||
463 | int __init reserve_elfcorehdr(unsigned long *start, unsigned long *end) | ||
464 | { | ||
465 | unsigned long length; | ||
466 | |||
467 | /* We get the address using the kernel command line, | ||
468 | * but the size is extracted from the EFI tables. | ||
469 | * Both address and size are required for reservation | ||
470 | * to work properly. | ||
471 | */ | ||
472 | |||
473 | if (elfcorehdr_addr >= ELFCORE_ADDR_MAX) | ||
474 | return -EINVAL; | ||
475 | |||
476 | if ((length = vmcore_find_descriptor_size(elfcorehdr_addr)) == 0) { | ||
477 | elfcorehdr_addr = ELFCORE_ADDR_MAX; | ||
478 | return -EINVAL; | ||
479 | } | ||
480 | |||
481 | *start = (unsigned long)__va(elfcorehdr_addr); | ||
482 | *end = *start + length; | ||
483 | return 0; | ||
484 | } | ||
485 | |||
456 | #endif /* CONFIG_PROC_VMCORE */ | 486 | #endif /* CONFIG_PROC_VMCORE */ |
457 | 487 | ||
458 | void __init | 488 | void __init |
diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile index 38fa6e49e791..46edf8444c7e 100644 --- a/arch/ia64/lib/Makefile +++ b/arch/ia64/lib/Makefile | |||
@@ -9,12 +9,11 @@ lib-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \ | |||
9 | checksum.o clear_page.o csum_partial_copy.o \ | 9 | checksum.o clear_page.o csum_partial_copy.o \ |
10 | clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o \ | 10 | clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o \ |
11 | flush.o ip_fast_csum.o do_csum.o \ | 11 | flush.o ip_fast_csum.o do_csum.o \ |
12 | memset.o strlen.o | 12 | memset.o strlen.o xor.o |
13 | 13 | ||
14 | lib-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o | 14 | lib-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o |
15 | lib-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o | 15 | lib-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o |
16 | lib-$(CONFIG_PERFMON) += carta_random.o | 16 | lib-$(CONFIG_PERFMON) += carta_random.o |
17 | lib-$(CONFIG_MD_RAID456) += xor.o | ||
18 | 17 | ||
19 | AFLAGS___divdi3.o = | 18 | AFLAGS___divdi3.o = |
20 | AFLAGS___udivdi3.o = -DUNSIGNED | 19 | AFLAGS___udivdi3.o = -DUNSIGNED |
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c index ca4d41e5f177..fb0f4698f5d0 100644 --- a/arch/ia64/mm/contig.c +++ b/arch/ia64/mm/contig.c | |||
@@ -197,11 +197,6 @@ find_memory (void) | |||
197 | 197 | ||
198 | find_initrd(); | 198 | find_initrd(); |
199 | 199 | ||
200 | #ifdef CONFIG_CRASH_DUMP | ||
201 | /* If we are doing a crash dump, we still need to know the real mem | ||
202 | * size before original memory map is reset. */ | ||
203 | saved_max_pfn = max_pfn; | ||
204 | #endif | ||
205 | } | 200 | } |
206 | 201 | ||
207 | #ifdef CONFIG_SMP | 202 | #ifdef CONFIG_SMP |
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index 16835108bb5b..11a2d8825d89 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c | |||
@@ -480,12 +480,6 @@ void __init find_memory(void) | |||
480 | max_pfn = max_low_pfn; | 480 | max_pfn = max_low_pfn; |
481 | 481 | ||
482 | find_initrd(); | 482 | find_initrd(); |
483 | |||
484 | #ifdef CONFIG_CRASH_DUMP | ||
485 | /* If we are doing a crash dump, we still need to know the real mem | ||
486 | * size before original memory map is reset. */ | ||
487 | saved_max_pfn = max_pfn; | ||
488 | #endif | ||
489 | } | 483 | } |
490 | 484 | ||
491 | #ifdef CONFIG_SMP | 485 | #ifdef CONFIG_SMP |
diff --git a/arch/m68knommu/kernel/setup.c b/arch/m68knommu/kernel/setup.c index d5c25d27b64d..8133b1047353 100644 --- a/arch/m68knommu/kernel/setup.c +++ b/arch/m68knommu/kernel/setup.c | |||
@@ -51,7 +51,7 @@ static void dummy_waitbut(void) | |||
51 | { | 51 | { |
52 | } | 52 | } |
53 | 53 | ||
54 | void (*mach_sched_init) (irqreturn_t (*handler)(int, void *, struct pt_regs *)); | 54 | void (*mach_sched_init) (irq_handler_t handler); |
55 | void (*mach_tick)( void ); | 55 | void (*mach_tick)( void ); |
56 | /* machine dependent keyboard functions */ | 56 | /* machine dependent keyboard functions */ |
57 | int (*mach_keyb_init) (void); | 57 | int (*mach_keyb_init) (void); |
@@ -66,7 +66,7 @@ void (*mach_trap_init) (void); | |||
66 | /* machine dependent timer functions */ | 66 | /* machine dependent timer functions */ |
67 | unsigned long (*mach_gettimeoffset) (void); | 67 | unsigned long (*mach_gettimeoffset) (void); |
68 | void (*mach_gettod) (int*, int*, int*, int*, int*, int*); | 68 | void (*mach_gettod) (int*, int*, int*, int*, int*, int*); |
69 | int (*mach_hwclk) (int, struct hwclk_time*); | 69 | int (*mach_hwclk) (int, struct rtc_time*); |
70 | int (*mach_set_clock_mmss) (unsigned long); | 70 | int (*mach_set_clock_mmss) (unsigned long); |
71 | void (*mach_mksound)( unsigned int count, unsigned int ticks ); | 71 | void (*mach_mksound)( unsigned int count, unsigned int ticks ); |
72 | void (*mach_reset)( void ); | 72 | void (*mach_reset)( void ); |
diff --git a/arch/m68knommu/platform/5307/ints.c b/arch/m68knommu/platform/5307/ints.c index 20f12a19a522..751633038c4b 100644 --- a/arch/m68knommu/platform/5307/ints.c +++ b/arch/m68knommu/platform/5307/ints.c | |||
@@ -42,7 +42,6 @@ static irq_node_t nodes[NUM_IRQ_NODES]; | |||
42 | /* The number of spurious interrupts */ | 42 | /* The number of spurious interrupts */ |
43 | volatile unsigned int num_spurious; | 43 | volatile unsigned int num_spurious; |
44 | 44 | ||
45 | unsigned int local_bh_count[NR_CPUS]; | ||
46 | unsigned int local_irq_count[NR_CPUS]; | 45 | unsigned int local_irq_count[NR_CPUS]; |
47 | 46 | ||
48 | static irqreturn_t default_irq_handler(int irq, void *ptr) | 47 | static irqreturn_t default_irq_handler(int irq, void *ptr) |
diff --git a/arch/m68knommu/platform/68328/ints.c b/arch/m68knommu/platform/68328/ints.c index 2dda7339aae5..3de6e337554e 100644 --- a/arch/m68knommu/platform/68328/ints.c +++ b/arch/m68knommu/platform/68328/ints.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
16 | #include <linux/kernel_stat.h> | 16 | #include <linux/kernel_stat.h> |
17 | #include <linux/errno.h> | 17 | #include <linux/errno.h> |
18 | #include <linux/interrupt.h> | ||
18 | 19 | ||
19 | #include <asm/system.h> | 20 | #include <asm/system.h> |
20 | #include <asm/irq.h> | 21 | #include <asm/irq.h> |
@@ -64,7 +65,7 @@ asmlinkage void trap44(void); | |||
64 | asmlinkage void trap45(void); | 65 | asmlinkage void trap45(void); |
65 | asmlinkage void trap46(void); | 66 | asmlinkage void trap46(void); |
66 | asmlinkage void trap47(void); | 67 | asmlinkage void trap47(void); |
67 | asmlinkage irqreturn_t bad_interrupt(int, void *, struct pt_regs *); | 68 | asmlinkage irqreturn_t bad_interrupt(int, void *); |
68 | asmlinkage irqreturn_t inthandler(void); | 69 | asmlinkage irqreturn_t inthandler(void); |
69 | asmlinkage irqreturn_t inthandler1(void); | 70 | asmlinkage irqreturn_t inthandler1(void); |
70 | asmlinkage irqreturn_t inthandler2(void); | 71 | asmlinkage irqreturn_t inthandler2(void); |
@@ -121,7 +122,7 @@ void init_IRQ(void) | |||
121 | 122 | ||
122 | int request_irq( | 123 | int request_irq( |
123 | unsigned int irq, | 124 | unsigned int irq, |
124 | irqreturn_t (*handler)(int, void *, struct pt_regs *), | 125 | irq_handler_t handler, |
125 | unsigned long flags, | 126 | unsigned long flags, |
126 | const char *devname, | 127 | const char *devname, |
127 | void *dev_id) | 128 | void *dev_id) |
diff --git a/arch/m68knommu/platform/68328/timers.c b/arch/m68knommu/platform/68328/timers.c index 438ef6ee9720..ef067f4c3cd4 100644 --- a/arch/m68knommu/platform/68328/timers.c +++ b/arch/m68knommu/platform/68328/timers.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/types.h> | 17 | #include <linux/types.h> |
18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
19 | #include <linux/mm.h> | 19 | #include <linux/mm.h> |
20 | #include <linux/interrupt.h> | ||
20 | #include <asm/setup.h> | 21 | #include <asm/setup.h> |
21 | #include <asm/system.h> | 22 | #include <asm/system.h> |
22 | #include <asm/pgtable.h> | 23 | #include <asm/pgtable.h> |
@@ -52,7 +53,7 @@ | |||
52 | 53 | ||
53 | /***************************************************************************/ | 54 | /***************************************************************************/ |
54 | 55 | ||
55 | void m68328_timer_init(irqreturn_t (*timer_routine) (int, void *, struct pt_regs *)) | 56 | void m68328_timer_init(irq_handler_t timer_routine) |
56 | { | 57 | { |
57 | /* disable timer 1 */ | 58 | /* disable timer 1 */ |
58 | TCTL = 0; | 59 | TCTL = 0; |
diff --git a/arch/m68knommu/platform/68360/config.c b/arch/m68knommu/platform/68360/config.c index 1b36f6261764..4ff13bd51ffd 100644 --- a/arch/m68knommu/platform/68360/config.c +++ b/arch/m68knommu/platform/68360/config.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/mm.h> | 16 | #include <linux/mm.h> |
17 | #include <linux/tty.h> | 17 | #include <linux/tty.h> |
18 | #include <linux/console.h> | 18 | #include <linux/console.h> |
19 | #include <linux/interrupt.h> | ||
19 | 20 | ||
20 | #include <asm/setup.h> | 21 | #include <asm/setup.h> |
21 | #include <asm/system.h> | 22 | #include <asm/system.h> |
@@ -50,7 +51,7 @@ extern unsigned long int system_clock; //In kernel setup.c | |||
50 | 51 | ||
51 | extern void config_M68360_irq(void); | 52 | extern void config_M68360_irq(void); |
52 | 53 | ||
53 | void BSP_sched_init(void (*timer_routine)(int, void *, struct pt_regs *)) | 54 | void BSP_sched_init(irq_handler_t timer_routine) |
54 | { | 55 | { |
55 | unsigned char prescaler; | 56 | unsigned char prescaler; |
56 | unsigned short tgcr_save; | 57 | unsigned short tgcr_save; |
diff --git a/arch/m68knommu/platform/68EZ328/config.c b/arch/m68knommu/platform/68EZ328/config.c index 659b80aca118..ab36551fc969 100644 --- a/arch/m68knommu/platform/68EZ328/config.c +++ b/arch/m68knommu/platform/68EZ328/config.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/mm.h> | 19 | #include <linux/mm.h> |
20 | #include <linux/tty.h> | 20 | #include <linux/tty.h> |
21 | #include <linux/console.h> | 21 | #include <linux/console.h> |
22 | #include <linux/interrupt.h> | ||
22 | 23 | ||
23 | #include <asm/setup.h> | 24 | #include <asm/setup.h> |
24 | #include <asm/system.h> | 25 | #include <asm/system.h> |
@@ -31,7 +32,7 @@ | |||
31 | 32 | ||
32 | /***************************************************************************/ | 33 | /***************************************************************************/ |
33 | 34 | ||
34 | void m68328_timer_init(irqreturn_t (*timer_routine) (int, void *, struct pt_regs *)); | 35 | void m68328_timer_init(irq_handler_t timer_routine); |
35 | void m68328_timer_tick(void); | 36 | void m68328_timer_tick(void); |
36 | unsigned long m68328_timer_gettimeoffset(void); | 37 | unsigned long m68328_timer_gettimeoffset(void); |
37 | void m68328_timer_gettod(int *year, int *mon, int *day, int *hour, int *min, int *sec); | 38 | void m68328_timer_gettod(int *year, int *mon, int *day, int *hour, int *min, int *sec); |
diff --git a/arch/m68knommu/platform/68VZ328/config.c b/arch/m68knommu/platform/68VZ328/config.c index fcd100b7594d..8abe0f6e7235 100644 --- a/arch/m68knommu/platform/68VZ328/config.c +++ b/arch/m68knommu/platform/68VZ328/config.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/console.h> | 21 | #include <linux/console.h> |
22 | #include <linux/kd.h> | 22 | #include <linux/kd.h> |
23 | #include <linux/netdevice.h> | 23 | #include <linux/netdevice.h> |
24 | #include <linux/interrupt.h> | ||
24 | 25 | ||
25 | #include <asm/setup.h> | 26 | #include <asm/setup.h> |
26 | #include <asm/system.h> | 27 | #include <asm/system.h> |
@@ -36,7 +37,7 @@ | |||
36 | 37 | ||
37 | /***************************************************************************/ | 38 | /***************************************************************************/ |
38 | 39 | ||
39 | void m68328_timer_init(irqreturn_t (*timer_routine) (int, void *, struct pt_regs *)); | 40 | void m68328_timer_init(irq_handler_t timer_routine); |
40 | void m68328_timer_tick(void); | 41 | void m68328_timer_tick(void); |
41 | unsigned long m68328_timer_gettimeoffset(void); | 42 | unsigned long m68328_timer_gettimeoffset(void); |
42 | void m68328_timer_gettod(int *year, int *mon, int *day, int *hour, int *min, int *sec); | 43 | void m68328_timer_gettod(int *year, int *mon, int *day, int *hour, int *min, int *sec); |
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 4ec2dd5455f3..a1cd84f9b3bc 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig | |||
@@ -167,6 +167,7 @@ config MIPS_COBALT | |||
167 | select IRQ_CPU | 167 | select IRQ_CPU |
168 | select MIPS_GT64111 | 168 | select MIPS_GT64111 |
169 | select SYS_HAS_CPU_NEVADA | 169 | select SYS_HAS_CPU_NEVADA |
170 | select SYS_HAS_EARLY_PRINTK | ||
170 | select SYS_SUPPORTS_32BIT_KERNEL | 171 | select SYS_SUPPORTS_32BIT_KERNEL |
171 | select SYS_SUPPORTS_64BIT_KERNEL if EXPERIMENTAL | 172 | select SYS_SUPPORTS_64BIT_KERNEL if EXPERIMENTAL |
172 | select SYS_SUPPORTS_LITTLE_ENDIAN | 173 | select SYS_SUPPORTS_LITTLE_ENDIAN |
@@ -837,7 +838,6 @@ source "arch/mips/tx4927/Kconfig" | |||
837 | source "arch/mips/tx4938/Kconfig" | 838 | source "arch/mips/tx4938/Kconfig" |
838 | source "arch/mips/vr41xx/Kconfig" | 839 | source "arch/mips/vr41xx/Kconfig" |
839 | source "arch/mips/philips/pnx8550/common/Kconfig" | 840 | source "arch/mips/philips/pnx8550/common/Kconfig" |
840 | source "arch/mips/cobalt/Kconfig" | ||
841 | 841 | ||
842 | endmenu | 842 | endmenu |
843 | 843 | ||
diff --git a/arch/mips/cobalt/Kconfig b/arch/mips/cobalt/Kconfig deleted file mode 100644 index 7c42b088d16c..000000000000 --- a/arch/mips/cobalt/Kconfig +++ /dev/null | |||
@@ -1,7 +0,0 @@ | |||
1 | config EARLY_PRINTK | ||
2 | bool "Early console support" | ||
3 | depends on MIPS_COBALT | ||
4 | help | ||
5 | Provide early console support by direct access to the | ||
6 | on board UART. The UART must have been previously | ||
7 | initialised by the boot loader. | ||
diff --git a/arch/mips/cobalt/console.c b/arch/mips/cobalt/console.c index fff20d28114f..ca56b415b8ac 100644 --- a/arch/mips/cobalt/console.c +++ b/arch/mips/cobalt/console.c | |||
@@ -9,11 +9,8 @@ | |||
9 | #include <asm/addrspace.h> | 9 | #include <asm/addrspace.h> |
10 | #include <asm/mach-cobalt/cobalt.h> | 10 | #include <asm/mach-cobalt/cobalt.h> |
11 | 11 | ||
12 | static void putchar(int c) | 12 | void prom_putchar(char c) |
13 | { | 13 | { |
14 | if(c == '\n') | ||
15 | putchar('\r'); | ||
16 | |||
17 | while(!(COBALT_UART[UART_LSR] & UART_LSR_THRE)) | 14 | while(!(COBALT_UART[UART_LSR] & UART_LSR_THRE)) |
18 | ; | 15 | ; |
19 | 16 | ||
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c index 2ef857c3ee53..225755d0c1f6 100644 --- a/arch/mips/kernel/mips_ksyms.c +++ b/arch/mips/kernel/mips_ksyms.c | |||
@@ -37,6 +37,7 @@ EXPORT_SYMBOL(kernel_thread); | |||
37 | * Userspace access stuff. | 37 | * Userspace access stuff. |
38 | */ | 38 | */ |
39 | EXPORT_SYMBOL(__copy_user); | 39 | EXPORT_SYMBOL(__copy_user); |
40 | EXPORT_SYMBOL(__copy_user_inatomic); | ||
40 | EXPORT_SYMBOL(__bzero); | 41 | EXPORT_SYMBOL(__bzero); |
41 | EXPORT_SYMBOL(__strncpy_from_user_nocheck_asm); | 42 | EXPORT_SYMBOL(__strncpy_from_user_nocheck_asm); |
42 | EXPORT_SYMBOL(__strncpy_from_user_asm); | 43 | EXPORT_SYMBOL(__strncpy_from_user_asm); |
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 7c0b3936ba44..0c9a9ff8cd25 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S | |||
@@ -656,6 +656,8 @@ einval: li v0, -EINVAL | |||
656 | sys sys_kexec_load 4 | 656 | sys sys_kexec_load 4 |
657 | sys sys_getcpu 3 | 657 | sys sys_getcpu 3 |
658 | sys sys_epoll_pwait 6 | 658 | sys sys_epoll_pwait 6 |
659 | sys sys_ioprio_set 3 | ||
660 | sys sys_ioprio_get 2 | ||
659 | .endm | 661 | .endm |
660 | 662 | ||
661 | /* We pre-compute the number of _instruction_ bytes needed to | 663 | /* We pre-compute the number of _instruction_ bytes needed to |
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index e569b846e9a3..23f3b118f718 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S | |||
@@ -471,3 +471,6 @@ sys_call_table: | |||
471 | PTR sys_kexec_load /* 5270 */ | 471 | PTR sys_kexec_load /* 5270 */ |
472 | PTR sys_getcpu | 472 | PTR sys_getcpu |
473 | PTR sys_epoll_pwait | 473 | PTR sys_epoll_pwait |
474 | PTR sys_ioprio_set | ||
475 | PTR sys_ioprio_get | ||
476 | .size sys_call_table,.-sys_call_table | ||
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index f17e31e3bff2..6eac28337423 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S | |||
@@ -395,5 +395,8 @@ EXPORT(sysn32_call_table) | |||
395 | PTR compat_sys_set_robust_list | 395 | PTR compat_sys_set_robust_list |
396 | PTR compat_sys_get_robust_list | 396 | PTR compat_sys_get_robust_list |
397 | PTR compat_sys_kexec_load | 397 | PTR compat_sys_kexec_load |
398 | PTR sys_getcpu | 398 | PTR sys_getcpu /* 6275 */ |
399 | PTR compat_sys_epoll_pwait | 399 | PTR compat_sys_epoll_pwait |
400 | PTR sys_ioprio_set | ||
401 | PTR sys_ioprio_get | ||
402 | .size sysn32_call_table,.-sysn32_call_table | ||
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 142c9b70c026..7e74b412a782 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S | |||
@@ -519,4 +519,6 @@ sys_call_table: | |||
519 | PTR compat_sys_kexec_load | 519 | PTR compat_sys_kexec_load |
520 | PTR sys_getcpu | 520 | PTR sys_getcpu |
521 | PTR compat_sys_epoll_pwait | 521 | PTR compat_sys_epoll_pwait |
522 | PTR sys_ioprio_set | ||
523 | PTR sys_ioprio_get /* 4315 */ | ||
522 | .size sys_call_table,.-sys_call_table | 524 | .size sys_call_table,.-sys_call_table |
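
With sys_ioprio_set and sys_ioprio_get now wired into all four MIPS syscall tables, userspace can reach them through syscall(2). A hedged usage sketch; the IOPRIO_WHO_PROCESS value and the class/data split (IOPRIO_CLASS_SHIFT of 13) come from the generic linux/ioprio.h ABI, not from this patch, and the build assumes a libc whose <sys/syscall.h> already defines SYS_ioprio_get for this ABI:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	#define IOPRIO_WHO_PROCESS 1	/* assumed value from linux/ioprio.h */

	int main(void)
	{
		/* pid 0 means the calling process */
		long prio = syscall(SYS_ioprio_get, IOPRIO_WHO_PROCESS, 0);

		if (prio < 0) {
			perror("ioprio_get");
			return 1;
		}
		printf("ioprio class %ld, data %ld\n", prio >> 13, prio & 0x1fff);
		return 0;
	}
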
diff --git a/arch/mips/mips-boards/generic/init.c b/arch/mips/mips-boards/generic/init.c index b11337600129..1acdf091c258 100644 --- a/arch/mips/mips-boards/generic/init.c +++ b/arch/mips/mips-boards/generic/init.c | |||
@@ -251,8 +251,6 @@ void __init mips_ejtag_setup (void) | |||
251 | 251 | ||
252 | void __init prom_init(void) | 252 | void __init prom_init(void) |
253 | { | 253 | { |
254 | u32 start, map, mask, data; | ||
255 | |||
256 | prom_argc = fw_arg0; | 254 | prom_argc = fw_arg0; |
257 | _prom_argv = (int *) fw_arg1; | 255 | _prom_argv = (int *) fw_arg1; |
258 | _prom_envp = (int *) fw_arg2; | 256 | _prom_envp = (int *) fw_arg2; |
@@ -278,6 +276,8 @@ void __init prom_init(void) | |||
278 | mips_revision_corid = MIPS_REVISION_CORID_CORE_EMUL_MSC; | 276 | mips_revision_corid = MIPS_REVISION_CORID_CORE_EMUL_MSC; |
279 | } | 277 | } |
280 | switch(mips_revision_corid) { | 278 | switch(mips_revision_corid) { |
279 | u32 start, map, mask, data; | ||
280 | |||
281 | case MIPS_REVISION_CORID_QED_RM5261: | 281 | case MIPS_REVISION_CORID_QED_RM5261: |
282 | case MIPS_REVISION_CORID_CORE_LV: | 282 | case MIPS_REVISION_CORID_CORE_LV: |
283 | case MIPS_REVISION_CORID_CORE_FPGA: | 283 | case MIPS_REVISION_CORID_CORE_FPGA: |
diff --git a/arch/mips/mips-boards/malta/Makefile b/arch/mips/mips-boards/malta/Makefile index cb7f349b0514..377d9e8f250a 100644 --- a/arch/mips/mips-boards/malta/Makefile +++ b/arch/mips/mips-boards/malta/Makefile | |||
@@ -21,4 +21,4 @@ | |||
21 | 21 | ||
22 | obj-y := malta_int.o malta_setup.o | 22 | obj-y := malta_int.o malta_setup.o |
23 | obj-$(CONFIG_MTD) += malta_mtd.o | 23 | obj-$(CONFIG_MTD) += malta_mtd.o |
24 | obj-$(CONFIG_SMP) += malta_smp.o | 24 | obj-$(CONFIG_MIPS_MT_SMTC) += malta_smtc.o |
diff --git a/arch/mips/mips-boards/malta/malta_smp.c b/arch/mips/mips-boards/malta/malta_smtc.c index cf967170fe29..d1c80f631100 100644 --- a/arch/mips/mips-boards/malta/malta_smp.c +++ b/arch/mips/mips-boards/malta/malta_smtc.c | |||
@@ -1,25 +1,14 @@ | |||
1 | /* | 1 | /* |
2 | * Malta Platform-specific hooks for SMP operation | 2 | * Malta Platform-specific hooks for SMP operation |
3 | */ | 3 | */ |
4 | #include <linux/init.h> | ||
4 | 5 | ||
5 | #include <linux/kernel.h> | 6 | #include <asm/mipsregs.h> |
6 | #include <linux/sched.h> | 7 | #include <asm/mipsmtregs.h> |
7 | #include <linux/cpumask.h> | 8 | #include <asm/smtc.h> |
8 | #include <linux/interrupt.h> | ||
9 | |||
10 | #include <asm/atomic.h> | ||
11 | #include <asm/cpu.h> | ||
12 | #include <asm/processor.h> | ||
13 | #include <asm/system.h> | ||
14 | #include <asm/hardirq.h> | ||
15 | #include <asm/mmu_context.h> | ||
16 | #include <asm/smp.h> | ||
17 | #ifdef CONFIG_MIPS_MT_SMTC | ||
18 | #include <asm/smtc_ipi.h> | 9 | #include <asm/smtc_ipi.h> |
19 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
20 | 10 | ||
21 | /* VPE/SMP Prototype implements platform interfaces directly */ | 11 | /* VPE/SMP Prototype implements platform interfaces directly */ |
22 | #if !defined(CONFIG_MIPS_MT_SMP) | ||
23 | 12 | ||
24 | /* | 13 | /* |
25 | * Cause the specified action to be performed on a targeted "CPU" | 14 | * Cause the specified action to be performed on a targeted "CPU" |
@@ -27,10 +16,8 @@ | |||
27 | 16 | ||
28 | void core_send_ipi(int cpu, unsigned int action) | 17 | void core_send_ipi(int cpu, unsigned int action) |
29 | { | 18 | { |
30 | /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */ | 19 | /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */ |
31 | #ifdef CONFIG_MIPS_MT_SMTC | ||
32 | smtc_send_ipi(cpu, LINUX_SMP_IPI, action); | 20 | smtc_send_ipi(cpu, LINUX_SMP_IPI, action); |
33 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
34 | } | 21 | } |
35 | 22 | ||
36 | /* | 23 | /* |
@@ -39,9 +26,7 @@ void core_send_ipi(int cpu, unsigned int action) | |||
39 | 26 | ||
40 | void prom_boot_secondary(int cpu, struct task_struct *idle) | 27 | void prom_boot_secondary(int cpu, struct task_struct *idle) |
41 | { | 28 | { |
42 | #ifdef CONFIG_MIPS_MT_SMTC | ||
43 | smtc_boot_secondary(cpu, idle); | 29 | smtc_boot_secondary(cpu, idle); |
44 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
45 | } | 30 | } |
46 | 31 | ||
47 | /* | 32 | /* |
@@ -50,7 +35,6 @@ void prom_boot_secondary(int cpu, struct task_struct *idle) | |||
50 | 35 | ||
51 | void prom_init_secondary(void) | 36 | void prom_init_secondary(void) |
52 | { | 37 | { |
53 | #ifdef CONFIG_MIPS_MT_SMTC | ||
54 | void smtc_init_secondary(void); | 38 | void smtc_init_secondary(void); |
55 | int myvpe; | 39 | int myvpe; |
56 | 40 | ||
@@ -65,7 +49,6 @@ void prom_init_secondary(void) | |||
65 | } | 49 | } |
66 | 50 | ||
67 | smtc_init_secondary(); | 51 | smtc_init_secondary(); |
68 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
69 | } | 52 | } |
70 | 53 | ||
71 | /* | 54 | /* |
@@ -93,9 +76,7 @@ void __init plat_prepare_cpus(unsigned int max_cpus) | |||
93 | 76 | ||
94 | void prom_smp_finish(void) | 77 | void prom_smp_finish(void) |
95 | { | 78 | { |
96 | #ifdef CONFIG_MIPS_MT_SMTC | ||
97 | smtc_smp_finish(); | 79 | smtc_smp_finish(); |
98 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
99 | } | 80 | } |
100 | 81 | ||
101 | /* | 82 | /* |
@@ -105,5 +86,3 @@ void prom_smp_finish(void) | |||
105 | void prom_cpus_done(void) | 86 | void prom_cpus_done(void) |
106 | { | 87 | { |
107 | } | 88 | } |
108 | |||
109 | #endif /* CONFIG_MIPS32R2_MT_SMP */ | ||
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c index f32ebde30ccf..560a6de96556 100644 --- a/arch/mips/mm/c-tx39.c +++ b/arch/mips/mm/c-tx39.c | |||
@@ -128,7 +128,6 @@ static inline void tx39_flush_cache_all(void) | |||
128 | return; | 128 | return; |
129 | 129 | ||
130 | tx39_blast_dcache(); | 130 | tx39_blast_dcache(); |
131 | tx39_blast_icache(); | ||
132 | } | 131 | } |
133 | 132 | ||
134 | static inline void tx39___flush_cache_all(void) | 133 | static inline void tx39___flush_cache_all(void) |
@@ -142,24 +141,19 @@ static void tx39_flush_cache_mm(struct mm_struct *mm) | |||
142 | if (!cpu_has_dc_aliases) | 141 | if (!cpu_has_dc_aliases) |
143 | return; | 142 | return; |
144 | 143 | ||
145 | if (cpu_context(smp_processor_id(), mm) != 0) { | 144 | if (cpu_context(smp_processor_id(), mm) != 0) |
146 | tx39_flush_cache_all(); | 145 | tx39_blast_dcache(); |
147 | } | ||
148 | } | 146 | } |
149 | 147 | ||
150 | static void tx39_flush_cache_range(struct vm_area_struct *vma, | 148 | static void tx39_flush_cache_range(struct vm_area_struct *vma, |
151 | unsigned long start, unsigned long end) | 149 | unsigned long start, unsigned long end) |
152 | { | 150 | { |
153 | int exec; | 151 | if (!cpu_has_dc_aliases) |
154 | 152 | return; | |
155 | if (!(cpu_context(smp_processor_id(), vma->vm_mm))) | 153 | if (!(cpu_context(smp_processor_id(), vma->vm_mm))) |
156 | return; | 154 | return; |
157 | 155 | ||
158 | exec = vma->vm_flags & VM_EXEC; | 156 | tx39_blast_dcache(); |
159 | if (cpu_has_dc_aliases || exec) | ||
160 | tx39_blast_dcache(); | ||
161 | if (exec) | ||
162 | tx39_blast_icache(); | ||
163 | } | 157 | } |
164 | 158 | ||
165 | static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn) | 159 | static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn) |
@@ -218,7 +212,7 @@ static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page | |||
218 | 212 | ||
219 | static void local_tx39_flush_data_cache_page(void * addr) | 213 | static void local_tx39_flush_data_cache_page(void * addr) |
220 | { | 214 | { |
221 | tx39_blast_dcache_page(addr); | 215 | tx39_blast_dcache_page((unsigned long)addr); |
222 | } | 216 | } |
223 | 217 | ||
224 | static void tx39_flush_data_cache_page(unsigned long addr) | 218 | static void tx39_flush_data_cache_page(unsigned long addr) |
diff --git a/arch/mips/momentum/jaguar_atx/platform.c b/arch/mips/momentum/jaguar_atx/platform.c index 771e55f39875..561844878a90 100644 --- a/arch/mips/momentum/jaguar_atx/platform.c +++ b/arch/mips/momentum/jaguar_atx/platform.c | |||
@@ -48,6 +48,8 @@ static struct resource mv64x60_eth0_resources[] = { | |||
48 | }; | 48 | }; |
49 | 49 | ||
50 | static struct mv643xx_eth_platform_data eth0_pd = { | 50 | static struct mv643xx_eth_platform_data eth0_pd = { |
51 | .port_number = 0, | ||
52 | |||
51 | .tx_sram_addr = MV_SRAM_BASE_ETH0, | 53 | .tx_sram_addr = MV_SRAM_BASE_ETH0, |
52 | .tx_sram_size = MV_SRAM_TXRING_SIZE, | 54 | .tx_sram_size = MV_SRAM_TXRING_SIZE, |
53 | .tx_queue_size = MV_SRAM_TXRING_SIZE / 16, | 55 | .tx_queue_size = MV_SRAM_TXRING_SIZE / 16, |
@@ -77,6 +79,8 @@ static struct resource mv64x60_eth1_resources[] = { | |||
77 | }; | 79 | }; |
78 | 80 | ||
79 | static struct mv643xx_eth_platform_data eth1_pd = { | 81 | static struct mv643xx_eth_platform_data eth1_pd = { |
82 | .port_number = 1, | ||
83 | |||
80 | .tx_sram_addr = MV_SRAM_BASE_ETH1, | 84 | .tx_sram_addr = MV_SRAM_BASE_ETH1, |
81 | .tx_sram_size = MV_SRAM_TXRING_SIZE, | 85 | .tx_sram_size = MV_SRAM_TXRING_SIZE, |
82 | .tx_queue_size = MV_SRAM_TXRING_SIZE / 16, | 86 | .tx_queue_size = MV_SRAM_TXRING_SIZE / 16, |
@@ -105,7 +109,9 @@ static struct resource mv64x60_eth2_resources[] = { | |||
105 | }, | 109 | }, |
106 | }; | 110 | }; |
107 | 111 | ||
108 | static struct mv643xx_eth_platform_data eth2_pd; | 112 | static struct mv643xx_eth_platform_data eth2_pd = { |
113 | .port_number = 2, | ||
114 | }; | ||
109 | 115 | ||
110 | static struct platform_device eth2_device = { | 116 | static struct platform_device eth2_device = { |
111 | .name = MV643XX_ETH_NAME, | 117 | .name = MV643XX_ETH_NAME, |
diff --git a/arch/mips/momentum/ocelot_3/platform.c b/arch/mips/momentum/ocelot_3/platform.c index b80733f0c66d..44e4c3fc7403 100644 --- a/arch/mips/momentum/ocelot_3/platform.c +++ b/arch/mips/momentum/ocelot_3/platform.c | |||
@@ -48,6 +48,8 @@ static struct resource mv64x60_eth0_resources[] = { | |||
48 | }; | 48 | }; |
49 | 49 | ||
50 | static struct mv643xx_eth_platform_data eth0_pd = { | 50 | static struct mv643xx_eth_platform_data eth0_pd = { |
51 | .port_number = 0, | ||
52 | |||
51 | .tx_sram_addr = MV_SRAM_BASE_ETH0, | 53 | .tx_sram_addr = MV_SRAM_BASE_ETH0, |
52 | .tx_sram_size = MV_SRAM_TXRING_SIZE, | 54 | .tx_sram_size = MV_SRAM_TXRING_SIZE, |
53 | .tx_queue_size = MV_SRAM_TXRING_SIZE / 16, | 55 | .tx_queue_size = MV_SRAM_TXRING_SIZE / 16, |
@@ -77,6 +79,8 @@ static struct resource mv64x60_eth1_resources[] = { | |||
77 | }; | 79 | }; |
78 | 80 | ||
79 | static struct mv643xx_eth_platform_data eth1_pd = { | 81 | static struct mv643xx_eth_platform_data eth1_pd = { |
82 | .port_number = 1, | ||
83 | |||
80 | .tx_sram_addr = MV_SRAM_BASE_ETH1, | 84 | .tx_sram_addr = MV_SRAM_BASE_ETH1, |
81 | .tx_sram_size = MV_SRAM_TXRING_SIZE, | 85 | .tx_sram_size = MV_SRAM_TXRING_SIZE, |
82 | .tx_queue_size = MV_SRAM_TXRING_SIZE / 16, | 86 | .tx_queue_size = MV_SRAM_TXRING_SIZE / 16, |
@@ -105,7 +109,9 @@ static struct resource mv64x60_eth2_resources[] = { | |||
105 | }, | 109 | }, |
106 | }; | 110 | }; |
107 | 111 | ||
108 | static struct mv643xx_eth_platform_data eth2_pd; | 112 | static struct mv643xx_eth_platform_data eth2_pd = { |
113 | .port_number = 2, | ||
114 | }; | ||
109 | 115 | ||
110 | static struct platform_device eth2_device = { | 116 | static struct platform_device eth2_device = { |
111 | .name = MV643XX_ETH_NAME, | 117 | .name = MV643XX_ETH_NAME, |
diff --git a/arch/mips/momentum/ocelot_c/platform.c b/arch/mips/momentum/ocelot_c/platform.c index f7cd303f3eba..7780aa0c6555 100644 --- a/arch/mips/momentum/ocelot_c/platform.c +++ b/arch/mips/momentum/ocelot_c/platform.c | |||
@@ -47,6 +47,8 @@ static struct resource mv64x60_eth0_resources[] = { | |||
47 | }; | 47 | }; |
48 | 48 | ||
49 | static struct mv643xx_eth_platform_data eth0_pd = { | 49 | static struct mv643xx_eth_platform_data eth0_pd = { |
50 | .port_number = 0, | ||
51 | |||
50 | .tx_sram_addr = MV_SRAM_BASE_ETH0, | 52 | .tx_sram_addr = MV_SRAM_BASE_ETH0, |
51 | .tx_sram_size = MV_SRAM_TXRING_SIZE, | 53 | .tx_sram_size = MV_SRAM_TXRING_SIZE, |
52 | .tx_queue_size = MV_SRAM_TXRING_SIZE / 16, | 54 | .tx_queue_size = MV_SRAM_TXRING_SIZE / 16, |
@@ -76,6 +78,8 @@ static struct resource mv64x60_eth1_resources[] = { | |||
76 | }; | 78 | }; |
77 | 79 | ||
78 | static struct mv643xx_eth_platform_data eth1_pd = { | 80 | static struct mv643xx_eth_platform_data eth1_pd = { |
81 | .port_number = 1, | ||
82 | |||
79 | .tx_sram_addr = MV_SRAM_BASE_ETH1, | 83 | .tx_sram_addr = MV_SRAM_BASE_ETH1, |
80 | .tx_sram_size = MV_SRAM_TXRING_SIZE, | 84 | .tx_sram_size = MV_SRAM_TXRING_SIZE, |
81 | .tx_queue_size = MV_SRAM_TXRING_SIZE / 16, | 85 | .tx_queue_size = MV_SRAM_TXRING_SIZE / 16, |
diff --git a/arch/mips/sgi-ip27/ip27-init.c b/arch/mips/sgi-ip27/ip27-init.c index 9094baf31d0e..74158d349630 100644 --- a/arch/mips/sgi-ip27/ip27-init.c +++ b/arch/mips/sgi-ip27/ip27-init.c | |||
@@ -191,7 +191,6 @@ static inline void ioc3_eth_init(void) | |||
191 | ioc3->eier = 0; | 191 | ioc3->eier = 0; |
192 | } | 192 | } |
193 | 193 | ||
194 | extern void ip27_setup_console(void); | ||
195 | extern void ip27_time_init(void); | 194 | extern void ip27_time_init(void); |
196 | extern void ip27_reboot_setup(void); | 195 | extern void ip27_reboot_setup(void); |
197 | 196 | ||
@@ -200,7 +199,6 @@ void __init plat_mem_setup(void) | |||
200 | hubreg_t p, e, n_mode; | 199 | hubreg_t p, e, n_mode; |
201 | nasid_t nid; | 200 | nasid_t nid; |
202 | 201 | ||
203 | ip27_setup_console(); | ||
204 | ip27_reboot_setup(); | 202 | ip27_reboot_setup(); |
205 | 203 | ||
206 | /* | 204 | /* |
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 919fbf568495..100930826850 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c | |||
@@ -968,7 +968,6 @@ void pci_scan_msi_device(struct pci_dev *dev) {} | |||
968 | int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) {return -1;} | 968 | int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) {return -1;} |
969 | void pci_disable_msix(struct pci_dev *dev) {} | 969 | void pci_disable_msix(struct pci_dev *dev) {} |
970 | void msi_remove_pci_irq_vectors(struct pci_dev *dev) {} | 970 | void msi_remove_pci_irq_vectors(struct pci_dev *dev) {} |
971 | void disable_msi_mode(struct pci_dev *dev, int pos, int type) {} | ||
972 | void pci_no_msi(void) {} | 971 | void pci_no_msi(void) {} |
973 | EXPORT_SYMBOL(pci_enable_msix); | 972 | EXPORT_SYMBOL(pci_enable_msix); |
974 | EXPORT_SYMBOL(pci_disable_msix); | 973 | EXPORT_SYMBOL(pci_disable_msix); |
diff --git a/arch/powerpc/platforms/chrp/pegasos_eth.c b/arch/powerpc/platforms/chrp/pegasos_eth.c index 6ad4b1a72c96..71045677559a 100644 --- a/arch/powerpc/platforms/chrp/pegasos_eth.c +++ b/arch/powerpc/platforms/chrp/pegasos_eth.c | |||
@@ -58,6 +58,7 @@ static struct resource mv643xx_eth0_resources[] = { | |||
58 | 58 | ||
59 | 59 | ||
60 | static struct mv643xx_eth_platform_data eth0_pd = { | 60 | static struct mv643xx_eth_platform_data eth0_pd = { |
61 | .port_number = 0, | ||
61 | .tx_sram_addr = PEGASOS2_SRAM_BASE_ETH0, | 62 | .tx_sram_addr = PEGASOS2_SRAM_BASE_ETH0, |
62 | .tx_sram_size = PEGASOS2_SRAM_TXRING_SIZE, | 63 | .tx_sram_size = PEGASOS2_SRAM_TXRING_SIZE, |
63 | .tx_queue_size = PEGASOS2_SRAM_TXRING_SIZE/16, | 64 | .tx_queue_size = PEGASOS2_SRAM_TXRING_SIZE/16, |
@@ -87,6 +88,7 @@ static struct resource mv643xx_eth1_resources[] = { | |||
87 | }; | 88 | }; |
88 | 89 | ||
89 | static struct mv643xx_eth_platform_data eth1_pd = { | 90 | static struct mv643xx_eth_platform_data eth1_pd = { |
91 | .port_number = 1, | ||
90 | .tx_sram_addr = PEGASOS2_SRAM_BASE_ETH1, | 92 | .tx_sram_addr = PEGASOS2_SRAM_BASE_ETH1, |
91 | .tx_sram_size = PEGASOS2_SRAM_TXRING_SIZE, | 93 | .tx_sram_size = PEGASOS2_SRAM_TXRING_SIZE, |
92 | .tx_queue_size = PEGASOS2_SRAM_TXRING_SIZE/16, | 94 | .tx_queue_size = PEGASOS2_SRAM_TXRING_SIZE/16, |
diff --git a/arch/ppc/syslib/mv64x60.c b/arch/ppc/syslib/mv64x60.c index 3b039c30a439..a6f8b686ea83 100644 --- a/arch/ppc/syslib/mv64x60.c +++ b/arch/ppc/syslib/mv64x60.c | |||
@@ -339,7 +339,9 @@ static struct resource mv64x60_eth0_resources[] = { | |||
339 | }, | 339 | }, |
340 | }; | 340 | }; |
341 | 341 | ||
342 | static struct mv643xx_eth_platform_data eth0_pd; | 342 | static struct mv643xx_eth_platform_data eth0_pd = { |
343 | .port_number = 0, | ||
344 | }; | ||
343 | 345 | ||
344 | static struct platform_device eth0_device = { | 346 | static struct platform_device eth0_device = { |
345 | .name = MV643XX_ETH_NAME, | 347 | .name = MV643XX_ETH_NAME, |
@@ -362,7 +364,9 @@ static struct resource mv64x60_eth1_resources[] = { | |||
362 | }, | 364 | }, |
363 | }; | 365 | }; |
364 | 366 | ||
365 | static struct mv643xx_eth_platform_data eth1_pd; | 367 | static struct mv643xx_eth_platform_data eth1_pd = { |
368 | .port_number = 1, | ||
369 | }; | ||
366 | 370 | ||
367 | static struct platform_device eth1_device = { | 371 | static struct platform_device eth1_device = { |
368 | .name = MV643XX_ETH_NAME, | 372 | .name = MV643XX_ETH_NAME, |
@@ -385,7 +389,9 @@ static struct resource mv64x60_eth2_resources[] = { | |||
385 | }, | 389 | }, |
386 | }; | 390 | }; |
387 | 391 | ||
388 | static struct mv643xx_eth_platform_data eth2_pd; | 392 | static struct mv643xx_eth_platform_data eth2_pd = { |
393 | .port_number = 2, | ||
394 | }; | ||
389 | 395 | ||
390 | static struct platform_device eth2_device = { | 396 | static struct platform_device eth2_device = { |
391 | .name = MV643XX_ETH_NAME, | 397 | .name = MV643XX_ETH_NAME, |
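
All of the board files above now set .port_number explicitly in their mv643xx_eth_platform_data instead of leaving it zero-initialized. A hedged sketch of the consuming side, not the actual mv643xx_eth probe routine, with the header location and function name assumed for this tree:

	#include <linux/kernel.h>
	#include <linux/errno.h>
	#include <linux/platform_device.h>
	#include <linux/mv643xx.h>	/* assumed home of mv643xx_eth_platform_data */

	static int example_eth_probe(struct platform_device *pdev)
	{
		struct mv643xx_eth_platform_data *pd = pdev->dev.platform_data;

		if (pd == NULL)
			return -ENODEV;

		/* the port number now comes straight from the board file */
		printk(KERN_INFO "%s: ethernet port %d\n", pdev->name, pd->port_number);
		return 0;
	}
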
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index d9425f59be91..0f293aa7b0fa 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -376,6 +376,8 @@ config SHARED_KERNEL | |||
376 | Select this option, if you want to share the text segment of the | 376 | Select this option, if you want to share the text segment of the |
377 | Linux kernel between different VM guests. This reduces memory | 377 | Linux kernel between different VM guests. This reduces memory |
378 | usage with lots of guests but greatly increases kernel size. | 378 | usage with lots of guests but greatly increases kernel size. |
379 | Also, if a kernel was IPL'ed from a shared segment, the kexec system | ||
380 | call will not work. | ||
379 | You should only select this option if you know what you are | 381 | You should only select this option if you know what you are |
380 | doing and want to exploit this feature. | 382 | doing and want to exploit this feature. |
381 | 383 | ||
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S index da7c8bb80982..dc364c1419af 100644 --- a/arch/s390/kernel/head31.S +++ b/arch/s390/kernel/head31.S | |||
@@ -121,7 +121,7 @@ startup_continue: | |||
121 | .long .Lduct # cr2: dispatchable unit control table | 121 | .long .Lduct # cr2: dispatchable unit control table |
122 | .long 0 # cr3: instruction authorization | 122 | .long 0 # cr3: instruction authorization |
123 | .long 0 # cr4: instruction authorization | 123 | .long 0 # cr4: instruction authorization |
124 | .long 0xffffffff # cr5: primary-aste origin | 124 | .long .Lduct # cr5: primary-aste origin |
125 | .long 0 # cr6: I/O interrupts | 125 | .long 0 # cr6: I/O interrupts |
126 | .long 0 # cr7: secondary space segment table | 126 | .long 0 # cr7: secondary space segment table |
127 | .long 0 # cr8: access registers translation | 127 | .long 0 # cr8: access registers translation |
@@ -132,8 +132,6 @@ startup_continue: | |||
132 | .long 0 # cr13: home space segment table | 132 | .long 0 # cr13: home space segment table |
133 | .long 0xc0000000 # cr14: machine check handling off | 133 | .long 0xc0000000 # cr14: machine check handling off |
134 | .long 0 # cr15: linkage stack operations | 134 | .long 0 # cr15: linkage stack operations |
135 | .Lduct: .long 0,0,0,0,0,0,0,0 | ||
136 | .long 0,0,0,0,0,0,0,0 | ||
137 | .Lpcfpu:.long 0x00080000,0x80000000 + .Lchkfpu | 135 | .Lpcfpu:.long 0x00080000,0x80000000 + .Lchkfpu |
138 | .Lpccsp:.long 0x00080000,0x80000000 + .Lchkcsp | 136 | .Lpccsp:.long 0x00080000,0x80000000 + .Lchkcsp |
139 | .Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg | 137 | .Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg |
@@ -147,6 +145,13 @@ startup_continue: | |||
147 | .Linittu: .long init_thread_union | 145 | .Linittu: .long init_thread_union |
148 | .Lstartup_init: | 146 | .Lstartup_init: |
149 | .long startup_init | 147 | .long startup_init |
148 | .align 64 | ||
149 | .Lduct: .long 0,0,0,0,.Lduald,0,0,0 | ||
150 | .long 0,0,0,0,0,0,0,0 | ||
151 | .align 128 | ||
152 | .Lduald:.rept 8 | ||
153 | .long 0x80000000,0,0,0 # invalid access-list entries | ||
154 | .endr | ||
150 | 155 | ||
151 | .org 0x12000 | 156 | .org 0x12000 |
152 | .globl _ehead | 157 | .globl _ehead |
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index af09e18cc5d0..37010709fe68 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S | |||
@@ -134,7 +134,7 @@ startup_continue: | |||
134 | .quad .Lduct # cr2: dispatchable unit control table | 134 | .quad .Lduct # cr2: dispatchable unit control table |
135 | .quad 0 # cr3: instruction authorization | 135 | .quad 0 # cr3: instruction authorization |
136 | .quad 0 # cr4: instruction authorization | 136 | .quad 0 # cr4: instruction authorization |
137 | .quad 0xffffffffffffffff # cr5: primary-aste origin | 137 | .quad .Lduct # cr5: primary-aste origin |
138 | .quad 0 # cr6: I/O interrupts | 138 | .quad 0 # cr6: I/O interrupts |
139 | .quad 0 # cr7: secondary space segment table | 139 | .quad 0 # cr7: secondary space segment table |
140 | .quad 0 # cr8: access registers translation | 140 | .quad 0 # cr8: access registers translation |
@@ -145,14 +145,19 @@ startup_continue: | |||
145 | .quad 0 # cr13: home space segment table | 145 | .quad 0 # cr13: home space segment table |
146 | .quad 0xc0000000 # cr14: machine check handling off | 146 | .quad 0xc0000000 # cr14: machine check handling off |
147 | .quad 0 # cr15: linkage stack operations | 147 | .quad 0 # cr15: linkage stack operations |
148 | .Lduct: .long 0,0,0,0,0,0,0,0 | ||
149 | .long 0,0,0,0,0,0,0,0 | ||
150 | .Lpcmsk:.quad 0x0000000180000000 | 148 | .Lpcmsk:.quad 0x0000000180000000 |
151 | .L4malign:.quad 0xffffffffffc00000 | 149 | .L4malign:.quad 0xffffffffffc00000 |
152 | .Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 | 150 | .Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 |
153 | .Lnop: .long 0x07000700 | 151 | .Lnop: .long 0x07000700 |
154 | .Lparmaddr: | 152 | .Lparmaddr: |
155 | .quad PARMAREA | 153 | .quad PARMAREA |
154 | .align 64 | ||
155 | .Lduct: .long 0,0,0,0,.Lduald,0,0,0 | ||
156 | .long 0,0,0,0,0,0,0,0 | ||
157 | .align 128 | ||
158 | .Lduald:.rept 8 | ||
159 | .long 0x80000000,0,0,0 # invalid access-list entries | ||
160 | .endr | ||
156 | 161 | ||
157 | .org 0x12000 | 162 | .org 0x12000 |
158 | .globl _ehead | 163 | .globl _ehead |
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 5a863a3bf10c..d125a4ead08d 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c | |||
@@ -1066,7 +1066,7 @@ static void do_reset_calls(void) | |||
1066 | reset->fn(); | 1066 | reset->fn(); |
1067 | } | 1067 | } |
1068 | 1068 | ||
1069 | extern __u32 dump_prefix_page; | 1069 | u32 dump_prefix_page; |
1070 | 1070 | ||
1071 | void s390_reset_system(void) | 1071 | void s390_reset_system(void) |
1072 | { | 1072 | { |
@@ -1078,7 +1078,7 @@ void s390_reset_system(void) | |||
1078 | lc->panic_stack = S390_lowcore.panic_stack; | 1078 | lc->panic_stack = S390_lowcore.panic_stack; |
1079 | 1079 | ||
1080 | /* Save prefix page address for dump case */ | 1080 | /* Save prefix page address for dump case */ |
1081 | dump_prefix_page = (unsigned long) lc; | 1081 | dump_prefix_page = (u32)(unsigned long) lc; |
1082 | 1082 | ||
1083 | /* Disable prefixing */ | 1083 | /* Disable prefixing */ |
1084 | set_prefix(0); | 1084 | set_prefix(0); |
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index a466bab6677e..8af549e95730 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c | |||
@@ -337,21 +337,14 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) | |||
337 | } | 337 | } |
338 | 338 | ||
339 | p = get_kprobe(addr); | 339 | p = get_kprobe(addr); |
340 | if (!p) { | 340 | if (!p) |
341 | if (*addr != BREAKPOINT_INSTRUCTION) { | 341 | /* |
342 | /* | 342 | * No kprobe at this address. The fault has not been |
343 | * The breakpoint instruction was removed right | 343 | * caused by a kprobe breakpoint. The race of breakpoint |
344 | * after we hit it. Another cpu has removed | 344 | * vs. kprobe remove does not exist because on s390 we |
345 | * either a probepoint or a debugger breakpoint | 345 | * use stop_machine_run to arm/disarm the breakpoints. |
346 | * at this address. In either case, no further | 346 | */ |
347 | * handling of this interrupt is appropriate. | ||
348 | * | ||
349 | */ | ||
350 | ret = 1; | ||
351 | } | ||
352 | /* Not one of ours: let kernel handle it */ | ||
353 | goto no_kprobe; | 347 | goto no_kprobe; |
354 | } | ||
355 | 348 | ||
356 | kcb->kprobe_status = KPROBE_HIT_ACTIVE; | 349 | kcb->kprobe_status = KPROBE_HIT_ACTIVE; |
357 | set_current_kprobe(p, regs, kcb); | 350 | set_current_kprobe(p, regs, kcb); |
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index 52f57af252b4..3c77dd36994c 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <asm/system.h> | 19 | #include <asm/system.h> |
20 | #include <asm/smp.h> | 20 | #include <asm/smp.h> |
21 | #include <asm/reset.h> | 21 | #include <asm/reset.h> |
22 | #include <asm/ipl.h> | ||
22 | 23 | ||
23 | typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long); | 24 | typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long); |
24 | 25 | ||
@@ -29,6 +30,10 @@ int machine_kexec_prepare(struct kimage *image) | |||
29 | { | 30 | { |
30 | void *reboot_code_buffer; | 31 | void *reboot_code_buffer; |
31 | 32 | ||
33 | /* Can't replace kernel image since it is read-only. */ | ||
34 | if (ipl_flags & IPL_NSS_VALID) | ||
35 | return -ENOSYS; | ||
36 | |||
32 | /* We don't support anything but the default image type for now. */ | 37 | /* We don't support anything but the default image type for now. */ |
33 | if (image->type != KEXEC_TYPE_DEFAULT) | 38 | if (image->type != KEXEC_TYPE_DEFAULT) |
34 | return -EINVAL; | 39 | return -EINVAL; |
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S index c3f4d9b95083..2f481cc3d1c9 100644 --- a/arch/s390/kernel/reipl.S +++ b/arch/s390/kernel/reipl.S | |||
@@ -8,6 +8,10 @@ | |||
8 | 8 | ||
9 | #include <asm/lowcore.h> | 9 | #include <asm/lowcore.h> |
10 | 10 | ||
11 | # | ||
12 | # do_reipl_asm | ||
13 | # Parameter: r2 = schid of reipl device | ||
14 | # | ||
11 | .globl do_reipl_asm | 15 | .globl do_reipl_asm |
12 | do_reipl_asm: basr %r13,0 | 16 | do_reipl_asm: basr %r13,0 |
13 | .Lpg0: lpsw .Lnewpsw-.Lpg0(%r13) | 17 | .Lpg0: lpsw .Lnewpsw-.Lpg0(%r13) |
@@ -16,12 +20,12 @@ do_reipl_asm: basr %r13,0 | |||
16 | stm %r0,%r15,__LC_GPREGS_SAVE_AREA | 20 | stm %r0,%r15,__LC_GPREGS_SAVE_AREA |
17 | stctl %c0,%c15,__LC_CREGS_SAVE_AREA | 21 | stctl %c0,%c15,__LC_CREGS_SAVE_AREA |
18 | stam %a0,%a15,__LC_AREGS_SAVE_AREA | 22 | stam %a0,%a15,__LC_AREGS_SAVE_AREA |
19 | mvc __LC_PREFIX_SAVE_AREA(4),dump_prefix_page-.Lpg0(%r13) | 23 | l %r10,.Ldump_pfx-.Lpg0(%r13) |
24 | mvc __LC_PREFIX_SAVE_AREA(4),0(%r10) | ||
20 | stckc .Lclkcmp-.Lpg0(%r13) | 25 | stckc .Lclkcmp-.Lpg0(%r13) |
21 | mvc __LC_CLOCK_COMP_SAVE_AREA(8),.Lclkcmp-.Lpg0(%r13) | 26 | mvc __LC_CLOCK_COMP_SAVE_AREA(8),.Lclkcmp-.Lpg0(%r13) |
22 | stpt __LC_CPU_TIMER_SAVE_AREA | 27 | stpt __LC_CPU_TIMER_SAVE_AREA |
23 | st %r13, __LC_PSW_SAVE_AREA+4 | 28 | st %r13, __LC_PSW_SAVE_AREA+4 |
24 | |||
25 | lctl %c6,%c6,.Lall-.Lpg0(%r13) | 29 | lctl %c6,%c6,.Lall-.Lpg0(%r13) |
26 | lr %r1,%r2 | 30 | lr %r1,%r2 |
27 | mvc __LC_PGM_NEW_PSW(8),.Lpcnew-.Lpg0(%r13) | 31 | mvc __LC_PGM_NEW_PSW(8),.Lpcnew-.Lpg0(%r13) |
@@ -55,6 +59,7 @@ do_reipl_asm: basr %r13,0 | |||
55 | .align 8 | 59 | .align 8 |
56 | .Lclkcmp: .quad 0x0000000000000000 | 60 | .Lclkcmp: .quad 0x0000000000000000 |
57 | .Lall: .long 0xff000000 | 61 | .Lall: .long 0xff000000 |
62 | .Ldump_pfx: .long dump_prefix_page | ||
58 | .align 8 | 63 | .align 8 |
59 | .Lnewpsw: .long 0x00080000,0x80000000+.Lpg1 | 64 | .Lnewpsw: .long 0x00080000,0x80000000+.Lpg1 |
60 | .Lpcnew: .long 0x00080000,0x80000000+.Lecs | 65 | .Lpcnew: .long 0x00080000,0x80000000+.Lecs |
@@ -79,7 +84,3 @@ do_reipl_asm: basr %r13,0 | |||
79 | .long 0x00000000,0x00000000 | 84 | .long 0x00000000,0x00000000 |
80 | .long 0x00000000,0x00000000 | 85 | .long 0x00000000,0x00000000 |
81 | .long 0x00000000,0x00000000 | 86 | .long 0x00000000,0x00000000 |
82 | .globl dump_prefix_page | ||
83 | dump_prefix_page: | ||
84 | .long 0x00000000 | ||
85 | |||
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S index dbb3eed38865..c41930499a5f 100644 --- a/arch/s390/kernel/reipl64.S +++ b/arch/s390/kernel/reipl64.S | |||
@@ -8,6 +8,12 @@ | |||
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <asm/lowcore.h> | 10 | #include <asm/lowcore.h> |
11 | |||
12 | # | ||
13 | # do_reipl_asm | ||
14 | # Parameter: r2 = schid of reipl device | ||
15 | # | ||
16 | |||
11 | .globl do_reipl_asm | 17 | .globl do_reipl_asm |
12 | do_reipl_asm: basr %r13,0 | 18 | do_reipl_asm: basr %r13,0 |
13 | .Lpg0: lpswe .Lnewpsw-.Lpg0(%r13) | 19 | .Lpg0: lpswe .Lnewpsw-.Lpg0(%r13) |
@@ -20,7 +26,8 @@ do_reipl_asm: basr %r13,0 | |||
20 | stg %r0,__LC_GPREGS_SAVE_AREA-0x1000+8(%r1) | 26 | stg %r0,__LC_GPREGS_SAVE_AREA-0x1000+8(%r1) |
21 | stctg %c0,%c15,__LC_CREGS_SAVE_AREA-0x1000(%r1) | 27 | stctg %c0,%c15,__LC_CREGS_SAVE_AREA-0x1000(%r1) |
22 | stam %a0,%a15,__LC_AREGS_SAVE_AREA-0x1000(%r1) | 28 | stam %a0,%a15,__LC_AREGS_SAVE_AREA-0x1000(%r1) |
23 | mvc __LC_PREFIX_SAVE_AREA-0x1000(4,%r1),dump_prefix_page-.Lpg0(%r13) | 29 | lg %r10,.Ldump_pfx-.Lpg0(%r13) |
30 | mvc __LC_PREFIX_SAVE_AREA-0x1000(4,%r1),0(%r10) | ||
24 | stfpc __LC_FP_CREG_SAVE_AREA-0x1000(%r1) | 31 | stfpc __LC_FP_CREG_SAVE_AREA-0x1000(%r1) |
25 | stckc .Lclkcmp-.Lpg0(%r13) | 32 | stckc .Lclkcmp-.Lpg0(%r13) |
26 | mvc __LC_CLOCK_COMP_SAVE_AREA-0x1000(8,%r1),.Lclkcmp-.Lpg0(%r13) | 33 | mvc __LC_CLOCK_COMP_SAVE_AREA-0x1000(8,%r1),.Lclkcmp-.Lpg0(%r13) |
@@ -64,6 +71,7 @@ do_reipl_asm: basr %r13,0 | |||
64 | .align 8 | 71 | .align 8 |
65 | .Lclkcmp: .quad 0x0000000000000000 | 72 | .Lclkcmp: .quad 0x0000000000000000 |
66 | .Lall: .quad 0x00000000ff000000 | 73 | .Lall: .quad 0x00000000ff000000 |
74 | .Ldump_pfx: .quad dump_prefix_page | ||
67 | .Lregsave: .quad 0x0000000000000000 | 75 | .Lregsave: .quad 0x0000000000000000 |
68 | .align 16 | 76 | .align 16 |
69 | /* | 77 | /* |
@@ -103,6 +111,3 @@ do_reipl_asm: basr %r13,0 | |||
103 | .long 0x00000000,0x00000000 | 111 | .long 0x00000000,0x00000000 |
104 | .long 0x00000000,0x00000000 | 112 | .long 0x00000000,0x00000000 |
105 | .long 0x00000000,0x00000000 | 113 | .long 0x00000000,0x00000000 |
106 | .globl dump_prefix_page | ||
107 | dump_prefix_page: | ||
108 | .long 0x00000000 | ||
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index ecaa432a99f8..97764f710bb7 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -94,10 +94,9 @@ static void __smp_call_function_map(void (*func) (void *info), void *info, | |||
94 | int cpu, local = 0; | 94 | int cpu, local = 0; |
95 | 95 | ||
96 | /* | 96 | /* |
97 | * Can deadlock when interrupts are disabled or if in wrong context, | 97 | * Can deadlock when interrupts are disabled or if in wrong context. |
98 | * caller must disable preemption | ||
99 | */ | 98 | */ |
100 | WARN_ON(irqs_disabled() || in_irq() || preemptible()); | 99 | WARN_ON(irqs_disabled() || in_irq()); |
101 | 100 | ||
102 | /* | 101 | /* |
103 | * Check for local function call. We have to have the same call order | 102 | * Check for local function call. We have to have the same call order |
@@ -152,17 +151,18 @@ out: | |||
152 | * Run a function on all other CPUs. | 151 | * Run a function on all other CPUs. |
153 | * | 152 | * |
154 | * You must not call this function with disabled interrupts or from a | 153 | * You must not call this function with disabled interrupts or from a |
155 | * hardware interrupt handler. Must be called with preemption disabled. | 154 | * hardware interrupt handler. You may call it from a bottom half. |
156 | * You may call it from a bottom half. | ||
157 | */ | 155 | */ |
158 | int smp_call_function(void (*func) (void *info), void *info, int nonatomic, | 156 | int smp_call_function(void (*func) (void *info), void *info, int nonatomic, |
159 | int wait) | 157 | int wait) |
160 | { | 158 | { |
161 | cpumask_t map; | 159 | cpumask_t map; |
162 | 160 | ||
161 | preempt_disable(); | ||
163 | map = cpu_online_map; | 162 | map = cpu_online_map; |
164 | cpu_clear(smp_processor_id(), map); | 163 | cpu_clear(smp_processor_id(), map); |
165 | __smp_call_function_map(func, info, nonatomic, wait, map); | 164 | __smp_call_function_map(func, info, nonatomic, wait, map); |
165 | preempt_enable(); | ||
166 | return 0; | 166 | return 0; |
167 | } | 167 | } |
168 | EXPORT_SYMBOL(smp_call_function); | 168 | EXPORT_SYMBOL(smp_call_function); |
@@ -178,16 +178,17 @@ EXPORT_SYMBOL(smp_call_function); | |||
178 | * Run a function on one processor. | 178 | * Run a function on one processor. |
179 | * | 179 | * |
180 | * You must not call this function with disabled interrupts or from a | 180 | * You must not call this function with disabled interrupts or from a |
181 | * hardware interrupt handler. Must be called with preemption disabled. | 181 | * hardware interrupt handler. You may call it from a bottom half. |
182 | * You may call it from a bottom half. | ||
183 | */ | 182 | */ |
184 | int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic, | 183 | int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic, |
185 | int wait, int cpu) | 184 | int wait, int cpu) |
186 | { | 185 | { |
187 | cpumask_t map = CPU_MASK_NONE; | 186 | cpumask_t map = CPU_MASK_NONE; |
188 | 187 | ||
188 | preempt_disable(); | ||
189 | cpu_set(cpu, map); | 189 | cpu_set(cpu, map); |
190 | __smp_call_function_map(func, info, nonatomic, wait, map); | 190 | __smp_call_function_map(func, info, nonatomic, wait, map); |
191 | preempt_enable(); | ||
191 | return 0; | 192 | return 0; |
192 | } | 193 | } |
193 | EXPORT_SYMBOL(smp_call_function_on); | 194 | EXPORT_SYMBOL(smp_call_function_on); |
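
The two wrappers above now pin the current CPU themselves around __smp_call_function_map(), which samples cpu_online_map and calls smp_processor_id(). A hypothetical caller, shown only as a sketch of the post-patch contract (the function names are made up, not part of this patch), no longer needs its own preempt_disable()/preempt_enable() pair:

#include <linux/smp.h>
#include <linux/preempt.h>

/* Illustrative only: runs on every other online CPU via an IPI. */
static void drain_remote_state(void *info)
{
}

static void kick_other_cpus(void)
{
	/*
	 * Before this patch the caller had to bracket the call with
	 * preempt_disable()/preempt_enable(); now smp_call_function()
	 * takes care of that itself, so a plain call from preemptible,
	 * non-atomic context is sufficient.
	 */
	smp_call_function(drain_remote_state, NULL, 0, 1);
}
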
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 641aef36ccc4..7462aebd3eb6 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c | |||
@@ -108,53 +108,40 @@ void bust_spinlocks(int yes) | |||
108 | } | 108 | } |
109 | 109 | ||
110 | /* | 110 | /* |
111 | * Check which address space is addressed by the access | 111 | * Returns the address space associated with the fault. |
112 | * register in S390_lowcore.exc_access_id. | 112 | * Returns 0 for kernel space, 1 for user space and |
113 | * Returns 1 for user space and 0 for kernel space. | 113 | * 2 for code execution in user space with noexec=on. |
114 | */ | 114 | */ |
115 | static int __check_access_register(struct pt_regs *regs, int error_code) | 115 | static inline int check_space(struct task_struct *tsk) |
116 | { | ||
117 | int areg = S390_lowcore.exc_access_id; | ||
118 | |||
119 | if (areg == 0) | ||
120 | /* Access via access register 0 -> kernel address */ | ||
121 | return 0; | ||
122 | save_access_regs(current->thread.acrs); | ||
123 | if (regs && areg < NUM_ACRS && current->thread.acrs[areg] <= 1) | ||
124 | /* | ||
125 | * access register contains 0 -> kernel address, | ||
126 | * access register contains 1 -> user space address | ||
127 | */ | ||
128 | return current->thread.acrs[areg]; | ||
129 | |||
130 | /* Something unhealthy was done with the access registers... */ | ||
131 | die("page fault via unknown access register", regs, error_code); | ||
132 | do_exit(SIGKILL); | ||
133 | return 0; | ||
134 | } | ||
135 | |||
136 | /* | ||
137 | * Check which address space the address belongs to. | ||
138 | * May return 1 or 2 for user space and 0 for kernel space. | ||
139 | * Returns 2 for user space in primary addressing mode with | ||
140 | * CONFIG_S390_EXEC_PROTECT on and kernel parameter noexec=on. | ||
141 | */ | ||
142 | static inline int check_user_space(struct pt_regs *regs, int error_code) | ||
143 | { | 116 | { |
144 | /* | 117 | /* |
145 | * The lowest two bits of S390_lowcore.trans_exc_code indicate | 118 | * The lowest two bits of S390_lowcore.trans_exc_code |
146 | * which paging table was used: | 119 | * indicate which paging table was used. |
147 | * 0: Primary Segment Table Descriptor | ||
148 | * 1: STD determined via access register | ||
149 | * 2: Secondary Segment Table Descriptor | ||
150 | * 3: Home Segment Table Descriptor | ||
151 | */ | 120 | */ |
152 | int descriptor = S390_lowcore.trans_exc_code & 3; | 121 | int desc = S390_lowcore.trans_exc_code & 3; |
153 | if (unlikely(descriptor == 1)) | 122 | |
154 | return __check_access_register(regs, error_code); | 123 | if (desc == 3) /* Home Segment Table Descriptor */ |
155 | if (descriptor == 2) | 124 | return switch_amode == 0; |
156 | return current->thread.mm_segment.ar4; | 125 | if (desc == 2) /* Secondary Segment Table Descriptor */ |
157 | return ((descriptor != 0) ^ (switch_amode)) << s390_noexec; | 126 | return tsk->thread.mm_segment.ar4; |
127 | #ifdef CONFIG_S390_SWITCH_AMODE | ||
128 | if (unlikely(desc == 1)) { /* STD determined via access register */ | ||
129 | /* %a0 always indicates primary space. */ | ||
130 | if (S390_lowcore.exc_access_id != 0) { | ||
131 | save_access_regs(tsk->thread.acrs); | ||
132 | /* | ||
133 | * An alet of 0 indicates primary space. | ||
134 | * An alet of 1 indicates secondary space. | ||
135 | * Any other alet values generate an | ||
136 | * alen-translation exception. | ||
137 | */ | ||
138 | if (tsk->thread.acrs[S390_lowcore.exc_access_id]) | ||
139 | return tsk->thread.mm_segment.ar4; | ||
140 | } | ||
141 | } | ||
142 | #endif | ||
143 | /* Primary Segment Table Descriptor */ | ||
144 | return switch_amode << s390_noexec; | ||
158 | } | 145 | } |
159 | 146 | ||
160 | /* | 147 | /* |
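
For the primary-space case, the new check_space() packs its answer into switch_amode << s390_noexec. A stand-alone sketch (ordinary user-space C, values only) that prints the resulting truth table, where 0 means kernel space, 1 user space, and 2 a user-space code fetch with noexec=on:

#include <stdio.h>

int main(void)
{
    /* Mirror of "return switch_amode << s390_noexec" in check_space(). */
    for (int switch_amode = 0; switch_amode <= 1; switch_amode++)
        for (int s390_noexec = 0; s390_noexec <= 1; s390_noexec++)
            printf("switch_amode=%d s390_noexec=%d -> space=%d\n",
                   switch_amode, s390_noexec,
                   switch_amode << s390_noexec);
    return 0;
}
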
@@ -265,16 +252,16 @@ out_fault: | |||
265 | * 11 Page translation -> Not present (nullification) | 252 | * 11 Page translation -> Not present (nullification) |
266 | * 3b Region third trans. -> Not present (nullification) | 253 | * 3b Region third trans. -> Not present (nullification) |
267 | */ | 254 | */ |
268 | static inline void __kprobes | 255 | static inline void |
269 | do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection) | 256 | do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection) |
270 | { | 257 | { |
271 | struct task_struct *tsk; | 258 | struct task_struct *tsk; |
272 | struct mm_struct *mm; | 259 | struct mm_struct *mm; |
273 | struct vm_area_struct * vma; | 260 | struct vm_area_struct * vma; |
274 | unsigned long address; | 261 | unsigned long address; |
275 | int user_address; | ||
276 | const struct exception_table_entry *fixup; | 262 | const struct exception_table_entry *fixup; |
277 | int si_code = SEGV_MAPERR; | 263 | int si_code; |
264 | int space; | ||
278 | 265 | ||
279 | tsk = current; | 266 | tsk = current; |
280 | mm = tsk->mm; | 267 | mm = tsk->mm; |
@@ -294,7 +281,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection) | |||
294 | NULL pointer write access in kernel mode. */ | 281 | NULL pointer write access in kernel mode. */ |
295 | if (!(regs->psw.mask & PSW_MASK_PSTATE)) { | 282 | if (!(regs->psw.mask & PSW_MASK_PSTATE)) { |
296 | address = 0; | 283 | address = 0; |
297 | user_address = 0; | 284 | space = 0; |
298 | goto no_context; | 285 | goto no_context; |
299 | } | 286 | } |
300 | 287 | ||
@@ -309,15 +296,15 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection) | |||
309 | * the address | 296 | * the address |
310 | */ | 297 | */ |
311 | address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK; | 298 | address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK; |
312 | user_address = check_user_space(regs, error_code); | 299 | space = check_space(tsk); |
313 | 300 | ||
314 | /* | 301 | /* |
315 | * Verify that the fault happened in user space, that | 302 | * Verify that the fault happened in user space, that |
316 | * we are not in an interrupt and that there is a | 303 | * we are not in an interrupt and that there is a |
317 | * user context. | 304 | * user context. |
318 | */ | 305 | */ |
319 | if (user_address == 0 || in_atomic() || !mm) | 306 | if (unlikely(space == 0 || in_atomic() || !mm)) |
320 | goto no_context; | 307 | goto no_context; |
321 | 308 | ||
322 | /* | 309 | /* |
323 | * When we get here, the fault happened in the current | 310 | * When we get here, the fault happened in the current |
@@ -328,12 +315,13 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection) | |||
328 | 315 | ||
329 | down_read(&mm->mmap_sem); | 316 | down_read(&mm->mmap_sem); |
330 | 317 | ||
331 | vma = find_vma(mm, address); | 318 | si_code = SEGV_MAPERR; |
332 | if (!vma) | 319 | vma = find_vma(mm, address); |
333 | goto bad_area; | 320 | if (!vma) |
321 | goto bad_area; | ||
334 | 322 | ||
335 | #ifdef CONFIG_S390_EXEC_PROTECT | 323 | #ifdef CONFIG_S390_EXEC_PROTECT |
336 | if (unlikely((user_address == 2) && !(vma->vm_flags & VM_EXEC))) | 324 | if (unlikely((space == 2) && !(vma->vm_flags & VM_EXEC))) |
337 | if (!signal_return(mm, regs, address, error_code)) | 325 | if (!signal_return(mm, regs, address, error_code)) |
338 | /* | 326 | /* |
339 | * signal_return() has done an up_read(&mm->mmap_sem) | 327 | * signal_return() has done an up_read(&mm->mmap_sem) |
@@ -389,7 +377,7 @@ survive: | |||
389 | * The instruction that caused the program check will | 377 | * The instruction that caused the program check will |
390 | * be repeated. Don't signal single step via SIGTRAP. | 378 | * be repeated. Don't signal single step via SIGTRAP. |
391 | */ | 379 | */ |
392 | clear_tsk_thread_flag(current, TIF_SINGLE_STEP); | 380 | clear_tsk_thread_flag(tsk, TIF_SINGLE_STEP); |
393 | return; | 381 | return; |
394 | 382 | ||
395 | /* | 383 | /* |
@@ -419,7 +407,7 @@ no_context: | |||
419 | * Oops. The kernel tried to access some bad page. We'll have to | 407 | * Oops. The kernel tried to access some bad page. We'll have to |
420 | * terminate things with extreme prejudice. | 408 | * terminate things with extreme prejudice. |
421 | */ | 409 | */ |
422 | if (user_address == 0) | 410 | if (space == 0) |
423 | printk(KERN_ALERT "Unable to handle kernel pointer dereference" | 411 | printk(KERN_ALERT "Unable to handle kernel pointer dereference" |
424 | " at virtual kernel address %p\n", (void *)address); | 412 | " at virtual kernel address %p\n", (void *)address); |
425 | else | 413 | else |
@@ -462,13 +450,14 @@ do_sigbus: | |||
462 | goto no_context; | 450 | goto no_context; |
463 | } | 451 | } |
464 | 452 | ||
465 | void do_protection_exception(struct pt_regs *regs, unsigned long error_code) | 453 | void __kprobes do_protection_exception(struct pt_regs *regs, |
454 | unsigned long error_code) | ||
466 | { | 455 | { |
467 | regs->psw.addr -= (error_code >> 16); | 456 | regs->psw.addr -= (error_code >> 16); |
468 | do_exception(regs, 4, 1); | 457 | do_exception(regs, 4, 1); |
469 | } | 458 | } |
470 | 459 | ||
471 | void do_dat_exception(struct pt_regs *regs, unsigned long error_code) | 460 | void __kprobes do_dat_exception(struct pt_regs *regs, unsigned long error_code) |
472 | { | 461 | { |
473 | do_exception(regs, error_code & 0xff, 0); | 462 | do_exception(regs, error_code & 0xff, 0); |
474 | } | 463 | } |
diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c index 2a32e5e8e9c9..3c798cdde550 100644 --- a/arch/um/kernel/signal.c +++ b/arch/um/kernel/signal.c | |||
@@ -158,12 +158,12 @@ static int kern_do_signal(struct pt_regs *regs) | |||
158 | clear_thread_flag(TIF_RESTORE_SIGMASK); | 158 | clear_thread_flag(TIF_RESTORE_SIGMASK); |
159 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); | 159 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); |
160 | } | 160 | } |
161 | return(handled_sig); | 161 | return handled_sig; |
162 | } | 162 | } |
163 | 163 | ||
164 | int do_signal(void) | 164 | int do_signal(void) |
165 | { | 165 | { |
166 | return(kern_do_signal(¤t->thread.regs)); | 166 | return kern_do_signal(¤t->thread.regs); |
167 | } | 167 | } |
168 | 168 | ||
169 | /* | 169 | /* |
@@ -186,5 +186,5 @@ long sys_sigsuspend(int history0, int history1, old_sigset_t mask) | |||
186 | 186 | ||
187 | long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss) | 187 | long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss) |
188 | { | 188 | { |
189 | return(do_sigaltstack(uss, uoss, PT_REGS_SP(¤t->thread.regs))); | 189 | return do_sigaltstack(uss, uoss, PT_REGS_SP(¤t->thread.regs)); |
190 | } | 190 | } |
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c index 9b34fe65949a..dda06789bcb0 100644 --- a/arch/um/os-Linux/skas/process.c +++ b/arch/um/os-Linux/skas/process.c | |||
@@ -419,9 +419,12 @@ void map_stub_pages(int fd, unsigned long code, | |||
419 | .offset = code_offset | 419 | .offset = code_offset |
420 | } } }); | 420 | } } }); |
421 | n = os_write_file(fd, &mmop, sizeof(mmop)); | 421 | n = os_write_file(fd, &mmop, sizeof(mmop)); |
422 | if(n != sizeof(mmop)) | 422 | if(n != sizeof(mmop)){ |
423 | printk("mmap args - addr = 0x%lx, fd = %d, offset = %llx\n", | ||
424 | code, code_fd, (unsigned long long) code_offset); | ||
423 | panic("map_stub_pages : /proc/mm map for code failed, " | 425 | panic("map_stub_pages : /proc/mm map for code failed, " |
424 | "err = %d\n", -n); | 426 | "err = %d\n", -n); |
427 | } | ||
425 | 428 | ||
426 | if ( stack ) { | 429 | if ( stack ) { |
427 | __u64 map_offset; | 430 | __u64 map_offset; |
diff --git a/arch/um/os-Linux/trap.c b/arch/um/os-Linux/trap.c index 1df231a26244..d221214d2ed5 100644 --- a/arch/um/os-Linux/trap.c +++ b/arch/um/os-Linux/trap.c | |||
@@ -16,6 +16,7 @@ void usr2_handler(int sig, union uml_pt_regs *regs) | |||
16 | CHOOSE_MODE(syscall_handler_tt(sig, regs), (void) 0); | 16 | CHOOSE_MODE(syscall_handler_tt(sig, regs), (void) 0); |
17 | } | 17 | } |
18 | 18 | ||
19 | /* Initialized from linux_main() */ | ||
19 | void (*sig_info[NSIG])(int, union uml_pt_regs *); | 20 | void (*sig_info[NSIG])(int, union uml_pt_regs *); |
20 | 21 | ||
21 | void os_fill_handlinfo(struct kern_handlers h) | 22 | void os_fill_handlinfo(struct kern_handlers h) |
diff --git a/arch/x86_64/kernel/hpet.c b/arch/x86_64/kernel/hpet.c index 65a0edd71a17..8cf0b8a13778 100644 --- a/arch/x86_64/kernel/hpet.c +++ b/arch/x86_64/kernel/hpet.c | |||
@@ -12,6 +12,12 @@ | |||
12 | #include <asm/timex.h> | 12 | #include <asm/timex.h> |
13 | #include <asm/hpet.h> | 13 | #include <asm/hpet.h> |
14 | 14 | ||
15 | #define HPET_MASK 0xFFFFFFFF | ||
16 | #define HPET_SHIFT 22 | ||
17 | |||
18 | /* FSEC = 10^-15 NSEC = 10^-9 */ | ||
19 | #define FSEC_PER_NSEC 1000000 | ||
20 | |||
15 | int nohpet __initdata; | 21 | int nohpet __initdata; |
16 | 22 | ||
17 | unsigned long hpet_address; | 23 | unsigned long hpet_address; |
@@ -106,9 +112,31 @@ int hpet_timer_stop_set_go(unsigned long tick) | |||
106 | return 0; | 112 | return 0; |
107 | } | 113 | } |
108 | 114 | ||
115 | static cycle_t read_hpet(void) | ||
116 | { | ||
117 | return (cycle_t)hpet_readl(HPET_COUNTER); | ||
118 | } | ||
119 | |||
120 | static cycle_t __vsyscall_fn vread_hpet(void) | ||
121 | { | ||
122 | return readl((void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0); | ||
123 | } | ||
124 | |||
125 | struct clocksource clocksource_hpet = { | ||
126 | .name = "hpet", | ||
127 | .rating = 250, | ||
128 | .read = read_hpet, | ||
129 | .mask = (cycle_t)HPET_MASK, | ||
130 | .mult = 0, /* set below */ | ||
131 | .shift = HPET_SHIFT, | ||
132 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | ||
133 | .vread = vread_hpet, | ||
134 | }; | ||
135 | |||
109 | int hpet_arch_init(void) | 136 | int hpet_arch_init(void) |
110 | { | 137 | { |
111 | unsigned int id; | 138 | unsigned int id; |
139 | u64 tmp; | ||
112 | 140 | ||
113 | if (!hpet_address) | 141 | if (!hpet_address) |
114 | return -1; | 142 | return -1; |
@@ -132,6 +160,22 @@ int hpet_arch_init(void) | |||
132 | 160 | ||
133 | hpet_use_timer = (id & HPET_ID_LEGSUP); | 161 | hpet_use_timer = (id & HPET_ID_LEGSUP); |
134 | 162 | ||
163 | /* | ||
164 | * hpet period is in femto seconds per cycle | ||
165 | * so we need to convert this to ns/cyc units | ||
166 | * approximated by mult/2^shift | ||
167 | * | ||
168 | * fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift | ||
169 | * fsec/cyc * 1ns/1000000fsec * 2^shift = mult | ||
170 | * fsec/cyc * 2^shift * 1nsec/1000000fsec = mult | ||
171 | * (fsec/cyc << shift)/1000000 = mult | ||
172 | * (hpet_period << shift)/FSEC_PER_NSEC = mult | ||
173 | */ | ||
174 | tmp = (u64)hpet_period << HPET_SHIFT; | ||
175 | do_div(tmp, FSEC_PER_NSEC); | ||
176 | clocksource_hpet.mult = (u32)tmp; | ||
177 | clocksource_register(&clocksource_hpet); | ||
178 | |||
135 | return hpet_timer_stop_set_go(hpet_tick); | 179 | return hpet_timer_stop_set_go(hpet_tick); |
136 | } | 180 | } |
137 | 181 | ||
@@ -444,68 +488,3 @@ static int __init nohpet_setup(char *s) | |||
444 | } | 488 | } |
445 | 489 | ||
446 | __setup("nohpet", nohpet_setup); | 490 | __setup("nohpet", nohpet_setup); |
447 | |||
448 | #define HPET_MASK 0xFFFFFFFF | ||
449 | #define HPET_SHIFT 22 | ||
450 | |||
451 | /* FSEC = 10^-15 NSEC = 10^-9 */ | ||
452 | #define FSEC_PER_NSEC 1000000 | ||
453 | |||
454 | static void *hpet_ptr; | ||
455 | |||
456 | static cycle_t read_hpet(void) | ||
457 | { | ||
458 | return (cycle_t)readl(hpet_ptr); | ||
459 | } | ||
460 | |||
461 | static cycle_t __vsyscall_fn vread_hpet(void) | ||
462 | { | ||
463 | return readl((void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0); | ||
464 | } | ||
465 | |||
466 | struct clocksource clocksource_hpet = { | ||
467 | .name = "hpet", | ||
468 | .rating = 250, | ||
469 | .read = read_hpet, | ||
470 | .mask = (cycle_t)HPET_MASK, | ||
471 | .mult = 0, /* set below */ | ||
472 | .shift = HPET_SHIFT, | ||
473 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | ||
474 | .vread = vread_hpet, | ||
475 | }; | ||
476 | |||
477 | static int __init init_hpet_clocksource(void) | ||
478 | { | ||
479 | unsigned long hpet_period; | ||
480 | void __iomem *hpet_base; | ||
481 | u64 tmp; | ||
482 | |||
483 | if (!hpet_address) | ||
484 | return -ENODEV; | ||
485 | |||
486 | /* calculate the hpet address: */ | ||
487 | hpet_base = ioremap_nocache(hpet_address, HPET_MMAP_SIZE); | ||
488 | hpet_ptr = hpet_base + HPET_COUNTER; | ||
489 | |||
490 | /* calculate the frequency: */ | ||
491 | hpet_period = readl(hpet_base + HPET_PERIOD); | ||
492 | |||
493 | /* | ||
494 | * hpet period is in femto seconds per cycle | ||
495 | * so we need to convert this to ns/cyc units | ||
496 | * approximated by mult/2^shift | ||
497 | * | ||
498 | * fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift | ||
499 | * fsec/cyc * 1ns/1000000fsec * 2^shift = mult | ||
500 | * fsec/cyc * 2^shift * 1nsec/1000000fsec = mult | ||
501 | * (fsec/cyc << shift)/1000000 = mult | ||
502 | * (hpet_period << shift)/FSEC_PER_NSEC = mult | ||
503 | */ | ||
504 | tmp = (u64)hpet_period << HPET_SHIFT; | ||
505 | do_div(tmp, FSEC_PER_NSEC); | ||
506 | clocksource_hpet.mult = (u32)tmp; | ||
507 | |||
508 | return clocksource_register(&clocksource_hpet); | ||
509 | } | ||
510 | |||
511 | module_init(init_hpet_clocksource); | ||
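
hpet_arch_init() above now derives the clocksource mult from the HPET period exactly as the comment describes. A stand-alone sketch of the same fixed-point arithmetic; the period value 69841279 fs (a 14.318 MHz HPET) is an assumed example, not taken from this patch:

#include <stdio.h>
#include <stdint.h>

#define HPET_SHIFT    22
#define FSEC_PER_NSEC 1000000ULL

int main(void)
{
    uint64_t hpet_period = 69841279;   /* assumed: fs per cycle, ~14.318 MHz */

    /* mult = (fs/cyc << shift) / FSEC_PER_NSEC, as in hpet_arch_init(). */
    uint32_t mult = (uint32_t)((hpet_period << HPET_SHIFT) / FSEC_PER_NSEC);

    /* The clocksource core then converts deltas with (cycles * mult) >> shift. */
    uint64_t cycles = 14318;           /* roughly one millisecond of ticks */
    uint64_t ns = (cycles * mult) >> HPET_SHIFT;

    printf("mult=%u, %llu cycles ~= %llu ns\n",
           mult, (unsigned long long)cycles, (unsigned long long)ns);
    return 0;
}
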
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c index 0a91368f8b60..c6a5bc7e8118 100644 --- a/arch/x86_64/kernel/io_apic.c +++ b/arch/x86_64/kernel/io_apic.c | |||
@@ -789,7 +789,6 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, | |||
789 | struct irq_cfg *cfg = irq_cfg + irq; | 789 | struct irq_cfg *cfg = irq_cfg + irq; |
790 | struct IO_APIC_route_entry entry; | 790 | struct IO_APIC_route_entry entry; |
791 | cpumask_t mask; | 791 | cpumask_t mask; |
792 | unsigned long flags; | ||
793 | 792 | ||
794 | if (!IO_APIC_IRQ(irq)) | 793 | if (!IO_APIC_IRQ(irq)) |
795 | return; | 794 | return; |
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c index 35443729aad8..cd4643a37022 100644 --- a/arch/x86_64/kernel/smpboot.c +++ b/arch/x86_64/kernel/smpboot.c | |||
@@ -923,8 +923,9 @@ void __init smp_prepare_boot_cpu(void) | |||
923 | */ | 923 | */ |
924 | int __cpuinit __cpu_up(unsigned int cpu) | 924 | int __cpuinit __cpu_up(unsigned int cpu) |
925 | { | 925 | { |
926 | int err; | ||
927 | int apicid = cpu_present_to_apicid(cpu); | 926 | int apicid = cpu_present_to_apicid(cpu); |
927 | unsigned long flags; | ||
928 | int err; | ||
928 | 929 | ||
929 | WARN_ON(irqs_disabled()); | 930 | WARN_ON(irqs_disabled()); |
930 | 931 | ||
@@ -958,7 +959,9 @@ int __cpuinit __cpu_up(unsigned int cpu) | |||
958 | /* | 959 | /* |
959 | * Make sure and check TSC sync: | 960 | * Make sure and check TSC sync: |
960 | */ | 961 | */ |
962 | local_irq_save(flags); | ||
961 | check_tsc_sync_source(cpu); | 963 | check_tsc_sync_source(cpu); |
964 | local_irq_restore(flags); | ||
962 | 965 | ||
963 | while (!cpu_isset(cpu, cpu_online_map)) | 966 | while (!cpu_isset(cpu, cpu_online_map)) |
964 | cpu_relax(); | 967 | cpu_relax(); |
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c index c9addcfb96dc..75d73a9aa9ff 100644 --- a/arch/x86_64/kernel/time.c +++ b/arch/x86_64/kernel/time.c | |||
@@ -358,6 +358,8 @@ void __init time_init(void) | |||
358 | set_cyc2ns_scale(cpu_khz); | 358 | set_cyc2ns_scale(cpu_khz); |
359 | printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n", | 359 | printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n", |
360 | cpu_khz / 1000, cpu_khz % 1000); | 360 | cpu_khz / 1000, cpu_khz % 1000); |
361 | init_tsc_clocksource(); | ||
362 | |||
361 | setup_irq(0, &irq0); | 363 | setup_irq(0, &irq0); |
362 | } | 364 | } |
363 | 365 | ||
diff --git a/arch/x86_64/kernel/tsc.c b/arch/x86_64/kernel/tsc.c index 895831865019..1a0edbbffaa0 100644 --- a/arch/x86_64/kernel/tsc.c +++ b/arch/x86_64/kernel/tsc.c | |||
@@ -210,7 +210,7 @@ void mark_tsc_unstable(void) | |||
210 | } | 210 | } |
211 | EXPORT_SYMBOL_GPL(mark_tsc_unstable); | 211 | EXPORT_SYMBOL_GPL(mark_tsc_unstable); |
212 | 212 | ||
213 | static int __init init_tsc_clocksource(void) | 213 | void __init init_tsc_clocksource(void) |
214 | { | 214 | { |
215 | if (!notsc) { | 215 | if (!notsc) { |
216 | clocksource_tsc.mult = clocksource_khz2mult(cpu_khz, | 216 | clocksource_tsc.mult = clocksource_khz2mult(cpu_khz, |
@@ -218,9 +218,6 @@ static int __init init_tsc_clocksource(void) | |||
218 | if (check_tsc_unstable()) | 218 | if (check_tsc_unstable()) |
219 | clocksource_tsc.rating = 0; | 219 | clocksource_tsc.rating = 0; |
220 | 220 | ||
221 | return clocksource_register(&clocksource_tsc); | 221 | clocksource_register(&clocksource_tsc); |
222 | } | 222 | } |
223 | return 0; | ||
224 | } | 223 | } |
225 | |||
226 | module_init(init_tsc_clocksource); | ||
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 43cc43d7b591..dc7b56225923 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
@@ -389,6 +389,7 @@ static const struct pci_device_id ahci_pci_tbl[] = { | |||
389 | { PCI_VDEVICE(INTEL, 0x2929), board_ahci_pi }, /* ICH9M */ | 389 | { PCI_VDEVICE(INTEL, 0x2929), board_ahci_pi }, /* ICH9M */ |
390 | { PCI_VDEVICE(INTEL, 0x292a), board_ahci_pi }, /* ICH9M */ | 390 | { PCI_VDEVICE(INTEL, 0x292a), board_ahci_pi }, /* ICH9M */ |
391 | { PCI_VDEVICE(INTEL, 0x292b), board_ahci_pi }, /* ICH9M */ | 391 | { PCI_VDEVICE(INTEL, 0x292b), board_ahci_pi }, /* ICH9M */ |
392 | { PCI_VDEVICE(INTEL, 0x292c), board_ahci_pi }, /* ICH9M */ | ||
392 | { PCI_VDEVICE(INTEL, 0x292f), board_ahci_pi }, /* ICH9M */ | 393 | { PCI_VDEVICE(INTEL, 0x292f), board_ahci_pi }, /* ICH9M */ |
393 | { PCI_VDEVICE(INTEL, 0x294d), board_ahci_pi }, /* ICH9 */ | 394 | { PCI_VDEVICE(INTEL, 0x294d), board_ahci_pi }, /* ICH9 */ |
394 | { PCI_VDEVICE(INTEL, 0x294e), board_ahci_pi }, /* ICH9M */ | 395 | { PCI_VDEVICE(INTEL, 0x294e), board_ahci_pi }, /* ICH9M */ |
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c index fc5b73d78e00..86fbcd6a742b 100644 --- a/drivers/ata/pata_legacy.c +++ b/drivers/ata/pata_legacy.c | |||
@@ -69,7 +69,7 @@ | |||
69 | #define NR_HOST 6 | 69 | #define NR_HOST 6 |
70 | 70 | ||
71 | static int legacy_port[NR_HOST] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 }; | 71 | static int legacy_port[NR_HOST] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 }; |
72 | static int legacy_irq[NR_HOST] = { 15, 14, 11, 10, 8, 12 }; | 72 | static int legacy_irq[NR_HOST] = { 14, 15, 11, 10, 8, 12 }; |
73 | 73 | ||
74 | struct legacy_data { | 74 | struct legacy_data { |
75 | unsigned long timing; | 75 | unsigned long timing; |
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c index 3fb417780166..acdc52cbe38a 100644 --- a/drivers/ata/pata_pdc202xx_old.c +++ b/drivers/ata/pata_pdc202xx_old.c | |||
@@ -2,13 +2,14 @@ | |||
2 | * pata_pdc202xx_old.c - Promise PDC202xx PATA for new ATA layer | 2 | * pata_pdc202xx_old.c - Promise PDC202xx PATA for new ATA layer |
3 | * (C) 2005 Red Hat Inc | 3 | * (C) 2005 Red Hat Inc |
4 | * Alan Cox <alan@redhat.com> | 4 | * Alan Cox <alan@redhat.com> |
5 | * (C) 2007 Bartlomiej Zolnierkiewicz | ||
5 | * | 6 | * |
6 | * Based in part on linux/drivers/ide/pci/pdc202xx_old.c | 7 | * Based in part on linux/drivers/ide/pci/pdc202xx_old.c |
7 | * | 8 | * |
8 | * First cut with LBA48/ATAPI | 9 | * First cut with LBA48/ATAPI |
9 | * | 10 | * |
10 | * TODO: | 11 | * TODO: |
11 | * Channel interlock/reset on both required ? | 12 | * Channel interlock/reset on both required |
12 | */ | 13 | */ |
13 | 14 | ||
14 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
@@ -21,7 +22,7 @@ | |||
21 | #include <linux/libata.h> | 22 | #include <linux/libata.h> |
22 | 23 | ||
23 | #define DRV_NAME "pata_pdc202xx_old" | 24 | #define DRV_NAME "pata_pdc202xx_old" |
24 | #define DRV_VERSION "0.3.0" | 25 | #define DRV_VERSION "0.4.0" |
25 | 26 | ||
26 | /** | 27 | /** |
27 | * pdc2024x_pre_reset - probe begin | 28 | * pdc2024x_pre_reset - probe begin |
@@ -76,7 +77,7 @@ static void pdc2026x_error_handler(struct ata_port *ap) | |||
76 | static void pdc202xx_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio) | 77 | static void pdc202xx_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio) |
77 | { | 78 | { |
78 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 79 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
79 | int port = 0x60 + 4 * ap->port_no + 2 * adev->devno; | 80 | int port = 0x60 + 8 * ap->port_no + 4 * adev->devno; |
80 | static u16 pio_timing[5] = { | 81 | static u16 pio_timing[5] = { |
81 | 0x0913, 0x050C , 0x0308, 0x0206, 0x0104 | 82 | 0x0913, 0x050C , 0x0308, 0x0206, 0x0104 |
82 | }; | 83 | }; |
@@ -85,7 +86,7 @@ static void pdc202xx_configure_piomode(struct ata_port *ap, struct ata_device *a | |||
85 | pci_read_config_byte(pdev, port, &r_ap); | 86 | pci_read_config_byte(pdev, port, &r_ap); |
86 | pci_read_config_byte(pdev, port + 1, &r_bp); | 87 | pci_read_config_byte(pdev, port + 1, &r_bp); |
87 | r_ap &= ~0x3F; /* Preserve ERRDY_EN, SYNC_IN */ | 88 | r_ap &= ~0x3F; /* Preserve ERRDY_EN, SYNC_IN */ |
88 | r_bp &= ~0x07; | 89 | r_bp &= ~0x1F; |
89 | r_ap |= (pio_timing[pio] >> 8); | 90 | r_ap |= (pio_timing[pio] >> 8); |
90 | r_bp |= (pio_timing[pio] & 0xFF); | 91 | r_bp |= (pio_timing[pio] & 0xFF); |
91 | 92 | ||
@@ -123,7 +124,7 @@ static void pdc202xx_set_piomode(struct ata_port *ap, struct ata_device *adev) | |||
123 | static void pdc202xx_set_dmamode(struct ata_port *ap, struct ata_device *adev) | 124 | static void pdc202xx_set_dmamode(struct ata_port *ap, struct ata_device *adev) |
124 | { | 125 | { |
125 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 126 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
126 | int port = 0x60 + 4 * ap->port_no + 2 * adev->devno; | 127 | int port = 0x60 + 8 * ap->port_no + 4 * adev->devno; |
127 | static u8 udma_timing[6][2] = { | 128 | static u8 udma_timing[6][2] = { |
128 | { 0x60, 0x03 }, /* 33 Mhz Clock */ | 129 | { 0x60, 0x03 }, /* 33 Mhz Clock */ |
129 | { 0x40, 0x02 }, | 130 | { 0x40, 0x02 }, |
@@ -132,12 +133,17 @@ static void pdc202xx_set_dmamode(struct ata_port *ap, struct ata_device *adev) | |||
132 | { 0x20, 0x01 }, | 133 | { 0x20, 0x01 }, |
133 | { 0x20, 0x01 } | 134 | { 0x20, 0x01 } |
134 | }; | 135 | }; |
136 | static u8 mdma_timing[3][2] = { | ||
137 | { 0x60, 0x03 }, | ||
138 | { 0x60, 0x04 }, | ||
139 | { 0xe0, 0x0f }, | ||
140 | }; | ||
135 | u8 r_bp, r_cp; | 141 | u8 r_bp, r_cp; |
136 | 142 | ||
137 | pci_read_config_byte(pdev, port + 1, &r_bp); | 143 | pci_read_config_byte(pdev, port + 1, &r_bp); |
138 | pci_read_config_byte(pdev, port + 2, &r_cp); | 144 | pci_read_config_byte(pdev, port + 2, &r_cp); |
139 | 145 | ||
140 | r_bp &= ~0xF0; | 146 | r_bp &= ~0xE0; |
141 | r_cp &= ~0x0F; | 147 | r_cp &= ~0x0F; |
142 | 148 | ||
143 | if (adev->dma_mode >= XFER_UDMA_0) { | 149 | if (adev->dma_mode >= XFER_UDMA_0) { |
@@ -147,8 +153,8 @@ static void pdc202xx_set_dmamode(struct ata_port *ap, struct ata_device *adev) | |||
147 | 153 | ||
148 | } else { | 154 | } else { |
149 | int speed = adev->dma_mode - XFER_MW_DMA_0; | 155 | int speed = adev->dma_mode - XFER_MW_DMA_0; |
150 | r_bp |= 0x60; | 156 | r_bp |= mdma_timing[speed][0]; |
151 | r_cp |= (5 - speed); | 157 | r_cp |= mdma_timing[speed][1]; |
152 | } | 158 | } |
153 | pci_write_config_byte(pdev, port + 1, r_bp); | 159 | pci_write_config_byte(pdev, port + 1, r_bp); |
154 | pci_write_config_byte(pdev, port + 2, r_cp); | 160 | pci_write_config_byte(pdev, port + 2, r_cp); |
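
The per-device register base changes from 0x60 + 4 * port + 2 * devno to 0x60 + 8 * port + 4 * devno; since each device's A/B/C timing bytes are accessed at port, port + 1 and port + 2, the old two-byte stride let neighbouring devices share registers. A stand-alone sketch that prints both layouts:

#include <stdio.h>

int main(void)
{
    /* Old vs. new PDC202xx timing-register range per channel/device. */
    for (int port = 0; port < 2; port++)
        for (int devno = 0; devno < 2; devno++)
            printf("port %d dev %d: old 0x%02x..0x%02x, new 0x%02x..0x%02x\n",
                   port, devno,
                   0x60 + 4 * port + 2 * devno, 0x60 + 4 * port + 2 * devno + 2,
                   0x60 + 8 * port + 4 * devno, 0x60 + 8 * port + 4 * devno + 2);
    return 0;
}
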
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index cacb1c816e35..17ee97f3a99b 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig | |||
@@ -406,22 +406,6 @@ config BLK_DEV_RAM_BLOCKSIZE | |||
406 | setups function - apparently needed by the rd_load_image routine | 406 | setups function - apparently needed by the rd_load_image routine |
407 | that supposes the filesystem in the image uses a 1024 blocksize. | 407 | that supposes the filesystem in the image uses a 1024 blocksize. |
408 | 408 | ||
409 | config BLK_DEV_INITRD | ||
410 | bool "Initial RAM filesystem and RAM disk (initramfs/initrd) support" | ||
411 | depends on BROKEN || !FRV | ||
412 | help | ||
413 | The initial RAM filesystem is a ramfs which is loaded by the | ||
414 | boot loader (loadlin or lilo) and that is mounted as root | ||
415 | before the normal boot procedure. It is typically used to | ||
416 | load modules needed to mount the "real" root file system, | ||
417 | etc. See <file:Documentation/initrd.txt> for details. | ||
418 | |||
419 | If RAM disk support (BLK_DEV_RAM) is also included, this | ||
420 | also enables initial RAM disk (initrd) support and adds | ||
421 | 15 Kbytes (more on some other architectures) to the kernel size. | ||
422 | |||
423 | If unsure say Y. | ||
424 | |||
425 | config CDROM_PKTCDVD | 409 | config CDROM_PKTCDVD |
426 | tristate "Packet writing on CD/DVD media" | 410 | tristate "Packet writing on CD/DVD media" |
427 | depends on !UML | 411 | depends on !UML |
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 05dfe357527c..0c716ee905d7 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c | |||
@@ -1291,13 +1291,19 @@ static void cciss_update_drive_info(int ctlr, int drv_index) | |||
1291 | if (inq_buff == NULL) | 1291 | if (inq_buff == NULL) |
1292 | goto mem_msg; | 1292 | goto mem_msg; |
1293 | 1293 | ||
1294 | /* testing to see if 16-byte CDBs are already being used */ | ||
1295 | if (h->cciss_read == CCISS_READ_16) { | ||
1296 | cciss_read_capacity_16(h->ctlr, drv_index, 1, | ||
1297 | &total_size, &block_size); | ||
1298 | goto geo_inq; | ||
1299 | } | ||
1300 | |||
1294 | cciss_read_capacity(ctlr, drv_index, 1, | 1301 | cciss_read_capacity(ctlr, drv_index, 1, |
1295 | &total_size, &block_size); | 1302 | &total_size, &block_size); |
1296 | 1303 | ||
1297 | /* total size = last LBA + 1 */ | 1304 | /* if read_capacity returns all F's this volume is >2TB in size */ |
1298 | /* FFFFFFFF + 1 = 0, cannot have a logical volume of size 0 */ | 1305 | /* so we switch to 16-byte CDB's for all read/write ops */ |
1299 | /* so we assume this volume this must be >2TB in size */ | 1306 | if (total_size == 0xFFFFFFFFULL) { |
1300 | if (total_size == (__u32) 0) { | ||
1301 | cciss_read_capacity_16(ctlr, drv_index, 1, | 1307 | cciss_read_capacity_16(ctlr, drv_index, 1, |
1302 | &total_size, &block_size); | 1308 | &total_size, &block_size); |
1303 | h->cciss_read = CCISS_READ_16; | 1309 | h->cciss_read = CCISS_READ_16; |
@@ -1306,6 +1312,7 @@ static void cciss_update_drive_info(int ctlr, int drv_index) | |||
1306 | h->cciss_read = CCISS_READ_10; | 1312 | h->cciss_read = CCISS_READ_10; |
1307 | h->cciss_write = CCISS_WRITE_10; | 1313 | h->cciss_write = CCISS_WRITE_10; |
1308 | } | 1314 | } |
1315 | geo_inq: | ||
1309 | cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size, | 1316 | cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size, |
1310 | inq_buff, &h->drv[drv_index]); | 1317 | inq_buff, &h->drv[drv_index]); |
1311 | 1318 | ||
@@ -1917,13 +1924,14 @@ static void cciss_geometry_inquiry(int ctlr, int logvol, | |||
1917 | drv->raid_level = inq_buff->data_byte[8]; | 1924 | drv->raid_level = inq_buff->data_byte[8]; |
1918 | } | 1925 | } |
1919 | drv->block_size = block_size; | 1926 | drv->block_size = block_size; |
1920 | drv->nr_blocks = total_size; | 1927 | drv->nr_blocks = total_size + 1; |
1921 | t = drv->heads * drv->sectors; | 1928 | t = drv->heads * drv->sectors; |
1922 | if (t > 1) { | 1929 | if (t > 1) { |
1923 | unsigned rem = sector_div(total_size, t); | 1930 | sector_t real_size = total_size + 1; |
1931 | unsigned long rem = sector_div(real_size, t); | ||
1924 | if (rem) | 1932 | if (rem) |
1925 | total_size++; | 1933 | real_size++; |
1926 | drv->cylinders = total_size; | 1934 | drv->cylinders = real_size; |
1927 | } | 1935 | } |
1928 | } else { /* Get geometry failed */ | 1936 | } else { /* Get geometry failed */ |
1929 | printk(KERN_WARNING "cciss: reading geometry failed\n"); | 1937 | printk(KERN_WARNING "cciss: reading geometry failed\n"); |
@@ -1953,16 +1961,16 @@ cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size, | |||
1953 | ctlr, buf, sizeof(ReadCapdata_struct), | 1961 | ctlr, buf, sizeof(ReadCapdata_struct), |
1954 | 1, logvol, 0, NULL, TYPE_CMD); | 1962 | 1, logvol, 0, NULL, TYPE_CMD); |
1955 | if (return_code == IO_OK) { | 1963 | if (return_code == IO_OK) { |
1956 | *total_size = be32_to_cpu(*(__u32 *) buf->total_size)+1; | 1964 | *total_size = be32_to_cpu(*(__u32 *) buf->total_size); |
1957 | *block_size = be32_to_cpu(*(__u32 *) buf->block_size); | 1965 | *block_size = be32_to_cpu(*(__u32 *) buf->block_size); |
1958 | } else { /* read capacity command failed */ | 1966 | } else { /* read capacity command failed */ |
1959 | printk(KERN_WARNING "cciss: read capacity failed\n"); | 1967 | printk(KERN_WARNING "cciss: read capacity failed\n"); |
1960 | *total_size = 0; | 1968 | *total_size = 0; |
1961 | *block_size = BLOCK_SIZE; | 1969 | *block_size = BLOCK_SIZE; |
1962 | } | 1970 | } |
1963 | if (*total_size != (__u32) 0) | 1971 | if (*total_size != 0) |
1964 | printk(KERN_INFO " blocks= %llu block_size= %d\n", | 1972 | printk(KERN_INFO " blocks= %llu block_size= %d\n", |
1965 | (unsigned long long)*total_size, *block_size); | 1973 | (unsigned long long)*total_size+1, *block_size); |
1966 | kfree(buf); | 1974 | kfree(buf); |
1967 | return; | 1975 | return; |
1968 | } | 1976 | } |
@@ -1989,7 +1997,7 @@ cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, | |||
1989 | 1, logvol, 0, NULL, TYPE_CMD); | 1997 | 1, logvol, 0, NULL, TYPE_CMD); |
1990 | } | 1998 | } |
1991 | if (return_code == IO_OK) { | 1999 | if (return_code == IO_OK) { |
1992 | *total_size = be64_to_cpu(*(__u64 *) buf->total_size)+1; | 2000 | *total_size = be64_to_cpu(*(__u64 *) buf->total_size); |
1993 | *block_size = be32_to_cpu(*(__u32 *) buf->block_size); | 2001 | *block_size = be32_to_cpu(*(__u32 *) buf->block_size); |
1994 | } else { /* read capacity command failed */ | 2002 | } else { /* read capacity command failed */ |
1995 | printk(KERN_WARNING "cciss: read capacity failed\n"); | 2003 | printk(KERN_WARNING "cciss: read capacity failed\n"); |
@@ -1997,7 +2005,7 @@ cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, | |||
1997 | *block_size = BLOCK_SIZE; | 2005 | *block_size = BLOCK_SIZE; |
1998 | } | 2006 | } |
1999 | printk(KERN_INFO " blocks= %llu block_size= %d\n", | 2007 | printk(KERN_INFO " blocks= %llu block_size= %d\n", |
2000 | (unsigned long long)*total_size, *block_size); | 2008 | (unsigned long long)*total_size+1, *block_size); |
2001 | kfree(buf); | 2009 | kfree(buf); |
2002 | return; | 2010 | return; |
2003 | } | 2011 | } |
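
The reworked capacity handling relies on READ CAPACITY reporting the last addressable LBA rather than a block count: the raw value is kept, nr_blocks becomes total_size + 1, and an all-ones 32-bit result is the sentinel for switching to 16-byte CDBs. A stand-alone sketch with made-up volume sizes:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t last_lba_small = 976773167ULL;  /* assumed ~466 GiB volume */
    uint64_t last_lba_big   = 0xFFFFFFFFULL; /* saturated: >2 TB at 512-byte blocks */

    if (last_lba_big == 0xFFFFFFFFULL)
        printf("32-bit capacity saturated -> use READ CAPACITY(16) and 16-byte CDBs\n");

    /* Matches drv->nr_blocks = total_size + 1 in cciss_geometry_inquiry(). */
    printf("small volume: %llu blocks\n",
           (unsigned long long)(last_lba_small + 1));
    return 0;
}
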
@@ -3119,8 +3127,9 @@ static void cciss_getgeometry(int cntl_num) | |||
3119 | } | 3127 | } |
3120 | cciss_read_capacity(cntl_num, i, 0, &total_size, &block_size); | 3128 | cciss_read_capacity(cntl_num, i, 0, &total_size, &block_size); |
3121 | 3129 | ||
3122 | /* total_size = last LBA + 1 */ | 3130 | /* If read_capacity returns all F's the logical is >2TB */ |
3123 | if(total_size == (__u32) 0) { | 3131 | /* so we switch to 16-byte CDBs for all read/write ops */ |
3132 | if(total_size == 0xFFFFFFFFULL) { | ||
3124 | cciss_read_capacity_16(cntl_num, i, 0, | 3133 | cciss_read_capacity_16(cntl_num, i, 0, |
3125 | &total_size, &block_size); | 3134 | &total_size, &block_size); |
3126 | hba[cntl_num]->cciss_read = CCISS_READ_16; | 3135 | hba[cntl_num]->cciss_read = CCISS_READ_16; |
@@ -3395,7 +3404,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, | |||
3395 | return -1; | 3404 | return -1; |
3396 | } | 3405 | } |
3397 | 3406 | ||
3398 | static void __devexit cciss_remove_one(struct pci_dev *pdev) | 3407 | static void cciss_remove_one(struct pci_dev *pdev) |
3399 | { | 3408 | { |
3400 | ctlr_info_t *tmp_ptr; | 3409 | ctlr_info_t *tmp_ptr; |
3401 | int i, j; | 3410 | int i, j; |
@@ -3419,9 +3428,10 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev) | |||
3419 | memset(flush_buf, 0, 4); | 3428 | memset(flush_buf, 0, 4); |
3420 | return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL, | 3429 | return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL, |
3421 | TYPE_CMD); | 3430 | TYPE_CMD); |
3422 | if (return_code != IO_OK) { | 3431 | if (return_code == IO_OK) { |
3423 | printk(KERN_WARNING "Error Flushing cache on controller %d\n", | 3432 | printk(KERN_INFO "Completed flushing cache on controller %d\n", i); |
3424 | i); | 3433 | } else { |
3434 | printk(KERN_WARNING "Error flushing cache on controller %d\n", i); | ||
3425 | } | 3435 | } |
3426 | free_irq(hba[i]->intr[2], hba[i]); | 3436 | free_irq(hba[i]->intr[2], hba[i]); |
3427 | 3437 | ||
@@ -3472,6 +3482,7 @@ static struct pci_driver cciss_pci_driver = { | |||
3472 | .probe = cciss_init_one, | 3482 | .probe = cciss_init_one, |
3473 | .remove = __devexit_p(cciss_remove_one), | 3483 | .remove = __devexit_p(cciss_remove_one), |
3474 | .id_table = cciss_pci_device_id, /* id_table */ | 3484 | .id_table = cciss_pci_device_id, /* id_table */ |
3485 | .shutdown = cciss_remove_one, | ||
3475 | }; | 3486 | }; |
3476 | 3487 | ||
3477 | /* | 3488 | /* |
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c index dc13ebacedfb..44cd7b2ddf09 100644 --- a/drivers/cdrom/viocd.c +++ b/drivers/cdrom/viocd.c | |||
@@ -376,6 +376,25 @@ static int send_request(struct request *req) | |||
376 | return 0; | 376 | return 0; |
377 | } | 377 | } |
378 | 378 | ||
379 | static void viocd_end_request(struct request *req, int uptodate) | ||
380 | { | ||
381 | int nsectors = req->hard_nr_sectors; | ||
382 | |||
383 | /* | ||
384 | * Make sure it's fully ended, and ensure that we process | ||
385 | * at least one sector. | ||
386 | */ | ||
387 | if (blk_pc_request(req)) | ||
388 | nsectors = (req->data_len + 511) >> 9; | ||
389 | if (!nsectors) | ||
390 | nsectors = 1; | ||
391 | |||
392 | if (end_that_request_first(req, uptodate, nsectors)) | ||
393 | BUG(); | ||
394 | add_disk_randomness(req->rq_disk); | ||
395 | blkdev_dequeue_request(req); | ||
396 | end_that_request_last(req, uptodate); | ||
397 | } | ||
379 | 398 | ||
380 | static int rwreq; | 399 | static int rwreq; |
381 | 400 | ||
@@ -385,11 +404,11 @@ static void do_viocd_request(request_queue_t *q) | |||
385 | 404 | ||
386 | while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) { | 405 | while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) { |
387 | if (!blk_fs_request(req)) | 406 | if (!blk_fs_request(req)) |
388 | end_request(req, 0); | 407 | viocd_end_request(req, 0); |
389 | else if (send_request(req) < 0) { | 408 | else if (send_request(req) < 0) { |
390 | printk(VIOCD_KERN_WARNING | 409 | printk(VIOCD_KERN_WARNING |
391 | "unable to send message to OS/400!"); | 410 | "unable to send message to OS/400!"); |
392 | end_request(req, 0); | 411 | viocd_end_request(req, 0); |
393 | } else | 412 | } else |
394 | rwreq++; | 413 | rwreq++; |
395 | } | 414 | } |
@@ -601,9 +620,9 @@ return_complete: | |||
601 | "with rc %d:0x%04X: %s\n", | 620 | "with rc %d:0x%04X: %s\n", |
602 | req, event->xRc, | 621 | req, event->xRc, |
603 | bevent->sub_result, err->msg); | 622 | bevent->sub_result, err->msg); |
604 | end_request(req, 0); | 623 | viocd_end_request(req, 0); |
605 | } else | 624 | } else |
606 | end_request(req, 1); | 625 | viocd_end_request(req, 1); |
607 | 626 | ||
608 | /* restart handling of incoming requests */ | 627 | /* restart handling of incoming requests */ |
609 | spin_unlock_irqrestore(&viocd_reqlock, flags); | 628 | spin_unlock_irqrestore(&viocd_reqlock, flags); |
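
viocd_end_request() above always completes at least one sector and, for packet commands, derives the sector count from the byte count instead of hard_nr_sectors. A stand-alone sketch of that accounting with illustrative values:

#include <stdio.h>

/* Same rounding as viocd_end_request(): byte counts become 512-byte
 * sectors, and zero-length requests still complete one sector. */
static int sectors_to_end(int is_pc_request, int hard_nr_sectors, int data_len)
{
    int nsectors = hard_nr_sectors;

    if (is_pc_request)
        nsectors = (data_len + 511) >> 9;
    if (!nsectors)
        nsectors = 1;
    return nsectors;
}

int main(void)
{
    printf("fs request, 8 sectors       -> %d\n", sectors_to_end(0, 8, 0));
    printf("pc request, 18-byte payload -> %d\n", sectors_to_end(1, 0, 18));
    printf("pc request, empty payload   -> %d\n", sectors_to_end(1, 0, 0));
    return 0;
}
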
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index d0a6dc53213c..3429ece4ef92 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig | |||
@@ -1026,16 +1026,17 @@ config MMTIMER | |||
1026 | source "drivers/char/tpm/Kconfig" | 1026 | source "drivers/char/tpm/Kconfig" |
1027 | 1027 | ||
1028 | config TELCLOCK | 1028 | config TELCLOCK |
1029 | tristate "Telecom clock driver for MPBL0010 ATCA SBC" | 1029 | tristate "Telecom clock driver for ATCA SBC" |
1030 | depends on EXPERIMENTAL && X86 | 1030 | depends on EXPERIMENTAL && X86 |
1031 | default n | 1031 | default n |
1032 | help | 1032 | help |
1033 | The telecom clock device is specific to the MPBL0010 ATCA computer and | 1033 | The telecom clock device is specific to the MPCBL0010 and MPCBL0050 |
1034 | allows direct userspace access to the configuration of the telecom clock | 1034 | ATCA computers and allows direct userspace access to the |
1035 | configuration settings. This device is used for hardware synchronization | 1035 | configuration of the telecom clock configuration settings. This |
1036 | across the ATCA backplane fabric. Upon loading, the driver exports a | 1036 | device is used for hardware synchronization across the ATCA backplane |
1037 | sysfs directory, /sys/devices/platform/telco_clock, with a number of | 1037 | fabric. Upon loading, the driver exports a sysfs directory, |
1038 | files for controlling the behavior of this hardware. | 1038 | /sys/devices/platform/telco_clock, with a number of files for |
1039 | controlling the behavior of this hardware. | ||
1039 | 1040 | ||
1040 | endmenu | 1041 | endmenu |
1041 | 1042 | ||
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c index 54df35527bc5..16dc5d1d3cb4 100644 --- a/drivers/char/cyclades.c +++ b/drivers/char/cyclades.c | |||
@@ -3501,6 +3501,7 @@ get_serial_info(struct cyclades_port *info, | |||
3501 | tmp.irq = cinfo->irq; | 3501 | tmp.irq = cinfo->irq; |
3502 | tmp.flags = info->flags; | 3502 | tmp.flags = info->flags; |
3503 | tmp.close_delay = info->close_delay; | 3503 | tmp.close_delay = info->close_delay; |
3504 | tmp.closing_wait = info->closing_wait; | ||
3504 | tmp.baud_base = info->baud; | 3505 | tmp.baud_base = info->baud; |
3505 | tmp.custom_divisor = info->custom_divisor; | 3506 | tmp.custom_divisor = info->custom_divisor; |
3506 | tmp.hub6 = 0; /*!!! */ | 3507 | tmp.hub6 = 0; /*!!! */ |
diff --git a/drivers/char/epca.c b/drivers/char/epca.c index 88fc24fc4392..de5be30484ad 100644 --- a/drivers/char/epca.c +++ b/drivers/char/epca.c | |||
@@ -209,7 +209,6 @@ static void digi_send_break(struct channel *ch, int msec); | |||
209 | static void setup_empty_event(struct tty_struct *tty, struct channel *ch); | 209 | static void setup_empty_event(struct tty_struct *tty, struct channel *ch); |
210 | void epca_setup(char *, int *); | 210 | void epca_setup(char *, int *); |
211 | 211 | ||
212 | static int get_termio(struct tty_struct *, struct termio __user *); | ||
213 | static int pc_write(struct tty_struct *, const unsigned char *, int); | 212 | static int pc_write(struct tty_struct *, const unsigned char *, int); |
214 | static int pc_init(void); | 213 | static int pc_init(void); |
215 | static int init_PCI(void); | 214 | static int init_PCI(void); |
@@ -2362,15 +2361,6 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file, | |||
2362 | 2361 | ||
2363 | switch (cmd) | 2362 | switch (cmd) |
2364 | { /* Begin switch cmd */ | 2363 | { /* Begin switch cmd */ |
2365 | |||
2366 | #if 0 /* Handled by calling layer properly */ | ||
2367 | case TCGETS: | ||
2368 | if (copy_to_user(argp, tty->termios, sizeof(struct ktermios))) | ||
2369 | return -EFAULT; | ||
2370 | return 0; | ||
2371 | case TCGETA: | ||
2372 | return get_termio(tty, argp); | ||
2373 | #endif | ||
2374 | case TCSBRK: /* SVID version: non-zero arg --> no break */ | 2364 | case TCSBRK: /* SVID version: non-zero arg --> no break */ |
2375 | retval = tty_check_change(tty); | 2365 | retval = tty_check_change(tty); |
2376 | if (retval) | 2366 | if (retval) |
@@ -2735,13 +2725,6 @@ static void setup_empty_event(struct tty_struct *tty, struct channel *ch) | |||
2735 | memoff(ch); | 2725 | memoff(ch); |
2736 | } /* End setup_empty_event */ | 2726 | } /* End setup_empty_event */ |
2737 | 2727 | ||
2738 | /* --------------------- Begin get_termio ----------------------- */ | ||
2739 | |||
2740 | static int get_termio(struct tty_struct * tty, struct termio __user * termio) | ||
2741 | { /* Begin get_termio */ | ||
2742 | return kernel_termios_to_user_termio(termio, tty->termios); | ||
2743 | } /* End get_termio */ | ||
2744 | |||
2745 | /* ---------------------- Begin epca_setup -------------------------- */ | 2728 | /* ---------------------- Begin epca_setup -------------------------- */ |
2746 | void epca_setup(char *str, int *ints) | 2729 | void epca_setup(char *str, int *ints) |
2747 | { /* Begin epca_setup */ | 2730 | { /* Begin epca_setup */ |
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index a7b33d2f5991..e22146546add 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c | |||
@@ -2478,6 +2478,11 @@ static __devinit void default_find_bmc(void) | |||
2478 | if (!info) | 2478 | if (!info) |
2479 | return; | 2479 | return; |
2480 | 2480 | ||
2481 | #ifdef CONFIG_PPC_MERGE | ||
2482 | if (check_legacy_ioport(ipmi_defaults[i].port)) | ||
2483 | continue; | ||
2484 | #endif | ||
2485 | |||
2481 | info->addr_source = NULL; | 2486 | info->addr_source = NULL; |
2482 | 2487 | ||
2483 | info->si_type = ipmi_defaults[i].type; | 2488 | info->si_type = ipmi_defaults[i].type; |
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c index 0e82968c2f38..f2e4ec4fd407 100644 --- a/drivers/char/pcmcia/cm4040_cs.c +++ b/drivers/char/pcmcia/cm4040_cs.c | |||
@@ -273,6 +273,7 @@ static ssize_t cm4040_read(struct file *filp, char __user *buf, | |||
273 | DEBUGP(6, dev, "BytesToRead=%lu\n", bytes_to_read); | 273 | DEBUGP(6, dev, "BytesToRead=%lu\n", bytes_to_read); |
274 | 274 | ||
275 | min_bytes_to_read = min(count, bytes_to_read + 5); | 275 | min_bytes_to_read = min(count, bytes_to_read + 5); |
276 | min_bytes_to_read = min_t(size_t, min_bytes_to_read, READ_WRITE_BUFFER_SIZE); | ||
276 | 277 | ||
277 | DEBUGP(6, dev, "Min=%lu\n", min_bytes_to_read); | 278 | DEBUGP(6, dev, "Min=%lu\n", min_bytes_to_read); |
278 | 279 | ||
@@ -340,7 +341,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf, | |||
340 | return 0; | 341 | return 0; |
341 | } | 342 | } |
342 | 343 | ||
343 | if (count < 5) { | 344 | if ((count < 5) || (count > READ_WRITE_BUFFER_SIZE)) { |
344 | DEBUGP(2, dev, "<- cm4040_write buffersize=%Zd < 5\n", count); | 345 | DEBUGP(2, dev, "<- cm4040_write buffersize=%Zd < 5\n", count); |
345 | return -EIO; | 346 | return -EIO; |
346 | } | 347 | } |
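Both cm4040 hunks bound the requested transfer length by READ_WRITE_BUFFER_SIZE, so an oversized read() or write() can no longer overrun the driver's fixed buffer. A minimal sketch of the clamping idea (the 512-byte size is an illustrative assumption, not the driver's actual constant):

#include <stddef.h>

#define READ_WRITE_BUFFER_SIZE 512	/* illustrative size only */

static size_t clamp_request(size_t count, size_t bytes_to_read)
{
	size_t n = count < bytes_to_read + 5 ? count : bytes_to_read + 5;

	if (n > READ_WRITE_BUFFER_SIZE)	/* never exceed the kernel-side buffer */
		n = READ_WRITE_BUFFER_SIZE;
	return n;
}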
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c index ccaa6a39cb4b..d42060ede930 100644 --- a/drivers/clocksource/acpi_pm.c +++ b/drivers/clocksource/acpi_pm.c | |||
@@ -214,4 +214,7 @@ pm_good: | |||
214 | return clocksource_register(&clocksource_acpi_pm); | 214 | return clocksource_register(&clocksource_acpi_pm); |
215 | } | 215 | } |
216 | 216 | ||
217 | module_init(init_acpi_pm_clocksource); | 217 | /* We use fs_initcall because we want the PCI fixups to have run |
218 | * but we still need to load before device_initcall | ||
219 | */ | ||
220 | fs_initcall(init_acpi_pm_clocksource); | ||
diff --git a/drivers/clocksource/cyclone.c b/drivers/clocksource/cyclone.c index 4f3925ceb360..1bde303b970b 100644 --- a/drivers/clocksource/cyclone.c +++ b/drivers/clocksource/cyclone.c | |||
@@ -116,4 +116,4 @@ static int __init init_cyclone_clocksource(void) | |||
116 | return clocksource_register(&clocksource_cyclone); | 116 | return clocksource_register(&clocksource_cyclone); |
117 | } | 117 | } |
118 | 118 | ||
119 | module_init(init_cyclone_clocksource); | 119 | arch_initcall(init_cyclone_clocksource); |
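Both clocksource changes swap module_init() for an earlier initcall level. Built-in initcalls run in a fixed order (core, postcore, arch, subsys, fs, device, late), so fs_initcall() lands after the PCI fixups the comment mentions but before device_initcall(), which is what module_init() maps to for built-in code. A minimal sketch, assuming a built-in driver:

#include <linux/init.h>

static int __init my_clocksource_init(void)
{
	/* Runs after arch_initcall()/subsys_initcall() (PCI fixups done),
	 * but before any device_initcall()/module_init() consumer. */
	return 0;
}
fs_initcall(my_clocksource_init);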
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c index 0eb62841e9b0..6d3840e629de 100644 --- a/drivers/crypto/geode-aes.c +++ b/drivers/crypto/geode-aes.c | |||
@@ -99,9 +99,8 @@ do_crypt(void *src, void *dst, int len, u32 flags) | |||
99 | static unsigned int | 99 | static unsigned int |
100 | geode_aes_crypt(struct geode_aes_op *op) | 100 | geode_aes_crypt(struct geode_aes_op *op) |
101 | { | 101 | { |
102 | |||
103 | u32 flags = 0; | 102 | u32 flags = 0; |
104 | int iflags; | 103 | unsigned long iflags; |
105 | 104 | ||
106 | if (op->len == 0 || op->src == op->dst) | 105 | if (op->len == 0 || op->src == op->dst) |
107 | return 0; | 106 | return 0; |
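The geode-aes fix matters because iflags is the variable handed to the spin_lock_irqsave()/spin_unlock_irqrestore() pair, and that API stores the full saved interrupt state in an unsigned long; an int can truncate it on 64-bit builds. The pattern in isolation, as a hedged sketch rather than the driver code:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void example_critical_section(void)
{
	unsigned long flags;	/* must be unsigned long, never int */

	spin_lock_irqsave(&example_lock, flags);
	/* ... touch the crypto engine registers ... */
	spin_unlock_irqrestore(&example_lock, flags);
}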
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index 64509689fa65..f17e9c7d4b36 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig | |||
@@ -215,14 +215,16 @@ config KEYBOARD_AAED2000 | |||
215 | module will be called aaed2000_kbd. | 215 | module will be called aaed2000_kbd. |
216 | 216 | ||
217 | config KEYBOARD_GPIO | 217 | config KEYBOARD_GPIO |
218 | tristate "Buttons on CPU GPIOs (PXA)" | 218 | tristate "GPIO Buttons" |
219 | depends on (ARCH_SA1100 || ARCH_PXA || ARCH_S3C2410) | 219 | depends on GENERIC_GPIO |
220 | help | 220 | help |
221 | This driver implements support for buttons connected | 221 | This driver implements support for buttons connected |
222 | directly to GPIO pins of SA1100, PXA or S3C24xx CPUs. | 222 | to GPIO pins of various CPUs (and some other chips). |
223 | 223 | ||
224 | Say Y here if your device has buttons connected | 224 | Say Y here if your device has buttons connected |
225 | directly to GPIO pins of the CPU. | 225 | directly to such GPIO pins. Your board-specific |
226 | setup logic must also provide a platform device, | ||
227 | with configuration data saying which GPIOs are used. | ||
226 | 228 | ||
227 | To compile this driver as a module, choose M here: the | 229 | To compile this driver as a module, choose M here: the |
228 | module will be called gpio-keys. | 230 | module will be called gpio-keys. |
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c index fa03a00b4c6d..ccf6df387b62 100644 --- a/drivers/input/keyboard/gpio_keys.c +++ b/drivers/input/keyboard/gpio_keys.c | |||
@@ -23,11 +23,9 @@ | |||
23 | #include <linux/platform_device.h> | 23 | #include <linux/platform_device.h> |
24 | #include <linux/input.h> | 24 | #include <linux/input.h> |
25 | #include <linux/irq.h> | 25 | #include <linux/irq.h> |
26 | #include <linux/gpio_keys.h> | ||
26 | 27 | ||
27 | #include <asm/gpio.h> | 28 | #include <asm/gpio.h> |
28 | #include <asm/arch/hardware.h> | ||
29 | |||
30 | #include <asm/hardware/gpio_keys.h> | ||
31 | 29 | ||
32 | static irqreturn_t gpio_keys_isr(int irq, void *dev_id) | 30 | static irqreturn_t gpio_keys_isr(int irq, void *dev_id) |
33 | { | 31 | { |
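The Kconfig help above now says board code must supply a platform device describing which GPIOs carry buttons, and the gpio_keys.c hunk moves the shared header to <linux/gpio_keys.h> to match. A hedged board-setup sketch follows; the GPIO number and key choice are made up, and the gpio_keys_button field names should be checked against <linux/gpio_keys.h> in this tree (later kernels rename keycode to code):

#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/gpio_keys.h>

static struct gpio_keys_button board_buttons[] = {
	{
		.keycode	= KEY_POWER,	/* .code in later kernels */
		.gpio		= 42,		/* assumed GPIO line */
		.active_low	= 1,
		.desc		= "power",
	},
};

static struct gpio_keys_platform_data board_button_data = {
	.buttons	= board_buttons,
	.nbuttons	= ARRAY_SIZE(board_buttons),
};

static struct platform_device board_button_device = {
	.name	= "gpio-keys",
	.id	= -1,
	.dev	= {
		.platform_data = &board_button_data,
	},
};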
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index d247429ee5ef..54a1ad5eef42 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -3071,7 +3071,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped | |||
3071 | release_stripe(sh); | 3071 | release_stripe(sh); |
3072 | } | 3072 | } |
3073 | spin_lock_irq(&conf->device_lock); | 3073 | spin_lock_irq(&conf->device_lock); |
3074 | conf->expand_progress = (sector_nr + i)*(conf->raid_disks-1); | 3074 | conf->expand_progress = (sector_nr + i) * new_data_disks; |
3075 | spin_unlock_irq(&conf->device_lock); | 3075 | spin_unlock_irq(&conf->device_lock); |
3076 | /* Ok, those stripe are ready. We can start scheduling | 3076 | /* Ok, those stripe are ready. We can start scheduling |
3077 | * reads on the source stripes. | 3077 | * reads on the source stripes. |
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c index 5046a1661342..4a73e8b2428d 100644 --- a/drivers/mmc/mmc.c +++ b/drivers/mmc/mmc.c | |||
@@ -376,10 +376,11 @@ static inline void mmc_set_ios(struct mmc_host *host) | |||
376 | { | 376 | { |
377 | struct mmc_ios *ios = &host->ios; | 377 | struct mmc_ios *ios = &host->ios; |
378 | 378 | ||
379 | pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u width %u\n", | 379 | pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u " |
380 | "width %u timing %u\n", | ||
380 | mmc_hostname(host), ios->clock, ios->bus_mode, | 381 | mmc_hostname(host), ios->clock, ios->bus_mode, |
381 | ios->power_mode, ios->chip_select, ios->vdd, | 382 | ios->power_mode, ios->chip_select, ios->vdd, |
382 | ios->bus_width); | 383 | ios->bus_width, ios->timing); |
383 | 384 | ||
384 | host->ops->set_ios(host, ios); | 385 | host->ops->set_ios(host, ios); |
385 | } | 386 | } |
@@ -809,6 +810,7 @@ static void mmc_power_up(struct mmc_host *host) | |||
809 | host->ios.chip_select = MMC_CS_DONTCARE; | 810 | host->ios.chip_select = MMC_CS_DONTCARE; |
810 | host->ios.power_mode = MMC_POWER_UP; | 811 | host->ios.power_mode = MMC_POWER_UP; |
811 | host->ios.bus_width = MMC_BUS_WIDTH_1; | 812 | host->ios.bus_width = MMC_BUS_WIDTH_1; |
813 | host->ios.timing = MMC_TIMING_LEGACY; | ||
812 | mmc_set_ios(host); | 814 | mmc_set_ios(host); |
813 | 815 | ||
814 | mmc_delay(1); | 816 | mmc_delay(1); |
@@ -828,6 +830,7 @@ static void mmc_power_off(struct mmc_host *host) | |||
828 | host->ios.chip_select = MMC_CS_DONTCARE; | 830 | host->ios.chip_select = MMC_CS_DONTCARE; |
829 | host->ios.power_mode = MMC_POWER_OFF; | 831 | host->ios.power_mode = MMC_POWER_OFF; |
830 | host->ios.bus_width = MMC_BUS_WIDTH_1; | 832 | host->ios.bus_width = MMC_BUS_WIDTH_1; |
833 | host->ios.timing = MMC_TIMING_LEGACY; | ||
831 | mmc_set_ios(host); | 834 | mmc_set_ios(host); |
832 | } | 835 | } |
833 | 836 | ||
@@ -1112,46 +1115,50 @@ static void mmc_process_ext_csds(struct mmc_host *host) | |||
1112 | continue; | 1115 | continue; |
1113 | } | 1116 | } |
1114 | 1117 | ||
1115 | /* Activate highspeed support. */ | 1118 | if (host->caps & MMC_CAP_MMC_HIGHSPEED) { |
1116 | cmd.opcode = MMC_SWITCH; | 1119 | /* Activate highspeed support. */ |
1117 | cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | | 1120 | cmd.opcode = MMC_SWITCH; |
1118 | (EXT_CSD_HS_TIMING << 16) | | 1121 | cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | |
1119 | (1 << 8) | | 1122 | (EXT_CSD_HS_TIMING << 16) | |
1120 | EXT_CSD_CMD_SET_NORMAL; | 1123 | (1 << 8) | |
1121 | cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; | 1124 | EXT_CSD_CMD_SET_NORMAL; |
1125 | cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; | ||
1122 | 1126 | ||
1123 | err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); | 1127 | err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); |
1124 | if (err != MMC_ERR_NONE) { | 1128 | if (err != MMC_ERR_NONE) { |
1125 | printk("%s: failed to switch card to mmc v4 " | 1129 | printk("%s: failed to switch card to mmc v4 " |
1126 | "high-speed mode.\n", | 1130 | "high-speed mode.\n", |
1127 | mmc_hostname(card->host)); | 1131 | mmc_hostname(card->host)); |
1128 | continue; | 1132 | continue; |
1129 | } | 1133 | } |
1130 | 1134 | ||
1131 | mmc_card_set_highspeed(card); | 1135 | mmc_card_set_highspeed(card); |
1132 | 1136 | ||
1133 | /* Check for host support for wide-bus modes. */ | 1137 | host->ios.timing = MMC_TIMING_SD_HS; |
1134 | if (!(host->caps & MMC_CAP_4_BIT_DATA)) { | 1138 | mmc_set_ios(host); |
1135 | continue; | ||
1136 | } | 1139 | } |
1137 | 1140 | ||
1138 | /* Activate 4-bit support. */ | 1141 | /* Check for host support for wide-bus modes. */ |
1139 | cmd.opcode = MMC_SWITCH; | 1142 | if (host->caps & MMC_CAP_4_BIT_DATA) { |
1140 | cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | | 1143 | /* Activate 4-bit support. */ |
1141 | (EXT_CSD_BUS_WIDTH << 16) | | 1144 | cmd.opcode = MMC_SWITCH; |
1142 | (EXT_CSD_BUS_WIDTH_4 << 8) | | 1145 | cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | |
1143 | EXT_CSD_CMD_SET_NORMAL; | 1146 | (EXT_CSD_BUS_WIDTH << 16) | |
1144 | cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; | 1147 | (EXT_CSD_BUS_WIDTH_4 << 8) | |
1148 | EXT_CSD_CMD_SET_NORMAL; | ||
1149 | cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; | ||
1145 | 1150 | ||
1146 | err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); | 1151 | err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); |
1147 | if (err != MMC_ERR_NONE) { | 1152 | if (err != MMC_ERR_NONE) { |
1148 | printk("%s: failed to switch card to " | 1153 | printk("%s: failed to switch card to " |
1149 | "mmc v4 4-bit bus mode.\n", | 1154 | "mmc v4 4-bit bus mode.\n", |
1150 | mmc_hostname(card->host)); | 1155 | mmc_hostname(card->host)); |
1151 | continue; | 1156 | continue; |
1152 | } | 1157 | } |
1153 | 1158 | ||
1154 | host->ios.bus_width = MMC_BUS_WIDTH_4; | 1159 | host->ios.bus_width = MMC_BUS_WIDTH_4; |
1160 | mmc_set_ios(host); | ||
1161 | } | ||
1155 | } | 1162 | } |
1156 | 1163 | ||
1157 | kfree(ext_csd); | 1164 | kfree(ext_csd); |
@@ -1241,6 +1248,9 @@ static void mmc_read_switch_caps(struct mmc_host *host) | |||
1241 | unsigned char *status; | 1248 | unsigned char *status; |
1242 | struct scatterlist sg; | 1249 | struct scatterlist sg; |
1243 | 1250 | ||
1251 | if (!(host->caps & MMC_CAP_SD_HIGHSPEED)) | ||
1252 | return; | ||
1253 | |||
1244 | status = kmalloc(64, GFP_KERNEL); | 1254 | status = kmalloc(64, GFP_KERNEL); |
1245 | if (!status) { | 1255 | if (!status) { |
1246 | printk(KERN_WARNING "%s: Unable to allocate buffer for " | 1256 | printk(KERN_WARNING "%s: Unable to allocate buffer for " |
@@ -1332,6 +1342,9 @@ static void mmc_read_switch_caps(struct mmc_host *host) | |||
1332 | } | 1342 | } |
1333 | 1343 | ||
1334 | mmc_card_set_highspeed(card); | 1344 | mmc_card_set_highspeed(card); |
1345 | |||
1346 | host->ios.timing = MMC_TIMING_SD_HS; | ||
1347 | mmc_set_ios(host); | ||
1335 | } | 1348 | } |
1336 | 1349 | ||
1337 | kfree(status); | 1350 | kfree(status); |
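The mmc.c changes make two things explicit: the core only attempts the high-speed SWITCH when the host advertises MMC_CAP_MMC_HIGHSPEED / MMC_CAP_SD_HIGHSPEED, and once the card accepts it the core records the result in ios.timing and pushes it to the host through mmc_set_ios(). Seen from a host driver, the contract looks roughly like this (a sketch, not code from this patch):

#include <linux/kernel.h>
#include <linux/mmc/host.h>

static void example_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	/* the core now tells the host which timing it negotiated */
	if (ios->timing == MMC_TIMING_SD_HS)
		printk(KERN_DEBUG "example: high-speed timing selected\n");
	else
		printk(KERN_DEBUG "example: legacy timing selected\n");
}

static void example_advertise_caps(struct mmc_host *mmc)
{
	/* without these bits the core skips the switch entirely */
	mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
}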
diff --git a/drivers/mmc/sdhci.c b/drivers/mmc/sdhci.c index 7522f76b15ec..d749f08601b8 100644 --- a/drivers/mmc/sdhci.c +++ b/drivers/mmc/sdhci.c | |||
@@ -606,7 +606,6 @@ static void sdhci_finish_command(struct sdhci_host *host) | |||
606 | static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) | 606 | static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) |
607 | { | 607 | { |
608 | int div; | 608 | int div; |
609 | u8 ctrl; | ||
610 | u16 clk; | 609 | u16 clk; |
611 | unsigned long timeout; | 610 | unsigned long timeout; |
612 | 611 | ||
@@ -615,13 +614,6 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) | |||
615 | 614 | ||
616 | writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL); | 615 | writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL); |
617 | 616 | ||
618 | ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL); | ||
619 | if (clock > 25000000) | ||
620 | ctrl |= SDHCI_CTRL_HISPD; | ||
621 | else | ||
622 | ctrl &= ~SDHCI_CTRL_HISPD; | ||
623 | writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL); | ||
624 | |||
625 | if (clock == 0) | 617 | if (clock == 0) |
626 | goto out; | 618 | goto out; |
627 | 619 | ||
@@ -761,10 +753,17 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
761 | sdhci_set_power(host, ios->vdd); | 753 | sdhci_set_power(host, ios->vdd); |
762 | 754 | ||
763 | ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL); | 755 | ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL); |
756 | |||
764 | if (ios->bus_width == MMC_BUS_WIDTH_4) | 757 | if (ios->bus_width == MMC_BUS_WIDTH_4) |
765 | ctrl |= SDHCI_CTRL_4BITBUS; | 758 | ctrl |= SDHCI_CTRL_4BITBUS; |
766 | else | 759 | else |
767 | ctrl &= ~SDHCI_CTRL_4BITBUS; | 760 | ctrl &= ~SDHCI_CTRL_4BITBUS; |
761 | |||
762 | if (ios->timing == MMC_TIMING_SD_HS) | ||
763 | ctrl |= SDHCI_CTRL_HISPD; | ||
764 | else | ||
765 | ctrl &= ~SDHCI_CTRL_HISPD; | ||
766 | |||
768 | writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL); | 767 | writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL); |
769 | 768 | ||
770 | mmiowb(); | 769 | mmiowb(); |
@@ -994,7 +993,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id) | |||
994 | 993 | ||
995 | intmask = readl(host->ioaddr + SDHCI_INT_STATUS); | 994 | intmask = readl(host->ioaddr + SDHCI_INT_STATUS); |
996 | 995 | ||
997 | if (!intmask) { | 996 | if (!intmask || intmask == 0xffffffff) { |
998 | result = IRQ_NONE; | 997 | result = IRQ_NONE; |
999 | goto out; | 998 | goto out; |
1000 | } | 999 | } |
@@ -1080,6 +1079,13 @@ static int sdhci_suspend (struct pci_dev *pdev, pm_message_t state) | |||
1080 | 1079 | ||
1081 | pci_save_state(pdev); | 1080 | pci_save_state(pdev); |
1082 | pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); | 1081 | pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); |
1082 | |||
1083 | for (i = 0;i < chip->num_slots;i++) { | ||
1084 | if (!chip->hosts[i]) | ||
1085 | continue; | ||
1086 | free_irq(chip->hosts[i]->irq, chip->hosts[i]); | ||
1087 | } | ||
1088 | |||
1083 | pci_disable_device(pdev); | 1089 | pci_disable_device(pdev); |
1084 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | 1090 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); |
1085 | 1091 | ||
@@ -1108,6 +1114,11 @@ static int sdhci_resume (struct pci_dev *pdev) | |||
1108 | continue; | 1114 | continue; |
1109 | if (chip->hosts[i]->flags & SDHCI_USE_DMA) | 1115 | if (chip->hosts[i]->flags & SDHCI_USE_DMA) |
1110 | pci_set_master(pdev); | 1116 | pci_set_master(pdev); |
1117 | ret = request_irq(chip->hosts[i]->irq, sdhci_irq, | ||
1118 | IRQF_SHARED, chip->hosts[i]->slot_descr, | ||
1119 | chip->hosts[i]); | ||
1120 | if (ret) | ||
1121 | return ret; | ||
1111 | sdhci_init(chip->hosts[i]); | 1122 | sdhci_init(chip->hosts[i]); |
1112 | mmiowb(); | 1123 | mmiowb(); |
1113 | ret = mmc_resume_host(chip->hosts[i]->mmc); | 1124 | ret = mmc_resume_host(chip->hosts[i]->mmc); |
@@ -1274,6 +1285,9 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) | |||
1274 | mmc->f_max = host->max_clk; | 1285 | mmc->f_max = host->max_clk; |
1275 | mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE | MMC_CAP_BYTEBLOCK; | 1286 | mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE | MMC_CAP_BYTEBLOCK; |
1276 | 1287 | ||
1288 | if (caps & SDHCI_CAN_DO_HISPD) | ||
1289 | mmc->caps |= MMC_CAP_SD_HIGHSPEED; | ||
1290 | |||
1277 | mmc->ocr_avail = 0; | 1291 | mmc->ocr_avail = 0; |
1278 | if (caps & SDHCI_CAN_VDD_330) | 1292 | if (caps & SDHCI_CAN_VDD_330) |
1279 | mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34; | 1293 | mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34; |
@@ -1282,13 +1296,6 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) | |||
1282 | if (caps & SDHCI_CAN_VDD_180) | 1296 | if (caps & SDHCI_CAN_VDD_180) |
1283 | mmc->ocr_avail |= MMC_VDD_17_18|MMC_VDD_18_19; | 1297 | mmc->ocr_avail |= MMC_VDD_17_18|MMC_VDD_18_19; |
1284 | 1298 | ||
1285 | if ((host->max_clk > 25000000) && !(caps & SDHCI_CAN_DO_HISPD)) { | ||
1286 | printk(KERN_ERR "%s: Controller reports > 25 MHz base clock," | ||
1287 | " but no high speed support.\n", | ||
1288 | host->slot_descr); | ||
1289 | mmc->f_max = 25000000; | ||
1290 | } | ||
1291 | |||
1292 | if (mmc->ocr_avail == 0) { | 1299 | if (mmc->ocr_avail == 0) { |
1293 | printk(KERN_ERR "%s: Hardware doesn't report any " | 1300 | printk(KERN_ERR "%s: Hardware doesn't report any " |
1294 | "support voltages.\n", host->slot_descr); | 1301 | "support voltages.\n", host->slot_descr); |
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index 716a47210aa3..72995777f809 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c | |||
@@ -822,11 +822,17 @@ static int vortex_resume(struct pci_dev *pdev) | |||
822 | { | 822 | { |
823 | struct net_device *dev = pci_get_drvdata(pdev); | 823 | struct net_device *dev = pci_get_drvdata(pdev); |
824 | struct vortex_private *vp = netdev_priv(dev); | 824 | struct vortex_private *vp = netdev_priv(dev); |
825 | int err; | ||
825 | 826 | ||
826 | if (dev && vp) { | 827 | if (dev && vp) { |
827 | pci_set_power_state(pdev, PCI_D0); | 828 | pci_set_power_state(pdev, PCI_D0); |
828 | pci_restore_state(pdev); | 829 | pci_restore_state(pdev); |
829 | pci_enable_device(pdev); | 830 | err = pci_enable_device(pdev); |
831 | if (err) { | ||
832 | printk(KERN_WARNING "%s: Could not enable device \n", | ||
833 | dev->name); | ||
834 | return err; | ||
835 | } | ||
830 | pci_set_master(pdev); | 836 | pci_set_master(pdev); |
831 | if (request_irq(dev->irq, vp->full_bus_master_rx ? | 837 | if (request_irq(dev->irq, vp->full_bus_master_rx ? |
832 | &boomerang_interrupt : &vortex_interrupt, IRQF_SHARED, dev->name, dev)) { | 838 | &boomerang_interrupt : &vortex_interrupt, IRQF_SHARED, dev->name, dev)) { |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index ea73ebff4387..e4724d874e7c 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -60,6 +60,7 @@ | |||
60 | #include <linux/errno.h> | 60 | #include <linux/errno.h> |
61 | #include <linux/netdevice.h> | 61 | #include <linux/netdevice.h> |
62 | #include <linux/inetdevice.h> | 62 | #include <linux/inetdevice.h> |
63 | #include <linux/igmp.h> | ||
63 | #include <linux/etherdevice.h> | 64 | #include <linux/etherdevice.h> |
64 | #include <linux/skbuff.h> | 65 | #include <linux/skbuff.h> |
65 | #include <net/sock.h> | 66 | #include <net/sock.h> |
@@ -861,6 +862,28 @@ static void bond_mc_delete(struct bonding *bond, void *addr, int alen) | |||
861 | } | 862 | } |
862 | } | 863 | } |
863 | 864 | ||
865 | |||
866 | /* | ||
867 | * Retrieve the list of registered multicast addresses for the bonding | ||
868 | * device and retransmit an IGMP JOIN request to the current active | ||
869 | * slave. | ||
870 | */ | ||
871 | static void bond_resend_igmp_join_requests(struct bonding *bond) | ||
872 | { | ||
873 | struct in_device *in_dev; | ||
874 | struct ip_mc_list *im; | ||
875 | |||
876 | rcu_read_lock(); | ||
877 | in_dev = __in_dev_get_rcu(bond->dev); | ||
878 | if (in_dev) { | ||
879 | for (im = in_dev->mc_list; im; im = im->next) { | ||
880 | ip_mc_rejoin_group(im); | ||
881 | } | ||
882 | } | ||
883 | |||
884 | rcu_read_unlock(); | ||
885 | } | ||
886 | |||
864 | /* | 887 | /* |
865 | * Totally destroys the mc_list in bond | 888 | * Totally destroys the mc_list in bond |
866 | */ | 889 | */ |
@@ -874,6 +897,7 @@ static void bond_mc_list_destroy(struct bonding *bond) | |||
874 | kfree(dmi); | 897 | kfree(dmi); |
875 | dmi = bond->mc_list; | 898 | dmi = bond->mc_list; |
876 | } | 899 | } |
900 | bond->mc_list = NULL; | ||
877 | } | 901 | } |
878 | 902 | ||
879 | /* | 903 | /* |
@@ -967,6 +991,7 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active, struct | |||
967 | for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next) { | 991 | for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next) { |
968 | dev_mc_add(new_active->dev, dmi->dmi_addr, dmi->dmi_addrlen, 0); | 992 | dev_mc_add(new_active->dev, dmi->dmi_addr, dmi->dmi_addrlen, 0); |
969 | } | 993 | } |
994 | bond_resend_igmp_join_requests(bond); | ||
970 | } | 995 | } |
971 | } | 996 | } |
972 | 997 | ||
@@ -3423,15 +3448,21 @@ void bond_register_arp(struct bonding *bond) | |||
3423 | { | 3448 | { |
3424 | struct packet_type *pt = &bond->arp_mon_pt; | 3449 | struct packet_type *pt = &bond->arp_mon_pt; |
3425 | 3450 | ||
3451 | if (pt->type) | ||
3452 | return; | ||
3453 | |||
3426 | pt->type = htons(ETH_P_ARP); | 3454 | pt->type = htons(ETH_P_ARP); |
3427 | pt->dev = NULL; /*bond->dev;XXX*/ | 3455 | pt->dev = bond->dev; |
3428 | pt->func = bond_arp_rcv; | 3456 | pt->func = bond_arp_rcv; |
3429 | dev_add_pack(pt); | 3457 | dev_add_pack(pt); |
3430 | } | 3458 | } |
3431 | 3459 | ||
3432 | void bond_unregister_arp(struct bonding *bond) | 3460 | void bond_unregister_arp(struct bonding *bond) |
3433 | { | 3461 | { |
3434 | dev_remove_pack(&bond->arp_mon_pt); | 3462 | struct packet_type *pt = &bond->arp_mon_pt; |
3463 | |||
3464 | dev_remove_pack(pt); | ||
3465 | pt->type = 0; | ||
3435 | } | 3466 | } |
3436 | 3467 | ||
3437 | /*---------------------------- Hashing Policies -----------------------------*/ | 3468 | /*---------------------------- Hashing Policies -----------------------------*/ |
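bond_register_arp()/bond_unregister_arp() now use pt->type as an "already registered" marker, and bind the handler to bond->dev instead of every device, so repeated register/unregister calls cannot double-add or double-remove the packet_type. The guard pattern on its own, as a hedged sketch:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	kfree_skb(skb);
	return 0;
}

static void example_register_arp(struct packet_type *pt, struct net_device *dev)
{
	if (pt->type)			/* already registered: nothing to do */
		return;
	pt->type = htons(ETH_P_ARP);
	pt->dev = dev;			/* only ARP arriving on this device */
	pt->func = example_rcv;
	dev_add_pack(pt);
}

static void example_unregister_arp(struct packet_type *pt)
{
	if (!pt->type)
		return;
	dev_remove_pack(pt);
	pt->type = 0;			/* allow a later re-register */
}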
@@ -4011,42 +4042,6 @@ out: | |||
4011 | return 0; | 4042 | return 0; |
4012 | } | 4043 | } |
4013 | 4044 | ||
4014 | static void bond_activebackup_xmit_copy(struct sk_buff *skb, | ||
4015 | struct bonding *bond, | ||
4016 | struct slave *slave) | ||
4017 | { | ||
4018 | struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC); | ||
4019 | struct ethhdr *eth_data; | ||
4020 | u8 *hwaddr; | ||
4021 | int res; | ||
4022 | |||
4023 | if (!skb2) { | ||
4024 | printk(KERN_ERR DRV_NAME ": Error: " | ||
4025 | "bond_activebackup_xmit_copy(): skb_copy() failed\n"); | ||
4026 | return; | ||
4027 | } | ||
4028 | |||
4029 | skb2->mac.raw = (unsigned char *)skb2->data; | ||
4030 | eth_data = eth_hdr(skb2); | ||
4031 | |||
4032 | /* Pick an appropriate source MAC address | ||
4033 | * -- use slave's perm MAC addr, unless used by bond | ||
4034 | * -- otherwise, borrow active slave's perm MAC addr | ||
4035 | * since that will not be used | ||
4036 | */ | ||
4037 | hwaddr = slave->perm_hwaddr; | ||
4038 | if (!memcmp(eth_data->h_source, hwaddr, ETH_ALEN)) | ||
4039 | hwaddr = bond->curr_active_slave->perm_hwaddr; | ||
4040 | |||
4041 | /* Set source MAC address appropriately */ | ||
4042 | memcpy(eth_data->h_source, hwaddr, ETH_ALEN); | ||
4043 | |||
4044 | res = bond_dev_queue_xmit(bond, skb2, slave->dev); | ||
4045 | if (res) | ||
4046 | dev_kfree_skb(skb2); | ||
4047 | |||
4048 | return; | ||
4049 | } | ||
4050 | 4045 | ||
4051 | /* | 4046 | /* |
4052 | * in active-backup mode, we know that bond->curr_active_slave is always valid if | 4047 | * in active-backup mode, we know that bond->curr_active_slave is always valid if |
@@ -4067,21 +4062,6 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d | |||
4067 | if (!bond->curr_active_slave) | 4062 | if (!bond->curr_active_slave) |
4068 | goto out; | 4063 | goto out; |
4069 | 4064 | ||
4070 | /* Xmit IGMP frames on all slaves to ensure rapid fail-over | ||
4071 | for multicast traffic on snooping switches */ | ||
4072 | if (skb->protocol == __constant_htons(ETH_P_IP) && | ||
4073 | skb->nh.iph->protocol == IPPROTO_IGMP) { | ||
4074 | struct slave *slave, *active_slave; | ||
4075 | int i; | ||
4076 | |||
4077 | active_slave = bond->curr_active_slave; | ||
4078 | bond_for_each_slave_from_to(bond, slave, i, active_slave->next, | ||
4079 | active_slave->prev) | ||
4080 | if (IS_UP(slave->dev) && | ||
4081 | (slave->link == BOND_LINK_UP)) | ||
4082 | bond_activebackup_xmit_copy(skb, bond, slave); | ||
4083 | } | ||
4084 | |||
4085 | res = bond_dev_queue_xmit(bond, skb, bond->curr_active_slave->dev); | 4065 | res = bond_dev_queue_xmit(bond, skb, bond->curr_active_slave->dev); |
4086 | 4066 | ||
4087 | out: | 4067 | out: |
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 02b61b85b62c..d981d4c41dd3 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
@@ -1132,7 +1132,7 @@ static void gfar_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid) | |||
1132 | 1132 | ||
1133 | spin_lock_irqsave(&priv->rxlock, flags); | 1133 | spin_lock_irqsave(&priv->rxlock, flags); |
1134 | 1134 | ||
1135 | vlan_group_set_device(priv->vgrp, vid, NULL); | 1135 | vlan_group_set_device(priv->vlgrp, vid, NULL); |
1136 | 1136 | ||
1137 | spin_unlock_irqrestore(&priv->rxlock, flags); | 1137 | spin_unlock_irqrestore(&priv->rxlock, flags); |
1138 | } | 1138 | } |
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index be2ddbb6ef56..9ba21e0f27c5 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c | |||
@@ -1309,7 +1309,7 @@ static void mv643xx_init_ethtool_cmd(struct net_device *dev, int phy_address, | |||
1309 | static int mv643xx_eth_probe(struct platform_device *pdev) | 1309 | static int mv643xx_eth_probe(struct platform_device *pdev) |
1310 | { | 1310 | { |
1311 | struct mv643xx_eth_platform_data *pd; | 1311 | struct mv643xx_eth_platform_data *pd; |
1312 | int port_num = pdev->id; | 1312 | int port_num; |
1313 | struct mv643xx_private *mp; | 1313 | struct mv643xx_private *mp; |
1314 | struct net_device *dev; | 1314 | struct net_device *dev; |
1315 | u8 *p; | 1315 | u8 *p; |
@@ -1319,6 +1319,12 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
1319 | int duplex = DUPLEX_HALF; | 1319 | int duplex = DUPLEX_HALF; |
1320 | int speed = 0; /* default to auto-negotiation */ | 1320 | int speed = 0; /* default to auto-negotiation */ |
1321 | 1321 | ||
1322 | pd = pdev->dev.platform_data; | ||
1323 | if (pd == NULL) { | ||
1324 | printk(KERN_ERR "No mv643xx_eth_platform_data\n"); | ||
1325 | return -ENODEV; | ||
1326 | } | ||
1327 | |||
1322 | dev = alloc_etherdev(sizeof(struct mv643xx_private)); | 1328 | dev = alloc_etherdev(sizeof(struct mv643xx_private)); |
1323 | if (!dev) | 1329 | if (!dev) |
1324 | return -ENOMEM; | 1330 | return -ENOMEM; |
@@ -1331,8 +1337,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
1331 | BUG_ON(!res); | 1337 | BUG_ON(!res); |
1332 | dev->irq = res->start; | 1338 | dev->irq = res->start; |
1333 | 1339 | ||
1334 | mp->port_num = port_num; | ||
1335 | |||
1336 | dev->open = mv643xx_eth_open; | 1340 | dev->open = mv643xx_eth_open; |
1337 | dev->stop = mv643xx_eth_stop; | 1341 | dev->stop = mv643xx_eth_stop; |
1338 | dev->hard_start_xmit = mv643xx_eth_start_xmit; | 1342 | dev->hard_start_xmit = mv643xx_eth_start_xmit; |
@@ -1373,39 +1377,40 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
1373 | 1377 | ||
1374 | spin_lock_init(&mp->lock); | 1378 | spin_lock_init(&mp->lock); |
1375 | 1379 | ||
1380 | port_num = pd->port_number; | ||
1381 | |||
1376 | /* set default config values */ | 1382 | /* set default config values */ |
1377 | eth_port_uc_addr_get(dev, dev->dev_addr); | 1383 | eth_port_uc_addr_get(dev, dev->dev_addr); |
1378 | mp->rx_ring_size = MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE; | 1384 | mp->rx_ring_size = MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE; |
1379 | mp->tx_ring_size = MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE; | 1385 | mp->tx_ring_size = MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE; |
1380 | 1386 | ||
1381 | pd = pdev->dev.platform_data; | 1387 | if (is_valid_ether_addr(pd->mac_addr)) |
1382 | if (pd) { | 1388 | memcpy(dev->dev_addr, pd->mac_addr, 6); |
1383 | if (is_valid_ether_addr(pd->mac_addr)) | ||
1384 | memcpy(dev->dev_addr, pd->mac_addr, 6); | ||
1385 | 1389 | ||
1386 | if (pd->phy_addr || pd->force_phy_addr) | 1390 | if (pd->phy_addr || pd->force_phy_addr) |
1387 | ethernet_phy_set(port_num, pd->phy_addr); | 1391 | ethernet_phy_set(port_num, pd->phy_addr); |
1388 | 1392 | ||
1389 | if (pd->rx_queue_size) | 1393 | if (pd->rx_queue_size) |
1390 | mp->rx_ring_size = pd->rx_queue_size; | 1394 | mp->rx_ring_size = pd->rx_queue_size; |
1391 | 1395 | ||
1392 | if (pd->tx_queue_size) | 1396 | if (pd->tx_queue_size) |
1393 | mp->tx_ring_size = pd->tx_queue_size; | 1397 | mp->tx_ring_size = pd->tx_queue_size; |
1394 | 1398 | ||
1395 | if (pd->tx_sram_size) { | 1399 | if (pd->tx_sram_size) { |
1396 | mp->tx_sram_size = pd->tx_sram_size; | 1400 | mp->tx_sram_size = pd->tx_sram_size; |
1397 | mp->tx_sram_addr = pd->tx_sram_addr; | 1401 | mp->tx_sram_addr = pd->tx_sram_addr; |
1398 | } | 1402 | } |
1399 | |||
1400 | if (pd->rx_sram_size) { | ||
1401 | mp->rx_sram_size = pd->rx_sram_size; | ||
1402 | mp->rx_sram_addr = pd->rx_sram_addr; | ||
1403 | } | ||
1404 | 1403 | ||
1405 | duplex = pd->duplex; | 1404 | if (pd->rx_sram_size) { |
1406 | speed = pd->speed; | 1405 | mp->rx_sram_size = pd->rx_sram_size; |
1406 | mp->rx_sram_addr = pd->rx_sram_addr; | ||
1407 | } | 1407 | } |
1408 | 1408 | ||
1409 | duplex = pd->duplex; | ||
1410 | speed = pd->speed; | ||
1411 | |||
1412 | mp->port_num = port_num; | ||
1413 | |||
1409 | /* Hook up MII support for ethtool */ | 1414 | /* Hook up MII support for ethtool */ |
1410 | mp->mii.dev = dev; | 1415 | mp->mii.dev = dev; |
1411 | mp->mii.mdio_read = mv643xx_mdio_read; | 1416 | mp->mii.mdio_read = mv643xx_mdio_read; |
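The mv643xx_eth probe now refuses to continue without platform data and takes the port number from pd->port_number rather than pdev->id, so a mis-registered platform device fails cleanly instead of dereferencing NULL or programming the wrong port. The general shape of that guard (hedged sketch with a made-up platform-data struct, not the driver code):

#include <linux/platform_device.h>

struct example_platform_data {
	int port_number;
};

static int example_probe(struct platform_device *pdev)
{
	struct example_platform_data *pd = pdev->dev.platform_data;

	if (pd == NULL) {
		dev_err(&pdev->dev, "missing platform data\n");
		return -ENODEV;
	}
	/* only now are pd->port_number and friends safe to read */
	return 0;
}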
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c index 5c57433cb306..c6172a77a6d7 100644 --- a/drivers/net/natsemi.c +++ b/drivers/net/natsemi.c | |||
@@ -2024,6 +2024,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev) | |||
2024 | struct netdev_private *np = netdev_priv(dev); | 2024 | struct netdev_private *np = netdev_priv(dev); |
2025 | void __iomem * ioaddr = ns_ioaddr(dev); | 2025 | void __iomem * ioaddr = ns_ioaddr(dev); |
2026 | unsigned entry; | 2026 | unsigned entry; |
2027 | unsigned long flags; | ||
2027 | 2028 | ||
2028 | /* Note: Ordering is important here, set the field with the | 2029 | /* Note: Ordering is important here, set the field with the |
2029 | "ownership" bit last, and only then increment cur_tx. */ | 2030 | "ownership" bit last, and only then increment cur_tx. */ |
@@ -2037,7 +2038,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev) | |||
2037 | 2038 | ||
2038 | np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]); | 2039 | np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]); |
2039 | 2040 | ||
2040 | spin_lock_irq(&np->lock); | 2041 | spin_lock_irqsave(&np->lock, flags); |
2041 | 2042 | ||
2042 | if (!np->hands_off) { | 2043 | if (!np->hands_off) { |
2043 | np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len); | 2044 | np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len); |
@@ -2056,7 +2057,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev) | |||
2056 | dev_kfree_skb_irq(skb); | 2057 | dev_kfree_skb_irq(skb); |
2057 | np->stats.tx_dropped++; | 2058 | np->stats.tx_dropped++; |
2058 | } | 2059 | } |
2059 | spin_unlock_irq(&np->lock); | 2060 | spin_unlock_irqrestore(&np->lock, flags); |
2060 | 2061 | ||
2061 | dev->trans_start = jiffies; | 2062 | dev->trans_start = jiffies; |
2062 | 2063 | ||
@@ -2222,6 +2223,8 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do) | |||
2222 | pkt_len = (desc_status & DescSizeMask) - 4; | 2223 | pkt_len = (desc_status & DescSizeMask) - 4; |
2223 | if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){ | 2224 | if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){ |
2224 | if (desc_status & DescMore) { | 2225 | if (desc_status & DescMore) { |
2226 | unsigned long flags; | ||
2227 | |||
2225 | if (netif_msg_rx_err(np)) | 2228 | if (netif_msg_rx_err(np)) |
2226 | printk(KERN_WARNING | 2229 | printk(KERN_WARNING |
2227 | "%s: Oversized(?) Ethernet " | 2230 | "%s: Oversized(?) Ethernet " |
@@ -2236,12 +2239,12 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do) | |||
2236 | * reset procedure documented in | 2239 | * reset procedure documented in |
2237 | * AN-1287. */ | 2240 | * AN-1287. */ |
2238 | 2241 | ||
2239 | spin_lock_irq(&np->lock); | 2242 | spin_lock_irqsave(&np->lock, flags); |
2240 | reset_rx(dev); | 2243 | reset_rx(dev); |
2241 | reinit_rx(dev); | 2244 | reinit_rx(dev); |
2242 | writel(np->ring_dma, ioaddr + RxRingPtr); | 2245 | writel(np->ring_dma, ioaddr + RxRingPtr); |
2243 | check_link(dev); | 2246 | check_link(dev); |
2244 | spin_unlock_irq(&np->lock); | 2247 | spin_unlock_irqrestore(&np->lock, flags); |
2245 | 2248 | ||
2246 | /* We'll enable RX on exit from this | 2249 | /* We'll enable RX on exit from this |
2247 | * function. */ | 2250 | * function. */ |
@@ -2396,8 +2399,19 @@ static struct net_device_stats *get_stats(struct net_device *dev) | |||
2396 | #ifdef CONFIG_NET_POLL_CONTROLLER | 2399 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2397 | static void natsemi_poll_controller(struct net_device *dev) | 2400 | static void natsemi_poll_controller(struct net_device *dev) |
2398 | { | 2401 | { |
2402 | struct netdev_private *np = netdev_priv(dev); | ||
2403 | |||
2399 | disable_irq(dev->irq); | 2404 | disable_irq(dev->irq); |
2400 | intr_handler(dev->irq, dev); | 2405 | |
2406 | /* | ||
2407 | * A real interrupt might have already reached us at this point | ||
2408 | * but NAPI might not have called us back yet. As the interrupt |||
2409 | * status register is cleared by reading, we should prevent an | ||
2410 | * interrupt loss in this case... | ||
2411 | */ | ||
2412 | if (!np->intr_status) | ||
2413 | intr_handler(dev->irq, dev); | ||
2414 | |||
2401 | enable_irq(dev->irq); | 2415 | enable_irq(dev->irq); |
2402 | } | 2416 | } |
2403 | #endif | 2417 | #endif |
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index 36f9d988278f..4d94ba7899bf 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c | |||
@@ -1234,14 +1234,14 @@ static void pcnet32_rx_entry(struct net_device *dev, | |||
1234 | skb_put(skb, pkt_len); /* Make room */ | 1234 | skb_put(skb, pkt_len); /* Make room */ |
1235 | pci_dma_sync_single_for_cpu(lp->pci_dev, | 1235 | pci_dma_sync_single_for_cpu(lp->pci_dev, |
1236 | lp->rx_dma_addr[entry], | 1236 | lp->rx_dma_addr[entry], |
1237 | PKT_BUF_SZ - 2, | 1237 | pkt_len, |
1238 | PCI_DMA_FROMDEVICE); | 1238 | PCI_DMA_FROMDEVICE); |
1239 | eth_copy_and_sum(skb, | 1239 | eth_copy_and_sum(skb, |
1240 | (unsigned char *)(lp->rx_skbuff[entry]->data), | 1240 | (unsigned char *)(lp->rx_skbuff[entry]->data), |
1241 | pkt_len, 0); | 1241 | pkt_len, 0); |
1242 | pci_dma_sync_single_for_device(lp->pci_dev, | 1242 | pci_dma_sync_single_for_device(lp->pci_dev, |
1243 | lp->rx_dma_addr[entry], | 1243 | lp->rx_dma_addr[entry], |
1244 | PKT_BUF_SZ - 2, | 1244 | pkt_len, |
1245 | PCI_DMA_FROMDEVICE); | 1245 | PCI_DMA_FROMDEVICE); |
1246 | } | 1246 | } |
1247 | lp->stats.rx_bytes += skb->len; | 1247 | lp->stats.rx_bytes += skb->len; |
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index 86e56f1f2f0b..ebfa2967cd68 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c | |||
@@ -140,7 +140,7 @@ static struct pppox_sock *__get_item(unsigned long sid, unsigned char *addr, int | |||
140 | 140 | ||
141 | ret = item_hash_table[hash]; | 141 | ret = item_hash_table[hash]; |
142 | 142 | ||
143 | while (ret && !(cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_dev->ifindex == ifindex)) | 143 | while (ret && !(cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_ifindex == ifindex)) |
144 | ret = ret->next; | 144 | ret = ret->next; |
145 | 145 | ||
146 | return ret; | 146 | return ret; |
@@ -153,7 +153,7 @@ static int __set_item(struct pppox_sock *po) | |||
153 | 153 | ||
154 | ret = item_hash_table[hash]; | 154 | ret = item_hash_table[hash]; |
155 | while (ret) { | 155 | while (ret) { |
156 | if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa) && ret->pppoe_dev->ifindex == po->pppoe_dev->ifindex) | 156 | if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa) && ret->pppoe_ifindex == po->pppoe_ifindex) |
157 | return -EALREADY; | 157 | return -EALREADY; |
158 | 158 | ||
159 | ret = ret->next; | 159 | ret = ret->next; |
@@ -174,7 +174,7 @@ static struct pppox_sock *__delete_item(unsigned long sid, char *addr, int ifind | |||
174 | src = &item_hash_table[hash]; | 174 | src = &item_hash_table[hash]; |
175 | 175 | ||
176 | while (ret) { | 176 | while (ret) { |
177 | if (cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_dev->ifindex == ifindex) { | 177 | if (cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_ifindex == ifindex) { |
178 | *src = ret->next; | 178 | *src = ret->next; |
179 | break; | 179 | break; |
180 | } | 180 | } |
@@ -529,7 +529,7 @@ static int pppoe_release(struct socket *sock) | |||
529 | 529 | ||
530 | po = pppox_sk(sk); | 530 | po = pppox_sk(sk); |
531 | if (po->pppoe_pa.sid) { | 531 | if (po->pppoe_pa.sid) { |
532 | delete_item(po->pppoe_pa.sid, po->pppoe_pa.remote, po->pppoe_dev->ifindex); | 532 | delete_item(po->pppoe_pa.sid, po->pppoe_pa.remote, po->pppoe_ifindex); |
533 | } | 533 | } |
534 | 534 | ||
535 | if (po->pppoe_dev) | 535 | if (po->pppoe_dev) |
@@ -577,7 +577,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, | |||
577 | pppox_unbind_sock(sk); | 577 | pppox_unbind_sock(sk); |
578 | 578 | ||
579 | /* Delete the old binding */ | 579 | /* Delete the old binding */ |
580 | delete_item(po->pppoe_pa.sid,po->pppoe_pa.remote,po->pppoe_dev->ifindex); | 580 | delete_item(po->pppoe_pa.sid,po->pppoe_pa.remote,po->pppoe_ifindex); |
581 | 581 | ||
582 | if(po->pppoe_dev) | 582 | if(po->pppoe_dev) |
583 | dev_put(po->pppoe_dev); | 583 | dev_put(po->pppoe_dev); |
@@ -597,6 +597,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, | |||
597 | goto end; | 597 | goto end; |
598 | 598 | ||
599 | po->pppoe_dev = dev; | 599 | po->pppoe_dev = dev; |
600 | po->pppoe_ifindex = dev->ifindex; | ||
600 | 601 | ||
601 | if (!(dev->flags & IFF_UP)) | 602 | if (!(dev->flags & IFF_UP)) |
602 | goto err_put; | 603 | goto err_put; |
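The pppoe change stores dev->ifindex in the socket at connect time (pppoe_ifindex) and compares that in the hash lookup and delete paths, instead of chasing po->pppoe_dev->ifindex, which may be a stale pointer once the underlying device goes away. The idea reduced to its core, as a hedged sketch with made-up names:

#include <linux/netdevice.h>

struct example_session {
	struct net_device *dev;	/* may become invalid behind our back */
	int ifindex;		/* stable key captured while dev was valid */
};

static void example_bind(struct example_session *s, struct net_device *dev)
{
	dev_hold(dev);
	s->dev = dev;
	s->ifindex = dev->ifindex;
}

static int example_match(const struct example_session *s, int ifindex)
{
	return s->ifindex == ifindex;	/* never dereferences s->dev */
}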
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c index fb2b53051635..b3750f284279 100644 --- a/drivers/net/sis900.c +++ b/drivers/net/sis900.c | |||
@@ -968,10 +968,10 @@ static void mdio_write(struct net_device *net_dev, int phy_id, int location, | |||
968 | 968 | ||
969 | static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr) | 969 | static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr) |
970 | { | 970 | { |
971 | int i = 0; | 971 | int i; |
972 | u16 status; | 972 | u16 status; |
973 | 973 | ||
974 | while (i++ < 2) | 974 | for (i = 0; i < 2; i++) |
975 | status = mdio_read(net_dev, phy_addr, MII_STATUS); | 975 | status = mdio_read(net_dev, phy_addr, MII_STATUS); |
976 | 976 | ||
977 | mdio_write( net_dev, phy_addr, MII_CONTROL, MII_CNTL_RESET ); | 977 | mdio_write( net_dev, phy_addr, MII_CONTROL, MII_CNTL_RESET ); |
@@ -1430,7 +1430,7 @@ static void sis900_auto_negotiate(struct net_device *net_dev, int phy_addr) | |||
1430 | int i = 0; | 1430 | int i = 0; |
1431 | u32 status; | 1431 | u32 status; |
1432 | 1432 | ||
1433 | while (i++ < 2) | 1433 | for (i = 0; i < 2; i++) |
1434 | status = mdio_read(net_dev, phy_addr, MII_STATUS); | 1434 | status = mdio_read(net_dev, phy_addr, MII_STATUS); |
1435 | 1435 | ||
1436 | if (!(status & MII_STAT_LINK)){ | 1436 | if (!(status & MII_STAT_LINK)){ |
@@ -1466,9 +1466,9 @@ static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex | |||
1466 | int phy_addr = sis_priv->cur_phy; | 1466 | int phy_addr = sis_priv->cur_phy; |
1467 | u32 status; | 1467 | u32 status; |
1468 | u16 autoadv, autorec; | 1468 | u16 autoadv, autorec; |
1469 | int i = 0; | 1469 | int i; |
1470 | 1470 | ||
1471 | while (i++ < 2) | 1471 | for (i = 0; i < 2; i++) |
1472 | status = mdio_read(net_dev, phy_addr, MII_STATUS); | 1472 | status = mdio_read(net_dev, phy_addr, MII_STATUS); |
1473 | 1473 | ||
1474 | if (!(status & MII_STAT_LINK)) | 1474 | if (!(status & MII_STAT_LINK)) |
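The three sis900 loops read MII_STATUS twice on purpose: the MII status register latches a link failure, so the first read may still return the stale latched value and only the second read reflects the current link state. Rewriting them as for loops makes that two-read intent explicit. As a sketch, reusing the driver's own mdio_read() helper and its MII_STATUS/MII_STAT_LINK constants:

static int example_link_up(struct net_device *net_dev, int phy_addr)
{
	u16 status;
	int i;

	for (i = 0; i < 2; i++)		/* first read flushes the latched value */
		status = mdio_read(net_dev, phy_addr, MII_STATUS);

	return (status & MII_STAT_LINK) != 0;
}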
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c index dacea4fd3337..c82befa209a2 100644 --- a/drivers/net/tulip/de2104x.c +++ b/drivers/net/tulip/de2104x.c | |||
@@ -1685,7 +1685,7 @@ static const struct ethtool_ops de_ethtool_ops = { | |||
1685 | .get_regs = de_get_regs, | 1685 | .get_regs = de_get_regs, |
1686 | }; | 1686 | }; |
1687 | 1687 | ||
1688 | static void __init de21040_get_mac_address (struct de_private *de) | 1688 | static void __devinit de21040_get_mac_address (struct de_private *de) |
1689 | { | 1689 | { |
1690 | unsigned i; | 1690 | unsigned i; |
1691 | 1691 | ||
@@ -1703,7 +1703,7 @@ static void __init de21040_get_mac_address (struct de_private *de) | |||
1703 | } | 1703 | } |
1704 | } | 1704 | } |
1705 | 1705 | ||
1706 | static void __init de21040_get_media_info(struct de_private *de) | 1706 | static void __devinit de21040_get_media_info(struct de_private *de) |
1707 | { | 1707 | { |
1708 | unsigned int i; | 1708 | unsigned int i; |
1709 | 1709 | ||
@@ -1765,7 +1765,7 @@ static unsigned __devinit tulip_read_eeprom(void __iomem *regs, int location, in | |||
1765 | return retval; | 1765 | return retval; |
1766 | } | 1766 | } |
1767 | 1767 | ||
1768 | static void __init de21041_get_srom_info (struct de_private *de) | 1768 | static void __devinit de21041_get_srom_info (struct de_private *de) |
1769 | { | 1769 | { |
1770 | unsigned i, sa_offset = 0, ofs; | 1770 | unsigned i, sa_offset = 0, ofs; |
1771 | u8 ee_data[DE_EEPROM_SIZE + 6] = {}; | 1771 | u8 ee_data[DE_EEPROM_SIZE + 6] = {}; |
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c index 7f59a3d4fda2..24a29c99ba94 100644 --- a/drivers/net/tulip/dmfe.c +++ b/drivers/net/tulip/dmfe.c | |||
@@ -143,9 +143,16 @@ | |||
143 | #define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s" */ | 143 | #define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s" */ |
144 | #define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s" */ | 144 | #define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s" */ |
145 | 145 | ||
146 | #define DMFE_DBUG(dbug_now, msg, value) if (dmfe_debug || (dbug_now)) printk(KERN_ERR DRV_NAME ": %s %lx\n", (msg), (long) (value)) | 146 | #define DMFE_DBUG(dbug_now, msg, value) \ |
147 | do { \ | ||
148 | if (dmfe_debug || (dbug_now)) \ | ||
149 | printk(KERN_ERR DRV_NAME ": %s %lx\n",\ | ||
150 | (msg), (long) (value)); \ | ||
151 | } while (0) | ||
147 | 152 | ||
148 | #define SHOW_MEDIA_TYPE(mode) printk(KERN_ERR DRV_NAME ": Change Speed to %sMhz %s duplex\n",mode & 1 ?"100":"10", mode & 4 ? "full":"half"); | 153 | #define SHOW_MEDIA_TYPE(mode) \ |
154 | printk (KERN_INFO DRV_NAME ": Change Speed to %sMhz %s duplex\n" , \ | ||
155 | (mode & 1) ? "100":"10", (mode & 4) ? "full":"half"); | ||
149 | 156 | ||
150 | 157 | ||
151 | /* CR9 definition: SROM/MII */ | 158 | /* CR9 definition: SROM/MII */ |
@@ -163,10 +170,20 @@ | |||
163 | 170 | ||
164 | #define SROM_V41_CODE 0x14 | 171 | #define SROM_V41_CODE 0x14 |
165 | 172 | ||
166 | #define SROM_CLK_WRITE(data, ioaddr) outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);udelay(5);outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr);udelay(5);outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);udelay(5); | 173 | #define SROM_CLK_WRITE(data, ioaddr) \ |
174 | outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \ | ||
175 | udelay(5); \ | ||
176 | outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \ | ||
177 | udelay(5); \ | ||
178 | outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \ | ||
179 | udelay(5); | ||
180 | |||
181 | #define __CHK_IO_SIZE(pci_id, dev_rev) \ | ||
182 | (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x02000030) ) ? \ | ||
183 | DM9102A_IO_SIZE: DM9102_IO_SIZE) | ||
167 | 184 | ||
168 | #define __CHK_IO_SIZE(pci_id, dev_rev) ( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x02000030) ) ? DM9102A_IO_SIZE: DM9102_IO_SIZE | 185 | #define CHK_IO_SIZE(pci_dev, dev_rev) \ |
169 | #define CHK_IO_SIZE(pci_dev, dev_rev) __CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, dev_rev) | 186 | (__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, dev_rev)) |
170 | 187 | ||
171 | /* Sten Check */ | 188 | /* Sten Check */ |
172 | #define DEVICE net_device | 189 | #define DEVICE net_device |
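Wrapping DMFE_DBUG in do { ... } while (0) is the standard way to make a multi-statement macro behave as a single statement, so it stays safe as the body of an un-braced if/else; the SROM_CLK_WRITE and CHK_IO_SIZE rewrites add line breaks and parenthesise the conditional. The hazard the wrapper avoids, in miniature (hedged userspace sketch):

#include <stdio.h>

#define TWO_STEPS_BAD()	puts("step 1"); puts("step 2")
#define TWO_STEPS_OK()	do { puts("step 1"); puts("step 2"); } while (0)

int main(void)
{
	int cond = 0;

	if (cond)
		TWO_STEPS_BAD();	/* "step 2" still prints: only the first
					 * statement is governed by the if */
	if (cond)
		TWO_STEPS_OK();		/* nothing prints, as intended */
	return 0;
}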
@@ -187,7 +204,7 @@ struct rx_desc { | |||
187 | struct dmfe_board_info { | 204 | struct dmfe_board_info { |
188 | u32 chip_id; /* Chip vendor/Device ID */ | 205 | u32 chip_id; /* Chip vendor/Device ID */ |
189 | u32 chip_revision; /* Chip revision */ | 206 | u32 chip_revision; /* Chip revision */ |
190 | struct DEVICE *dev; /* net device */ | 207 | struct DEVICE *next_dev; /* next device */ |
191 | struct pci_dev *pdev; /* PCI device */ | 208 | struct pci_dev *pdev; /* PCI device */ |
192 | spinlock_t lock; | 209 | spinlock_t lock; |
193 | 210 | ||
@@ -231,7 +248,6 @@ struct dmfe_board_info { | |||
231 | u8 media_mode; /* user specify media mode */ | 248 | u8 media_mode; /* user specify media mode */ |
232 | u8 op_mode; /* real work media mode */ | 249 | u8 op_mode; /* real work media mode */ |
233 | u8 phy_addr; | 250 | u8 phy_addr; |
234 | u8 link_failed; /* Ever link failed */ | ||
235 | u8 wait_reset; /* Hardware failed, need to reset */ | 251 | u8 wait_reset; /* Hardware failed, need to reset */ |
236 | u8 dm910x_chk_mode; /* Operating mode check */ | 252 | u8 dm910x_chk_mode; /* Operating mode check */ |
237 | u8 first_in_callback; /* Flag to record state */ | 253 | u8 first_in_callback; /* Flag to record state */ |
@@ -329,7 +345,7 @@ static void dmfe_program_DM9802(struct dmfe_board_info *); | |||
329 | static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * ); | 345 | static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * ); |
330 | static void dmfe_set_phyxcer(struct dmfe_board_info *); | 346 | static void dmfe_set_phyxcer(struct dmfe_board_info *); |
331 | 347 | ||
332 | /* DM910X network baord routine ---------------------------- */ | 348 | /* DM910X network board routine ---------------------------- */ |
333 | 349 | ||
334 | /* | 350 | /* |
335 | * Search DM910X board ,allocate space and register it | 351 | * Search DM910X board ,allocate space and register it |
@@ -356,7 +372,8 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev, | |||
356 | SET_NETDEV_DEV(dev, &pdev->dev); | 372 | SET_NETDEV_DEV(dev, &pdev->dev); |
357 | 373 | ||
358 | if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { | 374 | if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { |
359 | printk(KERN_WARNING DRV_NAME ": 32-bit PCI DMA not available.\n"); | 375 | printk(KERN_WARNING DRV_NAME |
376 | ": 32-bit PCI DMA not available.\n"); | ||
360 | err = -ENODEV; | 377 | err = -ENODEV; |
361 | goto err_out_free; | 378 | goto err_out_free; |
362 | } | 379 | } |
@@ -399,11 +416,12 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev, | |||
399 | /* Init system & device */ | 416 | /* Init system & device */ |
400 | db = netdev_priv(dev); | 417 | db = netdev_priv(dev); |
401 | 418 | ||
402 | db->dev = dev; | ||
403 | |||
404 | /* Allocate Tx/Rx descriptor memory */ | 419 | /* Allocate Tx/Rx descriptor memory */ |
405 | db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr); | 420 | db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * |
406 | db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr); | 421 | DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr); |
422 | |||
423 | db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * | ||
424 | TX_DESC_CNT + 4, &db->buf_pool_dma_ptr); | ||
407 | 425 | ||
408 | db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr; | 426 | db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr; |
409 | db->first_tx_desc_dma = db->desc_pool_dma_ptr; | 427 | db->first_tx_desc_dma = db->desc_pool_dma_ptr; |
@@ -428,7 +446,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev, | |||
428 | dev->poll_controller = &poll_dmfe; | 446 | dev->poll_controller = &poll_dmfe; |
429 | #endif | 447 | #endif |
430 | dev->ethtool_ops = &netdev_ethtool_ops; | 448 | dev->ethtool_ops = &netdev_ethtool_ops; |
431 | netif_carrier_off(db->dev); | 449 | netif_carrier_off(dev); |
432 | spin_lock_init(&db->lock); | 450 | spin_lock_init(&db->lock); |
433 | 451 | ||
434 | pci_read_config_dword(pdev, 0x50, &pci_pmr); | 452 | pci_read_config_dword(pdev, 0x50, &pci_pmr); |
@@ -440,7 +458,8 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev, | |||
440 | 458 | ||
441 | /* read 64 word srom data */ | 459 | /* read 64 word srom data */ |
442 | for (i = 0; i < 64; i++) | 460 | for (i = 0; i < 64; i++) |
443 | ((u16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i)); | 461 | ((u16 *) db->srom)[i] = |
462 | cpu_to_le16(read_srom_word(db->ioaddr, i)); | ||
444 | 463 | ||
445 | /* Set Node address */ | 464 | /* Set Node address */ |
446 | for (i = 0; i < 6; i++) | 465 | for (i = 0; i < 6; i++) |
@@ -482,14 +501,17 @@ static void __devexit dmfe_remove_one (struct pci_dev *pdev) | |||
482 | DMFE_DBUG(0, "dmfe_remove_one()", 0); | 501 | DMFE_DBUG(0, "dmfe_remove_one()", 0); |
483 | 502 | ||
484 | if (dev) { | 503 | if (dev) { |
504 | |||
505 | unregister_netdev(dev); | ||
506 | |||
485 | pci_free_consistent(db->pdev, sizeof(struct tx_desc) * | 507 | pci_free_consistent(db->pdev, sizeof(struct tx_desc) * |
486 | DESC_ALL_CNT + 0x20, db->desc_pool_ptr, | 508 | DESC_ALL_CNT + 0x20, db->desc_pool_ptr, |
487 | db->desc_pool_dma_ptr); | 509 | db->desc_pool_dma_ptr); |
488 | pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, | 510 | pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, |
489 | db->buf_pool_ptr, db->buf_pool_dma_ptr); | 511 | db->buf_pool_ptr, db->buf_pool_dma_ptr); |
490 | unregister_netdev(dev); | ||
491 | pci_release_regions(pdev); | 512 | pci_release_regions(pdev); |
492 | free_netdev(dev); /* free board information */ | 513 | free_netdev(dev); /* free board information */ |
514 | |||
493 | pci_set_drvdata(pdev, NULL); | 515 | pci_set_drvdata(pdev, NULL); |
494 | } | 516 | } |
495 | 517 | ||
@@ -509,7 +531,8 @@ static int dmfe_open(struct DEVICE *dev) | |||
509 | 531 | ||
510 | DMFE_DBUG(0, "dmfe_open", 0); | 532 | DMFE_DBUG(0, "dmfe_open", 0); |
511 | 533 | ||
512 | ret = request_irq(dev->irq, &dmfe_interrupt, IRQF_SHARED, dev->name, dev); | 534 | ret = request_irq(dev->irq, &dmfe_interrupt, |
535 | IRQF_SHARED, dev->name, dev); | ||
513 | if (ret) | 536 | if (ret) |
514 | return ret; | 537 | return ret; |
515 | 538 | ||
@@ -518,7 +541,6 @@ static int dmfe_open(struct DEVICE *dev) | |||
518 | db->tx_packet_cnt = 0; | 541 | db->tx_packet_cnt = 0; |
519 | db->tx_queue_cnt = 0; | 542 | db->tx_queue_cnt = 0; |
520 | db->rx_avail_cnt = 0; | 543 | db->rx_avail_cnt = 0; |
521 | db->link_failed = 1; | ||
522 | db->wait_reset = 0; | 544 | db->wait_reset = 0; |
523 | 545 | ||
524 | db->first_in_callback = 0; | 546 | db->first_in_callback = 0; |
@@ -650,7 +672,8 @@ static int dmfe_start_xmit(struct sk_buff *skb, struct DEVICE *dev) | |||
650 | /* No Tx resource check, it never happen nromally */ | 672 | /* No Tx resource check, it never happen nromally */ |
651 | if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) { | 673 | if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) { |
652 | spin_unlock_irqrestore(&db->lock, flags); | 674 | spin_unlock_irqrestore(&db->lock, flags); |
653 | printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n", db->tx_queue_cnt); | 675 | printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n", |
676 | db->tx_queue_cnt); | ||
654 | return 1; | 677 | return 1; |
655 | } | 678 | } |
656 | 679 | ||
@@ -722,7 +745,8 @@ static int dmfe_stop(struct DEVICE *dev) | |||
722 | 745 | ||
723 | #if 0 | 746 | #if 0 |
724 | /* show statistic counter */ | 747 | /* show statistic counter */ |
725 | printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n", | 748 | printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx" |
749 | " LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n", | ||
726 | db->tx_fifo_underrun, db->tx_excessive_collision, | 750 | db->tx_fifo_underrun, db->tx_excessive_collision, |
727 | db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier, | 751 | db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier, |
728 | db->tx_jabber_timeout, db->reset_count, db->reset_cr8, | 752 | db->tx_jabber_timeout, db->reset_count, db->reset_cr8, |
@@ -905,7 +929,7 @@ static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag) | |||
905 | static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db) | 929 | static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db) |
906 | { | 930 | { |
907 | struct rx_desc *rxptr; | 931 | struct rx_desc *rxptr; |
908 | struct sk_buff *skb; | 932 | struct sk_buff *skb, *newskb; |
909 | int rxlen; | 933 | int rxlen; |
910 | u32 rdes0; | 934 | u32 rdes0; |
911 | 935 | ||
@@ -919,7 +943,9 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db) | |||
919 | db->rx_avail_cnt--; | 943 | db->rx_avail_cnt--; |
920 | db->interval_rx_cnt++; | 944 | db->interval_rx_cnt++; |
921 | 945 | ||
922 | pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2), RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE); | 946 | pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2), |
947 | RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE); | ||
948 | |||
923 | if ( (rdes0 & 0x300) != 0x300) { | 949 | if ( (rdes0 & 0x300) != 0x300) { |
924 | /* A packet without First/Last flag */ | 950 | /* A packet without First/Last flag */ |
925 | /* reuse this SKB */ | 951 | /* reuse this SKB */ |
@@ -956,9 +982,11 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db) | |||
956 | } else { | 982 | } else { |
957 | /* Good packet, send to upper layer */ | 983 | /* Good packet, send to upper layer */ |
958 | /* Shorst packet used new SKB */ | 984 | /* Shorst packet used new SKB */ |
959 | if ( (rxlen < RX_COPY_SIZE) && | 985 | if ((rxlen < RX_COPY_SIZE) && |
960 | ( (skb = dev_alloc_skb(rxlen + 2) ) | 986 | ((newskb = dev_alloc_skb(rxlen + 2)) |
961 | != NULL) ) { | 987 | != NULL)) { |
988 | |||
989 | skb = newskb; | ||
962 | /* size less than COPY_SIZE, allocate a rxlen SKB */ | 990 | /* size less than COPY_SIZE, allocate a rxlen SKB */ |
963 | skb->dev = dev; | 991 | skb->dev = dev; |
964 | skb_reserve(skb, 2); /* 16byte align */ | 992 | skb_reserve(skb, 2); /* 16byte align */ |
@@ -1069,6 +1097,8 @@ static void dmfe_timer(unsigned long data) | |||
1069 | struct dmfe_board_info *db = netdev_priv(dev); | 1097 | struct dmfe_board_info *db = netdev_priv(dev); |
1070 | unsigned long flags; | 1098 | unsigned long flags; |
1071 | 1099 | ||
1100 | int link_ok, link_ok_phy; | ||
1101 | |||
1072 | DMFE_DBUG(0, "dmfe_timer()", 0); | 1102 | DMFE_DBUG(0, "dmfe_timer()", 0); |
1073 | spin_lock_irqsave(&db->lock, flags); | 1103 | spin_lock_irqsave(&db->lock, flags); |
1074 | 1104 | ||
@@ -1078,7 +1108,8 @@ static void dmfe_timer(unsigned long data) | |||
1078 | if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) { | 1108 | if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) { |
1079 | db->cr6_data &= ~0x40000; | 1109 | db->cr6_data &= ~0x40000; |
1080 | update_cr6(db->cr6_data, db->ioaddr); | 1110 | update_cr6(db->cr6_data, db->ioaddr); |
1081 | phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id); | 1111 | phy_write(db->ioaddr, |
1112 | db->phy_addr, 0, 0x1000, db->chip_id); | ||
1082 | db->cr6_data |= 0x40000; | 1113 | db->cr6_data |= 0x40000; |
1083 | update_cr6(db->cr6_data, db->ioaddr); | 1114 | update_cr6(db->cr6_data, db->ioaddr); |
1084 | db->timer.expires = DMFE_TIMER_WUT + HZ * 2; | 1115 | db->timer.expires = DMFE_TIMER_WUT + HZ * 2; |
@@ -1139,21 +1170,41 @@ static void dmfe_timer(unsigned long data) | |||
1139 | (db->chip_revision == 0x02000010)) ) { | 1170 | (db->chip_revision == 0x02000010)) ) { |
1140 | /* DM9102A Chip */ | 1171 | /* DM9102A Chip */ |
1141 | if (tmp_cr12 & 2) | 1172 | if (tmp_cr12 & 2) |
1142 | tmp_cr12 = 0x0; /* Link failed */ | 1173 | link_ok = 0; |
1143 | else | 1174 | else |
1144 | tmp_cr12 = 0x3; /* Link OK */ | 1175 | link_ok = 1; |
1145 | } | 1176 | } |
1177 | else | ||
1178 | 		/* 0x43 is used instead of 0x3 because bit 6 should represent | ||
1179 | 		   link status of external PHY */ | ||
1180 | link_ok = (tmp_cr12 & 0x43) ? 1 : 0; | ||
1181 | |||
1182 | |||
1183 | 	/* If the chip reports that the link has failed, it could be because the | ||
1184 | 	   external PHY link status pin is not connected correctly to the chip. | ||
1185 | 	   To be sure, ask the PHY too. | ||
1186 | 	*/ | ||
1187 | |||
1188 | 	/* need a dummy read because of PHY's register latch */ | ||
1189 | phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id); | ||
1190 | link_ok_phy = (phy_read (db->ioaddr, | ||
1191 | db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0; | ||
1146 | 1192 | ||
1147 | if ( !(tmp_cr12 & 0x3) && !db->link_failed ) { | 1193 | if (link_ok_phy != link_ok) { |
1194 | DMFE_DBUG (0, "PHY and chip report different link status", 0); | ||
1195 | link_ok = link_ok | link_ok_phy; | ||
1196 | } | ||
1197 | |||
1198 | if ( !link_ok && netif_carrier_ok(dev)) { | ||
1148 | /* Link Failed */ | 1199 | /* Link Failed */ |
1149 | DMFE_DBUG(0, "Link Failed", tmp_cr12); | 1200 | DMFE_DBUG(0, "Link Failed", tmp_cr12); |
1150 | db->link_failed = 1; | 1201 | netif_carrier_off(dev); |
1151 | netif_carrier_off(db->dev); | ||
1152 | 1202 | ||
1153 | /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */ | 1203 | /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */ |
1154 | /* AUTO or force 1M Homerun/Longrun don't need */ | 1204 | /* AUTO or force 1M Homerun/Longrun don't need */ |
1155 | if ( !(db->media_mode & 0x38) ) | 1205 | if ( !(db->media_mode & 0x38) ) |
1156 | phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id); | 1206 | phy_write(db->ioaddr, db->phy_addr, |
1207 | 0, 0x1000, db->chip_id); | ||
1157 | 1208 | ||
1158 | /* AUTO mode, if INT phyxcer link failed, select EXT device */ | 1209 | /* AUTO mode, if INT phyxcer link failed, select EXT device */ |
1159 | if (db->media_mode & DMFE_AUTO) { | 1210 | if (db->media_mode & DMFE_AUTO) { |
@@ -1162,21 +1213,19 @@ static void dmfe_timer(unsigned long data) | |||
1162 | db->cr6_data&=~0x00000200; /* bit9=0, HD mode */ | 1213 | db->cr6_data&=~0x00000200; /* bit9=0, HD mode */ |
1163 | update_cr6(db->cr6_data, db->ioaddr); | 1214 | update_cr6(db->cr6_data, db->ioaddr); |
1164 | } | 1215 | } |
1165 | } else | 1216 | } else if (!netif_carrier_ok(dev)) { |
1166 | if ((tmp_cr12 & 0x3) && db->link_failed) { | 1217 | |
1167 | DMFE_DBUG(0, "Link link OK", tmp_cr12); | 1218 | DMFE_DBUG(0, "Link link OK", tmp_cr12); |
1168 | db->link_failed = 0; | 1219 | |
1169 | 1220 | /* Auto Sense Speed */ | |
1170 | /* Auto Sense Speed */ | 1221 | if ( !(db->media_mode & DMFE_AUTO) || !dmfe_sense_speed(db)) { |
1171 | if ( (db->media_mode & DMFE_AUTO) && | 1222 | netif_carrier_on(dev); |
1172 | dmfe_sense_speed(db) ) | 1223 | SHOW_MEDIA_TYPE(db->op_mode); |
1173 | db->link_failed = 1; | ||
1174 | else | ||
1175 | netif_carrier_on(db->dev); | ||
1176 | dmfe_process_mode(db); | ||
1177 | /* SHOW_MEDIA_TYPE(db->op_mode); */ | ||
1178 | } | 1224 | } |
1179 | 1225 | ||
1226 | dmfe_process_mode(db); | ||
1227 | } | ||
1228 | |||
1180 | /* HPNA remote command check */ | 1229 | /* HPNA remote command check */ |
1181 | if (db->HPNA_command & 0xf00) { | 1230 | if (db->HPNA_command & 0xf00) { |
1182 | db->HPNA_timer--; | 1231 | db->HPNA_timer--; |
@@ -1221,7 +1270,7 @@ static void dmfe_dynamic_reset(struct DEVICE *dev) | |||
1221 | db->tx_packet_cnt = 0; | 1270 | db->tx_packet_cnt = 0; |
1222 | db->tx_queue_cnt = 0; | 1271 | db->tx_queue_cnt = 0; |
1223 | db->rx_avail_cnt = 0; | 1272 | db->rx_avail_cnt = 0; |
1224 | db->link_failed = 1; | 1273 | netif_carrier_off(dev); |
1225 | db->wait_reset = 0; | 1274 | db->wait_reset = 0; |
1226 | 1275 | ||
1227 | 	/* Re-initialize DM910X board */ | 1276 | 	/* Re-initialize DM910X board */ |
@@ -1259,7 +1308,8 @@ static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb) | |||
1259 | 1308 | ||
1260 | if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) { | 1309 | if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) { |
1261 | rxptr->rx_skb_ptr = skb; | 1310 | rxptr->rx_skb_ptr = skb; |
1262 | rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) ); | 1311 | rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, |
1312 | skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) ); | ||
1263 | wmb(); | 1313 | wmb(); |
1264 | rxptr->rdes0 = cpu_to_le32(0x80000000); | 1314 | rxptr->rdes0 = cpu_to_le32(0x80000000); |
1265 | db->rx_avail_cnt++; | 1315 | db->rx_avail_cnt++; |
@@ -1291,8 +1341,11 @@ static void dmfe_descriptor_init(struct dmfe_board_info *db, unsigned long ioadd | |||
1291 | outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */ | 1341 | outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */ |
1292 | 1342 | ||
1293 | /* rx descriptor start pointer */ | 1343 | /* rx descriptor start pointer */ |
1294 | db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT; | 1344 | db->first_rx_desc = (void *)db->first_tx_desc + |
1295 | db->first_rx_desc_dma = db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT; | 1345 | sizeof(struct tx_desc) * TX_DESC_CNT; |
1346 | |||
1347 | db->first_rx_desc_dma = db->first_tx_desc_dma + | ||
1348 | sizeof(struct tx_desc) * TX_DESC_CNT; | ||
1296 | db->rx_insert_ptr = db->first_rx_desc; | 1349 | db->rx_insert_ptr = db->first_rx_desc; |
1297 | db->rx_ready_ptr = db->first_rx_desc; | 1350 | db->rx_ready_ptr = db->first_rx_desc; |
1298 | outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */ | 1351 | outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */ |
@@ -1470,7 +1523,8 @@ static void allocate_rx_buffer(struct dmfe_board_info *db) | |||
1470 | if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL ) | 1523 | if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL ) |
1471 | break; | 1524 | break; |
1472 | rxptr->rx_skb_ptr = skb; /* FIXME (?) */ | 1525 | rxptr->rx_skb_ptr = skb; /* FIXME (?) */ |
1473 | rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) ); | 1526 | rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data, |
1527 | RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) ); | ||
1474 | wmb(); | 1528 | wmb(); |
1475 | rxptr->rdes0 = cpu_to_le32(0x80000000); | 1529 | rxptr->rdes0 = cpu_to_le32(0x80000000); |
1476 | rxptr = rxptr->next_rx_desc; | 1530 | rxptr = rxptr->next_rx_desc; |
@@ -1510,7 +1564,8 @@ static u16 read_srom_word(long ioaddr, int offset) | |||
1510 | for (i = 16; i > 0; i--) { | 1564 | for (i = 16; i > 0; i--) { |
1511 | outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr); | 1565 | outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr); |
1512 | udelay(5); | 1566 | udelay(5); |
1513 | srom_data = (srom_data << 1) | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0); | 1567 | srom_data = (srom_data << 1) | |
1568 | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0); | ||
1514 | outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); | 1569 | outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); |
1515 | udelay(5); | 1570 | udelay(5); |
1516 | } | 1571 | } |
@@ -1537,9 +1592,11 @@ static u8 dmfe_sense_speed(struct dmfe_board_info * db) | |||
1537 | 1592 | ||
1538 | if ( (phy_mode & 0x24) == 0x24 ) { | 1593 | if ( (phy_mode & 0x24) == 0x24 ) { |
1539 | if (db->chip_id == PCI_DM9132_ID) /* DM9132 */ | 1594 | if (db->chip_id == PCI_DM9132_ID) /* DM9132 */ |
1540 | phy_mode = phy_read(db->ioaddr, db->phy_addr, 7, db->chip_id) & 0xf000; | 1595 | phy_mode = phy_read(db->ioaddr, |
1596 | db->phy_addr, 7, db->chip_id) & 0xf000; | ||
1541 | else /* DM9102/DM9102A */ | 1597 | else /* DM9102/DM9102A */ |
1542 | phy_mode = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0xf000; | 1598 | phy_mode = phy_read(db->ioaddr, |
1599 | db->phy_addr, 17, db->chip_id) & 0xf000; | ||
1543 | /* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */ | 1600 | /* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */ |
1544 | switch (phy_mode) { | 1601 | switch (phy_mode) { |
1545 | case 0x1000: db->op_mode = DMFE_10MHF; break; | 1602 | case 0x1000: db->op_mode = DMFE_10MHF; break; |
@@ -1576,8 +1633,11 @@ static void dmfe_set_phyxcer(struct dmfe_board_info *db) | |||
1576 | 1633 | ||
1577 | /* DM9009 Chip: Phyxcer reg18 bit12=0 */ | 1634 | /* DM9009 Chip: Phyxcer reg18 bit12=0 */ |
1578 | if (db->chip_id == PCI_DM9009_ID) { | 1635 | if (db->chip_id == PCI_DM9009_ID) { |
1579 | phy_reg = phy_read(db->ioaddr, db->phy_addr, 18, db->chip_id) & ~0x1000; | 1636 | phy_reg = phy_read(db->ioaddr, |
1580 | phy_write(db->ioaddr, db->phy_addr, 18, phy_reg, db->chip_id); | 1637 | db->phy_addr, 18, db->chip_id) & ~0x1000; |
1638 | |||
1639 | phy_write(db->ioaddr, | ||
1640 | db->phy_addr, 18, phy_reg, db->chip_id); | ||
1581 | } | 1641 | } |
1582 | 1642 | ||
1583 | /* Phyxcer capability setting */ | 1643 | /* Phyxcer capability setting */ |
@@ -1650,10 +1710,12 @@ static void dmfe_process_mode(struct dmfe_board_info *db) | |||
1650 | case DMFE_100MHF: phy_reg = 0x2000; break; | 1710 | case DMFE_100MHF: phy_reg = 0x2000; break; |
1651 | case DMFE_100MFD: phy_reg = 0x2100; break; | 1711 | case DMFE_100MFD: phy_reg = 0x2100; break; |
1652 | } | 1712 | } |
1653 | phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id); | 1713 | phy_write(db->ioaddr, |
1714 | db->phy_addr, 0, phy_reg, db->chip_id); | ||
1654 | if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) ) | 1715 | if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) ) |
1655 | mdelay(20); | 1716 | mdelay(20); |
1656 | phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id); | 1717 | phy_write(db->ioaddr, |
1718 | db->phy_addr, 0, phy_reg, db->chip_id); | ||
1657 | } | 1719 | } |
1658 | } | 1720 | } |
1659 | } | 1721 | } |
@@ -1663,7 +1725,8 @@ static void dmfe_process_mode(struct dmfe_board_info *db) | |||
1663 | * Write a word to Phy register | 1725 | * Write a word to Phy register |
1664 | */ | 1726 | */ |
1665 | 1727 | ||
1666 | static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data, u32 chip_id) | 1728 | static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, |
1729 | u16 phy_data, u32 chip_id) | ||
1667 | { | 1730 | { |
1668 | u16 i; | 1731 | u16 i; |
1669 | unsigned long ioaddr; | 1732 | unsigned long ioaddr; |
@@ -1689,11 +1752,13 @@ static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data | |||
1689 | 1752 | ||
1690 | /* Send Phy address */ | 1753 | /* Send Phy address */ |
1691 | for (i = 0x10; i > 0; i = i >> 1) | 1754 | for (i = 0x10; i > 0; i = i >> 1) |
1692 | phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0); | 1755 | phy_write_1bit(ioaddr, |
1756 | phy_addr & i ? PHY_DATA_1 : PHY_DATA_0); | ||
1693 | 1757 | ||
1694 | /* Send register address */ | 1758 | /* Send register address */ |
1695 | for (i = 0x10; i > 0; i = i >> 1) | 1759 | for (i = 0x10; i > 0; i = i >> 1) |
1696 | phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0); | 1760 | phy_write_1bit(ioaddr, |
1761 | offset & i ? PHY_DATA_1 : PHY_DATA_0); | ||
1697 | 1762 | ||
1698 | 		/* write transition */ | 1763 | 		/* write transition */ |
1699 | phy_write_1bit(ioaddr, PHY_DATA_1); | 1764 | phy_write_1bit(ioaddr, PHY_DATA_1); |
@@ -1701,7 +1766,8 @@ static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data | |||
1701 | 1766 | ||
1702 | /* Write a word data to PHY controller */ | 1767 | /* Write a word data to PHY controller */ |
1703 | for ( i = 0x8000; i > 0; i >>= 1) | 1768 | for ( i = 0x8000; i > 0; i >>= 1) |
1704 | phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0); | 1769 | phy_write_1bit(ioaddr, |
1770 | phy_data & i ? PHY_DATA_1 : PHY_DATA_0); | ||
1705 | } | 1771 | } |
1706 | } | 1772 | } |
1707 | 1773 | ||
@@ -1738,11 +1804,13 @@ static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id) | |||
1738 | 1804 | ||
1739 | /* Send Phy address */ | 1805 | /* Send Phy address */ |
1740 | for (i = 0x10; i > 0; i = i >> 1) | 1806 | for (i = 0x10; i > 0; i = i >> 1) |
1741 | phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0); | 1807 | phy_write_1bit(ioaddr, |
1808 | phy_addr & i ? PHY_DATA_1 : PHY_DATA_0); | ||
1742 | 1809 | ||
1743 | /* Send register address */ | 1810 | /* Send register address */ |
1744 | for (i = 0x10; i > 0; i = i >> 1) | 1811 | for (i = 0x10; i > 0; i = i >> 1) |
1745 | phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0); | 1812 | phy_write_1bit(ioaddr, |
1813 | offset & i ? PHY_DATA_1 : PHY_DATA_0); | ||
1746 | 1814 | ||
1747 | /* Skip transition state */ | 1815 | /* Skip transition state */ |
1748 | phy_read_1bit(ioaddr); | 1816 | phy_read_1bit(ioaddr); |
@@ -1963,7 +2031,8 @@ static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db) | |||
1963 | 2031 | ||
1964 | 	/* Check whether the remote device status matches our setting or not */ | 2032 | 	/* Check whether the remote device status matches our setting or not */ |
1965 | if ( phy_reg != (db->HPNA_command & 0x0f00) ) { | 2033 | if ( phy_reg != (db->HPNA_command & 0x0f00) ) { |
1966 | phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id); | 2034 | phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, |
2035 | db->chip_id); | ||
1967 | db->HPNA_timer=8; | 2036 | db->HPNA_timer=8; |
1968 | } else | 2037 | } else |
1969 | db->HPNA_timer=600; /* Match, every 10 minutes, check */ | 2038 | db->HPNA_timer=600; /* Match, every 10 minutes, check */ |
@@ -2003,8 +2072,11 @@ module_param(HPNA_tx_cmd, byte, 0); | |||
2003 | module_param(HPNA_NoiseFloor, byte, 0); | 2072 | module_param(HPNA_NoiseFloor, byte, 0); |
2004 | module_param(SF_mode, byte, 0); | 2073 | module_param(SF_mode, byte, 0); |
2005 | MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)"); | 2074 | MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)"); |
2006 | MODULE_PARM_DESC(mode, "Davicom DM9xxx: Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA"); | 2075 | MODULE_PARM_DESC(mode, "Davicom DM9xxx: " |
2007 | MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function (bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)"); | 2076 | "Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA"); |
2077 | |||
2078 | MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function " | ||
2079 | "(bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)"); | ||
2008 | 2080 | ||
2009 | /* Description: | 2081 | /* Description: |
2010 |  * when the user loads the module with insmod, the system invokes init_module() | 2082 |  * when the user loads the module with insmod, the system invokes init_module() |
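The dmfe_timer() changes above replace the driver-private link_failed flag with netif_carrier_on()/netif_carrier_off() and stop trusting CR12 alone: the chip's link bits (mask 0x43, where bit 6 mirrors an external PHY) are cross-checked against the PHY's MII status register, which must be read twice because its link bit is latched. A condensed sketch of that decision, reusing the phy_read() helper and the masks from the hunk above (not the driver's literal code, and it relies on dmfe.c's own declarations):

	/* Sketch: decide link state from both the chip (CR12) and the MII PHY.
	 * MII register 1 is the BMSR; its link bit (0x4) is latched-low, so a
	 * dummy read is needed before the value can be trusted. */
	static int link_is_up(struct dmfe_board_info *db, u32 tmp_cr12)
	{
		int link_ok, link_ok_phy;

		/* 0x43 instead of 0x3: bit 6 reflects the external PHY's link pin */
		link_ok = (tmp_cr12 & 0x43) ? 1 : 0;

		phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);	/* dummy read */
		link_ok_phy = (phy_read(db->ioaddr, db->phy_addr, 1,
					db->chip_id) & 0x4) ? 1 : 0;

		/* if either source says "up", treat the link as up */
		return link_ok | link_ok_phy;
	}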
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index 885e73d731c2..dab88b958d6e 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c | |||
@@ -3598,17 +3598,20 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3598 | 3598 | ||
3599 | /* Move to next BD in the ring */ | 3599 | /* Move to next BD in the ring */ |
3600 | if (!(bd_status & T_W)) | 3600 | if (!(bd_status & T_W)) |
3601 | ugeth->txBd[txQ] = bd + sizeof(struct qe_bd); | 3601 | bd += sizeof(struct qe_bd); |
3602 | else | 3602 | else |
3603 | ugeth->txBd[txQ] = ugeth->p_tx_bd_ring[txQ]; | 3603 | bd = ugeth->p_tx_bd_ring[txQ]; |
3604 | 3604 | ||
3605 | /* If the next BD still needs to be cleaned up, then the bds | 3605 | /* If the next BD still needs to be cleaned up, then the bds |
3606 | are full. We need to tell the kernel to stop sending us stuff. */ | 3606 | are full. We need to tell the kernel to stop sending us stuff. */ |
3607 | if (bd == ugeth->confBd[txQ]) { | 3607 | if (bd == ugeth->confBd[txQ]) { |
3608 | if (!netif_queue_stopped(dev)) | 3608 | if (!netif_queue_stopped(dev)) |
3609 | netif_stop_queue(dev); | 3609 | netif_stop_queue(dev); |
3610 | return NETDEV_TX_BUSY; | ||
3610 | } | 3611 | } |
3611 | 3612 | ||
3613 | ugeth->txBd[txQ] = bd; | ||
3614 | |||
3612 | if (ugeth->p_scheduler) { | 3615 | if (ugeth->p_scheduler) { |
3613 | ugeth->cpucount[txQ]++; | 3616 | ugeth->cpucount[txQ]++; |
3614 | /* Indicate to QE that there are more Tx bds ready for | 3617 | /* Indicate to QE that there are more Tx bds ready for |
@@ -3620,7 +3623,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3620 | 3623 | ||
3621 | spin_unlock_irq(&ugeth->lock); | 3624 | spin_unlock_irq(&ugeth->lock); |
3622 | 3625 | ||
3623 | return 0; | 3626 | return NETDEV_TX_OK; |
3624 | } | 3627 | } |
3625 | 3628 | ||
3626 | static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit) | 3629 | static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit) |
@@ -3722,7 +3725,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) | |||
3722 | /* Handle the transmitted buffer and release */ | 3725 | /* Handle the transmitted buffer and release */ |
3723 | /* the BD to be used with the current frame */ | 3726 | /* the BD to be used with the current frame */ |
3724 | 3727 | ||
3725 | if ((bd = ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0)) | 3728 | if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0)) |
3726 | break; | 3729 | break; |
3727 | 3730 | ||
3728 | ugeth->stats.tx_packets++; | 3731 | ugeth->stats.tx_packets++; |
@@ -3741,10 +3744,12 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) | |||
3741 | 3744 | ||
3742 | /* Advance the confirmation BD pointer */ | 3745 | /* Advance the confirmation BD pointer */ |
3743 | if (!(bd_status & T_W)) | 3746 | if (!(bd_status & T_W)) |
3744 | ugeth->confBd[txQ] += sizeof(struct qe_bd); | 3747 | bd += sizeof(struct qe_bd); |
3745 | else | 3748 | else |
3746 | ugeth->confBd[txQ] = ugeth->p_tx_bd_ring[txQ]; | 3749 | bd = ugeth->p_tx_bd_ring[txQ]; |
3750 | bd_status = in_be32((u32 *)bd); | ||
3747 | } | 3751 | } |
3752 | ugeth->confBd[txQ] = bd; | ||
3748 | return 0; | 3753 | return 0; |
3749 | } | 3754 | } |
3750 | 3755 | ||
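The ucc_geth hunks keep the descriptor position in a local bd pointer while walking the ring and publish it to ugeth->txBd[txQ] / ugeth->confBd[txQ] only once the walk is finished, and start_xmit now reports a full ring as NETDEV_TX_BUSY instead of falling through to a success return. A reduced sketch of the wrap-or-advance step, with T_W and struct qe_bd taken from the driver:

	/* Sketch: advance to the next buffer descriptor, wrapping at the
	 * descriptor whose status word carries the T_W (wrap) bit. */
	static u8 *next_bd(u8 *bd, u32 bd_status, u8 *ring_base)
	{
		if (!(bd_status & T_W))
			return bd + sizeof(struct qe_bd);	/* next descriptor */
		return ring_base;				/* wrap to the start */
	}

Returning NETDEV_TX_BUSY tells the networking core that the skb was not consumed and should be resubmitted once the stopped queue is woken, whereas NETDEV_TX_OK means the frame has been accepted.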
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 68555c11f556..01869b1782e4 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c | |||
@@ -38,6 +38,36 @@ static int msi_cache_init(void) | |||
38 | return 0; | 38 | return 0; |
39 | } | 39 | } |
40 | 40 | ||
41 | static void msi_set_enable(struct pci_dev *dev, int enable) | ||
42 | { | ||
43 | int pos; | ||
44 | u16 control; | ||
45 | |||
46 | pos = pci_find_capability(dev, PCI_CAP_ID_MSI); | ||
47 | if (pos) { | ||
48 | pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); | ||
49 | control &= ~PCI_MSI_FLAGS_ENABLE; | ||
50 | if (enable) | ||
51 | control |= PCI_MSI_FLAGS_ENABLE; | ||
52 | pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); | ||
53 | } | ||
54 | } | ||
55 | |||
56 | static void msix_set_enable(struct pci_dev *dev, int enable) | ||
57 | { | ||
58 | int pos; | ||
59 | u16 control; | ||
60 | |||
61 | pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); | ||
62 | if (pos) { | ||
63 | pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control); | ||
64 | control &= ~PCI_MSIX_FLAGS_ENABLE; | ||
65 | if (enable) | ||
66 | control |= PCI_MSIX_FLAGS_ENABLE; | ||
67 | pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); | ||
68 | } | ||
69 | } | ||
70 | |||
41 | static void msi_set_mask_bit(unsigned int irq, int flag) | 71 | static void msi_set_mask_bit(unsigned int irq, int flag) |
42 | { | 72 | { |
43 | struct msi_desc *entry; | 73 | struct msi_desc *entry; |
@@ -55,6 +85,8 @@ static void msi_set_mask_bit(unsigned int irq, int flag) | |||
55 | mask_bits &= ~(1); | 85 | mask_bits &= ~(1); |
56 | mask_bits |= flag; | 86 | mask_bits |= flag; |
57 | pci_write_config_dword(entry->dev, pos, mask_bits); | 87 | pci_write_config_dword(entry->dev, pos, mask_bits); |
88 | } else { | ||
89 | msi_set_enable(entry->dev, !flag); | ||
58 | } | 90 | } |
59 | break; | 91 | break; |
60 | case PCI_CAP_ID_MSIX: | 92 | case PCI_CAP_ID_MSIX: |
@@ -192,44 +224,6 @@ static struct msi_desc* alloc_msi_entry(void) | |||
192 | return entry; | 224 | return entry; |
193 | } | 225 | } |
194 | 226 | ||
195 | static void enable_msi_mode(struct pci_dev *dev, int pos, int type) | ||
196 | { | ||
197 | u16 control; | ||
198 | |||
199 | pci_read_config_word(dev, msi_control_reg(pos), &control); | ||
200 | if (type == PCI_CAP_ID_MSI) { | ||
201 | /* Set enabled bits to single MSI & enable MSI_enable bit */ | ||
202 | msi_enable(control, 1); | ||
203 | pci_write_config_word(dev, msi_control_reg(pos), control); | ||
204 | dev->msi_enabled = 1; | ||
205 | } else { | ||
206 | msix_enable(control); | ||
207 | pci_write_config_word(dev, msi_control_reg(pos), control); | ||
208 | dev->msix_enabled = 1; | ||
209 | } | ||
210 | |||
211 | pci_intx(dev, 0); /* disable intx */ | ||
212 | } | ||
213 | |||
214 | void disable_msi_mode(struct pci_dev *dev, int pos, int type) | ||
215 | { | ||
216 | u16 control; | ||
217 | |||
218 | pci_read_config_word(dev, msi_control_reg(pos), &control); | ||
219 | if (type == PCI_CAP_ID_MSI) { | ||
220 | /* Set enabled bits to single MSI & enable MSI_enable bit */ | ||
221 | msi_disable(control); | ||
222 | pci_write_config_word(dev, msi_control_reg(pos), control); | ||
223 | dev->msi_enabled = 0; | ||
224 | } else { | ||
225 | msix_disable(control); | ||
226 | pci_write_config_word(dev, msi_control_reg(pos), control); | ||
227 | dev->msix_enabled = 0; | ||
228 | } | ||
229 | |||
230 | pci_intx(dev, 1); /* enable intx */ | ||
231 | } | ||
232 | |||
233 | #ifdef CONFIG_PM | 227 | #ifdef CONFIG_PM |
234 | static int __pci_save_msi_state(struct pci_dev *dev) | 228 | static int __pci_save_msi_state(struct pci_dev *dev) |
235 | { | 229 | { |
@@ -238,12 +232,11 @@ static int __pci_save_msi_state(struct pci_dev *dev) | |||
238 | struct pci_cap_saved_state *save_state; | 232 | struct pci_cap_saved_state *save_state; |
239 | u32 *cap; | 233 | u32 *cap; |
240 | 234 | ||
241 | pos = pci_find_capability(dev, PCI_CAP_ID_MSI); | 235 | if (!dev->msi_enabled) |
242 | if (pos <= 0 || dev->no_msi) | ||
243 | return 0; | 236 | return 0; |
244 | 237 | ||
245 | pci_read_config_word(dev, msi_control_reg(pos), &control); | 238 | pos = pci_find_capability(dev, PCI_CAP_ID_MSI); |
246 | if (!(control & PCI_MSI_FLAGS_ENABLE)) | 239 | if (pos <= 0) |
247 | return 0; | 240 | return 0; |
248 | 241 | ||
249 | save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5, | 242 | save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5, |
@@ -276,13 +269,18 @@ static void __pci_restore_msi_state(struct pci_dev *dev) | |||
276 | struct pci_cap_saved_state *save_state; | 269 | struct pci_cap_saved_state *save_state; |
277 | u32 *cap; | 270 | u32 *cap; |
278 | 271 | ||
272 | if (!dev->msi_enabled) | ||
273 | return; | ||
274 | |||
279 | save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI); | 275 | save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI); |
280 | pos = pci_find_capability(dev, PCI_CAP_ID_MSI); | 276 | pos = pci_find_capability(dev, PCI_CAP_ID_MSI); |
281 | if (!save_state || pos <= 0) | 277 | if (!save_state || pos <= 0) |
282 | return; | 278 | return; |
283 | cap = &save_state->data[0]; | 279 | cap = &save_state->data[0]; |
284 | 280 | ||
281 | pci_intx(dev, 0); /* disable intx */ | ||
285 | control = cap[i++] >> 16; | 282 | control = cap[i++] >> 16; |
283 | msi_set_enable(dev, 0); | ||
286 | pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]); | 284 | pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]); |
287 | if (control & PCI_MSI_FLAGS_64BIT) { | 285 | if (control & PCI_MSI_FLAGS_64BIT) { |
288 | pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]); | 286 | pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]); |
@@ -292,7 +290,6 @@ static void __pci_restore_msi_state(struct pci_dev *dev) | |||
292 | if (control & PCI_MSI_FLAGS_MASKBIT) | 290 | if (control & PCI_MSI_FLAGS_MASKBIT) |
293 | pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]); | 291 | pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]); |
294 | pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); | 292 | pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); |
295 | enable_msi_mode(dev, pos, PCI_CAP_ID_MSI); | ||
296 | pci_remove_saved_cap(save_state); | 293 | pci_remove_saved_cap(save_state); |
297 | kfree(save_state); | 294 | kfree(save_state); |
298 | } | 295 | } |
@@ -308,13 +305,11 @@ static int __pci_save_msix_state(struct pci_dev *dev) | |||
308 | return 0; | 305 | return 0; |
309 | 306 | ||
310 | pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); | 307 | pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); |
311 | if (pos <= 0 || dev->no_msi) | 308 | if (pos <= 0) |
312 | return 0; | 309 | return 0; |
313 | 310 | ||
314 | /* save the capability */ | 311 | /* save the capability */ |
315 | pci_read_config_word(dev, msi_control_reg(pos), &control); | 312 | pci_read_config_word(dev, msi_control_reg(pos), &control); |
316 | if (!(control & PCI_MSIX_FLAGS_ENABLE)) | ||
317 | return 0; | ||
318 | save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16), | 313 | save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16), |
319 | GFP_KERNEL); | 314 | GFP_KERNEL); |
320 | if (!save_state) { | 315 | if (!save_state) { |
@@ -376,6 +371,8 @@ static void __pci_restore_msix_state(struct pci_dev *dev) | |||
376 | return; | 371 | return; |
377 | 372 | ||
378 | /* route the table */ | 373 | /* route the table */ |
374 | pci_intx(dev, 0); /* disable intx */ | ||
375 | msix_set_enable(dev, 0); | ||
379 | irq = head = dev->first_msi_irq; | 376 | irq = head = dev->first_msi_irq; |
380 | while (head != tail) { | 377 | while (head != tail) { |
381 | entry = get_irq_msi(irq); | 378 | entry = get_irq_msi(irq); |
@@ -386,7 +383,6 @@ static void __pci_restore_msix_state(struct pci_dev *dev) | |||
386 | } | 383 | } |
387 | 384 | ||
388 | pci_write_config_word(dev, msi_control_reg(pos), save); | 385 | pci_write_config_word(dev, msi_control_reg(pos), save); |
389 | enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX); | ||
390 | } | 386 | } |
391 | 387 | ||
392 | void pci_restore_msi_state(struct pci_dev *dev) | 388 | void pci_restore_msi_state(struct pci_dev *dev) |
@@ -411,6 +407,8 @@ static int msi_capability_init(struct pci_dev *dev) | |||
411 | int pos, irq; | 407 | int pos, irq; |
412 | u16 control; | 408 | u16 control; |
413 | 409 | ||
410 | msi_set_enable(dev, 0); /* Ensure msi is disabled as I set it up */ | ||
411 | |||
414 | pos = pci_find_capability(dev, PCI_CAP_ID_MSI); | 412 | pos = pci_find_capability(dev, PCI_CAP_ID_MSI); |
415 | pci_read_config_word(dev, msi_control_reg(pos), &control); | 413 | pci_read_config_word(dev, msi_control_reg(pos), &control); |
416 | /* MSI Entry Initialization */ | 414 | /* MSI Entry Initialization */ |
@@ -454,7 +452,9 @@ static int msi_capability_init(struct pci_dev *dev) | |||
454 | set_irq_msi(irq, entry); | 452 | set_irq_msi(irq, entry); |
455 | 453 | ||
456 | /* Set MSI enabled bits */ | 454 | /* Set MSI enabled bits */ |
457 | enable_msi_mode(dev, pos, PCI_CAP_ID_MSI); | 455 | pci_intx(dev, 0); /* disable intx */ |
456 | msi_set_enable(dev, 1); | ||
457 | dev->msi_enabled = 1; | ||
458 | 458 | ||
459 | dev->irq = irq; | 459 | dev->irq = irq; |
460 | return 0; | 460 | return 0; |
@@ -481,6 +481,8 @@ static int msix_capability_init(struct pci_dev *dev, | |||
481 | u8 bir; | 481 | u8 bir; |
482 | void __iomem *base; | 482 | void __iomem *base; |
483 | 483 | ||
484 | msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */ | ||
485 | |||
484 | pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); | 486 | pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); |
485 | /* Request & Map MSI-X table region */ | 487 | /* Request & Map MSI-X table region */ |
486 | pci_read_config_word(dev, msi_control_reg(pos), &control); | 488 | pci_read_config_word(dev, msi_control_reg(pos), &control); |
@@ -549,7 +551,9 @@ static int msix_capability_init(struct pci_dev *dev, | |||
549 | } | 551 | } |
550 | dev->first_msi_irq = entries[0].vector; | 552 | dev->first_msi_irq = entries[0].vector; |
551 | /* Set MSI-X enabled bits */ | 553 | /* Set MSI-X enabled bits */ |
552 | enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX); | 554 | pci_intx(dev, 0); /* disable intx */ |
555 | msix_set_enable(dev, 1); | ||
556 | dev->msix_enabled = 1; | ||
553 | 557 | ||
554 | return 0; | 558 | return 0; |
555 | } | 559 | } |
@@ -611,12 +615,11 @@ int pci_enable_msi(struct pci_dev* dev) | |||
611 | WARN_ON(!!dev->msi_enabled); | 615 | WARN_ON(!!dev->msi_enabled); |
612 | 616 | ||
613 | /* Check whether driver already requested for MSI-X irqs */ | 617 | /* Check whether driver already requested for MSI-X irqs */ |
614 | pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); | 618 | if (dev->msix_enabled) { |
615 | if (pos > 0 && dev->msix_enabled) { | 619 | printk(KERN_INFO "PCI: %s: Can't enable MSI. " |
616 | printk(KERN_INFO "PCI: %s: Can't enable MSI. " | 620 | "Device already has MSI-X enabled\n", |
617 | "Device already has MSI-X enabled\n", | 621 | pci_name(dev)); |
618 | pci_name(dev)); | 622 | return -EINVAL; |
619 | return -EINVAL; | ||
620 | } | 623 | } |
621 | status = msi_capability_init(dev); | 624 | status = msi_capability_init(dev); |
622 | return status; | 625 | return status; |
@@ -625,8 +628,7 @@ int pci_enable_msi(struct pci_dev* dev) | |||
625 | void pci_disable_msi(struct pci_dev* dev) | 628 | void pci_disable_msi(struct pci_dev* dev) |
626 | { | 629 | { |
627 | struct msi_desc *entry; | 630 | struct msi_desc *entry; |
628 | int pos, default_irq; | 631 | int default_irq; |
629 | u16 control; | ||
630 | 632 | ||
631 | if (!pci_msi_enable) | 633 | if (!pci_msi_enable) |
632 | return; | 634 | return; |
@@ -636,16 +638,9 @@ void pci_disable_msi(struct pci_dev* dev) | |||
636 | if (!dev->msi_enabled) | 638 | if (!dev->msi_enabled) |
637 | return; | 639 | return; |
638 | 640 | ||
639 | pos = pci_find_capability(dev, PCI_CAP_ID_MSI); | 641 | msi_set_enable(dev, 0); |
640 | if (!pos) | 642 | pci_intx(dev, 1); /* enable intx */ |
641 | return; | 643 | dev->msi_enabled = 0; |
642 | |||
643 | pci_read_config_word(dev, msi_control_reg(pos), &control); | ||
644 | if (!(control & PCI_MSI_FLAGS_ENABLE)) | ||
645 | return; | ||
646 | |||
647 | |||
648 | disable_msi_mode(dev, pos, PCI_CAP_ID_MSI); | ||
649 | 644 | ||
650 | entry = get_irq_msi(dev->first_msi_irq); | 645 | entry = get_irq_msi(dev->first_msi_irq); |
651 | if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) { | 646 | if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) { |
@@ -746,8 +741,7 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) | |||
746 | WARN_ON(!!dev->msix_enabled); | 741 | WARN_ON(!!dev->msix_enabled); |
747 | 742 | ||
748 | /* Check whether driver already requested for MSI irq */ | 743 | /* Check whether driver already requested for MSI irq */ |
749 | if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 && | 744 | if (dev->msi_enabled) { |
750 | dev->msi_enabled) { | ||
751 | printk(KERN_INFO "PCI: %s: Can't enable MSI-X. " | 745 | printk(KERN_INFO "PCI: %s: Can't enable MSI-X. " |
752 | "Device already has an MSI irq assigned\n", | 746 | "Device already has an MSI irq assigned\n", |
753 | pci_name(dev)); | 747 | pci_name(dev)); |
@@ -760,8 +754,6 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) | |||
760 | void pci_disable_msix(struct pci_dev* dev) | 754 | void pci_disable_msix(struct pci_dev* dev) |
761 | { | 755 | { |
762 | int irq, head, tail = 0, warning = 0; | 756 | int irq, head, tail = 0, warning = 0; |
763 | int pos; | ||
764 | u16 control; | ||
765 | 757 | ||
766 | if (!pci_msi_enable) | 758 | if (!pci_msi_enable) |
767 | return; | 759 | return; |
@@ -771,15 +763,9 @@ void pci_disable_msix(struct pci_dev* dev) | |||
771 | if (!dev->msix_enabled) | 763 | if (!dev->msix_enabled) |
772 | return; | 764 | return; |
773 | 765 | ||
774 | pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); | 766 | msix_set_enable(dev, 0); |
775 | if (!pos) | 767 | pci_intx(dev, 1); /* enable intx */ |
776 | return; | 768 | dev->msix_enabled = 0; |
777 | |||
778 | pci_read_config_word(dev, msi_control_reg(pos), &control); | ||
779 | if (!(control & PCI_MSIX_FLAGS_ENABLE)) | ||
780 | return; | ||
781 | |||
782 | disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX); | ||
783 | 769 | ||
784 | irq = head = dev->first_msi_irq; | 770 | irq = head = dev->first_msi_irq; |
785 | while (head != tail) { | 771 | while (head != tail) { |
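With enable_msi_mode()/disable_msi_mode() gone, each call site now spells out the sequence itself: disable INTx, flip the enable bit in the MSI or MSI-X control word via msi_set_enable()/msix_set_enable(), and record the state in dev->msi_enabled / dev->msix_enabled. The driver-facing API is unchanged; a hedged sketch of typical usage (my_isr, my_setup_irq and "mydrv" are placeholders, not code from this patch):

	#include <linux/pci.h>
	#include <linux/interrupt.h>

	/* Placeholder two-argument handler, as used elsewhere in this series. */
	static irqreturn_t my_isr(int irq, void *dev_id)
	{
		return IRQ_HANDLED;
	}

	static int my_setup_irq(struct pci_dev *pdev, void *ctx)
	{
		if (pci_enable_msi(pdev))
			dev_info(&pdev->dev, "MSI not available, staying on INTx\n");

		/* pdev->irq now names the MSI vector when MSI was enabled */
		return request_irq(pdev->irq, my_isr, 0, "mydrv", ctx);
	}

	static void my_teardown_irq(struct pci_dev *pdev, void *ctx)
	{
		free_irq(pdev->irq, ctx);
		pci_disable_msi(pdev);	/* returns early if MSI was never enabled */
	}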
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 1e74e1ee8bd8..df495300ce3d 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -881,13 +881,6 @@ pci_disable_device(struct pci_dev *dev) | |||
881 | if (atomic_sub_return(1, &dev->enable_cnt) != 0) | 881 | if (atomic_sub_return(1, &dev->enable_cnt) != 0) |
882 | return; | 882 | return; |
883 | 883 | ||
884 | if (dev->msi_enabled) | ||
885 | disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), | ||
886 | PCI_CAP_ID_MSI); | ||
887 | if (dev->msix_enabled) | ||
888 | disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), | ||
889 | PCI_CAP_ID_MSIX); | ||
890 | |||
891 | pci_read_config_word(dev, PCI_COMMAND, &pci_command); | 884 | pci_read_config_word(dev, PCI_COMMAND, &pci_command); |
892 | if (pci_command & PCI_COMMAND_MASTER) { | 885 | if (pci_command & PCI_COMMAND_MASTER) { |
893 | pci_command &= ~PCI_COMMAND_MASTER; | 886 | pci_command &= ~PCI_COMMAND_MASTER; |
@@ -1277,6 +1270,33 @@ pci_intx(struct pci_dev *pdev, int enable) | |||
1277 | } | 1270 | } |
1278 | } | 1271 | } |
1279 | 1272 | ||
1273 | /** | ||
1274 | * pci_msi_off - disables any msi or msix capabilities | ||
1275 | * @pdev: the PCI device to operate on | ||
1276 | * | ||
1277 | * If you want to use msi see pci_enable_msi and friends. | ||
1278 | * This is a lower level primitive that allows us to disable | ||
1279 | * msi operation at the device level. | ||
1280 | */ | ||
1281 | void pci_msi_off(struct pci_dev *dev) | ||
1282 | { | ||
1283 | int pos; | ||
1284 | u16 control; | ||
1285 | |||
1286 | pos = pci_find_capability(dev, PCI_CAP_ID_MSI); | ||
1287 | if (pos) { | ||
1288 | pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); | ||
1289 | control &= ~PCI_MSI_FLAGS_ENABLE; | ||
1290 | pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); | ||
1291 | } | ||
1292 | pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); | ||
1293 | if (pos) { | ||
1294 | pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control); | ||
1295 | control &= ~PCI_MSIX_FLAGS_ENABLE; | ||
1296 | pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); | ||
1297 | } | ||
1298 | } | ||
1299 | |||
1280 | #ifndef HAVE_ARCH_PCI_SET_DMA_MASK | 1300 | #ifndef HAVE_ARCH_PCI_SET_DMA_MASK |
1281 | /* | 1301 | /* |
1282 | * These can be overridden by arch-specific implementations | 1302 | * These can be overridden by arch-specific implementations |
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index a4f2d580625e..ae7a975995a5 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
@@ -46,10 +46,8 @@ extern struct rw_semaphore pci_bus_sem; | |||
46 | extern unsigned int pci_pm_d3_delay; | 46 | extern unsigned int pci_pm_d3_delay; |
47 | 47 | ||
48 | #ifdef CONFIG_PCI_MSI | 48 | #ifdef CONFIG_PCI_MSI |
49 | void disable_msi_mode(struct pci_dev *dev, int pos, int type); | ||
50 | void pci_no_msi(void); | 49 | void pci_no_msi(void); |
51 | #else | 50 | #else |
52 | static inline void disable_msi_mode(struct pci_dev *dev, int pos, int type) { } | ||
53 | static inline void pci_no_msi(void) { } | 51 | static inline void pci_no_msi(void) { } |
54 | #endif | 52 | #endif |
55 | 53 | ||
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 1bf548287564..7f94fc098cd3 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -1438,8 +1438,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quir | |||
1438 | */ | 1438 | */ |
1439 | static void __devinit quirk_pcie_pxh(struct pci_dev *dev) | 1439 | static void __devinit quirk_pcie_pxh(struct pci_dev *dev) |
1440 | { | 1440 | { |
1441 | disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), | 1441 | pci_msi_off(dev); |
1442 | PCI_CAP_ID_MSI); | 1442 | |
1443 | dev->no_msi = 1; | 1443 | dev->no_msi = 1; |
1444 | 1444 | ||
1445 | printk(KERN_WARNING "PCI: PXH quirk detected, " | 1445 | printk(KERN_WARNING "PCI: PXH quirk detected, " |
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c index 4b8a95fba1e5..a1dc8c466ec9 100644 --- a/drivers/s390/block/dasd_eer.c +++ b/drivers/s390/block/dasd_eer.c | |||
@@ -461,6 +461,7 @@ int dasd_eer_enable(struct dasd_device *device) | |||
461 | cqr->device = device; | 461 | cqr->device = device; |
462 | cqr->retries = 255; | 462 | cqr->retries = 255; |
463 | cqr->expires = 10 * HZ; | 463 | cqr->expires = 10 * HZ; |
464 | clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); | ||
464 | 465 | ||
465 | cqr->cpaddr->cmd_code = DASD_ECKD_CCW_SNSS; | 466 | cqr->cpaddr->cmd_code = DASD_ECKD_CCW_SNSS; |
466 | cqr->cpaddr->count = SNSS_DATA_SIZE; | 467 | cqr->cpaddr->count = SNSS_DATA_SIZE; |
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c index 7a76ec413a3a..2a1af4e60be0 100644 --- a/drivers/s390/char/tape_std.c +++ b/drivers/s390/char/tape_std.c | |||
@@ -647,7 +647,10 @@ tape_std_mtcompression(struct tape_device *device, int mt_count) | |||
647 | return PTR_ERR(request); | 647 | return PTR_ERR(request); |
648 | request->op = TO_NOP; | 648 | request->op = TO_NOP; |
649 | /* setup ccws */ | 649 | /* setup ccws */ |
650 | *device->modeset_byte = (mt_count == 0) ? 0x00 : 0x08; | 650 | if (mt_count == 0) |
651 | *device->modeset_byte &= ~0x08; | ||
652 | else | ||
653 | *device->modeset_byte |= 0x08; | ||
651 | tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); | 654 | tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); |
652 | tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL); | 655 | tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL); |
653 | /* execute it */ | 656 | /* execute it */ |
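The tape_std_mtcompression() fix is a read-modify-write correction: assigning 0x00 or 0x08 to *device->modeset_byte wiped every other mode bit, while the new code touches only bit 3. A minimal sketch of the idiom, with a hypothetical helper name:

	/* Sketch: set or clear only the compression bit (0x08) in the mode byte,
	 * leaving all other mode-set bits untouched. */
	static void set_compression(unsigned char *modeset_byte, int on)
	{
		if (on)
			*modeset_byte |= 0x08;
		else
			*modeset_byte &= ~0x08;
	}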
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 51238e7555bb..089a3ddd6265 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c | |||
@@ -144,8 +144,8 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev) | |||
144 | ret = stsch(sch->schid, &sch->schib); | 144 | ret = stsch(sch->schid, &sch->schib); |
145 | if (ret || !sch->schib.pmcw.dnv) | 145 | if (ret || !sch->schib.pmcw.dnv) |
146 | return -ENODEV; | 146 | return -ENODEV; |
147 | if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0) | 147 | if (!sch->schib.pmcw.ena) |
148 | /* Not operational or no activity -> done. */ | 148 | /* Not operational -> done. */ |
149 | return 0; | 149 | return 0; |
150 | /* Stage 1: cancel io. */ | 150 | /* Stage 1: cancel io. */ |
151 | if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) && | 151 | if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) && |
@@ -334,20 +334,29 @@ ccw_device_oper_notify(struct work_struct *work) | |||
334 | struct ccw_device *cdev; | 334 | struct ccw_device *cdev; |
335 | struct subchannel *sch; | 335 | struct subchannel *sch; |
336 | int ret; | 336 | int ret; |
337 | unsigned long flags; | ||
337 | 338 | ||
338 | priv = container_of(work, struct ccw_device_private, kick_work); | 339 | priv = container_of(work, struct ccw_device_private, kick_work); |
339 | cdev = priv->cdev; | 340 | cdev = priv->cdev; |
341 | spin_lock_irqsave(cdev->ccwlock, flags); | ||
340 | sch = to_subchannel(cdev->dev.parent); | 342 | sch = to_subchannel(cdev->dev.parent); |
341 | ret = (sch->driver && sch->driver->notify) ? | 343 | if (sch->driver && sch->driver->notify) { |
342 | sch->driver->notify(&sch->dev, CIO_OPER) : 0; | 344 | spin_unlock_irqrestore(cdev->ccwlock, flags); |
343 | if (!ret) | 345 | ret = sch->driver->notify(&sch->dev, CIO_OPER); |
344 | /* Driver doesn't want device back. */ | 346 | spin_lock_irqsave(cdev->ccwlock, flags); |
345 | ccw_device_do_unreg_rereg(work); | 347 | } else |
346 | else { | 348 | ret = 0; |
349 | if (ret) { | ||
347 | /* Reenable channel measurements, if needed. */ | 350 | /* Reenable channel measurements, if needed. */ |
351 | spin_unlock_irqrestore(cdev->ccwlock, flags); | ||
348 | cmf_reenable(cdev); | 352 | cmf_reenable(cdev); |
353 | spin_lock_irqsave(cdev->ccwlock, flags); | ||
349 | wake_up(&cdev->private->wait_q); | 354 | wake_up(&cdev->private->wait_q); |
350 | } | 355 | } |
356 | spin_unlock_irqrestore(cdev->ccwlock, flags); | ||
357 | if (!ret) | ||
358 | /* Driver doesn't want device back. */ | ||
359 | ccw_device_do_unreg_rereg(work); | ||
351 | } | 360 | } |
352 | 361 | ||
353 | /* | 362 | /* |
@@ -534,15 +543,21 @@ ccw_device_nopath_notify(struct work_struct *work) | |||
534 | struct ccw_device *cdev; | 543 | struct ccw_device *cdev; |
535 | struct subchannel *sch; | 544 | struct subchannel *sch; |
536 | int ret; | 545 | int ret; |
546 | unsigned long flags; | ||
537 | 547 | ||
538 | priv = container_of(work, struct ccw_device_private, kick_work); | 548 | priv = container_of(work, struct ccw_device_private, kick_work); |
539 | cdev = priv->cdev; | 549 | cdev = priv->cdev; |
550 | spin_lock_irqsave(cdev->ccwlock, flags); | ||
540 | sch = to_subchannel(cdev->dev.parent); | 551 | sch = to_subchannel(cdev->dev.parent); |
541 | /* Extra sanity. */ | 552 | /* Extra sanity. */ |
542 | if (sch->lpm) | 553 | if (sch->lpm) |
543 | return; | 554 | goto out_unlock; |
544 | ret = (sch->driver && sch->driver->notify) ? | 555 | if (sch->driver && sch->driver->notify) { |
545 | sch->driver->notify(&sch->dev, CIO_NO_PATH) : 0; | 556 | spin_unlock_irqrestore(cdev->ccwlock, flags); |
557 | ret = sch->driver->notify(&sch->dev, CIO_NO_PATH); | ||
558 | spin_lock_irqsave(cdev->ccwlock, flags); | ||
559 | } else | ||
560 | ret = 0; | ||
546 | if (!ret) { | 561 | if (!ret) { |
547 | if (get_device(&sch->dev)) { | 562 | if (get_device(&sch->dev)) { |
548 | /* Driver doesn't want to keep device. */ | 563 | /* Driver doesn't want to keep device. */ |
@@ -562,6 +577,8 @@ ccw_device_nopath_notify(struct work_struct *work) | |||
562 | cdev->private->state = DEV_STATE_DISCONNECTED; | 577 | cdev->private->state = DEV_STATE_DISCONNECTED; |
563 | wake_up(&cdev->private->wait_q); | 578 | wake_up(&cdev->private->wait_q); |
564 | } | 579 | } |
580 | out_unlock: | ||
581 | spin_unlock_irqrestore(cdev->ccwlock, flags); | ||
565 | } | 582 | } |
566 | 583 | ||
567 | void | 584 | void |
@@ -607,10 +624,13 @@ ccw_device_verify_done(struct ccw_device *cdev, int err) | |||
607 | default: | 624 | default: |
608 | /* Reset oper notify indication after verify error. */ | 625 | /* Reset oper notify indication after verify error. */ |
609 | cdev->private->flags.donotify = 0; | 626 | cdev->private->flags.donotify = 0; |
610 | PREPARE_WORK(&cdev->private->kick_work, | 627 | if (cdev->online) { |
611 | ccw_device_nopath_notify); | 628 | PREPARE_WORK(&cdev->private->kick_work, |
612 | queue_work(ccw_device_notify_work, &cdev->private->kick_work); | 629 | ccw_device_nopath_notify); |
613 | ccw_device_done(cdev, DEV_STATE_NOT_OPER); | 630 | queue_work(ccw_device_notify_work, |
631 | &cdev->private->kick_work); | ||
632 | } else | ||
633 | ccw_device_done(cdev, DEV_STATE_NOT_OPER); | ||
614 | break; | 634 | break; |
615 | } | 635 | } |
616 | } | 636 | } |
@@ -756,15 +776,22 @@ static void | |||
756 | ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event) | 776 | ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event) |
757 | { | 777 | { |
758 | struct subchannel *sch; | 778 | struct subchannel *sch; |
779 | int ret; | ||
759 | 780 | ||
760 | sch = to_subchannel(cdev->dev.parent); | 781 | sch = to_subchannel(cdev->dev.parent); |
761 | if (sch->driver->notify && | 782 | if (sch->driver->notify) { |
762 | sch->driver->notify(&sch->dev, sch->lpm ? CIO_GONE : CIO_NO_PATH)) { | 783 | spin_unlock_irq(cdev->ccwlock); |
763 | ccw_device_set_timeout(cdev, 0); | 784 | ret = sch->driver->notify(&sch->dev, |
764 | cdev->private->flags.fake_irb = 0; | 785 | sch->lpm ? CIO_GONE : CIO_NO_PATH); |
765 | cdev->private->state = DEV_STATE_DISCONNECTED; | 786 | spin_lock_irq(cdev->ccwlock); |
766 | wake_up(&cdev->private->wait_q); | 787 | } else |
767 | return; | 788 | ret = 0; |
789 | if (ret) { | ||
790 | ccw_device_set_timeout(cdev, 0); | ||
791 | cdev->private->flags.fake_irb = 0; | ||
792 | cdev->private->state = DEV_STATE_DISCONNECTED; | ||
793 | wake_up(&cdev->private->wait_q); | ||
794 | return; | ||
768 | } | 795 | } |
769 | cdev->private->state = DEV_STATE_NOT_OPER; | 796 | cdev->private->state = DEV_STATE_NOT_OPER; |
770 | cio_disable_subchannel(sch); | 797 | cio_disable_subchannel(sch); |
@@ -969,18 +996,12 @@ ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event) | |||
969 | 996 | ||
970 | sch = to_subchannel(cdev->dev.parent); | 997 | sch = to_subchannel(cdev->dev.parent); |
971 | ccw_device_set_timeout(cdev, 0); | 998 | ccw_device_set_timeout(cdev, 0); |
999 | /* Start delayed path verification. */ | ||
1000 | ccw_device_online_verify(cdev, 0); | ||
972 | /* OK, i/o is dead now. Call interrupt handler. */ | 1001 | /* OK, i/o is dead now. Call interrupt handler. */ |
973 | cdev->private->state = DEV_STATE_ONLINE; | ||
974 | if (cdev->handler) | 1002 | if (cdev->handler) |
975 | cdev->handler(cdev, cdev->private->intparm, | 1003 | cdev->handler(cdev, cdev->private->intparm, |
976 | ERR_PTR(-EIO)); | 1004 | ERR_PTR(-EIO)); |
977 | if (!sch->lpm) { | ||
978 | PREPARE_WORK(&cdev->private->kick_work, | ||
979 | ccw_device_nopath_notify); | ||
980 | queue_work(ccw_device_notify_work, &cdev->private->kick_work); | ||
981 | } else if (cdev->private->flags.doverify) | ||
982 | /* Start delayed path verification. */ | ||
983 | ccw_device_online_verify(cdev, 0); | ||
984 | } | 1005 | } |
985 | 1006 | ||
986 | static void | 1007 | static void |
@@ -993,21 +1014,8 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event) | |||
993 | ccw_device_set_timeout(cdev, 3*HZ); | 1014 | ccw_device_set_timeout(cdev, 3*HZ); |
994 | return; | 1015 | return; |
995 | } | 1016 | } |
996 | if (ret == -ENODEV) { | 1017 | /* Start delayed path verification. */ |
997 | struct subchannel *sch; | 1018 | ccw_device_online_verify(cdev, 0); |
998 | |||
999 | sch = to_subchannel(cdev->dev.parent); | ||
1000 | if (!sch->lpm) { | ||
1001 | PREPARE_WORK(&cdev->private->kick_work, | ||
1002 | ccw_device_nopath_notify); | ||
1003 | queue_work(ccw_device_notify_work, | ||
1004 | &cdev->private->kick_work); | ||
1005 | } else | ||
1006 | dev_fsm_event(cdev, DEV_EVENT_NOTOPER); | ||
1007 | return; | ||
1008 | } | ||
1009 | //FIXME: Can we get here? | ||
1010 | cdev->private->state = DEV_STATE_ONLINE; | ||
1011 | if (cdev->handler) | 1019 | if (cdev->handler) |
1012 | cdev->handler(cdev, cdev->private->intparm, | 1020 | cdev->handler(cdev, cdev->private->intparm, |
1013 | ERR_PTR(-EIO)); | 1021 | ERR_PTR(-EIO)); |
@@ -1025,26 +1033,11 @@ void device_kill_io(struct subchannel *sch) | |||
1025 | cdev->private->state = DEV_STATE_TIMEOUT_KILL; | 1033 | cdev->private->state = DEV_STATE_TIMEOUT_KILL; |
1026 | return; | 1034 | return; |
1027 | } | 1035 | } |
1028 | if (ret == -ENODEV) { | 1036 | /* Start delayed path verification. */ |
1029 | if (!sch->lpm) { | 1037 | ccw_device_online_verify(cdev, 0); |
1030 | PREPARE_WORK(&cdev->private->kick_work, | ||
1031 | ccw_device_nopath_notify); | ||
1032 | queue_work(ccw_device_notify_work, | ||
1033 | &cdev->private->kick_work); | ||
1034 | } else | ||
1035 | dev_fsm_event(cdev, DEV_EVENT_NOTOPER); | ||
1036 | return; | ||
1037 | } | ||
1038 | if (cdev->handler) | 1038 | if (cdev->handler) |
1039 | cdev->handler(cdev, cdev->private->intparm, | 1039 | cdev->handler(cdev, cdev->private->intparm, |
1040 | ERR_PTR(-EIO)); | 1040 | ERR_PTR(-EIO)); |
1041 | if (!sch->lpm) { | ||
1042 | PREPARE_WORK(&cdev->private->kick_work, | ||
1043 | ccw_device_nopath_notify); | ||
1044 | queue_work(ccw_device_notify_work, &cdev->private->kick_work); | ||
1045 | } else | ||
1046 | /* Start delayed path verification. */ | ||
1047 | ccw_device_online_verify(cdev, 0); | ||
1048 | } | 1041 | } |
1049 | 1042 | ||
1050 | static void | 1043 | static void |
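The device_fsm.c changes share one pattern: hold cdev->ccwlock while inspecting or updating device state, but release it across the subchannel driver's ->notify() callback (which may block or call back into the I/O layer) and take it again afterwards. A generic sketch of that shape, with illustrative structure names rather than the real cio types:

	#include <linux/spinlock.h>
	#include <linux/device.h>

	struct my_driver {
		int (*notify)(struct device *dev, int event);
	};

	struct my_cdev {
		spinlock_t lock;
		struct my_driver *drv;
		struct device *dev;
	};

	static int notify_event(struct my_cdev *cdev, int event)
	{
		unsigned long flags;
		int ret = 0;

		spin_lock_irqsave(&cdev->lock, flags);
		if (cdev->drv && cdev->drv->notify) {
			/* drop the lock: the callback may sleep or re-enter us */
			spin_unlock_irqrestore(&cdev->lock, flags);
			ret = cdev->drv->notify(cdev->dev, event);
			spin_lock_irqsave(&cdev->lock, flags);
		}
		/* ... react to ret while the lock is held again ... */
		spin_unlock_irqrestore(&cdev->lock, flags);
		return ret;
	}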
diff --git a/drivers/serial/dz.c b/drivers/serial/dz.c index 587d87b9eb3c..d31721f2744d 100644 --- a/drivers/serial/dz.c +++ b/drivers/serial/dz.c | |||
@@ -170,8 +170,7 @@ static void dz_enable_ms(struct uart_port *port) | |||
170 | * This routine deals with inputs from any lines. | 170 | * This routine deals with inputs from any lines. |
171 | * ------------------------------------------------------------ | 171 | * ------------------------------------------------------------ |
172 | */ | 172 | */ |
173 | static inline void dz_receive_chars(struct dz_port *dport_in, | 173 | static inline void dz_receive_chars(struct dz_port *dport_in) |
174 | struct pt_regs *regs) | ||
175 | { | 174 | { |
176 | struct dz_port *dport; | 175 | struct dz_port *dport; |
177 | struct tty_struct *tty = NULL; | 176 | struct tty_struct *tty = NULL; |
@@ -226,7 +225,7 @@ static inline void dz_receive_chars(struct dz_port *dport_in, | |||
226 | break; | 225 | break; |
227 | } | 226 | } |
228 | 227 | ||
229 | if (uart_handle_sysrq_char(&dport->port, ch, regs)) | 228 | if (uart_handle_sysrq_char(&dport->port, ch)) |
230 | continue; | 229 | continue; |
231 | 230 | ||
232 | if ((status & dport->port.ignore_status_mask) == 0) { | 231 | if ((status & dport->port.ignore_status_mask) == 0) { |
@@ -332,7 +331,7 @@ static irqreturn_t dz_interrupt(int irq, void *dev) | |||
332 | status = dz_in(dport, DZ_CSR); | 331 | status = dz_in(dport, DZ_CSR); |
333 | 332 | ||
334 | if ((status & (DZ_RDONE | DZ_RIE)) == (DZ_RDONE | DZ_RIE)) | 333 | if ((status & (DZ_RDONE | DZ_RIE)) == (DZ_RDONE | DZ_RIE)) |
335 | dz_receive_chars(dport, regs); | 334 | dz_receive_chars(dport); |
336 | 335 | ||
337 | if ((status & (DZ_TRDY | DZ_TIE)) == (DZ_TRDY | DZ_TIE)) | 336 | if ((status & (DZ_TRDY | DZ_TIE)) == (DZ_TRDY | DZ_TIE)) |
338 | dz_transmit_chars(dport); | 337 | dz_transmit_chars(dport); |
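The dz.c hunks are mechanical fallout from dropping the pt_regs argument from interrupt handlers and from uart_handle_sysrq_char(): handlers now take only the irq number and the dev_id cookie. A minimal sketch of the resulting handler shape (names hypothetical):

	#include <linux/interrupt.h>

	struct my_uart_port;			/* hypothetical driver state */

	/* Sketch: two-argument handler, as dz_interrupt() now is. */
	static irqreturn_t my_uart_interrupt(int irq, void *dev_id)
	{
		struct my_uart_port *uport = dev_id;

		(void)uport;	/* read the CSR, call rx/tx helpers without regs, ... */
		return IRQ_HANDLED;
	}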
diff --git a/drivers/serial/mcfserial.c b/drivers/serial/mcfserial.c index 08430961a895..99af084c7cec 100644 --- a/drivers/serial/mcfserial.c +++ b/drivers/serial/mcfserial.c | |||
@@ -425,15 +425,13 @@ irqreturn_t mcfrs_interrupt(int irq, void *dev_id) | |||
425 | * ------------------------------------------------------------------- | 425 | * ------------------------------------------------------------------- |
426 | */ | 426 | */ |
427 | 427 | ||
428 | static void mcfrs_offintr(void *private) | 428 | static void mcfrs_offintr(struct work_struct *work) |
429 | { | 429 | { |
430 | struct mcf_serial *info = (struct mcf_serial *) private; | 430 | struct mcf_serial *info = container_of(work, struct mcf_serial, tqueue); |
431 | struct tty_struct *tty; | 431 | struct tty_struct *tty = info->tty; |
432 | 432 | ||
433 | tty = info->tty; | 433 | if (tty) |
434 | if (!tty) | 434 | tty_wakeup(tty); |
435 | return; | ||
436 | tty_wakeup(tty); | ||
437 | } | 435 | } |
438 | 436 | ||
439 | 437 | ||
@@ -497,16 +495,13 @@ static void mcfrs_timer(void) | |||
497 | * do_serial_hangup() -> tty->hangup() -> mcfrs_hangup() | 495 | * do_serial_hangup() -> tty->hangup() -> mcfrs_hangup() |
498 | * | 496 | * |
499 | */ | 497 | */ |
500 | static void do_serial_hangup(void *private) | 498 | static void do_serial_hangup(struct work_struct *work) |
501 | { | 499 | { |
502 | struct mcf_serial *info = (struct mcf_serial *) private; | 500 | struct mcf_serial *info = container_of(work, struct mcf_serial, tqueue_hangup); |
503 | struct tty_struct *tty; | 501 | struct tty_struct *tty = info->tty; |
504 | 502 | ||
505 | tty = info->tty; | 503 | if (tty) |
506 | if (!tty) | 504 | tty_hangup(tty); |
507 | return; | ||
508 | |||
509 | tty_hangup(tty); | ||
510 | } | 505 | } |
511 | 506 | ||
512 | static int startup(struct mcf_serial * info) | 507 | static int startup(struct mcf_serial * info) |
@@ -857,7 +852,7 @@ static void mcfrs_throttle(struct tty_struct * tty) | |||
857 | #ifdef SERIAL_DEBUG_THROTTLE | 852 | #ifdef SERIAL_DEBUG_THROTTLE |
858 | char buf[64]; | 853 | char buf[64]; |
859 | 854 | ||
860 | printk("throttle %s: %d....\n", _tty_name(tty, buf), | 855 | printk("throttle %s: %d....\n", tty_name(tty, buf), |
861 | tty->ldisc.chars_in_buffer(tty)); | 856 | tty->ldisc.chars_in_buffer(tty)); |
862 | #endif | 857 | #endif |
863 | 858 | ||
@@ -876,7 +871,7 @@ static void mcfrs_unthrottle(struct tty_struct * tty) | |||
876 | #ifdef SERIAL_DEBUG_THROTTLE | 871 | #ifdef SERIAL_DEBUG_THROTTLE |
877 | char buf[64]; | 872 | char buf[64]; |
878 | 873 | ||
879 | printk("unthrottle %s: %d....\n", _tty_name(tty, buf), | 874 | printk("unthrottle %s: %d....\n", tty_name(tty, buf), |
880 | tty->ldisc.chars_in_buffer(tty)); | 875 | tty->ldisc.chars_in_buffer(tty)); |
881 | #endif | 876 | #endif |
882 | 877 | ||
@@ -1541,8 +1536,8 @@ static void mcfrs_irqinit(struct mcf_serial *info) | |||
1541 | * External Pin Mask Setting & Enable External Pin for Interface | 1536 | * External Pin Mask Setting & Enable External Pin for Interface |
1542 | * mrcbis@aliceposta.it | 1537 | * mrcbis@aliceposta.it |
1543 | */ | 1538 | */ |
1544 | unsigned short *serpin_enable_mask; | 1539 | u16 *serpin_enable_mask; |
1545 | serpin_enable_mask = (MCF_IPSBAR + MCF_GPIO_PAR_UART); | 1540 | serpin_enable_mask = (u16 *) (MCF_IPSBAR + MCF_GPIO_PAR_UART); |
1546 | if (info->line == 0) | 1541 | if (info->line == 0) |
1547 | *serpin_enable_mask |= UART0_ENABLE_MASK; | 1542 | *serpin_enable_mask |= UART0_ENABLE_MASK; |
1548 | else if (info->line == 1) | 1543 | else if (info->line == 1) |
@@ -1551,6 +1546,13 @@ static void mcfrs_irqinit(struct mcf_serial *info) | |||
1551 | *serpin_enable_mask |= UART2_ENABLE_MASK; | 1546 | *serpin_enable_mask |= UART2_ENABLE_MASK; |
1552 | } | 1547 | } |
1553 | #endif | 1548 | #endif |
1549 | #if defined(CONFIG_M528x) | ||
1550 | /* make sure PUAPAR is set for UART0 and UART1 */ | ||
1551 | if (info->line < 2) { | ||
1552 | volatile unsigned char *portp = (volatile unsigned char *) (MCF_MBAR + MCF5282_GPIO_PUAPAR); | ||
1553 | *portp |= (0x03 << (info->line * 2)); | ||
1554 | } | ||
1555 | #endif | ||
1554 | #elif defined(CONFIG_M520x) | 1556 | #elif defined(CONFIG_M520x) |
1555 | volatile unsigned char *icrp, *uartp; | 1557 | volatile unsigned char *icrp, *uartp; |
1556 | volatile unsigned long *imrp; | 1558 | volatile unsigned long *imrp; |
@@ -1783,8 +1785,8 @@ mcfrs_init(void) | |||
1783 | info->event = 0; | 1785 | info->event = 0; |
1784 | info->count = 0; | 1786 | info->count = 0; |
1785 | info->blocked_open = 0; | 1787 | info->blocked_open = 0; |
1786 | INIT_WORK(&info->tqueue, mcfrs_offintr, info); | 1788 | INIT_WORK(&info->tqueue, mcfrs_offintr); |
1787 | INIT_WORK(&info->tqueue_hangup, do_serial_hangup, info); | 1789 | INIT_WORK(&info->tqueue_hangup, do_serial_hangup); |
1788 | init_waitqueue_head(&info->open_wait); | 1790 | init_waitqueue_head(&info->open_wait); |
1789 | init_waitqueue_head(&info->close_wait); | 1791 | init_waitqueue_head(&info->close_wait); |
1790 | 1792 | ||
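The mcfserial conversion follows the reworked workqueue API: INIT_WORK() no longer carries a data pointer, the callback receives the work item itself, and the owning structure is recovered with container_of(). A self-contained sketch of the pattern with made-up names:

	#include <linux/workqueue.h>
	#include <linux/kernel.h>

	struct my_port {
		struct work_struct wakeup_work;
		int line;
	};

	/* New-style callback: the work item is the argument, the owner is
	 * recovered via container_of(). */
	static void my_wakeup(struct work_struct *work)
	{
		struct my_port *port = container_of(work, struct my_port, wakeup_work);

		pr_debug("waking up line %d\n", port->line);
	}

	static void my_port_init(struct my_port *port)
	{
		INIT_WORK(&port->wakeup_work, my_wakeup);	/* no third argument */
	}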
diff --git a/drivers/usb/input/hid-core.c b/drivers/usb/input/hid-core.c index 12ec8b432953..827a75a186ba 100644 --- a/drivers/usb/input/hid-core.c +++ b/drivers/usb/input/hid-core.c | |||
@@ -686,10 +686,8 @@ void usbhid_init_reports(struct hid_device *hid) | |||
686 | #define USB_DEVICE_ID_SMARTJOY_DUAL_PLUS 0x8802 | 686 | #define USB_DEVICE_ID_SMARTJOY_DUAL_PLUS 0x8802 |
687 | 687 | ||
688 | #define USB_VENDOR_ID_CODEMERCS 0x07c0 | 688 | #define USB_VENDOR_ID_CODEMERCS 0x07c0 |
689 | #define USB_DEVICE_ID_CODEMERCS_IOW40 0x1500 | 689 | #define USB_DEVICE_ID_CODEMERCS_IOW_FIRST 0x1500 |
690 | #define USB_DEVICE_ID_CODEMERCS_IOW24 0x1501 | 690 | #define USB_DEVICE_ID_CODEMERCS_IOW_LAST 0x15ff |
691 | #define USB_DEVICE_ID_CODEMERCS_IOW48 0x1502 | ||
692 | #define USB_DEVICE_ID_CODEMERCS_IOW28 0x1503 | ||
693 | 691 | ||
694 | #define USB_VENDOR_ID_DELORME 0x1163 | 692 | #define USB_VENDOR_ID_DELORME 0x1163 |
695 | #define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100 | 693 | #define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100 |
@@ -789,10 +787,6 @@ static const struct hid_blacklist { | |||
789 | { USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1, HID_QUIRK_IGNORE }, | 787 | { USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1, HID_QUIRK_IGNORE }, |
790 | { USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232, HID_QUIRK_IGNORE }, | 788 | { USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232, HID_QUIRK_IGNORE }, |
791 | { USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD, HID_QUIRK_IGNORE }, | 789 | { USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD, HID_QUIRK_IGNORE }, |
792 | { USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW40, HID_QUIRK_IGNORE }, | ||
793 | { USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW24, HID_QUIRK_IGNORE }, | ||
794 | { USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW48, HID_QUIRK_IGNORE }, | ||
795 | { USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW28, HID_QUIRK_IGNORE }, | ||
796 | { USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_HIDCOM, HID_QUIRK_IGNORE }, | 790 | { USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_HIDCOM, HID_QUIRK_IGNORE }, |
797 | { USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_ULTRAMOUSE, HID_QUIRK_IGNORE }, | 791 | { USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_ULTRAMOUSE, HID_QUIRK_IGNORE }, |
798 | { USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE, HID_QUIRK_IGNORE }, | 792 | { USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE, HID_QUIRK_IGNORE }, |
@@ -1070,9 +1064,14 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf) | |||
1070 | int n, len, insize = 0; | 1064 | int n, len, insize = 0; |
1071 | struct usbhid_device *usbhid; | 1065 | struct usbhid_device *usbhid; |
1072 | 1066 | ||
1073 | /* Ignore all Wacom devices */ | 1067 | /* Ignore all Wacom devices */ |
1074 | if (le16_to_cpu(dev->descriptor.idVendor) == USB_VENDOR_ID_WACOM) | 1068 | if (le16_to_cpu(dev->descriptor.idVendor) == USB_VENDOR_ID_WACOM) |
1075 | return NULL; | 1069 | return NULL; |
1070 | /* ignore all Code Mercenaries IOWarrior devices */ | ||
1071 | if (le16_to_cpu(dev->descriptor.idVendor) == USB_VENDOR_ID_CODEMERCS) | ||
1072 | if (le16_to_cpu(dev->descriptor.idProduct) >= USB_DEVICE_ID_CODEMERCS_IOW_FIRST && | ||
1073 | le16_to_cpu(dev->descriptor.idProduct) <= USB_DEVICE_ID_CODEMERCS_IOW_LAST) | ||
1074 | return NULL; | ||
1076 | 1075 | ||
1077 | for (n = 0; hid_blacklist[n].idVendor; n++) | 1076 | for (n = 0; hid_blacklist[n].idVendor; n++) |
1078 | if ((hid_blacklist[n].idVendor == le16_to_cpu(dev->descriptor.idVendor)) && | 1077 | if ((hid_blacklist[n].idVendor == le16_to_cpu(dev->descriptor.idVendor)) && |
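Instead of listing every IOWarrior model in the quirk blacklist, usb_hid_configure() now rejects the whole Code Mercenaries product-ID range up front, so new IOWarrior variants need no further table entries, presumably so a dedicated iowarrior driver can claim them instead of usbhid. A condensed sketch of the range test, reusing the macros defined above:

    /* sketch: one range check replaces four per-device blacklist rows */
    static int is_iowarrior(struct usb_device *dev)
    {
            u16 vendor  = le16_to_cpu(dev->descriptor.idVendor);
            u16 product = le16_to_cpu(dev->descriptor.idProduct);

            return vendor == USB_VENDOR_ID_CODEMERCS &&
                   product >= USB_DEVICE_ID_CODEMERCS_IOW_FIRST &&     /* 0x1500 */
                   product <= USB_DEVICE_ID_CODEMERCS_IOW_LAST;        /* 0x15ff */
    }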
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index 7e7ec29782f1..8e898e3d861e 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c | |||
@@ -55,7 +55,7 @@ | |||
55 | #include <linux/slab.h> | 55 | #include <linux/slab.h> |
56 | #include <linux/kthread.h> | 56 | #include <linux/kthread.h> |
57 | #include <linux/mutex.h> | 57 | #include <linux/mutex.h> |
58 | #include <linux/utsrelease.h> | 58 | #include <linux/utsname.h> |
59 | 59 | ||
60 | #include <scsi/scsi.h> | 60 | #include <scsi/scsi.h> |
61 | #include <scsi/scsi_cmnd.h> | 61 | #include <scsi/scsi_cmnd.h> |
@@ -547,7 +547,7 @@ static int get_device_info(struct us_data *us, const struct usb_device_id *id) | |||
547 | idesc->bInterfaceSubClass, | 547 | idesc->bInterfaceSubClass, |
548 | idesc->bInterfaceProtocol, | 548 | idesc->bInterfaceProtocol, |
549 | msgs[msg], | 549 | msgs[msg], |
550 | UTS_RELEASE); | 550 | utsname()->release); |
551 | } | 551 | } |
552 | 552 | ||
553 | return 0; | 553 | return 0; |
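UTS_RELEASE from <linux/utsrelease.h> is a string fixed at build time, so every file using it has to be recompiled whenever only the release string changes; utsname()->release is read from the running kernel at runtime and always reports the correct version. A minimal sketch, assuming a generic reporting function rather than this driver's exact printk:

    #include <linux/kernel.h>
    #include <linux/utsname.h>

    static void report_kernel_release(void)
    {
            /* the release string of the kernel actually running,
             * not the one compiled into this object file */
            printk(KERN_INFO "running on kernel %s\n", utsname()->release);
    }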
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index b8f0a11e8f31..7f5a59836818 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig | |||
@@ -677,8 +677,6 @@ config FB_S1D13XXX | |||
677 | config FB_NVIDIA | 677 | config FB_NVIDIA |
678 | tristate "nVidia Framebuffer Support" | 678 | tristate "nVidia Framebuffer Support" |
679 | depends on FB && PCI | 679 | depends on FB && PCI |
680 | select I2C_ALGOBIT if FB_NVIDIA_I2C | ||
681 | select I2C if FB_NVIDIA_I2C | ||
682 | select FB_BACKLIGHT if FB_NVIDIA_BACKLIGHT | 680 | select FB_BACKLIGHT if FB_NVIDIA_BACKLIGHT |
683 | select FB_MODE_HELPERS | 681 | select FB_MODE_HELPERS |
684 | select FB_CFB_FILLRECT | 682 | select FB_CFB_FILLRECT |
@@ -697,6 +695,7 @@ config FB_NVIDIA | |||
697 | config FB_NVIDIA_I2C | 695 | config FB_NVIDIA_I2C |
698 | bool "Enable DDC Support" | 696 | bool "Enable DDC Support" |
699 | depends on FB_NVIDIA | 697 | depends on FB_NVIDIA |
698 | select FB_DDC | ||
700 | help | 699 | help |
701 | This enables I2C support for nVidia Chipsets. This is used | 700 | This enables I2C support for nVidia Chipsets. This is used |
702 | only for getting EDID information from the attached display | 701 | only for getting EDID information from the attached display |
@@ -716,7 +715,6 @@ config FB_NVIDIA_BACKLIGHT | |||
716 | config FB_RIVA | 715 | config FB_RIVA |
717 | tristate "nVidia Riva support" | 716 | tristate "nVidia Riva support" |
718 | depends on FB && PCI | 717 | depends on FB && PCI |
719 | select FB_DDC if FB_RIVA_I2C | ||
720 | select FB_BACKLIGHT if FB_RIVA_BACKLIGHT | 718 | select FB_BACKLIGHT if FB_RIVA_BACKLIGHT |
721 | select FB_MODE_HELPERS | 719 | select FB_MODE_HELPERS |
722 | select FB_CFB_FILLRECT | 720 | select FB_CFB_FILLRECT |
@@ -734,6 +732,7 @@ config FB_RIVA | |||
734 | config FB_RIVA_I2C | 732 | config FB_RIVA_I2C |
735 | bool "Enable DDC Support" | 733 | bool "Enable DDC Support" |
736 | depends on FB_RIVA | 734 | depends on FB_RIVA |
735 | select FB_DDC | ||
737 | help | 736 | help |
738 | This enables I2C support for nVidia Chipsets. This is used | 737 | This enables I2C support for nVidia Chipsets. This is used |
739 | only for getting EDID information from the attached display | 738 | only for getting EDID information from the attached display |
@@ -812,8 +811,6 @@ config FB_INTEL | |||
812 | depends on FB && EXPERIMENTAL && PCI && X86 | 811 | depends on FB && EXPERIMENTAL && PCI && X86 |
813 | select AGP | 812 | select AGP |
814 | select AGP_INTEL | 813 | select AGP_INTEL |
815 | select I2C_ALGOBIT if FB_INTEL_I2C | ||
816 | select I2C if FB_INTEL_I2C | ||
817 | select FB_MODE_HELPERS | 814 | select FB_MODE_HELPERS |
818 | select FB_CFB_FILLRECT | 815 | select FB_CFB_FILLRECT |
819 | select FB_CFB_COPYAREA | 816 | select FB_CFB_COPYAREA |
@@ -846,6 +843,7 @@ config FB_INTEL_DEBUG | |||
846 | config FB_INTEL_I2C | 843 | config FB_INTEL_I2C |
847 | bool "DDC/I2C for Intel framebuffer support" | 844 | bool "DDC/I2C for Intel framebuffer support" |
848 | depends on FB_INTEL | 845 | depends on FB_INTEL |
846 | select FB_DDC | ||
849 | default y | 847 | default y |
850 | help | 848 | help |
851 | Say Y here if you want DDC/I2C support for your on-board Intel graphics. | 849 | Say Y here if you want DDC/I2C support for your on-board Intel graphics. |
@@ -924,8 +922,8 @@ config FB_MATROX_G | |||
924 | 922 | ||
925 | config FB_MATROX_I2C | 923 | config FB_MATROX_I2C |
926 | tristate "Matrox I2C support" | 924 | tristate "Matrox I2C support" |
927 | depends on FB_MATROX && I2C | 925 | depends on FB_MATROX |
928 | select I2C_ALGOBIT | 926 | select FB_DDC |
929 | ---help--- | 927 | ---help--- |
930 | This driver creates I2C buses which are needed for accessing the | 928 | This driver creates I2C buses which are needed for accessing the |
931 | DDC (I2C) bus present on all Matroxes, an I2C bus which | 929 | DDC (I2C) bus present on all Matroxes, an I2C bus which |
@@ -993,7 +991,6 @@ config FB_MATROX_MULTIHEAD | |||
993 | config FB_RADEON | 991 | config FB_RADEON |
994 | tristate "ATI Radeon display support" | 992 | tristate "ATI Radeon display support" |
995 | depends on FB && PCI | 993 | depends on FB && PCI |
996 | select FB_DDC if FB_RADEON_I2C | ||
997 | select FB_BACKLIGHT if FB_RADEON_BACKLIGHT | 994 | select FB_BACKLIGHT if FB_RADEON_BACKLIGHT |
998 | select FB_MODE_HELPERS | 995 | select FB_MODE_HELPERS |
999 | select FB_CFB_FILLRECT | 996 | select FB_CFB_FILLRECT |
@@ -1018,6 +1015,7 @@ config FB_RADEON | |||
1018 | config FB_RADEON_I2C | 1015 | config FB_RADEON_I2C |
1019 | bool "DDC/I2C for ATI Radeon support" | 1016 | bool "DDC/I2C for ATI Radeon support" |
1020 | depends on FB_RADEON | 1017 | depends on FB_RADEON |
1018 | select FB_DDC | ||
1021 | default y | 1019 | default y |
1022 | help | 1020 | help |
1023 | Say Y here if you want DDC/I2C support for your Radeon board. | 1021 | Say Y here if you want DDC/I2C support for your Radeon board. |
@@ -1125,7 +1123,6 @@ config FB_S3 | |||
1125 | config FB_SAVAGE | 1123 | config FB_SAVAGE |
1126 | tristate "S3 Savage support" | 1124 | tristate "S3 Savage support" |
1127 | depends on FB && PCI && EXPERIMENTAL | 1125 | depends on FB && PCI && EXPERIMENTAL |
1128 | select FB_DDC if FB_SAVAGE_I2C | ||
1129 | select FB_MODE_HELPERS | 1126 | select FB_MODE_HELPERS |
1130 | select FB_CFB_FILLRECT | 1127 | select FB_CFB_FILLRECT |
1131 | select FB_CFB_COPYAREA | 1128 | select FB_CFB_COPYAREA |
@@ -1142,6 +1139,7 @@ config FB_SAVAGE | |||
1142 | config FB_SAVAGE_I2C | 1139 | config FB_SAVAGE_I2C |
1143 | bool "Enable DDC2 Support" | 1140 | bool "Enable DDC2 Support" |
1144 | depends on FB_SAVAGE | 1141 | depends on FB_SAVAGE |
1142 | select FB_DDC | ||
1145 | help | 1143 | help |
1146 | This enables I2C support for S3 Savage Chipsets. This is used | 1144 | This enables I2C support for S3 Savage Chipsets. This is used |
1147 | only for getting EDID information from the attached display | 1145 | only for getting EDID information from the attached display |
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c index 8726c3669713..e86d7e0c9825 100644 --- a/drivers/video/aty/aty128fb.c +++ b/drivers/video/aty/aty128fb.c | |||
@@ -357,6 +357,12 @@ static int default_lcd_on __devinitdata = 1; | |||
357 | static int mtrr = 1; | 357 | static int mtrr = 1; |
358 | #endif | 358 | #endif |
359 | 359 | ||
360 | #ifdef CONFIG_PMAC_BACKLIGHT | ||
361 | static int backlight __devinitdata = 1; | ||
362 | #else | ||
363 | static int backlight __devinitdata = 0; | ||
364 | #endif | ||
365 | |||
360 | /* PLL constants */ | 366 | /* PLL constants */ |
361 | struct aty128_constants { | 367 | struct aty128_constants { |
362 | u32 ref_clk; | 368 | u32 ref_clk; |
@@ -1652,6 +1658,9 @@ static int __devinit aty128fb_setup(char *options) | |||
1652 | } else if (!strncmp(this_opt, "crt:", 4)) { | 1658 | } else if (!strncmp(this_opt, "crt:", 4)) { |
1653 | default_crt_on = simple_strtoul(this_opt+4, NULL, 0); | 1659 | default_crt_on = simple_strtoul(this_opt+4, NULL, 0); |
1654 | continue; | 1660 | continue; |
1661 | } else if (!strncmp(this_opt, "backlight:", 10)) { | ||
1662 | backlight = simple_strtoul(this_opt+10, NULL, 0); | ||
1663 | continue; | ||
1655 | } | 1664 | } |
1656 | #ifdef CONFIG_MTRR | 1665 | #ifdef CONFIG_MTRR |
1657 | if(!strncmp(this_opt, "nomtrr", 6)) { | 1666 | if(!strncmp(this_opt, "nomtrr", 6)) { |
@@ -1985,7 +1994,8 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i | |||
1985 | par->lock_blank = 0; | 1994 | par->lock_blank = 0; |
1986 | 1995 | ||
1987 | #ifdef CONFIG_FB_ATY128_BACKLIGHT | 1996 | #ifdef CONFIG_FB_ATY128_BACKLIGHT |
1988 | aty128_bl_init(par); | 1997 | if (backlight) |
1998 | aty128_bl_init(par); | ||
1989 | #endif | 1999 | #endif |
1990 | 2000 | ||
1991 | if (register_framebuffer(info) < 0) | 2001 | if (register_framebuffer(info) < 0) |
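aty128fb, and in the hunks that follow atyfb, radeonfb, nvidiafb and rivafb, gain a "backlight" option: the framebuffer-level backlight device is registered by default only when CONFIG_PMAC_BACKLIGHT is set (PowerMacs, where the old behaviour is expected) and can be forced on or off from the option string, e.g. video=aty128fb:backlight:1 on the kernel command line in the usual video=<driver>:<options> form. The shared pattern, condensed and slightly simplified:

    /* default: register the backlight only on PowerMac-style configurations */
    #ifdef CONFIG_PMAC_BACKLIGHT
    static int backlight __devinitdata = 1;    /* PowerMac: keep old behaviour */
    #else
    static int backlight __devinitdata = 0;    /* elsewhere: opt in explicitly */
    #endif

    /* ...in the option parser, "backlight:<0|1>" overrides the default... */

    #ifdef CONFIG_FB_ATY128_BACKLIGHT
            if (backlight)
                    aty128_bl_init(par);       /* register only when asked */
    #endif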
diff --git a/drivers/video/aty/atyfb.h b/drivers/video/aty/atyfb.h index f72faff33c0c..dc62f8e282b4 100644 --- a/drivers/video/aty/atyfb.h +++ b/drivers/video/aty/atyfb.h | |||
@@ -284,7 +284,8 @@ static inline void aty_st_8(int regindex, u8 val, const struct atyfb_par *par) | |||
284 | #endif | 284 | #endif |
285 | } | 285 | } |
286 | 286 | ||
287 | #if defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || defined (CONFIG_FB_ATY_GENERIC_LCD) | 287 | #if defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || \ |
288 | defined (CONFIG_FB_ATY_GENERIC_LCD) || defined (CONFIG_FB_ATY_BACKLIGHT) | ||
288 | extern void aty_st_lcd(int index, u32 val, const struct atyfb_par *par); | 289 | extern void aty_st_lcd(int index, u32 val, const struct atyfb_par *par); |
289 | extern u32 aty_ld_lcd(int index, const struct atyfb_par *par); | 290 | extern u32 aty_ld_lcd(int index, const struct atyfb_par *par); |
290 | #endif | 291 | #endif |
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c index a7e0062233f2..d7627fc4f11e 100644 --- a/drivers/video/aty/atyfb_base.c +++ b/drivers/video/aty/atyfb_base.c | |||
@@ -131,7 +131,8 @@ | |||
131 | #define PRINTKI(fmt, args...) printk(KERN_INFO "atyfb: " fmt, ## args) | 131 | #define PRINTKI(fmt, args...) printk(KERN_INFO "atyfb: " fmt, ## args) |
132 | #define PRINTKE(fmt, args...) printk(KERN_ERR "atyfb: " fmt, ## args) | 132 | #define PRINTKE(fmt, args...) printk(KERN_ERR "atyfb: " fmt, ## args) |
133 | 133 | ||
134 | #if defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || defined (CONFIG_FB_ATY_GENERIC_LCD) | 134 | #if defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || \ |
135 | defined (CONFIG_FB_ATY_GENERIC_LCD) || defined(CONFIG_FB_ATY_BACKLIGHT) | ||
135 | static const u32 lt_lcd_regs[] = { | 136 | static const u32 lt_lcd_regs[] = { |
136 | CONFIG_PANEL_LG, | 137 | CONFIG_PANEL_LG, |
137 | LCD_GEN_CNTL_LG, | 138 | LCD_GEN_CNTL_LG, |
@@ -308,6 +309,12 @@ static int xclk; | |||
308 | static int comp_sync __devinitdata = -1; | 309 | static int comp_sync __devinitdata = -1; |
309 | static char *mode; | 310 | static char *mode; |
310 | 311 | ||
312 | #ifdef CONFIG_PMAC_BACKLIGHT | ||
313 | static int backlight __devinitdata = 1; | ||
314 | #else | ||
315 | static int backlight __devinitdata = 0; | ||
316 | #endif | ||
317 | |||
311 | #ifdef CONFIG_PPC | 318 | #ifdef CONFIG_PPC |
312 | static int default_vmode __devinitdata = VMODE_CHOOSE; | 319 | static int default_vmode __devinitdata = VMODE_CHOOSE; |
313 | static int default_cmode __devinitdata = CMODE_CHOOSE; | 320 | static int default_cmode __devinitdata = CMODE_CHOOSE; |
@@ -2575,7 +2582,7 @@ static int __devinit aty_init(struct fb_info *info) | |||
2575 | | (USE_F32KHZ | TRISTATE_MEM_EN), par); | 2582 | | (USE_F32KHZ | TRISTATE_MEM_EN), par); |
2576 | } else | 2583 | } else |
2577 | #endif | 2584 | #endif |
2578 | if (M64_HAS(MOBIL_BUS)) { | 2585 | if (M64_HAS(MOBIL_BUS) && backlight) { |
2579 | #ifdef CONFIG_FB_ATY_BACKLIGHT | 2586 | #ifdef CONFIG_FB_ATY_BACKLIGHT |
2580 | aty_bl_init (par); | 2587 | aty_bl_init (par); |
2581 | #endif | 2588 | #endif |
@@ -3757,6 +3764,8 @@ static int __init atyfb_setup(char *options) | |||
3757 | xclk = simple_strtoul(this_opt+5, NULL, 0); | 3764 | xclk = simple_strtoul(this_opt+5, NULL, 0); |
3758 | else if (!strncmp(this_opt, "comp_sync:", 10)) | 3765 | else if (!strncmp(this_opt, "comp_sync:", 10)) |
3759 | comp_sync = simple_strtoul(this_opt+10, NULL, 0); | 3766 | comp_sync = simple_strtoul(this_opt+10, NULL, 0); |
3767 | else if (!strncmp(this_opt, "backlight:", 10)) | ||
3768 | backlight = simple_strtoul(this_opt+10, NULL, 0); | ||
3760 | #ifdef CONFIG_PPC | 3769 | #ifdef CONFIG_PPC |
3761 | else if (!strncmp(this_opt, "vmode:", 6)) { | 3770 | else if (!strncmp(this_opt, "vmode:", 6)) { |
3762 | unsigned int vmode = | 3771 | unsigned int vmode = |
diff --git a/drivers/video/aty/mach64_ct.c b/drivers/video/aty/mach64_ct.c index f3b487b8710b..1fdcfdbf669b 100644 --- a/drivers/video/aty/mach64_ct.c +++ b/drivers/video/aty/mach64_ct.c | |||
@@ -598,7 +598,6 @@ static void aty_resume_pll_ct(const struct fb_info *info, | |||
598 | struct atyfb_par *par = info->par; | 598 | struct atyfb_par *par = info->par; |
599 | 599 | ||
600 | if (par->mclk_per != par->xclk_per) { | 600 | if (par->mclk_per != par->xclk_per) { |
601 | int i; | ||
602 | /* | 601 | /* |
603 | * This disables the sclk, crashes the computer as reported: | 602 | * This disables the sclk, crashes the computer as reported: |
604 | * aty_st_pll_ct(SPLL_CNTL2, 3, info); | 603 | * aty_st_pll_ct(SPLL_CNTL2, 3, info); |
@@ -614,7 +613,7 @@ static void aty_resume_pll_ct(const struct fb_info *info, | |||
614 | * helps for Rage Mobilities that sometimes crash when | 613 | * helps for Rage Mobilities that sometimes crash when |
615 | * we switch to sclk. (Daniel Mantione, 13-05-2003) | 614 | * we switch to sclk. (Daniel Mantione, 13-05-2003) |
616 | */ | 615 | */ |
617 | for (i=0;i<=0x1ffff;i++); | 616 | udelay(500); |
618 | } | 617 | } |
619 | 618 | ||
620 | aty_st_pll_ct(PLL_REF_DIV, pll->ct.pll_ref_div, par); | 619 | aty_st_pll_ct(PLL_REF_DIV, pll->ct.pll_ref_div, par); |
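The removed "int i" belonged to an empty counted loop that served as a settle delay after reprogramming the PLL. Such a loop runs for an unpredictable real time (it scales with CPU clock) and a compiler is free to drop it entirely; udelay() busy-waits for a calibrated interval instead. In isolation:

    #include <linux/delay.h>

    /*
     * was:  for (i = 0; i <= 0x1ffff; i++);
     * a counted empty loop is CPU-speed dependent and may be optimized away;
     * udelay() waits a known 500 microseconds regardless of clock speed.
     */
    udelay(500);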
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c index 46ba1235f03a..1bf6f42eb400 100644 --- a/drivers/video/aty/radeon_base.c +++ b/drivers/video/aty/radeon_base.c | |||
@@ -268,6 +268,11 @@ static int nomtrr = 0; | |||
268 | #endif | 268 | #endif |
269 | static int force_sleep; | 269 | static int force_sleep; |
270 | static int ignore_devlist; | 270 | static int ignore_devlist; |
271 | #ifdef CONFIG_PMAC_BACKLIGHT | ||
272 | static int backlight = 1; | ||
273 | #else | ||
274 | static int backlight = 0; | ||
275 | #endif | ||
271 | 276 | ||
272 | /* | 277 | /* |
273 | * prototypes | 278 | * prototypes |
@@ -2348,7 +2353,8 @@ static int __devinit radeonfb_pci_register (struct pci_dev *pdev, | |||
2348 | MTRR_TYPE_WRCOMB, 1); | 2353 | MTRR_TYPE_WRCOMB, 1); |
2349 | #endif | 2354 | #endif |
2350 | 2355 | ||
2351 | radeonfb_bl_init(rinfo); | 2356 | if (backlight) |
2357 | radeonfb_bl_init(rinfo); | ||
2352 | 2358 | ||
2353 | printk ("radeonfb (%s): %s\n", pci_name(rinfo->pdev), rinfo->name); | 2359 | printk ("radeonfb (%s): %s\n", pci_name(rinfo->pdev), rinfo->name); |
2354 | 2360 | ||
@@ -2469,6 +2475,8 @@ static int __init radeonfb_setup (char *options) | |||
2469 | force_dfp = 1; | 2475 | force_dfp = 1; |
2470 | } else if (!strncmp(this_opt, "panel_yres:", 11)) { | 2476 | } else if (!strncmp(this_opt, "panel_yres:", 11)) { |
2471 | panel_yres = simple_strtoul((this_opt+11), NULL, 0); | 2477 | panel_yres = simple_strtoul((this_opt+11), NULL, 0); |
2478 | } else if (!strncmp(this_opt, "backlight:", 10)) { | ||
2479 | backlight = simple_strtoul(this_opt+10, NULL, 0); | ||
2472 | #ifdef CONFIG_MTRR | 2480 | #ifdef CONFIG_MTRR |
2473 | } else if (!strncmp(this_opt, "nomtrr", 6)) { | 2481 | } else if (!strncmp(this_opt, "nomtrr", 6)) { |
2474 | nomtrr = 1; | 2482 | nomtrr = 1; |
diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c index b7016e9b9e13..43f62d8ee41d 100644 --- a/drivers/video/nvidia/nv_backlight.c +++ b/drivers/video/nvidia/nv_backlight.c | |||
@@ -12,6 +12,11 @@ | |||
12 | #include <linux/backlight.h> | 12 | #include <linux/backlight.h> |
13 | #include <linux/fb.h> | 13 | #include <linux/fb.h> |
14 | #include <linux/pci.h> | 14 | #include <linux/pci.h> |
15 | |||
16 | #ifdef CONFIG_PMAC_BACKLIGHT | ||
17 | #include <asm/backlight.h> | ||
18 | #endif | ||
19 | |||
15 | #include "nv_local.h" | 20 | #include "nv_local.h" |
16 | #include "nv_type.h" | 21 | #include "nv_type.h" |
17 | #include "nv_proto.h" | 22 | #include "nv_proto.h" |
@@ -23,8 +28,6 @@ | |||
23 | #define MAX_LEVEL 0x534 | 28 | #define MAX_LEVEL 0x534 |
24 | #define LEVEL_STEP ((MAX_LEVEL - MIN_LEVEL) / FB_BACKLIGHT_MAX) | 29 | #define LEVEL_STEP ((MAX_LEVEL - MIN_LEVEL) / FB_BACKLIGHT_MAX) |
25 | 30 | ||
26 | static struct backlight_properties nvidia_bl_data; | ||
27 | |||
28 | static int nvidia_bl_get_level_brightness(struct nvidia_par *par, | 31 | static int nvidia_bl_get_level_brightness(struct nvidia_par *par, |
29 | int level) | 32 | int level) |
30 | { | 33 | { |
@@ -119,7 +122,7 @@ void nvidia_bl_init(struct nvidia_par *par) | |||
119 | 0x534 * FB_BACKLIGHT_MAX / MAX_LEVEL); | 122 | 0x534 * FB_BACKLIGHT_MAX / MAX_LEVEL); |
120 | 123 | ||
121 | bd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1; | 124 | bd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1; |
122 | bd->props.brightness = nvidia_bl_data.max_brightness; | 125 | bd->props.brightness = bd->props.max_brightness; |
123 | bd->props.power = FB_BLANK_UNBLANK; | 126 | bd->props.power = FB_BLANK_UNBLANK; |
124 | backlight_update_status(bd); | 127 | backlight_update_status(bd); |
125 | 128 | ||
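The deleted file-scope nvidia_bl_data appears to be a leftover from the earlier backlight interface, where drivers handed a properties struct to backlight_device_register(); with the current interface the live properties sit inside the registered device, so reading max_brightness from the stale static could initialize the panel brightness to zero. The fix reads back the limit it has just stored on the device itself:

    struct backlight_device *bd;    /* as returned by backlight_device_register() */

    bd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
    bd->props.brightness = bd->props.max_brightness;   /* same object, not the
                                                           removed static copy */
    bd->props.power = FB_BLANK_UNBLANK;
    backlight_update_status(bd);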
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c index c18e9557ca30..b97ec6901263 100644 --- a/drivers/video/nvidia/nvidia.c +++ b/drivers/video/nvidia/nvidia.c | |||
@@ -83,6 +83,11 @@ static int bpp __devinitdata = 8; | |||
83 | #ifdef CONFIG_MTRR | 83 | #ifdef CONFIG_MTRR |
84 | static int nomtrr __devinitdata = 0; | 84 | static int nomtrr __devinitdata = 0; |
85 | #endif | 85 | #endif |
86 | #ifdef CONFIG_PMAC_BACKLIGHT | ||
87 | static int backlight __devinitdata = 1; | ||
88 | #else | ||
89 | static int backlight __devinitdata = 0; | ||
90 | #endif | ||
86 | 91 | ||
87 | static char *mode_option __devinitdata = NULL; | 92 | static char *mode_option __devinitdata = NULL; |
88 | 93 | ||
@@ -1311,7 +1316,10 @@ static int __devinit nvidiafb_probe(struct pci_dev *pd, | |||
1311 | nvidia_save_vga(par, &par->SavedReg); | 1316 | nvidia_save_vga(par, &par->SavedReg); |
1312 | 1317 | ||
1313 | pci_set_drvdata(pd, info); | 1318 | pci_set_drvdata(pd, info); |
1314 | nvidia_bl_init(par); | 1319 | |
1320 | if (backlight) | ||
1321 | nvidia_bl_init(par); | ||
1322 | |||
1315 | if (register_framebuffer(info) < 0) { | 1323 | if (register_framebuffer(info) < 0) { |
1316 | printk(KERN_ERR PFX "error registering nVidia framebuffer\n"); | 1324 | printk(KERN_ERR PFX "error registering nVidia framebuffer\n"); |
1317 | goto err_out_iounmap_fb; | 1325 | goto err_out_iounmap_fb; |
@@ -1408,6 +1416,8 @@ static int __devinit nvidiafb_setup(char *options) | |||
1408 | paneltweak = simple_strtoul(this_opt+11, NULL, 0); | 1416 | paneltweak = simple_strtoul(this_opt+11, NULL, 0); |
1409 | } else if (!strncmp(this_opt, "vram:", 5)) { | 1417 | } else if (!strncmp(this_opt, "vram:", 5)) { |
1410 | vram = simple_strtoul(this_opt+5, NULL, 0); | 1418 | vram = simple_strtoul(this_opt+5, NULL, 0); |
1419 | } else if (!strncmp(this_opt, "backlight:", 10)) { | ||
1420 | backlight = simple_strtoul(this_opt+10, NULL, 0); | ||
1411 | #ifdef CONFIG_MTRR | 1421 | #ifdef CONFIG_MTRR |
1412 | } else if (!strncmp(this_opt, "nomtrr", 6)) { | 1422 | } else if (!strncmp(this_opt, "nomtrr", 6)) { |
1413 | nomtrr = 1; | 1423 | nomtrr = 1; |
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c index f8a3d608b208..1d1c7c624d7f 100644 --- a/drivers/video/riva/fbdev.c +++ b/drivers/video/riva/fbdev.c | |||
@@ -215,6 +215,11 @@ static int noaccel __devinitdata = 0; | |||
215 | #ifdef CONFIG_MTRR | 215 | #ifdef CONFIG_MTRR |
216 | static int nomtrr __devinitdata = 0; | 216 | static int nomtrr __devinitdata = 0; |
217 | #endif | 217 | #endif |
218 | #ifdef CONFIG_PMAC_BACKLIGHT | ||
219 | static int backlight __devinitdata = 1; | ||
220 | #else | ||
221 | static int backlight __devinitdata = 0; | ||
222 | #endif | ||
218 | 223 | ||
219 | static char *mode_option __devinitdata = NULL; | 224 | static char *mode_option __devinitdata = NULL; |
220 | static int strictmode = 0; | 225 | static int strictmode = 0; |
@@ -2059,7 +2064,10 @@ static int __devinit rivafb_probe(struct pci_dev *pd, | |||
2059 | info->monspecs.modedb = NULL; | 2064 | info->monspecs.modedb = NULL; |
2060 | 2065 | ||
2061 | pci_set_drvdata(pd, info); | 2066 | pci_set_drvdata(pd, info); |
2062 | riva_bl_init(info->par); | 2067 | |
2068 | if (backlight) | ||
2069 | riva_bl_init(info->par); | ||
2070 | |||
2063 | ret = register_framebuffer(info); | 2071 | ret = register_framebuffer(info); |
2064 | if (ret < 0) { | 2072 | if (ret < 0) { |
2065 | printk(KERN_ERR PFX | 2073 | printk(KERN_ERR PFX |
@@ -2157,6 +2165,8 @@ static int __init rivafb_setup(char *options) | |||
2157 | forceCRTC = -1; | 2165 | forceCRTC = -1; |
2158 | } else if (!strncmp(this_opt, "flatpanel", 9)) { | 2166 | } else if (!strncmp(this_opt, "flatpanel", 9)) { |
2159 | flatpanel = 1; | 2167 | flatpanel = 1; |
2168 | } else if (!strncmp(this_opt, "backlight:", 10)) { | ||
2169 | backlight = simple_strtoul(this_opt+10, NULL, 0); | ||
2160 | #ifdef CONFIG_MTRR | 2170 | #ifdef CONFIG_MTRR |
2161 | } else if (!strncmp(this_opt, "nomtrr", 6)) { | 2171 | } else if (!strncmp(this_opt, "nomtrr", 6)) { |
2162 | nomtrr = 1; | 2172 | nomtrr = 1; |
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c index 58c0ac733db9..0a44c44672c8 100644 --- a/drivers/video/sm501fb.c +++ b/drivers/video/sm501fb.c | |||
@@ -1074,9 +1074,9 @@ static ssize_t sm501fb_crtsrc_store(struct device *dev, | |||
1074 | if (len < 1) | 1074 | if (len < 1) |
1075 | return -EINVAL; | 1075 | return -EINVAL; |
1076 | 1076 | ||
1077 | if (strnicmp(buf, "crt", sizeof("crt")) == 0) | 1077 | if (strnicmp(buf, "crt", 3) == 0) |
1078 | head = HEAD_CRT; | 1078 | head = HEAD_CRT; |
1079 | else if (strnicmp(buf, "panel", sizeof("panel")) == 0) | 1079 | else if (strnicmp(buf, "panel", 5) == 0) |
1080 | head = HEAD_PANEL; | 1080 | head = HEAD_PANEL; |
1081 | else | 1081 | else |
1082 | return -EINVAL; | 1082 | return -EINVAL; |
@@ -1098,7 +1098,7 @@ static ssize_t sm501fb_crtsrc_store(struct device *dev, | |||
1098 | writel(ctrl, info->regs + SM501_DC_CRT_CONTROL); | 1098 | writel(ctrl, info->regs + SM501_DC_CRT_CONTROL); |
1099 | sm501fb_sync_regs(info); | 1099 | sm501fb_sync_regs(info); |
1100 | 1100 | ||
1101 | return (head == HEAD_CRT) ? 3 : 5; | 1101 | return len; |
1102 | } | 1102 | } |
1103 | 1103 | ||
1104 | /* Prepare the device_attr for registration with sysfs later */ | 1104 | /* Prepare the device_attr for registration with sysfs later */ |
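Two separate fixes to the crt_src sysfs store. First, sizeof("crt") is 4 because it counts the terminating NUL, so the old comparison also required the byte after "crt" to be '\0'; "echo crt > .../crt_src" delivers "crt\n" and never matched. Comparing only the visible characters (3 and 5) accepts the trailing newline. Second, a sysfs store callback must return how many of the supplied bytes it consumed; returning 3 or 5 instead of len typically makes userspace retry the write with the unconsumed tail, which then fails. Condensed:

    /* sketch of the corrected store logic (locking and register writes elided) */
    if (strnicmp(buf, "crt", 3) == 0)           /* not sizeof("crt"), which is 4 */
            head = HEAD_CRT;
    else if (strnicmp(buf, "panel", 5) == 0)
            head = HEAD_PANEL;
    else
            return -EINVAL;

    /* ... select "head" in SM501_DC_CRT_CONTROL ... */

    return len;         /* consume the whole write, including any trailing '\n' */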
diff --git a/fs/buffer.c b/fs/buffer.c index e8504b65176c..1d0852fa728b 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -2365,6 +2365,10 @@ failed: | |||
2365 | } | 2365 | } |
2366 | EXPORT_SYMBOL(nobh_prepare_write); | 2366 | EXPORT_SYMBOL(nobh_prepare_write); |
2367 | 2367 | ||
2368 | /* | ||
2369 | * Make sure any changes to nobh_commit_write() are reflected in | ||
2370 | * nobh_truncate_page(), since it doesn't call commit_write(). | ||
2371 | */ | ||
2368 | int nobh_commit_write(struct file *file, struct page *page, | 2372 | int nobh_commit_write(struct file *file, struct page *page, |
2369 | unsigned from, unsigned to) | 2373 | unsigned from, unsigned to) |
2370 | { | 2374 | { |
@@ -2466,6 +2470,11 @@ int nobh_truncate_page(struct address_space *mapping, loff_t from) | |||
2466 | memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset); | 2470 | memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset); |
2467 | flush_dcache_page(page); | 2471 | flush_dcache_page(page); |
2468 | kunmap_atomic(kaddr, KM_USER0); | 2472 | kunmap_atomic(kaddr, KM_USER0); |
2473 | /* | ||
2474 | * It would be more correct to call aops->commit_write() | ||
2475 | * here, but this is more efficient. | ||
2476 | */ | ||
2477 | SetPageUptodate(page); | ||
2469 | set_page_dirty(page); | 2478 | set_page_dirty(page); |
2470 | } | 2479 | } |
2471 | unlock_page(page); | 2480 | unlock_page(page); |
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES index 5fe13593b57f..6247628bdaed 100644 --- a/fs/cifs/CHANGES +++ b/fs/cifs/CHANGES | |||
@@ -1,3 +1,10 @@ | |||
1 | Version 1.48 | ||
2 | ------------ | ||
3 | Fix mtime bouncing around from local idea of last write times to remote time. | ||
4 | Fix hang (in i_size_read) when simultaneous size update of same remote file | ||
5 | on smp system corrupts sequence number. Do not unnecessarily reread a partial page | ||
6 | (which we are about to overwrite anyway) when writing out a file opened rw. | ||
7 | |||
1 | Version 1.47 | 8 | Version 1.47 |
2 | ------------ | 9 | ------------ |
3 | Fix oops in list_del during mount caused by unaligned string. | 10 | Fix oops in list_del during mount caused by unaligned string. |
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile index a26f26ed5a17..6ecd9d6ba3f3 100644 --- a/fs/cifs/Makefile +++ b/fs/cifs/Makefile | |||
@@ -3,4 +3,4 @@ | |||
3 | # | 3 | # |
4 | obj-$(CONFIG_CIFS) += cifs.o | 4 | obj-$(CONFIG_CIFS) += cifs.o |
5 | 5 | ||
6 | cifs-objs := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o fcntl.o readdir.o ioctl.o sess.o | 6 | cifs-objs := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o fcntl.o readdir.o ioctl.o sess.o export.o |
diff --git a/fs/cifs/TODO b/fs/cifs/TODO index 68372946dc92..d7b9c27c942d 100644 --- a/fs/cifs/TODO +++ b/fs/cifs/TODO | |||
@@ -18,7 +18,9 @@ better) | |||
18 | 18 | ||
19 | d) Kerberos/SPNEGO session setup support - (started) | 19 | d) Kerberos/SPNEGO session setup support - (started) |
20 | 20 | ||
21 | e) NTLMv2 authentication (mostly implemented) | 21 | e) NTLMv2 authentication (mostly implemented - double check |
22 | that NTLMv2 signing works, also need to cleanup now unneeded SessSetup code in | ||
23 | fs/cifs/connect.c) | ||
22 | 24 | ||
23 | f) MD5-HMAC signing SMB PDUs when SPNEGO style SessionSetup | 25 | f) MD5-HMAC signing SMB PDUs when SPNEGO style SessionSetup |
24 | used (Kerberos or NTLMSSP). Signing already implemented for NTLM | 26 | used (Kerberos or NTLMSSP). Signing already implemented for NTLM |
@@ -88,11 +90,12 @@ w) Finish up the dos time conversion routines needed to return old server | |||
88 | time to the client (default time, of now or time 0 is used now for these | 90 | time to the client (default time, of now or time 0 is used now for these |
89 | very old servers) | 91 | very old servers) |
90 | 92 | ||
91 | x) Add support for OS/2 (LANMAN 1.2 and LANMAN2.1 based SMB servers) | 93 | x) In support for OS/2 (LANMAN 1.2 and LANMAN2.1 based SMB servers) |
94 | need to add ability to set time to server (utimes command) | ||
92 | 95 | ||
93 | y) Finish testing of Windows 9x/Windows ME server support (started). | 96 | y) Finish testing of Windows 9x/Windows ME server support (started). |
94 | 97 | ||
95 | KNOWN BUGS (updated April 29, 2005) | 98 | KNOWN BUGS (updated February 26, 2007) |
96 | ==================================== | 99 | ==================================== |
97 | See http://bugzilla.samba.org - search on product "CifsVFS" for | 100 | See http://bugzilla.samba.org - search on product "CifsVFS" for |
98 | current bug list. | 101 | current bug list. |
@@ -107,11 +110,6 @@ but recognizes them | |||
107 | succeed but still return access denied (appears to be Windows | 110 | succeed but still return access denied (appears to be Windows |
108 | server not cifs client problem) and has not been reproduced recently. | 111 | server not cifs client problem) and has not been reproduced recently. |
109 | NTFS partitions do not have this problem. | 112 | NTFS partitions do not have this problem. |
110 | 4) debug connectathon lock test case 10 which fails against | ||
111 | Samba (may be unmappable due to POSIX to Windows lock model | ||
112 | differences but worth investigating). Also debug Samba to | ||
113 | see why lock test case 7 takes longer to complete to Samba | ||
114 | than to Windows. | ||
115 | 113 | ||
116 | Misc testing to do | 114 | Misc testing to do |
117 | ================== | 115 | ================== |
@@ -119,7 +117,7 @@ Misc testing to do | |||
119 | types. Try nested symlinks (8 deep). Return max path name in stat -f information | 117 | types. Try nested symlinks (8 deep). Return max path name in stat -f information |
120 | 118 | ||
121 | 2) Modify file portion of ltp so it can run against a mounted network | 119 | 2) Modify file portion of ltp so it can run against a mounted network |
122 | share and run it against cifs vfs. | 120 | share and run it against cifs vfs in automated fashion. |
123 | 121 | ||
124 | 3) Additional performance testing and optimization using iozone and similar - | 122 | 3) Additional performance testing and optimization using iozone and similar - |
125 | there are some easy changes that can be done to parallelize sequential writes, | 123 | there are some easy changes that can be done to parallelize sequential writes, |
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index bc2c0ac27169..faba4d69fe91 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * fs/cifs/cifsfs.c | 2 | * fs/cifs/cifsfs.c |
3 | * | 3 | * |
4 | * Copyright (C) International Business Machines Corp., 2002,2004 | 4 | * Copyright (C) International Business Machines Corp., 2002,2007 |
5 | * Author(s): Steve French (sfrench@us.ibm.com) | 5 | * Author(s): Steve French (sfrench@us.ibm.com) |
6 | * | 6 | * |
7 | * Common Internet FileSystem (CIFS) client | 7 | * Common Internet FileSystem (CIFS) client |
@@ -47,7 +47,11 @@ | |||
47 | 47 | ||
48 | #ifdef CONFIG_CIFS_QUOTA | 48 | #ifdef CONFIG_CIFS_QUOTA |
49 | static struct quotactl_ops cifs_quotactl_ops; | 49 | static struct quotactl_ops cifs_quotactl_ops; |
50 | #endif | 50 | #endif /* QUOTA */ |
51 | |||
52 | #ifdef CONFIG_CIFS_EXPERIMENTAL | ||
53 | extern struct export_operations cifs_export_ops; | ||
54 | #endif /* EXPERIMENTAL */ | ||
51 | 55 | ||
52 | int cifsFYI = 0; | 56 | int cifsFYI = 0; |
53 | int cifsERROR = 1; | 57 | int cifsERROR = 1; |
@@ -62,8 +66,8 @@ unsigned int extended_security = CIFSSEC_DEF; | |||
62 | unsigned int sign_CIFS_PDUs = 1; | 66 | unsigned int sign_CIFS_PDUs = 1; |
63 | extern struct task_struct * oplockThread; /* remove sparse warning */ | 67 | extern struct task_struct * oplockThread; /* remove sparse warning */ |
64 | struct task_struct * oplockThread = NULL; | 68 | struct task_struct * oplockThread = NULL; |
65 | extern struct task_struct * dnotifyThread; /* remove sparse warning */ | 69 | /* extern struct task_struct * dnotifyThread; remove sparse warning */ |
66 | struct task_struct * dnotifyThread = NULL; | 70 | static struct task_struct * dnotifyThread = NULL; |
67 | static const struct super_operations cifs_super_ops; | 71 | static const struct super_operations cifs_super_ops; |
68 | unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE; | 72 | unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE; |
69 | module_param(CIFSMaxBufSize, int, 0); | 73 | module_param(CIFSMaxBufSize, int, 0); |
@@ -110,6 +114,10 @@ cifs_read_super(struct super_block *sb, void *data, | |||
110 | 114 | ||
111 | sb->s_magic = CIFS_MAGIC_NUMBER; | 115 | sb->s_magic = CIFS_MAGIC_NUMBER; |
112 | sb->s_op = &cifs_super_ops; | 116 | sb->s_op = &cifs_super_ops; |
117 | #ifdef CONFIG_CIFS_EXPERIMENTAL | ||
118 | if(experimEnabled != 0) | ||
119 | sb->s_export_op = &cifs_export_ops; | ||
120 | #endif /* EXPERIMENTAL */ | ||
113 | /* if(cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512) | 121 | /* if(cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512) |
114 | sb->s_blocksize = cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */ | 122 | sb->s_blocksize = cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */ |
115 | #ifdef CONFIG_CIFS_QUOTA | 123 | #ifdef CONFIG_CIFS_QUOTA |
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index c97c08eb481a..2c2c384894d8 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h | |||
@@ -38,8 +38,8 @@ extern const struct address_space_operations cifs_addr_ops_smallbuf; | |||
38 | /* Functions related to super block operations */ | 38 | /* Functions related to super block operations */ |
39 | /* extern const struct super_operations cifs_super_ops;*/ | 39 | /* extern const struct super_operations cifs_super_ops;*/ |
40 | extern void cifs_read_inode(struct inode *); | 40 | extern void cifs_read_inode(struct inode *); |
41 | extern void cifs_delete_inode(struct inode *); | 41 | /*extern void cifs_delete_inode(struct inode *);*/ /* BB not needed yet */ |
42 | /* extern void cifs_write_inode(struct inode *); *//* BB not needed yet */ | 42 | /* extern void cifs_write_inode(struct inode *); */ /* BB not needed yet */ |
43 | 43 | ||
44 | /* Functions related to inodes */ | 44 | /* Functions related to inodes */ |
45 | extern const struct inode_operations cifs_dir_inode_ops; | 45 | extern const struct inode_operations cifs_dir_inode_ops; |
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 74d3ccbb103b..e4de8eba4780 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
@@ -525,15 +525,17 @@ require use of the stronger protocol */ | |||
525 | */ | 525 | */ |
526 | GLOBAL_EXTERN struct smbUidInfo *GlobalUidList[UID_HASH]; | 526 | GLOBAL_EXTERN struct smbUidInfo *GlobalUidList[UID_HASH]; |
527 | 527 | ||
528 | GLOBAL_EXTERN struct list_head GlobalServerList; /* BB not implemented yet */ | 528 | /* GLOBAL_EXTERN struct list_head GlobalServerList; BB not implemented yet */ |
529 | GLOBAL_EXTERN struct list_head GlobalSMBSessionList; | 529 | GLOBAL_EXTERN struct list_head GlobalSMBSessionList; |
530 | GLOBAL_EXTERN struct list_head GlobalTreeConnectionList; | 530 | GLOBAL_EXTERN struct list_head GlobalTreeConnectionList; |
531 | GLOBAL_EXTERN rwlock_t GlobalSMBSeslock; /* protects list inserts on 3 above */ | 531 | GLOBAL_EXTERN rwlock_t GlobalSMBSeslock; /* protects list inserts on 3 above */ |
532 | 532 | ||
533 | GLOBAL_EXTERN struct list_head GlobalOplock_Q; | 533 | GLOBAL_EXTERN struct list_head GlobalOplock_Q; |
534 | 534 | ||
535 | GLOBAL_EXTERN struct list_head GlobalDnotifyReqList; /* Outstanding dir notify requests */ | 535 | /* Outstanding dir notify requests */ |
536 | GLOBAL_EXTERN struct list_head GlobalDnotifyRsp_Q;/* DirNotify response queue */ | 536 | GLOBAL_EXTERN struct list_head GlobalDnotifyReqList; |
537 | /* DirNotify response queue */ | ||
538 | GLOBAL_EXTERN struct list_head GlobalDnotifyRsp_Q; | ||
537 | 539 | ||
538 | /* | 540 | /* |
539 | * Global transaction id (XID) information | 541 | * Global transaction id (XID) information |
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h index 2498d644827c..0efdf35aab2c 100644 --- a/fs/cifs/cifspdu.h +++ b/fs/cifs/cifspdu.h | |||
@@ -220,6 +220,9 @@ | |||
220 | */ | 220 | */ |
221 | #define CIFS_NO_HANDLE 0xFFFF | 221 | #define CIFS_NO_HANDLE 0xFFFF |
222 | 222 | ||
223 | #define NO_CHANGE_64 0xFFFFFFFFFFFFFFFFULL | ||
224 | #define NO_CHANGE_32 0xFFFFFFFFUL | ||
225 | |||
223 | /* IPC$ in ASCII */ | 226 | /* IPC$ in ASCII */ |
224 | #define CIFS_IPC_RESOURCE "\x49\x50\x43\x24" | 227 | #define CIFS_IPC_RESOURCE "\x49\x50\x43\x24" |
225 | 228 | ||
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 6148b82170c4..32eb1acab630 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h | |||
@@ -43,7 +43,7 @@ extern void _FreeXid(unsigned int); | |||
43 | #define FreeXid(curr_xid) {_FreeXid(curr_xid); cFYI(1,("CIFS VFS: leaving %s (xid = %d) rc = %d",__FUNCTION__,curr_xid,(int)rc));} | 43 | #define FreeXid(curr_xid) {_FreeXid(curr_xid); cFYI(1,("CIFS VFS: leaving %s (xid = %d) rc = %d",__FUNCTION__,curr_xid,(int)rc));} |
44 | extern char *build_path_from_dentry(struct dentry *); | 44 | extern char *build_path_from_dentry(struct dentry *); |
45 | extern char *build_wildcard_path_from_dentry(struct dentry *direntry); | 45 | extern char *build_wildcard_path_from_dentry(struct dentry *direntry); |
46 | extern void renew_parental_timestamps(struct dentry *direntry); | 46 | /* extern void renew_parental_timestamps(struct dentry *direntry);*/ |
47 | extern int SendReceive(const unsigned int /* xid */ , struct cifsSesInfo *, | 47 | extern int SendReceive(const unsigned int /* xid */ , struct cifsSesInfo *, |
48 | struct smb_hdr * /* input */ , | 48 | struct smb_hdr * /* input */ , |
49 | struct smb_hdr * /* out */ , | 49 | struct smb_hdr * /* out */ , |
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 24364106b8f9..48fc0c2ab0e5 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c | |||
@@ -4803,6 +4803,16 @@ setPermsRetry: | |||
4803 | pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_BASIC); | 4803 | pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_BASIC); |
4804 | pSMB->Reserved4 = 0; | 4804 | pSMB->Reserved4 = 0; |
4805 | pSMB->hdr.smb_buf_length += byte_count; | 4805 | pSMB->hdr.smb_buf_length += byte_count; |
4806 | /* Samba server ignores set of file size to zero due to bugs in some | ||
4807 | older clients, but we should be precise - we use SetFileSize to | ||
4808 | set file size and do not want to truncate file size to zero | ||
4809 | accidently as happened on one Samba server beta by putting | ||
4810 | zero instead of -1 here */ | ||
4811 | data_offset->EndOfFile = NO_CHANGE_64; | ||
4812 | data_offset->NumOfBytes = NO_CHANGE_64; | ||
4813 | data_offset->LastStatusChange = NO_CHANGE_64; | ||
4814 | data_offset->LastAccessTime = NO_CHANGE_64; | ||
4815 | data_offset->LastModificationTime = NO_CHANGE_64; | ||
4806 | data_offset->Uid = cpu_to_le64(uid); | 4816 | data_offset->Uid = cpu_to_le64(uid); |
4807 | data_offset->Gid = cpu_to_le64(gid); | 4817 | data_offset->Gid = cpu_to_le64(gid); |
4808 | /* better to leave device as zero when it is */ | 4818 | /* better to leave device as zero when it is */ |
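SMB_SET_FILE_UNIX_BASIC sends a full attribute block, so any field left at zero would actively set that attribute to zero; the protocol's "leave this alone" sentinel is all ones, which is what the NO_CHANGE_64 constant added to cifspdu.h above provides. CIFSSMBUnixSetPerms() now stamps the size and timestamp fields with that sentinel so a chown/chmod cannot accidentally truncate the file or rewrite its times, and only the fields it really means to change are filled in:

    /* all-ones means "do not change"; being 0xFF..FF it is byte-order neutral,
     * so no cpu_to_le64() is needed for the sentinel itself */
    data_offset->EndOfFile            = NO_CHANGE_64;
    data_offset->NumOfBytes           = NO_CHANGE_64;
    data_offset->LastStatusChange     = NO_CHANGE_64;
    data_offset->LastAccessTime       = NO_CHANGE_64;
    data_offset->LastModificationTime = NO_CHANGE_64;
    data_offset->Uid = cpu_to_le64(uid);        /* the attributes actually set */
    data_offset->Gid = cpu_to_le64(gid);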
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 66b825ade3e1..3fad638d26d3 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c | |||
@@ -31,7 +31,7 @@ | |||
31 | #include "cifs_debug.h" | 31 | #include "cifs_debug.h" |
32 | #include "cifs_fs_sb.h" | 32 | #include "cifs_fs_sb.h" |
33 | 33 | ||
34 | void | 34 | static void |
35 | renew_parental_timestamps(struct dentry *direntry) | 35 | renew_parental_timestamps(struct dentry *direntry) |
36 | { | 36 | { |
37 | /* BB check if there is a way to get the kernel to do this or if we really need this */ | 37 | /* BB check if there is a way to get the kernel to do this or if we really need this */ |
diff --git a/fs/cifs/export.c b/fs/cifs/export.c new file mode 100644 index 000000000000..1d716392c3aa --- /dev/null +++ b/fs/cifs/export.c | |||
@@ -0,0 +1,52 @@ | |||
1 | /* | ||
2 | * fs/cifs/export.c | ||
3 | * | ||
4 | * Copyright (C) International Business Machines Corp., 2007 | ||
5 | * Author(s): Steve French (sfrench@us.ibm.com) | ||
6 | * | ||
7 | * Common Internet FileSystem (CIFS) client | ||
8 | * | ||
9 | * Operations related to support for exporting files via NFSD | ||
10 | * | ||
11 | * This library is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU Lesser General Public License as published | ||
13 | * by the Free Software Foundation; either version 2.1 of the License, or | ||
14 | * (at your option) any later version. | ||
15 | * | ||
16 | * This library is distributed in the hope that it will be useful, | ||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
19 | * the GNU Lesser General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU Lesser General Public License | ||
22 | * along with this library; if not, write to the Free Software | ||
23 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
24 | */ | ||
25 | |||
26 | /* | ||
27 | * See Documentation/filesystems/Exporting | ||
28 | * and examples in fs/exportfs | ||
29 | */ | ||
30 | |||
31 | #include <linux/fs.h> | ||
32 | |||
33 | #ifdef CONFIG_CIFS_EXPERIMENTAL | ||
34 | |||
35 | static struct dentry *cifs_get_parent(struct dentry *dentry) | ||
36 | { | ||
37 | /* BB need to add code here eventually to enable export via NFSD */ | ||
38 | return ERR_PTR(-EACCES); | ||
39 | } | ||
40 | |||
41 | struct export_operations cifs_export_ops = { | ||
42 | .get_parent = cifs_get_parent, | ||
43 | /* Following five export operations are unneeded so far and can default */ | ||
44 | /* .get_dentry = | ||
45 | .get_name = | ||
46 | .find_exported_dentry = | ||
47 | .decode_fh = | ||
48 | .encode_fh = */ | ||
49 | }; | ||
50 | |||
51 | #endif /* EXPERIMENTAL */ | ||
52 | |||
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index a1265c9bfec0..2d3275bedb55 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -879,18 +879,19 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data, | |||
879 | cifs_stats_bytes_written(pTcon, total_written); | 879 | cifs_stats_bytes_written(pTcon, total_written); |
880 | 880 | ||
881 | /* since the write may have blocked check these pointers again */ | 881 | /* since the write may have blocked check these pointers again */ |
882 | if (file->f_path.dentry) { | 882 | if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) { |
883 | if (file->f_path.dentry->d_inode) { | 883 | struct inode *inode = file->f_path.dentry->d_inode; |
884 | struct inode *inode = file->f_path.dentry->d_inode; | 884 | /* Do not update local mtime - server will set its actual value on write |
885 | inode->i_ctime = inode->i_mtime = | 885 | * inode->i_ctime = inode->i_mtime = |
886 | current_fs_time(inode->i_sb); | 886 | * current_fs_time(inode->i_sb);*/ |
887 | if (total_written > 0) { | 887 | if (total_written > 0) { |
888 | if (*poffset > file->f_path.dentry->d_inode->i_size) | 888 | spin_lock(&inode->i_lock); |
889 | i_size_write(file->f_path.dentry->d_inode, | 889 | if (*poffset > file->f_path.dentry->d_inode->i_size) |
890 | i_size_write(file->f_path.dentry->d_inode, | ||
890 | *poffset); | 891 | *poffset); |
891 | } | 892 | spin_unlock(&inode->i_lock); |
892 | mark_inode_dirty_sync(file->f_path.dentry->d_inode); | ||
893 | } | 893 | } |
894 | mark_inode_dirty_sync(file->f_path.dentry->d_inode); | ||
894 | } | 895 | } |
895 | FreeXid(xid); | 896 | FreeXid(xid); |
896 | return total_written; | 897 | return total_written; |
@@ -1012,18 +1013,18 @@ static ssize_t cifs_write(struct file *file, const char *write_data, | |||
1012 | cifs_stats_bytes_written(pTcon, total_written); | 1013 | cifs_stats_bytes_written(pTcon, total_written); |
1013 | 1014 | ||
1014 | /* since the write may have blocked check these pointers again */ | 1015 | /* since the write may have blocked check these pointers again */ |
1015 | if (file->f_path.dentry) { | 1016 | if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) { |
1016 | if (file->f_path.dentry->d_inode) { | ||
1017 | /*BB We could make this contingent on superblock ATIME flag too */ | 1017 | /*BB We could make this contingent on superblock ATIME flag too */ |
1018 | /* file->f_path.dentry->d_inode->i_ctime = | 1018 | /* file->f_path.dentry->d_inode->i_ctime = |
1019 | file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME;*/ | 1019 | file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME;*/ |
1020 | if (total_written > 0) { | 1020 | if (total_written > 0) { |
1021 | if (*poffset > file->f_path.dentry->d_inode->i_size) | 1021 | spin_lock(&file->f_path.dentry->d_inode->i_lock); |
1022 | i_size_write(file->f_path.dentry->d_inode, | 1022 | if (*poffset > file->f_path.dentry->d_inode->i_size) |
1023 | *poffset); | 1023 | i_size_write(file->f_path.dentry->d_inode, |
1024 | } | 1024 | *poffset); |
1025 | mark_inode_dirty_sync(file->f_path.dentry->d_inode); | 1025 | spin_unlock(&file->f_path.dentry->d_inode->i_lock); |
1026 | } | 1026 | } |
1027 | mark_inode_dirty_sync(file->f_path.dentry->d_inode); | ||
1027 | } | 1028 | } |
1028 | FreeXid(xid); | 1029 | FreeXid(xid); |
1029 | return total_written; | 1030 | return total_written; |
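The write paths here (and the matching hunks in inode.c and readdir.c below) make two related changes. They stop stamping a local mtime on every write, so the server-assigned write time is what getattr reports and the mtime no longer bounces between local and remote values; and they take inode->i_lock around every i_size_write(). On 32-bit SMP kernels i_size is protected by a seqcount, and two unserialized updaters can leave the counter odd, after which i_size_read() spins forever, the hang named in the CHANGES entry above. The pattern in isolation, lightly condensed:

    /* i_size_write() must be serialized: on 32-bit SMP it bumps a seqcount,
     * and a torn update leaves i_size_read() spinning forever */
    spin_lock(&inode->i_lock);
    if (*poffset > i_size_read(inode))
            i_size_write(inode, *poffset);
    spin_unlock(&inode->i_lock);
    mark_inode_dirty_sync(inode);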
@@ -1400,6 +1401,7 @@ static int cifs_commit_write(struct file *file, struct page *page, | |||
1400 | xid = GetXid(); | 1401 | xid = GetXid(); |
1401 | cFYI(1, ("commit write for page %p up to position %lld for %d", | 1402 | cFYI(1, ("commit write for page %p up to position %lld for %d", |
1402 | page, position, to)); | 1403 | page, position, to)); |
1404 | spin_lock(&inode->i_lock); | ||
1403 | if (position > inode->i_size) { | 1405 | if (position > inode->i_size) { |
1404 | i_size_write(inode, position); | 1406 | i_size_write(inode, position); |
1405 | /* if (file->private_data == NULL) { | 1407 | /* if (file->private_data == NULL) { |
@@ -1429,6 +1431,7 @@ static int cifs_commit_write(struct file *file, struct page *page, | |||
1429 | cFYI(1, (" SetEOF (commit write) rc = %d", rc)); | 1431 | cFYI(1, (" SetEOF (commit write) rc = %d", rc)); |
1430 | } */ | 1432 | } */ |
1431 | } | 1433 | } |
1434 | spin_unlock(&inode->i_lock); | ||
1432 | if (!PageUptodate(page)) { | 1435 | if (!PageUptodate(page)) { |
1433 | position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset; | 1436 | position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset; |
1434 | /* can not rely on (or let) writepage write this data */ | 1437 | /* can not rely on (or let) writepage write this data */ |
@@ -1989,34 +1992,52 @@ static int cifs_prepare_write(struct file *file, struct page *page, | |||
1989 | unsigned from, unsigned to) | 1992 | unsigned from, unsigned to) |
1990 | { | 1993 | { |
1991 | int rc = 0; | 1994 | int rc = 0; |
1992 | loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT; | 1995 | loff_t i_size; |
1996 | loff_t offset; | ||
1997 | |||
1993 | cFYI(1, ("prepare write for page %p from %d to %d",page,from,to)); | 1998 | cFYI(1, ("prepare write for page %p from %d to %d",page,from,to)); |
1994 | if (!PageUptodate(page)) { | 1999 | if (PageUptodate(page)) |
1995 | /* if (to - from != PAGE_CACHE_SIZE) { | 2000 | return 0; |
1996 | void *kaddr = kmap_atomic(page, KM_USER0); | 2001 | |
2002 | /* If we are writing a full page it will be up to date, | ||
2003 | no need to read from the server */ | ||
2004 | if ((to == PAGE_CACHE_SIZE) && (from == 0)) { | ||
2005 | SetPageUptodate(page); | ||
2006 | return 0; | ||
2007 | } | ||
2008 | |||
2009 | offset = (loff_t)page->index << PAGE_CACHE_SHIFT; | ||
2010 | i_size = i_size_read(page->mapping->host); | ||
2011 | |||
2012 | if ((offset >= i_size) || | ||
2013 | ((from == 0) && (offset + to) >= i_size)) { | ||
2014 | /* | ||
2015 | * We don't need to read data beyond the end of the file. | ||
2016 | * zero it, and set the page uptodate | ||
2017 | */ | ||
2018 | void *kaddr = kmap_atomic(page, KM_USER0); | ||
2019 | |||
2020 | if (from) | ||
1997 | memset(kaddr, 0, from); | 2021 | memset(kaddr, 0, from); |
2022 | if (to < PAGE_CACHE_SIZE) | ||
1998 | memset(kaddr + to, 0, PAGE_CACHE_SIZE - to); | 2023 | memset(kaddr + to, 0, PAGE_CACHE_SIZE - to); |
1999 | flush_dcache_page(page); | 2024 | flush_dcache_page(page); |
2000 | kunmap_atomic(kaddr, KM_USER0); | 2025 | kunmap_atomic(kaddr, KM_USER0); |
2001 | } */ | 2026 | SetPageUptodate(page); |
2002 | /* If we are writing a full page it will be up to date, | 2027 | } else if ((file->f_flags & O_ACCMODE) != O_WRONLY) { |
2003 | no need to read from the server */ | ||
2004 | if ((to == PAGE_CACHE_SIZE) && (from == 0)) | ||
2005 | SetPageUptodate(page); | ||
2006 | |||
2007 | /* might as well read a page, it is fast enough */ | 2028 | /* might as well read a page, it is fast enough */ |
2008 | if ((file->f_flags & O_ACCMODE) != O_WRONLY) { | 2029 | rc = cifs_readpage_worker(file, page, &offset); |
2009 | rc = cifs_readpage_worker(file, page, &offset); | 2030 | } else { |
2010 | } else { | 2031 | /* we could try using another file handle if there is one - |
2011 | /* should we try using another file handle if there is one - | 2032 | but how would we lock it to prevent close of that handle |
2012 | how would we lock it to prevent close of that handle | 2033 | racing with this read? In any case |
2013 | racing with this read? | 2034 | this will be written out by commit_write so is fine */ |
2014 | In any case this will be written out by commit_write */ | ||
2015 | } | ||
2016 | } | 2035 | } |
2017 | 2036 | ||
2018 | /* BB should we pass any errors back? | 2037 | /* we do not need to pass errors back |
2019 | e.g. if we do not have read access to the file */ | 2038 | e.g. if we do not have read access to the file |
2039 | because cifs_commit_write will do the right thing. -- shaggy */ | ||
2040 | |||
2020 | return 0; | 2041 | return 0; |
2021 | } | 2042 | } |
2022 | 2043 | ||
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 37c6ce87416b..86b9dbbd8441 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c | |||
@@ -143,10 +143,10 @@ int cifs_get_inode_info_unix(struct inode **pinode, | |||
143 | inode->i_gid = le64_to_cpu(findData.Gid); | 143 | inode->i_gid = le64_to_cpu(findData.Gid); |
144 | inode->i_nlink = le64_to_cpu(findData.Nlinks); | 144 | inode->i_nlink = le64_to_cpu(findData.Nlinks); |
145 | 145 | ||
146 | spin_lock(&inode->i_lock); | ||
146 | if (is_size_safe_to_change(cifsInfo, end_of_file)) { | 147 | if (is_size_safe_to_change(cifsInfo, end_of_file)) { |
147 | /* can not safely change the file size here if the | 148 | /* can not safely change the file size here if the |
148 | client is writing to it due to potential races */ | 149 | client is writing to it due to potential races */ |
149 | |||
150 | i_size_write(inode, end_of_file); | 150 | i_size_write(inode, end_of_file); |
151 | 151 | ||
152 | /* blksize needs to be multiple of two. So safer to default to | 152 | /* blksize needs to be multiple of two. So safer to default to |
@@ -162,6 +162,7 @@ int cifs_get_inode_info_unix(struct inode **pinode, | |||
162 | /* for this calculation */ | 162 | /* for this calculation */ |
163 | inode->i_blocks = (512 - 1 + num_of_bytes) >> 9; | 163 | inode->i_blocks = (512 - 1 + num_of_bytes) >> 9; |
164 | } | 164 | } |
165 | spin_unlock(&inode->i_lock); | ||
165 | 166 | ||
166 | if (num_of_bytes < end_of_file) | 167 | if (num_of_bytes < end_of_file) |
167 | cFYI(1, ("allocation size less than end of file")); | 168 | cFYI(1, ("allocation size less than end of file")); |
@@ -496,6 +497,8 @@ int cifs_get_inode_info(struct inode **pinode, | |||
496 | /* BB add code here - | 497 | /* BB add code here - |
497 | validate if device or weird share or device type? */ | 498 | validate if device or weird share or device type? */ |
498 | } | 499 | } |
500 | |||
501 | spin_lock(&inode->i_lock); | ||
499 | if (is_size_safe_to_change(cifsInfo, le64_to_cpu(pfindData->EndOfFile))) { | 502 | if (is_size_safe_to_change(cifsInfo, le64_to_cpu(pfindData->EndOfFile))) { |
500 | /* can not safely shrink the file size here if the | 503 | /* can not safely shrink the file size here if the |
501 | client is writing to it due to potential races */ | 504 | client is writing to it due to potential races */ |
@@ -506,6 +509,7 @@ int cifs_get_inode_info(struct inode **pinode, | |||
506 | inode->i_blocks = (512 - 1 + le64_to_cpu( | 509 | inode->i_blocks = (512 - 1 + le64_to_cpu( |
507 | pfindData->AllocationSize)) >> 9; | 510 | pfindData->AllocationSize)) >> 9; |
508 | } | 511 | } |
512 | spin_unlock(&inode->i_lock); | ||
509 | 513 | ||
510 | inode->i_nlink = le32_to_cpu(pfindData->NumberOfLinks); | 514 | inode->i_nlink = le32_to_cpu(pfindData->NumberOfLinks); |
511 | 515 | ||
@@ -834,8 +838,10 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry) | |||
834 | 838 | ||
835 | if (!rc) { | 839 | if (!rc) { |
836 | drop_nlink(inode); | 840 | drop_nlink(inode); |
841 | spin_lock(&direntry->d_inode->i_lock); | ||
837 | i_size_write(direntry->d_inode,0); | 842 | i_size_write(direntry->d_inode,0); |
838 | clear_nlink(direntry->d_inode); | 843 | clear_nlink(direntry->d_inode); |
844 | spin_unlock(&direntry->d_inode->i_lock); | ||
839 | } | 845 | } |
840 | 846 | ||
841 | cifsInode = CIFS_I(direntry->d_inode); | 847 | cifsInode = CIFS_I(direntry->d_inode); |
@@ -1128,6 +1134,52 @@ static int cifs_truncate_page(struct address_space *mapping, loff_t from) | |||
1128 | return rc; | 1134 | return rc; |
1129 | } | 1135 | } |
1130 | 1136 | ||
1137 | static int cifs_vmtruncate(struct inode * inode, loff_t offset) | ||
1138 | { | ||
1139 | struct address_space *mapping = inode->i_mapping; | ||
1140 | unsigned long limit; | ||
1141 | |||
1142 | spin_lock(&inode->i_lock); | ||
1143 | if (inode->i_size < offset) | ||
1144 | goto do_expand; | ||
1145 | /* | ||
1146 | * truncation of in-use swapfiles is disallowed - it would cause | ||
1147 | * subsequent swapout to scribble on the now-freed blocks. | ||
1148 | */ | ||
1149 | if (IS_SWAPFILE(inode)) { | ||
1150 | spin_unlock(&inode->i_lock); | ||
1151 | goto out_busy; | ||
1152 | } | ||
1153 | i_size_write(inode, offset); | ||
1154 | spin_unlock(&inode->i_lock); | ||
1155 | unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1); | ||
1156 | truncate_inode_pages(mapping, offset); | ||
1157 | goto out_truncate; | ||
1158 | |||
1159 | do_expand: | ||
1160 | limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; | ||
1161 | if (limit != RLIM_INFINITY && offset > limit) { | ||
1162 | spin_unlock(&inode->i_lock); | ||
1163 | goto out_sig; | ||
1164 | } | ||
1165 | if (offset > inode->i_sb->s_maxbytes) { | ||
1166 | spin_unlock(&inode->i_lock); | ||
1167 | goto out_big; | ||
1168 | } | ||
1169 | i_size_write(inode, offset); | ||
1170 | spin_unlock(&inode->i_lock); | ||
1171 | out_truncate: | ||
1172 | if (inode->i_op && inode->i_op->truncate) | ||
1173 | inode->i_op->truncate(inode); | ||
1174 | return 0; | ||
1175 | out_sig: | ||
1176 | send_sig(SIGXFSZ, current, 0); | ||
1177 | out_big: | ||
1178 | return -EFBIG; | ||
1179 | out_busy: | ||
1180 | return -ETXTBSY; | ||
1181 | } | ||
1182 | |||
1131 | int cifs_setattr(struct dentry *direntry, struct iattr *attrs) | 1183 | int cifs_setattr(struct dentry *direntry, struct iattr *attrs) |
1132 | { | 1184 | { |
1133 | int xid; | 1185 | int xid; |
@@ -1244,7 +1296,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs) | |||
1244 | */ | 1296 | */ |
1245 | 1297 | ||
1246 | if (rc == 0) { | 1298 | if (rc == 0) { |
1247 | rc = vmtruncate(direntry->d_inode, attrs->ia_size); | 1299 | rc = cifs_vmtruncate(direntry->d_inode, attrs->ia_size); |
1248 | cifs_truncate_page(direntry->d_inode->i_mapping, | 1300 | cifs_truncate_page(direntry->d_inode->i_mapping, |
1249 | direntry->d_inode->i_size); | 1301 | direntry->d_inode->i_size); |
1250 | } else | 1302 | } else |
@@ -1379,9 +1431,11 @@ cifs_setattr_exit: | |||
1379 | return rc; | 1431 | return rc; |
1380 | } | 1432 | } |
1381 | 1433 | ||
1434 | #if 0 | ||
1382 | void cifs_delete_inode(struct inode *inode) | 1435 | void cifs_delete_inode(struct inode *inode) |
1383 | { | 1436 | { |
1384 | cFYI(1, ("In cifs_delete_inode, inode = 0x%p", inode)); | 1437 | cFYI(1, ("In cifs_delete_inode, inode = 0x%p", inode)); |
1385 | /* may have to add back in if and when safe distributed caching of | 1438 | /* may have to add back in if and when safe distributed caching of |
1386 | directories added e.g. via FindNotify */ | 1439 | directories added e.g. via FindNotify */ |
1387 | } | 1440 | } |
1441 | #endif | ||
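
The recurring pattern in the cifs hunks above is to take inode->i_lock around i_size_write() and the i_blocks update so the size/blocks pair is published atomically, and the new cifs_vmtruncate() extends the same lock over the size check as well. A minimal userspace sketch of that idea, with hypothetical names standing in for the inode fields (illustrative only, not the kernel code itself):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct fake_inode {
	pthread_mutex_t lock;   /* stands in for inode->i_lock */
	uint64_t size;          /* stands in for inode->i_size */
	uint64_t blocks;        /* stands in for inode->i_blocks */
};

/* update size and block count under one lock so a reader never sees
 * a half-updated pair */
static void set_size_locked(struct fake_inode *ino, uint64_t eof, uint64_t alloc)
{
	pthread_mutex_lock(&ino->lock);
	ino->size = eof;
	ino->blocks = (512 - 1 + alloc) >> 9;   /* same rounding as the patch */
	pthread_mutex_unlock(&ino->lock);
}

int main(void)
{
	struct fake_inode ino = { .lock = PTHREAD_MUTEX_INITIALIZER };

	set_size_locked(&ino, 4096, 4096);
	printf("size=%llu blocks=%llu\n",
	       (unsigned long long)ino.size, (unsigned long long)ino.blocks);
	return 0;
}
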
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index c444798f0740..44cfb528797d 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Directory search handling | 4 | * Directory search handling |
5 | * | 5 | * |
6 | * Copyright (C) International Business Machines Corp., 2004, 2005 | 6 | * Copyright (C) International Business Machines Corp., 2004, 2007 |
7 | * Author(s): Steve French (sfrench@us.ibm.com) | 7 | * Author(s): Steve French (sfrench@us.ibm.com) |
8 | * | 8 | * |
9 | * This library is free software; you can redistribute it and/or modify | 9 | * This library is free software; you can redistribute it and/or modify |
@@ -226,6 +226,7 @@ static void fill_in_inode(struct inode *tmp_inode, int new_buf_type, | |||
226 | atomic_set(&cifsInfo->inUse, 1); | 226 | atomic_set(&cifsInfo->inUse, 1); |
227 | } | 227 | } |
228 | 228 | ||
229 | spin_lock(&tmp_inode->i_lock); | ||
229 | if (is_size_safe_to_change(cifsInfo, end_of_file)) { | 230 | if (is_size_safe_to_change(cifsInfo, end_of_file)) { |
230 | /* can not safely change the file size here if the | 231 | /* can not safely change the file size here if the |
231 | client is writing to it due to potential races */ | 232 | client is writing to it due to potential races */ |
@@ -235,6 +236,7 @@ static void fill_in_inode(struct inode *tmp_inode, int new_buf_type, | |||
235 | /* for this calculation, even though the reported blocksize is larger */ | 236 | /* for this calculation, even though the reported blocksize is larger */ |
236 | tmp_inode->i_blocks = (512 - 1 + allocation_size) >> 9; | 237 | tmp_inode->i_blocks = (512 - 1 + allocation_size) >> 9; |
237 | } | 238 | } |
239 | spin_unlock(&tmp_inode->i_lock); | ||
238 | 240 | ||
239 | if (allocation_size < end_of_file) | 241 | if (allocation_size < end_of_file) |
240 | cFYI(1, ("May be sparse file, allocation less than file size")); | 242 | cFYI(1, ("May be sparse file, allocation less than file size")); |
@@ -355,6 +357,7 @@ static void unix_fill_in_inode(struct inode *tmp_inode, | |||
355 | tmp_inode->i_gid = le64_to_cpu(pfindData->Gid); | 357 | tmp_inode->i_gid = le64_to_cpu(pfindData->Gid); |
356 | tmp_inode->i_nlink = le64_to_cpu(pfindData->Nlinks); | 358 | tmp_inode->i_nlink = le64_to_cpu(pfindData->Nlinks); |
357 | 359 | ||
360 | spin_lock(&tmp_inode->i_lock); | ||
358 | if (is_size_safe_to_change(cifsInfo, end_of_file)) { | 361 | if (is_size_safe_to_change(cifsInfo, end_of_file)) { |
359 | /* can not safely change the file size here if the | 362 | /* can not safely change the file size here if the |
360 | client is writing to it due to potential races */ | 363 | client is writing to it due to potential races */ |
@@ -364,6 +367,7 @@ static void unix_fill_in_inode(struct inode *tmp_inode, | |||
364 | /* for this calculation, not the real blocksize */ | 367 | /* for this calculation, not the real blocksize */ |
365 | tmp_inode->i_blocks = (512 - 1 + num_of_bytes) >> 9; | 368 | tmp_inode->i_blocks = (512 - 1 + num_of_bytes) >> 9; |
366 | } | 369 | } |
370 | spin_unlock(&tmp_inode->i_lock); | ||
367 | 371 | ||
368 | if (S_ISREG(tmp_inode->i_mode)) { | 372 | if (S_ISREG(tmp_inode->i_mode)) { |
369 | cFYI(1, ("File inode")); | 373 | cFYI(1, ("File inode")); |
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index f80007eaebf4..5f468459a1e2 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c | |||
@@ -499,7 +499,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, | |||
499 | due to last connection to this server being unmounted */ | 499 | due to last connection to this server being unmounted */ |
500 | if (signal_pending(current)) { | 500 | if (signal_pending(current)) { |
501 | /* if signal pending do not hold up user for full smb timeout | 501 | /* if signal pending do not hold up user for full smb timeout |
502 | but we still give response a change to complete */ | 502 | but we still give response a chance to complete */ |
503 | timeout = 2 * HZ; | 503 | timeout = 2 * HZ; |
504 | } | 504 | } |
505 | 505 | ||
@@ -587,7 +587,6 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, | |||
587 | } | 587 | } |
588 | 588 | ||
589 | out: | 589 | out: |
590 | |||
591 | DeleteMidQEntry(midQ); | 590 | DeleteMidQEntry(midQ); |
592 | atomic_dec(&ses->server->inFlight); | 591 | atomic_dec(&ses->server->inFlight); |
593 | wake_up(&ses->server->request_q); | 592 | wake_up(&ses->server->request_q); |
@@ -681,7 +680,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, | |||
681 | due to last connection to this server being unmounted */ | 680 | due to last connection to this server being unmounted */ |
682 | if (signal_pending(current)) { | 681 | if (signal_pending(current)) { |
683 | /* if signal pending do not hold up user for full smb timeout | 682 | /* if signal pending do not hold up user for full smb timeout |
684 | but we still give response a change to complete */ | 683 | but we still give response a chance to complete */ |
685 | timeout = 2 * HZ; | 684 | timeout = 2 * HZ; |
686 | } | 685 | } |
687 | 686 | ||
@@ -765,7 +764,6 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, | |||
765 | } | 764 | } |
766 | 765 | ||
767 | out: | 766 | out: |
768 | |||
769 | DeleteMidQEntry(midQ); | 767 | DeleteMidQEntry(midQ); |
770 | atomic_dec(&ses->server->inFlight); | 768 | atomic_dec(&ses->server->inFlight); |
771 | wake_up(&ses->server->request_q); | 769 | wake_up(&ses->server->request_q); |
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 0cfff4fefa9e..e62f3fc7241e 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c | |||
@@ -168,9 +168,9 @@ static int grow_file(struct dentry *ecryptfs_dentry, struct file *lower_file, | |||
168 | goto out; | 168 | goto out; |
169 | } | 169 | } |
170 | i_size_write(inode, 0); | 170 | i_size_write(inode, 0); |
171 | ecryptfs_write_inode_size_to_metadata(lower_file, lower_inode, inode, | 171 | rc = ecryptfs_write_inode_size_to_metadata(lower_file, lower_inode, |
172 | ecryptfs_dentry, | 172 | inode, ecryptfs_dentry, |
173 | ECRYPTFS_LOWER_I_MUTEX_NOT_HELD); | 173 | ECRYPTFS_LOWER_I_MUTEX_NOT_HELD); |
174 | ecryptfs_inode_to_private(inode)->crypt_stat.flags |= ECRYPTFS_NEW_FILE; | 174 | ecryptfs_inode_to_private(inode)->crypt_stat.flags |= ECRYPTFS_NEW_FILE; |
175 | out: | 175 | out: |
176 | return rc; | 176 | return rc; |
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index 812427e6805c..fc4a3a224641 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c | |||
@@ -484,18 +484,12 @@ static int ecryptfs_read_super(struct super_block *sb, const char *dev_name) | |||
484 | struct vfsmount *lower_mnt; | 484 | struct vfsmount *lower_mnt; |
485 | 485 | ||
486 | memset(&nd, 0, sizeof(struct nameidata)); | 486 | memset(&nd, 0, sizeof(struct nameidata)); |
487 | rc = path_lookup(dev_name, LOOKUP_FOLLOW, &nd); | 487 | rc = path_lookup(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &nd); |
488 | if (rc) { | 488 | if (rc) { |
489 | ecryptfs_printk(KERN_WARNING, "path_lookup() failed\n"); | 489 | ecryptfs_printk(KERN_WARNING, "path_lookup() failed\n"); |
490 | goto out; | 490 | goto out; |
491 | } | 491 | } |
492 | lower_root = nd.dentry; | 492 | lower_root = nd.dentry; |
493 | if (!lower_root->d_inode) { | ||
494 | ecryptfs_printk(KERN_WARNING, | ||
495 | "No directory to interpose on\n"); | ||
496 | rc = -ENOENT; | ||
497 | goto out_free; | ||
498 | } | ||
499 | lower_mnt = nd.mnt; | 493 | lower_mnt = nd.mnt; |
500 | ecryptfs_set_superblock_lower(sb, lower_root->d_sb); | 494 | ecryptfs_set_superblock_lower(sb, lower_root->d_sb); |
501 | sb->s_maxbytes = lower_root->d_sb->s_maxbytes; | 495 | sb->s_maxbytes = lower_root->d_sb->s_maxbytes; |
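
The ecryptfs_read_super() change lets path_lookup() itself reject non-directories by adding LOOKUP_DIRECTORY, which is what makes the removed hand-rolled d_inode check redundant. As a rough userspace analogue of pushing the "must be a directory" requirement into the lookup itself, open(2) with O_DIRECTORY fails with ENOTDIR instead of requiring a follow-up check:

#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/tmp";
	/* the open itself enforces the directory requirement */
	int fd = open(path, O_RDONLY | O_DIRECTORY);

	if (fd < 0) {
		printf("%s: %s\n", path, strerror(errno));
		return 1;
	}
	printf("%s is a directory\n", path);
	close(fd);
	return 0;
}
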
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index 7be8e91b5ba0..b731b09499cb 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c | |||
@@ -446,6 +446,7 @@ static int ecryptfs_write_inode_size_to_header(struct file *lower_file, | |||
446 | const struct address_space_operations *lower_a_ops; | 446 | const struct address_space_operations *lower_a_ops; |
447 | u64 file_size; | 447 | u64 file_size; |
448 | 448 | ||
449 | retry: | ||
449 | header_page = grab_cache_page(lower_inode->i_mapping, 0); | 450 | header_page = grab_cache_page(lower_inode->i_mapping, 0); |
450 | if (!header_page) { | 451 | if (!header_page) { |
451 | ecryptfs_printk(KERN_ERR, "grab_cache_page for " | 452 | ecryptfs_printk(KERN_ERR, "grab_cache_page for " |
@@ -456,9 +457,10 @@ static int ecryptfs_write_inode_size_to_header(struct file *lower_file, | |||
456 | lower_a_ops = lower_inode->i_mapping->a_ops; | 457 | lower_a_ops = lower_inode->i_mapping->a_ops; |
457 | rc = lower_a_ops->prepare_write(lower_file, header_page, 0, 8); | 458 | rc = lower_a_ops->prepare_write(lower_file, header_page, 0, 8); |
458 | if (rc) { | 459 | if (rc) { |
459 | if (rc == AOP_TRUNCATED_PAGE) | 460 | if (rc == AOP_TRUNCATED_PAGE) { |
460 | ecryptfs_release_lower_page(header_page, 0); | 461 | ecryptfs_release_lower_page(header_page, 0); |
461 | else | 462 | goto retry; |
463 | } else | ||
462 | ecryptfs_release_lower_page(header_page, 1); | 464 | ecryptfs_release_lower_page(header_page, 1); |
463 | goto out; | 465 | goto out; |
464 | } | 466 | } |
@@ -473,9 +475,10 @@ static int ecryptfs_write_inode_size_to_header(struct file *lower_file, | |||
473 | if (rc < 0) | 475 | if (rc < 0) |
474 | ecryptfs_printk(KERN_ERR, "Error commiting header page " | 476 | ecryptfs_printk(KERN_ERR, "Error commiting header page " |
475 | "write\n"); | 477 | "write\n"); |
476 | if (rc == AOP_TRUNCATED_PAGE) | 478 | if (rc == AOP_TRUNCATED_PAGE) { |
477 | ecryptfs_release_lower_page(header_page, 0); | 479 | ecryptfs_release_lower_page(header_page, 0); |
478 | else | 480 | goto retry; |
481 | } else | ||
479 | ecryptfs_release_lower_page(header_page, 1); | 482 | ecryptfs_release_lower_page(header_page, 1); |
480 | lower_inode->i_mtime = lower_inode->i_ctime = CURRENT_TIME; | 483 | lower_inode->i_mtime = lower_inode->i_ctime = CURRENT_TIME; |
481 | mark_inode_dirty_sync(inode); | 484 | mark_inode_dirty_sync(inode); |
@@ -502,7 +505,8 @@ static int ecryptfs_write_inode_size_to_xattr(struct inode *lower_inode, | |||
502 | goto out; | 505 | goto out; |
503 | } | 506 | } |
504 | lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry); | 507 | lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry); |
505 | if (!lower_dentry->d_inode->i_op->getxattr) { | 508 | if (!lower_dentry->d_inode->i_op->getxattr || |
509 | !lower_dentry->d_inode->i_op->setxattr) { | ||
506 | printk(KERN_WARNING | 510 | printk(KERN_WARNING |
507 | "No support for setting xattr in lower filesystem\n"); | 511 | "No support for setting xattr in lower filesystem\n"); |
508 | rc = -ENOSYS; | 512 | rc = -ENOSYS; |
@@ -564,6 +568,7 @@ int ecryptfs_get_lower_page(struct page **lower_page, struct inode *lower_inode, | |||
564 | { | 568 | { |
565 | int rc = 0; | 569 | int rc = 0; |
566 | 570 | ||
571 | retry: | ||
567 | *lower_page = grab_cache_page(lower_inode->i_mapping, lower_page_index); | 572 | *lower_page = grab_cache_page(lower_inode->i_mapping, lower_page_index); |
568 | if (!(*lower_page)) { | 573 | if (!(*lower_page)) { |
569 | rc = -EINVAL; | 574 | rc = -EINVAL; |
@@ -577,18 +582,18 @@ int ecryptfs_get_lower_page(struct page **lower_page, struct inode *lower_inode, | |||
577 | byte_offset, | 582 | byte_offset, |
578 | region_bytes); | 583 | region_bytes); |
579 | if (rc) { | 584 | if (rc) { |
580 | ecryptfs_printk(KERN_ERR, "prepare_write for " | 585 | if (rc == AOP_TRUNCATED_PAGE) { |
586 | ecryptfs_release_lower_page(*lower_page, 0); | ||
587 | goto retry; | ||
588 | } else { | ||
589 | ecryptfs_printk(KERN_ERR, "prepare_write for " | ||
581 | "lower_page_index = [0x%.16x] failed; rc = " | 590 | "lower_page_index = [0x%.16x] failed; rc = " |
582 | "[%d]\n", lower_page_index, rc); | 591 | "[%d]\n", lower_page_index, rc); |
583 | } | ||
584 | out: | ||
585 | if (rc && (*lower_page)) { | ||
586 | if (rc == AOP_TRUNCATED_PAGE) | ||
587 | ecryptfs_release_lower_page(*lower_page, 0); | ||
588 | else | ||
589 | ecryptfs_release_lower_page(*lower_page, 1); | 592 | ecryptfs_release_lower_page(*lower_page, 1); |
590 | (*lower_page) = NULL; | 593 | (*lower_page) = NULL; |
594 | } | ||
591 | } | 595 | } |
596 | out: | ||
592 | return rc; | 597 | return rc; |
593 | } | 598 | } |
594 | 599 | ||
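
The mmap.c hunks convert "give up when prepare_write() reports AOP_TRUNCATED_PAGE" into "release the stale page and retry from the top". A small sketch of that control flow, with a hypothetical try_prepare() standing in for prepare_write() and a plain integer standing in for AOP_TRUNCATED_PAGE:

#include <stdio.h>

#define EAGAIN_TRUNCATED 1   /* stands in for AOP_TRUNCATED_PAGE */

static int attempts;

/* pretend the first attempt races with a truncation */
static int try_prepare(void)
{
	return (attempts++ == 0) ? EAGAIN_TRUNCATED : 0;
}

int main(void)
{
	int rc;

retry:
	rc = try_prepare();
	if (rc == EAGAIN_TRUNCATED) {
		/* drop the stale page here, then start over */
		goto retry;
	}
	printf("prepared after %d attempt(s), rc=%d\n", attempts, rc);
	return rc;
}
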
diff --git a/fs/libfs.c b/fs/libfs.c index cf79196535ec..d93842d3c0a0 100644 --- a/fs/libfs.c +++ b/fs/libfs.c | |||
@@ -190,6 +190,10 @@ const struct inode_operations simple_dir_inode_operations = { | |||
190 | .lookup = simple_lookup, | 190 | .lookup = simple_lookup, |
191 | }; | 191 | }; |
192 | 192 | ||
193 | static const struct super_operations simple_super_operations = { | ||
194 | .statfs = simple_statfs, | ||
195 | }; | ||
196 | |||
193 | /* | 197 | /* |
194 | * Common helper for pseudo-filesystems (sockfs, pipefs, bdev - stuff that | 198 | * Common helper for pseudo-filesystems (sockfs, pipefs, bdev - stuff that |
195 | * will never be mountable) | 199 | * will never be mountable) |
@@ -199,7 +203,6 @@ int get_sb_pseudo(struct file_system_type *fs_type, char *name, | |||
199 | struct vfsmount *mnt) | 203 | struct vfsmount *mnt) |
200 | { | 204 | { |
201 | struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL); | 205 | struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL); |
202 | static const struct super_operations default_ops = {.statfs = simple_statfs}; | ||
203 | struct dentry *dentry; | 206 | struct dentry *dentry; |
204 | struct inode *root; | 207 | struct inode *root; |
205 | struct qstr d_name = {.name = name, .len = strlen(name)}; | 208 | struct qstr d_name = {.name = name, .len = strlen(name)}; |
@@ -212,7 +215,7 @@ int get_sb_pseudo(struct file_system_type *fs_type, char *name, | |||
212 | s->s_blocksize = 1024; | 215 | s->s_blocksize = 1024; |
213 | s->s_blocksize_bits = 10; | 216 | s->s_blocksize_bits = 10; |
214 | s->s_magic = magic; | 217 | s->s_magic = magic; |
215 | s->s_op = ops ? ops : &default_ops; | 218 | s->s_op = ops ? ops : &simple_super_operations; |
216 | s->s_time_gran = 1; | 219 | s->s_time_gran = 1; |
217 | root = new_inode(s); | 220 | root = new_inode(s); |
218 | if (!root) | 221 | if (!root) |
@@ -359,7 +362,6 @@ int simple_commit_write(struct file *file, struct page *page, | |||
359 | 362 | ||
360 | int simple_fill_super(struct super_block *s, int magic, struct tree_descr *files) | 363 | int simple_fill_super(struct super_block *s, int magic, struct tree_descr *files) |
361 | { | 364 | { |
362 | static struct super_operations s_ops = {.statfs = simple_statfs}; | ||
363 | struct inode *inode; | 365 | struct inode *inode; |
364 | struct dentry *root; | 366 | struct dentry *root; |
365 | struct dentry *dentry; | 367 | struct dentry *dentry; |
@@ -368,7 +370,7 @@ int simple_fill_super(struct super_block *s, int magic, struct tree_descr *files | |||
368 | s->s_blocksize = PAGE_CACHE_SIZE; | 370 | s->s_blocksize = PAGE_CACHE_SIZE; |
369 | s->s_blocksize_bits = PAGE_CACHE_SHIFT; | 371 | s->s_blocksize_bits = PAGE_CACHE_SHIFT; |
370 | s->s_magic = magic; | 372 | s->s_magic = magic; |
371 | s->s_op = &s_ops; | 373 | s->s_op = &simple_super_operations; |
372 | s->s_time_gran = 1; | 374 | s->s_time_gran = 1; |
373 | 375 | ||
374 | inode = new_inode(s); | 376 | inode = new_inode(s); |
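
The libfs change replaces two function-local static ops tables with a single file-scope const simple_super_operations shared by get_sb_pseudo() and simple_fill_super(). A toy version of the same consolidation, with made-up types and names for illustration:

#include <stdio.h>

struct super_ops {
	int (*statfs)(void);
};

static int simple_statfs(void) { return 0; }

/* one shared, read-only instance, analogous to simple_super_operations */
static const struct super_ops simple_ops = { .statfs = simple_statfs };

static void fill_super_a(const struct super_ops **slot) { *slot = &simple_ops; }
static void fill_super_b(const struct super_ops **slot) { *slot = &simple_ops; }

int main(void)
{
	const struct super_ops *a, *b;

	fill_super_a(&a);
	fill_super_b(&b);
	printf("both supers share one ops table: %s\n", a == b ? "yes" : "no");
	return 0;
}
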
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c index 14939ddf74f1..7285c94956c4 100644 --- a/fs/ncpfs/inode.c +++ b/fs/ncpfs/inode.c | |||
@@ -576,6 +576,12 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent) | |||
576 | server->packet = vmalloc(NCP_PACKET_SIZE); | 576 | server->packet = vmalloc(NCP_PACKET_SIZE); |
577 | if (server->packet == NULL) | 577 | if (server->packet == NULL) |
578 | goto out_nls; | 578 | goto out_nls; |
579 | server->txbuf = vmalloc(NCP_PACKET_SIZE); | ||
580 | if (server->txbuf == NULL) | ||
581 | goto out_packet; | ||
582 | server->rxbuf = vmalloc(NCP_PACKET_SIZE); | ||
583 | if (server->rxbuf == NULL) | ||
584 | goto out_txbuf; | ||
579 | 585 | ||
580 | sock->sk->sk_data_ready = ncp_tcp_data_ready; | 586 | sock->sk->sk_data_ready = ncp_tcp_data_ready; |
581 | sock->sk->sk_error_report = ncp_tcp_error_report; | 587 | sock->sk->sk_error_report = ncp_tcp_error_report; |
@@ -597,7 +603,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent) | |||
597 | error = ncp_connect(server); | 603 | error = ncp_connect(server); |
598 | ncp_unlock_server(server); | 604 | ncp_unlock_server(server); |
599 | if (error < 0) | 605 | if (error < 0) |
600 | goto out_packet; | 606 | goto out_rxbuf; |
601 | DPRINTK("ncp_fill_super: NCP_SBP(sb) = %x\n", (int) NCP_SBP(sb)); | 607 | DPRINTK("ncp_fill_super: NCP_SBP(sb) = %x\n", (int) NCP_SBP(sb)); |
602 | 608 | ||
603 | error = -EMSGSIZE; /* -EREMOTESIDEINCOMPATIBLE */ | 609 | error = -EMSGSIZE; /* -EREMOTESIDEINCOMPATIBLE */ |
@@ -666,8 +672,12 @@ out_disconnect: | |||
666 | ncp_lock_server(server); | 672 | ncp_lock_server(server); |
667 | ncp_disconnect(server); | 673 | ncp_disconnect(server); |
668 | ncp_unlock_server(server); | 674 | ncp_unlock_server(server); |
669 | out_packet: | 675 | out_rxbuf: |
670 | ncp_stop_tasks(server); | 676 | ncp_stop_tasks(server); |
677 | vfree(server->rxbuf); | ||
678 | out_txbuf: | ||
679 | vfree(server->txbuf); | ||
680 | out_packet: | ||
671 | vfree(server->packet); | 681 | vfree(server->packet); |
672 | out_nls: | 682 | out_nls: |
673 | #ifdef CONFIG_NCPFS_NLS | 683 | #ifdef CONFIG_NCPFS_NLS |
@@ -723,6 +733,8 @@ static void ncp_put_super(struct super_block *sb) | |||
723 | 733 | ||
724 | kfree(server->priv.data); | 734 | kfree(server->priv.data); |
725 | kfree(server->auth.object_name); | 735 | kfree(server->auth.object_name); |
736 | vfree(server->rxbuf); | ||
737 | vfree(server->txbuf); | ||
726 | vfree(server->packet); | 738 | vfree(server->packet); |
727 | sb->s_fs_info = NULL; | 739 | sb->s_fs_info = NULL; |
728 | kfree(server); | 740 | kfree(server); |
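
The ncp_fill_super() hunks add two more vmalloc'd buffers and extend the existing goto-based unwind so each failure path frees exactly what was allocated before it, in reverse order. A self-contained userspace sketch of that idiom (buffer names are illustrative):

#include <stdlib.h>
#include <stdio.h>

#define PACKET_SIZE 4096

static int setup_buffers(void)
{
	char *packet, *txbuf, *rxbuf;
	int rc = -1;

	packet = malloc(PACKET_SIZE);
	if (!packet)
		goto out;
	txbuf = malloc(PACKET_SIZE);
	if (!txbuf)
		goto out_packet;
	rxbuf = malloc(PACKET_SIZE);
	if (!rxbuf)
		goto out_txbuf;

	/* ... use the buffers ... */
	rc = 0;

	free(rxbuf);
out_txbuf:
	free(txbuf);
out_packet:
	free(packet);
out:
	return rc;
}

int main(void)
{
	printf("setup_buffers() = %d\n", setup_buffers());
	return 0;
}
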
diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c index e496d8b65e92..e37df8d5fe70 100644 --- a/fs/ncpfs/sock.c +++ b/fs/ncpfs/sock.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/socket.h> | 14 | #include <linux/socket.h> |
15 | #include <linux/fcntl.h> | 15 | #include <linux/fcntl.h> |
16 | #include <linux/stat.h> | 16 | #include <linux/stat.h> |
17 | #include <linux/string.h> | ||
17 | #include <asm/uaccess.h> | 18 | #include <asm/uaccess.h> |
18 | #include <linux/in.h> | 19 | #include <linux/in.h> |
19 | #include <linux/net.h> | 20 | #include <linux/net.h> |
@@ -55,10 +56,11 @@ static int _send(struct socket *sock, const void *buff, int len) | |||
55 | struct ncp_request_reply { | 56 | struct ncp_request_reply { |
56 | struct list_head req; | 57 | struct list_head req; |
57 | wait_queue_head_t wq; | 58 | wait_queue_head_t wq; |
58 | struct ncp_reply_header* reply_buf; | 59 | atomic_t refs; |
60 | unsigned char* reply_buf; | ||
59 | size_t datalen; | 61 | size_t datalen; |
60 | int result; | 62 | int result; |
61 | enum { RQ_DONE, RQ_INPROGRESS, RQ_QUEUED, RQ_IDLE } status; | 63 | enum { RQ_DONE, RQ_INPROGRESS, RQ_QUEUED, RQ_IDLE, RQ_ABANDONED } status; |
62 | struct kvec* tx_ciov; | 64 | struct kvec* tx_ciov; |
63 | size_t tx_totallen; | 65 | size_t tx_totallen; |
64 | size_t tx_iovlen; | 66 | size_t tx_iovlen; |
@@ -67,6 +69,32 @@ struct ncp_request_reply { | |||
67 | u_int32_t sign[6]; | 69 | u_int32_t sign[6]; |
68 | }; | 70 | }; |
69 | 71 | ||
72 | static inline struct ncp_request_reply* ncp_alloc_req(void) | ||
73 | { | ||
74 | struct ncp_request_reply *req; | ||
75 | |||
76 | req = kmalloc(sizeof(struct ncp_request_reply), GFP_KERNEL); | ||
77 | if (!req) | ||
78 | return NULL; | ||
79 | |||
80 | init_waitqueue_head(&req->wq); | ||
81 | atomic_set(&req->refs, (1)); | ||
82 | req->status = RQ_IDLE; | ||
83 | |||
84 | return req; | ||
85 | } | ||
86 | |||
87 | static void ncp_req_get(struct ncp_request_reply *req) | ||
88 | { | ||
89 | atomic_inc(&req->refs); | ||
90 | } | ||
91 | |||
92 | static void ncp_req_put(struct ncp_request_reply *req) | ||
93 | { | ||
94 | if (atomic_dec_and_test(&req->refs)) | ||
95 | kfree(req); | ||
96 | } | ||
97 | |||
70 | void ncp_tcp_data_ready(struct sock *sk, int len) | 98 | void ncp_tcp_data_ready(struct sock *sk, int len) |
71 | { | 99 | { |
72 | struct ncp_server *server = sk->sk_user_data; | 100 | struct ncp_server *server = sk->sk_user_data; |
@@ -101,14 +129,17 @@ void ncpdgram_timeout_call(unsigned long v) | |||
101 | schedule_work(&server->timeout_tq); | 129 | schedule_work(&server->timeout_tq); |
102 | } | 130 | } |
103 | 131 | ||
104 | static inline void ncp_finish_request(struct ncp_request_reply *req, int result) | 132 | static inline void ncp_finish_request(struct ncp_server *server, struct ncp_request_reply *req, int result) |
105 | { | 133 | { |
106 | req->result = result; | 134 | req->result = result; |
135 | if (req->status != RQ_ABANDONED) | ||
136 | memcpy(req->reply_buf, server->rxbuf, req->datalen); | ||
107 | req->status = RQ_DONE; | 137 | req->status = RQ_DONE; |
108 | wake_up_all(&req->wq); | 138 | wake_up_all(&req->wq); |
139 | ncp_req_put(req); | ||
109 | } | 140 | } |
110 | 141 | ||
111 | static void __abort_ncp_connection(struct ncp_server *server, struct ncp_request_reply *aborted, int err) | 142 | static void __abort_ncp_connection(struct ncp_server *server) |
112 | { | 143 | { |
113 | struct ncp_request_reply *req; | 144 | struct ncp_request_reply *req; |
114 | 145 | ||
@@ -118,31 +149,19 @@ static void __abort_ncp_connection(struct ncp_server *server, struct ncp_request | |||
118 | req = list_entry(server->tx.requests.next, struct ncp_request_reply, req); | 149 | req = list_entry(server->tx.requests.next, struct ncp_request_reply, req); |
119 | 150 | ||
120 | list_del_init(&req->req); | 151 | list_del_init(&req->req); |
121 | if (req == aborted) { | 152 | ncp_finish_request(server, req, -EIO); |
122 | ncp_finish_request(req, err); | ||
123 | } else { | ||
124 | ncp_finish_request(req, -EIO); | ||
125 | } | ||
126 | } | 153 | } |
127 | req = server->rcv.creq; | 154 | req = server->rcv.creq; |
128 | if (req) { | 155 | if (req) { |
129 | server->rcv.creq = NULL; | 156 | server->rcv.creq = NULL; |
130 | if (req == aborted) { | 157 | ncp_finish_request(server, req, -EIO); |
131 | ncp_finish_request(req, err); | ||
132 | } else { | ||
133 | ncp_finish_request(req, -EIO); | ||
134 | } | ||
135 | server->rcv.ptr = NULL; | 158 | server->rcv.ptr = NULL; |
136 | server->rcv.state = 0; | 159 | server->rcv.state = 0; |
137 | } | 160 | } |
138 | req = server->tx.creq; | 161 | req = server->tx.creq; |
139 | if (req) { | 162 | if (req) { |
140 | server->tx.creq = NULL; | 163 | server->tx.creq = NULL; |
141 | if (req == aborted) { | 164 | ncp_finish_request(server, req, -EIO); |
142 | ncp_finish_request(req, err); | ||
143 | } else { | ||
144 | ncp_finish_request(req, -EIO); | ||
145 | } | ||
146 | } | 165 | } |
147 | } | 166 | } |
148 | 167 | ||
@@ -160,10 +179,12 @@ static inline void __ncp_abort_request(struct ncp_server *server, struct ncp_req | |||
160 | break; | 179 | break; |
161 | case RQ_QUEUED: | 180 | case RQ_QUEUED: |
162 | list_del_init(&req->req); | 181 | list_del_init(&req->req); |
163 | ncp_finish_request(req, err); | 182 | ncp_finish_request(server, req, err); |
164 | break; | 183 | break; |
165 | case RQ_INPROGRESS: | 184 | case RQ_INPROGRESS: |
166 | __abort_ncp_connection(server, req, err); | 185 | req->status = RQ_ABANDONED; |
186 | break; | ||
187 | case RQ_ABANDONED: | ||
167 | break; | 188 | break; |
168 | } | 189 | } |
169 | } | 190 | } |
@@ -177,7 +198,7 @@ static inline void ncp_abort_request(struct ncp_server *server, struct ncp_reque | |||
177 | 198 | ||
178 | static inline void __ncptcp_abort(struct ncp_server *server) | 199 | static inline void __ncptcp_abort(struct ncp_server *server) |
179 | { | 200 | { |
180 | __abort_ncp_connection(server, NULL, 0); | 201 | __abort_ncp_connection(server); |
181 | } | 202 | } |
182 | 203 | ||
183 | static int ncpdgram_send(struct socket *sock, struct ncp_request_reply *req) | 204 | static int ncpdgram_send(struct socket *sock, struct ncp_request_reply *req) |
@@ -294,6 +315,11 @@ static void ncptcp_start_request(struct ncp_server *server, struct ncp_request_r | |||
294 | 315 | ||
295 | static inline void __ncp_start_request(struct ncp_server *server, struct ncp_request_reply *req) | 316 | static inline void __ncp_start_request(struct ncp_server *server, struct ncp_request_reply *req) |
296 | { | 317 | { |
318 | /* we copy the data so that we do not depend on the caller | ||
319 | staying alive */ | ||
320 | memcpy(server->txbuf, req->tx_iov[1].iov_base, req->tx_iov[1].iov_len); | ||
321 | req->tx_iov[1].iov_base = server->txbuf; | ||
322 | |||
297 | if (server->ncp_sock->type == SOCK_STREAM) | 323 | if (server->ncp_sock->type == SOCK_STREAM) |
298 | ncptcp_start_request(server, req); | 324 | ncptcp_start_request(server, req); |
299 | else | 325 | else |
@@ -308,6 +334,7 @@ static int ncp_add_request(struct ncp_server *server, struct ncp_request_reply * | |||
308 | printk(KERN_ERR "ncpfs: tcp: Server died\n"); | 334 | printk(KERN_ERR "ncpfs: tcp: Server died\n"); |
309 | return -EIO; | 335 | return -EIO; |
310 | } | 336 | } |
337 | ncp_req_get(req); | ||
311 | if (server->tx.creq || server->rcv.creq) { | 338 | if (server->tx.creq || server->rcv.creq) { |
312 | req->status = RQ_QUEUED; | 339 | req->status = RQ_QUEUED; |
313 | list_add_tail(&req->req, &server->tx.requests); | 340 | list_add_tail(&req->req, &server->tx.requests); |
@@ -409,7 +436,7 @@ void ncpdgram_rcv_proc(struct work_struct *work) | |||
409 | server->timeout_last = NCP_MAX_RPC_TIMEOUT; | 436 | server->timeout_last = NCP_MAX_RPC_TIMEOUT; |
410 | mod_timer(&server->timeout_tm, jiffies + NCP_MAX_RPC_TIMEOUT); | 437 | mod_timer(&server->timeout_tm, jiffies + NCP_MAX_RPC_TIMEOUT); |
411 | } else if (reply.type == NCP_REPLY) { | 438 | } else if (reply.type == NCP_REPLY) { |
412 | result = _recv(sock, (void*)req->reply_buf, req->datalen, MSG_DONTWAIT); | 439 | result = _recv(sock, server->rxbuf, req->datalen, MSG_DONTWAIT); |
413 | #ifdef CONFIG_NCPFS_PACKET_SIGNING | 440 | #ifdef CONFIG_NCPFS_PACKET_SIGNING |
414 | if (result >= 0 && server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) { | 441 | if (result >= 0 && server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) { |
415 | if (result < 8 + 8) { | 442 | if (result < 8 + 8) { |
@@ -419,7 +446,7 @@ void ncpdgram_rcv_proc(struct work_struct *work) | |||
419 | 446 | ||
420 | result -= 8; | 447 | result -= 8; |
421 | hdrl = sock->sk->sk_family == AF_INET ? 8 : 6; | 448 | hdrl = sock->sk->sk_family == AF_INET ? 8 : 6; |
422 | if (sign_verify_reply(server, ((char*)req->reply_buf) + hdrl, result - hdrl, cpu_to_le32(result), ((char*)req->reply_buf) + result)) { | 449 | if (sign_verify_reply(server, server->rxbuf + hdrl, result - hdrl, cpu_to_le32(result), server->rxbuf + result)) { |
423 | printk(KERN_INFO "ncpfs: Signature violation\n"); | 450 | printk(KERN_INFO "ncpfs: Signature violation\n"); |
424 | result = -EIO; | 451 | result = -EIO; |
425 | } | 452 | } |
@@ -428,7 +455,7 @@ void ncpdgram_rcv_proc(struct work_struct *work) | |||
428 | #endif | 455 | #endif |
429 | del_timer(&server->timeout_tm); | 456 | del_timer(&server->timeout_tm); |
430 | server->rcv.creq = NULL; | 457 | server->rcv.creq = NULL; |
431 | ncp_finish_request(req, result); | 458 | ncp_finish_request(server, req, result); |
432 | __ncp_next_request(server); | 459 | __ncp_next_request(server); |
433 | mutex_unlock(&server->rcv.creq_mutex); | 460 | mutex_unlock(&server->rcv.creq_mutex); |
434 | continue; | 461 | continue; |
@@ -478,12 +505,6 @@ void ncpdgram_timeout_proc(struct work_struct *work) | |||
478 | mutex_unlock(&server->rcv.creq_mutex); | 505 | mutex_unlock(&server->rcv.creq_mutex); |
479 | } | 506 | } |
480 | 507 | ||
481 | static inline void ncp_init_req(struct ncp_request_reply* req) | ||
482 | { | ||
483 | init_waitqueue_head(&req->wq); | ||
484 | req->status = RQ_IDLE; | ||
485 | } | ||
486 | |||
487 | static int do_tcp_rcv(struct ncp_server *server, void *buffer, size_t len) | 508 | static int do_tcp_rcv(struct ncp_server *server, void *buffer, size_t len) |
488 | { | 509 | { |
489 | int result; | 510 | int result; |
@@ -601,8 +622,8 @@ skipdata:; | |||
601 | goto skipdata; | 622 | goto skipdata; |
602 | } | 623 | } |
603 | req->datalen = datalen - 8; | 624 | req->datalen = datalen - 8; |
604 | req->reply_buf->type = NCP_REPLY; | 625 | ((struct ncp_reply_header*)server->rxbuf)->type = NCP_REPLY; |
605 | server->rcv.ptr = (unsigned char*)(req->reply_buf) + 2; | 626 | server->rcv.ptr = server->rxbuf + 2; |
606 | server->rcv.len = datalen - 10; | 627 | server->rcv.len = datalen - 10; |
607 | server->rcv.state = 1; | 628 | server->rcv.state = 1; |
608 | break; | 629 | break; |
@@ -615,12 +636,12 @@ skipdata:; | |||
615 | case 1: | 636 | case 1: |
616 | req = server->rcv.creq; | 637 | req = server->rcv.creq; |
617 | if (req->tx_type != NCP_ALLOC_SLOT_REQUEST) { | 638 | if (req->tx_type != NCP_ALLOC_SLOT_REQUEST) { |
618 | if (req->reply_buf->sequence != server->sequence) { | 639 | if (((struct ncp_reply_header*)server->rxbuf)->sequence != server->sequence) { |
619 | printk(KERN_ERR "ncpfs: tcp: Bad sequence number\n"); | 640 | printk(KERN_ERR "ncpfs: tcp: Bad sequence number\n"); |
620 | __ncp_abort_request(server, req, -EIO); | 641 | __ncp_abort_request(server, req, -EIO); |
621 | return -EIO; | 642 | return -EIO; |
622 | } | 643 | } |
623 | if ((req->reply_buf->conn_low | (req->reply_buf->conn_high << 8)) != server->connection) { | 644 | if ((((struct ncp_reply_header*)server->rxbuf)->conn_low | (((struct ncp_reply_header*)server->rxbuf)->conn_high << 8)) != server->connection) { |
624 | printk(KERN_ERR "ncpfs: tcp: Connection number mismatch\n"); | 645 | printk(KERN_ERR "ncpfs: tcp: Connection number mismatch\n"); |
625 | __ncp_abort_request(server, req, -EIO); | 646 | __ncp_abort_request(server, req, -EIO); |
626 | return -EIO; | 647 | return -EIO; |
@@ -628,14 +649,14 @@ skipdata:; | |||
628 | } | 649 | } |
629 | #ifdef CONFIG_NCPFS_PACKET_SIGNING | 650 | #ifdef CONFIG_NCPFS_PACKET_SIGNING |
630 | if (server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) { | 651 | if (server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) { |
631 | if (sign_verify_reply(server, (unsigned char*)(req->reply_buf) + 6, req->datalen - 6, cpu_to_be32(req->datalen + 16), &server->rcv.buf.type)) { | 652 | if (sign_verify_reply(server, server->rxbuf + 6, req->datalen - 6, cpu_to_be32(req->datalen + 16), &server->rcv.buf.type)) { |
632 | printk(KERN_ERR "ncpfs: tcp: Signature violation\n"); | 653 | printk(KERN_ERR "ncpfs: tcp: Signature violation\n"); |
633 | __ncp_abort_request(server, req, -EIO); | 654 | __ncp_abort_request(server, req, -EIO); |
634 | return -EIO; | 655 | return -EIO; |
635 | } | 656 | } |
636 | } | 657 | } |
637 | #endif | 658 | #endif |
638 | ncp_finish_request(req, req->datalen); | 659 | ncp_finish_request(server, req, req->datalen); |
639 | nextreq:; | 660 | nextreq:; |
640 | __ncp_next_request(server); | 661 | __ncp_next_request(server); |
641 | case 2: | 662 | case 2: |
@@ -645,7 +666,7 @@ skipdata:; | |||
645 | server->rcv.state = 0; | 666 | server->rcv.state = 0; |
646 | break; | 667 | break; |
647 | case 3: | 668 | case 3: |
648 | ncp_finish_request(server->rcv.creq, -EIO); | 669 | ncp_finish_request(server, server->rcv.creq, -EIO); |
649 | goto nextreq; | 670 | goto nextreq; |
650 | case 5: | 671 | case 5: |
651 | info_server(server, 0, server->unexpected_packet.data, server->unexpected_packet.len); | 672 | info_server(server, 0, server->unexpected_packet.data, server->unexpected_packet.len); |
@@ -675,28 +696,39 @@ void ncp_tcp_tx_proc(struct work_struct *work) | |||
675 | } | 696 | } |
676 | 697 | ||
677 | static int do_ncp_rpc_call(struct ncp_server *server, int size, | 698 | static int do_ncp_rpc_call(struct ncp_server *server, int size, |
678 | struct ncp_reply_header* reply_buf, int max_reply_size) | 699 | unsigned char* reply_buf, int max_reply_size) |
679 | { | 700 | { |
680 | int result; | 701 | int result; |
681 | struct ncp_request_reply req; | 702 | struct ncp_request_reply *req; |
682 | 703 | ||
683 | ncp_init_req(&req); | 704 | req = ncp_alloc_req(); |
684 | req.reply_buf = reply_buf; | 705 | if (!req) |
685 | req.datalen = max_reply_size; | 706 | return -ENOMEM; |
686 | req.tx_iov[1].iov_base = server->packet; | 707 | |
687 | req.tx_iov[1].iov_len = size; | 708 | req->reply_buf = reply_buf; |
688 | req.tx_iovlen = 1; | 709 | req->datalen = max_reply_size; |
689 | req.tx_totallen = size; | 710 | req->tx_iov[1].iov_base = server->packet; |
690 | req.tx_type = *(u_int16_t*)server->packet; | 711 | req->tx_iov[1].iov_len = size; |
691 | 712 | req->tx_iovlen = 1; | |
692 | result = ncp_add_request(server, &req); | 713 | req->tx_totallen = size; |
693 | if (result < 0) { | 714 | req->tx_type = *(u_int16_t*)server->packet; |
694 | return result; | 715 | |
695 | } | 716 | result = ncp_add_request(server, req); |
696 | if (wait_event_interruptible(req.wq, req.status == RQ_DONE)) { | 717 | if (result < 0) |
697 | ncp_abort_request(server, &req, -EIO); | 718 | goto out; |
719 | |||
720 | if (wait_event_interruptible(req->wq, req->status == RQ_DONE)) { | ||
721 | ncp_abort_request(server, req, -EINTR); | ||
722 | result = -EINTR; | ||
723 | goto out; | ||
698 | } | 724 | } |
699 | return req.result; | 725 | |
726 | result = req->result; | ||
727 | |||
728 | out: | ||
729 | ncp_req_put(req); | ||
730 | |||
731 | return result; | ||
700 | } | 732 | } |
701 | 733 | ||
702 | /* | 734 | /* |
@@ -751,11 +783,6 @@ static int ncp_do_request(struct ncp_server *server, int size, | |||
751 | 783 | ||
752 | DDPRINTK("do_ncp_rpc_call returned %d\n", result); | 784 | DDPRINTK("do_ncp_rpc_call returned %d\n", result); |
753 | 785 | ||
754 | if (result < 0) { | ||
755 | /* There was a problem with I/O, so the connections is | ||
756 | * no longer usable. */ | ||
757 | ncp_invalidate_conn(server); | ||
758 | } | ||
759 | return result; | 786 | return result; |
760 | } | 787 | } |
761 | 788 | ||
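
The sock.c rework allocates each ncp_request_reply on the heap and reference-counts it (ncp_req_get()/ncp_req_put()), so an abandoned submitter and the receive worker can each drop their reference without anyone freeing the request while the other still uses it. A compact userspace sketch of the same get/put scheme, using C11 atomics in place of the kernel's atomic_t (names are illustrative):

#include <stdatomic.h>
#include <stdlib.h>
#include <stdio.h>

struct request {
	atomic_int refs;
	int result;
};

static struct request *req_alloc(void)
{
	struct request *req = malloc(sizeof(*req));

	if (!req)
		return NULL;
	atomic_init(&req->refs, 1);   /* submitter's reference */
	req->result = 0;
	return req;
}

static void req_get(struct request *req)
{
	atomic_fetch_add(&req->refs, 1);
}

static void req_put(struct request *req)
{
	/* fetch_sub returns the old value: 1 means we dropped the last ref */
	if (atomic_fetch_sub(&req->refs, 1) == 1)
		free(req);
}

int main(void)
{
	struct request *req = req_alloc();

	if (!req)
		return 1;
	req_get(req);   /* the queue takes its own reference */
	req_put(req);   /* receive path finishes and drops it */
	req_put(req);   /* submitter drops the last reference; req is freed */
	return 0;
}
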
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c index 8813990304fe..85a668680f82 100644 --- a/fs/sysfs/dir.c +++ b/fs/sysfs/dir.c | |||
@@ -431,6 +431,8 @@ int sysfs_move_dir(struct kobject *kobj, struct kobject *new_parent) | |||
431 | new_parent_dentry = new_parent ? | 431 | new_parent_dentry = new_parent ? |
432 | new_parent->dentry : sysfs_mount->mnt_sb->s_root; | 432 | new_parent->dentry : sysfs_mount->mnt_sb->s_root; |
433 | 433 | ||
434 | if (old_parent_dentry->d_inode == new_parent_dentry->d_inode) | ||
435 | return 0; /* nothing to move */ | ||
434 | again: | 436 | again: |
435 | mutex_lock(&old_parent_dentry->d_inode->i_mutex); | 437 | mutex_lock(&old_parent_dentry->d_inode->i_mutex); |
436 | if (!mutex_trylock(&new_parent_dentry->d_inode->i_mutex)) { | 438 | if (!mutex_trylock(&new_parent_dentry->d_inode->i_mutex)) { |
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c index dd1344b007f5..ccb7d722c558 100644 --- a/fs/sysfs/inode.c +++ b/fs/sysfs/inode.c | |||
@@ -227,11 +227,8 @@ static inline void orphan_all_buffers(struct inode *node) | |||
227 | 227 | ||
228 | mutex_lock_nested(&node->i_mutex, I_MUTEX_CHILD); | 228 | mutex_lock_nested(&node->i_mutex, I_MUTEX_CHILD); |
229 | if (node->i_private) { | 229 | if (node->i_private) { |
230 | list_for_each_entry(buf, &set->associates, associates) { | 230 | list_for_each_entry(buf, &set->associates, associates) |
231 | down(&buf->sem); | ||
232 | buf->orphaned = 1; | 231 | buf->orphaned = 1; |
233 | up(&buf->sem); | ||
234 | } | ||
235 | } | 232 | } |
236 | mutex_unlock(&node->i_mutex); | 233 | mutex_unlock(&node->i_mutex); |
237 | } | 234 | } |
diff --git a/include/asm-generic/page.h b/include/asm-generic/page.h index b55052ce2330..a96b5d986b6e 100644 --- a/include/asm-generic/page.h +++ b/include/asm-generic/page.h | |||
@@ -4,51 +4,21 @@ | |||
4 | #ifdef __KERNEL__ | 4 | #ifdef __KERNEL__ |
5 | #ifndef __ASSEMBLY__ | 5 | #ifndef __ASSEMBLY__ |
6 | 6 | ||
7 | #include <linux/log2.h> | 7 | #include <linux/compiler.h> |
8 | 8 | ||
9 | /* | 9 | /* Pure 2^n version of get_order */ |
10 | * non-const pure 2^n version of get_order | 10 | static __inline__ __attribute_const__ int get_order(unsigned long size) |
11 | * - the arch may override these in asm/bitops.h if they can be implemented | ||
12 | * more efficiently than using the arch log2 routines | ||
13 | * - we use the non-const log2() instead if the arch has defined one suitable | ||
14 | */ | ||
15 | #ifndef ARCH_HAS_GET_ORDER | ||
16 | static inline __attribute__((const)) | ||
17 | int __get_order(unsigned long size, int page_shift) | ||
18 | { | 11 | { |
19 | #if BITS_PER_LONG == 32 && defined(ARCH_HAS_ILOG2_U32) | ||
20 | int order = __ilog2_u32(size) - page_shift; | ||
21 | return order >= 0 ? order : 0; | ||
22 | #elif BITS_PER_LONG == 64 && defined(ARCH_HAS_ILOG2_U64) | ||
23 | int order = __ilog2_u64(size) - page_shift; | ||
24 | return order >= 0 ? order : 0; | ||
25 | #else | ||
26 | int order; | 12 | int order; |
27 | 13 | ||
28 | size = (size - 1) >> (page_shift - 1); | 14 | size = (size - 1) >> (PAGE_SHIFT - 1); |
29 | order = -1; | 15 | order = -1; |
30 | do { | 16 | do { |
31 | size >>= 1; | 17 | size >>= 1; |
32 | order++; | 18 | order++; |
33 | } while (size); | 19 | } while (size); |
34 | return order; | 20 | return order; |
35 | #endif | ||
36 | } | 21 | } |
37 | #endif | ||
38 | |||
39 | /** | ||
40 | * get_order - calculate log2(pages) to hold a block of the specified size | ||
41 | * @n - size | ||
42 | * | ||
43 | * calculate allocation order based on the current page size | ||
44 | * - this can be used to initialise global variables from constant data | ||
45 | */ | ||
46 | #define get_order(n) \ | ||
47 | ( \ | ||
48 | __builtin_constant_p(n) ? \ | ||
49 | ((n < (1UL << PAGE_SHIFT)) ? 0 : ilog2(n) - PAGE_SHIFT) : \ | ||
50 | __get_order(n, PAGE_SHIFT) \ | ||
51 | ) | ||
52 | 22 | ||
53 | #endif /* __ASSEMBLY__ */ | 23 | #endif /* __ASSEMBLY__ */ |
54 | #endif /* __KERNEL__ */ | 24 | #endif /* __KERNEL__ */ |
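
The asm-generic/page.h change goes back to the plain loop form of get_order(), whose behaviour is easy to check in userspace. A worked example assuming the common PAGE_SHIFT of 12 (4 KiB pages):

#include <stdio.h>

#define PAGE_SHIFT 12

/* same loop as the patched header: log2 of the number of pages, rounded up */
static int get_order(unsigned long size)
{
	int order;

	size = (size - 1) >> (PAGE_SHIFT - 1);
	order = -1;
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}

int main(void)
{
	/* 4 KiB -> 0, 8 KiB -> 1, 9000 bytes rounds up to 2, 64 KiB -> 4 */
	printf("%d %d %d %d\n",
	       get_order(4096), get_order(8192),
	       get_order(9000), get_order(65536));
	return 0;
}
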
diff --git a/include/asm-i386/delay.h b/include/asm-i386/delay.h index 32d6678d0bbf..9ae5e3782ed8 100644 --- a/include/asm-i386/delay.h +++ b/include/asm-i386/delay.h | |||
@@ -16,13 +16,6 @@ extern void __ndelay(unsigned long nsecs); | |||
16 | extern void __const_udelay(unsigned long usecs); | 16 | extern void __const_udelay(unsigned long usecs); |
17 | extern void __delay(unsigned long loops); | 17 | extern void __delay(unsigned long loops); |
18 | 18 | ||
19 | #if defined(CONFIG_PARAVIRT) && !defined(USE_REAL_TIME_DELAY) | ||
20 | #define udelay(n) paravirt_ops.const_udelay((n) * 0x10c7ul) | ||
21 | |||
22 | #define ndelay(n) paravirt_ops.const_udelay((n) * 5ul) | ||
23 | |||
24 | #else /* !PARAVIRT || USE_REAL_TIME_DELAY */ | ||
25 | |||
26 | /* 0x10c7 is 2**32 / 1000000 (rounded up) */ | 19 | /* 0x10c7 is 2**32 / 1000000 (rounded up) */ |
27 | #define udelay(n) (__builtin_constant_p(n) ? \ | 20 | #define udelay(n) (__builtin_constant_p(n) ? \ |
28 | ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \ | 21 | ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \ |
@@ -32,7 +25,6 @@ extern void __delay(unsigned long loops); | |||
32 | #define ndelay(n) (__builtin_constant_p(n) ? \ | 25 | #define ndelay(n) (__builtin_constant_p(n) ? \ |
33 | ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \ | 26 | ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \ |
34 | __ndelay(n)) | 27 | __ndelay(n)) |
35 | #endif | ||
36 | 28 | ||
37 | void use_tsc_delay(void); | 29 | void use_tsc_delay(void); |
38 | 30 | ||
diff --git a/include/asm-i386/io_apic.h b/include/asm-i386/io_apic.h index 059a9ff28b4d..340764076d5f 100644 --- a/include/asm-i386/io_apic.h +++ b/include/asm-i386/io_apic.h | |||
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | #include <asm/types.h> | 4 | #include <asm/types.h> |
5 | #include <asm/mpspec.h> | 5 | #include <asm/mpspec.h> |
6 | #include <asm/apicdef.h> | ||
6 | 7 | ||
7 | /* | 8 | /* |
8 | * Intel IO-APIC support for SMP and UP systems. | 9 | * Intel IO-APIC support for SMP and UP systems. |
diff --git a/include/asm-i386/nmi.h b/include/asm-i386/nmi.h index b04333ea6f31..64544cb85d6a 100644 --- a/include/asm-i386/nmi.h +++ b/include/asm-i386/nmi.h | |||
@@ -33,7 +33,7 @@ extern int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason); | |||
33 | 33 | ||
34 | extern atomic_t nmi_active; | 34 | extern atomic_t nmi_active; |
35 | extern unsigned int nmi_watchdog; | 35 | extern unsigned int nmi_watchdog; |
36 | #define NMI_DEFAULT -1 | 36 | #define NMI_DEFAULT 0 |
37 | #define NMI_NONE 0 | 37 | #define NMI_NONE 0 |
38 | #define NMI_IO_APIC 1 | 38 | #define NMI_IO_APIC 1 |
39 | #define NMI_LOCAL_APIC 2 | 39 | #define NMI_LOCAL_APIC 2 |
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h index 6317e0a4d735..f8319cae2ac5 100644 --- a/include/asm-i386/paravirt.h +++ b/include/asm-i386/paravirt.h | |||
@@ -94,6 +94,8 @@ struct paravirt_ops | |||
94 | 94 | ||
95 | u64 (*read_tsc)(void); | 95 | u64 (*read_tsc)(void); |
96 | u64 (*read_pmc)(void); | 96 | u64 (*read_pmc)(void); |
97 | u64 (*get_scheduled_cycles)(void); | ||
98 | unsigned long (*get_cpu_khz)(void); | ||
97 | 99 | ||
98 | void (*load_tr_desc)(void); | 100 | void (*load_tr_desc)(void); |
99 | void (*load_gdt)(const struct Xgt_desc_struct *); | 101 | void (*load_gdt)(const struct Xgt_desc_struct *); |
@@ -115,7 +117,6 @@ struct paravirt_ops | |||
115 | void (*set_iopl_mask)(unsigned mask); | 117 | void (*set_iopl_mask)(unsigned mask); |
116 | 118 | ||
117 | void (*io_delay)(void); | 119 | void (*io_delay)(void); |
118 | void (*const_udelay)(unsigned long loops); | ||
119 | 120 | ||
120 | #ifdef CONFIG_X86_LOCAL_APIC | 121 | #ifdef CONFIG_X86_LOCAL_APIC |
121 | void (*apic_write)(unsigned long reg, unsigned long v); | 122 | void (*apic_write)(unsigned long reg, unsigned long v); |
@@ -129,6 +130,8 @@ struct paravirt_ops | |||
129 | void (*flush_tlb_kernel)(void); | 130 | void (*flush_tlb_kernel)(void); |
130 | void (*flush_tlb_single)(u32 addr); | 131 | void (*flush_tlb_single)(u32 addr); |
131 | 132 | ||
133 | void (fastcall *map_pt_hook)(int type, pte_t *va, u32 pfn); | ||
134 | |||
132 | void (*alloc_pt)(u32 pfn); | 135 | void (*alloc_pt)(u32 pfn); |
133 | void (*alloc_pd)(u32 pfn); | 136 | void (*alloc_pd)(u32 pfn); |
134 | void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count); | 137 | void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count); |
@@ -183,9 +186,9 @@ static inline int set_wallclock(unsigned long nowtime) | |||
183 | return paravirt_ops.set_wallclock(nowtime); | 186 | return paravirt_ops.set_wallclock(nowtime); |
184 | } | 187 | } |
185 | 188 | ||
186 | static inline void do_time_init(void) | 189 | static inline void (*choose_time_init(void))(void) |
187 | { | 190 | { |
188 | return paravirt_ops.time_init(); | 191 | return paravirt_ops.time_init; |
189 | } | 192 | } |
190 | 193 | ||
191 | /* The paravirtualized CPUID instruction. */ | 194 | /* The paravirtualized CPUID instruction. */ |
@@ -273,6 +276,9 @@ static inline void halt(void) | |||
273 | 276 | ||
274 | #define rdtscll(val) (val = paravirt_ops.read_tsc()) | 277 | #define rdtscll(val) (val = paravirt_ops.read_tsc()) |
275 | 278 | ||
279 | #define get_scheduled_cycles(val) (val = paravirt_ops.get_scheduled_cycles()) | ||
280 | #define calculate_cpu_khz() (paravirt_ops.get_cpu_khz()) | ||
281 | |||
276 | #define write_tsc(val1,val2) wrmsr(0x10, val1, val2) | 282 | #define write_tsc(val1,val2) wrmsr(0x10, val1, val2) |
277 | 283 | ||
278 | #define rdpmc(counter,low,high) do { \ | 284 | #define rdpmc(counter,low,high) do { \ |
@@ -349,6 +355,8 @@ static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip, | |||
349 | #define __flush_tlb_global() paravirt_ops.flush_tlb_kernel() | 355 | #define __flush_tlb_global() paravirt_ops.flush_tlb_kernel() |
350 | #define __flush_tlb_single(addr) paravirt_ops.flush_tlb_single(addr) | 356 | #define __flush_tlb_single(addr) paravirt_ops.flush_tlb_single(addr) |
351 | 357 | ||
358 | #define paravirt_map_pt_hook(type, va, pfn) paravirt_ops.map_pt_hook(type, va, pfn) | ||
359 | |||
352 | #define paravirt_alloc_pt(pfn) paravirt_ops.alloc_pt(pfn) | 360 | #define paravirt_alloc_pt(pfn) paravirt_ops.alloc_pt(pfn) |
353 | #define paravirt_release_pt(pfn) paravirt_ops.release_pt(pfn) | 361 | #define paravirt_release_pt(pfn) paravirt_ops.release_pt(pfn) |
354 | 362 | ||
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h index e6a4723f0eb1..c3b58d473a55 100644 --- a/include/asm-i386/pgtable.h +++ b/include/asm-i386/pgtable.h | |||
@@ -263,6 +263,7 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p | |||
263 | */ | 263 | */ |
264 | #define pte_update(mm, addr, ptep) do { } while (0) | 264 | #define pte_update(mm, addr, ptep) do { } while (0) |
265 | #define pte_update_defer(mm, addr, ptep) do { } while (0) | 265 | #define pte_update_defer(mm, addr, ptep) do { } while (0) |
266 | #define paravirt_map_pt_hook(slot, va, pfn) do { } while (0) | ||
266 | #endif | 267 | #endif |
267 | 268 | ||
268 | /* | 269 | /* |
@@ -469,10 +470,24 @@ extern pte_t *lookup_address(unsigned long address); | |||
469 | #endif | 470 | #endif |
470 | 471 | ||
471 | #if defined(CONFIG_HIGHPTE) | 472 | #if defined(CONFIG_HIGHPTE) |
472 | #define pte_offset_map(dir, address) \ | 473 | #define pte_offset_map(dir, address) \ |
473 | ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address)) | 474 | ({ \ |
474 | #define pte_offset_map_nested(dir, address) \ | 475 | pte_t *__ptep; \ |
475 | ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE1) + pte_index(address)) | 476 | unsigned pfn = pmd_val(*(dir)) >> PAGE_SHIFT; \ |
477 | __ptep = (pte_t *)kmap_atomic(pfn_to_page(pfn),KM_PTE0);\ | ||
478 | paravirt_map_pt_hook(KM_PTE0,__ptep, pfn); \ | ||
479 | __ptep = __ptep + pte_index(address); \ | ||
480 | __ptep; \ | ||
481 | }) | ||
482 | #define pte_offset_map_nested(dir, address) \ | ||
483 | ({ \ | ||
484 | pte_t *__ptep; \ | ||
485 | unsigned pfn = pmd_val(*(dir)) >> PAGE_SHIFT; \ | ||
486 | __ptep = (pte_t *)kmap_atomic(pfn_to_page(pfn),KM_PTE1);\ | ||
487 | paravirt_map_pt_hook(KM_PTE1,__ptep, pfn); \ | ||
488 | __ptep = __ptep + pte_index(address); \ | ||
489 | __ptep; \ | ||
490 | }) | ||
476 | #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0) | 491 | #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0) |
477 | #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1) | 492 | #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1) |
478 | #else | 493 | #else |
diff --git a/include/asm-i386/time.h b/include/asm-i386/time.h index 571b4294dc2e..eac011366dc2 100644 --- a/include/asm-i386/time.h +++ b/include/asm-i386/time.h | |||
@@ -28,14 +28,16 @@ static inline int native_set_wallclock(unsigned long nowtime) | |||
28 | return retval; | 28 | return retval; |
29 | } | 29 | } |
30 | 30 | ||
31 | extern void (*late_time_init)(void); | ||
32 | extern void hpet_time_init(void); | ||
33 | |||
31 | #ifdef CONFIG_PARAVIRT | 34 | #ifdef CONFIG_PARAVIRT |
32 | #include <asm/paravirt.h> | 35 | #include <asm/paravirt.h> |
33 | extern unsigned long long native_sched_clock(void); | ||
34 | #else /* !CONFIG_PARAVIRT */ | 36 | #else /* !CONFIG_PARAVIRT */ |
35 | 37 | ||
36 | #define get_wallclock() native_get_wallclock() | 38 | #define get_wallclock() native_get_wallclock() |
37 | #define set_wallclock(x) native_set_wallclock(x) | 39 | #define set_wallclock(x) native_set_wallclock(x) |
38 | #define do_time_init() time_init_hook() | 40 | #define choose_time_init() hpet_time_init |
39 | 41 | ||
40 | #endif /* CONFIG_PARAVIRT */ | 42 | #endif /* CONFIG_PARAVIRT */ |
41 | 43 | ||
diff --git a/include/asm-i386/timer.h b/include/asm-i386/timer.h index 4752c3a6a708..12dd67bf760f 100644 --- a/include/asm-i386/timer.h +++ b/include/asm-i386/timer.h | |||
@@ -4,13 +4,21 @@ | |||
4 | #include <linux/pm.h> | 4 | #include <linux/pm.h> |
5 | 5 | ||
6 | #define TICK_SIZE (tick_nsec / 1000) | 6 | #define TICK_SIZE (tick_nsec / 1000) |
7 | |||
7 | void setup_pit_timer(void); | 8 | void setup_pit_timer(void); |
9 | unsigned long long native_sched_clock(void); | ||
10 | unsigned long native_calculate_cpu_khz(void); | ||
11 | |||
8 | /* Modifiers for buggy PIT handling */ | 12 | /* Modifiers for buggy PIT handling */ |
9 | extern int pit_latch_buggy; | 13 | extern int pit_latch_buggy; |
10 | extern int timer_ack; | 14 | extern int timer_ack; |
11 | extern int no_timer_check; | 15 | extern int no_timer_check; |
12 | extern unsigned long long (*custom_sched_clock)(void); | ||
13 | extern int no_sync_cmos_clock; | 16 | extern int no_sync_cmos_clock; |
14 | extern int recalibrate_cpu_khz(void); | 17 | extern int recalibrate_cpu_khz(void); |
15 | 18 | ||
19 | #ifndef CONFIG_PARAVIRT | ||
20 | #define get_scheduled_cycles(val) rdtscll(val) | ||
21 | #define calculate_cpu_khz() native_calculate_cpu_khz() | ||
22 | #endif | ||
23 | |||
16 | #endif | 24 | #endif |
diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h index ac58580ad664..7fc512d90ea8 100644 --- a/include/asm-i386/topology.h +++ b/include/asm-i386/topology.h | |||
@@ -85,7 +85,6 @@ static inline int node_to_first_cpu(int node) | |||
85 | .idle_idx = 1, \ | 85 | .idle_idx = 1, \ |
86 | .newidle_idx = 2, \ | 86 | .newidle_idx = 2, \ |
87 | .wake_idx = 1, \ | 87 | .wake_idx = 1, \ |
88 | .per_cpu_gain = 100, \ | ||
89 | .flags = SD_LOAD_BALANCE \ | 88 | .flags = SD_LOAD_BALANCE \ |
90 | | SD_BALANCE_EXEC \ | 89 | | SD_BALANCE_EXEC \ |
91 | | SD_BALANCE_FORK \ | 90 | | SD_BALANCE_FORK \ |
diff --git a/include/asm-i386/tsc.h b/include/asm-i386/tsc.h index e997891cc7cc..84016ff481b9 100644 --- a/include/asm-i386/tsc.h +++ b/include/asm-i386/tsc.h | |||
@@ -1 +1,67 @@ | |||
1 | #include <asm-x86_64/tsc.h> | 1 | /* |
2 | * linux/include/asm-i386/tsc.h | ||
3 | * | ||
4 | * i386 TSC related functions | ||
5 | */ | ||
6 | #ifndef _ASM_i386_TSC_H | ||
7 | #define _ASM_i386_TSC_H | ||
8 | |||
9 | #include <asm/processor.h> | ||
10 | |||
11 | /* | ||
12 | * Standard way to access the cycle counter. | ||
13 | */ | ||
14 | typedef unsigned long long cycles_t; | ||
15 | |||
16 | extern unsigned int cpu_khz; | ||
17 | extern unsigned int tsc_khz; | ||
18 | |||
19 | static inline cycles_t get_cycles(void) | ||
20 | { | ||
21 | unsigned long long ret = 0; | ||
22 | |||
23 | #ifndef CONFIG_X86_TSC | ||
24 | if (!cpu_has_tsc) | ||
25 | return 0; | ||
26 | #endif | ||
27 | |||
28 | #if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC) | ||
29 | rdtscll(ret); | ||
30 | #endif | ||
31 | return ret; | ||
32 | } | ||
33 | |||
34 | /* Like get_cycles, but make sure the CPU is synchronized. */ | ||
35 | static __always_inline cycles_t get_cycles_sync(void) | ||
36 | { | ||
37 | unsigned long long ret; | ||
38 | #ifdef X86_FEATURE_SYNC_RDTSC | ||
39 | unsigned eax; | ||
40 | |||
41 | /* | ||
42 | * Don't do an additional sync on CPUs where we know | ||
43 | * RDTSC is already synchronous: | ||
44 | */ | ||
45 | alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC, | ||
46 | "=a" (eax), "0" (1) : "ebx","ecx","edx","memory"); | ||
47 | #else | ||
48 | sync_core(); | ||
49 | #endif | ||
50 | rdtscll(ret); | ||
51 | |||
52 | return ret; | ||
53 | } | ||
54 | |||
55 | extern void tsc_init(void); | ||
56 | extern void mark_tsc_unstable(void); | ||
57 | extern int unsynchronized_tsc(void); | ||
58 | extern void init_tsc_clocksource(void); | ||
59 | |||
60 | /* | ||
61 | * Boot-time check whether the TSCs are synchronized across | ||
62 | * all CPUs/cores: | ||
63 | */ | ||
64 | extern void check_tsc_sync_source(int cpu); | ||
65 | extern void check_tsc_sync_target(void); | ||
66 | |||
67 | #endif | ||
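
The new asm-i386/tsc.h centralises cycle-counter access behind get_cycles()/get_cycles_sync(). As a hedged userspace illustration of what such a counter is typically used for, the following relies on the compiler intrinsic __rdtsc() from <x86intrin.h> (x86-only, and a raw TSC delta is only a rough timing signal, not the kernel interface itself):

#include <stdio.h>
#include <stdint.h>
#include <x86intrin.h>

int main(void)
{
	uint64_t start, end;
	volatile unsigned long sink = 0;

	start = __rdtsc();
	for (unsigned long i = 0; i < 1000000; i++)
		sink += i;            /* some work to time */
	end = __rdtsc();

	printf("loop took ~%llu cycles (sink=%lu)\n",
	       (unsigned long long)(end - start), (unsigned long)sink);
	return 0;
}
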
diff --git a/include/asm-i386/vmi.h b/include/asm-i386/vmi.h index 43c89333037e..eb8bd892c01e 100644 --- a/include/asm-i386/vmi.h +++ b/include/asm-i386/vmi.h | |||
@@ -97,6 +97,7 @@ | |||
97 | #define VMI_CALL_SetInitialAPState 62 | 97 | #define VMI_CALL_SetInitialAPState 62 |
98 | #define VMI_CALL_APICWrite 63 | 98 | #define VMI_CALL_APICWrite 63 |
99 | #define VMI_CALL_APICRead 64 | 99 | #define VMI_CALL_APICRead 64 |
100 | #define VMI_CALL_IODelay 65 | ||
100 | #define VMI_CALL_SetLazyMode 73 | 101 | #define VMI_CALL_SetLazyMode 73 |
101 | 102 | ||
102 | /* | 103 | /* |
diff --git a/include/asm-i386/vmi_time.h b/include/asm-i386/vmi_time.h index c12931211007..94d0a12a4114 100644 --- a/include/asm-i386/vmi_time.h +++ b/include/asm-i386/vmi_time.h | |||
@@ -49,7 +49,8 @@ extern struct vmi_timer_ops { | |||
49 | extern void __init vmi_time_init(void); | 49 | extern void __init vmi_time_init(void); |
50 | extern unsigned long vmi_get_wallclock(void); | 50 | extern unsigned long vmi_get_wallclock(void); |
51 | extern int vmi_set_wallclock(unsigned long now); | 51 | extern int vmi_set_wallclock(unsigned long now); |
52 | extern unsigned long long vmi_sched_clock(void); | 52 | extern unsigned long long vmi_get_sched_cycles(void); |
53 | extern unsigned long vmi_cpu_khz(void); | ||
53 | 54 | ||
54 | #ifdef CONFIG_X86_LOCAL_APIC | 55 | #ifdef CONFIG_X86_LOCAL_APIC |
55 | extern void __init vmi_timer_setup_boot_alarm(void); | 56 | extern void __init vmi_timer_setup_boot_alarm(void); |
@@ -60,6 +61,14 @@ extern void apic_vmi_timer_interrupt(void); | |||
60 | #ifdef CONFIG_NO_IDLE_HZ | 61 | #ifdef CONFIG_NO_IDLE_HZ |
61 | extern int vmi_stop_hz_timer(void); | 62 | extern int vmi_stop_hz_timer(void); |
62 | extern void vmi_account_time_restart_hz_timer(void); | 63 | extern void vmi_account_time_restart_hz_timer(void); |
64 | #else | ||
65 | static inline int vmi_stop_hz_timer(void) | ||
66 | { | ||
67 | return 0; | ||
68 | } | ||
69 | static inline void vmi_account_time_restart_hz_timer(void) | ||
70 | { | ||
71 | } | ||
63 | #endif | 72 | #endif |
64 | 73 | ||
65 | /* | 74 | /* |
diff --git a/include/asm-ia64/meminit.h b/include/asm-ia64/meminit.h index 6dd476b652c6..21ec5f3d23de 100644 --- a/include/asm-ia64/meminit.h +++ b/include/asm-ia64/meminit.h | |||
@@ -17,10 +17,11 @@ | |||
17 | * - kernel code & data | 17 | * - kernel code & data |
18 | * - crash dumping code reserved region | 18 | * - crash dumping code reserved region |
19 | * - Kernel memory map built from EFI memory map | 19 | * - Kernel memory map built from EFI memory map |
20 | * - ELF core header | ||
20 | * | 21 | * |
21 | * More could be added if necessary | 22 | * More could be added if necessary |
22 | */ | 23 | */ |
23 | #define IA64_MAX_RSVD_REGIONS 7 | 24 | #define IA64_MAX_RSVD_REGIONS 8 |
24 | 25 | ||
25 | struct rsvd_region { | 26 | struct rsvd_region { |
26 | unsigned long start; /* virtual address of beginning of element */ | 27 | unsigned long start; /* virtual address of beginning of element */ |
@@ -36,6 +37,9 @@ extern void find_initrd (void); | |||
36 | extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg); | 37 | extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg); |
37 | extern void efi_memmap_init(unsigned long *, unsigned long *); | 38 | extern void efi_memmap_init(unsigned long *, unsigned long *); |
38 | 39 | ||
40 | extern unsigned long vmcore_find_descriptor_size(unsigned long address); | ||
41 | extern int reserve_elfcorehdr(unsigned long *start, unsigned long *end); | ||
42 | |||
39 | /* | 43 | /* |
40 | * For rounding an address to the next IA64_GRANULE_SIZE or order | 44 | * For rounding an address to the next IA64_GRANULE_SIZE or order |
41 | */ | 45 | */ |
diff --git a/include/asm-ia64/resource.h b/include/asm-ia64/resource.h index 77b1eee01f30..ba2272a87fc7 100644 --- a/include/asm-ia64/resource.h +++ b/include/asm-ia64/resource.h | |||
@@ -2,7 +2,6 @@ | |||
2 | #define _ASM_IA64_RESOURCE_H | 2 | #define _ASM_IA64_RESOURCE_H |
3 | 3 | ||
4 | #include <asm/ustack.h> | 4 | #include <asm/ustack.h> |
5 | #define _STK_LIM_MAX DEFAULT_USER_STACK_SIZE | ||
6 | #include <asm-generic/resource.h> | 5 | #include <asm-generic/resource.h> |
7 | 6 | ||
8 | #endif /* _ASM_IA64_RESOURCE_H */ | 7 | #endif /* _ASM_IA64_RESOURCE_H */ |
diff --git a/include/asm-ia64/swiotlb.h b/include/asm-ia64/swiotlb.h deleted file mode 100644 index 452c162dee4e..000000000000 --- a/include/asm-ia64/swiotlb.h +++ /dev/null | |||
@@ -1,9 +0,0 @@ | |||
1 | #ifndef _ASM_SWIOTLB_H | ||
2 | #define _ASM_SWIOTLB_H 1 | ||
3 | |||
4 | #include <asm/machvec.h> | ||
5 | |||
6 | #define SWIOTLB_ARCH_NEED_LATE_INIT | ||
7 | #define SWIOTLB_ARCH_NEED_ALLOC | ||
8 | |||
9 | #endif /* _ASM_SWIOTLB_H */ | ||
diff --git a/include/asm-ia64/topology.h b/include/asm-ia64/topology.h index 22ed6749557e..233f1caae048 100644 --- a/include/asm-ia64/topology.h +++ b/include/asm-ia64/topology.h | |||
@@ -65,7 +65,6 @@ void build_cpu_to_node_map(void); | |||
65 | .max_interval = 4, \ | 65 | .max_interval = 4, \ |
66 | .busy_factor = 64, \ | 66 | .busy_factor = 64, \ |
67 | .imbalance_pct = 125, \ | 67 | .imbalance_pct = 125, \ |
68 | .per_cpu_gain = 100, \ | ||
69 | .cache_nice_tries = 2, \ | 68 | .cache_nice_tries = 2, \ |
70 | .busy_idx = 2, \ | 69 | .busy_idx = 2, \ |
71 | .idle_idx = 1, \ | 70 | .idle_idx = 1, \ |
@@ -97,7 +96,6 @@ void build_cpu_to_node_map(void); | |||
97 | .newidle_idx = 0, /* unused */ \ | 96 | .newidle_idx = 0, /* unused */ \ |
98 | .wake_idx = 1, \ | 97 | .wake_idx = 1, \ |
99 | .forkexec_idx = 1, \ | 98 | .forkexec_idx = 1, \ |
100 | .per_cpu_gain = 100, \ | ||
101 | .flags = SD_LOAD_BALANCE \ | 99 | .flags = SD_LOAD_BALANCE \ |
102 | | SD_BALANCE_EXEC \ | 100 | | SD_BALANCE_EXEC \ |
103 | | SD_BALANCE_FORK \ | 101 | | SD_BALANCE_FORK \ |
diff --git a/include/asm-m68knommu/m528xsim.h b/include/asm-m68knommu/m528xsim.h index 1a3b1ae06b1e..28bf783a5d6d 100644 --- a/include/asm-m68knommu/m528xsim.h +++ b/include/asm-m68knommu/m528xsim.h | |||
@@ -47,6 +47,9 @@ | |||
47 | /* set Port AS pin for I2C or UART */ | 47 | /* set Port AS pin for I2C or UART */ |
48 | #define MCF5282_GPIO_PASPAR (volatile u16 *) (MCF_IPSBAR + 0x00100056) | 48 | #define MCF5282_GPIO_PASPAR (volatile u16 *) (MCF_IPSBAR + 0x00100056) |
49 | 49 | ||
50 | /* Port UA Pin Assignment Register (8 Bit) */ | ||
51 | #define MCF5282_GPIO_PUAPAR 0x10005C | ||
52 | |||
50 | /* Interrupt Mask Register Register Low */ | 53 | /* Interrupt Mask Register Register Low */ |
51 | #define MCF5282_INTC0_IMRL (volatile u32 *) (MCF_IPSBAR + 0x0C0C) | 54 | #define MCF5282_INTC0_IMRL (volatile u32 *) (MCF_IPSBAR + 0x0C0C) |
52 | /* Interrupt Control Register 7 */ | 55 | /* Interrupt Control Register 7 */ |
diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h index 89436b96ad66..8959da245cfb 100644 --- a/include/asm-mips/bitops.h +++ b/include/asm-mips/bitops.h | |||
@@ -54,6 +54,7 @@ | |||
54 | static inline void set_bit(unsigned long nr, volatile unsigned long *addr) | 54 | static inline void set_bit(unsigned long nr, volatile unsigned long *addr) |
55 | { | 55 | { |
56 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 56 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
57 | unsigned short bit = nr & SZLONG_MASK; | ||
57 | unsigned long temp; | 58 | unsigned long temp; |
58 | 59 | ||
59 | if (cpu_has_llsc && R10000_LLSC_WAR) { | 60 | if (cpu_has_llsc && R10000_LLSC_WAR) { |
@@ -65,9 +66,9 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) | |||
65 | " beqzl %0, 1b \n" | 66 | " beqzl %0, 1b \n" |
66 | " .set mips0 \n" | 67 | " .set mips0 \n" |
67 | : "=&r" (temp), "=m" (*m) | 68 | : "=&r" (temp), "=m" (*m) |
68 | : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m)); | 69 | : "ir" (1UL << bit), "m" (*m)); |
69 | #ifdef CONFIG_CPU_MIPSR2 | 70 | #ifdef CONFIG_CPU_MIPSR2 |
70 | } else if (__builtin_constant_p(nr)) { | 71 | } else if (__builtin_constant_p(bit)) { |
71 | __asm__ __volatile__( | 72 | __asm__ __volatile__( |
72 | "1: " __LL "%0, %1 # set_bit \n" | 73 | "1: " __LL "%0, %1 # set_bit \n" |
73 | " " __INS "%0, %4, %2, 1 \n" | 74 | " " __INS "%0, %4, %2, 1 \n" |
@@ -77,7 +78,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) | |||
77 | "2: b 1b \n" | 78 | "2: b 1b \n" |
78 | " .previous \n" | 79 | " .previous \n" |
79 | : "=&r" (temp), "=m" (*m) | 80 | : "=&r" (temp), "=m" (*m) |
80 | : "ir" (nr & SZLONG_MASK), "m" (*m), "r" (~0)); | 81 | : "ir" (bit), "m" (*m), "r" (~0)); |
81 | #endif /* CONFIG_CPU_MIPSR2 */ | 82 | #endif /* CONFIG_CPU_MIPSR2 */ |
82 | } else if (cpu_has_llsc) { | 83 | } else if (cpu_has_llsc) { |
83 | __asm__ __volatile__( | 84 | __asm__ __volatile__( |
@@ -91,14 +92,14 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) | |||
91 | " .previous \n" | 92 | " .previous \n" |
92 | " .set mips0 \n" | 93 | " .set mips0 \n" |
93 | : "=&r" (temp), "=m" (*m) | 94 | : "=&r" (temp), "=m" (*m) |
94 | : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m)); | 95 | : "ir" (1UL << bit), "m" (*m)); |
95 | } else { | 96 | } else { |
96 | volatile unsigned long *a = addr; | 97 | volatile unsigned long *a = addr; |
97 | unsigned long mask; | 98 | unsigned long mask; |
98 | unsigned long flags; | 99 | unsigned long flags; |
99 | 100 | ||
100 | a += nr >> SZLONG_LOG; | 101 | a += nr >> SZLONG_LOG; |
101 | mask = 1UL << (nr & SZLONG_MASK); | 102 | mask = 1UL << bit; |
102 | local_irq_save(flags); | 103 | local_irq_save(flags); |
103 | *a |= mask; | 104 | *a |= mask; |
104 | local_irq_restore(flags); | 105 | local_irq_restore(flags); |
@@ -118,6 +119,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) | |||
118 | static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) | 119 | static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) |
119 | { | 120 | { |
120 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 121 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
122 | unsigned short bit = nr & SZLONG_MASK; | ||
121 | unsigned long temp; | 123 | unsigned long temp; |
122 | 124 | ||
123 | if (cpu_has_llsc && R10000_LLSC_WAR) { | 125 | if (cpu_has_llsc && R10000_LLSC_WAR) { |
@@ -129,9 +131,9 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) | |||
129 | " beqzl %0, 1b \n" | 131 | " beqzl %0, 1b \n" |
130 | " .set mips0 \n" | 132 | " .set mips0 \n" |
131 | : "=&r" (temp), "=m" (*m) | 133 | : "=&r" (temp), "=m" (*m) |
132 | : "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m)); | 134 | : "ir" (~(1UL << bit)), "m" (*m)); |
133 | #ifdef CONFIG_CPU_MIPSR2 | 135 | #ifdef CONFIG_CPU_MIPSR2 |
134 | } else if (__builtin_constant_p(nr)) { | 136 | } else if (__builtin_constant_p(bit)) { |
135 | __asm__ __volatile__( | 137 | __asm__ __volatile__( |
136 | "1: " __LL "%0, %1 # clear_bit \n" | 138 | "1: " __LL "%0, %1 # clear_bit \n" |
137 | " " __INS "%0, $0, %2, 1 \n" | 139 | " " __INS "%0, $0, %2, 1 \n" |
@@ -141,7 +143,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) | |||
141 | "2: b 1b \n" | 143 | "2: b 1b \n" |
142 | " .previous \n" | 144 | " .previous \n" |
143 | : "=&r" (temp), "=m" (*m) | 145 | : "=&r" (temp), "=m" (*m) |
144 | : "ir" (nr & SZLONG_MASK), "m" (*m)); | 146 | : "ir" (bit), "m" (*m)); |
145 | #endif /* CONFIG_CPU_MIPSR2 */ | 147 | #endif /* CONFIG_CPU_MIPSR2 */ |
146 | } else if (cpu_has_llsc) { | 148 | } else if (cpu_has_llsc) { |
147 | __asm__ __volatile__( | 149 | __asm__ __volatile__( |
@@ -155,14 +157,14 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) | |||
155 | " .previous \n" | 157 | " .previous \n" |
156 | " .set mips0 \n" | 158 | " .set mips0 \n" |
157 | : "=&r" (temp), "=m" (*m) | 159 | : "=&r" (temp), "=m" (*m) |
158 | : "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m)); | 160 | : "ir" (~(1UL << bit)), "m" (*m)); |
159 | } else { | 161 | } else { |
160 | volatile unsigned long *a = addr; | 162 | volatile unsigned long *a = addr; |
161 | unsigned long mask; | 163 | unsigned long mask; |
162 | unsigned long flags; | 164 | unsigned long flags; |
163 | 165 | ||
164 | a += nr >> SZLONG_LOG; | 166 | a += nr >> SZLONG_LOG; |
165 | mask = 1UL << (nr & SZLONG_MASK); | 167 | mask = 1UL << bit; |
166 | local_irq_save(flags); | 168 | local_irq_save(flags); |
167 | *a &= ~mask; | 169 | *a &= ~mask; |
168 | local_irq_restore(flags); | 170 | local_irq_restore(flags); |
@@ -180,6 +182,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) | |||
180 | */ | 182 | */ |
181 | static inline void change_bit(unsigned long nr, volatile unsigned long *addr) | 183 | static inline void change_bit(unsigned long nr, volatile unsigned long *addr) |
182 | { | 184 | { |
185 | unsigned short bit = nr & SZLONG_MASK; | ||
186 | |||
183 | if (cpu_has_llsc && R10000_LLSC_WAR) { | 187 | if (cpu_has_llsc && R10000_LLSC_WAR) { |
184 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 188 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
185 | unsigned long temp; | 189 | unsigned long temp; |
@@ -192,7 +196,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) | |||
192 | " beqzl %0, 1b \n" | 196 | " beqzl %0, 1b \n" |
193 | " .set mips0 \n" | 197 | " .set mips0 \n" |
194 | : "=&r" (temp), "=m" (*m) | 198 | : "=&r" (temp), "=m" (*m) |
195 | : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m)); | 199 | : "ir" (1UL << bit), "m" (*m)); |
196 | } else if (cpu_has_llsc) { | 200 | } else if (cpu_has_llsc) { |
197 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 201 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
198 | unsigned long temp; | 202 | unsigned long temp; |
@@ -208,14 +212,14 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) | |||
208 | " .previous \n" | 212 | " .previous \n" |
209 | " .set mips0 \n" | 213 | " .set mips0 \n" |
210 | : "=&r" (temp), "=m" (*m) | 214 | : "=&r" (temp), "=m" (*m) |
211 | : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m)); | 215 | : "ir" (1UL << bit), "m" (*m)); |
212 | } else { | 216 | } else { |
213 | volatile unsigned long *a = addr; | 217 | volatile unsigned long *a = addr; |
214 | unsigned long mask; | 218 | unsigned long mask; |
215 | unsigned long flags; | 219 | unsigned long flags; |
216 | 220 | ||
217 | a += nr >> SZLONG_LOG; | 221 | a += nr >> SZLONG_LOG; |
218 | mask = 1UL << (nr & SZLONG_MASK); | 222 | mask = 1UL << bit; |
219 | local_irq_save(flags); | 223 | local_irq_save(flags); |
220 | *a ^= mask; | 224 | *a ^= mask; |
221 | local_irq_restore(flags); | 225 | local_irq_restore(flags); |
@@ -233,6 +237,8 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) | |||
233 | static inline int test_and_set_bit(unsigned long nr, | 237 | static inline int test_and_set_bit(unsigned long nr, |
234 | volatile unsigned long *addr) | 238 | volatile unsigned long *addr) |
235 | { | 239 | { |
240 | unsigned short bit = nr & SZLONG_MASK; | ||
241 | |||
236 | if (cpu_has_llsc && R10000_LLSC_WAR) { | 242 | if (cpu_has_llsc && R10000_LLSC_WAR) { |
237 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 243 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
238 | unsigned long temp, res; | 244 | unsigned long temp, res; |
@@ -246,7 +252,7 @@ static inline int test_and_set_bit(unsigned long nr, | |||
246 | " and %2, %0, %3 \n" | 252 | " and %2, %0, %3 \n" |
247 | " .set mips0 \n" | 253 | " .set mips0 \n" |
248 | : "=&r" (temp), "=m" (*m), "=&r" (res) | 254 | : "=&r" (temp), "=m" (*m), "=&r" (res) |
249 | : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) | 255 | : "r" (1UL << bit), "m" (*m) |
250 | : "memory"); | 256 | : "memory"); |
251 | 257 | ||
252 | return res != 0; | 258 | return res != 0; |
@@ -269,7 +275,7 @@ static inline int test_and_set_bit(unsigned long nr, | |||
269 | " .previous \n" | 275 | " .previous \n" |
270 | " .set pop \n" | 276 | " .set pop \n" |
271 | : "=&r" (temp), "=m" (*m), "=&r" (res) | 277 | : "=&r" (temp), "=m" (*m), "=&r" (res) |
272 | : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) | 278 | : "r" (1UL << bit), "m" (*m) |
273 | : "memory"); | 279 | : "memory"); |
274 | 280 | ||
275 | return res != 0; | 281 | return res != 0; |
@@ -280,7 +286,7 @@ static inline int test_and_set_bit(unsigned long nr, | |||
280 | unsigned long flags; | 286 | unsigned long flags; |
281 | 287 | ||
282 | a += nr >> SZLONG_LOG; | 288 | a += nr >> SZLONG_LOG; |
283 | mask = 1UL << (nr & SZLONG_MASK); | 289 | mask = 1UL << bit; |
284 | local_irq_save(flags); | 290 | local_irq_save(flags); |
285 | retval = (mask & *a) != 0; | 291 | retval = (mask & *a) != 0; |
286 | *a |= mask; | 292 | *a |= mask; |
@@ -303,6 +309,8 @@ static inline int test_and_set_bit(unsigned long nr, | |||
303 | static inline int test_and_clear_bit(unsigned long nr, | 309 | static inline int test_and_clear_bit(unsigned long nr, |
304 | volatile unsigned long *addr) | 310 | volatile unsigned long *addr) |
305 | { | 311 | { |
312 | unsigned short bit = nr & SZLONG_MASK; | ||
313 | |||
306 | if (cpu_has_llsc && R10000_LLSC_WAR) { | 314 | if (cpu_has_llsc && R10000_LLSC_WAR) { |
307 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 315 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
308 | unsigned long temp, res; | 316 | unsigned long temp, res; |
@@ -317,7 +325,7 @@ static inline int test_and_clear_bit(unsigned long nr, | |||
317 | " and %2, %0, %3 \n" | 325 | " and %2, %0, %3 \n" |
318 | " .set mips0 \n" | 326 | " .set mips0 \n" |
319 | : "=&r" (temp), "=m" (*m), "=&r" (res) | 327 | : "=&r" (temp), "=m" (*m), "=&r" (res) |
320 | : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) | 328 | : "r" (1UL << bit), "m" (*m) |
321 | : "memory"); | 329 | : "memory"); |
322 | 330 | ||
323 | return res != 0; | 331 | return res != 0; |
@@ -336,7 +344,7 @@ static inline int test_and_clear_bit(unsigned long nr, | |||
336 | "2: b 1b \n" | 344 | "2: b 1b \n" |
337 | " .previous \n" | 345 | " .previous \n" |
338 | : "=&r" (temp), "=m" (*m), "=&r" (res) | 346 | : "=&r" (temp), "=m" (*m), "=&r" (res) |
339 | : "ri" (nr & SZLONG_MASK), "m" (*m) | 347 | : "ri" (bit), "m" (*m) |
340 | : "memory"); | 348 | : "memory"); |
341 | 349 | ||
342 | return res; | 350 | return res; |
@@ -361,7 +369,7 @@ static inline int test_and_clear_bit(unsigned long nr, | |||
361 | " .previous \n" | 369 | " .previous \n" |
362 | " .set pop \n" | 370 | " .set pop \n" |
363 | : "=&r" (temp), "=m" (*m), "=&r" (res) | 371 | : "=&r" (temp), "=m" (*m), "=&r" (res) |
364 | : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) | 372 | : "r" (1UL << bit), "m" (*m) |
365 | : "memory"); | 373 | : "memory"); |
366 | 374 | ||
367 | return res != 0; | 375 | return res != 0; |
@@ -372,7 +380,7 @@ static inline int test_and_clear_bit(unsigned long nr, | |||
372 | unsigned long flags; | 380 | unsigned long flags; |
373 | 381 | ||
374 | a += nr >> SZLONG_LOG; | 382 | a += nr >> SZLONG_LOG; |
375 | mask = 1UL << (nr & SZLONG_MASK); | 383 | mask = 1UL << bit; |
376 | local_irq_save(flags); | 384 | local_irq_save(flags); |
377 | retval = (mask & *a) != 0; | 385 | retval = (mask & *a) != 0; |
378 | *a &= ~mask; | 386 | *a &= ~mask; |
@@ -395,6 +403,8 @@ static inline int test_and_clear_bit(unsigned long nr, | |||
395 | static inline int test_and_change_bit(unsigned long nr, | 403 | static inline int test_and_change_bit(unsigned long nr, |
396 | volatile unsigned long *addr) | 404 | volatile unsigned long *addr) |
397 | { | 405 | { |
406 | unsigned short bit = nr & SZLONG_MASK; | ||
407 | |||
398 | if (cpu_has_llsc && R10000_LLSC_WAR) { | 408 | if (cpu_has_llsc && R10000_LLSC_WAR) { |
399 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 409 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
400 | unsigned long temp, res; | 410 | unsigned long temp, res; |
@@ -408,7 +418,7 @@ static inline int test_and_change_bit(unsigned long nr, | |||
408 | " and %2, %0, %3 \n" | 418 | " and %2, %0, %3 \n" |
409 | " .set mips0 \n" | 419 | " .set mips0 \n" |
410 | : "=&r" (temp), "=m" (*m), "=&r" (res) | 420 | : "=&r" (temp), "=m" (*m), "=&r" (res) |
411 | : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) | 421 | : "r" (1UL << bit), "m" (*m) |
412 | : "memory"); | 422 | : "memory"); |
413 | 423 | ||
414 | return res != 0; | 424 | return res != 0; |
@@ -431,7 +441,7 @@ static inline int test_and_change_bit(unsigned long nr, | |||
431 | " .previous \n" | 441 | " .previous \n" |
432 | " .set pop \n" | 442 | " .set pop \n" |
433 | : "=&r" (temp), "=m" (*m), "=&r" (res) | 443 | : "=&r" (temp), "=m" (*m), "=&r" (res) |
434 | : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) | 444 | : "r" (1UL << bit), "m" (*m) |
435 | : "memory"); | 445 | : "memory"); |
436 | 446 | ||
437 | return res != 0; | 447 | return res != 0; |
@@ -441,7 +451,7 @@ static inline int test_and_change_bit(unsigned long nr, | |||
441 | unsigned long flags; | 451 | unsigned long flags; |
442 | 452 | ||
443 | a += nr >> SZLONG_LOG; | 453 | a += nr >> SZLONG_LOG; |
444 | mask = 1UL << (nr & SZLONG_MASK); | 454 | mask = 1UL << bit; |
445 | local_irq_save(flags); | 455 | local_irq_save(flags); |
446 | retval = (mask & *a) != 0; | 456 | retval = (mask & *a) != 0; |
447 | *a ^= mask; | 457 | *a ^= mask; |
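The bitops.h changes above are a mechanical cleanup: the bit offset within a long, nr & SZLONG_MASK, is computed once into a local bit variable instead of being repeated in every inline-asm operand, while the word index still comes from nr >> SZLONG_LOG. The index arithmetic, shown standalone as ordinary user-space C (values chosen for illustration, assuming 32-bit longs):

#include <stdio.h>

#define SZLONG_LOG  5
#define SZLONG_MASK 31

int main(void)
{
	unsigned long nr = 75;
	unsigned long word = nr >> SZLONG_LOG;	/* which long holds the bit */
	unsigned short bit = nr & SZLONG_MASK;	/* offset inside that long  */

	printf("bit %lu lives in word %lu at offset %u (mask 0x%lx)\n",
	       nr, word, bit, 1UL << bit);
	return 0;
}
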
diff --git a/include/asm-mips/mach-ip27/topology.h b/include/asm-mips/mach-ip27/topology.h index 44790fdc5d00..61d9be3f3175 100644 --- a/include/asm-mips/mach-ip27/topology.h +++ b/include/asm-mips/mach-ip27/topology.h | |||
@@ -28,7 +28,6 @@ extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES]; | |||
28 | .busy_factor = 32, \ | 28 | .busy_factor = 32, \ |
29 | .imbalance_pct = 125, \ | 29 | .imbalance_pct = 125, \ |
30 | .cache_nice_tries = 1, \ | 30 | .cache_nice_tries = 1, \ |
31 | .per_cpu_gain = 100, \ | ||
32 | .flags = SD_LOAD_BALANCE \ | 31 | .flags = SD_LOAD_BALANCE \ |
33 | | SD_BALANCE_EXEC \ | 32 | | SD_BALANCE_EXEC \ |
34 | | SD_WAKE_BALANCE, \ | 33 | | SD_WAKE_BALANCE, \ |
diff --git a/include/asm-mips/mips_mt.h b/include/asm-mips/mips_mt.h index fdfff0b8ce42..8045abc78d0f 100644 --- a/include/asm-mips/mips_mt.h +++ b/include/asm-mips/mips_mt.h | |||
@@ -6,6 +6,8 @@ | |||
6 | #ifndef __ASM_MIPS_MT_H | 6 | #ifndef __ASM_MIPS_MT_H |
7 | #define __ASM_MIPS_MT_H | 7 | #define __ASM_MIPS_MT_H |
8 | 8 | ||
9 | #include <linux/cpumask.h> | ||
10 | |||
9 | extern cpumask_t mt_fpu_cpumask; | 11 | extern cpumask_t mt_fpu_cpumask; |
10 | extern unsigned long mt_fpemul_threshold; | 12 | extern unsigned long mt_fpemul_threshold; |
11 | 13 | ||
diff --git a/include/asm-mips/smtc.h b/include/asm-mips/smtc.h index e1941d1b8726..44dfa4adecf3 100644 --- a/include/asm-mips/smtc.h +++ b/include/asm-mips/smtc.h | |||
@@ -34,6 +34,9 @@ typedef long asiduse; | |||
34 | 34 | ||
35 | extern asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; | 35 | extern asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; |
36 | 36 | ||
37 | struct mm_struct; | ||
38 | struct task_struct; | ||
39 | |||
37 | void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu); | 40 | void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu); |
38 | 41 | ||
39 | void smtc_flush_tlb_asid(unsigned long asid); | 42 | void smtc_flush_tlb_asid(unsigned long asid); |
diff --git a/include/asm-mips/smtc_ipi.h b/include/asm-mips/smtc_ipi.h index 55f3419f6546..360ea6d250c7 100644 --- a/include/asm-mips/smtc_ipi.h +++ b/include/asm-mips/smtc_ipi.h | |||
@@ -4,6 +4,8 @@ | |||
4 | #ifndef __ASM_SMTC_IPI_H | 4 | #ifndef __ASM_SMTC_IPI_H |
5 | #define __ASM_SMTC_IPI_H | 5 | #define __ASM_SMTC_IPI_H |
6 | 6 | ||
7 | #include <linux/spinlock.h> | ||
8 | |||
7 | //#define SMTC_IPI_DEBUG | 9 | //#define SMTC_IPI_DEBUG |
8 | 10 | ||
9 | #ifdef SMTC_IPI_DEBUG | 11 | #ifdef SMTC_IPI_DEBUG |
diff --git a/include/asm-mips/spinlock.h b/include/asm-mips/spinlock.h index f1755d28a36a..35e431cd796b 100644 --- a/include/asm-mips/spinlock.h +++ b/include/asm-mips/spinlock.h | |||
@@ -287,7 +287,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) | |||
287 | " .set noreorder # __raw_read_trylock \n" | 287 | " .set noreorder # __raw_read_trylock \n" |
288 | " li %2, 0 \n" | 288 | " li %2, 0 \n" |
289 | "1: ll %1, %3 \n" | 289 | "1: ll %1, %3 \n" |
290 | " bnez %1, 2f \n" | 290 | " bltz %1, 2f \n" |
291 | " addu %1, 1 \n" | 291 | " addu %1, 1 \n" |
292 | " sc %1, %0 \n" | 292 | " sc %1, %0 \n" |
293 | " .set reorder \n" | 293 | " .set reorder \n" |
@@ -304,7 +304,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) | |||
304 | " .set noreorder # __raw_read_trylock \n" | 304 | " .set noreorder # __raw_read_trylock \n" |
305 | " li %2, 0 \n" | 305 | " li %2, 0 \n" |
306 | "1: ll %1, %3 \n" | 306 | "1: ll %1, %3 \n" |
307 | " bnez %1, 2f \n" | 307 | " bltz %1, 2f \n" |
308 | " addu %1, 1 \n" | 308 | " addu %1, 1 \n" |
309 | " sc %1, %0 \n" | 309 | " sc %1, %0 \n" |
310 | " beqz %1, 1b \n" | 310 | " beqz %1, 1b \n" |
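The spinlock.h change above fixes __raw_read_trylock(): the MIPS rwlock word counts readers and goes negative while a writer holds the lock, so the trylock should give up only on a negative value (bltz), not on any non-zero value (bnez), which wrongly failed whenever other readers were already in. The intended semantics, sketched in plain C (illustrative; the real code does this atomically with ll/sc):

static int example_read_trylock(int *lock)
{
	if (*lock < 0)		/* writer active: fail (the old code failed on != 0) */
		return 0;
	(*lock)++;		/* one more reader */
	return 1;
}
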
diff --git a/include/asm-mips/uaccess.h b/include/asm-mips/uaccess.h index c62c20e7b5c6..b25511787ee0 100644 --- a/include/asm-mips/uaccess.h +++ b/include/asm-mips/uaccess.h | |||
@@ -435,6 +435,8 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n); | |||
435 | __cu_len; \ | 435 | __cu_len; \ |
436 | }) | 436 | }) |
437 | 437 | ||
438 | extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); | ||
439 | |||
438 | #define __copy_to_user_inatomic(to,from,n) \ | 440 | #define __copy_to_user_inatomic(to,from,n) \ |
439 | ({ \ | 441 | ({ \ |
440 | void __user *__cu_to; \ | 442 | void __user *__cu_to; \ |
diff --git a/include/asm-mips/unistd.h b/include/asm-mips/unistd.h index 696cff39a1d3..2f1087b3a202 100644 --- a/include/asm-mips/unistd.h +++ b/include/asm-mips/unistd.h | |||
@@ -334,16 +334,18 @@ | |||
334 | #define __NR_kexec_load (__NR_Linux + 311) | 334 | #define __NR_kexec_load (__NR_Linux + 311) |
335 | #define __NR_getcpu (__NR_Linux + 312) | 335 | #define __NR_getcpu (__NR_Linux + 312) |
336 | #define __NR_epoll_pwait (__NR_Linux + 313) | 336 | #define __NR_epoll_pwait (__NR_Linux + 313) |
337 | #define __NR_ioprio_set (__NR_Linux + 314) | ||
338 | #define __NR_ioprio_get (__NR_Linux + 315) | ||
337 | 339 | ||
338 | /* | 340 | /* |
339 | * Offset of the last Linux o32 flavoured syscall | 341 | * Offset of the last Linux o32 flavoured syscall |
340 | */ | 342 | */ |
341 | #define __NR_Linux_syscalls 313 | 343 | #define __NR_Linux_syscalls 315 |
342 | 344 | ||
343 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ | 345 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ |
344 | 346 | ||
345 | #define __NR_O32_Linux 4000 | 347 | #define __NR_O32_Linux 4000 |
346 | #define __NR_O32_Linux_syscalls 313 | 348 | #define __NR_O32_Linux_syscalls 315 |
347 | 349 | ||
348 | #if _MIPS_SIM == _MIPS_SIM_ABI64 | 350 | #if _MIPS_SIM == _MIPS_SIM_ABI64 |
349 | 351 | ||
@@ -624,16 +626,18 @@ | |||
624 | #define __NR_kexec_load (__NR_Linux + 270) | 626 | #define __NR_kexec_load (__NR_Linux + 270) |
625 | #define __NR_getcpu (__NR_Linux + 271) | 627 | #define __NR_getcpu (__NR_Linux + 271) |
626 | #define __NR_epoll_pwait (__NR_Linux + 272) | 628 | #define __NR_epoll_pwait (__NR_Linux + 272) |
629 | #define __NR_ioprio_set (__NR_Linux + 273) | ||
630 | #define __NR_ioprio_get (__NR_Linux + 274) | ||
627 | 631 | ||
628 | /* | 632 | /* |
629 | * Offset of the last Linux 64-bit flavoured syscall | 633 | * Offset of the last Linux 64-bit flavoured syscall |
630 | */ | 634 | */ |
631 | #define __NR_Linux_syscalls 272 | 635 | #define __NR_Linux_syscalls 274 |
632 | 636 | ||
633 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ | 637 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ |
634 | 638 | ||
635 | #define __NR_64_Linux 5000 | 639 | #define __NR_64_Linux 5000 |
636 | #define __NR_64_Linux_syscalls 272 | 640 | #define __NR_64_Linux_syscalls 274 |
637 | 641 | ||
638 | #if _MIPS_SIM == _MIPS_SIM_NABI32 | 642 | #if _MIPS_SIM == _MIPS_SIM_NABI32 |
639 | 643 | ||
@@ -918,16 +922,18 @@ | |||
918 | #define __NR_kexec_load (__NR_Linux + 274) | 922 | #define __NR_kexec_load (__NR_Linux + 274) |
919 | #define __NR_getcpu (__NR_Linux + 275) | 923 | #define __NR_getcpu (__NR_Linux + 275) |
920 | #define __NR_epoll_pwait (__NR_Linux + 276) | 924 | #define __NR_epoll_pwait (__NR_Linux + 276) |
925 | #define __NR_ioprio_set (__NR_Linux + 277) | ||
926 | #define __NR_ioprio_get (__NR_Linux + 278) | ||
921 | 927 | ||
922 | /* | 928 | /* |
923 | * Offset of the last N32 flavoured syscall | 929 | * Offset of the last N32 flavoured syscall |
924 | */ | 930 | */ |
925 | #define __NR_Linux_syscalls 276 | 931 | #define __NR_Linux_syscalls 278 |
926 | 932 | ||
927 | #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ | 933 | #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ |
928 | 934 | ||
929 | #define __NR_N32_Linux 6000 | 935 | #define __NR_N32_Linux 6000 |
930 | #define __NR_N32_Linux_syscalls 276 | 936 | #define __NR_N32_Linux_syscalls 278 |
931 | 937 | ||
932 | #ifdef __KERNEL__ | 938 | #ifdef __KERNEL__ |
933 | 939 | ||
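The unistd.h hunks above assign ioprio_set/ioprio_get numbers for all three MIPS ABIs (o32, 64-bit and N32) and bump the per-ABI syscall counts to match. A user-space sketch of reaching the new call through syscall(2) (illustrative; it assumes a libc whose headers define SYS_ioprio_set, and the IOPRIO_* constants are copied by hand because glibc has no wrapper):

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_CLASS_BE		2
#define IOPRIO_WHO_PROCESS	1
#define IOPRIO_PRIO_VALUE(class, data)	(((class) << IOPRIO_CLASS_SHIFT) | (data))

int main(void)
{
	/* best-effort class, priority level 4, for the calling process */
	long ret = syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
			   IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4));

	if (ret < 0)
		perror("ioprio_set");
	return ret < 0;
}
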
diff --git a/include/asm-powerpc/topology.h b/include/asm-powerpc/topology.h index 6610495f5f16..0ad21a849b5f 100644 --- a/include/asm-powerpc/topology.h +++ b/include/asm-powerpc/topology.h | |||
@@ -57,7 +57,6 @@ static inline int pcibus_to_node(struct pci_bus *bus) | |||
57 | .busy_factor = 32, \ | 57 | .busy_factor = 32, \ |
58 | .imbalance_pct = 125, \ | 58 | .imbalance_pct = 125, \ |
59 | .cache_nice_tries = 1, \ | 59 | .cache_nice_tries = 1, \ |
60 | .per_cpu_gain = 100, \ | ||
61 | .busy_idx = 3, \ | 60 | .busy_idx = 3, \ |
62 | .idle_idx = 1, \ | 61 | .idle_idx = 1, \ |
63 | .newidle_idx = 2, \ | 62 | .newidle_idx = 2, \ |
diff --git a/include/asm-s390/bugs.h b/include/asm-s390/bugs.h index 2c3659621314..011f1e6a2a6c 100644 --- a/include/asm-s390/bugs.h +++ b/include/asm-s390/bugs.h | |||
@@ -16,7 +16,7 @@ | |||
16 | * void check_bugs(void); | 16 | * void check_bugs(void); |
17 | */ | 17 | */ |
18 | 18 | ||
19 | static void __init check_bugs(void) | 19 | static inline void check_bugs(void) |
20 | { | 20 | { |
21 | /* s390 has no bugs ... */ | 21 | /* s390 has no bugs ... */ |
22 | } | 22 | } |
diff --git a/include/asm-s390/ipl.h b/include/asm-s390/ipl.h index 5650d3d4ae46..660f78271a93 100644 --- a/include/asm-s390/ipl.h +++ b/include/asm-s390/ipl.h | |||
@@ -74,6 +74,7 @@ struct ipl_parameter_block { | |||
74 | extern u32 ipl_flags; | 74 | extern u32 ipl_flags; |
75 | extern u16 ipl_devno; | 75 | extern u16 ipl_devno; |
76 | 76 | ||
77 | extern u32 dump_prefix_page; | ||
77 | extern void do_reipl(void); | 78 | extern void do_reipl(void); |
78 | extern void ipl_save_parameters(void); | 79 | extern void ipl_save_parameters(void); |
79 | 80 | ||
diff --git a/include/asm-sparc64/dma.h b/include/asm-sparc64/dma.h index 1bf4f7a8fbe1..a9fd06183972 100644 --- a/include/asm-sparc64/dma.h +++ b/include/asm-sparc64/dma.h | |||
@@ -15,17 +15,6 @@ | |||
15 | #include <asm/delay.h> | 15 | #include <asm/delay.h> |
16 | #include <asm/oplib.h> | 16 | #include <asm/oplib.h> |
17 | 17 | ||
18 | extern spinlock_t dma_spin_lock; | ||
19 | |||
20 | #define claim_dma_lock() \ | ||
21 | ({ unsigned long flags; \ | ||
22 | spin_lock_irqsave(&dma_spin_lock, flags); \ | ||
23 | flags; \ | ||
24 | }) | ||
25 | |||
26 | #define release_dma_lock(__flags) \ | ||
27 | spin_unlock_irqrestore(&dma_spin_lock, __flags); | ||
28 | |||
29 | /* These are irrelevant for Sparc DMA, but we leave it in so that | 18 | /* These are irrelevant for Sparc DMA, but we leave it in so that |
30 | * things can compile. | 19 | * things can compile. |
31 | */ | 20 | */ |
diff --git a/include/asm-sparc64/floppy.h b/include/asm-sparc64/floppy.h index dbe033e494db..331013a0053e 100644 --- a/include/asm-sparc64/floppy.h +++ b/include/asm-sparc64/floppy.h | |||
@@ -854,4 +854,15 @@ static unsigned long __init sun_floppy_init(void) | |||
854 | 854 | ||
855 | #define EXTRA_FLOPPY_PARAMS | 855 | #define EXTRA_FLOPPY_PARAMS |
856 | 856 | ||
857 | static DEFINE_SPINLOCK(dma_spin_lock); | ||
858 | |||
859 | #define claim_dma_lock() \ | ||
860 | ({ unsigned long flags; \ | ||
861 | spin_lock_irqsave(&dma_spin_lock, flags); \ | ||
862 | flags; \ | ||
863 | }) | ||
864 | |||
865 | #define release_dma_lock(__flags) \ | ||
866 | spin_unlock_irqrestore(&dma_spin_lock, __flags); | ||
867 | |||
857 | #endif /* !(__ASM_SPARC64_FLOPPY_H) */ | 868 | #endif /* !(__ASM_SPARC64_FLOPPY_H) */ |
diff --git a/include/asm-x86_64/io_apic.h b/include/asm-x86_64/io_apic.h index f4fb238c89f1..969d225a9350 100644 --- a/include/asm-x86_64/io_apic.h +++ b/include/asm-x86_64/io_apic.h | |||
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | #include <asm/types.h> | 4 | #include <asm/types.h> |
5 | #include <asm/mpspec.h> | 5 | #include <asm/mpspec.h> |
6 | #include <asm/apicdef.h> | ||
6 | 7 | ||
7 | /* | 8 | /* |
8 | * Intel IO-APIC support for SMP and UP systems. | 9 | * Intel IO-APIC support for SMP and UP systems. |
diff --git a/include/asm-x86_64/nmi.h b/include/asm-x86_64/nmi.h index 72375e7d32a8..ceb3d8dac33d 100644 --- a/include/asm-x86_64/nmi.h +++ b/include/asm-x86_64/nmi.h | |||
@@ -64,7 +64,7 @@ extern int setup_nmi_watchdog(char *); | |||
64 | 64 | ||
65 | extern atomic_t nmi_active; | 65 | extern atomic_t nmi_active; |
66 | extern unsigned int nmi_watchdog; | 66 | extern unsigned int nmi_watchdog; |
67 | #define NMI_DEFAULT -1 | 67 | #define NMI_DEFAULT 0 |
68 | #define NMI_NONE 0 | 68 | #define NMI_NONE 0 |
69 | #define NMI_IO_APIC 1 | 69 | #define NMI_IO_APIC 1 |
70 | #define NMI_LOCAL_APIC 2 | 70 | #define NMI_LOCAL_APIC 2 |
diff --git a/include/asm-x86_64/swiotlb.h b/include/asm-x86_64/swiotlb.h index ab913ffcad56..f9c589539a82 100644 --- a/include/asm-x86_64/swiotlb.h +++ b/include/asm-x86_64/swiotlb.h | |||
@@ -44,7 +44,6 @@ extern void swiotlb_init(void); | |||
44 | extern int swiotlb_force; | 44 | extern int swiotlb_force; |
45 | 45 | ||
46 | #ifdef CONFIG_SWIOTLB | 46 | #ifdef CONFIG_SWIOTLB |
47 | #define SWIOTLB_ARCH_NEED_ALLOC | ||
48 | extern int swiotlb; | 47 | extern int swiotlb; |
49 | #else | 48 | #else |
50 | #define swiotlb 0 | 49 | #define swiotlb 0 |
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h index 2facec5914d2..4fd6fb23953e 100644 --- a/include/asm-x86_64/topology.h +++ b/include/asm-x86_64/topology.h | |||
@@ -43,7 +43,6 @@ extern int __node_distance(int, int); | |||
43 | .newidle_idx = 0, \ | 43 | .newidle_idx = 0, \ |
44 | .wake_idx = 1, \ | 44 | .wake_idx = 1, \ |
45 | .forkexec_idx = 1, \ | 45 | .forkexec_idx = 1, \ |
46 | .per_cpu_gain = 100, \ | ||
47 | .flags = SD_LOAD_BALANCE \ | 46 | .flags = SD_LOAD_BALANCE \ |
48 | | SD_BALANCE_FORK \ | 47 | | SD_BALANCE_FORK \ |
49 | | SD_BALANCE_EXEC \ | 48 | | SD_BALANCE_EXEC \ |
diff --git a/include/asm-x86_64/tsc.h b/include/asm-x86_64/tsc.h index 9a0a368852c7..d66ba6ef25f6 100644 --- a/include/asm-x86_64/tsc.h +++ b/include/asm-x86_64/tsc.h | |||
@@ -1,66 +1 @@ | |||
1 | /* | #include <asm-i386/tsc.h> | |
2 | * linux/include/asm-x86_64/tsc.h | ||
3 | * | ||
4 | * x86_64 TSC related functions | ||
5 | */ | ||
6 | #ifndef _ASM_x86_64_TSC_H | ||
7 | #define _ASM_x86_64_TSC_H | ||
8 | |||
9 | #include <asm/processor.h> | ||
10 | |||
11 | /* | ||
12 | * Standard way to access the cycle counter. | ||
13 | */ | ||
14 | typedef unsigned long long cycles_t; | ||
15 | |||
16 | extern unsigned int cpu_khz; | ||
17 | extern unsigned int tsc_khz; | ||
18 | |||
19 | static inline cycles_t get_cycles(void) | ||
20 | { | ||
21 | unsigned long long ret = 0; | ||
22 | |||
23 | #ifndef CONFIG_X86_TSC | ||
24 | if (!cpu_has_tsc) | ||
25 | return 0; | ||
26 | #endif | ||
27 | |||
28 | #if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC) | ||
29 | rdtscll(ret); | ||
30 | #endif | ||
31 | return ret; | ||
32 | } | ||
33 | |||
34 | /* Like get_cycles, but make sure the CPU is synchronized. */ | ||
35 | static __always_inline cycles_t get_cycles_sync(void) | ||
36 | { | ||
37 | unsigned long long ret; | ||
38 | #ifdef X86_FEATURE_SYNC_RDTSC | ||
39 | unsigned eax; | ||
40 | |||
41 | /* | ||
42 | * Don't do an additional sync on CPUs where we know | ||
43 | * RDTSC is already synchronous: | ||
44 | */ | ||
45 | alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC, | ||
46 | "=a" (eax), "0" (1) : "ebx","ecx","edx","memory"); | ||
47 | #else | ||
48 | sync_core(); | ||
49 | #endif | ||
50 | rdtscll(ret); | ||
51 | |||
52 | return ret; | ||
53 | } | ||
54 | |||
55 | extern void tsc_init(void); | ||
56 | extern void mark_tsc_unstable(void); | ||
57 | extern int unsynchronized_tsc(void); | ||
58 | |||
59 | /* | ||
60 | * Boot-time check whether the TSCs are synchronized across | ||
61 | * all CPUs/cores: | ||
62 | */ | ||
63 | extern void check_tsc_sync_source(int cpu); | ||
64 | extern void check_tsc_sync_target(void); | ||
65 | |||
66 | #endif | ||
diff --git a/include/linux/audit.h b/include/linux/audit.h index 229fa012c893..773e30df11ee 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h | |||
@@ -24,6 +24,7 @@ | |||
24 | #ifndef _LINUX_AUDIT_H_ | 24 | #ifndef _LINUX_AUDIT_H_ |
25 | #define _LINUX_AUDIT_H_ | 25 | #define _LINUX_AUDIT_H_ |
26 | 26 | ||
27 | #include <linux/types.h> | ||
27 | #include <linux/elf-em.h> | 28 | #include <linux/elf-em.h> |
28 | 29 | ||
29 | /* The netlink messages for the audit system is divided into blocks: | 30 | /* The netlink messages for the audit system is divided into blocks: |
diff --git a/include/asm-arm/hardware/gpio_keys.h b/include/linux/gpio_keys.h index 2b217c7b9312..2b217c7b9312 100644 --- a/include/asm-arm/hardware/gpio_keys.h +++ b/include/linux/gpio_keys.h | |||
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 3bef961b58b1..5bdbc744e773 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h | |||
@@ -47,7 +47,7 @@ enum hrtimer_restart { | |||
47 | * HRTIMER_CB_IRQSAFE: Callback may run in hardirq context | 47 | * HRTIMER_CB_IRQSAFE: Callback may run in hardirq context |
48 | * HRTIMER_CB_IRQSAFE_NO_RESTART: Callback may run in hardirq context and | 48 | * HRTIMER_CB_IRQSAFE_NO_RESTART: Callback may run in hardirq context and |
49 | * does not restart the timer | 49 | * does not restart the timer |
50 | * HRTIMER_CB_IRQSAFE_NO_SOFTIRQ: Callback must run in softirq context | 50 | * HRTIMER_CB_IRQSAFE_NO_SOFTIRQ: Callback must run in hardirq context |
51 | * Special mode for tick emultation | 51 | * Special mode for tick emultation |
52 | */ | 52 | */ |
53 | enum hrtimer_cb_mode { | 53 | enum hrtimer_cb_mode { |
@@ -139,7 +139,7 @@ struct hrtimer_sleeper { | |||
139 | }; | 139 | }; |
140 | 140 | ||
141 | /** | 141 | /** |
142 | * struct hrtimer_base - the timer base for a specific clock | 142 | * struct hrtimer_clock_base - the timer base for a specific clock |
143 | * @cpu_base: per cpu clock base | 143 | * @cpu_base: per cpu clock base |
144 | * @index: clock type index for per_cpu support when moving a | 144 | * @index: clock type index for per_cpu support when moving a |
145 | * timer to a base on another cpu. | 145 | * timer to a base on another cpu. |
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h index 4fab3d0a4bce..e33ee763c052 100644 --- a/include/linux/if_pppox.h +++ b/include/linux/if_pppox.h | |||
@@ -114,6 +114,7 @@ struct pppoe_hdr { | |||
114 | #ifdef __KERNEL__ | 114 | #ifdef __KERNEL__ |
115 | struct pppoe_opt { | 115 | struct pppoe_opt { |
116 | struct net_device *dev; /* device associated with socket*/ | 116 | struct net_device *dev; /* device associated with socket*/ |
117 | int ifindex; /* ifindex of device associated with socket */ | ||
117 | struct pppoe_addr pa; /* what this socket is bound to*/ | 118 | struct pppoe_addr pa; /* what this socket is bound to*/ |
118 | struct sockaddr_pppox relay; /* what socket data will be | 119 | struct sockaddr_pppox relay; /* what socket data will be |
119 | relayed to (PPPoE relaying) */ | 120 | relayed to (PPPoE relaying) */ |
@@ -132,6 +133,7 @@ struct pppox_sock { | |||
132 | unsigned short num; | 133 | unsigned short num; |
133 | }; | 134 | }; |
134 | #define pppoe_dev proto.pppoe.dev | 135 | #define pppoe_dev proto.pppoe.dev |
136 | #define pppoe_ifindex proto.pppoe.ifindex | ||
135 | #define pppoe_pa proto.pppoe.pa | 137 | #define pppoe_pa proto.pppoe.pa |
136 | #define pppoe_relay proto.pppoe.relay | 138 | #define pppoe_relay proto.pppoe.relay |
137 | 139 | ||
diff --git a/include/linux/igmp.h b/include/linux/igmp.h index 9dbb525c5178..a113fe68d8a1 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h | |||
@@ -218,5 +218,7 @@ extern void ip_mc_up(struct in_device *); | |||
218 | extern void ip_mc_down(struct in_device *); | 218 | extern void ip_mc_down(struct in_device *); |
219 | extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr); | 219 | extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr); |
220 | extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr); | 220 | extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr); |
221 | extern void ip_mc_rejoin_group(struct ip_mc_list *im); | ||
222 | |||
221 | #endif | 223 | #endif |
222 | #endif | 224 | #endif |
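igmp.h above exports ip_mc_rejoin_group(), which re-announces an already-joined multicast membership. A likely caller is a stacked device such as bonding that has just switched its active slave and wants the network to relearn its groups; a rough sketch (illustrative only, and a real caller must hold the appropriate in_dev locking while walking mc_list):

static void example_rejoin_all(struct in_device *in_dev)
{
	struct ip_mc_list *im;

	for (im = in_dev->mc_list; im; im = im->next)
		ip_mc_rejoin_group(im);	/* re-send the membership report */
}
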
diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 48148e0cdbd1..75e55dcdeb18 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h | |||
@@ -5,6 +5,14 @@ | |||
5 | 5 | ||
6 | typedef struct page *new_page_t(struct page *, unsigned long private, int **); | 6 | typedef struct page *new_page_t(struct page *, unsigned long private, int **); |
7 | 7 | ||
8 | /* Check if a vma is migratable */ | ||
9 | static inline int vma_migratable(struct vm_area_struct *vma) | ||
10 | { | ||
11 | if (vma->vm_flags & (VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED)) | ||
12 | return 0; | ||
13 | return 1; | ||
14 | } | ||
15 | |||
8 | #ifdef CONFIG_MIGRATION | 16 | #ifdef CONFIG_MIGRATION |
9 | extern int isolate_lru_page(struct page *p, struct list_head *pagelist); | 17 | extern int isolate_lru_page(struct page *p, struct list_head *pagelist); |
10 | extern int putback_lru_pages(struct list_head *l); | 18 | extern int putback_lru_pages(struct list_head *l); |
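migrate.h above gains vma_migratable(), a small predicate that rejects VM_IO, hugetlb, VM_PFNMAP and VM_RESERVED mappings, presumably so callers outside the mempolicy code can share the test. A sketch of a caller walking an address space (illustrative, not from the patch; the caller is assumed to hold mmap_sem for reading):

static void example_walk(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!vma_migratable(vma))
			continue;	/* I/O, hugetlb, pfnmap or reserved mapping */
		/* ... queue this vma's pages for migration ... */
	}
}
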
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 913e5752569f..bfcef8a1ad8b 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h | |||
@@ -62,6 +62,12 @@ struct mmc_ios { | |||
62 | 62 | ||
63 | #define MMC_BUS_WIDTH_1 0 | 63 | #define MMC_BUS_WIDTH_1 0 |
64 | #define MMC_BUS_WIDTH_4 2 | 64 | #define MMC_BUS_WIDTH_4 2 |
65 | |||
66 | unsigned char timing; /* timing specification used */ | ||
67 | |||
68 | #define MMC_TIMING_LEGACY 0 | ||
69 | #define MMC_TIMING_MMC_HS 1 | ||
70 | #define MMC_TIMING_SD_HS 2 | ||
65 | }; | 71 | }; |
66 | 72 | ||
67 | struct mmc_host_ops { | 73 | struct mmc_host_ops { |
@@ -87,6 +93,8 @@ struct mmc_host { | |||
87 | #define MMC_CAP_4_BIT_DATA (1 << 0) /* Can the host do 4 bit transfers */ | 93 | #define MMC_CAP_4_BIT_DATA (1 << 0) /* Can the host do 4 bit transfers */ |
88 | #define MMC_CAP_MULTIWRITE (1 << 1) /* Can accurately report bytes sent to card on error */ | 94 | #define MMC_CAP_MULTIWRITE (1 << 1) /* Can accurately report bytes sent to card on error */ |
89 | #define MMC_CAP_BYTEBLOCK (1 << 2) /* Can do non-log2 block sizes */ | 95 | #define MMC_CAP_BYTEBLOCK (1 << 2) /* Can do non-log2 block sizes */ |
96 | #define MMC_CAP_MMC_HIGHSPEED (1 << 3) /* Can do MMC high-speed timing */ | ||
97 | #define MMC_CAP_SD_HIGHSPEED (1 << 4) /* Can do SD high-speed timing */ | ||
90 | 98 | ||
91 | /* host specific block data */ | 99 | /* host specific block data */ |
92 | unsigned int max_seg_size; /* see blk_queue_max_segment_size */ | 100 | unsigned int max_seg_size; /* see blk_queue_max_segment_size */ |
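The mmc/host.h additions above give struct mmc_ios a timing field (legacy, MMC high-speed, SD high-speed) and two capability bits a host sets when it can actually run those timings. On the driver side this typically shows up in the set_ios() callback; a hedged sketch (example_host, EXAMPLE_CTRL and EXAMPLE_CTRL_HISPD are invented for illustration):

#define EXAMPLE_CTRL		0x28		/* made-up register offset */
#define EXAMPLE_CTRL_HISPD	(1 << 2)	/* made-up high-speed enable bit */

struct example_host {				/* hypothetical driver state */
	void __iomem	*base;
};

static void example_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct example_host *host = mmc_priv(mmc);
	u32 ctrl = readl(host->base + EXAMPLE_CTRL);

	if (ios->timing == MMC_TIMING_SD_HS || ios->timing == MMC_TIMING_MMC_HS)
		ctrl |= EXAMPLE_CTRL_HISPD;
	else
		ctrl &= ~EXAMPLE_CTRL_HISPD;
	writel(ctrl, host->base + EXAMPLE_CTRL);
}
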
diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h index e7d4da1cc9fa..c6d4ab86b83c 100644 --- a/include/linux/mv643xx.h +++ b/include/linux/mv643xx.h | |||
@@ -1288,6 +1288,7 @@ struct mv64xxx_i2c_pdata { | |||
1288 | #define MV643XX_ETH_NAME "mv643xx_eth" | 1288 | #define MV643XX_ETH_NAME "mv643xx_eth" |
1289 | 1289 | ||
1290 | struct mv643xx_eth_platform_data { | 1290 | struct mv643xx_eth_platform_data { |
1291 | int port_number; | ||
1291 | u16 force_phy_addr; /* force override if phy_addr == 0 */ | 1292 | u16 force_phy_addr; /* force override if phy_addr == 0 */ |
1292 | u16 phy_addr; | 1293 | u16 phy_addr; |
1293 | 1294 | ||
diff --git a/include/linux/ncp_fs_sb.h b/include/linux/ncp_fs_sb.h index a503052138bd..6330fc76b00f 100644 --- a/include/linux/ncp_fs_sb.h +++ b/include/linux/ncp_fs_sb.h | |||
@@ -50,6 +50,8 @@ struct ncp_server { | |||
50 | int packet_size; | 50 | int packet_size; |
51 | unsigned char *packet; /* Here we prepare requests and | 51 | unsigned char *packet; /* Here we prepare requests and |
52 | receive replies */ | 52 | receive replies */ |
53 | unsigned char *txbuf; /* Storage for current request */ | ||
54 | unsigned char *rxbuf; /* Storage for reply to current request */ | ||
53 | 55 | ||
54 | int lock; /* To prevent mismatch in protocols. */ | 56 | int lock; /* To prevent mismatch in protocols. */ |
55 | struct mutex mutex; | 57 | struct mutex mutex; |
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_core.h b/include/linux/netfilter_ipv4/ip_conntrack_core.h index 907d4f5ca5dc..e3a6df07aa4b 100644 --- a/include/linux/netfilter_ipv4/ip_conntrack_core.h +++ b/include/linux/netfilter_ipv4/ip_conntrack_core.h | |||
@@ -45,7 +45,7 @@ static inline int ip_conntrack_confirm(struct sk_buff **pskb) | |||
45 | int ret = NF_ACCEPT; | 45 | int ret = NF_ACCEPT; |
46 | 46 | ||
47 | if (ct) { | 47 | if (ct) { |
48 | if (!is_confirmed(ct)) | 48 | if (!is_confirmed(ct) && !is_dying(ct)) |
49 | ret = __ip_conntrack_confirm(pskb); | 49 | ret = __ip_conntrack_confirm(pskb); |
50 | ip_ct_deliver_cached_events(ct); | 50 | ip_ct_deliver_cached_events(ct); |
51 | } | 51 | } |
diff --git a/include/linux/pci.h b/include/linux/pci.h index 2c4b6842dfb9..78417e421b4c 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -543,6 +543,7 @@ void pci_set_master(struct pci_dev *dev); | |||
543 | int __must_check pci_set_mwi(struct pci_dev *dev); | 543 | int __must_check pci_set_mwi(struct pci_dev *dev); |
544 | void pci_clear_mwi(struct pci_dev *dev); | 544 | void pci_clear_mwi(struct pci_dev *dev); |
545 | void pci_intx(struct pci_dev *dev, int enable); | 545 | void pci_intx(struct pci_dev *dev, int enable); |
546 | void pci_msi_off(struct pci_dev *dev); | ||
546 | int pci_set_dma_mask(struct pci_dev *dev, u64 mask); | 547 | int pci_set_dma_mask(struct pci_dev *dev, u64 mask); |
547 | int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask); | 548 | int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask); |
548 | void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno); | 549 | void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno); |
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h index 7a6d34ee5ab1..f09cce2357ff 100644 --- a/include/linux/pci_regs.h +++ b/include/linux/pci_regs.h | |||
@@ -292,9 +292,10 @@ | |||
292 | #define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */ | 292 | #define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */ |
293 | #define PCI_MSI_MASK_BIT 16 /* Mask bits register */ | 293 | #define PCI_MSI_MASK_BIT 16 /* Mask bits register */ |
294 | 294 | ||
295 | /* MSI-X registers (these are at offset PCI_MSI_FLAGS) */ | 295 | /* MSI-X registers (these are at offset PCI_MSIX_FLAGS) */ |
296 | #define PCI_MSIX_FLAGS_QSIZE 0x7FF | 296 | #define PCI_MSIX_FLAGS 2 |
297 | #define PCI_MSIX_FLAGS_ENABLE (1 << 15) | 297 | #define PCI_MSIX_FLAGS_QSIZE 0x7FF |
298 | #define PCI_MSIX_FLAGS_ENABLE (1 << 15) | ||
298 | #define PCI_MSIX_FLAGS_BIRMASK (7 << 0) | 299 | #define PCI_MSIX_FLAGS_BIRMASK (7 << 0) |
299 | #define PCI_MSIX_FLAGS_BITMASK (1 << 0) | 300 | #define PCI_MSIX_FLAGS_BITMASK (1 << 0) |
300 | 301 | ||
diff --git a/include/linux/sched.h b/include/linux/sched.h index 6f7c9a4d80e5..49fe2997a016 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -684,7 +684,6 @@ struct sched_domain { | |||
684 | unsigned int imbalance_pct; /* No balance until over watermark */ | 684 | unsigned int imbalance_pct; /* No balance until over watermark */ |
685 | unsigned long long cache_hot_time; /* Task considered cache hot (ns) */ | 685 | unsigned long long cache_hot_time; /* Task considered cache hot (ns) */ |
686 | unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ | 686 | unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ |
687 | unsigned int per_cpu_gain; /* CPU % gained by adding domain cpus */ | ||
688 | unsigned int busy_idx; | 687 | unsigned int busy_idx; |
689 | unsigned int idle_idx; | 688 | unsigned int idle_idx; |
690 | unsigned int newidle_idx; | 689 | unsigned int newidle_idx; |
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 61fef376ed2e..a946176db638 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h | |||
@@ -283,6 +283,43 @@ do { \ | |||
283 | }) | 283 | }) |
284 | 284 | ||
285 | /* | 285 | /* |
286 | * Locks two spinlocks l1 and l2. | ||
287 | * l1_first indicates if spinlock l1 should be taken first. | ||
288 | */ | ||
289 | static inline void double_spin_lock(spinlock_t *l1, spinlock_t *l2, | ||
290 | bool l1_first) | ||
291 | __acquires(l1) | ||
292 | __acquires(l2) | ||
293 | { | ||
294 | if (l1_first) { | ||
295 | spin_lock(l1); | ||
296 | spin_lock(l2); | ||
297 | } else { | ||
298 | spin_lock(l2); | ||
299 | spin_lock(l1); | ||
300 | } | ||
301 | } | ||
302 | |||
303 | /* | ||
304 | * Unlocks two spinlocks l1 and l2. | ||
305 | * l1_taken_first indicates if spinlock l1 was taken first and therefore | ||
306 | * should be released after spinlock l2. | ||
307 | */ | ||
308 | static inline void double_spin_unlock(spinlock_t *l1, spinlock_t *l2, | ||
309 | bool l1_taken_first) | ||
310 | __releases(l1) | ||
311 | __releases(l2) | ||
312 | { | ||
313 | if (l1_taken_first) { | ||
314 | spin_unlock(l2); | ||
315 | spin_unlock(l1); | ||
316 | } else { | ||
317 | spin_unlock(l1); | ||
318 | spin_unlock(l2); | ||
319 | } | ||
320 | } | ||
321 | |||
322 | /* | ||
286 | * Pull the atomic_t declaration: | 323 | * Pull the atomic_t declaration: |
287 | * (asm-mips/atomic.h needs above definitions) | 324 | * (asm-mips/atomic.h needs above definitions) |
288 | */ | 325 | */ |
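The spinlock.h hunk above adds double_spin_lock()/double_spin_unlock(), which take two locks in a caller-supplied order and release them in the reverse order, so every path that needs the same pair agrees on the ordering and cannot deadlock; the kernel/hrtimer.c hunk later in this patch converts migrate_hrtimers() to use them. A minimal usage sketch (struct foo is hypothetical; any stable tie-breaker such as pointer comparison will do):

struct foo {				/* hypothetical */
	spinlock_t	lock;
	/* ... */
};

static void example_move(struct foo *a, struct foo *b)
{
	bool a_first = a < b;		/* stable ordering between the two locks */

	double_spin_lock(&a->lock, &b->lock, a_first);
	/* ... move state from b to a ... */
	double_spin_unlock(&a->lock, &b->lock, a_first);
}
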
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 83b3c7b433aa..35fa4d5aadd0 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h | |||
@@ -194,9 +194,7 @@ static inline void svc_putu32(struct kvec *iov, __be32 val) | |||
194 | 194 | ||
195 | union svc_addr_u { | 195 | union svc_addr_u { |
196 | struct in_addr addr; | 196 | struct in_addr addr; |
197 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
198 | struct in6_addr addr6; | 197 | struct in6_addr addr6; |
199 | #endif | ||
200 | }; | 198 | }; |
201 | 199 | ||
202 | /* | 200 | /* |
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h index cccea0a0feb4..7909687557bf 100644 --- a/include/linux/sunrpc/svcsock.h +++ b/include/linux/sunrpc/svcsock.h | |||
@@ -66,7 +66,7 @@ struct svc_sock { | |||
66 | * Function prototypes. | 66 | * Function prototypes. |
67 | */ | 67 | */ |
68 | int svc_makesock(struct svc_serv *, int, unsigned short, int flags); | 68 | int svc_makesock(struct svc_serv *, int, unsigned short, int flags); |
69 | void svc_close_socket(struct svc_sock *); | 69 | void svc_force_close_socket(struct svc_sock *); |
70 | int svc_recv(struct svc_rqst *, long); | 70 | int svc_recv(struct svc_rqst *, long); |
71 | int svc_send(struct svc_rqst *); | 71 | int svc_send(struct svc_rqst *); |
72 | void svc_drop(struct svc_rqst *); | 72 | void svc_drop(struct svc_rqst *); |
diff --git a/include/linux/topology.h b/include/linux/topology.h index 6c5a6e6e813b..a9d1f049cc15 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h | |||
@@ -96,7 +96,6 @@ | |||
96 | .busy_factor = 64, \ | 96 | .busy_factor = 64, \ |
97 | .imbalance_pct = 110, \ | 97 | .imbalance_pct = 110, \ |
98 | .cache_nice_tries = 0, \ | 98 | .cache_nice_tries = 0, \ |
99 | .per_cpu_gain = 25, \ | ||
100 | .busy_idx = 0, \ | 99 | .busy_idx = 0, \ |
101 | .idle_idx = 0, \ | 100 | .idle_idx = 0, \ |
102 | .newidle_idx = 1, \ | 101 | .newidle_idx = 1, \ |
@@ -128,7 +127,6 @@ | |||
128 | .busy_factor = 64, \ | 127 | .busy_factor = 64, \ |
129 | .imbalance_pct = 125, \ | 128 | .imbalance_pct = 125, \ |
130 | .cache_nice_tries = 1, \ | 129 | .cache_nice_tries = 1, \ |
131 | .per_cpu_gain = 100, \ | ||
132 | .busy_idx = 2, \ | 130 | .busy_idx = 2, \ |
133 | .idle_idx = 1, \ | 131 | .idle_idx = 1, \ |
134 | .newidle_idx = 2, \ | 132 | .newidle_idx = 2, \ |
@@ -159,7 +157,6 @@ | |||
159 | .busy_factor = 64, \ | 157 | .busy_factor = 64, \ |
160 | .imbalance_pct = 125, \ | 158 | .imbalance_pct = 125, \ |
161 | .cache_nice_tries = 1, \ | 159 | .cache_nice_tries = 1, \ |
162 | .per_cpu_gain = 100, \ | ||
163 | .busy_idx = 2, \ | 160 | .busy_idx = 2, \ |
164 | .idle_idx = 1, \ | 161 | .idle_idx = 1, \ |
165 | .newidle_idx = 2, \ | 162 | .newidle_idx = 2, \ |
@@ -193,7 +190,6 @@ | |||
193 | .newidle_idx = 0, /* unused */ \ | 190 | .newidle_idx = 0, /* unused */ \ |
194 | .wake_idx = 0, /* unused */ \ | 191 | .wake_idx = 0, /* unused */ \ |
195 | .forkexec_idx = 0, /* unused */ \ | 192 | .forkexec_idx = 0, /* unused */ \ |
196 | .per_cpu_gain = 100, \ | ||
197 | .flags = SD_LOAD_BALANCE \ | 193 | .flags = SD_LOAD_BALANCE \ |
198 | | SD_SERIALIZE, \ | 194 | | SD_SERIALIZE, \ |
199 | .last_balance = jiffies, \ | 195 | .last_balance = jiffies, \ |
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index f7be1ac73601..09a2532699b2 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h | |||
@@ -66,7 +66,7 @@ struct inet_hashinfo; | |||
66 | struct inet_timewait_death_row { | 66 | struct inet_timewait_death_row { |
67 | /* Short-time timewait calendar */ | 67 | /* Short-time timewait calendar */ |
68 | int twcal_hand; | 68 | int twcal_hand; |
69 | int twcal_jiffie; | 69 | unsigned long twcal_jiffie; |
70 | struct timer_list twcal_timer; | 70 | struct timer_list twcal_timer; |
71 | struct hlist_head twcal_row[INET_TWDR_RECYCLE_SLOTS]; | 71 | struct hlist_head twcal_row[INET_TWDR_RECYCLE_SLOTS]; |
72 | 72 | ||
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h index 7fdc72c01356..85634e1865c3 100644 --- a/include/net/netfilter/nf_conntrack_core.h +++ b/include/net/netfilter/nf_conntrack_core.h | |||
@@ -64,7 +64,7 @@ static inline int nf_conntrack_confirm(struct sk_buff **pskb) | |||
64 | int ret = NF_ACCEPT; | 64 | int ret = NF_ACCEPT; |
65 | 65 | ||
66 | if (ct) { | 66 | if (ct) { |
67 | if (!nf_ct_is_confirmed(ct)) | 67 | if (!nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) |
68 | ret = __nf_conntrack_confirm(pskb); | 68 | ret = __nf_conntrack_confirm(pskb); |
69 | nf_ct_deliver_cached_events(ct); | 69 | nf_ct_deliver_cached_events(ct); |
70 | } | 70 | } |
diff --git a/include/net/sock.h b/include/net/sock.h index 849c7df23181..2c7d60ca3548 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
@@ -426,7 +426,7 @@ static inline void sk_acceptq_added(struct sock *sk) | |||
426 | 426 | ||
427 | static inline int sk_acceptq_is_full(struct sock *sk) | 427 | static inline int sk_acceptq_is_full(struct sock *sk) |
428 | { | 428 | { |
429 | return sk->sk_ack_backlog >= sk->sk_max_ack_backlog; | 429 | return sk->sk_ack_backlog > sk->sk_max_ack_backlog; |
430 | } | 430 | } |
431 | 431 | ||
432 | /* | 432 | /* |
diff --git a/include/sound/version.h b/include/sound/version.h index a9ba7ee69939..5f7275000102 100644 --- a/include/sound/version.h +++ b/include/sound/version.h | |||
@@ -1,3 +1,3 @@ | |||
1 | /* include/version.h. Generated by alsa/ksync script. */ | 1 | /* include/version.h. Generated by alsa/ksync script. */ |
2 | #define CONFIG_SND_VERSION "1.0.14rc2" | 2 | #define CONFIG_SND_VERSION "1.0.14rc3" |
3 | #define CONFIG_SND_DATE " (Wed Feb 14 07:42:13 2007 UTC)" | 3 | #define CONFIG_SND_DATE " (Tue Mar 06 13:10:00 2007 UTC)" |
diff --git a/init/Kconfig b/init/Kconfig index f977086e118a..b170aa1d43bd 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -304,6 +304,22 @@ config RELAY | |||
304 | 304 | ||
305 | If unsure, say N. | 305 | If unsure, say N. |
306 | 306 | ||
307 | config BLK_DEV_INITRD | ||
308 | bool "Initial RAM filesystem and RAM disk (initramfs/initrd) support" | ||
309 | depends on BROKEN || !FRV | ||
310 | help | ||
311 | The initial RAM filesystem is a ramfs which is loaded by the | ||
312 | boot loader (loadlin or lilo) and that is mounted as root | ||
313 | before the normal boot procedure. It is typically used to | ||
314 | load modules needed to mount the "real" root file system, | ||
315 | etc. See <file:Documentation/initrd.txt> for details. | ||
316 | |||
317 | If RAM disk support (BLK_DEV_RAM) is also included, this | ||
318 | also enables initial RAM disk (initrd) support and adds | ||
319 | 15 Kbytes (more on some other architectures) to the kernel size. | ||
320 | |||
321 | If unsure say Y. | ||
322 | |||
307 | if BLK_DEV_INITRD | 323 | if BLK_DEV_INITRD |
308 | 324 | ||
309 | source "usr/Kconfig" | 325 | source "usr/Kconfig" |
diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 0b5ecbe5f045..554ac368be79 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c | |||
@@ -731,7 +731,8 @@ asmlinkage long sys_mq_unlink(const char __user *u_name) | |||
731 | if (IS_ERR(name)) | 731 | if (IS_ERR(name)) |
732 | return PTR_ERR(name); | 732 | return PTR_ERR(name); |
733 | 733 | ||
734 | mutex_lock(&mqueue_mnt->mnt_root->d_inode->i_mutex); | 734 | mutex_lock_nested(&mqueue_mnt->mnt_root->d_inode->i_mutex, |
735 | I_MUTEX_PARENT); | ||
735 | dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name)); | 736 | dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name)); |
736 | if (IS_ERR(dentry)) { | 737 | if (IS_ERR(dentry)) { |
737 | err = PTR_ERR(dentry); | 738 | err = PTR_ERR(dentry); |
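The mqueue.c change above switches sys_mq_unlink() to mutex_lock_nested() with I_MUTEX_PARENT, the usual lockdep annotation for taking a directory inode's i_mutex as the parent when a child inode's mutex may be acquired beneath it (as vfs_unlink() does). The general VFS pattern it follows, sketched for illustration (not from the patch; assumes the usual fs.h/namei.h declarations):

static int example_unlink_by_name(struct dentry *parent, const char *name)
{
	struct dentry *child;
	int err;

	mutex_lock_nested(&parent->d_inode->i_mutex, I_MUTEX_PARENT);
	child = lookup_one_len(name, parent, strlen(name));
	if (IS_ERR(child)) {
		err = PTR_ERR(child);
		goto out;
	}
	err = vfs_unlink(parent->d_inode, child);	/* takes the child's i_mutex */
	dput(child);
out:
	mutex_unlock(&parent->d_inode->i_mutex);
	return err;
}
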
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 476cb0c0b4a4..ec4cb9f3e3b7 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
@@ -540,19 +540,19 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, | |||
540 | /* | 540 | /* |
541 | * Switch to high resolution mode | 541 | * Switch to high resolution mode |
542 | */ | 542 | */ |
543 | static void hrtimer_switch_to_hres(void) | 543 | static int hrtimer_switch_to_hres(void) |
544 | { | 544 | { |
545 | struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases); | 545 | struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases); |
546 | unsigned long flags; | 546 | unsigned long flags; |
547 | 547 | ||
548 | if (base->hres_active) | 548 | if (base->hres_active) |
549 | return; | 549 | return 1; |
550 | 550 | ||
551 | local_irq_save(flags); | 551 | local_irq_save(flags); |
552 | 552 | ||
553 | if (tick_init_highres()) { | 553 | if (tick_init_highres()) { |
554 | local_irq_restore(flags); | 554 | local_irq_restore(flags); |
555 | return; | 555 | return 0; |
556 | } | 556 | } |
557 | base->hres_active = 1; | 557 | base->hres_active = 1; |
558 | base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES; | 558 | base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES; |
@@ -565,13 +565,14 @@ static void hrtimer_switch_to_hres(void) | |||
565 | local_irq_restore(flags); | 565 | local_irq_restore(flags); |
566 | printk(KERN_INFO "Switched to high resolution mode on CPU %d\n", | 566 | printk(KERN_INFO "Switched to high resolution mode on CPU %d\n", |
567 | smp_processor_id()); | 567 | smp_processor_id()); |
568 | return 1; | ||
568 | } | 569 | } |
569 | 570 | ||
570 | #else | 571 | #else |
571 | 572 | ||
572 | static inline int hrtimer_hres_active(void) { return 0; } | 573 | static inline int hrtimer_hres_active(void) { return 0; } |
573 | static inline int hrtimer_is_hres_enabled(void) { return 0; } | 574 | static inline int hrtimer_is_hres_enabled(void) { return 0; } |
574 | static inline void hrtimer_switch_to_hres(void) { } | 575 | static inline int hrtimer_switch_to_hres(void) { return 0; } |
575 | static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { } | 576 | static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { } |
576 | static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, | 577 | static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, |
577 | struct hrtimer_clock_base *base) | 578 | struct hrtimer_clock_base *base) |
@@ -1130,6 +1131,9 @@ static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base, | |||
1130 | if (base->softirq_time.tv64 <= timer->expires.tv64) | 1131 | if (base->softirq_time.tv64 <= timer->expires.tv64) |
1131 | break; | 1132 | break; |
1132 | 1133 | ||
1134 | #ifdef CONFIG_HIGH_RES_TIMERS | ||
1135 | WARN_ON_ONCE(timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ); | ||
1136 | #endif | ||
1133 | timer_stats_account_hrtimer(timer); | 1137 | timer_stats_account_hrtimer(timer); |
1134 | 1138 | ||
1135 | fn = timer->function; | 1139 | fn = timer->function; |
@@ -1173,7 +1177,8 @@ void hrtimer_run_queues(void) | |||
1173 | * deadlock vs. xtime_lock. | 1177 | * deadlock vs. xtime_lock. |
1174 | */ | 1178 | */ |
1175 | if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) | 1179 | if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) |
1176 | hrtimer_switch_to_hres(); | 1180 | if (hrtimer_switch_to_hres()) |
1181 | return; | ||
1177 | 1182 | ||
1178 | hrtimer_get_softirq_time(cpu_base); | 1183 | hrtimer_get_softirq_time(cpu_base); |
1179 | 1184 | ||
@@ -1355,17 +1360,16 @@ static void migrate_hrtimers(int cpu) | |||
1355 | tick_cancel_sched_timer(cpu); | 1360 | tick_cancel_sched_timer(cpu); |
1356 | 1361 | ||
1357 | local_irq_disable(); | 1362 | local_irq_disable(); |
1358 | 1363 | double_spin_lock(&new_base->lock, &old_base->lock, | |
1359 | spin_lock(&new_base->lock); | 1364 | smp_processor_id() < cpu); |
1360 | spin_lock(&old_base->lock); | ||
1361 | 1365 | ||
1362 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { | 1366 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { |
1363 | migrate_hrtimer_list(&old_base->clock_base[i], | 1367 | migrate_hrtimer_list(&old_base->clock_base[i], |
1364 | &new_base->clock_base[i]); | 1368 | &new_base->clock_base[i]); |
1365 | } | 1369 | } |
1366 | spin_unlock(&old_base->lock); | ||
1367 | spin_unlock(&new_base->lock); | ||
1368 | 1370 | ||
1371 | double_spin_unlock(&new_base->lock, &old_base->lock, | ||
1372 | smp_processor_id() < cpu); | ||
1369 | local_irq_enable(); | 1373 | local_irq_enable(); |
1370 | put_cpu_var(hrtimer_bases); | 1374 | put_cpu_var(hrtimer_bases); |
1371 | } | 1375 | } |
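
[Editor's note] Two related changes in hrtimer.c: hrtimer_switch_to_hres() now reports success so hrtimer_run_queues() can bail out once high-resolution mode has taken over, and migrate_hrtimers() acquires both per-CPU base locks through double_spin_lock(), with the smp_processor_id() < cpu predicate fixing a single lock order for every CPU pair. A sketch of what such a helper looks like, assuming it mirrors the double_spin_lock()/double_spin_unlock() pair this patch series relies on (the exact in-tree implementation may differ):

    static inline void double_spin_lock(spinlock_t *l1, spinlock_t *l2,
                                        bool l1_first)
    {
            if (l1_first) {
                    spin_lock(l1);
                    spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
            } else {
                    spin_lock(l2);
                    spin_lock_nested(l1, SINGLE_DEPTH_NESTING);
            }
    }

    static inline void double_spin_unlock(spinlock_t *l1, spinlock_t *l2,
                                          bool l1_taken_first)
    {
            if (l1_taken_first) {
                    spin_unlock(l2);
                    spin_unlock(l1);
            } else {
                    spin_unlock(l1);
                    spin_unlock(l2);
            }
    }

Because both CPUs in a migration evaluate the same comparison, they agree on which base lock goes first, which is what rules out an AB/BA deadlock when two CPUs migrate timers towards each other.
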
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index 95f6657fff73..51a4dd0f1b74 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig | |||
@@ -81,29 +81,34 @@ config SOFTWARE_SUSPEND | |||
81 | bool "Software Suspend" | 81 | bool "Software Suspend" |
82 | depends on PM && SWAP && ((X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP)) | 82 | depends on PM && SWAP && ((X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP)) |
83 | ---help--- | 83 | ---help--- |
84 | Enable the possibility of suspending the machine. | 84 | Enable the suspend to disk (STD) functionality. |
85 | It doesn't need ACPI or APM. | ||
86 | You may suspend your machine by 'swsusp' or 'shutdown -z <time>' | ||
87 | (patch for sysvinit needed). | ||
88 | 85 | ||
89 | It creates an image which is saved in your active swap. Upon next | 86 | You can suspend your machine with 'echo disk > /sys/power/state'. |
87 | Alternatively, you can use the additional userland tools available | ||
88 | from <http://suspend.sf.net>. | ||
89 | |||
90 | In principle it does not require ACPI or APM, although for example | ||
91 | ACPI will be used if available. | ||
92 | |||
93 | It creates an image which is saved in your active swap. Upon the next | ||
90 | boot, pass the 'resume=/dev/swappartition' argument to the kernel to | 94 | boot, pass the 'resume=/dev/swappartition' argument to the kernel to |
91 | have it detect the saved image, restore memory state from it, and | 95 | have it detect the saved image, restore memory state from it, and |
92 | continue to run as before. If you do not want the previous state to | 96 | continue to run as before. If you do not want the previous state to |
93 | be reloaded, then use the 'noresume' kernel argument. However, note | 97 | be reloaded, then use the 'noresume' kernel command line argument. |
94 | that your partitions will be fsck'd and you must re-mkswap your swap | 98 | Note, however, that fsck will be run on your filesystems and you will |
95 | partitions. It does not work with swap files. | 99 | need to run mkswap against the swap partition used for the suspend. |
96 | 100 | ||
97 | Right now you may boot without resuming and then later resume but | 101 | It also works with swap files to a limited extent (for details see |
98 | in meantime you cannot use those swap partitions/files which were | 102 | <file:Documentation/power/swsusp-and-swap-files.txt>). |
99 | involved in suspending. Also in this case there is a risk that buffers | ||
100 | on disk won't match with saved ones. | ||
101 | 103 | ||
102 | For more information take a look at <file:Documentation/power/swsusp.txt>. | 104 | Right now you may boot without resuming and resume later but in the |
105 | meantime you cannot use the swap partition(s)/file(s) involved in | ||
106 | suspending. Also in this case you must not use the filesystems | ||
107 | that were mounted before the suspend. In particular, you MUST NOT | ||
108 | MOUNT any journaled filesystems mounted before the suspend or they | ||
109 | will get corrupted in a nasty way. | ||
103 | 110 | ||
104 | (For now, swsusp is incompatible with PAE aka HIGHMEM_64G on i386. | 111 | For more information take a look at <file:Documentation/power/swsusp.txt>. |
105 | we need identity mapping for resume to work, and that is trivial | ||
106 | to get with 4MB pages, but less than trivial on PAE). | ||
107 | 112 | ||
108 | config PM_STD_PARTITION | 113 | config PM_STD_PARTITION |
109 | string "Default resume partition" | 114 | string "Default resume partition" |
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c index 482b11ff65cb..bcd14e83ef39 100644 --- a/kernel/rcutorture.c +++ b/kernel/rcutorture.c | |||
@@ -60,19 +60,19 @@ static int test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */ | |||
60 | static int shuffle_interval = 5; /* Interval between shuffles (in sec)*/ | 60 | static int shuffle_interval = 5; /* Interval between shuffles (in sec)*/ |
61 | static char *torture_type = "rcu"; /* What RCU implementation to torture. */ | 61 | static char *torture_type = "rcu"; /* What RCU implementation to torture. */ |
62 | 62 | ||
63 | module_param(nreaders, int, 0); | 63 | module_param(nreaders, int, 0444); |
64 | MODULE_PARM_DESC(nreaders, "Number of RCU reader threads"); | 64 | MODULE_PARM_DESC(nreaders, "Number of RCU reader threads"); |
65 | module_param(nfakewriters, int, 0); | 65 | module_param(nfakewriters, int, 0444); |
66 | MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads"); | 66 | MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads"); |
67 | module_param(stat_interval, int, 0); | 67 | module_param(stat_interval, int, 0444); |
68 | MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s"); | 68 | MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s"); |
69 | module_param(verbose, bool, 0); | 69 | module_param(verbose, bool, 0444); |
70 | MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s"); | 70 | MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s"); |
71 | module_param(test_no_idle_hz, bool, 0); | 71 | module_param(test_no_idle_hz, bool, 0444); |
72 | MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs"); | 72 | MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs"); |
73 | module_param(shuffle_interval, int, 0); | 73 | module_param(shuffle_interval, int, 0444); |
74 | MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles"); | 74 | MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles"); |
75 | module_param(torture_type, charp, 0); | 75 | module_param(torture_type, charp, 0444); |
76 | MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)"); | 76 | MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)"); |
77 | 77 | ||
78 | #define TORTURE_FLAG "-torture:" | 78 | #define TORTURE_FLAG "-torture:" |
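
[Editor's note] Changing the module_param() permission argument from 0 to 0444 makes each rcutorture parameter visible read-only under /sys/module/rcutorture/parameters/, so the values actually in use can be inspected at run time; how they are set at load time is unchanged. A minimal sketch of the idiom in an unrelated, hypothetical module:

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    static int nthreads = 4;
    module_param(nthreads, int, 0444);      /* readable in sysfs, not writable */
    MODULE_PARM_DESC(nthreads, "Number of worker threads");

    static int __init demo_init(void) { return 0; }
    static void __exit demo_exit(void) { }
    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
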
diff --git a/kernel/sched.c b/kernel/sched.c index 5f102e6c7a4c..a4ca632c477c 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -3006,23 +3006,6 @@ static inline void idle_balance(int cpu, struct rq *rq) | |||
3006 | } | 3006 | } |
3007 | #endif | 3007 | #endif |
3008 | 3008 | ||
3009 | static inline void wake_priority_sleeper(struct rq *rq) | ||
3010 | { | ||
3011 | #ifdef CONFIG_SCHED_SMT | ||
3012 | if (!rq->nr_running) | ||
3013 | return; | ||
3014 | |||
3015 | spin_lock(&rq->lock); | ||
3016 | /* | ||
3017 | * If an SMT sibling task has been put to sleep for priority | ||
3018 | * reasons reschedule the idle task to see if it can now run. | ||
3019 | */ | ||
3020 | if (rq->nr_running) | ||
3021 | resched_task(rq->idle); | ||
3022 | spin_unlock(&rq->lock); | ||
3023 | #endif | ||
3024 | } | ||
3025 | |||
3026 | DEFINE_PER_CPU(struct kernel_stat, kstat); | 3009 | DEFINE_PER_CPU(struct kernel_stat, kstat); |
3027 | 3010 | ||
3028 | EXPORT_PER_CPU_SYMBOL(kstat); | 3011 | EXPORT_PER_CPU_SYMBOL(kstat); |
@@ -3239,10 +3222,7 @@ void scheduler_tick(void) | |||
3239 | 3222 | ||
3240 | update_cpu_clock(p, rq, now); | 3223 | update_cpu_clock(p, rq, now); |
3241 | 3224 | ||
3242 | if (p == rq->idle) | 3225 | if (p != rq->idle) |
3243 | /* Task on the idle queue */ | ||
3244 | wake_priority_sleeper(rq); | ||
3245 | else | ||
3246 | task_running_tick(rq, p); | 3226 | task_running_tick(rq, p); |
3247 | #ifdef CONFIG_SMP | 3227 | #ifdef CONFIG_SMP |
3248 | update_load(rq); | 3228 | update_load(rq); |
@@ -3251,136 +3231,6 @@ void scheduler_tick(void) | |||
3251 | #endif | 3231 | #endif |
3252 | } | 3232 | } |
3253 | 3233 | ||
3254 | #ifdef CONFIG_SCHED_SMT | ||
3255 | static inline void wakeup_busy_runqueue(struct rq *rq) | ||
3256 | { | ||
3257 | /* If an SMT runqueue is sleeping due to priority reasons wake it up */ | ||
3258 | if (rq->curr == rq->idle && rq->nr_running) | ||
3259 | resched_task(rq->idle); | ||
3260 | } | ||
3261 | |||
3262 | /* | ||
3263 | * Called with interrupt disabled and this_rq's runqueue locked. | ||
3264 | */ | ||
3265 | static void wake_sleeping_dependent(int this_cpu) | ||
3266 | { | ||
3267 | struct sched_domain *tmp, *sd = NULL; | ||
3268 | int i; | ||
3269 | |||
3270 | for_each_domain(this_cpu, tmp) { | ||
3271 | if (tmp->flags & SD_SHARE_CPUPOWER) { | ||
3272 | sd = tmp; | ||
3273 | break; | ||
3274 | } | ||
3275 | } | ||
3276 | |||
3277 | if (!sd) | ||
3278 | return; | ||
3279 | |||
3280 | for_each_cpu_mask(i, sd->span) { | ||
3281 | struct rq *smt_rq = cpu_rq(i); | ||
3282 | |||
3283 | if (i == this_cpu) | ||
3284 | continue; | ||
3285 | if (unlikely(!spin_trylock(&smt_rq->lock))) | ||
3286 | continue; | ||
3287 | |||
3288 | wakeup_busy_runqueue(smt_rq); | ||
3289 | spin_unlock(&smt_rq->lock); | ||
3290 | } | ||
3291 | } | ||
3292 | |||
3293 | /* | ||
3294 | * number of 'lost' timeslices this task wont be able to fully | ||
3295 | * utilize, if another task runs on a sibling. This models the | ||
3296 | * slowdown effect of other tasks running on siblings: | ||
3297 | */ | ||
3298 | static inline unsigned long | ||
3299 | smt_slice(struct task_struct *p, struct sched_domain *sd) | ||
3300 | { | ||
3301 | return p->time_slice * (100 - sd->per_cpu_gain) / 100; | ||
3302 | } | ||
3303 | |||
3304 | /* | ||
3305 | * To minimise lock contention and not have to drop this_rq's runlock we only | ||
3306 | * trylock the sibling runqueues and bypass those runqueues if we fail to | ||
3307 | * acquire their lock. As we only trylock the normal locking order does not | ||
3308 | * need to be obeyed. | ||
3309 | */ | ||
3310 | static int | ||
3311 | dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p) | ||
3312 | { | ||
3313 | struct sched_domain *tmp, *sd = NULL; | ||
3314 | int ret = 0, i; | ||
3315 | |||
3316 | /* kernel/rt threads do not participate in dependent sleeping */ | ||
3317 | if (!p->mm || rt_task(p)) | ||
3318 | return 0; | ||
3319 | |||
3320 | for_each_domain(this_cpu, tmp) { | ||
3321 | if (tmp->flags & SD_SHARE_CPUPOWER) { | ||
3322 | sd = tmp; | ||
3323 | break; | ||
3324 | } | ||
3325 | } | ||
3326 | |||
3327 | if (!sd) | ||
3328 | return 0; | ||
3329 | |||
3330 | for_each_cpu_mask(i, sd->span) { | ||
3331 | struct task_struct *smt_curr; | ||
3332 | struct rq *smt_rq; | ||
3333 | |||
3334 | if (i == this_cpu) | ||
3335 | continue; | ||
3336 | |||
3337 | smt_rq = cpu_rq(i); | ||
3338 | if (unlikely(!spin_trylock(&smt_rq->lock))) | ||
3339 | continue; | ||
3340 | |||
3341 | smt_curr = smt_rq->curr; | ||
3342 | |||
3343 | if (!smt_curr->mm) | ||
3344 | goto unlock; | ||
3345 | |||
3346 | /* | ||
3347 | * If a user task with lower static priority than the | ||
3348 | * running task on the SMT sibling is trying to schedule, | ||
3349 | * delay it till there is proportionately less timeslice | ||
3350 | * left of the sibling task to prevent a lower priority | ||
3351 | * task from using an unfair proportion of the | ||
3352 | * physical cpu's resources. -ck | ||
3353 | */ | ||
3354 | if (rt_task(smt_curr)) { | ||
3355 | /* | ||
3356 | * With real time tasks we run non-rt tasks only | ||
3357 | * per_cpu_gain% of the time. | ||
3358 | */ | ||
3359 | if ((jiffies % DEF_TIMESLICE) > | ||
3360 | (sd->per_cpu_gain * DEF_TIMESLICE / 100)) | ||
3361 | ret = 1; | ||
3362 | } else { | ||
3363 | if (smt_curr->static_prio < p->static_prio && | ||
3364 | !TASK_PREEMPTS_CURR(p, smt_rq) && | ||
3365 | smt_slice(smt_curr, sd) > task_timeslice(p)) | ||
3366 | ret = 1; | ||
3367 | } | ||
3368 | unlock: | ||
3369 | spin_unlock(&smt_rq->lock); | ||
3370 | } | ||
3371 | return ret; | ||
3372 | } | ||
3373 | #else | ||
3374 | static inline void wake_sleeping_dependent(int this_cpu) | ||
3375 | { | ||
3376 | } | ||
3377 | static inline int | ||
3378 | dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p) | ||
3379 | { | ||
3380 | return 0; | ||
3381 | } | ||
3382 | #endif | ||
3383 | |||
3384 | #if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT) | 3234 | #if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT) |
3385 | 3235 | ||
3386 | void fastcall add_preempt_count(int val) | 3236 | void fastcall add_preempt_count(int val) |
@@ -3507,7 +3357,6 @@ need_resched_nonpreemptible: | |||
3507 | if (!rq->nr_running) { | 3357 | if (!rq->nr_running) { |
3508 | next = rq->idle; | 3358 | next = rq->idle; |
3509 | rq->expired_timestamp = 0; | 3359 | rq->expired_timestamp = 0; |
3510 | wake_sleeping_dependent(cpu); | ||
3511 | goto switch_tasks; | 3360 | goto switch_tasks; |
3512 | } | 3361 | } |
3513 | } | 3362 | } |
@@ -3547,8 +3396,6 @@ need_resched_nonpreemptible: | |||
3547 | } | 3396 | } |
3548 | } | 3397 | } |
3549 | next->sleep_type = SLEEP_NORMAL; | 3398 | next->sleep_type = SLEEP_NORMAL; |
3550 | if (rq->nr_running == 1 && dependent_sleeper(cpu, rq, next)) | ||
3551 | next = rq->idle; | ||
3552 | switch_tasks: | 3399 | switch_tasks: |
3553 | if (next == rq->idle) | 3400 | if (next == rq->idle) |
3554 | schedstat_inc(rq, sched_goidle); | 3401 | schedstat_inc(rq, sched_goidle); |
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 193a0793af95..5b0e46b56fd0 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c | |||
@@ -55,16 +55,18 @@ static DEFINE_SPINLOCK(clocksource_lock); | |||
55 | static char override_name[32]; | 55 | static char override_name[32]; |
56 | static int finished_booting; | 56 | static int finished_booting; |
57 | 57 | ||
58 | /* clocksource_done_booting - Called near the end of bootup | 58 | /* clocksource_done_booting - Called near the end of core bootup |
59 | * | 59 | * |
60 | * Hack to avoid lots of clocksource churn at boot time | 60 | * Hack to avoid lots of clocksource churn at boot time. |
61 | * We use fs_initcall because we want this to start before | ||
62 | * device_initcall but after subsys_initcall. | ||
61 | */ | 63 | */ |
62 | static int __init clocksource_done_booting(void) | 64 | static int __init clocksource_done_booting(void) |
63 | { | 65 | { |
64 | finished_booting = 1; | 66 | finished_booting = 1; |
65 | return 0; | 67 | return 0; |
66 | } | 68 | } |
67 | late_initcall(clocksource_done_booting); | 69 | fs_initcall(clocksource_done_booting); |
68 | 70 | ||
69 | #ifdef CONFIG_CLOCKSOURCE_WATCHDOG | 71 | #ifdef CONFIG_CLOCKSOURCE_WATCHDOG |
70 | static LIST_HEAD(watchdog_list); | 72 | static LIST_HEAD(watchdog_list); |
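
[Editor's note] The clocksource change moves clocksource_done_booting() from late_initcall() to fs_initcall(), and the comment now states the intent: run after subsys_initcall() but before device_initcall(). For orientation, the built-in initcall levels fire in this order (a summary from include/linux/init.h, slightly simplified):

    core_initcall()        /* level 1 */
    postcore_initcall()    /* level 2 */
    arch_initcall()        /* level 3 */
    subsys_initcall()      /* level 4 */
    fs_initcall()          /* level 5 */
    device_initcall()      /* level 6; module_init() maps here for built-in code */
    late_initcall()        /* level 7 */
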
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 12b3efeb9f6f..5567745470f7 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
@@ -284,6 +284,42 @@ void tick_shutdown_broadcast(unsigned int *cpup) | |||
284 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); | 284 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); |
285 | } | 285 | } |
286 | 286 | ||
287 | void tick_suspend_broadcast(void) | ||
288 | { | ||
289 | struct clock_event_device *bc; | ||
290 | unsigned long flags; | ||
291 | |||
292 | spin_lock_irqsave(&tick_broadcast_lock, flags); | ||
293 | |||
294 | bc = tick_broadcast_device.evtdev; | ||
295 | if (bc && tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) | ||
296 | clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN); | ||
297 | |||
298 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); | ||
299 | } | ||
300 | |||
301 | int tick_resume_broadcast(void) | ||
302 | { | ||
303 | struct clock_event_device *bc; | ||
304 | unsigned long flags; | ||
305 | int broadcast = 0; | ||
306 | |||
307 | spin_lock_irqsave(&tick_broadcast_lock, flags); | ||
308 | |||
309 | bc = tick_broadcast_device.evtdev; | ||
310 | if (bc) { | ||
311 | if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC && | ||
312 | !cpus_empty(tick_broadcast_mask)) | ||
313 | tick_broadcast_start_periodic(bc); | ||
314 | |||
315 | broadcast = cpu_isset(smp_processor_id(), tick_broadcast_mask); | ||
316 | } | ||
317 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); | ||
318 | |||
319 | return broadcast; | ||
320 | } | ||
321 | |||
322 | |||
287 | #ifdef CONFIG_TICK_ONESHOT | 323 | #ifdef CONFIG_TICK_ONESHOT |
288 | 324 | ||
289 | static cpumask_t tick_broadcast_oneshot_mask; | 325 | static cpumask_t tick_broadcast_oneshot_mask; |
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 0986a2bfab49..43ba1bdec14c 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c | |||
@@ -298,6 +298,28 @@ static void tick_shutdown(unsigned int *cpup) | |||
298 | spin_unlock_irqrestore(&tick_device_lock, flags); | 298 | spin_unlock_irqrestore(&tick_device_lock, flags); |
299 | } | 299 | } |
300 | 300 | ||
301 | static void tick_suspend_periodic(void) | ||
302 | { | ||
303 | struct tick_device *td = &__get_cpu_var(tick_cpu_device); | ||
304 | unsigned long flags; | ||
305 | |||
306 | spin_lock_irqsave(&tick_device_lock, flags); | ||
307 | if (td->mode == TICKDEV_MODE_PERIODIC) | ||
308 | clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_SHUTDOWN); | ||
309 | spin_unlock_irqrestore(&tick_device_lock, flags); | ||
310 | } | ||
311 | |||
312 | static void tick_resume_periodic(void) | ||
313 | { | ||
314 | struct tick_device *td = &__get_cpu_var(tick_cpu_device); | ||
315 | unsigned long flags; | ||
316 | |||
317 | spin_lock_irqsave(&tick_device_lock, flags); | ||
318 | if (td->mode == TICKDEV_MODE_PERIODIC) | ||
319 | tick_setup_periodic(td->evtdev, 0); | ||
320 | spin_unlock_irqrestore(&tick_device_lock, flags); | ||
321 | } | ||
322 | |||
301 | /* | 323 | /* |
302 | * Notification about clock event devices | 324 | * Notification about clock event devices |
303 | */ | 325 | */ |
@@ -325,6 +347,16 @@ static int tick_notify(struct notifier_block *nb, unsigned long reason, | |||
325 | tick_shutdown(dev); | 347 | tick_shutdown(dev); |
326 | break; | 348 | break; |
327 | 349 | ||
350 | case CLOCK_EVT_NOTIFY_SUSPEND: | ||
351 | tick_suspend_periodic(); | ||
352 | tick_suspend_broadcast(); | ||
353 | break; | ||
354 | |||
355 | case CLOCK_EVT_NOTIFY_RESUME: | ||
356 | if (!tick_resume_broadcast()) | ||
357 | tick_resume_periodic(); | ||
358 | break; | ||
359 | |||
328 | default: | 360 | default: |
329 | break; | 361 | break; |
330 | } | 362 | } |
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index 54861a0f29ff..75890efd24ff 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h | |||
@@ -67,6 +67,8 @@ extern int tick_check_broadcast_device(struct clock_event_device *dev); | |||
67 | extern int tick_is_broadcast_device(struct clock_event_device *dev); | 67 | extern int tick_is_broadcast_device(struct clock_event_device *dev); |
68 | extern void tick_broadcast_on_off(unsigned long reason, int *oncpu); | 68 | extern void tick_broadcast_on_off(unsigned long reason, int *oncpu); |
69 | extern void tick_shutdown_broadcast(unsigned int *cpup); | 69 | extern void tick_shutdown_broadcast(unsigned int *cpup); |
70 | extern void tick_suspend_broadcast(void); | ||
71 | extern int tick_resume_broadcast(void); | ||
70 | 72 | ||
71 | extern void | 73 | extern void |
72 | tick_set_periodic_handler(struct clock_event_device *dev, int broadcast); | 74 | tick_set_periodic_handler(struct clock_event_device *dev, int broadcast); |
@@ -90,6 +92,8 @@ static inline int tick_device_uses_broadcast(struct clock_event_device *dev, | |||
90 | static inline void tick_do_periodic_broadcast(struct clock_event_device *d) { } | 92 | static inline void tick_do_periodic_broadcast(struct clock_event_device *d) { } |
91 | static inline void tick_broadcast_on_off(unsigned long reason, int *oncpu) { } | 93 | static inline void tick_broadcast_on_off(unsigned long reason, int *oncpu) { } |
92 | static inline void tick_shutdown_broadcast(unsigned int *cpup) { } | 94 | static inline void tick_shutdown_broadcast(unsigned int *cpup) { } |
95 | static inline void tick_suspend_broadcast(void) { } | ||
96 | static inline int tick_resume_broadcast(void) { return 0; } | ||
93 | 97 | ||
94 | /* | 98 | /* |
95 | * Set the periodic handler in non broadcast mode | 99 | * Set the periodic handler in non broadcast mode |
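
[Editor's note] tick-internal.h pairs the new tick_suspend_broadcast()/tick_resume_broadcast() declarations with static inline no-op stubs for configurations without broadcast support, so callers such as tick_notify() stay free of #ifdefs. The stub-pair idiom, sketched generically with illustrative names that are not part of the patch:

    #ifdef CONFIG_FEATURE_X
    extern void feature_x_suspend(void);
    extern int  feature_x_resume(void);     /* returns nonzero if it handled resume */
    #else
    static inline void feature_x_suspend(void) { }
    static inline int  feature_x_resume(void) { return 0; }
    #endif

    /* Callers compile the same way either way:
     *
     *     if (!feature_x_resume())
     *             resume_periodic_tick();
     */
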
diff --git a/kernel/timer.c b/kernel/timer.c index 6663a87f7304..797cccb86431 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
@@ -862,6 +862,8 @@ int do_settimeofday(struct timespec *tv) | |||
862 | clock->error = 0; | 862 | clock->error = 0; |
863 | ntp_clear(); | 863 | ntp_clear(); |
864 | 864 | ||
865 | update_vsyscall(&xtime, clock); | ||
866 | |||
865 | write_sequnlock_irqrestore(&xtime_lock, flags); | 867 | write_sequnlock_irqrestore(&xtime_lock, flags); |
866 | 868 | ||
867 | /* signal hrtimers about time change */ | 869 | /* signal hrtimers about time change */ |
@@ -997,6 +999,9 @@ static int timekeeping_resume(struct sys_device *dev) | |||
997 | write_sequnlock_irqrestore(&xtime_lock, flags); | 999 | write_sequnlock_irqrestore(&xtime_lock, flags); |
998 | 1000 | ||
999 | touch_softlockup_watchdog(); | 1001 | touch_softlockup_watchdog(); |
1002 | |||
1003 | clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL); | ||
1004 | |||
1000 | /* Resume hrtimers */ | 1005 | /* Resume hrtimers */ |
1001 | clock_was_set(); | 1006 | clock_was_set(); |
1002 | 1007 | ||
@@ -1011,6 +1016,9 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state) | |||
1011 | timekeeping_suspended = 1; | 1016 | timekeeping_suspended = 1; |
1012 | timekeeping_suspend_time = read_persistent_clock(); | 1017 | timekeeping_suspend_time = read_persistent_clock(); |
1013 | write_sequnlock_irqrestore(&xtime_lock, flags); | 1018 | write_sequnlock_irqrestore(&xtime_lock, flags); |
1019 | |||
1020 | clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL); | ||
1021 | |||
1014 | return 0; | 1022 | return 0; |
1015 | } | 1023 | } |
1016 | 1024 | ||
@@ -1651,8 +1659,8 @@ static void __devinit migrate_timers(int cpu) | |||
1651 | new_base = get_cpu_var(tvec_bases); | 1659 | new_base = get_cpu_var(tvec_bases); |
1652 | 1660 | ||
1653 | local_irq_disable(); | 1661 | local_irq_disable(); |
1654 | spin_lock(&new_base->lock); | 1662 | double_spin_lock(&new_base->lock, &old_base->lock, |
1655 | spin_lock(&old_base->lock); | 1663 | smp_processor_id() < cpu); |
1656 | 1664 | ||
1657 | BUG_ON(old_base->running_timer); | 1665 | BUG_ON(old_base->running_timer); |
1658 | 1666 | ||
@@ -1665,8 +1673,8 @@ static void __devinit migrate_timers(int cpu) | |||
1665 | migrate_timer_list(new_base, old_base->tv5.vec + i); | 1673 | migrate_timer_list(new_base, old_base->tv5.vec + i); |
1666 | } | 1674 | } |
1667 | 1675 | ||
1668 | spin_unlock(&old_base->lock); | 1676 | double_spin_unlock(&new_base->lock, &old_base->lock, |
1669 | spin_unlock(&new_base->lock); | 1677 | smp_processor_id() < cpu); |
1670 | local_irq_enable(); | 1678 | local_irq_enable(); |
1671 | put_cpu_var(tvec_bases); | 1679 | put_cpu_var(tvec_bases); |
1672 | } | 1680 | } |
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 623a68af8b18..9970e55c90bd 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c | |||
@@ -28,7 +28,6 @@ | |||
28 | #include <asm/io.h> | 28 | #include <asm/io.h> |
29 | #include <asm/dma.h> | 29 | #include <asm/dma.h> |
30 | #include <asm/scatterlist.h> | 30 | #include <asm/scatterlist.h> |
31 | #include <asm/swiotlb.h> | ||
32 | 31 | ||
33 | #include <linux/init.h> | 32 | #include <linux/init.h> |
34 | #include <linux/bootmem.h> | 33 | #include <linux/bootmem.h> |
@@ -36,10 +35,8 @@ | |||
36 | #define OFFSET(val,align) ((unsigned long) \ | 35 | #define OFFSET(val,align) ((unsigned long) \ |
37 | ( (val) & ( (align) - 1))) | 36 | ( (val) & ( (align) - 1))) |
38 | 37 | ||
39 | #ifndef SG_ENT_VIRT_ADDRESS | ||
40 | #define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset) | 38 | #define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset) |
41 | #define SG_ENT_PHYS_ADDRESS(sg) virt_to_bus(SG_ENT_VIRT_ADDRESS(sg)) | 39 | #define SG_ENT_PHYS_ADDRESS(sg) virt_to_bus(SG_ENT_VIRT_ADDRESS(sg)) |
42 | #endif | ||
43 | 40 | ||
44 | /* | 41 | /* |
45 | * Maximum allowable number of contiguous slabs to map, | 42 | * Maximum allowable number of contiguous slabs to map, |
@@ -104,25 +101,13 @@ static unsigned int io_tlb_index; | |||
104 | * We need to save away the original address corresponding to a mapped entry | 101 | * We need to save away the original address corresponding to a mapped entry |
105 | * for the sync operations. | 102 | * for the sync operations. |
106 | */ | 103 | */ |
107 | #ifndef SWIOTLB_ARCH_HAS_IO_TLB_ADDR_T | 104 | static unsigned char **io_tlb_orig_addr; |
108 | typedef char *io_tlb_addr_t; | ||
109 | #define swiotlb_orig_addr_null(buffer) (!(buffer)) | ||
110 | #define ptr_to_io_tlb_addr(ptr) (ptr) | ||
111 | #define page_to_io_tlb_addr(pg, off) (page_address(pg) + (off)) | ||
112 | #define sg_to_io_tlb_addr(sg) SG_ENT_VIRT_ADDRESS(sg) | ||
113 | #endif | ||
114 | static io_tlb_addr_t *io_tlb_orig_addr; | ||
115 | 105 | ||
116 | /* | 106 | /* |
117 | * Protect the above data structures in the map and unmap calls | 107 | * Protect the above data structures in the map and unmap calls |
118 | */ | 108 | */ |
119 | static DEFINE_SPINLOCK(io_tlb_lock); | 109 | static DEFINE_SPINLOCK(io_tlb_lock); |
120 | 110 | ||
121 | #ifdef SWIOTLB_EXTRA_VARIABLES | ||
122 | SWIOTLB_EXTRA_VARIABLES; | ||
123 | #endif | ||
124 | |||
125 | #ifndef SWIOTLB_ARCH_HAS_SETUP_IO_TLB_NPAGES | ||
126 | static int __init | 111 | static int __init |
127 | setup_io_tlb_npages(char *str) | 112 | setup_io_tlb_npages(char *str) |
128 | { | 113 | { |
@@ -137,25 +122,9 @@ setup_io_tlb_npages(char *str) | |||
137 | swiotlb_force = 1; | 122 | swiotlb_force = 1; |
138 | return 1; | 123 | return 1; |
139 | } | 124 | } |
140 | #endif | ||
141 | __setup("swiotlb=", setup_io_tlb_npages); | 125 | __setup("swiotlb=", setup_io_tlb_npages); |
142 | /* make io_tlb_overflow tunable too? */ | 126 | /* make io_tlb_overflow tunable too? */ |
143 | 127 | ||
144 | #ifndef swiotlb_adjust_size | ||
145 | #define swiotlb_adjust_size(size) ((void)0) | ||
146 | #endif | ||
147 | |||
148 | #ifndef swiotlb_adjust_seg | ||
149 | #define swiotlb_adjust_seg(start, size) ((void)0) | ||
150 | #endif | ||
151 | |||
152 | #ifndef swiotlb_print_info | ||
153 | #define swiotlb_print_info(bytes) \ | ||
154 | printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - " \ | ||
155 | "0x%lx\n", bytes >> 20, \ | ||
156 | virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end)) | ||
157 | #endif | ||
158 | |||
159 | /* | 128 | /* |
160 | * Statically reserve bounce buffer space and initialize bounce buffer data | 129 | * Statically reserve bounce buffer space and initialize bounce buffer data |
161 | * structures for the software IO TLB used to implement the DMA API. | 130 | * structures for the software IO TLB used to implement the DMA API. |
@@ -169,8 +138,6 @@ swiotlb_init_with_default_size(size_t default_size) | |||
169 | io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); | 138 | io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); |
170 | io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); | 139 | io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); |
171 | } | 140 | } |
172 | swiotlb_adjust_size(io_tlb_nslabs); | ||
173 | swiotlb_adjust_size(io_tlb_overflow); | ||
174 | 141 | ||
175 | bytes = io_tlb_nslabs << IO_TLB_SHIFT; | 142 | bytes = io_tlb_nslabs << IO_TLB_SHIFT; |
176 | 143 | ||
@@ -188,14 +155,10 @@ swiotlb_init_with_default_size(size_t default_size) | |||
188 | * between io_tlb_start and io_tlb_end. | 155 | * between io_tlb_start and io_tlb_end. |
189 | */ | 156 | */ |
190 | io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int)); | 157 | io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int)); |
191 | for (i = 0; i < io_tlb_nslabs; i++) { | 158 | for (i = 0; i < io_tlb_nslabs; i++) |
192 | if ( !(i % IO_TLB_SEGSIZE) ) | ||
193 | swiotlb_adjust_seg(io_tlb_start + (i << IO_TLB_SHIFT), | ||
194 | IO_TLB_SEGSIZE << IO_TLB_SHIFT); | ||
195 | io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); | 159 | io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); |
196 | } | ||
197 | io_tlb_index = 0; | 160 | io_tlb_index = 0; |
198 | io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(io_tlb_addr_t)); | 161 | io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *)); |
199 | 162 | ||
200 | /* | 163 | /* |
201 | * Get the overflow emergency buffer | 164 | * Get the overflow emergency buffer |
@@ -203,21 +166,17 @@ swiotlb_init_with_default_size(size_t default_size) | |||
203 | io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow); | 166 | io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow); |
204 | if (!io_tlb_overflow_buffer) | 167 | if (!io_tlb_overflow_buffer) |
205 | panic("Cannot allocate SWIOTLB overflow buffer!\n"); | 168 | panic("Cannot allocate SWIOTLB overflow buffer!\n"); |
206 | swiotlb_adjust_seg(io_tlb_overflow_buffer, io_tlb_overflow); | ||
207 | 169 | ||
208 | swiotlb_print_info(bytes); | 170 | printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n", |
171 | virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end)); | ||
209 | } | 172 | } |
210 | #ifndef __swiotlb_init_with_default_size | ||
211 | #define __swiotlb_init_with_default_size swiotlb_init_with_default_size | ||
212 | #endif | ||
213 | 173 | ||
214 | void __init | 174 | void __init |
215 | swiotlb_init(void) | 175 | swiotlb_init(void) |
216 | { | 176 | { |
217 | __swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */ | 177 | swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */ |
218 | } | 178 | } |
219 | 179 | ||
220 | #ifdef SWIOTLB_ARCH_NEED_LATE_INIT | ||
221 | /* | 180 | /* |
222 | * Systems with larger DMA zones (those that don't support ISA) can | 181 | * Systems with larger DMA zones (those that don't support ISA) can |
223 | * initialize the swiotlb later using the slab allocator if needed. | 182 | * initialize the swiotlb later using the slab allocator if needed. |
@@ -275,12 +234,12 @@ swiotlb_late_init_with_default_size(size_t default_size) | |||
275 | io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); | 234 | io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); |
276 | io_tlb_index = 0; | 235 | io_tlb_index = 0; |
277 | 236 | ||
278 | io_tlb_orig_addr = (io_tlb_addr_t *)__get_free_pages(GFP_KERNEL, | 237 | io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL, |
279 | get_order(io_tlb_nslabs * sizeof(io_tlb_addr_t))); | 238 | get_order(io_tlb_nslabs * sizeof(char *))); |
280 | if (!io_tlb_orig_addr) | 239 | if (!io_tlb_orig_addr) |
281 | goto cleanup3; | 240 | goto cleanup3; |
282 | 241 | ||
283 | memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(io_tlb_addr_t)); | 242 | memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *)); |
284 | 243 | ||
285 | /* | 244 | /* |
286 | * Get the overflow emergency buffer | 245 | * Get the overflow emergency buffer |
@@ -290,17 +249,19 @@ swiotlb_late_init_with_default_size(size_t default_size) | |||
290 | if (!io_tlb_overflow_buffer) | 249 | if (!io_tlb_overflow_buffer) |
291 | goto cleanup4; | 250 | goto cleanup4; |
292 | 251 | ||
293 | swiotlb_print_info(bytes); | 252 | printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - " |
253 | "0x%lx\n", bytes >> 20, | ||
254 | virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end)); | ||
294 | 255 | ||
295 | return 0; | 256 | return 0; |
296 | 257 | ||
297 | cleanup4: | 258 | cleanup4: |
298 | free_pages((unsigned long)io_tlb_orig_addr, | 259 | free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs * |
299 | get_order(io_tlb_nslabs * sizeof(io_tlb_addr_t))); | 260 | sizeof(char *))); |
300 | io_tlb_orig_addr = NULL; | 261 | io_tlb_orig_addr = NULL; |
301 | cleanup3: | 262 | cleanup3: |
302 | free_pages((unsigned long)io_tlb_list, | 263 | free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs * |
303 | get_order(io_tlb_nslabs * sizeof(int))); | 264 | sizeof(int))); |
304 | io_tlb_list = NULL; | 265 | io_tlb_list = NULL; |
305 | cleanup2: | 266 | cleanup2: |
306 | io_tlb_end = NULL; | 267 | io_tlb_end = NULL; |
@@ -310,9 +271,7 @@ cleanup1: | |||
310 | io_tlb_nslabs = req_nslabs; | 271 | io_tlb_nslabs = req_nslabs; |
311 | return -ENOMEM; | 272 | return -ENOMEM; |
312 | } | 273 | } |
313 | #endif | ||
314 | 274 | ||
315 | #ifndef SWIOTLB_ARCH_HAS_NEEDS_MAPPING | ||
316 | static int | 275 | static int |
317 | address_needs_mapping(struct device *hwdev, dma_addr_t addr) | 276 | address_needs_mapping(struct device *hwdev, dma_addr_t addr) |
318 | { | 277 | { |
@@ -323,35 +282,11 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr) | |||
323 | return (addr & ~mask) != 0; | 282 | return (addr & ~mask) != 0; |
324 | } | 283 | } |
325 | 284 | ||
326 | static inline int range_needs_mapping(const void *ptr, size_t size) | ||
327 | { | ||
328 | return swiotlb_force; | ||
329 | } | ||
330 | |||
331 | static inline int order_needs_mapping(unsigned int order) | ||
332 | { | ||
333 | return 0; | ||
334 | } | ||
335 | #endif | ||
336 | |||
337 | static void | ||
338 | __sync_single(io_tlb_addr_t buffer, char *dma_addr, size_t size, int dir) | ||
339 | { | ||
340 | #ifndef SWIOTLB_ARCH_HAS_SYNC_SINGLE | ||
341 | if (dir == DMA_TO_DEVICE) | ||
342 | memcpy(dma_addr, buffer, size); | ||
343 | else | ||
344 | memcpy(buffer, dma_addr, size); | ||
345 | #else | ||
346 | __swiotlb_arch_sync_single(buffer, dma_addr, size, dir); | ||
347 | #endif | ||
348 | } | ||
349 | |||
350 | /* | 285 | /* |
351 | * Allocates bounce buffer and returns its kernel virtual address. | 286 | * Allocates bounce buffer and returns its kernel virtual address. |
352 | */ | 287 | */ |
353 | static void * | 288 | static void * |
354 | map_single(struct device *hwdev, io_tlb_addr_t buffer, size_t size, int dir) | 289 | map_single(struct device *hwdev, char *buffer, size_t size, int dir) |
355 | { | 290 | { |
356 | unsigned long flags; | 291 | unsigned long flags; |
357 | char *dma_addr; | 292 | char *dma_addr; |
@@ -424,7 +359,7 @@ map_single(struct device *hwdev, io_tlb_addr_t buffer, size_t size, int dir) | |||
424 | */ | 359 | */ |
425 | io_tlb_orig_addr[index] = buffer; | 360 | io_tlb_orig_addr[index] = buffer; |
426 | if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) | 361 | if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) |
427 | __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE); | 362 | memcpy(dma_addr, buffer, size); |
428 | 363 | ||
429 | return dma_addr; | 364 | return dma_addr; |
430 | } | 365 | } |
@@ -438,18 +373,17 @@ unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir) | |||
438 | unsigned long flags; | 373 | unsigned long flags; |
439 | int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; | 374 | int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; |
440 | int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; | 375 | int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; |
441 | io_tlb_addr_t buffer = io_tlb_orig_addr[index]; | 376 | char *buffer = io_tlb_orig_addr[index]; |
442 | 377 | ||
443 | /* | 378 | /* |
444 | * First, sync the memory before unmapping the entry | 379 | * First, sync the memory before unmapping the entry |
445 | */ | 380 | */ |
446 | if (!swiotlb_orig_addr_null(buffer) | 381 | if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))) |
447 | && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))) | ||
448 | /* | 382 | /* |
449 | * bounce... copy the data back into the original buffer * and | 383 | * bounce... copy the data back into the original buffer * and |
450 | * delete the bounce buffer. | 384 | * delete the bounce buffer. |
451 | */ | 385 | */ |
452 | __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE); | 386 | memcpy(buffer, dma_addr, size); |
453 | 387 | ||
454 | /* | 388 | /* |
455 | * Return the buffer to the free list by setting the corresponding | 389 | * Return the buffer to the free list by setting the corresponding |
@@ -482,18 +416,18 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size, | |||
482 | int dir, int target) | 416 | int dir, int target) |
483 | { | 417 | { |
484 | int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; | 418 | int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; |
485 | io_tlb_addr_t buffer = io_tlb_orig_addr[index]; | 419 | char *buffer = io_tlb_orig_addr[index]; |
486 | 420 | ||
487 | switch (target) { | 421 | switch (target) { |
488 | case SYNC_FOR_CPU: | 422 | case SYNC_FOR_CPU: |
489 | if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)) | 423 | if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)) |
490 | __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE); | 424 | memcpy(buffer, dma_addr, size); |
491 | else | 425 | else |
492 | BUG_ON(dir != DMA_TO_DEVICE); | 426 | BUG_ON(dir != DMA_TO_DEVICE); |
493 | break; | 427 | break; |
494 | case SYNC_FOR_DEVICE: | 428 | case SYNC_FOR_DEVICE: |
495 | if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) | 429 | if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) |
496 | __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE); | 430 | memcpy(dma_addr, buffer, size); |
497 | else | 431 | else |
498 | BUG_ON(dir != DMA_FROM_DEVICE); | 432 | BUG_ON(dir != DMA_FROM_DEVICE); |
499 | break; | 433 | break; |
@@ -502,8 +436,6 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size, | |||
502 | } | 436 | } |
503 | } | 437 | } |
504 | 438 | ||
505 | #ifdef SWIOTLB_ARCH_NEED_ALLOC | ||
506 | |||
507 | void * | 439 | void * |
508 | swiotlb_alloc_coherent(struct device *hwdev, size_t size, | 440 | swiotlb_alloc_coherent(struct device *hwdev, size_t size, |
509 | dma_addr_t *dma_handle, gfp_t flags) | 441 | dma_addr_t *dma_handle, gfp_t flags) |
@@ -519,10 +451,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, | |||
519 | */ | 451 | */ |
520 | flags |= GFP_DMA; | 452 | flags |= GFP_DMA; |
521 | 453 | ||
522 | if (!order_needs_mapping(order)) | 454 | ret = (void *)__get_free_pages(flags, order); |
523 | ret = (void *)__get_free_pages(flags, order); | ||
524 | else | ||
525 | ret = NULL; | ||
526 | if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) { | 455 | if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) { |
527 | /* | 456 | /* |
528 | * The allocated memory isn't reachable by the device. | 457 | * The allocated memory isn't reachable by the device. |
@@ -560,7 +489,6 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, | |||
560 | *dma_handle = dev_addr; | 489 | *dma_handle = dev_addr; |
561 | return ret; | 490 | return ret; |
562 | } | 491 | } |
563 | EXPORT_SYMBOL(swiotlb_alloc_coherent); | ||
564 | 492 | ||
565 | void | 493 | void |
566 | swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, | 494 | swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, |
@@ -573,9 +501,6 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, | |||
573 | /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ | 501 | /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ |
574 | swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE); | 502 | swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE); |
575 | } | 503 | } |
576 | EXPORT_SYMBOL(swiotlb_free_coherent); | ||
577 | |||
578 | #endif | ||
579 | 504 | ||
580 | static void | 505 | static void |
581 | swiotlb_full(struct device *dev, size_t size, int dir, int do_panic) | 506 | swiotlb_full(struct device *dev, size_t size, int dir, int do_panic) |
@@ -617,14 +542,13 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir) | |||
617 | * we can safely return the device addr and not worry about bounce | 542 | * we can safely return the device addr and not worry about bounce |
618 | * buffering it. | 543 | * buffering it. |
619 | */ | 544 | */ |
620 | if (!range_needs_mapping(ptr, size) | 545 | if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force) |
621 | && !address_needs_mapping(hwdev, dev_addr)) | ||
622 | return dev_addr; | 546 | return dev_addr; |
623 | 547 | ||
624 | /* | 548 | /* |
625 | * Oh well, have to allocate and map a bounce buffer. | 549 | * Oh well, have to allocate and map a bounce buffer. |
626 | */ | 550 | */ |
627 | map = map_single(hwdev, ptr_to_io_tlb_addr(ptr), size, dir); | 551 | map = map_single(hwdev, ptr, size, dir); |
628 | if (!map) { | 552 | if (!map) { |
629 | swiotlb_full(hwdev, size, dir, 1); | 553 | swiotlb_full(hwdev, size, dir, 1); |
630 | map = io_tlb_overflow_buffer; | 554 | map = io_tlb_overflow_buffer; |
@@ -752,16 +676,17 @@ int | |||
752 | swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems, | 676 | swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems, |
753 | int dir) | 677 | int dir) |
754 | { | 678 | { |
679 | void *addr; | ||
755 | dma_addr_t dev_addr; | 680 | dma_addr_t dev_addr; |
756 | int i; | 681 | int i; |
757 | 682 | ||
758 | BUG_ON(dir == DMA_NONE); | 683 | BUG_ON(dir == DMA_NONE); |
759 | 684 | ||
760 | for (i = 0; i < nelems; i++, sg++) { | 685 | for (i = 0; i < nelems; i++, sg++) { |
761 | dev_addr = SG_ENT_PHYS_ADDRESS(sg); | 686 | addr = SG_ENT_VIRT_ADDRESS(sg); |
762 | if (range_needs_mapping(SG_ENT_VIRT_ADDRESS(sg), sg->length) | 687 | dev_addr = virt_to_bus(addr); |
763 | || address_needs_mapping(hwdev, dev_addr)) { | 688 | if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) { |
764 | void *map = map_single(hwdev, sg_to_io_tlb_addr(sg), sg->length, dir); | 689 | void *map = map_single(hwdev, addr, sg->length, dir); |
765 | if (!map) { | 690 | if (!map) { |
766 | /* Don't panic here, we expect map_sg users | 691 | /* Don't panic here, we expect map_sg users |
767 | to do proper error handling. */ | 692 | to do proper error handling. */ |
@@ -835,44 +760,6 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, | |||
835 | swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); | 760 | swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); |
836 | } | 761 | } |
837 | 762 | ||
838 | #ifdef SWIOTLB_ARCH_NEED_MAP_PAGE | ||
839 | |||
840 | dma_addr_t | ||
841 | swiotlb_map_page(struct device *hwdev, struct page *page, | ||
842 | unsigned long offset, size_t size, | ||
843 | enum dma_data_direction direction) | ||
844 | { | ||
845 | dma_addr_t dev_addr; | ||
846 | char *map; | ||
847 | |||
848 | dev_addr = page_to_bus(page) + offset; | ||
849 | if (address_needs_mapping(hwdev, dev_addr)) { | ||
850 | map = map_single(hwdev, page_to_io_tlb_addr(page, offset), size, direction); | ||
851 | if (!map) { | ||
852 | swiotlb_full(hwdev, size, direction, 1); | ||
853 | map = io_tlb_overflow_buffer; | ||
854 | } | ||
855 | dev_addr = virt_to_bus(map); | ||
856 | } | ||
857 | |||
858 | return dev_addr; | ||
859 | } | ||
860 | |||
861 | void | ||
862 | swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, | ||
863 | size_t size, enum dma_data_direction direction) | ||
864 | { | ||
865 | char *dma_addr = bus_to_virt(dev_addr); | ||
866 | |||
867 | BUG_ON(direction == DMA_NONE); | ||
868 | if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) | ||
869 | unmap_single(hwdev, dma_addr, size, direction); | ||
870 | else if (direction == DMA_FROM_DEVICE) | ||
871 | dma_mark_clean(dma_addr, size); | ||
872 | } | ||
873 | |||
874 | #endif | ||
875 | |||
876 | int | 763 | int |
877 | swiotlb_dma_mapping_error(dma_addr_t dma_addr) | 764 | swiotlb_dma_mapping_error(dma_addr_t dma_addr) |
878 | { | 765 | { |
@@ -885,13 +772,10 @@ swiotlb_dma_mapping_error(dma_addr_t dma_addr) | |||
885 | * during bus mastering, then you would pass 0x00ffffff as the mask to | 772 | * during bus mastering, then you would pass 0x00ffffff as the mask to |
886 | * this function. | 773 | * this function. |
887 | */ | 774 | */ |
888 | #ifndef __swiotlb_dma_supported | ||
889 | #define __swiotlb_dma_supported(hwdev, mask) (virt_to_bus(io_tlb_end - 1) <= (mask)) | ||
890 | #endif | ||
891 | int | 775 | int |
892 | swiotlb_dma_supported(struct device *hwdev, u64 mask) | 776 | swiotlb_dma_supported(struct device *hwdev, u64 mask) |
893 | { | 777 | { |
894 | return __swiotlb_dma_supported(hwdev, mask); | 778 | return virt_to_bus(io_tlb_end - 1) <= mask; |
895 | } | 779 | } |
896 | 780 | ||
897 | EXPORT_SYMBOL(swiotlb_init); | 781 | EXPORT_SYMBOL(swiotlb_init); |
@@ -906,4 +790,6 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device); | |||
906 | EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu); | 790 | EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu); |
907 | EXPORT_SYMBOL(swiotlb_sync_sg_for_device); | 791 | EXPORT_SYMBOL(swiotlb_sync_sg_for_device); |
908 | EXPORT_SYMBOL(swiotlb_dma_mapping_error); | 792 | EXPORT_SYMBOL(swiotlb_dma_mapping_error); |
793 | EXPORT_SYMBOL(swiotlb_alloc_coherent); | ||
794 | EXPORT_SYMBOL(swiotlb_free_coherent); | ||
909 | EXPORT_SYMBOL(swiotlb_dma_supported); | 795 | EXPORT_SYMBOL(swiotlb_dma_supported); |
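
[Editor's note] With the per-arch override hooks removed, the swiotlb bounce path is plain memcpy() again: map_single() copies the caller's buffer into the bounce slot before a DMA_TO_DEVICE transfer, and unmap/sync copy the bounce slot back for DMA_FROM_DEVICE. A self-contained userspace sketch of that contract, with hypothetical names, only meant to show the two copy directions:

    #include <string.h>

    enum dir { TO_DEVICE, FROM_DEVICE };

    /* "Map": the device only sees the bounce slot, so outbound data must be
     * copied in before the transfer starts. */
    void bounce_map(char *bounce, const char *orig, size_t len, enum dir d)
    {
            if (d == TO_DEVICE)
                    memcpy(bounce, orig, len);
    }

    /* "Unmap": data the device produced lands in the bounce slot and is
     * copied back to the caller's buffer afterwards. */
    void bounce_unmap(const char *bounce, char *orig, size_t len, enum dir d)
    {
            if (d == FROM_DEVICE)
                    memcpy(orig, bounce, len);
    }

    int main(void)
    {
            char device_slot[16];
            char user_buf[16] = "payload";

            bounce_map(device_slot, user_buf, sizeof(user_buf), TO_DEVICE);
            /* ... device DMA would happen against device_slot here ... */
            bounce_unmap(device_slot, user_buf, sizeof(user_buf), FROM_DEVICE);
            return 0;
    }
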
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index cf2a5381030a..d76e8eb342d0 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
@@ -321,15 +321,6 @@ static inline int check_pgd_range(struct vm_area_struct *vma, | |||
321 | return 0; | 321 | return 0; |
322 | } | 322 | } |
323 | 323 | ||
324 | /* Check if a vma is migratable */ | ||
325 | static inline int vma_migratable(struct vm_area_struct *vma) | ||
326 | { | ||
327 | if (vma->vm_flags & ( | ||
328 | VM_LOCKED|VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED)) | ||
329 | return 0; | ||
330 | return 1; | ||
331 | } | ||
332 | |||
333 | /* | 324 | /* |
334 | * Check if all pages in a range are on a set of nodes. | 325 | * Check if all pages in a range are on a set of nodes. |
335 | * If pagelist != NULL then isolate pages from the LRU and | 326 | * If pagelist != NULL then isolate pages from the LRU and |
diff --git a/mm/migrate.c b/mm/migrate.c index e9b161bde95b..7a66ca25dc8a 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
@@ -781,7 +781,7 @@ static int do_move_pages(struct mm_struct *mm, struct page_to_node *pm, | |||
781 | 781 | ||
782 | err = -EFAULT; | 782 | err = -EFAULT; |
783 | vma = find_vma(mm, pp->addr); | 783 | vma = find_vma(mm, pp->addr); |
784 | if (!vma) | 784 | if (!vma || !vma_migratable(vma)) |
785 | goto set_status; | 785 | goto set_status; |
786 | 786 | ||
787 | page = follow_page(vma, pp->addr, FOLL_GET); | 787 | page = follow_page(vma, pp->addr, FOLL_GET); |
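
[Editor's note] These two hunks read together as "share vma_migratable()": the helper is deleted from mempolicy.c and do_move_pages() in migrate.c now calls it, presumably via a common header not shown in this hunk. The test itself is unchanged; for reference, this is what the removed lines did:

    /* Check if a vma is migratable */
    static inline int vma_migratable(struct vm_area_struct *vma)
    {
            if (vma->vm_flags &
                (VM_LOCKED|VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
                    return 0;
            return 1;
    }
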
diff --git a/mm/shmem.c b/mm/shmem.c index fcb07882c8e0..b8c429a2d271 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -175,7 +175,7 @@ static inline void shmem_unacct_blocks(unsigned long flags, long pages) | |||
175 | vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE)); | 175 | vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE)); |
176 | } | 176 | } |
177 | 177 | ||
178 | static struct super_operations shmem_ops; | 178 | static const struct super_operations shmem_ops; |
179 | static const struct address_space_operations shmem_aops; | 179 | static const struct address_space_operations shmem_aops; |
180 | static const struct file_operations shmem_file_operations; | 180 | static const struct file_operations shmem_file_operations; |
181 | static const struct inode_operations shmem_inode_operations; | 181 | static const struct inode_operations shmem_inode_operations; |
@@ -2383,7 +2383,7 @@ static const struct inode_operations shmem_special_inode_operations = { | |||
2383 | #endif | 2383 | #endif |
2384 | }; | 2384 | }; |
2385 | 2385 | ||
2386 | static struct super_operations shmem_ops = { | 2386 | static const struct super_operations shmem_ops = { |
2387 | .alloc_inode = shmem_alloc_inode, | 2387 | .alloc_inode = shmem_alloc_inode, |
2388 | .destroy_inode = shmem_destroy_inode, | 2388 | .destroy_inode = shmem_destroy_inode, |
2389 | #ifdef CONFIG_TMPFS | 2389 | #ifdef CONFIG_TMPFS |
diff --git a/net/core/sock.c b/net/core/sock.c index e9986acdd0ab..8d65d6478dcd 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -1597,7 +1597,7 @@ int compat_sock_common_getsockopt(struct socket *sock, int level, int optname, | |||
1597 | { | 1597 | { |
1598 | struct sock *sk = sock->sk; | 1598 | struct sock *sk = sock->sk; |
1599 | 1599 | ||
1600 | if (sk->sk_prot->compat_setsockopt != NULL) | 1600 | if (sk->sk_prot->compat_getsockopt != NULL) |
1601 | return sk->sk_prot->compat_getsockopt(sk, level, optname, | 1601 | return sk->sk_prot->compat_getsockopt(sk, level, optname, |
1602 | optval, optlen); | 1602 | optval, optlen); |
1603 | return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen); | 1603 | return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen); |
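
[Editor's note] The one-character fix matters because the wrong member was being tested: the compat getsockopt path checked compat_setsockopt before calling compat_getsockopt, so a protocol providing only one of the two compat hooks could dereference a NULL pointer or silently skip its handler. The guard-then-fallback shape, sketched with a hypothetical standalone struct:

    struct proto_sketch {
            int (*compat_getsockopt)(int level, int optname);
            int (*getsockopt)(int level, int optname);
    };

    int do_getsockopt(const struct proto_sketch *p, int level, int optname)
    {
            /* Test the same hook you are about to call. */
            if (p->compat_getsockopt != NULL)
                    return p->compat_getsockopt(level, optname);
            return p->getsockopt(level, optname);
    }
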
diff --git a/net/dccp/input.c b/net/dccp/input.c index 4dee462f00db..287099f7f042 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c | |||
@@ -256,10 +256,10 @@ int dccp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
256 | * (only one is active at a time); when moving to bidirectional | 256 | * (only one is active at a time); when moving to bidirectional |
257 | * service, this needs to be revised. | 257 | * service, this needs to be revised. |
258 | */ | 258 | */ |
259 | if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER) | 259 | if (dccp_sk(sk)->dccps_role == DCCP_ROLE_CLIENT) |
260 | ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb); | ||
261 | else | ||
262 | ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb); | 260 | ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb); |
261 | else /* listening or connected server */ | ||
262 | ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb); | ||
263 | 263 | ||
264 | return __dccp_rcv_established(sk, skb, dh, len); | 264 | return __dccp_rcv_established(sk, skb, dh, len); |
265 | discard: | 265 | discard: |
@@ -495,10 +495,10 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
495 | goto discard; | 495 | goto discard; |
496 | 496 | ||
497 | /* XXX see the comments in dccp_rcv_established about this */ | 497 | /* XXX see the comments in dccp_rcv_established about this */ |
498 | if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER) | 498 | if (dccp_sk(sk)->dccps_role == DCCP_ROLE_CLIENT) |
499 | ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb); | ||
500 | else | ||
501 | ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb); | 499 | ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb); |
500 | else | ||
501 | ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb); | ||
502 | } | 502 | } |
503 | 503 | ||
504 | /* | 504 | /* |
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c index 6656bb497c7b..6d235b3013dd 100644 --- a/net/dccp/minisocks.c +++ b/net/dccp/minisocks.c | |||
@@ -103,7 +103,7 @@ struct sock *dccp_create_openreq_child(struct sock *sk, | |||
103 | 103 | ||
104 | if (newsk != NULL) { | 104 | if (newsk != NULL) { |
105 | const struct dccp_request_sock *dreq = dccp_rsk(req); | 105 | const struct dccp_request_sock *dreq = dccp_rsk(req); |
106 | struct inet_connection_sock *newicsk = inet_csk(sk); | 106 | struct inet_connection_sock *newicsk = inet_csk(newsk); |
107 | struct dccp_sock *newdp = dccp_sk(newsk); | 107 | struct dccp_sock *newdp = dccp_sk(newsk); |
108 | struct dccp_minisock *newdmsk = dccp_msk(newsk); | 108 | struct dccp_minisock *newdmsk = dccp_msk(newsk); |
109 | 109 | ||
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 063721302ebf..1c6a084b5fb7 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
@@ -1251,6 +1251,28 @@ out: | |||
1251 | } | 1251 | } |
1252 | 1252 | ||
1253 | /* | 1253 | /* |
1254 | * Resend IGMP JOIN report; used for bonding. | ||
1255 | */ | ||
1256 | void ip_mc_rejoin_group(struct ip_mc_list *im) | ||
1257 | { | ||
1258 | struct in_device *in_dev = im->interface; | ||
1259 | |||
1260 | #ifdef CONFIG_IP_MULTICAST | ||
1261 | if (im->multiaddr == IGMP_ALL_HOSTS) | ||
1262 | return; | ||
1263 | |||
1264 | if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) { | ||
1265 | igmp_mod_timer(im, IGMP_Initial_Report_Delay); | ||
1266 | return; | ||
1267 | } | ||
1268 | /* else, v3 */ | ||
1269 | im->crcount = in_dev->mr_qrv ? in_dev->mr_qrv : | ||
1270 | IGMP_Unsolicited_Report_Count; | ||
1271 | igmp_ifc_event(in_dev); | ||
1272 | #endif | ||
1273 | } | ||
1274 | |||
1275 | /* | ||
1254 | * A socket has left a multicast group on device dev | 1276 | * A socket has left a multicast group on device dev |
1255 | */ | 1277 | */ |
1256 | 1278 | ||
@@ -2596,3 +2618,4 @@ int __init igmp_mc_proc_init(void) | |||
2596 | EXPORT_SYMBOL(ip_mc_dec_group); | 2618 | EXPORT_SYMBOL(ip_mc_dec_group); |
2597 | EXPORT_SYMBOL(ip_mc_inc_group); | 2619 | EXPORT_SYMBOL(ip_mc_inc_group); |
2598 | EXPORT_SYMBOL(ip_mc_join_group); | 2620 | EXPORT_SYMBOL(ip_mc_join_group); |
2621 | EXPORT_SYMBOL(ip_mc_rejoin_group); | ||
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c index 07ba1dd136b5..23b99ae2cc37 100644 --- a/net/ipv4/netfilter/ip_conntrack_core.c +++ b/net/ipv4/netfilter/ip_conntrack_core.c | |||
@@ -1254,7 +1254,7 @@ get_next_corpse(int (*iter)(struct ip_conntrack *i, void *data), | |||
1254 | list_for_each_entry(h, &unconfirmed, list) { | 1254 | list_for_each_entry(h, &unconfirmed, list) { |
1255 | ct = tuplehash_to_ctrack(h); | 1255 | ct = tuplehash_to_ctrack(h); |
1256 | if (iter(ct, data)) | 1256 | if (iter(ct, data)) |
1257 | goto found; | 1257 | set_bit(IPS_DYING_BIT, &ct->status); |
1258 | } | 1258 | } |
1259 | write_unlock_bh(&ip_conntrack_lock); | 1259 | write_unlock_bh(&ip_conntrack_lock); |
1260 | return NULL; | 1260 | return NULL; |
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c index 170d625fad67..0a72eab14620 100644 --- a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c +++ b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c | |||
@@ -812,8 +812,10 @@ void ip_conntrack_tcp_update(struct sk_buff *skb, | |||
812 | static const u8 tcp_valid_flags[(TH_FIN|TH_SYN|TH_RST|TH_PUSH|TH_ACK|TH_URG) + 1] = | 812 | static const u8 tcp_valid_flags[(TH_FIN|TH_SYN|TH_RST|TH_PUSH|TH_ACK|TH_URG) + 1] = |
813 | { | 813 | { |
814 | [TH_SYN] = 1, | 814 | [TH_SYN] = 1, |
815 | [TH_SYN|TH_ACK] = 1, | ||
816 | [TH_SYN|TH_PUSH] = 1, | 815 | [TH_SYN|TH_PUSH] = 1, |
816 | [TH_SYN|TH_URG] = 1, | ||
817 | [TH_SYN|TH_PUSH|TH_URG] = 1, | ||
818 | [TH_SYN|TH_ACK] = 1, | ||
817 | [TH_SYN|TH_ACK|TH_PUSH] = 1, | 819 | [TH_SYN|TH_ACK|TH_PUSH] = 1, |
818 | [TH_RST] = 1, | 820 | [TH_RST] = 1, |
819 | [TH_RST|TH_ACK] = 1, | 821 | [TH_RST|TH_ACK] = 1, |
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index b984db771258..8f3e92d20df8 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | |||
@@ -379,8 +379,7 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len) | |||
379 | return -ENOENT; | 379 | return -ENOENT; |
380 | } | 380 | } |
381 | 381 | ||
382 | #if defined(CONFIG_NF_CT_NETLINK) || \ | 382 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
383 | defined(CONFIG_NF_CT_NETLINK_MODULE) | ||
384 | 383 | ||
385 | #include <linux/netfilter/nfnetlink.h> | 384 | #include <linux/netfilter/nfnetlink.h> |
386 | #include <linux/netfilter/nfnetlink_conntrack.h> | 385 | #include <linux/netfilter/nfnetlink_conntrack.h> |
@@ -435,8 +434,7 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 = { | |||
435 | .print_conntrack = ipv4_print_conntrack, | 434 | .print_conntrack = ipv4_print_conntrack, |
436 | .prepare = ipv4_prepare, | 435 | .prepare = ipv4_prepare, |
437 | .get_features = ipv4_get_features, | 436 | .get_features = ipv4_get_features, |
438 | #if defined(CONFIG_NF_CT_NETLINK) || \ | 437 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
439 | defined(CONFIG_NF_CT_NETLINK_MODULE) | ||
440 | .tuple_to_nfattr = ipv4_tuple_to_nfattr, | 438 | .tuple_to_nfattr = ipv4_tuple_to_nfattr, |
441 | .nfattr_to_tuple = ipv4_nfattr_to_tuple, | 439 | .nfattr_to_tuple = ipv4_nfattr_to_tuple, |
442 | #endif | 440 | #endif |
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c index 88cfa6aacfc1..5fd1e5363c1a 100644 --- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c +++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c | |||
@@ -268,8 +268,7 @@ icmp_error(struct sk_buff *skb, unsigned int dataoff, | |||
268 | return icmp_error_message(skb, ctinfo, hooknum); | 268 | return icmp_error_message(skb, ctinfo, hooknum); |
269 | } | 269 | } |
270 | 270 | ||
271 | #if defined(CONFIG_NF_CT_NETLINK) || \ | 271 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
272 | defined(CONFIG_NF_CT_NETLINK_MODULE) | ||
273 | 272 | ||
274 | #include <linux/netfilter/nfnetlink.h> | 273 | #include <linux/netfilter/nfnetlink.h> |
275 | #include <linux/netfilter/nfnetlink_conntrack.h> | 274 | #include <linux/netfilter/nfnetlink_conntrack.h> |
@@ -368,8 +367,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp = | |||
368 | .error = icmp_error, | 367 | .error = icmp_error, |
369 | .destroy = NULL, | 368 | .destroy = NULL, |
370 | .me = NULL, | 369 | .me = NULL, |
371 | #if defined(CONFIG_NF_CT_NETLINK) || \ | 370 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
372 | defined(CONFIG_NF_CT_NETLINK_MODULE) | ||
373 | .tuple_to_nfattr = icmp_tuple_to_nfattr, | 371 | .tuple_to_nfattr = icmp_tuple_to_nfattr, |
374 | .nfattr_to_tuple = icmp_nfattr_to_tuple, | 372 | .nfattr_to_tuple = icmp_nfattr_to_tuple, |
375 | #endif | 373 | #endif |
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c index 2c01378d3592..452e9d326684 100644 --- a/net/ipv4/netfilter/nf_nat_core.c +++ b/net/ipv4/netfilter/nf_nat_core.c | |||
@@ -546,8 +546,7 @@ void nf_nat_protocol_unregister(struct nf_nat_protocol *proto) | |||
546 | } | 546 | } |
547 | EXPORT_SYMBOL(nf_nat_protocol_unregister); | 547 | EXPORT_SYMBOL(nf_nat_protocol_unregister); |
548 | 548 | ||
549 | #if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ | 549 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
550 | defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) | ||
551 | int | 550 | int |
552 | nf_nat_port_range_to_nfattr(struct sk_buff *skb, | 551 | nf_nat_port_range_to_nfattr(struct sk_buff *skb, |
553 | const struct nf_nat_range *range) | 552 | const struct nf_nat_range *range) |
diff --git a/net/ipv4/netfilter/nf_nat_proto_gre.c b/net/ipv4/netfilter/nf_nat_proto_gre.c index d3de579e09d2..e5a34c17d927 100644 --- a/net/ipv4/netfilter/nf_nat_proto_gre.c +++ b/net/ipv4/netfilter/nf_nat_proto_gre.c | |||
@@ -152,8 +152,7 @@ static struct nf_nat_protocol gre __read_mostly = { | |||
152 | .manip_pkt = gre_manip_pkt, | 152 | .manip_pkt = gre_manip_pkt, |
153 | .in_range = gre_in_range, | 153 | .in_range = gre_in_range, |
154 | .unique_tuple = gre_unique_tuple, | 154 | .unique_tuple = gre_unique_tuple, |
155 | #if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ | 155 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
156 | defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) | ||
157 | .range_to_nfattr = nf_nat_port_range_to_nfattr, | 156 | .range_to_nfattr = nf_nat_port_range_to_nfattr, |
158 | .nfattr_to_range = nf_nat_port_nfattr_to_range, | 157 | .nfattr_to_range = nf_nat_port_nfattr_to_range, |
159 | #endif | 158 | #endif |
diff --git a/net/ipv4/netfilter/nf_nat_proto_icmp.c b/net/ipv4/netfilter/nf_nat_proto_icmp.c index 6bc2f06de055..f71ef9b5f428 100644 --- a/net/ipv4/netfilter/nf_nat_proto_icmp.c +++ b/net/ipv4/netfilter/nf_nat_proto_icmp.c | |||
@@ -78,8 +78,7 @@ struct nf_nat_protocol nf_nat_protocol_icmp = { | |||
78 | .manip_pkt = icmp_manip_pkt, | 78 | .manip_pkt = icmp_manip_pkt, |
79 | .in_range = icmp_in_range, | 79 | .in_range = icmp_in_range, |
80 | .unique_tuple = icmp_unique_tuple, | 80 | .unique_tuple = icmp_unique_tuple, |
81 | #if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ | 81 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
82 | defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) | ||
83 | .range_to_nfattr = nf_nat_port_range_to_nfattr, | 82 | .range_to_nfattr = nf_nat_port_range_to_nfattr, |
84 | .nfattr_to_range = nf_nat_port_nfattr_to_range, | 83 | .nfattr_to_range = nf_nat_port_nfattr_to_range, |
85 | #endif | 84 | #endif |
diff --git a/net/ipv4/netfilter/nf_nat_proto_tcp.c b/net/ipv4/netfilter/nf_nat_proto_tcp.c index 439164c7a626..123c95913f28 100644 --- a/net/ipv4/netfilter/nf_nat_proto_tcp.c +++ b/net/ipv4/netfilter/nf_nat_proto_tcp.c | |||
@@ -144,8 +144,7 @@ struct nf_nat_protocol nf_nat_protocol_tcp = { | |||
144 | .manip_pkt = tcp_manip_pkt, | 144 | .manip_pkt = tcp_manip_pkt, |
145 | .in_range = tcp_in_range, | 145 | .in_range = tcp_in_range, |
146 | .unique_tuple = tcp_unique_tuple, | 146 | .unique_tuple = tcp_unique_tuple, |
147 | #if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ | 147 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
148 | defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) | ||
149 | .range_to_nfattr = nf_nat_port_range_to_nfattr, | 148 | .range_to_nfattr = nf_nat_port_range_to_nfattr, |
150 | .nfattr_to_range = nf_nat_port_nfattr_to_range, | 149 | .nfattr_to_range = nf_nat_port_nfattr_to_range, |
151 | #endif | 150 | #endif |
diff --git a/net/ipv4/netfilter/nf_nat_proto_udp.c b/net/ipv4/netfilter/nf_nat_proto_udp.c index 8cae6e063bb6..1c4c70e25cd4 100644 --- a/net/ipv4/netfilter/nf_nat_proto_udp.c +++ b/net/ipv4/netfilter/nf_nat_proto_udp.c | |||
@@ -134,8 +134,7 @@ struct nf_nat_protocol nf_nat_protocol_udp = { | |||
134 | .manip_pkt = udp_manip_pkt, | 134 | .manip_pkt = udp_manip_pkt, |
135 | .in_range = udp_in_range, | 135 | .in_range = udp_in_range, |
136 | .unique_tuple = udp_unique_tuple, | 136 | .unique_tuple = udp_unique_tuple, |
137 | #if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ | 137 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
138 | defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) | ||
139 | .range_to_nfattr = nf_nat_port_range_to_nfattr, | 138 | .range_to_nfattr = nf_nat_port_range_to_nfattr, |
140 | .nfattr_to_range = nf_nat_port_nfattr_to_range, | 139 | .nfattr_to_range = nf_nat_port_nfattr_to_range, |
141 | #endif | 140 | #endif |
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index 0b2d265e7da7..1c405dd30c67 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c | |||
@@ -15,6 +15,7 @@ int ip6_route_me_harder(struct sk_buff *skb) | |||
15 | struct dst_entry *dst; | 15 | struct dst_entry *dst; |
16 | struct flowi fl = { | 16 | struct flowi fl = { |
17 | .oif = skb->sk ? skb->sk->sk_bound_dev_if : 0, | 17 | .oif = skb->sk ? skb->sk->sk_bound_dev_if : 0, |
18 | .mark = skb->mark, | ||
18 | .nl_u = | 19 | .nl_u = |
19 | { .ip6_u = | 20 | { .ip6_u = |
20 | { .daddr = iph->daddr, | 21 | { .daddr = iph->daddr, |
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index 4b7be4bb4d03..6f19c4a49560 100644 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | |||
@@ -353,8 +353,7 @@ static ctl_table nf_ct_ipv6_sysctl_table[] = { | |||
353 | }; | 353 | }; |
354 | #endif | 354 | #endif |
355 | 355 | ||
356 | #if defined(CONFIG_NF_CT_NETLINK) || \ | 356 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
357 | defined(CONFIG_NF_CT_NETLINK_MODULE) | ||
358 | 357 | ||
359 | #include <linux/netfilter/nfnetlink.h> | 358 | #include <linux/netfilter/nfnetlink.h> |
360 | #include <linux/netfilter/nfnetlink_conntrack.h> | 359 | #include <linux/netfilter/nfnetlink_conntrack.h> |
@@ -403,8 +402,7 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6 = { | |||
403 | .print_tuple = ipv6_print_tuple, | 402 | .print_tuple = ipv6_print_tuple, |
404 | .print_conntrack = ipv6_print_conntrack, | 403 | .print_conntrack = ipv6_print_conntrack, |
405 | .prepare = ipv6_prepare, | 404 | .prepare = ipv6_prepare, |
406 | #if defined(CONFIG_NF_CT_NETLINK) || \ | 405 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
407 | defined(CONFIG_NF_CT_NETLINK_MODULE) | ||
408 | .tuple_to_nfattr = ipv6_tuple_to_nfattr, | 406 | .tuple_to_nfattr = ipv6_tuple_to_nfattr, |
409 | .nfattr_to_tuple = ipv6_nfattr_to_tuple, | 407 | .nfattr_to_tuple = ipv6_nfattr_to_tuple, |
410 | #endif | 408 | #endif |
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c index 21f19cc719f3..075da4f287b8 100644 --- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c +++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | |||
@@ -244,8 +244,7 @@ icmpv6_error(struct sk_buff *skb, unsigned int dataoff, | |||
244 | return icmpv6_error_message(skb, dataoff, ctinfo, hooknum); | 244 | return icmpv6_error_message(skb, dataoff, ctinfo, hooknum); |
245 | } | 245 | } |
246 | 246 | ||
247 | #if defined(CONFIG_NF_CT_NETLINK) || \ | 247 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
248 | defined(CONFIG_NF_CT_NETLINK_MODULE) | ||
249 | 248 | ||
250 | #include <linux/netfilter/nfnetlink.h> | 249 | #include <linux/netfilter/nfnetlink.h> |
251 | #include <linux/netfilter/nfnetlink_conntrack.h> | 250 | #include <linux/netfilter/nfnetlink_conntrack.h> |
@@ -327,8 +326,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 = | |||
327 | .packet = icmpv6_packet, | 326 | .packet = icmpv6_packet, |
328 | .new = icmpv6_new, | 327 | .new = icmpv6_new, |
329 | .error = icmpv6_error, | 328 | .error = icmpv6_error, |
330 | #if defined(CONFIG_NF_CT_NETLINK) || \ | 329 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
331 | defined(CONFIG_NF_CT_NETLINK_MODULE) | ||
332 | .tuple_to_nfattr = icmpv6_tuple_to_nfattr, | 330 | .tuple_to_nfattr = icmpv6_tuple_to_nfattr, |
333 | .nfattr_to_tuple = icmpv6_nfattr_to_tuple, | 331 | .nfattr_to_tuple = icmpv6_nfattr_to_tuple, |
334 | #endif | 332 | #endif |
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 32891ebc9e68..b3a70eb6d42a 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
@@ -976,8 +976,7 @@ void __nf_ct_refresh_acct(struct nf_conn *ct, | |||
976 | } | 976 | } |
977 | EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct); | 977 | EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct); |
978 | 978 | ||
979 | #if defined(CONFIG_NF_CT_NETLINK) || \ | 979 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
980 | defined(CONFIG_NF_CT_NETLINK_MODULE) | ||
981 | 980 | ||
982 | #include <linux/netfilter/nfnetlink.h> | 981 | #include <linux/netfilter/nfnetlink.h> |
983 | #include <linux/netfilter/nfnetlink_conntrack.h> | 982 | #include <linux/netfilter/nfnetlink_conntrack.h> |
@@ -1070,7 +1069,7 @@ get_next_corpse(int (*iter)(struct nf_conn *i, void *data), | |||
1070 | list_for_each_entry(h, &unconfirmed, list) { | 1069 | list_for_each_entry(h, &unconfirmed, list) { |
1071 | ct = nf_ct_tuplehash_to_ctrack(h); | 1070 | ct = nf_ct_tuplehash_to_ctrack(h); |
1072 | if (iter(ct, data)) | 1071 | if (iter(ct, data)) |
1073 | goto found; | 1072 | set_bit(IPS_DYING_BIT, &ct->status); |
1074 | } | 1073 | } |
1075 | write_unlock_bh(&nf_conntrack_lock); | 1074 | write_unlock_bh(&nf_conntrack_lock); |
1076 | return NULL; | 1075 | return NULL; |
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index ac193ce70249..5434472420fe 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c | |||
@@ -281,8 +281,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = { | |||
281 | .new = gre_new, | 281 | .new = gre_new, |
282 | .destroy = gre_destroy, | 282 | .destroy = gre_destroy, |
283 | .me = THIS_MODULE, | 283 | .me = THIS_MODULE, |
284 | #if defined(CONFIG_NF_CONNTRACK_NETLINK) || \ | 284 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
285 | defined(CONFIG_NF_CONNTRACK_NETLINK_MODULE) | ||
286 | .tuple_to_nfattr = nf_ct_port_tuple_to_nfattr, | 285 | .tuple_to_nfattr = nf_ct_port_tuple_to_nfattr, |
287 | .nfattr_to_tuple = nf_ct_port_nfattr_to_tuple, | 286 | .nfattr_to_tuple = nf_ct_port_nfattr_to_tuple, |
288 | #endif | 287 | #endif |
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 069b85ca51cd..153d6619993a 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c | |||
@@ -769,8 +769,10 @@ EXPORT_SYMBOL_GPL(nf_conntrack_tcp_update); | |||
769 | static u8 tcp_valid_flags[(TH_FIN|TH_SYN|TH_RST|TH_PUSH|TH_ACK|TH_URG) + 1] = | 769 | static u8 tcp_valid_flags[(TH_FIN|TH_SYN|TH_RST|TH_PUSH|TH_ACK|TH_URG) + 1] = |
770 | { | 770 | { |
771 | [TH_SYN] = 1, | 771 | [TH_SYN] = 1, |
772 | [TH_SYN|TH_ACK] = 1, | ||
773 | [TH_SYN|TH_PUSH] = 1, | 772 | [TH_SYN|TH_PUSH] = 1, |
773 | [TH_SYN|TH_URG] = 1, | ||
774 | [TH_SYN|TH_PUSH|TH_URG] = 1, | ||
775 | [TH_SYN|TH_ACK] = 1, | ||
774 | [TH_SYN|TH_ACK|TH_PUSH] = 1, | 776 | [TH_SYN|TH_ACK|TH_PUSH] = 1, |
775 | [TH_RST] = 1, | 777 | [TH_RST] = 1, |
776 | [TH_RST|TH_ACK] = 1, | 778 | [TH_RST|TH_ACK] = 1, |
@@ -1099,8 +1101,7 @@ static int tcp_new(struct nf_conn *conntrack, | |||
1099 | return 1; | 1101 | return 1; |
1100 | } | 1102 | } |
1101 | 1103 | ||
1102 | #if defined(CONFIG_NF_CT_NETLINK) || \ | 1104 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
1103 | defined(CONFIG_NF_CT_NETLINK_MODULE) | ||
1104 | 1105 | ||
1105 | #include <linux/netfilter/nfnetlink.h> | 1106 | #include <linux/netfilter/nfnetlink.h> |
1106 | #include <linux/netfilter/nfnetlink_conntrack.h> | 1107 | #include <linux/netfilter/nfnetlink_conntrack.h> |
@@ -1378,8 +1379,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 = | |||
1378 | .packet = tcp_packet, | 1379 | .packet = tcp_packet, |
1379 | .new = tcp_new, | 1380 | .new = tcp_new, |
1380 | .error = tcp_error, | 1381 | .error = tcp_error, |
1381 | #if defined(CONFIG_NF_CT_NETLINK) || \ | 1382 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
1382 | defined(CONFIG_NF_CT_NETLINK_MODULE) | ||
1383 | .to_nfattr = tcp_to_nfattr, | 1383 | .to_nfattr = tcp_to_nfattr, |
1384 | .from_nfattr = nfattr_to_tcp, | 1384 | .from_nfattr = nfattr_to_tcp, |
1385 | .tuple_to_nfattr = nf_ct_port_tuple_to_nfattr, | 1385 | .tuple_to_nfattr = nf_ct_port_tuple_to_nfattr, |
@@ -1408,8 +1408,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 = | |||
1408 | .packet = tcp_packet, | 1408 | .packet = tcp_packet, |
1409 | .new = tcp_new, | 1409 | .new = tcp_new, |
1410 | .error = tcp_error, | 1410 | .error = tcp_error, |
1411 | #if defined(CONFIG_NF_CT_NETLINK) || \ | 1411 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
1412 | defined(CONFIG_NF_CT_NETLINK_MODULE) | ||
1413 | .to_nfattr = tcp_to_nfattr, | 1412 | .to_nfattr = tcp_to_nfattr, |
1414 | .from_nfattr = nfattr_to_tcp, | 1413 | .from_nfattr = nfattr_to_tcp, |
1415 | .tuple_to_nfattr = nf_ct_port_tuple_to_nfattr, | 1414 | .tuple_to_nfattr = nf_ct_port_tuple_to_nfattr, |
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c index d0a1cee7ee52..a5e5726ec0c7 100644 --- a/net/netfilter/nf_conntrack_proto_udp.c +++ b/net/netfilter/nf_conntrack_proto_udp.c | |||
@@ -208,8 +208,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 = | |||
208 | .packet = udp_packet, | 208 | .packet = udp_packet, |
209 | .new = udp_new, | 209 | .new = udp_new, |
210 | .error = udp_error, | 210 | .error = udp_error, |
211 | #if defined(CONFIG_NF_CT_NETLINK) || \ | 211 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
212 | defined(CONFIG_NF_CT_NETLINK_MODULE) | ||
213 | .tuple_to_nfattr = nf_ct_port_tuple_to_nfattr, | 212 | .tuple_to_nfattr = nf_ct_port_tuple_to_nfattr, |
214 | .nfattr_to_tuple = nf_ct_port_nfattr_to_tuple, | 213 | .nfattr_to_tuple = nf_ct_port_nfattr_to_tuple, |
215 | #endif | 214 | #endif |
@@ -236,8 +235,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 = | |||
236 | .packet = udp_packet, | 235 | .packet = udp_packet, |
237 | .new = udp_new, | 236 | .new = udp_new, |
238 | .error = udp_error, | 237 | .error = udp_error, |
239 | #if defined(CONFIG_NF_CT_NETLINK) || \ | 238 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
240 | defined(CONFIG_NF_CT_NETLINK_MODULE) | ||
241 | .tuple_to_nfattr = nf_ct_port_tuple_to_nfattr, | 239 | .tuple_to_nfattr = nf_ct_port_tuple_to_nfattr, |
242 | .nfattr_to_tuple = nf_ct_port_nfattr_to_tuple, | 240 | .nfattr_to_tuple = nf_ct_port_nfattr_to_tuple, |
243 | #endif | 241 | #endif |
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index b8eab0dbc3dd..91a0972ec117 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c | |||
@@ -133,6 +133,7 @@ instance_put(struct nfulnl_instance *inst) | |||
133 | if (inst && atomic_dec_and_test(&inst->use)) { | 133 | if (inst && atomic_dec_and_test(&inst->use)) { |
134 | UDEBUG("kfree(inst=%p)\n", inst); | 134 | UDEBUG("kfree(inst=%p)\n", inst); |
135 | kfree(inst); | 135 | kfree(inst); |
136 | module_put(THIS_MODULE); | ||
136 | } | 137 | } |
137 | } | 138 | } |
138 | 139 | ||
@@ -217,6 +218,9 @@ _instance_destroy2(struct nfulnl_instance *inst, int lock) | |||
217 | 218 | ||
218 | spin_lock_bh(&inst->lock); | 219 | spin_lock_bh(&inst->lock); |
219 | if (inst->skb) { | 220 | if (inst->skb) { |
221 | /* timer "holds" one reference (we have one more) */ | ||
222 | if (del_timer(&inst->timer)) | ||
223 | instance_put(inst); | ||
220 | if (inst->qlen) | 224 | if (inst->qlen) |
221 | __nfulnl_send(inst); | 225 | __nfulnl_send(inst); |
222 | if (inst->skb) { | 226 | if (inst->skb) { |
@@ -228,8 +232,6 @@ _instance_destroy2(struct nfulnl_instance *inst, int lock) | |||
228 | 232 | ||
229 | /* and finally put the refcount */ | 233 | /* and finally put the refcount */ |
230 | instance_put(inst); | 234 | instance_put(inst); |
231 | |||
232 | module_put(THIS_MODULE); | ||
233 | } | 235 | } |
234 | 236 | ||
235 | static inline void | 237 | static inline void |
@@ -363,9 +365,6 @@ __nfulnl_send(struct nfulnl_instance *inst) | |||
363 | { | 365 | { |
364 | int status; | 366 | int status; |
365 | 367 | ||
366 | if (timer_pending(&inst->timer)) | ||
367 | del_timer(&inst->timer); | ||
368 | |||
369 | if (!inst->skb) | 368 | if (!inst->skb) |
370 | return 0; | 369 | return 0; |
371 | 370 | ||
@@ -393,8 +392,8 @@ static void nfulnl_timer(unsigned long data) | |||
393 | 392 | ||
394 | spin_lock_bh(&inst->lock); | 393 | spin_lock_bh(&inst->lock); |
395 | __nfulnl_send(inst); | 394 | __nfulnl_send(inst); |
396 | instance_put(inst); | ||
397 | spin_unlock_bh(&inst->lock); | 395 | spin_unlock_bh(&inst->lock); |
396 | instance_put(inst); | ||
398 | } | 397 | } |
399 | 398 | ||
400 | /* This is an inline function, we don't really care about a long | 399 | /* This is an inline function, we don't really care about a long |
@@ -560,6 +559,7 @@ __build_packet_message(struct nfulnl_instance *inst, | |||
560 | } | 559 | } |
561 | 560 | ||
562 | nlh->nlmsg_len = inst->skb->tail - old_tail; | 561 | nlh->nlmsg_len = inst->skb->tail - old_tail; |
562 | inst->lastnlh = nlh; | ||
563 | return 0; | 563 | return 0; |
564 | 564 | ||
565 | nlmsg_failure: | 565 | nlmsg_failure: |
@@ -689,6 +689,9 @@ nfulnl_log_packet(unsigned int pf, | |||
689 | * enough room in the skb left. flush to userspace. */ | 689 | * enough room in the skb left. flush to userspace. */ |
690 | UDEBUG("flushing old skb\n"); | 690 | UDEBUG("flushing old skb\n"); |
691 | 691 | ||
692 | /* timer "holds" one reference (we have another one) */ | ||
693 | if (del_timer(&inst->timer)) | ||
694 | instance_put(inst); | ||
692 | __nfulnl_send(inst); | 695 | __nfulnl_send(inst); |
693 | 696 | ||
694 | if (!(inst->skb = nfulnl_alloc_skb(nlbufsiz, size))) { | 697 | if (!(inst->skb = nfulnl_alloc_skb(nlbufsiz, size))) { |
@@ -711,15 +714,16 @@ nfulnl_log_packet(unsigned int pf, | |||
711 | inst->timer.expires = jiffies + (inst->flushtimeout*HZ/100); | 714 | inst->timer.expires = jiffies + (inst->flushtimeout*HZ/100); |
712 | add_timer(&inst->timer); | 715 | add_timer(&inst->timer); |
713 | } | 716 | } |
714 | spin_unlock_bh(&inst->lock); | ||
715 | 717 | ||
718 | unlock_and_release: | ||
719 | spin_unlock_bh(&inst->lock); | ||
720 | instance_put(inst); | ||
716 | return; | 721 | return; |
717 | 722 | ||
718 | alloc_failure: | 723 | alloc_failure: |
719 | spin_unlock_bh(&inst->lock); | ||
720 | instance_put(inst); | ||
721 | UDEBUG("error allocating skb\n"); | 724 | UDEBUG("error allocating skb\n"); |
722 | /* FIXME: statistics */ | 725 | /* FIXME: statistics */ |
726 | goto unlock_and_release; | ||
723 | } | 727 | } |
724 | 728 | ||
725 | static int | 729 | static int |
@@ -856,6 +860,9 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb, | |||
856 | ret = -EINVAL; | 860 | ret = -EINVAL; |
857 | break; | 861 | break; |
858 | } | 862 | } |
863 | |||
864 | if (!inst) | ||
865 | goto out; | ||
859 | } else { | 866 | } else { |
860 | if (!inst) { | 867 | if (!inst) { |
861 | UDEBUG("no config command, and no instance for " | 868 | UDEBUG("no config command, and no instance for " |
@@ -909,6 +916,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb, | |||
909 | 916 | ||
910 | out_put: | 917 | out_put: |
911 | instance_put(inst); | 918 | instance_put(inst); |
919 | out: | ||
912 | return ret; | 920 | return ret; |
913 | } | 921 | } |
914 | 922 | ||
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 8353829bc5c6..b4db53ff1435 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c | |||
@@ -27,22 +27,26 @@ | |||
27 | 27 | ||
28 | #define RPCDBG_FACILITY RPCDBG_SVCDSP | 28 | #define RPCDBG_FACILITY RPCDBG_SVCDSP |
29 | 29 | ||
30 | #define svc_serv_is_pooled(serv) ((serv)->sv_function) | ||
31 | |||
30 | /* | 32 | /* |
31 | * Mode for mapping cpus to pools. | 33 | * Mode for mapping cpus to pools. |
32 | */ | 34 | */ |
33 | enum { | 35 | enum { |
34 | SVC_POOL_NONE = -1, /* uninitialised, choose one of the others */ | 36 | SVC_POOL_AUTO = -1, /* choose one of the others */ |
35 | SVC_POOL_GLOBAL, /* no mapping, just a single global pool | 37 | SVC_POOL_GLOBAL, /* no mapping, just a single global pool |
36 | * (legacy & UP mode) */ | 38 | * (legacy & UP mode) */ |
37 | SVC_POOL_PERCPU, /* one pool per cpu */ | 39 | SVC_POOL_PERCPU, /* one pool per cpu */ |
38 | SVC_POOL_PERNODE /* one pool per numa node */ | 40 | SVC_POOL_PERNODE /* one pool per numa node */ |
39 | }; | 41 | }; |
42 | #define SVC_POOL_DEFAULT SVC_POOL_GLOBAL | ||
40 | 43 | ||
41 | /* | 44 | /* |
42 | * Structure for mapping cpus to pools and vice versa. | 45 | * Structure for mapping cpus to pools and vice versa. |
43 | * Setup once during sunrpc initialisation. | 46 | * Setup once during sunrpc initialisation. |
44 | */ | 47 | */ |
45 | static struct svc_pool_map { | 48 | static struct svc_pool_map { |
49 | int count; /* How many svc_servs use us */ | ||
46 | int mode; /* Note: int not enum to avoid | 50 | int mode; /* Note: int not enum to avoid |
47 | * warnings about "enumeration value | 51 | * warnings about "enumeration value |
48 | * not handled in switch" */ | 52 | * not handled in switch" */ |
@@ -50,9 +54,63 @@ static struct svc_pool_map { | |||
50 | unsigned int *pool_to; /* maps pool id to cpu or node */ | 54 | unsigned int *pool_to; /* maps pool id to cpu or node */ |
51 | unsigned int *to_pool; /* maps cpu or node to pool id */ | 55 | unsigned int *to_pool; /* maps cpu or node to pool id */ |
52 | } svc_pool_map = { | 56 | } svc_pool_map = { |
53 | .mode = SVC_POOL_NONE | 57 | .count = 0, |
58 | .mode = SVC_POOL_DEFAULT | ||
54 | }; | 59 | }; |
60 | static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */ | ||
61 | |||
62 | static int | ||
63 | param_set_pool_mode(const char *val, struct kernel_param *kp) | ||
64 | { | ||
65 | int *ip = (int *)kp->arg; | ||
66 | struct svc_pool_map *m = &svc_pool_map; | ||
67 | int err; | ||
68 | |||
69 | mutex_lock(&svc_pool_map_mutex); | ||
70 | |||
71 | err = -EBUSY; | ||
72 | if (m->count) | ||
73 | goto out; | ||
74 | |||
75 | err = 0; | ||
76 | if (!strncmp(val, "auto", 4)) | ||
77 | *ip = SVC_POOL_AUTO; | ||
78 | else if (!strncmp(val, "global", 6)) | ||
79 | *ip = SVC_POOL_GLOBAL; | ||
80 | else if (!strncmp(val, "percpu", 6)) | ||
81 | *ip = SVC_POOL_PERCPU; | ||
82 | else if (!strncmp(val, "pernode", 7)) | ||
83 | *ip = SVC_POOL_PERNODE; | ||
84 | else | ||
85 | err = -EINVAL; | ||
86 | |||
87 | out: | ||
88 | mutex_unlock(&svc_pool_map_mutex); | ||
89 | return err; | ||
90 | } | ||
55 | 91 | ||
92 | static int | ||
93 | param_get_pool_mode(char *buf, struct kernel_param *kp) | ||
94 | { | ||
95 | int *ip = (int *)kp->arg; | ||
96 | |||
97 | switch (*ip) | ||
98 | { | ||
99 | case SVC_POOL_AUTO: | ||
100 | return strlcpy(buf, "auto", 20); | ||
101 | case SVC_POOL_GLOBAL: | ||
102 | return strlcpy(buf, "global", 20); | ||
103 | case SVC_POOL_PERCPU: | ||
104 | return strlcpy(buf, "percpu", 20); | ||
105 | case SVC_POOL_PERNODE: | ||
106 | return strlcpy(buf, "pernode", 20); | ||
107 | default: | ||
108 | return sprintf(buf, "%d", *ip); | ||
109 | } | ||
110 | } | ||
111 | |||
112 | module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode, | ||
113 | &svc_pool_map.mode, 0644); | ||
56 | 114 | ||
57 | /* | 115 | /* |
58 | * Detect best pool mapping mode heuristically, | 116 | * Detect best pool mapping mode heuristically, |
@@ -166,18 +224,25 @@ svc_pool_map_init_pernode(struct svc_pool_map *m) | |||
166 | 224 | ||
167 | 225 | ||
168 | /* | 226 | /* |
169 | * Build the global map of cpus to pools and vice versa. | 227 | * Add a reference to the global map of cpus to pools (and |
228 | * vice versa). Initialise the map if we're the first user. | ||
229 | * Returns the number of pools. | ||
170 | */ | 230 | */ |
171 | static unsigned int | 231 | static unsigned int |
172 | svc_pool_map_init(void) | 232 | svc_pool_map_get(void) |
173 | { | 233 | { |
174 | struct svc_pool_map *m = &svc_pool_map; | 234 | struct svc_pool_map *m = &svc_pool_map; |
175 | int npools = -1; | 235 | int npools = -1; |
176 | 236 | ||
177 | if (m->mode != SVC_POOL_NONE) | 237 | mutex_lock(&svc_pool_map_mutex); |
238 | |||
239 | if (m->count++) { | ||
240 | mutex_unlock(&svc_pool_map_mutex); | ||
178 | return m->npools; | 241 | return m->npools; |
242 | } | ||
179 | 243 | ||
180 | m->mode = svc_pool_map_choose_mode(); | 244 | if (m->mode == SVC_POOL_AUTO) |
245 | m->mode = svc_pool_map_choose_mode(); | ||
181 | 246 | ||
182 | switch (m->mode) { | 247 | switch (m->mode) { |
183 | case SVC_POOL_PERCPU: | 248 | case SVC_POOL_PERCPU: |
@@ -195,9 +260,36 @@ svc_pool_map_init(void) | |||
195 | } | 260 | } |
196 | m->npools = npools; | 261 | m->npools = npools; |
197 | 262 | ||
263 | mutex_unlock(&svc_pool_map_mutex); | ||
198 | return m->npools; | 264 | return m->npools; |
199 | } | 265 | } |
200 | 266 | ||
267 | |||
268 | /* | ||
269 | * Drop a reference to the global map of cpus to pools. | ||
270 | * When the last reference is dropped, the map data is | ||
271 | * freed; this allows the sysadmin to change the pool | ||
272 | * mode using the pool_mode module option without | ||
273 | * rebooting or re-loading sunrpc.ko. | ||
274 | */ | ||
275 | static void | ||
276 | svc_pool_map_put(void) | ||
277 | { | ||
278 | struct svc_pool_map *m = &svc_pool_map; | ||
279 | |||
280 | mutex_lock(&svc_pool_map_mutex); | ||
281 | |||
282 | if (!--m->count) { | ||
283 | m->mode = SVC_POOL_DEFAULT; | ||
284 | kfree(m->to_pool); | ||
285 | kfree(m->pool_to); | ||
286 | m->npools = 0; | ||
287 | } | ||
288 | |||
289 | mutex_unlock(&svc_pool_map_mutex); | ||
290 | } | ||
291 | |||
292 | |||
201 | /* | 293 | /* |
202 | * Set the current thread's cpus_allowed mask so that it | 294 | * Set the current thread's cpus_allowed mask so that it |
203 | * will only run on cpus in the given pool. | 295 | * will only run on cpus in the given pool. |
@@ -212,10 +304,9 @@ svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask) | |||
212 | 304 | ||
213 | /* | 305 | /* |
214 | * The caller checks for sv_nrpools > 1, which | 306 | * The caller checks for sv_nrpools > 1, which |
215 | * implies that we've been initialized and the | 307 | * implies that we've been initialized. |
216 | * map mode is not NONE. | ||
217 | */ | 308 | */ |
218 | BUG_ON(m->mode == SVC_POOL_NONE); | 309 | BUG_ON(m->count == 0); |
219 | 310 | ||
220 | switch (m->mode) | 311 | switch (m->mode) |
221 | { | 312 | { |
@@ -246,18 +337,19 @@ svc_pool_for_cpu(struct svc_serv *serv, int cpu) | |||
246 | unsigned int pidx = 0; | 337 | unsigned int pidx = 0; |
247 | 338 | ||
248 | /* | 339 | /* |
249 | * SVC_POOL_NONE happens in a pure client when | 340 | * An uninitialised map happens in a pure client when |
250 | * lockd is brought up, so silently treat it the | 341 | * lockd is brought up, so silently treat it the |
251 | * same as SVC_POOL_GLOBAL. | 342 | * same as SVC_POOL_GLOBAL. |
252 | */ | 343 | */ |
253 | 344 | if (svc_serv_is_pooled(serv)) { | |
254 | switch (m->mode) { | 345 | switch (m->mode) { |
255 | case SVC_POOL_PERCPU: | 346 | case SVC_POOL_PERCPU: |
256 | pidx = m->to_pool[cpu]; | 347 | pidx = m->to_pool[cpu]; |
257 | break; | 348 | break; |
258 | case SVC_POOL_PERNODE: | 349 | case SVC_POOL_PERNODE: |
259 | pidx = m->to_pool[cpu_to_node(cpu)]; | 350 | pidx = m->to_pool[cpu_to_node(cpu)]; |
260 | break; | 351 | break; |
352 | } | ||
261 | } | 353 | } |
262 | return &serv->sv_pools[pidx % serv->sv_nrpools]; | 354 | return &serv->sv_pools[pidx % serv->sv_nrpools]; |
263 | } | 355 | } |
@@ -347,7 +439,7 @@ svc_create_pooled(struct svc_program *prog, unsigned int bufsize, | |||
347 | svc_thread_fn func, int sig, struct module *mod) | 439 | svc_thread_fn func, int sig, struct module *mod) |
348 | { | 440 | { |
349 | struct svc_serv *serv; | 441 | struct svc_serv *serv; |
350 | unsigned int npools = svc_pool_map_init(); | 442 | unsigned int npools = svc_pool_map_get(); |
351 | 443 | ||
352 | serv = __svc_create(prog, bufsize, npools, shutdown); | 444 | serv = __svc_create(prog, bufsize, npools, shutdown); |
353 | 445 | ||
@@ -367,6 +459,7 @@ void | |||
367 | svc_destroy(struct svc_serv *serv) | 459 | svc_destroy(struct svc_serv *serv) |
368 | { | 460 | { |
369 | struct svc_sock *svsk; | 461 | struct svc_sock *svsk; |
462 | struct svc_sock *tmp; | ||
370 | 463 | ||
371 | dprintk("svc: svc_destroy(%s, %d)\n", | 464 | dprintk("svc: svc_destroy(%s, %d)\n", |
372 | serv->sv_program->pg_name, | 465 | serv->sv_program->pg_name, |
@@ -382,24 +475,23 @@ svc_destroy(struct svc_serv *serv) | |||
382 | 475 | ||
383 | del_timer_sync(&serv->sv_temptimer); | 476 | del_timer_sync(&serv->sv_temptimer); |
384 | 477 | ||
385 | while (!list_empty(&serv->sv_tempsocks)) { | 478 | list_for_each_entry_safe(svsk, tmp, &serv->sv_tempsocks, sk_list) |
386 | svsk = list_entry(serv->sv_tempsocks.next, | 479 | svc_force_close_socket(svsk); |
387 | struct svc_sock, | 480 | |
388 | sk_list); | ||
389 | svc_close_socket(svsk); | ||
390 | } | ||
391 | if (serv->sv_shutdown) | 481 | if (serv->sv_shutdown) |
392 | serv->sv_shutdown(serv); | 482 | serv->sv_shutdown(serv); |
393 | 483 | ||
394 | while (!list_empty(&serv->sv_permsocks)) { | 484 | list_for_each_entry_safe(svsk, tmp, &serv->sv_permsocks, sk_list) |
395 | svsk = list_entry(serv->sv_permsocks.next, | 485 | svc_force_close_socket(svsk); |
396 | struct svc_sock, | 486 | |
397 | sk_list); | 487 | BUG_ON(!list_empty(&serv->sv_permsocks)); |
398 | svc_close_socket(svsk); | 488 | BUG_ON(!list_empty(&serv->sv_tempsocks)); |
399 | } | ||
400 | 489 | ||
401 | cache_clean_deferred(serv); | 490 | cache_clean_deferred(serv); |
402 | 491 | ||
492 | if (svc_serv_is_pooled(serv)) | ||
493 | svc_pool_map_put(); | ||
494 | |||
403 | /* Unregister service with the portmapper */ | 495 | /* Unregister service with the portmapper */ |
404 | svc_register(serv, 0, 0); | 496 | svc_register(serv, 0, 0); |
405 | kfree(serv->sv_pools); | 497 | kfree(serv->sv_pools); |
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 63ae94771b8e..f6e1eb1ea720 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
@@ -82,6 +82,7 @@ static void svc_delete_socket(struct svc_sock *svsk); | |||
82 | static void svc_udp_data_ready(struct sock *, int); | 82 | static void svc_udp_data_ready(struct sock *, int); |
83 | static int svc_udp_recvfrom(struct svc_rqst *); | 83 | static int svc_udp_recvfrom(struct svc_rqst *); |
84 | static int svc_udp_sendto(struct svc_rqst *); | 84 | static int svc_udp_sendto(struct svc_rqst *); |
85 | static void svc_close_socket(struct svc_sock *svsk); | ||
85 | 86 | ||
86 | static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk); | 87 | static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk); |
87 | static int svc_deferred_recv(struct svc_rqst *rqstp); | 88 | static int svc_deferred_recv(struct svc_rqst *rqstp); |
@@ -131,13 +132,13 @@ static char *__svc_print_addr(struct sockaddr *addr, char *buf, size_t len) | |||
131 | NIPQUAD(((struct sockaddr_in *) addr)->sin_addr), | 132 | NIPQUAD(((struct sockaddr_in *) addr)->sin_addr), |
132 | htons(((struct sockaddr_in *) addr)->sin_port)); | 133 | htons(((struct sockaddr_in *) addr)->sin_port)); |
133 | break; | 134 | break; |
134 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 135 | |
135 | case AF_INET6: | 136 | case AF_INET6: |
136 | snprintf(buf, len, "%x:%x:%x:%x:%x:%x:%x:%x, port=%u", | 137 | snprintf(buf, len, "%x:%x:%x:%x:%x:%x:%x:%x, port=%u", |
137 | NIP6(((struct sockaddr_in6 *) addr)->sin6_addr), | 138 | NIP6(((struct sockaddr_in6 *) addr)->sin6_addr), |
138 | htons(((struct sockaddr_in6 *) addr)->sin6_port)); | 139 | htons(((struct sockaddr_in6 *) addr)->sin6_port)); |
139 | break; | 140 | break; |
140 | #endif | 141 | |
141 | default: | 142 | default: |
142 | snprintf(buf, len, "unknown address type: %d", addr->sa_family); | 143 | snprintf(buf, len, "unknown address type: %d", addr->sa_family); |
143 | break; | 144 | break; |
@@ -449,9 +450,7 @@ svc_wake_up(struct svc_serv *serv) | |||
449 | 450 | ||
450 | union svc_pktinfo_u { | 451 | union svc_pktinfo_u { |
451 | struct in_pktinfo pkti; | 452 | struct in_pktinfo pkti; |
452 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
453 | struct in6_pktinfo pkti6; | 453 | struct in6_pktinfo pkti6; |
454 | #endif | ||
455 | }; | 454 | }; |
456 | 455 | ||
457 | static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh) | 456 | static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh) |
@@ -467,7 +466,7 @@ static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh) | |||
467 | cmh->cmsg_len = CMSG_LEN(sizeof(*pki)); | 466 | cmh->cmsg_len = CMSG_LEN(sizeof(*pki)); |
468 | } | 467 | } |
469 | break; | 468 | break; |
470 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 469 | |
471 | case AF_INET6: { | 470 | case AF_INET6: { |
472 | struct in6_pktinfo *pki = CMSG_DATA(cmh); | 471 | struct in6_pktinfo *pki = CMSG_DATA(cmh); |
473 | 472 | ||
@@ -479,7 +478,6 @@ static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh) | |||
479 | cmh->cmsg_len = CMSG_LEN(sizeof(*pki)); | 478 | cmh->cmsg_len = CMSG_LEN(sizeof(*pki)); |
480 | } | 479 | } |
481 | break; | 480 | break; |
482 | #endif | ||
483 | } | 481 | } |
484 | return; | 482 | return; |
485 | } | 483 | } |
@@ -721,45 +719,21 @@ svc_write_space(struct sock *sk) | |||
721 | } | 719 | } |
722 | } | 720 | } |
723 | 721 | ||
724 | static void svc_udp_get_sender_address(struct svc_rqst *rqstp, | 722 | static inline void svc_udp_get_dest_address(struct svc_rqst *rqstp, |
725 | struct sk_buff *skb) | 723 | struct cmsghdr *cmh) |
726 | { | 724 | { |
727 | switch (rqstp->rq_sock->sk_sk->sk_family) { | 725 | switch (rqstp->rq_sock->sk_sk->sk_family) { |
728 | case AF_INET: { | 726 | case AF_INET: { |
729 | /* this seems to come from net/ipv4/udp.c:udp_recvmsg */ | 727 | struct in_pktinfo *pki = CMSG_DATA(cmh); |
730 | struct sockaddr_in *sin = svc_addr_in(rqstp); | 728 | rqstp->rq_daddr.addr.s_addr = pki->ipi_spec_dst.s_addr; |
731 | |||
732 | sin->sin_family = AF_INET; | ||
733 | sin->sin_port = skb->h.uh->source; | ||
734 | sin->sin_addr.s_addr = skb->nh.iph->saddr; | ||
735 | rqstp->rq_addrlen = sizeof(struct sockaddr_in); | ||
736 | /* Remember which interface received this request */ | ||
737 | rqstp->rq_daddr.addr.s_addr = skb->nh.iph->daddr; | ||
738 | } | ||
739 | break; | 729 | break; |
740 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
741 | case AF_INET6: { | ||
742 | /* this is derived from net/ipv6/udp.c:udpv6_recvmesg */ | ||
743 | struct sockaddr_in6 *sin6 = svc_addr_in6(rqstp); | ||
744 | |||
745 | sin6->sin6_family = AF_INET6; | ||
746 | sin6->sin6_port = skb->h.uh->source; | ||
747 | sin6->sin6_flowinfo = 0; | ||
748 | sin6->sin6_scope_id = 0; | ||
749 | if (ipv6_addr_type(&sin6->sin6_addr) & | ||
750 | IPV6_ADDR_LINKLOCAL) | ||
751 | sin6->sin6_scope_id = IP6CB(skb)->iif; | ||
752 | ipv6_addr_copy(&sin6->sin6_addr, | ||
753 | &skb->nh.ipv6h->saddr); | ||
754 | rqstp->rq_addrlen = sizeof(struct sockaddr_in); | ||
755 | /* Remember which interface received this request */ | ||
756 | ipv6_addr_copy(&rqstp->rq_daddr.addr6, | ||
757 | &skb->nh.ipv6h->saddr); | ||
758 | } | 730 | } |
731 | case AF_INET6: { | ||
732 | struct in6_pktinfo *pki = CMSG_DATA(cmh); | ||
733 | ipv6_addr_copy(&rqstp->rq_daddr.addr6, &pki->ipi6_addr); | ||
759 | break; | 734 | break; |
760 | #endif | 735 | } |
761 | } | 736 | } |
762 | return; | ||
763 | } | 737 | } |
764 | 738 | ||
765 | /* | 739 | /* |
@@ -771,7 +745,15 @@ svc_udp_recvfrom(struct svc_rqst *rqstp) | |||
771 | struct svc_sock *svsk = rqstp->rq_sock; | 745 | struct svc_sock *svsk = rqstp->rq_sock; |
772 | struct svc_serv *serv = svsk->sk_server; | 746 | struct svc_serv *serv = svsk->sk_server; |
773 | struct sk_buff *skb; | 747 | struct sk_buff *skb; |
748 | char buffer[CMSG_SPACE(sizeof(union svc_pktinfo_u))]; | ||
749 | struct cmsghdr *cmh = (struct cmsghdr *)buffer; | ||
774 | int err, len; | 750 | int err, len; |
751 | struct msghdr msg = { | ||
752 | .msg_name = svc_addr(rqstp), | ||
753 | .msg_control = cmh, | ||
754 | .msg_controllen = sizeof(buffer), | ||
755 | .msg_flags = MSG_DONTWAIT, | ||
756 | }; | ||
775 | 757 | ||
776 | if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags)) | 758 | if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags)) |
777 | /* udp sockets need large rcvbuf as all pending | 759 | /* udp sockets need large rcvbuf as all pending |
@@ -797,7 +779,9 @@ svc_udp_recvfrom(struct svc_rqst *rqstp) | |||
797 | } | 779 | } |
798 | 780 | ||
799 | clear_bit(SK_DATA, &svsk->sk_flags); | 781 | clear_bit(SK_DATA, &svsk->sk_flags); |
800 | while ((skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) { | 782 | while ((err = kernel_recvmsg(svsk->sk_sock, &msg, NULL, |
783 | 0, 0, MSG_PEEK | MSG_DONTWAIT)) < 0 || | ||
784 | (skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) { | ||
801 | if (err == -EAGAIN) { | 785 | if (err == -EAGAIN) { |
802 | svc_sock_received(svsk); | 786 | svc_sock_received(svsk); |
803 | return err; | 787 | return err; |
@@ -805,6 +789,7 @@ svc_udp_recvfrom(struct svc_rqst *rqstp) | |||
805 | /* possibly an icmp error */ | 789 | /* possibly an icmp error */ |
806 | dprintk("svc: recvfrom returned error %d\n", -err); | 790 | dprintk("svc: recvfrom returned error %d\n", -err); |
807 | } | 791 | } |
792 | rqstp->rq_addrlen = sizeof(rqstp->rq_addr); | ||
808 | if (skb->tstamp.off_sec == 0) { | 793 | if (skb->tstamp.off_sec == 0) { |
809 | struct timeval tv; | 794 | struct timeval tv; |
810 | 795 | ||
@@ -827,7 +812,16 @@ svc_udp_recvfrom(struct svc_rqst *rqstp) | |||
827 | 812 | ||
828 | rqstp->rq_prot = IPPROTO_UDP; | 813 | rqstp->rq_prot = IPPROTO_UDP; |
829 | 814 | ||
830 | svc_udp_get_sender_address(rqstp, skb); | 815 | if (cmh->cmsg_level != IPPROTO_IP || |
816 | cmh->cmsg_type != IP_PKTINFO) { | ||
817 | if (net_ratelimit()) | ||
818 | printk("rpcsvc: received unknown control message: " | ||
819 | "%d/%d\n", | ||
820 | cmh->cmsg_level, cmh->cmsg_type); | ||
821 | skb_free_datagram(svsk->sk_sk, skb); | ||
822 | return 0; | ||
823 | } | ||
824 | svc_udp_get_dest_address(rqstp, cmh); | ||
831 | 825 | ||
832 | if (skb_is_nonlinear(skb)) { | 826 | if (skb_is_nonlinear(skb)) { |
833 | /* we have to copy */ | 827 | /* we have to copy */ |
@@ -884,6 +878,9 @@ svc_udp_sendto(struct svc_rqst *rqstp) | |||
884 | static void | 878 | static void |
885 | svc_udp_init(struct svc_sock *svsk) | 879 | svc_udp_init(struct svc_sock *svsk) |
886 | { | 880 | { |
881 | int one = 1; | ||
882 | mm_segment_t oldfs; | ||
883 | |||
887 | svsk->sk_sk->sk_data_ready = svc_udp_data_ready; | 884 | svsk->sk_sk->sk_data_ready = svc_udp_data_ready; |
888 | svsk->sk_sk->sk_write_space = svc_write_space; | 885 | svsk->sk_sk->sk_write_space = svc_write_space; |
889 | svsk->sk_recvfrom = svc_udp_recvfrom; | 886 | svsk->sk_recvfrom = svc_udp_recvfrom; |
@@ -899,6 +896,13 @@ svc_udp_init(struct svc_sock *svsk) | |||
899 | 896 | ||
900 | set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */ | 897 | set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */ |
901 | set_bit(SK_CHNGBUF, &svsk->sk_flags); | 898 | set_bit(SK_CHNGBUF, &svsk->sk_flags); |
899 | |||
900 | oldfs = get_fs(); | ||
901 | set_fs(KERNEL_DS); | ||
902 | /* make sure we get destination address info */ | ||
903 | svsk->sk_sock->ops->setsockopt(svsk->sk_sock, IPPROTO_IP, IP_PKTINFO, | ||
904 | (char __user *)&one, sizeof(one)); | ||
905 | set_fs(oldfs); | ||
902 | } | 906 | } |
903 | 907 | ||
904 | /* | 908 | /* |
@@ -977,11 +981,9 @@ static inline int svc_port_is_privileged(struct sockaddr *sin) | |||
977 | case AF_INET: | 981 | case AF_INET: |
978 | return ntohs(((struct sockaddr_in *)sin)->sin_port) | 982 | return ntohs(((struct sockaddr_in *)sin)->sin_port) |
979 | < PROT_SOCK; | 983 | < PROT_SOCK; |
980 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
981 | case AF_INET6: | 984 | case AF_INET6: |
982 | return ntohs(((struct sockaddr_in6 *)sin)->sin6_port) | 985 | return ntohs(((struct sockaddr_in6 *)sin)->sin6_port) |
983 | < PROT_SOCK; | 986 | < PROT_SOCK; |
984 | #endif | ||
985 | default: | 987 | default: |
986 | return 0; | 988 | return 0; |
987 | } | 989 | } |
@@ -1786,7 +1788,7 @@ svc_delete_socket(struct svc_sock *svsk) | |||
1786 | spin_unlock_bh(&serv->sv_lock); | 1788 | spin_unlock_bh(&serv->sv_lock); |
1787 | } | 1789 | } |
1788 | 1790 | ||
1789 | void svc_close_socket(struct svc_sock *svsk) | 1791 | static void svc_close_socket(struct svc_sock *svsk) |
1790 | { | 1792 | { |
1791 | set_bit(SK_CLOSE, &svsk->sk_flags); | 1793 | set_bit(SK_CLOSE, &svsk->sk_flags); |
1792 | if (test_and_set_bit(SK_BUSY, &svsk->sk_flags)) | 1794 | if (test_and_set_bit(SK_BUSY, &svsk->sk_flags)) |
@@ -1799,6 +1801,19 @@ void svc_close_socket(struct svc_sock *svsk) | |||
1799 | svc_sock_put(svsk); | 1801 | svc_sock_put(svsk); |
1800 | } | 1802 | } |
1801 | 1803 | ||
1804 | void svc_force_close_socket(struct svc_sock *svsk) | ||
1805 | { | ||
1806 | set_bit(SK_CLOSE, &svsk->sk_flags); | ||
1807 | if (test_bit(SK_BUSY, &svsk->sk_flags)) { | ||
1808 | /* Waiting to be processed, but no threads left, | ||
1809 | * So just remove it from the waiting list | ||
1810 | */ | ||
1811 | list_del_init(&svsk->sk_ready); | ||
1812 | clear_bit(SK_BUSY, &svsk->sk_flags); | ||
1813 | } | ||
1814 | svc_close_socket(svsk); | ||
1815 | } | ||
1816 | |||
1802 | /** | 1817 | /** |
1803 | * svc_makesock - Make a socket for nfsd and lockd | 1818 | * svc_makesock - Make a socket for nfsd and lockd |
1804 | * @serv: RPC server structure | 1819 | * @serv: RPC server structure |
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 51ca4383c388..606971645b33 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -934,7 +934,7 @@ static long unix_wait_for_peer(struct sock *other, long timeo) | |||
934 | 934 | ||
935 | sched = !sock_flag(other, SOCK_DEAD) && | 935 | sched = !sock_flag(other, SOCK_DEAD) && |
936 | !(other->sk_shutdown & RCV_SHUTDOWN) && | 936 | !(other->sk_shutdown & RCV_SHUTDOWN) && |
937 | (skb_queue_len(&other->sk_receive_queue) >= | 937 | (skb_queue_len(&other->sk_receive_queue) > |
938 | other->sk_max_ack_backlog); | 938 | other->sk_max_ack_backlog); |
939 | 939 | ||
940 | unix_state_runlock(other); | 940 | unix_state_runlock(other); |
@@ -1008,7 +1008,7 @@ restart: | |||
1008 | if (other->sk_state != TCP_LISTEN) | 1008 | if (other->sk_state != TCP_LISTEN) |
1009 | goto out_unlock; | 1009 | goto out_unlock; |
1010 | 1010 | ||
1011 | if (skb_queue_len(&other->sk_receive_queue) >= | 1011 | if (skb_queue_len(&other->sk_receive_queue) > |
1012 | other->sk_max_ack_backlog) { | 1012 | other->sk_max_ack_backlog) { |
1013 | err = -EAGAIN; | 1013 | err = -EAGAIN; |
1014 | if (!timeo) | 1014 | if (!timeo) |
@@ -1381,7 +1381,7 @@ restart: | |||
1381 | } | 1381 | } |
1382 | 1382 | ||
1383 | if (unix_peer(other) != sk && | 1383 | if (unix_peer(other) != sk && |
1384 | (skb_queue_len(&other->sk_receive_queue) >= | 1384 | (skb_queue_len(&other->sk_receive_queue) > |
1385 | other->sk_max_ack_backlog)) { | 1385 | other->sk_max_ack_backlog)) { |
1386 | if (!timeo) { | 1386 | if (!timeo) { |
1387 | err = -EAGAIN; | 1387 | err = -EAGAIN; |
diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c index bfc2fed16da3..37fabf75daa7 100644 --- a/sound/pci/ac97/ac97_patch.c +++ b/sound/pci/ac97/ac97_patch.c | |||
@@ -1790,6 +1790,8 @@ static const struct snd_kcontrol_new snd_ac97_ad1981x_jack_sense[] = { | |||
1790 | * (SS vendor << 16 | device) | 1790 | * (SS vendor << 16 | device) |
1791 | */ | 1791 | */ |
1792 | static unsigned int ad1981_jacks_blacklist[] = { | 1792 | static unsigned int ad1981_jacks_blacklist[] = { |
1793 | 0x10140523, /* Thinkpad R40 */ | ||
1794 | 0x10140534, /* Thinkpad X31 */ | ||
1793 | 0x10140537, /* Thinkpad T41p */ | 1795 | 0x10140537, /* Thinkpad T41p */ |
1794 | 0x10140554, /* Thinkpad T42p/R50p */ | 1796 | 0x10140554, /* Thinkpad T42p/R50p */ |
1795 | 0 /* end */ | 1797 | 0 /* end */ |
diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c index 9327ab2eccb0..ba7fa22b285d 100644 --- a/sound/pci/ali5451/ali5451.c +++ b/sound/pci/ali5451/ali5451.c | |||
@@ -2312,6 +2312,8 @@ static int __devinit snd_ali_create(struct snd_card *card, | |||
2312 | return err; | 2312 | return err; |
2313 | } | 2313 | } |
2314 | 2314 | ||
2315 | snd_card_set_dev(card, &pci->dev); | ||
2316 | |||
2315 | /* initialise synth voices*/ | 2317 | /* initialise synth voices*/ |
2316 | for (i = 0; i < ALI_CHANNELS; i++ ) { | 2318 | for (i = 0; i < ALI_CHANNELS; i++ ) { |
2317 | codec->synth.voices[i].number = i; | 2319 | codec->synth.voices[i].number = i; |
diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c index 70face7e1048..7d3c5ee0005c 100644 --- a/sound/pci/cmipci.c +++ b/sound/pci/cmipci.c | |||
@@ -57,7 +57,7 @@ static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ | |||
57 | static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ | 57 | static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ |
58 | static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable switches */ | 58 | static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable switches */ |
59 | static long mpu_port[SNDRV_CARDS]; | 59 | static long mpu_port[SNDRV_CARDS]; |
60 | static long fm_port[SNDRV_CARDS]; | 60 | static long fm_port[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)]=1}; |
61 | static int soft_ac3[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)]=1}; | 61 | static int soft_ac3[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)]=1}; |
62 | #ifdef SUPPORT_JOYSTICK | 62 | #ifdef SUPPORT_JOYSTICK |
63 | static int joystick_port[SNDRV_CARDS]; | 63 | static int joystick_port[SNDRV_CARDS]; |
@@ -2779,6 +2779,9 @@ static int __devinit snd_cmipci_create_fm(struct cmipci *cm, long fm_port) | |||
2779 | struct snd_opl3 *opl3; | 2779 | struct snd_opl3 *opl3; |
2780 | int err; | 2780 | int err; |
2781 | 2781 | ||
2782 | if (!fm_port) | ||
2783 | goto disable_fm; | ||
2784 | |||
2782 | /* first try FM regs in PCI port range */ | 2785 | /* first try FM regs in PCI port range */ |
2783 | iosynth = cm->iobase + CM_REG_FM_PCI; | 2786 | iosynth = cm->iobase + CM_REG_FM_PCI; |
2784 | err = snd_opl3_create(cm->card, iosynth, iosynth + 2, | 2787 | err = snd_opl3_create(cm->card, iosynth, iosynth + 2, |
@@ -2793,7 +2796,7 @@ static int __devinit snd_cmipci_create_fm(struct cmipci *cm, long fm_port) | |||
2793 | case 0x3C8: val |= CM_FMSEL_3C8; break; | 2796 | case 0x3C8: val |= CM_FMSEL_3C8; break; |
2794 | case 0x388: val |= CM_FMSEL_388; break; | 2797 | case 0x388: val |= CM_FMSEL_388; break; |
2795 | default: | 2798 | default: |
2796 | return 0; | 2799 | goto disable_fm; |
2797 | } | 2800 | } |
2798 | snd_cmipci_write(cm, CM_REG_LEGACY_CTRL, val); | 2801 | snd_cmipci_write(cm, CM_REG_LEGACY_CTRL, val); |
2799 | /* enable FM */ | 2802 | /* enable FM */ |
@@ -2803,11 +2806,7 @@ static int __devinit snd_cmipci_create_fm(struct cmipci *cm, long fm_port) | |||
2803 | OPL3_HW_OPL3, 0, &opl3) < 0) { | 2806 | OPL3_HW_OPL3, 0, &opl3) < 0) { |
2804 | printk(KERN_ERR "cmipci: no OPL device at %#lx, " | 2807 | printk(KERN_ERR "cmipci: no OPL device at %#lx, " |
2805 | "skipping...\n", iosynth); | 2808 | "skipping...\n", iosynth); |
2806 | /* disable FM */ | 2809 | goto disable_fm; |
2807 | snd_cmipci_write(cm, CM_REG_LEGACY_CTRL, | ||
2808 | val & ~CM_FMSEL_MASK); | ||
2809 | snd_cmipci_clear_bit(cm, CM_REG_MISC_CTRL, CM_FM_EN); | ||
2810 | return 0; | ||
2811 | } | 2810 | } |
2812 | } | 2811 | } |
2813 | if ((err = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) { | 2812 | if ((err = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) { |
@@ -2815,6 +2814,11 @@ static int __devinit snd_cmipci_create_fm(struct cmipci *cm, long fm_port) | |||
2815 | return err; | 2814 | return err; |
2816 | } | 2815 | } |
2817 | return 0; | 2816 | return 0; |
2817 | |||
2818 | disable_fm: | ||
2819 | snd_cmipci_clear_bit(cm, CM_REG_LEGACY_CTRL, CM_FMSEL_MASK); | ||
2820 | snd_cmipci_clear_bit(cm, CM_REG_MISC_CTRL, CM_FM_EN); | ||
2821 | return 0; | ||
2818 | } | 2822 | } |
2819 | 2823 | ||
2820 | static int __devinit snd_cmipci_create(struct snd_card *card, struct pci_dev *pci, | 2824 | static int __devinit snd_cmipci_create(struct snd_card *card, struct pci_dev *pci, |
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c index 6a428b81dba6..e413da00759b 100644 --- a/sound/pci/echoaudio/echoaudio.c +++ b/sound/pci/echoaudio/echoaudio.c | |||
@@ -2033,6 +2033,8 @@ static int __devinit snd_echo_probe(struct pci_dev *pci, | |||
2033 | if (card == NULL) | 2033 | if (card == NULL) |
2034 | return -ENOMEM; | 2034 | return -ENOMEM; |
2035 | 2035 | ||
2036 | snd_card_set_dev(card, &pci->dev); | ||
2037 | |||
2036 | if ((err = snd_echo_create(card, pci, &chip)) < 0) { | 2038 | if ((err = snd_echo_create(card, pci, &chip)) < 0) { |
2037 | snd_card_free(card); | 2039 | snd_card_free(card); |
2038 | return err; | 2040 | return err; |
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c index 38977bce70e2..00ace59b05c9 100644 --- a/sound/pci/hda/patch_analog.c +++ b/sound/pci/hda/patch_analog.c | |||
@@ -523,6 +523,7 @@ static struct snd_kcontrol_new ad1986a_mixers[] = { | |||
523 | HDA_CODEC_MUTE("Aux Playback Switch", 0x16, 0x0, HDA_OUTPUT), | 523 | HDA_CODEC_MUTE("Aux Playback Switch", 0x16, 0x0, HDA_OUTPUT), |
524 | HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT), | 524 | HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT), |
525 | HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT), | 525 | HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT), |
526 | HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT), | ||
526 | HDA_CODEC_VOLUME("PC Speaker Playback Volume", 0x18, 0x0, HDA_OUTPUT), | 527 | HDA_CODEC_VOLUME("PC Speaker Playback Volume", 0x18, 0x0, HDA_OUTPUT), |
527 | HDA_CODEC_MUTE("PC Speaker Playback Switch", 0x18, 0x0, HDA_OUTPUT), | 528 | HDA_CODEC_MUTE("PC Speaker Playback Switch", 0x18, 0x0, HDA_OUTPUT), |
528 | HDA_CODEC_VOLUME("Mono Playback Volume", 0x1e, 0x0, HDA_OUTPUT), | 529 | HDA_CODEC_VOLUME("Mono Playback Volume", 0x1e, 0x0, HDA_OUTPUT), |
@@ -570,6 +571,7 @@ static struct snd_kcontrol_new ad1986a_laptop_mixers[] = { | |||
570 | HDA_CODEC_MUTE("Aux Playback Switch", 0x16, 0x0, HDA_OUTPUT), | 571 | HDA_CODEC_MUTE("Aux Playback Switch", 0x16, 0x0, HDA_OUTPUT), |
571 | HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT), | 572 | HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT), |
572 | HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT), | 573 | HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT), |
574 | HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT), | ||
573 | /* HDA_CODEC_VOLUME("PC Speaker Playback Volume", 0x18, 0x0, HDA_OUTPUT), | 575 | /* HDA_CODEC_VOLUME("PC Speaker Playback Volume", 0x18, 0x0, HDA_OUTPUT), |
574 | HDA_CODEC_MUTE("PC Speaker Playback Switch", 0x18, 0x0, HDA_OUTPUT), | 576 | HDA_CODEC_MUTE("PC Speaker Playback Switch", 0x18, 0x0, HDA_OUTPUT), |
575 | HDA_CODEC_VOLUME("Mono Playback Volume", 0x1e, 0x0, HDA_OUTPUT), | 577 | HDA_CODEC_VOLUME("Mono Playback Volume", 0x1e, 0x0, HDA_OUTPUT), |
@@ -658,6 +660,7 @@ static struct snd_kcontrol_new ad1986a_laptop_eapd_mixers[] = { | |||
658 | HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x17, 0x0, HDA_OUTPUT), | 660 | HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x17, 0x0, HDA_OUTPUT), |
659 | HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT), | 661 | HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT), |
660 | HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT), | 662 | HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT), |
663 | HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT), | ||
661 | HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x0, HDA_OUTPUT), | 664 | HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x0, HDA_OUTPUT), |
662 | HDA_CODEC_MUTE("Capture Switch", 0x12, 0x0, HDA_OUTPUT), | 665 | HDA_CODEC_MUTE("Capture Switch", 0x12, 0x0, HDA_OUTPUT), |
663 | { | 666 | { |
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 23a1c75085b5..46e93c6b9a42 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -629,10 +629,12 @@ static int cxt5045_hp_master_vol_put(struct snd_kcontrol *kcontrol, | |||
629 | static void cxt5045_hp_automute(struct hda_codec *codec) | 629 | static void cxt5045_hp_automute(struct hda_codec *codec) |
630 | { | 630 | { |
631 | struct conexant_spec *spec = codec->spec; | 631 | struct conexant_spec *spec = codec->spec; |
632 | unsigned int bits = (spec->hp_present || !spec->cur_eapd) ? 0x80 : 0; | 632 | unsigned int bits; |
633 | 633 | ||
634 | spec->hp_present = snd_hda_codec_read(codec, 0x11, 0, | 634 | spec->hp_present = snd_hda_codec_read(codec, 0x11, 0, |
635 | AC_VERB_GET_PIN_SENSE, 0) & 0x80000000; | 635 | AC_VERB_GET_PIN_SENSE, 0) & 0x80000000; |
636 | |||
637 | bits = (spec->hp_present || !spec->cur_eapd) ? 0x80 : 0; | ||
636 | snd_hda_codec_amp_update(codec, 0x10, 0, HDA_OUTPUT, 0, 0x80, bits); | 638 | snd_hda_codec_amp_update(codec, 0x10, 0, HDA_OUTPUT, 0, 0x80, bits); |
637 | snd_hda_codec_amp_update(codec, 0x10, 1, HDA_OUTPUT, 0, 0x80, bits); | 639 | snd_hda_codec_amp_update(codec, 0x10, 1, HDA_OUTPUT, 0, 0x80, bits); |
638 | } | 640 | } |
@@ -979,10 +981,12 @@ static int cxt5047_hp_master_vol_put(struct snd_kcontrol *kcontrol, | |||
979 | static void cxt5047_hp_automute(struct hda_codec *codec) | 981 | static void cxt5047_hp_automute(struct hda_codec *codec) |
980 | { | 982 | { |
981 | struct conexant_spec *spec = codec->spec; | 983 | struct conexant_spec *spec = codec->spec; |
982 | unsigned int bits = spec->hp_present || !spec->cur_eapd ? 0x80 : 0; | 984 | unsigned int bits; |
983 | 985 | ||
984 | spec->hp_present = snd_hda_codec_read(codec, 0x13, 0, | 986 | spec->hp_present = snd_hda_codec_read(codec, 0x13, 0, |
985 | AC_VERB_GET_PIN_SENSE, 0) & 0x80000000; | 987 | AC_VERB_GET_PIN_SENSE, 0) & 0x80000000; |
988 | |||
989 | bits = (spec->hp_present || !spec->cur_eapd) ? 0x80 : 0; | ||
986 | snd_hda_codec_amp_update(codec, 0x1d, 0, HDA_OUTPUT, 0, 0x80, bits); | 990 | snd_hda_codec_amp_update(codec, 0x1d, 0, HDA_OUTPUT, 0, 0x80, bits); |
987 | snd_hda_codec_amp_update(codec, 0x1d, 1, HDA_OUTPUT, 0, 0x80, bits); | 991 | snd_hda_codec_amp_update(codec, 0x1d, 1, HDA_OUTPUT, 0, 0x80, bits); |
988 | /* Mute/Unmute PCM 2 for good measure - some systems need this */ | 992 | /* Mute/Unmute PCM 2 for good measure - some systems need this */ |
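
Both Conexant hunks fix the same ordering bug: the mute mask was computed from spec->hp_present before the AC_VERB_GET_PIN_SENSE read had refreshed it, so the headphone auto-mute always acted on the previous jack state. A condensed sketch of the corrected ordering, using the cxt5045 NIDs from the first hunk (the cxt5047 variant differs only in the NIDs):

	static void hp_automute_sketch(struct hda_codec *codec)
	{
		struct conexant_spec *spec = codec->spec;
		unsigned int bits;

		/* refresh the cached jack state first ... */
		spec->hp_present = snd_hda_codec_read(codec, 0x11, 0,
				AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;

		/* ... and only then derive the mute mask from it */
		bits = (spec->hp_present || !spec->cur_eapd) ? 0x80 : 0;
		snd_hda_codec_amp_update(codec, 0x10, 0, HDA_OUTPUT, 0, 0x80, bits);
		snd_hda_codec_amp_update(codec, 0x10, 1, HDA_OUTPUT, 0, 0x80, bits);
	}
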
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 145682b78071..84d005ef30ee 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -4942,9 +4942,16 @@ static int patch_alc882(struct hda_codec *codec) | |||
4942 | alc882_cfg_tbl); | 4942 | alc882_cfg_tbl); |
4943 | 4943 | ||
4944 | if (board_config < 0 || board_config >= ALC882_MODEL_LAST) { | 4944 | if (board_config < 0 || board_config >= ALC882_MODEL_LAST) { |
4945 | printk(KERN_INFO "hda_codec: Unknown model for ALC882, " | 4945 | /* Pick up systems that don't supply PCI SSID */ |
4946 | "trying auto-probe from BIOS...\n"); | 4946 | switch (codec->subsystem_id) { |
4947 | board_config = ALC882_AUTO; | 4947 | case 0x106b0c00: /* Mac Pro */ |
4948 | board_config = ALC885_MACPRO; | ||
4949 | break; | ||
4950 | default: | ||
4951 | printk(KERN_INFO "hda_codec: Unknown model for ALC882, " | ||
4952 | "trying auto-probe from BIOS...\n"); | ||
4953 | board_config = ALC882_AUTO; | ||
4954 | } | ||
4948 | } | 4955 | } |
4949 | 4956 | ||
4950 | if (board_config == ALC882_AUTO) { | 4957 | if (board_config == ALC882_AUTO) { |
@@ -5917,8 +5924,10 @@ static struct snd_kcontrol_new alc262_base_mixer[] = { | |||
5917 | HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), | 5924 | HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), |
5918 | HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), | 5925 | HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), |
5919 | HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), | 5926 | HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), |
5927 | HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), | ||
5920 | HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT), | 5928 | HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT), |
5921 | HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT), | 5929 | HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT), |
5930 | HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT), | ||
5922 | /* HDA_CODEC_VOLUME("PC Beep Playback Volume", 0x0b, 0x05, HDA_INPUT), | 5931 | /* HDA_CODEC_VOLUME("PC Beep Playback Volume", 0x0b, 0x05, HDA_INPUT), |
5923 | HDA_CODEC_MUTE("PC Beelp Playback Switch", 0x0b, 0x05, HDA_INPUT), */ | 5932 | HDA_CODEC_MUTE("PC Beelp Playback Switch", 0x0b, 0x05, HDA_INPUT), */ |
5924 | HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0D, 0x0, HDA_OUTPUT), | 5933 | HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0D, 0x0, HDA_OUTPUT), |
@@ -5937,8 +5946,10 @@ static struct snd_kcontrol_new alc262_hippo1_mixer[] = { | |||
5937 | HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), | 5946 | HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), |
5938 | HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), | 5947 | HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), |
5939 | HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), | 5948 | HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), |
5949 | HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), | ||
5940 | HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT), | 5950 | HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT), |
5941 | HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT), | 5951 | HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT), |
5952 | HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT), | ||
5942 | /* HDA_CODEC_VOLUME("PC Beep Playback Volume", 0x0b, 0x05, HDA_INPUT), | 5953 | /* HDA_CODEC_VOLUME("PC Beep Playback Volume", 0x0b, 0x05, HDA_INPUT), |
5943 | HDA_CODEC_MUTE("PC Beelp Playback Switch", 0x0b, 0x05, HDA_INPUT), */ | 5954 | HDA_CODEC_MUTE("PC Beelp Playback Switch", 0x0b, 0x05, HDA_INPUT), */ |
5944 | /*HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0D, 0x0, HDA_OUTPUT),*/ | 5955 | /*HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0D, 0x0, HDA_OUTPUT),*/ |
@@ -5955,8 +5966,10 @@ static struct snd_kcontrol_new alc262_HP_BPC_mixer[] = { | |||
5955 | 5966 | ||
5956 | HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), | 5967 | HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), |
5957 | HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), | 5968 | HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), |
5969 | HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), | ||
5958 | HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT), | 5970 | HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT), |
5959 | HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT), | 5971 | HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT), |
5972 | HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT), | ||
5960 | HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT), | 5973 | HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT), |
5961 | HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), | 5974 | HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), |
5962 | HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT), | 5975 | HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT), |
@@ -5977,6 +5990,7 @@ static struct snd_kcontrol_new alc262_HP_BPC_WildWest_mixer[] = { | |||
5977 | HDA_CODEC_MUTE_MONO("Mono Playback Switch", 0x16, 2, 0x0, HDA_OUTPUT), | 5990 | HDA_CODEC_MUTE_MONO("Mono Playback Switch", 0x16, 2, 0x0, HDA_OUTPUT), |
5978 | HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x02, HDA_INPUT), | 5991 | HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x02, HDA_INPUT), |
5979 | HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x02, HDA_INPUT), | 5992 | HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x02, HDA_INPUT), |
5993 | HDA_CODEC_VOLUME("Front Mic Boost", 0x1a, 0, HDA_INPUT), | ||
5980 | HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x01, HDA_INPUT), | 5994 | HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x01, HDA_INPUT), |
5981 | HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x01, HDA_INPUT), | 5995 | HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x01, HDA_INPUT), |
5982 | HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT), | 5996 | HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT), |
@@ -5989,6 +6003,7 @@ static struct snd_kcontrol_new alc262_HP_BPC_WildWest_mixer[] = { | |||
5989 | static struct snd_kcontrol_new alc262_HP_BPC_WildWest_option_mixer[] = { | 6003 | static struct snd_kcontrol_new alc262_HP_BPC_WildWest_option_mixer[] = { |
5990 | HDA_CODEC_VOLUME("Rear Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), | 6004 | HDA_CODEC_VOLUME("Rear Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), |
5991 | HDA_CODEC_MUTE("Rear Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), | 6005 | HDA_CODEC_MUTE("Rear Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), |
6006 | HDA_CODEC_VOLUME("Rear Mic Boost", 0x18, 0, HDA_INPUT), | ||
5992 | { } /* end */ | 6007 | { } /* end */ |
5993 | }; | 6008 | }; |
5994 | 6009 | ||
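
The first patch_realtek.c hunk adds a codec-SSID fallback for ALC882 systems that do not supply a usable PCI subsystem ID: if the quirk-table lookup fails, the Mac Pro is now recognized by its codec SSID (0x106b0c00) instead of falling straight back to BIOS auto-probing. A condensed sketch of that decision flow; alc882_models is assumed here by analogy with the stac922x call later in this commit, and the ALC88x constants are the ones used in the hunk:

	static int alc882_pick_board_config(struct hda_codec *codec)
	{
		int cfg = snd_hda_check_board_config(codec, ALC882_MODEL_LAST,
						     alc882_models, alc882_cfg_tbl);

		if (cfg >= 0 && cfg < ALC882_MODEL_LAST)
			return cfg;		/* matched by PCI SSID */

		/* no usable PCI SSID: key off the codec SSID instead */
		switch (codec->subsystem_id) {
		case 0x106b0c00:		/* Apple Mac Pro */
			return ALC885_MACPRO;
		default:
			return ALC882_AUTO;	/* parse BIOS pin defaults */
		}
	}

The remaining alc262 hunks simply expose the existing Mic Boost amplifiers as mixer controls, mirroring the AD1986A change above.
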
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index f7ef9c5afe87..4c7b03996be9 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c | |||
@@ -59,6 +59,8 @@ enum { | |||
59 | STAC_D945GTP3, | 59 | STAC_D945GTP3, |
60 | STAC_D945GTP5, | 60 | STAC_D945GTP5, |
61 | STAC_MACMINI, | 61 | STAC_MACMINI, |
62 | STAC_MACBOOK, | ||
63 | STAC_MACBOOK_PRO, | ||
62 | STAC_922X_MODELS | 64 | STAC_922X_MODELS |
63 | }; | 65 | }; |
64 | 66 | ||
@@ -461,6 +463,8 @@ static struct snd_pci_quirk stac9200_cfg_tbl[] = { | |||
461 | "Dell Inspiron E1705/9400", STAC_REF), | 463 | "Dell Inspiron E1705/9400", STAC_REF), |
462 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01ce, | 464 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01ce, |
463 | "Dell XPS M1710", STAC_REF), | 465 | "Dell XPS M1710", STAC_REF), |
466 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01cf, | ||
467 | "Dell Precision M90", STAC_REF), | ||
464 | {} /* terminator */ | 468 | {} /* terminator */ |
465 | }; | 469 | }; |
466 | 470 | ||
@@ -519,11 +523,25 @@ static unsigned int d945gtp5_pin_configs[10] = { | |||
519 | 0x02a19320, 0x40000100, | 523 | 0x02a19320, 0x40000100, |
520 | }; | 524 | }; |
521 | 525 | ||
526 | static unsigned int macbook_pin_configs[10] = { | ||
527 | 0x0321e230, 0x03a1e020, 0x400000fd, 0x9017e110, | ||
528 | 0x400000fe, 0x0381e021, 0x1345e240, 0x13c5e22e, | ||
529 | 0x400000fc, 0x400000fb, | ||
530 | }; | ||
531 | |||
532 | static unsigned int macbook_pro_pin_configs[10] = { | ||
533 | 0x0221401f, 0x90a70120, 0x01813024, 0x01014010, | ||
534 | 0x400000fd, 0x01016011, 0x1345e240, 0x13c5e22e, | ||
535 | 0x400000fc, 0x400000fb, | ||
536 | }; | ||
537 | |||
522 | static unsigned int *stac922x_brd_tbl[STAC_922X_MODELS] = { | 538 | static unsigned int *stac922x_brd_tbl[STAC_922X_MODELS] = { |
523 | [STAC_D945_REF] = ref922x_pin_configs, | 539 | [STAC_D945_REF] = ref922x_pin_configs, |
524 | [STAC_D945GTP3] = d945gtp3_pin_configs, | 540 | [STAC_D945GTP3] = d945gtp3_pin_configs, |
525 | [STAC_D945GTP5] = d945gtp5_pin_configs, | 541 | [STAC_D945GTP5] = d945gtp5_pin_configs, |
526 | [STAC_MACMINI] = d945gtp5_pin_configs, | 542 | [STAC_MACMINI] = d945gtp5_pin_configs, |
543 | [STAC_MACBOOK] = macbook_pin_configs, | ||
544 | [STAC_MACBOOK_PRO] = macbook_pro_pin_configs, | ||
527 | }; | 545 | }; |
528 | 546 | ||
529 | static const char *stac922x_models[STAC_922X_MODELS] = { | 547 | static const char *stac922x_models[STAC_922X_MODELS] = { |
@@ -531,6 +549,8 @@ static const char *stac922x_models[STAC_922X_MODELS] = { | |||
531 | [STAC_D945GTP5] = "5stack", | 549 | [STAC_D945GTP5] = "5stack", |
532 | [STAC_D945GTP3] = "3stack", | 550 | [STAC_D945GTP3] = "3stack", |
533 | [STAC_MACMINI] = "macmini", | 551 | [STAC_MACMINI] = "macmini", |
552 | [STAC_MACBOOK] = "macbook", | ||
553 | [STAC_MACBOOK_PRO] = "macbook-pro", | ||
534 | }; | 554 | }; |
535 | 555 | ||
536 | static struct snd_pci_quirk stac922x_cfg_tbl[] = { | 556 | static struct snd_pci_quirk stac922x_cfg_tbl[] = { |
@@ -1864,6 +1884,18 @@ static int patch_stac922x(struct hda_codec *codec) | |||
1864 | spec->board_config = snd_hda_check_board_config(codec, STAC_922X_MODELS, | 1884 | spec->board_config = snd_hda_check_board_config(codec, STAC_922X_MODELS, |
1865 | stac922x_models, | 1885 | stac922x_models, |
1866 | stac922x_cfg_tbl); | 1886 | stac922x_cfg_tbl); |
1887 | if (spec->board_config == STAC_MACMINI) { | ||
1888 | spec->gpio_mute = 1; | ||
1889 | /* Intel Macs have all same PCI SSID, so we need to check | ||
1890 | * codec SSID to distinguish the exact models | ||
1891 | */ | ||
1892 | switch (codec->subsystem_id) { | ||
1893 | case 0x106b1e00: | ||
1894 | spec->board_config = STAC_MACBOOK_PRO; | ||
1895 | break; | ||
1896 | } | ||
1897 | } | ||
1898 | |||
1867 | again: | 1899 | again: |
1868 | if (spec->board_config < 0) { | 1900 | if (spec->board_config < 0) { |
1869 | snd_printdd(KERN_INFO "hda_codec: Unknown model for STAC922x, " | 1901 | snd_printdd(KERN_INFO "hda_codec: Unknown model for STAC922x, " |
@@ -1904,9 +1936,6 @@ static int patch_stac922x(struct hda_codec *codec) | |||
1904 | return err; | 1936 | return err; |
1905 | } | 1937 | } |
1906 | 1938 | ||
1907 | if (spec->board_config == STAC_MACMINI) | ||
1908 | spec->gpio_mute = 1; | ||
1909 | |||
1910 | codec->patch_ops = stac92xx_patch_ops; | 1939 | codec->patch_ops = stac92xx_patch_ops; |
1911 | 1940 | ||
1912 | return 0; | 1941 | return 0; |
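
The patch_sigmatel.c changes add pin-config tables and model names for the Intel Macs and, because they all report the same PCI SSID, move the Mac Mini handling into a codec-SSID check so the MacBook Pro (codec SSID 0x106b1e00) is detected automatically. The other new entry stays reachable through the driver's model override; a hedged usage example (the option name is the standard snd-hda-intel module parameter, the file location depends on the distribution):

	# /etc/modprobe.conf (illustrative): force the new MacBook pin configs
	options snd-hda-intel model=macbook

model=macbook-pro selects the other new table in the same way.
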
diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c index 5e1d5d2b2850..952625dead58 100644 --- a/sound/pci/riptide/riptide.c +++ b/sound/pci/riptide/riptide.c | |||
@@ -1919,6 +1919,8 @@ snd_riptide_create(struct snd_card *card, struct pci_dev *pci, | |||
1919 | return err; | 1919 | return err; |
1920 | } | 1920 | } |
1921 | 1921 | ||
1922 | snd_card_set_dev(card, &pci->dev); | ||
1923 | |||
1922 | *rchip = chip; | 1924 | *rchip = chip; |
1923 | return 0; | 1925 | return 0; |
1924 | } | 1926 | } |
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c index e0215aca1193..6e95857e4e67 100644 --- a/sound/pci/rme9652/hdspm.c +++ b/sound/pci/rme9652/hdspm.c | |||
@@ -4468,6 +4468,8 @@ static int __devinit snd_hdspm_probe(struct pci_dev *pci, | |||
4468 | hdspm->dev = dev; | 4468 | hdspm->dev = dev; |
4469 | hdspm->pci = pci; | 4469 | hdspm->pci = pci; |
4470 | 4470 | ||
4471 | snd_card_set_dev(card, &pci->dev); | ||
4472 | |||
4471 | if ((err = | 4473 | if ((err = |
4472 | snd_hdspm_create(card, hdspm, precise_ptr[dev], | 4474 | snd_hdspm_create(card, hdspm, precise_ptr[dev], |
4473 | enable_monitor[dev])) < 0) { | 4475 | enable_monitor[dev])) < 0) { |
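
The riptide and hdspm hunks apply the same one-line fix: the freshly created snd_card was never associated with its PCI device, so the card had no parent device to report. A minimal, hedged sketch of the probe ordering the fix establishes; mychip_create() and the error handling are placeholders, not code from this commit:

	#include <linux/init.h>
	#include <linux/pci.h>
	#include <sound/core.h>
	#include <sound/initval.h>

	static int __devinit mychip_probe(struct pci_dev *pci,
					  const struct pci_device_id *pci_id)
	{
		struct snd_card *card;
		int err;

		card = snd_card_new(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
				    THIS_MODULE, 0);
		if (!card)
			return -ENOMEM;

		/* the call both hunks add: tie the card to its PCI device
		 * before any chip-specific setup that may rely on it
		 */
		snd_card_set_dev(card, &pci->dev);

		err = mychip_create(card, pci);		/* placeholder */
		if (err < 0)
			goto error;

		err = snd_card_register(card);
		if (err < 0)
			goto error;

		pci_set_drvdata(pci, card);
		return 0;

	error:
		snd_card_free(card);
		return err;
	}
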
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c index 92a64871bcd0..ee7a691a9ba1 100644 --- a/sound/soc/codecs/wm9712.c +++ b/sound/soc/codecs/wm9712.c | |||
@@ -39,7 +39,7 @@ static int ac97_write(struct snd_soc_codec *codec, | |||
39 | */ | 39 | */ |
40 | static const u16 wm9712_reg[] = { | 40 | static const u16 wm9712_reg[] = { |
41 | 0x6174, 0x8000, 0x8000, 0x8000, // 6 | 41 | 0x6174, 0x8000, 0x8000, 0x8000, // 6 |
42 | 0xf0f0, 0xaaa0, 0xc008, 0x6808, // e | 42 | 0x0f0f, 0xaaa0, 0xc008, 0x6808, // e |
43 | 0xe808, 0xaaa0, 0xad00, 0x8000, // 16 | 43 | 0xe808, 0xaaa0, 0xad00, 0x8000, // 16 |
44 | 0xe808, 0x3000, 0x8000, 0x0000, // 1e | 44 | 0xe808, 0x3000, 0x8000, 0x0000, // 1e |
45 | 0x0000, 0x0000, 0x0000, 0x000f, // 26 | 45 | 0x0000, 0x0000, 0x0000, 0x000f, // 26 |
@@ -96,6 +96,7 @@ SOC_DOUBLE("Speaker Playback Volume", AC97_MASTER, 8, 0, 31, 1), | |||
96 | SOC_SINGLE("Speaker Playback Switch", AC97_MASTER, 15, 1, 1), | 96 | SOC_SINGLE("Speaker Playback Switch", AC97_MASTER, 15, 1, 1), |
97 | SOC_DOUBLE("Headphone Playback Volume", AC97_HEADPHONE, 8, 0, 31, 1), | 97 | SOC_DOUBLE("Headphone Playback Volume", AC97_HEADPHONE, 8, 0, 31, 1), |
98 | SOC_SINGLE("Headphone Playback Switch", AC97_HEADPHONE,15, 1, 1), | 98 | SOC_SINGLE("Headphone Playback Switch", AC97_HEADPHONE,15, 1, 1), |
99 | SOC_DOUBLE("PCM Playback Volume", AC97_PCM, 8, 0, 31, 1), | ||
99 | 100 | ||
100 | SOC_SINGLE("Speaker Playback ZC Switch", AC97_MASTER, 7, 1, 0), | 101 | SOC_SINGLE("Speaker Playback ZC Switch", AC97_MASTER, 7, 1, 0), |
101 | SOC_SINGLE("Speaker Playback Invert Switch", AC97_MASTER, 6, 1, 0), | 102 | SOC_SINGLE("Speaker Playback Invert Switch", AC97_MASTER, 6, 1, 0), |