74 files changed, 728 insertions, 699 deletions
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 6a451f47d40f..c3b1430cf603 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -304,3 +304,15 @@ Why: The code says it was obsolete when it was written in 2001.
 Who: Richard Purdie <rpurdie@rpsys.net>
 
 ---------------------------
+
+What: Wireless extensions over netlink (CONFIG_NET_WIRELESS_RTNETLINK)
+When: with the merge of wireless-dev, 2.6.22 or later
+Why:  The option/code is
+       * not enabled on most kernels
+       * not required by any userspace tools (except an experimental one,
+         and even there only for some parts, others use ioctl)
+       * pointless since wext is no longer evolving and the ioctl
+         interface needs to be kept
+Who: Johannes Berg <johannes@sipsolutions.net>
+
+---------------------------
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 72af5de1effb..5484ab5efd4f 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -41,6 +41,7 @@ Table of Contents
  2.11 /proc/sys/fs/mqueue - POSIX message queues filesystem
  2.12 /proc/<pid>/oom_adj - Adjust the oom-killer score
  2.13 /proc/<pid>/oom_score - Display current oom-killer score
+ 2.14 /proc/<pid>/io - Display the IO accounting fields
 
 ------------------------------------------------------------------------------
 Preface
@@ -1990,3 +1991,107 @@ need to recompile the kernel, or even to reboot the system. The files in the
 command to write value into these files, thereby changing the default settings
 of the kernel.
 ------------------------------------------------------------------------------
+
+2.14 /proc/<pid>/io - Display the IO accounting fields
+-------------------------------------------------------
+
+This file contains IO statistics for each running process.
+
+Example
+-------
+
+test:/tmp # dd if=/dev/zero of=/tmp/test.dat &
+[1] 3828
+
+test:/tmp # cat /proc/3828/io
+rchar: 323934931
+wchar: 323929600
+syscr: 632687
+syscw: 632675
+read_bytes: 0
+write_bytes: 323932160
+cancelled_write_bytes: 0
+
+
+Description
+-----------
+
+rchar
+-----
+
+I/O counter: chars read
+The number of bytes which this task has caused to be read from storage. This
+is simply the sum of bytes which this process passed to read() and pread().
+It includes things like tty IO and it is unaffected by whether or not actual
+physical disk IO was required (the read might have been satisfied from
+pagecache).
+
+
+wchar
+-----
+
+I/O counter: chars written
+The number of bytes which this task has caused, or shall cause, to be written
+to disk. Similar caveats apply here as with rchar.
+
+
+syscr
+-----
+
+I/O counter: read syscalls
+Attempt to count the number of read I/O operations, i.e. syscalls like read()
+and pread().
+
+
+syscw
+-----
+
+I/O counter: write syscalls
+Attempt to count the number of write I/O operations, i.e. syscalls like
+write() and pwrite().
+
+
+read_bytes
+----------
+
+I/O counter: bytes read
+Attempt to count the number of bytes which this process really did cause to
+be fetched from the storage layer. Done at the submit_bio() level, so it is
+accurate for block-backed filesystems. <please add status regarding NFS and
+CIFS at a later time>
+
+
+write_bytes
+-----------
+
+I/O counter: bytes written
+Attempt to count the number of bytes which this process caused to be sent to
+the storage layer. This is done at page-dirtying time.
+
+
+cancelled_write_bytes
+---------------------
+
+The big inaccuracy here is truncate. If a process writes 1MB to a file and
+then deletes the file, it will in fact perform no writeout. But it will have
+been accounted as having caused 1MB of write.
+In other words: The number of bytes which this process caused to not happen,
+by truncating pagecache. A task can cause "negative" IO too. If this task
+truncates some dirty pagecache, some IO which another task has been accounted
+for (in its write_bytes) will not be happening. We _could_ just subtract that
+from the truncating task's write_bytes, but there is information loss in doing
+that.
+
+
+Note
+----
+
+At its current implementation state, this is a bit racy on 32-bit machines: if
+process A reads process B's /proc/pid/io while process B is updating one of
+those 64-bit counters, process A could see an intermediate result.
+
+
+More information about this can be found within the taskstats documentation in
+Documentation/accounting.
+
+------------------------------------------------------------------------------
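
The fields above can be read back from userspace with nothing more than a
plain file read. Below is a minimal standalone sketch (editor's illustration,
not part of the patch; error handling is deliberately thin) that dumps the
counters for the calling process:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f = fopen("/proc/self/io", "r");
	char line[256];

	if (!f) {
		/* the kernel must provide this accounting feature */
		perror("fopen /proc/self/io");
		return EXIT_FAILURE;
	}
	/* each line is "<field>: <value>", e.g. "rchar: 323934931" */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return EXIT_SUCCESS;
}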
diff --git a/MAINTAINERS b/MAINTAINERS
index 1dfba85ca7b5..9993b9009415 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2553,16 +2553,8 @@ L: i2c@lm-sensors.org
 S: Maintained
 
 PARALLEL PORT SUPPORT
-P: Phil Blundell
-M: philb@gnu.org
-P: Tim Waugh
-M: tim@cyberelk.net
-P: David Campbell
-P: Andrea Arcangeli
-M: andrea@suse.de
 L: linux-parport@lists.infradead.org
-W: http://people.redhat.com/twaugh/parport/
-S: Maintained
+S: Orphan
 
 PARIDE DRIVERS FOR PARALLEL PORT IDE DEVICES
 P: Tim Waugh
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index ac2ffdcfbbb4..e7baca29f3fb 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -21,6 +21,10 @@ config ARM
 config SYS_SUPPORTS_APM_EMULATION
 	bool
 
+config GENERIC_GPIO
+	bool
+	default n
+
 config GENERIC_TIME
 	bool
 	default n
@@ -163,6 +167,7 @@ config ARCH_VERSATILE
 
 config ARCH_AT91
 	bool "Atmel AT91"
+	select GENERIC_GPIO
 	help
 	  This enables support for systems based on the Atmel AT91RM9200
 	  and AT91SAM9xxx processors.
@@ -304,6 +309,7 @@ config ARCH_PXA
 	bool "PXA2xx-based"
 	depends on MMU
 	select ARCH_MTD_XIP
+	select GENERIC_GPIO
 	select GENERIC_TIME
 	help
 	  Support for Intel's PXA2XX processor line.
@@ -325,11 +331,13 @@ config ARCH_SA1100
 	select ISA
 	select ARCH_DISCONTIGMEM_ENABLE
 	select ARCH_MTD_XIP
+	select GENERIC_GPIO
 	help
 	  Support for StrongARM 11x0 based boards.
 
 config ARCH_S3C2410
 	bool "Samsung S3C2410, S3C2412, S3C2413, S3C2440, S3C2442, S3C2443"
+	select GENERIC_GPIO
 	help
 	  Samsung S3C2410X CPU based systems, such as the Simtec Electronics
 	  BAST (<http://www.simtec.co.uk/products/EB110ITX/>), the IPAQ 1940 or
@@ -354,6 +362,7 @@ config ARCH_LH7A40X
 
 config ARCH_OMAP
 	bool "TI OMAP"
+	select GENERIC_GPIO
 	help
 	  Support for TI's OMAP platform (OMAP1 and OMAP2).
 
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index bb059a4e1df9..ce4013aee59b 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -22,6 +22,10 @@ config AVR32
 config UID16
 	bool
 
+config GENERIC_GPIO
+	bool
+	default y
+
 config GENERIC_HARDIRQS
 	bool
 	default y
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 2f7672545fe9..cee4ff679d3c 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -220,11 +220,11 @@ config PARAVIRT
 
 config VMI
 	bool "VMI Paravirt-ops support"
-	depends on PARAVIRT && !NO_HZ
-	default y
+	depends on PARAVIRT
 	help
-	  VMI provides a paravirtualized interface to multiple hypervisors
-	  include VMware ESX server and Xen by connecting to a ROM module
+	  VMI provides a paravirtualized interface to the VMware ESX server
+	  (it could be used by other hypervisors in theory too, but is not
+	  at the moment), by linking the kernel to a GPL-ed ROM module
 	  provided by the hypervisor.
 
 config ACPI_SRAT
@@ -1287,12 +1287,3 @@ config X86_TRAMPOLINE
 config KTIME_SCALAR
 	bool
 	default y
-
-config NO_IDLE_HZ
-	bool
-	depends on PARAVIRT
-	default y
-	help
-	  Switches the regular HZ timer off when the system is going idle.
-	  This helps a hypervisor detect that the Linux system is idle,
-	  reducing the overhead of idle systems.
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 7a2c9cbdb511..2383bcf18c5d 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -493,8 +493,15 @@ void __init setup_boot_APIC_clock(void)
 		/* No broadcast on UP ! */
 		if (num_possible_cpus() == 1)
 			return;
-	} else
-		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
+	} else {
+		/*
+		 * If nmi_watchdog is set to IO_APIC, we need the
+		 * PIT/HPET going. Otherwise register lapic as a dummy
+		 * device.
+		 */
+		if (nmi_watchdog != NMI_IO_APIC)
+			lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
+	}
 
 	/* Setup the lapic or request the broadcast */
 	setup_APIC_timer();
diff --git a/arch/i386/kernel/hpet.c b/arch/i386/kernel/hpet.c
index e1006b7acc9e..f3ab61ee7498 100644
--- a/arch/i386/kernel/hpet.c
+++ b/arch/i386/kernel/hpet.c
@@ -201,12 +201,30 @@ static int hpet_next_event(unsigned long delta,
 }
 
 /*
+ * Clock source related code
+ */
+static cycle_t read_hpet(void)
+{
+	return (cycle_t)hpet_readl(HPET_COUNTER);
+}
+
+static struct clocksource clocksource_hpet = {
+	.name = "hpet",
+	.rating = 250,
+	.read = read_hpet,
+	.mask = HPET_MASK,
+	.shift = HPET_SHIFT,
+	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+/*
  * Try to setup the HPET timer
 */
 int __init hpet_enable(void)
 {
 	unsigned long id;
 	uint64_t hpet_freq;
+	u64 tmp;
 
 	if (!is_hpet_capable())
 		return 0;
@@ -253,6 +271,25 @@ int __init hpet_enable(void)
 	/* Start the counter */
 	hpet_start_counter();
 
+	/* Initialize and register HPET clocksource
+	 *
+	 * hpet period is in femto seconds per cycle
+	 * so we need to convert this to ns/cyc units
+	 * approximated by mult/2^shift
+	 *
+	 *  fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift
+	 *  fsec/cyc * 1ns/1000000fsec * 2^shift = mult
+	 *  fsec/cyc * 2^shift * 1nsec/1000000fsec = mult
+	 *  (fsec/cyc << shift)/1000000 = mult
+	 *  (hpet_period << shift)/FSEC_PER_NSEC = mult
+	 */
+	tmp = (u64)hpet_period << HPET_SHIFT;
+	do_div(tmp, FSEC_PER_NSEC);
+	clocksource_hpet.mult = (u32)tmp;
+
+	clocksource_register(&clocksource_hpet);
+
+
 	if (id & HPET_ID_LEGSUP) {
 		hpet_enable_int();
 		hpet_reserve_platform_timers(id);
@@ -273,49 +310,6 @@ out_nohpet:
 	return 0;
 }
 
-/*
- * Clock source related code
- */
-static cycle_t read_hpet(void)
-{
-	return (cycle_t)hpet_readl(HPET_COUNTER);
-}
-
-static struct clocksource clocksource_hpet = {
-	.name = "hpet",
-	.rating = 250,
-	.read = read_hpet,
-	.mask = HPET_MASK,
-	.shift = HPET_SHIFT,
-	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-static int __init init_hpet_clocksource(void)
-{
-	u64 tmp;
-
-	if (!hpet_virt_address)
-		return -ENODEV;
-
-	/*
-	 * hpet period is in femto seconds per cycle
-	 * so we need to convert this to ns/cyc units
-	 * approximated by mult/2^shift
-	 *
-	 *  fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift
-	 *  fsec/cyc * 1ns/1000000fsec * 2^shift = mult
-	 *  fsec/cyc * 2^shift * 1nsec/1000000fsec = mult
-	 *  (fsec/cyc << shift)/1000000 = mult
-	 *  (hpet_period << shift)/FSEC_PER_NSEC = mult
-	 */
-	tmp = (u64)hpet_period << HPET_SHIFT;
-	do_div(tmp, FSEC_PER_NSEC);
-	clocksource_hpet.mult = (u32)tmp;
-
-	return clocksource_register(&clocksource_hpet);
-}
-
-module_init(init_hpet_clocksource);
 
 #ifdef CONFIG_HPET_EMULATE_RTC
 
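
The mult/shift conversion moved into hpet_enable() above is easier to see with
concrete numbers. A standalone restatement (editor's illustration, not patch
code; the 14.318 MHz figure is the usual HPET frequency, giving hpet_period =
69841279 femtoseconds per cycle):

#include <stdint.h>

#define EXAMPLE_SHIFT 22	/* HPET_SHIFT in the patch */

static uint32_t example_hpet_mult(uint64_t period_fsec)
{
	/* (hpet_period << shift) / FSEC_PER_NSEC, per the comment above */
	return (uint32_t)((period_fsec << EXAMPLE_SHIFT) / 1000000);
}

/*
 * example_hpet_mult(69841279) == 292935555, so later conversions compute
 * nsec = (cycles * 292935555) >> 22, i.e. roughly 69.84 ns per HPET cycle.
 */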
diff --git a/arch/i386/kernel/i8253.c b/arch/i386/kernel/i8253.c
index a6bc7bb38834..5cbb776b3089 100644
--- a/arch/i386/kernel/i8253.c
+++ b/arch/i386/kernel/i8253.c
@@ -195,4 +195,4 @@ static int __init init_pit_clocksource(void)
 	clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE, 20);
 	return clocksource_register(&clocksource_pit);
 }
-module_init(init_pit_clocksource);
+arch_initcall(init_pit_clocksource);
diff --git a/arch/i386/kernel/paravirt.c b/arch/i386/kernel/paravirt.c
index c156ecfa3872..2ec331e03fa9 100644
--- a/arch/i386/kernel/paravirt.c
+++ b/arch/i386/kernel/paravirt.c
@@ -32,6 +32,7 @@
 #include <asm/fixmap.h>
 #include <asm/apic.h>
 #include <asm/tlbflush.h>
+#include <asm/timer.h>
 
 /* nop stub */
 static void native_nop(void)
@@ -493,7 +494,7 @@ struct paravirt_ops paravirt_ops = {
 	.memory_setup = machine_specific_memory_setup,
 	.get_wallclock = native_get_wallclock,
 	.set_wallclock = native_set_wallclock,
-	.time_init = time_init_hook,
+	.time_init = hpet_time_init,
 	.init_IRQ = native_init_IRQ,
 
 	.cpuid = native_cpuid,
@@ -520,6 +521,8 @@ struct paravirt_ops paravirt_ops = {
 	.write_msr = native_write_msr,
 	.read_tsc = native_read_tsc,
 	.read_pmc = native_read_pmc,
+	.get_scheduled_cycles = native_read_tsc,
+	.get_cpu_khz = native_calculate_cpu_khz,
 	.load_tr_desc = native_load_tr_desc,
 	.set_ldt = native_set_ldt,
 	.load_gdt = native_load_gdt,
@@ -535,7 +538,6 @@ struct paravirt_ops paravirt_ops = {
 
 	.set_iopl_mask = native_set_iopl_mask,
 	.io_delay = native_io_delay,
-	.const_udelay = __const_udelay,
 
 #ifdef CONFIG_X86_LOCAL_APIC
 	.apic_write = native_apic_write,
@@ -550,6 +552,8 @@ struct paravirt_ops paravirt_ops = {
 	.flush_tlb_kernel = native_flush_tlb_global,
 	.flush_tlb_single = native_flush_tlb_single,
 
+	.map_pt_hook = (void *)native_nop,
+
 	.alloc_pt = (void *)native_nop,
 	.alloc_pd = (void *)native_nop,
 	.alloc_pd_clone = (void *)native_nop,
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 122623dcc6e1..698c24fe482e 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -657,5 +657,4 @@ void __init setup_arch(char **cmdline_p)
 		conswitchp = &dummy_con;
 #endif
 #endif
-	tsc_init();
 }
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 48bfcaa13ecc..9b0dd2744c82 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -33,11 +33,6 @@
  *	Dave Jones	:	Report invalid combinations of Athlon CPUs.
  *	Rusty Russell	:	Hacked into shape for new "hotplug" boot process. */
 
-
-/* SMP boot always wants to use real time delay to allow sufficient time for
- * the APs to come online */
-#define USE_REAL_TIME_DELAY
-
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index a5350059557a..94e5cb091104 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -262,14 +262,23 @@ void notify_arch_cmos_timer(void)
 
 extern void (*late_time_init)(void);
 /* Duplicate of time_init() below, with hpet_enable part added */
-static void __init hpet_time_init(void)
+void __init hpet_time_init(void)
 {
 	if (!hpet_enable())
 		setup_pit_timer();
-	do_time_init();
+	time_init_hook();
 }
 
+/*
+ * This is called directly from init code; we must delay timer setup in the
+ * HPET case as we can't make the decision to turn on HPET this early in the
+ * boot process.
+ *
+ * The chosen time_init function will usually be hpet_time_init, above, but
+ * in the case of virtual hardware, an alternative function may be substituted.
+ */
 void __init time_init(void)
 {
-	late_time_init = hpet_time_init;
+	tsc_init();
+	late_time_init = choose_time_init();
 }
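
For reference, the choose_time_init() hook used above lives in asm/timer.h,
which is not part of this hunk. A sketch of the shape it is assumed to take
(paravirt kernels indirect through paravirt_ops.time_init, which paravirt.c
above now defaults to hpet_time_init; everything else calls it directly):

/* editor's sketch of the assumed asm/timer.h definition, not patch code */
#ifdef CONFIG_PARAVIRT
#define choose_time_init()	(paravirt_ops.time_init)
#else
#define choose_time_init()	hpet_time_init
#endif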
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index 3082a418635c..875d8a6ecc02 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -14,6 +14,7 @@
 #include <asm/delay.h>
 #include <asm/tsc.h>
 #include <asm/io.h>
+#include <asm/timer.h>
 
 #include "mach_timer.h"
 
@@ -102,9 +103,6 @@ unsigned long long sched_clock(void)
 {
 	unsigned long long this_offset;
 
-	if (unlikely(custom_sched_clock))
-		return (*custom_sched_clock)();
-
 	/*
 	 * Fall back to jiffies if there's no TSC available:
 	 */
@@ -113,13 +111,13 @@ unsigned long long sched_clock(void)
 	return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
 
 	/* read the Time Stamp Counter: */
-	rdtscll(this_offset);
+	get_scheduled_cycles(this_offset);
 
 	/* return the value in ns */
 	return cycles_2_ns(this_offset);
 }
 
-static unsigned long calculate_cpu_khz(void)
+unsigned long native_calculate_cpu_khz(void)
 {
 	unsigned long long start, end;
 	unsigned long count;
@@ -186,34 +184,6 @@ int recalibrate_cpu_khz(void)
 
 EXPORT_SYMBOL(recalibrate_cpu_khz);
 
-void __init tsc_init(void)
-{
-	if (!cpu_has_tsc || tsc_disable)
-		goto out_no_tsc;
-
-	cpu_khz = calculate_cpu_khz();
-	tsc_khz = cpu_khz;
-
-	if (!cpu_khz)
-		goto out_no_tsc;
-
-	printk("Detected %lu.%03lu MHz processor.\n",
-	       (unsigned long)cpu_khz / 1000,
-	       (unsigned long)cpu_khz % 1000);
-
-	set_cyc2ns_scale(cpu_khz);
-	use_tsc_delay();
-	return;
-
-out_no_tsc:
-	/*
-	 * Set the tsc_disable flag if there's no TSC support, this
-	 * makes it a fast flag for the kernel to see whether it
-	 * should be using the TSC.
-	 */
-	tsc_disable = 1;
-}
-
 #ifdef CONFIG_CPU_FREQ
 
 /*
@@ -383,28 +353,47 @@ static void __init check_geode_tsc_reliable(void)
 static inline void check_geode_tsc_reliable(void) { }
 #endif
 
-static int __init init_tsc_clocksource(void)
-{
-
-	if (cpu_has_tsc && tsc_khz && !tsc_disable) {
-		/* check blacklist */
-		dmi_check_system(bad_tsc_dmi_table);
-
-		unsynchronized_tsc();
-		check_geode_tsc_reliable();
-		current_tsc_khz = tsc_khz;
-		clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
-							clocksource_tsc.shift);
-		/* lower the rating if we already know its unstable: */
-		if (check_tsc_unstable()) {
-			clocksource_tsc.rating = 0;
-			clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
-		}
-
-		return clocksource_register(&clocksource_tsc);
-	}
-
-	return 0;
-}
-
-module_init(init_tsc_clocksource);
+
+void __init tsc_init(void)
+{
+	if (!cpu_has_tsc || tsc_disable)
+		goto out_no_tsc;
+
+	cpu_khz = calculate_cpu_khz();
+	tsc_khz = cpu_khz;
+
+	if (!cpu_khz)
+		goto out_no_tsc;
+
+	printk("Detected %lu.%03lu MHz processor.\n",
+	       (unsigned long)cpu_khz / 1000,
+	       (unsigned long)cpu_khz % 1000);
+
+	set_cyc2ns_scale(cpu_khz);
+	use_tsc_delay();
+
+	/* Check and install the TSC clocksource */
+	dmi_check_system(bad_tsc_dmi_table);
+
+	unsynchronized_tsc();
+	check_geode_tsc_reliable();
+	current_tsc_khz = tsc_khz;
+	clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
+						clocksource_tsc.shift);
+	/* lower the rating if we already know its unstable: */
+	if (check_tsc_unstable()) {
+		clocksource_tsc.rating = 0;
+		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
+	}
+	clocksource_register(&clocksource_tsc);
+
+	return;
+
+out_no_tsc:
+	/*
+	 * Set the tsc_disable flag if there's no TSC support, this
+	 * makes it a fast flag for the kernel to see whether it
+	 * should be using the TSC.
+	 */
+	tsc_disable = 1;
+}
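
sched_clock() above leans on cycles_2_ns(), the standard fixed-point scaling
trick. A self-contained sketch of what set_cyc2ns_scale() and cycles_2_ns()
amount to (editor's illustration; the 2^10 scale factor matches the i386 tree
of this era but should be treated as an assumption here):

#include <stdint.h>

#define CYC2NS_SCALE_FACTOR 10	/* scale is a 2^10 fixed-point value */

static uint64_t cyc2ns_scale;

static void example_set_cyc2ns_scale(uint64_t cpu_khz)
{
	/* ns per cycle = 1000000 / cpu_khz, kept with 2^10 bits of precision */
	cyc2ns_scale = ((uint64_t)1000000 << CYC2NS_SCALE_FACTOR) / cpu_khz;
}

static uint64_t example_cycles_2_ns(uint64_t cyc)
{
	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}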
diff --git a/arch/i386/kernel/vmi.c b/arch/i386/kernel/vmi.c
index bb5a7abf949c..fbf45fa08320 100644
--- a/arch/i386/kernel/vmi.c
+++ b/arch/i386/kernel/vmi.c
@@ -35,6 +35,7 @@
 #include <asm/processor.h>
 #include <asm/timer.h>
 #include <asm/vmi_time.h>
+#include <asm/kmap_types.h>
 
 /* Convenient for calling VMI functions indirectly in the ROM */
 typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
@@ -48,12 +49,13 @@ typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
 
 static struct vrom_header *vmi_rom;
 static int license_gplok;
-static int disable_nodelay;
 static int disable_pge;
 static int disable_pse;
 static int disable_sep;
 static int disable_tsc;
 static int disable_mtrr;
+static int disable_noidle;
+static int disable_vmi_timer;
 
 /* Cached VMI operations */
 struct {
@@ -255,7 +257,6 @@ static void vmi_nop(void)
 }
 
 /* For NO_IDLE_HZ, we stop the clock when halting the kernel */
-#ifdef CONFIG_NO_IDLE_HZ
 static fastcall void vmi_safe_halt(void)
 {
 	int idle = vmi_stop_hz_timer();
@@ -266,7 +267,6 @@ static fastcall void vmi_safe_halt(void)
 		local_irq_enable();
 	}
 }
-#endif
 
 #ifdef CONFIG_DEBUG_PAGE_TYPE
 
@@ -371,6 +371,24 @@ static void vmi_check_page_type(u32 pfn, int type)
 #define vmi_check_page_type(p,t) do { } while (0)
 #endif
 
+static void vmi_map_pt_hook(int type, pte_t *va, u32 pfn)
+{
+	/*
+	 * Internally, the VMI ROM must map virtual addresses to physical
+	 * addresses for processing MMU updates. By the time MMU updates
+	 * are issued, this information is typically already lost.
+	 * Fortunately, the VMI provides a cache of mapping slots for active
+	 * page tables.
+	 *
+	 * We use slot zero for the linear mapping of physical memory, and
+	 * in HIGHPTE kernels, slot 1 and 2 for KM_PTE0 and KM_PTE1.
+	 *
+	 *  args:                 SLOT                 VA    COUNT PFN
+	 */
+	BUG_ON(type != KM_PTE0 && type != KM_PTE1);
+	vmi_ops.set_linear_mapping((type - KM_PTE0)+1, (u32)va, 1, pfn);
+}
+
 static void vmi_allocate_pt(u32 pfn)
 {
 	vmi_set_page_type(pfn, VMI_PAGE_L1);
@@ -508,13 +526,14 @@ void vmi_pmd_clear(pmd_t *pmd)
 #endif
 
 #ifdef CONFIG_SMP
-struct vmi_ap_state ap;
 extern void setup_pda(void);
 
-static void __init /* XXX cpu hotplug */
+static void __devinit
 vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
 		     unsigned long start_esp)
 {
+	struct vmi_ap_state ap;
+
 	/* Default everything to zero. This is fine for most GPRs. */
 	memset(&ap, 0, sizeof(struct vmi_ap_state));
 
@@ -553,7 +572,7 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
 	/* Protected mode, paging, AM, WP, NE, MP. */
 	ap.cr0 = 0x80050023;
 	ap.cr4 = mmu_cr4_features;
-	vmi_ops.set_initial_ap_state(__pa(&ap), phys_apicid);
+	vmi_ops.set_initial_ap_state((u32)&ap, phys_apicid);
 }
 #endif
 
@@ -645,12 +664,12 @@ static inline int __init probe_vmi_rom(void)
 void vmi_bringup(void)
 {
 	/* We must establish the lowmem mapping for MMU ops to work */
-	if (vmi_rom)
+	if (vmi_ops.set_linear_mapping)
 		vmi_ops.set_linear_mapping(0, __PAGE_OFFSET, max_low_pfn, 0);
 }
 
 /*
- * Return a pointer to the VMI function or a NOP stub
+ * Return a pointer to a VMI function or NULL if unimplemented
 */
 static void *vmi_get_function(int vmicall)
 {
@@ -661,12 +680,13 @@ static void *vmi_get_function(int vmicall)
 	if (rel->type == VMI_RELOCATION_CALL_REL)
 		return (void *)rel->eip;
 	else
-		return (void *)vmi_nop;
+		return NULL;
 }
 
 /*
  * Helper macro for making the VMI paravirt-ops fill code readable.
- * For unimplemented operations, fall back to default.
+ * For unimplemented operations, fall back to default, unless nop
+ * is returned by the ROM.
 */
 #define para_fill(opname, vmicall) \
 do { \
@@ -675,9 +695,29 @@ do { \
 	if (rel->type != VMI_RELOCATION_NONE) { \
 		BUG_ON(rel->type != VMI_RELOCATION_CALL_REL); \
 		paravirt_ops.opname = (void *)rel->eip; \
+	} else if (rel->type == VMI_RELOCATION_NOP) \
+		paravirt_ops.opname = (void *)vmi_nop; \
+} while (0)
+
+/*
+ * Helper macro for making the VMI paravirt-ops fill code readable.
+ * For cached operations which do not match the VMI ROM ABI and must
+ * go through a translation stub. Ignore NOPs, since it is not clear
+ * a NOP * VMI function corresponds to a NOP paravirt-op when the
+ * functions are not in 1-1 correspondence.
+ */
+#define para_wrap(opname, wrapper, cache, vmicall) \
+do { \
+	reloc = call_vrom_long_func(vmi_rom, get_reloc, \
+				    VMI_CALL_##vmicall); \
+	BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL); \
+	if (rel->type == VMI_RELOCATION_CALL_REL) { \
+		paravirt_ops.opname = wrapper; \
+		vmi_ops.cache = (void *)rel->eip; \
 	} \
 } while (0)
 
+
 /*
  * Activate the VMI interface and switch into paravirtualized mode
 */
@@ -714,13 +754,8 @@ static inline int __init activate_vmi(void)
 	 * rdpmc is not yet used in Linux
 	 */
 
-	/* CPUID is special, so very special */
-	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_CPUID);
-	if (rel->type != VMI_RELOCATION_NONE) {
-		BUG_ON(rel->type != VMI_RELOCATION_CALL_REL);
-		vmi_ops.cpuid = (void *)rel->eip;
-		paravirt_ops.cpuid = vmi_cpuid;
-	}
+	/* CPUID is special, so very special it gets wrapped like a present */
+	para_wrap(cpuid, vmi_cpuid, cpuid, CPUID);
 
 	para_fill(clts, CLTS);
 	para_fill(get_debugreg, GetDR);
@@ -737,38 +772,26 @@ static inline int __init activate_vmi(void)
 	para_fill(restore_fl, SetInterruptMask);
 	para_fill(irq_disable, DisableInterrupts);
 	para_fill(irq_enable, EnableInterrupts);
+
 	/* irq_save_disable !!! sheer pain */
 	patch_offset(&irq_save_disable_callout[IRQ_PATCH_INT_MASK],
 		     (char *)paravirt_ops.save_fl);
 	patch_offset(&irq_save_disable_callout[IRQ_PATCH_DISABLE],
 		     (char *)paravirt_ops.irq_disable);
-#ifndef CONFIG_NO_IDLE_HZ
-	para_fill(safe_halt, Halt);
-#else
-	vmi_ops.halt = vmi_get_function(VMI_CALL_Halt);
-	paravirt_ops.safe_halt = vmi_safe_halt;
-#endif
+
 	para_fill(wbinvd, WBINVD);
+	para_fill(read_tsc, RDTSC);
+
+	/* The following we emulate with trap and emulate for now */
 	/* paravirt_ops.read_msr = vmi_rdmsr */
 	/* paravirt_ops.write_msr = vmi_wrmsr */
-	para_fill(read_tsc, RDTSC);
 	/* paravirt_ops.rdpmc = vmi_rdpmc */
 
-	/* TR interface doesn't pass TR value */
-	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_SetTR);
-	if (rel->type != VMI_RELOCATION_NONE) {
-		BUG_ON(rel->type != VMI_RELOCATION_CALL_REL);
-		vmi_ops.set_tr = (void *)rel->eip;
-		paravirt_ops.load_tr_desc = vmi_set_tr;
-	}
+	/* TR interface doesn't pass TR value, wrap */
+	para_wrap(load_tr_desc, vmi_set_tr, set_tr, SetTR);
 
 	/* LDT is special, too */
-	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_SetLDT);
-	if (rel->type != VMI_RELOCATION_NONE) {
-		BUG_ON(rel->type != VMI_RELOCATION_CALL_REL);
-		vmi_ops._set_ldt = (void *)rel->eip;
-		paravirt_ops.set_ldt = vmi_set_ldt;
-	}
+	para_wrap(set_ldt, vmi_set_ldt, _set_ldt, SetLDT);
 
 	para_fill(load_gdt, SetGDT);
 	para_fill(load_idt, SetIDT);
@@ -779,28 +802,14 @@ static inline int __init activate_vmi(void)
 	para_fill(write_ldt_entry, WriteLDTEntry);
 	para_fill(write_gdt_entry, WriteGDTEntry);
 	para_fill(write_idt_entry, WriteIDTEntry);
-	reloc = call_vrom_long_func(vmi_rom, get_reloc,
-				    VMI_CALL_UpdateKernelStack);
-	if (rel->type != VMI_RELOCATION_NONE) {
-		BUG_ON(rel->type != VMI_RELOCATION_CALL_REL);
-		vmi_ops.set_kernel_stack = (void *)rel->eip;
-		paravirt_ops.load_esp0 = vmi_load_esp0;
-	}
-
+	para_wrap(load_esp0, vmi_load_esp0, set_kernel_stack, UpdateKernelStack);
 	para_fill(set_iopl_mask, SetIOPLMask);
-	paravirt_ops.io_delay = (void *)vmi_nop;
-	if (!disable_nodelay) {
-		paravirt_ops.const_udelay = (void *)vmi_nop;
-	}
-
+	para_fill(io_delay, IODelay);
 	para_fill(set_lazy_mode, SetLazyMode);
 
-	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_FlushTLB);
-	if (rel->type != VMI_RELOCATION_NONE) {
-		vmi_ops.flush_tlb = (void *)rel->eip;
-		paravirt_ops.flush_tlb_user = vmi_flush_tlb_user;
-		paravirt_ops.flush_tlb_kernel = vmi_flush_tlb_kernel;
-	}
+	/* user and kernel flush are just handled with different flags to FlushTLB */
+	para_wrap(flush_tlb_user, vmi_flush_tlb_user, flush_tlb, FlushTLB);
+	para_wrap(flush_tlb_kernel, vmi_flush_tlb_kernel, flush_tlb, FlushTLB);
 	para_fill(flush_tlb_single, InvalPage);
 
 	/*
@@ -815,27 +824,40 @@ static inline int __init activate_vmi(void)
 	vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxE);
 	vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxE);
 #endif
-	vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
-	vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
-	vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);
 
-	paravirt_ops.alloc_pt = vmi_allocate_pt;
-	paravirt_ops.alloc_pd = vmi_allocate_pd;
-	paravirt_ops.alloc_pd_clone = vmi_allocate_pd_clone;
-	paravirt_ops.release_pt = vmi_release_pt;
-	paravirt_ops.release_pd = vmi_release_pd;
-	paravirt_ops.set_pte = vmi_set_pte;
-	paravirt_ops.set_pte_at = vmi_set_pte_at;
-	paravirt_ops.set_pmd = vmi_set_pmd;
-	paravirt_ops.pte_update = vmi_update_pte;
-	paravirt_ops.pte_update_defer = vmi_update_pte_defer;
+	if (vmi_ops.set_pte) {
+		paravirt_ops.set_pte = vmi_set_pte;
+		paravirt_ops.set_pte_at = vmi_set_pte_at;
+		paravirt_ops.set_pmd = vmi_set_pmd;
 #ifdef CONFIG_X86_PAE
 	paravirt_ops.set_pte_atomic = vmi_set_pte_atomic;
 	paravirt_ops.set_pte_present = vmi_set_pte_present;
 	paravirt_ops.set_pud = vmi_set_pud;
 	paravirt_ops.pte_clear = vmi_pte_clear;
 	paravirt_ops.pmd_clear = vmi_pmd_clear;
 #endif
+	}
+
+	if (vmi_ops.update_pte) {
+		paravirt_ops.pte_update = vmi_update_pte;
+		paravirt_ops.pte_update_defer = vmi_update_pte_defer;
+	}
+
+	vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
+	if (vmi_ops.allocate_page) {
+		paravirt_ops.alloc_pt = vmi_allocate_pt;
+		paravirt_ops.alloc_pd = vmi_allocate_pd;
+		paravirt_ops.alloc_pd_clone = vmi_allocate_pd_clone;
+	}
+
+	vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);
+	if (vmi_ops.release_page) {
+		paravirt_ops.release_pt = vmi_release_pt;
+		paravirt_ops.release_pd = vmi_release_pd;
+	}
+	para_wrap(map_pt_hook, vmi_map_pt_hook, set_linear_mapping,
+		  SetLinearMapping);
+
 	/*
 	 * These MUST always be patched. Don't support indirect jumps
@@ -847,21 +869,20 @@ static inline int __init activate_vmi(void)
 	paravirt_ops.iret = (void *)0xbadbab0;
 
 #ifdef CONFIG_SMP
-	paravirt_ops.startup_ipi_hook = vmi_startup_ipi_hook;
-	vmi_ops.set_initial_ap_state = vmi_get_function(VMI_CALL_SetInitialAPState);
+	para_wrap(startup_ipi_hook, vmi_startup_ipi_hook, set_initial_ap_state, SetInitialAPState);
 #endif
 
 #ifdef CONFIG_X86_LOCAL_APIC
-	paravirt_ops.apic_read = vmi_get_function(VMI_CALL_APICRead);
-	paravirt_ops.apic_write = vmi_get_function(VMI_CALL_APICWrite);
-	paravirt_ops.apic_write_atomic = vmi_get_function(VMI_CALL_APICWrite);
+	para_fill(apic_read, APICRead);
+	para_fill(apic_write, APICWrite);
+	para_fill(apic_write_atomic, APICWrite);
 #endif
 
 	/*
 	 * Check for VMI timer functionality by probing for a cycle frequency method
 	 */
 	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_GetCycleFrequency);
-	if (rel->type != VMI_RELOCATION_NONE) {
+	if (!disable_vmi_timer && rel->type != VMI_RELOCATION_NONE) {
 		vmi_timer_ops.get_cycle_frequency = (void *)rel->eip;
 		vmi_timer_ops.get_cycle_counter =
 			vmi_get_function(VMI_CALL_GetCycleCounter);
@@ -879,9 +900,22 @@ static inline int __init activate_vmi(void)
 		paravirt_ops.setup_boot_clock = vmi_timer_setup_boot_alarm;
 		paravirt_ops.setup_secondary_clock = vmi_timer_setup_secondary_alarm;
 #endif
-		custom_sched_clock = vmi_sched_clock;
+		paravirt_ops.get_scheduled_cycles = vmi_get_sched_cycles;
+		paravirt_ops.get_cpu_khz = vmi_cpu_khz;
+
+		/* We have true wallclock functions; disable CMOS clock sync */
+		no_sync_cmos_clock = 1;
+	} else {
+		disable_noidle = 1;
+		disable_vmi_timer = 1;
 	}
 
+	/* No idle HZ mode only works if VMI timer and no idle is enabled */
+	if (disable_noidle || disable_vmi_timer)
+		para_fill(safe_halt, Halt);
+	else
+		para_wrap(safe_halt, vmi_safe_halt, halt, Halt);
+
 	/*
 	 * Alternative instruction rewriting doesn't happen soon enough
 	 * to convert VMI_IRET to a call instead of a jump; so we have
@@ -914,7 +948,9 @@ void __init vmi_init(void)
 
 	local_irq_save(flags);
 	activate_vmi();
-#ifdef CONFIG_SMP
+
+#ifdef CONFIG_X86_IO_APIC
+	/* This is virtual hardware; timer routing is wired correctly */
 	no_timer_check = 1;
 #endif
 	local_irq_restore(flags & X86_EFLAGS_IF);
@@ -925,9 +961,7 @@ static int __init parse_vmi(char *arg)
 	if (!arg)
 		return -EINVAL;
 
-	if (!strcmp(arg, "disable_nodelay"))
-		disable_nodelay = 1;
-	else if (!strcmp(arg, "disable_pge")) {
+	if (!strcmp(arg, "disable_pge")) {
 		clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
 		disable_pge = 1;
 	} else if (!strcmp(arg, "disable_pse")) {
@@ -942,7 +976,11 @@ static int __init parse_vmi(char *arg)
 	} else if (!strcmp(arg, "disable_mtrr")) {
 		clear_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability);
 		disable_mtrr = 1;
-	}
+	} else if (!strcmp(arg, "disable_timer")) {
+		disable_vmi_timer = 1;
+		disable_noidle = 1;
+	} else if (!strcmp(arg, "disable_noidle"))
+		disable_noidle = 1;
 	return 0;
 }
 
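
The para_fill/para_wrap split above is the heart of this rework: para_fill
points a paravirt op directly at the ROM entry, while para_wrap interposes a
C wrapper and caches the ROM entry for the wrapper to call through. A
miniature sketch of that dispatch (editor's illustration only; all names and
types here are hypothetical, not the VMI ABI):

struct example_reloc {
	int type;		/* NONE, CALL_REL, NOP, ... */
	void (*eip)(void);	/* ROM entry point */
};

static void (*example_op)(void);		/* a paravirt op slot */
static void (*example_cached_rom)(void);	/* a vmi_ops cache slot */

static void example_wrapper(void)
{
	/* translate the kernel's ABI to the ROM's, then call through */
	example_cached_rom();
}

static void example_install(struct example_reloc *rel, int needs_wrapper)
{
	if (!needs_wrapper) {
		example_op = rel->eip;		/* para_fill: direct call */
	} else {
		example_op = example_wrapper;	/* para_wrap: translate first */
		example_cached_rom = rel->eip;
	}
}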
diff --git a/arch/i386/kernel/vmitime.c b/arch/i386/kernel/vmitime.c index 76d2adcae5a3..8dc72d575666 100644 --- a/arch/i386/kernel/vmitime.c +++ b/arch/i386/kernel/vmitime.c | |||
@@ -153,13 +153,6 @@ static void vmi_get_wallclock_ts(struct timespec *ts) | |||
153 | ts->tv_sec = wallclock; | 153 | ts->tv_sec = wallclock; |
154 | } | 154 | } |
155 | 155 | ||
156 | static void update_xtime_from_wallclock(void) | ||
157 | { | ||
158 | struct timespec ts; | ||
159 | vmi_get_wallclock_ts(&ts); | ||
160 | do_settimeofday(&ts); | ||
161 | } | ||
162 | |||
163 | unsigned long vmi_get_wallclock(void) | 156 | unsigned long vmi_get_wallclock(void) |
164 | { | 157 | { |
165 | struct timespec ts; | 158 | struct timespec ts; |
@@ -172,11 +165,20 @@ int vmi_set_wallclock(unsigned long now) | |||
172 | return -1; | 165 | return -1; |
173 | } | 166 | } |
174 | 167 | ||
175 | unsigned long long vmi_sched_clock(void) | 168 | unsigned long long vmi_get_sched_cycles(void) |
176 | { | 169 | { |
177 | return read_available_cycles(); | 170 | return read_available_cycles(); |
178 | } | 171 | } |
179 | 172 | ||
173 | unsigned long vmi_cpu_khz(void) | ||
174 | { | ||
175 | unsigned long long khz; | ||
176 | |||
177 | khz = vmi_timer_ops.get_cycle_frequency(); | ||
178 | (void)do_div(khz, 1000); | ||
179 | return khz; | ||
180 | } | ||
181 | |||
180 | void __init vmi_time_init(void) | 182 | void __init vmi_time_init(void) |
181 | { | 183 | { |
182 | unsigned long long cycles_per_sec, cycles_per_msec; | 184 | unsigned long long cycles_per_sec, cycles_per_msec; |
@@ -188,25 +190,16 @@ void __init vmi_time_init(void) | |||
188 | set_intr_gate(LOCAL_TIMER_VECTOR, apic_vmi_timer_interrupt); | 190 | set_intr_gate(LOCAL_TIMER_VECTOR, apic_vmi_timer_interrupt); |
189 | #endif | 191 | #endif |
190 | 192 | ||
191 | no_sync_cmos_clock = 1; | ||
192 | |||
193 | vmi_get_wallclock_ts(&xtime); | ||
194 | set_normalized_timespec(&wall_to_monotonic, | ||
195 | -xtime.tv_sec, -xtime.tv_nsec); | ||
196 | |||
197 | real_cycles_accounted_system = read_real_cycles(); | 193 | real_cycles_accounted_system = read_real_cycles(); |
198 | update_xtime_from_wallclock(); | ||
199 | per_cpu(process_times_cycles_accounted_cpu, 0) = read_available_cycles(); | 194 | per_cpu(process_times_cycles_accounted_cpu, 0) = read_available_cycles(); |
200 | 195 | ||
201 | cycles_per_sec = vmi_timer_ops.get_cycle_frequency(); | 196 | cycles_per_sec = vmi_timer_ops.get_cycle_frequency(); |
202 | |||
203 | cycles_per_jiffy = cycles_per_sec; | 197 | cycles_per_jiffy = cycles_per_sec; |
204 | (void)do_div(cycles_per_jiffy, HZ); | 198 | (void)do_div(cycles_per_jiffy, HZ); |
205 | cycles_per_alarm = cycles_per_sec; | 199 | cycles_per_alarm = cycles_per_sec; |
206 | (void)do_div(cycles_per_alarm, alarm_hz); | 200 | (void)do_div(cycles_per_alarm, alarm_hz); |
207 | cycles_per_msec = cycles_per_sec; | 201 | cycles_per_msec = cycles_per_sec; |
208 | (void)do_div(cycles_per_msec, 1000); | 202 | (void)do_div(cycles_per_msec, 1000); |
209 | cpu_khz = cycles_per_msec; | ||
210 | 203 | ||
211 | printk(KERN_WARNING "VMI timer cycles/sec = %llu ; cycles/jiffy = %llu ;" | 204 | printk(KERN_WARNING "VMI timer cycles/sec = %llu ; cycles/jiffy = %llu ;" |
212 | "cycles/alarm = %llu\n", cycles_per_sec, cycles_per_jiffy, | 205 | "cycles/alarm = %llu\n", cycles_per_sec, cycles_per_jiffy, |
@@ -250,7 +243,7 @@ void __init vmi_timer_setup_boot_alarm(void) | |||
250 | 243 | ||
251 | /* Initialize the time accounting variables for an AP on an SMP system. | 244 | /* Initialize the time accounting variables for an AP on an SMP system. |
252 | * Also, set the local alarm for the AP. */ | 245 | * Also, set the local alarm for the AP. */ |
253 | void __init vmi_timer_setup_secondary_alarm(void) | 246 | void __devinit vmi_timer_setup_secondary_alarm(void) |
254 | { | 247 | { |
255 | int cpu = smp_processor_id(); | 248 | int cpu = smp_processor_id(); |
256 | 249 | ||
@@ -276,16 +269,13 @@ static void vmi_account_real_cycles(unsigned long long cur_real_cycles) | |||
276 | 269 | ||
277 | cycles_not_accounted = cur_real_cycles - real_cycles_accounted_system; | 270 | cycles_not_accounted = cur_real_cycles - real_cycles_accounted_system; |
278 | while (cycles_not_accounted >= cycles_per_jiffy) { | 271 | while (cycles_not_accounted >= cycles_per_jiffy) { |
279 | /* systems wide jiffies and wallclock. */ | 272 | /* systems wide jiffies. */ |
280 | do_timer(1); | 273 | do_timer(1); |
281 | 274 | ||
282 | cycles_not_accounted -= cycles_per_jiffy; | 275 | cycles_not_accounted -= cycles_per_jiffy; |
283 | real_cycles_accounted_system += cycles_per_jiffy; | 276 | real_cycles_accounted_system += cycles_per_jiffy; |
284 | } | 277 | } |
285 | 278 | ||
286 | if (vmi_timer_ops.wallclock_updated()) | ||
287 | update_xtime_from_wallclock(); | ||
288 | |||
289 | write_sequnlock(&xtime_lock); | 279 | write_sequnlock(&xtime_lock); |
290 | } | 280 | } |
291 | 281 | ||
@@ -380,7 +370,6 @@ int vmi_stop_hz_timer(void) | |||
380 | unsigned long seq, next; | 370 | unsigned long seq, next; |
381 | unsigned long long real_cycles_expiry; | 371 | unsigned long long real_cycles_expiry; |
382 | int cpu = smp_processor_id(); | 372 | int cpu = smp_processor_id(); |
383 | int idle; | ||
384 | 373 | ||
385 | BUG_ON(!irqs_disabled()); | 374 | BUG_ON(!irqs_disabled()); |
386 | if (sysctl_hz_timer != 0) | 375 | if (sysctl_hz_timer != 0) |
@@ -388,13 +377,13 @@ int vmi_stop_hz_timer(void) | |||
388 | 377 | ||
389 | cpu_set(cpu, nohz_cpu_mask); | 378 | cpu_set(cpu, nohz_cpu_mask); |
390 | smp_mb(); | 379 | smp_mb(); |
380 | |||
391 | if (rcu_needs_cpu(cpu) || local_softirq_pending() || | 381 | if (rcu_needs_cpu(cpu) || local_softirq_pending() || |
392 | (next = next_timer_interrupt(), time_before_eq(next, jiffies))) { | 382 | (next = next_timer_interrupt(), |
383 | time_before_eq(next, jiffies + HZ/CONFIG_VMI_ALARM_HZ))) { | ||
393 | cpu_clear(cpu, nohz_cpu_mask); | 384 | cpu_clear(cpu, nohz_cpu_mask); |
394 | next = jiffies; | 385 | return 0; |
395 | idle = 0; | 386 | } |
396 | } else | ||
397 | idle = 1; | ||
398 | 387 | ||
399 | /* Convert jiffies to the real cycle counter. */ | 388 | /* Convert jiffies to the real cycle counter. */ |
400 | do { | 389 | do { |
@@ -404,17 +393,13 @@ int vmi_stop_hz_timer(void) | |||
404 | } while (read_seqretry(&xtime_lock, seq)); | 393 | } while (read_seqretry(&xtime_lock, seq)); |
405 | 394 | ||
406 | /* This cpu is going idle. Disable the periodic alarm. */ | 395 | /* This cpu is going idle. Disable the periodic alarm. */ |
407 | if (idle) { | 396 | vmi_timer_ops.cancel_alarm(VMI_CYCLES_AVAILABLE); |
408 | vmi_timer_ops.cancel_alarm(VMI_CYCLES_AVAILABLE); | 397 | per_cpu(idle_start_jiffies, cpu) = jiffies; |
409 | per_cpu(idle_start_jiffies, cpu) = jiffies; | ||
410 | } | ||
411 | |||
412 | /* Set the real time alarm to expire at the next event. */ | 398 | /* Set the real time alarm to expire at the next event. */ |
413 | vmi_timer_ops.set_alarm( | 399 | vmi_timer_ops.set_alarm( |
414 | VMI_ALARM_WIRING | VMI_ALARM_IS_ONESHOT | VMI_CYCLES_REAL, | 400 | VMI_ALARM_WIRING | VMI_ALARM_IS_ONESHOT | VMI_CYCLES_REAL, |
415 | real_cycles_expiry, 0); | 401 | real_cycles_expiry, 0); |
416 | 402 | return 1; | |
417 | return idle; | ||
418 | } | 403 | } |
419 | 404 | ||
420 | static void vmi_reenable_hz_timer(int cpu) | 405 | static void vmi_reenable_hz_timer(int cpu) |
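The accounting loop in vmi_account_real_cycles() above converts an accumulated cycle delta into whole timer ticks; with the wallclock resync dropped, that is now all it does under xtime_lock. A minimal restatement of the arithmetic, with hypothetical names and a worked figure (a 1 GHz cycle counter at HZ=250 gives cycles_per_jiffy = 4,000,000):

    /* sketch: fold an accumulated cycle delta into jiffies, one tick
     * per cycles_per_jiffy, mirroring the loop above (names assumed) */
    static void account_cycles(u64 cur_cycles, u64 *accounted,
                               u64 cycles_per_jiffy)
    {
            u64 delta = cur_cycles - *accounted;

            while (delta >= cycles_per_jiffy) {
                    do_timer(1);            /* advance jiffies by one */
                    delta -= cycles_per_jiffy;
                    *accounted += cycles_per_jiffy;
            }
    }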
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 919fbf568495..100930826850 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c | |||
@@ -968,7 +968,6 @@ void pci_scan_msi_device(struct pci_dev *dev) {} | |||
968 | int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) {return -1;} | 968 | int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) {return -1;} |
969 | void pci_disable_msix(struct pci_dev *dev) {} | 969 | void pci_disable_msix(struct pci_dev *dev) {} |
970 | void msi_remove_pci_irq_vectors(struct pci_dev *dev) {} | 970 | void msi_remove_pci_irq_vectors(struct pci_dev *dev) {} |
971 | void disable_msi_mode(struct pci_dev *dev, int pos, int type) {} | ||
972 | void pci_no_msi(void) {} | 971 | void pci_no_msi(void) {} |
973 | EXPORT_SYMBOL(pci_enable_msix); | 972 | EXPORT_SYMBOL(pci_enable_msix); |
974 | EXPORT_SYMBOL(pci_disable_msix); | 973 | EXPORT_SYMBOL(pci_disable_msix); |
diff --git a/arch/x86_64/kernel/hpet.c b/arch/x86_64/kernel/hpet.c index 65a0edd71a17..8cf0b8a13778 100644 --- a/arch/x86_64/kernel/hpet.c +++ b/arch/x86_64/kernel/hpet.c | |||
@@ -12,6 +12,12 @@ | |||
12 | #include <asm/timex.h> | 12 | #include <asm/timex.h> |
13 | #include <asm/hpet.h> | 13 | #include <asm/hpet.h> |
14 | 14 | ||
15 | #define HPET_MASK 0xFFFFFFFF | ||
16 | #define HPET_SHIFT 22 | ||
17 | |||
18 | /* FSEC = 10^-15 NSEC = 10^-9 */ | ||
19 | #define FSEC_PER_NSEC 1000000 | ||
20 | |||
15 | int nohpet __initdata; | 21 | int nohpet __initdata; |
16 | 22 | ||
17 | unsigned long hpet_address; | 23 | unsigned long hpet_address; |
@@ -106,9 +112,31 @@ int hpet_timer_stop_set_go(unsigned long tick) | |||
106 | return 0; | 112 | return 0; |
107 | } | 113 | } |
108 | 114 | ||
115 | static cycle_t read_hpet(void) | ||
116 | { | ||
117 | return (cycle_t)hpet_readl(HPET_COUNTER); | ||
118 | } | ||
119 | |||
120 | static cycle_t __vsyscall_fn vread_hpet(void) | ||
121 | { | ||
122 | return readl((void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0); | ||
123 | } | ||
124 | |||
125 | struct clocksource clocksource_hpet = { | ||
126 | .name = "hpet", | ||
127 | .rating = 250, | ||
128 | .read = read_hpet, | ||
129 | .mask = (cycle_t)HPET_MASK, | ||
130 | .mult = 0, /* set below */ | ||
131 | .shift = HPET_SHIFT, | ||
132 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | ||
133 | .vread = vread_hpet, | ||
134 | }; | ||
135 | |||
109 | int hpet_arch_init(void) | 136 | int hpet_arch_init(void) |
110 | { | 137 | { |
111 | unsigned int id; | 138 | unsigned int id; |
139 | u64 tmp; | ||
112 | 140 | ||
113 | if (!hpet_address) | 141 | if (!hpet_address) |
114 | return -1; | 142 | return -1; |
@@ -132,6 +160,22 @@ int hpet_arch_init(void) | |||
132 | 160 | ||
133 | hpet_use_timer = (id & HPET_ID_LEGSUP); | 161 | hpet_use_timer = (id & HPET_ID_LEGSUP); |
134 | 162 | ||
163 | /* | ||
164 | * hpet period is in femtoseconds per cycle ||
165 | * so we need to convert this to ns/cyc units ||
166 | * approximated by mult/2^shift ||
167 | * | ||
168 | * fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift | ||
169 | * fsec/cyc * 1ns/1000000fsec * 2^shift = mult | ||
170 | * fsec/cyc * 2^shift * 1nsec/1000000fsec = mult | ||
171 | * (fsec/cyc << shift)/1000000 = mult | ||
172 | * (hpet_period << shift)/FSEC_PER_NSEC = mult | ||
173 | */ | ||
174 | tmp = (u64)hpet_period << HPET_SHIFT; | ||
175 | do_div(tmp, FSEC_PER_NSEC); | ||
176 | clocksource_hpet.mult = (u32)tmp; | ||
177 | clocksource_register(&clocksource_hpet); | ||
178 | |||
135 | return hpet_timer_stop_set_go(hpet_tick); | 179 | return hpet_timer_stop_set_go(hpet_tick); |
136 | } | 180 | } |
137 | 181 | ||
@@ -444,68 +488,3 @@ static int __init nohpet_setup(char *s) | |||
444 | } | 488 | } |
445 | 489 | ||
446 | __setup("nohpet", nohpet_setup); | 490 | __setup("nohpet", nohpet_setup); |
447 | |||
448 | #define HPET_MASK 0xFFFFFFFF | ||
449 | #define HPET_SHIFT 22 | ||
450 | |||
451 | /* FSEC = 10^-15 NSEC = 10^-9 */ | ||
452 | #define FSEC_PER_NSEC 1000000 | ||
453 | |||
454 | static void *hpet_ptr; | ||
455 | |||
456 | static cycle_t read_hpet(void) | ||
457 | { | ||
458 | return (cycle_t)readl(hpet_ptr); | ||
459 | } | ||
460 | |||
461 | static cycle_t __vsyscall_fn vread_hpet(void) | ||
462 | { | ||
463 | return readl((void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0); | ||
464 | } | ||
465 | |||
466 | struct clocksource clocksource_hpet = { | ||
467 | .name = "hpet", | ||
468 | .rating = 250, | ||
469 | .read = read_hpet, | ||
470 | .mask = (cycle_t)HPET_MASK, | ||
471 | .mult = 0, /* set below */ | ||
472 | .shift = HPET_SHIFT, | ||
473 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | ||
474 | .vread = vread_hpet, | ||
475 | }; | ||
476 | |||
477 | static int __init init_hpet_clocksource(void) | ||
478 | { | ||
479 | unsigned long hpet_period; | ||
480 | void __iomem *hpet_base; | ||
481 | u64 tmp; | ||
482 | |||
483 | if (!hpet_address) | ||
484 | return -ENODEV; | ||
485 | |||
486 | /* calculate the hpet address: */ | ||
487 | hpet_base = ioremap_nocache(hpet_address, HPET_MMAP_SIZE); | ||
488 | hpet_ptr = hpet_base + HPET_COUNTER; | ||
489 | |||
490 | /* calculate the frequency: */ | ||
491 | hpet_period = readl(hpet_base + HPET_PERIOD); | ||
492 | |||
493 | /* | ||
494 | * hpet period is in femto seconds per cycle | ||
495 | * so we need to convert this to ns/cyc units | ||
496 | * aproximated by mult/2^shift | ||
497 | * | ||
498 | * fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift | ||
499 | * fsec/cyc * 1ns/1000000fsec * 2^shift = mult | ||
500 | * fsec/cyc * 2^shift * 1nsec/1000000fsec = mult | ||
501 | * (fsec/cyc << shift)/1000000 = mult | ||
502 | * (hpet_period << shift)/FSEC_PER_NSEC = mult | ||
503 | */ | ||
504 | tmp = (u64)hpet_period << HPET_SHIFT; | ||
505 | do_div(tmp, FSEC_PER_NSEC); | ||
506 | clocksource_hpet.mult = (u32)tmp; | ||
507 | |||
508 | return clocksource_register(&clocksource_hpet); | ||
509 | } | ||
510 | |||
511 | module_init(init_hpet_clocksource); | ||
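Moving the clocksource setup into hpet_arch_init() keeps the mult/shift computation next to the hardware probe instead of in a late module_init. As a worked check of that fixed-point conversion, assuming a typical 14.31818 MHz HPET (period ~69841279 fs; values hypothetical), the following standalone C reproduces it:

    /* sketch: verify mult = (hpet_period << HPET_SHIFT) / FSEC_PER_NSEC */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t hpet_period = 69841279; /* fs per cycle (assumed) */
            unsigned int shift = 22;         /* HPET_SHIFT */
            uint64_t mult = (hpet_period << shift) / 1000000;

            /* one second's worth of cycles should convert back to ~1e9 ns */
            uint64_t ns = (14318180ULL * mult) >> shift;
            printf("mult=%llu, 1s -> %llu ns\n",
                   (unsigned long long)mult, (unsigned long long)ns);
            return 0;
    }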
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c index 0a91368f8b60..c6a5bc7e8118 100644 --- a/arch/x86_64/kernel/io_apic.c +++ b/arch/x86_64/kernel/io_apic.c | |||
@@ -789,7 +789,6 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, | |||
789 | struct irq_cfg *cfg = irq_cfg + irq; | 789 | struct irq_cfg *cfg = irq_cfg + irq; |
790 | struct IO_APIC_route_entry entry; | 790 | struct IO_APIC_route_entry entry; |
791 | cpumask_t mask; | 791 | cpumask_t mask; |
792 | unsigned long flags; | ||
793 | 792 | ||
794 | if (!IO_APIC_IRQ(irq)) | 793 | if (!IO_APIC_IRQ(irq)) |
795 | return; | 794 | return; |
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c index c9addcfb96dc..75d73a9aa9ff 100644 --- a/arch/x86_64/kernel/time.c +++ b/arch/x86_64/kernel/time.c | |||
@@ -358,6 +358,8 @@ void __init time_init(void) | |||
358 | set_cyc2ns_scale(cpu_khz); | 358 | set_cyc2ns_scale(cpu_khz); |
359 | printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n", | 359 | printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n", |
360 | cpu_khz / 1000, cpu_khz % 1000); | 360 | cpu_khz / 1000, cpu_khz % 1000); |
361 | init_tsc_clocksource(); | ||
362 | |||
361 | setup_irq(0, &irq0); | 363 | setup_irq(0, &irq0); |
362 | } | 364 | } |
363 | 365 | ||
diff --git a/arch/x86_64/kernel/tsc.c b/arch/x86_64/kernel/tsc.c index 895831865019..1a0edbbffaa0 100644 --- a/arch/x86_64/kernel/tsc.c +++ b/arch/x86_64/kernel/tsc.c | |||
@@ -210,7 +210,7 @@ void mark_tsc_unstable(void) | |||
210 | } | 210 | } |
211 | EXPORT_SYMBOL_GPL(mark_tsc_unstable); | 211 | EXPORT_SYMBOL_GPL(mark_tsc_unstable); |
212 | 212 | ||
213 | static int __init init_tsc_clocksource(void) | 213 | void __init init_tsc_clocksource(void) |
214 | { | 214 | { |
215 | if (!notsc) { | 215 | if (!notsc) { |
216 | clocksource_tsc.mult = clocksource_khz2mult(cpu_khz, | 216 | clocksource_tsc.mult = clocksource_khz2mult(cpu_khz, |
@@ -218,9 +218,6 @@ static int __init init_tsc_clocksource(void) | |||
218 | if (check_tsc_unstable()) | 218 | if (check_tsc_unstable()) |
219 | clocksource_tsc.rating = 0; | 219 | clocksource_tsc.rating = 0; |
220 | 220 | ||
221 | return clocksource_register(&clocksource_tsc); | 221 | clocksource_register(&clocksource_tsc); |
222 | } | 222 | } |
223 | return 0; | ||
224 | } | 223 | } |
225 | |||
226 | module_init(init_tsc_clocksource); | ||
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c index dc13ebacedfb..44cd7b2ddf09 100644 --- a/drivers/cdrom/viocd.c +++ b/drivers/cdrom/viocd.c | |||
@@ -376,6 +376,25 @@ static int send_request(struct request *req) | |||
376 | return 0; | 376 | return 0; |
377 | } | 377 | } |
378 | 378 | ||
379 | static void viocd_end_request(struct request *req, int uptodate) | ||
380 | { | ||
381 | int nsectors = req->hard_nr_sectors; | ||
382 | |||
383 | /* | ||
384 | * Make sure it's fully ended, and ensure that we process | ||
385 | * at least one sector. | ||
386 | */ | ||
387 | if (blk_pc_request(req)) | ||
388 | nsectors = (req->data_len + 511) >> 9; | ||
389 | if (!nsectors) | ||
390 | nsectors = 1; | ||
391 | |||
392 | if (end_that_request_first(req, uptodate, nsectors)) | ||
393 | BUG(); | ||
394 | add_disk_randomness(req->rq_disk); | ||
395 | blkdev_dequeue_request(req); | ||
396 | end_that_request_last(req, uptodate); | ||
397 | } | ||
379 | 398 | ||
380 | static int rwreq; | 399 | static int rwreq; |
381 | 400 | ||
@@ -385,11 +404,11 @@ static void do_viocd_request(request_queue_t *q) | |||
385 | 404 | ||
386 | while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) { | 405 | while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) { |
387 | if (!blk_fs_request(req)) | 406 | if (!blk_fs_request(req)) |
388 | end_request(req, 0); | 407 | viocd_end_request(req, 0); |
389 | else if (send_request(req) < 0) { | 408 | else if (send_request(req) < 0) { |
390 | printk(VIOCD_KERN_WARNING | 409 | printk(VIOCD_KERN_WARNING |
391 | "unable to send message to OS/400!"); | 410 | "unable to send message to OS/400!"); |
392 | end_request(req, 0); | 411 | viocd_end_request(req, 0); |
393 | } else | 412 | } else |
394 | rwreq++; | 413 | rwreq++; |
395 | } | 414 | } |
@@ -601,9 +620,9 @@ return_complete: | |||
601 | "with rc %d:0x%04X: %s\n", | 620 | "with rc %d:0x%04X: %s\n", |
602 | req, event->xRc, | 621 | req, event->xRc, |
603 | bevent->sub_result, err->msg); | 622 | bevent->sub_result, err->msg); |
604 | end_request(req, 0); | 623 | viocd_end_request(req, 0); |
605 | } else | 624 | } else |
606 | end_request(req, 1); | 625 | viocd_end_request(req, 1); |
607 | 626 | ||
608 | /* restart handling of incoming requests */ | 627 | /* restart handling of incoming requests */ |
609 | spin_unlock_irqrestore(&viocd_reqlock, flags); | 628 | spin_unlock_irqrestore(&viocd_reqlock, flags); |
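The point of viocd_end_request() over plain end_request() is the byte-to-sector rounding for packet commands, so the whole request is completed in a single call. The rounding itself, isolated:

    /* sketch: sectors to complete for a packet command of data_len bytes */
    static unsigned int viocd_nsectors(unsigned int data_len)
    {
            unsigned int n = (data_len + 511) >> 9; /* round up to 512B sectors */

            return n ? n : 1;   /* always complete at least one sector */
    }
    /* e.g. data_len = 18 -> 1 sector; data_len = 1024 -> 2 sectors */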
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c index 54df35527bc5..16dc5d1d3cb4 100644 --- a/drivers/char/cyclades.c +++ b/drivers/char/cyclades.c | |||
@@ -3501,6 +3501,7 @@ get_serial_info(struct cyclades_port *info, | |||
3501 | tmp.irq = cinfo->irq; | 3501 | tmp.irq = cinfo->irq; |
3502 | tmp.flags = info->flags; | 3502 | tmp.flags = info->flags; |
3503 | tmp.close_delay = info->close_delay; | 3503 | tmp.close_delay = info->close_delay; |
3504 | tmp.closing_wait = info->closing_wait; | ||
3504 | tmp.baud_base = info->baud; | 3505 | tmp.baud_base = info->baud; |
3505 | tmp.custom_divisor = info->custom_divisor; | 3506 | tmp.custom_divisor = info->custom_divisor; |
3506 | tmp.hub6 = 0; /*!!! */ | 3507 | tmp.hub6 = 0; /*!!! */ |
diff --git a/drivers/char/epca.c b/drivers/char/epca.c index 88fc24fc4392..de5be30484ad 100644 --- a/drivers/char/epca.c +++ b/drivers/char/epca.c | |||
@@ -209,7 +209,6 @@ static void digi_send_break(struct channel *ch, int msec); | |||
209 | static void setup_empty_event(struct tty_struct *tty, struct channel *ch); | 209 | static void setup_empty_event(struct tty_struct *tty, struct channel *ch); |
210 | void epca_setup(char *, int *); | 210 | void epca_setup(char *, int *); |
211 | 211 | ||
212 | static int get_termio(struct tty_struct *, struct termio __user *); | ||
213 | static int pc_write(struct tty_struct *, const unsigned char *, int); | 212 | static int pc_write(struct tty_struct *, const unsigned char *, int); |
214 | static int pc_init(void); | 213 | static int pc_init(void); |
215 | static int init_PCI(void); | 214 | static int init_PCI(void); |
@@ -2362,15 +2361,6 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file, | |||
2362 | 2361 | ||
2363 | switch (cmd) | 2362 | switch (cmd) |
2364 | { /* Begin switch cmd */ | 2363 | { /* Begin switch cmd */ |
2365 | |||
2366 | #if 0 /* Handled by calling layer properly */ | ||
2367 | case TCGETS: | ||
2368 | if (copy_to_user(argp, tty->termios, sizeof(struct ktermios))) | ||
2369 | return -EFAULT; | ||
2370 | return 0; | ||
2371 | case TCGETA: | ||
2372 | return get_termio(tty, argp); | ||
2373 | #endif | ||
2374 | case TCSBRK: /* SVID version: non-zero arg --> no break */ | 2364 | case TCSBRK: /* SVID version: non-zero arg --> no break */ |
2375 | retval = tty_check_change(tty); | 2365 | retval = tty_check_change(tty); |
2376 | if (retval) | 2366 | if (retval) |
@@ -2735,13 +2725,6 @@ static void setup_empty_event(struct tty_struct *tty, struct channel *ch) | |||
2735 | memoff(ch); | 2725 | memoff(ch); |
2736 | } /* End setup_empty_event */ | 2726 | } /* End setup_empty_event */ |
2737 | 2727 | ||
2738 | /* --------------------- Begin get_termio ----------------------- */ | ||
2739 | |||
2740 | static int get_termio(struct tty_struct * tty, struct termio __user * termio) | ||
2741 | { /* Begin get_termio */ | ||
2742 | return kernel_termios_to_user_termio(termio, tty->termios); | ||
2743 | } /* End get_termio */ | ||
2744 | |||
2745 | /* ---------------------- Begin epca_setup -------------------------- */ | 2728 | /* ---------------------- Begin epca_setup -------------------------- */ |
2746 | void epca_setup(char *str, int *ints) | 2729 | void epca_setup(char *str, int *ints) |
2747 | { /* Begin epca_setup */ | 2730 | { /* Begin epca_setup */ |
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index a7b33d2f5991..e22146546add 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c | |||
@@ -2478,6 +2478,11 @@ static __devinit void default_find_bmc(void) | |||
2478 | if (!info) | 2478 | if (!info) |
2479 | return; | 2479 | return; |
2480 | 2480 | ||
2481 | #ifdef CONFIG_PPC_MERGE | ||
2482 | if (check_legacy_ioport(ipmi_defaults[i].port)) | ||
2483 | continue; | ||
2484 | #endif | ||
2485 | |||
2481 | info->addr_source = NULL; | 2486 | info->addr_source = NULL; |
2482 | 2487 | ||
2483 | info->si_type = ipmi_defaults[i].type; | 2488 | info->si_type = ipmi_defaults[i].type; |
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c index ccaa6a39cb4b..d42060ede930 100644 --- a/drivers/clocksource/acpi_pm.c +++ b/drivers/clocksource/acpi_pm.c | |||
@@ -214,4 +214,7 @@ pm_good: | |||
214 | return clocksource_register(&clocksource_acpi_pm); | 214 | return clocksource_register(&clocksource_acpi_pm); |
215 | } | 215 | } |
216 | 216 | ||
217 | module_init(init_acpi_pm_clocksource); | 217 | /* We use fs_initcall because we want the PCI fixups to have run |
218 | * but we still need to load before device_initcall | ||
219 | */ | ||
220 | fs_initcall(init_acpi_pm_clocksource); | ||
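The ordering constraint in that comment comes from the fixed initcall levels: built-in module_init() expands to device_initcall(), and the PCI fixups run from earlier levels. Sketched from include/linux/init.h of this era (abridged):

    /* sketch: initcall levels, earliest first -- fs_initcall lands after
     * the PCI fixups but before every device_initcall()/module_init() */
    core_initcall(fn);      /* level 1 */
    postcore_initcall(fn);  /* level 2 */
    arch_initcall(fn);      /* level 3 */
    subsys_initcall(fn);    /* level 4 */
    fs_initcall(fn);        /* level 5: init_acpi_pm_clocksource now here */
    device_initcall(fn);    /* level 6: module_init() for built-in code */
    late_initcall(fn);      /* level 7 */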
diff --git a/drivers/clocksource/cyclone.c b/drivers/clocksource/cyclone.c index 4f3925ceb360..1bde303b970b 100644 --- a/drivers/clocksource/cyclone.c +++ b/drivers/clocksource/cyclone.c | |||
@@ -116,4 +116,4 @@ static int __init init_cyclone_clocksource(void) | |||
116 | return clocksource_register(&clocksource_cyclone); | 116 | return clocksource_register(&clocksource_cyclone); |
117 | } | 117 | } |
118 | 118 | ||
119 | module_init(init_cyclone_clocksource); | 119 | arch_initcall(init_cyclone_clocksource); |
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index 64509689fa65..f17e9c7d4b36 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig | |||
@@ -215,14 +215,16 @@ config KEYBOARD_AAED2000 | |||
215 | module will be called aaed2000_kbd. | 215 | module will be called aaed2000_kbd. |
216 | 216 | ||
217 | config KEYBOARD_GPIO | 217 | config KEYBOARD_GPIO |
218 | tristate "Buttons on CPU GPIOs (PXA)" | 218 | tristate "GPIO Buttons" |
219 | depends on (ARCH_SA1100 || ARCH_PXA || ARCH_S3C2410) | 219 | depends on GENERIC_GPIO |
220 | help | 220 | help |
221 | This driver implements support for buttons connected | 221 | This driver implements support for buttons connected |
222 | directly to GPIO pins of SA1100, PXA or S3C24xx CPUs. | 222 | to GPIO pins of various CPUs (and some other chips). |
223 | 223 | ||
224 | Say Y here if your device has buttons connected | 224 | Say Y here if your device has buttons connected |
225 | directly to GPIO pins of the CPU. | 225 | directly to such GPIO pins. Your board-specific |
226 | setup logic must also provide a platform device, | ||
227 | with configuration data saying which GPIOs are used. | ||
226 | 228 | ||
227 | To compile this driver as a module, choose M here: the | 229 | To compile this driver as a module, choose M here: the |
228 | module will be called gpio-keys. | 230 | module will be called gpio-keys. |
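The reworked help text asks board code for a platform device carrying the GPIO assignments. A minimal board-file sketch, assuming the gpio_keys_platform_data layout this driver introduced (field names changed in later kernels; the GPIO number and key choice are made up):

    #include <linux/platform_device.h>
    #include <linux/input.h>
    #include <linux/gpio_keys.h>

    static struct gpio_keys_button board_buttons[] = {
            {
                    .keycode    = KEY_POWER,
                    .gpio       = 42,       /* hypothetical pin */
                    .active_low = 1,
                    .desc       = "power",
            },
    };

    static struct gpio_keys_platform_data board_button_data = {
            .buttons  = board_buttons,
            .nbuttons = ARRAY_SIZE(board_buttons),
    };

    static struct platform_device board_button_device = {
            .name = "gpio-keys",    /* bound by the driver above */
            .id   = -1,
            .dev  = {
                    .platform_data = &board_button_data,
            },
    };
    /* board init: platform_device_register(&board_button_device); */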
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c index fa03a00b4c6d..ccf6df387b62 100644 --- a/drivers/input/keyboard/gpio_keys.c +++ b/drivers/input/keyboard/gpio_keys.c | |||
@@ -23,11 +23,9 @@ | |||
23 | #include <linux/platform_device.h> | 23 | #include <linux/platform_device.h> |
24 | #include <linux/input.h> | 24 | #include <linux/input.h> |
25 | #include <linux/irq.h> | 25 | #include <linux/irq.h> |
26 | #include <linux/gpio_keys.h> | ||
26 | 27 | ||
27 | #include <asm/gpio.h> | 28 | #include <asm/gpio.h> |
28 | #include <asm/arch/hardware.h> | ||
29 | |||
30 | #include <asm/hardware/gpio_keys.h> | ||
31 | 29 | ||
32 | static irqreturn_t gpio_keys_isr(int irq, void *dev_id) | 30 | static irqreturn_t gpio_keys_isr(int irq, void *dev_id) |
33 | { | 31 | { |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index d247429ee5ef..54a1ad5eef42 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -3071,7 +3071,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped | |||
3071 | release_stripe(sh); | 3071 | release_stripe(sh); |
3072 | } | 3072 | } |
3073 | spin_lock_irq(&conf->device_lock); | 3073 | spin_lock_irq(&conf->device_lock); |
3074 | conf->expand_progress = (sector_nr + i)*(conf->raid_disks-1); | 3074 | conf->expand_progress = (sector_nr + i) * new_data_disks; |
3075 | spin_unlock_irq(&conf->device_lock); | 3075 | spin_unlock_irq(&conf->device_lock); |
3076 | /* Ok, those stripe are ready. We can start scheduling | 3076 | /* Ok, those stripe are ready. We can start scheduling |
3077 | * reads on the source stripes. | 3077 | * reads on the source stripes. |
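expand_progress counts data sectors across the array, so the multiplier must be the number of data-bearing disks in the reshape target geometry. Hard-coding raid_disks - 1 assumes exactly one parity disk; presumably new_data_disks is derived as sketched below, which also covers two-parity layouts:

    /* sketch: data-disk count under the new geometry (assumed derivation) */
    static inline int data_disks(int raid_disks, int parity_disks)
    {
            return raid_disks - parity_disks;   /* 1 for RAID5, 2 for RAID6 */
    }
    /* progress after i stripes: (sector_nr + i) * data_disks(...) */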
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 68555c11f556..01869b1782e4 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c | |||
@@ -38,6 +38,36 @@ static int msi_cache_init(void) | |||
38 | return 0; | 38 | return 0; |
39 | } | 39 | } |
40 | 40 | ||
41 | static void msi_set_enable(struct pci_dev *dev, int enable) | ||
42 | { | ||
43 | int pos; | ||
44 | u16 control; | ||
45 | |||
46 | pos = pci_find_capability(dev, PCI_CAP_ID_MSI); | ||
47 | if (pos) { | ||
48 | pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); | ||
49 | control &= ~PCI_MSI_FLAGS_ENABLE; | ||
50 | if (enable) | ||
51 | control |= PCI_MSI_FLAGS_ENABLE; | ||
52 | pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); | ||
53 | } | ||
54 | } | ||
55 | |||
56 | static void msix_set_enable(struct pci_dev *dev, int enable) | ||
57 | { | ||
58 | int pos; | ||
59 | u16 control; | ||
60 | |||
61 | pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); | ||
62 | if (pos) { | ||
63 | pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control); | ||
64 | control &= ~PCI_MSIX_FLAGS_ENABLE; | ||
65 | if (enable) | ||
66 | control |= PCI_MSIX_FLAGS_ENABLE; | ||
67 | pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); | ||
68 | } | ||
69 | } | ||
70 | |||
41 | static void msi_set_mask_bit(unsigned int irq, int flag) | 71 | static void msi_set_mask_bit(unsigned int irq, int flag) |
42 | { | 72 | { |
43 | struct msi_desc *entry; | 73 | struct msi_desc *entry; |
@@ -55,6 +85,8 @@ static void msi_set_mask_bit(unsigned int irq, int flag) | |||
55 | mask_bits &= ~(1); | 85 | mask_bits &= ~(1); |
56 | mask_bits |= flag; | 86 | mask_bits |= flag; |
57 | pci_write_config_dword(entry->dev, pos, mask_bits); | 87 | pci_write_config_dword(entry->dev, pos, mask_bits); |
88 | } else { | ||
89 | msi_set_enable(entry->dev, !flag); | ||
58 | } | 90 | } |
59 | break; | 91 | break; |
60 | case PCI_CAP_ID_MSIX: | 92 | case PCI_CAP_ID_MSIX: |
@@ -192,44 +224,6 @@ static struct msi_desc* alloc_msi_entry(void) | |||
192 | return entry; | 224 | return entry; |
193 | } | 225 | } |
194 | 226 | ||
195 | static void enable_msi_mode(struct pci_dev *dev, int pos, int type) | ||
196 | { | ||
197 | u16 control; | ||
198 | |||
199 | pci_read_config_word(dev, msi_control_reg(pos), &control); | ||
200 | if (type == PCI_CAP_ID_MSI) { | ||
201 | /* Set enabled bits to single MSI & enable MSI_enable bit */ | ||
202 | msi_enable(control, 1); | ||
203 | pci_write_config_word(dev, msi_control_reg(pos), control); | ||
204 | dev->msi_enabled = 1; | ||
205 | } else { | ||
206 | msix_enable(control); | ||
207 | pci_write_config_word(dev, msi_control_reg(pos), control); | ||
208 | dev->msix_enabled = 1; | ||
209 | } | ||
210 | |||
211 | pci_intx(dev, 0); /* disable intx */ | ||
212 | } | ||
213 | |||
214 | void disable_msi_mode(struct pci_dev *dev, int pos, int type) | ||
215 | { | ||
216 | u16 control; | ||
217 | |||
218 | pci_read_config_word(dev, msi_control_reg(pos), &control); | ||
219 | if (type == PCI_CAP_ID_MSI) { | ||
220 | /* Set enabled bits to single MSI & enable MSI_enable bit */ | ||
221 | msi_disable(control); | ||
222 | pci_write_config_word(dev, msi_control_reg(pos), control); | ||
223 | dev->msi_enabled = 0; | ||
224 | } else { | ||
225 | msix_disable(control); | ||
226 | pci_write_config_word(dev, msi_control_reg(pos), control); | ||
227 | dev->msix_enabled = 0; | ||
228 | } | ||
229 | |||
230 | pci_intx(dev, 1); /* enable intx */ | ||
231 | } | ||
232 | |||
233 | #ifdef CONFIG_PM | 227 | #ifdef CONFIG_PM |
234 | static int __pci_save_msi_state(struct pci_dev *dev) | 228 | static int __pci_save_msi_state(struct pci_dev *dev) |
235 | { | 229 | { |
@@ -238,12 +232,11 @@ static int __pci_save_msi_state(struct pci_dev *dev) | |||
238 | struct pci_cap_saved_state *save_state; | 232 | struct pci_cap_saved_state *save_state; |
239 | u32 *cap; | 233 | u32 *cap; |
240 | 234 | ||
241 | pos = pci_find_capability(dev, PCI_CAP_ID_MSI); | 235 | if (!dev->msi_enabled) |
242 | if (pos <= 0 || dev->no_msi) | ||
243 | return 0; | 236 | return 0; |
244 | 237 | ||
245 | pci_read_config_word(dev, msi_control_reg(pos), &control); | 238 | pos = pci_find_capability(dev, PCI_CAP_ID_MSI); |
246 | if (!(control & PCI_MSI_FLAGS_ENABLE)) | 239 | if (pos <= 0) |
247 | return 0; | 240 | return 0; |
248 | 241 | ||
249 | save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5, | 242 | save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5, |
@@ -276,13 +269,18 @@ static void __pci_restore_msi_state(struct pci_dev *dev) | |||
276 | struct pci_cap_saved_state *save_state; | 269 | struct pci_cap_saved_state *save_state; |
277 | u32 *cap; | 270 | u32 *cap; |
278 | 271 | ||
272 | if (!dev->msi_enabled) | ||
273 | return; | ||
274 | |||
279 | save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI); | 275 | save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI); |
280 | pos = pci_find_capability(dev, PCI_CAP_ID_MSI); | 276 | pos = pci_find_capability(dev, PCI_CAP_ID_MSI); |
281 | if (!save_state || pos <= 0) | 277 | if (!save_state || pos <= 0) |
282 | return; | 278 | return; |
283 | cap = &save_state->data[0]; | 279 | cap = &save_state->data[0]; |
284 | 280 | ||
281 | pci_intx(dev, 0); /* disable intx */ | ||
285 | control = cap[i++] >> 16; | 282 | control = cap[i++] >> 16; |
283 | msi_set_enable(dev, 0); | ||
286 | pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]); | 284 | pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]); |
287 | if (control & PCI_MSI_FLAGS_64BIT) { | 285 | if (control & PCI_MSI_FLAGS_64BIT) { |
288 | pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]); | 286 | pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]); |
@@ -292,7 +290,6 @@ static void __pci_restore_msi_state(struct pci_dev *dev) | |||
292 | if (control & PCI_MSI_FLAGS_MASKBIT) | 290 | if (control & PCI_MSI_FLAGS_MASKBIT) |
293 | pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]); | 291 | pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]); |
294 | pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); | 292 | pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); |
295 | enable_msi_mode(dev, pos, PCI_CAP_ID_MSI); | ||
296 | pci_remove_saved_cap(save_state); | 293 | pci_remove_saved_cap(save_state); |
297 | kfree(save_state); | 294 | kfree(save_state); |
298 | } | 295 | } |
@@ -308,13 +305,11 @@ static int __pci_save_msix_state(struct pci_dev *dev) | |||
308 | return 0; | 305 | return 0; |
309 | 306 | ||
310 | pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); | 307 | pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); |
311 | if (pos <= 0 || dev->no_msi) | 308 | if (pos <= 0) |
312 | return 0; | 309 | return 0; |
313 | 310 | ||
314 | /* save the capability */ | 311 | /* save the capability */ |
315 | pci_read_config_word(dev, msi_control_reg(pos), &control); | 312 | pci_read_config_word(dev, msi_control_reg(pos), &control); |
316 | if (!(control & PCI_MSIX_FLAGS_ENABLE)) | ||
317 | return 0; | ||
318 | save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16), | 313 | save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16), |
319 | GFP_KERNEL); | 314 | GFP_KERNEL); |
320 | if (!save_state) { | 315 | if (!save_state) { |
@@ -376,6 +371,8 @@ static void __pci_restore_msix_state(struct pci_dev *dev) | |||
376 | return; | 371 | return; |
377 | 372 | ||
378 | /* route the table */ | 373 | /* route the table */ |
374 | pci_intx(dev, 0); /* disable intx */ | ||
375 | msix_set_enable(dev, 0); | ||
379 | irq = head = dev->first_msi_irq; | 376 | irq = head = dev->first_msi_irq; |
380 | while (head != tail) { | 377 | while (head != tail) { |
381 | entry = get_irq_msi(irq); | 378 | entry = get_irq_msi(irq); |
@@ -386,7 +383,6 @@ static void __pci_restore_msix_state(struct pci_dev *dev) | |||
386 | } | 383 | } |
387 | 384 | ||
388 | pci_write_config_word(dev, msi_control_reg(pos), save); | 385 | pci_write_config_word(dev, msi_control_reg(pos), save); |
389 | enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX); | ||
390 | } | 386 | } |
391 | 387 | ||
392 | void pci_restore_msi_state(struct pci_dev *dev) | 388 | void pci_restore_msi_state(struct pci_dev *dev) |
@@ -411,6 +407,8 @@ static int msi_capability_init(struct pci_dev *dev) | |||
411 | int pos, irq; | 407 | int pos, irq; |
412 | u16 control; | 408 | u16 control; |
413 | 409 | ||
410 | msi_set_enable(dev, 0); /* Ensure msi is disabled as I set it up */ | ||
411 | |||
414 | pos = pci_find_capability(dev, PCI_CAP_ID_MSI); | 412 | pos = pci_find_capability(dev, PCI_CAP_ID_MSI); |
415 | pci_read_config_word(dev, msi_control_reg(pos), &control); | 413 | pci_read_config_word(dev, msi_control_reg(pos), &control); |
416 | /* MSI Entry Initialization */ | 414 | /* MSI Entry Initialization */ |
@@ -454,7 +452,9 @@ static int msi_capability_init(struct pci_dev *dev) | |||
454 | set_irq_msi(irq, entry); | 452 | set_irq_msi(irq, entry); |
455 | 453 | ||
456 | /* Set MSI enabled bits */ | 454 | /* Set MSI enabled bits */ |
457 | enable_msi_mode(dev, pos, PCI_CAP_ID_MSI); | 455 | pci_intx(dev, 0); /* disable intx */ |
456 | msi_set_enable(dev, 1); | ||
457 | dev->msi_enabled = 1; | ||
458 | 458 | ||
459 | dev->irq = irq; | 459 | dev->irq = irq; |
460 | return 0; | 460 | return 0; |
@@ -481,6 +481,8 @@ static int msix_capability_init(struct pci_dev *dev, | |||
481 | u8 bir; | 481 | u8 bir; |
482 | void __iomem *base; | 482 | void __iomem *base; |
483 | 483 | ||
484 | msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */ | ||
485 | |||
484 | pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); | 486 | pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); |
485 | /* Request & Map MSI-X table region */ | 487 | /* Request & Map MSI-X table region */ |
486 | pci_read_config_word(dev, msi_control_reg(pos), &control); | 488 | pci_read_config_word(dev, msi_control_reg(pos), &control); |
@@ -549,7 +551,9 @@ static int msix_capability_init(struct pci_dev *dev, | |||
549 | } | 551 | } |
550 | dev->first_msi_irq = entries[0].vector; | 552 | dev->first_msi_irq = entries[0].vector; |
551 | /* Set MSI-X enabled bits */ | 553 | /* Set MSI-X enabled bits */ |
552 | enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX); | 554 | pci_intx(dev, 0); /* disable intx */ |
555 | msix_set_enable(dev, 1); | ||
556 | dev->msix_enabled = 1; | ||
553 | 557 | ||
554 | return 0; | 558 | return 0; |
555 | } | 559 | } |
@@ -611,12 +615,11 @@ int pci_enable_msi(struct pci_dev* dev) | |||
611 | WARN_ON(!!dev->msi_enabled); | 615 | WARN_ON(!!dev->msi_enabled); |
612 | 616 | ||
613 | /* Check whether driver already requested for MSI-X irqs */ | 617 | /* Check whether driver already requested for MSI-X irqs */ |
614 | pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); | 618 | if (dev->msix_enabled) { |
615 | if (pos > 0 && dev->msix_enabled) { | 619 | printk(KERN_INFO "PCI: %s: Can't enable MSI. " |
616 | printk(KERN_INFO "PCI: %s: Can't enable MSI. " | 620 | "Device already has MSI-X enabled\n", |
617 | "Device already has MSI-X enabled\n", | 621 | pci_name(dev)); |
618 | pci_name(dev)); | 622 | return -EINVAL; |
619 | return -EINVAL; | ||
620 | } | 623 | } |
621 | status = msi_capability_init(dev); | 624 | status = msi_capability_init(dev); |
622 | return status; | 625 | return status; |
@@ -625,8 +628,7 @@ int pci_enable_msi(struct pci_dev* dev) | |||
625 | void pci_disable_msi(struct pci_dev* dev) | 628 | void pci_disable_msi(struct pci_dev* dev) |
626 | { | 629 | { |
627 | struct msi_desc *entry; | 630 | struct msi_desc *entry; |
628 | int pos, default_irq; | 631 | int default_irq; |
629 | u16 control; | ||
630 | 632 | ||
631 | if (!pci_msi_enable) | 633 | if (!pci_msi_enable) |
632 | return; | 634 | return; |
@@ -636,16 +638,9 @@ void pci_disable_msi(struct pci_dev* dev) | |||
636 | if (!dev->msi_enabled) | 638 | if (!dev->msi_enabled) |
637 | return; | 639 | return; |
638 | 640 | ||
639 | pos = pci_find_capability(dev, PCI_CAP_ID_MSI); | 641 | msi_set_enable(dev, 0); |
640 | if (!pos) | 642 | pci_intx(dev, 1); /* enable intx */ |
641 | return; | 643 | dev->msi_enabled = 0; |
642 | |||
643 | pci_read_config_word(dev, msi_control_reg(pos), &control); | ||
644 | if (!(control & PCI_MSI_FLAGS_ENABLE)) | ||
645 | return; | ||
646 | |||
647 | |||
648 | disable_msi_mode(dev, pos, PCI_CAP_ID_MSI); | ||
649 | 644 | ||
650 | entry = get_irq_msi(dev->first_msi_irq); | 645 | entry = get_irq_msi(dev->first_msi_irq); |
651 | if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) { | 646 | if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) { |
@@ -746,8 +741,7 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) | |||
746 | WARN_ON(!!dev->msix_enabled); | 741 | WARN_ON(!!dev->msix_enabled); |
747 | 742 | ||
748 | /* Check whether driver already requested for MSI irq */ | 743 | /* Check whether driver already requested for MSI irq */ |
749 | if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 && | 744 | if (dev->msi_enabled) { |
750 | dev->msi_enabled) { | ||
751 | printk(KERN_INFO "PCI: %s: Can't enable MSI-X. " | 745 | printk(KERN_INFO "PCI: %s: Can't enable MSI-X. " |
752 | "Device already has an MSI irq assigned\n", | 746 | "Device already has an MSI irq assigned\n", |
753 | pci_name(dev)); | 747 | pci_name(dev)); |
@@ -760,8 +754,6 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) | |||
760 | void pci_disable_msix(struct pci_dev* dev) | 754 | void pci_disable_msix(struct pci_dev* dev) |
761 | { | 755 | { |
762 | int irq, head, tail = 0, warning = 0; | 756 | int irq, head, tail = 0, warning = 0; |
763 | int pos; | ||
764 | u16 control; | ||
765 | 757 | ||
766 | if (!pci_msi_enable) | 758 | if (!pci_msi_enable) |
767 | return; | 759 | return; |
@@ -771,15 +763,9 @@ void pci_disable_msix(struct pci_dev* dev) | |||
771 | if (!dev->msix_enabled) | 763 | if (!dev->msix_enabled) |
772 | return; | 764 | return; |
773 | 765 | ||
774 | pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); | 766 | msix_set_enable(dev, 0); |
775 | if (!pos) | 767 | pci_intx(dev, 1); /* enable intx */ |
776 | return; | 768 | dev->msix_enabled = 0; |
777 | |||
778 | pci_read_config_word(dev, msi_control_reg(pos), &control); | ||
779 | if (!(control & PCI_MSIX_FLAGS_ENABLE)) | ||
780 | return; | ||
781 | |||
782 | disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX); | ||
783 | 769 | ||
784 | irq = head = dev->first_msi_irq; | 770 | irq = head = dev->first_msi_irq; |
785 | while (head != tail) { | 771 | while (head != tail) { |
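With enable and disable now symmetric wrappers around the capability's enable bit, the driver-visible sequence is unchanged. A sketch of typical usage from a probe path (my_isr and the device name are placeholders; error handling abbreviated):

    static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            int err;

            err = pci_enable_device(pdev);
            if (err)
                    return err;

            if (pci_enable_msi(pdev))
                    dev_info(&pdev->dev, "MSI unavailable, using INTx\n");

            /* pdev->irq is the MSI vector if the call above succeeded */
            err = request_irq(pdev->irq, my_isr, IRQF_SHARED, "mydev", pdev);
            if (err) {
                    pci_disable_msi(pdev);
                    pci_disable_device(pdev);
            }
            return err;
    }
    /* teardown runs in reverse: free_irq(), pci_disable_msi(),
     * pci_disable_device() */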
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 1e74e1ee8bd8..df495300ce3d 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -881,13 +881,6 @@ pci_disable_device(struct pci_dev *dev) | |||
881 | if (atomic_sub_return(1, &dev->enable_cnt) != 0) | 881 | if (atomic_sub_return(1, &dev->enable_cnt) != 0) |
882 | return; | 882 | return; |
883 | 883 | ||
884 | if (dev->msi_enabled) | ||
885 | disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), | ||
886 | PCI_CAP_ID_MSI); | ||
887 | if (dev->msix_enabled) | ||
888 | disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), | ||
889 | PCI_CAP_ID_MSIX); | ||
890 | |||
891 | pci_read_config_word(dev, PCI_COMMAND, &pci_command); | 884 | pci_read_config_word(dev, PCI_COMMAND, &pci_command); |
892 | if (pci_command & PCI_COMMAND_MASTER) { | 885 | if (pci_command & PCI_COMMAND_MASTER) { |
893 | pci_command &= ~PCI_COMMAND_MASTER; | 886 | pci_command &= ~PCI_COMMAND_MASTER; |
@@ -1277,6 +1270,33 @@ pci_intx(struct pci_dev *pdev, int enable) | |||
1277 | } | 1270 | } |
1278 | } | 1271 | } |
1279 | 1272 | ||
1273 | /** | ||
1274 | * pci_msi_off - disables any msi or msix capabilities | ||
1275 | * @pdev: the PCI device to operate on | ||
1276 | * | ||
1277 | * If you want to use msi see pci_enable_msi and friends. | ||
1278 | * This is a lower level primitive that allows us to disable | ||
1279 | * msi operation at the device level. | ||
1280 | */ | ||
1281 | void pci_msi_off(struct pci_dev *dev) | ||
1282 | { | ||
1283 | int pos; | ||
1284 | u16 control; | ||
1285 | |||
1286 | pos = pci_find_capability(dev, PCI_CAP_ID_MSI); | ||
1287 | if (pos) { | ||
1288 | pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); | ||
1289 | control &= ~PCI_MSI_FLAGS_ENABLE; | ||
1290 | pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); | ||
1291 | } | ||
1292 | pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); | ||
1293 | if (pos) { | ||
1294 | pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control); | ||
1295 | control &= ~PCI_MSIX_FLAGS_ENABLE; | ||
1296 | pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); | ||
1297 | } | ||
1298 | } | ||
1299 | |||
1280 | #ifndef HAVE_ARCH_PCI_SET_DMA_MASK | 1300 | #ifndef HAVE_ARCH_PCI_SET_DMA_MASK |
1281 | /* | 1301 | /* |
1282 | * These can be overridden by arch-specific implementations | 1302 | * These can be overridden by arch-specific implementations |
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index a4f2d580625e..ae7a975995a5 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
@@ -46,10 +46,8 @@ extern struct rw_semaphore pci_bus_sem; | |||
46 | extern unsigned int pci_pm_d3_delay; | 46 | extern unsigned int pci_pm_d3_delay; |
47 | 47 | ||
48 | #ifdef CONFIG_PCI_MSI | 48 | #ifdef CONFIG_PCI_MSI |
49 | void disable_msi_mode(struct pci_dev *dev, int pos, int type); | ||
50 | void pci_no_msi(void); | 49 | void pci_no_msi(void); |
51 | #else | 50 | #else |
52 | static inline void disable_msi_mode(struct pci_dev *dev, int pos, int type) { } | ||
53 | static inline void pci_no_msi(void) { } | 51 | static inline void pci_no_msi(void) { } |
54 | #endif | 52 | #endif |
55 | 53 | ||
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 1bf548287564..7f94fc098cd3 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -1438,8 +1438,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quir | |||
1438 | */ | 1438 | */ |
1439 | static void __devinit quirk_pcie_pxh(struct pci_dev *dev) | 1439 | static void __devinit quirk_pcie_pxh(struct pci_dev *dev) |
1440 | { | 1440 | { |
1441 | disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), | 1441 | pci_msi_off(dev); |
1442 | PCI_CAP_ID_MSI); | 1442 | |
1443 | dev->no_msi = 1; | 1443 | dev->no_msi = 1; |
1444 | 1444 | ||
1445 | printk(KERN_WARNING "PCI: PXH quirk detected, " | 1445 | printk(KERN_WARNING "PCI: PXH quirk detected, " |
diff --git a/drivers/serial/dz.c b/drivers/serial/dz.c index 587d87b9eb3c..d31721f2744d 100644 --- a/drivers/serial/dz.c +++ b/drivers/serial/dz.c | |||
@@ -170,8 +170,7 @@ static void dz_enable_ms(struct uart_port *port) | |||
170 | * This routine deals with inputs from any lines. | 170 | * This routine deals with inputs from any lines. |
171 | * ------------------------------------------------------------ | 171 | * ------------------------------------------------------------ |
172 | */ | 172 | */ |
173 | static inline void dz_receive_chars(struct dz_port *dport_in, | 173 | static inline void dz_receive_chars(struct dz_port *dport_in) |
174 | struct pt_regs *regs) | ||
175 | { | 174 | { |
176 | struct dz_port *dport; | 175 | struct dz_port *dport; |
177 | struct tty_struct *tty = NULL; | 176 | struct tty_struct *tty = NULL; |
@@ -226,7 +225,7 @@ static inline void dz_receive_chars(struct dz_port *dport_in, | |||
226 | break; | 225 | break; |
227 | } | 226 | } |
228 | 227 | ||
229 | if (uart_handle_sysrq_char(&dport->port, ch, regs)) | 228 | if (uart_handle_sysrq_char(&dport->port, ch)) |
230 | continue; | 229 | continue; |
231 | 230 | ||
232 | if ((status & dport->port.ignore_status_mask) == 0) { | 231 | if ((status & dport->port.ignore_status_mask) == 0) { |
@@ -332,7 +331,7 @@ static irqreturn_t dz_interrupt(int irq, void *dev) | |||
332 | status = dz_in(dport, DZ_CSR); | 331 | status = dz_in(dport, DZ_CSR); |
333 | 332 | ||
334 | if ((status & (DZ_RDONE | DZ_RIE)) == (DZ_RDONE | DZ_RIE)) | 333 | if ((status & (DZ_RDONE | DZ_RIE)) == (DZ_RDONE | DZ_RIE)) |
335 | dz_receive_chars(dport, regs); | 334 | dz_receive_chars(dport); |
336 | 335 | ||
337 | if ((status & (DZ_TRDY | DZ_TIE)) == (DZ_TRDY | DZ_TIE)) | 336 | if ((status & (DZ_TRDY | DZ_TIE)) == (DZ_TRDY | DZ_TIE)) |
338 | dz_transmit_chars(dport); | 337 | dz_transmit_chars(dport); |
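The dropped pt_regs parameter follows the tree-wide switch to two-argument interrupt handlers; the few handlers that genuinely need register state fetch it themselves. The post-change shape, sketched with assumed names:

    #include <linux/interrupt.h>
    #include <asm/irq_regs.h>

    static irqreturn_t my_isr(int irq, void *dev_id)
    {
            struct pt_regs *regs = get_irq_regs();

            /* regs is only for the rare handler that needs CPU state,
             * e.g. sysrq processing; most handlers never touch it */
            (void)regs;

            /* ... service the device ... */
            return IRQ_HANDLED;
    }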
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index 7e7ec29782f1..8e898e3d861e 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c | |||
@@ -55,7 +55,7 @@ | |||
55 | #include <linux/slab.h> | 55 | #include <linux/slab.h> |
56 | #include <linux/kthread.h> | 56 | #include <linux/kthread.h> |
57 | #include <linux/mutex.h> | 57 | #include <linux/mutex.h> |
58 | #include <linux/utsrelease.h> | 58 | #include <linux/utsname.h> |
59 | 59 | ||
60 | #include <scsi/scsi.h> | 60 | #include <scsi/scsi.h> |
61 | #include <scsi/scsi_cmnd.h> | 61 | #include <scsi/scsi_cmnd.h> |
@@ -547,7 +547,7 @@ static int get_device_info(struct us_data *us, const struct usb_device_id *id) | |||
547 | idesc->bInterfaceSubClass, | 547 | idesc->bInterfaceSubClass, |
548 | idesc->bInterfaceProtocol, | 548 | idesc->bInterfaceProtocol, |
549 | msgs[msg], | 549 | msgs[msg], |
550 | UTS_RELEASE); | 550 | utsname()->release); |
551 | } | 551 | } |
552 | 552 | ||
553 | return 0; | 553 | return 0; |
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c index 00a51835fd82..d7627fc4f11e 100644 --- a/drivers/video/aty/atyfb_base.c +++ b/drivers/video/aty/atyfb_base.c | |||
@@ -131,7 +131,8 @@ | |||
131 | #define PRINTKI(fmt, args...) printk(KERN_INFO "atyfb: " fmt, ## args) | 131 | #define PRINTKI(fmt, args...) printk(KERN_INFO "atyfb: " fmt, ## args) |
132 | #define PRINTKE(fmt, args...) printk(KERN_ERR "atyfb: " fmt, ## args) | 132 | #define PRINTKE(fmt, args...) printk(KERN_ERR "atyfb: " fmt, ## args) |
133 | 133 | ||
134 | #if defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || defined (CONFIG_FB_ATY_GENERIC_LCD) | 134 | #if defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || \ |
135 | defined (CONFIG_FB_ATY_GENERIC_LCD) || defined(CONFIG_FB_ATY_BACKLIGHT) | ||
135 | static const u32 lt_lcd_regs[] = { | 136 | static const u32 lt_lcd_regs[] = { |
136 | CONFIG_PANEL_LG, | 137 | CONFIG_PANEL_LG, |
137 | LCD_GEN_CNTL_LG, | 138 | LCD_GEN_CNTL_LG, |
diff --git a/drivers/video/aty/mach64_ct.c b/drivers/video/aty/mach64_ct.c index f3b487b8710b..1fdcfdbf669b 100644 --- a/drivers/video/aty/mach64_ct.c +++ b/drivers/video/aty/mach64_ct.c | |||
@@ -598,7 +598,6 @@ static void aty_resume_pll_ct(const struct fb_info *info, | |||
598 | struct atyfb_par *par = info->par; | 598 | struct atyfb_par *par = info->par; |
599 | 599 | ||
600 | if (par->mclk_per != par->xclk_per) { | 600 | if (par->mclk_per != par->xclk_per) { |
601 | int i; | ||
602 | /* | 601 | /* |
603 | * This disables the sclk, crashes the computer as reported: | 602 | * This disables the sclk, crashes the computer as reported: |
604 | * aty_st_pll_ct(SPLL_CNTL2, 3, info); | 603 | * aty_st_pll_ct(SPLL_CNTL2, 3, info); |
@@ -614,7 +613,7 @@ static void aty_resume_pll_ct(const struct fb_info *info, | |||
614 | * helps for Rage Mobilities that sometimes crash when | 613 | * helps for Rage Mobilities that sometimes crash when |
615 | * we switch to sclk. (Daniel Mantione, 13-05-2003) | 614 | * we switch to sclk. (Daniel Mantione, 13-05-2003) |
616 | */ | 615 | */ |
617 | for (i=0;i<=0x1ffff;i++); | 616 | udelay(500); |
618 | } | 617 | } |
619 | 618 | ||
620 | aty_st_pll_ct(PLL_REF_DIV, pll->ct.pll_ref_div, par); | 619 | aty_st_pll_ct(PLL_REF_DIV, pll->ct.pll_ref_div, par); |
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c index 58c0ac733db9..0a44c44672c8 100644 --- a/drivers/video/sm501fb.c +++ b/drivers/video/sm501fb.c | |||
@@ -1074,9 +1074,9 @@ static ssize_t sm501fb_crtsrc_store(struct device *dev, | |||
1074 | if (len < 1) | 1074 | if (len < 1) |
1075 | return -EINVAL; | 1075 | return -EINVAL; |
1076 | 1076 | ||
1077 | if (strnicmp(buf, "crt", sizeof("crt")) == 0) | 1077 | if (strnicmp(buf, "crt", 3) == 0) |
1078 | head = HEAD_CRT; | 1078 | head = HEAD_CRT; |
1079 | else if (strnicmp(buf, "panel", sizeof("panel")) == 0) | 1079 | else if (strnicmp(buf, "panel", 5) == 0) |
1080 | head = HEAD_PANEL; | 1080 | head = HEAD_PANEL; |
1081 | else | 1081 | else |
1082 | return -EINVAL; | 1082 | return -EINVAL; |
@@ -1098,7 +1098,7 @@ static ssize_t sm501fb_crtsrc_store(struct device *dev, | |||
1098 | writel(ctrl, info->regs + SM501_DC_CRT_CONTROL); | 1098 | writel(ctrl, info->regs + SM501_DC_CRT_CONTROL); |
1099 | sm501fb_sync_regs(info); | 1099 | sm501fb_sync_regs(info); |
1100 | 1100 | ||
1101 | return (head == HEAD_CRT) ? 3 : 5; | 1101 | return len; |
1102 | } | 1102 | } |
1103 | 1103 | ||
1104 | /* Prepare the device_attr for registration with sysfs later */ | 1104 | /* Prepare the device_attr for registration with sysfs later */ |
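Two bugs are fixed at once here: sizeof("crt") is 4, so the old compare included the terminating NUL and could never match a write of "crt\n"; and a store method that returns fewer bytes than it was given makes the VFS retry the leftover bytes. The canonical shape of such a store, sketched:

    static ssize_t my_src_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t len)
    {
            int head;

            /* compare only the keyword; buf usually ends with '\n' */
            if (strnicmp(buf, "crt", 3) == 0)
                    head = HEAD_CRT;
            else if (strnicmp(buf, "panel", 5) == 0)
                    head = HEAD_PANEL;
            else
                    return -EINVAL;

            /* ... program the hardware for 'head' ... */
            return len;     /* consume the entire write, newline included */
    }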
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 0cfff4fefa9e..e62f3fc7241e 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c | |||
@@ -168,9 +168,9 @@ static int grow_file(struct dentry *ecryptfs_dentry, struct file *lower_file, | |||
168 | goto out; | 168 | goto out; |
169 | } | 169 | } |
170 | i_size_write(inode, 0); | 170 | i_size_write(inode, 0); |
171 | ecryptfs_write_inode_size_to_metadata(lower_file, lower_inode, inode, | 171 | rc = ecryptfs_write_inode_size_to_metadata(lower_file, lower_inode, |
172 | ecryptfs_dentry, | 172 | inode, ecryptfs_dentry, |
173 | ECRYPTFS_LOWER_I_MUTEX_NOT_HELD); | 173 | ECRYPTFS_LOWER_I_MUTEX_NOT_HELD); |
174 | ecryptfs_inode_to_private(inode)->crypt_stat.flags |= ECRYPTFS_NEW_FILE; | 174 | ecryptfs_inode_to_private(inode)->crypt_stat.flags |= ECRYPTFS_NEW_FILE; |
175 | out: | 175 | out: |
176 | return rc; | 176 | return rc; |
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index 812427e6805c..fc4a3a224641 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c | |||
@@ -484,18 +484,12 @@ static int ecryptfs_read_super(struct super_block *sb, const char *dev_name) | |||
484 | struct vfsmount *lower_mnt; | 484 | struct vfsmount *lower_mnt; |
485 | 485 | ||
486 | memset(&nd, 0, sizeof(struct nameidata)); | 486 | memset(&nd, 0, sizeof(struct nameidata)); |
487 | rc = path_lookup(dev_name, LOOKUP_FOLLOW, &nd); | 487 | rc = path_lookup(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &nd); |
488 | if (rc) { | 488 | if (rc) { |
489 | ecryptfs_printk(KERN_WARNING, "path_lookup() failed\n"); | 489 | ecryptfs_printk(KERN_WARNING, "path_lookup() failed\n"); |
490 | goto out; | 490 | goto out; |
491 | } | 491 | } |
492 | lower_root = nd.dentry; | 492 | lower_root = nd.dentry; |
493 | if (!lower_root->d_inode) { | ||
494 | ecryptfs_printk(KERN_WARNING, | ||
495 | "No directory to interpose on\n"); | ||
496 | rc = -ENOENT; | ||
497 | goto out_free; | ||
498 | } | ||
499 | lower_mnt = nd.mnt; | 493 | lower_mnt = nd.mnt; |
500 | ecryptfs_set_superblock_lower(sb, lower_root->d_sb); | 494 | ecryptfs_set_superblock_lower(sb, lower_root->d_sb); |
501 | sb->s_maxbytes = lower_root->d_sb->s_maxbytes; | 495 | sb->s_maxbytes = lower_root->d_sb->s_maxbytes; |
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index 7be8e91b5ba0..b731b09499cb 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c | |||
@@ -446,6 +446,7 @@ static int ecryptfs_write_inode_size_to_header(struct file *lower_file, | |||
446 | const struct address_space_operations *lower_a_ops; | 446 | const struct address_space_operations *lower_a_ops; |
447 | u64 file_size; | 447 | u64 file_size; |
448 | 448 | ||
449 | retry: | ||
449 | header_page = grab_cache_page(lower_inode->i_mapping, 0); | 450 | header_page = grab_cache_page(lower_inode->i_mapping, 0); |
450 | if (!header_page) { | 451 | if (!header_page) { |
451 | ecryptfs_printk(KERN_ERR, "grab_cache_page for " | 452 | ecryptfs_printk(KERN_ERR, "grab_cache_page for " |
@@ -456,9 +457,10 @@ static int ecryptfs_write_inode_size_to_header(struct file *lower_file, | |||
456 | lower_a_ops = lower_inode->i_mapping->a_ops; | 457 | lower_a_ops = lower_inode->i_mapping->a_ops; |
457 | rc = lower_a_ops->prepare_write(lower_file, header_page, 0, 8); | 458 | rc = lower_a_ops->prepare_write(lower_file, header_page, 0, 8); |
458 | if (rc) { | 459 | if (rc) { |
459 | if (rc == AOP_TRUNCATED_PAGE) | 460 | if (rc == AOP_TRUNCATED_PAGE) { |
460 | ecryptfs_release_lower_page(header_page, 0); | 461 | ecryptfs_release_lower_page(header_page, 0); |
461 | else | 462 | goto retry; |
463 | } else | ||
462 | ecryptfs_release_lower_page(header_page, 1); | 464 | ecryptfs_release_lower_page(header_page, 1); |
463 | goto out; | 465 | goto out; |
464 | } | 466 | } |
@@ -473,9 +475,10 @@ static int ecryptfs_write_inode_size_to_header(struct file *lower_file, | |||
473 | if (rc < 0) | 475 | if (rc < 0) |
474 | ecryptfs_printk(KERN_ERR, "Error commiting header page " | 476 | ecryptfs_printk(KERN_ERR, "Error commiting header page " |
475 | "write\n"); | 477 | "write\n"); |
476 | if (rc == AOP_TRUNCATED_PAGE) | 478 | if (rc == AOP_TRUNCATED_PAGE) { |
477 | ecryptfs_release_lower_page(header_page, 0); | 479 | ecryptfs_release_lower_page(header_page, 0); |
478 | else | 480 | goto retry; |
481 | } else | ||
479 | ecryptfs_release_lower_page(header_page, 1); | 482 | ecryptfs_release_lower_page(header_page, 1); |
480 | lower_inode->i_mtime = lower_inode->i_ctime = CURRENT_TIME; | 483 | lower_inode->i_mtime = lower_inode->i_ctime = CURRENT_TIME; |
481 | mark_inode_dirty_sync(inode); | 484 | mark_inode_dirty_sync(inode); |
@@ -502,7 +505,8 @@ static int ecryptfs_write_inode_size_to_xattr(struct inode *lower_inode, | |||
502 | goto out; | 505 | goto out; |
503 | } | 506 | } |
504 | lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry); | 507 | lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry); |
505 | if (!lower_dentry->d_inode->i_op->getxattr) { | 508 | if (!lower_dentry->d_inode->i_op->getxattr || |
509 | !lower_dentry->d_inode->i_op->setxattr) { | ||
506 | printk(KERN_WARNING | 510 | printk(KERN_WARNING |
507 | "No support for setting xattr in lower filesystem\n"); | 511 | "No support for setting xattr in lower filesystem\n"); |
508 | rc = -ENOSYS; | 512 | rc = -ENOSYS; |
@@ -564,6 +568,7 @@ int ecryptfs_get_lower_page(struct page **lower_page, struct inode *lower_inode, | |||
564 | { | 568 | { |
565 | int rc = 0; | 569 | int rc = 0; |
566 | 570 | ||
571 | retry: | ||
567 | *lower_page = grab_cache_page(lower_inode->i_mapping, lower_page_index); | 572 | *lower_page = grab_cache_page(lower_inode->i_mapping, lower_page_index); |
568 | if (!(*lower_page)) { | 573 | if (!(*lower_page)) { |
569 | rc = -EINVAL; | 574 | rc = -EINVAL; |
@@ -577,18 +582,18 @@ int ecryptfs_get_lower_page(struct page **lower_page, struct inode *lower_inode, | |||
577 | byte_offset, | 582 | byte_offset, |
578 | region_bytes); | 583 | region_bytes); |
579 | if (rc) { | 584 | if (rc) { |
580 | ecryptfs_printk(KERN_ERR, "prepare_write for " | 585 | if (rc == AOP_TRUNCATED_PAGE) { |
586 | ecryptfs_release_lower_page(*lower_page, 0); | ||
587 | goto retry; | ||
588 | } else { | ||
589 | ecryptfs_printk(KERN_ERR, "prepare_write for " | ||
581 | "lower_page_index = [0x%.16x] failed; rc = " | 590 | "lower_page_index = [0x%.16x] failed; rc = " |
582 | "[%d]\n", lower_page_index, rc); | 591 | "[%d]\n", lower_page_index, rc); |
583 | } | ||
584 | out: | ||
585 | if (rc && (*lower_page)) { | ||
586 | if (rc == AOP_TRUNCATED_PAGE) | ||
587 | ecryptfs_release_lower_page(*lower_page, 0); | ||
588 | else | ||
589 | ecryptfs_release_lower_page(*lower_page, 1); | 592 | ecryptfs_release_lower_page(*lower_page, 1); |
590 | (*lower_page) = NULL; | 593 | (*lower_page) = NULL; |
594 | } | ||
591 | } | 595 | } |
596 | out: | ||
592 | return rc; | 597 | return rc; |
593 | } | 598 | } |
594 | 599 | ||
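AOP_TRUNCATED_PAGE tells the caller that the address-space operation unlocked and dropped the page because it was truncated underneath; the only correct response is to release the reference and redo the lookup, which is what the new retry labels implement. The pattern in isolation (a sketch; the release helper is the one used above):

    static int write_prepared_page(struct file *file,
                                   struct address_space *mapping,
                                   pgoff_t index, unsigned from, unsigned to)
    {
            const struct address_space_operations *a_ops = mapping->a_ops;
            struct page *page;
            int rc;

    retry:
            page = grab_cache_page(mapping, index);
            if (!page)
                    return -ENOMEM;

            rc = a_ops->prepare_write(file, page, from, to);
            if (rc == AOP_TRUNCATED_PAGE) {
                    /* aop unlocked the page; drop it and look it up again */
                    ecryptfs_release_lower_page(page, 0);
                    goto retry;
            }
            return rc;
    }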
diff --git a/fs/libfs.c b/fs/libfs.c index cf79196535ec..d93842d3c0a0 100644 --- a/fs/libfs.c +++ b/fs/libfs.c | |||
@@ -190,6 +190,10 @@ const struct inode_operations simple_dir_inode_operations = { | |||
190 | .lookup = simple_lookup, | 190 | .lookup = simple_lookup, |
191 | }; | 191 | }; |
192 | 192 | ||
193 | static const struct super_operations simple_super_operations = { | ||
194 | .statfs = simple_statfs, | ||
195 | }; | ||
196 | |||
193 | /* | 197 | /* |
194 | * Common helper for pseudo-filesystems (sockfs, pipefs, bdev - stuff that | 198 | * Common helper for pseudo-filesystems (sockfs, pipefs, bdev - stuff that |
195 | * will never be mountable) | 199 | * will never be mountable) |
@@ -199,7 +203,6 @@ int get_sb_pseudo(struct file_system_type *fs_type, char *name, | |||
199 | struct vfsmount *mnt) | 203 | struct vfsmount *mnt) |
200 | { | 204 | { |
201 | struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL); | 205 | struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL); |
202 | static const struct super_operations default_ops = {.statfs = simple_statfs}; | ||
203 | struct dentry *dentry; | 206 | struct dentry *dentry; |
204 | struct inode *root; | 207 | struct inode *root; |
205 | struct qstr d_name = {.name = name, .len = strlen(name)}; | 208 | struct qstr d_name = {.name = name, .len = strlen(name)}; |
@@ -212,7 +215,7 @@ int get_sb_pseudo(struct file_system_type *fs_type, char *name, | |||
212 | s->s_blocksize = 1024; | 215 | s->s_blocksize = 1024; |
213 | s->s_blocksize_bits = 10; | 216 | s->s_blocksize_bits = 10; |
214 | s->s_magic = magic; | 217 | s->s_magic = magic; |
215 | s->s_op = ops ? ops : &default_ops; | 218 | s->s_op = ops ? ops : &simple_super_operations; |
216 | s->s_time_gran = 1; | 219 | s->s_time_gran = 1; |
217 | root = new_inode(s); | 220 | root = new_inode(s); |
218 | if (!root) | 221 | if (!root) |
@@ -359,7 +362,6 @@ int simple_commit_write(struct file *file, struct page *page, | |||
359 | 362 | ||
360 | int simple_fill_super(struct super_block *s, int magic, struct tree_descr *files) | 363 | int simple_fill_super(struct super_block *s, int magic, struct tree_descr *files) |
361 | { | 364 | { |
362 | static struct super_operations s_ops = {.statfs = simple_statfs}; | ||
363 | struct inode *inode; | 365 | struct inode *inode; |
364 | struct dentry *root; | 366 | struct dentry *root; |
365 | struct dentry *dentry; | 367 | struct dentry *dentry; |
@@ -368,7 +370,7 @@ int simple_fill_super(struct super_block *s, int magic, struct tree_descr *files | |||
368 | s->s_blocksize = PAGE_CACHE_SIZE; | 370 | s->s_blocksize = PAGE_CACHE_SIZE; |
369 | s->s_blocksize_bits = PAGE_CACHE_SHIFT; | 371 | s->s_blocksize_bits = PAGE_CACHE_SHIFT; |
370 | s->s_magic = magic; | 372 | s->s_magic = magic; |
371 | s->s_op = &s_ops; | 373 | s->s_op = &simple_super_operations; |
372 | s->s_time_gran = 1; | 374 | s->s_time_gran = 1; |
373 | 375 | ||
374 | inode = new_inode(s); | 376 | inode = new_inode(s); |
diff --git a/include/asm-i386/delay.h b/include/asm-i386/delay.h index 32d6678d0bbf..9ae5e3782ed8 100644 --- a/include/asm-i386/delay.h +++ b/include/asm-i386/delay.h | |||
@@ -16,13 +16,6 @@ extern void __ndelay(unsigned long nsecs); | |||
16 | extern void __const_udelay(unsigned long usecs); | 16 | extern void __const_udelay(unsigned long usecs); |
17 | extern void __delay(unsigned long loops); | 17 | extern void __delay(unsigned long loops); |
18 | 18 | ||
19 | #if defined(CONFIG_PARAVIRT) && !defined(USE_REAL_TIME_DELAY) | ||
20 | #define udelay(n) paravirt_ops.const_udelay((n) * 0x10c7ul) | ||
21 | |||
22 | #define ndelay(n) paravirt_ops.const_udelay((n) * 5ul) | ||
23 | |||
24 | #else /* !PARAVIRT || USE_REAL_TIME_DELAY */ | ||
25 | |||
26 | /* 0x10c7 is 2**32 / 1000000 (rounded up) */ | 19 | /* 0x10c7 is 2**32 / 1000000 (rounded up) */ |
27 | #define udelay(n) (__builtin_constant_p(n) ? \ | 20 | #define udelay(n) (__builtin_constant_p(n) ? \ |
28 | ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \ | 21 | ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \ |
@@ -32,7 +25,6 @@ extern void __delay(unsigned long loops); | |||
32 | #define ndelay(n) (__builtin_constant_p(n) ? \ | 25 | #define ndelay(n) (__builtin_constant_p(n) ? \ |
33 | ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \ | 26 | ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \ |
34 | __ndelay(n)) | 27 | __ndelay(n)) |
35 | #endif | ||
36 | 28 | ||
37 | void use_tsc_delay(void); | 29 | void use_tsc_delay(void); |
38 | 30 | ||
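The magic constants encode a 32.32 fixed-point conversion: 0x10c7 = 4295 = ceil(2^32 / 10^6), so __const_udelay() receives microseconds expressed as a 2^32-scaled fraction of a second, and 5 plays the same role for nanoseconds. A quick standalone check:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* 2^32 / 1e6 = 4294.967296, rounded up */
            uint64_t us = ((1ULL << 32) + 1000000 - 1) / 1000000;
            /* 2^32 / 1e9 = 4.294967296, rounded up */
            uint64_t ns = ((1ULL << 32) + 1000000000 - 1) / 1000000000;

            assert(us == 0x10c7);   /* 4295 */
            assert(ns == 5);
            return 0;
    }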
diff --git a/include/asm-i386/io_apic.h b/include/asm-i386/io_apic.h index 059a9ff28b4d..340764076d5f 100644 --- a/include/asm-i386/io_apic.h +++ b/include/asm-i386/io_apic.h | |||
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | #include <asm/types.h> | 4 | #include <asm/types.h> |
5 | #include <asm/mpspec.h> | 5 | #include <asm/mpspec.h> |
6 | #include <asm/apicdef.h> | ||
6 | 7 | ||
7 | /* | 8 | /* |
8 | * Intel IO-APIC support for SMP and UP systems. | 9 | * Intel IO-APIC support for SMP and UP systems. |
diff --git a/include/asm-i386/nmi.h b/include/asm-i386/nmi.h index b04333ea6f31..64544cb85d6a 100644 --- a/include/asm-i386/nmi.h +++ b/include/asm-i386/nmi.h | |||
@@ -33,7 +33,7 @@ extern int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason); | |||
33 | 33 | ||
34 | extern atomic_t nmi_active; | 34 | extern atomic_t nmi_active; |
35 | extern unsigned int nmi_watchdog; | 35 | extern unsigned int nmi_watchdog; |
36 | #define NMI_DEFAULT -1 | 36 | #define NMI_DEFAULT 0 |
37 | #define NMI_NONE 0 | 37 | #define NMI_NONE 0 |
38 | #define NMI_IO_APIC 1 | 38 | #define NMI_IO_APIC 1 |
39 | #define NMI_LOCAL_APIC 2 | 39 | #define NMI_LOCAL_APIC 2 |
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h index 6317e0a4d735..f8319cae2ac5 100644 --- a/include/asm-i386/paravirt.h +++ b/include/asm-i386/paravirt.h | |||
@@ -94,6 +94,8 @@ struct paravirt_ops | |||
94 | 94 | ||
95 | u64 (*read_tsc)(void); | 95 | u64 (*read_tsc)(void); |
96 | u64 (*read_pmc)(void); | 96 | u64 (*read_pmc)(void); |
97 | u64 (*get_scheduled_cycles)(void); | ||
98 | unsigned long (*get_cpu_khz)(void); | ||
97 | 99 | ||
98 | void (*load_tr_desc)(void); | 100 | void (*load_tr_desc)(void); |
99 | void (*load_gdt)(const struct Xgt_desc_struct *); | 101 | void (*load_gdt)(const struct Xgt_desc_struct *); |
@@ -115,7 +117,6 @@ struct paravirt_ops | |||
115 | void (*set_iopl_mask)(unsigned mask); | 117 | void (*set_iopl_mask)(unsigned mask); |
116 | 118 | ||
117 | void (*io_delay)(void); | 119 | void (*io_delay)(void); |
118 | void (*const_udelay)(unsigned long loops); | ||
119 | 120 | ||
120 | #ifdef CONFIG_X86_LOCAL_APIC | 121 | #ifdef CONFIG_X86_LOCAL_APIC |
121 | void (*apic_write)(unsigned long reg, unsigned long v); | 122 | void (*apic_write)(unsigned long reg, unsigned long v); |
@@ -129,6 +130,8 @@ struct paravirt_ops | |||
129 | void (*flush_tlb_kernel)(void); | 130 | void (*flush_tlb_kernel)(void); |
130 | void (*flush_tlb_single)(u32 addr); | 131 | void (*flush_tlb_single)(u32 addr); |
131 | 132 | ||
133 | void (fastcall *map_pt_hook)(int type, pte_t *va, u32 pfn); | ||
134 | |||
132 | void (*alloc_pt)(u32 pfn); | 135 | void (*alloc_pt)(u32 pfn); |
133 | void (*alloc_pd)(u32 pfn); | 136 | void (*alloc_pd)(u32 pfn); |
134 | void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count); | 137 | void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count); |
@@ -183,9 +186,9 @@ static inline int set_wallclock(unsigned long nowtime) | |||
183 | return paravirt_ops.set_wallclock(nowtime); | 186 | return paravirt_ops.set_wallclock(nowtime); |
184 | } | 187 | } |
185 | 188 | ||
186 | static inline void do_time_init(void) | 189 | static inline void (*choose_time_init(void))(void) |
187 | { | 190 | { |
188 | return paravirt_ops.time_init(); | 191 | return paravirt_ops.time_init; |
189 | } | 192 | } |
190 | 193 | ||
191 | /* The paravirtualized CPUID instruction. */ | 194 | /* The paravirtualized CPUID instruction. */ |
@@ -273,6 +276,9 @@ static inline void halt(void) | |||
273 | 276 | ||
274 | #define rdtscll(val) (val = paravirt_ops.read_tsc()) | 277 | #define rdtscll(val) (val = paravirt_ops.read_tsc()) |
275 | 278 | ||
279 | #define get_scheduled_cycles(val) (val = paravirt_ops.get_scheduled_cycles()) | ||
280 | #define calculate_cpu_khz() (paravirt_ops.get_cpu_khz()) | ||
281 | |||
276 | #define write_tsc(val1,val2) wrmsr(0x10, val1, val2) | 282 | #define write_tsc(val1,val2) wrmsr(0x10, val1, val2) |
277 | 283 | ||
278 | #define rdpmc(counter,low,high) do { \ | 284 | #define rdpmc(counter,low,high) do { \ |
@@ -349,6 +355,8 @@ static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip, | |||
349 | #define __flush_tlb_global() paravirt_ops.flush_tlb_kernel() | 355 | #define __flush_tlb_global() paravirt_ops.flush_tlb_kernel() |
350 | #define __flush_tlb_single(addr) paravirt_ops.flush_tlb_single(addr) | 356 | #define __flush_tlb_single(addr) paravirt_ops.flush_tlb_single(addr) |
351 | 357 | ||
358 | #define paravirt_map_pt_hook(type, va, pfn) paravirt_ops.map_pt_hook(type, va, pfn) | ||
359 | |||
352 | #define paravirt_alloc_pt(pfn) paravirt_ops.alloc_pt(pfn) | 360 | #define paravirt_alloc_pt(pfn) paravirt_ops.alloc_pt(pfn) |
353 | #define paravirt_release_pt(pfn) paravirt_ops.release_pt(pfn) | 361 | #define paravirt_release_pt(pfn) paravirt_ops.release_pt(pfn) |
354 | 362 | ||
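Summary of the paravirt_ops reshaping above: const_udelay is gone (matching the delay.h change), time_init becomes a selector returned by choose_time_init(), get_scheduled_cycles/get_cpu_khz let a hypervisor supply the scheduler clock and CPU-frequency calibration, and map_pt_hook gives it visibility into highmem page-table mappings. A sketch of how a backend might fill the new time members, reusing the VMI entry points declared later in this patch (the assignment form is illustrative, not copied from vmi.c):

	/* Illustrative wiring only; field layout per the struct above. */
	paravirt_ops.get_scheduled_cycles = vmi_get_sched_cycles;
	paravirt_ops.get_cpu_khz          = vmi_cpu_khz;
	paravirt_ops.time_init            = vmi_time_init;
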
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h index e6a4723f0eb1..c3b58d473a55 100644 --- a/include/asm-i386/pgtable.h +++ b/include/asm-i386/pgtable.h | |||
@@ -263,6 +263,7 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p | |||
263 | */ | 263 | */ |
264 | #define pte_update(mm, addr, ptep) do { } while (0) | 264 | #define pte_update(mm, addr, ptep) do { } while (0) |
265 | #define pte_update_defer(mm, addr, ptep) do { } while (0) | 265 | #define pte_update_defer(mm, addr, ptep) do { } while (0) |
266 | #define paravirt_map_pt_hook(slot, va, pfn) do { } while (0) | ||
266 | #endif | 267 | #endif |
267 | 268 | ||
268 | /* | 269 | /* |
@@ -469,10 +470,24 @@ extern pte_t *lookup_address(unsigned long address); | |||
469 | #endif | 470 | #endif |
470 | 471 | ||
471 | #if defined(CONFIG_HIGHPTE) | 472 | #if defined(CONFIG_HIGHPTE) |
472 | #define pte_offset_map(dir, address) \ | 473 | #define pte_offset_map(dir, address) \ |
473 | ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address)) | 474 | ({ \ |
474 | #define pte_offset_map_nested(dir, address) \ | 475 | pte_t *__ptep; \ |
475 | ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE1) + pte_index(address)) | 476 | unsigned pfn = pmd_val(*(dir)) >> PAGE_SHIFT; \ |
477 | __ptep = (pte_t *)kmap_atomic(pfn_to_page(pfn),KM_PTE0);\ | ||
478 | paravirt_map_pt_hook(KM_PTE0,__ptep, pfn); \ | ||
479 | __ptep = __ptep + pte_index(address); \ | ||
480 | __ptep; \ | ||
481 | }) | ||
482 | #define pte_offset_map_nested(dir, address) \ | ||
483 | ({ \ | ||
484 | pte_t *__ptep; \ | ||
485 | unsigned pfn = pmd_val(*(dir)) >> PAGE_SHIFT; \ | ||
486 | __ptep = (pte_t *)kmap_atomic(pfn_to_page(pfn),KM_PTE1);\ | ||
487 | paravirt_map_pt_hook(KM_PTE1,__ptep, pfn); \ | ||
488 | __ptep = __ptep + pte_index(address); \ | ||
489 | __ptep; \ | ||
490 | }) | ||
476 | #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0) | 491 | #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0) |
477 | #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1) | 492 | #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1) |
478 | #else | 493 | #else |
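The HIGHPTE variants of pte_offset_map()/pte_offset_map_nested() become GCC statement expressions so the paravirt hook can run between mapping and use: the pmd is decoded to a pfn once, kmap_atomic() maps the page-table page, paravirt_map_pt_hook() reports which KM slot now maps that pfn at which virtual address, and only then is pte_index() added. On !CONFIG_PARAVIRT builds the hook is the empty do/while defined above, so native code is unchanged. What a hypervisor backend might do with the notification (names here are invented for illustration):

	/* Hypothetical backend hook: the hypervisor must know when a
	 * linear address temporarily aliases a page-table page. */
	static void fastcall example_map_pt_hook(int type, pte_t *va, u32 pfn)
	{
		hv_track_pt_alias(type, va, pfn);	/* assumed hypercall wrapper */
	}
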
diff --git a/include/asm-i386/time.h b/include/asm-i386/time.h index 571b4294dc2e..eac011366dc2 100644 --- a/include/asm-i386/time.h +++ b/include/asm-i386/time.h | |||
@@ -28,14 +28,16 @@ static inline int native_set_wallclock(unsigned long nowtime) | |||
28 | return retval; | 28 | return retval; |
29 | } | 29 | } |
30 | 30 | ||
31 | extern void (*late_time_init)(void); | ||
32 | extern void hpet_time_init(void); | ||
33 | |||
31 | #ifdef CONFIG_PARAVIRT | 34 | #ifdef CONFIG_PARAVIRT |
32 | #include <asm/paravirt.h> | 35 | #include <asm/paravirt.h> |
33 | extern unsigned long long native_sched_clock(void); | ||
34 | #else /* !CONFIG_PARAVIRT */ | 36 | #else /* !CONFIG_PARAVIRT */ |
35 | 37 | ||
36 | #define get_wallclock() native_get_wallclock() | 38 | #define get_wallclock() native_get_wallclock() |
37 | #define set_wallclock(x) native_set_wallclock(x) | 39 | #define set_wallclock(x) native_set_wallclock(x) |
38 | #define do_time_init() time_init_hook() | 40 | #define choose_time_init() hpet_time_init |
39 | 41 | ||
40 | #endif /* CONFIG_PARAVIRT */ | 42 | #endif /* CONFIG_PARAVIRT */ |
41 | 43 | ||
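do_time_init(), which invoked the hook immediately, becomes choose_time_init(), which only returns the function to run; natively that is now hpet_time_init. The expected call site (a sketch, assuming the arch time_init simply defers through the newly exported pointer):

	void __init time_init(void)
	{
		late_time_init = choose_time_init();
	}
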
diff --git a/include/asm-i386/timer.h b/include/asm-i386/timer.h index 4752c3a6a708..12dd67bf760f 100644 --- a/include/asm-i386/timer.h +++ b/include/asm-i386/timer.h | |||
@@ -4,13 +4,21 @@ | |||
4 | #include <linux/pm.h> | 4 | #include <linux/pm.h> |
5 | 5 | ||
6 | #define TICK_SIZE (tick_nsec / 1000) | 6 | #define TICK_SIZE (tick_nsec / 1000) |
7 | |||
7 | void setup_pit_timer(void); | 8 | void setup_pit_timer(void); |
9 | unsigned long long native_sched_clock(void); | ||
10 | unsigned long native_calculate_cpu_khz(void); | ||
11 | |||
8 | /* Modifiers for buggy PIT handling */ | 12 | /* Modifiers for buggy PIT handling */ |
9 | extern int pit_latch_buggy; | 13 | extern int pit_latch_buggy; |
10 | extern int timer_ack; | 14 | extern int timer_ack; |
11 | extern int no_timer_check; | 15 | extern int no_timer_check; |
12 | extern unsigned long long (*custom_sched_clock)(void); | ||
13 | extern int no_sync_cmos_clock; | 16 | extern int no_sync_cmos_clock; |
14 | extern int recalibrate_cpu_khz(void); | 17 | extern int recalibrate_cpu_khz(void); |
15 | 18 | ||
19 | #ifndef CONFIG_PARAVIRT | ||
20 | #define get_scheduled_cycles(val) rdtscll(val) | ||
21 | #define calculate_cpu_khz() native_calculate_cpu_khz() | ||
22 | #endif | ||
23 | |||
16 | #endif | 24 | #endif |
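The !CONFIG_PARAVIRT fallbacks here mirror the paravirt macros added earlier: the scheduler clock reads the TSC via rdtscll() and calibration stays native. A sketch of the intended consumer (the real sched_clock also handles an unstable TSC; cycles_to_ns() is an assumed helper):

	unsigned long long sched_clock_sketch(void)
	{
		unsigned long long cycles;

		get_scheduled_cycles(cycles);	/* rdtscll() natively */
		return cycles_to_ns(cycles);
	}
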
diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h index ac58580ad664..7fc512d90ea8 100644 --- a/include/asm-i386/topology.h +++ b/include/asm-i386/topology.h | |||
@@ -85,7 +85,6 @@ static inline int node_to_first_cpu(int node) | |||
85 | .idle_idx = 1, \ | 85 | .idle_idx = 1, \ |
86 | .newidle_idx = 2, \ | 86 | .newidle_idx = 2, \ |
87 | .wake_idx = 1, \ | 87 | .wake_idx = 1, \ |
88 | .per_cpu_gain = 100, \ | ||
89 | .flags = SD_LOAD_BALANCE \ | 88 | .flags = SD_LOAD_BALANCE \ |
90 | | SD_BALANCE_EXEC \ | 89 | | SD_BALANCE_EXEC \ |
91 | | SD_BALANCE_FORK \ | 90 | | SD_BALANCE_FORK \ |
diff --git a/include/asm-i386/vmi.h b/include/asm-i386/vmi.h index 43c89333037e..eb8bd892c01e 100644 --- a/include/asm-i386/vmi.h +++ b/include/asm-i386/vmi.h | |||
@@ -97,6 +97,7 @@ | |||
97 | #define VMI_CALL_SetInitialAPState 62 | 97 | #define VMI_CALL_SetInitialAPState 62 |
98 | #define VMI_CALL_APICWrite 63 | 98 | #define VMI_CALL_APICWrite 63 |
99 | #define VMI_CALL_APICRead 64 | 99 | #define VMI_CALL_APICRead 64 |
100 | #define VMI_CALL_IODelay 65 | ||
100 | #define VMI_CALL_SetLazyMode 73 | 101 | #define VMI_CALL_SetLazyMode 73 |
101 | 102 | ||
102 | /* | 103 | /* |
diff --git a/include/asm-i386/vmi_time.h b/include/asm-i386/vmi_time.h index c12931211007..1f971eb7f71e 100644 --- a/include/asm-i386/vmi_time.h +++ b/include/asm-i386/vmi_time.h | |||
@@ -49,7 +49,8 @@ extern struct vmi_timer_ops { | |||
49 | extern void __init vmi_time_init(void); | 49 | extern void __init vmi_time_init(void); |
50 | extern unsigned long vmi_get_wallclock(void); | 50 | extern unsigned long vmi_get_wallclock(void); |
51 | extern int vmi_set_wallclock(unsigned long now); | 51 | extern int vmi_set_wallclock(unsigned long now); |
52 | extern unsigned long long vmi_sched_clock(void); | 52 | extern unsigned long long vmi_get_sched_cycles(void); |
53 | extern unsigned long vmi_cpu_khz(void); | ||
53 | 54 | ||
54 | #ifdef CONFIG_X86_LOCAL_APIC | 55 | #ifdef CONFIG_X86_LOCAL_APIC |
55 | extern void __init vmi_timer_setup_boot_alarm(void); | 56 | extern void __init vmi_timer_setup_boot_alarm(void); |
diff --git a/include/asm-ia64/topology.h b/include/asm-ia64/topology.h index 22ed6749557e..233f1caae048 100644 --- a/include/asm-ia64/topology.h +++ b/include/asm-ia64/topology.h | |||
@@ -65,7 +65,6 @@ void build_cpu_to_node_map(void); | |||
65 | .max_interval = 4, \ | 65 | .max_interval = 4, \ |
66 | .busy_factor = 64, \ | 66 | .busy_factor = 64, \ |
67 | .imbalance_pct = 125, \ | 67 | .imbalance_pct = 125, \ |
68 | .per_cpu_gain = 100, \ | ||
69 | .cache_nice_tries = 2, \ | 68 | .cache_nice_tries = 2, \ |
70 | .busy_idx = 2, \ | 69 | .busy_idx = 2, \ |
71 | .idle_idx = 1, \ | 70 | .idle_idx = 1, \ |
@@ -97,7 +96,6 @@ void build_cpu_to_node_map(void); | |||
97 | .newidle_idx = 0, /* unused */ \ | 96 | .newidle_idx = 0, /* unused */ \ |
98 | .wake_idx = 1, \ | 97 | .wake_idx = 1, \ |
99 | .forkexec_idx = 1, \ | 98 | .forkexec_idx = 1, \ |
100 | .per_cpu_gain = 100, \ | ||
101 | .flags = SD_LOAD_BALANCE \ | 99 | .flags = SD_LOAD_BALANCE \ |
102 | | SD_BALANCE_EXEC \ | 100 | | SD_BALANCE_EXEC \ |
103 | | SD_BALANCE_FORK \ | 101 | | SD_BALANCE_FORK \ |
diff --git a/include/asm-mips/mach-ip27/topology.h b/include/asm-mips/mach-ip27/topology.h index 44790fdc5d00..61d9be3f3175 100644 --- a/include/asm-mips/mach-ip27/topology.h +++ b/include/asm-mips/mach-ip27/topology.h | |||
@@ -28,7 +28,6 @@ extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES]; | |||
28 | .busy_factor = 32, \ | 28 | .busy_factor = 32, \ |
29 | .imbalance_pct = 125, \ | 29 | .imbalance_pct = 125, \ |
30 | .cache_nice_tries = 1, \ | 30 | .cache_nice_tries = 1, \ |
31 | .per_cpu_gain = 100, \ | ||
32 | .flags = SD_LOAD_BALANCE \ | 31 | .flags = SD_LOAD_BALANCE \ |
33 | | SD_BALANCE_EXEC \ | 32 | | SD_BALANCE_EXEC \ |
34 | | SD_WAKE_BALANCE, \ | 33 | | SD_WAKE_BALANCE, \ |
diff --git a/include/asm-powerpc/topology.h b/include/asm-powerpc/topology.h index 6610495f5f16..0ad21a849b5f 100644 --- a/include/asm-powerpc/topology.h +++ b/include/asm-powerpc/topology.h | |||
@@ -57,7 +57,6 @@ static inline int pcibus_to_node(struct pci_bus *bus) | |||
57 | .busy_factor = 32, \ | 57 | .busy_factor = 32, \ |
58 | .imbalance_pct = 125, \ | 58 | .imbalance_pct = 125, \ |
59 | .cache_nice_tries = 1, \ | 59 | .cache_nice_tries = 1, \ |
60 | .per_cpu_gain = 100, \ | ||
61 | .busy_idx = 3, \ | 60 | .busy_idx = 3, \ |
62 | .idle_idx = 1, \ | 61 | .idle_idx = 1, \ |
63 | .newidle_idx = 2, \ | 62 | .newidle_idx = 2, \ |
diff --git a/include/asm-x86_64/io_apic.h b/include/asm-x86_64/io_apic.h index f4fb238c89f1..969d225a9350 100644 --- a/include/asm-x86_64/io_apic.h +++ b/include/asm-x86_64/io_apic.h | |||
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | #include <asm/types.h> | 4 | #include <asm/types.h> |
5 | #include <asm/mpspec.h> | 5 | #include <asm/mpspec.h> |
6 | #include <asm/apicdef.h> | ||
6 | 7 | ||
7 | /* | 8 | /* |
8 | * Intel IO-APIC support for SMP and UP systems. | 9 | * Intel IO-APIC support for SMP and UP systems. |
diff --git a/include/asm-x86_64/nmi.h b/include/asm-x86_64/nmi.h index 72375e7d32a8..ceb3d8dac33d 100644 --- a/include/asm-x86_64/nmi.h +++ b/include/asm-x86_64/nmi.h | |||
@@ -64,7 +64,7 @@ extern int setup_nmi_watchdog(char *); | |||
64 | 64 | ||
65 | extern atomic_t nmi_active; | 65 | extern atomic_t nmi_active; |
66 | extern unsigned int nmi_watchdog; | 66 | extern unsigned int nmi_watchdog; |
67 | #define NMI_DEFAULT -1 | 67 | #define NMI_DEFAULT 0 |
68 | #define NMI_NONE 0 | 68 | #define NMI_NONE 0 |
69 | #define NMI_IO_APIC 1 | 69 | #define NMI_IO_APIC 1 |
70 | #define NMI_LOCAL_APIC 2 | 70 | #define NMI_LOCAL_APIC 2 |
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h index 2facec5914d2..4fd6fb23953e 100644 --- a/include/asm-x86_64/topology.h +++ b/include/asm-x86_64/topology.h | |||
@@ -43,7 +43,6 @@ extern int __node_distance(int, int); | |||
43 | .newidle_idx = 0, \ | 43 | .newidle_idx = 0, \ |
44 | .wake_idx = 1, \ | 44 | .wake_idx = 1, \ |
45 | .forkexec_idx = 1, \ | 45 | .forkexec_idx = 1, \ |
46 | .per_cpu_gain = 100, \ | ||
47 | .flags = SD_LOAD_BALANCE \ | 46 | .flags = SD_LOAD_BALANCE \ |
48 | | SD_BALANCE_FORK \ | 47 | | SD_BALANCE_FORK \ |
49 | | SD_BALANCE_EXEC \ | 48 | | SD_BALANCE_EXEC \ |
diff --git a/include/asm-x86_64/tsc.h b/include/asm-x86_64/tsc.h index 9a0a368852c7..26c3e9828288 100644 --- a/include/asm-x86_64/tsc.h +++ b/include/asm-x86_64/tsc.h | |||
@@ -55,6 +55,7 @@ static __always_inline cycles_t get_cycles_sync(void) | |||
55 | extern void tsc_init(void); | 55 | extern void tsc_init(void); |
56 | extern void mark_tsc_unstable(void); | 56 | extern void mark_tsc_unstable(void); |
57 | extern int unsynchronized_tsc(void); | 57 | extern int unsynchronized_tsc(void); |
58 | extern void init_tsc_clocksource(void); | ||
58 | 59 | ||
59 | /* | 60 | /* |
60 | * Boot-time check whether the TSCs are synchronized across | 61 | * Boot-time check whether the TSCs are synchronized across |
diff --git a/include/asm-arm/hardware/gpio_keys.h b/include/linux/gpio_keys.h index 2b217c7b9312..2b217c7b9312 100644 --- a/include/asm-arm/hardware/gpio_keys.h +++ b/include/linux/gpio_keys.h | |||
diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 48148e0cdbd1..75e55dcdeb18 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h | |||
@@ -5,6 +5,14 @@ | |||
5 | 5 | ||
6 | typedef struct page *new_page_t(struct page *, unsigned long private, int **); | 6 | typedef struct page *new_page_t(struct page *, unsigned long private, int **); |
7 | 7 | ||
8 | /* Check if a vma is migratable */ | ||
9 | static inline int vma_migratable(struct vm_area_struct *vma) | ||
10 | { | ||
11 | if (vma->vm_flags & (VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED)) | ||
12 | return 0; | ||
13 | return 1; | ||
14 | } | ||
15 | |||
8 | #ifdef CONFIG_MIGRATION | 16 | #ifdef CONFIG_MIGRATION |
9 | extern int isolate_lru_page(struct page *p, struct list_head *pagelist); | 17 | extern int isolate_lru_page(struct page *p, struct list_head *pagelist); |
10 | extern int putback_lru_pages(struct list_head *l); | 18 | extern int putback_lru_pages(struct list_head *l); |
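vma_migratable() moves out of mm/mempolicy.c so mm/migrate.c can share it (see do_move_pages below). One behavioral detail is visible in the diff: the old private copy also rejected VM_LOCKED, while the shared version rejects only VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED, so mlocked vmas now count as migratable. The new caller checks it up front:

	if (!vma || !vma_migratable(vma))
		goto set_status;	/* surfaces as -EFAULT for that page */
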
diff --git a/include/linux/pci.h b/include/linux/pci.h index 2c4b6842dfb9..78417e421b4c 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -543,6 +543,7 @@ void pci_set_master(struct pci_dev *dev); | |||
543 | int __must_check pci_set_mwi(struct pci_dev *dev); | 543 | int __must_check pci_set_mwi(struct pci_dev *dev); |
544 | void pci_clear_mwi(struct pci_dev *dev); | 544 | void pci_clear_mwi(struct pci_dev *dev); |
545 | void pci_intx(struct pci_dev *dev, int enable); | 545 | void pci_intx(struct pci_dev *dev, int enable); |
546 | void pci_msi_off(struct pci_dev *dev); | ||
546 | int pci_set_dma_mask(struct pci_dev *dev, u64 mask); | 547 | int pci_set_dma_mask(struct pci_dev *dev, u64 mask); |
547 | int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask); | 548 | int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask); |
548 | void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno); | 549 | void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno); |
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h index 7a6d34ee5ab1..f09cce2357ff 100644 --- a/include/linux/pci_regs.h +++ b/include/linux/pci_regs.h | |||
@@ -292,9 +292,10 @@ | |||
292 | #define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */ | 292 | #define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */ |
293 | #define PCI_MSI_MASK_BIT 16 /* Mask bits register */ | 293 | #define PCI_MSI_MASK_BIT 16 /* Mask bits register */ |
294 | 294 | ||
295 | /* MSI-X registers (these are at offset PCI_MSI_FLAGS) */ | 295 | /* MSI-X registers (these are at offset PCI_MSIX_FLAGS) */ |
296 | #define PCI_MSIX_FLAGS_QSIZE 0x7FF | 296 | #define PCI_MSIX_FLAGS 2 |
297 | #define PCI_MSIX_FLAGS_ENABLE (1 << 15) | 297 | #define PCI_MSIX_FLAGS_QSIZE 0x7FF |
298 | #define PCI_MSIX_FLAGS_ENABLE (1 << 15) | ||
298 | #define PCI_MSIX_FLAGS_BIRMASK (7 << 0) | 299 | #define PCI_MSIX_FLAGS_BIRMASK (7 << 0) |
299 | #define PCI_MSIX_FLAGS_BITMASK (1 << 0) | 300 | #define PCI_MSIX_FLAGS_BITMASK (1 << 0) |
300 | 301 | ||
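pci_msi_off() is declared so early quirk/fixup code can force a device back to INTx delivery, and the pci_regs.h hunk supplies the previously missing PCI_MSIX_FLAGS offset (also fixing the comment that pointed at PCI_MSI_FLAGS). A hedged sketch of what the helper presumably does, using only standard config-space accessors:

	/* Sketch: clear the enable bit in the MSI and MSI-X capabilities. */
	void pci_msi_off(struct pci_dev *dev)
	{
		int pos;
		u16 control;

		pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
		if (pos) {
			pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
			control &= ~PCI_MSI_FLAGS_ENABLE;
			pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
		}
		pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
		if (pos) {
			pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
			control &= ~PCI_MSIX_FLAGS_ENABLE;
			pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
		}
	}
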
diff --git a/include/linux/sched.h b/include/linux/sched.h index 6f7c9a4d80e5..49fe2997a016 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -684,7 +684,6 @@ struct sched_domain { | |||
684 | unsigned int imbalance_pct; /* No balance until over watermark */ | 684 | unsigned int imbalance_pct; /* No balance until over watermark */ |
685 | unsigned long long cache_hot_time; /* Task considered cache hot (ns) */ | 685 | unsigned long long cache_hot_time; /* Task considered cache hot (ns) */ |
686 | unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ | 686 | unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ |
687 | unsigned int per_cpu_gain; /* CPU % gained by adding domain cpus */ | ||
688 | unsigned int busy_idx; | 687 | unsigned int busy_idx; |
689 | unsigned int idle_idx; | 688 | unsigned int idle_idx; |
690 | unsigned int newidle_idx; | 689 | unsigned int newidle_idx; |
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 61fef376ed2e..a946176db638 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h | |||
@@ -283,6 +283,43 @@ do { \ | |||
283 | }) | 283 | }) |
284 | 284 | ||
285 | /* | 285 | /* |
286 | * Locks two spinlocks l1 and l2. | ||
287 | * l1_first indicates if spinlock l1 should be taken first. | ||
288 | */ | ||
289 | static inline void double_spin_lock(spinlock_t *l1, spinlock_t *l2, | ||
290 | bool l1_first) | ||
291 | __acquires(l1) | ||
292 | __acquires(l2) | ||
293 | { | ||
294 | if (l1_first) { | ||
295 | spin_lock(l1); | ||
296 | spin_lock(l2); | ||
297 | } else { | ||
298 | spin_lock(l2); | ||
299 | spin_lock(l1); | ||
300 | } | ||
301 | } | ||
302 | |||
303 | /* | ||
304 | * Unlocks two spinlocks l1 and l2. | ||
305 | * l1_taken_first indicates if spinlock l1 was taken first and therefore | ||
306 | * should be released after spinlock l2. | ||
307 | */ | ||
308 | static inline void double_spin_unlock(spinlock_t *l1, spinlock_t *l2, | ||
309 | bool l1_taken_first) | ||
310 | __releases(l1) | ||
311 | __releases(l2) | ||
312 | { | ||
313 | if (l1_taken_first) { | ||
314 | spin_unlock(l2); | ||
315 | spin_unlock(l1); | ||
316 | } else { | ||
317 | spin_unlock(l1); | ||
318 | spin_unlock(l2); | ||
319 | } | ||
320 | } | ||
321 | |||
322 | /* | ||
286 | * Pull the atomic_t declaration: | 323 | * Pull the atomic_t declaration: |
287 | * (asm-mips/atomic.h needs above definitions) | 324 | * (asm-mips/atomic.h needs above definitions) |
288 | */ | 325 | */ |
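double_spin_lock()/double_spin_unlock() centralize the take-two-locks idiom: as long as every path computes the same boolean for a given pair of locks, the acquisition order is globally consistent and ABBA deadlock is impossible. The timer and hrtimer conversions below derive the order from CPU numbers, which are distinct by construction. A minimal usage sketch with invented names:

	/* Merge two per-CPU bases; the lower CPU id's lock is taken first. */
	static void merge_bases(struct my_base *dst, struct my_base *src,
				int dst_cpu, int src_cpu)
	{
		bool dst_first = dst_cpu < src_cpu;

		double_spin_lock(&dst->lock, &src->lock, dst_first);
		/* ... move entries from src to dst ... */
		double_spin_unlock(&dst->lock, &src->lock, dst_first);
	}
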
diff --git a/include/linux/topology.h b/include/linux/topology.h index 6c5a6e6e813b..a9d1f049cc15 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h | |||
@@ -96,7 +96,6 @@ | |||
96 | .busy_factor = 64, \ | 96 | .busy_factor = 64, \ |
97 | .imbalance_pct = 110, \ | 97 | .imbalance_pct = 110, \ |
98 | .cache_nice_tries = 0, \ | 98 | .cache_nice_tries = 0, \ |
99 | .per_cpu_gain = 25, \ | ||
100 | .busy_idx = 0, \ | 99 | .busy_idx = 0, \ |
101 | .idle_idx = 0, \ | 100 | .idle_idx = 0, \ |
102 | .newidle_idx = 1, \ | 101 | .newidle_idx = 1, \ |
@@ -128,7 +127,6 @@ | |||
128 | .busy_factor = 64, \ | 127 | .busy_factor = 64, \ |
129 | .imbalance_pct = 125, \ | 128 | .imbalance_pct = 125, \ |
130 | .cache_nice_tries = 1, \ | 129 | .cache_nice_tries = 1, \ |
131 | .per_cpu_gain = 100, \ | ||
132 | .busy_idx = 2, \ | 130 | .busy_idx = 2, \ |
133 | .idle_idx = 1, \ | 131 | .idle_idx = 1, \ |
134 | .newidle_idx = 2, \ | 132 | .newidle_idx = 2, \ |
@@ -159,7 +157,6 @@ | |||
159 | .busy_factor = 64, \ | 157 | .busy_factor = 64, \ |
160 | .imbalance_pct = 125, \ | 158 | .imbalance_pct = 125, \ |
161 | .cache_nice_tries = 1, \ | 159 | .cache_nice_tries = 1, \ |
162 | .per_cpu_gain = 100, \ | ||
163 | .busy_idx = 2, \ | 160 | .busy_idx = 2, \ |
164 | .idle_idx = 1, \ | 161 | .idle_idx = 1, \ |
165 | .newidle_idx = 2, \ | 162 | .newidle_idx = 2, \ |
@@ -193,7 +190,6 @@ | |||
193 | .newidle_idx = 0, /* unused */ \ | 190 | .newidle_idx = 0, /* unused */ \ |
194 | .wake_idx = 0, /* unused */ \ | 191 | .wake_idx = 0, /* unused */ \ |
195 | .forkexec_idx = 0, /* unused */ \ | 192 | .forkexec_idx = 0, /* unused */ \ |
196 | .per_cpu_gain = 100, \ | ||
197 | .flags = SD_LOAD_BALANCE \ | 193 | .flags = SD_LOAD_BALANCE \ |
198 | | SD_SERIALIZE, \ | 194 | | SD_SERIALIZE, \ |
199 | .last_balance = jiffies, \ | 195 | .last_balance = jiffies, \ |
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 476cb0c0b4a4..de93a8176ca6 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
@@ -1355,17 +1355,16 @@ static void migrate_hrtimers(int cpu) | |||
1355 | tick_cancel_sched_timer(cpu); | 1355 | tick_cancel_sched_timer(cpu); |
1356 | 1356 | ||
1357 | local_irq_disable(); | 1357 | local_irq_disable(); |
1358 | 1358 | double_spin_lock(&new_base->lock, &old_base->lock, | |
1359 | spin_lock(&new_base->lock); | 1359 | smp_processor_id() < cpu); |
1360 | spin_lock(&old_base->lock); | ||
1361 | 1360 | ||
1362 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { | 1361 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { |
1363 | migrate_hrtimer_list(&old_base->clock_base[i], | 1362 | migrate_hrtimer_list(&old_base->clock_base[i], |
1364 | &new_base->clock_base[i]); | 1363 | &new_base->clock_base[i]); |
1365 | } | 1364 | } |
1366 | spin_unlock(&old_base->lock); | ||
1367 | spin_unlock(&new_base->lock); | ||
1368 | 1365 | ||
1366 | double_spin_unlock(&new_base->lock, &old_base->lock, | ||
1367 | smp_processor_id() < cpu); | ||
1369 | local_irq_enable(); | 1368 | local_irq_enable(); |
1370 | put_cpu_var(hrtimer_bases); | 1369 | put_cpu_var(hrtimer_bases); |
1371 | } | 1370 | } |
diff --git a/kernel/sched.c b/kernel/sched.c index 5f102e6c7a4c..a4ca632c477c 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -3006,23 +3006,6 @@ static inline void idle_balance(int cpu, struct rq *rq) | |||
3006 | } | 3006 | } |
3007 | #endif | 3007 | #endif |
3008 | 3008 | ||
3009 | static inline void wake_priority_sleeper(struct rq *rq) | ||
3010 | { | ||
3011 | #ifdef CONFIG_SCHED_SMT | ||
3012 | if (!rq->nr_running) | ||
3013 | return; | ||
3014 | |||
3015 | spin_lock(&rq->lock); | ||
3016 | /* | ||
3017 | * If an SMT sibling task has been put to sleep for priority | ||
3018 | * reasons reschedule the idle task to see if it can now run. | ||
3019 | */ | ||
3020 | if (rq->nr_running) | ||
3021 | resched_task(rq->idle); | ||
3022 | spin_unlock(&rq->lock); | ||
3023 | #endif | ||
3024 | } | ||
3025 | |||
3026 | DEFINE_PER_CPU(struct kernel_stat, kstat); | 3009 | DEFINE_PER_CPU(struct kernel_stat, kstat); |
3027 | 3010 | ||
3028 | EXPORT_PER_CPU_SYMBOL(kstat); | 3011 | EXPORT_PER_CPU_SYMBOL(kstat); |
@@ -3239,10 +3222,7 @@ void scheduler_tick(void) | |||
3239 | 3222 | ||
3240 | update_cpu_clock(p, rq, now); | 3223 | update_cpu_clock(p, rq, now); |
3241 | 3224 | ||
3242 | if (p == rq->idle) | 3225 | if (p != rq->idle) |
3243 | /* Task on the idle queue */ | ||
3244 | wake_priority_sleeper(rq); | ||
3245 | else | ||
3246 | task_running_tick(rq, p); | 3226 | task_running_tick(rq, p); |
3247 | #ifdef CONFIG_SMP | 3227 | #ifdef CONFIG_SMP |
3248 | update_load(rq); | 3228 | update_load(rq); |
@@ -3251,136 +3231,6 @@ void scheduler_tick(void) | |||
3251 | #endif | 3231 | #endif |
3252 | } | 3232 | } |
3253 | 3233 | ||
3254 | #ifdef CONFIG_SCHED_SMT | ||
3255 | static inline void wakeup_busy_runqueue(struct rq *rq) | ||
3256 | { | ||
3257 | /* If an SMT runqueue is sleeping due to priority reasons wake it up */ | ||
3258 | if (rq->curr == rq->idle && rq->nr_running) | ||
3259 | resched_task(rq->idle); | ||
3260 | } | ||
3261 | |||
3262 | /* | ||
3263 | * Called with interrupt disabled and this_rq's runqueue locked. | ||
3264 | */ | ||
3265 | static void wake_sleeping_dependent(int this_cpu) | ||
3266 | { | ||
3267 | struct sched_domain *tmp, *sd = NULL; | ||
3268 | int i; | ||
3269 | |||
3270 | for_each_domain(this_cpu, tmp) { | ||
3271 | if (tmp->flags & SD_SHARE_CPUPOWER) { | ||
3272 | sd = tmp; | ||
3273 | break; | ||
3274 | } | ||
3275 | } | ||
3276 | |||
3277 | if (!sd) | ||
3278 | return; | ||
3279 | |||
3280 | for_each_cpu_mask(i, sd->span) { | ||
3281 | struct rq *smt_rq = cpu_rq(i); | ||
3282 | |||
3283 | if (i == this_cpu) | ||
3284 | continue; | ||
3285 | if (unlikely(!spin_trylock(&smt_rq->lock))) | ||
3286 | continue; | ||
3287 | |||
3288 | wakeup_busy_runqueue(smt_rq); | ||
3289 | spin_unlock(&smt_rq->lock); | ||
3290 | } | ||
3291 | } | ||
3292 | |||
3293 | /* | ||
3294 | * number of 'lost' timeslices this task won't be able to fully | ||
3295 | * utilize, if another task runs on a sibling. This models the | ||
3296 | * slowdown effect of other tasks running on siblings: | ||
3297 | */ | ||
3298 | static inline unsigned long | ||
3299 | smt_slice(struct task_struct *p, struct sched_domain *sd) | ||
3300 | { | ||
3301 | return p->time_slice * (100 - sd->per_cpu_gain) / 100; | ||
3302 | } | ||
3303 | |||
3304 | /* | ||
3305 | * To minimise lock contention and not have to drop this_rq's runlock we only | ||
3306 | * trylock the sibling runqueues and bypass those runqueues if we fail to | ||
3307 | * acquire their lock. As we only trylock the normal locking order does not | ||
3308 | * need to be obeyed. | ||
3309 | */ | ||
3310 | static int | ||
3311 | dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p) | ||
3312 | { | ||
3313 | struct sched_domain *tmp, *sd = NULL; | ||
3314 | int ret = 0, i; | ||
3315 | |||
3316 | /* kernel/rt threads do not participate in dependent sleeping */ | ||
3317 | if (!p->mm || rt_task(p)) | ||
3318 | return 0; | ||
3319 | |||
3320 | for_each_domain(this_cpu, tmp) { | ||
3321 | if (tmp->flags & SD_SHARE_CPUPOWER) { | ||
3322 | sd = tmp; | ||
3323 | break; | ||
3324 | } | ||
3325 | } | ||
3326 | |||
3327 | if (!sd) | ||
3328 | return 0; | ||
3329 | |||
3330 | for_each_cpu_mask(i, sd->span) { | ||
3331 | struct task_struct *smt_curr; | ||
3332 | struct rq *smt_rq; | ||
3333 | |||
3334 | if (i == this_cpu) | ||
3335 | continue; | ||
3336 | |||
3337 | smt_rq = cpu_rq(i); | ||
3338 | if (unlikely(!spin_trylock(&smt_rq->lock))) | ||
3339 | continue; | ||
3340 | |||
3341 | smt_curr = smt_rq->curr; | ||
3342 | |||
3343 | if (!smt_curr->mm) | ||
3344 | goto unlock; | ||
3345 | |||
3346 | /* | ||
3347 | * If a user task with lower static priority than the | ||
3348 | * running task on the SMT sibling is trying to schedule, | ||
3349 | * delay it till there is proportionately less timeslice | ||
3350 | * left of the sibling task to prevent a lower priority | ||
3351 | * task from using an unfair proportion of the | ||
3352 | * physical cpu's resources. -ck | ||
3353 | */ | ||
3354 | if (rt_task(smt_curr)) { | ||
3355 | /* | ||
3356 | * With real time tasks we run non-rt tasks only | ||
3357 | * per_cpu_gain% of the time. | ||
3358 | */ | ||
3359 | if ((jiffies % DEF_TIMESLICE) > | ||
3360 | (sd->per_cpu_gain * DEF_TIMESLICE / 100)) | ||
3361 | ret = 1; | ||
3362 | } else { | ||
3363 | if (smt_curr->static_prio < p->static_prio && | ||
3364 | !TASK_PREEMPTS_CURR(p, smt_rq) && | ||
3365 | smt_slice(smt_curr, sd) > task_timeslice(p)) | ||
3366 | ret = 1; | ||
3367 | } | ||
3368 | unlock: | ||
3369 | spin_unlock(&smt_rq->lock); | ||
3370 | } | ||
3371 | return ret; | ||
3372 | } | ||
3373 | #else | ||
3374 | static inline void wake_sleeping_dependent(int this_cpu) | ||
3375 | { | ||
3376 | } | ||
3377 | static inline int | ||
3378 | dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p) | ||
3379 | { | ||
3380 | return 0; | ||
3381 | } | ||
3382 | #endif | ||
3383 | |||
3384 | #if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT) | 3234 | #if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT) |
3385 | 3235 | ||
3386 | void fastcall add_preempt_count(int val) | 3236 | void fastcall add_preempt_count(int val) |
@@ -3507,7 +3357,6 @@ need_resched_nonpreemptible: | |||
3507 | if (!rq->nr_running) { | 3357 | if (!rq->nr_running) { |
3508 | next = rq->idle; | 3358 | next = rq->idle; |
3509 | rq->expired_timestamp = 0; | 3359 | rq->expired_timestamp = 0; |
3510 | wake_sleeping_dependent(cpu); | ||
3511 | goto switch_tasks; | 3360 | goto switch_tasks; |
3512 | } | 3361 | } |
3513 | } | 3362 | } |
@@ -3547,8 +3396,6 @@ need_resched_nonpreemptible: | |||
3547 | } | 3396 | } |
3548 | } | 3397 | } |
3549 | next->sleep_type = SLEEP_NORMAL; | 3398 | next->sleep_type = SLEEP_NORMAL; |
3550 | if (rq->nr_running == 1 && dependent_sleeper(cpu, rq, next)) | ||
3551 | next = rq->idle; | ||
3552 | switch_tasks: | 3399 | switch_tasks: |
3553 | if (next == rq->idle) | 3400 | if (next == rq->idle) |
3554 | schedstat_inc(rq, sched_goidle); | 3401 | schedstat_inc(rq, sched_goidle); |
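This removes the O(1) scheduler's SMT-nice machinery outright: dependent_sleeper() could idle a sibling thread on priority grounds, wake_sleeping_dependent() undid it, and per_cpu_gain (deleted from struct sched_domain and every topology table in this patch) parameterized the throttling. Worked through with the SMT default per_cpu_gain = 25 seen in topology.h above:

	/* Removed policy, by the deleted formulas above: */
	smt_slice(p, sd) = p->time_slice * (100 - 25) / 100;	/* 75% */
	/* rt task on the sibling: a non-rt task may run only while
	 *   (jiffies % DEF_TIMESLICE) <= DEF_TIMESLICE / 4	(~25%)  */

With the feature gone, scheduler_tick() simply ticks every non-idle task, and schedule() no longer substitutes the idle task for a runnable one.
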
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 193a0793af95..5b0e46b56fd0 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c | |||
@@ -55,16 +55,18 @@ static DEFINE_SPINLOCK(clocksource_lock); | |||
55 | static char override_name[32]; | 55 | static char override_name[32]; |
56 | static int finished_booting; | 56 | static int finished_booting; |
57 | 57 | ||
58 | /* clocksource_done_booting - Called near the end of bootup | 58 | /* clocksource_done_booting - Called near the end of core bootup |
59 | * | 59 | * |
60 | * Hack to avoid lots of clocksource churn at boot time | 60 | * Hack to avoid lots of clocksource churn at boot time. |
61 | * We use fs_initcall because we want this to start before | ||
62 | * device_initcall but after subsys_initcall. | ||
61 | */ | 63 | */ |
62 | static int __init clocksource_done_booting(void) | 64 | static int __init clocksource_done_booting(void) |
63 | { | 65 | { |
64 | finished_booting = 1; | 66 | finished_booting = 1; |
65 | return 0; | 67 | return 0; |
66 | } | 68 | } |
67 | late_initcall(clocksource_done_booting); | 69 | fs_initcall(clocksource_done_booting); |
68 | 70 | ||
69 | #ifdef CONFIG_CLOCKSOURCE_WATCHDOG | 71 | #ifdef CONFIG_CLOCKSOURCE_WATCHDOG |
70 | static LIST_HEAD(watchdog_list); | 72 | static LIST_HEAD(watchdog_list); |
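Moving clocksource_done_booting() from late_initcall to fs_initcall lets the final clocksource selection happen before device and late initcalls run; the newly declared init_tsc_clocksource() in tsc.h above is presumably one beneficiary. For reference, initcall levels execute in this fixed order:

	core_initcall -> postcore_initcall -> arch_initcall ->
	subsys_initcall -> fs_initcall -> device_initcall -> late_initcall
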
diff --git a/kernel/timer.c b/kernel/timer.c index 6663a87f7304..8ad384253ef2 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
@@ -1651,8 +1651,8 @@ static void __devinit migrate_timers(int cpu) | |||
1651 | new_base = get_cpu_var(tvec_bases); | 1651 | new_base = get_cpu_var(tvec_bases); |
1652 | 1652 | ||
1653 | local_irq_disable(); | 1653 | local_irq_disable(); |
1654 | spin_lock(&new_base->lock); | 1654 | double_spin_lock(&new_base->lock, &old_base->lock, |
1655 | spin_lock(&old_base->lock); | 1655 | smp_processor_id() < cpu); |
1656 | 1656 | ||
1657 | BUG_ON(old_base->running_timer); | 1657 | BUG_ON(old_base->running_timer); |
1658 | 1658 | ||
@@ -1665,8 +1665,8 @@ static void __devinit migrate_timers(int cpu) | |||
1665 | migrate_timer_list(new_base, old_base->tv5.vec + i); | 1665 | migrate_timer_list(new_base, old_base->tv5.vec + i); |
1666 | } | 1666 | } |
1667 | 1667 | ||
1668 | spin_unlock(&old_base->lock); | 1668 | double_spin_unlock(&new_base->lock, &old_base->lock, |
1669 | spin_unlock(&new_base->lock); | 1669 | smp_processor_id() < cpu); |
1670 | local_irq_enable(); | 1670 | local_irq_enable(); |
1671 | put_cpu_var(tvec_bases); | 1671 | put_cpu_var(tvec_bases); |
1672 | } | 1672 | } |
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index cf2a5381030a..d76e8eb342d0 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
@@ -321,15 +321,6 @@ static inline int check_pgd_range(struct vm_area_struct *vma, | |||
321 | return 0; | 321 | return 0; |
322 | } | 322 | } |
323 | 323 | ||
324 | /* Check if a vma is migratable */ | ||
325 | static inline int vma_migratable(struct vm_area_struct *vma) | ||
326 | { | ||
327 | if (vma->vm_flags & ( | ||
328 | VM_LOCKED|VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED)) | ||
329 | return 0; | ||
330 | return 1; | ||
331 | } | ||
332 | |||
333 | /* | 324 | /* |
334 | * Check if all pages in a range are on a set of nodes. | 325 | * Check if all pages in a range are on a set of nodes. |
335 | * If pagelist != NULL then isolate pages from the LRU and | 326 | * If pagelist != NULL then isolate pages from the LRU and |
diff --git a/mm/migrate.c b/mm/migrate.c index e9b161bde95b..7a66ca25dc8a 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
@@ -781,7 +781,7 @@ static int do_move_pages(struct mm_struct *mm, struct page_to_node *pm, | |||
781 | 781 | ||
782 | err = -EFAULT; | 782 | err = -EFAULT; |
783 | vma = find_vma(mm, pp->addr); | 783 | vma = find_vma(mm, pp->addr); |
784 | if (!vma) | 784 | if (!vma || !vma_migratable(vma)) |
785 | goto set_status; | 785 | goto set_status; |
786 | 786 | ||
787 | page = follow_page(vma, pp->addr, FOLL_GET); | 787 | page = follow_page(vma, pp->addr, FOLL_GET); |
diff --git a/mm/shmem.c b/mm/shmem.c index fcb07882c8e0..b8c429a2d271 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -175,7 +175,7 @@ static inline void shmem_unacct_blocks(unsigned long flags, long pages) | |||
175 | vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE)); | 175 | vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE)); |
176 | } | 176 | } |
177 | 177 | ||
178 | static struct super_operations shmem_ops; | 178 | static const struct super_operations shmem_ops; |
179 | static const struct address_space_operations shmem_aops; | 179 | static const struct address_space_operations shmem_aops; |
180 | static const struct file_operations shmem_file_operations; | 180 | static const struct file_operations shmem_file_operations; |
181 | static const struct inode_operations shmem_inode_operations; | 181 | static const struct inode_operations shmem_inode_operations; |
@@ -2383,7 +2383,7 @@ static const struct inode_operations shmem_special_inode_operations = { | |||
2383 | #endif | 2383 | #endif |
2384 | }; | 2384 | }; |
2385 | 2385 | ||
2386 | static struct super_operations shmem_ops = { | 2386 | static const struct super_operations shmem_ops = { |
2387 | .alloc_inode = shmem_alloc_inode, | 2387 | .alloc_inode = shmem_alloc_inode, |
2388 | .destroy_inode = shmem_destroy_inode, | 2388 | .destroy_inode = shmem_destroy_inode, |
2389 | #ifdef CONFIG_TMPFS | 2389 | #ifdef CONFIG_TMPFS |
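Constifying shmem_ops brings it in line with the neighbouring operation tables that are already const: a const ops structure can be placed in .rodata, so its function pointers cannot be rewritten at run time. The same pattern, with invented names:

	static const struct super_operations example_ops = {
		.alloc_inode	= example_alloc_inode,
		.destroy_inode	= example_destroy_inode,
	};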