-rw-r--r--  Documentation/kernel-parameters.txt | 2
-rw-r--r--  Documentation/power/freezing-of-tasks.txt | 8
-rw-r--r--  Documentation/x86_64/boot-options.txt | 14
-rw-r--r--  Documentation/x86_64/machinecheck | 14
-rw-r--r--  MAINTAINERS | 7
-rw-r--r--  arch/arm/mach-davinci/time.c | 2
-rw-r--r--  arch/arm/mach-imx/time.c | 1
-rw-r--r--  arch/arm/mach-ixp4xx/common.c | 2
-rw-r--r--  arch/arm/mach-omap1/time.c | 1
-rw-r--r--  arch/arm/plat-omap/timer32k.c | 2
-rw-r--r--  arch/i386/Kconfig | 21
-rw-r--r--  arch/i386/defconfig | 264
-rw-r--r--  arch/i386/kernel/Makefile | 1
-rw-r--r--  arch/i386/kernel/acpi/boot.c | 36
-rw-r--r--  arch/i386/kernel/alternative.c | 14
-rw-r--r--  arch/i386/kernel/apic.c | 10
-rw-r--r--  arch/i386/kernel/cpu/Makefile | 1
-rw-r--r--  arch/i386/kernel/cpu/amd.c | 8
-rw-r--r--  arch/i386/kernel/cpu/common.c | 2
-rw-r--r--  arch/i386/kernel/cpu/intel_cacheinfo.c | 79
-rw-r--r--  arch/i386/kernel/cpu/mcheck/non-fatal.c | 4
-rw-r--r--  arch/i386/kernel/cpu/mtrr/generic.c | 2
-rw-r--r--  arch/i386/kernel/cpu/mtrr/main.c | 2
-rw-r--r--  arch/i386/kernel/cpu/perfctr-watchdog.c | 4
-rw-r--r--  arch/i386/kernel/cpu/rise.c | 52
-rw-r--r--  arch/i386/kernel/e820.c | 32
-rw-r--r--  arch/i386/kernel/geode.c | 155
-rw-r--r--  arch/i386/kernel/hpet.c | 98
-rw-r--r--  arch/i386/kernel/i8253.c | 32
-rw-r--r--  arch/i386/kernel/io_apic.c | 26
-rw-r--r--  arch/i386/kernel/irq.c | 8
-rw-r--r--  arch/i386/kernel/process.c | 12
-rw-r--r--  arch/i386/kernel/reboot.c | 9
-rw-r--r--  arch/i386/kernel/setup.c | 11
-rw-r--r--  arch/i386/kernel/sysenter.c | 4
-rw-r--r--  arch/i386/kernel/time.c | 50
-rw-r--r--  arch/i386/kernel/traps.c | 3
-rw-r--r--  arch/i386/kernel/vmiclock.c | 2
-rw-r--r--  arch/i386/lib/Makefile | 2
-rw-r--r--  arch/i386/lib/string.c | 257
-rw-r--r--  arch/i386/mm/init.c | 7
-rw-r--r--  arch/i386/mm/ioremap.c | 2
-rw-r--r--  arch/i386/mm/pageattr.c | 20
-rw-r--r--  arch/i386/mm/pgtable.c | 6
-rw-r--r--  arch/i386/pci/acpi.c | 32
-rw-r--r--  arch/i386/pci/common.c | 13
-rw-r--r--  arch/i386/pci/mmconfig-shared.c | 48
-rw-r--r--  arch/i386/xen/time.c | 3
-rw-r--r--  arch/ia64/ia32/binfmt_elf32.c | 2
-rw-r--r--  arch/powerpc/boot/ps3-head.S | 2
-rw-r--r--  arch/powerpc/boot/ps3-hvcall.S | 2
-rw-r--r--  arch/powerpc/mm/tlb_32.c | 2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c | 18
-rw-r--r--  arch/powerpc/platforms/ps3/Kconfig | 36
-rw-r--r--  arch/sh/kernel/timers/timer-tmu.c | 1
-rw-r--r--  arch/sparc/kernel/entry.S | 14
-rw-r--r--  arch/sparc/kernel/irq.c | 27
-rw-r--r--  arch/sparc/kernel/irq.h | 68
-rw-r--r--  arch/sparc/kernel/pcic.c | 1
-rw-r--r--  arch/sparc/kernel/smp.c | 2
-rw-r--r--  arch/sparc/kernel/sparc_ksyms.c | 2
-rw-r--r--  arch/sparc/kernel/sun4c_irq.c | 15
-rw-r--r--  arch/sparc/kernel/sun4d_irq.c | 6
-rw-r--r--  arch/sparc/kernel/sun4d_smp.c | 1
-rw-r--r--  arch/sparc/kernel/sun4m_irq.c | 74
-rw-r--r--  arch/sparc/kernel/sun4m_smp.c | 2
-rw-r--r--  arch/sparc/kernel/tick14.c | 6
-rw-r--r--  arch/sparc/kernel/time.c | 2
-rw-r--r--  arch/sparc/mm/init.c | 3
-rw-r--r--  arch/sparc/mm/srmmu.c | 2
-rw-r--r--  arch/sparc/mm/sun4c.c | 2
-rw-r--r--  arch/sparc64/Kconfig | 4
-rw-r--r--  arch/sparc64/kernel/sparc64_ksyms.c | 1
-rw-r--r--  arch/sparc64/kernel/time.c | 54
-rw-r--r--  arch/x86_64/Kconfig | 12
-rw-r--r--  arch/x86_64/Makefile | 3
-rw-r--r--  arch/x86_64/defconfig | 288
-rw-r--r--  arch/x86_64/ia32/ia32_binfmt.c | 1
-rw-r--r--  arch/x86_64/ia32/ia32entry.S | 5
-rw-r--r--  arch/x86_64/kernel/aperture.c | 4
-rw-r--r--  arch/x86_64/kernel/apic.c | 77
-rw-r--r--  arch/x86_64/kernel/e820.c | 138
-rw-r--r--  arch/x86_64/kernel/early-quirks.c | 1
-rw-r--r--  arch/x86_64/kernel/entry.S | 6
-rw-r--r--  arch/x86_64/kernel/hpet.c | 6
-rw-r--r--  arch/x86_64/kernel/i8259.c | 18
-rw-r--r--  arch/x86_64/kernel/io_apic.c | 58
-rw-r--r--  arch/x86_64/kernel/mce.c | 241
-rw-r--r--  arch/x86_64/kernel/mce_amd.c | 6
-rw-r--r--  arch/x86_64/kernel/mpparse.c | 21
-rw-r--r--  arch/x86_64/kernel/pci-calgary.c | 570
-rw-r--r--  arch/x86_64/kernel/pci-dma.c | 7
-rw-r--r--  arch/x86_64/kernel/pci-gart.c | 27
-rw-r--r--  arch/x86_64/kernel/pci-nommu.c | 8
-rw-r--r--  arch/x86_64/kernel/pci-swiotlb.c | 2
-rw-r--r--  arch/x86_64/kernel/process.c | 13
-rw-r--r--  arch/x86_64/kernel/reboot.c | 4
-rw-r--r--  arch/x86_64/kernel/setup.c | 9
-rw-r--r--  arch/x86_64/kernel/signal.c | 7
-rw-r--r--  arch/x86_64/kernel/smp.c | 6
-rw-r--r--  arch/x86_64/kernel/tce.c | 12
-rw-r--r--  arch/x86_64/kernel/time.c | 158
-rw-r--r--  arch/x86_64/kernel/tsc.c | 39
-rw-r--r--  arch/x86_64/kernel/vmlinux.lds.S | 25
-rw-r--r--  arch/x86_64/kernel/vsyscall.c | 22
-rw-r--r--  arch/x86_64/mm/fault.c | 4
-rw-r--r--  arch/x86_64/mm/init.c | 11
-rw-r--r--  arch/x86_64/mm/k8topology.c | 13
-rw-r--r--  arch/x86_64/mm/numa.c | 15
-rw-r--r--  arch/x86_64/mm/pageattr.c | 23
-rw-r--r--  arch/x86_64/mm/srat.c | 97
-rw-r--r--  arch/x86_64/pci/k8-bus.c | 6
-rw-r--r--  arch/x86_64/vdso/Makefile | 49
-rw-r--r--  arch/x86_64/vdso/vclock_gettime.c | 120
-rw-r--r--  arch/x86_64/vdso/vdso-note.S | 12
-rw-r--r--  arch/x86_64/vdso/vdso-start.S | 2
-rw-r--r--  arch/x86_64/vdso/vdso.S | 2
-rw-r--r--  arch/x86_64/vdso/vdso.lds.S | 77
-rw-r--r--  arch/x86_64/vdso/vextern.h | 16
-rw-r--r--  arch/x86_64/vdso/vgetcpu.c | 50
-rw-r--r--  arch/x86_64/vdso/vma.c | 139
-rw-r--r--  arch/x86_64/vdso/voffset.h | 1
-rw-r--r--  arch/x86_64/vdso/vvar.c | 12
-rw-r--r--  drivers/acpi/numa.c | 31
-rw-r--r--  drivers/base/power/trace.c | 5
-rw-r--r--  drivers/block/Makefile | 1
-rw-r--r--  drivers/block/ps3disk.c | 630
-rw-r--r--  drivers/char/Makefile | 2
-rw-r--r--  drivers/char/hw_random/Kconfig | 2
-rw-r--r--  drivers/char/ps3flash.c | 440
-rw-r--r--  drivers/char/rtc.c | 2
-rw-r--r--  drivers/clocksource/acpi_pm.c | 2
-rw-r--r--  drivers/input/misc/pcspkr.c | 11
-rw-r--r--  drivers/isdn/Kconfig | 12
-rw-r--r--  drivers/isdn/act2000/Kconfig | 2
-rw-r--r--  drivers/isdn/gigaset/Kconfig | 10
-rw-r--r--  drivers/isdn/hisax/Kconfig | 1
-rw-r--r--  drivers/isdn/i4l/Kconfig | 3
-rw-r--r--  drivers/isdn/icn/Kconfig | 2
-rw-r--r--  drivers/isdn/pcbit/Kconfig | 2
-rw-r--r--  drivers/isdn/sc/Kconfig | 2
-rw-r--r--  drivers/kvm/mmu.c | 2
-rw-r--r--  drivers/lguest/lguest.c | 2
-rw-r--r--  drivers/md/dm-crypt.c | 2
-rw-r--r--  drivers/mmc/host/at91_mci.c | 13
-rw-r--r--  drivers/mmc/host/sdhci.c | 2
-rw-r--r--  drivers/mmc/host/sdhci.h | 1
-rw-r--r--  drivers/rtc/Kconfig | 13
-rw-r--r--  drivers/rtc/Makefile | 1
-rw-r--r--  drivers/rtc/rtc-ds1553.c | 2
-rw-r--r--  drivers/rtc/rtc-ds1742.c | 2
-rw-r--r--  drivers/rtc/rtc-max6900.c | 96
-rw-r--r--  drivers/rtc/rtc-stk17ta8.c | 420
-rw-r--r--  drivers/scsi/Makefile | 1
-rw-r--r--  drivers/scsi/ps3rom.c | 533
-rw-r--r--  drivers/spi/spi.c | 3
-rw-r--r--  drivers/video/Kconfig | 13
-rw-r--r--  drivers/video/atmel_lcdfb.c | 67
-rw-r--r--  drivers/video/console/vgacon.c | 6
-rw-r--r--  drivers/video/ps3fb.c | 2
-rw-r--r--  drivers/video/riva/fbdev.c | 2
-rw-r--r--  fs/binfmt_elf.c | 109
-rw-r--r--  fs/coda/dir.c | 1
-rw-r--r--  fs/coda/file.c | 65
-rw-r--r--  fs/coda/upcall.c | 49
-rw-r--r--  fs/nfsd/export.c | 4
-rw-r--r--  fs/proc/proc_misc.c | 3
-rw-r--r--  fs/udf/balloc.c | 420
-rw-r--r--  fs/udf/crc.c | 4
-rw-r--r--  fs/udf/dir.c | 60
-rw-r--r--  fs/udf/directory.c | 68
-rw-r--r--  fs/udf/ecma_167.h | 684
-rw-r--r--  fs/udf/file.c | 72
-rw-r--r--  fs/udf/fsync.c | 2
-rw-r--r--  fs/udf/ialloc.c | 38
-rw-r--r--  fs/udf/inode.c | 948
-rw-r--r--  fs/udf/lowlevel.c | 4
-rw-r--r--  fs/udf/misc.c | 87
-rw-r--r--  fs/udf/namei.c | 410
-rw-r--r--  fs/udf/osta_udf.h | 164
-rw-r--r--  fs/udf/partition.c | 212
-rw-r--r--  fs/udf/super.c | 1023
-rw-r--r--  fs/udf/symlink.c | 14
-rw-r--r--  fs/udf/truncate.c | 113
-rw-r--r--  fs/udf/udf_i.h | 2
-rw-r--r--  fs/udf/udf_sb.h | 26
-rw-r--r--  fs/udf/udfdecl.h | 4
-rw-r--r--  fs/udf/udfend.h | 20
-rw-r--r--  fs/udf/udftime.c | 82
-rw-r--r--  fs/udf/unicode.c | 155
-rw-r--r--  include/acpi/acpi_numa.h | 1
-rw-r--r--  include/asm-i386/e820.h | 8
-rw-r--r--  include/asm-i386/geode.h | 159
-rw-r--r--  include/asm-i386/hpet.h | 126
-rw-r--r--  include/asm-i386/i8253.h | 16
-rw-r--r--  include/asm-i386/mach-default/do_timer.h | 2
-rw-r--r--  include/asm-i386/mach-default/io_ports.h | 5
-rw-r--r--  include/asm-i386/mach-default/mach_reboot.h | 25
-rw-r--r--  include/asm-i386/mach-voyager/do_timer.h | 2
-rw-r--r--  include/asm-i386/mc146818rtc.h | 5
-rw-r--r--  include/asm-i386/page.h | 1
-rw-r--r--  include/asm-i386/pci.h | 5
-rw-r--r--  include/asm-i386/processor.h | 1
-rw-r--r--  include/asm-i386/resume-trace.h | 13
-rw-r--r--  include/asm-i386/string.h | 243
-rw-r--r--  include/asm-i386/timer.h | 2
-rw-r--r--  include/asm-i386/tlbflush.h | 6
-rw-r--r--  include/asm-i386/topology.h | 2
-rw-r--r--  include/asm-i386/uaccess.h | 2
-rw-r--r--  include/asm-sparc/irq.h | 168
-rw-r--r--  include/asm-sparc/pgtable.h | 3
-rw-r--r--  include/asm-x86_64/acpi.h | 11
-rw-r--r--  include/asm-x86_64/apic.h | 6
-rw-r--r--  include/asm-x86_64/auxvec.h | 2
-rw-r--r--  include/asm-x86_64/calgary.h | 9
-rw-r--r--  include/asm-x86_64/dmi.h | 5
-rw-r--r--  include/asm-x86_64/elf.h | 13
-rw-r--r--  include/asm-x86_64/fixmap.h | 6
-rw-r--r--  include/asm-x86_64/hpet.h | 62
-rw-r--r--  include/asm-x86_64/hw_irq.h | 20
-rw-r--r--  include/asm-x86_64/i8253.h | 6
-rw-r--r--  include/asm-x86_64/iommu.h | 29
-rw-r--r--  include/asm-x86_64/mce.h | 2
-rw-r--r--  include/asm-x86_64/mmu.h | 1
-rw-r--r--  include/asm-x86_64/pci.h | 19
-rw-r--r--  include/asm-x86_64/pgalloc.h | 73
-rw-r--r--  include/asm-x86_64/pgtable.h | 1
-rw-r--r--  include/asm-x86_64/processor.h | 1
-rw-r--r--  include/asm-x86_64/proto.h | 18
-rw-r--r--  include/asm-x86_64/ptrace.h | 1
-rw-r--r--  include/asm-x86_64/resume-trace.h | 13
-rw-r--r--  include/asm-x86_64/string.h | 5
-rw-r--r--  include/asm-x86_64/system.h | 2
-rw-r--r--  include/asm-x86_64/thread_info.h | 2
-rw-r--r--  include/asm-x86_64/timex.h | 1
-rw-r--r--  include/asm-x86_64/tlbflush.h | 6
-rw-r--r--  include/asm-x86_64/topology.h | 2
-rw-r--r--  include/asm-x86_64/vgtod.h | 29
-rw-r--r--  include/asm-x86_64/vsyscall.h | 3
-rw-r--r--  include/linux/acpi.h | 5
-rw-r--r--  include/linux/clockchips.h | 5
-rw-r--r--  include/linux/coda_linux.h | 1
-rw-r--r--  include/linux/coda_psdev.h | 3
-rw-r--r--  include/linux/compiler-gcc4.h | 18
-rw-r--r--  include/linux/compiler.h | 9
-rw-r--r--  include/linux/init.h | 8
-rw-r--r--  include/linux/kernel.h | 8
-rw-r--r--  include/linux/resume-trace.h | 19
-rw-r--r--  include/linux/time.h | 3
-rw-r--r--  include/linux/vmalloc.h | 7
-rw-r--r--  kernel/hrtimer.c | 15
-rw-r--r--  kernel/irq/proc.c | 10
-rw-r--r--  kernel/power/Kconfig | 2
-rw-r--r--  kernel/time/ntp.c | 59
-rw-r--r--  kernel/time/tick-broadcast.c | 35
-rw-r--r--  kernel/time/tick-common.c | 16
-rw-r--r--  kernel/time/tick-oneshot.c | 15
-rw-r--r--  kernel/time/tick-sched.c | 7
-rw-r--r--  lib/swiotlb.c | 5
-rw-r--r--  mm/memory.c | 2
-rw-r--r--  mm/nommu.c | 45
-rw-r--r--  mm/slob.c | 21
262 files changed, 7831 insertions(+), 4993 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 5fbe07706ae9..fb80e9ffea68 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1882,7 +1882,7 @@ and is between 256 and 4096 characters. It is defined in the file
1882 usbhid.mousepoll= 1882 usbhid.mousepoll=
1883 [USBHID] The interval which mice are to be polled at. 1883 [USBHID] The interval which mice are to be polled at.
1884 1884
1885 vdso= [IA-32,SH] 1885 vdso= [IA-32,SH,x86-64]
1886 vdso=2: enable compat VDSO (default with COMPAT_VDSO) 1886 vdso=2: enable compat VDSO (default with COMPAT_VDSO)
1887 vdso=1: enable VDSO (default) 1887 vdso=1: enable VDSO (default)
1888 vdso=0: disable VDSO mapping 1888 vdso=0: disable VDSO mapping
diff --git a/Documentation/power/freezing-of-tasks.txt b/Documentation/power/freezing-of-tasks.txt
index af1a282c71a3..04dc1cf9d215 100644
--- a/Documentation/power/freezing-of-tasks.txt
+++ b/Documentation/power/freezing-of-tasks.txt
@@ -155,6 +155,8 @@ Suppose, however, that the firmware file is located on a filesystem accessible
155only through another device that hasn't been resumed yet. In that case, 155only through another device that hasn't been resumed yet. In that case,
156request_firmware() will fail regardless of whether or not the freezing of tasks 156request_firmware() will fail regardless of whether or not the freezing of tasks
157is used. Consequently, the problem is not really related to the freezing of 157is used. Consequently, the problem is not really related to the freezing of
158tasks, since it generally exists anyway. [The solution to this particular 158tasks, since it generally exists anyway.
159problem is to keep the firmware in memory after it's loaded for the first time 159
160and upload if from memory to the device whenever necessary.] 160A driver must have all firmwares it may need in RAM before suspend() is called.
161If keeping them is not practical, for example due to their size, they must be
162requested early enough using the suspend notifier API described in notifiers.txt.
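
The rewritten paragraph above leaves a driver two options: keep the firmware image in memory across the whole suspend/resume cycle, or request it from a suspend notifier while userspace can still service request_firmware(). A rough, illustrative sketch of the notifier approach (not code from this patch), assuming the PM notifier API described in notifiers.txt; the foo_* names and the firmware path are invented:

#include <linux/firmware.h>
#include <linux/suspend.h>
#include <linux/notifier.h>

#define FOO_FW_NAME "foo/foo_fw.bin"            /* invented path */

static const struct firmware *foo_fw;           /* pinned in RAM across suspend */
static struct device *foo_dev;                  /* filled in at probe time */

static int foo_pm_notify(struct notifier_block *nb,
                         unsigned long event, void *data)
{
        switch (event) {
        case PM_SUSPEND_PREPARE:
        case PM_HIBERNATION_PREPARE:
                /* Userspace is still running here, so request_firmware() works. */
                if (!foo_fw && request_firmware(&foo_fw, FOO_FW_NAME, foo_dev))
                        return NOTIFY_BAD;      /* abort the suspend */
                return NOTIFY_OK;
        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
                release_firmware(foo_fw);
                foo_fw = NULL;
                return NOTIFY_OK;
        }
        return NOTIFY_DONE;
}

static struct notifier_block foo_pm_nb = {
        .notifier_call = foo_pm_notify,
};

/* called once from the driver's probe routine */
static int foo_setup_fw_caching(void)
{
        return register_pm_notifier(&foo_pm_nb);
}
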
diff --git a/Documentation/x86_64/boot-options.txt b/Documentation/x86_64/boot-options.txt
index 6177d881983f..945311840a10 100644
--- a/Documentation/x86_64/boot-options.txt
+++ b/Documentation/x86_64/boot-options.txt
@@ -14,9 +14,11 @@ Machine check
14 mce=nobootlog 14 mce=nobootlog
15 Disable boot machine check logging. 15 Disable boot machine check logging.
16 mce=tolerancelevel (number) 16 mce=tolerancelevel (number)
17 0: always panic, 1: panic if deadlock possible, 17 0: always panic on uncorrected errors, log corrected errors
18 2: try to avoid panic, 3: never panic or exit (for testing) 18 1: panic or SIGBUS on uncorrected errors, log corrected errors
19 default is 1 19 2: SIGBUS or log uncorrected errors, log corrected errors
20 3: never panic or SIGBUS, log all errors (for testing only)
21 Default is 1
20 Can be also set using sysfs which is preferable. 22 Can be also set using sysfs which is preferable.
21 23
22 nomce (for compatibility with i386): same as mce=off 24 nomce (for compatibility with i386): same as mce=off
@@ -134,12 +136,6 @@ Non Executable Mappings
134 136
135SMP 137SMP
136 138
137 nosmp Only use a single CPU
138
139 maxcpus=NUMBER only use upto NUMBER CPUs
140
141 cpumask=MASK only use cpus with bits set in mask
142
143 additional_cpus=NUM Allow NUM more CPUs for hotplug 139 additional_cpus=NUM Allow NUM more CPUs for hotplug
144 (defaults are specified by the BIOS, see Documentation/x86_64/cpu-hotplug-spec) 140 (defaults are specified by the BIOS, see Documentation/x86_64/cpu-hotplug-spec)
145 141
diff --git a/Documentation/x86_64/machinecheck b/Documentation/x86_64/machinecheck
index feaeaf6f6e4d..a05e58e7b159 100644
--- a/Documentation/x86_64/machinecheck
+++ b/Documentation/x86_64/machinecheck
@@ -49,12 +49,14 @@ tolerant
49 Since machine check exceptions can happen any time it is sometimes 49 Since machine check exceptions can happen any time it is sometimes
50 risky for the kernel to kill a process because it defies 50 risky for the kernel to kill a process because it defies
51 normal kernel locking rules. The tolerance level configures 51 normal kernel locking rules. The tolerance level configures
52 how hard the kernel tries to recover even at some risk of deadlock. 52 how hard the kernel tries to recover even at some risk of
53 53 deadlock. Higher tolerant values trade potentially better uptime
54 0: always panic, 54 with the risk of a crash or even corruption (for tolerant >= 3).
55 1: panic if deadlock possible, 55
56 2: try to avoid panic, 56 0: always panic on uncorrected errors, log corrected errors
57 3: never panic or exit (for testing only) 57 1: panic or SIGBUS on uncorrected errors, log corrected errors
58 2: SIGBUS or log uncorrected errors, log corrected errors
59 3: never panic or SIGBUS, log all errors (for testing only)
58 60
59 Default: 1 61 Default: 1
60 62
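
Purely as an illustration of the documented semantics (this is not the logic in mce.c), the four tolerant levels can be read as a small decision table for uncorrected errors, where can_kill means the error can be attributed to a killable user process:

enum mce_action { MCE_PANIC, MCE_SIGBUS, MCE_LOG_ONLY };

/* corrected errors are logged at every level; this covers uncorrected ones */
static enum mce_action uncorrected_action(int tolerant, int can_kill)
{
        switch (tolerant) {
        case 0:
                return MCE_PANIC;                       /* always panic */
        case 1:
                return can_kill ? MCE_SIGBUS : MCE_PANIC;
        case 2:
                return can_kill ? MCE_SIGBUS : MCE_LOG_ONLY;
        default:
                return MCE_LOG_ONLY;                    /* 3+: never panic or SIGBUS */
        }
}
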
diff --git a/MAINTAINERS b/MAINTAINERS
index f49c5563f060..773c732b4177 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -651,7 +651,12 @@ W: http://linux-atm.sourceforge.net
651S: Maintained 651S: Maintained
652 652
653ATMEL AT91 MCI DRIVER 653ATMEL AT91 MCI DRIVER
654S: Orphan 654P: Nicolas Ferre
655M: nicolas.ferre@rfo.atmel.com
656L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
657W: http://www.atmel.com/products/AT91/
658W: http://www.at91.com/
659S: Maintained
655 660
656ATMEL MACB ETHERNET DRIVER 661ATMEL MACB ETHERNET DRIVER
657P: Haavard Skinnemoen 662P: Haavard Skinnemoen
diff --git a/arch/arm/mach-davinci/time.c b/arch/arm/mach-davinci/time.c
index 4d8425de6922..e96a3dcdc1a7 100644
--- a/arch/arm/mach-davinci/time.c
+++ b/arch/arm/mach-davinci/time.c
@@ -285,6 +285,8 @@ static void davinci_set_mode(enum clock_event_mode mode,
285 case CLOCK_EVT_MODE_SHUTDOWN: 285 case CLOCK_EVT_MODE_SHUTDOWN:
286 t->opts = TIMER_OPTS_DISABLED; 286 t->opts = TIMER_OPTS_DISABLED;
287 break; 287 break;
288 case CLOCK_EVT_MODE_RESUME:
289 break;
288 } 290 }
289} 291}
290 292
diff --git a/arch/arm/mach-imx/time.c b/arch/arm/mach-imx/time.c
index 010f6fa984a6..d86d124aea22 100644
--- a/arch/arm/mach-imx/time.c
+++ b/arch/arm/mach-imx/time.c
@@ -159,6 +159,7 @@ static void imx_set_mode(enum clock_event_mode mode, struct clock_event_device *
159 break; 159 break;
160 case CLOCK_EVT_MODE_SHUTDOWN: 160 case CLOCK_EVT_MODE_SHUTDOWN:
161 case CLOCK_EVT_MODE_UNUSED: 161 case CLOCK_EVT_MODE_UNUSED:
162 case CLOCK_EVT_MODE_RESUME:
162 /* Left event sources disabled, no more interrupts appears */ 163 /* Left event sources disabled, no more interrupts appears */
163 break; 164 break;
164 } 165 }
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 8112f726ffa0..23e7fba6d3e1 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -459,6 +459,8 @@ static void ixp4xx_set_mode(enum clock_event_mode mode,
459 default: 459 default:
460 osrt = opts = 0; 460 osrt = opts = 0;
461 break; 461 break;
462 case CLOCK_EVT_MODE_RESUME:
463 break;
462 } 464 }
463 465
464 *IXP4XX_OSRT1 = osrt | opts; 466 *IXP4XX_OSRT1 = osrt | opts;
diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c
index 3705d20c4e5c..237651ebae5d 100644
--- a/arch/arm/mach-omap1/time.c
+++ b/arch/arm/mach-omap1/time.c
@@ -156,6 +156,7 @@ static void omap_mpu_set_mode(enum clock_event_mode mode,
156 break; 156 break;
157 case CLOCK_EVT_MODE_UNUSED: 157 case CLOCK_EVT_MODE_UNUSED:
158 case CLOCK_EVT_MODE_SHUTDOWN: 158 case CLOCK_EVT_MODE_SHUTDOWN:
159 case CLOCK_EVT_MODE_RESUME:
159 break; 160 break;
160 } 161 }
161} 162}
diff --git a/arch/arm/plat-omap/timer32k.c b/arch/arm/plat-omap/timer32k.c
index 2feceec8eccd..b0af014b0e2c 100644
--- a/arch/arm/plat-omap/timer32k.c
+++ b/arch/arm/plat-omap/timer32k.c
@@ -156,6 +156,8 @@ static void omap_32k_timer_set_mode(enum clock_event_mode mode,
156 case CLOCK_EVT_MODE_SHUTDOWN: 156 case CLOCK_EVT_MODE_SHUTDOWN:
157 omap_32k_timer_stop(); 157 omap_32k_timer_stop();
158 break; 158 break;
159 case CLOCK_EVT_MODE_RESUME:
160 break;
159 } 161 }
160} 162}
161 163
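
The clock-event hunks above all make the same mechanical change: the newly introduced CLOCK_EVT_MODE_RESUME enumerator has to be covered in every set_mode() callback, even where there is nothing to do. A minimal sketch of the resulting pattern, with invented foo_timer_* helpers standing in for the SoC-specific register accesses:

#include <linux/clockchips.h>

static void foo_timer_hw_start(int periodic) { /* program the timer registers here */ }
static void foo_timer_hw_stop(void)          { /* mask the timer interrupt here */ }

static void foo_timer_set_mode(enum clock_event_mode mode,
                               struct clock_event_device *evt)
{
        switch (mode) {
        case CLOCK_EVT_MODE_PERIODIC:
                foo_timer_hw_start(1);
                break;
        case CLOCK_EVT_MODE_ONESHOT:
                foo_timer_hw_start(0);          /* armed later via set_next_event() */
                break;
        case CLOCK_EVT_MODE_UNUSED:
        case CLOCK_EVT_MODE_SHUTDOWN:
                foo_timer_hw_stop();
                break;
        case CLOCK_EVT_MODE_RESUME:
                /* hardware is reprogrammed elsewhere; keep the switch exhaustive */
                break;
        }
}
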
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 7a11b905ef49..abb582bc218f 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -18,6 +18,10 @@ config GENERIC_TIME
18 bool 18 bool
19 default y 19 default y
20 20
21config GENERIC_CMOS_UPDATE
22 bool
23 default y
24
21config CLOCKSOURCE_WATCHDOG 25config CLOCKSOURCE_WATCHDOG
22 bool 26 bool
23 default y 27 default y
@@ -544,6 +548,7 @@ config HIGHMEM4G
544config HIGHMEM64G 548config HIGHMEM64G
545 bool "64GB" 549 bool "64GB"
546 depends on !M386 && !M486 550 depends on !M386 && !M486
551 select X86_PAE
547 help 552 help
548 Select this if you have a 32-bit processor and more than 4 553 Select this if you have a 32-bit processor and more than 4
549 gigabytes of physical RAM. 554 gigabytes of physical RAM.
@@ -573,12 +578,12 @@ choice
573 config VMSPLIT_3G 578 config VMSPLIT_3G
574 bool "3G/1G user/kernel split" 579 bool "3G/1G user/kernel split"
575 config VMSPLIT_3G_OPT 580 config VMSPLIT_3G_OPT
576 depends on !HIGHMEM 581 depends on !X86_PAE
577 bool "3G/1G user/kernel split (for full 1G low memory)" 582 bool "3G/1G user/kernel split (for full 1G low memory)"
578 config VMSPLIT_2G 583 config VMSPLIT_2G
579 bool "2G/2G user/kernel split" 584 bool "2G/2G user/kernel split"
580 config VMSPLIT_2G_OPT 585 config VMSPLIT_2G_OPT
581 depends on !HIGHMEM 586 depends on !X86_PAE
582 bool "2G/2G user/kernel split (for full 2G low memory)" 587 bool "2G/2G user/kernel split (for full 2G low memory)"
583 config VMSPLIT_1G 588 config VMSPLIT_1G
584 bool "1G/3G user/kernel split" 589 bool "1G/3G user/kernel split"
@@ -598,10 +603,15 @@ config HIGHMEM
598 default y 603 default y
599 604
600config X86_PAE 605config X86_PAE
601 bool 606 bool "PAE (Physical Address Extension) Support"
602 depends on HIGHMEM64G 607 default n
603 default y 608 depends on !HIGHMEM4G
604 select RESOURCES_64BIT 609 select RESOURCES_64BIT
610 help
611 PAE is required for NX support, and furthermore enables
612 larger swapspace support for non-overcommit purposes. It
613 has the cost of more pagetable lookup overhead, and also
614 consumes more pagetable space per process.
605 615
606# Common NUMA Features 616# Common NUMA Features
607config NUMA 617config NUMA
@@ -817,6 +827,7 @@ config CRASH_DUMP
817 827
818config PHYSICAL_START 828config PHYSICAL_START
819 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP) 829 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
830 default "0x1000000" if X86_NUMAQ
820 default "0x100000" 831 default "0x100000"
821 help 832 help
822 This gives the physical address where the kernel is loaded. 833 This gives the physical address where the kernel is loaded.
diff --git a/arch/i386/defconfig b/arch/i386/defconfig
index 0ac62cdcd3b7..54ee1764fdae 100644
--- a/arch/i386/defconfig
+++ b/arch/i386/defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.22-rc2 3# Linux kernel version: 2.6.22-git14
4# Mon May 21 13:23:44 2007 4# Fri Jul 20 09:53:15 2007
5# 5#
6CONFIG_X86_32=y 6CONFIG_X86_32=y
7CONFIG_GENERIC_TIME=y 7CONFIG_GENERIC_TIME=y
@@ -37,19 +37,18 @@ CONFIG_LOCALVERSION=""
37CONFIG_LOCALVERSION_AUTO=y 37CONFIG_LOCALVERSION_AUTO=y
38CONFIG_SWAP=y 38CONFIG_SWAP=y
39CONFIG_SYSVIPC=y 39CONFIG_SYSVIPC=y
40# CONFIG_IPC_NS is not set
41CONFIG_SYSVIPC_SYSCTL=y 40CONFIG_SYSVIPC_SYSCTL=y
42CONFIG_POSIX_MQUEUE=y 41CONFIG_POSIX_MQUEUE=y
43# CONFIG_BSD_PROCESS_ACCT is not set 42# CONFIG_BSD_PROCESS_ACCT is not set
44# CONFIG_TASKSTATS is not set 43# CONFIG_TASKSTATS is not set
45# CONFIG_UTS_NS is not set 44# CONFIG_USER_NS is not set
46# CONFIG_AUDIT is not set 45# CONFIG_AUDIT is not set
47CONFIG_IKCONFIG=y 46CONFIG_IKCONFIG=y
48CONFIG_IKCONFIG_PROC=y 47CONFIG_IKCONFIG_PROC=y
49CONFIG_LOG_BUF_SHIFT=18 48CONFIG_LOG_BUF_SHIFT=18
50# CONFIG_CPUSETS is not set 49# CONFIG_CPUSETS is not set
51CONFIG_SYSFS_DEPRECATED=y 50CONFIG_SYSFS_DEPRECATED=y
52# CONFIG_RELAY is not set 51CONFIG_RELAY=y
53CONFIG_BLK_DEV_INITRD=y 52CONFIG_BLK_DEV_INITRD=y
54CONFIG_INITRAMFS_SOURCE="" 53CONFIG_INITRAMFS_SOURCE=""
55CONFIG_CC_OPTIMIZE_FOR_SIZE=y 54CONFIG_CC_OPTIMIZE_FOR_SIZE=y
@@ -73,16 +72,13 @@ CONFIG_TIMERFD=y
73CONFIG_EVENTFD=y 72CONFIG_EVENTFD=y
74CONFIG_SHMEM=y 73CONFIG_SHMEM=y
75CONFIG_VM_EVENT_COUNTERS=y 74CONFIG_VM_EVENT_COUNTERS=y
76CONFIG_SLAB=y 75CONFIG_SLUB_DEBUG=y
77# CONFIG_SLUB is not set 76# CONFIG_SLAB is not set
77CONFIG_SLUB=y
78# CONFIG_SLOB is not set 78# CONFIG_SLOB is not set
79CONFIG_RT_MUTEXES=y 79CONFIG_RT_MUTEXES=y
80# CONFIG_TINY_SHMEM is not set 80# CONFIG_TINY_SHMEM is not set
81CONFIG_BASE_SMALL=0 81CONFIG_BASE_SMALL=0
82
83#
84# Loadable module support
85#
86CONFIG_MODULES=y 82CONFIG_MODULES=y
87CONFIG_MODULE_UNLOAD=y 83CONFIG_MODULE_UNLOAD=y
88CONFIG_MODULE_FORCE_UNLOAD=y 84CONFIG_MODULE_FORCE_UNLOAD=y
@@ -90,14 +86,11 @@ CONFIG_MODULE_FORCE_UNLOAD=y
90# CONFIG_MODULE_SRCVERSION_ALL is not set 86# CONFIG_MODULE_SRCVERSION_ALL is not set
91# CONFIG_KMOD is not set 87# CONFIG_KMOD is not set
92CONFIG_STOP_MACHINE=y 88CONFIG_STOP_MACHINE=y
93
94#
95# Block layer
96#
97CONFIG_BLOCK=y 89CONFIG_BLOCK=y
98CONFIG_LBD=y 90CONFIG_LBD=y
99# CONFIG_BLK_DEV_IO_TRACE is not set 91# CONFIG_BLK_DEV_IO_TRACE is not set
100# CONFIG_LSF is not set 92# CONFIG_LSF is not set
93# CONFIG_BLK_DEV_BSG is not set
101 94
102# 95#
103# IO Schedulers 96# IO Schedulers
@@ -201,6 +194,7 @@ CONFIG_X86_CPUID=y
201# CONFIG_EDD is not set 194# CONFIG_EDD is not set
202# CONFIG_DELL_RBU is not set 195# CONFIG_DELL_RBU is not set
203# CONFIG_DCDBAS is not set 196# CONFIG_DCDBAS is not set
197CONFIG_DMIID=y
204# CONFIG_NOHIGHMEM is not set 198# CONFIG_NOHIGHMEM is not set
205CONFIG_HIGHMEM4G=y 199CONFIG_HIGHMEM4G=y
206# CONFIG_HIGHMEM64G is not set 200# CONFIG_HIGHMEM64G is not set
@@ -217,7 +211,9 @@ CONFIG_FLAT_NODE_MEM_MAP=y
217CONFIG_SPLIT_PTLOCK_CPUS=4 211CONFIG_SPLIT_PTLOCK_CPUS=4
218CONFIG_RESOURCES_64BIT=y 212CONFIG_RESOURCES_64BIT=y
219CONFIG_ZONE_DMA_FLAG=1 213CONFIG_ZONE_DMA_FLAG=1
214CONFIG_BOUNCE=y
220CONFIG_NR_QUICK=1 215CONFIG_NR_QUICK=1
216CONFIG_VIRT_TO_BUS=y
221# CONFIG_HIGHPTE is not set 217# CONFIG_HIGHPTE is not set
222# CONFIG_MATH_EMULATION is not set 218# CONFIG_MATH_EMULATION is not set
223CONFIG_MTRR=y 219CONFIG_MTRR=y
@@ -244,7 +240,6 @@ CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
244CONFIG_PM=y 240CONFIG_PM=y
245CONFIG_PM_LEGACY=y 241CONFIG_PM_LEGACY=y
246# CONFIG_PM_DEBUG is not set 242# CONFIG_PM_DEBUG is not set
247# CONFIG_PM_SYSFS_DEPRECATED is not set
248 243
249# 244#
250# ACPI (Advanced Configuration and Power Interface) Support 245# ACPI (Advanced Configuration and Power Interface) Support
@@ -284,7 +279,7 @@ CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
284# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set 279# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
285CONFIG_CPU_FREQ_GOV_USERSPACE=y 280CONFIG_CPU_FREQ_GOV_USERSPACE=y
286CONFIG_CPU_FREQ_GOV_ONDEMAND=y 281CONFIG_CPU_FREQ_GOV_ONDEMAND=y
287# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set 282CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
288 283
289# 284#
290# CPUFreq processor drivers 285# CPUFreq processor drivers
@@ -325,7 +320,7 @@ CONFIG_PCI_MMCONFIG=y
325CONFIG_ARCH_SUPPORTS_MSI=y 320CONFIG_ARCH_SUPPORTS_MSI=y
326CONFIG_PCI_MSI=y 321CONFIG_PCI_MSI=y
327# CONFIG_PCI_DEBUG is not set 322# CONFIG_PCI_DEBUG is not set
328CONFIG_HT_IRQ=y 323# CONFIG_HT_IRQ is not set
329CONFIG_ISA_DMA_API=y 324CONFIG_ISA_DMA_API=y
330# CONFIG_ISA is not set 325# CONFIG_ISA is not set
331# CONFIG_MCA is not set 326# CONFIG_MCA is not set
@@ -381,7 +376,7 @@ CONFIG_IP_PNP_DHCP=y
381CONFIG_INET_TUNNEL=y 376CONFIG_INET_TUNNEL=y
382CONFIG_INET_XFRM_MODE_TRANSPORT=y 377CONFIG_INET_XFRM_MODE_TRANSPORT=y
383CONFIG_INET_XFRM_MODE_TUNNEL=y 378CONFIG_INET_XFRM_MODE_TUNNEL=y
384CONFIG_INET_XFRM_MODE_BEET=y 379# CONFIG_INET_XFRM_MODE_BEET is not set
385CONFIG_INET_DIAG=y 380CONFIG_INET_DIAG=y
386CONFIG_INET_TCP_DIAG=y 381CONFIG_INET_TCP_DIAG=y
387# CONFIG_TCP_CONG_ADVANCED is not set 382# CONFIG_TCP_CONG_ADVANCED is not set
@@ -400,27 +395,15 @@ CONFIG_IPV6=y
400# CONFIG_INET6_TUNNEL is not set 395# CONFIG_INET6_TUNNEL is not set
401CONFIG_INET6_XFRM_MODE_TRANSPORT=y 396CONFIG_INET6_XFRM_MODE_TRANSPORT=y
402CONFIG_INET6_XFRM_MODE_TUNNEL=y 397CONFIG_INET6_XFRM_MODE_TUNNEL=y
403CONFIG_INET6_XFRM_MODE_BEET=y 398# CONFIG_INET6_XFRM_MODE_BEET is not set
404# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set 399# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
405CONFIG_IPV6_SIT=y 400CONFIG_IPV6_SIT=y
406# CONFIG_IPV6_TUNNEL is not set 401# CONFIG_IPV6_TUNNEL is not set
407# CONFIG_IPV6_MULTIPLE_TABLES is not set 402# CONFIG_IPV6_MULTIPLE_TABLES is not set
408# CONFIG_NETWORK_SECMARK is not set 403# CONFIG_NETWORK_SECMARK is not set
409# CONFIG_NETFILTER is not set 404# CONFIG_NETFILTER is not set
410
411#
412# DCCP Configuration (EXPERIMENTAL)
413#
414# CONFIG_IP_DCCP is not set 405# CONFIG_IP_DCCP is not set
415
416#
417# SCTP Configuration (EXPERIMENTAL)
418#
419# CONFIG_IP_SCTP is not set 406# CONFIG_IP_SCTP is not set
420
421#
422# TIPC Configuration (EXPERIMENTAL)
423#
424# CONFIG_TIPC is not set 407# CONFIG_TIPC is not set
425# CONFIG_ATM is not set 408# CONFIG_ATM is not set
426# CONFIG_BRIDGE is not set 409# CONFIG_BRIDGE is not set
@@ -457,6 +440,7 @@ CONFIG_IPV6_SIT=y
457# CONFIG_MAC80211 is not set 440# CONFIG_MAC80211 is not set
458# CONFIG_IEEE80211 is not set 441# CONFIG_IEEE80211 is not set
459# CONFIG_RFKILL is not set 442# CONFIG_RFKILL is not set
443# CONFIG_NET_9P is not set
460 444
461# 445#
462# Device Drivers 446# Device Drivers
@@ -471,21 +455,9 @@ CONFIG_FW_LOADER=y
471# CONFIG_DEBUG_DRIVER is not set 455# CONFIG_DEBUG_DRIVER is not set
472# CONFIG_DEBUG_DEVRES is not set 456# CONFIG_DEBUG_DEVRES is not set
473# CONFIG_SYS_HYPERVISOR is not set 457# CONFIG_SYS_HYPERVISOR is not set
474
475#
476# Connector - unified userspace <-> kernelspace linker
477#
478# CONFIG_CONNECTOR is not set 458# CONFIG_CONNECTOR is not set
479# CONFIG_MTD is not set 459# CONFIG_MTD is not set
480
481#
482# Parallel port support
483#
484# CONFIG_PARPORT is not set 460# CONFIG_PARPORT is not set
485
486#
487# Plug and Play support
488#
489CONFIG_PNP=y 461CONFIG_PNP=y
490# CONFIG_PNP_DEBUG is not set 462# CONFIG_PNP_DEBUG is not set
491 463
@@ -493,10 +465,7 @@ CONFIG_PNP=y
493# Protocols 465# Protocols
494# 466#
495CONFIG_PNPACPI=y 467CONFIG_PNPACPI=y
496 468CONFIG_BLK_DEV=y
497#
498# Block devices
499#
500CONFIG_BLK_DEV_FD=y 469CONFIG_BLK_DEV_FD=y
501# CONFIG_BLK_CPQ_DA is not set 470# CONFIG_BLK_CPQ_DA is not set
502# CONFIG_BLK_CPQ_CISS_DA is not set 471# CONFIG_BLK_CPQ_CISS_DA is not set
@@ -514,17 +483,14 @@ CONFIG_BLK_DEV_RAM_SIZE=4096
514CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024 483CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
515# CONFIG_CDROM_PKTCDVD is not set 484# CONFIG_CDROM_PKTCDVD is not set
516# CONFIG_ATA_OVER_ETH is not set 485# CONFIG_ATA_OVER_ETH is not set
517 486CONFIG_MISC_DEVICES=y
518#
519# Misc devices
520#
521# CONFIG_IBM_ASM is not set 487# CONFIG_IBM_ASM is not set
522# CONFIG_PHANTOM is not set 488# CONFIG_PHANTOM is not set
489# CONFIG_EEPROM_93CX6 is not set
523# CONFIG_SGI_IOC4 is not set 490# CONFIG_SGI_IOC4 is not set
524# CONFIG_TIFM_CORE is not set 491# CONFIG_TIFM_CORE is not set
525# CONFIG_SONY_LAPTOP is not set 492# CONFIG_SONY_LAPTOP is not set
526# CONFIG_THINKPAD_ACPI is not set 493# CONFIG_THINKPAD_ACPI is not set
527# CONFIG_BLINK is not set
528CONFIG_IDE=y 494CONFIG_IDE=y
529CONFIG_BLK_DEV_IDE=y 495CONFIG_BLK_DEV_IDE=y
530 496
@@ -596,6 +562,7 @@ CONFIG_BLK_DEV_IDEDMA=y
596# 562#
597# CONFIG_RAID_ATTRS is not set 563# CONFIG_RAID_ATTRS is not set
598CONFIG_SCSI=y 564CONFIG_SCSI=y
565CONFIG_SCSI_DMA=y
599# CONFIG_SCSI_TGT is not set 566# CONFIG_SCSI_TGT is not set
600CONFIG_SCSI_NETLINK=y 567CONFIG_SCSI_NETLINK=y
601# CONFIG_SCSI_PROC_FS is not set 568# CONFIG_SCSI_PROC_FS is not set
@@ -606,8 +573,9 @@ CONFIG_SCSI_NETLINK=y
606CONFIG_BLK_DEV_SD=y 573CONFIG_BLK_DEV_SD=y
607# CONFIG_CHR_DEV_ST is not set 574# CONFIG_CHR_DEV_ST is not set
608# CONFIG_CHR_DEV_OSST is not set 575# CONFIG_CHR_DEV_OSST is not set
609# CONFIG_BLK_DEV_SR is not set 576CONFIG_BLK_DEV_SR=y
610# CONFIG_CHR_DEV_SG is not set 577# CONFIG_BLK_DEV_SR_VENDOR is not set
578CONFIG_CHR_DEV_SG=y
611# CONFIG_CHR_DEV_SCH is not set 579# CONFIG_CHR_DEV_SCH is not set
612 580
613# 581#
@@ -667,6 +635,7 @@ CONFIG_AIC79XX_DEBUG_MASK=0
667# CONFIG_SCSI_INIA100 is not set 635# CONFIG_SCSI_INIA100 is not set
668# CONFIG_SCSI_STEX is not set 636# CONFIG_SCSI_STEX is not set
669# CONFIG_SCSI_SYM53C8XX_2 is not set 637# CONFIG_SCSI_SYM53C8XX_2 is not set
638# CONFIG_SCSI_IPR is not set
670# CONFIG_SCSI_QLOGIC_1280 is not set 639# CONFIG_SCSI_QLOGIC_1280 is not set
671# CONFIG_SCSI_QLA_FC is not set 640# CONFIG_SCSI_QLA_FC is not set
672# CONFIG_SCSI_QLA_ISCSI is not set 641# CONFIG_SCSI_QLA_ISCSI is not set
@@ -675,14 +644,73 @@ CONFIG_AIC79XX_DEBUG_MASK=0
675# CONFIG_SCSI_DC390T is not set 644# CONFIG_SCSI_DC390T is not set
676# CONFIG_SCSI_NSP32 is not set 645# CONFIG_SCSI_NSP32 is not set
677# CONFIG_SCSI_DEBUG is not set 646# CONFIG_SCSI_DEBUG is not set
678# CONFIG_SCSI_ESP_CORE is not set
679# CONFIG_SCSI_SRP is not set 647# CONFIG_SCSI_SRP is not set
680# CONFIG_ATA is not set 648CONFIG_ATA=y
681 649# CONFIG_ATA_NONSTANDARD is not set
682# 650CONFIG_ATA_ACPI=y
683# Multi-device support (RAID and LVM) 651CONFIG_SATA_AHCI=y
684# 652CONFIG_SATA_SVW=y
685# CONFIG_MD is not set 653CONFIG_ATA_PIIX=y
654# CONFIG_SATA_MV is not set
655CONFIG_SATA_NV=y
656# CONFIG_PDC_ADMA is not set
657# CONFIG_SATA_QSTOR is not set
658# CONFIG_SATA_PROMISE is not set
659# CONFIG_SATA_SX4 is not set
660CONFIG_SATA_SIL=y
661# CONFIG_SATA_SIL24 is not set
662# CONFIG_SATA_SIS is not set
663# CONFIG_SATA_ULI is not set
664CONFIG_SATA_VIA=y
665# CONFIG_SATA_VITESSE is not set
666# CONFIG_SATA_INIC162X is not set
667# CONFIG_PATA_ALI is not set
668# CONFIG_PATA_AMD is not set
669# CONFIG_PATA_ARTOP is not set
670# CONFIG_PATA_ATIIXP is not set
671# CONFIG_PATA_CMD640_PCI is not set
672# CONFIG_PATA_CMD64X is not set
673# CONFIG_PATA_CS5520 is not set
674# CONFIG_PATA_CS5530 is not set
675# CONFIG_PATA_CS5535 is not set
676# CONFIG_PATA_CYPRESS is not set
677# CONFIG_PATA_EFAR is not set
678# CONFIG_ATA_GENERIC is not set
679# CONFIG_PATA_HPT366 is not set
680# CONFIG_PATA_HPT37X is not set
681# CONFIG_PATA_HPT3X2N is not set
682# CONFIG_PATA_HPT3X3 is not set
683# CONFIG_PATA_IT821X is not set
684# CONFIG_PATA_IT8213 is not set
685# CONFIG_PATA_JMICRON is not set
686# CONFIG_PATA_TRIFLEX is not set
687# CONFIG_PATA_MARVELL is not set
688# CONFIG_PATA_MPIIX is not set
689# CONFIG_PATA_OLDPIIX is not set
690# CONFIG_PATA_NETCELL is not set
691# CONFIG_PATA_NS87410 is not set
692# CONFIG_PATA_OPTI is not set
693# CONFIG_PATA_OPTIDMA is not set
694# CONFIG_PATA_PDC_OLD is not set
695# CONFIG_PATA_RADISYS is not set
696# CONFIG_PATA_RZ1000 is not set
697# CONFIG_PATA_SC1200 is not set
698# CONFIG_PATA_SERVERWORKS is not set
699# CONFIG_PATA_PDC2027X is not set
700# CONFIG_PATA_SIL680 is not set
701# CONFIG_PATA_SIS is not set
702# CONFIG_PATA_VIA is not set
703# CONFIG_PATA_WINBOND is not set
704CONFIG_MD=y
705# CONFIG_BLK_DEV_MD is not set
706CONFIG_BLK_DEV_DM=y
707# CONFIG_DM_DEBUG is not set
708# CONFIG_DM_CRYPT is not set
709# CONFIG_DM_SNAPSHOT is not set
710# CONFIG_DM_MIRROR is not set
711# CONFIG_DM_ZERO is not set
712# CONFIG_DM_MULTIPATH is not set
713# CONFIG_DM_DELAY is not set
686 714
687# 715#
688# Fusion MPT device support 716# Fusion MPT device support
@@ -723,42 +751,27 @@ CONFIG_IEEE1394_OHCI1394=y
723# CONFIG_IEEE1394_ETH1394 is not set 751# CONFIG_IEEE1394_ETH1394 is not set
724# CONFIG_IEEE1394_DV1394 is not set 752# CONFIG_IEEE1394_DV1394 is not set
725CONFIG_IEEE1394_RAWIO=y 753CONFIG_IEEE1394_RAWIO=y
726
727#
728# I2O device support
729#
730# CONFIG_I2O is not set 754# CONFIG_I2O is not set
731# CONFIG_MACINTOSH_DRIVERS is not set 755CONFIG_MACINTOSH_DRIVERS=y
732 756# CONFIG_MAC_EMUMOUSEBTN is not set
733#
734# Network device support
735#
736CONFIG_NETDEVICES=y 757CONFIG_NETDEVICES=y
758CONFIG_NETDEVICES_MULTIQUEUE=y
737# CONFIG_DUMMY is not set 759# CONFIG_DUMMY is not set
738# CONFIG_BONDING is not set 760# CONFIG_BONDING is not set
761# CONFIG_MACVLAN is not set
739# CONFIG_EQUALIZER is not set 762# CONFIG_EQUALIZER is not set
740# CONFIG_TUN is not set 763# CONFIG_TUN is not set
741# CONFIG_NET_SB1000 is not set 764# CONFIG_NET_SB1000 is not set
742
743#
744# ARCnet devices
745#
746# CONFIG_ARCNET is not set 765# CONFIG_ARCNET is not set
747# CONFIG_PHYLIB is not set 766# CONFIG_PHYLIB is not set
748
749#
750# Ethernet (10 or 100Mbit)
751#
752CONFIG_NET_ETHERNET=y 767CONFIG_NET_ETHERNET=y
753CONFIG_MII=y 768CONFIG_MII=y
754# CONFIG_HAPPYMEAL is not set 769# CONFIG_HAPPYMEAL is not set
755# CONFIG_SUNGEM is not set 770# CONFIG_SUNGEM is not set
756# CONFIG_CASSINI is not set 771# CONFIG_CASSINI is not set
757# CONFIG_NET_VENDOR_3COM is not set 772CONFIG_NET_VENDOR_3COM=y
758 773CONFIG_VORTEX=y
759# 774# CONFIG_TYPHOON is not set
760# Tulip family network device support
761#
762CONFIG_NET_TULIP=y 775CONFIG_NET_TULIP=y
763# CONFIG_DE2104X is not set 776# CONFIG_DE2104X is not set
764CONFIG_TULIP=y 777CONFIG_TULIP=y
@@ -809,7 +822,6 @@ CONFIG_R8169=y
809# CONFIG_SIS190 is not set 822# CONFIG_SIS190 is not set
810# CONFIG_SKGE is not set 823# CONFIG_SKGE is not set
811CONFIG_SKY2=y 824CONFIG_SKY2=y
812# CONFIG_SK98LIN is not set
813# CONFIG_VIA_VELOCITY is not set 825# CONFIG_VIA_VELOCITY is not set
814CONFIG_TIGON3=y 826CONFIG_TIGON3=y
815CONFIG_BNX2=y 827CONFIG_BNX2=y
@@ -823,10 +835,6 @@ CONFIG_NETDEV_10000=y
823# CONFIG_MYRI10GE is not set 835# CONFIG_MYRI10GE is not set
824# CONFIG_NETXEN_NIC is not set 836# CONFIG_NETXEN_NIC is not set
825# CONFIG_MLX4_CORE is not set 837# CONFIG_MLX4_CORE is not set
826
827#
828# Token Ring devices
829#
830# CONFIG_TR is not set 838# CONFIG_TR is not set
831 839
832# 840#
@@ -855,15 +863,7 @@ CONFIG_NETCONSOLE=y
855CONFIG_NETPOLL=y 863CONFIG_NETPOLL=y
856# CONFIG_NETPOLL_TRAP is not set 864# CONFIG_NETPOLL_TRAP is not set
857CONFIG_NET_POLL_CONTROLLER=y 865CONFIG_NET_POLL_CONTROLLER=y
858
859#
860# ISDN subsystem
861#
862# CONFIG_ISDN is not set 866# CONFIG_ISDN is not set
863
864#
865# Telephony Support
866#
867# CONFIG_PHONE is not set 867# CONFIG_PHONE is not set
868 868
869# 869#
@@ -871,6 +871,7 @@ CONFIG_NET_POLL_CONTROLLER=y
871# 871#
872CONFIG_INPUT=y 872CONFIG_INPUT=y
873# CONFIG_INPUT_FF_MEMLESS is not set 873# CONFIG_INPUT_FF_MEMLESS is not set
874# CONFIG_INPUT_POLLDEV is not set
874 875
875# 876#
876# Userland interfaces 877# Userland interfaces
@@ -936,6 +937,7 @@ CONFIG_HW_CONSOLE=y
936# 937#
937CONFIG_SERIAL_8250=y 938CONFIG_SERIAL_8250=y
938CONFIG_SERIAL_8250_CONSOLE=y 939CONFIG_SERIAL_8250_CONSOLE=y
940CONFIG_FIX_EARLYCON_MEM=y
939CONFIG_SERIAL_8250_PCI=y 941CONFIG_SERIAL_8250_PCI=y
940CONFIG_SERIAL_8250_PNP=y 942CONFIG_SERIAL_8250_PNP=y
941CONFIG_SERIAL_8250_NR_UARTS=4 943CONFIG_SERIAL_8250_NR_UARTS=4
@@ -951,10 +953,6 @@ CONFIG_SERIAL_CORE_CONSOLE=y
951CONFIG_UNIX98_PTYS=y 953CONFIG_UNIX98_PTYS=y
952CONFIG_LEGACY_PTYS=y 954CONFIG_LEGACY_PTYS=y
953CONFIG_LEGACY_PTY_COUNT=256 955CONFIG_LEGACY_PTY_COUNT=256
954
955#
956# IPMI
957#
958# CONFIG_IPMI_HANDLER is not set 956# CONFIG_IPMI_HANDLER is not set
959# CONFIG_WATCHDOG is not set 957# CONFIG_WATCHDOG is not set
960CONFIG_HW_RANDOM=y 958CONFIG_HW_RANDOM=y
@@ -988,11 +986,7 @@ CONFIG_MAX_RAW_DEVS=256
988CONFIG_HPET=y 986CONFIG_HPET=y
989# CONFIG_HPET_RTC_IRQ is not set 987# CONFIG_HPET_RTC_IRQ is not set
990CONFIG_HPET_MMAP=y 988CONFIG_HPET_MMAP=y
991CONFIG_HANGCHECK_TIMER=y 989# CONFIG_HANGCHECK_TIMER is not set
992
993#
994# TPM devices
995#
996# CONFIG_TCG_TPM is not set 990# CONFIG_TCG_TPM is not set
997# CONFIG_TELCLOCK is not set 991# CONFIG_TELCLOCK is not set
998CONFIG_DEVPORT=y 992CONFIG_DEVPORT=y
@@ -1003,11 +997,8 @@ CONFIG_DEVPORT=y
1003# 997#
1004# CONFIG_SPI is not set 998# CONFIG_SPI is not set
1005# CONFIG_SPI_MASTER is not set 999# CONFIG_SPI_MASTER is not set
1006
1007#
1008# Dallas's 1-wire bus
1009#
1010# CONFIG_W1 is not set 1000# CONFIG_W1 is not set
1001# CONFIG_POWER_SUPPLY is not set
1011# CONFIG_HWMON is not set 1002# CONFIG_HWMON is not set
1012 1003
1013# 1004#
@@ -1041,7 +1032,7 @@ CONFIG_DAB=y
1041CONFIG_VGA_CONSOLE=y 1032CONFIG_VGA_CONSOLE=y
1042CONFIG_VGACON_SOFT_SCROLLBACK=y 1033CONFIG_VGACON_SOFT_SCROLLBACK=y
1043CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=128 1034CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=128
1044# CONFIG_VIDEO_SELECT is not set 1035CONFIG_VIDEO_SELECT=y
1045CONFIG_DUMMY_CONSOLE=y 1036CONFIG_DUMMY_CONSOLE=y
1046 1037
1047# 1038#
@@ -1058,15 +1049,11 @@ CONFIG_SOUND=y
1058# Open Sound System 1049# Open Sound System
1059# 1050#
1060CONFIG_SOUND_PRIME=y 1051CONFIG_SOUND_PRIME=y
1061# CONFIG_OSS_OBSOLETE is not set
1062# CONFIG_SOUND_TRIDENT is not set 1052# CONFIG_SOUND_TRIDENT is not set
1063# CONFIG_SOUND_MSNDCLAS is not set 1053# CONFIG_SOUND_MSNDCLAS is not set
1064# CONFIG_SOUND_MSNDPIN is not set 1054# CONFIG_SOUND_MSNDPIN is not set
1065# CONFIG_SOUND_OSS is not set 1055# CONFIG_SOUND_OSS is not set
1066 1056CONFIG_HID_SUPPORT=y
1067#
1068# HID Devices
1069#
1070CONFIG_HID=y 1057CONFIG_HID=y
1071# CONFIG_HID_DEBUG is not set 1058# CONFIG_HID_DEBUG is not set
1072 1059
@@ -1077,10 +1064,7 @@ CONFIG_USB_HID=y
1077# CONFIG_USB_HIDINPUT_POWERBOOK is not set 1064# CONFIG_USB_HIDINPUT_POWERBOOK is not set
1078# CONFIG_HID_FF is not set 1065# CONFIG_HID_FF is not set
1079# CONFIG_USB_HIDDEV is not set 1066# CONFIG_USB_HIDDEV is not set
1080 1067CONFIG_USB_SUPPORT=y
1081#
1082# USB support
1083#
1084CONFIG_USB_ARCH_HAS_HCD=y 1068CONFIG_USB_ARCH_HAS_HCD=y
1085CONFIG_USB_ARCH_HAS_OHCI=y 1069CONFIG_USB_ARCH_HAS_OHCI=y
1086CONFIG_USB_ARCH_HAS_EHCI=y 1070CONFIG_USB_ARCH_HAS_EHCI=y
@@ -1094,6 +1078,7 @@ CONFIG_USB_DEVICEFS=y
1094# CONFIG_USB_DEVICE_CLASS is not set 1078# CONFIG_USB_DEVICE_CLASS is not set
1095# CONFIG_USB_DYNAMIC_MINORS is not set 1079# CONFIG_USB_DYNAMIC_MINORS is not set
1096# CONFIG_USB_SUSPEND is not set 1080# CONFIG_USB_SUSPEND is not set
1081# CONFIG_USB_PERSIST is not set
1097# CONFIG_USB_OTG is not set 1082# CONFIG_USB_OTG is not set
1098 1083
1099# 1084#
@@ -1103,7 +1088,6 @@ CONFIG_USB_EHCI_HCD=y
1103# CONFIG_USB_EHCI_SPLIT_ISO is not set 1088# CONFIG_USB_EHCI_SPLIT_ISO is not set
1104# CONFIG_USB_EHCI_ROOT_HUB_TT is not set 1089# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
1105# CONFIG_USB_EHCI_TT_NEWSCHED is not set 1090# CONFIG_USB_EHCI_TT_NEWSCHED is not set
1106# CONFIG_USB_EHCI_BIG_ENDIAN_MMIO is not set
1107# CONFIG_USB_ISP116X_HCD is not set 1091# CONFIG_USB_ISP116X_HCD is not set
1108CONFIG_USB_OHCI_HCD=y 1092CONFIG_USB_OHCI_HCD=y
1109# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set 1093# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
@@ -1111,6 +1095,7 @@ CONFIG_USB_OHCI_HCD=y
1111CONFIG_USB_OHCI_LITTLE_ENDIAN=y 1095CONFIG_USB_OHCI_LITTLE_ENDIAN=y
1112CONFIG_USB_UHCI_HCD=y 1096CONFIG_USB_UHCI_HCD=y
1113# CONFIG_USB_SL811_HCD is not set 1097# CONFIG_USB_SL811_HCD is not set
1098# CONFIG_USB_R8A66597_HCD is not set
1114 1099
1115# 1100#
1116# USB Device Class drivers 1101# USB Device Class drivers
@@ -1201,15 +1186,7 @@ CONFIG_USB_MON=y
1201# 1186#
1202# LED Triggers 1187# LED Triggers
1203# 1188#
1204
1205#
1206# InfiniBand support
1207#
1208# CONFIG_INFINIBAND is not set 1189# CONFIG_INFINIBAND is not set
1209
1210#
1211# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
1212#
1213# CONFIG_EDAC is not set 1190# CONFIG_EDAC is not set
1214 1191
1215# 1192#
@@ -1229,11 +1206,13 @@ CONFIG_USB_MON=y
1229# 1206#
1230# DMA Devices 1207# DMA Devices
1231# 1208#
1209CONFIG_VIRTUALIZATION=y
1210# CONFIG_KVM is not set
1232 1211
1233# 1212#
1234# Virtualization 1213# Userspace I/O
1235# 1214#
1236# CONFIG_KVM is not set 1215# CONFIG_UIO is not set
1237 1216
1238# 1217#
1239# File systems 1218# File systems
@@ -1271,6 +1250,7 @@ CONFIG_DNOTIFY=y
1271# CONFIG_AUTOFS_FS is not set 1250# CONFIG_AUTOFS_FS is not set
1272CONFIG_AUTOFS4_FS=y 1251CONFIG_AUTOFS4_FS=y
1273# CONFIG_FUSE_FS is not set 1252# CONFIG_FUSE_FS is not set
1253CONFIG_GENERIC_ACL=y
1274 1254
1275# 1255#
1276# CD-ROM/DVD Filesystems 1256# CD-ROM/DVD Filesystems
@@ -1298,7 +1278,7 @@ CONFIG_PROC_KCORE=y
1298CONFIG_PROC_SYSCTL=y 1278CONFIG_PROC_SYSCTL=y
1299CONFIG_SYSFS=y 1279CONFIG_SYSFS=y
1300CONFIG_TMPFS=y 1280CONFIG_TMPFS=y
1301# CONFIG_TMPFS_POSIX_ACL is not set 1281CONFIG_TMPFS_POSIX_ACL=y
1302CONFIG_HUGETLBFS=y 1282CONFIG_HUGETLBFS=y
1303CONFIG_HUGETLB_PAGE=y 1283CONFIG_HUGETLB_PAGE=y
1304CONFIG_RAMFS=y 1284CONFIG_RAMFS=y
@@ -1348,7 +1328,6 @@ CONFIG_SUNRPC=y
1348# CONFIG_NCP_FS is not set 1328# CONFIG_NCP_FS is not set
1349# CONFIG_CODA_FS is not set 1329# CONFIG_CODA_FS is not set
1350# CONFIG_AFS_FS is not set 1330# CONFIG_AFS_FS is not set
1351# CONFIG_9P_FS is not set
1352 1331
1353# 1332#
1354# Partition Types 1333# Partition Types
@@ -1404,10 +1383,7 @@ CONFIG_NLS_UTF8=y
1404# Distributed Lock Manager 1383# Distributed Lock Manager
1405# 1384#
1406# CONFIG_DLM is not set 1385# CONFIG_DLM is not set
1407 1386CONFIG_INSTRUMENTATION=y
1408#
1409# Instrumentation Support
1410#
1411CONFIG_PROFILING=y 1387CONFIG_PROFILING=y
1412CONFIG_OPROFILE=y 1388CONFIG_OPROFILE=y
1413CONFIG_KPROBES=y 1389CONFIG_KPROBES=y
@@ -1417,7 +1393,7 @@ CONFIG_KPROBES=y
1417# 1393#
1418CONFIG_TRACE_IRQFLAGS_SUPPORT=y 1394CONFIG_TRACE_IRQFLAGS_SUPPORT=y
1419# CONFIG_PRINTK_TIME is not set 1395# CONFIG_PRINTK_TIME is not set
1420CONFIG_ENABLE_MUST_CHECK=y 1396# CONFIG_ENABLE_MUST_CHECK is not set
1421CONFIG_MAGIC_SYSRQ=y 1397CONFIG_MAGIC_SYSRQ=y
1422CONFIG_UNUSED_SYMBOLS=y 1398CONFIG_UNUSED_SYMBOLS=y
1423# CONFIG_DEBUG_FS is not set 1399# CONFIG_DEBUG_FS is not set
@@ -1425,15 +1401,17 @@ CONFIG_UNUSED_SYMBOLS=y
1425CONFIG_DEBUG_KERNEL=y 1401CONFIG_DEBUG_KERNEL=y
1426# CONFIG_DEBUG_SHIRQ is not set 1402# CONFIG_DEBUG_SHIRQ is not set
1427CONFIG_DETECT_SOFTLOCKUP=y 1403CONFIG_DETECT_SOFTLOCKUP=y
1404# CONFIG_SCHED_DEBUG is not set
1428# CONFIG_SCHEDSTATS is not set 1405# CONFIG_SCHEDSTATS is not set
1429# CONFIG_TIMER_STATS is not set 1406CONFIG_TIMER_STATS=y
1430# CONFIG_DEBUG_SLAB is not set 1407# CONFIG_SLUB_DEBUG_ON is not set
1431# CONFIG_DEBUG_RT_MUTEXES is not set 1408# CONFIG_DEBUG_RT_MUTEXES is not set
1432# CONFIG_RT_MUTEX_TESTER is not set 1409# CONFIG_RT_MUTEX_TESTER is not set
1433# CONFIG_DEBUG_SPINLOCK is not set 1410# CONFIG_DEBUG_SPINLOCK is not set
1434# CONFIG_DEBUG_MUTEXES is not set 1411# CONFIG_DEBUG_MUTEXES is not set
1435# CONFIG_DEBUG_LOCK_ALLOC is not set 1412# CONFIG_DEBUG_LOCK_ALLOC is not set
1436# CONFIG_PROVE_LOCKING is not set 1413# CONFIG_PROVE_LOCKING is not set
1414# CONFIG_LOCK_STAT is not set
1437# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1415# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1438# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 1416# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1439# CONFIG_DEBUG_KOBJECT is not set 1417# CONFIG_DEBUG_KOBJECT is not set
@@ -1443,7 +1421,6 @@ CONFIG_DEBUG_BUGVERBOSE=y
1443# CONFIG_DEBUG_VM is not set 1421# CONFIG_DEBUG_VM is not set
1444# CONFIG_DEBUG_LIST is not set 1422# CONFIG_DEBUG_LIST is not set
1445# CONFIG_FRAME_POINTER is not set 1423# CONFIG_FRAME_POINTER is not set
1446# CONFIG_UNWIND_INFO is not set
1447# CONFIG_FORCED_INLINING is not set 1424# CONFIG_FORCED_INLINING is not set
1448# CONFIG_RCU_TORTURE_TEST is not set 1425# CONFIG_RCU_TORTURE_TEST is not set
1449# CONFIG_LKDTM is not set 1426# CONFIG_LKDTM is not set
@@ -1462,10 +1439,6 @@ CONFIG_DOUBLEFAULT=y
1462# 1439#
1463# CONFIG_KEYS is not set 1440# CONFIG_KEYS is not set
1464# CONFIG_SECURITY is not set 1441# CONFIG_SECURITY is not set
1465
1466#
1467# Cryptographic options
1468#
1469# CONFIG_CRYPTO is not set 1442# CONFIG_CRYPTO is not set
1470 1443
1471# 1444#
@@ -1476,6 +1449,7 @@ CONFIG_BITREVERSE=y
1476# CONFIG_CRC16 is not set 1449# CONFIG_CRC16 is not set
1477# CONFIG_CRC_ITU_T is not set 1450# CONFIG_CRC_ITU_T is not set
1478CONFIG_CRC32=y 1451CONFIG_CRC32=y
1452# CONFIG_CRC7 is not set
1479# CONFIG_LIBCRC32C is not set 1453# CONFIG_LIBCRC32C is not set
1480CONFIG_ZLIB_INFLATE=y 1454CONFIG_ZLIB_INFLATE=y
1481CONFIG_PLIST=y 1455CONFIG_PLIST=y
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index 06da59f6f837..dbe5e87e0d66 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_VM86) += vm86.o
40obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 40obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
41obj-$(CONFIG_HPET_TIMER) += hpet.o 41obj-$(CONFIG_HPET_TIMER) += hpet.o
42obj-$(CONFIG_K8_NB) += k8.o 42obj-$(CONFIG_K8_NB) += k8.o
43obj-$(CONFIG_MGEODE_LX) += geode.o
43 44
44obj-$(CONFIG_VMI) += vmi.o vmiclock.o 45obj-$(CONFIG_VMI) += vmi.o vmiclock.o
45obj-$(CONFIG_PARAVIRT) += paravirt.o 46obj-$(CONFIG_PARAVIRT) += paravirt.o
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index a574cd2c8b61..b87cedeaf59b 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -618,6 +618,8 @@ static int __init acpi_parse_sbf(struct acpi_table_header *table)
618#ifdef CONFIG_HPET_TIMER 618#ifdef CONFIG_HPET_TIMER
619#include <asm/hpet.h> 619#include <asm/hpet.h>
620 620
621static struct __initdata resource *hpet_res;
622
621static int __init acpi_parse_hpet(struct acpi_table_header *table) 623static int __init acpi_parse_hpet(struct acpi_table_header *table)
622{ 624{
623 struct acpi_table_hpet *hpet_tbl; 625 struct acpi_table_hpet *hpet_tbl;
@@ -638,8 +640,42 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
638 printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n", 640 printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
639 hpet_tbl->id, hpet_address); 641 hpet_tbl->id, hpet_address);
640 642
643 /*
644 * Allocate and initialize the HPET firmware resource for adding into
645 * the resource tree during the lateinit timeframe.
646 */
647#define HPET_RESOURCE_NAME_SIZE 9
648 hpet_res = alloc_bootmem(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE);
649
650 if (!hpet_res)
651 return 0;
652
653 memset(hpet_res, 0, sizeof(*hpet_res));
654 hpet_res->name = (void *)&hpet_res[1];
655 hpet_res->flags = IORESOURCE_MEM;
656 snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE, "HPET %u",
657 hpet_tbl->sequence);
658
659 hpet_res->start = hpet_address;
660 hpet_res->end = hpet_address + (1 * 1024) - 1;
661
641 return 0; 662 return 0;
642} 663}
664
665/*
666 * hpet_insert_resource inserts the HPET resources used into the resource
667 * tree.
668 */
669static __init int hpet_insert_resource(void)
670{
671 if (!hpet_res)
672 return 1;
673
674 return insert_resource(&iomem_resource, hpet_res);
675}
676
677late_initcall(hpet_insert_resource);
678
643#else 679#else
644#define acpi_parse_hpet NULL 680#define acpi_parse_hpet NULL
645#endif 681#endif
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
index d8cda14fff8b..0695be538de5 100644
--- a/arch/i386/kernel/alternative.c
+++ b/arch/i386/kernel/alternative.c
@@ -5,9 +5,8 @@
5#include <asm/alternative.h> 5#include <asm/alternative.h>
6#include <asm/sections.h> 6#include <asm/sections.h>
7 7
8static int noreplace_smp = 0; 8#ifdef CONFIG_HOTPLUG_CPU
9static int smp_alt_once = 0; 9static int smp_alt_once;
10static int debug_alternative = 0;
11 10
12static int __init bootonly(char *str) 11static int __init bootonly(char *str)
13{ 12{
@@ -15,6 +14,11 @@ static int __init bootonly(char *str)
15 return 1; 14 return 1;
16} 15}
17__setup("smp-alt-boot", bootonly); 16__setup("smp-alt-boot", bootonly);
17#else
18#define smp_alt_once 1
19#endif
20
21static int debug_alternative;
18 22
19static int __init debug_alt(char *str) 23static int __init debug_alt(char *str)
20{ 24{
@@ -23,6 +27,8 @@ static int __init debug_alt(char *str)
23} 27}
24__setup("debug-alternative", debug_alt); 28__setup("debug-alternative", debug_alt);
25 29
30static int noreplace_smp;
31
26static int __init setup_noreplace_smp(char *str) 32static int __init setup_noreplace_smp(char *str)
27{ 33{
28 noreplace_smp = 1; 34 noreplace_smp = 1;
@@ -376,8 +382,6 @@ void __init alternative_instructions(void)
376#ifdef CONFIG_HOTPLUG_CPU 382#ifdef CONFIG_HOTPLUG_CPU
377 if (num_possible_cpus() < 2) 383 if (num_possible_cpus() < 2)
378 smp_alt_once = 1; 384 smp_alt_once = 1;
379#else
380 smp_alt_once = 1;
381#endif 385#endif
382 386
383#ifdef CONFIG_SMP 387#ifdef CONFIG_SMP
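
The reshuffle above relies on a common idiom: with CONFIG_HOTPLUG_CPU disabled, smp_alt_once becomes the literal constant 1, so every test of it is folded at compile time and the unreachable branches are discarded. A generic sketch of that idiom, using a made-up config option and names:

#include <linux/kernel.h>

#ifdef CONFIG_FOO_RUNTIME_TOGGLE                /* hypothetical option */
static int foo_once;                            /* set from a __setup() handler */
#else
#define foo_once 1                              /* constant: dead branches vanish */
#endif

static void foo_apply(void)
{
        if (foo_once)
                printk(KERN_DEBUG "foo: one-shot path\n");
        else
                printk(KERN_DEBUG "foo: revertible path\n");    /* compiled out when constant */
}
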
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 67824f3bb974..bfc6cb7df7e7 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -263,6 +263,9 @@ static void lapic_timer_setup(enum clock_event_mode mode,
263 v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR); 263 v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
264 apic_write_around(APIC_LVTT, v); 264 apic_write_around(APIC_LVTT, v);
265 break; 265 break;
266 case CLOCK_EVT_MODE_RESUME:
267 /* Nothing to do here */
268 break;
266 } 269 }
267 270
268 local_irq_restore(flags); 271 local_irq_restore(flags);
@@ -315,7 +318,7 @@ static void __devinit setup_APIC_timer(void)
315 318
316#define LAPIC_CAL_LOOPS (HZ/10) 319#define LAPIC_CAL_LOOPS (HZ/10)
317 320
318static __initdata volatile int lapic_cal_loops = -1; 321static __initdata int lapic_cal_loops = -1;
319static __initdata long lapic_cal_t1, lapic_cal_t2; 322static __initdata long lapic_cal_t1, lapic_cal_t2;
320static __initdata unsigned long long lapic_cal_tsc1, lapic_cal_tsc2; 323static __initdata unsigned long long lapic_cal_tsc1, lapic_cal_tsc2;
321static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2; 324static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
@@ -485,7 +488,7 @@ void __init setup_boot_APIC_clock(void)
485 /* Let the interrupts run */ 488 /* Let the interrupts run */
486 local_irq_enable(); 489 local_irq_enable();
487 490
488 while(lapic_cal_loops <= LAPIC_CAL_LOOPS) 491 while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
489 cpu_relax(); 492 cpu_relax();
490 493
491 local_irq_disable(); 494 local_irq_disable();
@@ -521,6 +524,9 @@ void __init setup_boot_APIC_clock(void)
521 */ 524 */
522 if (nmi_watchdog != NMI_IO_APIC) 525 if (nmi_watchdog != NMI_IO_APIC)
523 lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY; 526 lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
527 else
528 printk(KERN_WARNING "APIC timer registered as dummy,"
529 " due to nmi_watchdog=1!\n");
524 } 530 }
525 531
526 /* Setup the lapic or request the broadcast */ 532 /* Setup the lapic or request the broadcast */
diff --git a/arch/i386/kernel/cpu/Makefile b/arch/i386/kernel/cpu/Makefile
index 0b6a8551e9e2..778396c78d65 100644
--- a/arch/i386/kernel/cpu/Makefile
+++ b/arch/i386/kernel/cpu/Makefile
@@ -9,7 +9,6 @@ obj-y += cyrix.o
9obj-y += centaur.o 9obj-y += centaur.o
10obj-y += transmeta.o 10obj-y += transmeta.o
11obj-y += intel.o intel_cacheinfo.o addon_cpuid_features.o 11obj-y += intel.o intel_cacheinfo.o addon_cpuid_features.o
12obj-y += rise.o
13obj-y += nexgen.o 12obj-y += nexgen.o
14obj-y += umc.o 13obj-y += umc.o
15 14
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index 6f47eeeb93ea..815a5f0aa474 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -272,8 +272,12 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
272 } 272 }
273#endif 273#endif
274 274
275 if (cpuid_eax(0x80000000) >= 0x80000006) 275 if (cpuid_eax(0x80000000) >= 0x80000006) {
276 num_cache_leaves = 3; 276 if ((c->x86 == 0x10) && (cpuid_edx(0x80000006) & 0xf000))
277 num_cache_leaves = 4;
278 else
279 num_cache_leaves = 3;
280 }
277 281
278 if (amd_apic_timer_broken()) 282 if (amd_apic_timer_broken())
279 set_bit(X86_FEATURE_LAPIC_TIMER_BROKEN, c->x86_capability); 283 set_bit(X86_FEATURE_LAPIC_TIMER_BROKEN, c->x86_capability);
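
The new family-0x10 test works because CPUID leaf 0x80000006 EDX describes the L3 cache on AMD processors (the same layout as the l3_cache union added to intel_cacheinfo.c further down): bits 15:12 hold the associativity code, which reads zero when no L3 is present. The same check in isolation, as a small sketch:

#include <asm/processor.h>      /* for cpuid_edx() */

static inline int foo_cpu_has_l3(void)
{
        /* associativity field EDX[15:12] is zero when there is no L3 */
        return (cpuid_edx(0x80000006) & 0xf000) != 0;
}
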
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index e5419a9dec88..d506201d397c 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -606,7 +606,6 @@ extern int nsc_init_cpu(void);
606extern int amd_init_cpu(void); 606extern int amd_init_cpu(void);
607extern int centaur_init_cpu(void); 607extern int centaur_init_cpu(void);
608extern int transmeta_init_cpu(void); 608extern int transmeta_init_cpu(void);
609extern int rise_init_cpu(void);
610extern int nexgen_init_cpu(void); 609extern int nexgen_init_cpu(void);
611extern int umc_init_cpu(void); 610extern int umc_init_cpu(void);
612 611
@@ -618,7 +617,6 @@ void __init early_cpu_init(void)
618 amd_init_cpu(); 617 amd_init_cpu();
619 centaur_init_cpu(); 618 centaur_init_cpu();
620 transmeta_init_cpu(); 619 transmeta_init_cpu();
621 rise_init_cpu();
622 nexgen_init_cpu(); 620 nexgen_init_cpu();
623 umc_init_cpu(); 621 umc_init_cpu();
624 early_cpu_detect(); 622 early_cpu_detect();
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index e5be819492ef..d5a456d27d82 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -4,7 +4,7 @@
4 * Changes: 4 * Changes:
5 * Venkatesh Pallipadi : Adding cache identification through cpuid(4) 5 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
6 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure. 6 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
7 * Andi Kleen : CPUID4 emulation on AMD. 7 * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
8 */ 8 */
9 9
10#include <linux/init.h> 10#include <linux/init.h>
@@ -135,7 +135,7 @@ unsigned short num_cache_leaves;
135 135
136/* AMD doesn't have CPUID4. Emulate it here to report the same 136/* AMD doesn't have CPUID4. Emulate it here to report the same
137 information to the user. This makes some assumptions about the machine: 137 information to the user. This makes some assumptions about the machine:
138 No L3, L2 not shared, no SMT etc. that is currently true on AMD CPUs. 138 L2 not shared, no SMT etc. that is currently true on AMD CPUs.
139 139
140 In theory the TLBs could be reported as fake type (they are in "dummy"). 140 In theory the TLBs could be reported as fake type (they are in "dummy").
141 Maybe later */ 141 Maybe later */
@@ -159,13 +159,26 @@ union l2_cache {
159 unsigned val; 159 unsigned val;
160}; 160};
161 161
162union l3_cache {
163 struct {
164 unsigned line_size : 8;
165 unsigned lines_per_tag : 4;
166 unsigned assoc : 4;
167 unsigned res : 2;
168 unsigned size_encoded : 14;
169 };
170 unsigned val;
171};
172
162static const unsigned short assocs[] = { 173static const unsigned short assocs[] = {
163 [1] = 1, [2] = 2, [4] = 4, [6] = 8, 174 [1] = 1, [2] = 2, [4] = 4, [6] = 8,
164 [8] = 16, 175 [8] = 16, [0xa] = 32, [0xb] = 48,
176 [0xc] = 64,
165 [0xf] = 0xffff // ?? 177 [0xf] = 0xffff // ??
166 }; 178};
167static const unsigned char levels[] = { 1, 1, 2 }; 179
168static const unsigned char types[] = { 1, 2, 3 }; 180static const unsigned char levels[] = { 1, 1, 2, 3 };
181static const unsigned char types[] = { 1, 2, 3, 3 };
169 182
170static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, 183static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
171 union _cpuid4_leaf_ebx *ebx, 184 union _cpuid4_leaf_ebx *ebx,
@@ -175,37 +188,58 @@ static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
175 unsigned line_size, lines_per_tag, assoc, size_in_kb; 188 unsigned line_size, lines_per_tag, assoc, size_in_kb;
176 union l1_cache l1i, l1d; 189 union l1_cache l1i, l1d;
177 union l2_cache l2; 190 union l2_cache l2;
191 union l3_cache l3;
192 union l1_cache *l1 = &l1d;
178 193
179 eax->full = 0; 194 eax->full = 0;
180 ebx->full = 0; 195 ebx->full = 0;
181 ecx->full = 0; 196 ecx->full = 0;
182 197
183 cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val); 198 cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
184 cpuid(0x80000006, &dummy, &dummy, &l2.val, &dummy); 199 cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);
185 200
186 if (leaf > 2 || !l1d.val || !l1i.val || !l2.val) 201 switch (leaf) {
187 return; 202 case 1:
188 203 l1 = &l1i;
189 eax->split.is_self_initializing = 1; 204 case 0:
190 eax->split.type = types[leaf]; 205 if (!l1->val)
191 eax->split.level = levels[leaf]; 206 return;
192 eax->split.num_threads_sharing = 0;
193 eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
194
195 if (leaf <= 1) {
196 union l1_cache *l1 = leaf == 0 ? &l1d : &l1i;
197 assoc = l1->assoc; 207 assoc = l1->assoc;
198 line_size = l1->line_size; 208 line_size = l1->line_size;
199 lines_per_tag = l1->lines_per_tag; 209 lines_per_tag = l1->lines_per_tag;
200 size_in_kb = l1->size_in_kb; 210 size_in_kb = l1->size_in_kb;
201 } else { 211 break;
212 case 2:
213 if (!l2.val)
214 return;
202 assoc = l2.assoc; 215 assoc = l2.assoc;
203 line_size = l2.line_size; 216 line_size = l2.line_size;
204 lines_per_tag = l2.lines_per_tag; 217 lines_per_tag = l2.lines_per_tag;
205 /* cpu_data has errata corrections for K7 applied */ 218 /* cpu_data has errata corrections for K7 applied */
206 size_in_kb = current_cpu_data.x86_cache_size; 219 size_in_kb = current_cpu_data.x86_cache_size;
220 break;
221 case 3:
222 if (!l3.val)
223 return;
224 assoc = l3.assoc;
225 line_size = l3.line_size;
226 lines_per_tag = l3.lines_per_tag;
227 size_in_kb = l3.size_encoded * 512;
228 break;
229 default:
230 return;
207 } 231 }
208 232
233 eax->split.is_self_initializing = 1;
234 eax->split.type = types[leaf];
235 eax->split.level = levels[leaf];
236 if (leaf == 3)
237 eax->split.num_threads_sharing = current_cpu_data.x86_max_cores - 1;
238 else
239 eax->split.num_threads_sharing = 0;
240 eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
241
242
209 if (assoc == 0xf) 243 if (assoc == 0xf)
210 eax->split.is_fully_associative = 1; 244 eax->split.is_fully_associative = 1;
211 ebx->split.coherency_line_size = line_size - 1; 245 ebx->split.coherency_line_size = line_size - 1;
@@ -239,8 +273,7 @@ static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_le
239 return 0; 273 return 0;
240} 274}
241 275
242/* will only be called once; __init is safe here */ 276static int __cpuinit find_num_cache_leaves(void)
243static int __init find_num_cache_leaves(void)
244{ 277{
245 unsigned int eax, ebx, ecx, edx; 278 unsigned int eax, ebx, ecx, edx;
246 union _cpuid4_leaf_eax cache_eax; 279 union _cpuid4_leaf_eax cache_eax;
@@ -710,7 +743,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
710 return retval; 743 return retval;
711} 744}
712 745
713static void __cpuexit cache_remove_dev(struct sys_device * sys_dev) 746static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
714{ 747{
715 unsigned int cpu = sys_dev->id; 748 unsigned int cpu = sys_dev->id;
716 unsigned long i; 749 unsigned long i;
diff --git a/arch/i386/kernel/cpu/mcheck/non-fatal.c b/arch/i386/kernel/cpu/mcheck/non-fatal.c
index 6b5d3518a1c0..bf39409b3838 100644
--- a/arch/i386/kernel/cpu/mcheck/non-fatal.c
+++ b/arch/i386/kernel/cpu/mcheck/non-fatal.c
@@ -57,7 +57,7 @@ static DECLARE_DELAYED_WORK(mce_work, mce_work_fn);
57static void mce_work_fn(struct work_struct *work) 57static void mce_work_fn(struct work_struct *work)
58{ 58{
59 on_each_cpu(mce_checkregs, NULL, 1, 1); 59 on_each_cpu(mce_checkregs, NULL, 1, 1);
60 schedule_delayed_work(&mce_work, MCE_RATE); 60 schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE));
61} 61}
62 62
63static int __init init_nonfatal_mce_checker(void) 63static int __init init_nonfatal_mce_checker(void)
@@ -82,7 +82,7 @@ static int __init init_nonfatal_mce_checker(void)
82 /* 82 /*
83 * Check for non-fatal errors every MCE_RATE s 83 * Check for non-fatal errors every MCE_RATE s
84 */ 84 */
85 schedule_delayed_work(&mce_work, MCE_RATE); 85 schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE));
86 printk(KERN_INFO "Machine check exception polling timer started.\n"); 86 printk(KERN_INFO "Machine check exception polling timer started.\n");
87 return 0; 87 return 0;
88} 88}
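The two hunks above change the polling timer from a raw MCE_RATE delay to round_jiffies_relative(), which nudges the expiry onto a whole-second boundary so that periodic work from different subsystems tends to fire in the same wakeup. A small self-contained model of that rounding rule follows; the HZ value and the 15 second rate are assumptions for the example, and the real kernel helper has a few extra wrinkles (per-CPU skew, never rounding into the past).

#include <stdio.h>

#define HZ 250UL	/* assumed tick rate for this example */

/* Simplified model: round an absolute jiffies value to a whole-second
 * boundary, rounding down only when that costs less than a quarter second. */
static unsigned long round_to_second(unsigned long j)
{
	unsigned long rem = j % HZ;

	if (rem < HZ / 4)
		return j - rem;
	return j - rem + HZ;
}

int main(void)
{
	unsigned long jiffies = 123456;		/* pretend current time */
	unsigned long delay   = 15 * HZ;	/* e.g. a 15 second MCE_RATE */
	unsigned long expires = round_to_second(jiffies + delay);

	printf("raw expiry %lu, rounded expiry %lu (relative %lu)\n",
	       jiffies + delay, expires, expires - jiffies);
	return 0;
}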
diff --git a/arch/i386/kernel/cpu/mtrr/generic.c b/arch/i386/kernel/cpu/mtrr/generic.c
index f6e46943e6ef..56f64e34829f 100644
--- a/arch/i386/kernel/cpu/mtrr/generic.c
+++ b/arch/i386/kernel/cpu/mtrr/generic.c
@@ -79,7 +79,7 @@ static void print_fixed(unsigned base, unsigned step, const mtrr_type*types)
79} 79}
80 80
81/* Grab all of the MTRR state for this CPU into *state */ 81/* Grab all of the MTRR state for this CPU into *state */
82void get_mtrr_state(void) 82void __init get_mtrr_state(void)
83{ 83{
84 unsigned int i; 84 unsigned int i;
85 struct mtrr_var_range *vrs; 85 struct mtrr_var_range *vrs;
diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c
index 75dc6d5214bc..c48b6fea5ab4 100644
--- a/arch/i386/kernel/cpu/mtrr/main.c
+++ b/arch/i386/kernel/cpu/mtrr/main.c
@@ -643,7 +643,7 @@ static struct sysdev_driver mtrr_sysdev_driver = {
643 * initialized (i.e. before smp_init()). 643 * initialized (i.e. before smp_init()).
644 * 644 *
645 */ 645 */
646__init void mtrr_bp_init(void) 646void __init mtrr_bp_init(void)
647{ 647{
648 init_ifs(); 648 init_ifs();
649 649
diff --git a/arch/i386/kernel/cpu/perfctr-watchdog.c b/arch/i386/kernel/cpu/perfctr-watchdog.c
index 4d26d514c56f..30b5e48aa76b 100644
--- a/arch/i386/kernel/cpu/perfctr-watchdog.c
+++ b/arch/i386/kernel/cpu/perfctr-watchdog.c
@@ -599,8 +599,8 @@ static struct wd_ops intel_arch_wd_ops = {
599 .setup = setup_intel_arch_watchdog, 599 .setup = setup_intel_arch_watchdog,
600 .rearm = p6_rearm, 600 .rearm = p6_rearm,
601 .stop = single_msr_stop_watchdog, 601 .stop = single_msr_stop_watchdog,
602 .perfctr = MSR_ARCH_PERFMON_PERFCTR0, 602 .perfctr = MSR_ARCH_PERFMON_PERFCTR1,
603 .evntsel = MSR_ARCH_PERFMON_EVENTSEL0, 603 .evntsel = MSR_ARCH_PERFMON_EVENTSEL1,
604}; 604};
605 605
606static void probe_nmi_watchdog(void) 606static void probe_nmi_watchdog(void)
diff --git a/arch/i386/kernel/cpu/rise.c b/arch/i386/kernel/cpu/rise.c
deleted file mode 100644
index 50076f22e90f..000000000000
--- a/arch/i386/kernel/cpu/rise.c
+++ /dev/null
@@ -1,52 +0,0 @@
1#include <linux/kernel.h>
2#include <linux/init.h>
3#include <linux/bitops.h>
4#include <asm/processor.h>
5
6#include "cpu.h"
7
8static void __cpuinit init_rise(struct cpuinfo_x86 *c)
9{
10 printk("CPU: Rise iDragon");
11 if (c->x86_model > 2)
12 printk(" II");
13 printk("\n");
14
15 /* Unhide possibly hidden capability flags
16 The mp6 iDragon family don't have MSRs.
17 We switch on extra features with this cpuid weirdness: */
18 __asm__ (
19 "movl $0x6363452a, %%eax\n\t"
20 "movl $0x3231206c, %%ecx\n\t"
21 "movl $0x2a32313a, %%edx\n\t"
22 "cpuid\n\t"
23 "movl $0x63634523, %%eax\n\t"
24 "movl $0x32315f6c, %%ecx\n\t"
25 "movl $0x2333313a, %%edx\n\t"
26 "cpuid\n\t" : : : "eax", "ebx", "ecx", "edx"
27 );
28 set_bit(X86_FEATURE_CX8, c->x86_capability);
29}
30
31static struct cpu_dev rise_cpu_dev __cpuinitdata = {
32 .c_vendor = "Rise",
33 .c_ident = { "RiseRiseRise" },
34 .c_models = {
35 { .vendor = X86_VENDOR_RISE, .family = 5, .model_names =
36 {
37 [0] = "iDragon",
38 [2] = "iDragon",
39 [8] = "iDragon II",
40 [9] = "iDragon II"
41 }
42 },
43 },
44 .c_init = init_rise,
45};
46
47int __init rise_init_cpu(void)
48{
49 cpu_devs[X86_VENDOR_RISE] = &rise_cpu_dev;
50 return 0;
51}
52
diff --git a/arch/i386/kernel/e820.c b/arch/i386/kernel/e820.c
index fc822a46897a..e60cddbc4cfb 100644
--- a/arch/i386/kernel/e820.c
+++ b/arch/i386/kernel/e820.c
@@ -10,6 +10,7 @@
10#include <linux/efi.h> 10#include <linux/efi.h>
11#include <linux/pfn.h> 11#include <linux/pfn.h>
12#include <linux/uaccess.h> 12#include <linux/uaccess.h>
13#include <linux/suspend.h>
13 14
14#include <asm/pgtable.h> 15#include <asm/pgtable.h>
15#include <asm/page.h> 16#include <asm/page.h>
@@ -320,6 +321,37 @@ static int __init request_standard_resources(void)
320 321
321subsys_initcall(request_standard_resources); 322subsys_initcall(request_standard_resources);
322 323
324#if defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND)
325/**
326 * e820_mark_nosave_regions - Find the ranges of physical addresses that do not
327 * correspond to e820 RAM areas and mark the corresponding pages as nosave for
328 * hibernation.
329 *
330 * This function requires the e820 map to be sorted and without any
331 * overlapping entries and assumes the first e820 area to be RAM.
332 */
333void __init e820_mark_nosave_regions(void)
334{
335 int i;
336 unsigned long pfn;
337
338 pfn = PFN_DOWN(e820.map[0].addr + e820.map[0].size);
339 for (i = 1; i < e820.nr_map; i++) {
340 struct e820entry *ei = &e820.map[i];
341
342 if (pfn < PFN_UP(ei->addr))
343 register_nosave_region(pfn, PFN_UP(ei->addr));
344
345 pfn = PFN_DOWN(ei->addr + ei->size);
346 if (ei->type != E820_RAM)
347 register_nosave_region(PFN_UP(ei->addr), pfn);
348
349 if (pfn >= max_low_pfn)
350 break;
351 }
352}
353#endif
354
323void __init add_memory_region(unsigned long long start, 355void __init add_memory_region(unsigned long long start,
324 unsigned long long size, int type) 356 unsigned long long size, int type)
325{ 357{
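e820_mark_nosave_regions() above walks the sorted e820 map once and flags two kinds of page ranges as not worth saving across hibernation: holes between consecutive entries, and entries that are not E820_RAM. A standalone sketch of the same walk over a mock table, printing the ranges instead of calling register_nosave_region(); the map entries here are invented for the example.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PFN_UP(x)   (((x) + (1ULL << PAGE_SHIFT) - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

enum { E820_RAM = 1, E820_RESERVED = 2 };

struct e820entry {
	unsigned long long addr;
	unsigned long long size;
	int type;
};

/* Invented map: RAM, a hole, a reserved window, then RAM again. */
static struct e820entry map[] = {
	{ 0x0000000000ULL, 0x0009f000ULL, E820_RAM },
	{ 0x00000f0000ULL, 0x00010000ULL, E820_RESERVED },
	{ 0x0000100000ULL, 0x3fef0000ULL, E820_RAM },
};

int main(void)
{
	unsigned long long pfn = PFN_DOWN(map[0].addr + map[0].size);
	int i;

	for (i = 1; i < (int)(sizeof(map) / sizeof(map[0])); i++) {
		struct e820entry *ei = &map[i];

		if (pfn < PFN_UP(ei->addr))	/* hole before this entry */
			printf("nosave pfns %llu-%llu (gap)\n",
			       pfn, PFN_UP(ei->addr));

		pfn = PFN_DOWN(ei->addr + ei->size);
		if (ei->type != E820_RAM)	/* the entry itself is not RAM */
			printf("nosave pfns %llu-%llu (type %d)\n",
			       PFN_UP(ei->addr), pfn, ei->type);
	}
	return 0;
}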
diff --git a/arch/i386/kernel/geode.c b/arch/i386/kernel/geode.c
new file mode 100644
index 000000000000..41e8aec4c61d
--- /dev/null
+++ b/arch/i386/kernel/geode.c
@@ -0,0 +1,155 @@
1/*
2 * AMD Geode southbridge support code
3 * Copyright (C) 2006, Advanced Micro Devices, Inc.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public License
7 * as published by the Free Software Foundation.
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/ioport.h>
13#include <linux/io.h>
14#include <asm/msr.h>
15#include <asm/geode.h>
16
17static struct {
18 char *name;
19 u32 msr;
20 int size;
21 u32 base;
22} lbars[] = {
23 { "geode-pms", MSR_LBAR_PMS, LBAR_PMS_SIZE, 0 },
24 { "geode-acpi", MSR_LBAR_ACPI, LBAR_ACPI_SIZE, 0 },
25 { "geode-gpio", MSR_LBAR_GPIO, LBAR_GPIO_SIZE, 0 },
26 { "geode-mfgpt", MSR_LBAR_MFGPT, LBAR_MFGPT_SIZE, 0 }
27};
28
29static void __init init_lbars(void)
30{
31 u32 lo, hi;
32 int i;
33
34 for (i = 0; i < ARRAY_SIZE(lbars); i++) {
35 rdmsr(lbars[i].msr, lo, hi);
36 if (hi & 0x01)
37 lbars[i].base = lo & 0x0000ffff;
38
39 if (lbars[i].base == 0)
40 printk(KERN_ERR "geode: Couldn't initialize '%s'\n",
41 lbars[i].name);
42 }
43}
44
45int geode_get_dev_base(unsigned int dev)
46{
47 BUG_ON(dev >= ARRAY_SIZE(lbars));
48 return lbars[dev].base;
49}
50EXPORT_SYMBOL_GPL(geode_get_dev_base);
51
52/* === GPIO API === */
53
54void geode_gpio_set(unsigned int gpio, unsigned int reg)
55{
56 u32 base = geode_get_dev_base(GEODE_DEV_GPIO);
57
58 if (!base)
59 return;
60
61 if (gpio < 16)
62 outl(1 << gpio, base + reg);
63 else
64 outl(1 << (gpio - 16), base + 0x80 + reg);
65}
66EXPORT_SYMBOL_GPL(geode_gpio_set);
67
68void geode_gpio_clear(unsigned int gpio, unsigned int reg)
69{
70 u32 base = geode_get_dev_base(GEODE_DEV_GPIO);
71
72 if (!base)
73 return;
74
75 if (gpio < 16)
76 outl(1 << (gpio + 16), base + reg);
77 else
78 outl(1 << gpio, base + 0x80 + reg);
79}
80EXPORT_SYMBOL_GPL(geode_gpio_clear);
81
82int geode_gpio_isset(unsigned int gpio, unsigned int reg)
83{
84 u32 base = geode_get_dev_base(GEODE_DEV_GPIO);
85
86 if (!base)
87 return 0;
88
89 if (gpio < 16)
90 return (inl(base + reg) & (1 << gpio)) ? 1 : 0;
91 else
92 return (inl(base + 0x80 + reg) & (1 << (gpio - 16))) ? 1 : 0;
93}
94EXPORT_SYMBOL_GPL(geode_gpio_isset);
95
96void geode_gpio_set_irq(unsigned int group, unsigned int irq)
97{
98 u32 lo, hi;
99
100 if (group > 7 || irq > 15)
101 return;
102
103 rdmsr(MSR_PIC_ZSEL_HIGH, lo, hi);
104
105 lo &= ~(0xF << (group * 4));
106 lo |= (irq & 0xF) << (group * 4);
107
108 wrmsr(MSR_PIC_ZSEL_HIGH, lo, hi);
109}
110EXPORT_SYMBOL_GPL(geode_gpio_set_irq);
111
112void geode_gpio_setup_event(unsigned int gpio, int pair, int pme)
113{
114 u32 base = geode_get_dev_base(GEODE_DEV_GPIO);
115 u32 offset, shift, val;
116
117 if (gpio >= 24)
118 offset = GPIO_MAP_W;
119 else if (gpio >= 16)
120 offset = GPIO_MAP_Z;
121 else if (gpio >= 8)
122 offset = GPIO_MAP_Y;
123 else
124 offset = GPIO_MAP_X;
125
126 shift = (gpio % 8) * 4;
127
128 val = inl(base + offset);
129
130 /* Clear whatever was there before */
131 val &= ~(0xF << shift);
132
133 /* And set the new value */
134
135 val |= ((pair & 7) << shift);
136
137 /* Set the PME bit if this is a PME event */
138
139 if (pme)
140 val |= (1 << (shift + 3));
141
142 outl(val, base + offset);
143}
144EXPORT_SYMBOL_GPL(geode_gpio_setup_event);
145
146static int __init geode_southbridge_init(void)
147{
148 if (!is_geode())
149 return -ENODEV;
150
151 init_lbars();
152 return 0;
153}
154
155postcore_initcall(geode_southbridge_init);
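The GPIO helpers in the new file above rely on the Geode's atomic GPIO registers: within each 32-bit register the low 16 bits assert a function and the high 16 bits deassert it, and GPIOs 16-31 live in a second bank at offset 0x80. The pure arithmetic can be checked in user space; the sketch below only computes which bit and offset the set/clear paths would poke (the register value 0x04 and the helper name are illustrative, not taken from the patch).

#include <stdio.h>

static void describe(unsigned int gpio, unsigned int reg)
{
	unsigned int bank   = (gpio < 16) ? 0x00 : 0x80;	/* second bank for GPIOs 16-31 */
	unsigned int setbit = (gpio < 16) ? gpio : gpio - 16;

	printf("gpio %2u: set   writes bit %2u at base + 0x%02x\n",
	       gpio, setbit, bank + reg);
	printf("gpio %2u: clear writes bit %2u at base + 0x%02x\n",
	       gpio, setbit + 16, bank + reg);
}

int main(void)
{
	unsigned int reg = 0x04;	/* some per-function register; value is illustrative */

	describe(3, reg);
	describe(21, reg);
	return 0;
}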
diff --git a/arch/i386/kernel/hpet.c b/arch/i386/kernel/hpet.c
index 17d73459fc5f..533d4932bc79 100644
--- a/arch/i386/kernel/hpet.c
+++ b/arch/i386/kernel/hpet.c
@@ -5,6 +5,7 @@
5#include <linux/init.h> 5#include <linux/init.h>
6#include <linux/sysdev.h> 6#include <linux/sysdev.h>
7#include <linux/pm.h> 7#include <linux/pm.h>
8#include <linux/delay.h>
8 9
9#include <asm/hpet.h> 10#include <asm/hpet.h>
10#include <asm/io.h> 11#include <asm/io.h>
@@ -187,6 +188,10 @@ static void hpet_set_mode(enum clock_event_mode mode,
187 cfg &= ~HPET_TN_ENABLE; 188 cfg &= ~HPET_TN_ENABLE;
188 hpet_writel(cfg, HPET_T0_CFG); 189 hpet_writel(cfg, HPET_T0_CFG);
189 break; 190 break;
191
192 case CLOCK_EVT_MODE_RESUME:
193 hpet_enable_int();
194 break;
190 } 195 }
191} 196}
192 197
@@ -217,6 +222,7 @@ static struct clocksource clocksource_hpet = {
217 .mask = HPET_MASK, 222 .mask = HPET_MASK,
218 .shift = HPET_SHIFT, 223 .shift = HPET_SHIFT,
219 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 224 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
225 .resume = hpet_start_counter,
220}; 226};
221 227
222/* 228/*
@@ -226,7 +232,8 @@ int __init hpet_enable(void)
226{ 232{
227 unsigned long id; 233 unsigned long id;
228 uint64_t hpet_freq; 234 uint64_t hpet_freq;
229 u64 tmp; 235 u64 tmp, start, now;
236 cycle_t t1;
230 237
231 if (!is_hpet_capable()) 238 if (!is_hpet_capable())
232 return 0; 239 return 0;
@@ -273,6 +280,27 @@ int __init hpet_enable(void)
273 /* Start the counter */ 280 /* Start the counter */
274 hpet_start_counter(); 281 hpet_start_counter();
275 282
283 /* Verify whether hpet counter works */
284 t1 = read_hpet();
285 rdtscll(start);
286
287 /*
288 * We don't know the TSC frequency yet, but waiting for
289 * 200000 TSC cycles is safe:
290 * 4 GHz == 50us
291 * 1 GHz == 200us
292 */
293 do {
294 rep_nop();
295 rdtscll(now);
296 } while ((now - start) < 200000UL);
297
298 if (t1 == read_hpet()) {
299 printk(KERN_WARNING
300 "HPET counter not counting. HPET disabled\n");
301 goto out_nohpet;
302 }
303
276 /* Initialize and register HPET clocksource 304 /* Initialize and register HPET clocksource
277 * 305 *
278 * hpet period is in femto seconds per cycle 306 * hpet period is in femto seconds per cycle
@@ -291,7 +319,6 @@ int __init hpet_enable(void)
291 319
292 clocksource_register(&clocksource_hpet); 320 clocksource_register(&clocksource_hpet);
293 321
294
295 if (id & HPET_ID_LEGSUP) { 322 if (id & HPET_ID_LEGSUP) {
296 hpet_enable_int(); 323 hpet_enable_int();
297 hpet_reserve_platform_timers(id); 324 hpet_reserve_platform_timers(id);
@@ -299,7 +326,7 @@ int __init hpet_enable(void)
299 * Start hpet with the boot cpu mask and make it 326 * Start hpet with the boot cpu mask and make it
300 * global after the IO_APIC has been initialized. 327 * global after the IO_APIC has been initialized.
301 */ 328 */
302 hpet_clockevent.cpumask =cpumask_of_cpu(0); 329 hpet_clockevent.cpumask = cpumask_of_cpu(smp_processor_id());
303 clockevents_register_device(&hpet_clockevent); 330 clockevents_register_device(&hpet_clockevent);
304 global_clock_event = &hpet_clockevent; 331 global_clock_event = &hpet_clockevent;
305 return 1; 332 return 1;
@@ -524,68 +551,3 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
524 return IRQ_HANDLED; 551 return IRQ_HANDLED;
525} 552}
526#endif 553#endif
527
528
529/*
530 * Suspend/resume part
531 */
532
533#ifdef CONFIG_PM
534
535static int hpet_suspend(struct sys_device *sys_device, pm_message_t state)
536{
537 unsigned long cfg = hpet_readl(HPET_CFG);
538
539 cfg &= ~(HPET_CFG_ENABLE|HPET_CFG_LEGACY);
540 hpet_writel(cfg, HPET_CFG);
541
542 return 0;
543}
544
545static int hpet_resume(struct sys_device *sys_device)
546{
547 unsigned int id;
548
549 hpet_start_counter();
550
551 id = hpet_readl(HPET_ID);
552
553 if (id & HPET_ID_LEGSUP)
554 hpet_enable_int();
555
556 return 0;
557}
558
559static struct sysdev_class hpet_class = {
560 set_kset_name("hpet"),
561 .suspend = hpet_suspend,
562 .resume = hpet_resume,
563};
564
565static struct sys_device hpet_device = {
566 .id = 0,
567 .cls = &hpet_class,
568};
569
570
571static __init int hpet_register_sysfs(void)
572{
573 int err;
574
575 if (!is_hpet_capable())
576 return 0;
577
578 err = sysdev_class_register(&hpet_class);
579
580 if (!err) {
581 err = sysdev_register(&hpet_device);
582 if (err)
583 sysdev_class_unregister(&hpet_class);
584 }
585
586 return err;
587}
588
589device_initcall(hpet_register_sysfs);
590
591#endif
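The new sanity check added to hpet_enable() above reads the HPET main counter, spins for roughly 200000 TSC cycles (at least ~50us even at 4 GHz), and rereads it; if the value has not moved, the HPET is declared dead before it can be registered as a clocksource. A userspace sketch of the same spin-and-compare pattern follows; fake_hpet_counter is a stand-in for the memory-mapped counter, since user space cannot read the real register here, so the sketch always reports "not counting".

#include <stdio.h>
#include <x86intrin.h>

static volatile unsigned long long fake_hpet_counter;	/* stand-in for read_hpet() */

static unsigned long long read_counter(void)
{
	return fake_hpet_counter;
}

int main(void)
{
	unsigned long long t1 = read_counter();
	unsigned long long start = __rdtsc(), now;

	/* The TSC frequency is unknown this early, but 200000 cycles is
	 * plenty of time for a live HPET to tick at least once. */
	do {
		_mm_pause();		/* same idea as the kernel's rep_nop() */
		now = __rdtsc();
	} while (now - start < 200000ULL);

	if (t1 == read_counter())
		printf("counter not counting - would disable HPET\n");
	else
		printf("counter advanced - HPET usable\n");
	return 0;
}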
diff --git a/arch/i386/kernel/i8253.c b/arch/i386/kernel/i8253.c
index f8a3c4054c70..6d839f2f1b1a 100644
--- a/arch/i386/kernel/i8253.c
+++ b/arch/i386/kernel/i8253.c
@@ -3,18 +3,17 @@
3 * 3 *
4 */ 4 */
5#include <linux/clockchips.h> 5#include <linux/clockchips.h>
6#include <linux/spinlock.h> 6#include <linux/init.h>
7#include <linux/interrupt.h>
7#include <linux/jiffies.h> 8#include <linux/jiffies.h>
8#include <linux/sysdev.h>
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/init.h> 10#include <linux/spinlock.h>
11 11
12#include <asm/smp.h> 12#include <asm/smp.h>
13#include <asm/delay.h> 13#include <asm/delay.h>
14#include <asm/i8253.h> 14#include <asm/i8253.h>
15#include <asm/io.h> 15#include <asm/io.h>
16 16#include <asm/timer.h>
17#include "io_ports.h"
18 17
19DEFINE_SPINLOCK(i8253_lock); 18DEFINE_SPINLOCK(i8253_lock);
20EXPORT_SYMBOL(i8253_lock); 19EXPORT_SYMBOL(i8253_lock);
@@ -41,26 +40,27 @@ static void init_pit_timer(enum clock_event_mode mode,
41 case CLOCK_EVT_MODE_PERIODIC: 40 case CLOCK_EVT_MODE_PERIODIC:
42 /* binary, mode 2, LSB/MSB, ch 0 */ 41 /* binary, mode 2, LSB/MSB, ch 0 */
43 outb_p(0x34, PIT_MODE); 42 outb_p(0x34, PIT_MODE);
44 udelay(10);
45 outb_p(LATCH & 0xff , PIT_CH0); /* LSB */ 43 outb_p(LATCH & 0xff , PIT_CH0); /* LSB */
46 udelay(10);
47 outb(LATCH >> 8 , PIT_CH0); /* MSB */ 44 outb(LATCH >> 8 , PIT_CH0); /* MSB */
48 break; 45 break;
49 46
50 /*
51 * Avoid unnecessary state transitions, as it confuses
52 * Geode / Cyrix based boxen.
53 */
54 case CLOCK_EVT_MODE_SHUTDOWN: 47 case CLOCK_EVT_MODE_SHUTDOWN:
55 if (evt->mode == CLOCK_EVT_MODE_UNUSED)
56 break;
57 case CLOCK_EVT_MODE_UNUSED: 48 case CLOCK_EVT_MODE_UNUSED:
58 if (evt->mode == CLOCK_EVT_MODE_SHUTDOWN) 49 if (evt->mode == CLOCK_EVT_MODE_PERIODIC ||
59 break; 50 evt->mode == CLOCK_EVT_MODE_ONESHOT) {
51 outb_p(0x30, PIT_MODE);
52 outb_p(0, PIT_CH0);
53 outb_p(0, PIT_CH0);
54 }
55 break;
56
60 case CLOCK_EVT_MODE_ONESHOT: 57 case CLOCK_EVT_MODE_ONESHOT:
61 /* One shot setup */ 58 /* One shot setup */
62 outb_p(0x38, PIT_MODE); 59 outb_p(0x38, PIT_MODE);
63 udelay(10); 60 break;
61
62 case CLOCK_EVT_MODE_RESUME:
63 /* Nothing to do here */
64 break; 64 break;
65 } 65 }
66 spin_unlock_irqrestore(&i8253_lock, flags); 66 spin_unlock_irqrestore(&i8253_lock, flags);
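The control words used in the i8253 hunk above are opaque at first glance: 0x34 programs channel 0 as a rate generator (periodic), 0x38 as a software triggered strobe (one-shot), and 0x30 puts it in mode 0 before the shutdown path loads a zero count. A small decoder, written as a plain userspace program for illustration, spells out the 8254 control-word fields behind those constants.

#include <stdio.h>

static void decode(unsigned char cw)
{
	static const char *modes[] = {
		"0: interrupt on terminal count",
		"1: hardware one-shot",
		"2: rate generator",
		"3: square wave",
		"4: software triggered strobe",
		"5: hardware triggered strobe",
		"2: rate generator (alias)",
		"3: square wave (alias)",
	};

	/* Bits 7:6 channel, 5:4 access mode (3 = lobyte/hibyte), 3:1 mode, 0 BCD. */
	printf("0x%02x: channel %u, access %u, mode %s, %s\n",
	       cw, (cw >> 6) & 3, (cw >> 4) & 3, modes[(cw >> 1) & 7],
	       (cw & 1) ? "BCD" : "binary");
}

int main(void)
{
	decode(0x34);	/* CLOCK_EVT_MODE_PERIODIC */
	decode(0x38);	/* CLOCK_EVT_MODE_ONESHOT */
	decode(0x30);	/* shutdown path */
	return 0;
}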
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 21db8f56c9a1..893df8280756 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -353,14 +353,6 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
353# include <linux/slab.h> /* kmalloc() */ 353# include <linux/slab.h> /* kmalloc() */
354# include <linux/timer.h> /* time_after() */ 354# include <linux/timer.h> /* time_after() */
355 355
356#ifdef CONFIG_BALANCED_IRQ_DEBUG
357# define TDprintk(x...) do { printk("<%ld:%s:%d>: ", jiffies, __FILE__, __LINE__); printk(x); } while (0)
358# define Dprintk(x...) do { TDprintk(x); } while (0)
359# else
360# define TDprintk(x...)
361# define Dprintk(x...)
362# endif
363
364#define IRQBALANCE_CHECK_ARCH -999 356#define IRQBALANCE_CHECK_ARCH -999
365#define MAX_BALANCED_IRQ_INTERVAL (5*HZ) 357#define MAX_BALANCED_IRQ_INTERVAL (5*HZ)
366#define MIN_BALANCED_IRQ_INTERVAL (HZ/2) 358#define MIN_BALANCED_IRQ_INTERVAL (HZ/2)
@@ -443,7 +435,7 @@ static inline void balance_irq(int cpu, int irq)
443static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold) 435static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
444{ 436{
445 int i, j; 437 int i, j;
446 Dprintk("Rotating IRQs among CPUs.\n"); 438
447 for_each_online_cpu(i) { 439 for_each_online_cpu(i) {
448 for (j = 0; j < NR_IRQS; j++) { 440 for (j = 0; j < NR_IRQS; j++) {
449 if (!irq_desc[j].action) 441 if (!irq_desc[j].action)
@@ -560,19 +552,11 @@ tryanothercpu:
560 max_loaded = tmp_loaded; /* processor */ 552 max_loaded = tmp_loaded; /* processor */
561 imbalance = (max_cpu_irq - min_cpu_irq) / 2; 553 imbalance = (max_cpu_irq - min_cpu_irq) / 2;
562 554
563 Dprintk("max_loaded cpu = %d\n", max_loaded);
564 Dprintk("min_loaded cpu = %d\n", min_loaded);
565 Dprintk("max_cpu_irq load = %ld\n", max_cpu_irq);
566 Dprintk("min_cpu_irq load = %ld\n", min_cpu_irq);
567 Dprintk("load imbalance = %lu\n", imbalance);
568
569 /* if imbalance is less than approx 10% of max load, then 555 /* if imbalance is less than approx 10% of max load, then
570 * observe diminishing returns action. - quit 556 * observe diminishing returns action. - quit
571 */ 557 */
572 if (imbalance < (max_cpu_irq >> 3)) { 558 if (imbalance < (max_cpu_irq >> 3))
573 Dprintk("Imbalance too trivial\n");
574 goto not_worth_the_effort; 559 goto not_worth_the_effort;
575 }
576 560
577tryanotherirq: 561tryanotherirq:
578 /* if we select an IRQ to move that can't go where we want, then 562 /* if we select an IRQ to move that can't go where we want, then
@@ -629,9 +613,6 @@ tryanotherirq:
629 cpus_and(tmp, target_cpu_mask, allowed_mask); 613 cpus_and(tmp, target_cpu_mask, allowed_mask);
630 614
631 if (!cpus_empty(tmp)) { 615 if (!cpus_empty(tmp)) {
632
633 Dprintk("irq = %d moved to cpu = %d\n",
634 selected_irq, min_loaded);
635 /* mark for change destination */ 616 /* mark for change destination */
636 set_pending_irq(selected_irq, cpumask_of_cpu(min_loaded)); 617 set_pending_irq(selected_irq, cpumask_of_cpu(min_loaded));
637 618
@@ -651,7 +632,6 @@ not_worth_the_effort:
651 */ 632 */
652 balanced_irq_interval = min((long)MAX_BALANCED_IRQ_INTERVAL, 633 balanced_irq_interval = min((long)MAX_BALANCED_IRQ_INTERVAL,
653 balanced_irq_interval + BALANCED_IRQ_MORE_DELTA); 634 balanced_irq_interval + BALANCED_IRQ_MORE_DELTA);
654 Dprintk("IRQ worth rotating not found\n");
655 return; 635 return;
656} 636}
657 637
@@ -1902,7 +1882,7 @@ __setup("no_timer_check", notimercheck);
1902 * - if this function detects that timer IRQs are defunct, then we fall 1882 * - if this function detects that timer IRQs are defunct, then we fall
1903 * back to ISA timer IRQs 1883 * back to ISA timer IRQs
1904 */ 1884 */
1905int __init timer_irq_works(void) 1885static int __init timer_irq_works(void)
1906{ 1886{
1907 unsigned long t1 = jiffies; 1887 unsigned long t1 = jiffies;
1908 1888
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index ba44d40b066d..dd2b97fc00b2 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -149,15 +149,11 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
149 149
150#ifdef CONFIG_4KSTACKS 150#ifdef CONFIG_4KSTACKS
151 151
152/*
153 * These should really be __section__(".bss.page_aligned") as well, but
154 * gcc's 3.0 and earlier don't handle that correctly.
155 */
156static char softirq_stack[NR_CPUS * THREAD_SIZE] 152static char softirq_stack[NR_CPUS * THREAD_SIZE]
157 __attribute__((__aligned__(THREAD_SIZE))); 153 __attribute__((__section__(".bss.page_aligned")));
158 154
159static char hardirq_stack[NR_CPUS * THREAD_SIZE] 155static char hardirq_stack[NR_CPUS * THREAD_SIZE]
160 __attribute__((__aligned__(THREAD_SIZE))); 156 __attribute__((__section__(".bss.page_aligned")));
161 157
162/* 158/*
163 * allocate per-cpu stacks for hardirq and for softirq processing 159 * allocate per-cpu stacks for hardirq and for softirq processing
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 6c49acb96982..84664710b784 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -300,6 +300,7 @@ early_param("idle", idle_setup);
300void show_regs(struct pt_regs * regs) 300void show_regs(struct pt_regs * regs)
301{ 301{
302 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; 302 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
303 unsigned long d0, d1, d2, d3, d6, d7;
303 304
304 printk("\n"); 305 printk("\n");
305 printk("Pid: %d, comm: %20s\n", current->pid, current->comm); 306 printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
@@ -324,6 +325,17 @@ void show_regs(struct pt_regs * regs)
324 cr3 = read_cr3(); 325 cr3 = read_cr3();
325 cr4 = read_cr4_safe(); 326 cr4 = read_cr4_safe();
326 printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4); 327 printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
328
329 get_debugreg(d0, 0);
330 get_debugreg(d1, 1);
331 get_debugreg(d2, 2);
332 get_debugreg(d3, 3);
333 printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
334 d0, d1, d2, d3);
335 get_debugreg(d6, 6);
336 get_debugreg(d7, 7);
337 printk("DR6: %08lx DR7: %08lx\n", d6, d7);
338
327 show_trace(NULL, regs, &regs->esp); 339 show_trace(NULL, regs, &regs->esp);
328} 340}
329 341
diff --git a/arch/i386/kernel/reboot.c b/arch/i386/kernel/reboot.c
index 5513f8d5b5be..0d796248866c 100644
--- a/arch/i386/kernel/reboot.c
+++ b/arch/i386/kernel/reboot.c
@@ -113,6 +113,15 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
113 DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 300/"), 113 DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 300/"),
114 }, 114 },
115 }, 115 },
116 { /* Handle problems with rebooting on Dell Optiplex 745's SFF*/
117 .callback = set_bios_reboot,
118 .ident = "Dell OptiPlex 745",
119 .matches = {
120 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
121 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
122 DMI_MATCH(DMI_BOARD_NAME, "0WF810"),
123 },
124 },
116 { /* Handle problems with rebooting on Dell 2400's */ 125 { /* Handle problems with rebooting on Dell 2400's */
117 .callback = set_bios_reboot, 126 .callback = set_bios_reboot,
118 .ident = "Dell PowerEdge 2400", 127 .ident = "Dell PowerEdge 2400",
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 74871d066c2b..d474cd639bcb 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -273,18 +273,18 @@ unsigned long __init find_max_low_pfn(void)
273 printk(KERN_WARNING "Warning only %ldMB will be used.\n", 273 printk(KERN_WARNING "Warning only %ldMB will be used.\n",
274 MAXMEM>>20); 274 MAXMEM>>20);
275 if (max_pfn > MAX_NONPAE_PFN) 275 if (max_pfn > MAX_NONPAE_PFN)
276 printk(KERN_WARNING "Use a PAE enabled kernel.\n"); 276 printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
277 else 277 else
278 printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n"); 278 printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
279 max_pfn = MAXMEM_PFN; 279 max_pfn = MAXMEM_PFN;
280#else /* !CONFIG_HIGHMEM */ 280#else /* !CONFIG_HIGHMEM */
281#ifndef CONFIG_X86_PAE 281#ifndef CONFIG_HIGHMEM64G
282 if (max_pfn > MAX_NONPAE_PFN) { 282 if (max_pfn > MAX_NONPAE_PFN) {
283 max_pfn = MAX_NONPAE_PFN; 283 max_pfn = MAX_NONPAE_PFN;
284 printk(KERN_WARNING "Warning only 4GB will be used.\n"); 284 printk(KERN_WARNING "Warning only 4GB will be used.\n");
285 printk(KERN_WARNING "Use a PAE enabled kernel.\n"); 285 printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
286 } 286 }
287#endif /* !CONFIG_X86_PAE */ 287#endif /* !CONFIG_HIGHMEM64G */
288#endif /* !CONFIG_HIGHMEM */ 288#endif /* !CONFIG_HIGHMEM */
289 } else { 289 } else {
290 if (highmem_pages == -1) 290 if (highmem_pages == -1)
@@ -466,7 +466,7 @@ void __init setup_bootmem_allocator(void)
466 * 466 *
467 * This should all compile down to nothing when NUMA is off. 467 * This should all compile down to nothing when NUMA is off.
468 */ 468 */
469void __init remapped_pgdat_init(void) 469static void __init remapped_pgdat_init(void)
470{ 470{
471 int nid; 471 int nid;
472 472
@@ -640,6 +640,7 @@ void __init setup_arch(char **cmdline_p)
640#endif 640#endif
641 641
642 e820_register_memory(); 642 e820_register_memory();
643 e820_mark_nosave_regions();
643 644
644#ifdef CONFIG_VT 645#ifdef CONFIG_VT
645#if defined(CONFIG_VGA_CONSOLE) 646#if defined(CONFIG_VGA_CONSOLE)
diff --git a/arch/i386/kernel/sysenter.c b/arch/i386/kernel/sysenter.c
index ff4ee6f3326b..6deb159d08e0 100644
--- a/arch/i386/kernel/sysenter.c
+++ b/arch/i386/kernel/sysenter.c
@@ -336,7 +336,9 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
336 336
337int in_gate_area(struct task_struct *task, unsigned long addr) 337int in_gate_area(struct task_struct *task, unsigned long addr)
338{ 338{
339 return 0; 339 const struct vm_area_struct *vma = get_gate_vma(task);
340
341 return vma && addr >= vma->vm_start && addr < vma->vm_end;
340} 342}
341 343
342int in_gate_area_no_task(unsigned long addr) 344int in_gate_area_no_task(unsigned long addr)
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index a665df61f08c..19a6c678d02e 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -207,55 +207,9 @@ unsigned long read_persistent_clock(void)
207 return retval; 207 return retval;
208} 208}
209 209
210static void sync_cmos_clock(unsigned long dummy); 210int update_persistent_clock(struct timespec now)
211
212static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
213int no_sync_cmos_clock;
214
215static void sync_cmos_clock(unsigned long dummy)
216{
217 struct timeval now, next;
218 int fail = 1;
219
220 /*
221 * If we have an externally synchronized Linux clock, then update
222 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
223 * called as close as possible to 500 ms before the new second starts.
224 * This code is run on a timer. If the clock is set, that timer
225 * may not expire at the correct time. Thus, we adjust...
226 */
227 if (!ntp_synced())
228 /*
229 * Not synced, exit, do not restart a timer (if one is
230 * running, let it run out).
231 */
232 return;
233
234 do_gettimeofday(&now);
235 if (now.tv_usec >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
236 now.tv_usec <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2)
237 fail = set_rtc_mmss(now.tv_sec);
238
239 next.tv_usec = USEC_AFTER - now.tv_usec;
240 if (next.tv_usec <= 0)
241 next.tv_usec += USEC_PER_SEC;
242
243 if (!fail)
244 next.tv_sec = 659;
245 else
246 next.tv_sec = 0;
247
248 if (next.tv_usec >= USEC_PER_SEC) {
249 next.tv_sec++;
250 next.tv_usec -= USEC_PER_SEC;
251 }
252 mod_timer(&sync_cmos_timer, jiffies + timeval_to_jiffies(&next));
253}
254
255void notify_arch_cmos_timer(void)
256{ 211{
257 if (!no_sync_cmos_clock) 212 return set_rtc_mmss(now.tv_sec);
258 mod_timer(&sync_cmos_timer, jiffies + 1);
259} 213}
260 214
261extern void (*late_time_init)(void); 215extern void (*late_time_init)(void);
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 3e7753c78b9b..57772a18c394 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -152,7 +152,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
152 if (!stack) { 152 if (!stack) {
153 unsigned long dummy; 153 unsigned long dummy;
154 stack = &dummy; 154 stack = &dummy;
155 if (task && task != current) 155 if (task != current)
156 stack = (unsigned long *)task->thread.esp; 156 stack = (unsigned long *)task->thread.esp;
157 } 157 }
158 158
@@ -211,6 +211,7 @@ static void print_trace_address(void *data, unsigned long addr)
211{ 211{
212 printk("%s [<%08lx>] ", (char *)data, addr); 212 printk("%s [<%08lx>] ", (char *)data, addr);
213 print_symbol("%s\n", addr); 213 print_symbol("%s\n", addr);
214 touch_nmi_watchdog();
214} 215}
215 216
216static struct stacktrace_ops print_trace_ops = { 217static struct stacktrace_ops print_trace_ops = {
diff --git a/arch/i386/kernel/vmiclock.c b/arch/i386/kernel/vmiclock.c
index f9b845f4e692..b1b5ab08b26e 100644
--- a/arch/i386/kernel/vmiclock.c
+++ b/arch/i386/kernel/vmiclock.c
@@ -32,6 +32,7 @@
32#include <asm/apicdef.h> 32#include <asm/apicdef.h>
33#include <asm/apic.h> 33#include <asm/apic.h>
34#include <asm/timer.h> 34#include <asm/timer.h>
35#include <asm/i8253.h>
35 36
36#include <irq_vectors.h> 37#include <irq_vectors.h>
37#include "io_ports.h" 38#include "io_ports.h"
@@ -142,6 +143,7 @@ static void vmi_timer_set_mode(enum clock_event_mode mode,
142 143
143 switch (mode) { 144 switch (mode) {
144 case CLOCK_EVT_MODE_ONESHOT: 145 case CLOCK_EVT_MODE_ONESHOT:
146 case CLOCK_EVT_MODE_RESUME:
145 break; 147 break;
146 case CLOCK_EVT_MODE_PERIODIC: 148 case CLOCK_EVT_MODE_PERIODIC:
147 cycles_per_hz = vmi_timer_ops.get_cycle_frequency(); 149 cycles_per_hz = vmi_timer_ops.get_cycle_frequency();
diff --git a/arch/i386/lib/Makefile b/arch/i386/lib/Makefile
index 22d8ac5815f0..4d105fdfe817 100644
--- a/arch/i386/lib/Makefile
+++ b/arch/i386/lib/Makefile
@@ -4,7 +4,7 @@
4 4
5 5
6lib-y = checksum.o delay.o usercopy.o getuser.o putuser.o memcpy.o strstr.o \ 6lib-y = checksum.o delay.o usercopy.o getuser.o putuser.o memcpy.o strstr.o \
7 bitops.o semaphore.o 7 bitops.o semaphore.o string.o
8 8
9lib-$(CONFIG_X86_USE_3DNOW) += mmx.o 9lib-$(CONFIG_X86_USE_3DNOW) += mmx.o
10 10
diff --git a/arch/i386/lib/string.c b/arch/i386/lib/string.c
new file mode 100644
index 000000000000..2c773fefa3dd
--- /dev/null
+++ b/arch/i386/lib/string.c
@@ -0,0 +1,257 @@
1/*
2 * Most of the string-functions are rather heavily hand-optimized,
3 * see especially strsep,strstr,str[c]spn. They should work, but are not
4 * very easy to understand. Everything is done entirely within the register
5 * set, making the functions fast and clean. String instructions have been
6 * used through-out, making for "slightly" unclear code :-)
7 *
8 * AK: On P4 and K7 using non string instruction implementations might be faster
9 * for large memory blocks. But most of them are unlikely to be used on large
10 * strings.
11 */
12
13#include <linux/string.h>
14#include <linux/module.h>
15
16#ifdef __HAVE_ARCH_STRCPY
17char *strcpy(char * dest,const char *src)
18{
19 int d0, d1, d2;
20 asm volatile( "1:\tlodsb\n\t"
21 "stosb\n\t"
22 "testb %%al,%%al\n\t"
23 "jne 1b"
24 : "=&S" (d0), "=&D" (d1), "=&a" (d2)
25 :"0" (src),"1" (dest) : "memory");
26 return dest;
27}
28EXPORT_SYMBOL(strcpy);
29#endif
30
31#ifdef __HAVE_ARCH_STRNCPY
32char *strncpy(char * dest,const char *src,size_t count)
33{
34 int d0, d1, d2, d3;
35 asm volatile( "1:\tdecl %2\n\t"
36 "js 2f\n\t"
37 "lodsb\n\t"
38 "stosb\n\t"
39 "testb %%al,%%al\n\t"
40 "jne 1b\n\t"
41 "rep\n\t"
42 "stosb\n"
43 "2:"
44 : "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3)
45 :"0" (src),"1" (dest),"2" (count) : "memory");
46 return dest;
47}
48EXPORT_SYMBOL(strncpy);
49#endif
50
51#ifdef __HAVE_ARCH_STRCAT
52char *strcat(char * dest,const char * src)
53{
54 int d0, d1, d2, d3;
55 asm volatile( "repne\n\t"
56 "scasb\n\t"
57 "decl %1\n"
58 "1:\tlodsb\n\t"
59 "stosb\n\t"
60 "testb %%al,%%al\n\t"
61 "jne 1b"
62 : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
63 : "0" (src), "1" (dest), "2" (0), "3" (0xffffffffu): "memory");
64 return dest;
65}
66EXPORT_SYMBOL(strcat);
67#endif
68
69#ifdef __HAVE_ARCH_STRNCAT
70char *strncat(char * dest,const char * src,size_t count)
71{
72 int d0, d1, d2, d3;
73 asm volatile( "repne\n\t"
74 "scasb\n\t"
75 "decl %1\n\t"
76 "movl %8,%3\n"
77 "1:\tdecl %3\n\t"
78 "js 2f\n\t"
79 "lodsb\n\t"
80 "stosb\n\t"
81 "testb %%al,%%al\n\t"
82 "jne 1b\n"
83 "2:\txorl %2,%2\n\t"
84 "stosb"
85 : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
86 : "0" (src),"1" (dest),"2" (0),"3" (0xffffffffu), "g" (count)
87 : "memory");
88 return dest;
89}
90EXPORT_SYMBOL(strncat);
91#endif
92
93#ifdef __HAVE_ARCH_STRCMP
94int strcmp(const char * cs,const char * ct)
95{
96 int d0, d1;
97 int res;
98 asm volatile( "1:\tlodsb\n\t"
99 "scasb\n\t"
100 "jne 2f\n\t"
101 "testb %%al,%%al\n\t"
102 "jne 1b\n\t"
103 "xorl %%eax,%%eax\n\t"
104 "jmp 3f\n"
105 "2:\tsbbl %%eax,%%eax\n\t"
106 "orb $1,%%al\n"
107 "3:"
108 :"=a" (res), "=&S" (d0), "=&D" (d1)
109 :"1" (cs),"2" (ct)
110 :"memory");
111 return res;
112}
113EXPORT_SYMBOL(strcmp);
114#endif
115
116#ifdef __HAVE_ARCH_STRNCMP
117int strncmp(const char * cs,const char * ct,size_t count)
118{
119 int res;
120 int d0, d1, d2;
121 asm volatile( "1:\tdecl %3\n\t"
122 "js 2f\n\t"
123 "lodsb\n\t"
124 "scasb\n\t"
125 "jne 3f\n\t"
126 "testb %%al,%%al\n\t"
127 "jne 1b\n"
128 "2:\txorl %%eax,%%eax\n\t"
129 "jmp 4f\n"
130 "3:\tsbbl %%eax,%%eax\n\t"
131 "orb $1,%%al\n"
132 "4:"
133 :"=a" (res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
134 :"1" (cs),"2" (ct),"3" (count)
135 :"memory");
136 return res;
137}
138EXPORT_SYMBOL(strncmp);
139#endif
140
141#ifdef __HAVE_ARCH_STRCHR
142char *strchr(const char * s, int c)
143{
144 int d0;
145 char * res;
146 asm volatile( "movb %%al,%%ah\n"
147 "1:\tlodsb\n\t"
148 "cmpb %%ah,%%al\n\t"
149 "je 2f\n\t"
150 "testb %%al,%%al\n\t"
151 "jne 1b\n\t"
152 "movl $1,%1\n"
153 "2:\tmovl %1,%0\n\t"
154 "decl %0"
155 :"=a" (res), "=&S" (d0)
156 :"1" (s),"0" (c)
157 :"memory");
158 return res;
159}
160EXPORT_SYMBOL(strchr);
161#endif
162
163#ifdef __HAVE_ARCH_STRRCHR
164char *strrchr(const char * s, int c)
165{
166 int d0, d1;
167 char * res;
168 asm volatile( "movb %%al,%%ah\n"
169 "1:\tlodsb\n\t"
170 "cmpb %%ah,%%al\n\t"
171 "jne 2f\n\t"
172 "leal -1(%%esi),%0\n"
173 "2:\ttestb %%al,%%al\n\t"
174 "jne 1b"
175 :"=g" (res), "=&S" (d0), "=&a" (d1)
176 :"0" (0),"1" (s),"2" (c)
177 :"memory");
178 return res;
179}
180EXPORT_SYMBOL(strrchr);
181#endif
182
183#ifdef __HAVE_ARCH_STRLEN
184size_t strlen(const char * s)
185{
186 int d0;
187 int res;
188 asm volatile( "repne\n\t"
189 "scasb\n\t"
190 "notl %0\n\t"
191 "decl %0"
192 :"=c" (res), "=&D" (d0)
193 :"1" (s),"a" (0), "0" (0xffffffffu)
194 :"memory");
195 return res;
196}
197EXPORT_SYMBOL(strlen);
198#endif
199
200#ifdef __HAVE_ARCH_MEMCHR
201void *memchr(const void *cs,int c,size_t count)
202{
203 int d0;
204 void *res;
205 if (!count)
206 return NULL;
207 asm volatile( "repne\n\t"
208 "scasb\n\t"
209 "je 1f\n\t"
210 "movl $1,%0\n"
211 "1:\tdecl %0"
212 :"=D" (res), "=&c" (d0)
213 :"a" (c),"0" (cs),"1" (count)
214 :"memory");
215 return res;
216}
217EXPORT_SYMBOL(memchr);
218#endif
219
220#ifdef __HAVE_ARCH_MEMSCAN
221void *memscan(void * addr, int c, size_t size)
222{
223 if (!size)
224 return addr;
225 asm volatile("repnz; scasb\n\t"
226 "jnz 1f\n\t"
227 "dec %%edi\n"
228 "1:"
229 : "=D" (addr), "=c" (size)
230 : "0" (addr), "1" (size), "a" (c)
231 : "memory");
232 return addr;
233}
234EXPORT_SYMBOL(memscan);
235#endif
236
237#ifdef __HAVE_ARCH_STRNLEN
238size_t strnlen(const char *s, size_t count)
239{
240 int d0;
241 int res;
242 asm volatile( "movl %2,%0\n\t"
243 "jmp 2f\n"
244 "1:\tcmpb $0,(%0)\n\t"
245 "je 3f\n\t"
246 "incl %0\n"
247 "2:\tdecl %1\n\t"
248 "cmpl $-1,%1\n\t"
249 "jne 1b\n"
250 "3:\tsubl %2,%0"
251 :"=a" (res), "=&d" (d0)
252 :"c" (s),"1" (count)
253 :"memory");
254 return res;
255}
256EXPORT_SYMBOL(strnlen);
257#endif
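Two of the routines in the new string.c above have return conventions that are easy to misread in the inline assembly: memscan() returns a pointer just past the scanned region when nothing matches (not NULL, unlike memchr), and strnlen() caps the reported length at count. The plain-C reference versions below, written only to document the expected behaviour, are not part of the patch.

#include <stdio.h>
#include <string.h>

/* Reference memscan(): pointer to the first byte equal to c, or
 * addr + size if no byte matches. */
static void *ref_memscan(void *addr, int c, size_t size)
{
	unsigned char *p = addr;

	while (size) {
		if (*p == (unsigned char)c)
			return p;
		p++;
		size--;
	}
	return p;
}

/* Reference strnlen(): length of s, capped at count. */
static size_t ref_strnlen(const char *s, size_t count)
{
	size_t n = 0;

	while (n < count && s[n])
		n++;
	return n;
}

int main(void)
{
	char buf[] = "abcdef";

	printf("memscan hit:  offset %td\n", (char *)ref_memscan(buf, 'd', 6) - buf);
	printf("memscan miss: offset %td\n", (char *)ref_memscan(buf, 'z', 6) - buf);
	printf("strnlen(\"abcdef\", 4) = %zu\n", ref_strnlen(buf, 4));
	return 0;
}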
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 6e72f22e6bbd..e1a9a805c445 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -471,6 +471,10 @@ void zap_low_mappings (void)
471 flush_tlb_all(); 471 flush_tlb_all();
472} 472}
473 473
474int nx_enabled = 0;
475
476#ifdef CONFIG_X86_PAE
477
474static int disable_nx __initdata = 0; 478static int disable_nx __initdata = 0;
475u64 __supported_pte_mask __read_mostly = ~_PAGE_NX; 479u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;
476EXPORT_SYMBOL_GPL(__supported_pte_mask); 480EXPORT_SYMBOL_GPL(__supported_pte_mask);
@@ -500,9 +504,6 @@ static int __init noexec_setup(char *str)
500} 504}
501early_param("noexec", noexec_setup); 505early_param("noexec", noexec_setup);
502 506
503int nx_enabled = 0;
504#ifdef CONFIG_X86_PAE
505
506static void __init set_nx(void) 507static void __init set_nx(void)
507{ 508{
508 unsigned int v[4], l, h; 509 unsigned int v[4], l, h;
diff --git a/arch/i386/mm/ioremap.c b/arch/i386/mm/ioremap.c
index fff08ae7b5ed..0b278315d737 100644
--- a/arch/i386/mm/ioremap.c
+++ b/arch/i386/mm/ioremap.c
@@ -196,7 +196,7 @@ void iounmap(volatile void __iomem *addr)
196 /* Reset the direct mapping. Can block */ 196 /* Reset the direct mapping. Can block */
197 if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) { 197 if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) {
198 change_page_attr(virt_to_page(__va(p->phys_addr)), 198 change_page_attr(virt_to_page(__va(p->phys_addr)),
199 p->size >> PAGE_SHIFT, 199 get_vm_area_size(p) >> PAGE_SHIFT,
200 PAGE_KERNEL); 200 PAGE_KERNEL);
201 global_flush_tlb(); 201 global_flush_tlb();
202 } 202 }
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index 37992ffb1633..8927222b3ab2 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -82,7 +82,7 @@ static void flush_kernel_map(void *arg)
82 struct page *p; 82 struct page *p;
83 83
84 /* High level code is not ready for clflush yet */ 84 /* High level code is not ready for clflush yet */
85 if (0 && cpu_has_clflush) { 85 if (cpu_has_clflush) {
86 list_for_each_entry (p, lh, lru) 86 list_for_each_entry (p, lh, lru)
87 cache_flush_page(p); 87 cache_flush_page(p);
88 } else if (boot_cpu_data.x86_model >= 4) 88 } else if (boot_cpu_data.x86_model >= 4)
@@ -136,6 +136,12 @@ static inline void revert_page(struct page *kpte_page, unsigned long address)
136 ref_prot)); 136 ref_prot));
137} 137}
138 138
139static inline void save_page(struct page *kpte_page)
140{
141 if (!test_and_set_bit(PG_arch_1, &kpte_page->flags))
142 list_add(&kpte_page->lru, &df_list);
143}
144
139static int 145static int
140__change_page_attr(struct page *page, pgprot_t prot) 146__change_page_attr(struct page *page, pgprot_t prot)
141{ 147{
@@ -150,6 +156,9 @@ __change_page_attr(struct page *page, pgprot_t prot)
150 if (!kpte) 156 if (!kpte)
151 return -EINVAL; 157 return -EINVAL;
152 kpte_page = virt_to_page(kpte); 158 kpte_page = virt_to_page(kpte);
159 BUG_ON(PageLRU(kpte_page));
160 BUG_ON(PageCompound(kpte_page));
161
153 if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) { 162 if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
154 if (!pte_huge(*kpte)) { 163 if (!pte_huge(*kpte)) {
155 set_pte_atomic(kpte, mk_pte(page, prot)); 164 set_pte_atomic(kpte, mk_pte(page, prot));
@@ -179,11 +188,11 @@ __change_page_attr(struct page *page, pgprot_t prot)
179 * time (not via split_large_page) and in turn we must not 188 * time (not via split_large_page) and in turn we must not
180 * replace it with a largepage. 189 * replace it with a largepage.
181 */ 190 */
191
192 save_page(kpte_page);
182 if (!PageReserved(kpte_page)) { 193 if (!PageReserved(kpte_page)) {
183 if (cpu_has_pse && (page_private(kpte_page) == 0)) { 194 if (cpu_has_pse && (page_private(kpte_page) == 0)) {
184 ClearPagePrivate(kpte_page);
185 paravirt_release_pt(page_to_pfn(kpte_page)); 195 paravirt_release_pt(page_to_pfn(kpte_page));
186 list_add(&kpte_page->lru, &df_list);
187 revert_page(kpte_page, address); 196 revert_page(kpte_page, address);
188 } 197 }
189 } 198 }
@@ -236,6 +245,11 @@ void global_flush_tlb(void)
236 spin_unlock_irq(&cpa_lock); 245 spin_unlock_irq(&cpa_lock);
237 flush_map(&l); 246 flush_map(&l);
238 list_for_each_entry_safe(pg, next, &l, lru) { 247 list_for_each_entry_safe(pg, next, &l, lru) {
248 list_del(&pg->lru);
249 clear_bit(PG_arch_1, &pg->flags);
250 if (PageReserved(pg) || !cpu_has_pse || page_private(pg) != 0)
251 continue;
252 ClearPagePrivate(pg);
239 __free_page(pg); 253 __free_page(pg);
240 } 254 }
241} 255}
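The pageattr.c hunks above introduce save_page(), which uses test_and_set_bit() on PG_arch_1 so a kpte page is queued on the deferred-flush list at most once, and global_flush_tlb() clears the bit again when it drains the list. The same queue-once idiom can be modelled in user space with C11 atomics; the sketch below is only an analogue of that pattern, with invented names, not the kernel code.

#include <stdatomic.h>
#include <stdio.h>

#define PG_PENDING 0x1u		/* stand-in for PG_arch_1 */

struct fake_page {
	atomic_uint flags;
	int id;
};

/* Queue the page for deferred work only if it is not already queued. */
static int queue_once(struct fake_page *p)
{
	unsigned int old = atomic_fetch_or(&p->flags, PG_PENDING);

	return !(old & PG_PENDING);	/* 1 = first time, add to list */
}

int main(void)
{
	struct fake_page pg = { .flags = 0, .id = 42 };

	printf("first  call queues page %d: %s\n", pg.id, queue_once(&pg) ? "yes" : "no");
	printf("second call queues page %d: %s\n", pg.id, queue_once(&pg) ? "yes" : "no");

	/* Draining the list clears the flag so the page can be queued again. */
	atomic_fetch_and(&pg.flags, ~PG_PENDING);
	printf("after clear, queues page %d: %s\n", pg.id, queue_once(&pg) ? "yes" : "no");
	return 0;
}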
diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c
index 8d7c0864cc04..01437c46baae 100644
--- a/arch/i386/mm/pgtable.c
+++ b/arch/i386/mm/pgtable.c
@@ -235,7 +235,7 @@ static inline void pgd_list_del(pgd_t *pgd)
235 235
236#if (PTRS_PER_PMD == 1) 236#if (PTRS_PER_PMD == 1)
237/* Non-PAE pgd constructor */ 237/* Non-PAE pgd constructor */
238void pgd_ctor(void *pgd) 238static void pgd_ctor(void *pgd)
239{ 239{
240 unsigned long flags; 240 unsigned long flags;
241 241
@@ -257,7 +257,7 @@ void pgd_ctor(void *pgd)
257} 257}
258#else /* PTRS_PER_PMD > 1 */ 258#else /* PTRS_PER_PMD > 1 */
259/* PAE pgd constructor */ 259/* PAE pgd constructor */
260void pgd_ctor(void *pgd) 260static void pgd_ctor(void *pgd)
261{ 261{
262 /* PAE, kernel PMD may be shared */ 262 /* PAE, kernel PMD may be shared */
263 263
@@ -276,7 +276,7 @@ void pgd_ctor(void *pgd)
276} 276}
277#endif /* PTRS_PER_PMD */ 277#endif /* PTRS_PER_PMD */
278 278
279void pgd_dtor(void *pgd) 279static void pgd_dtor(void *pgd)
280{ 280{
281 unsigned long flags; /* can be called from interrupt context */ 281 unsigned long flags; /* can be called from interrupt context */
282 282
diff --git a/arch/i386/pci/acpi.c b/arch/i386/pci/acpi.c
index b33aea845f58..bc8a44bddaa7 100644
--- a/arch/i386/pci/acpi.c
+++ b/arch/i386/pci/acpi.c
@@ -8,20 +8,42 @@
8struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int domain, int busnum) 8struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int domain, int busnum)
9{ 9{
10 struct pci_bus *bus; 10 struct pci_bus *bus;
11 struct pci_sysdata *sd;
12 int pxm;
13
14 /* Allocate per-root-bus (not per bus) arch-specific data.
15 * TODO: leak; this memory is never freed.
16 * It's arguable whether it's worth the trouble to care.
17 */
18 sd = kzalloc(sizeof(*sd), GFP_KERNEL);
19 if (!sd) {
20 printk(KERN_ERR "PCI: OOM, not probing PCI bus %02x\n", busnum);
21 return NULL;
22 }
11 23
12 if (domain != 0) { 24 if (domain != 0) {
13 printk(KERN_WARNING "PCI: Multiple domains not supported\n"); 25 printk(KERN_WARNING "PCI: Multiple domains not supported\n");
26 kfree(sd);
14 return NULL; 27 return NULL;
15 } 28 }
16 29
17 bus = pcibios_scan_root(busnum); 30 sd->node = -1;
31
32 pxm = acpi_get_pxm(device->handle);
33#ifdef CONFIG_ACPI_NUMA
34 if (pxm >= 0)
35 sd->node = pxm_to_node(pxm);
36#endif
37
38 bus = pci_scan_bus_parented(NULL, busnum, &pci_root_ops, sd);
39 if (!bus)
40 kfree(sd);
41
18#ifdef CONFIG_ACPI_NUMA 42#ifdef CONFIG_ACPI_NUMA
19 if (bus != NULL) { 43 if (bus != NULL) {
20 int pxm = acpi_get_pxm(device->handle);
21 if (pxm >= 0) { 44 if (pxm >= 0) {
22 bus->sysdata = (void *)(unsigned long)pxm_to_node(pxm); 45 printk("bus %d -> pxm %d -> node %d\n",
23 printk("bus %d -> pxm %d -> node %ld\n", 46 busnum, pxm, sd->node);
24 busnum, pxm, (long)(bus->sysdata));
25 } 47 }
26 } 48 }
27#endif 49#endif
diff --git a/arch/i386/pci/common.c b/arch/i386/pci/common.c
index 3f78d4d8ecf3..85503deeda46 100644
--- a/arch/i386/pci/common.c
+++ b/arch/i386/pci/common.c
@@ -293,6 +293,7 @@ static struct dmi_system_id __devinitdata pciprobe_dmi_table[] = {
293struct pci_bus * __devinit pcibios_scan_root(int busnum) 293struct pci_bus * __devinit pcibios_scan_root(int busnum)
294{ 294{
295 struct pci_bus *bus = NULL; 295 struct pci_bus *bus = NULL;
296 struct pci_sysdata *sd;
296 297
297 dmi_check_system(pciprobe_dmi_table); 298 dmi_check_system(pciprobe_dmi_table);
298 299
@@ -303,9 +304,19 @@ struct pci_bus * __devinit pcibios_scan_root(int busnum)
303 } 304 }
304 } 305 }
305 306
307 /* Allocate per-root-bus (not per bus) arch-specific data.
308 * TODO: leak; this memory is never freed.
309 * It's arguable whether it's worth the trouble to care.
310 */
311 sd = kzalloc(sizeof(*sd), GFP_KERNEL);
312 if (!sd) {
313 printk(KERN_ERR "PCI: OOM, not probing PCI bus %02x\n", busnum);
314 return NULL;
315 }
316
306 printk(KERN_DEBUG "PCI: Probing PCI hardware (bus %02x)\n", busnum); 317 printk(KERN_DEBUG "PCI: Probing PCI hardware (bus %02x)\n", busnum);
307 318
308 return pci_scan_bus_parented(NULL, busnum, &pci_root_ops, NULL); 319 return pci_scan_bus_parented(NULL, busnum, &pci_root_ops, sd);
309} 320}
310 321
311extern u8 pci_cache_line_size; 322extern u8 pci_cache_line_size;
diff --git a/arch/i386/pci/mmconfig-shared.c b/arch/i386/pci/mmconfig-shared.c
index c7cabeed4d7b..4df637e34f81 100644
--- a/arch/i386/pci/mmconfig-shared.c
+++ b/arch/i386/pci/mmconfig-shared.c
@@ -24,6 +24,9 @@
24 24
25DECLARE_BITMAP(pci_mmcfg_fallback_slots, 32*PCI_MMCFG_MAX_CHECK_BUS); 25DECLARE_BITMAP(pci_mmcfg_fallback_slots, 32*PCI_MMCFG_MAX_CHECK_BUS);
26 26
27/* Indicate if the mmcfg resources have been placed into the resource table. */
28static int __initdata pci_mmcfg_resources_inserted;
29
27/* K8 systems have some devices (typically in the builtin northbridge) 30/* K8 systems have some devices (typically in the builtin northbridge)
28 that are only accessible using type1 31 that are only accessible using type1
29 Normally this can be expressed in the MCFG by not listing them 32 Normally this can be expressed in the MCFG by not listing them
@@ -170,7 +173,7 @@ static int __init pci_mmcfg_check_hostbridge(void)
170 return name != NULL; 173 return name != NULL;
171} 174}
172 175
173static void __init pci_mmcfg_insert_resources(void) 176static void __init pci_mmcfg_insert_resources(unsigned long resource_flags)
174{ 177{
175#define PCI_MMCFG_RESOURCE_NAME_LEN 19 178#define PCI_MMCFG_RESOURCE_NAME_LEN 19
176 int i; 179 int i;
@@ -194,10 +197,13 @@ static void __init pci_mmcfg_insert_resources(void)
194 cfg->pci_segment); 197 cfg->pci_segment);
195 res->start = cfg->address; 198 res->start = cfg->address;
196 res->end = res->start + (num_buses << 20) - 1; 199 res->end = res->start + (num_buses << 20) - 1;
197 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; 200 res->flags = IORESOURCE_MEM | resource_flags;
198 insert_resource(&iomem_resource, res); 201 insert_resource(&iomem_resource, res);
199 names += PCI_MMCFG_RESOURCE_NAME_LEN; 202 names += PCI_MMCFG_RESOURCE_NAME_LEN;
200 } 203 }
204
205 /* Mark that the resources have been inserted. */
206 pci_mmcfg_resources_inserted = 1;
201} 207}
202 208
203static void __init pci_mmcfg_reject_broken(int type) 209static void __init pci_mmcfg_reject_broken(int type)
@@ -267,7 +273,43 @@ void __init pci_mmcfg_init(int type)
267 if (type == 1) 273 if (type == 1)
268 unreachable_devices(); 274 unreachable_devices();
269 if (known_bridge) 275 if (known_bridge)
270 pci_mmcfg_insert_resources(); 276 pci_mmcfg_insert_resources(IORESOURCE_BUSY);
271 pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF; 277 pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF;
278 } else {
279 /*
280 * Signal not to attempt to insert mmcfg resources because
281 * the architecture mmcfg setup could not initialize.
282 */
283 pci_mmcfg_resources_inserted = 1;
272 } 284 }
273} 285}
286
287static int __init pci_mmcfg_late_insert_resources(void)
288{
289 /*
290 * If resources are already inserted or we are not using MMCONFIG,
291 * don't insert the resources.
292 */
293 if ((pci_mmcfg_resources_inserted == 1) ||
294 (pci_probe & PCI_PROBE_MMCONF) == 0 ||
295 (pci_mmcfg_config_num == 0) ||
296 (pci_mmcfg_config == NULL) ||
297 (pci_mmcfg_config[0].address == 0))
298 return 1;
299
300 /*
301 * Attempt to insert the mmcfg resources but not with the busy flag
302 * marked so it won't cause request errors when __request_region is
303 * called.
304 */
305 pci_mmcfg_insert_resources(0);
306
307 return 0;
308}
309
310/*
311 * Perform MMCONFIG resource insertion after PCI initialization to allow for
312 * misprogrammed MCFG tables that state larger sizes but actually conflict
313 * with other system resources.
314 */
315late_initcall(pci_mmcfg_late_insert_resources);
diff --git a/arch/i386/xen/time.c b/arch/i386/xen/time.c
index 51fdabf1fd4d..dfd6db69ead5 100644
--- a/arch/i386/xen/time.c
+++ b/arch/i386/xen/time.c
@@ -412,6 +412,7 @@ static void xen_timerop_set_mode(enum clock_event_mode mode,
412 break; 412 break;
413 413
414 case CLOCK_EVT_MODE_ONESHOT: 414 case CLOCK_EVT_MODE_ONESHOT:
415 case CLOCK_EVT_MODE_RESUME:
415 break; 416 break;
416 417
417 case CLOCK_EVT_MODE_UNUSED: 418 case CLOCK_EVT_MODE_UNUSED:
@@ -474,6 +475,8 @@ static void xen_vcpuop_set_mode(enum clock_event_mode mode,
474 HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL)) 475 HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
475 BUG(); 476 BUG();
476 break; 477 break;
478 case CLOCK_EVT_MODE_RESUME:
479 break;
477 } 480 }
478} 481}
479 482
diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
index e1189ba1ca5e..1cfab326fb7e 100644
--- a/arch/ia64/ia32/binfmt_elf32.c
+++ b/arch/ia64/ia32/binfmt_elf32.c
@@ -226,7 +226,7 @@ elf32_set_personality (void)
226} 226}
227 227
228static unsigned long 228static unsigned long
229elf32_map (struct file *filep, unsigned long addr, struct elf_phdr *eppnt, int prot, int type, unsigned long unused) 229elf32_map (struct file *filep, unsigned long addr, struct elf_phdr *eppnt, int prot, int type)
230{ 230{
231 unsigned long pgoff = (eppnt->p_vaddr) & ~IA32_PAGE_MASK; 231 unsigned long pgoff = (eppnt->p_vaddr) & ~IA32_PAGE_MASK;
232 232
diff --git a/arch/powerpc/boot/ps3-head.S b/arch/powerpc/boot/ps3-head.S
index 1a6d64a68df5..a55c2735f759 100644
--- a/arch/powerpc/boot/ps3-head.S
+++ b/arch/powerpc/boot/ps3-head.S
@@ -20,6 +20,8 @@
20 20
21#include "ppc_asm.h" 21#include "ppc_asm.h"
22 22
23 .machine "ppc64"
24
23 .text 25 .text
24 26
25/* 27/*
diff --git a/arch/powerpc/boot/ps3-hvcall.S b/arch/powerpc/boot/ps3-hvcall.S
index c8b7df3210d1..585965f7e6a8 100644
--- a/arch/powerpc/boot/ps3-hvcall.S
+++ b/arch/powerpc/boot/ps3-hvcall.S
@@ -20,6 +20,8 @@
20 20
21#include "ppc_asm.h" 21#include "ppc_asm.h"
22 22
23 .machine "ppc64"
24
23/* 25/*
24 * The PS3 hypervisor uses a 64 bit "C" language calling convention. 26 * The PS3 hypervisor uses a 64 bit "C" language calling convention.
25 * The routines here marshal arguments between the 32 bit wrapper 27 * The routines here marshal arguments between the 32 bit wrapper
diff --git a/arch/powerpc/mm/tlb_32.c b/arch/powerpc/mm/tlb_32.c
index 06c7e77e097a..eb4b512d65fa 100644
--- a/arch/powerpc/mm/tlb_32.c
+++ b/arch/powerpc/mm/tlb_32.c
@@ -26,6 +26,8 @@
26#include <linux/mm.h> 26#include <linux/mm.h>
27#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/highmem.h> 28#include <linux/highmem.h>
29#include <linux/pagemap.h>
30
29#include <asm/tlbflush.h> 31#include <asm/tlbflush.h>
30#include <asm/tlb.h> 32#include <asm/tlb.h>
31 33
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 7de4e919687b..c2aaec5289dc 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -941,6 +941,13 @@ static const struct file_operations spufs_signal1_fops = {
941 .mmap = spufs_signal1_mmap, 941 .mmap = spufs_signal1_mmap,
942}; 942};
943 943
944static const struct file_operations spufs_signal1_nosched_fops = {
945 .open = spufs_signal1_open,
946 .release = spufs_signal1_release,
947 .write = spufs_signal1_write,
948 .mmap = spufs_signal1_mmap,
949};
950
944static int spufs_signal2_open(struct inode *inode, struct file *file) 951static int spufs_signal2_open(struct inode *inode, struct file *file)
945{ 952{
946 struct spufs_inode_info *i = SPUFS_I(inode); 953 struct spufs_inode_info *i = SPUFS_I(inode);
@@ -1076,6 +1083,13 @@ static const struct file_operations spufs_signal2_fops = {
1076 .mmap = spufs_signal2_mmap, 1083 .mmap = spufs_signal2_mmap,
1077}; 1084};
1078 1085
1086static const struct file_operations spufs_signal2_nosched_fops = {
1087 .open = spufs_signal2_open,
1088 .release = spufs_signal2_release,
1089 .write = spufs_signal2_write,
1090 .mmap = spufs_signal2_mmap,
1091};
1092
1079static void spufs_signal1_type_set(void *data, u64 val) 1093static void spufs_signal1_type_set(void *data, u64 val)
1080{ 1094{
1081 struct spu_context *ctx = data; 1095 struct spu_context *ctx = data;
@@ -2177,8 +2191,8 @@ struct tree_descr spufs_dir_contents[] = {
2177 { "mbox_stat", &spufs_mbox_stat_fops, 0444, }, 2191 { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
2178 { "ibox_stat", &spufs_ibox_stat_fops, 0444, }, 2192 { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
2179 { "wbox_stat", &spufs_wbox_stat_fops, 0444, }, 2193 { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
2180 { "signal1", &spufs_signal1_fops, 0666, }, 2194 { "signal1", &spufs_signal1_nosched_fops, 0222, },
2181 { "signal2", &spufs_signal2_fops, 0666, }, 2195 { "signal2", &spufs_signal2_nosched_fops, 0222, },
2182 { "signal1_type", &spufs_signal1_type, 0666, }, 2196 { "signal1_type", &spufs_signal1_type, 0666, },
2183 { "signal2_type", &spufs_signal2_type, 0666, }, 2197 { "signal2_type", &spufs_signal2_type, 0666, },
2184 { "cntl", &spufs_cntl_fops, 0666, }, 2198 { "cntl", &spufs_cntl_fops, 0666, },
diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig
index a05079b07696..d4fc74f7bb15 100644
--- a/arch/powerpc/platforms/ps3/Kconfig
+++ b/arch/powerpc/platforms/ps3/Kconfig
@@ -102,4 +102,40 @@ config PS3_STORAGE
102 depends on PPC_PS3 102 depends on PPC_PS3
103 tristate 103 tristate
104 104
105config PS3_DISK
106 tristate "PS3 Disk Storage Driver"
107 depends on PPC_PS3 && BLOCK
108 select PS3_STORAGE
109 help
110 Include support for the PS3 Disk Storage.
111
112 This support is required to access the PS3 hard disk.
113 In general, all users will say Y or M.
114
115config PS3_ROM
116 tristate "PS3 BD/DVD/CD-ROM Storage Driver"
117 depends on PPC_PS3 && SCSI
118 select PS3_STORAGE
119 help
120 Include support for the PS3 ROM Storage.
121
122 This support is required to access the PS3 BD/DVD/CD-ROM drive.
123 In general, all users will say Y or M.
124 Also make sure to say Y or M to "SCSI CDROM support" later.
125
126config PS3_FLASH
127 tristate "PS3 FLASH ROM Storage Driver"
128 depends on PPC_PS3
129 select PS3_STORAGE
130 help
131 Include support for the PS3 FLASH ROM Storage.
132
133 This support is required to access the PS3 FLASH ROM, which
134 contains the boot loader and some boot options.
135 In general, all users will say Y or M.
136
137 As this driver needs a fixed buffer of 256 KiB of memory, it can
138 be disabled on the kernel command line using "ps3flash=off", to
139 not allocate this fixed buffer.
140
105endmenu 141endmenu
diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c
index 097ebd49f1bf..7aca37d79766 100644
--- a/arch/sh/kernel/timers/timer-tmu.c
+++ b/arch/sh/kernel/timers/timer-tmu.c
@@ -80,6 +80,7 @@ static void tmu_set_mode(enum clock_event_mode mode,
80 break; 80 break;
81 case CLOCK_EVT_MODE_UNUSED: 81 case CLOCK_EVT_MODE_UNUSED:
82 case CLOCK_EVT_MODE_SHUTDOWN: 82 case CLOCK_EVT_MODE_SHUTDOWN:
83 case CLOCK_EVT_MODE_RESUME:
83 break; 84 break;
84 } 85 }
85} 86}
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index 831f540251f8..eac38388f5fd 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -1749,8 +1749,8 @@ fpload:
1749__ndelay: 1749__ndelay:
1750 save %sp, -STACKFRAME_SZ, %sp 1750 save %sp, -STACKFRAME_SZ, %sp
1751 mov %i0, %o0 1751 mov %i0, %o0
1752 call .umul 1752 call .umul ! round multiplier up so large ns ok
1753 mov 0x1ad, %o1 ! 2**32 / (1 000 000 000 / HZ) 1753 mov 0x1ae, %o1 ! 2**32 / (1 000 000 000 / HZ)
1754 call .umul 1754 call .umul
1755 mov %i1, %o1 ! udelay_val 1755 mov %i1, %o1 ! udelay_val
1756 ba delay_continue 1756 ba delay_continue
@@ -1760,11 +1760,17 @@ __ndelay:
1760__udelay: 1760__udelay:
1761 save %sp, -STACKFRAME_SZ, %sp 1761 save %sp, -STACKFRAME_SZ, %sp
1762 mov %i0, %o0 1762 mov %i0, %o0
1763 sethi %hi(0x10c6), %o1 1763 sethi %hi(0x10c7), %o1 ! round multiplier up so large us ok
1764 call .umul 1764 call .umul
1765 or %o1, %lo(0x10c6), %o1 ! 2**32 / 1 000 000 1765 or %o1, %lo(0x10c7), %o1 ! 2**32 / 1 000 000
1766 call .umul 1766 call .umul
1767 mov %i1, %o1 ! udelay_val 1767 mov %i1, %o1 ! udelay_val
1768 sethi %hi(0x028f4b62), %l0 ! Add in rounding constant * 2**32,
1769 or %g0, %lo(0x028f4b62), %l0
1770 addcc %o0, %l0, %o0 ! 2**32 * 0.009 999
1771 bcs,a 3f
1772 add %o1, 0x01, %o1
17733:
1768 call .umul 1774 call .umul
1769 mov HZ, %o0 ! >>32 earlier for wider range 1775 mov HZ, %o0 ! >>32 earlier for wider range
1770 1776
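
The __ndelay/__udelay changes are pure fixed-point arithmetic: 2**32/10**6 truncates to 0x10c6, so the old multiplier could round the loop count down and under-delay; bumping it to 0x10c7 and folding in a rounding constant of roughly 0.01*2**32 before the final shift keeps large arguments from coming up short. A standalone sketch of the scaling, assuming one 64-bit multiply in place of the chained 32-bit .umul calls and made-up HZ/loops-per-jiffy values:

#include <stdio.h>
#include <stdint.h>

#define HZ 100ULL

static uint64_t delay_loops(uint32_t us, uint32_t mult, uint32_t round,
                            uint32_t udelay_val)
{
        uint64_t t = (uint64_t)us * mult;       /* us * (2**32 / 1e6) */

        t = (t * udelay_val + round) >> 32;     /* scale by loops/jiffy, drop fraction */
        return t * HZ;                          /* ">>32 earlier" as in the asm */
}

int main(void)
{
        uint32_t udelay_val = 500000;           /* hypothetical loops per jiffy */
        uint32_t us = 3000000;                  /* a large delay, the worst case */

        printf("old multiplier: %llu loops\n",
               (unsigned long long)delay_loops(us, 0x10c6, 0, udelay_val));
        printf("new multiplier: %llu loops\n",
               (unsigned long long)delay_loops(us, 0x10c7, 0x028f4b62, udelay_val));
        return 0;
}

Rounding the multiplier up trades a delay that can only be marginally long for one that could be short, which is the safe direction for a delay loop.
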
diff --git a/arch/sparc/kernel/irq.c b/arch/sparc/kernel/irq.c
index f257a67bcf93..75b2240ad0f9 100644
--- a/arch/sparc/kernel/irq.c
+++ b/arch/sparc/kernel/irq.c
@@ -47,6 +47,8 @@
47#include <asm/cacheflush.h> 47#include <asm/cacheflush.h>
48#include <asm/irq_regs.h> 48#include <asm/irq_regs.h>
49 49
50#include "irq.h"
51
50#ifdef CONFIG_SMP 52#ifdef CONFIG_SMP
51#define SMP_NOP2 "nop; nop;\n\t" 53#define SMP_NOP2 "nop; nop;\n\t"
52#define SMP_NOP3 "nop; nop; nop;\n\t" 54#define SMP_NOP3 "nop; nop; nop;\n\t"
@@ -268,7 +270,7 @@ void free_irq(unsigned int irq, void *dev_id)
268 kfree(action); 270 kfree(action);
269 271
270 if (!sparc_irq[cpu_irq].action) 272 if (!sparc_irq[cpu_irq].action)
271 disable_irq(irq); 273 __disable_irq(irq);
272 274
273out_unlock: 275out_unlock:
274 spin_unlock_irqrestore(&irq_action_lock, flags); 276 spin_unlock_irqrestore(&irq_action_lock, flags);
@@ -464,7 +466,7 @@ int request_fast_irq(unsigned int irq,
464 466
465 sparc_irq[cpu_irq].action = action; 467 sparc_irq[cpu_irq].action = action;
466 468
467 enable_irq(irq); 469 __enable_irq(irq);
468 470
469 ret = 0; 471 ret = 0;
470out_unlock: 472out_unlock:
@@ -544,7 +546,7 @@ int request_irq(unsigned int irq,
544 546
545 *actionp = action; 547 *actionp = action;
546 548
547 enable_irq(irq); 549 __enable_irq(irq);
548 550
549 ret = 0; 551 ret = 0;
550out_unlock: 552out_unlock:
@@ -555,6 +557,25 @@ out:
555 557
556EXPORT_SYMBOL(request_irq); 558EXPORT_SYMBOL(request_irq);
557 559
560void disable_irq_nosync(unsigned int irq)
561{
562 return __disable_irq(irq);
563}
564EXPORT_SYMBOL(disable_irq_nosync);
565
566void disable_irq(unsigned int irq)
567{
568 return __disable_irq(irq);
569}
570EXPORT_SYMBOL(disable_irq);
571
572void enable_irq(unsigned int irq)
573{
574 return __enable_irq(irq);
575}
576
577EXPORT_SYMBOL(enable_irq);
578
558/* We really don't need these at all on the Sparc. We only have 579/* We really don't need these at all on the Sparc. We only have
559 * stubs here because they are exported to modules. 580 * stubs here because they are exported to modules.
560 */ 581 */
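
This hunk stops using the btfixup entities for enable/disable directly (their exports go away in the sparc_ksyms.c hunk below): the raw hooks become private __disable_irq()/__enable_irq() inlines in the new irq.h, and ordinary exported disable_irq()/disable_irq_nosync()/enable_irq() functions are layered on top for modules. A user-space sketch of that layering, with a plain function pointer standing in for the BTFIXUP machinery:

#include <stdio.h>

/* per-machine hook, normally patched in via btfixup at boot */
static void (*mach_enable_irq)(unsigned int);

static void sun4m_enable_irq(unsigned int irq)
{
        printf("sun4m: enabling irq %u\n", irq);
}

static inline void __enable_irq(unsigned int irq)       /* private, header-only */
{
        mach_enable_irq(irq);
}

void enable_irq(unsigned int irq)                        /* the exported wrapper */
{
        __enable_irq(irq);
}

int main(void)
{
        mach_enable_irq = sun4m_enable_irq;              /* "boot-time" selection */
        enable_irq(7);
        return 0;
}
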
diff --git a/arch/sparc/kernel/irq.h b/arch/sparc/kernel/irq.h
new file mode 100644
index 000000000000..32ef3ebd0a88
--- /dev/null
+++ b/arch/sparc/kernel/irq.h
@@ -0,0 +1,68 @@
1#include <asm/btfixup.h>
2
3/* Dave Redman (djhr@tadpole.co.uk)
4 * changed these to function pointers.. it saves cycles and will allow
5 * the irq dependencies to be split into different files at a later date
6 * sun4c_irq.c, sun4m_irq.c etc so we could reduce the kernel size.
7 * Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 * Changed these to btfixup entities... It saves cycles :)
9 */
10
11BTFIXUPDEF_CALL(void, disable_irq, unsigned int)
12BTFIXUPDEF_CALL(void, enable_irq, unsigned int)
13BTFIXUPDEF_CALL(void, disable_pil_irq, unsigned int)
14BTFIXUPDEF_CALL(void, enable_pil_irq, unsigned int)
15BTFIXUPDEF_CALL(void, clear_clock_irq, void)
16BTFIXUPDEF_CALL(void, clear_profile_irq, int)
17BTFIXUPDEF_CALL(void, load_profile_irq, int, unsigned int)
18
19static inline void __disable_irq(unsigned int irq)
20{
21 BTFIXUP_CALL(disable_irq)(irq);
22}
23
24static inline void __enable_irq(unsigned int irq)
25{
26 BTFIXUP_CALL(enable_irq)(irq);
27}
28
29static inline void disable_pil_irq(unsigned int irq)
30{
31 BTFIXUP_CALL(disable_pil_irq)(irq);
32}
33
34static inline void enable_pil_irq(unsigned int irq)
35{
36 BTFIXUP_CALL(enable_pil_irq)(irq);
37}
38
39static inline void clear_clock_irq(void)
40{
41 BTFIXUP_CALL(clear_clock_irq)();
42}
43
44static inline void clear_profile_irq(int irq)
45{
46 BTFIXUP_CALL(clear_profile_irq)(irq);
47}
48
49static inline void load_profile_irq(int cpu, int limit)
50{
51 BTFIXUP_CALL(load_profile_irq)(cpu, limit);
52}
53
54extern void (*sparc_init_timers)(irq_handler_t lvl10_irq);
55
56extern void claim_ticker14(irq_handler_t irq_handler,
57 int irq,
58 unsigned int timeout);
59
60#ifdef CONFIG_SMP
61BTFIXUPDEF_CALL(void, set_cpu_int, int, int)
62BTFIXUPDEF_CALL(void, clear_cpu_int, int, int)
63BTFIXUPDEF_CALL(void, set_irq_udt, int)
64
65#define set_cpu_int(cpu,level) BTFIXUP_CALL(set_cpu_int)(cpu,level)
66#define clear_cpu_int(cpu,level) BTFIXUP_CALL(clear_cpu_int)(cpu,level)
67#define set_irq_udt(cpu) BTFIXUP_CALL(set_irq_udt)(cpu)
68#endif
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index 791771196905..f2eae457fc9a 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -36,6 +36,7 @@
36#include <asm/uaccess.h> 36#include <asm/uaccess.h>
37#include <asm/irq_regs.h> 37#include <asm/irq_regs.h>
38 38
39#include "irq.h"
39 40
40/* 41/*
41 * I studied different documents and many live PROMs both from 2.30 42 * I studied different documents and many live PROMs both from 2.30
diff --git a/arch/sparc/kernel/smp.c b/arch/sparc/kernel/smp.c
index 4fea3ac7bff0..6724ab90f82b 100644
--- a/arch/sparc/kernel/smp.c
+++ b/arch/sparc/kernel/smp.c
@@ -33,6 +33,8 @@
33#include <asm/tlbflush.h> 33#include <asm/tlbflush.h>
34#include <asm/cpudata.h> 34#include <asm/cpudata.h>
35 35
36#include "irq.h"
37
36int smp_num_cpus = 1; 38int smp_num_cpus = 1;
37volatile unsigned long cpu_callin_map[NR_CPUS] __initdata = {0,}; 39volatile unsigned long cpu_callin_map[NR_CPUS] __initdata = {0,};
38unsigned char boot_cpu_id = 0; 40unsigned char boot_cpu_id = 0;
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
index d8e008a04e2b..55bac516dfe2 100644
--- a/arch/sparc/kernel/sparc_ksyms.c
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -154,8 +154,6 @@ EXPORT_SYMBOL(BTFIXUP_CALL(___xchg32));
154#else 154#else
155EXPORT_SYMBOL(BTFIXUP_CALL(__hard_smp_processor_id)); 155EXPORT_SYMBOL(BTFIXUP_CALL(__hard_smp_processor_id));
156#endif 156#endif
157EXPORT_SYMBOL(BTFIXUP_CALL(enable_irq));
158EXPORT_SYMBOL(BTFIXUP_CALL(disable_irq));
159EXPORT_SYMBOL(BTFIXUP_CALL(mmu_unlockarea)); 157EXPORT_SYMBOL(BTFIXUP_CALL(mmu_unlockarea));
160EXPORT_SYMBOL(BTFIXUP_CALL(mmu_lockarea)); 158EXPORT_SYMBOL(BTFIXUP_CALL(mmu_lockarea));
161EXPORT_SYMBOL(BTFIXUP_CALL(mmu_get_scsi_sgl)); 159EXPORT_SYMBOL(BTFIXUP_CALL(mmu_get_scsi_sgl));
diff --git a/arch/sparc/kernel/sun4c_irq.c b/arch/sparc/kernel/sun4c_irq.c
index 009e891a4329..c6ac9fc52563 100644
--- a/arch/sparc/kernel/sun4c_irq.c
+++ b/arch/sparc/kernel/sun4c_irq.c
@@ -18,6 +18,7 @@
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include "irq.h"
21 22
22#include <asm/ptrace.h> 23#include <asm/ptrace.h>
23#include <asm/processor.h> 24#include <asm/processor.h>
@@ -40,6 +41,20 @@ static struct resource sun4c_timer_eb = { "sun4c_timer" };
40static struct resource sun4c_intr_eb = { "sun4c_intr" }; 41static struct resource sun4c_intr_eb = { "sun4c_intr" };
41#endif 42#endif
42 43
44/*
45 * Bit field defines for the interrupt registers on various
46 * Sparc machines.
47 */
48
49/* The sun4c interrupt register. */
50#define SUN4C_INT_ENABLE 0x01 /* Allow interrupts. */
51#define SUN4C_INT_E14 0x80 /* Enable level 14 IRQ. */
52#define SUN4C_INT_E10 0x20 /* Enable level 10 IRQ. */
53#define SUN4C_INT_E8 0x10 /* Enable level 8 IRQ. */
54#define SUN4C_INT_E6 0x08 /* Enable level 6 IRQ. */
55#define SUN4C_INT_E4 0x04 /* Enable level 4 IRQ. */
56#define SUN4C_INT_E1 0x02 /* Enable level 1 IRQ. */
57
43/* Pointer to the interrupt enable byte 58/* Pointer to the interrupt enable byte
44 * 59 *
45 * Dave Redman (djhr@tadpole.co.uk) 60 * Dave Redman (djhr@tadpole.co.uk)
diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c
index 396797e20c39..e0efab2a6bef 100644
--- a/arch/sparc/kernel/sun4d_irq.c
+++ b/arch/sparc/kernel/sun4d_irq.c
@@ -39,6 +39,8 @@
39#include <asm/cacheflush.h> 39#include <asm/cacheflush.h>
40#include <asm/irq_regs.h> 40#include <asm/irq_regs.h>
41 41
42#include "irq.h"
43
42/* If you trust current SCSI layer to handle different SCSI IRQs, enable this. I don't trust it... -jj */ 44/* If you trust current SCSI layer to handle different SCSI IRQs, enable this. I don't trust it... -jj */
43/* #define DISTRIBUTE_IRQS */ 45/* #define DISTRIBUTE_IRQS */
44 46
@@ -188,7 +190,7 @@ void sun4d_free_irq(unsigned int irq, void *dev_id)
188 kfree(action); 190 kfree(action);
189 191
190 if (!(*actionp)) 192 if (!(*actionp))
191 disable_irq(irq); 193 __disable_irq(irq);
192 194
193out_unlock: 195out_unlock:
194 spin_unlock_irqrestore(&irq_action_lock, flags); 196 spin_unlock_irqrestore(&irq_action_lock, flags);
@@ -346,7 +348,7 @@ int sun4d_request_irq(unsigned int irq,
346 else 348 else
347 *actionp = action; 349 *actionp = action;
348 350
349 enable_irq(irq); 351 __enable_irq(irq);
350 352
351 ret = 0; 353 ret = 0;
352out_unlock: 354out_unlock:
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index 098c94f1a322..89a6de95070c 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -36,6 +36,7 @@
36#include <asm/cacheflush.h> 36#include <asm/cacheflush.h>
37#include <asm/cpudata.h> 37#include <asm/cpudata.h>
38 38
39#include "irq.h"
39#define IRQ_CROSS_CALL 15 40#define IRQ_CROSS_CALL 15
40 41
41extern ctxd_t *srmmu_ctx_table_phys; 42extern ctxd_t *srmmu_ctx_table_phys;
diff --git a/arch/sparc/kernel/sun4m_irq.c b/arch/sparc/kernel/sun4m_irq.c
index 91a803ea88be..b92d6d2d5b04 100644
--- a/arch/sparc/kernel/sun4m_irq.c
+++ b/arch/sparc/kernel/sun4m_irq.c
@@ -38,11 +38,85 @@
38#include <asm/sbus.h> 38#include <asm/sbus.h>
39#include <asm/cacheflush.h> 39#include <asm/cacheflush.h>
40 40
41#include "irq.h"
42
43/* On the sun4m, just like the timers, we have both per-cpu and master
44 * interrupt registers.
45 */
46
47/* These registers are used for sending/receiving irqs from/to
48 * different cpu's.
49 */
50struct sun4m_intreg_percpu {
51 unsigned int tbt; /* Interrupts still pending for this cpu. */
52
53 /* These next two registers are WRITE-ONLY and are only
54 * "on bit" sensitive, "off bits" written have NO affect.
55 */
56 unsigned int clear; /* Clear this cpus irqs here. */
57 unsigned int set; /* Set this cpus irqs here. */
58 unsigned char space[PAGE_SIZE - 12];
59};
60
61/*
62 * djhr
63 * Actually the clear and set fields in this struct are misleading..
64 * according to the SLAVIO manual (and the same applies for the SEC)
65 * the clear field clears bits in the mask which will ENABLE that IRQ
66 * the set field sets bits in the mask to DISABLE the IRQ.
67 *
68 * Also the undirected_xx address in the SLAVIO is defined as
69 * RESERVED and write only..
70 *
71 * DAVEM_NOTE: The SLAVIO only specifies behavior on uniprocessor
72 * sun4m machines, for MP the layout makes more sense.
73 */
74struct sun4m_intregs {
75 struct sun4m_intreg_percpu cpu_intregs[SUN4M_NCPUS];
76 unsigned int tbt; /* IRQ's that are still pending. */
77 unsigned int irqs; /* Master IRQ bits. */
78
79 /* Again, like the above, these two registers are WRITE-ONLY. */
80 unsigned int clear; /* Clear master IRQ's by setting bits here. */
81 unsigned int set; /* Set master IRQ's by setting bits here. */
82
83 /* This register is both READ and WRITE. */
84 unsigned int undirected_target; /* Which cpu gets undirected irqs. */
85};
86
41static unsigned long dummy; 87static unsigned long dummy;
42 88
43struct sun4m_intregs *sun4m_interrupts; 89struct sun4m_intregs *sun4m_interrupts;
44unsigned long *irq_rcvreg = &dummy; 90unsigned long *irq_rcvreg = &dummy;
45 91
92/* Dave Redman (djhr@tadpole.co.uk)
93 * The sun4m interrupt registers.
94 */
95#define SUN4M_INT_ENABLE 0x80000000
96#define SUN4M_INT_E14 0x00000080
97#define SUN4M_INT_E10 0x00080000
98
99#define SUN4M_HARD_INT(x) (0x000000001 << (x))
100#define SUN4M_SOFT_INT(x) (0x000010000 << (x))
101
102#define SUN4M_INT_MASKALL 0x80000000 /* mask all interrupts */
103#define SUN4M_INT_MODULE_ERR 0x40000000 /* module error */
104#define SUN4M_INT_M2S_WRITE 0x20000000 /* write buffer error */
105#define SUN4M_INT_ECC 0x10000000 /* ecc memory error */
106#define SUN4M_INT_FLOPPY 0x00400000 /* floppy disk */
107#define SUN4M_INT_MODULE 0x00200000 /* module interrupt */
108#define SUN4M_INT_VIDEO 0x00100000 /* onboard video */
109#define SUN4M_INT_REALTIME 0x00080000 /* system timer */
110#define SUN4M_INT_SCSI 0x00040000 /* onboard scsi */
111#define SUN4M_INT_AUDIO 0x00020000 /* audio/isdn */
112#define SUN4M_INT_ETHERNET 0x00010000 /* onboard ethernet */
113#define SUN4M_INT_SERIAL 0x00008000 /* serial ports */
114#define SUN4M_INT_KBDMS 0x00004000 /* keyboard/mouse */
115#define SUN4M_INT_SBUSBITS 0x00003F80 /* sbus int bits */
116
117#define SUN4M_INT_SBUS(x) (1 << (x+7))
118#define SUN4M_INT_VME(x) (1 << (x))
119
46/* These tables only apply for interrupts greater than 15.. 120/* These tables only apply for interrupts greater than 15..
47 * 121 *
48 * any intr value below 0x10 is considered to be a soft-int 122 * any intr value below 0x10 is considered to be a soft-int
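
The new comments and #defines document the awkward sun4m register semantics: the per-cpu and master clear/set words are write-only, only the 1 bits in a write do anything, and, per the quoted SLAVIO note, a write to "clear" unmasks an interrupt while a write to "set" masks it. A minimal sketch of that write-one-to-act idiom, with an ordinary variable standing in for the memory-mapped mask register:

#include <stdio.h>
#include <stdint.h>

#define SUN4M_INT_FLOPPY 0x00400000

static uint32_t irq_mask;                       /* 1 = masked (disabled) */

static void write_set(uint32_t bits)   { irq_mask |= bits;  }   /* mask IRQs */
static void write_clear(uint32_t bits) { irq_mask &= ~bits; }   /* unmask IRQs */

int main(void)
{
        write_set(SUN4M_INT_FLOPPY);            /* disable the floppy interrupt */
        printf("mask = %08x\n", (unsigned int)irq_mask);
        write_clear(SUN4M_INT_FLOPPY);          /* enable it again */
        printf("mask = %08x\n", (unsigned int)irq_mask);
        return 0;
}
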
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 63ed19bfd028..730eb5796f8e 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -31,6 +31,8 @@
31#include <asm/oplib.h> 31#include <asm/oplib.h>
32#include <asm/cpudata.h> 32#include <asm/cpudata.h>
33 33
34#include "irq.h"
35
34#define IRQ_RESCHEDULE 13 36#define IRQ_RESCHEDULE 13
35#define IRQ_STOP_CPU 14 37#define IRQ_STOP_CPU 14
36#define IRQ_CROSS_CALL 15 38#define IRQ_CROSS_CALL 15
diff --git a/arch/sparc/kernel/tick14.c b/arch/sparc/kernel/tick14.c
index f1a7bd19e04f..707bfda86570 100644
--- a/arch/sparc/kernel/tick14.c
+++ b/arch/sparc/kernel/tick14.c
@@ -25,6 +25,8 @@
25#include <asm/irq.h> 25#include <asm/irq.h>
26#include <asm/io.h> 26#include <asm/io.h>
27 27
28#include "irq.h"
29
28extern unsigned long lvl14_save[5]; 30extern unsigned long lvl14_save[5];
29static unsigned long *linux_lvl14 = NULL; 31static unsigned long *linux_lvl14 = NULL;
30static unsigned long obp_lvl14[4]; 32static unsigned long obp_lvl14[4];
@@ -62,7 +64,7 @@ void claim_ticker14(irq_handler_t handler,
62 64
63 /* first we copy the obp handler instructions 65 /* first we copy the obp handler instructions
64 */ 66 */
65 disable_irq(irq_nr); 67 __disable_irq(irq_nr);
66 if (!handler) 68 if (!handler)
67 return; 69 return;
68 70
@@ -79,6 +81,6 @@ void claim_ticker14(irq_handler_t handler,
79 NULL)) { 81 NULL)) {
80 install_linux_ticker(); 82 install_linux_ticker();
81 load_profile_irq(cpu, timeout); 83 load_profile_irq(cpu, timeout);
82 enable_irq(irq_nr); 84 __enable_irq(irq_nr);
83 } 85 }
84} 86}
diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
index f2fdbb3664d3..6a2513321620 100644
--- a/arch/sparc/kernel/time.c
+++ b/arch/sparc/kernel/time.c
@@ -44,6 +44,8 @@
44#include <asm/of_device.h> 44#include <asm/of_device.h>
45#include <asm/irq_regs.h> 45#include <asm/irq_regs.h>
46 46
47#include "irq.h"
48
47DEFINE_SPINLOCK(rtc_lock); 49DEFINE_SPINLOCK(rtc_lock);
48enum sparc_clock_type sp_clock_typ; 50enum sparc_clock_type sp_clock_typ;
49DEFINE_SPINLOCK(mostek_lock); 51DEFINE_SPINLOCK(mostek_lock);
diff --git a/arch/sparc/mm/init.c b/arch/sparc/mm/init.c
index a532922e2e35..a1bef07755a9 100644
--- a/arch/sparc/mm/init.c
+++ b/arch/sparc/mm/init.c
@@ -308,6 +308,9 @@ extern void sun4c_paging_init(void);
308extern void srmmu_paging_init(void); 308extern void srmmu_paging_init(void);
309extern void device_scan(void); 309extern void device_scan(void);
310 310
311pgprot_t PAGE_SHARED __read_mostly;
312EXPORT_SYMBOL(PAGE_SHARED);
313
311void __init paging_init(void) 314void __init paging_init(void)
312{ 315{
313 switch(sparc_cpu_model) { 316 switch(sparc_cpu_model) {
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index ca26232da7ab..17b485f2825c 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -2154,7 +2154,7 @@ void __init ld_mmu_srmmu(void)
2154 BTFIXUPSET_SIMM13(ptrs_per_pgd, SRMMU_PTRS_PER_PGD); 2154 BTFIXUPSET_SIMM13(ptrs_per_pgd, SRMMU_PTRS_PER_PGD);
2155 2155
2156 BTFIXUPSET_INT(page_none, pgprot_val(SRMMU_PAGE_NONE)); 2156 BTFIXUPSET_INT(page_none, pgprot_val(SRMMU_PAGE_NONE));
2157 BTFIXUPSET_INT(page_shared, pgprot_val(SRMMU_PAGE_SHARED)); 2157 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
2158 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY)); 2158 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
2159 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY)); 2159 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
2160 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL)); 2160 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
index bdd835fba02e..a57a366e339a 100644
--- a/arch/sparc/mm/sun4c.c
+++ b/arch/sparc/mm/sun4c.c
@@ -2155,7 +2155,7 @@ void __init ld_mmu_sun4c(void)
2155 BTFIXUPSET_SIMM13(user_ptrs_per_pgd, KERNBASE / SUN4C_PGDIR_SIZE); 2155 BTFIXUPSET_SIMM13(user_ptrs_per_pgd, KERNBASE / SUN4C_PGDIR_SIZE);
2156 2156
2157 BTFIXUPSET_INT(page_none, pgprot_val(SUN4C_PAGE_NONE)); 2157 BTFIXUPSET_INT(page_none, pgprot_val(SUN4C_PAGE_NONE));
2158 BTFIXUPSET_INT(page_shared, pgprot_val(SUN4C_PAGE_SHARED)); 2158 PAGE_SHARED = pgprot_val(SUN4C_PAGE_SHARED);
2159 BTFIXUPSET_INT(page_copy, pgprot_val(SUN4C_PAGE_COPY)); 2159 BTFIXUPSET_INT(page_copy, pgprot_val(SUN4C_PAGE_COPY));
2160 BTFIXUPSET_INT(page_readonly, pgprot_val(SUN4C_PAGE_READONLY)); 2160 BTFIXUPSET_INT(page_readonly, pgprot_val(SUN4C_PAGE_READONLY));
2161 BTFIXUPSET_INT(page_kernel, pgprot_val(SUN4C_PAGE_KERNEL)); 2161 BTFIXUPSET_INT(page_kernel, pgprot_val(SUN4C_PAGE_KERNEL));
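
Taken together, the init.c, srmmu.c and sun4c.c hunks turn PAGE_SHARED from a btfixup-patched constant into an exported __read_mostly variable that each MMU setup routine assigns once at boot. A compact sketch of that runtime-selection pattern; the unsigned long type and the values below are placeholders, not the real pgprot encodings:

#include <stdio.h>

static unsigned long PAGE_SHARED;       /* set once during paging_init */

static void ld_mmu_srmmu(void) { PAGE_SHARED = 0x0ee; } /* hypothetical value */
static void ld_mmu_sun4c(void) { PAGE_SHARED = 0x05f; } /* hypothetical value */

int main(void)
{
        int is_srmmu = 1;               /* pretend we probed an srmmu box */

        if (is_srmmu)
                ld_mmu_srmmu();
        else
                ld_mmu_sun4c();
        printf("PAGE_SHARED = %#lx\n", PAGE_SHARED);
        return 0;
}
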
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index f1cc55677ff2..33dabf588bdd 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -23,6 +23,10 @@ config GENERIC_TIME
23 bool 23 bool
24 default y 24 default y
25 25
26config GENERIC_CMOS_UPDATE
27 bool
28 default y
29
26config GENERIC_CLOCKEVENTS 30config GENERIC_CLOCKEVENTS
27 bool 31 bool
28 default y 32 default y
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 7d36531aa5b9..d270c2f0be0f 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -280,6 +280,7 @@ EXPORT_SYMBOL(sys_getgid);
280EXPORT_SYMBOL(svr4_getcontext); 280EXPORT_SYMBOL(svr4_getcontext);
281EXPORT_SYMBOL(svr4_setcontext); 281EXPORT_SYMBOL(svr4_setcontext);
282EXPORT_SYMBOL(compat_sys_ioctl); 282EXPORT_SYMBOL(compat_sys_ioctl);
283EXPORT_SYMBOL(sys_ioctl);
283EXPORT_SYMBOL(sparc32_open); 284EXPORT_SYMBOL(sparc32_open);
284#endif 285#endif
285 286
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index e340eb401fb9..49063ca2efcd 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -403,58 +403,9 @@ static struct sparc64_tick_ops hbtick_operations __read_mostly = {
403 403
404static unsigned long timer_ticks_per_nsec_quotient __read_mostly; 404static unsigned long timer_ticks_per_nsec_quotient __read_mostly;
405 405
406#define TICK_SIZE (tick_nsec / 1000) 406int update_persistent_clock(struct timespec now)
407
408#define USEC_AFTER 500000
409#define USEC_BEFORE 500000
410
411static void sync_cmos_clock(unsigned long dummy);
412
413static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
414
415static void sync_cmos_clock(unsigned long dummy)
416{
417 struct timeval now, next;
418 int fail = 1;
419
420 /*
421 * If we have an externally synchronized Linux clock, then update
422 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
423 * called as close as possible to 500 ms before the new second starts.
424 * This code is run on a timer. If the clock is set, that timer
425 * may not expire at the correct time. Thus, we adjust...
426 */
427 if (!ntp_synced())
428 /*
429 * Not synced, exit, do not restart a timer (if one is
430 * running, let it run out).
431 */
432 return;
433
434 do_gettimeofday(&now);
435 if (now.tv_usec >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
436 now.tv_usec <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2)
437 fail = set_rtc_mmss(now.tv_sec);
438
439 next.tv_usec = USEC_AFTER - now.tv_usec;
440 if (next.tv_usec <= 0)
441 next.tv_usec += USEC_PER_SEC;
442
443 if (!fail)
444 next.tv_sec = 659;
445 else
446 next.tv_sec = 0;
447
448 if (next.tv_usec >= USEC_PER_SEC) {
449 next.tv_sec++;
450 next.tv_usec -= USEC_PER_SEC;
451 }
452 mod_timer(&sync_cmos_timer, jiffies + timeval_to_jiffies(&next));
453}
454
455void notify_arch_cmos_timer(void)
456{ 407{
457 mod_timer(&sync_cmos_timer, jiffies + 1); 408 return set_rtc_mmss(now.tv_sec);
458} 409}
459 410
460/* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */ 411/* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */
@@ -931,6 +882,7 @@ static void sparc64_timer_setup(enum clock_event_mode mode,
931{ 882{
932 switch (mode) { 883 switch (mode) {
933 case CLOCK_EVT_MODE_ONESHOT: 884 case CLOCK_EVT_MODE_ONESHOT:
885 case CLOCK_EVT_MODE_RESUME:
934 break; 886 break;
935 887
936 case CLOCK_EVT_MODE_SHUTDOWN: 888 case CLOCK_EVT_MODE_SHUTDOWN:
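
With GENERIC_CMOS_UPDATE selected in the Kconfig hunks, the hand-rolled sync_cmos_clock timer (the ~11-minute NTP resync dance deleted above) collapses into a single update_persistent_clock() callback: generic timekeeping code now decides when to write the RTC and the architecture only supplies the write. A rough user-space sketch of the callback shape, with set_rtc_mmss() mocked:

#include <stdio.h>
#include <time.h>

static int set_rtc_mmss(time_t secs)    /* mock of the sparc64 helper */
{
        printf("write %lld to the RTC\n", (long long)secs);
        return 0;
}

static int update_persistent_clock(struct timespec now)
{
        return set_rtc_mmss(now.tv_sec);
}

int main(void)
{
        struct timespec now = { .tv_sec = 1184918400, .tv_nsec = 0 };

        return update_persistent_clock(now);
}
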
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 14bf8ce3ea23..45f82ae6d389 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -32,6 +32,10 @@ config GENERIC_TIME_VSYSCALL
32 bool 32 bool
33 default y 33 default y
34 34
35config GENERIC_CMOS_UPDATE
36 bool
37 default y
38
35config ZONE_DMA32 39config ZONE_DMA32
36 bool 40 bool
37 default y 41 default y
@@ -56,6 +60,14 @@ config ZONE_DMA
56 bool 60 bool
57 default y 61 default y
58 62
63config QUICKLIST
64 bool
65 default y
66
67config NR_QUICK
68 int
69 default 2
70
59config ISA 71config ISA
60 bool 72 bool
61 73
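
The new QUICKLIST/NR_QUICK options enable per-CPU caches of recently freed page-table pages, so the next allocation can reuse a page without a round trip through the page allocator. A toy single-list illustration of the idea; malloc/free stand in for page allocation, and the real implementation in mm/quicklist.c keeps NR_QUICK lists per CPU:

#include <stdlib.h>
#include <stdio.h>

struct qnode { struct qnode *next; };

static struct qnode *quicklist;         /* one cache list for the demo */

static void *quick_alloc(size_t size)
{
        if (quicklist) {
                struct qnode *n = quicklist;
                quicklist = n->next;
                return n;               /* reuse a cached "page" */
        }
        return malloc(size);
}

static void quick_free(void *p)
{
        struct qnode *n = p;

        n->next = quicklist;            /* push instead of freeing */
        quicklist = n;
}

int main(void)
{
        void *a = quick_alloc(4096);

        quick_free(a);
        void *b = quick_alloc(4096);    /* comes straight back off the list */
        printf("reused: %s\n", a == b ? "yes" : "no");
        free(b);
        return 0;
}
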
diff --git a/arch/x86_64/Makefile b/arch/x86_64/Makefile
index 29617ae3926d..128561d3e876 100644
--- a/arch/x86_64/Makefile
+++ b/arch/x86_64/Makefile
@@ -76,7 +76,8 @@ head-y := arch/x86_64/kernel/head.o arch/x86_64/kernel/head64.o arch/x86_64/kern
76libs-y += arch/x86_64/lib/ 76libs-y += arch/x86_64/lib/
77core-y += arch/x86_64/kernel/ \ 77core-y += arch/x86_64/kernel/ \
78 arch/x86_64/mm/ \ 78 arch/x86_64/mm/ \
79 arch/x86_64/crypto/ 79 arch/x86_64/crypto/ \
80 arch/x86_64/vdso/
80core-$(CONFIG_IA32_EMULATION) += arch/x86_64/ia32/ 81core-$(CONFIG_IA32_EMULATION) += arch/x86_64/ia32/
81drivers-$(CONFIG_PCI) += arch/x86_64/pci/ 82drivers-$(CONFIG_PCI) += arch/x86_64/pci/
82drivers-$(CONFIG_OPROFILE) += arch/x86_64/oprofile/ 83drivers-$(CONFIG_OPROFILE) += arch/x86_64/oprofile/
diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig
index 40178e5c3104..b7c4cd04bfc3 100644
--- a/arch/x86_64/defconfig
+++ b/arch/x86_64/defconfig
@@ -1,19 +1,22 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.22-rc2 3# Linux kernel version: 2.6.22-git14
4# Mon May 21 13:23:40 2007 4# Fri Jul 20 09:53:15 2007
5# 5#
6CONFIG_X86_64=y 6CONFIG_X86_64=y
7CONFIG_64BIT=y 7CONFIG_64BIT=y
8CONFIG_X86=y 8CONFIG_X86=y
9CONFIG_GENERIC_TIME=y 9CONFIG_GENERIC_TIME=y
10CONFIG_GENERIC_TIME_VSYSCALL=y 10CONFIG_GENERIC_TIME_VSYSCALL=y
11CONFIG_GENERIC_CMOS_UPDATE=y
11CONFIG_ZONE_DMA32=y 12CONFIG_ZONE_DMA32=y
12CONFIG_LOCKDEP_SUPPORT=y 13CONFIG_LOCKDEP_SUPPORT=y
13CONFIG_STACKTRACE_SUPPORT=y 14CONFIG_STACKTRACE_SUPPORT=y
14CONFIG_SEMAPHORE_SLEEPERS=y 15CONFIG_SEMAPHORE_SLEEPERS=y
15CONFIG_MMU=y 16CONFIG_MMU=y
16CONFIG_ZONE_DMA=y 17CONFIG_ZONE_DMA=y
18CONFIG_QUICKLIST=y
19CONFIG_NR_QUICK=2
17CONFIG_RWSEM_GENERIC_SPINLOCK=y 20CONFIG_RWSEM_GENERIC_SPINLOCK=y
18CONFIG_GENERIC_HWEIGHT=y 21CONFIG_GENERIC_HWEIGHT=y
19CONFIG_GENERIC_CALIBRATE_DELAY=y 22CONFIG_GENERIC_CALIBRATE_DELAY=y
@@ -44,19 +47,18 @@ CONFIG_LOCALVERSION=""
44CONFIG_LOCALVERSION_AUTO=y 47CONFIG_LOCALVERSION_AUTO=y
45CONFIG_SWAP=y 48CONFIG_SWAP=y
46CONFIG_SYSVIPC=y 49CONFIG_SYSVIPC=y
47# CONFIG_IPC_NS is not set
48CONFIG_SYSVIPC_SYSCTL=y 50CONFIG_SYSVIPC_SYSCTL=y
49CONFIG_POSIX_MQUEUE=y 51CONFIG_POSIX_MQUEUE=y
50# CONFIG_BSD_PROCESS_ACCT is not set 52# CONFIG_BSD_PROCESS_ACCT is not set
51# CONFIG_TASKSTATS is not set 53# CONFIG_TASKSTATS is not set
52# CONFIG_UTS_NS is not set 54# CONFIG_USER_NS is not set
53# CONFIG_AUDIT is not set 55# CONFIG_AUDIT is not set
54CONFIG_IKCONFIG=y 56CONFIG_IKCONFIG=y
55CONFIG_IKCONFIG_PROC=y 57CONFIG_IKCONFIG_PROC=y
56CONFIG_LOG_BUF_SHIFT=18 58CONFIG_LOG_BUF_SHIFT=18
57# CONFIG_CPUSETS is not set 59# CONFIG_CPUSETS is not set
58CONFIG_SYSFS_DEPRECATED=y 60CONFIG_SYSFS_DEPRECATED=y
59# CONFIG_RELAY is not set 61CONFIG_RELAY=y
60CONFIG_BLK_DEV_INITRD=y 62CONFIG_BLK_DEV_INITRD=y
61CONFIG_INITRAMFS_SOURCE="" 63CONFIG_INITRAMFS_SOURCE=""
62CONFIG_CC_OPTIMIZE_FOR_SIZE=y 64CONFIG_CC_OPTIMIZE_FOR_SIZE=y
@@ -86,10 +88,6 @@ CONFIG_SLAB=y
86CONFIG_RT_MUTEXES=y 88CONFIG_RT_MUTEXES=y
87# CONFIG_TINY_SHMEM is not set 89# CONFIG_TINY_SHMEM is not set
88CONFIG_BASE_SMALL=0 90CONFIG_BASE_SMALL=0
89
90#
91# Loadable module support
92#
93CONFIG_MODULES=y 91CONFIG_MODULES=y
94CONFIG_MODULE_UNLOAD=y 92CONFIG_MODULE_UNLOAD=y
95CONFIG_MODULE_FORCE_UNLOAD=y 93CONFIG_MODULE_FORCE_UNLOAD=y
@@ -97,12 +95,9 @@ CONFIG_MODULE_FORCE_UNLOAD=y
97# CONFIG_MODULE_SRCVERSION_ALL is not set 95# CONFIG_MODULE_SRCVERSION_ALL is not set
98# CONFIG_KMOD is not set 96# CONFIG_KMOD is not set
99CONFIG_STOP_MACHINE=y 97CONFIG_STOP_MACHINE=y
100
101#
102# Block layer
103#
104CONFIG_BLOCK=y 98CONFIG_BLOCK=y
105# CONFIG_BLK_DEV_IO_TRACE is not set 99# CONFIG_BLK_DEV_IO_TRACE is not set
100# CONFIG_BLK_DEV_BSG is not set
106 101
107# 102#
108# IO Schedulers 103# IO Schedulers
@@ -165,9 +160,12 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
165CONFIG_MIGRATION=y 160CONFIG_MIGRATION=y
166CONFIG_RESOURCES_64BIT=y 161CONFIG_RESOURCES_64BIT=y
167CONFIG_ZONE_DMA_FLAG=1 162CONFIG_ZONE_DMA_FLAG=1
163CONFIG_BOUNCE=y
164CONFIG_VIRT_TO_BUS=y
168CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y 165CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
169CONFIG_OUT_OF_LINE_PFN_TO_PAGE=y 166CONFIG_OUT_OF_LINE_PFN_TO_PAGE=y
170CONFIG_NR_CPUS=32 167CONFIG_NR_CPUS=32
168CONFIG_PHYSICAL_ALIGN=0x200000
171CONFIG_HOTPLUG_CPU=y 169CONFIG_HOTPLUG_CPU=y
172CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y 170CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
173CONFIG_HPET_TIMER=y 171CONFIG_HPET_TIMER=y
@@ -180,7 +178,7 @@ CONFIG_X86_MCE_INTEL=y
180CONFIG_X86_MCE_AMD=y 178CONFIG_X86_MCE_AMD=y
181# CONFIG_KEXEC is not set 179# CONFIG_KEXEC is not set
182# CONFIG_CRASH_DUMP is not set 180# CONFIG_CRASH_DUMP is not set
183CONFIG_RELOCATABLE=y 181# CONFIG_RELOCATABLE is not set
184CONFIG_PHYSICAL_START=0x200000 182CONFIG_PHYSICAL_START=0x200000
185CONFIG_SECCOMP=y 183CONFIG_SECCOMP=y
186# CONFIG_CC_STACKPROTECTOR is not set 184# CONFIG_CC_STACKPROTECTOR is not set
@@ -201,7 +199,6 @@ CONFIG_GENERIC_PENDING_IRQ=y
201CONFIG_PM=y 199CONFIG_PM=y
202# CONFIG_PM_LEGACY is not set 200# CONFIG_PM_LEGACY is not set
203# CONFIG_PM_DEBUG is not set 201# CONFIG_PM_DEBUG is not set
204# CONFIG_PM_SYSFS_DEPRECATED is not set
205CONFIG_SOFTWARE_SUSPEND=y 202CONFIG_SOFTWARE_SUSPEND=y
206CONFIG_PM_STD_PARTITION="" 203CONFIG_PM_STD_PARTITION=""
207CONFIG_SUSPEND_SMP=y 204CONFIG_SUSPEND_SMP=y
@@ -248,7 +245,7 @@ CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
248# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set 245# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
249CONFIG_CPU_FREQ_GOV_USERSPACE=y 246CONFIG_CPU_FREQ_GOV_USERSPACE=y
250CONFIG_CPU_FREQ_GOV_ONDEMAND=y 247CONFIG_CPU_FREQ_GOV_ONDEMAND=y
251# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set 248CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
252 249
253# 250#
254# CPUFreq processor drivers 251# CPUFreq processor drivers
@@ -351,20 +348,8 @@ CONFIG_IPV6_SIT=y
351# CONFIG_IPV6_MULTIPLE_TABLES is not set 348# CONFIG_IPV6_MULTIPLE_TABLES is not set
352# CONFIG_NETWORK_SECMARK is not set 349# CONFIG_NETWORK_SECMARK is not set
353# CONFIG_NETFILTER is not set 350# CONFIG_NETFILTER is not set
354
355#
356# DCCP Configuration (EXPERIMENTAL)
357#
358# CONFIG_IP_DCCP is not set 351# CONFIG_IP_DCCP is not set
359
360#
361# SCTP Configuration (EXPERIMENTAL)
362#
363# CONFIG_IP_SCTP is not set 352# CONFIG_IP_SCTP is not set
364
365#
366# TIPC Configuration (EXPERIMENTAL)
367#
368# CONFIG_TIPC is not set 353# CONFIG_TIPC is not set
369# CONFIG_ATM is not set 354# CONFIG_ATM is not set
370# CONFIG_BRIDGE is not set 355# CONFIG_BRIDGE is not set
@@ -401,6 +386,7 @@ CONFIG_IPV6_SIT=y
401# CONFIG_MAC80211 is not set 386# CONFIG_MAC80211 is not set
402# CONFIG_IEEE80211 is not set 387# CONFIG_IEEE80211 is not set
403# CONFIG_RFKILL is not set 388# CONFIG_RFKILL is not set
389# CONFIG_NET_9P is not set
404 390
405# 391#
406# Device Drivers 392# Device Drivers
@@ -415,21 +401,9 @@ CONFIG_FW_LOADER=y
415# CONFIG_DEBUG_DRIVER is not set 401# CONFIG_DEBUG_DRIVER is not set
416# CONFIG_DEBUG_DEVRES is not set 402# CONFIG_DEBUG_DEVRES is not set
417# CONFIG_SYS_HYPERVISOR is not set 403# CONFIG_SYS_HYPERVISOR is not set
418
419#
420# Connector - unified userspace <-> kernelspace linker
421#
422# CONFIG_CONNECTOR is not set 404# CONFIG_CONNECTOR is not set
423# CONFIG_MTD is not set 405# CONFIG_MTD is not set
424
425#
426# Parallel port support
427#
428# CONFIG_PARPORT is not set 406# CONFIG_PARPORT is not set
429
430#
431# Plug and Play support
432#
433CONFIG_PNP=y 407CONFIG_PNP=y
434# CONFIG_PNP_DEBUG is not set 408# CONFIG_PNP_DEBUG is not set
435 409
@@ -437,10 +411,7 @@ CONFIG_PNP=y
437# Protocols 411# Protocols
438# 412#
439CONFIG_PNPACPI=y 413CONFIG_PNPACPI=y
440 414CONFIG_BLK_DEV=y
441#
442# Block devices
443#
444CONFIG_BLK_DEV_FD=y 415CONFIG_BLK_DEV_FD=y
445# CONFIG_BLK_CPQ_DA is not set 416# CONFIG_BLK_CPQ_DA is not set
446# CONFIG_BLK_CPQ_CISS_DA is not set 417# CONFIG_BLK_CPQ_CISS_DA is not set
@@ -458,17 +429,14 @@ CONFIG_BLK_DEV_RAM_SIZE=4096
458CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024 429CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
459# CONFIG_CDROM_PKTCDVD is not set 430# CONFIG_CDROM_PKTCDVD is not set
460# CONFIG_ATA_OVER_ETH is not set 431# CONFIG_ATA_OVER_ETH is not set
461 432CONFIG_MISC_DEVICES=y
462#
463# Misc devices
464#
465# CONFIG_IBM_ASM is not set 433# CONFIG_IBM_ASM is not set
466# CONFIG_PHANTOM is not set 434# CONFIG_PHANTOM is not set
435# CONFIG_EEPROM_93CX6 is not set
467# CONFIG_SGI_IOC4 is not set 436# CONFIG_SGI_IOC4 is not set
468# CONFIG_TIFM_CORE is not set 437# CONFIG_TIFM_CORE is not set
469# CONFIG_SONY_LAPTOP is not set 438# CONFIG_SONY_LAPTOP is not set
470# CONFIG_THINKPAD_ACPI is not set 439# CONFIG_THINKPAD_ACPI is not set
471# CONFIG_BLINK is not set
472CONFIG_IDE=y 440CONFIG_IDE=y
473CONFIG_BLK_DEV_IDE=y 441CONFIG_BLK_DEV_IDE=y
474 442
@@ -539,6 +507,7 @@ CONFIG_BLK_DEV_IDEDMA=y
539# 507#
540# CONFIG_RAID_ATTRS is not set 508# CONFIG_RAID_ATTRS is not set
541CONFIG_SCSI=y 509CONFIG_SCSI=y
510CONFIG_SCSI_DMA=y
542# CONFIG_SCSI_TGT is not set 511# CONFIG_SCSI_TGT is not set
543CONFIG_SCSI_NETLINK=y 512CONFIG_SCSI_NETLINK=y
544# CONFIG_SCSI_PROC_FS is not set 513# CONFIG_SCSI_PROC_FS is not set
@@ -590,11 +559,9 @@ CONFIG_AIC79XX_DEBUG_MASK=0
590# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set 559# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set
591# CONFIG_SCSI_AIC94XX is not set 560# CONFIG_SCSI_AIC94XX is not set
592# CONFIG_SCSI_ARCMSR is not set 561# CONFIG_SCSI_ARCMSR is not set
593CONFIG_MEGARAID_NEWGEN=y 562# CONFIG_MEGARAID_NEWGEN is not set
594CONFIG_MEGARAID_MM=y
595CONFIG_MEGARAID_MAILBOX=y
596# CONFIG_MEGARAID_LEGACY is not set 563# CONFIG_MEGARAID_LEGACY is not set
597CONFIG_MEGARAID_SAS=y 564# CONFIG_MEGARAID_SAS is not set
598# CONFIG_SCSI_HPTIOP is not set 565# CONFIG_SCSI_HPTIOP is not set
599# CONFIG_SCSI_BUSLOGIC is not set 566# CONFIG_SCSI_BUSLOGIC is not set
600# CONFIG_SCSI_DMX3191D is not set 567# CONFIG_SCSI_DMX3191D is not set
@@ -614,7 +581,6 @@ CONFIG_MEGARAID_SAS=y
614# CONFIG_SCSI_DC395x is not set 581# CONFIG_SCSI_DC395x is not set
615# CONFIG_SCSI_DC390T is not set 582# CONFIG_SCSI_DC390T is not set
616# CONFIG_SCSI_DEBUG is not set 583# CONFIG_SCSI_DEBUG is not set
617# CONFIG_SCSI_ESP_CORE is not set
618# CONFIG_SCSI_SRP is not set 584# CONFIG_SCSI_SRP is not set
619CONFIG_ATA=y 585CONFIG_ATA=y
620# CONFIG_ATA_NONSTANDARD is not set 586# CONFIG_ATA_NONSTANDARD is not set
@@ -671,10 +637,6 @@ CONFIG_SATA_VIA=y
671# CONFIG_PATA_SIS is not set 637# CONFIG_PATA_SIS is not set
672# CONFIG_PATA_VIA is not set 638# CONFIG_PATA_VIA is not set
673# CONFIG_PATA_WINBOND is not set 639# CONFIG_PATA_WINBOND is not set
674
675#
676# Multi-device support (RAID and LVM)
677#
678CONFIG_MD=y 640CONFIG_MD=y
679# CONFIG_BLK_DEV_MD is not set 641# CONFIG_BLK_DEV_MD is not set
680CONFIG_BLK_DEV_DM=y 642CONFIG_BLK_DEV_DM=y
@@ -692,7 +654,7 @@ CONFIG_BLK_DEV_DM=y
692CONFIG_FUSION=y 654CONFIG_FUSION=y
693CONFIG_FUSION_SPI=y 655CONFIG_FUSION_SPI=y
694# CONFIG_FUSION_FC is not set 656# CONFIG_FUSION_FC is not set
695CONFIG_FUSION_SAS=y 657# CONFIG_FUSION_SAS is not set
696CONFIG_FUSION_MAX_SGE=128 658CONFIG_FUSION_MAX_SGE=128
697# CONFIG_FUSION_CTL is not set 659# CONFIG_FUSION_CTL is not set
698 660
@@ -710,7 +672,10 @@ CONFIG_IEEE1394=y
710# 672#
711# Controllers 673# Controllers
712# 674#
713# CONFIG_IEEE1394_PCILYNX is not set 675
676#
677# Texas Instruments PCILynx requires I2C
678#
714CONFIG_IEEE1394_OHCI1394=y 679CONFIG_IEEE1394_OHCI1394=y
715 680
716# 681#
@@ -722,32 +687,19 @@ CONFIG_IEEE1394_OHCI1394=y
722# CONFIG_IEEE1394_ETH1394 is not set 687# CONFIG_IEEE1394_ETH1394 is not set
723# CONFIG_IEEE1394_DV1394 is not set 688# CONFIG_IEEE1394_DV1394 is not set
724CONFIG_IEEE1394_RAWIO=y 689CONFIG_IEEE1394_RAWIO=y
725
726#
727# I2O device support
728#
729# CONFIG_I2O is not set 690# CONFIG_I2O is not set
730# CONFIG_MACINTOSH_DRIVERS is not set 691CONFIG_MACINTOSH_DRIVERS=y
731 692# CONFIG_MAC_EMUMOUSEBTN is not set
732#
733# Network device support
734#
735CONFIG_NETDEVICES=y 693CONFIG_NETDEVICES=y
694CONFIG_NETDEVICES_MULTIQUEUE=y
736# CONFIG_DUMMY is not set 695# CONFIG_DUMMY is not set
737# CONFIG_BONDING is not set 696# CONFIG_BONDING is not set
697# CONFIG_MACVLAN is not set
738# CONFIG_EQUALIZER is not set 698# CONFIG_EQUALIZER is not set
739CONFIG_TUN=y 699CONFIG_TUN=y
740# CONFIG_NET_SB1000 is not set 700# CONFIG_NET_SB1000 is not set
741
742#
743# ARCnet devices
744#
745# CONFIG_ARCNET is not set 701# CONFIG_ARCNET is not set
746# CONFIG_PHYLIB is not set 702# CONFIG_PHYLIB is not set
747
748#
749# Ethernet (10 or 100Mbit)
750#
751CONFIG_NET_ETHERNET=y 703CONFIG_NET_ETHERNET=y
752CONFIG_MII=y 704CONFIG_MII=y
753# CONFIG_HAPPYMEAL is not set 705# CONFIG_HAPPYMEAL is not set
@@ -756,10 +708,6 @@ CONFIG_MII=y
756CONFIG_NET_VENDOR_3COM=y 708CONFIG_NET_VENDOR_3COM=y
757CONFIG_VORTEX=y 709CONFIG_VORTEX=y
758# CONFIG_TYPHOON is not set 710# CONFIG_TYPHOON is not set
759
760#
761# Tulip family network device support
762#
763CONFIG_NET_TULIP=y 711CONFIG_NET_TULIP=y
764# CONFIG_DE2104X is not set 712# CONFIG_DE2104X is not set
765CONFIG_TULIP=y 713CONFIG_TULIP=y
@@ -773,7 +721,8 @@ CONFIG_TULIP=y
773# CONFIG_HP100 is not set 721# CONFIG_HP100 is not set
774CONFIG_NET_PCI=y 722CONFIG_NET_PCI=y
775# CONFIG_PCNET32 is not set 723# CONFIG_PCNET32 is not set
776# CONFIG_AMD8111_ETH is not set 724CONFIG_AMD8111_ETH=y
725# CONFIG_AMD8111E_NAPI is not set
777# CONFIG_ADAPTEC_STARFIRE is not set 726# CONFIG_ADAPTEC_STARFIRE is not set
778CONFIG_B44=y 727CONFIG_B44=y
779CONFIG_FORCEDETH=y 728CONFIG_FORCEDETH=y
@@ -808,7 +757,6 @@ CONFIG_E1000=y
808# CONFIG_SIS190 is not set 757# CONFIG_SIS190 is not set
809# CONFIG_SKGE is not set 758# CONFIG_SKGE is not set
810# CONFIG_SKY2 is not set 759# CONFIG_SKY2 is not set
811# CONFIG_SK98LIN is not set
812# CONFIG_VIA_VELOCITY is not set 760# CONFIG_VIA_VELOCITY is not set
813CONFIG_TIGON3=y 761CONFIG_TIGON3=y
814CONFIG_BNX2=y 762CONFIG_BNX2=y
@@ -823,10 +771,6 @@ CONFIG_S2IO=m
823# CONFIG_MYRI10GE is not set 771# CONFIG_MYRI10GE is not set
824# CONFIG_NETXEN_NIC is not set 772# CONFIG_NETXEN_NIC is not set
825# CONFIG_MLX4_CORE is not set 773# CONFIG_MLX4_CORE is not set
826
827#
828# Token Ring devices
829#
830# CONFIG_TR is not set 774# CONFIG_TR is not set
831 775
832# 776#
@@ -855,15 +799,7 @@ CONFIG_NETCONSOLE=y
855CONFIG_NETPOLL=y 799CONFIG_NETPOLL=y
856# CONFIG_NETPOLL_TRAP is not set 800# CONFIG_NETPOLL_TRAP is not set
857CONFIG_NET_POLL_CONTROLLER=y 801CONFIG_NET_POLL_CONTROLLER=y
858
859#
860# ISDN subsystem
861#
862# CONFIG_ISDN is not set 802# CONFIG_ISDN is not set
863
864#
865# Telephony Support
866#
867# CONFIG_PHONE is not set 803# CONFIG_PHONE is not set
868 804
869# 805#
@@ -871,6 +807,7 @@ CONFIG_NET_POLL_CONTROLLER=y
871# 807#
872CONFIG_INPUT=y 808CONFIG_INPUT=y
873# CONFIG_INPUT_FF_MEMLESS is not set 809# CONFIG_INPUT_FF_MEMLESS is not set
810# CONFIG_INPUT_POLLDEV is not set
874 811
875# 812#
876# Userland interfaces 813# Userland interfaces
@@ -936,6 +873,7 @@ CONFIG_HW_CONSOLE=y
936# 873#
937CONFIG_SERIAL_8250=y 874CONFIG_SERIAL_8250=y
938CONFIG_SERIAL_8250_CONSOLE=y 875CONFIG_SERIAL_8250_CONSOLE=y
876CONFIG_FIX_EARLYCON_MEM=y
939CONFIG_SERIAL_8250_PCI=y 877CONFIG_SERIAL_8250_PCI=y
940CONFIG_SERIAL_8250_PNP=y 878CONFIG_SERIAL_8250_PNP=y
941CONFIG_SERIAL_8250_NR_UARTS=4 879CONFIG_SERIAL_8250_NR_UARTS=4
@@ -951,16 +889,11 @@ CONFIG_SERIAL_CORE_CONSOLE=y
951CONFIG_UNIX98_PTYS=y 889CONFIG_UNIX98_PTYS=y
952CONFIG_LEGACY_PTYS=y 890CONFIG_LEGACY_PTYS=y
953CONFIG_LEGACY_PTY_COUNT=256 891CONFIG_LEGACY_PTY_COUNT=256
954
955#
956# IPMI
957#
958# CONFIG_IPMI_HANDLER is not set 892# CONFIG_IPMI_HANDLER is not set
959# CONFIG_WATCHDOG is not set 893# CONFIG_WATCHDOG is not set
960CONFIG_HW_RANDOM=y 894CONFIG_HW_RANDOM=y
961CONFIG_HW_RANDOM_INTEL=y 895CONFIG_HW_RANDOM_INTEL=y
962CONFIG_HW_RANDOM_AMD=y 896CONFIG_HW_RANDOM_AMD=y
963# CONFIG_HW_RANDOM_GEODE is not set
964# CONFIG_NVRAM is not set 897# CONFIG_NVRAM is not set
965CONFIG_RTC=y 898CONFIG_RTC=y
966# CONFIG_R3964 is not set 899# CONFIG_R3964 is not set
@@ -979,127 +912,19 @@ CONFIG_HPET=y
979# CONFIG_HPET_RTC_IRQ is not set 912# CONFIG_HPET_RTC_IRQ is not set
980CONFIG_HPET_MMAP=y 913CONFIG_HPET_MMAP=y
981# CONFIG_HANGCHECK_TIMER is not set 914# CONFIG_HANGCHECK_TIMER is not set
982
983#
984# TPM devices
985#
986# CONFIG_TCG_TPM is not set 915# CONFIG_TCG_TPM is not set
987# CONFIG_TELCLOCK is not set 916# CONFIG_TELCLOCK is not set
988CONFIG_DEVPORT=y 917CONFIG_DEVPORT=y
989CONFIG_I2C=m 918# CONFIG_I2C is not set
990CONFIG_I2C_BOARDINFO=y
991CONFIG_I2C_CHARDEV=m
992
993#
994# I2C Algorithms
995#
996# CONFIG_I2C_ALGOBIT is not set
997# CONFIG_I2C_ALGOPCF is not set
998# CONFIG_I2C_ALGOPCA is not set
999
1000#
1001# I2C Hardware Bus support
1002#
1003# CONFIG_I2C_ALI1535 is not set
1004# CONFIG_I2C_ALI1563 is not set
1005# CONFIG_I2C_ALI15X3 is not set
1006# CONFIG_I2C_AMD756 is not set
1007# CONFIG_I2C_AMD8111 is not set
1008# CONFIG_I2C_I801 is not set
1009# CONFIG_I2C_I810 is not set
1010# CONFIG_I2C_PIIX4 is not set
1011# CONFIG_I2C_NFORCE2 is not set
1012# CONFIG_I2C_OCORES is not set
1013# CONFIG_I2C_PARPORT_LIGHT is not set
1014# CONFIG_I2C_PROSAVAGE is not set
1015# CONFIG_I2C_SAVAGE4 is not set
1016# CONFIG_I2C_SIMTEC is not set
1017# CONFIG_I2C_SIS5595 is not set
1018# CONFIG_I2C_SIS630 is not set
1019# CONFIG_I2C_SIS96X is not set
1020# CONFIG_I2C_STUB is not set
1021# CONFIG_I2C_TINY_USB is not set
1022# CONFIG_I2C_VIA is not set
1023# CONFIG_I2C_VIAPRO is not set
1024# CONFIG_I2C_VOODOO3 is not set
1025
1026#
1027# Miscellaneous I2C Chip support
1028#
1029# CONFIG_SENSORS_DS1337 is not set
1030# CONFIG_SENSORS_DS1374 is not set
1031# CONFIG_SENSORS_EEPROM is not set
1032# CONFIG_SENSORS_PCF8574 is not set
1033# CONFIG_SENSORS_PCA9539 is not set
1034# CONFIG_SENSORS_PCF8591 is not set
1035# CONFIG_SENSORS_MAX6875 is not set
1036# CONFIG_I2C_DEBUG_CORE is not set
1037# CONFIG_I2C_DEBUG_ALGO is not set
1038# CONFIG_I2C_DEBUG_BUS is not set
1039# CONFIG_I2C_DEBUG_CHIP is not set
1040 919
1041# 920#
1042# SPI support 921# SPI support
1043# 922#
1044# CONFIG_SPI is not set 923# CONFIG_SPI is not set
1045# CONFIG_SPI_MASTER is not set 924# CONFIG_SPI_MASTER is not set
1046
1047#
1048# Dallas's 1-wire bus
1049#
1050# CONFIG_W1 is not set 925# CONFIG_W1 is not set
1051CONFIG_HWMON=y 926# CONFIG_POWER_SUPPLY is not set
1052# CONFIG_HWMON_VID is not set 927# CONFIG_HWMON is not set
1053# CONFIG_SENSORS_ABITUGURU is not set
1054# CONFIG_SENSORS_AD7418 is not set
1055# CONFIG_SENSORS_ADM1021 is not set
1056# CONFIG_SENSORS_ADM1025 is not set
1057# CONFIG_SENSORS_ADM1026 is not set
1058# CONFIG_SENSORS_ADM1029 is not set
1059# CONFIG_SENSORS_ADM1031 is not set
1060# CONFIG_SENSORS_ADM9240 is not set
1061# CONFIG_SENSORS_K8TEMP is not set
1062# CONFIG_SENSORS_ASB100 is not set
1063# CONFIG_SENSORS_ATXP1 is not set
1064# CONFIG_SENSORS_DS1621 is not set
1065# CONFIG_SENSORS_F71805F is not set
1066# CONFIG_SENSORS_FSCHER is not set
1067# CONFIG_SENSORS_FSCPOS is not set
1068# CONFIG_SENSORS_GL518SM is not set
1069# CONFIG_SENSORS_GL520SM is not set
1070CONFIG_SENSORS_CORETEMP=y
1071# CONFIG_SENSORS_IT87 is not set
1072# CONFIG_SENSORS_LM63 is not set
1073# CONFIG_SENSORS_LM75 is not set
1074# CONFIG_SENSORS_LM77 is not set
1075# CONFIG_SENSORS_LM78 is not set
1076# CONFIG_SENSORS_LM80 is not set
1077# CONFIG_SENSORS_LM83 is not set
1078# CONFIG_SENSORS_LM85 is not set
1079# CONFIG_SENSORS_LM87 is not set
1080# CONFIG_SENSORS_LM90 is not set
1081# CONFIG_SENSORS_LM92 is not set
1082# CONFIG_SENSORS_MAX1619 is not set
1083# CONFIG_SENSORS_MAX6650 is not set
1084# CONFIG_SENSORS_PC87360 is not set
1085# CONFIG_SENSORS_PC87427 is not set
1086# CONFIG_SENSORS_SIS5595 is not set
1087# CONFIG_SENSORS_SMSC47M1 is not set
1088# CONFIG_SENSORS_SMSC47M192 is not set
1089CONFIG_SENSORS_SMSC47B397=m
1090# CONFIG_SENSORS_VIA686A is not set
1091# CONFIG_SENSORS_VT1211 is not set
1092# CONFIG_SENSORS_VT8231 is not set
1093# CONFIG_SENSORS_W83781D is not set
1094# CONFIG_SENSORS_W83791D is not set
1095# CONFIG_SENSORS_W83792D is not set
1096# CONFIG_SENSORS_W83793 is not set
1097# CONFIG_SENSORS_W83L785TS is not set
1098# CONFIG_SENSORS_W83627HF is not set
1099# CONFIG_SENSORS_W83627EHF is not set
1100# CONFIG_SENSORS_HDAPS is not set
1101# CONFIG_SENSORS_APPLESMC is not set
1102# CONFIG_HWMON_DEBUG_CHIP is not set
1103 928
1104# 929#
1105# Multifunction device drivers 930# Multifunction device drivers
@@ -1149,15 +974,11 @@ CONFIG_SOUND=y
1149# Open Sound System 974# Open Sound System
1150# 975#
1151CONFIG_SOUND_PRIME=y 976CONFIG_SOUND_PRIME=y
1152# CONFIG_OSS_OBSOLETE is not set
1153# CONFIG_SOUND_TRIDENT is not set 977# CONFIG_SOUND_TRIDENT is not set
1154# CONFIG_SOUND_MSNDCLAS is not set 978# CONFIG_SOUND_MSNDCLAS is not set
1155# CONFIG_SOUND_MSNDPIN is not set 979# CONFIG_SOUND_MSNDPIN is not set
1156# CONFIG_SOUND_OSS is not set 980# CONFIG_SOUND_OSS is not set
1157 981CONFIG_HID_SUPPORT=y
1158#
1159# HID Devices
1160#
1161CONFIG_HID=y 982CONFIG_HID=y
1162# CONFIG_HID_DEBUG is not set 983# CONFIG_HID_DEBUG is not set
1163 984
@@ -1168,10 +989,7 @@ CONFIG_USB_HID=y
1168# CONFIG_USB_HIDINPUT_POWERBOOK is not set 989# CONFIG_USB_HIDINPUT_POWERBOOK is not set
1169# CONFIG_HID_FF is not set 990# CONFIG_HID_FF is not set
1170# CONFIG_USB_HIDDEV is not set 991# CONFIG_USB_HIDDEV is not set
1171 992CONFIG_USB_SUPPORT=y
1172#
1173# USB support
1174#
1175CONFIG_USB_ARCH_HAS_HCD=y 993CONFIG_USB_ARCH_HAS_HCD=y
1176CONFIG_USB_ARCH_HAS_OHCI=y 994CONFIG_USB_ARCH_HAS_OHCI=y
1177CONFIG_USB_ARCH_HAS_EHCI=y 995CONFIG_USB_ARCH_HAS_EHCI=y
@@ -1185,6 +1003,7 @@ CONFIG_USB_DEVICEFS=y
1185# CONFIG_USB_DEVICE_CLASS is not set 1003# CONFIG_USB_DEVICE_CLASS is not set
1186# CONFIG_USB_DYNAMIC_MINORS is not set 1004# CONFIG_USB_DYNAMIC_MINORS is not set
1187# CONFIG_USB_SUSPEND is not set 1005# CONFIG_USB_SUSPEND is not set
1006# CONFIG_USB_PERSIST is not set
1188# CONFIG_USB_OTG is not set 1007# CONFIG_USB_OTG is not set
1189 1008
1190# 1009#
@@ -1194,7 +1013,6 @@ CONFIG_USB_EHCI_HCD=y
1194# CONFIG_USB_EHCI_SPLIT_ISO is not set 1013# CONFIG_USB_EHCI_SPLIT_ISO is not set
1195# CONFIG_USB_EHCI_ROOT_HUB_TT is not set 1014# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
1196# CONFIG_USB_EHCI_TT_NEWSCHED is not set 1015# CONFIG_USB_EHCI_TT_NEWSCHED is not set
1197# CONFIG_USB_EHCI_BIG_ENDIAN_MMIO is not set
1198# CONFIG_USB_ISP116X_HCD is not set 1016# CONFIG_USB_ISP116X_HCD is not set
1199CONFIG_USB_OHCI_HCD=y 1017CONFIG_USB_OHCI_HCD=y
1200# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set 1018# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
@@ -1202,6 +1020,7 @@ CONFIG_USB_OHCI_HCD=y
1202CONFIG_USB_OHCI_LITTLE_ENDIAN=y 1020CONFIG_USB_OHCI_LITTLE_ENDIAN=y
1203CONFIG_USB_UHCI_HCD=y 1021CONFIG_USB_UHCI_HCD=y
1204# CONFIG_USB_SL811_HCD is not set 1022# CONFIG_USB_SL811_HCD is not set
1023# CONFIG_USB_R8A66597_HCD is not set
1205 1024
1206# 1025#
1207# USB Device Class drivers 1026# USB Device Class drivers
@@ -1292,15 +1111,7 @@ CONFIG_USB_MON=y
1292# 1111#
1293# LED Triggers 1112# LED Triggers
1294# 1113#
1295
1296#
1297# InfiniBand support
1298#
1299# CONFIG_INFINIBAND is not set 1114# CONFIG_INFINIBAND is not set
1300
1301#
1302# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
1303#
1304# CONFIG_EDAC is not set 1115# CONFIG_EDAC is not set
1305 1116
1306# 1117#
@@ -1320,11 +1131,13 @@ CONFIG_USB_MON=y
1320# 1131#
1321# DMA Devices 1132# DMA Devices
1322# 1133#
1134CONFIG_VIRTUALIZATION=y
1135# CONFIG_KVM is not set
1323 1136
1324# 1137#
1325# Virtualization 1138# Userspace I/O
1326# 1139#
1327# CONFIG_KVM is not set 1140# CONFIG_UIO is not set
1328 1141
1329# 1142#
1330# Firmware Drivers 1143# Firmware Drivers
@@ -1332,6 +1145,7 @@ CONFIG_USB_MON=y
1332# CONFIG_EDD is not set 1145# CONFIG_EDD is not set
1333# CONFIG_DELL_RBU is not set 1146# CONFIG_DELL_RBU is not set
1334# CONFIG_DCDBAS is not set 1147# CONFIG_DCDBAS is not set
1148CONFIG_DMIID=y
1335 1149
1336# 1150#
1337# File systems 1151# File systems
@@ -1447,7 +1261,6 @@ CONFIG_SUNRPC=y
1447# CONFIG_NCP_FS is not set 1261# CONFIG_NCP_FS is not set
1448# CONFIG_CODA_FS is not set 1262# CONFIG_CODA_FS is not set
1449# CONFIG_AFS_FS is not set 1263# CONFIG_AFS_FS is not set
1450# CONFIG_9P_FS is not set
1451 1264
1452# 1265#
1453# Partition Types 1266# Partition Types
@@ -1524,8 +1337,9 @@ CONFIG_DEBUG_FS=y
1524CONFIG_DEBUG_KERNEL=y 1337CONFIG_DEBUG_KERNEL=y
1525# CONFIG_DEBUG_SHIRQ is not set 1338# CONFIG_DEBUG_SHIRQ is not set
1526CONFIG_DETECT_SOFTLOCKUP=y 1339CONFIG_DETECT_SOFTLOCKUP=y
1340# CONFIG_SCHED_DEBUG is not set
1527# CONFIG_SCHEDSTATS is not set 1341# CONFIG_SCHEDSTATS is not set
1528# CONFIG_TIMER_STATS is not set 1342CONFIG_TIMER_STATS=y
1529# CONFIG_DEBUG_SLAB is not set 1343# CONFIG_DEBUG_SLAB is not set
1530# CONFIG_DEBUG_RT_MUTEXES is not set 1344# CONFIG_DEBUG_RT_MUTEXES is not set
1531# CONFIG_RT_MUTEX_TESTER is not set 1345# CONFIG_RT_MUTEX_TESTER is not set
@@ -1533,6 +1347,7 @@ CONFIG_DETECT_SOFTLOCKUP=y
1533# CONFIG_DEBUG_MUTEXES is not set 1347# CONFIG_DEBUG_MUTEXES is not set
1534# CONFIG_DEBUG_LOCK_ALLOC is not set 1348# CONFIG_DEBUG_LOCK_ALLOC is not set
1535# CONFIG_PROVE_LOCKING is not set 1349# CONFIG_PROVE_LOCKING is not set
1350# CONFIG_LOCK_STAT is not set
1536# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1351# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1537# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 1352# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1538# CONFIG_DEBUG_KOBJECT is not set 1353# CONFIG_DEBUG_KOBJECT is not set
@@ -1541,8 +1356,6 @@ CONFIG_DEBUG_BUGVERBOSE=y
1541# CONFIG_DEBUG_VM is not set 1356# CONFIG_DEBUG_VM is not set
1542# CONFIG_DEBUG_LIST is not set 1357# CONFIG_DEBUG_LIST is not set
1543# CONFIG_FRAME_POINTER is not set 1358# CONFIG_FRAME_POINTER is not set
1544CONFIG_UNWIND_INFO=y
1545CONFIG_STACK_UNWIND=y
1546# CONFIG_FORCED_INLINING is not set 1359# CONFIG_FORCED_INLINING is not set
1547# CONFIG_RCU_TORTURE_TEST is not set 1360# CONFIG_RCU_TORTURE_TEST is not set
1548# CONFIG_LKDTM is not set 1361# CONFIG_LKDTM is not set
@@ -1557,10 +1370,6 @@ CONFIG_DEBUG_STACKOVERFLOW=y
1557# 1370#
1558# CONFIG_KEYS is not set 1371# CONFIG_KEYS is not set
1559# CONFIG_SECURITY is not set 1372# CONFIG_SECURITY is not set
1560
1561#
1562# Cryptographic options
1563#
1564# CONFIG_CRYPTO is not set 1373# CONFIG_CRYPTO is not set
1565 1374
1566# 1375#
@@ -1571,6 +1380,7 @@ CONFIG_BITREVERSE=y
1571# CONFIG_CRC16 is not set 1380# CONFIG_CRC16 is not set
1572# CONFIG_CRC_ITU_T is not set 1381# CONFIG_CRC_ITU_T is not set
1573CONFIG_CRC32=y 1382CONFIG_CRC32=y
1383# CONFIG_CRC7 is not set
1574# CONFIG_LIBCRC32C is not set 1384# CONFIG_LIBCRC32C is not set
1575CONFIG_ZLIB_INFLATE=y 1385CONFIG_ZLIB_INFLATE=y
1576CONFIG_PLIST=y 1386CONFIG_PLIST=y
diff --git a/arch/x86_64/ia32/ia32_binfmt.c b/arch/x86_64/ia32/ia32_binfmt.c
index ed56a8806eab..b70f3e7cf06c 100644
--- a/arch/x86_64/ia32/ia32_binfmt.c
+++ b/arch/x86_64/ia32/ia32_binfmt.c
@@ -38,6 +38,7 @@
38 38
39int sysctl_vsyscall32 = 1; 39int sysctl_vsyscall32 = 1;
40 40
41#undef ARCH_DLINFO
41#define ARCH_DLINFO do { \ 42#define ARCH_DLINFO do { \
42 if (sysctl_vsyscall32) { \ 43 if (sysctl_vsyscall32) { \
43 NEW_AUX_ENT(AT_SYSINFO, (u32)(u64)VSYSCALL32_VSYSCALL); \ 44 NEW_AUX_ENT(AT_SYSINFO, (u32)(u64)VSYSCALL32_VSYSCALL); \
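
The added #undef silences a macro-redefinition warning: ARCH_DLINFO is presumably already defined by the native ELF headers this file includes, and ia32_binfmt.c then overrides it for the compat vDSO entries. The same pattern in miniature:

#include <stdio.h>

#define GREETING "generic greeting"

#undef GREETING                 /* ia32_binfmt.c does the same for ARCH_DLINFO */
#define GREETING "ia32-specific greeting"

int main(void)
{
        printf("%s\n", GREETING);
        return 0;
}
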
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index 3f66e970d86f..938278697e20 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -104,7 +104,7 @@ ENTRY(ia32_sysenter_target)
104 pushq %rax 104 pushq %rax
105 CFI_ADJUST_CFA_OFFSET 8 105 CFI_ADJUST_CFA_OFFSET 8
106 cld 106 cld
107 SAVE_ARGS 0,0,0 107 SAVE_ARGS 0,0,1
108 /* no need to do an access_ok check here because rbp has been 108 /* no need to do an access_ok check here because rbp has been
109 32bit zero extended */ 109 32bit zero extended */
1101: movl (%rbp),%r9d 1101: movl (%rbp),%r9d
@@ -294,7 +294,7 @@ ia32_badarg:
294 */ 294 */
295 295
296ENTRY(ia32_syscall) 296ENTRY(ia32_syscall)
297 CFI_STARTPROC simple 297 CFI_STARTPROC32 simple
298 CFI_SIGNAL_FRAME 298 CFI_SIGNAL_FRAME
299 CFI_DEF_CFA rsp,SS+8-RIP 299 CFI_DEF_CFA rsp,SS+8-RIP
300 /*CFI_REL_OFFSET ss,SS-RIP*/ 300 /*CFI_REL_OFFSET ss,SS-RIP*/
@@ -330,6 +330,7 @@ ia32_sysret:
330 330
331ia32_tracesys: 331ia32_tracesys:
332 SAVE_REST 332 SAVE_REST
333 CLEAR_RREGS
333 movq $-ENOSYS,RAX(%rsp) /* really needed? */ 334 movq $-ENOSYS,RAX(%rsp) /* really needed? */
334 movq %rsp,%rdi /* &pt_regs -> arg1 */ 335 movq %rsp,%rdi /* &pt_regs -> arg1 */
335 call syscall_trace_enter 336 call syscall_trace_enter
diff --git a/arch/x86_64/kernel/aperture.c b/arch/x86_64/kernel/aperture.c
index a3d450d6c15b..8f681cae7bf7 100644
--- a/arch/x86_64/kernel/aperture.c
+++ b/arch/x86_64/kernel/aperture.c
@@ -20,7 +20,7 @@
20#include <linux/ioport.h> 20#include <linux/ioport.h>
21#include <asm/e820.h> 21#include <asm/e820.h>
22#include <asm/io.h> 22#include <asm/io.h>
23#include <asm/proto.h> 23#include <asm/iommu.h>
24#include <asm/pci-direct.h> 24#include <asm/pci-direct.h>
25#include <asm/dma.h> 25#include <asm/dma.h>
26#include <asm/k8.h> 26#include <asm/k8.h>
@@ -214,7 +214,7 @@ void __init iommu_hole_init(void)
214 if (iommu_aperture_disabled || !fix_aperture || !early_pci_allowed()) 214 if (iommu_aperture_disabled || !fix_aperture || !early_pci_allowed())
215 return; 215 return;
216 216
217 printk("Checking aperture...\n"); 217 printk(KERN_INFO "Checking aperture...\n");
218 218
219 fix = 0; 219 fix = 0;
220 for (num = 24; num < 32; num++) { 220 for (num = 24; num < 32; num++) {
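
Besides switching to <asm/iommu.h>, the aperture.c hunk gives the printk an explicit log level. KERN_INFO is just a string literal that gets pasted onto the format by adjacent-string concatenation, so the fix is purely textual; illustrated below with printf standing in for printk (the "<6>" value mirrors the kernel's definition of that era, treat it as an assumption):

#include <stdio.h>

#define KERN_INFO "<6>"         /* assumed stand-in for the kernel's prefix */

int main(void)
{
        printf(KERN_INFO "Checking aperture...\n");
        return 0;
}
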
diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c
index 1b0e07bb8728..900ff38d68de 100644
--- a/arch/x86_64/kernel/apic.c
+++ b/arch/x86_64/kernel/apic.c
@@ -92,8 +92,9 @@ unsigned int safe_apic_wait_icr_idle(void)
92void enable_NMI_through_LVT0 (void * dummy) 92void enable_NMI_through_LVT0 (void * dummy)
93{ 93{
94 unsigned int v; 94 unsigned int v;
95 95
96 v = APIC_DM_NMI; /* unmask and set to NMI */ 96 /* unmask and set to NMI */
97 v = APIC_DM_NMI;
97 apic_write(APIC_LVT0, v); 98 apic_write(APIC_LVT0, v);
98} 99}
99 100
@@ -120,7 +121,7 @@ void ack_bad_irq(unsigned int irq)
120 * holds up an irq slot - in excessive cases (when multiple 121 * holds up an irq slot - in excessive cases (when multiple
121 * unexpected vectors occur) that might lock up the APIC 122 * unexpected vectors occur) that might lock up the APIC
122 * completely. 123 * completely.
123 * But don't ack when the APIC is disabled. -AK 124 * But don't ack when the APIC is disabled. -AK
124 */ 125 */
125 if (!disable_apic) 126 if (!disable_apic)
126 ack_APIC_irq(); 127 ack_APIC_irq();
@@ -616,7 +617,7 @@ early_param("apic", apic_set_verbosity);
616 * Detect and enable local APICs on non-SMP boards. 617 * Detect and enable local APICs on non-SMP boards.
617 * Original code written by Keir Fraser. 618 * Original code written by Keir Fraser.
618 * On AMD64 we trust the BIOS - if it says no APIC it is likely 619 * On AMD64 we trust the BIOS - if it says no APIC it is likely
619 * not correctly set up (usually the APIC timer won't work etc.) 620 * not correctly set up (usually the APIC timer won't work etc.)
620 */ 621 */
621 622
622static int __init detect_init_APIC (void) 623static int __init detect_init_APIC (void)
@@ -789,13 +790,13 @@ static void setup_APIC_timer(unsigned int clocks)
789 local_irq_save(flags); 790 local_irq_save(flags);
790 791
791 /* wait for irq slice */ 792 /* wait for irq slice */
792 if (hpet_address && hpet_use_timer) { 793 if (hpet_address && hpet_use_timer) {
793 int trigger = hpet_readl(HPET_T0_CMP); 794 int trigger = hpet_readl(HPET_T0_CMP);
794 while (hpet_readl(HPET_COUNTER) >= trigger) 795 while (hpet_readl(HPET_COUNTER) >= trigger)
795 /* do nothing */ ; 796 /* do nothing */ ;
796 while (hpet_readl(HPET_COUNTER) < trigger) 797 while (hpet_readl(HPET_COUNTER) < trigger)
797 /* do nothing */ ; 798 /* do nothing */ ;
798 } else { 799 } else {
799 int c1, c2; 800 int c1, c2;
800 outb_p(0x00, 0x43); 801 outb_p(0x00, 0x43);
801 c2 = inb_p(0x40); 802 c2 = inb_p(0x40);
@@ -881,10 +882,10 @@ static unsigned int calibration_result;
881 882
882void __init setup_boot_APIC_clock (void) 883void __init setup_boot_APIC_clock (void)
883{ 884{
884 if (disable_apic_timer) { 885 if (disable_apic_timer) {
885 printk(KERN_INFO "Disabling APIC timer\n"); 886 printk(KERN_INFO "Disabling APIC timer\n");
886 return; 887 return;
887 } 888 }
888 889
889 printk(KERN_INFO "Using local APIC timer interrupts.\n"); 890 printk(KERN_INFO "Using local APIC timer interrupts.\n");
890 using_apic_timer = 1; 891 using_apic_timer = 1;
@@ -990,8 +991,8 @@ int setup_profiling_timer(unsigned int multiplier)
990 return -EINVAL; 991 return -EINVAL;
991} 992}
992 993
993void setup_APIC_extened_lvt(unsigned char lvt_off, unsigned char vector, 994void setup_APIC_extended_lvt(unsigned char lvt_off, unsigned char vector,
994 unsigned char msg_type, unsigned char mask) 995 unsigned char msg_type, unsigned char mask)
995{ 996{
996 unsigned long reg = (lvt_off << 4) + K8_APIC_EXT_LVT_BASE; 997 unsigned long reg = (lvt_off << 4) + K8_APIC_EXT_LVT_BASE;
997 unsigned int v = (mask << 16) | (msg_type << 8) | vector; 998 unsigned int v = (mask << 16) | (msg_type << 8) | vector;
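As a side note on the hunk above: the value setup_APIC_extended_lvt() writes is just a packed 32-bit word, mask in bit 16, message type in bits 8-15, vector in bits 0-7, at a register offset derived from lvt_off. A standalone sketch of that packing follows; the base offset and vector number used here are placeholders, not the kernel's real constants.

#include <stdio.h>
#include <stdint.h>

/* Placeholder base offset; the real value lives in the kernel headers. */
#define EXT_LVT_BASE 0x500

static uint32_t pack_ext_lvt(uint8_t msg_type, uint8_t mask, uint8_t vector)
{
	/* mask in bit 16, message type in bits 8-15, vector in bits 0-7 */
	return ((uint32_t)mask << 16) | ((uint32_t)msg_type << 8) | vector;
}

int main(void)
{
	uint8_t lvt_off = 0;                       /* e.g. the threshold LVT entry */
	uint32_t reg = (lvt_off << 4) + EXT_LVT_BASE;
	uint32_t val = pack_ext_lvt(0 /* fixed */, 0 /* unmasked */, 0xf9 /* example vector */);

	printf("write 0x%08x to APIC register 0x%03x\n", (unsigned)val, (unsigned)reg);
	return 0;
}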
@@ -1128,20 +1129,6 @@ asmlinkage void smp_spurious_interrupt(void)
1128 if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f))) 1129 if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
1129 ack_APIC_irq(); 1130 ack_APIC_irq();
1130 1131
1131#if 0
1132 static unsigned long last_warning;
1133 static unsigned long skipped;
1134
1135 /* see sw-dev-man vol 3, chapter 7.4.13.5 */
1136 if (time_before(last_warning+30*HZ,jiffies)) {
1137 printk(KERN_INFO "spurious APIC interrupt on CPU#%d, %ld skipped.\n",
1138 smp_processor_id(), skipped);
1139 last_warning = jiffies;
1140 skipped = 0;
1141 } else {
1142 skipped++;
1143 }
1144#endif
1145 irq_exit(); 1132 irq_exit();
1146} 1133}
1147 1134
@@ -1173,11 +1160,11 @@ asmlinkage void smp_error_interrupt(void)
1173 7: Illegal register address 1160 7: Illegal register address
1174 */ 1161 */
1175 printk (KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n", 1162 printk (KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
1176 smp_processor_id(), v , v1); 1163 smp_processor_id(), v , v1);
1177 irq_exit(); 1164 irq_exit();
1178} 1165}
1179 1166
1180int disable_apic; 1167int disable_apic;
1181 1168
1182/* 1169/*
1183 * This initializes the IO-APIC and APIC hardware if this is 1170 * This initializes the IO-APIC and APIC hardware if this is
@@ -1185,11 +1172,11 @@ int disable_apic;
1185 */ 1172 */
1186int __init APIC_init_uniprocessor (void) 1173int __init APIC_init_uniprocessor (void)
1187{ 1174{
1188 if (disable_apic) { 1175 if (disable_apic) {
1189 printk(KERN_INFO "Apic disabled\n"); 1176 printk(KERN_INFO "Apic disabled\n");
1190 return -1; 1177 return -1;
1191 } 1178 }
1192 if (!cpu_has_apic) { 1179 if (!cpu_has_apic) {
1193 disable_apic = 1; 1180 disable_apic = 1;
1194 printk(KERN_INFO "Apic disabled by BIOS\n"); 1181 printk(KERN_INFO "Apic disabled by BIOS\n");
1195 return -1; 1182 return -1;
@@ -1211,8 +1198,8 @@ int __init APIC_init_uniprocessor (void)
1211 return 0; 1198 return 0;
1212} 1199}
1213 1200
1214static __init int setup_disableapic(char *str) 1201static __init int setup_disableapic(char *str)
1215{ 1202{
1216 disable_apic = 1; 1203 disable_apic = 1;
1217 clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability); 1204 clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
1218 return 0; 1205 return 0;
@@ -1220,10 +1207,10 @@ static __init int setup_disableapic(char *str)
1220early_param("disableapic", setup_disableapic); 1207early_param("disableapic", setup_disableapic);
1221 1208
1222/* same as disableapic, for compatibility */ 1209/* same as disableapic, for compatibility */
1223static __init int setup_nolapic(char *str) 1210static __init int setup_nolapic(char *str)
1224{ 1211{
1225 return setup_disableapic(str); 1212 return setup_disableapic(str);
1226} 1213}
1227early_param("nolapic", setup_nolapic); 1214early_param("nolapic", setup_nolapic);
1228 1215
1229static int __init parse_lapic_timer_c2_ok(char *arg) 1216static int __init parse_lapic_timer_c2_ok(char *arg)
@@ -1233,13 +1220,13 @@ static int __init parse_lapic_timer_c2_ok(char *arg)
1233} 1220}
1234early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok); 1221early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
1235 1222
1236static __init int setup_noapictimer(char *str) 1223static __init int setup_noapictimer(char *str)
1237{ 1224{
1238 if (str[0] != ' ' && str[0] != 0) 1225 if (str[0] != ' ' && str[0] != 0)
1239 return 0; 1226 return 0;
1240 disable_apic_timer = 1; 1227 disable_apic_timer = 1;
1241 return 1; 1228 return 1;
1242} 1229}
1243 1230
1244static __init int setup_apicmaintimer(char *str) 1231static __init int setup_apicmaintimer(char *str)
1245{ 1232{
@@ -1264,5 +1251,5 @@ static __init int setup_apicpmtimer(char *s)
1264} 1251}
1265__setup("apicpmtimer", setup_apicpmtimer); 1252__setup("apicpmtimer", setup_apicpmtimer);
1266 1253
1267__setup("noapictimer", setup_noapictimer); 1254__setup("noapictimer", setup_noapictimer);
1268 1255
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index 13c6c37610e0..0f4d5e209e9b 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -194,37 +194,6 @@ unsigned long __init e820_end_of_ram(void)
194} 194}
195 195
196/* 196/*
197 * Find the hole size in the range.
198 */
199unsigned long __init e820_hole_size(unsigned long start, unsigned long end)
200{
201 unsigned long ram = 0;
202 int i;
203
204 for (i = 0; i < e820.nr_map; i++) {
205 struct e820entry *ei = &e820.map[i];
206 unsigned long last, addr;
207
208 if (ei->type != E820_RAM ||
209 ei->addr+ei->size <= start ||
210 ei->addr >= end)
211 continue;
212
213 addr = round_up(ei->addr, PAGE_SIZE);
214 if (addr < start)
215 addr = start;
216
217 last = round_down(ei->addr + ei->size, PAGE_SIZE);
218 if (last >= end)
219 last = end;
220
221 if (last > addr)
222 ram += last - addr;
223 }
224 return ((end - start) - ram);
225}
226
227/*
228 * Mark e820 reserved areas as busy for the resource manager. 197 * Mark e820 reserved areas as busy for the resource manager.
229 */ 198 */
230void __init e820_reserve_resources(void) 199void __init e820_reserve_resources(void)
@@ -289,47 +258,61 @@ void __init e820_mark_nosave_regions(void)
289 } 258 }
290} 259}
291 260
292/* Walk the e820 map and register active regions within a node */ 261/*
293void __init 262 * Finds an active region in the address range from start_pfn to end_pfn and
294e820_register_active_regions(int nid, unsigned long start_pfn, 263 * returns its range in ei_startpfn and ei_endpfn for the e820 entry.
295 unsigned long end_pfn) 264 */
265static int __init e820_find_active_region(const struct e820entry *ei,
266 unsigned long start_pfn,
267 unsigned long end_pfn,
268 unsigned long *ei_startpfn,
269 unsigned long *ei_endpfn)
296{ 270{
297 int i; 271 *ei_startpfn = round_up(ei->addr, PAGE_SIZE) >> PAGE_SHIFT;
298 unsigned long ei_startpfn, ei_endpfn; 272 *ei_endpfn = round_down(ei->addr + ei->size, PAGE_SIZE) >> PAGE_SHIFT;
299 for (i = 0; i < e820.nr_map; i++) {
300 struct e820entry *ei = &e820.map[i];
301 ei_startpfn = round_up(ei->addr, PAGE_SIZE) >> PAGE_SHIFT;
302 ei_endpfn = round_down(ei->addr + ei->size, PAGE_SIZE)
303 >> PAGE_SHIFT;
304 273
305 /* Skip map entries smaller than a page */ 274 /* Skip map entries smaller than a page */
306 if (ei_startpfn >= ei_endpfn) 275 if (*ei_startpfn >= *ei_endpfn)
307 continue; 276 return 0;
308 277
309 /* Check if end_pfn_map should be updated */ 278 /* Check if end_pfn_map should be updated */
310 if (ei->type != E820_RAM && ei_endpfn > end_pfn_map) 279 if (ei->type != E820_RAM && *ei_endpfn > end_pfn_map)
311 end_pfn_map = ei_endpfn; 280 end_pfn_map = *ei_endpfn;
312 281
313 /* Skip if map is outside the node */ 282 /* Skip if map is outside the node */
314 if (ei->type != E820_RAM || 283 if (ei->type != E820_RAM || *ei_endpfn <= start_pfn ||
315 ei_endpfn <= start_pfn || 284 *ei_startpfn >= end_pfn)
316 ei_startpfn >= end_pfn) 285 return 0;
317 continue;
318 286
319 /* Check for overlaps */ 287 /* Check for overlaps */
320 if (ei_startpfn < start_pfn) 288 if (*ei_startpfn < start_pfn)
321 ei_startpfn = start_pfn; 289 *ei_startpfn = start_pfn;
322 if (ei_endpfn > end_pfn) 290 if (*ei_endpfn > end_pfn)
323 ei_endpfn = end_pfn; 291 *ei_endpfn = end_pfn;
324 292
325 /* Obey end_user_pfn to save on memmap */ 293 /* Obey end_user_pfn to save on memmap */
326 if (ei_startpfn >= end_user_pfn) 294 if (*ei_startpfn >= end_user_pfn)
327 continue; 295 return 0;
328 if (ei_endpfn > end_user_pfn) 296 if (*ei_endpfn > end_user_pfn)
329 ei_endpfn = end_user_pfn; 297 *ei_endpfn = end_user_pfn;
330 298
331 add_active_range(nid, ei_startpfn, ei_endpfn); 299 return 1;
332 } 300}
301
302/* Walk the e820 map and register active regions within a node */
303void __init
304e820_register_active_regions(int nid, unsigned long start_pfn,
305 unsigned long end_pfn)
306{
307 unsigned long ei_startpfn;
308 unsigned long ei_endpfn;
309 int i;
310
311 for (i = 0; i < e820.nr_map; i++)
312 if (e820_find_active_region(&e820.map[i],
313 start_pfn, end_pfn,
314 &ei_startpfn, &ei_endpfn))
315 add_active_range(nid, ei_startpfn, ei_endpfn);
333} 316}
334 317
335/* 318/*
@@ -350,12 +333,35 @@ void __init add_memory_region(unsigned long start, unsigned long size, int type)
350 e820.nr_map++; 333 e820.nr_map++;
351} 334}
352 335
336/*
337 * Find the hole size (in bytes) in the memory range.
338 * @start: starting address of the memory range to scan
339 * @end: ending address of the memory range to scan
340 */
341unsigned long __init e820_hole_size(unsigned long start, unsigned long end)
342{
343 unsigned long start_pfn = start >> PAGE_SHIFT;
344 unsigned long end_pfn = end >> PAGE_SHIFT;
345 unsigned long ei_startpfn;
346 unsigned long ei_endpfn;
347 unsigned long ram = 0;
348 int i;
349
350 for (i = 0; i < e820.nr_map; i++) {
351 if (e820_find_active_region(&e820.map[i],
352 start_pfn, end_pfn,
353 &ei_startpfn, &ei_endpfn))
354 ram += ei_endpfn - ei_startpfn;
355 }
356 return end - start - (ram << PAGE_SHIFT);
357}
358
353void __init e820_print_map(char *who) 359void __init e820_print_map(char *who)
354{ 360{
355 int i; 361 int i;
356 362
357 for (i = 0; i < e820.nr_map; i++) { 363 for (i = 0; i < e820.nr_map; i++) {
358 printk(" %s: %016Lx - %016Lx ", who, 364 printk(KERN_INFO " %s: %016Lx - %016Lx ", who,
359 (unsigned long long) e820.map[i].addr, 365 (unsigned long long) e820.map[i].addr,
360 (unsigned long long) (e820.map[i].addr + e820.map[i].size)); 366 (unsigned long long) (e820.map[i].addr + e820.map[i].size));
361 switch (e820.map[i].type) { 367 switch (e820.map[i].type) {
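The reworked e820_hole_size() above reduces to: clamp each RAM entry to whole pages and to the requested range, add up the RAM, and report hole = (end - start) - ram. A userspace sketch of the same arithmetic over a made-up memory map; the entries and the 4 KiB page size are illustrative only.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

struct entry { unsigned long addr, size; int ram; };

/* A toy "e820" map: 640 KiB low RAM, a reserved hole, then RAM up to 64 MiB. */
static struct entry map[] = {
	{ 0x00000000, 0x000a0000, 1 },
	{ 0x000f0000, 0x00010000, 0 },
	{ 0x00100000, 0x03f00000, 1 },
};

static unsigned long hole_size(unsigned long start, unsigned long end)
{
	unsigned long ram = 0;
	unsigned long i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
		unsigned long s = map[i].addr, e = map[i].addr + map[i].size;

		if (!map[i].ram)
			continue;
		/* round inward to whole pages, then clamp to [start, end) */
		s = (s + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
		e &= ~(PAGE_SIZE - 1);
		if (s < start) s = start;
		if (e > end)   e = end;
		if (e > s)
			ram += e - s;
	}
	return (end - start) - ram;
}

int main(void)
{
	printf("hole below 16 MiB: %lu bytes\n", hole_size(0, 16UL << 20));
	return 0;
}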
diff --git a/arch/x86_64/kernel/early-quirks.c b/arch/x86_64/kernel/early-quirks.c
index 990d9c218a5d..13aa4fd728f3 100644
--- a/arch/x86_64/kernel/early-quirks.c
+++ b/arch/x86_64/kernel/early-quirks.c
@@ -14,6 +14,7 @@
14#include <linux/pci_ids.h> 14#include <linux/pci_ids.h>
15#include <asm/pci-direct.h> 15#include <asm/pci-direct.h>
16#include <asm/proto.h> 16#include <asm/proto.h>
17#include <asm/iommu.h>
17#include <asm/dma.h> 18#include <asm/dma.h>
18 19
19static void __init via_bugs(void) 20static void __init via_bugs(void)
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S
index a67f87bf4015..830cfc6ee8cb 100644
--- a/arch/x86_64/kernel/entry.S
+++ b/arch/x86_64/kernel/entry.S
@@ -282,7 +282,7 @@ sysret_careful:
282sysret_signal: 282sysret_signal:
283 TRACE_IRQS_ON 283 TRACE_IRQS_ON
284 sti 284 sti
285 testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx 285 testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY),%edx
286 jz 1f 286 jz 1f
287 287
288 /* Really a signal */ 288 /* Really a signal */
@@ -375,7 +375,7 @@ int_very_careful:
375 jmp int_restore_rest 375 jmp int_restore_rest
376 376
377int_signal: 377int_signal:
378 testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx 378 testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY),%edx
379 jz 1f 379 jz 1f
380 movq %rsp,%rdi # &ptregs -> arg1 380 movq %rsp,%rdi # &ptregs -> arg1
381 xorl %esi,%esi # oldset -> arg2 381 xorl %esi,%esi # oldset -> arg2
@@ -599,7 +599,7 @@ retint_careful:
599 jmp retint_check 599 jmp retint_check
600 600
601retint_signal: 601retint_signal:
602 testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx 602 testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY),%edx
603 jz retint_swapgs 603 jz retint_swapgs
604 TRACE_IRQS_ON 604 TRACE_IRQS_ON
605 sti 605 sti
diff --git a/arch/x86_64/kernel/hpet.c b/arch/x86_64/kernel/hpet.c
index b8286968662d..636f4f9fc6bb 100644
--- a/arch/x86_64/kernel/hpet.c
+++ b/arch/x86_64/kernel/hpet.c
@@ -190,7 +190,7 @@ int hpet_reenable(void)
190 */ 190 */
191 191
192#define TICK_COUNT 100000000 192#define TICK_COUNT 100000000
193#define TICK_MIN 5000 193#define SMI_THRESHOLD 50000
194#define MAX_TRIES 5 194#define MAX_TRIES 5
195 195
196/* 196/*
@@ -205,7 +205,7 @@ static void __init read_hpet_tsc(int *hpet, int *tsc)
205 tsc1 = get_cycles_sync(); 205 tsc1 = get_cycles_sync();
206 hpet1 = hpet_readl(HPET_COUNTER); 206 hpet1 = hpet_readl(HPET_COUNTER);
207 tsc2 = get_cycles_sync(); 207 tsc2 = get_cycles_sync();
208 if (tsc2 - tsc1 > TICK_MIN) 208 if ((tsc2 - tsc1) < SMI_THRESHOLD)
209 break; 209 break;
210 } 210 }
211 *hpet = hpet1; 211 *hpet = hpet1;
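The TICK_MIN to SMI_THRESHOLD change above inverts the retry test: a HPET/TSC sample pair is only accepted when the two TSC reads bracketing it are close together, so a pair torn by an SMI or other long stall gets retried. The same pattern in isolation, with clock_gettime() standing in for rdtsc/hpet_readl(); the threshold and retry count are illustrative.

#include <stdio.h>
#include <time.h>

#define SMI_THRESHOLD_NS 50000   /* illustrative: reject pairs torn by >50us */
#define MAX_TRIES        5

static long long now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Pretend this is the "other" clock we want to sample together with now_ns(). */
static long long read_other_clock(void)
{
	return now_ns() / 1000;
}

int main(void)
{
	long long t1, t2, other = 0;
	int i;

	for (i = 0; i < MAX_TRIES; i++) {
		t1 = now_ns();
		other = read_other_clock();
		t2 = now_ns();
		/* accept only if nothing long happened between the two reads */
		if ((t2 - t1) < SMI_THRESHOLD_NS)
			break;
	}
	printf("paired sample after %d tries: %lld\n", i + 1, other);
	return 0;
}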
@@ -439,7 +439,7 @@ int hpet_rtc_dropped_irq(void)
439 return 1; 439 return 1;
440} 440}
441 441
442irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs) 442irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
443{ 443{
444 struct rtc_time curr_time; 444 struct rtc_time curr_time;
445 unsigned long rtc_int_flag = 0; 445 unsigned long rtc_int_flag = 0;
diff --git a/arch/x86_64/kernel/i8259.c b/arch/x86_64/kernel/i8259.c
index 4b326655b208..948cae646099 100644
--- a/arch/x86_64/kernel/i8259.c
+++ b/arch/x86_64/kernel/i8259.c
@@ -444,24 +444,6 @@ void __init init_ISA_irqs (void)
444 } 444 }
445} 445}
446 446
447void apic_timer_interrupt(void);
448void spurious_interrupt(void);
449void error_interrupt(void);
450void reschedule_interrupt(void);
451void call_function_interrupt(void);
452void irq_move_cleanup_interrupt(void);
453void invalidate_interrupt0(void);
454void invalidate_interrupt1(void);
455void invalidate_interrupt2(void);
456void invalidate_interrupt3(void);
457void invalidate_interrupt4(void);
458void invalidate_interrupt5(void);
459void invalidate_interrupt6(void);
460void invalidate_interrupt7(void);
461void thermal_interrupt(void);
462void threshold_interrupt(void);
463void i8254_timer_resume(void);
464
465static void setup_timer_hardware(void) 447static void setup_timer_hardware(void)
466{ 448{
467 outb_p(0x34,0x43); /* binary, mode 2, LSB/MSB, ch 0 */ 449 outb_p(0x34,0x43); /* binary, mode 2, LSB/MSB, ch 0 */
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 1c6c6f724573..050141c0602b 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -152,6 +152,32 @@ static inline void io_apic_modify(unsigned int apic, unsigned int value)
152 writel(value, &io_apic->data); 152 writel(value, &io_apic->data);
153} 153}
154 154
155static int io_apic_level_ack_pending(unsigned int irq)
156{
157 struct irq_pin_list *entry;
158 unsigned long flags;
159 int pending = 0;
160
161 spin_lock_irqsave(&ioapic_lock, flags);
162 entry = irq_2_pin + irq;
163 for (;;) {
164 unsigned int reg;
165 int pin;
166
167 pin = entry->pin;
168 if (pin == -1)
169 break;
170 reg = io_apic_read(entry->apic, 0x10 + pin*2);
171 /* Is the remote IRR bit set? */
172 pending |= (reg >> 14) & 1;
173 if (!entry->next)
174 break;
175 entry = irq_2_pin + entry->next;
176 }
177 spin_unlock_irqrestore(&ioapic_lock, flags);
178 return pending;
179}
180
155/* 181/*
156 * Synchronize the IO-APIC and the CPU by doing 182 * Synchronize the IO-APIC and the CPU by doing
157 * a dummy read from the IO-APIC 183 * a dummy read from the IO-APIC
@@ -1418,9 +1444,37 @@ static void ack_apic_level(unsigned int irq)
1418 ack_APIC_irq(); 1444 ack_APIC_irq();
1419 1445
 1420	/* Now we can move and re-enable the irq */ 1446	/* Now we can move and re-enable the irq */
1421 move_masked_irq(irq); 1447 if (unlikely(do_unmask_irq)) {
1422 if (unlikely(do_unmask_irq)) 1448 /* Only migrate the irq if the ack has been received.
1449 *
1450 * On rare occasions the broadcast level triggered ack gets
1451 * delayed going to ioapics, and if we reprogram the
1452 * vector while Remote IRR is still set the irq will never
1453 * fire again.
1454 *
1455 * To prevent this scenario we read the Remote IRR bit
1456 * of the ioapic. This has two effects.
1457 * - On any sane system the read of the ioapic will
1458 * flush writes (and acks) going to the ioapic from
1459 * this cpu.
1460 * - We get to see if the ACK has actually been delivered.
1461 *
 1462	 * Based on failed experiments of reprogramming the
 1463	 * ioapic entry from outside of irq context, starting
 1464	 * with masking the ioapic entry and then polling until
 1465	 * Remote IRR was clear before reprogramming the
 1466	 * ioapic, I don't trust the Remote IRR bit to be
 1467	 * completely accurate.
1468 *
1469 * However there appears to be no other way to plug
1470 * this race, so if the Remote IRR bit is not
1471 * accurate and is causing problems then it is a hardware bug
1472 * and you can go talk to the chipset vendor about it.
1473 */
1474 if (!io_apic_level_ack_pending(irq))
1475 move_masked_irq(irq);
1423 unmask_IO_APIC_irq(irq); 1476 unmask_IO_APIC_irq(irq);
1477 }
1424} 1478}
1425 1479
1426static struct irq_chip ioapic_chip __read_mostly = { 1480static struct irq_chip ioapic_chip __read_mostly = {
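The comment block above spells out the ordering ack_apic_level() now follows when a level-triggered irq is pending migration: mask the entry, ack, and only reprogram the vector once the I/O APIC's Remote IRR bit shows the ack actually landed, then unmask. A condensed, runnable sketch of that ordering; the helpers here are stand-ins for the real APIC accessors, not the kernel's.

#include <stdio.h>

/* Stand-in hardware state: in the kernel this is an I/O APIC register bit. */
static int remote_irr;

static void mask_entry(void)     { printf("mask ioapic entry\n"); }
static void unmask_entry(void)   { printf("unmask ioapic entry\n"); }
static void ack_local_apic(void) { printf("ack local apic\n"); remote_irr = 0; }
static void move_vector(void)    { printf("reprogram vector\n"); }

/* Condensed ordering from the ack_apic_level() hunk above. */
static void handle_eoi(int migration_pending)
{
	if (migration_pending)
		mask_entry();                    /* keep the line from refiring */
	ack_local_apic();                        /* broadcast level-triggered ack */
	if (migration_pending) {
		/* only migrate once the ack has visibly reached the ioapic */
		if (!remote_irr)
			move_vector();
		unmask_entry();
	}
}

int main(void)
{
	remote_irr = 1;                          /* interrupt currently in service */
	handle_eoi(1);
	return 0;
}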
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index f3fb8174559e..4d8450ee3635 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -18,6 +18,8 @@
18#include <linux/capability.h> 18#include <linux/capability.h>
19#include <linux/cpu.h> 19#include <linux/cpu.h>
20#include <linux/percpu.h> 20#include <linux/percpu.h>
21#include <linux/poll.h>
22#include <linux/thread_info.h>
21#include <linux/ctype.h> 23#include <linux/ctype.h>
22#include <linux/kmod.h> 24#include <linux/kmod.h>
23#include <linux/kdebug.h> 25#include <linux/kdebug.h>
@@ -26,6 +28,7 @@
26#include <asm/mce.h> 28#include <asm/mce.h>
27#include <asm/uaccess.h> 29#include <asm/uaccess.h>
28#include <asm/smp.h> 30#include <asm/smp.h>
31#include <asm/idle.h>
29 32
30#define MISC_MCELOG_MINOR 227 33#define MISC_MCELOG_MINOR 227
31#define NR_BANKS 6 34#define NR_BANKS 6
@@ -34,13 +37,17 @@ atomic_t mce_entry;
34 37
35static int mce_dont_init; 38static int mce_dont_init;
36 39
37/* 0: always panic, 1: panic if deadlock possible, 2: try to avoid panic, 40/*
38 3: never panic or exit (for testing only) */ 41 * Tolerant levels:
42 * 0: always panic on uncorrected errors, log corrected errors
43 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
44 * 2: SIGBUS or log uncorrected errors (if possible), log corrected errors
45 * 3: never panic or SIGBUS, log all errors (for testing only)
46 */
39static int tolerant = 1; 47static int tolerant = 1;
40static int banks; 48static int banks;
41static unsigned long bank[NR_BANKS] = { [0 ... NR_BANKS-1] = ~0UL }; 49static unsigned long bank[NR_BANKS] = { [0 ... NR_BANKS-1] = ~0UL };
42static unsigned long console_logged; 50static unsigned long notify_user;
43static int notify_user;
44static int rip_msr; 51static int rip_msr;
45static int mce_bootlog = 1; 52static int mce_bootlog = 1;
46static atomic_t mce_events; 53static atomic_t mce_events;
@@ -48,6 +55,8 @@ static atomic_t mce_events;
48static char trigger[128]; 55static char trigger[128];
49static char *trigger_argv[2] = { trigger, NULL }; 56static char *trigger_argv[2] = { trigger, NULL };
50 57
58static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
59
51/* 60/*
52 * Lockless MCE logging infrastructure. 61 * Lockless MCE logging infrastructure.
53 * This avoids deadlocks on printk locks without having to break locks. Also 62 * This avoids deadlocks on printk locks without having to break locks. Also
@@ -94,8 +103,7 @@ void mce_log(struct mce *mce)
94 mcelog.entry[entry].finished = 1; 103 mcelog.entry[entry].finished = 1;
95 wmb(); 104 wmb();
96 105
97 if (!test_and_set_bit(0, &console_logged)) 106 set_bit(0, &notify_user);
98 notify_user = 1;
99} 107}
100 108
101static void print_mce(struct mce *m) 109static void print_mce(struct mce *m)
@@ -128,6 +136,7 @@ static void print_mce(struct mce *m)
128static void mce_panic(char *msg, struct mce *backup, unsigned long start) 136static void mce_panic(char *msg, struct mce *backup, unsigned long start)
129{ 137{
130 int i; 138 int i;
139
131 oops_begin(); 140 oops_begin();
132 for (i = 0; i < MCE_LOG_LEN; i++) { 141 for (i = 0; i < MCE_LOG_LEN; i++) {
133 unsigned long tsc = mcelog.entry[i].tsc; 142 unsigned long tsc = mcelog.entry[i].tsc;
@@ -139,10 +148,7 @@ static void mce_panic(char *msg, struct mce *backup, unsigned long start)
139 } 148 }
140 if (backup) 149 if (backup)
141 print_mce(backup); 150 print_mce(backup);
142 if (tolerant >= 3) 151 panic(msg);
143 printk("Fake panic: %s\n", msg);
144 else
145 panic(msg);
146} 152}
147 153
148static int mce_available(struct cpuinfo_x86 *c) 154static int mce_available(struct cpuinfo_x86 *c)
@@ -167,17 +173,6 @@ static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
167 } 173 }
168} 174}
169 175
170static void do_mce_trigger(void)
171{
172 static atomic_t mce_logged;
173 int events = atomic_read(&mce_events);
174 if (events != atomic_read(&mce_logged) && trigger[0]) {
175 /* Small race window, but should be harmless. */
176 atomic_set(&mce_logged, events);
177 call_usermodehelper(trigger, trigger_argv, NULL, UMH_NO_WAIT);
178 }
179}
180
181/* 176/*
182 * The actual machine check handler 177 * The actual machine check handler
183 */ 178 */
@@ -185,11 +180,19 @@ static void do_mce_trigger(void)
185void do_machine_check(struct pt_regs * regs, long error_code) 180void do_machine_check(struct pt_regs * regs, long error_code)
186{ 181{
187 struct mce m, panicm; 182 struct mce m, panicm;
188 int nowayout = (tolerant < 1);
189 int kill_it = 0;
190 u64 mcestart = 0; 183 u64 mcestart = 0;
191 int i; 184 int i;
192 int panicm_found = 0; 185 int panicm_found = 0;
186 /*
187 * If no_way_out gets set, there is no safe way to recover from this
188 * MCE. If tolerant is cranked up, we'll try anyway.
189 */
190 int no_way_out = 0;
191 /*
192 * If kill_it gets set, there might be a way to recover from this
193 * error.
194 */
195 int kill_it = 0;
193 196
194 atomic_inc(&mce_entry); 197 atomic_inc(&mce_entry);
195 198
@@ -201,8 +204,9 @@ void do_machine_check(struct pt_regs * regs, long error_code)
201 memset(&m, 0, sizeof(struct mce)); 204 memset(&m, 0, sizeof(struct mce));
202 m.cpu = smp_processor_id(); 205 m.cpu = smp_processor_id();
203 rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus); 206 rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
207 /* if the restart IP is not valid, we're done for */
204 if (!(m.mcgstatus & MCG_STATUS_RIPV)) 208 if (!(m.mcgstatus & MCG_STATUS_RIPV))
205 kill_it = 1; 209 no_way_out = 1;
206 210
207 rdtscll(mcestart); 211 rdtscll(mcestart);
208 barrier(); 212 barrier();
@@ -221,10 +225,18 @@ void do_machine_check(struct pt_regs * regs, long error_code)
221 continue; 225 continue;
222 226
223 if (m.status & MCI_STATUS_EN) { 227 if (m.status & MCI_STATUS_EN) {
224 /* In theory _OVER could be a nowayout too, but 228 /* if PCC was set, there's no way out */
225 assume any overflowed errors were no fatal. */ 229 no_way_out |= !!(m.status & MCI_STATUS_PCC);
226 nowayout |= !!(m.status & MCI_STATUS_PCC); 230 /*
227 kill_it |= !!(m.status & MCI_STATUS_UC); 231 * If this error was uncorrectable and there was
232 * an overflow, we're in trouble. If no overflow,
233 * we might get away with just killing a task.
234 */
235 if (m.status & MCI_STATUS_UC) {
236 if (tolerant < 1 || m.status & MCI_STATUS_OVER)
237 no_way_out = 1;
238 kill_it = 1;
239 }
228 } 240 }
229 241
230 if (m.status & MCI_STATUS_MISCV) 242 if (m.status & MCI_STATUS_MISCV)
@@ -235,7 +247,6 @@ void do_machine_check(struct pt_regs * regs, long error_code)
235 mce_get_rip(&m, regs); 247 mce_get_rip(&m, regs);
236 if (error_code >= 0) 248 if (error_code >= 0)
237 rdtscll(m.tsc); 249 rdtscll(m.tsc);
238 wrmsrl(MSR_IA32_MC0_STATUS + i*4, 0);
239 if (error_code != -2) 250 if (error_code != -2)
240 mce_log(&m); 251 mce_log(&m);
241 252
@@ -251,45 +262,59 @@ void do_machine_check(struct pt_regs * regs, long error_code)
251 } 262 }
252 263
253 /* Never do anything final in the polling timer */ 264 /* Never do anything final in the polling timer */
254 if (!regs) { 265 if (!regs)
255 /* Normal interrupt context here. Call trigger for any new
256 events. */
257 do_mce_trigger();
258 goto out; 266 goto out;
259 }
260 267
261 /* If we didn't find an uncorrectable error, pick 268 /* If we didn't find an uncorrectable error, pick
262 the last one (shouldn't happen, just being safe). */ 269 the last one (shouldn't happen, just being safe). */
263 if (!panicm_found) 270 if (!panicm_found)
264 panicm = m; 271 panicm = m;
265 if (nowayout) 272
273 /*
274 * If we have decided that we just CAN'T continue, and the user
275 * has not set tolerant to an insane level, give up and die.
276 */
277 if (no_way_out && tolerant < 3)
266 mce_panic("Machine check", &panicm, mcestart); 278 mce_panic("Machine check", &panicm, mcestart);
267 if (kill_it) { 279
280 /*
281 * If the error seems to be unrecoverable, something should be
282 * done. Try to kill as little as possible. If we can kill just
283 * one task, do that. If the user has set the tolerance very
284 * high, don't try to do anything at all.
285 */
286 if (kill_it && tolerant < 3) {
268 int user_space = 0; 287 int user_space = 0;
269 288
270 if (m.mcgstatus & MCG_STATUS_RIPV) 289 /*
290 * If the EIPV bit is set, it means the saved IP is the
291 * instruction which caused the MCE.
292 */
293 if (m.mcgstatus & MCG_STATUS_EIPV)
271 user_space = panicm.rip && (panicm.cs & 3); 294 user_space = panicm.rip && (panicm.cs & 3);
272 295
273 /* When the machine was in user space and the CPU didn't get 296 /*
274 confused it's normally not necessary to panic, unless you 297 * If we know that the error was in user space, send a
275 are paranoid (tolerant == 0) 298 * SIGBUS. Otherwise, panic if tolerance is low.
276 299 *
277 RED-PEN could be more tolerant for MCEs in idle, 300 * do_exit() takes an awful lot of locks and has a slight
278 but most likely they occur at boot anyways, where 301 * risk of deadlocking.
279 it is best to just halt the machine. */ 302 */
280 if ((!user_space && (panic_on_oops || tolerant < 2)) || 303 if (user_space) {
281 (unsigned)current->pid <= 1)
282 mce_panic("Uncorrected machine check", &panicm, mcestart);
283
284 /* do_exit takes an awful lot of locks and has as
285 slight risk of deadlocking. If you don't want that
286 don't set tolerant >= 2 */
287 if (tolerant < 3)
288 do_exit(SIGBUS); 304 do_exit(SIGBUS);
305 } else if (panic_on_oops || tolerant < 2) {
306 mce_panic("Uncorrected machine check",
307 &panicm, mcestart);
308 }
289 } 309 }
290 310
311 /* notify userspace ASAP */
312 set_thread_flag(TIF_MCE_NOTIFY);
313
291 out: 314 out:
292 /* Last thing done in the machine check exception to clear state. */ 315 /* the last thing we do is clear state */
316 for (i = 0; i < banks; i++)
317 wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
293 wrmsrl(MSR_IA32_MCG_STATUS, 0); 318 wrmsrl(MSR_IA32_MCG_STATUS, 0);
294 out2: 319 out2:
295 atomic_dec(&mce_entry); 320 atomic_dec(&mce_entry);
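The new no_way_out / kill_it split in do_machine_check() above can be read as a small decision table over the bank status bits and the tolerant knob: PCC, or an uncorrected error that overflowed (or tolerant 0), means no safe recovery; a plain uncorrected error only marks a task to kill. A userspace sketch of that decision; the bit values are symbolic, not the architectural encodings, and the RIPV check is left out.

#include <stdio.h>

#define ST_UC   (1u << 0)   /* uncorrected error           */
#define ST_PCC  (1u << 1)   /* processor context corrupt   */
#define ST_OVER (1u << 2)   /* error overflow              */

static const char *decide(unsigned status, int tolerant, int in_user)
{
	int no_way_out = 0, kill_it = 0;

	if (status & ST_PCC)
		no_way_out = 1;
	if (status & ST_UC) {
		if (tolerant < 1 || (status & ST_OVER))
			no_way_out = 1;
		kill_it = 1;
	}

	if (no_way_out && tolerant < 3)
		return "panic";
	if (kill_it && tolerant < 3)
		return in_user ? "SIGBUS to current task" : "panic unless tolerant >= 2";
	return "log only";
}

int main(void)
{
	printf("UC in user, tolerant=1: %s\n", decide(ST_UC, 1, 1));
	printf("UC|OVER,    tolerant=1: %s\n", decide(ST_UC | ST_OVER, 1, 1));
	printf("UC,         tolerant=3: %s\n", decide(ST_UC, 3, 1));
	return 0;
}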
@@ -344,37 +369,69 @@ static void mcheck_timer(struct work_struct *work)
344 on_each_cpu(mcheck_check_cpu, NULL, 1, 1); 369 on_each_cpu(mcheck_check_cpu, NULL, 1, 1);
345 370
346 /* 371 /*
347 * It's ok to read stale data here for notify_user and 372 * Alert userspace if needed. If we logged an MCE, reduce the
348 * console_logged as we'll simply get the updated versions 373 * polling interval, otherwise increase the polling interval.
349 * on the next mcheck_timer execution and atomic operations
350 * on console_logged act as synchronization for notify_user
351 * writes.
352 */ 374 */
353 if (notify_user && console_logged) { 375 if (mce_notify_user()) {
376 next_interval = max(next_interval/2, HZ/100);
377 } else {
378 next_interval = min(next_interval*2,
379 (int)round_jiffies_relative(check_interval*HZ));
380 }
381
382 schedule_delayed_work(&mcheck_work, next_interval);
383}
384
385/*
386 * This is only called from process context. This is where we do
387 * anything we need to alert userspace about new MCEs. This is called
388 * directly from the poller and also from entry.S and idle, thanks to
389 * TIF_MCE_NOTIFY.
390 */
391int mce_notify_user(void)
392{
393 clear_thread_flag(TIF_MCE_NOTIFY);
394 if (test_and_clear_bit(0, &notify_user)) {
354 static unsigned long last_print; 395 static unsigned long last_print;
355 unsigned long now = jiffies; 396 unsigned long now = jiffies;
356 397
357 /* if we logged an MCE, reduce the polling interval */ 398 wake_up_interruptible(&mce_wait);
358 next_interval = max(next_interval/2, HZ/100); 399 if (trigger[0])
359 notify_user = 0; 400 call_usermodehelper(trigger, trigger_argv, NULL,
360 clear_bit(0, &console_logged); 401 UMH_NO_WAIT);
402
361 if (time_after_eq(now, last_print + (check_interval*HZ))) { 403 if (time_after_eq(now, last_print + (check_interval*HZ))) {
362 last_print = now; 404 last_print = now;
363 printk(KERN_INFO "Machine check events logged\n"); 405 printk(KERN_INFO "Machine check events logged\n");
364 } 406 }
365 } else { 407
366 next_interval = min(next_interval*2, check_interval*HZ); 408 return 1;
367 } 409 }
410 return 0;
411}
368 412
369 schedule_delayed_work(&mcheck_work, next_interval); 413/* see if the idle task needs to notify userspace */
414static int
415mce_idle_callback(struct notifier_block *nfb, unsigned long action, void *junk)
416{
417 /* IDLE_END should be safe - interrupts are back on */
418 if (action == IDLE_END && test_thread_flag(TIF_MCE_NOTIFY))
419 mce_notify_user();
420
421 return NOTIFY_OK;
370} 422}
371 423
424static struct notifier_block mce_idle_notifier = {
425 .notifier_call = mce_idle_callback,
426};
372 427
373static __init int periodic_mcheck_init(void) 428static __init int periodic_mcheck_init(void)
374{ 429{
375 next_interval = check_interval * HZ; 430 next_interval = check_interval * HZ;
376 if (next_interval) 431 if (next_interval)
377 schedule_delayed_work(&mcheck_work, next_interval); 432 schedule_delayed_work(&mcheck_work,
433 round_jiffies_relative(next_interval));
434 idle_notifier_register(&mce_idle_notifier);
378 return 0; 435 return 0;
379} 436}
380__initcall(periodic_mcheck_init); 437__initcall(periodic_mcheck_init);
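mce_notify_user()'s return value now drives an adaptive poll period in mcheck_timer(): halve the interval after a burst of logged events, double it back up to the configured maximum once things go quiet. The backoff on its own looks roughly like this; the interval bounds are illustrative, and round_jiffies_relative(), which the patch uses, is not reproduced.

#include <stdio.h>

#define MIN_INTERVAL   1      /* e.g. HZ/100 in the patch      */
#define MAX_INTERVAL   500    /* e.g. check_interval * HZ      */

static int next_interval = MAX_INTERVAL;

/* Returns 1 if new events were logged since the last poll (simulated). */
static int poll_for_events(int step)
{
	return step < 3;          /* pretend the first few polls find work */
}

int main(void)
{
	int step;

	for (step = 0; step < 8; step++) {
		if (poll_for_events(step))
			next_interval = next_interval / 2 > MIN_INTERVAL
					? next_interval / 2 : MIN_INTERVAL;
		else
			next_interval = next_interval * 2 < MAX_INTERVAL
					? next_interval * 2 : MAX_INTERVAL;
		printf("poll %d: next interval %d ticks\n", step, next_interval);
	}
	return 0;
}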
@@ -465,6 +522,40 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
465 * Character device to read and clear the MCE log. 522 * Character device to read and clear the MCE log.
466 */ 523 */
467 524
525static DEFINE_SPINLOCK(mce_state_lock);
526static int open_count; /* #times opened */
527static int open_exclu; /* already open exclusive? */
528
529static int mce_open(struct inode *inode, struct file *file)
530{
531 spin_lock(&mce_state_lock);
532
533 if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
534 spin_unlock(&mce_state_lock);
535 return -EBUSY;
536 }
537
538 if (file->f_flags & O_EXCL)
539 open_exclu = 1;
540 open_count++;
541
542 spin_unlock(&mce_state_lock);
543
544 return nonseekable_open(inode, file);
545}
546
547static int mce_release(struct inode *inode, struct file *file)
548{
549 spin_lock(&mce_state_lock);
550
551 open_count--;
552 open_exclu = 0;
553
554 spin_unlock(&mce_state_lock);
555
556 return 0;
557}
558
468static void collect_tscs(void *data) 559static void collect_tscs(void *data)
469{ 560{
470 unsigned long *cpu_tsc = (unsigned long *)data; 561 unsigned long *cpu_tsc = (unsigned long *)data;
@@ -532,6 +623,14 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, loff
532 return err ? -EFAULT : buf - ubuf; 623 return err ? -EFAULT : buf - ubuf;
533} 624}
534 625
626static unsigned int mce_poll(struct file *file, poll_table *wait)
627{
628 poll_wait(file, &mce_wait, wait);
629 if (rcu_dereference(mcelog.next))
630 return POLLIN | POLLRDNORM;
631 return 0;
632}
633
535static int mce_ioctl(struct inode *i, struct file *f,unsigned int cmd, unsigned long arg) 634static int mce_ioctl(struct inode *i, struct file *f,unsigned int cmd, unsigned long arg)
536{ 635{
537 int __user *p = (int __user *)arg; 636 int __user *p = (int __user *)arg;
@@ -555,7 +654,10 @@ static int mce_ioctl(struct inode *i, struct file *f,unsigned int cmd, unsigned
555} 654}
556 655
557static const struct file_operations mce_chrdev_ops = { 656static const struct file_operations mce_chrdev_ops = {
657 .open = mce_open,
658 .release = mce_release,
558 .read = mce_read, 659 .read = mce_read,
660 .poll = mce_poll,
559 .ioctl = mce_ioctl, 661 .ioctl = mce_ioctl,
560}; 662};
561 663
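With .poll and the open/release bookkeeping wired into mce_chrdev_ops above, a logging daemon no longer needs to reread /dev/mcelog on a timer; it can sleep in poll(2) and read records when POLLIN is reported. A minimal consumer sketch; error handling is trimmed and the buffer size is a stand-in for whatever the device's ioctl interface reports.

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];               /* illustrative; the real size comes from an ioctl */
	struct pollfd pfd;
	ssize_t n;

	pfd.fd = open("/dev/mcelog", O_RDONLY);
	if (pfd.fd < 0) {
		perror("open /dev/mcelog");
		return 1;
	}
	pfd.events = POLLIN;

	for (;;) {
		if (poll(&pfd, 1, -1) < 0)        /* sleep until an MCE is logged */
			break;
		n = read(pfd.fd, buf, sizeof(buf));
		if (n < 0)
			break;
		printf("read %zd bytes of machine check records\n", n);
	}
	close(pfd.fd);
	return 0;
}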
@@ -620,7 +722,8 @@ static void mce_restart(void)
620 on_each_cpu(mce_init, NULL, 1, 1); 722 on_each_cpu(mce_init, NULL, 1, 1);
621 next_interval = check_interval * HZ; 723 next_interval = check_interval * HZ;
622 if (next_interval) 724 if (next_interval)
623 schedule_delayed_work(&mcheck_work, next_interval); 725 schedule_delayed_work(&mcheck_work,
726 round_jiffies_relative(next_interval));
624} 727}
625 728
626static struct sysdev_class mce_sysclass = { 729static struct sysdev_class mce_sysclass = {
diff --git a/arch/x86_64/kernel/mce_amd.c b/arch/x86_64/kernel/mce_amd.c
index 03356e64f9c8..2f8a7f18b0fe 100644
--- a/arch/x86_64/kernel/mce_amd.c
+++ b/arch/x86_64/kernel/mce_amd.c
@@ -157,9 +157,9 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
157 high |= K8_APIC_EXT_LVT_ENTRY_THRESHOLD << 20; 157 high |= K8_APIC_EXT_LVT_ENTRY_THRESHOLD << 20;
158 wrmsr(address, low, high); 158 wrmsr(address, low, high);
159 159
160 setup_APIC_extened_lvt(K8_APIC_EXT_LVT_ENTRY_THRESHOLD, 160 setup_APIC_extended_lvt(K8_APIC_EXT_LVT_ENTRY_THRESHOLD,
161 THRESHOLD_APIC_VECTOR, 161 THRESHOLD_APIC_VECTOR,
162 K8_APIC_EXT_INT_MSG_FIX, 0); 162 K8_APIC_EXT_INT_MSG_FIX, 0);
163 163
164 threshold_defaults.address = address; 164 threshold_defaults.address = address;
165 threshold_restart_bank(&threshold_defaults, 0, 0); 165 threshold_restart_bank(&threshold_defaults, 0, 0);
diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index 61ae57eb9e4c..8bf0ca03ac8e 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -32,7 +32,6 @@
32 32
33/* Have we found an MP table */ 33/* Have we found an MP table */
34int smp_found_config; 34int smp_found_config;
35unsigned int __initdata maxcpus = NR_CPUS;
36 35
37/* 36/*
38 * Various Linux-internal data structures created from the 37 * Various Linux-internal data structures created from the
@@ -649,6 +648,20 @@ static int mp_find_ioapic(int gsi)
649 return -1; 648 return -1;
650} 649}
651 650
651static u8 uniq_ioapic_id(u8 id)
652{
653 int i;
654 DECLARE_BITMAP(used, 256);
655 bitmap_zero(used, 256);
656 for (i = 0; i < nr_ioapics; i++) {
657 struct mpc_config_ioapic *ia = &mp_ioapics[i];
658 __set_bit(ia->mpc_apicid, used);
659 }
660 if (!test_bit(id, used))
661 return id;
662 return find_first_zero_bit(used, 256);
663}
664
652void __init mp_register_ioapic(u8 id, u32 address, u32 gsi_base) 665void __init mp_register_ioapic(u8 id, u32 address, u32 gsi_base)
653{ 666{
654 int idx = 0; 667 int idx = 0;
@@ -656,14 +669,14 @@ void __init mp_register_ioapic(u8 id, u32 address, u32 gsi_base)
656 if (bad_ioapic(address)) 669 if (bad_ioapic(address))
657 return; 670 return;
658 671
659 idx = nr_ioapics++; 672 idx = nr_ioapics;
660 673
661 mp_ioapics[idx].mpc_type = MP_IOAPIC; 674 mp_ioapics[idx].mpc_type = MP_IOAPIC;
662 mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE; 675 mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
663 mp_ioapics[idx].mpc_apicaddr = address; 676 mp_ioapics[idx].mpc_apicaddr = address;
664 677
665 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); 678 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
666 mp_ioapics[idx].mpc_apicid = id; 679 mp_ioapics[idx].mpc_apicid = uniq_ioapic_id(id);
667 mp_ioapics[idx].mpc_apicver = 0; 680 mp_ioapics[idx].mpc_apicver = 0;
668 681
669 /* 682 /*
@@ -680,6 +693,8 @@ void __init mp_register_ioapic(u8 id, u32 address, u32 gsi_base)
680 mp_ioapics[idx].mpc_apicaddr, 693 mp_ioapics[idx].mpc_apicaddr,
681 mp_ioapic_routing[idx].gsi_start, 694 mp_ioapic_routing[idx].gsi_start,
682 mp_ioapic_routing[idx].gsi_end); 695 mp_ioapic_routing[idx].gsi_end);
696
697 nr_ioapics++;
683} 698}
684 699
685void __init 700void __init
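uniq_ioapic_id() above is a first-fit allocator over a 256-bit bitmap: keep the BIOS-provided APIC id if nobody has claimed it yet, otherwise hand back the lowest free id. The same idea in plain C; the kernel's DECLARE_BITMAP/find_first_zero_bit helpers are replaced with a byte array, and this sketch remembers assignments across calls where the kernel rescans mp_ioapics instead.

#include <stdio.h>

static unsigned char used[256];     /* one flag per possible 8-bit APIC id */

static unsigned uniq_id(unsigned wanted)
{
	unsigned i;

	if (!used[wanted]) {
		used[wanted] = 1;
		return wanted;          /* BIOS id was free, keep it */
	}
	for (i = 0; i < 256; i++)       /* otherwise the first free id wins */
		if (!used[i]) {
			used[i] = 1;
			return i;
		}
	return wanted;                  /* table full; keep the duplicate */
}

int main(void)
{
	printf("first ioapic asking for id 2  -> %u\n", uniq_id(2));
	printf("second ioapic asking for id 2 -> %u\n", uniq_id(2));
	return 0;
}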
diff --git a/arch/x86_64/kernel/pci-calgary.c b/arch/x86_64/kernel/pci-calgary.c
index 5bd20b542c1e..ba16c968ca3f 100644
--- a/arch/x86_64/kernel/pci-calgary.c
+++ b/arch/x86_64/kernel/pci-calgary.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Derived from arch/powerpc/kernel/iommu.c 2 * Derived from arch/powerpc/kernel/iommu.c
3 * 3 *
4 * Copyright (C) IBM Corporation, 2006 4 * Copyright IBM Corporation, 2006-2007
5 * Copyright (C) 2006 Jon Mason <jdmason@kudzu.us> 5 * Copyright (C) 2006 Jon Mason <jdmason@kudzu.us>
6 * 6 *
7 * Author: Jon Mason <jdmason@kudzu.us> 7 * Author: Jon Mason <jdmason@kudzu.us>
@@ -35,7 +35,7 @@
35#include <linux/pci_ids.h> 35#include <linux/pci_ids.h>
36#include <linux/pci.h> 36#include <linux/pci.h>
37#include <linux/delay.h> 37#include <linux/delay.h>
38#include <asm/proto.h> 38#include <asm/iommu.h>
39#include <asm/calgary.h> 39#include <asm/calgary.h>
40#include <asm/tce.h> 40#include <asm/tce.h>
41#include <asm/pci-direct.h> 41#include <asm/pci-direct.h>
@@ -50,13 +50,7 @@ int use_calgary __read_mostly = 0;
50#endif /* CONFIG_CALGARY_DEFAULT_ENABLED */ 50#endif /* CONFIG_CALGARY_DEFAULT_ENABLED */
51 51
52#define PCI_DEVICE_ID_IBM_CALGARY 0x02a1 52#define PCI_DEVICE_ID_IBM_CALGARY 0x02a1
53#define PCI_VENDOR_DEVICE_ID_CALGARY \ 53#define PCI_DEVICE_ID_IBM_CALIOC2 0x0308
54 (PCI_VENDOR_ID_IBM | PCI_DEVICE_ID_IBM_CALGARY << 16)
55
56/* we need these for register space address calculation */
57#define START_ADDRESS 0xfe000000
58#define CHASSIS_BASE 0
59#define ONE_BASED_CHASSIS_NUM 1
60 54
61/* register offsets inside the host bridge space */ 55/* register offsets inside the host bridge space */
62#define CALGARY_CONFIG_REG 0x0108 56#define CALGARY_CONFIG_REG 0x0108
@@ -80,6 +74,12 @@ int use_calgary __read_mostly = 0;
80#define PHB_MEM_2_SIZE_LOW 0x02E0 74#define PHB_MEM_2_SIZE_LOW 0x02E0
81#define PHB_DOSHOLE_OFFSET 0x08E0 75#define PHB_DOSHOLE_OFFSET 0x08E0
82 76
77/* CalIOC2 specific */
78#define PHB_SAVIOR_L2 0x0DB0
79#define PHB_PAGE_MIG_CTRL 0x0DA8
80#define PHB_PAGE_MIG_DEBUG 0x0DA0
81#define PHB_ROOT_COMPLEX_STATUS 0x0CB0
82
83/* PHB_CONFIG_RW */ 83/* PHB_CONFIG_RW */
84#define PHB_TCE_ENABLE 0x20000000 84#define PHB_TCE_ENABLE 0x20000000
85#define PHB_SLOT_DISABLE 0x1C000000 85#define PHB_SLOT_DISABLE 0x1C000000
@@ -92,7 +92,11 @@ int use_calgary __read_mostly = 0;
92/* CSR (Channel/DMA Status Register) */ 92/* CSR (Channel/DMA Status Register) */
93#define CSR_AGENT_MASK 0xffe0ffff 93#define CSR_AGENT_MASK 0xffe0ffff
94/* CCR (Calgary Configuration Register) */ 94/* CCR (Calgary Configuration Register) */
95#define CCR_2SEC_TIMEOUT 0x000000000000000EUL 95#define CCR_2SEC_TIMEOUT 0x000000000000000EUL
96/* PMCR/PMDR (Page Migration Control/Debug Registers */
97#define PMR_SOFTSTOP 0x80000000
98#define PMR_SOFTSTOPFAULT 0x40000000
99#define PMR_HARDSTOP 0x20000000
96 100
97#define MAX_NUM_OF_PHBS 8 /* how many PHBs in total? */ 101#define MAX_NUM_OF_PHBS 8 /* how many PHBs in total? */
98#define MAX_NUM_CHASSIS 8 /* max number of chassis */ 102#define MAX_NUM_CHASSIS 8 /* max number of chassis */
@@ -155,9 +159,26 @@ struct calgary_bus_info {
155 void __iomem *bbar; 159 void __iomem *bbar;
156}; 160};
157 161
158static struct calgary_bus_info bus_info[MAX_PHB_BUS_NUM] = { { NULL, 0, 0 }, }; 162static void calgary_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev);
163static void calgary_tce_cache_blast(struct iommu_table *tbl);
164static void calgary_dump_error_regs(struct iommu_table *tbl);
165static void calioc2_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev);
166static void calioc2_tce_cache_blast(struct iommu_table *tbl);
167static void calioc2_dump_error_regs(struct iommu_table *tbl);
168
169static struct cal_chipset_ops calgary_chip_ops = {
170 .handle_quirks = calgary_handle_quirks,
171 .tce_cache_blast = calgary_tce_cache_blast,
172 .dump_error_regs = calgary_dump_error_regs
173};
159 174
160static void tce_cache_blast(struct iommu_table *tbl); 175static struct cal_chipset_ops calioc2_chip_ops = {
176 .handle_quirks = calioc2_handle_quirks,
177 .tce_cache_blast = calioc2_tce_cache_blast,
178 .dump_error_regs = calioc2_dump_error_regs
179};
180
181static struct calgary_bus_info bus_info[MAX_PHB_BUS_NUM] = { { NULL, 0, 0 }, };
161 182
162/* enable this to stress test the chip's TCE cache */ 183/* enable this to stress test the chip's TCE cache */
163#ifdef CONFIG_IOMMU_DEBUG 184#ifdef CONFIG_IOMMU_DEBUG
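Introducing cal_chipset_ops above swaps the single tce_cache_blast() entry point for a per-chipset table of function pointers, so Calgary and CalIOC2 can differ in cache-flush, quirk and error-dump behaviour without if/else chains at every call site. The shape of that pattern, reduced to a compilable sketch; the device ids are the ones named in the patch, everything else is illustrative.

#include <stdio.h>

#define DEV_CALGARY 0x02a1
#define DEV_CALIOC2 0x0308

struct chipset_ops {
	void (*tce_cache_blast)(void);
	void (*dump_error_regs)(void);
};

static void calgary_blast(void)  { printf("calgary: flush TCE cache\n"); }
static void calgary_dump(void)   { printf("calgary: dump error regs\n"); }
static void calioc2_blast(void)  { printf("calioc2: softstop/hardstop flush\n"); }
static void calioc2_dump(void)   { printf("calioc2: dump error regs\n"); }

static const struct chipset_ops calgary_ops = { calgary_blast, calgary_dump };
static const struct chipset_ops calioc2_ops = { calioc2_blast, calioc2_dump };

/* Pick the ops table once, at table setup time, from the PCI device id. */
static const struct chipset_ops *ops_for(unsigned short device)
{
	return device == DEV_CALIOC2 ? &calioc2_ops : &calgary_ops;
}

int main(void)
{
	const struct chipset_ops *ops = ops_for(DEV_CALIOC2);

	ops->tce_cache_blast();     /* call sites no longer care which chip it is */
	ops->dump_error_regs();
	return 0;
}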
@@ -187,6 +208,7 @@ static inline unsigned long verify_bit_range(unsigned long* bitmap,
187{ 208{
188 return ~0UL; 209 return ~0UL;
189} 210}
211
190#endif /* CONFIG_IOMMU_DEBUG */ 212#endif /* CONFIG_IOMMU_DEBUG */
191 213
192static inline unsigned int num_dma_pages(unsigned long dma, unsigned int dmalen) 214static inline unsigned int num_dma_pages(unsigned long dma, unsigned int dmalen)
@@ -206,11 +228,12 @@ static inline int translate_phb(struct pci_dev* dev)
206} 228}
207 229
208static void iommu_range_reserve(struct iommu_table *tbl, 230static void iommu_range_reserve(struct iommu_table *tbl,
209 unsigned long start_addr, unsigned int npages) 231 unsigned long start_addr, unsigned int npages)
210{ 232{
211 unsigned long index; 233 unsigned long index;
212 unsigned long end; 234 unsigned long end;
213 unsigned long badbit; 235 unsigned long badbit;
236 unsigned long flags;
214 237
215 index = start_addr >> PAGE_SHIFT; 238 index = start_addr >> PAGE_SHIFT;
216 239
@@ -222,6 +245,8 @@ static void iommu_range_reserve(struct iommu_table *tbl,
222 if (end > tbl->it_size) /* don't go off the table */ 245 if (end > tbl->it_size) /* don't go off the table */
223 end = tbl->it_size; 246 end = tbl->it_size;
224 247
248 spin_lock_irqsave(&tbl->it_lock, flags);
249
225 badbit = verify_bit_range(tbl->it_map, 0, index, end); 250 badbit = verify_bit_range(tbl->it_map, 0, index, end);
226 if (badbit != ~0UL) { 251 if (badbit != ~0UL) {
227 if (printk_ratelimit()) 252 if (printk_ratelimit())
@@ -231,23 +256,29 @@ static void iommu_range_reserve(struct iommu_table *tbl,
231 } 256 }
232 257
233 set_bit_string(tbl->it_map, index, npages); 258 set_bit_string(tbl->it_map, index, npages);
259
260 spin_unlock_irqrestore(&tbl->it_lock, flags);
234} 261}
235 262
236static unsigned long iommu_range_alloc(struct iommu_table *tbl, 263static unsigned long iommu_range_alloc(struct iommu_table *tbl,
237 unsigned int npages) 264 unsigned int npages)
238{ 265{
266 unsigned long flags;
239 unsigned long offset; 267 unsigned long offset;
240 268
241 BUG_ON(npages == 0); 269 BUG_ON(npages == 0);
242 270
271 spin_lock_irqsave(&tbl->it_lock, flags);
272
243 offset = find_next_zero_string(tbl->it_map, tbl->it_hint, 273 offset = find_next_zero_string(tbl->it_map, tbl->it_hint,
244 tbl->it_size, npages); 274 tbl->it_size, npages);
245 if (offset == ~0UL) { 275 if (offset == ~0UL) {
246 tce_cache_blast(tbl); 276 tbl->chip_ops->tce_cache_blast(tbl);
247 offset = find_next_zero_string(tbl->it_map, 0, 277 offset = find_next_zero_string(tbl->it_map, 0,
248 tbl->it_size, npages); 278 tbl->it_size, npages);
249 if (offset == ~0UL) { 279 if (offset == ~0UL) {
250 printk(KERN_WARNING "Calgary: IOMMU full.\n"); 280 printk(KERN_WARNING "Calgary: IOMMU full.\n");
281 spin_unlock_irqrestore(&tbl->it_lock, flags);
251 if (panic_on_overflow) 282 if (panic_on_overflow)
252 panic("Calgary: fix the allocator.\n"); 283 panic("Calgary: fix the allocator.\n");
253 else 284 else
@@ -259,17 +290,17 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
259 tbl->it_hint = offset + npages; 290 tbl->it_hint = offset + npages;
260 BUG_ON(tbl->it_hint > tbl->it_size); 291 BUG_ON(tbl->it_hint > tbl->it_size);
261 292
293 spin_unlock_irqrestore(&tbl->it_lock, flags);
294
262 return offset; 295 return offset;
263} 296}
264 297
265static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *vaddr, 298static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *vaddr,
266 unsigned int npages, int direction) 299 unsigned int npages, int direction)
267{ 300{
268 unsigned long entry, flags; 301 unsigned long entry;
269 dma_addr_t ret = bad_dma_address; 302 dma_addr_t ret = bad_dma_address;
270 303
271 spin_lock_irqsave(&tbl->it_lock, flags);
272
273 entry = iommu_range_alloc(tbl, npages); 304 entry = iommu_range_alloc(tbl, npages);
274 305
275 if (unlikely(entry == bad_dma_address)) 306 if (unlikely(entry == bad_dma_address))
@@ -282,23 +313,21 @@ static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *vaddr,
282 tce_build(tbl, entry, npages, (unsigned long)vaddr & PAGE_MASK, 313 tce_build(tbl, entry, npages, (unsigned long)vaddr & PAGE_MASK,
283 direction); 314 direction);
284 315
285 spin_unlock_irqrestore(&tbl->it_lock, flags);
286
287 return ret; 316 return ret;
288 317
289error: 318error:
290 spin_unlock_irqrestore(&tbl->it_lock, flags);
291 printk(KERN_WARNING "Calgary: failed to allocate %u pages in " 319 printk(KERN_WARNING "Calgary: failed to allocate %u pages in "
292 "iommu %p\n", npages, tbl); 320 "iommu %p\n", npages, tbl);
293 return bad_dma_address; 321 return bad_dma_address;
294} 322}
295 323
296static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, 324static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
297 unsigned int npages) 325 unsigned int npages)
298{ 326{
299 unsigned long entry; 327 unsigned long entry;
300 unsigned long badbit; 328 unsigned long badbit;
301 unsigned long badend; 329 unsigned long badend;
330 unsigned long flags;
302 331
303 /* were we called with bad_dma_address? */ 332 /* were we called with bad_dma_address? */
304 badend = bad_dma_address + (EMERGENCY_PAGES * PAGE_SIZE); 333 badend = bad_dma_address + (EMERGENCY_PAGES * PAGE_SIZE);
@@ -315,6 +344,8 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
315 344
316 tce_free(tbl, entry, npages); 345 tce_free(tbl, entry, npages);
317 346
347 spin_lock_irqsave(&tbl->it_lock, flags);
348
318 badbit = verify_bit_range(tbl->it_map, 1, entry, entry + npages); 349 badbit = verify_bit_range(tbl->it_map, 1, entry, entry + npages);
319 if (badbit != ~0UL) { 350 if (badbit != ~0UL) {
320 if (printk_ratelimit()) 351 if (printk_ratelimit())
@@ -324,23 +355,40 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
324 } 355 }
325 356
326 __clear_bit_string(tbl->it_map, entry, npages); 357 __clear_bit_string(tbl->it_map, entry, npages);
358
359 spin_unlock_irqrestore(&tbl->it_lock, flags);
327} 360}
328 361
329static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, 362static inline struct iommu_table *find_iommu_table(struct device *dev)
330 unsigned int npages)
331{ 363{
332 unsigned long flags; 364 struct pci_dev *pdev;
365 struct pci_bus *pbus;
366 struct iommu_table *tbl;
333 367
334 spin_lock_irqsave(&tbl->it_lock, flags); 368 pdev = to_pci_dev(dev);
335 369
336 __iommu_free(tbl, dma_addr, npages); 370 /* is the device behind a bridge? */
371 if (unlikely(pdev->bus->parent))
372 pbus = pdev->bus->parent;
373 else
374 pbus = pdev->bus;
337 375
338 spin_unlock_irqrestore(&tbl->it_lock, flags); 376 tbl = pci_iommu(pbus);
377
378 BUG_ON(pdev->bus->parent &&
379 (tbl->it_busno != pdev->bus->parent->number));
380
381 return tbl;
339} 382}
340 383
341static void __calgary_unmap_sg(struct iommu_table *tbl, 384static void calgary_unmap_sg(struct device *dev,
342 struct scatterlist *sglist, int nelems, int direction) 385 struct scatterlist *sglist, int nelems, int direction)
343{ 386{
387 struct iommu_table *tbl = find_iommu_table(dev);
388
389 if (!translate_phb(to_pci_dev(dev)))
390 return;
391
344 while (nelems--) { 392 while (nelems--) {
345 unsigned int npages; 393 unsigned int npages;
346 dma_addr_t dma = sglist->dma_address; 394 dma_addr_t dma = sglist->dma_address;
@@ -350,33 +398,17 @@ static void __calgary_unmap_sg(struct iommu_table *tbl,
350 break; 398 break;
351 399
352 npages = num_dma_pages(dma, dmalen); 400 npages = num_dma_pages(dma, dmalen);
353 __iommu_free(tbl, dma, npages); 401 iommu_free(tbl, dma, npages);
354 sglist++; 402 sglist++;
355 } 403 }
356} 404}
357 405
358void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist,
359 int nelems, int direction)
360{
361 unsigned long flags;
362 struct iommu_table *tbl = to_pci_dev(dev)->bus->self->sysdata;
363
364 if (!translate_phb(to_pci_dev(dev)))
365 return;
366
367 spin_lock_irqsave(&tbl->it_lock, flags);
368
369 __calgary_unmap_sg(tbl, sglist, nelems, direction);
370
371 spin_unlock_irqrestore(&tbl->it_lock, flags);
372}
373
374static int calgary_nontranslate_map_sg(struct device* dev, 406static int calgary_nontranslate_map_sg(struct device* dev,
375 struct scatterlist *sg, int nelems, int direction) 407 struct scatterlist *sg, int nelems, int direction)
376{ 408{
377 int i; 409 int i;
378 410
379 for (i = 0; i < nelems; i++ ) { 411 for (i = 0; i < nelems; i++ ) {
380 struct scatterlist *s = &sg[i]; 412 struct scatterlist *s = &sg[i];
381 BUG_ON(!s->page); 413 BUG_ON(!s->page);
382 s->dma_address = virt_to_bus(page_address(s->page) +s->offset); 414 s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
@@ -385,11 +417,10 @@ static int calgary_nontranslate_map_sg(struct device* dev,
385 return nelems; 417 return nelems;
386} 418}
387 419
388int calgary_map_sg(struct device *dev, struct scatterlist *sg, 420static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
389 int nelems, int direction) 421 int nelems, int direction)
390{ 422{
391 struct iommu_table *tbl = to_pci_dev(dev)->bus->self->sysdata; 423 struct iommu_table *tbl = find_iommu_table(dev);
392 unsigned long flags;
393 unsigned long vaddr; 424 unsigned long vaddr;
394 unsigned int npages; 425 unsigned int npages;
395 unsigned long entry; 426 unsigned long entry;
@@ -398,8 +429,6 @@ int calgary_map_sg(struct device *dev, struct scatterlist *sg,
398 if (!translate_phb(to_pci_dev(dev))) 429 if (!translate_phb(to_pci_dev(dev)))
399 return calgary_nontranslate_map_sg(dev, sg, nelems, direction); 430 return calgary_nontranslate_map_sg(dev, sg, nelems, direction);
400 431
401 spin_lock_irqsave(&tbl->it_lock, flags);
402
403 for (i = 0; i < nelems; i++ ) { 432 for (i = 0; i < nelems; i++ ) {
404 struct scatterlist *s = &sg[i]; 433 struct scatterlist *s = &sg[i];
405 BUG_ON(!s->page); 434 BUG_ON(!s->page);
@@ -423,26 +452,23 @@ int calgary_map_sg(struct device *dev, struct scatterlist *sg,
423 s->dma_length = s->length; 452 s->dma_length = s->length;
424 } 453 }
425 454
426 spin_unlock_irqrestore(&tbl->it_lock, flags);
427
428 return nelems; 455 return nelems;
429error: 456error:
430 __calgary_unmap_sg(tbl, sg, nelems, direction); 457 calgary_unmap_sg(dev, sg, nelems, direction);
431 for (i = 0; i < nelems; i++) { 458 for (i = 0; i < nelems; i++) {
432 sg[i].dma_address = bad_dma_address; 459 sg[i].dma_address = bad_dma_address;
433 sg[i].dma_length = 0; 460 sg[i].dma_length = 0;
434 } 461 }
435 spin_unlock_irqrestore(&tbl->it_lock, flags);
436 return 0; 462 return 0;
437} 463}
438 464
439dma_addr_t calgary_map_single(struct device *dev, void *vaddr, 465static dma_addr_t calgary_map_single(struct device *dev, void *vaddr,
440 size_t size, int direction) 466 size_t size, int direction)
441{ 467{
442 dma_addr_t dma_handle = bad_dma_address; 468 dma_addr_t dma_handle = bad_dma_address;
443 unsigned long uaddr; 469 unsigned long uaddr;
444 unsigned int npages; 470 unsigned int npages;
445 struct iommu_table *tbl = to_pci_dev(dev)->bus->self->sysdata; 471 struct iommu_table *tbl = find_iommu_table(dev);
446 472
447 uaddr = (unsigned long)vaddr; 473 uaddr = (unsigned long)vaddr;
448 npages = num_dma_pages(uaddr, size); 474 npages = num_dma_pages(uaddr, size);
@@ -455,10 +481,10 @@ dma_addr_t calgary_map_single(struct device *dev, void *vaddr,
455 return dma_handle; 481 return dma_handle;
456} 482}
457 483
458void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle, 484static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle,
459 size_t size, int direction) 485 size_t size, int direction)
460{ 486{
461 struct iommu_table *tbl = to_pci_dev(dev)->bus->self->sysdata; 487 struct iommu_table *tbl = find_iommu_table(dev);
462 unsigned int npages; 488 unsigned int npages;
463 489
464 if (!translate_phb(to_pci_dev(dev))) 490 if (!translate_phb(to_pci_dev(dev)))
@@ -468,15 +494,13 @@ void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle,
468 iommu_free(tbl, dma_handle, npages); 494 iommu_free(tbl, dma_handle, npages);
469} 495}
470 496
471void* calgary_alloc_coherent(struct device *dev, size_t size, 497static void* calgary_alloc_coherent(struct device *dev, size_t size,
472 dma_addr_t *dma_handle, gfp_t flag) 498 dma_addr_t *dma_handle, gfp_t flag)
473{ 499{
474 void *ret = NULL; 500 void *ret = NULL;
475 dma_addr_t mapping; 501 dma_addr_t mapping;
476 unsigned int npages, order; 502 unsigned int npages, order;
477 struct iommu_table *tbl; 503 struct iommu_table *tbl = find_iommu_table(dev);
478
479 tbl = to_pci_dev(dev)->bus->self->sysdata;
480 504
481 size = PAGE_ALIGN(size); /* size rounded up to full pages */ 505 size = PAGE_ALIGN(size); /* size rounded up to full pages */
482 npages = size >> PAGE_SHIFT; 506 npages = size >> PAGE_SHIFT;
@@ -552,7 +576,22 @@ static inline void __iomem* calgary_reg(void __iomem *bar, unsigned long offset)
552 return (void __iomem*)target; 576 return (void __iomem*)target;
553} 577}
554 578
555static void tce_cache_blast(struct iommu_table *tbl) 579static inline int is_calioc2(unsigned short device)
580{
581 return (device == PCI_DEVICE_ID_IBM_CALIOC2);
582}
583
584static inline int is_calgary(unsigned short device)
585{
586 return (device == PCI_DEVICE_ID_IBM_CALGARY);
587}
588
589static inline int is_cal_pci_dev(unsigned short device)
590{
591 return (is_calgary(device) || is_calioc2(device));
592}
593
594static void calgary_tce_cache_blast(struct iommu_table *tbl)
556{ 595{
557 u64 val; 596 u64 val;
558 u32 aer; 597 u32 aer;
@@ -589,6 +628,85 @@ static void tce_cache_blast(struct iommu_table *tbl)
589 (void)readl(target); /* flush */ 628 (void)readl(target); /* flush */
590} 629}
591 630
631static void calioc2_tce_cache_blast(struct iommu_table *tbl)
632{
633 void __iomem *bbar = tbl->bbar;
634 void __iomem *target;
635 u64 val64;
636 u32 val;
637 int i = 0;
638 int count = 1;
639 unsigned char bus = tbl->it_busno;
640
641begin:
642 printk(KERN_DEBUG "Calgary: CalIOC2 bus 0x%x entering tce cache blast "
643 "sequence - count %d\n", bus, count);
644
645 /* 1. using the Page Migration Control reg set SoftStop */
646 target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_CTRL);
647 val = be32_to_cpu(readl(target));
648 printk(KERN_DEBUG "1a. read 0x%x [LE] from %p\n", val, target);
649 val |= PMR_SOFTSTOP;
650 printk(KERN_DEBUG "1b. writing 0x%x [LE] to %p\n", val, target);
651 writel(cpu_to_be32(val), target);
652
653 /* 2. poll split queues until all DMA activity is done */
654 printk(KERN_DEBUG "2a. starting to poll split queues\n");
655 target = calgary_reg(bbar, split_queue_offset(bus));
656 do {
657 val64 = readq(target);
658 i++;
659 } while ((val64 & 0xff) != 0xff && i < 100);
660 if (i == 100)
661 printk(KERN_WARNING "CalIOC2: PCI bus not quiesced, "
662 "continuing anyway\n");
663
664 /* 3. poll Page Migration DEBUG for SoftStopFault */
665 target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_DEBUG);
666 val = be32_to_cpu(readl(target));
667 printk(KERN_DEBUG "3. read 0x%x [LE] from %p\n", val, target);
668
669 /* 4. if SoftStopFault - goto (1) */
670 if (val & PMR_SOFTSTOPFAULT) {
671 if (++count < 100)
672 goto begin;
673 else {
674 printk(KERN_WARNING "CalIOC2: too many SoftStopFaults, "
675 "aborting TCE cache flush sequence!\n");
676 return; /* pray for the best */
677 }
678 }
679
680 /* 5. Slam into HardStop by reading PHB_PAGE_MIG_CTRL */
681 target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_CTRL);
682 printk(KERN_DEBUG "5a. slamming into HardStop by reading %p\n", target);
683 val = be32_to_cpu(readl(target));
684 printk(KERN_DEBUG "5b. read 0x%x [LE] from %p\n", val, target);
685 target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_DEBUG);
686 val = be32_to_cpu(readl(target));
687 printk(KERN_DEBUG "5c. read 0x%x [LE] from %p (debug)\n", val, target);
688
689 /* 6. invalidate TCE cache */
690 printk(KERN_DEBUG "6. invalidating TCE cache\n");
691 target = calgary_reg(bbar, tar_offset(bus));
692 writeq(tbl->tar_val, target);
693
694 /* 7. Re-read PMCR */
695 printk(KERN_DEBUG "7a. Re-reading PMCR\n");
696 target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_CTRL);
697 val = be32_to_cpu(readl(target));
698 printk(KERN_DEBUG "7b. read 0x%x [LE] from %p\n", val, target);
699
700 /* 8. Remove HardStop */
701 printk(KERN_DEBUG "8a. removing HardStop from PMCR\n");
702 target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_CTRL);
703 val = 0;
704 printk(KERN_DEBUG "8b. writing 0x%x [LE] to %p\n", val, target);
705 writel(cpu_to_be32(val), target);
706 val = be32_to_cpu(readl(target));
707 printk(KERN_DEBUG "8c. read 0x%x [LE] from %p\n", val, target);
708}
709
592static void __init calgary_reserve_mem_region(struct pci_dev *dev, u64 start, 710static void __init calgary_reserve_mem_region(struct pci_dev *dev, u64 start,
593 u64 limit) 711 u64 limit)
594{ 712{
@@ -598,7 +716,7 @@ static void __init calgary_reserve_mem_region(struct pci_dev *dev, u64 start,
598 limit++; 716 limit++;
599 717
600 numpages = ((limit - start) >> PAGE_SHIFT); 718 numpages = ((limit - start) >> PAGE_SHIFT);
601 iommu_range_reserve(dev->sysdata, start, numpages); 719 iommu_range_reserve(pci_iommu(dev->bus), start, numpages);
602} 720}
603 721
604static void __init calgary_reserve_peripheral_mem_1(struct pci_dev *dev) 722static void __init calgary_reserve_peripheral_mem_1(struct pci_dev *dev)
@@ -606,7 +724,7 @@ static void __init calgary_reserve_peripheral_mem_1(struct pci_dev *dev)
606 void __iomem *target; 724 void __iomem *target;
607 u64 low, high, sizelow; 725 u64 low, high, sizelow;
608 u64 start, limit; 726 u64 start, limit;
609 struct iommu_table *tbl = dev->sysdata; 727 struct iommu_table *tbl = pci_iommu(dev->bus);
610 unsigned char busnum = dev->bus->number; 728 unsigned char busnum = dev->bus->number;
611 void __iomem *bbar = tbl->bbar; 729 void __iomem *bbar = tbl->bbar;
612 730
@@ -630,7 +748,7 @@ static void __init calgary_reserve_peripheral_mem_2(struct pci_dev *dev)
630 u32 val32; 748 u32 val32;
631 u64 low, high, sizelow, sizehigh; 749 u64 low, high, sizelow, sizehigh;
632 u64 start, limit; 750 u64 start, limit;
633 struct iommu_table *tbl = dev->sysdata; 751 struct iommu_table *tbl = pci_iommu(dev->bus);
634 unsigned char busnum = dev->bus->number; 752 unsigned char busnum = dev->bus->number;
635 void __iomem *bbar = tbl->bbar; 753 void __iomem *bbar = tbl->bbar;
636 754
@@ -666,14 +784,20 @@ static void __init calgary_reserve_regions(struct pci_dev *dev)
666{ 784{
667 unsigned int npages; 785 unsigned int npages;
668 u64 start; 786 u64 start;
669 struct iommu_table *tbl = dev->sysdata; 787 struct iommu_table *tbl = pci_iommu(dev->bus);
670 788
671 /* reserve EMERGENCY_PAGES from bad_dma_address and up */ 789 /* reserve EMERGENCY_PAGES from bad_dma_address and up */
672 iommu_range_reserve(tbl, bad_dma_address, EMERGENCY_PAGES); 790 iommu_range_reserve(tbl, bad_dma_address, EMERGENCY_PAGES);
673 791
674 /* avoid the BIOS/VGA first 640KB-1MB region */ 792 /* avoid the BIOS/VGA first 640KB-1MB region */
675 start = (640 * 1024); 793 /* for CalIOC2 - avoid the entire first MB */
676 npages = ((1024 - 640) * 1024) >> PAGE_SHIFT; 794 if (is_calgary(dev->device)) {
795 start = (640 * 1024);
796 npages = ((1024 - 640) * 1024) >> PAGE_SHIFT;
797 } else { /* calioc2 */
798 start = 0;
799 npages = (1 * 1024 * 1024) >> PAGE_SHIFT;
800 }
677 iommu_range_reserve(tbl, start, npages); 801 iommu_range_reserve(tbl, start, npages);
678 802
679 /* reserve the two PCI peripheral memory regions in IO space */ 803 /* reserve the two PCI peripheral memory regions in IO space */
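A quick sanity check of the two branches above, assuming the usual 4 KB pages (PAGE_SHIFT == 12):

	/* Calgary: skip only the 640 KB - 1 MB BIOS/VGA hole */
	start  = 640 * 1024;                   /* 0xA0000 */
	npages = ((1024 - 640) * 1024) >> 12;  /* 384 KB ->  96 TCE entries */

	/* CalIOC2: keep DMA mappings out of the whole first megabyte */
	start  = 0;
	npages = (1 * 1024 * 1024) >> 12;      /* 1 MB   -> 256 TCE entries */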
@@ -694,10 +818,17 @@ static int __init calgary_setup_tar(struct pci_dev *dev, void __iomem *bbar)
694 if (ret) 818 if (ret)
695 return ret; 819 return ret;
696 820
697 tbl = dev->sysdata; 821 tbl = pci_iommu(dev->bus);
698 tbl->it_base = (unsigned long)bus_info[dev->bus->number].tce_space; 822 tbl->it_base = (unsigned long)bus_info[dev->bus->number].tce_space;
699 tce_free(tbl, 0, tbl->it_size); 823 tce_free(tbl, 0, tbl->it_size);
700 824
825 if (is_calgary(dev->device))
826 tbl->chip_ops = &calgary_chip_ops;
827 else if (is_calioc2(dev->device))
828 tbl->chip_ops = &calioc2_chip_ops;
829 else
830 BUG();
831
701 calgary_reserve_regions(dev); 832 calgary_reserve_regions(dev);
702 833
703 /* set TARs for each PHB */ 834 /* set TARs for each PHB */
@@ -706,15 +837,15 @@ static int __init calgary_setup_tar(struct pci_dev *dev, void __iomem *bbar)
706 837
707 /* zero out all TAR bits under sw control */ 838 /* zero out all TAR bits under sw control */
708 val64 &= ~TAR_SW_BITS; 839 val64 &= ~TAR_SW_BITS;
709
710 tbl = dev->sysdata;
711 table_phys = (u64)__pa(tbl->it_base); 840 table_phys = (u64)__pa(tbl->it_base);
841
712 val64 |= table_phys; 842 val64 |= table_phys;
713 843
714 BUG_ON(specified_table_size > TCE_TABLE_SIZE_8M); 844 BUG_ON(specified_table_size > TCE_TABLE_SIZE_8M);
715 val64 |= (u64) specified_table_size; 845 val64 |= (u64) specified_table_size;
716 846
717 tbl->tar_val = cpu_to_be64(val64); 847 tbl->tar_val = cpu_to_be64(val64);
848
718 writeq(tbl->tar_val, target); 849 writeq(tbl->tar_val, target);
719 readq(target); /* flush */ 850 readq(target); /* flush */
720 851
@@ -724,7 +855,7 @@ static int __init calgary_setup_tar(struct pci_dev *dev, void __iomem *bbar)
724static void __init calgary_free_bus(struct pci_dev *dev) 855static void __init calgary_free_bus(struct pci_dev *dev)
725{ 856{
726 u64 val64; 857 u64 val64;
727 struct iommu_table *tbl = dev->sysdata; 858 struct iommu_table *tbl = pci_iommu(dev->bus);
728 void __iomem *target; 859 void __iomem *target;
729 unsigned int bitmapsz; 860 unsigned int bitmapsz;
730 861
@@ -739,16 +870,81 @@ static void __init calgary_free_bus(struct pci_dev *dev)
739 tbl->it_map = NULL; 870 tbl->it_map = NULL;
740 871
741 kfree(tbl); 872 kfree(tbl);
742 dev->sysdata = NULL; 873
874 set_pci_iommu(dev->bus, NULL);
743 875
744 /* Can't free bootmem allocated memory after system is up :-( */ 876 /* Can't free bootmem allocated memory after system is up :-( */
745 bus_info[dev->bus->number].tce_space = NULL; 877 bus_info[dev->bus->number].tce_space = NULL;
746} 878}
747 879
880static void calgary_dump_error_regs(struct iommu_table *tbl)
881{
882 void __iomem *bbar = tbl->bbar;
883 void __iomem *target;
884 u32 csr, plssr;
885
886 target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_CSR_OFFSET);
887 csr = be32_to_cpu(readl(target));
888
889 target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_PLSSR_OFFSET);
890 plssr = be32_to_cpu(readl(target));
891
892 /* If no error, the agent ID in the CSR is not valid */
893 printk(KERN_EMERG "Calgary: DMA error on Calgary PHB 0x%x, "
894 "0x%08x@CSR 0x%08x@PLSSR\n", tbl->it_busno, csr, plssr);
895}
896
897static void calioc2_dump_error_regs(struct iommu_table *tbl)
898{
899 void __iomem *bbar = tbl->bbar;
900 u32 csr, csmr, plssr, mck, rcstat;
901 void __iomem *target;
902 unsigned long phboff = phb_offset(tbl->it_busno);
903 unsigned long erroff;
904 u32 errregs[7];
905 int i;
906
907 /* dump CSR */
908 target = calgary_reg(bbar, phboff | PHB_CSR_OFFSET);
909 csr = be32_to_cpu(readl(target));
910 /* dump PLSSR */
911 target = calgary_reg(bbar, phboff | PHB_PLSSR_OFFSET);
912 plssr = be32_to_cpu(readl(target));
913 /* dump CSMR */
914 target = calgary_reg(bbar, phboff | 0x290);
915 csmr = be32_to_cpu(readl(target));
916 /* dump mck */
917 target = calgary_reg(bbar, phboff | 0x800);
918 mck = be32_to_cpu(readl(target));
919
920 printk(KERN_EMERG "Calgary: DMA error on CalIOC2 PHB 0x%x\n",
921 tbl->it_busno);
922
923 printk(KERN_EMERG "Calgary: 0x%08x@CSR 0x%08x@PLSSR 0x%08x@CSMR 0x%08x@MCK\n",
924 csr, plssr, csmr, mck);
925
926 /* dump rest of error regs */
927 printk(KERN_EMERG "Calgary: ");
928 for (i = 0; i < ARRAY_SIZE(errregs); i++) {
929 /* err regs are at 0x810 - 0x870 */
930 erroff = (0x810 + (i * 0x10));
931 target = calgary_reg(bbar, phboff | erroff);
932 errregs[i] = be32_to_cpu(readl(target));
933 printk("0x%08x@0x%lx ", errregs[i], erroff);
934 }
935 printk("\n");
936
937 /* root complex status */
938 target = calgary_reg(bbar, phboff | PHB_ROOT_COMPLEX_STATUS);
939 rcstat = be32_to_cpu(readl(target));
940 printk(KERN_EMERG "Calgary: 0x%08x@0x%x\n", rcstat,
941 PHB_ROOT_COMPLEX_STATUS);
942}
943
748static void calgary_watchdog(unsigned long data) 944static void calgary_watchdog(unsigned long data)
749{ 945{
750 struct pci_dev *dev = (struct pci_dev *)data; 946 struct pci_dev *dev = (struct pci_dev *)data;
751 struct iommu_table *tbl = dev->sysdata; 947 struct iommu_table *tbl = pci_iommu(dev->bus);
752 void __iomem *bbar = tbl->bbar; 948 void __iomem *bbar = tbl->bbar;
753 u32 val32; 949 u32 val32;
754 void __iomem *target; 950 void __iomem *target;
@@ -758,13 +954,14 @@ static void calgary_watchdog(unsigned long data)
758 954
759 /* If no error, the agent ID in the CSR is not valid */ 955 /* If no error, the agent ID in the CSR is not valid */
760 if (val32 & CSR_AGENT_MASK) { 956 if (val32 & CSR_AGENT_MASK) {
761 printk(KERN_EMERG "calgary_watchdog: DMA error on PHB %#x, " 957 tbl->chip_ops->dump_error_regs(tbl);
762 "CSR = %#x\n", dev->bus->number, val32); 958
959 /* reset error */
763 writel(0, target); 960 writel(0, target);
764 961
765 /* Disable bus that caused the error */ 962 /* Disable bus that caused the error */
766 target = calgary_reg(bbar, phb_offset(tbl->it_busno) | 963 target = calgary_reg(bbar, phb_offset(tbl->it_busno) |
767 PHB_CONFIG_RW_OFFSET); 964 PHB_CONFIG_RW_OFFSET);
768 val32 = be32_to_cpu(readl(target)); 965 val32 = be32_to_cpu(readl(target));
769 val32 |= PHB_SLOT_DISABLE; 966 val32 |= PHB_SLOT_DISABLE;
770 writel(cpu_to_be32(val32), target); 967 writel(cpu_to_be32(val32), target);
@@ -775,8 +972,8 @@ static void calgary_watchdog(unsigned long data)
775 } 972 }
776} 973}
777 974
778static void __init calgary_increase_split_completion_timeout(void __iomem *bbar, 975static void __init calgary_set_split_completion_timeout(void __iomem *bbar,
779 unsigned char busnum) 976 unsigned char busnum, unsigned long timeout)
780{ 977{
781 u64 val64; 978 u64 val64;
782 void __iomem *target; 979 void __iomem *target;
@@ -802,11 +999,40 @@ static void __init calgary_increase_split_completion_timeout(void __iomem *bbar,
802 /* zero out this PHB's timer bits */ 999 /* zero out this PHB's timer bits */
803 mask = ~(0xFUL << phb_shift); 1000 mask = ~(0xFUL << phb_shift);
804 val64 &= mask; 1001 val64 &= mask;
805 val64 |= (CCR_2SEC_TIMEOUT << phb_shift); 1002 val64 |= (timeout << phb_shift);
806 writeq(cpu_to_be64(val64), target); 1003 writeq(cpu_to_be64(val64), target);
807 readq(target); /* flush */ 1004 readq(target); /* flush */
808} 1005}
809 1006
1007static void calioc2_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev)
1008{
1009 unsigned char busnum = dev->bus->number;
1010 void __iomem *bbar = tbl->bbar;
1011 void __iomem *target;
1012 u32 val;
1013
1014 /*
1015 * CalIOC2 designers recommend setting bit 8 in 0xnDB0 to 1
1016 */
1017 target = calgary_reg(bbar, phb_offset(busnum) | PHB_SAVIOR_L2);
1018 val = cpu_to_be32(readl(target));
1019 val |= 0x00800000;
1020 writel(cpu_to_be32(val), target);
1021}
1022
1023static void calgary_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev)
1024{
1025 unsigned char busnum = dev->bus->number;
1026
1027 /*
1028 * Give split completion a longer timeout on bus 1 for aic94xx
1029 * http://bugzilla.kernel.org/show_bug.cgi?id=7180
1030 */
1031 if (is_calgary(dev->device) && (busnum == 1))
1032 calgary_set_split_completion_timeout(tbl->bbar, busnum,
1033 CCR_2SEC_TIMEOUT);
1034}
1035
810static void __init calgary_enable_translation(struct pci_dev *dev) 1036static void __init calgary_enable_translation(struct pci_dev *dev)
811{ 1037{
812 u32 val32; 1038 u32 val32;
@@ -816,7 +1042,7 @@ static void __init calgary_enable_translation(struct pci_dev *dev)
816 struct iommu_table *tbl; 1042 struct iommu_table *tbl;
817 1043
818 busnum = dev->bus->number; 1044 busnum = dev->bus->number;
819 tbl = dev->sysdata; 1045 tbl = pci_iommu(dev->bus);
820 bbar = tbl->bbar; 1046 bbar = tbl->bbar;
821 1047
822 /* enable TCE in PHB Config Register */ 1048 /* enable TCE in PHB Config Register */
@@ -824,20 +1050,15 @@ static void __init calgary_enable_translation(struct pci_dev *dev)
824 val32 = be32_to_cpu(readl(target)); 1050 val32 = be32_to_cpu(readl(target));
825 val32 |= PHB_TCE_ENABLE | PHB_DAC_DISABLE | PHB_MCSR_ENABLE; 1051 val32 |= PHB_TCE_ENABLE | PHB_DAC_DISABLE | PHB_MCSR_ENABLE;
826 1052
827 printk(KERN_INFO "Calgary: enabling translation on PHB %#x\n", busnum); 1053 printk(KERN_INFO "Calgary: enabling translation on %s PHB %#x\n",
1054 (dev->device == PCI_DEVICE_ID_IBM_CALGARY) ?
1055 "Calgary" : "CalIOC2", busnum);
828 printk(KERN_INFO "Calgary: errant DMAs will now be prevented on this " 1056 printk(KERN_INFO "Calgary: errant DMAs will now be prevented on this "
829 "bus.\n"); 1057 "bus.\n");
830 1058
831 writel(cpu_to_be32(val32), target); 1059 writel(cpu_to_be32(val32), target);
832 readl(target); /* flush */ 1060 readl(target); /* flush */
833 1061
834 /*
835 * Give split completion a longer timeout on bus 1 for aic94xx
836 * http://bugzilla.kernel.org/show_bug.cgi?id=7180
837 */
838 if (busnum == 1)
839 calgary_increase_split_completion_timeout(bbar, busnum);
840
841 init_timer(&tbl->watchdog_timer); 1062 init_timer(&tbl->watchdog_timer);
842 tbl->watchdog_timer.function = &calgary_watchdog; 1063 tbl->watchdog_timer.function = &calgary_watchdog;
843 tbl->watchdog_timer.data = (unsigned long)dev; 1064 tbl->watchdog_timer.data = (unsigned long)dev;
@@ -853,7 +1074,7 @@ static void __init calgary_disable_translation(struct pci_dev *dev)
853 struct iommu_table *tbl; 1074 struct iommu_table *tbl;
854 1075
855 busnum = dev->bus->number; 1076 busnum = dev->bus->number;
856 tbl = dev->sysdata; 1077 tbl = pci_iommu(dev->bus);
857 bbar = tbl->bbar; 1078 bbar = tbl->bbar;
858 1079
859 /* disable TCE in PHB Config Register */ 1080 /* disable TCE in PHB Config Register */
@@ -871,13 +1092,19 @@ static void __init calgary_disable_translation(struct pci_dev *dev)
871static void __init calgary_init_one_nontraslated(struct pci_dev *dev) 1092static void __init calgary_init_one_nontraslated(struct pci_dev *dev)
872{ 1093{
873 pci_dev_get(dev); 1094 pci_dev_get(dev);
874 dev->sysdata = NULL; 1095 set_pci_iommu(dev->bus, NULL);
875 dev->bus->self = dev; 1096
1097 /* is the device behind a bridge? */
1098 if (dev->bus->parent)
1099 dev->bus->parent->self = dev;
1100 else
1101 dev->bus->self = dev;
876} 1102}
877 1103
878static int __init calgary_init_one(struct pci_dev *dev) 1104static int __init calgary_init_one(struct pci_dev *dev)
879{ 1105{
880 void __iomem *bbar; 1106 void __iomem *bbar;
1107 struct iommu_table *tbl;
881 int ret; 1108 int ret;
882 1109
883 BUG_ON(dev->bus->number >= MAX_PHB_BUS_NUM); 1110 BUG_ON(dev->bus->number >= MAX_PHB_BUS_NUM);
@@ -888,7 +1115,18 @@ static int __init calgary_init_one(struct pci_dev *dev)
888 goto done; 1115 goto done;
889 1116
890 pci_dev_get(dev); 1117 pci_dev_get(dev);
891 dev->bus->self = dev; 1118
1119 if (dev->bus->parent) {
1120 if (dev->bus->parent->self)
1121 printk(KERN_WARNING "Calgary: IEEEE, dev %p has "
1122 "bus->parent->self!\n", dev);
1123 dev->bus->parent->self = dev;
1124 } else
1125 dev->bus->self = dev;
1126
1127 tbl = pci_iommu(dev->bus);
1128 tbl->chip_ops->handle_quirks(tbl, dev);
1129
892 calgary_enable_translation(dev); 1130 calgary_enable_translation(dev);
893 1131
894 return 0; 1132 return 0;
@@ -924,11 +1162,18 @@ static int __init calgary_locate_bbars(void)
924 target = calgary_reg(bbar, offset); 1162 target = calgary_reg(bbar, offset);
925 1163
926 val = be32_to_cpu(readl(target)); 1164 val = be32_to_cpu(readl(target));
1165
927 start_bus = (u8)((val & 0x00FF0000) >> 16); 1166 start_bus = (u8)((val & 0x00FF0000) >> 16);
928 end_bus = (u8)((val & 0x0000FF00) >> 8); 1167 end_bus = (u8)((val & 0x0000FF00) >> 8);
929 for (bus = start_bus; bus <= end_bus; bus++) { 1168
930 bus_info[bus].bbar = bbar; 1169 if (end_bus) {
931 bus_info[bus].phbid = phb; 1170 for (bus = start_bus; bus <= end_bus; bus++) {
1171 bus_info[bus].bbar = bbar;
1172 bus_info[bus].phbid = phb;
1173 }
1174 } else {
1175 bus_info[start_bus].bbar = bbar;
1176 bus_info[start_bus].phbid = phb;
932 } 1177 }
933 } 1178 }
934 } 1179 }
@@ -948,22 +1193,24 @@ static int __init calgary_init(void)
948{ 1193{
949 int ret; 1194 int ret;
950 struct pci_dev *dev = NULL; 1195 struct pci_dev *dev = NULL;
1196 void *tce_space;
951 1197
952 ret = calgary_locate_bbars(); 1198 ret = calgary_locate_bbars();
953 if (ret) 1199 if (ret)
954 return ret; 1200 return ret;
955 1201
956 do { 1202 do {
957 dev = pci_get_device(PCI_VENDOR_ID_IBM, 1203 dev = pci_get_device(PCI_VENDOR_ID_IBM, PCI_ANY_ID, dev);
958 PCI_DEVICE_ID_IBM_CALGARY,
959 dev);
960 if (!dev) 1204 if (!dev)
961 break; 1205 break;
1206 if (!is_cal_pci_dev(dev->device))
1207 continue;
962 if (!translate_phb(dev)) { 1208 if (!translate_phb(dev)) {
963 calgary_init_one_nontraslated(dev); 1209 calgary_init_one_nontraslated(dev);
964 continue; 1210 continue;
965 } 1211 }
966 if (!bus_info[dev->bus->number].tce_space && !translate_empty_slots) 1212 tce_space = bus_info[dev->bus->number].tce_space;
1213 if (!tce_space && !translate_empty_slots)
967 continue; 1214 continue;
968 1215
969 ret = calgary_init_one(dev); 1216 ret = calgary_init_one(dev);
@@ -976,10 +1223,11 @@ static int __init calgary_init(void)
976error: 1223error:
977 do { 1224 do {
978 dev = pci_get_device_reverse(PCI_VENDOR_ID_IBM, 1225 dev = pci_get_device_reverse(PCI_VENDOR_ID_IBM,
979 PCI_DEVICE_ID_IBM_CALGARY, 1226 PCI_ANY_ID, dev);
980 dev);
981 if (!dev) 1227 if (!dev)
982 break; 1228 break;
1229 if (!is_cal_pci_dev(dev->device))
1230 continue;
983 if (!translate_phb(dev)) { 1231 if (!translate_phb(dev)) {
984 pci_dev_put(dev); 1232 pci_dev_put(dev);
985 continue; 1233 continue;
@@ -1057,9 +1305,29 @@ static int __init build_detail_arrays(void)
1057 return 0; 1305 return 0;
1058} 1306}
1059 1307
1060void __init detect_calgary(void) 1308static int __init calgary_bus_has_devices(int bus, unsigned short pci_dev)
1061{ 1309{
1310 int dev;
1062 u32 val; 1311 u32 val;
1312
1313 if (pci_dev == PCI_DEVICE_ID_IBM_CALIOC2) {
1314 /*
1315 * FIXME: properly scan for devices across the
1316 * PCI-to-PCI bridge on every CalIOC2 port.
1317 */
1318 return 1;
1319 }
1320
1321 for (dev = 1; dev < 8; dev++) {
1322 val = read_pci_config(bus, dev, 0, 0);
1323 if (val != 0xffffffff)
1324 break;
1325 }
1326 return (val != 0xffffffff);
1327}
1328
1329void __init detect_calgary(void)
1330{
1063 int bus; 1331 int bus;
1064 void *tbl; 1332 void *tbl;
1065 int calgary_found = 0; 1333 int calgary_found = 0;
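The probe in calgary_bus_has_devices() above relies on standard PCI behaviour: a configuration read to an empty slot terminates with a master abort and returns all ones, so any value other than 0xffffffff at config offset 0 (vendor and device ID) means a device is present. A minimal sketch, with 'slot' standing in for the device number being probed:

	u32 id = read_pci_config(bus, slot, 0, 0);  /* dword 0: device ID << 16 | vendor ID */
	int present = (id != 0xffffffff);           /* all ones == master abort == empty slot */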
@@ -1116,29 +1384,26 @@ void __init detect_calgary(void)
1116 specified_table_size = determine_tce_table_size(end_pfn * PAGE_SIZE); 1384 specified_table_size = determine_tce_table_size(end_pfn * PAGE_SIZE);
1117 1385
1118 for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) { 1386 for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) {
1119 int dev;
1120 struct calgary_bus_info *info = &bus_info[bus]; 1387 struct calgary_bus_info *info = &bus_info[bus];
1388 unsigned short pci_device;
1389 u32 val;
1390
1391 val = read_pci_config(bus, 0, 0, 0);
1392 pci_device = (val & 0xFFFF0000) >> 16;
1121 1393
1122 if (read_pci_config(bus, 0, 0, 0) != PCI_VENDOR_DEVICE_ID_CALGARY) 1394 if (!is_cal_pci_dev(pci_device))
1123 continue; 1395 continue;
1124 1396
1125 if (info->translation_disabled) 1397 if (info->translation_disabled)
1126 continue; 1398 continue;
1127 1399
1128 /* 1400 if (calgary_bus_has_devices(bus, pci_device) ||
1129 * Scan the slots of the PCI bus to see if there is a device present. 1401 translate_empty_slots) {
1130 * The parent bus will be the zero-ith device, so start at 1. 1402 tbl = alloc_tce_table();
1131 */ 1403 if (!tbl)
1132 for (dev = 1; dev < 8; dev++) { 1404 goto cleanup;
1133 val = read_pci_config(bus, dev, 0, 0); 1405 info->tce_space = tbl;
1134 if (val != 0xffffffff || translate_empty_slots) { 1406 calgary_found = 1;
1135 tbl = alloc_tce_table();
1136 if (!tbl)
1137 goto cleanup;
1138 info->tce_space = tbl;
1139 calgary_found = 1;
1140 break;
1141 }
1142 } 1407 }
1143 } 1408 }
1144 1409
@@ -1249,3 +1514,66 @@ static int __init calgary_parse_options(char *p)
1249 return 1; 1514 return 1;
1250} 1515}
1251__setup("calgary=", calgary_parse_options); 1516__setup("calgary=", calgary_parse_options);
1517
1518static void __init calgary_fixup_one_tce_space(struct pci_dev *dev)
1519{
1520 struct iommu_table *tbl;
1521 unsigned int npages;
1522 int i;
1523
1524 tbl = pci_iommu(dev->bus);
1525
1526 for (i = 0; i < 4; i++) {
1527 struct resource *r = &dev->resource[PCI_BRIDGE_RESOURCES + i];
1528
1529 /* Don't give out TCEs that map MEM resources */
1530 if (!(r->flags & IORESOURCE_MEM))
1531 continue;
1532
1533 /* 0-based? we reserve the whole 1st MB anyway */
1534 if (!r->start)
1535 continue;
1536
1537 /* cover the whole region */
1538 npages = (r->end - r->start) >> PAGE_SHIFT;
1539 npages++;
1540
1541 iommu_range_reserve(tbl, r->start, npages);
1542 }
1543}
1544
1545static int __init calgary_fixup_tce_spaces(void)
1546{
1547 struct pci_dev *dev = NULL;
1548 void *tce_space;
1549
1550 if (no_iommu || swiotlb || !calgary_detected)
1551 return -ENODEV;
1552
1553 printk(KERN_DEBUG "Calgary: fixing up tce spaces\n");
1554
1555 do {
1556 dev = pci_get_device(PCI_VENDOR_ID_IBM, PCI_ANY_ID, dev);
1557 if (!dev)
1558 break;
1559 if (!is_cal_pci_dev(dev->device))
1560 continue;
1561 if (!translate_phb(dev))
1562 continue;
1563
1564 tce_space = bus_info[dev->bus->number].tce_space;
1565 if (!tce_space)
1566 continue;
1567
1568 calgary_fixup_one_tce_space(dev);
1569
1570 } while (1);
1571
1572 return 0;
1573}
1574
1575/*
1576 * We need to be called after pcibios_assign_resources (fs_initcall level)
1577 * and before device_initcall.
1578 */
1579rootfs_initcall(calgary_fixup_tce_spaces);
diff --git a/arch/x86_64/kernel/pci-dma.c b/arch/x86_64/kernel/pci-dma.c
index 90f6315d02d4..05d745ede561 100644
--- a/arch/x86_64/kernel/pci-dma.c
+++ b/arch/x86_64/kernel/pci-dma.c
@@ -8,7 +8,7 @@
8#include <linux/pci.h> 8#include <linux/pci.h>
9#include <linux/module.h> 9#include <linux/module.h>
10#include <asm/io.h> 10#include <asm/io.h>
11#include <asm/proto.h> 11#include <asm/iommu.h>
12#include <asm/calgary.h> 12#include <asm/calgary.h>
13 13
14int iommu_merge __read_mostly = 0; 14int iommu_merge __read_mostly = 0;
@@ -321,6 +321,11 @@ static int __init pci_iommu_init(void)
321 return 0; 321 return 0;
322} 322}
323 323
324void pci_iommu_shutdown(void)
325{
326 gart_iommu_shutdown();
327}
328
324#ifdef CONFIG_PCI 329#ifdef CONFIG_PCI
325/* Many VIA bridges seem to corrupt data for DAC. Disable it here */ 330/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
326 331
diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c
index ae091cdc1a4d..4918c575d582 100644
--- a/arch/x86_64/kernel/pci-gart.c
+++ b/arch/x86_64/kernel/pci-gart.c
@@ -28,6 +28,7 @@
28#include <asm/mtrr.h> 28#include <asm/mtrr.h>
29#include <asm/pgtable.h> 29#include <asm/pgtable.h>
30#include <asm/proto.h> 30#include <asm/proto.h>
31#include <asm/iommu.h>
31#include <asm/cacheflush.h> 32#include <asm/cacheflush.h>
32#include <asm/swiotlb.h> 33#include <asm/swiotlb.h>
33#include <asm/dma.h> 34#include <asm/dma.h>
@@ -235,7 +236,7 @@ static dma_addr_t gart_map_simple(struct device *dev, char *buf,
235} 236}
236 237
237/* Map a single area into the IOMMU */ 238/* Map a single area into the IOMMU */
238dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, int dir) 239static dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, int dir)
239{ 240{
240 unsigned long phys_mem, bus; 241 unsigned long phys_mem, bus;
241 242
@@ -253,7 +254,7 @@ dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, int dir)
253/* 254/*
254 * Free a DMA mapping. 255 * Free a DMA mapping.
255 */ 256 */
256void gart_unmap_single(struct device *dev, dma_addr_t dma_addr, 257static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
257 size_t size, int direction) 258 size_t size, int direction)
258{ 259{
259 unsigned long iommu_page; 260 unsigned long iommu_page;
@@ -275,7 +276,7 @@ void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
275/* 276/*
276 * Wrapper for pci_unmap_single working with scatterlists. 277 * Wrapper for pci_unmap_single working with scatterlists.
277 */ 278 */
278void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) 279static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
279{ 280{
280 int i; 281 int i;
281 282
@@ -571,6 +572,26 @@ static const struct dma_mapping_ops gart_dma_ops = {
571 .unmap_sg = gart_unmap_sg, 572 .unmap_sg = gart_unmap_sg,
572}; 573};
573 574
575void gart_iommu_shutdown(void)
576{
577 struct pci_dev *dev;
578 int i;
579
580 if (no_agp && (dma_ops != &gart_dma_ops))
581 return;
582
583 for (i = 0; i < num_k8_northbridges; i++) {
584 u32 ctl;
585
586 dev = k8_northbridges[i];
587 pci_read_config_dword(dev, 0x90, &ctl);
588
589 ctl &= ~1;
590
591 pci_write_config_dword(dev, 0x90, ctl);
592 }
593}
594
574void __init gart_iommu_init(void) 595void __init gart_iommu_init(void)
575{ 596{
576 struct agp_kern_info info; 597 struct agp_kern_info info;
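Together with the pci-dma.c and reboot.c hunks elsewhere in this patch, gart_iommu_shutdown() gives the GART an explicit teardown path at shutdown. The resulting call chain, sketched as a comment (the meaning of bit 0 in the 0x90 register is assumed to be the GART enable bit):

	/*
	 * machine_shutdown()              arch/x86_64/kernel/reboot.c
	 *   -> pci_iommu_shutdown()       arch/x86_64/kernel/pci-dma.c
	 *     -> gart_iommu_shutdown()    this file: clears bit 0 (GART
	 *        enable, assumed) of config register 0x90 on every K8
	 *        northbridge, so translation is off before the next
	 *        kernel or firmware takes over.
	 */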
diff --git a/arch/x86_64/kernel/pci-nommu.c b/arch/x86_64/kernel/pci-nommu.c
index 6dade0c867cc..2a34c6c025a9 100644
--- a/arch/x86_64/kernel/pci-nommu.c
+++ b/arch/x86_64/kernel/pci-nommu.c
@@ -6,7 +6,7 @@
6#include <linux/string.h> 6#include <linux/string.h>
7#include <linux/dma-mapping.h> 7#include <linux/dma-mapping.h>
8 8
9#include <asm/proto.h> 9#include <asm/iommu.h>
10#include <asm/processor.h> 10#include <asm/processor.h>
11#include <asm/dma.h> 11#include <asm/dma.h>
12 12
@@ -34,7 +34,7 @@ nommu_map_single(struct device *hwdev, void *ptr, size_t size,
34 return bus; 34 return bus;
35} 35}
36 36
37void nommu_unmap_single(struct device *dev, dma_addr_t addr,size_t size, 37static void nommu_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
38 int direction) 38 int direction)
39{ 39{
40} 40}
@@ -54,7 +54,7 @@ void nommu_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
54 * Device ownership issues as mentioned above for pci_map_single are 54 * Device ownership issues as mentioned above for pci_map_single are
55 * the same here. 55 * the same here.
56 */ 56 */
57int nommu_map_sg(struct device *hwdev, struct scatterlist *sg, 57static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
58 int nents, int direction) 58 int nents, int direction)
59{ 59{
60 int i; 60 int i;
@@ -74,7 +74,7 @@ int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
74 * Again, cpu read rules concerning calls here are the same as for 74 * Again, cpu read rules concerning calls here are the same as for
75 * pci_unmap_single() above. 75 * pci_unmap_single() above.
76 */ 76 */
77void nommu_unmap_sg(struct device *dev, struct scatterlist *sg, 77static void nommu_unmap_sg(struct device *dev, struct scatterlist *sg,
78 int nents, int dir) 78 int nents, int dir)
79{ 79{
80} 80}
diff --git a/arch/x86_64/kernel/pci-swiotlb.c b/arch/x86_64/kernel/pci-swiotlb.c
index 4b4569abc60c..b2f405ea7c85 100644
--- a/arch/x86_64/kernel/pci-swiotlb.c
+++ b/arch/x86_64/kernel/pci-swiotlb.c
@@ -5,7 +5,7 @@
5#include <linux/module.h> 5#include <linux/module.h>
6#include <linux/dma-mapping.h> 6#include <linux/dma-mapping.h>
7 7
8#include <asm/proto.h> 8#include <asm/iommu.h>
9#include <asm/swiotlb.h> 9#include <asm/swiotlb.h>
10#include <asm/dma.h> 10#include <asm/dma.h>
11 11
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 5909039f37aa..92fade4a62cf 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -207,6 +207,7 @@ void cpu_idle (void)
207 if (__get_cpu_var(cpu_idle_state)) 207 if (__get_cpu_var(cpu_idle_state))
208 __get_cpu_var(cpu_idle_state) = 0; 208 __get_cpu_var(cpu_idle_state) = 0;
209 209
210 check_pgt_cache();
210 rmb(); 211 rmb();
211 idle = pm_idle; 212 idle = pm_idle;
212 if (!idle) 213 if (!idle)
@@ -278,7 +279,7 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
278 */ 279 */
279 if (!pm_idle) { 280 if (!pm_idle) {
280 if (!printed) { 281 if (!printed) {
281 printk("using mwait in idle threads.\n"); 282 printk(KERN_INFO "using mwait in idle threads.\n");
282 printed = 1; 283 printed = 1;
283 } 284 }
284 pm_idle = mwait_idle; 285 pm_idle = mwait_idle;
@@ -305,6 +306,7 @@ early_param("idle", idle_setup);
305void __show_regs(struct pt_regs * regs) 306void __show_regs(struct pt_regs * regs)
306{ 307{
307 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs; 308 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
309 unsigned long d0, d1, d2, d3, d6, d7;
308 unsigned int fsindex,gsindex; 310 unsigned int fsindex,gsindex;
309 unsigned int ds,cs,es; 311 unsigned int ds,cs,es;
310 312
@@ -349,6 +351,15 @@ void __show_regs(struct pt_regs * regs)
349 fs,fsindex,gs,gsindex,shadowgs); 351 fs,fsindex,gs,gsindex,shadowgs);
350 printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0); 352 printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
351 printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4); 353 printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);
354
355 get_debugreg(d0, 0);
356 get_debugreg(d1, 1);
357 get_debugreg(d2, 2);
358 printk("DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
359 get_debugreg(d3, 3);
360 get_debugreg(d6, 6);
361 get_debugreg(d7, 7);
362 printk("DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
352} 363}
353 364
354void show_regs(struct pt_regs *regs) 365void show_regs(struct pt_regs *regs)
diff --git a/arch/x86_64/kernel/reboot.c b/arch/x86_64/kernel/reboot.c
index 7503068e788d..368db2b9c5ac 100644
--- a/arch/x86_64/kernel/reboot.c
+++ b/arch/x86_64/kernel/reboot.c
@@ -16,6 +16,7 @@
16#include <asm/pgtable.h> 16#include <asm/pgtable.h>
17#include <asm/tlbflush.h> 17#include <asm/tlbflush.h>
18#include <asm/apic.h> 18#include <asm/apic.h>
19#include <asm/iommu.h>
19 20
20/* 21/*
21 * Power off function, if any 22 * Power off function, if any
@@ -81,6 +82,7 @@ static inline void kb_wait(void)
81void machine_shutdown(void) 82void machine_shutdown(void)
82{ 83{
83 unsigned long flags; 84 unsigned long flags;
85
84 /* Stop the cpus and apics */ 86 /* Stop the cpus and apics */
85#ifdef CONFIG_SMP 87#ifdef CONFIG_SMP
86 int reboot_cpu_id; 88 int reboot_cpu_id;
@@ -111,6 +113,8 @@ void machine_shutdown(void)
111 disable_IO_APIC(); 113 disable_IO_APIC();
112 114
113 local_irq_restore(flags); 115 local_irq_restore(flags);
116
117 pci_iommu_shutdown();
114} 118}
115 119
116void machine_emergency_restart(void) 120void machine_emergency_restart(void)
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 33ef718f8cb5..6fa0a302e2aa 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -575,6 +575,8 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
575 level = cpuid_eax(1); 575 level = cpuid_eax(1);
576 if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)) 576 if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
577 set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability); 577 set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
578 if (c->x86 == 0x10)
579 set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
578 580
579 /* Enable workaround for FXSAVE leak */ 581 /* Enable workaround for FXSAVE leak */
580 if (c->x86 >= 6) 582 if (c->x86 >= 6)
@@ -600,8 +602,11 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
600 if (c->extended_cpuid_level >= 0x80000008) 602 if (c->extended_cpuid_level >= 0x80000008)
601 amd_detect_cmp(c); 603 amd_detect_cmp(c);
602 604
603 /* Fix cpuid4 emulation for more */ 605 if (c->extended_cpuid_level >= 0x80000006 &&
604 num_cache_leaves = 3; 606 (cpuid_edx(0x80000006) & 0xf000))
607 num_cache_leaves = 4;
608 else
609 num_cache_leaves = 3;
605 610
606 /* RDTSC can be speculated around */ 611 /* RDTSC can be speculated around */
607 clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability); 612 clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
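Background for the num_cache_leaves change above: on AMD CPUs, EDX of CPUID leaf 0x80000006 describes the L3 cache, and bits 15:12 hold the L3 associativity encoding, so a non-zero value there indicates an L3 is present and cpuid4 emulation must report a fourth cache leaf. A small sketch of the same decode, under that assumption:

	u32 edx = cpuid_edx(0x80000006);
	unsigned int l3_assoc = (edx >> 12) & 0xf;	/* 0 means no L3 cache */
	num_cache_leaves = l3_assoc ? 4 : 3;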
diff --git a/arch/x86_64/kernel/signal.c b/arch/x86_64/kernel/signal.c
index 290f5d8037cd..4886afcd6287 100644
--- a/arch/x86_64/kernel/signal.c
+++ b/arch/x86_64/kernel/signal.c
@@ -26,6 +26,7 @@
26#include <asm/i387.h> 26#include <asm/i387.h>
27#include <asm/proto.h> 27#include <asm/proto.h>
28#include <asm/ia32_unistd.h> 28#include <asm/ia32_unistd.h>
29#include <asm/mce.h>
29 30
30/* #define DEBUG_SIG 1 */ 31/* #define DEBUG_SIG 1 */
31 32
@@ -472,6 +473,12 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
472 clear_thread_flag(TIF_SINGLESTEP); 473 clear_thread_flag(TIF_SINGLESTEP);
473 } 474 }
474 475
476#ifdef CONFIG_X86_MCE
477 /* notify userspace of pending MCEs */
478 if (thread_info_flags & _TIF_MCE_NOTIFY)
479 mce_notify_user();
480#endif /* CONFIG_X86_MCE */
481
475 /* deal with pending signal delivery */ 482 /* deal with pending signal delivery */
476 if (thread_info_flags & (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK)) 483 if (thread_info_flags & (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK))
477 do_signal(regs); 484 do_signal(regs);
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index 0694940b2e73..673a300b5944 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -241,7 +241,7 @@ void flush_tlb_mm (struct mm_struct * mm)
241 } 241 }
242 if (!cpus_empty(cpu_mask)) 242 if (!cpus_empty(cpu_mask))
243 flush_tlb_others(cpu_mask, mm, FLUSH_ALL); 243 flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
244 244 check_pgt_cache();
245 preempt_enable(); 245 preempt_enable();
246} 246}
247EXPORT_SYMBOL(flush_tlb_mm); 247EXPORT_SYMBOL(flush_tlb_mm);
@@ -386,9 +386,9 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
386 return 0; 386 return 0;
387 } 387 }
388 388
389 spin_lock_bh(&call_lock); 389 spin_lock(&call_lock);
390 __smp_call_function_single(cpu, func, info, nonatomic, wait); 390 __smp_call_function_single(cpu, func, info, nonatomic, wait);
391 spin_unlock_bh(&call_lock); 391 spin_unlock(&call_lock);
392 put_cpu(); 392 put_cpu();
393 return 0; 393 return 0;
394} 394}
diff --git a/arch/x86_64/kernel/tce.c b/arch/x86_64/kernel/tce.c
index f61fb8e4f129..3aeae2fa2e24 100644
--- a/arch/x86_64/kernel/tce.c
+++ b/arch/x86_64/kernel/tce.c
@@ -136,9 +136,9 @@ int build_tce_table(struct pci_dev *dev, void __iomem *bbar)
136 struct iommu_table *tbl; 136 struct iommu_table *tbl;
137 int ret; 137 int ret;
138 138
139 if (dev->sysdata) { 139 if (pci_iommu(dev->bus)) {
140 printk(KERN_ERR "Calgary: dev %p has sysdata %p\n", 140 printk(KERN_ERR "Calgary: dev %p has sysdata->iommu %p\n",
141 dev, dev->sysdata); 141 dev, pci_iommu(dev->bus));
142 BUG(); 142 BUG();
143 } 143 }
144 144
@@ -155,11 +155,7 @@ int build_tce_table(struct pci_dev *dev, void __iomem *bbar)
155 155
156 tbl->bbar = bbar; 156 tbl->bbar = bbar;
157 157
158 /* 158 set_pci_iommu(dev->bus, tbl);
159 * NUMA is already using the bus's sysdata pointer, so we use
160 * the bus's pci_dev's sysdata instead.
161 */
162 dev->sysdata = tbl;
163 159
164 return 0; 160 return 0;
165 161
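The pci_iommu()/set_pci_iommu() accessors used throughout this patch are not shown in this diff. A plausible sketch, assuming they wrap an iommu pointer inside the bus sysdata structure that the NUMA code already owns (names and layout here are assumptions):

	struct pci_sysdata {
		int	node;		/* NUMA node, already stored here */
		void	*iommu;		/* per-bus struct iommu_table */
	};

	static inline struct iommu_table *pci_iommu(struct pci_bus *bus)
	{
		struct pci_sysdata *sd = bus->sysdata;
		return sd->iommu;
	}

	static inline void set_pci_iommu(struct pci_bus *bus, struct iommu_table *tbl)
	{
		struct pci_sysdata *sd = bus->sysdata;
		sd->iommu = tbl;
	}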
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 4a0895bacf51..6d48a4e826d9 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -33,6 +33,7 @@
33#include <acpi/acpi_bus.h> 33#include <acpi/acpi_bus.h>
34#endif 34#endif
35#include <asm/8253pit.h> 35#include <asm/8253pit.h>
36#include <asm/i8253.h>
36#include <asm/pgtable.h> 37#include <asm/pgtable.h>
37#include <asm/vsyscall.h> 38#include <asm/vsyscall.h>
38#include <asm/timex.h> 39#include <asm/timex.h>
@@ -44,12 +45,14 @@
44#include <asm/hpet.h> 45#include <asm/hpet.h>
45#include <asm/mpspec.h> 46#include <asm/mpspec.h>
46#include <asm/nmi.h> 47#include <asm/nmi.h>
48#include <asm/vgtod.h>
47 49
48static char *timename = NULL; 50static char *timename = NULL;
49 51
50DEFINE_SPINLOCK(rtc_lock); 52DEFINE_SPINLOCK(rtc_lock);
51EXPORT_SYMBOL(rtc_lock); 53EXPORT_SYMBOL(rtc_lock);
52DEFINE_SPINLOCK(i8253_lock); 54DEFINE_SPINLOCK(i8253_lock);
55EXPORT_SYMBOL(i8253_lock);
53 56
54volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES; 57volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
55 58
@@ -79,8 +82,9 @@ EXPORT_SYMBOL(profile_pc);
79 * sheet for details. 82 * sheet for details.
80 */ 83 */
81 84
82static void set_rtc_mmss(unsigned long nowtime) 85static int set_rtc_mmss(unsigned long nowtime)
83{ 86{
87 int retval = 0;
84 int real_seconds, real_minutes, cmos_minutes; 88 int real_seconds, real_minutes, cmos_minutes;
85 unsigned char control, freq_select; 89 unsigned char control, freq_select;
86 90
@@ -120,6 +124,7 @@ static void set_rtc_mmss(unsigned long nowtime)
120 if (abs(real_minutes - cmos_minutes) >= 30) { 124 if (abs(real_minutes - cmos_minutes) >= 30) {
121 printk(KERN_WARNING "time.c: can't update CMOS clock " 125 printk(KERN_WARNING "time.c: can't update CMOS clock "
122 "from %d to %d\n", cmos_minutes, real_minutes); 126 "from %d to %d\n", cmos_minutes, real_minutes);
127 retval = -1;
123 } else { 128 } else {
124 BIN_TO_BCD(real_seconds); 129 BIN_TO_BCD(real_seconds);
125 BIN_TO_BCD(real_minutes); 130 BIN_TO_BCD(real_minutes);
@@ -139,12 +144,17 @@ static void set_rtc_mmss(unsigned long nowtime)
139 CMOS_WRITE(freq_select, RTC_FREQ_SELECT); 144 CMOS_WRITE(freq_select, RTC_FREQ_SELECT);
140 145
141 spin_unlock(&rtc_lock); 146 spin_unlock(&rtc_lock);
147
148 return retval;
142} 149}
143 150
151int update_persistent_clock(struct timespec now)
152{
153 return set_rtc_mmss(now.tv_sec);
154}
144 155
145void main_timer_handler(void) 156void main_timer_handler(void)
146{ 157{
147 static unsigned long rtc_update = 0;
148/* 158/*
149 * Here we are in the timer irq handler. We have irqs locally disabled (so we 159 * Here we are in the timer irq handler. We have irqs locally disabled (so we
150 * don't need spin_lock_irqsave()) but we don't know if the timer_bh is running 160 * don't need spin_lock_irqsave()) but we don't know if the timer_bh is running
@@ -172,20 +182,6 @@ void main_timer_handler(void)
172 if (!using_apic_timer) 182 if (!using_apic_timer)
173 smp_local_timer_interrupt(); 183 smp_local_timer_interrupt();
174 184
175/*
176 * If we have an externally synchronized Linux clock, then update CMOS clock
177 * accordingly every ~11 minutes. set_rtc_mmss() will be called in the jiffy
178 * closest to exactly 500 ms before the next second. If the update fails, we
179 * don't care, as it'll be updated on the next turn, and the problem (time way
180 * off) isn't likely to go away much sooner anyway.
181 */
182
183 if (ntp_synced() && xtime.tv_sec > rtc_update &&
184 abs(xtime.tv_nsec - 500000000) <= tick_nsec / 2) {
185 set_rtc_mmss(xtime.tv_sec);
186 rtc_update = xtime.tv_sec + 660;
187 }
188
189 write_sequnlock(&xtime_lock); 185 write_sequnlock(&xtime_lock);
190} 186}
191 187
@@ -199,7 +195,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
199 return IRQ_HANDLED; 195 return IRQ_HANDLED;
200} 196}
201 197
202static unsigned long get_cmos_time(void) 198unsigned long read_persistent_clock(void)
203{ 199{
204 unsigned int year, mon, day, hour, min, sec; 200 unsigned int year, mon, day, hour, min, sec;
205 unsigned long flags; 201 unsigned long flags;
@@ -226,7 +222,7 @@ static unsigned long get_cmos_time(void)
226 /* 222 /*
227 * We know that x86-64 always uses BCD format, no need to check the 223 * We know that x86-64 always uses BCD format, no need to check the
228 * config register. 224 * config register.
229 */ 225 */
230 226
231 BCD_TO_BIN(sec); 227 BCD_TO_BIN(sec);
232 BCD_TO_BIN(min); 228 BCD_TO_BIN(min);
@@ -239,11 +235,11 @@ static unsigned long get_cmos_time(void)
239 BCD_TO_BIN(century); 235 BCD_TO_BIN(century);
240 year += century * 100; 236 year += century * 100;
241 printk(KERN_INFO "Extended CMOS year: %d\n", century * 100); 237 printk(KERN_INFO "Extended CMOS year: %d\n", century * 100);
242 } else { 238 } else {
243 /* 239 /*
244 * x86-64 systems only exist since 2002. 240
245 * This will work up to Dec 31, 2100 241 * This will work up to Dec 31, 2100
246 */ 242 */
247 year += 2000; 243 year += 2000;
248 } 244 }
249 245
@@ -255,45 +251,45 @@ static unsigned long get_cmos_time(void)
255#define TICK_COUNT 100000000 251#define TICK_COUNT 100000000
256static unsigned int __init tsc_calibrate_cpu_khz(void) 252static unsigned int __init tsc_calibrate_cpu_khz(void)
257{ 253{
258 int tsc_start, tsc_now; 254 int tsc_start, tsc_now;
259 int i, no_ctr_free; 255 int i, no_ctr_free;
260 unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0; 256 unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
261 unsigned long flags; 257 unsigned long flags;
262 258
263 for (i = 0; i < 4; i++) 259 for (i = 0; i < 4; i++)
264 if (avail_to_resrv_perfctr_nmi_bit(i)) 260 if (avail_to_resrv_perfctr_nmi_bit(i))
265 break; 261 break;
266 no_ctr_free = (i == 4); 262 no_ctr_free = (i == 4);
267 if (no_ctr_free) { 263 if (no_ctr_free) {
268 i = 3; 264 i = 3;
269 rdmsrl(MSR_K7_EVNTSEL3, evntsel3); 265 rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
270 wrmsrl(MSR_K7_EVNTSEL3, 0); 266 wrmsrl(MSR_K7_EVNTSEL3, 0);
271 rdmsrl(MSR_K7_PERFCTR3, pmc3); 267 rdmsrl(MSR_K7_PERFCTR3, pmc3);
272 } else { 268 } else {
273 reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i); 269 reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
274 reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i); 270 reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
275 } 271 }
276 local_irq_save(flags); 272 local_irq_save(flags);
277 /* start measuring cycles, incrementing from 0 */ 273
278 wrmsrl(MSR_K7_PERFCTR0 + i, 0); 274 wrmsrl(MSR_K7_PERFCTR0 + i, 0);
279 wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76); 275 wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
280 rdtscl(tsc_start); 276 rdtscl(tsc_start);
281 do { 277 do {
282 rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now); 278 rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
283 tsc_now = get_cycles_sync(); 279 tsc_now = get_cycles_sync();
284 } while ((tsc_now - tsc_start) < TICK_COUNT); 280 } while ((tsc_now - tsc_start) < TICK_COUNT);
285 281
286 local_irq_restore(flags); 282 local_irq_restore(flags);
287 if (no_ctr_free) { 283 if (no_ctr_free) {
288 wrmsrl(MSR_K7_EVNTSEL3, 0); 284 wrmsrl(MSR_K7_EVNTSEL3, 0);
289 wrmsrl(MSR_K7_PERFCTR3, pmc3); 285 wrmsrl(MSR_K7_PERFCTR3, pmc3);
290 wrmsrl(MSR_K7_EVNTSEL3, evntsel3); 286 wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
291 } else { 287 } else {
292 release_perfctr_nmi(MSR_K7_PERFCTR0 + i); 288 release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
293 release_evntsel_nmi(MSR_K7_EVNTSEL0 + i); 289 release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
294 } 290 }
295 291
296 return pmc_now * tsc_khz / (tsc_now - tsc_start); 292 return pmc_now * tsc_khz / (tsc_now - tsc_start);
297} 293}
298 294
299/* 295/*
@@ -321,7 +317,7 @@ static unsigned int __init pit_calibrate_tsc(void)
321 end = get_cycles_sync(); 317 end = get_cycles_sync();
322 318
323 spin_unlock_irqrestore(&i8253_lock, flags); 319 spin_unlock_irqrestore(&i8253_lock, flags);
324 320
325 return (end - start) / 50; 321 return (end - start) / 50;
326} 322}
327 323
@@ -366,25 +362,20 @@ static struct irqaction irq0 = {
366 .handler = timer_interrupt, 362 .handler = timer_interrupt,
367 .flags = IRQF_DISABLED | IRQF_IRQPOLL, 363 .flags = IRQF_DISABLED | IRQF_IRQPOLL,
368 .mask = CPU_MASK_NONE, 364 .mask = CPU_MASK_NONE,
369 .name = "timer" 365 .name = "timer"
370}; 366};
371 367
372void __init time_init(void) 368void __init time_init(void)
373{ 369{
374 if (nohpet) 370 if (nohpet)
375 hpet_address = 0; 371 hpet_address = 0;
376 xtime.tv_sec = get_cmos_time();
377 xtime.tv_nsec = 0;
378
379 set_normalized_timespec(&wall_to_monotonic,
380 -xtime.tv_sec, -xtime.tv_nsec);
381 372
382 if (hpet_arch_init()) 373 if (hpet_arch_init())
383 hpet_address = 0; 374 hpet_address = 0;
384 375
385 if (hpet_use_timer) { 376 if (hpet_use_timer) {
386 /* set tick_nsec to use the proper rate for HPET */ 377 /* set tick_nsec to use the proper rate for HPET */
387 tick_nsec = TICK_NSEC_HPET; 378 tick_nsec = TICK_NSEC_HPET;
388 tsc_khz = hpet_calibrate_tsc(); 379 tsc_khz = hpet_calibrate_tsc();
389 timename = "HPET"; 380 timename = "HPET";
390 } else { 381 } else {
@@ -415,54 +406,21 @@ void __init time_init(void)
415 setup_irq(0, &irq0); 406 setup_irq(0, &irq0);
416} 407}
417 408
418
419static long clock_cmos_diff;
420static unsigned long sleep_start;
421
422/* 409/*
423 * sysfs support for the timer. 410 * sysfs support for the timer.
424 */ 411 */
425 412
426static int timer_suspend(struct sys_device *dev, pm_message_t state) 413static int timer_suspend(struct sys_device *dev, pm_message_t state)
427{ 414{
428 /*
429 * Estimate time zone so that set_time can update the clock
430 */
431 long cmos_time = get_cmos_time();
432
433 clock_cmos_diff = -cmos_time;
434 clock_cmos_diff += get_seconds();
435 sleep_start = cmos_time;
436 return 0; 415 return 0;
437} 416}
438 417
439static int timer_resume(struct sys_device *dev) 418static int timer_resume(struct sys_device *dev)
440{ 419{
441 unsigned long flags;
442 unsigned long sec;
443 unsigned long ctime = get_cmos_time();
444 long sleep_length = (ctime - sleep_start) * HZ;
445
446 if (sleep_length < 0) {
447 printk(KERN_WARNING "Time skew detected in timer resume!\n");
448 /* The time after the resume must not be earlier than the time
449 * before the suspend or some nasty things will happen
450 */
451 sleep_length = 0;
452 ctime = sleep_start;
453 }
454 if (hpet_address) 420 if (hpet_address)
455 hpet_reenable(); 421 hpet_reenable();
456 else 422 else
457 i8254_timer_resume(); 423 i8254_timer_resume();
458
459 sec = ctime + clock_cmos_diff;
460 write_seqlock_irqsave(&xtime_lock,flags);
461 xtime.tv_sec = sec;
462 xtime.tv_nsec = 0;
463 jiffies += sleep_length;
464 write_sequnlock_irqrestore(&xtime_lock,flags);
465 touch_softlockup_watchdog();
466 return 0; 424 return 0;
467} 425}
468 426
diff --git a/arch/x86_64/kernel/tsc.c b/arch/x86_64/kernel/tsc.c
index e850aa01e1b3..9b76b03d0600 100644
--- a/arch/x86_64/kernel/tsc.c
+++ b/arch/x86_64/kernel/tsc.c
@@ -61,25 +61,9 @@ inline int check_tsc_unstable(void)
61 * first tick after the change will be slightly wrong. 61 * first tick after the change will be slightly wrong.
62 */ 62 */
63 63
64#include <linux/workqueue.h> 64static unsigned int ref_freq;
65 65static unsigned long loops_per_jiffy_ref;
66static unsigned int cpufreq_delayed_issched = 0; 66static unsigned long tsc_khz_ref;
67static unsigned int cpufreq_init = 0;
68static struct work_struct cpufreq_delayed_get_work;
69
70static void handle_cpufreq_delayed_get(struct work_struct *v)
71{
72 unsigned int cpu;
73 for_each_online_cpu(cpu) {
74 cpufreq_get(cpu);
75 }
76 cpufreq_delayed_issched = 0;
77}
78
79static unsigned int ref_freq = 0;
80static unsigned long loops_per_jiffy_ref = 0;
81
82static unsigned long tsc_khz_ref = 0;
83 67
84static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, 68static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
85 void *data) 69 void *data)
@@ -125,10 +109,8 @@ static struct notifier_block time_cpufreq_notifier_block = {
125 109
126static int __init cpufreq_tsc(void) 110static int __init cpufreq_tsc(void)
127{ 111{
128 INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get); 112 cpufreq_register_notifier(&time_cpufreq_notifier_block,
129 if (!cpufreq_register_notifier(&time_cpufreq_notifier_block, 113 CPUFREQ_TRANSITION_NOTIFIER);
130 CPUFREQ_TRANSITION_NOTIFIER))
131 cpufreq_init = 1;
132 return 0; 114 return 0;
133} 115}
134 116
@@ -153,17 +135,18 @@ __cpuinit int unsynchronized_tsc(void)
153#endif 135#endif
154 /* Most intel systems have synchronized TSCs except for 136 /* Most intel systems have synchronized TSCs except for
155 multi node systems */ 137 multi node systems */
156 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { 138 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
157#ifdef CONFIG_ACPI 139#ifdef CONFIG_ACPI
158 /* But TSC doesn't tick in C3 so don't use it there */ 140 /* But TSC doesn't tick in C3 so don't use it there */
159 if (acpi_gbl_FADT.header.length > 0 && acpi_gbl_FADT.C3latency < 1000) 141 if (acpi_gbl_FADT.header.length > 0 &&
142 acpi_gbl_FADT.C3latency < 1000)
160 return 1; 143 return 1;
161#endif 144#endif
162 return 0; 145 return 0;
163 } 146 }
164 147
165 /* Assume multi socket systems are not synchronized */ 148 /* Assume multi socket systems are not synchronized */
166 return num_present_cpus() > 1; 149 return num_present_cpus() > 1;
167} 150}
168 151
169int __init notsc_setup(char *s) 152int __init notsc_setup(char *s)
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index 5c57ea4591c1..e7a5eb6cd785 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -54,6 +54,13 @@ SECTIONS
54 54
55 RODATA 55 RODATA
56 56
57 . = ALIGN(4);
58 .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {
59 __tracedata_start = .;
60 *(.tracedata)
61 __tracedata_end = .;
62 }
63
57 . = ALIGN(PAGE_SIZE); /* Align data segment to page size boundary */ 64 . = ALIGN(PAGE_SIZE); /* Align data segment to page size boundary */
58 /* Data */ 65 /* Data */
59 .data : AT(ADDR(.data) - LOAD_OFFSET) { 66 .data : AT(ADDR(.data) - LOAD_OFFSET) {
@@ -93,6 +100,9 @@ SECTIONS
93 .vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data)) 100 .vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data))
94 { *(.vsyscall_gtod_data) } 101 { *(.vsyscall_gtod_data) }
95 vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data); 102 vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data);
103 .vsyscall_clock : AT(VLOAD(.vsyscall_clock))
104 { *(.vsyscall_clock) }
105 vsyscall_clock = VVIRT(.vsyscall_clock);
96 106
97 107
98 .vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) 108 .vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1))
@@ -133,20 +143,11 @@ SECTIONS
133 /* might get freed after init */ 143 /* might get freed after init */
134 . = ALIGN(4096); 144 . = ALIGN(4096);
135 __smp_alt_begin = .; 145 __smp_alt_begin = .;
136 __smp_alt_instructions = .;
137 .smp_altinstructions : AT(ADDR(.smp_altinstructions) - LOAD_OFFSET) {
138 *(.smp_altinstructions)
139 }
140 __smp_alt_instructions_end = .;
141 . = ALIGN(8);
142 __smp_locks = .; 146 __smp_locks = .;
143 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { 147 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
144 *(.smp_locks) 148 *(.smp_locks)
145 } 149 }
146 __smp_locks_end = .; 150 __smp_locks_end = .;
147 .smp_altinstr_replacement : AT(ADDR(.smp_altinstr_replacement) - LOAD_OFFSET) {
148 *(.smp_altinstr_replacement)
149 }
150 . = ALIGN(4096); 151 . = ALIGN(4096);
151 __smp_alt_end = .; 152 __smp_alt_end = .;
152 153
@@ -189,6 +190,12 @@ SECTIONS
189 .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { *(.exit.text) } 190 .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { *(.exit.text) }
190 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { *(.exit.data) } 191 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { *(.exit.data) }
191 192
193/* vdso blob that is mapped into user space */
194 vdso_start = . ;
195 .vdso : AT(ADDR(.vdso) - LOAD_OFFSET) { *(.vdso) }
196 . = ALIGN(4096);
197 vdso_end = .;
198
192#ifdef CONFIG_BLK_DEV_INITRD 199#ifdef CONFIG_BLK_DEV_INITRD
193 . = ALIGN(4096); 200 . = ALIGN(4096);
194 __initramfs_start = .; 201 __initramfs_start = .;
diff --git a/arch/x86_64/kernel/vsyscall.c b/arch/x86_64/kernel/vsyscall.c
index 57660d58d500..06c34949bfdc 100644
--- a/arch/x86_64/kernel/vsyscall.c
+++ b/arch/x86_64/kernel/vsyscall.c
@@ -42,6 +42,7 @@
42#include <asm/segment.h> 42#include <asm/segment.h>
43#include <asm/desc.h> 43#include <asm/desc.h>
44#include <asm/topology.h> 44#include <asm/topology.h>
45#include <asm/vgtod.h>
45 46
46#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr))) 47#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
47#define __syscall_clobber "r11","rcx","memory" 48#define __syscall_clobber "r11","rcx","memory"
@@ -57,26 +58,9 @@
57 * - written by timer interrupt or sysctl (/proc/sys/kernel/vsyscall64) 58
58 * Try to keep this structure as small as possible to avoid cache line ping pongs 59 * Try to keep this structure as small as possible to avoid cache line ping pongs
59 */ 60 */
60struct vsyscall_gtod_data_t {
61 seqlock_t lock;
62
63 /* open coded 'struct timespec' */
64 time_t wall_time_sec;
65 u32 wall_time_nsec;
66
67 int sysctl_enabled;
68 struct timezone sys_tz;
69 struct { /* extract of a clocksource struct */
70 cycle_t (*vread)(void);
71 cycle_t cycle_last;
72 cycle_t mask;
73 u32 mult;
74 u32 shift;
75 } clock;
76};
77int __vgetcpu_mode __section_vgetcpu_mode; 61int __vgetcpu_mode __section_vgetcpu_mode;
78 62
79struct vsyscall_gtod_data_t __vsyscall_gtod_data __section_vsyscall_gtod_data = 63struct vsyscall_gtod_data __vsyscall_gtod_data __section_vsyscall_gtod_data =
80{ 64{
81 .lock = SEQLOCK_UNLOCKED, 65 .lock = SEQLOCK_UNLOCKED,
82 .sysctl_enabled = 1, 66 .sysctl_enabled = 1,
@@ -96,6 +80,8 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
96 vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec; 80 vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
97 vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; 81 vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
98 vsyscall_gtod_data.sys_tz = sys_tz; 82 vsyscall_gtod_data.sys_tz = sys_tz;
83 vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
84 vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic;
99 write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); 85 write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
100} 86}
101 87
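The vsyscall_gtod_data_t definition deleted above presumably moves into the new <asm/vgtod.h> so the vsyscall page and the vDSO can share one layout; update_vsyscall() above also starts publishing wall_to_monotonic through it. A rough sketch of the shared structure, reconstructed from the fields used in this file (the exact layout is an assumption):

	struct vsyscall_gtod_data {
		seqlock_t	lock;
		int		sysctl_enabled;
		time_t		wall_time_sec;		/* open coded 'struct timespec' */
		u32		wall_time_nsec;
		struct timespec	wall_to_monotonic;	/* new: set by update_vsyscall() */
		struct timezone	sys_tz;
		struct {				/* extract of a clocksource struct */
			cycle_t	(*vread)(void);
			cycle_t	cycle_last;
			cycle_t	mask;
			u32	mult;
			u32	shift;
		} clock;
	};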
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 84f11728fc76..2074bddd4f04 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -301,7 +301,7 @@ static int vmalloc_fault(unsigned long address)
301 return 0; 301 return 0;
302} 302}
303 303
304int page_fault_trace = 0; 304static int page_fault_trace;
305int exception_trace = 1; 305int exception_trace = 1;
306 306
307/* 307/*
@@ -568,7 +568,7 @@ out_of_memory:
568 } 568 }
569 printk("VM: killing process %s\n", tsk->comm); 569 printk("VM: killing process %s\n", tsk->comm);
570 if (error_code & 4) 570 if (error_code & 4)
571 do_exit(SIGKILL); 571 do_group_exit(SIGKILL);
572 goto no_context; 572 goto no_context;
573 573
574do_sigbus: 574do_sigbus:
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 9a0e98accf04..381c2ecd407e 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -700,8 +700,6 @@ int kern_addr_valid(unsigned long addr)
700#ifdef CONFIG_SYSCTL 700#ifdef CONFIG_SYSCTL
701#include <linux/sysctl.h> 701#include <linux/sysctl.h>
702 702
703extern int exception_trace, page_fault_trace;
704
705static ctl_table debug_table2[] = { 703static ctl_table debug_table2[] = {
706 { 704 {
707 .ctl_name = 99, 705 .ctl_name = 99,
@@ -774,3 +772,12 @@ void *alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size)
774 return __alloc_bootmem_core(pgdat->bdata, size, 772 return __alloc_bootmem_core(pgdat->bdata, size,
775 SMP_CACHE_BYTES, (4UL*1024*1024*1024), 0); 773 SMP_CACHE_BYTES, (4UL*1024*1024*1024), 0);
776} 774}
775
776const char *arch_vma_name(struct vm_area_struct *vma)
777{
778 if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
779 return "[vdso]";
780 if (vma == &gate_vma)
781 return "[vsyscall]";
782 return NULL;
783}
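
Editor's note: arch_vma_name() is what makes the new mapping show up as [vdso] (and the fixed page as [vsyscall]) in /proc/<pid>/maps. A quick userspace check, relying only on the /proc/self/maps format:

/* Print the [vdso] and [vsyscall] lines from this process's memory map. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f) {
		perror("fopen /proc/self/maps");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "[vdso]") || strstr(line, "[vsyscall]"))
			fputs(line, stdout);
	fclose(f);
	return 0;
}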
diff --git a/arch/x86_64/mm/k8topology.c b/arch/x86_64/mm/k8topology.c
index f983c75825d0..a96006f7ae0c 100644
--- a/arch/x86_64/mm/k8topology.c
+++ b/arch/x86_64/mm/k8topology.c
@@ -44,12 +44,12 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
44{ 44{
45 unsigned long prevbase; 45 unsigned long prevbase;
46 struct bootnode nodes[8]; 46 struct bootnode nodes[8];
47 int nodeid, i, nb; 47 int nodeid, i, j, nb;
48 unsigned char nodeids[8]; 48 unsigned char nodeids[8];
49 int found = 0; 49 int found = 0;
50 u32 reg; 50 u32 reg;
51 unsigned numnodes; 51 unsigned numnodes;
52 unsigned dualcore = 0; 52 unsigned num_cores;
53 53
54 if (!early_pci_allowed()) 54 if (!early_pci_allowed())
55 return -1; 55 return -1;
@@ -60,6 +60,9 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
60 60
61 printk(KERN_INFO "Scanning NUMA topology in Northbridge %d\n", nb); 61 printk(KERN_INFO "Scanning NUMA topology in Northbridge %d\n", nb);
62 62
63 num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
64 printk(KERN_INFO "CPU has %d num_cores\n", num_cores);
65
63 reg = read_pci_config(0, nb, 0, 0x60); 66 reg = read_pci_config(0, nb, 0, 0x60);
64 numnodes = ((reg >> 4) & 0xF) + 1; 67 numnodes = ((reg >> 4) & 0xF) + 1;
65 if (numnodes <= 1) 68 if (numnodes <= 1)
@@ -73,8 +76,6 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
73 unsigned long base,limit; 76 unsigned long base,limit;
74 u32 nodeid; 77 u32 nodeid;
75 78
76 /* Undefined before E stepping, but hopefully 0 */
77 dualcore |= ((read_pci_config(0, nb, 3, 0xe8) >> 12) & 3) == 1;
78 base = read_pci_config(0, nb, 1, 0x40 + i*8); 79 base = read_pci_config(0, nb, 1, 0x40 + i*8);
79 limit = read_pci_config(0, nb, 1, 0x44 + i*8); 80 limit = read_pci_config(0, nb, 1, 0x44 + i*8);
80 81
@@ -170,8 +171,8 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
170 for (i = 0; i < 8; i++) { 171 for (i = 0; i < 8; i++) {
171 if (nodes[i].start != nodes[i].end) { 172 if (nodes[i].start != nodes[i].end) {
172 nodeid = nodeids[i]; 173 nodeid = nodeids[i];
173 apicid_to_node[nodeid << dualcore] = i; 174 for (j = 0; j < num_cores; j++)
174 apicid_to_node[(nodeid << dualcore) + dualcore] = i; 175 apicid_to_node[(nodeid * num_cores) + j] = i;
175 setup_node_bootmem(i, nodes[i].start, nodes[i].end); 176 setup_node_bootmem(i, nodes[i].start, nodes[i].end);
176 } 177 }
177 } 178 }
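
Editor's note: the k8topology change replaces the old "dualcore shift" with a general mapping: with num_cores cores per node, APIC IDs nodeid*num_cores .. nodeid*num_cores + num_cores - 1 all belong to node nodeid. A standalone sketch of that arithmetic; the array size and the nodeids/num_cores values below are illustrative, not the kernel's.

/* Sketch of the apicid -> node table fill done in k8_scan_nodes() after
 * the dualcore hack was replaced by a per-node core count. */
#include <stdio.h>

#define MAX_APICS 64

int main(void)
{
	int apicid_to_node[MAX_APICS];
	unsigned num_cores = 4;		/* (cpuid_ecx(0x80000008) & 0xff) + 1 */
	unsigned nodeids[] = { 0, 1, 2, 3 };	/* one entry per populated node */
	unsigned i, j;

	for (i = 0; i < sizeof(nodeids) / sizeof(nodeids[0]); i++)
		for (j = 0; j < num_cores; j++)
			apicid_to_node[nodeids[i] * num_cores + j] = i;

	for (i = 0; i < 4 * num_cores; i++)
		printf("apicid %2u -> node %d\n", i, apicid_to_node[i]);
	return 0;
}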
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c
index 51548947ad3b..6da235522269 100644
--- a/arch/x86_64/mm/numa.c
+++ b/arch/x86_64/mm/numa.c
@@ -273,9 +273,6 @@ void __init numa_init_array(void)
273 273
274#ifdef CONFIG_NUMA_EMU 274#ifdef CONFIG_NUMA_EMU
275/* Numa emulation */ 275/* Numa emulation */
276#define E820_ADDR_HOLE_SIZE(start, end) \
277 (e820_hole_size((start) >> PAGE_SHIFT, (end) >> PAGE_SHIFT) << \
278 PAGE_SHIFT)
279char *cmdline __initdata; 276char *cmdline __initdata;
280 277
281/* 278/*
@@ -319,7 +316,7 @@ static int __init split_nodes_equally(struct bootnode *nodes, u64 *addr,
319 return -1; 316 return -1;
320 if (num_nodes > MAX_NUMNODES) 317 if (num_nodes > MAX_NUMNODES)
321 num_nodes = MAX_NUMNODES; 318 num_nodes = MAX_NUMNODES;
322 size = (max_addr - *addr - E820_ADDR_HOLE_SIZE(*addr, max_addr)) / 319 size = (max_addr - *addr - e820_hole_size(*addr, max_addr)) /
323 num_nodes; 320 num_nodes;
324 /* 321 /*
325 * Calculate the number of big nodes that can be allocated as a result 322 * Calculate the number of big nodes that can be allocated as a result
@@ -347,7 +344,7 @@ static int __init split_nodes_equally(struct bootnode *nodes, u64 *addr,
347 if (i == num_nodes + node_start - 1) 344 if (i == num_nodes + node_start - 1)
348 end = max_addr; 345 end = max_addr;
349 else 346 else
350 while (end - *addr - E820_ADDR_HOLE_SIZE(*addr, end) < 347 while (end - *addr - e820_hole_size(*addr, end) <
351 size) { 348 size) {
352 end += FAKE_NODE_MIN_SIZE; 349 end += FAKE_NODE_MIN_SIZE;
353 if (end > max_addr) { 350 if (end > max_addr) {
@@ -476,18 +473,22 @@ out:
476 473
477 /* 474 /*
478 * We need to vacate all active ranges that may have been registered by 475 * We need to vacate all active ranges that may have been registered by
479 * SRAT. 476 * SRAT and set acpi_numa to -1 so that srat_disabled() always returns
477 * true. NUMA emulation has succeeded so we will not scan ACPI nodes.
480 */ 478 */
481 remove_all_active_ranges(); 479 remove_all_active_ranges();
480#ifdef CONFIG_ACPI_NUMA
481 acpi_numa = -1;
482#endif
482 for_each_node_mask(i, node_possible_map) { 483 for_each_node_mask(i, node_possible_map) {
483 e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT, 484 e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
484 nodes[i].end >> PAGE_SHIFT); 485 nodes[i].end >> PAGE_SHIFT);
485 setup_node_bootmem(i, nodes[i].start, nodes[i].end); 486 setup_node_bootmem(i, nodes[i].start, nodes[i].end);
486 } 487 }
488 acpi_fake_nodes(nodes, num_nodes);
487 numa_init_array(); 489 numa_init_array();
488 return 0; 490 return 0;
489} 491}
490#undef E820_ADDR_HOLE_SIZE
491#endif /* CONFIG_NUMA_EMU */ 492#endif /* CONFIG_NUMA_EMU */
492 493
493void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn) 494void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
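
Editor's note: with the E820_ADDR_HOLE_SIZE macro gone, split_nodes_equally() calls e820_hole_size() directly on byte addresses: the per-node size is the usable (non-hole) memory divided by the node count, and each node's end is pushed out in FAKE_NODE_MIN_SIZE steps until it covers that much usable memory. Below is an illustrative userspace model of that sizing loop; hole_size() is a stand-in for e820_hole_size() and the hole layout and sizes are invented.

/* Illustrative model of the hole-aware node sizing in split_nodes_equally(). */
#include <stdio.h>

#define MB(x)			((unsigned long long)(x) << 20)
#define FAKE_NODE_MIN_SIZE	MB(64)

/* Pretend there is a 256 MiB hole at [1536 MiB, 1792 MiB). */
static unsigned long long hole_size(unsigned long long start,
				    unsigned long long end)
{
	unsigned long long hs = MB(1536), he = MB(1792);
	unsigned long long lo = start > hs ? start : hs;
	unsigned long long hi = end < he ? end : he;

	return hi > lo ? hi - lo : 0;
}

int main(void)
{
	unsigned long long addr = 0, max_addr = MB(4096);
	int num_nodes = 4, i;
	unsigned long long size =
		(max_addr - addr - hole_size(addr, max_addr)) / num_nodes;

	for (i = 0; i < num_nodes; i++) {
		unsigned long long end = addr + size;

		if (i == num_nodes - 1)
			end = max_addr;
		else
			while (end - addr - hole_size(addr, end) < size &&
			       end + FAKE_NODE_MIN_SIZE <= max_addr)
				end += FAKE_NODE_MIN_SIZE;
		printf("node %d: [%llu MiB, %llu MiB)\n",
		       i, addr >> 20, end >> 20);
		addr = end;
	}
	return 0;
}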
diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c
index 9148f4a4cec6..36377b6b8efe 100644
--- a/arch/x86_64/mm/pageattr.c
+++ b/arch/x86_64/mm/pageattr.c
@@ -74,14 +74,12 @@ static void flush_kernel_map(void *arg)
74 struct page *pg; 74 struct page *pg;
75 75
76 /* When clflush is available always use it because it is 76 /* When clflush is available always use it because it is
77 much cheaper than WBINVD. Disable clflush for now because 77 much cheaper than WBINVD. */
78 the high level code is not ready yet */ 78 if (!cpu_has_clflush)
79 if (1 || !cpu_has_clflush)
80 asm volatile("wbinvd" ::: "memory"); 79 asm volatile("wbinvd" ::: "memory");
81 else list_for_each_entry(pg, l, lru) { 80 else list_for_each_entry(pg, l, lru) {
82 void *adr = page_address(pg); 81 void *adr = page_address(pg);
83 if (cpu_has_clflush) 82 cache_flush_page(adr);
84 cache_flush_page(adr);
85 } 83 }
86 __flush_tlb_all(); 84 __flush_tlb_all();
87} 85}
@@ -95,7 +93,8 @@ static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */
95 93
96static inline void save_page(struct page *fpage) 94static inline void save_page(struct page *fpage)
97{ 95{
98 list_add(&fpage->lru, &deferred_pages); 96 if (!test_and_set_bit(PG_arch_1, &fpage->flags))
97 list_add(&fpage->lru, &deferred_pages);
99} 98}
100 99
101/* 100/*
@@ -129,9 +128,12 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
129 pte_t *kpte; 128 pte_t *kpte;
130 struct page *kpte_page; 129 struct page *kpte_page;
131 pgprot_t ref_prot2; 130 pgprot_t ref_prot2;
131
132 kpte = lookup_address(address); 132 kpte = lookup_address(address);
133 if (!kpte) return 0; 133 if (!kpte) return 0;
134 kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK); 134 kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
135 BUG_ON(PageLRU(kpte_page));
136 BUG_ON(PageCompound(kpte_page));
135 if (pgprot_val(prot) != pgprot_val(ref_prot)) { 137 if (pgprot_val(prot) != pgprot_val(ref_prot)) {
136 if (!pte_huge(*kpte)) { 138 if (!pte_huge(*kpte)) {
137 set_pte(kpte, pfn_pte(pfn, prot)); 139 set_pte(kpte, pfn_pte(pfn, prot));
@@ -159,10 +161,9 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
159 /* on x86-64 the direct mapping set at boot is not using 4k pages */ 161 /* on x86-64 the direct mapping set at boot is not using 4k pages */
160 BUG_ON(PageReserved(kpte_page)); 162 BUG_ON(PageReserved(kpte_page));
161 163
162 if (page_private(kpte_page) == 0) { 164 save_page(kpte_page);
163 save_page(kpte_page); 165 if (page_private(kpte_page) == 0)
164 revert_page(address, ref_prot); 166 revert_page(address, ref_prot);
165 }
166 return 0; 167 return 0;
167} 168}
168 169
@@ -234,6 +235,10 @@ void global_flush_tlb(void)
234 flush_map(&l); 235 flush_map(&l);
235 236
236 list_for_each_entry_safe(pg, next, &l, lru) { 237 list_for_each_entry_safe(pg, next, &l, lru) {
238 list_del(&pg->lru);
239 clear_bit(PG_arch_1, &pg->flags);
240 if (page_private(pg) != 0)
241 continue;
237 ClearPagePrivate(pg); 242 ClearPagePrivate(pg);
238 __free_page(pg); 243 __free_page(pg);
239 } 244 }
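
Editor's note: save_page() now queues a kpte page for deferred freeing only if its PG_arch_1 bit was not already set, and global_flush_tlb() clears the bit again while draining the list. The pattern, queue an object at most once by test-and-set on a per-object flag and clear it on drain, can be sketched in plain C; the names item, save_item and drain are illustrative, not kernel API.

/* Userspace sketch of the "queue at most once" pattern used by save_page(). */
#include <stdatomic.h>
#include <stdio.h>

struct item {
	atomic_flag queued;	/* stands in for PG_arch_1 */
	int id;
	struct item *next;
};

static struct item *pending;

static void save_item(struct item *it)
{
	if (!atomic_flag_test_and_set(&it->queued)) {	/* first time only */
		it->next = pending;
		pending = it;
	}
}

static void drain(void)
{
	while (pending) {
		struct item *it = pending;

		pending = it->next;
		atomic_flag_clear(&it->queued);
		printf("draining item %d\n", it->id);
	}
}

int main(void)
{
	struct item a = { ATOMIC_FLAG_INIT, 1, NULL };
	struct item b = { ATOMIC_FLAG_INIT, 2, NULL };

	save_item(&a);
	save_item(&b);
	save_item(&a);	/* duplicate: ignored, a is already queued */
	drain();	/* drains item 2 then item 1, each exactly once */
	return 0;
}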
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index 1e76bb0a7277..acdf03e19146 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -106,9 +106,9 @@ static __init int slit_valid(struct acpi_table_slit *slit)
106 for (j = 0; j < d; j++) { 106 for (j = 0; j < d; j++) {
107 u8 val = slit->entry[d*i + j]; 107 u8 val = slit->entry[d*i + j];
108 if (i == j) { 108 if (i == j) {
109 if (val != 10) 109 if (val != LOCAL_DISTANCE)
110 return 0; 110 return 0;
111 } else if (val <= 10) 111 } else if (val <= LOCAL_DISTANCE)
112 return 0; 112 return 0;
113 } 113 }
114 } 114 }
@@ -350,7 +350,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
350 350
351/* Sanity check to catch more bad SRATs (they are amazingly common). 351/* Sanity check to catch more bad SRATs (they are amazingly common).
352 Make sure the PXMs cover all memory. */ 352 Make sure the PXMs cover all memory. */
353static int nodes_cover_memory(void) 353static int __init nodes_cover_memory(const struct bootnode *nodes)
354{ 354{
355 int i; 355 int i;
356 unsigned long pxmram, e820ram; 356 unsigned long pxmram, e820ram;
@@ -394,6 +394,9 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
394{ 394{
395 int i; 395 int i;
396 396
397 if (acpi_numa <= 0)
398 return -1;
399
397 /* First clean up the node list */ 400 /* First clean up the node list */
398 for (i = 0; i < MAX_NUMNODES; i++) { 401 for (i = 0; i < MAX_NUMNODES; i++) {
399 cutoff_node(i, start, end); 402 cutoff_node(i, start, end);
@@ -403,10 +406,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
403 } 406 }
404 } 407 }
405 408
406 if (acpi_numa <= 0) 409 if (!nodes_cover_memory(nodes)) {
407 return -1;
408
409 if (!nodes_cover_memory()) {
410 bad_srat(); 410 bad_srat();
411 return -1; 411 return -1;
412 } 412 }
@@ -440,6 +440,86 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
440 return 0; 440 return 0;
441} 441}
442 442
443#ifdef CONFIG_NUMA_EMU
444static int __init find_node_by_addr(unsigned long addr)
445{
446 int ret = NUMA_NO_NODE;
447 int i;
448
449 for_each_node_mask(i, nodes_parsed) {
450 /*
451 * Find the real node that this emulated node appears on. For
452 * the sake of simplicity, we only use a real node's starting
453 * address to determine which emulated node it appears on.
454 */
455 if (addr >= nodes[i].start && addr < nodes[i].end) {
456 ret = i;
457 break;
458 }
459 }
 460 return ret;
461}
462
463/*
464 * In NUMA emulation, we need to setup proximity domain (_PXM) to node ID
465 * mappings that respect the real ACPI topology but reflect our emulated
466 * environment. For each emulated node, we find which real node it appears on
467 * and create PXM to NID mappings for those fake nodes which mirror that
468 * locality. SLIT will now represent the correct distances between emulated
469 * nodes as a result of the real topology.
470 */
471void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes)
472{
473 int i, j;
474 int fake_node_to_pxm_map[MAX_NUMNODES] = {
475 [0 ... MAX_NUMNODES-1] = PXM_INVAL
476 };
477 unsigned char fake_apicid_to_node[MAX_LOCAL_APIC] = {
478 [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
479 };
480
481 printk(KERN_INFO "Faking PXM affinity for fake nodes on real "
482 "topology.\n");
483 for (i = 0; i < num_nodes; i++) {
484 int nid, pxm;
485
486 nid = find_node_by_addr(fake_nodes[i].start);
487 if (nid == NUMA_NO_NODE)
488 continue;
489 pxm = node_to_pxm(nid);
490 if (pxm == PXM_INVAL)
491 continue;
492 fake_node_to_pxm_map[i] = pxm;
493 /*
494 * For each apicid_to_node mapping that exists for this real
495 * node, it must now point to the fake node ID.
496 */
497 for (j = 0; j < MAX_LOCAL_APIC; j++)
498 if (apicid_to_node[j] == nid)
499 fake_apicid_to_node[j] = i;
500 }
501 for (i = 0; i < num_nodes; i++)
502 __acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i);
503 memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node));
504
505 nodes_clear(nodes_parsed);
506 for (i = 0; i < num_nodes; i++)
507 if (fake_nodes[i].start != fake_nodes[i].end)
508 node_set(i, nodes_parsed);
509 WARN_ON(!nodes_cover_memory(fake_nodes));
510}
511
512static int null_slit_node_compare(int a, int b)
513{
514 return node_to_pxm(a) == node_to_pxm(b);
515}
516#else
517static int null_slit_node_compare(int a, int b)
518{
519 return a == b;
520}
521#endif /* CONFIG_NUMA_EMU */
522
443void __init srat_reserve_add_area(int nodeid) 523void __init srat_reserve_add_area(int nodeid)
444{ 524{
445 if (found_add_area && nodes_add[nodeid].end) { 525 if (found_add_area && nodes_add[nodeid].end) {
@@ -464,7 +544,8 @@ int __node_distance(int a, int b)
464 int index; 544 int index;
465 545
466 if (!acpi_slit) 546 if (!acpi_slit)
467 return a == b ? 10 : 20; 547 return null_slit_node_compare(a, b) ? LOCAL_DISTANCE :
548 REMOTE_DISTANCE;
468 index = acpi_slit->locality_count * node_to_pxm(a); 549 index = acpi_slit->locality_count * node_to_pxm(a);
469 return acpi_slit->entry[index + node_to_pxm(b)]; 550 return acpi_slit->entry[index + node_to_pxm(b)];
470} 551}
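
Editor's note: with the magic number 10 replaced by LOCAL_DISTANCE/REMOTE_DISTANCE, the SLIT check reads as: diagonal entries must equal the local distance, off-diagonal entries must be strictly greater. A standalone version of that validation over a flattened distance matrix; LOCAL_DISTANCE is defined locally here for illustration, whereas the kernel takes it from the topology headers.

/* Standalone version of the slit_valid() check. */
#include <stdio.h>

#define LOCAL_DISTANCE 10

static int slit_valid(const unsigned char *entry, int d)
{
	int i, j;

	for (i = 0; i < d; i++)
		for (j = 0; j < d; j++) {
			unsigned char val = entry[d * i + j];

			if (i == j) {
				if (val != LOCAL_DISTANCE)
					return 0;
			} else if (val <= LOCAL_DISTANCE)
				return 0;
		}
	return 1;
}

int main(void)
{
	unsigned char good[4] = { 10, 20, 20, 10 };
	unsigned char bad[4]  = { 10, 10, 20, 10 };	/* off-diagonal == local */

	printf("good SLIT valid: %d\n", slit_valid(good, 2));
	printf("bad SLIT valid:  %d\n", slit_valid(bad, 2));
	return 0;
}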
diff --git a/arch/x86_64/pci/k8-bus.c b/arch/x86_64/pci/k8-bus.c
index 3acf60ded2a0..9cc813e29706 100644
--- a/arch/x86_64/pci/k8-bus.c
+++ b/arch/x86_64/pci/k8-bus.c
@@ -59,6 +59,8 @@ fill_mp_bus_to_cpumask(void)
59 j <= SUBORDINATE_LDT_BUS_NUMBER(ldtbus); 59 j <= SUBORDINATE_LDT_BUS_NUMBER(ldtbus);
60 j++) { 60 j++) {
61 struct pci_bus *bus; 61 struct pci_bus *bus;
62 struct pci_sysdata *sd;
63
62 long node = NODE_ID(nid); 64 long node = NODE_ID(nid);
63 /* Algorithm a bit dumb, but 65 /* Algorithm a bit dumb, but
64 it shouldn't matter here */ 66 it shouldn't matter here */
@@ -67,7 +69,9 @@ fill_mp_bus_to_cpumask(void)
67 continue; 69 continue;
68 if (!node_online(node)) 70 if (!node_online(node))
69 node = 0; 71 node = 0;
70 bus->sysdata = (void *)node; 72
73 sd = bus->sysdata;
74 sd->node = node;
71 } 75 }
72 } 76 }
73 } 77 }
diff --git a/arch/x86_64/vdso/Makefile b/arch/x86_64/vdso/Makefile
new file mode 100644
index 000000000000..faaa72fb250c
--- /dev/null
+++ b/arch/x86_64/vdso/Makefile
@@ -0,0 +1,49 @@
1#
2# x86-64 vDSO.
3#
4
5# files to link into the vdso
6# vdso-start.o has to be first
7vobjs-y := vdso-start.o vdso-note.o vclock_gettime.o vgetcpu.o vvar.o
8
9# files to link into kernel
10obj-y := vma.o vdso.o vdso-syms.o
11
12vobjs := $(foreach F,$(vobjs-y),$(obj)/$F)
13
14$(obj)/vdso.o: $(obj)/vdso.so
15
16targets += vdso.so vdso.lds $(vobjs-y) vdso-syms.o
17
18# The DSO images are built using a special linker script.
19quiet_cmd_syscall = SYSCALL $@
20 cmd_syscall = $(CC) -m elf_x86_64 -nostdlib $(SYSCFLAGS_$(@F)) \
21 -Wl,-T,$(filter-out FORCE,$^) -o $@
22
23export CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
24
25vdso-flags = -fPIC -shared -Wl,-soname=linux-vdso.so.1 \
26 $(call ld-option, -Wl$(comma)--hash-style=sysv) \
27 -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
28SYSCFLAGS_vdso.so = $(vdso-flags)
29
30$(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so
31
32$(obj)/vdso.so: $(src)/vdso.lds $(vobjs) FORCE
33 $(call if_changed,syscall)
34
35CF := $(PROFILING) -mcmodel=small -fPIC -g0 -O2 -fasynchronous-unwind-tables -m64
36
37$(obj)/vclock_gettime.o: CFLAGS = $(CF)
38$(obj)/vgetcpu.o: CFLAGS = $(CF)
39
40# We also create a special relocatable object that should mirror the symbol
41# table and layout of the linked DSO. With ld -R we can then refer to
42# these symbols in the kernel code rather than hand-coded addresses.
43extra-y += vdso-syms.o
44$(obj)/built-in.o: $(obj)/vdso-syms.o
45$(obj)/built-in.o: ld_flags += -R $(obj)/vdso-syms.o
46
47SYSCFLAGS_vdso-syms.o = -r -d
48$(obj)/vdso-syms.o: $(src)/vdso.lds $(vobjs) FORCE
49 $(call if_changed,syscall)
diff --git a/arch/x86_64/vdso/vclock_gettime.c b/arch/x86_64/vdso/vclock_gettime.c
new file mode 100644
index 000000000000..17f6a00de712
--- /dev/null
+++ b/arch/x86_64/vdso/vclock_gettime.c
@@ -0,0 +1,120 @@
1/*
2 * Copyright 2006 Andi Kleen, SUSE Labs.
3 * Subject to the GNU Public License, v.2
4 *
5 * Fast user context implementation of clock_gettime and gettimeofday.
6 *
7 * The code should have no internal unresolved relocations.
8 * Check with readelf after changing.
9 * Also alternative() doesn't work.
10 */
11
12#include <linux/kernel.h>
13#include <linux/posix-timers.h>
14#include <linux/time.h>
15#include <linux/string.h>
16#include <asm/vsyscall.h>
17#include <asm/vgtod.h>
18#include <asm/timex.h>
19#include <asm/hpet.h>
20#include <asm/unistd.h>
21#include <asm/io.h>
22#include <asm/vgtod.h>
23#include "vextern.h"
24
25#define gtod vdso_vsyscall_gtod_data
26
27static long vdso_fallback_gettime(long clock, struct timespec *ts)
28{
29 long ret;
30 asm("syscall" : "=a" (ret) :
31 "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
32 return ret;
33}
34
35static inline long vgetns(void)
36{
37 cycles_t (*vread)(void);
38 vread = gtod->clock.vread;
39 return ((vread() - gtod->clock.cycle_last) * gtod->clock.mult) >>
40 gtod->clock.shift;
41}
42
43static noinline int do_realtime(struct timespec *ts)
44{
45 unsigned long seq, ns;
46 do {
47 seq = read_seqbegin(&gtod->lock);
48 ts->tv_sec = gtod->wall_time_sec;
49 ts->tv_nsec = gtod->wall_time_nsec;
50 ns = vgetns();
51 } while (unlikely(read_seqretry(&gtod->lock, seq)));
52 timespec_add_ns(ts, ns);
53 return 0;
54}
55
56/* Copy of the version in kernel/time.c which we cannot directly access */
57static void vset_normalized_timespec(struct timespec *ts, long sec, long nsec)
58{
59 while (nsec >= NSEC_PER_SEC) {
60 nsec -= NSEC_PER_SEC;
61 ++sec;
62 }
63 while (nsec < 0) {
64 nsec += NSEC_PER_SEC;
65 --sec;
66 }
67 ts->tv_sec = sec;
68 ts->tv_nsec = nsec;
69}
70
71static noinline int do_monotonic(struct timespec *ts)
72{
73 unsigned long seq, ns, secs;
74 do {
75 seq = read_seqbegin(&gtod->lock);
76 secs = gtod->wall_time_sec;
77 ns = gtod->wall_time_nsec + vgetns();
78 secs += gtod->wall_to_monotonic.tv_sec;
79 ns += gtod->wall_to_monotonic.tv_nsec;
80 } while (unlikely(read_seqretry(&gtod->lock, seq)));
81 vset_normalized_timespec(ts, secs, ns);
82 return 0;
83}
84
85int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
86{
87 if (likely(gtod->sysctl_enabled && gtod->clock.vread))
88 switch (clock) {
89 case CLOCK_REALTIME:
90 return do_realtime(ts);
91 case CLOCK_MONOTONIC:
92 return do_monotonic(ts);
93 }
94 return vdso_fallback_gettime(clock, ts);
95}
96int clock_gettime(clockid_t, struct timespec *)
97 __attribute__((weak, alias("__vdso_clock_gettime")));
98
99int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
100{
101 long ret;
102 if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
103 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
104 offsetof(struct timespec, tv_nsec) ||
105 sizeof(*tv) != sizeof(struct timespec));
106 do_realtime((struct timespec *)tv);
107 tv->tv_usec /= 1000;
108 if (unlikely(tz != NULL)) {
109 /* This relies on gcc inlining the memcpy. We'll notice
110 if it ever fails to do so. */
111 memcpy(tz, &gtod->sys_tz, sizeof(struct timezone));
112 }
113 return 0;
114 }
115 asm("syscall" : "=a" (ret) :
116 "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
117 return ret;
118}
119int gettimeofday(struct timeval *, struct timezone *)
120 __attribute__((weak, alias("__vdso_gettimeofday")));
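
Editor's note: once the vDSO is mapped and the C library resolves clock_gettime/gettimeofday against it (or the weak __vdso_* aliases), ordinary calls take the user-space fast path and only fall back to the real syscall when the clocksource has no vread or the sysctl disables it. The small program below simply exercises both entry points; whether it actually hits the vDSO path depends on the kernel and libc in use.

/* Exercise the two interfaces the vDSO accelerates.
 * On older glibc, link with -lrt for clock_gettime. */
#include <stdio.h>
#include <time.h>
#include <sys/time.h>

int main(void)
{
	struct timespec rt, mono;
	struct timeval tv;
	struct timezone tz;

	if (clock_gettime(CLOCK_REALTIME, &rt) ||
	    clock_gettime(CLOCK_MONOTONIC, &mono) ||
	    gettimeofday(&tv, &tz)) {
		perror("time query");
		return 1;
	}
	printf("CLOCK_REALTIME : %ld.%09ld\n", (long)rt.tv_sec, rt.tv_nsec);
	printf("CLOCK_MONOTONIC: %ld.%09ld\n", (long)mono.tv_sec, mono.tv_nsec);
	printf("gettimeofday   : %ld.%06ld (tz %d min west)\n",
	       (long)tv.tv_sec, (long)tv.tv_usec, tz.tz_minuteswest);
	return 0;
}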
diff --git a/arch/x86_64/vdso/vdso-note.S b/arch/x86_64/vdso/vdso-note.S
new file mode 100644
index 000000000000..79a071e4357e
--- /dev/null
+++ b/arch/x86_64/vdso/vdso-note.S
@@ -0,0 +1,12 @@
1/*
2 * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
3 * Here we can supply some information useful to userland.
4 */
5
6#include <linux/uts.h>
7#include <linux/version.h>
8#include <linux/elfnote.h>
9
10ELFNOTE_START(Linux, 0, "a")
11 .long LINUX_VERSION_CODE
12ELFNOTE_END
diff --git a/arch/x86_64/vdso/vdso-start.S b/arch/x86_64/vdso/vdso-start.S
new file mode 100644
index 000000000000..2dc2cdb84d67
--- /dev/null
+++ b/arch/x86_64/vdso/vdso-start.S
@@ -0,0 +1,2 @@
1 .globl vdso_kernel_start
2vdso_kernel_start:
diff --git a/arch/x86_64/vdso/vdso.S b/arch/x86_64/vdso/vdso.S
new file mode 100644
index 000000000000..92e80c1972a7
--- /dev/null
+++ b/arch/x86_64/vdso/vdso.S
@@ -0,0 +1,2 @@
1 .section ".vdso","a"
2 .incbin "arch/x86_64/vdso/vdso.so"
diff --git a/arch/x86_64/vdso/vdso.lds.S b/arch/x86_64/vdso/vdso.lds.S
new file mode 100644
index 000000000000..b9a60e665d08
--- /dev/null
+++ b/arch/x86_64/vdso/vdso.lds.S
@@ -0,0 +1,77 @@
1/*
2 * Linker script for vsyscall DSO. The vsyscall page is an ELF shared
3 * object prelinked to its virtual address, and with only one read-only
4 * segment (that fits in one page). This script controls its layout.
5 */
6#include <asm/asm-offsets.h>
7#include "voffset.h"
8
9#define VDSO_PRELINK 0xffffffffff700000
10
11SECTIONS
12{
13 . = VDSO_PRELINK + SIZEOF_HEADERS;
14
15 .hash : { *(.hash) } :text
16 .gnu.hash : { *(.gnu.hash) }
17 .dynsym : { *(.dynsym) }
18 .dynstr : { *(.dynstr) }
19 .gnu.version : { *(.gnu.version) }
20 .gnu.version_d : { *(.gnu.version_d) }
21 .gnu.version_r : { *(.gnu.version_r) }
22
23 /* This linker script is used both with -r and with -shared.
24 For the layouts to match, we need to skip more than enough
25 space for the dynamic symbol table et al. If this amount
26 is insufficient, ld -shared will barf. Just increase it here. */
27 . = VDSO_PRELINK + VDSO_TEXT_OFFSET;
28
29 .text : { *(.text) } :text
30 .text.ptr : { *(.text.ptr) } :text
31 . = VDSO_PRELINK + 0x900;
32 .data : { *(.data) } :text
33 .bss : { *(.bss) } :text
34
35 .altinstructions : { *(.altinstructions) } :text
36 .altinstr_replacement : { *(.altinstr_replacement) } :text
37
38 .note : { *(.note.*) } :text :note
39 .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
40 .eh_frame : { KEEP (*(.eh_frame)) } :text
41 .dynamic : { *(.dynamic) } :text :dynamic
42 .useless : {
43 *(.got.plt) *(.got)
44 *(.gnu.linkonce.d.*)
45 *(.dynbss)
46 *(.gnu.linkonce.b.*)
47 } :text
48}
49
50/*
51 * We must supply the ELF program headers explicitly to get just one
52 * PT_LOAD segment, and set the flags explicitly to make segments read-only.
53 */
54PHDRS
55{
56 text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */
57 dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
58 note PT_NOTE FLAGS(4); /* PF_R */
59 eh_frame_hdr 0x6474e550; /* PT_GNU_EH_FRAME, but ld doesn't match the name */
60}
61
62/*
63 * This controls what symbols we export from the DSO.
64 */
65VERSION
66{
67 LINUX_2.6 {
68 global:
69 clock_gettime;
70 __vdso_clock_gettime;
71 gettimeofday;
72 __vdso_gettimeofday;
73 getcpu;
74 __vdso_getcpu;
75 local: *;
76 };
77}
diff --git a/arch/x86_64/vdso/vextern.h b/arch/x86_64/vdso/vextern.h
new file mode 100644
index 000000000000..1683ba2ae3e8
--- /dev/null
+++ b/arch/x86_64/vdso/vextern.h
@@ -0,0 +1,16 @@
1#ifndef VEXTERN
2#include <asm/vsyscall.h>
3#define VEXTERN(x) \
4 extern typeof(x) *vdso_ ## x __attribute__((visibility("hidden")));
5#endif
6
7#define VMAGIC 0xfeedbabeabcdefabUL
8
9/* Any kernel variables used in the vDSO must be exported in the main
10 kernel's vmlinux.lds.S/vsyscall.h/proper __section and
11 put into vextern.h and be referenced as a pointer with vdso prefix.
12 The main kernel later fills in the values. */
13
14VEXTERN(jiffies)
15VEXTERN(vgetcpu_mode)
16VEXTERN(vsyscall_gtod_data)
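
Editor's note: the VEXTERN/VMAGIC scheme works by building the DSO with pointer slots pre-filled with a magic constant; at boot the kernel locates each slot, checks the magic, and patches in the real address. Below is a userspace sketch of that idea only; the names vdso_slot, var_ref and the_real_variable are invented for illustration and assume a 64-bit target.

/* Userspace sketch of the VMAGIC patching scheme used by vextern.h/vvar.c. */
#include <stdio.h>

#define VMAGIC 0xfeedbabeabcdefabUL

static unsigned long the_real_variable = 42;

/* Stand-in for a pointer slot inside the vDSO image, pre-filled with magic. */
static void *vdso_slot = (void *)VMAGIC;

static void **var_ref(void **slot, const char *name)
{
	if (*slot != (void *)VMAGIC) {
		printf("slot %s broken, refusing to patch\n", name);
		return NULL;
	}
	return slot;
}

int main(void)
{
	void **p = var_ref(&vdso_slot, "vdso_the_real_variable");

	if (p)
		*p = &the_real_variable;	/* patch in the real address */
	printf("patched value: %lu\n", *(unsigned long *)vdso_slot);
	return 0;
}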
diff --git a/arch/x86_64/vdso/vgetcpu.c b/arch/x86_64/vdso/vgetcpu.c
new file mode 100644
index 000000000000..91f6e85d0fc2
--- /dev/null
+++ b/arch/x86_64/vdso/vgetcpu.c
@@ -0,0 +1,50 @@
1/*
2 * Copyright 2006 Andi Kleen, SUSE Labs.
3 * Subject to the GNU Public License, v.2
4 *
5 * Fast user context implementation of getcpu()
6 */
7
8#include <linux/kernel.h>
9#include <linux/getcpu.h>
10#include <linux/jiffies.h>
11#include <linux/time.h>
12#include <asm/vsyscall.h>
13#include <asm/vgtod.h>
14#include "vextern.h"
15
16long __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
17{
18 unsigned int dummy, p;
19 unsigned long j = 0;
20
21 /* Fast cache - only recompute value once per jiffies and avoid
22 relatively costly rdtscp/cpuid otherwise.
23 This works because the scheduler usually keeps the process
24 on the same CPU and this syscall doesn't guarantee its
25 results anyways.
26 We do this here because otherwise user space would do it on
27 its own in a likely inferior way (no access to jiffies).
28 If you don't like it pass NULL. */
29 if (tcache && tcache->blob[0] == (j = *vdso_jiffies)) {
30 p = tcache->blob[1];
31 } else if (*vdso_vgetcpu_mode == VGETCPU_RDTSCP) {
32 /* Load per CPU data from RDTSCP */
33 rdtscp(dummy, dummy, p);
34 } else {
35 /* Load per CPU data from GDT */
36 asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
37 }
38 if (tcache) {
39 tcache->blob[0] = j;
40 tcache->blob[1] = p;
41 }
42 if (cpu)
43 *cpu = p & 0xfff;
44 if (node)
45 *node = p >> 12;
46 return 0;
47}
48
49long getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
50 __attribute__((weak, alias("__vdso_getcpu")));
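
Editor's note: __vdso_getcpu() packs the CPU number in the low 12 bits and the node in the upper bits of the per-CPU value loaded via RDTSCP or LSL. From user space the same information is available through the getcpu syscall (or sched_getcpu()); the syscall already returns the two values separately, so the p & 0xfff / p >> 12 split is only mirrored conceptually below.

/* Query current CPU and NUMA node via the getcpu syscall, the interface
 * the vDSO version accelerates. */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	unsigned cpu = 0, node = 0;

	/* Third argument (the old getcpu_cache) is ignored by current kernels. */
	if (syscall(SYS_getcpu, &cpu, &node, NULL)) {
		perror("getcpu");
		return 1;
	}
	printf("running on cpu %u, node %u\n", cpu, node);
	return 0;
}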
diff --git a/arch/x86_64/vdso/vma.c b/arch/x86_64/vdso/vma.c
new file mode 100644
index 000000000000..d4cb83a6c066
--- /dev/null
+++ b/arch/x86_64/vdso/vma.c
@@ -0,0 +1,139 @@
1/*
2 * Set up the VMAs to tell the VM about the vDSO.
3 * Copyright 2007 Andi Kleen, SUSE Labs.
4 * Subject to the GPL, v.2
5 */
6#include <linux/mm.h>
7#include <linux/sched.h>
8#include <linux/init.h>
9#include <linux/random.h>
10#include <asm/vsyscall.h>
11#include <asm/vgtod.h>
12#include <asm/proto.h>
13#include "voffset.h"
14
15int vdso_enabled = 1;
16
17#define VEXTERN(x) extern typeof(__ ## x) *vdso_ ## x;
18#include "vextern.h"
19#undef VEXTERN
20
21extern char vdso_kernel_start[], vdso_start[], vdso_end[];
22extern unsigned short vdso_sync_cpuid;
23
24struct page **vdso_pages;
25
26static inline void *var_ref(void *vbase, char *var, char *name)
27{
28 unsigned offset = var - &vdso_kernel_start[0] + VDSO_TEXT_OFFSET;
29 void *p = vbase + offset;
30 if (*(void **)p != (void *)VMAGIC) {
31 printk("VDSO: variable %s broken\n", name);
32 vdso_enabled = 0;
33 }
34 return p;
35}
36
37static int __init init_vdso_vars(void)
38{
39 int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
40 int i;
41 char *vbase;
42
43 vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
44 if (!vdso_pages)
45 goto oom;
46 for (i = 0; i < npages; i++) {
47 struct page *p;
48 p = alloc_page(GFP_KERNEL);
49 if (!p)
50 goto oom;
51 vdso_pages[i] = p;
52 copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
53 }
54
55 vbase = vmap(vdso_pages, npages, 0, PAGE_KERNEL);
56 if (!vbase)
57 goto oom;
58
59 if (memcmp(vbase, "\177ELF", 4)) {
60 printk("VDSO: I'm broken; not ELF\n");
61 vdso_enabled = 0;
62 }
63
64#define V(x) *(typeof(x) *) var_ref(vbase, (char *)RELOC_HIDE(&x, 0), #x)
65#define VEXTERN(x) \
66 V(vdso_ ## x) = &__ ## x;
67#include "vextern.h"
68#undef VEXTERN
69 return 0;
70
71 oom:
72 printk("Cannot allocate vdso\n");
73 vdso_enabled = 0;
74 return -ENOMEM;
75}
76__initcall(init_vdso_vars);
77
78struct linux_binprm;
79
80/* Put the vdso above the (randomized) stack with another randomized offset.
81 This way there is no hole in the middle of address space.
82 To save memory make sure it is still in the same PTE as the stack top.
83 This doesn't give that many random bits */
84static unsigned long vdso_addr(unsigned long start, unsigned len)
85{
86 unsigned long addr, end;
87 unsigned offset;
88 end = (start + PMD_SIZE - 1) & PMD_MASK;
89 if (end >= TASK_SIZE64)
90 end = TASK_SIZE64;
91 end -= len;
92 /* This loses some more bits than a modulo, but is cheaper */
93 offset = get_random_int() & (PTRS_PER_PTE - 1);
94 addr = start + (offset << PAGE_SHIFT);
95 if (addr >= end)
96 addr = end;
97 return addr;
98}
99
100/* Setup a VMA at program startup for the vsyscall page.
101 Not called for compat tasks */
102int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
103{
104 struct mm_struct *mm = current->mm;
105 unsigned long addr;
106 int ret;
107 unsigned len = round_up(vdso_end - vdso_start, PAGE_SIZE);
108
109 if (!vdso_enabled)
110 return 0;
111
112 down_write(&mm->mmap_sem);
113 addr = vdso_addr(mm->start_stack, len);
114 addr = get_unmapped_area(NULL, addr, len, 0, 0);
115 if (IS_ERR_VALUE(addr)) {
116 ret = addr;
117 goto up_fail;
118 }
119
120 ret = install_special_mapping(mm, addr, len,
121 VM_READ|VM_EXEC|
122 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
123 VM_ALWAYSDUMP,
124 vdso_pages);
125 if (ret)
126 goto up_fail;
127
128 current->mm->context.vdso = (void *)addr;
129up_fail:
130 up_write(&mm->mmap_sem);
131 return ret;
132}
133
134static __init int vdso_setup(char *s)
135{
136 vdso_enabled = simple_strtoul(s, NULL, 0);
137 return 0;
138}
139__setup("vdso=", vdso_setup);
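
Editor's note: vdso_addr() rounds the stack top up to the next PMD boundary, clamps below the task size, subtracts the mapping length, and then adds a page-granular random offset capped at that end. Below is a userspace model of the same arithmetic; the PMD/PAGE sizes and the TASK_SIZE64 limit are the usual x86-64 values hard-coded for illustration, and rand() stands in for get_random_int().

/* Model of the vdso_addr() placement arithmetic. */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PMD_SIZE	(1UL << 21)		/* 2 MiB */
#define PMD_MASK	(~(PMD_SIZE - 1))
#define PTRS_PER_PTE	512
#define TASK_SIZE64	0x00007ffffffff000UL	/* illustrative limit */

static unsigned long vdso_addr(unsigned long start, unsigned long len)
{
	unsigned long end = (start + PMD_SIZE - 1) & PMD_MASK;
	unsigned long offset, addr;

	if (end >= TASK_SIZE64)
		end = TASK_SIZE64;
	end -= len;
	/* This loses some more bits than a modulo, but is cheaper. */
	offset = (unsigned long)rand() & (PTRS_PER_PTE - 1);
	addr = start + (offset << PAGE_SHIFT);
	if (addr >= end)
		addr = end;
	return addr;
}

int main(void)
{
	unsigned long stack_top = 0x00007ffff7a12000UL;	/* made-up stack top */

	srand(time(NULL));
	printf("vdso would go at 0x%lx (stack top 0x%lx)\n",
	       vdso_addr(stack_top, PAGE_SIZE), stack_top);
	return 0;
}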
diff --git a/arch/x86_64/vdso/voffset.h b/arch/x86_64/vdso/voffset.h
new file mode 100644
index 000000000000..5304204911f2
--- /dev/null
+++ b/arch/x86_64/vdso/voffset.h
@@ -0,0 +1 @@
 1#define VDSO_TEXT_OFFSET 0x500
diff --git a/arch/x86_64/vdso/vvar.c b/arch/x86_64/vdso/vvar.c
new file mode 100644
index 000000000000..6fc22219a472
--- /dev/null
+++ b/arch/x86_64/vdso/vvar.c
@@ -0,0 +1,12 @@
1/* Define pointer to external vDSO variables.
2 These are part of the vDSO. The kernel fills in the real addresses
3 at boot time. This is done because when the vdso is linked the
4 kernel isn't yet and we don't know the final addresses. */
5#include <linux/kernel.h>
6#include <linux/time.h>
7#include <asm/vsyscall.h>
8#include <asm/timex.h>
9#include <asm/vgtod.h>
10
11#define VEXTERN(x) typeof (__ ## x) *vdso_ ## x = (void *)VMAGIC;
12#include "vextern.h"
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 0c9f15c54e8c..ab04d848b19d 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -36,13 +36,11 @@
36ACPI_MODULE_NAME("numa"); 36ACPI_MODULE_NAME("numa");
37 37
38static nodemask_t nodes_found_map = NODE_MASK_NONE; 38static nodemask_t nodes_found_map = NODE_MASK_NONE;
39#define PXM_INVAL -1
40#define NID_INVAL -1
41 39
42/* maps to convert between proximity domain and logical node ID */ 40/* maps to convert between proximity domain and logical node ID */
43static int pxm_to_node_map[MAX_PXM_DOMAINS] 41static int __cpuinitdata pxm_to_node_map[MAX_PXM_DOMAINS]
44 = { [0 ... MAX_PXM_DOMAINS - 1] = NID_INVAL }; 42 = { [0 ... MAX_PXM_DOMAINS - 1] = NID_INVAL };
45static int node_to_pxm_map[MAX_NUMNODES] 43static int __cpuinitdata node_to_pxm_map[MAX_NUMNODES]
46 = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL }; 44 = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
47 45
48int pxm_to_node(int pxm) 46int pxm_to_node(int pxm)
@@ -59,6 +57,12 @@ int node_to_pxm(int node)
59 return node_to_pxm_map[node]; 57 return node_to_pxm_map[node];
60} 58}
61 59
60void __acpi_map_pxm_to_node(int pxm, int node)
61{
62 pxm_to_node_map[pxm] = node;
63 node_to_pxm_map[node] = pxm;
64}
65
62int acpi_map_pxm_to_node(int pxm) 66int acpi_map_pxm_to_node(int pxm)
63{ 67{
64 int node = pxm_to_node_map[pxm]; 68 int node = pxm_to_node_map[pxm];
@@ -67,8 +71,7 @@ int acpi_map_pxm_to_node(int pxm)
67 if (nodes_weight(nodes_found_map) >= MAX_NUMNODES) 71 if (nodes_weight(nodes_found_map) >= MAX_NUMNODES)
68 return NID_INVAL; 72 return NID_INVAL;
69 node = first_unset_node(nodes_found_map); 73 node = first_unset_node(nodes_found_map);
70 pxm_to_node_map[pxm] = node; 74 __acpi_map_pxm_to_node(pxm, node);
71 node_to_pxm_map[node] = pxm;
72 node_set(node, nodes_found_map); 75 node_set(node, nodes_found_map);
73 } 76 }
74 77
@@ -83,7 +86,8 @@ void __cpuinit acpi_unmap_pxm_to_node(int node)
83 node_clear(node, nodes_found_map); 86 node_clear(node, nodes_found_map);
84} 87}
85 88
86void __init acpi_table_print_srat_entry(struct acpi_subtable_header * header) 89static void __init
90acpi_table_print_srat_entry(struct acpi_subtable_header *header)
87{ 91{
88 92
89 ACPI_FUNCTION_NAME("acpi_table_print_srat_entry"); 93 ACPI_FUNCTION_NAME("acpi_table_print_srat_entry");
@@ -200,7 +204,7 @@ static int __init acpi_parse_srat(struct acpi_table_header *table)
200 return 0; 204 return 0;
201} 205}
202 206
203int __init 207static int __init
204acpi_table_parse_srat(enum acpi_srat_type id, 208acpi_table_parse_srat(enum acpi_srat_type id,
205 acpi_table_entry_handler handler, unsigned int max_entries) 209 acpi_table_entry_handler handler, unsigned int max_entries)
206{ 210{
@@ -211,14 +215,13 @@ acpi_table_parse_srat(enum acpi_srat_type id,
211 215
212int __init acpi_numa_init(void) 216int __init acpi_numa_init(void)
213{ 217{
214 int result;
215
216 /* SRAT: Static Resource Affinity Table */ 218 /* SRAT: Static Resource Affinity Table */
217 if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) { 219 if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
218 result = acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY, 220 acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
219 acpi_parse_processor_affinity, 221 acpi_parse_processor_affinity, NR_CPUS);
220 NR_CPUS); 222 acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
221 result = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY, acpi_parse_memory_affinity, NR_NODE_MEMBLKS); // IA64 specific 223 acpi_parse_memory_affinity,
224 NR_NODE_MEMBLKS);
222 } 225 }
223 226
224 /* SLIT: System Locality Information Table */ 227 /* SLIT: System Locality Information Table */
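
Editor's note: __acpi_map_pxm_to_node() factors out the two-way table update so NUMA emulation can install its own mappings, while acpi_map_pxm_to_node() keeps allocating the first unused node ID for a new proximity domain. Below is an illustrative userspace version of that allocation logic; the array sizes, the INVAL sentinel and the next_node counter are invented for the example (the kernel uses a nodemask instead).

/* Sketch of the pxm <-> node mapping tables and allocate-on-first-use logic. */
#include <stdio.h>

#define MAX_DOMAINS 8
#define INVAL       (-1)

static int pxm_to_node_map[MAX_DOMAINS] = { [0 ... MAX_DOMAINS - 1] = INVAL };
static int node_to_pxm_map[MAX_DOMAINS] = { [0 ... MAX_DOMAINS - 1] = INVAL };
static int next_node;

static void map_pxm_to_node(int pxm, int node)
{
	pxm_to_node_map[pxm] = node;
	node_to_pxm_map[node] = pxm;
}

static int acpi_map_pxm(int pxm)
{
	if (pxm_to_node_map[pxm] == INVAL && next_node < MAX_DOMAINS)
		map_pxm_to_node(pxm, next_node++);
	return pxm_to_node_map[pxm];
}

int main(void)
{
	printf("pxm 3 -> node %d\n", acpi_map_pxm(3));	/* first use: node 0 */
	printf("pxm 5 -> node %d\n", acpi_map_pxm(5));	/* first use: node 1 */
	printf("pxm 3 -> node %d\n", acpi_map_pxm(3));	/* cached: node 0 */
	return 0;
}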
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index a9ab30fefffc..2b0c601e422e 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -142,6 +142,7 @@ void set_trace_device(struct device *dev)
142{ 142{
143 dev_hash_value = hash_string(DEVSEED, dev->bus_id, DEVHASH); 143 dev_hash_value = hash_string(DEVSEED, dev->bus_id, DEVHASH);
144} 144}
145EXPORT_SYMBOL(set_trace_device);
145 146
146/* 147/*
147 * We could just take the "tracedata" index into the .tracedata 148 * We could just take the "tracedata" index into the .tracedata
@@ -162,6 +163,7 @@ void generate_resume_trace(void *tracedata, unsigned int user)
162 file_hash_value = hash_string(lineno, file, FILEHASH); 163 file_hash_value = hash_string(lineno, file, FILEHASH);
163 set_magic_time(user_hash_value, file_hash_value, dev_hash_value); 164 set_magic_time(user_hash_value, file_hash_value, dev_hash_value);
164} 165}
166EXPORT_SYMBOL(generate_resume_trace);
165 167
166extern char __tracedata_start, __tracedata_end; 168extern char __tracedata_start, __tracedata_end;
167static int show_file_hash(unsigned int value) 169static int show_file_hash(unsigned int value)
@@ -170,7 +172,8 @@ static int show_file_hash(unsigned int value)
170 char *tracedata; 172 char *tracedata;
171 173
172 match = 0; 174 match = 0;
173 for (tracedata = &__tracedata_start ; tracedata < &__tracedata_end ; tracedata += 6) { 175 for (tracedata = &__tracedata_start ; tracedata < &__tracedata_end ;
176 tracedata += 2 + sizeof(unsigned long)) {
174 unsigned short lineno = *(unsigned short *)tracedata; 177 unsigned short lineno = *(unsigned short *)tracedata;
175 const char *file = *(const char **)(tracedata + 2); 178 const char *file = *(const char **)(tracedata + 2);
176 unsigned int hash = hash_string(lineno, file, FILEHASH); 179 unsigned int hash = hash_string(lineno, file, FILEHASH);
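
Editor's note: the resume-trace fix walks .tracedata in records of a 2-byte line number followed by a pointer to the file name, so the stride is 2 + sizeof(unsigned long) rather than a hard-coded 6, which was only right for 32-bit pointers. A trivial check of that arithmetic on the build target:

/* Show why the tracedata stride must track sizeof(unsigned long). */
#include <stdio.h>

int main(void)
{
	size_t record = 2 + sizeof(unsigned long);

	printf("sizeof(unsigned long) = %zu\n", sizeof(unsigned long));
	printf("tracedata record size = %zu bytes\n", record);
	printf("hard-coded 6 is %s on this target\n",
	       record == 6 ? "correct" : "wrong");
	return 0;
}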
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 819c829125fb..a7a099027fca 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -8,6 +8,7 @@
8obj-$(CONFIG_MAC_FLOPPY) += swim3.o 8obj-$(CONFIG_MAC_FLOPPY) += swim3.o
9obj-$(CONFIG_BLK_DEV_FD) += floppy.o 9obj-$(CONFIG_BLK_DEV_FD) += floppy.o
10obj-$(CONFIG_AMIGA_FLOPPY) += amiflop.o 10obj-$(CONFIG_AMIGA_FLOPPY) += amiflop.o
11obj-$(CONFIG_PS3_DISK) += ps3disk.o
11obj-$(CONFIG_ATARI_FLOPPY) += ataflop.o 12obj-$(CONFIG_ATARI_FLOPPY) += ataflop.o
12obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o 13obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o
13obj-$(CONFIG_BLK_DEV_RAM) += rd.o 14obj-$(CONFIG_BLK_DEV_RAM) += rd.o
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
new file mode 100644
index 000000000000..170fb33dba97
--- /dev/null
+++ b/drivers/block/ps3disk.c
@@ -0,0 +1,630 @@
1/*
2 * PS3 Disk Storage Driver
3 *
4 * Copyright (C) 2007 Sony Computer Entertainment Inc.
5 * Copyright 2007 Sony Corp.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published
9 * by the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20
21#include <linux/ata.h>
22#include <linux/blkdev.h>
23
24#include <asm/lv1call.h>
25#include <asm/ps3stor.h>
26#include <asm/firmware.h>
27
28
29#define DEVICE_NAME "ps3disk"
30
31#define BOUNCE_SIZE (64*1024)
32
33#define PS3DISK_MAX_DISKS 16
34#define PS3DISK_MINORS 16
35
36
37#define PS3DISK_NAME "ps3d%c"
38
39
40struct ps3disk_private {
41 spinlock_t lock; /* Request queue spinlock */
42 struct request_queue *queue;
43 struct gendisk *gendisk;
44 unsigned int blocking_factor;
45 struct request *req;
46 u64 raw_capacity;
47 unsigned char model[ATA_ID_PROD_LEN+1];
48};
49
50
51#define LV1_STORAGE_SEND_ATA_COMMAND (2)
52#define LV1_STORAGE_ATA_HDDOUT (0x23)
53
54struct lv1_ata_cmnd_block {
55 u16 features;
56 u16 sector_count;
57 u16 LBA_low;
58 u16 LBA_mid;
59 u16 LBA_high;
60 u8 device;
61 u8 command;
62 u32 is_ext;
63 u32 proto;
64 u32 in_out;
65 u32 size;
66 u64 buffer;
67 u32 arglen;
68};
69
70enum lv1_ata_proto {
71 NON_DATA_PROTO = 0,
72 PIO_DATA_IN_PROTO = 1,
73 PIO_DATA_OUT_PROTO = 2,
74 DMA_PROTO = 3
75};
76
77enum lv1_ata_in_out {
78 DIR_WRITE = 0, /* memory -> device */
79 DIR_READ = 1 /* device -> memory */
80};
81
82static int ps3disk_major;
83
84
85static struct block_device_operations ps3disk_fops = {
86 .owner = THIS_MODULE,
87};
88
89
90static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
91 struct request *req, int gather)
92{
93 unsigned int offset = 0;
94 struct bio *bio;
95 sector_t sector;
96 struct bio_vec *bvec;
97 unsigned int i = 0, j;
98 size_t size;
99 void *buf;
100
101 rq_for_each_bio(bio, req) {
102 sector = bio->bi_sector;
103 dev_dbg(&dev->sbd.core,
104 "%s:%u: bio %u: %u segs %u sectors from %lu\n",
105 __func__, __LINE__, i, bio_segments(bio),
106 bio_sectors(bio), sector);
107 bio_for_each_segment(bvec, bio, j) {
108 size = bvec->bv_len;
109 buf = __bio_kmap_atomic(bio, j, KM_IRQ0);
110 if (gather)
111 memcpy(dev->bounce_buf+offset, buf, size);
112 else
113 memcpy(buf, dev->bounce_buf+offset, size);
114 offset += size;
115 flush_kernel_dcache_page(bio_iovec_idx(bio, j)->bv_page);
116 __bio_kunmap_atomic(bio, KM_IRQ0);
117 }
118 i++;
119 }
120}
121
122static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
123 struct request *req)
124{
125 struct ps3disk_private *priv = dev->sbd.core.driver_data;
126 int write = rq_data_dir(req), res;
127 const char *op = write ? "write" : "read";
128 u64 start_sector, sectors;
129 unsigned int region_id = dev->regions[dev->region_idx].id;
130
131#ifdef DEBUG
132 unsigned int n = 0;
133 struct bio *bio;
134
135 rq_for_each_bio(bio, req)
136 n++;
137 dev_dbg(&dev->sbd.core,
138 "%s:%u: %s req has %u bios for %lu sectors %lu hard sectors\n",
139 __func__, __LINE__, op, n, req->nr_sectors,
140 req->hard_nr_sectors);
141#endif
142
143 start_sector = req->sector * priv->blocking_factor;
144 sectors = req->nr_sectors * priv->blocking_factor;
145 dev_dbg(&dev->sbd.core, "%s:%u: %s %lu sectors starting at %lu\n",
146 __func__, __LINE__, op, sectors, start_sector);
147
148 if (write) {
149 ps3disk_scatter_gather(dev, req, 1);
150
151 res = lv1_storage_write(dev->sbd.dev_id, region_id,
152 start_sector, sectors, 0,
153 dev->bounce_lpar, &dev->tag);
154 } else {
155 res = lv1_storage_read(dev->sbd.dev_id, region_id,
156 start_sector, sectors, 0,
157 dev->bounce_lpar, &dev->tag);
158 }
159 if (res) {
160 dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__,
161 __LINE__, op, res);
162 end_request(req, 0);
163 return 0;
164 }
165
166 priv->req = req;
167 return 1;
168}
169
170static int ps3disk_submit_flush_request(struct ps3_storage_device *dev,
171 struct request *req)
172{
173 struct ps3disk_private *priv = dev->sbd.core.driver_data;
174 u64 res;
175
176 dev_dbg(&dev->sbd.core, "%s:%u: flush request\n", __func__, __LINE__);
177
178 res = lv1_storage_send_device_command(dev->sbd.dev_id,
179 LV1_STORAGE_ATA_HDDOUT, 0, 0, 0,
180 0, &dev->tag);
181 if (res) {
182 dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%lx\n",
183 __func__, __LINE__, res);
184 end_request(req, 0);
185 return 0;
186 }
187
188 priv->req = req;
189 return 1;
190}
191
192static void ps3disk_do_request(struct ps3_storage_device *dev,
193 request_queue_t *q)
194{
195 struct request *req;
196
197 dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
198
199 while ((req = elv_next_request(q))) {
200 if (blk_fs_request(req)) {
201 if (ps3disk_submit_request_sg(dev, req))
202 break;
203 } else if (req->cmd_type == REQ_TYPE_FLUSH) {
204 if (ps3disk_submit_flush_request(dev, req))
205 break;
206 } else {
207 blk_dump_rq_flags(req, DEVICE_NAME " bad request");
208 end_request(req, 0);
209 continue;
210 }
211 }
212}
213
214static void ps3disk_request(request_queue_t *q)
215{
216 struct ps3_storage_device *dev = q->queuedata;
217 struct ps3disk_private *priv = dev->sbd.core.driver_data;
218
219 if (priv->req) {
220 dev_dbg(&dev->sbd.core, "%s:%u busy\n", __func__, __LINE__);
221 return;
222 }
223
224 ps3disk_do_request(dev, q);
225}
226
227static irqreturn_t ps3disk_interrupt(int irq, void *data)
228{
229 struct ps3_storage_device *dev = data;
230 struct ps3disk_private *priv;
231 struct request *req;
232 int res, read, uptodate;
233 u64 tag, status;
234 unsigned long num_sectors;
235 const char *op;
236
237 res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status);
238
239 if (tag != dev->tag)
240 dev_err(&dev->sbd.core,
241 "%s:%u: tag mismatch, got %lx, expected %lx\n",
242 __func__, __LINE__, tag, dev->tag);
243
244 if (res) {
245 dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%lx\n",
246 __func__, __LINE__, res, status);
247 return IRQ_HANDLED;
248 }
249
250 priv = dev->sbd.core.driver_data;
251 req = priv->req;
252 if (!req) {
253 dev_dbg(&dev->sbd.core,
254 "%s:%u non-block layer request completed\n", __func__,
255 __LINE__);
256 dev->lv1_status = status;
257 complete(&dev->done);
258 return IRQ_HANDLED;
259 }
260
261 if (req->cmd_type == REQ_TYPE_FLUSH) {
262 read = 0;
263 num_sectors = req->hard_cur_sectors;
264 op = "flush";
265 } else {
266 read = !rq_data_dir(req);
267 num_sectors = req->nr_sectors;
268 op = read ? "read" : "write";
269 }
270 if (status) {
271 dev_dbg(&dev->sbd.core, "%s:%u: %s failed 0x%lx\n", __func__,
272 __LINE__, op, status);
273 uptodate = 0;
274 } else {
275 dev_dbg(&dev->sbd.core, "%s:%u: %s completed\n", __func__,
276 __LINE__, op);
277 uptodate = 1;
278 if (read)
279 ps3disk_scatter_gather(dev, req, 0);
280 }
281
282 spin_lock(&priv->lock);
283 if (!end_that_request_first(req, uptodate, num_sectors)) {
284 add_disk_randomness(req->rq_disk);
285 blkdev_dequeue_request(req);
286 end_that_request_last(req, uptodate);
287 }
288 priv->req = NULL;
289 ps3disk_do_request(dev, priv->queue);
290 spin_unlock(&priv->lock);
291
292 return IRQ_HANDLED;
293}
294
295static int ps3disk_sync_cache(struct ps3_storage_device *dev)
296{
297 u64 res;
298
299 dev_dbg(&dev->sbd.core, "%s:%u: sync cache\n", __func__, __LINE__);
300
301 res = ps3stor_send_command(dev, LV1_STORAGE_ATA_HDDOUT, 0, 0, 0, 0);
302 if (res) {
303 dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%lx\n",
304 __func__, __LINE__, res);
305 return -EIO;
306 }
307 return 0;
308}
309
310
311/* ATA helpers copied from drivers/ata/libata-core.c */
312
313static void swap_buf_le16(u16 *buf, unsigned int buf_words)
314{
315#ifdef __BIG_ENDIAN
316 unsigned int i;
317
318 for (i = 0; i < buf_words; i++)
319 buf[i] = le16_to_cpu(buf[i]);
320#endif /* __BIG_ENDIAN */
321}
322
323static u64 ata_id_n_sectors(const u16 *id)
324{
325 if (ata_id_has_lba(id)) {
326 if (ata_id_has_lba48(id))
327 return ata_id_u64(id, 100);
328 else
329 return ata_id_u32(id, 60);
330 } else {
331 if (ata_id_current_chs_valid(id))
332 return ata_id_u32(id, 57);
333 else
334 return id[1] * id[3] * id[6];
335 }
336}
337
338static void ata_id_string(const u16 *id, unsigned char *s, unsigned int ofs,
339 unsigned int len)
340{
341 unsigned int c;
342
343 while (len > 0) {
344 c = id[ofs] >> 8;
345 *s = c;
346 s++;
347
348 c = id[ofs] & 0xff;
349 *s = c;
350 s++;
351
352 ofs++;
353 len -= 2;
354 }
355}
356
357static void ata_id_c_string(const u16 *id, unsigned char *s, unsigned int ofs,
358 unsigned int len)
359{
360 unsigned char *p;
361
362 WARN_ON(!(len & 1));
363
364 ata_id_string(id, s, ofs, len - 1);
365
366 p = s + strnlen(s, len - 1);
367 while (p > s && p[-1] == ' ')
368 p--;
369 *p = '\0';
370}
371
372static int ps3disk_identify(struct ps3_storage_device *dev)
373{
374 struct ps3disk_private *priv = dev->sbd.core.driver_data;
375 struct lv1_ata_cmnd_block ata_cmnd;
376 u16 *id = dev->bounce_buf;
377 u64 res;
378
379 dev_dbg(&dev->sbd.core, "%s:%u: identify disk\n", __func__, __LINE__);
380
381 memset(&ata_cmnd, 0, sizeof(struct lv1_ata_cmnd_block));
382 ata_cmnd.command = ATA_CMD_ID_ATA;
383 ata_cmnd.sector_count = 1;
384 ata_cmnd.size = ata_cmnd.arglen = ATA_ID_WORDS * 2;
385 ata_cmnd.buffer = dev->bounce_lpar;
386 ata_cmnd.proto = PIO_DATA_IN_PROTO;
387 ata_cmnd.in_out = DIR_READ;
388
389 res = ps3stor_send_command(dev, LV1_STORAGE_SEND_ATA_COMMAND,
390 ps3_mm_phys_to_lpar(__pa(&ata_cmnd)),
391 sizeof(ata_cmnd), ata_cmnd.buffer,
392 ata_cmnd.arglen);
393 if (res) {
394 dev_err(&dev->sbd.core, "%s:%u: identify disk failed 0x%lx\n",
395 __func__, __LINE__, res);
396 return -EIO;
397 }
398
399 swap_buf_le16(id, ATA_ID_WORDS);
400
401 /* All we're interested in are raw capacity and model name */
402 priv->raw_capacity = ata_id_n_sectors(id);
403 ata_id_c_string(id, priv->model, ATA_ID_PROD, sizeof(priv->model));
404 return 0;
405}
406
407static void ps3disk_prepare_flush(request_queue_t *q, struct request *req)
408{
409 struct ps3_storage_device *dev = q->queuedata;
410
411 dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
412
413 memset(req->cmd, 0, sizeof(req->cmd));
414 req->cmd_type = REQ_TYPE_FLUSH;
415}
416
417static int ps3disk_issue_flush(request_queue_t *q, struct gendisk *gendisk,
418 sector_t *sector)
419{
420 struct ps3_storage_device *dev = q->queuedata;
421 struct request *req;
422 int res;
423
424 dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
425
426 req = blk_get_request(q, WRITE, __GFP_WAIT);
427 ps3disk_prepare_flush(q, req);
428 res = blk_execute_rq(q, gendisk, req, 0);
429 if (res)
430 dev_err(&dev->sbd.core, "%s:%u: flush request failed %d\n",
431 __func__, __LINE__, res);
432 blk_put_request(req);
433 return res;
434}
435
436
437static unsigned long ps3disk_mask;
438
439static DEFINE_MUTEX(ps3disk_mask_mutex);
440
441static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
442{
443 struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
444 struct ps3disk_private *priv;
445 int error;
446 unsigned int devidx;
447 struct request_queue *queue;
448 struct gendisk *gendisk;
449
450 if (dev->blk_size < 512) {
451 dev_err(&dev->sbd.core,
452 "%s:%u: cannot handle block size %lu\n", __func__,
453 __LINE__, dev->blk_size);
454 return -EINVAL;
455 }
456
457 BUILD_BUG_ON(PS3DISK_MAX_DISKS > BITS_PER_LONG);
458 mutex_lock(&ps3disk_mask_mutex);
459 devidx = find_first_zero_bit(&ps3disk_mask, PS3DISK_MAX_DISKS);
460 if (devidx >= PS3DISK_MAX_DISKS) {
461 dev_err(&dev->sbd.core, "%s:%u: Too many disks\n", __func__,
462 __LINE__);
463 mutex_unlock(&ps3disk_mask_mutex);
464 return -ENOSPC;
465 }
466 __set_bit(devidx, &ps3disk_mask);
467 mutex_unlock(&ps3disk_mask_mutex);
468
469 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
470 if (!priv) {
471 error = -ENOMEM;
472 goto fail;
473 }
474
475 dev->sbd.core.driver_data = priv;
476 spin_lock_init(&priv->lock);
477
478 dev->bounce_size = BOUNCE_SIZE;
479 dev->bounce_buf = kmalloc(BOUNCE_SIZE, GFP_DMA);
480 if (!dev->bounce_buf) {
481 error = -ENOMEM;
482 goto fail_free_priv;
483 }
484
485 error = ps3stor_setup(dev, ps3disk_interrupt);
486 if (error)
487 goto fail_free_bounce;
488
489 ps3disk_identify(dev);
490
491 queue = blk_init_queue(ps3disk_request, &priv->lock);
492 if (!queue) {
493 dev_err(&dev->sbd.core, "%s:%u: blk_init_queue failed\n",
494 __func__, __LINE__);
495 error = -ENOMEM;
496 goto fail_teardown;
497 }
498
499 priv->queue = queue;
500 queue->queuedata = dev;
501
502 blk_queue_bounce_limit(queue, BLK_BOUNCE_HIGH);
503
504 blk_queue_max_sectors(queue, dev->bounce_size >> 9);
505 blk_queue_segment_boundary(queue, -1UL);
506 blk_queue_dma_alignment(queue, dev->blk_size-1);
507 blk_queue_hardsect_size(queue, dev->blk_size);
508
509 blk_queue_issue_flush_fn(queue, ps3disk_issue_flush);
510 blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH,
511 ps3disk_prepare_flush);
512
513 blk_queue_max_phys_segments(queue, -1);
514 blk_queue_max_hw_segments(queue, -1);
515 blk_queue_max_segment_size(queue, dev->bounce_size);
516
517 gendisk = alloc_disk(PS3DISK_MINORS);
518 if (!gendisk) {
519 dev_err(&dev->sbd.core, "%s:%u: alloc_disk failed\n", __func__,
520 __LINE__);
521 error = -ENOMEM;
522 goto fail_cleanup_queue;
523 }
524
525 priv->gendisk = gendisk;
526 gendisk->major = ps3disk_major;
527 gendisk->first_minor = devidx * PS3DISK_MINORS;
528 gendisk->fops = &ps3disk_fops;
529 gendisk->queue = queue;
530 gendisk->private_data = dev;
531 gendisk->driverfs_dev = &dev->sbd.core;
532 snprintf(gendisk->disk_name, sizeof(gendisk->disk_name), PS3DISK_NAME,
533 devidx+'a');
534 priv->blocking_factor = dev->blk_size >> 9;
535 set_capacity(gendisk,
536 dev->regions[dev->region_idx].size*priv->blocking_factor);
537
538 dev_info(&dev->sbd.core,
539 "%s is a %s (%lu MiB total, %lu MiB for OtherOS)\n",
540 gendisk->disk_name, priv->model, priv->raw_capacity >> 11,
541 get_capacity(gendisk) >> 11);
542
543 add_disk(gendisk);
544 return 0;
545
546fail_cleanup_queue:
547 blk_cleanup_queue(queue);
548fail_teardown:
549 ps3stor_teardown(dev);
550fail_free_bounce:
551 kfree(dev->bounce_buf);
552fail_free_priv:
553 kfree(priv);
554 dev->sbd.core.driver_data = NULL;
555fail:
556 mutex_lock(&ps3disk_mask_mutex);
557 __clear_bit(devidx, &ps3disk_mask);
558 mutex_unlock(&ps3disk_mask_mutex);
559 return error;
560}
561
562static int ps3disk_remove(struct ps3_system_bus_device *_dev)
563{
564 struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
565 struct ps3disk_private *priv = dev->sbd.core.driver_data;
566
567 mutex_lock(&ps3disk_mask_mutex);
568 __clear_bit(priv->gendisk->first_minor / PS3DISK_MINORS,
569 &ps3disk_mask);
570 mutex_unlock(&ps3disk_mask_mutex);
571 del_gendisk(priv->gendisk);
572 blk_cleanup_queue(priv->queue);
573 put_disk(priv->gendisk);
574 dev_notice(&dev->sbd.core, "Synchronizing disk cache\n");
575 ps3disk_sync_cache(dev);
576 ps3stor_teardown(dev);
577 kfree(dev->bounce_buf);
578 kfree(priv);
579 dev->sbd.core.driver_data = NULL;
580 return 0;
581}
582
583static struct ps3_system_bus_driver ps3disk = {
584 .match_id = PS3_MATCH_ID_STOR_DISK,
585 .core.name = DEVICE_NAME,
586 .core.owner = THIS_MODULE,
587 .probe = ps3disk_probe,
588 .remove = ps3disk_remove,
589 .shutdown = ps3disk_remove,
590};
591
592
593static int __init ps3disk_init(void)
594{
595 int error;
596
597 if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
598 return -ENODEV;
599
600 error = register_blkdev(0, DEVICE_NAME);
601 if (error <= 0) {
602 printk(KERN_ERR "%s:%u: register_blkdev failed %d\n", __func__,
603 __LINE__, error);
604 return error;
605 }
606 ps3disk_major = error;
607
608 pr_info("%s:%u: registered block device major %d\n", __func__,
609 __LINE__, ps3disk_major);
610
611 error = ps3_system_bus_driver_register(&ps3disk);
612 if (error)
613 unregister_blkdev(ps3disk_major, DEVICE_NAME);
614
615 return error;
616}
617
618static void __exit ps3disk_exit(void)
619{
620 ps3_system_bus_driver_unregister(&ps3disk);
621 unregister_blkdev(ps3disk_major, DEVICE_NAME);
622}
623
624module_init(ps3disk_init);
625module_exit(ps3disk_exit);
626
627MODULE_LICENSE("GPL");
628MODULE_DESCRIPTION("PS3 Disk Storage Driver");
629MODULE_AUTHOR("Sony Corporation");
630MODULE_ALIAS(PS3_MODULE_ALIAS_STOR_DISK);
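
Editor's note: ps3disk copies libata's helpers to pull the model string out of the IDENTIFY data, where each 16-bit word carries two characters high byte first and the field is space-padded. The standalone demo below reuses the same two helpers against a fabricated IDENTIFY buffer; the id[] contents and the "PS3 DISK" name are made up.

/* Standalone demo of the ata_id_string()/ata_id_c_string() helpers. */
#include <stdio.h>
#include <string.h>

typedef unsigned short u16;

static void ata_id_string(const u16 *id, unsigned char *s, unsigned int ofs,
			  unsigned int len)
{
	while (len > 0) {
		*s++ = id[ofs] >> 8;	/* high byte first */
		*s++ = id[ofs] & 0xff;
		ofs++;
		len -= 2;
	}
}

static void ata_id_c_string(const u16 *id, unsigned char *s, unsigned int ofs,
			    unsigned int len)
{
	unsigned char *p;

	ata_id_string(id, s, ofs, len - 1);
	p = s + strnlen((char *)s, len - 1);
	while (p > s && p[-1] == ' ')	/* trim trailing padding */
		p--;
	*p = '\0';
}

int main(void)
{
	/* Words 27..46 hold the model name; fake "PS3 DISK" padded with spaces. */
	u16 id[64] = { 0 };
	unsigned char model[17];
	const char *name = "PS3 DISK        ";
	unsigned int i;

	for (i = 0; i < 8; i++)
		id[27 + i] = (name[2 * i] << 8) | name[2 * i + 1];

	ata_id_c_string(id, model, 27, sizeof(model));
	printf("model: \"%s\"\n", model);
	return 0;
}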
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 4e6f387fd189..8fecaf4010b1 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -107,6 +107,8 @@ obj-$(CONFIG_IPMI_HANDLER) += ipmi/
107obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o 107obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o
108obj-$(CONFIG_TCG_TPM) += tpm/ 108obj-$(CONFIG_TCG_TPM) += tpm/
109 109
110obj-$(CONFIG_PS3_FLASH) += ps3flash.o
111
110# Files generated that shall be removed upon make clean 112# Files generated that shall be removed upon make clean
111clean-files := consolemap_deftbl.c defkeymap.c 113clean-files := consolemap_deftbl.c defkeymap.c
112 114
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 7cda04b33534..2d7cd486e025 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -41,7 +41,7 @@ config HW_RANDOM_AMD
41 41
42config HW_RANDOM_GEODE 42config HW_RANDOM_GEODE
43 tristate "AMD Geode HW Random Number Generator support" 43 tristate "AMD Geode HW Random Number Generator support"
44 depends on HW_RANDOM && X86 && PCI 44 depends on HW_RANDOM && X86_32 && PCI
45 default HW_RANDOM 45 default HW_RANDOM
46 ---help--- 46 ---help---
47 This driver provides kernel-side support for the Random Number 47 This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/ps3flash.c b/drivers/char/ps3flash.c
new file mode 100644
index 000000000000..79b6f461be75
--- /dev/null
+++ b/drivers/char/ps3flash.c
@@ -0,0 +1,440 @@
1/*
2 * PS3 FLASH ROM Storage Driver
3 *
4 * Copyright (C) 2007 Sony Computer Entertainment Inc.
5 * Copyright 2007 Sony Corp.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published
9 * by the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20
21#include <linux/fs.h>
22#include <linux/miscdevice.h>
23#include <linux/uaccess.h>
24
25#include <asm/lv1call.h>
26#include <asm/ps3stor.h>
27
28
29#define DEVICE_NAME "ps3flash"
30
31#define FLASH_BLOCK_SIZE (256*1024)
32
33
34struct ps3flash_private {
35 struct mutex mutex; /* Bounce buffer mutex */
36};
37
38static struct ps3_storage_device *ps3flash_dev;
39
40static ssize_t ps3flash_read_write_sectors(struct ps3_storage_device *dev,
41 u64 lpar, u64 start_sector,
42 u64 sectors, int write)
43{
44 u64 res = ps3stor_read_write_sectors(dev, lpar, start_sector, sectors,
45 write);
46 if (res) {
47 dev_err(&dev->sbd.core, "%s:%u: %s failed 0x%lx\n", __func__,
48 __LINE__, write ? "write" : "read", res);
49 return -EIO;
50 }
51 return sectors;
52}
53
54static ssize_t ps3flash_read_sectors(struct ps3_storage_device *dev,
55 u64 start_sector, u64 sectors,
56 unsigned int sector_offset)
57{
58 u64 max_sectors, lpar;
59
60 max_sectors = dev->bounce_size / dev->blk_size;
61 if (sectors > max_sectors) {
62 dev_dbg(&dev->sbd.core, "%s:%u Limiting sectors to %lu\n",
63 __func__, __LINE__, max_sectors);
64 sectors = max_sectors;
65 }
66
67 lpar = dev->bounce_lpar + sector_offset * dev->blk_size;
68 return ps3flash_read_write_sectors(dev, lpar, start_sector, sectors,
69 0);
70}
71
72static ssize_t ps3flash_write_chunk(struct ps3_storage_device *dev,
73 u64 start_sector)
74{
75 u64 sectors = dev->bounce_size / dev->blk_size;
76 return ps3flash_read_write_sectors(dev, dev->bounce_lpar, start_sector,
77 sectors, 1);
78}
79
80static loff_t ps3flash_llseek(struct file *file, loff_t offset, int origin)
81{
82 struct ps3_storage_device *dev = ps3flash_dev;
83 loff_t res;
84
85 mutex_lock(&file->f_mapping->host->i_mutex);
86 switch (origin) {
87 case 1:
88 offset += file->f_pos;
89 break;
90 case 2:
91 offset += dev->regions[dev->region_idx].size*dev->blk_size;
92 break;
93 }
94 if (offset < 0) {
95 res = -EINVAL;
96 goto out;
97 }
98
99 file->f_pos = offset;
100 res = file->f_pos;
101
102out:
103 mutex_unlock(&file->f_mapping->host->i_mutex);
104 return res;
105}
106
107static ssize_t ps3flash_read(struct file *file, char __user *buf, size_t count,
108 loff_t *pos)
109{
110 struct ps3_storage_device *dev = ps3flash_dev;
111 struct ps3flash_private *priv = dev->sbd.core.driver_data;
112 u64 size, start_sector, end_sector, offset;
113 ssize_t sectors_read;
114 size_t remaining, n;
115
116 dev_dbg(&dev->sbd.core,
117 "%s:%u: Reading %zu bytes at position %lld to user 0x%p\n",
118 __func__, __LINE__, count, *pos, buf);
119
120 size = dev->regions[dev->region_idx].size*dev->blk_size;
121 if (*pos >= size || !count)
122 return 0;
123
124 if (*pos + count > size) {
125 dev_dbg(&dev->sbd.core,
126 "%s:%u Truncating count from %zu to %llu\n", __func__,
127 __LINE__, count, size - *pos);
128 count = size - *pos;
129 }
130
131 start_sector = *pos / dev->blk_size;
132 offset = *pos % dev->blk_size;
133 end_sector = DIV_ROUND_UP(*pos + count, dev->blk_size);
134
135 remaining = count;
136 do {
137 mutex_lock(&priv->mutex);
138
139 sectors_read = ps3flash_read_sectors(dev, start_sector,
140 end_sector-start_sector,
141 0);
142 if (sectors_read < 0) {
143 mutex_unlock(&priv->mutex);
144 goto fail;
145 }
146
147 n = min(remaining, sectors_read*dev->blk_size-offset);
148 dev_dbg(&dev->sbd.core,
149 "%s:%u: copy %lu bytes from 0x%p to user 0x%p\n",
150 __func__, __LINE__, n, dev->bounce_buf+offset, buf);
151 if (copy_to_user(buf, dev->bounce_buf+offset, n)) {
152 mutex_unlock(&priv->mutex);
153 sectors_read = -EFAULT;
154 goto fail;
155 }
156
157 mutex_unlock(&priv->mutex);
158
159 *pos += n;
160 buf += n;
161 remaining -= n;
162 start_sector += sectors_read;
163 offset = 0;
164 } while (remaining > 0);
165
166 return count;
167
168fail:
169 return sectors_read;
170}
171
172static ssize_t ps3flash_write(struct file *file, const char __user *buf,
173 size_t count, loff_t *pos)
174{
175 struct ps3_storage_device *dev = ps3flash_dev;
176 struct ps3flash_private *priv = dev->sbd.core.driver_data;
177 u64 size, chunk_sectors, start_write_sector, end_write_sector,
178 end_read_sector, start_read_sector, head, tail, offset;
179 ssize_t res;
180 size_t remaining, n;
181 unsigned int sec_off;
182
183 dev_dbg(&dev->sbd.core,
184 "%s:%u: Writing %zu bytes at position %lld from user 0x%p\n",
185 __func__, __LINE__, count, *pos, buf);
186
187 size = dev->regions[dev->region_idx].size*dev->blk_size;
188 if (*pos >= size || !count)
189 return 0;
190
191 if (*pos + count > size) {
192 dev_dbg(&dev->sbd.core,
193 "%s:%u Truncating count from %zu to %llu\n", __func__,
194 __LINE__, count, size - *pos);
195 count = size - *pos;
196 }
197
198 chunk_sectors = dev->bounce_size / dev->blk_size;
199
200 start_write_sector = *pos / dev->bounce_size * chunk_sectors;
201 offset = *pos % dev->bounce_size;
202 end_write_sector = DIV_ROUND_UP(*pos + count, dev->bounce_size) *
203 chunk_sectors;
204
205 end_read_sector = DIV_ROUND_UP(*pos, dev->blk_size);
206 start_read_sector = (*pos + count) / dev->blk_size;
207
208 /*
209 * As we have to write in 256 KiB chunks, while we can read in blk_size
210 * (usually 512 bytes) chunks, we perform the following steps:
211 * 1. Read from start_write_sector to end_read_sector ("head")
212 * 2. Read from start_read_sector to end_write_sector ("tail")
213 * 3. Copy data to buffer
214 * 4. Write from start_write_sector to end_write_sector
215 * All of this is complicated by using only one 256 KiB bounce buffer.
216 */
217
218 head = end_read_sector - start_write_sector;
219 tail = end_write_sector - start_read_sector;
220
221 remaining = count;
222 do {
223 mutex_lock(&priv->mutex);
224
225 if (end_read_sector >= start_read_sector) {
226 /* Merge head and tail */
227 dev_dbg(&dev->sbd.core,
228 "Merged head and tail: %lu sectors at %lu\n",
229 chunk_sectors, start_write_sector);
230 res = ps3flash_read_sectors(dev, start_write_sector,
231 chunk_sectors, 0);
232 if (res < 0)
233 goto fail;
234 } else {
235 if (head) {
236 /* Read head */
237 dev_dbg(&dev->sbd.core,
238 "head: %lu sectors at %lu\n", head,
239 start_write_sector);
240 res = ps3flash_read_sectors(dev,
241 start_write_sector,
242 head, 0);
243 if (res < 0)
244 goto fail;
245 }
246 if (start_read_sector <
247 start_write_sector+chunk_sectors) {
248 /* Read tail */
249 dev_dbg(&dev->sbd.core,
250 "tail: %lu sectors at %lu\n", tail,
251 start_read_sector);
252 sec_off = start_read_sector-start_write_sector;
253 res = ps3flash_read_sectors(dev,
254 start_read_sector,
255 tail, sec_off);
256 if (res < 0)
257 goto fail;
258 }
259 }
260
261 n = min(remaining, dev->bounce_size-offset);
262 dev_dbg(&dev->sbd.core,
263 "%s:%u: copy %lu bytes from user 0x%p to 0x%p\n",
264 __func__, __LINE__, n, buf, dev->bounce_buf+offset);
265 if (copy_from_user(dev->bounce_buf+offset, buf, n)) {
266 res = -EFAULT;
267 goto fail;
268 }
269
270 res = ps3flash_write_chunk(dev, start_write_sector);
271 if (res < 0)
272 goto fail;
273
274 mutex_unlock(&priv->mutex);
275
276 *pos += n;
277 buf += n;
278 remaining -= n;
279 start_write_sector += chunk_sectors;
280 head = 0;
281 offset = 0;
282 } while (remaining > 0);
283
284 return count;
285
286fail:
287 mutex_unlock(&priv->mutex);
288 return res;
289}
290
291
292static irqreturn_t ps3flash_interrupt(int irq, void *data)
293{
294 struct ps3_storage_device *dev = data;
295 int res;
296 u64 tag, status;
297
298 res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status);
299
300 if (tag != dev->tag)
301 dev_err(&dev->sbd.core,
302 "%s:%u: tag mismatch, got %lx, expected %lx\n",
303 __func__, __LINE__, tag, dev->tag);
304
305 if (res) {
306 dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%lx\n",
307 __func__, __LINE__, res, status);
308 } else {
309 dev->lv1_status = status;
310 complete(&dev->done);
311 }
312 return IRQ_HANDLED;
313}
314
315
316static const struct file_operations ps3flash_fops = {
317 .owner = THIS_MODULE,
318 .llseek = ps3flash_llseek,
319 .read = ps3flash_read,
320 .write = ps3flash_write,
321};
322
323static struct miscdevice ps3flash_misc = {
324 .minor = MISC_DYNAMIC_MINOR,
325 .name = DEVICE_NAME,
326 .fops = &ps3flash_fops,
327};
328
329static int __devinit ps3flash_probe(struct ps3_system_bus_device *_dev)
330{
331 struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
332 struct ps3flash_private *priv;
333 int error;
334 unsigned long tmp;
335
336 tmp = dev->regions[dev->region_idx].start*dev->blk_size;
337 if (tmp % FLASH_BLOCK_SIZE) {
338 dev_err(&dev->sbd.core,
339 "%s:%u region start %lu is not aligned\n", __func__,
340 __LINE__, tmp);
341 return -EINVAL;
342 }
343 tmp = dev->regions[dev->region_idx].size*dev->blk_size;
344 if (tmp % FLASH_BLOCK_SIZE) {
345 dev_err(&dev->sbd.core,
346 "%s:%u region size %lu is not aligned\n", __func__,
347 __LINE__, tmp);
348 return -EINVAL;
349 }
350
351 /* use static buffer, kmalloc cannot allocate 256 KiB */
352 if (!ps3flash_bounce_buffer.address)
353 return -ENODEV;
354
355 if (ps3flash_dev) {
356 dev_err(&dev->sbd.core,
357 "Only one FLASH device is supported\n");
358 return -EBUSY;
359 }
360
361 ps3flash_dev = dev;
362
363 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
364 if (!priv) {
365 error = -ENOMEM;
366 goto fail;
367 }
368
369 dev->sbd.core.driver_data = priv;
370 mutex_init(&priv->mutex);
371
372 dev->bounce_size = ps3flash_bounce_buffer.size;
373 dev->bounce_buf = ps3flash_bounce_buffer.address;
374
375 error = ps3stor_setup(dev, ps3flash_interrupt);
376 if (error)
377 goto fail_free_priv;
378
379 ps3flash_misc.parent = &dev->sbd.core;
380 error = misc_register(&ps3flash_misc);
381 if (error) {
382 dev_err(&dev->sbd.core, "%s:%u: misc_register failed %d\n",
383 __func__, __LINE__, error);
384 goto fail_teardown;
385 }
386
387 dev_info(&dev->sbd.core, "%s:%u: registered misc device %d\n",
388 __func__, __LINE__, ps3flash_misc.minor);
389 return 0;
390
391fail_teardown:
392 ps3stor_teardown(dev);
393fail_free_priv:
394 kfree(priv);
395 dev->sbd.core.driver_data = NULL;
396fail:
397 ps3flash_dev = NULL;
398 return error;
399}
400
401static int ps3flash_remove(struct ps3_system_bus_device *_dev)
402{
403 struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
404
405 misc_deregister(&ps3flash_misc);
406 ps3stor_teardown(dev);
407 kfree(dev->sbd.core.driver_data);
408 dev->sbd.core.driver_data = NULL;
409 ps3flash_dev = NULL;
410 return 0;
411}
412
413
414static struct ps3_system_bus_driver ps3flash = {
415 .match_id = PS3_MATCH_ID_STOR_FLASH,
416 .core.name = DEVICE_NAME,
417 .core.owner = THIS_MODULE,
418 .probe = ps3flash_probe,
419 .remove = ps3flash_remove,
420 .shutdown = ps3flash_remove,
421};
422
423
424static int __init ps3flash_init(void)
425{
426 return ps3_system_bus_driver_register(&ps3flash);
427}
428
429static void __exit ps3flash_exit(void)
430{
431 ps3_system_bus_driver_unregister(&ps3flash);
432}
433
434module_init(ps3flash_init);
435module_exit(ps3flash_exit);
436
437MODULE_LICENSE("GPL");
438MODULE_DESCRIPTION("PS3 FLASH ROM Storage Driver");
439MODULE_AUTHOR("Sony Corporation");
440MODULE_ALIAS(PS3_MODULE_ALIAS_STOR_FLASH);
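
Note on the chunked write path in ps3flash_write() above: FLASH writes must go through the single 256 KiB bounce buffer, while reads can be done per blk_size sector, so the driver first reads back the unaligned "head" and "tail" of the affected chunk before overwriting it. The following stand-alone sketch (plain userspace C, with sample values chosen purely for illustration; it is not part of the driver) reproduces just the sector arithmetic so the head/tail split can be checked in isolation.

#include <stdio.h>
#include <stdint.h>

#define BLK_SIZE    512ULL          /* typical dev->blk_size */
#define BOUNCE_SIZE (256ULL * 1024) /* dev->bounce_size, one FLASH chunk */

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uint64_t pos = 300000, count = 5000;   /* sample write request */
	uint64_t chunk_sectors = BOUNCE_SIZE / BLK_SIZE;

	/* same arithmetic as ps3flash_write() */
	uint64_t start_write_sector = pos / BOUNCE_SIZE * chunk_sectors;
	uint64_t offset = pos % BOUNCE_SIZE;
	uint64_t end_write_sector = DIV_ROUND_UP(pos + count, BOUNCE_SIZE) *
				    chunk_sectors;
	uint64_t end_read_sector = DIV_ROUND_UP(pos, BLK_SIZE);
	uint64_t start_read_sector = (pos + count) / BLK_SIZE;

	uint64_t head = end_read_sector - start_write_sector;
	uint64_t tail = end_write_sector - start_read_sector;

	printf("chunk sectors %llu..%llu, head %llu, tail %llu, "
	       "offset into bounce buffer %llu\n",
	       (unsigned long long)start_write_sector,
	       (unsigned long long)end_write_sector,
	       (unsigned long long)head, (unsigned long long)tail,
	       (unsigned long long)offset);
	return 0;
}
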
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index 30c3f54c7666..ec6b65ec69ea 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -82,7 +82,7 @@
82#include <asm/uaccess.h> 82#include <asm/uaccess.h>
83#include <asm/system.h> 83#include <asm/system.h>
84 84
85#if defined(__i386__) 85#ifdef CONFIG_X86
86#include <asm/hpet.h> 86#include <asm/hpet.h>
87#endif 87#endif
88 88
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index e783dbf0f162..7b46faf22318 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -71,7 +71,7 @@ static struct clocksource clocksource_acpi_pm = {
71 .rating = 200, 71 .rating = 200,
72 .read = acpi_pm_read, 72 .read = acpi_pm_read,
73 .mask = (cycle_t)ACPI_PM_MASK, 73 .mask = (cycle_t)ACPI_PM_MASK,
74 .mult = 0, /*to be caluclated*/ 74 .mult = 0, /*to be calculated*/
75 .shift = 22, 75 .shift = 22,
76 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 76 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
77 77
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c
index 31989dcd922c..906bf5e8de89 100644
--- a/drivers/input/misc/pcspkr.c
+++ b/drivers/input/misc/pcspkr.c
@@ -24,7 +24,12 @@ MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
24MODULE_DESCRIPTION("PC Speaker beeper driver"); 24MODULE_DESCRIPTION("PC Speaker beeper driver");
25MODULE_LICENSE("GPL"); 25MODULE_LICENSE("GPL");
26 26
27static DEFINE_SPINLOCK(i8253_beep_lock); 27#ifdef CONFIG_X86
28/* Use the global PIT lock ! */
29#include <asm/i8253.h>
30#else
31static DEFINE_SPINLOCK(i8253_lock);
32#endif
28 33
29static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) 34static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
30{ 35{
@@ -43,7 +48,7 @@ static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int c
43 if (value > 20 && value < 32767) 48 if (value > 20 && value < 32767)
44 count = PIT_TICK_RATE / value; 49 count = PIT_TICK_RATE / value;
45 50
46 spin_lock_irqsave(&i8253_beep_lock, flags); 51 spin_lock_irqsave(&i8253_lock, flags);
47 52
48 if (count) { 53 if (count) {
49 /* enable counter 2 */ 54 /* enable counter 2 */
@@ -58,7 +63,7 @@ static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int c
58 outb(inb_p(0x61) & 0xFC, 0x61); 63 outb(inb_p(0x61) & 0xFC, 0x61);
59 } 64 }
60 65
61 spin_unlock_irqrestore(&i8253_beep_lock, flags); 66 spin_unlock_irqrestore(&i8253_lock, flags);
62 67
63 return 0; 68 return 0;
64} 69}
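
For reference, the count that pcspkr_event() loads into PIT channel 2 is simply the 1.193182 MHz i8253 input clock divided by the requested frequency; the driver then writes that divisor low byte first to port 0x42. A minimal userspace sketch of the divisor calculation (the 440 Hz frequency is only an example):

#include <stdio.h>

#define PIT_TICK_RATE 1193182	/* i8253/i8254 input clock in Hz */

int main(void)
{
	unsigned int freq = 440;	/* sample beep frequency */
	unsigned int count = 0;

	if (freq > 20 && freq < 32767)
		count = PIT_TICK_RATE / freq;

	/* these are the two bytes the driver writes to port 0x42 */
	printf("divisor %u -> low byte 0x%02x, high byte 0x%02x\n",
	       count, count & 0xff, (count >> 8) & 0xff);
	return 0;
}
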
diff --git a/drivers/isdn/Kconfig b/drivers/isdn/Kconfig
index cf906c8cee4d..66f946aa30b3 100644
--- a/drivers/isdn/Kconfig
+++ b/drivers/isdn/Kconfig
@@ -21,9 +21,7 @@ menuconfig ISDN
21 21
22if ISDN 22if ISDN
23 23
24menu "Old ISDN4Linux" 24menuconfig ISDN_I4L
25
26config ISDN_I4L
27 tristate "Old ISDN4Linux (deprecated)" 25 tristate "Old ISDN4Linux (deprecated)"
28 ---help--- 26 ---help---
29 This driver allows you to use an ISDN adapter for networking 27 This driver allows you to use an ISDN adapter for networking
@@ -45,12 +43,8 @@ if ISDN_I4L
45source "drivers/isdn/i4l/Kconfig" 43source "drivers/isdn/i4l/Kconfig"
46endif 44endif
47 45
48endmenu 46menuconfig ISDN_CAPI
49 47 tristate "CAPI 2.0 subsystem"
50comment "CAPI subsystem"
51
52config ISDN_CAPI
53 tristate "CAPI2.0 support"
54 help 48 help
55 This provides the CAPI (Common ISDN Application Programming 49 This provides the CAPI (Common ISDN Application Programming
56 Interface, a standard making it easy for programs to access ISDN 50 Interface, a standard making it easy for programs to access ISDN
diff --git a/drivers/isdn/act2000/Kconfig b/drivers/isdn/act2000/Kconfig
index 78e6ad8d57c5..3fc1a5434ef7 100644
--- a/drivers/isdn/act2000/Kconfig
+++ b/drivers/isdn/act2000/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4config ISDN_DRV_ACT2000 4config ISDN_DRV_ACT2000
5 tristate "IBM Active 2000 support" 5 tristate "IBM Active 2000 support"
6 depends on ISDN_I4L && ISA 6 depends on ISA
7 help 7 help
8 Say Y here if you have an IBM Active 2000 ISDN card. In order to use 8 Say Y here if you have an IBM Active 2000 ISDN card. In order to use
9 this card, additional firmware is necessary, which has to be loaded 9 this card, additional firmware is necessary, which has to be loaded
diff --git a/drivers/isdn/gigaset/Kconfig b/drivers/isdn/gigaset/Kconfig
index bcbb6502a773..0017e50c6948 100644
--- a/drivers/isdn/gigaset/Kconfig
+++ b/drivers/isdn/gigaset/Kconfig
@@ -1,9 +1,5 @@
1menu "Siemens Gigaset" 1menuconfig ISDN_DRV_GIGASET
2 depends on ISDN_I4L
3
4config ISDN_DRV_GIGASET
5 tristate "Siemens Gigaset support (isdn)" 2 tristate "Siemens Gigaset support (isdn)"
6 depends on ISDN_I4L
7 select CRC_CCITT 3 select CRC_CCITT
8 select BITREVERSE 4 select BITREVERSE
9 help 5 help
@@ -55,6 +51,4 @@ config GIGASET_UNDOCREQ
55 features like configuration mode of M105, say yes. If you 51 features like configuration mode of M105, say yes. If you
56 care about your device, say no. 52 care about your device, say no.
57 53
58endif 54endif # ISDN_DRV_GIGASET != n
59
60endmenu
diff --git a/drivers/isdn/hisax/Kconfig b/drivers/isdn/hisax/Kconfig
index 12d91fb9f8cb..a3b945ac3256 100644
--- a/drivers/isdn/hisax/Kconfig
+++ b/drivers/isdn/hisax/Kconfig
@@ -1,6 +1,5 @@
1 1
2menu "Passive cards" 2menu "Passive cards"
3 depends on ISDN_I4L
4 3
5config ISDN_DRV_HISAX 4config ISDN_DRV_HISAX
6 tristate "HiSax SiemensChipSet driver support" 5 tristate "HiSax SiemensChipSet driver support"
diff --git a/drivers/isdn/i4l/Kconfig b/drivers/isdn/i4l/Kconfig
index e91c187992dd..36778b270c30 100644
--- a/drivers/isdn/i4l/Kconfig
+++ b/drivers/isdn/i4l/Kconfig
@@ -99,7 +99,6 @@ config ISDN_DRV_LOOP
99 99
100config ISDN_DIVERSION 100config ISDN_DIVERSION
101 tristate "Support isdn diversion services" 101 tristate "Support isdn diversion services"
102 depends on ISDN_I4L
103 help 102 help
104 This option allows you to use some supplementary diversion 103 This option allows you to use some supplementary diversion
105 services in conjunction with the HiSax driver on an EURO/DSS1 104 services in conjunction with the HiSax driver on an EURO/DSS1
@@ -119,13 +118,11 @@ config ISDN_DIVERSION
119endmenu 118endmenu
120 119
121comment "ISDN4Linux hardware drivers" 120comment "ISDN4Linux hardware drivers"
122 depends on ISDN_I4L
123 121
124source "drivers/isdn/hisax/Kconfig" 122source "drivers/isdn/hisax/Kconfig"
125 123
126 124
127menu "Active cards" 125menu "Active cards"
128 depends on ISDN_I4L!=n
129 126
130source "drivers/isdn/icn/Kconfig" 127source "drivers/isdn/icn/Kconfig"
131 128
diff --git a/drivers/isdn/icn/Kconfig b/drivers/isdn/icn/Kconfig
index fcb99f5f0b26..89d15eed765e 100644
--- a/drivers/isdn/icn/Kconfig
+++ b/drivers/isdn/icn/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4config ISDN_DRV_ICN 4config ISDN_DRV_ICN
5 tristate "ICN 2B and 4B support" 5 tristate "ICN 2B and 4B support"
6 depends on ISDN_I4L && ISA 6 depends on ISA
7 help 7 help
8 This enables support for two kinds of ISDN-cards made by a German 8 This enables support for two kinds of ISDN-cards made by a German
9 company called ICN. 2B is the standard version for a single ISDN 9 company called ICN. 2B is the standard version for a single ISDN
diff --git a/drivers/isdn/pcbit/Kconfig b/drivers/isdn/pcbit/Kconfig
index 0933881ab0c2..ffba6eca1244 100644
--- a/drivers/isdn/pcbit/Kconfig
+++ b/drivers/isdn/pcbit/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4config ISDN_DRV_PCBIT 4config ISDN_DRV_PCBIT
5 tristate "PCBIT-D support" 5 tristate "PCBIT-D support"
6 depends on ISDN_I4L && ISA && (BROKEN || X86) 6 depends on ISA && (BROKEN || X86)
7 help 7 help
8 This enables support for the PCBIT ISDN-card. This card is 8 This enables support for the PCBIT ISDN-card. This card is
9 manufactured in Portugal by Octal. For running this card, 9 manufactured in Portugal by Octal. For running this card,
diff --git a/drivers/isdn/sc/Kconfig b/drivers/isdn/sc/Kconfig
index 5346e33d816c..e6510ca7bf43 100644
--- a/drivers/isdn/sc/Kconfig
+++ b/drivers/isdn/sc/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4config ISDN_DRV_SC 4config ISDN_DRV_SC
5 tristate "Spellcaster support" 5 tristate "Spellcaster support"
6 depends on ISDN_I4L && ISA 6 depends on ISA
7 help 7 help
8 This enables support for the Spellcaster BRI ISDN boards. This 8 This enables support for the Spellcaster BRI ISDN boards. This
9 driver currently builds only in a modularized version. 9 driver currently builds only in a modularized version.
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index d99d2fe53dca..1a87ba9d5156 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -244,7 +244,7 @@ static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
244static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc) 244static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
245{ 245{
246 while (mc->nobjs) 246 while (mc->nobjs)
247 __free_page(mc->objects[--mc->nobjs]); 247 free_page((unsigned long)mc->objects[--mc->nobjs]);
248} 248}
249 249
250static int __mmu_topup_memory_caches(struct kvm_vcpu *vcpu, gfp_t gfp_flags) 250static int __mmu_topup_memory_caches(struct kvm_vcpu *vcpu, gfp_t gfp_flags)
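
The one-line mmu.c change above swaps __free_page() for free_page() because the two take different argument types and must be paired with the matching allocator. A brief, illustrative kernel-style sketch of that pairing (kernel context only, not runnable standalone, and not the kvm code itself):

/* needs <linux/gfp.h>; kernel context only */

/* pages allocated as a kernel virtual address ... */
void *obj = (void *)__get_free_page(GFP_KERNEL);
if (obj)
	free_page((unsigned long)obj);	/* free_page() takes an address */

/* ... versus pages handled as struct page pointers */
struct page *page = alloc_page(GFP_KERNEL);
if (page)
	__free_page(page);		/* __free_page() takes a struct page * */
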
diff --git a/drivers/lguest/lguest.c b/drivers/lguest/lguest.c
index 434fea1e82f7..18dade06d4a9 100644
--- a/drivers/lguest/lguest.c
+++ b/drivers/lguest/lguest.c
@@ -398,6 +398,8 @@ static void lguest_clockevent_set_mode(enum clock_event_mode mode,
398 break; 398 break;
399 case CLOCK_EVT_MODE_PERIODIC: 399 case CLOCK_EVT_MODE_PERIODIC:
400 BUG(); 400 BUG();
401 case CLOCK_EVT_MODE_RESUME:
402 break;
401 } 403 }
402} 404}
403 405
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index ba952a032598..bdc52d6922b7 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -920,6 +920,8 @@ static void crypt_dtr(struct dm_target *ti)
920{ 920{
921 struct crypt_config *cc = (struct crypt_config *) ti->private; 921 struct crypt_config *cc = (struct crypt_config *) ti->private;
922 922
923 flush_workqueue(_kcryptd_workqueue);
924
923 bioset_free(cc->bs); 925 bioset_free(cc->bs);
924 mempool_destroy(cc->page_pool); 926 mempool_destroy(cc->page_pool);
925 mempool_destroy(cc->io_pool); 927 mempool_destroy(cc->io_pool);
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index 28c881895ab7..15aab374127e 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -903,8 +903,10 @@ static int __init at91_mci_probe(struct platform_device *pdev)
903 /* 903 /*
904 * Add host to MMC layer 904 * Add host to MMC layer
905 */ 905 */
906 if (host->board->det_pin) 906 if (host->board->det_pin) {
907 host->present = !at91_get_gpio_value(host->board->det_pin); 907 host->present = !at91_get_gpio_value(host->board->det_pin);
908 device_init_wakeup(&pdev->dev, 1);
909 }
908 else 910 else
909 host->present = -1; 911 host->present = -1;
910 912
@@ -940,6 +942,7 @@ static int __exit at91_mci_remove(struct platform_device *pdev)
940 host = mmc_priv(mmc); 942 host = mmc_priv(mmc);
941 943
942 if (host->present != -1) { 944 if (host->present != -1) {
945 device_init_wakeup(&pdev->dev, 0);
943 free_irq(host->board->det_pin, host); 946 free_irq(host->board->det_pin, host);
944 cancel_delayed_work(&host->mmc->detect); 947 cancel_delayed_work(&host->mmc->detect);
945 } 948 }
@@ -966,8 +969,12 @@ static int __exit at91_mci_remove(struct platform_device *pdev)
966static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state) 969static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
967{ 970{
968 struct mmc_host *mmc = platform_get_drvdata(pdev); 971 struct mmc_host *mmc = platform_get_drvdata(pdev);
972 struct at91mci_host *host = mmc_priv(mmc);
969 int ret = 0; 973 int ret = 0;
970 974
975 if (device_may_wakeup(&pdev->dev))
976 enable_irq_wake(host->board->det_pin);
977
971 if (mmc) 978 if (mmc)
972 ret = mmc_suspend_host(mmc, state); 979 ret = mmc_suspend_host(mmc, state);
973 980
@@ -977,8 +984,12 @@ static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
977static int at91_mci_resume(struct platform_device *pdev) 984static int at91_mci_resume(struct platform_device *pdev)
978{ 985{
979 struct mmc_host *mmc = platform_get_drvdata(pdev); 986 struct mmc_host *mmc = platform_get_drvdata(pdev);
987 struct at91mci_host *host = mmc_priv(mmc);
980 int ret = 0; 988 int ret = 0;
981 989
990 if (device_may_wakeup(&pdev->dev))
991 disable_irq_wake(host->board->det_pin);
992
982 if (mmc) 993 if (mmc)
983 ret = mmc_resume_host(mmc); 994 ret = mmc_resume_host(mmc);
984 995
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 10d15c39d003..4a24db028d87 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1024,6 +1024,8 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
1024 1024
1025 intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK); 1025 intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);
1026 1026
1027 intmask &= ~SDHCI_INT_ERROR;
1028
1027 if (intmask & SDHCI_INT_BUS_POWER) { 1029 if (intmask & SDHCI_INT_BUS_POWER) {
1028 printk(KERN_ERR "%s: Card is consuming too much power!\n", 1030 printk(KERN_ERR "%s: Card is consuming too much power!\n",
1029 mmc_hostname(host->mmc)); 1031 mmc_hostname(host->mmc));
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 7400f4bc114f..a6c870480b8a 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -107,6 +107,7 @@
107#define SDHCI_INT_CARD_INSERT 0x00000040 107#define SDHCI_INT_CARD_INSERT 0x00000040
108#define SDHCI_INT_CARD_REMOVE 0x00000080 108#define SDHCI_INT_CARD_REMOVE 0x00000080
109#define SDHCI_INT_CARD_INT 0x00000100 109#define SDHCI_INT_CARD_INT 0x00000100
110#define SDHCI_INT_ERROR 0x00008000
110#define SDHCI_INT_TIMEOUT 0x00010000 111#define SDHCI_INT_TIMEOUT 0x00010000
111#define SDHCI_INT_CRC 0x00020000 112#define SDHCI_INT_CRC 0x00020000
112#define SDHCI_INT_END_BIT 0x00040000 113#define SDHCI_INT_END_BIT 0x00040000
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 35f34665e3c4..9d8d40d5c8f7 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -38,6 +38,9 @@ config RTC_HCTOSYS_DEVICE
38 clock, usually rtc0. Initialization is done when the system 38 clock, usually rtc0. Initialization is done when the system
39 starts up, and when it resumes from a low power state. 39 starts up, and when it resumes from a low power state.
40 40
41 The driver for this RTC device must be loaded before late_initcall
42 functions run, so it must usually be statically linked.
43
41 This clock should be battery-backed, so that it reads the correct 44 This clock should be battery-backed, so that it reads the correct
42 time when the system boots from a power-off state. Otherwise, your 45 time when the system boots from a power-off state. Otherwise, your
43 system will need an external clock source (like an NTP server). 46 system will need an external clock source (like an NTP server).
@@ -305,6 +308,16 @@ config RTC_DRV_DS1553
305 This driver can also be built as a module. If so, the module 308 This driver can also be built as a module. If so, the module
306 will be called rtc-ds1553. 309 will be called rtc-ds1553.
307 310
311config RTC_DRV_STK17TA8
312 tristate "Simtek STK17TA8"
313 depends on RTC_CLASS
314 help
315 If you say yes here you get support for the
316 Simtek STK17TA8 timekeeping chip.
317
318 This driver can also be built as a module. If so, the module
319 will be called rtc-stk17ta8.
320
308config RTC_DRV_DS1742 321config RTC_DRV_DS1742
309 tristate "Dallas DS1742/1743" 322 tristate "Dallas DS1742/1743"
310 depends on RTC_CLASS 323 depends on RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 3109af9a1651..7ede9e725360 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o
32obj-$(CONFIG_RTC_DRV_M41T80) += rtc-m41t80.o 32obj-$(CONFIG_RTC_DRV_M41T80) += rtc-m41t80.o
33obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o 33obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o
34obj-$(CONFIG_RTC_DRV_DS1553) += rtc-ds1553.o 34obj-$(CONFIG_RTC_DRV_DS1553) += rtc-ds1553.o
35obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o
35obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o 36obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o
36obj-$(CONFIG_RTC_DRV_EP93XX) += rtc-ep93xx.o 37obj-$(CONFIG_RTC_DRV_EP93XX) += rtc-ep93xx.o
37obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o 38obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index f98a83a11aae..46da5714932c 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -407,7 +407,7 @@ static __init int ds1553_init(void)
407 407
408static __exit void ds1553_exit(void) 408static __exit void ds1553_exit(void)
409{ 409{
410 return platform_driver_unregister(&ds1553_rtc_driver); 410 platform_driver_unregister(&ds1553_rtc_driver);
411} 411}
412 412
413module_init(ds1553_init); 413module_init(ds1553_init);
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index d1778ae8bca5..b2e5481ba3b6 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -263,7 +263,7 @@ static __init int ds1742_init(void)
263 263
264static __exit void ds1742_exit(void) 264static __exit void ds1742_exit(void)
265{ 265{
266 return platform_driver_unregister(&ds1742_rtc_driver); 266 platform_driver_unregister(&ds1742_rtc_driver);
267} 267}
268 268
269module_init(ds1742_init); 269module_init(ds1742_init);
diff --git a/drivers/rtc/rtc-max6900.c b/drivers/rtc/rtc-max6900.c
index eee4ee5bb75a..a1cd448639c9 100644
--- a/drivers/rtc/rtc-max6900.c
+++ b/drivers/rtc/rtc-max6900.c
@@ -31,17 +31,24 @@
31#define MAX6900_REG_DW 5 /* day of week 1-7 */ 31#define MAX6900_REG_DW 5 /* day of week 1-7 */
32#define MAX6900_REG_YR 6 /* year 00-99 */ 32#define MAX6900_REG_YR 6 /* year 00-99 */
33#define MAX6900_REG_CT 7 /* control */ 33#define MAX6900_REG_CT 7 /* control */
34#define MAX6900_REG_LEN 8 34 /* register 8 is undocumented */
35#define MAX6900_REG_CENTURY 9 /* century */
36#define MAX6900_REG_LEN 10
37
38#define MAX6900_BURST_LEN 8 /* can burst r/w first 8 regs */
35 39
36#define MAX6900_REG_CT_WP (1 << 7) /* Write Protect */ 40#define MAX6900_REG_CT_WP (1 << 7) /* Write Protect */
37 41
42
38/* 43/*
39 * register read/write commands 44 * register read/write commands
40 */ 45 */
41#define MAX6900_REG_CONTROL_WRITE 0x8e 46#define MAX6900_REG_CONTROL_WRITE 0x8e
42#define MAX6900_REG_BURST_READ 0xbf 47#define MAX6900_REG_CENTURY_WRITE 0x92
43#define MAX6900_REG_BURST_WRITE 0xbe 48#define MAX6900_REG_CENTURY_READ 0x93
44#define MAX6900_REG_RESERVED_READ 0x96 49#define MAX6900_REG_RESERVED_READ 0x96
50#define MAX6900_REG_BURST_WRITE 0xbe
51#define MAX6900_REG_BURST_READ 0xbf
45 52
46#define MAX6900_IDLE_TIME_AFTER_WRITE 3 /* specification says 2.5 mS */ 53#define MAX6900_IDLE_TIME_AFTER_WRITE 3 /* specification says 2.5 mS */
47 54
@@ -58,19 +65,32 @@ static int max6900_probe(struct i2c_adapter *adapter, int addr, int kind);
58 65
59static int max6900_i2c_read_regs(struct i2c_client *client, u8 *buf) 66static int max6900_i2c_read_regs(struct i2c_client *client, u8 *buf)
60{ 67{
61 u8 reg_addr[1] = { MAX6900_REG_BURST_READ }; 68 u8 reg_burst_read[1] = { MAX6900_REG_BURST_READ };
62 struct i2c_msg msgs[2] = { 69 u8 reg_century_read[1] = { MAX6900_REG_CENTURY_READ };
70 struct i2c_msg msgs[4] = {
63 { 71 {
64 .addr = client->addr, 72 .addr = client->addr,
65 .flags = 0, /* write */ 73 .flags = 0, /* write */
66 .len = sizeof(reg_addr), 74 .len = sizeof(reg_burst_read),
67 .buf = reg_addr 75 .buf = reg_burst_read
68 }, 76 },
69 { 77 {
70 .addr = client->addr, 78 .addr = client->addr,
71 .flags = I2C_M_RD, 79 .flags = I2C_M_RD,
72 .len = MAX6900_REG_LEN, 80 .len = MAX6900_BURST_LEN,
73 .buf = buf 81 .buf = buf
82 },
83 {
84 .addr = client->addr,
85 .flags = 0, /* write */
86 .len = sizeof(reg_century_read),
87 .buf = reg_century_read
88 },
89 {
90 .addr = client->addr,
91 .flags = I2C_M_RD,
92 .len = sizeof(buf[MAX6900_REG_CENTURY]),
93 .buf = &buf[MAX6900_REG_CENTURY]
74 } 94 }
75 }; 95 };
76 int rc; 96 int rc;
@@ -86,33 +106,58 @@ static int max6900_i2c_read_regs(struct i2c_client *client, u8 *buf)
86 106
87static int max6900_i2c_write_regs(struct i2c_client *client, u8 const *buf) 107static int max6900_i2c_write_regs(struct i2c_client *client, u8 const *buf)
88{ 108{
89 u8 i2c_buf[MAX6900_REG_LEN + 1] = { MAX6900_REG_BURST_WRITE }; 109 u8 i2c_century_buf[1 + 1] = { MAX6900_REG_CENTURY_WRITE };
90 struct i2c_msg msgs[1] = { 110 struct i2c_msg century_msgs[1] = {
91 { 111 {
92 .addr = client->addr, 112 .addr = client->addr,
93 .flags = 0, /* write */ 113 .flags = 0, /* write */
94 .len = MAX6900_REG_LEN + 1, 114 .len = sizeof(i2c_century_buf),
95 .buf = i2c_buf 115 .buf = i2c_century_buf
116 }
117 };
118 u8 i2c_burst_buf[MAX6900_BURST_LEN + 1] = { MAX6900_REG_BURST_WRITE };
119 struct i2c_msg burst_msgs[1] = {
120 {
121 .addr = client->addr,
122 .flags = 0, /* write */
123 .len = sizeof(i2c_burst_buf),
124 .buf = i2c_burst_buf
96 } 125 }
97 }; 126 };
98 int rc; 127 int rc;
99 128
100 memcpy(&i2c_buf[1], buf, MAX6900_REG_LEN); 129 /*
130 * We have to make separate calls to i2c_transfer because of
131 * the need to delay after each write to the chip. Also,
132 * we write the century byte first, since we set the write-protect
133 * bit as part of the burst write.
134 */
135 i2c_century_buf[1] = buf[MAX6900_REG_CENTURY];
136 rc = i2c_transfer(client->adapter, century_msgs,
137 ARRAY_SIZE(century_msgs));
138 if (rc != ARRAY_SIZE(century_msgs))
139 goto write_failed;
140 msleep(MAX6900_IDLE_TIME_AFTER_WRITE);
101 141
102 rc = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); 142 memcpy(&i2c_burst_buf[1], buf, MAX6900_BURST_LEN);
103 if (rc != ARRAY_SIZE(msgs)) { 143
104 dev_err(&client->dev, "%s: register write failed\n", 144 rc = i2c_transfer(client->adapter, burst_msgs, ARRAY_SIZE(burst_msgs));
105 __FUNCTION__); 145 if (rc != ARRAY_SIZE(burst_msgs))
106 return -EIO; 146 goto write_failed;
107 }
108 msleep(MAX6900_IDLE_TIME_AFTER_WRITE); 147 msleep(MAX6900_IDLE_TIME_AFTER_WRITE);
148
109 return 0; 149 return 0;
150
151write_failed:
152 dev_err(&client->dev, "%s: register write failed\n",
153 __FUNCTION__);
154 return -EIO;
110} 155}
111 156
112static int max6900_i2c_validate_client(struct i2c_client *client) 157static int max6900_i2c_validate_client(struct i2c_client *client)
113{ 158{
114 u8 regs[MAX6900_REG_LEN]; 159 u8 regs[MAX6900_REG_LEN];
115 u8 zero_mask[MAX6900_REG_LEN] = { 160 u8 zero_mask[] = {
116 0x80, /* seconds */ 161 0x80, /* seconds */
117 0x80, /* minutes */ 162 0x80, /* minutes */
118 0x40, /* hours */ 163 0x40, /* hours */
@@ -134,7 +179,7 @@ static int max6900_i2c_validate_client(struct i2c_client *client)
134 if (rc < 0) 179 if (rc < 0)
135 return rc; 180 return rc;
136 181
137 for (i = 0; i < MAX6900_REG_LEN; ++i) { 182 for (i = 0; i < ARRAY_SIZE(zero_mask); ++i) {
138 if (regs[i] & zero_mask[i]) 183 if (regs[i] & zero_mask[i])
139 return -ENODEV; 184 return -ENODEV;
140 } 185 }
@@ -156,7 +201,8 @@ static int max6900_i2c_read_time(struct i2c_client *client, struct rtc_time *tm)
156 tm->tm_hour = BCD2BIN(regs[MAX6900_REG_HR] & 0x3f); 201 tm->tm_hour = BCD2BIN(regs[MAX6900_REG_HR] & 0x3f);
157 tm->tm_mday = BCD2BIN(regs[MAX6900_REG_DT]); 202 tm->tm_mday = BCD2BIN(regs[MAX6900_REG_DT]);
158 tm->tm_mon = BCD2BIN(regs[MAX6900_REG_MO]) - 1; 203 tm->tm_mon = BCD2BIN(regs[MAX6900_REG_MO]) - 1;
159 tm->tm_year = BCD2BIN(regs[MAX6900_REG_YR]) + 100; 204 tm->tm_year = BCD2BIN(regs[MAX6900_REG_YR]) +
205 BCD2BIN(regs[MAX6900_REG_CENTURY]) * 100 - 1900;
160 tm->tm_wday = BCD2BIN(regs[MAX6900_REG_DW]); 206 tm->tm_wday = BCD2BIN(regs[MAX6900_REG_DW]);
161 207
162 return 0; 208 return 0;
@@ -189,9 +235,11 @@ static int max6900_i2c_set_time(struct i2c_client *client,
189 regs[MAX6900_REG_HR] = BIN2BCD(tm->tm_hour); 235 regs[MAX6900_REG_HR] = BIN2BCD(tm->tm_hour);
190 regs[MAX6900_REG_DT] = BIN2BCD(tm->tm_mday); 236 regs[MAX6900_REG_DT] = BIN2BCD(tm->tm_mday);
191 regs[MAX6900_REG_MO] = BIN2BCD(tm->tm_mon + 1); 237 regs[MAX6900_REG_MO] = BIN2BCD(tm->tm_mon + 1);
192 regs[MAX6900_REG_YR] = BIN2BCD(tm->tm_year - 100);
193 regs[MAX6900_REG_DW] = BIN2BCD(tm->tm_wday); 238 regs[MAX6900_REG_DW] = BIN2BCD(tm->tm_wday);
194 regs[MAX6900_REG_CT] = MAX6900_REG_CT_WP; /* set write protect */ 239 regs[MAX6900_REG_YR] = BIN2BCD(tm->tm_year % 100);
240 regs[MAX6900_REG_CENTURY] = BIN2BCD((tm->tm_year + 1900) / 100);
241 /* set write protect */
242 regs[MAX6900_REG_CT] = MAX6900_REG_CT_WP;
195 243
196 rc = max6900_i2c_write_regs(client, regs); 244 rc = max6900_i2c_write_regs(client, regs);
197 if (rc < 0) 245 if (rc < 0)
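
The year handling in the rtc-max6900 hunks above splits tm_year (years since 1900) between the YR register (two BCD digits) and the newly used CENTURY register. A small stand-alone C sketch of that round trip, using local helpers in place of the kernel's BIN2BCD/BCD2BIN macros (the sample year is arbitrary):

#include <stdio.h>

static unsigned int bin2bcd(unsigned int v) { return ((v / 10) << 4) | (v % 10); }
static unsigned int bcd2bin(unsigned int v) { return (v >> 4) * 10 + (v & 0x0f); }

int main(void)
{
	int tm_year = 107;		/* 2007, i.e. years since 1900 */

	/* what max6900_i2c_set_time() stores */
	unsigned int reg_yr      = bin2bcd(tm_year % 100);
	unsigned int reg_century = bin2bcd((tm_year + 1900) / 100);

	/* what max6900_i2c_read_time() reconstructs */
	int year_back = bcd2bin(reg_yr) + bcd2bin(reg_century) * 100 - 1900;

	printf("YR=0x%02x CENTURY=0x%02x -> tm_year=%d\n",
	       reg_yr, reg_century, year_back);
	return 0;
}
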
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
new file mode 100644
index 000000000000..f10d3facecbe
--- /dev/null
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -0,0 +1,420 @@
1/*
2 * A RTC driver for the Simtek STK17TA8
3 *
4 * By Thomas Hommel <thomas.hommel@gefanuc.com>
5 *
6 * Based on the DS1553 driver from
7 * Atsushi Nemoto <anemo@mba.ocn.ne.jp>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/bcd.h>
15#include <linux/init.h>
16#include <linux/kernel.h>
17#include <linux/delay.h>
18#include <linux/jiffies.h>
19#include <linux/interrupt.h>
20#include <linux/rtc.h>
21#include <linux/platform_device.h>
22#include <linux/io.h>
23
24#define DRV_VERSION "0.1"
25
26#define RTC_REG_SIZE 0x20000
27#define RTC_OFFSET 0x1fff0
28
29#define RTC_FLAGS (RTC_OFFSET + 0)
30#define RTC_CENTURY (RTC_OFFSET + 1)
31#define RTC_SECONDS_ALARM (RTC_OFFSET + 2)
32#define RTC_MINUTES_ALARM (RTC_OFFSET + 3)
33#define RTC_HOURS_ALARM (RTC_OFFSET + 4)
34#define RTC_DATE_ALARM (RTC_OFFSET + 5)
35#define RTC_INTERRUPTS (RTC_OFFSET + 6)
36#define RTC_WATCHDOG (RTC_OFFSET + 7)
37#define RTC_CALIBRATION (RTC_OFFSET + 8)
38#define RTC_SECONDS (RTC_OFFSET + 9)
39#define RTC_MINUTES (RTC_OFFSET + 10)
40#define RTC_HOURS (RTC_OFFSET + 11)
41#define RTC_DAY (RTC_OFFSET + 12)
42#define RTC_DATE (RTC_OFFSET + 13)
43#define RTC_MONTH (RTC_OFFSET + 14)
44#define RTC_YEAR (RTC_OFFSET + 15)
45
46#define RTC_SECONDS_MASK 0x7f
47#define RTC_DAY_MASK 0x07
48#define RTC_CAL_MASK 0x3f
49
50/* Bits in the Calibration register */
51#define RTC_STOP 0x80
52
53/* Bits in the Flags register */
54#define RTC_FLAGS_AF 0x40
55#define RTC_FLAGS_PF 0x20
56#define RTC_WRITE 0x02
57#define RTC_READ 0x01
58
59/* Bits in the Interrupts register */
60#define RTC_INTS_AIE 0x40
61
62struct rtc_plat_data {
63 struct rtc_device *rtc;
64 void __iomem *ioaddr;
65 unsigned long baseaddr;
66 unsigned long last_jiffies;
67 int irq;
68 unsigned int irqen;
69 int alrm_sec;
70 int alrm_min;
71 int alrm_hour;
72 int alrm_mday;
73};
74
75static int stk17ta8_rtc_set_time(struct device *dev, struct rtc_time *tm)
76{
77 struct platform_device *pdev = to_platform_device(dev);
78 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
79 void __iomem *ioaddr = pdata->ioaddr;
80 u8 flags;
81
82 flags = readb(pdata->ioaddr + RTC_FLAGS);
83 writeb(flags | RTC_WRITE, pdata->ioaddr + RTC_FLAGS);
84
85 writeb(BIN2BCD(tm->tm_year % 100), ioaddr + RTC_YEAR);
86 writeb(BIN2BCD(tm->tm_mon + 1), ioaddr + RTC_MONTH);
87 writeb(BIN2BCD(tm->tm_wday) & RTC_DAY_MASK, ioaddr + RTC_DAY);
88 writeb(BIN2BCD(tm->tm_mday), ioaddr + RTC_DATE);
89 writeb(BIN2BCD(tm->tm_hour), ioaddr + RTC_HOURS);
90 writeb(BIN2BCD(tm->tm_min), ioaddr + RTC_MINUTES);
91 writeb(BIN2BCD(tm->tm_sec) & RTC_SECONDS_MASK, ioaddr + RTC_SECONDS);
92 writeb(BIN2BCD((tm->tm_year + 1900) / 100), ioaddr + RTC_CENTURY);
93
94 writeb(flags & ~RTC_WRITE, pdata->ioaddr + RTC_FLAGS);
95 return 0;
96}
97
98static int stk17ta8_rtc_read_time(struct device *dev, struct rtc_time *tm)
99{
100 struct platform_device *pdev = to_platform_device(dev);
101 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
102 void __iomem *ioaddr = pdata->ioaddr;
103 unsigned int year, month, day, hour, minute, second, week;
104 unsigned int century;
105 u8 flags;
106
107 /* give enough time to update RTC in case of continuous read */
108 if (pdata->last_jiffies == jiffies)
109 msleep(1);
110 pdata->last_jiffies = jiffies;
111
112 flags = readb(pdata->ioaddr + RTC_FLAGS);
113 writeb(flags | RTC_READ, ioaddr + RTC_FLAGS);
114 second = readb(ioaddr + RTC_SECONDS) & RTC_SECONDS_MASK;
115 minute = readb(ioaddr + RTC_MINUTES);
116 hour = readb(ioaddr + RTC_HOURS);
117 day = readb(ioaddr + RTC_DATE);
118 week = readb(ioaddr + RTC_DAY) & RTC_DAY_MASK;
119 month = readb(ioaddr + RTC_MONTH);
120 year = readb(ioaddr + RTC_YEAR);
121 century = readb(ioaddr + RTC_CENTURY);
122 writeb(flags & ~RTC_READ, ioaddr + RTC_FLAGS);
123 tm->tm_sec = BCD2BIN(second);
124 tm->tm_min = BCD2BIN(minute);
125 tm->tm_hour = BCD2BIN(hour);
126 tm->tm_mday = BCD2BIN(day);
127 tm->tm_wday = BCD2BIN(week);
128 tm->tm_mon = BCD2BIN(month) - 1;
129 /* year is 1900 + tm->tm_year */
130 tm->tm_year = BCD2BIN(year) + BCD2BIN(century) * 100 - 1900;
131
132 if (rtc_valid_tm(tm) < 0) {
133 dev_err(dev, "retrieved date/time is not valid.\n");
134 rtc_time_to_tm(0, tm);
135 }
136 return 0;
137}
138
139static void stk17ta8_rtc_update_alarm(struct rtc_plat_data *pdata)
140{
141 void __iomem *ioaddr = pdata->ioaddr;
142 unsigned long irqflags;
143 u8 flags;
144
145 spin_lock_irqsave(&pdata->rtc->irq_lock, irqflags);
146
147 flags = readb(ioaddr + RTC_FLAGS);
148 writeb(flags | RTC_WRITE, ioaddr + RTC_FLAGS);
149
150 writeb(pdata->alrm_mday < 0 || (pdata->irqen & RTC_UF) ?
151 0x80 : BIN2BCD(pdata->alrm_mday),
152 ioaddr + RTC_DATE_ALARM);
153 writeb(pdata->alrm_hour < 0 || (pdata->irqen & RTC_UF) ?
154 0x80 : BIN2BCD(pdata->alrm_hour),
155 ioaddr + RTC_HOURS_ALARM);
156 writeb(pdata->alrm_min < 0 || (pdata->irqen & RTC_UF) ?
157 0x80 : BIN2BCD(pdata->alrm_min),
158 ioaddr + RTC_MINUTES_ALARM);
159 writeb(pdata->alrm_sec < 0 || (pdata->irqen & RTC_UF) ?
160 0x80 : BIN2BCD(pdata->alrm_sec),
161 ioaddr + RTC_SECONDS_ALARM);
162 writeb(pdata->irqen ? RTC_INTS_AIE : 0, ioaddr + RTC_INTERRUPTS);
163 readb(ioaddr + RTC_FLAGS); /* clear interrupts */
164 writeb(flags & ~RTC_WRITE, ioaddr + RTC_FLAGS);
165 spin_unlock_irqrestore(&pdata->rtc->irq_lock, irqflags);
166}
167
168static int stk17ta8_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
169{
170 struct platform_device *pdev = to_platform_device(dev);
171 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
172
173 if (pdata->irq < 0)
174 return -EINVAL;
175 pdata->alrm_mday = alrm->time.tm_mday;
176 pdata->alrm_hour = alrm->time.tm_hour;
177 pdata->alrm_min = alrm->time.tm_min;
178 pdata->alrm_sec = alrm->time.tm_sec;
179 if (alrm->enabled)
180 pdata->irqen |= RTC_AF;
181 stk17ta8_rtc_update_alarm(pdata);
182 return 0;
183}
184
185static int stk17ta8_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
186{
187 struct platform_device *pdev = to_platform_device(dev);
188 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
189
190 if (pdata->irq < 0)
191 return -EINVAL;
192 alrm->time.tm_mday = pdata->alrm_mday < 0 ? 0 : pdata->alrm_mday;
193 alrm->time.tm_hour = pdata->alrm_hour < 0 ? 0 : pdata->alrm_hour;
194 alrm->time.tm_min = pdata->alrm_min < 0 ? 0 : pdata->alrm_min;
195 alrm->time.tm_sec = pdata->alrm_sec < 0 ? 0 : pdata->alrm_sec;
196 alrm->enabled = (pdata->irqen & RTC_AF) ? 1 : 0;
197 return 0;
198}
199
200static irqreturn_t stk17ta8_rtc_interrupt(int irq, void *dev_id)
201{
202 struct platform_device *pdev = dev_id;
203 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
204 void __iomem *ioaddr = pdata->ioaddr;
205 unsigned long events = RTC_IRQF;
206
207 /* read and clear interrupt */
208 if (!(readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_AF))
209 return IRQ_NONE;
210 if (readb(ioaddr + RTC_SECONDS_ALARM) & 0x80)
211 events |= RTC_UF;
212 else
213 events |= RTC_AF;
214 rtc_update_irq(pdata->rtc, 1, events);
215 return IRQ_HANDLED;
216}
217
218static void stk17ta8_rtc_release(struct device *dev)
219{
220 struct platform_device *pdev = to_platform_device(dev);
221 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
222
223 if (pdata->irq >= 0) {
224 pdata->irqen = 0;
225 stk17ta8_rtc_update_alarm(pdata);
226 }
227}
228
229static int stk17ta8_rtc_ioctl(struct device *dev, unsigned int cmd,
230 unsigned long arg)
231{
232 struct platform_device *pdev = to_platform_device(dev);
233 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
234
235 if (pdata->irq < 0)
236 return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */
237 switch (cmd) {
238 case RTC_AIE_OFF:
239 pdata->irqen &= ~RTC_AF;
240 stk17ta8_rtc_update_alarm(pdata);
241 break;
242 case RTC_AIE_ON:
243 pdata->irqen |= RTC_AF;
244 stk17ta8_rtc_update_alarm(pdata);
245 break;
246 default:
247 return -ENOIOCTLCMD;
248 }
249 return 0;
250}
251
252static const struct rtc_class_ops stk17ta8_rtc_ops = {
253 .read_time = stk17ta8_rtc_read_time,
254 .set_time = stk17ta8_rtc_set_time,
255 .read_alarm = stk17ta8_rtc_read_alarm,
256 .set_alarm = stk17ta8_rtc_set_alarm,
257 .release = stk17ta8_rtc_release,
258 .ioctl = stk17ta8_rtc_ioctl,
259};
260
261static ssize_t stk17ta8_nvram_read(struct kobject *kobj, char *buf,
262 loff_t pos, size_t size)
263{
264 struct platform_device *pdev =
265 to_platform_device(container_of(kobj, struct device, kobj));
266 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
267 void __iomem *ioaddr = pdata->ioaddr;
268 ssize_t count;
269
270 for (count = 0; size > 0 && pos < RTC_OFFSET; count++, size--)
271 *buf++ = readb(ioaddr + pos++);
272 return count;
273}
274
275static ssize_t stk17ta8_nvram_write(struct kobject *kobj, char *buf,
276 loff_t pos, size_t size)
277{
278 struct platform_device *pdev =
279 to_platform_device(container_of(kobj, struct device, kobj));
280 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
281 void __iomem *ioaddr = pdata->ioaddr;
282 ssize_t count;
283
284 for (count = 0; size > 0 && pos < RTC_OFFSET; count++, size--)
285 writeb(*buf++, ioaddr + pos++);
286 return count;
287}
288
289static struct bin_attribute stk17ta8_nvram_attr = {
290 .attr = {
291 .name = "nvram",
292 .mode = S_IRUGO | S_IWUGO,
293 .owner = THIS_MODULE,
294 },
295 .size = RTC_OFFSET,
296 .read = stk17ta8_nvram_read,
297 .write = stk17ta8_nvram_write,
298};
299
300static int __init stk17ta8_rtc_probe(struct platform_device *pdev)
301{
302 struct rtc_device *rtc;
303 struct resource *res;
304 unsigned int cal;
305 unsigned int flags;
306 struct rtc_plat_data *pdata;
307 void __iomem *ioaddr = NULL;
308 int ret = 0;
309
310 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
311 if (!res)
312 return -ENODEV;
313
314 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
315 if (!pdata)
316 return -ENOMEM;
317 pdata->irq = -1;
318 if (!request_mem_region(res->start, RTC_REG_SIZE, pdev->name)) {
319 ret = -EBUSY;
320 goto out;
321 }
322 pdata->baseaddr = res->start;
323 ioaddr = ioremap(pdata->baseaddr, RTC_REG_SIZE);
324 if (!ioaddr) {
325 ret = -ENOMEM;
326 goto out;
327 }
328 pdata->ioaddr = ioaddr;
329 pdata->irq = platform_get_irq(pdev, 0);
330
331 /* turn RTC on if it was not on */
332 cal = readb(ioaddr + RTC_CALIBRATION);
333 if (cal & RTC_STOP) {
334 cal &= RTC_CAL_MASK;
335 flags = readb(ioaddr + RTC_FLAGS);
336 writeb(flags | RTC_WRITE, ioaddr + RTC_FLAGS);
337 writeb(cal, ioaddr + RTC_CALIBRATION);
338 writeb(flags & ~RTC_WRITE, ioaddr + RTC_FLAGS);
339 }
340 if (readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_PF)
341 dev_warn(&pdev->dev, "voltage-low detected.\n");
342
343 if (pdata->irq >= 0) {
344 writeb(0, ioaddr + RTC_INTERRUPTS);
345 if (request_irq(pdata->irq, stk17ta8_rtc_interrupt,
346 IRQF_DISABLED | IRQF_SHARED,
347 pdev->name, pdev) < 0) {
348 dev_warn(&pdev->dev, "interrupt not available.\n");
349 pdata->irq = -1;
350 }
351 }
352
353 rtc = rtc_device_register(pdev->name, &pdev->dev,
354 &stk17ta8_rtc_ops, THIS_MODULE);
355 if (IS_ERR(rtc)) {
356 ret = PTR_ERR(rtc);
357 goto out;
358 }
359 pdata->rtc = rtc;
360 pdata->last_jiffies = jiffies;
361 platform_set_drvdata(pdev, pdata);
362 ret = sysfs_create_bin_file(&pdev->dev.kobj, &stk17ta8_nvram_attr);
363 if (ret)
364 goto out;
365 return 0;
366 out:
367 if (pdata->rtc)
368 rtc_device_unregister(pdata->rtc);
369 if (pdata->irq >= 0)
370 free_irq(pdata->irq, pdev);
371 if (ioaddr)
372 iounmap(ioaddr);
373 if (pdata->baseaddr)
374 release_mem_region(pdata->baseaddr, RTC_REG_SIZE);
375 kfree(pdata);
376 return ret;
377}
378
379static int __devexit stk17ta8_rtc_remove(struct platform_device *pdev)
380{
381 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
382
383 sysfs_remove_bin_file(&pdev->dev.kobj, &stk17ta8_nvram_attr);
384 rtc_device_unregister(pdata->rtc);
385 if (pdata->irq >= 0) {
386 writeb(0, pdata->ioaddr + RTC_INTERRUPTS);
387 free_irq(pdata->irq, pdev);
388 }
389 iounmap(pdata->ioaddr);
390 release_mem_region(pdata->baseaddr, RTC_REG_SIZE);
391 kfree(pdata);
392 return 0;
393}
394
395static struct platform_driver stk17ta8_rtc_driver = {
396 .probe = stk17ta8_rtc_probe,
397 .remove = __devexit_p(stk17ta8_rtc_remove),
398 .driver = {
399 .name = "stk17ta8",
400 .owner = THIS_MODULE,
401 },
402};
403
404static __init int stk17ta8_init(void)
405{
406 return platform_driver_register(&stk17ta8_rtc_driver);
407}
408
409static __exit void stk17ta8_exit(void)
410{
411 return platform_driver_unregister(&stk17ta8_rtc_driver);
412}
413
414module_init(stk17ta8_init);
415module_exit(stk17ta8_exit);
416
417MODULE_AUTHOR("Thomas Hommel <thomas.hommel@gefanuc.com>");
418MODULE_DESCRIPTION("Simtek STK17TA8 RTC driver");
419MODULE_LICENSE("GPL");
420MODULE_VERSION(DRV_VERSION);
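
On the alarm setup in stk17ta8_rtc_update_alarm() above: a field kept at a negative value (or any field while an update interrupt is requested) is written as 0x80, which the driver treats as "match any value"; otherwise the BCD-encoded field is written. The sketch below mirrors only that encoding convention as read from the driver code; consult the STK17TA8 datasheet for the authoritative register semantics.

#include <stdio.h>

static unsigned int bin2bcd(unsigned int v) { return ((v / 10) << 4) | (v % 10); }

/* -1 means "don't care", mirroring the alrm_* < 0 convention above */
static unsigned int alarm_byte(int field)
{
	return field < 0 ? 0x80 : bin2bcd((unsigned int)field);
}

int main(void)
{
	int alrm_mday = -1, alrm_hour = 6, alrm_min = 30, alrm_sec = 0;

	printf("DATE=0x%02x HOURS=0x%02x MIN=0x%02x SEC=0x%02x\n",
	       alarm_byte(alrm_mday), alarm_byte(alrm_hour),
	       alarm_byte(alrm_min), alarm_byte(alrm_sec));
	return 0;
}
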
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 0f8689557158..86a7ba7bad63 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -132,6 +132,7 @@ obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi/
132obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/ 132obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/
133obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o 133obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
134obj-$(CONFIG_SCSI_STEX) += stex.o 134obj-$(CONFIG_SCSI_STEX) += stex.o
135obj-$(CONFIG_PS3_ROM) += ps3rom.o
135 136
136obj-$(CONFIG_ARM) += arm/ 137obj-$(CONFIG_ARM) += arm/
137 138
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
new file mode 100644
index 000000000000..b50f1e14f2a5
--- /dev/null
+++ b/drivers/scsi/ps3rom.c
@@ -0,0 +1,533 @@
1/*
2 * PS3 BD/DVD/CD-ROM Storage Driver
3 *
4 * Copyright (C) 2007 Sony Computer Entertainment Inc.
5 * Copyright 2007 Sony Corp.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published
9 * by the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20
21#include <linux/cdrom.h>
22#include <linux/highmem.h>
23
24#include <scsi/scsi.h>
25#include <scsi/scsi_cmnd.h>
26#include <scsi/scsi_dbg.h>
27#include <scsi/scsi_device.h>
28#include <scsi/scsi_host.h>
29
30#include <asm/lv1call.h>
31#include <asm/ps3stor.h>
32
33
34#define DEVICE_NAME "ps3rom"
35
36#define BOUNCE_SIZE (64*1024)
37
38#define PS3ROM_MAX_SECTORS (BOUNCE_SIZE / CD_FRAMESIZE)
39
40
41struct ps3rom_private {
42 struct ps3_storage_device *dev;
43 struct scsi_cmnd *curr_cmd;
44};
45
46
47#define LV1_STORAGE_SEND_ATAPI_COMMAND (1)
48
49struct lv1_atapi_cmnd_block {
50 u8 pkt[32]; /* packet command block */
51 u32 pktlen; /* should be 12 for ATAPI 8020 */
52 u32 blocks;
53 u32 block_size;
54 u32 proto; /* transfer mode */
55 u32 in_out; /* transfer direction */
56 u64 buffer; /* parameter except command block */
57 u32 arglen; /* length above */
58};
59
60enum lv1_atapi_proto {
61 NON_DATA_PROTO = 0,
62 PIO_DATA_IN_PROTO = 1,
63 PIO_DATA_OUT_PROTO = 2,
64 DMA_PROTO = 3
65};
66
67enum lv1_atapi_in_out {
68 DIR_WRITE = 0, /* memory -> device */
69 DIR_READ = 1 /* device -> memory */
70};
71
72
73static int ps3rom_slave_configure(struct scsi_device *scsi_dev)
74{
75 struct ps3rom_private *priv = shost_priv(scsi_dev->host);
76 struct ps3_storage_device *dev = priv->dev;
77
78 dev_dbg(&dev->sbd.core, "%s:%u: id %u, lun %u, channel %u\n", __func__,
79 __LINE__, scsi_dev->id, scsi_dev->lun, scsi_dev->channel);
80
81 /*
82 * ATAPI SFF8020 devices use MODE_SENSE_10,
83 * so we can prohibit MODE_SENSE_6
84 */
85 scsi_dev->use_10_for_ms = 1;
86
87 /* we don't support {READ,WRITE}_6 */
88 scsi_dev->use_10_for_rw = 1;
89
90 return 0;
91}
92
93/*
94 * copy data from device into scatter/gather buffer
95 */
96static int fill_from_dev_buffer(struct scsi_cmnd *cmd, const void *buf)
97{
98 int k, req_len, act_len, len, active;
99 void *kaddr;
100 struct scatterlist *sgpnt;
101 unsigned int buflen;
102
103 buflen = cmd->request_bufflen;
104 if (!buflen)
105 return 0;
106
107 if (!cmd->request_buffer)
108 return -1;
109
110 sgpnt = cmd->request_buffer;
111 active = 1;
112 for (k = 0, req_len = 0, act_len = 0; k < cmd->use_sg; ++k, ++sgpnt) {
113 if (active) {
114 kaddr = kmap_atomic(sgpnt->page, KM_IRQ0);
115 len = sgpnt->length;
116 if ((req_len + len) > buflen) {
117 active = 0;
118 len = buflen - req_len;
119 }
120 memcpy(kaddr + sgpnt->offset, buf + req_len, len);
121 flush_kernel_dcache_page(sgpnt->page);
122 kunmap_atomic(kaddr, KM_IRQ0);
123 act_len += len;
124 }
125 req_len += sgpnt->length;
126 }
127 cmd->resid = req_len - act_len;
128 return 0;
129}
130
131/*
132 * copy data from scatter/gather into device's buffer
133 */
134static int fetch_to_dev_buffer(struct scsi_cmnd *cmd, void *buf)
135{
136 int k, req_len, len, fin;
137 void *kaddr;
138 struct scatterlist *sgpnt;
139 unsigned int buflen;
140
141 buflen = cmd->request_bufflen;
142 if (!buflen)
143 return 0;
144
145 if (!cmd->request_buffer)
146 return -1;
147
148 sgpnt = cmd->request_buffer;
149 for (k = 0, req_len = 0, fin = 0; k < cmd->use_sg; ++k, ++sgpnt) {
150 kaddr = kmap_atomic(sgpnt->page, KM_IRQ0);
151 len = sgpnt->length;
152 if ((req_len + len) > buflen) {
153 len = buflen - req_len;
154 fin = 1;
155 }
156 memcpy(buf + req_len, kaddr + sgpnt->offset, len);
157 kunmap_atomic(kaddr, KM_IRQ0);
158 if (fin)
159 return req_len + len;
160 req_len += sgpnt->length;
161 }
162 return req_len;
163}
164
165static int ps3rom_atapi_request(struct ps3_storage_device *dev,
166 struct scsi_cmnd *cmd)
167{
168 struct lv1_atapi_cmnd_block atapi_cmnd;
169 unsigned char opcode = cmd->cmnd[0];
170 int res;
171 u64 lpar;
172
173 dev_dbg(&dev->sbd.core, "%s:%u: send ATAPI command 0x%02x\n", __func__,
174 __LINE__, opcode);
175
176 memset(&atapi_cmnd, 0, sizeof(struct lv1_atapi_cmnd_block));
177 memcpy(&atapi_cmnd.pkt, cmd->cmnd, 12);
178 atapi_cmnd.pktlen = 12;
179 atapi_cmnd.block_size = 1; /* transfer size is block_size * blocks */
180 atapi_cmnd.blocks = atapi_cmnd.arglen = cmd->request_bufflen;
181 atapi_cmnd.buffer = dev->bounce_lpar;
182
183 switch (cmd->sc_data_direction) {
184 case DMA_FROM_DEVICE:
185 if (cmd->request_bufflen >= CD_FRAMESIZE)
186 atapi_cmnd.proto = DMA_PROTO;
187 else
188 atapi_cmnd.proto = PIO_DATA_IN_PROTO;
189 atapi_cmnd.in_out = DIR_READ;
190 break;
191
192 case DMA_TO_DEVICE:
193 if (cmd->request_bufflen >= CD_FRAMESIZE)
194 atapi_cmnd.proto = DMA_PROTO;
195 else
196 atapi_cmnd.proto = PIO_DATA_OUT_PROTO;
197 atapi_cmnd.in_out = DIR_WRITE;
198 res = fetch_to_dev_buffer(cmd, dev->bounce_buf);
199 if (res < 0)
200 return DID_ERROR << 16;
201 break;
202
203 default:
204 atapi_cmnd.proto = NON_DATA_PROTO;
205 break;
206 }
207
208 lpar = ps3_mm_phys_to_lpar(__pa(&atapi_cmnd));
209 res = lv1_storage_send_device_command(dev->sbd.dev_id,
210 LV1_STORAGE_SEND_ATAPI_COMMAND,
211 lpar, sizeof(atapi_cmnd),
212 atapi_cmnd.buffer,
213 atapi_cmnd.arglen, &dev->tag);
214 if (res == LV1_DENIED_BY_POLICY) {
215 dev_dbg(&dev->sbd.core,
216 "%s:%u: ATAPI command 0x%02x denied by policy\n",
217 __func__, __LINE__, opcode);
218 return DID_ERROR << 16;
219 }
220
221 if (res) {
222 dev_err(&dev->sbd.core,
223 "%s:%u: ATAPI command 0x%02x failed %d\n", __func__,
224 __LINE__, opcode, res);
225 return DID_ERROR << 16;
226 }
227
228 return 0;
229}
230
231static inline unsigned int srb10_lba(const struct scsi_cmnd *cmd)
232{
233 return cmd->cmnd[2] << 24 | cmd->cmnd[3] << 16 | cmd->cmnd[4] << 8 |
234 cmd->cmnd[5];
235}
236
237static inline unsigned int srb10_len(const struct scsi_cmnd *cmd)
238{
239 return cmd->cmnd[7] << 8 | cmd->cmnd[8];
240}
241
242static int ps3rom_read_request(struct ps3_storage_device *dev,
243 struct scsi_cmnd *cmd, u32 start_sector,
244 u32 sectors)
245{
246 int res;
247
248 dev_dbg(&dev->sbd.core, "%s:%u: read %u sectors starting at %u\n",
249 __func__, __LINE__, sectors, start_sector);
250
251 res = lv1_storage_read(dev->sbd.dev_id,
252 dev->regions[dev->region_idx].id, start_sector,
253 sectors, 0, dev->bounce_lpar, &dev->tag);
254 if (res) {
255 dev_err(&dev->sbd.core, "%s:%u: read failed %d\n", __func__,
256 __LINE__, res);
257 return DID_ERROR << 16;
258 }
259
260 return 0;
261}
262
263static int ps3rom_write_request(struct ps3_storage_device *dev,
264 struct scsi_cmnd *cmd, u32 start_sector,
265 u32 sectors)
266{
267 int res;
268
269 dev_dbg(&dev->sbd.core, "%s:%u: write %u sectors starting at %u\n",
270 __func__, __LINE__, sectors, start_sector);
271
272 res = fetch_to_dev_buffer(cmd, dev->bounce_buf);
273 if (res < 0)
274 return DID_ERROR << 16;
275
276 res = lv1_storage_write(dev->sbd.dev_id,
277 dev->regions[dev->region_idx].id, start_sector,
278 sectors, 0, dev->bounce_lpar, &dev->tag);
279 if (res) {
280 dev_err(&dev->sbd.core, "%s:%u: write failed %d\n", __func__,
281 __LINE__, res);
282 return DID_ERROR << 16;
283 }
284
285 return 0;
286}
287
288static int ps3rom_queuecommand(struct scsi_cmnd *cmd,
289 void (*done)(struct scsi_cmnd *))
290{
291 struct ps3rom_private *priv = shost_priv(cmd->device->host);
292 struct ps3_storage_device *dev = priv->dev;
293 unsigned char opcode;
294 int res;
295
296#ifdef DEBUG
297 scsi_print_command(cmd);
298#endif
299
300 priv->curr_cmd = cmd;
301 cmd->scsi_done = done;
302
303 opcode = cmd->cmnd[0];
304 /*
305 * While we can submit READ/WRITE SCSI commands as ATAPI commands,
306 * it's recommended for various reasons (performance, error handling,
307 * ...) to use lv1_storage_{read,write}() instead
308 */
309 switch (opcode) {
310 case READ_10:
311 res = ps3rom_read_request(dev, cmd, srb10_lba(cmd),
312 srb10_len(cmd));
313 break;
314
315 case WRITE_10:
316 res = ps3rom_write_request(dev, cmd, srb10_lba(cmd),
317 srb10_len(cmd));
318 break;
319
320 default:
321 res = ps3rom_atapi_request(dev, cmd);
322 break;
323 }
324
325 if (res) {
326 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
327 cmd->result = res;
328 cmd->sense_buffer[0] = 0x70;
329 cmd->sense_buffer[2] = ILLEGAL_REQUEST;
330 priv->curr_cmd = NULL;
331 cmd->scsi_done(cmd);
332 }
333
334 return 0;
335}
336
337static int decode_lv1_status(u64 status, unsigned char *sense_key,
338 unsigned char *asc, unsigned char *ascq)
339{
340 if (((status >> 24) & 0xff) != SAM_STAT_CHECK_CONDITION)
341 return -1;
342
343 *sense_key = (status >> 16) & 0xff;
344 *asc = (status >> 8) & 0xff;
345 *ascq = status & 0xff;
346 return 0;
347}
348
349static irqreturn_t ps3rom_interrupt(int irq, void *data)
350{
351 struct ps3_storage_device *dev = data;
352 struct Scsi_Host *host;
353 struct ps3rom_private *priv;
354 struct scsi_cmnd *cmd;
355 int res;
356 u64 tag, status;
357 unsigned char sense_key, asc, ascq;
358
359 res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status);
360 /*
361 * status = -1 may mean that ATAPI transport completed OK, but
362	 * the ATAPI command itself resulted in CHECK CONDITION,
363	 * so the upper layer should issue REQUEST_SENSE to check the sense data
364 */
365
366 if (tag != dev->tag)
367 dev_err(&dev->sbd.core,
368 "%s:%u: tag mismatch, got %lx, expected %lx\n",
369 __func__, __LINE__, tag, dev->tag);
370
371 if (res) {
372 dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%lx\n",
373 __func__, __LINE__, res, status);
374 return IRQ_HANDLED;
375 }
376
377 host = dev->sbd.core.driver_data;
378 priv = shost_priv(host);
379 cmd = priv->curr_cmd;
380
381 if (!status) {
382 /* OK, completed */
383 if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
384 res = fill_from_dev_buffer(cmd, dev->bounce_buf);
385 if (res) {
386 cmd->result = DID_ERROR << 16;
387 goto done;
388 }
389 }
390 cmd->result = DID_OK << 16;
391 goto done;
392 }
393
394 if (cmd->cmnd[0] == REQUEST_SENSE) {
395 /* SCSI spec says request sense should never get error */
396 dev_err(&dev->sbd.core, "%s:%u: end error without autosense\n",
397 __func__, __LINE__);
398 cmd->result = DID_ERROR << 16 | SAM_STAT_CHECK_CONDITION;
399 goto done;
400 }
401
402 if (decode_lv1_status(status, &sense_key, &asc, &ascq)) {
403 cmd->result = DID_ERROR << 16;
404 goto done;
405 }
406
407 cmd->sense_buffer[0] = 0x70;
408 cmd->sense_buffer[2] = sense_key;
409 cmd->sense_buffer[7] = 16 - 6;
410 cmd->sense_buffer[12] = asc;
411 cmd->sense_buffer[13] = ascq;
412 cmd->result = SAM_STAT_CHECK_CONDITION;
413
414done:
415 priv->curr_cmd = NULL;
416 cmd->scsi_done(cmd);
417 return IRQ_HANDLED;
418}
419
420static struct scsi_host_template ps3rom_host_template = {
421 .name = DEVICE_NAME,
422 .slave_configure = ps3rom_slave_configure,
423 .queuecommand = ps3rom_queuecommand,
424 .can_queue = 1,
425 .this_id = 7,
426 .sg_tablesize = SG_ALL,
427 .cmd_per_lun = 1,
428 .emulated = 1, /* only sg driver uses this */
429 .max_sectors = PS3ROM_MAX_SECTORS,
430 .use_clustering = ENABLE_CLUSTERING,
431 .module = THIS_MODULE,
432};
433
434
435static int __devinit ps3rom_probe(struct ps3_system_bus_device *_dev)
436{
437 struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
438 int error;
439 struct Scsi_Host *host;
440 struct ps3rom_private *priv;
441
442 if (dev->blk_size != CD_FRAMESIZE) {
443 dev_err(&dev->sbd.core,
444 "%s:%u: cannot handle block size %lu\n", __func__,
445 __LINE__, dev->blk_size);
446 return -EINVAL;
447 }
448
449 dev->bounce_size = BOUNCE_SIZE;
450 dev->bounce_buf = kmalloc(BOUNCE_SIZE, GFP_DMA);
451 if (!dev->bounce_buf)
452 return -ENOMEM;
453
454 error = ps3stor_setup(dev, ps3rom_interrupt);
455 if (error)
456 goto fail_free_bounce;
457
458 host = scsi_host_alloc(&ps3rom_host_template,
459 sizeof(struct ps3rom_private));
460 if (!host) {
461 dev_err(&dev->sbd.core, "%s:%u: scsi_host_alloc failed\n",
462 __func__, __LINE__);
463 goto fail_teardown;
464 }
465
466 priv = shost_priv(host);
467 dev->sbd.core.driver_data = host;
468 priv->dev = dev;
469
470 /* One device/LUN per SCSI bus */
471 host->max_id = 1;
472 host->max_lun = 1;
473
474 error = scsi_add_host(host, &dev->sbd.core);
475 if (error) {
476		dev_err(&dev->sbd.core, "%s:%u: scsi_add_host failed %d\n",
477 __func__, __LINE__, error);
478 error = -ENODEV;
479 goto fail_host_put;
480 }
481
482 scsi_scan_host(host);
483 return 0;
484
485fail_host_put:
486 scsi_host_put(host);
487 dev->sbd.core.driver_data = NULL;
488fail_teardown:
489 ps3stor_teardown(dev);
490fail_free_bounce:
491 kfree(dev->bounce_buf);
492 return error;
493}
494
495static int ps3rom_remove(struct ps3_system_bus_device *_dev)
496{
497 struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
498 struct Scsi_Host *host = dev->sbd.core.driver_data;
499
500 scsi_remove_host(host);
501 ps3stor_teardown(dev);
502 scsi_host_put(host);
503 dev->sbd.core.driver_data = NULL;
504 kfree(dev->bounce_buf);
505 return 0;
506}
507
508static struct ps3_system_bus_driver ps3rom = {
509 .match_id = PS3_MATCH_ID_STOR_ROM,
510 .core.name = DEVICE_NAME,
511 .core.owner = THIS_MODULE,
512 .probe = ps3rom_probe,
513 .remove = ps3rom_remove
514};
515
516
517static int __init ps3rom_init(void)
518{
519 return ps3_system_bus_driver_register(&ps3rom);
520}
521
522static void __exit ps3rom_exit(void)
523{
524 ps3_system_bus_driver_unregister(&ps3rom);
525}
526
527module_init(ps3rom_init);
528module_exit(ps3rom_exit);
529
530MODULE_LICENSE("GPL");
531MODULE_DESCRIPTION("PS3 BD/DVD/CD-ROM Storage Driver");
532MODULE_AUTHOR("Sony Corporation");
533MODULE_ALIAS(PS3_MODULE_ALIAS_STOR_ROM);
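
The decode_lv1_status() helper in the driver above assumes that lv1_storage_get_async_status() packs a SCSI status byte and the sense triple into one 64-bit word: bits 31..24 hold the status, 23..16 the sense key, 15..8 the ASC and 7..0 the ASCQ. A minimal stand-alone sketch of that unpacking follows; the status value is made up for illustration and is not anything returned by the hypervisor.

/* Sketch only: mirrors the bit layout assumed by decode_lv1_status().
 * The status value below is hypothetical (CHECK CONDITION, ILLEGAL
 * REQUEST, ASC 0x24 = INVALID FIELD IN CDB).
 */
#include <stdio.h>

int main(void)
{
	unsigned long long status = 0x02052400ULL;
	unsigned int scsi_status = (unsigned int)(status >> 24) & 0xff;
	unsigned int sense_key   = (unsigned int)(status >> 16) & 0xff;
	unsigned int asc         = (unsigned int)(status >> 8) & 0xff;
	unsigned int ascq        = (unsigned int)status & 0xff;

	if (scsi_status == 0x02)	/* SAM_STAT_CHECK_CONDITION */
		printf("sense key %02x asc %02x ascq %02x\n",
		       sense_key, asc, ascq);
	return 0;
}

On a non-zero, decodable status the interrupt handler above copies exactly these three bytes into the fixed-format sense buffer and reports CHECK CONDITION to the midlayer.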
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 018884d7a5fa..b05de30b5d9b 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -303,8 +303,7 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)
303 * creates board info from kernel command lines 303 * creates board info from kernel command lines
304 */ 304 */
305 305
306static void __init_or_module 306static void scan_boardinfo(struct spi_master *master)
307scan_boardinfo(struct spi_master *master)
308{ 307{
309 struct boardinfo *bi; 308 struct boardinfo *bi;
310 struct device *dev = master->cdev.dev; 309 struct device *dev = master->cdev.dev;
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 0c5644bb59af..2a237f09ee5d 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -849,6 +849,16 @@ config FB_INTSRAM
849 Say Y if you want to map Frame Buffer in internal SRAM. Say N if you want 849 Say Y if you want to map Frame Buffer in internal SRAM. Say N if you want
850 to let frame buffer in external SDRAM. 850 to let frame buffer in external SDRAM.
851 851
852config FB_ATMEL_STN
853	bool "Use an STN display with AT91/AT32 LCD Controller"
854 depends on FB_ATMEL && MACH_AT91SAM9261EK
855 default n
856 help
857	  Say Y if you want to connect an STN LCD display to the AT91/AT32 LCD
858 Controller. Say N if you want to connect a TFT.
859
860 If unsure, say N.
861
852config FB_NVIDIA 862config FB_NVIDIA
853 tristate "nVidia Framebuffer Support" 863 tristate "nVidia Framebuffer Support"
854 depends on FB && PCI 864 depends on FB && PCI
@@ -1796,13 +1806,14 @@ config FB_PS3
1796 select FB_SYS_COPYAREA 1806 select FB_SYS_COPYAREA
1797 select FB_SYS_IMAGEBLIT 1807 select FB_SYS_IMAGEBLIT
1798 select FB_SYS_FOPS 1808 select FB_SYS_FOPS
1809 select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
1799 ---help--- 1810 ---help---
1800 Include support for the virtual frame buffer in the PS3 platform. 1811 Include support for the virtual frame buffer in the PS3 platform.
1801 1812
1802config FB_PS3_DEFAULT_SIZE_M 1813config FB_PS3_DEFAULT_SIZE_M
1803 int "PS3 default frame buffer size (in MiB)" 1814 int "PS3 default frame buffer size (in MiB)"
1804 depends on FB_PS3 1815 depends on FB_PS3
1805 default 18 1816 default 9
1806 ---help--- 1817 ---help---
1807 This is the default size (in MiB) of the virtual frame buffer in 1818 This is the default size (in MiB) of the virtual frame buffer in
1808 the PS3. 1819 the PS3.
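
For scale only (the hunk above does not state why the FB_PS3 default drops from 18 MiB to 9 MiB): a single 1920x1080 surface at 32 bits per pixel needs a little under 8 MiB, so the new default still holds one full-HD frame. The arithmetic, as a throwaway C snippet with assumed resolution and depth:

/* Back-of-the-envelope size of one full-HD, 32 bpp frame.
 * Resolution and depth are assumptions, not values from the patch.
 */
#include <stdio.h>

int main(void)
{
	unsigned long bytes = 1920UL * 1080 * 4;	/* xres * yres * bytes per pixel */

	printf("%lu bytes = %.1f MiB\n", bytes, bytes / (1024.0 * 1024.0));
	return 0;
}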
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index e1d5bd0c98c4..235b618b4117 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -79,6 +79,29 @@ static struct fb_fix_screeninfo atmel_lcdfb_fix __initdata = {
79 .accel = FB_ACCEL_NONE, 79 .accel = FB_ACCEL_NONE,
80}; 80};
81 81
82static unsigned long compute_hozval(unsigned long xres, unsigned long lcdcon2)
83{
84 unsigned long value;
85
86 if (!(cpu_is_at91sam9261() || cpu_is_at32ap7000()))
87 return xres;
88
89 value = xres;
90 if ((lcdcon2 & ATMEL_LCDC_DISTYPE) != ATMEL_LCDC_DISTYPE_TFT) {
91 /* STN display */
92 if ((lcdcon2 & ATMEL_LCDC_DISTYPE) == ATMEL_LCDC_DISTYPE_STNCOLOR) {
93 value *= 3;
94 }
95 if ( (lcdcon2 & ATMEL_LCDC_IFWIDTH) == ATMEL_LCDC_IFWIDTH_4
96 || ( (lcdcon2 & ATMEL_LCDC_IFWIDTH) == ATMEL_LCDC_IFWIDTH_8
97 && (lcdcon2 & ATMEL_LCDC_SCANMOD) == ATMEL_LCDC_SCANMOD_DUAL ))
98 value = DIV_ROUND_UP(value, 4);
99 else
100 value = DIV_ROUND_UP(value, 8);
101 }
102
103 return value;
104}
82 105
83static void atmel_lcdfb_update_dma(struct fb_info *info, 106static void atmel_lcdfb_update_dma(struct fb_info *info,
84 struct fb_var_screeninfo *var) 107 struct fb_var_screeninfo *var)
@@ -181,6 +204,7 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
181 var->xoffset = var->yoffset = 0; 204 var->xoffset = var->yoffset = 0;
182 205
183 switch (var->bits_per_pixel) { 206 switch (var->bits_per_pixel) {
207 case 1:
184 case 2: 208 case 2:
185 case 4: 209 case 4:
186 case 8: 210 case 8:
@@ -195,8 +219,11 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
195 var->blue.offset = 10; 219 var->blue.offset = 10;
196 var->red.length = var->green.length = var->blue.length = 5; 220 var->red.length = var->green.length = var->blue.length = 5;
197 break; 221 break;
198 case 24:
199 case 32: 222 case 32:
223 var->transp.offset = 24;
224 var->transp.length = 8;
225 /* fall through */
226 case 24:
200 var->red.offset = 0; 227 var->red.offset = 0;
201 var->green.offset = 8; 228 var->green.offset = 8;
202 var->blue.offset = 16; 229 var->blue.offset = 16;
@@ -228,8 +255,10 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
228static int atmel_lcdfb_set_par(struct fb_info *info) 255static int atmel_lcdfb_set_par(struct fb_info *info)
229{ 256{
230 struct atmel_lcdfb_info *sinfo = info->par; 257 struct atmel_lcdfb_info *sinfo = info->par;
258 unsigned long hozval_linesz;
231 unsigned long value; 259 unsigned long value;
232 unsigned long clk_value_khz; 260 unsigned long clk_value_khz;
261 unsigned long bits_per_line;
233 262
234 dev_dbg(info->device, "%s:\n", __func__); 263 dev_dbg(info->device, "%s:\n", __func__);
235 dev_dbg(info->device, " * resolution: %ux%u (%ux%u virtual)\n", 264 dev_dbg(info->device, " * resolution: %ux%u (%ux%u virtual)\n",
@@ -241,12 +270,15 @@ static int atmel_lcdfb_set_par(struct fb_info *info)
241 270
242 lcdc_writel(sinfo, ATMEL_LCDC_DMACON, 0); 271 lcdc_writel(sinfo, ATMEL_LCDC_DMACON, 0);
243 272
244 if (info->var.bits_per_pixel <= 8) 273 if (info->var.bits_per_pixel == 1)
274 info->fix.visual = FB_VISUAL_MONO01;
275 else if (info->var.bits_per_pixel <= 8)
245 info->fix.visual = FB_VISUAL_PSEUDOCOLOR; 276 info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
246 else 277 else
247 info->fix.visual = FB_VISUAL_TRUECOLOR; 278 info->fix.visual = FB_VISUAL_TRUECOLOR;
248 279
249 info->fix.line_length = info->var.xres_virtual * (info->var.bits_per_pixel / 8); 280 bits_per_line = info->var.xres_virtual * info->var.bits_per_pixel;
281 info->fix.line_length = DIV_ROUND_UP(bits_per_line, 8);
250 282
251 /* Re-initialize the DMA engine... */ 283 /* Re-initialize the DMA engine... */
252 dev_dbg(info->device, " * update DMA engine\n"); 284 dev_dbg(info->device, " * update DMA engine\n");
@@ -262,18 +294,21 @@ static int atmel_lcdfb_set_par(struct fb_info *info)
262 /* Set pixel clock */ 294 /* Set pixel clock */
263 clk_value_khz = clk_get_rate(sinfo->lcdc_clk) / 1000; 295 clk_value_khz = clk_get_rate(sinfo->lcdc_clk) / 1000;
264 296
265 value = clk_value_khz / PICOS2KHZ(info->var.pixclock); 297 value = DIV_ROUND_UP(clk_value_khz, PICOS2KHZ(info->var.pixclock));
266
267 if (clk_value_khz % PICOS2KHZ(info->var.pixclock))
268 value++;
269 298
270 value = (value / 2) - 1; 299 value = (value / 2) - 1;
300 dev_dbg(info->device, " * programming CLKVAL = 0x%08lx\n", value);
271 301
272 if (value <= 0) { 302 if (value <= 0) {
273 dev_notice(info->device, "Bypassing pixel clock divider\n"); 303 dev_notice(info->device, "Bypassing pixel clock divider\n");
274 lcdc_writel(sinfo, ATMEL_LCDC_LCDCON1, ATMEL_LCDC_BYPASS); 304 lcdc_writel(sinfo, ATMEL_LCDC_LCDCON1, ATMEL_LCDC_BYPASS);
275 } else 305 } else {
276 lcdc_writel(sinfo, ATMEL_LCDC_LCDCON1, value << ATMEL_LCDC_CLKVAL_OFFSET); 306 lcdc_writel(sinfo, ATMEL_LCDC_LCDCON1, value << ATMEL_LCDC_CLKVAL_OFFSET);
307 info->var.pixclock = KHZ2PICOS(clk_value_khz / (2 * (value + 1)));
308 dev_dbg(info->device, " updated pixclk: %lu KHz\n",
309 PICOS2KHZ(info->var.pixclock));
310 }
311
277 312
278 /* Initialize control register 2 */ 313 /* Initialize control register 2 */
279 value = sinfo->default_lcdcon2; 314 value = sinfo->default_lcdcon2;
@@ -311,9 +346,14 @@ static int atmel_lcdfb_set_par(struct fb_info *info)
311 dev_dbg(info->device, " * LCDTIM2 = %08lx\n", value); 346 dev_dbg(info->device, " * LCDTIM2 = %08lx\n", value);
312 lcdc_writel(sinfo, ATMEL_LCDC_TIM2, value); 347 lcdc_writel(sinfo, ATMEL_LCDC_TIM2, value);
313 348
349 /* Horizontal value (aka line size) */
350 hozval_linesz = compute_hozval(info->var.xres,
351 lcdc_readl(sinfo, ATMEL_LCDC_LCDCON2));
352
314 /* Display size */ 353 /* Display size */
315 value = (info->var.xres - 1) << ATMEL_LCDC_HOZVAL_OFFSET; 354 value = (hozval_linesz - 1) << ATMEL_LCDC_HOZVAL_OFFSET;
316 value |= info->var.yres - 1; 355 value |= info->var.yres - 1;
356 dev_dbg(info->device, " * LCDFRMCFG = %08lx\n", value);
317 lcdc_writel(sinfo, ATMEL_LCDC_LCDFRMCFG, value); 357 lcdc_writel(sinfo, ATMEL_LCDC_LCDFRMCFG, value);
318 358
319 /* FIFO Threshold: Use formula from data sheet */ 359 /* FIFO Threshold: Use formula from data sheet */
@@ -421,6 +461,15 @@ static int atmel_lcdfb_setcolreg(unsigned int regno, unsigned int red,
421 ret = 0; 461 ret = 0;
422 } 462 }
423 break; 463 break;
464
465 case FB_VISUAL_MONO01:
466 if (regno < 2) {
467 val = (regno == 0) ? 0x00 : 0x1F;
468 lcdc_writel(sinfo, ATMEL_LCDC_LUT(regno), val);
469 ret = 0;
470 }
471 break;
472
424 } 473 }
425 474
426 return ret; 475 return ret;
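
The pixel-clock hunk above replaces the open-coded round-up with DIV_ROUND_UP and then writes the adjusted rate back into var->pixclock. A stand-alone sketch of that arithmetic with made-up clock numbers (60 MHz LCDC clock, 7.5 MHz requested pixel clock), not taken from any particular board:

/* Sketch of the CLKVAL computation: divider = DIV_ROUND_UP(clk, pixclk),
 * CLKVAL = divider/2 - 1, effective pixel clock = clk / (2 * (CLKVAL + 1)).
 * All numbers below are hypothetical.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long clk_khz = 60000;	/* assumed LCDC input clock */
	unsigned long pix_khz = 7500;	/* assumed requested pixel clock */
	long value = DIV_ROUND_UP(clk_khz, pix_khz);	/* 8 */

	value = (value / 2) - 1;			/* CLKVAL = 3 */
	if (value <= 0)
		printf("bypass the pixel clock divider\n");
	else
		printf("CLKVAL=%ld, effective pixel clock %lu kHz\n",
		       value, clk_khz / (2UL * (value + 1)));
	return 0;
}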
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index f46fe95f69fb..d18b73aafa0d 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -187,7 +187,11 @@ static void vgacon_scrollback_init(int pitch)
187 } 187 }
188} 188}
189 189
190static void vgacon_scrollback_startup(void) 190/*
191 * Called only during init so the call to alloc_bootmem is ok.
192 * Marked __init_refok to silence modpost.
193 */
194static void __init_refok vgacon_scrollback_startup(void)
191{ 195{
192 vgacon_scrollback = alloc_bootmem(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE 196 vgacon_scrollback = alloc_bootmem(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE
193 * 1024); 197 * 1024);
diff --git a/drivers/video/ps3fb.c b/drivers/video/ps3fb.c
index 3972aa8cf859..646ec823c168 100644
--- a/drivers/video/ps3fb.c
+++ b/drivers/video/ps3fb.c
@@ -1067,7 +1067,7 @@ static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
1067 info->fix.smem_len = ps3fb_videomemory.size - offset; 1067 info->fix.smem_len = ps3fb_videomemory.size - offset;
1068 info->pseudo_palette = info->par; 1068 info->pseudo_palette = info->par;
1069 info->par = NULL; 1069 info->par = NULL;
1070 info->flags = FBINFO_FLAG_DEFAULT; 1070 info->flags = FBINFO_DEFAULT | FBINFO_READS_FAST;
1071 1071
1072 retval = fb_alloc_cmap(&info->cmap, 256, 0); 1072 retval = fb_alloc_cmap(&info->cmap, 256, 0);
1073 if (retval < 0) 1073 if (retval < 0)
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index 0fe547842c64..41381e61832c 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -2146,7 +2146,7 @@ static void __devexit rivafb_remove(struct pci_dev *pd)
2146 * ------------------------------------------------------------------------- */ 2146 * ------------------------------------------------------------------------- */
2147 2147
2148#ifndef MODULE 2148#ifndef MODULE
2149static int __init rivafb_setup(char *options) 2149static int __devinit rivafb_setup(char *options)
2150{ 2150{
2151 char *this_opt; 2151 char *this_opt;
2152 2152
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index ba24cb2ff6ce..4482a0673b15 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -45,7 +45,7 @@
45 45
46static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs); 46static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs);
47static int load_elf_library(struct file *); 47static int load_elf_library(struct file *);
48static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int, unsigned long); 48static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int);
49 49
50/* 50/*
51 * If we don't support core dumping, then supply a NULL so we 51 * If we don't support core dumping, then supply a NULL so we
@@ -80,7 +80,7 @@ static struct linux_binfmt elf_format = {
80 .hasvdso = 1 80 .hasvdso = 1
81}; 81};
82 82
83#define BAD_ADDR(x) IS_ERR_VALUE(x) 83#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
84 84
85static int set_brk(unsigned long start, unsigned long end) 85static int set_brk(unsigned long start, unsigned long end)
86{ 86{
@@ -295,70 +295,33 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
295#ifndef elf_map 295#ifndef elf_map
296 296
297static unsigned long elf_map(struct file *filep, unsigned long addr, 297static unsigned long elf_map(struct file *filep, unsigned long addr,
298 struct elf_phdr *eppnt, int prot, int type, 298 struct elf_phdr *eppnt, int prot, int type)
299 unsigned long total_size)
300{ 299{
301 unsigned long map_addr; 300 unsigned long map_addr;
302 unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr); 301 unsigned long pageoffset = ELF_PAGEOFFSET(eppnt->p_vaddr);
303 unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
304 addr = ELF_PAGESTART(addr);
305 size = ELF_PAGEALIGN(size);
306 302
303 down_write(&current->mm->mmap_sem);
307 /* mmap() will return -EINVAL if given a zero size, but a 304 /* mmap() will return -EINVAL if given a zero size, but a
308 * segment with zero filesize is perfectly valid */ 305 * segment with zero filesize is perfectly valid */
309 if (!size) 306 if (eppnt->p_filesz + pageoffset)
310 return addr; 307 map_addr = do_mmap(filep, ELF_PAGESTART(addr),
311 308 eppnt->p_filesz + pageoffset, prot, type,
312 down_write(&current->mm->mmap_sem); 309 eppnt->p_offset - pageoffset);
313 /* 310 else
314 * total_size is the size of the ELF (interpreter) image. 311 map_addr = ELF_PAGESTART(addr);
315 * The _first_ mmap needs to know the full size, otherwise
316 * randomization might put this image into an overlapping
317 * position with the ELF binary image. (since size < total_size)
318 * So we first map the 'big' image - and unmap the remainder at
319 * the end. (which unmap is needed for ELF images with holes.)
320 */
321 if (total_size) {
322 total_size = ELF_PAGEALIGN(total_size);
323 map_addr = do_mmap(filep, addr, total_size, prot, type, off);
324 if (!BAD_ADDR(map_addr))
325 do_munmap(current->mm, map_addr+size, total_size-size);
326 } else
327 map_addr = do_mmap(filep, addr, size, prot, type, off);
328
329 up_write(&current->mm->mmap_sem); 312 up_write(&current->mm->mmap_sem);
330 return(map_addr); 313 return(map_addr);
331} 314}
332 315
333#endif /* !elf_map */ 316#endif /* !elf_map */
334 317
335static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
336{
337 int i, first_idx = -1, last_idx = -1;
338
339 for (i = 0; i < nr; i++) {
340 if (cmds[i].p_type == PT_LOAD) {
341 last_idx = i;
342 if (first_idx == -1)
343 first_idx = i;
344 }
345 }
346 if (first_idx == -1)
347 return 0;
348
349 return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
350 ELF_PAGESTART(cmds[first_idx].p_vaddr);
351}
352
353
354/* This is much more generalized than the library routine read function, 318/* This is much more generalized than the library routine read function,
355 so we keep this separate. Technically the library read function 319 so we keep this separate. Technically the library read function
356 is only provided so that we can read a.out libraries that have 320 is only provided so that we can read a.out libraries that have
357 an ELF header */ 321 an ELF header */
358 322
359static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex, 323static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
360 struct file *interpreter, unsigned long *interp_map_addr, 324 struct file *interpreter, unsigned long *interp_load_addr)
361 unsigned long no_base)
362{ 325{
363 struct elf_phdr *elf_phdata; 326 struct elf_phdr *elf_phdata;
364 struct elf_phdr *eppnt; 327 struct elf_phdr *eppnt;
@@ -366,7 +329,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
366 int load_addr_set = 0; 329 int load_addr_set = 0;
367 unsigned long last_bss = 0, elf_bss = 0; 330 unsigned long last_bss = 0, elf_bss = 0;
368 unsigned long error = ~0UL; 331 unsigned long error = ~0UL;
369 unsigned long total_size;
370 int retval, i, size; 332 int retval, i, size;
371 333
372 /* First of all, some simple consistency checks */ 334 /* First of all, some simple consistency checks */
@@ -405,12 +367,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
405 goto out_close; 367 goto out_close;
406 } 368 }
407 369
408 total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
409 if (!total_size) {
410 error = -EINVAL;
411 goto out_close;
412 }
413
414 eppnt = elf_phdata; 370 eppnt = elf_phdata;
415 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) { 371 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
416 if (eppnt->p_type == PT_LOAD) { 372 if (eppnt->p_type == PT_LOAD) {
@@ -428,14 +384,9 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
428 vaddr = eppnt->p_vaddr; 384 vaddr = eppnt->p_vaddr;
429 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) 385 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
430 elf_type |= MAP_FIXED; 386 elf_type |= MAP_FIXED;
431 else if (no_base && interp_elf_ex->e_type == ET_DYN)
432 load_addr = -vaddr;
433 387
434 map_addr = elf_map(interpreter, load_addr + vaddr, 388 map_addr = elf_map(interpreter, load_addr + vaddr,
435 eppnt, elf_prot, elf_type, total_size); 389 eppnt, elf_prot, elf_type);
436 total_size = 0;
437 if (!*interp_map_addr)
438 *interp_map_addr = map_addr;
439 error = map_addr; 390 error = map_addr;
440 if (BAD_ADDR(map_addr)) 391 if (BAD_ADDR(map_addr))
441 goto out_close; 392 goto out_close;
@@ -501,7 +452,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
501 goto out_close; 452 goto out_close;
502 } 453 }
503 454
504 error = load_addr; 455 *interp_load_addr = load_addr;
456 error = ((unsigned long)interp_elf_ex->e_entry) + load_addr;
505 457
506out_close: 458out_close:
507 kfree(elf_phdata); 459 kfree(elf_phdata);
@@ -598,8 +550,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
598 int elf_exec_fileno; 550 int elf_exec_fileno;
599 int retval, i; 551 int retval, i;
600 unsigned int size; 552 unsigned int size;
601 unsigned long elf_entry; 553 unsigned long elf_entry, interp_load_addr = 0;
602 unsigned long interp_load_addr = 0;
603 unsigned long start_code, end_code, start_data, end_data; 554 unsigned long start_code, end_code, start_data, end_data;
604 unsigned long reloc_func_desc = 0; 555 unsigned long reloc_func_desc = 0;
605 char passed_fileno[6]; 556 char passed_fileno[6];
@@ -863,7 +814,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
863 current->mm->start_stack = bprm->p; 814 current->mm->start_stack = bprm->p;
864 815
865 /* Now we do a little grungy work by mmaping the ELF image into 816 /* Now we do a little grungy work by mmaping the ELF image into
866 the correct location in memory. */ 817 the correct location in memory. At this point, we assume that
818 the image should be loaded at fixed address, not at a variable
819 address. */
867 for(i = 0, elf_ppnt = elf_phdata; 820 for(i = 0, elf_ppnt = elf_phdata;
868 i < loc->elf_ex.e_phnum; i++, elf_ppnt++) { 821 i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
869 int elf_prot = 0, elf_flags; 822 int elf_prot = 0, elf_flags;
@@ -917,15 +870,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
917 * default mmap base, as well as whatever program they 870 * default mmap base, as well as whatever program they
918 * might try to exec. This is because the brk will 871 * might try to exec. This is because the brk will
919 * follow the loader, and is not movable. */ 872 * follow the loader, and is not movable. */
920#ifdef CONFIG_X86
921 load_bias = 0;
922#else
923 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr); 873 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
924#endif
925 } 874 }
926 875
927 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, 876 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
928 elf_prot, elf_flags,0); 877 elf_prot, elf_flags);
929 if (BAD_ADDR(error)) { 878 if (BAD_ADDR(error)) {
930 send_sig(SIGKILL, current, 0); 879 send_sig(SIGKILL, current, 0);
931 retval = IS_ERR((void *)error) ? 880 retval = IS_ERR((void *)error) ?
@@ -1001,25 +950,13 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
1001 } 950 }
1002 951
1003 if (elf_interpreter) { 952 if (elf_interpreter) {
1004 if (interpreter_type == INTERPRETER_AOUT) { 953 if (interpreter_type == INTERPRETER_AOUT)
1005 elf_entry = load_aout_interp(&loc->interp_ex, 954 elf_entry = load_aout_interp(&loc->interp_ex,
1006 interpreter); 955 interpreter);
1007 } else { 956 else
1008 unsigned long uninitialized_var(interp_map_addr);
1009
1010 elf_entry = load_elf_interp(&loc->interp_elf_ex, 957 elf_entry = load_elf_interp(&loc->interp_elf_ex,
1011 interpreter, 958 interpreter,
1012 &interp_map_addr, 959 &interp_load_addr);
1013 load_bias);
1014 if (!BAD_ADDR(elf_entry)) {
1015 /*
1016 * load_elf_interp() returns relocation
1017 * adjustment
1018 */
1019 interp_load_addr = elf_entry;
1020 elf_entry += loc->interp_elf_ex.e_entry;
1021 }
1022 }
1023 if (BAD_ADDR(elf_entry)) { 960 if (BAD_ADDR(elf_entry)) {
1024 force_sig(SIGSEGV, current); 961 force_sig(SIGSEGV, current);
1025 retval = IS_ERR((void *)elf_entry) ? 962 retval = IS_ERR((void *)elf_entry) ?
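
The elf_map() restored by this revert maps a PT_LOAD segment by rounding the virtual address down to a page boundary and widening the length and file offset by the same page offset. A small sketch of that rounding, with the generic 4 KiB macros written out and a hypothetical program header; in the kernel ELF_MIN_ALIGN comes from ELF_EXEC_PAGESIZE, so 4096 is an assumption here.

/* Page rounding as done by elf_map() above; segment values are made up. */
#include <stdio.h>

#define ELF_MIN_ALIGN		4096UL
#define ELF_PAGESTART(v)	((v) & ~(ELF_MIN_ALIGN - 1))
#define ELF_PAGEOFFSET(v)	((v) & (ELF_MIN_ALIGN - 1))

int main(void)
{
	unsigned long p_vaddr  = 0x08048074UL;	/* hypothetical PT_LOAD fields */
	unsigned long p_offset = 0x00000074UL;
	unsigned long p_filesz = 0x00001000UL;
	unsigned long off = ELF_PAGEOFFSET(p_vaddr);

	printf("mmap at 0x%lx, length 0x%lx, file offset 0x%lx\n",
	       ELF_PAGESTART(p_vaddr), p_filesz + off, p_offset - off);
	return 0;
}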
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 8e61236abf4a..f89ff083079b 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -86,7 +86,6 @@ const struct file_operations coda_dir_operations = {
86 .read = generic_read_dir, 86 .read = generic_read_dir,
87 .readdir = coda_readdir, 87 .readdir = coda_readdir,
88 .open = coda_open, 88 .open = coda_open,
89 .flush = coda_flush,
90 .release = coda_release, 89 .release = coda_release,
91 .fsync = coda_fsync, 90 .fsync = coda_fsync,
92}; 91};
diff --git a/fs/coda/file.c b/fs/coda/file.c
index 7594962604c2..29137ff3ca67 100644
--- a/fs/coda/file.c
+++ b/fs/coda/file.c
@@ -25,10 +25,6 @@
25 25
26#include "coda_int.h" 26#include "coda_int.h"
27 27
28/* if CODA_STORE fails with EOPNOTSUPP, venus clearly doesn't support
29 * CODA_STORE/CODA_RELEASE and we fall back on using the CODA_CLOSE upcall */
30static int use_coda_close;
31
32static ssize_t 28static ssize_t
33coda_file_read(struct file *coda_file, char __user *buf, size_t count, loff_t *ppos) 29coda_file_read(struct file *coda_file, char __user *buf, size_t count, loff_t *ppos)
34{ 30{
@@ -163,47 +159,6 @@ int coda_open(struct inode *coda_inode, struct file *coda_file)
163 return 0; 159 return 0;
164} 160}
165 161
166int coda_flush(struct file *coda_file, fl_owner_t id)
167{
168 unsigned short flags = coda_file->f_flags & ~O_EXCL;
169 unsigned short coda_flags = coda_flags_to_cflags(flags);
170 struct coda_file_info *cfi;
171 struct inode *coda_inode;
172 int err = 0, fcnt;
173
174 lock_kernel();
175
176 /* last close semantics */
177 fcnt = file_count(coda_file);
178 if (fcnt > 1)
179 goto out;
180
181 /* No need to make an upcall when we have not made any modifications
182 * to the file */
183 if ((coda_file->f_flags & O_ACCMODE) == O_RDONLY)
184 goto out;
185
186 if (use_coda_close)
187 goto out;
188
189 cfi = CODA_FTOC(coda_file);
190 BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
191
192 coda_inode = coda_file->f_path.dentry->d_inode;
193
194 err = venus_store(coda_inode->i_sb, coda_i2f(coda_inode), coda_flags,
195 coda_file->f_uid);
196
197 if (err == -EOPNOTSUPP) {
198 use_coda_close = 1;
199 err = 0;
200 }
201
202out:
203 unlock_kernel();
204 return err;
205}
206
207int coda_release(struct inode *coda_inode, struct file *coda_file) 162int coda_release(struct inode *coda_inode, struct file *coda_file)
208{ 163{
209 unsigned short flags = (coda_file->f_flags) & (~O_EXCL); 164 unsigned short flags = (coda_file->f_flags) & (~O_EXCL);
@@ -215,21 +170,11 @@ int coda_release(struct inode *coda_inode, struct file *coda_file)
215 170
216 lock_kernel(); 171 lock_kernel();
217 172
218 if (!use_coda_close) {
219 err = venus_release(coda_inode->i_sb, coda_i2f(coda_inode),
220 coda_flags);
221 if (err == -EOPNOTSUPP) {
222 use_coda_close = 1;
223 err = 0;
224 }
225 }
226
227 cfi = CODA_FTOC(coda_file); 173 cfi = CODA_FTOC(coda_file);
228 BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC); 174 BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
229 175
230 if (use_coda_close) 176 err = venus_close(coda_inode->i_sb, coda_i2f(coda_inode),
231 err = venus_close(coda_inode->i_sb, coda_i2f(coda_inode), 177 coda_flags, coda_file->f_uid);
232 coda_flags, coda_file->f_uid);
233 178
234 host_inode = cfi->cfi_container->f_path.dentry->d_inode; 179 host_inode = cfi->cfi_container->f_path.dentry->d_inode;
235 cii = ITOC(coda_inode); 180 cii = ITOC(coda_inode);
@@ -246,7 +191,10 @@ int coda_release(struct inode *coda_inode, struct file *coda_file)
246 coda_file->private_data = NULL; 191 coda_file->private_data = NULL;
247 192
248 unlock_kernel(); 193 unlock_kernel();
249 return err; 194
195 /* VFS fput ignores the return value from file_operations->release, so
196 * there is no use returning an error here */
197 return 0;
250} 198}
251 199
252int coda_fsync(struct file *coda_file, struct dentry *coda_dentry, int datasync) 200int coda_fsync(struct file *coda_file, struct dentry *coda_dentry, int datasync)
@@ -288,7 +236,6 @@ const struct file_operations coda_file_operations = {
288 .write = coda_file_write, 236 .write = coda_file_write,
289 .mmap = coda_file_mmap, 237 .mmap = coda_file_mmap,
290 .open = coda_open, 238 .open = coda_open,
291 .flush = coda_flush,
292 .release = coda_release, 239 .release = coda_release,
293 .fsync = coda_fsync, 240 .fsync = coda_fsync,
294 .splice_read = coda_file_splice_read, 241 .splice_read = coda_file_splice_read,
diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c
index cd561d2e90b0..cdb4c07a7870 100644
--- a/fs/coda/upcall.c
+++ b/fs/coda/upcall.c
@@ -160,55 +160,8 @@ int venus_lookup(struct super_block *sb, struct CodaFid *fid,
160 return error; 160 return error;
161} 161}
162 162
163int venus_store(struct super_block *sb, struct CodaFid *fid, int flags,
164 vuid_t uid)
165{
166 union inputArgs *inp;
167 union outputArgs *outp;
168 int insize, outsize, error;
169#ifdef CONFIG_CODA_FS_OLD_API
170 struct coda_cred cred = { 0, };
171 cred.cr_fsuid = uid;
172#endif
173
174 insize = SIZE(store);
175 UPARG(CODA_STORE);
176
177#ifdef CONFIG_CODA_FS_OLD_API
178 memcpy(&(inp->ih.cred), &cred, sizeof(cred));
179#else
180 inp->ih.uid = uid;
181#endif
182
183 inp->coda_store.VFid = *fid;
184 inp->coda_store.flags = flags;
185
186 error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
187
188 CODA_FREE(inp, insize);
189 return error;
190}
191
192int venus_release(struct super_block *sb, struct CodaFid *fid, int flags)
193{
194 union inputArgs *inp;
195 union outputArgs *outp;
196 int insize, outsize, error;
197
198 insize = SIZE(release);
199 UPARG(CODA_RELEASE);
200
201 inp->coda_release.VFid = *fid;
202 inp->coda_release.flags = flags;
203
204 error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
205
206 CODA_FREE(inp, insize);
207 return error;
208}
209
210int venus_close(struct super_block *sb, struct CodaFid *fid, int flags, 163int venus_close(struct super_block *sb, struct CodaFid *fid, int flags,
211 vuid_t uid) 164 vuid_t uid)
212{ 165{
213 union inputArgs *inp; 166 union inputArgs *inp;
214 union outputArgs *outp; 167 union outputArgs *outp;
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 6ab8de40904c..2d295dda4c1d 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -1503,9 +1503,9 @@ static void exp_flags(struct seq_file *m, int flag, int fsid,
1503 if (flag & NFSEXP_FSID) 1503 if (flag & NFSEXP_FSID)
1504 seq_printf(m, ",fsid=%d", fsid); 1504 seq_printf(m, ",fsid=%d", fsid);
1505 if (anonu != (uid_t)-2 && anonu != (0x10000-2)) 1505 if (anonu != (uid_t)-2 && anonu != (0x10000-2))
1506 seq_printf(m, ",sanonuid=%d", anonu); 1506 seq_printf(m, ",anonuid=%u", anonu);
1507 if (anong != (gid_t)-2 && anong != (0x10000-2)) 1507 if (anong != (gid_t)-2 && anong != (0x10000-2))
1508 seq_printf(m, ",sanongid=%d", anong); 1508 seq_printf(m, ",anongid=%u", anong);
1509 if (fsloc && fsloc->locations_count > 0) { 1509 if (fsloc && fsloc->locations_count > 0) {
1510 char *loctype = (fsloc->migrated) ? "refer" : "replicas"; 1510 char *loctype = (fsloc->migrated) ? "refer" : "replicas";
1511 int i; 1511 int i;
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index f133afebed7a..bee251cb87c8 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -507,7 +507,8 @@ static int show_stat(struct seq_file *p, void *v)
507 } 507 }
508 seq_printf(p, "intr %llu", (unsigned long long)sum); 508 seq_printf(p, "intr %llu", (unsigned long long)sum);
509 509
510#if !defined(CONFIG_PPC64) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64) 510#ifndef CONFIG_SMP
511 /* Touches too many cache lines on SMP setups */
511 for (i = 0; i < NR_IRQS; i++) 512 for (i = 0; i < NR_IRQS; i++)
512 seq_printf(p, " %u", per_irq_sum[i]); 513 seq_printf(p, " %u", per_irq_sum[i]);
513#endif 514#endif
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index ef48d094dd2b..276f7207a564 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -70,9 +70,9 @@ static inline int find_next_one_bit(void *addr, int size, int offset)
70 if (!size) 70 if (!size)
71 return result; 71 return result;
72 tmp = leBPL_to_cpup(p); 72 tmp = leBPL_to_cpup(p);
73 found_first: 73found_first:
74 tmp &= ~0UL >> (BITS_PER_LONG - size); 74 tmp &= ~0UL >> (BITS_PER_LONG - size);
75 found_middle: 75found_middle:
76 return result + ffz(~tmp); 76 return result + ffz(~tmp);
77} 77}
78 78
@@ -110,11 +110,11 @@ static int __load_block_bitmap(struct super_block *sb,
110 nr_groups); 110 nr_groups);
111 } 111 }
112 112
113 if (bitmap->s_block_bitmap[block_group]) 113 if (bitmap->s_block_bitmap[block_group]) {
114 return block_group; 114 return block_group;
115 else { 115 } else {
116 retval = 116 retval = read_block_bitmap(sb, bitmap, block_group,
117 read_block_bitmap(sb, bitmap, block_group, block_group); 117 block_group);
118 if (retval < 0) 118 if (retval < 0)
119 return retval; 119 return retval;
120 return block_group; 120 return block_group;
@@ -155,22 +155,16 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
155 155
156 mutex_lock(&sbi->s_alloc_mutex); 156 mutex_lock(&sbi->s_alloc_mutex);
157 if (bloc.logicalBlockNum < 0 || 157 if (bloc.logicalBlockNum < 0 ||
158 (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, 158 (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) {
159 bloc. 159 udf_debug("%d < %d || %d + %d > %d\n",
160 partitionReferenceNum)) 160 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
161 { 161 UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
162 udf_debug("%d < %d || %d + %d > %d\n", bloc.logicalBlockNum, 0,
163 bloc.logicalBlockNum, count, UDF_SB_PARTLEN(sb,
164 bloc.
165 partitionReferenceNum));
166 goto error_return; 162 goto error_return;
167 } 163 }
168 164
169 block = 165 block = bloc.logicalBlockNum + offset + (sizeof(struct spaceBitmapDesc) << 3);
170 bloc.logicalBlockNum + offset +
171 (sizeof(struct spaceBitmapDesc) << 3);
172 166
173 do_more: 167do_more:
174 overflow = 0; 168 overflow = 0;
175 block_group = block >> (sb->s_blocksize_bits + 3); 169 block_group = block >> (sb->s_blocksize_bits + 3);
176 bit = block % (sb->s_blocksize << 3); 170 bit = block % (sb->s_blocksize << 3);
@@ -190,18 +184,13 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
190 for (i = 0; i < count; i++) { 184 for (i = 0; i < count; i++) {
191 if (udf_set_bit(bit + i, bh->b_data)) { 185 if (udf_set_bit(bit + i, bh->b_data)) {
192 udf_debug("bit %ld already set\n", bit + i); 186 udf_debug("bit %ld already set\n", bit + i);
193 udf_debug("byte=%2x\n", 187 udf_debug("byte=%2x\n", ((char *)bh->b_data)[(bit + i) >> 3]);
194 ((char *)bh->b_data)[(bit + i) >> 3]);
195 } else { 188 } else {
196 if (inode) 189 if (inode)
197 DQUOT_FREE_BLOCK(inode, 1); 190 DQUOT_FREE_BLOCK(inode, 1);
198 if (UDF_SB_LVIDBH(sb)) { 191 if (UDF_SB_LVIDBH(sb)) {
199 UDF_SB_LVID(sb)-> 192 UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
200 freeSpaceTable[UDF_SB_PARTITION(sb)] = 193 cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)]) + 1);
201 cpu_to_le32(le32_to_cpu
202 (UDF_SB_LVID(sb)->
203 freeSpaceTable[UDF_SB_PARTITION
204 (sb)]) + 1);
205 } 194 }
206 } 195 }
207 } 196 }
@@ -211,7 +200,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
211 count = overflow; 200 count = overflow;
212 goto do_more; 201 goto do_more;
213 } 202 }
214 error_return: 203error_return:
215 sb->s_dirt = 1; 204 sb->s_dirt = 1;
216 if (UDF_SB_LVIDBH(sb)) 205 if (UDF_SB_LVIDBH(sb))
217 mark_buffer_dirty(UDF_SB_LVIDBH(sb)); 206 mark_buffer_dirty(UDF_SB_LVIDBH(sb));
@@ -238,7 +227,7 @@ static int udf_bitmap_prealloc_blocks(struct super_block *sb,
238 if (first_block + block_count > UDF_SB_PARTLEN(sb, partition)) 227 if (first_block + block_count > UDF_SB_PARTLEN(sb, partition))
239 block_count = UDF_SB_PARTLEN(sb, partition) - first_block; 228 block_count = UDF_SB_PARTLEN(sb, partition) - first_block;
240 229
241 repeat: 230repeat:
242 nr_groups = (UDF_SB_PARTLEN(sb, partition) + 231 nr_groups = (UDF_SB_PARTLEN(sb, partition) +
243 (sizeof(struct spaceBitmapDesc) << 3) + 232 (sizeof(struct spaceBitmapDesc) << 3) +
244 (sb->s_blocksize * 8) - 1) / (sb->s_blocksize * 8); 233 (sb->s_blocksize * 8) - 1) / (sb->s_blocksize * 8);
@@ -254,11 +243,11 @@ static int udf_bitmap_prealloc_blocks(struct super_block *sb,
254 bit = block % (sb->s_blocksize << 3); 243 bit = block % (sb->s_blocksize << 3);
255 244
256 while (bit < (sb->s_blocksize << 3) && block_count > 0) { 245 while (bit < (sb->s_blocksize << 3) && block_count > 0) {
257 if (!udf_test_bit(bit, bh->b_data)) 246 if (!udf_test_bit(bit, bh->b_data)) {
258 goto out; 247 goto out;
259 else if (DQUOT_PREALLOC_BLOCK(inode, 1)) 248 } else if (DQUOT_PREALLOC_BLOCK(inode, 1)) {
260 goto out; 249 goto out;
261 else if (!udf_clear_bit(bit, bh->b_data)) { 250 } else if (!udf_clear_bit(bit, bh->b_data)) {
262 udf_debug("bit already cleared for block %d\n", bit); 251 udf_debug("bit already cleared for block %d\n", bit);
263 DQUOT_FREE_BLOCK(inode, 1); 252 DQUOT_FREE_BLOCK(inode, 1);
264 goto out; 253 goto out;
@@ -271,12 +260,10 @@ static int udf_bitmap_prealloc_blocks(struct super_block *sb,
271 mark_buffer_dirty(bh); 260 mark_buffer_dirty(bh);
272 if (block_count > 0) 261 if (block_count > 0)
273 goto repeat; 262 goto repeat;
274 out: 263out:
275 if (UDF_SB_LVIDBH(sb)) { 264 if (UDF_SB_LVIDBH(sb)) {
276 UDF_SB_LVID(sb)->freeSpaceTable[partition] = 265 UDF_SB_LVID(sb)->freeSpaceTable[partition] =
277 cpu_to_le32(le32_to_cpu 266 cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - alloc_count);
278 (UDF_SB_LVID(sb)->freeSpaceTable[partition]) -
279 alloc_count);
280 mark_buffer_dirty(UDF_SB_LVIDBH(sb)); 267 mark_buffer_dirty(UDF_SB_LVIDBH(sb));
281 } 268 }
282 sb->s_dirt = 1; 269 sb->s_dirt = 1;
@@ -299,7 +286,7 @@ static int udf_bitmap_new_block(struct super_block *sb,
299 *err = -ENOSPC; 286 *err = -ENOSPC;
300 mutex_lock(&sbi->s_alloc_mutex); 287 mutex_lock(&sbi->s_alloc_mutex);
301 288
302 repeat: 289repeat:
303 if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition)) 290 if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
304 goal = 0; 291 goal = 0;
305 292
@@ -312,31 +299,27 @@ static int udf_bitmap_new_block(struct super_block *sb,
312 if (bitmap_nr < 0) 299 if (bitmap_nr < 0)
313 goto error_return; 300 goto error_return;
314 bh = bitmap->s_block_bitmap[bitmap_nr]; 301 bh = bitmap->s_block_bitmap[bitmap_nr];
315 ptr = 302 ptr = memscan((char *)bh->b_data + group_start, 0xFF,
316 memscan((char *)bh->b_data + group_start, 0xFF, 303 sb->s_blocksize - group_start);
317 sb->s_blocksize - group_start);
318 304
319 if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) { 305 if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
320 bit = block % (sb->s_blocksize << 3); 306 bit = block % (sb->s_blocksize << 3);
321 307 if (udf_test_bit(bit, bh->b_data))
322 if (udf_test_bit(bit, bh->b_data)) {
323 goto got_block; 308 goto got_block;
324 } 309
325 end_goal = (bit + 63) & ~63; 310 end_goal = (bit + 63) & ~63;
326 bit = udf_find_next_one_bit(bh->b_data, end_goal, bit); 311 bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
327 if (bit < end_goal) 312 if (bit < end_goal)
328 goto got_block; 313 goto got_block;
329 ptr = 314
330 memscan((char *)bh->b_data + (bit >> 3), 0xFF, 315 ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF, sb->s_blocksize - ((bit + 7) >> 3));
331 sb->s_blocksize - ((bit + 7) >> 3));
332 newbit = (ptr - ((char *)bh->b_data)) << 3; 316 newbit = (ptr - ((char *)bh->b_data)) << 3;
333 if (newbit < sb->s_blocksize << 3) { 317 if (newbit < sb->s_blocksize << 3) {
334 bit = newbit; 318 bit = newbit;
335 goto search_back; 319 goto search_back;
336 } 320 }
337 newbit = 321
338 udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, 322 newbit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, bit);
339 bit);
340 if (newbit < sb->s_blocksize << 3) { 323 if (newbit < sb->s_blocksize << 3) {
341 bit = newbit; 324 bit = newbit;
342 goto got_block; 325 goto got_block;
@@ -354,18 +337,16 @@ static int udf_bitmap_new_block(struct super_block *sb,
354 goto error_return; 337 goto error_return;
355 bh = bitmap->s_block_bitmap[bitmap_nr]; 338 bh = bitmap->s_block_bitmap[bitmap_nr];
356 if (i < nr_groups) { 339 if (i < nr_groups) {
357 ptr = 340 ptr = memscan((char *)bh->b_data + group_start, 0xFF,
358 memscan((char *)bh->b_data + group_start, 0xFF, 341 sb->s_blocksize - group_start);
359 sb->s_blocksize - group_start);
360 if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) { 342 if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
361 bit = (ptr - ((char *)bh->b_data)) << 3; 343 bit = (ptr - ((char *)bh->b_data)) << 3;
362 break; 344 break;
363 } 345 }
364 } else { 346 } else {
365 bit = 347 bit = udf_find_next_one_bit((char *)bh->b_data,
366 udf_find_next_one_bit((char *)bh->b_data, 348 sb->s_blocksize << 3,
367 sb->s_blocksize << 3, 349 group_start << 3);
368 group_start << 3);
369 if (bit < sb->s_blocksize << 3) 350 if (bit < sb->s_blocksize << 3)
370 break; 351 break;
371 } 352 }
@@ -377,20 +358,17 @@ static int udf_bitmap_new_block(struct super_block *sb,
377 if (bit < sb->s_blocksize << 3) 358 if (bit < sb->s_blocksize << 3)
378 goto search_back; 359 goto search_back;
379 else 360 else
380 bit = 361 bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, group_start << 3);
381 udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
382 group_start << 3);
383 if (bit >= sb->s_blocksize << 3) { 362 if (bit >= sb->s_blocksize << 3) {
384 mutex_unlock(&sbi->s_alloc_mutex); 363 mutex_unlock(&sbi->s_alloc_mutex);
385 return 0; 364 return 0;
386 } 365 }
387 366
388 search_back: 367search_back:
389 for (i = 0; 368 for (i = 0; i < 7 && bit > (group_start << 3) && udf_test_bit(bit - 1, bh->b_data); i++, bit--)
390 i < 7 && bit > (group_start << 3) 369 ; /* empty loop */
391 && udf_test_bit(bit - 1, bh->b_data); i++, bit--) ;
392 370
393 got_block: 371got_block:
394 372
395 /* 373 /*
396 * Check quota for allocation of this block. 374 * Check quota for allocation of this block.
@@ -402,7 +380,7 @@ static int udf_bitmap_new_block(struct super_block *sb,
402 } 380 }
403 381
404 newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) - 382 newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
405 (sizeof(struct spaceBitmapDesc) << 3); 383 (sizeof(struct spaceBitmapDesc) << 3);
406 384
407 if (!udf_clear_bit(bit, bh->b_data)) { 385 if (!udf_clear_bit(bit, bh->b_data)) {
408 udf_debug("bit already cleared for block %d\n", bit); 386 udf_debug("bit already cleared for block %d\n", bit);
@@ -413,9 +391,7 @@ static int udf_bitmap_new_block(struct super_block *sb,
413 391
414 if (UDF_SB_LVIDBH(sb)) { 392 if (UDF_SB_LVIDBH(sb)) {
415 UDF_SB_LVID(sb)->freeSpaceTable[partition] = 393 UDF_SB_LVID(sb)->freeSpaceTable[partition] =
416 cpu_to_le32(le32_to_cpu 394 cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - 1);
417 (UDF_SB_LVID(sb)->freeSpaceTable[partition]) -
418 1);
419 mark_buffer_dirty(UDF_SB_LVIDBH(sb)); 395 mark_buffer_dirty(UDF_SB_LVIDBH(sb));
420 } 396 }
421 sb->s_dirt = 1; 397 sb->s_dirt = 1;
@@ -423,7 +399,7 @@ static int udf_bitmap_new_block(struct super_block *sb,
423 *err = 0; 399 *err = 0;
424 return newblock; 400 return newblock;
425 401
426 error_return: 402error_return:
427 *err = -EIO; 403 *err = -EIO;
428 mutex_unlock(&sbi->s_alloc_mutex); 404 mutex_unlock(&sbi->s_alloc_mutex);
429 return 0; 405 return 0;
@@ -445,14 +421,10 @@ static void udf_table_free_blocks(struct super_block *sb,
445 421
446 mutex_lock(&sbi->s_alloc_mutex); 422 mutex_lock(&sbi->s_alloc_mutex);
447 if (bloc.logicalBlockNum < 0 || 423 if (bloc.logicalBlockNum < 0 ||
448 (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, 424 (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) {
449 bloc. 425 udf_debug("%d < %d || %d + %d > %d\n",
450 partitionReferenceNum)) 426 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
451 { 427 UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
452 udf_debug("%d < %d || %d + %d > %d\n", bloc.logicalBlockNum, 0,
453 bloc.logicalBlockNum, count, UDF_SB_PARTLEN(sb,
454 bloc.
455 partitionReferenceNum));
456 goto error_return; 428 goto error_return;
457 } 429 }
458 430
@@ -462,9 +434,7 @@ static void udf_table_free_blocks(struct super_block *sb,
462 DQUOT_FREE_BLOCK(inode, count); 434 DQUOT_FREE_BLOCK(inode, count);
463 if (UDF_SB_LVIDBH(sb)) { 435 if (UDF_SB_LVIDBH(sb)) {
464 UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] = 436 UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
465 cpu_to_le32(le32_to_cpu 437 cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)]) + count);
466 (UDF_SB_LVID(sb)->
467 freeSpaceTable[UDF_SB_PARTITION(sb)]) + count);
468 mark_buffer_dirty(UDF_SB_LVIDBH(sb)); 438 mark_buffer_dirty(UDF_SB_LVIDBH(sb));
469 } 439 }
470 440
@@ -476,47 +446,28 @@ static void udf_table_free_blocks(struct super_block *sb,
476 epos.block = oepos.block = UDF_I_LOCATION(table); 446 epos.block = oepos.block = UDF_I_LOCATION(table);
477 epos.bh = oepos.bh = NULL; 447 epos.bh = oepos.bh = NULL;
478 448
479 while (count && (etype = 449 while (count &&
480 udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) { 450 (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
481 if (((eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits)) == 451 if (((eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits)) == start)) {
482 start)) { 452 if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits)) {
483 if ((0x3FFFFFFF - elen) < 453 count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
484 (count << sb->s_blocksize_bits)) { 454 start += ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
485 count -= 455 elen = (etype << 30) | (0x40000000 - sb->s_blocksize);
486 ((0x3FFFFFFF -
487 elen) >> sb->s_blocksize_bits);
488 start +=
489 ((0x3FFFFFFF -
490 elen) >> sb->s_blocksize_bits);
491 elen =
492 (etype << 30) | (0x40000000 -
493 sb->s_blocksize);
494 } else { 456 } else {
495 elen = (etype << 30) | 457 elen = (etype << 30) | (elen + (count << sb->s_blocksize_bits));
496 (elen + (count << sb->s_blocksize_bits));
497 start += count; 458 start += count;
498 count = 0; 459 count = 0;
499 } 460 }
500 udf_write_aext(table, &oepos, eloc, elen, 1); 461 udf_write_aext(table, &oepos, eloc, elen, 1);
501 } else if (eloc.logicalBlockNum == (end + 1)) { 462 } else if (eloc.logicalBlockNum == (end + 1)) {
502 if ((0x3FFFFFFF - elen) < 463 if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits)) {
503 (count << sb->s_blocksize_bits)) { 464 count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
504 count -= 465 end -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
505 ((0x3FFFFFFF - 466 eloc.logicalBlockNum -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
506 elen) >> sb->s_blocksize_bits); 467 elen = (etype << 30) | (0x40000000 - sb->s_blocksize);
507 end -=
508 ((0x3FFFFFFF -
509 elen) >> sb->s_blocksize_bits);
510 eloc.logicalBlockNum -=
511 ((0x3FFFFFFF -
512 elen) >> sb->s_blocksize_bits);
513 elen =
514 (etype << 30) | (0x40000000 -
515 sb->s_blocksize);
516 } else { 468 } else {
517 eloc.logicalBlockNum = start; 469 eloc.logicalBlockNum = start;
518 elen = (etype << 30) | 470 elen = (etype << 30) | (elen + (count << sb->s_blocksize_bits));
519 (elen + (count << sb->s_blocksize_bits));
520 end -= count; 471 end -= count;
521 count = 0; 472 count = 0;
522 } 473 }
@@ -530,21 +481,23 @@ static void udf_table_free_blocks(struct super_block *sb,
530 get_bh(epos.bh); 481 get_bh(epos.bh);
531 oepos.bh = epos.bh; 482 oepos.bh = epos.bh;
532 oepos.offset = 0; 483 oepos.offset = 0;
533 } else 484 } else {
534 oepos.offset = epos.offset; 485 oepos.offset = epos.offset;
486 }
535 } 487 }
536 488
537 if (count) { 489 if (count) {
538 /* NOTE: we CANNOT use udf_add_aext here, as it can try to allocate 490 /*
539 a new block, and since we hold the super block lock already 491 * NOTE: we CANNOT use udf_add_aext here, as it can try to allocate
540 very bad things would happen :) 492 * a new block, and since we hold the super block lock already
541 493 * very bad things would happen :)
542 We copy the behavior of udf_add_aext, but instead of 494 *
543 trying to allocate a new block close to the existing one, 495 * We copy the behavior of udf_add_aext, but instead of
544 we just steal a block from the extent we are trying to add. 496 * trying to allocate a new block close to the existing one,
545 497 * we just steal a block from the extent we are trying to add.
546 It would be nice if the blocks were close together, but it 498 *
547 isn't required. 499 * It would be nice if the blocks were close together, but it
500 * isn't required.
548 */ 501 */
549 502
550 int adsize; 503 int adsize;
@@ -553,13 +506,14 @@ static void udf_table_free_blocks(struct super_block *sb,
553 struct allocExtDesc *aed; 506 struct allocExtDesc *aed;
554 507
555 eloc.logicalBlockNum = start; 508 eloc.logicalBlockNum = start;
556 elen = EXT_RECORDED_ALLOCATED | (count << sb->s_blocksize_bits); 509 elen = EXT_RECORDED_ALLOCATED |
510 (count << sb->s_blocksize_bits);
557 511
558 if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT) 512 if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT) {
559 adsize = sizeof(short_ad); 513 adsize = sizeof(short_ad);
560 else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG) 514 } else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG) {
561 adsize = sizeof(long_ad); 515 adsize = sizeof(long_ad);
562 else { 516 } else {
563 brelse(oepos.bh); 517 brelse(oepos.bh);
564 brelse(epos.bh); 518 brelse(epos.bh);
565 goto error_return; 519 goto error_return;
@@ -577,28 +531,21 @@ static void udf_table_free_blocks(struct super_block *sb,
577 eloc.logicalBlockNum++; 531 eloc.logicalBlockNum++;
578 elen -= sb->s_blocksize; 532 elen -= sb->s_blocksize;
579 533
580 if (!(epos.bh = udf_tread(sb, 534 if (!(epos.bh = udf_tread(sb, udf_get_lb_pblock(sb, epos.block, 0)))) {
581 udf_get_lb_pblock(sb,
582 epos.block,
583 0)))) {
584 brelse(oepos.bh); 535 brelse(oepos.bh);
585 goto error_return; 536 goto error_return;
586 } 537 }
587 aed = (struct allocExtDesc *)(epos.bh->b_data); 538 aed = (struct allocExtDesc *)(epos.bh->b_data);
588 aed->previousAllocExtLocation = 539 aed->previousAllocExtLocation = cpu_to_le32(oepos.block.logicalBlockNum);
589 cpu_to_le32(oepos.block.logicalBlockNum);
590 if (epos.offset + adsize > sb->s_blocksize) { 540 if (epos.offset + adsize > sb->s_blocksize) {
591 loffset = epos.offset; 541 loffset = epos.offset;
592 aed->lengthAllocDescs = cpu_to_le32(adsize); 542 aed->lengthAllocDescs = cpu_to_le32(adsize);
593 sptr = UDF_I_DATA(inode) + epos.offset - 543 sptr = UDF_I_DATA(inode) + epos.offset -
594 udf_file_entry_alloc_offset(inode) + 544 udf_file_entry_alloc_offset(inode) +
595 UDF_I_LENEATTR(inode) - adsize; 545 UDF_I_LENEATTR(inode) - adsize;
596 dptr = 546 dptr = epos.bh->b_data + sizeof(struct allocExtDesc);
597 epos.bh->b_data +
598 sizeof(struct allocExtDesc);
599 memcpy(dptr, sptr, adsize); 547 memcpy(dptr, sptr, adsize);
600 epos.offset = 548 epos.offset = sizeof(struct allocExtDesc) + adsize;
601 sizeof(struct allocExtDesc) + adsize;
602 } else { 549 } else {
603 loffset = epos.offset + adsize; 550 loffset = epos.offset + adsize;
604 aed->lengthAllocDescs = cpu_to_le32(0); 551 aed->lengthAllocDescs = cpu_to_le32(0);
@@ -606,60 +553,46 @@ static void udf_table_free_blocks(struct super_block *sb,
606 epos.offset = sizeof(struct allocExtDesc); 553 epos.offset = sizeof(struct allocExtDesc);
607 554
608 if (oepos.bh) { 555 if (oepos.bh) {
609 aed = 556 aed = (struct allocExtDesc *)oepos.bh->b_data;
610 (struct allocExtDesc *)oepos.bh->
611 b_data;
612 aed->lengthAllocDescs = 557 aed->lengthAllocDescs =
613 cpu_to_le32(le32_to_cpu 558 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
614 (aed->
615 lengthAllocDescs) +
616 adsize);
617 } else { 559 } else {
618 UDF_I_LENALLOC(table) += adsize; 560 UDF_I_LENALLOC(table) += adsize;
619 mark_inode_dirty(table); 561 mark_inode_dirty(table);
620 } 562 }
621 } 563 }
622 if (UDF_SB_UDFREV(sb) >= 0x0200) 564 if (UDF_SB_UDFREV(sb) >= 0x0200)
623 udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 3, 565 udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 3, 1,
624 1, epos.block.logicalBlockNum, 566 epos.block.logicalBlockNum, sizeof(tag));
625 sizeof(tag));
626 else 567 else
627 udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 2, 568 udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 2, 1,
628 1, epos.block.logicalBlockNum, 569 epos.block.logicalBlockNum, sizeof(tag));
629 sizeof(tag)); 570
 		switch (UDF_I_ALLOCTYPE(table)) {
 		case ICBTAG_FLAG_AD_SHORT:
-		{
-			sad = (short_ad *) sptr;
-			sad->extLength =
-			    cpu_to_le32
-			    (EXT_NEXT_EXTENT_ALLOCDECS | sb->
-			     s_blocksize);
-			sad->extPosition =
-			    cpu_to_le32(epos.block.
-					logicalBlockNum);
+			sad = (short_ad *)sptr;
+			sad->extLength = cpu_to_le32(
+				EXT_NEXT_EXTENT_ALLOCDECS |
+				sb->s_blocksize);
+			sad->extPosition = cpu_to_le32(epos.block.logicalBlockNum);
 			break;
-		}
 		case ICBTAG_FLAG_AD_LONG:
-		{
-			lad = (long_ad *) sptr;
-			lad->extLength =
-			    cpu_to_le32
-			    (EXT_NEXT_EXTENT_ALLOCDECS | sb->
-			     s_blocksize);
-			lad->extLocation =
-			    cpu_to_lelb(epos.block);
+			lad = (long_ad *)sptr;
+			lad->extLength = cpu_to_le32(
+				EXT_NEXT_EXTENT_ALLOCDECS |
+				sb->s_blocksize);
+			lad->extLocation = cpu_to_lelb(epos.block);
 			break;
-		}
 		}
655 if (oepos.bh) { 587 if (oepos.bh) {
656 udf_update_tag(oepos.bh->b_data, loffset); 588 udf_update_tag(oepos.bh->b_data, loffset);
657 mark_buffer_dirty(oepos.bh); 589 mark_buffer_dirty(oepos.bh);
-		} else
+		} else {
 			mark_inode_dirty(table);
+		}
660 } 593 }
661 594
662 if (elen) { /* It's possible that stealing the block emptied the extent */ 595 if (elen) { /* It's possible that stealing the block emptied the extent */
663 udf_write_aext(table, &epos, eloc, elen, 1); 596 udf_write_aext(table, &epos, eloc, elen, 1);
664 597
665 if (!epos.bh) { 598 if (!epos.bh) {
@@ -668,9 +601,7 @@ static void udf_table_free_blocks(struct super_block *sb,
668 } else { 601 } else {
669 aed = (struct allocExtDesc *)epos.bh->b_data; 602 aed = (struct allocExtDesc *)epos.bh->b_data;
 			aed->lengthAllocDescs =
-			    cpu_to_le32(le32_to_cpu
-					(aed->lengthAllocDescs) +
-					adsize);
+				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
674 udf_update_tag(epos.bh->b_data, epos.offset); 605 udf_update_tag(epos.bh->b_data, epos.offset);
675 mark_buffer_dirty(epos.bh); 606 mark_buffer_dirty(epos.bh);
676 } 607 }
@@ -680,7 +611,7 @@ static void udf_table_free_blocks(struct super_block *sb,
680 brelse(epos.bh); 611 brelse(epos.bh);
681 brelse(oepos.bh); 612 brelse(oepos.bh);
682 613
683 error_return: 614error_return:
684 sb->s_dirt = 1; 615 sb->s_dirt = 1;
685 mutex_unlock(&sbi->s_alloc_mutex); 616 mutex_unlock(&sbi->s_alloc_mutex);
686 return; 617 return;
@@ -714,47 +645,36 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
714 epos.bh = NULL; 645 epos.bh = NULL;
715 eloc.logicalBlockNum = 0xFFFFFFFF; 646 eloc.logicalBlockNum = 0xFFFFFFFF;
716 647
-	while (first_block != eloc.logicalBlockNum && (etype =
-						       udf_next_aext(table,
-								     &epos,
-								     &eloc,
-								     &elen,
-								     1)) !=
-	       -1) {
+	while (first_block != eloc.logicalBlockNum &&
+	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
724 udf_debug("eloc=%d, elen=%d, first_block=%d\n", 650 udf_debug("eloc=%d, elen=%d, first_block=%d\n",
725 eloc.logicalBlockNum, elen, first_block); 651 eloc.logicalBlockNum, elen, first_block);
726 ; /* empty loop body */ 652 ; /* empty loop body */
727 } 653 }
728 654
729 if (first_block == eloc.logicalBlockNum) { 655 if (first_block == eloc.logicalBlockNum) {
730 epos.offset -= adsize; 656 epos.offset -= adsize;
731 657
732 alloc_count = (elen >> sb->s_blocksize_bits); 658 alloc_count = (elen >> sb->s_blocksize_bits);
-		if (inode
-		    && DQUOT_PREALLOC_BLOCK(inode,
-					    alloc_count >
-					    block_count ? block_count :
-					    alloc_count))
+		if (inode && DQUOT_PREALLOC_BLOCK(inode, alloc_count > block_count ? block_count : alloc_count)) {
 			alloc_count = 0;
-		else if (alloc_count > block_count) {
+		} else if (alloc_count > block_count) {
 			alloc_count = block_count;
 			eloc.logicalBlockNum += alloc_count;
 			elen -= (alloc_count << sb->s_blocksize_bits);
-			udf_write_aext(table, &epos, eloc, (etype << 30) | elen,
-				       1);
-		} else
-			udf_delete_aext(table, epos, eloc,
-					(etype << 30) | elen);
-	} else
+			udf_write_aext(table, &epos, eloc, (etype << 30) | elen, 1);
+		} else {
+			udf_delete_aext(table, epos, eloc, (etype << 30) | elen);
+		}
+	} else {
 		alloc_count = 0;
+	}
750 672
751 brelse(epos.bh); 673 brelse(epos.bh);
752 674
753 if (alloc_count && UDF_SB_LVIDBH(sb)) { 675 if (alloc_count && UDF_SB_LVIDBH(sb)) {
 		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
-		    cpu_to_le32(le32_to_cpu
-				(UDF_SB_LVID(sb)->freeSpaceTable[partition]) -
-				alloc_count);
+			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - alloc_count);
758 mark_buffer_dirty(UDF_SB_LVIDBH(sb)); 678 mark_buffer_dirty(UDF_SB_LVIDBH(sb));
759 sb->s_dirt = 1; 679 sb->s_dirt = 1;
760 } 680 }
@@ -797,18 +717,17 @@ static int udf_table_new_block(struct super_block *sb,
797 epos.block = UDF_I_LOCATION(table); 717 epos.block = UDF_I_LOCATION(table);
798 epos.bh = goal_epos.bh = NULL; 718 epos.bh = goal_epos.bh = NULL;
799 719
-	while (spread && (etype =
-			  udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
+	while (spread &&
+	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
 		if (goal >= eloc.logicalBlockNum) {
-			if (goal <
-			    eloc.logicalBlockNum +
-			    (elen >> sb->s_blocksize_bits))
+			if (goal < eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits))
 				nspread = 0;
 			else
 				nspread = goal - eloc.logicalBlockNum -
 					(elen >> sb->s_blocksize_bits);
-		} else
+		} else {
 			nspread = eloc.logicalBlockNum - goal;
+		}
812 731
813 if (nspread < spread) { 732 if (nspread < spread) {
814 spread = nspread; 733 spread = nspread;
@@ -856,9 +775,7 @@ static int udf_table_new_block(struct super_block *sb,
856 775
857 if (UDF_SB_LVIDBH(sb)) { 776 if (UDF_SB_LVIDBH(sb)) {
 		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
-		    cpu_to_le32(le32_to_cpu
-				(UDF_SB_LVID(sb)->freeSpaceTable[partition]) -
-				1);
+			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - 1);
862 mark_buffer_dirty(UDF_SB_LVIDBH(sb)); 779 mark_buffer_dirty(UDF_SB_LVIDBH(sb));
863 } 780 }
864 781
@@ -877,27 +794,23 @@ inline void udf_free_blocks(struct super_block *sb,
877 794
878 if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) { 795 if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
 		return udf_bitmap_free_blocks(sb, inode,
-					      UDF_SB_PARTMAPS(sb)[partition].
-					      s_uspace.s_bitmap, bloc, offset,
-					      count);
-	} else if (UDF_SB_PARTFLAGS(sb, partition) &
-		   UDF_PART_FLAG_UNALLOC_TABLE) {
+					      UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
+					      bloc, offset, count);
+	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) {
 		return udf_table_free_blocks(sb, inode,
-					     UDF_SB_PARTMAPS(sb)[partition].
-					     s_uspace.s_table, bloc, offset,
-					     count);
+					     UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
+					     bloc, offset, count);
 	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
 		return udf_bitmap_free_blocks(sb, inode,
-					      UDF_SB_PARTMAPS(sb)[partition].
-					      s_fspace.s_bitmap, bloc, offset,
-					      count);
+					      UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
+					      bloc, offset, count);
 	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
 		return udf_table_free_blocks(sb, inode,
-					     UDF_SB_PARTMAPS(sb)[partition].
-					     s_fspace.s_table, bloc, offset,
-					     count);
-	} else
+					     UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
+					     bloc, offset, count);
+	} else {
 		return;
+	}
901} 814}
902 815
903inline int udf_prealloc_blocks(struct super_block *sb, 816inline int udf_prealloc_blocks(struct super_block *sb,
@@ -907,29 +820,23 @@ inline int udf_prealloc_blocks(struct super_block *sb,
907{ 820{
908 if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) { 821 if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
 		return udf_bitmap_prealloc_blocks(sb, inode,
-						  UDF_SB_PARTMAPS(sb)
-						  [partition].s_uspace.s_bitmap,
-						  partition, first_block,
-						  block_count);
-	} else if (UDF_SB_PARTFLAGS(sb, partition) &
-		   UDF_PART_FLAG_UNALLOC_TABLE) {
+						  UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
+						  partition, first_block, block_count);
+	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) {
 		return udf_table_prealloc_blocks(sb, inode,
-						 UDF_SB_PARTMAPS(sb)[partition].
-						 s_uspace.s_table, partition,
-						 first_block, block_count);
+						 UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
+						 partition, first_block, block_count);
 	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
 		return udf_bitmap_prealloc_blocks(sb, inode,
-						  UDF_SB_PARTMAPS(sb)
-						  [partition].s_fspace.s_bitmap,
-						  partition, first_block,
-						  block_count);
+						  UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
+						  partition, first_block, block_count);
 	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
 		return udf_table_prealloc_blocks(sb, inode,
-						 UDF_SB_PARTMAPS(sb)[partition].
-						 s_fspace.s_table, partition,
-						 first_block, block_count);
-	} else
+						 UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
+						 partition, first_block, block_count);
+	} else {
 		return 0;
+	}
933} 840}
934 841
935inline int udf_new_block(struct super_block *sb, 842inline int udf_new_block(struct super_block *sb,
@@ -940,26 +847,21 @@ inline int udf_new_block(struct super_block *sb,
940 847
941 if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) { 848 if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
 		ret = udf_bitmap_new_block(sb, inode,
-					   UDF_SB_PARTMAPS(sb)[partition].
-					   s_uspace.s_bitmap, partition, goal,
-					   err);
+					   UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
+					   partition, goal, err);
 		return ret;
-	} else if (UDF_SB_PARTFLAGS(sb, partition) &
-		   UDF_PART_FLAG_UNALLOC_TABLE) {
+	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) {
 		return udf_table_new_block(sb, inode,
-					   UDF_SB_PARTMAPS(sb)[partition].
-					   s_uspace.s_table, partition, goal,
-					   err);
+					   UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
+					   partition, goal, err);
 	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
 		return udf_bitmap_new_block(sb, inode,
-					    UDF_SB_PARTMAPS(sb)[partition].
-					    s_fspace.s_bitmap, partition, goal,
-					    err);
+					    UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
+					    partition, goal, err);
 	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
 		return udf_table_new_block(sb, inode,
-					   UDF_SB_PARTMAPS(sb)[partition].
-					   s_fspace.s_table, partition, goal,
-					   err);
+					   UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
+					   partition, goal, err);
963 } else { 865 } else {
964 *err = -EIO; 866 *err = -EIO;
965 return 0; 867 return 0;
diff --git a/fs/udf/crc.c b/fs/udf/crc.c
index ae3d49790941..85aaee5fab26 100644
--- a/fs/udf/crc.c
+++ b/fs/udf/crc.c
@@ -111,7 +111,7 @@ int main(void)
111 return 0; 111 return 0;
112} 112}
113 113
114#endif /* defined(TEST) */ 114#endif /* defined(TEST) */
115 115
116/****************************************************************************/ 116/****************************************************************************/
117#if defined(GENERATE) 117#if defined(GENERATE)
@@ -169,4 +169,4 @@ int main(int argc, char **argv)
169 return 0; 169 return 0;
170} 170}
171 171
172#endif /* defined(GENERATE) */ 172#endif /* defined(GENERATE) */
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index 79bab9fe120c..9e3b9f97ddbc 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -43,10 +43,10 @@ static int do_udf_readdir(struct inode *, struct file *, filldir_t, void *);
43/* readdir and lookup functions */ 43/* readdir and lookup functions */
44 44
45const struct file_operations udf_dir_operations = { 45const struct file_operations udf_dir_operations = {
46 .read = generic_read_dir, 46 .read = generic_read_dir,
47 .readdir = udf_readdir, 47 .readdir = udf_readdir,
48 .ioctl = udf_ioctl, 48 .ioctl = udf_ioctl,
49 .fsync = udf_fsync_file, 49 .fsync = udf_fsync_file,
50}; 50};
51 51
52/* 52/*
@@ -83,8 +83,7 @@ int udf_readdir(struct file *filp, void *dirent, filldir_t filldir)
83 lock_kernel(); 83 lock_kernel();
84 84
85 if (filp->f_pos == 0) { 85 if (filp->f_pos == 0) {
-		if (filldir(dirent, ".", 1, filp->f_pos, dir->i_ino, DT_DIR) <
-		    0) {
+		if (filldir(dirent, ".", 1, filp->f_pos, dir->i_ino, DT_DIR) < 0) {
88 unlock_kernel(); 87 unlock_kernel();
89 return 0; 88 return 0;
90 } 89 }
@@ -93,7 +92,7 @@ int udf_readdir(struct file *filp, void *dirent, filldir_t filldir)
93 92
94 result = do_udf_readdir(dir, filp, filldir, dirent); 93 result = do_udf_readdir(dir, filp, filldir, dirent);
95 unlock_kernel(); 94 unlock_kernel();
96 return result; 95 return result;
97} 96}
98 97
99static int 98static int
@@ -125,21 +124,20 @@ do_udf_readdir(struct inode *dir, struct file *filp, filldir_t filldir,
125 if (nf_pos == 0) 124 if (nf_pos == 0)
126 nf_pos = (udf_ext0_offset(dir) >> 2); 125 nf_pos = (udf_ext0_offset(dir) >> 2);
127 126
-	fibh.soffset = fibh.eoffset =
-	    (nf_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
-	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
+	fibh.soffset = fibh.eoffset = (nf_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
+	if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) {
 		fibh.sbh = fibh.ebh = NULL;
-	else if (inode_bmap(dir, nf_pos >> (dir->i_sb->s_blocksize_bits - 2),
-			    &epos, &eloc, &elen,
-			    &offset) == (EXT_RECORDED_ALLOCATED >> 30)) {
+	} else if (inode_bmap(dir, nf_pos >> (dir->i_sb->s_blocksize_bits - 2),
+			      &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30)) {
 		block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
 		if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
 			if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT)
 				epos.offset -= sizeof(short_ad);
 			else if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_LONG)
 				epos.offset -= sizeof(long_ad);
-		} else
+		} else {
 			offset = 0;
+		}
143 141
144 if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block))) { 142 if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block))) {
145 brelse(epos.bh); 143 brelse(epos.bh);
@@ -149,15 +147,11 @@ do_udf_readdir(struct inode *dir, struct file *filp, filldir_t filldir,
149 if (!(offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9)) - 1))) { 147 if (!(offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9)) - 1))) {
150 i = 16 >> (dir->i_sb->s_blocksize_bits - 9); 148 i = 16 >> (dir->i_sb->s_blocksize_bits - 9);
151 if (i + offset > (elen >> dir->i_sb->s_blocksize_bits)) 149 if (i + offset > (elen >> dir->i_sb->s_blocksize_bits))
-				i = (elen >> dir->i_sb->s_blocksize_bits) -
-				    offset;
+				i = (elen >> dir->i_sb->s_blocksize_bits) - offset;
 			for (num = 0; i > 0; i--) {
-				block =
-				    udf_get_lb_pblock(dir->i_sb, eloc,
-						      offset + i);
+				block = udf_get_lb_pblock(dir->i_sb, eloc, offset + i);
 				tmp = udf_tgetblk(dir->i_sb, block);
-				if (tmp && !buffer_uptodate(tmp)
-				    && !buffer_locked(tmp))
+				if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp))
161 bha[num++] = tmp; 155 bha[num++] = tmp;
162 else 156 else
163 brelse(tmp); 157 brelse(tmp);
@@ -178,7 +172,6 @@ do_udf_readdir(struct inode *dir, struct file *filp, filldir_t filldir,
178 172
179 fi = udf_fileident_read(dir, &nf_pos, &fibh, &cfi, &epos, &eloc, 173 fi = udf_fileident_read(dir, &nf_pos, &fibh, &cfi, &epos, &eloc,
180 &elen, &offset); 174 &elen, &offset);
181
182 if (!fi) { 175 if (!fi) {
183 if (fibh.sbh != fibh.ebh) 176 if (fibh.sbh != fibh.ebh)
184 brelse(fibh.ebh); 177 brelse(fibh.ebh);
@@ -190,19 +183,16 @@ do_udf_readdir(struct inode *dir, struct file *filp, filldir_t filldir,
190 liu = le16_to_cpu(cfi.lengthOfImpUse); 183 liu = le16_to_cpu(cfi.lengthOfImpUse);
191 lfi = cfi.lengthFileIdent; 184 lfi = cfi.lengthFileIdent;
192 185
-			if (fibh.sbh == fibh.ebh)
+			if (fibh.sbh == fibh.ebh) {
 				nameptr = fi->fileIdent + liu;
-			else {
+			} else {
 				int poffset;	/* Unpaded ending offset */
 
-				poffset =
-				    fibh.soffset + sizeof(struct fileIdentDesc) + liu +
-				    lfi;
+				poffset = fibh.soffset + sizeof(struct fileIdentDesc) + liu + lfi;
 
-				if (poffset >= lfi)
-					nameptr =
-					    (char *)(fibh.ebh->b_data + poffset - lfi);
-				else {
+				if (poffset >= lfi) {
+					nameptr = (char *)(fibh.ebh->b_data + poffset - lfi);
+				} else {
 					nameptr = fname;
 					memcpy(nameptr, fi->fileIdent + liu,
 					       lfi - poffset);
@@ -235,17 +225,15 @@ do_udf_readdir(struct inode *dir, struct file *filp, filldir_t filldir,
235 } 225 }
236 226
237 if (flen) { 227 if (flen) {
-			if (filldir
-			    (dirent, fname, flen, filp->f_pos, iblock,
-			     dt_type) < 0) {
+			if (filldir(dirent, fname, flen, filp->f_pos, iblock, dt_type) < 0) {
241 if (fibh.sbh != fibh.ebh) 229 if (fibh.sbh != fibh.ebh)
242 brelse(fibh.ebh); 230 brelse(fibh.ebh);
243 brelse(fibh.sbh); 231 brelse(fibh.sbh);
244 brelse(epos.bh); 232 brelse(epos.bh);
245 return 0; 233 return 0;
246 } 234 }
247 } 235 }
248 } /* end while */ 236 } /* end while */
249 237
250 filp->f_pos = nf_pos + 1; 238 filp->f_pos = nf_pos + 1;
251 239
diff --git a/fs/udf/directory.c b/fs/udf/directory.c
index 8adc77c1d579..ff8c08fd7bf5 100644
--- a/fs/udf/directory.c
+++ b/fs/udf/directory.c
@@ -31,7 +31,7 @@ static uint8_t *udf_filead_read(struct inode *dir, uint8_t * tmpad,
31 31
32 *error = 0; 32 *error = 0;
33 33
34 ad = (uint8_t *) (*bh)->b_data + *offset; 34 ad = (uint8_t *)(*bh)->b_data + *offset;
35 *offset += ad_size; 35 *offset += ad_size;
36 36
37 if (!ad) { 37 if (!ad) {
@@ -51,7 +51,7 @@ static uint8_t *udf_filead_read(struct inode *dir, uint8_t * tmpad,
51 ad = tmpad; 51 ad = tmpad;
52 52
53 remainder = dir->i_sb->s_blocksize - loffset; 53 remainder = dir->i_sb->s_blocksize - loffset;
54 memcpy((uint8_t *) ad, (*bh)->b_data + loffset, remainder); 54 memcpy((uint8_t *)ad, (*bh)->b_data + loffset, remainder);
55 55
56 brelse(*bh); 56 brelse(*bh);
57 block = udf_get_lb_pblock(dir->i_sb, fe_loc, ++*pos); 57 block = udf_get_lb_pblock(dir->i_sb, fe_loc, ++*pos);
@@ -60,10 +60,10 @@ static uint8_t *udf_filead_read(struct inode *dir, uint8_t * tmpad,
60 if (!((*bh) = udf_tread(dir->i_sb, block))) 60 if (!((*bh) = udf_tread(dir->i_sb, block)))
61 return NULL; 61 return NULL;
62 62
-		memcpy((uint8_t *) ad + remainder, (*bh)->b_data,
-		       ad_size - remainder);
+		memcpy((uint8_t *)ad + remainder, (*bh)->b_data, ad_size - remainder);
65 *offset = ad_size - remainder; 64 *offset = ad_size - remainder;
66 } 65 }
66
67 return ad; 67 return ad;
68} 68}
69#endif 69#endif
@@ -86,15 +86,13 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t * nf_pos,
86 (UDF_I_EFE(dir) ? 86 (UDF_I_EFE(dir) ?
87 sizeof(struct extendedFileEntry) : 87 sizeof(struct extendedFileEntry) :
88 sizeof(struct fileEntry)), 88 sizeof(struct fileEntry)),
-					   dir->i_sb->s_blocksize,
-					   &(fibh->eoffset));
-
+					   dir->i_sb->s_blocksize, &(fibh->eoffset));
92 if (!fi) 90 if (!fi)
93 return NULL; 91 return NULL;
94 92
95 *nf_pos += ((fibh->eoffset - fibh->soffset) >> 2); 93 *nf_pos += ((fibh->eoffset - fibh->soffset) >> 2);
96 94
97 memcpy((uint8_t *) cfi, (uint8_t *) fi, 95 memcpy((uint8_t *)cfi, (uint8_t *)fi,
98 sizeof(struct fileIdentDesc)); 96 sizeof(struct fileIdentDesc));
99 97
100 return fi; 98 return fi;
@@ -121,21 +119,14 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t * nf_pos,
121 return NULL; 119 return NULL;
122 fibh->soffset = fibh->eoffset = 0; 120 fibh->soffset = fibh->eoffset = 0;
123 121
-		if (!
-		    (*offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9)) - 1)))
-		{
+		if (!(*offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9)) - 1))) {
 			i = 16 >> (dir->i_sb->s_blocksize_bits - 9);
-			if (i + *offset >
-			    (*elen >> dir->i_sb->s_blocksize_bits))
-				i = (*elen >> dir->i_sb->s_blocksize_bits) -
-				    *offset;
+			if (i + *offset > (*elen >> dir->i_sb->s_blocksize_bits))
+				i = (*elen >> dir->i_sb->s_blocksize_bits)-*offset;
 			for (num = 0; i > 0; i--) {
-				block =
-				    udf_get_lb_pblock(dir->i_sb, *eloc,
-						      *offset + i);
+				block = udf_get_lb_pblock(dir->i_sb, *eloc, *offset + i);
 				tmp = udf_tgetblk(dir->i_sb, block);
-				if (tmp && !buffer_uptodate(tmp)
-				    && !buffer_locked(tmp))
+				if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp))
139 bha[num++] = tmp; 130 bha[num++] = tmp;
140 else 131 else
141 brelse(tmp); 132 brelse(tmp);
@@ -160,7 +151,7 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t * nf_pos,
160 *nf_pos += ((fibh->eoffset - fibh->soffset) >> 2); 151 *nf_pos += ((fibh->eoffset - fibh->soffset) >> 2);
161 152
162 if (fibh->eoffset <= dir->i_sb->s_blocksize) { 153 if (fibh->eoffset <= dir->i_sb->s_blocksize) {
163 memcpy((uint8_t *) cfi, (uint8_t *) fi, 154 memcpy((uint8_t *)cfi, (uint8_t *)fi,
164 sizeof(struct fileIdentDesc)); 155 sizeof(struct fileIdentDesc));
165 } else if (fibh->eoffset > dir->i_sb->s_blocksize) { 156 } else if (fibh->eoffset > dir->i_sb->s_blocksize) {
166 int lextoffset = epos->offset; 157 int lextoffset = epos->offset;
@@ -187,21 +178,17 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t * nf_pos,
187 if (sizeof(struct fileIdentDesc) > -fibh->soffset) { 178 if (sizeof(struct fileIdentDesc) > -fibh->soffset) {
188 int fi_len; 179 int fi_len;
189 180
190 memcpy((uint8_t *) cfi, (uint8_t *) fi, -fibh->soffset); 181 memcpy((uint8_t *)cfi, (uint8_t *)fi, -fibh->soffset);
-			memcpy((uint8_t *) cfi - fibh->soffset,
-			       fibh->ebh->b_data,
+			memcpy((uint8_t *)cfi - fibh->soffset, fibh->ebh->b_data,
 			       sizeof(struct fileIdentDesc) + fibh->soffset);
 
-			fi_len =
-			    (sizeof(struct fileIdentDesc) +
-			     cfi->lengthFileIdent +
-			     le16_to_cpu(cfi->lengthOfImpUse) + 3) & ~3;
+			fi_len = (sizeof(struct fileIdentDesc) + cfi->lengthFileIdent +
+				  le16_to_cpu(cfi->lengthOfImpUse) + 3) & ~3;
 
-			*nf_pos +=
-			    ((fi_len - (fibh->eoffset - fibh->soffset)) >> 2);
+			*nf_pos += ((fi_len - (fibh->eoffset - fibh->soffset)) >> 2);
202 fibh->eoffset = fibh->soffset + fi_len; 189 fibh->eoffset = fibh->soffset + fi_len;
203 } else { 190 } else {
204 memcpy((uint8_t *) cfi, (uint8_t *) fi, 191 memcpy((uint8_t *)cfi, (uint8_t *)fi,
205 sizeof(struct fileIdentDesc)); 192 sizeof(struct fileIdentDesc));
206 } 193 }
207 } 194 }
@@ -237,9 +224,10 @@ struct fileIdentDesc *udf_get_fileident(void *buffer, int bufsize, int *offset)
237 } 224 }
238 if ((*offset + sizeof(struct fileIdentDesc)) > bufsize) { 225 if ((*offset + sizeof(struct fileIdentDesc)) > bufsize) {
239 lengthThisIdent = sizeof(struct fileIdentDesc); 226 lengthThisIdent = sizeof(struct fileIdentDesc);
-	} else
+	} else {
 		lengthThisIdent = sizeof(struct fileIdentDesc) +
 			fi->lengthFileIdent + le16_to_cpu(fi->lengthOfImpUse);
+	}
243 231
244 /* we need to figure padding, too! */ 232 /* we need to figure padding, too! */
245 padlen = lengthThisIdent % UDF_NAME_PAD; 233 padlen = lengthThisIdent % UDF_NAME_PAD;
@@ -270,22 +258,20 @@ static extent_ad *udf_get_fileextent(void *buffer, int bufsize, int *offset)
270 return NULL; 258 return NULL;
271 } 259 }
272 260
-	ptr =
-	    (uint8_t *) (fe->extendedAttr) +
-	    le32_to_cpu(fe->lengthExtendedAttr);
+	ptr = (uint8_t *)(fe->extendedAttr) + le32_to_cpu(fe->lengthExtendedAttr);
276 262
277 if ((*offset > 0) && (*offset < le32_to_cpu(fe->lengthAllocDescs))) { 263 if ((*offset > 0) && (*offset < le32_to_cpu(fe->lengthAllocDescs))) {
278 ptr += *offset; 264 ptr += *offset;
279 } 265 }
280 266
281 ext = (extent_ad *) ptr; 267 ext = (extent_ad *)ptr;
282 268
283 *offset = *offset + sizeof(extent_ad); 269 *offset = *offset + sizeof(extent_ad);
284 return ext; 270 return ext;
285} 271}
286#endif 272#endif
287 273
288short_ad *udf_get_fileshortad(uint8_t * ptr, int maxoffset, int *offset, 274short_ad *udf_get_fileshortad(uint8_t *ptr, int maxoffset, int *offset,
289 int inc) 275 int inc)
290{ 276{
291 short_ad *sa; 277 short_ad *sa;
@@ -297,7 +283,7 @@ short_ad *udf_get_fileshortad(uint8_t * ptr, int maxoffset, int *offset,
297 283
298 if ((*offset < 0) || ((*offset + sizeof(short_ad)) > maxoffset)) 284 if ((*offset < 0) || ((*offset + sizeof(short_ad)) > maxoffset))
299 return NULL; 285 return NULL;
300 else if ((sa = (short_ad *) ptr)->extLength == 0) 286 else if ((sa = (short_ad *)ptr)->extLength == 0)
301 return NULL; 287 return NULL;
302 288
303 if (inc) 289 if (inc)
@@ -305,7 +291,7 @@ short_ad *udf_get_fileshortad(uint8_t * ptr, int maxoffset, int *offset,
305 return sa; 291 return sa;
306} 292}
307 293
308long_ad *udf_get_filelongad(uint8_t * ptr, int maxoffset, int *offset, int inc) 294long_ad *udf_get_filelongad(uint8_t *ptr, int maxoffset, int *offset, int inc)
309{ 295{
310 long_ad *la; 296 long_ad *la;
311 297
@@ -316,7 +302,7 @@ long_ad *udf_get_filelongad(uint8_t * ptr, int maxoffset, int *offset, int inc)
316 302
317 if ((*offset < 0) || ((*offset + sizeof(long_ad)) > maxoffset)) 303 if ((*offset < 0) || ((*offset + sizeof(long_ad)) > maxoffset))
318 return NULL; 304 return NULL;
319 else if ((la = (long_ad *) ptr)->extLength == 0) 305 else if ((la = (long_ad *)ptr)->extLength == 0)
320 return NULL; 306 return NULL;
321 307
322 if (inc) 308 if (inc)
diff --git a/fs/udf/ecma_167.h b/fs/udf/ecma_167.h
index 294ce2daa03a..56387711589b 100644
--- a/fs/udf/ecma_167.h
+++ b/fs/udf/ecma_167.h
@@ -39,8 +39,8 @@
39 39
40/* Character set specification (ECMA 167r3 1/7.2.1) */ 40/* Character set specification (ECMA 167r3 1/7.2.1) */
41typedef struct { 41typedef struct {
42 uint8_t charSetType; 42 uint8_t charSetType;
43 uint8_t charSetInfo[63]; 43 uint8_t charSetInfo[63];
44} __attribute__ ((packed)) charspec; 44} __attribute__ ((packed)) charspec;
45 45
46/* Character Set Type (ECMA 167r3 1/7.2.1.1) */ 46/* Character Set Type (ECMA 167r3 1/7.2.1.1) */
@@ -54,33 +54,33 @@ typedef struct {
54#define CHARSPEC_TYPE_CS7 0x07 /* (1/7.2.9) */ 54#define CHARSPEC_TYPE_CS7 0x07 /* (1/7.2.9) */
55#define CHARSPEC_TYPE_CS8 0x08 /* (1/7.2.10) */ 55#define CHARSPEC_TYPE_CS8 0x08 /* (1/7.2.10) */
56 56
57typedef uint8_t dstring; 57typedef uint8_t dstring;
58 58
59/* Timestamp (ECMA 167r3 1/7.3) */ 59/* Timestamp (ECMA 167r3 1/7.3) */
60typedef struct { 60typedef struct {
61 __le16 typeAndTimezone; 61 __le16 typeAndTimezone;
62 __le16 year; 62 __le16 year;
63 uint8_t month; 63 uint8_t month;
64 uint8_t day; 64 uint8_t day;
65 uint8_t hour; 65 uint8_t hour;
66 uint8_t minute; 66 uint8_t minute;
67 uint8_t second; 67 uint8_t second;
68 uint8_t centiseconds; 68 uint8_t centiseconds;
69 uint8_t hundredsOfMicroseconds; 69 uint8_t hundredsOfMicroseconds;
70 uint8_t microseconds; 70 uint8_t microseconds;
71} __attribute__ ((packed)) timestamp; 71} __attribute__ ((packed)) timestamp;
72 72
73typedef struct { 73typedef struct {
74 uint16_t typeAndTimezone; 74 uint16_t typeAndTimezone;
75 int16_t year; 75 int16_t year;
76 uint8_t month; 76 uint8_t month;
77 uint8_t day; 77 uint8_t day;
78 uint8_t hour; 78 uint8_t hour;
79 uint8_t minute; 79 uint8_t minute;
80 uint8_t second; 80 uint8_t second;
81 uint8_t centiseconds; 81 uint8_t centiseconds;
82 uint8_t hundredsOfMicroseconds; 82 uint8_t hundredsOfMicroseconds;
83 uint8_t microseconds; 83 uint8_t microseconds;
84} __attribute__ ((packed)) kernel_timestamp; 84} __attribute__ ((packed)) kernel_timestamp;
85 85
86/* Type and Time Zone (ECMA 167r3 1/7.3.1) */ 86/* Type and Time Zone (ECMA 167r3 1/7.3.1) */
@@ -92,9 +92,9 @@ typedef struct {
92 92
93/* Entity identifier (ECMA 167r3 1/7.4) */ 93/* Entity identifier (ECMA 167r3 1/7.4) */
94typedef struct { 94typedef struct {
95 uint8_t flags; 95 uint8_t flags;
96 uint8_t ident[23]; 96 uint8_t ident[23];
97 uint8_t identSuffix[8]; 97 uint8_t identSuffix[8];
98} __attribute__ ((packed)) regid; 98} __attribute__ ((packed)) regid;
99 99
100/* Flags (ECMA 167r3 1/7.4.1) */ 100/* Flags (ECMA 167r3 1/7.4.1) */
@@ -104,10 +104,10 @@ typedef struct {
104/* Volume Structure Descriptor (ECMA 167r3 2/9.1) */ 104/* Volume Structure Descriptor (ECMA 167r3 2/9.1) */
105#define VSD_STD_ID_LEN 5 105#define VSD_STD_ID_LEN 5
106struct volStructDesc { 106struct volStructDesc {
107 uint8_t structType; 107 uint8_t structType;
108 uint8_t stdIdent[VSD_STD_ID_LEN]; 108 uint8_t stdIdent[VSD_STD_ID_LEN];
109 uint8_t structVersion; 109 uint8_t structVersion;
110 uint8_t structData[2041]; 110 uint8_t structData[2041];
111} __attribute__ ((packed)); 111} __attribute__ ((packed));
112 112
113/* Standard Identifier (EMCA 167r2 2/9.1.2) */ 113/* Standard Identifier (EMCA 167r2 2/9.1.2) */
@@ -123,36 +123,36 @@ struct volStructDesc {
123 123
124/* Beginning Extended Area Descriptor (ECMA 167r3 2/9.2) */ 124/* Beginning Extended Area Descriptor (ECMA 167r3 2/9.2) */
125struct beginningExtendedAreaDesc { 125struct beginningExtendedAreaDesc {
126 uint8_t structType; 126 uint8_t structType;
127 uint8_t stdIdent[VSD_STD_ID_LEN]; 127 uint8_t stdIdent[VSD_STD_ID_LEN];
128 uint8_t structVersion; 128 uint8_t structVersion;
129 uint8_t structData[2041]; 129 uint8_t structData[2041];
130} __attribute__ ((packed)); 130} __attribute__ ((packed));
131 131
132/* Terminating Extended Area Descriptor (ECMA 167r3 2/9.3) */ 132/* Terminating Extended Area Descriptor (ECMA 167r3 2/9.3) */
133struct terminatingExtendedAreaDesc { 133struct terminatingExtendedAreaDesc {
134 uint8_t structType; 134 uint8_t structType;
135 uint8_t stdIdent[VSD_STD_ID_LEN]; 135 uint8_t stdIdent[VSD_STD_ID_LEN];
136 uint8_t structVersion; 136 uint8_t structVersion;
137 uint8_t structData[2041]; 137 uint8_t structData[2041];
138} __attribute__ ((packed)); 138} __attribute__ ((packed));
139 139
140/* Boot Descriptor (ECMA 167r3 2/9.4) */ 140/* Boot Descriptor (ECMA 167r3 2/9.4) */
141struct bootDesc { 141struct bootDesc {
142 uint8_t structType; 142 uint8_t structType;
143 uint8_t stdIdent[VSD_STD_ID_LEN]; 143 uint8_t stdIdent[VSD_STD_ID_LEN];
144 uint8_t structVersion; 144 uint8_t structVersion;
145 uint8_t reserved1; 145 uint8_t reserved1;
146 regid archType; 146 regid archType;
147 regid bootIdent; 147 regid bootIdent;
148 __le32 bootExtLocation; 148 __le32 bootExtLocation;
149 __le32 bootExtLength; 149 __le32 bootExtLength;
150 __le64 loadAddress; 150 __le64 loadAddress;
151 __le64 startAddress; 151 __le64 startAddress;
152 timestamp descCreationDateAndTime; 152 timestamp descCreationDateAndTime;
153 __le16 flags; 153 __le16 flags;
154 uint8_t reserved2[32]; 154 uint8_t reserved2[32];
155 uint8_t bootUse[1906]; 155 uint8_t bootUse[1906];
156} __attribute__ ((packed)); 156} __attribute__ ((packed));
157 157
158/* Flags (ECMA 167r3 2/9.4.12) */ 158/* Flags (ECMA 167r3 2/9.4.12) */
@@ -160,25 +160,25 @@ struct bootDesc {
160 160
161/* Extent Descriptor (ECMA 167r3 3/7.1) */ 161/* Extent Descriptor (ECMA 167r3 3/7.1) */
162typedef struct { 162typedef struct {
163 __le32 extLength; 163 __le32 extLength;
164 __le32 extLocation; 164 __le32 extLocation;
165} __attribute__ ((packed)) extent_ad; 165} __attribute__ ((packed)) extent_ad;
166 166
167typedef struct { 167typedef struct {
168 uint32_t extLength; 168 uint32_t extLength;
169 uint32_t extLocation; 169 uint32_t extLocation;
170} kernel_extent_ad; 170} kernel_extent_ad;
171 171
172/* Descriptor Tag (ECMA 167r3 3/7.2) */ 172/* Descriptor Tag (ECMA 167r3 3/7.2) */
173typedef struct { 173typedef struct {
174 __le16 tagIdent; 174 __le16 tagIdent;
175 __le16 descVersion; 175 __le16 descVersion;
176 uint8_t tagChecksum; 176 uint8_t tagChecksum;
177 uint8_t reserved; 177 uint8_t reserved;
178 __le16 tagSerialNum; 178 __le16 tagSerialNum;
179 __le16 descCRC; 179 __le16 descCRC;
180 __le16 descCRCLength; 180 __le16 descCRCLength;
181 __le32 tagLocation; 181 __le32 tagLocation;
182} __attribute__ ((packed)) tag; 182} __attribute__ ((packed)) tag;
183 183
184/* Tag Identifier (ECMA 167r3 3/7.2.1) */ 184/* Tag Identifier (ECMA 167r3 3/7.2.1) */
@@ -194,37 +194,37 @@ typedef struct {
194 194
195/* NSR Descriptor (ECMA 167r3 3/9.1) */ 195/* NSR Descriptor (ECMA 167r3 3/9.1) */
196struct NSRDesc { 196struct NSRDesc {
197 uint8_t structType; 197 uint8_t structType;
198 uint8_t stdIdent[VSD_STD_ID_LEN]; 198 uint8_t stdIdent[VSD_STD_ID_LEN];
199 uint8_t structVersion; 199 uint8_t structVersion;
200 uint8_t reserved; 200 uint8_t reserved;
201 uint8_t structData[2040]; 201 uint8_t structData[2040];
202} __attribute__ ((packed)); 202} __attribute__ ((packed));
203 203
204/* Primary Volume Descriptor (ECMA 167r3 3/10.1) */ 204/* Primary Volume Descriptor (ECMA 167r3 3/10.1) */
205struct primaryVolDesc { 205struct primaryVolDesc {
206 tag descTag; 206 tag descTag;
207 __le32 volDescSeqNum; 207 __le32 volDescSeqNum;
208 __le32 primaryVolDescNum; 208 __le32 primaryVolDescNum;
209 dstring volIdent[32]; 209 dstring volIdent[32];
210 __le16 volSeqNum; 210 __le16 volSeqNum;
211 __le16 maxVolSeqNum; 211 __le16 maxVolSeqNum;
212 __le16 interchangeLvl; 212 __le16 interchangeLvl;
213 __le16 maxInterchangeLvl; 213 __le16 maxInterchangeLvl;
214 __le32 charSetList; 214 __le32 charSetList;
215 __le32 maxCharSetList; 215 __le32 maxCharSetList;
216 dstring volSetIdent[128]; 216 dstring volSetIdent[128];
217 charspec descCharSet; 217 charspec descCharSet;
218 charspec explanatoryCharSet; 218 charspec explanatoryCharSet;
219 extent_ad volAbstract; 219 extent_ad volAbstract;
220 extent_ad volCopyright; 220 extent_ad volCopyright;
221 regid appIdent; 221 regid appIdent;
222 timestamp recordingDateAndTime; 222 timestamp recordingDateAndTime;
223 regid impIdent; 223 regid impIdent;
224 uint8_t impUse[64]; 224 uint8_t impUse[64];
225 __le32 predecessorVolDescSeqLocation; 225 __le32 predecessorVolDescSeqLocation;
226 __le16 flags; 226 __le16 flags;
227 uint8_t reserved[22]; 227 uint8_t reserved[22];
228} __attribute__ ((packed)); 228} __attribute__ ((packed));
229 229
230/* Flags (ECMA 167r3 3/10.1.21) */ 230/* Flags (ECMA 167r3 3/10.1.21) */
@@ -232,26 +232,26 @@ struct primaryVolDesc {
232 232
233/* Anchor Volume Descriptor Pointer (ECMA 167r3 3/10.2) */ 233/* Anchor Volume Descriptor Pointer (ECMA 167r3 3/10.2) */
234struct anchorVolDescPtr { 234struct anchorVolDescPtr {
235 tag descTag; 235 tag descTag;
236 extent_ad mainVolDescSeqExt; 236 extent_ad mainVolDescSeqExt;
237 extent_ad reserveVolDescSeqExt; 237 extent_ad reserveVolDescSeqExt;
238 uint8_t reserved[480]; 238 uint8_t reserved[480];
239} __attribute__ ((packed)); 239} __attribute__ ((packed));
240 240
241/* Volume Descriptor Pointer (ECMA 167r3 3/10.3) */ 241/* Volume Descriptor Pointer (ECMA 167r3 3/10.3) */
242struct volDescPtr { 242struct volDescPtr {
243 tag descTag; 243 tag descTag;
244 __le32 volDescSeqNum; 244 __le32 volDescSeqNum;
245 extent_ad nextVolDescSeqExt; 245 extent_ad nextVolDescSeqExt;
246 uint8_t reserved[484]; 246 uint8_t reserved[484];
247} __attribute__ ((packed)); 247} __attribute__ ((packed));
248 248
249/* Implementation Use Volume Descriptor (ECMA 167r3 3/10.4) */ 249/* Implementation Use Volume Descriptor (ECMA 167r3 3/10.4) */
250struct impUseVolDesc { 250struct impUseVolDesc {
251 tag descTag; 251 tag descTag;
252 __le32 volDescSeqNum; 252 __le32 volDescSeqNum;
253 regid impIdent; 253 regid impIdent;
254 uint8_t impUse[460]; 254 uint8_t impUse[460];
255} __attribute__ ((packed)); 255} __attribute__ ((packed));
256 256
257/* Partition Descriptor (ECMA 167r3 3/10.5) */ 257/* Partition Descriptor (ECMA 167r3 3/10.5) */
@@ -291,26 +291,26 @@ struct partitionDesc {
291 291
292/* Logical Volume Descriptor (ECMA 167r3 3/10.6) */ 292/* Logical Volume Descriptor (ECMA 167r3 3/10.6) */
293struct logicalVolDesc { 293struct logicalVolDesc {
294 tag descTag; 294 tag descTag;
295 __le32 volDescSeqNum; 295 __le32 volDescSeqNum;
296 charspec descCharSet; 296 charspec descCharSet;
297 dstring logicalVolIdent[128]; 297 dstring logicalVolIdent[128];
298 __le32 logicalBlockSize; 298 __le32 logicalBlockSize;
299 regid domainIdent; 299 regid domainIdent;
300 uint8_t logicalVolContentsUse[16]; 300 uint8_t logicalVolContentsUse[16];
301 __le32 mapTableLength; 301 __le32 mapTableLength;
302 __le32 numPartitionMaps; 302 __le32 numPartitionMaps;
303 regid impIdent; 303 regid impIdent;
304 uint8_t impUse[128]; 304 uint8_t impUse[128];
305 extent_ad integritySeqExt; 305 extent_ad integritySeqExt;
306 uint8_t partitionMaps[0]; 306 uint8_t partitionMaps[0];
307} __attribute__ ((packed)); 307} __attribute__ ((packed));
308 308
309/* Generic Partition Map (ECMA 167r3 3/10.7.1) */ 309/* Generic Partition Map (ECMA 167r3 3/10.7.1) */
310struct genericPartitionMap { 310struct genericPartitionMap {
311 uint8_t partitionMapType; 311 uint8_t partitionMapType;
312 uint8_t partitionMapLength; 312 uint8_t partitionMapLength;
313 uint8_t partitionMapping[0]; 313 uint8_t partitionMapping[0];
314} __attribute__ ((packed)); 314} __attribute__ ((packed));
315 315
316/* Partition Map Type (ECMA 167r3 3/10.7.1.1) */ 316/* Partition Map Type (ECMA 167r3 3/10.7.1.1) */
@@ -320,45 +320,45 @@ struct genericPartitionMap {
320 320
321/* Type 1 Partition Map (ECMA 167r3 3/10.7.2) */ 321/* Type 1 Partition Map (ECMA 167r3 3/10.7.2) */
322struct genericPartitionMap1 { 322struct genericPartitionMap1 {
323 uint8_t partitionMapType; 323 uint8_t partitionMapType;
324 uint8_t partitionMapLength; 324 uint8_t partitionMapLength;
325 __le16 volSeqNum; 325 __le16 volSeqNum;
326 __le16 partitionNum; 326 __le16 partitionNum;
327} __attribute__ ((packed)); 327} __attribute__ ((packed));
328 328
329/* Type 2 Partition Map (ECMA 167r3 3/10.7.3) */ 329/* Type 2 Partition Map (ECMA 167r3 3/10.7.3) */
330struct genericPartitionMap2 { 330struct genericPartitionMap2 {
331 uint8_t partitionMapType; 331 uint8_t partitionMapType;
332 uint8_t partitionMapLength; 332 uint8_t partitionMapLength;
333 uint8_t partitionIdent[62]; 333 uint8_t partitionIdent[62];
334} __attribute__ ((packed)); 334} __attribute__ ((packed));
335 335
336/* Unallocated Space Descriptor (ECMA 167r3 3/10.8) */ 336/* Unallocated Space Descriptor (ECMA 167r3 3/10.8) */
337struct unallocSpaceDesc { 337struct unallocSpaceDesc {
338 tag descTag; 338 tag descTag;
339 __le32 volDescSeqNum; 339 __le32 volDescSeqNum;
340 __le32 numAllocDescs; 340 __le32 numAllocDescs;
341 extent_ad allocDescs[0]; 341 extent_ad allocDescs[0];
342} __attribute__ ((packed)); 342} __attribute__ ((packed));
343 343
344/* Terminating Descriptor (ECMA 167r3 3/10.9) */ 344/* Terminating Descriptor (ECMA 167r3 3/10.9) */
345struct terminatingDesc { 345struct terminatingDesc {
346 tag descTag; 346 tag descTag;
347 uint8_t reserved[496]; 347 uint8_t reserved[496];
348} __attribute__ ((packed)); 348} __attribute__ ((packed));
349 349
350/* Logical Volume Integrity Descriptor (ECMA 167r3 3/10.10) */ 350/* Logical Volume Integrity Descriptor (ECMA 167r3 3/10.10) */
351struct logicalVolIntegrityDesc { 351struct logicalVolIntegrityDesc {
352 tag descTag; 352 tag descTag;
353 timestamp recordingDateAndTime; 353 timestamp recordingDateAndTime;
354 __le32 integrityType; 354 __le32 integrityType;
355 extent_ad nextIntegrityExt; 355 extent_ad nextIntegrityExt;
356 uint8_t logicalVolContentsUse[32]; 356 uint8_t logicalVolContentsUse[32];
357 __le32 numOfPartitions; 357 __le32 numOfPartitions;
358 __le32 lengthOfImpUse; 358 __le32 lengthOfImpUse;
359 __le32 freeSpaceTable[0]; 359 __le32 freeSpaceTable[0];
360 __le32 sizeTable[0]; 360 __le32 sizeTable[0];
361 uint8_t impUse[0]; 361 uint8_t impUse[0];
362} __attribute__ ((packed)); 362} __attribute__ ((packed));
363 363
364/* Integrity Type (ECMA 167r3 3/10.10.3) */ 364/* Integrity Type (ECMA 167r3 3/10.10.3) */
@@ -367,48 +367,48 @@ struct logicalVolIntegrityDesc {
367 367
368/* Recorded Address (ECMA 167r3 4/7.1) */ 368/* Recorded Address (ECMA 167r3 4/7.1) */
369typedef struct { 369typedef struct {
370 __le32 logicalBlockNum; 370 __le32 logicalBlockNum;
371 __le16 partitionReferenceNum; 371 __le16 partitionReferenceNum;
372} __attribute__ ((packed)) lb_addr; 372} __attribute__ ((packed)) lb_addr;
373 373
374/* ... and its in-core analog */ 374/* ... and its in-core analog */
375typedef struct { 375typedef struct {
376 uint32_t logicalBlockNum; 376 uint32_t logicalBlockNum;
377 uint16_t partitionReferenceNum; 377 uint16_t partitionReferenceNum;
378} kernel_lb_addr; 378} kernel_lb_addr;
379 379
380/* Short Allocation Descriptor (ECMA 167r3 4/14.14.1) */ 380/* Short Allocation Descriptor (ECMA 167r3 4/14.14.1) */
381typedef struct { 381typedef struct {
382 __le32 extLength; 382 __le32 extLength;
383 __le32 extPosition; 383 __le32 extPosition;
384} __attribute__ ((packed)) short_ad; 384} __attribute__ ((packed)) short_ad;
385 385
386/* Long Allocation Descriptor (ECMA 167r3 4/14.14.2) */ 386/* Long Allocation Descriptor (ECMA 167r3 4/14.14.2) */
387typedef struct { 387typedef struct {
388 __le32 extLength; 388 __le32 extLength;
389 lb_addr extLocation; 389 lb_addr extLocation;
390 uint8_t impUse[6]; 390 uint8_t impUse[6];
391} __attribute__ ((packed)) long_ad; 391} __attribute__ ((packed)) long_ad;
392 392
393typedef struct { 393typedef struct {
394 uint32_t extLength; 394 uint32_t extLength;
395 kernel_lb_addr extLocation; 395 kernel_lb_addr extLocation;
396 uint8_t impUse[6]; 396 uint8_t impUse[6];
397} kernel_long_ad; 397} kernel_long_ad;
398 398
399/* Extended Allocation Descriptor (ECMA 167r3 4/14.14.3) */ 399/* Extended Allocation Descriptor (ECMA 167r3 4/14.14.3) */
400typedef struct { 400typedef struct {
401 __le32 extLength; 401 __le32 extLength;
402 __le32 recordedLength; 402 __le32 recordedLength;
403 __le32 informationLength; 403 __le32 informationLength;
404 lb_addr extLocation; 404 lb_addr extLocation;
405} __attribute__ ((packed)) ext_ad; 405} __attribute__ ((packed)) ext_ad;
406 406
407typedef struct { 407typedef struct {
408 uint32_t extLength; 408 uint32_t extLength;
409 uint32_t recordedLength; 409 uint32_t recordedLength;
410 uint32_t informationLength; 410 uint32_t informationLength;
411 kernel_lb_addr extLocation; 411 kernel_lb_addr extLocation;
412} kernel_ext_ad; 412} kernel_ext_ad;
413 413
414/* Descriptor Tag (ECMA 167r3 4/7.2 - See 3/7.2) */ 414/* Descriptor Tag (ECMA 167r3 4/7.2 - See 3/7.2) */
@@ -428,48 +428,48 @@ typedef struct {
428 428
429/* File Set Descriptor (ECMA 167r3 4/14.1) */ 429/* File Set Descriptor (ECMA 167r3 4/14.1) */
430struct fileSetDesc { 430struct fileSetDesc {
431 tag descTag; 431 tag descTag;
432 timestamp recordingDateAndTime; 432 timestamp recordingDateAndTime;
433 __le16 interchangeLvl; 433 __le16 interchangeLvl;
434 __le16 maxInterchangeLvl; 434 __le16 maxInterchangeLvl;
435 __le32 charSetList; 435 __le32 charSetList;
436 __le32 maxCharSetList; 436 __le32 maxCharSetList;
437 __le32 fileSetNum; 437 __le32 fileSetNum;
438 __le32 fileSetDescNum; 438 __le32 fileSetDescNum;
439 charspec logicalVolIdentCharSet; 439 charspec logicalVolIdentCharSet;
440 dstring logicalVolIdent[128]; 440 dstring logicalVolIdent[128];
441 charspec fileSetCharSet; 441 charspec fileSetCharSet;
442 dstring fileSetIdent[32]; 442 dstring fileSetIdent[32];
443 dstring copyrightFileIdent[32]; 443 dstring copyrightFileIdent[32];
444 dstring abstractFileIdent[32]; 444 dstring abstractFileIdent[32];
445 long_ad rootDirectoryICB; 445 long_ad rootDirectoryICB;
446 regid domainIdent; 446 regid domainIdent;
447 long_ad nextExt; 447 long_ad nextExt;
448 long_ad streamDirectoryICB; 448 long_ad streamDirectoryICB;
449 uint8_t reserved[32]; 449 uint8_t reserved[32];
450} __attribute__ ((packed)); 450} __attribute__ ((packed));
451 451
452/* Partition Header Descriptor (ECMA 167r3 4/14.3) */ 452/* Partition Header Descriptor (ECMA 167r3 4/14.3) */
453struct partitionHeaderDesc { 453struct partitionHeaderDesc {
454 short_ad unallocSpaceTable; 454 short_ad unallocSpaceTable;
455 short_ad unallocSpaceBitmap; 455 short_ad unallocSpaceBitmap;
456 short_ad partitionIntegrityTable; 456 short_ad partitionIntegrityTable;
457 short_ad freedSpaceTable; 457 short_ad freedSpaceTable;
458 short_ad freedSpaceBitmap; 458 short_ad freedSpaceBitmap;
459 uint8_t reserved[88]; 459 uint8_t reserved[88];
460} __attribute__ ((packed)); 460} __attribute__ ((packed));
461 461
462/* File Identifier Descriptor (ECMA 167r3 4/14.4) */ 462/* File Identifier Descriptor (ECMA 167r3 4/14.4) */
463struct fileIdentDesc { 463struct fileIdentDesc {
464 tag descTag; 464 tag descTag;
465 __le16 fileVersionNum; 465 __le16 fileVersionNum;
466 uint8_t fileCharacteristics; 466 uint8_t fileCharacteristics;
467 uint8_t lengthFileIdent; 467 uint8_t lengthFileIdent;
468 long_ad icb; 468 long_ad icb;
469 __le16 lengthOfImpUse; 469 __le16 lengthOfImpUse;
470 uint8_t impUse[0]; 470 uint8_t impUse[0];
471 uint8_t fileIdent[0]; 471 uint8_t fileIdent[0];
472 uint8_t padding[0]; 472 uint8_t padding[0];
473} __attribute__ ((packed)); 473} __attribute__ ((packed));
474 474
475/* File Characteristics (ECMA 167r3 4/14.4.3) */ 475/* File Characteristics (ECMA 167r3 4/14.4.3) */
@@ -481,21 +481,21 @@ struct fileIdentDesc {
481 481
482/* Allocation Ext Descriptor (ECMA 167r3 4/14.5) */ 482/* Allocation Ext Descriptor (ECMA 167r3 4/14.5) */
483struct allocExtDesc { 483struct allocExtDesc {
484 tag descTag; 484 tag descTag;
485 __le32 previousAllocExtLocation; 485 __le32 previousAllocExtLocation;
486 __le32 lengthAllocDescs; 486 __le32 lengthAllocDescs;
487} __attribute__ ((packed)); 487} __attribute__ ((packed));
488 488
489/* ICB Tag (ECMA 167r3 4/14.6) */ 489/* ICB Tag (ECMA 167r3 4/14.6) */
490typedef struct { 490typedef struct {
491 __le32 priorRecordedNumDirectEntries; 491 __le32 priorRecordedNumDirectEntries;
492 __le16 strategyType; 492 __le16 strategyType;
493 __le16 strategyParameter; 493 __le16 strategyParameter;
494 __le16 numEntries; 494 __le16 numEntries;
495 uint8_t reserved; 495 uint8_t reserved;
496 uint8_t fileType; 496 uint8_t fileType;
497 lb_addr parentICBLocation; 497 lb_addr parentICBLocation;
498 __le16 flags; 498 __le16 flags;
499} __attribute__ ((packed)) icbtag; 499} __attribute__ ((packed)) icbtag;
500 500
501/* Strategy Type (ECMA 167r3 4/14.6.2) */ 501/* Strategy Type (ECMA 167r3 4/14.6.2) */
@@ -541,41 +541,41 @@ typedef struct {
541 541
542/* Indirect Entry (ECMA 167r3 4/14.7) */ 542/* Indirect Entry (ECMA 167r3 4/14.7) */
543struct indirectEntry { 543struct indirectEntry {
544 tag descTag; 544 tag descTag;
545 icbtag icbTag; 545 icbtag icbTag;
546 long_ad indirectICB; 546 long_ad indirectICB;
547} __attribute__ ((packed)); 547} __attribute__ ((packed));
548 548
549/* Terminal Entry (ECMA 167r3 4/14.8) */ 549/* Terminal Entry (ECMA 167r3 4/14.8) */
550struct terminalEntry { 550struct terminalEntry {
551 tag descTag; 551 tag descTag;
552 icbtag icbTag; 552 icbtag icbTag;
553} __attribute__ ((packed)); 553} __attribute__ ((packed));
554 554
555/* File Entry (ECMA 167r3 4/14.9) */ 555/* File Entry (ECMA 167r3 4/14.9) */
556struct fileEntry { 556struct fileEntry {
557 tag descTag; 557 tag descTag;
558 icbtag icbTag; 558 icbtag icbTag;
559 __le32 uid; 559 __le32 uid;
560 __le32 gid; 560 __le32 gid;
561 __le32 permissions; 561 __le32 permissions;
562 __le16 fileLinkCount; 562 __le16 fileLinkCount;
563 uint8_t recordFormat; 563 uint8_t recordFormat;
564 uint8_t recordDisplayAttr; 564 uint8_t recordDisplayAttr;
565 __le32 recordLength; 565 __le32 recordLength;
566 __le64 informationLength; 566 __le64 informationLength;
567 __le64 logicalBlocksRecorded; 567 __le64 logicalBlocksRecorded;
568 timestamp accessTime; 568 timestamp accessTime;
569 timestamp modificationTime; 569 timestamp modificationTime;
570 timestamp attrTime; 570 timestamp attrTime;
571 __le32 checkpoint; 571 __le32 checkpoint;
572 long_ad extendedAttrICB; 572 long_ad extendedAttrICB;
573 regid impIdent; 573 regid impIdent;
574 __le64 uniqueID; 574 __le64 uniqueID;
575 __le32 lengthExtendedAttr; 575 __le32 lengthExtendedAttr;
576 __le32 lengthAllocDescs; 576 __le32 lengthAllocDescs;
577 uint8_t extendedAttr[0]; 577 uint8_t extendedAttr[0];
578 uint8_t allocDescs[0]; 578 uint8_t allocDescs[0];
579} __attribute__ ((packed)); 579} __attribute__ ((packed));
580 580
581/* Permissions (ECMA 167r3 4/14.9.5) */ 581/* Permissions (ECMA 167r3 4/14.9.5) */
@@ -617,51 +617,51 @@ struct fileEntry {
617 617
618/* Extended Attribute Header Descriptor (ECMA 167r3 4/14.10.1) */ 618/* Extended Attribute Header Descriptor (ECMA 167r3 4/14.10.1) */
619struct extendedAttrHeaderDesc { 619struct extendedAttrHeaderDesc {
620 tag descTag; 620 tag descTag;
621 __le32 impAttrLocation; 621 __le32 impAttrLocation;
622 __le32 appAttrLocation; 622 __le32 appAttrLocation;
623} __attribute__ ((packed)); 623} __attribute__ ((packed));
624 624
625/* Generic Format (ECMA 167r3 4/14.10.2) */ 625/* Generic Format (ECMA 167r3 4/14.10.2) */
626struct genericFormat { 626struct genericFormat {
627 __le32 attrType; 627 __le32 attrType;
628 uint8_t attrSubtype; 628 uint8_t attrSubtype;
629 uint8_t reserved[3]; 629 uint8_t reserved[3];
630 __le32 attrLength; 630 __le32 attrLength;
631 uint8_t attrData[0]; 631 uint8_t attrData[0];
632} __attribute__ ((packed)); 632} __attribute__ ((packed));
633 633
634/* Character Set Information (ECMA 167r3 4/14.10.3) */ 634/* Character Set Information (ECMA 167r3 4/14.10.3) */
635struct charSetInfo { 635struct charSetInfo {
636 __le32 attrType; 636 __le32 attrType;
637 uint8_t attrSubtype; 637 uint8_t attrSubtype;
638 uint8_t reserved[3]; 638 uint8_t reserved[3];
639 __le32 attrLength; 639 __le32 attrLength;
640 __le32 escapeSeqLength; 640 __le32 escapeSeqLength;
641 uint8_t charSetType; 641 uint8_t charSetType;
642 uint8_t escapeSeq[0]; 642 uint8_t escapeSeq[0];
643} __attribute__ ((packed)); 643} __attribute__ ((packed));
644 644
645/* Alternate Permissions (ECMA 167r3 4/14.10.4) */ 645/* Alternate Permissions (ECMA 167r3 4/14.10.4) */
646struct altPerms { 646struct altPerms {
647 __le32 attrType; 647 __le32 attrType;
648 uint8_t attrSubtype; 648 uint8_t attrSubtype;
649 uint8_t reserved[3]; 649 uint8_t reserved[3];
650 __le32 attrLength; 650 __le32 attrLength;
651 __le16 ownerIdent; 651 __le16 ownerIdent;
652 __le16 groupIdent; 652 __le16 groupIdent;
653 __le16 permission; 653 __le16 permission;
654} __attribute__ ((packed)); 654} __attribute__ ((packed));
655 655
656/* File Times Extended Attribute (ECMA 167r3 4/14.10.5) */ 656/* File Times Extended Attribute (ECMA 167r3 4/14.10.5) */
657struct fileTimesExtAttr { 657struct fileTimesExtAttr {
658 __le32 attrType; 658 __le32 attrType;
659 uint8_t attrSubtype; 659 uint8_t attrSubtype;
660 uint8_t reserved[3]; 660 uint8_t reserved[3];
661 __le32 attrLength; 661 __le32 attrLength;
662 __le32 dataLength; 662 __le32 dataLength;
663 __le32 fileTimeExistence; 663 __le32 fileTimeExistence;
664 uint8_t fileTimes; 664 uint8_t fileTimes;
665} __attribute__ ((packed)); 665} __attribute__ ((packed));
666 666
667/* FileTimeExistence (ECMA 167r3 4/14.10.5.6) */ 667/* FileTimeExistence (ECMA 167r3 4/14.10.5.6) */
@@ -672,47 +672,47 @@ struct fileTimesExtAttr {
672 672
673/* Information Times Extended Attribute (ECMA 167r3 4/14.10.6) */ 673/* Information Times Extended Attribute (ECMA 167r3 4/14.10.6) */
674struct infoTimesExtAttr { 674struct infoTimesExtAttr {
675 __le32 attrType; 675 __le32 attrType;
676 uint8_t attrSubtype; 676 uint8_t attrSubtype;
677 uint8_t reserved[3]; 677 uint8_t reserved[3];
678 __le32 attrLength; 678 __le32 attrLength;
679 __le32 dataLength; 679 __le32 dataLength;
680 __le32 infoTimeExistence; 680 __le32 infoTimeExistence;
681 uint8_t infoTimes[0]; 681 uint8_t infoTimes[0];
682} __attribute__ ((packed)); 682} __attribute__ ((packed));
683 683
684/* Device Specification (ECMA 167r3 4/14.10.7) */ 684/* Device Specification (ECMA 167r3 4/14.10.7) */
685struct deviceSpec { 685struct deviceSpec {
686 __le32 attrType; 686 __le32 attrType;
687 uint8_t attrSubtype; 687 uint8_t attrSubtype;
688 uint8_t reserved[3]; 688 uint8_t reserved[3];
689 __le32 attrLength; 689 __le32 attrLength;
690 __le32 impUseLength; 690 __le32 impUseLength;
691 __le32 majorDeviceIdent; 691 __le32 majorDeviceIdent;
692 __le32 minorDeviceIdent; 692 __le32 minorDeviceIdent;
693 uint8_t impUse[0]; 693 uint8_t impUse[0];
694} __attribute__ ((packed)); 694} __attribute__ ((packed));
695 695
696/* Implementation Use Extended Attr (ECMA 167r3 4/14.10.8) */ 696/* Implementation Use Extended Attr (ECMA 167r3 4/14.10.8) */
697struct impUseExtAttr { 697struct impUseExtAttr {
698 __le32 attrType; 698 __le32 attrType;
699 uint8_t attrSubtype; 699 uint8_t attrSubtype;
700 uint8_t reserved[3]; 700 uint8_t reserved[3];
701 __le32 attrLength; 701 __le32 attrLength;
702 __le32 impUseLength; 702 __le32 impUseLength;
703 regid impIdent; 703 regid impIdent;
704 uint8_t impUse[0]; 704 uint8_t impUse[0];
705} __attribute__ ((packed)); 705} __attribute__ ((packed));
706 706
707/* Application Use Extended Attribute (ECMA 167r3 4/14.10.9) */ 707/* Application Use Extended Attribute (ECMA 167r3 4/14.10.9) */
708struct appUseExtAttr { 708struct appUseExtAttr {
709 __le32 attrType; 709 __le32 attrType;
710 uint8_t attrSubtype; 710 uint8_t attrSubtype;
711 uint8_t reserved[3]; 711 uint8_t reserved[3];
712 __le32 attrLength; 712 __le32 attrLength;
713 __le32 appUseLength; 713 __le32 appUseLength;
714 regid appIdent; 714 regid appIdent;
715 uint8_t appUse[0]; 715 uint8_t appUse[0];
716} __attribute__ ((packed)); 716} __attribute__ ((packed));
717 717
718#define EXTATTR_CHAR_SET 1 718#define EXTATTR_CHAR_SET 1
@@ -725,29 +725,29 @@ struct appUseExtAttr {
725 725
726/* Unallocated Space Entry (ECMA 167r3 4/14.11) */ 726/* Unallocated Space Entry (ECMA 167r3 4/14.11) */
727struct unallocSpaceEntry { 727struct unallocSpaceEntry {
728 tag descTag; 728 tag descTag;
729 icbtag icbTag; 729 icbtag icbTag;
730 __le32 lengthAllocDescs; 730 __le32 lengthAllocDescs;
731 uint8_t allocDescs[0]; 731 uint8_t allocDescs[0];
732} __attribute__ ((packed)); 732} __attribute__ ((packed));
733 733
734/* Space Bitmap Descriptor (ECMA 167r3 4/14.12) */ 734/* Space Bitmap Descriptor (ECMA 167r3 4/14.12) */
735struct spaceBitmapDesc { 735struct spaceBitmapDesc {
736 tag descTag; 736 tag descTag;
737 __le32 numOfBits; 737 __le32 numOfBits;
738 __le32 numOfBytes; 738 __le32 numOfBytes;
739 uint8_t bitmap[0]; 739 uint8_t bitmap[0];
740} __attribute__ ((packed)); 740} __attribute__ ((packed));
741 741
742/* Partition Integrity Entry (ECMA 167r3 4/14.13) */ 742/* Partition Integrity Entry (ECMA 167r3 4/14.13) */
743struct partitionIntegrityEntry { 743struct partitionIntegrityEntry {
744 tag descTag; 744 tag descTag;
745 icbtag icbTag; 745 icbtag icbTag;
746 timestamp recordingDateAndTime; 746 timestamp recordingDateAndTime;
747 uint8_t integrityType; 747 uint8_t integrityType;
748 uint8_t reserved[175]; 748 uint8_t reserved[175];
749 regid impIdent; 749 regid impIdent;
750 uint8_t impUse[256]; 750 uint8_t impUse[256];
751} __attribute__ ((packed)); 751} __attribute__ ((packed));
752 752
753/* Short Allocation Descriptor (ECMA 167r3 4/14.14.1) */ 753/* Short Allocation Descriptor (ECMA 167r3 4/14.14.1) */
@@ -764,46 +764,46 @@ struct partitionIntegrityEntry {
764 764
765/* Logical Volume Header Descriptor (ECMA 167r3 4/14.15) */ 765/* Logical Volume Header Descriptor (ECMA 167r3 4/14.15) */
766struct logicalVolHeaderDesc { 766struct logicalVolHeaderDesc {
767 __le64 uniqueID; 767 __le64 uniqueID;
768 uint8_t reserved[24]; 768 uint8_t reserved[24];
769} __attribute__ ((packed)); 769} __attribute__ ((packed));
770 770
771/* Path Component (ECMA 167r3 4/14.16.1) */ 771/* Path Component (ECMA 167r3 4/14.16.1) */
772struct pathComponent { 772struct pathComponent {
773 uint8_t componentType; 773 uint8_t componentType;
774 uint8_t lengthComponentIdent; 774 uint8_t lengthComponentIdent;
775 __le16 componentFileVersionNum; 775 __le16 componentFileVersionNum;
776 dstring componentIdent[0]; 776 dstring componentIdent[0];
777} __attribute__ ((packed)); 777} __attribute__ ((packed));
778 778
779/* File Entry (ECMA 167r3 4/14.17) */ 779/* File Entry (ECMA 167r3 4/14.17) */
780struct extendedFileEntry { 780struct extendedFileEntry {
781 tag descTag; 781 tag descTag;
782 icbtag icbTag; 782 icbtag icbTag;
783 __le32 uid; 783 __le32 uid;
784 __le32 gid; 784 __le32 gid;
785 __le32 permissions; 785 __le32 permissions;
786 __le16 fileLinkCount; 786 __le16 fileLinkCount;
787 uint8_t recordFormat; 787 uint8_t recordFormat;
788 uint8_t recordDisplayAttr; 788 uint8_t recordDisplayAttr;
789 __le32 recordLength; 789 __le32 recordLength;
790 __le64 informationLength; 790 __le64 informationLength;
791 __le64 objectSize; 791 __le64 objectSize;
792 __le64 logicalBlocksRecorded; 792 __le64 logicalBlocksRecorded;
793 timestamp accessTime; 793 timestamp accessTime;
794 timestamp modificationTime; 794 timestamp modificationTime;
795 timestamp createTime; 795 timestamp createTime;
796 timestamp attrTime; 796 timestamp attrTime;
797 __le32 checkpoint; 797 __le32 checkpoint;
798 __le32 reserved; 798 __le32 reserved;
799 long_ad extendedAttrICB; 799 long_ad extendedAttrICB;
800 long_ad streamDirectoryICB; 800 long_ad streamDirectoryICB;
801 regid impIdent; 801 regid impIdent;
802 __le64 uniqueID; 802 __le64 uniqueID;
803 __le32 lengthExtendedAttr; 803 __le32 lengthExtendedAttr;
804 __le32 lengthAllocDescs; 804 __le32 lengthAllocDescs;
805 uint8_t extendedAttr[0]; 805 uint8_t extendedAttr[0];
806 uint8_t allocDescs[0]; 806 uint8_t allocDescs[0];
807} __attribute__ ((packed)); 807} __attribute__ ((packed));
808 808
809#endif /* _ECMA_167_H */ 809#endif /* _ECMA_167_H */
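The descriptors above are on-disk structures, which is why every multi-byte field is typed __le16/__le32/__le64 and every struct is packed: the byte layout has to match ECMA 167 exactly, independent of host endianness. A minimal sketch of how such a field is normally consumed once the raw descriptor has been read from the media (the helper name is illustrative, not part of this patch):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Illustrative helper: pull a CPU-endian value out of a packed,
 * little-endian on-disk descriptor such as struct impUseExtAttr. */
static inline u32 imp_use_ext_attr_len(const struct impUseExtAttr *ea)
{
	return le32_to_cpu(ea->impUseLength);
}

The inverse direction uses cpu_to_le32() before the buffer is written back, as the numDirs/numFiles updates in ialloc.c later in this patch do.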
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 67bf36bd3e6e..5d7a4ea27753 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -30,7 +30,7 @@
30#include <linux/udf_fs.h> 30#include <linux/udf_fs.h>
31#include <asm/uaccess.h> 31#include <asm/uaccess.h>
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33#include <linux/string.h> /* memset */ 33#include <linux/string.h> /* memset */
34#include <linux/capability.h> 34#include <linux/capability.h>
35#include <linux/errno.h> 35#include <linux/errno.h>
36#include <linux/smp_lock.h> 36#include <linux/smp_lock.h>
@@ -55,11 +55,11 @@ static int udf_adinicb_readpage(struct file *file, struct page *page)
55 SetPageUptodate(page); 55 SetPageUptodate(page);
56 kunmap(page); 56 kunmap(page);
57 unlock_page(page); 57 unlock_page(page);
58
58 return 0; 59 return 0;
59} 60}
60 61
61static int udf_adinicb_writepage(struct page *page, 62static int udf_adinicb_writepage(struct page *page, struct writeback_control *wbc)
62 struct writeback_control *wbc)
63{ 63{
64 struct inode *inode = page->mapping->host; 64 struct inode *inode = page->mapping->host;
65 char *kaddr; 65 char *kaddr;
@@ -72,6 +72,7 @@ static int udf_adinicb_writepage(struct page *page,
72 SetPageUptodate(page); 72 SetPageUptodate(page);
73 kunmap(page); 73 kunmap(page);
74 unlock_page(page); 74 unlock_page(page);
75
75 return 0; 76 return 0;
76} 77}
77 78
@@ -100,11 +101,11 @@ static int udf_adinicb_commit_write(struct file *file, struct page *page,
100} 101}
101 102
102const struct address_space_operations udf_adinicb_aops = { 103const struct address_space_operations udf_adinicb_aops = {
103 .readpage = udf_adinicb_readpage, 104 .readpage = udf_adinicb_readpage,
104 .writepage = udf_adinicb_writepage, 105 .writepage = udf_adinicb_writepage,
105 .sync_page = block_sync_page, 106 .sync_page = block_sync_page,
106 .prepare_write = udf_adinicb_prepare_write, 107 .prepare_write = udf_adinicb_prepare_write,
107 .commit_write = udf_adinicb_commit_write, 108 .commit_write = udf_adinicb_commit_write,
108}; 109};
109 110
110static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov, 111static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
@@ -122,8 +123,8 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
122 else 123 else
123 pos = ppos; 124 pos = ppos;
124 125
125 if (inode->i_sb->s_blocksize < 126 if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
126 (udf_file_entry_alloc_offset(inode) + pos + count)) { 127 pos + count)) {
127 udf_expand_file_adinicb(inode, pos + count, &err); 128 udf_expand_file_adinicb(inode, pos + count, &err);
128 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) { 129 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) {
129 udf_debug("udf_expand_adinicb: err=%d\n", err); 130 udf_debug("udf_expand_adinicb: err=%d\n", err);
@@ -138,9 +139,9 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
138 } 139 }
139 140
140 retval = generic_file_aio_write(iocb, iov, nr_segs, ppos); 141 retval = generic_file_aio_write(iocb, iov, nr_segs, ppos);
141
142 if (retval > 0) 142 if (retval > 0)
143 mark_inode_dirty(inode); 143 mark_inode_dirty(inode);
144
144 return retval; 145 return retval;
145} 146}
146 147
@@ -181,10 +182,12 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
181int udf_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, 182int udf_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
182 unsigned long arg) 183 unsigned long arg)
183{ 184{
185 long old_block, new_block;
184 int result = -EINVAL; 186 int result = -EINVAL;
185 187
186 if (file_permission(filp, MAY_READ) != 0) { 188 if (file_permission(filp, MAY_READ) != 0) {
187 udf_debug("no permission to access inode %lu\n", inode->i_ino); 189 udf_debug("no permission to access inode %lu\n",
190 inode->i_ino);
188 return -EPERM; 191 return -EPERM;
189 } 192 }
190 193
@@ -196,26 +199,19 @@ int udf_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
196 switch (cmd) { 199 switch (cmd) {
197 case UDF_GETVOLIDENT: 200 case UDF_GETVOLIDENT:
198 return copy_to_user((char __user *)arg, 201 return copy_to_user((char __user *)arg,
199 UDF_SB_VOLIDENT(inode->i_sb), 202 UDF_SB_VOLIDENT(inode->i_sb), 32) ? -EFAULT : 0;
200 32) ? -EFAULT : 0;
201 case UDF_RELOCATE_BLOCKS: 203 case UDF_RELOCATE_BLOCKS:
202 { 204 if (!capable(CAP_SYS_ADMIN))
203 long old, new; 205 return -EACCES;
204 206 if (get_user(old_block, (long __user *)arg))
205 if (!capable(CAP_SYS_ADMIN)) 207 return -EFAULT;
206 return -EACCES; 208 if ((result = udf_relocate_blocks(inode->i_sb,
207 if (get_user(old, (long __user *)arg)) 209 old_block, &new_block)) == 0)
208 return -EFAULT; 210 result = put_user(new_block, (long __user *)arg);
209 if ((result = udf_relocate_blocks(inode->i_sb, 211 return result;
210 old, &new)) == 0)
211 result = put_user(new, (long __user *)arg);
212
213 return result;
214 }
215 case UDF_GETEASIZE: 212 case UDF_GETEASIZE:
216 result = put_user(UDF_I_LENEATTR(inode), (int __user *)arg); 213 result = put_user(UDF_I_LENEATTR(inode), (int __user *)arg);
217 break; 214 break;
218
219 case UDF_GETEABLOCK: 215 case UDF_GETEABLOCK:
220 result = copy_to_user((char __user *)arg, UDF_I_DATA(inode), 216 result = copy_to_user((char __user *)arg, UDF_I_DATA(inode),
221 UDF_I_LENEATTR(inode)) ? -EFAULT : 0; 217 UDF_I_LENEATTR(inode)) ? -EFAULT : 0;
@@ -248,16 +244,16 @@ static int udf_release_file(struct inode *inode, struct file *filp)
248} 244}
249 245
250const struct file_operations udf_file_operations = { 246const struct file_operations udf_file_operations = {
251 .read = do_sync_read, 247 .read = do_sync_read,
252 .aio_read = generic_file_aio_read, 248 .aio_read = generic_file_aio_read,
253 .ioctl = udf_ioctl, 249 .ioctl = udf_ioctl,
254 .open = generic_file_open, 250 .open = generic_file_open,
255 .mmap = generic_file_mmap, 251 .mmap = generic_file_mmap,
256 .write = do_sync_write, 252 .write = do_sync_write,
257 .aio_write = udf_file_aio_write, 253 .aio_write = udf_file_aio_write,
258 .release = udf_release_file, 254 .release = udf_release_file,
259 .fsync = udf_fsync_file, 255 .fsync = udf_fsync_file,
260 .splice_read = generic_file_splice_read, 256 .splice_read = generic_file_splice_read,
261}; 257};
262 258
263const struct inode_operations udf_file_inode_operations = { 259const struct inode_operations udf_file_inode_operations = {
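The UDF_RELOCATE_BLOCKS rework above flattens the old nested block into the standard ioctl copy-in/copy-out shape: privilege check, get_user() for the argument, do the work, put_user() for the result. A stripped-down sketch of that shape, with a hypothetical foo_relocate_blocks() standing in for the real helper:

#include <linux/fs.h>
#include <linux/capability.h>
#include <asm/uaccess.h>

static int foo_relocate_blocks(struct super_block *sb, long old, long *new); /* assumed helper */

/* Hypothetical ioctl fragment mirroring the capable()/get_user()/put_user()
 * pattern used by UDF_RELOCATE_BLOCKS above. */
static int foo_relocate_ioctl(struct super_block *sb, unsigned long arg)
{
	long old_block, new_block;
	int result;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (get_user(old_block, (long __user *)arg))
		return -EFAULT;
	result = foo_relocate_blocks(sb, old_block, &new_block);
	if (result == 0)
		result = put_user(new_block, (long __user *)arg);
	return result;
}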
diff --git a/fs/udf/fsync.c b/fs/udf/fsync.c
index 7f0901c4f1f1..b2c472b733b8 100644
--- a/fs/udf/fsync.c
+++ b/fs/udf/fsync.c
@@ -32,6 +32,7 @@ static int udf_fsync_inode(struct inode *, int);
32int udf_fsync_file(struct file *file, struct dentry *dentry, int datasync) 32int udf_fsync_file(struct file *file, struct dentry *dentry, int datasync)
33{ 33{
34 struct inode *inode = dentry->d_inode; 34 struct inode *inode = dentry->d_inode;
35
35 return udf_fsync_inode(inode, datasync); 36 return udf_fsync_inode(inode, datasync);
36} 37}
37 38
@@ -46,5 +47,6 @@ static int udf_fsync_inode(struct inode *inode, int datasync)
46 return err; 47 return err;
47 48
48 err |= udf_sync_inode(inode); 49 err |= udf_sync_inode(inode);
50
49 return err ? -EIO : 0; 51 return err ? -EIO : 0;
50} 52}
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 2eb503806bce..636d8f613929 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -46,12 +46,10 @@ void udf_free_inode(struct inode *inode)
46 if (sbi->s_lvidbh) { 46 if (sbi->s_lvidbh) {
47 if (S_ISDIR(inode->i_mode)) 47 if (S_ISDIR(inode->i_mode))
48 UDF_SB_LVIDIU(sb)->numDirs = 48 UDF_SB_LVIDIU(sb)->numDirs =
49 cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs) 49 cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs) - 1);
50 - 1);
51 else 50 else
52 UDF_SB_LVIDIU(sb)->numFiles = 51 UDF_SB_LVIDIU(sb)->numFiles =
53 cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles) 52 cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles) - 1);
54 - 1);
55 53
56 mark_buffer_dirty(sbi->s_lvidbh); 54 mark_buffer_dirty(sbi->s_lvidbh);
57 } 55 }
@@ -82,10 +80,8 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
82 UDF_I_NEXT_ALLOC_GOAL(inode) = 0; 80 UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
83 UDF_I_STRAT4096(inode) = 0; 81 UDF_I_STRAT4096(inode) = 0;
84 82
85 block = 83 block = udf_new_block(dir->i_sb, NULL, UDF_I_LOCATION(dir).partitionReferenceNum,
86 udf_new_block(dir->i_sb, NULL, 84 start, err);
87 UDF_I_LOCATION(dir).partitionReferenceNum, start,
88 err);
89 if (*err) { 85 if (*err) {
90 iput(inode); 86 iput(inode);
91 return NULL; 87 return NULL;
@@ -95,17 +91,13 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
95 if (UDF_SB_LVIDBH(sb)) { 91 if (UDF_SB_LVIDBH(sb)) {
96 struct logicalVolHeaderDesc *lvhd; 92 struct logicalVolHeaderDesc *lvhd;
97 uint64_t uniqueID; 93 uint64_t uniqueID;
98 lvhd = 94 lvhd = (struct logicalVolHeaderDesc *)(UDF_SB_LVID(sb)->logicalVolContentsUse);
99 (struct logicalVolHeaderDesc *)(UDF_SB_LVID(sb)->
100 logicalVolContentsUse);
101 if (S_ISDIR(mode)) 95 if (S_ISDIR(mode))
102 UDF_SB_LVIDIU(sb)->numDirs = 96 UDF_SB_LVIDIU(sb)->numDirs =
103 cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs) 97 cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs) + 1);
104 + 1);
105 else 98 else
106 UDF_SB_LVIDIU(sb)->numFiles = 99 UDF_SB_LVIDIU(sb)->numFiles =
107 cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles) 100 cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles) + 1);
108 + 1);
109 UDF_I_UNIQUE(inode) = uniqueID = le64_to_cpu(lvhd->uniqueID); 101 UDF_I_UNIQUE(inode) = uniqueID = le64_to_cpu(lvhd->uniqueID);
110 if (!(++uniqueID & 0x00000000FFFFFFFFUL)) 102 if (!(++uniqueID & 0x00000000FFFFFFFFUL))
111 uniqueID += 16; 103 uniqueID += 16;
@@ -118,12 +110,12 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
118 inode->i_gid = dir->i_gid; 110 inode->i_gid = dir->i_gid;
119 if (S_ISDIR(mode)) 111 if (S_ISDIR(mode))
120 mode |= S_ISGID; 112 mode |= S_ISGID;
121 } else 113 } else {
122 inode->i_gid = current->fsgid; 114 inode->i_gid = current->fsgid;
115 }
123 116
124 UDF_I_LOCATION(inode).logicalBlockNum = block; 117 UDF_I_LOCATION(inode).logicalBlockNum = block;
125 UDF_I_LOCATION(inode).partitionReferenceNum = 118 UDF_I_LOCATION(inode).partitionReferenceNum = UDF_I_LOCATION(dir).partitionReferenceNum;
126 UDF_I_LOCATION(dir).partitionReferenceNum;
127 inode->i_ino = udf_get_lb_pblock(sb, UDF_I_LOCATION(inode), 0); 119 inode->i_ino = udf_get_lb_pblock(sb, UDF_I_LOCATION(inode), 0);
128 inode->i_blocks = 0; 120 inode->i_blocks = 0;
129 UDF_I_LENEATTR(inode) = 0; 121 UDF_I_LENEATTR(inode) = 0;
@@ -132,14 +124,10 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
132 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE)) { 124 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE)) {
133 UDF_I_EFE(inode) = 1; 125 UDF_I_EFE(inode) = 1;
134 UDF_UPDATE_UDFREV(inode->i_sb, UDF_VERS_USE_EXTENDED_FE); 126 UDF_UPDATE_UDFREV(inode->i_sb, UDF_VERS_USE_EXTENDED_FE);
135 UDF_I_DATA(inode) = 127 UDF_I_DATA(inode) = kzalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
136 kzalloc(inode->i_sb->s_blocksize -
137 sizeof(struct extendedFileEntry), GFP_KERNEL);
138 } else { 128 } else {
139 UDF_I_EFE(inode) = 0; 129 UDF_I_EFE(inode) = 0;
140 UDF_I_DATA(inode) = 130 UDF_I_DATA(inode) = kzalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
141 kzalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry),
142 GFP_KERNEL);
143 } 131 }
144 if (!UDF_I_DATA(inode)) { 132 if (!UDF_I_DATA(inode)) {
145 iput(inode); 133 iput(inode);
@@ -154,7 +142,7 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
154 else 142 else
155 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG; 143 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
156 inode->i_mtime = inode->i_atime = inode->i_ctime = 144 inode->i_mtime = inode->i_atime = inode->i_ctime =
157 UDF_I_CRTIME(inode) = current_fs_time(inode->i_sb); 145 UDF_I_CRTIME(inode) = current_fs_time(inode->i_sb);
158 insert_inode_hash(inode); 146 insert_inode_hash(inode);
159 mark_inode_dirty(inode); 147 mark_inode_dirty(inode);
160 mutex_unlock(&sbi->s_alloc_mutex); 148 mutex_unlock(&sbi->s_alloc_mutex);
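Both udf_free_inode() and udf_new_inode() above adjust the on-disk directory and file counters with the usual read-convert-modify-writeback idiom for little-endian fields: le32_to_cpu(), add or subtract one, cpu_to_le32(). A minimal sketch of that idiom in isolation (the helper name is illustrative only):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Illustrative helper: adjust an on-disk little-endian 32-bit counter,
 * the same shape as the numDirs/numFiles updates above. */
static inline void le32_counter_add(__le32 *ctr, int delta)
{
	*ctr = cpu_to_le32(le32_to_cpu(*ctr) + delta);
}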
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index be6326f449a1..0d2c41666cd2 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -97,7 +97,8 @@ void udf_delete_inode(struct inode *inode)
97 97
98 unlock_kernel(); 98 unlock_kernel();
99 return; 99 return;
100 no_delete: 100
101no_delete:
101 clear_inode(inode); 102 clear_inode(inode);
102} 103}
103 104
@@ -144,12 +145,12 @@ static sector_t udf_bmap(struct address_space *mapping, sector_t block)
144} 145}
145 146
146const struct address_space_operations udf_aops = { 147const struct address_space_operations udf_aops = {
147 .readpage = udf_readpage, 148 .readpage = udf_readpage,
148 .writepage = udf_writepage, 149 .writepage = udf_writepage,
149 .sync_page = block_sync_page, 150 .sync_page = block_sync_page,
150 .prepare_write = udf_prepare_write, 151 .prepare_write = udf_prepare_write,
151 .commit_write = generic_commit_write, 152 .commit_write = generic_commit_write,
152 .bmap = udf_bmap, 153 .bmap = udf_bmap,
153}; 154};
154 155
155void udf_expand_file_adinicb(struct inode *inode, int newsize, int *err) 156void udf_expand_file_adinicb(struct inode *inode, int newsize, int *err)
@@ -230,12 +231,10 @@ struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block,
230 *block = udf_new_block(inode->i_sb, inode, 231 *block = udf_new_block(inode->i_sb, inode,
231 UDF_I_LOCATION(inode).partitionReferenceNum, 232 UDF_I_LOCATION(inode).partitionReferenceNum,
232 UDF_I_LOCATION(inode).logicalBlockNum, err); 233 UDF_I_LOCATION(inode).logicalBlockNum, err);
233
234 if (!(*block)) 234 if (!(*block))
235 return NULL; 235 return NULL;
236 newblock = udf_get_pblock(inode->i_sb, *block, 236 newblock = udf_get_pblock(inode->i_sb, *block,
237 UDF_I_LOCATION(inode).partitionReferenceNum, 237 UDF_I_LOCATION(inode).partitionReferenceNum, 0);
238 0);
239 if (!newblock) 238 if (!newblock)
240 return NULL; 239 return NULL;
241 dbh = udf_tgetblk(inode->i_sb, newblock); 240 dbh = udf_tgetblk(inode->i_sb, newblock);
@@ -247,16 +246,13 @@ struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block,
247 unlock_buffer(dbh); 246 unlock_buffer(dbh);
248 mark_buffer_dirty_inode(dbh, inode); 247 mark_buffer_dirty_inode(dbh, inode);
249 248
250 sfibh.soffset = sfibh.eoffset = 249 sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
251 (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
252 sfibh.sbh = sfibh.ebh = NULL; 250 sfibh.sbh = sfibh.ebh = NULL;
253 dfibh.soffset = dfibh.eoffset = 0; 251 dfibh.soffset = dfibh.eoffset = 0;
254 dfibh.sbh = dfibh.ebh = dbh; 252 dfibh.sbh = dfibh.ebh = dbh;
255 while ((f_pos < size)) { 253 while ((f_pos < size)) {
256 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB; 254 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
257 sfi = 255 sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL);
258 udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL,
259 NULL, NULL);
260 if (!sfi) { 256 if (!sfi) {
261 brelse(dbh); 257 brelse(dbh);
262 return NULL; 258 return NULL;
@@ -267,8 +263,7 @@ struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block,
267 dfibh.eoffset += (sfibh.eoffset - sfibh.soffset); 263 dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
268 dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset); 264 dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
269 if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse, 265 if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
270 sfi->fileIdent + 266 sfi->fileIdent + le16_to_cpu(sfi->lengthOfImpUse))) {
271 le16_to_cpu(sfi->lengthOfImpUse))) {
272 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB; 267 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
273 brelse(dbh); 268 brelse(dbh);
274 return NULL; 269 return NULL;
@@ -276,12 +271,10 @@ struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block,
276 } 271 }
277 mark_buffer_dirty_inode(dbh, inode); 272 mark_buffer_dirty_inode(dbh, inode);
278 273
279 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0, 274 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0, UDF_I_LENALLOC(inode));
280 UDF_I_LENALLOC(inode));
281 UDF_I_LENALLOC(inode) = 0; 275 UDF_I_LENALLOC(inode) = 0;
282 eloc.logicalBlockNum = *block; 276 eloc.logicalBlockNum = *block;
283 eloc.partitionReferenceNum = 277 eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
284 UDF_I_LOCATION(inode).partitionReferenceNum;
285 elen = inode->i_size; 278 elen = inode->i_size;
286 UDF_I_LENEXTENTS(inode) = elen; 279 UDF_I_LENEXTENTS(inode) = elen;
287 epos.bh = NULL; 280 epos.bh = NULL;
@@ -334,11 +327,12 @@ static int udf_get_block(struct inode *inode, sector_t block,
334 if (new) 327 if (new)
335 set_buffer_new(bh_result); 328 set_buffer_new(bh_result);
336 map_bh(bh_result, inode->i_sb, phys); 329 map_bh(bh_result, inode->i_sb, phys);
337 abort: 330
331abort:
338 unlock_kernel(); 332 unlock_kernel();
339 return err; 333 return err;
340 334
341 abort_negative: 335abort_negative:
342 udf_warning(inode->i_sb, "udf_get_block", "block < 0"); 336 udf_warning(inode->i_sb, "udf_get_block", "block < 0");
343 goto abort; 337 goto abort;
344} 338}
@@ -346,13 +340,13 @@ static int udf_get_block(struct inode *inode, sector_t block,
346static struct buffer_head *udf_getblk(struct inode *inode, long block, 340static struct buffer_head *udf_getblk(struct inode *inode, long block,
347 int create, int *err) 341 int create, int *err)
348{ 342{
343 struct buffer_head *bh;
349 struct buffer_head dummy; 344 struct buffer_head dummy;
350 345
351 dummy.b_state = 0; 346 dummy.b_state = 0;
352 dummy.b_blocknr = -1000; 347 dummy.b_blocknr = -1000;
353 *err = udf_get_block(inode, block, &dummy, create); 348 *err = udf_get_block(inode, block, &dummy, create);
354 if (!*err && buffer_mapped(&dummy)) { 349 if (!*err && buffer_mapped(&dummy)) {
355 struct buffer_head *bh;
356 bh = sb_getblk(inode->i_sb, dummy.b_blocknr); 350 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
357 if (buffer_new(&dummy)) { 351 if (buffer_new(&dummy)) {
358 lock_buffer(bh); 352 lock_buffer(bh);
@@ -363,6 +357,7 @@ static struct buffer_head *udf_getblk(struct inode *inode, long block,
363 } 357 }
364 return bh; 358 return bh;
365 } 359 }
360
366 return NULL; 361 return NULL;
367} 362}
368 363
@@ -373,42 +368,41 @@ int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
373 sector_t add; 368 sector_t add;
374 int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK); 369 int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
375 struct super_block *sb = inode->i_sb; 370 struct super_block *sb = inode->i_sb;
376 kernel_lb_addr prealloc_loc = { 0, 0 }; 371 kernel_lb_addr prealloc_loc = {};
377 int prealloc_len = 0; 372 int prealloc_len = 0;
378 373
379 /* The previous extent is fake and we should not extend by anything 374 /* The previous extent is fake and we should not extend by anything
380 * - there's nothing to do... */ 375 * - there's nothing to do... */
381 if (!blocks && fake) 376 if (!blocks && fake)
382 return 0; 377 return 0;
378
383 /* Round the last extent up to a multiple of block size */ 379 /* Round the last extent up to a multiple of block size */
384 if (last_ext->extLength & (sb->s_blocksize - 1)) { 380 if (last_ext->extLength & (sb->s_blocksize - 1)) {
385 last_ext->extLength = 381 last_ext->extLength =
386 (last_ext->extLength & UDF_EXTENT_FLAG_MASK) | 382 (last_ext->extLength & UDF_EXTENT_FLAG_MASK) |
387 (((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) + 383 (((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) +
388 sb->s_blocksize - 1) & ~(sb->s_blocksize - 1)); 384 sb->s_blocksize - 1) & ~(sb->s_blocksize - 1));
389 UDF_I_LENEXTENTS(inode) = 385 UDF_I_LENEXTENTS(inode) =
390 (UDF_I_LENEXTENTS(inode) + sb->s_blocksize - 1) & 386 (UDF_I_LENEXTENTS(inode) + sb->s_blocksize - 1) &
391 ~(sb->s_blocksize - 1); 387 ~(sb->s_blocksize - 1);
392 } 388 }
389
393 /* Last extent are just preallocated blocks? */ 390 /* Last extent are just preallocated blocks? */
394 if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == 391 if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_ALLOCATED) {
395 EXT_NOT_RECORDED_ALLOCATED) {
396 /* Save the extent so that we can reattach it to the end */ 392 /* Save the extent so that we can reattach it to the end */
397 prealloc_loc = last_ext->extLocation; 393 prealloc_loc = last_ext->extLocation;
398 prealloc_len = last_ext->extLength; 394 prealloc_len = last_ext->extLength;
399 /* Mark the extent as a hole */ 395 /* Mark the extent as a hole */
400 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | 396 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
401 (last_ext->extLength & UDF_EXTENT_LENGTH_MASK); 397 (last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
402 last_ext->extLocation.logicalBlockNum = 0; 398 last_ext->extLocation.logicalBlockNum = 0;
403 last_ext->extLocation.partitionReferenceNum = 0; 399 last_ext->extLocation.partitionReferenceNum = 0;
404 } 400 }
401
405 /* Can we merge with the previous extent? */ 402 /* Can we merge with the previous extent? */
406 if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == 403 if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_NOT_ALLOCATED) {
407 EXT_NOT_RECORDED_NOT_ALLOCATED) { 404 add = ((1 << 30) - sb->s_blocksize - (last_ext->extLength &
408 add = 405 UDF_EXTENT_LENGTH_MASK)) >> sb->s_blocksize_bits;
409 ((1 << 30) - sb->s_blocksize -
410 (last_ext->extLength & UDF_EXTENT_LENGTH_MASK)) >> sb->
411 s_blocksize_bits;
412 if (add > blocks) 406 if (add > blocks)
413 add = blocks; 407 add = blocks;
414 blocks -= add; 408 blocks -= add;
@@ -419,19 +413,20 @@ int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
419 udf_add_aext(inode, last_pos, last_ext->extLocation, 413 udf_add_aext(inode, last_pos, last_ext->extLocation,
420 last_ext->extLength, 1); 414 last_ext->extLength, 1);
421 count++; 415 count++;
422 } else 416 } else {
423 udf_write_aext(inode, last_pos, last_ext->extLocation, 417 udf_write_aext(inode, last_pos, last_ext->extLocation, last_ext->extLength, 1);
424 last_ext->extLength, 1); 418 }
419
425 /* Managed to do everything necessary? */ 420 /* Managed to do everything necessary? */
426 if (!blocks) 421 if (!blocks)
427 goto out; 422 goto out;
428 423
429 /* All further extents will be NOT_RECORDED_NOT_ALLOCATED */ 424 /* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
430 last_ext->extLocation.logicalBlockNum = 0; 425 last_ext->extLocation.logicalBlockNum = 0;
431 last_ext->extLocation.partitionReferenceNum = 0; 426 last_ext->extLocation.partitionReferenceNum = 0;
432 add = (1 << (30 - sb->s_blocksize_bits)) - 1; 427 add = (1 << (30-sb->s_blocksize_bits)) - 1;
433 last_ext->extLength = 428 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | (add << sb->s_blocksize_bits);
434 EXT_NOT_RECORDED_NOT_ALLOCATED | (add << sb->s_blocksize_bits); 429
435 /* Create enough extents to cover the whole hole */ 430 /* Create enough extents to cover the whole hole */
436 while (blocks > add) { 431 while (blocks > add) {
437 blocks -= add; 432 blocks -= add;
@@ -442,22 +437,23 @@ int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
442 } 437 }
443 if (blocks) { 438 if (blocks) {
444 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | 439 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
445 (blocks << sb->s_blocksize_bits); 440 (blocks << sb->s_blocksize_bits);
446 if (udf_add_aext(inode, last_pos, last_ext->extLocation, 441 if (udf_add_aext(inode, last_pos, last_ext->extLocation,
447 last_ext->extLength, 1) == -1) 442 last_ext->extLength, 1) == -1)
448 return -1; 443 return -1;
449 count++; 444 count++;
450 } 445 }
451 out: 446
447out:
452 /* Do we have some preallocated blocks saved? */ 448 /* Do we have some preallocated blocks saved? */
453 if (prealloc_len) { 449 if (prealloc_len) {
454 if (udf_add_aext(inode, last_pos, prealloc_loc, prealloc_len, 1) 450 if (udf_add_aext(inode, last_pos, prealloc_loc, prealloc_len, 1) == -1)
455 == -1)
456 return -1; 451 return -1;
457 last_ext->extLocation = prealloc_loc; 452 last_ext->extLocation = prealloc_loc;
458 last_ext->extLength = prealloc_len; 453 last_ext->extLength = prealloc_len;
459 count++; 454 count++;
460 } 455 }
456
461 /* last_pos should point to the last written extent... */ 457 /* last_pos should point to the last written extent... */
462 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT) 458 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
463 last_pos->offset -= sizeof(short_ad); 459 last_pos->offset -= sizeof(short_ad);
@@ -465,6 +461,7 @@ int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
465 last_pos->offset -= sizeof(long_ad); 461 last_pos->offset -= sizeof(long_ad);
466 else 462 else
467 return -1; 463 return -1;
464
468 return count; 465 return count;
469} 466}
470 467
@@ -490,7 +487,7 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
490 prev_epos.block = UDF_I_LOCATION(inode); 487 prev_epos.block = UDF_I_LOCATION(inode);
491 prev_epos.bh = NULL; 488 prev_epos.bh = NULL;
492 cur_epos = next_epos = prev_epos; 489 cur_epos = next_epos = prev_epos;
493 b_off = (loff_t) block << inode->i_sb->s_blocksize_bits; 490 b_off = (loff_t)block << inode->i_sb->s_blocksize_bits;
494 491
495 /* find the extent which contains the block we are looking for. 492 /* find the extent which contains the block we are looking for.
496 alternate between laarr[0] and laarr[1] for locations of the 493 alternate between laarr[0] and laarr[1] for locations of the
@@ -515,8 +512,7 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
515 prev_epos.offset = cur_epos.offset; 512 prev_epos.offset = cur_epos.offset;
516 cur_epos.offset = next_epos.offset; 513 cur_epos.offset = next_epos.offset;
517 514
518 if ((etype = 515 if ((etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1)) == -1)
519 udf_next_aext(inode, &next_epos, &eloc, &elen, 1)) == -1)
520 break; 516 break;
521 517
522 c = !c; 518 c = !c;
@@ -526,8 +522,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
526 522
527 if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) 523 if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
528 pgoal = eloc.logicalBlockNum + 524 pgoal = eloc.logicalBlockNum +
529 ((elen + inode->i_sb->s_blocksize - 1) >> 525 ((elen + inode->i_sb->s_blocksize - 1) >>
530 inode->i_sb->s_blocksize_bits); 526 inode->i_sb->s_blocksize_bits);
531 527
532 count++; 528 count++;
533 } while (lbcount + elen <= b_off); 529 } while (lbcount + elen <= b_off);
@@ -547,8 +543,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
547 if (etype == (EXT_RECORDED_ALLOCATED >> 30)) { 543 if (etype == (EXT_RECORDED_ALLOCATED >> 30)) {
548 if (elen & (inode->i_sb->s_blocksize - 1)) { 544 if (elen & (inode->i_sb->s_blocksize - 1)) {
549 elen = EXT_RECORDED_ALLOCATED | 545 elen = EXT_RECORDED_ALLOCATED |
550 ((elen + inode->i_sb->s_blocksize - 1) & 546 ((elen + inode->i_sb->s_blocksize - 1) &
551 ~(inode->i_sb->s_blocksize - 1)); 547 ~(inode->i_sb->s_blocksize - 1));
552 etype = udf_write_aext(inode, &cur_epos, eloc, elen, 1); 548 etype = udf_write_aext(inode, &cur_epos, eloc, elen, 1);
553 } 549 }
554 brelse(prev_epos.bh); 550 brelse(prev_epos.bh);
@@ -570,8 +566,7 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
570 startnum = 1; 566 startnum = 1;
571 } else { 567 } else {
572 /* Create a fake extent when there's not one */ 568 /* Create a fake extent when there's not one */
573 memset(&laarr[0].extLocation, 0x00, 569 memset(&laarr[0].extLocation, 0x00, sizeof(kernel_lb_addr));
574 sizeof(kernel_lb_addr));
575 laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED; 570 laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
576 /* Will udf_extend_file() create real extent from a fake one? */ 571 /* Will udf_extend_file() create real extent from a fake one? */
577 startnum = (offset > 0); 572 startnum = (offset > 0);
@@ -591,16 +586,14 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
591 offset = 0; 586 offset = 0;
592 count += ret; 587 count += ret;
593 /* We are not covered by a preallocated extent? */ 588 /* We are not covered by a preallocated extent? */
594 if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) != 589 if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) != EXT_NOT_RECORDED_ALLOCATED) {
595 EXT_NOT_RECORDED_ALLOCATED) {
596 /* Is there any real extent? - otherwise we overwrite 590 /* Is there any real extent? - otherwise we overwrite
597 * the fake one... */ 591 * the fake one... */
598 if (count) 592 if (count)
599 c = !c; 593 c = !c;
600 laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | 594 laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
601 inode->i_sb->s_blocksize; 595 inode->i_sb->s_blocksize;
602 memset(&laarr[c].extLocation, 0x00, 596 memset(&laarr[c].extLocation, 0x00, sizeof(kernel_lb_addr));
603 sizeof(kernel_lb_addr));
604 count++; 597 count++;
605 endnum++; 598 endnum++;
606 } 599 }
@@ -618,8 +611,7 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
618 } 611 }
619 612
620 /* if the current block is located in an extent, read the next extent */ 613 /* if the current block is located in an extent, read the next extent */
621 if ((etype = 614 if ((etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0)) != -1) {
622 udf_next_aext(inode, &next_epos, &eloc, &elen, 0)) != -1) {
623 laarr[c + 1].extLength = (etype << 30) | elen; 615 laarr[c + 1].extLength = (etype << 30) | elen;
624 laarr[c + 1].extLocation = eloc; 616 laarr[c + 1].extLocation = eloc;
625 count++; 617 count++;
@@ -631,24 +623,21 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
631 } 623 }
632 624
633 /* if the current extent is not recorded but allocated, get the 625 /* if the current extent is not recorded but allocated, get the
634 block in the extent corresponding to the requested block */ 626 * block in the extent corresponding to the requested block */
635 if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) 627 if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
636 newblocknum = laarr[c].extLocation.logicalBlockNum + offset; 628 newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
637 else { /* otherwise, allocate a new block */ 629 } else { /* otherwise, allocate a new block */
638
639 if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block) 630 if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block)
640 goal = UDF_I_NEXT_ALLOC_GOAL(inode); 631 goal = UDF_I_NEXT_ALLOC_GOAL(inode);
641 632
642 if (!goal) { 633 if (!goal) {
643 if (!(goal = pgoal)) 634 if (!(goal = pgoal))
644 goal = 635 goal = UDF_I_LOCATION(inode).logicalBlockNum + 1;
645 UDF_I_LOCATION(inode).logicalBlockNum + 1;
646 } 636 }
647 637
648 if (!(newblocknum = udf_new_block(inode->i_sb, inode, 638 if (!(newblocknum = udf_new_block(inode->i_sb, inode,
649 UDF_I_LOCATION(inode). 639 UDF_I_LOCATION(inode).partitionReferenceNum,
650 partitionReferenceNum, goal, 640 goal, err))) {
651 err))) {
652 brelse(prev_epos.bh); 641 brelse(prev_epos.bh);
653 *err = -ENOSPC; 642 *err = -ENOSPC;
654 return NULL; 643 return NULL;
@@ -657,8 +646,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
657 } 646 }
658 647
659 /* if the extent the requested block is located in contains multiple blocks, 648 /* if the extent the requested block is located in contains multiple blocks,
660 split the extent into at most three extents. blocks prior to requested 649 * split the extent into at most three extents. blocks prior to requested
661 block, requested block, and blocks after requested block */ 650 * block, requested block, and blocks after requested block */
662 udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum); 651 udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
663 652
664#ifdef UDF_PREALLOCATE 653#ifdef UDF_PREALLOCATE
@@ -670,15 +659,14 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
670 udf_merge_extents(inode, laarr, &endnum); 659 udf_merge_extents(inode, laarr, &endnum);
671 660
672 /* write back the new extents, inserting new extents if the new number 661 /* write back the new extents, inserting new extents if the new number
673 of extents is greater than the old number, and deleting extents if 662 * of extents is greater than the old number, and deleting extents if
674 the new number of extents is less than the old number */ 663 * the new number of extents is less than the old number */
675 udf_update_extents(inode, laarr, startnum, endnum, &prev_epos); 664 udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
676 665
677 brelse(prev_epos.bh); 666 brelse(prev_epos.bh);
678 667
679 if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum, 668 if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum,
680 UDF_I_LOCATION(inode). 669 UDF_I_LOCATION(inode).partitionReferenceNum, 0))) {
681 partitionReferenceNum, 0))) {
682 return NULL; 670 return NULL;
683 } 671 }
684 *phys = newblock; 672 *phys = newblock;
@@ -692,6 +680,7 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
692 udf_sync_inode(inode); 680 udf_sync_inode(inode);
693 else 681 else
694 mark_inode_dirty(inode); 682 mark_inode_dirty(inode);
683
695 return result; 684 return result;
696} 685}
697 686
@@ -701,16 +690,15 @@ static void udf_split_extents(struct inode *inode, int *c, int offset,
701 int *endnum) 690 int *endnum)
702{ 691{
703 if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) || 692 if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
704 (laarr[*c].extLength >> 30) == 693 (laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
705 (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
706 int curr = *c; 694 int curr = *c;
707 int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) + 695 int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
708 inode->i_sb->s_blocksize - 696 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
709 1) >> inode->i_sb->s_blocksize_bits;
710 int8_t etype = (laarr[curr].extLength >> 30); 697 int8_t etype = (laarr[curr].extLength >> 30);
711 698
712 if (blen == 1) ; 699 if (blen == 1) {
713 else if (!offset || blen == offset + 1) { 700 ;
701 } else if (!offset || blen == offset + 1) {
714 laarr[curr + 2] = laarr[curr + 1]; 702 laarr[curr + 2] = laarr[curr + 1];
715 laarr[curr + 1] = laarr[curr]; 703 laarr[curr + 1] = laarr[curr];
716 } else { 704 } else {
@@ -720,20 +708,15 @@ static void udf_split_extents(struct inode *inode, int *c, int offset,
720 708
721 if (offset) { 709 if (offset) {
722 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) { 710 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
723 udf_free_blocks(inode->i_sb, inode, 711 udf_free_blocks(inode->i_sb, inode, laarr[curr].extLocation, 0, offset);
724 laarr[curr].extLocation, 0, 712 laarr[curr].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
725 offset); 713 (offset << inode->i_sb->s_blocksize_bits);
726 laarr[curr].extLength =
727 EXT_NOT_RECORDED_NOT_ALLOCATED | (offset <<
728 inode->
729 i_sb->
730 s_blocksize_bits);
731 laarr[curr].extLocation.logicalBlockNum = 0; 714 laarr[curr].extLocation.logicalBlockNum = 0;
732 laarr[curr].extLocation.partitionReferenceNum = 715 laarr[curr].extLocation.partitionReferenceNum = 0;
733 0; 716 } else {
734 } else
735 laarr[curr].extLength = (etype << 30) | 717 laarr[curr].extLength = (etype << 30) |
736 (offset << inode->i_sb->s_blocksize_bits); 718 (offset << inode->i_sb->s_blocksize_bits);
719 }
737 curr++; 720 curr++;
738 (*c)++; 721 (*c)++;
739 (*endnum)++; 722 (*endnum)++;
@@ -742,18 +725,16 @@ static void udf_split_extents(struct inode *inode, int *c, int offset,
742 laarr[curr].extLocation.logicalBlockNum = newblocknum; 725 laarr[curr].extLocation.logicalBlockNum = newblocknum;
743 if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) 726 if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
744 laarr[curr].extLocation.partitionReferenceNum = 727 laarr[curr].extLocation.partitionReferenceNum =
745 UDF_I_LOCATION(inode).partitionReferenceNum; 728 UDF_I_LOCATION(inode).partitionReferenceNum;
746 laarr[curr].extLength = EXT_RECORDED_ALLOCATED | 729 laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
747 inode->i_sb->s_blocksize; 730 inode->i_sb->s_blocksize;
748 curr++; 731 curr++;
749 732
750 if (blen != offset + 1) { 733 if (blen != offset + 1) {
751 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) 734 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
752 laarr[curr].extLocation.logicalBlockNum += 735 laarr[curr].extLocation.logicalBlockNum += (offset + 1);
753 (offset + 1); 736 laarr[curr].extLength = (etype << 30) |
754 laarr[curr].extLength = 737 ((blen - (offset + 1)) << inode->i_sb->s_blocksize_bits);
755 (etype << 30) | ((blen - (offset + 1)) << inode->
756 i_sb->s_blocksize_bits);
757 curr++; 738 curr++;
758 (*endnum)++; 739 (*endnum)++;
759 } 740 }
@@ -772,90 +753,69 @@ static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
772 else 753 else
773 start = c; 754 start = c;
774 } else { 755 } else {
775 if ((laarr[c + 1].extLength >> 30) == 756 if ((laarr[c + 1].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
776 (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
777 start = c + 1; 757 start = c + 1;
778 length = currlength = 758 length = currlength = (((laarr[c + 1].extLength & UDF_EXTENT_LENGTH_MASK) +
779 (((laarr[c + 1]. 759 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
780 extLength & UDF_EXTENT_LENGTH_MASK) + 760 } else {
781 inode->i_sb->s_blocksize -
782 1) >> inode->i_sb->s_blocksize_bits);
783 } else
784 start = c; 761 start = c;
762 }
785 } 763 }
786 764
787 for (i = start + 1; i <= *endnum; i++) { 765 for (i = start + 1; i <= *endnum; i++) {
788 if (i == *endnum) { 766 if (i == *endnum) {
789 if (lastblock) 767 if (lastblock)
790 length += UDF_DEFAULT_PREALLOC_BLOCKS; 768 length += UDF_DEFAULT_PREALLOC_BLOCKS;
791 } else if ((laarr[i].extLength >> 30) == 769 } else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
792 (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) 770 length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
793 length += 771 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
794 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) + 772 } else {
795 inode->i_sb->s_blocksize -
796 1) >> inode->i_sb->s_blocksize_bits);
797 else
798 break; 773 break;
774 }
799 } 775 }
800 776
801 if (length) { 777 if (length) {
802 int next = laarr[start].extLocation.logicalBlockNum + 778 int next = laarr[start].extLocation.logicalBlockNum +
803 (((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) + 779 (((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
804 inode->i_sb->s_blocksize - 780 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
805 1) >> inode->i_sb->s_blocksize_bits);
806 int numalloc = udf_prealloc_blocks(inode->i_sb, inode, 781 int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
807 laarr[start].extLocation. 782 laarr[start].extLocation.partitionReferenceNum,
808 partitionReferenceNum, 783 next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length :
809 next, 784 UDF_DEFAULT_PREALLOC_BLOCKS) - currlength);
810 (UDF_DEFAULT_PREALLOC_BLOCKS 785 if (numalloc) {
811 > 786 if (start == (c + 1)) {
812 length ? length :
813 UDF_DEFAULT_PREALLOC_BLOCKS)
814 - currlength);
815
816 if (numalloc) {
817 if (start == (c + 1))
818 laarr[start].extLength += 787 laarr[start].extLength +=
819 (numalloc << inode->i_sb->s_blocksize_bits); 788 (numalloc << inode->i_sb->s_blocksize_bits);
820 else { 789 } else {
821 memmove(&laarr[c + 2], &laarr[c + 1], 790 memmove(&laarr[c + 2], &laarr[c + 1],
822 sizeof(long_ad) * (*endnum - (c + 1))); 791 sizeof(long_ad) * (*endnum - (c + 1)));
823 (*endnum)++; 792 (*endnum)++;
824 laarr[c + 1].extLocation.logicalBlockNum = next; 793 laarr[c + 1].extLocation.logicalBlockNum = next;
825 laarr[c + 1].extLocation.partitionReferenceNum = 794 laarr[c + 1].extLocation.partitionReferenceNum =
826 laarr[c].extLocation.partitionReferenceNum; 795 laarr[c].extLocation.partitionReferenceNum;
827 laarr[c + 1].extLength = 796 laarr[c + 1].extLength = EXT_NOT_RECORDED_ALLOCATED |
828 EXT_NOT_RECORDED_ALLOCATED | (numalloc << 797 (numalloc << inode->i_sb->s_blocksize_bits);
829 inode->i_sb->
830 s_blocksize_bits);
831 start = c + 1; 798 start = c + 1;
832 } 799 }
833 800
834 for (i = start + 1; numalloc && i < *endnum; i++) { 801 for (i = start + 1; numalloc && i < *endnum; i++) {
835 int elen = 802 int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
836 ((laarr[i]. 803 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
837 extLength & UDF_EXTENT_LENGTH_MASK) +
838 inode->i_sb->s_blocksize -
839 1) >> inode->i_sb->s_blocksize_bits;
840 804
841 if (elen > numalloc) { 805 if (elen > numalloc) {
842 laarr[i].extLength -= 806 laarr[i].extLength -=
843 (numalloc << inode->i_sb-> 807 (numalloc << inode->i_sb->s_blocksize_bits);
844 s_blocksize_bits);
845 numalloc = 0; 808 numalloc = 0;
846 } else { 809 } else {
847 numalloc -= elen; 810 numalloc -= elen;
848 if (*endnum > (i + 1)) 811 if (*endnum > (i + 1))
849 memmove(&laarr[i], 812 memmove(&laarr[i], &laarr[i + 1],
850 &laarr[i + 1], 813 sizeof(long_ad) * (*endnum - (i + 1)));
851 sizeof(long_ad) *
852 (*endnum - (i + 1)));
853 i--; 814 i--;
854 (*endnum)--; 815 (*endnum)--;
855 } 816 }
856 } 817 }
857 UDF_I_LENEXTENTS(inode) += 818 UDF_I_LENEXTENTS(inode) += numalloc << inode->i_sb->s_blocksize_bits;
858 numalloc << inode->i_sb->s_blocksize_bits;
859 } 819 }
860 } 820 }
861} 821}
@@ -867,119 +827,68 @@ static void udf_merge_extents(struct inode *inode,
867 int i; 827 int i;
868 828
869 for (i = 0; i < (*endnum - 1); i++) { 829 for (i = 0; i < (*endnum - 1); i++) {
870 if ((laarr[i].extLength >> 30) == 830 if ((laarr[i].extLength >> 30) == (laarr[i + 1].extLength >> 30)) {
871 (laarr[i + 1].extLength >> 30)) { 831 if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
872 if (((laarr[i].extLength >> 30) == 832 ((laarr[i + 1].extLocation.logicalBlockNum - laarr[i].extLocation.logicalBlockNum) ==
873 (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
874 ||
875 ((laarr[i + 1].extLocation.logicalBlockNum -
876 laarr[i].extLocation.logicalBlockNum) ==
877 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) + 833 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
878 inode->i_sb->s_blocksize - 834 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits))) {
879 1) >> inode->i_sb->s_blocksize_bits))) { 835 if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
880 if (((laarr[i]. 836 (laarr[i + 1].extLength & UDF_EXTENT_LENGTH_MASK) +
881 extLength & UDF_EXTENT_LENGTH_MASK) + 837 inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
882 (laarr[i + 1]. 838 laarr[i + 1].extLength = (laarr[i + 1].extLength -
883 extLength & UDF_EXTENT_LENGTH_MASK) + 839 (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
884 inode->i_sb->s_blocksize - 840 UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize - 1);
885 1) & ~UDF_EXTENT_LENGTH_MASK) { 841 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
886 laarr[i + 1].extLength = 842 (UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
887 (laarr[i + 1].extLength - 843 laarr[i + 1].extLocation.logicalBlockNum =
888 (laarr[i]. 844 laarr[i].extLocation.logicalBlockNum +
889 extLength & 845 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) >>
890 UDF_EXTENT_LENGTH_MASK) + 846 inode->i_sb->s_blocksize_bits);
891 UDF_EXTENT_LENGTH_MASK) & ~(inode->
892 i_sb->
893 s_blocksize
894 - 1);
895 laarr[i].extLength =
896 (laarr[i].
897 extLength & UDF_EXTENT_FLAG_MASK) +
898 (UDF_EXTENT_LENGTH_MASK + 1) -
899 inode->i_sb->s_blocksize;
900 laarr[i +
901 1].extLocation.logicalBlockNum =
902 laarr[i].extLocation.
903 logicalBlockNum +
904 ((laarr[i].
905 extLength &
906 UDF_EXTENT_LENGTH_MASK) >> inode->
907 i_sb->s_blocksize_bits);
908 } else { 847 } else {
909 laarr[i].extLength = 848 laarr[i].extLength = laarr[i + 1].extLength +
910 laarr[i + 1].extLength + 849 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
911 (((laarr[i]. 850 inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize - 1));
912 extLength &
913 UDF_EXTENT_LENGTH_MASK) +
914 inode->i_sb->s_blocksize -
915 1) & ~(inode->i_sb->s_blocksize -
916 1));
917 if (*endnum > (i + 2)) 851 if (*endnum > (i + 2))
918 memmove(&laarr[i + 1], 852 memmove(&laarr[i + 1], &laarr[i + 2],
919 &laarr[i + 2], 853 sizeof(long_ad) * (*endnum - (i + 2)));
920 sizeof(long_ad) *
921 (*endnum - (i + 2)));
922 i--; 854 i--;
923 (*endnum)--; 855 (*endnum)--;
924 } 856 }
925 } 857 }
926 } else 858 } else if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
927 if (((laarr[i].extLength >> 30) == 859 ((laarr[i + 1].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))) {
928 (EXT_NOT_RECORDED_ALLOCATED >> 30)) 860 udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
929 && ((laarr[i + 1].extLength >> 30) == 861 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
930 (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))) { 862 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
931 udf_free_blocks(inode->i_sb, inode,
932 laarr[i].extLocation, 0,
933 ((laarr[i].
934 extLength & UDF_EXTENT_LENGTH_MASK) +
935 inode->i_sb->s_blocksize -
936 1) >> inode->i_sb->s_blocksize_bits);
937 laarr[i].extLocation.logicalBlockNum = 0; 863 laarr[i].extLocation.logicalBlockNum = 0;
938 laarr[i].extLocation.partitionReferenceNum = 0; 864 laarr[i].extLocation.partitionReferenceNum = 0;
939 865
940 if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) + 866 if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
941 (laarr[i + 1].extLength & UDF_EXTENT_LENGTH_MASK) + 867 (laarr[i + 1].extLength & UDF_EXTENT_LENGTH_MASK) +
942 inode->i_sb->s_blocksize - 868 inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
943 1) & ~UDF_EXTENT_LENGTH_MASK) { 869 laarr[i + 1].extLength = (laarr[i + 1].extLength -
944 laarr[i + 1].extLength = 870 (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
945 (laarr[i + 1].extLength - 871 UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize - 1);
946 (laarr[i]. 872 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
947 extLength & UDF_EXTENT_LENGTH_MASK) + 873 (UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
948 UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->
949 s_blocksize -
950 1);
951 laarr[i].extLength =
952 (laarr[i].
953 extLength & UDF_EXTENT_FLAG_MASK) +
954 (UDF_EXTENT_LENGTH_MASK + 1) -
955 inode->i_sb->s_blocksize;
956 } else { 874 } else {
957 laarr[i].extLength = laarr[i + 1].extLength + 875 laarr[i].extLength = laarr[i + 1].extLength +
958 (((laarr[i]. 876 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
959 extLength & UDF_EXTENT_LENGTH_MASK) + 877 inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize - 1));
960 inode->i_sb->s_blocksize -
961 1) & ~(inode->i_sb->s_blocksize - 1));
962 if (*endnum > (i + 2)) 878 if (*endnum > (i + 2))
963 memmove(&laarr[i + 1], &laarr[i + 2], 879 memmove(&laarr[i + 1], &laarr[i + 2],
964 sizeof(long_ad) * (*endnum - 880 sizeof(long_ad) * (*endnum - (i + 2)));
965 (i + 2)));
966 i--; 881 i--;
967 (*endnum)--; 882 (*endnum)--;
968 } 883 }
969 } else if ((laarr[i].extLength >> 30) == 884 } else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
970 (EXT_NOT_RECORDED_ALLOCATED >> 30)) { 885 udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
971 udf_free_blocks(inode->i_sb, inode, 886 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
972 laarr[i].extLocation, 0, 887 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
973 ((laarr[i].
974 extLength & UDF_EXTENT_LENGTH_MASK) +
975 inode->i_sb->s_blocksize -
976 1) >> inode->i_sb->s_blocksize_bits);
977 laarr[i].extLocation.logicalBlockNum = 0; 888 laarr[i].extLocation.logicalBlockNum = 0;
978 laarr[i].extLocation.partitionReferenceNum = 0; 889 laarr[i].extLocation.partitionReferenceNum = 0;
979 laarr[i].extLength = 890 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) |
980 (laarr[i]. 891 EXT_NOT_RECORDED_NOT_ALLOCATED;
981 extLength & UDF_EXTENT_LENGTH_MASK) |
982 EXT_NOT_RECORDED_NOT_ALLOCATED;
983 } 892 }
984 } 893 }
985} 894}
@@ -1025,10 +934,13 @@ struct buffer_head *udf_bread(struct inode *inode, int block,
1025 934
1026 if (buffer_uptodate(bh)) 935 if (buffer_uptodate(bh))
1027 return bh; 936 return bh;
937
1028 ll_rw_block(READ, 1, &bh); 938 ll_rw_block(READ, 1, &bh);
939
1029 wait_on_buffer(bh); 940 wait_on_buffer(bh);
1030 if (buffer_uptodate(bh)) 941 if (buffer_uptodate(bh))
1031 return bh; 942 return bh;
943
1032 brelse(bh); 944 brelse(bh);
1033 *err = -EIO; 945 *err = -EIO;
1034 return NULL; 946 return NULL;
@@ -1047,26 +959,24 @@ void udf_truncate(struct inode *inode)
1047 959
1048 lock_kernel(); 960 lock_kernel();
1049 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) { 961 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) {
1050 if (inode->i_sb->s_blocksize < 962 if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
1051 (udf_file_entry_alloc_offset(inode) + inode->i_size)) { 963 inode->i_size)) {
1052 udf_expand_file_adinicb(inode, inode->i_size, &err); 964 udf_expand_file_adinicb(inode, inode->i_size, &err);
1053 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) { 965 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) {
1054 inode->i_size = UDF_I_LENALLOC(inode); 966 inode->i_size = UDF_I_LENALLOC(inode);
1055 unlock_kernel(); 967 unlock_kernel();
1056 return; 968 return;
1057 } else 969 } else {
1058 udf_truncate_extents(inode); 970 udf_truncate_extents(inode);
971 }
1059 } else { 972 } else {
1060 offset = inode->i_size & (inode->i_sb->s_blocksize - 1); 973 offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
1061 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + 974 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset, 0x00,
1062 offset, 0x00, 975 inode->i_sb->s_blocksize - offset - udf_file_entry_alloc_offset(inode));
1063 inode->i_sb->s_blocksize - offset -
1064 udf_file_entry_alloc_offset(inode));
1065 UDF_I_LENALLOC(inode) = inode->i_size; 976 UDF_I_LENALLOC(inode) = inode->i_size;
1066 } 977 }
1067 } else { 978 } else {
1068 block_truncate_page(inode->i_mapping, inode->i_size, 979 block_truncate_page(inode->i_mapping, inode->i_size, udf_get_block);
1069 udf_get_block);
1070 udf_truncate_extents(inode); 980 udf_truncate_extents(inode);
1071 } 981 }
1072 982
@@ -1097,7 +1007,6 @@ static void __udf_read_inode(struct inode *inode)
1097 * i_op = NULL; 1007 * i_op = NULL;
1098 */ 1008 */
1099 bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident); 1009 bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident);
1100
1101 if (!bh) { 1010 if (!bh) {
1102 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n", 1011 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
1103 inode->i_ino); 1012 inode->i_ino);
@@ -1107,8 +1016,7 @@ static void __udf_read_inode(struct inode *inode)
1107 1016
1108 if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE && 1017 if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
1109 ident != TAG_IDENT_USE) { 1018 ident != TAG_IDENT_USE) {
1110 printk(KERN_ERR 1019 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed ident=%d\n",
1111 "udf: udf_read_inode(ino %ld) failed ident=%d\n",
1112 inode->i_ino, ident); 1020 inode->i_ino, ident);
1113 brelse(bh); 1021 brelse(bh);
1114 make_bad_inode(inode); 1022 make_bad_inode(inode);
@@ -1121,9 +1029,7 @@ static void __udf_read_inode(struct inode *inode)
1121 struct buffer_head *ibh = NULL, *nbh = NULL; 1029 struct buffer_head *ibh = NULL, *nbh = NULL;
1122 struct indirectEntry *ie; 1030 struct indirectEntry *ie;
1123 1031
1124 ibh = 1032 ibh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1, &ident);
1125 udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1,
1126 &ident);
1127 if (ident == TAG_IDENT_IE) { 1033 if (ident == TAG_IDENT_IE) {
1128 if (ibh) { 1034 if (ibh) {
1129 kernel_lb_addr loc; 1035 kernel_lb_addr loc;
@@ -1132,13 +1038,10 @@ static void __udf_read_inode(struct inode *inode)
1132 loc = lelb_to_cpu(ie->indirectICB.extLocation); 1038 loc = lelb_to_cpu(ie->indirectICB.extLocation);
1133 1039
1134 if (ie->indirectICB.extLength && 1040 if (ie->indirectICB.extLength &&
1135 (nbh = 1041 (nbh = udf_read_ptagged(inode->i_sb, loc, 0, &ident))) {
1136 udf_read_ptagged(inode->i_sb, loc, 0, 1042 if (ident == TAG_IDENT_FE ||
1137 &ident))) { 1043 ident == TAG_IDENT_EFE) {
1138 if (ident == TAG_IDENT_FE 1044 memcpy(&UDF_I_LOCATION(inode), &loc,
1139 || ident == TAG_IDENT_EFE) {
1140 memcpy(&UDF_I_LOCATION(inode),
1141 &loc,
1142 sizeof(kernel_lb_addr)); 1045 sizeof(kernel_lb_addr));
1143 brelse(bh); 1046 brelse(bh);
1144 brelse(ibh); 1047 brelse(ibh);
@@ -1149,11 +1052,13 @@ static void __udf_read_inode(struct inode *inode)
1149 brelse(nbh); 1052 brelse(nbh);
1150 brelse(ibh); 1053 brelse(ibh);
1151 } 1054 }
1152 } else 1055 } else {
1153 brelse(ibh); 1056 brelse(ibh);
1057 }
1154 } 1058 }
1155 } else 1059 } else {
1156 brelse(ibh); 1060 brelse(ibh);
1061 }
1157 } else if (le16_to_cpu(fe->icbTag.strategyType) != 4) { 1062 } else if (le16_to_cpu(fe->icbTag.strategyType) != 4) {
1158 printk(KERN_ERR "udf: unsupported strategy type: %d\n", 1063 printk(KERN_ERR "udf: unsupported strategy type: %d\n",
1159 le16_to_cpu(fe->icbTag.strategyType)); 1064 le16_to_cpu(fe->icbTag.strategyType));
@@ -1179,11 +1084,10 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1179 1084
1180 if (le16_to_cpu(fe->icbTag.strategyType) == 4) 1085 if (le16_to_cpu(fe->icbTag.strategyType) == 4)
1181 UDF_I_STRAT4096(inode) = 0; 1086 UDF_I_STRAT4096(inode) = 0;
1182 else /* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */ 1087 else /* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */
1183 UDF_I_STRAT4096(inode) = 1; 1088 UDF_I_STRAT4096(inode) = 1;
1184 1089
1185 UDF_I_ALLOCTYPE(inode) = 1090 UDF_I_ALLOCTYPE(inode) = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK;
1186 le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK;
1187 UDF_I_UNIQUE(inode) = 0; 1091 UDF_I_UNIQUE(inode) = 0;
1188 UDF_I_LENEATTR(inode) = 0; 1092 UDF_I_LENEATTR(inode) = 0;
1189 UDF_I_LENEXTENTS(inode) = 0; 1093 UDF_I_LENEXTENTS(inode) = 0;
@@ -1193,23 +1097,16 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1193 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE) { 1097 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE) {
1194 UDF_I_EFE(inode) = 1; 1098 UDF_I_EFE(inode) = 1;
1195 UDF_I_USE(inode) = 0; 1099 UDF_I_USE(inode) = 0;
1196 if (udf_alloc_i_data 1100 if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry))) {
1197 (inode,
1198 inode->i_sb->s_blocksize -
1199 sizeof(struct extendedFileEntry))) {
1200 make_bad_inode(inode); 1101 make_bad_inode(inode);
1201 return; 1102 return;
1202 } 1103 }
1203 memcpy(UDF_I_DATA(inode), 1104 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct extendedFileEntry),
1204 bh->b_data + sizeof(struct extendedFileEntry), 1105 inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1205 inode->i_sb->s_blocksize -
1206 sizeof(struct extendedFileEntry));
1207 } else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE) { 1106 } else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE) {
1208 UDF_I_EFE(inode) = 0; 1107 UDF_I_EFE(inode) = 0;
1209 UDF_I_USE(inode) = 0; 1108 UDF_I_USE(inode) = 0;
1210 if (udf_alloc_i_data 1109 if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct fileEntry))) {
1211 (inode,
1212 inode->i_sb->s_blocksize - sizeof(struct fileEntry))) {
1213 make_bad_inode(inode); 1110 make_bad_inode(inode);
1214 return; 1111 return;
1215 } 1112 }
@@ -1219,19 +1116,13 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1219 UDF_I_EFE(inode) = 0; 1116 UDF_I_EFE(inode) = 0;
1220 UDF_I_USE(inode) = 1; 1117 UDF_I_USE(inode) = 1;
1221 UDF_I_LENALLOC(inode) = 1118 UDF_I_LENALLOC(inode) =
1222 le32_to_cpu(((struct unallocSpaceEntry *)bh->b_data)-> 1119 le32_to_cpu(((struct unallocSpaceEntry *)bh->b_data)->lengthAllocDescs);
1223 lengthAllocDescs); 1120 if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry))) {
1224 if (udf_alloc_i_data
1225 (inode,
1226 inode->i_sb->s_blocksize -
1227 sizeof(struct unallocSpaceEntry))) {
1228 make_bad_inode(inode); 1121 make_bad_inode(inode);
1229 return; 1122 return;
1230 } 1123 }
1231 memcpy(UDF_I_DATA(inode), 1124 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct unallocSpaceEntry),
1232 bh->b_data + sizeof(struct unallocSpaceEntry), 1125 inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
1233 inode->i_sb->s_blocksize -
1234 sizeof(struct unallocSpaceEntry));
1235 return; 1126 return;
1236 } 1127 }
1237 1128
@@ -1257,7 +1148,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1257 1148
1258 if (UDF_I_EFE(inode) == 0) { 1149 if (UDF_I_EFE(inode) == 0) {
1259 inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) << 1150 inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1260 (inode->i_sb->s_blocksize_bits - 9); 1151 (inode->i_sb->s_blocksize_bits - 9);
1261 1152
1262 if (udf_stamp_to_time(&convtime, &convtime_usec, 1153 if (udf_stamp_to_time(&convtime, &convtime_usec,
1263 lets_to_cpu(fe->accessTime))) { 1154 lets_to_cpu(fe->accessTime))) {
@@ -1326,78 +1217,56 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1326 UDF_I_UNIQUE(inode) = le64_to_cpu(efe->uniqueID); 1217 UDF_I_UNIQUE(inode) = le64_to_cpu(efe->uniqueID);
1327 UDF_I_LENEATTR(inode) = le32_to_cpu(efe->lengthExtendedAttr); 1218 UDF_I_LENEATTR(inode) = le32_to_cpu(efe->lengthExtendedAttr);
1328 UDF_I_LENALLOC(inode) = le32_to_cpu(efe->lengthAllocDescs); 1219 UDF_I_LENALLOC(inode) = le32_to_cpu(efe->lengthAllocDescs);
1329 offset = 1220 offset = sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
1330 sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
1331 } 1221 }
1332 1222
1333 switch (fe->icbTag.fileType) { 1223 switch (fe->icbTag.fileType) {
1334 case ICBTAG_FILE_TYPE_DIRECTORY: 1224 case ICBTAG_FILE_TYPE_DIRECTORY:
1335 { 1225 inode->i_op = &udf_dir_inode_operations;
1336 inode->i_op = &udf_dir_inode_operations; 1226 inode->i_fop = &udf_dir_operations;
1337 inode->i_fop = &udf_dir_operations; 1227 inode->i_mode |= S_IFDIR;
1338 inode->i_mode |= S_IFDIR; 1228 inc_nlink(inode);
1339 inc_nlink(inode); 1229 break;
1340 break;
1341 }
1342 case ICBTAG_FILE_TYPE_REALTIME: 1230 case ICBTAG_FILE_TYPE_REALTIME:
1343 case ICBTAG_FILE_TYPE_REGULAR: 1231 case ICBTAG_FILE_TYPE_REGULAR:
1344 case ICBTAG_FILE_TYPE_UNDEF: 1232 case ICBTAG_FILE_TYPE_UNDEF:
1345 { 1233 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
1346 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) 1234 inode->i_data.a_ops = &udf_adinicb_aops;
1347 inode->i_data.a_ops = &udf_adinicb_aops; 1235 else
1348 else 1236 inode->i_data.a_ops = &udf_aops;
1349 inode->i_data.a_ops = &udf_aops; 1237 inode->i_op = &udf_file_inode_operations;
1350 inode->i_op = &udf_file_inode_operations; 1238 inode->i_fop = &udf_file_operations;
1351 inode->i_fop = &udf_file_operations; 1239 inode->i_mode |= S_IFREG;
1352 inode->i_mode |= S_IFREG; 1240 break;
1353 break;
1354 }
1355 case ICBTAG_FILE_TYPE_BLOCK: 1241 case ICBTAG_FILE_TYPE_BLOCK:
1356 { 1242 inode->i_mode |= S_IFBLK;
1357 inode->i_mode |= S_IFBLK; 1243 break;
1358 break;
1359 }
1360 case ICBTAG_FILE_TYPE_CHAR: 1244 case ICBTAG_FILE_TYPE_CHAR:
1361 { 1245 inode->i_mode |= S_IFCHR;
1362 inode->i_mode |= S_IFCHR; 1246 break;
1363 break;
1364 }
1365 case ICBTAG_FILE_TYPE_FIFO: 1247 case ICBTAG_FILE_TYPE_FIFO:
1366 { 1248 init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
1367 init_special_inode(inode, inode->i_mode | S_IFIFO, 0); 1249 break;
1368 break;
1369 }
1370 case ICBTAG_FILE_TYPE_SOCKET: 1250 case ICBTAG_FILE_TYPE_SOCKET:
1371 { 1251 init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
1372 init_special_inode(inode, inode->i_mode | S_IFSOCK, 0); 1252 break;
1373 break;
1374 }
1375 case ICBTAG_FILE_TYPE_SYMLINK: 1253 case ICBTAG_FILE_TYPE_SYMLINK:
1376 { 1254 inode->i_data.a_ops = &udf_symlink_aops;
1377 inode->i_data.a_ops = &udf_symlink_aops; 1255 inode->i_op = &page_symlink_inode_operations;
1378 inode->i_op = &page_symlink_inode_operations; 1256 inode->i_mode = S_IFLNK | S_IRWXUGO;
1379 inode->i_mode = S_IFLNK | S_IRWXUGO; 1257 break;
1380 break;
1381 }
1382 default: 1258 default:
1383 { 1259 printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n",
1384 printk(KERN_ERR 1260 inode->i_ino, fe->icbTag.fileType);
1385 "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n", 1261 make_bad_inode(inode);
1386 inode->i_ino, fe->icbTag.fileType); 1262 return;
1387 make_bad_inode(inode);
1388 return;
1389 }
1390 } 1263 }
1391 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { 1264 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
1392 struct deviceSpec *dsea = (struct deviceSpec *) 1265 struct deviceSpec *dsea = (struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
1393 udf_get_extendedattr(inode, 12, 1);
1394
1395 if (dsea) { 1266 if (dsea) {
1396 init_special_inode(inode, inode->i_mode, 1267 init_special_inode(inode, inode->i_mode,
1397 MKDEV(le32_to_cpu 1268 MKDEV(le32_to_cpu(dsea->majorDeviceIdent),
1398 (dsea->majorDeviceIdent), 1269 le32_to_cpu(dsea->minorDeviceIdent)));
1399 le32_to_cpu(dsea->
1400 minorDeviceIdent)));
1401 /* Developer ID ??? */ 1270 /* Developer ID ??? */
1402 } else { 1271 } else {
1403 make_bad_inode(inode); 1272 make_bad_inode(inode);
@@ -1410,8 +1279,7 @@ static int udf_alloc_i_data(struct inode *inode, size_t size)
1410 UDF_I_DATA(inode) = kmalloc(size, GFP_KERNEL); 1279 UDF_I_DATA(inode) = kmalloc(size, GFP_KERNEL);
1411 1280
1412 if (!UDF_I_DATA(inode)) { 1281 if (!UDF_I_DATA(inode)) {
1413 printk(KERN_ERR 1282 printk(KERN_ERR "udf:udf_alloc_i_data (ino %ld) no free memory\n",
1414 "udf:udf_alloc_i_data (ino %ld) no free memory\n",
1415 inode->i_ino); 1283 inode->i_ino);
1416 return -ENOMEM; 1284 return -ENOMEM;
1417 } 1285 }
@@ -1428,12 +1296,12 @@ static mode_t udf_convert_permissions(struct fileEntry *fe)
1428 permissions = le32_to_cpu(fe->permissions); 1296 permissions = le32_to_cpu(fe->permissions);
1429 flags = le16_to_cpu(fe->icbTag.flags); 1297 flags = le16_to_cpu(fe->icbTag.flags);
1430 1298
1431 mode = ((permissions) & S_IRWXO) | 1299 mode = (( permissions ) & S_IRWXO) |
1432 ((permissions >> 2) & S_IRWXG) | 1300 (( permissions >> 2 ) & S_IRWXG) |
1433 ((permissions >> 4) & S_IRWXU) | 1301 (( permissions >> 4 ) & S_IRWXU) |
1434 ((flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) | 1302 (( flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
1435 ((flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) | 1303 (( flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
1436 ((flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0); 1304 (( flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
1437 1305
1438 return mode; 1306 return mode;
1439} 1307}
@@ -1456,9 +1324,11 @@ static mode_t udf_convert_permissions(struct fileEntry *fe)
1456int udf_write_inode(struct inode *inode, int sync) 1324int udf_write_inode(struct inode *inode, int sync)
1457{ 1325{
1458 int ret; 1326 int ret;
1327
1459 lock_kernel(); 1328 lock_kernel();
1460 ret = udf_update_inode(inode, sync); 1329 ret = udf_update_inode(inode, sync);
1461 unlock_kernel(); 1330 unlock_kernel();
1331
1462 return ret; 1332 return ret;
1463} 1333}
1464 1334
@@ -1479,10 +1349,7 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1479 kernel_timestamp cpu_time; 1349 kernel_timestamp cpu_time;
1480 int err = 0; 1350 int err = 0;
1481 1351
1482 bh = udf_tread(inode->i_sb, 1352 bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));
1483 udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode),
1484 0));
1485
1486 if (!bh) { 1353 if (!bh) {
1487 udf_debug("bread failure\n"); 1354 udf_debug("bread failure\n");
1488 return -EIO; 1355 return -EIO;
@@ -1495,27 +1362,21 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1495 1362
1496 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE) { 1363 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE) {
1497 struct unallocSpaceEntry *use = 1364 struct unallocSpaceEntry *use =
1498 (struct unallocSpaceEntry *)bh->b_data; 1365 (struct unallocSpaceEntry *)bh->b_data;
1499 1366
1500 use->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode)); 1367 use->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1501 memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), 1368 memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), UDF_I_DATA(inode),
1502 UDF_I_DATA(inode), 1369 inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
1503 inode->i_sb->s_blocksize - 1370 crclen = sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) - sizeof(tag);
1504 sizeof(struct unallocSpaceEntry)); 1371 use->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1505 crclen =
1506 sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) -
1507 sizeof(tag);
1508 use->descTag.tagLocation =
1509 cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1510 use->descTag.descCRCLength = cpu_to_le16(crclen); 1372 use->descTag.descCRCLength = cpu_to_le16(crclen);
1511 use->descTag.descCRC = 1373 use->descTag.descCRC = cpu_to_le16(udf_crc((char *)use + sizeof(tag), crclen, 0));
1512 cpu_to_le16(udf_crc((char *)use + sizeof(tag), crclen, 0));
1513 1374
1514 use->descTag.tagChecksum = 0; 1375 use->descTag.tagChecksum = 0;
1515 for (i = 0; i < 16; i++) 1376 for (i = 0; i < 16; i++) {
1516 if (i != 4) 1377 if (i != 4)
1517 use->descTag.tagChecksum += 1378 use->descTag.tagChecksum += ((uint8_t *)&(use->descTag))[i];
1518 ((uint8_t *) & (use->descTag))[i]; 1379 }
1519 1380
1520 mark_buffer_dirty(bh); 1381 mark_buffer_dirty(bh);
1521 brelse(bh); 1382 brelse(bh);
@@ -1532,13 +1393,14 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1532 else 1393 else
1533 fe->gid = cpu_to_le32(inode->i_gid); 1394 fe->gid = cpu_to_le32(inode->i_gid);
1534 1395
1535 udfperms = ((inode->i_mode & S_IRWXO)) | 1396 udfperms = ((inode->i_mode & S_IRWXO) ) |
1536 ((inode->i_mode & S_IRWXG) << 2) | ((inode->i_mode & S_IRWXU) << 4); 1397 ((inode->i_mode & S_IRWXG) << 2) |
1398 ((inode->i_mode & S_IRWXU) << 4);
1537 1399
1538 udfperms |= (le32_to_cpu(fe->permissions) & 1400 udfperms |= (le32_to_cpu(fe->permissions) &
1539 (FE_PERM_O_DELETE | FE_PERM_O_CHATTR | 1401 (FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
1540 FE_PERM_G_DELETE | FE_PERM_G_CHATTR | 1402 FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
1541 FE_PERM_U_DELETE | FE_PERM_U_CHATTR)); 1403 FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
1542 fe->permissions = cpu_to_le32(udfperms); 1404 fe->permissions = cpu_to_le32(udfperms);
1543 1405
1544 if (S_ISDIR(inode->i_mode)) 1406 if (S_ISDIR(inode->i_mode))
@@ -1550,22 +1412,20 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1550 1412
1551 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { 1413 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
1552 regid *eid; 1414 regid *eid;
1553 struct deviceSpec *dsea = (struct deviceSpec *) 1415 struct deviceSpec *dsea =
1554 udf_get_extendedattr(inode, 12, 1); 1416 (struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
1555
1556 if (!dsea) { 1417 if (!dsea) {
1557 dsea = (struct deviceSpec *) 1418 dsea = (struct deviceSpec *)
1558 udf_add_extendedattr(inode, 1419 udf_add_extendedattr(inode,
1559 sizeof(struct deviceSpec) + 1420 sizeof(struct deviceSpec) +
1560 sizeof(regid), 12, 0x3); 1421 sizeof(regid), 12, 0x3);
1561 dsea->attrType = cpu_to_le32(12); 1422 dsea->attrType = cpu_to_le32(12);
1562 dsea->attrSubtype = 1; 1423 dsea->attrSubtype = 1;
1563 dsea->attrLength = 1424 dsea->attrLength = cpu_to_le32(sizeof(struct deviceSpec) +
1564 cpu_to_le32(sizeof(struct deviceSpec) + 1425 sizeof(regid));
1565 sizeof(regid));
1566 dsea->impUseLength = cpu_to_le32(sizeof(regid)); 1426 dsea->impUseLength = cpu_to_le32(sizeof(regid));
1567 } 1427 }
1568 eid = (regid *) dsea->impUse; 1428 eid = (regid *)dsea->impUse;
1569 memset(eid, 0, sizeof(regid)); 1429 memset(eid, 0, sizeof(regid));
1570 strcpy(eid->ident, UDF_ID_DEVELOPER); 1430 strcpy(eid->ident, UDF_ID_DEVELOPER);
1571 eid->identSuffix[0] = UDF_OS_CLASS_UNIX; 1431 eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
@@ -1577,10 +1437,9 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1577 if (UDF_I_EFE(inode) == 0) { 1437 if (UDF_I_EFE(inode) == 0) {
1578 memcpy(bh->b_data + sizeof(struct fileEntry), UDF_I_DATA(inode), 1438 memcpy(bh->b_data + sizeof(struct fileEntry), UDF_I_DATA(inode),
1579 inode->i_sb->s_blocksize - sizeof(struct fileEntry)); 1439 inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1580 fe->logicalBlocksRecorded = 1440 fe->logicalBlocksRecorded = cpu_to_le64(
1581 cpu_to_le64((inode->i_blocks + 1441 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1582 (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1442 (inode->i_sb->s_blocksize_bits - 9));
1583 1) >> (inode->i_sb->s_blocksize_bits - 9));
1584 1443
1585 if (udf_time_to_stamp(&cpu_time, inode->i_atime)) 1444 if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1586 fe->accessTime = cpu_to_lets(cpu_time); 1445 fe->accessTime = cpu_to_lets(cpu_time);
@@ -1598,19 +1457,12 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1598 fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE); 1457 fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
1599 crclen = sizeof(struct fileEntry); 1458 crclen = sizeof(struct fileEntry);
1600 } else { 1459 } else {
1601 memcpy(bh->b_data + sizeof(struct extendedFileEntry), 1460 memcpy(bh->b_data + sizeof(struct extendedFileEntry), UDF_I_DATA(inode),
1602 UDF_I_DATA(inode), 1461 inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1603 inode->i_sb->s_blocksize -
1604 sizeof(struct extendedFileEntry));
1605 efe->objectSize = cpu_to_le64(inode->i_size); 1462 efe->objectSize = cpu_to_le64(inode->i_size);
1606 efe->logicalBlocksRecorded = cpu_to_le64((inode->i_blocks + 1463 efe->logicalBlocksRecorded = cpu_to_le64(
1607 (1 << 1464 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1608 (inode->i_sb-> 1465 (inode->i_sb->s_blocksize_bits - 9));
1609 s_blocksize_bits -
1610 9)) -
1611 1) >> (inode->i_sb->
1612 s_blocksize_bits
1613 - 9));
1614 1466
1615 if (UDF_I_CRTIME(inode).tv_sec > inode->i_atime.tv_sec || 1467 if (UDF_I_CRTIME(inode).tv_sec > inode->i_atime.tv_sec ||
1616 (UDF_I_CRTIME(inode).tv_sec == inode->i_atime.tv_sec && 1468 (UDF_I_CRTIME(inode).tv_sec == inode->i_atime.tv_sec &&
@@ -1671,13 +1523,13 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1671 else if (S_ISSOCK(inode->i_mode)) 1523 else if (S_ISSOCK(inode->i_mode))
1672 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET; 1524 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;
1673 1525
1674 icbflags = UDF_I_ALLOCTYPE(inode) | 1526 icbflags = UDF_I_ALLOCTYPE(inode) |
1675 ((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) | 1527 ((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
1676 ((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) | 1528 ((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
1677 ((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) | 1529 ((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
1678 (le16_to_cpu(fe->icbTag.flags) & 1530 (le16_to_cpu(fe->icbTag.flags) &
1679 ~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID | 1531 ~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
1680 ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY)); 1532 ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
1681 1533
1682 fe->icbTag.flags = cpu_to_le16(icbflags); 1534 fe->icbTag.flags = cpu_to_le16(icbflags);
1683 if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200) 1535 if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
@@ -1685,18 +1537,16 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1685 else 1537 else
1686 fe->descTag.descVersion = cpu_to_le16(2); 1538 fe->descTag.descVersion = cpu_to_le16(2);
1687 fe->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb)); 1539 fe->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
1688 fe->descTag.tagLocation = 1540 fe->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1689 cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1690 crclen += UDF_I_LENEATTR(inode) + UDF_I_LENALLOC(inode) - sizeof(tag); 1541 crclen += UDF_I_LENEATTR(inode) + UDF_I_LENALLOC(inode) - sizeof(tag);
1691 fe->descTag.descCRCLength = cpu_to_le16(crclen); 1542 fe->descTag.descCRCLength = cpu_to_le16(crclen);
1692 fe->descTag.descCRC = 1543 fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0));
1693 cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0));
1694 1544
1695 fe->descTag.tagChecksum = 0; 1545 fe->descTag.tagChecksum = 0;
1696 for (i = 0; i < 16; i++) 1546 for (i = 0; i < 16; i++) {
1697 if (i != 4) 1547 if (i != 4)
1698 fe->descTag.tagChecksum += 1548 fe->descTag.tagChecksum += ((uint8_t *)&(fe->descTag))[i];
1699 ((uint8_t *) & (fe->descTag))[i]; 1549 }
1700 1550
1701 /* write the data blocks */ 1551 /* write the data blocks */
1702 mark_buffer_dirty(bh); 1552 mark_buffer_dirty(bh);
@@ -1709,6 +1559,7 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1709 } 1559 }
1710 } 1560 }
1711 brelse(bh); 1561 brelse(bh);
1562
1712 return err; 1563 return err;
1713} 1564}
1714 1565
@@ -1729,8 +1580,7 @@ struct inode *udf_iget(struct super_block *sb, kernel_lb_addr ino)
1729 if (is_bad_inode(inode)) 1580 if (is_bad_inode(inode))
1730 goto out_iput; 1581 goto out_iput;
1731 1582
1732 if (ino.logicalBlockNum >= 1583 if (ino.logicalBlockNum >= UDF_SB_PARTLEN(sb, ino.partitionReferenceNum)) {
1733 UDF_SB_PARTLEN(sb, ino.partitionReferenceNum)) {
1734 udf_debug("block=%d, partition=%d out of range\n", 1584 udf_debug("block=%d, partition=%d out of range\n",
1735 ino.logicalBlockNum, ino.partitionReferenceNum); 1585 ino.logicalBlockNum, ino.partitionReferenceNum);
1736 make_bad_inode(inode); 1586 make_bad_inode(inode);
@@ -1739,7 +1589,7 @@ struct inode *udf_iget(struct super_block *sb, kernel_lb_addr ino)
1739 1589
1740 return inode; 1590 return inode;
1741 1591
1742 out_iput: 1592 out_iput:
1743 iput(inode); 1593 iput(inode);
1744 return NULL; 1594 return NULL;
1745} 1595}
@@ -1755,9 +1605,7 @@ int8_t udf_add_aext(struct inode * inode, struct extent_position * epos,
1755 uint8_t *ptr; 1605 uint8_t *ptr;
1756 1606
1757 if (!epos->bh) 1607 if (!epos->bh)
1758 ptr = 1608 ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1759 UDF_I_DATA(inode) + epos->offset -
1760 udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1761 else 1609 else
1762 ptr = epos->bh->b_data + epos->offset; 1610 ptr = epos->bh->b_data + epos->offset;
1763 1611
@@ -1774,18 +1622,13 @@ int8_t udf_add_aext(struct inode * inode, struct extent_position * epos,
1774 int err, loffset; 1622 int err, loffset;
1775 kernel_lb_addr obloc = epos->block; 1623 kernel_lb_addr obloc = epos->block;
1776 1624
1777 if (! 1625 if (!(epos->block.logicalBlockNum = udf_new_block(inode->i_sb, NULL,
1778 (epos->block.logicalBlockNum = 1626 obloc.partitionReferenceNum,
1779 udf_new_block(inode->i_sb, NULL, 1627 obloc.logicalBlockNum, &err))) {
1780 obloc.partitionReferenceNum,
1781 obloc.logicalBlockNum, &err))) {
1782 return -1; 1628 return -1;
1783 } 1629 }
1784 if (! 1630 if (!(nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
1785 (nbh = 1631 epos->block, 0)))) {
1786 udf_tgetblk(inode->i_sb,
1787 udf_get_lb_pblock(inode->i_sb, epos->block,
1788 0)))) {
1789 return -1; 1632 return -1;
1790 } 1633 }
1791 lock_buffer(nbh); 1634 lock_buffer(nbh);
@@ -1796,8 +1639,7 @@ int8_t udf_add_aext(struct inode * inode, struct extent_position * epos,
1796 1639
1797 aed = (struct allocExtDesc *)(nbh->b_data); 1640 aed = (struct allocExtDesc *)(nbh->b_data);
1798 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT)) 1641 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
1799 aed->previousAllocExtLocation = 1642 aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum);
1800 cpu_to_le32(obloc.logicalBlockNum);
1801 if (epos->offset + adsize > inode->i_sb->s_blocksize) { 1643 if (epos->offset + adsize > inode->i_sb->s_blocksize) {
1802 loffset = epos->offset; 1644 loffset = epos->offset;
1803 aed->lengthAllocDescs = cpu_to_le32(adsize); 1645 aed->lengthAllocDescs = cpu_to_le32(adsize);
@@ -1814,9 +1656,7 @@ int8_t udf_add_aext(struct inode * inode, struct extent_position * epos,
1814 if (epos->bh) { 1656 if (epos->bh) {
1815 aed = (struct allocExtDesc *)epos->bh->b_data; 1657 aed = (struct allocExtDesc *)epos->bh->b_data;
1816 aed->lengthAllocDescs = 1658 aed->lengthAllocDescs =
1817 cpu_to_le32(le32_to_cpu 1659 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
1818 (aed->lengthAllocDescs) +
1819 adsize);
1820 } else { 1660 } else {
1821 UDF_I_LENALLOC(inode) += adsize; 1661 UDF_I_LENALLOC(inode) += adsize;
1822 mark_inode_dirty(inode); 1662 mark_inode_dirty(inode);
@@ -1830,37 +1670,30 @@ int8_t udf_add_aext(struct inode * inode, struct extent_position * epos,
1830 epos->block.logicalBlockNum, sizeof(tag)); 1670 epos->block.logicalBlockNum, sizeof(tag));
1831 switch (UDF_I_ALLOCTYPE(inode)) { 1671 switch (UDF_I_ALLOCTYPE(inode)) {
1832 case ICBTAG_FLAG_AD_SHORT: 1672 case ICBTAG_FLAG_AD_SHORT:
1833 { 1673 sad = (short_ad *)sptr;
1834 sad = (short_ad *) sptr; 1674 sad->extLength = cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
1835 sad->extLength = 1675 inode->i_sb->s_blocksize);
1836 cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS | 1676 sad->extPosition = cpu_to_le32(epos->block.logicalBlockNum);
1837 inode->i_sb->s_blocksize); 1677 break;
1838 sad->extPosition =
1839 cpu_to_le32(epos->block.logicalBlockNum);
1840 break;
1841 }
1842 case ICBTAG_FLAG_AD_LONG: 1678 case ICBTAG_FLAG_AD_LONG:
1843 { 1679 lad = (long_ad *)sptr;
1844 lad = (long_ad *) sptr; 1680 lad->extLength = cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
1845 lad->extLength = 1681 inode->i_sb->s_blocksize);
1846 cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS | 1682 lad->extLocation = cpu_to_lelb(epos->block);
1847 inode->i_sb->s_blocksize); 1683 memset(lad->impUse, 0x00, sizeof(lad->impUse));
1848 lad->extLocation = cpu_to_lelb(epos->block); 1684 break;
1849 memset(lad->impUse, 0x00, sizeof(lad->impUse));
1850 break;
1851 }
1852 } 1685 }
1853 if (epos->bh) { 1686 if (epos->bh) {
1854 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) 1687 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
1855 || UDF_SB_UDFREV(inode->i_sb) >= 0x0201) 1688 UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1856 udf_update_tag(epos->bh->b_data, loffset); 1689 udf_update_tag(epos->bh->b_data, loffset);
1857 else 1690 else
1858 udf_update_tag(epos->bh->b_data, 1691 udf_update_tag(epos->bh->b_data, sizeof(struct allocExtDesc));
1859 sizeof(struct allocExtDesc));
1860 mark_buffer_dirty_inode(epos->bh, inode); 1692 mark_buffer_dirty_inode(epos->bh, inode);
1861 brelse(epos->bh); 1693 brelse(epos->bh);
1862 } else 1694 } else {
1863 mark_inode_dirty(inode); 1695 mark_inode_dirty(inode);
1696 }
1864 epos->bh = nbh; 1697 epos->bh = nbh;
1865 } 1698 }
1866 1699
@@ -1872,14 +1705,11 @@ int8_t udf_add_aext(struct inode * inode, struct extent_position * epos,
1872 } else { 1705 } else {
1873 aed = (struct allocExtDesc *)epos->bh->b_data; 1706 aed = (struct allocExtDesc *)epos->bh->b_data;
1874 aed->lengthAllocDescs = 1707 aed->lengthAllocDescs =
1875 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize); 1708 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
1876 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) 1709 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1877 || UDF_SB_UDFREV(inode->i_sb) >= 0x0201) 1710 udf_update_tag(epos->bh->b_data, epos->offset + (inc ? 0 : adsize));
1878 udf_update_tag(epos->bh->b_data,
1879 epos->offset + (inc ? 0 : adsize));
1880 else 1711 else
1881 udf_update_tag(epos->bh->b_data, 1712 udf_update_tag(epos->bh->b_data, sizeof(struct allocExtDesc));
1882 sizeof(struct allocExtDesc));
1883 mark_buffer_dirty_inode(epos->bh, inode); 1713 mark_buffer_dirty_inode(epos->bh, inode);
1884 } 1714 }
1885 1715
@@ -1891,51 +1721,47 @@ int8_t udf_write_aext(struct inode * inode, struct extent_position * epos,
1891{ 1721{
1892 int adsize; 1722 int adsize;
1893 uint8_t *ptr; 1723 uint8_t *ptr;
1724 short_ad *sad;
1725 long_ad *lad;
1894 1726
1895 if (!epos->bh) 1727 if (!epos->bh)
1896 ptr = 1728 ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1897 UDF_I_DATA(inode) + epos->offset -
1898 udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1899 else 1729 else
1900 ptr = epos->bh->b_data + epos->offset; 1730 ptr = epos->bh->b_data + epos->offset;
1901 1731
1902 switch (UDF_I_ALLOCTYPE(inode)) { 1732 switch (UDF_I_ALLOCTYPE(inode)) {
1903 case ICBTAG_FLAG_AD_SHORT: 1733 case ICBTAG_FLAG_AD_SHORT:
1904 { 1734 sad = (short_ad *)ptr;
1905 short_ad *sad = (short_ad *) ptr; 1735 sad->extLength = cpu_to_le32(elen);
1906 sad->extLength = cpu_to_le32(elen); 1736 sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
1907 sad->extPosition = cpu_to_le32(eloc.logicalBlockNum); 1737 adsize = sizeof(short_ad);
1908 adsize = sizeof(short_ad); 1738 break;
1909 break;
1910 }
1911 case ICBTAG_FLAG_AD_LONG: 1739 case ICBTAG_FLAG_AD_LONG:
1912 { 1740 lad = (long_ad *)ptr;
1913 long_ad *lad = (long_ad *) ptr; 1741 lad->extLength = cpu_to_le32(elen);
1914 lad->extLength = cpu_to_le32(elen); 1742 lad->extLocation = cpu_to_lelb(eloc);
1915 lad->extLocation = cpu_to_lelb(eloc); 1743 memset(lad->impUse, 0x00, sizeof(lad->impUse));
1916 memset(lad->impUse, 0x00, sizeof(lad->impUse)); 1744 adsize = sizeof(long_ad);
1917 adsize = sizeof(long_ad); 1745 break;
1918 break;
1919 }
1920 default: 1746 default:
1921 return -1; 1747 return -1;
1922 } 1748 }
1923 1749
1924 if (epos->bh) { 1750 if (epos->bh) {
1925 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) 1751 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
1926 || UDF_SB_UDFREV(inode->i_sb) >= 0x0201) { 1752 UDF_SB_UDFREV(inode->i_sb) >= 0x0201) {
1927 struct allocExtDesc *aed = 1753 struct allocExtDesc *aed = (struct allocExtDesc *)epos->bh->b_data;
1928 (struct allocExtDesc *)epos->bh->b_data;
1929 udf_update_tag(epos->bh->b_data, 1754 udf_update_tag(epos->bh->b_data,
1930 le32_to_cpu(aed->lengthAllocDescs) + 1755 le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc));
1931 sizeof(struct allocExtDesc));
1932 } 1756 }
1933 mark_buffer_dirty_inode(epos->bh, inode); 1757 mark_buffer_dirty_inode(epos->bh, inode);
1934 } else 1758 } else {
1935 mark_inode_dirty(inode); 1759 mark_inode_dirty(inode);
1760 }
1936 1761
1937 if (inc) 1762 if (inc)
1938 epos->offset += adsize; 1763 epos->offset += adsize;
1764
1939 return (elen >> 30); 1765 return (elen >> 30);
1940} 1766}
1941 1767
@@ -1949,14 +1775,9 @@ int8_t udf_next_aext(struct inode * inode, struct extent_position * epos,
1949 epos->block = *eloc; 1775 epos->block = *eloc;
1950 epos->offset = sizeof(struct allocExtDesc); 1776 epos->offset = sizeof(struct allocExtDesc);
1951 brelse(epos->bh); 1777 brelse(epos->bh);
1952 if (! 1778 if (!(epos->bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, epos->block, 0)))) {
1953 (epos->bh =
1954 udf_tread(inode->i_sb,
1955 udf_get_lb_pblock(inode->i_sb, epos->block,
1956 0)))) {
1957 udf_debug("reading block %d failed!\n", 1779 udf_debug("reading block %d failed!\n",
1958 udf_get_lb_pblock(inode->i_sb, epos->block, 1780 udf_get_lb_pblock(inode->i_sb, epos->block, 0));
1959 0));
1960 return -1; 1781 return -1;
1961 } 1782 }
1962 } 1783 }
@@ -1970,75 +1791,49 @@ int8_t udf_current_aext(struct inode * inode, struct extent_position * epos,
1970 int alen; 1791 int alen;
1971 int8_t etype; 1792 int8_t etype;
1972 uint8_t *ptr; 1793 uint8_t *ptr;
1794 short_ad *sad;
1795 long_ad *lad;
1796
1973 1797
1974 if (!epos->bh) { 1798 if (!epos->bh) {
1975 if (!epos->offset) 1799 if (!epos->offset)
1976 epos->offset = udf_file_entry_alloc_offset(inode); 1800 epos->offset = udf_file_entry_alloc_offset(inode);
1977 ptr = 1801 ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1978 UDF_I_DATA(inode) + epos->offset - 1802 alen = udf_file_entry_alloc_offset(inode) + UDF_I_LENALLOC(inode);
1979 udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1980 alen =
1981 udf_file_entry_alloc_offset(inode) + UDF_I_LENALLOC(inode);
1982 } else { 1803 } else {
1983 if (!epos->offset) 1804 if (!epos->offset)
1984 epos->offset = sizeof(struct allocExtDesc); 1805 epos->offset = sizeof(struct allocExtDesc);
1985 ptr = epos->bh->b_data + epos->offset; 1806 ptr = epos->bh->b_data + epos->offset;
1986 alen = 1807 alen = sizeof(struct allocExtDesc) +
1987 sizeof(struct allocExtDesc) + 1808 le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->lengthAllocDescs);
1988 le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->
1989 lengthAllocDescs);
1990 } 1809 }
1991 1810
1992 switch (UDF_I_ALLOCTYPE(inode)) { 1811 switch (UDF_I_ALLOCTYPE(inode)) {
1993 case ICBTAG_FLAG_AD_SHORT: 1812 case ICBTAG_FLAG_AD_SHORT:
1994 { 1813 if (!(sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc)))
1995 short_ad *sad; 1814 return -1;
1996 1815 etype = le32_to_cpu(sad->extLength) >> 30;
1997 if (! 1816 eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
1998 (sad = 1817 eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
1999 udf_get_fileshortad(ptr, alen, &epos->offset, 1818 *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
2000 inc))) 1819 break;
2001 return -1;
2002
2003 etype = le32_to_cpu(sad->extLength) >> 30;
2004 eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
2005 eloc->partitionReferenceNum =
2006 UDF_I_LOCATION(inode).partitionReferenceNum;
2007 *elen =
2008 le32_to_cpu(sad->
2009 extLength) & UDF_EXTENT_LENGTH_MASK;
2010 break;
2011 }
2012 case ICBTAG_FLAG_AD_LONG: 1820 case ICBTAG_FLAG_AD_LONG:
2013 { 1821 if (!(lad = udf_get_filelongad(ptr, alen, &epos->offset, inc)))
2014 long_ad *lad;
2015
2016 if (!
2017 (lad =
2018 udf_get_filelongad(ptr, alen, &epos->offset, inc)))
2019 return -1;
2020
2021 etype = le32_to_cpu(lad->extLength) >> 30;
2022 *eloc = lelb_to_cpu(lad->extLocation);
2023 *elen =
2024 le32_to_cpu(lad->
2025 extLength) & UDF_EXTENT_LENGTH_MASK;
2026 break;
2027 }
2028 default:
2029 {
2030 udf_debug("alloc_type = %d unsupported\n",
2031 UDF_I_ALLOCTYPE(inode));
2032 return -1; 1822 return -1;
2033 } 1823 etype = le32_to_cpu(lad->extLength) >> 30;
1824 *eloc = lelb_to_cpu(lad->extLocation);
1825 *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
1826 break;
1827 default:
1828 udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
1829 return -1;
2034 } 1830 }
2035 1831
2036 return etype; 1832 return etype;
2037} 1833}
2038 1834
2039static int8_t 1835static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos,
2040udf_insert_aext(struct inode *inode, struct extent_position epos, 1836 kernel_lb_addr neloc, uint32_t nelen)
2041 kernel_lb_addr neloc, uint32_t nelen)
2042{ 1837{
2043 kernel_lb_addr oeloc; 1838 kernel_lb_addr oeloc;
2044 uint32_t oelen; 1839 uint32_t oelen;
@@ -2049,12 +1844,12 @@ udf_insert_aext(struct inode *inode, struct extent_position epos,
2049 1844
2050 while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1) { 1845 while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1) {
2051 udf_write_aext(inode, &epos, neloc, nelen, 1); 1846 udf_write_aext(inode, &epos, neloc, nelen, 1);
2052
2053 neloc = oeloc; 1847 neloc = oeloc;
2054 nelen = (etype << 30) | oelen; 1848 nelen = (etype << 30) | oelen;
2055 } 1849 }
2056 udf_add_aext(inode, &epos, neloc, nelen, 1); 1850 udf_add_aext(inode, &epos, neloc, nelen, 1);
2057 brelse(epos.bh); 1851 brelse(epos.bh);
1852
2058 return (nelen >> 30); 1853 return (nelen >> 30);
2059} 1854}
2060 1855
@@ -2105,15 +1900,12 @@ int8_t udf_delete_aext(struct inode * inode, struct extent_position epos,
2105 } else { 1900 } else {
2106 aed = (struct allocExtDesc *)oepos.bh->b_data; 1901 aed = (struct allocExtDesc *)oepos.bh->b_data;
2107 aed->lengthAllocDescs = 1902 aed->lengthAllocDescs =
2108 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - 1903 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - (2 * adsize));
2109 (2 * adsize)); 1904 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
2110 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) 1905 UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
2111 || UDF_SB_UDFREV(inode->i_sb) >= 0x0201) 1906 udf_update_tag(oepos.bh->b_data, oepos.offset - (2 * adsize));
2112 udf_update_tag(oepos.bh->b_data,
2113 oepos.offset - (2 * adsize));
2114 else 1907 else
2115 udf_update_tag(oepos.bh->b_data, 1908 udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc));
2116 sizeof(struct allocExtDesc));
2117 mark_buffer_dirty_inode(oepos.bh, inode); 1909 mark_buffer_dirty_inode(oepos.bh, inode);
2118 } 1910 }
2119 } else { 1911 } else {
@@ -2124,21 +1916,19 @@ int8_t udf_delete_aext(struct inode * inode, struct extent_position epos,
2124 } else { 1916 } else {
2125 aed = (struct allocExtDesc *)oepos.bh->b_data; 1917 aed = (struct allocExtDesc *)oepos.bh->b_data;
2126 aed->lengthAllocDescs = 1918 aed->lengthAllocDescs =
2127 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - 1919 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - adsize);
2128 adsize); 1920 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
2129 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) 1921 UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
2130 || UDF_SB_UDFREV(inode->i_sb) >= 0x0201) 1922 udf_update_tag(oepos.bh->b_data, epos.offset - adsize);
2131 udf_update_tag(oepos.bh->b_data,
2132 epos.offset - adsize);
2133 else 1923 else
2134 udf_update_tag(oepos.bh->b_data, 1924 udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc));
2135 sizeof(struct allocExtDesc));
2136 mark_buffer_dirty_inode(oepos.bh, inode); 1925 mark_buffer_dirty_inode(oepos.bh, inode);
2137 } 1926 }
2138 } 1927 }
2139 1928
2140 brelse(epos.bh); 1929 brelse(epos.bh);
2141 brelse(oepos.bh); 1930 brelse(oepos.bh);
1931
2142 return (elen >> 30); 1932 return (elen >> 30);
2143} 1933}
2144 1934
@@ -2162,8 +1952,7 @@ int8_t inode_bmap(struct inode * inode, sector_t block,
2162 1952
2163 do { 1953 do {
2164 if ((etype = udf_next_aext(inode, pos, eloc, elen, 1)) == -1) { 1954 if ((etype = udf_next_aext(inode, pos, eloc, elen, 1)) == -1) {
2165 *offset = 1955 *offset = (bcount - lbcount) >> inode->i_sb->s_blocksize_bits;
2166 (bcount - lbcount) >> inode->i_sb->s_blocksize_bits;
2167 UDF_I_LENEXTENTS(inode) = lbcount; 1956 UDF_I_LENEXTENTS(inode) = lbcount;
2168 return -1; 1957 return -1;
2169 } 1958 }
@@ -2180,13 +1969,12 @@ long udf_block_map(struct inode *inode, sector_t block)
2180 kernel_lb_addr eloc; 1969 kernel_lb_addr eloc;
2181 uint32_t elen; 1970 uint32_t elen;
2182 sector_t offset; 1971 sector_t offset;
2183 struct extent_position epos = { NULL, 0, {0, 0} }; 1972 struct extent_position epos = {};
2184 int ret; 1973 int ret;
2185 1974
2186 lock_kernel(); 1975 lock_kernel();
2187 1976
2188 if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) == 1977 if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30))
2189 (EXT_RECORDED_ALLOCATED >> 30))
2190 ret = udf_get_lb_pblock(inode->i_sb, eloc, offset); 1978 ret = udf_get_lb_pblock(inode->i_sb, eloc, offset);
2191 else 1979 else
2192 ret = 0; 1980 ret = 0;
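
For reference, a minimal standalone sketch of the permission mapping that udf_convert_permissions() in the inode.c hunks above performs: UDF keeps the "other" bits in the low three bits, "group" shifted left by 2 and "owner" shifted left by 4, while setuid/setgid/sticky live in the ICB tag flags. The S_I* bits come from <sys/stat.h>; the ICBTAG_FLAG_* values are assumed from ecma_167.h and are not taken from this patch.

	/* sketch of the udf_convert_permissions() mapping shown above */
	#include <stdint.h>
	#include <sys/stat.h>

	#define ICBTAG_FLAG_SETUID 0x0040	/* assumed value from ecma_167.h */
	#define ICBTAG_FLAG_SETGID 0x0080	/* assumed value from ecma_167.h */
	#define ICBTAG_FLAG_STICKY 0x0100	/* assumed value from ecma_167.h */

	mode_t udf_perms_to_mode(uint32_t permissions, uint16_t flags)
	{
		return ((permissions)      & S_IRWXO) |
		       ((permissions >> 2) & S_IRWXG) |
		       ((permissions >> 4) & S_IRWXU) |
		       ((flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
		       ((flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
		       ((flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
	}
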
diff --git a/fs/udf/lowlevel.c b/fs/udf/lowlevel.c
index 4826c3616eef..579bae71e67e 100644
--- a/fs/udf/lowlevel.c
+++ b/fs/udf/lowlevel.c
@@ -43,7 +43,7 @@ unsigned int udf_get_last_session(struct super_block *sb)
43 udf_debug("XA disk: %s, vol_desc_start=%d\n", 43 udf_debug("XA disk: %s, vol_desc_start=%d\n",
44 (ms_info.xa_flag ? "yes" : "no"), ms_info.addr.lba); 44 (ms_info.xa_flag ? "yes" : "no"), ms_info.addr.lba);
45#if WE_OBEY_THE_WRITTEN_STANDARDS 45#if WE_OBEY_THE_WRITTEN_STANDARDS
46 if (ms_info.xa_flag) /* necessary for a valid ms_info.addr */ 46 if (ms_info.xa_flag) /* necessary for a valid ms_info.addr */
47#endif 47#endif
48 vol_desc_start = ms_info.addr.lba; 48 vol_desc_start = ms_info.addr.lba;
49 } else { 49 } else {
@@ -57,7 +57,7 @@ unsigned long udf_get_last_block(struct super_block *sb)
57 struct block_device *bdev = sb->s_bdev; 57 struct block_device *bdev = sb->s_bdev;
58 unsigned long lblock = 0; 58 unsigned long lblock = 0;
59 59
60 if (ioctl_by_bdev(bdev, CDROM_LAST_WRITTEN, (unsigned long)&lblock)) 60 if (ioctl_by_bdev(bdev, CDROM_LAST_WRITTEN, (unsigned long) &lblock))
61 lblock = bdev->bd_inode->i_size >> sb->s_blocksize_bits; 61 lblock = bdev->bd_inode->i_size >> sb->s_blocksize_bits;
62 62
63 if (lblock) 63 if (lblock)
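
The lowlevel.c hunk above only touches whitespace around udf_get_last_block(); the logic it formats is the fallback when the CDROM_LAST_WRITTEN ioctl is unavailable, where the last block is derived from the device size in bytes and the filesystem block-size shift. A minimal sketch of that fallback, with the parameters standing in for bdev->bd_inode->i_size and sb->s_blocksize_bits:

	/* sketch of the non-CD fallback in udf_get_last_block() above */
	#include <stdint.h>

	unsigned long udf_last_block_fallback(uint64_t device_bytes,
					      unsigned int blocksize_bits)
	{
		return (unsigned long)(device_bytes >> blocksize_bits);
	}
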
diff --git a/fs/udf/misc.c b/fs/udf/misc.c
index a7f57277a96e..15297deb5051 100644
--- a/fs/udf/misc.c
+++ b/fs/udf/misc.c
@@ -54,15 +54,15 @@ struct genericFormat *udf_add_extendedattr(struct inode *inode, uint32_t size,
54 int i; 54 int i;
55 55
56 ea = UDF_I_DATA(inode); 56 ea = UDF_I_DATA(inode);
57 if (UDF_I_LENEATTR(inode)) 57 if (UDF_I_LENEATTR(inode)) {
58 ad = UDF_I_DATA(inode) + UDF_I_LENEATTR(inode); 58 ad = UDF_I_DATA(inode) + UDF_I_LENEATTR(inode);
59 else { 59 } else {
60 ad = ea; 60 ad = ea;
61 size += sizeof(struct extendedAttrHeaderDesc); 61 size += sizeof(struct extendedAttrHeaderDesc);
62 } 62 }
63 63
64 offset = inode->i_sb->s_blocksize - udf_file_entry_alloc_offset(inode) - 64 offset = inode->i_sb->s_blocksize - udf_file_entry_alloc_offset(inode) -
65 UDF_I_LENALLOC(inode); 65 UDF_I_LENALLOC(inode);
66 66
67 /* TODO - Check for FreeEASpace */ 67 /* TODO - Check for FreeEASpace */
68 68
@@ -76,56 +76,45 @@ struct genericFormat *udf_add_extendedattr(struct inode *inode, uint32_t size,
76 76
77 if (UDF_I_LENEATTR(inode)) { 77 if (UDF_I_LENEATTR(inode)) {
78 /* check checksum/crc */ 78 /* check checksum/crc */
79 if (le16_to_cpu(eahd->descTag.tagIdent) != 79 if (le16_to_cpu(eahd->descTag.tagIdent) != TAG_IDENT_EAHD ||
80 TAG_IDENT_EAHD 80 le32_to_cpu(eahd->descTag.tagLocation) != UDF_I_LOCATION(inode).logicalBlockNum) {
81 || le32_to_cpu(eahd->descTag.tagLocation) !=
82 UDF_I_LOCATION(inode).logicalBlockNum) {
83 return NULL; 81 return NULL;
84 } 82 }
85 } else { 83 } else {
86 size -= sizeof(struct extendedAttrHeaderDesc); 84 size -= sizeof(struct extendedAttrHeaderDesc);
87 UDF_I_LENEATTR(inode) += 85 UDF_I_LENEATTR(inode) += sizeof(struct extendedAttrHeaderDesc);
88 sizeof(struct extendedAttrHeaderDesc);
89 eahd->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EAHD); 86 eahd->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EAHD);
90 if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200) 87 if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
91 eahd->descTag.descVersion = cpu_to_le16(3); 88 eahd->descTag.descVersion = cpu_to_le16(3);
92 else 89 else
93 eahd->descTag.descVersion = cpu_to_le16(2); 90 eahd->descTag.descVersion = cpu_to_le16(2);
94 eahd->descTag.tagSerialNum = 91 eahd->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
95 cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb)); 92 eahd->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
96 eahd->descTag.tagLocation =
97 cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
98 eahd->impAttrLocation = cpu_to_le32(0xFFFFFFFF); 93 eahd->impAttrLocation = cpu_to_le32(0xFFFFFFFF);
99 eahd->appAttrLocation = cpu_to_le32(0xFFFFFFFF); 94 eahd->appAttrLocation = cpu_to_le32(0xFFFFFFFF);
100 } 95 }
101 96
102 offset = UDF_I_LENEATTR(inode); 97 offset = UDF_I_LENEATTR(inode);
103 if (type < 2048) { 98 if (type < 2048) {
104 if (le32_to_cpu(eahd->appAttrLocation) < 99 if (le32_to_cpu(eahd->appAttrLocation) < UDF_I_LENEATTR(inode)) {
105 UDF_I_LENEATTR(inode)) { 100 uint32_t aal = le32_to_cpu(eahd->appAttrLocation);
106 uint32_t aal = 101 memmove(&ea[offset - aal + size],
107 le32_to_cpu(eahd->appAttrLocation); 102 &ea[aal], offset - aal);
108 memmove(&ea[offset - aal + size], &ea[aal],
109 offset - aal);
110 offset -= aal; 103 offset -= aal;
111 eahd->appAttrLocation = cpu_to_le32(aal + size); 104 eahd->appAttrLocation = cpu_to_le32(aal + size);
112 } 105 }
113 if (le32_to_cpu(eahd->impAttrLocation) < 106 if (le32_to_cpu(eahd->impAttrLocation) < UDF_I_LENEATTR(inode)) {
114 UDF_I_LENEATTR(inode)) { 107 uint32_t ial = le32_to_cpu(eahd->impAttrLocation);
115 uint32_t ial = 108 memmove(&ea[offset - ial + size],
116 le32_to_cpu(eahd->impAttrLocation); 109 &ea[ial], offset - ial);
117 memmove(&ea[offset - ial + size], &ea[ial],
118 offset - ial);
119 offset -= ial; 110 offset -= ial;
120 eahd->impAttrLocation = cpu_to_le32(ial + size); 111 eahd->impAttrLocation = cpu_to_le32(ial + size);
121 } 112 }
122 } else if (type < 65536) { 113 } else if (type < 65536) {
123 if (le32_to_cpu(eahd->appAttrLocation) < 114 if (le32_to_cpu(eahd->appAttrLocation) < UDF_I_LENEATTR(inode)) {
124 UDF_I_LENEATTR(inode)) { 115 uint32_t aal = le32_to_cpu(eahd->appAttrLocation);
125 uint32_t aal = 116 memmove(&ea[offset - aal + size],
126 le32_to_cpu(eahd->appAttrLocation); 117 &ea[aal], offset - aal);
127 memmove(&ea[offset - aal + size], &ea[aal],
128 offset - aal);
129 offset -= aal; 118 offset -= aal;
130 eahd->appAttrLocation = cpu_to_le32(aal + size); 119 eahd->appAttrLocation = cpu_to_le32(aal + size);
131 } 120 }
@@ -133,18 +122,18 @@ struct genericFormat *udf_add_extendedattr(struct inode *inode, uint32_t size,
133 /* rewrite CRC + checksum of eahd */ 122 /* rewrite CRC + checksum of eahd */
134 crclen = sizeof(struct extendedAttrHeaderDesc) - sizeof(tag); 123 crclen = sizeof(struct extendedAttrHeaderDesc) - sizeof(tag);
135 eahd->descTag.descCRCLength = cpu_to_le16(crclen); 124 eahd->descTag.descCRCLength = cpu_to_le16(crclen);
136 eahd->descTag.descCRC = 125 eahd->descTag.descCRC = cpu_to_le16(udf_crc((char *)eahd +
137 cpu_to_le16(udf_crc((char *)eahd + sizeof(tag), crclen, 0)); 126 sizeof(tag), crclen, 0));
138 eahd->descTag.tagChecksum = 0; 127 eahd->descTag.tagChecksum = 0;
139 for (i = 0; i < 16; i++) 128 for (i = 0; i < 16; i++)
140 if (i != 4) 129 if (i != 4)
141 eahd->descTag.tagChecksum += 130 eahd->descTag.tagChecksum += ((uint8_t *)&(eahd->descTag))[i];
142 ((uint8_t *) & (eahd->descTag))[i];
143 UDF_I_LENEATTR(inode) += size; 131 UDF_I_LENEATTR(inode) += size;
144 return (struct genericFormat *)&ea[offset]; 132 return (struct genericFormat *)&ea[offset];
145 } 133 }
146 if (loc & 0x02) { 134 if (loc & 0x02) {
147 } 135 }
136
148 return NULL; 137 return NULL;
149} 138}
150 139
@@ -163,8 +152,7 @@ struct genericFormat *udf_get_extendedattr(struct inode *inode, uint32_t type,
163 152
164 /* check checksum/crc */ 153 /* check checksum/crc */
165 if (le16_to_cpu(eahd->descTag.tagIdent) != TAG_IDENT_EAHD || 154 if (le16_to_cpu(eahd->descTag.tagIdent) != TAG_IDENT_EAHD ||
166 le32_to_cpu(eahd->descTag.tagLocation) != 155 le32_to_cpu(eahd->descTag.tagLocation) != UDF_I_LOCATION(inode).logicalBlockNum) {
167 UDF_I_LOCATION(inode).logicalBlockNum) {
168 return NULL; 156 return NULL;
169 } 157 }
170 158
@@ -177,13 +165,13 @@ struct genericFormat *udf_get_extendedattr(struct inode *inode, uint32_t type,
177 165
178 while (offset < UDF_I_LENEATTR(inode)) { 166 while (offset < UDF_I_LENEATTR(inode)) {
179 gaf = (struct genericFormat *)&ea[offset]; 167 gaf = (struct genericFormat *)&ea[offset];
180 if (le32_to_cpu(gaf->attrType) == type 168 if (le32_to_cpu(gaf->attrType) == type && gaf->attrSubtype == subtype)
181 && gaf->attrSubtype == subtype)
182 return gaf; 169 return gaf;
183 else 170 else
184 offset += le32_to_cpu(gaf->attrLength); 171 offset += le32_to_cpu(gaf->attrLength);
185 } 172 }
186 } 173 }
174
187 return NULL; 175 return NULL;
188} 176}
189 177
@@ -216,23 +204,22 @@ struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block,
216 return NULL; 204 return NULL;
217 } 205 }
218 206
219 tag_p = (tag *) (bh->b_data); 207 tag_p = (tag *)(bh->b_data);
220 208
221 *ident = le16_to_cpu(tag_p->tagIdent); 209 *ident = le16_to_cpu(tag_p->tagIdent);
222 210
223 if (location != le32_to_cpu(tag_p->tagLocation)) { 211 if (location != le32_to_cpu(tag_p->tagLocation)) {
224 udf_debug("location mismatch block %u, tag %u != %u\n", 212 udf_debug("location mismatch block %u, tag %u != %u\n",
225 block + UDF_SB_SESSION(sb), 213 block + UDF_SB_SESSION(sb), le32_to_cpu(tag_p->tagLocation), location);
226 le32_to_cpu(tag_p->tagLocation), location);
227 goto error_out; 214 goto error_out;
228 } 215 }
229 216
230 /* Verify the tag checksum */ 217 /* Verify the tag checksum */
231 checksum = 0U; 218 checksum = 0U;
232 for (i = 0; i < 4; i++) 219 for (i = 0; i < 4; i++)
233 checksum += (uint8_t) (bh->b_data[i]); 220 checksum += (uint8_t)(bh->b_data[i]);
234 for (i = 5; i < 16; i++) 221 for (i = 5; i < 16; i++)
235 checksum += (uint8_t) (bh->b_data[i]); 222 checksum += (uint8_t)(bh->b_data[i]);
236 if (checksum != tag_p->tagChecksum) { 223 if (checksum != tag_p->tagChecksum) {
237 printk(KERN_ERR "udf: tag checksum failed block %d\n", block); 224 printk(KERN_ERR "udf: tag checksum failed block %d\n", block);
238 goto error_out; 225 goto error_out;
@@ -249,16 +236,14 @@ struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block,
249 /* Verify the descriptor CRC */ 236 /* Verify the descriptor CRC */
250 if (le16_to_cpu(tag_p->descCRCLength) + sizeof(tag) > sb->s_blocksize || 237 if (le16_to_cpu(tag_p->descCRCLength) + sizeof(tag) > sb->s_blocksize ||
251 le16_to_cpu(tag_p->descCRC) == udf_crc(bh->b_data + sizeof(tag), 238 le16_to_cpu(tag_p->descCRC) == udf_crc(bh->b_data + sizeof(tag),
252 le16_to_cpu(tag_p-> 239 le16_to_cpu(tag_p->descCRCLength), 0)) {
253 descCRCLength),
254 0)) {
255 return bh; 240 return bh;
256 } 241 }
257 udf_debug("Crc failure block %d: crc = %d, crclen = %d\n", 242 udf_debug("Crc failure block %d: crc = %d, crclen = %d\n",
258 block + UDF_SB_SESSION(sb), le16_to_cpu(tag_p->descCRC), 243 block + UDF_SB_SESSION(sb), le16_to_cpu(tag_p->descCRC),
259 le16_to_cpu(tag_p->descCRCLength)); 244 le16_to_cpu(tag_p->descCRCLength));
260 245
261 error_out: 246error_out:
262 brelse(bh); 247 brelse(bh);
263 return NULL; 248 return NULL;
264} 249}
@@ -272,7 +257,7 @@ struct buffer_head *udf_read_ptagged(struct super_block *sb, kernel_lb_addr loc,
272 257
273void udf_update_tag(char *data, int length) 258void udf_update_tag(char *data, int length)
274{ 259{
275 tag *tptr = (tag *) data; 260 tag *tptr = (tag *)data;
276 int i; 261 int i;
277 262
278 length -= sizeof(tag); 263 length -= sizeof(tag);
@@ -283,13 +268,13 @@ void udf_update_tag(char *data, int length)
283 268
284 for (i = 0; i < 16; i++) 269 for (i = 0; i < 16; i++)
285 if (i != 4) 270 if (i != 4)
286 tptr->tagChecksum += (uint8_t) (data[i]); 271 tptr->tagChecksum += (uint8_t)(data[i]);
287} 272}
288 273
289void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum, 274void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
290 uint32_t loc, int length) 275 uint32_t loc, int length)
291{ 276{
292 tag *tptr = (tag *) data; 277 tag *tptr = (tag *)data;
293 tptr->tagIdent = cpu_to_le16(ident); 278 tptr->tagIdent = cpu_to_le16(ident);
294 tptr->descVersion = cpu_to_le16(version); 279 tptr->descVersion = cpu_to_le16(version);
295 tptr->tagSerialNum = cpu_to_le16(snum); 280 tptr->tagSerialNum = cpu_to_le16(snum);
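
The checksum loops reindented throughout these hunks (udf_update_tag() above, and the descTag loops in inode.c and misc.c) all compute the same thing: the descriptor tag checksum is the 8-bit sum of the 16 tag bytes, skipping byte 4, which holds the checksum itself. A minimal standalone sketch:

	/* sketch of the tag checksum computed by udf_update_tag() above */
	#include <stdint.h>

	uint8_t udf_tag_checksum(const uint8_t tag_bytes[16])
	{
		uint8_t sum = 0;
		int i;

		for (i = 0; i < 16; i++)
			if (i != 4)	/* byte 4 is tagChecksum itself */
				sum += tag_bytes[i];
		return sum;
	}
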
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 334d363a0903..bec96a6b3343 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -37,6 +37,7 @@ static inline int udf_match(int len1, const char *name1, int len2,
37{ 37{
38 if (len1 != len2) 38 if (len1 != len2)
39 return 0; 39 return 0;
40
40 return !memcmp(name1, name2, len1); 41 return !memcmp(name1, name2, len1);
41} 42}
42 43
@@ -52,7 +53,7 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
52 uint16_t liu = le16_to_cpu(cfi->lengthOfImpUse); 53 uint16_t liu = le16_to_cpu(cfi->lengthOfImpUse);
53 uint8_t lfi = cfi->lengthFileIdent; 54 uint8_t lfi = cfi->lengthFileIdent;
54 int padlen = fibh->eoffset - fibh->soffset - liu - lfi - 55 int padlen = fibh->eoffset - fibh->soffset - liu - lfi -
55 sizeof(struct fileIdentDesc); 56 sizeof(struct fileIdentDesc);
56 int adinicb = 0; 57 int adinicb = 0;
57 58
58 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) 59 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
@@ -61,85 +62,75 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
61 offset = fibh->soffset + sizeof(struct fileIdentDesc); 62 offset = fibh->soffset + sizeof(struct fileIdentDesc);
62 63
63 if (impuse) { 64 if (impuse) {
64 if (adinicb || (offset + liu < 0)) 65 if (adinicb || (offset + liu < 0)) {
65 memcpy((uint8_t *) sfi->impUse, impuse, liu); 66 memcpy((uint8_t *)sfi->impUse, impuse, liu);
66 else if (offset >= 0) 67 } else if (offset >= 0) {
67 memcpy(fibh->ebh->b_data + offset, impuse, liu); 68 memcpy(fibh->ebh->b_data + offset, impuse, liu);
68 else { 69 } else {
69 memcpy((uint8_t *) sfi->impUse, impuse, -offset); 70 memcpy((uint8_t *)sfi->impUse, impuse, -offset);
70 memcpy(fibh->ebh->b_data, impuse - offset, 71 memcpy(fibh->ebh->b_data, impuse - offset, liu + offset);
71 liu + offset);
72 } 72 }
73 } 73 }
74 74
75 offset += liu; 75 offset += liu;
76 76
77 if (fileident) { 77 if (fileident) {
78 if (adinicb || (offset + lfi < 0)) 78 if (adinicb || (offset + lfi < 0)) {
79 memcpy((uint8_t *) sfi->fileIdent + liu, fileident, 79 memcpy((uint8_t *)sfi->fileIdent + liu, fileident, lfi);
80 lfi); 80 } else if (offset >= 0) {
81 else if (offset >= 0)
82 memcpy(fibh->ebh->b_data + offset, fileident, lfi); 81 memcpy(fibh->ebh->b_data + offset, fileident, lfi);
83 else { 82 } else {
84 memcpy((uint8_t *) sfi->fileIdent + liu, fileident, 83 memcpy((uint8_t *)sfi->fileIdent + liu, fileident, -offset);
85 -offset); 84 memcpy(fibh->ebh->b_data, fileident - offset, lfi + offset);
86 memcpy(fibh->ebh->b_data, fileident - offset,
87 lfi + offset);
88 } 85 }
89 } 86 }
90 87
91 offset += lfi; 88 offset += lfi;
92 89
93 if (adinicb || (offset + padlen < 0)) 90 if (adinicb || (offset + padlen < 0)) {
94 memset((uint8_t *) sfi->padding + liu + lfi, 0x00, padlen); 91 memset((uint8_t *)sfi->padding + liu + lfi, 0x00, padlen);
95 else if (offset >= 0) 92 } else if (offset >= 0) {
96 memset(fibh->ebh->b_data + offset, 0x00, padlen); 93 memset(fibh->ebh->b_data + offset, 0x00, padlen);
97 else { 94 } else {
98 memset((uint8_t *) sfi->padding + liu + lfi, 0x00, -offset); 95 memset((uint8_t *)sfi->padding + liu + lfi, 0x00, -offset);
99 memset(fibh->ebh->b_data, 0x00, padlen + offset); 96 memset(fibh->ebh->b_data, 0x00, padlen + offset);
100 } 97 }
101 98
102 crc = 99 crc = udf_crc((uint8_t *)cfi + sizeof(tag),
103 udf_crc((uint8_t *) cfi + sizeof(tag), 100 sizeof(struct fileIdentDesc) - sizeof(tag), 0);
104 sizeof(struct fileIdentDesc) - sizeof(tag), 0); 101
105 102 if (fibh->sbh == fibh->ebh) {
106 if (fibh->sbh == fibh->ebh) 103 crc = udf_crc((uint8_t *)sfi->impUse,
107 crc = udf_crc((uint8_t *) sfi->impUse, 104 crclen + sizeof(tag) - sizeof(struct fileIdentDesc), crc);
108 crclen + sizeof(tag) - 105 } else if (sizeof(struct fileIdentDesc) >= -fibh->soffset) {
109 sizeof(struct fileIdentDesc), crc); 106 crc = udf_crc(fibh->ebh->b_data + sizeof(struct fileIdentDesc) + fibh->soffset,
110 else if (sizeof(struct fileIdentDesc) >= -fibh->soffset) 107 crclen + sizeof(tag) - sizeof(struct fileIdentDesc), crc);
111 crc = 108 } else {
112 udf_crc(fibh->ebh->b_data + sizeof(struct fileIdentDesc) + 109 crc = udf_crc((uint8_t *)sfi->impUse,
113 fibh->soffset, 110 -fibh->soffset - sizeof(struct fileIdentDesc), crc);
114 crclen + sizeof(tag) - sizeof(struct fileIdentDesc),
115 crc);
116 else {
117 crc = udf_crc((uint8_t *) sfi->impUse,
118 -fibh->soffset - sizeof(struct fileIdentDesc),
119 crc);
120 crc = udf_crc(fibh->ebh->b_data, fibh->eoffset, crc); 111 crc = udf_crc(fibh->ebh->b_data, fibh->eoffset, crc);
121 } 112 }
122 113
123 cfi->descTag.descCRC = cpu_to_le16(crc); 114 cfi->descTag.descCRC = cpu_to_le16(crc);
124 cfi->descTag.descCRCLength = cpu_to_le16(crclen); 115 cfi->descTag.descCRCLength = cpu_to_le16(crclen);
125 116
126 for (i = 0; i < 16; i++) 117 for (i = 0; i < 16; i++) {
127 if (i != 4) 118 if (i != 4)
128 checksum += ((uint8_t *) & cfi->descTag)[i]; 119 checksum += ((uint8_t *)&cfi->descTag)[i];
120 }
129 121
130 cfi->descTag.tagChecksum = checksum; 122 cfi->descTag.tagChecksum = checksum;
131 if (adinicb || (sizeof(struct fileIdentDesc) <= -fibh->soffset)) 123 if (adinicb || (sizeof(struct fileIdentDesc) <= -fibh->soffset)) {
132 memcpy((uint8_t *) sfi, (uint8_t *) cfi, 124 memcpy((uint8_t *)sfi, (uint8_t *)cfi, sizeof(struct fileIdentDesc));
133 sizeof(struct fileIdentDesc)); 125 } else {
134 else { 126 memcpy((uint8_t *)sfi, (uint8_t *)cfi, -fibh->soffset);
135 memcpy((uint8_t *) sfi, (uint8_t *) cfi, -fibh->soffset); 127 memcpy(fibh->ebh->b_data, (uint8_t *)cfi - fibh->soffset,
136 memcpy(fibh->ebh->b_data, (uint8_t *) cfi - fibh->soffset,
137 sizeof(struct fileIdentDesc) + fibh->soffset); 128 sizeof(struct fileIdentDesc) + fibh->soffset);
138 } 129 }
139 130
140 if (adinicb) 131 if (adinicb) {
141 mark_inode_dirty(inode); 132 mark_inode_dirty(inode);
142 else { 133 } else {
143 if (fibh->sbh != fibh->ebh) 134 if (fibh->sbh != fibh->ebh)
144 mark_buffer_dirty_inode(fibh->ebh, inode); 135 mark_buffer_dirty_inode(fibh->ebh, inode);
145 mark_buffer_dirty_inode(fibh->sbh, inode); 136 mark_buffer_dirty_inode(fibh->sbh, inode);
@@ -163,26 +154,25 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
163 kernel_lb_addr eloc; 154 kernel_lb_addr eloc;
164 uint32_t elen; 155 uint32_t elen;
165 sector_t offset; 156 sector_t offset;
166 struct extent_position epos = { NULL, 0, {0, 0} }; 157 struct extent_position epos = {};
167 158
168 size = (udf_ext0_offset(dir) + dir->i_size) >> 2; 159 size = (udf_ext0_offset(dir) + dir->i_size) >> 2;
169 f_pos = (udf_ext0_offset(dir) >> 2); 160 f_pos = (udf_ext0_offset(dir) >> 2);
170 161
171 fibh->soffset = fibh->eoffset = 162 fibh->soffset = fibh->eoffset = (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
172 (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2; 163 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) {
173 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
174 fibh->sbh = fibh->ebh = NULL; 164 fibh->sbh = fibh->ebh = NULL;
175 else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2), 165 } else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
176 &epos, &eloc, &elen, 166 &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30)) {
177 &offset) == (EXT_RECORDED_ALLOCATED >> 30)) {
178 block = udf_get_lb_pblock(dir->i_sb, eloc, offset); 167 block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
179 if ((++offset << dir->i_sb->s_blocksize_bits) < elen) { 168 if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
180 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT) 169 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT)
181 epos.offset -= sizeof(short_ad); 170 epos.offset -= sizeof(short_ad);
182 else if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_LONG) 171 else if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_LONG)
183 epos.offset -= sizeof(long_ad); 172 epos.offset -= sizeof(long_ad);
184 } else 173 } else {
185 offset = 0; 174 offset = 0;
175 }
186 176
187 if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block))) { 177 if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block))) {
188 brelse(epos.bh); 178 brelse(epos.bh);
@@ -196,7 +186,6 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
196 while ((f_pos < size)) { 186 while ((f_pos < size)) {
197 fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &epos, &eloc, 187 fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &epos, &eloc,
198 &elen, &offset); 188 &elen, &offset);
199
200 if (!fi) { 189 if (!fi) {
201 if (fibh->sbh != fibh->ebh) 190 if (fibh->sbh != fibh->ebh)
202 brelse(fibh->ebh); 191 brelse(fibh->ebh);
@@ -213,20 +202,14 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
213 } else { 202 } else {
214 int poffset; /* Unpadded ending offset */ 203 int poffset; /* Unpadded ending offset */
215 204
216 poffset = 205 poffset = fibh->soffset + sizeof(struct fileIdentDesc) + liu + lfi;
217 fibh->soffset + sizeof(struct fileIdentDesc) + liu +
218 lfi;
219 206
220 if (poffset >= lfi) 207 if (poffset >= lfi) {
221 nameptr = 208 nameptr = (uint8_t *)(fibh->ebh->b_data + poffset - lfi);
222 (uint8_t *) (fibh->ebh->b_data + poffset - 209 } else {
223 lfi);
224 else {
225 nameptr = fname; 210 nameptr = fname;
226 memcpy(nameptr, fi->fileIdent + liu, 211 memcpy(nameptr, fi->fileIdent + liu, lfi - poffset);
227 lfi - poffset); 212 memcpy(nameptr + lfi - poffset, fibh->ebh->b_data, poffset);
228 memcpy(nameptr + lfi - poffset,
229 fibh->ebh->b_data, poffset);
230 } 213 }
231 } 214 }
232 215
@@ -244,18 +227,18 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
244 continue; 227 continue;
245 228
246 if ((flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi))) { 229 if ((flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi))) {
247 if (udf_match 230 if (udf_match(flen, fname, dentry->d_name.len, dentry->d_name.name)) {
248 (flen, fname, dentry->d_name.len,
249 dentry->d_name.name)) {
250 brelse(epos.bh); 231 brelse(epos.bh);
251 return fi; 232 return fi;
252 } 233 }
253 } 234 }
254 } 235 }
236
255 if (fibh->sbh != fibh->ebh) 237 if (fibh->sbh != fibh->ebh)
256 brelse(fibh->ebh); 238 brelse(fibh->ebh);
257 brelse(fibh->sbh); 239 brelse(fibh->sbh);
258 brelse(epos.bh); 240 brelse(epos.bh);
241
259 return NULL; 242 return NULL;
260} 243}
261 244
@@ -306,15 +289,19 @@ static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry,
306#ifdef UDF_RECOVERY 289#ifdef UDF_RECOVERY
307 /* temporary shorthand for specifying files by inode number */ 290 /* temporary shorthand for specifying files by inode number */
308 if (!strncmp(dentry->d_name.name, ".B=", 3)) { 291 if (!strncmp(dentry->d_name.name, ".B=", 3)) {
309 kernel_lb_addr lb = 292 kernel_lb_addr lb = {
310 { 0, simple_strtoul(dentry->d_name.name + 3, NULL, 0) }; 293 .logicalBlockNum = 0,
294 .partitionReferenceNum = simple_strtoul(dentry->d_name.name + 3,
295 NULL, 0),
296 };
311 inode = udf_iget(dir->i_sb, lb); 297 inode = udf_iget(dir->i_sb, lb);
312 if (!inode) { 298 if (!inode) {
313 unlock_kernel(); 299 unlock_kernel();
314 return ERR_PTR(-EACCES); 300 return ERR_PTR(-EACCES);
315 } 301 }
316 } else 302 }
317#endif /* UDF_RECOVERY */ 303 else
304#endif /* UDF_RECOVERY */
318 305
319 if (udf_find_entry(dir, dentry, &fibh, &cfi)) { 306 if (udf_find_entry(dir, dentry, &fibh, &cfi)) {
320 if (fibh.sbh != fibh.ebh) 307 if (fibh.sbh != fibh.ebh)
@@ -329,6 +316,7 @@ static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry,
329 } 316 }
330 unlock_kernel(); 317 unlock_kernel();
331 d_add(dentry, inode); 318 d_add(dentry, inode);
319
332 return NULL; 320 return NULL;
333} 321}
334 322
@@ -352,7 +340,7 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
352 kernel_lb_addr eloc; 340 kernel_lb_addr eloc;
353 uint32_t elen; 341 uint32_t elen;
354 sector_t offset; 342 sector_t offset;
355 struct extent_position epos = { NULL, 0, {0, 0} }; 343 struct extent_position epos = {};
356 344
357 sb = dir->i_sb; 345 sb = dir->i_sb;
358 346
@@ -361,36 +349,33 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
361 *err = -EINVAL; 349 *err = -EINVAL;
362 return NULL; 350 return NULL;
363 } 351 }
364 352 if (!(namelen = udf_put_filename(sb, dentry->d_name.name, name,
365 if (! 353 dentry->d_name.len))) {
366 (namelen =
367 udf_put_filename(sb, dentry->d_name.name, name,
368 dentry->d_name.len))) {
369 *err = -ENAMETOOLONG; 354 *err = -ENAMETOOLONG;
370 return NULL; 355 return NULL;
371 } 356 }
372 } else 357 } else {
373 namelen = 0; 358 namelen = 0;
359 }
374 360
375 nfidlen = (sizeof(struct fileIdentDesc) + namelen + 3) & ~3; 361 nfidlen = (sizeof(struct fileIdentDesc) + namelen + 3) & ~3;
376 362
377 f_pos = (udf_ext0_offset(dir) >> 2); 363 f_pos = (udf_ext0_offset(dir) >> 2);
378 364
379 fibh->soffset = fibh->eoffset = 365 fibh->soffset = fibh->eoffset = (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
380 (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2; 366 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) {
381 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB)
382 fibh->sbh = fibh->ebh = NULL; 367 fibh->sbh = fibh->ebh = NULL;
383 else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2), 368 } else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
384 &epos, &eloc, &elen, 369 &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30)) {
385 &offset) == (EXT_RECORDED_ALLOCATED >> 30)) {
386 block = udf_get_lb_pblock(dir->i_sb, eloc, offset); 370 block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
387 if ((++offset << dir->i_sb->s_blocksize_bits) < elen) { 371 if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
388 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT) 372 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT)
389 epos.offset -= sizeof(short_ad); 373 epos.offset -= sizeof(short_ad);
390 else if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_LONG) 374 else if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_LONG)
391 epos.offset -= sizeof(long_ad); 375 epos.offset -= sizeof(long_ad);
392 } else 376 } else {
393 offset = 0; 377 offset = 0;
378 }
394 379
395 if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block))) { 380 if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block))) {
396 brelse(epos.bh); 381 brelse(epos.bh);
@@ -423,40 +408,33 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
423 liu = le16_to_cpu(cfi->lengthOfImpUse); 408 liu = le16_to_cpu(cfi->lengthOfImpUse);
424 lfi = cfi->lengthFileIdent; 409 lfi = cfi->lengthFileIdent;
425 410
426 if (fibh->sbh == fibh->ebh) 411 if (fibh->sbh == fibh->ebh) {
427 nameptr = fi->fileIdent + liu; 412 nameptr = fi->fileIdent + liu;
428 else { 413 } else {
429 int poffset; /* Unpadded ending offset */ 414 int poffset; /* Unpadded ending offset */
430 415
431 poffset = 416 poffset = fibh->soffset + sizeof(struct fileIdentDesc) + liu + lfi;
432 fibh->soffset + sizeof(struct fileIdentDesc) + liu +
433 lfi;
434 417
435 if (poffset >= lfi) 418 if (poffset >= lfi) {
436 nameptr = 419 nameptr = (char *)(fibh->ebh->b_data + poffset - lfi);
437 (char *)(fibh->ebh->b_data + poffset - lfi); 420 } else {
438 else {
439 nameptr = fname; 421 nameptr = fname;
440 memcpy(nameptr, fi->fileIdent + liu, 422 memcpy(nameptr, fi->fileIdent + liu, lfi - poffset);
441 lfi - poffset); 423 memcpy(nameptr + lfi - poffset, fibh->ebh->b_data, poffset);
442 memcpy(nameptr + lfi - poffset,
443 fibh->ebh->b_data, poffset);
444 } 424 }
445 } 425 }
446 426
447 if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) { 427 if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
448 if (((sizeof(struct fileIdentDesc) + liu + lfi + 428 if (((sizeof(struct fileIdentDesc) + liu + lfi + 3) & ~3) == nfidlen) {
449 3) & ~3) == nfidlen) {
450 brelse(epos.bh); 429 brelse(epos.bh);
451 cfi->descTag.tagSerialNum = cpu_to_le16(1); 430 cfi->descTag.tagSerialNum = cpu_to_le16(1);
452 cfi->fileVersionNum = cpu_to_le16(1); 431 cfi->fileVersionNum = cpu_to_le16(1);
453 cfi->fileCharacteristics = 0; 432 cfi->fileCharacteristics = 0;
454 cfi->lengthFileIdent = namelen; 433 cfi->lengthFileIdent = namelen;
455 cfi->lengthOfImpUse = cpu_to_le16(0); 434 cfi->lengthOfImpUse = cpu_to_le16(0);
456 if (!udf_write_fi 435 if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name)) {
457 (dir, cfi, fi, fibh, NULL, name))
458 return fi; 436 return fi;
459 else { 437 } else {
460 *err = -EIO; 438 *err = -EIO;
461 return NULL; 439 return NULL;
462 } 440 }
@@ -467,8 +445,7 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
467 continue; 445 continue;
468 446
469 if ((flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi)) && 447 if ((flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi)) &&
470 udf_match(flen, fname, dentry->d_name.len, 448 udf_match(flen, fname, dentry->d_name.len, dentry->d_name.name)) {
471 dentry->d_name.name)) {
472 if (fibh->sbh != fibh->ebh) 449 if (fibh->sbh != fibh->ebh)
473 brelse(fibh->ebh); 450 brelse(fibh->ebh);
474 brelse(fibh->sbh); 451 brelse(fibh->sbh);
@@ -478,7 +455,7 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
478 } 455 }
479 } 456 }
480 457
481 add: 458add:
482 f_pos += nfidlen; 459 f_pos += nfidlen;
483 460
484 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB && 461 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB &&
@@ -491,14 +468,11 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
491 if (fibh->sbh != fibh->ebh) 468 if (fibh->sbh != fibh->ebh)
492 brelse(fibh->ebh); 469 brelse(fibh->ebh);
493 brelse(fibh->sbh); 470 brelse(fibh->sbh);
494 if (! 471 if (!(fibh->sbh = fibh->ebh = udf_expand_dir_adinicb(dir, &block, err)))
495 (fibh->sbh = fibh->ebh =
496 udf_expand_dir_adinicb(dir, &block, err)))
497 return NULL; 472 return NULL;
498 epos.block = UDF_I_LOCATION(dir); 473 epos.block = UDF_I_LOCATION(dir);
499 eloc.logicalBlockNum = block; 474 eloc.logicalBlockNum = block;
500 eloc.partitionReferenceNum = 475 eloc.partitionReferenceNum = UDF_I_LOCATION(dir).partitionReferenceNum;
501 UDF_I_LOCATION(dir).partitionReferenceNum;
502 elen = dir->i_sb->s_blocksize; 476 elen = dir->i_sb->s_blocksize;
503 epos.offset = udf_file_entry_alloc_offset(dir); 477 epos.offset = udf_file_entry_alloc_offset(dir);
504 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT) 478 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT)
@@ -517,16 +491,13 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
517 491
518 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) { 492 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) {
519 block = UDF_I_LOCATION(dir).logicalBlockNum; 493 block = UDF_I_LOCATION(dir).logicalBlockNum;
520 fi = (struct fileIdentDesc *)(UDF_I_DATA(dir) + 494 fi = (struct fileIdentDesc *)(UDF_I_DATA(dir) + fibh->soffset -
521 fibh->soffset -
522 udf_ext0_offset(dir) + 495 udf_ext0_offset(dir) +
523 UDF_I_LENEATTR(dir)); 496 UDF_I_LENEATTR(dir));
524 } else { 497 } else {
525 block = eloc.logicalBlockNum + ((elen - 1) >> 498 block = eloc.logicalBlockNum + ((elen - 1) >>
526 dir->i_sb-> 499 dir->i_sb->s_blocksize_bits);
527 s_blocksize_bits); 500 fi = (struct fileIdentDesc *)(fibh->sbh->b_data + fibh->soffset);
528 fi = (struct fileIdentDesc *)(fibh->sbh->b_data +
529 fibh->soffset);
530 } 501 }
531 } else { 502 } else {
532 fibh->soffset = fibh->eoffset - sb->s_blocksize; 503 fibh->soffset = fibh->eoffset - sb->s_blocksize;
@@ -538,42 +509,36 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
538 509
539 block = eloc.logicalBlockNum + ((elen - 1) >> 510 block = eloc.logicalBlockNum + ((elen - 1) >>
540 dir->i_sb->s_blocksize_bits); 511 dir->i_sb->s_blocksize_bits);
541 512 fibh->ebh = udf_bread(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2), 1, err);
542 if (! 513 if (!fibh->ebh) {
543 (fibh->ebh =
544 udf_bread(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
545 1, err))) {
546 brelse(epos.bh); 514 brelse(epos.bh);
547 brelse(fibh->sbh); 515 brelse(fibh->sbh);
548 return NULL; 516 return NULL;
549 } 517 }
550 518
551 if (!(fibh->soffset)) { 519 if (!fibh->soffset) {
552 if (udf_next_aext(dir, &epos, &eloc, &elen, 1) == 520 if (udf_next_aext(dir, &epos, &eloc, &elen, 1) ==
553 (EXT_RECORDED_ALLOCATED >> 30)) { 521 (EXT_RECORDED_ALLOCATED >> 30)) {
554 block = eloc.logicalBlockNum + ((elen - 1) >> 522 block = eloc.logicalBlockNum + ((elen - 1) >>
555 dir->i_sb-> 523 dir->i_sb->s_blocksize_bits);
556 s_blocksize_bits); 524 } else {
557 } else
558 block++; 525 block++;
526 }
559 527
560 brelse(fibh->sbh); 528 brelse(fibh->sbh);
561 fibh->sbh = fibh->ebh; 529 fibh->sbh = fibh->ebh;
562 fi = (struct fileIdentDesc *)(fibh->sbh->b_data); 530 fi = (struct fileIdentDesc *)(fibh->sbh->b_data);
563 } else { 531 } else {
564 fi = (struct fileIdentDesc *) 532 fi = (struct fileIdentDesc *)
565 (fibh->sbh->b_data + sb->s_blocksize + 533 (fibh->sbh->b_data + sb->s_blocksize + fibh->soffset);
566 fibh->soffset);
567 } 534 }
568 } 535 }
569 536
570 memset(cfi, 0, sizeof(struct fileIdentDesc)); 537 memset(cfi, 0, sizeof(struct fileIdentDesc));
571 if (UDF_SB_UDFREV(sb) >= 0x0200) 538 if (UDF_SB_UDFREV(sb) >= 0x0200)
572 udf_new_tag((char *)cfi, TAG_IDENT_FID, 3, 1, block, 539 udf_new_tag((char *)cfi, TAG_IDENT_FID, 3, 1, block, sizeof(tag));
573 sizeof(tag));
574 else 540 else
575 udf_new_tag((char *)cfi, TAG_IDENT_FID, 2, 1, block, 541 udf_new_tag((char *)cfi, TAG_IDENT_FID, 2, 1, block, sizeof(tag));
576 sizeof(tag));
577 cfi->fileVersionNum = cpu_to_le16(1); 542 cfi->fileVersionNum = cpu_to_le16(1);
578 cfi->lengthFileIdent = namelen; 543 cfi->lengthFileIdent = namelen;
579 cfi->lengthOfImpUse = cpu_to_le16(0); 544 cfi->lengthOfImpUse = cpu_to_le16(0);
@@ -599,8 +564,10 @@ static int udf_delete_entry(struct inode *inode, struct fileIdentDesc *fi,
599 struct fileIdentDesc *cfi) 564 struct fileIdentDesc *cfi)
600{ 565{
601 cfi->fileCharacteristics |= FID_FILE_CHAR_DELETED; 566 cfi->fileCharacteristics |= FID_FILE_CHAR_DELETED;
567
602 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT)) 568 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
603 memset(&(cfi->icb), 0x00, sizeof(long_ad)); 569 memset(&(cfi->icb), 0x00, sizeof(long_ad));
570
604 return udf_write_fi(inode, cfi, fi, fibh, NULL, NULL); 571 return udf_write_fi(inode, cfi, fi, fibh, NULL, NULL);
605} 572}
606 573
@@ -637,8 +604,8 @@ static int udf_create(struct inode *dir, struct dentry *dentry, int mode,
637 } 604 }
638 cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); 605 cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
639 cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(inode)); 606 cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(inode));
640 *(__le32 *) ((struct allocDescImpUse *)cfi.icb.impUse)->impUse = 607 *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
641 cpu_to_le32(UDF_I_UNIQUE(inode) & 0x00000000FFFFFFFFUL); 608 cpu_to_le32(UDF_I_UNIQUE(inode) & 0x00000000FFFFFFFFUL);
642 udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); 609 udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
643 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) { 610 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) {
644 mark_inode_dirty(dir); 611 mark_inode_dirty(dir);
@@ -648,6 +615,7 @@ static int udf_create(struct inode *dir, struct dentry *dentry, int mode,
648 brelse(fibh.sbh); 615 brelse(fibh.sbh);
649 unlock_kernel(); 616 unlock_kernel();
650 d_instantiate(dentry, inode); 617 d_instantiate(dentry, inode);
618
651 return 0; 619 return 0;
652} 620}
653 621
@@ -679,8 +647,8 @@ static int udf_mknod(struct inode *dir, struct dentry *dentry, int mode,
679 } 647 }
680 cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); 648 cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
681 cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(inode)); 649 cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(inode));
682 *(__le32 *) ((struct allocDescImpUse *)cfi.icb.impUse)->impUse = 650 *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
683 cpu_to_le32(UDF_I_UNIQUE(inode) & 0x00000000FFFFFFFFUL); 651 cpu_to_le32(UDF_I_UNIQUE(inode) & 0x00000000FFFFFFFFUL);
684 udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); 652 udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
685 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) { 653 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) {
686 mark_inode_dirty(dir); 654 mark_inode_dirty(dir);
@@ -692,7 +660,8 @@ static int udf_mknod(struct inode *dir, struct dentry *dentry, int mode,
692 brelse(fibh.sbh); 660 brelse(fibh.sbh);
693 d_instantiate(dentry, inode); 661 d_instantiate(dentry, inode);
694 err = 0; 662 err = 0;
695 out: 663
664out:
696 unlock_kernel(); 665 unlock_kernel();
697 return err; 666 return err;
698} 667}
@@ -725,10 +694,9 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode)
725 inode->i_nlink = 2; 694 inode->i_nlink = 2;
726 cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); 695 cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
727 cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(dir)); 696 cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(dir));
728 *(__le32 *) ((struct allocDescImpUse *)cfi.icb.impUse)->impUse = 697 *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
729 cpu_to_le32(UDF_I_UNIQUE(dir) & 0x00000000FFFFFFFFUL); 698 cpu_to_le32(UDF_I_UNIQUE(dir) & 0x00000000FFFFFFFFUL);
730 cfi.fileCharacteristics = 699 cfi.fileCharacteristics = FID_FILE_CHAR_DIRECTORY | FID_FILE_CHAR_PARENT;
731 FID_FILE_CHAR_DIRECTORY | FID_FILE_CHAR_PARENT;
732 udf_write_fi(inode, &cfi, fi, &fibh, NULL, NULL); 700 udf_write_fi(inode, &cfi, fi, &fibh, NULL, NULL);
733 brelse(fibh.sbh); 701 brelse(fibh.sbh);
734 inode->i_mode = S_IFDIR | mode; 702 inode->i_mode = S_IFDIR | mode;
@@ -744,8 +712,8 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode)
744 } 712 }
745 cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); 713 cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
746 cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(inode)); 714 cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(inode));
747 *(__le32 *) ((struct allocDescImpUse *)cfi.icb.impUse)->impUse = 715 *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
748 cpu_to_le32(UDF_I_UNIQUE(inode) & 0x00000000FFFFFFFFUL); 716 cpu_to_le32(UDF_I_UNIQUE(inode) & 0x00000000FFFFFFFFUL);
749 cfi.fileCharacteristics |= FID_FILE_CHAR_DIRECTORY; 717 cfi.fileCharacteristics |= FID_FILE_CHAR_DIRECTORY;
750 udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); 718 udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
751 inc_nlink(dir); 719 inc_nlink(dir);
@@ -755,7 +723,8 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode)
755 brelse(fibh.ebh); 723 brelse(fibh.ebh);
756 brelse(fibh.sbh); 724 brelse(fibh.sbh);
757 err = 0; 725 err = 0;
758 out: 726
727out:
759 unlock_kernel(); 728 unlock_kernel();
760 return err; 729 return err;
761} 730}
@@ -770,26 +739,25 @@ static int empty_dir(struct inode *dir)
770 kernel_lb_addr eloc; 739 kernel_lb_addr eloc;
771 uint32_t elen; 740 uint32_t elen;
772 sector_t offset; 741 sector_t offset;
773 struct extent_position epos = { NULL, 0, {0, 0} }; 742 struct extent_position epos = {};
774 743
775 f_pos = (udf_ext0_offset(dir) >> 2); 744 f_pos = (udf_ext0_offset(dir) >> 2);
776 745
777 fibh.soffset = fibh.eoffset = 746 fibh.soffset = fibh.eoffset = (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
778 (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
779 747
780 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) 748 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) {
781 fibh.sbh = fibh.ebh = NULL; 749 fibh.sbh = fibh.ebh = NULL;
782 else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2), 750 } else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2),
783 &epos, &eloc, &elen, 751 &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30)) {
784 &offset) == (EXT_RECORDED_ALLOCATED >> 30)) {
785 block = udf_get_lb_pblock(dir->i_sb, eloc, offset); 752 block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
786 if ((++offset << dir->i_sb->s_blocksize_bits) < elen) { 753 if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
787 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT) 754 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT)
788 epos.offset -= sizeof(short_ad); 755 epos.offset -= sizeof(short_ad);
789 else if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_LONG) 756 else if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_LONG)
790 epos.offset -= sizeof(long_ad); 757 epos.offset -= sizeof(long_ad);
791 } else 758 } else {
792 offset = 0; 759 offset = 0;
760 }
793 761
794 if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block))) { 762 if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block))) {
795 brelse(epos.bh); 763 brelse(epos.bh);
@@ -803,7 +771,6 @@ static int empty_dir(struct inode *dir)
803 while ((f_pos < size)) { 771 while ((f_pos < size)) {
804 fi = udf_fileident_read(dir, &f_pos, &fibh, &cfi, &epos, &eloc, 772 fi = udf_fileident_read(dir, &f_pos, &fibh, &cfi, &epos, &eloc,
805 &elen, &offset); 773 &elen, &offset);
806
807 if (!fi) { 774 if (!fi) {
808 if (fibh.sbh != fibh.ebh) 775 if (fibh.sbh != fibh.ebh)
809 brelse(fibh.ebh); 776 brelse(fibh.ebh);
@@ -812,8 +779,8 @@ static int empty_dir(struct inode *dir)
812 return 0; 779 return 0;
813 } 780 }
814 781
815 if (cfi.lengthFileIdent 782 if (cfi.lengthFileIdent &&
816 && (cfi.fileCharacteristics & FID_FILE_CHAR_DELETED) == 0) { 783 (cfi.fileCharacteristics & FID_FILE_CHAR_DELETED) == 0) {
817 if (fibh.sbh != fibh.ebh) 784 if (fibh.sbh != fibh.ebh)
818 brelse(fibh.ebh); 785 brelse(fibh.ebh);
819 brelse(fibh.sbh); 786 brelse(fibh.sbh);
@@ -821,10 +788,12 @@ static int empty_dir(struct inode *dir)
821 return 0; 788 return 0;
822 } 789 }
823 } 790 }
791
824 if (fibh.sbh != fibh.ebh) 792 if (fibh.sbh != fibh.ebh)
825 brelse(fibh.ebh); 793 brelse(fibh.ebh);
826 brelse(fibh.sbh); 794 brelse(fibh.sbh);
827 brelse(epos.bh); 795 brelse(epos.bh);
796
828 return 1; 797 return 1;
829} 798}
830 799
@@ -859,15 +828,15 @@ static int udf_rmdir(struct inode *dir, struct dentry *dentry)
859 clear_nlink(inode); 828 clear_nlink(inode);
860 inode->i_size = 0; 829 inode->i_size = 0;
861 inode_dec_link_count(dir); 830 inode_dec_link_count(dir);
862 inode->i_ctime = dir->i_ctime = dir->i_mtime = 831 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_fs_time(dir->i_sb);
863 current_fs_time(dir->i_sb);
864 mark_inode_dirty(dir); 832 mark_inode_dirty(dir);
865 833
866 end_rmdir: 834end_rmdir:
867 if (fibh.sbh != fibh.ebh) 835 if (fibh.sbh != fibh.ebh)
868 brelse(fibh.ebh); 836 brelse(fibh.ebh);
869 brelse(fibh.sbh); 837 brelse(fibh.sbh);
870 out: 838
839out:
871 unlock_kernel(); 840 unlock_kernel();
872 return retval; 841 return retval;
873} 842}
@@ -906,11 +875,12 @@ static int udf_unlink(struct inode *dir, struct dentry *dentry)
906 inode->i_ctime = dir->i_ctime; 875 inode->i_ctime = dir->i_ctime;
907 retval = 0; 876 retval = 0;
908 877
909 end_unlink: 878end_unlink:
910 if (fibh.sbh != fibh.ebh) 879 if (fibh.sbh != fibh.ebh)
911 brelse(fibh.ebh); 880 brelse(fibh.ebh);
912 brelse(fibh.sbh); 881 brelse(fibh.sbh);
913 out: 882
883out:
914 unlock_kernel(); 884 unlock_kernel();
915 return retval; 885 return retval;
916} 886}
@@ -922,7 +892,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
922 struct pathComponent *pc; 892 struct pathComponent *pc;
923 char *compstart; 893 char *compstart;
924 struct udf_fileident_bh fibh; 894 struct udf_fileident_bh fibh;
925 struct extent_position epos = { NULL, 0, {0, 0} }; 895 struct extent_position epos = {};
926 int eoffset, elen = 0; 896 int eoffset, elen = 0;
927 struct fileIdentDesc *fi; 897 struct fileIdentDesc *fi;
928 struct fileIdentDesc cfi; 898 struct fileIdentDesc cfi;
@@ -945,26 +915,22 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
945 uint32_t elen; 915 uint32_t elen;
946 916
947 block = udf_new_block(inode->i_sb, inode, 917 block = udf_new_block(inode->i_sb, inode,
948 UDF_I_LOCATION(inode). 918 UDF_I_LOCATION(inode).partitionReferenceNum,
949 partitionReferenceNum, 919 UDF_I_LOCATION(inode).logicalBlockNum, &err);
950 UDF_I_LOCATION(inode).logicalBlockNum,
951 &err);
952 if (!block) 920 if (!block)
953 goto out_no_entry; 921 goto out_no_entry;
954 epos.block = UDF_I_LOCATION(inode); 922 epos.block = UDF_I_LOCATION(inode);
955 epos.offset = udf_file_entry_alloc_offset(inode); 923 epos.offset = udf_file_entry_alloc_offset(inode);
956 epos.bh = NULL; 924 epos.bh = NULL;
957 eloc.logicalBlockNum = block; 925 eloc.logicalBlockNum = block;
958 eloc.partitionReferenceNum = 926 eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
959 UDF_I_LOCATION(inode).partitionReferenceNum;
960 elen = inode->i_sb->s_blocksize; 927 elen = inode->i_sb->s_blocksize;
961 UDF_I_LENEXTENTS(inode) = elen; 928 UDF_I_LENEXTENTS(inode) = elen;
962 udf_add_aext(inode, &epos, eloc, elen, 0); 929 udf_add_aext(inode, &epos, eloc, elen, 0);
963 brelse(epos.bh); 930 brelse(epos.bh);
964 931
965 block = udf_get_pblock(inode->i_sb, block, 932 block = udf_get_pblock(inode->i_sb, block,
966 UDF_I_LOCATION(inode). 933 UDF_I_LOCATION(inode).partitionReferenceNum, 0);
967 partitionReferenceNum, 0);
968 epos.bh = udf_tread(inode->i_sb, block); 934 epos.bh = udf_tread(inode->i_sb, block);
969 lock_buffer(epos.bh); 935 lock_buffer(epos.bh);
970 memset(epos.bh->b_data, 0x00, inode->i_sb->s_blocksize); 936 memset(epos.bh->b_data, 0x00, inode->i_sb->s_blocksize);
@@ -972,8 +938,9 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
972 unlock_buffer(epos.bh); 938 unlock_buffer(epos.bh);
973 mark_buffer_dirty_inode(epos.bh, inode); 939 mark_buffer_dirty_inode(epos.bh, inode);
974 ea = epos.bh->b_data + udf_ext0_offset(inode); 940 ea = epos.bh->b_data + udf_ext0_offset(inode);
975 } else 941 } else {
976 ea = UDF_I_DATA(inode) + UDF_I_LENEATTR(inode); 942 ea = UDF_I_DATA(inode) + UDF_I_LENEATTR(inode);
943 }
977 944
978 eoffset = inode->i_sb->s_blocksize - udf_ext0_offset(inode); 945 eoffset = inode->i_sb->s_blocksize - udf_ext0_offset(inode);
979 pc = (struct pathComponent *)ea; 946 pc = (struct pathComponent *)ea;
@@ -1010,20 +977,17 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
1010 if (compstart[0] == '.') { 977 if (compstart[0] == '.') {
1011 if ((symname - compstart) == 1) 978 if ((symname - compstart) == 1)
1012 pc->componentType = 4; 979 pc->componentType = 4;
1013 else if ((symname - compstart) == 2 980 else if ((symname - compstart) == 2 && compstart[1] == '.')
1014 && compstart[1] == '.')
1015 pc->componentType = 3; 981 pc->componentType = 3;
1016 } 982 }
1017 983
1018 if (pc->componentType == 5) { 984 if (pc->componentType == 5) {
1019 if (! 985 namelen = udf_put_filename(inode->i_sb, compstart, name,
1020 (namelen = 986 symname - compstart);
1021 udf_put_filename(inode->i_sb, compstart, name, 987 if (!namelen)
1022 symname - compstart)))
1023 goto out_no_entry; 988 goto out_no_entry;
1024 989
1025 if (elen + sizeof(struct pathComponent) + namelen > 990 if (elen + sizeof(struct pathComponent) + namelen > eoffset)
1026 eoffset)
1027 goto out_no_entry; 991 goto out_no_entry;
1028 else 992 else
1029 pc->lengthComponentIdent = namelen; 993 pc->lengthComponentIdent = namelen;
@@ -1053,12 +1017,10 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
1053 if (UDF_SB_LVIDBH(inode->i_sb)) { 1017 if (UDF_SB_LVIDBH(inode->i_sb)) {
1054 struct logicalVolHeaderDesc *lvhd; 1018 struct logicalVolHeaderDesc *lvhd;
1055 uint64_t uniqueID; 1019 uint64_t uniqueID;
1056 lvhd = 1020 lvhd = (struct logicalVolHeaderDesc *)(UDF_SB_LVID(inode->i_sb)->logicalVolContentsUse);
1057 (struct logicalVolHeaderDesc *)(UDF_SB_LVID(inode->i_sb)->
1058 logicalVolContentsUse);
1059 uniqueID = le64_to_cpu(lvhd->uniqueID); 1021 uniqueID = le64_to_cpu(lvhd->uniqueID);
1060 *(__le32 *) ((struct allocDescImpUse *)cfi.icb.impUse)->impUse = 1022 *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
1061 cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL); 1023 cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL);
1062 if (!(++uniqueID & 0x00000000FFFFFFFFUL)) 1024 if (!(++uniqueID & 0x00000000FFFFFFFFUL))
1063 uniqueID += 16; 1025 uniqueID += 16;
1064 lvhd->uniqueID = cpu_to_le64(uniqueID); 1026 lvhd->uniqueID = cpu_to_le64(uniqueID);
@@ -1074,11 +1036,11 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
1074 d_instantiate(dentry, inode); 1036 d_instantiate(dentry, inode);
1075 err = 0; 1037 err = 0;
1076 1038
1077 out: 1039out:
1078 unlock_kernel(); 1040 unlock_kernel();
1079 return err; 1041 return err;
1080 1042
1081 out_no_entry: 1043out_no_entry:
1082 inode_dec_link_count(inode); 1044 inode_dec_link_count(inode);
1083 iput(inode); 1045 iput(inode);
1084 goto out; 1046 goto out;
@@ -1107,12 +1069,10 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir,
1107 if (UDF_SB_LVIDBH(inode->i_sb)) { 1069 if (UDF_SB_LVIDBH(inode->i_sb)) {
1108 struct logicalVolHeaderDesc *lvhd; 1070 struct logicalVolHeaderDesc *lvhd;
1109 uint64_t uniqueID; 1071 uint64_t uniqueID;
1110 lvhd = 1072 lvhd = (struct logicalVolHeaderDesc *)(UDF_SB_LVID(inode->i_sb)->logicalVolContentsUse);
1111 (struct logicalVolHeaderDesc *)(UDF_SB_LVID(inode->i_sb)->
1112 logicalVolContentsUse);
1113 uniqueID = le64_to_cpu(lvhd->uniqueID); 1073 uniqueID = le64_to_cpu(lvhd->uniqueID);
1114 *(__le32 *) ((struct allocDescImpUse *)cfi.icb.impUse)->impUse = 1074 *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
1115 cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL); 1075 cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL);
1116 if (!(++uniqueID & 0x00000000FFFFFFFFUL)) 1076 if (!(++uniqueID & 0x00000000FFFFFFFFUL))
1117 uniqueID += 16; 1077 uniqueID += 16;
1118 lvhd->uniqueID = cpu_to_le64(uniqueID); 1078 lvhd->uniqueID = cpu_to_le64(uniqueID);
@@ -1122,6 +1082,7 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir,
1122 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) { 1082 if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) {
1123 mark_inode_dirty(dir); 1083 mark_inode_dirty(dir);
1124 } 1084 }
1085
1125 if (fibh.sbh != fibh.ebh) 1086 if (fibh.sbh != fibh.ebh)
1126 brelse(fibh.ebh); 1087 brelse(fibh.ebh);
1127 brelse(fibh.sbh); 1088 brelse(fibh.sbh);
@@ -1131,6 +1092,7 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir,
1131 atomic_inc(&inode->i_count); 1092 atomic_inc(&inode->i_count);
1132 d_instantiate(dentry, inode); 1093 d_instantiate(dentry, inode);
1133 unlock_kernel(); 1094 unlock_kernel();
1095
1134 return 0; 1096 return 0;
1135} 1097}
1136 1098
@@ -1143,8 +1105,7 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
1143 struct inode *old_inode = old_dentry->d_inode; 1105 struct inode *old_inode = old_dentry->d_inode;
1144 struct inode *new_inode = new_dentry->d_inode; 1106 struct inode *new_inode = new_dentry->d_inode;
1145 struct udf_fileident_bh ofibh, nfibh; 1107 struct udf_fileident_bh ofibh, nfibh;
1146 struct fileIdentDesc *ofi = NULL, *nfi = NULL, *dir_fi = 1108 struct fileIdentDesc *ofi = NULL, *nfi = NULL, *dir_fi = NULL, ocfi, ncfi;
1147 NULL, ocfi, ncfi;
1148 struct buffer_head *dir_bh = NULL; 1109 struct buffer_head *dir_bh = NULL;
1149 int retval = -ENOENT; 1110 int retval = -ENOENT;
1150 kernel_lb_addr tloc; 1111 kernel_lb_addr tloc;
@@ -1181,36 +1142,27 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
1181 if (UDF_I_ALLOCTYPE(old_inode) == ICBTAG_FLAG_AD_IN_ICB) { 1142 if (UDF_I_ALLOCTYPE(old_inode) == ICBTAG_FLAG_AD_IN_ICB) {
1182 dir_fi = udf_get_fileident(UDF_I_DATA(old_inode) - 1143 dir_fi = udf_get_fileident(UDF_I_DATA(old_inode) -
1183 (UDF_I_EFE(old_inode) ? 1144 (UDF_I_EFE(old_inode) ?
1184 sizeof(struct 1145 sizeof(struct extendedFileEntry) :
1185 extendedFileEntry) :
1186 sizeof(struct fileEntry)), 1146 sizeof(struct fileEntry)),
1187 old_inode->i_sb->s_blocksize, 1147 old_inode->i_sb->s_blocksize, &offset);
1188 &offset);
1189 } else { 1148 } else {
1190 dir_bh = udf_bread(old_inode, 0, 0, &retval); 1149 dir_bh = udf_bread(old_inode, 0, 0, &retval);
1191 if (!dir_bh) 1150 if (!dir_bh)
1192 goto end_rename; 1151 goto end_rename;
1193 dir_fi = 1152 dir_fi = udf_get_fileident(dir_bh->b_data, old_inode->i_sb->s_blocksize, &offset);
1194 udf_get_fileident(dir_bh->b_data,
1195 old_inode->i_sb->s_blocksize,
1196 &offset);
1197 } 1153 }
1198 if (!dir_fi) 1154 if (!dir_fi)
1199 goto end_rename; 1155 goto end_rename;
1200 tloc = lelb_to_cpu(dir_fi->icb.extLocation); 1156 tloc = lelb_to_cpu(dir_fi->icb.extLocation);
1201 if (udf_get_lb_pblock(old_inode->i_sb, tloc, 0) 1157 if (udf_get_lb_pblock(old_inode->i_sb, tloc, 0) != old_dir->i_ino)
1202 != old_dir->i_ino)
1203 goto end_rename; 1158 goto end_rename;
1204 1159
1205 retval = -EMLINK; 1160 retval = -EMLINK;
1206 if (!new_inode 1161 if (!new_inode && new_dir->i_nlink >= (256 << sizeof(new_dir->i_nlink)) - 1)
1207 && new_dir->i_nlink >=
1208 (256 << sizeof(new_dir->i_nlink)) - 1)
1209 goto end_rename; 1162 goto end_rename;
1210 } 1163 }
1211 if (!nfi) { 1164 if (!nfi) {
1212 nfi = 1165 nfi = udf_add_entry(new_dir, new_dentry, &nfibh, &ncfi, &retval);
1213 udf_add_entry(new_dir, new_dentry, &nfibh, &ncfi, &retval);
1214 if (!nfi) 1166 if (!nfi)
1215 goto end_rename; 1167 goto end_rename;
1216 } 1168 }
@@ -1244,13 +1196,12 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
1244 if (dir_fi) { 1196 if (dir_fi) {
1245 dir_fi->icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(new_dir)); 1197 dir_fi->icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(new_dir));
1246 udf_update_tag((char *)dir_fi, (sizeof(struct fileIdentDesc) + 1198 udf_update_tag((char *)dir_fi, (sizeof(struct fileIdentDesc) +
1247 le16_to_cpu(dir_fi-> 1199 le16_to_cpu(dir_fi->lengthOfImpUse) + 3) & ~3);
1248 lengthOfImpUse) +
1249 3) & ~3);
1250 if (UDF_I_ALLOCTYPE(old_inode) == ICBTAG_FLAG_AD_IN_ICB) { 1200 if (UDF_I_ALLOCTYPE(old_inode) == ICBTAG_FLAG_AD_IN_ICB) {
1251 mark_inode_dirty(old_inode); 1201 mark_inode_dirty(old_inode);
1252 } else 1202 } else {
1253 mark_buffer_dirty_inode(dir_bh, old_inode); 1203 mark_buffer_dirty_inode(dir_bh, old_inode);
1204 }
1254 inode_dec_link_count(old_dir); 1205 inode_dec_link_count(old_dir);
1255 if (new_inode) { 1206 if (new_inode) {
1256 inode_dec_link_count(new_inode); 1207 inode_dec_link_count(new_inode);
@@ -1268,7 +1219,7 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
1268 1219
1269 retval = 0; 1220 retval = 0;
1270 1221
1271 end_rename: 1222end_rename:
1272 brelse(dir_bh); 1223 brelse(dir_bh);
1273 if (nfi) { 1224 if (nfi) {
1274 if (nfibh.sbh != nfibh.ebh) 1225 if (nfibh.sbh != nfibh.ebh)
@@ -1276,17 +1227,18 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
1276 brelse(nfibh.sbh); 1227 brelse(nfibh.sbh);
1277 } 1228 }
1278 unlock_kernel(); 1229 unlock_kernel();
1230
1279 return retval; 1231 return retval;
1280} 1232}
1281 1233
1282const struct inode_operations udf_dir_inode_operations = { 1234const struct inode_operations udf_dir_inode_operations = {
1283 .lookup = udf_lookup, 1235 .lookup = udf_lookup,
1284 .create = udf_create, 1236 .create = udf_create,
1285 .link = udf_link, 1237 .link = udf_link,
1286 .unlink = udf_unlink, 1238 .unlink = udf_unlink,
1287 .symlink = udf_symlink, 1239 .symlink = udf_symlink,
1288 .mkdir = udf_mkdir, 1240 .mkdir = udf_mkdir,
1289 .rmdir = udf_rmdir, 1241 .rmdir = udf_rmdir,
1290 .mknod = udf_mknod, 1242 .mknod = udf_mknod,
1291 .rename = udf_rename, 1243 .rename = udf_rename,
1292}; 1244};
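
The fs/udf/namei.c hunks above are style-only cleanups: over-wrapped expressions are rejoined, braces are added to multi-line if/else arms, labels move to column 0, and the verbose positional struct initializers give way to the empty initializer "= {}" and designated initializers. As a hedged, standalone illustration of that initializer pattern only (the struct layouts below are simplified stand-ins inferred from the diff, not the kernel's definitions):

    #include <stddef.h>     /* NULL */
    #include <stdint.h>

    struct buffer_head;     /* opaque here; only referenced through a pointer */

    /* Simplified stand-ins inferred from the diff -- not the kernel definitions. */
    struct kernel_lb_addr {
        uint32_t logicalBlockNum;
        uint16_t partitionReferenceNum;
    };

    struct extent_position {
        struct buffer_head *bh;
        uint32_t offset;
        struct kernel_lb_addr block;
    };

    static void init_styles(void)
    {
        /* Old style: positional initializer spells out every member. */
        struct extent_position old_epos = { NULL, 0, { 0, 0 } };

        /* New style: the empty initializer (GNU C / C23) zeroes the whole struct. */
        struct extent_position new_epos = {};

        /* Designated initializers name the fields being set (the value is hypothetical). */
        struct kernel_lb_addr lb = {
            .logicalBlockNum = 0,
            .partitionReferenceNum = 5,
        };

        (void)old_epos;
        (void)new_epos;
        (void)lb;
    }

For epos both spellings yield an all-zero structure, so the substitution cannot change behaviour; the designated form for lb just makes explicit which field receives the parsed value.
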
diff --git a/fs/udf/osta_udf.h b/fs/udf/osta_udf.h
index bec5d340d8c5..65ff47902bd2 100644
--- a/fs/udf/osta_udf.h
+++ b/fs/udf/osta_udf.h
@@ -66,64 +66,64 @@
66#define IS_DF_SOFT_WRITE_PROTECT 0x02 66#define IS_DF_SOFT_WRITE_PROTECT 0x02
67 67
68struct UDFIdentSuffix { 68struct UDFIdentSuffix {
69 __le16 UDFRevision; 69 __le16 UDFRevision;
70 uint8_t OSClass; 70 uint8_t OSClass;
71 uint8_t OSIdentifier; 71 uint8_t OSIdentifier;
72 uint8_t reserved[4]; 72 uint8_t reserved[4];
73} __attribute__ ((packed)); 73} __attribute__ ((packed));
74 74
75struct impIdentSuffix { 75struct impIdentSuffix {
76 uint8_t OSClass; 76 uint8_t OSClass;
77 uint8_t OSIdentifier; 77 uint8_t OSIdentifier;
78 uint8_t reserved[6]; 78 uint8_t reserved[6];
79} __attribute__ ((packed)); 79} __attribute__ ((packed));
80 80
81struct appIdentSuffix { 81struct appIdentSuffix {
82 uint8_t impUse[8]; 82 uint8_t impUse[8];
83} __attribute__ ((packed)); 83} __attribute__ ((packed));
84 84
85/* Logical Volume Integrity Descriptor (UDF 2.50 2.2.6) */ 85/* Logical Volume Integrity Descriptor (UDF 2.50 2.2.6) */
86/* Implementation Use (UDF 2.50 2.2.6.4) */ 86/* Implementation Use (UDF 2.50 2.2.6.4) */
87struct logicalVolIntegrityDescImpUse { 87struct logicalVolIntegrityDescImpUse {
88 regid impIdent; 88 regid impIdent;
89 __le32 numFiles; 89 __le32 numFiles;
90 __le32 numDirs; 90 __le32 numDirs;
91 __le16 minUDFReadRev; 91 __le16 minUDFReadRev;
92 __le16 minUDFWriteRev; 92 __le16 minUDFWriteRev;
93 __le16 maxUDFWriteRev; 93 __le16 maxUDFWriteRev;
94 uint8_t impUse[0]; 94 uint8_t impUse[0];
95} __attribute__ ((packed)); 95} __attribute__ ((packed));
96 96
97/* Implementation Use Volume Descriptor (UDF 2.50 2.2.7) */ 97/* Implementation Use Volume Descriptor (UDF 2.50 2.2.7) */
98/* Implementation Use (UDF 2.50 2.2.7.2) */ 98/* Implementation Use (UDF 2.50 2.2.7.2) */
99struct impUseVolDescImpUse { 99struct impUseVolDescImpUse {
100 charspec LVICharset; 100 charspec LVICharset;
101 dstring logicalVolIdent[128]; 101 dstring logicalVolIdent[128];
102 dstring LVInfo1[36]; 102 dstring LVInfo1[36];
103 dstring LVInfo2[36]; 103 dstring LVInfo2[36];
104 dstring LVInfo3[36]; 104 dstring LVInfo3[36];
105 regid impIdent; 105 regid impIdent;
106 uint8_t impUse[128]; 106 uint8_t impUse[128];
107} __attribute__ ((packed)); 107} __attribute__ ((packed));
108 108
109struct udfPartitionMap2 { 109struct udfPartitionMap2 {
110 uint8_t partitionMapType; 110 uint8_t partitionMapType;
111 uint8_t partitionMapLength; 111 uint8_t partitionMapLength;
112 uint8_t reserved1[2]; 112 uint8_t reserved1[2];
113 regid partIdent; 113 regid partIdent;
114 __le16 volSeqNum; 114 __le16 volSeqNum;
115 __le16 partitionNum; 115 __le16 partitionNum;
116} __attribute__ ((packed)); 116} __attribute__ ((packed));
117 117
118/* Virtual Partition Map (UDF 2.50 2.2.8) */ 118/* Virtual Partition Map (UDF 2.50 2.2.8) */
119struct virtualPartitionMap { 119struct virtualPartitionMap {
120 uint8_t partitionMapType; 120 uint8_t partitionMapType;
121 uint8_t partitionMapLength; 121 uint8_t partitionMapLength;
122 uint8_t reserved1[2]; 122 uint8_t reserved1[2];
123 regid partIdent; 123 regid partIdent;
124 __le16 volSeqNum; 124 __le16 volSeqNum;
125 __le16 partitionNum; 125 __le16 partitionNum;
126 uint8_t reserved2[24]; 126 uint8_t reserved2[24];
127} __attribute__ ((packed)); 127} __attribute__ ((packed));
128 128
129/* Sparable Partition Map (UDF 2.50 2.2.9) */ 129/* Sparable Partition Map (UDF 2.50 2.2.9) */
@@ -143,62 +143,62 @@ struct sparablePartitionMap {
143 143
144/* Metadata Partition Map (UDF 2.4.0 2.2.10) */ 144/* Metadata Partition Map (UDF 2.4.0 2.2.10) */
145struct metadataPartitionMap { 145struct metadataPartitionMap {
146 uint8_t partitionMapType; 146 uint8_t partitionMapType;
147 uint8_t partitionMapLength; 147 uint8_t partitionMapLength;
148 uint8_t reserved1[2]; 148 uint8_t reserved1[2];
149 regid partIdent; 149 regid partIdent;
150 __le16 volSeqNum; 150 __le16 volSeqNum;
151 __le16 partitionNum; 151 __le16 partitionNum;
152 __le32 metadataFileLoc; 152 __le32 metadataFileLoc;
153 __le32 metadataMirrorFileLoc; 153 __le32 metadataMirrorFileLoc;
154 __le32 metadataBitmapFileLoc; 154 __le32 metadataBitmapFileLoc;
155 __le32 allocUnitSize; 155 __le32 allocUnitSize;
156 __le16 alignUnitSize; 156 __le16 alignUnitSize;
157 uint8_t flags; 157 uint8_t flags;
158 uint8_t reserved2[5]; 158 uint8_t reserved2[5];
159} __attribute__ ((packed)); 159} __attribute__ ((packed));
160 160
161/* Virtual Allocation Table (UDF 1.5 2.2.10) */ 161/* Virtual Allocation Table (UDF 1.5 2.2.10) */
162struct virtualAllocationTable15 { 162struct virtualAllocationTable15 {
163 __le32 VirtualSector[0]; 163 __le32 VirtualSector[0];
164 regid vatIdent; 164 regid vatIdent;
165 __le32 previousVATICBLoc; 165 __le32 previousVATICBLoc;
166} __attribute__ ((packed)); 166} __attribute__ ((packed));
167 167
168#define ICBTAG_FILE_TYPE_VAT15 0x00U 168#define ICBTAG_FILE_TYPE_VAT15 0x00U
169 169
170/* Virtual Allocation Table (UDF 2.50 2.2.11) */ 170/* Virtual Allocation Table (UDF 2.50 2.2.11) */
171struct virtualAllocationTable20 { 171struct virtualAllocationTable20 {
172 __le16 lengthHeader; 172 __le16 lengthHeader;
173 __le16 lengthImpUse; 173 __le16 lengthImpUse;
174 dstring logicalVolIdent[128]; 174 dstring logicalVolIdent[128];
175 __le32 previousVATICBLoc; 175 __le32 previousVATICBLoc;
176 __le32 numFiles; 176 __le32 numFiles;
177 __le32 numDirs; 177 __le32 numDirs;
178 __le16 minReadRevision; 178 __le16 minReadRevision;
179 __le16 minWriteRevision; 179 __le16 minWriteRevision;
180 __le16 maxWriteRevision; 180 __le16 maxWriteRevision;
181 __le16 reserved; 181 __le16 reserved;
182 uint8_t impUse[0]; 182 uint8_t impUse[0];
183 __le32 vatEntry[0]; 183 __le32 vatEntry[0];
184} __attribute__ ((packed)); 184} __attribute__ ((packed));
185 185
186#define ICBTAG_FILE_TYPE_VAT20 0xF8U 186#define ICBTAG_FILE_TYPE_VAT20 0xF8U
187 187
188/* Sparing Table (UDF 2.50 2.2.12) */ 188/* Sparing Table (UDF 2.50 2.2.12) */
189struct sparingEntry { 189struct sparingEntry {
190 __le32 origLocation; 190 __le32 origLocation;
191 __le32 mappedLocation; 191 __le32 mappedLocation;
192} __attribute__ ((packed)); 192} __attribute__ ((packed));
193 193
194struct sparingTable { 194struct sparingTable {
195 tag descTag; 195 tag descTag;
196 regid sparingIdent; 196 regid sparingIdent;
197 __le16 reallocationTableLen; 197 __le16 reallocationTableLen;
198 __le16 reserved; 198 __le16 reserved;
199 __le32 sequenceNum; 199 __le32 sequenceNum;
200 struct sparingEntry 200 struct sparingEntry
201 mapEntry[0]; 201 mapEntry[0];
202} __attribute__ ((packed)); 202} __attribute__ ((packed));
203 203
204/* Metadata File (and Metadata Mirror File) (UDF 2.50 2.2.13.1) */ 204/* Metadata File (and Metadata Mirror File) (UDF 2.50 2.2.13.1) */
@@ -208,8 +208,8 @@ struct sparingTable {
208 208
209/* struct long_ad ICB - ADImpUse (UDF 2.50 2.2.4.3) */ 209/* struct long_ad ICB - ADImpUse (UDF 2.50 2.2.4.3) */
210struct allocDescImpUse { 210struct allocDescImpUse {
211 __le16 flags; 211 __le16 flags;
212 uint8_t impUse[4]; 212 uint8_t impUse[4];
213} __attribute__ ((packed)); 213} __attribute__ ((packed));
214 214
215#define AD_IU_EXT_ERASED 0x0001 215#define AD_IU_EXT_ERASED 0x0001
@@ -220,23 +220,23 @@ struct allocDescImpUse {
220/* Implementation Use Extended Attribute (UDF 2.50 3.3.4.5) */ 220/* Implementation Use Extended Attribute (UDF 2.50 3.3.4.5) */
221/* FreeEASpace (UDF 2.50 3.3.4.5.1.1) */ 221/* FreeEASpace (UDF 2.50 3.3.4.5.1.1) */
222struct freeEaSpace { 222struct freeEaSpace {
223 __le16 headerChecksum; 223 __le16 headerChecksum;
224 uint8_t freeEASpace[0]; 224 uint8_t freeEASpace[0];
225} __attribute__ ((packed)); 225} __attribute__ ((packed));
226 226
227/* DVD Copyright Management Information (UDF 2.50 3.3.4.5.1.2) */ 227/* DVD Copyright Management Information (UDF 2.50 3.3.4.5.1.2) */
228struct DVDCopyrightImpUse { 228struct DVDCopyrightImpUse {
229 __le16 headerChecksum; 229 __le16 headerChecksum;
230 uint8_t CGMSInfo; 230 uint8_t CGMSInfo;
231 uint8_t dataType; 231 uint8_t dataType;
232 uint8_t protectionSystemInfo[4]; 232 uint8_t protectionSystemInfo[4];
233} __attribute__ ((packed)); 233} __attribute__ ((packed));
234 234
235/* Application Use Extended Attribute (UDF 2.50 3.3.4.6) */ 235/* Application Use Extended Attribute (UDF 2.50 3.3.4.6) */
236/* FreeAppEASpace (UDF 2.50 3.3.4.6.1) */ 236/* FreeAppEASpace (UDF 2.50 3.3.4.6.1) */
237struct freeAppEASpace { 237struct freeAppEASpace {
238 __le16 headerChecksum; 238 __le16 headerChecksum;
239 uint8_t freeEASpace[0]; 239 uint8_t freeEASpace[0];
240} __attribute__ ((packed)); 240} __attribute__ ((packed));
241 241
242/* UDF Defined System Stream (UDF 2.50 3.3.7) */ 242/* UDF Defined System Stream (UDF 2.50 3.3.7) */
@@ -276,4 +276,4 @@ struct freeAppEASpace {
276#define UDF_OS_ID_BEOS 0x00U 276#define UDF_OS_ID_BEOS 0x00U
277#define UDF_OS_ID_WINCE 0x00U 277#define UDF_OS_ID_WINCE 0x00U
278 278
279#endif /* _OSTA_UDF_H */ 279#endif /* _OSTA_UDF_H */
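
The osta_udf.h hunks only reindent the on-disk descriptor definitions; every multi-byte field keeps its fixed-endian type (__le16/__le32) and the structures stay packed. A hedged userspace-style sketch of how such a field is consumed (le32toh() stands in for the kernel's le32_to_cpu(); the structure is a simplified copy of sparingEntry, for illustration only):

    #include <stdint.h>
    #include <endian.h>     /* le32toh(): userspace stand-in for le32_to_cpu() */

    /* Simplified copy of the on-disk sparingEntry above -- illustration only. */
    struct disk_sparing_entry {
        uint32_t origLocation;      /* stored little-endian on disc */
        uint32_t mappedLocation;
    } __attribute__((packed));

    /* Fields are byte-swapped on access rather than rewritten in place, so the
     * buffer contents remain valid for writing straight back to disc. */
    static uint32_t sparing_orig(const struct disk_sparing_entry *e)
    {
        return le32toh(e->origLocation);
    }

Keeping the conversion at the access site is what lets the reformatted partition.c code below compare origLocation values directly with le32_to_cpu() while leaving the buffer untouched.
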
diff --git a/fs/udf/partition.c b/fs/udf/partition.c
index a95d830a674d..aaab24c8c498 100644
--- a/fs/udf/partition.c
+++ b/fs/udf/partition.c
@@ -14,7 +14,7 @@
14 * 14 *
15 * HISTORY 15 * HISTORY
16 * 16 *
17 * 12/06/98 blf Created file. 17 * 12/06/98 blf Created file.
18 * 18 *
19 */ 19 */
20 20
@@ -32,19 +32,17 @@ inline uint32_t udf_get_pblock(struct super_block *sb, uint32_t block,
32 uint16_t partition, uint32_t offset) 32 uint16_t partition, uint32_t offset)
33{ 33{
34 if (partition >= UDF_SB_NUMPARTS(sb)) { 34 if (partition >= UDF_SB_NUMPARTS(sb)) {
35 udf_debug 35 udf_debug("block=%d, partition=%d, offset=%d: invalid partition\n",
36 ("block=%d, partition=%d, offset=%d: invalid partition\n", 36 block, partition, offset);
37 block, partition, offset);
38 return 0xFFFFFFFF; 37 return 0xFFFFFFFF;
39 } 38 }
40 if (UDF_SB_PARTFUNC(sb, partition)) 39 if (UDF_SB_PARTFUNC(sb, partition))
41 return UDF_SB_PARTFUNC(sb, partition) (sb, block, partition, 40 return UDF_SB_PARTFUNC(sb, partition)(sb, block, partition, offset);
42 offset);
43 else 41 else
44 return UDF_SB_PARTROOT(sb, partition) + block + offset; 42 return UDF_SB_PARTROOT(sb, partition) + block + offset;
45} 43}
46 44
47uint32_t udf_get_pblock_virt15(struct super_block * sb, uint32_t block, 45uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
48 uint16_t partition, uint32_t offset) 46 uint16_t partition, uint32_t offset)
49{ 47{
50 struct buffer_head *bh = NULL; 48 struct buffer_head *bh = NULL;
@@ -52,14 +50,11 @@ uint32_t udf_get_pblock_virt15(struct super_block * sb, uint32_t block,
52 uint32_t index; 50 uint32_t index;
53 uint32_t loc; 51 uint32_t loc;
54 52
55 index = 53 index = (sb->s_blocksize - UDF_SB_TYPEVIRT(sb,partition).s_start_offset) / sizeof(uint32_t);
56 (sb->s_blocksize -
57 UDF_SB_TYPEVIRT(sb, partition).s_start_offset) / sizeof(uint32_t);
58 54
59 if (block > UDF_SB_TYPEVIRT(sb, partition).s_num_entries) { 55 if (block > UDF_SB_TYPEVIRT(sb,partition).s_num_entries) {
60 udf_debug 56 udf_debug("Trying to access block beyond end of VAT (%d max %d)\n",
61 ("Trying to access block beyond end of VAT (%d max %d)\n", 57 block, UDF_SB_TYPEVIRT(sb,partition).s_num_entries);
62 block, UDF_SB_TYPEVIRT(sb, partition).s_num_entries);
63 return 0xFFFFFFFF; 58 return 0xFFFFFFFF;
64 } 59 }
65 60
@@ -69,10 +64,7 @@ uint32_t udf_get_pblock_virt15(struct super_block * sb, uint32_t block,
69 index = block % (sb->s_blocksize / sizeof(uint32_t)); 64 index = block % (sb->s_blocksize / sizeof(uint32_t));
70 } else { 65 } else {
71 newblock = 0; 66 newblock = 0;
72 index = 67 index = UDF_SB_TYPEVIRT(sb,partition).s_start_offset / sizeof(uint32_t) + block;
73 UDF_SB_TYPEVIRT(sb,
74 partition).s_start_offset /
75 sizeof(uint32_t) + block;
76 } 68 }
77 69
78 loc = udf_block_map(UDF_SB_VAT(sb), newblock); 70 loc = udf_block_map(UDF_SB_VAT(sb), newblock);
@@ -83,7 +75,7 @@ uint32_t udf_get_pblock_virt15(struct super_block * sb, uint32_t block,
83 return 0xFFFFFFFF; 75 return 0xFFFFFFFF;
84 } 76 }
85 77
86 loc = le32_to_cpu(((__le32 *) bh->b_data)[index]); 78 loc = le32_to_cpu(((__le32 *)bh->b_data)[index]);
87 79
88 brelse(bh); 80 brelse(bh);
89 81
@@ -93,8 +85,8 @@ uint32_t udf_get_pblock_virt15(struct super_block * sb, uint32_t block,
93 } 85 }
94 86
95 return udf_get_pblock(sb, loc, 87 return udf_get_pblock(sb, loc,
96 UDF_I_LOCATION(UDF_SB_VAT(sb)). 88 UDF_I_LOCATION(UDF_SB_VAT(sb)).partitionReferenceNum,
97 partitionReferenceNum, offset); 89 offset);
98} 90}
99 91
100inline uint32_t udf_get_pblock_virt20(struct super_block * sb, uint32_t block, 92inline uint32_t udf_get_pblock_virt20(struct super_block * sb, uint32_t block,
@@ -108,40 +100,29 @@ uint32_t udf_get_pblock_spar15(struct super_block * sb, uint32_t block,
108{ 100{
109 int i; 101 int i;
110 struct sparingTable *st = NULL; 102 struct sparingTable *st = NULL;
111 uint32_t packet = 103 uint32_t packet = (block + offset) & ~(UDF_SB_TYPESPAR(sb,partition).s_packet_len - 1);
112 (block + offset) & ~(UDF_SB_TYPESPAR(sb, partition).s_packet_len -
113 1);
114 104
115 for (i = 0; i < 4; i++) { 105 for (i = 0; i < 4; i++) {
116 if (UDF_SB_TYPESPAR(sb, partition).s_spar_map[i] != NULL) { 106 if (UDF_SB_TYPESPAR(sb,partition).s_spar_map[i] != NULL) {
117 st = (struct sparingTable *)UDF_SB_TYPESPAR(sb, 107 st = (struct sparingTable *)UDF_SB_TYPESPAR(sb,partition).s_spar_map[i]->b_data;
118 partition).
119 s_spar_map[i]->b_data;
120 break; 108 break;
121 } 109 }
122 } 110 }
123 111
124 if (st) { 112 if (st) {
125 for (i = 0; i < le16_to_cpu(st->reallocationTableLen); i++) { 113 for (i = 0; i < le16_to_cpu(st->reallocationTableLen); i++) {
126 if (le32_to_cpu(st->mapEntry[i].origLocation) >= 114 if (le32_to_cpu(st->mapEntry[i].origLocation) >= 0xFFFFFFF0) {
127 0xFFFFFFF0)
128 break; 115 break;
129 else if (le32_to_cpu(st->mapEntry[i].origLocation) == 116 } else if (le32_to_cpu(st->mapEntry[i].origLocation) == packet) {
130 packet) { 117 return le32_to_cpu(st->mapEntry[i].mappedLocation) +
131 return le32_to_cpu(st->mapEntry[i]. 118 ((block + offset) & (UDF_SB_TYPESPAR(sb,partition).s_packet_len - 1));
132 mappedLocation) + ((block + 119 } else if (le32_to_cpu(st->mapEntry[i].origLocation) > packet) {
133 offset) &
134 (UDF_SB_TYPESPAR
135 (sb,
136 partition).
137 s_packet_len
138 - 1));
139 } else if (le32_to_cpu(st->mapEntry[i].origLocation) >
140 packet)
141 break; 120 break;
121 }
142 } 122 }
143 } 123 }
144 return UDF_SB_PARTROOT(sb, partition) + block + offset; 124
125 return UDF_SB_PARTROOT(sb,partition) + block + offset;
145} 126}
146 127
147int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block) 128int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
@@ -153,20 +134,14 @@ int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
153 int i, j, k, l; 134 int i, j, k, l;
154 135
155 for (i = 0; i < UDF_SB_NUMPARTS(sb); i++) { 136 for (i = 0; i < UDF_SB_NUMPARTS(sb); i++) {
156 if (old_block > UDF_SB_PARTROOT(sb, i) && 137 if (old_block > UDF_SB_PARTROOT(sb,i) &&
157 old_block < UDF_SB_PARTROOT(sb, i) + UDF_SB_PARTLEN(sb, i)) 138 old_block < UDF_SB_PARTROOT(sb,i) + UDF_SB_PARTLEN(sb,i)) {
158 { 139 sdata = &UDF_SB_TYPESPAR(sb,i);
159 sdata = &UDF_SB_TYPESPAR(sb, i); 140 packet = (old_block - UDF_SB_PARTROOT(sb,i)) & ~(sdata->s_packet_len - 1);
160 packet =
161 (old_block -
162 UDF_SB_PARTROOT(sb,
163 i)) & ~(sdata->s_packet_len - 1);
164 141
165 for (j = 0; j < 4; j++) { 142 for (j = 0; j < 4; j++) {
166 if (UDF_SB_TYPESPAR(sb, i).s_spar_map[j] != 143 if (UDF_SB_TYPESPAR(sb,i).s_spar_map[j] != NULL) {
167 NULL) { 144 st = (struct sparingTable *)sdata->s_spar_map[j]->b_data;
168 st = (struct sparingTable *)sdata->
169 s_spar_map[j]->b_data;
170 break; 145 break;
171 } 146 }
172 } 147 }
@@ -174,122 +149,51 @@ int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
174 if (!st) 149 if (!st)
175 return 1; 150 return 1;
176 151
177 for (k = 0; k < le16_to_cpu(st->reallocationTableLen); 152 for (k = 0; k < le16_to_cpu(st->reallocationTableLen); k++) {
178 k++) { 153 if (le32_to_cpu(st->mapEntry[k].origLocation) == 0xFFFFFFFF) {
179 if (le32_to_cpu(st->mapEntry[k].origLocation) ==
180 0xFFFFFFFF) {
181 for (; j < 4; j++) { 154 for (; j < 4; j++) {
182 if (sdata->s_spar_map[j]) { 155 if (sdata->s_spar_map[j]) {
183 st = (struct 156 st = (struct sparingTable *)sdata->s_spar_map[j]->b_data;
184 sparingTable *) 157 st->mapEntry[k].origLocation = cpu_to_le32(packet);
185 sdata-> 158 udf_update_tag((char *)st, sizeof(struct sparingTable) + le16_to_cpu(st->reallocationTableLen) * sizeof(struct sparingEntry));
186 s_spar_map[j]-> 159 mark_buffer_dirty(sdata->s_spar_map[j]);
187 b_data;
188 st->mapEntry[k].
189 origLocation =
190 cpu_to_le32(packet);
191 udf_update_tag((char *)
192 st,
193 sizeof
194 (struct
195 sparingTable)
196 +
197 le16_to_cpu
198 (st->
199 reallocationTableLen)
200 *
201 sizeof
202 (struct
203 sparingEntry));
204 mark_buffer_dirty
205 (sdata->
206 s_spar_map[j]);
207 } 160 }
208 } 161 }
209 *new_block = 162 *new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) +
210 le32_to_cpu(st->mapEntry[k]. 163 ((old_block - UDF_SB_PARTROOT(sb,i)) & (sdata->s_packet_len - 1));
211 mappedLocation) +
212 ((old_block -
213 UDF_SB_PARTROOT(sb,
214 i)) & (sdata->
215 s_packet_len
216 - 1));
217 return 0; 164 return 0;
218 } else 165 } else if (le32_to_cpu(st->mapEntry[k].origLocation) == packet) {
219 if (le32_to_cpu 166 *new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) +
220 (st->mapEntry[k].origLocation) == 167 ((old_block - UDF_SB_PARTROOT(sb,i)) & (sdata->s_packet_len - 1));
221 packet) {
222 *new_block =
223 le32_to_cpu(st->mapEntry[k].
224 mappedLocation) +
225 ((old_block -
226 UDF_SB_PARTROOT(sb,
227 i)) & (sdata->
228 s_packet_len
229 - 1));
230 return 0; 168 return 0;
231 } else 169 } else if (le32_to_cpu(st->mapEntry[k].origLocation) > packet) {
232 if (le32_to_cpu
233 (st->mapEntry[k].origLocation) > packet)
234 break; 170 break;
171 }
235 } 172 }
236 for (l = k; l < le16_to_cpu(st->reallocationTableLen); 173
237 l++) { 174 for (l = k; l < le16_to_cpu(st->reallocationTableLen); l++) {
238 if (le32_to_cpu(st->mapEntry[l].origLocation) == 175 if (le32_to_cpu(st->mapEntry[l].origLocation) == 0xFFFFFFFF) {
239 0xFFFFFFFF) {
240 for (; j < 4; j++) { 176 for (; j < 4; j++) {
241 if (sdata->s_spar_map[j]) { 177 if (sdata->s_spar_map[j]) {
242 st = (struct 178 st = (struct sparingTable *)sdata->s_spar_map[j]->b_data;
243 sparingTable *) 179 mapEntry = st->mapEntry[l];
244 sdata-> 180 mapEntry.origLocation = cpu_to_le32(packet);
245 s_spar_map[j]-> 181 memmove(&st->mapEntry[k + 1], &st->mapEntry[k], (l - k) * sizeof(struct sparingEntry));
246 b_data; 182 st->mapEntry[k] = mapEntry;
247 mapEntry = 183 udf_update_tag((char *)st, sizeof(struct sparingTable) + le16_to_cpu(st->reallocationTableLen) * sizeof(struct sparingEntry));
248 st->mapEntry[l]; 184 mark_buffer_dirty(sdata->s_spar_map[j]);
249 mapEntry.origLocation =
250 cpu_to_le32(packet);
251 memmove(&st->
252 mapEntry[k + 1],
253 &st->
254 mapEntry[k],
255 (l -
256 k) *
257 sizeof(struct
258 sparingEntry));
259 st->mapEntry[k] =
260 mapEntry;
261 udf_update_tag((char *)
262 st,
263 sizeof
264 (struct
265 sparingTable)
266 +
267 le16_to_cpu
268 (st->
269 reallocationTableLen)
270 *
271 sizeof
272 (struct
273 sparingEntry));
274 mark_buffer_dirty
275 (sdata->
276 s_spar_map[j]);
277 } 185 }
278 } 186 }
279 *new_block = 187 *new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) +
280 le32_to_cpu(st->mapEntry[k]. 188 ((old_block - UDF_SB_PARTROOT(sb,i)) & (sdata->s_packet_len - 1));
281 mappedLocation) +
282 ((old_block -
283 UDF_SB_PARTROOT(sb,
284 i)) & (sdata->
285 s_packet_len
286 - 1));
287 return 0; 189 return 0;
288 } 190 }
289 } 191 }
192
290 return 1; 193 return 1;
291 } 194 } /* if old_block */
292 } 195 }
196
293 if (i == UDF_SB_NUMPARTS(sb)) { 197 if (i == UDF_SB_NUMPARTS(sb)) {
294 /* outside of partitions */ 198 /* outside of partitions */
295 /* for now, fail =) */ 199 /* for now, fail =) */
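
The heaviest rewrapping is in udf_get_pblock_spar15() and udf_relocate_blocks(), whose sparing-table logic was previously split across a dozen continuation lines. The underlying lookup is simple: round the requested block down to its packet, scan the sorted table for a remap entry, and keep the offset within the packet. A hedged, self-contained sketch of that lookup with simplified host-endian types and hypothetical names (the real function also folds in the caller's offset argument, as shown in the hunk above):

    #include <stdint.h>

    struct sparing_entry {              /* simplified, host-endian version */
        uint32_t orig;                  /* original (defective) packet address */
        uint32_t mapped;                /* replacement packet address */
    };

    /* Entries with orig >= 0xFFFFFFF0 terminate the sorted table. */
    static uint32_t spar_lookup(const struct sparing_entry *tab, int len,
                                uint32_t block, uint32_t packet_len,
                                uint32_t part_root)
    {
        uint32_t packet = block & ~(packet_len - 1);
        int i;

        for (i = 0; i < len; i++) {
            if (tab[i].orig >= 0xFFFFFFF0 || tab[i].orig > packet)
                break;                  /* past this packet: no remap entry */
            if (tab[i].orig == packet)
                return tab[i].mapped + (block & (packet_len - 1));
        }
        return part_root + block;       /* unmapped: identity within the partition */
    }

The masking works because s_packet_len is a power of two: & ~(packet_len - 1) truncates to the packet boundary and & (packet_len - 1) recovers the offset inside it.
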
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 72097ee6b752..7b30964665db 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -104,11 +104,11 @@ static int udf_get_sb(struct file_system_type *fs_type,
104} 104}
105 105
106static struct file_system_type udf_fstype = { 106static struct file_system_type udf_fstype = {
107 .owner = THIS_MODULE, 107 .owner = THIS_MODULE,
108 .name = "udf", 108 .name = "udf",
109 .get_sb = udf_get_sb, 109 .get_sb = udf_get_sb,
110 .kill_sb = kill_block_super, 110 .kill_sb = kill_block_super,
111 .fs_flags = FS_REQUIRES_DEV, 111 .fs_flags = FS_REQUIRES_DEV,
112}; 112};
113 113
114static struct kmem_cache *udf_inode_cachep; 114static struct kmem_cache *udf_inode_cachep;
@@ -116,8 +116,7 @@ static struct kmem_cache *udf_inode_cachep;
116static struct inode *udf_alloc_inode(struct super_block *sb) 116static struct inode *udf_alloc_inode(struct super_block *sb)
117{ 117{
118 struct udf_inode_info *ei; 118 struct udf_inode_info *ei;
119 ei = (struct udf_inode_info *)kmem_cache_alloc(udf_inode_cachep, 119 ei = (struct udf_inode_info *)kmem_cache_alloc(udf_inode_cachep, GFP_KERNEL);
120 GFP_KERNEL);
121 if (!ei) 120 if (!ei)
122 return NULL; 121 return NULL;
123 122
@@ -150,7 +149,7 @@ static int init_inodecache(void)
150 0, (SLAB_RECLAIM_ACCOUNT | 149 0, (SLAB_RECLAIM_ACCOUNT |
151 SLAB_MEM_SPREAD), 150 SLAB_MEM_SPREAD),
152 init_once); 151 init_once);
153 if (udf_inode_cachep == NULL) 152 if (!udf_inode_cachep)
154 return -ENOMEM; 153 return -ENOMEM;
155 return 0; 154 return 0;
156} 155}
@@ -162,15 +161,15 @@ static void destroy_inodecache(void)
162 161
163/* Superblock operations */ 162/* Superblock operations */
164static const struct super_operations udf_sb_ops = { 163static const struct super_operations udf_sb_ops = {
165 .alloc_inode = udf_alloc_inode, 164 .alloc_inode = udf_alloc_inode,
166 .destroy_inode = udf_destroy_inode, 165 .destroy_inode = udf_destroy_inode,
167 .write_inode = udf_write_inode, 166 .write_inode = udf_write_inode,
168 .delete_inode = udf_delete_inode, 167 .delete_inode = udf_delete_inode,
169 .clear_inode = udf_clear_inode, 168 .clear_inode = udf_clear_inode,
170 .put_super = udf_put_super, 169 .put_super = udf_put_super,
171 .write_super = udf_write_super, 170 .write_super = udf_write_super,
172 .statfs = udf_statfs, 171 .statfs = udf_statfs,
173 .remount_fs = udf_remount_fs, 172 .remount_fs = udf_remount_fs,
174}; 173};
175 174
176struct udf_options { 175struct udf_options {
@@ -193,16 +192,20 @@ struct udf_options {
193static int __init init_udf_fs(void) 192static int __init init_udf_fs(void)
194{ 193{
195 int err; 194 int err;
195
196 err = init_inodecache(); 196 err = init_inodecache();
197 if (err) 197 if (err)
198 goto out1; 198 goto out1;
199 err = register_filesystem(&udf_fstype); 199 err = register_filesystem(&udf_fstype);
200 if (err) 200 if (err)
201 goto out; 201 goto out;
202
202 return 0; 203 return 0;
203 out: 204
205out:
204 destroy_inodecache(); 206 destroy_inodecache();
205 out1: 207
208out1:
206 return err; 209 return err;
207} 210}
208 211
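
The reshuffled init_udf_fs() above is the usual register-then-unwind pattern: each setup step that fails jumps to a label that undoes only the steps already completed. A minimal standalone sketch of that flow, with hypothetical helpers standing in for init_inodecache()/register_filesystem()/destroy_inodecache() (they are not part of this patch):

	/* Sketch only: the three helpers are hypothetical stand-ins for
	 * init_inodecache(), register_filesystem() and destroy_inodecache(). */
	static int setup_cache(void)    { return 0; }
	static int register_thing(void) { return 0; }
	static void destroy_cache(void) { }

	static int init_example(void)
	{
		int err;

		err = setup_cache();
		if (err)
			goto out_nocache;	/* nothing to undo yet */
		err = register_thing();
		if (err)
			goto out_cache;		/* undo only the cache setup */
		return 0;

	out_cache:
		destroy_cache();
	out_nocache:
		return err;
	}
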
@@ -213,7 +216,7 @@ static void __exit exit_udf_fs(void)
213} 216}
214 217
215module_init(init_udf_fs) 218module_init(init_udf_fs)
216 module_exit(exit_udf_fs) 219module_exit(exit_udf_fs)
217 220
218/* 221/*
219 * udf_parse_options 222 * udf_parse_options
@@ -239,7 +242,7 @@ module_init(init_udf_fs)
239 * 242 *
240 * The remaining are for debugging and disaster recovery: 243 * The remaining are for debugging and disaster recovery:
241 * 244 *
242 * novrs Skip volume sequence recognition 245 * novrs Skip volume sequence recognition
243 * 246 *
 244 * The following expect an offset from 0. 247
245 * 248 *
@@ -268,6 +271,7 @@ module_init(init_udf_fs)
268 * July 1, 1997 - Andrew E. Mileski 271 * July 1, 1997 - Andrew E. Mileski
269 * Written, tested, and released. 272 * Written, tested, and released.
270 */ 273 */
274
271enum { 275enum {
272 Opt_novrs, Opt_nostrict, Opt_bs, Opt_unhide, Opt_undelete, 276 Opt_novrs, Opt_nostrict, Opt_bs, Opt_unhide, Opt_undelete,
273 Opt_noadinicb, Opt_adinicb, Opt_shortad, Opt_longad, 277 Opt_noadinicb, Opt_adinicb, Opt_shortad, Opt_longad,
@@ -278,32 +282,32 @@ enum {
278}; 282};
279 283
280static match_table_t tokens = { 284static match_table_t tokens = {
281 {Opt_novrs, "novrs"}, 285 {Opt_novrs, "novrs"},
282 {Opt_nostrict, "nostrict"}, 286 {Opt_nostrict, "nostrict"},
283 {Opt_bs, "bs=%u"}, 287 {Opt_bs, "bs=%u"},
284 {Opt_unhide, "unhide"}, 288 {Opt_unhide, "unhide"},
285 {Opt_undelete, "undelete"}, 289 {Opt_undelete, "undelete"},
286 {Opt_noadinicb, "noadinicb"}, 290 {Opt_noadinicb, "noadinicb"},
287 {Opt_adinicb, "adinicb"}, 291 {Opt_adinicb, "adinicb"},
288 {Opt_shortad, "shortad"}, 292 {Opt_shortad, "shortad"},
289 {Opt_longad, "longad"}, 293 {Opt_longad, "longad"},
290 {Opt_uforget, "uid=forget"}, 294 {Opt_uforget, "uid=forget"},
291 {Opt_uignore, "uid=ignore"}, 295 {Opt_uignore, "uid=ignore"},
292 {Opt_gforget, "gid=forget"}, 296 {Opt_gforget, "gid=forget"},
293 {Opt_gignore, "gid=ignore"}, 297 {Opt_gignore, "gid=ignore"},
294 {Opt_gid, "gid=%u"}, 298 {Opt_gid, "gid=%u"},
295 {Opt_uid, "uid=%u"}, 299 {Opt_uid, "uid=%u"},
296 {Opt_umask, "umask=%o"}, 300 {Opt_umask, "umask=%o"},
297 {Opt_session, "session=%u"}, 301 {Opt_session, "session=%u"},
298 {Opt_lastblock, "lastblock=%u"}, 302 {Opt_lastblock, "lastblock=%u"},
299 {Opt_anchor, "anchor=%u"}, 303 {Opt_anchor, "anchor=%u"},
300 {Opt_volume, "volume=%u"}, 304 {Opt_volume, "volume=%u"},
301 {Opt_partition, "partition=%u"}, 305 {Opt_partition, "partition=%u"},
302 {Opt_fileset, "fileset=%u"}, 306 {Opt_fileset, "fileset=%u"},
303 {Opt_rootdir, "rootdir=%u"}, 307 {Opt_rootdir, "rootdir=%u"},
304 {Opt_utf8, "utf8"}, 308 {Opt_utf8, "utf8"},
305 {Opt_iocharset, "iocharset=%s"}, 309 {Opt_iocharset, "iocharset=%s"},
306 {Opt_err, NULL} 310 {Opt_err, NULL}
307}; 311};
308 312
309static int udf_parse_options(char *options, struct udf_options *uopt) 313static int udf_parse_options(char *options, struct udf_options *uopt)
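
For context, a match_table_t like the tokens[] table above is consumed with the kernel's match_token()/match_int() helpers from <linux/parser.h>. The fragment below is only an illustrative sketch of that loop; the uopt->novrs and uopt->blocksize fields are assumed from the surrounding file and not spelled out in this hunk:

	/* Illustrative sketch of how tokens[] is consumed; not the full
	 * udf_parse_options(), and the udf_options fields are assumed. */
	#include <linux/parser.h>
	#include <linux/string.h>

	static int example_parse(char *options, struct udf_options *uopt)
	{
		substring_t args[MAX_OPT_ARGS];
		char *p;
		int option;

		while ((p = strsep(&options, ",")) != NULL) {
			if (!*p)
				continue;
			switch (match_token(p, tokens, args)) {
			case Opt_novrs:
				uopt->novrs = 1;
				break;
			case Opt_bs:
				if (match_int(&args[0], &option))
					return 0;	/* malformed number */
				uopt->blocksize = option;
				break;
			default:
				return 0;		/* unknown option */
			}
		}
		return 1;
	}
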
@@ -444,9 +448,11 @@ static int udf_parse_options(char *options, struct udf_options *uopt)
444void udf_write_super(struct super_block *sb) 448void udf_write_super(struct super_block *sb)
445{ 449{
446 lock_kernel(); 450 lock_kernel();
451
447 if (!(sb->s_flags & MS_RDONLY)) 452 if (!(sb->s_flags & MS_RDONLY))
448 udf_open_lvid(sb); 453 udf_open_lvid(sb);
449 sb->s_dirt = 0; 454 sb->s_dirt = 0;
455
450 unlock_kernel(); 456 unlock_kernel();
451} 457}
452 458
@@ -455,16 +461,16 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
455 struct udf_options uopt; 461 struct udf_options uopt;
456 462
457 uopt.flags = UDF_SB(sb)->s_flags; 463 uopt.flags = UDF_SB(sb)->s_flags;
458 uopt.uid = UDF_SB(sb)->s_uid; 464 uopt.uid = UDF_SB(sb)->s_uid;
459 uopt.gid = UDF_SB(sb)->s_gid; 465 uopt.gid = UDF_SB(sb)->s_gid;
460 uopt.umask = UDF_SB(sb)->s_umask; 466 uopt.umask = UDF_SB(sb)->s_umask;
461 467
462 if (!udf_parse_options(options, &uopt)) 468 if (!udf_parse_options(options, &uopt))
463 return -EINVAL; 469 return -EINVAL;
464 470
465 UDF_SB(sb)->s_flags = uopt.flags; 471 UDF_SB(sb)->s_flags = uopt.flags;
466 UDF_SB(sb)->s_uid = uopt.uid; 472 UDF_SB(sb)->s_uid = uopt.uid;
467 UDF_SB(sb)->s_gid = uopt.gid; 473 UDF_SB(sb)->s_gid = uopt.gid;
468 UDF_SB(sb)->s_umask = uopt.umask; 474 UDF_SB(sb)->s_umask = uopt.umask;
469 475
470 if (UDF_SB_LVIDBH(sb)) { 476 if (UDF_SB_LVIDBH(sb)) {
@@ -517,6 +523,7 @@ static int udf_set_blocksize(struct super_block *sb, int bsize)
517 printk(KERN_ERR "udf: bad block size (%d)\n", bsize); 523 printk(KERN_ERR "udf: bad block size (%d)\n", bsize);
518 return 0; 524 return 0;
519 } 525 }
526
520 return sb->s_blocksize; 527 return sb->s_blocksize;
521} 528}
522 529
@@ -552,15 +559,12 @@ static int udf_vrs(struct super_block *sb, int silent)
552 559
553 /* Look for ISO descriptors */ 560 /* Look for ISO descriptors */
554 vsd = (struct volStructDesc *)(bh->b_data + 561 vsd = (struct volStructDesc *)(bh->b_data +
555 (sector & 562 (sector & (sb->s_blocksize - 1)));
556 (sb->s_blocksize - 1)));
557 563
558 if (vsd->stdIdent[0] == 0) { 564 if (vsd->stdIdent[0] == 0) {
559 brelse(bh); 565 brelse(bh);
560 break; 566 break;
561 } else 567 } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_CD001, VSD_STD_ID_LEN)) {
562 if (!strncmp
563 (vsd->stdIdent, VSD_STD_ID_CD001, VSD_STD_ID_LEN)) {
564 iso9660 = sector; 568 iso9660 = sector;
565 switch (vsd->structType) { 569 switch (vsd->structType) {
566 case 0: 570 case 0:
@@ -587,21 +591,13 @@ static int udf_vrs(struct super_block *sb, int silent)
587 vsd->structType); 591 vsd->structType);
588 break; 592 break;
589 } 593 }
590 } else 594 } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_BEA01, VSD_STD_ID_LEN)) {
591 if (!strncmp 595 } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_TEA01, VSD_STD_ID_LEN)) {
592 (vsd->stdIdent, VSD_STD_ID_BEA01, VSD_STD_ID_LEN)) {
593 } else
594 if (!strncmp
595 (vsd->stdIdent, VSD_STD_ID_TEA01, VSD_STD_ID_LEN)) {
596 brelse(bh); 596 brelse(bh);
597 break; 597 break;
598 } else 598 } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR02, VSD_STD_ID_LEN)) {
599 if (!strncmp
600 (vsd->stdIdent, VSD_STD_ID_NSR02, VSD_STD_ID_LEN)) {
601 nsr02 = sector; 599 nsr02 = sector;
602 } else 600 } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR03, VSD_STD_ID_LEN)) {
603 if (!strncmp
604 (vsd->stdIdent, VSD_STD_ID_NSR03, VSD_STD_ID_LEN)) {
605 nsr03 = sector; 601 nsr03 = sector;
606 } 602 }
607 brelse(bh); 603 brelse(bh);
@@ -644,11 +640,10 @@ static void udf_find_anchor(struct super_block *sb)
644 640
645 if (lastblock) { 641 if (lastblock) {
646 int varlastblock = udf_variable_to_fixed(lastblock); 642 int varlastblock = udf_variable_to_fixed(lastblock);
647 int last[] = { lastblock, lastblock - 2, 643 int last[] = { lastblock, lastblock - 2,
648 lastblock - 150, lastblock - 152, 644 lastblock - 150, lastblock - 152,
649 varlastblock, varlastblock - 2, 645 varlastblock, varlastblock - 2,
650 varlastblock - 150, varlastblock - 152 646 varlastblock - 150, varlastblock - 152 };
651 };
652 647
653 lastblock = 0; 648 lastblock = 0;
654 649
@@ -664,88 +659,54 @@ static void udf_find_anchor(struct super_block *sb)
664 if (last[i] < 0 || !(bh = sb_bread(sb, last[i]))) { 659 if (last[i] < 0 || !(bh = sb_bread(sb, last[i]))) {
665 ident = location = 0; 660 ident = location = 0;
666 } else { 661 } else {
667 ident = 662 ident = le16_to_cpu(((tag *)bh->b_data)->tagIdent);
668 le16_to_cpu(((tag *) bh->b_data)->tagIdent); 663 location = le32_to_cpu(((tag *)bh->b_data)->tagLocation);
669 location =
670 le32_to_cpu(((tag *) bh->b_data)->
671 tagLocation);
672 brelse(bh); 664 brelse(bh);
673 } 665 }
674 666
675 if (ident == TAG_IDENT_AVDP) { 667 if (ident == TAG_IDENT_AVDP) {
676 if (location == last[i] - UDF_SB_SESSION(sb)) { 668 if (location == last[i] - UDF_SB_SESSION(sb)) {
677 lastblock = UDF_SB_ANCHOR(sb)[0] = 669 lastblock = UDF_SB_ANCHOR(sb)[0] = last[i] - UDF_SB_SESSION(sb);
678 last[i] - UDF_SB_SESSION(sb); 670 UDF_SB_ANCHOR(sb)[1] = last[i] - 256 - UDF_SB_SESSION(sb);
679 UDF_SB_ANCHOR(sb)[1] = 671 } else if (location == udf_variable_to_fixed(last[i]) - UDF_SB_SESSION(sb)) {
680 last[i] - 256 - UDF_SB_SESSION(sb);
681 } else if (location ==
682 udf_variable_to_fixed(last[i]) -
683 UDF_SB_SESSION(sb)) {
684 UDF_SET_FLAG(sb, UDF_FLAG_VARCONV); 672 UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
685 lastblock = UDF_SB_ANCHOR(sb)[0] = 673 lastblock = UDF_SB_ANCHOR(sb)[0] = udf_variable_to_fixed(last[i]) - UDF_SB_SESSION(sb);
686 udf_variable_to_fixed(last[i]) - 674 UDF_SB_ANCHOR(sb)[1] = lastblock - 256 - UDF_SB_SESSION(sb);
687 UDF_SB_SESSION(sb); 675 } else {
688 UDF_SB_ANCHOR(sb)[1] = 676 udf_debug("Anchor found at block %d, location mismatch %d.\n",
689 lastblock - 256 - 677 last[i], location);
690 UDF_SB_SESSION(sb); 678 }
691 } else 679 } else if (ident == TAG_IDENT_FE || ident == TAG_IDENT_EFE) {
692 udf_debug
693 ("Anchor found at block %d, location mismatch %d.\n",
694 last[i], location);
695 } else if (ident == TAG_IDENT_FE
696 || ident == TAG_IDENT_EFE) {
697 lastblock = last[i]; 680 lastblock = last[i];
698 UDF_SB_ANCHOR(sb)[3] = 512; 681 UDF_SB_ANCHOR(sb)[3] = 512;
699 } else { 682 } else {
700 if (last[i] < 256 683 if (last[i] < 256 || !(bh = sb_bread(sb, last[i] - 256))) {
701 || !(bh = sb_bread(sb, last[i] - 256))) {
702 ident = location = 0; 684 ident = location = 0;
703 } else { 685 } else {
704 ident = 686 ident = le16_to_cpu(((tag *)bh->b_data)->tagIdent);
705 le16_to_cpu(((tag *) bh->b_data)-> 687 location = le32_to_cpu(((tag *)bh->b_data)->tagLocation);
706 tagIdent);
707 location =
708 le32_to_cpu(((tag *) bh->b_data)->
709 tagLocation);
710 brelse(bh); 688 brelse(bh);
711 } 689 }
712 690
713 if (ident == TAG_IDENT_AVDP && 691 if (ident == TAG_IDENT_AVDP &&
714 location == 692 location == last[i] - 256 - UDF_SB_SESSION(sb)) {
715 last[i] - 256 - UDF_SB_SESSION(sb)) {
716 lastblock = last[i]; 693 lastblock = last[i];
717 UDF_SB_ANCHOR(sb)[1] = last[i] - 256; 694 UDF_SB_ANCHOR(sb)[1] = last[i] - 256;
718 } else { 695 } else {
719 if (last[i] < 312 + UDF_SB_SESSION(sb) 696 if (last[i] < 312 + UDF_SB_SESSION(sb) ||
720 || !(bh = 697 !(bh = sb_bread(sb, last[i] - 312 - UDF_SB_SESSION(sb)))) {
721 sb_bread(sb,
722 last[i] - 312 -
723 UDF_SB_SESSION(sb))))
724 {
725 ident = location = 0; 698 ident = location = 0;
726 } else { 699 } else {
727 ident = 700 ident = le16_to_cpu(((tag *)bh->b_data)->tagIdent);
728 le16_to_cpu(((tag *) bh-> 701 location = le32_to_cpu(((tag *)bh->b_data)->tagLocation);
729 b_data)->
730 tagIdent);
731 location =
732 le32_to_cpu(((tag *) bh->
733 b_data)->
734 tagLocation);
735 brelse(bh); 702 brelse(bh);
736 } 703 }
737 704
738 if (ident == TAG_IDENT_AVDP && 705 if (ident == TAG_IDENT_AVDP &&
739 location == 706 location == udf_variable_to_fixed(last[i]) - 256) {
740 udf_variable_to_fixed(last[i]) - 707 UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
741 256) { 708 lastblock = udf_variable_to_fixed(last[i]);
742 UDF_SET_FLAG(sb, 709 UDF_SB_ANCHOR(sb)[1] = lastblock - 256;
743 UDF_FLAG_VARCONV);
744 lastblock =
745 udf_variable_to_fixed(last
746 [i]);
747 UDF_SB_ANCHOR(sb)[1] =
748 lastblock - 256;
749 } 710 }
750 } 711 }
751 } 712 }
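
The branches above repeat the same three-step read of a descriptor tag (sb_bread(), pull tagIdent/tagLocation out of the buffer, brelse()). Purely as a reading aid, that step in isolation looks like the sketch below; the helper name is hypothetical and nothing in the patch introduces it:

	/* Hypothetical helper, for illustration only: read the tag at
	 * 'block' and return its identifier and recorded location. */
	static int udf_read_tag_loc(struct super_block *sb, long block,
				    uint16_t *ident, uint32_t *location)
	{
		struct buffer_head *bh = sb_bread(sb, block);

		if (!bh) {
			*ident = 0;
			*location = 0;
			return -EIO;
		}
		*ident = le16_to_cpu(((tag *)bh->b_data)->tagIdent);
		*location = le32_to_cpu(((tag *)bh->b_data)->tagLocation);
		brelse(bh);
		return 0;
	}
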
@@ -755,9 +716,8 @@ static void udf_find_anchor(struct super_block *sb)
755 if (!lastblock) { 716 if (!lastblock) {
 756 /* We haven't found the lastblock. Check 312 */ 717
757 if ((bh = sb_bread(sb, 312 + UDF_SB_SESSION(sb)))) { 718 if ((bh = sb_bread(sb, 312 + UDF_SB_SESSION(sb)))) {
758 ident = le16_to_cpu(((tag *) bh->b_data)->tagIdent); 719 ident = le16_to_cpu(((tag *)bh->b_data)->tagIdent);
759 location = 720 location = le32_to_cpu(((tag *)bh->b_data)->tagLocation);
760 le32_to_cpu(((tag *) bh->b_data)->tagLocation);
761 brelse(bh); 721 brelse(bh);
762 722
763 if (ident == TAG_IDENT_AVDP && location == 256) 723 if (ident == TAG_IDENT_AVDP && location == 256)
@@ -767,19 +727,13 @@ static void udf_find_anchor(struct super_block *sb)
767 727
768 for (i = 0; i < ARRAY_SIZE(UDF_SB_ANCHOR(sb)); i++) { 728 for (i = 0; i < ARRAY_SIZE(UDF_SB_ANCHOR(sb)); i++) {
769 if (UDF_SB_ANCHOR(sb)[i]) { 729 if (UDF_SB_ANCHOR(sb)[i]) {
770 if (!(bh = udf_read_tagged(sb, 730 if (!(bh = udf_read_tagged(sb, UDF_SB_ANCHOR(sb)[i],
771 UDF_SB_ANCHOR(sb)[i], 731 UDF_SB_ANCHOR(sb)[i], &ident))) {
772 UDF_SB_ANCHOR(sb)[i],
773 &ident))) {
774 UDF_SB_ANCHOR(sb)[i] = 0; 732 UDF_SB_ANCHOR(sb)[i] = 0;
775 } else { 733 } else {
776 brelse(bh); 734 brelse(bh);
777 if ((ident != TAG_IDENT_AVDP) && (i || 735 if ((ident != TAG_IDENT_AVDP) &&
778 (ident != 736 (i || (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE))) {
779 TAG_IDENT_FE
780 && ident !=
781 TAG_IDENT_EFE)))
782 {
783 UDF_SB_ANCHOR(sb)[i] = 0; 737 UDF_SB_ANCHOR(sb)[i] = 0;
784 } 738 }
785 } 739 }
@@ -789,9 +743,7 @@ static void udf_find_anchor(struct super_block *sb)
789 UDF_SB_LASTBLOCK(sb) = lastblock; 743 UDF_SB_LASTBLOCK(sb) = lastblock;
790} 744}
791 745
792static int 746static int udf_find_fileset(struct super_block *sb, kernel_lb_addr *fileset, kernel_lb_addr *root)
793udf_find_fileset(struct super_block *sb, kernel_lb_addr * fileset,
794 kernel_lb_addr * root)
795{ 747{
796 struct buffer_head *bh = NULL; 748 struct buffer_head *bh = NULL;
797 long lastblock; 749 long lastblock;
@@ -801,18 +753,19 @@ udf_find_fileset(struct super_block *sb, kernel_lb_addr * fileset,
801 fileset->partitionReferenceNum != 0xFFFF) { 753 fileset->partitionReferenceNum != 0xFFFF) {
802 bh = udf_read_ptagged(sb, *fileset, 0, &ident); 754 bh = udf_read_ptagged(sb, *fileset, 0, &ident);
803 755
804 if (!bh) 756 if (!bh) {
805 return 1; 757 return 1;
806 else if (ident != TAG_IDENT_FSD) { 758 } else if (ident != TAG_IDENT_FSD) {
807 brelse(bh); 759 brelse(bh);
808 return 1; 760 return 1;
809 } 761 }
810 762
811 } 763 }
812 764
813 if (!bh) { /* Search backwards through the partitions */ 765 if (!bh) { /* Search backwards through the partitions */
814 kernel_lb_addr newfileset; 766 kernel_lb_addr newfileset;
815 767
768/* --> cvg: FIXME - is it reasonable? */
816 return 1; 769 return 1;
817 770
818 for (newfileset.partitionReferenceNum = UDF_SB_NUMPARTS(sb) - 1; 771 for (newfileset.partitionReferenceNum = UDF_SB_NUMPARTS(sb) - 1;
@@ -820,14 +773,11 @@ udf_find_fileset(struct super_block *sb, kernel_lb_addr * fileset,
820 fileset->logicalBlockNum == 0xFFFFFFFF && 773 fileset->logicalBlockNum == 0xFFFFFFFF &&
821 fileset->partitionReferenceNum == 0xFFFF); 774 fileset->partitionReferenceNum == 0xFFFF);
822 newfileset.partitionReferenceNum--) { 775 newfileset.partitionReferenceNum--) {
823 lastblock = 776 lastblock = UDF_SB_PARTLEN(sb, newfileset.partitionReferenceNum);
824 UDF_SB_PARTLEN(sb,
825 newfileset.partitionReferenceNum);
826 newfileset.logicalBlockNum = 0; 777 newfileset.logicalBlockNum = 0;
827 778
828 do { 779 do {
829 bh = udf_read_ptagged(sb, newfileset, 0, 780 bh = udf_read_ptagged(sb, newfileset, 0, &ident);
830 &ident);
831 if (!bh) { 781 if (!bh) {
832 newfileset.logicalBlockNum++; 782 newfileset.logicalBlockNum++;
833 continue; 783 continue;
@@ -835,38 +785,28 @@ udf_find_fileset(struct super_block *sb, kernel_lb_addr * fileset,
835 785
836 switch (ident) { 786 switch (ident) {
837 case TAG_IDENT_SBD: 787 case TAG_IDENT_SBD:
838 { 788 {
839 struct spaceBitmapDesc *sp; 789 struct spaceBitmapDesc *sp;
840 sp = (struct spaceBitmapDesc *) 790 sp = (struct spaceBitmapDesc *)bh->b_data;
841 bh->b_data; 791 newfileset.logicalBlockNum += 1 +
842 newfileset.logicalBlockNum += 792 ((le32_to_cpu(sp->numOfBytes) +
843 1 + 793 sizeof(struct spaceBitmapDesc) - 1)
844 ((le32_to_cpu 794 >> sb->s_blocksize_bits);
845 (sp->numOfBytes) + 795 brelse(bh);
846 sizeof(struct 796 break;
847 spaceBitmapDesc) - 797 }
848 1)
849 >> sb->s_blocksize_bits);
850 brelse(bh);
851 break;
852 }
853 case TAG_IDENT_FSD: 798 case TAG_IDENT_FSD:
854 { 799 *fileset = newfileset;
855 *fileset = newfileset; 800 break;
856 break;
857 }
858 default: 801 default:
859 { 802 newfileset.logicalBlockNum++;
860 newfileset.logicalBlockNum++; 803 brelse(bh);
861 brelse(bh); 804 bh = NULL;
862 bh = NULL; 805 break;
863 break;
864 }
865 } 806 }
866 } 807 } while (newfileset.logicalBlockNum < lastblock &&
867 while (newfileset.logicalBlockNum < lastblock && 808 fileset->logicalBlockNum == 0xFFFFFFFF &&
868 fileset->logicalBlockNum == 0xFFFFFFFF && 809 fileset->partitionReferenceNum == 0xFFFF);
869 fileset->partitionReferenceNum == 0xFFFF);
870 } 810 }
871 } 811 }
872 812
@@ -898,10 +838,10 @@ static void udf_load_pvoldesc(struct super_block *sb, struct buffer_head *bh)
898 lets_to_cpu(pvoldesc->recordingDateAndTime))) { 838 lets_to_cpu(pvoldesc->recordingDateAndTime))) {
899 kernel_timestamp ts; 839 kernel_timestamp ts;
900 ts = lets_to_cpu(pvoldesc->recordingDateAndTime); 840 ts = lets_to_cpu(pvoldesc->recordingDateAndTime);
901 udf_debug 841 udf_debug("recording time %ld/%ld, %04u/%02u/%02u %02u:%02u (%x)\n",
902 ("recording time %ld/%ld, %04u/%02u/%02u %02u:%02u (%x)\n", 842 recording, recording_usec,
903 recording, recording_usec, ts.year, ts.month, ts.day, 843 ts.year, ts.month, ts.day, ts.hour,
904 ts.hour, ts.minute, ts.typeAndTimezone); 844 ts.minute, ts.typeAndTimezone);
905 UDF_SB_RECORDTIME(sb).tv_sec = recording; 845 UDF_SB_RECORDTIME(sb).tv_sec = recording;
906 UDF_SB_RECORDTIME(sb).tv_nsec = recording_usec * 1000; 846 UDF_SB_RECORDTIME(sb).tv_nsec = recording_usec * 1000;
907 } 847 }
@@ -920,9 +860,8 @@ static void udf_load_pvoldesc(struct super_block *sb, struct buffer_head *bh)
920 } 860 }
921} 861}
922 862
923static void 863static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh,
924udf_load_fileset(struct super_block *sb, struct buffer_head *bh, 864 kernel_lb_addr *root)
925 kernel_lb_addr * root)
926{ 865{
927 struct fileSetDesc *fset; 866 struct fileSetDesc *fset;
928 867
@@ -945,121 +884,72 @@ static void udf_load_partdesc(struct super_block *sb, struct buffer_head *bh)
945 884
946 for (i = 0; i < UDF_SB_NUMPARTS(sb); i++) { 885 for (i = 0; i < UDF_SB_NUMPARTS(sb); i++) {
947 udf_debug("Searching map: (%d == %d)\n", 886 udf_debug("Searching map: (%d == %d)\n",
948 UDF_SB_PARTMAPS(sb)[i].s_partition_num, 887 UDF_SB_PARTMAPS(sb)[i].s_partition_num, le16_to_cpu(p->partitionNumber));
949 le16_to_cpu(p->partitionNumber)); 888 if (UDF_SB_PARTMAPS(sb)[i].s_partition_num == le16_to_cpu(p->partitionNumber)) {
950 if (UDF_SB_PARTMAPS(sb)[i].s_partition_num == 889 UDF_SB_PARTLEN(sb,i) = le32_to_cpu(p->partitionLength); /* blocks */
951 le16_to_cpu(p->partitionNumber)) { 890 UDF_SB_PARTROOT(sb,i) = le32_to_cpu(p->partitionStartingLocation);
952 UDF_SB_PARTLEN(sb, i) = le32_to_cpu(p->partitionLength); /* blocks */ 891 if (le32_to_cpu(p->accessType) == PD_ACCESS_TYPE_READ_ONLY)
953 UDF_SB_PARTROOT(sb, i) = 892 UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_READ_ONLY;
954 le32_to_cpu(p->partitionStartingLocation); 893 if (le32_to_cpu(p->accessType) == PD_ACCESS_TYPE_WRITE_ONCE)
955 if (le32_to_cpu(p->accessType) == 894 UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_WRITE_ONCE;
956 PD_ACCESS_TYPE_READ_ONLY) 895 if (le32_to_cpu(p->accessType) == PD_ACCESS_TYPE_REWRITABLE)
957 UDF_SB_PARTFLAGS(sb, i) |= 896 UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_REWRITABLE;
958 UDF_PART_FLAG_READ_ONLY; 897 if (le32_to_cpu(p->accessType) == PD_ACCESS_TYPE_OVERWRITABLE)
959 if (le32_to_cpu(p->accessType) == 898 UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_OVERWRITABLE;
960 PD_ACCESS_TYPE_WRITE_ONCE) 899
961 UDF_SB_PARTFLAGS(sb, i) |= 900 if (!strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR02) ||
962 UDF_PART_FLAG_WRITE_ONCE; 901 !strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR03)) {
963 if (le32_to_cpu(p->accessType) ==
964 PD_ACCESS_TYPE_REWRITABLE)
965 UDF_SB_PARTFLAGS(sb, i) |=
966 UDF_PART_FLAG_REWRITABLE;
967 if (le32_to_cpu(p->accessType) ==
968 PD_ACCESS_TYPE_OVERWRITABLE)
969 UDF_SB_PARTFLAGS(sb, i) |=
970 UDF_PART_FLAG_OVERWRITABLE;
971
972 if (!strcmp
973 (p->partitionContents.ident,
974 PD_PARTITION_CONTENTS_NSR02)
975 || !strcmp(p->partitionContents.ident,
976 PD_PARTITION_CONTENTS_NSR03)) {
977 struct partitionHeaderDesc *phd; 902 struct partitionHeaderDesc *phd;
978 903
979 phd = 904 phd = (struct partitionHeaderDesc *)(p->partitionContentsUse);
980 (struct partitionHeaderDesc *)(p->
981 partitionContentsUse);
982 if (phd->unallocSpaceTable.extLength) { 905 if (phd->unallocSpaceTable.extLength) {
983 kernel_lb_addr loc = 906 kernel_lb_addr loc = {
984 { le32_to_cpu(phd-> 907 .logicalBlockNum = le32_to_cpu(phd->unallocSpaceTable.extPosition),
985 unallocSpaceTable. 908 .partitionReferenceNum = i,
986 extPosition), i }; 909 };
987 910
988 UDF_SB_PARTMAPS(sb)[i].s_uspace. 911 UDF_SB_PARTMAPS(sb)[i].s_uspace.s_table =
989 s_table = udf_iget(sb, loc); 912 udf_iget(sb, loc);
990 UDF_SB_PARTFLAGS(sb, i) |= 913 UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_UNALLOC_TABLE;
991 UDF_PART_FLAG_UNALLOC_TABLE; 914 udf_debug("unallocSpaceTable (part %d) @ %ld\n",
992 udf_debug 915 i, UDF_SB_PARTMAPS(sb)[i].s_uspace.s_table->i_ino);
993 ("unallocSpaceTable (part %d) @ %ld\n",
994 i,
995 UDF_SB_PARTMAPS(sb)[i].s_uspace.
996 s_table->i_ino);
997 } 916 }
998 if (phd->unallocSpaceBitmap.extLength) { 917 if (phd->unallocSpaceBitmap.extLength) {
999 UDF_SB_ALLOC_BITMAP(sb, i, s_uspace); 918 UDF_SB_ALLOC_BITMAP(sb, i, s_uspace);
1000 if (UDF_SB_PARTMAPS(sb)[i].s_uspace. 919 if (UDF_SB_PARTMAPS(sb)[i].s_uspace.s_bitmap != NULL) {
1001 s_bitmap != NULL) { 920 UDF_SB_PARTMAPS(sb)[i].s_uspace.s_bitmap->s_extLength =
1002 UDF_SB_PARTMAPS(sb)[i].s_uspace. 921 le32_to_cpu(phd->unallocSpaceBitmap.extLength);
1003 s_bitmap->s_extLength = 922 UDF_SB_PARTMAPS(sb)[i].s_uspace.s_bitmap->s_extPosition =
1004 le32_to_cpu(phd-> 923 le32_to_cpu(phd->unallocSpaceBitmap.extPosition);
1005 unallocSpaceBitmap. 924 UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_UNALLOC_BITMAP;
1006 extLength); 925 udf_debug("unallocSpaceBitmap (part %d) @ %d\n",
1007 UDF_SB_PARTMAPS(sb)[i].s_uspace. 926 i, UDF_SB_PARTMAPS(sb)[i].s_uspace.s_bitmap->s_extPosition);
1008 s_bitmap->s_extPosition =
1009 le32_to_cpu(phd->
1010 unallocSpaceBitmap.
1011 extPosition);
1012 UDF_SB_PARTFLAGS(sb, i) |=
1013 UDF_PART_FLAG_UNALLOC_BITMAP;
1014 udf_debug
1015 ("unallocSpaceBitmap (part %d) @ %d\n",
1016 i,
1017 UDF_SB_PARTMAPS(sb)[i].
1018 s_uspace.s_bitmap->
1019 s_extPosition);
1020 } 927 }
1021 } 928 }
1022 if (phd->partitionIntegrityTable.extLength) 929 if (phd->partitionIntegrityTable.extLength)
1023 udf_debug 930 udf_debug("partitionIntegrityTable (part %d)\n", i);
1024 ("partitionIntegrityTable (part %d)\n",
1025 i);
1026 if (phd->freedSpaceTable.extLength) { 931 if (phd->freedSpaceTable.extLength) {
1027 kernel_lb_addr loc = 932 kernel_lb_addr loc = {
1028 { le32_to_cpu(phd->freedSpaceTable. 933 .logicalBlockNum = le32_to_cpu(phd->freedSpaceTable.extPosition),
1029 extPosition), i }; 934 .partitionReferenceNum = i,
1030 935 };
1031 UDF_SB_PARTMAPS(sb)[i].s_fspace. 936
1032 s_table = udf_iget(sb, loc); 937 UDF_SB_PARTMAPS(sb)[i].s_fspace.s_table =
1033 UDF_SB_PARTFLAGS(sb, i) |= 938 udf_iget(sb, loc);
1034 UDF_PART_FLAG_FREED_TABLE; 939 UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_FREED_TABLE;
1035 udf_debug 940 udf_debug("freedSpaceTable (part %d) @ %ld\n",
1036 ("freedSpaceTable (part %d) @ %ld\n", 941 i, UDF_SB_PARTMAPS(sb)[i].s_fspace.s_table->i_ino);
1037 i,
1038 UDF_SB_PARTMAPS(sb)[i].s_fspace.
1039 s_table->i_ino);
1040 } 942 }
1041 if (phd->freedSpaceBitmap.extLength) { 943 if (phd->freedSpaceBitmap.extLength) {
1042 UDF_SB_ALLOC_BITMAP(sb, i, s_fspace); 944 UDF_SB_ALLOC_BITMAP(sb, i, s_fspace);
1043 if (UDF_SB_PARTMAPS(sb)[i].s_fspace. 945 if (UDF_SB_PARTMAPS(sb)[i].s_fspace.s_bitmap != NULL) {
1044 s_bitmap != NULL) { 946 UDF_SB_PARTMAPS(sb)[i].s_fspace.s_bitmap->s_extLength =
1045 UDF_SB_PARTMAPS(sb)[i].s_fspace. 947 le32_to_cpu(phd->freedSpaceBitmap.extLength);
1046 s_bitmap->s_extLength = 948 UDF_SB_PARTMAPS(sb)[i].s_fspace.s_bitmap->s_extPosition =
1047 le32_to_cpu(phd-> 949 le32_to_cpu(phd->freedSpaceBitmap.extPosition);
1048 freedSpaceBitmap. 950 UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_FREED_BITMAP;
1049 extLength); 951 udf_debug("freedSpaceBitmap (part %d) @ %d\n",
1050 UDF_SB_PARTMAPS(sb)[i].s_fspace. 952 i, UDF_SB_PARTMAPS(sb)[i].s_fspace.s_bitmap->s_extPosition);
1051 s_bitmap->s_extPosition =
1052 le32_to_cpu(phd->
1053 freedSpaceBitmap.
1054 extPosition);
1055 UDF_SB_PARTFLAGS(sb, i) |=
1056 UDF_PART_FLAG_FREED_BITMAP;
1057 udf_debug
1058 ("freedSpaceBitmap (part %d) @ %d\n",
1059 i,
1060 UDF_SB_PARTMAPS(sb)[i].
1061 s_fspace.s_bitmap->
1062 s_extPosition);
1063 } 953 }
1064 } 954 }
1065 } 955 }
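
Purely as an illustration of the cleaned-up shape, the four consecutive accessType tests in the hunk above map one access type to one partition flag; the same mapping could be written as a single switch. This fragment is a sketch, not part of the patch:

	/* Sketch only, not in the patch: the same accessType-to-flag
	 * mapping as the four if-statements above, as a switch. */
	switch (le32_to_cpu(p->accessType)) {
	case PD_ACCESS_TYPE_READ_ONLY:
		UDF_SB_PARTFLAGS(sb, i) |= UDF_PART_FLAG_READ_ONLY;
		break;
	case PD_ACCESS_TYPE_WRITE_ONCE:
		UDF_SB_PARTFLAGS(sb, i) |= UDF_PART_FLAG_WRITE_ONCE;
		break;
	case PD_ACCESS_TYPE_REWRITABLE:
		UDF_SB_PARTFLAGS(sb, i) |= UDF_PART_FLAG_REWRITABLE;
		break;
	case PD_ACCESS_TYPE_OVERWRITABLE:
		UDF_SB_PARTFLAGS(sb, i) |= UDF_PART_FLAG_OVERWRITABLE;
		break;
	}
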
@@ -1070,16 +960,14 @@ static void udf_load_partdesc(struct super_block *sb, struct buffer_head *bh)
1070 udf_debug("Partition (%d) not found in partition map\n", 960 udf_debug("Partition (%d) not found in partition map\n",
1071 le16_to_cpu(p->partitionNumber)); 961 le16_to_cpu(p->partitionNumber));
1072 } else { 962 } else {
1073 udf_debug 963 udf_debug("Partition (%d:%d type %x) starts at physical %d, block length %d\n",
1074 ("Partition (%d:%d type %x) starts at physical %d, block length %d\n", 964 le16_to_cpu(p->partitionNumber), i, UDF_SB_PARTTYPE(sb,i),
1075 le16_to_cpu(p->partitionNumber), i, UDF_SB_PARTTYPE(sb, i), 965 UDF_SB_PARTROOT(sb,i), UDF_SB_PARTLEN(sb,i));
1076 UDF_SB_PARTROOT(sb, i), UDF_SB_PARTLEN(sb, i));
1077 } 966 }
1078} 967}
1079 968
1080static int 969static int udf_load_logicalvol(struct super_block *sb, struct buffer_head *bh,
1081udf_load_logicalvol(struct super_block *sb, struct buffer_head *bh, 970 kernel_lb_addr *fileset)
1082 kernel_lb_addr * fileset)
1083{ 971{
1084 struct logicalVolDesc *lvd; 972 struct logicalVolDesc *lvd;
1085 int i, j, offset; 973 int i, j, offset;
@@ -1090,116 +978,69 @@ udf_load_logicalvol(struct super_block *sb, struct buffer_head *bh,
1090 UDF_SB_ALLOC_PARTMAPS(sb, le32_to_cpu(lvd->numPartitionMaps)); 978 UDF_SB_ALLOC_PARTMAPS(sb, le32_to_cpu(lvd->numPartitionMaps));
1091 979
1092 for (i = 0, offset = 0; 980 for (i = 0, offset = 0;
1093 i < UDF_SB_NUMPARTS(sb) 981 i < UDF_SB_NUMPARTS(sb) && offset < le32_to_cpu(lvd->mapTableLength);
1094 && offset < le32_to_cpu(lvd->mapTableLength); 982 i++, offset += ((struct genericPartitionMap *)&(lvd->partitionMaps[offset]))->partitionMapLength) {
1095 i++, offset += 983 type = ((struct genericPartitionMap *)&(lvd->partitionMaps[offset]))->partitionMapType;
1096 ((struct genericPartitionMap *)&(lvd->partitionMaps[offset]))->
1097 partitionMapLength) {
1098 type =
1099 ((struct genericPartitionMap *)
1100 &(lvd->partitionMaps[offset]))->partitionMapType;
1101 if (type == 1) { 984 if (type == 1) {
1102 struct genericPartitionMap1 *gpm1 = 985 struct genericPartitionMap1 *gpm1 = (struct genericPartitionMap1 *)&(lvd->partitionMaps[offset]);
1103 (struct genericPartitionMap1 *)&(lvd-> 986 UDF_SB_PARTTYPE(sb,i) = UDF_TYPE1_MAP15;
1104 partitionMaps 987 UDF_SB_PARTVSN(sb,i) = le16_to_cpu(gpm1->volSeqNum);
1105 [offset]); 988 UDF_SB_PARTNUM(sb,i) = le16_to_cpu(gpm1->partitionNum);
1106 UDF_SB_PARTTYPE(sb, i) = UDF_TYPE1_MAP15; 989 UDF_SB_PARTFUNC(sb,i) = NULL;
1107 UDF_SB_PARTVSN(sb, i) = le16_to_cpu(gpm1->volSeqNum);
1108 UDF_SB_PARTNUM(sb, i) = le16_to_cpu(gpm1->partitionNum);
1109 UDF_SB_PARTFUNC(sb, i) = NULL;
1110 } else if (type == 2) { 990 } else if (type == 2) {
1111 struct udfPartitionMap2 *upm2 = 991 struct udfPartitionMap2 *upm2 = (struct udfPartitionMap2 *)&(lvd->partitionMaps[offset]);
1112 (struct udfPartitionMap2 *)&(lvd-> 992 if (!strncmp(upm2->partIdent.ident, UDF_ID_VIRTUAL, strlen(UDF_ID_VIRTUAL))) {
1113 partitionMaps[offset]); 993 if (le16_to_cpu(((__le16 *)upm2->partIdent.identSuffix)[0]) == 0x0150) {
1114 if (!strncmp 994 UDF_SB_PARTTYPE(sb,i) = UDF_VIRTUAL_MAP15;
1115 (upm2->partIdent.ident, UDF_ID_VIRTUAL, 995 UDF_SB_PARTFUNC(sb,i) = udf_get_pblock_virt15;
1116 strlen(UDF_ID_VIRTUAL))) { 996 } else if (le16_to_cpu(((__le16 *)upm2->partIdent.identSuffix)[0]) == 0x0200) {
1117 if (le16_to_cpu 997 UDF_SB_PARTTYPE(sb,i) = UDF_VIRTUAL_MAP20;
1118 (((__le16 *) upm2->partIdent. 998 UDF_SB_PARTFUNC(sb,i) = udf_get_pblock_virt20;
1119 identSuffix)[0]) == 0x0150) {
1120 UDF_SB_PARTTYPE(sb, i) =
1121 UDF_VIRTUAL_MAP15;
1122 UDF_SB_PARTFUNC(sb, i) =
1123 udf_get_pblock_virt15;
1124 } else
1125 if (le16_to_cpu
1126 (((__le16 *) upm2->partIdent.
1127 identSuffix)[0]) == 0x0200) {
1128 UDF_SB_PARTTYPE(sb, i) =
1129 UDF_VIRTUAL_MAP20;
1130 UDF_SB_PARTFUNC(sb, i) =
1131 udf_get_pblock_virt20;
1132 } 999 }
1133 } else 1000 } else if (!strncmp(upm2->partIdent.ident, UDF_ID_SPARABLE, strlen(UDF_ID_SPARABLE))) {
1134 if (!strncmp
1135 (upm2->partIdent.ident, UDF_ID_SPARABLE,
1136 strlen(UDF_ID_SPARABLE))) {
1137 uint32_t loc; 1001 uint32_t loc;
1138 uint16_t ident; 1002 uint16_t ident;
1139 struct sparingTable *st; 1003 struct sparingTable *st;
1140 struct sparablePartitionMap *spm = 1004 struct sparablePartitionMap *spm = (struct sparablePartitionMap *)&(lvd->partitionMaps[offset]);
1141 (struct sparablePartitionMap *)&(lvd-> 1005
1142 partitionMaps 1006 UDF_SB_PARTTYPE(sb,i) = UDF_SPARABLE_MAP15;
1143 [offset]); 1007 UDF_SB_TYPESPAR(sb,i).s_packet_len = le16_to_cpu(spm->packetLength);
1144
1145 UDF_SB_PARTTYPE(sb, i) = UDF_SPARABLE_MAP15;
1146 UDF_SB_TYPESPAR(sb, i).s_packet_len =
1147 le16_to_cpu(spm->packetLength);
1148 for (j = 0; j < spm->numSparingTables; j++) { 1008 for (j = 0; j < spm->numSparingTables; j++) {
1149 loc = 1009 loc = le32_to_cpu(spm->locSparingTable[j]);
1150 le32_to_cpu(spm-> 1010 UDF_SB_TYPESPAR(sb,i).s_spar_map[j] =
1151 locSparingTable[j]); 1011 udf_read_tagged(sb, loc, loc, &ident);
1152 UDF_SB_TYPESPAR(sb, i).s_spar_map[j] = 1012 if (UDF_SB_TYPESPAR(sb,i).s_spar_map[j] != NULL) {
1153 udf_read_tagged(sb, loc, loc, 1013 st = (struct sparingTable *)UDF_SB_TYPESPAR(sb,i).s_spar_map[j]->b_data;
1154 &ident); 1014 if (ident != 0 ||
1155 if (UDF_SB_TYPESPAR(sb, i). 1015 strncmp(st->sparingIdent.ident, UDF_ID_SPARING, strlen(UDF_ID_SPARING))) {
1156 s_spar_map[j] != NULL) { 1016 brelse(UDF_SB_TYPESPAR(sb,i).s_spar_map[j]);
1157 st = (struct sparingTable *) 1017 UDF_SB_TYPESPAR(sb,i).s_spar_map[j] = NULL;
1158 UDF_SB_TYPESPAR(sb,
1159 i).
1160 s_spar_map[j]->b_data;
1161 if (ident != 0
1162 || strncmp(st->sparingIdent.
1163 ident,
1164 UDF_ID_SPARING,
1165 strlen
1166 (UDF_ID_SPARING)))
1167 {
1168 brelse(UDF_SB_TYPESPAR
1169 (sb,
1170 i).
1171 s_spar_map[j]);
1172 UDF_SB_TYPESPAR(sb,
1173 i).
1174 s_spar_map[j] =
1175 NULL;
1176 } 1018 }
1177 } 1019 }
1178 } 1020 }
1179 UDF_SB_PARTFUNC(sb, i) = udf_get_pblock_spar15; 1021 UDF_SB_PARTFUNC(sb,i) = udf_get_pblock_spar15;
1180 } else { 1022 } else {
1181 udf_debug("Unknown ident: %s\n", 1023 udf_debug("Unknown ident: %s\n", upm2->partIdent.ident);
1182 upm2->partIdent.ident);
1183 continue; 1024 continue;
1184 } 1025 }
1185 UDF_SB_PARTVSN(sb, i) = le16_to_cpu(upm2->volSeqNum); 1026 UDF_SB_PARTVSN(sb,i) = le16_to_cpu(upm2->volSeqNum);
1186 UDF_SB_PARTNUM(sb, i) = le16_to_cpu(upm2->partitionNum); 1027 UDF_SB_PARTNUM(sb,i) = le16_to_cpu(upm2->partitionNum);
1187 } 1028 }
1188 udf_debug("Partition (%d:%d) type %d on volume %d\n", 1029 udf_debug("Partition (%d:%d) type %d on volume %d\n",
1189 i, UDF_SB_PARTNUM(sb, i), type, UDF_SB_PARTVSN(sb, 1030 i, UDF_SB_PARTNUM(sb,i), type, UDF_SB_PARTVSN(sb,i));
1190 i));
1191 } 1031 }
1192 1032
1193 if (fileset) { 1033 if (fileset) {
1194 long_ad *la = (long_ad *) & (lvd->logicalVolContentsUse[0]); 1034 long_ad *la = (long_ad *)&(lvd->logicalVolContentsUse[0]);
1195 1035
1196 *fileset = lelb_to_cpu(la->extLocation); 1036 *fileset = lelb_to_cpu(la->extLocation);
1197 udf_debug 1037 udf_debug("FileSet found in LogicalVolDesc at block=%d, partition=%d\n",
1198 ("FileSet found in LogicalVolDesc at block=%d, partition=%d\n", 1038 fileset->logicalBlockNum,
1199 fileset->logicalBlockNum, fileset->partitionReferenceNum); 1039 fileset->partitionReferenceNum);
1200 } 1040 }
1201 if (lvd->integritySeqExt.extLength) 1041 if (lvd->integritySeqExt.extLength)
1202 udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt)); 1042 udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt));
1043
1203 return 0; 1044 return 0;
1204} 1045}
1205 1046
@@ -1219,9 +1060,7 @@ static void udf_load_logicalvolint(struct super_block *sb, kernel_extent_ad loc)
1219 UDF_SB_LVIDBH(sb) = bh; 1060 UDF_SB_LVIDBH(sb) = bh;
1220 1061
1221 if (UDF_SB_LVID(sb)->nextIntegrityExt.extLength) 1062 if (UDF_SB_LVID(sb)->nextIntegrityExt.extLength)
1222 udf_load_logicalvolint(sb, 1063 udf_load_logicalvolint(sb, leea_to_cpu(UDF_SB_LVID(sb)->nextIntegrityExt));
1223 leea_to_cpu(UDF_SB_LVID(sb)->
1224 nextIntegrityExt));
1225 1064
1226 if (UDF_SB_LVIDBH(sb) != bh) 1065 if (UDF_SB_LVIDBH(sb) != bh)
1227 brelse(bh); 1066 brelse(bh);
@@ -1247,9 +1086,8 @@ static void udf_load_logicalvolint(struct super_block *sb, kernel_extent_ad loc)
1247 * July 1, 1997 - Andrew E. Mileski 1086 * July 1, 1997 - Andrew E. Mileski
1248 * Written, tested, and released. 1087 * Written, tested, and released.
1249 */ 1088 */
1250static int 1089static int udf_process_sequence(struct super_block *sb, long block, long lastblock,
1251udf_process_sequence(struct super_block *sb, long block, long lastblock, 1090 kernel_lb_addr *fileset)
1252 kernel_lb_addr * fileset)
1253{ 1091{
1254 struct buffer_head *bh = NULL; 1092 struct buffer_head *bh = NULL;
1255 struct udf_vds_record vds[VDS_POS_LENGTH]; 1093 struct udf_vds_record vds[VDS_POS_LENGTH];
@@ -1274,82 +1112,71 @@ udf_process_sequence(struct super_block *sb, long block, long lastblock,
1274 gd = (struct generic_desc *)bh->b_data; 1112 gd = (struct generic_desc *)bh->b_data;
1275 vdsn = le32_to_cpu(gd->volDescSeqNum); 1113 vdsn = le32_to_cpu(gd->volDescSeqNum);
1276 switch (ident) { 1114 switch (ident) {
1277 case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */ 1115 case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
1278 if (vdsn >= vds[VDS_POS_PRIMARY_VOL_DESC].volDescSeqNum) { 1116 if (vdsn >= vds[VDS_POS_PRIMARY_VOL_DESC].volDescSeqNum) {
1279 vds[VDS_POS_PRIMARY_VOL_DESC].volDescSeqNum = 1117 vds[VDS_POS_PRIMARY_VOL_DESC].volDescSeqNum = vdsn;
1280 vdsn;
1281 vds[VDS_POS_PRIMARY_VOL_DESC].block = block; 1118 vds[VDS_POS_PRIMARY_VOL_DESC].block = block;
1282 } 1119 }
1283 break; 1120 break;
1284 case TAG_IDENT_VDP: /* ISO 13346 3/10.3 */ 1121 case TAG_IDENT_VDP: /* ISO 13346 3/10.3 */
1285 if (vdsn >= vds[VDS_POS_VOL_DESC_PTR].volDescSeqNum) { 1122 if (vdsn >= vds[VDS_POS_VOL_DESC_PTR].volDescSeqNum) {
1286 vds[VDS_POS_VOL_DESC_PTR].volDescSeqNum = vdsn; 1123 vds[VDS_POS_VOL_DESC_PTR].volDescSeqNum = vdsn;
1287 vds[VDS_POS_VOL_DESC_PTR].block = block; 1124 vds[VDS_POS_VOL_DESC_PTR].block = block;
1288 1125
1289 vdp = (struct volDescPtr *)bh->b_data; 1126 vdp = (struct volDescPtr *)bh->b_data;
1290 next_s = 1127 next_s = le32_to_cpu(vdp->nextVolDescSeqExt.extLocation);
1291 le32_to_cpu(vdp->nextVolDescSeqExt. 1128 next_e = le32_to_cpu(vdp->nextVolDescSeqExt.extLength);
1292 extLocation);
1293 next_e =
1294 le32_to_cpu(vdp->nextVolDescSeqExt.
1295 extLength);
1296 next_e = next_e >> sb->s_blocksize_bits; 1129 next_e = next_e >> sb->s_blocksize_bits;
1297 next_e += next_s; 1130 next_e += next_s;
1298 } 1131 }
1299 break; 1132 break;
1300 case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */ 1133 case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */
1301 if (vdsn >= vds[VDS_POS_IMP_USE_VOL_DESC].volDescSeqNum) { 1134 if (vdsn >= vds[VDS_POS_IMP_USE_VOL_DESC].volDescSeqNum) {
1302 vds[VDS_POS_IMP_USE_VOL_DESC].volDescSeqNum = 1135 vds[VDS_POS_IMP_USE_VOL_DESC].volDescSeqNum = vdsn;
1303 vdsn;
1304 vds[VDS_POS_IMP_USE_VOL_DESC].block = block; 1136 vds[VDS_POS_IMP_USE_VOL_DESC].block = block;
1305 } 1137 }
1306 break; 1138 break;
1307 case TAG_IDENT_PD: /* ISO 13346 3/10.5 */ 1139 case TAG_IDENT_PD: /* ISO 13346 3/10.5 */
1308 if (!vds[VDS_POS_PARTITION_DESC].block) 1140 if (!vds[VDS_POS_PARTITION_DESC].block)
1309 vds[VDS_POS_PARTITION_DESC].block = block; 1141 vds[VDS_POS_PARTITION_DESC].block = block;
1310 break; 1142 break;
1311 case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */ 1143 case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */
1312 if (vdsn >= vds[VDS_POS_LOGICAL_VOL_DESC].volDescSeqNum) { 1144 if (vdsn >= vds[VDS_POS_LOGICAL_VOL_DESC].volDescSeqNum) {
1313 vds[VDS_POS_LOGICAL_VOL_DESC].volDescSeqNum = 1145 vds[VDS_POS_LOGICAL_VOL_DESC].volDescSeqNum = vdsn;
1314 vdsn;
1315 vds[VDS_POS_LOGICAL_VOL_DESC].block = block; 1146 vds[VDS_POS_LOGICAL_VOL_DESC].block = block;
1316 } 1147 }
1317 break; 1148 break;
1318 case TAG_IDENT_USD: /* ISO 13346 3/10.8 */ 1149 case TAG_IDENT_USD: /* ISO 13346 3/10.8 */
1319 if (vdsn >= 1150 if (vdsn >= vds[VDS_POS_UNALLOC_SPACE_DESC].volDescSeqNum) {
1320 vds[VDS_POS_UNALLOC_SPACE_DESC].volDescSeqNum) { 1151 vds[VDS_POS_UNALLOC_SPACE_DESC].volDescSeqNum = vdsn;
1321 vds[VDS_POS_UNALLOC_SPACE_DESC].volDescSeqNum =
1322 vdsn;
1323 vds[VDS_POS_UNALLOC_SPACE_DESC].block = block; 1152 vds[VDS_POS_UNALLOC_SPACE_DESC].block = block;
1324 } 1153 }
1325 break; 1154 break;
1326 case TAG_IDENT_TD: /* ISO 13346 3/10.9 */ 1155 case TAG_IDENT_TD: /* ISO 13346 3/10.9 */
1327 vds[VDS_POS_TERMINATING_DESC].block = block; 1156 vds[VDS_POS_TERMINATING_DESC].block = block;
1328 if (next_e) { 1157 if (next_e) {
1329 block = next_s; 1158 block = next_s;
1330 lastblock = next_e; 1159 lastblock = next_e;
1331 next_s = next_e = 0; 1160 next_s = next_e = 0;
1332 } else 1161 } else {
1333 done = 1; 1162 done = 1;
1163 }
1334 break; 1164 break;
1335 } 1165 }
1336 brelse(bh); 1166 brelse(bh);
1337 } 1167 }
1338 for (i = 0; i < VDS_POS_LENGTH; i++) { 1168 for (i = 0; i < VDS_POS_LENGTH; i++) {
1339 if (vds[i].block) { 1169 if (vds[i].block) {
1340 bh = udf_read_tagged(sb, vds[i].block, vds[i].block, 1170 bh = udf_read_tagged(sb, vds[i].block, vds[i].block, &ident);
1341 &ident);
1342 1171
1343 if (i == VDS_POS_PRIMARY_VOL_DESC) 1172 if (i == VDS_POS_PRIMARY_VOL_DESC) {
1344 udf_load_pvoldesc(sb, bh); 1173 udf_load_pvoldesc(sb, bh);
1345 else if (i == VDS_POS_LOGICAL_VOL_DESC) 1174 } else if (i == VDS_POS_LOGICAL_VOL_DESC) {
1346 udf_load_logicalvol(sb, bh, fileset); 1175 udf_load_logicalvol(sb, bh, fileset);
1347 else if (i == VDS_POS_PARTITION_DESC) { 1176 } else if (i == VDS_POS_PARTITION_DESC) {
1348 struct buffer_head *bh2 = NULL; 1177 struct buffer_head *bh2 = NULL;
1349 udf_load_partdesc(sb, bh); 1178 udf_load_partdesc(sb, bh);
1350 for (j = vds[i].block + 1; 1179 for (j = vds[i].block + 1; j < vds[VDS_POS_TERMINATING_DESC].block; j++) {
1351 j < vds[VDS_POS_TERMINATING_DESC].block;
1352 j++) {
1353 bh2 = udf_read_tagged(sb, j, j, &ident); 1180 bh2 = udf_read_tagged(sb, j, j, &ident);
1354 gd = (struct generic_desc *)bh2->b_data; 1181 gd = (struct generic_desc *)bh2->b_data;
1355 if (ident == TAG_IDENT_PD) 1182 if (ident == TAG_IDENT_PD)
@@ -1378,16 +1205,17 @@ static int udf_check_valid(struct super_block *sb, int novrs, int silent)
1378 /* Check that it is NSR02 compliant */ 1205 /* Check that it is NSR02 compliant */
1379 /* Process any "CD-ROM Volume Descriptor Set" (ECMA 167 2/8.3.1) */ 1206 /* Process any "CD-ROM Volume Descriptor Set" (ECMA 167 2/8.3.1) */
1380 else if ((block = udf_vrs(sb, silent)) == -1) { 1207 else if ((block = udf_vrs(sb, silent)) == -1) {
1381 udf_debug 1208 udf_debug("Failed to read byte 32768. Assuming open disc. "
1382 ("Failed to read byte 32768. Assuming open disc. Skipping validity check\n"); 1209 "Skipping validity check\n");
1383 if (!UDF_SB_LASTBLOCK(sb)) 1210 if (!UDF_SB_LASTBLOCK(sb))
1384 UDF_SB_LASTBLOCK(sb) = udf_get_last_block(sb); 1211 UDF_SB_LASTBLOCK(sb) = udf_get_last_block(sb);
1385 return 0; 1212 return 0;
1386 } else 1213 } else {
1387 return !block; 1214 return !block;
1215 }
1388} 1216}
1389 1217
1390static int udf_load_partition(struct super_block *sb, kernel_lb_addr * fileset) 1218static int udf_load_partition(struct super_block *sb, kernel_lb_addr *fileset)
1391{ 1219{
1392 struct anchorVolDescPtr *anchor; 1220 struct anchorVolDescPtr *anchor;
1393 uint16_t ident; 1221 uint16_t ident;
@@ -1399,28 +1227,20 @@ static int udf_load_partition(struct super_block *sb, kernel_lb_addr * fileset)
1399 return 1; 1227 return 1;
1400 1228
1401 for (i = 0; i < ARRAY_SIZE(UDF_SB_ANCHOR(sb)); i++) { 1229 for (i = 0; i < ARRAY_SIZE(UDF_SB_ANCHOR(sb)); i++) {
1402 if (UDF_SB_ANCHOR(sb)[i] && (bh = udf_read_tagged(sb, 1230 if (UDF_SB_ANCHOR(sb)[i] &&
1403 UDF_SB_ANCHOR 1231 (bh = udf_read_tagged(sb, UDF_SB_ANCHOR(sb)[i],
1404 (sb)[i], 1232 UDF_SB_ANCHOR(sb)[i], &ident))) {
1405 UDF_SB_ANCHOR
1406 (sb)[i],
1407 &ident))) {
1408 anchor = (struct anchorVolDescPtr *)bh->b_data; 1233 anchor = (struct anchorVolDescPtr *)bh->b_data;
1409 1234
1410 /* Locate the main sequence */ 1235 /* Locate the main sequence */
1411 main_s = 1236 main_s = le32_to_cpu(anchor->mainVolDescSeqExt.extLocation);
1412 le32_to_cpu(anchor->mainVolDescSeqExt.extLocation); 1237 main_e = le32_to_cpu(anchor->mainVolDescSeqExt.extLength );
1413 main_e =
1414 le32_to_cpu(anchor->mainVolDescSeqExt.extLength);
1415 main_e = main_e >> sb->s_blocksize_bits; 1238 main_e = main_e >> sb->s_blocksize_bits;
1416 main_e += main_s; 1239 main_e += main_s;
1417 1240
1418 /* Locate the reserve sequence */ 1241 /* Locate the reserve sequence */
1419 reserve_s = 1242 reserve_s = le32_to_cpu(anchor->reserveVolDescSeqExt.extLocation);
1420 le32_to_cpu(anchor->reserveVolDescSeqExt. 1243 reserve_e = le32_to_cpu(anchor->reserveVolDescSeqExt.extLength);
1421 extLocation);
1422 reserve_e =
1423 le32_to_cpu(anchor->reserveVolDescSeqExt.extLength);
1424 reserve_e = reserve_e >> sb->s_blocksize_bits; 1244 reserve_e = reserve_e >> sb->s_blocksize_bits;
1425 reserve_e += reserve_s; 1245 reserve_e += reserve_s;
1426 1246
@@ -1428,10 +1248,8 @@ static int udf_load_partition(struct super_block *sb, kernel_lb_addr * fileset)
1428 1248
1429 /* Process the main & reserve sequences */ 1249 /* Process the main & reserve sequences */
1430 /* responsible for finding the PartitionDesc(s) */ 1250 /* responsible for finding the PartitionDesc(s) */
1431 if (! 1251 if (!(udf_process_sequence(sb, main_s, main_e, fileset) &&
1432 (udf_process_sequence(sb, main_s, main_e, fileset) 1252 udf_process_sequence(sb, reserve_s, reserve_e, fileset))) {
1433 && udf_process_sequence(sb, reserve_s, reserve_e,
1434 fileset))) {
1435 break; 1253 break;
1436 } 1254 }
1437 } 1255 }
@@ -1444,81 +1262,67 @@ static int udf_load_partition(struct super_block *sb, kernel_lb_addr * fileset)
1444 udf_debug("Using anchor in block %d\n", UDF_SB_ANCHOR(sb)[i]); 1262 udf_debug("Using anchor in block %d\n", UDF_SB_ANCHOR(sb)[i]);
1445 1263
1446 for (i = 0; i < UDF_SB_NUMPARTS(sb); i++) { 1264 for (i = 0; i < UDF_SB_NUMPARTS(sb); i++) {
1265 kernel_lb_addr uninitialized_var(ino);
1447 switch (UDF_SB_PARTTYPE(sb, i)) { 1266 switch (UDF_SB_PARTTYPE(sb, i)) {
1448 case UDF_VIRTUAL_MAP15: 1267 case UDF_VIRTUAL_MAP15:
1449 case UDF_VIRTUAL_MAP20: 1268 case UDF_VIRTUAL_MAP20:
1450 { 1269 if (!UDF_SB_LASTBLOCK(sb)) {
1451 kernel_lb_addr uninitialized_var(ino); 1270 UDF_SB_LASTBLOCK(sb) = udf_get_last_block(sb);
1271 udf_find_anchor(sb);
1272 }
1452 1273
1453 if (!UDF_SB_LASTBLOCK(sb)) { 1274 if (!UDF_SB_LASTBLOCK(sb)) {
1454 UDF_SB_LASTBLOCK(sb) = 1275 udf_debug("Unable to determine Lastblock (For "
1455 udf_get_last_block(sb); 1276 "Virtual Partition)\n");
1456 udf_find_anchor(sb); 1277 return 1;
1457 } 1278 }
1458 1279
1459 if (!UDF_SB_LASTBLOCK(sb)) { 1280 for (j = 0; j < UDF_SB_NUMPARTS(sb); j++) {
1460 udf_debug 1281 if (j != i && UDF_SB_PARTVSN(sb, i) ==
1461 ("Unable to determine Lastblock (For Virtual Partition)\n"); 1282 UDF_SB_PARTVSN(sb, j) &&
1462 return 1; 1283 UDF_SB_PARTNUM(sb, i) ==
1284 UDF_SB_PARTNUM(sb, j)) {
1285 ino.partitionReferenceNum = j;
1286 ino.logicalBlockNum =
1287 UDF_SB_LASTBLOCK(sb) -
1288 UDF_SB_PARTROOT(sb, j);
1289 break;
1463 } 1290 }
1291 }
1464 1292
1465 for (j = 0; j < UDF_SB_NUMPARTS(sb); j++) { 1293 if (j == UDF_SB_NUMPARTS(sb))
1466 if (j != i && 1294 return 1;
1467 UDF_SB_PARTVSN(sb,
1468 i) ==
1469 UDF_SB_PARTVSN(sb, j)
1470 && UDF_SB_PARTNUM(sb,
1471 i) ==
1472 UDF_SB_PARTNUM(sb, j)) {
1473 ino.partitionReferenceNum = j;
1474 ino.logicalBlockNum =
1475 UDF_SB_LASTBLOCK(sb) -
1476 UDF_SB_PARTROOT(sb, j);
1477 break;
1478 }
1479 }
1480 1295
1481 if (j == UDF_SB_NUMPARTS(sb)) 1296 if (!(UDF_SB_VAT(sb) = udf_iget(sb, ino)))
1482 return 1; 1297 return 1;
1483 1298
1484 if (!(UDF_SB_VAT(sb) = udf_iget(sb, ino))) 1299 if (UDF_SB_PARTTYPE(sb, i) == UDF_VIRTUAL_MAP15) {
1485 return 1; 1300 UDF_SB_TYPEVIRT(sb, i).s_start_offset =
1301 udf_ext0_offset(UDF_SB_VAT(sb));
1302 UDF_SB_TYPEVIRT(sb, i).s_num_entries =
1303 (UDF_SB_VAT(sb)->i_size - 36) >> 2;
1304 } else if (UDF_SB_PARTTYPE(sb, i) == UDF_VIRTUAL_MAP20) {
1305 struct buffer_head *bh = NULL;
1306 uint32_t pos;
1486 1307
1487 if (UDF_SB_PARTTYPE(sb, i) == UDF_VIRTUAL_MAP15) { 1308 pos = udf_block_map(UDF_SB_VAT(sb), 0);
1488 UDF_SB_TYPEVIRT(sb, i).s_start_offset = 1309 bh = sb_bread(sb, pos);
1489 udf_ext0_offset(UDF_SB_VAT(sb)); 1310 if (!bh)
1490 UDF_SB_TYPEVIRT(sb, i).s_num_entries = 1311 return 1;
1491 (UDF_SB_VAT(sb)->i_size - 36) >> 2; 1312 UDF_SB_TYPEVIRT(sb, i).s_start_offset =
1492 } else if (UDF_SB_PARTTYPE(sb, i) == 1313 le16_to_cpu(((struct
1493 UDF_VIRTUAL_MAP20) { 1314 virtualAllocationTable20 *)bh->b_data +
1494 struct buffer_head *bh = NULL; 1315 udf_ext0_offset(UDF_SB_VAT(sb)))->
1495 uint32_t pos; 1316 lengthHeader) +
1496 1317 udf_ext0_offset(UDF_SB_VAT(sb));
1497 pos = udf_block_map(UDF_SB_VAT(sb), 0); 1318 UDF_SB_TYPEVIRT(sb, i).s_num_entries =
1498 bh = sb_bread(sb, pos); 1319 (UDF_SB_VAT(sb)->i_size -
1499 if (!bh) 1320 UDF_SB_TYPEVIRT(sb, i).s_start_offset) >> 2;
1500 return 1; 1321 brelse(bh);
1501 UDF_SB_TYPEVIRT(sb, i).s_start_offset =
1502 le16_to_cpu(((struct
1503 virtualAllocationTable20
1504 *)bh->b_data +
1505 udf_ext0_offset
1506 (UDF_SB_VAT(sb)))->
1507 lengthHeader) +
1508 udf_ext0_offset(UDF_SB_VAT(sb));
1509 UDF_SB_TYPEVIRT(sb, i).s_num_entries =
1510 (UDF_SB_VAT(sb)->i_size -
1511 UDF_SB_TYPEVIRT(sb,
1512 i).
1513 s_start_offset) >> 2;
1514 brelse(bh);
1515 }
1516 UDF_SB_PARTROOT(sb, i) =
1517 udf_get_pblock(sb, 0, i, 0);
1518 UDF_SB_PARTLEN(sb, i) =
1519 UDF_SB_PARTLEN(sb,
1520 ino.partitionReferenceNum);
1521 } 1322 }
1323 UDF_SB_PARTROOT(sb, i) = udf_get_pblock(sb, 0, i, 0);
1324 UDF_SB_PARTLEN(sb, i) = UDF_SB_PARTLEN(sb,
1325 ino.partitionReferenceNum);
1522 } 1326 }
1523 } 1327 }
1524 return 0; 1328 return 0;
@@ -1555,42 +1359,32 @@ static void udf_open_lvid(struct super_block *sb)
1555 1359
1556static void udf_close_lvid(struct super_block *sb) 1360static void udf_close_lvid(struct super_block *sb)
1557{ 1361{
1362 kernel_timestamp cpu_time;
1363 int i;
1364
1558 if (UDF_SB_LVIDBH(sb) && 1365 if (UDF_SB_LVIDBH(sb) &&
1559 UDF_SB_LVID(sb)->integrityType == LVID_INTEGRITY_TYPE_OPEN) { 1366 UDF_SB_LVID(sb)->integrityType == LVID_INTEGRITY_TYPE_OPEN) {
1560 int i;
1561 kernel_timestamp cpu_time;
1562
1563 UDF_SB_LVIDIU(sb)->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; 1367 UDF_SB_LVIDIU(sb)->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1564 UDF_SB_LVIDIU(sb)->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; 1368 UDF_SB_LVIDIU(sb)->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1565 if (udf_time_to_stamp(&cpu_time, CURRENT_TIME)) 1369 if (udf_time_to_stamp(&cpu_time, CURRENT_TIME))
1566 UDF_SB_LVID(sb)->recordingDateAndTime = 1370 UDF_SB_LVID(sb)->recordingDateAndTime = cpu_to_lets(cpu_time);
1567 cpu_to_lets(cpu_time); 1371 if (UDF_MAX_WRITE_VERSION > le16_to_cpu(UDF_SB_LVIDIU(sb)->maxUDFWriteRev))
1568 if (UDF_MAX_WRITE_VERSION > 1372 UDF_SB_LVIDIU(sb)->maxUDFWriteRev = cpu_to_le16(UDF_MAX_WRITE_VERSION);
1569 le16_to_cpu(UDF_SB_LVIDIU(sb)->maxUDFWriteRev)) 1373 if (UDF_SB_UDFREV(sb) > le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFReadRev))
1570 UDF_SB_LVIDIU(sb)->maxUDFWriteRev = 1374 UDF_SB_LVIDIU(sb)->minUDFReadRev = cpu_to_le16(UDF_SB_UDFREV(sb));
1571 cpu_to_le16(UDF_MAX_WRITE_VERSION); 1375 if (UDF_SB_UDFREV(sb) > le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFWriteRev))
1572 if (UDF_SB_UDFREV(sb) > 1376 UDF_SB_LVIDIU(sb)->minUDFWriteRev = cpu_to_le16(UDF_SB_UDFREV(sb));
1573 le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFReadRev)) 1377 UDF_SB_LVID(sb)->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE);
1574 UDF_SB_LVIDIU(sb)->minUDFReadRev =
1575 cpu_to_le16(UDF_SB_UDFREV(sb));
1576 if (UDF_SB_UDFREV(sb) >
1577 le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFWriteRev))
1578 UDF_SB_LVIDIU(sb)->minUDFWriteRev =
1579 cpu_to_le16(UDF_SB_UDFREV(sb));
1580 UDF_SB_LVID(sb)->integrityType =
1581 cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE);
1582 1378
1583 UDF_SB_LVID(sb)->descTag.descCRC = 1379 UDF_SB_LVID(sb)->descTag.descCRC =
1584 cpu_to_le16(udf_crc((char *)UDF_SB_LVID(sb) + sizeof(tag), 1380 cpu_to_le16(udf_crc((char *)UDF_SB_LVID(sb) + sizeof(tag),
1585 le16_to_cpu(UDF_SB_LVID(sb)->descTag. 1381 le16_to_cpu(UDF_SB_LVID(sb)->descTag.descCRCLength), 0));
1586 descCRCLength), 0));
1587 1382
1588 UDF_SB_LVID(sb)->descTag.tagChecksum = 0; 1383 UDF_SB_LVID(sb)->descTag.tagChecksum = 0;
1589 for (i = 0; i < 16; i++) 1384 for (i = 0; i < 16; i++)
1590 if (i != 4) 1385 if (i != 4)
1591 UDF_SB_LVID(sb)->descTag.tagChecksum += 1386 UDF_SB_LVID(sb)->descTag.tagChecksum +=
1592 ((uint8_t *) & 1387 ((uint8_t *)&(UDF_SB_LVID(sb)->descTag))[i];
1593 (UDF_SB_LVID(sb)->descTag))[i];
1594 1388
1595 mark_buffer_dirty(UDF_SB_LVIDBH(sb)); 1389 mark_buffer_dirty(UDF_SB_LVIDBH(sb));
1596 } 1390 }
@@ -1628,6 +1422,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
1628 sbi = kmalloc(sizeof(struct udf_sb_info), GFP_KERNEL); 1422 sbi = kmalloc(sizeof(struct udf_sb_info), GFP_KERNEL);
1629 if (!sbi) 1423 if (!sbi)
1630 return -ENOMEM; 1424 return -ENOMEM;
1425
1631 sb->s_fs_info = sbi; 1426 sb->s_fs_info = sbi;
1632 memset(UDF_SB(sb), 0x00, sizeof(struct udf_sb_info)); 1427 memset(UDF_SB(sb), 0x00, sizeof(struct udf_sb_info));
1633 1428
@@ -1679,7 +1474,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
1679 UDF_SB_ANCHOR(sb)[2] = uopt.anchor; 1474 UDF_SB_ANCHOR(sb)[2] = uopt.anchor;
1680 UDF_SB_ANCHOR(sb)[3] = 256; 1475 UDF_SB_ANCHOR(sb)[3] = 256;
1681 1476
1682 if (udf_check_valid(sb, uopt.novrs, silent)) { /* read volume recognition sequences */ 1477 if (udf_check_valid(sb, uopt.novrs, silent)) { /* read volume recognition sequences */
1683 printk("UDF-fs: No VRS found\n"); 1478 printk("UDF-fs: No VRS found\n");
1684 goto error_out; 1479 goto error_out;
1685 } 1480 }
@@ -1701,10 +1496,8 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
1701 udf_debug("Lastblock=%d\n", UDF_SB_LASTBLOCK(sb)); 1496 udf_debug("Lastblock=%d\n", UDF_SB_LASTBLOCK(sb));
1702 1497
1703 if (UDF_SB_LVIDBH(sb)) { 1498 if (UDF_SB_LVIDBH(sb)) {
1704 uint16_t minUDFReadRev = 1499 uint16_t minUDFReadRev = le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFReadRev);
1705 le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFReadRev); 1500 uint16_t minUDFWriteRev = le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFWriteRev);
1706 uint16_t minUDFWriteRev =
1707 le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFWriteRev);
1708 /* uint16_t maxUDFWriteRev = le16_to_cpu(UDF_SB_LVIDIU(sb)->maxUDFWriteRev); */ 1501 /* uint16_t maxUDFWriteRev = le16_to_cpu(UDF_SB_LVIDIU(sb)->maxUDFWriteRev); */
1709 1502
1710 if (minUDFReadRev > UDF_MAX_READ_VERSION) { 1503 if (minUDFReadRev > UDF_MAX_READ_VERSION) {
@@ -1729,10 +1522,8 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
1729 goto error_out; 1522 goto error_out;
1730 } 1523 }
1731 1524
1732 if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & 1525 if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_READ_ONLY) {
1733 UDF_PART_FLAG_READ_ONLY) { 1526 printk("UDF-fs: Partition marked readonly; forcing readonly mount\n");
1734 printk
1735 ("UDF-fs: Partition marked readonly; forcing readonly mount\n");
1736 sb->s_flags |= MS_RDONLY; 1527 sb->s_flags |= MS_RDONLY;
1737 } 1528 }
1738 1529
@@ -1744,10 +1535,11 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
1744 if (!silent) { 1535 if (!silent) {
1745 kernel_timestamp ts; 1536 kernel_timestamp ts;
1746 udf_time_to_stamp(&ts, UDF_SB_RECORDTIME(sb)); 1537 udf_time_to_stamp(&ts, UDF_SB_RECORDTIME(sb));
1747 udf_info 1538 udf_info("UDF %s (%s) Mounting volume '%s', "
1748 ("UDF %s (%s) Mounting volume '%s', timestamp %04u/%02u/%02u %02u:%02u (%x)\n", 1539 "timestamp %04u/%02u/%02u %02u:%02u (%x)\n",
1749 UDFFS_VERSION, UDFFS_DATE, UDF_SB_VOLIDENT(sb), ts.year, 1540 UDFFS_VERSION, UDFFS_DATE,
1750 ts.month, ts.day, ts.hour, ts.minute, ts.typeAndTimezone); 1541 UDF_SB_VOLIDENT(sb), ts.year, ts.month, ts.day, ts.hour, ts.minute,
1542 ts.typeAndTimezone);
1751 } 1543 }
1752 if (!(sb->s_flags & MS_RDONLY)) 1544 if (!(sb->s_flags & MS_RDONLY))
1753 udf_open_lvid(sb); 1545 udf_open_lvid(sb);
@@ -1772,30 +1564,21 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
1772 sb->s_maxbytes = MAX_LFS_FILESIZE; 1564 sb->s_maxbytes = MAX_LFS_FILESIZE;
1773 return 0; 1565 return 0;
1774 1566
1775 error_out: 1567error_out:
1776 if (UDF_SB_VAT(sb)) 1568 if (UDF_SB_VAT(sb))
1777 iput(UDF_SB_VAT(sb)); 1569 iput(UDF_SB_VAT(sb));
1778 if (UDF_SB_NUMPARTS(sb)) { 1570 if (UDF_SB_NUMPARTS(sb)) {
1779 if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & 1571 if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_TABLE)
1780 UDF_PART_FLAG_UNALLOC_TABLE) 1572 iput(UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_uspace.s_table);
1781 iput(UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_uspace. 1573 if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_TABLE)
1782 s_table); 1574 iput(UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_fspace.s_table);
1783 if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & 1575 if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_BITMAP)
1784 UDF_PART_FLAG_FREED_TABLE) 1576 UDF_SB_FREE_BITMAP(sb,UDF_SB_PARTITION(sb), s_uspace);
1785 iput(UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_fspace. 1577 if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_BITMAP)
1786 s_table); 1578 UDF_SB_FREE_BITMAP(sb,UDF_SB_PARTITION(sb), s_fspace);
1787 if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & 1579 if (UDF_SB_PARTTYPE(sb, UDF_SB_PARTITION(sb)) == UDF_SPARABLE_MAP15) {
1788 UDF_PART_FLAG_UNALLOC_BITMAP)
1789 UDF_SB_FREE_BITMAP(sb, UDF_SB_PARTITION(sb), s_uspace);
1790 if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) &
1791 UDF_PART_FLAG_FREED_BITMAP)
1792 UDF_SB_FREE_BITMAP(sb, UDF_SB_PARTITION(sb), s_fspace);
1793 if (UDF_SB_PARTTYPE(sb, UDF_SB_PARTITION(sb)) ==
1794 UDF_SPARABLE_MAP15) {
1795 for (i = 0; i < 4; i++) 1580 for (i = 0; i < 4; i++)
1796 brelse(UDF_SB_TYPESPAR 1581 brelse(UDF_SB_TYPESPAR(sb, UDF_SB_PARTITION(sb)).s_spar_map[i]);
1797 (sb,
1798 UDF_SB_PARTITION(sb)).s_spar_map[i]);
1799 } 1582 }
1800 } 1583 }
1801#ifdef CONFIG_UDF_NLS 1584#ifdef CONFIG_UDF_NLS
@@ -1808,6 +1591,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
1808 UDF_SB_FREE(sb); 1591 UDF_SB_FREE(sb);
1809 kfree(sbi); 1592 kfree(sbi);
1810 sb->s_fs_info = NULL; 1593 sb->s_fs_info = NULL;
1594
1811 return -EINVAL; 1595 return -EINVAL;
1812} 1596}
1813 1597
@@ -1823,8 +1607,8 @@ void udf_error(struct super_block *sb, const char *function,
1823 va_start(args, fmt); 1607 va_start(args, fmt);
1824 vsnprintf(error_buf, sizeof(error_buf), fmt, args); 1608 vsnprintf(error_buf, sizeof(error_buf), fmt, args);
1825 va_end(args); 1609 va_end(args);
1826 printk(KERN_CRIT "UDF-fs error (device %s): %s: %s\n", 1610 printk (KERN_CRIT "UDF-fs error (device %s): %s: %s\n",
1827 sb->s_id, function, error_buf); 1611 sb->s_id, function, error_buf);
1828} 1612}
1829 1613
1830void udf_warning(struct super_block *sb, const char *function, 1614void udf_warning(struct super_block *sb, const char *function,
@@ -1859,26 +1643,17 @@ static void udf_put_super(struct super_block *sb)
1859 if (UDF_SB_VAT(sb)) 1643 if (UDF_SB_VAT(sb))
1860 iput(UDF_SB_VAT(sb)); 1644 iput(UDF_SB_VAT(sb));
1861 if (UDF_SB_NUMPARTS(sb)) { 1645 if (UDF_SB_NUMPARTS(sb)) {
1862 if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & 1646 if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_TABLE)
1863 UDF_PART_FLAG_UNALLOC_TABLE) 1647 iput(UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_uspace.s_table);
1864 iput(UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_uspace. 1648 if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_TABLE)
1865 s_table); 1649 iput(UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_fspace.s_table);
1866 if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & 1650 if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_BITMAP)
1867 UDF_PART_FLAG_FREED_TABLE) 1651 UDF_SB_FREE_BITMAP(sb,UDF_SB_PARTITION(sb), s_uspace);
1868 iput(UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_fspace. 1652 if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_BITMAP)
1869 s_table); 1653 UDF_SB_FREE_BITMAP(sb,UDF_SB_PARTITION(sb), s_fspace);
1870 if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & 1654 if (UDF_SB_PARTTYPE(sb, UDF_SB_PARTITION(sb)) == UDF_SPARABLE_MAP15) {
1871 UDF_PART_FLAG_UNALLOC_BITMAP)
1872 UDF_SB_FREE_BITMAP(sb, UDF_SB_PARTITION(sb), s_uspace);
1873 if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) &
1874 UDF_PART_FLAG_FREED_BITMAP)
1875 UDF_SB_FREE_BITMAP(sb, UDF_SB_PARTITION(sb), s_fspace);
1876 if (UDF_SB_PARTTYPE(sb, UDF_SB_PARTITION(sb)) ==
1877 UDF_SPARABLE_MAP15) {
1878 for (i = 0; i < 4; i++) 1655 for (i = 0; i < 4; i++)
1879 brelse(UDF_SB_TYPESPAR 1656 brelse(UDF_SB_TYPESPAR(sb, UDF_SB_PARTITION(sb)).s_spar_map[i]);
1880 (sb,
1881 UDF_SB_PARTITION(sb)).s_spar_map[i]);
1882 } 1657 }
1883 } 1658 }
1884#ifdef CONFIG_UDF_NLS 1659#ifdef CONFIG_UDF_NLS
@@ -1917,8 +1692,7 @@ static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
1917 buf->f_bavail = buf->f_bfree; 1692 buf->f_bavail = buf->f_bfree;
1918 buf->f_files = (UDF_SB_LVIDBH(sb) ? 1693 buf->f_files = (UDF_SB_LVIDBH(sb) ?
1919 (le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles) + 1694 (le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles) +
1920 le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs)) : 0) + 1695 le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs)) : 0) + buf->f_bfree;
1921 buf->f_bfree;
1922 buf->f_ffree = buf->f_bfree; 1696 buf->f_ffree = buf->f_bfree;
1923 /* __kernel_fsid_t f_fsid */ 1697 /* __kernel_fsid_t f_fsid */
1924 buf->f_namelen = UDF_NAME_LEN - 2; 1698 buf->f_namelen = UDF_NAME_LEN - 2;
@@ -1930,8 +1704,7 @@ static unsigned char udf_bitmap_lookup[16] = {
1930 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4 1704 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
1931}; 1705};
1932 1706
1933static unsigned int 1707static unsigned int udf_count_free_bitmap(struct super_block *sb, struct udf_bitmap *bitmap)
1934udf_count_free_bitmap(struct super_block *sb, struct udf_bitmap *bitmap)
1935{ 1708{
1936 struct buffer_head *bh = NULL; 1709 struct buffer_head *bh = NULL;
1937 unsigned int accum = 0; 1710 unsigned int accum = 0;
@@ -1961,8 +1734,8 @@ udf_count_free_bitmap(struct super_block *sb, struct udf_bitmap *bitmap)
1961 1734
1962 bm = (struct spaceBitmapDesc *)bh->b_data; 1735 bm = (struct spaceBitmapDesc *)bh->b_data;
1963 bytes = le32_to_cpu(bm->numOfBytes); 1736 bytes = le32_to_cpu(bm->numOfBytes);
1964 index = sizeof(struct spaceBitmapDesc); /* offset in first block only */ 1737 index = sizeof(struct spaceBitmapDesc); /* offset in first block only */
1965 ptr = (uint8_t *) bh->b_data; 1738 ptr = (uint8_t *)bh->b_data;
1966 1739
1967 while (bytes > 0) { 1740 while (bytes > 0) {
1968 while ((bytes > 0) && (index < sb->s_blocksize)) { 1741 while ((bytes > 0) && (index < sb->s_blocksize)) {
@@ -1981,19 +1754,18 @@ udf_count_free_bitmap(struct super_block *sb, struct udf_bitmap *bitmap)
1981 goto out; 1754 goto out;
1982 } 1755 }
1983 index = 0; 1756 index = 0;
1984 ptr = (uint8_t *) bh->b_data; 1757 ptr = (uint8_t *)bh->b_data;
1985 } 1758 }
1986 } 1759 }
1987 brelse(bh); 1760 brelse(bh);
1988 1761
1989 out: 1762out:
1990 unlock_kernel(); 1763 unlock_kernel();
1991 1764
1992 return accum; 1765 return accum;
1993} 1766}
1994 1767
1995static unsigned int 1768static unsigned int udf_count_free_table(struct super_block *sb, struct inode *table)
1996udf_count_free_table(struct super_block *sb, struct inode *table)
1997{ 1769{
1998 unsigned int accum = 0; 1770 unsigned int accum = 0;
1999 uint32_t elen; 1771 uint32_t elen;
@@ -2007,8 +1779,9 @@ udf_count_free_table(struct super_block *sb, struct inode *table)
2007 epos.offset = sizeof(struct unallocSpaceEntry); 1779 epos.offset = sizeof(struct unallocSpaceEntry);
2008 epos.bh = NULL; 1780 epos.bh = NULL;
2009 1781
2010 while ((etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) 1782 while ((etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
2011 accum += (elen >> table->i_sb->s_blocksize_bits); 1783 accum += (elen >> table->i_sb->s_blocksize_bits);
1784 }
2012 brelse(epos.bh); 1785 brelse(epos.bh);
2013 1786
2014 unlock_kernel(); 1787 unlock_kernel();
@@ -2021,12 +1794,8 @@ static unsigned int udf_count_free(struct super_block *sb)
2021 unsigned int accum = 0; 1794 unsigned int accum = 0;
2022 1795
2023 if (UDF_SB_LVIDBH(sb)) { 1796 if (UDF_SB_LVIDBH(sb)) {
2024 if (le32_to_cpu(UDF_SB_LVID(sb)->numOfPartitions) > 1797 if (le32_to_cpu(UDF_SB_LVID(sb)->numOfPartitions) > UDF_SB_PARTITION(sb)) {
2025 UDF_SB_PARTITION(sb)) { 1798 accum = le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)]);
2026 accum =
2027 le32_to_cpu(UDF_SB_LVID(sb)->
2028 freeSpaceTable[UDF_SB_PARTITION(sb)]);
2029
2030 if (accum == 0xFFFFFFFF) 1799 if (accum == 0xFFFFFFFF)
2031 accum = 0; 1800 accum = 0;
2032 } 1801 }
@@ -2035,40 +1804,24 @@ static unsigned int udf_count_free(struct super_block *sb)
2035 if (accum) 1804 if (accum)
2036 return accum; 1805 return accum;
2037 1806
2038 if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & 1807 if (UDF_SB_PARTFLAGS(sb,UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_BITMAP) {
2039 UDF_PART_FLAG_UNALLOC_BITMAP) { 1808 accum += udf_count_free_bitmap(sb,
2040 accum += 1809 UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_uspace.s_bitmap);
2041 udf_count_free_bitmap(sb,
2042 UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION
2043 (sb)].s_uspace.
2044 s_bitmap);
2045 } 1810 }
2046 if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & 1811 if (UDF_SB_PARTFLAGS(sb,UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_BITMAP) {
2047 UDF_PART_FLAG_FREED_BITMAP) { 1812 accum += udf_count_free_bitmap(sb,
2048 accum += 1813 UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_fspace.s_bitmap);
2049 udf_count_free_bitmap(sb,
2050 UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION
2051 (sb)].s_fspace.
2052 s_bitmap);
2053 } 1814 }
2054 if (accum) 1815 if (accum)
2055 return accum; 1816 return accum;
2056 1817
2057 if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & 1818 if (UDF_SB_PARTFLAGS(sb,UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_TABLE) {
2058 UDF_PART_FLAG_UNALLOC_TABLE) { 1819 accum += udf_count_free_table(sb,
2059 accum += 1820 UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_uspace.s_table);
2060 udf_count_free_table(sb,
2061 UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION
2062 (sb)].s_uspace.
2063 s_table);
2064 } 1821 }
2065 if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & 1822 if (UDF_SB_PARTFLAGS(sb,UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_TABLE) {
2066 UDF_PART_FLAG_FREED_TABLE) { 1823 accum += udf_count_free_table(sb,
2067 accum += 1824 UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_fspace.s_table);
2068 udf_count_free_table(sb,
2069 UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION
2070 (sb)].s_fspace.
2071 s_table);
2072 } 1825 }
2073 1826
2074 return accum; 1827 return accum;
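
Note on the free-space accounting reworked above: udf_count_free() first trusts the cached value in the logical volume integrity descriptor (0xFFFFFFFF meaning "not maintained"), and only then falls back to counting the space bitmaps and, last, the unallocated-space tables. A minimal user-space sketch of that precedence, with invented struct and field names that are not the kernel's:

#include <stdint.h>

/* Invented, simplified stand-in for the superblock state consulted by
 * udf_count_free(); only the order of the checks mirrors the kernel code. */
struct fake_udf_state {
	int      has_lvid;
	uint32_t lvid_free;     /* 0xFFFFFFFF == "value not maintained" */
	uint32_t bitmap_free;   /* result of scanning the space bitmap  */
	uint32_t table_free;    /* result of walking the unalloc table  */
};

static uint32_t count_free_blocks(const struct fake_udf_state *s)
{
	uint32_t accum = 0;

	if (s->has_lvid && s->lvid_free != 0xFFFFFFFF)
		accum = s->lvid_free;
	if (accum)
		return accum;

	accum = s->bitmap_free;         /* UDF_PART_FLAG_*_BITMAP path */
	if (accum)
		return accum;

	return s->table_free;           /* UDF_PART_FLAG_*_TABLE path  */
}
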
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
index c4b82a920082..e6f933dd6a7b 100644
--- a/fs/udf/symlink.c
+++ b/fs/udf/symlink.c
@@ -11,7 +11,7 @@
11 * Each contributing author retains all rights to their own work. 11 * Each contributing author retains all rights to their own work.
12 * 12 *
13 * (C) 1998-2001 Ben Fennema 13 * (C) 1998-2001 Ben Fennema
14 * (C) 1999 Stelias Computing Inc 14 * (C) 1999 Stelias Computing Inc
15 * 15 *
16 * HISTORY 16 * HISTORY
17 * 17 *
@@ -33,8 +33,7 @@
33#include <linux/buffer_head.h> 33#include <linux/buffer_head.h>
34#include "udf_i.h" 34#include "udf_i.h"
35 35
36static void udf_pc_to_char(struct super_block *sb, char *from, int fromlen, 36static void udf_pc_to_char(struct super_block *sb, char *from, int fromlen, char *to)
37 char *to)
38{ 37{
39 struct pathComponent *pc; 38 struct pathComponent *pc;
40 int elen = 0; 39 int elen = 0;
@@ -81,9 +80,9 @@ static int udf_symlink_filler(struct file *file, struct page *page)
81 char *p = kmap(page); 80 char *p = kmap(page);
82 81
83 lock_kernel(); 82 lock_kernel();
84 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) 83 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) {
85 symlink = UDF_I_DATA(inode) + UDF_I_LENEATTR(inode); 84 symlink = UDF_I_DATA(inode) + UDF_I_LENEATTR(inode);
86 else { 85 } else {
87 bh = sb_bread(inode->i_sb, udf_block_map(inode, 0)); 86 bh = sb_bread(inode->i_sb, udf_block_map(inode, 0));
88 87
89 if (!bh) 88 if (!bh)
@@ -100,7 +99,8 @@ static int udf_symlink_filler(struct file *file, struct page *page)
100 kunmap(page); 99 kunmap(page);
101 unlock_page(page); 100 unlock_page(page);
102 return 0; 101 return 0;
103 out: 102
103out:
104 unlock_kernel(); 104 unlock_kernel();
105 SetPageError(page); 105 SetPageError(page);
106 kunmap(page); 106 kunmap(page);
@@ -112,5 +112,5 @@ static int udf_symlink_filler(struct file *file, struct page *page)
112 * symlinks can't do much... 112 * symlinks can't do much...
113 */ 113 */
114const struct address_space_operations udf_symlink_aops = { 114const struct address_space_operations udf_symlink_aops = {
115 .readpage = udf_symlink_filler, 115 .readpage = udf_symlink_filler,
116}; 116};
diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
index b2002da0a5c0..7fc3912885a5 100644
--- a/fs/udf/truncate.c
+++ b/fs/udf/truncate.c
@@ -32,13 +32,11 @@ static void extent_trunc(struct inode *inode, struct extent_position *epos,
32 kernel_lb_addr eloc, int8_t etype, uint32_t elen, 32 kernel_lb_addr eloc, int8_t etype, uint32_t elen,
33 uint32_t nelen) 33 uint32_t nelen)
34{ 34{
35 kernel_lb_addr neloc = { 0, 0 }; 35 kernel_lb_addr neloc = {};
36 int last_block = 36 int last_block = (elen + inode->i_sb->s_blocksize - 1) >>
37 (elen + inode->i_sb->s_blocksize - 37 inode->i_sb->s_blocksize_bits;
38 1) >> inode->i_sb->s_blocksize_bits; 38 int first_block = (nelen + inode->i_sb->s_blocksize - 1) >>
39 int first_block = 39 inode->i_sb->s_blocksize_bits;
40 (nelen + inode->i_sb->s_blocksize -
41 1) >> inode->i_sb->s_blocksize_bits;
42 40
43 if (nelen) { 41 if (nelen) {
44 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) { 42 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
@@ -70,7 +68,7 @@ static void extent_trunc(struct inode *inode, struct extent_position *epos,
70 */ 68 */
71void udf_truncate_tail_extent(struct inode *inode) 69void udf_truncate_tail_extent(struct inode *inode)
72{ 70{
73 struct extent_position epos = { NULL, 0, {0, 0} }; 71 struct extent_position epos = {};
74 kernel_lb_addr eloc; 72 kernel_lb_addr eloc;
75 uint32_t elen, nelen; 73 uint32_t elen, nelen;
76 uint64_t lbcount = 0; 74 uint64_t lbcount = 0;
@@ -156,16 +154,16 @@ void udf_discard_prealloc(struct inode *inode)
156 extent_trunc(inode, &epos, eloc, etype, elen, 0); 154 extent_trunc(inode, &epos, eloc, etype, elen, 0);
157 if (!epos.bh) { 155 if (!epos.bh) {
158 UDF_I_LENALLOC(inode) = 156 UDF_I_LENALLOC(inode) =
159 epos.offset - udf_file_entry_alloc_offset(inode); 157 epos.offset - udf_file_entry_alloc_offset(inode);
160 mark_inode_dirty(inode); 158 mark_inode_dirty(inode);
161 } else { 159 } else {
162 struct allocExtDesc *aed = 160 struct allocExtDesc *aed =
163 (struct allocExtDesc *)(epos.bh->b_data); 161 (struct allocExtDesc *)(epos.bh->b_data);
164 aed->lengthAllocDescs = 162 aed->lengthAllocDescs =
165 cpu_to_le32(epos.offset - 163 cpu_to_le32(epos.offset -
166 sizeof(struct allocExtDesc)); 164 sizeof(struct allocExtDesc));
167 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) 165 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
168 || UDF_SB_UDFREV(inode->i_sb) >= 0x0201) 166 UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
169 udf_update_tag(epos.bh->b_data, epos.offset); 167 udf_update_tag(epos.bh->b_data, epos.offset);
170 else 168 else
171 udf_update_tag(epos.bh->b_data, 169 udf_update_tag(epos.bh->b_data,
@@ -182,7 +180,7 @@ void udf_discard_prealloc(struct inode *inode)
182void udf_truncate_extents(struct inode *inode) 180void udf_truncate_extents(struct inode *inode)
183{ 181{
184 struct extent_position epos; 182 struct extent_position epos;
185 kernel_lb_addr eloc, neloc = { 0, 0 }; 183 kernel_lb_addr eloc, neloc = {};
186 uint32_t elen, nelen = 0, indirect_ext_len = 0, lenalloc; 184 uint32_t elen, nelen = 0, indirect_ext_len = 0, lenalloc;
187 int8_t etype; 185 int8_t etype;
188 struct super_block *sb = inode->i_sb; 186 struct super_block *sb = inode->i_sb;
@@ -198,9 +196,8 @@ void udf_truncate_extents(struct inode *inode)
198 BUG(); 196 BUG();
199 197
200 etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset); 198 etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset);
201 byte_offset = 199 byte_offset = (offset << sb->s_blocksize_bits) +
202 (offset << sb->s_blocksize_bits) + 200 (inode->i_size & (sb->s_blocksize - 1));
203 (inode->i_size & (sb->s_blocksize - 1));
204 if (etype != -1) { 201 if (etype != -1) {
205 epos.offset -= adsize; 202 epos.offset -= adsize;
206 extent_trunc(inode, &epos, eloc, etype, elen, byte_offset); 203 extent_trunc(inode, &epos, eloc, etype, elen, byte_offset);
@@ -215,9 +212,7 @@ void udf_truncate_extents(struct inode *inode)
215 else 212 else
216 lenalloc -= sizeof(struct allocExtDesc); 213 lenalloc -= sizeof(struct allocExtDesc);
217 214
218 while ((etype = 215 while ((etype = udf_current_aext(inode, &epos, &eloc, &elen, 0)) != -1) {
219 udf_current_aext(inode, &epos, &eloc, &elen,
220 0)) != -1) {
221 if (etype == (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) { 216 if (etype == (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
222 udf_write_aext(inode, &epos, neloc, nelen, 0); 217 udf_write_aext(inode, &epos, neloc, nelen, 0);
223 if (indirect_ext_len) { 218 if (indirect_ext_len) {
@@ -229,52 +224,35 @@ void udf_truncate_extents(struct inode *inode)
229 0, indirect_ext_len); 224 0, indirect_ext_len);
230 } else { 225 } else {
231 if (!epos.bh) { 226 if (!epos.bh) {
232 UDF_I_LENALLOC(inode) = 227 UDF_I_LENALLOC(inode) = lenalloc;
233 lenalloc;
234 mark_inode_dirty(inode); 228 mark_inode_dirty(inode);
235 } else { 229 } else {
236 struct allocExtDesc *aed = 230 struct allocExtDesc *aed =
237 (struct allocExtDesc 231 (struct allocExtDesc *)(epos.bh->b_data);
238 *)(epos.bh->b_data);
239 aed->lengthAllocDescs = 232 aed->lengthAllocDescs =
240 cpu_to_le32(lenalloc); 233 cpu_to_le32(lenalloc);
241 if (!UDF_QUERY_FLAG 234 if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT) ||
242 (sb, UDF_FLAG_STRICT) 235 UDF_SB_UDFREV(sb) >= 0x0201)
243 || UDF_SB_UDFREV(sb) >= 236 udf_update_tag(epos.bh->b_data,
244 0x0201) 237 lenalloc +
245 udf_update_tag(epos.bh-> 238 sizeof(struct allocExtDesc));
246 b_data,
247 lenalloc
248 +
249 sizeof
250 (struct
251 allocExtDesc));
252 else 239 else
253 udf_update_tag(epos.bh-> 240 udf_update_tag(epos.bh->b_data,
254 b_data, 241 sizeof(struct allocExtDesc));
255 sizeof 242 mark_buffer_dirty_inode(epos.bh, inode);
256 (struct
257 allocExtDesc));
258 mark_buffer_dirty_inode(epos.bh,
259 inode);
260 } 243 }
261 } 244 }
262 brelse(epos.bh); 245 brelse(epos.bh);
263 epos.offset = sizeof(struct allocExtDesc); 246 epos.offset = sizeof(struct allocExtDesc);
264 epos.block = eloc; 247 epos.block = eloc;
265 epos.bh = 248 epos.bh = udf_tread(sb, udf_get_lb_pblock(sb, eloc, 0));
266 udf_tread(sb,
267 udf_get_lb_pblock(sb, eloc, 0));
268 if (elen) 249 if (elen)
269 indirect_ext_len = (elen + 250 indirect_ext_len = (elen + sb->s_blocksize -1) >>
270 sb->s_blocksize - 251 sb->s_blocksize_bits;
271 1) >> sb->
272 s_blocksize_bits;
273 else 252 else
274 indirect_ext_len = 1; 253 indirect_ext_len = 1;
275 } else { 254 } else {
276 extent_trunc(inode, &epos, eloc, etype, elen, 255 extent_trunc(inode, &epos, eloc, etype, elen, 0);
277 0);
278 epos.offset += adsize; 256 epos.offset += adsize;
279 } 257 }
280 } 258 }
@@ -292,16 +270,13 @@ void udf_truncate_extents(struct inode *inode)
292 struct allocExtDesc *aed = 270 struct allocExtDesc *aed =
293 (struct allocExtDesc *)(epos.bh->b_data); 271 (struct allocExtDesc *)(epos.bh->b_data);
294 aed->lengthAllocDescs = cpu_to_le32(lenalloc); 272 aed->lengthAllocDescs = cpu_to_le32(lenalloc);
295 if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT) 273 if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT) ||
296 || UDF_SB_UDFREV(sb) >= 0x0201) 274 UDF_SB_UDFREV(sb) >= 0x0201)
297 udf_update_tag(epos.bh->b_data, 275 udf_update_tag(epos.bh->b_data,
298 lenalloc + 276 lenalloc + sizeof(struct allocExtDesc));
299 sizeof(struct
300 allocExtDesc));
301 else 277 else
302 udf_update_tag(epos.bh->b_data, 278 udf_update_tag(epos.bh->b_data,
303 sizeof(struct 279 sizeof(struct allocExtDesc));
304 allocExtDesc));
305 mark_buffer_dirty_inode(epos.bh, inode); 280 mark_buffer_dirty_inode(epos.bh, inode);
306 } 281 }
307 } 282 }
@@ -314,21 +289,14 @@ void udf_truncate_extents(struct inode *inode)
314 * no extent above inode->i_size => truncate is 289 * no extent above inode->i_size => truncate is
315 * extending the file by 'offset' blocks. 290 * extending the file by 'offset' blocks.
316 */ 291 */
317 if ((!epos.bh 292 if ((!epos.bh &&
318 && epos.offset == 293 epos.offset == udf_file_entry_alloc_offset(inode)) ||
319 udf_file_entry_alloc_offset(inode)) || (epos.bh 294 (epos.bh && epos.offset == sizeof(struct allocExtDesc))) {
320 && epos.
321 offset ==
322 sizeof
323 (struct
324 allocExtDesc)))
325 {
326 /* File has no extents at all or has empty last 295 /* File has no extents at all or has empty last
327 * indirect extent! Create a fake extent... */ 296 * indirect extent! Create a fake extent... */
328 extent.extLocation.logicalBlockNum = 0; 297 extent.extLocation.logicalBlockNum = 0;
329 extent.extLocation.partitionReferenceNum = 0; 298 extent.extLocation.partitionReferenceNum = 0;
330 extent.extLength = 299 extent.extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
331 EXT_NOT_RECORDED_NOT_ALLOCATED;
332 } else { 300 } else {
333 epos.offset -= adsize; 301 epos.offset -= adsize;
334 etype = udf_next_aext(inode, &epos, 302 etype = udf_next_aext(inode, &epos,
@@ -337,10 +305,7 @@ void udf_truncate_extents(struct inode *inode)
337 extent.extLength |= etype << 30; 305 extent.extLength |= etype << 30;
338 } 306 }
339 udf_extend_file(inode, &epos, &extent, 307 udf_extend_file(inode, &epos, &extent,
340 offset + 308 offset + ((inode->i_size & (sb->s_blocksize - 1)) != 0));
341 ((inode->
342 i_size & (sb->s_blocksize - 1)) !=
343 0));
344 } 309 }
345 } 310 }
346 UDF_I_LENEXTENTS(inode) = inode->i_size; 311 UDF_I_LENEXTENTS(inode) = inode->i_size;
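
The truncate paths above repeatedly use the rounding idiom (len + blocksize - 1) >> blocksize_bits to turn a byte length into a whole-block count. A tiny stand-alone illustration, with the block size chosen arbitrarily for the example:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t blocksize_bits = 11;              /* 2048-byte blocks */
	uint32_t blocksize = 1u << blocksize_bits;
	uint32_t elen = 5000;                      /* extent length in bytes */

	/* round up to whole blocks, as extent_trunc() does */
	uint32_t blocks = (elen + blocksize - 1) >> blocksize_bits;

	printf("%u bytes -> %u blocks\n", elen, blocks);  /* prints 5000 -> 3 */
	return 0;
}
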
diff --git a/fs/udf/udf_i.h b/fs/udf/udf_i.h
index bee4308a8113..d7dbe6f3ba0c 100644
--- a/fs/udf/udf_i.h
+++ b/fs/udf/udf_i.h
@@ -23,4 +23,4 @@ static inline struct udf_inode_info *UDF_I(struct inode *inode)
23#define UDF_I_LAD(X) ( UDF_I(X)->i_ext.i_lad ) 23#define UDF_I_LAD(X) ( UDF_I(X)->i_ext.i_lad )
24#define UDF_I_DATA(X) ( UDF_I(X)->i_ext.i_data ) 24#define UDF_I_DATA(X) ( UDF_I(X)->i_ext.i_data )
25 25
26#endif /* !defined(_LINUX_UDF_I_H) */ 26#endif /* !defined(_LINUX_UDF_I_H) */
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
index 60f31d8cebee..3e937d3fb8f9 100644
--- a/fs/udf/udf_sb.h
+++ b/fs/udf/udf_sb.h
@@ -20,8 +20,8 @@
20#define UDF_FLAG_VARCONV 8 20#define UDF_FLAG_VARCONV 8
21#define UDF_FLAG_NLS_MAP 9 21#define UDF_FLAG_NLS_MAP 9
22#define UDF_FLAG_UTF8 10 22#define UDF_FLAG_UTF8 10
23#define UDF_FLAG_UID_FORGET 11 /* save -1 for uid to disk */ 23#define UDF_FLAG_UID_FORGET 11 /* save -1 for uid to disk */
24#define UDF_FLAG_UID_IGNORE 12 /* use sb uid instead of on disk uid */ 24#define UDF_FLAG_UID_IGNORE 12 /* use sb uid instead of on disk uid */
25#define UDF_FLAG_GID_FORGET 13 25#define UDF_FLAG_GID_FORGET 13
26#define UDF_FLAG_GID_IGNORE 14 26#define UDF_FLAG_GID_IGNORE 14
27 27
@@ -41,8 +41,7 @@ static inline struct udf_sb_info *UDF_SB(struct super_block *sb)
41 41
42#define UDF_SB_FREE(X)\ 42#define UDF_SB_FREE(X)\
43{\ 43{\
44 if (UDF_SB(X))\ 44 if (UDF_SB(X)) {\
45 {\
46 kfree(UDF_SB_PARTMAPS(X));\ 45 kfree(UDF_SB_PARTMAPS(X));\
47 UDF_SB_PARTMAPS(X) = NULL;\ 46 UDF_SB_PARTMAPS(X) = NULL;\
48 }\ 47 }\
@@ -51,13 +50,10 @@ static inline struct udf_sb_info *UDF_SB(struct super_block *sb)
51#define UDF_SB_ALLOC_PARTMAPS(X,Y)\ 50#define UDF_SB_ALLOC_PARTMAPS(X,Y)\
52{\ 51{\
53 UDF_SB_PARTMAPS(X) = kmalloc(sizeof(struct udf_part_map) * Y, GFP_KERNEL);\ 52 UDF_SB_PARTMAPS(X) = kmalloc(sizeof(struct udf_part_map) * Y, GFP_KERNEL);\
54 if (UDF_SB_PARTMAPS(X) != NULL)\ 53 if (UDF_SB_PARTMAPS(X) != NULL) {\
55 {\
56 UDF_SB_NUMPARTS(X) = Y;\ 54 UDF_SB_NUMPARTS(X) = Y;\
57 memset(UDF_SB_PARTMAPS(X), 0x00, sizeof(struct udf_part_map) * Y);\ 55 memset(UDF_SB_PARTMAPS(X), 0x00, sizeof(struct udf_part_map) * Y);\
58 }\ 56 } else {\
59 else\
60 {\
61 UDF_SB_NUMPARTS(X) = 0;\ 57 UDF_SB_NUMPARTS(X) = 0;\
62 udf_error(X, __FUNCTION__, "Unable to allocate space for %d partition maps", Y);\ 58 udf_error(X, __FUNCTION__, "Unable to allocate space for %d partition maps", Y);\
63 }\ 59 }\
@@ -72,15 +68,12 @@ static inline struct udf_sb_info *UDF_SB(struct super_block *sb)
72 UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap = kmalloc(size, GFP_KERNEL);\ 68 UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap = kmalloc(size, GFP_KERNEL);\
73 else\ 69 else\
74 UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap = vmalloc(size);\ 70 UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap = vmalloc(size);\
75 if (UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap != NULL)\ 71 if (UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap != NULL) {\
76 {\
77 memset(UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap, 0x00, size);\ 72 memset(UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap, 0x00, size);\
78 UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap->s_block_bitmap =\ 73 UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap->s_block_bitmap =\
79 (struct buffer_head **)(UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap + 1);\ 74 (struct buffer_head **)(UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap + 1);\
80 UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap->s_nr_groups = nr_groups;\ 75 UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap->s_nr_groups = nr_groups;\
81 }\ 76 } else {\
82 else\
83 {\
84 udf_error(X, __FUNCTION__, "Unable to allocate space for bitmap and %d buffer_head pointers", nr_groups);\ 77 udf_error(X, __FUNCTION__, "Unable to allocate space for bitmap and %d buffer_head pointers", nr_groups);\
85 }\ 78 }\
86} 79}
@@ -90,8 +83,7 @@ static inline struct udf_sb_info *UDF_SB(struct super_block *sb)
90 int i;\ 83 int i;\
91 int nr_groups = UDF_SB_BITMAP_NR_GROUPS(X,Y,Z);\ 84 int nr_groups = UDF_SB_BITMAP_NR_GROUPS(X,Y,Z);\
92 int size = sizeof(struct udf_bitmap) + (sizeof(struct buffer_head *) * nr_groups);\ 85 int size = sizeof(struct udf_bitmap) + (sizeof(struct buffer_head *) * nr_groups);\
93 for (i=0; i<nr_groups; i++)\ 86 for (i = 0; i < nr_groups; i++) {\
94 {\
95 if (UDF_SB_BITMAP(X,Y,Z,i))\ 87 if (UDF_SB_BITMAP(X,Y,Z,i))\
96 brelse(UDF_SB_BITMAP(X,Y,Z,i));\ 88 brelse(UDF_SB_BITMAP(X,Y,Z,i));\
97 }\ 89 }\
@@ -139,4 +131,4 @@ static inline struct udf_sb_info *UDF_SB(struct super_block *sb)
139#define UDF_SB_FLAGS(X) ( UDF_SB(X)->s_flags ) 131#define UDF_SB_FLAGS(X) ( UDF_SB(X)->s_flags )
140#define UDF_SB_VAT(X) ( UDF_SB(X)->s_vat ) 132#define UDF_SB_VAT(X) ( UDF_SB(X)->s_vat )
141 133
142#endif /* __LINUX_UDF_SB_H */ 134#endif /* __LINUX_UDF_SB_H */
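
The bitmap-allocation macro cleaned up above picks kmalloc() for small bitmaps and vmalloc() for large ones, then zeroes the result. The size test itself sits outside this hunk, so the PAGE_SIZE threshold below is an assumption; the helper name is invented for illustration only.

/* Sketch only; needs <linux/slab.h>, <linux/vmalloc.h>, <linux/string.h>. */
static void *udf_alloc_zeroed(size_t size)
{
	void *p;

	if (size <= PAGE_SIZE)
		p = kmalloc(size, GFP_KERNEL);
	else
		p = vmalloc(size);
	if (p)
		memset(p, 0, size);
	return p;
}
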
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index 76f2b82a39dc..c8016cc9e7e6 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -63,8 +63,8 @@ struct udf_vds_record {
63}; 63};
64 64
65struct generic_desc { 65struct generic_desc {
66 tag descTag; 66 tag descTag;
67 __le32 volDescSeqNum; 67 __le32 volDescSeqNum;
68}; 68};
69 69
70struct ustr { 70struct ustr {
diff --git a/fs/udf/udfend.h b/fs/udf/udfend.h
index 450daab35a13..c4bd1203f857 100644
--- a/fs/udf/udfend.h
+++ b/fs/udf/udfend.h
@@ -7,75 +7,93 @@
7static inline kernel_lb_addr lelb_to_cpu(lb_addr in) 7static inline kernel_lb_addr lelb_to_cpu(lb_addr in)
8{ 8{
9 kernel_lb_addr out; 9 kernel_lb_addr out;
10
10 out.logicalBlockNum = le32_to_cpu(in.logicalBlockNum); 11 out.logicalBlockNum = le32_to_cpu(in.logicalBlockNum);
11 out.partitionReferenceNum = le16_to_cpu(in.partitionReferenceNum); 12 out.partitionReferenceNum = le16_to_cpu(in.partitionReferenceNum);
13
12 return out; 14 return out;
13} 15}
14 16
15static inline lb_addr cpu_to_lelb(kernel_lb_addr in) 17static inline lb_addr cpu_to_lelb(kernel_lb_addr in)
16{ 18{
17 lb_addr out; 19 lb_addr out;
20
18 out.logicalBlockNum = cpu_to_le32(in.logicalBlockNum); 21 out.logicalBlockNum = cpu_to_le32(in.logicalBlockNum);
19 out.partitionReferenceNum = cpu_to_le16(in.partitionReferenceNum); 22 out.partitionReferenceNum = cpu_to_le16(in.partitionReferenceNum);
23
20 return out; 24 return out;
21} 25}
22 26
23static inline kernel_timestamp lets_to_cpu(timestamp in) 27static inline kernel_timestamp lets_to_cpu(timestamp in)
24{ 28{
25 kernel_timestamp out; 29 kernel_timestamp out;
30
26 memcpy(&out, &in, sizeof(timestamp)); 31 memcpy(&out, &in, sizeof(timestamp));
27 out.typeAndTimezone = le16_to_cpu(in.typeAndTimezone); 32 out.typeAndTimezone = le16_to_cpu(in.typeAndTimezone);
28 out.year = le16_to_cpu(in.year); 33 out.year = le16_to_cpu(in.year);
34
29 return out; 35 return out;
30} 36}
31 37
32static inline short_ad lesa_to_cpu(short_ad in) 38static inline short_ad lesa_to_cpu(short_ad in)
33{ 39{
34 short_ad out; 40 short_ad out;
41
35 out.extLength = le32_to_cpu(in.extLength); 42 out.extLength = le32_to_cpu(in.extLength);
36 out.extPosition = le32_to_cpu(in.extPosition); 43 out.extPosition = le32_to_cpu(in.extPosition);
44
37 return out; 45 return out;
38} 46}
39 47
40static inline short_ad cpu_to_lesa(short_ad in) 48static inline short_ad cpu_to_lesa(short_ad in)
41{ 49{
42 short_ad out; 50 short_ad out;
51
43 out.extLength = cpu_to_le32(in.extLength); 52 out.extLength = cpu_to_le32(in.extLength);
44 out.extPosition = cpu_to_le32(in.extPosition); 53 out.extPosition = cpu_to_le32(in.extPosition);
54
45 return out; 55 return out;
46} 56}
47 57
48static inline kernel_long_ad lela_to_cpu(long_ad in) 58static inline kernel_long_ad lela_to_cpu(long_ad in)
49{ 59{
50 kernel_long_ad out; 60 kernel_long_ad out;
61
51 out.extLength = le32_to_cpu(in.extLength); 62 out.extLength = le32_to_cpu(in.extLength);
52 out.extLocation = lelb_to_cpu(in.extLocation); 63 out.extLocation = lelb_to_cpu(in.extLocation);
64
53 return out; 65 return out;
54} 66}
55 67
56static inline long_ad cpu_to_lela(kernel_long_ad in) 68static inline long_ad cpu_to_lela(kernel_long_ad in)
57{ 69{
58 long_ad out; 70 long_ad out;
71
59 out.extLength = cpu_to_le32(in.extLength); 72 out.extLength = cpu_to_le32(in.extLength);
60 out.extLocation = cpu_to_lelb(in.extLocation); 73 out.extLocation = cpu_to_lelb(in.extLocation);
74
61 return out; 75 return out;
62} 76}
63 77
64static inline kernel_extent_ad leea_to_cpu(extent_ad in) 78static inline kernel_extent_ad leea_to_cpu(extent_ad in)
65{ 79{
66 kernel_extent_ad out; 80 kernel_extent_ad out;
81
67 out.extLength = le32_to_cpu(in.extLength); 82 out.extLength = le32_to_cpu(in.extLength);
68 out.extLocation = le32_to_cpu(in.extLocation); 83 out.extLocation = le32_to_cpu(in.extLocation);
84
69 return out; 85 return out;
70} 86}
71 87
72static inline timestamp cpu_to_lets(kernel_timestamp in) 88static inline timestamp cpu_to_lets(kernel_timestamp in)
73{ 89{
74 timestamp out; 90 timestamp out;
91
75 memcpy(&out, &in, sizeof(timestamp)); 92 memcpy(&out, &in, sizeof(timestamp));
76 out.typeAndTimezone = cpu_to_le16(in.typeAndTimezone); 93 out.typeAndTimezone = cpu_to_le16(in.typeAndTimezone);
77 out.year = cpu_to_le16(in.year); 94 out.year = cpu_to_le16(in.year);
95
78 return out; 96 return out;
79} 97}
80 98
81#endif /* __UDF_ENDIAN_H */ 99#endif /* __UDF_ENDIAN_H */
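
These helpers all follow the same pattern: build a CPU-order struct field by field from the little-endian on-disk layout. For reference, the same pattern on an invented two-field record; only le32_to_cpu()/le16_to_cpu() are real kernel helpers here.

struct disk_pair {		/* on-disk, little-endian */
	__le32 first;
	__le16 second;
};

struct cpu_pair {		/* in-memory, CPU byte order */
	uint32_t first;
	uint16_t second;
};

static inline struct cpu_pair lepair_to_cpu(struct disk_pair in)
{
	struct cpu_pair out;

	out.first = le32_to_cpu(in.first);
	out.second = le16_to_cpu(in.second);

	return out;
}
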
diff --git a/fs/udf/udftime.c b/fs/udf/udftime.c
index b9f3198080e9..3fd80eb66af3 100644
--- a/fs/udf/udftime.c
+++ b/fs/udf/udftime.c
@@ -18,18 +18,18 @@
18 Boston, MA 02111-1307, USA. */ 18 Boston, MA 02111-1307, USA. */
19 19
20/* 20/*
21 * dgb 10/02/98: ripped this from glibc source to help convert timestamps to unix time 21 * dgb 10/02/98: ripped this from glibc source to help convert timestamps to unix time
22 * 10/04/98: added new table-based lookup after seeing how ugly the gnu code is 22 * 10/04/98: added new table-based lookup after seeing how ugly the gnu code is
23 * blf 09/27/99: ripped out all the old code and inserted new table from 23 * blf 09/27/99: ripped out all the old code and inserted new table from
24 * John Brockmeyer (without leap second corrections) 24 * John Brockmeyer (without leap second corrections)
25 * rewrote udf_stamp_to_time and fixed timezone accounting in 25 * rewrote udf_stamp_to_time and fixed timezone accounting in
26 udf_time_to_stamp. 26 * udf_time_to_stamp.
27 */ 27 */
28 28
29/* 29/*
30 * We don't take into account leap seconds. This may be correct or incorrect. 30 * We don't take into account leap seconds. This may be correct or incorrect.
31 * For more NIST information (especially dealing with leap seconds), see: 31 * For more NIST information (especially dealing with leap seconds), see:
32 * http://www.boulder.nist.gov/timefreq/pubs/bulletin/leapsecond.htm 32 * http://www.boulder.nist.gov/timefreq/pubs/bulletin/leapsecond.htm
33 */ 33 */
34 34
35#include <linux/types.h> 35#include <linux/types.h>
@@ -54,28 +54,28 @@ static const unsigned short int __mon_yday[2][13] = {
54}; 54};
55 55
56#define MAX_YEAR_SECONDS 69 56#define MAX_YEAR_SECONDS 69
57#define SPD 0x15180 /*3600*24 */ 57#define SPD 0x15180 /*3600*24 */
58#define SPY(y,l,s) (SPD * (365*y+l)+s) 58#define SPY(y,l,s) (SPD * (365*y+l)+s)
59 59
60static time_t year_seconds[MAX_YEAR_SECONDS] = { 60static time_t year_seconds[MAX_YEAR_SECONDS]= {
61/*1970*/ SPY(0, 0, 0), SPY(1, 0, 0), SPY(2, 0, 0), SPY(3, 1, 0), 61/*1970*/ SPY( 0, 0,0), SPY( 1, 0,0), SPY( 2, 0,0), SPY( 3, 1,0),
62/*1974*/ SPY(4, 1, 0), SPY(5, 1, 0), SPY(6, 1, 0), SPY(7, 2, 0), 62/*1974*/ SPY( 4, 1,0), SPY( 5, 1,0), SPY( 6, 1,0), SPY( 7, 2,0),
63/*1978*/ SPY(8, 2, 0), SPY(9, 2, 0), SPY(10, 2, 0), SPY(11, 3, 0), 63/*1978*/ SPY( 8, 2,0), SPY( 9, 2,0), SPY(10, 2,0), SPY(11, 3,0),
64/*1982*/ SPY(12, 3, 0), SPY(13, 3, 0), SPY(14, 3, 0), SPY(15, 4, 0), 64/*1982*/ SPY(12, 3,0), SPY(13, 3,0), SPY(14, 3,0), SPY(15, 4,0),
65/*1986*/ SPY(16, 4, 0), SPY(17, 4, 0), SPY(18, 4, 0), SPY(19, 5, 0), 65/*1986*/ SPY(16, 4,0), SPY(17, 4,0), SPY(18, 4,0), SPY(19, 5,0),
66/*1990*/ SPY(20, 5, 0), SPY(21, 5, 0), SPY(22, 5, 0), SPY(23, 6, 0), 66/*1990*/ SPY(20, 5,0), SPY(21, 5,0), SPY(22, 5,0), SPY(23, 6,0),
67/*1994*/ SPY(24, 6, 0), SPY(25, 6, 0), SPY(26, 6, 0), SPY(27, 7, 0), 67/*1994*/ SPY(24, 6,0), SPY(25, 6,0), SPY(26, 6,0), SPY(27, 7,0),
68/*1998*/ SPY(28, 7, 0), SPY(29, 7, 0), SPY(30, 7, 0), SPY(31, 8, 0), 68/*1998*/ SPY(28, 7,0), SPY(29, 7,0), SPY(30, 7,0), SPY(31, 8,0),
69/*2002*/ SPY(32, 8, 0), SPY(33, 8, 0), SPY(34, 8, 0), SPY(35, 9, 0), 69/*2002*/ SPY(32, 8,0), SPY(33, 8,0), SPY(34, 8,0), SPY(35, 9,0),
70/*2006*/ SPY(36, 9, 0), SPY(37, 9, 0), SPY(38, 9, 0), SPY(39, 10, 0), 70/*2006*/ SPY(36, 9,0), SPY(37, 9,0), SPY(38, 9,0), SPY(39,10,0),
71/*2010*/ SPY(40, 10, 0), SPY(41, 10, 0), SPY(42, 10, 0), SPY(43, 11, 0), 71/*2010*/ SPY(40,10,0), SPY(41,10,0), SPY(42,10,0), SPY(43,11,0),
72/*2014*/ SPY(44, 11, 0), SPY(45, 11, 0), SPY(46, 11, 0), SPY(47, 12, 0), 72/*2014*/ SPY(44,11,0), SPY(45,11,0), SPY(46,11,0), SPY(47,12,0),
73/*2018*/ SPY(48, 12, 0), SPY(49, 12, 0), SPY(50, 12, 0), SPY(51, 13, 0), 73/*2018*/ SPY(48,12,0), SPY(49,12,0), SPY(50,12,0), SPY(51,13,0),
74/*2022*/ SPY(52, 13, 0), SPY(53, 13, 0), SPY(54, 13, 0), SPY(55, 14, 0), 74/*2022*/ SPY(52,13,0), SPY(53,13,0), SPY(54,13,0), SPY(55,14,0),
75/*2026*/ SPY(56, 14, 0), SPY(57, 14, 0), SPY(58, 14, 0), SPY(59, 15, 0), 75/*2026*/ SPY(56,14,0), SPY(57,14,0), SPY(58,14,0), SPY(59,15,0),
76/*2030*/ SPY(60, 15, 0), SPY(61, 15, 0), SPY(62, 15, 0), SPY(63, 16, 0), 76/*2030*/ SPY(60,15,0), SPY(61,15,0), SPY(62,15,0), SPY(63,16,0),
77/*2034*/ SPY(64, 16, 0), SPY(65, 16, 0), SPY(66, 16, 0), SPY(67, 17, 0), 77/*2034*/ SPY(64,16,0), SPY(65,16,0), SPY(66,16,0), SPY(67,17,0),
78/*2038*/ SPY(68, 17, 0) 78/*2038*/ SPY(68,17,0)
79}; 79};
80 80
81extern struct timezone sys_tz; 81extern struct timezone sys_tz;
@@ -83,7 +83,7 @@ extern struct timezone sys_tz;
83#define SECS_PER_HOUR (60 * 60) 83#define SECS_PER_HOUR (60 * 60)
84#define SECS_PER_DAY (SECS_PER_HOUR * 24) 84#define SECS_PER_DAY (SECS_PER_HOUR * 24)
85 85
86time_t *udf_stamp_to_time(time_t * dest, long *dest_usec, kernel_timestamp src) 86time_t *udf_stamp_to_time(time_t *dest, long *dest_usec, kernel_timestamp src)
87{ 87{
88 int yday; 88 int yday;
89 uint8_t type = src.typeAndTimezone >> 12; 89 uint8_t type = src.typeAndTimezone >> 12;
@@ -93,10 +93,11 @@ time_t *udf_stamp_to_time(time_t * dest, long *dest_usec, kernel_timestamp src)
93 offset = src.typeAndTimezone << 4; 93 offset = src.typeAndTimezone << 4;
94 /* sign extent offset */ 94 /* sign extent offset */
95 offset = (offset >> 4); 95 offset = (offset >> 4);
96 if (offset == -2047) /* unspecified offset */ 96 if (offset == -2047) /* unspecified offset */
97 offset = 0; 97 offset = 0;
98 } else 98 } else {
99 offset = 0; 99 offset = 0;
100 }
100 101
101 if ((src.year < EPOCH_YEAR) || 102 if ((src.year < EPOCH_YEAR) ||
102 (src.year >= EPOCH_YEAR + MAX_YEAR_SECONDS)) { 103 (src.year >= EPOCH_YEAR + MAX_YEAR_SECONDS)) {
@@ -107,12 +108,10 @@ time_t *udf_stamp_to_time(time_t * dest, long *dest_usec, kernel_timestamp src)
107 *dest = year_seconds[src.year - EPOCH_YEAR]; 108 *dest = year_seconds[src.year - EPOCH_YEAR];
108 *dest -= offset * 60; 109 *dest -= offset * 60;
109 110
110 yday = ((__mon_yday[__isleap(src.year)] 111 yday = ((__mon_yday[__isleap (src.year)]
111 [src.month - 1]) + (src.day - 1)); 112 [src.month - 1]) + (src.day - 1));
112 *dest += (((yday * 24) + src.hour) * 60 + src.minute) * 60 + src.second; 113 *dest += ( ( (yday * 24) + src.hour ) * 60 + src.minute ) * 60 + src.second;
113 *dest_usec = 114 *dest_usec = src.centiseconds * 10000 + src.hundredsOfMicroseconds * 100 + src.microseconds;
114 src.centiseconds * 10000 + src.hundredsOfMicroseconds * 100 +
115 src.microseconds;
116 return dest; 115 return dest;
117} 116}
118 117
@@ -145,8 +144,9 @@ kernel_timestamp *udf_time_to_stamp(kernel_timestamp * dest, struct timespec ts)
145 long int yg = y + days / 365 - (days % 365 < 0); 144 long int yg = y + days / 365 - (days % 365 < 0);
146 145
147 /* Adjust DAYS and Y to match the guessed year. */ 146 /* Adjust DAYS and Y to match the guessed year. */
148 days -= ((yg - y) * 365 + LEAPS_THRU_END_OF(yg - 1) 147 days -= ((yg - y) * 365
149 - LEAPS_THRU_END_OF(y - 1)); 148 + LEAPS_THRU_END_OF (yg - 1)
149 - LEAPS_THRU_END_OF (y - 1));
150 y = yg; 150 y = yg;
151 } 151 }
152 dest->year = y; 152 dest->year = y;
@@ -158,11 +158,9 @@ kernel_timestamp *udf_time_to_stamp(kernel_timestamp * dest, struct timespec ts)
158 dest->day = days + 1; 158 dest->day = days + 1;
159 159
160 dest->centiseconds = ts.tv_nsec / 10000000; 160 dest->centiseconds = ts.tv_nsec / 10000000;
161 dest->hundredsOfMicroseconds = 161 dest->hundredsOfMicroseconds = (ts.tv_nsec / 1000 - dest->centiseconds * 10000) / 100;
162 (ts.tv_nsec / 1000 - dest->centiseconds * 10000) / 100; 162 dest->microseconds = (ts.tv_nsec / 1000 - dest->centiseconds * 10000 -
163 dest->microseconds = 163 dest->hundredsOfMicroseconds * 100);
164 (ts.tv_nsec / 1000 - dest->centiseconds * 10000 -
165 dest->hundredsOfMicroseconds * 100);
166 return dest; 164 return dest;
167} 165}
168 166
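
udf_stamp_to_time() above builds the result as year_seconds[year - EPOCH_YEAR] plus (((yday * 24) + hour) * 60 + minute) * 60 + second, with yday taken from the __mon_yday table. A worked example of the seconds-into-year part; the date and numbers are illustrative only:

#include <stdio.h>

int main(void)
{
	/* Feb 15 in a non-leap year: __mon_yday[0][1] = 31, day - 1 = 14 */
	long yday = 31 + 14;			/* 45 */
	long hour = 10, minute = 30, second = 0;

	long secs_into_year =
		(((yday * 24) + hour) * 60 + minute) * 60 + second;

	printf("%ld\n", secs_into_year);	/* 3925800 */
	return 0;
}
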
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
index 46835240275c..9e6099c26c27 100644
--- a/fs/udf/unicode.c
+++ b/fs/udf/unicode.c
@@ -29,21 +29,23 @@
29 29
30static int udf_translate_to_linux(uint8_t *, uint8_t *, int, uint8_t *, int); 30static int udf_translate_to_linux(uint8_t *, uint8_t *, int, uint8_t *, int);
31 31
32static int udf_char_to_ustr(struct ustr *dest, const uint8_t * src, int strlen) 32static int udf_char_to_ustr(struct ustr *dest, const uint8_t *src, int strlen)
33{ 33{
34 if ((!dest) || (!src) || (!strlen) || (strlen > UDF_NAME_LEN - 2)) 34 if ((!dest) || (!src) || (!strlen) || (strlen > UDF_NAME_LEN - 2))
35 return 0; 35 return 0;
36
36 memset(dest, 0, sizeof(struct ustr)); 37 memset(dest, 0, sizeof(struct ustr));
37 memcpy(dest->u_name, src, strlen); 38 memcpy(dest->u_name, src, strlen);
38 dest->u_cmpID = 0x08; 39 dest->u_cmpID = 0x08;
39 dest->u_len = strlen; 40 dest->u_len = strlen;
41
40 return strlen; 42 return strlen;
41} 43}
42 44
43/* 45/*
44 * udf_build_ustr 46 * udf_build_ustr
45 */ 47 */
46int udf_build_ustr(struct ustr *dest, dstring * ptr, int size) 48int udf_build_ustr(struct ustr *dest, dstring *ptr, int size)
47{ 49{
48 int usesize; 50 int usesize;
49 51
@@ -55,13 +57,14 @@ int udf_build_ustr(struct ustr *dest, dstring * ptr, int size)
55 dest->u_cmpID = ptr[0]; 57 dest->u_cmpID = ptr[0];
56 dest->u_len = ptr[size - 1]; 58 dest->u_len = ptr[size - 1];
57 memcpy(dest->u_name, ptr + 1, usesize - 1); 59 memcpy(dest->u_name, ptr + 1, usesize - 1);
60
58 return 0; 61 return 0;
59} 62}
60 63
61/* 64/*
62 * udf_build_ustr_exact 65 * udf_build_ustr_exact
63 */ 66 */
64static int udf_build_ustr_exact(struct ustr *dest, dstring * ptr, int exactsize) 67static int udf_build_ustr_exact(struct ustr *dest, dstring *ptr, int exactsize)
65{ 68{
66 if ((!dest) || (!ptr) || (!exactsize)) 69 if ((!dest) || (!ptr) || (!exactsize))
67 return -1; 70 return -1;
@@ -70,6 +73,7 @@ static int udf_build_ustr_exact(struct ustr *dest, dstring * ptr, int exactsize)
70 dest->u_cmpID = ptr[0]; 73 dest->u_cmpID = ptr[0];
71 dest->u_len = exactsize - 1; 74 dest->u_len = exactsize - 1;
72 memcpy(dest->u_name, ptr + 1, exactsize - 1); 75 memcpy(dest->u_name, ptr + 1, exactsize - 1);
76
73 return 0; 77 return 0;
74} 78}
75 79
@@ -129,20 +133,15 @@ int udf_CS0toUTF8(struct ustr *utf_o, struct ustr *ocu_i)
129 c = (c << 8) | ocu[i++]; 133 c = (c << 8) | ocu[i++];
130 134
131 /* Compress Unicode to UTF-8 */ 135 /* Compress Unicode to UTF-8 */
132 if (c < 0x80U) 136 if (c < 0x80U) {
133 utf_o->u_name[utf_o->u_len++] = (uint8_t) c; 137 utf_o->u_name[utf_o->u_len++] = (uint8_t)c;
134 else if (c < 0x800U) { 138 } else if (c < 0x800U) {
135 utf_o->u_name[utf_o->u_len++] = 139 utf_o->u_name[utf_o->u_len++] = (uint8_t)(0xc0 | (c >> 6));
136 (uint8_t) (0xc0 | (c >> 6)); 140 utf_o->u_name[utf_o->u_len++] = (uint8_t)(0x80 | (c & 0x3f));
137 utf_o->u_name[utf_o->u_len++] =
138 (uint8_t) (0x80 | (c & 0x3f));
139 } else { 141 } else {
140 utf_o->u_name[utf_o->u_len++] = 142 utf_o->u_name[utf_o->u_len++] = (uint8_t)(0xe0 | (c >> 12));
141 (uint8_t) (0xe0 | (c >> 12)); 143 utf_o->u_name[utf_o->u_len++] = (uint8_t)(0x80 | ((c >> 6) & 0x3f));
142 utf_o->u_name[utf_o->u_len++] = 144 utf_o->u_name[utf_o->u_len++] = (uint8_t)(0x80 | (c & 0x3f));
143 (uint8_t) (0x80 | ((c >> 6) & 0x3f));
144 utf_o->u_name[utf_o->u_len++] =
145 (uint8_t) (0x80 | (c & 0x3f));
146 } 145 }
147 } 146 }
148 utf_o->u_cmpID = 8; 147 utf_o->u_cmpID = 8;
@@ -173,7 +172,7 @@ int udf_CS0toUTF8(struct ustr *utf_o, struct ustr *ocu_i)
173 * November 12, 1997 - Andrew E. Mileski 172 * November 12, 1997 - Andrew E. Mileski
174 * Written, tested, and released. 173 * Written, tested, and released.
175 */ 174 */
176static int udf_UTF8toCS0(dstring * ocu, struct ustr *utf, int length) 175static int udf_UTF8toCS0(dstring *ocu, struct ustr *utf, int length)
177{ 176{
178 unsigned c, i, max_val, utf_char; 177 unsigned c, i, max_val, utf_char;
179 int utf_cnt, u_len; 178 int utf_cnt, u_len;
@@ -182,12 +181,12 @@ static int udf_UTF8toCS0(dstring * ocu, struct ustr *utf, int length)
182 ocu[0] = 8; 181 ocu[0] = 8;
183 max_val = 0xffU; 182 max_val = 0xffU;
184 183
185 try_again: 184try_again:
186 u_len = 0U; 185 u_len = 0U;
187 utf_char = 0U; 186 utf_char = 0U;
188 utf_cnt = 0U; 187 utf_cnt = 0U;
189 for (i = 0U; i < utf->u_len; i++) { 188 for (i = 0U; i < utf->u_len; i++) {
190 c = (uint8_t) utf->u_name[i]; 189 c = (uint8_t)utf->u_name[i];
191 190
192 /* Complete a multi-byte UTF-8 character */ 191 /* Complete a multi-byte UTF-8 character */
193 if (utf_cnt) { 192 if (utf_cnt) {
@@ -213,37 +212,40 @@ static int udf_UTF8toCS0(dstring * ocu, struct ustr *utf, int length)
213 } else if ((c & 0xfeU) == 0xfcU) { 212 } else if ((c & 0xfeU) == 0xfcU) {
214 utf_char = c & 0x01U; 213 utf_char = c & 0x01U;
215 utf_cnt = 5; 214 utf_cnt = 5;
216 } else 215 } else {
217 goto error_out; 216 goto error_out;
217 }
218 continue; 218 continue;
219 } else 219 } else {
220 /* Single byte UTF-8 character (most common) */ 220 /* Single byte UTF-8 character (most common) */
221 utf_char = c; 221 utf_char = c;
222 }
222 } 223 }
223 224
224 /* Choose no compression if necessary */ 225 /* Choose no compression if necessary */
225 if (utf_char > max_val) { 226 if (utf_char > max_val) {
226 if (0xffU == max_val) { 227 if (max_val == 0xffU) {
227 max_val = 0xffffU; 228 max_val = 0xffffU;
228 ocu[0] = (uint8_t) 0x10U; 229 ocu[0] = (uint8_t)0x10U;
229 goto try_again; 230 goto try_again;
230 } 231 }
231 goto error_out; 232 goto error_out;
232 } 233 }
233 234
234 if (max_val == 0xffffU) { 235 if (max_val == 0xffffU) {
235 ocu[++u_len] = (uint8_t) (utf_char >> 8); 236 ocu[++u_len] = (uint8_t)(utf_char >> 8);
236 } 237 }
237 ocu[++u_len] = (uint8_t) (utf_char & 0xffU); 238 ocu[++u_len] = (uint8_t)(utf_char & 0xffU);
238 } 239 }
239 240
240 if (utf_cnt) { 241 if (utf_cnt) {
241 error_out: 242error_out:
242 ocu[++u_len] = '?'; 243 ocu[++u_len] = '?';
243 printk(KERN_DEBUG "udf: bad UTF-8 character\n"); 244 printk(KERN_DEBUG "udf: bad UTF-8 character\n");
244 } 245 }
245 246
246 ocu[length - 1] = (uint8_t) u_len + 1; 247 ocu[length - 1] = (uint8_t)u_len + 1;
248
247 return u_len + 1; 249 return u_len + 1;
248} 250}
249 251
@@ -288,7 +290,7 @@ static int udf_CS0toNLS(struct nls_table *nls, struct ustr *utf_o,
288 return utf_o->u_len; 290 return utf_o->u_len;
289} 291}
290 292
291static int udf_NLStoCS0(struct nls_table *nls, dstring * ocu, struct ustr *uni, 293static int udf_NLStoCS0(struct nls_table *nls, dstring *ocu, struct ustr *uni,
292 int length) 294 int length)
293{ 295{
294 unsigned len, i, max_val; 296 unsigned len, i, max_val;
@@ -299,7 +301,7 @@ static int udf_NLStoCS0(struct nls_table *nls, dstring * ocu, struct ustr *uni,
299 ocu[0] = 8; 301 ocu[0] = 8;
300 max_val = 0xffU; 302 max_val = 0xffU;
301 303
302 try_again: 304try_again:
303 u_len = 0U; 305 u_len = 0U;
304 for (i = 0U; i < uni->u_len; i++) { 306 for (i = 0U; i < uni->u_len; i++) {
305 len = nls->char2uni(&uni->u_name[i], uni->u_len - i, &uni_char); 307 len = nls->char2uni(&uni->u_name[i], uni->u_len - i, &uni_char);
@@ -308,21 +310,21 @@ static int udf_NLStoCS0(struct nls_table *nls, dstring * ocu, struct ustr *uni,
308 310
309 if (uni_char > max_val) { 311 if (uni_char > max_val) {
310 max_val = 0xffffU; 312 max_val = 0xffffU;
311 ocu[0] = (uint8_t) 0x10U; 313 ocu[0] = (uint8_t)0x10U;
312 goto try_again; 314 goto try_again;
313 } 315 }
314 316
315 if (max_val == 0xffffU) 317 if (max_val == 0xffffU)
316 ocu[++u_len] = (uint8_t) (uni_char >> 8); 318 ocu[++u_len] = (uint8_t)(uni_char >> 8);
317 ocu[++u_len] = (uint8_t) (uni_char & 0xffU); 319 ocu[++u_len] = (uint8_t)(uni_char & 0xffU);
318 i += len - 1; 320 i += len - 1;
319 } 321 }
320 322
321 ocu[length - 1] = (uint8_t) u_len + 1; 323 ocu[length - 1] = (uint8_t)u_len + 1;
322 return u_len + 1; 324 return u_len + 1;
323} 325}
324 326
325int udf_get_filename(struct super_block *sb, uint8_t * sname, uint8_t * dname, 327int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname,
326 int flen) 328 int flen)
327{ 329{
328 struct ustr filename, unifilename; 330 struct ustr filename, unifilename;
@@ -334,30 +336,29 @@ int udf_get_filename(struct super_block *sb, uint8_t * sname, uint8_t * dname,
334 336
335 if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) { 337 if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) {
336 if (!udf_CS0toUTF8(&filename, &unifilename)) { 338 if (!udf_CS0toUTF8(&filename, &unifilename)) {
337 udf_debug("Failed in udf_get_filename: sname = %s\n", 339 udf_debug("Failed in udf_get_filename: sname = %s\n", sname);
338 sname);
339 return 0; 340 return 0;
340 } 341 }
341 } else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) { 342 } else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) {
342 if (!udf_CS0toNLS 343 if (!udf_CS0toNLS(UDF_SB(sb)->s_nls_map, &filename, &unifilename)) {
343 (UDF_SB(sb)->s_nls_map, &filename, &unifilename)) { 344 udf_debug("Failed in udf_get_filename: sname = %s\n", sname);
344 udf_debug("Failed in udf_get_filename: sname = %s\n",
345 sname);
346 return 0; 345 return 0;
347 } 346 }
348 } else 347 } else {
349 return 0; 348 return 0;
349 }
350 350
351 if ((len = 351 len = udf_translate_to_linux(dname, filename.u_name, filename.u_len,
352 udf_translate_to_linux(dname, filename.u_name, filename.u_len, 352 unifilename.u_name, unifilename.u_len);
353 unifilename.u_name, unifilename.u_len))) { 353 if (len) {
354 return len; 354 return len;
355 } 355 }
356
356 return 0; 357 return 0;
357} 358}
358 359
359int udf_put_filename(struct super_block *sb, const uint8_t * sname, 360int udf_put_filename(struct super_block *sb, const uint8_t *sname,
360 uint8_t * dname, int flen) 361 uint8_t *dname, int flen)
361{ 362{
362 struct ustr unifilename; 363 struct ustr unifilename;
363 int namelen; 364 int namelen;
@@ -367,31 +368,29 @@ int udf_put_filename(struct super_block *sb, const uint8_t * sname,
367 } 368 }
368 369
369 if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) { 370 if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) {
370 if (! 371 namelen = udf_UTF8toCS0(dname, &unifilename, UDF_NAME_LEN);
371 (namelen = 372 if (!namelen) {
372 udf_UTF8toCS0(dname, &unifilename, UDF_NAME_LEN))) {
373 return 0; 373 return 0;
374 } 374 }
375 } else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) { 375 } else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) {
376 if (! 376 namelen = udf_NLStoCS0(UDF_SB(sb)->s_nls_map, dname, &unifilename, UDF_NAME_LEN);
377 (namelen = 377 if (!namelen) {
378 udf_NLStoCS0(UDF_SB(sb)->s_nls_map, dname, &unifilename,
379 UDF_NAME_LEN))) {
380 return 0; 378 return 0;
381 } 379 }
382 } else 380 } else {
383 return 0; 381 return 0;
382 }
384 383
385 return namelen; 384 return namelen;
386} 385}
387 386
388#define ILLEGAL_CHAR_MARK '_' 387#define ILLEGAL_CHAR_MARK '_'
389#define EXT_MARK '.' 388#define EXT_MARK '.'
390#define CRC_MARK '#' 389#define CRC_MARK '#'
391#define EXT_SIZE 5 390#define EXT_SIZE 5
392 391
393static int udf_translate_to_linux(uint8_t * newName, uint8_t * udfName, 392static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName, int udfLen,
394 int udfLen, uint8_t * fidName, int fidNameLen) 393 uint8_t *fidName, int fidNameLen)
395{ 394{
396 int index, newIndex = 0, needsCRC = 0; 395 int index, newIndex = 0, needsCRC = 0;
397 int extIndex = 0, newExtIndex = 0, hasExt = 0; 396 int extIndex = 0, newExtIndex = 0, hasExt = 0;
@@ -399,8 +398,8 @@ static int udf_translate_to_linux(uint8_t * newName, uint8_t * udfName,
399 uint8_t curr; 398 uint8_t curr;
400 const uint8_t hexChar[] = "0123456789ABCDEF"; 399 const uint8_t hexChar[] = "0123456789ABCDEF";
401 400
402 if (udfName[0] == '.' && (udfLen == 1 || 401 if (udfName[0] == '.' &&
403 (udfLen == 2 && udfName[1] == '.'))) { 402 (udfLen == 1 || (udfLen == 2 && udfName[1] == '.'))) {
404 needsCRC = 1; 403 needsCRC = 1;
405 newIndex = udfLen; 404 newIndex = udfLen;
406 memcpy(newName, udfName, udfLen); 405 memcpy(newName, udfName, udfLen);
@@ -410,16 +409,13 @@ static int udf_translate_to_linux(uint8_t * newName, uint8_t * udfName,
410 if (curr == '/' || curr == 0) { 409 if (curr == '/' || curr == 0) {
411 needsCRC = 1; 410 needsCRC = 1;
412 curr = ILLEGAL_CHAR_MARK; 411 curr = ILLEGAL_CHAR_MARK;
413 while (index + 1 < udfLen 412 while (index + 1 < udfLen && (udfName[index + 1] == '/' ||
414 && (udfName[index + 1] == '/' 413 udfName[index + 1] == 0))
415 || udfName[index + 1] == 0))
416 index++; 414 index++;
417 } 415 } if (curr == EXT_MARK && (udfLen - index - 1) <= EXT_SIZE) {
418 if (curr == EXT_MARK 416 if (udfLen == index + 1) {
419 && (udfLen - index - 1) <= EXT_SIZE) {
420 if (udfLen == index + 1)
421 hasExt = 0; 417 hasExt = 0;
422 else { 418 } else {
423 hasExt = 1; 419 hasExt = 1;
424 extIndex = index; 420 extIndex = index;
425 newExtIndex = newIndex; 421 newExtIndex = newIndex;
@@ -437,23 +433,16 @@ static int udf_translate_to_linux(uint8_t * newName, uint8_t * udfName,
437 433
438 if (hasExt) { 434 if (hasExt) {
439 int maxFilenameLen; 435 int maxFilenameLen;
440 for (index = 0; 436 for(index = 0; index < EXT_SIZE && extIndex + index + 1 < udfLen; index++) {
441 index < EXT_SIZE && extIndex + index + 1 < udfLen;
442 index++) {
443 curr = udfName[extIndex + index + 1]; 437 curr = udfName[extIndex + index + 1];
444 438
445 if (curr == '/' || curr == 0) { 439 if (curr == '/' || curr == 0) {
446 needsCRC = 1; 440 needsCRC = 1;
447 curr = ILLEGAL_CHAR_MARK; 441 curr = ILLEGAL_CHAR_MARK;
448 while (extIndex + index + 2 < udfLen 442 while(extIndex + index + 2 < udfLen &&
449 && (index + 1 < EXT_SIZE 443 (index + 1 < EXT_SIZE
450 && 444 && (udfName[extIndex + index + 2] == '/' ||
451 (udfName 445 udfName[extIndex + index + 2] == 0)))
452 [extIndex + index + 2] ==
453 '/'
454 || udfName[extIndex +
455 index + 2] ==
456 0)))
457 index++; 446 index++;
458 } 447 }
459 ext[localExtIndex++] = curr; 448 ext[localExtIndex++] = curr;
@@ -463,8 +452,9 @@ static int udf_translate_to_linux(uint8_t * newName, uint8_t * udfName,
463 newIndex = maxFilenameLen; 452 newIndex = maxFilenameLen;
464 else 453 else
465 newIndex = newExtIndex; 454 newIndex = newExtIndex;
466 } else if (newIndex > 250) 455 } else if (newIndex > 250) {
467 newIndex = 250; 456 newIndex = 250;
457 }
468 newName[newIndex++] = CRC_MARK; 458 newName[newIndex++] = CRC_MARK;
469 valueCRC = udf_crc(fidName, fidNameLen, 0); 459 valueCRC = udf_crc(fidName, fidNameLen, 0);
470 newName[newIndex++] = hexChar[(valueCRC & 0xf000) >> 12]; 460 newName[newIndex++] = hexChar[(valueCRC & 0xf000) >> 12];
@@ -478,5 +468,6 @@ static int udf_translate_to_linux(uint8_t * newName, uint8_t * udfName,
478 newName[newIndex++] = ext[index]; 468 newName[newIndex++] = ext[index];
479 } 469 }
480 } 470 }
471
481 return newIndex; 472 return newIndex;
482} 473}
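
The CS0-to-UTF-8 branch reformatted above packs each 16-bit code point into one, two, or three bytes depending on its range (surrogates get no special treatment, matching the kernel code). The same packing as a stand-alone helper:

#include <stdint.h>

/* Returns the number of bytes written to buf (at most 3). */
static int pack_utf8(uint16_t c, uint8_t *buf)
{
	if (c < 0x80U) {
		buf[0] = (uint8_t)c;
		return 1;
	} else if (c < 0x800U) {
		buf[0] = (uint8_t)(0xc0 | (c >> 6));
		buf[1] = (uint8_t)(0x80 | (c & 0x3f));
		return 2;
	}
	buf[0] = (uint8_t)(0xe0 | (c >> 12));
	buf[1] = (uint8_t)(0x80 | ((c >> 6) & 0x3f));
	buf[2] = (uint8_t)(0x80 | (c & 0x3f));
	return 3;
}
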
diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h
index e2fcee2b340d..62c5ee4311da 100644
--- a/include/acpi/acpi_numa.h
+++ b/include/acpi/acpi_numa.h
@@ -13,6 +13,7 @@
13 13
14extern int pxm_to_node(int); 14extern int pxm_to_node(int);
15extern int node_to_pxm(int); 15extern int node_to_pxm(int);
16extern void __acpi_map_pxm_to_node(int, int);
16extern int acpi_map_pxm_to_node(int); 17extern int acpi_map_pxm_to_node(int);
17extern void __cpuinit acpi_unmap_pxm_to_node(int); 18extern void __cpuinit acpi_unmap_pxm_to_node(int);
18 19
diff --git a/include/asm-i386/e820.h b/include/asm-i386/e820.h
index c03290ccecb2..43114c824608 100644
--- a/include/asm-i386/e820.h
+++ b/include/asm-i386/e820.h
@@ -47,6 +47,14 @@ extern void e820_register_memory(void);
47extern void limit_regions(unsigned long long size); 47extern void limit_regions(unsigned long long size);
48extern void print_memory_map(char *who); 48extern void print_memory_map(char *who);
49 49
50#if defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND)
51extern void e820_mark_nosave_regions(void);
52#else
53static inline void e820_mark_nosave_regions(void)
54{
55}
56#endif
57
50#endif/*!__ASSEMBLY__*/ 58#endif/*!__ASSEMBLY__*/
51 59
52#endif/*__E820_HEADER*/ 60#endif/*__E820_HEADER*/
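
The e820_mark_nosave_regions() addition above uses the usual configuration-stub pattern: declare the real function when the feature is built in, and provide an empty static inline otherwise so callers need no #ifdefs. Generic form, with CONFIG_FEATURE and do_feature_work() as placeholders:

#ifdef CONFIG_FEATURE
extern void do_feature_work(void);
#else
static inline void do_feature_work(void)
{
}
#endif
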
diff --git a/include/asm-i386/geode.h b/include/asm-i386/geode.h
new file mode 100644
index 000000000000..6da4bbbea3dc
--- /dev/null
+++ b/include/asm-i386/geode.h
@@ -0,0 +1,159 @@
1/*
2 * AMD Geode definitions
3 * Copyright (C) 2006, Advanced Micro Devices, Inc.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public License
7 * as published by the Free Software Foundation.
8 */
9
10#ifndef _ASM_GEODE_H_
11#define _ASM_GEODE_H_
12
13#include <asm/processor.h>
14#include <linux/io.h>
15
16/* Generic southbridge functions */
17
18#define GEODE_DEV_PMS 0
19#define GEODE_DEV_ACPI 1
20#define GEODE_DEV_GPIO 2
21#define GEODE_DEV_MFGPT 3
22
23extern int geode_get_dev_base(unsigned int dev);
24
25/* Useful macros */
26#define geode_pms_base() geode_get_dev_base(GEODE_DEV_PMS)
27#define geode_acpi_base() geode_get_dev_base(GEODE_DEV_ACPI)
28#define geode_gpio_base() geode_get_dev_base(GEODE_DEV_GPIO)
29#define geode_mfgpt_base() geode_get_dev_base(GEODE_DEV_MFGPT)
30
31/* MSRS */
32
33#define GX_GLCP_SYS_RSTPLL 0x4C000014
34
35#define MSR_LBAR_SMB 0x5140000B
36#define MSR_LBAR_GPIO 0x5140000C
37#define MSR_LBAR_MFGPT 0x5140000D
38#define MSR_LBAR_ACPI 0x5140000E
39#define MSR_LBAR_PMS 0x5140000F
40
41#define MSR_PIC_YSEL_LOW 0x51400020
42#define MSR_PIC_YSEL_HIGH 0x51400021
43#define MSR_PIC_ZSEL_LOW 0x51400022
44#define MSR_PIC_ZSEL_HIGH 0x51400023
45
46#define MFGPT_IRQ_MSR 0x51400028
47#define MFGPT_NR_MSR 0x51400029
48
49/* Resource Sizes */
50
51#define LBAR_GPIO_SIZE 0xFF
52#define LBAR_MFGPT_SIZE 0x40
53#define LBAR_ACPI_SIZE 0x40
54#define LBAR_PMS_SIZE 0x80
55
56/* ACPI registers (PMS block) */
57
58/*
59 * PM1_EN is only valid when VSA is enabled for 16 bit reads.
60 * When VSA is not enabled, *always* read both PM1_STS and PM1_EN
61 * with a 32 bit read at offset 0x0
62 */
63
64#define PM1_STS 0x00
65#define PM1_EN 0x02
66#define PM1_CNT 0x08
67#define PM2_CNT 0x0C
68#define PM_TMR 0x10
69#define PM_GPE0_STS 0x18
70#define PM_GPE0_EN 0x1C
71
72/* PMC registers (PMS block) */
73
74#define PM_SSD 0x00
75#define PM_SCXA 0x04
76#define PM_SCYA 0x08
77#define PM_OUT_SLPCTL 0x0C
78#define PM_SCLK 0x10
79#define PM_SED 0x1
80#define PM_SCXD 0x18
81#define PM_SCYD 0x1C
82#define PM_IN_SLPCTL 0x20
83#define PM_WKD 0x30
84#define PM_WKXD 0x34
85#define PM_RD 0x38
86#define PM_WKXA 0x3C
87#define PM_FSD 0x40
88#define PM_TSD 0x44
89#define PM_PSD 0x48
90#define PM_NWKD 0x4C
91#define PM_AWKD 0x50
92#define PM_SSC 0x54
93
94/* GPIO */
95
96#define GPIO_OUTPUT_VAL 0x00
97#define GPIO_OUTPUT_ENABLE 0x04
98#define GPIO_OUTPUT_OPEN_DRAIN 0x08
99#define GPIO_OUTPUT_INVERT 0x0C
100#define GPIO_OUTPUT_AUX1 0x10
101#define GPIO_OUTPUT_AUX2 0x14
102#define GPIO_PULL_UP 0x18
103#define GPIO_PULL_DOWN 0x1C
104#define GPIO_INPUT_ENABLE 0x20
105#define GPIO_INPUT_INVERT 0x24
106#define GPIO_INPUT_FILTER 0x28
107#define GPIO_INPUT_EVENT_COUNT 0x2C
108#define GPIO_READ_BACK 0x30
109#define GPIO_INPUT_AUX1 0x34
110#define GPIO_EVENTS_ENABLE 0x38
111#define GPIO_LOCK_ENABLE 0x3C
112#define GPIO_POSITIVE_EDGE_EN 0x40
113#define GPIO_NEGATIVE_EDGE_EN 0x44
114#define GPIO_POSITIVE_EDGE_STS 0x48
115#define GPIO_NEGATIVE_EDGE_STS 0x4C
116
117#define GPIO_MAP_X 0xE0
118#define GPIO_MAP_Y 0xE4
119#define GPIO_MAP_Z 0xE8
120#define GPIO_MAP_W 0xEC
121
122extern void geode_gpio_set(unsigned int, unsigned int);
123extern void geode_gpio_clear(unsigned int, unsigned int);
124extern int geode_gpio_isset(unsigned int, unsigned int);
125extern void geode_gpio_setup_event(unsigned int, int, int);
126extern void geode_gpio_set_irq(unsigned int, unsigned int);
127
128static inline void geode_gpio_event_irq(unsigned int gpio, int pair)
129{
130 geode_gpio_setup_event(gpio, pair, 0);
131}
132
133static inline void geode_gpio_event_pme(unsigned int gpio, int pair)
134{
135 geode_gpio_setup_event(gpio, pair, 1);
136}
137
138/* Specific geode tests */
139
140static inline int is_geode_gx(void)
141{
142 return ((boot_cpu_data.x86_vendor == X86_VENDOR_NSC) &&
143 (boot_cpu_data.x86 == 5) &&
144 (boot_cpu_data.x86_model == 5));
145}
146
147static inline int is_geode_lx(void)
148{
149 return ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
150 (boot_cpu_data.x86 == 5) &&
151 (boot_cpu_data.x86_model == 10));
152}
153
154static inline int is_geode(void)
155{
156 return (is_geode_gx() || is_geode_lx());
157}
158
159#endif
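
For illustration only, a minimal sketch of how a driver might use the helpers declared above; it is not part of the patch, and the GPIO number (6) and PIC pair (2) are made-up example values:

/* Sketch: toggle a GPIO and route its event through the southbridge,
 * using only the <asm/geode.h> interface introduced above.
 */
#include <linux/errno.h>
#include <asm/geode.h>

static int example_geode_gpio_init(void)
{
	if (!is_geode())			/* only meaningful on GX/LX parts */
		return -ENODEV;

	if (!geode_gpio_base())			/* GPIO LBAR not programmed yet */
		return -ENODEV;

	geode_gpio_set(6, GPIO_OUTPUT_ENABLE);	/* drive example GPIO 6 as an output */
	geode_gpio_set(6, GPIO_OUTPUT_VAL);	/* ...and set it high */

	geode_gpio_event_irq(6, 2);		/* route its event to example PIC pair 2 */

	return 0;
}
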
diff --git a/include/asm-i386/hpet.h b/include/asm-i386/hpet.h
index dddeedf504b7..c82dc7ed96b3 100644
--- a/include/asm-i386/hpet.h
+++ b/include/asm-i386/hpet.h
@@ -4,112 +4,82 @@
4 4
5#ifdef CONFIG_HPET_TIMER 5#ifdef CONFIG_HPET_TIMER
6 6
7#include <linux/errno.h>
8#include <linux/module.h>
9#include <linux/sched.h>
10#include <linux/kernel.h>
11#include <linux/param.h>
12#include <linux/string.h>
13#include <linux/mm.h>
14#include <linux/interrupt.h>
15#include <linux/time.h>
16#include <linux/delay.h>
17#include <linux/init.h>
18#include <linux/smp.h>
19
20#include <asm/io.h>
21#include <asm/smp.h>
22#include <asm/irq.h>
23#include <asm/msr.h>
24#include <asm/delay.h>
25#include <asm/mpspec.h>
26#include <asm/uaccess.h>
27#include <asm/processor.h>
28
29#include <linux/timex.h>
30
31/* 7/*
32 * Documentation on HPET can be found at: 8 * Documentation on HPET can be found at:
33 * http://www.intel.com/ial/home/sp/pcmmspec.htm 9 * http://www.intel.com/ial/home/sp/pcmmspec.htm
34 * ftp://download.intel.com/ial/home/sp/mmts098.pdf 10 * ftp://download.intel.com/ial/home/sp/mmts098.pdf
35 */ 11 */
36 12
37#define HPET_MMAP_SIZE 1024 13#define HPET_MMAP_SIZE 1024
38 14
39#define HPET_ID 0x000 15#define HPET_ID 0x000
40#define HPET_PERIOD 0x004 16#define HPET_PERIOD 0x004
41#define HPET_CFG 0x010 17#define HPET_CFG 0x010
42#define HPET_STATUS 0x020 18#define HPET_STATUS 0x020
43#define HPET_COUNTER 0x0f0 19#define HPET_COUNTER 0x0f0
44#define HPET_T0_CFG 0x100 20#define HPET_T0_CFG 0x100
45#define HPET_T0_CMP 0x108 21#define HPET_T0_CMP 0x108
46#define HPET_T0_ROUTE 0x110 22#define HPET_T0_ROUTE 0x110
47#define HPET_T1_CFG 0x120 23#define HPET_T1_CFG 0x120
48#define HPET_T1_CMP 0x128 24#define HPET_T1_CMP 0x128
49#define HPET_T1_ROUTE 0x130 25#define HPET_T1_ROUTE 0x130
50#define HPET_T2_CFG 0x140 26#define HPET_T2_CFG 0x140
51#define HPET_T2_CMP 0x148 27#define HPET_T2_CMP 0x148
52#define HPET_T2_ROUTE 0x150 28#define HPET_T2_ROUTE 0x150
53 29
54#define HPET_ID_LEGSUP 0x00008000 30#define HPET_ID_REV 0x000000ff
55#define HPET_ID_NUMBER 0x00001f00 31#define HPET_ID_NUMBER 0x00001f00
56#define HPET_ID_REV 0x000000ff 32#define HPET_ID_64BIT 0x00002000
33#define HPET_ID_LEGSUP 0x00008000
34#define HPET_ID_VENDOR 0xffff0000
57#define HPET_ID_NUMBER_SHIFT 8 35#define HPET_ID_NUMBER_SHIFT 8
36#define HPET_ID_VENDOR_SHIFT 16
58 37
59#define HPET_CFG_ENABLE 0x001 38#define HPET_ID_VENDOR_8086 0x8086
60#define HPET_CFG_LEGACY 0x002 39
40#define HPET_CFG_ENABLE 0x001
41#define HPET_CFG_LEGACY 0x002
61#define HPET_LEGACY_8254 2 42#define HPET_LEGACY_8254 2
62#define HPET_LEGACY_RTC 8 43#define HPET_LEGACY_RTC 8
63 44
64#define HPET_TN_ENABLE 0x004 45#define HPET_TN_LEVEL 0x0002
65#define HPET_TN_PERIODIC 0x008 46#define HPET_TN_ENABLE 0x0004
66#define HPET_TN_PERIODIC_CAP 0x010 47#define HPET_TN_PERIODIC 0x0008
67#define HPET_TN_SETVAL 0x040 48#define HPET_TN_PERIODIC_CAP 0x0010
68#define HPET_TN_32BIT 0x100 49#define HPET_TN_64BIT_CAP 0x0020
69 50#define HPET_TN_SETVAL 0x0040
70/* Use our own asm for 64 bit multiply/divide */ 51#define HPET_TN_32BIT 0x0100
71#define ASM_MUL64_REG(eax_out,edx_out,reg_in,eax_in) \ 52#define HPET_TN_ROUTE 0x3e00
72 __asm__ __volatile__("mull %2" \ 53#define HPET_TN_FSB 0x4000
73 :"=a" (eax_out), "=d" (edx_out) \ 54#define HPET_TN_FSB_CAP 0x8000
74 :"r" (reg_in), "0" (eax_in)) 55#define HPET_TN_ROUTE_SHIFT 9
75 56
76#define ASM_DIV64_REG(eax_out,edx_out,reg_in,eax_in,edx_in) \
77 __asm__ __volatile__("divl %2" \
78 :"=a" (eax_out), "=d" (edx_out) \
79 :"r" (reg_in), "0" (eax_in), "1" (edx_in))
80
81#define KERNEL_TICK_USEC (1000000UL/HZ) /* tick value in microsec */
82/* Max HPET Period is 10^8 femto sec as in HPET spec */ 57/* Max HPET Period is 10^8 femto sec as in HPET spec */
83#define HPET_MAX_PERIOD (100000000UL) 58#define HPET_MAX_PERIOD 100000000UL
84/* 59/*
85 * Min HPET period is 10^5 femto sec just for safety. If it is less than this, 60 * Min HPET period is 10^5 femto sec just for safety. If it is less than this,
86 * then 32 bit HPET counter wraps around in less than 0.5 sec. 61 * then 32 bit HPET counter wraps around in less than 0.5 sec.
87 */ 62 */
88#define HPET_MIN_PERIOD (100000UL) 63#define HPET_MIN_PERIOD 100000UL
89#define HPET_TICK_RATE (HZ * 100000UL)
90 64
91extern unsigned long hpet_address; /* hpet memory map physical address */ 65/* hpet memory map physical address */
66extern unsigned long hpet_address;
92extern int is_hpet_enabled(void); 67extern int is_hpet_enabled(void);
93
94#ifdef CONFIG_X86_64
95extern unsigned long hpet_tick; /* hpet clks count per tick */
96extern int hpet_use_timer;
97extern int hpet_rtc_timer_init(void);
98extern int hpet_enable(void); 68extern int hpet_enable(void);
99extern int is_hpet_capable(void);
100extern int hpet_readl(unsigned long a);
101#else
102extern int hpet_enable(void);
103#endif
104 69
105#ifdef CONFIG_HPET_EMULATE_RTC 70#ifdef CONFIG_HPET_EMULATE_RTC
71
72#include <linux/interrupt.h>
73
106extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask); 74extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask);
107extern int hpet_set_rtc_irq_bit(unsigned long bit_mask); 75extern int hpet_set_rtc_irq_bit(unsigned long bit_mask);
108extern int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec); 76extern int hpet_set_alarm_time(unsigned char hrs, unsigned char min,
77 unsigned char sec);
109extern int hpet_set_periodic_freq(unsigned long freq); 78extern int hpet_set_periodic_freq(unsigned long freq);
110extern int hpet_rtc_dropped_irq(void); 79extern int hpet_rtc_dropped_irq(void);
111extern int hpet_rtc_timer_init(void); 80extern int hpet_rtc_timer_init(void);
112extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id); 81extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id);
82
113#endif /* CONFIG_HPET_EMULATE_RTC */ 83#endif /* CONFIG_HPET_EMULATE_RTC */
114 84
115#else 85#else
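
The consolidated register and bit definitions above can be exercised directly once the HPET has been located; a hedged sketch (not from the patch), assuming hpet_address has already been filled in from ACPI:

/* Sketch: decode a few HPET_ID fields using the masks defined above.
 * Minimal error handling; NUM_TIM_CAP holds the index of the last
 * timer, hence the +1.
 */
#include <linux/kernel.h>
#include <asm/io.h>
#include <asm/hpet.h>

static void example_dump_hpet_id(void)
{
	void __iomem *hpet = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
	unsigned long id;

	if (!hpet)
		return;

	id = readl(hpet + HPET_ID);
	printk(KERN_INFO "HPET: rev 0x%02lx, %lu comparators, vendor 0x%04lx%s\n",
	       id & HPET_ID_REV,
	       ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1,
	       (id & HPET_ID_VENDOR) >> HPET_ID_VENDOR_SHIFT,
	       (id & HPET_ID_LEGSUP) ? ", legacy replacement capable" : "");

	iounmap(hpet);
}
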
diff --git a/include/asm-i386/i8253.h b/include/asm-i386/i8253.h
index 6cb0dd4dcdde..7577d058d86e 100644
--- a/include/asm-i386/i8253.h
+++ b/include/asm-i386/i8253.h
@@ -3,19 +3,15 @@
3 3
4#include <linux/clockchips.h> 4#include <linux/clockchips.h>
5 5
6/* i8253A PIT registers */
7#define PIT_MODE 0x43
8#define PIT_CH0 0x40
9#define PIT_CH2 0x42
10
6extern spinlock_t i8253_lock; 11extern spinlock_t i8253_lock;
7 12
8extern struct clock_event_device *global_clock_event; 13extern struct clock_event_device *global_clock_event;
9 14
10/** 15extern void setup_pit_timer(void);
11 * pit_interrupt_hook - hook into timer tick
12 * @regs: standard registers from interrupt
13 *
14 * Call the global clock event handler.
15 **/
16static inline void pit_interrupt_hook(void)
17{
18 global_clock_event->event_handler(global_clock_event);
19}
20 16
21#endif /* __ASM_I8253_H__ */ 17#endif /* __ASM_I8253_H__ */
diff --git a/include/asm-i386/mach-default/do_timer.h b/include/asm-i386/mach-default/do_timer.h
index 56e5689863ae..23ecda0b28a0 100644
--- a/include/asm-i386/mach-default/do_timer.h
+++ b/include/asm-i386/mach-default/do_timer.h
@@ -12,5 +12,5 @@
12 12
13static inline void do_timer_interrupt_hook(void) 13static inline void do_timer_interrupt_hook(void)
14{ 14{
15 pit_interrupt_hook(); 15 global_clock_event->event_handler(global_clock_event);
16} 16}
diff --git a/include/asm-i386/mach-default/io_ports.h b/include/asm-i386/mach-default/io_ports.h
index a96d9f6604ee..48540ba97166 100644
--- a/include/asm-i386/mach-default/io_ports.h
+++ b/include/asm-i386/mach-default/io_ports.h
@@ -7,11 +7,6 @@
7#ifndef _MACH_IO_PORTS_H 7#ifndef _MACH_IO_PORTS_H
8#define _MACH_IO_PORTS_H 8#define _MACH_IO_PORTS_H
9 9
10/* i8253A PIT registers */
11#define PIT_MODE 0x43
12#define PIT_CH0 0x40
13#define PIT_CH2 0x42
14
15/* i8259A PIC registers */ 10/* i8259A PIC registers */
16#define PIC_MASTER_CMD 0x20 11#define PIC_MASTER_CMD 0x20
17#define PIC_MASTER_IMR 0x21 12#define PIC_MASTER_IMR 0x21
diff --git a/include/asm-i386/mach-default/mach_reboot.h b/include/asm-i386/mach-default/mach_reboot.h
index a955e57ad016..e23fd9fbebb3 100644
--- a/include/asm-i386/mach-default/mach_reboot.h
+++ b/include/asm-i386/mach-default/mach_reboot.h
@@ -19,14 +19,37 @@ static inline void kb_wait(void)
19static inline void mach_reboot(void) 19static inline void mach_reboot(void)
20{ 20{
21 int i; 21 int i;
22
23 /* old method, works on most machines */
22 for (i = 0; i < 10; i++) { 24 for (i = 0; i < 10; i++) {
23 kb_wait(); 25 kb_wait();
24 udelay(50); 26 udelay(50);
27 outb(0xfe, 0x64); /* pulse reset low */
28 udelay(50);
29 }
30
31 /* New method: sets the "System flag" which, when set, indicates
32 * successful completion of the keyboard controller self-test (Basic
33 * Assurance Test, BAT). This is needed for some machines with no
34 * keyboard plugged in. This read-modify-write sequence sets only the
35 * system flag
36 */
37 for (i = 0; i < 10; i++) {
38 int cmd;
39
40 outb(0x20, 0x64); /* read Controller Command Byte */
41 udelay(50);
42 kb_wait();
43 udelay(50);
44 cmd = inb(0x60);
45 udelay(50);
46 kb_wait();
47 udelay(50);
25 outb(0x60, 0x64); /* write Controller Command Byte */ 48 outb(0x60, 0x64); /* write Controller Command Byte */
26 udelay(50); 49 udelay(50);
27 kb_wait(); 50 kb_wait();
28 udelay(50); 51 udelay(50);
29 outb(0x14, 0x60); /* set "System flag" */ 52 outb(cmd | 0x04, 0x60); /* set "System flag" */
30 udelay(50); 53 udelay(50);
31 kb_wait(); 54 kb_wait();
32 udelay(50); 55 udelay(50);
diff --git a/include/asm-i386/mach-voyager/do_timer.h b/include/asm-i386/mach-voyager/do_timer.h
index 60f9dcc15d54..bc2b58926308 100644
--- a/include/asm-i386/mach-voyager/do_timer.h
+++ b/include/asm-i386/mach-voyager/do_timer.h
@@ -12,7 +12,7 @@
12 **/ 12 **/
13static inline void do_timer_interrupt_hook(void) 13static inline void do_timer_interrupt_hook(void)
14{ 14{
15 pit_interrupt_hook(); 15 global_clock_event->event_handler(global_clock_event);
16 voyager_timer_interrupt(); 16 voyager_timer_interrupt();
17} 17}
18 18
diff --git a/include/asm-i386/mc146818rtc.h b/include/asm-i386/mc146818rtc.h
index 99a890047023..1613b42eaf58 100644
--- a/include/asm-i386/mc146818rtc.h
+++ b/include/asm-i386/mc146818rtc.h
@@ -6,6 +6,7 @@
6 6
7#include <asm/io.h> 7#include <asm/io.h>
8#include <asm/system.h> 8#include <asm/system.h>
9#include <asm/processor.h>
9#include <linux/mc146818rtc.h> 10#include <linux/mc146818rtc.h>
10 11
11#ifndef RTC_PORT 12#ifndef RTC_PORT
@@ -43,8 +44,10 @@ static inline void lock_cmos(unsigned char reg)
43 unsigned long new; 44 unsigned long new;
44 new = ((smp_processor_id()+1) << 8) | reg; 45 new = ((smp_processor_id()+1) << 8) | reg;
45 for (;;) { 46 for (;;) {
46 if (cmos_lock) 47 if (cmos_lock) {
48 cpu_relax();
47 continue; 49 continue;
50 }
48 if (__cmpxchg(&cmos_lock, 0, new, sizeof(cmos_lock)) == 0) 51 if (__cmpxchg(&cmos_lock, 0, new, sizeof(cmos_lock)) == 0)
49 return; 52 return;
50 } 53 }
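
The hunk above adds cpu_relax() to the busy-wait on cmos_lock. As a general illustration (not part of the patch), this is the usual shape of such a wait loop; without the hint the tight loop can starve a hyperthread sibling that is about to release the lock:

/* Sketch: generic spin-until-released loop with the CPU pause hint. */
#include <asm/processor.h>

static void example_spin_until_released(volatile unsigned long *lock)
{
	while (*lock)
		cpu_relax();	/* "rep; nop" on x86: be polite while spinning */
}
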
diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h
index 99cf5d3692a9..80ecc66b6d86 100644
--- a/include/asm-i386/page.h
+++ b/include/asm-i386/page.h
@@ -44,7 +44,6 @@
44extern int nx_enabled; 44extern int nx_enabled;
45 45
46#ifdef CONFIG_X86_PAE 46#ifdef CONFIG_X86_PAE
47extern unsigned long long __supported_pte_mask;
48typedef struct { unsigned long pte_low, pte_high; } pte_t; 47typedef struct { unsigned long pte_low, pte_high; } pte_t;
49typedef struct { unsigned long long pmd; } pmd_t; 48typedef struct { unsigned long long pmd; } pmd_t;
50typedef struct { unsigned long long pgd; } pgd_t; 49typedef struct { unsigned long long pgd; } pgd_t;
diff --git a/include/asm-i386/pci.h b/include/asm-i386/pci.h
index 392d3fe5d45e..d790343e9982 100644
--- a/include/asm-i386/pci.h
+++ b/include/asm-i386/pci.h
@@ -3,6 +3,11 @@
3 3
4 4
5#ifdef __KERNEL__ 5#ifdef __KERNEL__
6
7struct pci_sysdata {
8 int node; /* NUMA node */
9};
10
6#include <linux/mm.h> /* for struct page */ 11#include <linux/mm.h> /* for struct page */
7 12
8/* Can be used to override the logic in pci_scan_bus for skipping 13/* Can be used to override the logic in pci_scan_bus for skipping
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 422cffef00c9..48a7f69bb767 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -88,7 +88,6 @@ struct cpuinfo_x86 {
88#define X86_VENDOR_UMC 3 88#define X86_VENDOR_UMC 3
89#define X86_VENDOR_NEXGEN 4 89#define X86_VENDOR_NEXGEN 4
90#define X86_VENDOR_CENTAUR 5 90#define X86_VENDOR_CENTAUR 5
91#define X86_VENDOR_RISE 6
92#define X86_VENDOR_TRANSMETA 7 91#define X86_VENDOR_TRANSMETA 7
93#define X86_VENDOR_NSC 8 92#define X86_VENDOR_NSC 8
94#define X86_VENDOR_NUM 9 93#define X86_VENDOR_NUM 9
diff --git a/include/asm-i386/resume-trace.h b/include/asm-i386/resume-trace.h
new file mode 100644
index 000000000000..ec9cfd656230
--- /dev/null
+++ b/include/asm-i386/resume-trace.h
@@ -0,0 +1,13 @@
1#define TRACE_RESUME(user) do { \
2 if (pm_trace_enabled) { \
3 void *tracedata; \
4 asm volatile("movl $1f,%0\n" \
5 ".section .tracedata,\"a\"\n" \
6 "1:\t.word %c1\n" \
7 "\t.long %c2\n" \
8 ".previous" \
9 :"=r" (tracedata) \
10 : "i" (__LINE__), "i" (__FILE__)); \
11 generate_resume_trace(tracedata, user); \
12 } \
13} while (0)
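
As an illustration of how the new TRACE_RESUME() marker is meant to be used, a sketch assuming a kernel built with PM_TRACE; the function below is hypothetical and not part of this patch:

/* Sketch: drop trace markers around a resume step so the last point
 * reached survives a hang and can be recovered after reboot.
 */
#include <linux/device.h>
#include <linux/resume-trace.h>

static int example_resume_one(struct device *dev)
{
	TRACE_DEVICE(dev);	/* record which device is being resumed */
	TRACE_RESUME(0);	/* mark entry into this resume step */

	/* ... reprogram the hardware here ... */

	TRACE_RESUME(1);	/* mark that the step completed */
	return 0;
}
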
diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h
index b9277361954b..a9b64453bdf5 100644
--- a/include/asm-i386/string.h
+++ b/include/asm-i386/string.h
@@ -2,203 +2,35 @@
2#define _I386_STRING_H_ 2#define _I386_STRING_H_
3 3
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5/*
6 * On a 486 or Pentium, we are better off not using the
7 * byte string operations. But on a 386 or a PPro the
8 * byte string ops are faster than doing it by hand
9 * (MUCH faster on a Pentium).
10 */
11
12/*
13 * This string-include defines all string functions as inline
14 * functions. Use gcc. It also assumes ds=es=data space, this should be
15 * normal. Most of the string-functions are rather heavily hand-optimized,
16 * see especially strsep,strstr,str[c]spn. They should work, but are not
17 * very easy to understand. Everything is done entirely within the register
18 * set, making the functions fast and clean. String instructions have been
19 * used through-out, making for "slightly" unclear code :-)
20 *
21 * NO Copyright (C) 1991, 1992 Linus Torvalds,
22 * consider these trivial functions to be PD.
23 */
24 5
25/* AK: in fact I bet it would be better to move this stuff all out of line. 6/* Let gcc decide whether to inline or use the out of line functions */
26 */
27 7
28#define __HAVE_ARCH_STRCPY 8#define __HAVE_ARCH_STRCPY
29static inline char * strcpy(char * dest,const char *src) 9extern char *strcpy(char *dest, const char *src);
30{
31int d0, d1, d2;
32__asm__ __volatile__(
33 "1:\tlodsb\n\t"
34 "stosb\n\t"
35 "testb %%al,%%al\n\t"
36 "jne 1b"
37 : "=&S" (d0), "=&D" (d1), "=&a" (d2)
38 :"0" (src),"1" (dest) : "memory");
39return dest;
40}
41 10
42#define __HAVE_ARCH_STRNCPY 11#define __HAVE_ARCH_STRNCPY
43static inline char * strncpy(char * dest,const char *src,size_t count) 12extern char *strncpy(char *dest, const char *src, size_t count);
44{
45int d0, d1, d2, d3;
46__asm__ __volatile__(
47 "1:\tdecl %2\n\t"
48 "js 2f\n\t"
49 "lodsb\n\t"
50 "stosb\n\t"
51 "testb %%al,%%al\n\t"
52 "jne 1b\n\t"
53 "rep\n\t"
54 "stosb\n"
55 "2:"
56 : "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3)
57 :"0" (src),"1" (dest),"2" (count) : "memory");
58return dest;
59}
60 13
61#define __HAVE_ARCH_STRCAT 14#define __HAVE_ARCH_STRCAT
62static inline char * strcat(char * dest,const char * src) 15extern char *strcat(char *dest, const char *src);
63{
64int d0, d1, d2, d3;
65__asm__ __volatile__(
66 "repne\n\t"
67 "scasb\n\t"
68 "decl %1\n"
69 "1:\tlodsb\n\t"
70 "stosb\n\t"
71 "testb %%al,%%al\n\t"
72 "jne 1b"
73 : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
74 : "0" (src), "1" (dest), "2" (0), "3" (0xffffffffu):"memory");
75return dest;
76}
77 16
78#define __HAVE_ARCH_STRNCAT 17#define __HAVE_ARCH_STRNCAT
79static inline char * strncat(char * dest,const char * src,size_t count) 18extern char *strncat(char *dest, const char *src, size_t count);
80{
81int d0, d1, d2, d3;
82__asm__ __volatile__(
83 "repne\n\t"
84 "scasb\n\t"
85 "decl %1\n\t"
86 "movl %8,%3\n"
87 "1:\tdecl %3\n\t"
88 "js 2f\n\t"
89 "lodsb\n\t"
90 "stosb\n\t"
91 "testb %%al,%%al\n\t"
92 "jne 1b\n"
93 "2:\txorl %2,%2\n\t"
94 "stosb"
95 : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
96 : "0" (src),"1" (dest),"2" (0),"3" (0xffffffffu), "g" (count)
97 : "memory");
98return dest;
99}
100 19
101#define __HAVE_ARCH_STRCMP 20#define __HAVE_ARCH_STRCMP
102static inline int strcmp(const char * cs,const char * ct) 21extern int strcmp(const char *cs, const char *ct);
103{
104int d0, d1;
105register int __res;
106__asm__ __volatile__(
107 "1:\tlodsb\n\t"
108 "scasb\n\t"
109 "jne 2f\n\t"
110 "testb %%al,%%al\n\t"
111 "jne 1b\n\t"
112 "xorl %%eax,%%eax\n\t"
113 "jmp 3f\n"
114 "2:\tsbbl %%eax,%%eax\n\t"
115 "orb $1,%%al\n"
116 "3:"
117 :"=a" (__res), "=&S" (d0), "=&D" (d1)
118 :"1" (cs),"2" (ct)
119 :"memory");
120return __res;
121}
122 22
123#define __HAVE_ARCH_STRNCMP 23#define __HAVE_ARCH_STRNCMP
124static inline int strncmp(const char * cs,const char * ct,size_t count) 24extern int strncmp(const char *cs, const char *ct, size_t count);
125{
126register int __res;
127int d0, d1, d2;
128__asm__ __volatile__(
129 "1:\tdecl %3\n\t"
130 "js 2f\n\t"
131 "lodsb\n\t"
132 "scasb\n\t"
133 "jne 3f\n\t"
134 "testb %%al,%%al\n\t"
135 "jne 1b\n"
136 "2:\txorl %%eax,%%eax\n\t"
137 "jmp 4f\n"
138 "3:\tsbbl %%eax,%%eax\n\t"
139 "orb $1,%%al\n"
140 "4:"
141 :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
142 :"1" (cs),"2" (ct),"3" (count)
143 :"memory");
144return __res;
145}
146 25
147#define __HAVE_ARCH_STRCHR 26#define __HAVE_ARCH_STRCHR
148static inline char * strchr(const char * s, int c) 27extern char *strchr(const char *s, int c);
149{
150int d0;
151register char * __res;
152__asm__ __volatile__(
153 "movb %%al,%%ah\n"
154 "1:\tlodsb\n\t"
155 "cmpb %%ah,%%al\n\t"
156 "je 2f\n\t"
157 "testb %%al,%%al\n\t"
158 "jne 1b\n\t"
159 "movl $1,%1\n"
160 "2:\tmovl %1,%0\n\t"
161 "decl %0"
162 :"=a" (__res), "=&S" (d0)
163 :"1" (s),"0" (c)
164 :"memory");
165return __res;
166}
167 28
168#define __HAVE_ARCH_STRRCHR 29#define __HAVE_ARCH_STRRCHR
169static inline char * strrchr(const char * s, int c) 30extern char *strrchr(const char *s, int c);
170{
171int d0, d1;
172register char * __res;
173__asm__ __volatile__(
174 "movb %%al,%%ah\n"
175 "1:\tlodsb\n\t"
176 "cmpb %%ah,%%al\n\t"
177 "jne 2f\n\t"
178 "leal -1(%%esi),%0\n"
179 "2:\ttestb %%al,%%al\n\t"
180 "jne 1b"
181 :"=g" (__res), "=&S" (d0), "=&a" (d1)
182 :"0" (0),"1" (s),"2" (c)
183 :"memory");
184return __res;
185}
186 31
187#define __HAVE_ARCH_STRLEN 32#define __HAVE_ARCH_STRLEN
188static inline size_t strlen(const char * s) 33extern size_t strlen(const char *s);
189{
190int d0;
191register int __res;
192__asm__ __volatile__(
193 "repne\n\t"
194 "scasb\n\t"
195 "notl %0\n\t"
196 "decl %0"
197 :"=c" (__res), "=&D" (d0)
198 :"1" (s),"a" (0), "0" (0xffffffffu)
199 :"memory");
200return __res;
201}
202 34
203static __always_inline void * __memcpy(void * to, const void * from, size_t n) 35static __always_inline void * __memcpy(void * to, const void * from, size_t n)
204{ 36{
@@ -207,9 +39,7 @@ __asm__ __volatile__(
207 "rep ; movsl\n\t" 39 "rep ; movsl\n\t"
208 "movl %4,%%ecx\n\t" 40 "movl %4,%%ecx\n\t"
209 "andl $3,%%ecx\n\t" 41 "andl $3,%%ecx\n\t"
210#if 1 /* want to pay 2 byte penalty for a chance to skip microcoded rep? */
211 "jz 1f\n\t" 42 "jz 1f\n\t"
212#endif
213 "rep ; movsb\n\t" 43 "rep ; movsb\n\t"
214 "1:" 44 "1:"
215 : "=&c" (d0), "=&D" (d1), "=&S" (d2) 45 : "=&c" (d0), "=&D" (d1), "=&S" (d2)
@@ -328,23 +158,7 @@ void *memmove(void * dest,const void * src, size_t n);
328#define memcmp __builtin_memcmp 158#define memcmp __builtin_memcmp
329 159
330#define __HAVE_ARCH_MEMCHR 160#define __HAVE_ARCH_MEMCHR
331static inline void * memchr(const void * cs,int c,size_t count) 161extern void *memchr(const void * cs,int c,size_t count);
332{
333int d0;
334register void * __res;
335if (!count)
336 return NULL;
337__asm__ __volatile__(
338 "repne\n\t"
339 "scasb\n\t"
340 "je 1f\n\t"
341 "movl $1,%0\n"
342 "1:\tdecl %0"
343 :"=D" (__res), "=&c" (d0)
344 :"a" (c),"0" (cs),"1" (count)
345 :"memory");
346return __res;
347}
348 162
349static inline void * __memset_generic(void * s, char c,size_t count) 163static inline void * __memset_generic(void * s, char c,size_t count)
350{ 164{
@@ -386,29 +200,10 @@ return (s);
386 200
387/* Added by Gertjan van Wingerde to make minix and sysv module work */ 201/* Added by Gertjan van Wingerde to make minix and sysv module work */
388#define __HAVE_ARCH_STRNLEN 202#define __HAVE_ARCH_STRNLEN
389static inline size_t strnlen(const char * s, size_t count) 203extern size_t strnlen(const char * s, size_t count);
390{
391int d0;
392register int __res;
393__asm__ __volatile__(
394 "movl %2,%0\n\t"
395 "jmp 2f\n"
396 "1:\tcmpb $0,(%0)\n\t"
397 "je 3f\n\t"
398 "incl %0\n"
399 "2:\tdecl %1\n\t"
400 "cmpl $-1,%1\n\t"
401 "jne 1b\n"
402 "3:\tsubl %2,%0"
403 :"=a" (__res), "=&d" (d0)
404 :"c" (s),"1" (count)
405 :"memory");
406return __res;
407}
408/* end of additional stuff */ 204/* end of additional stuff */
409 205
410#define __HAVE_ARCH_STRSTR 206#define __HAVE_ARCH_STRSTR
411
412extern char *strstr(const char *cs, const char *ct); 207extern char *strstr(const char *cs, const char *ct);
413 208
414/* 209/*
@@ -474,19 +269,7 @@ __asm__ __volatile__( \
474 * find the first occurrence of byte 'c', or 1 past the area if none 269 * find the first occurrence of byte 'c', or 1 past the area if none
475 */ 270 */
476#define __HAVE_ARCH_MEMSCAN 271#define __HAVE_ARCH_MEMSCAN
477static inline void * memscan(void * addr, int c, size_t size) 272extern void *memscan(void * addr, int c, size_t size);
478{
479 if (!size)
480 return addr;
481 __asm__("repnz; scasb\n\t"
482 "jnz 1f\n\t"
483 "dec %%edi\n"
484 "1:"
485 : "=D" (addr), "=c" (size)
486 : "0" (addr), "1" (size), "a" (c)
487 : "memory");
488 return addr;
489}
490 273
491#endif /* __KERNEL__ */ 274#endif /* __KERNEL__ */
492 275
diff --git a/include/asm-i386/timer.h b/include/asm-i386/timer.h
index 51a713e33a9e..0db7e994fb8b 100644
--- a/include/asm-i386/timer.h
+++ b/include/asm-i386/timer.h
@@ -5,13 +5,11 @@
5 5
6#define TICK_SIZE (tick_nsec / 1000) 6#define TICK_SIZE (tick_nsec / 1000)
7 7
8void setup_pit_timer(void);
9unsigned long long native_sched_clock(void); 8unsigned long long native_sched_clock(void);
10unsigned long native_calculate_cpu_khz(void); 9unsigned long native_calculate_cpu_khz(void);
11 10
12extern int timer_ack; 11extern int timer_ack;
13extern int no_timer_check; 12extern int no_timer_check;
14extern int no_sync_cmos_clock;
15extern int recalibrate_cpu_khz(void); 13extern int recalibrate_cpu_khz(void);
16 14
17#ifndef CONFIG_PARAVIRT 15#ifndef CONFIG_PARAVIRT
diff --git a/include/asm-i386/tlbflush.h b/include/asm-i386/tlbflush.h
index fc525c5cd5a9..a50fa6741486 100644
--- a/include/asm-i386/tlbflush.h
+++ b/include/asm-i386/tlbflush.h
@@ -160,7 +160,11 @@ DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
160 native_flush_tlb_others(&mask, mm, va) 160 native_flush_tlb_others(&mask, mm, va)
161#endif 161#endif
162 162
163#define flush_tlb_kernel_range(start, end) flush_tlb_all() 163static inline void flush_tlb_kernel_range(unsigned long start,
164 unsigned long end)
165{
166 flush_tlb_all();
167}
164 168
165static inline void flush_tlb_pgtables(struct mm_struct *mm, 169static inline void flush_tlb_pgtables(struct mm_struct *mm,
166 unsigned long start, unsigned long end) 170 unsigned long start, unsigned long end)
diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h
index 7fc512d90ea8..19b2dafd0c81 100644
--- a/include/asm-i386/topology.h
+++ b/include/asm-i386/topology.h
@@ -67,7 +67,7 @@ static inline int node_to_first_cpu(int node)
67 return first_cpu(mask); 67 return first_cpu(mask);
68} 68}
69 69
70#define pcibus_to_node(bus) ((long) (bus)->sysdata) 70#define pcibus_to_node(bus) ((struct pci_sysdata *)((bus)->sysdata))->node
71#define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus)) 71#define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus))
72 72
73/* sched_domains SD_NODE_INIT for NUMAQ machines */ 73/* sched_domains SD_NODE_INIT for NUMAQ machines */
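
With pcibus_to_node() now reading the node out of pci_sysdata, node-local allocation for a device becomes straightforward. A hedged sketch, not from the patch, assuming a negative node value means "no node recorded":

/* Sketch: allocate a buffer on the NUMA node the device's bus hangs off. */
#include <linux/pci.h>
#include <linux/slab.h>
#include <asm/topology.h>

static void *example_alloc_near_device(struct pci_dev *pdev, size_t size)
{
	int node = pcibus_to_node(pdev->bus);

	if (node < 0)				/* no node information recorded */
		return kmalloc(size, GFP_KERNEL);

	return kmalloc_node(size, GFP_KERNEL, node);
}
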
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index e2aa5e0d0cc7..d2a4f7be9c2c 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -581,7 +581,7 @@ long __must_check __strncpy_from_user(char *dst,
581 * If there is a limit on the length of a valid string, you may wish to 581 * If there is a limit on the length of a valid string, you may wish to
582 * consider using strnlen_user() instead. 582 * consider using strnlen_user() instead.
583 */ 583 */
584#define strlen_user(str) strnlen_user(str, ~0UL >> 1) 584#define strlen_user(str) strnlen_user(str, LONG_MAX)
585 585
586long strnlen_user(const char __user *str, long n); 586long strnlen_user(const char __user *str, long n);
587unsigned long __must_check clear_user(void __user *mem, unsigned long len); 587unsigned long __must_check clear_user(void __user *mem, unsigned long len);
diff --git a/include/asm-sparc/irq.h b/include/asm-sparc/irq.h
index ff520ea97473..afb88a5973f0 100644
--- a/include/asm-sparc/irq.h
+++ b/include/asm-sparc/irq.h
@@ -7,178 +7,16 @@
7#ifndef _SPARC_IRQ_H 7#ifndef _SPARC_IRQ_H
8#define _SPARC_IRQ_H 8#define _SPARC_IRQ_H
9 9
10#include <linux/linkage.h>
11#include <linux/threads.h> /* For NR_CPUS */
12#include <linux/interrupt.h> 10#include <linux/interrupt.h>
13 11
14#include <asm/system.h> /* For SUN4M_NCPUS */
15#include <asm/btfixup.h>
16
17#define __irq_ino(irq) irq
18#define __irq_pil(irq) irq
19
20#define NR_IRQS 16 12#define NR_IRQS 16
21 13
22#define irq_canonicalize(irq) (irq) 14#define irq_canonicalize(irq) (irq)
23 15
24/* Dave Redman (djhr@tadpole.co.uk) 16extern void disable_irq_nosync(unsigned int irq);
25 * changed these to function pointers.. it saves cycles and will allow 17extern void disable_irq(unsigned int irq);
26 * the irq dependencies to be split into different files at a later date 18extern void enable_irq(unsigned int irq);
27 * sun4c_irq.c, sun4m_irq.c etc so we could reduce the kernel size.
28 * Jakub Jelinek (jj@sunsite.mff.cuni.cz)
29 * Changed these to btfixup entities... It saves cycles :)
30 */
31BTFIXUPDEF_CALL(void, disable_irq, unsigned int)
32BTFIXUPDEF_CALL(void, enable_irq, unsigned int)
33BTFIXUPDEF_CALL(void, disable_pil_irq, unsigned int)
34BTFIXUPDEF_CALL(void, enable_pil_irq, unsigned int)
35BTFIXUPDEF_CALL(void, clear_clock_irq, void)
36BTFIXUPDEF_CALL(void, clear_profile_irq, int)
37BTFIXUPDEF_CALL(void, load_profile_irq, int, unsigned int)
38
39static inline void disable_irq_nosync(unsigned int irq)
40{
41 BTFIXUP_CALL(disable_irq)(irq);
42}
43
44static inline void disable_irq(unsigned int irq)
45{
46 BTFIXUP_CALL(disable_irq)(irq);
47}
48
49static inline void enable_irq(unsigned int irq)
50{
51 BTFIXUP_CALL(enable_irq)(irq);
52}
53
54static inline void disable_pil_irq(unsigned int irq)
55{
56 BTFIXUP_CALL(disable_pil_irq)(irq);
57}
58
59static inline void enable_pil_irq(unsigned int irq)
60{
61 BTFIXUP_CALL(enable_pil_irq)(irq);
62}
63
64static inline void clear_clock_irq(void)
65{
66 BTFIXUP_CALL(clear_clock_irq)();
67}
68
69static inline void clear_profile_irq(int irq)
70{
71 BTFIXUP_CALL(clear_profile_irq)(irq);
72}
73
74static inline void load_profile_irq(int cpu, int limit)
75{
76 BTFIXUP_CALL(load_profile_irq)(cpu, limit);
77}
78
79extern void (*sparc_init_timers)(irq_handler_t lvl10_irq);
80extern void claim_ticker14(irq_handler_t irq_handler,
81 int irq,
82 unsigned int timeout);
83
84#ifdef CONFIG_SMP
85BTFIXUPDEF_CALL(void, set_cpu_int, int, int)
86BTFIXUPDEF_CALL(void, clear_cpu_int, int, int)
87BTFIXUPDEF_CALL(void, set_irq_udt, int)
88
89#define set_cpu_int(cpu,level) BTFIXUP_CALL(set_cpu_int)(cpu,level)
90#define clear_cpu_int(cpu,level) BTFIXUP_CALL(clear_cpu_int)(cpu,level)
91#define set_irq_udt(cpu) BTFIXUP_CALL(set_irq_udt)(cpu)
92#endif
93 19
94extern int request_fast_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, __const__ char *devname); 20extern int request_fast_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, __const__ char *devname);
95 21
96/* On the sun4m, just like the timers, we have both per-cpu and master
97 * interrupt registers.
98 */
99
100/* These registers are used for sending/receiving irqs from/to
101 * different cpu's.
102 */
103struct sun4m_intreg_percpu {
104 unsigned int tbt; /* Interrupts still pending for this cpu. */
105
106 /* These next two registers are WRITE-ONLY and are only
107 * "on bit" sensitive, "off bits" written have NO affect.
108 */
109 unsigned int clear; /* Clear this cpus irqs here. */
110 unsigned int set; /* Set this cpus irqs here. */
111 unsigned char space[PAGE_SIZE - 12];
112};
113
114/*
115 * djhr
116 * Actually the clear and set fields in this struct are misleading..
117 * according to the SLAVIO manual (and the same applies for the SEC)
118 * the clear field clears bits in the mask which will ENABLE that IRQ
119 * the set field sets bits in the mask to DISABLE the IRQ.
120 *
121 * Also the undirected_xx address in the SLAVIO is defined as
122 * RESERVED and write only..
123 *
124 * DAVEM_NOTE: The SLAVIO only specifies behavior on uniprocessor
125 * sun4m machines, for MP the layout makes more sense.
126 */
127struct sun4m_intregs {
128 struct sun4m_intreg_percpu cpu_intregs[SUN4M_NCPUS];
129 unsigned int tbt; /* IRQ's that are still pending. */
130 unsigned int irqs; /* Master IRQ bits. */
131
132 /* Again, like the above, these two registers are WRITE-ONLY. */
133 unsigned int clear; /* Clear master IRQ's by setting bits here. */
134 unsigned int set; /* Set master IRQ's by setting bits here. */
135
136 /* This register is both READ and WRITE. */
137 unsigned int undirected_target; /* Which cpu gets undirected irqs. */
138};
139
140extern struct sun4m_intregs *sun4m_interrupts;
141
142/*
143 * Bit field defines for the interrupt registers on various
144 * Sparc machines.
145 */
146
147/* The sun4c interrupt register. */
148#define SUN4C_INT_ENABLE 0x01 /* Allow interrupts. */
149#define SUN4C_INT_E14 0x80 /* Enable level 14 IRQ. */
150#define SUN4C_INT_E10 0x20 /* Enable level 10 IRQ. */
151#define SUN4C_INT_E8 0x10 /* Enable level 8 IRQ. */
152#define SUN4C_INT_E6 0x08 /* Enable level 6 IRQ. */
153#define SUN4C_INT_E4 0x04 /* Enable level 4 IRQ. */
154#define SUN4C_INT_E1 0x02 /* Enable level 1 IRQ. */
155
156/* Dave Redman (djhr@tadpole.co.uk)
157 * The sun4m interrupt registers.
158 */
159#define SUN4M_INT_ENABLE 0x80000000
160#define SUN4M_INT_E14 0x00000080
161#define SUN4M_INT_E10 0x00080000
162
163#define SUN4M_HARD_INT(x) (0x000000001 << (x))
164#define SUN4M_SOFT_INT(x) (0x000010000 << (x))
165
166#define SUN4M_INT_MASKALL 0x80000000 /* mask all interrupts */
167#define SUN4M_INT_MODULE_ERR 0x40000000 /* module error */
168#define SUN4M_INT_M2S_WRITE 0x20000000 /* write buffer error */
169#define SUN4M_INT_ECC 0x10000000 /* ecc memory error */
170#define SUN4M_INT_FLOPPY 0x00400000 /* floppy disk */
171#define SUN4M_INT_MODULE 0x00200000 /* module interrupt */
172#define SUN4M_INT_VIDEO 0x00100000 /* onboard video */
173#define SUN4M_INT_REALTIME 0x00080000 /* system timer */
174#define SUN4M_INT_SCSI 0x00040000 /* onboard scsi */
175#define SUN4M_INT_AUDIO 0x00020000 /* audio/isdn */
176#define SUN4M_INT_ETHERNET 0x00010000 /* onboard ethernet */
177#define SUN4M_INT_SERIAL 0x00008000 /* serial ports */
178#define SUN4M_INT_KBDMS 0x00004000 /* keyboard/mouse */
179#define SUN4M_INT_SBUSBITS 0x00003F80 /* sbus int bits */
180
181#define SUN4M_INT_SBUS(x) (1 << (x+7))
182#define SUN4M_INT_VME(x) (1 << (x))
183
184#endif 22#endif
diff --git a/include/asm-sparc/pgtable.h b/include/asm-sparc/pgtable.h
index a55f4c3488b0..2cc235b74d94 100644
--- a/include/asm-sparc/pgtable.h
+++ b/include/asm-sparc/pgtable.h
@@ -46,7 +46,6 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
46#define pgd_ERROR(e) __builtin_trap() 46#define pgd_ERROR(e) __builtin_trap()
47 47
48BTFIXUPDEF_INT(page_none) 48BTFIXUPDEF_INT(page_none)
49BTFIXUPDEF_INT(page_shared)
50BTFIXUPDEF_INT(page_copy) 49BTFIXUPDEF_INT(page_copy)
51BTFIXUPDEF_INT(page_readonly) 50BTFIXUPDEF_INT(page_readonly)
52BTFIXUPDEF_INT(page_kernel) 51BTFIXUPDEF_INT(page_kernel)
@@ -66,7 +65,7 @@ BTFIXUPDEF_INT(page_kernel)
66#define PTE_SIZE (PTRS_PER_PTE*4) 65#define PTE_SIZE (PTRS_PER_PTE*4)
67 66
68#define PAGE_NONE __pgprot(BTFIXUP_INT(page_none)) 67#define PAGE_NONE __pgprot(BTFIXUP_INT(page_none))
69#define PAGE_SHARED __pgprot(BTFIXUP_INT(page_shared)) 68extern pgprot_t PAGE_SHARED;
70#define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy)) 69#define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
71#define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly)) 70#define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
72 71
diff --git a/include/asm-x86_64/acpi.h b/include/asm-x86_64/acpi.h
index a29f05087a31..1da8f49c0fe2 100644
--- a/include/asm-x86_64/acpi.h
+++ b/include/asm-x86_64/acpi.h
@@ -29,6 +29,7 @@
29#ifdef __KERNEL__ 29#ifdef __KERNEL__
30 30
31#include <acpi/pdc_intel.h> 31#include <acpi/pdc_intel.h>
32#include <asm/numa.h>
32 33
33#define COMPILER_DEPENDENT_INT64 long long 34#define COMPILER_DEPENDENT_INT64 long long
34#define COMPILER_DEPENDENT_UINT64 unsigned long long 35#define COMPILER_DEPENDENT_UINT64 unsigned long long
@@ -141,6 +142,16 @@ extern int acpi_pci_disabled;
141extern int acpi_skip_timer_override; 142extern int acpi_skip_timer_override;
142extern int acpi_use_timer_override; 143extern int acpi_use_timer_override;
143 144
145#ifdef CONFIG_ACPI_NUMA
146extern void __init acpi_fake_nodes(const struct bootnode *fake_nodes,
147 int num_nodes);
148#else
149static inline void acpi_fake_nodes(const struct bootnode *fake_nodes,
150 int num_nodes)
151{
152}
153#endif
154
144#endif /*__KERNEL__*/ 155#endif /*__KERNEL__*/
145 156
146#endif /*_ASM_ACPI_H*/ 157#endif /*_ASM_ACPI_H*/
diff --git a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h
index 45e9fca1febc..85125ef3c414 100644
--- a/include/asm-x86_64/apic.h
+++ b/include/asm-x86_64/apic.h
@@ -83,8 +83,10 @@ extern void disable_APIC_timer(void);
83extern void enable_APIC_timer(void); 83extern void enable_APIC_timer(void);
84extern void setup_apic_routing(void); 84extern void setup_apic_routing(void);
85 85
86extern void setup_APIC_extened_lvt(unsigned char lvt_off, unsigned char vector, 86extern void setup_APIC_extended_lvt(unsigned char lvt_off, unsigned char vector,
87 unsigned char msg_type, unsigned char mask); 87 unsigned char msg_type, unsigned char mask);
88
89extern int apic_is_clustered_box(void);
88 90
89#define K8_APIC_EXT_LVT_BASE 0x500 91#define K8_APIC_EXT_LVT_BASE 0x500
90#define K8_APIC_EXT_INT_MSG_FIX 0x0 92#define K8_APIC_EXT_INT_MSG_FIX 0x0
diff --git a/include/asm-x86_64/auxvec.h b/include/asm-x86_64/auxvec.h
index 2403c4cfced2..1d5ab0d03950 100644
--- a/include/asm-x86_64/auxvec.h
+++ b/include/asm-x86_64/auxvec.h
@@ -1,4 +1,6 @@
1#ifndef __ASM_X86_64_AUXVEC_H 1#ifndef __ASM_X86_64_AUXVEC_H
2#define __ASM_X86_64_AUXVEC_H 2#define __ASM_X86_64_AUXVEC_H
3 3
4#define AT_SYSINFO_EHDR 33
5
4#endif 6#endif
diff --git a/include/asm-x86_64/calgary.h b/include/asm-x86_64/calgary.h
index 4d5747a0923c..67f60406e2d8 100644
--- a/include/asm-x86_64/calgary.h
+++ b/include/asm-x86_64/calgary.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Derived from include/asm-powerpc/iommu.h 2 * Derived from include/asm-powerpc/iommu.h
3 * 3 *
4 * Copyright (C) IBM Corporation, 2006 4 * Copyright IBM Corporation, 2006-2007
5 * 5 *
6 * Author: Jon Mason <jdmason@us.ibm.com> 6 * Author: Jon Mason <jdmason@us.ibm.com>
7 * Author: Muli Ben-Yehuda <muli@il.ibm.com> 7 * Author: Muli Ben-Yehuda <muli@il.ibm.com>
@@ -31,6 +31,7 @@
31#include <asm/types.h> 31#include <asm/types.h>
32 32
33struct iommu_table { 33struct iommu_table {
34 struct cal_chipset_ops *chip_ops; /* chipset specific funcs */
34 unsigned long it_base; /* mapped address of tce table */ 35 unsigned long it_base; /* mapped address of tce table */
35 unsigned long it_hint; /* Hint for next alloc */ 36 unsigned long it_hint; /* Hint for next alloc */
36 unsigned long *it_map; /* A simple allocation bitmap for now */ 37 unsigned long *it_map; /* A simple allocation bitmap for now */
@@ -42,6 +43,12 @@ struct iommu_table {
42 unsigned char it_busno; /* Bus number this table belongs to */ 43 unsigned char it_busno; /* Bus number this table belongs to */
43}; 44};
44 45
46struct cal_chipset_ops {
47 void (*handle_quirks)(struct iommu_table *tbl, struct pci_dev *dev);
48 void (*tce_cache_blast)(struct iommu_table *tbl);
49 void (*dump_error_regs)(struct iommu_table *tbl);
50};
51
45#define TCE_TABLE_SIZE_UNSPECIFIED ~0 52#define TCE_TABLE_SIZE_UNSPECIFIED ~0
46#define TCE_TABLE_SIZE_64K 0 53#define TCE_TABLE_SIZE_64K 0
47#define TCE_TABLE_SIZE_128K 1 54#define TCE_TABLE_SIZE_128K 1
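
The new cal_chipset_ops lets the generic Calgary code stay chipset-agnostic; a sketch of how a chipset might wire up its handlers (the three stub functions are hypothetical, not from the patch):

/* Sketch: per-chipset quirk/flush/dump handlers hung off iommu_table. */
#include <linux/pci.h>
#include <asm/calgary.h>

static void example_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev)
{
	/* apply per-device errata here */
}

static void example_tce_cache_blast(struct iommu_table *tbl)
{
	/* force the chipset to drop its cached TCEs */
}

static void example_dump_error_regs(struct iommu_table *tbl)
{
	/* print chipset error state for debugging */
}

static struct cal_chipset_ops example_chip_ops = {
	.handle_quirks		= example_handle_quirks,
	.tce_cache_blast	= example_tce_cache_blast,
	.dump_error_regs	= example_dump_error_regs,
};

/* during table setup the owner would then do: tbl->chip_ops = &example_chip_ops; */
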
diff --git a/include/asm-x86_64/dmi.h b/include/asm-x86_64/dmi.h
index 93b2b15d4325..d02e32e3c3f0 100644
--- a/include/asm-x86_64/dmi.h
+++ b/include/asm-x86_64/dmi.h
@@ -3,15 +3,12 @@
3 3
4#include <asm/io.h> 4#include <asm/io.h>
5 5
6extern void *dmi_ioremap(unsigned long addr, unsigned long size);
7extern void dmi_iounmap(void *addr, unsigned long size);
8
9#define DMI_MAX_DATA 2048 6#define DMI_MAX_DATA 2048
10 7
11extern int dmi_alloc_index; 8extern int dmi_alloc_index;
12extern char dmi_alloc_data[DMI_MAX_DATA]; 9extern char dmi_alloc_data[DMI_MAX_DATA];
13 10
14/* This is so early that there is no good way to allocate dynamic memory. 11/* This is so early that there is no good way to allocate dynamic memory.
15 Allocate data in an BSS array. */ 12 Allocate data in an BSS array. */
16static inline void *dmi_alloc(unsigned len) 13static inline void *dmi_alloc(unsigned len)
17{ 14{
diff --git a/include/asm-x86_64/elf.h b/include/asm-x86_64/elf.h
index 6d24ea7c4d9d..b4fbe47f6ccd 100644
--- a/include/asm-x86_64/elf.h
+++ b/include/asm-x86_64/elf.h
@@ -162,6 +162,19 @@ extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
162/* 1GB for 64bit, 8MB for 32bit */ 162/* 1GB for 64bit, 8MB for 32bit */
163#define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff) 163#define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff)
164 164
165
166#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
167struct linux_binprm;
168extern int arch_setup_additional_pages(struct linux_binprm *bprm,
169 int executable_stack);
170
171extern int vdso_enabled;
172
173#define ARCH_DLINFO \
174do if (vdso_enabled) { \
175 NEW_AUX_ENT(AT_SYSINFO_EHDR,(unsigned long)current->mm->context.vdso);\
176} while (0)
177
165#endif 178#endif
166 179
167#endif 180#endif
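
ARCH_DLINFO above publishes the vDSO address to every new process through the ELF auxiliary vector. For illustration, a small userspace sketch (not kernel code, not part of the patch) that locates the AT_SYSINFO_EHDR entry:

/* Userspace sketch: walk the auxv that follows the environment block. */
#include <stdio.h>
#include <elf.h>

int main(int argc, char **argv, char **envp)
{
	Elf64_auxv_t *auxv;

	while (*envp++)
		;	/* skip past the environment pointers and their NULL terminator */

	for (auxv = (Elf64_auxv_t *)envp; auxv->a_type != AT_NULL; auxv++)
		if (auxv->a_type == AT_SYSINFO_EHDR)
			printf("vDSO ELF header at %#lx\n",
			       (unsigned long)auxv->a_un.a_val);
	return 0;
}
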
diff --git a/include/asm-x86_64/fixmap.h b/include/asm-x86_64/fixmap.h
index 2acb9b7f6418..cdfbe4a6ae6f 100644
--- a/include/asm-x86_64/fixmap.h
+++ b/include/asm-x86_64/fixmap.h
@@ -22,9 +22,9 @@
22 * compile time, but to set the physical address only 22 * compile time, but to set the physical address only
23 * in the boot process. 23 * in the boot process.
24 * 24 *
25 * these 'compile-time allocated' memory buffers are 25 * These 'compile-time allocated' memory buffers are
26 * fixed-size 4k pages. (or larger if used with an increment 26 * fixed-size 4k pages (or larger if used with an increment
27 * highger than 1) use fixmap_set(idx,phys) to associate 27 * higher than 1). Use set_fixmap(idx,phys) to associate
28 * physical memory with fixmap indices. 28 * physical memory with fixmap indices.
29 * 29 *
30 * TLB entries of such buffers will not be flushed across 30 * TLB entries of such buffers will not be flushed across
diff --git a/include/asm-x86_64/hpet.h b/include/asm-x86_64/hpet.h
index 59a66f084611..79bb950f82c5 100644
--- a/include/asm-x86_64/hpet.h
+++ b/include/asm-x86_64/hpet.h
@@ -1,78 +1,18 @@
1#ifndef _ASM_X8664_HPET_H 1#ifndef _ASM_X8664_HPET_H
2#define _ASM_X8664_HPET_H 1 2#define _ASM_X8664_HPET_H 1
3 3
4/* 4#include <asm-i386/hpet.h>
5 * Documentation on HPET can be found at:
6 * http://www.intel.com/ial/home/sp/pcmmspec.htm
7 * ftp://download.intel.com/ial/home/sp/mmts098.pdf
8 */
9
10#define HPET_MMAP_SIZE 1024
11
12#define HPET_ID 0x000
13#define HPET_PERIOD 0x004
14#define HPET_CFG 0x010
15#define HPET_STATUS 0x020
16#define HPET_COUNTER 0x0f0
17#define HPET_Tn_OFFSET 0x20
18#define HPET_Tn_CFG(n) (0x100 + (n) * HPET_Tn_OFFSET)
19#define HPET_Tn_ROUTE(n) (0x104 + (n) * HPET_Tn_OFFSET)
20#define HPET_Tn_CMP(n) (0x108 + (n) * HPET_Tn_OFFSET)
21#define HPET_T0_CFG HPET_Tn_CFG(0)
22#define HPET_T0_CMP HPET_Tn_CMP(0)
23#define HPET_T1_CFG HPET_Tn_CFG(1)
24#define HPET_T1_CMP HPET_Tn_CMP(1)
25
26#define HPET_ID_VENDOR 0xffff0000
27#define HPET_ID_LEGSUP 0x00008000
28#define HPET_ID_64BIT 0x00002000
29#define HPET_ID_NUMBER 0x00001f00
30#define HPET_ID_REV 0x000000ff
31#define HPET_ID_NUMBER_SHIFT 8
32
33#define HPET_ID_VENDOR_SHIFT 16
34#define HPET_ID_VENDOR_8086 0x8086
35
36#define HPET_CFG_ENABLE 0x001
37#define HPET_CFG_LEGACY 0x002
38#define HPET_LEGACY_8254 2
39#define HPET_LEGACY_RTC 8
40
41#define HPET_TN_LEVEL 0x0002
42#define HPET_TN_ENABLE 0x0004
43#define HPET_TN_PERIODIC 0x0008
44#define HPET_TN_PERIODIC_CAP 0x0010
45#define HPET_TN_64BIT_CAP 0x0020
46#define HPET_TN_SETVAL 0x0040
47#define HPET_TN_32BIT 0x0100
48#define HPET_TN_ROUTE 0x3e00
49#define HPET_TN_FSB 0x4000
50#define HPET_TN_FSB_CAP 0x8000
51
52#define HPET_TN_ROUTE_SHIFT 9
53 5
54#define HPET_TICK_RATE (HZ * 100000UL) 6#define HPET_TICK_RATE (HZ * 100000UL)
55 7
56extern int is_hpet_enabled(void);
57extern int hpet_rtc_timer_init(void); 8extern int hpet_rtc_timer_init(void);
58extern int apic_is_clustered_box(void);
59extern int hpet_arch_init(void); 9extern int hpet_arch_init(void);
60extern int hpet_timer_stop_set_go(unsigned long tick); 10extern int hpet_timer_stop_set_go(unsigned long tick);
61extern int hpet_reenable(void); 11extern int hpet_reenable(void);
62extern unsigned int hpet_calibrate_tsc(void); 12extern unsigned int hpet_calibrate_tsc(void);
63 13
64extern int hpet_use_timer; 14extern int hpet_use_timer;
65extern unsigned long hpet_address;
66extern unsigned long hpet_period; 15extern unsigned long hpet_period;
67extern unsigned long hpet_tick; 16extern unsigned long hpet_tick;
68 17
69#ifdef CONFIG_HPET_EMULATE_RTC
70extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask);
71extern int hpet_set_rtc_irq_bit(unsigned long bit_mask);
72extern int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec);
73extern int hpet_set_periodic_freq(unsigned long freq);
74extern int hpet_rtc_dropped_irq(void);
75extern int hpet_rtc_timer_init(void);
76#endif /* CONFIG_HPET_EMULATE_RTC */
77
78#endif 18#endif
diff --git a/include/asm-x86_64/hw_irq.h b/include/asm-x86_64/hw_irq.h
index 6153ae5df2e8..09dfc18a6dd0 100644
--- a/include/asm-x86_64/hw_irq.h
+++ b/include/asm-x86_64/hw_irq.h
@@ -95,6 +95,26 @@
95 95
96 96
97#ifndef __ASSEMBLY__ 97#ifndef __ASSEMBLY__
98
99/* Interrupt handlers registered during init_IRQ */
100void apic_timer_interrupt(void);
101void spurious_interrupt(void);
102void error_interrupt(void);
103void reschedule_interrupt(void);
104void call_function_interrupt(void);
105void irq_move_cleanup_interrupt(void);
106void invalidate_interrupt0(void);
107void invalidate_interrupt1(void);
108void invalidate_interrupt2(void);
109void invalidate_interrupt3(void);
110void invalidate_interrupt4(void);
111void invalidate_interrupt5(void);
112void invalidate_interrupt6(void);
113void invalidate_interrupt7(void);
114void thermal_interrupt(void);
115void threshold_interrupt(void);
116void i8254_timer_resume(void);
117
98typedef int vector_irq_t[NR_VECTORS]; 118typedef int vector_irq_t[NR_VECTORS];
99DECLARE_PER_CPU(vector_irq_t, vector_irq); 119DECLARE_PER_CPU(vector_irq_t, vector_irq);
100extern void __setup_vector_irq(int cpu); 120extern void __setup_vector_irq(int cpu);
diff --git a/include/asm-x86_64/i8253.h b/include/asm-x86_64/i8253.h
new file mode 100644
index 000000000000..015d8df07690
--- /dev/null
+++ b/include/asm-x86_64/i8253.h
@@ -0,0 +1,6 @@
1#ifndef __ASM_I8253_H__
2#define __ASM_I8253_H__
3
4extern spinlock_t i8253_lock;
5
6#endif /* __ASM_I8253_H__ */
diff --git a/include/asm-x86_64/iommu.h b/include/asm-x86_64/iommu.h
new file mode 100644
index 000000000000..5af471f228ee
--- /dev/null
+++ b/include/asm-x86_64/iommu.h
@@ -0,0 +1,29 @@
1#ifndef _ASM_X8664_IOMMU_H
2#define _ASM_X8664_IOMMU_H 1
3
4extern void pci_iommu_shutdown(void);
5extern void no_iommu_init(void);
6extern int force_iommu, no_iommu;
7extern int iommu_detected;
8#ifdef CONFIG_IOMMU
9extern void gart_iommu_init(void);
10extern void gart_iommu_shutdown(void);
11extern void __init gart_parse_options(char *);
12extern void iommu_hole_init(void);
13extern int fallback_aper_order;
14extern int fallback_aper_force;
15extern int iommu_aperture;
16extern int iommu_aperture_allowed;
17extern int iommu_aperture_disabled;
18extern int fix_aperture;
19#else
20#define iommu_aperture 0
21#define iommu_aperture_allowed 0
22
23static inline void gart_iommu_shutdown(void)
24{
25}
26
27#endif
28
29#endif
diff --git a/include/asm-x86_64/mce.h b/include/asm-x86_64/mce.h
index 177e92b4019b..556be5563e30 100644
--- a/include/asm-x86_64/mce.h
+++ b/include/asm-x86_64/mce.h
@@ -105,6 +105,8 @@ extern atomic_t mce_entry;
105 105
106extern void do_machine_check(struct pt_regs *, long); 106extern void do_machine_check(struct pt_regs *, long);
107 107
108extern int mce_notify_user(void);
109
108#endif 110#endif
109 111
110#endif 112#endif
diff --git a/include/asm-x86_64/mmu.h b/include/asm-x86_64/mmu.h
index 5dc6ed79859a..d2cd4a9d984d 100644
--- a/include/asm-x86_64/mmu.h
+++ b/include/asm-x86_64/mmu.h
@@ -15,6 +15,7 @@ typedef struct {
15 rwlock_t ldtlock; 15 rwlock_t ldtlock;
16 int size; 16 int size;
17 struct semaphore sem; 17 struct semaphore sem;
18 void *vdso;
18} mm_context_t; 19} mm_context_t;
19 20
20#endif 21#endif
diff --git a/include/asm-x86_64/pci.h b/include/asm-x86_64/pci.h
index bda94fd5176f..88926eb44f5c 100644
--- a/include/asm-x86_64/pci.h
+++ b/include/asm-x86_64/pci.h
@@ -5,6 +5,25 @@
5 5
6#ifdef __KERNEL__ 6#ifdef __KERNEL__
7 7
8struct pci_sysdata {
9 int node; /* NUMA node */
10 void* iommu; /* IOMMU private data */
11};
12
13#ifdef CONFIG_CALGARY_IOMMU
14static inline void* pci_iommu(struct pci_bus *bus)
15{
16 struct pci_sysdata *sd = bus->sysdata;
17 return sd->iommu;
18}
19
20static inline void set_pci_iommu(struct pci_bus *bus, void *val)
21{
22 struct pci_sysdata *sd = bus->sysdata;
23 sd->iommu = val;
24}
25#endif /* CONFIG_CALGARY_IOMMU */
26
8#include <linux/mm.h> /* for struct page */ 27#include <linux/mm.h> /* for struct page */
9 28
10/* Can be used to override the logic in pci_scan_bus for skipping 29/* Can be used to override the logic in pci_scan_bus for skipping
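
The pci_iommu()/set_pci_iommu() accessors above are only built with CONFIG_CALGARY_IOMMU; the intended pattern is roughly the following sketch (simplified, not from the patch):

/* Sketch: stash a per-bus iommu_table at setup time and fetch it back
 * from a device's bus on the DMA mapping path.
 */
#include <linux/pci.h>
#include <asm/calgary.h>

static void example_attach_table(struct pci_bus *bus, struct iommu_table *tbl)
{
	set_pci_iommu(bus, tbl);	/* remember the table for this bus */
}

static struct iommu_table *example_find_table(struct pci_dev *pdev)
{
	return pci_iommu(pdev->bus);	/* NULL if the bus is not behind Calgary */
}
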
diff --git a/include/asm-x86_64/pgalloc.h b/include/asm-x86_64/pgalloc.h
index 8bb564687860..b467be6d367f 100644
--- a/include/asm-x86_64/pgalloc.h
+++ b/include/asm-x86_64/pgalloc.h
@@ -4,6 +4,10 @@
4#include <asm/pda.h> 4#include <asm/pda.h>
5#include <linux/threads.h> 5#include <linux/threads.h>
6#include <linux/mm.h> 6#include <linux/mm.h>
7#include <linux/quicklist.h>
8
9#define QUICK_PGD 0 /* We preserve special mappings over free */
10#define QUICK_PT 1 /* Other page table pages that are zero on free */
7 11
8#define pmd_populate_kernel(mm, pmd, pte) \ 12#define pmd_populate_kernel(mm, pmd, pte) \
9 set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte))) 13 set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
@@ -20,23 +24,23 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *p
20static inline void pmd_free(pmd_t *pmd) 24static inline void pmd_free(pmd_t *pmd)
21{ 25{
22 BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); 26 BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
23 free_page((unsigned long)pmd); 27 quicklist_free(QUICK_PT, NULL, pmd);
24} 28}
25 29
26static inline pmd_t *pmd_alloc_one (struct mm_struct *mm, unsigned long addr) 30static inline pmd_t *pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
27{ 31{
28 return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); 32 return (pmd_t *)quicklist_alloc(QUICK_PT, GFP_KERNEL|__GFP_REPEAT, NULL);
29} 33}
30 34
31static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) 35static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
32{ 36{
33 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); 37 return (pud_t *)quicklist_alloc(QUICK_PT, GFP_KERNEL|__GFP_REPEAT, NULL);
34} 38}
35 39
36static inline void pud_free (pud_t *pud) 40static inline void pud_free (pud_t *pud)
37{ 41{
38 BUG_ON((unsigned long)pud & (PAGE_SIZE-1)); 42 BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
39 free_page((unsigned long)pud); 43 quicklist_free(QUICK_PT, NULL, pud);
40} 44}
41 45
42static inline void pgd_list_add(pgd_t *pgd) 46static inline void pgd_list_add(pgd_t *pgd)
@@ -57,41 +61,57 @@ static inline void pgd_list_del(pgd_t *pgd)
57 spin_unlock(&pgd_lock); 61 spin_unlock(&pgd_lock);
58} 62}
59 63
60static inline pgd_t *pgd_alloc(struct mm_struct *mm) 64static inline void pgd_ctor(void *x)
61{ 65{
62 unsigned boundary; 66 unsigned boundary;
63 pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT); 67 pgd_t *pgd = x;
64 if (!pgd) 68 struct page *page = virt_to_page(pgd);
65 return NULL; 69
66 pgd_list_add(pgd);
67 /* 70 /*
68 * Copy kernel pointers in from init. 71 * Copy kernel pointers in from init.
69 * Could keep a freelist or slab cache of those because the kernel
70 * part never changes.
71 */ 72 */
72 boundary = pgd_index(__PAGE_OFFSET); 73 boundary = pgd_index(__PAGE_OFFSET);
73 memset(pgd, 0, boundary * sizeof(pgd_t));
74 memcpy(pgd + boundary, 74 memcpy(pgd + boundary,
75 init_level4_pgt + boundary, 75 init_level4_pgt + boundary,
76 (PTRS_PER_PGD - boundary) * sizeof(pgd_t)); 76 (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
77
78 spin_lock(&pgd_lock);
79 list_add(&page->lru, &pgd_list);
80 spin_unlock(&pgd_lock);
81}
82
83static inline void pgd_dtor(void *x)
84{
85 pgd_t *pgd = x;
86 struct page *page = virt_to_page(pgd);
87
88 spin_lock(&pgd_lock);
89 list_del(&page->lru);
90 spin_unlock(&pgd_lock);
91}
92
93static inline pgd_t *pgd_alloc(struct mm_struct *mm)
94{
95 pgd_t *pgd = (pgd_t *)quicklist_alloc(QUICK_PGD,
96 GFP_KERNEL|__GFP_REPEAT, pgd_ctor);
77 return pgd; 97 return pgd;
78} 98}
79 99
80static inline void pgd_free(pgd_t *pgd) 100static inline void pgd_free(pgd_t *pgd)
81{ 101{
82 BUG_ON((unsigned long)pgd & (PAGE_SIZE-1)); 102 BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
83 pgd_list_del(pgd); 103 quicklist_free(QUICK_PGD, pgd_dtor, pgd);
84 free_page((unsigned long)pgd);
85} 104}
86 105
87static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) 106static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
88{ 107{
89 return (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); 108 return (pte_t *)quicklist_alloc(QUICK_PT, GFP_KERNEL|__GFP_REPEAT, NULL);
90} 109}
91 110
92static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) 111static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
93{ 112{
94 void *p = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); 113 void *p = (void *)quicklist_alloc(QUICK_PT, GFP_KERNEL|__GFP_REPEAT, NULL);
114
95 if (!p) 115 if (!p)
96 return NULL; 116 return NULL;
97 return virt_to_page(p); 117 return virt_to_page(p);
@@ -103,17 +123,22 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long add
103static inline void pte_free_kernel(pte_t *pte) 123static inline void pte_free_kernel(pte_t *pte)
104{ 124{
105 BUG_ON((unsigned long)pte & (PAGE_SIZE-1)); 125 BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
106 free_page((unsigned long)pte); 126 quicklist_free(QUICK_PT, NULL, pte);
107} 127}
108 128
109static inline void pte_free(struct page *pte) 129static inline void pte_free(struct page *pte)
110{ 130{
111 __free_page(pte); 131 quicklist_free_page(QUICK_PT, NULL, pte);
112} 132}
113 133
114#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte)) 134#define __pte_free_tlb(tlb,pte) quicklist_free_page(QUICK_PT, NULL,(pte))
115 135
116#define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x)) 136#define __pmd_free_tlb(tlb,x) quicklist_free(QUICK_PT, NULL, (x))
117#define __pud_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x)) 137#define __pud_free_tlb(tlb,x) quicklist_free(QUICK_PT, NULL, (x))
118 138
139static inline void check_pgt_cache(void)
140{
141 quicklist_trim(QUICK_PGD, pgd_dtor, 25, 16);
142 quicklist_trim(QUICK_PT, NULL, 25, 16);
143}
119#endif /* _X86_64_PGALLOC_H */ 144#endif /* _X86_64_PGALLOC_H */
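
The switch above relies on the generic quicklist API: pages freed to a quicklist are parked on a per-CPU list, the constructor only runs when a page is drawn fresh from the page allocator (which is why pgd_ctor is safe there), and check_pgt_cache() trims the surplus. A hedged sketch of the same pattern for a hypothetical zero-on-free cache; the names and list index are illustrative:

/* Sketch: a QUICK_PT-style cache; the trim numbers mirror the
 * check_pgt_cache() values above.
 */
#include <linux/quicklist.h>
#include <linux/gfp.h>

#define EXAMPLE_QL	1	/* zero-on-free objects, like QUICK_PT */

static void *example_table_alloc(void)
{
	/* NULL ctor: callers guarantee the page is zero when it is freed */
	return quicklist_alloc(EXAMPLE_QL, GFP_KERNEL | __GFP_REPEAT, NULL);
}

static void example_table_free(void *table)
{
	/* parked, still zeroed, on the per-CPU list for quick reuse */
	quicklist_free(EXAMPLE_QL, NULL, table);
}

static void example_trim(void)
{
	/* keep no more than 25 spare pages, free surplus 16 at a time */
	quicklist_trim(EXAMPLE_QL, NULL, 25, 16);
}
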
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 3ba53099297d..60cff1e4f7a3 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -409,7 +409,6 @@ extern int kern_addr_valid(unsigned long addr);
409#define HAVE_ARCH_UNMAPPED_AREA 409#define HAVE_ARCH_UNMAPPED_AREA
410 410
411#define pgtable_cache_init() do { } while (0) 411#define pgtable_cache_init() do { } while (0)
412#define check_pgt_cache() do { } while (0)
413 412
414#define PAGE_AGP PAGE_KERNEL_NOCACHE 413#define PAGE_AGP PAGE_KERNEL_NOCACHE
415#define HAVE_PAGE_AGP 1 414#define HAVE_PAGE_AGP 1
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index efc87a5aff7f..a1645bbc03bd 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -83,7 +83,6 @@ struct cpuinfo_x86 {
83#define X86_VENDOR_UMC 3 83#define X86_VENDOR_UMC 3
84#define X86_VENDOR_NEXGEN 4 84#define X86_VENDOR_NEXGEN 4
85#define X86_VENDOR_CENTAUR 5 85#define X86_VENDOR_CENTAUR 5
86#define X86_VENDOR_RISE 6
87#define X86_VENDOR_TRANSMETA 7 86#define X86_VENDOR_TRANSMETA 7
88#define X86_VENDOR_NUM 8 87#define X86_VENDOR_NUM 8
89#define X86_VENDOR_UNKNOWN 0xff 88#define X86_VENDOR_UNKNOWN 0xff
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index 85255db1e82d..d6e3225549c0 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -85,24 +85,6 @@ extern int exception_trace;
85extern unsigned cpu_khz; 85extern unsigned cpu_khz;
86extern unsigned tsc_khz; 86extern unsigned tsc_khz;
87 87
88extern void no_iommu_init(void);
89extern int force_iommu, no_iommu;
90extern int iommu_detected;
91#ifdef CONFIG_IOMMU
92extern void gart_iommu_init(void);
93extern void __init gart_parse_options(char *);
94extern void iommu_hole_init(void);
95extern int fallback_aper_order;
96extern int fallback_aper_force;
97extern int iommu_aperture;
98extern int iommu_aperture_allowed;
99extern int iommu_aperture_disabled;
100extern int fix_aperture;
101#else
102#define iommu_aperture 0
103#define iommu_aperture_allowed 0
104#endif
105
106extern int reboot_force; 88extern int reboot_force;
107extern int notsc_setup(char *); 89extern int notsc_setup(char *);
108 90
diff --git a/include/asm-x86_64/ptrace.h b/include/asm-x86_64/ptrace.h
index 5ea84dbb1e9c..7f166ccb0606 100644
--- a/include/asm-x86_64/ptrace.h
+++ b/include/asm-x86_64/ptrace.h
@@ -1,6 +1,7 @@
1#ifndef _X86_64_PTRACE_H 1#ifndef _X86_64_PTRACE_H
2#define _X86_64_PTRACE_H 2#define _X86_64_PTRACE_H
3 3
4#include <linux/compiler.h> /* For __user */
4#include <asm/ptrace-abi.h> 5#include <asm/ptrace-abi.h>
5 6
6#ifndef __ASSEMBLY__ 7#ifndef __ASSEMBLY__
diff --git a/include/asm-x86_64/resume-trace.h b/include/asm-x86_64/resume-trace.h
new file mode 100644
index 000000000000..34bf998fdf62
--- /dev/null
+++ b/include/asm-x86_64/resume-trace.h
@@ -0,0 +1,13 @@
1#define TRACE_RESUME(user) do { \
2 if (pm_trace_enabled) { \
3 void *tracedata; \
4 asm volatile("movq $1f,%0\n" \
5 ".section .tracedata,\"a\"\n" \
6 "1:\t.word %c1\n" \
7 "\t.quad %c2\n" \
8 ".previous" \
9 :"=r" (tracedata) \
10 : "i" (__LINE__), "i" (__FILE__)); \
11 generate_resume_trace(tracedata, user); \
12 } \
13} while (0)
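
The new x86_64 TRACE_RESUME mirrors the i386 variant but emits a .quad for the file pointer: the inline asm drops a (line, file) record into the ".tracedata" ELF section at each call site so a suspend/resume hang can later be mapped back to the last trace point reached. Below is a standalone userspace sketch of the same "stash __FILE__/__LINE__ in a dedicated section" idea; it is not kernel code, uses a plain static struct instead of inline asm, and all names are illustrative.

#include <stdio.h>

struct tracedata {
	unsigned short line;
	const char *file;
};

/* One record per call site, emitted into its own ELF section much like the
 * kernel's ".tracedata".  A post-processing tool could walk that section. */
static const struct tracedata resume_point
	__attribute__((section(".tracedata"))) = { __LINE__, __FILE__ };

int main(void)
{
	printf("tracepoint recorded at %s:%u\n",
	       resume_point.file, resume_point.line);
	return 0;
}

The kernel macro has to use inline asm so the record is emitted exactly at the expansion point without creating a named C object; the sketch trades that for readability.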
diff --git a/include/asm-x86_64/string.h b/include/asm-x86_64/string.h
index 9505d9f4bead..e583da7918fb 100644
--- a/include/asm-x86_64/string.h
+++ b/include/asm-x86_64/string.h
@@ -29,6 +29,9 @@ return (to);
29 function. */ 29 function. */
30 30
31#define __HAVE_ARCH_MEMCPY 1 31#define __HAVE_ARCH_MEMCPY 1
32#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
33extern void *memcpy(void *to, const void *from, size_t len);
34#else
32extern void *__memcpy(void *to, const void *from, size_t len); 35extern void *__memcpy(void *to, const void *from, size_t len);
33#define memcpy(dst,src,len) \ 36#define memcpy(dst,src,len) \
34 ({ size_t __len = (len); \ 37 ({ size_t __len = (len); \
@@ -38,7 +41,7 @@ extern void *__memcpy(void *to, const void *from, size_t len);
38 else \ 41 else \
39 __ret = __builtin_memcpy((dst),(src),__len); \ 42 __ret = __builtin_memcpy((dst),(src),__len); \
40 __ret; }) 43 __ret; })
41 44#endif
42 45
43#define __HAVE_ARCH_MEMSET 46#define __HAVE_ARCH_MEMSET
44void *memset(void *s, int c, size_t n); 47void *memset(void *s, int c, size_t n);
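
The string.h change lets gcc 4.3 and later see a plain memcpy() prototype, so the compiler's own builtin handling applies unhindered, while older compilers keep the macro that chooses between the out-of-line __memcpy() and __builtin_memcpy() depending on whether the length is a compile-time constant. A minimal userspace sketch of that constant-size dispatch follows; my_memcpy_outline() merely stands in for the kernel's assembly __memcpy() and is not a kernel symbol.

#include <stdio.h>
#include <string.h>

static void *my_memcpy_outline(void *to, const void *from, size_t len)
{
	return memcpy(to, from, len);	/* placeholder for the asm version */
}

/* Constant lengths of 64 bytes or more go to the out-of-line routine;
 * everything else is left to __builtin_memcpy(), which gcc can inline
 * for small constant sizes. */
#define my_memcpy(dst, src, len)					\
	({								\
		size_t __len = (len);					\
		void *__ret;						\
		if (__builtin_constant_p(len) && __len >= 64)		\
			__ret = my_memcpy_outline((dst), (src), __len);	\
		else							\
			__ret = __builtin_memcpy((dst), (src), __len);	\
		__ret;							\
	})

int main(void)
{
	char src[16] = "hello", dst[16];

	my_memcpy(dst, src, sizeof(src));	/* constant, small: builtin path */
	printf("%s\n", dst);
	return 0;
}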
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index e4f246d62c46..6313d33a0686 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -109,7 +109,7 @@ static inline void write_cr4(unsigned long val)
109#define stts() write_cr0(8 | read_cr0()) 109#define stts() write_cr0(8 | read_cr0())
110 110
111#define wbinvd() \ 111#define wbinvd() \
112 __asm__ __volatile__ ("wbinvd": : :"memory"); 112 __asm__ __volatile__ ("wbinvd": : :"memory")
113 113
114#endif /* __KERNEL__ */ 114#endif /* __KERNEL__ */
115 115
diff --git a/include/asm-x86_64/thread_info.h b/include/asm-x86_64/thread_info.h
index 10bb5a8ed688..33c72ef15a0c 100644
--- a/include/asm-x86_64/thread_info.h
+++ b/include/asm-x86_64/thread_info.h
@@ -115,6 +115,7 @@ static inline struct thread_info *stack_thread_info(void)
115#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ 115#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
116#define TIF_SECCOMP 8 /* secure computing */ 116#define TIF_SECCOMP 8 /* secure computing */
117#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal */ 117#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal */
118#define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */
118/* 16 free */ 119/* 16 free */
119#define TIF_IA32 17 /* 32bit process */ 120#define TIF_IA32 17 /* 32bit process */
120#define TIF_FORK 18 /* ret_from_fork */ 121#define TIF_FORK 18 /* ret_from_fork */
@@ -133,6 +134,7 @@ static inline struct thread_info *stack_thread_info(void)
133#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) 134#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
134#define _TIF_SECCOMP (1<<TIF_SECCOMP) 135#define _TIF_SECCOMP (1<<TIF_SECCOMP)
135#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) 136#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
137#define _TIF_MCE_NOTIFY (1<<TIF_MCE_NOTIFY)
136#define _TIF_IA32 (1<<TIF_IA32) 138#define _TIF_IA32 (1<<TIF_IA32)
137#define _TIF_FORK (1<<TIF_FORK) 139#define _TIF_FORK (1<<TIF_FORK)
138#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) 140#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
diff --git a/include/asm-x86_64/timex.h b/include/asm-x86_64/timex.h
index f6527e1b6c1c..6ed21f44d308 100644
--- a/include/asm-x86_64/timex.h
+++ b/include/asm-x86_64/timex.h
@@ -9,7 +9,6 @@
9#include <asm/8253pit.h> 9#include <asm/8253pit.h>
10#include <asm/msr.h> 10#include <asm/msr.h>
11#include <asm/vsyscall.h> 11#include <asm/vsyscall.h>
12#include <asm/hpet.h>
13#include <asm/system.h> 12#include <asm/system.h>
14#include <asm/processor.h> 13#include <asm/processor.h>
15#include <asm/tsc.h> 14#include <asm/tsc.h>
diff --git a/include/asm-x86_64/tlbflush.h b/include/asm-x86_64/tlbflush.h
index 8516225a8389..888eb4abdd07 100644
--- a/include/asm-x86_64/tlbflush.h
+++ b/include/asm-x86_64/tlbflush.h
@@ -92,7 +92,11 @@ static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long st
92 92
93#endif 93#endif
94 94
95#define flush_tlb_kernel_range(start, end) flush_tlb_all() 95static inline void flush_tlb_kernel_range(unsigned long start,
96 unsigned long end)
97{
98 flush_tlb_all();
99}
96 100
97static inline void flush_tlb_pgtables(struct mm_struct *mm, 101static inline void flush_tlb_pgtables(struct mm_struct *mm,
98 unsigned long start, unsigned long end) 102 unsigned long start, unsigned long end)
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h
index 4fd6fb23953e..36e52fba7960 100644
--- a/include/asm-x86_64/topology.h
+++ b/include/asm-x86_64/topology.h
@@ -22,7 +22,7 @@ extern int __node_distance(int, int);
22#define parent_node(node) (node) 22#define parent_node(node) (node)
23#define node_to_first_cpu(node) (first_cpu(node_to_cpumask[node])) 23#define node_to_first_cpu(node) (first_cpu(node_to_cpumask[node]))
24#define node_to_cpumask(node) (node_to_cpumask[node]) 24#define node_to_cpumask(node) (node_to_cpumask[node])
25#define pcibus_to_node(bus) ((long)(bus->sysdata)) 25#define pcibus_to_node(bus) ((struct pci_sysdata *)((bus)->sysdata))->node
26#define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus)); 26#define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus));
27 27
28#define numa_node_id() read_pda(nodenumber) 28#define numa_node_id() read_pda(nodenumber)
diff --git a/include/asm-x86_64/vgtod.h b/include/asm-x86_64/vgtod.h
new file mode 100644
index 000000000000..3301f0929342
--- /dev/null
+++ b/include/asm-x86_64/vgtod.h
@@ -0,0 +1,29 @@
1#ifndef _ASM_VGTOD_H
2#define _ASM_VGTOD_H 1
3
4#include <asm/vsyscall.h>
5#include <linux/clocksource.h>
6
7struct vsyscall_gtod_data {
8 seqlock_t lock;
9
10 /* open coded 'struct timespec' */
11 time_t wall_time_sec;
12 u32 wall_time_nsec;
13
14 int sysctl_enabled;
15 struct timezone sys_tz;
16 struct { /* extract of a clocksource struct */
17 cycle_t (*vread)(void);
18 cycle_t cycle_last;
19 cycle_t mask;
20 u32 mult;
21 u32 shift;
22 } clock;
23 struct timespec wall_to_monotonic;
24};
25extern struct vsyscall_gtod_data __vsyscall_gtod_data
26__section_vsyscall_gtod_data;
27extern struct vsyscall_gtod_data vsyscall_gtod_data;
28
29#endif
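
The new vgtod.h collects everything the vsyscall gettimeofday path needs into one structure: a seqlock, the wall time, and an extract of the current clocksource (vread, cycle_last, mask, mult, shift). Below is a userspace sketch, not kernel code, of how a consumer of such a snapshot would turn "cycles since cycle_last" into nanoseconds and retry if an update races the read; the sequence counter and read_counter() stand-in are deliberately simplified.

#include <stdint.h>
#include <stdio.h>

struct gtod_snapshot {
	volatile unsigned seq;		/* even = stable, odd = being updated */
	uint64_t cycle_last;
	uint64_t mask;
	uint32_t mult, shift;
	int64_t wall_time_sec;
	uint32_t wall_time_nsec;
};

static uint64_t read_counter(void) { return 1000000; } /* stand-in for ->vread() */

static void gettime(const struct gtod_snapshot *g, int64_t *sec, uint32_t *nsec)
{
	unsigned seq;
	uint64_t cycles, ns;

	do {
		seq = g->seq;			/* retry if an update races us */
		cycles = (read_counter() - g->cycle_last) & g->mask;
		ns = (cycles * g->mult) >> g->shift;
		*sec = g->wall_time_sec;
		*nsec = g->wall_time_nsec;
	} while (seq != g->seq || (seq & 1));

	*sec += (*nsec + ns) / 1000000000u;
	*nsec = (*nsec + ns) % 1000000000u;
}

int main(void)
{
	struct gtod_snapshot g = {
		.seq = 0, .cycle_last = 0, .mask = ~0ull,
		.mult = 1, .shift = 0,		/* 1 cycle == 1 ns for the demo */
		.wall_time_sec = 1184000000, .wall_time_nsec = 999999500,
	};
	int64_t sec; uint32_t nsec;

	gettime(&g, &sec, &nsec);
	printf("%lld.%09u\n", (long long)sec, nsec);
	return 0;
}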
diff --git a/include/asm-x86_64/vsyscall.h b/include/asm-x86_64/vsyscall.h
index 82b4afe65c91..3b8ceb4af2cf 100644
--- a/include/asm-x86_64/vsyscall.h
+++ b/include/asm-x86_64/vsyscall.h
@@ -22,6 +22,8 @@ enum vsyscall_num {
22/* Definitions for CONFIG_GENERIC_TIME definitions */ 22/* Definitions for CONFIG_GENERIC_TIME definitions */
23#define __section_vsyscall_gtod_data __attribute__ \ 23#define __section_vsyscall_gtod_data __attribute__ \
24 ((unused, __section__ (".vsyscall_gtod_data"),aligned(16))) 24 ((unused, __section__ (".vsyscall_gtod_data"),aligned(16)))
25#define __section_vsyscall_clock __attribute__ \
26 ((unused, __section__ (".vsyscall_clock"),aligned(16)))
25#define __vsyscall_fn __attribute__ ((unused,__section__(".vsyscall_fn"))) 27#define __vsyscall_fn __attribute__ ((unused,__section__(".vsyscall_fn")))
26 28
27#define VGETCPU_RDTSCP 1 29#define VGETCPU_RDTSCP 1
@@ -36,7 +38,6 @@ extern volatile unsigned long __jiffies;
36/* kernel space (writeable) */ 38/* kernel space (writeable) */
37extern int vgetcpu_mode; 39extern int vgetcpu_mode;
38extern struct timezone sys_tz; 40extern struct timezone sys_tz;
39extern struct vsyscall_gtod_data_t vsyscall_gtod_data;
40 41
41#endif /* __KERNEL__ */ 42#endif /* __KERNEL__ */
42 43
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index dc234c508a6f..d5680cd7746a 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -88,10 +88,8 @@ int acpi_table_parse (char *id, acpi_table_handler handler);
88int __init acpi_table_parse_entries(char *id, unsigned long table_size, 88int __init acpi_table_parse_entries(char *id, unsigned long table_size,
89 int entry_id, acpi_table_entry_handler handler, unsigned int max_entries); 89 int entry_id, acpi_table_entry_handler handler, unsigned int max_entries);
90int acpi_table_parse_madt (enum acpi_madt_type id, acpi_table_entry_handler handler, unsigned int max_entries); 90int acpi_table_parse_madt (enum acpi_madt_type id, acpi_table_entry_handler handler, unsigned int max_entries);
91int acpi_table_parse_srat (enum acpi_srat_type id, acpi_table_entry_handler handler, unsigned int max_entries);
92int acpi_parse_mcfg (struct acpi_table_header *header); 91int acpi_parse_mcfg (struct acpi_table_header *header);
93void acpi_table_print_madt_entry (struct acpi_subtable_header *madt); 92void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
94void acpi_table_print_srat_entry (struct acpi_subtable_header *srat);
95 93
96/* the following four functions are architecture-dependent */ 94/* the following four functions are architecture-dependent */
97#ifdef CONFIG_HAVE_ARCH_PARSE_SRAT 95#ifdef CONFIG_HAVE_ARCH_PARSE_SRAT
@@ -233,6 +231,9 @@ extern int acpi_paddr_to_node(u64 start_addr, u64 size);
233 231
234extern int pnpacpi_disabled; 232extern int pnpacpi_disabled;
235 233
234#define PXM_INVAL (-1)
235#define NID_INVAL (-1)
236
236#else /* CONFIG_ACPI */ 237#else /* CONFIG_ACPI */
237 238
238static inline int acpi_boot_init(void) 239static inline int acpi_boot_init(void)
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 8486e78f7335..e0bd46eb2414 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -23,6 +23,7 @@ enum clock_event_mode {
23 CLOCK_EVT_MODE_SHUTDOWN, 23 CLOCK_EVT_MODE_SHUTDOWN,
24 CLOCK_EVT_MODE_PERIODIC, 24 CLOCK_EVT_MODE_PERIODIC,
25 CLOCK_EVT_MODE_ONESHOT, 25 CLOCK_EVT_MODE_ONESHOT,
26 CLOCK_EVT_MODE_RESUME,
26}; 27};
27 28
28/* Clock event notification values */ 29/* Clock event notification values */
@@ -119,10 +120,6 @@ extern void clockevents_register_device(struct clock_event_device *dev);
119 120
120extern void clockevents_exchange_device(struct clock_event_device *old, 121extern void clockevents_exchange_device(struct clock_event_device *old,
121 struct clock_event_device *new); 122 struct clock_event_device *new);
122extern
123struct clock_event_device *clockevents_request_device(unsigned int features,
124 cpumask_t cpumask);
125extern void clockevents_release_device(struct clock_event_device *dev);
126extern void clockevents_set_mode(struct clock_event_device *dev, 123extern void clockevents_set_mode(struct clock_event_device *dev,
127 enum clock_event_mode mode); 124 enum clock_event_mode mode);
128extern int clockevents_register_notifier(struct notifier_block *nb); 125extern int clockevents_register_notifier(struct notifier_block *nb);
diff --git a/include/linux/coda_linux.h b/include/linux/coda_linux.h
index c4079b403e9e..1c47a34aa794 100644
--- a/include/linux/coda_linux.h
+++ b/include/linux/coda_linux.h
@@ -36,7 +36,6 @@ extern const struct file_operations coda_ioctl_operations;
36 36
37/* operations shared over more than one file */ 37/* operations shared over more than one file */
38int coda_open(struct inode *i, struct file *f); 38int coda_open(struct inode *i, struct file *f);
39int coda_flush(struct file *f, fl_owner_t id);
40int coda_release(struct inode *i, struct file *f); 39int coda_release(struct inode *i, struct file *f);
41int coda_permission(struct inode *inode, int mask, struct nameidata *nd); 40int coda_permission(struct inode *inode, int mask, struct nameidata *nd);
42int coda_revalidate_inode(struct dentry *); 41int coda_revalidate_inode(struct dentry *);
diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h
index aa8f454b3b77..07ae8f846055 100644
--- a/include/linux/coda_psdev.h
+++ b/include/linux/coda_psdev.h
@@ -33,9 +33,6 @@ int venus_setattr(struct super_block *, struct CodaFid *, struct coda_vattr *);
33int venus_lookup(struct super_block *sb, struct CodaFid *fid, 33int venus_lookup(struct super_block *sb, struct CodaFid *fid,
34 const char *name, int length, int *type, 34 const char *name, int length, int *type,
35 struct CodaFid *resfid); 35 struct CodaFid *resfid);
36int venus_store(struct super_block *sb, struct CodaFid *fid, int flags,
37 vuid_t uid);
38int venus_release(struct super_block *sb, struct CodaFid *fid, int flags);
39int venus_close(struct super_block *sb, struct CodaFid *fid, int flags, 36int venus_close(struct super_block *sb, struct CodaFid *fid, int flags,
40 vuid_t uid); 37 vuid_t uid);
41int venus_open(struct super_block *sb, struct CodaFid *fid, int flags, 38int venus_open(struct super_block *sb, struct CodaFid *fid, int flags,
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index a03e9398a6c2..14f7494280f0 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -23,3 +23,21 @@
23 * code 23 * code
24 */ 24 */
25#define uninitialized_var(x) x = x 25#define uninitialized_var(x) x = x
26
27#if !(__GNUC__ == 4 && __GNUC_MINOR__ < 3)
28/* Mark functions as cold. gcc will assume any path leading to a call
29 to them will be unlikely. This means a lot of manual unlikely()s
30 are unnecessary now for any paths leading to the usual suspects
31 like BUG(), printk(), panic() etc. [but let's keep them for now for
32 older compilers]
33
34 Early snapshots of gcc 4.3 don't support this and we can't detect this
35 in the preprocessor, but we can live with this because they're unreleased.
36 Maketime probing would be overkill here.
37
38 gcc also has a __attribute__((__hot__)) to move hot functions into
39 a special section, but I don't see any sense in this right now in
40 the kernel context */
41#define __cold __attribute__((__cold__))
42
43#endif
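
As the comment above explains, marking a function __cold makes gcc treat every path that reaches a call to it as unlikely, which covers the usual BUG()/printk()/panic() suspects without sprinkling unlikely() everywhere. A minimal, self-contained illustration of the attribute as the new __cold macro uses it (the gcc version check mirrors the one in the hunk; report_fatal() is a made-up example, not a kernel function):

#include <stdio.h>
#include <stdlib.h>

#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
#define __cold __attribute__((__cold__))
#else
#define __cold
#endif

static void __cold report_fatal(const char *msg)
{
	fprintf(stderr, "fatal: %s\n", msg);
	exit(1);
}

int process(int value)
{
	if (value < 0)			/* gcc assumes this branch is unlikely */
		report_fatal("negative value");
	return value * 2;
}

int main(void)
{
	printf("%d\n", process(21));
	return 0;
}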
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 8287a72bb6a9..12a1291855e2 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -174,4 +174,13 @@ extern void __chk_io_ptr(const void __iomem *);
174# define __attribute_const__ /* unimplemented */ 174# define __attribute_const__ /* unimplemented */
175#endif 175#endif
176 176
177/*
178 * Tell gcc if a function is cold. The compiler will assume any path
179 * directly leading to the call is unlikely.
180 */
181
182#ifndef __cold
183#define __cold
184#endif
185
177#endif /* __LINUX_COMPILER_H */ 186#endif /* __LINUX_COMPILER_H */
diff --git a/include/linux/init.h b/include/linux/init.h
index 5b5285316339..f0d0e3295a9b 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -40,10 +40,10 @@
40 40
41/* These are for everybody (although not all archs will actually 41/* These are for everybody (although not all archs will actually
42 discard it in modules) */ 42 discard it in modules) */
43#define __init __attribute__ ((__section__ (".init.text"))) 43#define __init __attribute__ ((__section__ (".init.text"))) __cold
44#define __initdata __attribute__ ((__section__ (".init.data"))) 44#define __initdata __attribute__ ((__section__ (".init.data")))
45#define __exitdata __attribute__ ((__section__(".exit.data"))) 45#define __exitdata __attribute__ ((__section__(".exit.data")))
46#define __exit_call __attribute_used__ __attribute__ ((__section__ (".exitcall.exit"))) 46#define __exit_call __attribute_used__ __attribute__ ((__section__ (".exitcall.exit"))) __cold
47 47
48/* modpost check for section mismatches during the kernel build. 48/* modpost check for section mismatches during the kernel build.
49 * A section mismatch happens when there are references from a 49 * A section mismatch happens when there are references from a
@@ -59,9 +59,9 @@
59#define __initdata_refok __attribute__ ((__section__ (".data.init.refok"))) 59#define __initdata_refok __attribute__ ((__section__ (".data.init.refok")))
60 60
61#ifdef MODULE 61#ifdef MODULE
62#define __exit __attribute__ ((__section__(".exit.text"))) 62#define __exit __attribute__ ((__section__(".exit.text"))) __cold
63#else 63#else
64#define __exit __attribute_used__ __attribute__ ((__section__(".exit.text"))) 64#define __exit __attribute_used__ __attribute__ ((__section__(".exit.text"))) __cold
65#endif 65#endif
66 66
67/* For assembly routines */ 67/* For assembly routines */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 1eb9cde550c4..4300bb462d29 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -106,7 +106,7 @@ extern int cond_resched(void);
106extern struct atomic_notifier_head panic_notifier_list; 106extern struct atomic_notifier_head panic_notifier_list;
107extern long (*panic_blink)(long time); 107extern long (*panic_blink)(long time);
108NORET_TYPE void panic(const char * fmt, ...) 108NORET_TYPE void panic(const char * fmt, ...)
109 __attribute__ ((NORET_AND format (printf, 1, 2))); 109 __attribute__ ((NORET_AND format (printf, 1, 2))) __cold;
110extern void oops_enter(void); 110extern void oops_enter(void);
111extern void oops_exit(void); 111extern void oops_exit(void);
112extern int oops_may_print(void); 112extern int oops_may_print(void);
@@ -155,14 +155,14 @@ extern void dump_thread(struct pt_regs *regs, struct user *dump);
155asmlinkage int vprintk(const char *fmt, va_list args) 155asmlinkage int vprintk(const char *fmt, va_list args)
156 __attribute__ ((format (printf, 1, 0))); 156 __attribute__ ((format (printf, 1, 0)));
157asmlinkage int printk(const char * fmt, ...) 157asmlinkage int printk(const char * fmt, ...)
158 __attribute__ ((format (printf, 1, 2))); 158 __attribute__ ((format (printf, 1, 2))) __cold;
159#else 159#else
160static inline int vprintk(const char *s, va_list args) 160static inline int vprintk(const char *s, va_list args)
161 __attribute__ ((format (printf, 1, 0))); 161 __attribute__ ((format (printf, 1, 0)));
162static inline int vprintk(const char *s, va_list args) { return 0; } 162static inline int vprintk(const char *s, va_list args) { return 0; }
163static inline int printk(const char *s, ...) 163static inline int printk(const char *s, ...)
164 __attribute__ ((format (printf, 1, 2))); 164 __attribute__ ((format (printf, 1, 2)));
165static inline int printk(const char *s, ...) { return 0; } 165static inline int __cold printk(const char *s, ...) { return 0; }
166#endif 166#endif
167 167
168unsigned long int_sqrt(unsigned long); 168unsigned long int_sqrt(unsigned long);
@@ -212,7 +212,7 @@ extern enum system_states {
212#define TAINT_USER (1<<6) 212#define TAINT_USER (1<<6)
213#define TAINT_DIE (1<<7) 213#define TAINT_DIE (1<<7)
214 214
215extern void dump_stack(void); 215extern void dump_stack(void) __cold;
216 216
217enum { 217enum {
218 DUMP_PREFIX_NONE, 218 DUMP_PREFIX_NONE,
diff --git a/include/linux/resume-trace.h b/include/linux/resume-trace.h
index 81e9299ca148..f3f4f28c6960 100644
--- a/include/linux/resume-trace.h
+++ b/include/linux/resume-trace.h
@@ -2,6 +2,7 @@
2#define RESUME_TRACE_H 2#define RESUME_TRACE_H
3 3
4#ifdef CONFIG_PM_TRACE 4#ifdef CONFIG_PM_TRACE
5#include <asm/resume-trace.h>
5 6
6extern int pm_trace_enabled; 7extern int pm_trace_enabled;
7 8
@@ -9,20 +10,10 @@ struct device;
9extern void set_trace_device(struct device *); 10extern void set_trace_device(struct device *);
10extern void generate_resume_trace(void *tracedata, unsigned int user); 11extern void generate_resume_trace(void *tracedata, unsigned int user);
11 12
12#define TRACE_DEVICE(dev) set_trace_device(dev) 13#define TRACE_DEVICE(dev) do { \
13#define TRACE_RESUME(user) do { \ 14 if (pm_trace_enabled) \
14 if (pm_trace_enabled) { \ 15 set_trace_device(dev); \
15 void *tracedata; \ 16 } while(0)
16 asm volatile("movl $1f,%0\n" \
17 ".section .tracedata,\"a\"\n" \
18 "1:\t.word %c1\n" \
19 "\t.long %c2\n" \
20 ".previous" \
21 :"=r" (tracedata) \
22 : "i" (__LINE__), "i" (__FILE__)); \
23 generate_resume_trace(tracedata, user); \
24 } \
25} while (0)
26 17
27#else 18#else
28 19
diff --git a/include/linux/time.h b/include/linux/time.h
index ec3b0ced0afe..e6aea5146e5d 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -4,6 +4,7 @@
4#include <linux/types.h> 4#include <linux/types.h>
5 5
6#ifdef __KERNEL__ 6#ifdef __KERNEL__
7# include <linux/cache.h>
7# include <linux/seqlock.h> 8# include <linux/seqlock.h>
8#endif 9#endif
9 10
@@ -94,6 +95,8 @@ extern struct timespec wall_to_monotonic;
94extern seqlock_t xtime_lock __attribute__((weak)); 95extern seqlock_t xtime_lock __attribute__((weak));
95 96
96extern unsigned long read_persistent_clock(void); 97extern unsigned long read_persistent_clock(void);
98extern int update_persistent_clock(struct timespec now);
99extern int no_sync_cmos_clock __read_mostly;
97void timekeeping_init(void); 100void timekeeping_init(void);
98 101
99static inline unsigned long get_seconds(void) 102static inline unsigned long get_seconds(void)
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index c2b10cae5da5..89338b468d0d 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -58,6 +58,13 @@ void vmalloc_sync_all(void);
58/* 58/*
59 * Lowlevel-APIs (not for driver use!) 59 * Lowlevel-APIs (not for driver use!)
60 */ 60 */
61
62static inline size_t get_vm_area_size(const struct vm_struct *area)
63{
64 /* return actual size without guard page */
65 return area->size - PAGE_SIZE;
66}
67
61extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags); 68extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
62extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, 69extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
63 unsigned long start, unsigned long end); 70 unsigned long start, unsigned long end);
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 72d034258ba1..eb1ddebd2c04 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -558,7 +558,8 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
558 */ 558 */
559static int hrtimer_switch_to_hres(void) 559static int hrtimer_switch_to_hres(void)
560{ 560{
561 struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases); 561 int cpu = smp_processor_id();
562 struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
562 unsigned long flags; 563 unsigned long flags;
563 564
564 if (base->hres_active) 565 if (base->hres_active)
@@ -568,6 +569,8 @@ static int hrtimer_switch_to_hres(void)
568 569
569 if (tick_init_highres()) { 570 if (tick_init_highres()) {
570 local_irq_restore(flags); 571 local_irq_restore(flags);
572 printk(KERN_WARNING "Could not switch to high resolution "
573 "mode on CPU %d\n", cpu);
571 return 0; 574 return 0;
572 } 575 }
573 base->hres_active = 1; 576 base->hres_active = 1;
@@ -683,6 +686,7 @@ static void enqueue_hrtimer(struct hrtimer *timer,
683 struct rb_node **link = &base->active.rb_node; 686 struct rb_node **link = &base->active.rb_node;
684 struct rb_node *parent = NULL; 687 struct rb_node *parent = NULL;
685 struct hrtimer *entry; 688 struct hrtimer *entry;
689 int leftmost = 1;
686 690
687 /* 691 /*
688 * Find the right place in the rbtree: 692 * Find the right place in the rbtree:
@@ -694,18 +698,19 @@ static void enqueue_hrtimer(struct hrtimer *timer,
694 * We dont care about collisions. Nodes with 698 * We dont care about collisions. Nodes with
695 * the same expiry time stay together. 699 * the same expiry time stay together.
696 */ 700 */
697 if (timer->expires.tv64 < entry->expires.tv64) 701 if (timer->expires.tv64 < entry->expires.tv64) {
698 link = &(*link)->rb_left; 702 link = &(*link)->rb_left;
699 else 703 } else {
700 link = &(*link)->rb_right; 704 link = &(*link)->rb_right;
705 leftmost = 0;
706 }
701 } 707 }
702 708
703 /* 709 /*
704 * Insert the timer to the rbtree and check whether it 710 * Insert the timer to the rbtree and check whether it
705 * replaces the first pending timer 711 * replaces the first pending timer
706 */ 712 */
707 if (!base->first || timer->expires.tv64 < 713 if (leftmost) {
708 rb_entry(base->first, struct hrtimer, node)->expires.tv64) {
709 /* 714 /*
710 * Reprogram the clock event device. When the timer is already 715 * Reprogram the clock event device. When the timer is already
711 * expired hrtimer_enqueue_reprogram has either called the 716 * expired hrtimer_enqueue_reprogram has either called the
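
The enqueue_hrtimer() change replaces the post-insert comparison against base->first with a flag maintained during the rbtree walk: if the descent never turns right, the new timer is the leftmost (earliest) entry and only then does the clock event device need reprogramming. A standalone sketch of the same trick on a plain binary search tree, purely for illustration:

#include <stdio.h>
#include <stdlib.h>

struct node {
	long key;
	struct node *left, *right;
};

static int insert(struct node **root, long key)
{
	struct node **link = root;
	int leftmost = 1;
	struct node *n = calloc(1, sizeof(*n));

	n->key = key;
	while (*link) {
		if (key < (*link)->key) {
			link = &(*link)->left;
		} else {
			link = &(*link)->right;
			leftmost = 0;	/* something smaller is already there */
		}
	}
	*link = n;
	return leftmost;		/* 1: new node is the earliest entry */
}

int main(void)
{
	struct node *root = NULL;

	printf("%d\n", insert(&root, 100));	/* 1: first node is leftmost   */
	printf("%d\n", insert(&root, 200));	/* 0: went right at least once */
	printf("%d\n", insert(&root, 50));	/* 1: new minimum               */
	return 0;
}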
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index b4f1674fca79..50b81b98046a 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -19,7 +19,15 @@ static struct proc_dir_entry *root_irq_dir;
19static int irq_affinity_read_proc(char *page, char **start, off_t off, 19static int irq_affinity_read_proc(char *page, char **start, off_t off,
20 int count, int *eof, void *data) 20 int count, int *eof, void *data)
21{ 21{
22 int len = cpumask_scnprintf(page, count, irq_desc[(long)data].affinity); 22 struct irq_desc *desc = irq_desc + (long)data;
23 cpumask_t *mask = &desc->affinity;
24 int len;
25
26#ifdef CONFIG_GENERIC_PENDING_IRQ
27 if (desc->status & IRQ_MOVE_PENDING)
28 mask = &desc->pending_mask;
29#endif
30 len = cpumask_scnprintf(page, count, *mask);
23 31
24 if (count - len < 2) 32 if (count - len < 2)
25 return -EINVAL; 33 return -EINVAL;
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 7358609e4735..c1a106d87d90 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -57,7 +57,7 @@ config DISABLE_CONSOLE_SUSPEND
57 57
58config PM_TRACE 58config PM_TRACE
59 bool "Suspend/resume event tracing" 59 bool "Suspend/resume event tracing"
60 depends on PM_DEBUG && X86_32 && EXPERIMENTAL 60 depends on PM_DEBUG && X86 && EXPERIMENTAL
61 default n 61 default n
62 ---help--- 62 ---help---
63 This enables some cheesy code to save the last PM event point in the 63 This enables some cheesy code to save the last PM event point in the
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index b5e352597cbb..cd91237dbfe3 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -10,6 +10,7 @@
10 10
11#include <linux/mm.h> 11#include <linux/mm.h>
12#include <linux/time.h> 12#include <linux/time.h>
13#include <linux/timer.h>
13#include <linux/timex.h> 14#include <linux/timex.h>
14#include <linux/jiffies.h> 15#include <linux/jiffies.h>
15#include <linux/hrtimer.h> 16#include <linux/hrtimer.h>
@@ -175,12 +176,64 @@ u64 current_tick_length(void)
175 return tick_length; 176 return tick_length;
176} 177}
177 178
179#ifdef CONFIG_GENERIC_CMOS_UPDATE
178 180
179void __attribute__ ((weak)) notify_arch_cmos_timer(void) 181/* Disable the cmos update - used by virtualization and embedded */
182int no_sync_cmos_clock __read_mostly;
183
184static void sync_cmos_clock(unsigned long dummy);
185
186static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
187
188static void sync_cmos_clock(unsigned long dummy)
189{
190 struct timespec now, next;
191 int fail = 1;
192
193 /*
194 * If we have an externally synchronized Linux clock, then update
195 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
196 * called as close as possible to 500 ms before the new second starts.
197 * This code is run on a timer. If the clock is set, that timer
198 * may not expire at the correct time. Thus, we adjust...
199 */
200 if (!ntp_synced())
201 /*
202 * Not synced, exit, do not restart a timer (if one is
203 * running, let it run out).
204 */
205 return;
206
207 getnstimeofday(&now);
208 if (abs(xtime.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2)
209 fail = update_persistent_clock(now);
210
211 next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec;
212 if (next.tv_nsec <= 0)
213 next.tv_nsec += NSEC_PER_SEC;
214
215 if (!fail)
216 next.tv_sec = 659;
217 else
218 next.tv_sec = 0;
219
220 if (next.tv_nsec >= NSEC_PER_SEC) {
221 next.tv_sec++;
222 next.tv_nsec -= NSEC_PER_SEC;
223 }
224 mod_timer(&sync_cmos_timer, jiffies + timespec_to_jiffies(&next));
225}
226
227static void notify_cmos_timer(void)
180{ 228{
181 return; 229 if (no_sync_cmos_clock)
230 mod_timer(&sync_cmos_timer, jiffies + 1);
182} 231}
183 232
233#else
234static inline void notify_cmos_timer(void) { }
235#endif
236
184/* adjtimex mainly allows reading (and writing, if superuser) of 237/* adjtimex mainly allows reading (and writing, if superuser) of
185 * kernel time-keeping variables. used by xntpd. 238 * kernel time-keeping variables. used by xntpd.
186 */ 239 */
@@ -345,6 +398,6 @@ leave: if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0)
345 txc->stbcnt = 0; 398 txc->stbcnt = 0;
346 write_sequnlock_irq(&xtime_lock); 399 write_sequnlock_irq(&xtime_lock);
347 do_gettimeofday(&txc->time); 400 do_gettimeofday(&txc->time);
348 notify_arch_cmos_timer(); 401 notify_cmos_timer();
349 return(result); 402 return(result);
350} 403}
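
The comment in sync_cmos_clock() describes the scheduling policy: the RTC write should land about 500 ms before a second boundary, and once a write succeeds the timer backs off to 659 s (roughly 11 minutes). The arithmetic is easy to check in isolation; the sketch below is plain userspace C with no kernel timers, just the delay computation.

#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

static void next_expiry(long now_nsec, int write_succeeded,
			long *delay_sec, long *delay_nsec)
{
	long nsec = NSEC_PER_SEC / 2 - now_nsec;	/* reach the 500 ms point */

	if (nsec <= 0)
		nsec += NSEC_PER_SEC;			/* already past it: aim at the next second */

	*delay_sec = write_succeeded ? 659 : 0;		/* ~11 min vs. retry soon */
	*delay_nsec = nsec;
	if (*delay_nsec >= NSEC_PER_SEC) {
		(*delay_sec)++;
		*delay_nsec -= NSEC_PER_SEC;
	}
}

int main(void)
{
	long sec, nsec;

	next_expiry(100000000L, 0, &sec, &nsec);	/* 0.1 s into the second  */
	printf("retry in %ld.%09ld s\n", sec, nsec);	/* 0.400000000            */

	next_expiry(600000000L, 1, &sec, &nsec);	/* 0.6 s, write succeeded */
	printf("next sync in %ld.%09ld s\n", sec, nsec);/* 659.900000000          */
	return 0;
}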
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 8001d37071f5..db8e0f3d409b 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -31,6 +31,12 @@ struct tick_device tick_broadcast_device;
31static cpumask_t tick_broadcast_mask; 31static cpumask_t tick_broadcast_mask;
32static DEFINE_SPINLOCK(tick_broadcast_lock); 32static DEFINE_SPINLOCK(tick_broadcast_lock);
33 33
34#ifdef CONFIG_TICK_ONESHOT
35static void tick_broadcast_clear_oneshot(int cpu);
36#else
37static inline void tick_broadcast_clear_oneshot(int cpu) { }
38#endif
39
34/* 40/*
35 * Debugging: see timer_list.c 41 * Debugging: see timer_list.c
36 */ 42 */
@@ -49,7 +55,7 @@ cpumask_t *tick_get_broadcast_mask(void)
49 */ 55 */
50static void tick_broadcast_start_periodic(struct clock_event_device *bc) 56static void tick_broadcast_start_periodic(struct clock_event_device *bc)
51{ 57{
52 if (bc && bc->mode == CLOCK_EVT_MODE_SHUTDOWN) 58 if (bc)
53 tick_setup_periodic(bc, 1); 59 tick_setup_periodic(bc, 1);
54} 60}
55 61
@@ -99,8 +105,19 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
99 cpu_set(cpu, tick_broadcast_mask); 105 cpu_set(cpu, tick_broadcast_mask);
100 tick_broadcast_start_periodic(tick_broadcast_device.evtdev); 106 tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
101 ret = 1; 107 ret = 1;
102 } 108 } else {
109 /*
110 * When the new device is not affected by the stop
111 * feature and the cpu is marked in the broadcast mask
112 * then clear the broadcast bit.
113 */
114 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
115 int cpu = smp_processor_id();
103 116
117 cpu_clear(cpu, tick_broadcast_mask);
118 tick_broadcast_clear_oneshot(cpu);
119 }
120 }
104 spin_unlock_irqrestore(&tick_broadcast_lock, flags); 121 spin_unlock_irqrestore(&tick_broadcast_lock, flags);
105 return ret; 122 return ret;
106} 123}
@@ -299,7 +316,7 @@ void tick_suspend_broadcast(void)
299 spin_lock_irqsave(&tick_broadcast_lock, flags); 316 spin_lock_irqsave(&tick_broadcast_lock, flags);
300 317
301 bc = tick_broadcast_device.evtdev; 318 bc = tick_broadcast_device.evtdev;
302 if (bc && tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) 319 if (bc)
303 clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN); 320 clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
304 321
305 spin_unlock_irqrestore(&tick_broadcast_lock, flags); 322 spin_unlock_irqrestore(&tick_broadcast_lock, flags);
@@ -316,6 +333,8 @@ int tick_resume_broadcast(void)
316 bc = tick_broadcast_device.evtdev; 333 bc = tick_broadcast_device.evtdev;
317 334
318 if (bc) { 335 if (bc) {
336 clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);
337
319 switch (tick_broadcast_device.mode) { 338 switch (tick_broadcast_device.mode) {
320 case TICKDEV_MODE_PERIODIC: 339 case TICKDEV_MODE_PERIODIC:
321 if(!cpus_empty(tick_broadcast_mask)) 340 if(!cpus_empty(tick_broadcast_mask))
@@ -485,6 +504,16 @@ out:
485 spin_unlock_irqrestore(&tick_broadcast_lock, flags); 504 spin_unlock_irqrestore(&tick_broadcast_lock, flags);
486} 505}
487 506
507/*
508 * Reset the one shot broadcast for a cpu
509 *
510 * Called with tick_broadcast_lock held
511 */
512static void tick_broadcast_clear_oneshot(int cpu)
513{
514 cpu_clear(cpu, tick_broadcast_oneshot_mask);
515}
516
488/** 517/**
489 * tick_broadcast_setup_highres - setup the broadcast device for highres 518 * tick_broadcast_setup_highres - setup the broadcast device for highres
490 */ 519 */
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index a96ec9ab3454..77a21abc8716 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -318,12 +318,17 @@ static void tick_resume(void)
318{ 318{
319 struct tick_device *td = &__get_cpu_var(tick_cpu_device); 319 struct tick_device *td = &__get_cpu_var(tick_cpu_device);
320 unsigned long flags; 320 unsigned long flags;
321 int broadcast = tick_resume_broadcast();
321 322
322 spin_lock_irqsave(&tick_device_lock, flags); 323 spin_lock_irqsave(&tick_device_lock, flags);
323 if (td->mode == TICKDEV_MODE_PERIODIC) 324 clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);
324 tick_setup_periodic(td->evtdev, 0); 325
325 else 326 if (!broadcast) {
326 tick_resume_oneshot(); 327 if (td->mode == TICKDEV_MODE_PERIODIC)
328 tick_setup_periodic(td->evtdev, 0);
329 else
330 tick_resume_oneshot();
331 }
327 spin_unlock_irqrestore(&tick_device_lock, flags); 332 spin_unlock_irqrestore(&tick_device_lock, flags);
328} 333}
329 334
@@ -360,8 +365,7 @@ static int tick_notify(struct notifier_block *nb, unsigned long reason,
360 break; 365 break;
361 366
362 case CLOCK_EVT_NOTIFY_RESUME: 367 case CLOCK_EVT_NOTIFY_RESUME:
363 if (!tick_resume_broadcast()) 368 tick_resume();
364 tick_resume();
365 break; 369 break;
366 370
367 default: 371 default:
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index f6997ab0c3c9..0258d3115d54 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -73,8 +73,21 @@ int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *))
73 struct clock_event_device *dev = td->evtdev; 73 struct clock_event_device *dev = td->evtdev;
74 74
75 if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT) || 75 if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT) ||
76 !tick_device_is_functional(dev)) 76 !tick_device_is_functional(dev)) {
77
78 printk(KERN_INFO "Clockevents: "
79 "could not switch to one-shot mode:");
80 if (!dev) {
81 printk(" no tick device\n");
82 } else {
83 if (!tick_device_is_functional(dev))
84 printk(" %s is not functional.\n", dev->name);
85 else
86 printk(" %s does not support one-shot mode.\n",
87 dev->name);
88 }
77 return -EINVAL; 89 return -EINVAL;
90 }
78 91
79 td->mode = TICKDEV_MODE_ONESHOT; 92 td->mode = TICKDEV_MODE_ONESHOT;
80 dev->event_handler = handler; 93 dev->event_handler = handler;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 52db9e3c526e..b416995b9757 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -546,6 +546,7 @@ void tick_setup_sched_timer(void)
546{ 546{
547 struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); 547 struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
548 ktime_t now = ktime_get(); 548 ktime_t now = ktime_get();
549 u64 offset;
549 550
550 /* 551 /*
551 * Emulate tick processing via per-CPU hrtimers: 552 * Emulate tick processing via per-CPU hrtimers:
@@ -554,8 +555,12 @@ void tick_setup_sched_timer(void)
554 ts->sched_timer.function = tick_sched_timer; 555 ts->sched_timer.function = tick_sched_timer;
555 ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ; 556 ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
556 557
557 /* Get the next period */ 558 /* Get the next period (per cpu) */
558 ts->sched_timer.expires = tick_init_jiffy_update(); 559 ts->sched_timer.expires = tick_init_jiffy_update();
560 offset = ktime_to_ns(tick_period) >> 1;
561 do_div(offset, NR_CPUS);
562 offset *= smp_processor_id();
563 ts->sched_timer.expires = ktime_add_ns(ts->sched_timer.expires, offset);
559 564
560 for (;;) { 565 for (;;) {
561 hrtimer_forward(&ts->sched_timer, now, tick_period); 566 hrtimer_forward(&ts->sched_timer, now, tick_period);
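
The tick_setup_sched_timer() change staggers the per-CPU tick hrtimers: each CPU offsets its first expiry by (tick_period / 2) / NR_CPUS * cpu, spreading the timers over half a period instead of letting every CPU fire on the same edge. A small sketch of the offset calculation, assuming HZ=250 (4 ms period) and 8 CPUs purely for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t tick_period_ns = 4000000;	/* 4 ms, i.e. HZ=250 */
	const unsigned nr_cpus = 8;
	unsigned cpu;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		uint64_t offset = (tick_period_ns >> 1) / nr_cpus * cpu;

		printf("cpu %u: first expiry offset %llu ns\n",
		       cpu, (unsigned long long)offset);
	}
	return 0;
}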
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 10c13ad0d82d..a7381d55663a 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -357,7 +357,8 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
357 * This is needed when we sync the memory. Then we sync the buffer if 357 * This is needed when we sync the memory. Then we sync the buffer if
358 * needed. 358 * needed.
359 */ 359 */
360 io_tlb_orig_addr[index] = buffer; 360 for (i = 0; i < nslots; i++)
361 io_tlb_orig_addr[index+i] = buffer + (i << IO_TLB_SHIFT);
361 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) 362 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
362 memcpy(dma_addr, buffer, size); 363 memcpy(dma_addr, buffer, size);
363 364
@@ -418,6 +419,8 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
418 int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; 419 int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
419 char *buffer = io_tlb_orig_addr[index]; 420 char *buffer = io_tlb_orig_addr[index];
420 421
422 buffer += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));
423
421 switch (target) { 424 switch (target) {
422 case SYNC_FOR_CPU: 425 case SYNC_FOR_CPU:
423 if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)) 426 if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
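
The swiotlb change records the matching original address for every bounce-buffer slot a mapping covers, so a later partial sync can reconstruct "original address + offset within the slot" from the DMA address alone instead of always copying from the start of the mapping. A userspace sketch of that bookkeeping; the slot size mirrors IO_TLB_SHIFT = 11 (2 KiB), everything else is made up for the demo.

#include <stdio.h>
#include <stdint.h>

#define IO_TLB_SHIFT	11
#define SLOT_SIZE	(1u << IO_TLB_SHIFT)
#define NSLOTS		16

static char *orig_addr[NSLOTS];		/* per-slot original buffer address */

int main(void)
{
	char buffer[3 * SLOT_SIZE];	/* a mapping covering three slots */
	unsigned index = 4;		/* first bounce slot used for it  */
	unsigned i, nslots = sizeof(buffer) >> IO_TLB_SHIFT;

	/* map_single(): remember where each slot's data came from */
	for (i = 0; i < nslots; i++)
		orig_addr[index + i] = buffer + (i << IO_TLB_SHIFT);

	/* sync_single(): given an address in the middle of the mapping,
	 * find the slot and re-apply the offset within that slot */
	uintptr_t dma_off = 2 * SLOT_SIZE + 123;	/* offset from slot "index" */
	unsigned slot = index + (dma_off >> IO_TLB_SHIFT);
	char *src = orig_addr[slot] + (dma_off & (SLOT_SIZE - 1));

	printf("sync resolves to buffer+%ld\n", (long)(src - buffer));
	return 0;
}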
diff --git a/mm/memory.c b/mm/memory.c
index 8aace3db3a54..ca8cac11bd2c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2659,8 +2659,6 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2659 return handle_pte_fault(mm, vma, address, pte, pmd, write_access); 2659 return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
2660} 2660}
2661 2661
2662EXPORT_SYMBOL_GPL(handle_mm_fault);
2663
2664#ifndef __PAGETABLE_PUD_FOLDED 2662#ifndef __PAGETABLE_PUD_FOLDED
2665/* 2663/*
2666 * Allocate page upper directory. 2664 * Allocate page upper directory.
diff --git a/mm/nommu.c b/mm/nommu.c
index 1b105d28949f..9eef6a398555 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -54,12 +54,6 @@ DECLARE_RWSEM(nommu_vma_sem);
54struct vm_operations_struct generic_file_vm_ops = { 54struct vm_operations_struct generic_file_vm_ops = {
55}; 55};
56 56
57EXPORT_SYMBOL(vfree);
58EXPORT_SYMBOL(vmalloc_to_page);
59EXPORT_SYMBOL(vmalloc_32);
60EXPORT_SYMBOL(vmap);
61EXPORT_SYMBOL(vunmap);
62
63/* 57/*
64 * Handle all mappings that got truncated by a "truncate()" 58 * Handle all mappings that got truncated by a "truncate()"
65 * system call. 59 * system call.
@@ -168,7 +162,6 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
168finish_or_fault: 162finish_or_fault:
169 return i ? : -EFAULT; 163 return i ? : -EFAULT;
170} 164}
171
172EXPORT_SYMBOL(get_user_pages); 165EXPORT_SYMBOL(get_user_pages);
173 166
174DEFINE_RWLOCK(vmlist_lock); 167DEFINE_RWLOCK(vmlist_lock);
@@ -178,6 +171,7 @@ void vfree(void *addr)
178{ 171{
179 kfree(addr); 172 kfree(addr);
180} 173}
174EXPORT_SYMBOL(vfree);
181 175
182void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) 176void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
183{ 177{
@@ -186,17 +180,19 @@ void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
186 */ 180 */
187 return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM); 181 return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
188} 182}
183EXPORT_SYMBOL(__vmalloc);
189 184
190struct page * vmalloc_to_page(void *addr) 185struct page * vmalloc_to_page(void *addr)
191{ 186{
192 return virt_to_page(addr); 187 return virt_to_page(addr);
193} 188}
189EXPORT_SYMBOL(vmalloc_to_page);
194 190
195unsigned long vmalloc_to_pfn(void *addr) 191unsigned long vmalloc_to_pfn(void *addr)
196{ 192{
197 return page_to_pfn(virt_to_page(addr)); 193 return page_to_pfn(virt_to_page(addr));
198} 194}
199 195EXPORT_SYMBOL(vmalloc_to_pfn);
200 196
201long vread(char *buf, char *addr, unsigned long count) 197long vread(char *buf, char *addr, unsigned long count)
202{ 198{
@@ -237,9 +233,8 @@ void *vmalloc_node(unsigned long size, int node)
237} 233}
238EXPORT_SYMBOL(vmalloc_node); 234EXPORT_SYMBOL(vmalloc_node);
239 235
240/* 236/**
241 * vmalloc_32 - allocate virtually continguos memory (32bit addressable) 237 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
242 *
243 * @size: allocation size 238 * @size: allocation size
244 * 239 *
245 * Allocate enough 32bit PA addressable pages to cover @size from the 240 * Allocate enough 32bit PA addressable pages to cover @size from the
@@ -249,17 +244,33 @@ void *vmalloc_32(unsigned long size)
249{ 244{
250 return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL); 245 return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
251} 246}
247EXPORT_SYMBOL(vmalloc_32);
248
249/**
250 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
251 * @size: allocation size
252 *
253 * The resulting memory area is 32bit addressable and zeroed so it can be
254 * mapped to userspace without leaking data.
255 */
256void *vmalloc_32_user(unsigned long size)
257{
258 return __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
259}
260EXPORT_SYMBOL(vmalloc_32_user);
252 261
253void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot) 262void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
254{ 263{
255 BUG(); 264 BUG();
256 return NULL; 265 return NULL;
257} 266}
267EXPORT_SYMBOL(vmap);
258 268
259void vunmap(void *addr) 269void vunmap(void *addr)
260{ 270{
261 BUG(); 271 BUG();
262} 272}
273EXPORT_SYMBOL(vunmap);
263 274
264/* 275/*
265 * Implement a stub for vmalloc_sync_all() if the architecture chose not to 276 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
@@ -269,6 +280,13 @@ void __attribute__((weak)) vmalloc_sync_all(void)
269{ 280{
270} 281}
271 282
283int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
284 struct page *page)
285{
286 return -EINVAL;
287}
288EXPORT_SYMBOL(vm_insert_page);
289
272/* 290/*
273 * sys_brk() for the most part doesn't need the global kernel 291 * sys_brk() for the most part doesn't need the global kernel
274 * lock, except when an application is doing something nasty 292 * lock, except when an application is doing something nasty
@@ -994,6 +1012,7 @@ unsigned long do_mmap_pgoff(struct file *file,
994 show_free_areas(); 1012 show_free_areas();
995 return -ENOMEM; 1013 return -ENOMEM;
996} 1014}
1015EXPORT_SYMBOL(do_mmap_pgoff);
997 1016
998/* 1017/*
999 * handle mapping disposal for uClinux 1018 * handle mapping disposal for uClinux
@@ -1074,6 +1093,7 @@ int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
1074 1093
1075 return 0; 1094 return 0;
1076} 1095}
1096EXPORT_SYMBOL(do_munmap);
1077 1097
1078asmlinkage long sys_munmap(unsigned long addr, size_t len) 1098asmlinkage long sys_munmap(unsigned long addr, size_t len)
1079{ 1099{
@@ -1164,6 +1184,7 @@ unsigned long do_mremap(unsigned long addr,
1164 1184
1165 return vma->vm_start; 1185 return vma->vm_start;
1166} 1186}
1187EXPORT_SYMBOL(do_mremap);
1167 1188
1168asmlinkage unsigned long sys_mremap(unsigned long addr, 1189asmlinkage unsigned long sys_mremap(unsigned long addr,
1169 unsigned long old_len, unsigned long new_len, 1190 unsigned long old_len, unsigned long new_len,
@@ -1231,7 +1252,6 @@ unsigned long get_unmapped_area(struct file *file, unsigned long addr,
1231 1252
1232 return get_area(file, addr, len, pgoff, flags); 1253 return get_area(file, addr, len, pgoff, flags);
1233} 1254}
1234
1235EXPORT_SYMBOL(get_unmapped_area); 1255EXPORT_SYMBOL(get_unmapped_area);
1236 1256
1237/* 1257/*
@@ -1346,6 +1366,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1346 BUG(); 1366 BUG();
1347 return 0; 1367 return 0;
1348} 1368}
1369EXPORT_SYMBOL(filemap_fault);
1349 1370
1350/* 1371/*
1351 * Access another process' address space. 1372 * Access another process' address space.
diff --git a/mm/slob.c b/mm/slob.c
index d50920ecc02b..ec33fcdc852e 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -293,6 +293,7 @@ static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
293static void *slob_alloc(size_t size, gfp_t gfp, int align, int node) 293static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
294{ 294{
295 struct slob_page *sp; 295 struct slob_page *sp;
296 struct list_head *prev;
296 slob_t *b = NULL; 297 slob_t *b = NULL;
297 unsigned long flags; 298 unsigned long flags;
298 299
@@ -307,12 +308,22 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
307 if (node != -1 && page_to_nid(&sp->page) != node) 308 if (node != -1 && page_to_nid(&sp->page) != node)
308 continue; 309 continue;
309#endif 310#endif
311 /* Enough room on this page? */
312 if (sp->units < SLOB_UNITS(size))
313 continue;
310 314
311 if (sp->units >= SLOB_UNITS(size)) { 315 /* Attempt to alloc */
312 b = slob_page_alloc(sp, size, align); 316 prev = sp->list.prev;
313 if (b) 317 b = slob_page_alloc(sp, size, align);
314 break; 318 if (!b)
315 } 319 continue;
320
321 /* Improve fragment distribution and reduce our average
322 * search time by starting our next search here. (see
323 * Knuth vol 1, sec 2.5, pg 449) */
324 if (free_slob_pages.next != prev->next)
325 list_move_tail(&free_slob_pages, prev->next);
326 break;
316 } 327 }
317 spin_unlock_irqrestore(&slob_lock, flags); 328 spin_unlock_irqrestore(&slob_lock, flags);
318 329
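
The slob_alloc() rework cites Knuth (vol 1, sec 2.5): rather than restarting every search at the head of the free-page list, rotate the list so the next search begins near where the previous allocation succeeded, which spreads fragments more evenly and shortens the average scan. A standalone "next fit" sketch of the same idea, with a circular array of block sizes standing in for the list of partially free pages:

#include <stdio.h>

#define NBLOCKS 6

static int free_units[NBLOCKS] = { 2, 8, 1, 16, 4, 32 };
static int cursor;			/* where the next search starts */

static int alloc_units(int want)
{
	int i, idx;

	for (i = 0; i < NBLOCKS; i++) {
		idx = (cursor + i) % NBLOCKS;
		if (free_units[idx] < want)
			continue;	/* not enough room on this "page" */
		free_units[idx] -= want;
		cursor = idx;		/* start the next search here */
		return idx;
	}
	return -1;			/* nothing large enough */
}

int main(void)
{
	printf("8 units from block %d\n", alloc_units(8));	/* block 1         */
	printf("8 units from block %d\n", alloc_units(8));	/* block 3, not 0  */
	printf("4 units from block %d\n", alloc_units(4));	/* block 3 again   */
	return 0;
}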