-rw-r--r--Documentation/crypto/async-tx-api.txt219
-rw-r--r--Documentation/devices.txt2
-rw-r--r--Documentation/lguest/lguest.c2
-rw-r--r--Documentation/lockstat.txt120
-rw-r--r--Documentation/sysrq.txt2
-rw-r--r--Makefile2
-rw-r--r--arch/arm/kernel/bios32.c4
-rw-r--r--arch/arm/mach-s3c2440/mach-osiris.c18
-rw-r--r--arch/blackfin/kernel/bfin_gpio.c285
-rw-r--r--arch/blackfin/mach-common/entry.S23
-rw-r--r--arch/i386/boot/memory.c39
-rw-r--r--arch/i386/kernel/cpu/cpufreq/longhaul.c60
-rw-r--r--arch/i386/xen/mmu.c5
-rw-r--r--arch/mips/au1000/common/pci.c1
-rw-r--r--arch/mips/au1000/mtx-1/board_setup.c4
-rw-r--r--arch/mips/au1000/pb1000/board_setup.c6
-rw-r--r--arch/mips/au1000/pb1100/board_setup.c4
-rw-r--r--arch/mips/au1000/pb1500/board_setup.c6
-rw-r--r--arch/mips/kernel/i8259.c5
-rw-r--r--arch/mips/kernel/irq-msc01.c10
-rw-r--r--arch/mips/kernel/irq.c10
-rw-r--r--arch/mips/kernel/scall64-o32.S2
-rw-r--r--arch/mips/kernel/smtc.c5
-rw-r--r--arch/mips/kernel/vmlinux.lds.S2
-rw-r--r--arch/mips/mm/pg-r4k.c2
-rw-r--r--arch/mips/pci/ops-mace.c21
-rw-r--r--arch/mips/sgi-ip32/ip32-platform.c4
-rw-r--r--arch/powerpc/boot/dts/mpc8349emitx.dts1
-rw-r--r--arch/powerpc/kernel/process.c7
-rw-r--r--arch/powerpc/platforms/83xx/usb.c4
-rw-r--r--arch/powerpc/platforms/cell/spufs/file.c4
-rw-r--r--arch/powerpc/platforms/pseries/xics.c2
-rw-r--r--arch/powerpc/sysdev/commproc.c2
-rw-r--r--arch/ppc/8xx_io/commproc.c2
-rw-r--r--arch/sparc/kernel/ebus.c2
-rw-r--r--arch/sparc64/kernel/binfmt_aout32.c4
-rw-r--r--arch/sparc64/kernel/ebus.c5
-rw-r--r--arch/sparc64/kernel/pci_common.c4
-rw-r--r--arch/sparc64/kernel/prom.c3
-rw-r--r--arch/sparc64/kernel/smp.c2
-rw-r--r--arch/sparc64/kernel/vio.c29
-rw-r--r--arch/sparc64/lib/NGcopy_from_user.S8
-rw-r--r--arch/sparc64/lib/NGcopy_to_user.S8
-rw-r--r--arch/sparc64/lib/NGmemcpy.S371
-rw-r--r--arch/x86_64/vdso/voffset.h2
-rw-r--r--crypto/async_tx/async_tx.c12
-rw-r--r--drivers/acpi/Kconfig1
-rw-r--r--drivers/acpi/hardware/hwsleep.c10
-rw-r--r--drivers/acpi/sleep/Makefile2
-rw-r--r--drivers/acpi/sleep/main.c51
-rw-r--r--drivers/acpi/tables/tbutils.c2
-rw-r--r--drivers/ata/ata_piix.c7
-rw-r--r--drivers/ata/pata_ixp4xx_cf.c3
-rw-r--r--drivers/ata/pata_marvell.c4
-rw-r--r--drivers/ata/pata_sis.c3
-rw-r--r--drivers/ata/sata_mv.c35
-rw-r--r--drivers/ata/sata_sil24.c16
-rw-r--r--drivers/base/core.c11
-rw-r--r--drivers/cdrom/cdrom.c4
-rw-r--r--drivers/char/Makefile2
-rw-r--r--drivers/char/agp/intel-agp.c5
-rw-r--r--drivers/char/drm/i915_drv.h6
-rw-r--r--drivers/char/drm/i915_irq.c12
-rw-r--r--drivers/char/hpet.c9
-rw-r--r--drivers/char/mspec.c26
-rw-r--r--drivers/char/random.c10
-rw-r--r--drivers/char/vt_ioctl.c19
-rw-r--r--drivers/firewire/Kconfig3
-rw-r--r--drivers/ide/ppc/pmac.c1
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c62
-rw-r--r--drivers/input/joystick/Kconfig2
-rw-r--r--drivers/input/mouse/appletouch.c6
-rw-r--r--drivers/isdn/i4l/isdn_common.c5
-rw-r--r--drivers/lguest/lguest_asm.S6
-rw-r--r--drivers/md/raid5.c17
-rw-r--r--drivers/media/video/ivtv/ivtv-fileops.c6
-rw-r--r--drivers/media/video/usbvision/usbvision-video.c5
-rw-r--r--drivers/misc/Kconfig2
-rw-r--r--drivers/misc/sony-laptop.c204
-rw-r--r--drivers/net/e1000/e1000_ethtool.c1
-rw-r--r--drivers/net/e1000/e1000_hw.c1
-rw-r--r--drivers/net/e1000/e1000_hw.h1
-rw-r--r--drivers/net/e1000/e1000_main.c2
-rw-r--r--drivers/net/mv643xx_eth.c5
-rw-r--r--drivers/net/mv643xx_eth.h4
-rw-r--r--drivers/net/pcmcia/3c589_cs.c2
-rw-r--r--drivers/net/ppp_mppe.c14
-rwxr-xr-xdrivers/net/qla3xxx.c7
-rw-r--r--drivers/net/r8169.c30
-rw-r--r--drivers/net/sky2.c87
-rw-r--r--drivers/net/sky2.h2
-rw-r--r--drivers/net/usb/dm9601.c2
-rw-r--r--drivers/net/wireless/Makefile2
-rw-r--r--drivers/net/wireless/bcm43xx/bcm43xx_wx.c2
-rw-r--r--drivers/pci/quirks.c7
-rw-r--r--drivers/power/power_supply_sysfs.c1
-rw-r--r--drivers/scsi/aic94xx/aic94xx_task.c4
-rw-r--r--drivers/scsi/esp_scsi.c3
-rw-r--r--drivers/scsi/megaraid.c8
-rw-r--r--drivers/scsi/scsi_transport_spi.c28
-rw-r--r--drivers/serial/cpm_uart/cpm_uart_cpm1.h2
-rw-r--r--drivers/serial/serial_cs.c1
-rw-r--r--drivers/serial/sunsab.c2
-rw-r--r--drivers/w1/w1.c1
-rw-r--r--fs/aio.c2
-rw-r--r--fs/binfmt_flat.c6
-rw-r--r--fs/compat_ioctl.c2
-rw-r--r--fs/lockd/svclock.c31
-rw-r--r--fs/nfs/client.c29
-rw-r--r--fs/nfs/dir.c2
-rw-r--r--fs/nfs/getroot.c3
-rw-r--r--fs/ocfs2/localalloc.c4
-rw-r--r--fs/splice.c46
-rw-r--r--fs/ufs/super.c4
-rw-r--r--fs/xfs/xfs_buf_item.h5
-rw-r--r--fs/xfs/xfs_log_recover.c51
-rw-r--r--fs/xfs/xfs_trans_buf.c1
-rw-r--r--include/acpi/acpi_drivers.h4
-rw-r--r--include/asm-blackfin/mach-bf533/bfin_serial_5xx.h11
-rw-r--r--include/asm-blackfin/mach-bf537/bfin_serial_5xx.h23
-rw-r--r--include/asm-blackfin/mach-bf537/portmux.h35
-rw-r--r--include/asm-blackfin/mach-bf561/bfin_serial_5xx.h11
-rw-r--r--include/asm-blackfin/portmux.h55
-rw-r--r--include/asm-blackfin/unistd.h56
-rw-r--r--include/asm-h8300/flat.h3
-rw-r--r--include/asm-i386/system.h5
-rw-r--r--include/asm-m32r/flat.h3
-rw-r--r--include/asm-m68knommu/flat.h3
-rw-r--r--include/asm-mips/cmpxchg.h107
-rw-r--r--include/asm-mips/fcntl.h1
-rw-r--r--include/asm-mips/irq.h32
-rw-r--r--include/asm-mips/local.h69
-rw-r--r--include/asm-mips/page.h2
-rw-r--r--include/asm-mips/system.h261
-rw-r--r--include/asm-sh/flat.h3
-rw-r--r--include/asm-v850/flat.h4
-rw-r--r--include/asm-x86_64/processor.h2
-rw-r--r--include/linux/cpufreq.h19
-rw-r--r--include/linux/sched.h2
-rw-r--r--include/linux/writeback.h2
-rw-r--r--include/net/rose.h2
-rw-r--r--include/net/sctp/sm.h4
-rw-r--r--include/net/sctp/structs.h3
-rw-r--r--include/net/tcp.h6
-rw-r--r--kernel/futex.c26
-rw-r--r--kernel/futex_compat.c28
-rw-r--r--kernel/power/Kconfig2
-rw-r--r--kernel/sched_fair.c10
-rw-r--r--kernel/signal.c22
-rw-r--r--kernel/sys.c2
-rw-r--r--kernel/time/timer_stats.c5
-rw-r--r--lib/Kconfig.debug4
-rw-r--r--lib/Makefile4
-rw-r--r--mm/Kconfig1
-rw-r--r--mm/filemap.c1
-rw-r--r--mm/fremap.c2
-rw-r--r--mm/hugetlb.c2
-rw-r--r--mm/memory.c23
-rw-r--r--mm/page-writeback.c4
-rw-r--r--net/ieee80211/ieee80211_rx.c6
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_assoc.c2
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_wx.c56
-rw-r--r--net/ipv4/tcp_input.c3
-rw-r--r--net/ipv4/tcp_ipv4.c19
-rw-r--r--net/ipv6/ndisc.c9
-rw-r--r--net/ipv6/tcp_ipv6.c18
-rw-r--r--net/mac80211/ieee80211.c2
-rw-r--r--net/mac80211/rc80211_simple.c2
-rw-r--r--net/mac80211/wme.c2
-rw-r--r--net/rose/rose_loopback.c4
-rw-r--r--net/rose/rose_route.c15
-rw-r--r--net/sched/cls_u32.c2
-rw-r--r--net/sched/sch_sfq.c47
-rw-r--r--net/sctp/bind_addr.c2
-rw-r--r--net/sctp/input.c8
-rw-r--r--net/sctp/inqueue.c8
-rw-r--r--net/sctp/sm_make_chunk.c46
-rw-r--r--net/sctp/sm_statefuns.c243
-rw-r--r--net/sctp/sm_statetable.c16
-rw-r--r--net/socket.c3
-rw-r--r--net/wireless/core.c2
-rw-r--r--net/wireless/sysfs.c2
-rw-r--r--sound/core/memalloc.c68
183 files changed, 2502 insertions, 1244 deletions
diff --git a/Documentation/crypto/async-tx-api.txt b/Documentation/crypto/async-tx-api.txt
new file mode 100644
index 000000000000..c1e9545c59bd
--- /dev/null
+++ b/Documentation/crypto/async-tx-api.txt
@@ -0,0 +1,219 @@
1 Asynchronous Transfers/Transforms API
2
31 INTRODUCTION
4
52 GENEALOGY
6
73 USAGE
83.1 General format of the API
93.2 Supported operations
103.3 Descriptor management
113.4 When does the operation execute?
123.5 When does the operation complete?
133.6 Constraints
143.7 Example
15
164 DRIVER DEVELOPER NOTES
174.1 Conformance points
184.2 "My application needs finer control of hardware channels"
19
205 SOURCE
21
22---
23
241 INTRODUCTION
25
26The async_tx API provides methods for describing a chain of asynchronous
27bulk memory transfers/transforms with support for inter-transactional
28dependencies. It is implemented as a dmaengine client that smooths over
29the details of different hardware offload engine implementations. Code
30that is written to the API can optimize for asynchronous operation and
31the API will fit the chain of operations to the available offload
32resources.
33
342 GENEALOGY
35
36The API was initially designed to offload the memory copy and
37xor-parity-calculations of the md-raid5 driver using the offload engines
38present in the Intel(R) Xscale series of I/O processors. It also built
39on the 'dmaengine' layer developed for offloading memory copies in the
40network stack using Intel(R) I/OAT engines. The following design
41features surfaced as a result:
421/ implicit synchronous path: users of the API do not need to know if
43 the platform they are running on has offload capabilities. The
44 operation will be offloaded when an engine is available and carried out
45 in software otherwise.
462/ cross channel dependency chains: the API allows a chain of dependent
47 operations to be submitted, like xor->copy->xor in the raid5 case. The
48 API automatically handles cases where the transition from one operation
49 to another implies a hardware channel switch.
503/ dmaengine extensions to support multiple clients and operation types
51 beyond 'memcpy'
52
533 USAGE
54
553.1 General format of the API:
56struct dma_async_tx_descriptor *
57async_<operation>(<op specific parameters>,
58 enum async_tx_flags flags,
59 struct dma_async_tx_descriptor *dependency,
60 dma_async_tx_callback callback_routine,
61 void *callback_parameter);
62
633.2 Supported operations:
64memcpy - memory copy between a source and a destination buffer
65memset - fill a destination buffer with a byte value
66xor - xor a series of source buffers and write the result to a
67 destination buffer
68xor_zero_sum - xor a series of source buffers and set a flag if the
69 result is zero. The implementation attempts to prevent
70 writes to memory
71
723.3 Descriptor management:
73The return value is non-NULL and points to a 'descriptor' when the operation
74has been queued to execute asynchronously. Descriptors are recycled
75resources, under control of the offload engine driver, to be reused as
76operations complete. When an application needs to submit a chain of
77operations it must guarantee that the descriptor is not automatically recycled
78before the dependency is submitted. This requires that all descriptors be
79acknowledged by the application before the offload engine driver is allowed to
80recycle (or free) the descriptor. A descriptor can be acked by one of the
81following methods:
821/ setting the ASYNC_TX_ACK flag if no child operations are to be submitted
832/ setting the ASYNC_TX_DEP_ACK flag to acknowledge the parent
84 descriptor of a new operation.
853/ calling async_tx_ack() on the descriptor.
86
873.4 When does the operation execute?
88Operations do not immediately issue after return from the
89async_<operation> call. Offload engine drivers batch operations to
90improve performance by reducing the number of mmio cycles needed to
91manage the channel. Once a driver-specific threshold is met the driver
92automatically issues pending operations. An application can force this
93event by calling async_tx_issue_pending_all(). This operates on all
94channels since the application has no knowledge of channel to operation
95mapping.
96
973.5 When does the operation complete?
98There are two methods for an application to learn about the completion
99of an operation.
1001/ Call dma_wait_for_async_tx(). This call causes the CPU to spin while
101 it polls for the completion of the operation. It handles dependency
102 chains and issuing pending operations.
1032/ Specify a completion callback. The callback routine runs in tasklet
104 context if the offload engine driver supports interrupts, or it is
105 called in application context if the operation is carried out
106 synchronously in software. The callback can be set in the call to
107 async_<operation>, or when the application needs to submit a chain of
108 unknown length it can use the async_trigger_callback() routine to set a
109 completion interrupt/callback at the end of the chain.
110
1113.6 Constraints:
1121/ Calls to async_<operation> are not permitted in IRQ context. Other
113 contexts are permitted provided constraint #2 is not violated.
1142/ Completion callback routines cannot submit new operations. This
115 results in recursion in the synchronous case and spin_locks being
116 acquired twice in the asynchronous case.
117
1183.7 Example:
119Perform a xor->copy->xor operation where each operation depends on the
120result from the previous operation:
121
122void complete_xor_copy_xor(void *param)
123{
124 printk("complete\n");
125}
126
127void run_xor_copy_xor(struct page **xor_srcs,
128 int xor_src_cnt,
129 struct page *xor_dest,
130 size_t xor_len,
131 struct page *copy_src,
132 struct page *copy_dest,
133 size_t copy_len)
134{
135 struct dma_async_tx_descriptor *tx;
136
137 tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len,
138 ASYNC_TX_XOR_DROP_DST, NULL, NULL, NULL);
139 tx = async_memcpy(copy_dest, copy_src, 0, 0, copy_len,
140 ASYNC_TX_DEP_ACK, tx, NULL, NULL);
141 tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len,
142 ASYNC_TX_XOR_DROP_DST | ASYNC_TX_DEP_ACK | ASYNC_TX_ACK,
143 tx, complete_xor_copy_xor, NULL);
144
145 async_tx_issue_pending_all();
146}
147
148See include/linux/async_tx.h for more information on the flags. See the
149ops_run_* and ops_complete_* routines in drivers/md/raid5.c for more
150implementation examples.
151
1524 DRIVER DEVELOPER NOTES
1534.1 Conformance points:
154There are a few conformance points required in dmaengine drivers to
155accommodate assumptions made by applications using the async_tx API:
1561/ Completion callbacks are expected to happen in tasklet context
1572/ dma_async_tx_descriptor fields are never manipulated in IRQ context
1583/ Use async_tx_run_dependencies() in the descriptor clean up path to
159 handle submission of dependent operations
160
1614.2 "My application needs finer control of hardware channels"
162This requirement seems to arise from cases where a DMA engine driver is
163trying to support device-to-memory DMA. The dmaengine and async_tx
164implementations were designed for offloading memory-to-memory
165operations; however, there are some capabilities of the dmaengine layer
166that can be used for platform-specific channel management.
167Platform-specific constraints can be handled by registering the
168application as a 'dma_client' and implementing a 'dma_event_callback' to
169apply a filter to the available channels in the system. Before showing
170how to implement a custom dma_event_callback, some background on
171dmaengine's client support is required.
172
173The following routines in dmaengine support multiple clients requesting
174use of a channel:
175- dma_async_client_register(struct dma_client *client)
176- dma_async_client_chan_request(struct dma_client *client)
177
178dma_async_client_register takes a pointer to an initialized dma_client
179structure. It expects that the 'event_callback' and 'cap_mask' fields
180are already initialized.
181
182dma_async_client_chan_request triggers dmaengine to notify the client of
183all channels that satisfy the capability mask. It is up to the client's
184event_callback routine to track how many channels the client needs and
185how many it is currently using. The dma_event_callback routine returns a
186dma_state_client code to let dmaengine know the status of the
187allocation.
188
189Below is an example of how to extend this functionality for
190platform-specific filtering of the available channels beyond the
191standard capability mask:
192
193static enum dma_state_client
194my_dma_client_callback(struct dma_client *client,
195 struct dma_chan *chan, enum dma_state state)
196{
197 struct dma_device *dma_dev;
198 struct my_platform_specific_dma *plat_dma_dev;
199
200 dma_dev = chan->device;
201 plat_dma_dev = container_of(dma_dev,
202 struct my_platform_specific_dma,
203 dma_dev);
204
205 if (!plat_dma_dev->platform_specific_capability)
206 return DMA_DUP;
207
208 . . .
209}
210
2115 SOURCE
212include/linux/dmaengine.h: core header file for DMA drivers and clients
213drivers/dma/dmaengine.c: offload engine channel management routines
214drivers/dma/: location for offload engine drivers
215include/linux/async_tx.h: core header file for the async_tx api
216crypto/async_tx/async_tx.c: async_tx interface to dmaengine and common code
217crypto/async_tx/async_memcpy.c: copy offload
218crypto/async_tx/async_memset.c: memory fill offload
219crypto/async_tx/async_xor.c: xor and xor zero sum offload
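
A minimal sketch of the registration side that complements the dma_event_callback
example in section 4.2, using only the dma_async_client_register() /
dma_async_client_chan_request() routines and the 'event_callback' / 'cap_mask'
fields described above; the my_client / my_client_init names and the DMA_MEMCPY
capability choice are illustrative assumptions, not part of the documented API:

#include <linux/dmaengine.h>

static struct dma_client my_client;

/* body is the platform-specific filter shown in section 4.2 */
static enum dma_state_client
my_dma_client_callback(struct dma_client *client,
		       struct dma_chan *chan, enum dma_state state);

static int __init my_client_init(void)
{
	/* advertise the operation types of interest;
	   DMA_MEMCPY here is only an example choice */
	dma_cap_zero(my_client.cap_mask);
	dma_cap_set(DMA_MEMCPY, my_client.cap_mask);
	my_client.event_callback = my_dma_client_callback;

	/* register and ask dmaengine to offer every channel that
	   satisfies cap_mask; the callback then returns a
	   dma_state_client code for each offered channel */
	dma_async_client_register(&my_client);
	dma_async_client_chan_request(&my_client);

	return 0;
}

Channel acceptance or rejection is handled entirely inside the event callback,
as the DMA_DUP return in the section 4.2 example illustrates.
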
diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index 8de132a02ba9..6c46730c631a 100644
--- a/Documentation/devices.txt
+++ b/Documentation/devices.txt
@@ -94,6 +94,8 @@ Your cooperation is appreciated.
94 9 = /dev/urandom Faster, less secure random number gen. 94 9 = /dev/urandom Faster, less secure random number gen.
95 10 = /dev/aio Asynchronous I/O notification interface 95 10 = /dev/aio Asynchronous I/O notification interface
96 11 = /dev/kmsg Writes to this come out as printk's 96 11 = /dev/kmsg Writes to this come out as printk's
97 12 = /dev/oldmem Used by crashdump kernels to access
98 the memory of the kernel that crashed.
97 99
98 1 block RAM disk 100 1 block RAM disk
99 0 = /dev/ram0 First RAM disk 101 0 = /dev/ram0 First RAM disk
diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c
index f7918401a007..73c5f1f3d5d2 100644
--- a/Documentation/lguest/lguest.c
+++ b/Documentation/lguest/lguest.c
@@ -882,7 +882,7 @@ static u32 handle_block_output(int fd, const struct iovec *iov,
882 * of the block file (possibly extending it). */ 882 * of the block file (possibly extending it). */
883 if (off + len > device_len) { 883 if (off + len > device_len) {
884 /* Trim it back to the correct length */ 884 /* Trim it back to the correct length */
885 ftruncate(dev->fd, device_len); 885 ftruncate64(dev->fd, device_len);
886 /* Die, bad Guest, die. */ 886 /* Die, bad Guest, die. */
887 errx(1, "Write past end %llu+%u", off, len); 887 errx(1, "Write past end %llu+%u", off, len);
888 } 888 }
diff --git a/Documentation/lockstat.txt b/Documentation/lockstat.txt
new file mode 100644
index 000000000000..4ba4664ce5c3
--- /dev/null
+++ b/Documentation/lockstat.txt
@@ -0,0 +1,120 @@
1
2LOCK STATISTICS
3
4- WHAT
5
6As the name suggests, it provides statistics on locks.
7
8- WHY
9
10Because things like lock contention can severely impact performance.
11
12- HOW
13
14Lockdep already has hooks in the lock functions and maps lock instances to
15lock classes. We build on that. The graph below shows the relation between
16the lock functions and the various hooks therein.
17
18 __acquire
19 |
20 lock _____
21 | \
22 | __contended
23 | |
24 | <wait>
25 | _______/
26 |/
27 |
28 __acquired
29 |
30 .
31 <hold>
32 .
33 |
34 __release
35 |
36 unlock
37
38lock, unlock - the regular lock functions
39__* - the hooks
40<> - states
41
42With these hooks we provide the following statistics:
43
 44 con-bounces - number of lock contentions that involved x-cpu data
45 contentions - number of lock acquisitions that had to wait
46 wait time min - shortest (non-0) time we ever had to wait for a lock
47 max - longest time we ever had to wait for a lock
48 total - total time we spend waiting on this lock
49 acq-bounces - number of lock acquisitions that involved x-cpu data
50 acquisitions - number of times we took the lock
51 hold time min - shortest (non-0) time we ever held the lock
52 max - longest time we ever held the lock
53 total - total time this lock was held
54
 55From these numbers various other statistics can be derived, such as:
56
57 hold time average = hold time total / acquisitions
58
59These numbers are gathered per lock class, per read/write state (when
60applicable).
61
62It also tracks 4 contention points per class. A contention point is a call site
63that had to wait on lock acquisition.
64
65 - USAGE
66
67Look at the current lock statistics:
68
69( line numbers not part of actual output, done for clarity in the explanation
70 below )
71
72# less /proc/lock_stat
73
7401 lock_stat version 0.2
7502 -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
7603 class name con-bounces contentions waittime-min waittime-max waittime-total acq-bounces acquisitions holdtime-min holdtime-max holdtime-total
7704 -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
7805
7906 &inode->i_data.tree_lock-W: 15 21657 0.18 1093295.30 11547131054.85 58 10415 0.16 87.51 6387.60
8007 &inode->i_data.tree_lock-R: 0 0 0.00 0.00 0.00 23302 231198 0.25 8.45 98023.38
8108 --------------------------
8209 &inode->i_data.tree_lock 0 [<ffffffff8027c08f>] add_to_page_cache+0x5f/0x190
8310
8411 ...............................................................................................................................................................................................
8512
8613 dcache_lock: 1037 1161 0.38 45.32 774.51 6611 243371 0.15 306.48 77387.24
8714 -----------
8815 dcache_lock 180 [<ffffffff802c0d7e>] sys_getcwd+0x11e/0x230
8916 dcache_lock 165 [<ffffffff802c002a>] d_alloc+0x15a/0x210
9017 dcache_lock 33 [<ffffffff8035818d>] _atomic_dec_and_lock+0x4d/0x70
9118 dcache_lock 1 [<ffffffff802beef8>] shrink_dcache_parent+0x18/0x130
92
 93This excerpt shows the first two lock class statistics. Line 01 shows the
 94output version - each time the format changes this will be updated. Lines 02-04
 95show the header with column descriptions. Lines 05-10 and 13-18 show the actual
 96statistics. These statistics come in two parts: the actual stats, separated by a
 97short separator (lines 08 and 14) from the contention points.
 98
 99The first lock (05-10) is a read/write lock, and shows two lines above the
 100short separator. The contention points don't match the column descriptors;
 101they have two columns: the number of contentions and the [<IP>] call-site symbol.
102
103
104View the top contending locks:
105
106# grep : /proc/lock_stat | head
107 &inode->i_data.tree_lock-W: 15 21657 0.18 1093295.30 11547131054.85 58 10415 0.16 87.51 6387.60
108 &inode->i_data.tree_lock-R: 0 0 0.00 0.00 0.00 23302 231198 0.25 8.45 98023.38
109 dcache_lock: 1037 1161 0.38 45.32 774.51 6611 243371 0.15 306.48 77387.24
110 &inode->i_mutex: 161 286 18446744073709 62882.54 1244614.55 3653 20598 18446744073709 62318.60 1693822.74
111 &zone->lru_lock: 94 94 0.53 7.33 92.10 4366 32690 0.29 59.81 16350.06
112 &inode->i_data.i_mmap_lock: 79 79 0.40 3.77 53.03 11779 87755 0.28 116.93 29898.44
113 &q->__queue_lock: 48 50 0.52 31.62 86.31 774 13131 0.17 113.08 12277.52
114 &rq->rq_lock_key: 43 47 0.74 68.50 170.63 3706 33929 0.22 107.99 17460.62
115 &rq->rq_lock_key#2: 39 46 0.75 6.68 49.03 2979 32292 0.17 125.17 17137.63
116 tasklist_lock-W: 15 15 1.45 10.87 32.70 1201 7390 0.58 62.55 13648.47
117
118Clear the statistics:
119
120# echo 0 > /proc/lock_stat
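
A quick worked example of the derived statistics described above, using the
dcache_lock row of the sample output (line 13):

	hold time average = holdtime-total / acquisitions = 77387.24 / 243371 ~= 0.32
	wait time average = waittime-total / contentions  =   774.51 /   1161 ~= 0.67

Both averages are in the same time units as the holdtime/waittime columns themselves.
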
diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt
index ef19142896ca..10c8f6922ef4 100644
--- a/Documentation/sysrq.txt
+++ b/Documentation/sysrq.txt
@@ -43,7 +43,7 @@ On x86 - You press the key combo 'ALT-SysRq-<command key>'. Note - Some
43 keyboards may not have a key labeled 'SysRq'. The 'SysRq' key is 43 keyboards may not have a key labeled 'SysRq'. The 'SysRq' key is
44 also known as the 'Print Screen' key. Also some keyboards cannot 44 also known as the 'Print Screen' key. Also some keyboards cannot
45 handle so many keys being pressed at the same time, so you might 45 handle so many keys being pressed at the same time, so you might
46 have better luck with "press Alt", "press SysRq", "release Alt", 46 have better luck with "press Alt", "press SysRq", "release SysRq",
47 "press <command key>", release everything. 47 "press <command key>", release everything.
48 48
49On SPARC - You press 'ALT-STOP-<command key>', I believe. 49On SPARC - You press 'ALT-STOP-<command key>', I believe.
diff --git a/Makefile b/Makefile
index c265e41ec55a..4635a64da36c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 23 3SUBLEVEL = 23
4EXTRAVERSION =-rc7 4EXTRAVERSION =
5NAME = Arr Matey! A Hairy Bilge Rat! 5NAME = Arr Matey! A Hairy Bilge Rat!
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 240c448ec31c..a2dd930d11ef 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -338,7 +338,7 @@ pbus_assign_bus_resources(struct pci_bus *bus, struct pci_sys_data *root)
338 * pcibios_fixup_bus - Called after each bus is probed, 338 * pcibios_fixup_bus - Called after each bus is probed,
339 * but before its children are examined. 339 * but before its children are examined.
340 */ 340 */
341void __devinit pcibios_fixup_bus(struct pci_bus *bus) 341void pcibios_fixup_bus(struct pci_bus *bus)
342{ 342{
343 struct pci_sys_data *root = bus->sysdata; 343 struct pci_sys_data *root = bus->sysdata;
344 struct pci_dev *dev; 344 struct pci_dev *dev;
@@ -419,7 +419,7 @@ void __devinit pcibios_fixup_bus(struct pci_bus *bus)
419/* 419/*
420 * Convert from Linux-centric to bus-centric addresses for bridge devices. 420 * Convert from Linux-centric to bus-centric addresses for bridge devices.
421 */ 421 */
422void __devinit 422void
423pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, 423pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
424 struct resource *res) 424 struct resource *res)
425{ 425{
diff --git a/arch/arm/mach-s3c2440/mach-osiris.c b/arch/arm/mach-s3c2440/mach-osiris.c
index 0ba7e9060c7b..c326983f4a8f 100644
--- a/arch/arm/mach-s3c2440/mach-osiris.c
+++ b/arch/arm/mach-s3c2440/mach-osiris.c
@@ -276,7 +276,21 @@ static unsigned char pm_osiris_ctrl0;
276 276
277static int osiris_pm_suspend(struct sys_device *sd, pm_message_t state) 277static int osiris_pm_suspend(struct sys_device *sd, pm_message_t state)
278{ 278{
279 unsigned int tmp;
280
279 pm_osiris_ctrl0 = __raw_readb(OSIRIS_VA_CTRL0); 281 pm_osiris_ctrl0 = __raw_readb(OSIRIS_VA_CTRL0);
282 tmp = pm_osiris_ctrl0 & ~OSIRIS_CTRL0_NANDSEL;
283
284 /* ensure correct NAND slot is selected on resume */
285 if ((pm_osiris_ctrl0 & OSIRIS_CTRL0_BOOT_INT) == 0)
286 tmp |= 2;
287
288 __raw_writeb(tmp, OSIRIS_VA_CTRL0);
289
290 /* ensure that an nRESET is not generated on resume. */
291 s3c2410_gpio_setpin(S3C2410_GPA21, 1);
292 s3c2410_gpio_cfgpin(S3C2410_GPA21, S3C2410_GPA21_OUT);
293
280 return 0; 294 return 0;
281} 295}
282 296
@@ -285,6 +299,10 @@ static int osiris_pm_resume(struct sys_device *sd)
285 if (pm_osiris_ctrl0 & OSIRIS_CTRL0_FIX8) 299 if (pm_osiris_ctrl0 & OSIRIS_CTRL0_FIX8)
286 __raw_writeb(OSIRIS_CTRL1_FIX8, OSIRIS_VA_CTRL1); 300 __raw_writeb(OSIRIS_CTRL1_FIX8, OSIRIS_VA_CTRL1);
287 301
302 __raw_writeb(pm_osiris_ctrl0, OSIRIS_VA_CTRL0);
303
304 s3c2410_gpio_cfgpin(S3C2410_GPA21, S3C2410_GPA21_nRSTOUT);
305
288 return 0; 306 return 0;
289} 307}
290 308
diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c
index bafcfa52142b..5d488ef965ce 100644
--- a/arch/blackfin/kernel/bfin_gpio.c
+++ b/arch/blackfin/kernel/bfin_gpio.c
@@ -84,6 +84,7 @@
84#include <linux/err.h> 84#include <linux/err.h>
85#include <asm/blackfin.h> 85#include <asm/blackfin.h>
86#include <asm/gpio.h> 86#include <asm/gpio.h>
87#include <asm/portmux.h>
87#include <linux/irq.h> 88#include <linux/irq.h>
88 89
89#ifdef BF533_FAMILY 90#ifdef BF533_FAMILY
@@ -115,7 +116,11 @@ static struct gpio_port_t *gpio_bankb[gpio_bank(MAX_BLACKFIN_GPIOS)] = {
115}; 116};
116#endif 117#endif
117 118
118static unsigned short reserved_map[gpio_bank(MAX_BLACKFIN_GPIOS)]; 119static unsigned short reserved_gpio_map[gpio_bank(MAX_BLACKFIN_GPIOS)];
120static unsigned short reserved_peri_map[gpio_bank(MAX_BLACKFIN_GPIOS + 16)];
121char *str_ident = NULL;
122
123#define RESOURCE_LABEL_SIZE 16
119 124
120#ifdef CONFIG_PM 125#ifdef CONFIG_PM
121static unsigned short wakeup_map[gpio_bank(MAX_BLACKFIN_GPIOS)]; 126static unsigned short wakeup_map[gpio_bank(MAX_BLACKFIN_GPIOS)];
@@ -143,22 +148,100 @@ inline int check_gpio(unsigned short gpio)
143 return 0; 148 return 0;
144} 149}
145 150
151static void set_label(unsigned short ident, const char *label)
152{
153
154 if (label && str_ident) {
155 strncpy(str_ident + ident * RESOURCE_LABEL_SIZE, label,
156 RESOURCE_LABEL_SIZE);
157 str_ident[ident * RESOURCE_LABEL_SIZE +
158 RESOURCE_LABEL_SIZE - 1] = 0;
159 }
160}
161
162static char *get_label(unsigned short ident)
163{
164 if (!str_ident)
165 return "UNKNOWN";
166
167 return (str_ident[ident * RESOURCE_LABEL_SIZE] ?
168 (str_ident + ident * RESOURCE_LABEL_SIZE) : "UNKNOWN");
169}
170
171static int cmp_label(unsigned short ident, const char *label)
172{
173 if (label && str_ident)
174 return strncmp(str_ident + ident * RESOURCE_LABEL_SIZE,
175 label, strlen(label));
176 else
177 return -EINVAL;
178}
179
146#ifdef BF537_FAMILY 180#ifdef BF537_FAMILY
147static void port_setup(unsigned short gpio, unsigned short usage) 181static void port_setup(unsigned short gpio, unsigned short usage)
148{ 182{
149 if (usage == GPIO_USAGE) { 183 if (!check_gpio(gpio)) {
150 if (*port_fer[gpio_bank(gpio)] & gpio_bit(gpio)) 184 if (usage == GPIO_USAGE) {
151 printk(KERN_WARNING "bfin-gpio: Possible Conflict with Peripheral " 185 *port_fer[gpio_bank(gpio)] &= ~gpio_bit(gpio);
152 "usage and GPIO %d detected!\n", gpio); 186 } else
153 *port_fer[gpio_bank(gpio)] &= ~gpio_bit(gpio); 187 *port_fer[gpio_bank(gpio)] |= gpio_bit(gpio);
154 } else 188 SSYNC();
155 *port_fer[gpio_bank(gpio)] |= gpio_bit(gpio); 189 }
156 SSYNC();
157} 190}
158#else 191#else
159# define port_setup(...) do { } while (0) 192# define port_setup(...) do { } while (0)
160#endif 193#endif
161 194
195#ifdef BF537_FAMILY
196
197#define PMUX_LUT_RES 0
198#define PMUX_LUT_OFFSET 1
199#define PMUX_LUT_ENTRIES 41
200#define PMUX_LUT_SIZE 2
201
202static unsigned short port_mux_lut[PMUX_LUT_ENTRIES][PMUX_LUT_SIZE] = {
203 {P_PPI0_D13, 11}, {P_PPI0_D14, 11}, {P_PPI0_D15, 11},
204 {P_SPORT1_TFS, 11}, {P_SPORT1_TSCLK, 11}, {P_SPORT1_DTPRI, 11},
205 {P_PPI0_D10, 10}, {P_PPI0_D11, 10}, {P_PPI0_D12, 10},
206 {P_SPORT1_RSCLK, 10}, {P_SPORT1_RFS, 10}, {P_SPORT1_DRPRI, 10},
207 {P_PPI0_D8, 9}, {P_PPI0_D9, 9}, {P_SPORT1_DRSEC, 9},
208 {P_SPORT1_DTSEC, 9}, {P_TMR2, 8}, {P_PPI0_FS3, 8}, {P_TMR3, 7},
209 {P_SPI0_SSEL4, 7}, {P_TMR4, 6}, {P_SPI0_SSEL5, 6}, {P_TMR5, 5},
210 {P_SPI0_SSEL6, 5}, {P_UART1_RX, 4}, {P_UART1_TX, 4}, {P_TMR6, 4},
211 {P_TMR7, 4}, {P_UART0_RX, 3}, {P_UART0_TX, 3}, {P_DMAR0, 3},
212 {P_DMAR1, 3}, {P_SPORT0_DTSEC, 1}, {P_SPORT0_DRSEC, 1},
213 {P_CAN0_RX, 1}, {P_CAN0_TX, 1}, {P_SPI0_SSEL7, 1},
214 {P_SPORT0_TFS, 0}, {P_SPORT0_DTPRI, 0}, {P_SPI0_SSEL2, 0},
215 {P_SPI0_SSEL3, 0}
216};
217
218static void portmux_setup(unsigned short per, unsigned short function)
219{
220 u16 y, muxreg, offset;
221
222 for (y = 0; y < PMUX_LUT_ENTRIES; y++) {
223 if (port_mux_lut[y][PMUX_LUT_RES] == per) {
224
225 /* SET PORTMUX REG */
226
227 offset = port_mux_lut[y][PMUX_LUT_OFFSET];
228 muxreg = bfin_read_PORT_MUX();
229
230 if (offset != 1) {
231 muxreg &= ~(1 << offset);
232 } else {
233 muxreg &= ~(3 << 1);
234 }
235
236 muxreg |= (function << offset);
237 bfin_write_PORT_MUX(muxreg);
238 }
239 }
240}
241
242#else
243# define portmux_setup(...) do { } while (0)
244#endif
162 245
163static void default_gpio(unsigned short gpio) 246static void default_gpio(unsigned short gpio)
164{ 247{
@@ -179,22 +262,15 @@ static void default_gpio(unsigned short gpio)
179 262
180static int __init bfin_gpio_init(void) 263static int __init bfin_gpio_init(void)
181{ 264{
182 int i;
183
184 printk(KERN_INFO "Blackfin GPIO Controller\n");
185 265
186 for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) 266 str_ident = kzalloc(RESOURCE_LABEL_SIZE * 256, GFP_KERNEL);
187 reserved_map[gpio_bank(i)] = 0; 267 if (!str_ident)
268 return -ENOMEM;
188 269
189#if defined(BF537_FAMILY) && (defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)) 270 printk(KERN_INFO "Blackfin GPIO Controller\n");
190# if defined(CONFIG_BFIN_MAC_RMII)
191 reserved_map[gpio_bank(PORT_H)] = 0xC373;
192# else
193 reserved_map[gpio_bank(PORT_H)] = 0xFFFF;
194# endif
195#endif
196 271
197 return 0; 272 return 0;
273
198} 274}
199 275
200arch_initcall(bfin_gpio_init); 276arch_initcall(bfin_gpio_init);
@@ -223,7 +299,7 @@ arch_initcall(bfin_gpio_init);
223void set_gpio_ ## name(unsigned short gpio, unsigned short arg) \ 299void set_gpio_ ## name(unsigned short gpio, unsigned short arg) \
224{ \ 300{ \
225 unsigned long flags; \ 301 unsigned long flags; \
226 BUG_ON(!(reserved_map[gpio_bank(gpio)] & gpio_bit(gpio))); \ 302 BUG_ON(!(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio))); \
227 local_irq_save(flags); \ 303 local_irq_save(flags); \
228 if (arg) \ 304 if (arg) \
229 gpio_bankb[gpio_bank(gpio)]->name |= gpio_bit(gpio); \ 305 gpio_bankb[gpio_bank(gpio)]->name |= gpio_bit(gpio); \
@@ -243,7 +319,7 @@ SET_GPIO(both)
243#define SET_GPIO_SC(name) \ 319#define SET_GPIO_SC(name) \
244void set_gpio_ ## name(unsigned short gpio, unsigned short arg) \ 320void set_gpio_ ## name(unsigned short gpio, unsigned short arg) \
245{ \ 321{ \
246 BUG_ON(!(reserved_map[gpio_bank(gpio)] & gpio_bit(gpio))); \ 322 BUG_ON(!(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio))); \
247 if (arg) \ 323 if (arg) \
248 gpio_bankb[gpio_bank(gpio)]->name ## _set = gpio_bit(gpio); \ 324 gpio_bankb[gpio_bank(gpio)]->name ## _set = gpio_bit(gpio); \
249 else \ 325 else \
@@ -258,7 +334,7 @@ SET_GPIO_SC(maskb)
258void set_gpio_data(unsigned short gpio, unsigned short arg) 334void set_gpio_data(unsigned short gpio, unsigned short arg)
259{ 335{
260 unsigned long flags; 336 unsigned long flags;
261 BUG_ON(!(reserved_map[gpio_bank(gpio)] & gpio_bit(gpio))); 337 BUG_ON(!(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio)));
262 local_irq_save(flags); 338 local_irq_save(flags);
263 if (arg) 339 if (arg)
264 gpio_bankb[gpio_bank(gpio)]->data_set = gpio_bit(gpio); 340 gpio_bankb[gpio_bank(gpio)]->data_set = gpio_bit(gpio);
@@ -277,7 +353,7 @@ SET_GPIO_SC(data)
277void set_gpio_toggle(unsigned short gpio) 353void set_gpio_toggle(unsigned short gpio)
278{ 354{
279 unsigned long flags; 355 unsigned long flags;
280 BUG_ON(!(reserved_map[gpio_bank(gpio)] & gpio_bit(gpio))); 356 BUG_ON(!(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio)));
281 local_irq_save(flags); 357 local_irq_save(flags);
282 gpio_bankb[gpio_bank(gpio)]->toggle = gpio_bit(gpio); 358 gpio_bankb[gpio_bank(gpio)]->toggle = gpio_bit(gpio);
283 bfin_read_CHIPID(); 359 bfin_read_CHIPID();
@@ -286,7 +362,7 @@ void set_gpio_toggle(unsigned short gpio)
286#else 362#else
287void set_gpio_toggle(unsigned short gpio) 363void set_gpio_toggle(unsigned short gpio)
288{ 364{
289 BUG_ON(!(reserved_map[gpio_bank(gpio)] & gpio_bit(gpio))); 365 BUG_ON(!(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio)));
290 gpio_bankb[gpio_bank(gpio)]->toggle = gpio_bit(gpio); 366 gpio_bankb[gpio_bank(gpio)]->toggle = gpio_bit(gpio);
291} 367}
292#endif 368#endif
@@ -350,7 +426,7 @@ unsigned short get_gpio_data(unsigned short gpio)
350{ 426{
351 unsigned long flags; 427 unsigned long flags;
352 unsigned short ret; 428 unsigned short ret;
353 BUG_ON(!(reserved_map[gpio_bank(gpio)] & gpio_bit(gpio))); 429 BUG_ON(!(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio)));
354 local_irq_save(flags); 430 local_irq_save(flags);
355 ret = 0x01 & (gpio_bankb[gpio_bank(gpio)]->data >> gpio_sub_n(gpio)); 431 ret = 0x01 & (gpio_bankb[gpio_bank(gpio)]->data >> gpio_sub_n(gpio));
356 bfin_read_CHIPID(); 432 bfin_read_CHIPID();
@@ -494,13 +570,14 @@ u32 gpio_pm_setup(void)
494 gpio_bank_saved[bank].dir = gpio_bankb[bank]->dir; 570 gpio_bank_saved[bank].dir = gpio_bankb[bank]->dir;
495 gpio_bank_saved[bank].edge = gpio_bankb[bank]->edge; 571 gpio_bank_saved[bank].edge = gpio_bankb[bank]->edge;
496 gpio_bank_saved[bank].both = gpio_bankb[bank]->both; 572 gpio_bank_saved[bank].both = gpio_bankb[bank]->both;
497 gpio_bank_saved[bank].reserved = reserved_map[bank]; 573 gpio_bank_saved[bank].reserved =
574 reserved_gpio_map[bank];
498 575
499 gpio = i; 576 gpio = i;
500 577
501 while (mask) { 578 while (mask) {
502 if (mask & 1) { 579 if (mask & 1) {
503 reserved_map[gpio_bank(gpio)] |= 580 reserved_gpio_map[gpio_bank(gpio)] |=
504 gpio_bit(gpio); 581 gpio_bit(gpio);
505 bfin_gpio_wakeup_type(gpio, 582 bfin_gpio_wakeup_type(gpio,
506 wakeup_flags_map[gpio]); 583 wakeup_flags_map[gpio]);
@@ -540,7 +617,8 @@ void gpio_pm_restore(void)
540 gpio_bankb[bank]->edge = gpio_bank_saved[bank].edge; 617 gpio_bankb[bank]->edge = gpio_bank_saved[bank].edge;
541 gpio_bankb[bank]->both = gpio_bank_saved[bank].both; 618 gpio_bankb[bank]->both = gpio_bank_saved[bank].both;
542 619
543 reserved_map[bank] = gpio_bank_saved[bank].reserved; 620 reserved_gpio_map[bank] =
621 gpio_bank_saved[bank].reserved;
544 622
545 } 623 }
546 624
@@ -550,6 +628,141 @@ void gpio_pm_restore(void)
550 628
551#endif 629#endif
552 630
631
632
633
634int peripheral_request(unsigned short per, const char *label)
635{
636 unsigned long flags;
637 unsigned short ident = P_IDENT(per);
638
639 /*
640 * Don't cares are pins with only one dedicated function
641 */
642
643 if (per & P_DONTCARE)
644 return 0;
645
646 if (!(per & P_DEFINED))
647 return -ENODEV;
648
649 local_irq_save(flags);
650
651 if (!check_gpio(ident)) {
652
653 if (unlikely(reserved_gpio_map[gpio_bank(ident)] & gpio_bit(ident))) {
654 printk(KERN_ERR
655 "%s: Peripheral %d is already reserved as GPIO by %s !\n",
656 __FUNCTION__, ident, get_label(ident));
657 dump_stack();
658 local_irq_restore(flags);
659 return -EBUSY;
660 }
661
662 }
663
664 if (unlikely(reserved_peri_map[gpio_bank(ident)] & gpio_bit(ident))) {
665
666 /*
 667 * Pin functions like AMC address strobes may
 668 * be requested and used by several drivers
669 */
670
671 if (!(per & P_MAYSHARE)) {
672
673 /*
674 * Allow that the identical pin function can
675 * be requested from the same driver twice
676 */
677
678 if (cmp_label(ident, label) == 0)
679 goto anyway;
680
681 printk(KERN_ERR
 682 "%s: Peripheral %d function %d is already "
 683 "reserved by %s !\n",
684 __FUNCTION__, ident, P_FUNCT2MUX(per),
685 get_label(ident));
686 dump_stack();
687 local_irq_restore(flags);
688 return -EBUSY;
689 }
690
691 }
692
693anyway:
694
695
696 portmux_setup(per, P_FUNCT2MUX(per));
697
698 port_setup(ident, PERIPHERAL_USAGE);
699
700 reserved_peri_map[gpio_bank(ident)] |= gpio_bit(ident);
701 local_irq_restore(flags);
702 set_label(ident, label);
703
704 return 0;
705}
706EXPORT_SYMBOL(peripheral_request);
707
708int peripheral_request_list(unsigned short per[], const char *label)
709{
710 u16 cnt;
711 int ret;
712
713 for (cnt = 0; per[cnt] != 0; cnt++) {
714 ret = peripheral_request(per[cnt], label);
715 if (ret < 0)
716 return ret;
717 }
718
719 return 0;
720}
721EXPORT_SYMBOL(peripheral_request_list);
722
723void peripheral_free(unsigned short per)
724{
725 unsigned long flags;
726 unsigned short ident = P_IDENT(per);
727
728 if (per & P_DONTCARE)
729 return;
730
731 if (!(per & P_DEFINED))
732 return;
733
734 if (check_gpio(ident) < 0)
735 return;
736
737 local_irq_save(flags);
738
739 if (unlikely(!(reserved_peri_map[gpio_bank(ident)]
740 & gpio_bit(ident)))) {
741 local_irq_restore(flags);
742 return;
743 }
744
745 if (!(per & P_MAYSHARE)) {
746 port_setup(ident, GPIO_USAGE);
747 }
748
749 reserved_peri_map[gpio_bank(ident)] &= ~gpio_bit(ident);
750
751 local_irq_restore(flags);
752}
753EXPORT_SYMBOL(peripheral_free);
754
755void peripheral_free_list(unsigned short per[])
756{
757 u16 cnt;
758
759 for (cnt = 0; per[cnt] != 0; cnt++) {
760 peripheral_free(per[cnt]);
761 }
762
763}
764EXPORT_SYMBOL(peripheral_free_list);
765
553/*********************************************************** 766/***********************************************************
554* 767*
555* FUNCTIONS: Blackfin GPIO Driver 768* FUNCTIONS: Blackfin GPIO Driver
@@ -574,13 +787,13 @@ int gpio_request(unsigned short gpio, const char *label)
574 787
575 local_irq_save(flags); 788 local_irq_save(flags);
576 789
577 if (unlikely(reserved_map[gpio_bank(gpio)] & gpio_bit(gpio))) { 790 if (unlikely(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio))) {
578 printk(KERN_ERR "bfin-gpio: GPIO %d is already reserved!\n", gpio); 791 printk(KERN_ERR "bfin-gpio: GPIO %d is already reserved!\n", gpio);
579 dump_stack(); 792 dump_stack();
580 local_irq_restore(flags); 793 local_irq_restore(flags);
581 return -EBUSY; 794 return -EBUSY;
582 } 795 }
583 reserved_map[gpio_bank(gpio)] |= gpio_bit(gpio); 796 reserved_gpio_map[gpio_bank(gpio)] |= gpio_bit(gpio);
584 797
585 local_irq_restore(flags); 798 local_irq_restore(flags);
586 799
@@ -599,7 +812,7 @@ void gpio_free(unsigned short gpio)
599 812
600 local_irq_save(flags); 813 local_irq_save(flags);
601 814
602 if (unlikely(!(reserved_map[gpio_bank(gpio)] & gpio_bit(gpio)))) { 815 if (unlikely(!(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio)))) {
603 printk(KERN_ERR "bfin-gpio: GPIO %d wasn't reserved!\n", gpio); 816 printk(KERN_ERR "bfin-gpio: GPIO %d wasn't reserved!\n", gpio);
604 dump_stack(); 817 dump_stack();
605 local_irq_restore(flags); 818 local_irq_restore(flags);
@@ -608,7 +821,7 @@ void gpio_free(unsigned short gpio)
608 821
609 default_gpio(gpio); 822 default_gpio(gpio);
610 823
611 reserved_map[gpio_bank(gpio)] &= ~gpio_bit(gpio); 824 reserved_gpio_map[gpio_bank(gpio)] &= ~gpio_bit(gpio);
612 825
613 local_irq_restore(flags); 826 local_irq_restore(flags);
614} 827}
@@ -618,7 +831,7 @@ void gpio_direction_input(unsigned short gpio)
618{ 831{
619 unsigned long flags; 832 unsigned long flags;
620 833
621 BUG_ON(!(reserved_map[gpio_bank(gpio)] & gpio_bit(gpio))); 834 BUG_ON(!(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio)));
622 835
623 local_irq_save(flags); 836 local_irq_save(flags);
624 gpio_bankb[gpio_bank(gpio)]->dir &= ~gpio_bit(gpio); 837 gpio_bankb[gpio_bank(gpio)]->dir &= ~gpio_bit(gpio);
@@ -631,7 +844,7 @@ void gpio_direction_output(unsigned short gpio)
631{ 844{
632 unsigned long flags; 845 unsigned long flags;
633 846
634 BUG_ON(!(reserved_map[gpio_bank(gpio)] & gpio_bit(gpio))); 847 BUG_ON(!(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio)));
635 848
636 local_irq_save(flags); 849 local_irq_save(flags);
637 gpio_bankb[gpio_bank(gpio)]->inen &= ~gpio_bit(gpio); 850 gpio_bankb[gpio_bank(gpio)]->inen &= ~gpio_bit(gpio);
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index d61bba98fb54..960458808344 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -815,7 +815,7 @@ _extable:
815 815
816ALIGN 816ALIGN
817ENTRY(_sys_call_table) 817ENTRY(_sys_call_table)
818 .long _sys_ni_syscall /* 0 - old "setup()" system call*/ 818 .long _sys_restart_syscall /* 0 */
819 .long _sys_exit 819 .long _sys_exit
820 .long _sys_fork 820 .long _sys_fork
821 .long _sys_read 821 .long _sys_read
@@ -978,13 +978,13 @@ ENTRY(_sys_call_table)
978 .long _sys_sched_get_priority_min /* 160 */ 978 .long _sys_sched_get_priority_min /* 160 */
979 .long _sys_sched_rr_get_interval 979 .long _sys_sched_rr_get_interval
980 .long _sys_nanosleep 980 .long _sys_nanosleep
981 .long _sys_ni_syscall /* sys_mremap */ 981 .long _sys_mremap
982 .long _sys_setresuid /* setresuid16 */ 982 .long _sys_setresuid /* setresuid16 */
983 .long _sys_getresuid /* getresuid16 */ /* 165 */ 983 .long _sys_getresuid /* getresuid16 */ /* 165 */
984 .long _sys_ni_syscall /* for vm86 */ 984 .long _sys_ni_syscall /* for vm86 */
985 .long _sys_ni_syscall /* old "query_module" */ 985 .long _sys_ni_syscall /* old "query_module" */
986 .long _sys_ni_syscall /* sys_poll */ 986 .long _sys_ni_syscall /* sys_poll */
987 .long _sys_ni_syscall /* sys_nfsservctl */ 987 .long _sys_nfsservctl
988 .long _sys_setresgid /* setresgid16 */ /* 170 */ 988 .long _sys_setresgid /* setresgid16 */ /* 170 */
989 .long _sys_getresgid /* getresgid16 */ 989 .long _sys_getresgid /* getresgid16 */
990 .long _sys_prctl 990 .long _sys_prctl
@@ -1040,7 +1040,7 @@ ENTRY(_sys_call_table)
1040 .long _sys_ni_syscall /* reserved for TUX */ 1040 .long _sys_ni_syscall /* reserved for TUX */
1041 .long _sys_ni_syscall 1041 .long _sys_ni_syscall
1042 .long _sys_gettid 1042 .long _sys_gettid
1043 .long _sys_ni_syscall /* 225 */ /* sys_readahead */ 1043 .long _sys_readahead /* 225 */
1044 .long _sys_setxattr 1044 .long _sys_setxattr
1045 .long _sys_lsetxattr 1045 .long _sys_lsetxattr
1046 .long _sys_fsetxattr 1046 .long _sys_fsetxattr
@@ -1157,6 +1157,21 @@ ENTRY(_sys_call_table)
1157 .long _sys_shmctl 1157 .long _sys_shmctl
1158 .long _sys_shmdt /* 340 */ 1158 .long _sys_shmdt /* 340 */
1159 .long _sys_shmget 1159 .long _sys_shmget
1160 .long _sys_splice
1161 .long _sys_sync_file_range
1162 .long _sys_tee
1163 .long _sys_vmsplice /* 345 */
1164 .long _sys_epoll_pwait
1165 .long _sys_utimensat
1166 .long _sys_signalfd
1167 .long _sys_timerfd
1168 .long _sys_eventfd /* 350 */
1169 .long _sys_pread64
1170 .long _sys_pwrite64
1171 .long _sys_fadvise64
1172 .long _sys_set_robust_list
1173 .long _sys_get_robust_list /* 355 */
1174 .long _sys_fallocate
1160 .rept NR_syscalls-(.-_sys_call_table)/4 1175 .rept NR_syscalls-(.-_sys_call_table)/4
1161 .long _sys_ni_syscall 1176 .long _sys_ni_syscall
1162 .endr 1177 .endr
diff --git a/arch/i386/boot/memory.c b/arch/i386/boot/memory.c
index 1a2e62db8bed..378353956b5d 100644
--- a/arch/i386/boot/memory.c
+++ b/arch/i386/boot/memory.c
@@ -20,6 +20,7 @@
20 20
21static int detect_memory_e820(void) 21static int detect_memory_e820(void)
22{ 22{
23 int count = 0;
23 u32 next = 0; 24 u32 next = 0;
24 u32 size, id; 25 u32 size, id;
25 u8 err; 26 u8 err;
@@ -27,20 +28,33 @@ static int detect_memory_e820(void)
27 28
28 do { 29 do {
29 size = sizeof(struct e820entry); 30 size = sizeof(struct e820entry);
30 id = SMAP; 31
32 /* Important: %edx is clobbered by some BIOSes,
33 so it must be either used for the error output
34 or explicitly marked clobbered. */
31 asm("int $0x15; setc %0" 35 asm("int $0x15; setc %0"
32 : "=am" (err), "+b" (next), "+d" (id), "+c" (size), 36 : "=d" (err), "+b" (next), "=a" (id), "+c" (size),
33 "=m" (*desc) 37 "=m" (*desc)
34 : "D" (desc), "a" (0xe820)); 38 : "D" (desc), "d" (SMAP), "a" (0xe820));
39
40 /* Some BIOSes stop returning SMAP in the middle of
41 the search loop. We don't know exactly how the BIOS
42 screwed up the map at that point, we might have a
43 partial map, the full map, or complete garbage, so
44 just return failure. */
45 if (id != SMAP) {
46 count = 0;
47 break;
48 }
35 49
36 if (err || id != SMAP) 50 if (err)
37 break; 51 break;
38 52
39 boot_params.e820_entries++; 53 count++;
40 desc++; 54 desc++;
41 } while (next && boot_params.e820_entries < E820MAX); 55 } while (next && count < E820MAX);
42 56
43 return boot_params.e820_entries; 57 return boot_params.e820_entries = count;
44} 58}
45 59
46static int detect_memory_e801(void) 60static int detect_memory_e801(void)
@@ -89,11 +103,16 @@ static int detect_memory_88(void)
89 103
90int detect_memory(void) 104int detect_memory(void)
91{ 105{
106 int err = -1;
107
92 if (detect_memory_e820() > 0) 108 if (detect_memory_e820() > 0)
93 return 0; 109 err = 0;
94 110
95 if (!detect_memory_e801()) 111 if (!detect_memory_e801())
96 return 0; 112 err = 0;
113
114 if (!detect_memory_88())
115 err = 0;
97 116
98 return detect_memory_88(); 117 return err;
99} 118}
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.c b/arch/i386/kernel/cpu/cpufreq/longhaul.c
index ef8f0bc3fc71..f0cce3c2dc3a 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.c
@@ -76,6 +76,7 @@ static unsigned int longhaul_index;
76/* Module parameters */ 76/* Module parameters */
77static int scale_voltage; 77static int scale_voltage;
78static int disable_acpi_c3; 78static int disable_acpi_c3;
79static int revid_errata;
79 80
80#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "longhaul", msg) 81#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "longhaul", msg)
81 82
@@ -168,7 +169,10 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index,
168 169
169 rdmsrl(MSR_VIA_LONGHAUL, longhaul.val); 170 rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
170 /* Setup new frequency */ 171 /* Setup new frequency */
171 longhaul.bits.RevisionKey = longhaul.bits.RevisionID; 172 if (!revid_errata)
173 longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
174 else
175 longhaul.bits.RevisionKey = 0;
172 longhaul.bits.SoftBusRatio = clock_ratio_index & 0xf; 176 longhaul.bits.SoftBusRatio = clock_ratio_index & 0xf;
173 longhaul.bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4; 177 longhaul.bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4;
174 /* Setup new voltage */ 178 /* Setup new voltage */
@@ -272,7 +276,7 @@ static void longhaul_setstate(unsigned int table_index)
272 276
273 dprintk ("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", 277 dprintk ("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
274 fsb, mult/10, mult%10, print_speed(speed/1000)); 278 fsb, mult/10, mult%10, print_speed(speed/1000));
275 279retry_loop:
276 preempt_disable(); 280 preempt_disable();
277 local_irq_save(flags); 281 local_irq_save(flags);
278 282
@@ -344,6 +348,47 @@ static void longhaul_setstate(unsigned int table_index)
344 preempt_enable(); 348 preempt_enable();
345 349
346 freqs.new = calc_speed(longhaul_get_cpu_mult()); 350 freqs.new = calc_speed(longhaul_get_cpu_mult());
351 /* Check if requested frequency is set. */
352 if (unlikely(freqs.new != speed)) {
353 printk(KERN_INFO PFX "Failed to set requested frequency!\n");
354 /* Revision ID = 1 but processor is expecting revision key
355 * equal to 0. Jumpers at the bottom of processor will change
356 * multiplier and FSB, but will not change bits in Longhaul
357 * MSR nor enable voltage scaling. */
358 if (!revid_errata) {
359 printk(KERN_INFO PFX "Enabling \"Ignore Revision ID\" "
360 "option.\n");
361 revid_errata = 1;
362 msleep(200);
363 goto retry_loop;
364 }
365 /* Why ACPI C3 sometimes doesn't work is a mystery for me.
366 * But it does happen. Processor is entering ACPI C3 state,
367 * but it doesn't change frequency. I tried poking various
368 * bits in northbridge registers, but without success. */
369 if (longhaul_flags & USE_ACPI_C3) {
370 printk(KERN_INFO PFX "Disabling ACPI C3 support.\n");
371 longhaul_flags &= ~USE_ACPI_C3;
372 if (revid_errata) {
373 printk(KERN_INFO PFX "Disabling \"Ignore "
374 "Revision ID\" option.\n");
375 revid_errata = 0;
376 }
377 msleep(200);
378 goto retry_loop;
379 }
380 /* This shouldn't happen. Longhaul ver. 2 was reported not
381 * working on processors without voltage scaling, but with
382 * RevID = 1. RevID errata will make things right. Just
383 * to be 100% sure. */
384 if (longhaul_version == TYPE_LONGHAUL_V2) {
385 printk(KERN_INFO PFX "Switching to Longhaul ver. 1\n");
386 longhaul_version = TYPE_LONGHAUL_V1;
387 msleep(200);
388 goto retry_loop;
389 }
390 }
391 /* Report true CPU frequency */
347 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 392 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
348 393
349 if (!bm_timeout) 394 if (!bm_timeout)
@@ -956,11 +1001,20 @@ static void __exit longhaul_exit(void)
956 kfree(longhaul_table); 1001 kfree(longhaul_table);
957} 1002}
958 1003
1004/* Even if the BIOS exports an ACPI C3 state and it is used
1005 * successfully when the CPU is idle, this state doesn't
1006 * trigger a frequency transition in some cases. */
959module_param (disable_acpi_c3, int, 0644); 1007module_param (disable_acpi_c3, int, 0644);
960MODULE_PARM_DESC(disable_acpi_c3, "Don't use ACPI C3 support"); 1008MODULE_PARM_DESC(disable_acpi_c3, "Don't use ACPI C3 support");
961 1009/* Change CPU voltage with frequency. Very useful to save
 1010 * power, but most VIA C3 processors don't support it. */
962module_param (scale_voltage, int, 0644); 1011module_param (scale_voltage, int, 0644);
963MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor"); 1012MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor");
 1013/* Force revision key to 0 for processors which don't
 1014 * support voltage scaling but identify themselves as
 1015 * such. */
1016module_param(revid_errata, int, 0644);
1017MODULE_PARM_DESC(revid_errata, "Ignore CPU Revision ID");
964 1018
965MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>"); 1019MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>");
966MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors."); 1020MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors.");
diff --git a/arch/i386/xen/mmu.c b/arch/i386/xen/mmu.c
index 4ae038aa6c24..874db0cd1d2a 100644
--- a/arch/i386/xen/mmu.c
+++ b/arch/i386/xen/mmu.c
@@ -559,6 +559,9 @@ void xen_exit_mmap(struct mm_struct *mm)
559 put_cpu(); 559 put_cpu();
560 560
561 spin_lock(&mm->page_table_lock); 561 spin_lock(&mm->page_table_lock);
562 xen_pgd_unpin(mm->pgd); 562
563 /* pgd may not be pinned in the error exit path of execve */
564 if (PagePinned(virt_to_page(mm->pgd)))
565 xen_pgd_unpin(mm->pgd);
563 spin_unlock(&mm->page_table_lock); 566 spin_unlock(&mm->page_table_lock);
564} 567}
diff --git a/arch/mips/au1000/common/pci.c b/arch/mips/au1000/common/pci.c
index 6c25e6c09f78..9be99a68932a 100644
--- a/arch/mips/au1000/common/pci.c
+++ b/arch/mips/au1000/common/pci.c
@@ -74,6 +74,7 @@ static int __init au1x_pci_setup(void)
74 printk(KERN_ERR "Unable to ioremap pci space\n"); 74 printk(KERN_ERR "Unable to ioremap pci space\n");
75 return 1; 75 return 1;
76 } 76 }
77 au1x_controller.io_map_base = virt_io_addr;
77 78
78#ifdef CONFIG_DMA_NONCOHERENT 79#ifdef CONFIG_DMA_NONCOHERENT
79 { 80 {
diff --git a/arch/mips/au1000/mtx-1/board_setup.c b/arch/mips/au1000/mtx-1/board_setup.c
index 7bc5af8917da..2c460c116570 100644
--- a/arch/mips/au1000/mtx-1/board_setup.c
+++ b/arch/mips/au1000/mtx-1/board_setup.c
@@ -54,11 +54,11 @@ void board_reset (void)
54 54
55void __init board_setup(void) 55void __init board_setup(void)
56{ 56{
57#ifdef CONFIG_USB_OHCI 57#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
58 // enable USB power switch 58 // enable USB power switch
59 au_writel( au_readl(GPIO2_DIR) | 0x10, GPIO2_DIR ); 59 au_writel( au_readl(GPIO2_DIR) | 0x10, GPIO2_DIR );
60 au_writel( 0x100000, GPIO2_OUTPUT ); 60 au_writel( 0x100000, GPIO2_OUTPUT );
61#endif // defined (CONFIG_USB_OHCI) 61#endif /* defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) */
62 62
63#ifdef CONFIG_PCI 63#ifdef CONFIG_PCI
64#if defined(__MIPSEB__) 64#if defined(__MIPSEB__)
diff --git a/arch/mips/au1000/pb1000/board_setup.c b/arch/mips/au1000/pb1000/board_setup.c
index 824cfafaff92..0aed89114bfc 100644
--- a/arch/mips/au1000/pb1000/board_setup.c
+++ b/arch/mips/au1000/pb1000/board_setup.c
@@ -54,7 +54,7 @@ void __init board_setup(void)
54 au_writel(0, SYS_PINSTATERD); 54 au_writel(0, SYS_PINSTATERD);
55 udelay(100); 55 udelay(100);
56 56
57#ifdef CONFIG_USB_OHCI 57#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
58 /* zero and disable FREQ2 */ 58 /* zero and disable FREQ2 */
59 sys_freqctrl = au_readl(SYS_FREQCTRL0); 59 sys_freqctrl = au_readl(SYS_FREQCTRL0);
60 sys_freqctrl &= ~0xFFF00000; 60 sys_freqctrl &= ~0xFFF00000;
@@ -102,7 +102,7 @@ void __init board_setup(void)
102 /* 102 /*
103 * Route 48MHz FREQ2 into USB Host and/or Device 103 * Route 48MHz FREQ2 into USB Host and/or Device
104 */ 104 */
105#ifdef CONFIG_USB_OHCI 105#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
106 sys_clksrc |= ((4<<12) | (0<<11) | (0<<10)); 106 sys_clksrc |= ((4<<12) | (0<<11) | (0<<10));
107#endif 107#endif
108 au_writel(sys_clksrc, SYS_CLKSRC); 108 au_writel(sys_clksrc, SYS_CLKSRC);
@@ -116,7 +116,7 @@ void __init board_setup(void)
116 au_writel(pin_func, SYS_PINFUNC); 116 au_writel(pin_func, SYS_PINFUNC);
117 au_writel(0x2800, SYS_TRIOUTCLR); 117 au_writel(0x2800, SYS_TRIOUTCLR);
118 au_writel(0x0030, SYS_OUTPUTCLR); 118 au_writel(0x0030, SYS_OUTPUTCLR);
119#endif // defined (CONFIG_USB_OHCI) 119#endif /* defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) */
120 120
121 // make gpio 15 an input (for interrupt line) 121 // make gpio 15 an input (for interrupt line)
122 pin_func = au_readl(SYS_PINFUNC) & (u32)(~0x100); 122 pin_func = au_readl(SYS_PINFUNC) & (u32)(~0x100);
diff --git a/arch/mips/au1000/pb1100/board_setup.c b/arch/mips/au1000/pb1100/board_setup.c
index 6bc1f8e1b608..259ca05860c3 100644
--- a/arch/mips/au1000/pb1100/board_setup.c
+++ b/arch/mips/au1000/pb1100/board_setup.c
@@ -54,7 +54,7 @@ void __init board_setup(void)
 	au_writel(0, SYS_PININPUTEN);
 	udelay(100);
 
-#ifdef CONFIG_USB_OHCI
+#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
 	{
 		u32 pin_func, sys_freqctrl, sys_clksrc;
 
@@ -98,7 +98,7 @@ void __init board_setup(void)
 		pin_func |= 0x8000;
 		au_writel(pin_func, SYS_PINFUNC);
 	}
-#endif // defined (CONFIG_USB_OHCI)
+#endif /* defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) */
 
 	/* Enable sys bus clock divider when IDLE state or no bus activity. */
 	au_writel(au_readl(SYS_POWERCTRL) | (0x3 << 5), SYS_POWERCTRL);
diff --git a/arch/mips/au1000/pb1500/board_setup.c b/arch/mips/au1000/pb1500/board_setup.c
index c9b655616fb3..a2d850db8902 100644
--- a/arch/mips/au1000/pb1500/board_setup.c
+++ b/arch/mips/au1000/pb1500/board_setup.c
@@ -56,7 +56,7 @@ void __init board_setup(void)
 	au_writel(0, SYS_PINSTATERD);
 	udelay(100);
 
-#ifdef CONFIG_USB_OHCI
+#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
 
 	/* GPIO201 is input for PCMCIA card detect */
 	/* GPIO203 is input for PCMCIA interrupt request */
@@ -85,7 +85,7 @@ void __init board_setup(void)
 	/*
 	 * Route 48MHz FREQ2 into USB Host and/or Device
 	 */
-#ifdef CONFIG_USB_OHCI
+#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
 	sys_clksrc |= ((4<<12) | (0<<11) | (0<<10));
 #endif
 	au_writel(sys_clksrc, SYS_CLKSRC);
@@ -95,7 +95,7 @@ void __init board_setup(void)
 	// 2nd USB port is USB host
 	pin_func |= 0x8000;
 	au_writel(pin_func, SYS_PINFUNC);
-#endif // defined (CONFIG_USB_OHCI)
+#endif /* defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) */
 
 
 
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index b6c30800c667..3a2d255361bc 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -177,10 +177,7 @@ handle_real_irq:
 		outb(cached_master_mask, PIC_MASTER_IMR);
 		outb(0x60+irq,PIC_MASTER_CMD);	/* 'Specific EOI to master */
 	}
-#ifdef CONFIG_MIPS_MT_SMTC
-	if (irq_hwmask[irq] & ST0_IM)
-		set_c0_status(irq_hwmask[irq] & ST0_IM);
-#endif /* CONFIG_MIPS_MT_SMTC */
+	smtc_im_ack_irq(irq);
 	spin_unlock_irqrestore(&i8259A_lock, flags);
 	return;
 
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index 410868b5ea5f..1ecdd50bfc60 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -52,11 +52,8 @@ static void level_mask_and_ack_msc_irq(unsigned int irq)
 	mask_msc_irq(irq);
 	if (!cpu_has_veic)
 		MSCIC_WRITE(MSC01_IC_EOI, 0);
-#ifdef CONFIG_MIPS_MT_SMTC
 	/* This actually needs to be a call into platform code */
-	if (irq_hwmask[irq] & ST0_IM)
-		set_c0_status(irq_hwmask[irq] & ST0_IM);
-#endif /* CONFIG_MIPS_MT_SMTC */
+	smtc_im_ack_irq(irq);
 }
 
 /*
@@ -73,10 +70,7 @@ static void edge_mask_and_ack_msc_irq(unsigned int irq)
 		MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT);
 		MSCIC_WRITE(MSC01_IC_SUP+irq*8, r);
 	}
-#ifdef CONFIG_MIPS_MT_SMTC
-	if (irq_hwmask[irq] & ST0_IM)
-		set_c0_status(irq_hwmask[irq] & ST0_IM);
-#endif /* CONFIG_MIPS_MT_SMTC */
+	smtc_im_ack_irq(irq);
 }
 
 /*
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index aeded6c17de5..a990aad2f049 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -74,20 +74,12 @@ EXPORT_SYMBOL_GPL(free_irqno);
  */
 void ack_bad_irq(unsigned int irq)
 {
+	smtc_im_ack_irq(irq);
 	printk("unexpected IRQ # %d\n", irq);
 }
 
 atomic_t irq_err_count;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-/*
- * SMTC Kernel needs to manipulate low-level CPU interrupt mask
- * in do_IRQ. These are passed in setup_irq_smtc() and stored
- * in this table.
- */
-unsigned long irq_hwmask[NR_IRQS];
-#endif /* CONFIG_MIPS_MT_SMTC */
-
 /*
  * Generic, controller-independent functions:
  */
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index b3ed731a24c6..dd68afce7da5 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -525,5 +525,5 @@ sys_call_table:
 	PTR	compat_sys_signalfd
 	PTR	compat_sys_timerfd
 	PTR	sys_eventfd
-	PTR	sys_fallocate			/* 4320 */
+	PTR	sys32_fallocate			/* 4320 */
 	.size	sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 43826c16101d..f09404377ef1 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -25,8 +25,11 @@
 #include <asm/smtc_proc.h>
 
 /*
- * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set.
+ * SMTC Kernel needs to manipulate low-level CPU interrupt mask
+ * in do_IRQ. These are passed in setup_irq_smtc() and stored
+ * in this table.
  */
+unsigned long irq_hwmask[NR_IRQS];
 
 #define LOCK_MT_PRA() \
 	local_irq_save(flags); \
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 60bbaecde187..087ab997487d 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -45,6 +45,8 @@ SECTIONS
 	__dbe_table : { *(__dbe_table) }
 	__stop___dbe_table = .;
 
+	NOTES
+
 	RODATA
 
 	/* writeable */
diff --git a/arch/mips/mm/pg-r4k.c b/arch/mips/mm/pg-r4k.c
index dc795be62807..e47e9e9486bf 100644
--- a/arch/mips/mm/pg-r4k.c
+++ b/arch/mips/mm/pg-r4k.c
@@ -209,7 +209,7 @@ static inline void build_cdex_p(void)
 	}
 
 	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
-		build_insn_word(0x3c01a000);	/* lui $at, 0xa000 */
+		build_insn_word(0x8c200000);	/* lw $zero, ($at) */
 
 	mi.c_format.opcode = cache_op;
 	mi.c_format.rs = 4;		/* $a0 */
diff --git a/arch/mips/pci/ops-mace.c b/arch/mips/pci/ops-mace.c
index 8008e31c5e81..fe5451449304 100644
--- a/arch/mips/pci/ops-mace.c
+++ b/arch/mips/pci/ops-mace.c
@@ -29,22 +29,20 @@
  * 4 N/C
  */
 
-#define chkslot(_bus,_devfn) \
-do { \
-	if ((_bus)->number > 0 || PCI_SLOT (_devfn) < 1 \
-	    || PCI_SLOT (_devfn) > 3) \
-		return PCIBIOS_DEVICE_NOT_FOUND; \
-} while (0)
+static inline int mkaddr(struct pci_bus *bus, unsigned int devfn,
+			 unsigned int reg)
+{
+	return ((bus->number & 0xff) << 16) |
+		((devfn & 0xff) << 8) |
+		(reg & 0xfc);
+}
 
-#define mkaddr(_devfn, _reg) \
-((((_devfn) & 0xffUL) << 8) | ((_reg) & 0xfcUL))
 
 static int
 mace_pci_read_config(struct pci_bus *bus, unsigned int devfn,
 		     int reg, int size, u32 *val)
 {
-	chkslot(bus, devfn);
-	mace->pci.config_addr = mkaddr(devfn, reg);
+	mace->pci.config_addr = mkaddr(bus, devfn, reg);
 	switch (size) {
 	case 1:
 		*val = mace->pci.config_data.b[(reg & 3) ^ 3];
@@ -66,8 +64,7 @@ static int
 mace_pci_write_config(struct pci_bus *bus, unsigned int devfn,
 		      int reg, int size, u32 val)
 {
-	chkslot(bus, devfn);
-	mace->pci.config_addr = mkaddr(devfn, reg);
+	mace->pci.config_addr = mkaddr(bus, devfn, reg);
 	switch (size) {
 	case 1:
 		mace->pci.config_data.b[(reg & 3) ^ 3] = val;
diff --git a/arch/mips/sgi-ip32/ip32-platform.c b/arch/mips/sgi-ip32/ip32-platform.c
index ba3697ee7ff6..7309e48d163d 100644
--- a/arch/mips/sgi-ip32/ip32-platform.c
+++ b/arch/mips/sgi-ip32/ip32-platform.c
@@ -41,8 +41,8 @@ static struct platform_device uart8250_device = {
 
 static int __init uart8250_init(void)
 {
-	uart8250_data[0].iobase = (unsigned long) &mace->isa.serial1;
-	uart8250_data[1].iobase = (unsigned long) &mace->isa.serial1;
+	uart8250_data[0].membase = (void __iomem *) &mace->isa.serial1;
+	uart8250_data[1].membase = (void __iomem *) &mace->isa.serial1;
 
 	return platform_device_register(&uart8250_device);
 }
diff --git a/arch/powerpc/boot/dts/mpc8349emitx.dts b/arch/powerpc/boot/dts/mpc8349emitx.dts
index 502f47c01797..44c065a6b5e7 100644
--- a/arch/powerpc/boot/dts/mpc8349emitx.dts
+++ b/arch/powerpc/boot/dts/mpc8349emitx.dts
@@ -99,6 +99,7 @@
 			#size-cells = <0>;
 			interrupt-parent = < &ipic >;
 			interrupts = <26 8>;
+			dr_mode = "peripheral";
 			phy_type = "ulpi";
 		};
 
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index e477c9d0498b..8a1b001d0b11 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -605,6 +605,13 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 	regs->ccr = 0;
 	regs->gpr[1] = sp;
 
+	/*
+	 * We have just cleared all the nonvolatile GPRs, so make
+	 * FULL_REGS(regs) return true. This is necessary to allow
+	 * ptrace to examine the thread immediately after exec.
+	 */
+	regs->trap &= ~1UL;
+
 #ifdef CONFIG_PPC32
 	regs->mq = 0;
 	regs->nip = start;
diff --git a/arch/powerpc/platforms/83xx/usb.c b/arch/powerpc/platforms/83xx/usb.c
index e7fdf013cd39..eafe7605cdac 100644
--- a/arch/powerpc/platforms/83xx/usb.c
+++ b/arch/powerpc/platforms/83xx/usb.c
@@ -76,14 +76,14 @@ int mpc834x_usb_cfg(void)
 		if (port0_is_dr)
 			printk(KERN_WARNING
 				"834x USB port0 can't be used by both DR and MPH!\n");
-		sicrl |= MPC834X_SICRL_USB0;
+		sicrl &= ~MPC834X_SICRL_USB0;
 	}
 	prop = of_get_property(np, "port1", NULL);
 	if (prop) {
 		if (port1_is_dr)
 			printk(KERN_WARNING
 				"834x USB port1 can't be used by both DR and MPH!\n");
-		sicrl |= MPC834X_SICRL_USB1;
+		sicrl &= ~MPC834X_SICRL_USB1;
 	}
 	of_node_put(np);
 	}
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 4100ddc52f02..7de4e919687b 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -2177,8 +2177,8 @@ struct tree_descr spufs_dir_contents[] = {
 	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
 	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
 	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
-	{ "signal1", &spufs_signal1_nosched_fops, 0222, },
-	{ "signal2", &spufs_signal2_nosched_fops, 0222, },
+	{ "signal1", &spufs_signal1_fops, 0666, },
+	{ "signal2", &spufs_signal2_fops, 0666, },
 	{ "signal1_type", &spufs_signal1_type, 0666, },
 	{ "signal2_type", &spufs_signal2_type, 0666, },
 	{ "cntl", &spufs_cntl_fops, 0666, },
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 5bd90a7eb763..f0b5ff17d860 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -419,7 +419,7 @@ static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
 	 * For the moment only implement delivery to all cpus or one cpu.
 	 * Get current irq_server for the given irq
 	 */
-	irq_server = get_irq_server(irq, 1);
+	irq_server = get_irq_server(virq, 1);
 	if (irq_server == -1) {
 		char cpulist[128];
 		cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
diff --git a/arch/powerpc/sysdev/commproc.c b/arch/powerpc/sysdev/commproc.c
index 4f67b89ba1d0..dd5417aec1b4 100644
--- a/arch/powerpc/sysdev/commproc.c
+++ b/arch/powerpc/sysdev/commproc.c
@@ -395,4 +395,4 @@ uint cpm_dpram_phys(u8* addr)
 {
 	return (dpram_pbase + (uint)(addr - dpram_vbase));
 }
-EXPORT_SYMBOL(cpm_dpram_addr);
+EXPORT_SYMBOL(cpm_dpram_phys);
diff --git a/arch/ppc/8xx_io/commproc.c b/arch/ppc/8xx_io/commproc.c
index 7088428e1fe2..9da880be4dc0 100644
--- a/arch/ppc/8xx_io/commproc.c
+++ b/arch/ppc/8xx_io/commproc.c
@@ -459,7 +459,7 @@ EXPORT_SYMBOL(cpm_dpdump);
 
 void *cpm_dpram_addr(unsigned long offset)
 {
-	return ((immap_t *)IMAP_ADDR)->im_cpm.cp_dpmem + offset;
+	return (void *)(dpram_vbase + offset);
 }
 EXPORT_SYMBOL(cpm_dpram_addr);
 
diff --git a/arch/sparc/kernel/ebus.c b/arch/sparc/kernel/ebus.c
index e2d02fd13f35..d850785b2080 100644
--- a/arch/sparc/kernel/ebus.c
+++ b/arch/sparc/kernel/ebus.c
@@ -156,6 +156,8 @@ void __init fill_ebus_device(struct device_node *dp, struct linux_ebus_device *d
 	dev->prom_node = dp;
 
 	regs = of_get_property(dp, "reg", &len);
+	if (!regs)
+		len = 0;
 	if (len % sizeof(struct linux_prom_registers)) {
 		prom_printf("UGH: proplen for %s was %d, need multiple of %d\n",
 			    dev->prom_node->name, len,
diff --git a/arch/sparc64/kernel/binfmt_aout32.c b/arch/sparc64/kernel/binfmt_aout32.c
index f205fc7cbcd0..d208cc7804f2 100644
--- a/arch/sparc64/kernel/binfmt_aout32.c
+++ b/arch/sparc64/kernel/binfmt_aout32.c
@@ -177,7 +177,7 @@ static u32 __user *create_aout32_tables(char __user *p, struct linux_binprm *bpr
 			get_user(c,p++);
 		} while (c);
 	}
-	put_user(NULL,argv);
+	put_user(0,argv);
 	current->mm->arg_end = current->mm->env_start = (unsigned long) p;
 	while (envc-->0) {
 		char c;
@@ -186,7 +186,7 @@ static u32 __user *create_aout32_tables(char __user *p, struct linux_binprm *bpr
 			get_user(c,p++);
 		} while (c);
 	}
-	put_user(NULL,envp);
+	put_user(0,envp);
 	current->mm->env_end = (unsigned long) p;
 	return sp;
 }
diff --git a/arch/sparc64/kernel/ebus.c b/arch/sparc64/kernel/ebus.c
index bc9ae36f7a43..04ab81cb4f48 100644
--- a/arch/sparc64/kernel/ebus.c
+++ b/arch/sparc64/kernel/ebus.c
@@ -375,7 +375,10 @@ static void __init fill_ebus_device(struct device_node *dp, struct linux_ebus_de
 		dev->num_addrs = 0;
 		dev->num_irqs = 0;
 	} else {
-		(void) of_get_property(dp, "reg", &len);
+		const int *regs = of_get_property(dp, "reg", &len);
+
+		if (!regs)
+			len = 0;
 		dev->num_addrs = len / sizeof(struct linux_prom_registers);
 
 		for (i = 0; i < dev->num_addrs; i++)
diff --git a/arch/sparc64/kernel/pci_common.c b/arch/sparc64/kernel/pci_common.c
index 2f61c4b12596..c76bfbb7da08 100644
--- a/arch/sparc64/kernel/pci_common.c
+++ b/arch/sparc64/kernel/pci_common.c
@@ -264,7 +264,7 @@ static int sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
 	unsigned int func = PCI_FUNC(devfn);
 	unsigned long ret;
 
-	if (bus_dev == pbm->pci_bus && devfn == 0x00)
+	if (!bus && devfn == 0x00)
 		return pci_host_bridge_read_pci_cfg(bus_dev, devfn, where,
 						    size, value);
 	if (config_out_of_range(pbm, bus, devfn, where)) {
@@ -300,7 +300,7 @@ static int sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
 	unsigned int func = PCI_FUNC(devfn);
 	unsigned long ret;
 
-	if (bus_dev == pbm->pci_bus && devfn == 0x00)
+	if (!bus && devfn == 0x00)
 		return pci_host_bridge_write_pci_cfg(bus_dev, devfn, where,
 						     size, value);
 	if (config_out_of_range(pbm, bus, devfn, where)) {
diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c
index 0614dff63d7c..a246e962e5a7 100644
--- a/arch/sparc64/kernel/prom.c
+++ b/arch/sparc64/kernel/prom.c
@@ -1046,7 +1046,8 @@ static void __init irq_trans_init(struct device_node *dp)
 	if (!strcmp(dp->name, "fhc") &&
 	    !strcmp(dp->parent->name, "central"))
 		return central_irq_trans_init(dp);
-	if (!strcmp(dp->name, "virtual-devices"))
+	if (!strcmp(dp->name, "virtual-devices") ||
+	    !strcmp(dp->name, "niu"))
 		return sun4v_vdev_irq_trans_init(dp);
 }
 
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index b84c49e3697c..c73b7a48b036 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -353,6 +353,8 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu)
 	int timeout, ret;
 
 	p = fork_idle(cpu);
+	if (IS_ERR(p))
+		return PTR_ERR(p);
 	callin_flag = 0;
 	cpu_new_thread = task_thread_info(p);
 
diff --git a/arch/sparc64/kernel/vio.c b/arch/sparc64/kernel/vio.c
index 1550ac5673da..0c1ee619d814 100644
--- a/arch/sparc64/kernel/vio.c
+++ b/arch/sparc64/kernel/vio.c
@@ -292,7 +292,7 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
 	}
 	vdev->dp = dp;
 
-	printk(KERN_ERR "VIO: Adding device %s\n", vdev->dev.bus_id);
+	printk(KERN_INFO "VIO: Adding device %s\n", vdev->dev.bus_id);
 
 	err = device_register(&vdev->dev);
 	if (err) {
@@ -342,8 +342,33 @@ static struct mdesc_notifier_client vio_device_notifier = {
 	.node_name	= "virtual-device-port",
 };
 
+/* We are only interested in domain service ports under the
+ * "domain-services" node. On control nodes there is another port
+ * under "openboot" that we should not mess with as aparently that is
+ * reserved exclusively for OBP use.
+ */
+static void vio_add_ds(struct mdesc_handle *hp, u64 node)
+{
+	int found;
+	u64 a;
+
+	found = 0;
+	mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
+		u64 target = mdesc_arc_target(hp, a);
+		const char *name = mdesc_node_name(hp, target);
+
+		if (!strcmp(name, "domain-services")) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (found)
+		(void) vio_create_one(hp, node, &root_vdev->dev);
+}
+
 static struct mdesc_notifier_client vio_ds_notifier = {
-	.add		= vio_add,
+	.add		= vio_add_ds,
 	.remove		= vio_remove,
 	.node_name	= "domain-services-port",
 };
diff --git a/arch/sparc64/lib/NGcopy_from_user.S b/arch/sparc64/lib/NGcopy_from_user.S
index 2d93456f76dd..e7f433f71b42 100644
--- a/arch/sparc64/lib/NGcopy_from_user.S
+++ b/arch/sparc64/lib/NGcopy_from_user.S
@@ -1,6 +1,6 @@
 /* NGcopy_from_user.S: Niagara optimized copy from userspace.
  *
- * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
  */
 
 #define EX_LD(x)		\
@@ -8,8 +8,8 @@
 	.section .fixup;	\
 	.align 4;		\
 99:	wr	%g0, ASI_AIUS, %asi;\
-	retl;			\
-	mov	1, %o0;		\
+	ret;			\
+	restore %g0, 1, %o0;	\
 	.section __ex_table,"a";\
 	.align 4;		\
 	.word 98b, 99b;		\
@@ -24,7 +24,7 @@
 #define LOAD(type,addr,dest)	type##a [addr] ASI_AIUS, dest
 #define LOAD_TWIN(addr_reg,dest0,dest1)	\
 	ldda [addr_reg] ASI_BLK_INIT_QUAD_LDD_AIUS, dest0
-#define EX_RETVAL(x)		0
+#define EX_RETVAL(x)		%g0
 
 #ifdef __KERNEL__
 #define PREAMBLE					\
diff --git a/arch/sparc64/lib/NGcopy_to_user.S b/arch/sparc64/lib/NGcopy_to_user.S
index 34112d5054ef..6ea01c5532a0 100644
--- a/arch/sparc64/lib/NGcopy_to_user.S
+++ b/arch/sparc64/lib/NGcopy_to_user.S
@@ -1,6 +1,6 @@
 /* NGcopy_to_user.S: Niagara optimized copy to userspace.
  *
- * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
  */
 
 #define EX_ST(x)		\
@@ -8,8 +8,8 @@
 	.section .fixup;	\
 	.align 4;		\
 99:	wr	%g0, ASI_AIUS, %asi;\
-	retl;			\
-	mov	1, %o0;		\
+	ret;			\
+	restore %g0, 1, %o0;	\
 	.section __ex_table,"a";\
 	.align 4;		\
 	.word 98b, 99b;		\
@@ -23,7 +23,7 @@
 #define FUNC_NAME		NGcopy_to_user
 #define STORE(type,src,addr)	type##a src, [addr] ASI_AIUS
 #define STORE_ASI		ASI_BLK_INIT_QUAD_LDD_AIUS
-#define EX_RETVAL(x)		0
+#define EX_RETVAL(x)		%g0
 
 #ifdef __KERNEL__
 	/* Writing to %asi is _expensive_ so we hardcode it.
diff --git a/arch/sparc64/lib/NGmemcpy.S b/arch/sparc64/lib/NGmemcpy.S
index 66063a9a66b8..96a14caf6966 100644
--- a/arch/sparc64/lib/NGmemcpy.S
+++ b/arch/sparc64/lib/NGmemcpy.S
@@ -1,6 +1,6 @@
1/* NGmemcpy.S: Niagara optimized memcpy. 1/* NGmemcpy.S: Niagara optimized memcpy.
2 * 2 *
3 * Copyright (C) 2006 David S. Miller (davem@davemloft.net) 3 * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
4 */ 4 */
5 5
6#ifdef __KERNEL__ 6#ifdef __KERNEL__
@@ -16,6 +16,12 @@
16 wr %g0, ASI_PNF, %asi 16 wr %g0, ASI_PNF, %asi
17#endif 17#endif
18 18
19#ifdef __sparc_v9__
20#define SAVE_AMOUNT 128
21#else
22#define SAVE_AMOUNT 64
23#endif
24
19#ifndef STORE_ASI 25#ifndef STORE_ASI
20#define STORE_ASI ASI_BLK_INIT_QUAD_LDD_P 26#define STORE_ASI ASI_BLK_INIT_QUAD_LDD_P
21#endif 27#endif
@@ -50,7 +56,11 @@
50#endif 56#endif
51 57
52#ifndef STORE_INIT 58#ifndef STORE_INIT
59#ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA
53#define STORE_INIT(src,addr) stxa src, [addr] %asi 60#define STORE_INIT(src,addr) stxa src, [addr] %asi
61#else
62#define STORE_INIT(src,addr) stx src, [addr + 0x00]
63#endif
54#endif 64#endif
55 65
56#ifndef FUNC_NAME 66#ifndef FUNC_NAME
@@ -73,18 +83,19 @@
73 83
74 .globl FUNC_NAME 84 .globl FUNC_NAME
75 .type FUNC_NAME,#function 85 .type FUNC_NAME,#function
76FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 86FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
77 srlx %o2, 31, %g2 87 PREAMBLE
88 save %sp, -SAVE_AMOUNT, %sp
89 srlx %i2, 31, %g2
78 cmp %g2, 0 90 cmp %g2, 0
79 tne %xcc, 5 91 tne %xcc, 5
80 PREAMBLE 92 mov %i0, %o0
81 mov %o0, GLOBAL_SPARE 93 cmp %i2, 0
82 cmp %o2, 0
83 be,pn %XCC, 85f 94 be,pn %XCC, 85f
84 or %o0, %o1, %o3 95 or %o0, %i1, %i3
85 cmp %o2, 16 96 cmp %i2, 16
86 blu,a,pn %XCC, 80f 97 blu,a,pn %XCC, 80f
87 or %o3, %o2, %o3 98 or %i3, %i2, %i3
88 99
89 /* 2 blocks (128 bytes) is the minimum we can do the block 100 /* 2 blocks (128 bytes) is the minimum we can do the block
90 * copy with. We need to ensure that we'll iterate at least 101 * copy with. We need to ensure that we'll iterate at least
@@ -93,31 +104,31 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
93 * to (64 - 1) bytes from the length before we perform the 104 * to (64 - 1) bytes from the length before we perform the
94 * block copy loop. 105 * block copy loop.
95 */ 106 */
96 cmp %o2, (2 * 64) 107 cmp %i2, (2 * 64)
97 blu,pt %XCC, 70f 108 blu,pt %XCC, 70f
98 andcc %o3, 0x7, %g0 109 andcc %i3, 0x7, %g0
99 110
100 /* %o0: dst 111 /* %o0: dst
101 * %o1: src 112 * %i1: src
102 * %o2: len (known to be >= 128) 113 * %i2: len (known to be >= 128)
103 * 114 *
104 * The block copy loops will use %o4/%o5,%g2/%g3 as 115 * The block copy loops will use %i4/%i5,%g2/%g3 as
105 * temporaries while copying the data. 116 * temporaries while copying the data.
106 */ 117 */
107 118
108 LOAD(prefetch, %o1, #one_read) 119 LOAD(prefetch, %i1, #one_read)
109 wr %g0, STORE_ASI, %asi 120 wr %g0, STORE_ASI, %asi
110 121
111 /* Align destination on 64-byte boundary. */ 122 /* Align destination on 64-byte boundary. */
112 andcc %o0, (64 - 1), %o4 123 andcc %o0, (64 - 1), %i4
113 be,pt %XCC, 2f 124 be,pt %XCC, 2f
114 sub %o4, 64, %o4 125 sub %i4, 64, %i4
115 sub %g0, %o4, %o4 ! bytes to align dst 126 sub %g0, %i4, %i4 ! bytes to align dst
116 sub %o2, %o4, %o2 127 sub %i2, %i4, %i2
1171: subcc %o4, 1, %o4 1281: subcc %i4, 1, %i4
118 EX_LD(LOAD(ldub, %o1, %g1)) 129 EX_LD(LOAD(ldub, %i1, %g1))
119 EX_ST(STORE(stb, %g1, %o0)) 130 EX_ST(STORE(stb, %g1, %o0))
120 add %o1, 1, %o1 131 add %i1, 1, %i1
121 bne,pt %XCC, 1b 132 bne,pt %XCC, 1b
122 add %o0, 1, %o0 133 add %o0, 1, %o0
123 134
@@ -136,111 +147,155 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
136 * aligned store data at a time, this is easy to ensure. 147 * aligned store data at a time, this is easy to ensure.
137 */ 148 */
1382: 1492:
139 andcc %o1, (16 - 1), %o4 150 andcc %i1, (16 - 1), %i4
140 andn %o2, (64 - 1), %g1 ! block copy loop iterator 151 andn %i2, (64 - 1), %g1 ! block copy loop iterator
141 sub %o2, %g1, %o2 ! final sub-block copy bytes
142 be,pt %XCC, 50f 152 be,pt %XCC, 50f
143 cmp %o4, 8 153 sub %i2, %g1, %i2 ! final sub-block copy bytes
144 be,a,pt %XCC, 10f 154
145 sub %o1, 0x8, %o1 155 cmp %i4, 8
156 be,pt %XCC, 10f
157 sub %i1, %i4, %i1
146 158
147 /* Neither 8-byte nor 16-byte aligned, shift and mask. */ 159 /* Neither 8-byte nor 16-byte aligned, shift and mask. */
148 mov %g1, %o4 160 and %i4, 0x7, GLOBAL_SPARE
149 and %o1, 0x7, %g1 161 sll GLOBAL_SPARE, 3, GLOBAL_SPARE
150 sll %g1, 3, %g1 162 mov 64, %i5
151 mov 64, %o3 163 EX_LD(LOAD_TWIN(%i1, %g2, %g3))
152 andn %o1, 0x7, %o1 164 sub %i5, GLOBAL_SPARE, %i5
153 EX_LD(LOAD(ldx, %o1, %g2)) 165 mov 16, %o4
154 sub %o3, %g1, %o3 166 mov 32, %o5
155 sllx %g2, %g1, %g2 167 mov 48, %o7
168 mov 64, %i3
169
170 bg,pn %XCC, 9f
171 nop
156 172
157#define SWIVEL_ONE_DWORD(SRC, TMP1, TMP2, PRE_VAL, PRE_SHIFT, POST_SHIFT, DST)\ 173#define MIX_THREE_WORDS(WORD1, WORD2, WORD3, PRE_SHIFT, POST_SHIFT, TMP) \
158 EX_LD(LOAD(ldx, SRC, TMP1)); \ 174 sllx WORD1, POST_SHIFT, WORD1; \
159 srlx TMP1, PRE_SHIFT, TMP2; \ 175 srlx WORD2, PRE_SHIFT, TMP; \
160 or TMP2, PRE_VAL, TMP2; \ 176 sllx WORD2, POST_SHIFT, WORD2; \
161 EX_ST(STORE_INIT(TMP2, DST)); \ 177 or WORD1, TMP, WORD1; \
162 sllx TMP1, POST_SHIFT, PRE_VAL; 178 srlx WORD3, PRE_SHIFT, TMP; \
163 179 or WORD2, TMP, WORD2;
1641: add %o1, 0x8, %o1 180
165 SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x00) 1818: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3))
166 add %o1, 0x8, %o1 182 MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1)
167 SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x08) 183 LOAD(prefetch, %i1 + %i3, #one_read)
168 add %o1, 0x8, %o1 184
169 SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x10) 185 EX_ST(STORE_INIT(%g2, %o0 + 0x00))
170 add %o1, 0x8, %o1 186 EX_ST(STORE_INIT(%g3, %o0 + 0x08))
171 SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x18) 187
172 add %o1, 32, %o1 188 EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3))
173 LOAD(prefetch, %o1, #one_read) 189 MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1)
174 sub %o1, 32 - 8, %o1 190
175 SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x20) 191 EX_ST(STORE_INIT(%o2, %o0 + 0x10))
176 add %o1, 8, %o1 192 EX_ST(STORE_INIT(%o3, %o0 + 0x18))
177 SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x28) 193
178 add %o1, 8, %o1 194 EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
179 SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x30) 195 MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1)
180 add %o1, 8, %o1 196
181 SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x38) 197 EX_ST(STORE_INIT(%g2, %o0 + 0x20))
182 subcc %o4, 64, %o4 198 EX_ST(STORE_INIT(%g3, %o0 + 0x28))
183 bne,pt %XCC, 1b 199
200 EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3))
201 add %i1, 64, %i1
202 MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1)
203
204 EX_ST(STORE_INIT(%o2, %o0 + 0x30))
205 EX_ST(STORE_INIT(%o3, %o0 + 0x38))
206
207 subcc %g1, 64, %g1
208 bne,pt %XCC, 8b
184 add %o0, 64, %o0 209 add %o0, 64, %o0
185 210
186#undef SWIVEL_ONE_DWORD 211 ba,pt %XCC, 60f
212 add %i1, %i4, %i1
213
2149: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3))
215 MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1)
216 LOAD(prefetch, %i1 + %i3, #one_read)
217
218 EX_ST(STORE_INIT(%g3, %o0 + 0x00))
219 EX_ST(STORE_INIT(%o2, %o0 + 0x08))
220
221 EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3))
222 MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1)
223
224 EX_ST(STORE_INIT(%o3, %o0 + 0x10))
225 EX_ST(STORE_INIT(%g2, %o0 + 0x18))
226
227 EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
228 MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1)
229
230 EX_ST(STORE_INIT(%g3, %o0 + 0x20))
231 EX_ST(STORE_INIT(%o2, %o0 + 0x28))
232
233 EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3))
234 add %i1, 64, %i1
235 MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1)
236
237 EX_ST(STORE_INIT(%o3, %o0 + 0x30))
238 EX_ST(STORE_INIT(%g2, %o0 + 0x38))
239
240 subcc %g1, 64, %g1
241 bne,pt %XCC, 9b
242 add %o0, 64, %o0
187 243
188 srl %g1, 3, %g1
189 ba,pt %XCC, 60f 244 ba,pt %XCC, 60f
190 add %o1, %g1, %o1 245 add %i1, %i4, %i1
191 246
19210: /* Destination is 64-byte aligned, source was only 8-byte 24710: /* Destination is 64-byte aligned, source was only 8-byte
193 * aligned but it has been subtracted by 8 and we perform 248 * aligned but it has been subtracted by 8 and we perform
194 * one twin load ahead, then add 8 back into source when 249 * one twin load ahead, then add 8 back into source when
195 * we finish the loop. 250 * we finish the loop.
196 */ 251 */
197 EX_LD(LOAD_TWIN(%o1, %o4, %o5)) 252 EX_LD(LOAD_TWIN(%i1, %o4, %o5))
1981: add %o1, 16, %o1 253 mov 16, %o7
199 EX_LD(LOAD_TWIN(%o1, %g2, %g3)) 254 mov 32, %g2
200 add %o1, 16 + 32, %o1 255 mov 48, %g3
201 LOAD(prefetch, %o1, #one_read) 256 mov 64, %o1
202 sub %o1, 32, %o1 2571: EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
258 LOAD(prefetch, %i1 + %o1, #one_read)
203 EX_ST(STORE_INIT(%o5, %o0 + 0x00)) ! initializes cache line 259 EX_ST(STORE_INIT(%o5, %o0 + 0x00)) ! initializes cache line
204 EX_ST(STORE_INIT(%g2, %o0 + 0x08)) 260 EX_ST(STORE_INIT(%o2, %o0 + 0x08))
205 EX_LD(LOAD_TWIN(%o1, %o4, %o5)) 261 EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5))
206 add %o1, 16, %o1 262 EX_ST(STORE_INIT(%o3, %o0 + 0x10))
207 EX_ST(STORE_INIT(%g3, %o0 + 0x10))
208 EX_ST(STORE_INIT(%o4, %o0 + 0x18)) 263 EX_ST(STORE_INIT(%o4, %o0 + 0x18))
209 EX_LD(LOAD_TWIN(%o1, %g2, %g3)) 264 EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3))
210 add %o1, 16, %o1
211 EX_ST(STORE_INIT(%o5, %o0 + 0x20)) 265 EX_ST(STORE_INIT(%o5, %o0 + 0x20))
212 EX_ST(STORE_INIT(%g2, %o0 + 0x28)) 266 EX_ST(STORE_INIT(%o2, %o0 + 0x28))
213 EX_LD(LOAD_TWIN(%o1, %o4, %o5)) 267 EX_LD(LOAD_TWIN(%i1 + %o1, %o4, %o5))
214 EX_ST(STORE_INIT(%g3, %o0 + 0x30)) 268 add %i1, 64, %i1
269 EX_ST(STORE_INIT(%o3, %o0 + 0x30))
215 EX_ST(STORE_INIT(%o4, %o0 + 0x38)) 270 EX_ST(STORE_INIT(%o4, %o0 + 0x38))
216 subcc %g1, 64, %g1 271 subcc %g1, 64, %g1
217 bne,pt %XCC, 1b 272 bne,pt %XCC, 1b
218 add %o0, 64, %o0 273 add %o0, 64, %o0
219 274
220 ba,pt %XCC, 60f 275 ba,pt %XCC, 60f
221 add %o1, 0x8, %o1 276 add %i1, 0x8, %i1
222 277
22350: /* Destination is 64-byte aligned, and source is 16-byte 27850: /* Destination is 64-byte aligned, and source is 16-byte
224 * aligned. 279 * aligned.
225 */ 280 */
2261: EX_LD(LOAD_TWIN(%o1, %o4, %o5)) 281 mov 16, %o7
227 add %o1, 16, %o1 282 mov 32, %g2
228 EX_LD(LOAD_TWIN(%o1, %g2, %g3)) 283 mov 48, %g3
229 add %o1, 16 + 32, %o1 284 mov 64, %o1
230 LOAD(prefetch, %o1, #one_read) 2851: EX_LD(LOAD_TWIN(%i1 + %g0, %o4, %o5))
231 sub %o1, 32, %o1 286 EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
287 LOAD(prefetch, %i1 + %o1, #one_read)
232 EX_ST(STORE_INIT(%o4, %o0 + 0x00)) ! initializes cache line 288 EX_ST(STORE_INIT(%o4, %o0 + 0x00)) ! initializes cache line
233 EX_ST(STORE_INIT(%o5, %o0 + 0x08)) 289 EX_ST(STORE_INIT(%o5, %o0 + 0x08))
234 EX_LD(LOAD_TWIN(%o1, %o4, %o5)) 290 EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5))
235 add %o1, 16, %o1 291 EX_ST(STORE_INIT(%o2, %o0 + 0x10))
236 EX_ST(STORE_INIT(%g2, %o0 + 0x10)) 292 EX_ST(STORE_INIT(%o3, %o0 + 0x18))
237 EX_ST(STORE_INIT(%g3, %o0 + 0x18)) 293 EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3))
238 EX_LD(LOAD_TWIN(%o1, %g2, %g3)) 294 add %i1, 64, %i1
239 add %o1, 16, %o1
240 EX_ST(STORE_INIT(%o4, %o0 + 0x20)) 295 EX_ST(STORE_INIT(%o4, %o0 + 0x20))
241 EX_ST(STORE_INIT(%o5, %o0 + 0x28)) 296 EX_ST(STORE_INIT(%o5, %o0 + 0x28))
242 EX_ST(STORE_INIT(%g2, %o0 + 0x30)) 297 EX_ST(STORE_INIT(%o2, %o0 + 0x30))
243 EX_ST(STORE_INIT(%g3, %o0 + 0x38)) 298 EX_ST(STORE_INIT(%o3, %o0 + 0x38))
244 subcc %g1, 64, %g1 299 subcc %g1, 64, %g1
245 bne,pt %XCC, 1b 300 bne,pt %XCC, 1b
246 add %o0, 64, %o0 301 add %o0, 64, %o0
@@ -249,47 +304,47 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
24960: 30460:
250 membar #Sync 305 membar #Sync
251 306
252 /* %o2 contains any final bytes still needed to be copied 307 /* %i2 contains any final bytes still needed to be copied
253 * over. If anything is left, we copy it one byte at a time. 308 * over. If anything is left, we copy it one byte at a time.
254 */ 309 */
255 RESTORE_ASI(%o3) 310 RESTORE_ASI(%i3)
256 brz,pt %o2, 85f 311 brz,pt %i2, 85f
257 sub %o0, %o1, %o3 312 sub %o0, %i1, %i3
258 ba,a,pt %XCC, 90f 313 ba,a,pt %XCC, 90f
259 314
260 .align 64 315 .align 64
26170: /* 16 < len <= 64 */ 31670: /* 16 < len <= 64 */
262 bne,pn %XCC, 75f 317 bne,pn %XCC, 75f
263 sub %o0, %o1, %o3 318 sub %o0, %i1, %i3
264 319
26572: 32072:
266 andn %o2, 0xf, %o4 321 andn %i2, 0xf, %i4
267 and %o2, 0xf, %o2 322 and %i2, 0xf, %i2
2681: subcc %o4, 0x10, %o4 3231: subcc %i4, 0x10, %i4
269 EX_LD(LOAD(ldx, %o1, %o5)) 324 EX_LD(LOAD(ldx, %i1, %o4))
270 add %o1, 0x08, %o1 325 add %i1, 0x08, %i1
271 EX_LD(LOAD(ldx, %o1, %g1)) 326 EX_LD(LOAD(ldx, %i1, %g1))
272 sub %o1, 0x08, %o1 327 sub %i1, 0x08, %i1
273 EX_ST(STORE(stx, %o5, %o1 + %o3)) 328 EX_ST(STORE(stx, %o4, %i1 + %i3))
274 add %o1, 0x8, %o1 329 add %i1, 0x8, %i1
275 EX_ST(STORE(stx, %g1, %o1 + %o3)) 330 EX_ST(STORE(stx, %g1, %i1 + %i3))
276 bgu,pt %XCC, 1b 331 bgu,pt %XCC, 1b
277 add %o1, 0x8, %o1 332 add %i1, 0x8, %i1
27873: andcc %o2, 0x8, %g0 33373: andcc %i2, 0x8, %g0
279 be,pt %XCC, 1f 334 be,pt %XCC, 1f
280 nop 335 nop
281 sub %o2, 0x8, %o2 336 sub %i2, 0x8, %i2
282 EX_LD(LOAD(ldx, %o1, %o5)) 337 EX_LD(LOAD(ldx, %i1, %o4))
283 EX_ST(STORE(stx, %o5, %o1 + %o3)) 338 EX_ST(STORE(stx, %o4, %i1 + %i3))
284 add %o1, 0x8, %o1 339 add %i1, 0x8, %i1
2851: andcc %o2, 0x4, %g0 3401: andcc %i2, 0x4, %g0
286 be,pt %XCC, 1f 341 be,pt %XCC, 1f
287 nop 342 nop
288 sub %o2, 0x4, %o2 343 sub %i2, 0x4, %i2
289 EX_LD(LOAD(lduw, %o1, %o5)) 344 EX_LD(LOAD(lduw, %i1, %i5))
290 EX_ST(STORE(stw, %o5, %o1 + %o3)) 345 EX_ST(STORE(stw, %i5, %i1 + %i3))
291 add %o1, 0x4, %o1 346 add %i1, 0x4, %i1
2921: cmp %o2, 0 3471: cmp %i2, 0
293 be,pt %XCC, 85f 348 be,pt %XCC, 85f
294 nop 349 nop
295 ba,pt %xcc, 90f 350 ba,pt %xcc, 90f
@@ -300,71 +355,71 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
300 sub %g1, 0x8, %g1 355 sub %g1, 0x8, %g1
301 be,pn %icc, 2f 356 be,pn %icc, 2f
302 sub %g0, %g1, %g1 357 sub %g0, %g1, %g1
303 sub %o2, %g1, %o2 358 sub %i2, %g1, %i2
304 359
3051: subcc %g1, 1, %g1 3601: subcc %g1, 1, %g1
306 EX_LD(LOAD(ldub, %o1, %o5)) 361 EX_LD(LOAD(ldub, %i1, %i5))
307 EX_ST(STORE(stb, %o5, %o1 + %o3)) 362 EX_ST(STORE(stb, %i5, %i1 + %i3))
308 bgu,pt %icc, 1b 363 bgu,pt %icc, 1b
309 add %o1, 1, %o1 364 add %i1, 1, %i1
310 365
3112: add %o1, %o3, %o0 3662: add %i1, %i3, %o0
312 andcc %o1, 0x7, %g1 367 andcc %i1, 0x7, %g1
313 bne,pt %icc, 8f 368 bne,pt %icc, 8f
314 sll %g1, 3, %g1 369 sll %g1, 3, %g1
315 370
316 cmp %o2, 16 371 cmp %i2, 16
317 bgeu,pt %icc, 72b 372 bgeu,pt %icc, 72b
318 nop 373 nop
319 ba,a,pt %xcc, 73b 374 ba,a,pt %xcc, 73b
320 375
3218: mov 64, %o3 3768: mov 64, %i3
322 andn %o1, 0x7, %o1 377 andn %i1, 0x7, %i1
323 EX_LD(LOAD(ldx, %o1, %g2)) 378 EX_LD(LOAD(ldx, %i1, %g2))
324 sub %o3, %g1, %o3 379 sub %i3, %g1, %i3
325 andn %o2, 0x7, %o4 380 andn %i2, 0x7, %i4
326 sllx %g2, %g1, %g2 381 sllx %g2, %g1, %g2
3271: add %o1, 0x8, %o1 3821: add %i1, 0x8, %i1
328 EX_LD(LOAD(ldx, %o1, %g3)) 383 EX_LD(LOAD(ldx, %i1, %g3))
329 subcc %o4, 0x8, %o4 384 subcc %i4, 0x8, %i4
330 srlx %g3, %o3, %o5 385 srlx %g3, %i3, %i5
331 or %o5, %g2, %o5 386 or %i5, %g2, %i5
332 EX_ST(STORE(stx, %o5, %o0)) 387 EX_ST(STORE(stx, %i5, %o0))
333 add %o0, 0x8, %o0 388 add %o0, 0x8, %o0
334 bgu,pt %icc, 1b 389 bgu,pt %icc, 1b
335 sllx %g3, %g1, %g2 390 sllx %g3, %g1, %g2
336 391
337 srl %g1, 3, %g1 392 srl %g1, 3, %g1
338 andcc %o2, 0x7, %o2 393 andcc %i2, 0x7, %i2
339 be,pn %icc, 85f 394 be,pn %icc, 85f
340 add %o1, %g1, %o1 395 add %i1, %g1, %i1
341 ba,pt %xcc, 90f 396 ba,pt %xcc, 90f
342 sub %o0, %o1, %o3 397 sub %o0, %i1, %i3
343 398
344 .align 64 399 .align 64
34580: /* 0 < len <= 16 */ 40080: /* 0 < len <= 16 */
346 andcc %o3, 0x3, %g0 401 andcc %i3, 0x3, %g0
347 bne,pn %XCC, 90f 402 bne,pn %XCC, 90f
348 sub %o0, %o1, %o3 403 sub %o0, %i1, %i3
349 404
3501: 4051:
351 subcc %o2, 4, %o2 406 subcc %i2, 4, %i2
352 EX_LD(LOAD(lduw, %o1, %g1)) 407 EX_LD(LOAD(lduw, %i1, %g1))
353 EX_ST(STORE(stw, %g1, %o1 + %o3)) 408 EX_ST(STORE(stw, %g1, %i1 + %i3))
354 bgu,pt %XCC, 1b 409 bgu,pt %XCC, 1b
355 add %o1, 4, %o1 410 add %i1, 4, %i1
356 411
35785: retl 41285: ret
358 mov EX_RETVAL(GLOBAL_SPARE), %o0 413 restore EX_RETVAL(%i0), %g0, %o0
359 414
360 .align 32 415 .align 32
36190: 41690:
362 subcc %o2, 1, %o2 417 subcc %i2, 1, %i2
363 EX_LD(LOAD(ldub, %o1, %g1)) 418 EX_LD(LOAD(ldub, %i1, %g1))
364 EX_ST(STORE(stb, %g1, %o1 + %o3)) 419 EX_ST(STORE(stb, %g1, %i1 + %i3))
365 bgu,pt %XCC, 90b 420 bgu,pt %XCC, 90b
366 add %o1, 1, %o1 421 add %i1, 1, %i1
367 retl 422 ret
368 mov EX_RETVAL(GLOBAL_SPARE), %o0 423 restore EX_RETVAL(%i0), %g0, %o0
369 424
370 .size FUNC_NAME, .-FUNC_NAME 425 .size FUNC_NAME, .-FUNC_NAME
diff --git a/arch/x86_64/vdso/voffset.h b/arch/x86_64/vdso/voffset.h
index 5304204911f2..4af67c79085f 100644
--- a/arch/x86_64/vdso/voffset.h
+++ b/arch/x86_64/vdso/voffset.h
@@ -1 +1 @@
-#define VDSO_TEXT_OFFSET 0x500
+#define VDSO_TEXT_OFFSET 0x600
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 035007145e78..bc18cbb8ea79 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -80,6 +80,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 {
 	enum dma_status status;
 	struct dma_async_tx_descriptor *iter;
+	struct dma_async_tx_descriptor *parent;
 
 	if (!tx)
 		return DMA_SUCCESS;
@@ -87,8 +88,15 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 	/* poll through the dependency chain, return when tx is complete */
 	do {
 		iter = tx;
-		while (iter->cookie == -EBUSY)
-			iter = iter->parent;
+
+		/* find the root of the unsubmitted dependency chain */
+		while (iter->cookie == -EBUSY) {
+			parent = iter->parent;
+			if (parent && parent->cookie == -EBUSY)
+				iter = iter->parent;
+			else
+				break;
+		}
 
 		status = dma_sync_wait(iter->chan, iter->cookie);
 	} while (status == DMA_IN_PROGRESS || (iter != tx));
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 4875f0149eb4..9685b75898ed 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -117,6 +117,7 @@ config ACPI_BUTTON
 config ACPI_VIDEO
 	tristate "Video"
 	depends on X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL
+	depends on INPUT
 	help
 	  This driver implement the ACPI Extensions For Display Adapters
 	  for integrated graphics devices on motherboard, as specified in
diff --git a/drivers/acpi/hardware/hwsleep.c b/drivers/acpi/hardware/hwsleep.c
index cf69c0040a39..8181afbd1d4d 100644
--- a/drivers/acpi/hardware/hwsleep.c
+++ b/drivers/acpi/hardware/hwsleep.c
@@ -234,15 +234,11 @@ acpi_status acpi_enter_sleep_state_prep(u8 sleep_state)
 				"While executing method _SST"));
 	}
 
-	/*
-	 * 1) Disable/Clear all GPEs
-	 */
+	/* Disable/Clear all GPEs */
+
 	status = acpi_hw_disable_all_gpes();
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
 
-	return_ACPI_STATUS(AE_OK);
+	return_ACPI_STATUS(status);
 }
 
 ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
diff --git a/drivers/acpi/sleep/Makefile b/drivers/acpi/sleep/Makefile
index ba9bd403d443..f1fb888c2d29 100644
--- a/drivers/acpi/sleep/Makefile
+++ b/drivers/acpi/sleep/Makefile
@@ -1,5 +1,5 @@
 obj-y					:= wakeup.o
-obj-$(CONFIG_ACPI_SLEEP)		+= main.o
+obj-y					+= main.o
 obj-$(CONFIG_ACPI_SLEEP)		+= proc.o
 
 EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c
index 85633c585aab..caf8721ae6fb 100644
--- a/drivers/acpi/sleep/main.c
+++ b/drivers/acpi/sleep/main.c
@@ -24,7 +24,30 @@
 
 u8 sleep_states[ACPI_S_STATE_COUNT];
 
+#ifdef CONFIG_PM_SLEEP
 static u32 acpi_target_sleep_state = ACPI_STATE_S0;
+#endif
+
+int acpi_sleep_prepare(u32 acpi_state)
+{
+#ifdef CONFIG_ACPI_SLEEP
+	/* do we have a wakeup address for S2 and S3? */
+	if (acpi_state == ACPI_STATE_S3) {
+		if (!acpi_wakeup_address) {
+			return -EFAULT;
+		}
+		acpi_set_firmware_waking_vector((acpi_physical_address)
+				virt_to_phys((void *)
+				acpi_wakeup_address));
+
+	}
+	ACPI_FLUSH_CPU_CACHE();
+	acpi_enable_wakeup_device_prep(acpi_state);
+#endif
+	acpi_gpe_sleep_prepare(acpi_state);
+	acpi_enter_sleep_state_prep(acpi_state);
+	return 0;
+}
 
 #ifdef CONFIG_SUSPEND
 static struct pm_ops acpi_pm_ops;
@@ -60,27 +83,6 @@ static int acpi_pm_set_target(suspend_state_t pm_state)
 	return error;
 }
 
-int acpi_sleep_prepare(u32 acpi_state)
-{
-#ifdef CONFIG_ACPI_SLEEP
-	/* do we have a wakeup address for S2 and S3? */
-	if (acpi_state == ACPI_STATE_S3) {
-		if (!acpi_wakeup_address) {
-			return -EFAULT;
-		}
-		acpi_set_firmware_waking_vector((acpi_physical_address)
-				virt_to_phys((void *)
-				acpi_wakeup_address));
-
-	}
-	ACPI_FLUSH_CPU_CACHE();
-	acpi_enable_wakeup_device_prep(acpi_state);
-#endif
-	acpi_gpe_sleep_prepare(acpi_state);
-	acpi_enter_sleep_state_prep(acpi_state);
-	return 0;
-}
-
 /**
  * acpi_pm_prepare - Do preliminary suspend work.
  * @pm_state: ignored
@@ -254,6 +256,11 @@ static int acpi_hibernation_enter(void)
 
 static void acpi_hibernation_finish(void)
 {
+	/*
+	 * If ACPI is not enabled by the BIOS and the boot kernel, we need to
+	 * enable it here.
+	 */
+	acpi_enable();
 	acpi_leave_sleep_state(ACPI_STATE_S4);
 	acpi_disable_wakeup_device(ACPI_STATE_S4);
 
@@ -299,6 +306,7 @@ int acpi_suspend(u32 acpi_state)
 	return -EINVAL;
 }
 
+#ifdef CONFIG_PM_SLEEP
 /**
  * acpi_pm_device_sleep_state - return preferred power state of ACPI device
  *		in the system sleep state given by %acpi_target_sleep_state
@@ -373,6 +381,7 @@ int acpi_pm_device_sleep_state(struct device *dev, int wake, int *d_min_p)
 	*d_min_p = d_min;
 	return d_max;
 }
+#endif
 
 static void acpi_power_off_prepare(void)
 {
diff --git a/drivers/acpi/tables/tbutils.c b/drivers/acpi/tables/tbutils.c
index 8cc9492ffbf2..5f1d85f2ffe4 100644
--- a/drivers/acpi/tables/tbutils.c
+++ b/drivers/acpi/tables/tbutils.c
@@ -400,7 +400,7 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address, u8 flags)
 	u32 table_count;
 	struct acpi_table_header *table;
 	acpi_physical_address address;
-	acpi_physical_address rsdt_address;
+	acpi_physical_address uninitialized_var(rsdt_address);
 	u32 length;
 	u8 *table_entry;
 	acpi_status status;
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 3b8bf1812dc8..6996eb5b7506 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -921,6 +921,13 @@ static int piix_broken_suspend(void)
 {
 	static struct dmi_system_id sysids[] = {
 		{
+			.ident = "TECRA M3",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M3"),
+			},
+		},
+		{
 			.ident = "TECRA M5",
 			.matches = {
 				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index 4ca7fd6118d5..5dea3584c6c2 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -189,6 +189,9 @@ static __devinit int ixp4xx_pata_probe(struct platform_device *pdev)
 	data->cs0 = devm_ioremap(&pdev->dev, cs0->start, 0x1000);
 	data->cs1 = devm_ioremap(&pdev->dev, cs1->start, 0x1000);
 
+	if (!data->cs0 || !data->cs1)
+		return -ENOMEM;
+
 	irq = platform_get_irq(pdev, 0);
 	if (irq)
 		set_irq_type(irq, IRQT_RISING);
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index ae206f35f747..b45506f1ef73 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -44,10 +44,10 @@ static int marvell_pre_reset(struct ata_port *ap, unsigned long deadline)
 		return -ENOMEM;
 	printk("BAR5:");
 	for(i = 0; i <= 0x0F; i++)
-		printk("%02X:%02X ", i, readb(barp + i));
+		printk("%02X:%02X ", i, ioread8(barp + i));
 	printk("\n");
 
-	devices = readl(barp + 0x0C);
+	devices = ioread32(barp + 0x0C);
 	pci_iounmap(pdev, barp);
 
 	if ((pdev->device == 0x6145) && (ap->port_no == 0) &&
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index 2bd7645f1a88..cce2834b2b60 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -375,8 +375,9 @@ static void sis_66_set_dmamode (struct ata_port *ap, struct ata_device *adev)
 	int drive_pci = sis_old_port_base(adev);
 	u16 timing;
 
+	/* MWDMA 0-2 and UDMA 0-5 */
 	const u16 mwdma_bits[] = { 0x008, 0x302, 0x301 };
-	const u16 udma_bits[]  = { 0xF000, 0xD000, 0xB000, 0xA000, 0x9000};
+	const u16 udma_bits[]  = { 0xF000, 0xD000, 0xB000, 0xA000, 0x9000, 0x8000 };
 
 	pci_read_config_word(pdev, drive_pci, &timing);
 
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 11bf6c7ac122..cb7dec97fee6 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -313,7 +313,10 @@ enum {
 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
 
 enum {
-	MV_DMA_BOUNDARY		= 0xffffffffU,
+	/* DMA boundary 0xffff is required by the s/g splitting
+	 * we need on /length/ in mv_fill-sg().
+	 */
+	MV_DMA_BOUNDARY		= 0xffffU,
 
 	/* mask of register bits containing lower 32 bits
 	 * of EDMA request queue DMA address
@@ -448,7 +451,7 @@ static struct scsi_host_template mv5_sht = {
 	.queuecommand		= ata_scsi_queuecmd,
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= MV_MAX_SG_CT,
+	.sg_tablesize		= MV_MAX_SG_CT / 2,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= 1,
@@ -466,7 +469,7 @@ static struct scsi_host_template mv6_sht = {
 	.queuecommand		= ata_scsi_queuecmd,
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= MV_MAX_SG_CT,
+	.sg_tablesize		= MV_MAX_SG_CT / 2,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= 1,
@@ -1139,15 +1142,27 @@ static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
 		dma_addr_t addr = sg_dma_address(sg);
 		u32 sg_len = sg_dma_len(sg);
 
-		mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
-		mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
-		mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);
+		while (sg_len) {
+			u32 offset = addr & 0xffff;
+			u32 len = sg_len;
+
+			if ((offset + sg_len > 0x10000))
+				len = 0x10000 - offset;
+
+			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
+			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
+			mv_sg->flags_size = cpu_to_le32(len);
 
-		if (ata_sg_is_last(sg, qc))
-			mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
+			sg_len -= len;
+			addr += len;
+
+			if (!sg_len && ata_sg_is_last(sg, qc))
+				mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
+
+			mv_sg++;
+			n_sg++;
+		}
 
-		mv_sg++;
-		n_sg++;
 	}
 
 	return n_sg;
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index ef83e6b1e314..233e88693395 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -888,6 +888,16 @@ static inline void sil24_host_intr(struct ata_port *ap)
 	u32 slot_stat, qc_active;
 	int rc;
 
+	/* If PCIX_IRQ_WOC, there's an inherent race window between
+	 * clearing IRQ pending status and reading PORT_SLOT_STAT
+	 * which may cause spurious interrupts afterwards. This is
+	 * unavoidable and much better than losing interrupts which
+	 * happens if IRQ pending is cleared after reading
+	 * PORT_SLOT_STAT.
+	 */
+	if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
+		writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);
+
 	slot_stat = readl(port + PORT_SLOT_STAT);
 
 	if (unlikely(slot_stat & HOST_SSTAT_ATTN)) {
@@ -895,9 +905,6 @@ static inline void sil24_host_intr(struct ata_port *ap)
 		return;
 	}
 
-	if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
-		writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);
-
 	qc_active = slot_stat & ~HOST_SSTAT_ATTN;
 	rc = ata_qc_complete_multiple(ap, qc_active, sil24_finish_qc);
 	if (rc > 0)
@@ -910,7 +917,8 @@ static inline void sil24_host_intr(struct ata_port *ap)
 		return;
 	}
 
-	if (ata_ratelimit())
+	/* spurious interrupts are expected if PCIX_IRQ_WOC */
+	if (!(ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) && ata_ratelimit())
 		ata_port_printk(ap, KERN_INFO, "spurious interrupt "
 			"(slot_stat 0x%x active_tag %d sactive 0x%x)\n",
 			slot_stat, ap->active_tag, ap->sactive);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 6de33d7a29ba..ec86d6fc2360 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -284,6 +284,7 @@ static ssize_t show_uevent(struct device *dev, struct device_attribute *attr,
 
 	/* let the kset specific function add its keys */
 	pos = data;
+	memset(envp, 0, sizeof(envp));
 	retval = kset->uevent_ops->uevent(kset, &dev->kobj,
 					  envp, ARRAY_SIZE(envp),
 					  pos, PAGE_SIZE);
@@ -585,9 +586,13 @@ void device_initialize(struct device *dev)
585static struct kobject * get_device_parent(struct device *dev, 586static struct kobject * get_device_parent(struct device *dev,
586 struct device *parent) 587 struct device *parent)
587{ 588{
588 /* Set the parent to the class, not the parent device */ 589 /*
589 /* this keeps sysfs from having a symlink to make old udevs happy */ 590 * Set the parent to the class, not the parent device
590 if (dev->class) 591 * for topmost devices in class hierarchy.
592 * This keeps sysfs from having a symlink to make old
593 * udevs happy
594 */
595 if (dev->class && (!parent || parent->class != dev->class))
591 return &dev->class->subsys.kobj; 596 return &dev->class->subsys.kobj;
592 else if (parent) 597 else if (parent)
593 return &parent->kobj; 598 return &parent->kobj;
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 67ee3d4b2878..79245714f0a7 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -1032,6 +1032,10 @@ int cdrom_open(struct cdrom_device_info *cdi, struct inode *ip, struct file *fp)
1032 check_disk_change(ip->i_bdev); 1032 check_disk_change(ip->i_bdev);
1033 return 0; 1033 return 0;
1034err_release: 1034err_release:
1035 if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) {
1036 cdi->ops->lock_door(cdi, 0);
1037 cdinfo(CD_OPEN, "door unlocked.\n");
1038 }
1035 cdi->ops->release(cdi); 1039 cdi->ops->release(cdi);
1036err: 1040err:
1037 cdi->use_count--; 1041 cdi->use_count--;
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index d68ddbe70f73..c78ff26647ee 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -129,7 +129,7 @@ $(obj)/defkeymap.o: $(obj)/defkeymap.c
129 129
130ifdef GENERATE_KEYMAP 130ifdef GENERATE_KEYMAP
131 131
132$(obj)/defkeymap.c $(obj)/%.c: $(src)/%.map 132$(obj)/defkeymap.c: $(obj)/%.c: $(src)/%.map
133 loadkeys --mktable $< > $@.tmp 133 loadkeys --mktable $< > $@.tmp
134 sed -e 's/^static *//' $@.tmp > $@ 134 sed -e 's/^static *//' $@.tmp > $@
135 rm $@.tmp 135 rm $@.tmp
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index a5d0e95a227a..141ca176c397 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -506,11 +506,6 @@ static void intel_i830_init_gtt_entries(void)
506 break; 506 break;
507 } 507 }
508 } else { 508 } else {
509 /* G33's GTT stolen memory is separate from gfx data
510 * stolen memory.
511 */
512 if (IS_G33)
513 size = 0;
514 switch (gmch_ctrl & I855_GMCH_GMS_MASK) { 509 switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
515 case I855_GMCH_GMS_STOLEN_1M: 510 case I855_GMCH_GMS_STOLEN_1M:
516 gtt_entries = MB(1) - KB(size); 511 gtt_entries = MB(1) - KB(size);
diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h
index 737088bd0780..28b98733beb8 100644
--- a/drivers/char/drm/i915_drv.h
+++ b/drivers/char/drm/i915_drv.h
@@ -210,6 +210,12 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
210#define I915REG_INT_MASK_R 0x020a8 210#define I915REG_INT_MASK_R 0x020a8
211#define I915REG_INT_ENABLE_R 0x020a0 211#define I915REG_INT_ENABLE_R 0x020a0
212 212
213#define I915REG_PIPEASTAT 0x70024
214#define I915REG_PIPEBSTAT 0x71024
215
216#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17)
217#define I915_VBLANK_CLEAR (1UL<<1)
218
213#define SRX_INDEX 0x3c4 219#define SRX_INDEX 0x3c4
214#define SRX_DATA 0x3c5 220#define SRX_DATA 0x3c5
215#define SR01 1 221#define SR01 1
diff --git a/drivers/char/drm/i915_irq.c b/drivers/char/drm/i915_irq.c
index 4b4b2ce89863..bb8e9e9c8201 100644
--- a/drivers/char/drm/i915_irq.c
+++ b/drivers/char/drm/i915_irq.c
@@ -214,6 +214,10 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
214 struct drm_device *dev = (struct drm_device *) arg; 214 struct drm_device *dev = (struct drm_device *) arg;
215 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 215 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
216 u16 temp; 216 u16 temp;
217 u32 pipea_stats, pipeb_stats;
218
219 pipea_stats = I915_READ(I915REG_PIPEASTAT);
220 pipeb_stats = I915_READ(I915REG_PIPEBSTAT);
217 221
218 temp = I915_READ16(I915REG_INT_IDENTITY_R); 222 temp = I915_READ16(I915REG_INT_IDENTITY_R);
219 223
@@ -225,6 +229,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
225 return IRQ_NONE; 229 return IRQ_NONE;
226 230
227 I915_WRITE16(I915REG_INT_IDENTITY_R, temp); 231 I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
232 (void) I915_READ16(I915REG_INT_IDENTITY_R);
233 DRM_READMEMORYBARRIER();
228 234
229 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 235 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
230 236
@@ -252,6 +258,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
252 258
253 if (dev_priv->swaps_pending > 0) 259 if (dev_priv->swaps_pending > 0)
254 drm_locked_tasklet(dev, i915_vblank_tasklet); 260 drm_locked_tasklet(dev, i915_vblank_tasklet);
261 I915_WRITE(I915REG_PIPEASTAT,
262 pipea_stats|I915_VBLANK_INTERRUPT_ENABLE|
263 I915_VBLANK_CLEAR);
264 I915_WRITE(I915REG_PIPEBSTAT,
265 pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE|
266 I915_VBLANK_CLEAR);
255 } 267 }
256 268
257 return IRQ_HANDLED; 269 return IRQ_HANDLED;
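
After clearing I915REG_INT_IDENTITY_R, the i915 patch reads the register back and inserts a memory barrier so the clearing write is actually posted to the device before the handler continues. A sketch of that write/posting-read pattern, assuming write16()/read16() and fake_reg are simplified stand-ins for I915_WRITE16/I915_READ16 and the real MMIO register:

    #include <stdint.h>
    #include <stdio.h>

    static volatile uint16_t fake_reg;    /* stands in for the MMIO register */

    static void write16(volatile uint16_t *r, uint16_t v) { *r = v; }
    static uint16_t read16(volatile uint16_t *r)          { return *r; }

    static void ack_interrupt(void)
    {
        uint16_t pending = read16(&fake_reg);   /* which sources fired */

        /* Write the pending bits back (the real register is write-1-to-clear),
         * then force the write out before doing anything that depends on it. */
        write16(&fake_reg, pending);
        (void)read16(&fake_reg);       /* posting read flushes the write */
        __sync_synchronize();          /* full barrier, like DRM_READMEMORYBARRIER() */

        printf("acked 0x%04x\n", pending);
    }

    int main(void)
    {
        fake_reg = 0x0002;             /* pretend one interrupt bit is set */
        ack_interrupt();
        return 0;
    }
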
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 7ecffc9c738f..4c16778e3f84 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -62,6 +62,8 @@
62 62
63static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ; 63static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;
64 64
65/* This clocksource driver currently only works on ia64 */
66#ifdef CONFIG_IA64
65static void __iomem *hpet_mctr; 67static void __iomem *hpet_mctr;
66 68
67static cycle_t read_hpet(void) 69static cycle_t read_hpet(void)
@@ -79,6 +81,7 @@ static struct clocksource clocksource_hpet = {
79 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 81 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
80}; 82};
81static struct clocksource *hpet_clocksource; 83static struct clocksource *hpet_clocksource;
84#endif
82 85
83/* A lock for concurrent access by app and isr hpet activity. */ 86/* A lock for concurrent access by app and isr hpet activity. */
84static DEFINE_SPINLOCK(hpet_lock); 87static DEFINE_SPINLOCK(hpet_lock);
@@ -943,14 +946,14 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
943 printk(KERN_DEBUG "%s: 0x%lx is busy\n", 946 printk(KERN_DEBUG "%s: 0x%lx is busy\n",
944 __FUNCTION__, hdp->hd_phys_address); 947 __FUNCTION__, hdp->hd_phys_address);
945 iounmap(hdp->hd_address); 948 iounmap(hdp->hd_address);
946 return -EBUSY; 949 return AE_ALREADY_EXISTS;
947 } 950 }
948 } else if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) { 951 } else if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) {
949 struct acpi_resource_fixed_memory32 *fixmem32; 952 struct acpi_resource_fixed_memory32 *fixmem32;
950 953
951 fixmem32 = &res->data.fixed_memory32; 954 fixmem32 = &res->data.fixed_memory32;
952 if (!fixmem32) 955 if (!fixmem32)
953 return -EINVAL; 956 return AE_NO_MEMORY;
954 957
955 hdp->hd_phys_address = fixmem32->address; 958 hdp->hd_phys_address = fixmem32->address;
956 hdp->hd_address = ioremap(fixmem32->address, 959 hdp->hd_address = ioremap(fixmem32->address,
@@ -960,7 +963,7 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
960 printk(KERN_DEBUG "%s: 0x%lx is busy\n", 963 printk(KERN_DEBUG "%s: 0x%lx is busy\n",
961 __FUNCTION__, hdp->hd_phys_address); 964 __FUNCTION__, hdp->hd_phys_address);
962 iounmap(hdp->hd_address); 965 iounmap(hdp->hd_address);
963 return -EBUSY; 966 return AE_ALREADY_EXISTS;
964 } 967 }
965 } else if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) { 968 } else if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) {
966 struct acpi_resource_extended_irq *irqp; 969 struct acpi_resource_extended_irq *irqp;
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index 049a46cc9f87..04ac155d3a07 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -155,23 +155,22 @@ mspec_open(struct vm_area_struct *vma)
155 * mspec_close 155 * mspec_close
156 * 156 *
157 * Called when unmapping a device mapping. Frees all mspec pages 157 * Called when unmapping a device mapping. Frees all mspec pages
158 * belonging to the vma. 158 * belonging to all the vma's sharing this vma_data structure.
159 */ 159 */
160static void 160static void
161mspec_close(struct vm_area_struct *vma) 161mspec_close(struct vm_area_struct *vma)
162{ 162{
163 struct vma_data *vdata; 163 struct vma_data *vdata;
164 int index, last_index, result; 164 int index, last_index;
165 unsigned long my_page; 165 unsigned long my_page;
166 166
167 vdata = vma->vm_private_data; 167 vdata = vma->vm_private_data;
168 168
169 BUG_ON(vma->vm_start < vdata->vm_start || vma->vm_end > vdata->vm_end); 169 if (!atomic_dec_and_test(&vdata->refcnt))
170 return;
170 171
171 spin_lock(&vdata->lock); 172 last_index = (vdata->vm_end - vdata->vm_start) >> PAGE_SHIFT;
172 index = (vma->vm_start - vdata->vm_start) >> PAGE_SHIFT; 173 for (index = 0; index < last_index; index++) {
173 last_index = (vma->vm_end - vdata->vm_start) >> PAGE_SHIFT;
174 for (; index < last_index; index++) {
175 if (vdata->maddr[index] == 0) 174 if (vdata->maddr[index] == 0)
176 continue; 175 continue;
177 /* 176 /*
@@ -180,20 +179,12 @@ mspec_close(struct vm_area_struct *vma)
180 */ 179 */
181 my_page = vdata->maddr[index]; 180 my_page = vdata->maddr[index];
182 vdata->maddr[index] = 0; 181 vdata->maddr[index] = 0;
183 spin_unlock(&vdata->lock); 182 if (!mspec_zero_block(my_page, PAGE_SIZE))
184 result = mspec_zero_block(my_page, PAGE_SIZE);
185 if (!result)
186 uncached_free_page(my_page); 183 uncached_free_page(my_page);
187 else 184 else
188 printk(KERN_WARNING "mspec_close(): " 185 printk(KERN_WARNING "mspec_close(): "
189 "failed to zero page %i\n", 186 "failed to zero page %ld\n", my_page);
190 result);
191 spin_lock(&vdata->lock);
192 } 187 }
193 spin_unlock(&vdata->lock);
194
195 if (!atomic_dec_and_test(&vdata->refcnt))
196 return;
197 188
198 if (vdata->flags & VMD_VMALLOCED) 189 if (vdata->flags & VMD_VMALLOCED)
199 vfree(vdata); 190 vfree(vdata);
@@ -201,7 +192,6 @@ mspec_close(struct vm_area_struct *vma)
201 kfree(vdata); 192 kfree(vdata);
202} 193}
203 194
204
205/* 195/*
206 * mspec_nopfn 196 * mspec_nopfn
207 * 197 *
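
mspec_close() now frees the shared page array only when the last VMA referencing the vma_data drops its reference, instead of tearing down per-VMA ranges under a lock. A toy sketch of that drop-last-reference pattern; the struct layout, the page "tokens" and the helpers here are simplified stand-ins, not the driver's types:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct vma_data {
        atomic_int refcnt;
        int        npages;
        long      *maddr;    /* 0 means "slot never faulted in" */
    };

    /* Called once per VMA unmap; only the final caller frees anything. */
    static void vdata_close(struct vma_data *vdata)
    {
        if (atomic_fetch_sub(&vdata->refcnt, 1) != 1)
            return;                         /* other sharers remain */

        for (int i = 0; i < vdata->npages; i++) {
            if (vdata->maddr[i] == 0)
                continue;
            printf("freeing page token %ld\n", vdata->maddr[i]);
            vdata->maddr[i] = 0;
        }
        free(vdata->maddr);
        free(vdata);
    }

    int main(void)
    {
        struct vma_data *v = malloc(sizeof(*v));

        atomic_init(&v->refcnt, 2);         /* two VMAs share this data */
        v->npages = 4;
        v->maddr = calloc(v->npages, sizeof(long));
        v->maddr[1] = 42;

        vdata_close(v);                     /* first close: no-op */
        vdata_close(v);                     /* last close: frees everything */
        return 0;
    }
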
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 397c714cf2ba..af274e5a25ee 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1550,11 +1550,13 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
1550 * As close as possible to RFC 793, which 1550 * As close as possible to RFC 793, which
1551 * suggests using a 250 kHz clock. 1551 * suggests using a 250 kHz clock.
1552 * Further reading shows this assumes 2 Mb/s networks. 1552 * Further reading shows this assumes 2 Mb/s networks.
1553 * For 10 Gb/s Ethernet, a 1 GHz clock is appropriate. 1553 * For 10 Mb/s Ethernet, a 1 MHz clock is appropriate.
1554 * That's funny, Linux has one built in! Use it! 1554 * For 10 Gb/s Ethernet, a 1 GHz clock should be ok, but
1555 * (Networks are faster now - should this be increased?) 1555 * we also need to limit the resolution so that the u32 seq
1556 * overlaps less than one time per MSL (2 minutes).
1557 * Choosing a clock of 64 ns period is OK. (period of 274 s)
1556 */ 1558 */
1557 seq += ktime_get_real().tv64; 1559 seq += ktime_get_real().tv64 >> 6;
1558#if 0 1560#if 0
1559 printk("init_seq(%lx, %lx, %d, %d) = %d\n", 1561 printk("init_seq(%lx, %lx, %d, %d) = %d\n",
1560 saddr, daddr, sport, dport, seq); 1562 saddr, daddr, sport, dport, seq);
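
The random.c hunk trades clock resolution for wrap time: shifting the nanosecond clock right by 6 gives a 64 ns tick, and a u32 worth of those ticks wraps only every 2^32 x 64 ns, roughly 274 s, which comfortably exceeds the 2-minute MSL mentioned in the comment. A quick standalone check of that arithmetic:

    #include <stdio.h>

    int main(void)
    {
        const double tick_ns = 64.0;                          /* 1 ns clock >> 6 */
        const double wrap_s  = 4294967296.0 * tick_ns / 1e9;  /* u32 wrap period */
        const double msl_s   = 120.0;                         /* MSL from the comment */

        printf("seq tick    : %.0f ns\n", tick_ns);
        printf("u32 wrap    : %.1f s\n", wrap_s);             /* ~274.9 s */
        printf("exceeds MSL : %s\n", wrap_s > msl_s ? "yes" : "no");
        return 0;
    }
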
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index c6f6f4209739..7a61a2a9aafe 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -770,6 +770,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
770 /* 770 /*
771 * Switching-from response 771 * Switching-from response
772 */ 772 */
773 acquire_console_sem();
773 if (vc->vt_newvt >= 0) { 774 if (vc->vt_newvt >= 0) {
774 if (arg == 0) 775 if (arg == 0)
775 /* 776 /*
@@ -784,7 +785,6 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
784 * complete the switch. 785 * complete the switch.
785 */ 786 */
786 int newvt; 787 int newvt;
787 acquire_console_sem();
788 newvt = vc->vt_newvt; 788 newvt = vc->vt_newvt;
789 vc->vt_newvt = -1; 789 vc->vt_newvt = -1;
790 i = vc_allocate(newvt); 790 i = vc_allocate(newvt);
@@ -798,7 +798,6 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
798 * other console switches.. 798 * other console switches..
799 */ 799 */
800 complete_change_console(vc_cons[newvt].d); 800 complete_change_console(vc_cons[newvt].d);
801 release_console_sem();
802 } 801 }
803 } 802 }
804 803
@@ -810,9 +809,12 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
810 /* 809 /*
811 * If it's just an ACK, ignore it 810 * If it's just an ACK, ignore it
812 */ 811 */
813 if (arg != VT_ACKACQ) 812 if (arg != VT_ACKACQ) {
813 release_console_sem();
814 return -EINVAL; 814 return -EINVAL;
815 }
815 } 816 }
817 release_console_sem();
816 818
817 return 0; 819 return 0;
818 820
@@ -1030,7 +1032,7 @@ static DECLARE_WAIT_QUEUE_HEAD(vt_activate_queue);
1030 1032
1031/* 1033/*
1032 * Sleeps until a vt is activated, or the task is interrupted. Returns 1034 * Sleeps until a vt is activated, or the task is interrupted. Returns
1033 * 0 if activation, -EINTR if interrupted. 1035 * 0 if activation, -EINTR if interrupted by a signal handler.
1034 */ 1036 */
1035int vt_waitactive(int vt) 1037int vt_waitactive(int vt)
1036{ 1038{
@@ -1055,7 +1057,7 @@ int vt_waitactive(int vt)
1055 break; 1057 break;
1056 } 1058 }
1057 release_console_sem(); 1059 release_console_sem();
1058 retval = -EINTR; 1060 retval = -ERESTARTNOHAND;
1059 if (signal_pending(current)) 1061 if (signal_pending(current))
1060 break; 1062 break;
1061 schedule(); 1063 schedule();
@@ -1208,15 +1210,18 @@ void change_console(struct vc_data *new_vc)
1208 /* 1210 /*
1209 * Send the signal as privileged - kill_pid() will 1211 * Send the signal as privileged - kill_pid() will
1210 * tell us if the process has gone or something else 1212 * tell us if the process has gone or something else
1211 * is awry 1213 * is awry.
1214 *
1215 * We need to set vt_newvt *before* sending the signal or we
1216 * have a race.
1212 */ 1217 */
1218 vc->vt_newvt = new_vc->vc_num;
1213 if (kill_pid(vc->vt_pid, vc->vt_mode.relsig, 1) == 0) { 1219 if (kill_pid(vc->vt_pid, vc->vt_mode.relsig, 1) == 0) {
1214 /* 1220 /*
1215 * It worked. Mark the vt to switch to and 1221 * It worked. Mark the vt to switch to and
1216 * return. The process needs to send us a 1222 * return. The process needs to send us a
1217 * VT_RELDISP ioctl to complete the switch. 1223 * VT_RELDISP ioctl to complete the switch.
1218 */ 1224 */
1219 vc->vt_newvt = new_vc->vc_num;
1220 return; 1225 return;
1221 } 1226 }
1222 1227
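
change_console() now publishes vt_newvt before signalling the controlling process, so a VT_RELDISP reply that races in right after the signal still finds the pending target. A stripped-down user-space sketch of the publish-then-notify ordering; the thread, mutex and names here only model the race, they are not the console code:

    #include <pthread.h>
    #include <stdio.h>

    static int vt_newvt = -1;     /* -1 means "no switch pending" */
    static pthread_mutex_t console_lock = PTHREAD_MUTEX_INITIALIZER;

    /* The "userspace" side: answers the release request (VT_RELDISP). */
    static void *reldisp(void *unused)
    {
        (void)unused;
        pthread_mutex_lock(&console_lock);
        if (vt_newvt >= 0)
            printf("completing switch to vt %d\n", vt_newvt);
        else
            printf("spurious release: no switch pending\n");
        vt_newvt = -1;
        pthread_mutex_unlock(&console_lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        /* Publish the target vt *before* notifying the process, as the
         * patch does; notifying first would let the reply observe
         * vt_newvt still unset. */
        pthread_mutex_lock(&console_lock);
        vt_newvt = 3;
        pthread_mutex_unlock(&console_lock);

        pthread_create(&t, NULL, reldisp, NULL);  /* stands in for kill_pid() */
        pthread_join(t, NULL);
        return 0;
    }
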
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index d011a76f8e7a..fe9e768cfbc4 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -11,7 +11,8 @@ config FIREWIRE
11 This is the "Juju" FireWire stack, a new alternative implementation 11 This is the "Juju" FireWire stack, a new alternative implementation
12 designed for robustness and simplicity. You can build either this 12 designed for robustness and simplicity. You can build either this
13 stack, or the classic stack (the ieee1394 driver, ohci1394 etc.) 13 stack, or the classic stack (the ieee1394 driver, ohci1394 etc.)
14 or both. 14 or both. Please read http://wiki.linux1394.org/JujuMigration before
15 you enable the new stack.
15 16
16 To compile this driver as a module, say M here: the module will be 17 To compile this driver as a module, say M here: the module will be
17 called firewire-core. It functionally replaces ieee1394, raw1394, 18 called firewire-core. It functionally replaces ieee1394, raw1394,
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index f19eb6daeefd..2fb047b898aa 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -1546,6 +1546,7 @@ static struct pci_device_id pmac_ide_pci_match[] = {
1546 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 1546 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
1547 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID2_ATA, 1547 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID2_ATA,
1548 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 1548 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
1549 {},
1549}; 1550};
1550 1551
1551static struct pci_driver pmac_ide_pci_driver = { 1552static struct pci_driver pmac_ide_pci_driver = {
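
The pmac fix adds the empty terminating entry that PCI ID tables require; the matching code walks the array until it reaches an all-zero sentinel. A small sketch of why the sentinel matters, with an illustrative table and match_one() helper (the ID values are made up for the example):

    #include <stdio.h>

    struct id_entry {
        unsigned vendor;
        unsigned device;
    };

    /* Walk until the all-zero sentinel; without it the loop runs off the end. */
    static const struct id_entry *match_one(const struct id_entry *tbl,
                                            unsigned vendor, unsigned device)
    {
        for (; tbl->vendor || tbl->device; tbl++)
            if (tbl->vendor == vendor && tbl->device == device)
                return tbl;
        return NULL;
    }

    static const struct id_entry ids[] = {
        { 0x106b, 0x0033 },   /* illustrative vendor/device pairs */
        { 0x106b, 0x003b },
        { 0, 0 },             /* terminating entry, as the patch adds */
    };

    int main(void)
    {
        printf("known id   : %s\n", match_one(ids, 0x106b, 0x003b) ? "match" : "no match");
        printf("unknown id : %s\n", match_one(ids, 0x8086, 0x1234) ? "match" : "no match");
        return 0;
    }
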
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index ba0428d872aa..85c51bdc36f1 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1211,12 +1211,42 @@ static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
1211 dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey); 1211 dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
1212} 1212}
1213 1213
1214static void set_data_seg(struct mlx4_wqe_data_seg *dseg, 1214static void set_mlx_icrc_seg(void *dseg)
1215 struct ib_sge *sg) 1215{
1216 u32 *t = dseg;
1217 struct mlx4_wqe_inline_seg *iseg = dseg;
1218
1219 t[1] = 0;
1220
1221 /*
1222 * Need a barrier here before writing the byte_count field to
1223 * make sure that all the data is visible before the
1224 * byte_count field is set. Otherwise, if the segment begins
1225 * a new cacheline, the HCA prefetcher could grab the 64-byte
1226 * chunk and get a valid (!= * 0xffffffff) byte count but
1227 * stale data, and end up sending the wrong data.
1228 */
1229 wmb();
1230
1231 iseg->byte_count = cpu_to_be32((1 << 31) | 4);
1232}
1233
1234static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
1216{ 1235{
1217 dseg->byte_count = cpu_to_be32(sg->length);
1218 dseg->lkey = cpu_to_be32(sg->lkey); 1236 dseg->lkey = cpu_to_be32(sg->lkey);
1219 dseg->addr = cpu_to_be64(sg->addr); 1237 dseg->addr = cpu_to_be64(sg->addr);
1238
1239 /*
1240 * Need a barrier here before writing the byte_count field to
1241 * make sure that all the data is visible before the
1242 * byte_count field is set. Otherwise, if the segment begins
1243 * a new cacheline, the HCA prefetcher could grab the 64-byte
1244 * chunk and get a valid (!= * 0xffffffff) byte count but
1245 * stale data, and end up sending the wrong data.
1246 */
1247 wmb();
1248
1249 dseg->byte_count = cpu_to_be32(sg->length);
1220} 1250}
1221 1251
1222int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 1252int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
@@ -1225,6 +1255,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1225 struct mlx4_ib_qp *qp = to_mqp(ibqp); 1255 struct mlx4_ib_qp *qp = to_mqp(ibqp);
1226 void *wqe; 1256 void *wqe;
1227 struct mlx4_wqe_ctrl_seg *ctrl; 1257 struct mlx4_wqe_ctrl_seg *ctrl;
1258 struct mlx4_wqe_data_seg *dseg;
1228 unsigned long flags; 1259 unsigned long flags;
1229 int nreq; 1260 int nreq;
1230 int err = 0; 1261 int err = 0;
@@ -1324,22 +1355,27 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1324 break; 1355 break;
1325 } 1356 }
1326 1357
1327 for (i = 0; i < wr->num_sge; ++i) { 1358 /*
1328 set_data_seg(wqe, wr->sg_list + i); 1359 * Write data segments in reverse order, so as to
1360 * overwrite cacheline stamp last within each
1361 * cacheline. This avoids issues with WQE
1362 * prefetching.
1363 */
1329 1364
1330 wqe += sizeof (struct mlx4_wqe_data_seg); 1365 dseg = wqe;
1331 size += sizeof (struct mlx4_wqe_data_seg) / 16; 1366 dseg += wr->num_sge - 1;
1332 } 1367 size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16);
1333 1368
1334 /* Add one more inline data segment for ICRC for MLX sends */ 1369 /* Add one more inline data segment for ICRC for MLX sends */
1335 if (qp->ibqp.qp_type == IB_QPT_SMI || qp->ibqp.qp_type == IB_QPT_GSI) { 1370 if (unlikely(qp->ibqp.qp_type == IB_QPT_SMI ||
1336 ((struct mlx4_wqe_inline_seg *) wqe)->byte_count = 1371 qp->ibqp.qp_type == IB_QPT_GSI)) {
1337 cpu_to_be32((1 << 31) | 4); 1372 set_mlx_icrc_seg(dseg + 1);
1338 ((u32 *) wqe)[1] = 0;
1339 wqe += sizeof (struct mlx4_wqe_data_seg);
1340 size += sizeof (struct mlx4_wqe_data_seg) / 16; 1373 size += sizeof (struct mlx4_wqe_data_seg) / 16;
1341 } 1374 }
1342 1375
1376 for (i = wr->num_sge - 1; i >= 0; --i, --dseg)
1377 set_data_seg(dseg, wr->sg_list + i);
1378
1343 ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ? 1379 ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
1344 MLX4_WQE_CTRL_FENCE : 0) | size; 1380 MLX4_WQE_CTRL_FENCE : 0) | size;
1345 1381
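
Two orderings matter in the mlx4 hunk above: within a segment, the byte_count that validates it is written last, behind wmb(); across segments, they are filled in reverse so the entry sharing the first cacheline with the stamp becomes valid last. A compact sketch of that ordering, assuming a simplified segment layout and store_fence() as a stand-in for wmb():

    #include <stdint.h>
    #include <stdio.h>

    struct data_seg {
        uint32_t byte_count;   /* 0xffffffff marks the segment as not yet valid */
        uint32_t lkey;
        uint64_t addr;
    };

    static void store_fence(void)
    {
        __sync_synchronize();  /* stands in for wmb() */
    }

    static void set_data_seg(struct data_seg *dseg, uint64_t addr,
                             uint32_t lkey, uint32_t length)
    {
        dseg->lkey = lkey;
        dseg->addr = addr;

        /* Make lkey/addr visible before the byte_count that a hardware
         * prefetcher would use to decide the segment is valid. */
        store_fence();
        dseg->byte_count = length;
    }

    int main(void)
    {
        struct data_seg sg[3];

        for (int i = 0; i < 3; i++)
            sg[i] = (struct data_seg){ .byte_count = 0xffffffff };

        /* Fill in reverse so sg[0], which shares the first cacheline with
         * the invalid-marker stamp, is completed last. */
        for (int i = 2; i >= 0; i--)
            set_data_seg(&sg[i], 0x1000 * (uint64_t)(i + 1), 0x42, 256);

        printf("first segment byte_count: %u\n", (unsigned)sg[0].byte_count);
        return 0;
    }
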
diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
index e2abe18e575d..7c662ee594a3 100644
--- a/drivers/input/joystick/Kconfig
+++ b/drivers/input/joystick/Kconfig
@@ -277,7 +277,7 @@ config JOYSTICK_XPAD_FF
277 277
278config JOYSTICK_XPAD_LEDS 278config JOYSTICK_XPAD_LEDS
279 bool "LED Support for Xbox360 controller 'BigX' LED" 279 bool "LED Support for Xbox360 controller 'BigX' LED"
280 depends on LEDS_CLASS && JOYSTICK_XPAD 280 depends on JOYSTICK_XPAD && (LEDS_CLASS=y || LEDS_CLASS=JOYSTICK_XPAD)
281 ---help--- 281 ---help---
282 This option enables support for the LED which surrounds the Big X on 282 This option enables support for the LED which surrounds the Big X on
283 XBox 360 controller. 283 XBox 360 controller.
diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c
index 2bea1b2c631c..a1804bfdbb8c 100644
--- a/drivers/input/mouse/appletouch.c
+++ b/drivers/input/mouse/appletouch.c
@@ -328,6 +328,7 @@ static void atp_complete(struct urb* urb)
328{ 328{
329 int x, y, x_z, y_z, x_f, y_f; 329 int x, y, x_z, y_z, x_f, y_f;
330 int retval, i, j; 330 int retval, i, j;
331 int key;
331 struct atp *dev = urb->context; 332 struct atp *dev = urb->context;
332 333
333 switch (urb->status) { 334 switch (urb->status) {
@@ -468,6 +469,7 @@ static void atp_complete(struct urb* urb)
468 ATP_XFACT, &x_z, &x_f); 469 ATP_XFACT, &x_z, &x_f);
469 y = atp_calculate_abs(dev->xy_acc + ATP_XSENSORS, ATP_YSENSORS, 470 y = atp_calculate_abs(dev->xy_acc + ATP_XSENSORS, ATP_YSENSORS,
470 ATP_YFACT, &y_z, &y_f); 471 ATP_YFACT, &y_z, &y_f);
472 key = dev->data[dev->datalen - 1] & 1;
471 473
472 if (x && y) { 474 if (x && y) {
473 if (dev->x_old != -1) { 475 if (dev->x_old != -1) {
@@ -505,7 +507,7 @@ static void atp_complete(struct urb* urb)
505 the first touch unless reinitialised. Do so if it's been 507 the first touch unless reinitialised. Do so if it's been
506 idle for a while in order to avoid waking the kernel up 508 idle for a while in order to avoid waking the kernel up
507 several hundred times a second */ 509 several hundred times a second */
508 if (atp_is_geyser_3(dev)) { 510 if (!key && atp_is_geyser_3(dev)) {
509 dev->idlecount++; 511 dev->idlecount++;
510 if (dev->idlecount == 10) { 512 if (dev->idlecount == 10) {
511 dev->valid = 0; 513 dev->valid = 0;
@@ -514,7 +516,7 @@ static void atp_complete(struct urb* urb)
514 } 516 }
515 } 517 }
516 518
517 input_report_key(dev->input, BTN_LEFT, dev->data[dev->datalen - 1] & 1); 519 input_report_key(dev->input, BTN_LEFT, key);
518 input_sync(dev->input); 520 input_sync(dev->input);
519 521
520exit: 522exit:
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index ec5f4046412f..4910bca52640 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -1135,7 +1135,7 @@ isdn_read(struct file *file, char __user *buf, size_t count, loff_t * off)
1135 if (count > dev->drv[drvidx]->stavail) 1135 if (count > dev->drv[drvidx]->stavail)
1136 count = dev->drv[drvidx]->stavail; 1136 count = dev->drv[drvidx]->stavail;
1137 len = dev->drv[drvidx]->interface->readstat(buf, count, 1137 len = dev->drv[drvidx]->interface->readstat(buf, count,
1138 drvidx, isdn_minor2chan(minor)); 1138 drvidx, isdn_minor2chan(minor - ISDN_MINOR_CTRL));
1139 if (len < 0) { 1139 if (len < 0) {
1140 retval = len; 1140 retval = len;
1141 goto out; 1141 goto out;
@@ -1207,7 +1207,8 @@ isdn_write(struct file *file, const char __user *buf, size_t count, loff_t * off
1207 */ 1207 */
1208 if (dev->drv[drvidx]->interface->writecmd) 1208 if (dev->drv[drvidx]->interface->writecmd)
1209 retval = dev->drv[drvidx]->interface-> 1209 retval = dev->drv[drvidx]->interface->
1210 writecmd(buf, count, drvidx, isdn_minor2chan(minor)); 1210 writecmd(buf, count, drvidx,
1211 isdn_minor2chan(minor - ISDN_MINOR_CTRL));
1211 else 1212 else
1212 retval = count; 1213 retval = count;
1213 goto out; 1214 goto out;
diff --git a/drivers/lguest/lguest_asm.S b/drivers/lguest/lguest_asm.S
index f182c6a36209..1ddcd5cd20f6 100644
--- a/drivers/lguest/lguest_asm.S
+++ b/drivers/lguest/lguest_asm.S
@@ -22,8 +22,9 @@
22 jmp lguest_init 22 jmp lguest_init
23 23
24/*G:055 We create a macro which puts the assembler code between lgstart_ and 24/*G:055 We create a macro which puts the assembler code between lgstart_ and
25 * lgend_ markers. These templates end up in the .init.text section, so they 25 * lgend_ markers. These templates are put in the .text section: they can't be
26 * are discarded after boot. */ 26 * discarded after boot as we may need to patch modules, too. */
27.text
27#define LGUEST_PATCH(name, insns...) \ 28#define LGUEST_PATCH(name, insns...) \
28 lgstart_##name: insns; lgend_##name:; \ 29 lgstart_##name: insns; lgend_##name:; \
29 .globl lgstart_##name; .globl lgend_##name 30 .globl lgstart_##name; .globl lgend_##name
@@ -34,7 +35,6 @@ LGUEST_PATCH(popf, movl %eax, lguest_data+LGUEST_DATA_irq_enabled)
34LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax) 35LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax)
35/*:*/ 36/*:*/
36 37
37.text
38/* These demark the EIP range where host should never deliver interrupts. */ 38/* These demark the EIP range where host should never deliver interrupts. */
39.global lguest_noirq_start 39.global lguest_noirq_start
40.global lguest_noirq_end 40.global lguest_noirq_end
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4d63773ee73a..f96dea975fa5 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -514,7 +514,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
514 struct stripe_head *sh = stripe_head_ref; 514 struct stripe_head *sh = stripe_head_ref;
515 struct bio *return_bi = NULL; 515 struct bio *return_bi = NULL;
516 raid5_conf_t *conf = sh->raid_conf; 516 raid5_conf_t *conf = sh->raid_conf;
517 int i, more_to_read = 0; 517 int i;
518 518
519 pr_debug("%s: stripe %llu\n", __FUNCTION__, 519 pr_debug("%s: stripe %llu\n", __FUNCTION__,
520 (unsigned long long)sh->sector); 520 (unsigned long long)sh->sector);
@@ -522,16 +522,14 @@ static void ops_complete_biofill(void *stripe_head_ref)
522 /* clear completed biofills */ 522 /* clear completed biofills */
523 for (i = sh->disks; i--; ) { 523 for (i = sh->disks; i--; ) {
524 struct r5dev *dev = &sh->dev[i]; 524 struct r5dev *dev = &sh->dev[i];
525 /* check if this stripe has new incoming reads */
526 if (dev->toread)
527 more_to_read++;
528 525
529 /* acknowledge completion of a biofill operation */ 526 /* acknowledge completion of a biofill operation */
530 /* and check if we need to reply to a read request 527 /* and check if we need to reply to a read request,
531 */ 528 * new R5_Wantfill requests are held off until
532 if (test_bit(R5_Wantfill, &dev->flags) && !dev->toread) { 529 * !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)
530 */
531 if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
533 struct bio *rbi, *rbi2; 532 struct bio *rbi, *rbi2;
534 clear_bit(R5_Wantfill, &dev->flags);
535 533
536 /* The access to dev->read is outside of the 534 /* The access to dev->read is outside of the
537 * spin_lock_irq(&conf->device_lock), but is protected 535 * spin_lock_irq(&conf->device_lock), but is protected
@@ -558,8 +556,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
558 556
559 return_io(return_bi); 557 return_io(return_bi);
560 558
561 if (more_to_read) 559 set_bit(STRIPE_HANDLE, &sh->state);
562 set_bit(STRIPE_HANDLE, &sh->state);
563 release_stripe(sh); 560 release_stripe(sh);
564} 561}
565 562
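
The raid5 change collapses a separate test and clear of R5_Wantfill into one atomic test_and_clear_bit()-style operation, so a completed biofill is acknowledged exactly once. A minimal sketch of that atomic acknowledge using C11 atomics in place of the kernel bitops (the flag name and helper are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    #define R5_WANTFILL (1u << 0)

    /* Atomically clear a flag and report whether it was set: the moral
     * equivalent of test_and_clear_bit(R5_Wantfill, &dev->flags). */
    static int test_and_clear_flag(atomic_uint *flags, unsigned int bit)
    {
        unsigned int old = atomic_fetch_and(flags, ~bit);
        return (old & bit) != 0;
    }

    int main(void)
    {
        atomic_uint flags = R5_WANTFILL;

        if (test_and_clear_flag(&flags, R5_WANTFILL))
            printf("biofill acknowledged\n");     /* runs exactly once */
        if (!test_and_clear_flag(&flags, R5_WANTFILL))
            printf("already clear\n");
        return 0;
    }
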
diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c
index 0285c4a830eb..66ea3cbc369c 100644
--- a/drivers/media/video/ivtv/ivtv-fileops.c
+++ b/drivers/media/video/ivtv/ivtv-fileops.c
@@ -754,9 +754,11 @@ static void ivtv_stop_decoding(struct ivtv_open_id *id, int flags, u64 pts)
754 ivtv_yuv_close(itv); 754 ivtv_yuv_close(itv);
755 } 755 }
756 if (s->type == IVTV_DEC_STREAM_TYPE_YUV && itv->output_mode == OUT_YUV) 756 if (s->type == IVTV_DEC_STREAM_TYPE_YUV && itv->output_mode == OUT_YUV)
757 itv->output_mode = OUT_NONE; 757 itv->output_mode = OUT_NONE;
758 else if (s->type == IVTV_DEC_STREAM_TYPE_YUV && itv->output_mode == OUT_UDMA_YUV)
759 itv->output_mode = OUT_NONE;
758 else if (s->type == IVTV_DEC_STREAM_TYPE_MPG && itv->output_mode == OUT_MPG) 760 else if (s->type == IVTV_DEC_STREAM_TYPE_MPG && itv->output_mode == OUT_MPG)
759 itv->output_mode = OUT_NONE; 761 itv->output_mode = OUT_NONE;
760 762
761 itv->speed = 0; 763 itv->speed = 0;
762 clear_bit(IVTV_F_I_DEC_PAUSED, &itv->i_flags); 764 clear_bit(IVTV_F_I_DEC_PAUSED, &itv->i_flags);
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index e3371f972240..0cb006f2943d 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -1387,7 +1387,6 @@ static const struct file_operations usbvision_fops = {
1387 .ioctl = video_ioctl2, 1387 .ioctl = video_ioctl2,
1388 .llseek = no_llseek, 1388 .llseek = no_llseek,
1389/* .poll = video_poll, */ 1389/* .poll = video_poll, */
1390 .mmap = usbvision_v4l2_mmap,
1391 .compat_ioctl = v4l_compat_ioctl32, 1390 .compat_ioctl = v4l_compat_ioctl32,
1392}; 1391};
1393static struct video_device usbvision_video_template = { 1392static struct video_device usbvision_video_template = {
@@ -1413,7 +1412,7 @@ static struct video_device usbvision_video_template = {
1413 .vidioc_s_input = vidioc_s_input, 1412 .vidioc_s_input = vidioc_s_input,
1414 .vidioc_queryctrl = vidioc_queryctrl, 1413 .vidioc_queryctrl = vidioc_queryctrl,
1415 .vidioc_g_audio = vidioc_g_audio, 1414 .vidioc_g_audio = vidioc_g_audio,
1416 .vidioc_g_audio = vidioc_s_audio, 1415 .vidioc_s_audio = vidioc_s_audio,
1417 .vidioc_g_ctrl = vidioc_g_ctrl, 1416 .vidioc_g_ctrl = vidioc_g_ctrl,
1418 .vidioc_s_ctrl = vidioc_s_ctrl, 1417 .vidioc_s_ctrl = vidioc_s_ctrl,
1419 .vidioc_streamon = vidioc_streamon, 1418 .vidioc_streamon = vidioc_streamon,
@@ -1459,7 +1458,7 @@ static struct video_device usbvision_radio_template=
1459 .vidioc_s_input = vidioc_s_input, 1458 .vidioc_s_input = vidioc_s_input,
1460 .vidioc_queryctrl = vidioc_queryctrl, 1459 .vidioc_queryctrl = vidioc_queryctrl,
1461 .vidioc_g_audio = vidioc_g_audio, 1460 .vidioc_g_audio = vidioc_g_audio,
1462 .vidioc_g_audio = vidioc_s_audio, 1461 .vidioc_s_audio = vidioc_s_audio,
1463 .vidioc_g_ctrl = vidioc_g_ctrl, 1462 .vidioc_g_ctrl = vidioc_g_ctrl,
1464 .vidioc_s_ctrl = vidioc_s_ctrl, 1463 .vidioc_s_ctrl = vidioc_s_ctrl,
1465 .vidioc_g_tuner = vidioc_g_tuner, 1464 .vidioc_g_tuner = vidioc_g_tuner,
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 73e248fb2ff1..7b580c3152de 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -134,6 +134,7 @@ config SONY_LAPTOP
134 tristate "Sony Laptop Extras" 134 tristate "Sony Laptop Extras"
135 depends on X86 && ACPI 135 depends on X86 && ACPI
136 select BACKLIGHT_CLASS_DEVICE 136 select BACKLIGHT_CLASS_DEVICE
137 depends on INPUT
137 ---help--- 138 ---help---
138 This mini-driver drives the SNC and SPIC devices present in the ACPI 139 This mini-driver drives the SNC and SPIC devices present in the ACPI
139 BIOS of the Sony Vaio laptops. 140 BIOS of the Sony Vaio laptops.
@@ -156,6 +157,7 @@ config THINKPAD_ACPI
156 select BACKLIGHT_CLASS_DEVICE 157 select BACKLIGHT_CLASS_DEVICE
157 select HWMON 158 select HWMON
158 select NVRAM 159 select NVRAM
160 depends on INPUT
159 ---help--- 161 ---help---
160 This is a driver for the IBM and Lenovo ThinkPad laptops. It adds 162 This is a driver for the IBM and Lenovo ThinkPad laptops. It adds
161 support for Fn-Fx key combinations, Bluetooth control, video 163 support for Fn-Fx key combinations, Bluetooth control, video
diff --git a/drivers/misc/sony-laptop.c b/drivers/misc/sony-laptop.c
index d38ddce592c0..f248080828f2 100644
--- a/drivers/misc/sony-laptop.c
+++ b/drivers/misc/sony-laptop.c
@@ -1173,7 +1173,8 @@ static struct acpi_driver sony_nc_driver = {
1173#define SONYPI_TYPE3_OFFSET 0x12 1173#define SONYPI_TYPE3_OFFSET 0x12
1174 1174
1175struct sony_pic_ioport { 1175struct sony_pic_ioport {
1176 struct acpi_resource_io io; 1176 struct acpi_resource_io io1;
1177 struct acpi_resource_io io2;
1177 struct list_head list; 1178 struct list_head list;
1178}; 1179};
1179 1180
@@ -1443,11 +1444,11 @@ static u8 sony_pic_call1(u8 dev)
1443{ 1444{
1444 u8 v1, v2; 1445 u8 v1, v2;
1445 1446
1446 wait_on_command(inb_p(spic_dev.cur_ioport->io.minimum + 4) & 2, 1447 wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2,
1447 ITERATIONS_LONG); 1448 ITERATIONS_LONG);
1448 outb(dev, spic_dev.cur_ioport->io.minimum + 4); 1449 outb(dev, spic_dev.cur_ioport->io1.minimum + 4);
1449 v1 = inb_p(spic_dev.cur_ioport->io.minimum + 4); 1450 v1 = inb_p(spic_dev.cur_ioport->io1.minimum + 4);
1450 v2 = inb_p(spic_dev.cur_ioport->io.minimum); 1451 v2 = inb_p(spic_dev.cur_ioport->io1.minimum);
1451 dprintk("sony_pic_call1: 0x%.4x\n", (v2 << 8) | v1); 1452 dprintk("sony_pic_call1: 0x%.4x\n", (v2 << 8) | v1);
1452 return v2; 1453 return v2;
1453} 1454}
@@ -1456,13 +1457,13 @@ static u8 sony_pic_call2(u8 dev, u8 fn)
1456{ 1457{
1457 u8 v1; 1458 u8 v1;
1458 1459
1459 wait_on_command(inb_p(spic_dev.cur_ioport->io.minimum + 4) & 2, 1460 wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2,
1460 ITERATIONS_LONG); 1461 ITERATIONS_LONG);
1461 outb(dev, spic_dev.cur_ioport->io.minimum + 4); 1462 outb(dev, spic_dev.cur_ioport->io1.minimum + 4);
1462 wait_on_command(inb_p(spic_dev.cur_ioport->io.minimum + 4) & 2, 1463 wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2,
1463 ITERATIONS_LONG); 1464 ITERATIONS_LONG);
1464 outb(fn, spic_dev.cur_ioport->io.minimum); 1465 outb(fn, spic_dev.cur_ioport->io1.minimum);
1465 v1 = inb_p(spic_dev.cur_ioport->io.minimum); 1466 v1 = inb_p(spic_dev.cur_ioport->io1.minimum);
1466 dprintk("sony_pic_call2: 0x%.4x\n", v1); 1467 dprintk("sony_pic_call2: 0x%.4x\n", v1);
1467 return v1; 1468 return v1;
1468} 1469}
@@ -1471,13 +1472,13 @@ static u8 sony_pic_call3(u8 dev, u8 fn, u8 v)
1471{ 1472{
1472 u8 v1; 1473 u8 v1;
1473 1474
1474 wait_on_command(inb_p(spic_dev.cur_ioport->io.minimum + 4) & 2, ITERATIONS_LONG); 1475 wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, ITERATIONS_LONG);
1475 outb(dev, spic_dev.cur_ioport->io.minimum + 4); 1476 outb(dev, spic_dev.cur_ioport->io1.minimum + 4);
1476 wait_on_command(inb_p(spic_dev.cur_ioport->io.minimum + 4) & 2, ITERATIONS_LONG); 1477 wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, ITERATIONS_LONG);
1477 outb(fn, spic_dev.cur_ioport->io.minimum); 1478 outb(fn, spic_dev.cur_ioport->io1.minimum);
1478 wait_on_command(inb_p(spic_dev.cur_ioport->io.minimum + 4) & 2, ITERATIONS_LONG); 1479 wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, ITERATIONS_LONG);
1479 outb(v, spic_dev.cur_ioport->io.minimum); 1480 outb(v, spic_dev.cur_ioport->io1.minimum);
1480 v1 = inb_p(spic_dev.cur_ioport->io.minimum); 1481 v1 = inb_p(spic_dev.cur_ioport->io1.minimum);
1481 dprintk("sony_pic_call3: 0x%.4x\n", v1); 1482 dprintk("sony_pic_call3: 0x%.4x\n", v1);
1482 return v1; 1483 return v1;
1483} 1484}
@@ -2074,7 +2075,18 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
2074 2075
2075 switch (resource->type) { 2076 switch (resource->type) {
2076 case ACPI_RESOURCE_TYPE_START_DEPENDENT: 2077 case ACPI_RESOURCE_TYPE_START_DEPENDENT:
2078 {
2079 /* start IO enumeration */
2080 struct sony_pic_ioport *ioport = kzalloc(sizeof(*ioport), GFP_KERNEL);
2081 if (!ioport)
2082 return AE_ERROR;
2083
2084 list_add(&ioport->list, &dev->ioports);
2085 return AE_OK;
2086 }
2087
2077 case ACPI_RESOURCE_TYPE_END_DEPENDENT: 2088 case ACPI_RESOURCE_TYPE_END_DEPENDENT:
2089 /* end IO enumeration */
2078 return AE_OK; 2090 return AE_OK;
2079 2091
2080 case ACPI_RESOURCE_TYPE_IRQ: 2092 case ACPI_RESOURCE_TYPE_IRQ:
@@ -2101,7 +2113,7 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
2101 if (!interrupt) 2113 if (!interrupt)
2102 return AE_ERROR; 2114 return AE_ERROR;
2103 2115
2104 list_add_tail(&interrupt->list, &dev->interrupts); 2116 list_add(&interrupt->list, &dev->interrupts);
2105 interrupt->irq.triggering = p->triggering; 2117 interrupt->irq.triggering = p->triggering;
2106 interrupt->irq.polarity = p->polarity; 2118 interrupt->irq.polarity = p->polarity;
2107 interrupt->irq.sharable = p->sharable; 2119 interrupt->irq.sharable = p->sharable;
@@ -2113,18 +2125,27 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
2113 case ACPI_RESOURCE_TYPE_IO: 2125 case ACPI_RESOURCE_TYPE_IO:
2114 { 2126 {
2115 struct acpi_resource_io *io = &resource->data.io; 2127 struct acpi_resource_io *io = &resource->data.io;
2116 struct sony_pic_ioport *ioport = NULL; 2128 struct sony_pic_ioport *ioport =
2129 list_first_entry(&dev->ioports, struct sony_pic_ioport, list);
2117 if (!io) { 2130 if (!io) {
2118 dprintk("Blank IO resource\n"); 2131 dprintk("Blank IO resource\n");
2119 return AE_OK; 2132 return AE_OK;
2120 } 2133 }
2121 2134
2122 ioport = kzalloc(sizeof(*ioport), GFP_KERNEL); 2135 if (!ioport->io1.minimum) {
2123 if (!ioport) 2136 memcpy(&ioport->io1, io, sizeof(*io));
2137 dprintk("IO1 at 0x%.4x (0x%.2x)\n", ioport->io1.minimum,
2138 ioport->io1.address_length);
2139 }
2140 else if (!ioport->io2.minimum) {
2141 memcpy(&ioport->io2, io, sizeof(*io));
2142 dprintk("IO2 at 0x%.4x (0x%.2x)\n", ioport->io2.minimum,
2143 ioport->io2.address_length);
2144 }
2145 else {
2146 printk(KERN_ERR DRV_PFX "Unknown SPIC Type, more than 2 IO Ports\n");
2124 return AE_ERROR; 2147 return AE_ERROR;
2125 2148 }
2126 list_add_tail(&ioport->list, &dev->ioports);
2127 memcpy(&ioport->io, io, sizeof(*io));
2128 return AE_OK; 2149 return AE_OK;
2129 } 2150 }
2130 default: 2151 default:
@@ -2199,10 +2220,22 @@ static int sony_pic_enable(struct acpi_device *device,
2199{ 2220{
2200 acpi_status status; 2221 acpi_status status;
2201 int result = 0; 2222 int result = 0;
2223 /* Type 1 resource layout is:
2224 * IO
2225 * IO
2226 * IRQNoFlags
2227 * End
2228 *
2229 * Type 2 and 3 resource layout is:
2230 * IO
2231 * IRQNoFlags
2232 * End
2233 */
2202 struct { 2234 struct {
2203 struct acpi_resource io_res; 2235 struct acpi_resource res1;
2204 struct acpi_resource irq_res; 2236 struct acpi_resource res2;
2205 struct acpi_resource end; 2237 struct acpi_resource res3;
2238 struct acpi_resource res4;
2206 } *resource; 2239 } *resource;
2207 struct acpi_buffer buffer = { 0, NULL }; 2240 struct acpi_buffer buffer = { 0, NULL };
2208 2241
@@ -2217,21 +2250,49 @@ static int sony_pic_enable(struct acpi_device *device,
2217 buffer.length = sizeof(*resource) + 1; 2250 buffer.length = sizeof(*resource) + 1;
2218 buffer.pointer = resource; 2251 buffer.pointer = resource;
2219 2252
2220 /* setup io resource */ 2253 /* setup Type 1 resources */
2221 resource->io_res.type = ACPI_RESOURCE_TYPE_IO; 2254 if (spic_dev.model == SONYPI_DEVICE_TYPE1) {
2222 resource->io_res.length = sizeof(struct acpi_resource);
2223 memcpy(&resource->io_res.data.io, &ioport->io,
2224 sizeof(struct acpi_resource_io));
2225 2255
2226 /* setup irq resource */ 2256 /* setup io resources */
2227 resource->irq_res.type = ACPI_RESOURCE_TYPE_IRQ; 2257 resource->res1.type = ACPI_RESOURCE_TYPE_IO;
2228 resource->irq_res.length = sizeof(struct acpi_resource); 2258 resource->res1.length = sizeof(struct acpi_resource);
2229 memcpy(&resource->irq_res.data.irq, &irq->irq, 2259 memcpy(&resource->res1.data.io, &ioport->io1,
2230 sizeof(struct acpi_resource_irq)); 2260 sizeof(struct acpi_resource_io));
2231 /* we requested a shared irq */
2232 resource->irq_res.data.irq.sharable = ACPI_SHARED;
2233 2261
2234 resource->end.type = ACPI_RESOURCE_TYPE_END_TAG; 2262 resource->res2.type = ACPI_RESOURCE_TYPE_IO;
2263 resource->res2.length = sizeof(struct acpi_resource);
2264 memcpy(&resource->res2.data.io, &ioport->io2,
2265 sizeof(struct acpi_resource_io));
2266
2267 /* setup irq resource */
2268 resource->res3.type = ACPI_RESOURCE_TYPE_IRQ;
2269 resource->res3.length = sizeof(struct acpi_resource);
2270 memcpy(&resource->res3.data.irq, &irq->irq,
2271 sizeof(struct acpi_resource_irq));
2272 /* we requested a shared irq */
2273 resource->res3.data.irq.sharable = ACPI_SHARED;
2274
2275 resource->res4.type = ACPI_RESOURCE_TYPE_END_TAG;
2276
2277 }
2278 /* setup Type 2/3 resources */
2279 else {
2280 /* setup io resource */
2281 resource->res1.type = ACPI_RESOURCE_TYPE_IO;
2282 resource->res1.length = sizeof(struct acpi_resource);
2283 memcpy(&resource->res1.data.io, &ioport->io1,
2284 sizeof(struct acpi_resource_io));
2285
2286 /* setup irq resource */
2287 resource->res2.type = ACPI_RESOURCE_TYPE_IRQ;
2288 resource->res2.length = sizeof(struct acpi_resource);
2289 memcpy(&resource->res2.data.irq, &irq->irq,
2290 sizeof(struct acpi_resource_irq));
2291 /* we requested a shared irq */
2292 resource->res2.data.irq.sharable = ACPI_SHARED;
2293
2294 resource->res3.type = ACPI_RESOURCE_TYPE_END_TAG;
2295 }
2235 2296
2236 /* Attempt to set the resource */ 2297 /* Attempt to set the resource */
2237 dprintk("Evaluating _SRS\n"); 2298 dprintk("Evaluating _SRS\n");
@@ -2239,7 +2300,7 @@ static int sony_pic_enable(struct acpi_device *device,
2239 2300
2240 /* check for total failure */ 2301 /* check for total failure */
2241 if (ACPI_FAILURE(status)) { 2302 if (ACPI_FAILURE(status)) {
2242 printk(KERN_ERR DRV_PFX "Error evaluating _SRS"); 2303 printk(KERN_ERR DRV_PFX "Error evaluating _SRS\n");
2243 result = -ENODEV; 2304 result = -ENODEV;
2244 goto end; 2305 goto end;
2245 } 2306 }
@@ -2268,11 +2329,14 @@ static irqreturn_t sony_pic_irq(int irq, void *dev_id)
2268 2329
2269 struct sony_pic_dev *dev = (struct sony_pic_dev *) dev_id; 2330 struct sony_pic_dev *dev = (struct sony_pic_dev *) dev_id;
2270 2331
2271 ev = inb_p(dev->cur_ioport->io.minimum); 2332 ev = inb_p(dev->cur_ioport->io1.minimum);
2272 data_mask = inb_p(dev->cur_ioport->io.minimum + dev->evport_offset); 2333 if (dev->cur_ioport->io2.minimum)
2334 data_mask = inb_p(dev->cur_ioport->io2.minimum);
2335 else
2336 data_mask = inb_p(dev->cur_ioport->io1.minimum + dev->evport_offset);
2273 2337
2274 dprintk("event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n", 2338 dprintk("event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n",
2275 ev, data_mask, dev->cur_ioport->io.minimum, dev->evport_offset); 2339 ev, data_mask, dev->cur_ioport->io1.minimum, dev->evport_offset);
2276 2340
2277 if (ev == 0x00 || ev == 0xff) 2341 if (ev == 0x00 || ev == 0xff)
2278 return IRQ_HANDLED; 2342 return IRQ_HANDLED;
@@ -2323,8 +2387,11 @@ static int sony_pic_remove(struct acpi_device *device, int type)
2323 } 2387 }
2324 2388
2325 free_irq(spic_dev.cur_irq->irq.interrupts[0], &spic_dev); 2389 free_irq(spic_dev.cur_irq->irq.interrupts[0], &spic_dev);
2326 release_region(spic_dev.cur_ioport->io.minimum, 2390 release_region(spic_dev.cur_ioport->io1.minimum,
2327 spic_dev.cur_ioport->io.address_length); 2391 spic_dev.cur_ioport->io1.address_length);
2392 if (spic_dev.cur_ioport->io2.minimum)
2393 release_region(spic_dev.cur_ioport->io2.minimum,
2394 spic_dev.cur_ioport->io2.address_length);
2328 2395
2329 sonypi_compat_exit(); 2396 sonypi_compat_exit();
2330 2397
@@ -2397,14 +2464,36 @@ static int sony_pic_add(struct acpi_device *device)
2397 goto err_remove_input; 2464 goto err_remove_input;
2398 2465
2399 /* request io port */ 2466 /* request io port */
2400 list_for_each_entry(io, &spic_dev.ioports, list) { 2467 list_for_each_entry_reverse(io, &spic_dev.ioports, list) {
2401 if (request_region(io->io.minimum, io->io.address_length, 2468 if (request_region(io->io1.minimum, io->io1.address_length,
2402 "Sony Programable I/O Device")) { 2469 "Sony Programable I/O Device")) {
2403 dprintk("I/O port: 0x%.4x (0x%.4x) + 0x%.2x\n", 2470 dprintk("I/O port1: 0x%.4x (0x%.4x) + 0x%.2x\n",
2404 io->io.minimum, io->io.maximum, 2471 io->io1.minimum, io->io1.maximum,
2405 io->io.address_length); 2472 io->io1.address_length);
2406 spic_dev.cur_ioport = io; 2473 /* Type 1 have 2 ioports */
2407 break; 2474 if (io->io2.minimum) {
2475 if (request_region(io->io2.minimum,
2476 io->io2.address_length,
2477 "Sony Programable I/O Device")) {
2478 dprintk("I/O port2: 0x%.4x (0x%.4x) + 0x%.2x\n",
2479 io->io2.minimum, io->io2.maximum,
2480 io->io2.address_length);
2481 spic_dev.cur_ioport = io;
2482 break;
2483 }
2484 else {
2485 dprintk("Unable to get I/O port2: "
2486 "0x%.4x (0x%.4x) + 0x%.2x\n",
2487 io->io2.minimum, io->io2.maximum,
2488 io->io2.address_length);
2489 release_region(io->io1.minimum,
2490 io->io1.address_length);
2491 }
2492 }
2493 else {
2494 spic_dev.cur_ioport = io;
2495 break;
2496 }
2408 } 2497 }
2409 } 2498 }
2410 if (!spic_dev.cur_ioport) { 2499 if (!spic_dev.cur_ioport) {
@@ -2414,7 +2503,7 @@ static int sony_pic_add(struct acpi_device *device)
2414 } 2503 }
2415 2504
2416 /* request IRQ */ 2505 /* request IRQ */
2417 list_for_each_entry(irq, &spic_dev.interrupts, list) { 2506 list_for_each_entry_reverse(irq, &spic_dev.interrupts, list) {
2418 if (!request_irq(irq->irq.interrupts[0], sony_pic_irq, 2507 if (!request_irq(irq->irq.interrupts[0], sony_pic_irq,
2419 IRQF_SHARED, "sony-laptop", &spic_dev)) { 2508 IRQF_SHARED, "sony-laptop", &spic_dev)) {
2420 dprintk("IRQ: %d - triggering: %d - " 2509 dprintk("IRQ: %d - triggering: %d - "
@@ -2462,8 +2551,11 @@ err_free_irq:
2462 free_irq(spic_dev.cur_irq->irq.interrupts[0], &spic_dev); 2551 free_irq(spic_dev.cur_irq->irq.interrupts[0], &spic_dev);
2463 2552
2464err_release_region: 2553err_release_region:
2465 release_region(spic_dev.cur_ioport->io.minimum, 2554 release_region(spic_dev.cur_ioport->io1.minimum,
2466 spic_dev.cur_ioport->io.address_length); 2555 spic_dev.cur_ioport->io1.address_length);
2556 if (spic_dev.cur_ioport->io2.minimum)
2557 release_region(spic_dev.cur_ioport->io2.minimum,
2558 spic_dev.cur_ioport->io2.address_length);
2467 2559
2468err_remove_compat: 2560err_remove_compat:
2469 sonypi_compat_exit(); 2561 sonypi_compat_exit();
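
For Type 1 hardware the sony-laptop driver now claims two I/O ranges and, if the second request fails, releases the first before moving on to the next candidate. A small sketch of that acquire-both-or-roll-back pattern; try_claim()/release() loosely model request_region()/release_region(), and the port numbers are made up:

    #include <stdio.h>

    struct ioport { unsigned io1, io2; };   /* io2 == 0: single-port type */

    static char claimed[0x10000];

    static int try_claim(unsigned port)
    {
        if (port == 0 || claimed[port])
            return 0;
        claimed[port] = 1;
        return 1;
    }

    static void release(unsigned port)
    {
        claimed[port] = 0;
    }

    /* Returns the first candidate whose port(s) could all be claimed. */
    static const struct ioport *pick_ioport(const struct ioport *tbl, int n)
    {
        for (int i = 0; i < n; i++) {
            if (!try_claim(tbl[i].io1))
                continue;
            if (tbl[i].io2 && !try_claim(tbl[i].io2)) {
                release(tbl[i].io1);        /* roll back the first claim */
                continue;
            }
            return &tbl[i];
        }
        return NULL;
    }

    int main(void)
    {
        const struct ioport candidates[] = {
            { 0x1080, 0x1084 },   /* Type 1: two ranges */
            { 0x10a0, 0 },        /* Type 2/3: one range */
        };
        const struct ioport *io;

        claimed[0x1084] = 1;      /* pretend the second range is busy */
        io = pick_ioport(candidates, 2);
        printf("picked io1=0x%x\n", io ? io->io1 : 0);
        return 0;
    }
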
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 4c3785c9d4b8..9ecc3adcf6c1 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -1726,6 +1726,7 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter, struct ethtool_wol
1726 case E1000_DEV_ID_82571EB_QUAD_COPPER: 1726 case E1000_DEV_ID_82571EB_QUAD_COPPER:
1727 case E1000_DEV_ID_82571EB_QUAD_FIBER: 1727 case E1000_DEV_ID_82571EB_QUAD_FIBER:
1728 case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE: 1728 case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
1729 case E1000_DEV_ID_82571PT_QUAD_COPPER:
1729 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: 1730 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1730 /* quad port adapters only support WoL on port A */ 1731 /* quad port adapters only support WoL on port A */
1731 if (!adapter->quad_port_a) { 1732 if (!adapter->quad_port_a) {
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index ba120f7fb0be..8604adbe351c 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -387,6 +387,7 @@ e1000_set_mac_type(struct e1000_hw *hw)
387 case E1000_DEV_ID_82571EB_SERDES_DUAL: 387 case E1000_DEV_ID_82571EB_SERDES_DUAL:
388 case E1000_DEV_ID_82571EB_SERDES_QUAD: 388 case E1000_DEV_ID_82571EB_SERDES_QUAD:
389 case E1000_DEV_ID_82571EB_QUAD_COPPER: 389 case E1000_DEV_ID_82571EB_QUAD_COPPER:
390 case E1000_DEV_ID_82571PT_QUAD_COPPER:
390 case E1000_DEV_ID_82571EB_QUAD_FIBER: 391 case E1000_DEV_ID_82571EB_QUAD_FIBER:
391 case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE: 392 case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
392 hw->mac_type = e1000_82571; 393 hw->mac_type = e1000_82571;
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index fe8714655c90..07f0ea73676e 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -475,6 +475,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
475#define E1000_DEV_ID_82571EB_FIBER 0x105F 475#define E1000_DEV_ID_82571EB_FIBER 0x105F
476#define E1000_DEV_ID_82571EB_SERDES 0x1060 476#define E1000_DEV_ID_82571EB_SERDES 0x1060
477#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4 477#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4
478#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5
478#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5 479#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5
479#define E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE 0x10BC 480#define E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE 0x10BC
480#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9 481#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 4a225950fb43..e7c8951f47fa 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -108,6 +108,7 @@ static struct pci_device_id e1000_pci_tbl[] = {
108 INTEL_E1000_ETHERNET_DEVICE(0x10BC), 108 INTEL_E1000_ETHERNET_DEVICE(0x10BC),
109 INTEL_E1000_ETHERNET_DEVICE(0x10C4), 109 INTEL_E1000_ETHERNET_DEVICE(0x10C4),
110 INTEL_E1000_ETHERNET_DEVICE(0x10C5), 110 INTEL_E1000_ETHERNET_DEVICE(0x10C5),
111 INTEL_E1000_ETHERNET_DEVICE(0x10D5),
111 INTEL_E1000_ETHERNET_DEVICE(0x10D9), 112 INTEL_E1000_ETHERNET_DEVICE(0x10D9),
112 INTEL_E1000_ETHERNET_DEVICE(0x10DA), 113 INTEL_E1000_ETHERNET_DEVICE(0x10DA),
113 /* required last entry */ 114 /* required last entry */
@@ -1101,6 +1102,7 @@ e1000_probe(struct pci_dev *pdev,
1101 case E1000_DEV_ID_82571EB_QUAD_COPPER: 1102 case E1000_DEV_ID_82571EB_QUAD_COPPER:
1102 case E1000_DEV_ID_82571EB_QUAD_FIBER: 1103 case E1000_DEV_ID_82571EB_QUAD_FIBER:
1103 case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE: 1104 case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
1105 case E1000_DEV_ID_82571PT_QUAD_COPPER:
1104 /* if quad port adapter, disable WoL on all but port A */ 1106 /* if quad port adapter, disable WoL on all but port A */
1105 if (global_quad_port_a != 0) 1107 if (global_quad_port_a != 0)
1106 adapter->eeprom_wol = 0; 1108 adapter->eeprom_wol = 0;
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 6a117e9968cb..315335671f0f 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -534,7 +534,7 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
534 } 534 }
535 535
536 /* PHY status changed */ 536 /* PHY status changed */
537 if (eth_int_cause_ext & ETH_INT_CAUSE_PHY) { 537 if (eth_int_cause_ext & (ETH_INT_CAUSE_PHY | ETH_INT_CAUSE_STATE)) {
538 struct ethtool_cmd cmd; 538 struct ethtool_cmd cmd;
539 539
540 if (mii_link_ok(&mp->mii)) { 540 if (mii_link_ok(&mp->mii)) {
@@ -1357,7 +1357,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
1357#endif 1357#endif
1358 1358
1359 dev->watchdog_timeo = 2 * HZ; 1359 dev->watchdog_timeo = 2 * HZ;
1360 dev->tx_queue_len = mp->tx_ring_size;
1361 dev->base_addr = 0; 1360 dev->base_addr = 0;
1362 dev->change_mtu = mv643xx_eth_change_mtu; 1361 dev->change_mtu = mv643xx_eth_change_mtu;
1363 dev->do_ioctl = mv643xx_eth_do_ioctl; 1362 dev->do_ioctl = mv643xx_eth_do_ioctl;
@@ -2768,8 +2767,6 @@ static const struct ethtool_ops mv643xx_ethtool_ops = {
2768 .get_stats_count = mv643xx_get_stats_count, 2767 .get_stats_count = mv643xx_get_stats_count,
2769 .get_ethtool_stats = mv643xx_get_ethtool_stats, 2768 .get_ethtool_stats = mv643xx_get_ethtool_stats,
2770 .get_strings = mv643xx_get_strings, 2769 .get_strings = mv643xx_get_strings,
2771 .get_stats_count = mv643xx_get_stats_count,
2772 .get_ethtool_stats = mv643xx_get_ethtool_stats,
2773 .nway_reset = mv643xx_eth_nway_restart, 2770 .nway_reset = mv643xx_eth_nway_restart,
2774}; 2771};
2775 2772
diff --git a/drivers/net/mv643xx_eth.h b/drivers/net/mv643xx_eth.h
index 82f8c0cbfb64..565b96696aca 100644
--- a/drivers/net/mv643xx_eth.h
+++ b/drivers/net/mv643xx_eth.h
@@ -64,7 +64,9 @@
64#define ETH_INT_CAUSE_TX_ERROR (ETH_TX_QUEUES_ENABLED << 8) 64#define ETH_INT_CAUSE_TX_ERROR (ETH_TX_QUEUES_ENABLED << 8)
65#define ETH_INT_CAUSE_TX (ETH_INT_CAUSE_TX_DONE | ETH_INT_CAUSE_TX_ERROR) 65#define ETH_INT_CAUSE_TX (ETH_INT_CAUSE_TX_DONE | ETH_INT_CAUSE_TX_ERROR)
66#define ETH_INT_CAUSE_PHY 0x00010000 66#define ETH_INT_CAUSE_PHY 0x00010000
67#define ETH_INT_UNMASK_ALL_EXT (ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY) 67#define ETH_INT_CAUSE_STATE 0x00100000
68#define ETH_INT_UNMASK_ALL_EXT (ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY | \
69 ETH_INT_CAUSE_STATE)
68 70
69#define ETH_INT_MASK_ALL 0x00000000 71#define ETH_INT_MASK_ALL 0x00000000
70#define ETH_INT_MASK_ALL_EXT 0x00000000 72#define ETH_INT_MASK_ALL_EXT 0x00000000
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index c06cae3f0b56..503f2685fb73 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -116,7 +116,7 @@ struct el3_private {
116 spinlock_t lock; 116 spinlock_t lock;
117}; 117};
118 118
119static const char *if_names[] = { "auto", "10base2", "10baseT", "AUI" }; 119static const char *if_names[] = { "auto", "10baseT", "10base2", "AUI" };
120 120
121/*====================================================================*/ 121/*====================================================================*/
122 122
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c
index f79cf87a2bff..c0b6d19d1457 100644
--- a/drivers/net/ppp_mppe.c
+++ b/drivers/net/ppp_mppe.c
@@ -136,7 +136,7 @@ struct ppp_mppe_state {
136 * Key Derivation, from RFC 3078, RFC 3079. 136 * Key Derivation, from RFC 3078, RFC 3079.
137 * Equivalent to Get_Key() for MS-CHAP as described in RFC 3079. 137 * Equivalent to Get_Key() for MS-CHAP as described in RFC 3079.
138 */ 138 */
139static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *InterimKey) 139static void get_new_key_from_sha(struct ppp_mppe_state * state)
140{ 140{
141 struct hash_desc desc; 141 struct hash_desc desc;
142 struct scatterlist sg[4]; 142 struct scatterlist sg[4];
@@ -153,8 +153,6 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *I
153 desc.flags = 0; 153 desc.flags = 0;
154 154
155 crypto_hash_digest(&desc, sg, nbytes, state->sha1_digest); 155 crypto_hash_digest(&desc, sg, nbytes, state->sha1_digest);
156
157 memcpy(InterimKey, state->sha1_digest, state->keylen);
158} 156}
159 157
160/* 158/*
@@ -163,21 +161,21 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *I
163 */ 161 */
164static void mppe_rekey(struct ppp_mppe_state * state, int initial_key) 162static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
165{ 163{
166 unsigned char InterimKey[MPPE_MAX_KEY_LEN];
167 struct scatterlist sg_in[1], sg_out[1]; 164 struct scatterlist sg_in[1], sg_out[1];
168 struct blkcipher_desc desc = { .tfm = state->arc4 }; 165 struct blkcipher_desc desc = { .tfm = state->arc4 };
169 166
170 get_new_key_from_sha(state, InterimKey); 167 get_new_key_from_sha(state);
171 if (!initial_key) { 168 if (!initial_key) {
172 crypto_blkcipher_setkey(state->arc4, InterimKey, state->keylen); 169 crypto_blkcipher_setkey(state->arc4, state->sha1_digest,
173 setup_sg(sg_in, InterimKey, state->keylen); 170 state->keylen);
171 setup_sg(sg_in, state->sha1_digest, state->keylen);
174 setup_sg(sg_out, state->session_key, state->keylen); 172 setup_sg(sg_out, state->session_key, state->keylen);
175 if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, 173 if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
176 state->keylen) != 0) { 174 state->keylen) != 0) {
177 printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n"); 175 printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n");
178 } 176 }
179 } else { 177 } else {
180 memcpy(state->session_key, InterimKey, state->keylen); 178 memcpy(state->session_key, state->sha1_digest, state->keylen);
181 } 179 }
182 if (state->keylen == 8) { 180 if (state->keylen == 8) {
183 /* See RFC 3078 */ 181 /* See RFC 3078 */
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 69da95b5ad0c..ea151315050c 100755
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -2248,6 +2248,13 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
2248 qdev->rsp_consumer_index) && (work_done < work_to_do)) { 2248 qdev->rsp_consumer_index) && (work_done < work_to_do)) {
2249 2249
2250 net_rsp = qdev->rsp_current; 2250 net_rsp = qdev->rsp_current;
2251 rmb();
2252 /*
2253 * Fix 4032 chip's undocumented "feature" where bit-8 is set if the
2254 * inbound completion is for a VLAN.
2255 */
2256 if (qdev->device_id == QL3032_DEVICE_ID)
2257 net_rsp->opcode &= 0x7f;
2251 switch (net_rsp->opcode) { 2258 switch (net_rsp->opcode) {
2252 2259
2253 case OPCODE_OB_MAC_IOCB_FN0: 2260 case OPCODE_OB_MAC_IOCB_FN0:
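
The added rmb() and opcode masking work around the QL3032 setting bit 8 of the completion opcode for VLAN frames. A self-contained sketch of the mask-before-dispatch pattern; the opcode values and the VLAN bit position here are illustrative, not the driver's definitions.

#include <stdint.h>
#include <stdio.h>

#define OPCODE_MASK        0x7f
#define OPCODE_VLAN_BIT    0x80   /* hypothetical: set by hardware for VLAN completions */
#define OPCODE_IB_MAC_IOCB 0x01   /* hypothetical opcode value */

static void handle_completion(uint8_t raw_opcode)
{
	uint8_t opcode = raw_opcode & OPCODE_MASK;   /* strip the undocumented bit */

	switch (opcode) {
	case OPCODE_IB_MAC_IOCB:
		puts("inbound MAC completion");
		break;
	default:
		printf("unknown opcode %#x\n", (unsigned)opcode);
		break;
	}
}

int main(void)
{
	handle_completion(OPCODE_IB_MAC_IOCB | OPCODE_VLAN_BIT);   /* still dispatches */
	return 0;
}
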
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index b85ab4a8f2a3..c76dd29c8e9a 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1228,7 +1228,10 @@ static void rtl8169_hw_phy_config(struct net_device *dev)
1228 return; 1228 return;
1229 } 1229 }
1230 1230
1231 /* phy config for RTL8169s mac_version C chip */ 1231 if ((tp->mac_version != RTL_GIGA_MAC_VER_02) &&
1232 (tp->mac_version != RTL_GIGA_MAC_VER_03))
1233 return;
1234
1232 mdio_write(ioaddr, 31, 0x0001); //w 31 2 0 1 1235 mdio_write(ioaddr, 31, 0x0001); //w 31 2 0 1
1233 mdio_write(ioaddr, 21, 0x1000); //w 21 15 0 1000 1236 mdio_write(ioaddr, 21, 0x1000); //w 21 15 0 1000
1234 mdio_write(ioaddr, 24, 0x65c7); //w 24 15 0 65c7 1237 mdio_write(ioaddr, 24, 0x65c7); //w 24 15 0 65c7
@@ -1915,7 +1918,11 @@ static void rtl_hw_start_8169(struct net_device *dev)
1915 1918
1916 rtl_set_rx_max_size(ioaddr); 1919 rtl_set_rx_max_size(ioaddr);
1917 1920
1918 rtl_set_rx_tx_config_registers(tp); 1921 if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
1922 (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
1923 (tp->mac_version == RTL_GIGA_MAC_VER_03) ||
1924 (tp->mac_version == RTL_GIGA_MAC_VER_04))
1925 rtl_set_rx_tx_config_registers(tp);
1919 1926
1920 tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW; 1927 tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
1921 1928
@@ -1938,6 +1945,14 @@ static void rtl_hw_start_8169(struct net_device *dev)
1938 1945
1939 rtl_set_rx_tx_desc_registers(tp, ioaddr); 1946 rtl_set_rx_tx_desc_registers(tp, ioaddr);
1940 1947
1948 if ((tp->mac_version != RTL_GIGA_MAC_VER_01) &&
1949 (tp->mac_version != RTL_GIGA_MAC_VER_02) &&
1950 (tp->mac_version != RTL_GIGA_MAC_VER_03) &&
1951 (tp->mac_version != RTL_GIGA_MAC_VER_04)) {
1952 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
1953 rtl_set_rx_tx_config_registers(tp);
1954 }
1955
1941 RTL_W8(Cfg9346, Cfg9346_Lock); 1956 RTL_W8(Cfg9346, Cfg9346_Lock);
1942 1957
1943 /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ 1958 /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
@@ -1952,8 +1967,6 @@ static void rtl_hw_start_8169(struct net_device *dev)
1952 1967
1953 /* Enable all known interrupts by setting the interrupt mask. */ 1968 /* Enable all known interrupts by setting the interrupt mask. */
1954 RTL_W16(IntrMask, tp->intr_event); 1969 RTL_W16(IntrMask, tp->intr_event);
1955
1956 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
1957} 1970}
1958 1971
1959static void rtl_hw_start_8168(struct net_device *dev) 1972static void rtl_hw_start_8168(struct net_device *dev)
@@ -2567,6 +2580,15 @@ static void rtl8169_tx_interrupt(struct net_device *dev,
2567 (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) { 2580 (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
2568 netif_wake_queue(dev); 2581 netif_wake_queue(dev);
2569 } 2582 }
2583 /*
2584 * 8168 hack: TxPoll requests are lost when the Tx packets are
2585 * too close. Let's kick an extra TxPoll request when a burst
2586 * of start_xmit activity is detected (if it is not detected,
2587 * it is slow enough). -- FR
2588 */
2589 smp_rmb();
2590 if (tp->cur_tx != dirty_tx)
2591 RTL_W8(TxPoll, NPQ);
2570 } 2592 }
2571} 2593}
2572 2594
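
The r8169 tx-interrupt hunk re-checks cur_tx against dirty_tx after reclaiming descriptors and writes TxPoll again if new packets were queued meanwhile, so a doorbell the 8168 dropped gets replayed. A rough user-space model of that re-kick pattern, with the doorbell write stubbed out and the kernel's smp_rmb() approximated by an acquire fence:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int cur_tx;   /* producer index, advanced by start_xmit */
static unsigned int dirty_tx;         /* consumer index, advanced while reclaiming */

static void kick_tx_doorbell(void)    /* stands in for the TxPoll register write */
{
	puts("kick TxPoll");
}

static void tx_interrupt(void)
{
	/* ... reclaim completed descriptors here, advancing dirty_tx ... */

	atomic_thread_fence(memory_order_acquire);   /* analogous to smp_rmb() */
	if (atomic_load(&cur_tx) != dirty_tx)
		kick_tx_doorbell();   /* a burst arrived while cleaning: replay the doorbell */
}

int main(void)
{
	atomic_store(&cur_tx, 1);   /* pretend start_xmit queued a packet mid-cleanup */
	tx_interrupt();
	return 0;
}
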
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index eaffe551d1d8..ea117fc3d5e3 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -338,6 +338,16 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
338 if (!(hw->flags & SKY2_HW_GIGABIT)) { 338 if (!(hw->flags & SKY2_HW_GIGABIT)) {
339 /* enable automatic crossover */ 339 /* enable automatic crossover */
340 ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1; 340 ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1;
341
342 if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
343 hw->chip_rev == CHIP_REV_YU_FE2_A0) {
344 u16 spec;
345
346 /* Enable Class A driver for FE+ A0 */
347 spec = gm_phy_read(hw, port, PHY_MARV_FE_SPEC_2);
348 spec |= PHY_M_FESC_SEL_CL_A;
349 gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec);
350 }
341 } else { 351 } else {
342 /* disable energy detect */ 352 /* disable energy detect */
343 ctrl &= ~PHY_M_PC_EN_DET_MSK; 353 ctrl &= ~PHY_M_PC_EN_DET_MSK;
@@ -816,7 +826,8 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
816 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); 826 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
817 sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); 827 sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
818 828
819 if (!(hw->flags & SKY2_HW_RAMBUFFER)) { 829 /* On chips without ram buffer, pause is controlled by MAC level */
830 if (sky2_read8(hw, B2_E_0) == 0) {
820 sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8); 831 sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
821 sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8); 832 sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
822 833
@@ -899,6 +910,20 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
899 return le; 910 return le;
900} 911}
901 912
913static void tx_init(struct sky2_port *sky2)
914{
915 struct sky2_tx_le *le;
916
917 sky2->tx_prod = sky2->tx_cons = 0;
918 sky2->tx_tcpsum = 0;
919 sky2->tx_last_mss = 0;
920
921 le = get_tx_le(sky2);
922 le->addr = 0;
923 le->opcode = OP_ADDR64 | HW_OWNER;
924 sky2->tx_addr64 = 0;
925}
926
902static inline struct tx_ring_info *tx_le_re(struct sky2_port *sky2, 927static inline struct tx_ring_info *tx_le_re(struct sky2_port *sky2,
903 struct sky2_tx_le *le) 928 struct sky2_tx_le *le)
904{ 929{
@@ -1271,7 +1296,7 @@ static int sky2_up(struct net_device *dev)
1271 struct sky2_port *sky2 = netdev_priv(dev); 1296 struct sky2_port *sky2 = netdev_priv(dev);
1272 struct sky2_hw *hw = sky2->hw; 1297 struct sky2_hw *hw = sky2->hw;
1273 unsigned port = sky2->port; 1298 unsigned port = sky2->port;
1274 u32 imask; 1299 u32 imask, ramsize;
1275 int cap, err = -ENOMEM; 1300 int cap, err = -ENOMEM;
1276 struct net_device *otherdev = hw->dev[sky2->port^1]; 1301 struct net_device *otherdev = hw->dev[sky2->port^1];
1277 1302
@@ -1309,7 +1334,8 @@ static int sky2_up(struct net_device *dev)
1309 GFP_KERNEL); 1334 GFP_KERNEL);
1310 if (!sky2->tx_ring) 1335 if (!sky2->tx_ring)
1311 goto err_out; 1336 goto err_out;
1312 sky2->tx_prod = sky2->tx_cons = 0; 1337
1338 tx_init(sky2);
1313 1339
1314 sky2->rx_le = pci_alloc_consistent(hw->pdev, RX_LE_BYTES, 1340 sky2->rx_le = pci_alloc_consistent(hw->pdev, RX_LE_BYTES,
1315 &sky2->rx_le_map); 1341 &sky2->rx_le_map);
@@ -1326,13 +1352,12 @@ static int sky2_up(struct net_device *dev)
1326 1352
1327 sky2_mac_init(hw, port); 1353 sky2_mac_init(hw, port);
1328 1354
1329 if (hw->flags & SKY2_HW_RAMBUFFER) { 1355 /* Register is number of 4K blocks on internal RAM buffer. */
1330 /* Register is number of 4K blocks on internal RAM buffer. */ 1356 ramsize = sky2_read8(hw, B2_E_0) * 4;
1331 u32 ramsize = sky2_read8(hw, B2_E_0) * 4; 1357 if (ramsize > 0) {
1332 u32 rxspace; 1358 u32 rxspace;
1333 1359
1334 printk(KERN_DEBUG PFX "%s: ram buffer %dK\n", dev->name, ramsize); 1360 pr_debug(PFX "%s: ram buffer %dK\n", dev->name, ramsize);
1335
1336 if (ramsize < 16) 1361 if (ramsize < 16)
1337 rxspace = ramsize / 2; 1362 rxspace = ramsize / 2;
1338 else 1363 else
@@ -1995,7 +2020,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
1995 2020
1996 synchronize_irq(hw->pdev->irq); 2021 synchronize_irq(hw->pdev->irq);
1997 2022
1998 if (!(hw->flags & SKY2_HW_RAMBUFFER)) 2023 if (sky2_read8(hw, B2_E_0) == 0)
1999 sky2_set_tx_stfwd(hw, port); 2024 sky2_set_tx_stfwd(hw, port);
2000 2025
2001 ctl = gma_read16(hw, port, GM_GP_CTRL); 2026 ctl = gma_read16(hw, port, GM_GP_CTRL);
@@ -2138,6 +2163,15 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
2138 sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending; 2163 sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
2139 prefetch(sky2->rx_ring + sky2->rx_next); 2164 prefetch(sky2->rx_ring + sky2->rx_next);
2140 2165
2166 /* This chip has hardware problems that generate bogus status.
2167 * So do only marginal checking and expect higher level protocols
2168 * to handle crap frames.
2169 */
2170 if (sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
2171 sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0 &&
2172 length != count)
2173 goto okay;
2174
2141 if (status & GMR_FS_ANY_ERR) 2175 if (status & GMR_FS_ANY_ERR)
2142 goto error; 2176 goto error;
2143 2177
@@ -2146,8 +2180,9 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
2146 2180
2147 /* if length reported by DMA does not match PHY, packet was truncated */ 2181 /* if length reported by DMA does not match PHY, packet was truncated */
2148 if (length != count) 2182 if (length != count)
2149 goto len_mismatch; 2183 goto len_error;
2150 2184
2185okay:
2151 if (length < copybreak) 2186 if (length < copybreak)
2152 skb = receive_copy(sky2, re, length); 2187 skb = receive_copy(sky2, re, length);
2153 else 2188 else
@@ -2157,13 +2192,13 @@ resubmit:
2157 2192
2158 return skb; 2193 return skb;
2159 2194
2160len_mismatch: 2195len_error:
2161 /* Truncation of overlength packets 2196 /* Truncation of overlength packets
2162 causes PHY length to not match MAC length */ 2197 causes PHY length to not match MAC length */
2163 ++sky2->net_stats.rx_length_errors; 2198 ++sky2->net_stats.rx_length_errors;
2164 if (netif_msg_rx_err(sky2) && net_ratelimit()) 2199 if (netif_msg_rx_err(sky2) && net_ratelimit())
2165 pr_info(PFX "%s: rx length mismatch: length %d status %#x\n", 2200 pr_info(PFX "%s: rx length error: status %#x length %d\n",
2166 dev->name, length, status); 2201 dev->name, status, length);
2167 goto resubmit; 2202 goto resubmit;
2168 2203
2169error: 2204error:
@@ -2526,7 +2561,7 @@ static void sky2_watchdog(unsigned long arg)
2526 ++active; 2561 ++active;
2527 2562
2528 /* For chips with Rx FIFO, check if stuck */ 2563 /* For chips with Rx FIFO, check if stuck */
2529 if ((hw->flags & SKY2_HW_RAMBUFFER) && 2564 if ((hw->flags & SKY2_HW_FIFO_HANG_CHECK) &&
2530 sky2_rx_hung(dev)) { 2565 sky2_rx_hung(dev)) {
2531 pr_info(PFX "%s: receiver hang detected\n", 2566 pr_info(PFX "%s: receiver hang detected\n",
2532 dev->name); 2567 dev->name);
@@ -2684,8 +2719,10 @@ static int __devinit sky2_init(struct sky2_hw *hw)
2684 switch(hw->chip_id) { 2719 switch(hw->chip_id) {
2685 case CHIP_ID_YUKON_XL: 2720 case CHIP_ID_YUKON_XL:
2686 hw->flags = SKY2_HW_GIGABIT 2721 hw->flags = SKY2_HW_GIGABIT
2687 | SKY2_HW_NEWER_PHY 2722 | SKY2_HW_NEWER_PHY;
2688 | SKY2_HW_RAMBUFFER; 2723 if (hw->chip_rev < 3)
2724 hw->flags |= SKY2_HW_FIFO_HANG_CHECK;
2725
2689 break; 2726 break;
2690 2727
2691 case CHIP_ID_YUKON_EC_U: 2728 case CHIP_ID_YUKON_EC_U:
@@ -2711,11 +2748,10 @@ static int __devinit sky2_init(struct sky2_hw *hw)
2711 dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n"); 2748 dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n");
2712 return -EOPNOTSUPP; 2749 return -EOPNOTSUPP;
2713 } 2750 }
2714 hw->flags = SKY2_HW_GIGABIT | SKY2_HW_RAMBUFFER; 2751 hw->flags = SKY2_HW_GIGABIT | SKY2_HW_FIFO_HANG_CHECK;
2715 break; 2752 break;
2716 2753
2717 case CHIP_ID_YUKON_FE: 2754 case CHIP_ID_YUKON_FE:
2718 hw->flags = SKY2_HW_RAMBUFFER;
2719 break; 2755 break;
2720 2756
2721 case CHIP_ID_YUKON_FE_P: 2757 case CHIP_ID_YUKON_FE_P:
@@ -3923,13 +3959,6 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
3923 sky2->hw = hw; 3959 sky2->hw = hw;
3924 sky2->msg_enable = netif_msg_init(debug, default_msg); 3960 sky2->msg_enable = netif_msg_init(debug, default_msg);
3925 3961
3926 /* This chip has hardware problems that generates
3927 * bogus PHY receive status so by default shut up the message.
3928 */
3929 if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
3930 hw->chip_rev == CHIP_REV_YU_FE2_A0)
3931 sky2->msg_enable &= ~NETIF_MSG_RX_ERR;
3932
3933 /* Auto speed and flow control */ 3962 /* Auto speed and flow control */
3934 sky2->autoneg = AUTONEG_ENABLE; 3963 sky2->autoneg = AUTONEG_ENABLE;
3935 sky2->flow_mode = FC_BOTH; 3964 sky2->flow_mode = FC_BOTH;
@@ -3953,8 +3982,12 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
3953 dev->features |= NETIF_F_HIGHDMA; 3982 dev->features |= NETIF_F_HIGHDMA;
3954 3983
3955#ifdef SKY2_VLAN_TAG_USED 3984#ifdef SKY2_VLAN_TAG_USED
3956 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 3985 /* The workaround for FE+ status conflicts with VLAN tag detection. */
3957 dev->vlan_rx_register = sky2_vlan_rx_register; 3986 if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
3987 sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0)) {
3988 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3989 dev->vlan_rx_register = sky2_vlan_rx_register;
3990 }
3958#endif 3991#endif
3959 3992
3960 /* read the mac address */ 3993 /* read the mac address */
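
Several sky2 hunks replace the static SKY2_HW_RAMBUFFER flag with a runtime probe: B2_E_0 reports the internal RAM size in 4K blocks, and zero means no RAM buffer, so pause is left to the MAC. A small stand-alone sketch of that probe-then-branch pattern; read_ram_blocks() is a made-up stub for the register read and the rx/tx split is illustrative, not the driver's exact policy.

#include <stdio.h>

static unsigned int read_ram_blocks(void)   /* hypothetical register read, 4K units */
{
	return 12;                              /* e.g. 48K of internal RAM */
}

static void configure_fifo(void)
{
	unsigned int ramsize = read_ram_blocks() * 4;   /* kilobytes */
	unsigned int rxspace;

	if (ramsize == 0) {
		puts("no RAM buffer: pause handled at MAC level");
		return;
	}

	rxspace = (ramsize < 16) ? ramsize / 2 : ramsize - 16;   /* illustrative split */
	printf("ram buffer %uK, rx gets %uK\n", ramsize, rxspace);
}

int main(void)
{
	configure_fifo();
	return 0;
}
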
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 69cd98400fe6..8bc5c54e3efa 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2063,7 +2063,7 @@ struct sky2_hw {
2063#define SKY2_HW_FIBRE_PHY 0x00000002 2063#define SKY2_HW_FIBRE_PHY 0x00000002
2064#define SKY2_HW_GIGABIT 0x00000004 2064#define SKY2_HW_GIGABIT 0x00000004
2065#define SKY2_HW_NEWER_PHY 0x00000008 2065#define SKY2_HW_NEWER_PHY 0x00000008
2066#define SKY2_HW_RAMBUFFER 0x00000010 /* chip has RAM FIFO */ 2066#define SKY2_HW_FIFO_HANG_CHECK 0x00000010
2067#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */ 2067#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */
2068#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */ 2068#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */
2069#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */ 2069#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 16c7a0e87850..a2de32fabc17 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -405,7 +405,7 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
405 dev->net->ethtool_ops = &dm9601_ethtool_ops; 405 dev->net->ethtool_ops = &dm9601_ethtool_ops;
406 dev->net->hard_header_len += DM_TX_OVERHEAD; 406 dev->net->hard_header_len += DM_TX_OVERHEAD;
407 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; 407 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
408 dev->rx_urb_size = dev->net->mtu + DM_RX_OVERHEAD; 408 dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD;
409 409
410 dev->mii.dev = dev->net; 410 dev->mii.dev = dev->net;
411 dev->mii.mdio_read = dm9601_mdio_read; 411 dev->mii.mdio_read = dm9601_mdio_read;
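
The dm9601 fix sizes the receive URB for the whole frame: MTU plus the Ethernet header plus the adapter's own per-packet overhead; without ETH_HLEN a maximum-size frame would not fit in the URB. A tiny arithmetic sketch, with the DM_RX_OVERHEAD value picked purely for illustration:

#include <stdio.h>

#define ETH_HLEN       14   /* destination MAC + source MAC + ethertype */
#define DM_RX_OVERHEAD 7    /* illustrative: device status/length prefix */

int main(void)
{
	unsigned int mtu = 1500;
	unsigned int rx_urb_size = mtu + ETH_HLEN + DM_RX_OVERHEAD;

	printf("rx_urb_size = %u bytes\n", rx_urb_size);   /* 1521 for a standard MTU */
	return 0;
}
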
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index ef35bc6c4a22..4eb6d9752881 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -43,7 +43,7 @@ obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o
43obj-$(CONFIG_PCMCIA_WL3501) += wl3501_cs.o 43obj-$(CONFIG_PCMCIA_WL3501) += wl3501_cs.o
44 44
45obj-$(CONFIG_USB_ZD1201) += zd1201.o 45obj-$(CONFIG_USB_ZD1201) += zd1201.o
46obj-$(CONFIG_LIBERTAS_USB) += libertas/ 46obj-$(CONFIG_LIBERTAS) += libertas/
47 47
48rtl8187-objs := rtl8187_dev.o rtl8187_rtl8225.o 48rtl8187-objs := rtl8187_dev.o rtl8187_rtl8225.o
49obj-$(CONFIG_RTL8187) += rtl8187.o 49obj-$(CONFIG_RTL8187) += rtl8187.o
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
index d6d9413d7f23..6acfdc49dccd 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
@@ -444,7 +444,7 @@ static int bcm43xx_wx_set_xmitpower(struct net_device *net_dev,
444 u16 maxpower; 444 u16 maxpower;
445 445
446 if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM) { 446 if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM) {
447 printk(PFX KERN_ERR "TX power not in dBm.\n"); 447 printk(KERN_ERR PFX "TX power not in dBm.\n");
448 return -EOPNOTSUPP; 448 return -EOPNOTSUPP;
449 } 449 }
450 450
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 7dcaa09b3c20..50f2dd9e1bb2 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1444,7 +1444,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, quirk_netmos);
1444static void __devinit quirk_e100_interrupt(struct pci_dev *dev) 1444static void __devinit quirk_e100_interrupt(struct pci_dev *dev)
1445{ 1445{
1446 u16 command; 1446 u16 command;
1447 u32 bar;
1448 u8 __iomem *csr; 1447 u8 __iomem *csr;
1449 u8 cmd_hi; 1448 u8 cmd_hi;
1450 1449
@@ -1476,12 +1475,12 @@ static void __devinit quirk_e100_interrupt(struct pci_dev *dev)
1476 * re-enable them when it's ready. 1475 * re-enable them when it's ready.
1477 */ 1476 */
1478 pci_read_config_word(dev, PCI_COMMAND, &command); 1477 pci_read_config_word(dev, PCI_COMMAND, &command);
1479 pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &bar);
1480 1478
1481 if (!(command & PCI_COMMAND_MEMORY) || !bar) 1479 if (!(command & PCI_COMMAND_MEMORY) || !pci_resource_start(dev, 0))
1482 return; 1480 return;
1483 1481
1484 csr = ioremap(bar, 8); 1482 /* Convert from PCI bus to resource space. */
1483 csr = ioremap(pci_resource_start(dev, 0), 8);
1485 if (!csr) { 1484 if (!csr) {
1486 printk(KERN_WARNING "PCI: Can't map %s e100 registers\n", 1485 printk(KERN_WARNING "PCI: Can't map %s e100 registers\n",
1487 pci_name(dev)); 1486 pci_name(dev));
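
The e100 quirk now takes the mapping base from pci_resource_start() instead of re-reading BAR0: the raw BAR holds a bus address, which is not necessarily the address the kernel placed in the resource tree. A kernel-context sketch of the preferred pattern (assumes <linux/pci.h> and <linux/io.h>; it is not registered as a fixup and does no real work):

/* Sketch only: map the first 8 bytes of BAR0 through the kernel's resource view. */
static void map_csr_sketch(struct pci_dev *dev)
{
	void __iomem *csr;

	if (!pci_resource_start(dev, 0))   /* BAR not assigned or not usable */
		return;

	csr = ioremap(pci_resource_start(dev, 0), 8);
	if (!csr)
		return;

	/* ... read/write device registers through csr ... */

	iounmap(csr);
}
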
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index c7c4574729b1..de3155b21285 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -289,6 +289,7 @@ int power_supply_uevent(struct device *dev, char **envp, int num_envp,
289 if (ret) 289 if (ret)
290 goto out; 290 goto out;
291 } 291 }
292 envp[i] = NULL;
292 293
293out: 294out:
294 free_page((unsigned long)prop_buf); 295 free_page((unsigned long)prop_buf);
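
The added envp[i] = NULL terminates the uevent environment array so code that walks it until NULL stops at the last entry actually filled in. A stand-alone sketch of building and walking a NULL-terminated string vector:

#include <stdio.h>

int main(void)
{
	const char *envp[4];
	int i = 0;

	envp[i++] = "POWER_SUPPLY_NAME=BAT0";
	envp[i++] = "POWER_SUPPLY_STATUS=Charging";
	envp[i] = NULL;                       /* terminator: consumers stop here */

	for (const char **p = envp; *p; p++)
		puts(*p);
	return 0;
}
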
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index d5d8caba3560..ab13824df856 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -451,7 +451,7 @@ static int asd_build_smp_ascb(struct asd_ascb *ascb, struct sas_task *task,
451 struct scb *scb; 451 struct scb *scb;
452 452
453 pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_req, 1, 453 pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_req, 1,
454 PCI_DMA_FROMDEVICE); 454 PCI_DMA_TODEVICE);
455 pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_resp, 1, 455 pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_resp, 1,
456 PCI_DMA_FROMDEVICE); 456 PCI_DMA_FROMDEVICE);
457 457
@@ -486,7 +486,7 @@ static void asd_unbuild_smp_ascb(struct asd_ascb *a)
486 486
487 BUG_ON(!task); 487 BUG_ON(!task);
488 pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_req, 1, 488 pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_req, 1,
489 PCI_DMA_FROMDEVICE); 489 PCI_DMA_TODEVICE);
490 pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_resp, 1, 490 pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_resp, 1,
491 PCI_DMA_FROMDEVICE); 491 PCI_DMA_FROMDEVICE);
492} 492}
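
The aic94xx fix maps the SMP request with PCI_DMA_TODEVICE (the host writes it, the device reads it) and keeps the response as PCI_DMA_FROMDEVICE, with the unmap directions matching the map. A kernel-context sketch of keeping the directions paired (error handling and sg setup omitted; map_smp_frames()/unmap_smp_frames() are illustrative names):

/* The DMA direction describes who writes the buffer; unmap must reuse
 * the same direction that was passed to map. */
static void map_smp_frames(struct pci_dev *pdev,
			   struct scatterlist *req, struct scatterlist *resp)
{
	pci_map_sg(pdev, req, 1, PCI_DMA_TODEVICE);     /* CPU-built request frame */
	pci_map_sg(pdev, resp, 1, PCI_DMA_FROMDEVICE);  /* device-written response */
}

static void unmap_smp_frames(struct pci_dev *pdev,
			     struct scatterlist *req, struct scatterlist *resp)
{
	pci_unmap_sg(pdev, req, 1, PCI_DMA_TODEVICE);
	pci_unmap_sg(pdev, resp, 1, PCI_DMA_FROMDEVICE);
}
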
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 77b06a983fa7..95cf7b6cd622 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -2314,6 +2314,7 @@ int __devinit scsi_esp_register(struct esp *esp, struct device *dev)
2314 esp->host->transportt = esp_transport_template; 2314 esp->host->transportt = esp_transport_template;
2315 esp->host->max_lun = ESP_MAX_LUN; 2315 esp->host->max_lun = ESP_MAX_LUN;
2316 esp->host->cmd_per_lun = 2; 2316 esp->host->cmd_per_lun = 2;
2317 esp->host->unique_id = instance;
2317 2318
2318 esp_set_clock_params(esp); 2319 esp_set_clock_params(esp);
2319 2320
@@ -2337,7 +2338,7 @@ int __devinit scsi_esp_register(struct esp *esp, struct device *dev)
2337 if (err) 2338 if (err)
2338 return err; 2339 return err;
2339 2340
2340 esp->host->unique_id = instance++; 2341 instance++;
2341 2342
2342 scsi_scan_host(esp->host); 2343 scsi_scan_host(esp->host);
2343 2344
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 3907f6718ede..da56163c30a8 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -1753,6 +1753,14 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
1753 1753
1754 *len = 0; 1754 *len = 0;
1755 1755
1756 if (scsi_sg_count(cmd) == 1 && !adapter->has_64bit_addr) {
1757 sg = scsi_sglist(cmd);
1758 scb->dma_h_bulkdata = sg_dma_address(sg);
1759 *buf = (u32)scb->dma_h_bulkdata;
1760 *len = sg_dma_len(sg);
1761 return 0;
1762 }
1763
1756 scsi_for_each_sg(cmd, sg, sgcnt, idx) { 1764 scsi_for_each_sg(cmd, sg, sgcnt, idx) {
1757 if (adapter->has_64bit_addr) { 1765 if (adapter->has_64bit_addr) {
1758 scb->sgl64[idx].address = sg_dma_address(sg); 1766 scb->sgl64[idx].address = sg_dma_address(sg);
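
The megaraid hunk adds a fast path: when the command has exactly one mapped segment and the adapter only handles 32-bit addresses, it skips building a scatter/gather list and hands back the segment's bus address and length directly. A kernel-context sketch of that shortcut using the scsi_sglist()/sg_dma_address() accessors (build_sglist_fastpath() is an illustrative name; the surrounding adapter structures are omitted):

/* Returns 0 and fills *buf/*len when the single-segment shortcut applies;
 * the caller falls back to building a full SG list otherwise. */
static int build_sglist_fastpath(struct scsi_cmnd *cmd, u32 *buf, u32 *len)
{
	struct scatterlist *sg;

	if (scsi_sg_count(cmd) != 1)
		return -1;

	sg = scsi_sglist(cmd);
	*buf = (u32)sg_dma_address(sg);   /* assumes a 32-bit-only adapter */
	*len = sg_dma_len(sg);
	return 0;
}
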
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 6f56f8750635..4df21c92ff1e 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -787,10 +787,12 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
787 struct scsi_target *starget = sdev->sdev_target; 787 struct scsi_target *starget = sdev->sdev_target;
788 struct Scsi_Host *shost = sdev->host; 788 struct Scsi_Host *shost = sdev->host;
789 int len = sdev->inquiry_len; 789 int len = sdev->inquiry_len;
790 int min_period = spi_min_period(starget);
791 int max_width = spi_max_width(starget);
790 /* first set us up for narrow async */ 792 /* first set us up for narrow async */
791 DV_SET(offset, 0); 793 DV_SET(offset, 0);
792 DV_SET(width, 0); 794 DV_SET(width, 0);
793 795
794 if (spi_dv_device_compare_inquiry(sdev, buffer, buffer, DV_LOOPS) 796 if (spi_dv_device_compare_inquiry(sdev, buffer, buffer, DV_LOOPS)
795 != SPI_COMPARE_SUCCESS) { 797 != SPI_COMPARE_SUCCESS) {
796 starget_printk(KERN_ERR, starget, "Domain Validation Initial Inquiry Failed\n"); 798 starget_printk(KERN_ERR, starget, "Domain Validation Initial Inquiry Failed\n");
@@ -798,9 +800,13 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
798 return; 800 return;
799 } 801 }
800 802
803 if (!scsi_device_wide(sdev)) {
804 spi_max_width(starget) = 0;
805 max_width = 0;
806 }
807
801 /* test width */ 808 /* test width */
802 if (i->f->set_width && spi_max_width(starget) && 809 if (i->f->set_width && max_width) {
803 scsi_device_wide(sdev)) {
804 i->f->set_width(starget, 1); 810 i->f->set_width(starget, 1);
805 811
806 if (spi_dv_device_compare_inquiry(sdev, buffer, 812 if (spi_dv_device_compare_inquiry(sdev, buffer,
@@ -809,6 +815,11 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
809 != SPI_COMPARE_SUCCESS) { 815 != SPI_COMPARE_SUCCESS) {
810 starget_printk(KERN_ERR, starget, "Wide Transfers Fail\n"); 816 starget_printk(KERN_ERR, starget, "Wide Transfers Fail\n");
811 i->f->set_width(starget, 0); 817 i->f->set_width(starget, 0);
818 /* Make sure we don't force wide back on by asking
819 * for a transfer period that requires it */
820 max_width = 0;
821 if (min_period < 10)
822 min_period = 10;
812 } 823 }
813 } 824 }
814 825
@@ -828,7 +839,8 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
828 839
829 /* now set up to the maximum */ 840 /* now set up to the maximum */
830 DV_SET(offset, spi_max_offset(starget)); 841 DV_SET(offset, spi_max_offset(starget));
831 DV_SET(period, spi_min_period(starget)); 842 DV_SET(period, min_period);
843
832 /* try QAS requests; this should be harmless to set if the 844 /* try QAS requests; this should be harmless to set if the
833 * target supports it */ 845 * target supports it */
834 if (scsi_device_qas(sdev)) { 846 if (scsi_device_qas(sdev)) {
@@ -837,14 +849,14 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
837 DV_SET(qas, 0); 849 DV_SET(qas, 0);
838 } 850 }
839 851
840 if (scsi_device_ius(sdev) && spi_min_period(starget) < 9) { 852 if (scsi_device_ius(sdev) && min_period < 9) {
841 /* This u320 (or u640). Set IU transfers */ 853 /* This u320 (or u640). Set IU transfers */
842 DV_SET(iu, 1); 854 DV_SET(iu, 1);
843 /* Then set the optional parameters */ 855 /* Then set the optional parameters */
844 DV_SET(rd_strm, 1); 856 DV_SET(rd_strm, 1);
845 DV_SET(wr_flow, 1); 857 DV_SET(wr_flow, 1);
846 DV_SET(rti, 1); 858 DV_SET(rti, 1);
847 if (spi_min_period(starget) == 8) 859 if (min_period == 8)
848 DV_SET(pcomp_en, 1); 860 DV_SET(pcomp_en, 1);
849 } else { 861 } else {
850 DV_SET(iu, 0); 862 DV_SET(iu, 0);
@@ -862,6 +874,10 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
862 } else { 874 } else {
863 DV_SET(dt, 1); 875 DV_SET(dt, 1);
864 } 876 }
877 /* set width last because it will pull all the other
878 * parameters down to required values */
879 DV_SET(width, max_width);
880
865 /* Do the read only INQUIRY tests */ 881 /* Do the read only INQUIRY tests */
866 spi_dv_retrain(sdev, buffer, buffer + sdev->inquiry_len, 882 spi_dv_retrain(sdev, buffer, buffer + sdev->inquiry_len,
867 spi_dv_device_compare_inquiry); 883 spi_dv_device_compare_inquiry);
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm1.h b/drivers/serial/cpm_uart/cpm_uart_cpm1.h
index a99e45e2b6d8..2a6477834c3e 100644
--- a/drivers/serial/cpm_uart/cpm_uart_cpm1.h
+++ b/drivers/serial/cpm_uart/cpm_uart_cpm1.h
@@ -37,6 +37,6 @@ static inline void cpm_set_smc_fcr(volatile smc_uart_t * up)
37 up->smc_tfcr = SMC_EB; 37 up->smc_tfcr = SMC_EB;
38} 38}
39 39
40#define DPRAM_BASE ((unsigned char *)&cpmp->cp_dpmem[0]) 40#define DPRAM_BASE ((unsigned char *)cpm_dpram_addr(0))
41 41
42#endif 42#endif
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index a0ea43598515..7c8d78fbbbfb 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -943,6 +943,7 @@ static struct pcmcia_device_id serial_ids[] = {
943 PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4), 943 PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4),
944 PCMCIA_MFC_DEVICE_PROD_ID12(2,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4), 944 PCMCIA_MFC_DEVICE_PROD_ID12(2,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4),
945 PCMCIA_MFC_DEVICE_PROD_ID12(3,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4), 945 PCMCIA_MFC_DEVICE_PROD_ID12(3,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4),
946 PCMCIA_DEVICE_MANF_CARD(0x0279, 0x950b),
946 /* too generic */ 947 /* too generic */
947 /* PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0160, 0x0002), */ 948 /* PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0160, 0x0002), */
948 /* PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0160, 0x0002), */ 949 /* PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0160, 0x0002), */
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index e348ba684050..ff610c23314b 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -38,7 +38,7 @@
38#include <asm/prom.h> 38#include <asm/prom.h>
39#include <asm/of_device.h> 39#include <asm/of_device.h>
40 40
41#if defined(CONFIG_SERIAL_SUNZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) 41#if defined(CONFIG_SERIAL_SUNSAB_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
42#define SUPPORT_SYSRQ 42#define SUPPORT_SYSRQ
43#endif 43#endif
44 44
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 8d7ab74170d5..a593f900eff4 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -431,6 +431,7 @@ static int w1_uevent(struct device *dev, char **envp, int num_envp,
431 err = add_uevent_var(envp, num_envp, &cur_index, buffer, buffer_size, 431 err = add_uevent_var(envp, num_envp, &cur_index, buffer, buffer_size,
432 &cur_len, "W1_SLAVE_ID=%024LX", 432 &cur_len, "W1_SLAVE_ID=%024LX",
433 (unsigned long long)sl->reg_num.id); 433 (unsigned long long)sl->reg_num.id);
434 envp[cur_index] = NULL;
434 if (err) 435 if (err)
435 return err; 436 return err;
436 437
diff --git a/fs/aio.c b/fs/aio.c
index dbe699e9828c..ea2e19820381 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1562,6 +1562,7 @@ int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
1562 fput(file); 1562 fput(file);
1563 return -EAGAIN; 1563 return -EAGAIN;
1564 } 1564 }
1565 req->ki_filp = file;
1565 if (iocb->aio_flags & IOCB_FLAG_RESFD) { 1566 if (iocb->aio_flags & IOCB_FLAG_RESFD) {
1566 /* 1567 /*
1567 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an 1568 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
@@ -1576,7 +1577,6 @@ int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
1576 } 1577 }
1577 } 1578 }
1578 1579
1579 req->ki_filp = file;
1580 ret = put_user(req->ki_key, &user_iocb->aio_key); 1580 ret = put_user(req->ki_key, &user_iocb->aio_key);
1581 if (unlikely(ret)) { 1581 if (unlikely(ret)) {
1582 dprintk("EFAULT: aio_key\n"); 1582 dprintk("EFAULT: aio_key\n");
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 861141b4f6d6..fcb3405bb14e 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -742,6 +742,7 @@ static int load_flat_file(struct linux_binprm * bprm,
742 * __start to address 4 so that is okay). 742 * __start to address 4 so that is okay).
743 */ 743 */
744 if (rev > OLD_FLAT_VERSION) { 744 if (rev > OLD_FLAT_VERSION) {
745 unsigned long persistent = 0;
745 for (i=0; i < relocs; i++) { 746 for (i=0; i < relocs; i++) {
746 unsigned long addr, relval; 747 unsigned long addr, relval;
747 748
@@ -749,6 +750,8 @@ static int load_flat_file(struct linux_binprm * bprm,
749 relocated (of course, the address has to be 750 relocated (of course, the address has to be
750 relocated first). */ 751 relocated first). */
751 relval = ntohl(reloc[i]); 752 relval = ntohl(reloc[i]);
753 if (flat_set_persistent (relval, &persistent))
754 continue;
752 addr = flat_get_relocate_addr(relval); 755 addr = flat_get_relocate_addr(relval);
753 rp = (unsigned long *) calc_reloc(addr, libinfo, id, 1); 756 rp = (unsigned long *) calc_reloc(addr, libinfo, id, 1);
754 if (rp == (unsigned long *)RELOC_FAILED) { 757 if (rp == (unsigned long *)RELOC_FAILED) {
@@ -757,7 +760,8 @@ static int load_flat_file(struct linux_binprm * bprm,
757 } 760 }
758 761
759 /* Get the pointer's value. */ 762 /* Get the pointer's value. */
760 addr = flat_get_addr_from_rp(rp, relval, flags); 763 addr = flat_get_addr_from_rp(rp, relval, flags,
764 &persistent);
761 if (addr != 0) { 765 if (addr != 0) {
762 /* 766 /*
763 * Do the relocation. PIC relocs in the data section are 767 * Do the relocation. PIC relocs in the data section are
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 5a5b7116cefb..37310b0e8107 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -3190,6 +3190,8 @@ COMPATIBLE_IOCTL(SIOCSIWRETRY)
3190COMPATIBLE_IOCTL(SIOCGIWRETRY) 3190COMPATIBLE_IOCTL(SIOCGIWRETRY)
3191COMPATIBLE_IOCTL(SIOCSIWPOWER) 3191COMPATIBLE_IOCTL(SIOCSIWPOWER)
3192COMPATIBLE_IOCTL(SIOCGIWPOWER) 3192COMPATIBLE_IOCTL(SIOCGIWPOWER)
3193COMPATIBLE_IOCTL(SIOCSIWAUTH)
3194COMPATIBLE_IOCTL(SIOCGIWAUTH)
3193/* hiddev */ 3195/* hiddev */
3194COMPATIBLE_IOCTL(HIDIOCGVERSION) 3196COMPATIBLE_IOCTL(HIDIOCGVERSION)
3195COMPATIBLE_IOCTL(HIDIOCAPPLICATION) 3197COMPATIBLE_IOCTL(HIDIOCAPPLICATION)
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index a21e4bc5444b..d120ec39bcb0 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -171,19 +171,14 @@ found:
171 * GRANTED_RES message by cookie, without having to rely on the client's IP 171 * GRANTED_RES message by cookie, without having to rely on the client's IP
172 * address. --okir 172 * address. --okir
173 */ 173 */
174static inline struct nlm_block * 174static struct nlm_block *
175nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_file *file, 175nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host,
176 struct nlm_lock *lock, struct nlm_cookie *cookie) 176 struct nlm_file *file, struct nlm_lock *lock,
177 struct nlm_cookie *cookie)
177{ 178{
178 struct nlm_block *block; 179 struct nlm_block *block;
179 struct nlm_host *host;
180 struct nlm_rqst *call = NULL; 180 struct nlm_rqst *call = NULL;
181 181
182 /* Create host handle for callback */
183 host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len);
184 if (host == NULL)
185 return NULL;
186
187 call = nlm_alloc_call(host); 182 call = nlm_alloc_call(host);
188 if (call == NULL) 183 if (call == NULL)
189 return NULL; 184 return NULL;
@@ -366,6 +361,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
366 struct nlm_lock *lock, int wait, struct nlm_cookie *cookie) 361 struct nlm_lock *lock, int wait, struct nlm_cookie *cookie)
367{ 362{
368 struct nlm_block *block = NULL; 363 struct nlm_block *block = NULL;
364 struct nlm_host *host;
369 int error; 365 int error;
370 __be32 ret; 366 __be32 ret;
371 367
@@ -377,6 +373,10 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
377 (long long)lock->fl.fl_end, 373 (long long)lock->fl.fl_end,
378 wait); 374 wait);
379 375
376 /* Create host handle for callback */
377 host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len);
378 if (host == NULL)
379 return nlm_lck_denied_nolocks;
380 380
381 /* Lock file against concurrent access */ 381 /* Lock file against concurrent access */
382 mutex_lock(&file->f_mutex); 382 mutex_lock(&file->f_mutex);
@@ -385,7 +385,8 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
385 */ 385 */
386 block = nlmsvc_lookup_block(file, lock); 386 block = nlmsvc_lookup_block(file, lock);
387 if (block == NULL) { 387 if (block == NULL) {
388 block = nlmsvc_create_block(rqstp, file, lock, cookie); 388 block = nlmsvc_create_block(rqstp, nlm_get_host(host), file,
389 lock, cookie);
389 ret = nlm_lck_denied_nolocks; 390 ret = nlm_lck_denied_nolocks;
390 if (block == NULL) 391 if (block == NULL)
391 goto out; 392 goto out;
@@ -449,6 +450,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
449out: 450out:
450 mutex_unlock(&file->f_mutex); 451 mutex_unlock(&file->f_mutex);
451 nlmsvc_release_block(block); 452 nlmsvc_release_block(block);
453 nlm_release_host(host);
452 dprintk("lockd: nlmsvc_lock returned %u\n", ret); 454 dprintk("lockd: nlmsvc_lock returned %u\n", ret);
453 return ret; 455 return ret;
454} 456}
@@ -477,10 +479,17 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
477 479
478 if (block == NULL) { 480 if (block == NULL) {
479 struct file_lock *conf = kzalloc(sizeof(*conf), GFP_KERNEL); 481 struct file_lock *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
482 struct nlm_host *host;
480 483
481 if (conf == NULL) 484 if (conf == NULL)
482 return nlm_granted; 485 return nlm_granted;
483 block = nlmsvc_create_block(rqstp, file, lock, cookie); 486 /* Create host handle for callback */
487 host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len);
488 if (host == NULL) {
489 kfree(conf);
490 return nlm_lck_denied_nolocks;
491 }
492 block = nlmsvc_create_block(rqstp, host, file, lock, cookie);
484 if (block == NULL) { 493 if (block == NULL) {
485 kfree(conf); 494 kfree(conf);
486 return nlm_granted; 495 return nlm_granted;
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index a49f9feff776..a204484072f3 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -588,16 +588,6 @@ static int nfs_init_server(struct nfs_server *server, const struct nfs_mount_dat
588 server->namelen = data->namlen; 588 server->namelen = data->namlen;
589 /* Create a client RPC handle for the NFSv3 ACL management interface */ 589 /* Create a client RPC handle for the NFSv3 ACL management interface */
590 nfs_init_server_aclclient(server); 590 nfs_init_server_aclclient(server);
591 if (clp->cl_nfsversion == 3) {
592 if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN)
593 server->namelen = NFS3_MAXNAMLEN;
594 if (!(data->flags & NFS_MOUNT_NORDIRPLUS))
595 server->caps |= NFS_CAP_READDIRPLUS;
596 } else {
597 if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN)
598 server->namelen = NFS2_MAXNAMLEN;
599 }
600
601 dprintk("<-- nfs_init_server() = 0 [new %p]\n", clp); 591 dprintk("<-- nfs_init_server() = 0 [new %p]\n", clp);
602 return 0; 592 return 0;
603 593
@@ -794,6 +784,16 @@ struct nfs_server *nfs_create_server(const struct nfs_mount_data *data,
794 error = nfs_probe_fsinfo(server, mntfh, &fattr); 784 error = nfs_probe_fsinfo(server, mntfh, &fattr);
795 if (error < 0) 785 if (error < 0)
796 goto error; 786 goto error;
787 if (server->nfs_client->rpc_ops->version == 3) {
788 if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN)
789 server->namelen = NFS3_MAXNAMLEN;
790 if (!(data->flags & NFS_MOUNT_NORDIRPLUS))
791 server->caps |= NFS_CAP_READDIRPLUS;
792 } else {
793 if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN)
794 server->namelen = NFS2_MAXNAMLEN;
795 }
796
797 if (!(fattr.valid & NFS_ATTR_FATTR)) { 797 if (!(fattr.valid & NFS_ATTR_FATTR)) {
798 error = server->nfs_client->rpc_ops->getattr(server, mntfh, &fattr); 798 error = server->nfs_client->rpc_ops->getattr(server, mntfh, &fattr);
799 if (error < 0) { 799 if (error < 0) {
@@ -984,6 +984,9 @@ struct nfs_server *nfs4_create_server(const struct nfs4_mount_data *data,
984 if (error < 0) 984 if (error < 0)
985 goto error; 985 goto error;
986 986
987 if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
988 server->namelen = NFS4_MAXNAMLEN;
989
987 BUG_ON(!server->nfs_client); 990 BUG_ON(!server->nfs_client);
988 BUG_ON(!server->nfs_client->rpc_ops); 991 BUG_ON(!server->nfs_client->rpc_ops);
989 BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops); 992 BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
@@ -1056,6 +1059,9 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
1056 if (error < 0) 1059 if (error < 0)
1057 goto error; 1060 goto error;
1058 1061
1062 if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
1063 server->namelen = NFS4_MAXNAMLEN;
1064
1059 dprintk("Referral FSID: %llx:%llx\n", 1065 dprintk("Referral FSID: %llx:%llx\n",
1060 (unsigned long long) server->fsid.major, 1066 (unsigned long long) server->fsid.major,
1061 (unsigned long long) server->fsid.minor); 1067 (unsigned long long) server->fsid.minor);
@@ -1115,6 +1121,9 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
1115 if (error < 0) 1121 if (error < 0)
1116 goto out_free_server; 1122 goto out_free_server;
1117 1123
1124 if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
1125 server->namelen = NFS4_MAXNAMLEN;
1126
1118 dprintk("Cloned FSID: %llx:%llx\n", 1127 dprintk("Cloned FSID: %llx:%llx\n",
1119 (unsigned long long) server->fsid.major, 1128 (unsigned long long) server->fsid.major,
1120 (unsigned long long) server->fsid.minor); 1129 (unsigned long long) server->fsid.minor);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index ea97408e423e..e4a04d16b8b0 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1162,6 +1162,8 @@ static struct dentry *nfs_readdir_lookup(nfs_readdir_descriptor_t *desc)
1162 } 1162 }
1163 if (!desc->plus || !(entry->fattr->valid & NFS_ATTR_FATTR)) 1163 if (!desc->plus || !(entry->fattr->valid & NFS_ATTR_FATTR))
1164 return NULL; 1164 return NULL;
1165 if (name.len > NFS_SERVER(dir)->namelen)
1166 return NULL;
1165 /* Note: caller is already holding the dir->i_mutex! */ 1167 /* Note: caller is already holding the dir->i_mutex! */
1166 dentry = d_alloc(parent, &name); 1168 dentry = d_alloc(parent, &name);
1167 if (dentry == NULL) 1169 if (dentry == NULL)
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index d1cbf0a0fbb2..522e5ad4d8ad 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -175,6 +175,9 @@ next_component:
175 path++; 175 path++;
176 name.len = path - (const char *) name.name; 176 name.len = path - (const char *) name.name;
177 177
178 if (name.len > NFS4_MAXNAMLEN)
179 return -ENAMETOOLONG;
180
178eat_dot_dir: 181eat_dot_dir:
179 while (*path == '/') 182 while (*path == '/')
180 path++; 183 path++;
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index de984d272576..d272847d5a07 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -514,8 +514,10 @@ int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb,
514 ac->ac_bh = osb->local_alloc_bh; 514 ac->ac_bh = osb->local_alloc_bh;
515 status = 0; 515 status = 0;
516bail: 516bail:
517 if (status < 0 && local_alloc_inode) 517 if (status < 0 && local_alloc_inode) {
518 mutex_unlock(&local_alloc_inode->i_mutex);
518 iput(local_alloc_inode); 519 iput(local_alloc_inode);
520 }
519 521
520 mlog_exit(status); 522 mlog_exit(status);
521 return status; 523 return status;
diff --git a/fs/splice.c b/fs/splice.c
index c010a72ca2d2..e95a36228863 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1224,6 +1224,33 @@ static long do_splice(struct file *in, loff_t __user *off_in,
1224} 1224}
1225 1225
1226/* 1226/*
1227 * Do a copy-from-user while holding the mmap_semaphore for reading, in a
1228 * manner safe from deadlocking with simultaneous mmap() (grabbing mmap_sem
1229 * for writing) and page faulting on the user memory pointed to by src.
1230 * This assumes that we will very rarely hit the partial != 0 path, or this
1231 * will not be a win.
1232 */
1233static int copy_from_user_mmap_sem(void *dst, const void __user *src, size_t n)
1234{
1235 int partial;
1236
1237 pagefault_disable();
1238 partial = __copy_from_user_inatomic(dst, src, n);
1239 pagefault_enable();
1240
1241 /*
1242 * Didn't copy everything, drop the mmap_sem and do a faulting copy
1243 */
1244 if (unlikely(partial)) {
1245 up_read(&current->mm->mmap_sem);
1246 partial = copy_from_user(dst, src, n);
1247 down_read(&current->mm->mmap_sem);
1248 }
1249
1250 return partial;
1251}
1252
1253/*
1227 * Map an iov into an array of pages and offset/length tupples. With the 1254 * Map an iov into an array of pages and offset/length tupples. With the
1228 * partial_page structure, we can map several non-contiguous ranges into 1255 * partial_page structure, we can map several non-contiguous ranges into
1229 * our ones pages[] map instead of splitting that operation into pieces. 1256 * our ones pages[] map instead of splitting that operation into pieces.
@@ -1236,31 +1263,26 @@ static int get_iovec_page_array(const struct iovec __user *iov,
1236{ 1263{
1237 int buffers = 0, error = 0; 1264 int buffers = 0, error = 0;
1238 1265
1239 /*
1240 * It's ok to take the mmap_sem for reading, even
1241 * across a "get_user()".
1242 */
1243 down_read(&current->mm->mmap_sem); 1266 down_read(&current->mm->mmap_sem);
1244 1267
1245 while (nr_vecs) { 1268 while (nr_vecs) {
1246 unsigned long off, npages; 1269 unsigned long off, npages;
1270 struct iovec entry;
1247 void __user *base; 1271 void __user *base;
1248 size_t len; 1272 size_t len;
1249 int i; 1273 int i;
1250 1274
1251 /* 1275 error = -EFAULT;
1252 * Get user address base and length for this iovec. 1276 if (copy_from_user_mmap_sem(&entry, iov, sizeof(entry)))
1253 */
1254 error = get_user(base, &iov->iov_base);
1255 if (unlikely(error))
1256 break;
1257 error = get_user(len, &iov->iov_len);
1258 if (unlikely(error))
1259 break; 1277 break;
1260 1278
1279 base = entry.iov_base;
1280 len = entry.iov_len;
1281
1261 /* 1282 /*
1262 * Sanity check this iovec. 0 read succeeds. 1283 * Sanity check this iovec. 0 read succeeds.
1263 */ 1284 */
1285 error = 0;
1264 if (unlikely(!len)) 1286 if (unlikely(!len))
1265 break; 1287 break;
1266 error = -EFAULT; 1288 error = -EFAULT;
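
copy_from_user_mmap_sem() above tries a non-faulting copy while mmap_sem is held for reading and, only if that copy comes up short, drops the lock, does a normal faulting copy, and re-takes the lock. The shape of the pattern, an optimistic fast path under a shared lock with the slow path taken outside it, can be modelled in user space with a rwlock; this is an analogy, not the kernel code, and copy_nofault() fakes the partial failure:

#include <pthread.h>
#include <string.h>

static pthread_rwlock_t maplock = PTHREAD_RWLOCK_INITIALIZER;

/* Fake non-faulting copy: returns the number of bytes NOT copied. */
static int copy_nofault(void *dst, const void *src, size_t n)
{
	if (n > 4)                   /* pretend the atomic copy faulted part-way */
		return (int)(n - 4);
	memcpy(dst, src, n);
	return 0;
}

/* Caller holds maplock for reading, mirroring mmap_sem in the kernel helper. */
static int copy_with_fallback(void *dst, const void *src, size_t n)
{
	int partial = copy_nofault(dst, src, n);

	if (partial) {
		pthread_rwlock_unlock(&maplock);   /* drop the shared lock ... */
		memcpy(dst, src, n);               /* ... do the blocking copy ... */
		pthread_rwlock_rdlock(&maplock);   /* ... and re-take it */
		partial = 0;
	}
	return partial;
}

int main(void)
{
	char dst[8];

	pthread_rwlock_rdlock(&maplock);
	copy_with_fallback(dst, "hello", 6);
	pthread_rwlock_unlock(&maplock);
	return 0;
}
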
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 73402c5eeb8a..38eb0b7a1f3d 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -894,7 +894,7 @@ magic_found:
894 goto again; 894 goto again;
895 } 895 }
896 896
897 897 sbi->s_flags = flags; /* functions called below this point rely on s_flags */
898 ufs_print_super_stuff(sb, usb1, usb2, usb3); 898 ufs_print_super_stuff(sb, usb1, usb2, usb3);
899 899
900 /* 900 /*
@@ -1025,8 +1025,6 @@ magic_found:
1025 UFS_MOUNT_UFSTYPE_44BSD) 1025 UFS_MOUNT_UFSTYPE_44BSD)
1026 uspi->s_maxsymlinklen = 1026 uspi->s_maxsymlinklen =
1027 fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen); 1027 fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen);
1028
1029 sbi->s_flags = flags;
1030 1028
1031 inode = iget(sb, UFS_ROOTINO); 1029 inode = iget(sb, UFS_ROOTINO);
1032 if (!inode || is_bad_inode(inode)) 1030 if (!inode || is_bad_inode(inode))
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index fa25b7dcc6c3..d7e136143066 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -52,11 +52,6 @@ typedef struct xfs_buf_log_format_t {
52#define XFS_BLI_UDQUOT_BUF 0x4 52#define XFS_BLI_UDQUOT_BUF 0x4
53#define XFS_BLI_PDQUOT_BUF 0x8 53#define XFS_BLI_PDQUOT_BUF 0x8
54#define XFS_BLI_GDQUOT_BUF 0x10 54#define XFS_BLI_GDQUOT_BUF 0x10
55/*
56 * This flag indicates that the buffer contains newly allocated
57 * inodes.
58 */
59#define XFS_BLI_INODE_NEW_BUF 0x20
60 55
61#define XFS_BLI_CHUNK 128 56#define XFS_BLI_CHUNK 128
62#define XFS_BLI_SHIFT 7 57#define XFS_BLI_SHIFT 7
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 7174991f4bef..8ae6e8e5f3db 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1874,7 +1874,6 @@ xlog_recover_do_inode_buffer(
1874/*ARGSUSED*/ 1874/*ARGSUSED*/
1875STATIC void 1875STATIC void
1876xlog_recover_do_reg_buffer( 1876xlog_recover_do_reg_buffer(
1877 xfs_mount_t *mp,
1878 xlog_recover_item_t *item, 1877 xlog_recover_item_t *item,
1879 xfs_buf_t *bp, 1878 xfs_buf_t *bp,
1880 xfs_buf_log_format_t *buf_f) 1879 xfs_buf_log_format_t *buf_f)
@@ -1885,50 +1884,6 @@ xlog_recover_do_reg_buffer(
1885 unsigned int *data_map = NULL; 1884 unsigned int *data_map = NULL;
1886 unsigned int map_size = 0; 1885 unsigned int map_size = 0;
1887 int error; 1886 int error;
1888 int stale_buf = 1;
1889
1890 /*
1891 * Scan through the on-disk inode buffer and attempt to
1892 * determine if it has been written to since it was logged.
1893 *
1894 * - If any of the magic numbers are incorrect then the buffer is stale
1895 * - If any of the modes are non-zero then the buffer is not stale
1896 * - If all of the modes are zero and at least one of the generation
1897 * counts is non-zero then the buffer is stale
1898 *
1899 * If the end result is a stale buffer then the log buffer is replayed
1900 * otherwise it is skipped.
1901 *
1902 * This heuristic is not perfect. It can be improved by scanning the
1903 * entire inode chunk for evidence that any of the inode clusters have
1904 * been updated. To fix this problem completely we will need a major
1905 * architectural change to the logging system.
1906 */
1907 if (buf_f->blf_flags & XFS_BLI_INODE_NEW_BUF) {
1908 xfs_dinode_t *dip;
1909 int inodes_per_buf;
1910 int mode_count = 0;
1911 int gen_count = 0;
1912
1913 stale_buf = 0;
1914 inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog;
1915 for (i = 0; i < inodes_per_buf; i++) {
1916 dip = (xfs_dinode_t *)xfs_buf_offset(bp,
1917 i * mp->m_sb.sb_inodesize);
1918 if (be16_to_cpu(dip->di_core.di_magic) !=
1919 XFS_DINODE_MAGIC) {
1920 stale_buf = 1;
1921 break;
1922 }
1923 if (dip->di_core.di_mode)
1924 mode_count++;
1925 if (dip->di_core.di_gen)
1926 gen_count++;
1927 }
1928
1929 if (!mode_count && gen_count)
1930 stale_buf = 1;
1931 }
1932 1887
1933 switch (buf_f->blf_type) { 1888 switch (buf_f->blf_type) {
1934 case XFS_LI_BUF: 1889 case XFS_LI_BUF:
@@ -1962,7 +1917,7 @@ xlog_recover_do_reg_buffer(
1962 -1, 0, XFS_QMOPT_DOWARN, 1917 -1, 0, XFS_QMOPT_DOWARN,
1963 "dquot_buf_recover"); 1918 "dquot_buf_recover");
1964 } 1919 }
1965 if (!error && stale_buf) 1920 if (!error)
1966 memcpy(xfs_buf_offset(bp, 1921 memcpy(xfs_buf_offset(bp,
1967 (uint)bit << XFS_BLI_SHIFT), /* dest */ 1922 (uint)bit << XFS_BLI_SHIFT), /* dest */
1968 item->ri_buf[i].i_addr, /* source */ 1923 item->ri_buf[i].i_addr, /* source */
@@ -2134,7 +2089,7 @@ xlog_recover_do_dquot_buffer(
2134 if (log->l_quotaoffs_flag & type) 2089 if (log->l_quotaoffs_flag & type)
2135 return; 2090 return;
2136 2091
2137 xlog_recover_do_reg_buffer(mp, item, bp, buf_f); 2092 xlog_recover_do_reg_buffer(item, bp, buf_f);
2138} 2093}
2139 2094
2140/* 2095/*
@@ -2235,7 +2190,7 @@ xlog_recover_do_buffer_trans(
2235 (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) { 2190 (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
2236 xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f); 2191 xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2237 } else { 2192 } else {
2238 xlog_recover_do_reg_buffer(mp, item, bp, buf_f); 2193 xlog_recover_do_reg_buffer(item, bp, buf_f);
2239 } 2194 }
2240 if (error) 2195 if (error)
2241 return XFS_ERROR(error); 2196 return XFS_ERROR(error);
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 95fff6872a2f..60b6b898022b 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -966,7 +966,6 @@ xfs_trans_inode_alloc_buf(
966 ASSERT(atomic_read(&bip->bli_refcount) > 0); 966 ASSERT(atomic_read(&bip->bli_refcount) > 0);
967 967
968 bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF; 968 bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
969 bip->bli_format.blf_flags |= XFS_BLI_INODE_NEW_BUF;
970} 969}
971 970
972 971
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index 202acb9ff4d0..f85f77a538aa 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -147,10 +147,6 @@ static inline void unregister_hotplug_dock_device(acpi_handle handle)
147/*-------------------------------------------------------------------------- 147/*--------------------------------------------------------------------------
148 Suspend/Resume 148 Suspend/Resume
149 -------------------------------------------------------------------------- */ 149 -------------------------------------------------------------------------- */
150#ifdef CONFIG_ACPI_SLEEP
151extern int acpi_sleep_init(void); 150extern int acpi_sleep_init(void);
152#else
153static inline int acpi_sleep_init(void) { return 0; }
154#endif
155 151
156#endif /*__ACPI_DRIVERS_H__*/ 152#endif /*__ACPI_DRIVERS_H__*/
diff --git a/include/asm-blackfin/mach-bf533/bfin_serial_5xx.h b/include/asm-blackfin/mach-bf533/bfin_serial_5xx.h
index e043cafa3c42..69b9f8e120e9 100644
--- a/include/asm-blackfin/mach-bf533/bfin_serial_5xx.h
+++ b/include/asm-blackfin/mach-bf533/bfin_serial_5xx.h
@@ -1,5 +1,6 @@
1#include <linux/serial.h> 1#include <linux/serial.h>
2#include <asm/dma.h> 2#include <asm/dma.h>
3#include <asm/portmux.h>
3 4
4#define NR_PORTS 1 5#define NR_PORTS 1
5 6
@@ -92,18 +93,24 @@ struct bfin_serial_res bfin_serial_resource[] = {
92 } 93 }
93}; 94};
94 95
96#define DRIVER_NAME "bfin-uart"
95 97
96int nr_ports = NR_PORTS; 98int nr_ports = NR_PORTS;
97static void bfin_serial_hw_init(struct bfin_serial_port *uart) 99static void bfin_serial_hw_init(struct bfin_serial_port *uart)
98{ 100{
99 101
102#ifdef CONFIG_SERIAL_BFIN_UART0
103 peripheral_request(P_UART0_TX, DRIVER_NAME);
104 peripheral_request(P_UART0_RX, DRIVER_NAME);
105#endif
106
100#ifdef CONFIG_SERIAL_BFIN_CTSRTS 107#ifdef CONFIG_SERIAL_BFIN_CTSRTS
101 if (uart->cts_pin >= 0) { 108 if (uart->cts_pin >= 0) {
102 gpio_request(uart->cts_pin, NULL); 109 gpio_request(uart->cts_pin, DRIVER_NAME);
103 gpio_direction_input(uart->cts_pin); 110 gpio_direction_input(uart->cts_pin);
104 } 111 }
105 if (uart->rts_pin >= 0) { 112 if (uart->rts_pin >= 0) {
106 gpio_request(uart->rts_pin, NULL); 113 gpio_request(uart->rts_pin, DRIVER_NAME);
107 gpio_direction_input(uart->rts_pin); 114 gpio_direction_input(uart->rts_pin);
108 } 115 }
109#endif 116#endif
diff --git a/include/asm-blackfin/mach-bf537/bfin_serial_5xx.h b/include/asm-blackfin/mach-bf537/bfin_serial_5xx.h
index 8f5d9c4d8d5b..6fb328f5186a 100644
--- a/include/asm-blackfin/mach-bf537/bfin_serial_5xx.h
+++ b/include/asm-blackfin/mach-bf537/bfin_serial_5xx.h
@@ -1,5 +1,6 @@
1#include <linux/serial.h> 1#include <linux/serial.h>
2#include <asm/dma.h> 2#include <asm/dma.h>
3#include <asm/portmux.h>
3 4
4#define NR_PORTS 2 5#define NR_PORTS 2
5 6
@@ -122,25 +123,29 @@ struct bfin_serial_res bfin_serial_resource[] = {
122 123
123int nr_ports = ARRAY_SIZE(bfin_serial_resource); 124int nr_ports = ARRAY_SIZE(bfin_serial_resource);
124 125
126#define DRIVER_NAME "bfin-uart"
127
125static void bfin_serial_hw_init(struct bfin_serial_port *uart) 128static void bfin_serial_hw_init(struct bfin_serial_port *uart)
126{ 129{
127 unsigned short val;
128 val = bfin_read16(BFIN_PORT_MUX);
129 val &= ~(PFDE | PFTE);
130 bfin_write16(BFIN_PORT_MUX, val);
131 130
132 val = bfin_read16(PORTF_FER); 131#ifdef CONFIG_SERIAL_BFIN_UART0
133 val |= 0xF; 132 peripheral_request(P_UART0_TX, DRIVER_NAME);
134 bfin_write16(PORTF_FER, val); 133 peripheral_request(P_UART0_RX, DRIVER_NAME);
134#endif
135
136#ifdef CONFIG_SERIAL_BFIN_UART1
137 peripheral_request(P_UART1_TX, DRIVER_NAME);
138 peripheral_request(P_UART1_RX, DRIVER_NAME);
139#endif
135 140
136#ifdef CONFIG_SERIAL_BFIN_CTSRTS 141#ifdef CONFIG_SERIAL_BFIN_CTSRTS
137 if (uart->cts_pin >= 0) { 142 if (uart->cts_pin >= 0) {
138 gpio_request(uart->cts_pin, NULL); 143 gpio_request(uart->cts_pin, DRIVER_NAME);
139 gpio_direction_input(uart->cts_pin); 144 gpio_direction_input(uart->cts_pin);
140 } 145 }
141 146
142 if (uart->rts_pin >= 0) { 147 if (uart->rts_pin >= 0) {
143 gpio_request(uart->rts_pin, NULL); 148 gpio_request(uart->rts_pin, DRIVER_NAME);
144 gpio_direction_output(uart->rts_pin); 149 gpio_direction_output(uart->rts_pin);
145 } 150 }
146#endif 151#endif
diff --git a/include/asm-blackfin/mach-bf537/portmux.h b/include/asm-blackfin/mach-bf537/portmux.h
index 23e13c5abc4d..ae6c53b28452 100644
--- a/include/asm-blackfin/mach-bf537/portmux.h
+++ b/include/asm-blackfin/mach-bf537/portmux.h
@@ -106,4 +106,37 @@
106#define P_SPI0_SSEL2 (P_DEFINED | P_IDENT(PORT_PJ11) | P_FUNCT(1)) 106#define P_SPI0_SSEL2 (P_DEFINED | P_IDENT(PORT_PJ11) | P_FUNCT(1))
107#define P_SPI0_SSEL7 (P_DEFINED | P_IDENT(PORT_PJ5) | P_FUNCT(2)) 107#define P_SPI0_SSEL7 (P_DEFINED | P_IDENT(PORT_PJ5) | P_FUNCT(2))
108 108
109#endif /* _MACH_PORTMUX_H_ */ 109#define P_MII0 {\
110 P_MII0_ETxD0, \
111 P_MII0_ETxD1, \
112 P_MII0_ETxD2, \
113 P_MII0_ETxD3, \
114 P_MII0_ETxEN, \
115 P_MII0_TxCLK, \
116 P_MII0_PHYINT, \
117 P_MII0_COL, \
118 P_MII0_ERxD0, \
119 P_MII0_ERxD1, \
120 P_MII0_ERxD2, \
121 P_MII0_ERxD3, \
122 P_MII0_ERxDV, \
123 P_MII0_ERxCLK, \
124 P_MII0_ERxER, \
125 P_MII0_CRS, \
126 P_MDC, \
127 P_MDIO, 0}
128
129
130#define P_RMII0 {\
131 P_MII0_ETxD0, \
132 P_MII0_ETxD1, \
133 P_MII0_ETxEN, \
134 P_MII0_ERxD0, \
135 P_MII0_ERxD1, \
136 P_MII0_ERxER, \
137 P_RMII0_REF_CLK, \
138 P_RMII0_MDINT, \
139 P_RMII0_CRS_DV, \
140 P_MDC, \
141 P_MDIO, 0}
142#endif /* _MACH_PORTMUX_H_ */
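
P_MII0 and P_RMII0 expand to zero-terminated pin lists intended for the peripheral_request_list()/peripheral_free_list() helpers declared in asm-blackfin/portmux.h above. A hedged usage sketch; the label string and the assumption that a negative return means failure are illustrative:

/* Claim every MII pin in one call; the macro-provided list is 0-terminated. */
static unsigned short mii_pins[] = P_MII0;

static int claim_mii_pins(void)
{
	if (peripheral_request_list(mii_pins, "bfin_mac") < 0)
		return -1;   /* some pin is already owned by another driver */
	return 0;
}

static void release_mii_pins(void)
{
	peripheral_free_list(mii_pins);
}
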
diff --git a/include/asm-blackfin/mach-bf561/bfin_serial_5xx.h b/include/asm-blackfin/mach-bf561/bfin_serial_5xx.h
index e043cafa3c42..69b9f8e120e9 100644
--- a/include/asm-blackfin/mach-bf561/bfin_serial_5xx.h
+++ b/include/asm-blackfin/mach-bf561/bfin_serial_5xx.h
@@ -1,5 +1,6 @@
1#include <linux/serial.h> 1#include <linux/serial.h>
2#include <asm/dma.h> 2#include <asm/dma.h>
3#include <asm/portmux.h>
3 4
4#define NR_PORTS 1 5#define NR_PORTS 1
5 6
@@ -92,18 +93,24 @@ struct bfin_serial_res bfin_serial_resource[] = {
92 } 93 }
93}; 94};
94 95
96#define DRIVER_NAME "bfin-uart"
95 97
96int nr_ports = NR_PORTS; 98int nr_ports = NR_PORTS;
97static void bfin_serial_hw_init(struct bfin_serial_port *uart) 99static void bfin_serial_hw_init(struct bfin_serial_port *uart)
98{ 100{
99 101
102#ifdef CONFIG_SERIAL_BFIN_UART0
103 peripheral_request(P_UART0_TX, DRIVER_NAME);
104 peripheral_request(P_UART0_RX, DRIVER_NAME);
105#endif
106
100#ifdef CONFIG_SERIAL_BFIN_CTSRTS 107#ifdef CONFIG_SERIAL_BFIN_CTSRTS
101 if (uart->cts_pin >= 0) { 108 if (uart->cts_pin >= 0) {
102 gpio_request(uart->cts_pin, NULL); 109 gpio_request(uart->cts_pin, DRIVER_NAME);
103 gpio_direction_input(uart->cts_pin); 110 gpio_direction_input(uart->cts_pin);
104 } 111 }
105 if (uart->rts_pin >= 0) { 112 if (uart->rts_pin >= 0) {
106 gpio_request(uart->rts_pin, NULL); 113 gpio_request(uart->rts_pin, DRIVER_NAME);
107 gpio_direction_input(uart->rts_pin); 114 gpio_direction_input(uart->rts_pin);
108 } 115 }
109#endif 116#endif
diff --git a/include/asm-blackfin/portmux.h b/include/asm-blackfin/portmux.h
index 9d3681e42111..0d3f650d2d99 100644
--- a/include/asm-blackfin/portmux.h
+++ b/include/asm-blackfin/portmux.h
@@ -14,6 +14,12 @@
14#define P_MAYSHARE 0x2000 14#define P_MAYSHARE 0x2000
15#define P_DONTCARE 0x1000 15#define P_DONTCARE 0x1000
16 16
17
18int peripheral_request(unsigned short per, const char *label);
19void peripheral_free(unsigned short per);
20int peripheral_request_list(unsigned short per[], const char *label);
21void peripheral_free_list(unsigned short per[]);
22
17#include <asm/gpio.h> 23#include <asm/gpio.h>
18#include <asm/mach/portmux.h> 24#include <asm/mach/portmux.h>
19 25
@@ -145,6 +151,22 @@
145#define P_SPI2_SSEL3 P_UNDEF 151#define P_SPI2_SSEL3 P_UNDEF
146#endif 152#endif
147 153
154#ifndef P_SPI2_SSEL4
155#define P_SPI2_SSEL4 P_UNDEF
156#endif
157
158#ifndef P_SPI2_SSEL5
159#define P_SPI2_SSEL5 P_UNDEF
160#endif
161
162#ifndef P_SPI2_SSEL6
163#define P_SPI2_SSEL6 P_UNDEF
164#endif
165
166#ifndef P_SPI2_SSEL7
167#define P_SPI2_SSEL7 P_UNDEF
168#endif
169
148#ifndef P_SPI2_SCK 170#ifndef P_SPI2_SCK
149#define P_SPI2_SCK P_UNDEF 171#define P_SPI2_SCK P_UNDEF
150#endif 172#endif
@@ -513,6 +535,22 @@
513#define P_SPI0_SSEL3 P_UNDEF 535#define P_SPI0_SSEL3 P_UNDEF
514#endif 536#endif
515 537
538#ifndef P_SPI0_SSEL4
539#define P_SPI0_SSEL4 P_UNDEF
540#endif
541
542#ifndef P_SPI0_SSEL5
543#define P_SPI0_SSEL5 P_UNDEF
544#endif
545
546#ifndef P_SPI0_SSEL6
547#define P_SPI0_SSEL6 P_UNDEF
548#endif
549
550#ifndef P_SPI0_SSEL7
551#define P_SPI0_SSEL7 P_UNDEF
552#endif
553
516#ifndef P_UART0_TX 554#ifndef P_UART0_TX
517#define P_UART0_TX P_UNDEF 555#define P_UART0_TX P_UNDEF
518#endif 556#endif
@@ -741,6 +779,23 @@
741#define P_SPI1_SSEL3 P_UNDEF 779#define P_SPI1_SSEL3 P_UNDEF
742#endif 780#endif
743 781
782
783#ifndef P_SPI1_SSEL4
784#define P_SPI1_SSEL4 P_UNDEF
785#endif
786
787#ifndef P_SPI1_SSEL5
788#define P_SPI1_SSEL5 P_UNDEF
789#endif
790
791#ifndef P_SPI1_SSEL6
792#define P_SPI1_SSEL6 P_UNDEF
793#endif
794
795#ifndef P_SPI1_SSEL7
796#define P_SPI1_SSEL7 P_UNDEF
797#endif
798
744#ifndef P_SPI1_SCK 799#ifndef P_SPI1_SCK
745#define P_SPI1_SCK P_UNDEF 800#define P_SPI1_SCK P_UNDEF
746#endif 801#endif
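The portmux.h hunks above export the new pin-multiplexing interface (peripheral_request(), peripheral_request_list() and their free counterparts) that the Blackfin serial headers now use instead of gpio_request(..., NULL), while the mach-bf537 header defines zero-terminated pin groups such as P_RMII0. As a rough usage sketch only, assuming the usual 0-on-success return convention and a made-up "bfin-emac" label (neither is spelled out in this patch), a MAC driver could claim and release the whole RMII group like this:

#include <asm/portmux.h>	/* peripheral_request_list(), P_RMII0, ... */

/* Zero-terminated pin list; P_RMII0 is the group defined in mach-bf537/portmux.h above. */
static unsigned short rmii_pins[] = P_RMII0;

static int example_claim_rmii(void)
{
	/* Assumed to return 0 on success and non-zero if a pin is already claimed. */
	if (peripheral_request_list(rmii_pins, "bfin-emac"))
		return -1;
	return 0;
}

static void example_release_rmii(void)
{
	peripheral_free_list(rmii_pins);
}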
diff --git a/include/asm-blackfin/unistd.h b/include/asm-blackfin/unistd.h
index 0df9f2d322a3..07ffe8b718c5 100644
--- a/include/asm-blackfin/unistd.h
+++ b/include/asm-blackfin/unistd.h
@@ -3,6 +3,7 @@
3/* 3/*
4 * This file contains the system call numbers. 4 * This file contains the system call numbers.
5 */ 5 */
6#define __NR_restart_syscall 0
6#define __NR_exit 1 7#define __NR_exit 1
7#define __NR_fork 2 8#define __NR_fork 2
8#define __NR_read 3 9#define __NR_read 3
@@ -165,13 +166,13 @@
165#define __NR_sched_get_priority_min 160 166#define __NR_sched_get_priority_min 160
166#define __NR_sched_rr_get_interval 161 167#define __NR_sched_rr_get_interval 161
167#define __NR_nanosleep 162 168#define __NR_nanosleep 162
168 /* 163 __NR_mremap */ 169#define __NR_mremap 163
169#define __NR_setresuid 164 170#define __NR_setresuid 164
170#define __NR_getresuid 165 171#define __NR_getresuid 165
171 /* 166 __NR_vm86 */ 172 /* 166 __NR_vm86 */
172 /* 167 __NR_query_module */ 173 /* 167 __NR_query_module */
173 /* 168 __NR_poll */ 174 /* 168 __NR_poll */
174 /* 169 __NR_nfsservctl */ 175#define __NR_nfsservctl 169
175#define __NR_setresgid 170 176#define __NR_setresgid 170
176#define __NR_getresgid 171 177#define __NR_getresgid 171
177#define __NR_prctl 172 178#define __NR_prctl 172
@@ -227,7 +228,7 @@
227 /* 222 reserved for TUX */ 228 /* 222 reserved for TUX */
228 /* 223 reserved for TUX */ 229 /* 223 reserved for TUX */
229#define __NR_gettid 224 230#define __NR_gettid 224
230 /* 225 __NR_readahead */ 231#define __NR_readahead 225
231#define __NR_setxattr 226 232#define __NR_setxattr 226
232#define __NR_lsetxattr 227 233#define __NR_lsetxattr 227
233#define __NR_fsetxattr 228 234#define __NR_fsetxattr 228
@@ -287,7 +288,7 @@
287#define __NR_mq_timedreceive (__NR_mq_open+3) 288#define __NR_mq_timedreceive (__NR_mq_open+3)
288#define __NR_mq_notify (__NR_mq_open+4) 289#define __NR_mq_notify (__NR_mq_open+4)
289#define __NR_mq_getsetattr (__NR_mq_open+5) 290#define __NR_mq_getsetattr (__NR_mq_open+5)
290 /* 284 __NR_sys_kexec_load */ 291#define __NR_kexec_load 284
291#define __NR_waitid 285 292#define __NR_waitid 285
292#define __NR_add_key 286 293#define __NR_add_key 286
293#define __NR_request_key 287 294#define __NR_request_key 287
@@ -352,9 +353,54 @@
352#define __NR_shmdt 340 353#define __NR_shmdt 340
353#define __NR_shmget 341 354#define __NR_shmget 341
354 355
355#define __NR_syscall 342 356#define __NR_splice 342
357#define __NR_sync_file_range 343
358#define __NR_tee 344
359#define __NR_vmsplice 345
360
361#define __NR_epoll_pwait 346
362#define __NR_utimensat 347
363#define __NR_signalfd 348
364#define __NR_timerfd 349
365#define __NR_eventfd 350
366#define __NR_pread64 351
367#define __NR_pwrite64 352
368#define __NR_fadvise64 353
369#define __NR_set_robust_list 354
370#define __NR_get_robust_list 355
371#define __NR_fallocate 356
372
373#define __NR_syscall 357
356#define NR_syscalls __NR_syscall 374#define NR_syscalls __NR_syscall
357 375
376/* Old optional stuff no one actually uses */
377#define __IGNORE_sysfs
378#define __IGNORE_uselib
379
380/* Implement the newer interfaces */
381#define __IGNORE_mmap
382#define __IGNORE_poll
383#define __IGNORE_select
384#define __IGNORE_utime
385
386/* Not relevant on no-mmu */
387#define __IGNORE_swapon
388#define __IGNORE_swapoff
389#define __IGNORE_msync
390#define __IGNORE_mlock
391#define __IGNORE_munlock
392#define __IGNORE_mlockall
393#define __IGNORE_munlockall
394#define __IGNORE_mincore
395#define __IGNORE_madvise
396#define __IGNORE_remap_file_pages
397#define __IGNORE_mbind
398#define __IGNORE_get_mempolicy
399#define __IGNORE_set_mempolicy
400#define __IGNORE_migrate_pages
401#define __IGNORE_move_pages
402#define __IGNORE_getcpu
403
358#ifdef __KERNEL__ 404#ifdef __KERNEL__
359#define __ARCH_WANT_IPC_PARSE_VERSION 405#define __ARCH_WANT_IPC_PARSE_VERSION
360#define __ARCH_WANT_STAT64 406#define __ARCH_WANT_STAT64
diff --git a/include/asm-h8300/flat.h b/include/asm-h8300/flat.h
index c20eee767d6f..2a873508a9a1 100644
--- a/include/asm-h8300/flat.h
+++ b/include/asm-h8300/flat.h
@@ -9,6 +9,7 @@
9#define flat_argvp_envp_on_stack() 1 9#define flat_argvp_envp_on_stack() 1
10#define flat_old_ram_flag(flags) 1 10#define flat_old_ram_flag(flags) 1
11#define flat_reloc_valid(reloc, size) ((reloc) <= (size)) 11#define flat_reloc_valid(reloc, size) ((reloc) <= (size))
12#define flat_set_persistent(relval, p) 0
12 13
13/* 14/*
14 * on the H8 a couple of the relocations have an instruction in the 15 * on the H8 a couple of the relocations have an instruction in the
@@ -18,7 +19,7 @@
18 */ 19 */
19 20
20#define flat_get_relocate_addr(rel) (rel) 21#define flat_get_relocate_addr(rel) (rel)
21#define flat_get_addr_from_rp(rp, relval, flags) \ 22#define flat_get_addr_from_rp(rp, relval, flags, persistent) \
22 (get_unaligned(rp) & ((flags & FLAT_FLAG_GOTPIC) ? 0xffffffff: 0x00ffffff)) 23 (get_unaligned(rp) & ((flags & FLAT_FLAG_GOTPIC) ? 0xffffffff: 0x00ffffff))
23#define flat_put_addr_at_rp(rp, addr, rel) \ 24#define flat_put_addr_at_rp(rp, addr, rel) \
24 put_unaligned (((*(char *)(rp)) << 24) | ((addr) & 0x00ffffff), rp) 25 put_unaligned (((*(char *)(rp)) << 24) | ((addr) & 0x00ffffff), rp)
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 609756c61676..d69ba937e092 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -214,11 +214,6 @@ static inline unsigned long get_limit(unsigned long segment)
214 */ 214 */
215 215
216 216
217/*
218 * Actually only lfence would be needed for mb() because all stores done
219 * by the kernel should be already ordered. But keep a full barrier for now.
220 */
221
222#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) 217#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
223#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2) 218#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
224 219
diff --git a/include/asm-m32r/flat.h b/include/asm-m32r/flat.h
index 1b285f65cab6..d851cf0c4aa5 100644
--- a/include/asm-m32r/flat.h
+++ b/include/asm-m32r/flat.h
@@ -15,9 +15,10 @@
15#define flat_stack_align(sp) (*sp += (*sp & 3 ? (4 - (*sp & 3)): 0)) 15#define flat_stack_align(sp) (*sp += (*sp & 3 ? (4 - (*sp & 3)): 0))
16#define flat_argvp_envp_on_stack() 0 16#define flat_argvp_envp_on_stack() 0
17#define flat_old_ram_flag(flags) (flags) 17#define flat_old_ram_flag(flags) (flags)
18#define flat_set_persistent(relval, p) 0
18#define flat_reloc_valid(reloc, size) \ 19#define flat_reloc_valid(reloc, size) \
19 (((reloc) - textlen_for_m32r_lo16_data) <= (size)) 20 (((reloc) - textlen_for_m32r_lo16_data) <= (size))
20#define flat_get_addr_from_rp(rp, relval, flags) \ 21#define flat_get_addr_from_rp(rp, relval, flags, persistent) \
21 m32r_flat_get_addr_from_rp(rp, relval, (text_len) ) 22 m32r_flat_get_addr_from_rp(rp, relval, (text_len) )
22 23
23#define flat_put_addr_at_rp(rp, addr, relval) \ 24#define flat_put_addr_at_rp(rp, addr, relval) \
diff --git a/include/asm-m68knommu/flat.h b/include/asm-m68knommu/flat.h
index 2d836edc4344..814b5174a8e0 100644
--- a/include/asm-m68knommu/flat.h
+++ b/include/asm-m68knommu/flat.h
@@ -9,8 +9,9 @@
9#define flat_argvp_envp_on_stack() 1 9#define flat_argvp_envp_on_stack() 1
10#define flat_old_ram_flag(flags) (flags) 10#define flat_old_ram_flag(flags) (flags)
11#define flat_reloc_valid(reloc, size) ((reloc) <= (size)) 11#define flat_reloc_valid(reloc, size) ((reloc) <= (size))
12#define flat_get_addr_from_rp(rp, relval, flags) get_unaligned(rp) 12#define flat_get_addr_from_rp(rp, relval, flags, p) get_unaligned(rp)
13#define flat_put_addr_at_rp(rp, val, relval) put_unaligned(val,rp) 13#define flat_put_addr_at_rp(rp, val, relval) put_unaligned(val,rp)
14#define flat_get_relocate_addr(rel) (rel) 14#define flat_get_relocate_addr(rel) (rel)
15#define flat_set_persistent(relval, p) 0
15 16
16#endif /* __M68KNOMMU_FLAT_H__ */ 17#endif /* __M68KNOMMU_FLAT_H__ */
diff --git a/include/asm-mips/cmpxchg.h b/include/asm-mips/cmpxchg.h
new file mode 100644
index 000000000000..c5b4708e003b
--- /dev/null
+++ b/include/asm-mips/cmpxchg.h
@@ -0,0 +1,107 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
7 */
8#ifndef __ASM_CMPXCHG_H
9#define __ASM_CMPXCHG_H
10
11#include <linux/irqflags.h>
12
13#define __HAVE_ARCH_CMPXCHG 1
14
15#define __cmpxchg_asm(ld, st, m, old, new) \
16({ \
17 __typeof(*(m)) __ret; \
18 \
19 if (cpu_has_llsc && R10000_LLSC_WAR) { \
20 __asm__ __volatile__( \
21 " .set push \n" \
22 " .set noat \n" \
23 " .set mips3 \n" \
24 "1: " ld " %0, %2 # __cmpxchg_asm \n" \
25 " bne %0, %z3, 2f \n" \
26 " .set mips0 \n" \
27 " move $1, %z4 \n" \
28 " .set mips3 \n" \
29 " " st " $1, %1 \n" \
30 " beqzl $1, 1b \n" \
31 "2: \n" \
32 " .set pop \n" \
33 : "=&r" (__ret), "=R" (*m) \
34 : "R" (*m), "Jr" (old), "Jr" (new) \
35 : "memory"); \
36 } else if (cpu_has_llsc) { \
37 __asm__ __volatile__( \
38 " .set push \n" \
39 " .set noat \n" \
40 " .set mips3 \n" \
41 "1: " ld " %0, %2 # __cmpxchg_asm \n" \
42 " bne %0, %z3, 2f \n" \
43 " .set mips0 \n" \
44 " move $1, %z4 \n" \
45 " .set mips3 \n" \
46 " " st " $1, %1 \n" \
47 " beqz $1, 3f \n" \
48 "2: \n" \
49 " .subsection 2 \n" \
50 "3: b 1b \n" \
51 " .previous \n" \
52 " .set pop \n" \
53 : "=&r" (__ret), "=R" (*m) \
54 : "R" (*m), "Jr" (old), "Jr" (new) \
55 : "memory"); \
56 } else { \
57 unsigned long __flags; \
58 \
59 raw_local_irq_save(__flags); \
60 __ret = *m; \
61 if (__ret == old) \
62 *m = new; \
63 raw_local_irq_restore(__flags); \
64 } \
65 \
66 __ret; \
67})
68
69/*
70 * This function doesn't exist, so you'll get a linker error
71 * if something tries to do an invalid cmpxchg().
72 */
73extern void __cmpxchg_called_with_bad_pointer(void);
74
75#define __cmpxchg(ptr,old,new,barrier) \
76({ \
77 __typeof__(ptr) __ptr = (ptr); \
78 __typeof__(*(ptr)) __old = (old); \
79 __typeof__(*(ptr)) __new = (new); \
80 __typeof__(*(ptr)) __res = 0; \
81 \
82 barrier; \
83 \
84 switch (sizeof(*(__ptr))) { \
85 case 4: \
86 __res = __cmpxchg_asm("ll", "sc", __ptr, __old, __new); \
87 break; \
88 case 8: \
89 if (sizeof(long) == 8) { \
90 __res = __cmpxchg_asm("lld", "scd", __ptr, \
91 __old, __new); \
92 break; \
93 } \
94 default: \
95 __cmpxchg_called_with_bad_pointer(); \
96 break; \
97 } \
98 \
99 barrier; \
100 \
101 __res; \
102})
103
104#define cmpxchg(ptr, old, new) __cmpxchg(ptr, old, new, smp_llsc_mb())
105#define cmpxchg_local(ptr, old, new) __cmpxchg(ptr, old, new,)
106
107#endif /* __ASM_CMPXCHG_H */
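This new header collects the MIPS compare-and-swap helpers that the asm-mips/system.h hunk further down removes; cmpxchg() keeps full smp_llsc_mb() barriers around the LL/SC loop while cmpxchg_local() takes none. A minimal sketch of the usual retry pattern built on top of it; the counter increment is illustrative and not part of the patch:

#include <asm/cmpxchg.h>

/* Illustrative lock-free increment; retries until no other CPU raced with us. */
static unsigned long example_counter_inc(unsigned long *counter)
{
	unsigned long old, new;

	do {
		old = *counter;
		new = old + 1;
		/* cmpxchg() returns the value it found; a mismatch means we lost a race. */
	} while (cmpxchg(counter, old, new) != old);

	return new;
}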
diff --git a/include/asm-mips/fcntl.h b/include/asm-mips/fcntl.h
index 00a50ec1c19f..2a52333a062d 100644
--- a/include/asm-mips/fcntl.h
+++ b/include/asm-mips/fcntl.h
@@ -13,6 +13,7 @@
13#define O_SYNC 0x0010 13#define O_SYNC 0x0010
14#define O_NONBLOCK 0x0080 14#define O_NONBLOCK 0x0080
15#define O_CREAT 0x0100 /* not fcntl */ 15#define O_CREAT 0x0100 /* not fcntl */
16#define O_TRUNC 0x0200 /* not fcntl */
16#define O_EXCL 0x0400 /* not fcntl */ 17#define O_EXCL 0x0400 /* not fcntl */
17#define O_NOCTTY 0x0800 /* not fcntl */ 18#define O_NOCTTY 0x0800 /* not fcntl */
18#define FASYNC 0x1000 /* fcntl, for BSD compatibility */ 19#define FASYNC 0x1000 /* fcntl, for BSD compatibility */
diff --git a/include/asm-mips/irq.h b/include/asm-mips/irq.h
index 97102ebc54b1..2cb52cf8bd4e 100644
--- a/include/asm-mips/irq.h
+++ b/include/asm-mips/irq.h
@@ -24,7 +24,30 @@ static inline int irq_canonicalize(int irq)
24#define irq_canonicalize(irq) (irq) /* Sane hardware, sane code ... */ 24#define irq_canonicalize(irq) (irq) /* Sane hardware, sane code ... */
25#endif 25#endif
26 26
27#ifdef CONFIG_MIPS_MT_SMTC
28
29struct irqaction;
30
31extern unsigned long irq_hwmask[];
32extern int setup_irq_smtc(unsigned int irq, struct irqaction * new,
33 unsigned long hwmask);
34
35static inline void smtc_im_ack_irq(unsigned int irq)
36{
37 if (irq_hwmask[irq] & ST0_IM)
38 set_c0_status(irq_hwmask[irq] & ST0_IM);
39}
40
41#else
42
43static inline void smtc_im_ack_irq(unsigned int irq)
44{
45}
46
47#endif /* CONFIG_MIPS_MT_SMTC */
48
27#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP 49#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
50
28/* 51/*
29 * Clear interrupt mask handling "backstop" if irq_hwmask 52 * Clear interrupt mask handling "backstop" if irq_hwmask
30 * entry so indicates. This implies that the ack() or end() 53 * entry so indicates. This implies that the ack() or end()
@@ -38,6 +61,7 @@ do { \
38 ~(irq_hwmask[irq] & 0x0000ff00)); \ 61 ~(irq_hwmask[irq] & 0x0000ff00)); \
39} while (0) 62} while (0)
40#else 63#else
64
41#define __DO_IRQ_SMTC_HOOK(irq) do { } while (0) 65#define __DO_IRQ_SMTC_HOOK(irq) do { } while (0)
42#endif 66#endif
43 67
@@ -60,14 +84,6 @@ do { \
60extern void arch_init_irq(void); 84extern void arch_init_irq(void);
61extern void spurious_interrupt(void); 85extern void spurious_interrupt(void);
62 86
63#ifdef CONFIG_MIPS_MT_SMTC
64struct irqaction;
65
66extern unsigned long irq_hwmask[];
67extern int setup_irq_smtc(unsigned int irq, struct irqaction * new,
68 unsigned long hwmask);
69#endif /* CONFIG_MIPS_MT_SMTC */
70
71extern int allocate_irqno(void); 87extern int allocate_irqno(void);
72extern void alloc_legacy_irqno(void); 88extern void alloc_legacy_irqno(void);
73extern void free_irqno(unsigned int irq); 89extern void free_irqno(unsigned int irq);
diff --git a/include/asm-mips/local.h b/include/asm-mips/local.h
index ed882c88e0ca..f9a5ce5c9af1 100644
--- a/include/asm-mips/local.h
+++ b/include/asm-mips/local.h
@@ -4,6 +4,7 @@
4#include <linux/percpu.h> 4#include <linux/percpu.h>
5#include <linux/bitops.h> 5#include <linux/bitops.h>
6#include <asm/atomic.h> 6#include <asm/atomic.h>
7#include <asm/cmpxchg.h>
7#include <asm/war.h> 8#include <asm/war.h>
8 9
9typedef struct 10typedef struct
@@ -114,68 +115,6 @@ static __inline__ long local_sub_return(long i, local_t * l)
114 return result; 115 return result;
115} 116}
116 117
117/*
118 * local_sub_if_positive - conditionally subtract integer from atomic variable
119 * @i: integer value to subtract
120 * @l: pointer of type local_t
121 *
122 * Atomically test @l and subtract @i if @l is greater or equal than @i.
123 * The function returns the old value of @l minus @i.
124 */
125static __inline__ long local_sub_if_positive(long i, local_t * l)
126{
127 unsigned long result;
128
129 if (cpu_has_llsc && R10000_LLSC_WAR) {
130 unsigned long temp;
131
132 __asm__ __volatile__(
133 " .set mips3 \n"
134 "1:" __LL "%1, %2 # local_sub_if_positive\n"
135 " dsubu %0, %1, %3 \n"
136 " bltz %0, 1f \n"
137 __SC "%0, %2 \n"
138 " .set noreorder \n"
139 " beqzl %0, 1b \n"
140 " dsubu %0, %1, %3 \n"
141 " .set reorder \n"
142 "1: \n"
143 " .set mips0 \n"
144 : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
145 : "Ir" (i), "m" (l->a.counter)
146 : "memory");
147 } else if (cpu_has_llsc) {
148 unsigned long temp;
149
150 __asm__ __volatile__(
151 " .set mips3 \n"
152 "1:" __LL "%1, %2 # local_sub_if_positive\n"
153 " dsubu %0, %1, %3 \n"
154 " bltz %0, 1f \n"
155 __SC "%0, %2 \n"
156 " .set noreorder \n"
157 " beqz %0, 1b \n"
158 " dsubu %0, %1, %3 \n"
159 " .set reorder \n"
160 "1: \n"
161 " .set mips0 \n"
162 : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
163 : "Ir" (i), "m" (l->a.counter)
164 : "memory");
165 } else {
166 unsigned long flags;
167
168 local_irq_save(flags);
169 result = l->a.counter;
170 result -= i;
171 if (result >= 0)
172 l->a.counter = result;
173 local_irq_restore(flags);
174 }
175
176 return result;
177}
178
179#define local_cmpxchg(l, o, n) \ 118#define local_cmpxchg(l, o, n) \
180 ((long)cmpxchg_local(&((l)->a.counter), (o), (n))) 119 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
181#define local_xchg(l, n) (xchg_local(&((l)->a.counter),(n))) 120#define local_xchg(l, n) (xchg_local(&((l)->a.counter),(n)))
@@ -234,12 +173,6 @@ static __inline__ long local_sub_if_positive(long i, local_t * l)
234#define local_dec_and_test(l) (local_sub_return(1, (l)) == 0) 173#define local_dec_and_test(l) (local_sub_return(1, (l)) == 0)
235 174
236/* 175/*
237 * local_dec_if_positive - decrement by 1 if old value positive
238 * @l: pointer of type local_t
239 */
240#define local_dec_if_positive(l) local_sub_if_positive(1, l)
241
242/*
243 * local_add_negative - add and test if negative 176 * local_add_negative - add and test if negative
244 * @l: pointer of type local_t 177 * @l: pointer of type local_t
245 * @i: integer value to add 178 * @i: integer value to add
diff --git a/include/asm-mips/page.h b/include/asm-mips/page.h
index b92dd8c760da..e3301e54d559 100644
--- a/include/asm-mips/page.h
+++ b/include/asm-mips/page.h
@@ -142,7 +142,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
142/* 142/*
143 * __pa()/__va() should be used only during mem init. 143 * __pa()/__va() should be used only during mem init.
144 */ 144 */
145#if defined(CONFIG_64BIT) && !defined(CONFIG_BUILD_ELF64) 145#ifdef CONFIG_64BIT
146#define __pa(x) \ 146#define __pa(x) \
147({ \ 147({ \
148 unsigned long __x = (unsigned long)(x); \ 148 unsigned long __x = (unsigned long)(x); \
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 357251f42518..480b574e2483 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -17,6 +17,7 @@
17 17
18#include <asm/addrspace.h> 18#include <asm/addrspace.h>
19#include <asm/barrier.h> 19#include <asm/barrier.h>
20#include <asm/cmpxchg.h>
20#include <asm/cpu-features.h> 21#include <asm/cpu-features.h>
21#include <asm/dsp.h> 22#include <asm/dsp.h>
22#include <asm/war.h> 23#include <asm/war.h>
@@ -194,266 +195,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
194 195
195#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) 196#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
196 197
197#define __HAVE_ARCH_CMPXCHG 1
198
199static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
200 unsigned long new)
201{
202 __u32 retval;
203
204 if (cpu_has_llsc && R10000_LLSC_WAR) {
205 __asm__ __volatile__(
206 " .set push \n"
207 " .set noat \n"
208 " .set mips3 \n"
209 "1: ll %0, %2 # __cmpxchg_u32 \n"
210 " bne %0, %z3, 2f \n"
211 " .set mips0 \n"
212 " move $1, %z4 \n"
213 " .set mips3 \n"
214 " sc $1, %1 \n"
215 " beqzl $1, 1b \n"
216 "2: \n"
217 " .set pop \n"
218 : "=&r" (retval), "=R" (*m)
219 : "R" (*m), "Jr" (old), "Jr" (new)
220 : "memory");
221 } else if (cpu_has_llsc) {
222 __asm__ __volatile__(
223 " .set push \n"
224 " .set noat \n"
225 " .set mips3 \n"
226 "1: ll %0, %2 # __cmpxchg_u32 \n"
227 " bne %0, %z3, 2f \n"
228 " .set mips0 \n"
229 " move $1, %z4 \n"
230 " .set mips3 \n"
231 " sc $1, %1 \n"
232 " beqz $1, 3f \n"
233 "2: \n"
234 " .subsection 2 \n"
235 "3: b 1b \n"
236 " .previous \n"
237 " .set pop \n"
238 : "=&r" (retval), "=R" (*m)
239 : "R" (*m), "Jr" (old), "Jr" (new)
240 : "memory");
241 } else {
242 unsigned long flags;
243
244 raw_local_irq_save(flags);
245 retval = *m;
246 if (retval == old)
247 *m = new;
248 raw_local_irq_restore(flags); /* implies memory barrier */
249 }
250
251 smp_llsc_mb();
252
253 return retval;
254}
255
256static inline unsigned long __cmpxchg_u32_local(volatile int * m,
257 unsigned long old, unsigned long new)
258{
259 __u32 retval;
260
261 if (cpu_has_llsc && R10000_LLSC_WAR) {
262 __asm__ __volatile__(
263 " .set push \n"
264 " .set noat \n"
265 " .set mips3 \n"
266 "1: ll %0, %2 # __cmpxchg_u32 \n"
267 " bne %0, %z3, 2f \n"
268 " .set mips0 \n"
269 " move $1, %z4 \n"
270 " .set mips3 \n"
271 " sc $1, %1 \n"
272 " beqzl $1, 1b \n"
273 "2: \n"
274 " .set pop \n"
275 : "=&r" (retval), "=R" (*m)
276 : "R" (*m), "Jr" (old), "Jr" (new)
277 : "memory");
278 } else if (cpu_has_llsc) {
279 __asm__ __volatile__(
280 " .set push \n"
281 " .set noat \n"
282 " .set mips3 \n"
283 "1: ll %0, %2 # __cmpxchg_u32 \n"
284 " bne %0, %z3, 2f \n"
285 " .set mips0 \n"
286 " move $1, %z4 \n"
287 " .set mips3 \n"
288 " sc $1, %1 \n"
289 " beqz $1, 1b \n"
290 "2: \n"
291 " .set pop \n"
292 : "=&r" (retval), "=R" (*m)
293 : "R" (*m), "Jr" (old), "Jr" (new)
294 : "memory");
295 } else {
296 unsigned long flags;
297
298 local_irq_save(flags);
299 retval = *m;
300 if (retval == old)
301 *m = new;
302 local_irq_restore(flags); /* implies memory barrier */
303 }
304
305 return retval;
306}
307
308#ifdef CONFIG_64BIT
309static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
310 unsigned long new)
311{
312 __u64 retval;
313
314 if (cpu_has_llsc && R10000_LLSC_WAR) {
315 __asm__ __volatile__(
316 " .set push \n"
317 " .set noat \n"
318 " .set mips3 \n"
319 "1: lld %0, %2 # __cmpxchg_u64 \n"
320 " bne %0, %z3, 2f \n"
321 " move $1, %z4 \n"
322 " scd $1, %1 \n"
323 " beqzl $1, 1b \n"
324 "2: \n"
325 " .set pop \n"
326 : "=&r" (retval), "=R" (*m)
327 : "R" (*m), "Jr" (old), "Jr" (new)
328 : "memory");
329 } else if (cpu_has_llsc) {
330 __asm__ __volatile__(
331 " .set push \n"
332 " .set noat \n"
333 " .set mips3 \n"
334 "1: lld %0, %2 # __cmpxchg_u64 \n"
335 " bne %0, %z3, 2f \n"
336 " move $1, %z4 \n"
337 " scd $1, %1 \n"
338 " beqz $1, 3f \n"
339 "2: \n"
340 " .subsection 2 \n"
341 "3: b 1b \n"
342 " .previous \n"
343 " .set pop \n"
344 : "=&r" (retval), "=R" (*m)
345 : "R" (*m), "Jr" (old), "Jr" (new)
346 : "memory");
347 } else {
348 unsigned long flags;
349
350 raw_local_irq_save(flags);
351 retval = *m;
352 if (retval == old)
353 *m = new;
354 raw_local_irq_restore(flags); /* implies memory barrier */
355 }
356
357 smp_llsc_mb();
358
359 return retval;
360}
361
362static inline unsigned long __cmpxchg_u64_local(volatile int * m,
363 unsigned long old, unsigned long new)
364{
365 __u64 retval;
366
367 if (cpu_has_llsc && R10000_LLSC_WAR) {
368 __asm__ __volatile__(
369 " .set push \n"
370 " .set noat \n"
371 " .set mips3 \n"
372 "1: lld %0, %2 # __cmpxchg_u64 \n"
373 " bne %0, %z3, 2f \n"
374 " move $1, %z4 \n"
375 " scd $1, %1 \n"
376 " beqzl $1, 1b \n"
377 "2: \n"
378 " .set pop \n"
379 : "=&r" (retval), "=R" (*m)
380 : "R" (*m), "Jr" (old), "Jr" (new)
381 : "memory");
382 } else if (cpu_has_llsc) {
383 __asm__ __volatile__(
384 " .set push \n"
385 " .set noat \n"
386 " .set mips3 \n"
387 "1: lld %0, %2 # __cmpxchg_u64 \n"
388 " bne %0, %z3, 2f \n"
389 " move $1, %z4 \n"
390 " scd $1, %1 \n"
391 " beqz $1, 1b \n"
392 "2: \n"
393 " .set pop \n"
394 : "=&r" (retval), "=R" (*m)
395 : "R" (*m), "Jr" (old), "Jr" (new)
396 : "memory");
397 } else {
398 unsigned long flags;
399
400 local_irq_save(flags);
401 retval = *m;
402 if (retval == old)
403 *m = new;
404 local_irq_restore(flags); /* implies memory barrier */
405 }
406
407 return retval;
408}
409
410#else
411extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
412 volatile int * m, unsigned long old, unsigned long new);
413#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels
414extern unsigned long __cmpxchg_u64_local_unsupported_on_32bit_kernels(
415 volatile int * m, unsigned long old, unsigned long new);
416#define __cmpxchg_u64_local __cmpxchg_u64_local_unsupported_on_32bit_kernels
417#endif
418
419/* This function doesn't exist, so you'll get a linker error
420 if something tries to do an invalid cmpxchg(). */
421extern void __cmpxchg_called_with_bad_pointer(void);
422
423static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
424 unsigned long new, int size)
425{
426 switch (size) {
427 case 4:
428 return __cmpxchg_u32(ptr, old, new);
429 case 8:
430 return __cmpxchg_u64(ptr, old, new);
431 }
432 __cmpxchg_called_with_bad_pointer();
433 return old;
434}
435
436static inline unsigned long __cmpxchg_local(volatile void * ptr,
437 unsigned long old, unsigned long new, int size)
438{
439 switch (size) {
440 case 4:
441 return __cmpxchg_u32_local(ptr, old, new);
442 case 8:
443 return __cmpxchg_u64_local(ptr, old, new);
444 }
445 __cmpxchg_called_with_bad_pointer();
446 return old;
447}
448
449#define cmpxchg(ptr,old,new) \
450 ((__typeof__(*(ptr)))__cmpxchg((ptr), \
451 (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))
452
453#define cmpxchg_local(ptr,old,new) \
454 ((__typeof__(*(ptr)))__cmpxchg_local((ptr), \
455 (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))
456
457extern void set_handler (unsigned long offset, void *addr, unsigned long len); 198extern void set_handler (unsigned long offset, void *addr, unsigned long len);
458extern void set_uncached_handler (unsigned long offset, void *addr, unsigned long len); 199extern void set_uncached_handler (unsigned long offset, void *addr, unsigned long len);
459 200
diff --git a/include/asm-sh/flat.h b/include/asm-sh/flat.h
index 0d5cc04ab005..dc4f5950dafa 100644
--- a/include/asm-sh/flat.h
+++ b/include/asm-sh/flat.h
@@ -16,8 +16,9 @@
16#define flat_argvp_envp_on_stack() 0 16#define flat_argvp_envp_on_stack() 0
17#define flat_old_ram_flag(flags) (flags) 17#define flat_old_ram_flag(flags) (flags)
18#define flat_reloc_valid(reloc, size) ((reloc) <= (size)) 18#define flat_reloc_valid(reloc, size) ((reloc) <= (size))
19#define flat_get_addr_from_rp(rp, relval, flags) get_unaligned(rp) 19#define flat_get_addr_from_rp(rp, relval, flags, p) get_unaligned(rp)
20#define flat_put_addr_at_rp(rp, val, relval) put_unaligned(val,rp) 20#define flat_put_addr_at_rp(rp, val, relval) put_unaligned(val,rp)
21#define flat_get_relocate_addr(rel) (rel) 21#define flat_get_relocate_addr(rel) (rel)
22#define flat_set_persistent(relval, p) 0
22 23
23#endif /* __ASM_SH_FLAT_H */ 24#endif /* __ASM_SH_FLAT_H */
diff --git a/include/asm-v850/flat.h b/include/asm-v850/flat.h
index 3888f59d6881..17f0ea566611 100644
--- a/include/asm-v850/flat.h
+++ b/include/asm-v850/flat.h
@@ -25,6 +25,7 @@
25#define flat_stack_align(sp) /* nothing needed */ 25#define flat_stack_align(sp) /* nothing needed */
26#define flat_argvp_envp_on_stack() 0 26#define flat_argvp_envp_on_stack() 0
27#define flat_old_ram_flag(flags) (flags) 27#define flat_old_ram_flag(flags) (flags)
28#define flat_set_persistent(relval, p) 0
28 29
29/* We store the type of relocation in the top 4 bits of the `relval.' */ 30/* We store the type of relocation in the top 4 bits of the `relval.' */
30 31
@@ -46,7 +47,8 @@ flat_get_relocate_addr (unsigned long relval)
46 For the v850, RP should always be half-word aligned. */ 47 For the v850, RP should always be half-word aligned. */
47static inline unsigned long flat_get_addr_from_rp (unsigned long *rp, 48static inline unsigned long flat_get_addr_from_rp (unsigned long *rp,
48 unsigned long relval, 49 unsigned long relval,
49 unsigned long flags) 50 unsigned long flags,
51 unsigned long *persistent)
50{ 52{
51 short *srp = (short *)rp; 53 short *srp = (short *)rp;
52 54
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 19525175b91c..31f579b828f2 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -371,7 +371,7 @@ static inline void sync_core(void)
371#define ARCH_HAS_PREFETCH 371#define ARCH_HAS_PREFETCH
372static inline void prefetch(void *x) 372static inline void prefetch(void *x)
373{ 373{
374 asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x)); 374 asm volatile("prefetcht0 (%0)" :: "r" (x));
375} 375}
376 376
377#define ARCH_HAS_PREFETCHW 1 377#define ARCH_HAS_PREFETCHW 1
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 963051a967d6..3ec6e7ff5fbd 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -32,15 +32,7 @@
32 * CPUFREQ NOTIFIER INTERFACE * 32 * CPUFREQ NOTIFIER INTERFACE *
33 *********************************************************************/ 33 *********************************************************************/
34 34
35#ifdef CONFIG_CPU_FREQ
36int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list); 35int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
37#else
38static inline int cpufreq_register_notifier(struct notifier_block *nb,
39 unsigned int list)
40{
41 return 0;
42}
43#endif
44int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list); 36int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
45 37
46#define CPUFREQ_TRANSITION_NOTIFIER (0) 38#define CPUFREQ_TRANSITION_NOTIFIER (0)
@@ -268,22 +260,17 @@ struct freq_attr {
268int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); 260int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
269int cpufreq_update_policy(unsigned int cpu); 261int cpufreq_update_policy(unsigned int cpu);
270 262
263/* query the current CPU frequency (in kHz). If zero, cpufreq couldn't detect it */
264unsigned int cpufreq_get(unsigned int cpu);
271 265
272/* 266/* query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it */
273 * query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it
274 */
275#ifdef CONFIG_CPU_FREQ 267#ifdef CONFIG_CPU_FREQ
276unsigned int cpufreq_quick_get(unsigned int cpu); 268unsigned int cpufreq_quick_get(unsigned int cpu);
277unsigned int cpufreq_get(unsigned int cpu);
278#else 269#else
279static inline unsigned int cpufreq_quick_get(unsigned int cpu) 270static inline unsigned int cpufreq_quick_get(unsigned int cpu)
280{ 271{
281 return 0; 272 return 0;
282} 273}
283static inline unsigned int cpufreq_get(unsigned int cpu)
284{
285 return 0;
286}
287#endif 274#endif
288 275
289 276
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a01ac6dd5f5e..313c6b6e774f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -113,7 +113,7 @@ extern unsigned long avenrun[]; /* Load averages */
113 113
114#define FSHIFT 11 /* nr of bits of precision */ 114#define FSHIFT 11 /* nr of bits of precision */
115#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */ 115#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */
116#define LOAD_FREQ (5*HZ) /* 5 sec intervals */ 116#define LOAD_FREQ (5*HZ+1) /* 5 sec intervals */
117#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */ 117#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */
118#define EXP_5 2014 /* 1/exp(5sec/5min) */ 118#define EXP_5 2014 /* 1/exp(5sec/5min) */
119#define EXP_15 2037 /* 1/exp(5sec/15min) */ 119#define EXP_15 2037 /* 1/exp(5sec/15min) */
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 4ef4d22e5e43..b4af6bcb7b7a 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -127,7 +127,7 @@ int sync_page_range(struct inode *inode, struct address_space *mapping,
127 loff_t pos, loff_t count); 127 loff_t pos, loff_t count);
128int sync_page_range_nolock(struct inode *inode, struct address_space *mapping, 128int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
129 loff_t pos, loff_t count); 129 loff_t pos, loff_t count);
130void set_page_dirty_balance(struct page *page); 130void set_page_dirty_balance(struct page *page, int page_mkwrite);
131void writeback_set_ratelimit(void); 131void writeback_set_ratelimit(void);
132 132
133/* pdflush.c */ 133/* pdflush.c */
diff --git a/include/net/rose.h b/include/net/rose.h
index a4047d3cf5dd..e5bb084d8754 100644
--- a/include/net/rose.h
+++ b/include/net/rose.h
@@ -188,7 +188,7 @@ extern void rose_kick(struct sock *);
188extern void rose_enquiry_response(struct sock *); 188extern void rose_enquiry_response(struct sock *);
189 189
190/* rose_route.c */ 190/* rose_route.c */
191extern struct rose_neigh rose_loopback_neigh; 191extern struct rose_neigh *rose_loopback_neigh;
192extern const struct file_operations rose_neigh_fops; 192extern const struct file_operations rose_neigh_fops;
193extern const struct file_operations rose_nodes_fops; 193extern const struct file_operations rose_nodes_fops;
194extern const struct file_operations rose_routes_fops; 194extern const struct file_operations rose_routes_fops;
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 991c85bb9e36..e8e3a64eb322 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -114,7 +114,6 @@ sctp_state_fn_t sctp_sf_do_4_C;
114sctp_state_fn_t sctp_sf_eat_data_6_2; 114sctp_state_fn_t sctp_sf_eat_data_6_2;
115sctp_state_fn_t sctp_sf_eat_data_fast_4_4; 115sctp_state_fn_t sctp_sf_eat_data_fast_4_4;
116sctp_state_fn_t sctp_sf_eat_sack_6_2; 116sctp_state_fn_t sctp_sf_eat_sack_6_2;
117sctp_state_fn_t sctp_sf_tabort_8_4_8;
118sctp_state_fn_t sctp_sf_operr_notify; 117sctp_state_fn_t sctp_sf_operr_notify;
119sctp_state_fn_t sctp_sf_t1_init_timer_expire; 118sctp_state_fn_t sctp_sf_t1_init_timer_expire;
120sctp_state_fn_t sctp_sf_t1_cookie_timer_expire; 119sctp_state_fn_t sctp_sf_t1_cookie_timer_expire;
@@ -247,6 +246,9 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *,
247 int, __be16); 246 int, __be16);
248struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc, 247struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
249 union sctp_addr *addr); 248 union sctp_addr *addr);
249int sctp_verify_asconf(const struct sctp_association *asoc,
250 struct sctp_paramhdr *param_hdr, void *chunk_end,
251 struct sctp_paramhdr **errp);
250struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc, 252struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
251 struct sctp_chunk *asconf); 253 struct sctp_chunk *asconf);
252int sctp_process_asconf_ack(struct sctp_association *asoc, 254int sctp_process_asconf_ack(struct sctp_association *asoc,
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index c2fe2dcc9afc..baff49dfcdbd 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -421,6 +421,7 @@ struct sctp_signed_cookie {
421 * internally. 421 * internally.
422 */ 422 */
423union sctp_addr_param { 423union sctp_addr_param {
424 struct sctp_paramhdr p;
424 struct sctp_ipv4addr_param v4; 425 struct sctp_ipv4addr_param v4;
425 struct sctp_ipv6addr_param v6; 426 struct sctp_ipv6addr_param v6;
426}; 427};
@@ -1156,7 +1157,7 @@ int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
1156int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *, 1157int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *,
1157 __u8 use_as_src, gfp_t gfp); 1158 __u8 use_as_src, gfp_t gfp);
1158int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *, 1159int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *,
1159 void (*rcu_call)(struct rcu_head *, 1160 void fastcall (*rcu_call)(struct rcu_head *,
1160 void (*func)(struct rcu_head *))); 1161 void (*func)(struct rcu_head *)));
1161int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *, 1162int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *,
1162 struct sctp_sock *); 1163 struct sctp_sock *);
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 185c7ecce4cc..54053de0bdd7 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1059,14 +1059,12 @@ struct tcp_md5sig_key {
1059}; 1059};
1060 1060
1061struct tcp4_md5sig_key { 1061struct tcp4_md5sig_key {
1062 u8 *key; 1062 struct tcp_md5sig_key base;
1063 u16 keylen;
1064 __be32 addr; 1063 __be32 addr;
1065}; 1064};
1066 1065
1067struct tcp6_md5sig_key { 1066struct tcp6_md5sig_key {
1068 u8 *key; 1067 struct tcp_md5sig_key base;
1069 u16 keylen;
1070#if 0 1068#if 0
1071 u32 scope_id; /* XXX */ 1069 u32 scope_id; /* XXX */
1072#endif 1070#endif
diff --git a/kernel/futex.c b/kernel/futex.c
index e8935b195e88..fcc94e7b4086 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1943,9 +1943,10 @@ static inline int fetch_robust_entry(struct robust_list __user **entry,
1943void exit_robust_list(struct task_struct *curr) 1943void exit_robust_list(struct task_struct *curr)
1944{ 1944{
1945 struct robust_list_head __user *head = curr->robust_list; 1945 struct robust_list_head __user *head = curr->robust_list;
1946 struct robust_list __user *entry, *pending; 1946 struct robust_list __user *entry, *next_entry, *pending;
1947 unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; 1947 unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
1948 unsigned long futex_offset; 1948 unsigned long futex_offset;
1949 int rc;
1949 1950
1950 /* 1951 /*
1951 * Fetch the list head (which was registered earlier, via 1952 * Fetch the list head (which was registered earlier, via
@@ -1965,12 +1966,14 @@ void exit_robust_list(struct task_struct *curr)
1965 if (fetch_robust_entry(&pending, &head->list_op_pending, &pip)) 1966 if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
1966 return; 1967 return;
1967 1968
1968 if (pending) 1969 next_entry = NULL; /* avoid warning with gcc */
1969 handle_futex_death((void __user *)pending + futex_offset,
1970 curr, pip);
1971
1972 while (entry != &head->list) { 1970 while (entry != &head->list) {
1973 /* 1971 /*
1972 * Fetch the next entry in the list before calling
1973 * handle_futex_death:
1974 */
1975 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
1976 /*
1974 * A pending lock might already be on the list, so 1977 * A pending lock might already be on the list, so
1975 * don't process it twice: 1978 * don't process it twice:
1976 */ 1979 */
@@ -1978,11 +1981,10 @@ void exit_robust_list(struct task_struct *curr)
1978 if (handle_futex_death((void __user *)entry + futex_offset, 1981 if (handle_futex_death((void __user *)entry + futex_offset,
1979 curr, pi)) 1982 curr, pi))
1980 return; 1983 return;
1981 /* 1984 if (rc)
1982 * Fetch the next entry in the list:
1983 */
1984 if (fetch_robust_entry(&entry, &entry->next, &pi))
1985 return; 1985 return;
1986 entry = next_entry;
1987 pi = next_pi;
1986 /* 1988 /*
1987 * Avoid excessively long or circular lists: 1989 * Avoid excessively long or circular lists:
1988 */ 1990 */
@@ -1991,6 +1993,10 @@ void exit_robust_list(struct task_struct *curr)
1991 1993
1992 cond_resched(); 1994 cond_resched();
1993 } 1995 }
1996
1997 if (pending)
1998 handle_futex_death((void __user *)pending + futex_offset,
1999 curr, pip);
1994} 2000}
1995 2001
1996long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, 2002long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
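This hunk and the matching futex_compat.c change below reorder the robust-list walk: the next entry is read from user space before handle_futex_death() runs, since after that call the current entry may be reused by its owner, and the list_op_pending entry is now processed only once the walk has finished. Stripped of the user-space accessors and error handling, the loop reduces to the shape below; the node type and handle() helper are hypothetical stand-ins, not kernel code:

/* Hypothetical stand-ins for the user-space robust_list entries. */
struct node {
	struct node *next;
	int dead;
};

static void handle(struct node *n)
{
	/* In the real code this may wake the lock owner, after which n can be reused. */
	n->dead = 1;
}

static void example_walk(struct node *entry, struct node *head)
{
	struct node *next;

	while (entry != head) {
		next = entry->next;	/* capture the link first ... */
		handle(entry);		/* ... then let the entry go away */
		entry = next;
	}
}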
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index 7e52eb051f22..2c2e2954b713 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -38,10 +38,11 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
38void compat_exit_robust_list(struct task_struct *curr) 38void compat_exit_robust_list(struct task_struct *curr)
39{ 39{
40 struct compat_robust_list_head __user *head = curr->compat_robust_list; 40 struct compat_robust_list_head __user *head = curr->compat_robust_list;
41 struct robust_list __user *entry, *pending; 41 struct robust_list __user *entry, *next_entry, *pending;
42 unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; 42 unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
43 compat_uptr_t uentry, upending; 43 compat_uptr_t uentry, next_uentry, upending;
44 compat_long_t futex_offset; 44 compat_long_t futex_offset;
45 int rc;
45 46
46 /* 47 /*
47 * Fetch the list head (which was registered earlier, via 48 * Fetch the list head (which was registered earlier, via
@@ -61,11 +62,16 @@ void compat_exit_robust_list(struct task_struct *curr)
61 if (fetch_robust_entry(&upending, &pending, 62 if (fetch_robust_entry(&upending, &pending,
62 &head->list_op_pending, &pip)) 63 &head->list_op_pending, &pip))
63 return; 64 return;
64 if (pending)
65 handle_futex_death((void __user *)pending + futex_offset, curr, pip);
66 65
66 next_entry = NULL; /* avoid warning with gcc */
67 while (entry != (struct robust_list __user *) &head->list) { 67 while (entry != (struct robust_list __user *) &head->list) {
68 /* 68 /*
69 * Fetch the next entry in the list before calling
70 * handle_futex_death:
71 */
72 rc = fetch_robust_entry(&next_uentry, &next_entry,
73 (compat_uptr_t __user *)&entry->next, &next_pi);
74 /*
69 * A pending lock might already be on the list, so 75 * A pending lock might already be on the list, so
70 * dont process it twice: 76 * dont process it twice:
71 */ 77 */
@@ -74,12 +80,11 @@ void compat_exit_robust_list(struct task_struct *curr)
74 curr, pi)) 80 curr, pi))
75 return; 81 return;
76 82
77 /* 83 if (rc)
78 * Fetch the next entry in the list:
79 */
80 if (fetch_robust_entry(&uentry, &entry,
81 (compat_uptr_t __user *)&entry->next, &pi))
82 return; 84 return;
85 uentry = next_uentry;
86 entry = next_entry;
87 pi = next_pi;
83 /* 88 /*
84 * Avoid excessively long or circular lists: 89 * Avoid excessively long or circular lists:
85 */ 90 */
@@ -88,6 +93,9 @@ void compat_exit_robust_list(struct task_struct *curr)
88 93
89 cond_resched(); 94 cond_resched();
90 } 95 }
96 if (pending)
97 handle_futex_death((void __user *)pending + futex_offset,
98 curr, pip);
91} 99}
92 100
93asmlinkage long 101asmlinkage long
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index c8580a1e6873..14b0e10dc95c 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -110,7 +110,7 @@ config SUSPEND
110 110
111config HIBERNATION_UP_POSSIBLE 111config HIBERNATION_UP_POSSIBLE
112 bool 112 bool
113 depends on X86 || PPC64_SWSUSP || FRV || PPC32 113 depends on X86 || PPC64_SWSUSP || PPC32
114 depends on !SMP 114 depends on !SMP
115 default y 115 default y
116 116
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c9fbe8e73a45..67c67a87146e 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -639,6 +639,16 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
639 639
640 se->block_start = 0; 640 se->block_start = 0;
641 se->sum_sleep_runtime += delta; 641 se->sum_sleep_runtime += delta;
642
643 /*
644 * Blocking time is in units of nanosecs, so shift by 20 to
645 * get a milliseconds-range estimation of the amount of
646 * time that the task spent sleeping:
647 */
648 if (unlikely(prof_on == SLEEP_PROFILING)) {
649 profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
650 delta >> 20);
651 }
642 } 652 }
643#endif 653#endif
644} 654}
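On the arithmetic in the new comment above: the blocking delta is in nanoseconds and 2^20 = 1,048,576 is close to 10^6, so delta >> 20 is a cheap milliseconds-range estimate (about 5 percent low) that avoids a division in the wakeup path. For example, a task that slept 50 ms has delta = 50,000,000 ns, and 50,000,000 >> 20 = 47, which becomes the hit count recorded against its wait channel by profile_hits().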
diff --git a/kernel/signal.c b/kernel/signal.c
index 9fb91a32edda..792952381092 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -531,18 +531,18 @@ static int check_kill_permission(int sig, struct siginfo *info,
531 if (!valid_signal(sig)) 531 if (!valid_signal(sig))
532 return error; 532 return error;
533 533
534 error = audit_signal_info(sig, t); /* Let audit system see the signal */ 534 if (info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info))) {
535 if (error) 535 error = audit_signal_info(sig, t); /* Let audit system see the signal */
536 return error; 536 if (error)
537 537 return error;
538 error = -EPERM; 538 error = -EPERM;
539 if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info))) 539 if (((sig != SIGCONT) ||
540 && ((sig != SIGCONT) || 540 (process_session(current) != process_session(t)))
541 (process_session(current) != process_session(t))) 541 && (current->euid ^ t->suid) && (current->euid ^ t->uid)
542 && (current->euid ^ t->suid) && (current->euid ^ t->uid) 542 && (current->uid ^ t->suid) && (current->uid ^ t->uid)
543 && (current->uid ^ t->suid) && (current->uid ^ t->uid) 543 && !capable(CAP_KILL))
544 && !capable(CAP_KILL))
545 return error; 544 return error;
545 }
546 546
547 return security_task_kill(t, info, sig, 0); 547 return security_task_kill(t, info, sig, 0);
548} 548}
diff --git a/kernel/sys.c b/kernel/sys.c
index 1b33b05d346b..8ae2e636eb1b 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -32,6 +32,7 @@
32#include <linux/getcpu.h> 32#include <linux/getcpu.h>
33#include <linux/task_io_accounting_ops.h> 33#include <linux/task_io_accounting_ops.h>
34#include <linux/seccomp.h> 34#include <linux/seccomp.h>
35#include <linux/cpu.h>
35 36
36#include <linux/compat.h> 37#include <linux/compat.h>
37#include <linux/syscalls.h> 38#include <linux/syscalls.h>
@@ -878,6 +879,7 @@ void kernel_power_off(void)
878 kernel_shutdown_prepare(SYSTEM_POWER_OFF); 879 kernel_shutdown_prepare(SYSTEM_POWER_OFF);
879 if (pm_power_off_prepare) 880 if (pm_power_off_prepare)
880 pm_power_off_prepare(); 881 pm_power_off_prepare();
882 disable_nonboot_cpus();
881 sysdev_shutdown(); 883 sysdev_shutdown();
882 printk(KERN_EMERG "Power down.\n"); 884 printk(KERN_EMERG "Power down.\n");
883 machine_power_off(); 885 machine_power_off();
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index 3c38fb5eae1b..c36bb7ed0301 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -327,8 +327,9 @@ static int tstats_show(struct seq_file *m, void *v)
327 ms = 1; 327 ms = 1;
328 328
329 if (events && period.tv_sec) 329 if (events && period.tv_sec)
330 seq_printf(m, "%ld total events, %ld.%ld events/sec\n", events, 330 seq_printf(m, "%ld total events, %ld.%03ld events/sec\n",
331 events / period.tv_sec, events * 1000 / ms); 331 events, events * 1000 / ms,
332 (events * 1000000 / ms) % 1000);
332 else 333 else
333 seq_printf(m, "%ld total events\n", events); 334 seq_printf(m, "%ld total events\n", events);
334 335
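A quick check of the corrected events/sec arithmetic above, reading ms as the sampling period in milliseconds: with 1234 events over a 7000 ms period, events * 1000 / ms = 176 is the integer part and (events * 1000000 / ms) % 1000 = 285 the thousandths, so the line now reads "1234 total events, 176.285 events/sec", with %03ld keeping the fraction zero-padded. The old code printed events / period.tv_sec as the integer part but events * 1000 / ms as the fraction, so the two halves did not line up.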
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 50a94eee4d92..cdc9b099e620 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -284,7 +284,7 @@ config LOCKDEP
284 select KALLSYMS_ALL 284 select KALLSYMS_ALL
285 285
286config LOCK_STAT 286config LOCK_STAT
287 bool "Lock usage statisitics" 287 bool "Lock usage statistics"
288 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT 288 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
289 select LOCKDEP 289 select LOCKDEP
290 select DEBUG_SPINLOCK 290 select DEBUG_SPINLOCK
@@ -294,6 +294,8 @@ config LOCK_STAT
294 help 294 help
295 This feature enables tracking lock contention points 295 This feature enables tracking lock contention points
296 296
297 For more details, see Documentation/lockstat.txt
298
297config DEBUG_LOCKDEP 299config DEBUG_LOCKDEP
298 bool "Lock dependency engine debugging" 300 bool "Lock dependency engine debugging"
299 depends on DEBUG_KERNEL && LOCKDEP 301 depends on DEBUG_KERNEL && LOCKDEP
diff --git a/lib/Makefile b/lib/Makefile
index 6b0ba8cf4e5f..4f3f3e256501 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -2,7 +2,7 @@
2# Makefile for some libs needed in the kernel. 2# Makefile for some libs needed in the kernel.
3# 3#
4 4
5lib-y := ctype.o string.o vsprintf.o kasprintf.o cmdline.o \ 5lib-y := ctype.o string.o vsprintf.o cmdline.o \
6 rbtree.o radix-tree.o dump_stack.o \ 6 rbtree.o radix-tree.o dump_stack.o \
7 idr.o int_sqrt.o bitmap.o extable.o prio_tree.o \ 7 idr.o int_sqrt.o bitmap.o extable.o prio_tree.o \
8 sha1.o irq_regs.o reciprocal_div.o argv_split.o 8 sha1.o irq_regs.o reciprocal_div.o argv_split.o
@@ -13,7 +13,7 @@ lib-$(CONFIG_SMP) += cpumask.o
13lib-y += kobject.o kref.o kobject_uevent.o klist.o 13lib-y += kobject.o kref.o kobject_uevent.o klist.o
14 14
15obj-y += div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ 15obj-y += div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
16 bust_spinlocks.o hexdump.o 16 bust_spinlocks.o hexdump.o kasprintf.o
17 17
18ifeq ($(CONFIG_DEBUG_KOBJECT),y) 18ifeq ($(CONFIG_DEBUG_KOBJECT),y)
19CFLAGS_kobject.o += -DDEBUG 19CFLAGS_kobject.o += -DDEBUG
diff --git a/mm/Kconfig b/mm/Kconfig
index e24d348083c3..a7609cbcb00d 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -137,6 +137,7 @@ config SPLIT_PTLOCK_CPUS
137 int 137 int
138 default "4096" if ARM && !CPU_CACHE_VIPT 138 default "4096" if ARM && !CPU_CACHE_VIPT
139 default "4096" if PARISC && !PA20 139 default "4096" if PARISC && !PA20
140 default "4096" if XEN
140 default "4" 141 default "4"
141 142
142# 143#
diff --git a/mm/filemap.c b/mm/filemap.c
index 90b657b50f81..15c8413ee929 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1388,6 +1388,7 @@ retry_find:
1388 size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 1388 size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1389 if (unlikely(vmf->pgoff >= size)) { 1389 if (unlikely(vmf->pgoff >= size)) {
1390 unlock_page(page); 1390 unlock_page(page);
1391 page_cache_release(page);
1391 goto outside_data_content; 1392 goto outside_data_content;
1392 } 1393 }
1393 1394
diff --git a/mm/fremap.c b/mm/fremap.c
index c395b1abf082..95bcb5641c72 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -160,7 +160,7 @@ asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
160 if (vma->vm_private_data && !(vma->vm_flags & VM_NONLINEAR)) 160 if (vma->vm_private_data && !(vma->vm_flags & VM_NONLINEAR))
161 goto out; 161 goto out;
162 162
163 if (!vma->vm_flags & VM_CAN_NONLINEAR) 163 if (!(vma->vm_flags & VM_CAN_NONLINEAR))
164 goto out; 164 goto out;
165 165
166 if (end <= start || start < vma->vm_start || end > vma->vm_end) 166 if (end <= start || start < vma->vm_start || end > vma->vm_end)
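The one-character fix above is a precedence bug: ! binds tighter than &, so the old condition evaluated as (!vma->vm_flags) & VM_CAN_NONLINEAR, and because VM_CAN_NONLINEAR is a high-order flag bit the result is always 0, meaning the check could never reject a mapping until the parentheses were added.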
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 84c795ee2d65..eab8c428cc93 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -42,7 +42,7 @@ static void clear_huge_page(struct page *page, unsigned long addr)
42 might_sleep(); 42 might_sleep();
43 for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) { 43 for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
44 cond_resched(); 44 cond_resched();
45 clear_user_highpage(page + i, addr); 45 clear_user_highpage(page + i, addr + i * PAGE_SIZE);
46 } 46 }
47} 47}
48 48
diff --git a/mm/memory.c b/mm/memory.c
index ca8cac11bd2c..f82b359b2745 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1639,6 +1639,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
1639 struct page *old_page, *new_page; 1639 struct page *old_page, *new_page;
1640 pte_t entry; 1640 pte_t entry;
1641 int reuse = 0, ret = 0; 1641 int reuse = 0, ret = 0;
1642 int page_mkwrite = 0;
1642 struct page *dirty_page = NULL; 1643 struct page *dirty_page = NULL;
1643 1644
1644 old_page = vm_normal_page(vma, address, orig_pte); 1645 old_page = vm_normal_page(vma, address, orig_pte);
@@ -1687,6 +1688,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
1687 page_cache_release(old_page); 1688 page_cache_release(old_page);
1688 if (!pte_same(*page_table, orig_pte)) 1689 if (!pte_same(*page_table, orig_pte))
1689 goto unlock; 1690 goto unlock;
1691
1692 page_mkwrite = 1;
1690 } 1693 }
1691 dirty_page = old_page; 1694 dirty_page = old_page;
1692 get_page(dirty_page); 1695 get_page(dirty_page);
@@ -1774,7 +1777,7 @@ unlock:
1774 * do_no_page is protected similarly. 1777 * do_no_page is protected similarly.
1775 */ 1778 */
1776 wait_on_page_locked(dirty_page); 1779 wait_on_page_locked(dirty_page);
1777 set_page_dirty_balance(dirty_page); 1780 set_page_dirty_balance(dirty_page, page_mkwrite);
1778 put_page(dirty_page); 1781 put_page(dirty_page);
1779 } 1782 }
1780 return ret; 1783 return ret;
@@ -2307,13 +2310,14 @@ oom:
2307 * do not need to flush old virtual caches or the TLB. 2310 * do not need to flush old virtual caches or the TLB.
2308 * 2311 *
2309 * We enter with non-exclusive mmap_sem (to exclude vma changes, 2312 * We enter with non-exclusive mmap_sem (to exclude vma changes,
2310 * but allow concurrent faults), and pte mapped but not yet locked. 2313 * but allow concurrent faults), and pte neither mapped nor locked.
2311 * We return with mmap_sem still held, but pte unmapped and unlocked. 2314 * We return with mmap_sem still held, but pte unmapped and unlocked.
2312 */ 2315 */
2313static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, 2316static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2314 unsigned long address, pte_t *page_table, pmd_t *pmd, 2317 unsigned long address, pmd_t *pmd,
2315 pgoff_t pgoff, unsigned int flags, pte_t orig_pte) 2318 pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
2316{ 2319{
2320 pte_t *page_table;
2317 spinlock_t *ptl; 2321 spinlock_t *ptl;
2318 struct page *page; 2322 struct page *page;
2319 pte_t entry; 2323 pte_t entry;
@@ -2321,13 +2325,13 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2321 struct page *dirty_page = NULL; 2325 struct page *dirty_page = NULL;
2322 struct vm_fault vmf; 2326 struct vm_fault vmf;
2323 int ret; 2327 int ret;
2328 int page_mkwrite = 0;
2324 2329
2325 vmf.virtual_address = (void __user *)(address & PAGE_MASK); 2330 vmf.virtual_address = (void __user *)(address & PAGE_MASK);
2326 vmf.pgoff = pgoff; 2331 vmf.pgoff = pgoff;
2327 vmf.flags = flags; 2332 vmf.flags = flags;
2328 vmf.page = NULL; 2333 vmf.page = NULL;
2329 2334
2330 pte_unmap(page_table);
2331 BUG_ON(vma->vm_flags & VM_PFNMAP); 2335 BUG_ON(vma->vm_flags & VM_PFNMAP);
2332 2336
2333 if (likely(vma->vm_ops->fault)) { 2337 if (likely(vma->vm_ops->fault)) {
@@ -2398,6 +2402,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2398 anon = 1; /* no anon but release vmf.page */ 2402 anon = 1; /* no anon but release vmf.page */
2399 goto out; 2403 goto out;
2400 } 2404 }
2405 page_mkwrite = 1;
2401 } 2406 }
2402 } 2407 }
2403 2408
@@ -2453,7 +2458,7 @@ out_unlocked:
2453 if (anon) 2458 if (anon)
2454 page_cache_release(vmf.page); 2459 page_cache_release(vmf.page);
2455 else if (dirty_page) { 2460 else if (dirty_page) {
2456 set_page_dirty_balance(dirty_page); 2461 set_page_dirty_balance(dirty_page, page_mkwrite);
2457 put_page(dirty_page); 2462 put_page(dirty_page);
2458 } 2463 }
2459 2464
@@ -2468,8 +2473,8 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2468 - vma->vm_start) >> PAGE_CACHE_SHIFT) + vma->vm_pgoff; 2473 - vma->vm_start) >> PAGE_CACHE_SHIFT) + vma->vm_pgoff;
2469 unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0); 2474 unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0);
2470 2475
2471 return __do_fault(mm, vma, address, page_table, pmd, pgoff, 2476 pte_unmap(page_table);
2472 flags, orig_pte); 2477 return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
2473} 2478}
2474 2479
2475 2480
@@ -2552,9 +2557,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2552 } 2557 }
2553 2558
2554 pgoff = pte_to_pgoff(orig_pte); 2559 pgoff = pte_to_pgoff(orig_pte);
2555 2560 return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
2556 return __do_fault(mm, vma, address, page_table, pmd, pgoff,
2557 flags, orig_pte);
2558} 2561}
2559 2562
2560/* 2563/*
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 63512a9ed57e..44720363374c 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -274,9 +274,9 @@ static void balance_dirty_pages(struct address_space *mapping)
274 pdflush_operation(background_writeout, 0); 274 pdflush_operation(background_writeout, 0);
275} 275}
276 276
277void set_page_dirty_balance(struct page *page) 277void set_page_dirty_balance(struct page *page, int page_mkwrite)
278{ 278{
279 if (set_page_dirty(page)) { 279 if (set_page_dirty(page) || page_mkwrite) {
280 struct address_space *mapping = page_mapping(page); 280 struct address_space *mapping = page_mapping(page);
281 281
282 if (mapping) 282 if (mapping)
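
The two page-writeback hunks make the throttling decision depend on the new page_mkwrite flag as well as on set_page_dirty()'s return value, so a page that was already dirty still gets accounted when it was dirtied through a write fault. A minimal user-space sketch of that decision, with invented names (this is not the kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    /* set_page_dirty() analogue: returns true only on a clean->dirty transition. */
    static bool mark_dirty(bool *dirty)
    {
        bool was_clean = !*dirty;
        *dirty = true;
        return was_clean;
    }

    static void throttle_writer(void) { puts("balancing dirty pages"); }

    /* Mirrors set_page_dirty_balance(page, page_mkwrite): throttle either on a
     * real transition or when the fault path told us a write just happened. */
    static void dirty_balance(bool *dirty, bool page_mkwrite)
    {
        if (mark_dirty(dirty) || page_mkwrite)
            throttle_writer();
    }

    int main(void)
    {
        bool dirty = true;            /* page was already dirty */
        dirty_balance(&dirty, true);  /* still throttles, thanks to the new flag */
        return 0;
    }
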
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c
index f2de2e48b021..6284c99b456e 100644
--- a/net/ieee80211/ieee80211_rx.c
+++ b/net/ieee80211/ieee80211_rx.c
@@ -366,6 +366,12 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
366 frag = WLAN_GET_SEQ_FRAG(sc); 366 frag = WLAN_GET_SEQ_FRAG(sc);
367 hdrlen = ieee80211_get_hdrlen(fc); 367 hdrlen = ieee80211_get_hdrlen(fc);
368 368
369 if (skb->len < hdrlen) {
370 printk(KERN_INFO "%s: invalid SKB length %d\n",
371 dev->name, skb->len);
372 goto rx_dropped;
373 }
374
369 /* Put this code here so that we avoid duplicating it in all 375 /* Put this code here so that we avoid duplicating it in all
370 * Rx paths. - Jean II */ 376 * Rx paths. - Jean II */
371#ifdef CONFIG_WIRELESS_EXT 377#ifdef CONFIG_WIRELESS_EXT
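
The new length check above drops frames whose skb is shorter than the 802.11 header the code is about to parse. The same validate-before-parse shape, reduced to a self-contained user-space sketch (toy header layout, not the real ieee80211 structures):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Toy header used only for illustration; the real layout and
     * ieee80211_get_hdrlen() live in the kernel and are not reproduced here. */
    struct toy_hdr {
        uint16_t frame_control;
        uint16_t duration;
        uint8_t  addr1[6];
        uint8_t  addr2[6];
        uint8_t  addr3[6];
        uint16_t seq_ctrl;
    };

    static int rx_frame(const uint8_t *buf, size_t len)
    {
        struct toy_hdr hdr;

        /* Mirror of the added check: refuse to parse a frame shorter than the
         * header we are about to read, instead of reading past the buffer. */
        if (len < sizeof(hdr)) {
            fprintf(stderr, "invalid frame length %zu\n", len);
            return -1;
        }
        memcpy(&hdr, buf, sizeof(hdr));
        printf("frame_control=0x%04x\n", hdr.frame_control);
        return 0;
    }

    int main(void)
    {
        uint8_t runt[4] = {0};
        uint8_t full[sizeof(struct toy_hdr)] = {0};

        rx_frame(runt, sizeof(runt));   /* rejected */
        rx_frame(full, sizeof(full));   /* parsed   */
        return 0;
    }
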
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c
index afb6c6698b27..e475f2e1be13 100644
--- a/net/ieee80211/softmac/ieee80211softmac_assoc.c
+++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c
@@ -273,8 +273,6 @@ ieee80211softmac_assoc_work(struct work_struct *work)
273 ieee80211softmac_notify(mac->dev, IEEE80211SOFTMAC_EVENT_SCAN_FINISHED, ieee80211softmac_assoc_notify_scan, NULL); 273 ieee80211softmac_notify(mac->dev, IEEE80211SOFTMAC_EVENT_SCAN_FINISHED, ieee80211softmac_assoc_notify_scan, NULL);
274 if (ieee80211softmac_start_scan(mac)) { 274 if (ieee80211softmac_start_scan(mac)) {
275 dprintk(KERN_INFO PFX "Associate: failed to initiate scan. Is device up?\n"); 275 dprintk(KERN_INFO PFX "Associate: failed to initiate scan. Is device up?\n");
276 mac->associnfo.associating = 0;
277 mac->associnfo.associated = 0;
278 } 276 }
279 goto out; 277 goto out;
280 } else { 278 } else {
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c
index d054e9224b3e..5742dc803b79 100644
--- a/net/ieee80211/softmac/ieee80211softmac_wx.c
+++ b/net/ieee80211/softmac/ieee80211softmac_wx.c
@@ -70,44 +70,30 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev,
70 char *extra) 70 char *extra)
71{ 71{
72 struct ieee80211softmac_device *sm = ieee80211_priv(net_dev); 72 struct ieee80211softmac_device *sm = ieee80211_priv(net_dev);
73 struct ieee80211softmac_network *n;
74 struct ieee80211softmac_auth_queue_item *authptr; 73 struct ieee80211softmac_auth_queue_item *authptr;
75 int length = 0; 74 int length = 0;
76 75
77check_assoc_again: 76check_assoc_again:
78 mutex_lock(&sm->associnfo.mutex); 77 mutex_lock(&sm->associnfo.mutex);
79 /* Check if we're already associating to this or another network
80 * If it's another network, cancel and start over with our new network
81 * If it's our network, ignore the change, we're already doing it!
82 */
83 if((sm->associnfo.associating || sm->associnfo.associated) && 78 if((sm->associnfo.associating || sm->associnfo.associated) &&
84 (data->essid.flags && data->essid.length)) { 79 (data->essid.flags && data->essid.length)) {
85 /* Get the associating network */ 80 dprintk(KERN_INFO PFX "Canceling existing associate request!\n");
86 n = ieee80211softmac_get_network_by_bssid(sm, sm->associnfo.bssid); 81 /* Cancel assoc work */
87 if(n && n->essid.len == data->essid.length && 82 cancel_delayed_work(&sm->associnfo.work);
88 !memcmp(n->essid.data, extra, n->essid.len)) { 83 /* We don't have to do this, but it's a little cleaner */
89 dprintk(KERN_INFO PFX "Already associating or associated to "MAC_FMT"\n", 84 list_for_each_entry(authptr, &sm->auth_queue, list)
90 MAC_ARG(sm->associnfo.bssid)); 85 cancel_delayed_work(&authptr->work);
91 goto out; 86 sm->associnfo.bssvalid = 0;
92 } else { 87 sm->associnfo.bssfixed = 0;
93 dprintk(KERN_INFO PFX "Canceling existing associate request!\n"); 88 sm->associnfo.associating = 0;
94 /* Cancel assoc work */ 89 sm->associnfo.associated = 0;
95 cancel_delayed_work(&sm->associnfo.work); 90 /* We must unlock to avoid deadlocks with the assoc workqueue
96 /* We don't have to do this, but it's a little cleaner */ 91 * on the associnfo.mutex */
97 list_for_each_entry(authptr, &sm->auth_queue, list) 92 mutex_unlock(&sm->associnfo.mutex);
98 cancel_delayed_work(&authptr->work); 93 flush_scheduled_work();
99 sm->associnfo.bssvalid = 0; 94 /* Avoid race! Check assoc status again. Maybe someone started an
100 sm->associnfo.bssfixed = 0; 95 * association while we flushed. */
101 sm->associnfo.associating = 0; 96 goto check_assoc_again;
102 sm->associnfo.associated = 0;
103 /* We must unlock to avoid deadlocks with the assoc workqueue
104 * on the associnfo.mutex */
105 mutex_unlock(&sm->associnfo.mutex);
106 flush_scheduled_work();
107 /* Avoid race! Check assoc status again. Maybe someone started an
108 * association while we flushed. */
109 goto check_assoc_again;
110 }
111 } 97 }
112 98
113 sm->associnfo.static_essid = 0; 99 sm->associnfo.static_essid = 0;
@@ -128,7 +114,7 @@ check_assoc_again:
128 sm->associnfo.associating = 1; 114 sm->associnfo.associating = 1;
129 /* queue lower level code to do work (if necessary) */ 115 /* queue lower level code to do work (if necessary) */
130 schedule_delayed_work(&sm->associnfo.work, 0); 116 schedule_delayed_work(&sm->associnfo.work, 0);
131out: 117
132 mutex_unlock(&sm->associnfo.mutex); 118 mutex_unlock(&sm->associnfo.mutex);
133 119
134 return 0; 120 return 0;
@@ -153,13 +139,13 @@ ieee80211softmac_wx_get_essid(struct net_device *net_dev,
153 data->essid.length = sm->associnfo.req_essid.len; 139 data->essid.length = sm->associnfo.req_essid.len;
154 data->essid.flags = 1; /* active */ 140 data->essid.flags = 1; /* active */
155 memcpy(extra, sm->associnfo.req_essid.data, sm->associnfo.req_essid.len); 141 memcpy(extra, sm->associnfo.req_essid.data, sm->associnfo.req_essid.len);
156 } 142 dprintk(KERN_INFO PFX "Getting essid from req_essid\n");
157 143 } else if (sm->associnfo.associated || sm->associnfo.associating) {
158 /* If we're associating/associated, return that */ 144 /* If we're associating/associated, return that */
159 if (sm->associnfo.associated || sm->associnfo.associating) {
160 data->essid.length = sm->associnfo.associate_essid.len; 145 data->essid.length = sm->associnfo.associate_essid.len;
161 data->essid.flags = 1; /* active */ 146 data->essid.flags = 1; /* active */
162 memcpy(extra, sm->associnfo.associate_essid.data, sm->associnfo.associate_essid.len); 147 memcpy(extra, sm->associnfo.associate_essid.data, sm->associnfo.associate_essid.len);
148 dprintk(KERN_INFO PFX "Getting essid from associate_essid\n");
163 } 149 }
164 mutex_unlock(&sm->associnfo.mutex); 150 mutex_unlock(&sm->associnfo.mutex);
165 151
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index bbad2cdb74b7..f893e90061eb 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2420,6 +2420,9 @@ static int tcp_tso_acked(struct sock *sk, struct sk_buff *skb,
2420 __u32 dval = min(tp->fackets_out, packets_acked); 2420 __u32 dval = min(tp->fackets_out, packets_acked);
2421 tp->fackets_out -= dval; 2421 tp->fackets_out -= dval;
2422 } 2422 }
2423 /* hint's skb might be NULL but we don't need to care */
2424 tp->fastpath_cnt_hint -= min_t(u32, packets_acked,
2425 tp->fastpath_cnt_hint);
2423 tp->packets_out -= packets_acked; 2426 tp->packets_out -= packets_acked;
2424 2427
2425 BUG_ON(tcp_skb_pcount(skb) == 0); 2428 BUG_ON(tcp_skb_pcount(skb) == 0);
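
The added min_t() lines keep fastpath_cnt_hint from wrapping when more segments are acked than the hint currently accounts for. The pattern is just a clamped subtraction on an unsigned counter; a tiny sketch, with invented names:

    #include <stdint.h>
    #include <stdio.h>

    /* Clamp-then-subtract, as in the added min_t() lines: an unsigned counter
     * that may be smaller than the amount being retired must not wrap. */
    static uint32_t retire(uint32_t hint, uint32_t acked)
    {
        uint32_t d = acked < hint ? acked : hint;   /* min_t(u32, acked, hint) */
        return hint - d;                            /* never underflows */
    }

    int main(void)
    {
        printf("%u\n", retire(10, 3));   /* 7 */
        printf("%u\n", retire(2, 5));    /* 0, not a huge wrapped value */
        return 0;
    }
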
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 9c94627c8c7e..e089a978e128 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -833,8 +833,7 @@ static struct tcp_md5sig_key *
833 return NULL; 833 return NULL;
834 for (i = 0; i < tp->md5sig_info->entries4; i++) { 834 for (i = 0; i < tp->md5sig_info->entries4; i++) {
835 if (tp->md5sig_info->keys4[i].addr == addr) 835 if (tp->md5sig_info->keys4[i].addr == addr)
836 return (struct tcp_md5sig_key *) 836 return &tp->md5sig_info->keys4[i].base;
837 &tp->md5sig_info->keys4[i];
838 } 837 }
839 return NULL; 838 return NULL;
840} 839}
@@ -865,9 +864,9 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
865 key = (struct tcp4_md5sig_key *)tcp_v4_md5_do_lookup(sk, addr); 864 key = (struct tcp4_md5sig_key *)tcp_v4_md5_do_lookup(sk, addr);
866 if (key) { 865 if (key) {
867 /* Pre-existing entry - just update that one. */ 866 /* Pre-existing entry - just update that one. */
868 kfree(key->key); 867 kfree(key->base.key);
869 key->key = newkey; 868 key->base.key = newkey;
870 key->keylen = newkeylen; 869 key->base.keylen = newkeylen;
871 } else { 870 } else {
872 struct tcp_md5sig_info *md5sig; 871 struct tcp_md5sig_info *md5sig;
873 872
@@ -906,9 +905,9 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
906 md5sig->alloced4++; 905 md5sig->alloced4++;
907 } 906 }
908 md5sig->entries4++; 907 md5sig->entries4++;
909 md5sig->keys4[md5sig->entries4 - 1].addr = addr; 908 md5sig->keys4[md5sig->entries4 - 1].addr = addr;
910 md5sig->keys4[md5sig->entries4 - 1].key = newkey; 909 md5sig->keys4[md5sig->entries4 - 1].base.key = newkey;
911 md5sig->keys4[md5sig->entries4 - 1].keylen = newkeylen; 910 md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
912 } 911 }
913 return 0; 912 return 0;
914} 913}
@@ -930,7 +929,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
930 for (i = 0; i < tp->md5sig_info->entries4; i++) { 929 for (i = 0; i < tp->md5sig_info->entries4; i++) {
931 if (tp->md5sig_info->keys4[i].addr == addr) { 930 if (tp->md5sig_info->keys4[i].addr == addr) {
932 /* Free the key */ 931 /* Free the key */
933 kfree(tp->md5sig_info->keys4[i].key); 932 kfree(tp->md5sig_info->keys4[i].base.key);
934 tp->md5sig_info->entries4--; 933 tp->md5sig_info->entries4--;
935 934
936 if (tp->md5sig_info->entries4 == 0) { 935 if (tp->md5sig_info->entries4 == 0) {
@@ -964,7 +963,7 @@ static void tcp_v4_clear_md5_list(struct sock *sk)
964 if (tp->md5sig_info->entries4) { 963 if (tp->md5sig_info->entries4) {
965 int i; 964 int i;
966 for (i = 0; i < tp->md5sig_info->entries4; i++) 965 for (i = 0; i < tp->md5sig_info->entries4; i++)
967 kfree(tp->md5sig_info->keys4[i].key); 966 kfree(tp->md5sig_info->keys4[i].base.key);
968 tp->md5sig_info->entries4 = 0; 967 tp->md5sig_info->entries4 = 0;
969 tcp_free_md5sig_pool(); 968 tcp_free_md5sig_pool();
970 } 969 }
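
The tcp_ipv4.c hunks stop casting tcp4_md5sig_key pointers and instead return the address of an embedded base member (the tcp_ipv6.c hunks further down do the same for keys6). A hedged sketch of that layout, with illustrative field names rather than the kernel's exact definitions:

    #include <stdio.h>

    /* Generic key, as seen by family-independent code. */
    struct md5_key {
        char *key;
        int   keylen;
    };

    /* A family-specific entry embeds the generic part as a named member, so a
     * lookup can return &entry->base instead of relying on a pointer cast,
     * which is what the patch above moves away from. */
    struct md5_key_v4 {
        struct md5_key base;
        unsigned int   addr;        /* stand-in for the IPv4 address */
    };

    static struct md5_key *lookup(struct md5_key_v4 *tbl, int n, unsigned int addr)
    {
        for (int i = 0; i < n; i++)
            if (tbl[i].addr == addr)
                return &tbl[i].base;    /* no cast needed */
        return NULL;
    }

    int main(void)
    {
        struct md5_key_v4 tbl[1] = {
            { .base = { .key = "secret", .keylen = 6 }, .addr = 0x7f000001 },
        };
        struct md5_key *k = lookup(tbl, 1, 0x7f000001);
        if (k)
            printf("found key of length %d\n", k->keylen);
        return 0;
    }
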
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 73a894a2152c..5b596659177c 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1268,9 +1268,10 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
1268 1268
1269 if (ipv6_addr_equal(dest, target)) { 1269 if (ipv6_addr_equal(dest, target)) {
1270 on_link = 1; 1270 on_link = 1;
1271 } else if (!(ipv6_addr_type(target) & IPV6_ADDR_LINKLOCAL)) { 1271 } else if (ipv6_addr_type(target) !=
1272 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
1272 ND_PRINTK2(KERN_WARNING 1273 ND_PRINTK2(KERN_WARNING
1273 "ICMPv6 Redirect: target address is not link-local.\n"); 1274 "ICMPv6 Redirect: target address is not link-local unicast.\n");
1274 return; 1275 return;
1275 } 1276 }
1276 1277
@@ -1344,9 +1345,9 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
1344 } 1345 }
1345 1346
1346 if (!ipv6_addr_equal(&ipv6_hdr(skb)->daddr, target) && 1347 if (!ipv6_addr_equal(&ipv6_hdr(skb)->daddr, target) &&
1347 !(ipv6_addr_type(target) & IPV6_ADDR_LINKLOCAL)) { 1348 ipv6_addr_type(target) != (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
1348 ND_PRINTK2(KERN_WARNING 1349 ND_PRINTK2(KERN_WARNING
1349 "ICMPv6 Redirect: target address is not link-local.\n"); 1350 "ICMPv6 Redirect: target address is not link-local unicast.\n");
1350 return; 1351 return;
1351 } 1352 }
1352 1353
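
The redirect checks are tightened from "has the link-local bit set" to "is exactly link-local unicast". A small sketch of why the two tests differ, using made-up flag values rather than the kernel's IPV6_ADDR_* constants:

    #include <stdio.h>

    /* Illustrative flag values only; the real IPV6_ADDR_* constants differ. */
    #define ADDR_UNICAST   0x0001
    #define ADDR_MULTICAST 0x0002
    #define ADDR_LINKLOCAL 0x0020

    int main(void)
    {
        int type = ADDR_MULTICAST | ADDR_LINKLOCAL;   /* link-local, but multicast */

        /* Old test: passes as long as the link-local bit is set. */
        if (!(type & ADDR_LINKLOCAL))
            puts("old check: rejected");
        else
            puts("old check: accepted");

        /* New test: the type must be exactly link-local unicast. */
        if (type != (ADDR_UNICAST | ADDR_LINKLOCAL))
            puts("new check: rejected");
        else
            puts("new check: accepted");
        return 0;
    }
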
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 0f7defb482e9..3e06799b37a6 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -539,7 +539,7 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
539 539
540 for (i = 0; i < tp->md5sig_info->entries6; i++) { 540 for (i = 0; i < tp->md5sig_info->entries6; i++) {
541 if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0) 541 if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0)
542 return (struct tcp_md5sig_key *)&tp->md5sig_info->keys6[i]; 542 return &tp->md5sig_info->keys6[i].base;
543 } 543 }
544 return NULL; 544 return NULL;
545} 545}
@@ -567,9 +567,9 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
567 key = (struct tcp6_md5sig_key*) tcp_v6_md5_do_lookup(sk, peer); 567 key = (struct tcp6_md5sig_key*) tcp_v6_md5_do_lookup(sk, peer);
568 if (key) { 568 if (key) {
569 /* modify existing entry - just update that one */ 569 /* modify existing entry - just update that one */
570 kfree(key->key); 570 kfree(key->base.key);
571 key->key = newkey; 571 key->base.key = newkey;
572 key->keylen = newkeylen; 572 key->base.keylen = newkeylen;
573 } else { 573 } else {
574 /* reallocate new list if current one is full. */ 574 /* reallocate new list if current one is full. */
575 if (!tp->md5sig_info) { 575 if (!tp->md5sig_info) {
@@ -603,8 +603,8 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
603 603
604 ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr, 604 ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
605 peer); 605 peer);
606 tp->md5sig_info->keys6[tp->md5sig_info->entries6].key = newkey; 606 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
607 tp->md5sig_info->keys6[tp->md5sig_info->entries6].keylen = newkeylen; 607 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;
608 608
609 tp->md5sig_info->entries6++; 609 tp->md5sig_info->entries6++;
610 } 610 }
@@ -626,7 +626,7 @@ static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
626 for (i = 0; i < tp->md5sig_info->entries6; i++) { 626 for (i = 0; i < tp->md5sig_info->entries6; i++) {
627 if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) { 627 if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) {
628 /* Free the key */ 628 /* Free the key */
629 kfree(tp->md5sig_info->keys6[i].key); 629 kfree(tp->md5sig_info->keys6[i].base.key);
630 tp->md5sig_info->entries6--; 630 tp->md5sig_info->entries6--;
631 631
632 if (tp->md5sig_info->entries6 == 0) { 632 if (tp->md5sig_info->entries6 == 0) {
@@ -657,7 +657,7 @@ static void tcp_v6_clear_md5_list (struct sock *sk)
657 657
658 if (tp->md5sig_info->entries6) { 658 if (tp->md5sig_info->entries6) {
659 for (i = 0; i < tp->md5sig_info->entries6; i++) 659 for (i = 0; i < tp->md5sig_info->entries6; i++)
660 kfree(tp->md5sig_info->keys6[i].key); 660 kfree(tp->md5sig_info->keys6[i].base.key);
661 tp->md5sig_info->entries6 = 0; 661 tp->md5sig_info->entries6 = 0;
662 tcp_free_md5sig_pool(); 662 tcp_free_md5sig_pool();
663 } 663 }
@@ -668,7 +668,7 @@ static void tcp_v6_clear_md5_list (struct sock *sk)
668 668
669 if (tp->md5sig_info->entries4) { 669 if (tp->md5sig_info->entries4) {
670 for (i = 0; i < tp->md5sig_info->entries4; i++) 670 for (i = 0; i < tp->md5sig_info->entries4; i++)
671 kfree(tp->md5sig_info->keys4[i].key); 671 kfree(tp->md5sig_info->keys4[i].base.key);
672 tp->md5sig_info->entries4 = 0; 672 tp->md5sig_info->entries4 = 0;
673 tcp_free_md5sig_pool(); 673 tcp_free_md5sig_pool();
674 } 674 }
diff --git a/net/mac80211/ieee80211.c b/net/mac80211/ieee80211.c
index 7286c389a4d0..ff2172ffd861 100644
--- a/net/mac80211/ieee80211.c
+++ b/net/mac80211/ieee80211.c
@@ -5259,7 +5259,7 @@ static void __exit ieee80211_exit(void)
5259} 5259}
5260 5260
5261 5261
5262module_init(ieee80211_init); 5262subsys_initcall(ieee80211_init);
5263module_exit(ieee80211_exit); 5263module_exit(ieee80211_exit);
5264 5264
5265MODULE_DESCRIPTION("IEEE 802.11 subsystem"); 5265MODULE_DESCRIPTION("IEEE 802.11 subsystem");
diff --git a/net/mac80211/rc80211_simple.c b/net/mac80211/rc80211_simple.c
index f6780d63b342..17b9f46bbf2b 100644
--- a/net/mac80211/rc80211_simple.c
+++ b/net/mac80211/rc80211_simple.c
@@ -431,7 +431,7 @@ static void __exit rate_control_simple_exit(void)
431} 431}
432 432
433 433
434module_init(rate_control_simple_init); 434subsys_initcall(rate_control_simple_init);
435module_exit(rate_control_simple_exit); 435module_exit(rate_control_simple_exit);
436 436
437MODULE_DESCRIPTION("Simple rate control algorithm for ieee80211"); 437MODULE_DESCRIPTION("Simple rate control algorithm for ieee80211");
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 89ce81529694..7ab82b376e1b 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -424,7 +424,7 @@ static int wme_qdiscop_init(struct Qdisc *qd, struct rtattr *opt)
424 skb_queue_head_init(&q->requeued[i]); 424 skb_queue_head_init(&q->requeued[i]);
425 q->queues[i] = qdisc_create_dflt(qd->dev, &pfifo_qdisc_ops, 425 q->queues[i] = qdisc_create_dflt(qd->dev, &pfifo_qdisc_ops,
426 qd->handle); 426 qd->handle);
427 if (q->queues[i] == 0) { 427 if (!q->queues[i]) {
428 q->queues[i] = &noop_qdisc; 428 q->queues[i] = &noop_qdisc;
429 printk(KERN_ERR "%s child qdisc %i creation failed", dev->name, i); 429 printk(KERN_ERR "%s child qdisc %i creation failed", dev->name, i);
430 } 430 }
diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
index cd01642f0491..114df6eec8c3 100644
--- a/net/rose/rose_loopback.c
+++ b/net/rose/rose_loopback.c
@@ -79,7 +79,7 @@ static void rose_loopback_timer(unsigned long param)
79 79
80 skb_reset_transport_header(skb); 80 skb_reset_transport_header(skb);
81 81
82 sk = rose_find_socket(lci_o, &rose_loopback_neigh); 82 sk = rose_find_socket(lci_o, rose_loopback_neigh);
83 if (sk) { 83 if (sk) {
84 if (rose_process_rx_frame(sk, skb) == 0) 84 if (rose_process_rx_frame(sk, skb) == 0)
85 kfree_skb(skb); 85 kfree_skb(skb);
@@ -88,7 +88,7 @@ static void rose_loopback_timer(unsigned long param)
88 88
89 if (frametype == ROSE_CALL_REQUEST) { 89 if (frametype == ROSE_CALL_REQUEST) {
90 if ((dev = rose_dev_get(dest)) != NULL) { 90 if ((dev = rose_dev_get(dest)) != NULL) {
91 if (rose_rx_call_request(skb, dev, &rose_loopback_neigh, lci_o) == 0) 91 if (rose_rx_call_request(skb, dev, rose_loopback_neigh, lci_o) == 0)
92 kfree_skb(skb); 92 kfree_skb(skb);
93 } else { 93 } else {
94 kfree_skb(skb); 94 kfree_skb(skb);
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index bbcbad1da0d0..96f61a71b252 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -45,7 +45,7 @@ static DEFINE_SPINLOCK(rose_neigh_list_lock);
45static struct rose_route *rose_route_list; 45static struct rose_route *rose_route_list;
46static DEFINE_SPINLOCK(rose_route_list_lock); 46static DEFINE_SPINLOCK(rose_route_list_lock);
47 47
48struct rose_neigh rose_loopback_neigh; 48struct rose_neigh *rose_loopback_neigh;
49 49
50/* 50/*
51 * Add a new route to a node, and in the process add the node and the 51 * Add a new route to a node, and in the process add the node and the
@@ -362,7 +362,12 @@ out:
362 */ 362 */
363void rose_add_loopback_neigh(void) 363void rose_add_loopback_neigh(void)
364{ 364{
365 struct rose_neigh *sn = &rose_loopback_neigh; 365 struct rose_neigh *sn;
366
367 rose_loopback_neigh = kmalloc(sizeof(struct rose_neigh), GFP_KERNEL);
368 if (!rose_loopback_neigh)
369 return;
370 sn = rose_loopback_neigh;
366 371
367 sn->callsign = null_ax25_address; 372 sn->callsign = null_ax25_address;
368 sn->digipeat = NULL; 373 sn->digipeat = NULL;
@@ -417,13 +422,13 @@ int rose_add_loopback_node(rose_address *address)
417 rose_node->mask = 10; 422 rose_node->mask = 10;
418 rose_node->count = 1; 423 rose_node->count = 1;
419 rose_node->loopback = 1; 424 rose_node->loopback = 1;
420 rose_node->neighbour[0] = &rose_loopback_neigh; 425 rose_node->neighbour[0] = rose_loopback_neigh;
421 426
422 /* Insert at the head of list. Address is always mask=10 */ 427 /* Insert at the head of list. Address is always mask=10 */
423 rose_node->next = rose_node_list; 428 rose_node->next = rose_node_list;
424 rose_node_list = rose_node; 429 rose_node_list = rose_node;
425 430
426 rose_loopback_neigh.count++; 431 rose_loopback_neigh->count++;
427 432
428out: 433out:
429 spin_unlock_bh(&rose_node_list_lock); 434 spin_unlock_bh(&rose_node_list_lock);
@@ -454,7 +459,7 @@ void rose_del_loopback_node(rose_address *address)
454 459
455 rose_remove_node(rose_node); 460 rose_remove_node(rose_node);
456 461
457 rose_loopback_neigh.count--; 462 rose_loopback_neigh->count--;
458 463
459out: 464out:
460 spin_unlock_bh(&rose_node_list_lock); 465 spin_unlock_bh(&rose_node_list_lock);
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 8dbe36912ecb..d4d5d2f271d2 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -502,7 +502,7 @@ static int u32_set_parms(struct tcf_proto *tp, unsigned long base,
502 502
503#ifdef CONFIG_NET_CLS_IND 503#ifdef CONFIG_NET_CLS_IND
504 if (tb[TCA_U32_INDEV-1]) { 504 if (tb[TCA_U32_INDEV-1]) {
505 int err = tcf_change_indev(tp, n->indev, tb[TCA_U32_INDEV-1]); 505 err = tcf_change_indev(tp, n->indev, tb[TCA_U32_INDEV-1]);
506 if (err < 0) 506 if (err < 0)
507 goto errout; 507 goto errout;
508 } 508 }
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 3a23e30bc79e..b542c875e154 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -19,6 +19,7 @@
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/ipv6.h> 20#include <linux/ipv6.h>
21#include <linux/skbuff.h> 21#include <linux/skbuff.h>
22#include <linux/jhash.h>
22#include <net/ip.h> 23#include <net/ip.h>
23#include <net/netlink.h> 24#include <net/netlink.h>
24#include <net/pkt_sched.h> 25#include <net/pkt_sched.h>
@@ -95,7 +96,7 @@ struct sfq_sched_data
95 96
96/* Variables */ 97/* Variables */
97 struct timer_list perturb_timer; 98 struct timer_list perturb_timer;
98 int perturbation; 99 u32 perturbation;
99 sfq_index tail; /* Index of current slot in round */ 100 sfq_index tail; /* Index of current slot in round */
100 sfq_index max_depth; /* Maximal depth */ 101 sfq_index max_depth; /* Maximal depth */
101 102
@@ -109,12 +110,7 @@ struct sfq_sched_data
109 110
110static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1) 111static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
111{ 112{
112 int pert = q->perturbation; 113 return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1);
113
114 /* Have we any rotation primitives? If not, WHY? */
115 h ^= (h1<<pert) ^ (h1>>(0x1F - pert));
116 h ^= h>>10;
117 return h & 0x3FF;
118} 114}
119 115
120static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) 116static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
@@ -256,6 +252,13 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc* sch)
256 q->ht[hash] = x = q->dep[SFQ_DEPTH].next; 252 q->ht[hash] = x = q->dep[SFQ_DEPTH].next;
257 q->hash[x] = hash; 253 q->hash[x] = hash;
258 } 254 }
255 /* If selected queue has length q->limit, this means that
 256 * all other queues are empty and that we do a simple tail drop,
257 * i.e. drop _this_ packet.
258 */
259 if (q->qs[x].qlen >= q->limit)
260 return qdisc_drop(skb, sch);
261
259 sch->qstats.backlog += skb->len; 262 sch->qstats.backlog += skb->len;
260 __skb_queue_tail(&q->qs[x], skb); 263 __skb_queue_tail(&q->qs[x], skb);
261 sfq_inc(q, x); 264 sfq_inc(q, x);
@@ -294,6 +297,19 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc* sch)
294 } 297 }
295 sch->qstats.backlog += skb->len; 298 sch->qstats.backlog += skb->len;
296 __skb_queue_head(&q->qs[x], skb); 299 __skb_queue_head(&q->qs[x], skb);
300 /* If selected queue has length q->limit+1, this means that
 301 * all other queues are empty and we do a simple tail drop.
302 * This packet is still requeued at head of queue, tail packet
303 * is dropped.
304 */
305 if (q->qs[x].qlen > q->limit) {
306 skb = q->qs[x].prev;
307 __skb_unlink(skb, &q->qs[x]);
308 sch->qstats.drops++;
309 sch->qstats.backlog -= skb->len;
310 kfree_skb(skb);
311 return NET_XMIT_CN;
312 }
297 sfq_inc(q, x); 313 sfq_inc(q, x);
298 if (q->qs[x].qlen == 1) { /* The flow is new */ 314 if (q->qs[x].qlen == 1) { /* The flow is new */
299 if (q->tail == SFQ_DEPTH) { /* It is the first flow */ 315 if (q->tail == SFQ_DEPTH) { /* It is the first flow */
@@ -370,12 +386,10 @@ static void sfq_perturbation(unsigned long arg)
370 struct Qdisc *sch = (struct Qdisc*)arg; 386 struct Qdisc *sch = (struct Qdisc*)arg;
371 struct sfq_sched_data *q = qdisc_priv(sch); 387 struct sfq_sched_data *q = qdisc_priv(sch);
372 388
373 q->perturbation = net_random()&0x1F; 389 get_random_bytes(&q->perturbation, 4);
374 390
375 if (q->perturb_period) { 391 if (q->perturb_period)
376 q->perturb_timer.expires = jiffies + q->perturb_period; 392 mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
377 add_timer(&q->perturb_timer);
378 }
379} 393}
380 394
381static int sfq_change(struct Qdisc *sch, struct rtattr *opt) 395static int sfq_change(struct Qdisc *sch, struct rtattr *opt)
@@ -391,7 +405,7 @@ static int sfq_change(struct Qdisc *sch, struct rtattr *opt)
391 q->quantum = ctl->quantum ? : psched_mtu(sch->dev); 405 q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
392 q->perturb_period = ctl->perturb_period*HZ; 406 q->perturb_period = ctl->perturb_period*HZ;
393 if (ctl->limit) 407 if (ctl->limit)
394 q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 2); 408 q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);
395 409
396 qlen = sch->q.qlen; 410 qlen = sch->q.qlen;
397 while (sch->q.qlen > q->limit) 411 while (sch->q.qlen > q->limit)
@@ -400,8 +414,8 @@ static int sfq_change(struct Qdisc *sch, struct rtattr *opt)
400 414
401 del_timer(&q->perturb_timer); 415 del_timer(&q->perturb_timer);
402 if (q->perturb_period) { 416 if (q->perturb_period) {
403 q->perturb_timer.expires = jiffies + q->perturb_period; 417 mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
404 add_timer(&q->perturb_timer); 418 get_random_bytes(&q->perturbation, 4);
405 } 419 }
406 sch_tree_unlock(sch); 420 sch_tree_unlock(sch);
407 return 0; 421 return 0;
@@ -423,12 +437,13 @@ static int sfq_init(struct Qdisc *sch, struct rtattr *opt)
423 q->dep[i+SFQ_DEPTH].next = i+SFQ_DEPTH; 437 q->dep[i+SFQ_DEPTH].next = i+SFQ_DEPTH;
424 q->dep[i+SFQ_DEPTH].prev = i+SFQ_DEPTH; 438 q->dep[i+SFQ_DEPTH].prev = i+SFQ_DEPTH;
425 } 439 }
426 q->limit = SFQ_DEPTH - 2; 440 q->limit = SFQ_DEPTH - 1;
427 q->max_depth = 0; 441 q->max_depth = 0;
428 q->tail = SFQ_DEPTH; 442 q->tail = SFQ_DEPTH;
429 if (opt == NULL) { 443 if (opt == NULL) {
430 q->quantum = psched_mtu(sch->dev); 444 q->quantum = psched_mtu(sch->dev);
431 q->perturb_period = 0; 445 q->perturb_period = 0;
446 get_random_bytes(&q->perturbation, 4);
432 } else { 447 } else {
433 int err = sfq_change(sch, opt); 448 int err = sfq_change(sch, opt);
434 if (err) 449 if (err)
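
The sch_sfq.c changes replace the hand-rolled fold with jhash_2words() keyed by a random 32-bit perturbation, enforce the per-flow limit with a tail drop, and reseed the perturbation from get_random_bytes(). The sketch below illustrates only the keyed-hash idea with a toy mixer (deliberately not jhash): reseeding the key changes bucket placement, so a remote sender cannot aim traffic at a single bucket.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    #define BUCKETS 1024           /* stands in for an SFQ_HASH_DIVISOR-sized table */

    /* Toy 2-word keyed mix; the kernel uses jhash_2words(h, h1, perturbation).
     * Any decent mixer works for illustration; this one is NOT jhash. */
    static uint32_t keyed_fold(uint32_t a, uint32_t b, uint32_t key)
    {
        uint32_t h = a ^ (b * 0x9e3779b1u) ^ key;
        h ^= h >> 16;
        h *= 0x85ebca6bu;
        h ^= h >> 13;
        return h & (BUCKETS - 1);
    }

    int main(void)
    {
        uint32_t perturbation;

        srand((unsigned)time(NULL));
        perturbation = (uint32_t)rand();      /* kernel: get_random_bytes() */

        /* The same flow words land in a different bucket once the key is
         * reseeded, which is what defeats deliberate collisions. */
        printf("bucket now : %u\n", keyed_fold(0xc0a80001, 0x0a000001, perturbation));
        printf("reseeded   : %u\n", keyed_fold(0xc0a80001, 0x0a000001, (uint32_t)rand()));
        return 0;
    }
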
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index d35cbf5aae33..dfffa94fb9f6 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -181,7 +181,7 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
181 * structure. 181 * structure.
182 */ 182 */
183int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr, 183int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr,
184 void (*rcu_call)(struct rcu_head *head, 184 void fastcall (*rcu_call)(struct rcu_head *head,
185 void (*func)(struct rcu_head *head))) 185 void (*func)(struct rcu_head *head)))
186{ 186{
187 struct sctp_sockaddr_entry *addr, *temp; 187 struct sctp_sockaddr_entry *addr, *temp;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 47e56017f4ce..f9a0c9276e3b 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -622,6 +622,14 @@ static int sctp_rcv_ootb(struct sk_buff *skb)
622 if (SCTP_CID_SHUTDOWN_COMPLETE == ch->type) 622 if (SCTP_CID_SHUTDOWN_COMPLETE == ch->type)
623 goto discard; 623 goto discard;
624 624
625 /* RFC 4460, 2.11.2
626 * This will discard packets with INIT chunk bundled as
627 * subsequent chunks in the packet. When INIT is first,
628 * the normal INIT processing will discard the chunk.
629 */
630 if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data)
631 goto discard;
632
625 /* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR 633 /* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR
626 * or a COOKIE ACK the SCTP Packet should be silently 634 * or a COOKIE ACK the SCTP Packet should be silently
627 * discarded. 635 * discarded.
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 88aa22407549..e4ea7fdf36ed 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -130,6 +130,14 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
130 /* Force chunk->skb->data to chunk->chunk_end. */ 130 /* Force chunk->skb->data to chunk->chunk_end. */
131 skb_pull(chunk->skb, 131 skb_pull(chunk->skb,
132 chunk->chunk_end - chunk->skb->data); 132 chunk->chunk_end - chunk->skb->data);
133
134 /* Verify that we have at least chunk headers
135 * worth of buffer left.
136 */
137 if (skb_headlen(chunk->skb) < sizeof(sctp_chunkhdr_t)) {
138 sctp_chunk_free(chunk);
139 chunk = queue->in_progress = NULL;
140 }
133 } 141 }
134 } 142 }
135 143
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 2e34220d94cd..23ae37ec8711 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2499,6 +2499,52 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
2499 return SCTP_ERROR_NO_ERROR; 2499 return SCTP_ERROR_NO_ERROR;
2500} 2500}
2501 2501
2502/* Verify the ASCONF packet before we process it. */
2503int sctp_verify_asconf(const struct sctp_association *asoc,
2504 struct sctp_paramhdr *param_hdr, void *chunk_end,
2505 struct sctp_paramhdr **errp) {
2506 sctp_addip_param_t *asconf_param;
2507 union sctp_params param;
2508 int length, plen;
2509
2510 param.v = (sctp_paramhdr_t *) param_hdr;
2511 while (param.v <= chunk_end - sizeof(sctp_paramhdr_t)) {
2512 length = ntohs(param.p->length);
2513 *errp = param.p;
2514
2515 if (param.v > chunk_end - length ||
2516 length < sizeof(sctp_paramhdr_t))
2517 return 0;
2518
2519 switch (param.p->type) {
2520 case SCTP_PARAM_ADD_IP:
2521 case SCTP_PARAM_DEL_IP:
2522 case SCTP_PARAM_SET_PRIMARY:
2523 asconf_param = (sctp_addip_param_t *)param.v;
2524 plen = ntohs(asconf_param->param_hdr.length);
2525 if (plen < sizeof(sctp_addip_param_t) +
2526 sizeof(sctp_paramhdr_t))
2527 return 0;
2528 break;
2529 case SCTP_PARAM_SUCCESS_REPORT:
2530 case SCTP_PARAM_ADAPTATION_LAYER_IND:
2531 if (length != sizeof(sctp_addip_param_t))
2532 return 0;
2533
2534 break;
2535 default:
2536 break;
2537 }
2538
2539 param.v += WORD_ROUND(length);
2540 }
2541
2542 if (param.v != chunk_end)
2543 return 0;
2544
2545 return 1;
2546}
2547
2502/* Process an incoming ASCONF chunk with the next expected serial no. and 2548/* Process an incoming ASCONF chunk with the next expected serial no. and
2503 * return an ASCONF_ACK chunk to be sent in response. 2549 * return an ASCONF_ACK chunk to be sent in response.
2504 */ 2550 */
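
sctp_verify_asconf() above walks the ASCONF parameter list and rejects any parameter whose declared length is shorter than a header or runs past the chunk end. The same bounds-checked TLV walk, reduced to a user-space sketch with a simplified parameter layout (host byte order, not the SCTP wire format):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct tlv {                 /* simplified parameter header */
        uint16_t type;
        uint16_t length;         /* includes this header */
    };

    #define ROUND4(x) (((x) + 3u) & ~3u)     /* analogue of WORD_ROUND() */

    /* Return 1 if every parameter fits inside buf[0..len) and has a sane
     * length, 0 otherwise -- the same shape as sctp_verify_asconf(). */
    static int verify_params(const uint8_t *buf, size_t len)
    {
        size_t off = 0;

        while (len - off >= sizeof(struct tlv)) {
            struct tlv hdr;
            memcpy(&hdr, buf + off, sizeof(hdr));

            if (hdr.length < sizeof(struct tlv) ||   /* runt parameter  */
                hdr.length > len - off)              /* overruns buffer */
                return 0;

            off += ROUND4(hdr.length);
            if (off > len)                           /* padding overran */
                return 0;
        }
        return off == len;                           /* no trailing junk */
    }

    int main(void)
    {
        uint8_t good[8] = {0}, bad[8] = {0};
        struct tlv t = { .type = 1, .length = 8 };

        memcpy(good, &t, sizeof(t));        /* one well-formed 8-byte parameter */
        t.length = 2;                       /* shorter than its own header      */
        memcpy(bad, &t, sizeof(t));

        printf("good: %d\n", verify_params(good, sizeof(good)));  /* 1 */
        printf("bad : %d\n", verify_params(bad, sizeof(bad)));    /* 0 */
        return 0;
    }
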
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 177528ed3e1b..a583d67cab63 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -90,6 +90,11 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
90 const sctp_subtype_t type, 90 const sctp_subtype_t type,
91 void *arg, 91 void *arg,
92 sctp_cmd_seq_t *commands); 92 sctp_cmd_seq_t *commands);
93static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
94 const struct sctp_association *asoc,
95 const sctp_subtype_t type,
96 void *arg,
97 sctp_cmd_seq_t *commands);
93static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk); 98static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
94 99
95static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands, 100static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
@@ -98,6 +103,7 @@ static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
98 struct sctp_transport *transport); 103 struct sctp_transport *transport);
99 104
100static sctp_disposition_t sctp_sf_abort_violation( 105static sctp_disposition_t sctp_sf_abort_violation(
106 const struct sctp_endpoint *ep,
101 const struct sctp_association *asoc, 107 const struct sctp_association *asoc,
102 void *arg, 108 void *arg,
103 sctp_cmd_seq_t *commands, 109 sctp_cmd_seq_t *commands,
@@ -111,6 +117,13 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
111 void *arg, 117 void *arg,
112 sctp_cmd_seq_t *commands); 118 sctp_cmd_seq_t *commands);
113 119
120static sctp_disposition_t sctp_sf_violation_paramlen(
121 const struct sctp_endpoint *ep,
122 const struct sctp_association *asoc,
123 const sctp_subtype_t type,
124 void *arg,
125 sctp_cmd_seq_t *commands);
126
114static sctp_disposition_t sctp_sf_violation_ctsn( 127static sctp_disposition_t sctp_sf_violation_ctsn(
115 const struct sctp_endpoint *ep, 128 const struct sctp_endpoint *ep,
116 const struct sctp_association *asoc, 129 const struct sctp_association *asoc,
@@ -118,6 +131,13 @@ static sctp_disposition_t sctp_sf_violation_ctsn(
118 void *arg, 131 void *arg,
119 sctp_cmd_seq_t *commands); 132 sctp_cmd_seq_t *commands);
120 133
134static sctp_disposition_t sctp_sf_violation_chunk(
135 const struct sctp_endpoint *ep,
136 const struct sctp_association *asoc,
137 const sctp_subtype_t type,
138 void *arg,
139 sctp_cmd_seq_t *commands);
140
121/* Small helper function that checks if the chunk length 141/* Small helper function that checks if the chunk length
122 * is of the appropriate length. The 'required_length' argument 142 * is of the appropriate length. The 'required_length' argument
123 * is set to be the size of a specific chunk we are testing. 143 * is set to be the size of a specific chunk we are testing.
@@ -181,16 +201,21 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
181 struct sctp_chunk *chunk = arg; 201 struct sctp_chunk *chunk = arg;
182 struct sctp_ulpevent *ev; 202 struct sctp_ulpevent *ev;
183 203
204 if (!sctp_vtag_verify_either(chunk, asoc))
205 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
206
184 /* RFC 2960 6.10 Bundling 207 /* RFC 2960 6.10 Bundling
185 * 208 *
186 * An endpoint MUST NOT bundle INIT, INIT ACK or 209 * An endpoint MUST NOT bundle INIT, INIT ACK or
187 * SHUTDOWN COMPLETE with any other chunks. 210 * SHUTDOWN COMPLETE with any other chunks.
188 */ 211 */
189 if (!chunk->singleton) 212 if (!chunk->singleton)
190 return SCTP_DISPOSITION_VIOLATION; 213 return sctp_sf_violation_chunk(ep, asoc, type, arg, commands);
191 214
192 if (!sctp_vtag_verify_either(chunk, asoc)) 215 /* Make sure that the SHUTDOWN_COMPLETE chunk has a valid length. */
193 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 216 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
217 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
218 commands);
194 219
195 /* RFC 2960 10.2 SCTP-to-ULP 220 /* RFC 2960 10.2 SCTP-to-ULP
196 * 221 *
@@ -450,17 +475,17 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
450 if (!sctp_vtag_verify(chunk, asoc)) 475 if (!sctp_vtag_verify(chunk, asoc))
451 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 476 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
452 477
453 /* Make sure that the INIT-ACK chunk has a valid length */
454 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_initack_chunk_t)))
455 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
456 commands);
457 /* 6.10 Bundling 478 /* 6.10 Bundling
458 * An endpoint MUST NOT bundle INIT, INIT ACK or 479 * An endpoint MUST NOT bundle INIT, INIT ACK or
459 * SHUTDOWN COMPLETE with any other chunks. 480 * SHUTDOWN COMPLETE with any other chunks.
460 */ 481 */
461 if (!chunk->singleton) 482 if (!chunk->singleton)
462 return SCTP_DISPOSITION_VIOLATION; 483 return sctp_sf_violation_chunk(ep, asoc, type, arg, commands);
463 484
485 /* Make sure that the INIT-ACK chunk has a valid length */
486 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_initack_chunk_t)))
487 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
488 commands);
464 /* Grab the INIT header. */ 489 /* Grab the INIT header. */
465 chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data; 490 chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data;
466 491
@@ -585,7 +610,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
585 * control endpoint, respond with an ABORT. 610 * control endpoint, respond with an ABORT.
586 */ 611 */
587 if (ep == sctp_sk((sctp_get_ctl_sock()))->ep) 612 if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
588 return sctp_sf_ootb(ep, asoc, type, arg, commands); 613 return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
589 614
590 /* Make sure that the COOKIE_ECHO chunk has a valid length. 615 /* Make sure that the COOKIE_ECHO chunk has a valid length.
591 * In this case, we check that we have enough for at least a 616 * In this case, we check that we have enough for at least a
@@ -2496,6 +2521,11 @@ sctp_disposition_t sctp_sf_do_9_2_reshutack(const struct sctp_endpoint *ep,
2496 struct sctp_chunk *chunk = (struct sctp_chunk *) arg; 2521 struct sctp_chunk *chunk = (struct sctp_chunk *) arg;
2497 struct sctp_chunk *reply; 2522 struct sctp_chunk *reply;
2498 2523
2524 /* Make sure that the chunk has a valid length */
2525 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
2526 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
2527 commands);
2528
2499 /* Since we are not going to really process this INIT, there 2529 /* Since we are not going to really process this INIT, there
2500 * is no point in verifying chunk boundries. Just generate 2530 * is no point in verifying chunk boundries. Just generate
2501 * the SHUTDOWN ACK. 2531 * the SHUTDOWN ACK.
@@ -2929,7 +2959,7 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep,
2929 * 2959 *
2930 * The return value is the disposition of the chunk. 2960 * The return value is the disposition of the chunk.
2931*/ 2961*/
2932sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep, 2962static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
2933 const struct sctp_association *asoc, 2963 const struct sctp_association *asoc,
2934 const sctp_subtype_t type, 2964 const sctp_subtype_t type,
2935 void *arg, 2965 void *arg,
@@ -2965,6 +2995,7 @@ sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
2965 2995
2966 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); 2996 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
2967 2997
2998 sctp_sf_pdiscard(ep, asoc, type, arg, commands);
2968 return SCTP_DISPOSITION_CONSUME; 2999 return SCTP_DISPOSITION_CONSUME;
2969 } 3000 }
2970 3001
@@ -3125,14 +3156,14 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
3125 3156
3126 ch = (sctp_chunkhdr_t *) chunk->chunk_hdr; 3157 ch = (sctp_chunkhdr_t *) chunk->chunk_hdr;
3127 do { 3158 do {
 3128 /* Break out if chunk length is less than minimal. */ 3159 /* Report violation if the chunk is less than minimal */
3129 if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t)) 3160 if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
3130 break; 3161 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
3131 3162 commands);
3132 ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
3133 if (ch_end > skb_tail_pointer(skb))
3134 break;
3135 3163
3164 /* Now that we know we at least have a chunk header,
3165 * do things that are type appropriate.
3166 */
3136 if (SCTP_CID_SHUTDOWN_ACK == ch->type) 3167 if (SCTP_CID_SHUTDOWN_ACK == ch->type)
3137 ootb_shut_ack = 1; 3168 ootb_shut_ack = 1;
3138 3169
@@ -3144,15 +3175,19 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
3144 if (SCTP_CID_ABORT == ch->type) 3175 if (SCTP_CID_ABORT == ch->type)
3145 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3176 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
3146 3177
3178 /* Report violation if chunk len overflows */
3179 ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
3180 if (ch_end > skb_tail_pointer(skb))
3181 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
3182 commands);
3183
3147 ch = (sctp_chunkhdr_t *) ch_end; 3184 ch = (sctp_chunkhdr_t *) ch_end;
3148 } while (ch_end < skb_tail_pointer(skb)); 3185 } while (ch_end < skb_tail_pointer(skb));
3149 3186
3150 if (ootb_shut_ack) 3187 if (ootb_shut_ack)
3151 sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands); 3188 return sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands);
3152 else 3189 else
3153 sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); 3190 return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
3154
3155 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
3156} 3191}
3157 3192
3158/* 3193/*
@@ -3218,7 +3253,11 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
3218 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) 3253 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
3219 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3254 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
3220 3255
3221 return SCTP_DISPOSITION_CONSUME; 3256 /* We need to discard the rest of the packet to prevent
 3257 * potential bombing attacks from additional bundled chunks.
3258 * This is documented in SCTP Threats ID.
3259 */
3260 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
3222 } 3261 }
3223 3262
3224 return SCTP_DISPOSITION_NOMEM; 3263 return SCTP_DISPOSITION_NOMEM;
@@ -3241,6 +3280,13 @@ sctp_disposition_t sctp_sf_do_8_5_1_E_sa(const struct sctp_endpoint *ep,
3241 void *arg, 3280 void *arg,
3242 sctp_cmd_seq_t *commands) 3281 sctp_cmd_seq_t *commands)
3243{ 3282{
3283 struct sctp_chunk *chunk = arg;
3284
3285 /* Make sure that the SHUTDOWN_ACK chunk has a valid length. */
3286 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
3287 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
3288 commands);
3289
3244 /* Although we do have an association in this case, it corresponds 3290 /* Although we do have an association in this case, it corresponds
3245 * to a restarted association. So the packet is treated as an OOTB 3291 * to a restarted association. So the packet is treated as an OOTB
3246 * packet and the state function that handles OOTB SHUTDOWN_ACK is 3292 * packet and the state function that handles OOTB SHUTDOWN_ACK is
@@ -3257,8 +3303,11 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
3257{ 3303{
3258 struct sctp_chunk *chunk = arg; 3304 struct sctp_chunk *chunk = arg;
3259 struct sctp_chunk *asconf_ack = NULL; 3305 struct sctp_chunk *asconf_ack = NULL;
3306 struct sctp_paramhdr *err_param = NULL;
3260 sctp_addiphdr_t *hdr; 3307 sctp_addiphdr_t *hdr;
3308 union sctp_addr_param *addr_param;
3261 __u32 serial; 3309 __u32 serial;
3310 int length;
3262 3311
3263 if (!sctp_vtag_verify(chunk, asoc)) { 3312 if (!sctp_vtag_verify(chunk, asoc)) {
3264 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, 3313 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
@@ -3274,6 +3323,20 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
3274 hdr = (sctp_addiphdr_t *)chunk->skb->data; 3323 hdr = (sctp_addiphdr_t *)chunk->skb->data;
3275 serial = ntohl(hdr->serial); 3324 serial = ntohl(hdr->serial);
3276 3325
3326 addr_param = (union sctp_addr_param *)hdr->params;
3327 length = ntohs(addr_param->p.length);
3328 if (length < sizeof(sctp_paramhdr_t))
3329 return sctp_sf_violation_paramlen(ep, asoc, type,
3330 (void *)addr_param, commands);
3331
3332 /* Verify the ASCONF chunk before processing it. */
3333 if (!sctp_verify_asconf(asoc,
3334 (sctp_paramhdr_t *)((void *)addr_param + length),
3335 (void *)chunk->chunk_end,
3336 &err_param))
3337 return sctp_sf_violation_paramlen(ep, asoc, type,
3338 (void *)&err_param, commands);
3339
3277 /* ADDIP 4.2 C1) Compare the value of the serial number to the value 3340 /* ADDIP 4.2 C1) Compare the value of the serial number to the value
3278 * the endpoint stored in a new association variable 3341 * the endpoint stored in a new association variable
3279 * 'Peer-Serial-Number'. 3342 * 'Peer-Serial-Number'.
@@ -3328,6 +3391,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
3328 struct sctp_chunk *asconf_ack = arg; 3391 struct sctp_chunk *asconf_ack = arg;
3329 struct sctp_chunk *last_asconf = asoc->addip_last_asconf; 3392 struct sctp_chunk *last_asconf = asoc->addip_last_asconf;
3330 struct sctp_chunk *abort; 3393 struct sctp_chunk *abort;
3394 struct sctp_paramhdr *err_param = NULL;
3331 sctp_addiphdr_t *addip_hdr; 3395 sctp_addiphdr_t *addip_hdr;
3332 __u32 sent_serial, rcvd_serial; 3396 __u32 sent_serial, rcvd_serial;
3333 3397
@@ -3345,6 +3409,14 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
3345 addip_hdr = (sctp_addiphdr_t *)asconf_ack->skb->data; 3409 addip_hdr = (sctp_addiphdr_t *)asconf_ack->skb->data;
3346 rcvd_serial = ntohl(addip_hdr->serial); 3410 rcvd_serial = ntohl(addip_hdr->serial);
3347 3411
3412 /* Verify the ASCONF-ACK chunk before processing it. */
3413 if (!sctp_verify_asconf(asoc,
3414 (sctp_paramhdr_t *)addip_hdr->params,
3415 (void *)asconf_ack->chunk_end,
3416 &err_param))
3417 return sctp_sf_violation_paramlen(ep, asoc, type,
3418 (void *)&err_param, commands);
3419
3348 if (last_asconf) { 3420 if (last_asconf) {
3349 addip_hdr = (sctp_addiphdr_t *)last_asconf->subh.addip_hdr; 3421 addip_hdr = (sctp_addiphdr_t *)last_asconf->subh.addip_hdr;
3350 sent_serial = ntohl(addip_hdr->serial); 3422 sent_serial = ntohl(addip_hdr->serial);
@@ -3655,6 +3727,16 @@ sctp_disposition_t sctp_sf_discard_chunk(const struct sctp_endpoint *ep,
3655 void *arg, 3727 void *arg,
3656 sctp_cmd_seq_t *commands) 3728 sctp_cmd_seq_t *commands)
3657{ 3729{
3730 struct sctp_chunk *chunk = arg;
3731
3732 /* Make sure that the chunk has a valid length.
3733 * Since we don't know the chunk type, we use a general
3734 * chunkhdr structure to make a comparison.
3735 */
3736 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
3737 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
3738 commands);
3739
3658 SCTP_DEBUG_PRINTK("Chunk %d is discarded\n", type.chunk); 3740 SCTP_DEBUG_PRINTK("Chunk %d is discarded\n", type.chunk);
3659 return SCTP_DISPOSITION_DISCARD; 3741 return SCTP_DISPOSITION_DISCARD;
3660} 3742}
@@ -3710,6 +3792,13 @@ sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep,
3710 void *arg, 3792 void *arg,
3711 sctp_cmd_seq_t *commands) 3793 sctp_cmd_seq_t *commands)
3712{ 3794{
3795 struct sctp_chunk *chunk = arg;
3796
3797 /* Make sure that the chunk has a valid length. */
3798 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
3799 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
3800 commands);
3801
3713 return SCTP_DISPOSITION_VIOLATION; 3802 return SCTP_DISPOSITION_VIOLATION;
3714} 3803}
3715 3804
@@ -3717,12 +3806,14 @@ sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep,
3717 * Common function to handle a protocol violation. 3806 * Common function to handle a protocol violation.
3718 */ 3807 */
3719static sctp_disposition_t sctp_sf_abort_violation( 3808static sctp_disposition_t sctp_sf_abort_violation(
3809 const struct sctp_endpoint *ep,
3720 const struct sctp_association *asoc, 3810 const struct sctp_association *asoc,
3721 void *arg, 3811 void *arg,
3722 sctp_cmd_seq_t *commands, 3812 sctp_cmd_seq_t *commands,
3723 const __u8 *payload, 3813 const __u8 *payload,
3724 const size_t paylen) 3814 const size_t paylen)
3725{ 3815{
3816 struct sctp_packet *packet = NULL;
3726 struct sctp_chunk *chunk = arg; 3817 struct sctp_chunk *chunk = arg;
3727 struct sctp_chunk *abort = NULL; 3818 struct sctp_chunk *abort = NULL;
3728 3819
@@ -3731,30 +3822,51 @@ static sctp_disposition_t sctp_sf_abort_violation(
3731 if (!abort) 3822 if (!abort)
3732 goto nomem; 3823 goto nomem;
3733 3824
3734 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); 3825 if (asoc) {
3735 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); 3826 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
3827 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
3736 3828
3737 if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) { 3829 if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) {
3738 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, 3830 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
3739 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); 3831 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
3740 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, 3832 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
3741 SCTP_ERROR(ECONNREFUSED)); 3833 SCTP_ERROR(ECONNREFUSED));
3742 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, 3834 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
3743 SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION)); 3835 SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
3836 } else {
3837 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
3838 SCTP_ERROR(ECONNABORTED));
3839 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
3840 SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
3841 SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
3842 }
3744 } else { 3843 } else {
3745 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, 3844 packet = sctp_ootb_pkt_new(asoc, chunk);
3746 SCTP_ERROR(ECONNABORTED)); 3845
3747 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 3846 if (!packet)
3748 SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION)); 3847 goto nomem_pkt;
3749 SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); 3848
3849 if (sctp_test_T_bit(abort))
3850 packet->vtag = ntohl(chunk->sctp_hdr->vtag);
3851
3852 abort->skb->sk = ep->base.sk;
3853
3854 sctp_packet_append_chunk(packet, abort);
3855
3856 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
3857 SCTP_PACKET(packet));
3858
3859 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
3750 } 3860 }
3751 3861
3752 sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL()); 3862 sctp_sf_pdiscard(ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
3753 3863
3754 SCTP_INC_STATS(SCTP_MIB_ABORTEDS); 3864 SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
3755 3865
3756 return SCTP_DISPOSITION_ABORT; 3866 return SCTP_DISPOSITION_ABORT;
3757 3867
3868nomem_pkt:
3869 sctp_chunk_free(abort);
3758nomem: 3870nomem:
3759 return SCTP_DISPOSITION_NOMEM; 3871 return SCTP_DISPOSITION_NOMEM;
3760} 3872}
@@ -3787,7 +3899,24 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
3787{ 3899{
3788 char err_str[]="The following chunk had invalid length:"; 3900 char err_str[]="The following chunk had invalid length:";
3789 3901
3790 return sctp_sf_abort_violation(asoc, arg, commands, err_str, 3902 return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
3903 sizeof(err_str));
3904}
3905
3906/*
3907 * Handle a protocol violation when the parameter length is invalid.
3908 * "Invalid" length is identified as smaller then the minimal length a
3909 * given parameter can be.
3910 */
3911static sctp_disposition_t sctp_sf_violation_paramlen(
3912 const struct sctp_endpoint *ep,
3913 const struct sctp_association *asoc,
3914 const sctp_subtype_t type,
3915 void *arg,
3916 sctp_cmd_seq_t *commands) {
3917 char err_str[] = "The following parameter had invalid length:";
3918
3919 return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
3791 sizeof(err_str)); 3920 sizeof(err_str));
3792} 3921}
3793 3922
@@ -3806,10 +3935,31 @@ static sctp_disposition_t sctp_sf_violation_ctsn(
3806{ 3935{
3807 char err_str[]="The cumulative tsn ack beyond the max tsn currently sent:"; 3936 char err_str[]="The cumulative tsn ack beyond the max tsn currently sent:";
3808 3937
3809 return sctp_sf_abort_violation(asoc, arg, commands, err_str, 3938 return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
3810 sizeof(err_str)); 3939 sizeof(err_str));
3811} 3940}
3812 3941
3942/* Handle protocol violation of an invalid chunk bundling. For example,
 3943 * when we have an association and we receive bundled INIT-ACK, or
 3944 * SHUTDOWN-COMPLETE, our peer is clearly violating the "MUST NOT bundle"
 3945 * statement from the specs. Additionally, there might be an attacker
3946 * on the path and we may not want to continue this communication.
3947 */
3948static sctp_disposition_t sctp_sf_violation_chunk(
3949 const struct sctp_endpoint *ep,
3950 const struct sctp_association *asoc,
3951 const sctp_subtype_t type,
3952 void *arg,
3953 sctp_cmd_seq_t *commands)
3954{
3955 char err_str[]="The following chunk violates protocol:";
3956
3957 if (!asoc)
3958 return sctp_sf_violation(ep, asoc, type, arg, commands);
3959
3960 return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
3961 sizeof(err_str));
3962}
3813/*************************************************************************** 3963/***************************************************************************
3814 * These are the state functions for handling primitive (Section 10) events. 3964 * These are the state functions for handling primitive (Section 10) events.
3815 ***************************************************************************/ 3965 ***************************************************************************/
@@ -5176,7 +5326,22 @@ static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc
5176 * association exists, otherwise, use the peer's vtag. 5326 * association exists, otherwise, use the peer's vtag.
5177 */ 5327 */
5178 if (asoc) { 5328 if (asoc) {
5179 vtag = asoc->peer.i.init_tag; 5329 /* Special case the INIT-ACK as there is no peer's vtag
5330 * yet.
5331 */
5332 switch(chunk->chunk_hdr->type) {
5333 case SCTP_CID_INIT_ACK:
5334 {
5335 sctp_initack_chunk_t *initack;
5336
5337 initack = (sctp_initack_chunk_t *)chunk->chunk_hdr;
5338 vtag = ntohl(initack->init_hdr.init_tag);
5339 break;
5340 }
5341 default:
5342 vtag = asoc->peer.i.init_tag;
5343 break;
5344 }
5180 } else { 5345 } else {
5181 /* Special case the INIT and stale COOKIE_ECHO as there is no 5346 /* Special case the INIT and stale COOKIE_ECHO as there is no
5182 * vtag yet. 5347 * vtag yet.
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 70a91ece3c49..ddb0ba3974b0 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -110,7 +110,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
110 /* SCTP_STATE_EMPTY */ \ 110 /* SCTP_STATE_EMPTY */ \
111 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 111 TYPE_SCTP_FUNC(sctp_sf_ootb), \
112 /* SCTP_STATE_CLOSED */ \ 112 /* SCTP_STATE_CLOSED */ \
113 TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ 113 TYPE_SCTP_FUNC(sctp_sf_ootb), \
114 /* SCTP_STATE_COOKIE_WAIT */ \ 114 /* SCTP_STATE_COOKIE_WAIT */ \
115 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 115 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
116 /* SCTP_STATE_COOKIE_ECHOED */ \ 116 /* SCTP_STATE_COOKIE_ECHOED */ \
@@ -173,7 +173,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
173 /* SCTP_STATE_EMPTY */ \ 173 /* SCTP_STATE_EMPTY */ \
174 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 174 TYPE_SCTP_FUNC(sctp_sf_ootb), \
175 /* SCTP_STATE_CLOSED */ \ 175 /* SCTP_STATE_CLOSED */ \
176 TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ 176 TYPE_SCTP_FUNC(sctp_sf_ootb), \
177 /* SCTP_STATE_COOKIE_WAIT */ \ 177 /* SCTP_STATE_COOKIE_WAIT */ \
178 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 178 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
179 /* SCTP_STATE_COOKIE_ECHOED */ \ 179 /* SCTP_STATE_COOKIE_ECHOED */ \
@@ -194,7 +194,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
194 /* SCTP_STATE_EMPTY */ \ 194 /* SCTP_STATE_EMPTY */ \
195 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 195 TYPE_SCTP_FUNC(sctp_sf_ootb), \
196 /* SCTP_STATE_CLOSED */ \ 196 /* SCTP_STATE_CLOSED */ \
197 TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ 197 TYPE_SCTP_FUNC(sctp_sf_ootb), \
198 /* SCTP_STATE_COOKIE_WAIT */ \ 198 /* SCTP_STATE_COOKIE_WAIT */ \
199 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 199 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
200 /* SCTP_STATE_COOKIE_ECHOED */ \ 200 /* SCTP_STATE_COOKIE_ECHOED */ \
@@ -216,7 +216,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
216 /* SCTP_STATE_EMPTY */ \ 216 /* SCTP_STATE_EMPTY */ \
217 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 217 TYPE_SCTP_FUNC(sctp_sf_ootb), \
218 /* SCTP_STATE_CLOSED */ \ 218 /* SCTP_STATE_CLOSED */ \
219 TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ 219 TYPE_SCTP_FUNC(sctp_sf_ootb), \
220 /* SCTP_STATE_COOKIE_WAIT */ \ 220 /* SCTP_STATE_COOKIE_WAIT */ \
221 TYPE_SCTP_FUNC(sctp_sf_violation), \ 221 TYPE_SCTP_FUNC(sctp_sf_violation), \
222 /* SCTP_STATE_COOKIE_ECHOED */ \ 222 /* SCTP_STATE_COOKIE_ECHOED */ \
@@ -258,7 +258,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
 	/* SCTP_STATE_EMPTY */ \
 	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
-	TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
+	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
 	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
@@ -300,7 +300,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
 	/* SCTP_STATE_EMPTY */ \
 	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
-	TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
+	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
 	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
@@ -499,7 +499,7 @@ static const sctp_sm_table_entry_t addip_chunk_event_table[SCTP_NUM_ADDIP_CHUNK_
 	/* SCTP_STATE_EMPTY */ \
 	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
-	TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
+	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
 	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
@@ -528,7 +528,7 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
 	/* SCTP_STATE_EMPTY */
 	TYPE_SCTP_FUNC(sctp_sf_ootb),
 	/* SCTP_STATE_CLOSED */
-	TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8),
+	TYPE_SCTP_FUNC(sctp_sf_ootb),
 	/* SCTP_STATE_COOKIE_WAIT */
 	TYPE_SCTP_FUNC(sctp_sf_unk_chunk),
 	/* SCTP_STATE_COOKIE_ECHOED */
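The tables above are arrays of handlers indexed by association state (one table per chunk type), so swapping the SCTP_STATE_CLOSED entry changes how a chunk received in the CLOSED state is dispatched without touching the state machine itself. A minimal sketch of that dispatch-by-table pattern is shown below; the state, handler, and table names are hypothetical stand-ins, not the SCTP ones.

/* Sketch: state-indexed dispatch table (hypothetical names). */
#include <stdio.h>

enum state { ST_CLOSED, ST_COOKIE_WAIT, ST_ESTABLISHED, ST_NUM_STATES };

typedef void (*handler_t)(void);

static void handle_ootb(void)    { puts("treat as out of the blue"); }
static void handle_discard(void) { puts("discard chunk"); }
static void handle_normal(void)  { puts("normal processing"); }

/* One row per state; a real table has one such row per chunk type. */
static const handler_t abort_chunk_table[ST_NUM_STATES] = {
	[ST_CLOSED]      = handle_ootb,
	[ST_COOKIE_WAIT] = handle_discard,
	[ST_ESTABLISHED] = handle_normal,
};

int main(void)
{
	abort_chunk_table[ST_CLOSED]();   /* dispatch on the current state */
	return 0;
}
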
diff --git a/net/socket.c b/net/socket.c
index 7d44453dfae1..b09eb9036a17 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -777,9 +777,6 @@ static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
 	if (pos != 0)
 		return -ESPIPE;
 
-	if (iocb->ki_left == 0)	/* Match SYS5 behaviour */
-		return 0;
-
 	x = alloc_sock_iocb(iocb, &siocb);
 	if (!x)
 		return -ENOMEM;
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 7eabd55417a5..9771451eae21 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -213,7 +213,7 @@ out_fail_notifier:
 out_fail_sysfs:
 	return err;
 }
-module_init(cfg80211_init);
+subsys_initcall(cfg80211_init);
 
 static void cfg80211_exit(void)
 {
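
The one-line change above only moves cfg80211_init to an earlier initcall level: subsys_initcall() runs before the ordinary device/module initcalls for built-in code, so the cfg80211 core is registered before drivers that depend on it. A hedged sketch of the pattern for a built-in kernel component of this era follows; example_core_init is a made-up name.

/* Sketch: register an init function at the subsystem initcall level so it
 * runs before ordinary device initcalls (built-in code only).
 */
#include <linux/init.h>
#include <linux/kernel.h>

static int __init example_core_init(void)
{
	printk(KERN_INFO "example core ready before device initcalls\n");
	return 0;
}
subsys_initcall(example_core_init);
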
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 88aaacd9f822..2d5d2255a27c 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -52,12 +52,14 @@ static void wiphy_dev_release(struct device *dev)
 	cfg80211_dev_free(rdev);
 }
 
+#ifdef CONFIG_HOTPLUG
 static int wiphy_uevent(struct device *dev, char **envp,
 			int num_envp, char *buf, int size)
 {
 	/* TODO, we probably need stuff here */
 	return 0;
 }
+#endif
 
 struct class ieee80211_class = {
 	.name = "ieee80211",
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index f057430db0d0..9b5656d8bcca 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -27,6 +27,7 @@
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
+#include <linux/seq_file.h>
 #include <asm/uaccess.h>
 #include <linux/dma-mapping.h>
 #include <linux/moduleparam.h>
@@ -481,53 +482,54 @@ static void free_all_reserved_pages(void)
 #define SND_MEM_PROC_FILE	"driver/snd-page-alloc"
 static struct proc_dir_entry *snd_mem_proc;
 
-static int snd_mem_proc_read(char *page, char **start, off_t off,
-			     int count, int *eof, void *data)
+static int snd_mem_proc_read(struct seq_file *seq, void *offset)
 {
-	int len = 0;
 	long pages = snd_allocated_pages >> (PAGE_SHIFT-12);
 	struct snd_mem_list *mem;
 	int devno;
 	static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG", "SBUS" };
 
 	mutex_lock(&list_mutex);
-	len += snprintf(page + len, count - len,
-			"pages : %li bytes (%li pages per %likB)\n",
-			pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
+	seq_printf(seq, "pages : %li bytes (%li pages per %likB)\n",
+		   pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
 	devno = 0;
 	list_for_each_entry(mem, &mem_list_head, list) {
 		devno++;
-		len += snprintf(page + len, count - len,
-				"buffer %d : ID %08x : type %s\n",
-				devno, mem->id, types[mem->buffer.dev.type]);
-		len += snprintf(page + len, count - len,
-				"  addr = 0x%lx, size = %d bytes\n",
-				(unsigned long)mem->buffer.addr, (int)mem->buffer.bytes);
+		seq_printf(seq, "buffer %d : ID %08x : type %s\n",
+			   devno, mem->id, types[mem->buffer.dev.type]);
+		seq_printf(seq, "  addr = 0x%lx, size = %d bytes\n",
+			   (unsigned long)mem->buffer.addr,
+			   (int)mem->buffer.bytes);
 	}
 	mutex_unlock(&list_mutex);
-	return len;
+	return 0;
+}
+
+static int snd_mem_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, snd_mem_proc_read, NULL);
 }
 
 /* FIXME: for pci only - other bus? */
 #ifdef CONFIG_PCI
 #define gettoken(bufp) strsep(bufp, " \t\n")
 
-static int snd_mem_proc_write(struct file *file, const char __user *buffer,
-			      unsigned long count, void *data)
+static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer,
+				  size_t count, loff_t * ppos)
 {
 	char buf[128];
 	char *token, *p;
 
-	if (count > ARRAY_SIZE(buf) - 1)
-		count = ARRAY_SIZE(buf) - 1;
+	if (count > sizeof(buf) - 1)
+		return -EINVAL;
 	if (copy_from_user(buf, buffer, count))
 		return -EFAULT;
-	buf[ARRAY_SIZE(buf) - 1] = '\0';
+	buf[count] = '\0';
 
 	p = buf;
 	token = gettoken(&p);
 	if (! token || *token == '#')
-		return (int)count;
+		return count;
 	if (strcmp(token, "add") == 0) {
 		char *endp;
 		int vendor, device, size, buffers;
@@ -548,7 +550,7 @@ static int snd_mem_proc_write(struct file *file, const char __user *buffer,
 		    (buffers = simple_strtol(token, NULL, 0)) <= 0 ||
 		    buffers > 4) {
 			printk(KERN_ERR "snd-page-alloc: invalid proc write format\n");
-			return (int)count;
+			return count;
 		}
 		vendor &= 0xffff;
 		device &= 0xffff;
@@ -560,7 +562,7 @@ static int snd_mem_proc_write(struct file *file, const char __user *buffer,
 		if (pci_set_dma_mask(pci, mask) < 0 ||
 		    pci_set_consistent_dma_mask(pci, mask) < 0) {
 			printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, device);
-			return (int)count;
+			return count;
 		}
 	}
 	for (i = 0; i < buffers; i++) {
@@ -570,7 +572,7 @@ static int snd_mem_proc_write(struct file *file, const char __user *buffer,
 				    size, &dmab) < 0) {
 				printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
 				pci_dev_put(pci);
-				return (int)count;
+				return count;
 			}
 			snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));
 		}
@@ -596,9 +598,21 @@ static int snd_mem_proc_write(struct file *file, const char __user *buffer,
 		free_all_reserved_pages();
 	else
 		printk(KERN_ERR "snd-page-alloc: invalid proc cmd\n");
-	return (int)count;
+	return count;
 }
 #endif /* CONFIG_PCI */
+
+static const struct file_operations snd_mem_proc_fops = {
+	.owner = THIS_MODULE,
+	.open = snd_mem_proc_open,
+	.read = seq_read,
+#ifdef CONFIG_PCI
+	.write = snd_mem_proc_write,
+#endif
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
 #endif /* CONFIG_PROC_FS */
 
 /*
@@ -609,12 +623,8 @@ static int __init snd_mem_init(void)
 {
 #ifdef CONFIG_PROC_FS
 	snd_mem_proc = create_proc_entry(SND_MEM_PROC_FILE, 0644, NULL);
-	if (snd_mem_proc) {
-		snd_mem_proc->read_proc = snd_mem_proc_read;
-#ifdef CONFIG_PCI
-		snd_mem_proc->write_proc = snd_mem_proc_write;
-#endif
-	}
+	if (snd_mem_proc)
+		snd_mem_proc->proc_fops = &snd_mem_proc_fops;
 #endif
 	return 0;
 }
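
The memalloc.c conversion above replaces a hand-rolled read_proc/write_proc pair with the seq_file single_open() helpers and a file_operations table attached through proc_fops. A minimal sketch of that pattern for a kernel of this era (where create_proc_entry() still exists) is given below; the proc entry name, function names, and output string are placeholders, not anything from the patch.

/* Sketch: expose a read-only proc file through seq_file (single_open). */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_proc_show(struct seq_file *seq, void *offset)
{
	/* single_open() arranges for this to be called once per read */
	seq_printf(seq, "hello from seq_file\n");
	return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_proc_show, NULL);
}

static const struct file_operations example_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = example_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init example_proc_init(void)
{
	struct proc_dir_entry *p;

	p = create_proc_entry("driver/example-seq", 0444, NULL);
	if (p)
		p->proc_fops = &example_proc_fops;
	return 0;
}

static void __exit example_proc_exit(void)
{
	remove_proc_entry("driver/example-seq", NULL);
}

module_init(example_proc_init);
module_exit(example_proc_exit);
MODULE_LICENSE("GPL");
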