-rw-r--r--  .gitignore | 1
-rw-r--r--  Documentation/fb/framebuffer.txt | 6
-rw-r--r--  Documentation/filesystems/caching/fscache.txt | 110
-rw-r--r--  Documentation/filesystems/caching/netfs-api.txt | 21
-rw-r--r--  Documentation/filesystems/ocfs2.txt | 6
-rw-r--r--  Documentation/slow-work.txt | 160
-rw-r--r--  MAINTAINERS | 60
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/include/asm/kmap_types.h | 6
-rw-r--r--  arch/arm/kernel/signal.c | 8
-rw-r--r--  arch/arm/mach-pxa/cpufreq-pxa2xx.c | 1
-rw-r--r--  arch/arm/mach-pxa/cpufreq-pxa3xx.c | 2
-rw-r--r--  arch/arm/mach-pxa/spitz.c | 2
-rw-r--r--  arch/arm/tools/mach-types | 119
-rw-r--r--  arch/blackfin/kernel/bfin_dma_5xx.c | 9
-rw-r--r--  arch/blackfin/kernel/cplb-mpu/cplbinit.c | 2
-rw-r--r--  arch/blackfin/kernel/process.c | 2
-rw-r--r--  arch/blackfin/kernel/ptrace.c | 2
-rw-r--r--  arch/blackfin/mach-bf518/include/mach/anomaly.h | 18
-rw-r--r--  arch/blackfin/mach-bf527/include/mach/anomaly.h | 20
-rw-r--r--  arch/blackfin/mach-bf533/include/mach/anomaly.h | 18
-rw-r--r--  arch/blackfin/mach-bf537/include/mach/anomaly.h | 18
-rw-r--r--  arch/blackfin/mach-bf538/include/mach/anomaly.h | 18
-rw-r--r--  arch/blackfin/mach-bf548/include/mach/anomaly.h | 23
-rw-r--r--  arch/blackfin/mach-bf561/atomic.S | 14
-rw-r--r--  arch/blackfin/mach-bf561/include/mach/anomaly.h | 25
-rw-r--r--  arch/blackfin/mach-common/arch_checks.c | 5
-rw-r--r--  arch/blackfin/mach-common/smp.c | 6
-rw-r--r--  arch/parisc/kernel/unwind.c | 2
-rw-r--r--  arch/parisc/kernel/vmlinux.lds.S | 6
-rw-r--r--  arch/powerpc/include/asm/kmap_types.h | 11
-rw-r--r--  arch/sparc/mm/init_64.h | 2
-rw-r--r--  arch/x86/kernel/acpi/processor.c | 3
-rw-r--r--  crypto/async_tx/Kconfig | 5
-rw-r--r--  crypto/async_tx/async_pq.c | 14
-rw-r--r--  crypto/async_tx/async_xor.c | 15
-rw-r--r--  crypto/gcm.c | 107
-rw-r--r--  drivers/acpi/acpica/acpredef.h | 5
-rw-r--r--  drivers/acpi/blacklist.c | 17
-rw-r--r--  drivers/acpi/sleep.c | 8
-rw-r--r--  drivers/ata/sata_fsl.c | 84
-rw-r--r--  drivers/base/power/runtime.c | 12
-rw-r--r--  drivers/block/cciss.c | 16
-rw-r--r--  drivers/char/agp/intel-agp.c | 8
-rw-r--r--  drivers/char/keyboard.c | 2
-rw-r--r--  drivers/char/vt_ioctl.c | 6
-rw-r--r--  drivers/crypto/padlock-aes.c | 4
-rw-r--r--  drivers/dma/Kconfig | 2
-rw-r--r--  drivers/dma/dmaengine.c | 10
-rw-r--r--  drivers/dma/ioat/dca.c | 6
-rw-r--r--  drivers/dma/ioat/dma.h | 4
-rw-r--r--  drivers/dma/ioat/dma_v2.c | 2
-rw-r--r--  drivers/dma/ioat/dma_v3.c | 44
-rw-r--r--  drivers/dma/ioat/hw.h | 2
-rw-r--r--  drivers/dma/ioat/registers.h | 4
-rw-r--r--  drivers/dma/shdma.c | 12
-rw-r--r--  drivers/firewire/ohci.c | 41
-rw-r--r--  drivers/gpu/drm/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 6
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 6
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 2
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 36
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 26
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_agp.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 9
-rw-r--r--  drivers/i2c/busses/i2c-pnx.c | 9
-rw-r--r--  drivers/i2c/chips/tsl2550.c | 3
-rw-r--r--  drivers/i2c/i2c-core.c | 11
-rw-r--r--  drivers/ide/ide-ioctls.c | 2
-rw-r--r--  drivers/media/common/ir-functions.c | 2
-rw-r--r--  drivers/media/dvb/dvb-usb/cxusb.c | 10
-rw-r--r--  drivers/media/dvb/siano/Kconfig | 2
-rw-r--r--  drivers/media/radio/radio-gemtek-pci.c | 2
-rw-r--r--  drivers/media/video/davinci/vpif_display.c | 1
-rw-r--r--  drivers/media/video/em28xx/em28xx-cards.c | 2
-rw-r--r--  drivers/media/video/mx1_camera.c | 1
-rw-r--r--  drivers/media/video/mx3_camera.c | 1
-rw-r--r--  drivers/media/video/sh_mobile_ceu_camera.c | 7
-rw-r--r--  drivers/media/video/soc_camera.c | 17
-rw-r--r--  drivers/media/video/videobuf-dma-contig.c | 1
-rw-r--r--  drivers/misc/eeprom/at24.c | 76
-rw-r--r--  drivers/net/arm/ep93xx_eth.c | 12
-rw-r--r--  drivers/net/b44.c | 3
-rw-r--r--  drivers/net/e100.c | 17
-rw-r--r--  drivers/net/r8169.c | 7
-rw-r--r--  drivers/net/smc91x.c | 2
-rw-r--r--  drivers/net/smsc9420.c | 14
-rw-r--r--  drivers/net/wan/cosa.c | 1
-rw-r--r--  drivers/pci/dmar.c | 3
-rw-r--r--  drivers/platform/x86/acerhdf.c | 12
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 26
-rw-r--r--  drivers/scsi/hosts.c | 2
-rw-r--r--  drivers/scsi/scsi_scan.c | 18
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 63
-rw-r--r--  drivers/scsi/sd_dif.c | 2
-rw-r--r--  drivers/serial/suncore.c | 37
-rw-r--r--  drivers/serial/suncore.h | 5
-rw-r--r--  drivers/serial/sunhv.c | 2
-rw-r--r--  drivers/serial/sunsab.c | 9
-rw-r--r--  drivers/serial/sunsu.c | 36
-rw-r--r--  drivers/serial/sunzilog.c | 8
-rw-r--r--  drivers/staging/go7007/s2250-board.c | 4
-rw-r--r--  drivers/staging/go7007/s2250-loader.h | 24
-rw-r--r--  drivers/staging/octeon/ethernet-mdio.c | 2
-rw-r--r--  drivers/staging/octeon/ethernet-spi.c | 2
-rw-r--r--  drivers/staging/octeon/ethernet.c | 53
-rw-r--r--  drivers/watchdog/pnx4008_wdt.c | 4
-rw-r--r--  fs/9p/cache.c | 14
-rw-r--r--  fs/afs/file.c | 15
-rw-r--r--  fs/cachefiles/interface.c | 32
-rw-r--r--  fs/cachefiles/namei.c | 187
-rw-r--r--  fs/cachefiles/rdwr.c | 128
-rw-r--r--  fs/cifs/CHANGES | 9
-rw-r--r--  fs/cifs/cifsfs.c | 2
-rw-r--r--  fs/cifs/dir.c | 8
-rw-r--r--  fs/fscache/Kconfig | 7
-rw-r--r--  fs/fscache/Makefile | 1
-rw-r--r--  fs/fscache/cache.c | 5
-rw-r--r--  fs/fscache/cookie.c | 26
-rw-r--r--  fs/fscache/internal.h | 56
-rw-r--r--  fs/fscache/main.c | 6
-rw-r--r--  fs/fscache/object-list.c | 432
-rw-r--r--  fs/fscache/object.c | 104
-rw-r--r--  fs/fscache/operation.c | 120
-rw-r--r--  fs/fscache/page.c | 273
-rw-r--r--  fs/fscache/proc.c | 13
-rw-r--r--  fs/fscache/stats.c | 94
-rw-r--r--  fs/fuse/dir.c | 3
-rw-r--r--  fs/gfs2/main.c | 4
-rw-r--r--  fs/gfs2/recovery.c | 2
-rw-r--r--  fs/jffs2/read.c | 9
-rw-r--r--  fs/nfs/fscache.c | 10
-rw-r--r--  fs/nfs/nfs4proc.c | 2
-rw-r--r--  fs/ocfs2/file.c | 3
-rw-r--r--  fs/ocfs2/ocfs2.h | 7
-rw-r--r--  fs/ocfs2/refcounttree.c | 69
-rw-r--r--  fs/ocfs2/super.c | 20
-rw-r--r--  fs/ocfs2/uptodate.c | 5
-rw-r--r--  include/linux/fscache-cache.h | 40
-rw-r--r--  include/linux/fscache.h | 27
-rw-r--r--  include/linux/i2c-pnx.h | 2
-rw-r--r--  include/linux/slow-work.h | 72
-rw-r--r--  include/linux/vt.h | 4
-rw-r--r--  include/net/sctp/structs.h | 1
-rw-r--r--  include/scsi/scsi_device.h | 1
-rw-r--r--  init/Kconfig | 10
-rw-r--r--  kernel/Makefile | 1
-rw-r--r--  kernel/slow-work-proc.c | 227
-rw-r--r--  kernel/slow-work.c | 494
-rw-r--r--  kernel/slow-work.h | 72
-rw-r--r--  lib/radix-tree.c | 5
-rw-r--r--  mm/backing-dev.c | 8
-rw-r--r--  net/ipv4/ip_fragment.c | 2
-rw-r--r--  net/sctp/outqueue.c | 10
-rw-r--r--  net/sctp/sm_sideeffect.c | 1
-rw-r--r--  net/sctp/transport.c | 5
-rw-r--r--  net/sunrpc/addr.c | 18
-rw-r--r--  scripts/kconfig/Makefile | 4
-rw-r--r--  scripts/kconfig/streamline_config.pl | 12
-rw-r--r--  sound/arm/aaci.c | 6
-rw-r--r--  sound/soc/codecs/tlv320aic23.c | 3
-rw-r--r--  sound/soc/soc-dapm.c | 20
172 files changed, 3767 insertions, 769 deletions
diff --git a/.gitignore b/.gitignore
index b93fb7eff942..946c7ec5c922 100644
--- a/.gitignore
+++ b/.gitignore
@@ -25,6 +25,7 @@
 *.elf
 *.bin
 *.gz
+*.bz2
 *.lzma
 *.patch
 *.gcno
diff --git a/Documentation/fb/framebuffer.txt b/Documentation/fb/framebuffer.txt
index b3e3a0356839..fe79e3c8847d 100644
--- a/Documentation/fb/framebuffer.txt
+++ b/Documentation/fb/framebuffer.txt
@@ -312,10 +312,8 @@ and to the following documentation:
 8. Mailing list
 ---------------
 
-There are several frame buffer device related mailing lists at SourceForge:
- - linux-fbdev-announce@lists.sourceforge.net, for announcements,
- - linux-fbdev-user@lists.sourceforge.net, for generic user support,
- - linux-fbdev-devel@lists.sourceforge.net, for project developers.
+There is a frame buffer device related mailing list at kernel.org:
+linux-fbdev@vger.kernel.org.
 
 Point your web browser to http://sourceforge.net/projects/linux-fbdev/ for
 subscription information and archive browsing.
diff --git a/Documentation/filesystems/caching/fscache.txt b/Documentation/filesystems/caching/fscache.txt
index 9e94b9491d89..a91e2e2095b0 100644
--- a/Documentation/filesystems/caching/fscache.txt
+++ b/Documentation/filesystems/caching/fscache.txt
@@ -235,6 +235,7 @@ proc files.
 	neg=N	Number of negative lookups made
 	pos=N	Number of positive lookups made
 	crt=N	Number of objects created by lookup
+	tmo=N	Number of lookups timed out and requeued
 Updates	n=N	Number of update cookie requests seen
 	nul=N	Number of upd reqs given a NULL parent
 	run=N	Number of upd reqs granted CPU time
@@ -250,8 +251,10 @@ proc files.
 	ok=N	Number of successful alloc reqs
 	wt=N	Number of alloc reqs that waited on lookup completion
 	nbf=N	Number of alloc reqs rejected -ENOBUFS
+	int=N	Number of alloc reqs aborted -ERESTARTSYS
 	ops=N	Number of alloc reqs submitted
 	owt=N	Number of alloc reqs waited for CPU time
+	abt=N	Number of alloc reqs aborted due to object death
 Retrvls	n=N	Number of retrieval (read) requests seen
 	ok=N	Number of successful retr reqs
 	wt=N	Number of retr reqs that waited on lookup completion
@@ -261,6 +264,7 @@ proc files.
 	oom=N	Number of retr reqs failed -ENOMEM
 	ops=N	Number of retr reqs submitted
 	owt=N	Number of retr reqs waited for CPU time
+	abt=N	Number of retr reqs aborted due to object death
 Stores	n=N	Number of storage (write) requests seen
 	ok=N	Number of successful store reqs
 	agn=N	Number of store reqs on a page already pending storage
@@ -268,12 +272,37 @@ proc files.
 	oom=N	Number of store reqs failed -ENOMEM
 	ops=N	Number of store reqs submitted
 	run=N	Number of store reqs granted CPU time
+	pgs=N	Number of pages given store req processing time
+	rxd=N	Number of store reqs deleted from tracking tree
+	olm=N	Number of store reqs over store limit
+VmScan	nos=N	Number of release reqs against pages with no pending store
+	gon=N	Number of release reqs against pages stored by time lock granted
+	bsy=N	Number of release reqs ignored due to in-progress store
+	can=N	Number of page stores cancelled due to release req
 Ops	pend=N	Number of times async ops added to pending queues
 	run=N	Number of times async ops given CPU time
 	enq=N	Number of times async ops queued for processing
+	can=N	Number of async ops cancelled
+	rej=N	Number of async ops rejected due to object lookup/create failure
 	dfr=N	Number of async ops queued for deferred release
 	rel=N	Number of async ops released
 	gc=N	Number of deferred-release async ops garbage collected
+CacheOp	alo=N	Number of in-progress alloc_object() cache ops
+	luo=N	Number of in-progress lookup_object() cache ops
+	luc=N	Number of in-progress lookup_complete() cache ops
+	gro=N	Number of in-progress grab_object() cache ops
+	upo=N	Number of in-progress update_object() cache ops
+	dro=N	Number of in-progress drop_object() cache ops
+	pto=N	Number of in-progress put_object() cache ops
+	syn=N	Number of in-progress sync_cache() cache ops
+	atc=N	Number of in-progress attr_changed() cache ops
+	rap=N	Number of in-progress read_or_alloc_page() cache ops
+	ras=N	Number of in-progress read_or_alloc_pages() cache ops
+	alp=N	Number of in-progress allocate_page() cache ops
+	als=N	Number of in-progress allocate_pages() cache ops
+	wrp=N	Number of in-progress write_page() cache ops
+	ucp=N	Number of in-progress uncache_page() cache ops
+	dsp=N	Number of in-progress dissociate_pages() cache ops
 
 
 (*) /proc/fs/fscache/histogram
@@ -299,6 +328,87 @@ proc files.
     jiffy range covered, and the SECS field the equivalent number of seconds.
 
 
+===========
+OBJECT LIST
+===========
+
+If CONFIG_FSCACHE_OBJECT_LIST is enabled, the FS-Cache facility will maintain a
+list of all the objects currently allocated and allow them to be viewed
+through:
+
+	/proc/fs/fscache/objects
+
+This will look something like:
+
+	[root@andromeda ~]# head /proc/fs/fscache/objects
+	OBJECT   PARENT   STAT CHLDN OPS OOP IPR EX READS EM EV F S | NETFS_COOKIE_DEF TY FL NETFS_DATA       OBJECT_KEY, AUX_DATA
+	======== ======== ==== ===== === === === == ===== == == = = | ================ == == ================ ================
+	   17e4b        2 ACTV     0   0   0   0  0     0 7b  4 0 8 | NFS.fh           DT  0 ffff88001dd82820 010006017edcf8bbc93b43298fdfbe71e50b57b13a172c0117f38472, e567634700000000000000000000000063f2404a000000000000000000000000c9030000000000000000000063f2404a
+	   1693a        2 ACTV     0   0   0   0  0     0 7b  4 0 8 | NFS.fh           DT  0 ffff88002db23380 010006017edcf8bbc93b43298fdfbe71e50b57b1e0162c01a2df0ea6, 420ebc4a000000000000000000000000420ebc4a0000000000000000000000000e1801000000000000000000420ebc4a
+
+where the first set of columns before the '|' describe the object:
+
+	COLUMN	DESCRIPTION
+	=======	===============================================================
+	OBJECT	Object debugging ID (appears as OBJ%x in some debug messages)
+	PARENT	Debugging ID of parent object
+	STAT	Object state
+	CHLDN	Number of child objects of this object
+	OPS	Number of outstanding operations on this object
+	OOP	Number of outstanding child object management operations
+	IPR
+	EX	Number of outstanding exclusive operations
+	READS	Number of outstanding read operations
+	EM	Object's event mask
+	EV	Events raised on this object
+	F	Object flags
+	S	Object slow-work work item flags
+
+and the second set of columns describe the object's cookie, if present:
+
+	COLUMN		DESCRIPTION
+	===============	=======================================================
+	NETFS_COOKIE_DEF Name of netfs cookie definition
+	TY		Cookie type (IX - index, DT - data, hex - special)
+	FL		Cookie flags
+	NETFS_DATA	Netfs private data stored in the cookie
+	OBJECT_KEY	Object key	} 1 column, with separating comma
+	AUX_DATA	Object aux data	} presence may be configured
+
+The data shown may be filtered by attaching a key to an appropriate keyring
+before viewing the file.  Something like:
+
+	keyctl add user fscache:objlist <restrictions> @s
+
+where <restrictions> are a selection of the following letters:
+
+	K	Show hexdump of object key (don't show if not given)
+	A	Show hexdump of object aux data (don't show if not given)
+
+and the following paired letters:
+
+	C	Show objects that have a cookie
+	c	Show objects that don't have a cookie
+	B	Show objects that are busy
+	b	Show objects that aren't busy
+	W	Show objects that have pending writes
+	w	Show objects that don't have pending writes
+	R	Show objects that have outstanding reads
+	r	Show objects that don't have outstanding reads
+	S	Show objects that have slow work queued
+	s	Show objects that don't have slow work queued
+
+If neither side of a letter pair is given, then both are implied.  For example:
+
+	keyctl add user fscache:objlist KB @s
+
+shows objects that are busy, and lists their object keys, but does not dump
+their auxiliary data.  It also implies "CcWwRrSs", but as 'B' is given, 'b' is
+not implied.
+
+By default all objects and all fields will be shown.
+
+
 =========
 DEBUGGING
 =========
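
As an aside on the object-list filtering described in the new text above: the
restriction key can also be attached programmatically rather than with the
keyctl(1) command.  The following is a hedged userspace sketch only, not code
from this patch - it uses add_key(2) via libkeyutils and assumes the same "KB"
restriction string as the document's keyctl example:

	/* Userspace sketch: equivalent of "keyctl add user fscache:objlist KB @s".
	 * Build with -lkeyutils. */
	#include <keyutils.h>
	#include <stdio.h>

	int main(void)
	{
		key_serial_t key = add_key("user", "fscache:objlist", "KB", 2,
					   KEY_SPEC_SESSION_KEYRING);
		if (key == -1) {
			perror("add_key");
			return 1;
		}
		printf("filter key %d attached to session keyring\n", key);
		return 0;
	}
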
diff --git a/Documentation/filesystems/caching/netfs-api.txt b/Documentation/filesystems/caching/netfs-api.txt
index 2666b1ed5e9e..1902c57b72ef 100644
--- a/Documentation/filesystems/caching/netfs-api.txt
+++ b/Documentation/filesystems/caching/netfs-api.txt
@@ -641,7 +641,7 @@ data file must be retired (see the relinquish cookie function below).
 
 Furthermore, note that this does not cancel the asynchronous read or write
 operation started by the read/alloc and write functions, so the page
-invalidation and release functions must use:
+invalidation functions must use:
 
 	bool fscache_check_page_write(struct fscache_cookie *cookie,
 				      struct page *page);
@@ -654,6 +654,25 @@ to see if a page is being written to the cache, and:
 to wait for it to finish if it is.
 
 
+When releasepage() is being implemented, a special FS-Cache function exists to
+manage the heuristics of coping with vmscan trying to eject pages, which may
+conflict with the cache trying to write pages to the cache (which may itself
+need to allocate memory):
+
+	bool fscache_maybe_release_page(struct fscache_cookie *cookie,
+					struct page *page,
+					gfp_t gfp);
+
+This takes the netfs cookie, and the page and gfp arguments as supplied to
+releasepage().  It will return false if the page cannot be released yet for
+some reason; if it returns true, the page has been uncached and can now be
+released.
+
+To make a page available for release, this function may wait for an outstanding
+storage request to complete, or it may attempt to cancel the storage request -
+in which case the page will not be stored in the cache this time.
+
+
 ==========================
 INDEX AND DATA FILE UPDATE
 ==========================
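
The hunk above only states the contract of fscache_maybe_release_page(); as a
rough illustration, a netfs ->releasepage() built on it might look like the
sketch below.  This is a hypothetical minimal example, not code from this
patch: my_netfs_i_cookie() is an assumed per-inode cookie accessor, and a real
filesystem would also have to deal with its own private page state:

	/* Hypothetical netfs releasepage() using fscache_maybe_release_page(). */
	static int my_netfs_release_page(struct page *page, gfp_t gfp)
	{
		struct inode *inode = page->mapping->host;
		struct fscache_cookie *cookie = my_netfs_i_cookie(inode);

		/* Let FS-Cache decide whether the page can be freed: it may
		 * wait for, or cancel, a store that is pending on the page. */
		if (cookie && PageFsCache(page) &&
		    !fscache_maybe_release_page(cookie, page, gfp))
			return 0;	/* can't release yet */

		/* ... release any netfs-private state attached to the page ... */
		return 1;
	}
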
diff --git a/Documentation/filesystems/ocfs2.txt b/Documentation/filesystems/ocfs2.txt
index c2a0871280a0..c58b9f5ba002 100644
--- a/Documentation/filesystems/ocfs2.txt
+++ b/Documentation/filesystems/ocfs2.txt
@@ -20,15 +20,16 @@ Lots of code taken from ext3 and other projects.
 Authors in alphabetical order:
 Joel Becker	<joel.becker@oracle.com>
 Zach Brown	<zach.brown@oracle.com>
-Mark Fasheh	<mark.fasheh@oracle.com>
+Mark Fasheh	<mfasheh@suse.com>
 Kurt Hackel	<kurt.hackel@oracle.com>
+Tao Ma		<tao.ma@oracle.com>
 Sunil Mushran	<sunil.mushran@oracle.com>
 Manish Singh	<manish.singh@oracle.com>
+Tiger Yang	<tiger.yang@oracle.com>
 
 Caveats
 =======
 Features which OCFS2 does not support yet:
-	- quotas
 	- Directory change notification (F_NOTIFY)
 	- Distributed Caching (F_SETLEASE/F_GETLEASE/break_lease)
 
@@ -70,7 +71,6 @@ commit=nrsec (*) Ocfs2 can be told to sync all its data and metadata
 			performance.
 localalloc=8(*)	Allows custom localalloc size in MB. If the value is too
 			large, the fs will silently revert it to the default.
-			Localalloc is not enabled for local mounts.
 localflocks		This disables cluster aware flock.
 inode64			Indicates that Ocfs2 is allowed to create inodes at
 			any location in the filesystem, including those which
diff --git a/Documentation/slow-work.txt b/Documentation/slow-work.txt
index ebc50f808ea4..52bc31433723 100644
--- a/Documentation/slow-work.txt
+++ b/Documentation/slow-work.txt
@@ -41,6 +41,13 @@ expand files, provided the time taken to do so isn't too long.
 Operations of both types may sleep during execution, thus tying up the thread
 loaned to it.
 
+A further class of work item is available, based on the slow work item class:
+
+ (*) Delayed slow work items.
+
+These are slow work items that have a timer to defer queueing of the item for
+a while.
+
 
 THREAD-TO-CLASS ALLOCATION
 --------------------------
@@ -64,9 +71,11 @@ USING SLOW WORK ITEMS
 Firstly, a module or subsystem wanting to make use of slow work items must
 register its interest:
 
-	 int ret = slow_work_register_user();
+	 int ret = slow_work_register_user(struct module *module);
 
-This will return 0 if successful, or a -ve error upon failure.
+This will return 0 if successful, or a -ve error upon failure.  The module
+pointer should be the module interested in using this facility (almost
+certainly THIS_MODULE).
 
 
 Slow work items may then be set up by:
@@ -93,6 +102,10 @@ Slow work items may then be set up by:
 
 or:
 
+	delayed_slow_work_init(&myitem, &myitem_ops);
+
+or:
+
 	vslow_work_init(&myitem, &myitem_ops);
 
 depending on its class.
@@ -102,15 +115,92 @@ A suitably set up work item can then be enqueued for processing:
 	int ret = slow_work_enqueue(&myitem);
 
 This will return a -ve error if the thread pool is unable to gain a reference
-on the item, 0 otherwise.
+on the item, 0 otherwise, or (for delayed work):
+
+	int ret = delayed_slow_work_enqueue(&myitem, my_jiffy_delay);
 
 
 The items are reference counted, so there ought to be no need for a flush
-operation.  When all a module's slow work items have been processed, and the
+operation.  But as the reference counting is optional, means to cancel
+existing work items are also included:
+
+	cancel_slow_work(&myitem);
+	cancel_delayed_slow_work(&myitem);
+
+can be used to cancel pending work.  The above cancel functions wait for
+existing work to have been executed (or prevent its execution, depending
+on timing).
+
+
+When all a module's slow work items have been processed, and the
 module has no further interest in the facility, it should unregister its
 interest:
 
-	slow_work_unregister_user();
+	slow_work_unregister_user(struct module *module);
+
+The module pointer is used to wait for all outstanding work items for that
+module before completing the unregistration.  This prevents the put_ref() code
+from being taken away before it completes.  module should almost certainly be
+THIS_MODULE.
+
+
+================
+HELPER FUNCTIONS
+================
+
+The slow-work facility provides a function by which it can be determined
+whether or not an item is queued for later execution:
+
+	bool queued = slow_work_is_queued(struct slow_work *work);
+
+If it returns false, then the item is not on the queue (it may be executing
+with a requeue pending).  This can be used to work out whether an item on which
+another depends is on the queue, thus allowing a dependent item to be queued
+after it.
+
+If the above shows an item on which another depends not to be queued, then the
+owner of the dependent item might need to wait.  However, to avoid locking up
+the threads unnecessarily by sleeping in them, it can make sense under some
+circumstances to return the work item to the queue, thus deferring it until
+some other items have had a chance to make use of the yielded thread.
+
+To yield a thread and defer an item, the work function should simply enqueue
+the work item again and return.  However, this doesn't work if there's nothing
+actually on the queue, as the thread just vacated will jump straight back into
+the item's work function, thus busy waiting on a CPU.
+
+Instead, the item should use the thread to wait for the dependency to go away,
+but rather than using schedule() or schedule_timeout() to sleep, it should use
+the following function:
+
+	bool requeue = slow_work_sleep_till_thread_needed(
+			struct slow_work *work,
+			signed long *_timeout);
+
+This will add a second wait and then sleep, such that it will be woken up if
+either something appears on the queue that could usefully make use of the
+thread - and behind which this item can be queued, or if the event the caller
+set up to wait for happens.  True will be returned if something else appeared
+on the queue and this work function should perhaps return, or false if
+something else woke it up.  The timeout is as for schedule_timeout().
+
+For example:
+
+	wq = bit_waitqueue(&my_flags, MY_BIT);
+	init_wait(&wait);
+	requeue = false;
+	do {
+		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
+		if (!test_bit(MY_BIT, &my_flags))
+			break;
+		requeue = slow_work_sleep_till_thread_needed(&my_work,
+							     &timeout);
+	} while (timeout > 0 && !requeue);
+	finish_wait(wq, &wait);
+	if (!test_bit(MY_BIT, &my_flags))
+		goto do_my_thing;
+	if (requeue)
+		return; /* to slow_work */
 
 
 ===============
@@ -118,7 +208,8 @@ ITEM OPERATIONS
 ===============
 
 Each work item requires a table of operations of type struct slow_work_ops.
-All members are required:
+Only ->execute() is required; the getting and putting of a reference and the
+describing of an item are all optional.
 
  (*) Get a reference on an item:
 
@@ -148,6 +239,16 @@ All members are required:
     This should perform the work required of the item.  It may sleep, it may
     perform disk I/O and it may wait for locks.
 
+ (*) View an item through /proc:
+
+	void (*desc)(struct slow_work *work, struct seq_file *m);
+
+     If supplied, this should print to 'm' a small string describing the work
+     the item is to do.  This should be no more than about 40 characters, and
+     shouldn't include a newline character.
+
+     See the 'Viewing executing and queued items' section below.
+
 
 ==================
 POOL CONFIGURATION
@@ -172,3 +273,50 @@ The slow-work thread pool has a number of configurables:
     is bounded to between 1 and one fewer than the number of active threads.
     This ensures there is always at least one thread that can process very
     slow work items, and always at least one thread that won't.
+
+
+==================================
+VIEWING EXECUTING AND QUEUED ITEMS
+==================================
+
+If CONFIG_SLOW_WORK_PROC is enabled, a proc file is made available:
+
+	/proc/slow_work_rq
+
+through which the list of work items being executed and the queues of items to
+be executed may be viewed.  The owner of a work item is given the chance to
+add some information of its own.
+
+The contents look something like the following:
+
+	THR PID   ITEM ADDR        FL MARK  DESC
+	=== ===== ================ == ===== ==========
+	  0  3005 ffff880023f52348  a 952ms FSC: OBJ17d3: LOOK
+	  1  3006 ffff880024e33668  2 160ms FSC: OBJ17e5 OP60d3b: Write1/Store fl=2
+	  2  3165 ffff8800296dd180  a 424ms FSC: OBJ17e4: LOOK
+	  3  4089 ffff8800262c8d78  a 212ms FSC: OBJ17ea: CRTN
+	  4  4090 ffff88002792bed8  2 388ms FSC: OBJ17e8 OP60d36: Write1/Store fl=2
+	  5  4092 ffff88002a0ef308  2 388ms FSC: OBJ17e7 OP60d2e: Write1/Store fl=2
+	  6  4094 ffff88002abaf4b8  2 132ms FSC: OBJ17e2 OP60d4e: Write1/Store fl=2
+	  7  4095 ffff88002bb188e0  a 388ms FSC: OBJ17e9: CRTN
+	vsq     - ffff880023d99668  1 308ms FSC: OBJ17e0 OP60f91: Write1/EnQ fl=2
+	vsq     - ffff8800295d1740  1 212ms FSC: OBJ16be OP4d4b6: Write1/EnQ fl=2
+	vsq     - ffff880025ba3308  1 160ms FSC: OBJ179a OP58dec: Write1/EnQ fl=2
+	vsq     - ffff880024ec83e0  1 160ms FSC: OBJ17ae OP599f2: Write1/EnQ fl=2
+	vsq     - ffff880026618e00  1 160ms FSC: OBJ17e6 OP60d33: Write1/EnQ fl=2
+	vsq     - ffff880025a2a4b8  1 132ms FSC: OBJ16a2 OP4d583: Write1/EnQ fl=2
+	vsq     - ffff880023cbe6d8  9 212ms FSC: OBJ17eb: LOOK
+	vsq     - ffff880024d37590  9 212ms FSC: OBJ17ec: LOOK
+	vsq     - ffff880027746cb0  9 212ms FSC: OBJ17ed: LOOK
+	vsq     - ffff880024d37ae8  9 212ms FSC: OBJ17ee: LOOK
+	vsq     - ffff880024d37cb0  9 212ms FSC: OBJ17ef: LOOK
+	vsq     - ffff880025036550  9 212ms FSC: OBJ17f0: LOOK
+	vsq     - ffff8800250368e0  9 212ms FSC: OBJ17f1: LOOK
+	vsq     - ffff880025036aa8  9 212ms FSC: OBJ17f2: LOOK
+
+In the 'THR' column, executing items show the thread they're occupying and
+queued items indicate which queue they're on.  'PID' shows the process ID of
+a slow-work thread that's executing something.  'FL' shows the work item flags.
+'MARK' indicates how long since an item was queued or began executing.  Lastly,
+the 'DESC' column permits the owner of an item to give some information.
+
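
Taken together, the registration, init, enqueue and cancel calls documented in
the slow-work hunks above compose as in the following minimal sketch.  It is an
illustration only, not code from this series: my_work, my_ops, my_execute() and
the module boilerplate are invented names; only the ->owner and ->execute()
members of slow_work_ops are filled in, since the other operations are now
optional:

	#include <linux/module.h>
	#include <linux/slow-work.h>

	static struct slow_work my_work;	/* hypothetical work item */

	/* Runs in a borrowed slow-work thread; may sleep, do I/O, take locks. */
	static void my_execute(struct slow_work *work)
	{
		/* ... the actual slow operation ... */
	}

	static const struct slow_work_ops my_ops = {
		.owner	 = THIS_MODULE,
		.execute = my_execute,
	};

	static int __init my_init(void)
	{
		int ret = slow_work_register_user(THIS_MODULE);
		if (ret < 0)
			return ret;

		slow_work_init(&my_work, &my_ops);
		return slow_work_enqueue(&my_work);
	}

	static void __exit my_exit(void)
	{
		cancel_slow_work(&my_work);
		slow_work_unregister_user(THIS_MODULE);
	}

	module_init(my_init);
	module_exit(my_exit);
	MODULE_LICENSE("GPL");
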
diff --git a/MAINTAINERS b/MAINTAINERS
index c824b4d62754..16129e67678a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -512,10 +512,32 @@ W: http://www.arm.linux.org.uk/
 S:	Maintained
 F:	arch/arm/
 
+ARM PRIMECELL AACI PL041 DRIVER
+M:	Russell King <linux@arm.linux.org.uk>
+S:	Maintained
+F:	sound/arm/aaci.*
+
+ARM PRIMECELL CLCD PL110 DRIVER
+M:	Russell King <linux@arm.linux.org.uk>
+S:	Maintained
+F:	drivers/video/amba-clcd.*
+
+ARM PRIMECELL KMI PL050 DRIVER
+M:	Russell King <linux@arm.linux.org.uk>
+S:	Maintained
+F:	drivers/input/serio/ambakmi.*
+F:	include/linux/amba/kmi.h
+
 ARM PRIMECELL MMCI PL180/1 DRIVER
 S:	Orphan
 F:	drivers/mmc/host/mmci.*
 
+ARM PRIMECELL BUS SUPPORT
+M:	Russell King <linux@arm.linux.org.uk>
+S:	Maintained
+F:	drivers/amba/
+F:	include/linux/amba/bus.h
+
 ARM/ADI ROADRUNNER MACHINE SUPPORT
 M:	Lennert Buytenhek <kernel@wantstofly.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1027,7 +1049,7 @@ F: drivers/serial/atmel_serial.c
 
 ATMEL LCDFB DRIVER
 M:	Nicolas Ferre <nicolas.ferre@atmel.com>
-L:	linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-fbdev@vger.kernel.org
 S:	Maintained
 F:	drivers/video/atmel_lcdfb.c
 F:	include/video/atmel_lcdc.h
@@ -2113,7 +2135,7 @@ F: drivers/net/wan/dlci.c
 F:	drivers/net/wan/sdla.c
 
 FRAMEBUFFER LAYER
-L:	linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-fbdev@vger.kernel.org
 W:	http://linux-fbdev.sourceforge.net/
 S:	Orphan
 F:	Documentation/fb/
@@ -2136,7 +2158,7 @@ F: drivers/i2c/busses/i2c-cpm.c
 
 FREESCALE IMX / MXC FRAMEBUFFER DRIVER
 M:	Sascha Hauer <kernel@pengutronix.de>
-L:	linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-fbdev@vger.kernel.org
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/plat-mxc/include/mach/imxfb.h
@@ -2312,6 +2334,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 S:	Maintained
 F:	drivers/media/video/gspca/finepix.c
 
+GSPCA GL860 SUBDRIVER
+M:	Olivier Lorin <o.lorin@laposte.net>
+L:	linux-media@vger.kernel.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
+S:	Maintained
+F:	drivers/media/video/gspca/gl860/
+
 GSPCA M5602 SUBDRIVER
 M:	Erik Andren <erik.andren@gmail.com>
 L:	linux-media@vger.kernel.org
@@ -2533,8 +2562,7 @@ S: Maintained
 F:	Documentation/i2c/
 F:	drivers/i2c/
 F:	include/linux/i2c.h
-F:	include/linux/i2c-dev.h
-F:	include/linux/i2c-id.h
+F:	include/linux/i2c-*.h
 
 I2C-TINY-USB DRIVER
 M:	Till Harbaum <till@harbaum.org>
@@ -2635,7 +2663,7 @@ S: Supported
 F:	security/integrity/ima/
 
 IMS TWINTURBO FRAMEBUFFER DRIVER
-L:	linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-fbdev@vger.kernel.org
 S:	Orphan
 F:	drivers/video/imsttfb.c
 
@@ -2670,14 +2698,14 @@ F: drivers/input/
 
 INTEL FRAMEBUFFER DRIVER (excluding 810 and 815)
 M:	Sylvain Meyer <sylvain.meyer@worldonline.fr>
-L:	linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-fbdev@vger.kernel.org
 S:	Maintained
 F:	Documentation/fb/intelfb.txt
 F:	drivers/video/intelfb/
 
 INTEL 810/815 FRAMEBUFFER DRIVER
 M:	Antonino Daplas <adaplas@gmail.com>
-L:	linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-fbdev@vger.kernel.org
 S:	Maintained
 F:	drivers/video/i810/
 
@@ -3391,7 +3419,7 @@ S: Supported
 
 MATROX FRAMEBUFFER DRIVER
 M:	Petr Vandrovec <vandrove@vc.cvut.cz>
-L:	linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-fbdev@vger.kernel.org
 S:	Maintained
 F:	drivers/video/matrox/matroxfb_*
 F:	include/linux/matroxfb.h
@@ -3778,7 +3806,7 @@ F: fs/ntfs/
 
 NVIDIA (rivafb and nvidiafb) FRAMEBUFFER DRIVER
 M:	Antonino Daplas <adaplas@gmail.com>
-L:	linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-fbdev@vger.kernel.org
 S:	Maintained
 F:	drivers/video/riva/
 F:	drivers/video/nvidia/
@@ -3813,7 +3841,7 @@ F: sound/soc/omap/
 
 OMAP FRAMEBUFFER SUPPORT
 M:	Imre Deak <imre.deak@nokia.com>
-L:	linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-fbdev@vger.kernel.org
 L:	linux-omap@vger.kernel.org
 S:	Maintained
 F:	drivers/video/omap/
@@ -4319,14 +4347,14 @@ F: include/linux/qnxtypes.h
 
 RADEON FRAMEBUFFER DISPLAY DRIVER
 M:	Benjamin Herrenschmidt <benh@kernel.crashing.org>
-L:	linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-fbdev@vger.kernel.org
 S:	Maintained
 F:	drivers/video/aty/radeon*
 F:	include/linux/radeonfb.h
 
 RAGE128 FRAMEBUFFER DISPLAY DRIVER
 M:	Paul Mackerras <paulus@samba.org>
-L:	linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-fbdev@vger.kernel.org
 S:	Maintained
 F:	drivers/video/aty/aty128fb.c
 
@@ -4465,7 +4493,7 @@ F: drivers/net/wireless/rtl818x/rtl8187*
 
 S3 SAVAGE FRAMEBUFFER DRIVER
 M:	Antonino Daplas <adaplas@gmail.com>
-L:	linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-fbdev@vger.kernel.org
 S:	Maintained
 F:	drivers/video/savage/
 
@@ -5628,7 +5656,7 @@ S: Maintained
 
 UVESAFB DRIVER
 M:	Michal Januszewski <spock@gentoo.org>
-L:	linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-fbdev@vger.kernel.org
 W:	http://dev.gentoo.org/~spock/projects/uvesafb/
 S:	Maintained
 F:	Documentation/fb/uvesafb.txt
@@ -5661,7 +5689,7 @@ F: drivers/mmc/host/via-sdmmc.c
 VIA UNICHROME(PRO)/CHROME9 FRAMEBUFFER DRIVER
 M:	Joseph Chan <JosephChan@via.com.tw>
 M:	Scott Fang <ScottFang@viatech.com.cn>
-L:	linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-fbdev@vger.kernel.org
 S:	Maintained
 F:	drivers/video/via/
 
diff --git a/Makefile b/Makefile
index aa3e13a7e353..ad8260102f64 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 32
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
 NAME = Man-Eating Seals of Antiquity
 
 # *DOCUMENTATION*
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index d16ec97ec9a9..c019949a5189 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -22,4 +22,10 @@ enum km_type {
 	KM_TYPE_NR
 };
 
+#ifdef CONFIG_DEBUG_HIGHMEM
+#define KM_NMI		(-1)
+#define KM_NMI_PTE	(-1)
+#define KM_IRQ_PTE	(-1)
+#endif
+
 #endif
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 2a573d4fea24..e7714f367eb8 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -662,8 +662,12 @@ static void do_signal(struct pt_regs *regs, int syscall)
 		regs->ARM_sp -= 4;
 		usp = (u32 __user *)regs->ARM_sp;
 
-		put_user(regs->ARM_pc, usp);
-		regs->ARM_pc = KERN_RESTART_CODE;
+		if (put_user(regs->ARM_pc, usp) == 0) {
+			regs->ARM_pc = KERN_RESTART_CODE;
+		} else {
+			regs->ARM_sp += 4;
+			force_sigsegv(0, current);
+		}
 #endif
 	}
 }
diff --git a/arch/arm/mach-pxa/cpufreq-pxa2xx.c b/arch/arm/mach-pxa/cpufreq-pxa2xx.c
index 983cc8c20081..9e4d9816726a 100644
--- a/arch/arm/mach-pxa/cpufreq-pxa2xx.c
+++ b/arch/arm/mach-pxa/cpufreq-pxa2xx.c
@@ -447,6 +447,7 @@ static __init int pxa_cpufreq_init(struct cpufreq_policy *policy)
 		pxa27x_freq_table[i].frequency = freq;
 		pxa27x_freq_table[i].index = i;
 	}
+	pxa27x_freq_table[i].index = i;
 	pxa27x_freq_table[i].frequency = CPUFREQ_TABLE_END;
 
 	/*
diff --git a/arch/arm/mach-pxa/cpufreq-pxa3xx.c b/arch/arm/mach-pxa/cpufreq-pxa3xx.c
index 67f34a8d8e60..149cdd9aee4d 100644
--- a/arch/arm/mach-pxa/cpufreq-pxa3xx.c
+++ b/arch/arm/mach-pxa/cpufreq-pxa3xx.c
@@ -102,7 +102,7 @@ static int setup_freqs_table(struct cpufreq_policy *policy,
 		table[i].index = i;
 		table[i].frequency = freqs[i].cpufreq_mhz * 1000;
 	}
-	table[num].frequency = i;
+	table[num].index = i;
 	table[num].frequency = CPUFREQ_TABLE_END;
 
 	pxa3xx_freqs = freqs;
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 3da45d051743..d98023f55503 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -802,10 +802,12 @@ static void __init spitz_init(void)
 {
 	spitz_ficp_platform_data.gpio_pwdown = SPITZ_GPIO_IR_ON;
 
+#ifdef CONFIG_MACH_BORZOI
 	if (machine_is_borzoi()) {
 		sharpsl_nand_platform_data.badblock_pattern = &sharpsl_akita_bbt;
 		sharpsl_nand_platform_data.ecc_layout = &akita_oobinfo;
 	}
+#endif
 
 	platform_scoop_config = &spitz_pcmcia_config;
 
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 94be7bb6cb9a..07b976da6174 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -12,7 +12,7 @@
 #
 # http://www.arm.linux.org.uk/developer/machines/?action=new
 #
-# Last update: Fri Sep 18 21:42:00 2009
+# Last update: Wed Nov 25 22:14:58 2009
 #
 # machine_is_xxx	CONFIG_xxxx		MACH_TYPE_xxx		number
 #
@@ -928,7 +928,7 @@ palmt5 MACH_PALMT5 PALMT5 917
 palmtc			MACH_PALMTC		PALMTC			918
 omap_apollon		MACH_OMAP_APOLLON	OMAP_APOLLON		919
 mxc30030evb		MACH_MXC30030EVB	MXC30030EVB		920
-rea_2d			MACH_REA_2D		REA_2D			921
+rea_cpu2		MACH_REA_2D		REA_2D			921
 eti3e524		MACH_TI3E524		TI3E524			922
 ateb9200		MACH_ATEB9200		ATEB9200		923
 auckland		MACH_AUCKLAND		AUCKLAND		924
@@ -2421,3 +2421,118 @@ liberty MACH_LIBERTY LIBERTY 2434
 mh355			MACH_MH355		MH355			2435
 pc7802			MACH_PC7802		PC7802			2436
 gnet_sgc		MACH_GNET_SGC		GNET_SGC		2437
+einstein15		MACH_EINSTEIN15		EINSTEIN15		2438
+cmpd			MACH_CMPD		CMPD			2439
+davinci_hase1		MACH_DAVINCI_HASE1	DAVINCI_HASE1		2440
+lgeincitephone		MACH_LGEINCITEPHONE	LGEINCITEPHONE		2441
+ea313x			MACH_EA313X		EA313X			2442
+fwbd_39064		MACH_FWBD_39064		FWBD_39064		2443
+fwbd_390128		MACH_FWBD_390128	FWBD_390128		2444
+pelco_moe		MACH_PELCO_MOE		PELCO_MOE		2445
+minimix27		MACH_MINIMIX27		MINIMIX27		2446
+omap3_thunder		MACH_OMAP3_THUNDER	OMAP3_THUNDER		2447
+passionc		MACH_PASSIONC		PASSIONC		2448
+mx27amata		MACH_MX27AMATA		MX27AMATA		2449
+bgat1			MACH_BGAT1		BGAT1			2450
+buzz			MACH_BUZZ		BUZZ			2451
+mb9g20			MACH_MB9G20		MB9G20			2452
+yushan			MACH_YUSHAN		YUSHAN			2453
+lizard			MACH_LIZARD		LIZARD			2454
+omap3polycom		MACH_OMAP3POLYCOM	OMAP3POLYCOM		2455
+smdkv210		MACH_SMDKV210		SMDKV210		2456
+bravo			MACH_BRAVO		BRAVO			2457
+siogentoo1		MACH_SIOGENTOO1		SIOGENTOO1		2458
+siogentoo2		MACH_SIOGENTOO2		SIOGENTOO2		2459
+sm3k			MACH_SM3K		SM3K			2460
+acer_tempo_f900		MACH_ACER_TEMPO_F900	ACER_TEMPO_F900		2461
+sst61vc010_dev		MACH_SST61VC010_DEV	SST61VC010_DEV		2462
+glittertind		MACH_GLITTERTIND	GLITTERTIND		2463
+omap_zoom3		MACH_OMAP_ZOOM3		OMAP_ZOOM3		2464
+omap_3630sdp		MACH_OMAP_3630SDP	OMAP_3630SDP		2465
+cybook2440		MACH_CYBOOK2440		CYBOOK2440		2466
+torino_s		MACH_TORINO_S		TORINO_S		2467
+havana			MACH_HAVANA		HAVANA			2468
+beaumont_11		MACH_BEAUMONT_11	BEAUMONT_11		2469
+vanguard		MACH_VANGUARD		VANGUARD		2470
+s5pc110_draco		MACH_S5PC110_DRACO	S5PC110_DRACO		2471
+cartesio_two		MACH_CARTESIO_TWO	CARTESIO_TWO		2472
+aster			MACH_ASTER		ASTER			2473
+voguesv210		MACH_VOGUESV210		VOGUESV210		2474
+acm500x			MACH_ACM500X		ACM500X			2475
+km9260			MACH_KM9260		KM9260			2476
+nideflexg1		MACH_NIDEFLEXG1		NIDEFLEXG1		2477
+ctera_plug_io		MACH_CTERA_PLUG_IO	CTERA_PLUG_IO		2478
+smartq7			MACH_SMARTQ7		SMARTQ7			2479
+at91sam9g10ek2		MACH_AT91SAM9G10EK2	AT91SAM9G10EK2		2480
+asusp527		MACH_ASUSP527		ASUSP527		2481
+at91sam9g20mpm2		MACH_AT91SAM9G20MPM2	AT91SAM9G20MPM2		2482
+topasa900		MACH_TOPASA900		TOPASA900		2483
+electrum_100		MACH_ELECTRUM_100	ELECTRUM_100		2484
+mx51grb			MACH_MX51GRB		MX51GRB			2485
+xea300			MACH_XEA300		XEA300			2486
+htcstartrek		MACH_HTCSTARTREK	HTCSTARTREK		2487
+lima			MACH_LIMA		LIMA			2488
+csb740			MACH_CSB740		CSB740			2489
+usb_s8815		MACH_USB_S8815		USB_S8815		2490
+watson_efm_plugin	MACH_WATSON_EFM_PLUGIN	WATSON_EFM_PLUGIN	2491
+milkyway		MACH_MILKYWAY		MILKYWAY		2492
+g4evm			MACH_G4EVM		G4EVM			2493
+picomod6		MACH_PICOMOD6		PICOMOD6		2494
+omapl138_hawkboard	MACH_OMAPL138_HAWKBOARD	OMAPL138_HAWKBOARD	2495
+ip6000			MACH_IP6000		IP6000			2496
+ip6010			MACH_IP6010		IP6010			2497
+utm400			MACH_UTM400		UTM400			2498
+omap3_zybex		MACH_OMAP3_ZYBEX	OMAP3_ZYBEX		2499
+wireless_space		MACH_WIRELESS_SPACE	WIRELESS_SPACE		2500
+sx560			MACH_SX560		SX560			2501
+ts41x			MACH_TS41X		TS41X			2502
+elphel10373		MACH_ELPHEL10373	ELPHEL10373		2503
+rhobot			MACH_RHOBOT		RHOBOT			2504
+mx51_refresh		MACH_MX51_REFRESH	MX51_REFRESH		2505
+ls9260			MACH_LS9260		LS9260			2506
+shank			MACH_SHANK		SHANK			2507
+qsd8x50_st1		MACH_QSD8X50_ST1	QSD8X50_ST1		2508
+at91sam9m10ekes		MACH_AT91SAM9M10EKES	AT91SAM9M10EKES		2509
+hiram			MACH_HIRAM		HIRAM			2510
+phy3250			MACH_PHY3250		PHY3250			2511
+ea3250			MACH_EA3250		EA3250			2512
+fdi3250			MACH_FDI3250		FDI3250			2513
+whitestone		MACH_WHITESTONE		WHITESTONE		2514
+at91sam9263nit		MACH_AT91SAM9263NIT	AT91SAM9263NIT		2515
+ccmx51			MACH_CCMX51		CCMX51			2516
+ccmx51js		MACH_CCMX51JS		CCMX51JS		2517
+ccwmx51			MACH_CCWMX51		CCWMX51			2518
+ccwmx51js		MACH_CCWMX51JS		CCWMX51JS		2519
+mini6410		MACH_MINI6410		MINI6410		2520
+tiny6410		MACH_TINY6410		TINY6410		2521
+nano6410		MACH_NANO6410		NANO6410		2522
+at572d940hfnldb		MACH_AT572D940HFNLDB	AT572D940HFNLDB		2523
+htcleo			MACH_HTCLEO		HTCLEO			2524
+avp13			MACH_AVP13		AVP13			2525
+xxsvideod		MACH_XXSVIDEOD		XXSVIDEOD		2526
+vpnext			MACH_VPNEXT		VPNEXT			2527
+swarco_itc3		MACH_SWARCO_ITC3	SWARCO_ITC3		2528
+tx51			MACH_TX51		TX51			2529
+dolby_cat1021		MACH_DOLBY_CAT1021	DOLBY_CAT1021		2530
+mx28evk			MACH_MX28EVK		MX28EVK			2531
+phoenix260		MACH_PHOENIX260		PHOENIX260		2532
+uvaca_stork		MACH_UVACA_STORK	UVACA_STORK		2533
+smartq5			MACH_SMARTQ5		SMARTQ5			2534
+all3078			MACH_ALL3078		ALL3078			2535
+ctera_2bay_ds		MACH_CTERA_2BAY_DS	CTERA_2BAY_DS		2536
+siogentoo3		MACH_SIOGENTOO3		SIOGENTOO3		2537
+epb5000			MACH_EPB5000		EPB5000			2538
+hy9263			MACH_HY9263		HY9263			2539
+acer_tempo_m900		MACH_ACER_TEMPO_M900	ACER_TEMPO_M900		2540
+acer_tempo_dx650	MACH_ACER_TEMPO_DX900	ACER_TEMPO_DX900	2541
+acer_tempo_x960		MACH_ACER_TEMPO_X960	ACER_TEMPO_X960		2542
+acer_eten_v900		MACH_ACER_ETEN_V900	ACER_ETEN_V900		2543
+acer_eten_x900		MACH_ACER_ETEN_X900	ACER_ETEN_X900		2544
+bonnell			MACH_BONNELL		BONNELL			2545
+oht_mx27		MACH_OHT_MX27		OHT_MX27		2546
+htcquartz		MACH_HTCQUARTZ		HTCQUARTZ		2547
+davinci_dm6467tevm	MACH_DAVINCI_DM6467TEVM	DAVINCI_DM6467TEVM	2548
+c3ax03			MACH_C3AX03		C3AX03			2549
+mxt_td60		MACH_MXT_TD60		MXT_TD60		2550
+esyx			MACH_ESYX		ESYX			2551
+bulldog			MACH_BULLDOG		BULLDOG			2553
diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c
index 1f170216d2f9..3946aff4f414 100644
--- a/arch/blackfin/kernel/bfin_dma_5xx.c
+++ b/arch/blackfin/kernel/bfin_dma_5xx.c
@@ -225,8 +225,13 @@ int blackfin_dma_suspend(void)
 void blackfin_dma_resume(void)
 {
 	int i;
-	for (i = 0; i < MAX_DMA_SUSPEND_CHANNELS; ++i)
-		dma_ch[i].regs->peripheral_map = dma_ch[i].saved_peripheral_map;
+
+	for (i = 0; i < MAX_DMA_CHANNELS; ++i) {
+		dma_ch[i].regs->cfg = 0;
+
+		if (i < MAX_DMA_SUSPEND_CHANNELS)
+			dma_ch[i].regs->peripheral_map = dma_ch[i].saved_peripheral_map;
+	}
 }
 #endif
 
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbinit.c b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
index f7b9cdce8239..b52c1f8c4bc0 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
@@ -38,7 +38,7 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
 
 #ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
 	d_cache = CPLB_L1_CHBL;
-#ifdef CONFIG_BFIN_EXTMEM_WRITETROUGH
+#ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
 	d_cache |= CPLB_L1_AOW | CPLB_WT;
 #endif
 #endif
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 430ae39456e8..5cc7e2e9e415 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -151,7 +151,7 @@ void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_
 	regs->pc = new_ip;
 	if (current->mm)
 		regs->p5 = current->mm->start_data;
-#ifdef CONFIG_SMP
+#ifndef CONFIG_SMP
 	task_thread_info(current)->l1_task_info.stack_start =
 		(void *)current->mm->context.stack_start;
 	task_thread_info(current)->l1_task_info.lowest_sp = (void *)new_sp;
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index 0982b5d5af10..56b0ba12175f 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -315,7 +315,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 	case BFIN_MEM_ACCESS_CORE:
 	case BFIN_MEM_ACCESS_CORE_ONLY:
 		copied = access_process_vm(child, addr, &data,
-					   to_copy, 0);
+					   to_copy, 1);
 		if (copied)
 			break;
 
diff --git a/arch/blackfin/mach-bf518/include/mach/anomaly.h b/arch/blackfin/mach-bf518/include/mach/anomaly.h
index e9c65390edd1..2829dd0400f1 100644
--- a/arch/blackfin/mach-bf518/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf518/include/mach/anomaly.h
@@ -1,9 +1,13 @@
 /*
- * File: include/asm-blackfin/mach-bf518/anomaly.h
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ * DO NOT EDIT THIS FILE
+ * This file is under version control at
+ * svn://sources.blackfin.uclinux.org/toolchain/trunk/proc-defs/header-frags/
+ * and can be replaced with that version at any time
+ * DO NOT EDIT THIS FILE
  *
- * Copyright (C) 2004-2009 Analog Devices Inc.
- * Licensed under the GPL-2 or later.
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the ADI BSD license.
+ * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
  */
 
 /* This file should be up to date with:
@@ -70,6 +74,10 @@
 #define ANOMALY_05000461 (1)
 /* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */
 #define ANOMALY_05000462 (1)
+/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
+#define ANOMALY_05000473 (1)
+/* TESTSET Instruction Cannot Be Interrupted */
+#define ANOMALY_05000477 (1)
 
 /* Anomalies that don't exist on this proc */
 #define ANOMALY_05000099 (0)
@@ -133,5 +141,7 @@
 #define ANOMALY_05000450 (0)
 #define ANOMALY_05000465 (0)
 #define ANOMALY_05000467 (0)
+#define ANOMALY_05000474 (0)
+#define ANOMALY_05000475 (0)
 
 #endif
diff --git a/arch/blackfin/mach-bf527/include/mach/anomaly.h b/arch/blackfin/mach-bf527/include/mach/anomaly.h
index 3f9052687fa8..02040df8ec80 100644
--- a/arch/blackfin/mach-bf527/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf527/include/mach/anomaly.h
@@ -1,14 +1,18 @@
 /*
- * File: include/asm-blackfin/mach-bf527/anomaly.h
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ * DO NOT EDIT THIS FILE
+ * This file is under version control at
+ * svn://sources.blackfin.uclinux.org/toolchain/trunk/proc-defs/header-frags/
+ * and can be replaced with that version at any time
+ * DO NOT EDIT THIS FILE
  *
- * Copyright (C) 2004-2009 Analog Devices Inc.
- * Licensed under the GPL-2 or later.
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the ADI BSD license.
+ * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
  */
 
 /* This file should be up to date with:
  * - Revision D, 08/14/2009; ADSP-BF526 Blackfin Processor Anomaly List
- * - Revision F, 03/03/2009; ADSP-BF527 Blackfin Processor Anomaly List
+ * - Revision G, 08/25/2009; ADSP-BF527 Blackfin Processor Anomaly List
  */
 
 #ifndef _MACH_ANOMALY_H_
@@ -200,6 +204,10 @@
 #define ANOMALY_05000467 (1)
 /* PLL Latches Incorrect Settings During Reset */
 #define ANOMALY_05000469 (1)
+/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
+#define ANOMALY_05000473 (1)
+/* TESTSET Instruction Cannot Be Interrupted */
+#define ANOMALY_05000477 (1)
 
 /* Anomalies that don't exist on this proc */
 #define ANOMALY_05000099 (0)
@@ -250,5 +258,7 @@
250#define ANOMALY_05000412 (0) 258#define ANOMALY_05000412 (0)
251#define ANOMALY_05000447 (0) 259#define ANOMALY_05000447 (0)
252#define ANOMALY_05000448 (0) 260#define ANOMALY_05000448 (0)
261#define ANOMALY_05000474 (0)
262#define ANOMALY_05000475 (0)
253 263
254#endif 264#endif
diff --git a/arch/blackfin/mach-bf533/include/mach/anomaly.h b/arch/blackfin/mach-bf533/include/mach/anomaly.h
index cd83db2fb1a1..9b3f7a27714d 100644
--- a/arch/blackfin/mach-bf533/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf533/include/mach/anomaly.h
@@ -1,9 +1,13 @@
 /*
- * File: include/asm-blackfin/mach-bf533/anomaly.h
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ * DO NOT EDIT THIS FILE
+ * This file is under version control at
+ * svn://sources.blackfin.uclinux.org/toolchain/trunk/proc-defs/header-frags/
+ * and can be replaced with that version at any time
+ * DO NOT EDIT THIS FILE
  *
- * Copyright (C) 2004-2009 Analog Devices Inc.
- * Licensed under the GPL-2 or later.
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the ADI BSD license.
+ * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
  */
 
 /* This file should be up to date with:
@@ -202,6 +206,10 @@
 #define ANOMALY_05000443 (1)
 /* False Hardware Error when RETI Points to Invalid Memory */
 #define ANOMALY_05000461 (1)
+/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
+#define ANOMALY_05000473 (1)
+/* TESTSET Instruction Cannot Be Interrupted */
+#define ANOMALY_05000477 (1)
 
 /* These anomalies have been "phased" out of analog.com anomaly sheets and are
  * here to show running on older silicon just isn't feasible.
@@ -349,5 +357,7 @@
 #define ANOMALY_05000450 (0)
 #define ANOMALY_05000465 (0)
 #define ANOMALY_05000467 (0)
+#define ANOMALY_05000474 (0)
+#define ANOMALY_05000475 (0)
 
 #endif
diff --git a/arch/blackfin/mach-bf537/include/mach/anomaly.h b/arch/blackfin/mach-bf537/include/mach/anomaly.h
index f091ad2d8ea8..d2c427bc6656 100644
--- a/arch/blackfin/mach-bf537/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf537/include/mach/anomaly.h
@@ -1,9 +1,13 @@
 /*
- * File: include/asm-blackfin/mach-bf537/anomaly.h
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ * DO NOT EDIT THIS FILE
+ * This file is under version control at
+ * svn://sources.blackfin.uclinux.org/toolchain/trunk/proc-defs/header-frags/
+ * and can be replaced with that version at any time
+ * DO NOT EDIT THIS FILE
  *
- * Copyright (C) 2004-2009 Analog Devices Inc.
- * Licensed under the GPL-2 or later.
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the ADI BSD license.
+ * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
  */
 
 /* This file should be up to date with:
@@ -156,6 +160,10 @@
 #define ANOMALY_05000443 (1)
 /* False Hardware Error when RETI Points to Invalid Memory */
 #define ANOMALY_05000461 (1)
+/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
+#define ANOMALY_05000473 (1)
+/* TESTSET Instruction Cannot Be Interrupted */
+#define ANOMALY_05000477 (1)
 
 /* Anomalies that don't exist on this proc */
 #define ANOMALY_05000099 (0)
@@ -202,5 +210,7 @@
 #define ANOMALY_05000450 (0)
 #define ANOMALY_05000465 (0)
 #define ANOMALY_05000467 (0)
+#define ANOMALY_05000474 (0)
+#define ANOMALY_05000475 (0)
 
 #endif
diff --git a/arch/blackfin/mach-bf538/include/mach/anomaly.h b/arch/blackfin/mach-bf538/include/mach/anomaly.h
index 26b76083e14c..d882b7e6f59b 100644
--- a/arch/blackfin/mach-bf538/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf538/include/mach/anomaly.h
@@ -1,9 +1,13 @@
 /*
- * File: include/asm-blackfin/mach-bf538/anomaly.h
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ * DO NOT EDIT THIS FILE
+ * This file is under version control at
+ * svn://sources.blackfin.uclinux.org/toolchain/trunk/proc-defs/header-frags/
+ * and can be replaced with that version at any time
+ * DO NOT EDIT THIS FILE
  *
- * Copyright (C) 2004-2009 Analog Devices Inc.
- * Licensed under the GPL-2 or later.
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the ADI BSD license.
+ * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
  */
 
 /* This file should be up to date with:
@@ -128,6 +132,10 @@
 #define ANOMALY_05000443 (1)
 /* False Hardware Error when RETI Points to Invalid Memory */
 #define ANOMALY_05000461 (1)
+/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
+#define ANOMALY_05000473 (1)
+/* TESTSET Instruction Cannot Be Interrupted */
+#define ANOMALY_05000477 (1)
 
 /* Anomalies that don't exist on this proc */
 #define ANOMALY_05000099 (0)
@@ -176,5 +184,7 @@
 #define ANOMALY_05000450 (0)
 #define ANOMALY_05000465 (0)
 #define ANOMALY_05000467 (0)
+#define ANOMALY_05000474 (0)
+#define ANOMALY_05000475 (0)
 
 #endif
diff --git a/arch/blackfin/mach-bf548/include/mach/anomaly.h b/arch/blackfin/mach-bf548/include/mach/anomaly.h
index 52b116ae522a..7d08c7524498 100644
--- a/arch/blackfin/mach-bf548/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf548/include/mach/anomaly.h
@@ -1,9 +1,13 @@
 /*
- * File: include/asm-blackfin/mach-bf548/anomaly.h
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ * DO NOT EDIT THIS FILE
+ * This file is under version control at
+ * svn://sources.blackfin.uclinux.org/toolchain/trunk/proc-defs/header-frags/
+ * and can be replaced with that version at any time
+ * DO NOT EDIT THIS FILE
  *
- * Copyright (C) 2004-2009 Analog Devices Inc.
- * Licensed under the GPL-2 or later.
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the ADI BSD license.
+ * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
  */
 
 /* This file should be up to date with:
@@ -24,6 +28,8 @@
 #define ANOMALY_05000119 (1)
 /* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
 #define ANOMALY_05000122 (1)
+/* Data Corruption with Cached External Memory and Non-Cached On-Chip L2 Memory */
+#define ANOMALY_05000220 (1)
 /* False Hardware Error from an Access in the Shadow of a Conditional Branch */
 #define ANOMALY_05000245 (1)
 /* Sensitivity To Noise with Slow Input Edge Rates on External SPORT TX and RX Clocks */
@@ -200,6 +206,14 @@
 #define ANOMALY_05000466 (1)
 /* Possible RX data corruption when control & data EP FIFOs are accessed via the core */
 #define ANOMALY_05000467 (1)
+/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
+#define ANOMALY_05000473 (1)
+/* Access to DDR-SDRAM causes system hang under certain PLL/VR settings */
+#define ANOMALY_05000474 (1)
+/* Core Hang With L2/L3 Configured in Writeback Cache Mode */
+#define ANOMALY_05000475 (1)
+/* TESTSET Instruction Cannot Be Interrupted */
+#define ANOMALY_05000477 (1)
 
 /* Anomalies that don't exist on this proc */
 #define ANOMALY_05000099 (0)
@@ -215,7 +229,6 @@
 #define ANOMALY_05000198 (0)
 #define ANOMALY_05000202 (0)
 #define ANOMALY_05000215 (0)
-#define ANOMALY_05000220 (0)
 #define ANOMALY_05000227 (0)
 #define ANOMALY_05000230 (0)
 #define ANOMALY_05000231 (0)
diff --git a/arch/blackfin/mach-bf561/atomic.S b/arch/blackfin/mach-bf561/atomic.S
index 0261a5e751b3..f99f174b129f 100644
--- a/arch/blackfin/mach-bf561/atomic.S
+++ b/arch/blackfin/mach-bf561/atomic.S
@@ -19,6 +19,16 @@
 	\reg\().h = _corelock;
 .endm
 
+.macro safe_testset addr:req, scratch:req
+#if ANOMALY_05000477
+	cli \scratch;
+	testset (\addr);
+	sti \scratch;
+#else
+	testset (\addr);
+#endif
+.endm
+
 /*
  * r0 = address of atomic data to flush and invalidate (32bit).
  *
@@ -33,7 +43,7 @@ ENTRY(_get_core_lock)
 	cli r0;
 	coreslot_loadaddr p0;
 .Lretry_corelock:
-	testset (p0);
+	safe_testset p0, r2;
 	if cc jump .Ldone_corelock;
 	SSYNC(r2);
 	jump .Lretry_corelock
@@ -56,7 +66,7 @@ ENTRY(_get_core_lock_noflush)
 	cli r0;
 	coreslot_loadaddr p0;
 .Lretry_corelock_noflush:
-	testset (p0);
+	safe_testset p0, r2;
 	if cc jump .Ldone_corelock_noflush;
 	SSYNC(r2);
 	jump .Lretry_corelock_noflush
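The safe_testset macro above works around anomaly 05000477 by making TESTSET effectively atomic with respect to interrupts: cli saves and masks interrupts into a scratch register, and sti restores them afterwards. A rough C rendering of the same pattern using the generic kernel helpers rather than Blackfin assembly (a sketch, not code from this patch):

static inline int safe_test_and_set(volatile unsigned long *addr)
{
	unsigned long flags;
	int old;

	local_irq_save(flags);			/* cli \scratch */
	old = test_and_set_bit(0, addr);	/* testset (\addr) */
	local_irq_restore(flags);		/* sti \scratch */
	return old;
}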
diff --git a/arch/blackfin/mach-bf561/include/mach/anomaly.h b/arch/blackfin/mach-bf561/include/mach/anomaly.h
index 70da495c9665..5ddc981e9937 100644
--- a/arch/blackfin/mach-bf561/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf561/include/mach/anomaly.h
@@ -1,9 +1,13 @@
 /*
- * File: include/asm-blackfin/mach-bf561/anomaly.h
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ * DO NOT EDIT THIS FILE
+ * This file is under version control at
+ * svn://sources.blackfin.uclinux.org/toolchain/trunk/proc-defs/header-frags/
+ * and can be replaced with that version at any time
+ * DO NOT EDIT THIS FILE
  *
- * Copyright (C) 2004-2009 Analog Devices Inc.
- * Licensed under the GPL-2 or later.
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the ADI BSD license.
+ * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
  */
 
 /* This file should be up to date with:
@@ -213,7 +217,11 @@
 /* Disabling Peripherals with DMA Running May Cause DMA System Instability */
 #define ANOMALY_05000278 (__SILICON_REVISION__ < 5)
 /* False Hardware Error Exception when ISR Context Is Not Restored */
-#define ANOMALY_05000281 (__SILICON_REVISION__ < 5)
+/* Temporarily walk around for bug 5423 till this issue is confirmed by
+ * official anomaly document. It looks 05000281 still exists on bf561
+ * v0.5.
+ */
+#define ANOMALY_05000281 (__SILICON_REVISION__ <= 5)
 /* System MMR Write Is Stalled Indefinitely when Killed in a Particular Stage */
 #define ANOMALY_05000283 (1)
 /* Reads Will Receive Incorrect Data under Certain Conditions */
@@ -280,6 +288,12 @@
 #define ANOMALY_05000443 (1)
 /* False Hardware Error when RETI Points to Invalid Memory */
 #define ANOMALY_05000461 (1)
+/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
+#define ANOMALY_05000473 (1)
+/* Core Hang With L2/L3 Configured in Writeback Cache Mode */
+#define ANOMALY_05000475 (__SILICON_REVISION__ < 4)
+/* TESTSET Instruction Cannot Be Interrupted */
+#define ANOMALY_05000477 (1)
 
 /* Anomalies that don't exist on this proc */
 #define ANOMALY_05000119 (0)
@@ -304,5 +318,6 @@
 #define ANOMALY_05000450 (0)
 #define ANOMALY_05000465 (0)
 #define ANOMALY_05000467 (0)
+#define ANOMALY_05000474 (0)
 
 #endif
diff --git a/arch/blackfin/mach-common/arch_checks.c b/arch/blackfin/mach-common/arch_checks.c
index 9dbafcdcf479..f2ca211a76a0 100644
--- a/arch/blackfin/mach-common/arch_checks.c
+++ b/arch/blackfin/mach-common/arch_checks.c
@@ -57,3 +57,8 @@
      (!defined(CONFIG_BFIN_EXTMEM_DCACHEABLE) && defined(CONFIG_BFIN_L2_WRITEBACK)))
 # error You are exposing Anomaly 220 in this config, either config L2 as Write Through, or make External Memory WB.
 #endif
+
+#if ANOMALY_05000475 && \
+	(defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK))
+# error "Anomaly 475 does not allow you to use Write Back cache with L2 or External Memory"
+#endif
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index d98585f3237d..d92b168c8328 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -276,10 +276,9 @@ void smp_send_reschedule(int cpu)
 	if (cpu_is_offline(cpu))
 		return;
 
-	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
+	msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
 	if (!msg)
 		return;
-	memset(msg, 0, sizeof(msg));
 	INIT_LIST_HEAD(&msg->list);
 	msg->type = BFIN_IPI_RESCHEDULE;
 
@@ -305,10 +304,9 @@ void smp_send_stop(void)
 	if (cpus_empty(callmap))
 		return;
 
-	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
+	msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
 	if (!msg)
 		return;
-	memset(msg, 0, sizeof(msg));
 	INIT_LIST_HEAD(&msg->list);
 	msg->type = BFIN_IPI_CPU_STOP;
 
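Besides being shorter, the kzalloc conversion above fixes a latent bug: the removed memset(msg, 0, sizeof(msg)) zeroed only pointer-sized bytes, not the whole structure, because the argument was missing a dereference. kzalloc sizes and zeroes the full object in one call. The hazard in isolation (struct msg_type is a stand-in name, not from this file):

struct msg_type *msg;

msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
memset(msg, 0, sizeof(msg));	/* BUG: zeroes only 4 or 8 bytes (the pointer's size) */

msg = kzalloc(sizeof(*msg), GFP_ATOMIC);	/* allocates and zeroes all of *msg */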
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index 69dad5a850a8..a36799e85693 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -28,7 +28,7 @@
 #define dbg(x...)
 #endif
 
-#define KERNEL_START (KERNEL_BINARY_TEXT_START - 0x1000)
+#define KERNEL_START (KERNEL_BINARY_TEXT_START)
 
 extern struct unwind_table_entry __start___unwind[];
 extern struct unwind_table_entry __stop___unwind[];
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index fda4baa059b5..9dab4a4e09f7 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -78,9 +78,6 @@ SECTIONS
 	 */
 	. = ALIGN(PAGE_SIZE);
 	data_start = .;
-	EXCEPTION_TABLE(16)
-
-	NOTES
 
 	/* unwind info */
 	.PARISC.unwind : {
@@ -89,6 +86,9 @@ SECTIONS
 		__stop___unwind = .;
 	}
 
+	EXCEPTION_TABLE(16)
+	NOTES
+
 	/* Data */
 	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 
diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
index b6bac6f61c16..916369575c97 100644
--- a/arch/powerpc/include/asm/kmap_types.h
+++ b/arch/powerpc/include/asm/kmap_types.h
@@ -29,5 +29,16 @@ enum km_type {
 	KM_TYPE_NR
 };
 
+/*
+ * This is a temporary build fix that (so they say on lkml....) should no longer
+ * be required after 2.6.33, because of changes planned to the kmap code.
+ * Let's try to remove this cruft then.
+ */
+#ifdef CONFIG_DEBUG_HIGHMEM
+#define KM_NMI		(-1)
+#define KM_NMI_PTE	(-1)
+#define KM_IRQ_PTE	(-1)
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
diff --git a/arch/sparc/mm/init_64.h b/arch/sparc/mm/init_64.h
index c2f772dbd556..77d1b313e344 100644
--- a/arch/sparc/mm/init_64.h
+++ b/arch/sparc/mm/init_64.h
@@ -45,7 +45,7 @@ extern void free_initmem(void);
 #define VMEMMAP_ALIGN(x)	(((x)+VMEMMAP_CHUNK-1UL)&VMEMMAP_CHUNK_MASK)
 
 #define VMEMMAP_SIZE	((((1UL << MAX_PHYSADDR_BITS) >> PAGE_SHIFT) * \
-			  sizeof(struct page *)) >> VMEMMAP_CHUNK_SHIFT)
+			  sizeof(struct page)) >> VMEMMAP_CHUNK_SHIFT)
 extern unsigned long vmemmap_table[VMEMMAP_SIZE];
 #endif
 
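The one-character fix above matters because sizeof(struct page *) is the size of a pointer while sizeof(struct page) is the size of the object itself, so VMEMMAP_SIZE was being computed from the wrong operand. A stand-alone demonstration with a dummy struct (layout invented for illustration):

#include <stdio.h>

struct page { unsigned long flags; void *mapping; unsigned long private; };

int main(void)
{
	printf("sizeof(struct page *) = %zu\n", sizeof(struct page *)); /* 8 on 64-bit */
	printf("sizeof(struct page)   = %zu\n", sizeof(struct page));   /* 24 here */
	return 0;
}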
diff --git a/arch/x86/kernel/acpi/processor.c b/arch/x86/kernel/acpi/processor.c
index d296f4a195c9..d85d1b2432ba 100644
--- a/arch/x86/kernel/acpi/processor.c
+++ b/arch/x86/kernel/acpi/processor.c
@@ -79,7 +79,8 @@ void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
 	struct cpuinfo_x86 *c = &cpu_data(pr->id);
 
 	pr->pdc = NULL;
-	if (c->x86_vendor == X86_VENDOR_INTEL)
+	if (c->x86_vendor == X86_VENDOR_INTEL ||
+	    c->x86_vendor == X86_VENDOR_CENTAUR)
 		init_intel_pdc(pr, c);
 
 	return;
diff --git a/crypto/async_tx/Kconfig b/crypto/async_tx/Kconfig
index e5aeb2b79e6f..e28e276ac611 100644
--- a/crypto/async_tx/Kconfig
+++ b/crypto/async_tx/Kconfig
@@ -23,3 +23,8 @@ config ASYNC_RAID6_RECOV
 	select ASYNC_CORE
 	select ASYNC_PQ
 
+config ASYNC_TX_DISABLE_PQ_VAL_DMA
+	bool
+
+config ASYNC_TX_DISABLE_XOR_VAL_DMA
+	bool
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 6b5cc4fba59f..ec87f53d5059 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -240,6 +240,16 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 }
 EXPORT_SYMBOL_GPL(async_gen_syndrome);
 
+static inline struct dma_chan *
+pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
+{
+	#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
+	return NULL;
+	#endif
+	return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks,
+				     disks, len);
+}
+
 /**
  * async_syndrome_val - asynchronously validate a raid6 syndrome
  * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
@@ -260,9 +270,7 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 		   size_t len, enum sum_check_flags *pqres, struct page *spare,
 		   struct async_submit_ctl *submit)
 {
-	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ_VAL,
-						      NULL, 0, blocks, disks,
-						      len);
+	struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx;
 	unsigned char coefs[disks-2];
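The helper introduced above gives a Kconfig-controlled escape hatch: when the symbol is set, the helper returns NULL before the channel lookup, and the caller's existing "chan ? chan->device : NULL" logic falls through to the synchronous software validation path. Reduced to its shape (a sketch written with an #else branch for clarity; the real helper is shown verbatim in the hunk above):

static inline struct dma_chan *
val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
{
#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	return NULL;	/* behave as if no DMA channel exists */
#else
	return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks, disks, len);
#endif
}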
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 79182dcb91b7..079ae8ca590b 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -234,6 +234,17 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len)
 		memcmp(a, a + 4, len - 4) == 0);
 }
 
+static inline struct dma_chan *
+xor_val_chan(struct async_submit_ctl *submit, struct page *dest,
+	     struct page **src_list, int src_cnt, size_t len)
+{
+	#ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
+	return NULL;
+	#endif
+	return async_tx_find_channel(submit, DMA_XOR_VAL, &dest, 1, src_list,
+				     src_cnt, len);
+}
+
 /**
  * async_xor_val - attempt a xor parity check with a dma engine.
  * @dest: destination page used if the xor is performed synchronously
@@ -255,9 +266,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 	      int src_cnt, size_t len, enum sum_check_flags *result,
 	      struct async_submit_ctl *submit)
 {
-	struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR_VAL,
-						      &dest, 1, src_list,
-						      src_cnt, len);
+	struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx = NULL;
 	dma_addr_t *dma_src = NULL;
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 5fc3292483ef..c6547130624c 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -40,7 +40,7 @@ struct crypto_rfc4106_ctx {
 struct crypto_gcm_ghash_ctx {
 	unsigned int cryptlen;
 	struct scatterlist *src;
-	crypto_completion_t complete;
+	void (*complete)(struct aead_request *req, int err);
 };
 
 struct crypto_gcm_req_priv_ctx {
@@ -267,23 +267,26 @@ static int gcm_hash_final(struct aead_request *req,
 	return crypto_ahash_final(ahreq);
 }
 
-static void gcm_hash_final_done(struct crypto_async_request *areq,
-				int err)
+static void __gcm_hash_final_done(struct aead_request *req, int err)
 {
-	struct aead_request *req = areq->data;
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 	struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
 
 	if (!err)
 		crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
 
-	gctx->complete(areq, err);
+	gctx->complete(req, err);
 }
 
-static void gcm_hash_len_done(struct crypto_async_request *areq,
-			      int err)
+static void gcm_hash_final_done(struct crypto_async_request *areq, int err)
 {
 	struct aead_request *req = areq->data;
+
+	__gcm_hash_final_done(req, err);
+}
+
+static void __gcm_hash_len_done(struct aead_request *req, int err)
+{
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 
 	if (!err) {
@@ -292,13 +295,18 @@ static void gcm_hash_len_done(struct crypto_async_request *areq,
 		return;
 	}
 
-	gcm_hash_final_done(areq, err);
+	__gcm_hash_final_done(req, err);
 }
 
-static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
-				       int err)
+static void gcm_hash_len_done(struct crypto_async_request *areq, int err)
 {
 	struct aead_request *req = areq->data;
+
+	__gcm_hash_len_done(req, err);
+}
+
+static void __gcm_hash_crypt_remain_done(struct aead_request *req, int err)
+{
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 
 	if (!err) {
@@ -307,13 +315,19 @@ static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
 		return;
 	}
 
-	gcm_hash_len_done(areq, err);
+	__gcm_hash_len_done(req, err);
 }
 
-static void gcm_hash_crypt_done(struct crypto_async_request *areq,
+static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
 				       int err)
 {
 	struct aead_request *req = areq->data;
+
+	__gcm_hash_crypt_remain_done(req, err);
+}
+
+static void __gcm_hash_crypt_done(struct aead_request *req, int err)
+{
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 	struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
 	unsigned int remain;
@@ -327,13 +341,18 @@ static void gcm_hash_crypt_done(struct crypto_async_request *areq,
 		return;
 	}
 
-	gcm_hash_crypt_remain_done(areq, err);
+	__gcm_hash_crypt_remain_done(req, err);
 }
 
-static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
-				       int err)
+static void gcm_hash_crypt_done(struct crypto_async_request *areq, int err)
 {
 	struct aead_request *req = areq->data;
+
+	__gcm_hash_crypt_done(req, err);
+}
+
+static void __gcm_hash_assoc_remain_done(struct aead_request *req, int err)
+{
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 	struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
 	crypto_completion_t complete;
@@ -350,15 +369,21 @@ static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
 	}
 
 	if (remain)
-		gcm_hash_crypt_done(areq, err);
+		__gcm_hash_crypt_done(req, err);
 	else
-		gcm_hash_crypt_remain_done(areq, err);
+		__gcm_hash_crypt_remain_done(req, err);
 }
 
-static void gcm_hash_assoc_done(struct crypto_async_request *areq,
+static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
 				int err)
 {
 	struct aead_request *req = areq->data;
+
+	__gcm_hash_assoc_remain_done(req, err);
+}
+
+static void __gcm_hash_assoc_done(struct aead_request *req, int err)
+{
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 	unsigned int remain;
 
@@ -371,13 +396,18 @@ static void gcm_hash_assoc_done(struct crypto_async_request *areq,
 		return;
 	}
 
-	gcm_hash_assoc_remain_done(areq, err);
+	__gcm_hash_assoc_remain_done(req, err);
 }
 
-static void gcm_hash_init_done(struct crypto_async_request *areq,
-			       int err)
+static void gcm_hash_assoc_done(struct crypto_async_request *areq, int err)
 {
 	struct aead_request *req = areq->data;
+
+	__gcm_hash_assoc_done(req, err);
+}
+
+static void __gcm_hash_init_done(struct aead_request *req, int err)
+{
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 	crypto_completion_t complete;
 	unsigned int remain = 0;
@@ -393,9 +423,16 @@ static void gcm_hash_init_done(struct crypto_async_request *areq,
 	}
 
 	if (remain)
-		gcm_hash_assoc_done(areq, err);
+		__gcm_hash_assoc_done(req, err);
 	else
-		gcm_hash_assoc_remain_done(areq, err);
+		__gcm_hash_assoc_remain_done(req, err);
+}
+
+static void gcm_hash_init_done(struct crypto_async_request *areq, int err)
+{
+	struct aead_request *req = areq->data;
+
+	__gcm_hash_init_done(req, err);
 }
 
 static int gcm_hash(struct aead_request *req,
@@ -457,10 +494,8 @@ static void gcm_enc_copy_hash(struct aead_request *req,
 			    crypto_aead_authsize(aead), 1);
 }
 
-static void gcm_enc_hash_done(struct crypto_async_request *areq,
-			      int err)
+static void gcm_enc_hash_done(struct aead_request *req, int err)
 {
-	struct aead_request *req = areq->data;
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 
 	if (!err)
@@ -469,8 +504,7 @@ static void gcm_enc_hash_done(struct crypto_async_request *areq,
 	aead_request_complete(req, err);
 }
 
-static void gcm_encrypt_done(struct crypto_async_request *areq,
-			     int err)
+static void gcm_encrypt_done(struct crypto_async_request *areq, int err)
 {
 	struct aead_request *req = areq->data;
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
@@ -479,9 +513,13 @@ static void gcm_encrypt_done(struct crypto_async_request *areq,
 		err = gcm_hash(req, pctx);
 		if (err == -EINPROGRESS || err == -EBUSY)
 			return;
+		else if (!err) {
+			crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
+			gcm_enc_copy_hash(req, pctx);
+		}
 	}
 
-	gcm_enc_hash_done(areq, err);
+	aead_request_complete(req, err);
 }
 
 static int crypto_gcm_encrypt(struct aead_request *req)
@@ -538,9 +576,8 @@ static void gcm_decrypt_done(struct crypto_async_request *areq, int err)
 	aead_request_complete(req, err);
 }
 
-static void gcm_dec_hash_done(struct crypto_async_request *areq, int err)
+static void gcm_dec_hash_done(struct aead_request *req, int err)
 {
-	struct aead_request *req = areq->data;
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 	struct ablkcipher_request *abreq = &pctx->u.abreq;
 	struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
@@ -552,9 +589,11 @@ static void gcm_dec_hash_done(struct crypto_async_request *areq, int err)
 		err = crypto_ablkcipher_decrypt(abreq);
 		if (err == -EINPROGRESS || err == -EBUSY)
 			return;
+		else if (!err)
+			err = crypto_gcm_verify(req, pctx);
 	}
 
-	gcm_decrypt_done(areq, err);
+	aead_request_complete(req, err);
 }
 
 static int crypto_gcm_decrypt(struct aead_request *req)
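All of the gcm completion handlers above follow one refactoring recipe: the body moves into a __name(struct aead_request *req, int err) core, and the original crypto_async_request signature survives only as a thin adapter that digs the request out of areq->data. That lets the synchronous return path chain the cores directly with the aead_request it already holds, instead of faking an asynchronous completion. The shape of the split, with generic names (a sketch, not code from this patch):

static void __step_done(struct aead_request *req, int err)
{
	/* the real per-step work; may tail-call the next __step directly */
}

static void step_done(struct crypto_async_request *areq, int err)
{
	struct aead_request *req = areq->data;	/* async callback entry point */

	__step_done(req, err);
}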
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index cd80d1dd1950..57bdaf6ffab1 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -203,8 +203,9 @@ static const union acpi_predefined_info predefined_names[] =
 	{{"_BCT", 1, ACPI_RTYPE_INTEGER}},
 	{{"_BDN", 0, ACPI_RTYPE_INTEGER}},
 	{{"_BFS", 1, 0}},
-	{{"_BIF", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (9 Int),(4 Str) */
-	   {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 9, ACPI_RTYPE_STRING}, 4,0}},
+	{{"_BIF", 0, ACPI_RTYPE_PACKAGE} },	/* Fixed-length (9 Int),(4 Str/Buf) */
+	   {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 9,
+	     ACPI_RTYPE_STRING | ACPI_RTYPE_BUFFER}, 4, 0} },
 
 	{{"_BIX", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (16 Int),(4 Str) */
 	   {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16, ACPI_RTYPE_STRING}, 4,
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index e56b2a7b53db..23e5a0519af5 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -224,6 +224,7 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
 	 * _OSI(Linux) helps sound
 	 * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad R61"),
 	 * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T61"),
+	 * T400, T500
 	 * _OSI(Linux) has Linux specific hooks
 	 * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X61"),
 	 * _OSI(Linux) is a NOP:
@@ -254,6 +255,22 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
 			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X61"),
 		},
 	},
+	{
+	.callback = dmi_enable_osi_linux,
+	.ident = "Lenovo ThinkPad T400",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T400"),
+		},
+	},
+	{
+	.callback = dmi_enable_osi_linux,
+	.ident = "Lenovo ThinkPad T500",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T500"),
+		},
+	},
 	{}
 };
 
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 4cc1b8116e76..5f2c379ab7bf 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -430,6 +430,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
 	},
 	{
 	.callback = init_set_sci_en_on_resume,
+	.ident = "Hewlett-Packard Compaq Presario C700 Notebook PC",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario C700 Notebook PC"),
+		},
+	},
+	{
+	.callback = init_set_sci_en_on_resume,
 	.ident = "Hewlett-Packard Compaq Presario CQ40 Notebook PC",
 	.matches = {
 		DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index d344db42a002..172b57e6543f 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -707,34 +707,17 @@ static unsigned int sata_fsl_dev_classify(struct ata_port *ap)
 	return ata_dev_classify(&tf);
 }
 
-static int sata_fsl_prereset(struct ata_link *link, unsigned long deadline)
-{
-	/* FIXME: Never skip softreset, sata_fsl_softreset() is
-	 * combination of soft and hard resets.  sata_fsl_softreset()
-	 * needs to be splitted into soft and hard resets.
-	 */
-	return 0;
-}
-
-static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
+static int sata_fsl_hardreset(struct ata_link *link, unsigned int *class,
 			      unsigned long deadline)
 {
 	struct ata_port *ap = link->ap;
-	struct sata_fsl_port_priv *pp = ap->private_data;
 	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
 	void __iomem *hcr_base = host_priv->hcr_base;
-	int pmp = sata_srst_pmp(link);
 	u32 temp;
-	struct ata_taskfile tf;
-	u8 *cfis;
-	u32 Serror;
 	int i = 0;
 	unsigned long start_jiffies;
 
-	DPRINTK("in xx_softreset\n");
-
-	if (pmp != SATA_PMP_CTRL_PORT)
-		goto issue_srst;
+	DPRINTK("in xx_hardreset\n");
 
 try_offline_again:
 	/*
@@ -749,7 +732,7 @@ try_offline_again:
 
 	if (temp & ONLINE) {
 		ata_port_printk(ap, KERN_ERR,
-				"Softreset failed, not off-lined %d\n", i);
+				"Hardreset failed, not off-lined %d\n", i);
 
 		/*
 		 * Try to offline controller atleast twice
@@ -761,7 +744,7 @@ try_offline_again:
 		goto try_offline_again;
 	}
 
-	DPRINTK("softreset, controller off-lined\n");
+	DPRINTK("hardreset, controller off-lined\n");
 	VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
 	VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
 
@@ -786,11 +769,11 @@ try_offline_again:
 
 	if (!(temp & ONLINE)) {
 		ata_port_printk(ap, KERN_ERR,
-				"Softreset failed, not on-lined\n");
+				"Hardreset failed, not on-lined\n");
 		goto err;
 	}
 
-	DPRINTK("softreset, controller off-lined & on-lined\n");
+	DPRINTK("hardreset, controller off-lined & on-lined\n");
 	VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
 	VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
 
@@ -806,7 +789,7 @@ try_offline_again:
 			"No Device OR PHYRDY change,Hstatus = 0x%x\n",
 			ioread32(hcr_base + HSTATUS));
 		*class = ATA_DEV_NONE;
-		goto out;
+		return 0;
 	}
 
 	/*
@@ -819,11 +802,44 @@ try_offline_again:
 	if ((temp & 0xFF) != 0x18) {
 		ata_port_printk(ap, KERN_WARNING, "No Signature Update\n");
 		*class = ATA_DEV_NONE;
-		goto out;
+		goto do_followup_srst;
 	} else {
 		ata_port_printk(ap, KERN_INFO,
 			"Signature Update detected @ %d msecs\n",
 			jiffies_to_msecs(jiffies - start_jiffies));
+		*class = sata_fsl_dev_classify(ap);
+		return 0;
+	}
+
+do_followup_srst:
+	/*
+	 * request libATA to perform follow-up softreset
+	 */
+	return -EAGAIN;
+
+err:
+	return -EIO;
+}
+
+static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
+			      unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct sata_fsl_port_priv *pp = ap->private_data;
+	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	int pmp = sata_srst_pmp(link);
+	u32 temp;
+	struct ata_taskfile tf;
+	u8 *cfis;
+	u32 Serror;
+
+	DPRINTK("in xx_softreset\n");
+
+	if (ata_link_offline(link)) {
+		DPRINTK("PHY reports no device\n");
+		*class = ATA_DEV_NONE;
+		return 0;
 	}
 
 	/*
@@ -834,7 +850,6 @@ try_offline_again:
 	 * reached here, we can send a command to the target device
 	 */
 
-issue_srst:
 	DPRINTK("Sending SRST/device reset\n");
 
 	ata_tf_init(link->device, &tf);
@@ -860,6 +875,8 @@ issue_srst:
 		ioread32(CA + hcr_base), ioread32(CC + hcr_base));
 
 	iowrite32(0xFFFF, CC + hcr_base);
+	if (pmp != SATA_PMP_CTRL_PORT)
+		iowrite32(pmp, CQPMP + hcr_base);
 	iowrite32(1, CQ + hcr_base);
 
 	temp = ata_wait_register(CQ + hcr_base, 0x1, 0x1, 1, 5000);
@@ -926,7 +943,6 @@ issue_srst:
 		VPRINTK("cereg = 0x%x\n", ioread32(hcr_base + CE));
 	}
 
-out:
 	return 0;
 
 err:
@@ -988,18 +1004,6 @@ static void sata_fsl_error_intr(struct ata_port *ap)
 		ehi->err_mask |= AC_ERR_ATA_BUS;
 		ehi->action |= ATA_EH_SOFTRESET;
 
-		/*
-		 * Ignore serror in case of fatal errors as we always want
-		 * to do a soft-reset of the FSL SATA controller. Analyzing
-		 * serror may cause libata to schedule a hard-reset action,
-		 * and hard-reset currently does not do controller
-		 * offline/online, causing command timeouts and leads to an
-		 * un-recoverable state, hence make libATA ignore
-		 * autopsy in case of fatal errors.
-		 */
-
-		ehi->flags |= ATA_EHI_NO_AUTOPSY;
-
 		freeze = 1;
 	}
 
@@ -1267,8 +1271,8 @@ static struct ata_port_operations sata_fsl_ops = {
 
 	.freeze = sata_fsl_freeze,
 	.thaw = sata_fsl_thaw,
-	.prereset = sata_fsl_prereset,
 	.softreset = sata_fsl_softreset,
+	.hardreset = sata_fsl_hardreset,
 	.pmp_softreset = sata_fsl_softreset,
 	.error_handler = sata_fsl_error_handler,
 	.post_internal_cmd = sata_fsl_post_internal_cmd,
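The split above follows the libata reset contract: ->hardreset brings the controller offline/online and, when it cannot finish device classification itself, returns -EAGAIN so the error-handling core schedules the follow-up ->softreset (the new do_followup_srst label documents exactly that). Schematically, with invented helper names (a sketch of the convention, not driver code):

static int my_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	/* ... offline/online the controller, wait for the PHY ... */
	if (!signature_received(link))	/* hypothetical helper */
		return -EAGAIN;		/* EH core will run ->softreset next */
	*class = classify(link);	/* hypothetical helper */
	return 0;
}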
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index a770498a74ec..846d89e3d122 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -328,11 +328,11 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
 	 * necessary.
 	 */
 	parent = dev->parent;
-	spin_unlock_irq(&dev->power.lock);
+	spin_unlock(&dev->power.lock);
 
 	pm_runtime_get_noresume(parent);
 
-	spin_lock_irq(&parent->power.lock);
+	spin_lock(&parent->power.lock);
 	/*
 	 * We can resume if the parent's run-time PM is disabled or it
 	 * is set to ignore children.
@@ -343,9 +343,9 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
 		if (parent->power.runtime_status != RPM_ACTIVE)
 			retval = -EBUSY;
 	}
-	spin_unlock_irq(&parent->power.lock);
+	spin_unlock(&parent->power.lock);
 
-	spin_lock_irq(&dev->power.lock);
+	spin_lock(&dev->power.lock);
 	if (retval)
 		goto out;
 	goto repeat;
@@ -777,7 +777,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
 	}
 
 	if (parent) {
-		spin_lock_irq(&parent->power.lock);
+		spin_lock(&parent->power.lock);
 
 		/*
 		 * It is invalid to put an active child under a parent that is
@@ -793,7 +793,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
 			atomic_inc(&parent->power.child_count);
 		}
 
-		spin_unlock_irq(&parent->power.lock);
+		spin_unlock(&parent->power.lock);
 
 		if (error)
 			goto out;
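The spin_lock_irq -> spin_lock changes above are about nesting, not about removing IRQ protection: these sections already run with dev->power.lock taken via spin_lock_irq(), so interrupts are disabled for the whole sequence, and using the _irq variants on the inner lock would re-enable interrupts at the first inner unlock while the outer critical section is still in flight. The resulting discipline, sketched:

spin_lock_irq(&dev->power.lock);	/* outer lock disables IRQs   */
/* ... */
spin_unlock(&dev->power.lock);		/* IRQs stay disabled         */
spin_lock(&parent->power.lock);
/* ... */
spin_unlock(&parent->power.lock);
spin_lock(&dev->power.lock);
/* ... */
spin_unlock_irq(&dev->power.lock);	/* IRQs re-enabled at the end */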
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 6399e5090df4..92b126394fa1 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -482,7 +482,7 @@ static ssize_t host_store_rescan(struct device *dev,
 
 	return count;
 }
-DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
+static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
 
 static ssize_t dev_show_unique_id(struct device *dev,
 				  struct device_attribute *attr,
@@ -512,7 +512,7 @@ static ssize_t dev_show_unique_id(struct device *dev,
 			sn[8], sn[9], sn[10], sn[11],
 			sn[12], sn[13], sn[14], sn[15]);
 }
-DEVICE_ATTR(unique_id, S_IRUGO, dev_show_unique_id, NULL);
+static DEVICE_ATTR(unique_id, S_IRUGO, dev_show_unique_id, NULL);
 
 static ssize_t dev_show_vendor(struct device *dev,
 			       struct device_attribute *attr,
@@ -536,7 +536,7 @@ static ssize_t dev_show_vendor(struct device *dev,
 	else
 		return snprintf(buf, sizeof(vendor) + 1, "%s\n", drv->vendor);
 }
-DEVICE_ATTR(vendor, S_IRUGO, dev_show_vendor, NULL);
+static DEVICE_ATTR(vendor, S_IRUGO, dev_show_vendor, NULL);
 
 static ssize_t dev_show_model(struct device *dev,
 			      struct device_attribute *attr,
@@ -560,7 +560,7 @@ static ssize_t dev_show_model(struct device *dev,
 	else
 		return snprintf(buf, sizeof(model) + 1, "%s\n", drv->model);
 }
-DEVICE_ATTR(model, S_IRUGO, dev_show_model, NULL);
+static DEVICE_ATTR(model, S_IRUGO, dev_show_model, NULL);
 
 static ssize_t dev_show_rev(struct device *dev,
 			    struct device_attribute *attr,
@@ -584,7 +584,7 @@ static ssize_t dev_show_rev(struct device *dev,
 	else
 		return snprintf(buf, sizeof(rev) + 1, "%s\n", drv->rev);
 }
-DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL);
+static DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL);
 
 static ssize_t cciss_show_lunid(struct device *dev,
 				struct device_attribute *attr, char *buf)
@@ -609,7 +609,7 @@ static ssize_t cciss_show_lunid(struct device *dev,
 		lunid[0], lunid[1], lunid[2], lunid[3],
 		lunid[4], lunid[5], lunid[6], lunid[7]);
 }
-DEVICE_ATTR(lunid, S_IRUGO, cciss_show_lunid, NULL);
+static DEVICE_ATTR(lunid, S_IRUGO, cciss_show_lunid, NULL);
 
 static ssize_t cciss_show_raid_level(struct device *dev,
 				     struct device_attribute *attr, char *buf)
@@ -632,7 +632,7 @@ static ssize_t cciss_show_raid_level(struct device *dev,
 	return snprintf(buf, strlen(raid_label[raid]) + 7, "RAID %s\n",
 			raid_label[raid]);
 }
-DEVICE_ATTR(raid_level, S_IRUGO, cciss_show_raid_level, NULL);
+static DEVICE_ATTR(raid_level, S_IRUGO, cciss_show_raid_level, NULL);
 
 static ssize_t cciss_show_usage_count(struct device *dev,
 				      struct device_attribute *attr, char *buf)
@@ -651,7 +651,7 @@ static ssize_t cciss_show_usage_count(struct device *dev,
 	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
 	return snprintf(buf, 20, "%d\n", count);
 }
-DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL);
+static DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL);
 
 static struct attribute *cciss_host_attrs[] = {
 	&dev_attr_rescan.attr,
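Adding static here works because DEVICE_ATTR(name, ...) defines a variable, not just metadata: it expands, roughly, to a struct device_attribute whose identifier is derived from the attribute name. Without static, every driver exposing an attribute called rescan would export a colliding global symbol:

/* approximate expansion of DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan) */
static struct device_attribute dev_attr_rescan =
	__ATTR(rescan, S_IWUSR, NULL, host_store_rescan);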
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 10e1f0390bbb..3cb56a049e24 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -62,6 +62,7 @@
 #define PCI_DEVICE_ID_INTEL_IGDNG_D_IG	    0x0042
 #define PCI_DEVICE_ID_INTEL_IGDNG_M_HB	    0x0044
 #define PCI_DEVICE_ID_INTEL_IGDNG_MA_HB	    0x0062
+#define PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB    0x006a
 #define PCI_DEVICE_ID_INTEL_IGDNG_M_IG	    0x0046
 
 /* cover 915 and 945 variants */
@@ -96,7 +97,8 @@
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB)
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB)
 
 extern int agp_memory_reserved;
 
@@ -1358,6 +1360,7 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
 	case PCI_DEVICE_ID_INTEL_IGDNG_D_HB:
 	case PCI_DEVICE_ID_INTEL_IGDNG_M_HB:
 	case PCI_DEVICE_ID_INTEL_IGDNG_MA_HB:
+	case PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB:
 		*gtt_offset = *gtt_size = MB(2);
 		break;
 	default:
@@ -2359,6 +2362,8 @@ static const struct intel_driver_description {
 	    "IGDNG/M", NULL, &intel_i965_driver },
 	{ PCI_DEVICE_ID_INTEL_IGDNG_MA_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
 	    "IGDNG/MA", NULL, &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
+	    "IGDNG/MC2", NULL, &intel_i965_driver },
 	{ 0, 0, 0, NULL, NULL, NULL }
 };
 
@@ -2560,6 +2565,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
 	ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB),
 	ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB),
 	ID(PCI_DEVICE_ID_INTEL_IGDNG_MA_HB),
+	ID(PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB),
 	{ }
 };
 
diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
index 737be953cc58..950837cf9e9c 100644
--- a/drivers/char/keyboard.c
+++ b/drivers/char/keyboard.c
@@ -1249,7 +1249,7 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
 
 	if (keycode >= NR_KEYS)
 		if (keycode >= KEY_BRL_DOT1 && keycode <= KEY_BRL_DOT8)
-			keysym = K(KT_BRL, keycode - KEY_BRL_DOT1 + 1);
+			keysym = U(K(KT_BRL, keycode - KEY_BRL_DOT1 + 1));
 		else
 			return;
 	else
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index ed86d3bf249a..6aa10284104a 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -103,8 +103,8 @@ void vt_event_post(unsigned int event, unsigned int old, unsigned int new)
 		ve->event.event = event;
 		/* kernel view is consoles 0..n-1, user space view is
 		   console 1..n with 0 meaning current, so we must bias */
-		ve->event.old = old + 1;
-		ve->event.new = new + 1;
+		ve->event.oldev = old + 1;
+		ve->event.newev = new + 1;
 		wake = 1;
 		ve->done = 1;
 	}
@@ -186,7 +186,7 @@ int vt_waitactive(int n)
 		vt_event_wait(&vw);
 		if (vw.done == 0)
 			return -EINTR;
-	} while (vw.event.new != n);
+	} while (vw.event.newev != n);
 	return 0;
 }
 
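The old/new -> oldev/newev rename above is not cosmetic: struct vt_event is part of the user-visible ioctl interface, and "new" is a C++ keyword, so a C++ program including the header could not even compile. After the rename the structure is usable from both languages (abridged sketch of the layout; the real header may carry additional fields such as padding):

struct vt_event {
	unsigned int event;
	unsigned int oldev;	/* was "old" */
	unsigned int newev;	/* was "new", a C++ keyword */
};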
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index a9952b1236b0..84c51e177269 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -236,7 +236,7 @@ static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
 	/* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
 	 * We could avoid some copying here but it's probably not worth it.
 	 */
-	if (unlikely(((unsigned long)in & PAGE_SIZE) + ecb_fetch_bytes > PAGE_SIZE)) {
+	if (unlikely(((unsigned long)in & ~PAGE_MASK) + ecb_fetch_bytes > PAGE_SIZE)) {
 		ecb_crypt_copy(in, out, key, cword, count);
 		return;
 	}
@@ -248,7 +248,7 @@ static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
 			 u8 *iv, struct cword *cword, int count)
 {
 	/* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
-	if (unlikely(((unsigned long)in & PAGE_SIZE) + cbc_fetch_bytes > PAGE_SIZE))
+	if (unlikely(((unsigned long)in & ~PAGE_MASK) + cbc_fetch_bytes > PAGE_SIZE))
 		return cbc_crypt_copy(in, out, key, iv, cword, count);
 
 	return rep_xcrypt_cbc(in, out, key, iv, cword, count);
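The fix above replaces a bit test with an offset mask: addr & PAGE_SIZE inspects a single bit, whereas addr & ~PAGE_MASK yields the byte offset within the page, which is what the end-of-page overrun check needs. A stand-alone demonstration:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long in = 0x12345f80UL;

	printf("in & PAGE_SIZE  = %#lx (tests one bit: wrong)\n", in & PAGE_SIZE);
	printf("in & ~PAGE_MASK = %#lx (offset within the page)\n", in & ~PAGE_MASK);
	return 0;
}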
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 5903a88351bf..b401dadad4a8 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -26,6 +26,8 @@ config INTEL_IOATDMA
26 select DMA_ENGINE 26 select DMA_ENGINE
27 select DCA 27 select DCA
28 select ASYNC_TX_DISABLE_CHANNEL_SWITCH 28 select ASYNC_TX_DISABLE_CHANNEL_SWITCH
29 select ASYNC_TX_DISABLE_PQ_VAL_DMA
30 select ASYNC_TX_DISABLE_XOR_VAL_DMA
29 help 31 help
30 Enable support for the Intel(R) I/OAT DMA engine present 32 Enable support for the Intel(R) I/OAT DMA engine present
31 in recent Intel Xeon chipsets. 33 in recent Intel Xeon chipsets.
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index bd0b248de2cf..8f99354082ce 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -632,11 +632,21 @@ static bool device_has_all_tx_types(struct dma_device *device)
632 #if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE) 632 #if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
633 if (!dma_has_cap(DMA_XOR, device->cap_mask)) 633 if (!dma_has_cap(DMA_XOR, device->cap_mask))
634 return false; 634 return false;
635
636 #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
637 if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
638 return false;
639 #endif
635 #endif 640 #endif
636 641
637 #if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE) 642 #if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
638 if (!dma_has_cap(DMA_PQ, device->cap_mask)) 643 if (!dma_has_cap(DMA_PQ, device->cap_mask))
639 return false; 644 return false;
645
646 #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
647 if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
648 return false;
649 #endif
640 #endif 650 #endif
641 651
642 return true; 652 return true;
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index 69d02615c4d6..abd9038e06b1 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -98,17 +98,17 @@ static int dca_enabled_in_bios(struct pci_dev *pdev)
98 cpuid_level_9 = cpuid_eax(9); 98 cpuid_level_9 = cpuid_eax(9);
99 res = test_bit(0, &cpuid_level_9); 99 res = test_bit(0, &cpuid_level_9);
100 if (!res) 100 if (!res)
101 dev_err(&pdev->dev, "DCA is disabled in BIOS\n"); 101 dev_dbg(&pdev->dev, "DCA is disabled in BIOS\n");
102 102
103 return res; 103 return res;
104} 104}
105 105
106static int system_has_dca_enabled(struct pci_dev *pdev) 106int system_has_dca_enabled(struct pci_dev *pdev)
107{ 107{
108 if (boot_cpu_has(X86_FEATURE_DCA)) 108 if (boot_cpu_has(X86_FEATURE_DCA))
109 return dca_enabled_in_bios(pdev); 109 return dca_enabled_in_bios(pdev);
110 110
111 dev_err(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n"); 111 dev_dbg(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n");
112 return 0; 112 return 0;
113} 113}
114 114
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index c14fdfeb7f33..45edde996480 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -297,9 +297,7 @@ static inline bool is_ioat_suspended(unsigned long status)
297/* channel was fatally programmed */ 297/* channel was fatally programmed */
298static inline bool is_ioat_bug(unsigned long err) 298static inline bool is_ioat_bug(unsigned long err)
299{ 299{
300 return !!(err & (IOAT_CHANERR_SRC_ADDR_ERR|IOAT_CHANERR_DEST_ADDR_ERR| 300 return !!err;
301 IOAT_CHANERR_NEXT_ADDR_ERR|IOAT_CHANERR_CONTROL_ERR|
302 IOAT_CHANERR_LENGTH_ERR));
303} 301}
304 302
305static inline void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len, 303static inline void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len,
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 96ffab7d37a7..8f1f7f05deaa 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -279,6 +279,8 @@ void ioat2_timer_event(unsigned long data)
279 u32 chanerr; 279 u32 chanerr;
280 280
281 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); 281 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
282 dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
283 __func__, chanerr);
282 BUG_ON(is_ioat_bug(chanerr)); 284 BUG_ON(is_ioat_bug(chanerr));
283 } 285 }
284 286
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 35d1e33afd5b..42f6f10fb0cc 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -378,6 +378,8 @@ static void ioat3_timer_event(unsigned long data)
378 u32 chanerr; 378 u32 chanerr;
379 379
380 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); 380 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
381 dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
382 __func__, chanerr);
381 BUG_ON(is_ioat_bug(chanerr)); 383 BUG_ON(is_ioat_bug(chanerr));
382 } 384 }
383 385
@@ -569,7 +571,7 @@ __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
569 dump_desc_dbg(ioat, compl_desc); 571 dump_desc_dbg(ioat, compl_desc);
570 572
571 /* we leave the channel locked to ensure in order submission */ 573 /* we leave the channel locked to ensure in order submission */
572 return &desc->txd; 574 return &compl_desc->txd;
573} 575}
574 576
575static struct dma_async_tx_descriptor * 577static struct dma_async_tx_descriptor *
@@ -728,7 +730,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
728 dump_desc_dbg(ioat, compl_desc); 730 dump_desc_dbg(ioat, compl_desc);
729 731
730 /* we leave the channel locked to ensure in order submission */ 732 /* we leave the channel locked to ensure in order submission */
731 return &desc->txd; 733 return &compl_desc->txd;
732} 734}
733 735
734static struct dma_async_tx_descriptor * 736static struct dma_async_tx_descriptor *
@@ -736,10 +738,16 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
736 unsigned int src_cnt, const unsigned char *scf, size_t len, 738 unsigned int src_cnt, const unsigned char *scf, size_t len,
737 unsigned long flags) 739 unsigned long flags)
738{ 740{
741 /* specify valid address for disabled result */
742 if (flags & DMA_PREP_PQ_DISABLE_P)
743 dst[0] = dst[1];
744 if (flags & DMA_PREP_PQ_DISABLE_Q)
745 dst[1] = dst[0];
746
739 /* handle the single source multiply case from the raid6 747 /* handle the single source multiply case from the raid6
740 * recovery path 748 * recovery path
741 */ 749 */
742 if (unlikely((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1)) { 750 if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
743 dma_addr_t single_source[2]; 751 dma_addr_t single_source[2];
744 unsigned char single_source_coef[2]; 752 unsigned char single_source_coef[2];
745 753
@@ -761,6 +769,12 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
761 unsigned int src_cnt, const unsigned char *scf, size_t len, 769 unsigned int src_cnt, const unsigned char *scf, size_t len,
762 enum sum_check_flags *pqres, unsigned long flags) 770 enum sum_check_flags *pqres, unsigned long flags)
763{ 771{
772 /* specify valid address for disabled result */
773 if (flags & DMA_PREP_PQ_DISABLE_P)
774 pq[0] = pq[1];
775 if (flags & DMA_PREP_PQ_DISABLE_Q)
776 pq[1] = pq[0];
777
764 /* the cleanup routine only sets bits on validate failure, it 778 /* the cleanup routine only sets bits on validate failure, it
765 * does not clear bits on validate success... so clear it here 779 * does not clear bits on validate success... so clear it here
766 */ 780 */
@@ -778,9 +792,9 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
778 dma_addr_t pq[2]; 792 dma_addr_t pq[2];
779 793
780 memset(scf, 0, src_cnt); 794 memset(scf, 0, src_cnt);
781 flags |= DMA_PREP_PQ_DISABLE_Q;
782 pq[0] = dst; 795 pq[0] = dst;
783 pq[1] = ~0; 796 flags |= DMA_PREP_PQ_DISABLE_Q;
797 pq[1] = dst; /* specify valid address for disabled result */
784 798
785 return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len, 799 return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
786 flags); 800 flags);
@@ -800,9 +814,9 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
800 *result = 0; 814 *result = 0;
801 815
802 memset(scf, 0, src_cnt); 816 memset(scf, 0, src_cnt);
803 flags |= DMA_PREP_PQ_DISABLE_Q;
804 pq[0] = src[0]; 817 pq[0] = src[0];
805 pq[1] = ~0; 818 flags |= DMA_PREP_PQ_DISABLE_Q;
819 pq[1] = pq[0]; /* specify valid address for disabled result */
806 820
807 return __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, scf, 821 return __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, scf,
808 len, flags); 822 len, flags);
@@ -1117,6 +1131,7 @@ static int __devinit ioat3_dma_self_test(struct ioatdma_device *device)
1117int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) 1131int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
1118{ 1132{
1119 struct pci_dev *pdev = device->pdev; 1133 struct pci_dev *pdev = device->pdev;
1134 int dca_en = system_has_dca_enabled(pdev);
1120 struct dma_device *dma; 1135 struct dma_device *dma;
1121 struct dma_chan *c; 1136 struct dma_chan *c;
1122 struct ioat_chan_common *chan; 1137 struct ioat_chan_common *chan;
@@ -1137,6 +1152,11 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
1137 dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock; 1152 dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
1138 1153
1139 cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET); 1154 cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
1155
1156 /* dca is incompatible with raid operations */
1157 if (dca_en && (cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
1158 cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
1159
1140 if (cap & IOAT_CAP_XOR) { 1160 if (cap & IOAT_CAP_XOR) {
1141 is_raid_device = true; 1161 is_raid_device = true;
1142 dma->max_xor = 8; 1162 dma->max_xor = 8;
@@ -1186,6 +1206,16 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
1186 device->timer_fn = ioat2_timer_event; 1206 device->timer_fn = ioat2_timer_event;
1187 } 1207 }
1188 1208
1209 #ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
1210 dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
1211 dma->device_prep_dma_pq_val = NULL;
1212 #endif
1213
1214 #ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
1215 dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
1216 dma->device_prep_dma_xor_val = NULL;
1217 #endif
1218
1189 /* -= IOAT ver.3 workarounds =- */ 1219 /* -= IOAT ver.3 workarounds =- */
1190 /* Write CHANERRMSK_INT with 3E07h to mask out the errors 1220 /* Write CHANERRMSK_INT with 3E07h to mask out the errors
1191 * that can cause stability issues for IOAT ver.3 1221 * that can cause stability issues for IOAT ver.3
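
Several of the dma_v3.c hunks above enforce the same invariant: when DMA_PREP_PQ_DISABLE_P or DMA_PREP_PQ_DISABLE_Q is set, the unused destination slot is aliased to the enabled one instead of the old ~0 sentinel, presumably because the engine still expects a valid address in every descriptor field. A reduced sketch of that fixup (dma_addr_t and the flag bits below are stand-ins):

    #include <assert.h>

    typedef unsigned long dma_addr_t;

    /* stand-ins for DMA_PREP_PQ_DISABLE_P/Q */
    #define PQ_DISABLE_P (1 << 0)
    #define PQ_DISABLE_Q (1 << 1)

    /* A disabled result still needs a valid address in the descriptor,
     * so alias it to the enabled output instead of a ~0 sentinel. */
    static void fixup_pq_dst(dma_addr_t dst[2], unsigned long flags)
    {
            if (flags & PQ_DISABLE_P)
                    dst[0] = dst[1];
            if (flags & PQ_DISABLE_Q)
                    dst[1] = dst[0];
    }

    int main(void)
    {
            dma_addr_t dst[2] = { 0, 0x1000 }; /* P unused, Q at 0x1000 */

            fixup_pq_dst(dst, PQ_DISABLE_P);
            assert(dst[0] == 0x1000); /* P slot now holds a valid address */
            return 0;
    }
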
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 99afb12bd409..60e675455b6a 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -39,6 +39,8 @@
39#define IOAT_VER_3_0 0x30 /* Version 3.0 */ 39#define IOAT_VER_3_0 0x30 /* Version 3.0 */
40#define IOAT_VER_3_2 0x32 /* Version 3.2 */ 40#define IOAT_VER_3_2 0x32 /* Version 3.2 */
41 41
42int system_has_dca_enabled(struct pci_dev *pdev);
43
42struct ioat_dma_descriptor { 44struct ioat_dma_descriptor {
43 uint32_t size; 45 uint32_t size;
44 union { 46 union {
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index 63038e18ab03..f015ec196700 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -92,9 +92,7 @@
92#define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004 92#define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004
93#define IOAT_CHANCTRL_INT_REARM 0x0001 93#define IOAT_CHANCTRL_INT_REARM 0x0001
94#define IOAT_CHANCTRL_RUN (IOAT_CHANCTRL_INT_REARM |\ 94#define IOAT_CHANCTRL_RUN (IOAT_CHANCTRL_INT_REARM |\
95 IOAT_CHANCTRL_ERR_COMPLETION_EN |\ 95 IOAT_CHANCTRL_ANY_ERR_ABORT_EN)
96 IOAT_CHANCTRL_ANY_ERR_ABORT_EN |\
97 IOAT_CHANCTRL_ERR_INT_EN)
98 96
99#define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatibility */ 97#define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatibility */
100#define IOAT_DMA_COMP_V1 0x0001 /* Compatibility with DMA version 1 */ 98#define IOAT_DMA_COMP_V1 0x0001 /* Compatibility with DMA version 1 */
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index b3b065c4e5c1..034ecf0ace03 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -640,17 +640,16 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
640#endif 640#endif
641 struct sh_dmae_device *shdev; 641 struct sh_dmae_device *shdev;
642 642
643 /* get platform data */
644 if (!pdev->dev.platform_data)
645 return -ENODEV;
646
643 shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL); 647 shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
644 if (!shdev) { 648 if (!shdev) {
645 dev_err(&pdev->dev, "Not enough memory\n"); 649 dev_err(&pdev->dev, "Not enough memory\n");
646 err = -ENOMEM; 650 return -ENOMEM;
647 goto shdev_err;
648 } 651 }
649 652
650 /* get platform data */
651 if (!pdev->dev.platform_data)
652 goto shdev_err;
653
654 /* platform data */ 653 /* platform data */
655 memcpy(&shdev->pdata, pdev->dev.platform_data, 654 memcpy(&shdev->pdata, pdev->dev.platform_data,
656 sizeof(struct sh_dmae_pdata)); 655 sizeof(struct sh_dmae_pdata));
@@ -722,7 +721,6 @@ eirq_err:
722rst_err: 721rst_err:
723 kfree(shdev); 722 kfree(shdev);
724 723
725shdev_err:
726 return err; 724 return err;
727} 725}
728 726
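
The reordering in sh_dmae_probe() is more than cosmetic: the old flow allocated shdev first and, on missing platform data, jumped to shdev_err, leaking the allocation and returning an err that was never set on that path. Validating inputs before allocating removes the extra label entirely. A generic sketch of the check-before-allocate pattern:

    #include <stdlib.h>

    struct dev { int configured; };

    static int probe(const void *platform_data)
    {
            struct dev *d;

            /* validate inputs first: nothing to unwind on failure */
            if (!platform_data)
                    return -19; /* -ENODEV */

            d = calloc(1, sizeof(*d));
            if (!d)
                    return -12; /* -ENOMEM */

            d->configured = 1;
            /* ... register d ... */
            return 0;
    }

    int main(void)
    {
            return probe(0) == -19 ? 0 : 1;
    }
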
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 5d524254499e..94260aa76aa3 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -275,7 +275,7 @@ static void log_irqs(u32 evt)
275 !(evt & OHCI1394_busReset)) 275 !(evt & OHCI1394_busReset))
276 return; 276 return;
277 277
278 fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt, 278 fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
279 evt & OHCI1394_selfIDComplete ? " selfID" : "", 279 evt & OHCI1394_selfIDComplete ? " selfID" : "",
280 evt & OHCI1394_RQPkt ? " AR_req" : "", 280 evt & OHCI1394_RQPkt ? " AR_req" : "",
281 evt & OHCI1394_RSPkt ? " AR_resp" : "", 281 evt & OHCI1394_RSPkt ? " AR_resp" : "",
@@ -286,6 +286,7 @@ static void log_irqs(u32 evt)
286 evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "", 286 evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "",
287 evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "", 287 evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "",
288 evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "", 288 evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
289 evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "",
289 evt & OHCI1394_regAccessFail ? " regAccessFail" : "", 290 evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
290 evt & OHCI1394_busReset ? " busReset" : "", 291 evt & OHCI1394_busReset ? " busReset" : "",
291 evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt | 292 evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
@@ -293,6 +294,7 @@ static void log_irqs(u32 evt)
293 OHCI1394_respTxComplete | OHCI1394_isochRx | 294 OHCI1394_respTxComplete | OHCI1394_isochRx |
294 OHCI1394_isochTx | OHCI1394_postedWriteErr | 295 OHCI1394_isochTx | OHCI1394_postedWriteErr |
295 OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds | 296 OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
297 OHCI1394_cycleInconsistent |
296 OHCI1394_regAccessFail | OHCI1394_busReset) 298 OHCI1394_regAccessFail | OHCI1394_busReset)
297 ? " ?" : ""); 299 ? " ?" : "");
298} 300}
@@ -1439,6 +1441,17 @@ static irqreturn_t irq_handler(int irq, void *data)
1439 OHCI1394_LinkControl_cycleMaster); 1441 OHCI1394_LinkControl_cycleMaster);
1440 } 1442 }
1441 1443
1444 if (unlikely(event & OHCI1394_cycleInconsistent)) {
1445 /*
1446 * We need to clear this event bit in order to make
1447 * cycleMatch isochronous I/O work. In theory we should
1448 * stop active cycleMatch iso contexts now and restart
1449 * them at least two cycles later. (FIXME?)
1450 */
1451 if (printk_ratelimit())
1452 fw_notify("isochronous cycle inconsistent\n");
1453 }
1454
1442 if (event & OHCI1394_cycle64Seconds) { 1455 if (event & OHCI1394_cycle64Seconds) {
1443 cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer); 1456 cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1444 if ((cycle_time & 0x80000000) == 0) 1457 if ((cycle_time & 0x80000000) == 0)
@@ -1528,6 +1541,7 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
1528 OHCI1394_reqTxComplete | OHCI1394_respTxComplete | 1541 OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
1529 OHCI1394_isochRx | OHCI1394_isochTx | 1542 OHCI1394_isochRx | OHCI1394_isochTx |
1530 OHCI1394_postedWriteErr | OHCI1394_cycleTooLong | 1543 OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
1544 OHCI1394_cycleInconsistent |
1531 OHCI1394_cycle64Seconds | OHCI1394_regAccessFail | 1545 OHCI1394_cycle64Seconds | OHCI1394_regAccessFail |
1532 OHCI1394_masterIntEnable); 1546 OHCI1394_masterIntEnable);
1533 if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) 1547 if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
@@ -1890,15 +1904,30 @@ static int handle_it_packet(struct context *context,
1890{ 1904{
1891 struct iso_context *ctx = 1905 struct iso_context *ctx =
1892 container_of(context, struct iso_context, context); 1906 container_of(context, struct iso_context, context);
1907 int i;
1908 struct descriptor *pd;
1893 1909
1894 if (last->transfer_status == 0) 1910 for (pd = d; pd <= last; pd++)
1895 /* This descriptor isn't done yet, stop iteration. */ 1911 if (pd->transfer_status)
1912 break;
1913 if (pd > last)
1914 /* Descriptor(s) not done yet, stop iteration */
1896 return 0; 1915 return 0;
1897 1916
1898 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) 1917 i = ctx->header_length;
1918 if (i + 4 < PAGE_SIZE) {
1919 /* Present this value as big-endian to match the receive code */
1920 *(__be32 *)(ctx->header + i) = cpu_to_be32(
1921 ((u32)le16_to_cpu(pd->transfer_status) << 16) |
1922 le16_to_cpu(pd->res_count));
1923 ctx->header_length += 4;
1924 }
1925 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
1899 ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count), 1926 ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
1900 0, NULL, ctx->base.callback_data); 1927 ctx->header_length, ctx->header,
1901 1928 ctx->base.callback_data);
1929 ctx->header_length = 0;
1930 }
1902 return 1; 1931 return 1;
1903} 1932}
1904 1933
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index f831ea159291..96eddd17e050 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -92,6 +92,7 @@ config DRM_I830
92config DRM_I915 92config DRM_I915
93 tristate "i915 driver" 93 tristate "i915 driver"
94 depends on AGP_INTEL 94 depends on AGP_INTEL
95 select SHMEM
95 select DRM_KMS_HELPER 96 select DRM_KMS_HELPER
96 select FB_CFB_FILLRECT 97 select FB_CFB_FILLRECT
97 select FB_CFB_COPYAREA 98 select FB_CFB_COPYAREA
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index cea665d86dd3..b54ba63d506e 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -662,6 +662,12 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
662 return NULL; 662 return NULL;
663 } 663 }
664 664
665 /* Some EDIDs have bogus h/vtotal values */
666 if (mode->hsync_end > mode->htotal)
667 mode->htotal = mode->hsync_end + 1;
668 if (mode->vsync_end > mode->vtotal)
669 mode->vtotal = mode->vsync_end + 1;
670
665 drm_mode_set_name(mode); 671 drm_mode_set_name(mode);
666 672
667 if (pt->misc & DRM_EDID_PT_INTERLACED) 673 if (pt->misc & DRM_EDID_PT_INTERLACED)
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index dc8e374a0b55..65ef011fa8ba 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -599,7 +599,7 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
599 struct drm_framebuffer *fb = fb_helper->fb; 599 struct drm_framebuffer *fb = fb_helper->fb;
600 int depth; 600 int depth;
601 601
602 if (var->pixclock == -1 || !var->pixclock) 602 if (var->pixclock != 0)
603 return -EINVAL; 603 return -EINVAL;
604 604
605 /* Need to resize the fb object !!! */ 605 /* Need to resize the fb object !!! */
@@ -691,7 +691,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
691 int ret; 691 int ret;
692 int i; 692 int i;
693 693
694 if (var->pixclock != -1) { 694 if (var->pixclock != 0) {
695 DRM_ERROR("PIXEL CLOCK SET\n"); 695 DRM_ERROR("PIXEL CLOCK SET\n");
696 return -EINVAL; 696 return -EINVAL;
697 } 697 }
@@ -904,7 +904,7 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
904 fb_helper->fb = fb; 904 fb_helper->fb = fb;
905 905
906 if (new_fb) { 906 if (new_fb) {
907 info->var.pixclock = -1; 907 info->var.pixclock = 0;
908 if (register_framebuffer(info) < 0) 908 if (register_framebuffer(info) < 0)
909 return -EINVAL; 909 return -EINVAL;
910 } else { 910 } else {
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 80391995bdec..e9dbb481c469 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -552,7 +552,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
552 vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND; 552 vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
553 vma->vm_ops = obj->dev->driver->gem_vm_ops; 553 vma->vm_ops = obj->dev->driver->gem_vm_ops;
554 vma->vm_private_data = map->handle; 554 vma->vm_private_data = map->handle;
555 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); 555 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
556 556
557 /* Take a ref for this mapping of the object, so that the fault 557 /* Take a ref for this mapping of the object, so that the fault
558 * handler can dereference the mmap offset's pointer to the object. 558 * handler can dereference the mmap offset's pointer to the object.
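
The drm_gem_mmap() fix derives the page protection from the vm_flags that were just OR-ed in, rather than write-combining the cached vma->vm_page_prot, which was computed before VM_IO and friends were added and does not track later flag updates. A toy illustration of why the recompute matters (prot_from_flags() stands in for vm_get_page_prot()):

    #include <assert.h>

    struct vma { unsigned vm_flags; unsigned vm_page_prot; };

    /* stand-in for vm_get_page_prot(): protection derived from flags */
    static unsigned prot_from_flags(unsigned flags) { return flags & 0xff; }

    int main(void)
    {
            struct vma v = { .vm_flags = 0x03,
                             .vm_page_prot = 0x03 /* cached at creation */ };

            v.vm_flags |= 0x10; /* e.g. VM_IO added during mmap setup */

            /* stale: the cached protection ignores the flag just added */
            assert((v.vm_page_prot & 0x10) == 0);
            /* fixed: recompute from the updated flags */
            v.vm_page_prot = prot_from_flags(v.vm_flags);
            assert(v.vm_page_prot & 0x10);
            return 0;
    }
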
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index c861d80fd779..97dc5a4f0de4 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -103,6 +103,11 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
103 return child; 103 return child;
104} 104}
105 105
106/* drm_mm_pre_get() - pre-allocate drm_mm_node structure
107 * drm_mm: memory manager struct we are pre-allocating for
108 *
109 * Returns 0 on success or -ENOMEM if allocation fails.
110 */
106int drm_mm_pre_get(struct drm_mm *mm) 111int drm_mm_pre_get(struct drm_mm *mm)
107{ 112{
108 struct drm_mm_node *node; 113 struct drm_mm_node *node;
@@ -253,12 +258,14 @@ void drm_mm_put_block(struct drm_mm_node *cur)
253 prev_node->size += next_node->size; 258 prev_node->size += next_node->size;
254 list_del(&next_node->ml_entry); 259 list_del(&next_node->ml_entry);
255 list_del(&next_node->fl_entry); 260 list_del(&next_node->fl_entry);
261 spin_lock(&mm->unused_lock);
256 if (mm->num_unused < MM_UNUSED_TARGET) { 262 if (mm->num_unused < MM_UNUSED_TARGET) {
257 list_add(&next_node->fl_entry, 263 list_add(&next_node->fl_entry,
258 &mm->unused_nodes); 264 &mm->unused_nodes);
259 ++mm->num_unused; 265 ++mm->num_unused;
260 } else 266 } else
261 kfree(next_node); 267 kfree(next_node);
268 spin_unlock(&mm->unused_lock);
262 } else { 269 } else {
263 next_node->size += cur->size; 270 next_node->size += cur->size;
264 next_node->start = cur->start; 271 next_node->start = cur->start;
@@ -271,11 +278,13 @@ void drm_mm_put_block(struct drm_mm_node *cur)
271 list_add(&cur->fl_entry, &mm->fl_entry); 278 list_add(&cur->fl_entry, &mm->fl_entry);
272 } else { 279 } else {
273 list_del(&cur->ml_entry); 280 list_del(&cur->ml_entry);
281 spin_lock(&mm->unused_lock);
274 if (mm->num_unused < MM_UNUSED_TARGET) { 282 if (mm->num_unused < MM_UNUSED_TARGET) {
275 list_add(&cur->fl_entry, &mm->unused_nodes); 283 list_add(&cur->fl_entry, &mm->unused_nodes);
276 ++mm->num_unused; 284 ++mm->num_unused;
277 } else 285 } else
278 kfree(cur); 286 kfree(cur);
287 spin_unlock(&mm->unused_lock);
279 } 288 }
280} 289}
281 290
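
Both drm_mm_put_block() hunks take mm->unused_lock around the unused_nodes cache: the list and the num_unused counter are shared with the pre-allocation path (drm_mm_pre_get() above), so the capacity check and the list update must be atomic with respect to each other. A reduced userspace sketch of the same pattern:

    #include <pthread.h>
    #include <stdlib.h>

    struct node { struct node *next; };

    static struct node *unused_nodes;
    static int num_unused;
    static pthread_mutex_t unused_lock = PTHREAD_MUTEX_INITIALIZER;

    #define UNUSED_TARGET 4

    /* Return a node to the shared cache, or free it if the cache is full.
     * The check and the list update must happen under one lock. */
    static void put_node(struct node *n)
    {
            pthread_mutex_lock(&unused_lock);
            if (num_unused < UNUSED_TARGET) {
                    n->next = unused_nodes;
                    unused_nodes = n;
                    ++num_unused;
            } else {
                    free(n);
            }
            pthread_mutex_unlock(&unused_lock);
    }

    int main(void)
    {
            struct node *n = malloc(sizeof(*n));

            if (n)
                    put_node(n);
            return 0;
    }
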
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index f8ce9a3a420d..26bf0552b3cb 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -267,10 +267,10 @@ static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_co
267 uint32_t *mem; 267 uint32_t *mem;
268 268
269 for (page = 0; page < page_count; page++) { 269 for (page = 0; page < page_count; page++) {
270 mem = kmap(pages[page]); 270 mem = kmap_atomic(pages[page], KM_USER0);
271 for (i = 0; i < PAGE_SIZE; i += 4) 271 for (i = 0; i < PAGE_SIZE; i += 4)
272 seq_printf(m, "%08x : %08x\n", i, mem[i / 4]); 272 seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
273 kunmap(pages[page]); 273 kunmap_atomic(pages[page], KM_USER0);
274 } 274 }
275} 275}
276 276
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 57204e298975..a725f6591192 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -296,6 +296,7 @@ typedef struct drm_i915_private {
296 u32 saveVBLANK_A; 296 u32 saveVBLANK_A;
297 u32 saveVSYNC_A; 297 u32 saveVSYNC_A;
298 u32 saveBCLRPAT_A; 298 u32 saveBCLRPAT_A;
299 u32 saveTRANSACONF;
299 u32 saveTRANS_HTOTAL_A; 300 u32 saveTRANS_HTOTAL_A;
300 u32 saveTRANS_HBLANK_A; 301 u32 saveTRANS_HBLANK_A;
301 u32 saveTRANS_HSYNC_A; 302 u32 saveTRANS_HSYNC_A;
@@ -326,6 +327,7 @@ typedef struct drm_i915_private {
326 u32 saveVBLANK_B; 327 u32 saveVBLANK_B;
327 u32 saveVSYNC_B; 328 u32 saveVSYNC_B;
328 u32 saveBCLRPAT_B; 329 u32 saveBCLRPAT_B;
330 u32 saveTRANSBCONF;
329 u32 saveTRANS_HTOTAL_B; 331 u32 saveTRANS_HTOTAL_B;
330 u32 saveTRANS_HBLANK_B; 332 u32 saveTRANS_HBLANK_B;
331 u32 saveTRANS_HSYNC_B; 333 u32 saveTRANS_HSYNC_B;
@@ -414,6 +416,16 @@ typedef struct drm_i915_private {
414 u32 savePFB_WIN_SZ; 416 u32 savePFB_WIN_SZ;
415 u32 savePFA_WIN_POS; 417 u32 savePFA_WIN_POS;
416 u32 savePFB_WIN_POS; 418 u32 savePFB_WIN_POS;
419 u32 savePCH_DREF_CONTROL;
420 u32 saveDISP_ARB_CTL;
421 u32 savePIPEA_DATA_M1;
422 u32 savePIPEA_DATA_N1;
423 u32 savePIPEA_LINK_M1;
424 u32 savePIPEA_LINK_N1;
425 u32 savePIPEB_DATA_M1;
426 u32 savePIPEB_DATA_N1;
427 u32 savePIPEB_LINK_M1;
428 u32 savePIPEB_LINK_N1;
417 429
418 struct { 430 struct {
419 struct drm_mm gtt_space; 431 struct drm_mm gtt_space;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index c3ceffa46ea0..aa7fd82aa6eb 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -254,10 +254,15 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
254{ 254{
255 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 255 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
256 int ret = IRQ_NONE; 256 int ret = IRQ_NONE;
257 u32 de_iir, gt_iir; 257 u32 de_iir, gt_iir, de_ier;
258 u32 new_de_iir, new_gt_iir; 258 u32 new_de_iir, new_gt_iir;
259 struct drm_i915_master_private *master_priv; 259 struct drm_i915_master_private *master_priv;
260 260
261 /* disable master interrupt before clearing iir */
262 de_ier = I915_READ(DEIER);
263 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
264 (void)I915_READ(DEIER);
265
261 de_iir = I915_READ(DEIIR); 266 de_iir = I915_READ(DEIIR);
262 gt_iir = I915_READ(GTIIR); 267 gt_iir = I915_READ(GTIIR);
263 268
@@ -290,6 +295,9 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
290 gt_iir = new_gt_iir; 295 gt_iir = new_gt_iir;
291 } 296 }
292 297
298 I915_WRITE(DEIER, de_ier);
299 (void)I915_READ(DEIER);
300
293 return ret; 301 return ret;
294} 302}
295 303
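
The interrupt handler now clears the master enable bit in DEIER for the duration of IIR processing and restores it on exit. Each write is chased by a throwaway read of the same register -- a posting read -- which forces the write out of the chipset's write buffers before the code proceeds. The idiom, with hypothetical MMIO accessors standing in for I915_READ/I915_WRITE:

    #include <stdint.h>

    #define DE_MASTER_IRQ_CONTROL (1u << 31)
    #define DEIER 0

    static volatile uint32_t fake_regs[4]; /* stand-in MMIO window */

    static uint32_t mmio_read(unsigned reg) { return fake_regs[reg]; }
    static void mmio_write(unsigned reg, uint32_t v) { fake_regs[reg] = v; }

    static void irq_handler(void)
    {
            uint32_t de_ier = mmio_read(DEIER);

            /* disable further interrupts while the IIR is cleared */
            mmio_write(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
            (void)mmio_read(DEIER); /* posting read: flush the write */

            /* ... read and clear the interrupt identity registers ... */

            mmio_write(DEIER, de_ier); /* re-enable */
            (void)mmio_read(DEIER);
    }

    int main(void)
    {
            irq_handler();
            return 0;
    }
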
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 992d5617e798..6eec8171a44e 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -239,6 +239,11 @@ static void i915_save_modeset_reg(struct drm_device *dev)
239 if (drm_core_check_feature(dev, DRIVER_MODESET)) 239 if (drm_core_check_feature(dev, DRIVER_MODESET))
240 return; 240 return;
241 241
242 if (IS_IGDNG(dev)) {
243 dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
244 dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
245 }
246
242 /* Pipe & plane A info */ 247 /* Pipe & plane A info */
243 dev_priv->savePIPEACONF = I915_READ(PIPEACONF); 248 dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
244 dev_priv->savePIPEASRC = I915_READ(PIPEASRC); 249 dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
@@ -263,6 +268,11 @@ static void i915_save_modeset_reg(struct drm_device *dev)
263 dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); 268 dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
264 269
265 if (IS_IGDNG(dev)) { 270 if (IS_IGDNG(dev)) {
271 dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1);
272 dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1);
273 dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1);
274 dev_priv->savePIPEA_LINK_N1 = I915_READ(PIPEA_LINK_N1);
275
266 dev_priv->saveFDI_TXA_CTL = I915_READ(FDI_TXA_CTL); 276 dev_priv->saveFDI_TXA_CTL = I915_READ(FDI_TXA_CTL);
267 dev_priv->saveFDI_RXA_CTL = I915_READ(FDI_RXA_CTL); 277 dev_priv->saveFDI_RXA_CTL = I915_READ(FDI_RXA_CTL);
268 278
@@ -270,6 +280,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
270 dev_priv->savePFA_WIN_SZ = I915_READ(PFA_WIN_SZ); 280 dev_priv->savePFA_WIN_SZ = I915_READ(PFA_WIN_SZ);
271 dev_priv->savePFA_WIN_POS = I915_READ(PFA_WIN_POS); 281 dev_priv->savePFA_WIN_POS = I915_READ(PFA_WIN_POS);
272 282
283 dev_priv->saveTRANSACONF = I915_READ(TRANSACONF);
273 dev_priv->saveTRANS_HTOTAL_A = I915_READ(TRANS_HTOTAL_A); 284 dev_priv->saveTRANS_HTOTAL_A = I915_READ(TRANS_HTOTAL_A);
274 dev_priv->saveTRANS_HBLANK_A = I915_READ(TRANS_HBLANK_A); 285 dev_priv->saveTRANS_HBLANK_A = I915_READ(TRANS_HBLANK_A);
275 dev_priv->saveTRANS_HSYNC_A = I915_READ(TRANS_HSYNC_A); 286 dev_priv->saveTRANS_HSYNC_A = I915_READ(TRANS_HSYNC_A);
@@ -314,6 +325,11 @@ static void i915_save_modeset_reg(struct drm_device *dev)
314 dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B); 325 dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
315 326
316 if (IS_IGDNG(dev)) { 327 if (IS_IGDNG(dev)) {
328 dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1);
329 dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1);
330 dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1);
331 dev_priv->savePIPEB_LINK_N1 = I915_READ(PIPEB_LINK_N1);
332
317 dev_priv->saveFDI_TXB_CTL = I915_READ(FDI_TXB_CTL); 333 dev_priv->saveFDI_TXB_CTL = I915_READ(FDI_TXB_CTL);
318 dev_priv->saveFDI_RXB_CTL = I915_READ(FDI_RXB_CTL); 334 dev_priv->saveFDI_RXB_CTL = I915_READ(FDI_RXB_CTL);
319 335
@@ -321,6 +337,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
321 dev_priv->savePFB_WIN_SZ = I915_READ(PFB_WIN_SZ); 337 dev_priv->savePFB_WIN_SZ = I915_READ(PFB_WIN_SZ);
322 dev_priv->savePFB_WIN_POS = I915_READ(PFB_WIN_POS); 338 dev_priv->savePFB_WIN_POS = I915_READ(PFB_WIN_POS);
323 339
340 dev_priv->saveTRANSBCONF = I915_READ(TRANSBCONF);
324 dev_priv->saveTRANS_HTOTAL_B = I915_READ(TRANS_HTOTAL_B); 341 dev_priv->saveTRANS_HTOTAL_B = I915_READ(TRANS_HTOTAL_B);
325 dev_priv->saveTRANS_HBLANK_B = I915_READ(TRANS_HBLANK_B); 342 dev_priv->saveTRANS_HBLANK_B = I915_READ(TRANS_HBLANK_B);
326 dev_priv->saveTRANS_HSYNC_B = I915_READ(TRANS_HSYNC_B); 343 dev_priv->saveTRANS_HSYNC_B = I915_READ(TRANS_HSYNC_B);
@@ -368,6 +385,11 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
368 fpb1_reg = FPB1; 385 fpb1_reg = FPB1;
369 } 386 }
370 387
388 if (IS_IGDNG(dev)) {
389 I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL);
390 I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL);
391 }
392
371 /* Pipe & plane A info */ 393 /* Pipe & plane A info */
372 /* Prime the clock */ 394 /* Prime the clock */
373 if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { 395 if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
@@ -395,6 +417,11 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
395 I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); 417 I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
396 418
397 if (IS_IGDNG(dev)) { 419 if (IS_IGDNG(dev)) {
420 I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
421 I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
422 I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
423 I915_WRITE(PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1);
424
398 I915_WRITE(FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL); 425 I915_WRITE(FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL);
399 I915_WRITE(FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL); 426 I915_WRITE(FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL);
400 427
@@ -402,6 +429,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
402 I915_WRITE(PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ); 429 I915_WRITE(PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ);
403 I915_WRITE(PFA_WIN_POS, dev_priv->savePFA_WIN_POS); 430 I915_WRITE(PFA_WIN_POS, dev_priv->savePFA_WIN_POS);
404 431
432 I915_WRITE(TRANSACONF, dev_priv->saveTRANSACONF);
405 I915_WRITE(TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A); 433 I915_WRITE(TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A);
406 I915_WRITE(TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A); 434 I915_WRITE(TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A);
407 I915_WRITE(TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A); 435 I915_WRITE(TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A);
@@ -439,7 +467,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
439 /* Actually enable it */ 467 /* Actually enable it */
440 I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B); 468 I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
441 DRM_UDELAY(150); 469 DRM_UDELAY(150);
442 if (IS_I965G(dev)) 470 if (IS_I965G(dev) && !IS_IGDNG(dev))
443 I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); 471 I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
444 DRM_UDELAY(150); 472 DRM_UDELAY(150);
445 473
@@ -454,6 +482,11 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
454 I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); 482 I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
455 483
456 if (IS_IGDNG(dev)) { 484 if (IS_IGDNG(dev)) {
485 I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
486 I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
487 I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
488 I915_WRITE(PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1);
489
457 I915_WRITE(FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL); 490 I915_WRITE(FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL);
458 I915_WRITE(FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL); 491 I915_WRITE(FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL);
459 492
@@ -461,6 +494,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
461 I915_WRITE(PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ); 494 I915_WRITE(PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ);
462 I915_WRITE(PFB_WIN_POS, dev_priv->savePFB_WIN_POS); 495 I915_WRITE(PFB_WIN_POS, dev_priv->savePFB_WIN_POS);
463 496
497 I915_WRITE(TRANSBCONF, dev_priv->saveTRANSBCONF);
464 I915_WRITE(TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B); 498 I915_WRITE(TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B);
465 I915_WRITE(TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B); 499 I915_WRITE(TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B);
466 I915_WRITE(TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B); 500 I915_WRITE(TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 212e22740fc1..e5051446c48e 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -262,8 +262,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
262 } while (time_after(timeout, jiffies)); 262 } while (time_after(timeout, jiffies));
263 } 263 }
264 264
265 if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) == 265 if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) !=
266 CRT_HOTPLUG_MONITOR_COLOR) 266 CRT_HOTPLUG_MONITOR_NONE)
267 return true; 267 return true;
268 268
269 return false; 269 return false;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3ba6546b7c7f..099f420de57a 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -863,10 +863,8 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
863 struct drm_device *dev = crtc->dev; 863 struct drm_device *dev = crtc->dev;
864 struct drm_i915_private *dev_priv = dev->dev_private; 864 struct drm_i915_private *dev_priv = dev->dev_private;
865 intel_clock_t clock; 865 intel_clock_t clock;
866 int max_n;
867 bool found;
868 int err_most = 47; 866 int err_most = 47;
869 found = false; 867 int err_min = 10000;
870 868
871 /* eDP has only 2 clock choice, no n/m/p setting */ 869 /* eDP has only 2 clock choice, no n/m/p setting */
872 if (HAS_eDP) 870 if (HAS_eDP)
@@ -890,10 +888,9 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
890 } 888 }
891 889
892 memset(best_clock, 0, sizeof(*best_clock)); 890 memset(best_clock, 0, sizeof(*best_clock));
893 max_n = limit->n.max;
894 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { 891 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
895 /* based on hardware requirement prefer smaller n to precision */ 892 /* based on hardware requirement prefer smaller n to precision */
896 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { 893 for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
897 /* based on hardware requirement prefer larger m1,m2 */ 894 /* based on hardware requirement prefer larger m1,m2 */
898 for (clock.m1 = limit->m1.max; 895 for (clock.m1 = limit->m1.max;
899 clock.m1 >= limit->m1.min; clock.m1--) { 896 clock.m1 >= limit->m1.min; clock.m1--) {
@@ -907,18 +904,18 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
907 this_err = abs((10000 - (target*10000/clock.dot))); 904 this_err = abs((10000 - (target*10000/clock.dot)));
908 if (this_err < err_most) { 905 if (this_err < err_most) {
909 *best_clock = clock; 906 *best_clock = clock;
910 err_most = this_err;
911 max_n = clock.n;
912 found = true;
913 /* found on first matching */ 907 /* found on first matching */
914 goto out; 908 goto out;
909 } else if (this_err < err_min) {
910 *best_clock = clock;
911 err_min = this_err;
915 } 912 }
916 } 913 }
917 } 914 }
918 } 915 }
919 } 916 }
920out: 917out:
921 return found; 918 return true;
922} 919}
923 920
924/* DisplayPort has only two frequencies, 162MHz and 270MHz */ 921/* DisplayPort has only two frequencies, 162MHz and 270MHz */
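
intel_igdng_find_best_PLL() no longer reports failure: it returns the first candidate whose error is below err_most (47, i.e. 0.47% in the function's 0.01% units) and otherwise falls back to the best candidate seen, so *best_clock is always populated and the function always returns true. The search skeleton, reduced to one dimension:

    #include <assert.h>

    /* Return the first index whose error is under good_enough,
     * otherwise the index with the minimum error seen. */
    static int pick_best(const int *err, int n, int good_enough)
    {
            int best = 0, i;

            for (i = 0; i < n; i++) {
                    if (err[i] < good_enough)
                            return i; /* found on first matching */
                    if (err[i] < err[best])
                            best = i;
            }
            return best;
    }

    int main(void)
    {
            int err[] = { 80, 60, 90, 55 };

            assert(pick_best(err, 4, 47) == 3); /* none under 47: min error */
            return 0;
    }
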
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 663ab6de0b58..c33451aec1bd 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -77,14 +77,32 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
77 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; 77 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
78 u32 temp; 78 u32 temp;
79 79
80 if (mode != DRM_MODE_DPMS_ON) { 80 temp = I915_READ(hdmi_priv->sdvox_reg);
81 temp = I915_READ(hdmi_priv->sdvox_reg); 81
82 /* HW workaround, need to toggle enable bit off and on for 12bpc, but
83 * we do this anyway since it proved more stable in testing.
84 */
85 if (IS_IGDNG(dev)) {
82 I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE); 86 I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE);
87 POSTING_READ(hdmi_priv->sdvox_reg);
88 }
89
90 if (mode != DRM_MODE_DPMS_ON) {
91 temp &= ~SDVO_ENABLE;
83 } else { 92 } else {
84 temp = I915_READ(hdmi_priv->sdvox_reg); 93 temp |= SDVO_ENABLE;
85 I915_WRITE(hdmi_priv->sdvox_reg, temp | SDVO_ENABLE);
86 } 94 }
95
96 I915_WRITE(hdmi_priv->sdvox_reg, temp);
87 POSTING_READ(hdmi_priv->sdvox_reg); 97 POSTING_READ(hdmi_priv->sdvox_reg);
98
99 /* HW workaround, need to write this twice for an issue that may
100 * result in the first write getting masked.
101 */
102 if (IS_IGDNG(dev)) {
103 I915_WRITE(hdmi_priv->sdvox_reg, temp);
104 POSTING_READ(hdmi_priv->sdvox_reg);
105 }
88} 106}
89 107
90static void intel_hdmi_save(struct drm_connector *connector) 108static void intel_hdmi_save(struct drm_connector *connector)
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 901befe03da2..d67c42555ab9 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -107,6 +107,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
107 base += 3; 107 base += 3;
108 break; 108 break;
109 case ATOM_IIO_WRITE: 109 case ATOM_IIO_WRITE:
110 (void)ctx->card->reg_read(ctx->card, CU16(base + 1));
110 ctx->card->reg_write(ctx->card, CU16(base + 1), temp); 111 ctx->card->reg_write(ctx->card, CU16(base + 1), temp);
111 base += 3; 112 base += 3;
112 break; 113 break;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 757f5cd37744..224506a2f7b1 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -519,6 +519,7 @@ typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
519 * AGP 519 * AGP
520 */ 520 */
521int radeon_agp_init(struct radeon_device *rdev); 521int radeon_agp_init(struct radeon_device *rdev);
522void radeon_agp_resume(struct radeon_device *rdev);
522void radeon_agp_fini(struct radeon_device *rdev); 523void radeon_agp_fini(struct radeon_device *rdev);
523 524
524 525
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index 23ea9955ac59..54bf49a6d676 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -237,6 +237,18 @@ int radeon_agp_init(struct radeon_device *rdev)
237#endif 237#endif
238} 238}
239 239
240void radeon_agp_resume(struct radeon_device *rdev)
241{
242#if __OS_HAS_AGP
243 int r;
244 if (rdev->flags & RADEON_IS_AGP) {
245 r = radeon_agp_init(rdev);
246 if (r)
247 dev_warn(rdev->dev, "radeon AGP reinit failed\n");
248 }
249#endif
250}
251
240void radeon_agp_fini(struct radeon_device *rdev) 252void radeon_agp_fini(struct radeon_device *rdev)
241{ 253{
242#if __OS_HAS_AGP 254#if __OS_HAS_AGP
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index fce4c4087fda..29763ceae3af 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -566,8 +566,9 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
566 radeon_i2c_do_lock(radeon_connector, 0); 566 radeon_i2c_do_lock(radeon_connector, 0);
567 567
568 if (!radeon_connector->edid) { 568 if (!radeon_connector->edid) {
569 DRM_ERROR("DDC responded but not EDID found for %s\n", 569 DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
570 drm_get_connector_name(connector)); 570 drm_get_connector_name(connector));
571 ret = connector_status_connected;
571 } else { 572 } else {
572 radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL); 573 radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
573 574
@@ -720,8 +721,8 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
720 radeon_i2c_do_lock(radeon_connector, 0); 721 radeon_i2c_do_lock(radeon_connector, 0);
721 722
722 if (!radeon_connector->edid) { 723 if (!radeon_connector->edid) {
723 DRM_ERROR("DDC responded but not EDID found for %s\n", 724 DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
724 drm_get_connector_name(connector)); 725 drm_get_connector_name(connector));
725 } else { 726 } else {
726 radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL); 727 radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
727 728
@@ -1149,6 +1150,13 @@ radeon_add_legacy_connector(struct drm_device *dev,
1149 if (ret) 1150 if (ret)
1150 goto failed; 1151 goto failed;
1151 radeon_connector->dac_load_detect = true; 1152 radeon_connector->dac_load_detect = true;
1153 /* RS400,RC410,RS480 chipsets seem to report a lot
1154 * of false positives on load detect; we haven't yet
1155 * found a way to make load detect reliable on those
1156 * chipsets, thus just disable it for TV.
1157 */
1158 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
1159 radeon_connector->dac_load_detect = false;
1152 drm_connector_attach_property(&radeon_connector->base, 1160 drm_connector_attach_property(&radeon_connector->base,
1153 rdev->mode_info.load_detect_property, 1161 rdev->mode_info.load_detect_property,
1154 1); 1162 1);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index e3f9edfa40fe..41bb76fbe734 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -688,6 +688,8 @@ int radeon_resume_kms(struct drm_device *dev)
688 return -1; 688 return -1;
689 } 689 }
690 pci_set_master(dev->pdev); 690 pci_set_master(dev->pdev);
691 /* resume AGP if in use */
692 radeon_agp_resume(rdev);
691 radeon_resume(rdev); 693 radeon_resume(rdev);
692 radeon_restore_bios_scratch_regs(rdev); 694 radeon_restore_bios_scratch_regs(rdev);
693 fb_set_suspend(rdev->fbdev_info, 0); 695 fb_set_suspend(rdev->fbdev_info, 0);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 7935f793bf62..ba68c9fe90a1 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -137,8 +137,6 @@ int rv515_mc_wait_for_idle(struct radeon_device *rdev)
137 137
138void rv515_vga_render_disable(struct radeon_device *rdev) 138void rv515_vga_render_disable(struct radeon_device *rdev)
139{ 139{
140 WREG32(R_000330_D1VGA_CONTROL, 0);
141 WREG32(R_000338_D2VGA_CONTROL, 0);
142 WREG32(R_000300_VGA_RENDER_CONTROL, 140 WREG32(R_000300_VGA_RENDER_CONTROL,
143 RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL); 141 RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
144} 142}
@@ -382,7 +380,6 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
382 save->d2crtc_control = RREG32(R_006880_D2CRTC_CONTROL); 380 save->d2crtc_control = RREG32(R_006880_D2CRTC_CONTROL);
383 381
384 /* Stop all video */ 382 /* Stop all video */
385 WREG32(R_000330_D1VGA_CONTROL, 0);
386 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0); 383 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
387 WREG32(R_000300_VGA_RENDER_CONTROL, 0); 384 WREG32(R_000300_VGA_RENDER_CONTROL, 0);
388 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1); 385 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
@@ -391,6 +388,8 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
391 WREG32(R_006880_D2CRTC_CONTROL, 0); 388 WREG32(R_006880_D2CRTC_CONTROL, 0);
392 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0); 389 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
393 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0); 390 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
391 WREG32(R_000330_D1VGA_CONTROL, 0);
392 WREG32(R_000338_D2VGA_CONTROL, 0);
394} 393}
395 394
396void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save) 395void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
@@ -404,14 +403,14 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
404 WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control); 403 WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
405 mdelay(1); 404 mdelay(1);
406 /* Restore video state */ 405 /* Restore video state */
406 WREG32(R_000330_D1VGA_CONTROL, save->d1vga_control);
407 WREG32(R_000338_D2VGA_CONTROL, save->d2vga_control);
407 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1); 408 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
408 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1); 409 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1);
409 WREG32(R_006080_D1CRTC_CONTROL, save->d1crtc_control); 410 WREG32(R_006080_D1CRTC_CONTROL, save->d1crtc_control);
410 WREG32(R_006880_D2CRTC_CONTROL, save->d2crtc_control); 411 WREG32(R_006880_D2CRTC_CONTROL, save->d2crtc_control);
411 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0); 412 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
412 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0); 413 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
413 WREG32(R_000330_D1VGA_CONTROL, save->d1vga_control);
414 WREG32(R_000338_D2VGA_CONTROL, save->d2vga_control);
415 WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control); 414 WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
416} 415}
417 416
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index 6ff6c20f1e78..fbab6846ae64 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -19,7 +19,9 @@
19#include <linux/completion.h> 19#include <linux/completion.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/i2c-pnx.h> 21#include <linux/i2c-pnx.h>
22#include <linux/io.h>
22#include <mach/hardware.h> 23#include <mach/hardware.h>
24#include <mach/i2c.h>
23#include <asm/irq.h> 25#include <asm/irq.h>
24#include <asm/uaccess.h> 26#include <asm/uaccess.h>
25 27
@@ -54,6 +56,9 @@ static inline void i2c_pnx_arm_timer(struct i2c_adapter *adap)
54 struct timer_list *timer = &data->mif.timer; 56 struct timer_list *timer = &data->mif.timer;
55 int expires = I2C_PNX_TIMEOUT / (1000 / HZ); 57 int expires = I2C_PNX_TIMEOUT / (1000 / HZ);
56 58
59 if (expires <= 1)
60 expires = 2;
61
57 del_timer_sync(timer); 62 del_timer_sync(timer);
58 63
59 dev_dbg(&adap->dev, "Timer armed at %lu plus %u jiffies.\n", 64 dev_dbg(&adap->dev, "Timer armed at %lu plus %u jiffies.\n",
@@ -645,7 +650,7 @@ static int __devinit i2c_pnx_probe(struct platform_device *pdev)
645 return 0; 650 return 0;
646 651
647out_irq: 652out_irq:
648 free_irq(alg_data->irq, alg_data); 653 free_irq(alg_data->irq, i2c_pnx->adapter);
649out_clock: 654out_clock:
650 i2c_pnx->set_clock_stop(pdev); 655 i2c_pnx->set_clock_stop(pdev);
651out_unmap: 656out_unmap:
@@ -664,7 +669,7 @@ static int __devexit i2c_pnx_remove(struct platform_device *pdev)
664 struct i2c_adapter *adap = i2c_pnx->adapter; 669 struct i2c_adapter *adap = i2c_pnx->adapter;
665 struct i2c_pnx_algo_data *alg_data = adap->algo_data; 670 struct i2c_pnx_algo_data *alg_data = adap->algo_data;
666 671
667 free_irq(alg_data->irq, alg_data); 672 free_irq(alg_data->irq, i2c_pnx->adapter);
668 i2c_del_adapter(adap); 673 i2c_del_adapter(adap);
669 i2c_pnx->set_clock_stop(pdev); 674 i2c_pnx->set_clock_stop(pdev);
670 iounmap((void *)alg_data->ioaddr); 675 iounmap((void *)alg_data->ioaddr);
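
The timer fix guards against I2C_PNX_TIMEOUT / (1000 / HZ) truncating to 0 or 1 on low-HZ kernels: a one-jiffy timeout may expire after less than a full tick, so it could fire before the transfer has had any chance to run, and clamping to two jiffies guarantees a real delay. The free_irq() changes pass the adapter pointer, presumably matching the cookie that request_irq() was given, which is required for the handler to actually be released. A sketch of the rounding problem:

    #include <stdio.h>

    /* jiffies for `ms` with truncating division, as in the old code */
    static unsigned long to_jiffies(unsigned ms, unsigned hz)
    {
            unsigned long expires = ms / (1000 / hz);

            if (expires <= 1) /* could fire (almost) immediately */
                    expires = 2;
            return expires;
    }

    int main(void)
    {
            printf("%lu\n", to_jiffies(10, 100)); /* 1 jiffy, clamped to 2 */
            return 0;
    }
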
diff --git a/drivers/i2c/chips/tsl2550.c b/drivers/i2c/chips/tsl2550.c
index aa96bd2d27ea..a0702f36a72f 100644
--- a/drivers/i2c/chips/tsl2550.c
+++ b/drivers/i2c/chips/tsl2550.c
@@ -257,6 +257,7 @@ static DEVICE_ATTR(operating_mode, S_IWUSR | S_IRUGO,
257 257
258static ssize_t __tsl2550_show_lux(struct i2c_client *client, char *buf) 258static ssize_t __tsl2550_show_lux(struct i2c_client *client, char *buf)
259{ 259{
260 struct tsl2550_data *data = i2c_get_clientdata(client);
260 u8 ch0, ch1; 261 u8 ch0, ch1;
261 int ret; 262 int ret;
262 263
@@ -274,6 +275,8 @@ static ssize_t __tsl2550_show_lux(struct i2c_client *client, char *buf)
274 ret = tsl2550_calculate_lux(ch0, ch1); 275 ret = tsl2550_calculate_lux(ch0, ch1);
275 if (ret < 0) 276 if (ret < 0)
276 return ret; 277 return ret;
278 if (data->operating_mode == 1)
279 ret *= 5;
277 280
278 return sprintf(buf, "%d\n", ret); 281 return sprintf(buf, "%d\n", ret);
279} 282}
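
The tsl2550 fix presumably accounts for the sensor's extended-range mode: tsl2550_calculate_lux() is calibrated for standard mode, and in extended mode (operating_mode == 1) the chip integrates over a fifth of the time, so the result is scaled back up by 5. The factor is taken from the change itself; a sketch:

    #include <assert.h>

    /* scale a standard-mode lux result for the chip's operating mode
     * (0 = standard, 1 = extended range) */
    static int scale_lux(int lux, int operating_mode)
    {
            if (operating_mode == 1)
                    lux *= 5;
            return lux;
    }

    int main(void)
    {
            assert(scale_lux(100, 1) == 500);
            assert(scale_lux(100, 0) == 100);
            return 0;
    }
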
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 8d80fceca6a4..296504355142 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -762,6 +762,7 @@ int i2c_del_adapter(struct i2c_adapter *adap)
762{ 762{
763 int res = 0; 763 int res = 0;
764 struct i2c_adapter *found; 764 struct i2c_adapter *found;
765 struct i2c_client *client, *next;
765 766
766 /* First make sure that this adapter was ever added */ 767 /* First make sure that this adapter was ever added */
767 mutex_lock(&core_lock); 768 mutex_lock(&core_lock);
@@ -781,6 +782,16 @@ int i2c_del_adapter(struct i2c_adapter *adap)
781 if (res) 782 if (res)
782 return res; 783 return res;
783 784
785 /* Remove devices instantiated from sysfs */
786 list_for_each_entry_safe(client, next, &userspace_devices, detected) {
787 if (client->adapter == adap) {
788 dev_dbg(&adap->dev, "Removing %s at 0x%x\n",
789 client->name, client->addr);
790 list_del(&client->detected);
791 i2c_unregister_device(client);
792 }
793 }
794
784 /* Detach any active clients. This can't fail, thus we do not 795 /* Detach any active clients. This can't fail, thus we do not
785 checking the returned value. */ 796 checking the returned value. */
786 res = device_for_each_child(&adap->dev, NULL, __unregister_client); 797 res = device_for_each_child(&adap->dev, NULL, __unregister_client);
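
i2c_del_adapter() now also removes clients that were instantiated through the sysfs new_device interface. Those clients are tracked on the separate userspace_devices list, so without this pass the list would be left holding pointers to devices that no longer exist. list_for_each_entry_safe() is required because each iteration unlinks the current entry; a generic sketch of delete-while-iterating:

    #include <stdlib.h>

    struct client { int adapter_id; struct client *next; };

    /* Remove and free every client bound to adapter_id.  The successor
     * is resolved before the node is freed -- the same reason the
     * kernel uses list_for_each_entry_safe() here. */
    static void remove_clients(struct client **head, int adapter_id)
    {
            struct client **pp = head;

            while (*pp) {
                    struct client *c = *pp;

                    if (c->adapter_id == adapter_id) {
                            *pp = c->next; /* unlink before freeing */
                            free(c);
                    } else {
                            pp = &c->next;
                    }
            }
    }

    int main(void)
    {
            struct client *head = calloc(1, sizeof(*head));

            if (!head)
                    return 1;
            head->adapter_id = 7;
            remove_clients(&head, 7);
            return head == 0 ? 0 : 1;
    }
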
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index d3440b5010a5..6e7ae2b6cfc6 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -162,7 +162,7 @@ static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg)
162 if (tf->command == ATA_CMD_SET_FEATURES && 162 if (tf->command == ATA_CMD_SET_FEATURES &&
163 tf->feature == SETFEATURES_XFER && 163 tf->feature == SETFEATURES_XFER &&
164 tf->nsect >= XFER_SW_DMA_0) { 164 tf->nsect >= XFER_SW_DMA_0) {
165 xfer_rate = ide_find_dma_mode(drive, XFER_UDMA_6); 165 xfer_rate = ide_find_dma_mode(drive, tf->nsect);
166 if (xfer_rate != tf->nsect) { 166 if (xfer_rate != tf->nsect) {
167 err = -EINVAL; 167 err = -EINVAL;
168 goto abort; 168 goto abort;
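
The ide-ioctls fix validates the transfer mode the caller actually asked for. ide_find_dma_mode() returns the best supported mode not exceeding its second argument, so comparing against a probe capped at XFER_UDMA_6 rejected every valid request below the drive's maximum; probing with tf->nsect makes the equality test mean "the requested mode itself is supported". A reduced model:

    #define XFER_UDMA_1 0x41
    #define XFER_UDMA_2 0x42
    #define XFER_UDMA_6 0x46

    /* stand-in for ide_find_dma_mode(): best supported mode <= limit,
     * for a drive that supports everything up to UDMA2 */
    static int find_mode(int limit)
    {
            return limit < XFER_UDMA_2 ? limit : XFER_UDMA_2;
    }

    static int check_request(int requested)
    {
            /* old: find_mode(XFER_UDMA_6) != requested, so the valid
             * UDMA1 request below was rejected with -EINVAL */
            return find_mode(requested) == requested ? 0 : -22;
    }

    int main(void)
    {
            return check_request(XFER_UDMA_1); /* now accepted: exit 0 */
    }
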
diff --git a/drivers/media/common/ir-functions.c b/drivers/media/common/ir-functions.c
index 655474b29e21..abd4791acb0e 100644
--- a/drivers/media/common/ir-functions.c
+++ b/drivers/media/common/ir-functions.c
@@ -64,7 +64,7 @@ void ir_input_init(struct input_dev *dev, struct ir_input_state *ir,
64 64
65 ir->ir_type = ir_type; 65 ir->ir_type = ir_type;
66 66
67 memset(ir->ir_codes, sizeof(ir->ir_codes), 0); 67 memset(ir->ir_codes, 0, sizeof(ir->ir_codes));
68 68
69 /* 69 /*
70 * FIXME: This is a temporary workaround to use the new IR tables 70 * FIXME: This is a temporary workaround to use the new IR tables
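
The ir_input_init() fix is the classic memset argument-order bug: the signature is memset(dst, value, count), so memset(ir->ir_codes, sizeof(ir->ir_codes), 0) filled zero bytes and left the table uninitialized. Demonstration:

    #include <assert.h>
    #include <string.h>

    int main(void)
    {
            char buf[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

            memset(buf, sizeof(buf), 0); /* wrong: fills 0 bytes */
            assert(buf[0] == 1);         /* still untouched */

            memset(buf, 0, sizeof(buf)); /* right: zeroes all 8 bytes */
            assert(buf[0] == 0);
            return 0;
    }
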
diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
index f65591fb7cec..2a53dd096eef 100644
--- a/drivers/media/dvb/dvb-usb/cxusb.c
+++ b/drivers/media/dvb/dvb-usb/cxusb.c
@@ -663,6 +663,14 @@ static struct zl10353_config cxusb_zl10353_xc3028_config = {
663 .parallel_ts = 1, 663 .parallel_ts = 1,
664}; 664};
665 665
666static struct zl10353_config cxusb_zl10353_xc3028_config_no_i2c_gate = {
667 .demod_address = 0x0f,
668 .if2 = 45600,
669 .no_tuner = 1,
670 .parallel_ts = 1,
671 .disable_i2c_gate_ctrl = 1,
672};
673
666static struct mt352_config cxusb_mt352_xc3028_config = { 674static struct mt352_config cxusb_mt352_xc3028_config = {
667 .demod_address = 0x0f, 675 .demod_address = 0x0f,
668 .if2 = 4560, 676 .if2 = 4560,
@@ -894,7 +902,7 @@ static int cxusb_dualdig4_frontend_attach(struct dvb_usb_adapter *adap)
894 cxusb_bluebird_gpio_pulse(adap->dev, 0x02, 1); 902 cxusb_bluebird_gpio_pulse(adap->dev, 0x02, 1);
895 903
896 if ((adap->fe = dvb_attach(zl10353_attach, 904 if ((adap->fe = dvb_attach(zl10353_attach,
897 &cxusb_zl10353_xc3028_config, 905 &cxusb_zl10353_xc3028_config_no_i2c_gate,
898 &adap->dev->i2c_adap)) == NULL) 906 &adap->dev->i2c_adap)) == NULL)
899 return -EIO; 907 return -EIO;
900 908
diff --git a/drivers/media/dvb/siano/Kconfig b/drivers/media/dvb/siano/Kconfig
index 8c1aed77ea30..85a222c4eaa0 100644
--- a/drivers/media/dvb/siano/Kconfig
+++ b/drivers/media/dvb/siano/Kconfig
@@ -4,7 +4,7 @@
4 4
5config SMS_SIANO_MDTV 5config SMS_SIANO_MDTV
6 tristate "Siano SMS1xxx based MDTV receiver" 6 tristate "Siano SMS1xxx based MDTV receiver"
7 depends on DVB_CORE && INPUT 7 depends on DVB_CORE && INPUT && HAS_DMA
8 ---help--- 8 ---help---
9 Choose Y or M here if you have MDTV receiver with a Siano chipset. 9 Choose Y or M here if you have MDTV receiver with a Siano chipset.
10 10
diff --git a/drivers/media/radio/radio-gemtek-pci.c b/drivers/media/radio/radio-gemtek-pci.c
index c3f579de6e71..c6cf11661868 100644
--- a/drivers/media/radio/radio-gemtek-pci.c
+++ b/drivers/media/radio/radio-gemtek-pci.c
@@ -181,12 +181,10 @@ static void gemtek_pci_mute(struct gemtek_pci *card)
181 181
182static void gemtek_pci_unmute(struct gemtek_pci *card) 182static void gemtek_pci_unmute(struct gemtek_pci *card)
183{ 183{
184 mutex_lock(&card->lock);
185 if (card->mute) { 184 if (card->mute) {
186 gemtek_pci_setfrequency(card, card->current_frequency); 185 gemtek_pci_setfrequency(card, card->current_frequency);
187 card->mute = false; 186 card->mute = false;
188 } 187 }
189 mutex_unlock(&card->lock);
190} 188}
191 189
192static int gemtek_pci_getsignal(struct gemtek_pci *card) 190static int gemtek_pci_getsignal(struct gemtek_pci *card)
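
gemtek_pci_unmute() stops taking card->lock itself. The lock-protected body calls gemtek_pci_setfrequency(), which presumably acquires the same non-recursive mutex, so the nested acquisition would deadlock; with this change, serialization is left to the frequency-setting path. The fixed shape, sketched with pthreads:

    #include <pthread.h>

    static pthread_mutex_t card_lock = PTHREAD_MUTEX_INITIALIZER;
    static int muted = 1;

    /* caller must hold card_lock -- the helper no longer locks, since
     * taking a non-recursive mutex twice in one thread deadlocks */
    static void unmute_locked(void)
    {
            if (muted)
                    muted = 0;
    }

    static void set_frequency(void)
    {
            pthread_mutex_lock(&card_lock);
            unmute_locked();
            /* ... program the tuner ... */
            pthread_mutex_unlock(&card_lock);
    }

    int main(void)
    {
            set_frequency();
            return muted;
    }
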
diff --git a/drivers/media/video/davinci/vpif_display.c b/drivers/media/video/davinci/vpif_display.c
index c015da813dda..d14cfb200ed0 100644
--- a/drivers/media/video/davinci/vpif_display.c
+++ b/drivers/media/video/davinci/vpif_display.c
@@ -1426,7 +1426,6 @@ static __init int vpif_probe(struct platform_device *pdev)
1426 struct vpif_display_config *config; 1426 struct vpif_display_config *config;
1427 int i, j = 0, k, q, m, err = 0; 1427 int i, j = 0, k, q, m, err = 0;
1428 struct i2c_adapter *i2c_adap; 1428 struct i2c_adapter *i2c_adap;
1429 struct vpif_config *config;
1430 struct common_obj *common; 1429 struct common_obj *common;
1431 struct channel_obj *ch; 1430 struct channel_obj *ch;
1432 struct video_device *vfd; 1431 struct video_device *vfd;
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index bdb249bd9d5d..c0fd5c6feeac 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -1584,8 +1584,8 @@ struct em28xx_board em28xx_boards[] = {
1584 [EM2870_BOARD_REDDO_DVB_C_USB_BOX] = { 1584 [EM2870_BOARD_REDDO_DVB_C_USB_BOX] = {
1585 .name = "Reddo DVB-C USB TV Box", 1585 .name = "Reddo DVB-C USB TV Box",
1586 .tuner_type = TUNER_ABSENT, 1586 .tuner_type = TUNER_ABSENT,
1587 .tuner_gpio = reddo_dvb_c_usb_box,
1587 .has_dvb = 1, 1588 .has_dvb = 1,
1588 .dvb_gpio = reddo_dvb_c_usb_box,
1589 }, 1589 },
1590}; 1590};
1591const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards); 1591const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards);
diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c
index 5f37952c75cf..72802291e812 100644
--- a/drivers/media/video/mx1_camera.c
+++ b/drivers/media/video/mx1_camera.c
@@ -28,6 +28,7 @@
28#include <linux/moduleparam.h> 28#include <linux/moduleparam.h>
29#include <linux/mutex.h> 29#include <linux/mutex.h>
30#include <linux/platform_device.h> 30#include <linux/platform_device.h>
31#include <linux/sched.h>
31#include <linux/time.h> 32#include <linux/time.h>
32#include <linux/version.h> 33#include <linux/version.h>
33#include <linux/videodev2.h> 34#include <linux/videodev2.h>
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c
index dff2e5e2d8c6..7db82bdf6f31 100644
--- a/drivers/media/video/mx3_camera.c
+++ b/drivers/media/video/mx3_camera.c
@@ -17,6 +17,7 @@
17#include <linux/clk.h> 17#include <linux/clk.h>
18#include <linux/vmalloc.h> 18#include <linux/vmalloc.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/sched.h>
20 21
21#include <media/v4l2-common.h> 22#include <media/v4l2-common.h>
22#include <media/v4l2-dev.h> 23#include <media/v4l2-dev.h>
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index 2f78b4f263f5..9c8b7c7b89ee 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -31,6 +31,7 @@
31#include <linux/platform_device.h> 31#include <linux/platform_device.h>
32#include <linux/videodev2.h> 32#include <linux/videodev2.h>
33#include <linux/pm_runtime.h> 33#include <linux/pm_runtime.h>
34#include <linux/sched.h>
34 35
35#include <media/v4l2-common.h> 36#include <media/v4l2-common.h>
36#include <media/v4l2-dev.h> 37#include <media/v4l2-dev.h>
@@ -1723,11 +1724,12 @@ static int __devinit sh_mobile_ceu_probe(struct platform_device *pdev)
1723 1724
1724 err = soc_camera_host_register(&pcdev->ici); 1725 err = soc_camera_host_register(&pcdev->ici);
1725 if (err) 1726 if (err)
1726 goto exit_free_irq; 1727 goto exit_free_clk;
1727 1728
1728 return 0; 1729 return 0;
1729 1730
1730exit_free_irq: 1731exit_free_clk:
1732 pm_runtime_disable(&pdev->dev);
1731 free_irq(pcdev->irq, pcdev); 1733 free_irq(pcdev->irq, pcdev);
1732exit_release_mem: 1734exit_release_mem:
1733 if (platform_get_resource(pdev, IORESOURCE_MEM, 1)) 1735 if (platform_get_resource(pdev, IORESOURCE_MEM, 1))
@@ -1747,6 +1749,7 @@ static int __devexit sh_mobile_ceu_remove(struct platform_device *pdev)
1747 struct sh_mobile_ceu_dev, ici); 1749 struct sh_mobile_ceu_dev, ici);
1748 1750
1749 soc_camera_host_unregister(soc_host); 1751 soc_camera_host_unregister(soc_host);
1752 pm_runtime_disable(&pdev->dev);
1750 free_irq(pcdev->irq, pcdev); 1753 free_irq(pcdev->irq, pcdev);
1751 if (platform_get_resource(pdev, IORESOURCE_MEM, 1)) 1754 if (platform_get_resource(pdev, IORESOURCE_MEM, 1))
1752 dma_release_declared_memory(&pdev->dev); 1755 dma_release_declared_memory(&pdev->dev);
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index 36e617bd13c7..95fdeb23c2c1 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -1097,6 +1097,13 @@ static int default_s_crop(struct soc_camera_device *icd, struct v4l2_crop *a)
1097 return v4l2_subdev_call(sd, video, s_crop, a); 1097 return v4l2_subdev_call(sd, video, s_crop, a);
1098} 1098}
1099 1099
1100static void soc_camera_device_init(struct device *dev, void *pdata)
1101{
1102 dev->platform_data = pdata;
1103 dev->bus = &soc_camera_bus_type;
1104 dev->release = dummy_release;
1105}
1106
1100int soc_camera_host_register(struct soc_camera_host *ici) 1107int soc_camera_host_register(struct soc_camera_host *ici)
1101{ 1108{
1102 struct soc_camera_host *ix; 1109 struct soc_camera_host *ix;
@@ -1158,6 +1165,7 @@ void soc_camera_host_unregister(struct soc_camera_host *ici)
1158 1165
1159 list_for_each_entry(icd, &devices, list) { 1166 list_for_each_entry(icd, &devices, list) {
1160 if (icd->iface == ici->nr) { 1167 if (icd->iface == ici->nr) {
1168 void *pdata = icd->dev.platform_data;
1161 /* The bus->remove will be called */ 1169 /* The bus->remove will be called */
1162 device_unregister(&icd->dev); 1170 device_unregister(&icd->dev);
1163 /* 1171 /*
@@ -1169,6 +1177,7 @@ void soc_camera_host_unregister(struct soc_camera_host *ici)
1169 * device private data. 1177 * device private data.
1170 */ 1178 */
1171 memset(&icd->dev, 0, sizeof(icd->dev)); 1179 memset(&icd->dev, 0, sizeof(icd->dev));
1180 soc_camera_device_init(&icd->dev, pdata);
1172 } 1181 }
1173 } 1182 }
1174 1183
@@ -1200,10 +1209,7 @@ static int soc_camera_device_register(struct soc_camera_device *icd)
1200 * man, stay reasonable... */ 1209 * man, stay reasonable... */
1201 return -ENOMEM; 1210 return -ENOMEM;
1202 1211
1203 icd->devnum = num; 1212 icd->devnum = num;
1204 icd->dev.bus = &soc_camera_bus_type;
1205
1206 icd->dev.release = dummy_release;
1207 icd->use_count = 0; 1213 icd->use_count = 0;
1208 icd->host_priv = NULL; 1214 icd->host_priv = NULL;
1209 mutex_init(&icd->video_lock); 1215 mutex_init(&icd->video_lock);
@@ -1311,12 +1317,13 @@ static int __devinit soc_camera_pdrv_probe(struct platform_device *pdev)
1311 icd->iface = icl->bus_id; 1317 icd->iface = icl->bus_id;
1312 icd->pdev = &pdev->dev; 1318 icd->pdev = &pdev->dev;
1313 platform_set_drvdata(pdev, icd); 1319 platform_set_drvdata(pdev, icd);
1314 icd->dev.platform_data = icl;
1315 1320
1316 ret = soc_camera_device_register(icd); 1321 ret = soc_camera_device_register(icd);
1317 if (ret < 0) 1322 if (ret < 0)
1318 goto escdevreg; 1323 goto escdevreg;
1319 1324
1325 soc_camera_device_init(&icd->dev, icl);
1326
1320 icd->user_width = DEFAULT_WIDTH; 1327 icd->user_width = DEFAULT_WIDTH;
1321 icd->user_height = DEFAULT_HEIGHT; 1328 icd->user_height = DEFAULT_HEIGHT;
1322 1329
diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c
index 635ffc7b0391..c3065c4bcba9 100644
--- a/drivers/media/video/videobuf-dma-contig.c
+++ b/drivers/media/video/videobuf-dma-contig.c
@@ -19,6 +19,7 @@
19#include <linux/mm.h> 19#include <linux/mm.h>
20#include <linux/pagemap.h> 20#include <linux/pagemap.h>
21#include <linux/dma-mapping.h> 21#include <linux/dma-mapping.h>
22#include <linux/sched.h>
22#include <media/videobuf-dma-contig.h> 23#include <media/videobuf-dma-contig.h>
23 24
24struct videobuf_dma_contig_memory { 25struct videobuf_dma_contig_memory {
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index db39f4a52f53..2cb2736d65aa 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -158,6 +158,7 @@ static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf,
158 struct i2c_msg msg[2]; 158 struct i2c_msg msg[2];
159 u8 msgbuf[2]; 159 u8 msgbuf[2];
160 struct i2c_client *client; 160 struct i2c_client *client;
161 unsigned long timeout, read_time;
161 int status, i; 162 int status, i;
162 163
163 memset(msg, 0, sizeof(msg)); 164 memset(msg, 0, sizeof(msg));
@@ -183,47 +184,60 @@ static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf,
183 if (count > io_limit) 184 if (count > io_limit)
184 count = io_limit; 185 count = io_limit;
185 186
186 /* Smaller eeproms can work given some SMBus extension calls */
187 if (at24->use_smbus) { 187 if (at24->use_smbus) {
188 /* Smaller eeproms can work given some SMBus extension calls */
188 if (count > I2C_SMBUS_BLOCK_MAX) 189 if (count > I2C_SMBUS_BLOCK_MAX)
189 count = I2C_SMBUS_BLOCK_MAX; 190 count = I2C_SMBUS_BLOCK_MAX;
190 status = i2c_smbus_read_i2c_block_data(client, offset, 191 } else {
191 count, buf); 192 /*
192 dev_dbg(&client->dev, "smbus read %zu@%d --> %d\n", 193 * When we have a better choice than SMBus calls, use a
193 count, offset, status); 194 * combined I2C message. Write address; then read up to
194 return (status < 0) ? -EIO : status; 195 * io_limit data bytes. Note that read page rollover helps us
196 * here (unlike writes). msgbuf is u8 and will cast to our
197 * needs.
198 */
199 i = 0;
200 if (at24->chip.flags & AT24_FLAG_ADDR16)
201 msgbuf[i++] = offset >> 8;
202 msgbuf[i++] = offset;
203
204 msg[0].addr = client->addr;
205 msg[0].buf = msgbuf;
206 msg[0].len = i;
207
208 msg[1].addr = client->addr;
209 msg[1].flags = I2C_M_RD;
210 msg[1].buf = buf;
211 msg[1].len = count;
195 } 212 }
196 213
197 /* 214 /*
198 * When we have a better choice than SMBus calls, use a combined 215 * Reads fail if the previous write didn't complete yet. We may
199 * I2C message. Write address; then read up to io_limit data bytes. 216 * loop a few times until this one succeeds, waiting at least
200 * Note that read page rollover helps us here (unlike writes). 217 * long enough for one entire page write to work.
201 * msgbuf is u8 and will cast to our needs.
202 */ 218 */
203 i = 0; 219 timeout = jiffies + msecs_to_jiffies(write_timeout);
204 if (at24->chip.flags & AT24_FLAG_ADDR16) 220 do {
205 msgbuf[i++] = offset >> 8; 221 read_time = jiffies;
206 msgbuf[i++] = offset; 222 if (at24->use_smbus) {
207 223 status = i2c_smbus_read_i2c_block_data(client, offset,
208 msg[0].addr = client->addr; 224 count, buf);
209 msg[0].buf = msgbuf; 225 } else {
210 msg[0].len = i; 226 status = i2c_transfer(client->adapter, msg, 2);
227 if (status == 2)
228 status = count;
229 }
230 dev_dbg(&client->dev, "read %zu@%d --> %d (%ld)\n",
231 count, offset, status, jiffies);
211 232
212 msg[1].addr = client->addr; 233 if (status == count)
213 msg[1].flags = I2C_M_RD; 234 return count;
214 msg[1].buf = buf;
215 msg[1].len = count;
216 235
217 status = i2c_transfer(client->adapter, msg, 2); 236 /* REVISIT: at HZ=100, this is sloooow */
218 dev_dbg(&client->dev, "i2c read %zu@%d --> %d\n", 237 msleep(1);
219 count, offset, status); 238 } while (time_before(read_time, timeout));
220 239
221 if (status == 2) 240 return -ETIMEDOUT;
222 return count;
223 else if (status >= 0)
224 return -EIO;
225 else
226 return status;
227} 241}
228 242
229static ssize_t at24_read(struct at24_data *at24, 243static ssize_t at24_read(struct at24_data *at24,
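The rewritten read path folds the SMBus and raw-I2C transfers into one polling loop: EEPROMs NAK reads while an internal page write is still in progress, so the driver retries until a transfer succeeds or write_timeout (milliseconds) elapses. The loop's shape, reduced to its essentials (do_one_read() and count are stand-ins for the two transfer branches and the requested length):

    unsigned long timeout = jiffies + msecs_to_jiffies(write_timeout);
    unsigned long read_time;
    int status;

    do {
            read_time = jiffies;            /* sampled before the attempt */
            status = do_one_read();         /* SMBus or i2c_transfer() path */
            if (status == count)
                    return count;           /* full read completed */
            msleep(1);                      /* give the write cycle time */
    } while (time_before(read_time, timeout));

    return -ETIMEDOUT;

Sampling jiffies before each attempt guarantees one final try even when the transfer itself overruns the deadline.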
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 2be49c817995..b25467ac895c 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -628,15 +628,6 @@ static int ep93xx_open(struct net_device *dev)
628 if (ep93xx_alloc_buffers(ep)) 628 if (ep93xx_alloc_buffers(ep))
629 return -ENOMEM; 629 return -ENOMEM;
630 630
631 if (is_zero_ether_addr(dev->dev_addr)) {
632 random_ether_addr(dev->dev_addr);
633 printk(KERN_INFO "%s: generated random MAC address "
634 "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x.\n", dev->name,
635 dev->dev_addr[0], dev->dev_addr[1],
636 dev->dev_addr[2], dev->dev_addr[3],
637 dev->dev_addr[4], dev->dev_addr[5]);
638 }
639
640 napi_enable(&ep->napi); 631 napi_enable(&ep->napi);
641 632
642 if (ep93xx_start_hw(dev)) { 633 if (ep93xx_start_hw(dev)) {
@@ -877,6 +868,9 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
877 ep->mii.mdio_write = ep93xx_mdio_write; 868 ep->mii.mdio_write = ep93xx_mdio_write;
878 ep->mdc_divisor = 40; /* Max HCLK 100 MHz, min MDIO clk 2.5 MHz. */ 869 ep->mdc_divisor = 40; /* Max HCLK 100 MHz, min MDIO clk 2.5 MHz. */
879 870
871 if (is_zero_ether_addr(dev->dev_addr))
872 random_ether_addr(dev->dev_addr);
873
880 err = register_netdev(dev); 874 err = register_netdev(dev);
881 if (err) { 875 if (err) {
882 dev_err(&pdev->dev, "Failed to register netdev\n"); 876 dev_err(&pdev->dev, "Failed to register netdev\n");
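Moving the fallback-MAC generation from open() to probe() means a random address is picked once, before register_netdev() publishes the interface, rather than on every open. random_ether_addr() already yields a valid locally-administered unicast address, which is presumably why the hand-rolled printk was dropped along the way. The probe-time idiom:

    #include <linux/etherdevice.h>

    /* in probe, before register_netdev(dev): */
    if (is_zero_ether_addr(dev->dev_addr))
            random_ether_addr(dev->dev_addr);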
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index e046943ef29d..2a9132343b66 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -912,9 +912,6 @@ static irqreturn_t b44_interrupt(int irq, void *dev_id)
912 bp->istat = istat; 912 bp->istat = istat;
913 __b44_disable_ints(bp); 913 __b44_disable_ints(bp);
914 __napi_schedule(&bp->napi); 914 __napi_schedule(&bp->napi);
915 } else {
916 printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
917 dev->name);
918 } 915 }
919 916
920irq_ack: 917irq_ack:
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 3c29a20b751e..d269a68ce354 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -157,6 +157,7 @@
157#include <linux/init.h> 157#include <linux/init.h>
158#include <linux/pci.h> 158#include <linux/pci.h>
159#include <linux/dma-mapping.h> 159#include <linux/dma-mapping.h>
160#include <linux/dmapool.h>
160#include <linux/netdevice.h> 161#include <linux/netdevice.h>
161#include <linux/etherdevice.h> 162#include <linux/etherdevice.h>
162#include <linux/mii.h> 163#include <linux/mii.h>
@@ -602,6 +603,7 @@ struct nic {
602 struct mem *mem; 603 struct mem *mem;
603 dma_addr_t dma_addr; 604 dma_addr_t dma_addr;
604 605
606 struct pci_pool *cbs_pool;
605 dma_addr_t cbs_dma_addr; 607 dma_addr_t cbs_dma_addr;
606 u8 adaptive_ifs; 608 u8 adaptive_ifs;
607 u8 tx_threshold; 609 u8 tx_threshold;
@@ -1793,9 +1795,7 @@ static void e100_clean_cbs(struct nic *nic)
1793 nic->cb_to_clean = nic->cb_to_clean->next; 1795 nic->cb_to_clean = nic->cb_to_clean->next;
1794 nic->cbs_avail++; 1796 nic->cbs_avail++;
1795 } 1797 }
1796 pci_free_consistent(nic->pdev, 1798 pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
1797 sizeof(struct cb) * nic->params.cbs.count,
1798 nic->cbs, nic->cbs_dma_addr);
1799 nic->cbs = NULL; 1799 nic->cbs = NULL;
1800 nic->cbs_avail = 0; 1800 nic->cbs_avail = 0;
1801 } 1801 }
@@ -1813,8 +1813,8 @@ static int e100_alloc_cbs(struct nic *nic)
1813 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL; 1813 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
1814 nic->cbs_avail = 0; 1814 nic->cbs_avail = 0;
1815 1815
1816 nic->cbs = pci_alloc_consistent(nic->pdev, 1816 nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL,
1817 sizeof(struct cb) * count, &nic->cbs_dma_addr); 1817 &nic->cbs_dma_addr);
1818 if (!nic->cbs) 1818 if (!nic->cbs)
1819 return -ENOMEM; 1819 return -ENOMEM;
1820 1820
@@ -2841,7 +2841,11 @@ static int __devinit e100_probe(struct pci_dev *pdev,
2841 DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n"); 2841 DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
2842 goto err_out_free; 2842 goto err_out_free;
2843 } 2843 }
2844 2844 nic->cbs_pool = pci_pool_create(netdev->name,
2845 nic->pdev,
2846 nic->params.cbs.count * sizeof(struct cb),
2847 sizeof(u32),
2848 0);
2845 DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %pM\n", 2849 DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %pM\n",
2846 (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0), 2850 (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
2847 pdev->irq, netdev->dev_addr); 2851 pdev->irq, netdev->dev_addr);
@@ -2871,6 +2875,7 @@ static void __devexit e100_remove(struct pci_dev *pdev)
2871 unregister_netdev(netdev); 2875 unregister_netdev(netdev);
2872 e100_free(nic); 2876 e100_free(nic);
2873 pci_iounmap(pdev, nic->csr); 2877 pci_iounmap(pdev, nic->csr);
2878 pci_pool_destroy(nic->cbs_pool);
2874 free_netdev(netdev); 2879 free_netdev(netdev);
2875 pci_release_regions(pdev); 2880 pci_release_regions(pdev);
2876 pci_disable_device(pdev); 2881 pci_disable_device(pdev);
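The command-block ring moves from a one-shot pci_alloc_consistent() to a pci_pool created at probe and destroyed at remove; the alloc and free in between become pool operations that can repeat across interface up/down cycles without re-deriving the allocation size. The lifecycle in outline (struct cb and count stand in for the driver's ring element and length):

    struct pci_pool *pool;
    void *cbs;
    dma_addr_t dma;

    /* probe: one pool per device, sized for the whole ring */
    pool = pci_pool_create("cbs", pdev, count * sizeof(struct cb),
                           sizeof(u32) /* alignment */, 0);

    /* up: carve the ring out of the pool */
    cbs = pci_pool_alloc(pool, GFP_KERNEL, &dma);

    /* down: return it */
    pci_pool_free(pool, cbs, dma);

    /* remove */
    pci_pool_destroy(pool);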
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index b9221bdc7184..0fe2fc90f207 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -3235,6 +3235,10 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
3235 flush_scheduled_work(); 3235 flush_scheduled_work();
3236 3236
3237 unregister_netdev(dev); 3237 unregister_netdev(dev);
3238
3239 /* restore original MAC address */
3240 rtl_rar_set(tp, dev->perm_addr);
3241
3238 rtl_disable_msi(pdev, tp); 3242 rtl_disable_msi(pdev, tp);
3239 rtl8169_release_board(pdev, dev, tp->mmio_addr); 3243 rtl8169_release_board(pdev, dev, tp->mmio_addr);
3240 pci_set_drvdata(pdev, NULL); 3244 pci_set_drvdata(pdev, NULL);
@@ -4881,6 +4885,9 @@ static void rtl_shutdown(struct pci_dev *pdev)
4881 4885
4882 rtl8169_net_suspend(dev); 4886 rtl8169_net_suspend(dev);
4883 4887
4888 /* restore original MAC address */
4889 rtl_rar_set(tp, dev->perm_addr);
4890
4884 spin_lock_irq(&tp->lock); 4891 spin_lock_irq(&tp->lock);
4885 4892
4886 rtl8169_asic_down(ioaddr); 4893 rtl8169_asic_down(ioaddr);
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 05c91ee6921e..f12206bdbb75 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -2283,7 +2283,7 @@ static int __devinit smc_drv_probe(struct platform_device *pdev)
2283 2283
2284 ndev->irq = ires->start; 2284 ndev->irq = ires->start;
2285 2285
2286 if (ires->flags & IRQF_TRIGGER_MASK) 2286 if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK)
2287 irq_flags = ires->flags & IRQF_TRIGGER_MASK; 2287 irq_flags = ires->flags & IRQF_TRIGGER_MASK;
2288 2288
2289 ret = smc_request_attrib(pdev, ndev); 2289 ret = smc_request_attrib(pdev, ndev);
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index b4909a2dec66..0f7909276237 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -252,6 +252,9 @@ static int smsc9420_ethtool_get_settings(struct net_device *dev,
252{ 252{
253 struct smsc9420_pdata *pd = netdev_priv(dev); 253 struct smsc9420_pdata *pd = netdev_priv(dev);
254 254
255 if (!pd->phy_dev)
256 return -ENODEV;
257
255 cmd->maxtxpkt = 1; 258 cmd->maxtxpkt = 1;
256 cmd->maxrxpkt = 1; 259 cmd->maxrxpkt = 1;
257 return phy_ethtool_gset(pd->phy_dev, cmd); 260 return phy_ethtool_gset(pd->phy_dev, cmd);
@@ -262,6 +265,9 @@ static int smsc9420_ethtool_set_settings(struct net_device *dev,
262{ 265{
263 struct smsc9420_pdata *pd = netdev_priv(dev); 266 struct smsc9420_pdata *pd = netdev_priv(dev);
264 267
268 if (!pd->phy_dev)
269 return -ENODEV;
270
265 return phy_ethtool_sset(pd->phy_dev, cmd); 271 return phy_ethtool_sset(pd->phy_dev, cmd);
266} 272}
267 273
@@ -290,6 +296,10 @@ static void smsc9420_ethtool_set_msglevel(struct net_device *netdev, u32 data)
290static int smsc9420_ethtool_nway_reset(struct net_device *netdev) 296static int smsc9420_ethtool_nway_reset(struct net_device *netdev)
291{ 297{
292 struct smsc9420_pdata *pd = netdev_priv(netdev); 298 struct smsc9420_pdata *pd = netdev_priv(netdev);
299
300 if (!pd->phy_dev)
301 return -ENODEV;
302
293 return phy_start_aneg(pd->phy_dev); 303 return phy_start_aneg(pd->phy_dev);
294} 304}
295 305
@@ -312,6 +322,10 @@ smsc9420_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs,
312 for (i = 0; i < 0x100; i += (sizeof(u32))) 322 for (i = 0; i < 0x100; i += (sizeof(u32)))
313 data[j++] = smsc9420_reg_read(pd, i); 323 data[j++] = smsc9420_reg_read(pd, i);
314 324
325 // cannot read phy registers if the net device is down
326 if (!phy_dev)
327 return;
328
315 for (i = 0; i <= 31; i++) 329 for (i = 0; i <= 31; i++)
316 data[j++] = smsc9420_mii_read(phy_dev->bus, phy_dev->addr, i); 330 data[j++] = smsc9420_mii_read(phy_dev->bus, phy_dev->addr, i);
317} 331}
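Each ethtool handler that touches pd->phy_dev now checks it first; the PHY appears to be attached only while the interface is up, so running ethtool against a down interface would otherwise dereference a NULL pointer. The guard is the usual early-return shape:

    static int example_nway_reset(struct net_device *netdev)
    {
            struct smsc9420_pdata *pd = netdev_priv(netdev);

            if (!pd->phy_dev)       /* PHY only present while up */
                    return -ENODEV;

            return phy_start_aneg(pd->phy_dev);
    }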
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index e2c33c06190b..8e25ca7080c7 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -907,6 +907,7 @@ static ssize_t cosa_write(struct file *file,
907 current->state = TASK_RUNNING; 907 current->state = TASK_RUNNING;
908 chan->tx_status = 1; 908 chan->tx_status = 1;
909 spin_unlock_irqrestore(&cosa->lock, flags); 909 spin_unlock_irqrestore(&cosa->lock, flags);
910 up(&chan->wsem);
910 return -ERESTARTSYS; 911 return -ERESTARTSYS;
911 } 912 }
912 } 913 }
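The added up(&chan->wsem) balances the down taken earlier on the write path: once the semaphore is held, every exit, including the signal-interrupted one, must release it or the next writer blocks forever. The invariant in outline:

    if (down_interruptible(&chan->wsem))
            return -ERESTARTSYS;    /* not acquired: nothing to release */

    if (signal_pending(current)) {
            up(&chan->wsem);        /* acquired: release on every exit */
            return -ERESTARTSYS;
    }

    /* ... transmit ... */
    up(&chan->wsem);
    return count;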
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index e5f8fc164fd3..b952ebc7a78b 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -609,6 +609,9 @@ int __init check_zero_address(void)
609 dmi_get_system_info(DMI_BIOS_VENDOR), 609 dmi_get_system_info(DMI_BIOS_VENDOR),
610 dmi_get_system_info(DMI_BIOS_VERSION), 610 dmi_get_system_info(DMI_BIOS_VERSION),
611 dmi_get_system_info(DMI_PRODUCT_VERSION)); 611 dmi_get_system_info(DMI_PRODUCT_VERSION));
612#ifdef CONFIG_DMAR
613 dmar_disabled = 1;
614#endif
612 return 0; 615 return 0;
613 } 616 }
614 break; 617 break;
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 0a8f735f6c4a..ab64522aaa64 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -52,7 +52,7 @@
52 */ 52 */
53#undef START_IN_KERNEL_MODE 53#undef START_IN_KERNEL_MODE
54 54
55#define DRV_VER "0.5.17" 55#define DRV_VER "0.5.18"
56 56
57/* 57/*
58 * According to the Atom N270 datasheet, 58 * According to the Atom N270 datasheet,
@@ -61,7 +61,7 @@
61 * measured by the on-die thermal monitor are within 0 <= Tj <= 90. So, 61 * measured by the on-die thermal monitor are within 0 <= Tj <= 90. So,
62 * assume 89°C is critical temperature. 62 * assume 89°C is critical temperature.
63 */ 63 */
64#define ACERHDF_TEMP_CRIT 89 64#define ACERHDF_TEMP_CRIT 89000
65#define ACERHDF_FAN_OFF 0 65#define ACERHDF_FAN_OFF 0
66#define ACERHDF_FAN_AUTO 1 66#define ACERHDF_FAN_AUTO 1
67 67
@@ -69,7 +69,7 @@
69 * No matter what value the user puts into the fanon variable, turn on the fan 69 * No matter what value the user puts into the fanon variable, turn on the fan
70 * at 80 degree Celsius to prevent hardware damage 70 * at 80 degree Celsius to prevent hardware damage
71 */ 71 */
72#define ACERHDF_MAX_FANON 80 72#define ACERHDF_MAX_FANON 80000
73 73
74/* 74/*
75 * Maximum interval between two temperature checks is 15 seconds, as the die 75 * Maximum interval between two temperature checks is 15 seconds, as the die
@@ -85,8 +85,8 @@ static int kernelmode;
85#endif 85#endif
86 86
87static unsigned int interval = 10; 87static unsigned int interval = 10;
88static unsigned int fanon = 63; 88static unsigned int fanon = 63000;
89static unsigned int fanoff = 58; 89static unsigned int fanoff = 58000;
90static unsigned int verbose; 90static unsigned int verbose;
91static unsigned int fanstate = ACERHDF_FAN_AUTO; 91static unsigned int fanstate = ACERHDF_FAN_AUTO;
92static char force_bios[16]; 92static char force_bios[16];
@@ -171,7 +171,7 @@ static int acerhdf_get_temp(int *temp)
171 if (ec_read(bios_cfg->tempreg, &read_temp)) 171 if (ec_read(bios_cfg->tempreg, &read_temp))
172 return -EINVAL; 172 return -EINVAL;
173 173
174 *temp = read_temp; 174 *temp = read_temp * 1000;
175 175
176 return 0; 176 return 0;
177} 177}
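The constants and the value returned by acerhdf_get_temp() all gain a factor of 1000 because the thermal sysfs interface works in millidegrees Celsius, while the EC register holds whole degrees. The conversion in isolation:

    #define ACERHDF_TEMP_CRIT 89000         /* 89 degrees C in millidegrees */

    static int ec_degrees_to_thermal(u8 raw, int *temp)
    {
            *temp = raw * 1000;             /* EC reports whole degrees C */
            return 0;
    }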
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index d93108d148fc..a848c7e20aeb 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -1680,36 +1680,48 @@ static void tpacpi_remove_driver_attributes(struct device_driver *drv)
1680 | (__bv1) << 8 | (__bv2) } 1680 | (__bv1) << 8 | (__bv2) }
1681 1681
1682#define TPV_Q_X(__v, __bid1, __bid2, __bv1, __bv2, \ 1682#define TPV_Q_X(__v, __bid1, __bid2, __bv1, __bv2, \
1683 __eid1, __eid2, __ev1, __ev2) \ 1683 __eid, __ev1, __ev2) \
1684 { .vendor = (__v), \ 1684 { .vendor = (__v), \
1685 .bios = TPID(__bid1, __bid2), \ 1685 .bios = TPID(__bid1, __bid2), \
1686 .ec = TPID(__eid1, __eid2), \ 1686 .ec = __eid, \
1687 .quirks = (__ev1) << 24 | (__ev2) << 16 \ 1687 .quirks = (__ev1) << 24 | (__ev2) << 16 \
1688 | (__bv1) << 8 | (__bv2) } 1688 | (__bv1) << 8 | (__bv2) }
1689 1689
1690#define TPV_QI0(__id1, __id2, __bv1, __bv2) \ 1690#define TPV_QI0(__id1, __id2, __bv1, __bv2) \
1691 TPV_Q(PCI_VENDOR_ID_IBM, __id1, __id2, __bv1, __bv2) 1691 TPV_Q(PCI_VENDOR_ID_IBM, __id1, __id2, __bv1, __bv2)
1692 1692
1693/* Outdated IBM BIOSes often lack the EC id string */
1693#define TPV_QI1(__id1, __id2, __bv1, __bv2, __ev1, __ev2) \ 1694#define TPV_QI1(__id1, __id2, __bv1, __bv2, __ev1, __ev2) \
1694 TPV_Q_X(PCI_VENDOR_ID_IBM, __id1, __id2, \ 1695 TPV_Q_X(PCI_VENDOR_ID_IBM, __id1, __id2, \
1695 __bv1, __bv2, __id1, __id2, __ev1, __ev2) 1696 __bv1, __bv2, TPID(__id1, __id2), \
1697 __ev1, __ev2), \
1698 TPV_Q_X(PCI_VENDOR_ID_IBM, __id1, __id2, \
1699 __bv1, __bv2, TPACPI_MATCH_UNKNOWN, \
1700 __ev1, __ev2)
1696 1701
1702/* Outdated IBM BIOSes often lack the EC id string */
1697#define TPV_QI2(__bid1, __bid2, __bv1, __bv2, \ 1703#define TPV_QI2(__bid1, __bid2, __bv1, __bv2, \
1698 __eid1, __eid2, __ev1, __ev2) \ 1704 __eid1, __eid2, __ev1, __ev2) \
1699 TPV_Q_X(PCI_VENDOR_ID_IBM, __bid1, __bid2, \ 1705 TPV_Q_X(PCI_VENDOR_ID_IBM, __bid1, __bid2, \
1700 __bv1, __bv2, __eid1, __eid2, __ev1, __ev2) 1706 __bv1, __bv2, TPID(__eid1, __eid2), \
1707 __ev1, __ev2), \
1708 TPV_Q_X(PCI_VENDOR_ID_IBM, __bid1, __bid2, \
1709 __bv1, __bv2, TPACPI_MATCH_UNKNOWN, \
1710 __ev1, __ev2)
1701 1711
1702#define TPV_QL0(__id1, __id2, __bv1, __bv2) \ 1712#define TPV_QL0(__id1, __id2, __bv1, __bv2) \
1703 TPV_Q(PCI_VENDOR_ID_LENOVO, __id1, __id2, __bv1, __bv2) 1713 TPV_Q(PCI_VENDOR_ID_LENOVO, __id1, __id2, __bv1, __bv2)
1704 1714
1705#define TPV_QL1(__id1, __id2, __bv1, __bv2, __ev1, __ev2) \ 1715#define TPV_QL1(__id1, __id2, __bv1, __bv2, __ev1, __ev2) \
1706 TPV_Q_X(PCI_VENDOR_ID_LENOVO, __id1, __id2, \ 1716 TPV_Q_X(PCI_VENDOR_ID_LENOVO, __id1, __id2, \
1707 __bv1, __bv2, __id1, __id2, __ev1, __ev2) 1717 __bv1, __bv2, TPID(__id1, __id2), \
1718 __ev1, __ev2)
1708 1719
1709#define TPV_QL2(__bid1, __bid2, __bv1, __bv2, \ 1720#define TPV_QL2(__bid1, __bid2, __bv1, __bv2, \
1710 __eid1, __eid2, __ev1, __ev2) \ 1721 __eid1, __eid2, __ev1, __ev2) \
1711 TPV_Q_X(PCI_VENDOR_ID_LENOVO, __bid1, __bid2, \ 1722 TPV_Q_X(PCI_VENDOR_ID_LENOVO, __bid1, __bid2, \
1712 __bv1, __bv2, __eid1, __eid2, __ev1, __ev2) 1723 __bv1, __bv2, TPID(__eid1, __eid2), \
1724 __ev1, __ev2)
1713 1725
1714static const struct tpacpi_quirk tpacpi_bios_version_qtable[] __initconst = { 1726static const struct tpacpi_quirk tpacpi_bios_version_qtable[] __initconst = {
1715 /* Numeric models ------------------ */ 1727 /* Numeric models ------------------ */
@@ -6313,7 +6325,7 @@ static int brightness_write(char *buf)
6313 * Doing it this way makes the syscall restartable in case of EINTR 6325 * Doing it this way makes the syscall restartable in case of EINTR
6314 */ 6326 */
6315 rc = brightness_set(level); 6327 rc = brightness_set(level);
6316 return (rc == -EINTR)? ERESTARTSYS : rc; 6328 return (rc == -EINTR)? -ERESTARTSYS : rc;
6317} 6329}
6318 6330
6319static struct ibm_struct brightness_driver_data = { 6331static struct ibm_struct brightness_driver_data = {
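The one-character fix above is the classic errno-sign bug: kernel code reports errors as negative errno values, and a bare ERESTARTSYS (512) would propagate to userspace as a bogus positive return instead of triggering a syscall restart. The corrected pattern:

    rc = brightness_set(level);                     /* may return -EINTR */
    return (rc == -EINTR) ? -ERESTARTSYS : rc;      /* always negative errno */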
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 5fd2da494d08..c968cc31cd86 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -164,8 +164,8 @@ void scsi_remove_host(struct Scsi_Host *shost)
164 return; 164 return;
165 } 165 }
166 spin_unlock_irqrestore(shost->host_lock, flags); 166 spin_unlock_irqrestore(shost->host_lock, flags);
167 mutex_unlock(&shost->scan_mutex);
168 scsi_forget_host(shost); 167 scsi_forget_host(shost);
168 mutex_unlock(&shost->scan_mutex);
169 scsi_proc_host_rm(shost); 169 scsi_proc_host_rm(shost);
170 170
171 spin_lock_irqsave(shost->host_lock, flags); 171 spin_lock_irqsave(shost->host_lock, flags);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 0547a7f44d42..47291bcff0d5 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -952,16 +952,6 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
952 return SCSI_SCAN_LUN_PRESENT; 952 return SCSI_SCAN_LUN_PRESENT;
953} 953}
954 954
955static inline void scsi_destroy_sdev(struct scsi_device *sdev)
956{
957 scsi_device_set_state(sdev, SDEV_DEL);
958 if (sdev->host->hostt->slave_destroy)
959 sdev->host->hostt->slave_destroy(sdev);
960 transport_destroy_device(&sdev->sdev_gendev);
961 put_device(&sdev->sdev_dev);
962 put_device(&sdev->sdev_gendev);
963}
964
965#ifdef CONFIG_SCSI_LOGGING 955#ifdef CONFIG_SCSI_LOGGING
966/** 956/**
967 * scsi_inq_str - print INQUIRY data from min to max index, strip trailing whitespace 957 * scsi_inq_str - print INQUIRY data from min to max index, strip trailing whitespace
@@ -1139,7 +1129,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
1139 } 1129 }
1140 } 1130 }
1141 } else 1131 } else
1142 scsi_destroy_sdev(sdev); 1132 __scsi_remove_device(sdev);
1143 out: 1133 out:
1144 return res; 1134 return res;
1145} 1135}
@@ -1500,7 +1490,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
1500 /* 1490 /*
1501 * the sdev we used didn't appear in the report luns scan 1491 * the sdev we used didn't appear in the report luns scan
1502 */ 1492 */
1503 scsi_destroy_sdev(sdev); 1493 __scsi_remove_device(sdev);
1504 return ret; 1494 return ret;
1505} 1495}
1506 1496
@@ -1710,7 +1700,7 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
1710 shost_for_each_device(sdev, shost) { 1700 shost_for_each_device(sdev, shost) {
1711 if (!scsi_host_scan_allowed(shost) || 1701 if (!scsi_host_scan_allowed(shost) ||
1712 scsi_sysfs_add_sdev(sdev) != 0) 1702 scsi_sysfs_add_sdev(sdev) != 0)
1713 scsi_destroy_sdev(sdev); 1703 __scsi_remove_device(sdev);
1714 } 1704 }
1715} 1705}
1716 1706
@@ -1943,7 +1933,7 @@ void scsi_free_host_dev(struct scsi_device *sdev)
1943{ 1933{
1944 BUG_ON(sdev->id != sdev->host->this_id); 1934 BUG_ON(sdev->id != sdev->host->this_id);
1945 1935
1946 scsi_destroy_sdev(sdev); 1936 __scsi_remove_device(sdev);
1947} 1937}
1948EXPORT_SYMBOL(scsi_free_host_dev); 1938EXPORT_SYMBOL(scsi_free_host_dev);
1949 1939
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 5c7eb63a19d1..392d8db33905 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -854,82 +854,73 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
854 transport_configure_device(&starget->dev); 854 transport_configure_device(&starget->dev);
855 error = device_add(&sdev->sdev_gendev); 855 error = device_add(&sdev->sdev_gendev);
856 if (error) { 856 if (error) {
857 put_device(sdev->sdev_gendev.parent);
858 printk(KERN_INFO "error 1\n"); 857 printk(KERN_INFO "error 1\n");
859 return error; 858 goto out_remove;
860 } 859 }
861 error = device_add(&sdev->sdev_dev); 860 error = device_add(&sdev->sdev_dev);
862 if (error) { 861 if (error) {
863 printk(KERN_INFO "error 2\n"); 862 printk(KERN_INFO "error 2\n");
864 goto clean_device; 863 device_del(&sdev->sdev_gendev);
864 goto out_remove;
865 } 865 }
866 transport_add_device(&sdev->sdev_gendev);
867 sdev->is_visible = 1;
866 868
867 /* create queue files, which may be writable, depending on the host */ 869 /* create queue files, which may be writable, depending on the host */
868 if (sdev->host->hostt->change_queue_depth) 870 if (sdev->host->hostt->change_queue_depth)
869 error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_depth_rw); 871 error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_depth_rw);
870 else 872 else
871 error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_depth); 873 error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_depth);
872 if (error) { 874 if (error)
873 __scsi_remove_device(sdev); 875 goto out_remove;
874 goto out; 876
875 }
876 if (sdev->host->hostt->change_queue_type) 877 if (sdev->host->hostt->change_queue_type)
877 error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_type_rw); 878 error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_type_rw);
878 else 879 else
879 error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_type); 880 error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_type);
880 if (error) { 881 if (error)
881 __scsi_remove_device(sdev); 882 goto out_remove;
882 goto out;
883 }
884 883
885 error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL); 884 error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL);
886 885
887 if (error) 886 if (error)
887 /* we're treating error on bsg register as non-fatal,
888 * so pretend nothing went wrong */
888 sdev_printk(KERN_INFO, sdev, 889 sdev_printk(KERN_INFO, sdev,
889 "Failed to register bsg queue, errno=%d\n", error); 890 "Failed to register bsg queue, errno=%d\n", error);
890 891
891 /* we're treating error on bsg register as non-fatal, so pretend
892 * nothing went wrong */
893 error = 0;
894
895 /* add additional host specific attributes */ 892 /* add additional host specific attributes */
896 if (sdev->host->hostt->sdev_attrs) { 893 if (sdev->host->hostt->sdev_attrs) {
897 for (i = 0; sdev->host->hostt->sdev_attrs[i]; i++) { 894 for (i = 0; sdev->host->hostt->sdev_attrs[i]; i++) {
898 error = device_create_file(&sdev->sdev_gendev, 895 error = device_create_file(&sdev->sdev_gendev,
899 sdev->host->hostt->sdev_attrs[i]); 896 sdev->host->hostt->sdev_attrs[i]);
900 if (error) { 897 if (error)
901 __scsi_remove_device(sdev); 898 goto out_remove;
902 goto out;
903 }
904 } 899 }
905 } 900 }
906 901
907 transport_add_device(&sdev->sdev_gendev); 902 return 0;
908 out:
909 return error;
910
911 clean_device:
912 scsi_device_set_state(sdev, SDEV_CANCEL);
913
914 device_del(&sdev->sdev_gendev);
915 transport_destroy_device(&sdev->sdev_gendev);
916 put_device(&sdev->sdev_dev);
917 put_device(&sdev->sdev_gendev);
918 903
904 out_remove:
905 __scsi_remove_device(sdev);
919 return error; 906 return error;
907
920} 908}
921 909
922void __scsi_remove_device(struct scsi_device *sdev) 910void __scsi_remove_device(struct scsi_device *sdev)
923{ 911{
924 struct device *dev = &sdev->sdev_gendev; 912 struct device *dev = &sdev->sdev_gendev;
925 913
926 if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0) 914 if (sdev->is_visible) {
927 return; 915 if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0)
916 return;
928 917
929 bsg_unregister_queue(sdev->request_queue); 918 bsg_unregister_queue(sdev->request_queue);
930 device_unregister(&sdev->sdev_dev); 919 device_unregister(&sdev->sdev_dev);
931 transport_remove_device(dev); 920 transport_remove_device(dev);
932 device_del(dev); 921 device_del(dev);
922 } else
923 put_device(&sdev->sdev_dev);
933 scsi_device_set_state(sdev, SDEV_DEL); 924 scsi_device_set_state(sdev, SDEV_DEL);
934 if (sdev->host->hostt->slave_destroy) 925 if (sdev->host->hostt->slave_destroy)
935 sdev->host->hostt->slave_destroy(sdev); 926 sdev->host->hostt->slave_destroy(sdev);
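scsi_sysfs_add_sdev() is reworked into the kernel's single-exit error idiom: every failure after the device becomes visible jumps to one out_remove label, and __scsi_remove_device() consults the new is_visible flag to tear down exactly as much as was set up. The skeleton of the idiom (step_one/step_two are placeholders):

    int add_device(struct scsi_device *sdev)
    {
            int error;

            error = step_one(sdev);
            if (error)
                    goto out_remove;

            error = step_two(sdev);
            if (error)
                    goto out_remove;

            return 0;

    out_remove:
            __scsi_remove_device(sdev);     /* one teardown path for all */
            return error;
    }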
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 88da97745710..84be62149c6c 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -418,7 +418,7 @@ error:
418 __func__, virt, phys, be32_to_cpu(sdt->ref_tag), 418 __func__, virt, phys, be32_to_cpu(sdt->ref_tag),
419 be16_to_cpu(sdt->app_tag)); 419 be16_to_cpu(sdt->app_tag));
420 420
421 return -EIO; 421 return -EILSEQ;
422} 422}
423 423
424/* 424/*
diff --git a/drivers/serial/suncore.c b/drivers/serial/suncore.c
index a2d4a19550ab..ed7d958b0a01 100644
--- a/drivers/serial/suncore.c
+++ b/drivers/serial/suncore.c
@@ -53,20 +53,21 @@ void sunserial_unregister_minors(struct uart_driver *drv, int count)
53EXPORT_SYMBOL(sunserial_unregister_minors); 53EXPORT_SYMBOL(sunserial_unregister_minors);
54 54
55int sunserial_console_match(struct console *con, struct device_node *dp, 55int sunserial_console_match(struct console *con, struct device_node *dp,
56 struct uart_driver *drv, int line) 56 struct uart_driver *drv, int line, bool ignore_line)
57{ 57{
58 int off;
59
60 if (!con || of_console_device != dp) 58 if (!con || of_console_device != dp)
61 return 0; 59 return 0;
62 60
63 off = 0; 61 if (!ignore_line) {
64 if (of_console_options && 62 int off = 0;
65 *of_console_options == 'b')
66 off = 1;
67 63
68 if ((line & 1) != off) 64 if (of_console_options &&
69 return 0; 65 *of_console_options == 'b')
66 off = 1;
67
68 if ((line & 1) != off)
69 return 0;
70 }
70 71
71 con->index = line; 72 con->index = line;
72 drv->cons = con; 73 drv->cons = con;
@@ -76,23 +77,24 @@ int sunserial_console_match(struct console *con, struct device_node *dp,
76} 77}
77EXPORT_SYMBOL(sunserial_console_match); 78EXPORT_SYMBOL(sunserial_console_match);
78 79
79void 80void sunserial_console_termios(struct console *con, struct device_node *uart_dp)
80sunserial_console_termios(struct console *con)
81{ 81{
82 struct device_node *dp; 82 const char *mode, *s;
83 const char *od, *mode, *s;
84 char mode_prop[] = "ttyX-mode"; 83 char mode_prop[] = "ttyX-mode";
85 int baud, bits, stop, cflag; 84 int baud, bits, stop, cflag;
86 char parity; 85 char parity;
87 86
88 dp = of_find_node_by_path("/options"); 87 if (!strcmp(uart_dp->name, "rsc") ||
89 od = of_get_property(dp, "output-device", NULL); 88 !strcmp(uart_dp->name, "rsc-console") ||
90 if (!strcmp(od, "rsc")) { 89 !strcmp(uart_dp->name, "rsc-control")) {
91 mode = of_get_property(of_console_device, 90 mode = of_get_property(uart_dp,
92 "ssp-console-modes", NULL); 91 "ssp-console-modes", NULL);
93 if (!mode) 92 if (!mode)
94 mode = "115200,8,n,1,-"; 93 mode = "115200,8,n,1,-";
94 } else if (!strcmp(uart_dp->name, "lom-console")) {
95 mode = "9600,8,n,1,-";
95 } else { 96 } else {
97 struct device_node *dp;
96 char c; 98 char c;
97 99
98 c = 'a'; 100 c = 'a';
@@ -101,6 +103,7 @@ sunserial_console_termios(struct console *con)
101 103
102 mode_prop[3] = c; 104 mode_prop[3] = c;
103 105
106 dp = of_find_node_by_path("/options");
104 mode = of_get_property(dp, mode_prop, NULL); 107 mode = of_get_property(dp, mode_prop, NULL);
105 if (!mode) 108 if (!mode)
106 mode = "9600,8,n,1,-"; 109 mode = "9600,8,n,1,-";
diff --git a/drivers/serial/suncore.h b/drivers/serial/suncore.h
index 042668aa602e..db2057936c31 100644
--- a/drivers/serial/suncore.h
+++ b/drivers/serial/suncore.h
@@ -26,7 +26,8 @@ extern int sunserial_register_minors(struct uart_driver *, int);
26extern void sunserial_unregister_minors(struct uart_driver *, int); 26extern void sunserial_unregister_minors(struct uart_driver *, int);
27 27
28extern int sunserial_console_match(struct console *, struct device_node *, 28extern int sunserial_console_match(struct console *, struct device_node *,
29 struct uart_driver *, int); 29 struct uart_driver *, int, bool);
30extern void sunserial_console_termios(struct console *); 30extern void sunserial_console_termios(struct console *,
31 struct device_node *);
31 32
32#endif /* !(_SERIAL_SUN_H) */ 33#endif /* !(_SERIAL_SUN_H) */
diff --git a/drivers/serial/sunhv.c b/drivers/serial/sunhv.c
index d548652dee50..d14cca7fb88d 100644
--- a/drivers/serial/sunhv.c
+++ b/drivers/serial/sunhv.c
@@ -566,7 +566,7 @@ static int __devinit hv_probe(struct of_device *op, const struct of_device_id *m
566 goto out_free_con_read_page; 566 goto out_free_con_read_page;
567 567
568 sunserial_console_match(&sunhv_console, op->node, 568 sunserial_console_match(&sunhv_console, op->node,
569 &sunhv_reg, port->line); 569 &sunhv_reg, port->line, false);
570 570
571 err = uart_add_one_port(&sunhv_reg, port); 571 err = uart_add_one_port(&sunhv_reg, port);
572 if (err) 572 if (err)
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index d1ad34128635..d514e28d0755 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -883,7 +883,7 @@ static int sunsab_console_setup(struct console *con, char *options)
883 printk("Console: ttyS%d (SAB82532)\n", 883 printk("Console: ttyS%d (SAB82532)\n",
884 (sunsab_reg.minor - 64) + con->index); 884 (sunsab_reg.minor - 64) + con->index);
885 885
886 sunserial_console_termios(con); 886 sunserial_console_termios(con, to_of_device(up->port.dev)->node);
887 887
888 switch (con->cflag & CBAUD) { 888 switch (con->cflag & CBAUD) {
889 case B150: baud = 150; break; 889 case B150: baud = 150; break;
@@ -1027,10 +1027,12 @@ static int __devinit sab_probe(struct of_device *op, const struct of_device_id *
1027 goto out1; 1027 goto out1;
1028 1028
1029 sunserial_console_match(SUNSAB_CONSOLE(), op->node, 1029 sunserial_console_match(SUNSAB_CONSOLE(), op->node,
1030 &sunsab_reg, up[0].port.line); 1030 &sunsab_reg, up[0].port.line,
1031 false);
1031 1032
1032 sunserial_console_match(SUNSAB_CONSOLE(), op->node, 1033 sunserial_console_match(SUNSAB_CONSOLE(), op->node,
1033 &sunsab_reg, up[1].port.line); 1034 &sunsab_reg, up[1].port.line,
1035 false);
1034 1036
1035 err = uart_add_one_port(&sunsab_reg, &up[0].port); 1037 err = uart_add_one_port(&sunsab_reg, &up[0].port);
1036 if (err) 1038 if (err)
@@ -1116,7 +1118,6 @@ static int __init sunsab_init(void)
1116 if (!sunsab_ports) 1118 if (!sunsab_ports)
1117 return -ENOMEM; 1119 return -ENOMEM;
1118 1120
1119 sunsab_reg.cons = SUNSAB_CONSOLE();
1120 err = sunserial_register_minors(&sunsab_reg, num_channels); 1121 err = sunserial_register_minors(&sunsab_reg, num_channels);
1121 if (err) { 1122 if (err) {
1122 kfree(sunsab_ports); 1123 kfree(sunsab_ports);
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c
index 68d262b15749..170d3d68c8f0 100644
--- a/drivers/serial/sunsu.c
+++ b/drivers/serial/sunsu.c
@@ -1329,11 +1329,9 @@ static void sunsu_console_write(struct console *co, const char *s,
1329 */ 1329 */
1330static int __init sunsu_console_setup(struct console *co, char *options) 1330static int __init sunsu_console_setup(struct console *co, char *options)
1331{ 1331{
1332 static struct ktermios dummy;
1333 struct ktermios termios;
1332 struct uart_port *port; 1334 struct uart_port *port;
1333 int baud = 9600;
1334 int bits = 8;
1335 int parity = 'n';
1336 int flow = 'n';
1337 1335
1338 printk("Console: ttyS%d (SU)\n", 1336 printk("Console: ttyS%d (SU)\n",
1339 (sunsu_reg.minor - 64) + co->index); 1337 (sunsu_reg.minor - 64) + co->index);
@@ -1352,10 +1350,15 @@ static int __init sunsu_console_setup(struct console *co, char *options)
1352 */ 1350 */
1353 spin_lock_init(&port->lock); 1351 spin_lock_init(&port->lock);
1354 1352
1355 if (options) 1353 /* Get firmware console settings. */
1356 uart_parse_options(options, &baud, &parity, &bits, &flow); 1354 sunserial_console_termios(co, to_of_device(port->dev)->node);
1357 1355
1358 return uart_set_options(port, co, baud, parity, bits, flow); 1356 memset(&termios, 0, sizeof(struct ktermios));
1357 termios.c_cflag = co->cflag;
1358 port->mctrl |= TIOCM_DTR;
1359 port->ops->set_termios(port, &termios, &dummy);
1360
1361 return 0;
1359} 1362}
1360 1363
1361static struct console sunsu_console = { 1364static struct console sunsu_console = {
@@ -1409,6 +1412,7 @@ static int __devinit su_probe(struct of_device *op, const struct of_device_id *m
1409 struct uart_sunsu_port *up; 1412 struct uart_sunsu_port *up;
1410 struct resource *rp; 1413 struct resource *rp;
1411 enum su_type type; 1414 enum su_type type;
1415 bool ignore_line;
1412 int err; 1416 int err;
1413 1417
1414 type = su_get_type(dp); 1418 type = su_get_type(dp);
@@ -1467,8 +1471,14 @@ static int __devinit su_probe(struct of_device *op, const struct of_device_id *m
1467 1471
1468 up->port.ops = &sunsu_pops; 1472 up->port.ops = &sunsu_pops;
1469 1473
1474 ignore_line = false;
1475 if (!strcmp(dp->name, "rsc-console") ||
1476 !strcmp(dp->name, "lom-console"))
1477 ignore_line = true;
1478
1470 sunserial_console_match(SUNSU_CONSOLE(), dp, 1479 sunserial_console_match(SUNSU_CONSOLE(), dp,
1471 &sunsu_reg, up->port.line); 1480 &sunsu_reg, up->port.line,
1481 ignore_line);
1472 err = uart_add_one_port(&sunsu_reg, &up->port); 1482 err = uart_add_one_port(&sunsu_reg, &up->port);
1473 if (err) 1483 if (err)
1474 goto out_unmap; 1484 goto out_unmap;
@@ -1517,6 +1527,10 @@ static const struct of_device_id su_match[] = {
1517 .name = "serial", 1527 .name = "serial",
1518 .compatible = "su", 1528 .compatible = "su",
1519 }, 1529 },
1530 {
1531 .type = "serial",
1532 .compatible = "su",
1533 },
1520 {}, 1534 {},
1521}; 1535};
1522MODULE_DEVICE_TABLE(of, su_match); 1536MODULE_DEVICE_TABLE(of, su_match);
@@ -1548,6 +1562,12 @@ static int __init sunsu_init(void)
1548 num_uart++; 1562 num_uart++;
1549 } 1563 }
1550 } 1564 }
1565 for_each_node_by_type(dp, "serial") {
1566 if (of_device_is_compatible(dp, "su")) {
1567 if (su_get_type(dp) == SU_PORT_PORT)
1568 num_uart++;
1569 }
1570 }
1551 1571
1552 if (num_uart) { 1572 if (num_uart) {
1553 err = sunserial_register_minors(&sunsu_reg, num_uart); 1573 err = sunserial_register_minors(&sunsu_reg, num_uart);
diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c
index ef693ae22e7f..2c7a66af4f52 100644
--- a/drivers/serial/sunzilog.c
+++ b/drivers/serial/sunzilog.c
@@ -1180,7 +1180,7 @@ static int __init sunzilog_console_setup(struct console *con, char *options)
1180 (sunzilog_reg.minor - 64) + con->index, con->index); 1180 (sunzilog_reg.minor - 64) + con->index, con->index);
1181 1181
1182 /* Get firmware console settings. */ 1182 /* Get firmware console settings. */
1183 sunserial_console_termios(con); 1183 sunserial_console_termios(con, to_of_device(up->port.dev)->node);
1184 1184
1185 /* Firmware console speed is limited to 150-->38400 baud so 1185 /* Firmware console speed is limited to 150-->38400 baud so
1186 * this hackish cflag thing is OK. 1186 * this hackish cflag thing is OK.
@@ -1416,7 +1416,8 @@ static int __devinit zs_probe(struct of_device *op, const struct of_device_id *m
1416 1416
1417 if (!keyboard_mouse) { 1417 if (!keyboard_mouse) {
1418 if (sunserial_console_match(SUNZILOG_CONSOLE(), op->node, 1418 if (sunserial_console_match(SUNZILOG_CONSOLE(), op->node,
1419 &sunzilog_reg, up[0].port.line)) 1419 &sunzilog_reg, up[0].port.line,
1420 false))
1420 up->flags |= SUNZILOG_FLAG_IS_CONS; 1421 up->flags |= SUNZILOG_FLAG_IS_CONS;
1421 err = uart_add_one_port(&sunzilog_reg, &up[0].port); 1422 err = uart_add_one_port(&sunzilog_reg, &up[0].port);
1422 if (err) { 1423 if (err) {
@@ -1425,7 +1426,8 @@ static int __devinit zs_probe(struct of_device *op, const struct of_device_id *m
1425 return err; 1426 return err;
1426 } 1427 }
1427 if (sunserial_console_match(SUNZILOG_CONSOLE(), op->node, 1428 if (sunserial_console_match(SUNZILOG_CONSOLE(), op->node,
1428 &sunzilog_reg, up[1].port.line)) 1429 &sunzilog_reg, up[1].port.line,
1430 false))
1429 up->flags |= SUNZILOG_FLAG_IS_CONS; 1431 up->flags |= SUNZILOG_FLAG_IS_CONS;
1430 err = uart_add_one_port(&sunzilog_reg, &up[1].port); 1432 err = uart_add_one_port(&sunzilog_reg, &up[1].port);
1431 if (err) { 1433 if (err) {
diff --git a/drivers/staging/go7007/s2250-board.c b/drivers/staging/go7007/s2250-board.c
index 8c85a9c3665a..f4a6541c3e60 100644
--- a/drivers/staging/go7007/s2250-board.c
+++ b/drivers/staging/go7007/s2250-board.c
@@ -261,7 +261,7 @@ static int read_reg_fp(struct i2c_client *client, u16 addr, u16 *val)
261 261
262 memset(buf, 0xcd, 6); 262 memset(buf, 0xcd, 6);
263 usb = go->hpi_context; 263 usb = go->hpi_context;
264 if (down_interruptible(&usb->i2c_lock) != 0) { 264 if (mutex_lock_interruptible(&usb->i2c_lock) != 0) {
265 printk(KERN_INFO "i2c lock failed\n"); 265 printk(KERN_INFO "i2c lock failed\n");
266 kfree(buf); 266 kfree(buf);
267 return -EINTR; 267 return -EINTR;
@@ -270,7 +270,7 @@ static int read_reg_fp(struct i2c_client *client, u16 addr, u16 *val)
270 kfree(buf); 270 kfree(buf);
271 return -EFAULT; 271 return -EFAULT;
272 } 272 }
273 up(&usb->i2c_lock); 273 mutex_unlock(&usb->i2c_lock);
274 274
275 *val = (buf[0] << 8) | buf[1]; 275 *val = (buf[0] << 8) | buf[1];
276 kfree(buf); 276 kfree(buf);
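Converting usb->i2c_lock from a semaphore to a mutex swaps down_interruptible()/up() for mutex_lock_interruptible()/mutex_unlock(); for plain owner-style locking a mutex is the preferred primitive and gains lockdep validation. Side by side (lock declarations assumed):

    /* semaphore style (old) */
    if (down_interruptible(&sem))
            return -EINTR;
    /* ... critical section ... */
    up(&sem);

    /* mutex style (new) */
    if (mutex_lock_interruptible(&lock))
            return -EINTR;
    /* ... critical section ... */
    mutex_unlock(&lock);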
diff --git a/drivers/staging/go7007/s2250-loader.h b/drivers/staging/go7007/s2250-loader.h
new file mode 100644
index 000000000000..b7c301af16cc
--- /dev/null
+++ b/drivers/staging/go7007/s2250-loader.h
@@ -0,0 +1,24 @@
1/*
2 * Copyright (C) 2005-2006 Micronas USA Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License (Version 2) as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software Foundation,
15 * Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
16 */
17
18#ifndef _S2250_LOADER_H_
19#define _S2250_LOADER_H_
20
21extern int s2250loader_init(void);
22extern void s2250loader_cleanup(void);
23
24#endif
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index 42230e62a222..31a58e508924 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -170,7 +170,7 @@ static u32 cvm_oct_get_link(struct net_device *dev)
170 return ret; 170 return ret;
171} 171}
172 172
173struct const ethtool_ops cvm_oct_ethtool_ops = { 173const struct ethtool_ops cvm_oct_ethtool_ops = {
174 .get_drvinfo = cvm_oct_get_drvinfo, 174 .get_drvinfo = cvm_oct_get_drvinfo,
175 .get_settings = cvm_oct_get_settings, 175 .get_settings = cvm_oct_get_settings,
176 .set_settings = cvm_oct_set_settings, 176 .set_settings = cvm_oct_set_settings,
diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c
index 66190b0cb68f..00dc0f4bad19 100644
--- a/drivers/staging/octeon/ethernet-spi.c
+++ b/drivers/staging/octeon/ethernet-spi.c
@@ -317,6 +317,6 @@ void cvm_oct_spi_uninit(struct net_device *dev)
317 cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), 0); 317 cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), 0);
318 cvmx_write_csr(CVMX_STXX_INT_MSK(interface), 0); 318 cvmx_write_csr(CVMX_STXX_INT_MSK(interface), 0);
319 } 319 }
320 free_irq(8 + 46, &number_spi_ports); 320 free_irq(OCTEON_IRQ_RML, &number_spi_ports);
321 } 321 }
322} 322}
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index b8479517dce2..492c5029992d 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -111,6 +111,16 @@ MODULE_PARM_DESC(disable_core_queueing, "\n"
111 "\tallows packets to be sent without lock contention in the packet\n" 111 "\tallows packets to be sent without lock contention in the packet\n"
112 "\tscheduler resulting in some cases in improved throughput.\n"); 112 "\tscheduler resulting in some cases in improved throughput.\n");
113 113
114
115/*
116 * The offset from mac_addr_base that should be used for the next port
117 * that is configured. By convention, if any mgmt ports exist on the
118 * chip, they get the first mac addresses. The ports controlled by
119 * this driver are numbered sequentially following any mgmt addresses
120 * that may exist.
121 */
122static unsigned int cvm_oct_mac_addr_offset;
123
114/** 124/**
115 * Periodic timer to check auto negotiation 125 * Periodic timer to check auto negotiation
116 */ 126 */
@@ -474,16 +484,30 @@ static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
474 */ 484 */
475int cvm_oct_common_init(struct net_device *dev) 485int cvm_oct_common_init(struct net_device *dev)
476{ 486{
477 static int count;
478 char mac[8] = { 0x00, 0x00,
479 octeon_bootinfo->mac_addr_base[0],
480 octeon_bootinfo->mac_addr_base[1],
481 octeon_bootinfo->mac_addr_base[2],
482 octeon_bootinfo->mac_addr_base[3],
483 octeon_bootinfo->mac_addr_base[4],
484 octeon_bootinfo->mac_addr_base[5] + count
485 };
486 struct octeon_ethernet *priv = netdev_priv(dev); 487 struct octeon_ethernet *priv = netdev_priv(dev);
488 struct sockaddr sa;
489 u64 mac = ((u64)(octeon_bootinfo->mac_addr_base[0] & 0xff) << 40) |
490 ((u64)(octeon_bootinfo->mac_addr_base[1] & 0xff) << 32) |
491 ((u64)(octeon_bootinfo->mac_addr_base[2] & 0xff) << 24) |
492 ((u64)(octeon_bootinfo->mac_addr_base[3] & 0xff) << 16) |
493 ((u64)(octeon_bootinfo->mac_addr_base[4] & 0xff) << 8) |
494 (u64)(octeon_bootinfo->mac_addr_base[5] & 0xff);
495
496 mac += cvm_oct_mac_addr_offset;
497 sa.sa_data[0] = (mac >> 40) & 0xff;
498 sa.sa_data[1] = (mac >> 32) & 0xff;
499 sa.sa_data[2] = (mac >> 24) & 0xff;
500 sa.sa_data[3] = (mac >> 16) & 0xff;
501 sa.sa_data[4] = (mac >> 8) & 0xff;
502 sa.sa_data[5] = mac & 0xff;
503
504 if (cvm_oct_mac_addr_offset >= octeon_bootinfo->mac_addr_count)
505 printk(KERN_DEBUG "%s: Using MAC outside of the assigned range:"
506 " %02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
507 sa.sa_data[0] & 0xff, sa.sa_data[1] & 0xff,
508 sa.sa_data[2] & 0xff, sa.sa_data[3] & 0xff,
509 sa.sa_data[4] & 0xff, sa.sa_data[5] & 0xff);
510 cvm_oct_mac_addr_offset++;
487 511
488 /* 512 /*
489 * Force the interface to use the POW send if always_use_pow 513 * Force the interface to use the POW send if always_use_pow
@@ -496,14 +520,12 @@ int cvm_oct_common_init(struct net_device *dev)
496 if (priv->queue != -1 && USE_HW_TCPUDP_CHECKSUM) 520 if (priv->queue != -1 && USE_HW_TCPUDP_CHECKSUM)
497 dev->features |= NETIF_F_IP_CSUM; 521 dev->features |= NETIF_F_IP_CSUM;
498 522
499 count++;
500
501 /* We do our own locking, Linux doesn't need to */ 523 /* We do our own locking, Linux doesn't need to */
502 dev->features |= NETIF_F_LLTX; 524 dev->features |= NETIF_F_LLTX;
503 SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops); 525 SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops);
504 526
505 cvm_oct_mdio_setup_device(dev); 527 cvm_oct_mdio_setup_device(dev);
506 dev->netdev_ops->ndo_set_mac_address(dev, mac); 528 dev->netdev_ops->ndo_set_mac_address(dev, &sa);
507 dev->netdev_ops->ndo_change_mtu(dev, dev->mtu); 529 dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);
508 530
509 /* 531 /*
@@ -620,6 +642,13 @@ static int __init cvm_oct_init_module(void)
620 642
621 pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION); 643 pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION);
622 644
645 if (OCTEON_IS_MODEL(OCTEON_CN52XX))
646 cvm_oct_mac_addr_offset = 2; /* First two are the mgmt ports. */
647 else if (OCTEON_IS_MODEL(OCTEON_CN56XX))
648 cvm_oct_mac_addr_offset = 1; /* First one is the mgmt port. */
649 else
650 cvm_oct_mac_addr_offset = 0;
651
623 cvm_oct_proc_initialize(); 652 cvm_oct_proc_initialize();
624 cvm_oct_rx_initialize(); 653 cvm_oct_rx_initialize();
625 cvm_oct_configure_common_hw(); 654 cvm_oct_configure_common_hw();
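The old code bumped only mac_addr_base[5] with a per-device counter, which wraps past 0xff and collides; the replacement packs the six base bytes into a u64 so the per-port offset is added with normal carry propagation, then unpacks the result into a sockaddr for ndo_set_mac_address(). The arithmetic on its own (u8 base[6], out[6] and an unsigned offset assumed):

    u64 mac = 0;
    int i;

    for (i = 0; i < 6; i++)         /* pack, most significant byte first */
            mac = (mac << 8) | (base[i] & 0xff);

    mac += offset;                  /* carries ripple across byte boundaries */

    for (i = 5; i >= 0; i--) {      /* unpack back into bytes */
            out[i] = mac & 0xff;
            mac >>= 8;
    }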
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index f24d04132eda..4d227b152001 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -317,7 +317,7 @@ static int __devexit pnx4008_wdt_remove(struct platform_device *pdev)
317 317
318static struct platform_driver platform_wdt_driver = { 318static struct platform_driver platform_wdt_driver = {
319 .driver = { 319 .driver = {
320 .name = "watchdog", 320 .name = "pnx4008-watchdog",
321 .owner = THIS_MODULE, 321 .owner = THIS_MODULE,
322 }, 322 },
323 .probe = pnx4008_wdt_probe, 323 .probe = pnx4008_wdt_probe,
@@ -352,4 +352,4 @@ MODULE_PARM_DESC(nowayout,
352 352
353MODULE_LICENSE("GPL"); 353MODULE_LICENSE("GPL");
354MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); 354MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
355MODULE_ALIAS("platform:watchdog"); 355MODULE_ALIAS("platform:pnx4008-watchdog");
diff --git a/fs/9p/cache.c b/fs/9p/cache.c
index 51c94e26a346..bcc5357a9069 100644
--- a/fs/9p/cache.c
+++ b/fs/9p/cache.c
@@ -343,18 +343,7 @@ int __v9fs_fscache_release_page(struct page *page, gfp_t gfp)
343 343
344 BUG_ON(!vcookie->fscache); 344 BUG_ON(!vcookie->fscache);
345 345
346 if (PageFsCache(page)) { 346 return fscache_maybe_release_page(vcookie->fscache, page, gfp);
347 if (fscache_check_page_write(vcookie->fscache, page)) {
348 if (!(gfp & __GFP_WAIT))
349 return 0;
350 fscache_wait_on_page_write(vcookie->fscache, page);
351 }
352
353 fscache_uncache_page(vcookie->fscache, page);
354 ClearPageFsCache(page);
355 }
356
357 return 1;
358} 347}
359 348
360void __v9fs_fscache_invalidate_page(struct page *page) 349void __v9fs_fscache_invalidate_page(struct page *page)
@@ -368,7 +357,6 @@ void __v9fs_fscache_invalidate_page(struct page *page)
368 fscache_wait_on_page_write(vcookie->fscache, page); 357 fscache_wait_on_page_write(vcookie->fscache, page);
369 BUG_ON(!PageLocked(page)); 358 BUG_ON(!PageLocked(page));
370 fscache_uncache_page(vcookie->fscache, page); 359 fscache_uncache_page(vcookie->fscache, page);
371 ClearPageFsCache(page);
372 } 360 }
373} 361}
374 362
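
The 9p and afs hunks replace the same open-coded release sequence with a single call to fscache_maybe_release_page(); the separate ClearPageFsCache() calls disappear on the assumption that the core uncache path now clears the bit itself. A minimal sketch of what such a helper consolidates, inferred from the removed lines rather than copied from the FS-Cache core (the real helper also feeds the vmscan statistics added elsewhere in this series):

    #include <linux/fscache.h>
    #include <linux/pagemap.h>

    /* Sketch only; returns true if the page may be released. */
    static bool maybe_release_page_sketch(struct fscache_cookie *cookie,
    				      struct page *page, gfp_t gfp)
    {
    	if (!PageFsCache(page))
    		return true;			/* nothing cached here */

    	if (fscache_check_page_write(cookie, page)) {
    		if (!(gfp & __GFP_WAIT))
    			return false;		/* caller cannot sleep */
    		fscache_wait_on_page_write(cookie, page);
    	}

    	fscache_uncache_page(cookie, page);	/* assumed to clear PG_fscache */
    	return true;
    }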
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 681c2a7b013f..39b301662f22 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -315,7 +315,6 @@ static void afs_invalidatepage(struct page *page, unsigned long offset)
315 struct afs_vnode *vnode = AFS_FS_I(page->mapping->host); 315 struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
316 fscache_wait_on_page_write(vnode->cache, page); 316 fscache_wait_on_page_write(vnode->cache, page);
317 fscache_uncache_page(vnode->cache, page); 317 fscache_uncache_page(vnode->cache, page);
318 ClearPageFsCache(page);
319 } 318 }
320#endif 319#endif
321 320
@@ -349,17 +348,9 @@ static int afs_releasepage(struct page *page, gfp_t gfp_flags)
349 /* deny if page is being written to the cache and the caller hasn't 348 /* deny if page is being written to the cache and the caller hasn't
350 * elected to wait */ 349 * elected to wait */
351#ifdef CONFIG_AFS_FSCACHE 350#ifdef CONFIG_AFS_FSCACHE
352 if (PageFsCache(page)) { 351 if (!fscache_maybe_release_page(vnode->cache, page, gfp_flags)) {
353 if (fscache_check_page_write(vnode->cache, page)) { 352 _leave(" = F [cache busy]");
354 if (!(gfp_flags & __GFP_WAIT)) { 353 return 0;
355 _leave(" = F [cache busy]");
356 return 0;
357 }
358 fscache_wait_on_page_write(vnode->cache, page);
359 }
360
361 fscache_uncache_page(vnode->cache, page);
362 ClearPageFsCache(page);
363 } 354 }
364#endif 355#endif
365 356
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
index 431accd475a7..27089311fbea 100644
--- a/fs/cachefiles/interface.c
+++ b/fs/cachefiles/interface.c
@@ -114,8 +114,9 @@ nomem_lookup_data:
114 114
115/* 115/*
116 * attempt to look up the nominated node in this cache 116 * attempt to look up the nominated node in this cache
117 * - return -ETIMEDOUT to be scheduled again
117 */ 118 */
118static void cachefiles_lookup_object(struct fscache_object *_object) 119static int cachefiles_lookup_object(struct fscache_object *_object)
119{ 120{
120 struct cachefiles_lookup_data *lookup_data; 121 struct cachefiles_lookup_data *lookup_data;
121 struct cachefiles_object *parent, *object; 122 struct cachefiles_object *parent, *object;
@@ -145,13 +146,15 @@ static void cachefiles_lookup_object(struct fscache_object *_object)
145 object->fscache.cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) 146 object->fscache.cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX)
146 cachefiles_attr_changed(&object->fscache); 147 cachefiles_attr_changed(&object->fscache);
147 148
148 if (ret < 0) { 149 if (ret < 0 && ret != -ETIMEDOUT) {
149 printk(KERN_WARNING "CacheFiles: Lookup failed error %d\n", 150 if (ret != -ENOBUFS)
150 ret); 151 printk(KERN_WARNING
152 "CacheFiles: Lookup failed error %d\n", ret);
151 fscache_object_lookup_error(&object->fscache); 153 fscache_object_lookup_error(&object->fscache);
152 } 154 }
153 155
154 _leave(" [%d]", ret); 156 _leave(" [%d]", ret);
157 return ret;
155} 158}
156 159
157/* 160/*
@@ -331,6 +334,7 @@ static void cachefiles_put_object(struct fscache_object *_object)
331 } 334 }
332 335
333 cache = object->fscache.cache; 336 cache = object->fscache.cache;
337 fscache_object_destroy(&object->fscache);
334 kmem_cache_free(cachefiles_object_jar, object); 338 kmem_cache_free(cachefiles_object_jar, object);
335 fscache_object_destroyed(cache); 339 fscache_object_destroyed(cache);
336 } 340 }
@@ -403,12 +407,26 @@ static int cachefiles_attr_changed(struct fscache_object *_object)
403 if (oi_size == ni_size) 407 if (oi_size == ni_size)
404 return 0; 408 return 0;
405 409
406 newattrs.ia_size = ni_size;
407 newattrs.ia_valid = ATTR_SIZE;
408
409 cachefiles_begin_secure(cache, &saved_cred); 410 cachefiles_begin_secure(cache, &saved_cred);
410 mutex_lock(&object->backer->d_inode->i_mutex); 411 mutex_lock(&object->backer->d_inode->i_mutex);
412
413 /* if there's an extension to a partial page at the end of the backing
414 * file, we need to discard the partial page so that we pick up new
415 * data after it */
416 if (oi_size & ~PAGE_MASK && ni_size > oi_size) {
417 _debug("discard tail %llx", oi_size);
418 newattrs.ia_valid = ATTR_SIZE;
419 newattrs.ia_size = oi_size & PAGE_MASK;
420 ret = notify_change(object->backer, &newattrs);
421 if (ret < 0)
422 goto truncate_failed;
423 }
424
425 newattrs.ia_valid = ATTR_SIZE;
426 newattrs.ia_size = ni_size;
411 ret = notify_change(object->backer, &newattrs); 427 ret = notify_change(object->backer, &newattrs);
428
429truncate_failed:
412 mutex_unlock(&object->backer->d_inode->i_mutex); 430 mutex_unlock(&object->backer->d_inode->i_mutex);
413 cachefiles_end_secure(cache, saved_cred); 431 cachefiles_end_secure(cache, saved_cred);
414 432
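
The attr_changed change splits one truncation into two when a file is being extended and the old EOF sits inside a page: first shrink back to a page boundary to discard the stale partial tail page, then extend to the new size. The page-boundary arithmetic in isolation, as runnable userspace C with made-up sizes:

    #include <stdio.h>

    #define PAGE_SIZE 4096ULL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
    	unsigned long long oi_size = 0x12345;	/* old EOF, mid-page (hypothetical) */
    	unsigned long long ni_size = 0x20000;	/* new, larger EOF (hypothetical) */

    	if ((oi_size & ~PAGE_MASK) && ni_size > oi_size)
    		/* first truncation: back to the page boundary, so the
    		 * stale partial tail page is discarded */
    		printf("discard tail: truncate to %#llx\n",
    		       oi_size & PAGE_MASK);	/* prints 0x12000 */

    	/* second truncation: out to the new size */
    	printf("extend to %#llx\n", ni_size);
    	return 0;
    }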
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 4ce818ae39ea..14ac4806e291 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -21,17 +21,81 @@
21#include <linux/security.h> 21#include <linux/security.h>
22#include "internal.h" 22#include "internal.h"
23 23
24static int cachefiles_wait_bit(void *flags) 24#define CACHEFILES_KEYBUF_SIZE 512
25
26/*
27 * dump debugging info about an object
28 */
29static noinline
30void __cachefiles_printk_object(struct cachefiles_object *object,
31 const char *prefix,
32 u8 *keybuf)
25{ 33{
26 schedule(); 34 struct fscache_cookie *cookie;
27 return 0; 35 unsigned keylen, loop;
36
37 printk(KERN_ERR "%sobject: OBJ%x\n",
38 prefix, object->fscache.debug_id);
39 printk(KERN_ERR "%sobjstate=%s fl=%lx swfl=%lx ev=%lx[%lx]\n",
40 prefix, fscache_object_states[object->fscache.state],
41 object->fscache.flags, object->fscache.work.flags,
42 object->fscache.events,
43 object->fscache.event_mask & FSCACHE_OBJECT_EVENTS_MASK);
44 printk(KERN_ERR "%sops=%u inp=%u exc=%u\n",
45 prefix, object->fscache.n_ops, object->fscache.n_in_progress,
46 object->fscache.n_exclusive);
47 printk(KERN_ERR "%sparent=%p\n",
48 prefix, object->fscache.parent);
49
50 spin_lock(&object->fscache.lock);
51 cookie = object->fscache.cookie;
52 if (cookie) {
53 printk(KERN_ERR "%scookie=%p [pr=%p nd=%p fl=%lx]\n",
54 prefix,
55 object->fscache.cookie,
56 object->fscache.cookie->parent,
57 object->fscache.cookie->netfs_data,
58 object->fscache.cookie->flags);
59 if (keybuf)
60 keylen = cookie->def->get_key(cookie->netfs_data, keybuf,
61 CACHEFILES_KEYBUF_SIZE);
62 else
63 keylen = 0;
64 } else {
65 printk(KERN_ERR "%scookie=NULL\n", prefix);
66 keylen = 0;
67 }
68 spin_unlock(&object->fscache.lock);
69
70 if (keylen) {
71 printk(KERN_ERR "%skey=[%u] '", prefix, keylen);
72 for (loop = 0; loop < keylen; loop++)
73 printk("%02x", keybuf[loop]);
74 printk("'\n");
75 }
76}
77
78/*
79 * dump debugging info about a pair of objects
80 */
81static noinline void cachefiles_printk_object(struct cachefiles_object *object,
82 struct cachefiles_object *xobject)
83{
84 u8 *keybuf;
85
86 keybuf = kmalloc(CACHEFILES_KEYBUF_SIZE, GFP_NOIO);
87 if (object)
88 __cachefiles_printk_object(object, "", keybuf);
89 if (xobject)
90 __cachefiles_printk_object(xobject, "x", keybuf);
91 kfree(keybuf);
28} 92}
29 93
30/* 94/*
31 * record the fact that an object is now active 95 * record the fact that an object is now active
32 */ 96 */
33static void cachefiles_mark_object_active(struct cachefiles_cache *cache, 97static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
34 struct cachefiles_object *object) 98 struct cachefiles_object *object)
35{ 99{
36 struct cachefiles_object *xobject; 100 struct cachefiles_object *xobject;
37 struct rb_node **_p, *_parent = NULL; 101 struct rb_node **_p, *_parent = NULL;
@@ -42,8 +106,11 @@ static void cachefiles_mark_object_active(struct cachefiles_cache *cache,
42try_again: 106try_again:
43 write_lock(&cache->active_lock); 107 write_lock(&cache->active_lock);
44 108
45 if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags)) 109 if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags)) {
110 printk(KERN_ERR "CacheFiles: Error: Object already active\n");
111 cachefiles_printk_object(object, NULL);
46 BUG(); 112 BUG();
113 }
47 114
48 dentry = object->dentry; 115 dentry = object->dentry;
49 _p = &cache->active_nodes.rb_node; 116 _p = &cache->active_nodes.rb_node;
@@ -66,8 +133,8 @@ try_again:
66 rb_insert_color(&object->active_node, &cache->active_nodes); 133 rb_insert_color(&object->active_node, &cache->active_nodes);
67 134
68 write_unlock(&cache->active_lock); 135 write_unlock(&cache->active_lock);
69 _leave(""); 136 _leave(" = 0");
70 return; 137 return 0;
71 138
72 /* an old object from a previous incarnation is hogging the slot - we 139 /* an old object from a previous incarnation is hogging the slot - we
73 * need to wait for it to be destroyed */ 140 * need to wait for it to be destroyed */
@@ -76,44 +143,70 @@ wait_for_old_object:
76 printk(KERN_ERR "\n"); 143 printk(KERN_ERR "\n");
77 printk(KERN_ERR "CacheFiles: Error:" 144 printk(KERN_ERR "CacheFiles: Error:"
78 " Unexpected object collision\n"); 145 " Unexpected object collision\n");
79 printk(KERN_ERR "xobject: OBJ%x\n", 146 cachefiles_printk_object(object, xobject);
80 xobject->fscache.debug_id);
81 printk(KERN_ERR "xobjstate=%s\n",
82 fscache_object_states[xobject->fscache.state]);
83 printk(KERN_ERR "xobjflags=%lx\n", xobject->fscache.flags);
84 printk(KERN_ERR "xobjevent=%lx [%lx]\n",
85 xobject->fscache.events, xobject->fscache.event_mask);
86 printk(KERN_ERR "xops=%u inp=%u exc=%u\n",
87 xobject->fscache.n_ops, xobject->fscache.n_in_progress,
88 xobject->fscache.n_exclusive);
89 printk(KERN_ERR "xcookie=%p [pr=%p nd=%p fl=%lx]\n",
90 xobject->fscache.cookie,
91 xobject->fscache.cookie->parent,
92 xobject->fscache.cookie->netfs_data,
93 xobject->fscache.cookie->flags);
94 printk(KERN_ERR "xparent=%p\n",
95 xobject->fscache.parent);
96 printk(KERN_ERR "object: OBJ%x\n",
97 object->fscache.debug_id);
98 printk(KERN_ERR "cookie=%p [pr=%p nd=%p fl=%lx]\n",
99 object->fscache.cookie,
100 object->fscache.cookie->parent,
101 object->fscache.cookie->netfs_data,
102 object->fscache.cookie->flags);
103 printk(KERN_ERR "parent=%p\n",
104 object->fscache.parent);
105 BUG(); 147 BUG();
106 } 148 }
107 atomic_inc(&xobject->usage); 149 atomic_inc(&xobject->usage);
108 write_unlock(&cache->active_lock); 150 write_unlock(&cache->active_lock);
109 151
110 _debug(">>> wait"); 152 if (test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
111 wait_on_bit(&xobject->flags, CACHEFILES_OBJECT_ACTIVE, 153 wait_queue_head_t *wq;
112 cachefiles_wait_bit, TASK_UNINTERRUPTIBLE); 154
113 _debug("<<< waited"); 155 signed long timeout = 60 * HZ;
156 wait_queue_t wait;
157 bool requeue;
158
159 /* if the object we're waiting for is queued for processing,
160 * then just put ourselves on the queue behind it */
161 if (slow_work_is_queued(&xobject->fscache.work)) {
162 _debug("queue OBJ%x behind OBJ%x immediately",
163 object->fscache.debug_id,
164 xobject->fscache.debug_id);
165 goto requeue;
166 }
167
168 /* otherwise we sleep until either the object we're waiting for
169 * is done, or the slow-work facility wants the thread back to
170 * do other work */
171 wq = bit_waitqueue(&xobject->flags, CACHEFILES_OBJECT_ACTIVE);
172 init_wait(&wait);
173 requeue = false;
174 do {
175 prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
176 if (!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags))
177 break;
178 requeue = slow_work_sleep_till_thread_needed(
179 &object->fscache.work, &timeout);
180 } while (timeout > 0 && !requeue);
181 finish_wait(wq, &wait);
182
183 if (requeue &&
184 test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
185 _debug("queue OBJ%x behind OBJ%x after wait",
186 object->fscache.debug_id,
187 xobject->fscache.debug_id);
188 goto requeue;
189 }
190
191 if (timeout <= 0) {
192 printk(KERN_ERR "\n");
193 printk(KERN_ERR "CacheFiles: Error: Overlong"
194 " wait for old active object to go away\n");
195 cachefiles_printk_object(object, xobject);
196 goto requeue;
197 }
198 }
199
200 ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags));
114 201
115 cache->cache.ops->put_object(&xobject->fscache); 202 cache->cache.ops->put_object(&xobject->fscache);
116 goto try_again; 203 goto try_again;
204
205requeue:
206 clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
207 cache->cache.ops->put_object(&xobject->fscache);
208 _leave(" = -ETIMEDOUT");
209 return -ETIMEDOUT;
117} 210}
118 211
119/* 212/*
@@ -254,7 +347,7 @@ int cachefiles_delete_object(struct cachefiles_cache *cache,
254 347
255 dir = dget_parent(object->dentry); 348 dir = dget_parent(object->dentry);
256 349
257 mutex_lock(&dir->d_inode->i_mutex); 350 mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
258 ret = cachefiles_bury_object(cache, dir, object->dentry); 351 ret = cachefiles_bury_object(cache, dir, object->dentry);
259 352
260 dput(dir); 353 dput(dir);
@@ -307,7 +400,7 @@ lookup_again:
307 /* search the current directory for the element name */ 400 /* search the current directory for the element name */
308 _debug("lookup '%s'", name); 401 _debug("lookup '%s'", name);
309 402
310 mutex_lock(&dir->d_inode->i_mutex); 403 mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
311 404
312 start = jiffies; 405 start = jiffies;
313 next = lookup_one_len(name, dir, nlen); 406 next = lookup_one_len(name, dir, nlen);
@@ -418,12 +511,15 @@ lookup_again:
418 } 511 }
419 512
420 /* note that we're now using this object */ 513 /* note that we're now using this object */
421 cachefiles_mark_object_active(cache, object); 514 ret = cachefiles_mark_object_active(cache, object);
422 515
423 mutex_unlock(&dir->d_inode->i_mutex); 516 mutex_unlock(&dir->d_inode->i_mutex);
424 dput(dir); 517 dput(dir);
425 dir = NULL; 518 dir = NULL;
426 519
520 if (ret == -ETIMEDOUT)
521 goto mark_active_timed_out;
522
427 _debug("=== OBTAINED_OBJECT ==="); 523 _debug("=== OBTAINED_OBJECT ===");
428 524
429 if (object->new) { 525 if (object->new) {
@@ -467,6 +563,10 @@ create_error:
467 cachefiles_io_error(cache, "Create/mkdir failed"); 563 cachefiles_io_error(cache, "Create/mkdir failed");
468 goto error; 564 goto error;
469 565
566mark_active_timed_out:
567 _debug("mark active timed out");
568 goto release_dentry;
569
470check_error: 570check_error:
471 _debug("check error %d", ret); 571 _debug("check error %d", ret);
472 write_lock(&cache->active_lock); 572 write_lock(&cache->active_lock);
@@ -474,7 +574,7 @@ check_error:
474 clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags); 574 clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
475 wake_up_bit(&object->flags, CACHEFILES_OBJECT_ACTIVE); 575 wake_up_bit(&object->flags, CACHEFILES_OBJECT_ACTIVE);
476 write_unlock(&cache->active_lock); 576 write_unlock(&cache->active_lock);
477 577release_dentry:
478 dput(object->dentry); 578 dput(object->dentry);
479 object->dentry = NULL; 579 object->dentry = NULL;
480 goto error_out; 580 goto error_out;
@@ -495,9 +595,6 @@ error:
495error_out2: 595error_out2:
496 dput(dir); 596 dput(dir);
497error_out: 597error_out:
498 if (ret == -ENOSPC)
499 ret = -ENOBUFS;
500
501 _leave(" = error %d", -ret); 598 _leave(" = error %d", -ret);
502 return ret; 599 return ret;
503} 600}
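
The rewritten wait in cachefiles_mark_object_active() trades wait_on_bit() for an open-coded prepare_to_wait() loop, so the sleep can be abandoned when the slow-work facility wants its thread back or after 60 seconds. The general shape of that idiom as a sketch; my_sleep_till_needed() is a placeholder standing in for slow_work_sleep_till_thread_needed(), and MY_BIT stands in for CACHEFILES_OBJECT_ACTIVE:

    #include <linux/wait.h>
    #include <linux/sched.h>
    #include <linux/bitops.h>

    #define MY_BIT 0

    /* placeholder: assumed to sleep, decrement *timeout, and return
     * true when the worker thread must be handed back (requeue) */
    static bool my_sleep_till_needed(signed long *timeout);

    /* returns true if the caller should requeue itself rather than
     * proceed (thread reclaimed or the 60s budget elapsed) */
    static bool wait_for_bit_or_requeue(unsigned long *flags)
    {
    	wait_queue_head_t *wq = bit_waitqueue(flags, MY_BIT);
    	signed long timeout = 60 * HZ;
    	wait_queue_t wait;
    	bool requeue = false;

    	init_wait(&wait);
    	do {
    		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
    		if (!test_bit(MY_BIT, flags))
    			break;		/* bit cleared: done waiting */
    		requeue = my_sleep_till_needed(&timeout);
    	} while (timeout > 0 && !requeue);
    	finish_wait(wq, &wait);

    	return requeue || timeout <= 0;
    }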
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index a69787e7dd96..1d8332563863 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -40,8 +40,10 @@ static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
40 40
41 _debug("--- monitor %p %lx ---", page, page->flags); 41 _debug("--- monitor %p %lx ---", page, page->flags);
42 42
43 if (!PageUptodate(page) && !PageError(page)) 43 if (!PageUptodate(page) && !PageError(page)) {
 44 dump_stack(); 44 /* unlocked, not uptodate and not erroneous? */
45 _debug("page probably truncated");
46 }
45 47
46 /* remove from the waitqueue */ 48 /* remove from the waitqueue */
47 list_del(&wait->task_list); 49 list_del(&wait->task_list);
@@ -61,6 +63,84 @@ static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
61} 63}
62 64
63/* 65/*
66 * handle a probably truncated page
67 * - check to see if the page is still relevant and reissue the read if
68 * possible
69 * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
70 * must wait again and 0 if successful
71 */
72static int cachefiles_read_reissue(struct cachefiles_object *object,
73 struct cachefiles_one_read *monitor)
74{
75 struct address_space *bmapping = object->backer->d_inode->i_mapping;
76 struct page *backpage = monitor->back_page, *backpage2;
77 int ret;
78
79 kenter("{ino=%lx},{%lx,%lx}",
80 object->backer->d_inode->i_ino,
81 backpage->index, backpage->flags);
82
83 /* skip if the page was truncated away completely */
84 if (backpage->mapping != bmapping) {
85 kleave(" = -ENODATA [mapping]");
86 return -ENODATA;
87 }
88
89 backpage2 = find_get_page(bmapping, backpage->index);
90 if (!backpage2) {
91 kleave(" = -ENODATA [gone]");
92 return -ENODATA;
93 }
94
95 if (backpage != backpage2) {
96 put_page(backpage2);
97 kleave(" = -ENODATA [different]");
98 return -ENODATA;
99 }
100
101 /* the page is still there and we already have a ref on it, so we don't
102 * need a second */
103 put_page(backpage2);
104
105 INIT_LIST_HEAD(&monitor->op_link);
106 add_page_wait_queue(backpage, &monitor->monitor);
107
108 if (trylock_page(backpage)) {
109 ret = -EIO;
110 if (PageError(backpage))
111 goto unlock_discard;
112 ret = 0;
113 if (PageUptodate(backpage))
114 goto unlock_discard;
115
116 kdebug("reissue read");
117 ret = bmapping->a_ops->readpage(NULL, backpage);
118 if (ret < 0)
119 goto unlock_discard;
120 }
121
122 /* but the page may have been read before the monitor was installed, so
123 * the monitor may miss the event - so we have to ensure that we do get
124 * one in such a case */
125 if (trylock_page(backpage)) {
126 _debug("jumpstart %p {%lx}", backpage, backpage->flags);
127 unlock_page(backpage);
128 }
129
130 /* it'll reappear on the todo list */
131 kleave(" = -EINPROGRESS");
132 return -EINPROGRESS;
133
134unlock_discard:
135 unlock_page(backpage);
136 spin_lock_irq(&object->work_lock);
137 list_del(&monitor->op_link);
138 spin_unlock_irq(&object->work_lock);
139 kleave(" = %d", ret);
140 return ret;
141}
142
143/*
64 * copy data from backing pages to netfs pages to complete a read operation 144 * copy data from backing pages to netfs pages to complete a read operation
65 * - driven by FS-Cache's thread pool 145 * - driven by FS-Cache's thread pool
66 */ 146 */
@@ -92,20 +172,26 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
92 172
93 _debug("- copy {%lu}", monitor->back_page->index); 173 _debug("- copy {%lu}", monitor->back_page->index);
94 174
95 error = -EIO; 175 recheck:
96 if (PageUptodate(monitor->back_page)) { 176 if (PageUptodate(monitor->back_page)) {
97 copy_highpage(monitor->netfs_page, monitor->back_page); 177 copy_highpage(monitor->netfs_page, monitor->back_page);
98 178
99 pagevec_add(&pagevec, monitor->netfs_page); 179 pagevec_add(&pagevec, monitor->netfs_page);
100 fscache_mark_pages_cached(monitor->op, &pagevec); 180 fscache_mark_pages_cached(monitor->op, &pagevec);
101 error = 0; 181 error = 0;
102 } 182 } else if (!PageError(monitor->back_page)) {
103 183 /* the page has probably been truncated */
104 if (error) 184 error = cachefiles_read_reissue(object, monitor);
185 if (error == -EINPROGRESS)
186 goto next;
187 goto recheck;
188 } else {
105 cachefiles_io_error_obj( 189 cachefiles_io_error_obj(
106 object, 190 object,
107 "Readpage failed on backing file %lx", 191 "Readpage failed on backing file %lx",
108 (unsigned long) monitor->back_page->flags); 192 (unsigned long) monitor->back_page->flags);
193 error = -EIO;
194 }
109 195
110 page_cache_release(monitor->back_page); 196 page_cache_release(monitor->back_page);
111 197
@@ -114,6 +200,7 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
114 fscache_put_retrieval(op); 200 fscache_put_retrieval(op);
115 kfree(monitor); 201 kfree(monitor);
116 202
203 next:
117 /* let the thread pool have some air occasionally */ 204 /* let the thread pool have some air occasionally */
118 max--; 205 max--;
119 if (max < 0 || need_resched()) { 206 if (max < 0 || need_resched()) {
@@ -333,7 +420,8 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
333 420
334 shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits; 421 shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
335 422
336 op->op.flags = FSCACHE_OP_FAST; 423 op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
424 op->op.flags |= FSCACHE_OP_FAST;
337 op->op.processor = cachefiles_read_copier; 425 op->op.processor = cachefiles_read_copier;
338 426
339 pagevec_init(&pagevec, 0); 427 pagevec_init(&pagevec, 0);
@@ -639,7 +727,8 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
639 727
640 pagevec_init(&pagevec, 0); 728 pagevec_init(&pagevec, 0);
641 729
642 op->op.flags = FSCACHE_OP_FAST; 730 op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
731 op->op.flags |= FSCACHE_OP_FAST;
643 op->op.processor = cachefiles_read_copier; 732 op->op.processor = cachefiles_read_copier;
644 733
645 INIT_LIST_HEAD(&backpages); 734 INIT_LIST_HEAD(&backpages);
@@ -801,7 +890,8 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
801 struct cachefiles_cache *cache; 890 struct cachefiles_cache *cache;
802 mm_segment_t old_fs; 891 mm_segment_t old_fs;
803 struct file *file; 892 struct file *file;
804 loff_t pos; 893 loff_t pos, eof;
894 size_t len;
805 void *data; 895 void *data;
806 int ret; 896 int ret;
807 897
@@ -835,15 +925,29 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
835 ret = -EIO; 925 ret = -EIO;
836 if (file->f_op->write) { 926 if (file->f_op->write) {
837 pos = (loff_t) page->index << PAGE_SHIFT; 927 pos = (loff_t) page->index << PAGE_SHIFT;
928
929 /* we mustn't write more data than we have, so we have
930 * to beware of a partial page at EOF */
931 eof = object->fscache.store_limit_l;
932 len = PAGE_SIZE;
933 if (eof & ~PAGE_MASK) {
934 ASSERTCMP(pos, <, eof);
935 if (eof - pos < PAGE_SIZE) {
936 _debug("cut short %llx to %llx",
937 pos, eof);
938 len = eof - pos;
939 ASSERTCMP(pos + len, ==, eof);
940 }
941 }
942
838 data = kmap(page); 943 data = kmap(page);
839 old_fs = get_fs(); 944 old_fs = get_fs();
840 set_fs(KERNEL_DS); 945 set_fs(KERNEL_DS);
841 ret = file->f_op->write( 946 ret = file->f_op->write(
842 file, (const void __user *) data, PAGE_SIZE, 947 file, (const void __user *) data, len, &pos);
843 &pos);
844 set_fs(old_fs); 948 set_fs(old_fs);
845 kunmap(page); 949 kunmap(page);
846 if (ret != PAGE_SIZE) 950 if (ret != len)
847 ret = -EIO; 951 ret = -EIO;
848 } 952 }
849 fput(file); 953 fput(file);
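
cachefiles_write_page() now clamps the write length so a partial page at the store limit is not padded out to a full PAGE_SIZE write. The clamping arithmetic in isolation, as runnable userspace C with hypothetical sizes:

    #include <stdio.h>

    #define PAGE_SIZE 4096ULL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
    	unsigned long long eof = 0x12345;	/* store limit, mid-page (hypothetical) */
    	unsigned long long pos = 0x12000;	/* start of the final page */
    	unsigned long long len = PAGE_SIZE;

    	/* same clamp as above: never write past a store limit that
    	 * falls inside a page */
    	if ((eof & ~PAGE_MASK) && eof - pos < PAGE_SIZE)
    		len = eof - pos;		/* 0x345 bytes, not 4096 */

    	printf("write %llu bytes at %#llx\n", len, pos);
    	return 0;
    }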
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 145540a316ab..094ea65afc85 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,3 +1,12 @@
1Version 1.61
2------------
3Fix append problem to Samba servers (files opened with O_APPEND could
4have duplicated data). Fix oops in cifs_lookup. Workaround problem
5mounting to OS/400 Netserve. Fix oops in cifs_get_tcp_session.
 6Disable use of server inode numbers when the server only
 7partially supports them (e.g. on one server, querying inode numbers via
 8FindFirst fails but QPathInfo queries work).
9
1Version 1.60 10Version 1.60
2------------- 11-------------
3Fix memory leak in reconnect. Fix oops in DFS mount error path. 12Fix memory leak in reconnect. Fix oops in DFS mount error path.
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 9a5e4f5f3122..29f1da761bbf 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -1037,7 +1037,7 @@ init_cifs(void)
1037 if (rc) 1037 if (rc)
1038 goto out_unregister_key_type; 1038 goto out_unregister_key_type;
1039#endif 1039#endif
1040 rc = slow_work_register_user(); 1040 rc = slow_work_register_user(THIS_MODULE);
1041 if (rc) 1041 if (rc)
1042 goto out_unregister_resolver_key; 1042 goto out_unregister_resolver_key;
1043 1043
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 627a60a6c1b1..1f42f772865a 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -214,8 +214,6 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
214 posix_flags |= SMB_O_EXCL; 214 posix_flags |= SMB_O_EXCL;
215 if (oflags & O_TRUNC) 215 if (oflags & O_TRUNC)
216 posix_flags |= SMB_O_TRUNC; 216 posix_flags |= SMB_O_TRUNC;
217 if (oflags & O_APPEND)
218 posix_flags |= SMB_O_APPEND;
219 if (oflags & O_SYNC) 217 if (oflags & O_SYNC)
220 posix_flags |= SMB_O_SYNC; 218 posix_flags |= SMB_O_SYNC;
221 if (oflags & O_DIRECTORY) 219 if (oflags & O_DIRECTORY)
@@ -643,9 +641,9 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
643 * O_EXCL: optimize away the lookup, but don't hash the dentry. Let 641 * O_EXCL: optimize away the lookup, but don't hash the dentry. Let
644 * the VFS handle the create. 642 * the VFS handle the create.
645 */ 643 */
646 if (nd->flags & LOOKUP_EXCL) { 644 if (nd && (nd->flags & LOOKUP_EXCL)) {
647 d_instantiate(direntry, NULL); 645 d_instantiate(direntry, NULL);
648 return 0; 646 return NULL;
649 } 647 }
650 648
651 /* can not grab the rename sem here since it would 649 /* can not grab the rename sem here since it would
@@ -675,7 +673,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
675 * reduction in network traffic in the other paths. 673 * reduction in network traffic in the other paths.
676 */ 674 */
677 if (pTcon->unix_ext) { 675 if (pTcon->unix_ext) {
678 if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY)) && 676 if (nd && !(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY)) &&
679 (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open && 677 (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open &&
680 (nd->intent.open.flags & O_CREAT)) { 678 (nd->intent.open.flags & O_CREAT)) {
681 rc = cifs_posix_open(full_path, &newInode, nd->path.mnt, 679 rc = cifs_posix_open(full_path, &newInode, nd->path.mnt,
diff --git a/fs/fscache/Kconfig b/fs/fscache/Kconfig
index 9bbb8ce7bea0..864dac20a242 100644
--- a/fs/fscache/Kconfig
+++ b/fs/fscache/Kconfig
@@ -54,3 +54,10 @@ config FSCACHE_DEBUG
54 enabled by setting bits in /sys/modules/fscache/parameter/debug. 54 enabled by setting bits in /sys/modules/fscache/parameter/debug.
55 55
56 See Documentation/filesystems/caching/fscache.txt for more information. 56 See Documentation/filesystems/caching/fscache.txt for more information.
57
58config FSCACHE_OBJECT_LIST
59 bool "Maintain global object list for debugging purposes"
60 depends on FSCACHE && PROC_FS
61 help
62 Maintain a global list of active fscache objects that can be
 63 retrieved through /proc/fs/fscache/objects for debugging purposes.
diff --git a/fs/fscache/Makefile b/fs/fscache/Makefile
index 91571b95aacc..6d561531cb36 100644
--- a/fs/fscache/Makefile
+++ b/fs/fscache/Makefile
@@ -15,5 +15,6 @@ fscache-y := \
15fscache-$(CONFIG_PROC_FS) += proc.o 15fscache-$(CONFIG_PROC_FS) += proc.o
16fscache-$(CONFIG_FSCACHE_STATS) += stats.o 16fscache-$(CONFIG_FSCACHE_STATS) += stats.o
17fscache-$(CONFIG_FSCACHE_HISTOGRAM) += histogram.o 17fscache-$(CONFIG_FSCACHE_HISTOGRAM) += histogram.o
18fscache-$(CONFIG_FSCACHE_OBJECT_LIST) += object-list.o
18 19
19obj-$(CONFIG_FSCACHE) := fscache.o 20obj-$(CONFIG_FSCACHE) := fscache.o
diff --git a/fs/fscache/cache.c b/fs/fscache/cache.c
index e21985bbb1fb..6a3c48abd677 100644
--- a/fs/fscache/cache.c
+++ b/fs/fscache/cache.c
@@ -263,6 +263,7 @@ int fscache_add_cache(struct fscache_cache *cache,
263 spin_lock(&cache->object_list_lock); 263 spin_lock(&cache->object_list_lock);
264 list_add_tail(&ifsdef->cache_link, &cache->object_list); 264 list_add_tail(&ifsdef->cache_link, &cache->object_list);
265 spin_unlock(&cache->object_list_lock); 265 spin_unlock(&cache->object_list_lock);
266 fscache_objlist_add(ifsdef);
266 267
267 /* add the cache's netfs definition index object to the top level index 268 /* add the cache's netfs definition index object to the top level index
268 * cookie as a known backing object */ 269 * cookie as a known backing object */
@@ -380,11 +381,15 @@ void fscache_withdraw_cache(struct fscache_cache *cache)
380 381
381 /* make sure all pages pinned by operations on behalf of the netfs are 382 /* make sure all pages pinned by operations on behalf of the netfs are
382 * written to disk */ 383 * written to disk */
384 fscache_stat(&fscache_n_cop_sync_cache);
383 cache->ops->sync_cache(cache); 385 cache->ops->sync_cache(cache);
386 fscache_stat_d(&fscache_n_cop_sync_cache);
384 387
385 /* dissociate all the netfs pages backed by this cache from the block 388 /* dissociate all the netfs pages backed by this cache from the block
386 * mappings in the cache */ 389 * mappings in the cache */
390 fscache_stat(&fscache_n_cop_dissociate_pages);
387 cache->ops->dissociate_pages(cache); 391 cache->ops->dissociate_pages(cache);
392 fscache_stat_d(&fscache_n_cop_dissociate_pages);
388 393
389 /* we now have to destroy all the active objects pertaining to this 394 /* we now have to destroy all the active objects pertaining to this
390 * cache - which we do by passing them off to thread pool to be 395 * cache - which we do by passing them off to thread pool to be
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index 72fd18f6c71f..990535071a8a 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -36,6 +36,7 @@ void fscache_cookie_init_once(void *_cookie)
36 36
37 memset(cookie, 0, sizeof(*cookie)); 37 memset(cookie, 0, sizeof(*cookie));
38 spin_lock_init(&cookie->lock); 38 spin_lock_init(&cookie->lock);
39 spin_lock_init(&cookie->stores_lock);
39 INIT_HLIST_HEAD(&cookie->backing_objects); 40 INIT_HLIST_HEAD(&cookie->backing_objects);
40} 41}
41 42
@@ -102,7 +103,9 @@ struct fscache_cookie *__fscache_acquire_cookie(
102 cookie->netfs_data = netfs_data; 103 cookie->netfs_data = netfs_data;
103 cookie->flags = 0; 104 cookie->flags = 0;
104 105
105 INIT_RADIX_TREE(&cookie->stores, GFP_NOFS); 106 /* radix tree insertion won't use the preallocation pool unless it's
107 * told it may not wait */
108 INIT_RADIX_TREE(&cookie->stores, GFP_NOFS & ~__GFP_WAIT);
106 109
107 switch (cookie->def->type) { 110 switch (cookie->def->type) {
108 case FSCACHE_COOKIE_TYPE_INDEX: 111 case FSCACHE_COOKIE_TYPE_INDEX:
@@ -249,7 +252,9 @@ static int fscache_alloc_object(struct fscache_cache *cache,
249 252
250 /* ask the cache to allocate an object (we may end up with duplicate 253 /* ask the cache to allocate an object (we may end up with duplicate
251 * objects at this stage, but we sort that out later) */ 254 * objects at this stage, but we sort that out later) */
255 fscache_stat(&fscache_n_cop_alloc_object);
252 object = cache->ops->alloc_object(cache, cookie); 256 object = cache->ops->alloc_object(cache, cookie);
257 fscache_stat_d(&fscache_n_cop_alloc_object);
253 if (IS_ERR(object)) { 258 if (IS_ERR(object)) {
254 fscache_stat(&fscache_n_object_no_alloc); 259 fscache_stat(&fscache_n_object_no_alloc);
255 ret = PTR_ERR(object); 260 ret = PTR_ERR(object);
@@ -270,8 +275,11 @@ static int fscache_alloc_object(struct fscache_cache *cache,
270 /* only attach if we managed to allocate all we needed, otherwise 275 /* only attach if we managed to allocate all we needed, otherwise
271 * discard the object we just allocated and instead use the one 276 * discard the object we just allocated and instead use the one
272 * attached to the cookie */ 277 * attached to the cookie */
273 if (fscache_attach_object(cookie, object) < 0) 278 if (fscache_attach_object(cookie, object) < 0) {
279 fscache_stat(&fscache_n_cop_put_object);
274 cache->ops->put_object(object); 280 cache->ops->put_object(object);
281 fscache_stat_d(&fscache_n_cop_put_object);
282 }
275 283
276 _leave(" = 0"); 284 _leave(" = 0");
277 return 0; 285 return 0;
@@ -287,7 +295,9 @@ object_already_extant:
287 return 0; 295 return 0;
288 296
289error_put: 297error_put:
298 fscache_stat(&fscache_n_cop_put_object);
290 cache->ops->put_object(object); 299 cache->ops->put_object(object);
300 fscache_stat_d(&fscache_n_cop_put_object);
291error: 301error:
292 _leave(" = %d", ret); 302 _leave(" = %d", ret);
293 return ret; 303 return ret;
@@ -349,6 +359,8 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
349 object->cookie = cookie; 359 object->cookie = cookie;
350 atomic_inc(&cookie->usage); 360 atomic_inc(&cookie->usage);
351 hlist_add_head(&object->cookie_link, &cookie->backing_objects); 361 hlist_add_head(&object->cookie_link, &cookie->backing_objects);
362
363 fscache_objlist_add(object);
352 ret = 0; 364 ret = 0;
353 365
354cant_attach_object: 366cant_attach_object:
@@ -403,6 +415,8 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
403 unsigned long event; 415 unsigned long event;
404 416
405 fscache_stat(&fscache_n_relinquishes); 417 fscache_stat(&fscache_n_relinquishes);
418 if (retire)
419 fscache_stat(&fscache_n_relinquishes_retire);
406 420
407 if (!cookie) { 421 if (!cookie) {
408 fscache_stat(&fscache_n_relinquishes_null); 422 fscache_stat(&fscache_n_relinquishes_null);
@@ -428,12 +442,8 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
428 442
429 event = retire ? FSCACHE_OBJECT_EV_RETIRE : FSCACHE_OBJECT_EV_RELEASE; 443 event = retire ? FSCACHE_OBJECT_EV_RETIRE : FSCACHE_OBJECT_EV_RELEASE;
430 444
431 /* detach pointers back to the netfs */
432 spin_lock(&cookie->lock); 445 spin_lock(&cookie->lock);
433 446
434 cookie->netfs_data = NULL;
435 cookie->def = NULL;
436
437 /* break links with all the active objects */ 447 /* break links with all the active objects */
438 while (!hlist_empty(&cookie->backing_objects)) { 448 while (!hlist_empty(&cookie->backing_objects)) {
439 object = hlist_entry(cookie->backing_objects.first, 449 object = hlist_entry(cookie->backing_objects.first,
@@ -456,6 +466,10 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
456 BUG(); 466 BUG();
457 } 467 }
458 468
469 /* detach pointers back to the netfs */
470 cookie->netfs_data = NULL;
471 cookie->def = NULL;
472
459 spin_unlock(&cookie->lock); 473 spin_unlock(&cookie->lock);
460 474
461 if (cookie->parent) { 475 if (cookie->parent) {
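
Masking __GFP_WAIT off the radix tree's gfp mask means insertions done under the new cookie->stores_lock must draw their nodes from the per-CPU preload pool rather than allocating (and possibly sleeping) under the spinlock. The usual preload pattern this enables, sketched with an illustrative function name (radix_tree_preload() and radix_tree_preload_end() are the real API; the cookie fields are those initialised above):

    #include <linux/radix-tree.h>
    #include <linux/spinlock.h>
    #include <linux/pagemap.h>

    static int cache_a_page(struct fscache_cookie *cookie,
    			struct page *page, gfp_t gfp)
    {
    	int ret;

    	/* may sleep here, filling the per-CPU preload pool... */
    	ret = radix_tree_preload(gfp);
    	if (ret < 0)
    		return ret;

    	/* ...so the insertion under the lock never has to: the
    	 * tree's own mask lacks __GFP_WAIT */
    	spin_lock(&cookie->stores_lock);
    	ret = radix_tree_insert(&cookie->stores, page->index, page);
    	spin_unlock(&cookie->stores_lock);

    	radix_tree_preload_end();
    	return ret;
    }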
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
index 1c341304621f..edd7434ab6e5 100644
--- a/fs/fscache/internal.h
+++ b/fs/fscache/internal.h
@@ -17,6 +17,7 @@
17 * - cache->object_list_lock 17 * - cache->object_list_lock
18 * - object->lock 18 * - object->lock
19 * - object->parent->lock 19 * - object->parent->lock
20 * - cookie->stores_lock
20 * - fscache_thread_lock 21 * - fscache_thread_lock
21 * 22 *
22 */ 23 */
@@ -88,17 +89,31 @@ extern int fscache_wait_bit_interruptible(void *);
88/* 89/*
89 * object.c 90 * object.c
90 */ 91 */
92extern const char fscache_object_states_short[FSCACHE_OBJECT__NSTATES][5];
93
91extern void fscache_withdrawing_object(struct fscache_cache *, 94extern void fscache_withdrawing_object(struct fscache_cache *,
92 struct fscache_object *); 95 struct fscache_object *);
93extern void fscache_enqueue_object(struct fscache_object *); 96extern void fscache_enqueue_object(struct fscache_object *);
94 97
95/* 98/*
99 * object-list.c
100 */
101#ifdef CONFIG_FSCACHE_OBJECT_LIST
102extern const struct file_operations fscache_objlist_fops;
103
104extern void fscache_objlist_add(struct fscache_object *);
105#else
 106#define fscache_objlist_add(object) do {} while (0)
107#endif
108
109/*
96 * operation.c 110 * operation.c
97 */ 111 */
98extern int fscache_submit_exclusive_op(struct fscache_object *, 112extern int fscache_submit_exclusive_op(struct fscache_object *,
99 struct fscache_operation *); 113 struct fscache_operation *);
100extern int fscache_submit_op(struct fscache_object *, 114extern int fscache_submit_op(struct fscache_object *,
101 struct fscache_operation *); 115 struct fscache_operation *);
116extern int fscache_cancel_op(struct fscache_operation *);
102extern void fscache_abort_object(struct fscache_object *); 117extern void fscache_abort_object(struct fscache_object *);
103extern void fscache_start_operations(struct fscache_object *); 118extern void fscache_start_operations(struct fscache_object *);
104extern void fscache_operation_gc(struct work_struct *); 119extern void fscache_operation_gc(struct work_struct *);
@@ -127,6 +142,8 @@ extern atomic_t fscache_n_op_enqueue;
127extern atomic_t fscache_n_op_deferred_release; 142extern atomic_t fscache_n_op_deferred_release;
128extern atomic_t fscache_n_op_release; 143extern atomic_t fscache_n_op_release;
129extern atomic_t fscache_n_op_gc; 144extern atomic_t fscache_n_op_gc;
145extern atomic_t fscache_n_op_cancelled;
146extern atomic_t fscache_n_op_rejected;
130 147
131extern atomic_t fscache_n_attr_changed; 148extern atomic_t fscache_n_attr_changed;
132extern atomic_t fscache_n_attr_changed_ok; 149extern atomic_t fscache_n_attr_changed_ok;
@@ -138,6 +155,8 @@ extern atomic_t fscache_n_allocs;
138extern atomic_t fscache_n_allocs_ok; 155extern atomic_t fscache_n_allocs_ok;
139extern atomic_t fscache_n_allocs_wait; 156extern atomic_t fscache_n_allocs_wait;
140extern atomic_t fscache_n_allocs_nobufs; 157extern atomic_t fscache_n_allocs_nobufs;
158extern atomic_t fscache_n_allocs_intr;
159extern atomic_t fscache_n_allocs_object_dead;
141extern atomic_t fscache_n_alloc_ops; 160extern atomic_t fscache_n_alloc_ops;
142extern atomic_t fscache_n_alloc_op_waits; 161extern atomic_t fscache_n_alloc_op_waits;
143 162
@@ -148,6 +167,7 @@ extern atomic_t fscache_n_retrievals_nodata;
148extern atomic_t fscache_n_retrievals_nobufs; 167extern atomic_t fscache_n_retrievals_nobufs;
149extern atomic_t fscache_n_retrievals_intr; 168extern atomic_t fscache_n_retrievals_intr;
150extern atomic_t fscache_n_retrievals_nomem; 169extern atomic_t fscache_n_retrievals_nomem;
170extern atomic_t fscache_n_retrievals_object_dead;
151extern atomic_t fscache_n_retrieval_ops; 171extern atomic_t fscache_n_retrieval_ops;
152extern atomic_t fscache_n_retrieval_op_waits; 172extern atomic_t fscache_n_retrieval_op_waits;
153 173
@@ -158,6 +178,14 @@ extern atomic_t fscache_n_stores_nobufs;
158extern atomic_t fscache_n_stores_oom; 178extern atomic_t fscache_n_stores_oom;
159extern atomic_t fscache_n_store_ops; 179extern atomic_t fscache_n_store_ops;
160extern atomic_t fscache_n_store_calls; 180extern atomic_t fscache_n_store_calls;
181extern atomic_t fscache_n_store_pages;
182extern atomic_t fscache_n_store_radix_deletes;
183extern atomic_t fscache_n_store_pages_over_limit;
184
185extern atomic_t fscache_n_store_vmscan_not_storing;
186extern atomic_t fscache_n_store_vmscan_gone;
187extern atomic_t fscache_n_store_vmscan_busy;
188extern atomic_t fscache_n_store_vmscan_cancelled;
161 189
162extern atomic_t fscache_n_marks; 190extern atomic_t fscache_n_marks;
163extern atomic_t fscache_n_uncaches; 191extern atomic_t fscache_n_uncaches;
@@ -176,6 +204,7 @@ extern atomic_t fscache_n_updates_run;
176extern atomic_t fscache_n_relinquishes; 204extern atomic_t fscache_n_relinquishes;
177extern atomic_t fscache_n_relinquishes_null; 205extern atomic_t fscache_n_relinquishes_null;
178extern atomic_t fscache_n_relinquishes_waitcrt; 206extern atomic_t fscache_n_relinquishes_waitcrt;
207extern atomic_t fscache_n_relinquishes_retire;
179 208
180extern atomic_t fscache_n_cookie_index; 209extern atomic_t fscache_n_cookie_index;
181extern atomic_t fscache_n_cookie_data; 210extern atomic_t fscache_n_cookie_data;
@@ -186,6 +215,7 @@ extern atomic_t fscache_n_object_no_alloc;
186extern atomic_t fscache_n_object_lookups; 215extern atomic_t fscache_n_object_lookups;
187extern atomic_t fscache_n_object_lookups_negative; 216extern atomic_t fscache_n_object_lookups_negative;
188extern atomic_t fscache_n_object_lookups_positive; 217extern atomic_t fscache_n_object_lookups_positive;
218extern atomic_t fscache_n_object_lookups_timed_out;
189extern atomic_t fscache_n_object_created; 219extern atomic_t fscache_n_object_created;
190extern atomic_t fscache_n_object_avail; 220extern atomic_t fscache_n_object_avail;
191extern atomic_t fscache_n_object_dead; 221extern atomic_t fscache_n_object_dead;
@@ -195,15 +225,41 @@ extern atomic_t fscache_n_checkaux_okay;
195extern atomic_t fscache_n_checkaux_update; 225extern atomic_t fscache_n_checkaux_update;
196extern atomic_t fscache_n_checkaux_obsolete; 226extern atomic_t fscache_n_checkaux_obsolete;
197 227
228extern atomic_t fscache_n_cop_alloc_object;
229extern atomic_t fscache_n_cop_lookup_object;
230extern atomic_t fscache_n_cop_lookup_complete;
231extern atomic_t fscache_n_cop_grab_object;
232extern atomic_t fscache_n_cop_update_object;
233extern atomic_t fscache_n_cop_drop_object;
234extern atomic_t fscache_n_cop_put_object;
235extern atomic_t fscache_n_cop_sync_cache;
236extern atomic_t fscache_n_cop_attr_changed;
237extern atomic_t fscache_n_cop_read_or_alloc_page;
238extern atomic_t fscache_n_cop_read_or_alloc_pages;
239extern atomic_t fscache_n_cop_allocate_page;
240extern atomic_t fscache_n_cop_allocate_pages;
241extern atomic_t fscache_n_cop_write_page;
242extern atomic_t fscache_n_cop_uncache_page;
243extern atomic_t fscache_n_cop_dissociate_pages;
244
198static inline void fscache_stat(atomic_t *stat) 245static inline void fscache_stat(atomic_t *stat)
199{ 246{
200 atomic_inc(stat); 247 atomic_inc(stat);
201} 248}
202 249
250static inline void fscache_stat_d(atomic_t *stat)
251{
252 atomic_dec(stat);
253}
254
255#define __fscache_stat(stat) (stat)
256
203extern const struct file_operations fscache_stats_fops; 257extern const struct file_operations fscache_stats_fops;
204#else 258#else
205 259
260#define __fscache_stat(stat) (NULL)
206#define fscache_stat(stat) do {} while (0) 261#define fscache_stat(stat) do {} while (0)
262#define fscache_stat_d(stat) do {} while (0)
207#endif 263#endif
208 264
209/* 265/*
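
The new fscache_n_cop_* counters are incremented immediately before a call into the cache backend and decremented on return, so each one reads as the number of that operation currently in flight rather than a cumulative total. The bracketing pattern, mirroring its use throughout the series:

    /* the counter is only nonzero while the backend call is executing */
    static inline void update_object_counted(struct fscache_object *object)
    {
    	fscache_stat(&fscache_n_cop_update_object);	/* enter backend */
    	object->cache->ops->update_object(object);
    	fscache_stat_d(&fscache_n_cop_update_object);	/* leave backend */
    }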
diff --git a/fs/fscache/main.c b/fs/fscache/main.c
index 4de41b597499..add6bdb53f04 100644
--- a/fs/fscache/main.c
+++ b/fs/fscache/main.c
@@ -48,7 +48,7 @@ static int __init fscache_init(void)
48{ 48{
49 int ret; 49 int ret;
50 50
51 ret = slow_work_register_user(); 51 ret = slow_work_register_user(THIS_MODULE);
52 if (ret < 0) 52 if (ret < 0)
53 goto error_slow_work; 53 goto error_slow_work;
54 54
@@ -80,7 +80,7 @@ error_kobj:
80error_cookie_jar: 80error_cookie_jar:
81 fscache_proc_cleanup(); 81 fscache_proc_cleanup();
82error_proc: 82error_proc:
83 slow_work_unregister_user(); 83 slow_work_unregister_user(THIS_MODULE);
84error_slow_work: 84error_slow_work:
85 return ret; 85 return ret;
86} 86}
@@ -97,7 +97,7 @@ static void __exit fscache_exit(void)
97 kobject_put(fscache_root); 97 kobject_put(fscache_root);
98 kmem_cache_destroy(fscache_cookie_jar); 98 kmem_cache_destroy(fscache_cookie_jar);
99 fscache_proc_cleanup(); 99 fscache_proc_cleanup();
100 slow_work_unregister_user(); 100 slow_work_unregister_user(THIS_MODULE);
101 printk(KERN_NOTICE "FS-Cache: Unloaded\n"); 101 printk(KERN_NOTICE "FS-Cache: Unloaded\n");
102} 102}
103 103
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
new file mode 100644
index 000000000000..e590242fa41a
--- /dev/null
+++ b/fs/fscache/object-list.c
@@ -0,0 +1,432 @@
1/* Global fscache object list maintainer and viewer
2 *
3 * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#define FSCACHE_DEBUG_LEVEL COOKIE
13#include <linux/module.h>
14#include <linux/seq_file.h>
15#include <linux/key.h>
16#include <keys/user-type.h>
17#include "internal.h"
18
19static struct rb_root fscache_object_list;
20static DEFINE_RWLOCK(fscache_object_list_lock);
21
22struct fscache_objlist_data {
23 unsigned long config; /* display configuration */
24#define FSCACHE_OBJLIST_CONFIG_KEY 0x00000001 /* show object keys */
25#define FSCACHE_OBJLIST_CONFIG_AUX 0x00000002 /* show object auxdata */
26#define FSCACHE_OBJLIST_CONFIG_COOKIE 0x00000004 /* show objects with cookies */
27#define FSCACHE_OBJLIST_CONFIG_NOCOOKIE 0x00000008 /* show objects without cookies */
28#define FSCACHE_OBJLIST_CONFIG_BUSY 0x00000010 /* show busy objects */
29#define FSCACHE_OBJLIST_CONFIG_IDLE 0x00000020 /* show idle objects */
30#define FSCACHE_OBJLIST_CONFIG_PENDWR 0x00000040 /* show objects with pending writes */
31#define FSCACHE_OBJLIST_CONFIG_NOPENDWR 0x00000080 /* show objects without pending writes */
32#define FSCACHE_OBJLIST_CONFIG_READS 0x00000100 /* show objects with active reads */
33#define FSCACHE_OBJLIST_CONFIG_NOREADS 0x00000200 /* show objects without active reads */
34#define FSCACHE_OBJLIST_CONFIG_EVENTS 0x00000400 /* show objects with events */
 35#define FSCACHE_OBJLIST_CONFIG_NOEVENTS 0x00000800 /* show objects without events */
36#define FSCACHE_OBJLIST_CONFIG_WORK 0x00001000 /* show objects with slow work */
37#define FSCACHE_OBJLIST_CONFIG_NOWORK 0x00002000 /* show objects without slow work */
38
39 u8 buf[512]; /* key and aux data buffer */
40};
41
42/*
43 * Add an object to the object list
44 * - we use the address of the fscache_object structure as the key into the
45 * tree
46 */
47void fscache_objlist_add(struct fscache_object *obj)
48{
49 struct fscache_object *xobj;
50 struct rb_node **p = &fscache_object_list.rb_node, *parent = NULL;
51
52 write_lock(&fscache_object_list_lock);
53
54 while (*p) {
55 parent = *p;
56 xobj = rb_entry(parent, struct fscache_object, objlist_link);
57
58 if (obj < xobj)
59 p = &(*p)->rb_left;
60 else if (obj > xobj)
61 p = &(*p)->rb_right;
62 else
63 BUG();
64 }
65
66 rb_link_node(&obj->objlist_link, parent, p);
67 rb_insert_color(&obj->objlist_link, &fscache_object_list);
68
69 write_unlock(&fscache_object_list_lock);
70}
71
72/**
73 * fscache_object_destroy - Note that a cache object is about to be destroyed
 74 * @obj: The object to be destroyed
75 *
76 * Note the imminent destruction and deallocation of a cache object record.
77 */
78void fscache_object_destroy(struct fscache_object *obj)
79{
80 write_lock(&fscache_object_list_lock);
81
82 BUG_ON(RB_EMPTY_ROOT(&fscache_object_list));
83 rb_erase(&obj->objlist_link, &fscache_object_list);
84
85 write_unlock(&fscache_object_list_lock);
86}
87EXPORT_SYMBOL(fscache_object_destroy);
88
89/*
90 * find the object in the tree on or after the specified index
91 */
92static struct fscache_object *fscache_objlist_lookup(loff_t *_pos)
93{
94 struct fscache_object *pobj, *obj, *minobj = NULL;
95 struct rb_node *p;
96 unsigned long pos;
97
98 if (*_pos >= (unsigned long) ERR_PTR(-ENOENT))
99 return NULL;
100 pos = *_pos;
101
102 /* banners (can't represent line 0 by pos 0 as that would involve
103 * returning a NULL pointer) */
104 if (pos == 0)
105 return (struct fscache_object *) ++(*_pos);
106 if (pos < 3)
107 return (struct fscache_object *)pos;
108
109 pobj = (struct fscache_object *)pos;
110 p = fscache_object_list.rb_node;
111 while (p) {
112 obj = rb_entry(p, struct fscache_object, objlist_link);
113 if (pobj < obj) {
114 if (!minobj || minobj > obj)
115 minobj = obj;
116 p = p->rb_left;
117 } else if (pobj > obj) {
118 p = p->rb_right;
119 } else {
120 minobj = obj;
121 break;
122 }
123 obj = NULL;
124 }
125
126 if (!minobj)
127 *_pos = (unsigned long) ERR_PTR(-ENOENT);
128 else if (minobj != obj)
129 *_pos = (unsigned long) minobj;
130 return minobj;
131}
132
133/*
134 * set up the iterator to start reading from the first line
135 */
136static void *fscache_objlist_start(struct seq_file *m, loff_t *_pos)
137 __acquires(&fscache_object_list_lock)
138{
139 read_lock(&fscache_object_list_lock);
140 return fscache_objlist_lookup(_pos);
141}
142
143/*
144 * move to the next line
145 */
146static void *fscache_objlist_next(struct seq_file *m, void *v, loff_t *_pos)
147{
148 (*_pos)++;
149 return fscache_objlist_lookup(_pos);
150}
151
152/*
153 * clean up after reading
154 */
155static void fscache_objlist_stop(struct seq_file *m, void *v)
156 __releases(&fscache_object_list_lock)
157{
158 read_unlock(&fscache_object_list_lock);
159}
160
161/*
162 * display an object
163 */
164static int fscache_objlist_show(struct seq_file *m, void *v)
165{
166 struct fscache_objlist_data *data = m->private;
167 struct fscache_object *obj = v;
168 unsigned long config = data->config;
169 uint16_t keylen, auxlen;
170 char _type[3], *type;
171 bool no_cookie;
172 u8 *buf = data->buf, *p;
173
174 if ((unsigned long) v == 1) {
175 seq_puts(m, "OBJECT PARENT STAT CHLDN OPS OOP IPR EX READS"
176 " EM EV F S"
177 " | NETFS_COOKIE_DEF TY FL NETFS_DATA");
178 if (config & (FSCACHE_OBJLIST_CONFIG_KEY |
179 FSCACHE_OBJLIST_CONFIG_AUX))
180 seq_puts(m, " ");
181 if (config & FSCACHE_OBJLIST_CONFIG_KEY)
182 seq_puts(m, "OBJECT_KEY");
183 if ((config & (FSCACHE_OBJLIST_CONFIG_KEY |
184 FSCACHE_OBJLIST_CONFIG_AUX)) ==
185 (FSCACHE_OBJLIST_CONFIG_KEY | FSCACHE_OBJLIST_CONFIG_AUX))
186 seq_puts(m, ", ");
187 if (config & FSCACHE_OBJLIST_CONFIG_AUX)
188 seq_puts(m, "AUX_DATA");
189 seq_puts(m, "\n");
190 return 0;
191 }
192
193 if ((unsigned long) v == 2) {
194 seq_puts(m, "======== ======== ==== ===== === === === == ====="
195 " == == = ="
196 " | ================ == == ================");
197 if (config & (FSCACHE_OBJLIST_CONFIG_KEY |
198 FSCACHE_OBJLIST_CONFIG_AUX))
199 seq_puts(m, " ================");
200 seq_puts(m, "\n");
201 return 0;
202 }
203
204 /* filter out any unwanted objects */
205#define FILTER(criterion, _yes, _no) \
206 do { \
207 unsigned long yes = FSCACHE_OBJLIST_CONFIG_##_yes; \
208 unsigned long no = FSCACHE_OBJLIST_CONFIG_##_no; \
209 if (criterion) { \
210 if (!(config & yes)) \
211 return 0; \
212 } else { \
213 if (!(config & no)) \
214 return 0; \
215 } \
216 } while(0)
217
218 if (~config) {
219 FILTER(obj->cookie,
220 COOKIE, NOCOOKIE);
221 FILTER(obj->state != FSCACHE_OBJECT_ACTIVE ||
222 obj->n_ops != 0 ||
223 obj->n_obj_ops != 0 ||
224 obj->flags ||
225 !list_empty(&obj->dependents),
226 BUSY, IDLE);
227 FILTER(test_bit(FSCACHE_OBJECT_PENDING_WRITE, &obj->flags),
228 PENDWR, NOPENDWR);
229 FILTER(atomic_read(&obj->n_reads),
230 READS, NOREADS);
231 FILTER(obj->events & obj->event_mask,
232 EVENTS, NOEVENTS);
233 FILTER(obj->work.flags & ~(1UL << SLOW_WORK_VERY_SLOW),
234 WORK, NOWORK);
235 }
236
237 seq_printf(m,
238 "%8x %8x %s %5u %3u %3u %3u %2u %5u %2lx %2lx %1lx %1lx | ",
239 obj->debug_id,
240 obj->parent ? obj->parent->debug_id : -1,
241 fscache_object_states_short[obj->state],
242 obj->n_children,
243 obj->n_ops,
244 obj->n_obj_ops,
245 obj->n_in_progress,
246 obj->n_exclusive,
247 atomic_read(&obj->n_reads),
248 obj->event_mask & FSCACHE_OBJECT_EVENTS_MASK,
249 obj->events,
250 obj->flags,
251 obj->work.flags);
252
253 no_cookie = true;
254 keylen = auxlen = 0;
255 if (obj->cookie) {
256 spin_lock(&obj->lock);
257 if (obj->cookie) {
258 switch (obj->cookie->def->type) {
259 case 0:
260 type = "IX";
261 break;
262 case 1:
263 type = "DT";
264 break;
265 default:
266 sprintf(_type, "%02u",
267 obj->cookie->def->type);
268 type = _type;
269 break;
270 }
271
272 seq_printf(m, "%-16s %s %2lx %16p",
273 obj->cookie->def->name,
274 type,
275 obj->cookie->flags,
276 obj->cookie->netfs_data);
277
278 if (obj->cookie->def->get_key &&
279 config & FSCACHE_OBJLIST_CONFIG_KEY)
280 keylen = obj->cookie->def->get_key(
281 obj->cookie->netfs_data,
282 buf, 400);
283
284 if (obj->cookie->def->get_aux &&
285 config & FSCACHE_OBJLIST_CONFIG_AUX)
286 auxlen = obj->cookie->def->get_aux(
287 obj->cookie->netfs_data,
288 buf + keylen, 512 - keylen);
289
290 no_cookie = false;
291 }
292 spin_unlock(&obj->lock);
293
294 if (!no_cookie && (keylen > 0 || auxlen > 0)) {
295 seq_printf(m, " ");
296 for (p = buf; keylen > 0; keylen--)
297 seq_printf(m, "%02x", *p++);
298 if (auxlen > 0) {
299 if (config & FSCACHE_OBJLIST_CONFIG_KEY)
300 seq_printf(m, ", ");
301 for (; auxlen > 0; auxlen--)
302 seq_printf(m, "%02x", *p++);
303 }
304 }
305 }
306
307 if (no_cookie)
308 seq_printf(m, "<no_cookie>\n");
309 else
310 seq_printf(m, "\n");
311 return 0;
312}
313
314static const struct seq_operations fscache_objlist_ops = {
315 .start = fscache_objlist_start,
316 .stop = fscache_objlist_stop,
317 .next = fscache_objlist_next,
318 .show = fscache_objlist_show,
319};
320
321/*
322 * get the configuration for filtering the list
323 */
324static void fscache_objlist_config(struct fscache_objlist_data *data)
325{
326#ifdef CONFIG_KEYS
327 struct user_key_payload *confkey;
328 unsigned long config;
329 struct key *key;
330 const char *buf;
331 int len;
332
333 key = request_key(&key_type_user, "fscache:objlist", NULL);
334 if (IS_ERR(key))
335 goto no_config;
336
337 config = 0;
338 rcu_read_lock();
339
340 confkey = key->payload.data;
341 buf = confkey->data;
342
343 for (len = confkey->datalen - 1; len >= 0; len--) {
344 switch (buf[len]) {
345 case 'K': config |= FSCACHE_OBJLIST_CONFIG_KEY; break;
346 case 'A': config |= FSCACHE_OBJLIST_CONFIG_AUX; break;
347 case 'C': config |= FSCACHE_OBJLIST_CONFIG_COOKIE; break;
348 case 'c': config |= FSCACHE_OBJLIST_CONFIG_NOCOOKIE; break;
349 case 'B': config |= FSCACHE_OBJLIST_CONFIG_BUSY; break;
350 case 'b': config |= FSCACHE_OBJLIST_CONFIG_IDLE; break;
351 case 'W': config |= FSCACHE_OBJLIST_CONFIG_PENDWR; break;
352 case 'w': config |= FSCACHE_OBJLIST_CONFIG_NOPENDWR; break;
353 case 'R': config |= FSCACHE_OBJLIST_CONFIG_READS; break;
354 case 'r': config |= FSCACHE_OBJLIST_CONFIG_NOREADS; break;
355 case 'S': config |= FSCACHE_OBJLIST_CONFIG_WORK; break;
356 case 's': config |= FSCACHE_OBJLIST_CONFIG_NOWORK; break;
357 }
358 }
359
360 rcu_read_unlock();
361 key_put(key);
362
363 if (!(config & (FSCACHE_OBJLIST_CONFIG_COOKIE | FSCACHE_OBJLIST_CONFIG_NOCOOKIE)))
364 config |= FSCACHE_OBJLIST_CONFIG_COOKIE | FSCACHE_OBJLIST_CONFIG_NOCOOKIE;
365 if (!(config & (FSCACHE_OBJLIST_CONFIG_BUSY | FSCACHE_OBJLIST_CONFIG_IDLE)))
366 config |= FSCACHE_OBJLIST_CONFIG_BUSY | FSCACHE_OBJLIST_CONFIG_IDLE;
367 if (!(config & (FSCACHE_OBJLIST_CONFIG_PENDWR | FSCACHE_OBJLIST_CONFIG_NOPENDWR)))
368 config |= FSCACHE_OBJLIST_CONFIG_PENDWR | FSCACHE_OBJLIST_CONFIG_NOPENDWR;
369 if (!(config & (FSCACHE_OBJLIST_CONFIG_READS | FSCACHE_OBJLIST_CONFIG_NOREADS)))
370 config |= FSCACHE_OBJLIST_CONFIG_READS | FSCACHE_OBJLIST_CONFIG_NOREADS;
371 if (!(config & (FSCACHE_OBJLIST_CONFIG_EVENTS | FSCACHE_OBJLIST_CONFIG_NOEVENTS)))
372 config |= FSCACHE_OBJLIST_CONFIG_EVENTS | FSCACHE_OBJLIST_CONFIG_NOEVENTS;
373 if (!(config & (FSCACHE_OBJLIST_CONFIG_WORK | FSCACHE_OBJLIST_CONFIG_NOWORK)))
374 config |= FSCACHE_OBJLIST_CONFIG_WORK | FSCACHE_OBJLIST_CONFIG_NOWORK;
375
376 data->config = config;
377 return;
378
379no_config:
380#endif
381 data->config = ULONG_MAX;
382}
383
384/*
385 * open "/proc/fs/fscache/objects" to provide a list of active objects
386 * - can be configured by a user-defined key added to the caller's keyrings
387 */
388static int fscache_objlist_open(struct inode *inode, struct file *file)
389{
390 struct fscache_objlist_data *data;
391 struct seq_file *m;
392 int ret;
393
394 ret = seq_open(file, &fscache_objlist_ops);
395 if (ret < 0)
396 return ret;
397
398 m = file->private_data;
399
400 /* buffer for key extraction */
401 data = kmalloc(sizeof(struct fscache_objlist_data), GFP_KERNEL);
402 if (!data) {
403 seq_release(inode, file);
404 return -ENOMEM;
405 }
406
407 /* get the configuration key */
408 fscache_objlist_config(data);
409
410 m->private = data;
411 return 0;
412}
413
414/*
415 * clean up on close
416 */
417static int fscache_objlist_release(struct inode *inode, struct file *file)
418{
419 struct seq_file *m = file->private_data;
420
421 kfree(m->private);
422 m->private = NULL;
423 return seq_release(inode, file);
424}
425
426const struct file_operations fscache_objlist_fops = {
427 .owner = THIS_MODULE,
428 .open = fscache_objlist_open,
429 .read = seq_read,
430 .llseek = seq_lseek,
431 .release = fscache_objlist_release,
432};
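
A possible way to exercise the new proc file from userspace, assuming the keyctl tool from keyutils; the option letters are those parsed by fscache_objlist_config() above, and any pair not mentioned defaults to showing both sides:

    # show object keys (K) and restrict output to busy objects (B);
    # the key goes in the session keyring, where request_key() finds it
    keyctl add user fscache:objlist "KB" @s
    cat /proc/fs/fscache/objects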
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index 392a41b1b79d..e513ac599c8e 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -14,9 +14,10 @@
 
 #define FSCACHE_DEBUG_LEVEL COOKIE
 #include <linux/module.h>
+#include <linux/seq_file.h>
 #include "internal.h"
 
-const char *fscache_object_states[] = {
+const char *fscache_object_states[FSCACHE_OBJECT__NSTATES] = {
 	[FSCACHE_OBJECT_INIT]          = "OBJECT_INIT",
 	[FSCACHE_OBJECT_LOOKING_UP]    = "OBJECT_LOOKING_UP",
 	[FSCACHE_OBJECT_CREATING]      = "OBJECT_CREATING",
@@ -33,9 +34,28 @@ const char *fscache_object_states[] = {
 };
 EXPORT_SYMBOL(fscache_object_states);
 
+const char fscache_object_states_short[FSCACHE_OBJECT__NSTATES][5] = {
+	[FSCACHE_OBJECT_INIT]          = "INIT",
+	[FSCACHE_OBJECT_LOOKING_UP]    = "LOOK",
+	[FSCACHE_OBJECT_CREATING]      = "CRTN",
+	[FSCACHE_OBJECT_AVAILABLE]     = "AVBL",
+	[FSCACHE_OBJECT_ACTIVE]        = "ACTV",
+	[FSCACHE_OBJECT_UPDATING]      = "UPDT",
+	[FSCACHE_OBJECT_DYING]         = "DYNG",
+	[FSCACHE_OBJECT_LC_DYING]      = "LCDY",
+	[FSCACHE_OBJECT_ABORT_INIT]    = "ABTI",
+	[FSCACHE_OBJECT_RELEASING]     = "RELS",
+	[FSCACHE_OBJECT_RECYCLING]     = "RCYC",
+	[FSCACHE_OBJECT_WITHDRAWING]   = "WTHD",
+	[FSCACHE_OBJECT_DEAD]          = "DEAD",
+};
+
 static void fscache_object_slow_work_put_ref(struct slow_work *);
 static int fscache_object_slow_work_get_ref(struct slow_work *);
 static void fscache_object_slow_work_execute(struct slow_work *);
+#ifdef CONFIG_SLOW_WORK_PROC
+static void fscache_object_slow_work_desc(struct slow_work *, struct seq_file *);
+#endif
 static void fscache_initialise_object(struct fscache_object *);
 static void fscache_lookup_object(struct fscache_object *);
 static void fscache_object_available(struct fscache_object *);
@@ -45,9 +65,13 @@ static void fscache_enqueue_dependents(struct fscache_object *);
 static void fscache_dequeue_object(struct fscache_object *);
 
 const struct slow_work_ops fscache_object_slow_work_ops = {
+	.owner		= THIS_MODULE,
 	.get_ref	= fscache_object_slow_work_get_ref,
 	.put_ref	= fscache_object_slow_work_put_ref,
 	.execute	= fscache_object_slow_work_execute,
+#ifdef CONFIG_SLOW_WORK_PROC
+	.desc		= fscache_object_slow_work_desc,
+#endif
 };
 EXPORT_SYMBOL(fscache_object_slow_work_ops);
 
@@ -81,6 +105,7 @@ static inline void fscache_done_parent_op(struct fscache_object *object)
 static void fscache_object_state_machine(struct fscache_object *object)
 {
 	enum fscache_object_state new_state;
+	struct fscache_cookie *cookie;
 
 	ASSERT(object != NULL);
 
@@ -120,20 +145,31 @@ static void fscache_object_state_machine(struct fscache_object *object)
 	case FSCACHE_OBJECT_UPDATING:
 		clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
 		fscache_stat(&fscache_n_updates_run);
+		fscache_stat(&fscache_n_cop_update_object);
 		object->cache->ops->update_object(object);
+		fscache_stat_d(&fscache_n_cop_update_object);
 		goto active_transit;
 
 		/* handle an object dying during lookup or creation */
 	case FSCACHE_OBJECT_LC_DYING:
 		object->event_mask &= ~(1 << FSCACHE_OBJECT_EV_UPDATE);
+		fscache_stat(&fscache_n_cop_lookup_complete);
 		object->cache->ops->lookup_complete(object);
+		fscache_stat_d(&fscache_n_cop_lookup_complete);
 
 		spin_lock(&object->lock);
 		object->state = FSCACHE_OBJECT_DYING;
-		if (test_and_clear_bit(FSCACHE_COOKIE_CREATING,
-				       &object->cookie->flags))
-			wake_up_bit(&object->cookie->flags,
-				    FSCACHE_COOKIE_CREATING);
+		cookie = object->cookie;
+		if (cookie) {
+			if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP,
					       &cookie->flags))
+				wake_up_bit(&cookie->flags,
+					    FSCACHE_COOKIE_LOOKING_UP);
+			if (test_and_clear_bit(FSCACHE_COOKIE_CREATING,
+					       &cookie->flags))
+				wake_up_bit(&cookie->flags,
+					    FSCACHE_COOKIE_CREATING);
+		}
 		spin_unlock(&object->lock);
 
 		fscache_done_parent_op(object);
@@ -165,6 +201,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
 		}
 		spin_unlock(&object->lock);
 		fscache_enqueue_dependents(object);
+		fscache_start_operations(object);
 		goto terminal_transit;
 
 		/* handle an abort during initialisation */
@@ -316,14 +353,29 @@ static void fscache_object_slow_work_execute(struct slow_work *work)
 
 	_enter("{OBJ%x}", object->debug_id);
 
-	clear_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
-
 	start = jiffies;
 	fscache_object_state_machine(object);
 	fscache_hist(fscache_objs_histogram, start);
 	if (object->events & object->event_mask)
 		fscache_enqueue_object(object);
+	clear_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
+}
+
+/*
+ * describe an object for slow-work debugging
+ */
+#ifdef CONFIG_SLOW_WORK_PROC
+static void fscache_object_slow_work_desc(struct slow_work *work,
+					  struct seq_file *m)
+{
+	struct fscache_object *object =
+		container_of(work, struct fscache_object, work);
+
+	seq_printf(m, "FSC: OBJ%x: %s",
+		   object->debug_id,
+		   fscache_object_states_short[object->state]);
 }
+#endif
 
 /*
  * initialise an object
@@ -376,7 +428,9 @@ static void fscache_initialise_object(struct fscache_object *object)
 		 * binding on to us, so we need to make sure we don't
 		 * add ourself to the list multiple times */
 		if (list_empty(&object->dep_link)) {
+			fscache_stat(&fscache_n_cop_grab_object);
 			object->cache->ops->grab_object(object);
+			fscache_stat_d(&fscache_n_cop_grab_object);
 			list_add(&object->dep_link,
 				 &parent->dependents);
 
@@ -414,6 +468,7 @@ static void fscache_lookup_object(struct fscache_object *object)
 {
 	struct fscache_cookie *cookie = object->cookie;
 	struct fscache_object *parent;
+	int ret;
 
 	_enter("");
 
@@ -438,11 +493,20 @@ static void fscache_lookup_object(struct fscache_object *object)
 	       object->cache->tag->name);
 
 	fscache_stat(&fscache_n_object_lookups);
-	object->cache->ops->lookup_object(object);
+	fscache_stat(&fscache_n_cop_lookup_object);
+	ret = object->cache->ops->lookup_object(object);
+	fscache_stat_d(&fscache_n_cop_lookup_object);
 
 	if (test_bit(FSCACHE_OBJECT_EV_ERROR, &object->events))
 		set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
 
+	if (ret == -ETIMEDOUT) {
+		/* probably stuck behind another object, so move this one to
+		 * the back of the queue */
+		fscache_stat(&fscache_n_object_lookups_timed_out);
+		set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
+	}
+
 	_leave("");
 }
 
@@ -546,7 +610,8 @@ static void fscache_object_available(struct fscache_object *object)
 
 	spin_lock(&object->lock);
 
-	if (test_and_clear_bit(FSCACHE_COOKIE_CREATING, &object->cookie->flags))
+	if (object->cookie &&
+	    test_and_clear_bit(FSCACHE_COOKIE_CREATING, &object->cookie->flags))
 		wake_up_bit(&object->cookie->flags, FSCACHE_COOKIE_CREATING);
 
 	fscache_done_parent_op(object);
@@ -562,7 +627,9 @@ static void fscache_object_available(struct fscache_object *object)
 	}
 	spin_unlock(&object->lock);
 
+	fscache_stat(&fscache_n_cop_lookup_complete);
 	object->cache->ops->lookup_complete(object);
+	fscache_stat_d(&fscache_n_cop_lookup_complete);
 	fscache_enqueue_dependents(object);
 
 	fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
@@ -581,11 +648,16 @@ static void fscache_drop_object(struct fscache_object *object)
 
 	_enter("{OBJ%x,%d}", object->debug_id, object->n_children);
 
+	ASSERTCMP(object->cookie, ==, NULL);
+	ASSERT(hlist_unhashed(&object->cookie_link));
+
 	spin_lock(&cache->object_list_lock);
 	list_del_init(&object->cache_link);
 	spin_unlock(&cache->object_list_lock);
 
+	fscache_stat(&fscache_n_cop_drop_object);
 	cache->ops->drop_object(object);
+	fscache_stat_d(&fscache_n_cop_drop_object);
 
 	if (parent) {
 		_debug("release parent OBJ%x {%d}",
@@ -600,7 +672,9 @@ static void fscache_drop_object(struct fscache_object *object)
 	}
 
 	/* this just shifts the object release to the slow work processor */
+	fscache_stat(&fscache_n_cop_put_object);
 	object->cache->ops->put_object(object);
+	fscache_stat_d(&fscache_n_cop_put_object);
 
 	_leave("");
 }
@@ -690,8 +764,12 @@ static int fscache_object_slow_work_get_ref(struct slow_work *work)
 {
 	struct fscache_object *object =
		container_of(work, struct fscache_object, work);
+	int ret;
 
-	return object->cache->ops->grab_object(object) ? 0 : -EAGAIN;
+	fscache_stat(&fscache_n_cop_grab_object);
+	ret = object->cache->ops->grab_object(object) ? 0 : -EAGAIN;
+	fscache_stat_d(&fscache_n_cop_grab_object);
+	return ret;
 }
 
 /*
@@ -702,7 +780,9 @@ static void fscache_object_slow_work_put_ref(struct slow_work *work)
 	struct fscache_object *object =
 		container_of(work, struct fscache_object, work);
 
-	return object->cache->ops->put_object(object);
+	fscache_stat(&fscache_n_cop_put_object);
+	object->cache->ops->put_object(object);
+	fscache_stat_d(&fscache_n_cop_put_object);
 }
 
 /*
@@ -739,7 +819,9 @@ static void fscache_enqueue_dependents(struct fscache_object *object)
 
 		/* sort onto appropriate lists */
 		fscache_enqueue_object(dep);
+		fscache_stat(&fscache_n_cop_put_object);
 		dep->cache->ops->put_object(dep);
+		fscache_stat_d(&fscache_n_cop_put_object);
 
 		if (!list_empty(&object->dependents))
 			cond_resched_lock(&object->lock);
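A note on the bit-wait idiom that recurs throughout the object.c hunks: one side clears a flag bit with test_and_clear_bit() and then calls wake_up_bit(); the other side sleeps in wait_on_bit() on the same word and bit. The new object->cookie null checks exist because the cookie can now be detached while the object is still cycling through its state machine. Below is a condensed sketch of both halves of the idiom (hypothetical demo_* names, assuming the 2.6.32-era wait_on_bit() that takes an action callback):

	#include <linux/bitops.h>
	#include <linux/sched.h>
	#include <linux/wait.h>

	#define DEMO_FLAG_BUSY	0		/* hypothetical flag bit */

	struct demo_obj {
		unsigned long flags;
	};

	/* action callback required by the old wait_on_bit() API */
	static int demo_wait_action(void *word)
	{
		schedule();
		return 0;
	}

	/* waiter side: sleep until DEMO_FLAG_BUSY is cleared */
	static void demo_wait_idle(struct demo_obj *obj)
	{
		wait_on_bit(&obj->flags, DEMO_FLAG_BUSY, demo_wait_action,
			    TASK_UNINTERRUPTIBLE);
	}

	/* waker side: clear the bit first, then wake anyone queued on it */
	static void demo_mark_idle(struct demo_obj *obj)
	{
		if (test_and_clear_bit(DEMO_FLAG_BUSY, &obj->flags))
			wake_up_bit(&obj->flags, DEMO_FLAG_BUSY);
	}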
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
index e7f8d53b8b6b..313e79a14266 100644
--- a/fs/fscache/operation.c
+++ b/fs/fscache/operation.c
@@ -13,6 +13,7 @@
 
 #define FSCACHE_DEBUG_LEVEL OPERATION
 #include <linux/module.h>
+#include <linux/seq_file.h>
 #include "internal.h"
 
 atomic_t fscache_op_debug_id;
@@ -31,32 +32,33 @@ void fscache_enqueue_operation(struct fscache_operation *op)
 	_enter("{OBJ%x OP%x,%u}",
 	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));
 
+	fscache_set_op_state(op, "EnQ");
+
+	ASSERT(list_empty(&op->pend_link));
 	ASSERT(op->processor != NULL);
 	ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
 	ASSERTCMP(atomic_read(&op->usage), >, 0);
 
-	if (list_empty(&op->pend_link)) {
-		switch (op->flags & FSCACHE_OP_TYPE) {
-		case FSCACHE_OP_FAST:
-			_debug("queue fast");
-			atomic_inc(&op->usage);
-			if (!schedule_work(&op->fast_work))
-				fscache_put_operation(op);
-			break;
-		case FSCACHE_OP_SLOW:
-			_debug("queue slow");
-			slow_work_enqueue(&op->slow_work);
-			break;
-		case FSCACHE_OP_MYTHREAD:
-			_debug("queue for caller's attention");
-			break;
-		default:
-			printk(KERN_ERR "FS-Cache: Unexpected op type %lx",
-			       op->flags);
-			BUG();
-			break;
-		}
-		fscache_stat(&fscache_n_op_enqueue);
+	fscache_stat(&fscache_n_op_enqueue);
+	switch (op->flags & FSCACHE_OP_TYPE) {
+	case FSCACHE_OP_FAST:
+		_debug("queue fast");
+		atomic_inc(&op->usage);
+		if (!schedule_work(&op->fast_work))
+			fscache_put_operation(op);
+		break;
+	case FSCACHE_OP_SLOW:
+		_debug("queue slow");
+		slow_work_enqueue(&op->slow_work);
+		break;
+	case FSCACHE_OP_MYTHREAD:
+		_debug("queue for caller's attention");
+		break;
+	default:
+		printk(KERN_ERR "FS-Cache: Unexpected op type %lx",
+		       op->flags);
+		BUG();
+		break;
 	}
 }
 EXPORT_SYMBOL(fscache_enqueue_operation);
@@ -67,6 +69,8 @@ EXPORT_SYMBOL(fscache_enqueue_operation);
 static void fscache_run_op(struct fscache_object *object,
 			   struct fscache_operation *op)
 {
+	fscache_set_op_state(op, "Run");
+
 	object->n_in_progress++;
 	if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
 		wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
@@ -87,9 +91,12 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
 
 	_enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);
 
+	fscache_set_op_state(op, "SubmitX");
+
 	spin_lock(&object->lock);
 	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
 	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
+	ASSERT(list_empty(&op->pend_link));
 
 	ret = -ENOBUFS;
 	if (fscache_object_is_active(object)) {
@@ -190,9 +197,12 @@ int fscache_submit_op(struct fscache_object *object,
 
 	ASSERTCMP(atomic_read(&op->usage), >, 0);
 
+	fscache_set_op_state(op, "Submit");
+
 	spin_lock(&object->lock);
 	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
 	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
+	ASSERT(list_empty(&op->pend_link));
 
 	ostate = object->state;
 	smp_rmb();
@@ -222,6 +232,11 @@ int fscache_submit_op(struct fscache_object *object,
 		list_add_tail(&op->pend_link, &object->pending_ops);
 		fscache_stat(&fscache_n_op_pend);
 		ret = 0;
+	} else if (object->state == FSCACHE_OBJECT_DYING ||
+		   object->state == FSCACHE_OBJECT_LC_DYING ||
+		   object->state == FSCACHE_OBJECT_WITHDRAWING) {
+		fscache_stat(&fscache_n_op_rejected);
+		ret = -ENOBUFS;
 	} else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
 		fscache_report_unexpected_submission(object, op, ostate);
 		ASSERT(!fscache_object_is_active(object));
@@ -264,12 +279,7 @@ void fscache_start_operations(struct fscache_object *object)
 			stop = true;
 		}
 		list_del_init(&op->pend_link);
-		object->n_in_progress++;
-
-		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
-			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
-		if (op->processor)
-			fscache_enqueue_operation(op);
+		fscache_run_op(object, op);
 
 		/* the pending queue was holding a ref on the object */
 		fscache_put_operation(op);
@@ -282,6 +292,36 @@ void fscache_start_operations(struct fscache_object *object)
 }
 
 /*
+ * cancel an operation that's pending on an object
+ */
+int fscache_cancel_op(struct fscache_operation *op)
+{
+	struct fscache_object *object = op->object;
+	int ret;
+
+	_enter("OBJ%x OP%x}", op->object->debug_id, op->debug_id);
+
+	spin_lock(&object->lock);
+
+	ret = -EBUSY;
+	if (!list_empty(&op->pend_link)) {
+		fscache_stat(&fscache_n_op_cancelled);
+		list_del_init(&op->pend_link);
+		object->n_ops--;
+		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
+			object->n_exclusive--;
+		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
+			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
+		fscache_put_operation(op);
+		ret = 0;
+	}
+
+	spin_unlock(&object->lock);
+	_leave(" = %d", ret);
+	return ret;
+}
+
+/*
 * release an operation
 * - queues pending ops if this is the last in-progress op
 */
@@ -298,6 +338,8 @@ void fscache_put_operation(struct fscache_operation *op)
 	if (!atomic_dec_and_test(&op->usage))
 		return;
 
+	fscache_set_op_state(op, "Put");
+
 	_debug("PUT OP");
 	if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
 		BUG();
@@ -311,6 +353,9 @@ void fscache_put_operation(struct fscache_operation *op)
 
 	object = op->object;
 
+	if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
+		atomic_dec(&object->n_reads);
+
 	/* now... we may get called with the object spinlock held, so we
 	 * complete the cleanup here only if we can immediately acquire the
 	 * lock, and defer it otherwise */
@@ -452,8 +497,27 @@ static void fscache_op_execute(struct slow_work *work)
 	_leave("");
 }
 
+/*
+ * describe an operation for slow-work debugging
+ */
+#ifdef CONFIG_SLOW_WORK_PROC
+static void fscache_op_desc(struct slow_work *work, struct seq_file *m)
+{
+	struct fscache_operation *op =
+		container_of(work, struct fscache_operation, slow_work);
+
+	seq_printf(m, "FSC: OBJ%x OP%x: %s/%s fl=%lx",
+		   op->object->debug_id, op->debug_id,
+		   op->name, op->state, op->flags);
+}
+#endif
+
 const struct slow_work_ops fscache_op_slow_work_ops = {
+	.owner		= THIS_MODULE,
 	.get_ref	= fscache_op_get_ref,
 	.put_ref	= fscache_op_put_ref,
 	.execute	= fscache_op_execute,
+#ifdef CONFIG_SLOW_WORK_PROC
+	.desc		= fscache_op_desc,
+#endif
 };
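fscache_cancel_op() above leans on a common list idiom: an operation is pending exactly while its pend_link is non-empty, and list_del_init() re-initialises the node so the emptiness test stays valid afterwards. A stripped-down sketch of that ownership pattern (hypothetical demo_* names; the real code also drops the reference the queue held and adjusts the per-object op counts):

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct demo_op {
		struct list_head pend_link;	/* empty <=> not queued */
	};

	static LIST_HEAD(demo_pending);
	static DEFINE_SPINLOCK(demo_lock);

	static void demo_op_init(struct demo_op *op)
	{
		INIT_LIST_HEAD(&op->pend_link);	/* makes list_empty() true */
	}

	static void demo_submit(struct demo_op *op)
	{
		spin_lock(&demo_lock);
		list_add_tail(&op->pend_link, &demo_pending);
		spin_unlock(&demo_lock);
	}

	/* cancellation succeeds only while the op is still on the list */
	static int demo_cancel(struct demo_op *op)
	{
		int ret = -EBUSY;

		spin_lock(&demo_lock);
		if (!list_empty(&op->pend_link)) {
			list_del_init(&op->pend_link);	/* stays re-testable */
			ret = 0;
		}
		spin_unlock(&demo_lock);
		return ret;
	}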
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 2568e0eb644f..c598ea4c4e7d 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -43,18 +43,102 @@ void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *pa
 EXPORT_SYMBOL(__fscache_wait_on_page_write);
 
 /*
- * note that a page has finished being written to the cache
+ * decide whether a page can be released, possibly by cancelling a store to it
+ * - we're allowed to sleep if __GFP_WAIT is flagged
  */
-static void fscache_end_page_write(struct fscache_cookie *cookie, struct page *page)
+bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
+				  struct page *page,
+				  gfp_t gfp)
 {
 	struct page *xpage;
+	void *val;
+
+	_enter("%p,%p,%x", cookie, page, gfp);
+
+	rcu_read_lock();
+	val = radix_tree_lookup(&cookie->stores, page->index);
+	if (!val) {
+		rcu_read_unlock();
+		fscache_stat(&fscache_n_store_vmscan_not_storing);
+		__fscache_uncache_page(cookie, page);
+		return true;
+	}
+
+	/* see if the page is actually undergoing storage - if so we can't get
+	 * rid of it till the cache has finished with it */
+	if (radix_tree_tag_get(&cookie->stores, page->index,
+			       FSCACHE_COOKIE_STORING_TAG)) {
+		rcu_read_unlock();
+		goto page_busy;
+	}
+
+	/* the page is pending storage, so we attempt to cancel the store and
+	 * discard the store request so that the page can be reclaimed */
+	spin_lock(&cookie->stores_lock);
+	rcu_read_unlock();
+
+	if (radix_tree_tag_get(&cookie->stores, page->index,
+			       FSCACHE_COOKIE_STORING_TAG)) {
+		/* the page started to undergo storage whilst we were looking,
+		 * so now we can only wait or return */
+		spin_unlock(&cookie->stores_lock);
+		goto page_busy;
+	}
 
-	spin_lock(&cookie->lock);
 	xpage = radix_tree_delete(&cookie->stores, page->index);
-	spin_unlock(&cookie->lock);
-	ASSERT(xpage != NULL);
+	spin_unlock(&cookie->stores_lock);
+
+	if (xpage) {
+		fscache_stat(&fscache_n_store_vmscan_cancelled);
+		fscache_stat(&fscache_n_store_radix_deletes);
+		ASSERTCMP(xpage, ==, page);
+	} else {
+		fscache_stat(&fscache_n_store_vmscan_gone);
+	}
 
 	wake_up_bit(&cookie->flags, 0);
+	if (xpage)
+		page_cache_release(xpage);
+	__fscache_uncache_page(cookie, page);
+	return true;
+
+page_busy:
+	/* we might want to wait here, but that could deadlock the allocator as
+	 * the slow-work threads writing to the cache may all end up sleeping
+	 * on memory allocation */
+	fscache_stat(&fscache_n_store_vmscan_busy);
+	return false;
+}
+EXPORT_SYMBOL(__fscache_maybe_release_page);
+
+/*
+ * note that a page has finished being written to the cache
+ */
+static void fscache_end_page_write(struct fscache_object *object,
+				   struct page *page)
+{
+	struct fscache_cookie *cookie;
+	struct page *xpage = NULL;
+
+	spin_lock(&object->lock);
+	cookie = object->cookie;
+	if (cookie) {
+		/* delete the page from the tree if it is now no longer
+		 * pending */
+		spin_lock(&cookie->stores_lock);
+		radix_tree_tag_clear(&cookie->stores, page->index,
+				     FSCACHE_COOKIE_STORING_TAG);
+		if (!radix_tree_tag_get(&cookie->stores, page->index,
+					FSCACHE_COOKIE_PENDING_TAG)) {
+			fscache_stat(&fscache_n_store_radix_deletes);
+			xpage = radix_tree_delete(&cookie->stores, page->index);
+		}
+		spin_unlock(&cookie->stores_lock);
+		wake_up_bit(&cookie->flags, 0);
+	}
+	spin_unlock(&object->lock);
+	if (xpage)
+		page_cache_release(xpage);
 }
 
 /*
@@ -63,14 +147,21 @@ static void fscache_end_page_write(struct fscache_cookie *cookie, struct page *p
 static void fscache_attr_changed_op(struct fscache_operation *op)
 {
 	struct fscache_object *object = op->object;
+	int ret;
 
 	_enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
 
 	fscache_stat(&fscache_n_attr_changed_calls);
 
-	if (fscache_object_is_active(object) &&
-	    object->cache->ops->attr_changed(object) < 0)
-		fscache_abort_object(object);
+	if (fscache_object_is_active(object)) {
+		fscache_set_op_state(op, "CallFS");
+		fscache_stat(&fscache_n_cop_attr_changed);
+		ret = object->cache->ops->attr_changed(object);
+		fscache_stat_d(&fscache_n_cop_attr_changed);
+		fscache_set_op_state(op, "Done");
+		if (ret < 0)
+			fscache_abort_object(object);
+	}
 
 	_leave("");
 }
@@ -99,6 +190,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
 	fscache_operation_init(op, NULL);
 	fscache_operation_init_slow(op, fscache_attr_changed_op);
 	op->flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_EXCLUSIVE);
+	fscache_set_op_name(op, "Attr");
 
 	spin_lock(&cookie->lock);
 
@@ -184,6 +276,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
 	op->start_time = jiffies;
 	INIT_WORK(&op->op.fast_work, fscache_retrieval_work);
 	INIT_LIST_HEAD(&op->to_do);
+	fscache_set_op_name(&op->op, "Retr");
 	return op;
 }
 
@@ -221,6 +314,43 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
 }
 
 /*
+ * wait for an object to become active (or dead)
+ */
+static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
+						 struct fscache_retrieval *op,
+						 atomic_t *stat_op_waits,
+						 atomic_t *stat_object_dead)
+{
+	int ret;
+
+	if (!test_bit(FSCACHE_OP_WAITING, &op->op.flags))
+		goto check_if_dead;
+
+	_debug(">>> WT");
+	fscache_stat(stat_op_waits);
+	if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
+			fscache_wait_bit_interruptible,
+			TASK_INTERRUPTIBLE) < 0) {
+		ret = fscache_cancel_op(&op->op);
+		if (ret == 0)
+			return -ERESTARTSYS;
+
+		/* it's been removed from the pending queue by another party,
+		 * so we should get to run shortly */
+		wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
+			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
+	}
+	_debug("<<< GO");
+
+check_if_dead:
+	if (unlikely(fscache_object_is_dead(object))) {
+		fscache_stat(stat_object_dead);
+		return -ENOBUFS;
+	}
+	return 0;
+}
+
+/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM - out of memory, nothing done
@@ -257,6 +387,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
 		_leave(" = -ENOMEM");
 		return -ENOMEM;
 	}
+	fscache_set_op_name(&op->op, "RetrRA1");
 
 	spin_lock(&cookie->lock);
 
@@ -267,6 +398,9 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
 
 	ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);
 
+	atomic_inc(&object->n_reads);
+	set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
+
 	if (fscache_submit_op(object, &op->op) < 0)
 		goto nobufs_unlock;
 	spin_unlock(&cookie->lock);
@@ -279,23 +413,27 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
 
 	/* we wait for the operation to become active, and then process it
 	 * *here*, in this thread, and not in the thread pool */
-	if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) {
-		_debug(">>> WT");
-		fscache_stat(&fscache_n_retrieval_op_waits);
-		wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
-			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
-		_debug("<<< GO");
-	}
+	ret = fscache_wait_for_retrieval_activation(
+		object, op,
+		__fscache_stat(&fscache_n_retrieval_op_waits),
+		__fscache_stat(&fscache_n_retrievals_object_dead));
+	if (ret < 0)
+		goto error;
 
 	/* ask the cache to honour the operation */
 	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
+		fscache_stat(&fscache_n_cop_allocate_page);
 		ret = object->cache->ops->allocate_page(op, page, gfp);
+		fscache_stat_d(&fscache_n_cop_allocate_page);
 		if (ret == 0)
 			ret = -ENODATA;
 	} else {
+		fscache_stat(&fscache_n_cop_read_or_alloc_page);
 		ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
+		fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
 	}
 
+error:
 	if (ret == -ENOMEM)
 		fscache_stat(&fscache_n_retrievals_nomem);
 	else if (ret == -ERESTARTSYS)
@@ -347,7 +485,6 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
 				  void *context,
 				  gfp_t gfp)
 {
-	fscache_pages_retrieval_func_t func;
 	struct fscache_retrieval *op;
 	struct fscache_object *object;
 	int ret;
@@ -369,6 +506,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
 	op = fscache_alloc_retrieval(mapping, end_io_func, context);
 	if (!op)
 		return -ENOMEM;
+	fscache_set_op_name(&op->op, "RetrRAN");
 
 	spin_lock(&cookie->lock);
 
@@ -377,6 +515,9 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
 	object = hlist_entry(cookie->backing_objects.first,
 			     struct fscache_object, cookie_link);
 
+	atomic_inc(&object->n_reads);
+	set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
+
 	if (fscache_submit_op(object, &op->op) < 0)
 		goto nobufs_unlock;
 	spin_unlock(&cookie->lock);
@@ -389,21 +530,27 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
 
 	/* we wait for the operation to become active, and then process it
 	 * *here*, in this thread, and not in the thread pool */
-	if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) {
-		_debug(">>> WT");
-		fscache_stat(&fscache_n_retrieval_op_waits);
-		wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
-			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
-		_debug("<<< GO");
-	}
+	ret = fscache_wait_for_retrieval_activation(
+		object, op,
+		__fscache_stat(&fscache_n_retrieval_op_waits),
+		__fscache_stat(&fscache_n_retrievals_object_dead));
+	if (ret < 0)
+		goto error;
 
 	/* ask the cache to honour the operation */
-	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags))
-		func = object->cache->ops->allocate_pages;
-	else
-		func = object->cache->ops->read_or_alloc_pages;
-	ret = func(op, pages, nr_pages, gfp);
+	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
+		fscache_stat(&fscache_n_cop_allocate_pages);
+		ret = object->cache->ops->allocate_pages(
+			op, pages, nr_pages, gfp);
+		fscache_stat_d(&fscache_n_cop_allocate_pages);
+	} else {
+		fscache_stat(&fscache_n_cop_read_or_alloc_pages);
+		ret = object->cache->ops->read_or_alloc_pages(
+			op, pages, nr_pages, gfp);
+		fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
+	}
 
+error:
 	if (ret == -ENOMEM)
 		fscache_stat(&fscache_n_retrievals_nomem);
 	else if (ret == -ERESTARTSYS)
@@ -461,6 +608,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
 	op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
 	if (!op)
 		return -ENOMEM;
+	fscache_set_op_name(&op->op, "RetrAL1");
 
 	spin_lock(&cookie->lock);
 
@@ -475,18 +623,22 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
 
 	fscache_stat(&fscache_n_alloc_ops);
 
-	if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) {
-		_debug(">>> WT");
-		fscache_stat(&fscache_n_alloc_op_waits);
-		wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
-			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
-		_debug("<<< GO");
-	}
+	ret = fscache_wait_for_retrieval_activation(
+		object, op,
+		__fscache_stat(&fscache_n_alloc_op_waits),
+		__fscache_stat(&fscache_n_allocs_object_dead));
+	if (ret < 0)
+		goto error;
 
 	/* ask the cache to honour the operation */
+	fscache_stat(&fscache_n_cop_allocate_page);
 	ret = object->cache->ops->allocate_page(op, page, gfp);
+	fscache_stat_d(&fscache_n_cop_allocate_page);
 
-	if (ret < 0)
+error:
+	if (ret == -ERESTARTSYS)
+		fscache_stat(&fscache_n_allocs_intr);
+	else if (ret < 0)
 		fscache_stat(&fscache_n_allocs_nobufs);
 	else
 		fscache_stat(&fscache_n_allocs_ok);
@@ -521,7 +673,7 @@ static void fscache_write_op(struct fscache_operation *_op)
 	struct fscache_storage *op =
 		container_of(_op, struct fscache_storage, op);
 	struct fscache_object *object = op->op.object;
-	struct fscache_cookie *cookie = object->cookie;
+	struct fscache_cookie *cookie;
 	struct page *page;
 	unsigned n;
 	void *results[1];
@@ -529,16 +681,19 @@ static void fscache_write_op(struct fscache_operation *_op)
 
 	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));
 
-	spin_lock(&cookie->lock);
+	fscache_set_op_state(&op->op, "GetPage");
+
 	spin_lock(&object->lock);
+	cookie = object->cookie;
 
-	if (!fscache_object_is_active(object)) {
+	if (!fscache_object_is_active(object) || !cookie) {
 		spin_unlock(&object->lock);
-		spin_unlock(&cookie->lock);
 		_leave("");
 		return;
 	}
 
+	spin_lock(&cookie->stores_lock);
+
 	fscache_stat(&fscache_n_store_calls);
 
 	/* find a page to store */
@@ -549,23 +704,35 @@ static void fscache_write_op(struct fscache_operation *_op)
 		goto superseded;
 	page = results[0];
 	_debug("gang %d [%lx]", n, page->index);
-	if (page->index > op->store_limit)
+	if (page->index > op->store_limit) {
+		fscache_stat(&fscache_n_store_pages_over_limit);
 		goto superseded;
+	}
 
-	radix_tree_tag_clear(&cookie->stores, page->index,
-			     FSCACHE_COOKIE_PENDING_TAG);
+	if (page) {
+		radix_tree_tag_set(&cookie->stores, page->index,
+				   FSCACHE_COOKIE_STORING_TAG);
+		radix_tree_tag_clear(&cookie->stores, page->index,
+				     FSCACHE_COOKIE_PENDING_TAG);
+	}
 
+	spin_unlock(&cookie->stores_lock);
 	spin_unlock(&object->lock);
-	spin_unlock(&cookie->lock);
 
 	if (page) {
+		fscache_set_op_state(&op->op, "Store");
+		fscache_stat(&fscache_n_store_pages);
+		fscache_stat(&fscache_n_cop_write_page);
 		ret = object->cache->ops->write_page(op, page);
-		fscache_end_page_write(cookie, page);
-		page_cache_release(page);
-		if (ret < 0)
+		fscache_stat_d(&fscache_n_cop_write_page);
+		fscache_set_op_state(&op->op, "EndWrite");
+		fscache_end_page_write(object, page);
+		if (ret < 0) {
+			fscache_set_op_state(&op->op, "Abort");
 			fscache_abort_object(object);
-		else
+		} else {
 			fscache_enqueue_operation(&op->op);
+		}
 	}
 
 	_leave("");
@@ -575,9 +742,9 @@ superseded:
 	/* this writer is going away and there aren't any more things to
	 * write */
 	_debug("cease");
+	spin_unlock(&cookie->stores_lock);
 	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
 	spin_unlock(&object->lock);
-	spin_unlock(&cookie->lock);
 	_leave("");
 }
 
@@ -634,6 +801,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 	fscache_operation_init(&op->op, fscache_release_write_op);
 	fscache_operation_init_slow(&op->op, fscache_write_op);
 	op->op.flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_WAITING);
+	fscache_set_op_name(&op->op, "Write1");
 
 	ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
 	if (ret < 0)
@@ -652,6 +820,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 	/* add the page to the pending-storage radix tree on the backing
 	 * object */
 	spin_lock(&object->lock);
+	spin_lock(&cookie->stores_lock);
 
 	_debug("store limit %llx", (unsigned long long) object->store_limit);
 
@@ -672,6 +841,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 	if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
 		goto already_pending;
 
+	spin_unlock(&cookie->stores_lock);
 	spin_unlock(&object->lock);
 
 	op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
@@ -693,6 +863,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 already_queued:
 	fscache_stat(&fscache_n_stores_again);
 already_pending:
+	spin_unlock(&cookie->stores_lock);
 	spin_unlock(&object->lock);
 	spin_unlock(&cookie->lock);
 	radix_tree_preload_end();
@@ -702,7 +873,9 @@ already_pending:
 	return 0;
 
 submit_failed:
+	spin_lock(&cookie->stores_lock);
 	radix_tree_delete(&cookie->stores, page->index);
+	spin_unlock(&cookie->stores_lock);
 	page_cache_release(page);
 	ret = -ENOBUFS;
 	goto nobufs;
@@ -763,7 +936,9 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
 	if (TestClearPageFsCache(page) &&
 	    object->cache->ops->uncache_page) {
 		/* the cache backend releases the cookie lock */
+		fscache_stat(&fscache_n_cop_uncache_page);
 		object->cache->ops->uncache_page(object, page);
+		fscache_stat_d(&fscache_n_cop_uncache_page);
 		goto done;
 	}
 
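The heart of the page.c changes is a two-tag handoff in the cookie's radix tree: a page queued for writeout carries a PENDING tag, is retagged STORING once the write actually starts, and reclaim may only cancel a store that is still merely pending. A condensed sketch of that handoff (hypothetical demo_* names and tag numbers, standard kernel radix-tree API):

	#include <linux/radix-tree.h>
	#include <linux/spinlock.h>

	#define DEMO_PENDING_TAG 0	/* queued for storage */
	#define DEMO_STORING_TAG 1	/* storage in progress */

	static RADIX_TREE(demo_stores, GFP_ATOMIC);
	static DEFINE_SPINLOCK(demo_stores_lock);

	/* queue a page for storage: insert it, then tag it pending */
	static int demo_queue_store(void *page, unsigned long index)
	{
		int ret;

		spin_lock(&demo_stores_lock);
		ret = radix_tree_insert(&demo_stores, index, page);
		if (ret == 0)
			radix_tree_tag_set(&demo_stores, index, DEMO_PENDING_TAG);
		spin_unlock(&demo_stores_lock);
		return ret;
	}

	/* the writer claims a pending page and marks it in flight */
	static void demo_begin_store(unsigned long index)
	{
		spin_lock(&demo_stores_lock);
		radix_tree_tag_set(&demo_stores, index, DEMO_STORING_TAG);
		radix_tree_tag_clear(&demo_stores, index, DEMO_PENDING_TAG);
		spin_unlock(&demo_stores_lock);
	}

	/* reclaim may cancel a store only while it is still pending */
	static int demo_maybe_cancel(unsigned long index)
	{
		int cancelled = 0;

		spin_lock(&demo_stores_lock);
		if (!radix_tree_tag_get(&demo_stores, index, DEMO_STORING_TAG)) {
			radix_tree_delete(&demo_stores, index);
			cancelled = 1;
		}
		spin_unlock(&demo_stores_lock);
		return cancelled;
	}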
diff --git a/fs/fscache/proc.c b/fs/fscache/proc.c
index beeab44bc31a..1d9e4951a597 100644
--- a/fs/fscache/proc.c
+++ b/fs/fscache/proc.c
@@ -37,10 +37,20 @@ int __init fscache_proc_init(void)
 		goto error_histogram;
 #endif
 
+#ifdef CONFIG_FSCACHE_OBJECT_LIST
+	if (!proc_create("fs/fscache/objects", S_IFREG | 0444, NULL,
+			 &fscache_objlist_fops))
+		goto error_objects;
+#endif
+
 	_leave(" = 0");
 	return 0;
 
+#ifdef CONFIG_FSCACHE_OBJECT_LIST
+error_objects:
+#endif
 #ifdef CONFIG_FSCACHE_HISTOGRAM
+	remove_proc_entry("fs/fscache/histogram", NULL);
 error_histogram:
 #endif
 #ifdef CONFIG_FSCACHE_STATS
@@ -58,6 +68,9 @@ error_dir:
 */
 void fscache_proc_cleanup(void)
 {
+#ifdef CONFIG_FSCACHE_OBJECT_LIST
+	remove_proc_entry("fs/fscache/objects", NULL);
+#endif
#ifdef CONFIG_FSCACHE_HISTOGRAM
 	remove_proc_entry("fs/fscache/histogram", NULL);
 #endif
diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
index 65deb99e756b..46435f3aae68 100644
--- a/fs/fscache/stats.c
+++ b/fs/fscache/stats.c
@@ -25,6 +25,8 @@ atomic_t fscache_n_op_requeue;
 atomic_t fscache_n_op_deferred_release;
 atomic_t fscache_n_op_release;
 atomic_t fscache_n_op_gc;
+atomic_t fscache_n_op_cancelled;
+atomic_t fscache_n_op_rejected;
 
 atomic_t fscache_n_attr_changed;
 atomic_t fscache_n_attr_changed_ok;
@@ -36,6 +38,8 @@ atomic_t fscache_n_allocs;
 atomic_t fscache_n_allocs_ok;
 atomic_t fscache_n_allocs_wait;
 atomic_t fscache_n_allocs_nobufs;
+atomic_t fscache_n_allocs_intr;
+atomic_t fscache_n_allocs_object_dead;
 atomic_t fscache_n_alloc_ops;
 atomic_t fscache_n_alloc_op_waits;
 
@@ -46,6 +50,7 @@ atomic_t fscache_n_retrievals_nodata;
 atomic_t fscache_n_retrievals_nobufs;
 atomic_t fscache_n_retrievals_intr;
 atomic_t fscache_n_retrievals_nomem;
+atomic_t fscache_n_retrievals_object_dead;
 atomic_t fscache_n_retrieval_ops;
 atomic_t fscache_n_retrieval_op_waits;
 
@@ -56,6 +61,14 @@ atomic_t fscache_n_stores_nobufs;
 atomic_t fscache_n_stores_oom;
 atomic_t fscache_n_store_ops;
 atomic_t fscache_n_store_calls;
+atomic_t fscache_n_store_pages;
+atomic_t fscache_n_store_radix_deletes;
+atomic_t fscache_n_store_pages_over_limit;
+
+atomic_t fscache_n_store_vmscan_not_storing;
+atomic_t fscache_n_store_vmscan_gone;
+atomic_t fscache_n_store_vmscan_busy;
+atomic_t fscache_n_store_vmscan_cancelled;
 
 atomic_t fscache_n_marks;
 atomic_t fscache_n_uncaches;
@@ -74,6 +87,7 @@ atomic_t fscache_n_updates_run;
 atomic_t fscache_n_relinquishes;
 atomic_t fscache_n_relinquishes_null;
 atomic_t fscache_n_relinquishes_waitcrt;
+atomic_t fscache_n_relinquishes_retire;
 
 atomic_t fscache_n_cookie_index;
 atomic_t fscache_n_cookie_data;
@@ -84,6 +98,7 @@ atomic_t fscache_n_object_no_alloc;
 atomic_t fscache_n_object_lookups;
 atomic_t fscache_n_object_lookups_negative;
 atomic_t fscache_n_object_lookups_positive;
+atomic_t fscache_n_object_lookups_timed_out;
 atomic_t fscache_n_object_created;
 atomic_t fscache_n_object_avail;
 atomic_t fscache_n_object_dead;
@@ -93,6 +108,23 @@ atomic_t fscache_n_checkaux_okay;
 atomic_t fscache_n_checkaux_update;
 atomic_t fscache_n_checkaux_obsolete;
 
+atomic_t fscache_n_cop_alloc_object;
+atomic_t fscache_n_cop_lookup_object;
+atomic_t fscache_n_cop_lookup_complete;
+atomic_t fscache_n_cop_grab_object;
+atomic_t fscache_n_cop_update_object;
+atomic_t fscache_n_cop_drop_object;
+atomic_t fscache_n_cop_put_object;
+atomic_t fscache_n_cop_sync_cache;
+atomic_t fscache_n_cop_attr_changed;
+atomic_t fscache_n_cop_read_or_alloc_page;
+atomic_t fscache_n_cop_read_or_alloc_pages;
+atomic_t fscache_n_cop_allocate_page;
+atomic_t fscache_n_cop_allocate_pages;
+atomic_t fscache_n_cop_write_page;
+atomic_t fscache_n_cop_uncache_page;
+atomic_t fscache_n_cop_dissociate_pages;
+
 /*
 * display the general statistics
 */
@@ -129,10 +161,11 @@ static int fscache_stats_show(struct seq_file *m, void *v)
 		   atomic_read(&fscache_n_acquires_nobufs),
 		   atomic_read(&fscache_n_acquires_oom));
 
-	seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u\n",
+	seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
 		   atomic_read(&fscache_n_object_lookups),
 		   atomic_read(&fscache_n_object_lookups_negative),
 		   atomic_read(&fscache_n_object_lookups_positive),
+		   atomic_read(&fscache_n_object_lookups_timed_out),
 		   atomic_read(&fscache_n_object_created));
 
 	seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
@@ -140,10 +173,11 @@ static int fscache_stats_show(struct seq_file *m, void *v)
 		   atomic_read(&fscache_n_updates_null),
 		   atomic_read(&fscache_n_updates_run));
 
-	seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u\n",
+	seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
 		   atomic_read(&fscache_n_relinquishes),
 		   atomic_read(&fscache_n_relinquishes_null),
-		   atomic_read(&fscache_n_relinquishes_waitcrt));
+		   atomic_read(&fscache_n_relinquishes_waitcrt),
+		   atomic_read(&fscache_n_relinquishes_retire));
 
 	seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
 		   atomic_read(&fscache_n_attr_changed),
@@ -152,14 +186,16 @@ static int fscache_stats_show(struct seq_file *m, void *v)
 		   atomic_read(&fscache_n_attr_changed_nomem),
 		   atomic_read(&fscache_n_attr_changed_calls));
 
-	seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u\n",
+	seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
 		   atomic_read(&fscache_n_allocs),
 		   atomic_read(&fscache_n_allocs_ok),
 		   atomic_read(&fscache_n_allocs_wait),
-		   atomic_read(&fscache_n_allocs_nobufs));
-	seq_printf(m, "Allocs : ops=%u owt=%u\n",
+		   atomic_read(&fscache_n_allocs_nobufs),
+		   atomic_read(&fscache_n_allocs_intr));
+	seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
 		   atomic_read(&fscache_n_alloc_ops),
-		   atomic_read(&fscache_n_alloc_op_waits));
+		   atomic_read(&fscache_n_alloc_op_waits),
+		   atomic_read(&fscache_n_allocs_object_dead));
 
 	seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
 		   " int=%u oom=%u\n",
@@ -170,9 +206,10 @@ static int fscache_stats_show(struct seq_file *m, void *v)
 		   atomic_read(&fscache_n_retrievals_nobufs),
 		   atomic_read(&fscache_n_retrievals_intr),
 		   atomic_read(&fscache_n_retrievals_nomem));
-	seq_printf(m, "Retrvls: ops=%u owt=%u\n",
+	seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
 		   atomic_read(&fscache_n_retrieval_ops),
-		   atomic_read(&fscache_n_retrieval_op_waits));
+		   atomic_read(&fscache_n_retrieval_op_waits),
+		   atomic_read(&fscache_n_retrievals_object_dead));
 
 	seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
 		   atomic_read(&fscache_n_stores),
@@ -180,18 +217,49 @@ static int fscache_stats_show(struct seq_file *m, void *v)
 		   atomic_read(&fscache_n_stores_again),
 		   atomic_read(&fscache_n_stores_nobufs),
 		   atomic_read(&fscache_n_stores_oom));
-	seq_printf(m, "Stores : ops=%u run=%u\n",
+	seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
 		   atomic_read(&fscache_n_store_ops),
-		   atomic_read(&fscache_n_store_calls));
+		   atomic_read(&fscache_n_store_calls),
+		   atomic_read(&fscache_n_store_pages),
+		   atomic_read(&fscache_n_store_radix_deletes),
+		   atomic_read(&fscache_n_store_pages_over_limit));
 
-	seq_printf(m, "Ops    : pend=%u run=%u enq=%u\n",
+	seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
+		   atomic_read(&fscache_n_store_vmscan_not_storing),
+		   atomic_read(&fscache_n_store_vmscan_gone),
+		   atomic_read(&fscache_n_store_vmscan_busy),
+		   atomic_read(&fscache_n_store_vmscan_cancelled));
+
+	seq_printf(m, "Ops    : pend=%u run=%u enq=%u can=%u rej=%u\n",
 		   atomic_read(&fscache_n_op_pend),
 		   atomic_read(&fscache_n_op_run),
-		   atomic_read(&fscache_n_op_enqueue));
+		   atomic_read(&fscache_n_op_enqueue),
+		   atomic_read(&fscache_n_op_cancelled),
+		   atomic_read(&fscache_n_op_rejected));
 	seq_printf(m, "Ops    : dfr=%u rel=%u gc=%u\n",
 		   atomic_read(&fscache_n_op_deferred_release),
 		   atomic_read(&fscache_n_op_release),
 		   atomic_read(&fscache_n_op_gc));
+
+	seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
+		   atomic_read(&fscache_n_cop_alloc_object),
+		   atomic_read(&fscache_n_cop_lookup_object),
+		   atomic_read(&fscache_n_cop_lookup_complete),
+		   atomic_read(&fscache_n_cop_grab_object));
+	seq_printf(m, "CacheOp: upo=%d dro=%d pto=%d atc=%d syn=%d\n",
+		   atomic_read(&fscache_n_cop_update_object),
+		   atomic_read(&fscache_n_cop_drop_object),
+		   atomic_read(&fscache_n_cop_put_object),
+		   atomic_read(&fscache_n_cop_attr_changed),
+		   atomic_read(&fscache_n_cop_sync_cache));
+	seq_printf(m, "CacheOp: rap=%d ras=%d alp=%d als=%d wrp=%d ucp=%d dsp=%d\n",
+		   atomic_read(&fscache_n_cop_read_or_alloc_page),
+		   atomic_read(&fscache_n_cop_read_or_alloc_pages),
+		   atomic_read(&fscache_n_cop_allocate_page),
+		   atomic_read(&fscache_n_cop_allocate_pages),
+		   atomic_read(&fscache_n_cop_write_page),
+		   atomic_read(&fscache_n_cop_uncache_page),
+		   atomic_read(&fscache_n_cop_dissociate_pages));
 	return 0;
 }
 
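As the stats.c hunks show, the statistics are plain atomic_t counters formatted by one seq_file show routine; because readers only ever snapshot them with atomic_read(), no locking is needed. A cut-down sketch of the same pattern (hypothetical demo_* names), using single_open() for a one-record file:

	#include <linux/fs.h>
	#include <linux/module.h>
	#include <linux/proc_fs.h>
	#include <linux/seq_file.h>

	static atomic_t demo_n_enqueue = ATOMIC_INIT(0);
	static atomic_t demo_n_cancelled = ATOMIC_INIT(0);

	/* hot paths just bump the counters; any context is fine */
	static inline void demo_count_enqueue(void)
	{
		atomic_inc(&demo_n_enqueue);
	}

	static int demo_stats_show(struct seq_file *m, void *v)
	{
		seq_printf(m, "Ops    : enq=%u can=%u\n",
			   atomic_read(&demo_n_enqueue),
			   atomic_read(&demo_n_cancelled));
		return 0;
	}

	static int demo_stats_open(struct inode *inode, struct file *file)
	{
		return single_open(file, demo_stats_show, NULL);
	}

	static const struct file_operations demo_stats_fops = {
		.owner		= THIS_MODULE,
		.open		= demo_stats_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};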
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 8ada78aade58..4787ae6c5c1c 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -385,6 +385,9 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
 	if (fc->no_create)
 		return -ENOSYS;
 
+	if (flags & O_DIRECT)
+		return -EINVAL;
+
 	forget_req = fuse_get_req(fc);
 	if (IS_ERR(forget_req))
 		return PTR_ERR(forget_req);
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index eacd78a5d082..5b31f7741a8f 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -114,7 +114,7 @@ static int __init init_gfs2_fs(void)
 	if (error)
 		goto fail_unregister;
 
-	error = slow_work_register_user();
+	error = slow_work_register_user(THIS_MODULE);
 	if (error)
 		goto fail_slow;
 
@@ -163,7 +163,7 @@ static void __exit exit_gfs2_fs(void)
 	gfs2_unregister_debugfs();
 	unregister_filesystem(&gfs2_fs_type);
 	unregister_filesystem(&gfs2meta_fs_type);
-	slow_work_unregister_user();
+	slow_work_unregister_user(THIS_MODULE);
 
 	kmem_cache_destroy(gfs2_quotad_cachep);
 	kmem_cache_destroy(gfs2_rgrpd_cachep);
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 59d2695509d3..09fa31965576 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -7,6 +7,7 @@
 * of the GNU General Public License version 2.
 */
 
+#include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/completion.h>
@@ -593,6 +594,7 @@ fail:
 }
 
 struct slow_work_ops gfs2_recover_ops = {
+	.owner	 = THIS_MODULE,
 	.get_ref = gfs2_recover_get_ref,
 	.put_ref = gfs2_recover_put_ref,
 	.execute = gfs2_recover_work,
diff --git a/fs/jffs2/read.c b/fs/jffs2/read.c
index cfe05c1966a5..3f39be1b0455 100644
--- a/fs/jffs2/read.c
+++ b/fs/jffs2/read.c
@@ -164,12 +164,15 @@ int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
164 164
165 /* XXX FIXME: Where a single physical node actually shows up in two 165 /* XXX FIXME: Where a single physical node actually shows up in two
166 frags, we read it twice. Don't do that. */ 166 frags, we read it twice. Don't do that. */
167 /* Now we're pointing at the first frag which overlaps our page */ 167 /* Now we're pointing at the first frag which overlaps our page
168 * (or perhaps is before it, if we've been asked to read off the
169 * end of the file). */
168 while(offset < end) { 170 while(offset < end) {
169 D2(printk(KERN_DEBUG "jffs2_read_inode_range: offset %d, end %d\n", offset, end)); 171 D2(printk(KERN_DEBUG "jffs2_read_inode_range: offset %d, end %d\n", offset, end));
170 if (unlikely(!frag || frag->ofs > offset)) { 172 if (unlikely(!frag || frag->ofs > offset ||
173 frag->ofs + frag->size <= offset)) {
171 uint32_t holesize = end - offset; 174 uint32_t holesize = end - offset;
172 if (frag) { 175 if (frag && frag->ofs > offset) {
173 D1(printk(KERN_NOTICE "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", f->inocache->ino, frag->ofs, offset)); 176 D1(printk(KERN_NOTICE "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", f->inocache->ino, frag->ofs, offset));
174 holesize = min(holesize, frag->ofs - offset); 177 holesize = min(holesize, frag->ofs - offset);
175 } 178 }
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index 70fad69eb959..fa588006588d 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -359,17 +359,13 @@ int nfs_fscache_release_page(struct page *page, gfp_t gfp)
359 359
360 BUG_ON(!cookie); 360 BUG_ON(!cookie);
361 361
362 if (fscache_check_page_write(cookie, page)) {
363 if (!(gfp & __GFP_WAIT))
364 return 0;
365 fscache_wait_on_page_write(cookie, page);
366 }
367
368 if (PageFsCache(page)) { 362 if (PageFsCache(page)) {
369 dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n", 363 dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
370 cookie, page, nfsi); 364 cookie, page, nfsi);
371 365
372 fscache_uncache_page(cookie, page); 366 if (!fscache_maybe_release_page(cookie, page, gfp))
367 return 0;
368
373 nfs_add_fscache_stats(page->mapping->host, 369 nfs_add_fscache_stats(page->mapping->host,
374 NFSIOS_FSCACHE_PAGES_UNCACHED, 1); 370 NFSIOS_FSCACHE_PAGES_UNCACHED, 1);
375 } 371 }
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ff37454fa783..741a562177fc 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2767,7 +2767,7 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
2767 .pages = &page, 2767 .pages = &page,
2768 .pgbase = 0, 2768 .pgbase = 0,
2769 .count = count, 2769 .count = count,
2770 .bitmask = NFS_SERVER(dentry->d_inode)->cache_consistency_bitmask, 2770 .bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask,
2771 }; 2771 };
2772 struct nfs4_readdir_res res; 2772 struct nfs4_readdir_res res;
2773 struct rpc_message msg = { 2773 struct rpc_message msg = {
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 89fc8ee1f5a5..de059f490586 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1712,7 +1712,8 @@ int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
1712 struct super_block *sb = inode->i_sb; 1712 struct super_block *sb = inode->i_sb;
1713 1713
1714 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) || 1714 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) ||
1715 !(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)) 1715 !(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) ||
1716 OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1716 return 0; 1717 return 0;
1717 1718
1718 cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits; 1719 cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index eae404602424..d963d8638709 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -35,12 +35,7 @@
35#include <linux/kref.h> 35#include <linux/kref.h>
36#include <linux/mutex.h> 36#include <linux/mutex.h>
37#include <linux/lockdep.h> 37#include <linux/lockdep.h>
38#ifndef CONFIG_OCFS2_COMPAT_JBD 38#include <linux/jbd2.h>
39# include <linux/jbd2.h>
40#else
41# include <linux/jbd.h>
42# include "ocfs2_jbd_compat.h"
43#endif
44 39
45/* For union ocfs2_dlm_lksb */ 40/* For union ocfs2_dlm_lksb */
46#include "stackglue.h" 41#include "stackglue.h"
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 60287fc56bcb..3a0df7a1b810 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -3743,6 +3743,9 @@ static int ocfs2_attach_refcount_tree(struct inode *inode,
3743 goto out; 3743 goto out;
3744 } 3744 }
3745 3745
3746 if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
3747 goto attach_xattr;
3748
3746 ocfs2_init_dinode_extent_tree(&di_et, INODE_CACHE(inode), di_bh); 3749 ocfs2_init_dinode_extent_tree(&di_et, INODE_CACHE(inode), di_bh);
3747 3750
3748 size = i_size_read(inode); 3751 size = i_size_read(inode);
@@ -3769,6 +3772,7 @@ static int ocfs2_attach_refcount_tree(struct inode *inode,
3769 cpos += num_clusters; 3772 cpos += num_clusters;
3770 } 3773 }
3771 3774
3775attach_xattr:
3772 if (oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) { 3776 if (oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
3773 ret = ocfs2_xattr_attach_refcount_tree(inode, di_bh, 3777 ret = ocfs2_xattr_attach_refcount_tree(inode, di_bh,
3774 &ref_tree->rf_ci, 3778 &ref_tree->rf_ci,
@@ -3858,6 +3862,49 @@ out:
3858 return ret; 3862 return ret;
3859} 3863}
3860 3864
3865static int ocfs2_duplicate_inline_data(struct inode *s_inode,
3866 struct buffer_head *s_bh,
3867 struct inode *t_inode,
3868 struct buffer_head *t_bh)
3869{
3870 int ret;
3871 handle_t *handle;
3872 struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
3873 struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data;
3874 struct ocfs2_dinode *t_di = (struct ocfs2_dinode *)t_bh->b_data;
3875
3876 BUG_ON(!(OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));
3877
3878 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
3879 if (IS_ERR(handle)) {
3880 ret = PTR_ERR(handle);
3881 mlog_errno(ret);
3882 goto out;
3883 }
3884
3885 ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh,
3886 OCFS2_JOURNAL_ACCESS_WRITE);
3887 if (ret) {
3888 mlog_errno(ret);
3889 goto out_commit;
3890 }
3891
3892 t_di->id2.i_data.id_count = s_di->id2.i_data.id_count;
3893 memcpy(t_di->id2.i_data.id_data, s_di->id2.i_data.id_data,
3894 le16_to_cpu(s_di->id2.i_data.id_count));
3895 spin_lock(&OCFS2_I(t_inode)->ip_lock);
3896 OCFS2_I(t_inode)->ip_dyn_features |= OCFS2_INLINE_DATA_FL;
3897 t_di->i_dyn_features = cpu_to_le16(OCFS2_I(t_inode)->ip_dyn_features);
3898 spin_unlock(&OCFS2_I(t_inode)->ip_lock);
3899
3900 ocfs2_journal_dirty(handle, t_bh);
3901
3902out_commit:
3903 ocfs2_commit_trans(osb, handle);
3904out:
3905 return ret;
3906}
3907
3861static int ocfs2_duplicate_extent_list(struct inode *s_inode, 3908static int ocfs2_duplicate_extent_list(struct inode *s_inode,
3862 struct inode *t_inode, 3909 struct inode *t_inode,
3863 struct buffer_head *t_bh, 3910 struct buffer_head *t_bh,
@@ -3997,6 +4044,14 @@ static int ocfs2_create_reflink_node(struct inode *s_inode,
3997 goto out; 4044 goto out;
3998 } 4045 }
3999 4046
4047 if (OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
4048 ret = ocfs2_duplicate_inline_data(s_inode, s_bh,
4049 t_inode, t_bh);
4050 if (ret)
4051 mlog_errno(ret);
4052 goto out;
4053 }
4054
4000 ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc), 4055 ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
4001 1, &ref_tree, &ref_root_bh); 4056 1, &ref_tree, &ref_root_bh);
4002 if (ret) { 4057 if (ret) {
@@ -4013,10 +4068,6 @@ static int ocfs2_create_reflink_node(struct inode *s_inode,
4013 goto out_unlock_refcount; 4068 goto out_unlock_refcount;
4014 } 4069 }
4015 4070
4016 ret = ocfs2_complete_reflink(s_inode, s_bh, t_inode, t_bh, preserve);
4017 if (ret)
4018 mlog_errno(ret);
4019
4020out_unlock_refcount: 4071out_unlock_refcount:
4021 ocfs2_unlock_refcount_tree(osb, ref_tree, 1); 4072 ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
4022 brelse(ref_root_bh); 4073 brelse(ref_root_bh);
@@ -4068,9 +4119,17 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
4068 ret = ocfs2_reflink_xattrs(inode, old_bh, 4119 ret = ocfs2_reflink_xattrs(inode, old_bh,
4069 new_inode, new_bh, 4120 new_inode, new_bh,
4070 preserve); 4121 preserve);
4071 if (ret) 4122 if (ret) {
4072 mlog_errno(ret); 4123 mlog_errno(ret);
4124 goto inode_unlock;
4125 }
4073 } 4126 }
4127
4128 ret = ocfs2_complete_reflink(inode, old_bh,
4129 new_inode, new_bh, preserve);
4130 if (ret)
4131 mlog_errno(ret);
4132
4074inode_unlock: 4133inode_unlock:
4075 ocfs2_inode_unlock(new_inode, 1); 4134 ocfs2_inode_unlock(new_inode, 1);
4076 brelse(new_bh); 4135 brelse(new_bh);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index c0e48aeebb1c..14f47d2bfe02 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -773,18 +773,20 @@ static int ocfs2_sb_probe(struct super_block *sb,
773 if (tmpstat < 0) { 773 if (tmpstat < 0) {
774 status = tmpstat; 774 status = tmpstat;
775 mlog_errno(status); 775 mlog_errno(status);
776 goto bail; 776 break;
777 } 777 }
778 di = (struct ocfs2_dinode *) (*bh)->b_data; 778 di = (struct ocfs2_dinode *) (*bh)->b_data;
779 memset(stats, 0, sizeof(struct ocfs2_blockcheck_stats)); 779 memset(stats, 0, sizeof(struct ocfs2_blockcheck_stats));
780 spin_lock_init(&stats->b_lock); 780 spin_lock_init(&stats->b_lock);
781 status = ocfs2_verify_volume(di, *bh, blksize, stats); 781 tmpstat = ocfs2_verify_volume(di, *bh, blksize, stats);
782 if (status >= 0) 782 if (tmpstat < 0) {
783 goto bail; 783 brelse(*bh);
784 brelse(*bh); 784 *bh = NULL;
785 *bh = NULL; 785 }
786 if (status != -EAGAIN) 786 if (tmpstat != -EAGAIN) {
787 status = tmpstat;
787 break; 788 break;
789 }
788 } 790 }
789 791
790bail: 792bail:
@@ -1645,6 +1647,10 @@ static int ocfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
1645 buf->f_bavail = buf->f_bfree; 1647 buf->f_bavail = buf->f_bfree;
1646 buf->f_files = numbits; 1648 buf->f_files = numbits;
1647 buf->f_ffree = freebits; 1649 buf->f_ffree = freebits;
1650 buf->f_fsid.val[0] = crc32_le(0, osb->uuid_str, OCFS2_VOL_UUID_LEN)
1651 & 0xFFFFFFFFUL;
1652 buf->f_fsid.val[1] = crc32_le(0, osb->uuid_str + OCFS2_VOL_UUID_LEN,
1653 OCFS2_VOL_UUID_LEN) & 0xFFFFFFFFUL;
1648 1654
1649 brelse(bh); 1655 brelse(bh);
1650 1656
diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c
index b6284f235d2f..c61369342a27 100644
--- a/fs/ocfs2/uptodate.c
+++ b/fs/ocfs2/uptodate.c
@@ -53,11 +53,6 @@
53#include <linux/highmem.h> 53#include <linux/highmem.h>
54#include <linux/buffer_head.h> 54#include <linux/buffer_head.h>
55#include <linux/rbtree.h> 55#include <linux/rbtree.h>
56#ifndef CONFIG_OCFS2_COMPAT_JBD
57# include <linux/jbd2.h>
58#else
59# include <linux/jbd.h>
60#endif
61 56
62#define MLOG_MASK_PREFIX ML_UPTODATE 57#define MLOG_MASK_PREFIX ML_UPTODATE
63 58
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 84d3532dd3ea..7be0c6fbe880 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -91,6 +91,8 @@ struct fscache_operation {
91#define FSCACHE_OP_WAITING 4 /* cleared when op is woken */ 91#define FSCACHE_OP_WAITING 4 /* cleared when op is woken */
92#define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */ 92#define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */
93#define FSCACHE_OP_DEAD 6 /* op is now dead */ 93#define FSCACHE_OP_DEAD 6 /* op is now dead */
94#define FSCACHE_OP_DEC_READ_CNT 7 /* decrement object->n_reads on destruction */
95#define FSCACHE_OP_KEEP_FLAGS 0xc0 /* flags to keep when repurposing an op */
94 96
95 atomic_t usage; 97 atomic_t usage;
96 unsigned debug_id; /* debugging ID */ 98 unsigned debug_id; /* debugging ID */
@@ -102,6 +104,16 @@ struct fscache_operation {
102 104
103 /* operation releaser */ 105 /* operation releaser */
104 fscache_operation_release_t release; 106 fscache_operation_release_t release;
107
108#ifdef CONFIG_SLOW_WORK_PROC
109 const char *name; /* operation name */
110 const char *state; /* operation state */
111#define fscache_set_op_name(OP, N) do { (OP)->name = (N); } while(0)
112#define fscache_set_op_state(OP, S) do { (OP)->state = (S); } while(0)
113#else
114#define fscache_set_op_name(OP, N) do { } while(0)
115#define fscache_set_op_state(OP, S) do { } while(0)
116#endif
105}; 117};
106 118
107extern atomic_t fscache_op_debug_id; 119extern atomic_t fscache_op_debug_id;
@@ -125,6 +137,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
125 op->debug_id = atomic_inc_return(&fscache_op_debug_id); 137 op->debug_id = atomic_inc_return(&fscache_op_debug_id);
126 op->release = release; 138 op->release = release;
127 INIT_LIST_HEAD(&op->pend_link); 139 INIT_LIST_HEAD(&op->pend_link);
140 fscache_set_op_state(op, "Init");
128} 141}
129 142
130/** 143/**
@@ -221,8 +234,10 @@ struct fscache_cache_ops {
221 struct fscache_object *(*alloc_object)(struct fscache_cache *cache, 234 struct fscache_object *(*alloc_object)(struct fscache_cache *cache,
222 struct fscache_cookie *cookie); 235 struct fscache_cookie *cookie);
223 236
224 /* look up the object for a cookie */ 237 /* look up the object for a cookie
225 void (*lookup_object)(struct fscache_object *object); 238 * - return -ETIMEDOUT to be requeued
239 */
240 int (*lookup_object)(struct fscache_object *object);
226 241
227 /* finished looking up */ 242 /* finished looking up */
228 void (*lookup_complete)(struct fscache_object *object); 243 void (*lookup_complete)(struct fscache_object *object);
@@ -297,12 +312,14 @@ struct fscache_cookie {
297 atomic_t usage; /* number of users of this cookie */ 312 atomic_t usage; /* number of users of this cookie */
298 atomic_t n_children; /* number of children of this cookie */ 313 atomic_t n_children; /* number of children of this cookie */
299 spinlock_t lock; 314 spinlock_t lock;
315 spinlock_t stores_lock; /* lock on page store tree */
300 struct hlist_head backing_objects; /* object(s) backing this file/index */ 316 struct hlist_head backing_objects; /* object(s) backing this file/index */
301 const struct fscache_cookie_def *def; /* definition */ 317 const struct fscache_cookie_def *def; /* definition */
302 struct fscache_cookie *parent; /* parent of this entry */ 318 struct fscache_cookie *parent; /* parent of this entry */
303 void *netfs_data; /* back pointer to netfs */ 319 void *netfs_data; /* back pointer to netfs */
304 struct radix_tree_root stores; /* pages to be stored on this cookie */ 320 struct radix_tree_root stores; /* pages to be stored on this cookie */
305#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */ 321#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */
322#define FSCACHE_COOKIE_STORING_TAG 1 /* pages tag: writing to cache */
306 323
307 unsigned long flags; 324 unsigned long flags;
308#define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */ 325#define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */
@@ -337,6 +354,7 @@ struct fscache_object {
337 FSCACHE_OBJECT_RECYCLING, /* retiring object */ 354 FSCACHE_OBJECT_RECYCLING, /* retiring object */
338 FSCACHE_OBJECT_WITHDRAWING, /* withdrawing object */ 355 FSCACHE_OBJECT_WITHDRAWING, /* withdrawing object */
339 FSCACHE_OBJECT_DEAD, /* object is now dead */ 356 FSCACHE_OBJECT_DEAD, /* object is now dead */
357 FSCACHE_OBJECT__NSTATES
340 } state; 358 } state;
341 359
342 int debug_id; /* debugging ID */ 360 int debug_id; /* debugging ID */
@@ -345,6 +363,7 @@ struct fscache_object {
345 int n_obj_ops; /* number of object ops outstanding on object */ 363 int n_obj_ops; /* number of object ops outstanding on object */
346 int n_in_progress; /* number of ops in progress */ 364 int n_in_progress; /* number of ops in progress */
347 int n_exclusive; /* number of exclusive ops queued */ 365 int n_exclusive; /* number of exclusive ops queued */
366 atomic_t n_reads; /* number of read ops in progress */
348 spinlock_t lock; /* state and operations lock */ 367 spinlock_t lock; /* state and operations lock */
349 368
350 unsigned long lookup_jif; /* time at which lookup started */ 369 unsigned long lookup_jif; /* time at which lookup started */
@@ -358,6 +377,7 @@ struct fscache_object {
358#define FSCACHE_OBJECT_EV_RELEASE 4 /* T if netfs requested object release */ 377#define FSCACHE_OBJECT_EV_RELEASE 4 /* T if netfs requested object release */
359#define FSCACHE_OBJECT_EV_RETIRE 5 /* T if netfs requested object retirement */ 378#define FSCACHE_OBJECT_EV_RETIRE 5 /* T if netfs requested object retirement */
360#define FSCACHE_OBJECT_EV_WITHDRAW 6 /* T if cache requested object withdrawal */ 379#define FSCACHE_OBJECT_EV_WITHDRAW 6 /* T if cache requested object withdrawal */
 380#define FSCACHE_OBJECT_EVENTS_MASK 0x7f /* mask of all events */
361 381
362 unsigned long flags; 382 unsigned long flags;
363#define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */ 383#define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */
@@ -373,7 +393,11 @@ struct fscache_object {
373 struct list_head dependents; /* FIFO of dependent objects */ 393 struct list_head dependents; /* FIFO of dependent objects */
374 struct list_head dep_link; /* link in parent's dependents list */ 394 struct list_head dep_link; /* link in parent's dependents list */
375 struct list_head pending_ops; /* unstarted operations on this object */ 395 struct list_head pending_ops; /* unstarted operations on this object */
396#ifdef CONFIG_FSCACHE_OBJECT_LIST
397 struct rb_node objlist_link; /* link in global object list */
398#endif
376 pgoff_t store_limit; /* current storage limit */ 399 pgoff_t store_limit; /* current storage limit */
400 loff_t store_limit_l; /* current storage limit */
377}; 401};
378 402
379extern const char *fscache_object_states[]; 403extern const char *fscache_object_states[];
@@ -383,6 +407,10 @@ extern const char *fscache_object_states[];
383 (obj)->state >= FSCACHE_OBJECT_AVAILABLE && \ 407 (obj)->state >= FSCACHE_OBJECT_AVAILABLE && \
384 (obj)->state < FSCACHE_OBJECT_DYING) 408 (obj)->state < FSCACHE_OBJECT_DYING)
385 409
410#define fscache_object_is_dead(obj) \
411 (test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) && \
412 (obj)->state >= FSCACHE_OBJECT_DYING)
413
386extern const struct slow_work_ops fscache_object_slow_work_ops; 414extern const struct slow_work_ops fscache_object_slow_work_ops;
387 415
388/** 416/**
@@ -414,6 +442,7 @@ void fscache_object_init(struct fscache_object *object,
414 object->events = object->event_mask = 0; 442 object->events = object->event_mask = 0;
415 object->flags = 0; 443 object->flags = 0;
416 object->store_limit = 0; 444 object->store_limit = 0;
445 object->store_limit_l = 0;
417 object->cache = cache; 446 object->cache = cache;
418 object->cookie = cookie; 447 object->cookie = cookie;
419 object->parent = NULL; 448 object->parent = NULL;
@@ -422,6 +451,12 @@ void fscache_object_init(struct fscache_object *object,
422extern void fscache_object_lookup_negative(struct fscache_object *object); 451extern void fscache_object_lookup_negative(struct fscache_object *object);
423extern void fscache_obtained_object(struct fscache_object *object); 452extern void fscache_obtained_object(struct fscache_object *object);
424 453
454#ifdef CONFIG_FSCACHE_OBJECT_LIST
455extern void fscache_object_destroy(struct fscache_object *object);
456#else
457#define fscache_object_destroy(object) do {} while(0)
458#endif
459
425/** 460/**
426 * fscache_object_destroyed - Note destruction of an object in a cache 461 * fscache_object_destroyed - Note destruction of an object in a cache
427 * @cache: The cache from which the object came 462 * @cache: The cache from which the object came
@@ -460,6 +495,7 @@ static inline void fscache_object_lookup_error(struct fscache_object *object)
460static inline 495static inline
461void fscache_set_store_limit(struct fscache_object *object, loff_t i_size) 496void fscache_set_store_limit(struct fscache_object *object, loff_t i_size)
462{ 497{
498 object->store_limit_l = i_size;
463 object->store_limit = i_size >> PAGE_SHIFT; 499 object->store_limit = i_size >> PAGE_SHIFT;
464 if (i_size & ~PAGE_MASK) 500 if (i_size & ~PAGE_MASK)
465 object->store_limit++; 501 object->store_limit++;
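For readers following the fscache-cache.h hunk above: the new fscache_set_op_name()/fscache_set_op_state() macros tag operations for the /proc view and compile away entirely without CONFIG_SLOW_WORK_PROC. Below is a minimal sketch (not part of the patch) of how a caller might use them; my_release() and my_setup_op() are hypothetical names.

#include <linux/fscache-cache.h>
#include <linux/slab.h>

static void my_release(struct fscache_operation *op)
{
	/* hypothetical releaser: free whatever structure wraps the op */
	kfree(op);
}

static void my_setup_op(struct fscache_operation *op)
{
	fscache_operation_init(op, my_release);	/* also sets state to "Init" */
	fscache_set_op_name(op, "Retr");	/* short tag shown in /proc */
	/* ... enqueue and process the operation ... */
	fscache_set_op_state(op, "Run");	/* updated as the op progresses */
}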
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 6d8ee466e0a0..595ce49288b7 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -202,6 +202,8 @@ extern int __fscache_write_page(struct fscache_cookie *, struct page *, gfp_t);
202extern void __fscache_uncache_page(struct fscache_cookie *, struct page *); 202extern void __fscache_uncache_page(struct fscache_cookie *, struct page *);
203extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *); 203extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *);
204extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *); 204extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *);
205extern bool __fscache_maybe_release_page(struct fscache_cookie *, struct page *,
206 gfp_t);
205 207
206/** 208/**
207 * fscache_register_netfs - Register a filesystem as desiring caching services 209 * fscache_register_netfs - Register a filesystem as desiring caching services
@@ -615,4 +617,29 @@ void fscache_wait_on_page_write(struct fscache_cookie *cookie,
615 __fscache_wait_on_page_write(cookie, page); 617 __fscache_wait_on_page_write(cookie, page);
616} 618}
617 619
620/**
621 * fscache_maybe_release_page - Consider releasing a page, cancelling a store
622 * @cookie: The cookie representing the cache object
623 * @page: The netfs page that is being cached.
624 * @gfp: The gfp flags passed to releasepage()
625 *
626 * Consider releasing a page for the vmscan algorithm, on behalf of the netfs's
 627 * releasepage() call. A storage request on the page may be cancelled if it
 628 * is not currently being processed.
629 *
630 * The function returns true if the page no longer has a storage request on it,
631 * and false if a storage request is left in place. If true is returned, the
 632 * page will have been passed to fscache_uncache_page(). If false is returned,
 633 * the page cannot be freed yet.
634 */
635static inline
636bool fscache_maybe_release_page(struct fscache_cookie *cookie,
637 struct page *page,
638 gfp_t gfp)
639{
640 if (fscache_cookie_valid(cookie) && PageFsCache(page))
641 return __fscache_maybe_release_page(cookie, page, gfp);
642 return false;
643}
644
618#endif /* _LINUX_FSCACHE_H */ 645#endif /* _LINUX_FSCACHE_H */
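As a usage note (not part of the patch): a netfs ->releasepage() implementation would now funnel through this new helper much as the nfs_fscache_release_page() change earlier in this diff does. A minimal sketch, with the my_netfs_* names purely illustrative:

#include <linux/fscache.h>
#include <linux/pagemap.h>

/* hypothetical per-page cookie lookup for an imaginary netfs */
static struct fscache_cookie *my_netfs_cookie(struct page *page);

static int my_netfs_release_page(struct page *page, gfp_t gfp)
{
	struct fscache_cookie *cookie = my_netfs_cookie(page);

	if (PageFsCache(page)) {
		/* false: a store is still in flight and the page cannot be
		 * freed yet; true: the page has been passed to
		 * fscache_uncache_page() */
		if (!fscache_maybe_release_page(cookie, page, gfp))
			return 0;
	}
	return 1;	/* page may be released */
}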
diff --git a/include/linux/i2c-pnx.h b/include/linux/i2c-pnx.h
index f13255e06406..9eb07bbc6522 100644
--- a/include/linux/i2c-pnx.h
+++ b/include/linux/i2c-pnx.h
@@ -21,7 +21,7 @@ struct i2c_pnx_mif {
21 int mode; /* Interface mode */ 21 int mode; /* Interface mode */
22 struct completion complete; /* I/O completion */ 22 struct completion complete; /* I/O completion */
23 struct timer_list timer; /* Timeout */ 23 struct timer_list timer; /* Timeout */
24 char * buf; /* Data buffer */ 24 u8 * buf; /* Data buffer */
25 int len; /* Length of data buffer */ 25 int len; /* Length of data buffer */
26}; 26};
27 27
diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h
index b65c8881f07a..5035a2691739 100644
--- a/include/linux/slow-work.h
+++ b/include/linux/slow-work.h
@@ -17,13 +17,20 @@
17#ifdef CONFIG_SLOW_WORK 17#ifdef CONFIG_SLOW_WORK
18 18
19#include <linux/sysctl.h> 19#include <linux/sysctl.h>
20#include <linux/timer.h>
20 21
21struct slow_work; 22struct slow_work;
23#ifdef CONFIG_SLOW_WORK_PROC
24struct seq_file;
25#endif
22 26
23/* 27/*
24 * The operations used to support slow work items 28 * The operations used to support slow work items
25 */ 29 */
26struct slow_work_ops { 30struct slow_work_ops {
31 /* owner */
32 struct module *owner;
33
27 /* get a ref on a work item 34 /* get a ref on a work item
28 * - return 0 if successful, -ve if not 35 * - return 0 if successful, -ve if not
29 */ 36 */
@@ -34,6 +41,11 @@ struct slow_work_ops {
34 41
35 /* execute a work item */ 42 /* execute a work item */
36 void (*execute)(struct slow_work *work); 43 void (*execute)(struct slow_work *work);
44
45#ifdef CONFIG_SLOW_WORK_PROC
46 /* describe a work item for /proc */
47 void (*desc)(struct slow_work *work, struct seq_file *m);
48#endif
37}; 49};
38 50
39/* 51/*
@@ -42,13 +54,24 @@ struct slow_work_ops {
42 * queued 54 * queued
43 */ 55 */
44struct slow_work { 56struct slow_work {
57 struct module *owner; /* the owning module */
45 unsigned long flags; 58 unsigned long flags;
46#define SLOW_WORK_PENDING 0 /* item pending (further) execution */ 59#define SLOW_WORK_PENDING 0 /* item pending (further) execution */
47#define SLOW_WORK_EXECUTING 1 /* item currently executing */ 60#define SLOW_WORK_EXECUTING 1 /* item currently executing */
48#define SLOW_WORK_ENQ_DEFERRED 2 /* item enqueue deferred */ 61#define SLOW_WORK_ENQ_DEFERRED 2 /* item enqueue deferred */
49#define SLOW_WORK_VERY_SLOW 3 /* item is very slow */ 62#define SLOW_WORK_VERY_SLOW 3 /* item is very slow */
63#define SLOW_WORK_CANCELLING 4 /* item is being cancelled, don't enqueue */
64#define SLOW_WORK_DELAYED 5 /* item is struct delayed_slow_work with active timer */
50 const struct slow_work_ops *ops; /* operations table for this item */ 65 const struct slow_work_ops *ops; /* operations table for this item */
51 struct list_head link; /* link in queue */ 66 struct list_head link; /* link in queue */
67#ifdef CONFIG_SLOW_WORK_PROC
68 struct timespec mark; /* jiffies at which queued or exec begun */
69#endif
70};
71
72struct delayed_slow_work {
73 struct slow_work work;
74 struct timer_list timer;
52}; 75};
53 76
54/** 77/**
@@ -67,6 +90,20 @@ static inline void slow_work_init(struct slow_work *work,
67} 90}
68 91
69/** 92/**
 93 * delayed_slow_work_init - Initialise a delayed slow work item
 94 * @dwork: The work item to initialise
95 * @ops: The operations to use to handle the slow work item
96 *
97 * Initialise a delayed slow work item.
98 */
99static inline void delayed_slow_work_init(struct delayed_slow_work *dwork,
100 const struct slow_work_ops *ops)
101{
102 init_timer(&dwork->timer);
103 slow_work_init(&dwork->work, ops);
104}
105
106/**
70 * vslow_work_init - Initialise a very slow work item 107 * vslow_work_init - Initialise a very slow work item
71 * @work: The work item to initialise 108 * @work: The work item to initialise
72 * @ops: The operations to use to handle the slow work item 109 * @ops: The operations to use to handle the slow work item
@@ -83,9 +120,40 @@ static inline void vslow_work_init(struct slow_work *work,
83 INIT_LIST_HEAD(&work->link); 120 INIT_LIST_HEAD(&work->link);
84} 121}
85 122
123/**
124 * slow_work_is_queued - Determine if a slow work item is on the work queue
 125 * @work: The work item to test
126 *
127 * Determine if the specified slow-work item is on the work queue. This
128 * returns true if it is actually on the queue.
129 *
130 * If the item is executing and has been marked for requeue when execution
131 * finishes, then false will be returned.
132 *
133 * Anyone wishing to wait for completion of execution can wait on the
134 * SLOW_WORK_EXECUTING bit.
135 */
136static inline bool slow_work_is_queued(struct slow_work *work)
137{
138 unsigned long flags = work->flags;
139 return flags & SLOW_WORK_PENDING && !(flags & SLOW_WORK_EXECUTING);
140}
141
86extern int slow_work_enqueue(struct slow_work *work); 142extern int slow_work_enqueue(struct slow_work *work);
87extern int slow_work_register_user(void); 143extern void slow_work_cancel(struct slow_work *work);
88extern void slow_work_unregister_user(void); 144extern int slow_work_register_user(struct module *owner);
145extern void slow_work_unregister_user(struct module *owner);
146
147extern int delayed_slow_work_enqueue(struct delayed_slow_work *dwork,
148 unsigned long delay);
149
150static inline void delayed_slow_work_cancel(struct delayed_slow_work *dwork)
151{
152 slow_work_cancel(&dwork->work);
153}
154
155extern bool slow_work_sleep_till_thread_needed(struct slow_work *work,
156 signed long *_timeout);
89 157
90#ifdef CONFIG_SYSCTL 158#ifdef CONFIG_SYSCTL
91extern ctl_table slow_work_sysctls[]; 159extern ctl_table slow_work_sysctls[];
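Taken together, the header changes above alter the client contract: ops gain an owner (and an optional desc() routine for /proc), get_ref/put_ref become optional, registration takes the owning module, and delayed items get their own type. A sketch of a module using the revised API, mirroring the gfs2 conversion earlier in this diff; the example_* names are hypothetical.

#include <linux/module.h>
#include <linux/slow-work.h>
#include <linux/seq_file.h>

static void example_execute(struct slow_work *work)
{
	/* the long-running part of the work goes here */
}

#ifdef CONFIG_SLOW_WORK_PROC
static void example_desc(struct slow_work *work, struct seq_file *m)
{
	seq_puts(m, "Example: demo item");	/* shown in /proc/slow_work_rq */
}
#endif

static const struct slow_work_ops example_ops = {
	.owner	 = THIS_MODULE,		/* pins the module during execution */
	.execute = example_execute,	/* get_ref/put_ref are now optional */
#ifdef CONFIG_SLOW_WORK_PROC
	.desc	 = example_desc,
#endif
};

static struct delayed_slow_work example_dwork;

static int __init example_init(void)
{
	int ret;

	ret = slow_work_register_user(THIS_MODULE);	/* owner now required */
	if (ret < 0)
		return ret;

	delayed_slow_work_init(&example_dwork, &example_ops);
	return delayed_slow_work_enqueue(&example_dwork, 5 * HZ);
}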
diff --git a/include/linux/vt.h b/include/linux/vt.h
index 7afca0d72139..7ffa11f06232 100644
--- a/include/linux/vt.h
+++ b/include/linux/vt.h
@@ -70,8 +70,8 @@ struct vt_event {
70#define VT_EVENT_UNBLANK 0x0004 /* Screen unblank */ 70#define VT_EVENT_UNBLANK 0x0004 /* Screen unblank */
71#define VT_EVENT_RESIZE 0x0008 /* Resize display */ 71#define VT_EVENT_RESIZE 0x0008 /* Resize display */
72#define VT_MAX_EVENT 0x000F 72#define VT_MAX_EVENT 0x000F
73 unsigned int old; /* Old console */ 73 unsigned int oldev; /* Old console */
74 unsigned int new; /* New console (if changing) */ 74 unsigned int newev; /* New console (if changing) */
75 unsigned int pad[4]; /* Padding for expansion */ 75 unsigned int pad[4]; /* Padding for expansion */
76}; 76};
77 77
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index cd2e18778f81..0a474568b003 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -893,7 +893,6 @@ struct sctp_transport {
893 */ 893 */
894 /* RTO : The current retransmission timeout value. */ 894 /* RTO : The current retransmission timeout value. */
895 unsigned long rto; 895 unsigned long rto;
896 unsigned long last_rto;
897 896
898 __u32 rtt; /* This is the most recent RTT. */ 897 __u32 rtt; /* This is the most recent RTT. */
899 898
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 9af48cbf0036..f097ae340bc1 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -145,6 +145,7 @@ struct scsi_device {
145 unsigned retry_hwerror:1; /* Retry HARDWARE_ERROR */ 145 unsigned retry_hwerror:1; /* Retry HARDWARE_ERROR */
146 unsigned last_sector_bug:1; /* do not use multisector accesses on 146 unsigned last_sector_bug:1; /* do not use multisector accesses on
147 SD_LAST_BUGGY_SECTORS */ 147 SD_LAST_BUGGY_SECTORS */
148 unsigned is_visible:1; /* is the device visible in sysfs */
148 149
149 DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */ 150 DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */
150 struct list_head event_list; /* asserted events */ 151 struct list_head event_list; /* asserted events */
diff --git a/init/Kconfig b/init/Kconfig
index 9e03ef8b311e..ab5c64801fe5 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1098,6 +1098,16 @@ config SLOW_WORK
1098 1098
1099 See Documentation/slow-work.txt. 1099 See Documentation/slow-work.txt.
1100 1100
1101config SLOW_WORK_PROC
1102 bool "Slow work debugging through /proc"
1103 default n
1104 depends on SLOW_WORK && PROC_FS
1105 help
1106 Display the contents of the slow work run queue through /proc,
1107 including items currently executing.
1108
1109 See Documentation/slow-work.txt.
1110
1101endmenu # General setup 1111endmenu # General setup
1102 1112
1103config HAVE_GENERIC_DMA_COHERENT 1113config HAVE_GENERIC_DMA_COHERENT
diff --git a/kernel/Makefile b/kernel/Makefile
index b8d4cd8ac0b9..776ffed1556d 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -94,6 +94,7 @@ obj-$(CONFIG_X86_DS) += trace/
94obj-$(CONFIG_RING_BUFFER) += trace/ 94obj-$(CONFIG_RING_BUFFER) += trace/
95obj-$(CONFIG_SMP) += sched_cpupri.o 95obj-$(CONFIG_SMP) += sched_cpupri.o
96obj-$(CONFIG_SLOW_WORK) += slow-work.o 96obj-$(CONFIG_SLOW_WORK) += slow-work.o
97obj-$(CONFIG_SLOW_WORK_PROC) += slow-work-proc.o
97obj-$(CONFIG_PERF_EVENTS) += perf_event.o 98obj-$(CONFIG_PERF_EVENTS) += perf_event.o
98 99
99ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y) 100ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
diff --git a/kernel/slow-work-proc.c b/kernel/slow-work-proc.c
new file mode 100644
index 000000000000..3988032571f5
--- /dev/null
+++ b/kernel/slow-work-proc.c
@@ -0,0 +1,227 @@
1/* Slow work debugging
2 *
3 * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/slow-work.h>
14#include <linux/fs.h>
15#include <linux/time.h>
16#include <linux/seq_file.h>
17#include "slow-work.h"
18
19#define ITERATOR_SHIFT (BITS_PER_LONG - 4)
20#define ITERATOR_SELECTOR (0xfUL << ITERATOR_SHIFT)
21#define ITERATOR_COUNTER (~ITERATOR_SELECTOR)
22
23void slow_work_new_thread_desc(struct slow_work *work, struct seq_file *m)
24{
25 seq_puts(m, "Slow-work: New thread");
26}
27
28/*
29 * Render the time mark field on a work item into a 5-char time with units plus
30 * a space
31 */
32static void slow_work_print_mark(struct seq_file *m, struct slow_work *work)
33{
34 struct timespec now, diff;
35
36 now = CURRENT_TIME;
37 diff = timespec_sub(now, work->mark);
38
39 if (diff.tv_sec < 0)
40 seq_puts(m, " -ve ");
41 else if (diff.tv_sec == 0 && diff.tv_nsec < 1000)
42 seq_printf(m, "%3luns ", diff.tv_nsec);
43 else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000)
44 seq_printf(m, "%3luus ", diff.tv_nsec / 1000);
45 else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000000)
46 seq_printf(m, "%3lums ", diff.tv_nsec / 1000000);
47 else if (diff.tv_sec <= 1)
48 seq_puts(m, " 1s ");
49 else if (diff.tv_sec < 60)
50 seq_printf(m, "%4lus ", diff.tv_sec);
51 else if (diff.tv_sec < 60 * 60)
52 seq_printf(m, "%4lum ", diff.tv_sec / 60);
53 else if (diff.tv_sec < 60 * 60 * 24)
54 seq_printf(m, "%4luh ", diff.tv_sec / 3600);
55 else
56 seq_puts(m, "exces ");
57}
58
59/*
60 * Describe a slow work item for /proc
61 */
62static int slow_work_runqueue_show(struct seq_file *m, void *v)
63{
64 struct slow_work *work;
65 struct list_head *p = v;
66 unsigned long id;
67
68 switch ((unsigned long) v) {
69 case 1:
70 seq_puts(m, "THR PID ITEM ADDR FL MARK DESC\n");
71 return 0;
72 case 2:
73 seq_puts(m, "=== ===== ================ == ===== ==========\n");
74 return 0;
75
76 case 3 ... 3 + SLOW_WORK_THREAD_LIMIT - 1:
77 id = (unsigned long) v - 3;
78
79 read_lock(&slow_work_execs_lock);
80 work = slow_work_execs[id];
81 if (work) {
82 smp_read_barrier_depends();
83
84 seq_printf(m, "%3lu %5d %16p %2lx ",
85 id, slow_work_pids[id], work, work->flags);
86 slow_work_print_mark(m, work);
87
88 if (work->ops->desc)
89 work->ops->desc(work, m);
90 seq_putc(m, '\n');
91 }
92 read_unlock(&slow_work_execs_lock);
93 return 0;
94
95 default:
96 work = list_entry(p, struct slow_work, link);
97 seq_printf(m, "%3s - %16p %2lx ",
98 work->flags & SLOW_WORK_VERY_SLOW ? "vsq" : "sq",
99 work, work->flags);
100 slow_work_print_mark(m, work);
101
102 if (work->ops->desc)
103 work->ops->desc(work, m);
104 seq_putc(m, '\n');
105 return 0;
106 }
107}
108
109/*
110 * map the iterator to a work item
111 */
112static void *slow_work_runqueue_index(struct seq_file *m, loff_t *_pos)
113{
114 struct list_head *p;
115 unsigned long count, id;
116
117 switch (*_pos >> ITERATOR_SHIFT) {
118 case 0x0:
119 if (*_pos == 0)
120 *_pos = 1;
121 if (*_pos < 3)
122 return (void *)(unsigned long) *_pos;
123 if (*_pos < 3 + SLOW_WORK_THREAD_LIMIT)
124 for (id = *_pos - 3;
125 id < SLOW_WORK_THREAD_LIMIT;
126 id++, (*_pos)++)
127 if (slow_work_execs[id])
128 return (void *)(unsigned long) *_pos;
129 *_pos = 0x1UL << ITERATOR_SHIFT;
130
131 case 0x1:
132 count = *_pos & ITERATOR_COUNTER;
133 list_for_each(p, &slow_work_queue) {
134 if (count == 0)
135 return p;
136 count--;
137 }
138 *_pos = 0x2UL << ITERATOR_SHIFT;
139
140 case 0x2:
141 count = *_pos & ITERATOR_COUNTER;
142 list_for_each(p, &vslow_work_queue) {
143 if (count == 0)
144 return p;
145 count--;
146 }
147 *_pos = 0x3UL << ITERATOR_SHIFT;
148
149 default:
150 return NULL;
151 }
152}
153
154/*
155 * set up the iterator to start reading from the first line
156 */
157static void *slow_work_runqueue_start(struct seq_file *m, loff_t *_pos)
158{
159 spin_lock_irq(&slow_work_queue_lock);
160 return slow_work_runqueue_index(m, _pos);
161}
162
163/*
164 * move to the next line
165 */
166static void *slow_work_runqueue_next(struct seq_file *m, void *v, loff_t *_pos)
167{
168 struct list_head *p = v;
169 unsigned long selector = *_pos >> ITERATOR_SHIFT;
170
171 (*_pos)++;
172 switch (selector) {
173 case 0x0:
174 return slow_work_runqueue_index(m, _pos);
175
176 case 0x1:
177 if (*_pos >> ITERATOR_SHIFT == 0x1) {
178 p = p->next;
179 if (p != &slow_work_queue)
180 return p;
181 }
182 *_pos = 0x2UL << ITERATOR_SHIFT;
183 p = &vslow_work_queue;
184
185 case 0x2:
186 if (*_pos >> ITERATOR_SHIFT == 0x2) {
187 p = p->next;
188 if (p != &vslow_work_queue)
189 return p;
190 }
191 *_pos = 0x3UL << ITERATOR_SHIFT;
192
193 default:
194 return NULL;
195 }
196}
197
198/*
199 * clean up after reading
200 */
201static void slow_work_runqueue_stop(struct seq_file *m, void *v)
202{
203 spin_unlock_irq(&slow_work_queue_lock);
204}
205
206static const struct seq_operations slow_work_runqueue_ops = {
207 .start = slow_work_runqueue_start,
208 .stop = slow_work_runqueue_stop,
209 .next = slow_work_runqueue_next,
210 .show = slow_work_runqueue_show,
211};
212
213/*
214 * open "/proc/slow_work_rq" to list queue contents
215 */
216static int slow_work_runqueue_open(struct inode *inode, struct file *file)
217{
218 return seq_open(file, &slow_work_runqueue_ops);
219}
220
221const struct file_operations slow_work_runqueue_fops = {
222 .owner = THIS_MODULE,
223 .open = slow_work_runqueue_open,
224 .read = seq_read,
225 .llseek = seq_lseek,
226 .release = seq_release,
227};
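The iterator in this new file multiplexes several sections through one loff_t position: the top four bits select what is being walked (header lines and the executing-thread slots, then each queue in turn) and the low bits count within that section. A sketch of the decode, restating the macros at the top of the file; purely illustrative helpers, not part of the patch:

/* decode a seq_file position as used by slow_work_runqueue_index() */
static inline unsigned long pos_section(loff_t pos)
{
	/* 0x0: headers + exec slots, 0x1: slow queue, 0x2: vslow queue */
	return (unsigned long)pos >> ITERATOR_SHIFT;
}

static inline unsigned long pos_index(loff_t pos)
{
	return (unsigned long)pos & ITERATOR_COUNTER;	/* offset in section */
}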
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index 0d31135efbf4..da94f3c101af 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -16,11 +16,8 @@
16#include <linux/kthread.h> 16#include <linux/kthread.h>
17#include <linux/freezer.h> 17#include <linux/freezer.h>
18#include <linux/wait.h> 18#include <linux/wait.h>
19 19#include <linux/proc_fs.h>
20#define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of 20#include "slow-work.h"
21 * things to do */
22#define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after
23 * OOM */
24 21
25static void slow_work_cull_timeout(unsigned long); 22static void slow_work_cull_timeout(unsigned long);
26static void slow_work_oom_timeout(unsigned long); 23static void slow_work_oom_timeout(unsigned long);
@@ -46,7 +43,7 @@ static unsigned vslow_work_proportion = 50; /* % of threads that may process
46 43
47#ifdef CONFIG_SYSCTL 44#ifdef CONFIG_SYSCTL
48static const int slow_work_min_min_threads = 2; 45static const int slow_work_min_min_threads = 2;
49static int slow_work_max_max_threads = 255; 46static int slow_work_max_max_threads = SLOW_WORK_THREAD_LIMIT;
50static const int slow_work_min_vslow = 1; 47static const int slow_work_min_vslow = 1;
51static const int slow_work_max_vslow = 99; 48static const int slow_work_max_vslow = 99;
52 49
@@ -98,6 +95,32 @@ static DEFINE_TIMER(slow_work_oom_timer, slow_work_oom_timeout, 0, 0);
98static struct slow_work slow_work_new_thread; /* new thread starter */ 95static struct slow_work slow_work_new_thread; /* new thread starter */
99 96
100/* 97/*
98 * slow work ID allocation (use slow_work_queue_lock)
99 */
100static DECLARE_BITMAP(slow_work_ids, SLOW_WORK_THREAD_LIMIT);
101
102/*
103 * Unregistration tracking to prevent put_ref() from disappearing during module
104 * unload
105 */
106#ifdef CONFIG_MODULES
107static struct module *slow_work_thread_processing[SLOW_WORK_THREAD_LIMIT];
108static struct module *slow_work_unreg_module;
109static struct slow_work *slow_work_unreg_work_item;
110static DECLARE_WAIT_QUEUE_HEAD(slow_work_unreg_wq);
111static DEFINE_MUTEX(slow_work_unreg_sync_lock);
112#endif
113
114/*
115 * Data for tracking currently executing items for indication through /proc
116 */
117#ifdef CONFIG_SLOW_WORK_PROC
118struct slow_work *slow_work_execs[SLOW_WORK_THREAD_LIMIT];
119pid_t slow_work_pids[SLOW_WORK_THREAD_LIMIT];
120DEFINE_RWLOCK(slow_work_execs_lock);
121#endif
122
123/*
101 * The queues of work items and the lock governing access to them. These are 124 * The queues of work items and the lock governing access to them. These are
102 * shared between all the CPUs. It doesn't make sense to have per-CPU queues 125 * shared between all the CPUs. It doesn't make sense to have per-CPU queues
103 * as the number of threads bears no relation to the number of CPUs. 126 * as the number of threads bears no relation to the number of CPUs.
@@ -105,9 +128,18 @@ static struct slow_work slow_work_new_thread; /* new thread starter */
105 * There are two queues of work items: one for slow work items, and one for 128 * There are two queues of work items: one for slow work items, and one for
106 * very slow work items. 129 * very slow work items.
107 */ 130 */
108static LIST_HEAD(slow_work_queue); 131LIST_HEAD(slow_work_queue);
109static LIST_HEAD(vslow_work_queue); 132LIST_HEAD(vslow_work_queue);
110static DEFINE_SPINLOCK(slow_work_queue_lock); 133DEFINE_SPINLOCK(slow_work_queue_lock);
134
135/*
136 * The following are two wait queues that get pinged when a work item is placed
137 * on an empty queue. These allow work items that are hogging a thread by
138 * sleeping in a way that could be deferred to yield their thread and enqueue
139 * themselves.
140 */
141static DECLARE_WAIT_QUEUE_HEAD(slow_work_queue_waits_for_occupation);
142static DECLARE_WAIT_QUEUE_HEAD(vslow_work_queue_waits_for_occupation);
111 143
112/* 144/*
113 * The thread controls. A variable used to signal to the threads that they 145 * The thread controls. A variable used to signal to the threads that they
@@ -126,6 +158,20 @@ static DECLARE_COMPLETION(slow_work_last_thread_exited);
126static int slow_work_user_count; 158static int slow_work_user_count;
127static DEFINE_MUTEX(slow_work_user_lock); 159static DEFINE_MUTEX(slow_work_user_lock);
128 160
161static inline int slow_work_get_ref(struct slow_work *work)
162{
163 if (work->ops->get_ref)
164 return work->ops->get_ref(work);
165
166 return 0;
167}
168
169static inline void slow_work_put_ref(struct slow_work *work)
170{
171 if (work->ops->put_ref)
172 work->ops->put_ref(work);
173}
174
129/* 175/*
130 * Calculate the maximum number of active threads in the pool that are 176 * Calculate the maximum number of active threads in the pool that are
131 * permitted to process very slow work items. 177 * permitted to process very slow work items.
@@ -149,8 +195,11 @@ static unsigned slow_work_calc_vsmax(void)
149 * Attempt to execute stuff queued on a slow thread. Return true if we managed 195 * Attempt to execute stuff queued on a slow thread. Return true if we managed
150 * it, false if there was nothing to do. 196 * it, false if there was nothing to do.
151 */ 197 */
152static bool slow_work_execute(void) 198static noinline bool slow_work_execute(int id)
153{ 199{
200#ifdef CONFIG_MODULES
201 struct module *module;
202#endif
154 struct slow_work *work = NULL; 203 struct slow_work *work = NULL;
155 unsigned vsmax; 204 unsigned vsmax;
156 bool very_slow; 205 bool very_slow;
@@ -186,6 +235,16 @@ static bool slow_work_execute(void)
186 } else { 235 } else {
187 very_slow = false; /* avoid the compiler warning */ 236 very_slow = false; /* avoid the compiler warning */
188 } 237 }
238
239#ifdef CONFIG_MODULES
240 if (work)
241 slow_work_thread_processing[id] = work->owner;
242#endif
243 if (work) {
244 slow_work_mark_time(work);
245 slow_work_begin_exec(id, work);
246 }
247
189 spin_unlock_irq(&slow_work_queue_lock); 248 spin_unlock_irq(&slow_work_queue_lock);
190 249
191 if (!work) 250 if (!work)
@@ -194,12 +253,19 @@ static bool slow_work_execute(void)
194 if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags)) 253 if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags))
195 BUG(); 254 BUG();
196 255
197 work->ops->execute(work); 256 /* don't execute if the work is in the process of being cancelled */
257 if (!test_bit(SLOW_WORK_CANCELLING, &work->flags))
258 work->ops->execute(work);
198 259
199 if (very_slow) 260 if (very_slow)
200 atomic_dec(&vslow_work_executing_count); 261 atomic_dec(&vslow_work_executing_count);
201 clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags); 262 clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags);
202 263
264 /* wake up anyone waiting for this work to be complete */
265 wake_up_bit(&work->flags, SLOW_WORK_EXECUTING);
266
267 slow_work_end_exec(id, work);
268
203 /* if someone tried to enqueue the item whilst we were executing it, 269 /* if someone tried to enqueue the item whilst we were executing it,
204 * then it'll be left unenqueued to avoid multiple threads trying to 270 * then it'll be left unenqueued to avoid multiple threads trying to
205 * execute it simultaneously 271 * execute it simultaneously
@@ -219,7 +285,18 @@ static bool slow_work_execute(void)
219 spin_unlock_irq(&slow_work_queue_lock); 285 spin_unlock_irq(&slow_work_queue_lock);
220 } 286 }
221 287
222 work->ops->put_ref(work); 288 /* sort out the race between module unloading and put_ref() */
289 slow_work_put_ref(work);
290
291#ifdef CONFIG_MODULES
292 module = slow_work_thread_processing[id];
293 slow_work_thread_processing[id] = NULL;
294 smp_mb();
295 if (slow_work_unreg_work_item == work ||
296 slow_work_unreg_module == module)
297 wake_up_all(&slow_work_unreg_wq);
298#endif
299
223 return true; 300 return true;
224 301
225auto_requeue: 302auto_requeue:
@@ -227,15 +304,61 @@ auto_requeue:
227 * - we transfer our ref on the item back to the appropriate queue 304 * - we transfer our ref on the item back to the appropriate queue
228 * - don't wake another thread up as we're awake already 305 * - don't wake another thread up as we're awake already
229 */ 306 */
307 slow_work_mark_time(work);
230 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) 308 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
231 list_add_tail(&work->link, &vslow_work_queue); 309 list_add_tail(&work->link, &vslow_work_queue);
232 else 310 else
233 list_add_tail(&work->link, &slow_work_queue); 311 list_add_tail(&work->link, &slow_work_queue);
234 spin_unlock_irq(&slow_work_queue_lock); 312 spin_unlock_irq(&slow_work_queue_lock);
313 slow_work_thread_processing[id] = NULL;
235 return true; 314 return true;
236} 315}
237 316
238/** 317/**
318 * slow_work_sleep_till_thread_needed - Sleep till thread needed by other work
 319 * @work: The work item under execution that wants to sleep
 320 * @_timeout: Scheduler sleep timeout
321 *
322 * Allow a requeueable work item to sleep on a slow-work processor thread until
323 * that thread is needed to do some other work or the sleep is interrupted by
324 * some other event.
325 *
 326 * The caller must set up a wake-up event before calling this, and must have
 327 * set the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its
 328 * own condition before calling this function, as no test is made here.
329 *
330 * False is returned if there is nothing on the queue; true is returned if the
 331 * work item should be requeued.
332 */
333bool slow_work_sleep_till_thread_needed(struct slow_work *work,
334 signed long *_timeout)
335{
336 wait_queue_head_t *wfo_wq;
337 struct list_head *queue;
338
339 DEFINE_WAIT(wait);
340
341 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) {
342 wfo_wq = &vslow_work_queue_waits_for_occupation;
343 queue = &vslow_work_queue;
344 } else {
345 wfo_wq = &slow_work_queue_waits_for_occupation;
346 queue = &slow_work_queue;
347 }
348
349 if (!list_empty(queue))
350 return true;
351
352 add_wait_queue_exclusive(wfo_wq, &wait);
353 if (list_empty(queue))
354 *_timeout = schedule_timeout(*_timeout);
355 finish_wait(wfo_wq, &wait);
356
357 return !list_empty(queue);
358}
359EXPORT_SYMBOL(slow_work_sleep_till_thread_needed);
360
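The comment above prescribes a strict calling protocol: arm a wakeup, set the sleep state, and test your own condition first, since the helper only checks queue occupancy. A hedged sketch of an ->execute() routine yielding its thread under that protocol, where example_waitq and example_ready() are hypothetical:

static DECLARE_WAIT_QUEUE_HEAD(example_waitq);	/* hypothetical wakeup source */

static void example_execute(struct slow_work *work)
{
	signed long timeout = 5 * HZ;
	DEFINE_WAIT(wait);

	prepare_to_wait(&example_waitq, &wait, TASK_UNINTERRUPTIBLE);
	if (!example_ready(work) &&
	    slow_work_sleep_till_thread_needed(work, &timeout)) {
		/* another item needs this thread: requeue ourselves and
		 * give the thread back instead of sleeping on */
		finish_wait(&example_waitq, &wait);
		slow_work_enqueue(work);
		return;
	}
	finish_wait(&example_waitq, &wait);
	/* ... condition met or timed out: carry on with the work ... */
}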
361/**
239 * slow_work_enqueue - Schedule a slow work item for processing 362 * slow_work_enqueue - Schedule a slow work item for processing
240 * @work: The work item to queue 363 * @work: The work item to queue
241 * 364 *
@@ -260,16 +383,22 @@ auto_requeue:
260 * allowed to pick items to execute. This ensures that very slow items won't 383 * allowed to pick items to execute. This ensures that very slow items won't
261 * overly block ones that are just ordinarily slow. 384 * overly block ones that are just ordinarily slow.
262 * 385 *
263 * Returns 0 if successful, -EAGAIN if not. 386 * Returns 0 if successful, -EAGAIN if not (or -ECANCELED if cancelled work is
387 * attempted queued)
264 */ 388 */
265int slow_work_enqueue(struct slow_work *work) 389int slow_work_enqueue(struct slow_work *work)
266{ 390{
391 wait_queue_head_t *wfo_wq;
392 struct list_head *queue;
267 unsigned long flags; 393 unsigned long flags;
394 int ret;
395
396 if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
397 return -ECANCELED;
268 398
269 BUG_ON(slow_work_user_count <= 0); 399 BUG_ON(slow_work_user_count <= 0);
270 BUG_ON(!work); 400 BUG_ON(!work);
271 BUG_ON(!work->ops); 401 BUG_ON(!work->ops);
272 BUG_ON(!work->ops->get_ref);
273 402
274 /* when honouring an enqueue request, we only promise that we will run 403 /* when honouring an enqueue request, we only promise that we will run
275 * the work function in the future; we do not promise to run it once 404 * the work function in the future; we do not promise to run it once
@@ -280,8 +409,19 @@ int slow_work_enqueue(struct slow_work *work)
280 * maintaining our promise 409 * maintaining our promise
281 */ 410 */
282 if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) { 411 if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
412 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) {
413 wfo_wq = &vslow_work_queue_waits_for_occupation;
414 queue = &vslow_work_queue;
415 } else {
416 wfo_wq = &slow_work_queue_waits_for_occupation;
417 queue = &slow_work_queue;
418 }
419
283 spin_lock_irqsave(&slow_work_queue_lock, flags); 420 spin_lock_irqsave(&slow_work_queue_lock, flags);
284 421
422 if (unlikely(test_bit(SLOW_WORK_CANCELLING, &work->flags)))
423 goto cancelled;
424
285 /* we promise that we will not attempt to execute the work 425 /* we promise that we will not attempt to execute the work
286 * function in more than one thread simultaneously 426 * function in more than one thread simultaneously
287 * 427 *
@@ -299,25 +439,221 @@ int slow_work_enqueue(struct slow_work *work)
299 if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) { 439 if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
300 set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags); 440 set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
301 } else { 441 } else {
302 if (work->ops->get_ref(work) < 0) 442 ret = slow_work_get_ref(work);
303 goto cant_get_ref; 443 if (ret < 0)
304 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) 444 goto failed;
305 list_add_tail(&work->link, &vslow_work_queue); 445 slow_work_mark_time(work);
306 else 446 list_add_tail(&work->link, queue);
307 list_add_tail(&work->link, &slow_work_queue);
308 wake_up(&slow_work_thread_wq); 447 wake_up(&slow_work_thread_wq);
448
449 /* if someone who could be requeued is sleeping on a
450 * thread, then ask them to yield their thread */
451 if (work->link.prev == queue)
452 wake_up(wfo_wq);
309 } 453 }
310 454
311 spin_unlock_irqrestore(&slow_work_queue_lock, flags); 455 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
312 } 456 }
313 return 0; 457 return 0;
314 458
315cant_get_ref: 459cancelled:
460 ret = -ECANCELED;
461failed:
316 spin_unlock_irqrestore(&slow_work_queue_lock, flags); 462 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
317 return -EAGAIN; 463 return ret;
318} 464}
319EXPORT_SYMBOL(slow_work_enqueue); 465EXPORT_SYMBOL(slow_work_enqueue);
320 466
467static int slow_work_wait(void *word)
468{
469 schedule();
470 return 0;
471}
472
473/**
474 * slow_work_cancel - Cancel a slow work item
475 * @work: The work item to cancel
476 *
477 * This function will cancel a previously enqueued work item. If we cannot
 478 * cancel the work item, it is guaranteed to have run when this function
479 * returns.
480 */
481void slow_work_cancel(struct slow_work *work)
482{
483 bool wait = true, put = false;
484
485 set_bit(SLOW_WORK_CANCELLING, &work->flags);
486 smp_mb();
487
488 /* if the work item is a delayed work item with an active timer, we
489 * need to wait for the timer to finish _before_ getting the spinlock,
490 * lest we deadlock against the timer routine
491 *
492 * the timer routine will leave DELAYED set if it notices the
493 * CANCELLING flag in time
494 */
495 if (test_bit(SLOW_WORK_DELAYED, &work->flags)) {
496 struct delayed_slow_work *dwork =
497 container_of(work, struct delayed_slow_work, work);
498 del_timer_sync(&dwork->timer);
499 }
500
501 spin_lock_irq(&slow_work_queue_lock);
502
503 if (test_bit(SLOW_WORK_DELAYED, &work->flags)) {
504 /* the timer routine aborted or never happened, so we are left
505 * holding the timer's reference on the item and should just
506 * drop the pending flag and wait for any ongoing execution to
507 * finish */
508 struct delayed_slow_work *dwork =
509 container_of(work, struct delayed_slow_work, work);
510
511 BUG_ON(timer_pending(&dwork->timer));
512 BUG_ON(!list_empty(&work->link));
513
514 clear_bit(SLOW_WORK_DELAYED, &work->flags);
515 put = true;
516 clear_bit(SLOW_WORK_PENDING, &work->flags);
517
518 } else if (test_bit(SLOW_WORK_PENDING, &work->flags) &&
519 !list_empty(&work->link)) {
520 /* the link in the pending queue holds a reference on the item
521 * that we will need to release */
522 list_del_init(&work->link);
523 wait = false;
524 put = true;
525 clear_bit(SLOW_WORK_PENDING, &work->flags);
526
527 } else if (test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags)) {
528 /* the executor is holding our only reference on the item, so
529 * we merely need to wait for it to finish executing */
530 clear_bit(SLOW_WORK_PENDING, &work->flags);
531 }
532
533 spin_unlock_irq(&slow_work_queue_lock);
534
535 /* the EXECUTING flag is set by the executor whilst the spinlock is set
536 * and before the item is dequeued - so assuming the above doesn't
537 * actually dequeue it, simply waiting for the EXECUTING flag to be
538 * released here should be sufficient */
539 if (wait)
540 wait_on_bit(&work->flags, SLOW_WORK_EXECUTING, slow_work_wait,
541 TASK_UNINTERRUPTIBLE);
542
543 clear_bit(SLOW_WORK_CANCELLING, &work->flags);
544 if (put)
545 slow_work_put_ref(work);
546}
547EXPORT_SYMBOL(slow_work_cancel);
548
549/*
550 * Handle expiry of the delay timer, indicating that a delayed slow work item
551 * should now be queued if not cancelled
552 */
553static void delayed_slow_work_timer(unsigned long data)
554{
555 wait_queue_head_t *wfo_wq;
556 struct list_head *queue;
557 struct slow_work *work = (struct slow_work *) data;
558 unsigned long flags;
559 bool queued = false, put = false, first = false;
560
561 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) {
562 wfo_wq = &vslow_work_queue_waits_for_occupation;
563 queue = &vslow_work_queue;
564 } else {
565 wfo_wq = &slow_work_queue_waits_for_occupation;
566 queue = &slow_work_queue;
567 }
568
569 spin_lock_irqsave(&slow_work_queue_lock, flags);
570 if (likely(!test_bit(SLOW_WORK_CANCELLING, &work->flags))) {
571 clear_bit(SLOW_WORK_DELAYED, &work->flags);
572
573 if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
574 /* we discard the reference the timer was holding in
575 * favour of the one the executor holds */
576 set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
577 put = true;
578 } else {
579 slow_work_mark_time(work);
580 list_add_tail(&work->link, queue);
581 queued = true;
582 if (work->link.prev == queue)
583 first = true;
584 }
585 }
586
587 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
588 if (put)
589 slow_work_put_ref(work);
590 if (first)
591 wake_up(wfo_wq);
592 if (queued)
593 wake_up(&slow_work_thread_wq);
594}
595
596/**
597 * delayed_slow_work_enqueue - Schedule a delayed slow work item for processing
598 * @dwork: The delayed work item to queue
599 * @delay: When to start executing the work, in jiffies from now
600 *
601 * This is similar to slow_work_enqueue(), but it adds a delay before the work
602 * is actually queued for processing.
603 *
604 * The item can have delayed processing requested on it whilst it is being
605 * executed. The delay will begin immediately, and if it expires before the
606 * item finishes executing, the item will be placed back on the queue when it
 607 * has finished executing.
608 */
609int delayed_slow_work_enqueue(struct delayed_slow_work *dwork,
610 unsigned long delay)
611{
612 struct slow_work *work = &dwork->work;
613 unsigned long flags;
614 int ret;
615
616 if (delay == 0)
617 return slow_work_enqueue(&dwork->work);
618
619 BUG_ON(slow_work_user_count <= 0);
620 BUG_ON(!work);
621 BUG_ON(!work->ops);
622
623 if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
624 return -ECANCELED;
625
626 if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
627 spin_lock_irqsave(&slow_work_queue_lock, flags);
628
629 if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
630 goto cancelled;
631
632 /* the timer holds a reference whilst it is pending */
633 ret = work->ops->get_ref(work);
634 if (ret < 0)
635 goto cant_get_ref;
636
637 if (test_and_set_bit(SLOW_WORK_DELAYED, &work->flags))
638 BUG();
639 dwork->timer.expires = jiffies + delay;
640 dwork->timer.data = (unsigned long) work;
641 dwork->timer.function = delayed_slow_work_timer;
642 add_timer(&dwork->timer);
643
644 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
645 }
646
647 return 0;
648
649cancelled:
650 ret = -ECANCELED;
651cant_get_ref:
652 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
653 return ret;
654}
655EXPORT_SYMBOL(delayed_slow_work_enqueue);
656
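
(A minimal, hypothetical user of the delayed API above — assuming the slow_work_init()/delayed_slow_work_init() initialisers from include/linux/slow_work.h; the my_* names are illustrative.)

static int my_get_ref(struct slow_work *work)
{
	return 0;	/* object is static, nothing to pin */
}

static void my_put_ref(struct slow_work *work)
{
}

static void my_execute(struct slow_work *work)
{
	/* long-running, sleepable processing goes here */
}

static const struct slow_work_ops my_ops = {
	.owner   = THIS_MODULE,
	.get_ref = my_get_ref,
	.put_ref = my_put_ref,
	.execute = my_execute,
};

static struct delayed_slow_work my_item;

static int my_kick(void)
{
	delayed_slow_work_init(&my_item, &my_ops);
	/* queue for execution roughly two seconds from now; a delay of
	 * zero falls straight through to slow_work_enqueue() */
	return delayed_slow_work_enqueue(&my_item, 2 * HZ);
}
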
321/* 657/*
322 * Schedule a cull of the thread pool at some time in the near future 658 * Schedule a cull of the thread pool at some time in the near future
323 */ 659 */
@@ -368,13 +704,23 @@ static inline bool slow_work_available(int vsmax)
368 */ 704 */
369static int slow_work_thread(void *_data) 705static int slow_work_thread(void *_data)
370{ 706{
371 int vsmax; 707 int vsmax, id;
372 708
373 DEFINE_WAIT(wait); 709 DEFINE_WAIT(wait);
374 710
375 set_freezable(); 711 set_freezable();
376 set_user_nice(current, -5); 712 set_user_nice(current, -5);
377 713
714 /* allocate ourselves an ID */
715 spin_lock_irq(&slow_work_queue_lock);
716 id = find_first_zero_bit(slow_work_ids, SLOW_WORK_THREAD_LIMIT);
717 BUG_ON(id < 0 || id >= SLOW_WORK_THREAD_LIMIT);
718 __set_bit(id, slow_work_ids);
719 slow_work_set_thread_pid(id, current->pid);
720 spin_unlock_irq(&slow_work_queue_lock);
721
722 sprintf(current->comm, "kslowd%03u", id);
723
378 for (;;) { 724 for (;;) {
379 vsmax = vslow_work_proportion; 725 vsmax = vslow_work_proportion;
380 vsmax *= atomic_read(&slow_work_thread_count); 726 vsmax *= atomic_read(&slow_work_thread_count);
@@ -395,7 +741,7 @@ static int slow_work_thread(void *_data)
395 vsmax *= atomic_read(&slow_work_thread_count); 741 vsmax *= atomic_read(&slow_work_thread_count);
396 vsmax /= 100; 742 vsmax /= 100;
397 743
398 if (slow_work_available(vsmax) && slow_work_execute()) { 744 if (slow_work_available(vsmax) && slow_work_execute(id)) {
399 cond_resched(); 745 cond_resched();
400 if (list_empty(&slow_work_queue) && 746 if (list_empty(&slow_work_queue) &&
401 list_empty(&vslow_work_queue) && 747 list_empty(&vslow_work_queue) &&
@@ -412,6 +758,11 @@ static int slow_work_thread(void *_data)
412 break; 758 break;
413 } 759 }
414 760
761 spin_lock_irq(&slow_work_queue_lock);
762 slow_work_set_thread_pid(id, 0);
763 __clear_bit(id, slow_work_ids);
764 spin_unlock_irq(&slow_work_queue_lock);
765
415 if (atomic_dec_and_test(&slow_work_thread_count)) 766 if (atomic_dec_and_test(&slow_work_thread_count))
416 complete_and_exit(&slow_work_last_thread_exited, 0); 767 complete_and_exit(&slow_work_last_thread_exited, 0);
417 return 0; 768 return 0;
@@ -427,21 +778,6 @@ static void slow_work_cull_timeout(unsigned long data)
427} 778}
428 779
429/* 780/*
430 * Get a reference on slow work thread starter
431 */
432static int slow_work_new_thread_get_ref(struct slow_work *work)
433{
434 return 0;
435}
436
437/*
438 * Drop a reference on slow work thread starter
439 */
440static void slow_work_new_thread_put_ref(struct slow_work *work)
441{
442}
443
444/*
445 * Start a new slow work thread 781 * Start a new slow work thread
446 */ 782 */
447static void slow_work_new_thread_execute(struct slow_work *work) 783static void slow_work_new_thread_execute(struct slow_work *work)
@@ -475,9 +811,11 @@ static void slow_work_new_thread_execute(struct slow_work *work)
475} 811}
476 812
477static const struct slow_work_ops slow_work_new_thread_ops = { 813static const struct slow_work_ops slow_work_new_thread_ops = {
478 .get_ref = slow_work_new_thread_get_ref, 814 .owner = THIS_MODULE,
479 .put_ref = slow_work_new_thread_put_ref,
480 .execute = slow_work_new_thread_execute, 815 .execute = slow_work_new_thread_execute,
816#ifdef CONFIG_SLOW_WORK_PROC
817 .desc = slow_work_new_thread_desc,
818#endif
481}; 819};
482 820
483/* 821/*
@@ -546,12 +884,13 @@ static int slow_work_max_threads_sysctl(struct ctl_table *table, int write,
546 884
547/** 885/**
548 * slow_work_register_user - Register a user of the facility 886 * slow_work_register_user - Register a user of the facility
887 * @module: The module about to make use of the facility
549 * 888 *
550 * Register a user of the facility, starting up the initial threads if there 889 * Register a user of the facility, starting up the initial threads if there
551 * aren't any other users at this point. This will return 0 if successful, or 890 * aren't any other users at this point. This will return 0 if successful, or
552 * an error if not. 891 * an error if not.
553 */ 892 */
554int slow_work_register_user(void) 893int slow_work_register_user(struct module *module)
555{ 894{
556 struct task_struct *p; 895 struct task_struct *p;
557 int loop; 896 int loop;
@@ -598,14 +937,79 @@ error:
598} 937}
599EXPORT_SYMBOL(slow_work_register_user); 938EXPORT_SYMBOL(slow_work_register_user);
600 939
940/*
941 * wait for all outstanding items from the calling module to complete
942 * - note that more items may be queued whilst we're waiting
943 */
944static void slow_work_wait_for_items(struct module *module)
945{
946 DECLARE_WAITQUEUE(myself, current);
947 struct slow_work *work;
948 int loop;
949
950 mutex_lock(&slow_work_unreg_sync_lock);
951 add_wait_queue(&slow_work_unreg_wq, &myself);
952
953 for (;;) {
954 spin_lock_irq(&slow_work_queue_lock);
955
956 /* first of all, we wait for the last queued item in each list
957 * to be processed */
958 list_for_each_entry_reverse(work, &vslow_work_queue, link) {
959 if (work->owner == module) {
960 set_current_state(TASK_UNINTERRUPTIBLE);
961 slow_work_unreg_work_item = work;
962 goto do_wait;
963 }
964 }
965 list_for_each_entry_reverse(work, &slow_work_queue, link) {
966 if (work->owner == module) {
967 set_current_state(TASK_UNINTERRUPTIBLE);
968 slow_work_unreg_work_item = work;
969 goto do_wait;
970 }
971 }
972
973 /* then we wait for the items being processed to finish */
974 slow_work_unreg_module = module;
975 smp_mb();
976 for (loop = 0; loop < SLOW_WORK_THREAD_LIMIT; loop++) {
977 if (slow_work_thread_processing[loop] == module)
978 goto do_wait;
979 }
980 spin_unlock_irq(&slow_work_queue_lock);
981 break; /* okay, we're done */
982
983 do_wait:
984 spin_unlock_irq(&slow_work_queue_lock);
985 schedule();
986 slow_work_unreg_work_item = NULL;
987 slow_work_unreg_module = NULL;
988 }
989
990 remove_wait_queue(&slow_work_unreg_wq, &myself);
991 mutex_unlock(&slow_work_unreg_sync_lock);
992}
993
601/** 994/**
602 * slow_work_unregister_user - Unregister a user of the facility 995 * slow_work_unregister_user - Unregister a user of the facility
996 * @module: The module whose items should be cleared
603 * 997 *
604 * Unregister a user of the facility, killing all the threads if this was the 998 * Unregister a user of the facility, killing all the threads if this was the
605 * last one. 999 * last one.
1000 *
1001 * This waits for all the work items belonging to the nominated module to go
1002 * away before proceeding.
606 */ 1003 */
607void slow_work_unregister_user(void) 1004void slow_work_unregister_user(struct module *module)
608{ 1005{
1006 /* first of all, wait for all outstanding items from the calling module
1007 * to complete */
1008 if (module)
1009 slow_work_wait_for_items(module);
1010
1011 /* then we can actually go about shutting down the facility if need
1012 * be */
609 mutex_lock(&slow_work_user_lock); 1013 mutex_lock(&slow_work_user_lock);
610 1014
611 BUG_ON(slow_work_user_count <= 0); 1015 BUG_ON(slow_work_user_count <= 0);
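
(Taken together with slow_work_register_user() above, the module-scoped lifecycle looks roughly like this; the my_module_* names are illustrative.)

static int __init my_module_init(void)
{
	int ret;

	ret = slow_work_register_user(THIS_MODULE);
	if (ret < 0)
		return ret;
	/* ... queue items whose ops have .owner = THIS_MODULE ... */
	return 0;
}

static void __exit my_module_exit(void)
{
	/* blocks until every queued or executing item owned by this
	 * module has completed, then drops the facility's user count */
	slow_work_unregister_user(THIS_MODULE);
}
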
@@ -639,6 +1043,10 @@ static int __init init_slow_work(void)
639 if (slow_work_max_max_threads < nr_cpus * 2) 1043 if (slow_work_max_max_threads < nr_cpus * 2)
640 slow_work_max_max_threads = nr_cpus * 2; 1044 slow_work_max_max_threads = nr_cpus * 2;
641#endif 1045#endif
1046#ifdef CONFIG_SLOW_WORK_PROC
1047 proc_create("slow_work_rq", S_IFREG | 0400, NULL,
1048 &slow_work_runqueue_fops);
1049#endif
642 return 0; 1050 return 0;
643} 1051}
644 1052
diff --git a/kernel/slow-work.h b/kernel/slow-work.h
new file mode 100644
index 000000000000..3c2f007f3ad6
--- /dev/null
+++ b/kernel/slow-work.h
@@ -0,0 +1,72 @@
1/* Slow work private definitions
2 *
3 * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of
13 * things to do */
14#define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after
15 * OOM */
16
17#define SLOW_WORK_THREAD_LIMIT 255 /* abs maximum number of slow-work threads */
18
19/*
20 * slow-work.c
21 */
22#ifdef CONFIG_SLOW_WORK_PROC
23extern struct slow_work *slow_work_execs[];
24extern pid_t slow_work_pids[];
25extern rwlock_t slow_work_execs_lock;
26#endif
27
28extern struct list_head slow_work_queue;
29extern struct list_head vslow_work_queue;
30extern spinlock_t slow_work_queue_lock;
31
32/*
33 * slow-work-proc.c
34 */
35#ifdef CONFIG_SLOW_WORK_PROC
36extern const struct file_operations slow_work_runqueue_fops;
37
38extern void slow_work_new_thread_desc(struct slow_work *, struct seq_file *);
39#endif
40
41/*
42 * Helper functions
43 */
44static inline void slow_work_set_thread_pid(int id, pid_t pid)
45{
46#ifdef CONFIG_SLOW_WORK_PROC
47 slow_work_pids[id] = pid;
48#endif
49}
50
51static inline void slow_work_mark_time(struct slow_work *work)
52{
53#ifdef CONFIG_SLOW_WORK_PROC
54 work->mark = CURRENT_TIME;
55#endif
56}
57
58static inline void slow_work_begin_exec(int id, struct slow_work *work)
59{
60#ifdef CONFIG_SLOW_WORK_PROC
61 slow_work_execs[id] = work;
62#endif
63}
64
65static inline void slow_work_end_exec(int id, struct slow_work *work)
66{
67#ifdef CONFIG_SLOW_WORK_PROC
68 write_lock(&slow_work_execs_lock);
69 slow_work_execs[id] = NULL;
70 write_unlock(&slow_work_execs_lock);
71#endif
72}
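
(The helpers above compile to no-ops without CONFIG_SLOW_WORK_PROC; a sketch of how the executor is presumably expected to bracket a run with them — illustrative, not the patch's actual slow_work_execute() body.)

static void example_exec(int id, struct slow_work *work)
{
	/* slow_work_mark_time() is used at queueing time (see the timer
	 * routine above); around execution, the begin/end pair publishes
	 * what this thread is running for the /proc interface */
	slow_work_begin_exec(id, work);
	work->ops->execute(work);
	slow_work_end_exec(id, work);
}
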
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 23abbd93cae1..92cdd9936e3d 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -200,6 +200,9 @@ radix_tree_node_free(struct radix_tree_node *node)
200 * ensure that the addition of a single element in the tree cannot fail. On 200 * ensure that the addition of a single element in the tree cannot fail. On
201 * success, return zero, with preemption disabled. On error, return -ENOMEM 201 * success, return zero, with preemption disabled. On error, return -ENOMEM
202 * with preemption not disabled. 202 * with preemption not disabled.
203 *
204 * To make use of this facility, the radix tree must be initialised without
205 * __GFP_WAIT being passed to INIT_RADIX_TREE().
203 */ 206 */
204int radix_tree_preload(gfp_t gfp_mask) 207int radix_tree_preload(gfp_t gfp_mask)
205{ 208{
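
(To illustrate the constraint documented above: a hypothetical caller initialises the tree without __GFP_WAIT, preloads in a sleepable context, and can then insert under a lock without risking -ENOMEM.)

static RADIX_TREE(my_tree, GFP_ATOMIC);		/* no __GFP_WAIT here */
static DEFINE_SPINLOCK(my_tree_lock);

static int my_insert(unsigned long index, void *item)
{
	int ret;

	ret = radix_tree_preload(GFP_KERNEL);	/* may sleep */
	if (ret < 0)
		return ret;
	spin_lock(&my_tree_lock);
	ret = radix_tree_insert(&my_tree, index, item);
	spin_unlock(&my_tree_lock);
	radix_tree_preload_end();		/* re-enables preemption */
	return ret;
}
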
@@ -543,7 +546,6 @@ out:
543} 546}
544EXPORT_SYMBOL(radix_tree_tag_clear); 547EXPORT_SYMBOL(radix_tree_tag_clear);
545 548
546#ifndef __KERNEL__ /* Only the test harness uses this at present */
547/** 549/**
548 * radix_tree_tag_get - get a tag on a radix tree node 550 * radix_tree_tag_get - get a tag on a radix tree node
549 * @root: radix tree root 551 * @root: radix tree root
@@ -606,7 +608,6 @@ int radix_tree_tag_get(struct radix_tree_root *root,
606 } 608 }
607} 609}
608EXPORT_SYMBOL(radix_tree_tag_get); 610EXPORT_SYMBOL(radix_tree_tag_get);
609#endif
610 611
611/** 612/**
612 * radix_tree_next_hole - find the next hole (not-present entry) 613 * radix_tree_next_hole - find the next hole (not-present entry)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 11aee09dd2a6..67a33a5a1a93 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -604,10 +604,14 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
604 604
605 /* 605 /*
606 * Finally, kill the kernel threads. We don't need to be RCU 606 * Finally, kill the kernel threads. We don't need to be RCU
607 * safe anymore, since the bdi is gone from visibility. 607 * safe anymore, since the bdi is gone from visibility. Force
608 * unfreeze of the thread before calling kthread_stop(), otherwise
 609	 * it would never exit if it is currently stuck in the refrigerator.
608 */ 610 */
609 list_for_each_entry(wb, &bdi->wb_list, list) 611 list_for_each_entry(wb, &bdi->wb_list, list) {
612 wb->task->flags &= ~PF_FROZEN;
610 kthread_stop(wb->task); 613 kthread_stop(wb->task);
614 }
611} 615}
612 616
613/* 617/*
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 575f9bd51ccd..d3fe10be7219 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -563,7 +563,7 @@ out_oversize:
563 printk(KERN_INFO "Oversized IP packet from %pI4.\n", 563 printk(KERN_INFO "Oversized IP packet from %pI4.\n",
564 &qp->saddr); 564 &qp->saddr);
565out_fail: 565out_fail:
566 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_REASMFAILS); 566 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
567 return err; 567 return err;
568} 568}
569 569
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index c9f20e28521b..23e5e97aa617 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -423,16 +423,6 @@ void sctp_retransmit_mark(struct sctp_outq *q,
423 if ((reason == SCTP_RTXR_FAST_RTX && 423 if ((reason == SCTP_RTXR_FAST_RTX &&
424 (chunk->fast_retransmit == SCTP_NEED_FRTX)) || 424 (chunk->fast_retransmit == SCTP_NEED_FRTX)) ||
425 (reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) { 425 (reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) {
426 /* If this chunk was sent less then 1 rto ago, do not
427 * retransmit this chunk, but give the peer time
428 * to acknowlege it. Do this only when
429 * retransmitting due to T3 timeout.
430 */
431 if (reason == SCTP_RTXR_T3_RTX &&
432 time_before(jiffies, chunk->sent_at +
433 transport->last_rto))
434 continue;
435
436 /* RFC 2960 6.2.1 Processing a Received SACK 426 /* RFC 2960 6.2.1 Processing a Received SACK
437 * 427 *
438 * C) Any time a DATA chunk is marked for 428 * C) Any time a DATA chunk is marked for
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 8674d4919556..efa516b47e81 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -480,7 +480,6 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
480 * that indicates that we have an outstanding HB. 480 * that indicates that we have an outstanding HB.
481 */ 481 */
482 if (!is_hb || transport->hb_sent) { 482 if (!is_hb || transport->hb_sent) {
483 transport->last_rto = transport->rto;
484 transport->rto = min((transport->rto * 2), transport->asoc->rto_max); 483 transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
485 } 484 }
486} 485}
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 3b141bb32faf..37a1184d789f 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -74,7 +74,7 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
74 * given destination transport address, set RTO to the protocol 74 * given destination transport address, set RTO to the protocol
75 * parameter 'RTO.Initial'. 75 * parameter 'RTO.Initial'.
76 */ 76 */
77 peer->last_rto = peer->rto = msecs_to_jiffies(sctp_rto_initial); 77 peer->rto = msecs_to_jiffies(sctp_rto_initial);
78 peer->rtt = 0; 78 peer->rtt = 0;
79 peer->rttvar = 0; 79 peer->rttvar = 0;
80 peer->srtt = 0; 80 peer->srtt = 0;
@@ -386,7 +386,6 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
386 tp->rto = tp->asoc->rto_max; 386 tp->rto = tp->asoc->rto_max;
387 387
388 tp->rtt = rtt; 388 tp->rtt = rtt;
389 tp->last_rto = tp->rto;
390 389
391 /* Reset rto_pending so that a new RTT measurement is started when a 390 /* Reset rto_pending so that a new RTT measurement is started when a
392 * new data chunk is sent. 391 * new data chunk is sent.
@@ -602,7 +601,7 @@ void sctp_transport_reset(struct sctp_transport *t)
602 */ 601 */
603 t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380)); 602 t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
604 t->ssthresh = asoc->peer.i.a_rwnd; 603 t->ssthresh = asoc->peer.i.a_rwnd;
605 t->last_rto = t->rto = asoc->rto_initial; 604 t->rto = asoc->rto_initial;
606 t->rtt = 0; 605 t->rtt = 0;
607 t->srtt = 0; 606 t->srtt = 0;
608 t->rttvar = 0; 607 t->rttvar = 0;
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
index 22e8fd89477f..c7450c8f0a7c 100644
--- a/net/sunrpc/addr.c
+++ b/net/sunrpc/addr.c
@@ -306,24 +306,25 @@ EXPORT_SYMBOL_GPL(rpc_sockaddr2uaddr);
306 * @sap: buffer into which to plant socket address 306 * @sap: buffer into which to plant socket address
307 * @salen: size of buffer 307 * @salen: size of buffer
308 * 308 *
309 * @uaddr does not have to be '\0'-terminated, but strict_strtoul() and
310 * rpc_pton() require proper string termination to be successful.
311 *
309 * Returns the size of the socket address if successful; otherwise 312 * Returns the size of the socket address if successful; otherwise
310 * zero is returned. 313 * zero is returned.
311 */ 314 */
312size_t rpc_uaddr2sockaddr(const char *uaddr, const size_t uaddr_len, 315size_t rpc_uaddr2sockaddr(const char *uaddr, const size_t uaddr_len,
313 struct sockaddr *sap, const size_t salen) 316 struct sockaddr *sap, const size_t salen)
314{ 317{
315 char *c, buf[RPCBIND_MAXUADDRLEN]; 318 char *c, buf[RPCBIND_MAXUADDRLEN + sizeof('\0')];
316 unsigned long portlo, porthi; 319 unsigned long portlo, porthi;
317 unsigned short port; 320 unsigned short port;
318 321
319 if (uaddr_len > sizeof(buf)) 322 if (uaddr_len > RPCBIND_MAXUADDRLEN)
320 return 0; 323 return 0;
321 324
322 memcpy(buf, uaddr, uaddr_len); 325 memcpy(buf, uaddr, uaddr_len);
323 326
324 buf[uaddr_len] = '\n'; 327 buf[uaddr_len] = '\0';
325 buf[uaddr_len + 1] = '\0';
326
327 c = strrchr(buf, '.'); 328 c = strrchr(buf, '.');
328 if (unlikely(c == NULL)) 329 if (unlikely(c == NULL))
329 return 0; 330 return 0;
@@ -332,9 +333,7 @@ size_t rpc_uaddr2sockaddr(const char *uaddr, const size_t uaddr_len,
332 if (unlikely(portlo > 255)) 333 if (unlikely(portlo > 255))
333 return 0; 334 return 0;
334 335
335 c[0] = '\n'; 336 *c = '\0';
336 c[1] = '\0';
337
338 c = strrchr(buf, '.'); 337 c = strrchr(buf, '.');
339 if (unlikely(c == NULL)) 338 if (unlikely(c == NULL))
340 return 0; 339 return 0;
@@ -345,8 +344,7 @@ size_t rpc_uaddr2sockaddr(const char *uaddr, const size_t uaddr_len,
345 344
346 port = (unsigned short)((porthi << 8) | portlo); 345 port = (unsigned short)((porthi << 8) | portlo);
347 346
348 c[0] = '\0'; 347 *c = '\0';
349
350 if (rpc_pton(buf, strlen(buf), sap, salen) == 0) 348 if (rpc_pton(buf, strlen(buf), sap, salen) == 0)
351 return 0; 349 return 0;
352 350
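
(A hedged illustration of the termination fix above: the universal address string need not be NUL-terminated by the caller, since the function now copies it into a buffer with room for a terminator. The address below is a documentation example, not from the patch.)

static size_t example_parse(struct sockaddr_storage *ss)
{
	/* "192.0.2.1.0.111" encodes IPv4 192.0.2.1, port (0 << 8) | 111 */
	static const char uaddr[] = "192.0.2.1.0.111";

	return rpc_uaddr2sockaddr(uaddr, sizeof(uaddr) - 1,
				  (struct sockaddr *)ss, sizeof(*ss));
}
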
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index 6d69c7ccdcc7..80599e3a7994 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -30,7 +30,7 @@ silentoldconfig: $(obj)/conf
30 $< -s $(Kconfig) 30 $< -s $(Kconfig)
31 31
32localmodconfig: $(obj)/streamline_config.pl $(obj)/conf 32localmodconfig: $(obj)/streamline_config.pl $(obj)/conf
33 $(Q)perl $< $(Kconfig) > .tmp.config 33 $(Q)perl $< $(srctree) $(Kconfig) > .tmp.config
34 $(Q)if [ -f .config ]; then \ 34 $(Q)if [ -f .config ]; then \
35 cmp -s .tmp.config .config || \ 35 cmp -s .tmp.config .config || \
36 (mv -f .config .config.old.1; \ 36 (mv -f .config .config.old.1; \
@@ -44,7 +44,7 @@ localmodconfig: $(obj)/streamline_config.pl $(obj)/conf
44 $(Q)rm -f .tmp.config 44 $(Q)rm -f .tmp.config
45 45
46localyesconfig: $(obj)/streamline_config.pl $(obj)/conf 46localyesconfig: $(obj)/streamline_config.pl $(obj)/conf
47 $(Q)perl $< $(Kconfig) > .tmp.config 47 $(Q)perl $< $(srctree) $(Kconfig) > .tmp.config
48 $(Q)sed -i s/=m/=y/ .tmp.config 48 $(Q)sed -i s/=m/=y/ .tmp.config
49 $(Q)if [ -f .config ]; then \ 49 $(Q)if [ -f .config ]; then \
50 cmp -s .tmp.config .config || \ 50 cmp -s .tmp.config .config || \
diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
index 95984db8e1e0..0d800820c3cd 100644
--- a/scripts/kconfig/streamline_config.pl
+++ b/scripts/kconfig/streamline_config.pl
@@ -43,7 +43,6 @@
43# make oldconfig 43# make oldconfig
44# 44#
45my $config = ".config"; 45my $config = ".config";
46my $linuxpath = ".";
47 46
48my $uname = `uname -r`; 47my $uname = `uname -r`;
49chomp $uname; 48chomp $uname;
@@ -111,7 +110,11 @@ sub find_config {
111 110
112find_config; 111find_config;
113 112
114my @makefiles = `find $linuxpath -name Makefile`; 113# Get the build source and top level Kconfig file (passed in)
114my $ksource = $ARGV[0];
115my $kconfig = $ARGV[1];
116
117my @makefiles = `find $ksource -name Makefile`;
115my %depends; 118my %depends;
116my %selects; 119my %selects;
117my %prompts; 120my %prompts;
@@ -119,9 +122,6 @@ my %objects;
119my $var; 122my $var;
120my $cont = 0; 123my $cont = 0;
121 124
122# Get the top level Kconfig file (passed in)
123my $kconfig = $ARGV[0];
124
125# prevent recursion 125# prevent recursion
126my %read_kconfigs; 126my %read_kconfigs;
127 127
@@ -132,7 +132,7 @@ sub read_kconfig {
132 my $config; 132 my $config;
133 my @kconfigs; 133 my @kconfigs;
134 134
135 open(KIN, $kconfig) || die "Can't open $kconfig"; 135 open(KIN, "$ksource/$kconfig") || die "Can't open $kconfig";
136 while (<KIN>) { 136 while (<KIN>) {
137 chomp; 137 chomp;
138 138
diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c
index 1f0f8213e2d5..6c160a038b23 100644
--- a/sound/arm/aaci.c
+++ b/sound/arm/aaci.c
@@ -504,6 +504,10 @@ static int aaci_pcm_hw_params(struct snd_pcm_substream *substream,
504 int err; 504 int err;
505 505
506 aaci_pcm_hw_free(substream); 506 aaci_pcm_hw_free(substream);
507 if (aacirun->pcm_open) {
508 snd_ac97_pcm_close(aacirun->pcm);
509 aacirun->pcm_open = 0;
510 }
507 511
508 err = devdma_hw_alloc(NULL, substream, 512 err = devdma_hw_alloc(NULL, substream,
509 params_buffer_bytes(params)); 513 params_buffer_bytes(params));
@@ -517,7 +521,7 @@ static int aaci_pcm_hw_params(struct snd_pcm_substream *substream,
517 else 521 else
518 err = snd_ac97_pcm_open(aacirun->pcm, params_rate(params), 522 err = snd_ac97_pcm_open(aacirun->pcm, params_rate(params),
519 params_channels(params), 523 params_channels(params),
520 aacirun->pcm->r[1].slots); 524 aacirun->pcm->r[0].slots);
521 525
522 if (err) 526 if (err)
523 goto out; 527 goto out;
diff --git a/sound/soc/codecs/tlv320aic23.c b/sound/soc/codecs/tlv320aic23.c
index 6b24d8bb02bb..90a0264f7538 100644
--- a/sound/soc/codecs/tlv320aic23.c
+++ b/sound/soc/codecs/tlv320aic23.c
@@ -625,11 +625,10 @@ static int tlv320aic23_resume(struct platform_device *pdev)
625{ 625{
626 struct snd_soc_device *socdev = platform_get_drvdata(pdev); 626 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
627 struct snd_soc_codec *codec = socdev->card->codec; 627 struct snd_soc_codec *codec = socdev->card->codec;
628 int i;
629 u16 reg; 628 u16 reg;
630 629
631 /* Sync reg_cache with the hardware */ 630 /* Sync reg_cache with the hardware */
632 for (reg = 0; reg < ARRAY_SIZE(tlv320aic23_reg); i++) { 631 for (reg = 0; reg < TLV320AIC23_RESET; reg++) {
633 u16 val = tlv320aic23_read_reg_cache(codec, reg); 632 u16 val = tlv320aic23_read_reg_cache(codec, reg);
634 tlv320aic23_write(codec, reg, val); 633 tlv320aic23_write(codec, reg, val);
635 } 634 }
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index d89f6dc00908..66d4c165f99b 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -973,9 +973,19 @@ static int dapm_power_widgets(struct snd_soc_codec *codec, int event)
973 if (!w->power_check) 973 if (!w->power_check)
974 continue; 974 continue;
975 975
976 power = w->power_check(w); 976 /* If we're suspending then pull down all the
977 if (power) 977 * power. */
978 sys_power = 1; 978 switch (event) {
979 case SND_SOC_DAPM_STREAM_SUSPEND:
980 power = 0;
981 break;
982
983 default:
984 power = w->power_check(w);
985 if (power)
986 sys_power = 1;
987 break;
988 }
979 989
980 if (w->power == power) 990 if (w->power == power)
981 continue; 991 continue;
@@ -999,8 +1009,12 @@ static int dapm_power_widgets(struct snd_soc_codec *codec, int event)
999 case SND_SOC_DAPM_STREAM_RESUME: 1009 case SND_SOC_DAPM_STREAM_RESUME:
1000 sys_power = 1; 1010 sys_power = 1;
1001 break; 1011 break;
1012 case SND_SOC_DAPM_STREAM_SUSPEND:
1013 sys_power = 0;
1014 break;
1002 case SND_SOC_DAPM_STREAM_NOP: 1015 case SND_SOC_DAPM_STREAM_NOP:
1003 sys_power = codec->bias_level != SND_SOC_BIAS_STANDBY; 1016 sys_power = codec->bias_level != SND_SOC_BIAS_STANDBY;
1017 break;
1004 default: 1018 default:
1005 break; 1019 break;
1006 } 1020 }