-rw-r--r--  Documentation/DocBook/drm.tmpl | 308
-rw-r--r--  Documentation/cgroups/freezer-subsystem.txt | 4
-rw-r--r--  Documentation/sound/alsa/HD-Audio-Models.txt | 1
-rw-r--r--  MAINTAINERS | 9
-rw-r--r--  arch/sparc/include/asm/unistd.h | 4
-rw-r--r--  arch/sparc/kernel/systbls_32.S | 2
-rw-r--r--  arch/sparc/kernel/systbls_64.S | 4
-rw-r--r--  crypto/ablkcipher.c | 14
-rw-r--r--  crypto/aead.c | 14
-rw-r--r--  crypto/ahash.c | 7
-rw-r--r--  crypto/blkcipher.c | 7
-rw-r--r--  crypto/crypto_user.c | 3
-rw-r--r--  crypto/pcompress.c | 7
-rw-r--r--  crypto/rng.c | 7
-rw-r--r--  crypto/shash.c | 7
-rw-r--r--  drivers/base/power/opp.c | 2
-rw-r--r--  drivers/char/agp/intel-gtt.c | 7
-rw-r--r--  drivers/gpu/drm/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 13
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c | 12
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 2
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 22
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_i2c.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_perf.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_pm.c | 20
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_grctx.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_vram.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_graph.c | 41
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_grctx.c | 31
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_vram.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 53
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 118
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 50
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 118
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 18
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 14
-rw-r--r--  drivers/hwspinlock/u8500_hsem.c | 7
-rw-r--r--  drivers/md/raid5.c | 16
-rw-r--r--  drivers/media/video/s5k6aa.c | 1
-rw-r--r--  drivers/mfd/ab5500-core.c | 1
-rw-r--r--  drivers/mfd/ab5500-debugfs.c | 1
-rw-r--r--  drivers/mtd/maps/bcm963xx-flash.c | 1
-rw-r--r--  drivers/platform/x86/Kconfig | 4
-rw-r--r--  drivers/tty/n_gsm.c | 12
-rw-r--r--  fs/btrfs/btrfs_inode.h | 4
-rw-r--r--  fs/btrfs/delayed-inode.c | 58
-rw-r--r--  fs/btrfs/disk-io.c | 42
-rw-r--r--  fs/btrfs/extent-tree.c | 50
-rw-r--r--  fs/btrfs/free-space-cache.c | 17
-rw-r--r--  fs/btrfs/inode-map.c | 28
-rw-r--r--  fs/btrfs/inode.c | 84
-rw-r--r--  fs/btrfs/relocation.c | 2
-rw-r--r--  fs/btrfs/scrub.c | 64
-rw-r--r--  fs/btrfs/super.c | 49
-rw-r--r--  fs/btrfs/transaction.c | 4
-rw-r--r--  fs/btrfs/volumes.c | 5
-rw-r--r--  fs/proc/base.c | 146
-rw-r--r--  fs/xfs/xfs_aops.c | 2
-rw-r--r--  fs/xfs/xfs_buf_item.c | 2
-rw-r--r--  fs/xfs/xfs_dquot_item.c | 6
-rw-r--r--  fs/xfs/xfs_extfree_item.c | 4
-rw-r--r--  fs/xfs/xfs_inode_item.c | 2
-rw-r--r--  fs/xfs/xfs_log.c | 2
-rw-r--r--  fs/xfs/xfs_log.h | 2
-rw-r--r--  fs/xfs/xfs_trans.h | 6
-rw-r--r--  fs/xfs/xfs_vnodeops.c | 14
-rw-r--r--  include/drm/drmP.h | 4
-rw-r--r--  include/drm/exynos_drm.h | 2
-rw-r--r--  include/linux/devfreq.h | 2
-rw-r--r--  include/linux/hwspinlock.h | 1
-rw-r--r--  include/linux/mfd/wm8994/registers.h | 15
-rw-r--r--  kernel/power/qos.c | 1
-rw-r--r--  sound/core/vmaster.c | 18
-rw-r--r--  sound/pci/hda/hda_codec.c | 64
-rw-r--r--  sound/pci/hda/hda_codec.h | 1
-rw-r--r--  sound/pci/hda/hda_local.h | 16
-rw-r--r--  sound/pci/hda/patch_conexant.c | 1
-rw-r--r--  sound/pci/hda/patch_realtek.c | 13
-rw-r--r--  sound/pci/hda/patch_sigmatel.c | 11
-rw-r--r--  sound/pci/intel8x0.c | 58
-rw-r--r--  sound/soc/codecs/wm8994.c | 43
-rw-r--r--  sound/usb/mixer.c | 110
-rw-r--r--  sound/usb/quirks.c | 7
-rwxr-xr-x  tools/testing/ktest/ktest.pl | 515
-rw-r--r--  tools/testing/ktest/sample.conf | 146
105 files changed, 1834 insertions, 896 deletions
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index c27915893974..196b8b9dba11 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -32,7 +32,7 @@
32 The Linux DRM layer contains code intended to support the needs 32 The Linux DRM layer contains code intended to support the needs
33 of complex graphics devices, usually containing programmable 33 of complex graphics devices, usually containing programmable
34 pipelines well suited to 3D graphics acceleration. Graphics 34 pipelines well suited to 3D graphics acceleration. Graphics
35 drivers in the kernel can make use of DRM functions to make 35 drivers in the kernel may make use of DRM functions to make
36 tasks like memory management, interrupt handling and DMA easier, 36 tasks like memory management, interrupt handling and DMA easier,
37 and provide a uniform interface to applications. 37 and provide a uniform interface to applications.
38 </para> 38 </para>
@@ -57,10 +57,10 @@
57 existing drivers. 57 existing drivers.
58 </para> 58 </para>
59 <para> 59 <para>
60 First, we'll go over some typical driver initialization 60 First, we go over some typical driver initialization
61 requirements, like setting up command buffers, creating an 61 requirements, like setting up command buffers, creating an
62 initial output configuration, and initializing core services. 62 initial output configuration, and initializing core services.
63 Subsequent sections will cover core internals in more detail, 63 Subsequent sections cover core internals in more detail,
64 providing implementation notes and examples. 64 providing implementation notes and examples.
65 </para> 65 </para>
66 <para> 66 <para>
@@ -74,7 +74,7 @@
74 </para> 74 </para>
75 <para> 75 <para>
76 The core of every DRM driver is struct drm_driver. Drivers 76 The core of every DRM driver is struct drm_driver. Drivers
77 will typically statically initialize a drm_driver structure, 77 typically statically initialize a drm_driver structure,
78 then pass it to drm_init() at load time. 78 then pass it to drm_init() at load time.
79 </para> 79 </para>
80 80
@@ -88,8 +88,8 @@
88 </para> 88 </para>
89 <programlisting> 89 <programlisting>
90 static struct drm_driver driver = { 90 static struct drm_driver driver = {
91 /* don't use mtrr's here, the Xserver or user space app should 91 /* Don't use MTRRs here; the Xserver or userspace app should
92 * deal with them for intel hardware. 92 * deal with them for Intel hardware.
93 */ 93 */
94 .driver_features = 94 .driver_features =
95 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | 95 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
@@ -154,8 +154,8 @@
154 </programlisting> 154 </programlisting>
155 <para> 155 <para>
156 In the example above, taken from the i915 DRM driver, the driver 156 In the example above, taken from the i915 DRM driver, the driver
157 sets several flags indicating what core features it supports. 157 sets several flags indicating what core features it supports;
158 We'll go over the individual callbacks in later sections. Since 158 we go over the individual callbacks in later sections. Since
159 flags indicate which features your driver supports to the DRM 159 flags indicate which features your driver supports to the DRM
160 core, you need to set most of them prior to calling drm_init(). Some, 160 core, you need to set most of them prior to calling drm_init(). Some,
161 like DRIVER_MODESET can be set later based on user supplied parameters, 161 like DRIVER_MODESET can be set later based on user supplied parameters,
@@ -203,8 +203,8 @@
203 <term>DRIVER_HAVE_IRQ</term><term>DRIVER_IRQ_SHARED</term> 203 <term>DRIVER_HAVE_IRQ</term><term>DRIVER_IRQ_SHARED</term>
204 <listitem> 204 <listitem>
205 <para> 205 <para>
206 DRIVER_HAVE_IRQ indicates whether the driver has a IRQ 206 DRIVER_HAVE_IRQ indicates whether the driver has an IRQ
207 handler, DRIVER_IRQ_SHARED indicates whether the device &amp; 207 handler. DRIVER_IRQ_SHARED indicates whether the device &amp;
208 handler support shared IRQs (note that this is required of 208 handler support shared IRQs (note that this is required of
209 PCI drivers). 209 PCI drivers).
210 </para> 210 </para>
@@ -214,8 +214,8 @@
214 <term>DRIVER_DMA_QUEUE</term> 214 <term>DRIVER_DMA_QUEUE</term>
215 <listitem> 215 <listitem>
216 <para> 216 <para>
217 If the driver queues DMA requests and completes them 217 Should be set if the driver queues DMA requests and completes them
218 asynchronously, this flag should be set. Deprecated. 218 asynchronously. Deprecated.
219 </para> 219 </para>
220 </listitem> 220 </listitem>
221 </varlistentry> 221 </varlistentry>
@@ -238,7 +238,7 @@
238 </variablelist> 238 </variablelist>
239 <para> 239 <para>
240 In this specific case, the driver requires AGP and supports 240 In this specific case, the driver requires AGP and supports
241 IRQs. DMA, as we'll see, is handled by device specific ioctls 241 IRQs. DMA, as discussed later, is handled by device-specific ioctls
242 in this case. It also supports the kernel mode setting APIs, though 242 in this case. It also supports the kernel mode setting APIs, though
243 unlike in the actual i915 driver source, this example unconditionally 243 unlike in the actual i915 driver source, this example unconditionally
244 exports KMS capability. 244 exports KMS capability.
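The hunks above describe statically initializing a drm_driver, picking driver_features flags, and handing the structure to drm_init() at load time. A minimal sketch of that pattern follows; all foo_* names are placeholders and the flag set is illustrative, not taken from a real driver.

static struct drm_driver foo_driver = {
        /* Illustrative flags; a real driver sets the flags matching the
         * features it actually implements before calling drm_init(). */
        .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
                           DRIVER_GEM | DRIVER_MODESET,
        .load        = foo_driver_load,         /* driver-specific hooks */
        .unload      = foo_driver_unload,
        .irq_handler = foo_irq_handler,
        /* ... fops, ioctl table, name/desc/date/version fields ... */
};

static int __init foo_init(void)
{
        return drm_init(&foo_driver);   /* registers the driver with the DRM core */
}
module_init(foo_init);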
@@ -269,36 +269,34 @@
269 initial output configuration. 269 initial output configuration.
270 </para> 270 </para>
271 <para> 271 <para>
272 Note that the tasks performed at driver load time must not 272 If compatibility is a concern (e.g. with drivers converted over
273 conflict with DRM client requirements. For instance, if user 273 to the new interfaces from the old ones), care must be taken to
274 prevent device initialization and control that is incompatible with
275 currently active userspace drivers. For instance, if user
274 level mode setting drivers are in use, it would be problematic 276 level mode setting drivers are in use, it would be problematic
275 to perform output discovery &amp; configuration at load time. 277 to perform output discovery &amp; configuration at load time.
276 Likewise, if pre-memory management aware user level drivers are 278 Likewise, if user-level drivers unaware of memory management are
277 in use, memory management and command buffer setup may need to 279 in use, memory management and command buffer setup may need to
278 be omitted. These requirements are driver specific, and care 280 be omitted. These requirements are driver-specific, and care
279 needs to be taken to keep both old and new applications and 281 needs to be taken to keep both old and new applications and
280 libraries working. The i915 driver supports the "modeset" 282 libraries working. The i915 driver supports the "modeset"
281 module parameter to control whether advanced features are 283 module parameter to control whether advanced features are
282 enabled at load time or in legacy fashion. If compatibility is 284 enabled at load time or in legacy fashion.
283 a concern (e.g. with drivers converted over to the new interfaces
284 from the old ones), care must be taken to prevent incompatible
285 device initialization and control with the currently active
286 userspace drivers.
287 </para> 285 </para>
288 286
289 <sect2> 287 <sect2>
290 <title>Driver private &amp; performance counters</title> 288 <title>Driver private &amp; performance counters</title>
291 <para> 289 <para>
292 The driver private hangs off the main drm_device structure and 290 The driver private hangs off the main drm_device structure and
293 can be used for tracking various device specific bits of 291 can be used for tracking various device-specific bits of
294 information, like register offsets, command buffer status, 292 information, like register offsets, command buffer status,
295 register state for suspend/resume, etc. At load time, a 293 register state for suspend/resume, etc. At load time, a
296 driver can simply allocate one and set drm_device.dev_priv 294 driver may simply allocate one and set drm_device.dev_priv
297 appropriately; at unload the driver can free it and set 295 appropriately; it should be freed and drm_device.dev_priv set
298 drm_device.dev_priv to NULL. 296 to NULL when the driver is unloaded.
299 </para> 297 </para>
300 <para> 298 <para>
301 The DRM supports several counters which can be used for rough 299 The DRM supports several counters which may be used for rough
302 performance characterization. Note that the DRM stat counter 300 performance characterization. Note that the DRM stat counter
303 system is not often used by applications, and supporting 301 system is not often used by applications, and supporting
304 additional counters is completely optional. 302 additional counters is completely optional.
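As a concrete illustration of the driver-private pattern described above, here is a hedged sketch of the load/unload hooks; struct foo_private and the foo_* names are invented, and the field the text calls dev_priv is dev_private in struct drm_device.

static int foo_driver_load(struct drm_device *dev, unsigned long flags)
{
        struct foo_private *dev_priv;

        dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
        if (!dev_priv)
                return -ENOMEM;

        dev->dev_private = dev_priv;    /* the "dev_priv" referred to above */
        return 0;
}

static int foo_driver_unload(struct drm_device *dev)
{
        kfree(dev->dev_private);
        dev->dev_private = NULL;
        return 0;
}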
@@ -307,15 +305,15 @@
307 These interfaces are deprecated and should not be used. If performance 305 These interfaces are deprecated and should not be used. If performance
308 monitoring is desired, the developer should investigate and 306 monitoring is desired, the developer should investigate and
309 potentially enhance the kernel perf and tracing infrastructure to export 307 potentially enhance the kernel perf and tracing infrastructure to export
310 GPU related performance information to performance monitoring 308 GPU related performance information for consumption by performance
311 tools and applications. 309 monitoring tools and applications.
312 </para> 310 </para>
313 </sect2> 311 </sect2>
314 312
315 <sect2> 313 <sect2>
316 <title>Configuring the device</title> 314 <title>Configuring the device</title>
317 <para> 315 <para>
318 Obviously, device configuration will be device specific. 316 Obviously, device configuration is device-specific.
319 However, there are several common operations: finding a 317 However, there are several common operations: finding a
320 device's PCI resources, mapping them, and potentially setting 318 device's PCI resources, mapping them, and potentially setting
321 up an IRQ handler. 319 up an IRQ handler.
@@ -323,10 +321,10 @@
323 <para> 321 <para>
324 Finding &amp; mapping resources is fairly straightforward. The 322 Finding &amp; mapping resources is fairly straightforward. The
325 DRM wrapper functions, drm_get_resource_start() and 323 DRM wrapper functions, drm_get_resource_start() and
326 drm_get_resource_len() can be used to find BARs on the given 324 drm_get_resource_len(), may be used to find BARs on the given
327 drm_device struct. Once those values have been retrieved, the 325 drm_device struct. Once those values have been retrieved, the
328 driver load function can call drm_addmap() to create a new 326 driver load function can call drm_addmap() to create a new
329 mapping for the BAR in question. Note you'll probably want a 327 mapping for the BAR in question. Note that you probably want a
330 drm_local_map_t in your driver private structure to track any 328 drm_local_map_t in your driver private structure to track any
331 mappings you create. 329 mappings you create.
332<!-- !Fdrivers/gpu/drm/drm_bufs.c drm_get_resource_* --> 330<!-- !Fdrivers/gpu/drm/drm_bufs.c drm_get_resource_* -->
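A short sketch of the BAR lookup and drm_addmap() sequence just described; the BAR index, the _DRM_REGISTERS/_DRM_KERNEL flag choice, and the mmio_map field in the driver private are assumptions for illustration.

static int foo_map_registers(struct drm_device *dev, struct foo_private *dev_priv)
{
        resource_size_t base = drm_get_resource_start(dev, 0); /* BAR 0 assumed */
        resource_size_t size = drm_get_resource_len(dev, 0);

        /* dev_priv->mmio_map is a drm_local_map_t *, kept in the driver
         * private structure to track the mapping, as suggested above. */
        return drm_addmap(dev, base, size, _DRM_REGISTERS,
                          _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
}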
@@ -335,20 +333,20 @@
335 <para> 333 <para>
336 if compatibility with other operating systems isn't a concern 334 if compatibility with other operating systems isn't a concern
337 (DRM drivers can run under various BSD variants and OpenSolaris), 335 (DRM drivers can run under various BSD variants and OpenSolaris),
338 native Linux calls can be used for the above, e.g. pci_resource_* 336 native Linux calls may be used for the above, e.g. pci_resource_*
339 and iomap*/iounmap. See the Linux device driver book for more 337 and iomap*/iounmap. See the Linux device driver book for more
340 info. 338 info.
341 </para> 339 </para>
342 <para> 340 <para>
343 Once you have a register map, you can use the DRM_READn() and 341 Once you have a register map, you may use the DRM_READn() and
344 DRM_WRITEn() macros to access the registers on your device, or 342 DRM_WRITEn() macros to access the registers on your device, or
345 use driver specific versions to offset into your MMIO space 343 use driver-specific versions to offset into your MMIO space
346 relative to a driver specific base pointer (see I915_READ for 344 relative to a driver-specific base pointer (see I915_READ for
347 example). 345 an example).
348 </para> 346 </para>
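In the I915_READ style mentioned above, a driver-specific accessor pair might look like the following sketch; dev_priv->regs as an ioremapped MMIO base and the FOO_* names are assumptions.

/* readl()/writel() come from <linux/io.h>; (dev_priv)->regs is assumed to be
 * the ioremapped base of the register BAR mapped earlier. */
#define FOO_READ(dev_priv, reg)        readl((dev_priv)->regs + (reg))
#define FOO_WRITE(dev_priv, reg, val)  writel((val), (dev_priv)->regs + (reg))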
349 <para> 347 <para>
350 If your device supports interrupt generation, you may want to 348 If your device supports interrupt generation, you may want to
351 setup an interrupt handler at driver load time as well. This 349 set up an interrupt handler when the driver is loaded. This
352 is done using the drm_irq_install() function. If your device 350 is done using the drm_irq_install() function. If your device
353 supports vertical blank interrupts, it should call 351 supports vertical blank interrupts, it should call
354 drm_vblank_init() to initialize the core vblank handling code before 352 drm_vblank_init() to initialize the core vblank handling code before
@@ -357,7 +355,7 @@
357 </para> 355 </para>
358<!--!Fdrivers/char/drm/drm_irq.c drm_irq_install--> 356<!--!Fdrivers/char/drm/drm_irq.c drm_irq_install-->
359 <para> 357 <para>
360 Once your interrupt handler is registered (it'll use your 358 Once your interrupt handler is registered (it uses your
361 drm_driver.irq_handler as the actual interrupt handling 359 drm_driver.irq_handler as the actual interrupt handling
362 function), you can safely enable interrupts on your device, 360 function), you can safely enable interrupts on your device,
363 assuming any other state your interrupt handler uses is also 361 assuming any other state your interrupt handler uses is also
@@ -371,10 +369,10 @@
371 using the pci_map_rom() call, a convenience function that 369 using the pci_map_rom() call, a convenience function that
372 takes care of mapping the actual ROM, whether it has been 370 takes care of mapping the actual ROM, whether it has been
373 shadowed into memory (typically at address 0xc0000) or exists 371 shadowed into memory (typically at address 0xc0000) or exists
374 on the PCI device in the ROM BAR. Note that once you've 372 on the PCI device in the ROM BAR. Note that after the ROM
375 mapped the ROM and extracted any necessary information, be 373 has been mapped and any necessary information has been extracted,
376 sure to unmap it; on many devices the ROM address decoder is 374 it should be unmapped; on many devices, the ROM address decoder is
377 shared with other BARs, so leaving it mapped can cause 375 shared with other BARs, so leaving it mapped could cause
378 undesired behavior like hangs or memory corruption. 376 undesired behavior like hangs or memory corruption.
379<!--!Fdrivers/pci/rom.c pci_map_rom--> 377<!--!Fdrivers/pci/rom.c pci_map_rom-->
380 </para> 378 </para>
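A minimal sketch of the map/extract/unmap discipline described above, using the pci_map_rom()/pci_unmap_rom() pair; what gets parsed out of the ROM is of course device-specific.

        size_t rom_size;
        void __iomem *rom = pci_map_rom(dev->pdev, &rom_size);

        if (rom) {
                /* ... copy or parse whatever VBIOS data the driver needs ... */
                pci_unmap_rom(dev->pdev, rom);  /* unmap promptly, as noted above */
        }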
@@ -389,9 +387,9 @@
389 should support a memory manager. 387 should support a memory manager.
390 </para> 388 </para>
391 <para> 389 <para>
392 If your driver supports memory management (it should!), you'll 390 If your driver supports memory management (it should!), you
393 need to set that up at load time as well. How you initialize 391 need to set that up at load time as well. How you initialize
394 it depends on which memory manager you're using, TTM or GEM. 392 it depends on which memory manager you're using: TTM or GEM.
395 </para> 393 </para>
396 <sect3> 394 <sect3>
397 <title>TTM initialization</title> 395 <title>TTM initialization</title>
@@ -401,7 +399,7 @@
401 and devices with dedicated video RAM (VRAM), i.e. most discrete 399 and devices with dedicated video RAM (VRAM), i.e. most discrete
402 graphics devices. If your device has dedicated RAM, supporting 400 graphics devices. If your device has dedicated RAM, supporting
403 TTM is desirable. TTM also integrates tightly with your 401 TTM is desirable. TTM also integrates tightly with your
404 driver specific buffer execution function. See the radeon 402 driver-specific buffer execution function. See the radeon
405 driver for examples. 403 driver for examples.
406 </para> 404 </para>
407 <para> 405 <para>
@@ -429,21 +427,21 @@
429 created by the memory manager at runtime. Your global TTM should 427 created by the memory manager at runtime. Your global TTM should
430 have a type of TTM_GLOBAL_TTM_MEM. The size field for the global 428 have a type of TTM_GLOBAL_TTM_MEM. The size field for the global
431 object should be sizeof(struct ttm_mem_global), and the init and 429 object should be sizeof(struct ttm_mem_global), and the init and
432 release hooks should point at your driver specific init and 430 release hooks should point at your driver-specific init and
433 release routines, which will probably eventually call 431 release routines, which probably eventually call
434 ttm_mem_global_init and ttm_mem_global_release respectively. 432 ttm_mem_global_init and ttm_mem_global_release, respectively.
435 </para> 433 </para>
436 <para> 434 <para>
437 Once your global TTM accounting structure is set up and initialized 435 Once your global TTM accounting structure is set up and initialized
438 (done by calling ttm_global_item_ref on the global object you 436 by calling ttm_global_item_ref() on it,
439 just created), you'll need to create a buffer object TTM to 437 you need to create a buffer object TTM to
440 provide a pool for buffer object allocation by clients and the 438 provide a pool for buffer object allocation by clients and the
441 kernel itself. The type of this object should be TTM_GLOBAL_TTM_BO, 439 kernel itself. The type of this object should be TTM_GLOBAL_TTM_BO,
442 and its size should be sizeof(struct ttm_bo_global). Again, 440 and its size should be sizeof(struct ttm_bo_global). Again,
443 driver specific init and release functions can be provided, 441 driver-specific init and release functions may be provided,
444 likely eventually calling ttm_bo_global_init and 442 likely eventually calling ttm_bo_global_init() and
445 ttm_bo_global_release, respectively. Also like the previous 443 ttm_bo_global_release(), respectively. Also, like the previous
446 object, ttm_global_item_ref is used to create an initial reference 444 object, ttm_global_item_ref() is used to create an initial reference
447 count for the TTM, which will call your initialization function. 445 count for the TTM, which will call your initialization function.
448 </para> 446 </para>
449 </sect3> 447 </sect3>
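Following the names used in the text above (TTM_GLOBAL_TTM_MEM, ttm_global_item_ref), the memory-accounting global might be set up roughly as below; the dev_priv layout and foo_* wrapper functions are assumptions, and later kernels renamed these interfaces to the drm_global_* equivalents.

        struct ttm_global_reference *ref = &dev_priv->mem_global_ref;
        int ret;

        ref->global_type = TTM_GLOBAL_TTM_MEM;
        ref->size = sizeof(struct ttm_mem_global);
        ref->init = &foo_ttm_mem_global_init;        /* wraps ttm_mem_global_init() */
        ref->release = &foo_ttm_mem_global_release;  /* wraps ttm_mem_global_release() */
        ret = ttm_global_item_ref(ref);

        /* The TTM_GLOBAL_TTM_BO object is set up the same way, with
         * size = sizeof(struct ttm_bo_global) and bo-specific hooks. */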
@@ -453,27 +451,26 @@
453 GEM is an alternative to TTM, designed specifically for UMA 451 GEM is an alternative to TTM, designed specifically for UMA
454 devices. It has simpler initialization and execution requirements 452 devices. It has simpler initialization and execution requirements
455 than TTM, but has no VRAM management capability. Core GEM 453 than TTM, but has no VRAM management capability. Core GEM
456 initialization is comprised of a basic drm_mm_init call to create 454 is initialized by calling drm_mm_init() to create
457 a GTT DRM MM object, which provides an address space pool for 455 a GTT DRM MM object, which provides an address space pool for
458 object allocation. In a KMS configuration, the driver will 456 object allocation. In a KMS configuration, the driver
459 need to allocate and initialize a command ring buffer following 457 needs to allocate and initialize a command ring buffer following
460 basic GEM initialization. Most UMA devices have a so-called 458 core GEM initialization. A UMA device usually has what is called a
461 "stolen" memory region, which provides space for the initial 459 "stolen" memory region, which provides space for the initial
462 framebuffer and large, contiguous memory regions required by the 460 framebuffer and large, contiguous memory regions required by the
463 device. This space is not typically managed by GEM, and must 461 device. This space is not typically managed by GEM, and it must
464 be initialized separately into its own DRM MM object. 462 be initialized separately into its own DRM MM object.
465 </para> 463 </para>
466 <para> 464 <para>
467 Initialization will be driver specific, and will depend on 465 Initialization is driver-specific. In the case of Intel
468 the architecture of the device. In the case of Intel
469 integrated graphics chips like 965GM, GEM initialization can 466 integrated graphics chips like 965GM, GEM initialization can
470 be done by calling the internal GEM init function, 467 be done by calling the internal GEM init function,
471 i915_gem_do_init(). Since the 965GM is a UMA device 468 i915_gem_do_init(). Since the 965GM is a UMA device
472 (i.e. it doesn't have dedicated VRAM), GEM will manage 469 (i.e. it doesn't have dedicated VRAM), GEM manages
473 making regular RAM available for GPU operations. Memory set 470 making regular RAM available for GPU operations. Memory set
474 aside by the BIOS (called "stolen" memory by the i915 471 aside by the BIOS (called "stolen" memory by the i915
475 driver) will be managed by the DRM memrange allocator; the 472 driver) is managed by the DRM memrange allocator; the
476 rest of the aperture will be managed by GEM. 473 rest of the aperture is managed by GEM.
477 <programlisting> 474 <programlisting>
478 /* Basic memrange allocator for stolen space (aka vram) */ 475 /* Basic memrange allocator for stolen space (aka vram) */
479 drm_memrange_init(&amp;dev_priv->vram, 0, prealloc_size); 476 drm_memrange_init(&amp;dev_priv->vram, 0, prealloc_size);
@@ -483,7 +480,7 @@
483<!--!Edrivers/char/drm/drm_memrange.c--> 480<!--!Edrivers/char/drm/drm_memrange.c-->
484 </para> 481 </para>
485 <para> 482 <para>
486 Once the memory manager has been set up, we can allocate the 483 Once the memory manager has been set up, we may allocate the
487 command buffer. In the i915 case, this is also done with a 484 command buffer. In the i915 case, this is also done with a
488 GEM function, i915_gem_init_ringbuffer(). 485 GEM function, i915_gem_init_ringbuffer().
489 </para> 486 </para>
@@ -493,16 +490,25 @@
493 <sect2> 490 <sect2>
494 <title>Output configuration</title> 491 <title>Output configuration</title>
495 <para> 492 <para>
496 The final initialization task is output configuration. This involves 493 The final initialization task is output configuration. This involves:
497 finding and initializing the CRTCs, encoders and connectors 494 <itemizedlist>
498 for your device, creating an initial configuration and 495 <listitem>
499 registering a framebuffer console driver. 496 Finding and initializing the CRTCs, encoders, and connectors
497 for the device.
498 </listitem>
499 <listitem>
500 Creating an initial configuration.
501 </listitem>
502 <listitem>
503 Registering a framebuffer console driver.
504 </listitem>
505 </itemizedlist>
500 </para> 506 </para>
501 <sect3> 507 <sect3>
502 <title>Output discovery and initialization</title> 508 <title>Output discovery and initialization</title>
503 <para> 509 <para>
504 Several core functions exist to create CRTCs, encoders and 510 Several core functions exist to create CRTCs, encoders, and
505 connectors, namely drm_crtc_init(), drm_connector_init() and 511 connectors, namely: drm_crtc_init(), drm_connector_init(), and
506 drm_encoder_init(), along with several "helper" functions to 512 drm_encoder_init(), along with several "helper" functions to
507 perform common tasks. 513 perform common tasks.
508 </para> 514 </para>
@@ -555,10 +561,10 @@ void intel_crt_init(struct drm_device *dev)
555 </programlisting> 561 </programlisting>
556 <para> 562 <para>
557 In the example above (again, taken from the i915 driver), a 563 In the example above (again, taken from the i915 driver), a
558 CRT connector and encoder combination is created. A device 564 CRT connector and encoder combination is created. A device-specific
559 specific i2c bus is also created, for fetching EDID data and 565 i2c bus is also created for fetching EDID data and
560 performing monitor detection. Once the process is complete, 566 performing monitor detection. Once the process is complete,
561 the new connector is registered with sysfs, to make its 567 the new connector is registered with sysfs to make its
562 properties available to applications. 568 properties available to applications.
563 </para> 569 </para>
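Stripped down to the core calls, the sequence the i915 CRT example follows looks roughly like the sketch below; the foo container structure, the funcs tables, and the VGA/DAC type choices are assumptions for illustration.

        drm_connector_init(dev, &foo->connector, &foo_connector_funcs,
                           DRM_MODE_CONNECTOR_VGA);
        drm_encoder_init(dev, &foo->encoder, &foo_encoder_funcs,
                         DRM_MODE_ENCODER_DAC);
        drm_mode_connector_attach_encoder(&foo->connector, &foo->encoder);
        /* ... create the DDC i2c bus, attach helper funcs ... */
        drm_sysfs_connector_add(&foo->connector);   /* expose properties via sysfs */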
564 <sect4> 570 <sect4>
@@ -567,12 +573,12 @@ void intel_crt_init(struct drm_device *dev)
567 Since many PC-class graphics devices have similar display output 573 Since many PC-class graphics devices have similar display output
568 designs, the DRM provides a set of helper functions to make 574 designs, the DRM provides a set of helper functions to make
569 output management easier. The core helper routines handle 575 output management easier. The core helper routines handle
570 encoder re-routing and disabling of unused functions following 576 encoder re-routing and the disabling of unused functions following
571 mode set. Using the helpers is optional, but recommended for 577 mode setting. Using the helpers is optional, but recommended for
572 devices with PC-style architectures (i.e. a set of display planes 578 devices with PC-style architectures (i.e. a set of display planes
573 for feeding pixels to encoders which are in turn routed to 579 for feeding pixels to encoders which are in turn routed to
574 connectors). Devices with more complex requirements needing 580 connectors). Devices with more complex requirements needing
575 finer grained management can opt to use the core callbacks 581 finer grained management may opt to use the core callbacks
576 directly. 582 directly.
577 </para> 583 </para>
578 <para> 584 <para>
@@ -580,17 +586,25 @@ void intel_crt_init(struct drm_device *dev)
580 </para> 586 </para>
581 </sect4> 587 </sect4>
582 <para> 588 <para>
583 For each encoder, CRTC and connector, several functions must 589 Each encoder object needs to provide:
584 be provided, depending on the object type. Encoder objects 590 <itemizedlist>
585 need to provide a DPMS (basically on/off) function, mode fixup 591 <listitem>
586 (for converting requested modes into native hardware timings), 592 A DPMS (basically on/off) function.
587 and prepare, set and commit functions for use by the core DRM 593 </listitem>
588 helper functions. Connector helpers need to provide mode fetch and 594 <listitem>
589 validity functions as well as an encoder matching function for 595 A mode-fixup function (for converting requested modes into
590 returning an ideal encoder for a given connector. The core 596 native hardware timings).
591 connector functions include a DPMS callback, (deprecated) 597 </listitem>
592 save/restore routines, detection, mode probing, property handling, 598 <listitem>
593 and cleanup functions. 599 Functions (prepare, set, and commit) for use by the core DRM
600 helper functions.
601 </listitem>
602 </itemizedlist>
603 Connector helpers need to provide functions (mode-fetch, validity,
604 and encoder-matching) for returning an ideal encoder for a given
605 connector. The core connector functions include a DPMS callback,
606 save/restore routines (deprecated), detection, mode probing,
607 property handling, and cleanup functions.
594 </para> 608 </para>
595<!--!Edrivers/char/drm/drm_crtc.h--> 609<!--!Edrivers/char/drm/drm_crtc.h-->
596<!--!Edrivers/char/drm/drm_crtc.c--> 610<!--!Edrivers/char/drm/drm_crtc.c-->
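The encoder callbacks listed above are normally supplied through a drm_encoder_helper_funcs table, sketched below with hypothetical foo_* implementations.

static const struct drm_encoder_helper_funcs foo_encoder_helper_funcs = {
        .dpms       = foo_encoder_dpms,        /* basically on/off */
        .mode_fixup = foo_encoder_mode_fixup,  /* requested mode -> hw timings */
        .prepare    = foo_encoder_prepare,
        .mode_set   = foo_encoder_mode_set,
        .commit     = foo_encoder_commit,
};

/* attached after drm_encoder_init(): */
drm_encoder_helper_add(&foo->encoder, &foo_encoder_helper_funcs);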
@@ -605,23 +619,34 @@ void intel_crt_init(struct drm_device *dev)
605 <title>VBlank event handling</title> 619 <title>VBlank event handling</title>
606 <para> 620 <para>
607 The DRM core exposes two vertical blank related ioctls: 621 The DRM core exposes two vertical blank related ioctls:
608 DRM_IOCTL_WAIT_VBLANK and DRM_IOCTL_MODESET_CTL. 622 <variablelist>
623 <varlistentry>
624 <term>DRM_IOCTL_WAIT_VBLANK</term>
625 <listitem>
626 <para>
627 This takes a struct drm_wait_vblank structure as its argument,
628 and it is used to block or request a signal when a specified
629 vblank event occurs.
630 </para>
631 </listitem>
632 </varlistentry>
633 <varlistentry>
634 <term>DRM_IOCTL_MODESET_CTL</term>
635 <listitem>
636 <para>
637 This should be called by application level drivers before and
638 after mode setting, since on many devices the vertical blank
639 counter is reset at that time. Internally, the DRM snapshots
640 the last vblank count when the ioctl is called with the
641 _DRM_PRE_MODESET command, so that the counter won't go backwards
642 (which is dealt with when _DRM_POST_MODESET is used).
643 </para>
644 </listitem>
645 </varlistentry>
646 </variablelist>
609<!--!Edrivers/char/drm/drm_irq.c--> 647<!--!Edrivers/char/drm/drm_irq.c-->
610 </para> 648 </para>
611 <para> 649 <para>
612 DRM_IOCTL_WAIT_VBLANK takes a struct drm_wait_vblank structure
613 as its argument, and is used to block or request a signal when a
614 specified vblank event occurs.
615 </para>
616 <para>
617 DRM_IOCTL_MODESET_CTL should be called by application level
618 drivers before and after mode setting, since on many devices the
619 vertical blank counter will be reset at that time. Internally,
620 the DRM snapshots the last vblank count when the ioctl is called
621 with the _DRM_PRE_MODESET command so that the counter won't go
622 backwards (which is dealt with when _DRM_POST_MODESET is used).
623 </para>
624 <para>
625 To support the functions above, the DRM core provides several 650 To support the functions above, the DRM core provides several
626 helper functions for tracking vertical blank counters, and 651 helper functions for tracking vertical blank counters, and
627 requires drivers to provide several callbacks: 652 requires drivers to provide several callbacks:
@@ -632,24 +657,24 @@ void intel_crt_init(struct drm_device *dev)
632 register. The enable and disable vblank callbacks should enable 657 register. The enable and disable vblank callbacks should enable
633 and disable vertical blank interrupts, respectively. In the 658 and disable vertical blank interrupts, respectively. In the
634 absence of DRM clients waiting on vblank events, the core DRM 659 absence of DRM clients waiting on vblank events, the core DRM
635 code will use the disable_vblank() function to disable 660 code uses the disable_vblank() function to disable
636 interrupts, which saves power. They'll be re-enabled again when 661 interrupts, which saves power. They are re-enabled again when
637 a client calls the vblank wait ioctl above. 662 a client calls the vblank wait ioctl above.
638 </para> 663 </para>
639 <para> 664 <para>
640 Devices that don't provide a count register can simply use an 665 A device that doesn't provide a count register may simply use an
641 internal atomic counter incremented on every vertical blank 666 internal atomic counter incremented on every vertical blank
642 interrupt, and can make their enable and disable vblank 667 interrupt (and then treat the enable_vblank() and disable_vblank()
643 functions into no-ops. 668 callbacks as no-ops).
644 </para> 669 </para>
645 </sect1> 670 </sect1>
646 671
647 <sect1> 672 <sect1>
648 <title>Memory management</title> 673 <title>Memory management</title>
649 <para> 674 <para>
650 The memory manager lies at the heart of many DRM operations, and 675 The memory manager lies at the heart of many DRM operations; it
651 is also required to support advanced client features like OpenGL 676 is required to support advanced client features like OpenGL
652 pbuffers. The DRM currently contains two memory managers, TTM 677 pbuffers. The DRM currently contains two memory managers: TTM
653 and GEM. 678 and GEM.
654 </para> 679 </para>
655 680
@@ -679,41 +704,46 @@ void intel_crt_init(struct drm_device *dev)
679 <para> 704 <para>
680 GEM-enabled drivers must provide gem_init_object() and 705 GEM-enabled drivers must provide gem_init_object() and
681 gem_free_object() callbacks to support the core memory 706 gem_free_object() callbacks to support the core memory
682 allocation routines. They should also provide several driver 707 allocation routines. They should also provide several driver-specific
683 specific ioctls to support command execution, pinning, buffer 708 ioctls to support command execution, pinning, buffer
684 read &amp; write, mapping, and domain ownership transfers. 709 read &amp; write, mapping, and domain ownership transfers.
685 </para> 710 </para>
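A hedged sketch of the two mandatory GEM callbacks mentioned above; the bodies assume a standalone allocation and foo_* names are placeholders.

static int foo_gem_init_object(struct drm_gem_object *obj)
{
        /* allocate/initialize any driver-private per-object state here */
        return 0;
}

static void foo_gem_free_object(struct drm_gem_object *obj)
{
        /* release driver-private state, then the core object */
        drm_gem_object_release(obj);
        kfree(obj);     /* assumes the object was allocated on its own */
}

/* In struct drm_driver:
 *      .gem_init_object = foo_gem_init_object,
 *      .gem_free_object = foo_gem_free_object,
 */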
686 <para> 711 <para>
687 On a fundamental level, GEM involves several operations: memory 712 On a fundamental level, GEM involves several operations:
688 allocation and freeing, command execution, and aperture management 713 <itemizedlist>
689 at command execution time. Buffer object allocation is relatively 714 <listitem>Memory allocation and freeing</listitem>
715 <listitem>Command execution</listitem>
716 <listitem>Aperture management at command execution time</listitem>
717 </itemizedlist>
718 Buffer object allocation is relatively
690 straightforward and largely provided by Linux's shmem layer, which 719 straightforward and largely provided by Linux's shmem layer, which
691 provides memory to back each object. When mapped into the GTT 720 provides memory to back each object. When mapped into the GTT
692 or used in a command buffer, the backing pages for an object are 721 or used in a command buffer, the backing pages for an object are
693 flushed to memory and marked write combined so as to be coherent 722 flushed to memory and marked write combined so as to be coherent
694 with the GPU. Likewise, when the GPU finishes rendering to an object, 723 with the GPU. Likewise, if the CPU accesses an object after the GPU
695 if the CPU accesses it, it must be made coherent with the CPU's view 724 has finished rendering to the object, then the object must be made
725 coherent with the CPU's view
696 of memory, usually involving GPU cache flushing of various kinds. 726 of memory, usually involving GPU cache flushing of various kinds.
697 This core CPU&lt;-&gt;GPU coherency management is provided by the GEM 727 This core CPU&lt;-&gt;GPU coherency management is provided by a
698 set domain function, which evaluates an object's current domain and 728 device-specific ioctl, which evaluates an object's current domain and
699 performs any necessary flushing or synchronization to put the object 729 performs any necessary flushing or synchronization to put the object
700 into the desired coherency domain (note that the object may be busy, 730 into the desired coherency domain (note that the object may be busy,
701 i.e. an active render target; in that case the set domain function 731 i.e. an active render target; in that case, setting the domain
702 will block the client and wait for rendering to complete before 732 blocks the client and waits for rendering to complete before
703 performing any necessary flushing operations). 733 performing any necessary flushing operations).
704 </para> 734 </para>
705 <para> 735 <para>
706 Perhaps the most important GEM function is providing a command 736 Perhaps the most important GEM function is providing a command
707 execution interface to clients. Client programs construct command 737 execution interface to clients. Client programs construct command
708 buffers containing references to previously allocated memory objects 738 buffers containing references to previously allocated memory objects,
709 and submit them to GEM. At that point, GEM will take care to bind 739 and then submit them to GEM. At that point, GEM takes care to bind
710 all the objects into the GTT, execute the buffer, and provide 740 all the objects into the GTT, execute the buffer, and provide
711 necessary synchronization between clients accessing the same buffers. 741 necessary synchronization between clients accessing the same buffers.
712 This often involves evicting some objects from the GTT and re-binding 742 This often involves evicting some objects from the GTT and re-binding
713 others (a fairly expensive operation), and providing relocation 743 others (a fairly expensive operation), and providing relocation
714 support which hides fixed GTT offsets from clients. Clients must 744 support which hides fixed GTT offsets from clients. Clients must
715 take care not to submit command buffers that reference more objects 745 take care not to submit command buffers that reference more objects
716 than can fit in the GTT or GEM will reject them and no rendering 746 than can fit in the GTT; otherwise, GEM will reject them and no rendering
717 will occur. Similarly, if several objects in the buffer require 747 will occur. Similarly, if several objects in the buffer require
718 fence registers to be allocated for correct rendering (e.g. 2D blits 748 fence registers to be allocated for correct rendering (e.g. 2D blits
719 on pre-965 chips), care must be taken not to require more fence 749 on pre-965 chips), care must be taken not to require more fence
@@ -729,7 +759,7 @@ void intel_crt_init(struct drm_device *dev)
729 <title>Output management</title> 759 <title>Output management</title>
730 <para> 760 <para>
731 At the core of the DRM output management code is a set of 761 At the core of the DRM output management code is a set of
732 structures representing CRTCs, encoders and connectors. 762 structures representing CRTCs, encoders, and connectors.
733 </para> 763 </para>
734 <para> 764 <para>
735 A CRTC is an abstraction representing a part of the chip that 765 A CRTC is an abstraction representing a part of the chip that
@@ -765,21 +795,19 @@ void intel_crt_init(struct drm_device *dev)
765 <sect1> 795 <sect1>
766 <title>Framebuffer management</title> 796 <title>Framebuffer management</title>
767 <para> 797 <para>
768 In order to set a mode on a given CRTC, encoder and connector 798 Clients need to provide a framebuffer object which provides a source
769 configuration, clients need to provide a framebuffer object which 799 of pixels for a CRTC to deliver to the encoder(s) and ultimately the
770 will provide a source of pixels for the CRTC to deliver to the encoder(s) 800 connector(s). A framebuffer is fundamentally a driver-specific memory
771 and ultimately the connector(s) in the configuration. A framebuffer 801 object, made into an opaque handle by the DRM's addfb() function.
772 is fundamentally a driver specific memory object, made into an opaque 802 Once a framebuffer has been created this way, it may be passed to the
773 handle by the DRM addfb function. Once an fb has been created this 803 KMS mode setting routines for use in a completed configuration.
774 way it can be passed to the KMS mode setting routines for use in
775 a configuration.
776 </para> 804 </para>
777 </sect1> 805 </sect1>
778 806
779 <sect1> 807 <sect1>
780 <title>Command submission &amp; fencing</title> 808 <title>Command submission &amp; fencing</title>
781 <para> 809 <para>
782 This should cover a few device specific command submission 810 This should cover a few device-specific command submission
783 implementations. 811 implementations.
784 </para> 812 </para>
785 </sect1> 813 </sect1>
@@ -789,7 +817,7 @@ void intel_crt_init(struct drm_device *dev)
789 <para> 817 <para>
790 The DRM core provides some suspend/resume code, but drivers 818 The DRM core provides some suspend/resume code, but drivers
791 wanting full suspend/resume support should provide save() and 819 wanting full suspend/resume support should provide save() and
792 restore() functions. These will be called at suspend, 820 restore() functions. These are called at suspend,
793 hibernate, or resume time, and should perform any state save or 821 hibernate, or resume time, and should perform any state save or
794 restore required by your device across suspend or hibernate 822 restore required by your device across suspend or hibernate
795 states. 823 states.
@@ -812,8 +840,8 @@ void intel_crt_init(struct drm_device *dev)
812 <para> 840 <para>
813 The DRM core exports several interfaces to applications, 841 The DRM core exports several interfaces to applications,
814 generally intended to be used through corresponding libdrm 842 generally intended to be used through corresponding libdrm
815 wrapper functions. In addition, drivers export device specific 843 wrapper functions. In addition, drivers export device-specific
816 interfaces for use by userspace drivers &amp; device aware 844 interfaces for use by userspace drivers &amp; device-aware
817 applications through ioctls and sysfs files. 845 applications through ioctls and sysfs files.
818 </para> 846 </para>
819 <para> 847 <para>
@@ -822,8 +850,8 @@ void intel_crt_init(struct drm_device *dev)
822 management, memory management, and output management. 850 management, memory management, and output management.
823 </para> 851 </para>
824 <para> 852 <para>
825 Cover generic ioctls and sysfs layout here. Only need high 853 Cover generic ioctls and sysfs layout here. We only need high-level
826 level info, since man pages will cover the rest. 854 info, since man pages should cover the rest.
827 </para> 855 </para>
828 </chapter> 856 </chapter>
829 857
diff --git a/Documentation/cgroups/freezer-subsystem.txt b/Documentation/cgroups/freezer-subsystem.txt
index c21d77742a07..7e62de1e59ff 100644
--- a/Documentation/cgroups/freezer-subsystem.txt
+++ b/Documentation/cgroups/freezer-subsystem.txt
@@ -33,9 +33,9 @@ demonstrate this problem using nested bash shells:
33 33
34 From a second, unrelated bash shell: 34 From a second, unrelated bash shell:
35 $ kill -SIGSTOP 16690 35 $ kill -SIGSTOP 16690
36 $ kill -SIGCONT 16990 36 $ kill -SIGCONT 16690
37 37
38 <at this point 16990 exits and causes 16644 to exit too> 38 <at this point 16690 exits and causes 16644 to exit too>
39 39
40This happens because bash can observe both signals and choose how it 40This happens because bash can observe both signals and choose how it
41responds to them. 41responds to them.
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index 4f3443230d89..edad99abec21 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -349,6 +349,7 @@ STAC92HD83*
349 ref Reference board 349 ref Reference board
350 mic-ref Reference board with power management for ports 350 mic-ref Reference board with power management for ports
351 dell-s14 Dell laptop 351 dell-s14 Dell laptop
352 dell-vostro-3500 Dell Vostro 3500 laptop
352 hp HP laptops with (inverted) mute-LED 353 hp HP laptops with (inverted) mute-LED
353 hp-dv7-4000 HP dv-7 4000 354 hp-dv7-4000 HP dv-7 4000
354 auto BIOS setup (default) 355 auto BIOS setup (default)
diff --git a/MAINTAINERS b/MAINTAINERS
index 4808256446f2..3e8bc6163be6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2342,6 +2342,13 @@ S: Supported
2342F: drivers/gpu/drm/i915 2342F: drivers/gpu/drm/i915
2343F: include/drm/i915* 2343F: include/drm/i915*
2344 2344
2345DRM DRIVERS FOR EXYNOS
2346M: Inki Dae <inki.dae@samsung.com>
2347L: dri-devel@lists.freedesktop.org
2348S: Supported
2349F: drivers/gpu/drm/exynos
2350F: include/drm/exynos*
2351
2345DSCC4 DRIVER 2352DSCC4 DRIVER
2346M: Francois Romieu <romieu@fr.zoreil.com> 2353M: Francois Romieu <romieu@fr.zoreil.com>
2347L: netdev@vger.kernel.org 2354L: netdev@vger.kernel.org
@@ -6122,7 +6129,7 @@ F: sound/
6122SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC) 6129SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC)
6123M: Liam Girdwood <lrg@ti.com> 6130M: Liam Girdwood <lrg@ti.com>
6124M: Mark Brown <broonie@opensource.wolfsonmicro.com> 6131M: Mark Brown <broonie@opensource.wolfsonmicro.com>
6125T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound-2.6.git 6132T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git
6126L: alsa-devel@alsa-project.org (moderated for non-subscribers) 6133L: alsa-devel@alsa-project.org (moderated for non-subscribers)
6127W: http://alsa-project.org/main/index.php/ASoC 6134W: http://alsa-project.org/main/index.php/ASoC
6128S: Supported 6135S: Supported
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index 6260d5deeabc..c7cb0af0eb59 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -406,8 +406,10 @@
406#define __NR_syncfs 335 406#define __NR_syncfs 335
407#define __NR_sendmmsg 336 407#define __NR_sendmmsg 336
408#define __NR_setns 337 408#define __NR_setns 337
409#define __NR_process_vm_readv 338
410#define __NR_process_vm_writev 339
409 411
410#define NR_syscalls 338 412#define NR_syscalls 340
411 413
412#ifdef __32bit_syscall_numbers__ 414#ifdef __32bit_syscall_numbers__
413/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants, 415/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 09d8ec454450..63402f9e9f51 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -84,4 +84,4 @@ sys_call_table:
84/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv 84/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
85/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init 85/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
86/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime 86/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
87/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns 87/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index edbec45d4688..db86b1a0e9a9 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -85,7 +85,7 @@ sys_call_table32:
85/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv 85/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
86 .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init 86 .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init
87/*330*/ .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime 87/*330*/ .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
88 .word sys_syncfs, compat_sys_sendmmsg, sys_setns 88 .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
89 89
90#endif /* CONFIG_COMPAT */ 90#endif /* CONFIG_COMPAT */
91 91
@@ -162,4 +162,4 @@ sys_call_table:
162/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv 162/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
163 .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init 163 .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
164/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime 164/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
165 .word sys_syncfs, sys_sendmmsg, sys_setns 165 .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index a816f24f2d52..a0f768c1d9aa 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -383,6 +383,7 @@ static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
383 return 0; 383 return 0;
384} 384}
385 385
386#ifdef CONFIG_NET
386static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg) 387static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
387{ 388{
388 struct crypto_report_blkcipher rblkcipher; 389 struct crypto_report_blkcipher rblkcipher;
@@ -404,6 +405,12 @@ static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
404nla_put_failure: 405nla_put_failure:
405 return -EMSGSIZE; 406 return -EMSGSIZE;
406} 407}
408#else
409static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
410{
411 return -ENOSYS;
412}
413#endif
407 414
408static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg) 415static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
409 __attribute__ ((unused)); 416 __attribute__ ((unused));
@@ -457,6 +464,7 @@ static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
457 return 0; 464 return 0;
458} 465}
459 466
467#ifdef CONFIG_NET
460static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg) 468static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
461{ 469{
462 struct crypto_report_blkcipher rblkcipher; 470 struct crypto_report_blkcipher rblkcipher;
@@ -478,6 +486,12 @@ static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
478nla_put_failure: 486nla_put_failure:
479 return -EMSGSIZE; 487 return -EMSGSIZE;
480} 488}
489#else
490static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
491{
492 return -ENOSYS;
493}
494#endif
481 495
482static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg) 496static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
483 __attribute__ ((unused)); 497 __attribute__ ((unused));
diff --git a/crypto/aead.c b/crypto/aead.c
index 701556ffaaef..04add3dca6fe 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -111,6 +111,7 @@ static int crypto_init_aead_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
111 return 0; 111 return 0;
112} 112}
113 113
114#ifdef CONFIG_NET
114static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg) 115static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
115{ 116{
116 struct crypto_report_aead raead; 117 struct crypto_report_aead raead;
@@ -132,6 +133,12 @@ static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
132nla_put_failure: 133nla_put_failure:
133 return -EMSGSIZE; 134 return -EMSGSIZE;
134} 135}
136#else
137static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
138{
139 return -ENOSYS;
140}
141#endif
135 142
136static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg) 143static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
137 __attribute__ ((unused)); 144 __attribute__ ((unused));
@@ -190,6 +197,7 @@ static int crypto_init_nivaead_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
190 return 0; 197 return 0;
191} 198}
192 199
200#ifdef CONFIG_NET
193static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg) 201static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
194{ 202{
195 struct crypto_report_aead raead; 203 struct crypto_report_aead raead;
@@ -210,6 +218,12 @@ static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
210nla_put_failure: 218nla_put_failure:
211 return -EMSGSIZE; 219 return -EMSGSIZE;
212} 220}
221#else
222static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
223{
224 return -ENOSYS;
225}
226#endif
213 227
214 228
215static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg) 229static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg)
diff --git a/crypto/ahash.c b/crypto/ahash.c
index a3e6ef99394a..ac93c99cfae8 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -399,6 +399,7 @@ static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
399 return sizeof(struct crypto_shash *); 399 return sizeof(struct crypto_shash *);
400} 400}
401 401
402#ifdef CONFIG_NET
402static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg) 403static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
403{ 404{
404 struct crypto_report_hash rhash; 405 struct crypto_report_hash rhash;
@@ -416,6 +417,12 @@ static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
416nla_put_failure: 417nla_put_failure:
417 return -EMSGSIZE; 418 return -EMSGSIZE;
418} 419}
420#else
421static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
422{
423 return -ENOSYS;
424}
425#endif
419 426
420static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) 427static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
421 __attribute__ ((unused)); 428 __attribute__ ((unused));
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 2572d2600136..1e61d1a888b2 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -494,6 +494,7 @@ static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
494 return crypto_init_blkcipher_ops_async(tfm); 494 return crypto_init_blkcipher_ops_async(tfm);
495} 495}
496 496
497#ifdef CONFIG_NET
497static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg) 498static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
498{ 499{
499 struct crypto_report_blkcipher rblkcipher; 500 struct crypto_report_blkcipher rblkcipher;
@@ -515,6 +516,12 @@ static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
515nla_put_failure: 516nla_put_failure:
516 return -EMSGSIZE; 517 return -EMSGSIZE;
517} 518}
519#else
520static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
521{
522 return -ENOSYS;
523}
524#endif
518 525
519static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg) 526static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
520 __attribute__ ((unused)); 527 __attribute__ ((unused));
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index 2abca780312d..0605a2bbba75 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -44,9 +44,6 @@ static struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
44 44
45 down_read(&crypto_alg_sem); 45 down_read(&crypto_alg_sem);
46 46
47 if (list_empty(&crypto_alg_list))
48 return NULL;
49
50 list_for_each_entry(q, &crypto_alg_list, cra_list) { 47 list_for_each_entry(q, &crypto_alg_list, cra_list) {
51 int match = 0; 48 int match = 0;
52 49
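
The dropped check was doubly wrong: list_for_each_entry() already iterates zero times over an empty list, and the early return would have left crypto_alg_sem held for read. A userspace sketch of the first point, with a simplified circular-list macro standing in for the kernel's <linux/list.h>:

#include <stdio.h>

struct node {
        struct node *next;
        int value;
};

/* Simplified iteration in the spirit of list_for_each_entry(). */
#define list_for_each(pos, head) \
        for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)

int main(void)
{
        struct node head = { &head, 0 };        /* empty circular list */
        struct node *pos;
        int visited = 0;

        /* No list_empty() guard needed: the body never runs when empty. */
        list_for_each(pos, &head)
                visited++;

        printf("visited %d entries\n", visited);
        return 0;
}
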
diff --git a/crypto/pcompress.c b/crypto/pcompress.c
index fefda78a6a2a..2e458e5482d0 100644
--- a/crypto/pcompress.c
+++ b/crypto/pcompress.c
@@ -48,6 +48,7 @@ static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm)
48 return 0; 48 return 0;
49} 49}
50 50
51#ifdef CONFIG_NET
51static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg) 52static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
52{ 53{
53 struct crypto_report_comp rpcomp; 54 struct crypto_report_comp rpcomp;
@@ -62,6 +63,12 @@ static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
62nla_put_failure: 63nla_put_failure:
63 return -EMSGSIZE; 64 return -EMSGSIZE;
64} 65}
66#else
67static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
68{
69 return -ENOSYS;
70}
71#endif
65 72
66static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg) 73static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg)
67 __attribute__ ((unused)); 74 __attribute__ ((unused));
diff --git a/crypto/rng.c b/crypto/rng.c
index feb7de00f437..64f864fa8043 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -60,6 +60,7 @@ static int crypto_init_rng_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
60 return 0; 60 return 0;
61} 61}
62 62
63#ifdef CONFIG_NET
63static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg) 64static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
64{ 65{
65 struct crypto_report_rng rrng; 66 struct crypto_report_rng rrng;
@@ -76,6 +77,12 @@ static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
76nla_put_failure: 77nla_put_failure:
77 return -EMSGSIZE; 78 return -EMSGSIZE;
78} 79}
80#else
81static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
82{
83 return -ENOSYS;
84}
85#endif
79 86
80static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg) 87static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
81 __attribute__ ((unused)); 88 __attribute__ ((unused));
diff --git a/crypto/shash.c b/crypto/shash.c
index ea8a9c6e21e3..9100912716ae 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -524,6 +524,7 @@ static unsigned int crypto_shash_extsize(struct crypto_alg *alg)
524 return alg->cra_ctxsize; 524 return alg->cra_ctxsize;
525} 525}
526 526
527#ifdef CONFIG_NET
527static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg) 528static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
528{ 529{
529 struct crypto_report_hash rhash; 530 struct crypto_report_hash rhash;
@@ -541,6 +542,12 @@ static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
541nla_put_failure: 542nla_put_failure:
542 return -EMSGSIZE; 543 return -EMSGSIZE;
543} 544}
545#else
546static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
547{
548 return -ENOSYS;
549}
550#endif
544 551
545static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg) 552static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
546 __attribute__ ((unused)); 553 __attribute__ ((unused));
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 434a6c011675..95706fa24c73 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -669,7 +669,7 @@ struct srcu_notifier_head *opp_get_notifier(struct device *dev)
669 struct device_opp *dev_opp = find_device_opp(dev); 669 struct device_opp *dev_opp = find_device_opp(dev);
670 670
671 if (IS_ERR(dev_opp)) 671 if (IS_ERR(dev_opp))
672 return ERR_PTR(PTR_ERR(dev_opp)); /* matching type */ 672 return ERR_CAST(dev_opp); /* matching type */
673 673
674 return &dev_opp->head; 674 return &dev_opp->head;
675} 675}
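
ERR_CAST() produces the same bits as ERR_PTR(PTR_ERR(ptr)) but states the intent, re-casting an error pointer to a different pointer type, directly. A rough standalone approximation of the helpers, only to show the equivalence; the real definitions live in include/linux/err.h:

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}
/* Re-cast an error pointer without losing the encoded errno. */
static inline void *ERR_CAST(const void *ptr) { return (void *)ptr; }

struct device_opp { int dummy; };
struct srcu_notifier_head { int dummy; };

static struct srcu_notifier_head *get_notifier(struct device_opp *dev_opp)
{
        if (IS_ERR(dev_opp))
                return ERR_CAST(dev_opp);  /* same as ERR_PTR(PTR_ERR(dev_opp)) */
        return NULL;                       /* the driver returns &dev_opp->head */
}

int main(void)
{
        struct device_opp *bad = ERR_PTR(-22);  /* -EINVAL */

        printf("propagated error: %ld\n", PTR_ERR(get_notifier(bad)));
        return 0;
}
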
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 66cd0b8096ca..c92424ca1a55 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1186,10 +1186,11 @@ static void gen6_cleanup(void)
1186/* Certain Gen5 chipsets require idling the GPU before 1186/* Certain Gen5 chipsets require idling the GPU before
1187 * unmapping anything from the GTT when VT-d is enabled. 1187 * unmapping anything from the GTT when VT-d is enabled.
1188 */ 1188 */
1189extern int intel_iommu_gfx_mapped;
1190static inline int needs_idle_maps(void) 1189static inline int needs_idle_maps(void)
1191{ 1190{
1191#ifdef CONFIG_INTEL_IOMMU
1192 const unsigned short gpu_devid = intel_private.pcidev->device; 1192 const unsigned short gpu_devid = intel_private.pcidev->device;
1193 extern int intel_iommu_gfx_mapped;
1193 1194
1194 /* Query intel_iommu to see if we need the workaround. Presumably that 1195 /* Query intel_iommu to see if we need the workaround. Presumably that
1195 * was loaded first. 1196 * was loaded first.
@@ -1198,7 +1199,7 @@ static inline int needs_idle_maps(void)
1198 gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) && 1199 gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
1199 intel_iommu_gfx_mapped) 1200 intel_iommu_gfx_mapped)
1200 return 1; 1201 return 1;
1201 1202#endif
1202 return 0; 1203 return 0;
1203} 1204}
1204 1205
@@ -1236,7 +1237,7 @@ static int i9xx_setup(void)
1236 intel_private.gtt_bus_addr = reg_addr + gtt_offset; 1237 intel_private.gtt_bus_addr = reg_addr + gtt_offset;
1237 } 1238 }
1238 1239
1239 if (needs_idle_maps()); 1240 if (needs_idle_maps())
1240 intel_private.base.do_idle_maps = 1; 1241 intel_private.base.do_idle_maps = 1;
1241 1242
1242 intel_i9xx_setup_flush(); 1243 intel_i9xx_setup_flush();
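
The i9xx_setup() hunk above removes a stray semicolon that had turned the conditional into an empty statement, so do_idle_maps was being enabled unconditionally. A tiny demonstration of the failure mode, with invented names:

#include <stdio.h>

static int needs_workaround(void)
{
        return 0;       /* pretend the workaround is NOT needed */
}

int main(void)
{
        int broken = 0, fixed = 0;

        if (needs_workaround());        /* stray ';' terminates the if... */
                broken = 1;             /* ...so this always executes */

        if (needs_workaround())
                fixed = 1;              /* executes only when actually needed */

        printf("broken=%d fixed=%d\n", broken, fixed);
        return 0;
}
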
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 785127cb281b..1368826ef284 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -9,7 +9,6 @@ menuconfig DRM
9 depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU 9 depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU
10 select I2C 10 select I2C
11 select I2C_ALGOBIT 11 select I2C_ALGOBIT
12 select SLOW_WORK
13 help 12 help
14 Kernel-level support for the Direct Rendering Infrastructure (DRI) 13 Kernel-level support for the Direct Rendering Infrastructure (DRI)
15 introduced in XFree86 4.0. If you say Y here, you need to select 14 introduced in XFree86 4.0. If you say Y here, you need to select
@@ -96,6 +95,7 @@ config DRM_I915
96 select FB_CFB_IMAGEBLIT 95 select FB_CFB_IMAGEBLIT
97 # i915 depends on ACPI_VIDEO when ACPI is enabled 96 # i915 depends on ACPI_VIDEO when ACPI is enabled
98 # but for select to work, need to select ACPI_VIDEO's dependencies, ick 97 # but for select to work, need to select ACPI_VIDEO's dependencies, ick
98 select BACKLIGHT_LCD_SUPPORT if ACPI
99 select BACKLIGHT_CLASS_DEVICE if ACPI 99 select BACKLIGHT_CLASS_DEVICE if ACPI
100 select VIDEO_OUTPUT_CONTROL if ACPI 100 select VIDEO_OUTPUT_CONTROL if ACPI
101 select INPUT if ACPI 101 select INPUT if ACPI
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 9a2e2a14b3bb..405c63b9d539 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2118,8 +2118,10 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags,
2118 property->num_values = num_values; 2118 property->num_values = num_values;
2119 INIT_LIST_HEAD(&property->enum_blob_list); 2119 INIT_LIST_HEAD(&property->enum_blob_list);
2120 2120
2121 if (name) 2121 if (name) {
2122 strncpy(property->name, name, DRM_PROP_NAME_LEN); 2122 strncpy(property->name, name, DRM_PROP_NAME_LEN);
2123 property->name[DRM_PROP_NAME_LEN-1] = '\0';
2124 }
2123 2125
2124 list_add_tail(&property->head, &dev->mode_config.property_list); 2126 list_add_tail(&property->head, &dev->mode_config.property_list);
2125 return property; 2127 return property;
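
strncpy() leaves the destination unterminated whenever the source is at least as long as the buffer, which is exactly what the added property->name[DRM_PROP_NAME_LEN-1] = '\0' protects against. A short sketch of the hazard and the fix; the buffer size here is arbitrary:

#include <stdio.h>
#include <string.h>

#define NAME_LEN 8

int main(void)
{
        char name[NAME_LEN];
        const char *src = "a-rather-long-property-name";

        strncpy(name, src, NAME_LEN);
        /* name is now full and carries no terminating NUL; treating it as a
         * C string at this point would read past the end of the buffer. */

        name[NAME_LEN - 1] = '\0';      /* explicit termination, as in the patch */
        printf("truncated name: \"%s\"\n", name);
        return 0;
}
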
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 2957636161e8..3969f7553fe7 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -484,6 +484,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
484 struct drm_connector *save_connectors, *connector; 484 struct drm_connector *save_connectors, *connector;
485 int count = 0, ro, fail = 0; 485 int count = 0, ro, fail = 0;
486 struct drm_crtc_helper_funcs *crtc_funcs; 486 struct drm_crtc_helper_funcs *crtc_funcs;
487 struct drm_mode_set save_set;
487 int ret = 0; 488 int ret = 0;
488 int i; 489 int i;
489 490
@@ -556,6 +557,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
556 save_connectors[count++] = *connector; 557 save_connectors[count++] = *connector;
557 } 558 }
558 559
560 save_set.crtc = set->crtc;
561 save_set.mode = &set->crtc->mode;
562 save_set.x = set->crtc->x;
563 save_set.y = set->crtc->y;
564 save_set.fb = set->crtc->fb;
565
559 /* We should be able to check here if the fb has the same properties 566 /* We should be able to check here if the fb has the same properties
560 * and then just flip_or_move it */ 567 * and then just flip_or_move it */
561 if (set->crtc->fb != set->fb) { 568 if (set->crtc->fb != set->fb) {
@@ -721,6 +728,12 @@ fail:
721 *connector = save_connectors[count++]; 728 *connector = save_connectors[count++];
722 } 729 }
723 730
731 /* Try to restore the config */
732 if (mode_changed &&
733 !drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x,
734 save_set.y, save_set.fb))
735 DRM_ERROR("failed to restore config after modeset failure\n");
736
724 kfree(save_connectors); 737 kfree(save_connectors);
725 kfree(save_encoders); 738 kfree(save_encoders);
726 kfree(save_crtcs); 739 kfree(save_crtcs);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index d067c12ba940..1c7a1c0d3edd 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -118,7 +118,10 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count,
118 tmp->minor = minor; 118 tmp->minor = minor;
119 tmp->dent = ent; 119 tmp->dent = ent;
120 tmp->info_ent = &files[i]; 120 tmp->info_ent = &files[i];
121 list_add(&(tmp->list), &(minor->debugfs_nodes.list)); 121
122 mutex_lock(&minor->debugfs_lock);
123 list_add(&tmp->list, &minor->debugfs_list);
124 mutex_unlock(&minor->debugfs_lock);
122 } 125 }
123 return 0; 126 return 0;
124 127
@@ -146,7 +149,8 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
146 char name[64]; 149 char name[64];
147 int ret; 150 int ret;
148 151
149 INIT_LIST_HEAD(&minor->debugfs_nodes.list); 152 INIT_LIST_HEAD(&minor->debugfs_list);
153 mutex_init(&minor->debugfs_lock);
150 sprintf(name, "%d", minor_id); 154 sprintf(name, "%d", minor_id);
151 minor->debugfs_root = debugfs_create_dir(name, root); 155 minor->debugfs_root = debugfs_create_dir(name, root);
152 if (!minor->debugfs_root) { 156 if (!minor->debugfs_root) {
@@ -192,8 +196,9 @@ int drm_debugfs_remove_files(struct drm_info_list *files, int count,
192 struct drm_info_node *tmp; 196 struct drm_info_node *tmp;
193 int i; 197 int i;
194 198
199 mutex_lock(&minor->debugfs_lock);
195 for (i = 0; i < count; i++) { 200 for (i = 0; i < count; i++) {
196 list_for_each_safe(pos, q, &minor->debugfs_nodes.list) { 201 list_for_each_safe(pos, q, &minor->debugfs_list) {
197 tmp = list_entry(pos, struct drm_info_node, list); 202 tmp = list_entry(pos, struct drm_info_node, list);
198 if (tmp->info_ent == &files[i]) { 203 if (tmp->info_ent == &files[i]) {
199 debugfs_remove(tmp->dent); 204 debugfs_remove(tmp->dent);
@@ -202,6 +207,7 @@ int drm_debugfs_remove_files(struct drm_info_list *files, int count,
202 } 207 }
203 } 208 }
204 } 209 }
210 mutex_unlock(&minor->debugfs_lock);
205 return 0; 211 return 0;
206} 212}
207EXPORT_SYMBOL(drm_debugfs_remove_files); 213EXPORT_SYMBOL(drm_debugfs_remove_files);
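
The debugfs changes swap the open-coded list head for a list protected by the new debugfs_lock mutex, so registration and removal of info nodes can no longer race. A compressed sketch of that locking discipline, using POSIX threads in place of kernel mutexes and a hand-rolled singly linked list:

#include <pthread.h>
#include <stdio.h>

struct node {
        struct node *next;
        int id;
};

static struct node *node_list;                  /* stand-in for minor->debugfs_list */
static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;

static void add_node(struct node *n)
{
        pthread_mutex_lock(&node_lock);         /* serialize concurrent adds */
        n->next = node_list;
        node_list = n;
        pthread_mutex_unlock(&node_lock);
}

static void remove_node(int id)
{
        struct node **pp;

        pthread_mutex_lock(&node_lock);         /* walk and unlink under the lock */
        for (pp = &node_list; *pp; pp = &(*pp)->next) {
                if ((*pp)->id == id) {
                        *pp = (*pp)->next;
                        break;
                }
        }
        pthread_mutex_unlock(&node_lock);
}

int main(void)
{
        struct node a = { NULL, 1 }, b = { NULL, 2 };

        add_node(&a);
        add_node(&b);
        remove_node(1);
        printf("remaining head id: %d\n", node_list ? node_list->id : -1);
        return 0;
}
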
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index fc81af9dbf42..40c187c60f44 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -125,7 +125,7 @@ static struct drm_ioctl_desc drm_ioctls[] = {
125 DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 125 DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
126 DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 126 DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
127 127
128 DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0), 128 DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
129 129
130 DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0), 130 DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
131 131
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index cb3794a00f98..68b756253f9f 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -407,13 +407,16 @@ int drm_irq_uninstall(struct drm_device *dev)
407 /* 407 /*
408 * Wake up any waiters so they don't hang. 408 * Wake up any waiters so they don't hang.
409 */ 409 */
410 spin_lock_irqsave(&dev->vbl_lock, irqflags); 410 if (dev->num_crtcs) {
411 for (i = 0; i < dev->num_crtcs; i++) { 411 spin_lock_irqsave(&dev->vbl_lock, irqflags);
412 DRM_WAKEUP(&dev->vbl_queue[i]); 412 for (i = 0; i < dev->num_crtcs; i++) {
413 dev->vblank_enabled[i] = 0; 413 DRM_WAKEUP(&dev->vbl_queue[i]);
414 dev->last_vblank[i] = dev->driver->get_vblank_counter(dev, i); 414 dev->vblank_enabled[i] = 0;
415 dev->last_vblank[i] =
416 dev->driver->get_vblank_counter(dev, i);
417 }
418 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
415 } 419 }
416 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
417 420
418 if (!irq_enabled) 421 if (!irq_enabled)
419 return -EINVAL; 422 return -EINVAL;
@@ -1125,6 +1128,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
1125 trace_drm_vblank_event_delivered(current->pid, pipe, 1128 trace_drm_vblank_event_delivered(current->pid, pipe,
1126 vblwait->request.sequence); 1129 vblwait->request.sequence);
1127 } else { 1130 } else {
1131 /* drm_handle_vblank_events will call drm_vblank_put */
1128 list_add_tail(&e->base.link, &dev->vblank_event_list); 1132 list_add_tail(&e->base.link, &dev->vblank_event_list);
1129 vblwait->reply.sequence = vblwait->request.sequence; 1133 vblwait->reply.sequence = vblwait->request.sequence;
1130 } 1134 }
@@ -1205,8 +1209,12 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
1205 goto done; 1209 goto done;
1206 } 1210 }
1207 1211
1208 if (flags & _DRM_VBLANK_EVENT) 1212 if (flags & _DRM_VBLANK_EVENT) {
1213 /* must hold on to the vblank ref until the event fires
1214 * drm_vblank_put will be called asynchronously
1215 */
1209 return drm_queue_vblank_event(dev, crtc, vblwait, file_priv); 1216 return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
1217 }
1210 1218
1211 if ((flags & _DRM_VBLANK_NEXTONMISS) && 1219 if ((flags & _DRM_VBLANK_NEXTONMISS) &&
1212 (seq - vblwait->request.sequence) <= (1<<23)) { 1220 (seq - vblwait->request.sequence) <= (1<<23)) {
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d14b44e13f51..4f40f1ce1d8e 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1506,7 +1506,10 @@ drm_add_fake_info_node(struct drm_minor *minor,
1506 node->minor = minor; 1506 node->minor = minor;
1507 node->dent = ent; 1507 node->dent = ent;
1508 node->info_ent = (void *) key; 1508 node->info_ent = (void *) key;
1509 list_add(&node->list, &minor->debugfs_nodes.list); 1509
1510 mutex_lock(&minor->debugfs_lock);
1511 list_add(&node->list, &minor->debugfs_list);
1512 mutex_unlock(&minor->debugfs_lock);
1510 1513
1511 return 0; 1514 return 0;
1512} 1515}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index cc531bb59c26..e9c2cfe45daa 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -789,8 +789,8 @@ static struct vm_operations_struct i915_gem_vm_ops = {
789}; 789};
790 790
791static struct drm_driver driver = { 791static struct drm_driver driver = {
792 /* don't use mtrr's here, the Xserver or user space app should 792 /* Don't use MTRRs here; the Xserver or userspace app should
793 * deal with them for intel hardware. 793 * deal with them for Intel hardware.
794 */ 794 */
795 .driver_features = 795 .driver_features =
796 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ 796 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6651c36b6e8a..d18b07adcffa 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1396,7 +1396,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
1396 1396
1397 if (obj->base.size > dev_priv->mm.gtt_mappable_end) { 1397 if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
1398 ret = -E2BIG; 1398 ret = -E2BIG;
1399 goto unlock; 1399 goto out;
1400 } 1400 }
1401 1401
1402 if (obj->madv != I915_MADV_WILLNEED) { 1402 if (obj->madv != I915_MADV_WILLNEED) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 032a82098136..5fc201b49d30 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -640,10 +640,9 @@ static int
640nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk) 640nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk)
641{ 641{
642 struct drm_nouveau_private *dev_priv = dev->dev_private; 642 struct drm_nouveau_private *dev_priv = dev->dev_private;
643 uint32_t reg0 = nv_rd32(dev, reg + 0);
644 uint32_t reg1 = nv_rd32(dev, reg + 4);
645 struct nouveau_pll_vals pll; 643 struct nouveau_pll_vals pll;
646 struct pll_lims pll_limits; 644 struct pll_lims pll_limits;
645 u32 ctrl, mask, coef;
647 int ret; 646 int ret;
648 647
649 ret = get_pll_limits(dev, reg, &pll_limits); 648 ret = get_pll_limits(dev, reg, &pll_limits);
@@ -654,15 +653,20 @@ nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk)
654 if (!clk) 653 if (!clk)
655 return -ERANGE; 654 return -ERANGE;
656 655
657 reg0 = (reg0 & 0xfff8ffff) | (pll.log2P << 16); 656 coef = pll.N1 << 8 | pll.M1;
658 reg1 = (reg1 & 0xffff0000) | (pll.N1 << 8) | pll.M1; 657 ctrl = pll.log2P << 16;
659 658 mask = 0x00070000;
660 if (dev_priv->vbios.execute) { 659 if (reg == 0x004008) {
661 still_alive(); 660 mask |= 0x01f80000;
662 nv_wr32(dev, reg + 4, reg1); 661 ctrl |= (pll_limits.log2p_bias << 19);
663 nv_wr32(dev, reg + 0, reg0); 662 ctrl |= (pll.log2P << 22);
664 } 663 }
665 664
665 if (!dev_priv->vbios.execute)
666 return 0;
667
668 nv_mask(dev, reg + 0, mask, ctrl);
669 nv_wr32(dev, reg + 4, coef);
666 return 0; 670 return 0;
667} 671}
668 672
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 7226f419e178..7cc37e690860 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -148,7 +148,7 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
148 148
149 if (dev_priv->card_type == NV_10 && 149 if (dev_priv->card_type == NV_10 &&
150 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && 150 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
151 nvbo->bo.mem.num_pages < vram_pages / 2) { 151 nvbo->bo.mem.num_pages < vram_pages / 4) {
152 /* 152 /*
153 * Make sure that the color and depth buffers are handled 153 * Make sure that the color and depth buffers are handled
154 * by independent memory controller units. Up to a 9x 154 * by independent memory controller units. Up to a 9x
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index a319d5646ea9..bb6ec9ef8676 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -158,6 +158,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
158 INIT_LIST_HEAD(&chan->nvsw.vbl_wait); 158 INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
159 INIT_LIST_HEAD(&chan->nvsw.flip); 159 INIT_LIST_HEAD(&chan->nvsw.flip);
160 INIT_LIST_HEAD(&chan->fence.pending); 160 INIT_LIST_HEAD(&chan->fence.pending);
161 spin_lock_init(&chan->fence.lock);
161 162
162 /* setup channel's memory and vm */ 163 /* setup channel's memory and vm */
163 ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle); 164 ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index e0d275e1c96c..cea6696b1906 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -710,7 +710,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
710 case OUTPUT_DP: 710 case OUTPUT_DP:
711 max_clock = nv_encoder->dp.link_nr; 711 max_clock = nv_encoder->dp.link_nr;
712 max_clock *= nv_encoder->dp.link_bw; 712 max_clock *= nv_encoder->dp.link_bw;
713 clock = clock * nouveau_connector_bpp(connector) / 8; 713 clock = clock * nouveau_connector_bpp(connector) / 10;
714 break; 714 break;
715 default: 715 default:
716 BUG_ON(1); 716 BUG_ON(1);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 14a8627efe4d..3a4cc32b9e44 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -487,6 +487,7 @@ int nouveau_fbcon_init(struct drm_device *dev)
487{ 487{
488 struct drm_nouveau_private *dev_priv = dev->dev_private; 488 struct drm_nouveau_private *dev_priv = dev->dev_private;
489 struct nouveau_fbdev *nfbdev; 489 struct nouveau_fbdev *nfbdev;
490 int preferred_bpp;
490 int ret; 491 int ret;
491 492
492 nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL); 493 nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
@@ -505,7 +506,15 @@ int nouveau_fbcon_init(struct drm_device *dev)
505 } 506 }
506 507
507 drm_fb_helper_single_add_all_connectors(&nfbdev->helper); 508 drm_fb_helper_single_add_all_connectors(&nfbdev->helper);
508 drm_fb_helper_initial_config(&nfbdev->helper, 32); 509
510 if (dev_priv->vram_size <= 32 * 1024 * 1024)
511 preferred_bpp = 8;
512 else if (dev_priv->vram_size <= 64 * 1024 * 1024)
513 preferred_bpp = 16;
514 else
515 preferred_bpp = 32;
516
517 drm_fb_helper_initial_config(&nfbdev->helper, preferred_bpp);
509 return 0; 518 return 0;
510} 519}
511 520
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 81116cfea275..2f6daae68b9d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -539,8 +539,6 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
539 return ret; 539 return ret;
540 } 540 }
541 541
542 INIT_LIST_HEAD(&chan->fence.pending);
543 spin_lock_init(&chan->fence.lock);
544 atomic_set(&chan->fence.last_sequence_irq, 0); 542 atomic_set(&chan->fence.last_sequence_irq, 0);
545 return 0; 543 return 0;
546} 544}
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index c6143df48b9f..d39b2202b197 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -333,7 +333,7 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what,
333 333
334 NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index); 334 NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index);
335 335
336 for (i = 0; info[i].addr; i++) { 336 for (i = 0; i2c && info[i].addr; i++) {
337 if (nouveau_probe_i2c_addr(i2c, info[i].addr) && 337 if (nouveau_probe_i2c_addr(i2c, info[i].addr) &&
338 (!match || match(i2c, &info[i]))) { 338 (!match || match(i2c, &info[i]))) {
339 NV_INFO(dev, "Detected %s: %s\n", what, info[i].type); 339 NV_INFO(dev, "Detected %s: %s\n", what, info[i].type);
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index 9f178aa94162..33d03fbf00df 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -239,7 +239,7 @@ nouveau_perf_init(struct drm_device *dev)
239 if(version == 0x15) { 239 if(version == 0x15) {
240 memtimings->timing = 240 memtimings->timing =
241 kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL); 241 kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL);
242 if(!memtimings) { 242 if (!memtimings->timing) {
243 NV_WARN(dev,"Could not allocate memtiming table\n"); 243 NV_WARN(dev,"Could not allocate memtiming table\n");
244 return; 244 return;
245 } 245 }
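
The nouveau_perf fix tests the pointer that kcalloc() just produced instead of the containing structure: memtimings was dereferenced on the line above, so the old "if (!memtimings)" could never catch an allocation failure. A minimal reproduction, with calloc() standing in for kcalloc():

#include <stdio.h>
#include <stdlib.h>

struct timing { int value; };
struct memtimings_table {
        struct timing *timing;
        int nr;
};

int main(void)
{
        struct memtimings_table table = { NULL, 0 };
        struct memtimings_table *memtimings = &table;

        memtimings->timing = calloc(4, sizeof(*memtimings->timing));

        if (!memtimings)                /* old check: can never be true here */
                printf("never reached\n");

        if (!memtimings->timing) {      /* fixed check: catches the failed allocation */
                printf("could not allocate timing table\n");
                return 1;
        }

        free(memtimings->timing);
        return 0;
}
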
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 82478e0998e5..d8831ab42bb9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -579,6 +579,14 @@ nouveau_card_init(struct drm_device *dev)
579 if (ret) 579 if (ret)
580 goto out_display_early; 580 goto out_display_early;
581 581
582 /* workaround an odd issue on nvc1 by disabling the device's
583 * nosnoop capability. hopefully won't cause issues until a
584 * better fix is found - assuming there is one...
585 */
586 if (dev_priv->chipset == 0xc1) {
587 nv_mask(dev, 0x00088080, 0x00000800, 0x00000000);
588 }
589
582 nouveau_pm_init(dev); 590 nouveau_pm_init(dev);
583 591
584 ret = engine->vram.init(dev); 592 ret = engine->vram.init(dev);
@@ -1102,12 +1110,13 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
1102 dev_priv->noaccel = !!nouveau_noaccel; 1110 dev_priv->noaccel = !!nouveau_noaccel;
1103 if (nouveau_noaccel == -1) { 1111 if (nouveau_noaccel == -1) {
1104 switch (dev_priv->chipset) { 1112 switch (dev_priv->chipset) {
1105 case 0xc1: /* known broken */ 1113#if 0
1106 case 0xc8: /* never tested */ 1114 case 0xXX: /* known broken */
1107 NV_INFO(dev, "acceleration disabled by default, pass " 1115 NV_INFO(dev, "acceleration disabled by default, pass "
1108 "noaccel=0 to force enable\n"); 1116 "noaccel=0 to force enable\n");
1109 dev_priv->noaccel = true; 1117 dev_priv->noaccel = true;
1110 break; 1118 break;
1119#endif
1111 default: 1120 default:
1112 dev_priv->noaccel = false; 1121 dev_priv->noaccel = false;
1113 break; 1122 break;
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
index bbc0b9c7e1f7..e676b0d53478 100644
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ b/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -57,12 +57,14 @@ read_pll_2(struct drm_device *dev, u32 reg)
57 int P = (ctrl & 0x00070000) >> 16; 57 int P = (ctrl & 0x00070000) >> 16;
58 u32 ref = 27000, clk = 0; 58 u32 ref = 27000, clk = 0;
59 59
60 if (ctrl & 0x80000000) 60 if ((ctrl & 0x80000000) && M1) {
61 clk = ref * N1 / M1; 61 clk = ref * N1 / M1;
62 62 if ((ctrl & 0x40000100) == 0x40000000) {
63 if (!(ctrl & 0x00000100)) { 63 if (M2)
64 if (ctrl & 0x40000000) 64 clk = clk * N2 / M2;
65 clk = clk * N2 / M2; 65 else
66 clk = 0;
67 }
66 } 68 }
67 69
68 return clk >> P; 70 return clk >> P;
@@ -177,6 +179,11 @@ nv40_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
177 } 179 }
178 180
179 /* memory clock */ 181 /* memory clock */
182 if (!perflvl->memory) {
183 info->mpll_ctrl = 0x00000000;
184 goto out;
185 }
186
180 ret = nv40_calc_pll(dev, 0x004020, &pll, perflvl->memory, 187 ret = nv40_calc_pll(dev, 0x004020, &pll, perflvl->memory,
181 &N1, &M1, &N2, &M2, &log2P); 188 &N1, &M1, &N2, &M2, &log2P);
182 if (ret < 0) 189 if (ret < 0)
@@ -264,6 +271,9 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
264 mdelay(5); 271 mdelay(5);
265 nv_mask(dev, 0x00c040, 0x00000333, info->ctrl); 272 nv_mask(dev, 0x00c040, 0x00000333, info->ctrl);
266 273
274 if (!info->mpll_ctrl)
275 goto resume;
276
267 /* wait for vblank start on active crtcs, disable memory access */ 277 /* wait for vblank start on active crtcs, disable memory access */
268 for (i = 0; i < 2; i++) { 278 for (i = 0; i < 2; i++) {
269 if (!(crtc_mask & (1 << i))) 279 if (!(crtc_mask & (1 << i)))
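
read_pll_2() now refuses to divide by M1 or M2 when either coefficient reads back as zero, reporting a clock of 0 instead of faulting, and the clocks_pre/clocks_set pair skips memory reclocking entirely when the perf level carries no memory clock. A compact model of the guarded computation; the register decoding is heavily simplified:

#include <stdio.h>
#include <stdint.h>

/* Derive an output clock from PLL coefficients while tolerating zero
 * dividers instead of tripping a divide-by-zero. */
static uint32_t pll_clock(uint32_t ref_khz, int N1, int M1, int N2, int M2,
                          int P, int use_second_stage)
{
        uint32_t clk = 0;

        if (M1)
                clk = ref_khz * N1 / M1;
        if (use_second_stage) {
                if (M2)
                        clk = clk * N2 / M2;
                else
                        clk = 0;        /* unusable coefficients: report 0 */
        }
        return clk >> P;
}

int main(void)
{
        printf("good coefficients: %u kHz\n", pll_clock(27000, 100, 3, 1, 1, 1, 0));
        printf("zero M1:           %u kHz\n", pll_clock(27000, 100, 0, 1, 1, 1, 0));
        return 0;
}
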
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 8c979b31ff61..ac601f7c4e1a 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -131,8 +131,8 @@ nv50_graph_init(struct drm_device *dev, int engine)
131 NV_DEBUG(dev, "\n"); 131 NV_DEBUG(dev, "\n");
132 132
133 /* master reset */ 133 /* master reset */
134 nv_mask(dev, 0x000200, 0x00200100, 0x00000000); 134 nv_mask(dev, 0x000200, 0x00201000, 0x00000000);
135 nv_mask(dev, 0x000200, 0x00200100, 0x00200100); 135 nv_mask(dev, 0x000200, 0x00201000, 0x00201000);
136 nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */ 136 nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */
137 137
138 /* reset/enable traps and interrupts */ 138 /* reset/enable traps and interrupts */
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
index d05c2c3b2444..4b46d6968566 100644
--- a/drivers/gpu/drm/nouveau/nv50_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -601,7 +601,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
601 gr_def(ctx, offset + 0x1c, 0x00880000); 601 gr_def(ctx, offset + 0x1c, 0x00880000);
602 break; 602 break;
603 case 0x86: 603 case 0x86:
604 gr_def(ctx, offset + 0x1c, 0x008c0000); 604 gr_def(ctx, offset + 0x1c, 0x018c0000);
605 break; 605 break;
606 case 0x92: 606 case 0x92:
607 case 0x96: 607 case 0x96:
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
index 9da23838e63e..2e45e57fd869 100644
--- a/drivers/gpu/drm/nouveau/nv50_vram.c
+++ b/drivers/gpu/drm/nouveau/nv50_vram.c
@@ -160,7 +160,7 @@ nv50_vram_rblock(struct drm_device *dev)
160 colbits = (r4 & 0x0000f000) >> 12; 160 colbits = (r4 & 0x0000f000) >> 12;
161 rowbitsa = ((r4 & 0x000f0000) >> 16) + 8; 161 rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
162 rowbitsb = ((r4 & 0x00f00000) >> 20) + 8; 162 rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
163 banks = ((r4 & 0x01000000) ? 8 : 4); 163 banks = 1 << (((r4 & 0x03000000) >> 24) + 2);
164 164
165 rowsize = parts * banks * (1 << colbits) * 8; 165 rowsize = parts * banks * (1 << colbits) * 8;
166 predicted = rowsize << rowbitsa; 166 predicted = rowsize << rowbitsa;
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index bbdbc51830c8..a74e501afd25 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -157,8 +157,8 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
157 struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR); 157 struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
158 struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR]; 158 struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
159 struct drm_device *dev = chan->dev; 159 struct drm_device *dev = chan->dev;
160 struct drm_nouveau_private *dev_priv = dev->dev_private;
160 int i = 0, gpc, tp, ret; 161 int i = 0, gpc, tp, ret;
161 u32 magic;
162 162
163 ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM, 163 ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM,
164 &grch->unk408004); 164 &grch->unk408004);
@@ -207,14 +207,37 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
207 nv_wo32(grch->mmio, i++ * 4, 0x0041880c); 207 nv_wo32(grch->mmio, i++ * 4, 0x0041880c);
208 nv_wo32(grch->mmio, i++ * 4, 0x80000018); 208 nv_wo32(grch->mmio, i++ * 4, 0x80000018);
209 209
210 magic = 0x02180000; 210 if (dev_priv->chipset != 0xc1) {
211 nv_wo32(grch->mmio, i++ * 4, 0x00405830); 211 u32 magic = 0x02180000;
212 nv_wo32(grch->mmio, i++ * 4, magic); 212 nv_wo32(grch->mmio, i++ * 4, 0x00405830);
213 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 213 nv_wo32(grch->mmio, i++ * 4, magic);
214 for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x0324) { 214 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
215 u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800); 215 for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
216 nv_wo32(grch->mmio, i++ * 4, reg); 216 u32 reg = TP_UNIT(gpc, tp, 0x520);
217 nv_wo32(grch->mmio, i++ * 4, magic); 217 nv_wo32(grch->mmio, i++ * 4, reg);
218 nv_wo32(grch->mmio, i++ * 4, magic);
219 magic += 0x0324;
220 }
221 }
222 } else {
223 u32 magic = 0x02180000;
224 nv_wo32(grch->mmio, i++ * 4, 0x00405830);
225 nv_wo32(grch->mmio, i++ * 4, magic | 0x0000218);
226 nv_wo32(grch->mmio, i++ * 4, 0x004064c4);
227 nv_wo32(grch->mmio, i++ * 4, 0x0086ffff);
228 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
229 for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
230 u32 reg = TP_UNIT(gpc, tp, 0x520);
231 nv_wo32(grch->mmio, i++ * 4, reg);
232 nv_wo32(grch->mmio, i++ * 4, (1 << 28) | magic);
233 magic += 0x0324;
234 }
235 for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
236 u32 reg = TP_UNIT(gpc, tp, 0x544);
237 nv_wo32(grch->mmio, i++ * 4, reg);
238 nv_wo32(grch->mmio, i++ * 4, magic);
239 magic += 0x0324;
240 }
218 } 241 }
219 } 242 }
220 243
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
index dd0e6a736b3b..96b0b93d94ca 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grctx.c
+++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
@@ -1812,6 +1812,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
1812 /* calculate first set of magics */ 1812 /* calculate first set of magics */
1813 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); 1813 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
1814 1814
1815 gpc = -1;
1815 for (tp = 0; tp < priv->tp_total; tp++) { 1816 for (tp = 0; tp < priv->tp_total; tp++) {
1816 do { 1817 do {
1817 gpc = (gpc + 1) % priv->gpc_nr; 1818 gpc = (gpc + 1) % priv->gpc_nr;
@@ -1861,30 +1862,26 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
1861 1862
1862 if (1) { 1863 if (1) {
1863 u32 tp_mask = 0, tp_set = 0; 1864 u32 tp_mask = 0, tp_set = 0;
1864 u8 tpnr[GPC_MAX]; 1865 u8 tpnr[GPC_MAX], a, b;
1865 1866
1866 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); 1867 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
1867 for (gpc = 0; gpc < priv->gpc_nr; gpc++) 1868 for (gpc = 0; gpc < priv->gpc_nr; gpc++)
1868 tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8); 1869 tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8);
1869 1870
1870 gpc = -1; 1871 for (i = 0, gpc = -1, b = -1; i < 32; i++) {
1871 for (i = 0, gpc = -1; i < 32; i++) { 1872 a = (i * (priv->tp_total - 1)) / 32;
1872 int ltp = i * (priv->tp_total - 1) / 32; 1873 if (a != b) {
1873 1874 b = a;
1874 do { 1875 do {
1875 gpc = (gpc + 1) % priv->gpc_nr; 1876 gpc = (gpc + 1) % priv->gpc_nr;
1876 } while (!tpnr[gpc]); 1877 } while (!tpnr[gpc]);
1877 tp = priv->tp_nr[gpc] - tpnr[gpc]--; 1878 tp = priv->tp_nr[gpc] - tpnr[gpc]--;
1878 1879
1879 tp_set |= 1 << ((gpc * 8) + tp); 1880 tp_set |= 1 << ((gpc * 8) + tp);
1881 }
1880 1882
1881 do { 1883 nv_wr32(dev, 0x406800 + (i * 0x20), tp_set);
1882 nv_wr32(dev, 0x406800 + (i * 0x20), tp_set); 1884 nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set ^ tp_mask);
1883 tp_set ^= tp_mask;
1884 nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set);
1885 tp_set ^= tp_mask;
1886 } while (ltp == (++i * (priv->tp_total - 1) / 32));
1887 i--;
1888 } 1885 }
1889 } 1886 }
1890 1887
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
index edbfe9360ae2..ce984d573a51 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vram.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vram.c
@@ -43,7 +43,7 @@ static const u8 types[256] = {
43 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 43 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
44 0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0, 44 0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0,
45 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 45 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
46 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 46 3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3,
47 3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3, 47 3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3,
48 3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0, 48 3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0,
49 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0 49 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0
@@ -110,22 +110,26 @@ nvc0_vram_init(struct drm_device *dev)
110 u32 bsize = nv_rd32(dev, 0x10f20c); 110 u32 bsize = nv_rd32(dev, 0x10f20c);
111 u32 offset, length; 111 u32 offset, length;
112 bool uniform = true; 112 bool uniform = true;
113 int ret, i; 113 int ret, part;
114 114
115 NV_DEBUG(dev, "0x100800: 0x%08x\n", nv_rd32(dev, 0x100800)); 115 NV_DEBUG(dev, "0x100800: 0x%08x\n", nv_rd32(dev, 0x100800));
116 NV_DEBUG(dev, "parts 0x%08x bcast_mem_amount 0x%08x\n", parts, bsize); 116 NV_DEBUG(dev, "parts 0x%08x bcast_mem_amount 0x%08x\n", parts, bsize);
117 117
118 /* read amount of vram attached to each memory controller */ 118 /* read amount of vram attached to each memory controller */
119 for (i = 0; i < parts; i++) { 119 part = 0;
120 u32 psize = nv_rd32(dev, 0x11020c + (i * 0x1000)); 120 while (parts) {
121 u32 psize = nv_rd32(dev, 0x11020c + (part++ * 0x1000));
122 if (psize == 0)
123 continue;
124 parts--;
125
121 if (psize != bsize) { 126 if (psize != bsize) {
122 if (psize < bsize) 127 if (psize < bsize)
123 bsize = psize; 128 bsize = psize;
124 uniform = false; 129 uniform = false;
125 } 130 }
126 131
127 NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", i, psize); 132 NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", part, psize);
128
129 dev_priv->vram_size += (u64)psize << 20; 133 dev_priv->vram_size += (u64)psize << 20;
130 } 134 }
131 135
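
The VRAM sizing loop now walks partition slots until it has accounted for "parts" non-empty ones, skipping controllers that report a size of zero instead of letting them truncate the count. A compact model of that traversal; the sizes are made up for the example:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Per-partition sizes in MiB as the register reads might return them;
         * the zeroes stand for disabled or absent memory controllers. */
        uint32_t psize[] = { 256, 0, 256, 0, 512 };
        int parts = 3;                  /* broadcast register: 3 active partitions */
        int part = 0;
        uint64_t vram = 0;

        while (parts) {
                uint32_t size = psize[part++];

                if (size == 0)
                        continue;       /* empty slot: do not consume 'parts' */
                parts--;
                vram += (uint64_t)size << 20;
        }

        printf("total vram: %llu MiB\n", (unsigned long long)(vram >> 20));
        return 0;
}
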
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 87921c88a95c..87631fede1f8 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1522,12 +1522,6 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
1522 struct drm_display_mode *mode, 1522 struct drm_display_mode *mode,
1523 struct drm_display_mode *adjusted_mode) 1523 struct drm_display_mode *adjusted_mode)
1524{ 1524{
1525 struct drm_device *dev = crtc->dev;
1526 struct radeon_device *rdev = dev->dev_private;
1527
1528 /* adjust pm to upcoming mode change */
1529 radeon_pm_compute_clocks(rdev);
1530
1531 if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) 1525 if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
1532 return false; 1526 return false;
1533 return true; 1527 return true;
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index a0de48542f71..6fb335a4fdda 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -283,7 +283,7 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
283 } 283 }
284 } 284 }
285 285
286 DRM_ERROR("aux i2c too many retries, giving up\n"); 286 DRM_DEBUG_KMS("aux i2c too many retries, giving up\n");
287 return -EREMOTEIO; 287 return -EREMOTEIO;
288} 288}
289 289
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e4c384b9511c..1d603a3335db 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -157,6 +157,57 @@ int sumo_get_temp(struct radeon_device *rdev)
157 return actual_temp * 1000; 157 return actual_temp * 1000;
158} 158}
159 159
160void sumo_pm_init_profile(struct radeon_device *rdev)
161{
162 int idx;
163
164 /* default */
165 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
166 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
167 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
168 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
169
170 /* low,mid sh/mh */
171 if (rdev->flags & RADEON_IS_MOBILITY)
172 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
173 else
174 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
175
176 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
177 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
178 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
179 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
180
181 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
182 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
183 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
184 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
185
186 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
187 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
188 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
189 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
190
191 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
192 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
193 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
194 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
195
196 /* high sh/mh */
197 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
198 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
199 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
200 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
201 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx =
202 rdev->pm.power_state[idx].num_clock_modes - 1;
203
204 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
205 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
206 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
207 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx =
208 rdev->pm.power_state[idx].num_clock_modes - 1;
209}
210
160void evergreen_pm_misc(struct radeon_device *rdev) 211void evergreen_pm_misc(struct radeon_device *rdev)
161{ 212{
162 int req_ps_idx = rdev->pm.requested_power_state_index; 213 int req_ps_idx = rdev->pm.requested_power_state_index;
@@ -1219,7 +1270,7 @@ void evergreen_mc_program(struct radeon_device *rdev)
1219 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, 1270 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1220 rdev->mc.vram_end >> 12); 1271 rdev->mc.vram_end >> 12);
1221 } 1272 }
1222 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); 1273 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
1223 if (rdev->flags & RADEON_IS_IGP) { 1274 if (rdev->flags & RADEON_IS_IGP) {
1224 tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF; 1275 tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
1225 tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24; 1276 tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 19afc43ad173..9cdda0b3b081 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -288,24 +288,6 @@ void r600_pm_get_dynpm_state(struct radeon_device *rdev)
288 pcie_lanes); 288 pcie_lanes);
289} 289}
290 290
291static int r600_pm_get_type_index(struct radeon_device *rdev,
292 enum radeon_pm_state_type ps_type,
293 int instance)
294{
295 int i;
296 int found_instance = -1;
297
298 for (i = 0; i < rdev->pm.num_power_states; i++) {
299 if (rdev->pm.power_state[i].type == ps_type) {
300 found_instance++;
301 if (found_instance == instance)
302 return i;
303 }
304 }
305 /* return default if no match */
306 return rdev->pm.default_power_state_index;
307}
308
309void rs780_pm_init_profile(struct radeon_device *rdev) 291void rs780_pm_init_profile(struct radeon_device *rdev)
310{ 292{
311 if (rdev->pm.num_power_states == 2) { 293 if (rdev->pm.num_power_states == 2) {
@@ -421,6 +403,8 @@ void rs780_pm_init_profile(struct radeon_device *rdev)
421 403
422void r600_pm_init_profile(struct radeon_device *rdev) 404void r600_pm_init_profile(struct radeon_device *rdev)
423{ 405{
406 int idx;
407
424 if (rdev->family == CHIP_R600) { 408 if (rdev->family == CHIP_R600) {
425 /* XXX */ 409 /* XXX */
426 /* default */ 410 /* default */
@@ -502,81 +486,43 @@ void r600_pm_init_profile(struct radeon_device *rdev)
502 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; 486 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
503 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2; 487 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
504 /* low sh */ 488 /* low sh */
505 if (rdev->flags & RADEON_IS_MOBILITY) { 489 if (rdev->flags & RADEON_IS_MOBILITY)
506 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 490 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
507 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); 491 else
508 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 492 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
509 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); 493 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
510 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; 494 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
511 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; 495 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
512 } else { 496 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
513 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
514 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
515 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
516 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
517 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
518 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
519 }
520 /* mid sh */ 497 /* mid sh */
521 if (rdev->flags & RADEON_IS_MOBILITY) { 498 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
522 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 499 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
523 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); 500 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
524 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 501 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
525 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
526 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
527 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
528 } else {
529 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
530 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
531 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
532 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
533 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
534 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
535 }
536 /* high sh */ 502 /* high sh */
537 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 503 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
538 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); 504 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
539 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 505 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
540 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
541 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; 506 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
542 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2; 507 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
543 /* low mh */ 508 /* low mh */
544 if (rdev->flags & RADEON_IS_MOBILITY) { 509 if (rdev->flags & RADEON_IS_MOBILITY)
545 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 510 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
546 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); 511 else
547 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 512 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
548 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); 513 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
549 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; 514 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
550 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; 515 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
551 } else { 516 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
552 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
553 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
554 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
555 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
556 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
557 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
558 }
559 /* mid mh */ 517 /* mid mh */
560 if (rdev->flags & RADEON_IS_MOBILITY) { 518 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
561 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 519 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
562 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); 520 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
563 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 521 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
564 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
565 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
566 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
567 } else {
568 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
569 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
570 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
571 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
572 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
573 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
574 }
575 /* high mh */ 522 /* high mh */
576 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 523 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
577 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); 524 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
578 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 525 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
579 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
580 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; 526 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
581 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2; 527 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
582 } 528 }
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b316b301152f..fc5a1d642cb5 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -784,8 +784,7 @@ struct radeon_pm_clock_info {
784 784
785struct radeon_power_state { 785struct radeon_power_state {
786 enum radeon_pm_state_type type; 786 enum radeon_pm_state_type type;
787 /* XXX: use a define for num clock modes */ 787 struct radeon_pm_clock_info *clock_info;
788 struct radeon_pm_clock_info clock_info[8];
789 /* number of valid clock modes in this power state */ 788 /* number of valid clock modes in this power state */
790 int num_clock_modes; 789 int num_clock_modes;
791 struct radeon_pm_clock_info *default_clock_mode; 790 struct radeon_pm_clock_info *default_clock_mode;
@@ -855,6 +854,9 @@ struct radeon_pm {
855 struct device *int_hwmon_dev; 854 struct device *int_hwmon_dev;
856}; 855};
857 856
857int radeon_pm_get_type_index(struct radeon_device *rdev,
858 enum radeon_pm_state_type ps_type,
859 int instance);
858 860
859/* 861/*
860 * Benchmarking 862 * Benchmarking
@@ -1142,6 +1144,48 @@ struct r600_vram_scratch {
1142 u64 gpu_addr; 1144 u64 gpu_addr;
1143}; 1145};
1144 1146
1147
1148/*
1149 * Mutex which allows recursive locking from the same process.
1150 */
1151struct radeon_mutex {
1152 struct mutex mutex;
1153 struct task_struct *owner;
1154 int level;
1155};
1156
1157static inline void radeon_mutex_init(struct radeon_mutex *mutex)
1158{
1159 mutex_init(&mutex->mutex);
1160 mutex->owner = NULL;
1161 mutex->level = 0;
1162}
1163
1164static inline void radeon_mutex_lock(struct radeon_mutex *mutex)
1165{
1166 if (mutex_trylock(&mutex->mutex)) {
1167 /* The mutex was unlocked before, so it's ours now */
1168 mutex->owner = current;
1169 } else if (mutex->owner != current) {
1170 /* Another process locked the mutex, take it */
1171 mutex_lock(&mutex->mutex);
1172 mutex->owner = current;
1173 }
1174 /* Otherwise the mutex was already locked by this process */
1175
1176 mutex->level++;
1177}
1178
1179static inline void radeon_mutex_unlock(struct radeon_mutex *mutex)
1180{
1181 if (--mutex->level > 0)
1182 return;
1183
1184 mutex->owner = NULL;
1185 mutex_unlock(&mutex->mutex);
1186}
1187
1188
1145/* 1189/*
1146 * Core structure, functions and helpers. 1190 * Core structure, functions and helpers.
1147 */ 1191 */
@@ -1197,7 +1241,7 @@ struct radeon_device {
1197 struct radeon_gem gem; 1241 struct radeon_gem gem;
1198 struct radeon_pm pm; 1242 struct radeon_pm pm;
1199 uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH]; 1243 uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH];
1200 struct mutex cs_mutex; 1244 struct radeon_mutex cs_mutex;
1201 struct radeon_wb wb; 1245 struct radeon_wb wb;
1202 struct radeon_dummy_page dummy_page; 1246 struct radeon_dummy_page dummy_page;
1203 bool gpu_lockup; 1247 bool gpu_lockup;
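
The new radeon_mutex lets the same process take cs_mutex recursively: the first acquisition records the owner, nested acquisitions only bump a depth counter, and only the outermost unlock releases the underlying mutex. A hedged userspace analogue built on pthreads; the kernel version keys off "current" rather than pthread_self():

#include <pthread.h>
#include <stdio.h>

struct recursive_mutex {
        pthread_mutex_t mutex;
        pthread_t owner;
        int owned;
        int level;
};

static void rmutex_lock(struct recursive_mutex *m)
{
        if (pthread_mutex_trylock(&m->mutex) == 0) {
                /* The mutex was free, so it is ours now. */
                m->owner = pthread_self();
                m->owned = 1;
        } else if (!m->owned || !pthread_equal(m->owner, pthread_self())) {
                /* Held by another thread: block until we get it. */
                pthread_mutex_lock(&m->mutex);
                m->owner = pthread_self();
                m->owned = 1;
        }
        /* Otherwise we already hold it; just record the extra depth. */
        m->level++;
}

static void rmutex_unlock(struct recursive_mutex *m)
{
        if (--m->level > 0)
                return;                 /* still nested, keep holding it */
        m->owned = 0;
        pthread_mutex_unlock(&m->mutex);
}

int main(void)
{
        struct recursive_mutex m = { PTHREAD_MUTEX_INITIALIZER, 0, 0, 0 };

        rmutex_lock(&m);
        rmutex_lock(&m);                /* nested lock, same thread: no deadlock */
        printf("nesting depth: %d\n", m.level);
        rmutex_unlock(&m);
        rmutex_unlock(&m);
        return 0;
}
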
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index e2944566ffea..a2e1eae114ef 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -834,7 +834,7 @@ static struct radeon_asic sumo_asic = {
834 .pm_misc = &evergreen_pm_misc, 834 .pm_misc = &evergreen_pm_misc,
835 .pm_prepare = &evergreen_pm_prepare, 835 .pm_prepare = &evergreen_pm_prepare,
836 .pm_finish = &evergreen_pm_finish, 836 .pm_finish = &evergreen_pm_finish,
837 .pm_init_profile = &rs780_pm_init_profile, 837 .pm_init_profile = &sumo_pm_init_profile,
838 .pm_get_dynpm_state = &r600_pm_get_dynpm_state, 838 .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
839 .pre_page_flip = &evergreen_pre_page_flip, 839 .pre_page_flip = &evergreen_pre_page_flip,
840 .page_flip = &evergreen_page_flip, 840 .page_flip = &evergreen_page_flip,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 85f14f0337e4..59914842a729 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -413,6 +413,7 @@ extern int evergreen_cs_parse(struct radeon_cs_parser *p);
413extern void evergreen_pm_misc(struct radeon_device *rdev); 413extern void evergreen_pm_misc(struct radeon_device *rdev);
414extern void evergreen_pm_prepare(struct radeon_device *rdev); 414extern void evergreen_pm_prepare(struct radeon_device *rdev);
415extern void evergreen_pm_finish(struct radeon_device *rdev); 415extern void evergreen_pm_finish(struct radeon_device *rdev);
416extern void sumo_pm_init_profile(struct radeon_device *rdev);
416extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc); 417extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
417extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); 418extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
418extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc); 419extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 08d0b94332e6..d2d179267af3 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1999,6 +1999,10 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
1999 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; 1999 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
2000 switch (frev) { 2000 switch (frev) {
2001 case 1: 2001 case 1:
2002 rdev->pm.power_state[state_index].clock_info =
2003 kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
2004 if (!rdev->pm.power_state[state_index].clock_info)
2005 return state_index;
2002 rdev->pm.power_state[state_index].num_clock_modes = 1; 2006 rdev->pm.power_state[state_index].num_clock_modes = 1;
2003 rdev->pm.power_state[state_index].clock_info[0].mclk = 2007 rdev->pm.power_state[state_index].clock_info[0].mclk =
2004 le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock); 2008 le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
@@ -2035,6 +2039,10 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
2035 state_index++; 2039 state_index++;
2036 break; 2040 break;
2037 case 2: 2041 case 2:
2042 rdev->pm.power_state[state_index].clock_info =
2043 kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
2044 if (!rdev->pm.power_state[state_index].clock_info)
2045 return state_index;
2038 rdev->pm.power_state[state_index].num_clock_modes = 1; 2046 rdev->pm.power_state[state_index].num_clock_modes = 1;
2039 rdev->pm.power_state[state_index].clock_info[0].mclk = 2047 rdev->pm.power_state[state_index].clock_info[0].mclk =
2040 le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock); 2048 le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
@@ -2072,6 +2080,10 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
2072 state_index++; 2080 state_index++;
2073 break; 2081 break;
2074 case 3: 2082 case 3:
2083 rdev->pm.power_state[state_index].clock_info =
2084 kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
2085 if (!rdev->pm.power_state[state_index].clock_info)
2086 return state_index;
2075 rdev->pm.power_state[state_index].num_clock_modes = 1; 2087 rdev->pm.power_state[state_index].num_clock_modes = 1;
2076 rdev->pm.power_state[state_index].clock_info[0].mclk = 2088 rdev->pm.power_state[state_index].clock_info[0].mclk =
2077 le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock); 2089 le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
@@ -2257,7 +2269,7 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
2257 rdev->pm.default_power_state_index = state_index; 2269 rdev->pm.default_power_state_index = state_index;
2258 rdev->pm.power_state[state_index].default_clock_mode = 2270 rdev->pm.power_state[state_index].default_clock_mode =
2259 &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; 2271 &rdev->pm.power_state[state_index].clock_info[mode_index - 1];
2260 if (ASIC_IS_DCE5(rdev)) { 2272 if (ASIC_IS_DCE5(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
2261 /* NI chips post without MC ucode, so default clocks are strobe mode only */ 2273 /* NI chips post without MC ucode, so default clocks are strobe mode only */
2262 rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk; 2274 rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
2263 rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk; 2275 rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
@@ -2377,17 +2389,31 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
2377 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) + 2389 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
2378 (power_state->v1.ucNonClockStateIndex * 2390 (power_state->v1.ucNonClockStateIndex *
2379 power_info->pplib.ucNonClockSize)); 2391 power_info->pplib.ucNonClockSize));
2380 for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) { 2392 rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
2381 clock_info = (union pplib_clock_info *) 2393 ((power_info->pplib.ucStateEntrySize - 1) ?
2382 (mode_info->atom_context->bios + data_offset + 2394 (power_info->pplib.ucStateEntrySize - 1) : 1),
2383 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) + 2395 GFP_KERNEL);
2384 (power_state->v1.ucClockStateIndices[j] * 2396 if (!rdev->pm.power_state[i].clock_info)
2385 power_info->pplib.ucClockInfoSize)); 2397 return state_index;
2386 valid = radeon_atombios_parse_pplib_clock_info(rdev, 2398 if (power_info->pplib.ucStateEntrySize - 1) {
2387 state_index, mode_index, 2399 for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
2388 clock_info); 2400 clock_info = (union pplib_clock_info *)
2389 if (valid) 2401 (mode_info->atom_context->bios + data_offset +
2390 mode_index++; 2402 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
2403 (power_state->v1.ucClockStateIndices[j] *
2404 power_info->pplib.ucClockInfoSize));
2405 valid = radeon_atombios_parse_pplib_clock_info(rdev,
2406 state_index, mode_index,
2407 clock_info);
2408 if (valid)
2409 mode_index++;
2410 }
2411 } else {
2412 rdev->pm.power_state[state_index].clock_info[0].mclk =
2413 rdev->clock.default_mclk;
2414 rdev->pm.power_state[state_index].clock_info[0].sclk =
2415 rdev->clock.default_sclk;
2416 mode_index++;
2391 } 2417 }
2392 rdev->pm.power_state[state_index].num_clock_modes = mode_index; 2418 rdev->pm.power_state[state_index].num_clock_modes = mode_index;
2393 if (mode_index) { 2419 if (mode_index) {
@@ -2456,18 +2482,32 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
2456 non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */ 2482 non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
2457 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) 2483 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
2458 &non_clock_info_array->nonClockInfo[non_clock_array_index]; 2484 &non_clock_info_array->nonClockInfo[non_clock_array_index];
2459 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { 2485 rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
2460 clock_array_index = power_state->v2.clockInfoIndex[j]; 2486 (power_state->v2.ucNumDPMLevels ?
2461 /* XXX this might be an inagua bug... */ 2487 power_state->v2.ucNumDPMLevels : 1),
2462 if (clock_array_index >= clock_info_array->ucNumEntries) 2488 GFP_KERNEL);
2463 continue; 2489 if (!rdev->pm.power_state[i].clock_info)
2464 clock_info = (union pplib_clock_info *) 2490 return state_index;
2465 &clock_info_array->clockInfo[clock_array_index]; 2491 if (power_state->v2.ucNumDPMLevels) {
2466 valid = radeon_atombios_parse_pplib_clock_info(rdev, 2492 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
2467 state_index, mode_index, 2493 clock_array_index = power_state->v2.clockInfoIndex[j];
2468 clock_info); 2494 /* XXX this might be an inagua bug... */
2469 if (valid) 2495 if (clock_array_index >= clock_info_array->ucNumEntries)
2470 mode_index++; 2496 continue;
2497 clock_info = (union pplib_clock_info *)
2498 &clock_info_array->clockInfo[clock_array_index];
2499 valid = radeon_atombios_parse_pplib_clock_info(rdev,
2500 state_index, mode_index,
2501 clock_info);
2502 if (valid)
2503 mode_index++;
2504 }
2505 } else {
2506 rdev->pm.power_state[state_index].clock_info[0].mclk =
2507 rdev->clock.default_mclk;
2508 rdev->pm.power_state[state_index].clock_info[0].sclk =
2509 rdev->clock.default_sclk;
2510 mode_index++;
2471 } 2511 }
2472 rdev->pm.power_state[state_index].num_clock_modes = mode_index; 2512 rdev->pm.power_state[state_index].num_clock_modes = mode_index;
2473 if (mode_index) { 2513 if (mode_index) {
@@ -2524,19 +2564,23 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
2524 } else { 2564 } else {
2525 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL); 2565 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
2526 if (rdev->pm.power_state) { 2566 if (rdev->pm.power_state) {
2527 /* add the default mode */ 2567 rdev->pm.power_state[0].clock_info =
2528 rdev->pm.power_state[state_index].type = 2568 kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
2529 POWER_STATE_TYPE_DEFAULT; 2569 if (rdev->pm.power_state[0].clock_info) {
2530 rdev->pm.power_state[state_index].num_clock_modes = 1; 2570 /* add the default mode */
2531 rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; 2571 rdev->pm.power_state[state_index].type =
2532 rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; 2572 POWER_STATE_TYPE_DEFAULT;
2533 rdev->pm.power_state[state_index].default_clock_mode = 2573 rdev->pm.power_state[state_index].num_clock_modes = 1;
2534 &rdev->pm.power_state[state_index].clock_info[0]; 2574 rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
2535 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; 2575 rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
2536 rdev->pm.power_state[state_index].pcie_lanes = 16; 2576 rdev->pm.power_state[state_index].default_clock_mode =
2537 rdev->pm.default_power_state_index = state_index; 2577 &rdev->pm.power_state[state_index].clock_info[0];
2538 rdev->pm.power_state[state_index].flags = 0; 2578 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
2539 state_index++; 2579 rdev->pm.power_state[state_index].pcie_lanes = 16;
2580 rdev->pm.default_power_state_index = state_index;
2581 rdev->pm.power_state[state_index].flags = 0;
2582 state_index++;
2583 }
2540 } 2584 }
2541 } 2585 }
2542 2586
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 5cafc90de7f8..17e1a9b2d8fb 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -98,7 +98,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
98 struct radeon_bo *sobj = NULL; 98 struct radeon_bo *sobj = NULL;
99 uint64_t saddr, daddr; 99 uint64_t saddr, daddr;
100 int r, n; 100 int r, n;
101 unsigned int time; 101 int time;
102 102
103 n = RADEON_BENCHMARK_ITERATIONS; 103 n = RADEON_BENCHMARK_ITERATIONS;
104 r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj); 104 r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index fae00c0d75aa..ccaa243c1442 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -222,7 +222,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
222 struct radeon_cs_chunk *ib_chunk; 222 struct radeon_cs_chunk *ib_chunk;
223 int r; 223 int r;
224 224
225 mutex_lock(&rdev->cs_mutex); 225 radeon_mutex_lock(&rdev->cs_mutex);
226 /* initialize parser */ 226 /* initialize parser */
227 memset(&parser, 0, sizeof(struct radeon_cs_parser)); 227 memset(&parser, 0, sizeof(struct radeon_cs_parser));
228 parser.filp = filp; 228 parser.filp = filp;
@@ -233,14 +233,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
233 if (r) { 233 if (r) {
234 DRM_ERROR("Failed to initialize parser !\n"); 234 DRM_ERROR("Failed to initialize parser !\n");
235 radeon_cs_parser_fini(&parser, r); 235 radeon_cs_parser_fini(&parser, r);
236 mutex_unlock(&rdev->cs_mutex); 236 radeon_mutex_unlock(&rdev->cs_mutex);
237 return r; 237 return r;
238 } 238 }
239 r = radeon_ib_get(rdev, &parser.ib); 239 r = radeon_ib_get(rdev, &parser.ib);
240 if (r) { 240 if (r) {
241 DRM_ERROR("Failed to get ib !\n"); 241 DRM_ERROR("Failed to get ib !\n");
242 radeon_cs_parser_fini(&parser, r); 242 radeon_cs_parser_fini(&parser, r);
243 mutex_unlock(&rdev->cs_mutex); 243 radeon_mutex_unlock(&rdev->cs_mutex);
244 return r; 244 return r;
245 } 245 }
246 r = radeon_cs_parser_relocs(&parser); 246 r = radeon_cs_parser_relocs(&parser);
@@ -248,7 +248,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
248 if (r != -ERESTARTSYS) 248 if (r != -ERESTARTSYS)
249 DRM_ERROR("Failed to parse relocation %d!\n", r); 249 DRM_ERROR("Failed to parse relocation %d!\n", r);
250 radeon_cs_parser_fini(&parser, r); 250 radeon_cs_parser_fini(&parser, r);
251 mutex_unlock(&rdev->cs_mutex); 251 radeon_mutex_unlock(&rdev->cs_mutex);
252 return r; 252 return r;
253 } 253 }
254 /* Copy the packet into the IB, the parser will read from the 254 /* Copy the packet into the IB, the parser will read from the
@@ -260,14 +260,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
260 if (r || parser.parser_error) { 260 if (r || parser.parser_error) {
261 DRM_ERROR("Invalid command stream !\n"); 261 DRM_ERROR("Invalid command stream !\n");
262 radeon_cs_parser_fini(&parser, r); 262 radeon_cs_parser_fini(&parser, r);
263 mutex_unlock(&rdev->cs_mutex); 263 radeon_mutex_unlock(&rdev->cs_mutex);
264 return r; 264 return r;
265 } 265 }
266 r = radeon_cs_finish_pages(&parser); 266 r = radeon_cs_finish_pages(&parser);
267 if (r) { 267 if (r) {
268 DRM_ERROR("Invalid command stream !\n"); 268 DRM_ERROR("Invalid command stream !\n");
269 radeon_cs_parser_fini(&parser, r); 269 radeon_cs_parser_fini(&parser, r);
270 mutex_unlock(&rdev->cs_mutex); 270 radeon_mutex_unlock(&rdev->cs_mutex);
271 return r; 271 return r;
272 } 272 }
273 r = radeon_ib_schedule(rdev, parser.ib); 273 r = radeon_ib_schedule(rdev, parser.ib);
@@ -275,7 +275,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
275 DRM_ERROR("Failed to schedule IB !\n"); 275 DRM_ERROR("Failed to schedule IB !\n");
276 } 276 }
277 radeon_cs_parser_fini(&parser, r); 277 radeon_cs_parser_fini(&parser, r);
278 mutex_unlock(&rdev->cs_mutex); 278 radeon_mutex_unlock(&rdev->cs_mutex);
279 return r; 279 return r;
280} 280}
281 281
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index c33bc914d93d..c4d00a171411 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -716,7 +716,7 @@ int radeon_device_init(struct radeon_device *rdev,
716 716
717 /* mutex initialization are all done here so we 717 /* mutex initialization are all done here so we
718 * can recall function without having locking issues */ 718 * can recall function without having locking issues */
719 mutex_init(&rdev->cs_mutex); 719 radeon_mutex_init(&rdev->cs_mutex);
720 mutex_init(&rdev->ib_pool.mutex); 720 mutex_init(&rdev->ib_pool.mutex);
721 mutex_init(&rdev->cp.mutex); 721 mutex_init(&rdev->cp.mutex);
722 mutex_init(&rdev->dc_hw_i2c_mutex); 722 mutex_init(&rdev->dc_hw_i2c_mutex);
@@ -955,6 +955,9 @@ int radeon_gpu_reset(struct radeon_device *rdev)
955 int r; 955 int r;
956 int resched; 956 int resched;
957 957
958 /* Prevent CS ioctl from interfering */
959 radeon_mutex_lock(&rdev->cs_mutex);
960
958 radeon_save_bios_scratch_regs(rdev); 961 radeon_save_bios_scratch_regs(rdev);
959 /* block TTM */ 962 /* block TTM */
960 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); 963 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
@@ -967,10 +970,15 @@ int radeon_gpu_reset(struct radeon_device *rdev)
967 radeon_restore_bios_scratch_regs(rdev); 970 radeon_restore_bios_scratch_regs(rdev);
968 drm_helper_resume_force_mode(rdev->ddev); 971 drm_helper_resume_force_mode(rdev->ddev);
969 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); 972 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
970 return 0;
971 } 973 }
972 /* bad news, how to tell it to userspace ? */ 974
973 dev_info(rdev->dev, "GPU reset failed\n"); 975 radeon_mutex_unlock(&rdev->cs_mutex);
976
977 if (r) {
978 /* bad news, how to tell it to userspace ? */
979 dev_info(rdev->dev, "GPU reset failed\n");
980 }
981
974 return r; 982 return r;
975} 983}
976 984
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 41a5d48e657b..daadf2111040 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -991,12 +991,6 @@ static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc,
991 struct drm_display_mode *mode, 991 struct drm_display_mode *mode,
992 struct drm_display_mode *adjusted_mode) 992 struct drm_display_mode *adjusted_mode)
993{ 993{
994 struct drm_device *dev = crtc->dev;
995 struct radeon_device *rdev = dev->dev_private;
996
997 /* adjust pm to upcoming mode change */
998 radeon_pm_compute_clocks(rdev);
999
1000 if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) 994 if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
1001 return false; 995 return false;
1002 return true; 996 return true;
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 6fabe89fa6a1..78a665bd9519 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -53,6 +53,24 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev);
53 53
54#define ACPI_AC_CLASS "ac_adapter" 54#define ACPI_AC_CLASS "ac_adapter"
55 55
56int radeon_pm_get_type_index(struct radeon_device *rdev,
57 enum radeon_pm_state_type ps_type,
58 int instance)
59{
60 int i;
61 int found_instance = -1;
62
63 for (i = 0; i < rdev->pm.num_power_states; i++) {
64 if (rdev->pm.power_state[i].type == ps_type) {
65 found_instance++;
66 if (found_instance == instance)
67 return i;
68 }
69 }
70 /* return default if no match */
71 return rdev->pm.default_power_state_index;
72}
73
56#ifdef CONFIG_ACPI 74#ifdef CONFIG_ACPI
57static int radeon_acpi_event(struct notifier_block *nb, 75static int radeon_acpi_event(struct notifier_block *nb,
58 unsigned long val, 76 unsigned long val,
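radeon_pm_get_type_index(), declared in radeon.h above and implemented here, returns the index of the Nth power state of a given type and falls back to the default power state index when no such state exists. A hedged usage sketch; the POWER_STATE_TYPE_PERFORMANCE value and the surrounding variables are assumptions, not taken from this patch:

	/* Assumed example: find the second performance state, if present. */
	int idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
	struct radeon_power_state *ps = &rdev->pm.power_state[idx];
	/* idx is rdev->pm.default_power_state_index when fewer than two
	 * performance states were parsed. */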
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 03daefa73397..880e285d7578 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -105,6 +105,10 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
105 struct vmw_dma_buffer *dmabuf = NULL; 105 struct vmw_dma_buffer *dmabuf = NULL;
106 int ret; 106 int ret;
107 107
108 /* A lot of the code assumes this */
109 if (handle && (width != 64 || height != 64))
110 return -EINVAL;
111
108 if (handle) { 112 if (handle) {
109 ret = vmw_user_surface_lookup_handle(dev_priv, tfile, 113 ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
110 handle, &surface); 114 handle, &surface);
@@ -410,8 +414,9 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
410 top = clips->y1; 414 top = clips->y1;
411 bottom = clips->y2; 415 bottom = clips->y2;
412 416
413 clips_ptr = clips; 417 /* skip the first clip rect */
414 for (i = 1; i < num_clips; i++, clips_ptr += inc) { 418 for (i = 1, clips_ptr = clips + inc;
419 i < num_clips; i++, clips_ptr += inc) {
415 left = min_t(int, left, (int)clips_ptr->x1); 420 left = min_t(int, left, (int)clips_ptr->x1);
416 right = max_t(int, right, (int)clips_ptr->x2); 421 right = max_t(int, right, (int)clips_ptr->x2);
417 top = min_t(int, top, (int)clips_ptr->y1); 422 top = min_t(int, top, (int)clips_ptr->y1);
@@ -1323,7 +1328,10 @@ int vmw_kms_close(struct vmw_private *dev_priv)
1323 * drm_encoder_cleanup which takes the lock we deadlock. 1328 * drm_encoder_cleanup which takes the lock we deadlock.
1324 */ 1329 */
1325 drm_mode_config_cleanup(dev_priv->dev); 1330 drm_mode_config_cleanup(dev_priv->dev);
1326 vmw_kms_close_legacy_display_system(dev_priv); 1331 if (dev_priv->sou_priv)
1332 vmw_kms_close_screen_object_display(dev_priv);
1333 else
1334 vmw_kms_close_legacy_display_system(dev_priv);
1327 return 0; 1335 return 0;
1328} 1336}
1329 1337
diff --git a/drivers/hwspinlock/u8500_hsem.c b/drivers/hwspinlock/u8500_hsem.c
index 143461a95ae4..86980fe04117 100644
--- a/drivers/hwspinlock/u8500_hsem.c
+++ b/drivers/hwspinlock/u8500_hsem.c
@@ -21,6 +21,7 @@
21 * General Public License for more details. 21 * General Public License for more details.
22 */ 22 */
23 23
24#include <linux/module.h>
24#include <linux/delay.h> 25#include <linux/delay.h>
25#include <linux/io.h> 26#include <linux/io.h>
26#include <linux/pm_runtime.h> 27#include <linux/pm_runtime.h>
@@ -108,10 +109,8 @@ static int __devinit u8500_hsem_probe(struct platform_device *pdev)
108 return -ENODEV; 109 return -ENODEV;
109 110
110 io_base = ioremap(res->start, resource_size(res)); 111 io_base = ioremap(res->start, resource_size(res));
111 if (!io_base) { 112 if (!io_base)
112 ret = -ENOMEM; 113 return -ENOMEM;
113 goto free_state;
114 }
115 114
116 /* make sure protocol 1 is selected */ 115 /* make sure protocol 1 is selected */
117 val = readl(io_base + HSEM_CTRL_REG); 116 val = readl(io_base + HSEM_CTRL_REG);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 472aedfb07cf..297e26092178 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3110,7 +3110,7 @@ static void handle_stripe(struct stripe_head *sh)
3110 struct r5dev *pdev, *qdev; 3110 struct r5dev *pdev, *qdev;
3111 3111
3112 clear_bit(STRIPE_HANDLE, &sh->state); 3112 clear_bit(STRIPE_HANDLE, &sh->state);
3113 if (test_and_set_bit(STRIPE_ACTIVE, &sh->state)) { 3113 if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) {
3114 /* already being handled, ensure it gets handled 3114 /* already being handled, ensure it gets handled
3115 * again when current action finishes */ 3115 * again when current action finishes */
3116 set_bit(STRIPE_HANDLE, &sh->state); 3116 set_bit(STRIPE_HANDLE, &sh->state);
@@ -3159,10 +3159,14 @@ static void handle_stripe(struct stripe_head *sh)
3159 /* check if the array has lost more than max_degraded devices and, 3159 /* check if the array has lost more than max_degraded devices and,
3160 * if so, some requests might need to be failed. 3160 * if so, some requests might need to be failed.
3161 */ 3161 */
3162 if (s.failed > conf->max_degraded && s.to_read+s.to_write+s.written) 3162 if (s.failed > conf->max_degraded) {
3163 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); 3163 sh->check_state = 0;
3164 if (s.failed > conf->max_degraded && s.syncing) 3164 sh->reconstruct_state = 0;
3165 handle_failed_sync(conf, sh, &s); 3165 if (s.to_read+s.to_write+s.written)
3166 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
3167 if (s.syncing)
3168 handle_failed_sync(conf, sh, &s);
3169 }
3166 3170
3167 /* 3171 /*
3168 * might be able to return some write requests if the parity blocks 3172 * might be able to return some write requests if the parity blocks
@@ -3371,7 +3375,7 @@ finish:
3371 3375
3372 return_io(s.return_bi); 3376 return_io(s.return_bi);
3373 3377
3374 clear_bit(STRIPE_ACTIVE, &sh->state); 3378 clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
3375} 3379}
3376 3380
3377static void raid5_activate_delayed(struct r5conf *conf) 3381static void raid5_activate_delayed(struct r5conf *conf)
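The raid5 change above replaces the plain bit operations with test_and_set_bit_lock()/clear_bit_unlock(), whose acquire/release ordering lets STRIPE_ACTIVE act as a lightweight per-stripe lock rather than a bare flag. A generic sketch of that idiom; MY_FLAG, state and do_exclusive_work() are placeholders, not names from this file:

	if (!test_and_set_bit_lock(MY_FLAG, &state)) {
		/* acquire: loads and stores below cannot move above the lock */
		do_exclusive_work();
		clear_bit_unlock(MY_FLAG, &state); /* release: publishes the work done */
	}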
diff --git a/drivers/media/video/s5k6aa.c b/drivers/media/video/s5k6aa.c
index 2446736b7871..0df7f2a41814 100644
--- a/drivers/media/video/s5k6aa.c
+++ b/drivers/media/video/s5k6aa.c
@@ -19,6 +19,7 @@
19#include <linux/gpio.h> 19#include <linux/gpio.h>
20#include <linux/i2c.h> 20#include <linux/i2c.h>
21#include <linux/media.h> 21#include <linux/media.h>
22#include <linux/module.h>
22#include <linux/regulator/consumer.h> 23#include <linux/regulator/consumer.h>
23#include <linux/slab.h> 24#include <linux/slab.h>
24 25
diff --git a/drivers/mfd/ab5500-core.c b/drivers/mfd/ab5500-core.c
index 4175544b491b..ec10629a0b0b 100644
--- a/drivers/mfd/ab5500-core.c
+++ b/drivers/mfd/ab5500-core.c
@@ -13,6 +13,7 @@
13 * TODO: Event handling with irq_chip. Waiting for PRCMU fw support. 13 * TODO: Event handling with irq_chip. Waiting for PRCMU fw support.
14 */ 14 */
15 15
16#include <linux/module.h>
16#include <linux/mutex.h> 17#include <linux/mutex.h>
17#include <linux/err.h> 18#include <linux/err.h>
18#include <linux/platform_device.h> 19#include <linux/platform_device.h>
diff --git a/drivers/mfd/ab5500-debugfs.c b/drivers/mfd/ab5500-debugfs.c
index 6be1fe6b5f9a..43c0ebb81956 100644
--- a/drivers/mfd/ab5500-debugfs.c
+++ b/drivers/mfd/ab5500-debugfs.c
@@ -4,6 +4,7 @@
4 * Debugfs support for the AB5500 MFD driver 4 * Debugfs support for the AB5500 MFD driver
5 */ 5 */
6 6
7#include <linux/export.h>
7#include <linux/debugfs.h> 8#include <linux/debugfs.h>
8#include <linux/seq_file.h> 9#include <linux/seq_file.h>
9#include <linux/mfd/ab5500/ab5500.h> 10#include <linux/mfd/ab5500/ab5500.h>
diff --git a/drivers/mtd/maps/bcm963xx-flash.c b/drivers/mtd/maps/bcm963xx-flash.c
index 608967fe74c6..736ca10ca9f1 100644
--- a/drivers/mtd/maps/bcm963xx-flash.c
+++ b/drivers/mtd/maps/bcm963xx-flash.c
@@ -21,6 +21,7 @@
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/module.h>
24#include <linux/mtd/map.h> 25#include <linux/mtd/map.h>
25#include <linux/mtd/mtd.h> 26#include <linux/mtd/mtd.h>
26#include <linux/mtd/partitions.h> 27#include <linux/mtd/partitions.h>
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index f4e3d82379d7..7f43cf86d776 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -83,8 +83,10 @@ config DELL_LAPTOP
83 depends on EXPERIMENTAL 83 depends on EXPERIMENTAL
84 depends on BACKLIGHT_CLASS_DEVICE 84 depends on BACKLIGHT_CLASS_DEVICE
85 depends on RFKILL || RFKILL = n 85 depends on RFKILL || RFKILL = n
86 depends on POWER_SUPPLY
87 depends on SERIO_I8042 86 depends on SERIO_I8042
87 select POWER_SUPPLY
88 select LEDS_CLASS
89 select NEW_LEDS
88 default n 90 default n
89 ---help--- 91 ---help---
90 This driver adds support for rfkill and backlight control to Dell 92 This driver adds support for rfkill and backlight control to Dell
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 4cb0d0a3e57b..fc7bbba585ce 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -66,14 +66,16 @@
66static int debug; 66static int debug;
67module_param(debug, int, 0600); 67module_param(debug, int, 0600);
68 68
69#define T1 (HZ/10) 69/* Defaults: these are from the specification */
70#define T2 (HZ/3) 70
71#define N2 3 71#define T1 10 /* 100mS */
72#define T2 34 /* 333mS */
73#define N2 3 /* Retry 3 times */
72 74
73/* Use long timers for testing at low speed with debug on */ 75/* Use long timers for testing at low speed with debug on */
74#ifdef DEBUG_TIMING 76#ifdef DEBUG_TIMING
75#define T1 HZ 77#define T1 100
76#define T2 (2 * HZ) 78#define T2 200
77#endif 79#endif
78 80
79/* 81/*
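With this hunk the n_gsm T1/T2 defaults are no longer jiffy values but hundredths of a second, per the /* 100mS */ and /* 333mS */ comments, so timer arming has to convert at the point of use. A sketch of the expected conversion; this line is an assumption about the surrounding driver code, which is not shown in the hunk:

	mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); /* assumed call site */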
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 5a5d325a3935..634608d2a6d0 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -147,14 +147,12 @@ struct btrfs_inode {
147 * the btrfs file release call will add this inode to the 147 * the btrfs file release call will add this inode to the
148 * ordered operations list so that we make sure to flush out any 148 * ordered operations list so that we make sure to flush out any
149 * new data the application may have written before commit. 149 * new data the application may have written before commit.
150 *
151 * yes, its silly to have a single bitflag, but we might grow more
152 * of these.
153 */ 150 */
154 unsigned ordered_data_close:1; 151 unsigned ordered_data_close:1;
155 unsigned orphan_meta_reserved:1; 152 unsigned orphan_meta_reserved:1;
156 unsigned dummy_inode:1; 153 unsigned dummy_inode:1;
157 unsigned in_defrag:1; 154 unsigned in_defrag:1;
155 unsigned delalloc_meta_reserved:1;
158 156
159 /* 157 /*
160 * always compress this one file 158 * always compress this one file
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 3a1b939c9ae2..5b163572e0ca 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -617,12 +617,14 @@ static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
617static int btrfs_delayed_inode_reserve_metadata( 617static int btrfs_delayed_inode_reserve_metadata(
618 struct btrfs_trans_handle *trans, 618 struct btrfs_trans_handle *trans,
619 struct btrfs_root *root, 619 struct btrfs_root *root,
620 struct inode *inode,
620 struct btrfs_delayed_node *node) 621 struct btrfs_delayed_node *node)
621{ 622{
622 struct btrfs_block_rsv *src_rsv; 623 struct btrfs_block_rsv *src_rsv;
623 struct btrfs_block_rsv *dst_rsv; 624 struct btrfs_block_rsv *dst_rsv;
624 u64 num_bytes; 625 u64 num_bytes;
625 int ret; 626 int ret;
627 int release = false;
626 628
627 src_rsv = trans->block_rsv; 629 src_rsv = trans->block_rsv;
628 dst_rsv = &root->fs_info->delayed_block_rsv; 630 dst_rsv = &root->fs_info->delayed_block_rsv;
@@ -652,12 +654,65 @@ static int btrfs_delayed_inode_reserve_metadata(
652 if (!ret) 654 if (!ret)
653 node->bytes_reserved = num_bytes; 655 node->bytes_reserved = num_bytes;
654 return ret; 656 return ret;
657 } else if (src_rsv == &root->fs_info->delalloc_block_rsv) {
658 spin_lock(&BTRFS_I(inode)->lock);
659 if (BTRFS_I(inode)->delalloc_meta_reserved) {
660 BTRFS_I(inode)->delalloc_meta_reserved = 0;
661 spin_unlock(&BTRFS_I(inode)->lock);
662 release = true;
663 goto migrate;
664 }
665 spin_unlock(&BTRFS_I(inode)->lock);
666
667 /* Ok we didn't have space pre-reserved. This shouldn't happen
668 * too often but it can happen if we do delalloc to an existing
669 * inode which gets dirtied because of the time update, and then
670 * isn't touched again until after the transaction commits and
671 * then we try to write out the data. First try to be nice and
672 * reserve something strictly for us. If not be a pain and try
673 * to steal from the delalloc block rsv.
674 */
675 ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
676 if (!ret)
677 goto out;
678
679 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
680 if (!ret)
681 goto out;
682
683 /*
684 * Ok this is a problem, let's just steal from the global rsv
685 * since this really shouldn't happen that often.
686 */
687 WARN_ON(1);
688 ret = btrfs_block_rsv_migrate(&root->fs_info->global_block_rsv,
689 dst_rsv, num_bytes);
690 goto out;
655 } 691 }
656 692
693migrate:
657 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes); 694 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
695
696out:
697 /*
698 * Migrate only takes a reservation, it doesn't touch the size of the
699 * block_rsv. This is to simplify people who don't normally have things
700 * migrated from their block rsv. If they go to release their
701 * reservation, that will decrease the size as well, so if migrate
702 * reduced size we'd end up with a negative size. But for the
703 * delalloc_meta_reserved stuff we will only know to drop 1 reservation,
704 * but we could in fact do this reserve/migrate dance several times
705 * between the time we did the original reservation and we'd clean it
706 * up. So to take care of this, release the space for the meta
707 * reservation here. I think it may be time for a documentation page on
708 * how block rsvs. work.
709 */
658 if (!ret) 710 if (!ret)
659 node->bytes_reserved = num_bytes; 711 node->bytes_reserved = num_bytes;
660 712
713 if (release)
714 btrfs_block_rsv_release(root, src_rsv, num_bytes);
715
661 return ret; 716 return ret;
662} 717}
663 718
@@ -1708,7 +1763,8 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1708 goto release_node; 1763 goto release_node;
1709 } 1764 }
1710 1765
1711 ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node); 1766 ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
1767 delayed_node);
1712 if (ret) 1768 if (ret)
1713 goto release_node; 1769 goto release_node;
1714 1770
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 102c176fc29c..62afe5c5694e 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1890,31 +1890,32 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1890 u64 features; 1890 u64 features;
1891 struct btrfs_key location; 1891 struct btrfs_key location;
1892 struct buffer_head *bh; 1892 struct buffer_head *bh;
1893 struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root), 1893 struct btrfs_super_block *disk_super;
1894 GFP_NOFS);
1895 struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
1896 GFP_NOFS);
1897 struct btrfs_root *tree_root = btrfs_sb(sb); 1894 struct btrfs_root *tree_root = btrfs_sb(sb);
1898 struct btrfs_fs_info *fs_info = NULL; 1895 struct btrfs_fs_info *fs_info = tree_root->fs_info;
1899 struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root), 1896 struct btrfs_root *extent_root;
1900 GFP_NOFS); 1897 struct btrfs_root *csum_root;
1901 struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root), 1898 struct btrfs_root *chunk_root;
1902 GFP_NOFS); 1899 struct btrfs_root *dev_root;
1903 struct btrfs_root *log_tree_root; 1900 struct btrfs_root *log_tree_root;
1904
1905 int ret; 1901 int ret;
1906 int err = -EINVAL; 1902 int err = -EINVAL;
1907 int num_backups_tried = 0; 1903 int num_backups_tried = 0;
1908 int backup_index = 0; 1904 int backup_index = 0;
1909 1905
1910 struct btrfs_super_block *disk_super; 1906 extent_root = fs_info->extent_root =
1907 kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
1908 csum_root = fs_info->csum_root =
1909 kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
1910 chunk_root = fs_info->chunk_root =
1911 kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
1912 dev_root = fs_info->dev_root =
1913 kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
1911 1914
1912 if (!extent_root || !tree_root || !tree_root->fs_info || 1915 if (!extent_root || !csum_root || !chunk_root || !dev_root) {
1913 !chunk_root || !dev_root || !csum_root) {
1914 err = -ENOMEM; 1916 err = -ENOMEM;
1915 goto fail; 1917 goto fail;
1916 } 1918 }
1917 fs_info = tree_root->fs_info;
1918 1919
1919 ret = init_srcu_struct(&fs_info->subvol_srcu); 1920 ret = init_srcu_struct(&fs_info->subvol_srcu);
1920 if (ret) { 1921 if (ret) {
@@ -1954,12 +1955,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1954 mutex_init(&fs_info->reloc_mutex); 1955 mutex_init(&fs_info->reloc_mutex);
1955 1956
1956 init_completion(&fs_info->kobj_unregister); 1957 init_completion(&fs_info->kobj_unregister);
1957 fs_info->tree_root = tree_root;
1958 fs_info->extent_root = extent_root;
1959 fs_info->csum_root = csum_root;
1960 fs_info->chunk_root = chunk_root;
1961 fs_info->dev_root = dev_root;
1962 fs_info->fs_devices = fs_devices;
1963 INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots); 1958 INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
1964 INIT_LIST_HEAD(&fs_info->space_info); 1959 INIT_LIST_HEAD(&fs_info->space_info);
1965 btrfs_mapping_init(&fs_info->mapping_tree); 1960 btrfs_mapping_init(&fs_info->mapping_tree);
@@ -2465,21 +2460,20 @@ fail_sb_buffer:
2465 btrfs_stop_workers(&fs_info->caching_workers); 2460 btrfs_stop_workers(&fs_info->caching_workers);
2466fail_alloc: 2461fail_alloc:
2467fail_iput: 2462fail_iput:
2463 btrfs_mapping_tree_free(&fs_info->mapping_tree);
2464
2468 invalidate_inode_pages2(fs_info->btree_inode->i_mapping); 2465 invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2469 iput(fs_info->btree_inode); 2466 iput(fs_info->btree_inode);
2470
2471 btrfs_close_devices(fs_info->fs_devices);
2472 btrfs_mapping_tree_free(&fs_info->mapping_tree);
2473fail_bdi: 2467fail_bdi:
2474 bdi_destroy(&fs_info->bdi); 2468 bdi_destroy(&fs_info->bdi);
2475fail_srcu: 2469fail_srcu:
2476 cleanup_srcu_struct(&fs_info->subvol_srcu); 2470 cleanup_srcu_struct(&fs_info->subvol_srcu);
2477fail: 2471fail:
2472 btrfs_close_devices(fs_info->fs_devices);
2478 free_fs_info(fs_info); 2473 free_fs_info(fs_info);
2479 return ERR_PTR(err); 2474 return ERR_PTR(err);
2480 2475
2481recovery_tree_root: 2476recovery_tree_root:
2482
2483 if (!btrfs_test_opt(tree_root, RECOVERY)) 2477 if (!btrfs_test_opt(tree_root, RECOVERY))
2484 goto fail_tree_roots; 2478 goto fail_tree_roots;
2485 2479
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 9879bd474632..b232150b5b6b 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3797,16 +3797,16 @@ void btrfs_free_block_rsv(struct btrfs_root *root,
3797 kfree(rsv); 3797 kfree(rsv);
3798} 3798}
3799 3799
3800int btrfs_block_rsv_add(struct btrfs_root *root, 3800static inline int __block_rsv_add(struct btrfs_root *root,
3801 struct btrfs_block_rsv *block_rsv, 3801 struct btrfs_block_rsv *block_rsv,
3802 u64 num_bytes) 3802 u64 num_bytes, int flush)
3803{ 3803{
3804 int ret; 3804 int ret;
3805 3805
3806 if (num_bytes == 0) 3806 if (num_bytes == 0)
3807 return 0; 3807 return 0;
3808 3808
3809 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1); 3809 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
3810 if (!ret) { 3810 if (!ret) {
3811 block_rsv_add_bytes(block_rsv, num_bytes, 1); 3811 block_rsv_add_bytes(block_rsv, num_bytes, 1);
3812 return 0; 3812 return 0;
@@ -3815,22 +3815,18 @@ int btrfs_block_rsv_add(struct btrfs_root *root,
3815 return ret; 3815 return ret;
3816} 3816}
3817 3817
3818int btrfs_block_rsv_add(struct btrfs_root *root,
3819 struct btrfs_block_rsv *block_rsv,
3820 u64 num_bytes)
3821{
3822 return __block_rsv_add(root, block_rsv, num_bytes, 1);
3823}
3824
3818int btrfs_block_rsv_add_noflush(struct btrfs_root *root, 3825int btrfs_block_rsv_add_noflush(struct btrfs_root *root,
3819 struct btrfs_block_rsv *block_rsv, 3826 struct btrfs_block_rsv *block_rsv,
3820 u64 num_bytes) 3827 u64 num_bytes)
3821{ 3828{
3822 int ret; 3829 return __block_rsv_add(root, block_rsv, num_bytes, 0);
3823
3824 if (num_bytes == 0)
3825 return 0;
3826
3827 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 0);
3828 if (!ret) {
3829 block_rsv_add_bytes(block_rsv, num_bytes, 1);
3830 return 0;
3831 }
3832
3833 return ret;
3834} 3830}
3835 3831
3836int btrfs_block_rsv_check(struct btrfs_root *root, 3832int btrfs_block_rsv_check(struct btrfs_root *root,
@@ -4064,23 +4060,30 @@ int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
4064 */ 4060 */
4065static unsigned drop_outstanding_extent(struct inode *inode) 4061static unsigned drop_outstanding_extent(struct inode *inode)
4066{ 4062{
4063 unsigned drop_inode_space = 0;
4067 unsigned dropped_extents = 0; 4064 unsigned dropped_extents = 0;
4068 4065
4069 BUG_ON(!BTRFS_I(inode)->outstanding_extents); 4066 BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4070 BTRFS_I(inode)->outstanding_extents--; 4067 BTRFS_I(inode)->outstanding_extents--;
4071 4068
4069 if (BTRFS_I(inode)->outstanding_extents == 0 &&
4070 BTRFS_I(inode)->delalloc_meta_reserved) {
4071 drop_inode_space = 1;
4072 BTRFS_I(inode)->delalloc_meta_reserved = 0;
4073 }
4074
4072 /* 4075 /*
4073 * If we have more or the same amount of outsanding extents than we have 4076 * If we have more or the same amount of outsanding extents than we have
4074 * reserved then we need to leave the reserved extents count alone. 4077 * reserved then we need to leave the reserved extents count alone.
4075 */ 4078 */
4076 if (BTRFS_I(inode)->outstanding_extents >= 4079 if (BTRFS_I(inode)->outstanding_extents >=
4077 BTRFS_I(inode)->reserved_extents) 4080 BTRFS_I(inode)->reserved_extents)
4078 return 0; 4081 return drop_inode_space;
4079 4082
4080 dropped_extents = BTRFS_I(inode)->reserved_extents - 4083 dropped_extents = BTRFS_I(inode)->reserved_extents -
4081 BTRFS_I(inode)->outstanding_extents; 4084 BTRFS_I(inode)->outstanding_extents;
4082 BTRFS_I(inode)->reserved_extents -= dropped_extents; 4085 BTRFS_I(inode)->reserved_extents -= dropped_extents;
4083 return dropped_extents; 4086 return dropped_extents + drop_inode_space;
4084} 4087}
4085 4088
4086/** 4089/**
@@ -4166,9 +4169,18 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4166 nr_extents = BTRFS_I(inode)->outstanding_extents - 4169 nr_extents = BTRFS_I(inode)->outstanding_extents -
4167 BTRFS_I(inode)->reserved_extents; 4170 BTRFS_I(inode)->reserved_extents;
4168 BTRFS_I(inode)->reserved_extents += nr_extents; 4171 BTRFS_I(inode)->reserved_extents += nr_extents;
4172 }
4169 4173
4170 to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents); 4174 /*
4175 * Add an item to reserve for updating the inode when we complete the
4176 * delalloc io.
4177 */
4178 if (!BTRFS_I(inode)->delalloc_meta_reserved) {
4179 nr_extents++;
4180 BTRFS_I(inode)->delalloc_meta_reserved = 1;
4171 } 4181 }
4182
4183 to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
4172 to_reserve += calc_csum_metadata_size(inode, num_bytes, 1); 4184 to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
4173 spin_unlock(&BTRFS_I(inode)->lock); 4185 spin_unlock(&BTRFS_I(inode)->lock);
4174 4186
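Working through the new accounting above: on the first delalloc reservation against an inode with nothing reserved yet, nr_extents comes out as 1 for the newly outstanding extent plus 1 for the delalloc_meta_reserved inode-update item, so to_reserve is btrfs_calc_trans_metadata_size(root, 2) plus the csum metadata for the range; drop_outstanding_extent() later hands that extra item back (drop_inode_space) once the last outstanding extent completes.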
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 7a15fcfb3e1f..181760f9d2ab 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -537,6 +537,13 @@ static int io_ctl_read_entry(struct io_ctl *io_ctl,
537 struct btrfs_free_space *entry, u8 *type) 537 struct btrfs_free_space *entry, u8 *type)
538{ 538{
539 struct btrfs_free_space_entry *e; 539 struct btrfs_free_space_entry *e;
540 int ret;
541
542 if (!io_ctl->cur) {
543 ret = io_ctl_check_crc(io_ctl, io_ctl->index);
544 if (ret)
545 return ret;
546 }
540 547
541 e = io_ctl->cur; 548 e = io_ctl->cur;
542 entry->offset = le64_to_cpu(e->offset); 549 entry->offset = le64_to_cpu(e->offset);
@@ -550,10 +557,7 @@ static int io_ctl_read_entry(struct io_ctl *io_ctl,
550 557
551 io_ctl_unmap_page(io_ctl); 558 io_ctl_unmap_page(io_ctl);
552 559
553 if (io_ctl->index >= io_ctl->num_pages) 560 return 0;
554 return 0;
555
556 return io_ctl_check_crc(io_ctl, io_ctl->index);
557} 561}
558 562
559static int io_ctl_read_bitmap(struct io_ctl *io_ctl, 563static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
@@ -561,9 +565,6 @@ static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
561{ 565{
562 int ret; 566 int ret;
563 567
564 if (io_ctl->cur && io_ctl->cur != io_ctl->orig)
565 io_ctl_unmap_page(io_ctl);
566
567 ret = io_ctl_check_crc(io_ctl, io_ctl->index); 568 ret = io_ctl_check_crc(io_ctl, io_ctl->index);
568 if (ret) 569 if (ret)
569 return ret; 570 return ret;
@@ -699,6 +700,8 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
699 num_entries--; 700 num_entries--;
700 } 701 }
701 702
703 io_ctl_unmap_page(&io_ctl);
704
702 /* 705 /*
703 * We add the bitmaps at the end of the entries in order that 706 * We add the bitmaps at the end of the entries in order that
704 * the bitmap entries are added to the cache. 707 * the bitmap entries are added to the cache.
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 53dcbdf446cd..f8962a957d65 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -398,6 +398,8 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
398 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; 398 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
399 struct btrfs_path *path; 399 struct btrfs_path *path;
400 struct inode *inode; 400 struct inode *inode;
401 struct btrfs_block_rsv *rsv;
402 u64 num_bytes;
401 u64 alloc_hint = 0; 403 u64 alloc_hint = 0;
402 int ret; 404 int ret;
403 int prealloc; 405 int prealloc;
@@ -421,11 +423,26 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
421 if (!path) 423 if (!path)
422 return -ENOMEM; 424 return -ENOMEM;
423 425
426 rsv = trans->block_rsv;
427 trans->block_rsv = &root->fs_info->trans_block_rsv;
428
429 num_bytes = trans->bytes_reserved;
430 /*
431 * 1 item for inode item insertion if need
432 * 3 items for inode item update (in the worst case)
433 * 1 item for free space object
434 * 3 items for pre-allocation
435 */
436 trans->bytes_reserved = btrfs_calc_trans_metadata_size(root, 8);
437 ret = btrfs_block_rsv_add_noflush(root, trans->block_rsv,
438 trans->bytes_reserved);
439 if (ret)
440 goto out;
424again: 441again:
425 inode = lookup_free_ino_inode(root, path); 442 inode = lookup_free_ino_inode(root, path);
426 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { 443 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
427 ret = PTR_ERR(inode); 444 ret = PTR_ERR(inode);
428 goto out; 445 goto out_release;
429 } 446 }
430 447
431 if (IS_ERR(inode)) { 448 if (IS_ERR(inode)) {
@@ -434,7 +451,7 @@ again:
434 451
435 ret = create_free_ino_inode(root, trans, path); 452 ret = create_free_ino_inode(root, trans, path);
436 if (ret) 453 if (ret)
437 goto out; 454 goto out_release;
438 goto again; 455 goto again;
439 } 456 }
440 457
@@ -477,11 +494,14 @@ again:
477 } 494 }
478 btrfs_free_reserved_data_space(inode, prealloc); 495 btrfs_free_reserved_data_space(inode, prealloc);
479 496
497 ret = btrfs_write_out_ino_cache(root, trans, path);
480out_put: 498out_put:
481 iput(inode); 499 iput(inode);
500out_release:
501 btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
482out: 502out:
483 if (ret == 0) 503 trans->block_rsv = rsv;
484 ret = btrfs_write_out_ino_cache(root, trans, path); 504 trans->bytes_reserved = num_bytes;
485 505
486 btrfs_free_path(path); 506 btrfs_free_path(path);
487 return ret; 507 return ret;
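The size of the reservation taken in btrfs_save_ino_cache() follows from its own comment: 1 item for a possible inode item insertion, 3 for the worst-case inode item update, 1 for the free space object and 3 for pre-allocation, i.e. 1 + 3 + 1 + 3 = 8, hence btrfs_calc_trans_metadata_size(root, 8); it is taken with btrfs_block_rsv_add_noflush(), presumably so that writing out the inode cache does not itself trigger flushing.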
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 966ddcc4c63d..116ab67a06df 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -93,6 +93,8 @@ static noinline int cow_file_range(struct inode *inode,
93 struct page *locked_page, 93 struct page *locked_page,
94 u64 start, u64 end, int *page_started, 94 u64 start, u64 end, int *page_started,
95 unsigned long *nr_written, int unlock); 95 unsigned long *nr_written, int unlock);
96static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
97 struct btrfs_root *root, struct inode *inode);
96 98
97static int btrfs_init_inode_security(struct btrfs_trans_handle *trans, 99static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
98 struct inode *inode, struct inode *dir, 100 struct inode *inode, struct inode *dir,
@@ -1741,7 +1743,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1741 trans = btrfs_join_transaction(root); 1743 trans = btrfs_join_transaction(root);
1742 BUG_ON(IS_ERR(trans)); 1744 BUG_ON(IS_ERR(trans));
1743 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 1745 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1744 ret = btrfs_update_inode(trans, root, inode); 1746 ret = btrfs_update_inode_fallback(trans, root, inode);
1745 BUG_ON(ret); 1747 BUG_ON(ret);
1746 } 1748 }
1747 goto out; 1749 goto out;
@@ -1791,7 +1793,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1791 1793
1792 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); 1794 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1793 if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 1795 if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
1794 ret = btrfs_update_inode(trans, root, inode); 1796 ret = btrfs_update_inode_fallback(trans, root, inode);
1795 BUG_ON(ret); 1797 BUG_ON(ret);
1796 } 1798 }
1797 ret = 0; 1799 ret = 0;
@@ -2199,6 +2201,9 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
2199 if (ret) 2201 if (ret)
2200 goto out; 2202 goto out;
2201 } 2203 }
2204 /* release the path since we're done with it */
2205 btrfs_release_path(path);
2206
2202 root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE; 2207 root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
2203 2208
2204 if (root->orphan_block_rsv) 2209 if (root->orphan_block_rsv)
@@ -2426,7 +2431,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
2426/* 2431/*
2427 * copy everything in the in-memory inode into the btree. 2432 * copy everything in the in-memory inode into the btree.
2428 */ 2433 */
2429noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, 2434static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
2430 struct btrfs_root *root, struct inode *inode) 2435 struct btrfs_root *root, struct inode *inode)
2431{ 2436{
2432 struct btrfs_inode_item *inode_item; 2437 struct btrfs_inode_item *inode_item;
@@ -2434,21 +2439,6 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2434 struct extent_buffer *leaf; 2439 struct extent_buffer *leaf;
2435 int ret; 2440 int ret;
2436 2441
2437 /*
2438 * If the inode is a free space inode, we can deadlock during commit
2439 * if we put it into the delayed code.
2440 *
2441 * The data relocation inode should also be directly updated
2442 * without delay
2443 */
2444 if (!btrfs_is_free_space_inode(root, inode)
2445 && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
2446 ret = btrfs_delayed_update_inode(trans, root, inode);
2447 if (!ret)
2448 btrfs_set_inode_last_trans(trans, inode);
2449 return ret;
2450 }
2451
2452 path = btrfs_alloc_path(); 2442 path = btrfs_alloc_path();
2453 if (!path) 2443 if (!path)
2454 return -ENOMEM; 2444 return -ENOMEM;
@@ -2477,6 +2467,43 @@ failed:
2477} 2467}
2478 2468
2479/* 2469/*
2470 * copy everything in the in-memory inode into the btree.
2471 */
2472noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2473 struct btrfs_root *root, struct inode *inode)
2474{
2475 int ret;
2476
2477 /*
2478 * If the inode is a free space inode, we can deadlock during commit
2479 * if we put it into the delayed code.
2480 *
2481 * The data relocation inode should also be directly updated
2482 * without delay
2483 */
2484 if (!btrfs_is_free_space_inode(root, inode)
2485 && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
2486 ret = btrfs_delayed_update_inode(trans, root, inode);
2487 if (!ret)
2488 btrfs_set_inode_last_trans(trans, inode);
2489 return ret;
2490 }
2491
2492 return btrfs_update_inode_item(trans, root, inode);
2493}
2494
2495static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
2496 struct btrfs_root *root, struct inode *inode)
2497{
2498 int ret;
2499
2500 ret = btrfs_update_inode(trans, root, inode);
2501 if (ret == -ENOSPC)
2502 return btrfs_update_inode_item(trans, root, inode);
2503 return ret;
2504}
2505
2506/*
2480 * unlink helper that gets used here in inode.c and in the tree logging 2507 * unlink helper that gets used here in inode.c and in the tree logging
2481 * recovery code. It remove a link in a directory with a given name, and 2508 * recovery code. It remove a link in a directory with a given name, and
2482 * also drops the back refs in the inode to the directory 2509 * also drops the back refs in the inode to the directory
@@ -5632,7 +5659,7 @@ again:
5632 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) { 5659 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
5633 ret = btrfs_ordered_update_i_size(inode, 0, ordered); 5660 ret = btrfs_ordered_update_i_size(inode, 0, ordered);
5634 if (!ret) 5661 if (!ret)
5635 err = btrfs_update_inode(trans, root, inode); 5662 err = btrfs_update_inode_fallback(trans, root, inode);
5636 goto out; 5663 goto out;
5637 } 5664 }
5638 5665
@@ -5670,7 +5697,7 @@ again:
5670 add_pending_csums(trans, inode, ordered->file_offset, &ordered->list); 5697 add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
5671 ret = btrfs_ordered_update_i_size(inode, 0, ordered); 5698 ret = btrfs_ordered_update_i_size(inode, 0, ordered);
5672 if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) 5699 if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
5673 btrfs_update_inode(trans, root, inode); 5700 btrfs_update_inode_fallback(trans, root, inode);
5674 ret = 0; 5701 ret = 0;
5675out_unlock: 5702out_unlock:
5676 unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset, 5703 unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
@@ -6529,14 +6556,16 @@ end_trans:
6529 ret = btrfs_orphan_del(NULL, inode); 6556 ret = btrfs_orphan_del(NULL, inode);
6530 } 6557 }
6531 6558
6532 trans->block_rsv = &root->fs_info->trans_block_rsv; 6559 if (trans) {
6533 ret = btrfs_update_inode(trans, root, inode); 6560 trans->block_rsv = &root->fs_info->trans_block_rsv;
6534 if (ret && !err) 6561 ret = btrfs_update_inode(trans, root, inode);
6535 err = ret; 6562 if (ret && !err)
6563 err = ret;
6536 6564
6537 nr = trans->blocks_used; 6565 nr = trans->blocks_used;
6538 ret = btrfs_end_transaction_throttle(trans, root); 6566 ret = btrfs_end_transaction_throttle(trans, root);
6539 btrfs_btree_balance_dirty(root, nr); 6567 btrfs_btree_balance_dirty(root, nr);
6568 }
6540 6569
6541out: 6570out:
6542 btrfs_free_block_rsv(root, rsv); 6571 btrfs_free_block_rsv(root, rsv);
@@ -6605,6 +6634,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
6605 ei->orphan_meta_reserved = 0; 6634 ei->orphan_meta_reserved = 0;
6606 ei->dummy_inode = 0; 6635 ei->dummy_inode = 0;
6607 ei->in_defrag = 0; 6636 ei->in_defrag = 0;
6637 ei->delalloc_meta_reserved = 0;
6608 ei->force_compress = BTRFS_COMPRESS_NONE; 6638 ei->force_compress = BTRFS_COMPRESS_NONE;
6609 6639
6610 ei->delayed_node = NULL; 6640 ei->delayed_node = NULL;
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 24d654ce7a06..dff29d5e151a 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1174,6 +1174,8 @@ static int clone_backref_node(struct btrfs_trans_handle *trans,
1174 list_add_tail(&new_edge->list[UPPER], 1174 list_add_tail(&new_edge->list[UPPER],
1175 &new_node->lower); 1175 &new_node->lower);
1176 } 1176 }
1177 } else {
1178 list_add_tail(&new_node->lower, &cache->leaves);
1177 } 1179 }
1178 1180
1179 rb_node = tree_insert(&cache->rb_root, new_node->bytenr, 1181 rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index ed11d3866afd..f4190f22edfb 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -944,50 +944,18 @@ static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer)
944static int scrub_submit(struct scrub_dev *sdev) 944static int scrub_submit(struct scrub_dev *sdev)
945{ 945{
946 struct scrub_bio *sbio; 946 struct scrub_bio *sbio;
947 struct bio *bio;
948 int i;
949 947
950 if (sdev->curr == -1) 948 if (sdev->curr == -1)
951 return 0; 949 return 0;
952 950
953 sbio = sdev->bios[sdev->curr]; 951 sbio = sdev->bios[sdev->curr];
954
955 bio = bio_alloc(GFP_NOFS, sbio->count);
956 if (!bio)
957 goto nomem;
958
959 bio->bi_private = sbio;
960 bio->bi_end_io = scrub_bio_end_io;
961 bio->bi_bdev = sdev->dev->bdev;
962 bio->bi_sector = sbio->physical >> 9;
963
964 for (i = 0; i < sbio->count; ++i) {
965 struct page *page;
966 int ret;
967
968 page = alloc_page(GFP_NOFS);
969 if (!page)
970 goto nomem;
971
972 ret = bio_add_page(bio, page, PAGE_SIZE, 0);
973 if (!ret) {
974 __free_page(page);
975 goto nomem;
976 }
977 }
978
979 sbio->err = 0; 952 sbio->err = 0;
980 sdev->curr = -1; 953 sdev->curr = -1;
981 atomic_inc(&sdev->in_flight); 954 atomic_inc(&sdev->in_flight);
982 955
983 submit_bio(READ, bio); 956 submit_bio(READ, sbio->bio);
984 957
985 return 0; 958 return 0;
986
987nomem:
988 scrub_free_bio(bio);
989
990 return -ENOMEM;
991} 959}
992 960
993static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len, 961static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
@@ -995,6 +963,8 @@ static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
995 u8 *csum, int force) 963 u8 *csum, int force)
996{ 964{
997 struct scrub_bio *sbio; 965 struct scrub_bio *sbio;
966 struct page *page;
967 int ret;
998 968
999again: 969again:
1000 /* 970 /*
@@ -1015,12 +985,22 @@ again:
1015 } 985 }
1016 sbio = sdev->bios[sdev->curr]; 986 sbio = sdev->bios[sdev->curr];
1017 if (sbio->count == 0) { 987 if (sbio->count == 0) {
988 struct bio *bio;
989
1018 sbio->physical = physical; 990 sbio->physical = physical;
1019 sbio->logical = logical; 991 sbio->logical = logical;
992 bio = bio_alloc(GFP_NOFS, SCRUB_PAGES_PER_BIO);
993 if (!bio)
994 return -ENOMEM;
995
996 bio->bi_private = sbio;
997 bio->bi_end_io = scrub_bio_end_io;
998 bio->bi_bdev = sdev->dev->bdev;
999 bio->bi_sector = sbio->physical >> 9;
1000 sbio->err = 0;
1001 sbio->bio = bio;
1020 } else if (sbio->physical + sbio->count * PAGE_SIZE != physical || 1002 } else if (sbio->physical + sbio->count * PAGE_SIZE != physical ||
1021 sbio->logical + sbio->count * PAGE_SIZE != logical) { 1003 sbio->logical + sbio->count * PAGE_SIZE != logical) {
1022 int ret;
1023
1024 ret = scrub_submit(sdev); 1004 ret = scrub_submit(sdev);
1025 if (ret) 1005 if (ret)
1026 return ret; 1006 return ret;
@@ -1030,6 +1010,20 @@ again:
1030 sbio->spag[sbio->count].generation = gen; 1010 sbio->spag[sbio->count].generation = gen;
1031 sbio->spag[sbio->count].have_csum = 0; 1011 sbio->spag[sbio->count].have_csum = 0;
1032 sbio->spag[sbio->count].mirror_num = mirror_num; 1012 sbio->spag[sbio->count].mirror_num = mirror_num;
1013
1014 page = alloc_page(GFP_NOFS);
1015 if (!page)
1016 return -ENOMEM;
1017
1018 ret = bio_add_page(sbio->bio, page, PAGE_SIZE, 0);
1019 if (!ret) {
1020 __free_page(page);
1021 ret = scrub_submit(sdev);
1022 if (ret)
1023 return ret;
1024 goto again;
1025 }
1026
1033 if (csum) { 1027 if (csum) {
1034 sbio->spag[sbio->count].have_csum = 1; 1028 sbio->spag[sbio->count].have_csum = 1;
1035 memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size); 1029 memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size);
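Editor's note: the scrub rework inverts where the bio is built. Instead of scrub_submit() allocating a bio plus all of its pages in one go (and unwinding on failure), scrub_page() now allocates the bio lazily when a fresh scrub_bio starts and adds one page per call, so scrub_submit() merely dispatches the prebuilt sbio->bio. A condensed sketch of the per-page path, reassembled from the hunks above (declarations and error handling as in the hunk):

    if (sbio->count == 0) {                         /* first page of this sbio */
            struct bio *bio = bio_alloc(GFP_NOFS, SCRUB_PAGES_PER_BIO);
            if (!bio)
                    return -ENOMEM;
            bio->bi_private = sbio;
            bio->bi_end_io  = scrub_bio_end_io;
            bio->bi_bdev    = sdev->dev->bdev;
            bio->bi_sector  = sbio->physical >> 9;  /* byte offset -> 512B sector */
            sbio->err = 0;
            sbio->bio = bio;
    }
    /* ... fill in sbio->spag[sbio->count] ... */
    page = alloc_page(GFP_NOFS);
    if (!page)
            return -ENOMEM;
    if (!bio_add_page(sbio->bio, page, PAGE_SIZE, 0)) {
            __free_page(page);                      /* bio is full: flush and retry */
            ret = scrub_submit(sdev);
            if (ret)
                    return ret;
            goto again;
    }

One consequence is that an allocation failure now surfaces as -ENOMEM from scrub_page() itself rather than from a later submit.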
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 57080dffdfc6..8bd9d6d0e07a 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -197,7 +197,7 @@ static match_table_t tokens = {
197 {Opt_subvolrootid, "subvolrootid=%d"}, 197 {Opt_subvolrootid, "subvolrootid=%d"},
198 {Opt_defrag, "autodefrag"}, 198 {Opt_defrag, "autodefrag"},
199 {Opt_inode_cache, "inode_cache"}, 199 {Opt_inode_cache, "inode_cache"},
200 {Opt_no_space_cache, "no_space_cache"}, 200 {Opt_no_space_cache, "nospace_cache"},
201 {Opt_recovery, "recovery"}, 201 {Opt_recovery, "recovery"},
202 {Opt_err, NULL}, 202 {Opt_err, NULL},
203}; 203};
@@ -448,6 +448,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
448 token = match_token(p, tokens, args); 448 token = match_token(p, tokens, args);
449 switch (token) { 449 switch (token) {
450 case Opt_subvol: 450 case Opt_subvol:
451 kfree(*subvol_name);
451 *subvol_name = match_strdup(&args[0]); 452 *subvol_name = match_strdup(&args[0]);
452 break; 453 break;
453 case Opt_subvolid: 454 case Opt_subvolid:
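Editor's note: the added kfree(*subvol_name) matters when "subvol=" appears more than once in the option string; match_strdup() would otherwise overwrite the pointer from the earlier occurrence and leak it. kfree(NULL) is a no-op, so the first pass through this case is unaffected. The pattern in isolation:

    case Opt_subvol:
            kfree(*subvol_name);                    /* drop any earlier subvol= value */
            *subvol_name = match_strdup(&args[0]);  /* may be NULL on allocation failure */
            break;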
@@ -710,7 +711,7 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
710 if (btrfs_test_opt(root, SPACE_CACHE)) 711 if (btrfs_test_opt(root, SPACE_CACHE))
711 seq_puts(seq, ",space_cache"); 712 seq_puts(seq, ",space_cache");
712 else 713 else
713 seq_puts(seq, ",no_space_cache"); 714 seq_puts(seq, ",nospace_cache");
714 if (btrfs_test_opt(root, CLEAR_CACHE)) 715 if (btrfs_test_opt(root, CLEAR_CACHE))
715 seq_puts(seq, ",clear_cache"); 716 seq_puts(seq, ",clear_cache");
716 if (btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED)) 717 if (btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED))
@@ -890,7 +891,6 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
890 struct super_block *s; 891 struct super_block *s;
891 struct dentry *root; 892 struct dentry *root;
892 struct btrfs_fs_devices *fs_devices = NULL; 893 struct btrfs_fs_devices *fs_devices = NULL;
893 struct btrfs_root *tree_root = NULL;
894 struct btrfs_fs_info *fs_info = NULL; 894 struct btrfs_fs_info *fs_info = NULL;
895 fmode_t mode = FMODE_READ; 895 fmode_t mode = FMODE_READ;
896 char *subvol_name = NULL; 896 char *subvol_name = NULL;
@@ -904,8 +904,10 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
904 error = btrfs_parse_early_options(data, mode, fs_type, 904 error = btrfs_parse_early_options(data, mode, fs_type,
905 &subvol_name, &subvol_objectid, 905 &subvol_name, &subvol_objectid,
906 &subvol_rootid, &fs_devices); 906 &subvol_rootid, &fs_devices);
907 if (error) 907 if (error) {
908 kfree(subvol_name);
908 return ERR_PTR(error); 909 return ERR_PTR(error);
910 }
909 911
910 if (subvol_name) { 912 if (subvol_name) {
911 root = mount_subvol(subvol_name, flags, device_name, data); 913 root = mount_subvol(subvol_name, flags, device_name, data);
@@ -917,15 +919,6 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
917 if (error) 919 if (error)
918 return ERR_PTR(error); 920 return ERR_PTR(error);
919 921
920 error = btrfs_open_devices(fs_devices, mode, fs_type);
921 if (error)
922 return ERR_PTR(error);
923
924 if (!(flags & MS_RDONLY) && fs_devices->rw_devices == 0) {
925 error = -EACCES;
926 goto error_close_devices;
927 }
928
929 /* 922 /*
930 * Setup a dummy root and fs_info for test/set super. This is because 923 * Setup a dummy root and fs_info for test/set super. This is because
931 * we don't actually fill this stuff out until open_ctree, but we need 924 * we don't actually fill this stuff out until open_ctree, but we need
@@ -933,24 +926,36 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
933 * then open_ctree will properly initialize everything later. 926 * then open_ctree will properly initialize everything later.
934 */ 927 */
935 fs_info = kzalloc(sizeof(struct btrfs_fs_info), GFP_NOFS); 928 fs_info = kzalloc(sizeof(struct btrfs_fs_info), GFP_NOFS);
936 tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS); 929 if (!fs_info)
937 if (!fs_info || !tree_root) { 930 return ERR_PTR(-ENOMEM);
931
932 fs_info->tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
933 if (!fs_info->tree_root) {
938 error = -ENOMEM; 934 error = -ENOMEM;
939 goto error_close_devices; 935 goto error_fs_info;
940 } 936 }
941 fs_info->tree_root = tree_root; 937 fs_info->tree_root->fs_info = fs_info;
942 fs_info->fs_devices = fs_devices; 938 fs_info->fs_devices = fs_devices;
943 tree_root->fs_info = fs_info;
944 939
945 fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS); 940 fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS);
946 fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS); 941 fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS);
947 if (!fs_info->super_copy || !fs_info->super_for_commit) { 942 if (!fs_info->super_copy || !fs_info->super_for_commit) {
948 error = -ENOMEM; 943 error = -ENOMEM;
944 goto error_fs_info;
945 }
946
947 error = btrfs_open_devices(fs_devices, mode, fs_type);
948 if (error)
949 goto error_fs_info;
950
951 if (!(flags & MS_RDONLY) && fs_devices->rw_devices == 0) {
952 error = -EACCES;
949 goto error_close_devices; 953 goto error_close_devices;
950 } 954 }
951 955
952 bdev = fs_devices->latest_bdev; 956 bdev = fs_devices->latest_bdev;
953 s = sget(fs_type, btrfs_test_super, btrfs_set_super, tree_root); 957 s = sget(fs_type, btrfs_test_super, btrfs_set_super,
958 fs_info->tree_root);
954 if (IS_ERR(s)) { 959 if (IS_ERR(s)) {
955 error = PTR_ERR(s); 960 error = PTR_ERR(s);
956 goto error_close_devices; 961 goto error_close_devices;
@@ -959,12 +964,12 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
959 if (s->s_root) { 964 if (s->s_root) {
960 if ((flags ^ s->s_flags) & MS_RDONLY) { 965 if ((flags ^ s->s_flags) & MS_RDONLY) {
961 deactivate_locked_super(s); 966 deactivate_locked_super(s);
962 return ERR_PTR(-EBUSY); 967 error = -EBUSY;
968 goto error_close_devices;
963 } 969 }
964 970
965 btrfs_close_devices(fs_devices); 971 btrfs_close_devices(fs_devices);
966 free_fs_info(fs_info); 972 free_fs_info(fs_info);
967 kfree(tree_root);
968 } else { 973 } else {
969 char b[BDEVNAME_SIZE]; 974 char b[BDEVNAME_SIZE];
970 975
@@ -991,8 +996,8 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
991 996
992error_close_devices: 997error_close_devices:
993 btrfs_close_devices(fs_devices); 998 btrfs_close_devices(fs_devices);
999error_fs_info:
994 free_fs_info(fs_info); 1000 free_fs_info(fs_info);
995 kfree(tree_root);
996 return ERR_PTR(error); 1001 return ERR_PTR(error);
997} 1002}
998 1003
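Editor's note: the btrfs_mount() restructuring folds tree_root into fs_info (fs_info->tree_root) and delays btrfs_open_devices() until the purely local allocations have succeeded, so the two error labels unwind in the reverse order of setup: error_fs_info only frees fs_info, error_close_devices additionally closes the devices and then falls through. A compressed view of the resulting flow (editor's condensation, middle steps elided; the success path returns before the labels):

    fs_info = kzalloc(sizeof(*fs_info), GFP_NOFS);
    if (!fs_info)
            return ERR_PTR(-ENOMEM);                /* nothing to undo yet */
    fs_info->tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
    if (!fs_info->tree_root) {
            error = -ENOMEM;
            goto error_fs_info;
    }
    /* ... allocate super_copy / super_for_commit ... */
    error = btrfs_open_devices(fs_devices, mode, fs_type);
    if (error)
            goto error_fs_info;                     /* devices were never opened */
    /* ... rw check, sget(), open_ctree() ... */

    error_close_devices:
            btrfs_close_devices(fs_devices);
    error_fs_info:
            free_fs_info(fs_info);
            return ERR_PTR(error);

The EBUSY case after sget() also routes through these labels now instead of returning directly.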
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 960835eaf4da..6a0574e923bc 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -882,8 +882,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
882 btrfs_reloc_pre_snapshot(trans, pending, &to_reserve); 882 btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
883 883
884 if (to_reserve > 0) { 884 if (to_reserve > 0) {
885 ret = btrfs_block_rsv_add(root, &pending->block_rsv, 885 ret = btrfs_block_rsv_add_noflush(root, &pending->block_rsv,
886 to_reserve); 886 to_reserve);
887 if (ret) { 887 if (ret) {
888 pending->error = ret; 888 pending->error = ret;
889 goto fail; 889 goto fail;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index f8e2943101a1..c37433d3cd82 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -999,7 +999,7 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
999 key.objectid = device->devid; 999 key.objectid = device->devid;
1000 key.offset = start; 1000 key.offset = start;
1001 key.type = BTRFS_DEV_EXTENT_KEY; 1001 key.type = BTRFS_DEV_EXTENT_KEY;
1002 1002again:
1003 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1003 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1004 if (ret > 0) { 1004 if (ret > 0) {
1005 ret = btrfs_previous_item(root, path, key.objectid, 1005 ret = btrfs_previous_item(root, path, key.objectid,
@@ -1012,6 +1012,9 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1012 struct btrfs_dev_extent); 1012 struct btrfs_dev_extent);
1013 BUG_ON(found_key.offset > start || found_key.offset + 1013 BUG_ON(found_key.offset > start || found_key.offset +
1014 btrfs_dev_extent_length(leaf, extent) < start); 1014 btrfs_dev_extent_length(leaf, extent) < start);
1015 key = found_key;
1016 btrfs_release_path(path);
1017 goto again;
1015 } else if (ret == 0) { 1018 } else if (ret == 0) {
1016 leaf = path->nodes[0]; 1019 leaf = path->nodes[0];
1017 extent = btrfs_item_ptr(leaf, path->slots[0], 1020 extent = btrfs_item_ptr(leaf, path->slots[0],
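Editor's note: the new again: label covers the branch where btrfs_search_slot() lands past the extent and btrfs_previous_item() finds the record that actually covers start. Rather than continuing with a path positioned by the backwards walk, the code copies that item's key, releases the path, and repeats the search so the rest of the function operates on an item located by the search itself. The retry skeleton, condensed from the hunks (the surrounding verification is as shown above):

    again:
            ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
            if (ret > 0) {
                    ret = btrfs_previous_item(root, path, key.objectid,
                                              BTRFS_DEV_EXTENT_KEY);
                    /* ... confirm the previous dev extent covers 'start' ... */
                    key = found_key;                /* retry with that exact key */
                    btrfs_release_path(path);       /* drop locks/refs before re-searching */
                    goto again;
            }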
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 2db1bd3173b2..851ba3dcdc29 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1652,46 +1652,12 @@ out:
1652 return error; 1652 return error;
1653} 1653}
1654 1654
1655static int proc_pid_fd_link_getattr(struct vfsmount *mnt, struct dentry *dentry,
1656 struct kstat *stat)
1657{
1658 struct inode *inode = dentry->d_inode;
1659 struct task_struct *task = get_proc_task(inode);
1660 int rc;
1661
1662 if (task == NULL)
1663 return -ESRCH;
1664
1665 rc = -EACCES;
1666 if (lock_trace(task))
1667 goto out_task;
1668
1669 generic_fillattr(inode, stat);
1670 unlock_trace(task);
1671 rc = 0;
1672out_task:
1673 put_task_struct(task);
1674 return rc;
1675}
1676
1677static const struct inode_operations proc_pid_link_inode_operations = { 1655static const struct inode_operations proc_pid_link_inode_operations = {
1678 .readlink = proc_pid_readlink, 1656 .readlink = proc_pid_readlink,
1679 .follow_link = proc_pid_follow_link, 1657 .follow_link = proc_pid_follow_link,
1680 .setattr = proc_setattr, 1658 .setattr = proc_setattr,
1681}; 1659};
1682 1660
1683static const struct inode_operations proc_fdinfo_link_inode_operations = {
1684 .setattr = proc_setattr,
1685 .getattr = proc_pid_fd_link_getattr,
1686};
1687
1688static const struct inode_operations proc_fd_link_inode_operations = {
1689 .readlink = proc_pid_readlink,
1690 .follow_link = proc_pid_follow_link,
1691 .setattr = proc_setattr,
1692 .getattr = proc_pid_fd_link_getattr,
1693};
1694
1695 1661
1696/* building an inode */ 1662/* building an inode */
1697 1663
@@ -1923,61 +1889,49 @@ out:
1923 1889
1924static int proc_fd_info(struct inode *inode, struct path *path, char *info) 1890static int proc_fd_info(struct inode *inode, struct path *path, char *info)
1925{ 1891{
1926 struct task_struct *task; 1892 struct task_struct *task = get_proc_task(inode);
1927 struct files_struct *files; 1893 struct files_struct *files = NULL;
1928 struct file *file; 1894 struct file *file;
1929 int fd = proc_fd(inode); 1895 int fd = proc_fd(inode);
1930 int rc;
1931
1932 task = get_proc_task(inode);
1933 if (!task)
1934 return -ENOENT;
1935
1936 rc = -EACCES;
1937 if (lock_trace(task))
1938 goto out_task;
1939
1940 rc = -ENOENT;
1941 files = get_files_struct(task);
1942 if (files == NULL)
1943 goto out_unlock;
1944 1896
1945 /* 1897 if (task) {
1946 * We are not taking a ref to the file structure, so we must 1898 files = get_files_struct(task);
1947 * hold ->file_lock. 1899 put_task_struct(task);
1948 */ 1900 }
1949 spin_lock(&files->file_lock); 1901 if (files) {
1950 file = fcheck_files(files, fd); 1902 /*
1951 if (file) { 1903 * We are not taking a ref to the file structure, so we must
1952 unsigned int f_flags; 1904 * hold ->file_lock.
1953 struct fdtable *fdt; 1905 */
1954 1906 spin_lock(&files->file_lock);
1955 fdt = files_fdtable(files); 1907 file = fcheck_files(files, fd);
1956 f_flags = file->f_flags & ~O_CLOEXEC; 1908 if (file) {
1957 if (FD_ISSET(fd, fdt->close_on_exec)) 1909 unsigned int f_flags;
1958 f_flags |= O_CLOEXEC; 1910 struct fdtable *fdt;
1959 1911
1960 if (path) { 1912 fdt = files_fdtable(files);
1961 *path = file->f_path; 1913 f_flags = file->f_flags & ~O_CLOEXEC;
1962 path_get(&file->f_path); 1914 if (FD_ISSET(fd, fdt->close_on_exec))
1915 f_flags |= O_CLOEXEC;
1916
1917 if (path) {
1918 *path = file->f_path;
1919 path_get(&file->f_path);
1920 }
1921 if (info)
1922 snprintf(info, PROC_FDINFO_MAX,
1923 "pos:\t%lli\n"
1924 "flags:\t0%o\n",
1925 (long long) file->f_pos,
1926 f_flags);
1927 spin_unlock(&files->file_lock);
1928 put_files_struct(files);
1929 return 0;
1963 } 1930 }
1964 if (info) 1931 spin_unlock(&files->file_lock);
1965 snprintf(info, PROC_FDINFO_MAX, 1932 put_files_struct(files);
1966 "pos:\t%lli\n" 1933 }
1967 "flags:\t0%o\n", 1934 return -ENOENT;
1968 (long long) file->f_pos,
1969 f_flags);
1970 rc = 0;
1971 } else
1972 rc = -ENOENT;
1973 spin_unlock(&files->file_lock);
1974 put_files_struct(files);
1975
1976out_unlock:
1977 unlock_trace(task);
1978out_task:
1979 put_task_struct(task);
1980 return rc;
1981} 1935}
1982 1936
1983static int proc_fd_link(struct inode *inode, struct path *path) 1937static int proc_fd_link(struct inode *inode, struct path *path)
@@ -2072,7 +2026,7 @@ static struct dentry *proc_fd_instantiate(struct inode *dir,
2072 spin_unlock(&files->file_lock); 2026 spin_unlock(&files->file_lock);
2073 put_files_struct(files); 2027 put_files_struct(files);
2074 2028
2075 inode->i_op = &proc_fd_link_inode_operations; 2029 inode->i_op = &proc_pid_link_inode_operations;
2076 inode->i_size = 64; 2030 inode->i_size = 64;
2077 ei->op.proc_get_link = proc_fd_link; 2031 ei->op.proc_get_link = proc_fd_link;
2078 d_set_d_op(dentry, &tid_fd_dentry_operations); 2032 d_set_d_op(dentry, &tid_fd_dentry_operations);
@@ -2104,12 +2058,7 @@ static struct dentry *proc_lookupfd_common(struct inode *dir,
2104 if (fd == ~0U) 2058 if (fd == ~0U)
2105 goto out; 2059 goto out;
2106 2060
2107 result = ERR_PTR(-EACCES);
2108 if (lock_trace(task))
2109 goto out;
2110
2111 result = instantiate(dir, dentry, task, &fd); 2061 result = instantiate(dir, dentry, task, &fd);
2112 unlock_trace(task);
2113out: 2062out:
2114 put_task_struct(task); 2063 put_task_struct(task);
2115out_no_task: 2064out_no_task:
@@ -2129,28 +2078,23 @@ static int proc_readfd_common(struct file * filp, void * dirent,
2129 retval = -ENOENT; 2078 retval = -ENOENT;
2130 if (!p) 2079 if (!p)
2131 goto out_no_task; 2080 goto out_no_task;
2132
2133 retval = -EACCES;
2134 if (lock_trace(p))
2135 goto out;
2136
2137 retval = 0; 2081 retval = 0;
2138 2082
2139 fd = filp->f_pos; 2083 fd = filp->f_pos;
2140 switch (fd) { 2084 switch (fd) {
2141 case 0: 2085 case 0:
2142 if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0) 2086 if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0)
2143 goto out_unlock; 2087 goto out;
2144 filp->f_pos++; 2088 filp->f_pos++;
2145 case 1: 2089 case 1:
2146 ino = parent_ino(dentry); 2090 ino = parent_ino(dentry);
2147 if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0) 2091 if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0)
2148 goto out_unlock; 2092 goto out;
2149 filp->f_pos++; 2093 filp->f_pos++;
2150 default: 2094 default:
2151 files = get_files_struct(p); 2095 files = get_files_struct(p);
2152 if (!files) 2096 if (!files)
2153 goto out_unlock; 2097 goto out;
2154 rcu_read_lock(); 2098 rcu_read_lock();
2155 for (fd = filp->f_pos-2; 2099 for (fd = filp->f_pos-2;
2156 fd < files_fdtable(files)->max_fds; 2100 fd < files_fdtable(files)->max_fds;
@@ -2174,9 +2118,6 @@ static int proc_readfd_common(struct file * filp, void * dirent,
2174 rcu_read_unlock(); 2118 rcu_read_unlock();
2175 put_files_struct(files); 2119 put_files_struct(files);
2176 } 2120 }
2177
2178out_unlock:
2179 unlock_trace(p);
2180out: 2121out:
2181 put_task_struct(p); 2122 put_task_struct(p);
2182out_no_task: 2123out_no_task:
@@ -2254,7 +2195,6 @@ static struct dentry *proc_fdinfo_instantiate(struct inode *dir,
2254 ei->fd = fd; 2195 ei->fd = fd;
2255 inode->i_mode = S_IFREG | S_IRUSR; 2196 inode->i_mode = S_IFREG | S_IRUSR;
2256 inode->i_fop = &proc_fdinfo_file_operations; 2197 inode->i_fop = &proc_fdinfo_file_operations;
2257 inode->i_op = &proc_fdinfo_link_inode_operations;
2258 d_set_d_op(dentry, &tid_fd_dentry_operations); 2198 d_set_d_op(dentry, &tid_fd_dentry_operations);
2259 d_add(dentry, inode); 2199 d_add(dentry, inode);
2260 /* Close the race of the process dying before we return the dentry */ 2200 /* Close the race of the process dying before we return the dentry */
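Editor's note: the /proc/&lt;pid&gt;/fd changes remove the lock_trace()-based ptrace checks that had been added around the fd helpers (and the extra inode_operations that carried a getattr for them), returning proc_fd_info() to the pattern of pinning the task's files_struct and examining the descriptor table under ->file_lock, since no reference is taken on the struct file itself. The access pattern, condensed from the rewritten function:

    struct task_struct *task = get_proc_task(inode);
    struct files_struct *files = NULL;

    if (task) {
            files = get_files_struct(task);   /* pin the fd table */
            put_task_struct(task);
    }
    if (!files)
            return -ENOENT;
    spin_lock(&files->file_lock);             /* required: no ref held on 'file' */
    file = fcheck_files(files, fd);
    if (file) {
            /* read f_path / f_pos / f_flags while the lock is held */
    }
    spin_unlock(&files->file_lock);
    put_files_struct(files);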
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 33b13310ee0c..574d4ee9b625 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -189,7 +189,7 @@ xfs_end_io(
189 int error = 0; 189 int error = 0;
190 190
191 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { 191 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
192 error = -EIO; 192 ioend->io_error = -EIO;
193 goto done; 193 goto done;
194 } 194 }
195 if (ioend->io_error) 195 if (ioend->io_error)
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 1a3513881bce..eac97ef81e2a 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -656,7 +656,7 @@ xfs_buf_item_committing(
656/* 656/*
657 * This is the ops vector shared by all buf log items. 657 * This is the ops vector shared by all buf log items.
658 */ 658 */
659static struct xfs_item_ops xfs_buf_item_ops = { 659static const struct xfs_item_ops xfs_buf_item_ops = {
660 .iop_size = xfs_buf_item_size, 660 .iop_size = xfs_buf_item_size,
661 .iop_format = xfs_buf_item_format, 661 .iop_format = xfs_buf_item_format,
662 .iop_pin = xfs_buf_item_pin, 662 .iop_pin = xfs_buf_item_pin,
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index bb3f71d236d2..0dee0b71029d 100644
--- a/fs/xfs/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
@@ -295,7 +295,7 @@ xfs_qm_dquot_logitem_committing(
295/* 295/*
296 * This is the ops vector for dquots 296 * This is the ops vector for dquots
297 */ 297 */
298static struct xfs_item_ops xfs_dquot_item_ops = { 298static const struct xfs_item_ops xfs_dquot_item_ops = {
299 .iop_size = xfs_qm_dquot_logitem_size, 299 .iop_size = xfs_qm_dquot_logitem_size,
300 .iop_format = xfs_qm_dquot_logitem_format, 300 .iop_format = xfs_qm_dquot_logitem_format,
301 .iop_pin = xfs_qm_dquot_logitem_pin, 301 .iop_pin = xfs_qm_dquot_logitem_pin,
@@ -483,7 +483,7 @@ xfs_qm_qoff_logitem_committing(
483{ 483{
484} 484}
485 485
486static struct xfs_item_ops xfs_qm_qoffend_logitem_ops = { 486static const struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
487 .iop_size = xfs_qm_qoff_logitem_size, 487 .iop_size = xfs_qm_qoff_logitem_size,
488 .iop_format = xfs_qm_qoff_logitem_format, 488 .iop_format = xfs_qm_qoff_logitem_format,
489 .iop_pin = xfs_qm_qoff_logitem_pin, 489 .iop_pin = xfs_qm_qoff_logitem_pin,
@@ -498,7 +498,7 @@ static struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
498/* 498/*
499 * This is the ops vector shared by all quotaoff-start log items. 499 * This is the ops vector shared by all quotaoff-start log items.
500 */ 500 */
501static struct xfs_item_ops xfs_qm_qoff_logitem_ops = { 501static const struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
502 .iop_size = xfs_qm_qoff_logitem_size, 502 .iop_size = xfs_qm_qoff_logitem_size,
503 .iop_format = xfs_qm_qoff_logitem_format, 503 .iop_format = xfs_qm_qoff_logitem_format,
504 .iop_pin = xfs_qm_qoff_logitem_pin, 504 .iop_pin = xfs_qm_qoff_logitem_pin,
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index d22e62623437..35c2aff38b20 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -217,7 +217,7 @@ xfs_efi_item_committing(
217/* 217/*
218 * This is the ops vector shared by all efi log items. 218 * This is the ops vector shared by all efi log items.
219 */ 219 */
220static struct xfs_item_ops xfs_efi_item_ops = { 220static const struct xfs_item_ops xfs_efi_item_ops = {
221 .iop_size = xfs_efi_item_size, 221 .iop_size = xfs_efi_item_size,
222 .iop_format = xfs_efi_item_format, 222 .iop_format = xfs_efi_item_format,
223 .iop_pin = xfs_efi_item_pin, 223 .iop_pin = xfs_efi_item_pin,
@@ -477,7 +477,7 @@ xfs_efd_item_committing(
477/* 477/*
478 * This is the ops vector shared by all efd log items. 478 * This is the ops vector shared by all efd log items.
479 */ 479 */
480static struct xfs_item_ops xfs_efd_item_ops = { 480static const struct xfs_item_ops xfs_efd_item_ops = {
481 .iop_size = xfs_efd_item_size, 481 .iop_size = xfs_efd_item_size,
482 .iop_format = xfs_efd_item_format, 482 .iop_format = xfs_efd_item_format,
483 .iop_pin = xfs_efd_item_pin, 483 .iop_pin = xfs_efd_item_pin,
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index b7cf21ba240f..abaafdbb3e65 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -795,7 +795,7 @@ xfs_inode_item_committing(
795/* 795/*
796 * This is the ops vector shared by all buf log items. 796 * This is the ops vector shared by all buf log items.
797 */ 797 */
798static struct xfs_item_ops xfs_inode_item_ops = { 798static const struct xfs_item_ops xfs_inode_item_ops = {
799 .iop_size = xfs_inode_item_size, 799 .iop_size = xfs_inode_item_size,
800 .iop_format = xfs_inode_item_format, 800 .iop_format = xfs_inode_item_format,
801 .iop_pin = xfs_inode_item_pin, 801 .iop_pin = xfs_inode_item_pin,
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 2758a6277c52..a14cd89fe465 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -626,7 +626,7 @@ xfs_log_item_init(
626 struct xfs_mount *mp, 626 struct xfs_mount *mp,
627 struct xfs_log_item *item, 627 struct xfs_log_item *item,
628 int type, 628 int type,
629 struct xfs_item_ops *ops) 629 const struct xfs_item_ops *ops)
630{ 630{
631 item->li_mountp = mp; 631 item->li_mountp = mp;
632 item->li_ailp = mp->m_ail; 632 item->li_ailp = mp->m_ail;
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 78c9039994af..3f7bf451c034 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -137,7 +137,7 @@ struct xfs_trans;
137void xfs_log_item_init(struct xfs_mount *mp, 137void xfs_log_item_init(struct xfs_mount *mp,
138 struct xfs_log_item *item, 138 struct xfs_log_item *item,
139 int type, 139 int type,
140 struct xfs_item_ops *ops); 140 const struct xfs_item_ops *ops);
141 141
142xfs_lsn_t xfs_log_done(struct xfs_mount *mp, 142xfs_lsn_t xfs_log_done(struct xfs_mount *mp,
143 struct xlog_ticket *ticket, 143 struct xlog_ticket *ticket,
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 603f3eb52041..3ae713c0abd9 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -326,7 +326,7 @@ typedef struct xfs_log_item {
326 struct xfs_log_item *); 326 struct xfs_log_item *);
327 /* buffer item iodone */ 327 /* buffer item iodone */
328 /* callback func */ 328 /* callback func */
329 struct xfs_item_ops *li_ops; /* function list */ 329 const struct xfs_item_ops *li_ops; /* function list */
330 330
331 /* delayed logging */ 331 /* delayed logging */
332 struct list_head li_cil; /* CIL pointers */ 332 struct list_head li_cil; /* CIL pointers */
@@ -341,7 +341,7 @@ typedef struct xfs_log_item {
341 { XFS_LI_IN_AIL, "IN_AIL" }, \ 341 { XFS_LI_IN_AIL, "IN_AIL" }, \
342 { XFS_LI_ABORTED, "ABORTED" } 342 { XFS_LI_ABORTED, "ABORTED" }
343 343
344typedef struct xfs_item_ops { 344struct xfs_item_ops {
345 uint (*iop_size)(xfs_log_item_t *); 345 uint (*iop_size)(xfs_log_item_t *);
346 void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *); 346 void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *);
347 void (*iop_pin)(xfs_log_item_t *); 347 void (*iop_pin)(xfs_log_item_t *);
@@ -352,7 +352,7 @@ typedef struct xfs_item_ops {
352 void (*iop_push)(xfs_log_item_t *); 352 void (*iop_push)(xfs_log_item_t *);
353 bool (*iop_pushbuf)(xfs_log_item_t *); 353 bool (*iop_pushbuf)(xfs_log_item_t *);
354 void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t); 354 void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t);
355} xfs_item_ops_t; 355};
356 356
357#define IOP_SIZE(ip) (*(ip)->li_ops->iop_size)(ip) 357#define IOP_SIZE(ip) (*(ip)->li_ops->iop_size)(ip)
358#define IOP_FORMAT(ip,vp) (*(ip)->li_ops->iop_format)(ip, vp) 358#define IOP_FORMAT(ip,vp) (*(ip)->li_ops->iop_format)(ip, vp)
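Editor's note: dropping the xfs_item_ops_t typedef and constifying every ops vector lets these function-pointer tables live in read-only data and turns any accidental store through li_ops into a compile-time error; the li_ops member and xfs_log_item_init() are widened to const pointers accordingly. The pattern in miniature:

    struct xfs_item_ops {                           /* plain struct, no _t typedef */
            uint    (*iop_size)(xfs_log_item_t *);
            /* ... remaining callbacks ... */
    };

    static const struct xfs_item_ops xfs_buf_item_ops = {
            .iop_size   = xfs_buf_item_size,
            /* ... */
    };

    void xfs_log_item_init(struct xfs_mount *mp, struct xfs_log_item *item,
                           int type, const struct xfs_item_ops *ops);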
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 4ecf2a549060..ce9268a2f56b 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -112,7 +112,7 @@ xfs_readlink(
112 char *link) 112 char *link)
113{ 113{
114 xfs_mount_t *mp = ip->i_mount; 114 xfs_mount_t *mp = ip->i_mount;
115 int pathlen; 115 xfs_fsize_t pathlen;
116 int error = 0; 116 int error = 0;
117 117
118 trace_xfs_readlink(ip); 118 trace_xfs_readlink(ip);
@@ -122,13 +122,19 @@ xfs_readlink(
122 122
123 xfs_ilock(ip, XFS_ILOCK_SHARED); 123 xfs_ilock(ip, XFS_ILOCK_SHARED);
124 124
125 ASSERT(S_ISLNK(ip->i_d.di_mode));
126 ASSERT(ip->i_d.di_size <= MAXPATHLEN);
127
128 pathlen = ip->i_d.di_size; 125 pathlen = ip->i_d.di_size;
129 if (!pathlen) 126 if (!pathlen)
130 goto out; 127 goto out;
131 128
129 if (pathlen < 0 || pathlen > MAXPATHLEN) {
130 xfs_alert(mp, "%s: inode (%llu) bad symlink length (%lld)",
131 __func__, (unsigned long long) ip->i_ino,
132 (long long) pathlen);
133 ASSERT(0);
134 return XFS_ERROR(EFSCORRUPTED);
135 }
136
137
132 if (ip->i_df.if_flags & XFS_IFINLINE) { 138 if (ip->i_df.if_flags & XFS_IFINLINE) {
133 memcpy(link, ip->i_df.if_u1.if_data, pathlen); 139 memcpy(link, ip->i_df.if_u1.if_data, pathlen);
134 link[pathlen] = '\0'; 140 link[pathlen] = '\0';
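Editor's note: xfs_readlink() stops trusting the on-disk symlink size. The ASSERTs (which vanish on non-DEBUG builds) are replaced by a runtime range check, pathlen is widened to xfs_fsize_t so the full 64-bit di_size is inspected, and an out-of-range value is logged and converted to EFSCORRUPTED before anything copies pathlen bytes into the caller's buffer. Condensed:

    xfs_fsize_t pathlen = ip->i_d.di_size;

    if (!pathlen)
            goto out;
    if (pathlen < 0 || pathlen > MAXPATHLEN) {
            xfs_alert(mp, "%s: inode (%llu) bad symlink length (%lld)",
                      __func__, (unsigned long long)ip->i_ino,
                      (long long)pathlen);
            ASSERT(0);                              /* still trip DEBUG kernels */
            return XFS_ERROR(EFSCORRUPTED);         /* fail gracefully otherwise */
    }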
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index cf399495d38f..1f9e9516e2b7 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -990,7 +990,9 @@ struct drm_minor {
990 struct proc_dir_entry *proc_root; /**< proc directory entry */ 990 struct proc_dir_entry *proc_root; /**< proc directory entry */
991 struct drm_info_node proc_nodes; 991 struct drm_info_node proc_nodes;
992 struct dentry *debugfs_root; 992 struct dentry *debugfs_root;
993 struct drm_info_node debugfs_nodes; 993
994 struct list_head debugfs_list;
995 struct mutex debugfs_lock; /* Protects debugfs_list. */
994 996
995 struct drm_master *master; /* currently active master for this node */ 997 struct drm_master *master; /* currently active master for this node */
996 struct list_head master_list; 998 struct list_head master_list;
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h
index 874c4d271328..1d161cb3aca5 100644
--- a/include/drm/exynos_drm.h
+++ b/include/drm/exynos_drm.h
@@ -36,11 +36,13 @@
36 * - this size value would be page-aligned internally. 36 * - this size value would be page-aligned internally.
37 * @flags: user request for setting memory type or cache attributes. 37 * @flags: user request for setting memory type or cache attributes.
38 * @handle: returned handle for the object. 38 * @handle: returned handle for the object.
39 * @pad: just padding to be 64-bit aligned.
39 */ 40 */
40struct drm_exynos_gem_create { 41struct drm_exynos_gem_create {
41 unsigned int size; 42 unsigned int size;
42 unsigned int flags; 43 unsigned int flags;
43 unsigned int handle; 44 unsigned int handle;
45 unsigned int pad;
44}; 46};
45 47
46/** 48/**
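Editor's note: the extra pad member rounds struct drm_exynos_gem_create up to a 64-bit multiple, as the new kerneldoc line says, presumably so the ioctl payload keeps the same explicit size and layout across ABIs rather than relying on whatever padding a particular build would or would not insert. The resulting layout:

    struct drm_exynos_gem_create {
            unsigned int size;      /* requested size, page-aligned internally */
            unsigned int flags;     /* memory type / cache attributes */
            unsigned int handle;    /* returned GEM handle */
            unsigned int pad;       /* explicit padding: sizeof() is 16 everywhere */
    };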
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index afb94583960c..98ce8124b1cc 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -41,7 +41,7 @@ struct devfreq_dev_status {
41 unsigned long total_time; 41 unsigned long total_time;
42 unsigned long busy_time; 42 unsigned long busy_time;
43 unsigned long current_frequency; 43 unsigned long current_frequency;
44 void *private_date; 44 void *private_data;
45}; 45};
46 46
47/** 47/**
diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h
index 08a2fee40659..aad6bd4b3efd 100644
--- a/include/linux/hwspinlock.h
+++ b/include/linux/hwspinlock.h
@@ -118,7 +118,6 @@ int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
118static inline 118static inline
119void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags) 119void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
120{ 120{
121 return 0;
122} 121}
123 122
124static inline int hwspin_lock_get_id(struct hwspinlock *hwlock) 123static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
diff --git a/include/linux/mfd/wm8994/registers.h b/include/linux/mfd/wm8994/registers.h
index fae295048a8b..83a9caec0e43 100644
--- a/include/linux/mfd/wm8994/registers.h
+++ b/include/linux/mfd/wm8994/registers.h
@@ -1963,6 +1963,21 @@
1963#define WM8958_MICB2_DISCH_WIDTH 1 /* MICB2_DISCH */ 1963#define WM8958_MICB2_DISCH_WIDTH 1 /* MICB2_DISCH */
1964 1964
1965/* 1965/*
1966 * R210 (0xD2) - Mic Detect 3
1967 */
1968#define WM8958_MICD_LVL_MASK 0x07FC /* MICD_LVL - [10:2] */
1969#define WM8958_MICD_LVL_SHIFT 2 /* MICD_LVL - [10:2] */
1970#define WM8958_MICD_LVL_WIDTH 9 /* MICD_LVL - [10:2] */
1971#define WM8958_MICD_VALID 0x0002 /* MICD_VALID */
1972#define WM8958_MICD_VALID_MASK 0x0002 /* MICD_VALID */
1973#define WM8958_MICD_VALID_SHIFT 1 /* MICD_VALID */
1974#define WM8958_MICD_VALID_WIDTH 1 /* MICD_VALID */
1975#define WM8958_MICD_STS 0x0001 /* MICD_STS */
1976#define WM8958_MICD_STS_MASK 0x0001 /* MICD_STS */
1977#define WM8958_MICD_STS_SHIFT 0 /* MICD_STS */
1978#define WM8958_MICD_STS_WIDTH 1 /* MICD_STS */
1979
1980/*
1966 * R76 (0x4C) - Charge Pump (1) 1981 * R76 (0x4C) - Charge Pump (1)
1967 */ 1982 */
1968#define WM8994_CP_ENA 0x8000 /* CP_ENA */ 1983#define WM8994_CP_ENA 0x8000 /* CP_ENA */
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 56db75147186..995e3bd3417b 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -70,6 +70,7 @@ static struct pm_qos_constraints cpu_dma_constraints = {
70}; 70};
71static struct pm_qos_object cpu_dma_pm_qos = { 71static struct pm_qos_object cpu_dma_pm_qos = {
72 .constraints = &cpu_dma_constraints, 72 .constraints = &cpu_dma_constraints,
73 .name = "cpu_dma_latency",
73}; 74};
74 75
75static BLOCKING_NOTIFIER_HEAD(network_lat_notifier); 76static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c
index 5dbab38d04af..130cfe677d60 100644
--- a/sound/core/vmaster.c
+++ b/sound/core/vmaster.c
@@ -52,6 +52,7 @@ struct link_slave {
52 struct link_ctl_info info; 52 struct link_ctl_info info;
53 int vals[2]; /* current values */ 53 int vals[2]; /* current values */
54 unsigned int flags; 54 unsigned int flags;
55 struct snd_kcontrol *kctl; /* original kcontrol pointer */
55 struct snd_kcontrol slave; /* the copy of original control entry */ 56 struct snd_kcontrol slave; /* the copy of original control entry */
56}; 57};
57 58
@@ -252,6 +253,7 @@ int _snd_ctl_add_slave(struct snd_kcontrol *master, struct snd_kcontrol *slave,
252 slave->count * sizeof(*slave->vd), GFP_KERNEL); 253 slave->count * sizeof(*slave->vd), GFP_KERNEL);
253 if (!srec) 254 if (!srec)
254 return -ENOMEM; 255 return -ENOMEM;
256 srec->kctl = slave;
255 srec->slave = *slave; 257 srec->slave = *slave;
256 memcpy(srec->slave.vd, slave->vd, slave->count * sizeof(*slave->vd)); 258 memcpy(srec->slave.vd, slave->vd, slave->count * sizeof(*slave->vd));
257 srec->master = master_link; 259 srec->master = master_link;
@@ -333,10 +335,18 @@ static int master_put(struct snd_kcontrol *kcontrol,
333static void master_free(struct snd_kcontrol *kcontrol) 335static void master_free(struct snd_kcontrol *kcontrol)
334{ 336{
335 struct link_master *master = snd_kcontrol_chip(kcontrol); 337 struct link_master *master = snd_kcontrol_chip(kcontrol);
336 struct link_slave *slave; 338 struct link_slave *slave, *n;
337 339
338 list_for_each_entry(slave, &master->slaves, list) 340 /* free all slave links and retore the original slave kctls */
339 slave->master = NULL; 341 list_for_each_entry_safe(slave, n, &master->slaves, list) {
342 struct snd_kcontrol *sctl = slave->kctl;
343 struct list_head olist = sctl->list;
344 memcpy(sctl, &slave->slave, sizeof(*sctl));
345 memcpy(sctl->vd, slave->slave.vd,
346 sctl->count * sizeof(*sctl->vd));
347 sctl->list = olist; /* keep the current linked-list */
348 kfree(slave);
349 }
340 kfree(master); 350 kfree(master);
341} 351}
342 352
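Editor's note: master_free() switches to list_for_each_entry_safe() because every element is freed inside the loop (the non-_safe variant would dereference the just-freed node to advance), and instead of merely clearing slave->master it now restores the original slave kcontrol from the copy saved at _snd_ctl_add_slave() time, preserving only the control's live list linkage. The freeing pattern, condensed from the hunk:

    struct link_slave *slave, *n;

    list_for_each_entry_safe(slave, n, &master->slaves, list) {
            struct snd_kcontrol *sctl = slave->kctl;        /* pointer saved when the slave was added */
            struct list_head olist = sctl->list;

            memcpy(sctl, &slave->slave, sizeof(*sctl));     /* put the saved contents back */
            memcpy(sctl->vd, slave->slave.vd,
                   sctl->count * sizeof(*sctl->vd));
            sctl->list = olist;                             /* keep the card's control list intact */
            kfree(slave);                                   /* safe: 'n' was read before the free */
    }
    kfree(master);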
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 916a1863af73..e44b107fdc75 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -2331,6 +2331,39 @@ int snd_hda_codec_reset(struct hda_codec *codec)
2331 return 0; 2331 return 0;
2332} 2332}
2333 2333
2334typedef int (*map_slave_func_t)(void *, struct snd_kcontrol *);
2335
2336/* apply the function to all matching slave ctls in the mixer list */
2337static int map_slaves(struct hda_codec *codec, const char * const *slaves,
2338 map_slave_func_t func, void *data)
2339{
2340 struct hda_nid_item *items;
2341 const char * const *s;
2342 int i, err;
2343
2344 items = codec->mixers.list;
2345 for (i = 0; i < codec->mixers.used; i++) {
2346 struct snd_kcontrol *sctl = items[i].kctl;
2347 if (!sctl || !sctl->id.name ||
2348 sctl->id.iface != SNDRV_CTL_ELEM_IFACE_MIXER)
2349 continue;
2350 for (s = slaves; *s; s++) {
2351 if (!strcmp(sctl->id.name, *s)) {
2352 err = func(data, sctl);
2353 if (err)
2354 return err;
2355 break;
2356 }
2357 }
2358 }
2359 return 0;
2360}
2361
2362static int check_slave_present(void *data, struct snd_kcontrol *sctl)
2363{
2364 return 1;
2365}
2366
2334/** 2367/**
2335 * snd_hda_add_vmaster - create a virtual master control and add slaves 2368 * snd_hda_add_vmaster - create a virtual master control and add slaves
2336 * @codec: HD-audio codec 2369 * @codec: HD-audio codec
@@ -2351,12 +2384,10 @@ int snd_hda_add_vmaster(struct hda_codec *codec, char *name,
2351 unsigned int *tlv, const char * const *slaves) 2384 unsigned int *tlv, const char * const *slaves)
2352{ 2385{
2353 struct snd_kcontrol *kctl; 2386 struct snd_kcontrol *kctl;
2354 const char * const *s;
2355 int err; 2387 int err;
2356 2388
2357 for (s = slaves; *s && !snd_hda_find_mixer_ctl(codec, *s); s++) 2389 err = map_slaves(codec, slaves, check_slave_present, NULL);
2358 ; 2390 if (err != 1) {
2359 if (!*s) {
2360 snd_printdd("No slave found for %s\n", name); 2391 snd_printdd("No slave found for %s\n", name);
2361 return 0; 2392 return 0;
2362 } 2393 }
@@ -2367,23 +2398,10 @@ int snd_hda_add_vmaster(struct hda_codec *codec, char *name,
2367 if (err < 0) 2398 if (err < 0)
2368 return err; 2399 return err;
2369 2400
2370 for (s = slaves; *s; s++) { 2401 err = map_slaves(codec, slaves, (map_slave_func_t)snd_ctl_add_slave,
2371 struct snd_kcontrol *sctl; 2402 kctl);
2372 int i = 0; 2403 if (err < 0)
2373 for (;;) { 2404 return err;
2374 sctl = _snd_hda_find_mixer_ctl(codec, *s, i);
2375 if (!sctl) {
2376 if (!i)
2377 snd_printdd("Cannot find slave %s, "
2378 "skipped\n", *s);
2379 break;
2380 }
2381 err = snd_ctl_add_slave(kctl, sctl);
2382 if (err < 0)
2383 return err;
2384 i++;
2385 }
2386 }
2387 return 0; 2405 return 0;
2388} 2406}
2389EXPORT_SYMBOL_HDA(snd_hda_add_vmaster); 2407EXPORT_SYMBOL_HDA(snd_hda_add_vmaster);
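Editor's note: snd_hda_add_vmaster() stops doing repeated name lookups per slave; map_slaves() instead walks the codec's recorded mixer list and invokes a callback on every control whose name is in the slaves array, stopping (and returning that value) as soon as the callback returns non-zero. The same iterator then backs both uses shown above:

    err = map_slaves(codec, slaves, check_slave_present, NULL);
    if (err != 1)                   /* callback never fired: no matching slave control */
            return 0;
    /* ... create the vmaster kctl ... */
    err = map_slaves(codec, slaves, (map_slave_func_t)snd_ctl_add_slave, kctl);
    if (err < 0)                    /* the void *data slot carries the master kctl */
            return err;

check_slave_present() always returns 1, so the first call yields 1 exactly when at least one slave control exists.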
@@ -4752,6 +4770,7 @@ int snd_hda_parse_pin_defcfg(struct hda_codec *codec,
4752 memset(sequences_hp, 0, sizeof(sequences_hp)); 4770 memset(sequences_hp, 0, sizeof(sequences_hp));
4753 assoc_line_out = 0; 4771 assoc_line_out = 0;
4754 4772
4773 codec->ignore_misc_bit = true;
4755 end_nid = codec->start_nid + codec->num_nodes; 4774 end_nid = codec->start_nid + codec->num_nodes;
4756 for (nid = codec->start_nid; nid < end_nid; nid++) { 4775 for (nid = codec->start_nid; nid < end_nid; nid++) {
4757 unsigned int wid_caps = get_wcaps(codec, nid); 4776 unsigned int wid_caps = get_wcaps(codec, nid);
@@ -4767,6 +4786,9 @@ int snd_hda_parse_pin_defcfg(struct hda_codec *codec,
4767 continue; 4786 continue;
4768 4787
4769 def_conf = snd_hda_codec_get_pincfg(codec, nid); 4788 def_conf = snd_hda_codec_get_pincfg(codec, nid);
4789 if (!(get_defcfg_misc(snd_hda_codec_get_pincfg(codec, nid)) &
4790 AC_DEFCFG_MISC_NO_PRESENCE))
4791 codec->ignore_misc_bit = false;
4770 conn = get_defcfg_connect(def_conf); 4792 conn = get_defcfg_connect(def_conf);
4771 if (conn == AC_JACK_PORT_NONE) 4793 if (conn == AC_JACK_PORT_NONE)
4772 continue; 4794 continue;
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 755f2b0f9d8e..564471169cae 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -854,6 +854,7 @@ struct hda_codec {
854 unsigned int no_sticky_stream:1; /* no sticky-PCM stream assignment */ 854 unsigned int no_sticky_stream:1; /* no sticky-PCM stream assignment */
855 unsigned int pins_shutup:1; /* pins are shut up */ 855 unsigned int pins_shutup:1; /* pins are shut up */
856 unsigned int no_trigger_sense:1; /* don't trigger at pin-sensing */ 856 unsigned int no_trigger_sense:1; /* don't trigger at pin-sensing */
857 unsigned int ignore_misc_bit:1; /* ignore MISC_NO_PRESENCE bit */
857#ifdef CONFIG_SND_HDA_POWER_SAVE 858#ifdef CONFIG_SND_HDA_POWER_SAVE
858 unsigned int power_on :1; /* current (global) power-state */ 859 unsigned int power_on :1; /* current (global) power-state */
859 unsigned int power_transition :1; /* power-state in transition */ 860 unsigned int power_transition :1; /* power-state in transition */
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index dcbea0da0fa2..6579e0f2bb57 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -510,13 +510,15 @@ int snd_hda_jack_detect(struct hda_codec *codec, hda_nid_t nid);
510 510
511static inline bool is_jack_detectable(struct hda_codec *codec, hda_nid_t nid) 511static inline bool is_jack_detectable(struct hda_codec *codec, hda_nid_t nid)
512{ 512{
513 return (snd_hda_query_pin_caps(codec, nid) & AC_PINCAP_PRES_DETECT) && 513 if (!(snd_hda_query_pin_caps(codec, nid) & AC_PINCAP_PRES_DETECT))
514 /* disable MISC_NO_PRESENCE check because it may break too 514 return false;
515 * many devices 515 if (!codec->ignore_misc_bit &&
516 */ 516 (get_defcfg_misc(snd_hda_codec_get_pincfg(codec, nid)) &
517 /*(get_defcfg_misc(snd_hda_codec_get_pincfg(codec, nid) & 517 AC_DEFCFG_MISC_NO_PRESENCE))
518 AC_DEFCFG_MISC_NO_PRESENCE)) &&*/ 518 return false;
519 (get_wcaps(codec, nid) & AC_WCAP_UNSOL_CAP); 519 if (!(get_wcaps(codec, nid) & AC_WCAP_UNSOL_CAP))
520 return false;
521 return true;
520} 522}
521 523
522/* flags for hda_nid_item */ 524/* flags for hda_nid_item */
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 5e706e4d1737..0de21193a2b0 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3062,7 +3062,6 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
3062 SND_PCI_QUIRK(0x1043, 0x1993, "Asus U50F", CXT5066_ASUS), 3062 SND_PCI_QUIRK(0x1043, 0x1993, "Asus U50F", CXT5066_ASUS),
3063 SND_PCI_QUIRK(0x1179, 0xff1e, "Toshiba Satellite C650D", CXT5066_IDEAPAD), 3063 SND_PCI_QUIRK(0x1179, 0xff1e, "Toshiba Satellite C650D", CXT5066_IDEAPAD),
3064 SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5), 3064 SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
3065 SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5),
3066 SND_PCI_QUIRK(0x14f1, 0x0101, "Conexant Reference board", 3065 SND_PCI_QUIRK(0x14f1, 0x0101, "Conexant Reference board",
3067 CXT5066_LAPTOP), 3066 CXT5066_LAPTOP),
3068 SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5), 3067 SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index a24e068a021b..308bb575bc06 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -284,7 +284,7 @@ static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx,
284 struct alc_spec *spec = codec->spec; 284 struct alc_spec *spec = codec->spec;
285 const struct hda_input_mux *imux; 285 const struct hda_input_mux *imux;
286 unsigned int mux_idx; 286 unsigned int mux_idx;
287 int i, type; 287 int i, type, num_conns;
288 hda_nid_t nid; 288 hda_nid_t nid;
289 289
290 mux_idx = adc_idx >= spec->num_mux_defs ? 0 : adc_idx; 290 mux_idx = adc_idx >= spec->num_mux_defs ? 0 : adc_idx;
@@ -307,16 +307,17 @@ static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx,
307 spec->capsrc_nids[adc_idx] : spec->adc_nids[adc_idx]; 307 spec->capsrc_nids[adc_idx] : spec->adc_nids[adc_idx];
308 308
309 /* no selection? */ 309 /* no selection? */
310 if (snd_hda_get_conn_list(codec, nid, NULL) <= 1) 310 num_conns = snd_hda_get_conn_list(codec, nid, NULL);
311 if (num_conns <= 1)
311 return 1; 312 return 1;
312 313
313 type = get_wcaps_type(get_wcaps(codec, nid)); 314 type = get_wcaps_type(get_wcaps(codec, nid));
314 if (type == AC_WID_AUD_MIX) { 315 if (type == AC_WID_AUD_MIX) {
315 /* Matrix-mixer style (e.g. ALC882) */ 316 /* Matrix-mixer style (e.g. ALC882) */
316 for (i = 0; i < imux->num_items; i++) { 317 int active = imux->items[idx].index;
317 unsigned int v = (i == idx) ? 0 : HDA_AMP_MUTE; 318 for (i = 0; i < num_conns; i++) {
318 snd_hda_codec_amp_stereo(codec, nid, HDA_INPUT, 319 unsigned int v = (i == active) ? 0 : HDA_AMP_MUTE;
319 imux->items[i].index, 320 snd_hda_codec_amp_stereo(codec, nid, HDA_INPUT, i,
320 HDA_AMP_MUTE, v); 321 HDA_AMP_MUTE, v);
321 } 322 }
322 } else { 323 } else {
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 4e715fefebef..edc2b7bc177c 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -95,6 +95,7 @@ enum {
95 STAC_92HD83XXX_REF, 95 STAC_92HD83XXX_REF,
96 STAC_92HD83XXX_PWR_REF, 96 STAC_92HD83XXX_PWR_REF,
97 STAC_DELL_S14, 97 STAC_DELL_S14,
98 STAC_DELL_VOSTRO_3500,
98 STAC_92HD83XXX_HP, 99 STAC_92HD83XXX_HP,
99 STAC_92HD83XXX_HP_cNB11_INTQUAD, 100 STAC_92HD83XXX_HP_cNB11_INTQUAD,
100 STAC_HP_DV7_4000, 101 STAC_HP_DV7_4000,
@@ -1659,6 +1660,12 @@ static const unsigned int dell_s14_pin_configs[10] = {
1659 0x40f000f0, 0x40f000f0, 1660 0x40f000f0, 0x40f000f0,
1660}; 1661};
1661 1662
1663static const unsigned int dell_vostro_3500_pin_configs[10] = {
1664 0x02a11020, 0x0221101f, 0x400000f0, 0x90170110,
1665 0x400000f1, 0x400000f2, 0x400000f3, 0x90a60160,
1666 0x400000f4, 0x400000f5,
1667};
1668
1662static const unsigned int hp_dv7_4000_pin_configs[10] = { 1669static const unsigned int hp_dv7_4000_pin_configs[10] = {
1663 0x03a12050, 0x0321201f, 0x40f000f0, 0x90170110, 1670 0x03a12050, 0x0321201f, 0x40f000f0, 0x90170110,
1664 0x40f000f0, 0x40f000f0, 0x90170110, 0xd5a30140, 1671 0x40f000f0, 0x40f000f0, 0x90170110, 0xd5a30140,
@@ -1675,6 +1682,7 @@ static const unsigned int *stac92hd83xxx_brd_tbl[STAC_92HD83XXX_MODELS] = {
1675 [STAC_92HD83XXX_REF] = ref92hd83xxx_pin_configs, 1682 [STAC_92HD83XXX_REF] = ref92hd83xxx_pin_configs,
1676 [STAC_92HD83XXX_PWR_REF] = ref92hd83xxx_pin_configs, 1683 [STAC_92HD83XXX_PWR_REF] = ref92hd83xxx_pin_configs,
1677 [STAC_DELL_S14] = dell_s14_pin_configs, 1684 [STAC_DELL_S14] = dell_s14_pin_configs,
1685 [STAC_DELL_VOSTRO_3500] = dell_vostro_3500_pin_configs,
1678 [STAC_92HD83XXX_HP_cNB11_INTQUAD] = hp_cNB11_intquad_pin_configs, 1686 [STAC_92HD83XXX_HP_cNB11_INTQUAD] = hp_cNB11_intquad_pin_configs,
1679 [STAC_HP_DV7_4000] = hp_dv7_4000_pin_configs, 1687 [STAC_HP_DV7_4000] = hp_dv7_4000_pin_configs,
1680}; 1688};
@@ -1684,6 +1692,7 @@ static const char * const stac92hd83xxx_models[STAC_92HD83XXX_MODELS] = {
1684 [STAC_92HD83XXX_REF] = "ref", 1692 [STAC_92HD83XXX_REF] = "ref",
1685 [STAC_92HD83XXX_PWR_REF] = "mic-ref", 1693 [STAC_92HD83XXX_PWR_REF] = "mic-ref",
1686 [STAC_DELL_S14] = "dell-s14", 1694 [STAC_DELL_S14] = "dell-s14",
1695 [STAC_DELL_VOSTRO_3500] = "dell-vostro-3500",
1687 [STAC_92HD83XXX_HP] = "hp", 1696 [STAC_92HD83XXX_HP] = "hp",
1688 [STAC_92HD83XXX_HP_cNB11_INTQUAD] = "hp_cNB11_intquad", 1697 [STAC_92HD83XXX_HP_cNB11_INTQUAD] = "hp_cNB11_intquad",
1689 [STAC_HP_DV7_4000] = "hp-dv7-4000", 1698 [STAC_HP_DV7_4000] = "hp-dv7-4000",
@@ -1697,6 +1706,8 @@ static const struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = {
1697 "DFI LanParty", STAC_92HD83XXX_REF), 1706 "DFI LanParty", STAC_92HD83XXX_REF),
1698 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02ba, 1707 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02ba,
1699 "unknown Dell", STAC_DELL_S14), 1708 "unknown Dell", STAC_DELL_S14),
1709 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x1028,
1710 "Dell Vostro 3500", STAC_DELL_VOSTRO_3500),
1700 SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xff00, 0x3600, 1711 SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xff00, 0x3600,
1701 "HP", STAC_92HD83XXX_HP), 1712 "HP", STAC_92HD83XXX_HP),
1702 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1656, 1713 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1656,
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 29e312597f20..11718b49b2e2 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -1077,6 +1077,13 @@ static snd_pcm_uframes_t snd_intel8x0_pcm_pointer(struct snd_pcm_substream *subs
1077 } 1077 }
1078 if (civ != igetbyte(chip, ichdev->reg_offset + ICH_REG_OFF_CIV)) 1078 if (civ != igetbyte(chip, ichdev->reg_offset + ICH_REG_OFF_CIV))
1079 continue; 1079 continue;
1080
1081 /* IO read operation is very expensive inside virtual machine
1082 * as it is emulated. The probability that subsequent PICB read
1083 * will return different result is high enough to loop till
1084 * timeout here.
1085 * Same CIV is strict enough condition to be sure that PICB
1086 * is valid inside VM on emulated card. */
1080 if (chip->inside_vm) 1087 if (chip->inside_vm)
1081 break; 1088 break;
1082 if (ptr1 == igetword(chip, ichdev->reg_offset + ichdev->roff_picb)) 1089 if (ptr1 == igetword(chip, ichdev->reg_offset + ichdev->roff_picb))
@@ -2930,6 +2937,45 @@ static unsigned int sis_codec_bits[3] = {
2930 ICH_PCR, ICH_SCR, ICH_SIS_TCR 2937 ICH_PCR, ICH_SCR, ICH_SIS_TCR
2931}; 2938};
2932 2939
2940static int __devinit snd_intel8x0_inside_vm(struct pci_dev *pci)
2941{
2942 int result = inside_vm;
2943 char *msg = NULL;
2944
2945 /* check module parameter first (override detection) */
2946 if (result >= 0) {
2947 msg = result ? "enable (forced) VM" : "disable (forced) VM";
2948 goto fini;
2949 }
2950
2951 /* detect KVM and Parallels virtual environments */
2952 result = kvm_para_available();
2953#ifdef X86_FEATURE_HYPERVISOR
2954 result = result || boot_cpu_has(X86_FEATURE_HYPERVISOR);
2955#endif
2956 if (!result)
2957 goto fini;
2958
2959 /* check for known (emulated) devices */
2960 if (pci->subsystem_vendor == 0x1af4 &&
2961 pci->subsystem_device == 0x1100) {
2962 /* KVM emulated sound, PCI SSID: 1af4:1100 */
2963 msg = "enable KVM";
2964 } else if (pci->subsystem_vendor == 0x1ab8) {
2965 /* Parallels VM emulated sound, PCI SSID: 1ab8:xxxx */
2966 msg = "enable Parallels VM";
2967 } else {
2968 msg = "disable (unknown or VT-d) VM";
2969 result = 0;
2970 }
2971
2972fini:
2973 if (msg != NULL)
2974 printk(KERN_INFO "intel8x0: %s optimization\n", msg);
2975
2976 return result;
2977}
2978
2933static int __devinit snd_intel8x0_create(struct snd_card *card, 2979static int __devinit snd_intel8x0_create(struct snd_card *card,
2934 struct pci_dev *pci, 2980 struct pci_dev *pci,
2935 unsigned long device_type, 2981 unsigned long device_type,
@@ -2997,9 +3043,7 @@ static int __devinit snd_intel8x0_create(struct snd_card *card,
2997 if (xbox) 3043 if (xbox)
2998 chip->xbox = 1; 3044 chip->xbox = 1;
2999 3045
3000 chip->inside_vm = inside_vm; 3046 chip->inside_vm = snd_intel8x0_inside_vm(pci);
3001 if (inside_vm)
3002 printk(KERN_INFO "intel8x0: enable KVM optimization\n");
3003 3047
3004 if (pci->vendor == PCI_VENDOR_ID_INTEL && 3048 if (pci->vendor == PCI_VENDOR_ID_INTEL &&
3005 pci->device == PCI_DEVICE_ID_INTEL_440MX) 3049 pci->device == PCI_DEVICE_ID_INTEL_440MX)
@@ -3243,14 +3287,6 @@ static int __devinit snd_intel8x0_probe(struct pci_dev *pci,
3243 buggy_irq = 0; 3287 buggy_irq = 0;
3244 } 3288 }
3245 3289
3246 if (inside_vm < 0) {
3247 /* detect KVM and Parallels virtual environments */
3248 inside_vm = kvm_para_available();
3249#if defined(__i386__) || defined(__x86_64__)
3250 inside_vm = inside_vm || boot_cpu_has(X86_FEATURE_HYPERVISOR);
3251#endif
3252 }
3253
3254 if ((err = snd_intel8x0_create(card, pci, pci_id->driver_data, 3290 if ((err = snd_intel8x0_create(card, pci, pci_id->driver_data,
3255 &chip)) < 0) { 3291 &chip)) < 0) {
3256 snd_card_free(card); 3292 snd_card_free(card);
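Editor's note: the VM detection moves out of the probe path into snd_intel8x0_inside_vm(), which also narrows when the optimization fires. The inside_vm module parameter still forces it on or off; otherwise a hypervisor must be detected (kvm_para_available(), plus X86_FEATURE_HYPERVISOR where that cpufeature exists) and the device must look like a known emulated card, identified by PCI subsystem ID (1af4:1100 for the KVM-emulated card, vendor 1ab8 for Parallels). Anything else, including real hardware passed through to a guest, keeps the optimization disabled. The decision ladder, condensed (the info printk is omitted):

    int result = inside_vm;                 /* module param: -1 auto, 0/1 forced */

    if (result < 0) {
            result = kvm_para_available();
    #ifdef X86_FEATURE_HYPERVISOR
            result = result || boot_cpu_has(X86_FEATURE_HYPERVISOR);
    #endif
            if (result)                     /* hypervisor found: check for emulated HW */
                    result = (pci->subsystem_vendor == 0x1af4 &&
                              pci->subsystem_device == 0x1100) ||  /* KVM */
                             pci->subsystem_vendor == 0x1ab8;      /* Parallels */
    }
    chip->inside_vm = result;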
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 6b73efd26991..9c982e47eb99 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -56,7 +56,7 @@ static int wm8994_retune_mobile_base[] = {
56static int wm8994_readable(struct snd_soc_codec *codec, unsigned int reg) 56static int wm8994_readable(struct snd_soc_codec *codec, unsigned int reg)
57{ 57{
58 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); 58 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
59 struct wm8994 *control = wm8994->control_data; 59 struct wm8994 *control = codec->control_data;
60 60
61 switch (reg) { 61 switch (reg) {
62 case WM8994_GPIO_1: 62 case WM8994_GPIO_1:
@@ -3030,19 +3030,34 @@ static irqreturn_t wm8958_mic_irq(int irq, void *data)
3030{ 3030{
3031 struct wm8994_priv *wm8994 = data; 3031 struct wm8994_priv *wm8994 = data;
3032 struct snd_soc_codec *codec = wm8994->codec; 3032 struct snd_soc_codec *codec = wm8994->codec;
3033 int reg; 3033 int reg, count;
3034 3034
3035 reg = snd_soc_read(codec, WM8958_MIC_DETECT_3); 3035 /* We may occasionally read a detection without an impedence
3036 if (reg < 0) { 3036 * range being provided - if that happens loop again.
3037 dev_err(codec->dev, "Failed to read mic detect status: %d\n", 3037 */
3038 reg); 3038 count = 10;
3039 return IRQ_NONE; 3039 do {
3040 } 3040 reg = snd_soc_read(codec, WM8958_MIC_DETECT_3);
3041 if (reg < 0) {
3042 dev_err(codec->dev,
3043 "Failed to read mic detect status: %d\n",
3044 reg);
3045 return IRQ_NONE;
3046 }
3041 3047
3042 if (!(reg & WM8958_MICD_VALID)) { 3048 if (!(reg & WM8958_MICD_VALID)) {
3043 dev_dbg(codec->dev, "Mic detect data not valid\n"); 3049 dev_dbg(codec->dev, "Mic detect data not valid\n");
3044 goto out; 3050 goto out;
3045 } 3051 }
3052
3053 if (!(reg & WM8958_MICD_STS) || (reg & WM8958_MICD_LVL_MASK))
3054 break;
3055
3056 msleep(1);
3057 } while (count--);
3058
3059 if (count == 0)
3060 dev_warn(codec->dev, "No impedence range reported for jack\n");
3046 3061
3047#ifndef CONFIG_SND_SOC_WM8994_MODULE 3062#ifndef CONFIG_SND_SOC_WM8994_MODULE
3048 trace_snd_soc_jack_irq(dev_name(codec->dev)); 3063 trace_snd_soc_jack_irq(dev_name(codec->dev));
@@ -3180,9 +3195,9 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
3180 3195
3181 wm8994_request_irq(codec->control_data, WM8994_IRQ_FIFOS_ERR, 3196 wm8994_request_irq(codec->control_data, WM8994_IRQ_FIFOS_ERR,
3182 wm8994_fifo_error, "FIFO error", codec); 3197 wm8994_fifo_error, "FIFO error", codec);
3183 wm8994_request_irq(wm8994->control_data, WM8994_IRQ_TEMP_WARN, 3198 wm8994_request_irq(codec->control_data, WM8994_IRQ_TEMP_WARN,
3184 wm8994_temp_warn, "Thermal warning", codec); 3199 wm8994_temp_warn, "Thermal warning", codec);
3185 wm8994_request_irq(wm8994->control_data, WM8994_IRQ_TEMP_SHUT, 3200 wm8994_request_irq(codec->control_data, WM8994_IRQ_TEMP_SHUT,
3186 wm8994_temp_shut, "Thermal shutdown", codec); 3201 wm8994_temp_shut, "Thermal shutdown", codec);
3187 3202
3188 ret = wm8994_request_irq(codec->control_data, WM8994_IRQ_DCS_DONE, 3203 ret = wm8994_request_irq(codec->control_data, WM8994_IRQ_DCS_DONE,
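Editor's note: wm8958_mic_irq() becomes a bounded polling loop. The detection status bit can be observed before an impedance range shows up in WM8958_MICD_LVL_MASK, so the handler re-reads WM8958_MIC_DETECT_3 up to ten times, sleeping 1 ms between attempts, and breaks out as soon as either the status bit is clear or a level is reported. A cleaned-up sketch of the loop (the hunk itself uses a do/while with a post-decremented counter):

    for (count = 10; count > 0; count--) {
            reg = snd_soc_read(codec, WM8958_MIC_DETECT_3);
            if (reg < 0)
                    return IRQ_NONE;                /* register read failed */
            if (!(reg & WM8958_MICD_VALID))
                    goto out;                       /* nothing valid to report */
            if (!(reg & WM8958_MICD_STS) || (reg & WM8958_MICD_LVL_MASK))
                    break;                          /* no mic, or level now known */
            msleep(1);                              /* give the codec time to measure */
    }
    if (!count)
            dev_warn(codec->dev, "no impedance range reported for jack\n");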
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 60f65ace7474..ab23869c01bb 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -765,10 +765,61 @@ static void usb_mixer_elem_free(struct snd_kcontrol *kctl)
765 * interface to ALSA control for feature/mixer units 765 * interface to ALSA control for feature/mixer units
766 */ 766 */
767 767
768/* volume control quirks */
769static void volume_control_quirks(struct usb_mixer_elem_info *cval,
770 struct snd_kcontrol *kctl)
771{
772 switch (cval->mixer->chip->usb_id) {
773 case USB_ID(0x0471, 0x0101):
774 case USB_ID(0x0471, 0x0104):
775 case USB_ID(0x0471, 0x0105):
776 case USB_ID(0x0672, 0x1041):
777 /* quirk for UDA1321/N101.
778 * note that detection between firmware 2.1.1.7 (N101)
779 * and later 2.1.1.21 is not very clear from datasheets.
780 * I hope that the min value is -15360 for newer firmware --jk
781 */
782 if (!strcmp(kctl->id.name, "PCM Playback Volume") &&
783 cval->min == -15616) {
784 snd_printk(KERN_INFO
785 "set volume quirk for UDA1321/N101 chip\n");
786 cval->max = -256;
787 }
788 break;
789
790 case USB_ID(0x046d, 0x09a4):
791 if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
792 snd_printk(KERN_INFO
793 "set volume quirk for QuickCam E3500\n");
794 cval->min = 6080;
795 cval->max = 8768;
796 cval->res = 192;
797 }
798 break;
799
800 case USB_ID(0x046d, 0x0808):
801 case USB_ID(0x046d, 0x0809):
802 case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
803 case USB_ID(0x046d, 0x0991):
804 /* Most audio usb devices lie about volume resolution.
805 * Most Logitech webcams have res = 384.
806 * Proboly there is some logitech magic behind this number --fishor
807 */
808 if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
809 snd_printk(KERN_INFO
810 "set resolution quirk: cval->res = 384\n");
811 cval->res = 384;
812 }
813 break;
814
815 }
816}
817
768/* 818/*
769 * retrieve the minimum and maximum values for the specified control 819 * retrieve the minimum and maximum values for the specified control
770 */ 820 */
771static int get_min_max(struct usb_mixer_elem_info *cval, int default_min) 821static int get_min_max_with_quirks(struct usb_mixer_elem_info *cval,
822 int default_min, struct snd_kcontrol *kctl)
772{ 823{
773 /* for failsafe */ 824 /* for failsafe */
774 cval->min = default_min; 825 cval->min = default_min;
@@ -844,6 +895,9 @@ static int get_min_max(struct usb_mixer_elem_info *cval, int default_min)
844 cval->initialized = 1; 895 cval->initialized = 1;
845 } 896 }
846 897
898 if (kctl)
899 volume_control_quirks(cval, kctl);
900
847 /* USB descriptions contain the dB scale in 1/256 dB unit 901 /* USB descriptions contain the dB scale in 1/256 dB unit
848 * while ALSA TLV contains in 1/100 dB unit 902 * while ALSA TLV contains in 1/100 dB unit
849 */ 903 */
@@ -864,6 +918,7 @@ static int get_min_max(struct usb_mixer_elem_info *cval, int default_min)
864 return 0; 918 return 0;
865} 919}
866 920
921#define get_min_max(cval, def) get_min_max_with_quirks(cval, def, NULL)
867 922
868/* get a feature/mixer unit info */ 923/* get a feature/mixer unit info */
869static int mixer_ctl_feature_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) 924static int mixer_ctl_feature_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
@@ -882,7 +937,7 @@ static int mixer_ctl_feature_info(struct snd_kcontrol *kcontrol, struct snd_ctl_
882 uinfo->value.integer.max = 1; 937 uinfo->value.integer.max = 1;
883 } else { 938 } else {
884 if (!cval->initialized) { 939 if (!cval->initialized) {
885 get_min_max(cval, 0); 940 get_min_max_with_quirks(cval, 0, kcontrol);
886 if (cval->initialized && cval->dBmin >= cval->dBmax) { 941 if (cval->initialized && cval->dBmin >= cval->dBmax) {
887 kcontrol->vd[0].access &= 942 kcontrol->vd[0].access &=
888 ~(SNDRV_CTL_ELEM_ACCESS_TLV_READ | 943 ~(SNDRV_CTL_ELEM_ACCESS_TLV_READ |
@@ -1045,9 +1100,6 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
1045 cval->ch_readonly = readonly_mask; 1100 cval->ch_readonly = readonly_mask;
1046 } 1101 }
1047 1102
1048 /* get min/max values */
1049 get_min_max(cval, 0);
1050
1051 /* if all channels in the mask are marked read-only, make the control 1103 /* if all channels in the mask are marked read-only, make the control
1052 * read-only. set_cur_mix_value() will check the mask again and won't 1104 * read-only. set_cur_mix_value() will check the mask again and won't
1053 * issue write commands to read-only channels. */ 1105 * issue write commands to read-only channels. */
@@ -1069,6 +1121,9 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
1069 len = snd_usb_copy_string_desc(state, nameid, 1121 len = snd_usb_copy_string_desc(state, nameid,
1070 kctl->id.name, sizeof(kctl->id.name)); 1122 kctl->id.name, sizeof(kctl->id.name));
1071 1123
1124 /* get min/max values */
1125 get_min_max_with_quirks(cval, 0, kctl);
1126
1072 switch (control) { 1127 switch (control) {
1073 case UAC_FU_MUTE: 1128 case UAC_FU_MUTE:
1074 case UAC_FU_VOLUME: 1129 case UAC_FU_VOLUME:
@@ -1118,51 +1173,6 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
1118 break; 1173 break;
1119 } 1174 }
1120 1175
1121 /* volume control quirks */
1122 switch (state->chip->usb_id) {
1123 case USB_ID(0x0471, 0x0101):
1124 case USB_ID(0x0471, 0x0104):
1125 case USB_ID(0x0471, 0x0105):
1126 case USB_ID(0x0672, 0x1041):
1127 /* quirk for UDA1321/N101.
1128 * note that detection between firmware 2.1.1.7 (N101)
1129 * and later 2.1.1.21 is not very clear from datasheets.
1130 * I hope that the min value is -15360 for newer firmware --jk
1131 */
1132 if (!strcmp(kctl->id.name, "PCM Playback Volume") &&
1133 cval->min == -15616) {
1134 snd_printk(KERN_INFO
1135 "set volume quirk for UDA1321/N101 chip\n");
1136 cval->max = -256;
1137 }
1138 break;
1139
1140 case USB_ID(0x046d, 0x09a4):
1141 if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
1142 snd_printk(KERN_INFO
1143 "set volume quirk for QuickCam E3500\n");
1144 cval->min = 6080;
1145 cval->max = 8768;
1146 cval->res = 192;
1147 }
1148 break;
1149
1150 case USB_ID(0x046d, 0x0808):
1151 case USB_ID(0x046d, 0x0809):
1152 case USB_ID(0x046d, 0x0991):
1153 /* Most audio usb devices lie about volume resolution.
1154 * Most Logitech webcams have res = 384.
1155 * Probably there is some Logitech magic behind this number --fishor
1156 */
1157 if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
1158 snd_printk(KERN_INFO
1159 "set resolution quirk: cval->res = 384\n");
1160 cval->res = 384;
1161 }
1162 break;
1163
1164 }
1165
1166 range = (cval->max - cval->min) / cval->res; 1176 range = (cval->max - cval->min) / cval->res;
1167 /* Are there devices with volume range more than 255? I use a bit more 1177 /* Are there devices with volume range more than 255? I use a bit more
1168 * to be sure. 384 is a resolution magic number found on Logitech 1178 * to be sure. 384 is a resolution magic number found on Logitech
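The mixer.c change above moves the per-device volume quirks out of build_feature_ctl() and into volume_control_quirks(); get_min_max_with_quirks() applies them only when it is handed a kcontrol, and the old get_min_max() entry point becomes a macro that passes NULL and therefore skips the quirks. Below is a minimal userspace sketch of that wrapper pattern; the names (range_info, control_ctx, apply_quirks, get_range) are illustrative only, not the driver's.

#include <stddef.h>
#include <stdio.h>

struct range_info {
	int min, max, res;
};

struct control_ctx {
	const char *name;	/* stands in for kctl->id.name */
};

static void apply_quirks(struct range_info *ri, struct control_ctx *ctl)
{
	/* the real driver switches on the USB ID and matches ctl->name */
	if (ctl->name)
		ri->res = 384;
}

static int get_range_with_quirks(struct range_info *ri, int default_min,
				 struct control_ctx *ctl)
{
	ri->min = default_min;		/* failsafe defaults, as in the driver */
	ri->max = ri->min + 1;
	ri->res = 1;

	if (ctl)			/* quirks only when a control is known */
		apply_quirks(ri, ctl);
	return 0;
}

/* old call sites keep the two-argument form and simply skip the quirks */
#define get_range(ri, def) get_range_with_quirks(ri, def, NULL)

int main(void)
{
	struct range_info ri;
	struct control_ctx ctl = { .name = "Mic Capture Volume" };

	get_range(&ri, 0);			/* res stays 1 */
	printf("plain: res = %d\n", ri.res);

	get_range_with_quirks(&ri, 0, &ctl);	/* quirk applied: res = 384 */
	printf("quirk: res = %d\n", ri.res);
	return 0;
}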
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 2e5bc7344026..a3ddac0deffd 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -137,12 +137,12 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
137 return -ENOMEM; 137 return -ENOMEM;
138 } 138 }
139 if (fp->nr_rates > 0) { 139 if (fp->nr_rates > 0) {
140 rate_table = kmalloc(sizeof(int) * fp->nr_rates, GFP_KERNEL); 140 rate_table = kmemdup(fp->rate_table,
141 sizeof(int) * fp->nr_rates, GFP_KERNEL);
141 if (!rate_table) { 142 if (!rate_table) {
142 kfree(fp); 143 kfree(fp);
143 return -ENOMEM; 144 return -ENOMEM;
144 } 145 }
145 memcpy(rate_table, fp->rate_table, sizeof(int) * fp->nr_rates);
146 fp->rate_table = rate_table; 146 fp->rate_table = rate_table;
147 } 147 }
148 148
@@ -224,10 +224,9 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
224 if (altsd->bNumEndpoints != 1) 224 if (altsd->bNumEndpoints != 1)
225 return -ENXIO; 225 return -ENXIO;
226 226
227 fp = kmalloc(sizeof(*fp), GFP_KERNEL); 227 fp = kmemdup(&ua_format, sizeof(*fp), GFP_KERNEL);
228 if (!fp) 228 if (!fp)
229 return -ENOMEM; 229 return -ENOMEM;
230 memcpy(fp, &ua_format, sizeof(*fp));
231 230
232 fp->iface = altsd->bInterfaceNumber; 231 fp->iface = altsd->bInterfaceNumber;
233 fp->endpoint = get_endpoint(alts, 0)->bEndpointAddress; 232 fp->endpoint = get_endpoint(alts, 0)->bEndpointAddress;
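The quirks.c hunks above replace open-coded kmalloc()+memcpy() pairs with kmemdup(), which allocates and copies in one call and returns NULL on failure. A small userspace sketch of the same simplification; memdup() below is a local stand-in for the kernel's kmemdup() (which also takes a gfp_t flags argument):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* stand-in for kmemdup(src, len, gfp): allocate and copy in one step */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	int rates[] = { 44100, 48000, 96000 };
	size_t bytes = sizeof(rates);

	/* before: allocate, check, then copy as a separate step */
	int *copy1 = malloc(bytes);
	if (!copy1)
		return 1;
	memcpy(copy1, rates, bytes);

	/* after: one call replaces the allocate+copy pair */
	int *copy2 = memdup(rates, bytes);
	if (!copy2) {
		free(copy1);
		return 1;
	}

	printf("%d %d\n", copy1[1], copy2[2]);
	free(copy1);
	free(copy2);
	return 0;
}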
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index 8d02ccb10c59..30e2befd6f2a 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -42,6 +42,7 @@ $default{"BISECT_MANUAL"} = 0;
42$default{"BISECT_SKIP"} = 1; 42$default{"BISECT_SKIP"} = 1;
43$default{"SUCCESS_LINE"} = "login:"; 43$default{"SUCCESS_LINE"} = "login:";
44$default{"DETECT_TRIPLE_FAULT"} = 1; 44$default{"DETECT_TRIPLE_FAULT"} = 1;
45$default{"NO_INSTALL"} = 0;
45$default{"BOOTED_TIMEOUT"} = 1; 46$default{"BOOTED_TIMEOUT"} = 1;
46$default{"DIE_ON_FAILURE"} = 1; 47$default{"DIE_ON_FAILURE"} = 1;
47$default{"SSH_EXEC"} = "ssh \$SSH_USER\@\$MACHINE \$SSH_COMMAND"; 48$default{"SSH_EXEC"} = "ssh \$SSH_USER\@\$MACHINE \$SSH_COMMAND";
@@ -84,6 +85,7 @@ my $grub_number;
84my $target; 85my $target;
85my $make; 86my $make;
86my $post_install; 87my $post_install;
88my $no_install;
87my $noclean; 89my $noclean;
88my $minconfig; 90my $minconfig;
89my $start_minconfig; 91my $start_minconfig;
@@ -115,6 +117,7 @@ my $timeout;
115my $booted_timeout; 117my $booted_timeout;
116my $detect_triplefault; 118my $detect_triplefault;
117my $console; 119my $console;
120my $reboot_success_line;
118my $success_line; 121my $success_line;
119my $stop_after_success; 122my $stop_after_success;
120my $stop_after_failure; 123my $stop_after_failure;
@@ -130,6 +133,12 @@ my %config_help;
130my %variable; 133my %variable;
131my %force_config; 134my %force_config;
132 135
136# do not force reboots on config problems
137my $no_reboot = 1;
138
139# default variables that can be used
140chomp ($variable{"PWD"} = `pwd`);
141
133$config_help{"MACHINE"} = << "EOF" 142$config_help{"MACHINE"} = << "EOF"
134 The machine hostname that you will test. 143 The machine hostname that you will test.
135EOF 144EOF
@@ -241,6 +250,7 @@ sub read_yn {
241 250
242sub get_ktest_config { 251sub get_ktest_config {
243 my ($config) = @_; 252 my ($config) = @_;
253 my $ans;
244 254
245 return if (defined($opt{$config})); 255 return if (defined($opt{$config}));
246 256
@@ -254,16 +264,17 @@ sub get_ktest_config {
254 if (defined($default{$config})) { 264 if (defined($default{$config})) {
255 print "\[$default{$config}\] "; 265 print "\[$default{$config}\] ";
256 } 266 }
257 $entered_configs{$config} = <STDIN>; 267 $ans = <STDIN>;
258 $entered_configs{$config} =~ s/^\s*(.*\S)\s*$/$1/; 268 $ans =~ s/^\s*(.*\S)\s*$/$1/;
259 if ($entered_configs{$config} =~ /^\s*$/) { 269 if ($ans =~ /^\s*$/) {
260 if ($default{$config}) { 270 if ($default{$config}) {
261 $entered_configs{$config} = $default{$config}; 271 $ans = $default{$config};
262 } else { 272 } else {
263 print "Your answer can not be blank\n"; 273 print "Your answer can not be blank\n";
264 next; 274 next;
265 } 275 }
266 } 276 }
277 $entered_configs{$config} = process_variables($ans);
267 last; 278 last;
268 } 279 }
269} 280}
@@ -298,7 +309,7 @@ sub get_ktest_configs {
298} 309}
299 310
300sub process_variables { 311sub process_variables {
301 my ($value) = @_; 312 my ($value, $remove_undef) = @_;
302 my $retval = ""; 313 my $retval = "";
303 314
304 # We want to check for '\', and it is just easier 315 # We want to check for '\', and it is just easier
@@ -316,6 +327,10 @@ sub process_variables {
316 $retval = "$retval$begin"; 327 $retval = "$retval$begin";
317 if (defined($variable{$var})) { 328 if (defined($variable{$var})) {
318 $retval = "$retval$variable{$var}"; 329 $retval = "$retval$variable{$var}";
330 } elsif (defined($remove_undef) && $remove_undef) {
331 # for if statements, any variable that is not defined,
332 # we simply convert it to 0
333 $retval = "${retval}0";
319 } else { 334 } else {
320 # put back the original piece. 335 # put back the original piece.
321 $retval = "$retval\$\{$var\}"; 336 $retval = "$retval\$\{$var\}";
@@ -331,10 +346,17 @@ sub process_variables {
331} 346}
332 347
333sub set_value { 348sub set_value {
334 my ($lvalue, $rvalue) = @_; 349 my ($lvalue, $rvalue, $override, $overrides, $name) = @_;
335 350
336 if (defined($opt{$lvalue})) { 351 if (defined($opt{$lvalue})) {
337 die "Error: Option $lvalue defined more than once!\n"; 352 if (!$override || defined(${$overrides}{$lvalue})) {
353 my $extra = "";
354 if ($override) {
355 $extra = "In the same override section!\n";
356 }
357 die "$name: $.: Option $lvalue defined more than once!\n$extra";
358 }
359 ${$overrides}{$lvalue} = $rvalue;
338 } 360 }
339 if ($rvalue =~ /^\s*$/) { 361 if ($rvalue =~ /^\s*$/) {
340 delete $opt{$lvalue}; 362 delete $opt{$lvalue};
@@ -355,86 +377,274 @@ sub set_variable {
355 } 377 }
356} 378}
357 379
358sub read_config { 380sub process_compare {
359 my ($config) = @_; 381 my ($lval, $cmp, $rval) = @_;
382
383 # remove whitespace
384
385 $lval =~ s/^\s*//;
386 $lval =~ s/\s*$//;
387
388 $rval =~ s/^\s*//;
389 $rval =~ s/\s*$//;
390
391 if ($cmp eq "==") {
392 return $lval eq $rval;
393 } elsif ($cmp eq "!=") {
394 return $lval ne $rval;
395 }
396
397 my $statement = "$lval $cmp $rval";
398 my $ret = eval $statement;
399
400 # $@ stores error of eval
401 if ($@) {
402 return -1;
403 }
404
405 return $ret;
406}
407
408sub value_defined {
409 my ($val) = @_;
410
411 return defined($variable{$2}) ||
412 defined($opt{$2});
413}
414
415my $d = 0;
416sub process_expression {
417 my ($name, $val) = @_;
418
419 my $c = $d++;
420
421 while ($val =~ s/\(([^\(]*?)\)/\&\&\&\&VAL\&\&\&\&/) {
422 my $express = $1;
423
424 if (process_expression($name, $express)) {
425 $val =~ s/\&\&\&\&VAL\&\&\&\&/ 1 /;
426 } else {
427 $val =~ s/\&\&\&\&VAL\&\&\&\&/ 0 /;
428 }
429 }
430
431 $d--;
432 my $OR = "\\|\\|";
433 my $AND = "\\&\\&";
434
435 while ($val =~ s/^(.*?)($OR|$AND)//) {
436 my $express = $1;
437 my $op = $2;
438
439 if (process_expression($name, $express)) {
440 if ($op eq "||") {
441 return 1;
442 }
443 } else {
444 if ($op eq "&&") {
445 return 0;
446 }
447 }
448 }
449
450 if ($val =~ /(.*)(==|\!=|>=|<=|>|<)(.*)/) {
451 my $ret = process_compare($1, $2, $3);
452 if ($ret < 0) {
453 die "$name: $.: Unable to process comparison\n";
454 }
455 return $ret;
456 }
457
458 if ($val =~ /^\s*(NOT\s*)?DEFINED\s+(\S+)\s*$/) {
459 if (defined $1) {
460 return !value_defined($2);
461 } else {
462 return value_defined($2);
463 }
464 }
465
466 if ($val =~ /^\s*0\s*$/) {
467 return 0;
468 } elsif ($val =~ /^\s*\d+\s*$/) {
469 return 1;
470 }
471
472 die ("$name: $.: Undefined content $val in if statement\n");
473}
474
475sub process_if {
476 my ($name, $value) = @_;
477
478 # Convert variables and replace undefined ones with 0
479 my $val = process_variables($value, 1);
480 my $ret = process_expression $name, $val;
481
482 return $ret;
483}
360 484
361 open(IN, $config) || die "can't read file $config"; 485sub __read_config {
486 my ($config, $current_test_num) = @_;
487
488 my $in;
489 open($in, $config) || die "can't read file $config";
362 490
363 my $name = $config; 491 my $name = $config;
364 $name =~ s,.*/(.*),$1,; 492 $name =~ s,.*/(.*),$1,;
365 493
366 my $test_num = 0; 494 my $test_num = $$current_test_num;
367 my $default = 1; 495 my $default = 1;
368 my $repeat = 1; 496 my $repeat = 1;
369 my $num_tests_set = 0; 497 my $num_tests_set = 0;
370 my $skip = 0; 498 my $skip = 0;
371 my $rest; 499 my $rest;
500 my $line;
372 my $test_case = 0; 501 my $test_case = 0;
502 my $if = 0;
503 my $if_set = 0;
504 my $override = 0;
373 505
374 while (<IN>) { 506 my %overrides;
507
508 while (<$in>) {
375 509
376 # ignore blank lines and comments 510 # ignore blank lines and comments
377 next if (/^\s*$/ || /\s*\#/); 511 next if (/^\s*$/ || /\s*\#/);
378 512
379 if (/^\s*TEST_START(.*)/) { 513 if (/^\s*(TEST_START|DEFAULTS)\b(.*)/) {
380 514
381 $rest = $1; 515 my $type = $1;
516 $rest = $2;
517 $line = $2;
382 518
383 if ($num_tests_set) { 519 my $old_test_num;
384 die "$name: $.: Can not specify both NUM_TESTS and TEST_START\n"; 520 my $old_repeat;
385 } 521 $override = 0;
522
523 if ($type eq "TEST_START") {
524
525 if ($num_tests_set) {
526 die "$name: $.: Can not specify both NUM_TESTS and TEST_START\n";
527 }
386 528
387 my $old_test_num = $test_num; 529 $old_test_num = $test_num;
388 my $old_repeat = $repeat; 530 $old_repeat = $repeat;
389 531
390 $test_num += $repeat; 532 $test_num += $repeat;
391 $default = 0; 533 $default = 0;
392 $repeat = 1; 534 $repeat = 1;
535 } else {
536 $default = 1;
537 }
393 538
394 if ($rest =~ /\s+SKIP(.*)/) { 539 # If SKIP is anywhere in the line, the command will be skipped
395 $rest = $1; 540 if ($rest =~ s/\s+SKIP\b//) {
396 $skip = 1; 541 $skip = 1;
397 } else { 542 } else {
398 $test_case = 1; 543 $test_case = 1;
399 $skip = 0; 544 $skip = 0;
400 } 545 }
401 546
402 if ($rest =~ /\s+ITERATE\s+(\d+)(.*)$/) { 547 if ($rest =~ s/\sELSE\b//) {
403 $repeat = $1; 548 if (!$if) {
404 $rest = $2; 549 die "$name: $.: ELSE found without a matching IF section\n$_";
405 $repeat_tests{"$test_num"} = $repeat; 550 }
551 $if = 0;
552
553 if ($if_set) {
554 $skip = 1;
555 } else {
556 $skip = 0;
557 }
406 } 558 }
407 559
408 if ($rest =~ /\s+SKIP(.*)/) { 560 if ($rest =~ s/\sIF\s+(.*)//) {
409 $rest = $1; 561 if (process_if($name, $1)) {
410 $skip = 1; 562 $if_set = 1;
563 } else {
564 $skip = 1;
565 }
566 $if = 1;
567 } else {
568 $if = 0;
569 $if_set = 0;
411 } 570 }
412 571
413 if ($rest !~ /^\s*$/) { 572 if (!$skip) {
414 die "$name: $.: Gargbage found after TEST_START\n$_"; 573 if ($type eq "TEST_START") {
574 if ($rest =~ s/\s+ITERATE\s+(\d+)//) {
575 $repeat = $1;
576 $repeat_tests{"$test_num"} = $repeat;
577 }
578 } elsif ($rest =~ s/\sOVERRIDE\b//) {
579 # DEFAULT only
580 $override = 1;
581 # Clear previous overrides
582 %overrides = ();
583 }
584 }
585
586 if (!$skip && $rest !~ /^\s*$/) {
587 die "$name: $.: Gargbage found after $type\n$_";
415 } 588 }
416 589
417 if ($skip) { 590 if ($skip && $type eq "TEST_START") {
418 $test_num = $old_test_num; 591 $test_num = $old_test_num;
419 $repeat = $old_repeat; 592 $repeat = $old_repeat;
420 } 593 }
421 594
422 } elsif (/^\s*DEFAULTS(.*)$/) { 595 } elsif (/^\s*ELSE\b(.*)$/) {
423 $default = 1; 596 if (!$if) {
424 597 die "$name: $.: ELSE found without a matching IF section\n$_";
598 }
425 $rest = $1; 599 $rest = $1;
426 600 if ($if_set) {
427 if ($rest =~ /\s+SKIP(.*)/) {
428 $rest = $1;
429 $skip = 1; 601 $skip = 1;
602 $rest = "";
430 } else { 603 } else {
431 $skip = 0; 604 $skip = 0;
605
606 if ($rest =~ /\sIF\s+(.*)/) {
607 # May be an ELSE IF section.
608 if (!process_if($name, $1)) {
609 $skip = 1;
610 }
611 $rest = "";
612 } else {
613 $if = 0;
614 }
432 } 615 }
433 616
434 if ($rest !~ /^\s*$/) { 617 if ($rest !~ /^\s*$/) {
435 die "$name: $.: Gargbage found after DEFAULTS\n$_"; 618 die "$name: $.: Gargbage found after DEFAULTS\n$_";
436 } 619 }
437 620
621 } elsif (/^\s*INCLUDE\s+(\S+)/) {
622
623 next if ($skip);
624
625 if (!$default) {
626 die "$name: $.: INCLUDE can only be done in default sections\n$_";
627 }
628
629 my $file = process_variables($1);
630
631 if ($file !~ m,^/,) {
632 # check the path of the config file first
633 if ($config =~ m,(.*)/,) {
634 if (-f "$1/$file") {
635 $file = "$1/$file";
636 }
637 }
638 }
639
640 if ( ! -r $file ) {
641 die "$name: $.: Can't read file $file\n$_";
642 }
643
644 if (__read_config($file, \$test_num)) {
645 $test_case = 1;
646 }
647
438 } elsif (/^\s*([A-Z_\[\]\d]+)\s*=\s*(.*?)\s*$/) { 648 } elsif (/^\s*([A-Z_\[\]\d]+)\s*=\s*(.*?)\s*$/) {
439 649
440 next if ($skip); 650 next if ($skip);
@@ -460,10 +670,10 @@ sub read_config {
460 } 670 }
461 671
462 if ($default || $lvalue =~ /\[\d+\]$/) { 672 if ($default || $lvalue =~ /\[\d+\]$/) {
463 set_value($lvalue, $rvalue); 673 set_value($lvalue, $rvalue, $override, \%overrides, $name);
464 } else { 674 } else {
465 my $val = "$lvalue\[$test_num\]"; 675 my $val = "$lvalue\[$test_num\]";
466 set_value($val, $rvalue); 676 set_value($val, $rvalue, $override, \%overrides, $name);
467 677
468 if ($repeat > 1) { 678 if ($repeat > 1) {
469 $repeats{$val} = $repeat; 679 $repeats{$val} = $repeat;
@@ -490,13 +700,26 @@ sub read_config {
490 } 700 }
491 } 701 }
492 702
493 close(IN);
494
495 if ($test_num) { 703 if ($test_num) {
496 $test_num += $repeat - 1; 704 $test_num += $repeat - 1;
497 $opt{"NUM_TESTS"} = $test_num; 705 $opt{"NUM_TESTS"} = $test_num;
498 } 706 }
499 707
708 close($in);
709
710 $$current_test_num = $test_num;
711
712 return $test_case;
713}
714
715sub read_config {
716 my ($config) = @_;
717
718 my $test_case;
719 my $test_num = 0;
720
721 $test_case = __read_config $config, \$test_num;
722
500 # make sure we have all mandatory configs 723 # make sure we have all mandatory configs
501 get_ktest_configs; 724 get_ktest_configs;
502 725
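The IF support added above funnels comparisons through process_compare(): == and != compare the two sides as strings, while the remaining operators are handed to Perl's eval and compared numerically. A rough C illustration of that dispatch (not the ktest code itself):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* "==" and "!=" are string comparisons; >, <, >=, <= are numeric.
 * Returns 1 (true), 0 (false), or -1 for an unknown operator. */
static int compare(const char *lval, const char *op, const char *rval)
{
	if (!strcmp(op, "=="))
		return strcmp(lval, rval) == 0;
	if (!strcmp(op, "!="))
		return strcmp(lval, rval) != 0;

	long l = strtol(lval, NULL, 0);
	long r = strtol(rval, NULL, 0);

	if (!strcmp(op, ">="))
		return l >= r;
	if (!strcmp(op, "<="))
		return l <= r;
	if (!strcmp(op, ">"))
		return l > r;
	if (!strcmp(op, "<"))
		return l < r;
	return -1;
}

int main(void)
{
	printf("%d\n", compare("x86_32", "==", "x86_32"));	/* 1 */
	printf("%d\n", compare("3", ">=", "10"));		/* 0 */
	printf("%d\n", compare("boottest", "!=", "nettest"));	/* 1 */
	return 0;
}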
@@ -603,8 +826,20 @@ sub doprint {
603} 826}
604 827
605sub run_command; 828sub run_command;
829sub start_monitor;
830sub end_monitor;
831sub wait_for_monitor;
606 832
607sub reboot { 833sub reboot {
834 my ($time) = @_;
835
836 if (defined($time)) {
837 start_monitor;
838 # flush out current monitor
839 # May contain the reboot success line
840 wait_for_monitor 1;
841 }
842
608 # try to reboot normally 843 # try to reboot normally
609 if (run_command $reboot) { 844 if (run_command $reboot) {
610 if (defined($powercycle_after_reboot)) { 845 if (defined($powercycle_after_reboot)) {
@@ -615,12 +850,17 @@ sub reboot {
615 # nope? power cycle it. 850 # nope? power cycle it.
616 run_command "$power_cycle"; 851 run_command "$power_cycle";
617 } 852 }
853
854 if (defined($time)) {
855 wait_for_monitor($time, $reboot_success_line);
856 end_monitor;
857 }
618} 858}
619 859
620sub do_not_reboot { 860sub do_not_reboot {
621 my $i = $iteration; 861 my $i = $iteration;
622 862
623 return $test_type eq "build" || 863 return $test_type eq "build" || $no_reboot ||
624 ($test_type eq "patchcheck" && $opt{"PATCHCHECK_TYPE[$i]"} eq "build") || 864 ($test_type eq "patchcheck" && $opt{"PATCHCHECK_TYPE[$i]"} eq "build") ||
625 ($test_type eq "bisect" && $opt{"BISECT_TYPE[$i]"} eq "build"); 865 ($test_type eq "bisect" && $opt{"BISECT_TYPE[$i]"} eq "build");
626} 866}
@@ -693,16 +933,29 @@ sub end_monitor {
693} 933}
694 934
695sub wait_for_monitor { 935sub wait_for_monitor {
696 my ($time) = @_; 936 my ($time, $stop) = @_;
937 my $full_line = "";
697 my $line; 938 my $line;
939 my $booted = 0;
698 940
699 doprint "** Wait for monitor to settle down **\n"; 941 doprint "** Wait for monitor to settle down **\n";
700 942
701 # read the monitor and wait for the system to calm down 943 # read the monitor and wait for the system to calm down
702 do { 944 while (!$booted) {
703 $line = wait_for_input($monitor_fp, $time); 945 $line = wait_for_input($monitor_fp, $time);
704 print "$line" if (defined($line)); 946 last if (!defined($line));
705 } while (defined($line)); 947 print "$line";
948 $full_line .= $line;
949
950 if (defined($stop) && $full_line =~ /$stop/) {
951 doprint "wait for monitor detected $stop\n";
952 $booted = 1;
953 }
954
955 if ($line =~ /\n/) {
956 $full_line = "";
957 }
958 }
706 print "** Monitor flushed **\n"; 959 print "** Monitor flushed **\n";
707} 960}
708 961
@@ -719,10 +972,7 @@ sub fail {
719 # no need to reboot for just building. 972 # no need to reboot for just building.
720 if (!do_not_reboot) { 973 if (!do_not_reboot) {
721 doprint "REBOOTING\n"; 974 doprint "REBOOTING\n";
722 reboot; 975 reboot $sleep_time;
723 start_monitor;
724 wait_for_monitor $sleep_time;
725 end_monitor;
726 } 976 }
727 977
728 my $name = ""; 978 my $name = "";
@@ -854,9 +1104,12 @@ sub get_grub_index {
854 open(IN, "$ssh_grub |") 1104 open(IN, "$ssh_grub |")
855 or die "unable to get menu.lst"; 1105 or die "unable to get menu.lst";
856 1106
1107 my $found = 0;
1108
857 while (<IN>) { 1109 while (<IN>) {
858 if (/^\s*title\s+$grub_menu\s*$/) { 1110 if (/^\s*title\s+$grub_menu\s*$/) {
859 $grub_number++; 1111 $grub_number++;
1112 $found = 1;
860 last; 1113 last;
861 } elsif (/^\s*title\s/) { 1114 } elsif (/^\s*title\s/) {
862 $grub_number++; 1115 $grub_number++;
@@ -865,7 +1118,7 @@ sub get_grub_index {
865 close(IN); 1118 close(IN);
866 1119
867 die "Could not find '$grub_menu' in /boot/grub/menu on $machine" 1120 die "Could not find '$grub_menu' in /boot/grub/menu on $machine"
868 if ($grub_number < 0); 1121 if (!$found);
869 doprint "$grub_number\n"; 1122 doprint "$grub_number\n";
870} 1123}
871 1124
@@ -902,7 +1155,8 @@ sub wait_for_input
902 1155
903sub reboot_to { 1156sub reboot_to {
904 if ($reboot_type eq "grub") { 1157 if ($reboot_type eq "grub") {
905 run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch && reboot)'"; 1158 run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch)'";
1159 reboot;
906 return; 1160 return;
907 } 1161 }
908 1162
@@ -1083,6 +1337,8 @@ sub do_post_install {
1083 1337
1084sub install { 1338sub install {
1085 1339
1340 return if ($no_install);
1341
1086 run_scp "$outputdir/$build_target", "$target_image" or 1342 run_scp "$outputdir/$build_target", "$target_image" or
1087 dodie "failed to copy image"; 1343 dodie "failed to copy image";
1088 1344
@@ -1140,6 +1396,11 @@ sub get_version {
1140} 1396}
1141 1397
1142sub start_monitor_and_boot { 1398sub start_monitor_and_boot {
1399 # Make sure the stable kernel has finished booting
1400 start_monitor;
1401 wait_for_monitor 5;
1402 end_monitor;
1403
1143 get_grub_index; 1404 get_grub_index;
1144 get_version; 1405 get_version;
1145 install; 1406 install;
@@ -1250,6 +1511,10 @@ sub build {
1250 1511
1251 unlink $buildlog; 1512 unlink $buildlog;
1252 1513
1514 # Failed builds should not reboot the target
1515 my $save_no_reboot = $no_reboot;
1516 $no_reboot = 1;
1517
1253 if (defined($pre_build)) { 1518 if (defined($pre_build)) {
1254 my $ret = run_command $pre_build; 1519 my $ret = run_command $pre_build;
1255 if (!$ret && defined($pre_build_die) && 1520 if (!$ret && defined($pre_build_die) &&
@@ -1272,15 +1537,15 @@ sub build {
1272 # allow for empty configs 1537 # allow for empty configs
1273 run_command "touch $output_config"; 1538 run_command "touch $output_config";
1274 1539
1275 run_command "mv $output_config $outputdir/config_temp" or 1540 if (!$noclean) {
1276 dodie "moving .config"; 1541 run_command "mv $output_config $outputdir/config_temp" or
1542 dodie "moving .config";
1277 1543
1278 if (!$noclean && !run_command "$make mrproper") { 1544 run_command "$make mrproper" or dodie "make mrproper";
1279 dodie "make mrproper";
1280 }
1281 1545
1282 run_command "mv $outputdir/config_temp $output_config" or 1546 run_command "mv $outputdir/config_temp $output_config" or
1283 dodie "moving config_temp"; 1547 dodie "moving config_temp";
1548 }
1284 1549
1285 } elsif (!$noclean) { 1550 } elsif (!$noclean) {
1286 unlink "$output_config"; 1551 unlink "$output_config";
@@ -1318,10 +1583,15 @@ sub build {
1318 1583
1319 if (!$build_ret) { 1584 if (!$build_ret) {
1320 # bisect may need this to pass 1585 # bisect may need this to pass
1321 return 0 if ($in_bisect); 1586 if ($in_bisect) {
1587 $no_reboot = $save_no_reboot;
1588 return 0;
1589 }
1322 fail "failed build" and return 0; 1590 fail "failed build" and return 0;
1323 } 1591 }
1324 1592
1593 $no_reboot = $save_no_reboot;
1594
1325 return 1; 1595 return 1;
1326} 1596}
1327 1597
@@ -1356,10 +1626,7 @@ sub success {
1356 1626
1357 if ($i != $opt{"NUM_TESTS"} && !do_not_reboot) { 1627 if ($i != $opt{"NUM_TESTS"} && !do_not_reboot) {
1358 doprint "Reboot and wait $sleep_time seconds\n"; 1628 doprint "Reboot and wait $sleep_time seconds\n";
1359 reboot; 1629 reboot $sleep_time;
1360 start_monitor;
1361 wait_for_monitor $sleep_time;
1362 end_monitor;
1363 } 1630 }
1364} 1631}
1365 1632
@@ -1500,10 +1767,7 @@ sub run_git_bisect {
1500 1767
1501sub bisect_reboot { 1768sub bisect_reboot {
1502 doprint "Reboot and sleep $bisect_sleep_time seconds\n"; 1769 doprint "Reboot and sleep $bisect_sleep_time seconds\n";
1503 reboot; 1770 reboot $bisect_sleep_time;
1504 start_monitor;
1505 wait_for_monitor $bisect_sleep_time;
1506 end_monitor;
1507} 1771}
1508 1772
1509# returns 1 on success, 0 on failure, -1 on skip 1773# returns 1 on success, 0 on failure, -1 on skip
@@ -2066,10 +2330,7 @@ sub config_bisect {
2066 2330
2067sub patchcheck_reboot { 2331sub patchcheck_reboot {
2068 doprint "Reboot and sleep $patchcheck_sleep_time seconds\n"; 2332 doprint "Reboot and sleep $patchcheck_sleep_time seconds\n";
2069 reboot; 2333 reboot $patchcheck_sleep_time;
2070 start_monitor;
2071 wait_for_monitor $patchcheck_sleep_time;
2072 end_monitor;
2073} 2334}
2074 2335
2075sub patchcheck { 2336sub patchcheck {
@@ -2178,12 +2439,31 @@ sub patchcheck {
2178} 2439}
2179 2440
2180my %depends; 2441my %depends;
2442my %depcount;
2181my $iflevel = 0; 2443my $iflevel = 0;
2182my @ifdeps; 2444my @ifdeps;
2183 2445
2184# prevent recursion 2446# prevent recursion
2185my %read_kconfigs; 2447my %read_kconfigs;
2186 2448
2449sub add_dep {
2450 # $config depends on $dep
2451 my ($config, $dep) = @_;
2452
2453 if (defined($depends{$config})) {
2454 $depends{$config} .= " " . $dep;
2455 } else {
2456 $depends{$config} = $dep;
2457 }
2458
2459 # record the number of configs depending on $dep
2460 if (defined $depcount{$dep}) {
2461 $depcount{$dep}++;
2462 } else {
2463 $depcount{$dep} = 1;
2464 }
2465}
2466
2187# taken from streamline_config.pl 2467# taken from streamline_config.pl
2188sub read_kconfig { 2468sub read_kconfig {
2189 my ($kconfig) = @_; 2469 my ($kconfig) = @_;
@@ -2230,30 +2510,19 @@ sub read_kconfig {
2230 $config = $2; 2510 $config = $2;
2231 2511
2232 for (my $i = 0; $i < $iflevel; $i++) { 2512 for (my $i = 0; $i < $iflevel; $i++) {
2233 if ($i) { 2513 add_dep $config, $ifdeps[$i];
2234 $depends{$config} .= " " . $ifdeps[$i];
2235 } else {
2236 $depends{$config} = $ifdeps[$i];
2237 }
2238 $state = "DEP";
2239 } 2514 }
2240 2515
2241 # collect the depends for the config 2516 # collect the depends for the config
2242 } elsif ($state eq "NEW" && /^\s*depends\s+on\s+(.*)$/) { 2517 } elsif ($state eq "NEW" && /^\s*depends\s+on\s+(.*)$/) {
2243 2518
2244 if (defined($depends{$1})) { 2519 add_dep $config, $1;
2245 $depends{$config} .= " " . $1;
2246 } else {
2247 $depends{$config} = $1;
2248 }
2249 2520
2250 # Get the configs that select this config 2521 # Get the configs that select this config
2251 } elsif ($state ne "NONE" && /^\s*select\s+(\S+)/) { 2522 } elsif ($state eq "NEW" && /^\s*select\s+(\S+)/) {
2252 if (defined($depends{$1})) { 2523
2253 $depends{$1} .= " " . $config; 2524 # selected by depends on config
2254 } else { 2525 add_dep $1, $config;
2255 $depends{$1} = $config;
2256 }
2257 2526
2258 # Check for if statements 2527 # Check for if statements
2259 } elsif (/^if\s+(.*\S)\s*$/) { 2528 } elsif (/^if\s+(.*\S)\s*$/) {
@@ -2365,11 +2634,18 @@ sub make_new_config {
2365 close OUT; 2634 close OUT;
2366} 2635}
2367 2636
2637sub chomp_config {
2638 my ($config) = @_;
2639
2640 $config =~ s/CONFIG_//;
2641
2642 return $config;
2643}
2644
2368sub get_depends { 2645sub get_depends {
2369 my ($dep) = @_; 2646 my ($dep) = @_;
2370 2647
2371 my $kconfig = $dep; 2648 my $kconfig = chomp_config $dep;
2372 $kconfig =~ s/CONFIG_//;
2373 2649
2374 $dep = $depends{"$kconfig"}; 2650 $dep = $depends{"$kconfig"};
2375 2651
@@ -2419,8 +2695,7 @@ sub test_this_config {
2419 return undef; 2695 return undef;
2420 } 2696 }
2421 2697
2422 my $kconfig = $config; 2698 my $kconfig = chomp_config $config;
2423 $kconfig =~ s/CONFIG_//;
2424 2699
2425 # Test dependencies first 2700 # Test dependencies first
2426 if (defined($depends{"$kconfig"})) { 2701 if (defined($depends{"$kconfig"})) {
@@ -2510,6 +2785,14 @@ sub make_min_config {
2510 2785
2511 my @config_keys = keys %min_configs; 2786 my @config_keys = keys %min_configs;
2512 2787
2788 # All configs need a depcount
2789 foreach my $config (@config_keys) {
2790 my $kconfig = chomp_config $config;
2791 if (!defined $depcount{$kconfig}) {
2792 $depcount{$kconfig} = 0;
2793 }
2794 }
2795
2513 # Remove anything that was set by the make allnoconfig 2796 # Remove anything that was set by the make allnoconfig
2514 # we shouldn't need them as they get set for us anyway. 2797 # we shouldn't need them as they get set for us anyway.
2515 foreach my $config (@config_keys) { 2798 foreach my $config (@config_keys) {
@@ -2548,8 +2831,13 @@ sub make_min_config {
2548 # Now disable each config one by one and do a make oldconfig 2831 # Now disable each config one by one and do a make oldconfig
2549 # till we find a config that changes our list. 2832 # till we find a config that changes our list.
2550 2833
2551 # Put configs that did not modify the config at the end.
2552 my @test_configs = keys %min_configs; 2834 my @test_configs = keys %min_configs;
2835
2836 # Sort keys by how many configs depend on them (most depended-on first)
2837 @test_configs = sort { $depcount{chomp_config($b)} <=> $depcount{chomp_config($a)} }
2838 @test_configs ;
2839
2840 # Put configs that did not modify the config at the end.
2553 my $reset = 1; 2841 my $reset = 1;
2554 for (my $i = 0; $i < $#test_configs; $i++) { 2842 for (my $i = 0; $i < $#test_configs; $i++) {
2555 if (!defined($nochange_config{$test_configs[0]})) { 2843 if (!defined($nochange_config{$test_configs[0]})) {
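With add_dep() and %depcount above, make_min_config() now knows how many configs depend on each option and sorts the candidate list so the most-depended-on configs are tested first, presumably because disabling one of them can eliminate many dependent options at once. A small C sketch of that ordering, using made-up config names and counts:

#include <stdio.h>
#include <stdlib.h>

struct cfg {
	const char *name;
	int depcount;		/* number of configs that depend on this one */
};

/* sort descending by depcount, i.e. most depended-on first */
static int by_depcount_desc(const void *a, const void *b)
{
	const struct cfg *ca = a, *cb = b;

	return cb->depcount - ca->depcount;
}

int main(void)
{
	struct cfg configs[] = {
		{ "NET",     12 },
		{ "PCI",      7 },
		{ "USB_HID",  1 },
		{ "SND",      0 },	/* nothing depends on it: tested last */
	};
	size_t n = sizeof(configs) / sizeof(configs[0]);

	qsort(configs, n, sizeof(configs[0]), by_depcount_desc);

	for (size_t i = 0; i < n; i++)
		printf("%s (%d)\n", configs[i].name, configs[i].depcount);
	return 0;
}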
@@ -2659,10 +2947,7 @@ sub make_min_config {
2659 } 2947 }
2660 2948
2661 doprint "Reboot and wait $sleep_time seconds\n"; 2949 doprint "Reboot and wait $sleep_time seconds\n";
2662 reboot; 2950 reboot $sleep_time;
2663 start_monitor;
2664 wait_for_monitor $sleep_time;
2665 end_monitor;
2666 } 2951 }
2667 2952
2668 success $i; 2953 success $i;
@@ -2783,6 +3068,9 @@ sub set_test_option {
2783# First we need to do is the builds 3068# First we need to do is the builds
2784for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) { 3069for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
2785 3070
3071 # Do not reboot on failing test options
3072 $no_reboot = 1;
3073
2786 $iteration = $i; 3074 $iteration = $i;
2787 3075
2788 my $makecmd = set_test_option("MAKE_CMD", $i); 3076 my $makecmd = set_test_option("MAKE_CMD", $i);
@@ -2811,6 +3099,7 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
2811 $reboot_type = set_test_option("REBOOT_TYPE", $i); 3099 $reboot_type = set_test_option("REBOOT_TYPE", $i);
2812 $grub_menu = set_test_option("GRUB_MENU", $i); 3100 $grub_menu = set_test_option("GRUB_MENU", $i);
2813 $post_install = set_test_option("POST_INSTALL", $i); 3101 $post_install = set_test_option("POST_INSTALL", $i);
3102 $no_install = set_test_option("NO_INSTALL", $i);
2814 $reboot_script = set_test_option("REBOOT_SCRIPT", $i); 3103 $reboot_script = set_test_option("REBOOT_SCRIPT", $i);
2815 $reboot_on_error = set_test_option("REBOOT_ON_ERROR", $i); 3104 $reboot_on_error = set_test_option("REBOOT_ON_ERROR", $i);
2816 $poweroff_on_error = set_test_option("POWEROFF_ON_ERROR", $i); 3105 $poweroff_on_error = set_test_option("POWEROFF_ON_ERROR", $i);
@@ -2832,6 +3121,7 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
2832 $console = set_test_option("CONSOLE", $i); 3121 $console = set_test_option("CONSOLE", $i);
2833 $detect_triplefault = set_test_option("DETECT_TRIPLE_FAULT", $i); 3122 $detect_triplefault = set_test_option("DETECT_TRIPLE_FAULT", $i);
2834 $success_line = set_test_option("SUCCESS_LINE", $i); 3123 $success_line = set_test_option("SUCCESS_LINE", $i);
3124 $reboot_success_line = set_test_option("REBOOT_SUCCESS_LINE", $i);
2835 $stop_after_success = set_test_option("STOP_AFTER_SUCCESS", $i); 3125 $stop_after_success = set_test_option("STOP_AFTER_SUCCESS", $i);
2836 $stop_after_failure = set_test_option("STOP_AFTER_FAILURE", $i); 3126 $stop_after_failure = set_test_option("STOP_AFTER_FAILURE", $i);
2837 $stop_test_after = set_test_option("STOP_TEST_AFTER", $i); 3127 $stop_test_after = set_test_option("STOP_TEST_AFTER", $i);
@@ -2850,9 +3140,11 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
2850 3140
2851 chdir $builddir || die "can't change directory to $builddir"; 3141 chdir $builddir || die "can't change directory to $builddir";
2852 3142
2853 if (!-d $tmpdir) { 3143 foreach my $dir ($tmpdir, $outputdir) {
2854 mkpath($tmpdir) or 3144 if (!-d $dir) {
2855 die "can't create $tmpdir"; 3145 mkpath($dir) or
3146 die "can't create $dir";
3147 }
2856 } 3148 }
2857 3149
2858 $ENV{"SSH_USER"} = $ssh_user; 3150 $ENV{"SSH_USER"} = $ssh_user;
@@ -2889,8 +3181,11 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
2889 $run_type = "ERROR"; 3181 $run_type = "ERROR";
2890 } 3182 }
2891 3183
3184 my $installme = "";
3185 $installme = " no_install" if ($no_install);
3186
2892 doprint "\n\n"; 3187 doprint "\n\n";
2893 doprint "RUNNING TEST $i of $opt{NUM_TESTS} with option $test_type $run_type\n\n"; 3188 doprint "RUNNING TEST $i of $opt{NUM_TESTS} with option $test_type $run_type$installme\n\n";
2894 3189
2895 unlink $dmesg; 3190 unlink $dmesg;
2896 unlink $buildlog; 3191 unlink $buildlog;
@@ -2911,6 +3206,9 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
2911 die "failed to checkout $checkout"; 3206 die "failed to checkout $checkout";
2912 } 3207 }
2913 3208
3209 $no_reboot = 0;
3210
3211
2914 if ($test_type eq "bisect") { 3212 if ($test_type eq "bisect") {
2915 bisect $i; 3213 bisect $i;
2916 next; 3214 next;
@@ -2929,6 +3227,13 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
2929 build $build_type or next; 3227 build $build_type or next;
2930 } 3228 }
2931 3229
3230 if ($test_type eq "install") {
3231 get_version;
3232 install;
3233 success $i;
3234 next;
3235 }
3236
2932 if ($test_type ne "build") { 3237 if ($test_type ne "build") {
2933 my $failed = 0; 3238 my $failed = 0;
2934 start_monitor_and_boot or $failed = 1; 3239 start_monitor_and_boot or $failed = 1;
diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf
index b8bcd14b5a4d..dbedfa196727 100644
--- a/tools/testing/ktest/sample.conf
+++ b/tools/testing/ktest/sample.conf
@@ -72,6 +72,128 @@
72# the same option name under the same test or as default 72# the same option name under the same test or as default
73# ktest will fail to execute, and no tests will run. 73# ktest will fail to execute, and no tests will run.
74# 74#
75# DEFAULTS OVERRIDE
76#
75# Options defined in the DEFAULTS section cannot be duplicated,
76# even if they are defined in two different DEFAULT sections.
77# This is done to catch mistakes where an option is added but
78# the previous definition was forgotten about and not commented out.
79#
80# The OVERRIDE keyword can be added to a section to allow that
81# section to override values from other DEFAULT sections that have
82# been defined previously. It will only override options that
83# have been defined before its use. Options defined later
84# in a non-override section will still error. The same option
85# cannot be defined twice in the same section, even if that section
86# is marked OVERRIDE.
89#
90#
91#
92# Both TEST_START and DEFAULTS sections can also have the IF keyword.
93# The value after the IF must evaluate to 0 or a non-zero positive
94# integer, and can use the config variables (explained below).
95#
96# DEFAULTS IF ${IS_X86_32}
97#
98# The above will process the DEFAULTS section if the config
99# variable IS_X86_32 evaluates to a non-zero positive integer;
100# otherwise, if it evaluates to zero, it will act the same
101# as if the SKIP keyword had been used.
102#
103# The ELSE keyword can be used directly after a section with
104# an IF statement.
105#
106# TEST_START IF ${RUN_NET_TESTS}
107# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-network
108#
109# ELSE
110#
111# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-normal
112#
113#
114# The ELSE keyword can also contain an IF statement to allow multiple
115# if-then-else sections, but all the sections must be either
116# DEFAULT or TEST_START; they cannot be mixed.
117#
118# TEST_START IF ${RUN_NET_TESTS}
119# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-network
120#
121# ELSE IF ${RUN_DISK_TESTS}
122# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-tests
123#
124# ELSE IF ${RUN_CPU_TESTS}
125# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-cpu
126#
127# ELSE
128# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-network
129#
130# The if statement may also contain comparisons. For
131# == and !=, strings may be used on both sides.
132#
133# BOX_TYPE := x86_32
134#
135# DEFAULTS IF ${BOX_TYPE} == x86_32
136# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-32
137# ELSE
138# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-64
139#
140# The DEFINED keyword can be used by the IF statements too.
141# It returns true if the given config variable or option has been defined
142# or false otherwise.
143#
144#
145# DEFAULTS IF DEFINED USE_CC
146# CC := ${USE_CC}
147# ELSE
148# CC := gcc
149#
150#
151# As well as NOT DEFINED.
152#
153# DEFAULTS IF NOT DEFINED MAKE_CMD
154# MAKE_CMD := make ARCH=x86
155#
156#
157# And/or ops (&&,||) may also be used to make complex conditionals.
158#
159# TEST_START IF (DEFINED ALL_TESTS || ${MYTEST} == boottest) && ${MACHINE} == gandalf
160#
161# Notice the use of parentheses. Without any parentheses the above would be
162# processed the same as:
163#
164# TEST_START IF DEFINED ALL_TESTS || (${MYTEST} == boottest && ${MACHINE} == gandalf)
165#
166#
167#
168# INCLUDE file
169#
170# The INCLUDE keyword may be used in DEFAULT sections. This will
171# read another config file and process that file as well. The included
172# file can include other files, add new test cases or default
173# statements. Config variables will be passed to these files and changes
174# to config variables will be seen by top level config files. Including
175# a file is processed just like the contents of the file was cut and pasted
176# into the top level file, except, that include files that end with
177# TEST_START sections will have that section ended at the end of
178# the include file. That is, an included file is included followed
179# by another DEFAULT keyword.
180#
181# Unlike other files referenced in this config, the file path does not need
182# to be absolute. If the file does not start with '/', then the directory
183# that the current config file was located in is used. If no config by the
184# given name is found there, then the current directory is searched.
185#
186# INCLUDE myfile
187# DEFAULT
188#
189# is the same as:
190#
191# INCLUDE myfile
192#
193# Note, if the include file does not contain a full path, the file is
194# searched first by the location of the original include file, and then
195# by the location that ktest.pl was executed in.
196#
75 197
76#### Config variables #### 198#### Config variables ####
77# 199#
@@ -253,9 +375,10 @@
253 375
254# The default test type (default test) 376# The default test type (default test)
255# The test types may be: 377# The test types may be:
256# build - only build the kernel, do nothing else 378# build - only build the kernel, do nothing else
257# boot - build and boot the kernel 379# install - build and install, but do nothing else (does not reboot)
258# test - build, boot and if TEST is set, run the test script 380# boot - build, install, and boot the kernel
381# test - build, boot and if TEST is set, run the test script
259# (If TEST is not set, it defaults back to boot) 382# (If TEST is not set, it defaults back to boot)
260# bisect - Perform a bisect on the kernel (see BISECT_TYPE below) 383# bisect - Perform a bisect on the kernel (see BISECT_TYPE below)
261# patchcheck - Do a test on a series of commits in git (see PATCHCHECK below) 384# patchcheck - Do a test on a series of commits in git (see PATCHCHECK below)
@@ -293,6 +416,13 @@
293# or on some systems: 416# or on some systems:
294#POST_INSTALL = ssh user@target /sbin/dracut -f /boot/initramfs-test.img $KERNEL_VERSION 417#POST_INSTALL = ssh user@target /sbin/dracut -f /boot/initramfs-test.img $KERNEL_VERSION
295 418
419# If for some reason you just want to boot the kernel and you do not
420# want the test to install anything new (for example, you may just want
421# to boot test the same kernel over and over and do not want to go through
422# the hassle of installing anything), you can set this option to 1.
423# (default 0)
424#NO_INSTALL = 1
425
296# If there is a script that you require to run before the build is done 426# If there is a script that you require to run before the build is done
297# you can specify it with PRE_BUILD. 427# you can specify it with PRE_BUILD.
298# 428#
@@ -415,6 +545,14 @@
415# (default "login:") 545# (default "login:")
416#SUCCESS_LINE = login: 546#SUCCESS_LINE = login:
417 547
548# To speed up reboots, define a line that the
549# default kernel produces once it has successfully
550# booted and is ready to have a new test kernel passed
551# to it. Otherwise ktest.pl will wait the full
552# SLEEP_TIME before continuing.
553# (default undefined)
554#REBOOT_SUCCESS_LINE = login:
555
418# In case the console constantly fills the screen, having 556# In case the console constantly fills the screen, having
419# a specified time to stop the test after success is recommended. 557# a specified time to stop the test after success is recommended.
420# (in seconds) 558# (in seconds)
@@ -480,6 +618,8 @@
480# another test. If a reboot to the reliable kernel happens, 618# another test. If a reboot to the reliable kernel happens,
481# we wait SLEEP_TIME for the console to stop producing output 619# we wait SLEEP_TIME for the console to stop producing output
482# before starting the next test. 620# before starting the next test.
621#
622# You can speed up reboot times even more by setting REBOOT_SUCCESS_LINE.
483# (default 60) 623# (default 60)
484#SLEEP_TIME = 60 624#SLEEP_TIME = 60
485 625