741 files changed, 10181 insertions, 7255 deletions
diff --git a/.gitignore b/.gitignore
index 8faa6c02b39e..5d56a3fd0de6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -28,6 +28,7 @@ modules.builtin
 *.gz
 *.bz2
 *.lzma
+*.xz
 *.lzo
 *.patch
 *.gcno
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 2861055afd7a..c27915893974 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -73,8 +73,8 @@
 services.
 </para>
 <para>
-The core of every DRM driver is struct drm_device. Drivers
-will typically statically initialize a drm_device structure,
+The core of every DRM driver is struct drm_driver. Drivers
+will typically statically initialize a drm_driver structure,
 then pass it to drm_init() at load time.
 </para>

@@ -84,7 +84,7 @@
 <title>Driver initialization</title>
 <para>
 Before calling the DRM initialization routines, the driver must
-first create and fill out a struct drm_device structure.
+first create and fill out a struct drm_driver structure.
 </para>
 <programlisting>
 static struct drm_driver driver = {
diff --git a/Documentation/DocBook/filesystems.tmpl b/Documentation/DocBook/filesystems.tmpl
index 5e87ad58c0b5..f51f28531b8d 100644
--- a/Documentation/DocBook/filesystems.tmpl
+++ b/Documentation/DocBook/filesystems.tmpl
@@ -82,6 +82,11 @@
 </sect1>
 </chapter>

+<chapter id="fs_events">
+<title>Events based on file descriptors</title>
+!Efs/eventfd.c
+</chapter>
+
 <chapter id="sysfs">
 <title>The Filesystem for Exporting Kernel Objects</title>
 !Efs/sysfs/file.c
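The chapter added above pulls the kerneldoc comments out of fs/eventfd.c. For readers unfamiliar with that interface, a minimal userspace sketch of eventfd usage follows; it is illustrative only and not part of the patch.

    /* Illustrative only, not part of this patch: basic eventfd(2) usage. */
    #include <stdio.h>
    #include <stdint.h>
    #include <unistd.h>
    #include <sys/eventfd.h>

    int main(void)
    {
            uint64_t val = 3;               /* eventfd counters are 64-bit */
            int fd = eventfd(0, 0);         /* initial count 0, no flags */

            if (fd < 0) {
                    perror("eventfd");
                    return 1;
            }
            write(fd, &val, sizeof(val));   /* adds 3 to the counter */
            read(fd, &val, sizeof(val));    /* returns 3, resets counter to 0 */
            printf("counter read back: %llu\n", (unsigned long long)val);
            close(fd);
            return 0;
    }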
diff --git a/Documentation/powerpc/dts-bindings/fsl/sata.txt b/Documentation/devicetree/bindings/ata/fsl-sata.txt index b46bcf46c3d8..b46bcf46c3d8 100644 --- a/Documentation/powerpc/dts-bindings/fsl/sata.txt +++ b/Documentation/devicetree/bindings/ata/fsl-sata.txt
diff --git a/Documentation/powerpc/dts-bindings/eeprom.txt b/Documentation/devicetree/bindings/eeprom.txt index 4342c10de1bf..4342c10de1bf 100644 --- a/Documentation/powerpc/dts-bindings/eeprom.txt +++ b/Documentation/devicetree/bindings/eeprom.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/8xxx_gpio.txt b/Documentation/devicetree/bindings/gpio/8xxx_gpio.txt index b0019eb5330e..b0019eb5330e 100644 --- a/Documentation/powerpc/dts-bindings/fsl/8xxx_gpio.txt +++ b/Documentation/devicetree/bindings/gpio/8xxx_gpio.txt
diff --git a/Documentation/powerpc/dts-bindings/gpio/gpio.txt b/Documentation/devicetree/bindings/gpio/gpio.txt index edaa84d288a1..edaa84d288a1 100644 --- a/Documentation/powerpc/dts-bindings/gpio/gpio.txt +++ b/Documentation/devicetree/bindings/gpio/gpio.txt
diff --git a/Documentation/powerpc/dts-bindings/gpio/led.txt b/Documentation/devicetree/bindings/gpio/led.txt index 064db928c3c1..064db928c3c1 100644 --- a/Documentation/powerpc/dts-bindings/gpio/led.txt +++ b/Documentation/devicetree/bindings/gpio/led.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/i2c.txt b/Documentation/devicetree/bindings/i2c/fsl-i2c.txt index 1eacd6b20ed5..1eacd6b20ed5 100644 --- a/Documentation/powerpc/dts-bindings/fsl/i2c.txt +++ b/Documentation/devicetree/bindings/i2c/fsl-i2c.txt
diff --git a/Documentation/powerpc/dts-bindings/marvell.txt b/Documentation/devicetree/bindings/marvell.txt index f1533d91953a..f1533d91953a 100644 --- a/Documentation/powerpc/dts-bindings/marvell.txt +++ b/Documentation/devicetree/bindings/marvell.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt index 64bcb8be973c..64bcb8be973c 100644 --- a/Documentation/powerpc/dts-bindings/fsl/esdhc.txt +++ b/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
diff --git a/Documentation/powerpc/dts-bindings/mmc-spi-slot.txt b/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt index c39ac2891951..c39ac2891951 100644 --- a/Documentation/powerpc/dts-bindings/mmc-spi-slot.txt +++ b/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/upm-nand.txt b/Documentation/devicetree/bindings/mtd/fsl-upm-nand.txt index a48b2cadc7f0..a48b2cadc7f0 100644 --- a/Documentation/powerpc/dts-bindings/fsl/upm-nand.txt +++ b/Documentation/devicetree/bindings/mtd/fsl-upm-nand.txt
diff --git a/Documentation/powerpc/dts-bindings/mtd-physmap.txt b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt index 80152cb567d9..80152cb567d9 100644 --- a/Documentation/powerpc/dts-bindings/mtd-physmap.txt +++ b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/can.txt b/Documentation/devicetree/bindings/net/can/mpc5xxx-mscan.txt index 2fa4fcd38fd6..2fa4fcd38fd6 100644 --- a/Documentation/powerpc/dts-bindings/fsl/can.txt +++ b/Documentation/devicetree/bindings/net/can/mpc5xxx-mscan.txt
diff --git a/Documentation/powerpc/dts-bindings/can/sja1000.txt b/Documentation/devicetree/bindings/net/can/sja1000.txt index d6d209ded937..d6d209ded937 100644 --- a/Documentation/powerpc/dts-bindings/can/sja1000.txt +++ b/Documentation/devicetree/bindings/net/can/sja1000.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/tsec.txt b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt index edb7ae19e868..edb7ae19e868 100644 --- a/Documentation/powerpc/dts-bindings/fsl/tsec.txt +++ b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
diff --git a/Documentation/powerpc/dts-bindings/gpio/mdio.txt b/Documentation/devicetree/bindings/net/mdio-gpio.txt index bc9549529014..bc9549529014 100644 --- a/Documentation/powerpc/dts-bindings/gpio/mdio.txt +++ b/Documentation/devicetree/bindings/net/mdio-gpio.txt
diff --git a/Documentation/powerpc/dts-bindings/phy.txt b/Documentation/devicetree/bindings/net/phy.txt index bb8c742eb8c5..bb8c742eb8c5 100644 --- a/Documentation/powerpc/dts-bindings/phy.txt +++ b/Documentation/devicetree/bindings/net/phy.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/83xx-512x-pci.txt b/Documentation/devicetree/bindings/pci/83xx-512x-pci.txt index 35a465362408..35a465362408 100644 --- a/Documentation/powerpc/dts-bindings/fsl/83xx-512x-pci.txt +++ b/Documentation/devicetree/bindings/pci/83xx-512x-pci.txt
diff --git a/Documentation/powerpc/dts-bindings/4xx/cpm.txt b/Documentation/devicetree/bindings/powerpc/4xx/cpm.txt index ee459806d35e..ee459806d35e 100644 --- a/Documentation/powerpc/dts-bindings/4xx/cpm.txt +++ b/Documentation/devicetree/bindings/powerpc/4xx/cpm.txt
diff --git a/Documentation/powerpc/dts-bindings/4xx/emac.txt b/Documentation/devicetree/bindings/powerpc/4xx/emac.txt index 2161334a7ca5..2161334a7ca5 100644 --- a/Documentation/powerpc/dts-bindings/4xx/emac.txt +++ b/Documentation/devicetree/bindings/powerpc/4xx/emac.txt
diff --git a/Documentation/powerpc/dts-bindings/4xx/ndfc.txt b/Documentation/devicetree/bindings/powerpc/4xx/ndfc.txt index 869f0b5f16e8..869f0b5f16e8 100644 --- a/Documentation/powerpc/dts-bindings/4xx/ndfc.txt +++ b/Documentation/devicetree/bindings/powerpc/4xx/ndfc.txt
diff --git a/Documentation/powerpc/dts-bindings/4xx/ppc440spe-adma.txt b/Documentation/devicetree/bindings/powerpc/4xx/ppc440spe-adma.txt index 515ebcf1b97d..515ebcf1b97d 100644 --- a/Documentation/powerpc/dts-bindings/4xx/ppc440spe-adma.txt +++ b/Documentation/devicetree/bindings/powerpc/4xx/ppc440spe-adma.txt
diff --git a/Documentation/powerpc/dts-bindings/4xx/reboot.txt b/Documentation/devicetree/bindings/powerpc/4xx/reboot.txt index d7217260589c..d7217260589c 100644 --- a/Documentation/powerpc/dts-bindings/4xx/reboot.txt +++ b/Documentation/devicetree/bindings/powerpc/4xx/reboot.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/board.txt b/Documentation/devicetree/bindings/powerpc/fsl/board.txt index 39e941515a36..39e941515a36 100644 --- a/Documentation/powerpc/dts-bindings/fsl/board.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/board.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm.txt index 160c752484b4..160c752484b4 100644 --- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/brg.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/brg.txt index 4c7d45eaf025..4c7d45eaf025 100644 --- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/brg.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/brg.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/i2c.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/i2c.txt index 87bc6048667e..87bc6048667e 100644 --- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/i2c.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/i2c.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/pic.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/pic.txt index 8e3ee1681618..8e3ee1681618 100644 --- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/pic.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/pic.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/usb.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/usb.txt index 74bfda4bb824..74bfda4bb824 100644 --- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/usb.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/usb.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/gpio.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/gpio.txt index 349f79fd7076..349f79fd7076 100644 --- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/gpio.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/gpio.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/network.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/network.txt index 0e4269446580..0e4269446580 100644 --- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/network.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/network.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe.txt index 4f8930263dd9..4f8930263dd9 100644 --- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/firmware.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/firmware.txt index 249db3a15d15..249db3a15d15 100644 --- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/firmware.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/firmware.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/par_io.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/par_io.txt index 60984260207b..60984260207b 100644 --- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/par_io.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/par_io.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/pincfg.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/pincfg.txt index c5b43061db3a..c5b43061db3a 100644 --- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/pincfg.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/pincfg.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/ucc.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/ucc.txt index e47734bee3f0..e47734bee3f0 100644 --- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/ucc.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/ucc.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/usb.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/usb.txt index 9ccd5f30405b..9ccd5f30405b 100644 --- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/usb.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/usb.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/serial.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/serial.txt index 2ea76d9d137c..2ea76d9d137c 100644 --- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/serial.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/serial.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/diu.txt b/Documentation/devicetree/bindings/powerpc/fsl/diu.txt index b66cb6d31d69..b66cb6d31d69 100644 --- a/Documentation/powerpc/dts-bindings/fsl/diu.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/diu.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/dma.txt b/Documentation/devicetree/bindings/powerpc/fsl/dma.txt index 2a4b4bce6110..2a4b4bce6110 100644 --- a/Documentation/powerpc/dts-bindings/fsl/dma.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/dma.txt
diff --git a/Documentation/powerpc/dts-bindings/ecm.txt b/Documentation/devicetree/bindings/powerpc/fsl/ecm.txt index f514f29c67d6..f514f29c67d6 100644 --- a/Documentation/powerpc/dts-bindings/ecm.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/ecm.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/gtm.txt b/Documentation/devicetree/bindings/powerpc/fsl/gtm.txt index 9a33efded4bc..9a33efded4bc 100644 --- a/Documentation/powerpc/dts-bindings/fsl/gtm.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/gtm.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/guts.txt b/Documentation/devicetree/bindings/powerpc/fsl/guts.txt index 9e7a2417dac5..9e7a2417dac5 100644 --- a/Documentation/powerpc/dts-bindings/fsl/guts.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/guts.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/lbc.txt b/Documentation/devicetree/bindings/powerpc/fsl/lbc.txt index 3300fec501c5..3300fec501c5 100644 --- a/Documentation/powerpc/dts-bindings/fsl/lbc.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/lbc.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/mcm.txt b/Documentation/devicetree/bindings/powerpc/fsl/mcm.txt index 4ceda9b3b413..4ceda9b3b413 100644 --- a/Documentation/powerpc/dts-bindings/fsl/mcm.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/mcm.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/mcu-mpc8349emitx.txt b/Documentation/devicetree/bindings/powerpc/fsl/mcu-mpc8349emitx.txt index 0f766333b6eb..0f766333b6eb 100644 --- a/Documentation/powerpc/dts-bindings/fsl/mcu-mpc8349emitx.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/mcu-mpc8349emitx.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/mpc5121-psc.txt b/Documentation/devicetree/bindings/powerpc/fsl/mpc5121-psc.txt index 8832e8798912..8832e8798912 100644 --- a/Documentation/powerpc/dts-bindings/fsl/mpc5121-psc.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/mpc5121-psc.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt b/Documentation/devicetree/bindings/powerpc/fsl/mpc5200.txt index 4ccb2cd5df94..4ccb2cd5df94 100644 --- a/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/mpc5200.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/mpic.txt b/Documentation/devicetree/bindings/powerpc/fsl/mpic.txt index 71e39cf3215b..71e39cf3215b 100644 --- a/Documentation/powerpc/dts-bindings/fsl/mpic.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/mpic.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/msi-pic.txt b/Documentation/devicetree/bindings/powerpc/fsl/msi-pic.txt index bcc30bac6831..bcc30bac6831 100644 --- a/Documentation/powerpc/dts-bindings/fsl/msi-pic.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/msi-pic.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/pmc.txt b/Documentation/devicetree/bindings/powerpc/fsl/pmc.txt index 07256b7ffcaa..07256b7ffcaa 100644 --- a/Documentation/powerpc/dts-bindings/fsl/pmc.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/pmc.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/sec.txt b/Documentation/devicetree/bindings/powerpc/fsl/sec.txt index 2b6f2d45c45a..2b6f2d45c45a 100644 --- a/Documentation/powerpc/dts-bindings/fsl/sec.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/sec.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/ssi.txt b/Documentation/devicetree/bindings/powerpc/fsl/ssi.txt index 5ff76c9c57d2..5ff76c9c57d2 100644 --- a/Documentation/powerpc/dts-bindings/fsl/ssi.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/ssi.txt
diff --git a/Documentation/powerpc/dts-bindings/nintendo/gamecube.txt b/Documentation/devicetree/bindings/powerpc/nintendo/gamecube.txt index b558585b1aaf..b558585b1aaf 100644 --- a/Documentation/powerpc/dts-bindings/nintendo/gamecube.txt +++ b/Documentation/devicetree/bindings/powerpc/nintendo/gamecube.txt
diff --git a/Documentation/powerpc/dts-bindings/nintendo/wii.txt b/Documentation/devicetree/bindings/powerpc/nintendo/wii.txt index a7e155a023b8..a7e155a023b8 100644 --- a/Documentation/powerpc/dts-bindings/nintendo/wii.txt +++ b/Documentation/devicetree/bindings/powerpc/nintendo/wii.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/spi.txt b/Documentation/devicetree/bindings/spi/fsl-spi.txt index 777abd7399d5..777abd7399d5 100644 --- a/Documentation/powerpc/dts-bindings/fsl/spi.txt +++ b/Documentation/devicetree/bindings/spi/fsl-spi.txt
diff --git a/Documentation/powerpc/dts-bindings/spi-bus.txt b/Documentation/devicetree/bindings/spi/spi-bus.txt index e782add2e457..e782add2e457 100644 --- a/Documentation/powerpc/dts-bindings/spi-bus.txt +++ b/Documentation/devicetree/bindings/spi/spi-bus.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/usb.txt b/Documentation/devicetree/bindings/usb/fsl-usb.txt index bd5723f0b67e..bd5723f0b67e 100644 --- a/Documentation/powerpc/dts-bindings/fsl/usb.txt +++ b/Documentation/devicetree/bindings/usb/fsl-usb.txt
diff --git a/Documentation/powerpc/dts-bindings/usb-ehci.txt b/Documentation/devicetree/bindings/usb/usb-ehci.txt index fa18612f757b..fa18612f757b 100644 --- a/Documentation/powerpc/dts-bindings/usb-ehci.txt +++ b/Documentation/devicetree/bindings/usb/usb-ehci.txt
diff --git a/Documentation/powerpc/dts-bindings/xilinx.txt b/Documentation/devicetree/bindings/xilinx.txt index 299d0923537b..299d0923537b 100644 --- a/Documentation/powerpc/dts-bindings/xilinx.txt +++ b/Documentation/devicetree/bindings/xilinx.txt
diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/devicetree/booting-without-of.txt
index 7400d7555dc3..28b1c9d3d351 100644
--- a/Documentation/powerpc/booting-without-of.txt
+++ b/Documentation/devicetree/booting-without-of.txt
@@ -13,7 +13,6 @@ Table of Contents

 I - Introduction
 1) Entry point for arch/powerpc
-2) Board support

 II - The DT block format
 1) Header
@@ -41,13 +40,6 @@ Table of Contents
 VI - System-on-a-chip devices and nodes
 1) Defining child nodes of an SOC
 2) Representing devices without a current OF specification
-a) PHY nodes
-b) Interrupt controllers
-c) 4xx/Axon EMAC ethernet nodes
-d) Xilinx IP cores
-e) USB EHCI controllers
-f) MDIO on GPIOs
-g) SPI busses

 VII - Specifying interrupt information for devices
 1) interrupts property
@@ -123,7 +115,7 @@ Revision Information
 I - Introduction
 ================

-During the recent development of the Linux/ppc64 kernel, and more
+During the development of the Linux/ppc64 kernel, and more
 specifically, the addition of new platform types outside of the old
 IBM pSeries/iSeries pair, it was decided to enforce some strict rules
 regarding the kernel entry and bootloader <-> kernel interfaces, in
@@ -146,7 +138,7 @@ section III, but, for example, the kernel does not require you to
 create a node for every PCI device in the system. It is a requirement
 to have a node for PCI host bridges in order to provide interrupt
 routing informations and memory/IO ranges, among others. It is also
-recommended to define nodes for on chip devices and other busses that
+recommended to define nodes for on chip devices and other buses that
 don't specifically fit in an existing OF specification. This creates a
 great flexibility in the way the kernel can then probe those and match
 drivers to device, without having to hard code all sorts of tables. It
@@ -158,7 +150,7 @@ it with special cases.
 1) Entry point for arch/powerpc
 -------------------------------

-There is one and one single entry point to the kernel, at the start
+There is one single entry point to the kernel, at the start
 of the kernel image. That entry point supports two calling
 conventions:

@@ -210,12 +202,6 @@ it with special cases.
 with all CPUs. The way to do that with method b) will be
 described in a later revision of this document.

-
-2) Board support
-----------------
-
-64-bit kernels:
-
 Board supports (platforms) are not exclusive config options. An
 arbitrary set of board supports can be built in a single kernel
 image. The kernel will "know" what set of functions to use for a
@@ -234,48 +220,11 @@ it with special cases.
 containing the various callbacks that the generic code will
 use to get to your platform specific code

-c) Add a reference to your "ppc_md" structure in the
-"machines" table in arch/powerpc/kernel/setup_64.c if you are
-a 64-bit platform.
-
-d) request and get assigned a platform number (see PLATFORM_*
-constants in arch/powerpc/include/asm/processor.h
-
-32-bit embedded kernels:
-
-Currently, board support is essentially an exclusive config option.
-The kernel is configured for a single platform. Part of the reason
-for this is to keep kernels on embedded systems small and efficient;
-part of this is due to the fact the code is already that way. In the
-future, a kernel may support multiple platforms, but only if the
+A kernel image may support multiple platforms, but only if the
 platforms feature the same core architecture. A single kernel build
 cannot support both configurations with Book E and configurations
 with classic Powerpc architectures.

-32-bit embedded platforms that are moved into arch/powerpc using a
-flattened device tree should adopt the merged tree practice of
-setting ppc_md up dynamically, even though the kernel is currently
-built with support for only a single platform at a time. This allows
-unification of the setup code, and will make it easier to go to a
-multiple-platform-support model in the future.
-
-NOTE: I believe the above will be true once Ben's done with the merge
-of the boot sequences.... someone speak up if this is wrong!
-
-To add a 32-bit embedded platform support, follow the instructions
-for 64-bit platforms above, with the exception that the Kconfig
-option should be set up such that the kernel builds exclusively for
-the platform selected. The processor type for the platform should
-enable another config option to select the specific board
-supported.
-
-NOTE: If Ben doesn't merge the setup files, may need to change this to
-point to setup_32.c
-
-
-I will describe later the boot process and various callbacks that
-your platform should implement.
-

 II - The DT block format
 ========================
@@ -300,8 +249,8 @@ the block to RAM before passing it to the kernel.
 1) Header
 ---------

-The kernel is entered with r3 pointing to an area of memory that is
-roughly described in arch/powerpc/include/asm/prom.h by the structure
+The kernel is passed the physical address pointing to an area of memory
+that is roughly described in include/linux/of_fdt.h by the structure
 boot_param_header:

 struct boot_param_header {
@@ -339,7 +288,7 @@ struct boot_param_header {
 All values in this header are in big endian format, the various
 fields in this header are defined more precisely below. All
 "offset" values are in bytes from the start of the header; that is
-from the value of r3.
+from the physical base address of the device tree block.

 - magic

@@ -437,7 +386,7 @@ struct boot_param_header {


 ------------------------------
-r3 -> | struct boot_param_header |
+base -> | struct boot_param_header |
 ------------------------------
 | (alignment gap) (*) |
 ------------------------------
@@ -457,7 +406,7 @@ struct boot_param_header {
 -----> ------------------------------
 |
 |
---- (r3 + totalsize)
+--- (base + totalsize)

 (*) The alignment gaps are not necessarily present; their presence
 and size are dependent on the various alignment requirements of
@@ -500,7 +449,7 @@ the device-tree structure. It is typically used to represent "path" in
 the device-tree. More details about the actual format of these will be
 below.

-The kernel powerpc generic code does not make any formal use of the
+The kernel generic code does not make any formal use of the
 unit address (though some board support code may do) so the only real
 requirement here for the unit address is to ensure uniqueness of
 the node unit name at a given level of the tree. Nodes with no notion
@@ -518,20 +467,21 @@ path to the root node is "/".

 Every node which actually represents an actual device (that is, a node
 which isn't only a virtual "container" for more nodes, like "/cpus"
-is) is also required to have a "device_type" property indicating the
-type of node .
+is) is also required to have a "compatible" property indicating the
+specific hardware and an optional list of devices it is fully
+backwards compatible with.

 Finally, every node that can be referenced from a property in another
-node is required to have a "linux,phandle" property. Real open
-firmware implementations provide a unique "phandle" value for every
-node that the "prom_init()" trampoline code turns into
-"linux,phandle" properties. However, this is made optional if the
-flattened device tree is used directly. An example of a node
+node is required to have either a "phandle" or a "linux,phandle"
+property. Real Open Firmware implementations provide a unique
+"phandle" value for every node that the "prom_init()" trampoline code
+turns into "linux,phandle" properties. However, this is made optional
+if the flattened device tree is used directly. An example of a node
 referencing another node via "phandle" is when laying out the
 interrupt tree which will be described in a further version of this
 document.

-This "linux, phandle" property is a 32-bit value that uniquely
+The "phandle" property is a 32-bit value that uniquely
 identifies a node. You are free to use whatever values or system of
 values, internal pointers, or whatever to generate these, the only
 requirement is that every node for which you provide that property has
@@ -694,7 +644,7 @@ made of 3 cells, the bottom two containing the actual address itself
 while the top cell contains address space indication, flags, and pci
 bus & device numbers.

-For busses that support dynamic allocation, it's the accepted practice
+For buses that support dynamic allocation, it's the accepted practice
 to then not provide the address in "reg" (keep it 0) though while
 providing a flag indicating the address is dynamically allocated, and
 then, to provide a separate "assigned-addresses" property that
@@ -711,7 +661,7 @@ prom_parse.c file of the recent kernels for your bus type.
 The "reg" property only defines addresses and sizes (if #size-cells is
 non-0) within a given bus. In order to translate addresses upward
 (that is into parent bus addresses, and possibly into CPU physical
-addresses), all busses must contain a "ranges" property. If the
+addresses), all buses must contain a "ranges" property. If the
 "ranges" property is missing at a given level, it's assumed that
 translation isn't possible, i.e., the registers are not visible on the
 parent bus. The format of the "ranges" property for a bus is a list
@@ -727,9 +677,9 @@ example, for a PCI host controller, that would be a CPU address. For a
 PCI<->ISA bridge, that would be a PCI address. It defines the base
 address in the parent bus where the beginning of that range is mapped.

-For a new 64-bit powerpc board, I recommend either the 2/2 format or
+For new 64-bit board support, I recommend either the 2/2 format or
 Apple's 2/1 format which is slightly more compact since sizes usually
-fit in a single 32-bit word. New 32-bit powerpc boards should use a
+fit in a single 32-bit word. New 32-bit board support should use a
 1/1 format, unless the processor supports physical addresses greater
 than 32-bits, in which case a 2/1 format is recommended.

@@ -754,7 +704,7 @@ of their actual names.
 While earlier users of Open Firmware like OldWorld macintoshes tended
 to use the actual device name for the "name" property, it's nowadays
 considered a good practice to use a name that is closer to the device
-class (often equal to device_type). For example, nowadays, ethernet
+class (often equal to device_type). For example, nowadays, Ethernet
 controllers are named "ethernet", an additional "model" property
 defining precisely the chip type/model, and "compatible" property
 defining the family in case a single driver can driver more than one
@@ -772,7 +722,7 @@ is present).
 4) Note about node and property names and character set
 -------------------------------------------------------

-While open firmware provides more flexible usage of 8859-1, this
+While Open Firmware provides more flexible usage of 8859-1, this
 specification enforces more strict rules. Nodes and properties should
 be comprised only of ASCII characters 'a' to 'z', '0' to
 '9', ',', '.', '_', '+', '#', '?', and '-'. Node names additionally
@@ -792,7 +742,7 @@ address which can extend beyond that limit.
 --------------------------------
 These are all that are currently required. However, it is strongly
 recommended that you expose PCI host bridges as documented in the
-PCI binding to open firmware, and your interrupt tree as documented
+PCI binding to Open Firmware, and your interrupt tree as documented
 in OF interrupt tree specification.

 a) The root node
@@ -802,20 +752,12 @@ address which can extend beyond that limit.
 - model : this is your board name/model
 - #address-cells : address representation for "root" devices
 - #size-cells: the size representation for "root" devices
-- device_type : This property shouldn't be necessary. However, if
-you decide to create a device_type for your root node, make sure it
-is _not_ "chrp" unless your platform is a pSeries or PAPR compliant
-one for 64-bit, or a CHRP-type machine for 32-bit as this will
-matched by the kernel this way.
-
-Additionally, some recommended properties are:
-
 - compatible : the board "family" generally finds its way here,
 for example, if you have 2 board models with a similar layout,
 that typically get driven by the same platform code in the
-kernel, you would use a different "model" property but put a
-value in "compatible". The kernel doesn't directly use that
-value but it is generally useful.
+kernel, you would specify the exact board model in the
+compatible property followed by an entry that represents the SoC
+model.

 The root node is also generally where you add additional properties
 specific to your board like the serial number if any, that sort of
@@ -841,8 +783,11 @@ address which can extend beyond that limit.

 So under /cpus, you are supposed to create a node for every CPU on
 the machine. There is no specific restriction on the name of the
-CPU, though It's common practice to call it PowerPC,<name>. For
+CPU, though it's common to call it <architecture>,<core>. For
 example, Apple uses PowerPC,G5 while IBM uses PowerPC,970FX.
+However, the Generic Names convention suggests that it would be
+better to simply use 'cpu' for each cpu node and use the compatible
+property to identify the specific cpu core.

 Required properties:

@@ -923,7 +868,7 @@ compatibility.

 e) The /chosen node

-This node is a bit "special". Normally, that's where open firmware
+This node is a bit "special". Normally, that's where Open Firmware
 puts some variable environment information, like the arguments, or
 the default input/output devices.

@@ -940,11 +885,7 @@ compatibility.
 console device if any. Typically, if you have serial devices on
 your board, you may want to put the full path to the one set as
 the default console in the firmware here, for the kernel to pick
-it up as its own default console. If you look at the function
-set_preferred_console() in arch/ppc64/kernel/setup.c, you'll see
-that the kernel tries to find out the default console and has
-knowledge of various types like 8250 serial ports. You may want
-to extend this function to add your own.
+it up as its own default console.

 Note that u-boot creates and fills in the chosen node for platforms
 that use it.
@@ -955,23 +896,23 @@ compatibility.

 f) the /soc<SOCname> node

-This node is used to represent a system-on-a-chip (SOC) and must be
-present if the processor is a SOC. The top-level soc node contains
-information that is global to all devices on the SOC. The node name
-should contain a unit address for the SOC, which is the base address
-of the memory-mapped register set for the SOC. The name of an soc
+This node is used to represent a system-on-a-chip (SoC) and must be
+present if the processor is a SoC. The top-level soc node contains
+information that is global to all devices on the SoC. The node name
+should contain a unit address for the SoC, which is the base address
+of the memory-mapped register set for the SoC. The name of an SoC
 node should start with "soc", and the remainder of the name should
 represent the part number for the soc. For example, the MPC8540's
 soc node would be called "soc8540".

 Required properties:

-- device_type : Should be "soc"
 - ranges : Should be defined as specified in 1) to describe the
-translation of SOC addresses for memory mapped SOC registers.
-- bus-frequency: Contains the bus frequency for the SOC node.
+translation of SoC addresses for memory mapped SoC registers.
+- bus-frequency: Contains the bus frequency for the SoC node.
 Typically, the value of this field is filled in by the boot
 loader.
+- compatible : Exact model of the SoC


 Recommended properties:
@@ -1155,12 +1096,13 @@ while all this has been defined and implemented.

 - An example of code for iterating nodes & retrieving properties
 directly from the flattened tree format can be found in the kernel
-file arch/ppc64/kernel/prom.c, look at scan_flat_dt() function,
+file drivers/of/fdt.c. Look at the of_scan_flat_dt() function,
 its usage in early_init_devtree(), and the corresponding various
 early_init_dt_scan_*() callbacks. That code can be re-used in a
 GPL bootloader, and as the author of that code, I would be happy
 to discuss possible free licensing to any vendor who wishes to
 integrate all or part of this code into a non-GPL bootloader.
+(reference needed; who is 'I' here? ---gcl Jan 31, 2011)



@@ -1203,18 +1145,19 @@ MPC8540.
 2) Representing devices without a current OF specification
 ----------------------------------------------------------

-Currently, there are many devices on SOCs that do not have a standard
-representation pre-defined as part of the open firmware
-specifications, mainly because the boards that contain these SOCs are
-not currently booted using open firmware. This section contains
-descriptions for the SOC devices for which new nodes have been
-defined; this list will expand as more and more SOC-containing
-platforms are moved over to use the flattened-device-tree model.
+Currently, there are many devices on SoCs that do not have a standard
+representation defined as part of the Open Firmware specifications,
+mainly because the boards that contain these SoCs are not currently
+booted using Open Firmware. Binding documentation for new devices
+should be added to the Documentation/devicetree/bindings directory.
+That directory will expand as device tree support is added to more and
+more SoCs.
+

 VII - Specifying interrupt information for devices
 ===================================================

-The device tree represents the busses and devices of a hardware
+The device tree represents the buses and devices of a hardware
 system in a form similar to the physical bus topology of the
 hardware.

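As a side note to the header hunks above: the text now says the kernel is passed the physical base address of the device tree block, whose first two 32-bit big-endian fields are the magic value and totalsize. A hedged sketch of a bootloader-side sanity check based only on that description (0xd00dfeed is the standard flattened-device-tree magic; everything else here is illustrative):

    /* Illustrative sketch only: validate a device tree block header using
     * the two fields described above (32-bit big-endian magic, then totalsize). */
    #include <stdint.h>

    #define OF_DT_HEADER 0xd00dfeed        /* standard flattened-device-tree magic */

    static uint32_t be32(const uint8_t *p)
    {
            return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
                   ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
    }

    /* 'base' is the (already mapped) base address of the device tree block.
     * Returns totalsize on success, 0 if the magic does not match. */
    static uint32_t dt_block_size(const void *base)
    {
            const uint8_t *p = base;

            if (be32(p) != OF_DT_HEADER)
                    return 0;
            return be32(p + 4);            /* totalsize immediately follows magic */
    }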
diff --git a/Documentation/hwmon/jc42 b/Documentation/hwmon/jc42
index 0e76ef12e4c6..a22ecf48f255 100644
--- a/Documentation/hwmon/jc42
+++ b/Documentation/hwmon/jc42
@@ -51,7 +51,8 @@ Supported chips:
 * JEDEC JC 42.4 compliant temperature sensor chips
 Prefix: 'jc42'
 Addresses scanned: I2C 0x18 - 0x1f
-Datasheet: -
+Datasheet:
+http://www.jedec.org/sites/default/files/docs/4_01_04R19.pdf

 Author:
 Guenter Roeck <guenter.roeck@ericsson.com>
@@ -60,7 +61,11 @@ Author:
 Description
 -----------

-This driver implements support for JEDEC JC 42.4 compliant temperature sensors.
+This driver implements support for JEDEC JC 42.4 compliant temperature sensors,
+which are used on many DDR3 memory modules for mobile devices and servers. Some
+systems use the sensor to prevent memory overheating by automatically throttling
+the memory controller.
+
 The driver auto-detects the chips listed above, but can be manually instantiated
 to support other JC 42.4 compliant chips.

@@ -81,15 +86,19 @@ limits. The chip supports only a single register to configure the hysteresis,
 which applies to all limits. This register can be written by writing into
 temp1_crit_hyst. Other hysteresis attributes are read-only.

+If the BIOS has configured the sensor for automatic temperature management, it
+is likely that it has locked the registers, i.e., that the temperature limits
+cannot be changed.
+
 Sysfs entries
 -------------

 temp1_input        Temperature (RO)
-temp1_min          Minimum temperature (RW)
-temp1_max          Maximum temperature (RW)
-temp1_crit         Critical high temperature (RW)
+temp1_min          Minimum temperature (RO or RW)
+temp1_max          Maximum temperature (RO or RW)
+temp1_crit         Critical high temperature (RO or RW)

-temp1_crit_hyst    Critical hysteresis temperature (RW)
+temp1_crit_hyst    Critical hysteresis temperature (RO or RW)
 temp1_max_hyst     Maximum hysteresis temperature (RO)

 temp1_min_alarm    Temperature low alarm
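The sysfs attributes listed above follow the usual hwmon convention of reporting temperatures in millidegrees Celsius. A small illustrative userspace reader (the hwmon0 path is an assumption and depends on probe order, it is not taken from the patch):

    /* Illustrative only: read a jc42 temperature through the hwmon sysfs ABI.
     * The hwmon0 index is an assumption; it depends on device probe order. */
    #include <stdio.h>

    int main(void)
    {
            long millideg;
            FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");

            if (!f) {
                    perror("temp1_input");
                    return 1;
            }
            if (fscanf(f, "%ld", &millideg) != 1) {
                    fclose(f);
                    return 1;
            }
            fclose(f);
            printf("DIMM temperature: %ld.%03ld degrees C\n",
                   millideg / 1000, millideg % 1000);
            return 0;
    }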
diff --git a/Documentation/hwmon/k10temp b/Documentation/hwmon/k10temp
index 6526eee525a6..d2b56a4fd1f5 100644
--- a/Documentation/hwmon/k10temp
+++ b/Documentation/hwmon/k10temp
@@ -9,6 +9,8 @@ Supported chips:
 Socket S1G3: Athlon II, Sempron, Turion II
 * AMD Family 11h processors:
 Socket S1G2: Athlon (X2), Sempron (X2), Turion X2 (Ultra)
+* AMD Family 12h processors: "Llano"
+* AMD Family 14h processors: "Brazos" (C/E/G-Series)

 Prefix: 'k10temp'
 Addresses scanned: PCI space
@@ -17,10 +19,14 @@ Supported chips:
 http://support.amd.com/us/Processor_TechDocs/31116.pdf
 BIOS and Kernel Developer's Guide (BKDG) for AMD Family 11h Processors:
 http://support.amd.com/us/Processor_TechDocs/41256.pdf
+BIOS and Kernel Developer's Guide (BKDG) for AMD Family 14h Models 00h-0Fh Processors:
+http://support.amd.com/us/Processor_TechDocs/43170.pdf
 Revision Guide for AMD Family 10h Processors:
 http://support.amd.com/us/Processor_TechDocs/41322.pdf
 Revision Guide for AMD Family 11h Processors:
 http://support.amd.com/us/Processor_TechDocs/41788.pdf
+Revision Guide for AMD Family 14h Models 00h-0Fh Processors:
+http://support.amd.com/us/Processor_TechDocs/47534.pdf
 AMD Family 11h Processor Power and Thermal Data Sheet for Notebooks:
 http://support.amd.com/us/Processor_TechDocs/43373.pdf
 AMD Family 10h Server and Workstation Processor Power and Thermal Data Sheet:
@@ -34,7 +40,7 @@ Description
 -----------

 This driver permits reading of the internal temperature sensor of AMD
-Family 10h and 11h processors.
+Family 10h/11h/12h/14h processors.

 All these processors have a sensor, but on those for Socket F or AM2+,
 the sensor may return inconsistent values (erratum 319). The driver
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 89835a4766a6..f4a04c0c7edc 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
@@ -144,6 +144,11 @@ a fixed number of characters. This limit depends on the architecture | |||
144 | and is between 256 and 4096 characters. It is defined in the file | 144 | and is between 256 and 4096 characters. It is defined in the file |
145 | ./include/asm/setup.h as COMMAND_LINE_SIZE. | 145 | ./include/asm/setup.h as COMMAND_LINE_SIZE. |
146 | 146 | ||
147 | Finally, the [KMG] suffix is commonly described after a number of kernel | ||
148 | parameter values. These 'K', 'M', and 'G' letters represent the _binary_ | ||
149 | multipliers 'Kilo', 'Mega', and 'Giga', equalling 2^10, 2^20, and 2^30 | ||
150 | bytes respectively. Such letter suffixes can also be entirely omitted. | ||
151 | |||
147 | 152 | ||
148 | acpi= [HW,ACPI,X86] | 153 | acpi= [HW,ACPI,X86] |
149 | Advanced Configuration and Power Interface | 154 | Advanced Configuration and Power Interface |
@@ -545,16 +550,20 @@ and is between 256 and 4096 characters. It is defined in the file | |||
545 | Format: | 550 | Format: |
546 | <first_slot>,<last_slot>,<port>,<enum_bit>[,<debug>] | 551 | <first_slot>,<last_slot>,<port>,<enum_bit>[,<debug>] |
547 | 552 | ||
548 | crashkernel=nn[KMG]@ss[KMG] | 553 | crashkernel=size[KMG][@offset[KMG]] |
549 | [KNL] Reserve a chunk of physical memory to | 554 | [KNL] Using kexec, Linux can switch to a 'crash kernel' |
550 | hold a kernel to switch to with kexec on panic. | 555 | upon panic. This parameter reserves the physical |
556 | memory region [offset, offset + size] for that kernel | ||
557 | image. If '@offset' is omitted, then a suitable offset | ||
558 | is selected automatically. Check | ||
559 | Documentation/kdump/kdump.txt for further details. | ||
551 | 560 | ||
552 | crashkernel=range1:size1[,range2:size2,...][@offset] | 561 | crashkernel=range1:size1[,range2:size2,...][@offset] |
553 | [KNL] Same as above, but depends on the memory | 562 | [KNL] Same as above, but depends on the memory |
554 | in the running system. The syntax of range is | 563 | in the running system. The syntax of range is |
555 | start-[end] where start and end are both | 564 | start-[end] where start and end are both |
556 | a memory unit (amount[KMG]). See also | 565 | a memory unit (amount[KMG]). See also |
557 | Documentation/kdump/kdump.txt for a example. | 566 | Documentation/kdump/kdump.txt for an example. |
558 | 567 | ||
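As a worked example of the simple form (the numbers are chosen purely for illustration), crashkernel=64M@16M reserves a region of 64 x 2^20 bytes starting at physical offset 16 x 2^20, while crashkernel=64M omits the '@offset' part and lets the kernel pick a suitable start address itself.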
559 | cs89x0_dma= [HW,NET] | 568 | cs89x0_dma= [HW,NET] |
560 | Format: <dma> | 569 | Format: <dma> |
@@ -1262,10 +1271,9 @@ and is between 256 and 4096 characters. It is defined in the file | |||
1262 | 6 (KERN_INFO) informational | 1271 | 6 (KERN_INFO) informational |
1263 | 7 (KERN_DEBUG) debug-level messages | 1272 | 7 (KERN_DEBUG) debug-level messages |
1264 | 1273 | ||
1265 | log_buf_len=n Sets the size of the printk ring buffer, in bytes. | 1274 | log_buf_len=n[KMG] Sets the size of the printk ring buffer, |
1266 | Format: { n | nk | nM } | 1275 | in bytes. n must be a power of two. The default |
1267 | n must be a power of two. The default size | 1276 | size is set in the kernel config file. |
1268 | is set in the kernel config file. | ||
1269 | 1277 | ||
1270 | logo.nologo [FB] Disables display of the built-in Linux logo. | 1278 | logo.nologo [FB] Disables display of the built-in Linux logo. |
1271 | This may be used to provide more screen space for | 1279 | This may be used to provide more screen space for |
diff --git a/Documentation/networking/Makefile b/Documentation/networking/Makefile index 5aba7a33aeeb..24c308dd3fd1 100644 --- a/Documentation/networking/Makefile +++ b/Documentation/networking/Makefile | |||
@@ -4,6 +4,8 @@ obj- := dummy.o | |||
4 | # List of programs to build | 4 | # List of programs to build |
5 | hostprogs-y := ifenslave | 5 | hostprogs-y := ifenslave |
6 | 6 | ||
7 | HOSTCFLAGS_ifenslave.o += -I$(objtree)/usr/include | ||
8 | |||
7 | # Tell kbuild to always build the programs | 9 | # Tell kbuild to always build the programs |
8 | always := $(hostprogs-y) | 10 | always := $(hostprogs-y) |
9 | 11 | ||
diff --git a/Documentation/workqueue.txt b/Documentation/workqueue.txt index 996a27d9b8db..01c513fac40e 100644 --- a/Documentation/workqueue.txt +++ b/Documentation/workqueue.txt | |||
@@ -190,9 +190,9 @@ resources, scheduled and executed. | |||
190 | * Long running CPU intensive workloads which can be better | 190 | * Long running CPU intensive workloads which can be better |
191 | managed by the system scheduler. | 191 | managed by the system scheduler. |
192 | 192 | ||
193 | WQ_FREEZEABLE | 193 | WQ_FREEZABLE |
194 | 194 | ||
195 | A freezeable wq participates in the freeze phase of the system | 195 | A freezable wq participates in the freeze phase of the system |
196 | suspend operations. Work items on the wq are drained and no | 196 | suspend operations. Work items on the wq are drained and no |
197 | new work item starts execution until thawed. | 197 | new work item starts execution until thawed. |
198 | 198 | ||
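As an illustration of the renamed flag (this snippet is not taken from the patch; the queue name and variable are placeholders), a driver that wants its work items drained across suspend would allocate its queue as a freezable one:

	#include <linux/errno.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *example_wq;

	static int example_setup(void)
	{
		/* Freezable: drained during the freeze phase of system suspend. */
		example_wq = alloc_workqueue("example_wq", WQ_FREEZABLE, 0);
		return example_wq ? 0 : -ENOMEM;
	}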
diff --git a/MAINTAINERS b/MAINTAINERS index 531c5cf150ba..8afba6321e24 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -885,7 +885,7 @@ S: Supported | |||
885 | 885 | ||
886 | ARM/QUALCOMM MSM MACHINE SUPPORT | 886 | ARM/QUALCOMM MSM MACHINE SUPPORT |
887 | M: David Brown <davidb@codeaurora.org> | 887 | M: David Brown <davidb@codeaurora.org> |
888 | M: Daniel Walker <dwalker@codeaurora.org> | 888 | M: Daniel Walker <dwalker@fifo99.com> |
889 | M: Bryan Huntsman <bryanh@codeaurora.org> | 889 | M: Bryan Huntsman <bryanh@codeaurora.org> |
890 | L: linux-arm-msm@vger.kernel.org | 890 | L: linux-arm-msm@vger.kernel.org |
891 | F: arch/arm/mach-msm/ | 891 | F: arch/arm/mach-msm/ |
@@ -1692,6 +1692,13 @@ M: Andy Whitcroft <apw@canonical.com> | |||
1692 | S: Supported | 1692 | S: Supported |
1693 | F: scripts/checkpatch.pl | 1693 | F: scripts/checkpatch.pl |
1694 | 1694 | ||
1695 | CHINESE DOCUMENTATION | ||
1696 | M: Harry Wei <harryxiyou@gmail.com> | ||
1697 | L: xiyoulinuxkernelgroup@googlegroups.com | ||
1698 | L: linux-kernel@zh-kernel.org (moderated for non-subscribers) | ||
1699 | S: Maintained | ||
1700 | F: Documentation/zh_CN/ | ||
1701 | |||
1695 | CISCO VIC ETHERNET NIC DRIVER | 1702 | CISCO VIC ETHERNET NIC DRIVER |
1696 | M: Vasanthy Kolluri <vkolluri@cisco.com> | 1703 | M: Vasanthy Kolluri <vkolluri@cisco.com> |
1697 | M: Roopa Prabhu <roprabhu@cisco.com> | 1704 | M: Roopa Prabhu <roprabhu@cisco.com> |
@@ -2126,6 +2133,7 @@ S: Supported | |||
2126 | F: fs/dlm/ | 2133 | F: fs/dlm/ |
2127 | 2134 | ||
2128 | DMA GENERIC OFFLOAD ENGINE SUBSYSTEM | 2135 | DMA GENERIC OFFLOAD ENGINE SUBSYSTEM |
2136 | M: Vinod Koul <vinod.koul@intel.com> | ||
2129 | M: Dan Williams <dan.j.williams@intel.com> | 2137 | M: Dan Williams <dan.j.williams@intel.com> |
2130 | S: Supported | 2138 | S: Supported |
2131 | F: drivers/dma/ | 2139 | F: drivers/dma/ |
@@ -2774,6 +2782,15 @@ F: Documentation/isdn/README.gigaset | |||
2774 | F: drivers/isdn/gigaset/ | 2782 | F: drivers/isdn/gigaset/ |
2775 | F: include/linux/gigaset_dev.h | 2783 | F: include/linux/gigaset_dev.h |
2776 | 2784 | ||
2785 | GPIO SUBSYSTEM | ||
2786 | M: Grant Likely <grant.likely@secretlab.ca> | ||
2787 | L: linux-kernel@vger.kernel.org | ||
2788 | S: Maintained | ||
2789 | T: git git://git.secretlab.ca/git/linux-2.6.git | ||
2790 | F: Documentation/gpio/gpio.txt | ||
2791 | F: drivers/gpio/ | ||
2792 | F: include/linux/gpio* | ||
2793 | |||
2777 | GRETH 10/100/1G Ethernet MAC device driver | 2794 | GRETH 10/100/1G Ethernet MAC device driver |
2778 | M: Kristoffer Glembo <kristoffer@gaisler.com> | 2795 | M: Kristoffer Glembo <kristoffer@gaisler.com> |
2779 | L: netdev@vger.kernel.org | 2796 | L: netdev@vger.kernel.org |
@@ -2863,7 +2880,6 @@ M: Guenter Roeck <guenter.roeck@ericsson.com> | |||
2863 | L: lm-sensors@lm-sensors.org | 2880 | L: lm-sensors@lm-sensors.org |
2864 | W: http://www.lm-sensors.org/ | 2881 | W: http://www.lm-sensors.org/ |
2865 | T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/ | 2882 | T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/ |
2866 | T: quilt kernel.org/pub/linux/kernel/people/groeck/linux-staging/ | ||
2867 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git | 2883 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git |
2868 | S: Maintained | 2884 | S: Maintained |
2869 | F: Documentation/hwmon/ | 2885 | F: Documentation/hwmon/ |
@@ -4591,7 +4607,7 @@ F: drivers/i2c/busses/i2c-ocores.c | |||
4591 | 4607 | ||
4592 | OPEN FIRMWARE AND FLATTENED DEVICE TREE | 4608 | OPEN FIRMWARE AND FLATTENED DEVICE TREE |
4593 | M: Grant Likely <grant.likely@secretlab.ca> | 4609 | M: Grant Likely <grant.likely@secretlab.ca> |
4594 | L: devicetree-discuss@lists.ozlabs.org | 4610 | L: devicetree-discuss@lists.ozlabs.org (moderated for non-subscribers) |
4595 | W: http://fdt.secretlab.ca | 4611 | W: http://fdt.secretlab.ca |
4596 | T: git git://git.secretlab.ca/git/linux-2.6.git | 4612 | T: git git://git.secretlab.ca/git/linux-2.6.git |
4597 | S: Maintained | 4613 | S: Maintained |
@@ -5257,7 +5273,7 @@ S: Maintained | |||
5257 | F: drivers/net/wireless/rtl818x/rtl8180/ | 5273 | F: drivers/net/wireless/rtl818x/rtl8180/ |
5258 | 5274 | ||
5259 | RTL8187 WIRELESS DRIVER | 5275 | RTL8187 WIRELESS DRIVER |
5260 | M: Herton Ronaldo Krzesinski <herton@mandriva.com.br> | 5276 | M: Herton Ronaldo Krzesinski <herton@canonical.com> |
5261 | M: Hin-Tak Leung <htl10@users.sourceforge.net> | 5277 | M: Hin-Tak Leung <htl10@users.sourceforge.net> |
5262 | M: Larry Finger <Larry.Finger@lwfinger.net> | 5278 | M: Larry Finger <Larry.Finger@lwfinger.net> |
5263 | L: linux-wireless@vger.kernel.org | 5279 | L: linux-wireless@vger.kernel.org |
@@ -6095,7 +6111,7 @@ S: Maintained | |||
6095 | F: security/tomoyo/ | 6111 | F: security/tomoyo/ |
6096 | 6112 | ||
6097 | TOPSTAR LAPTOP EXTRAS DRIVER | 6113 | TOPSTAR LAPTOP EXTRAS DRIVER |
6098 | M: Herton Ronaldo Krzesinski <herton@mandriva.com.br> | 6114 | M: Herton Ronaldo Krzesinski <herton@canonical.com> |
6099 | L: platform-driver-x86@vger.kernel.org | 6115 | L: platform-driver-x86@vger.kernel.org |
6100 | S: Maintained | 6116 | S: Maintained |
6101 | F: drivers/platform/x86/topstar-laptop.c | 6117 | F: drivers/platform/x86/topstar-laptop.c |
@@ -1,7 +1,7 @@ | |||
diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile | |||
1 | VERSION = 2 | 1 | VERSION = 2 |
2 | PATCHLEVEL = 6 | 2 | PATCHLEVEL = 6 |
3 | SUBLEVEL = 38 | 3 | SUBLEVEL = 38 |
4 | EXTRAVERSION = -rc3 | 4 | EXTRAVERSION = -rc7 |
5 | NAME = Flesh-Eating Bats with Fangs | 5 | NAME = Flesh-Eating Bats with Fangs |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 5cff165b7eb0..166efa2a19cd 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -1177,6 +1177,31 @@ config ARM_ERRATA_743622 | |||
1177 | visible impact on the overall performance or power consumption of the | 1177 | visible impact on the overall performance or power consumption of the |
1178 | processor. | 1178 | processor. |
1179 | 1179 | ||
1180 | config ARM_ERRATA_751472 | ||
1181 | bool "ARM errata: Interrupted ICIALLUIS may prevent completion of broadcasted operation" | ||
1182 | depends on CPU_V7 && SMP | ||
1183 | help | ||
1184 | This option enables the workaround for the 751472 Cortex-A9 (prior | ||
1185 | to r3p0) erratum. An interrupted ICIALLUIS operation may prevent the | ||
1186 | completion of a following broadcasted operation if the second | ||
1187 | operation is received by a CPU before the ICIALLUIS has completed, | ||
1188 | potentially leading to corrupted entries in the cache or TLB. | ||
1189 | |||
1190 | config ARM_ERRATA_753970 | ||
1191 | bool "ARM errata: cache sync operation may be faulty" | ||
1192 | depends on CACHE_PL310 | ||
1193 | help | ||
1194 | This option enables the workaround for the 753970 PL310 (r3p0) erratum. | ||
1195 | |||
1196 | Under some condition the effect of cache sync operation on | ||
1197 | the store buffer still remains when the operation completes. | ||
1198 | This means that the store buffer is always asked to drain and | ||
1199 | this prevents it from merging any further writes. The workaround | ||
1200 | is to replace the normal offset of cache sync operation (0x730) | ||
1201 | by another offset targeting an unmapped PL310 register 0x740. | ||
1202 | This has the same effect as the cache sync operation: store buffer | ||
1203 | drain and waiting for all buffers empty. | ||
1204 | |||
1180 | endmenu | 1205 | endmenu |
1181 | 1206 | ||
1182 | source "arch/arm/common/Kconfig" | 1207 | source "arch/arm/common/Kconfig" |
@@ -1391,7 +1416,7 @@ config AEABI | |||
1391 | 1416 | ||
1392 | config OABI_COMPAT | 1417 | config OABI_COMPAT |
1393 | bool "Allow old ABI binaries to run with this kernel (EXPERIMENTAL)" | 1418 | bool "Allow old ABI binaries to run with this kernel (EXPERIMENTAL)" |
1394 | depends on AEABI && EXPERIMENTAL | 1419 | depends on AEABI && EXPERIMENTAL && !THUMB2_KERNEL |
1395 | default y | 1420 | default y |
1396 | help | 1421 | help |
1397 | This option preserves the old syscall interface along with the | 1422 | This option preserves the old syscall interface along with the |
diff --git a/arch/arm/Makefile b/arch/arm/Makefile index c22c1adfedd6..6f7b29294c80 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile | |||
@@ -15,7 +15,7 @@ ifeq ($(CONFIG_CPU_ENDIAN_BE8),y) | |||
15 | LDFLAGS_vmlinux += --be8 | 15 | LDFLAGS_vmlinux += --be8 |
16 | endif | 16 | endif |
17 | 17 | ||
18 | OBJCOPYFLAGS :=-O binary -R .note -R .note.gnu.build-id -R .comment -S | 18 | OBJCOPYFLAGS :=-O binary -R .comment -S |
19 | GZFLAGS :=-9 | 19 | GZFLAGS :=-9 |
20 | #KBUILD_CFLAGS +=-pipe | 20 | #KBUILD_CFLAGS +=-pipe |
21 | # Explicitly specifiy 32-bit ARM ISA since toolchain default can be -mthumb: | 21 | # Explicitly specifiy 32-bit ARM ISA since toolchain default can be -mthumb: |
diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore index ab204db594d3..c6028967d336 100644 --- a/arch/arm/boot/compressed/.gitignore +++ b/arch/arm/boot/compressed/.gitignore | |||
@@ -1,3 +1,7 @@ | |||
1 | font.c | 1 | font.c |
2 | piggy.gz | 2 | lib1funcs.S |
3 | piggy.gzip | ||
4 | piggy.lzo | ||
5 | piggy.lzma | ||
6 | vmlinux | ||
3 | vmlinux.lds | 7 | vmlinux.lds |
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h index 5aeec1e1735c..16bd48031583 100644 --- a/arch/arm/include/asm/hardware/cache-l2x0.h +++ b/arch/arm/include/asm/hardware/cache-l2x0.h | |||
@@ -36,6 +36,7 @@ | |||
36 | #define L2X0_RAW_INTR_STAT 0x21C | 36 | #define L2X0_RAW_INTR_STAT 0x21C |
37 | #define L2X0_INTR_CLEAR 0x220 | 37 | #define L2X0_INTR_CLEAR 0x220 |
38 | #define L2X0_CACHE_SYNC 0x730 | 38 | #define L2X0_CACHE_SYNC 0x730 |
39 | #define L2X0_DUMMY_REG 0x740 | ||
39 | #define L2X0_INV_LINE_PA 0x770 | 40 | #define L2X0_INV_LINE_PA 0x770 |
40 | #define L2X0_INV_WAY 0x77C | 41 | #define L2X0_INV_WAY 0x77C |
41 | #define L2X0_CLEAN_LINE_PA 0x7B0 | 42 | #define L2X0_CLEAN_LINE_PA 0x7B0 |
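The new L2X0_DUMMY_REG offset backs the 753970 workaround described in the Kconfig help above: rather than poking the real cache sync register at 0x730, the workaround writes to the unmapped 0x740 offset, which has the same effect (store buffer drain) without the erratum's side effect. A rough sketch of such a sync helper, not the exact code added elsewhere in this series, might be:

	#include <linux/io.h>
	#include <asm/hardware/cache-l2x0.h>

	static inline void pl310_753970_cache_sync(void __iomem *l2x0_base)
	{
		/* Any write to the unmapped 0x740 offset drains the store buffer. */
		writel_relaxed(0, l2x0_base + L2X0_DUMMY_REG);
	}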
diff --git a/arch/arm/include/asm/hardware/sp810.h b/arch/arm/include/asm/hardware/sp810.h index 721847dc68ab..e0d1c0cfa548 100644 --- a/arch/arm/include/asm/hardware/sp810.h +++ b/arch/arm/include/asm/hardware/sp810.h | |||
@@ -58,6 +58,9 @@ | |||
58 | 58 | ||
59 | static inline void sysctl_soft_reset(void __iomem *base) | 59 | static inline void sysctl_soft_reset(void __iomem *base) |
60 | { | 60 | { |
61 | /* switch to slow mode */ | ||
62 | writel(0x2, base + SCCTRL); | ||
63 | |||
61 | /* writing any value to SCSYSSTAT reg will reset system */ | 64 | /* writing any value to SCSYSSTAT reg will reset system */ |
62 | writel(0, base + SCSYSSTAT); | 65 | writel(0, base + SCSYSSTAT); |
63 | } | 66 | } |
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index f41a6f57cd12..82dfe5d0c41e 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h | |||
@@ -18,16 +18,34 @@ | |||
18 | #define __ASMARM_TLB_H | 18 | #define __ASMARM_TLB_H |
19 | 19 | ||
20 | #include <asm/cacheflush.h> | 20 | #include <asm/cacheflush.h> |
21 | #include <asm/tlbflush.h> | ||
22 | 21 | ||
23 | #ifndef CONFIG_MMU | 22 | #ifndef CONFIG_MMU |
24 | 23 | ||
25 | #include <linux/pagemap.h> | 24 | #include <linux/pagemap.h> |
25 | |||
26 | #define tlb_flush(tlb) ((void) tlb) | ||
27 | |||
26 | #include <asm-generic/tlb.h> | 28 | #include <asm-generic/tlb.h> |
27 | 29 | ||
28 | #else /* !CONFIG_MMU */ | 30 | #else /* !CONFIG_MMU */ |
29 | 31 | ||
32 | #include <linux/swap.h> | ||
30 | #include <asm/pgalloc.h> | 33 | #include <asm/pgalloc.h> |
34 | #include <asm/tlbflush.h> | ||
35 | |||
36 | /* | ||
37 | * We need to delay page freeing for SMP as other CPUs can access pages | ||
38 | * which have been removed but not yet had their TLB entries invalidated. | ||
39 | * Also, as ARMv7 speculative prefetch can drag new entries into the TLB, | ||
40 | * we need to apply this same delaying tactic to ensure correct operation. | ||
41 | */ | ||
42 | #if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7) | ||
43 | #define tlb_fast_mode(tlb) 0 | ||
44 | #define FREE_PTE_NR 500 | ||
45 | #else | ||
46 | #define tlb_fast_mode(tlb) 1 | ||
47 | #define FREE_PTE_NR 0 | ||
48 | #endif | ||
31 | 49 | ||
32 | /* | 50 | /* |
33 | * TLB handling. This allows us to remove pages from the page | 51 | * TLB handling. This allows us to remove pages from the page |
@@ -36,12 +54,58 @@ | |||
36 | struct mmu_gather { | 54 | struct mmu_gather { |
37 | struct mm_struct *mm; | 55 | struct mm_struct *mm; |
38 | unsigned int fullmm; | 56 | unsigned int fullmm; |
57 | struct vm_area_struct *vma; | ||
39 | unsigned long range_start; | 58 | unsigned long range_start; |
40 | unsigned long range_end; | 59 | unsigned long range_end; |
60 | unsigned int nr; | ||
61 | struct page *pages[FREE_PTE_NR]; | ||
41 | }; | 62 | }; |
42 | 63 | ||
43 | DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); | 64 | DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); |
44 | 65 | ||
66 | /* | ||
67 | * This is unnecessarily complex. There's three ways the TLB shootdown | ||
68 | * code is used: | ||
69 | * 1. Unmapping a range of vmas. See zap_page_range(), unmap_region(). | ||
70 | * tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called. | ||
71 | * tlb->vma will be non-NULL. | ||
72 | * 2. Unmapping all vmas. See exit_mmap(). | ||
73 | * tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called. | ||
74 | * tlb->vma will be non-NULL. Additionally, page tables will be freed. | ||
75 | * 3. Unmapping argument pages. See shift_arg_pages(). | ||
76 | * tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called. | ||
77 | * tlb->vma will be NULL. | ||
78 | */ | ||
79 | static inline void tlb_flush(struct mmu_gather *tlb) | ||
80 | { | ||
81 | if (tlb->fullmm || !tlb->vma) | ||
82 | flush_tlb_mm(tlb->mm); | ||
83 | else if (tlb->range_end > 0) { | ||
84 | flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end); | ||
85 | tlb->range_start = TASK_SIZE; | ||
86 | tlb->range_end = 0; | ||
87 | } | ||
88 | } | ||
89 | |||
90 | static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr) | ||
91 | { | ||
92 | if (!tlb->fullmm) { | ||
93 | if (addr < tlb->range_start) | ||
94 | tlb->range_start = addr; | ||
95 | if (addr + PAGE_SIZE > tlb->range_end) | ||
96 | tlb->range_end = addr + PAGE_SIZE; | ||
97 | } | ||
98 | } | ||
99 | |||
100 | static inline void tlb_flush_mmu(struct mmu_gather *tlb) | ||
101 | { | ||
102 | tlb_flush(tlb); | ||
103 | if (!tlb_fast_mode(tlb)) { | ||
104 | free_pages_and_swap_cache(tlb->pages, tlb->nr); | ||
105 | tlb->nr = 0; | ||
106 | } | ||
107 | } | ||
108 | |||
45 | static inline struct mmu_gather * | 109 | static inline struct mmu_gather * |
46 | tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) | 110 | tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) |
47 | { | 111 | { |
@@ -49,6 +113,8 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) | |||
49 | 113 | ||
50 | tlb->mm = mm; | 114 | tlb->mm = mm; |
51 | tlb->fullmm = full_mm_flush; | 115 | tlb->fullmm = full_mm_flush; |
116 | tlb->vma = NULL; | ||
117 | tlb->nr = 0; | ||
52 | 118 | ||
53 | return tlb; | 119 | return tlb; |
54 | } | 120 | } |
@@ -56,8 +122,7 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) | |||
56 | static inline void | 122 | static inline void |
57 | tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) | 123 | tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) |
58 | { | 124 | { |
59 | if (tlb->fullmm) | 125 | tlb_flush_mmu(tlb); |
60 | flush_tlb_mm(tlb->mm); | ||
61 | 126 | ||
62 | /* keep the page table cache within bounds */ | 127 | /* keep the page table cache within bounds */ |
63 | check_pgt_cache(); | 128 | check_pgt_cache(); |
@@ -71,12 +136,7 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) | |||
71 | static inline void | 136 | static inline void |
72 | tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr) | 137 | tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr) |
73 | { | 138 | { |
74 | if (!tlb->fullmm) { | 139 | tlb_add_flush(tlb, addr); |
75 | if (addr < tlb->range_start) | ||
76 | tlb->range_start = addr; | ||
77 | if (addr + PAGE_SIZE > tlb->range_end) | ||
78 | tlb->range_end = addr + PAGE_SIZE; | ||
79 | } | ||
80 | } | 140 | } |
81 | 141 | ||
82 | /* | 142 | /* |
@@ -89,6 +149,7 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) | |||
89 | { | 149 | { |
90 | if (!tlb->fullmm) { | 150 | if (!tlb->fullmm) { |
91 | flush_cache_range(vma, vma->vm_start, vma->vm_end); | 151 | flush_cache_range(vma, vma->vm_start, vma->vm_end); |
152 | tlb->vma = vma; | ||
92 | tlb->range_start = TASK_SIZE; | 153 | tlb->range_start = TASK_SIZE; |
93 | tlb->range_end = 0; | 154 | tlb->range_end = 0; |
94 | } | 155 | } |
@@ -97,12 +158,30 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) | |||
97 | static inline void | 158 | static inline void |
98 | tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) | 159 | tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) |
99 | { | 160 | { |
100 | if (!tlb->fullmm && tlb->range_end > 0) | 161 | if (!tlb->fullmm) |
101 | flush_tlb_range(vma, tlb->range_start, tlb->range_end); | 162 | tlb_flush(tlb); |
163 | } | ||
164 | |||
165 | static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) | ||
166 | { | ||
167 | if (tlb_fast_mode(tlb)) { | ||
168 | free_page_and_swap_cache(page); | ||
169 | } else { | ||
170 | tlb->pages[tlb->nr++] = page; | ||
171 | if (tlb->nr >= FREE_PTE_NR) | ||
172 | tlb_flush_mmu(tlb); | ||
173 | } | ||
174 | } | ||
175 | |||
176 | static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, | ||
177 | unsigned long addr) | ||
178 | { | ||
179 | pgtable_page_dtor(pte); | ||
180 | tlb_add_flush(tlb, addr); | ||
181 | tlb_remove_page(tlb, pte); | ||
102 | } | 182 | } |
103 | 183 | ||
104 | #define tlb_remove_page(tlb,page) free_page_and_swap_cache(page) | 184 | #define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr) |
105 | #define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep) | ||
106 | #define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp) | 185 | #define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp) |
107 | 186 | ||
108 | #define tlb_migrate_finish(mm) do { } while (0) | 187 | #define tlb_migrate_finish(mm) do { } while (0) |
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index ce7378ea15a2..d2005de383b8 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h | |||
@@ -10,12 +10,7 @@ | |||
10 | #ifndef _ASMARM_TLBFLUSH_H | 10 | #ifndef _ASMARM_TLBFLUSH_H |
11 | #define _ASMARM_TLBFLUSH_H | 11 | #define _ASMARM_TLBFLUSH_H |
12 | 12 | ||
13 | 13 | #ifdef CONFIG_MMU | |
14 | #ifndef CONFIG_MMU | ||
15 | |||
16 | #define tlb_flush(tlb) ((void) tlb) | ||
17 | |||
18 | #else /* CONFIG_MMU */ | ||
19 | 14 | ||
20 | #include <asm/glue.h> | 15 | #include <asm/glue.h> |
21 | 16 | ||
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index c0225da3fb21..f06ff9feb0db 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S | |||
@@ -391,6 +391,7 @@ ENDPROC(__turn_mmu_on) | |||
391 | 391 | ||
392 | 392 | ||
393 | #ifdef CONFIG_SMP_ON_UP | 393 | #ifdef CONFIG_SMP_ON_UP |
394 | __INIT | ||
394 | __fixup_smp: | 395 | __fixup_smp: |
395 | and r3, r9, #0x000f0000 @ architecture version | 396 | and r3, r9, #0x000f0000 @ architecture version |
396 | teq r3, #0x000f0000 @ CPU ID supported? | 397 | teq r3, #0x000f0000 @ CPU ID supported? |
@@ -415,18 +416,7 @@ __fixup_smp_on_up: | |||
415 | sub r3, r0, r3 | 416 | sub r3, r0, r3 |
416 | add r4, r4, r3 | 417 | add r4, r4, r3 |
417 | add r5, r5, r3 | 418 | add r5, r5, r3 |
418 | 2: cmp r4, r5 | 419 | b __do_fixup_smp_on_up |
419 | movhs pc, lr | ||
420 | ldmia r4!, {r0, r6} | ||
421 | ARM( str r6, [r0, r3] ) | ||
422 | THUMB( add r0, r0, r3 ) | ||
423 | #ifdef __ARMEB__ | ||
424 | THUMB( mov r6, r6, ror #16 ) @ Convert word order for big-endian. | ||
425 | #endif | ||
426 | THUMB( strh r6, [r0], #2 ) @ For Thumb-2, store as two halfwords | ||
427 | THUMB( mov r6, r6, lsr #16 ) @ to be robust against misaligned r3. | ||
428 | THUMB( strh r6, [r0] ) | ||
429 | b 2b | ||
430 | ENDPROC(__fixup_smp) | 420 | ENDPROC(__fixup_smp) |
431 | 421 | ||
432 | .align | 422 | .align |
@@ -440,7 +430,31 @@ smp_on_up: | |||
440 | ALT_SMP(.long 1) | 430 | ALT_SMP(.long 1) |
441 | ALT_UP(.long 0) | 431 | ALT_UP(.long 0) |
442 | .popsection | 432 | .popsection |
433 | #endif | ||
443 | 434 | ||
435 | .text | ||
436 | __do_fixup_smp_on_up: | ||
437 | cmp r4, r5 | ||
438 | movhs pc, lr | ||
439 | ldmia r4!, {r0, r6} | ||
440 | ARM( str r6, [r0, r3] ) | ||
441 | THUMB( add r0, r0, r3 ) | ||
442 | #ifdef __ARMEB__ | ||
443 | THUMB( mov r6, r6, ror #16 ) @ Convert word order for big-endian. | ||
444 | #endif | 444 | #endif |
445 | THUMB( strh r6, [r0], #2 ) @ For Thumb-2, store as two halfwords | ||
446 | THUMB( mov r6, r6, lsr #16 ) @ to be robust against misaligned r3. | ||
447 | THUMB( strh r6, [r0] ) | ||
448 | b __do_fixup_smp_on_up | ||
449 | ENDPROC(__do_fixup_smp_on_up) | ||
450 | |||
451 | ENTRY(fixup_smp) | ||
452 | stmfd sp!, {r4 - r6, lr} | ||
453 | mov r4, r0 | ||
454 | add r5, r0, r1 | ||
455 | mov r3, #0 | ||
456 | bl __do_fixup_smp_on_up | ||
457 | ldmfd sp!, {r4 - r6, pc} | ||
458 | ENDPROC(fixup_smp) | ||
445 | 459 | ||
446 | #include "head-common.S" | 460 | #include "head-common.S" |
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index c9f3f0467570..d600bd350704 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c | |||
@@ -137,11 +137,10 @@ static u8 get_debug_arch(void) | |||
137 | u32 didr; | 137 | u32 didr; |
138 | 138 | ||
139 | /* Do we implement the extended CPUID interface? */ | 139 | /* Do we implement the extended CPUID interface? */ |
140 | if (((read_cpuid_id() >> 16) & 0xf) != 0xf) { | 140 | if (WARN_ONCE((((read_cpuid_id() >> 16) & 0xf) != 0xf), |
141 | pr_warning("CPUID feature registers not supported. " | 141 | "CPUID feature registers not supported. " |
142 | "Assuming v6 debug is present.\n"); | 142 | "Assuming v6 debug is present.\n")) |
143 | return ARM_DEBUG_ARCH_V6; | 143 | return ARM_DEBUG_ARCH_V6; |
144 | } | ||
145 | 144 | ||
146 | ARM_DBG_READ(c0, 0, didr); | 145 | ARM_DBG_READ(c0, 0, didr); |
147 | return (didr >> 16) & 0xf; | 146 | return (didr >> 16) & 0xf; |
@@ -152,6 +151,12 @@ u8 arch_get_debug_arch(void) | |||
152 | return debug_arch; | 151 | return debug_arch; |
153 | } | 152 | } |
154 | 153 | ||
154 | static int debug_arch_supported(void) | ||
155 | { | ||
156 | u8 arch = get_debug_arch(); | ||
157 | return arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14; | ||
158 | } | ||
159 | |||
155 | /* Determine number of BRP register available. */ | 160 | /* Determine number of BRP register available. */ |
156 | static int get_num_brp_resources(void) | 161 | static int get_num_brp_resources(void) |
157 | { | 162 | { |
@@ -268,6 +273,9 @@ out: | |||
268 | 273 | ||
269 | int hw_breakpoint_slots(int type) | 274 | int hw_breakpoint_slots(int type) |
270 | { | 275 | { |
276 | if (!debug_arch_supported()) | ||
277 | return 0; | ||
278 | |||
271 | /* | 279 | /* |
272 | * We can be called early, so don't rely on | 280 | * We can be called early, so don't rely on |
273 | * our static variables being initialised. | 281 | * our static variables being initialised. |
@@ -834,11 +842,11 @@ static void reset_ctrl_regs(void *unused) | |||
834 | 842 | ||
835 | /* | 843 | /* |
836 | * v7 debug contains save and restore registers so that debug state | 844 | * v7 debug contains save and restore registers so that debug state |
837 | * can be maintained across low-power modes without leaving | 845 | * can be maintained across low-power modes without leaving the debug |
838 | * the debug logic powered up. It is IMPLEMENTATION DEFINED whether | 846 | * logic powered up. It is IMPLEMENTATION DEFINED whether we can access |
839 | * we can write to the debug registers out of reset, so we must | 847 | * the debug registers out of reset, so we must unlock the OS Lock |
840 | * unlock the OS Lock Access Register to avoid taking undefined | 848 | * Access Register to avoid taking undefined instruction exceptions |
841 | * instruction exceptions later on. | 849 | * later on. |
842 | */ | 850 | */ |
843 | if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) { | 851 | if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) { |
844 | /* | 852 | /* |
@@ -882,7 +890,7 @@ static int __init arch_hw_breakpoint_init(void) | |||
882 | 890 | ||
883 | debug_arch = get_debug_arch(); | 891 | debug_arch = get_debug_arch(); |
884 | 892 | ||
885 | if (debug_arch > ARM_DEBUG_ARCH_V7_ECP14) { | 893 | if (!debug_arch_supported()) { |
886 | pr_info("debug architecture 0x%x unsupported.\n", debug_arch); | 894 | pr_info("debug architecture 0x%x unsupported.\n", debug_arch); |
887 | return 0; | 895 | return 0; |
888 | } | 896 | } |
@@ -899,18 +907,18 @@ static int __init arch_hw_breakpoint_init(void) | |||
899 | pr_info("%d breakpoint(s) reserved for watchpoint " | 907 | pr_info("%d breakpoint(s) reserved for watchpoint " |
900 | "single-step.\n", core_num_reserved_brps); | 908 | "single-step.\n", core_num_reserved_brps); |
901 | 909 | ||
910 | /* | ||
911 | * Reset the breakpoint resources. We assume that a halting | ||
912 | * debugger will leave the world in a nice state for us. | ||
913 | */ | ||
914 | on_each_cpu(reset_ctrl_regs, NULL, 1); | ||
915 | |||
902 | ARM_DBG_READ(c1, 0, dscr); | 916 | ARM_DBG_READ(c1, 0, dscr); |
903 | if (dscr & ARM_DSCR_HDBGEN) { | 917 | if (dscr & ARM_DSCR_HDBGEN) { |
918 | max_watchpoint_len = 4; | ||
904 | pr_warning("halting debug mode enabled. Assuming maximum " | 919 | pr_warning("halting debug mode enabled. Assuming maximum " |
905 | "watchpoint size of 4 bytes."); | 920 | "watchpoint size of %u bytes.", max_watchpoint_len); |
906 | } else { | 921 | } else { |
907 | /* | ||
908 | * Reset the breakpoint resources. We assume that a halting | ||
909 | * debugger will leave the world in a nice state for us. | ||
910 | */ | ||
911 | smp_call_function(reset_ctrl_regs, NULL, 1); | ||
912 | reset_ctrl_regs(NULL); | ||
913 | |||
914 | /* Work out the maximum supported watchpoint length. */ | 922 | /* Work out the maximum supported watchpoint length. */ |
915 | max_watchpoint_len = get_max_wp_len(); | 923 | max_watchpoint_len = get_max_wp_len(); |
916 | pr_info("maximum watchpoint size is %u bytes.\n", | 924 | pr_info("maximum watchpoint size is %u bytes.\n", |
diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c index 2c1f0050c9c4..8f6ed43861f1 100644 --- a/arch/arm/kernel/kprobes-decode.c +++ b/arch/arm/kernel/kprobes-decode.c | |||
@@ -1437,7 +1437,7 @@ arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi) | |||
1437 | 1437 | ||
1438 | return space_cccc_1100_010x(insn, asi); | 1438 | return space_cccc_1100_010x(insn, asi); |
1439 | 1439 | ||
1440 | } else if ((insn & 0x0e000000) == 0x0c400000) { | 1440 | } else if ((insn & 0x0e000000) == 0x0c000000) { |
1441 | 1441 | ||
1442 | return space_cccc_110x(insn, asi); | 1442 | return space_cccc_110x(insn, asi); |
1443 | 1443 | ||
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c index 2cfe8161b478..6d4105e6872f 100644 --- a/arch/arm/kernel/module.c +++ b/arch/arm/kernel/module.c | |||
@@ -22,6 +22,7 @@ | |||
22 | 22 | ||
23 | #include <asm/pgtable.h> | 23 | #include <asm/pgtable.h> |
24 | #include <asm/sections.h> | 24 | #include <asm/sections.h> |
25 | #include <asm/smp_plat.h> | ||
25 | #include <asm/unwind.h> | 26 | #include <asm/unwind.h> |
26 | 27 | ||
27 | #ifdef CONFIG_XIP_KERNEL | 28 | #ifdef CONFIG_XIP_KERNEL |
@@ -268,12 +269,28 @@ struct mod_unwind_map { | |||
268 | const Elf_Shdr *txt_sec; | 269 | const Elf_Shdr *txt_sec; |
269 | }; | 270 | }; |
270 | 271 | ||
272 | static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr, | ||
273 | const Elf_Shdr *sechdrs, const char *name) | ||
274 | { | ||
275 | const Elf_Shdr *s, *se; | ||
276 | const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; | ||
277 | |||
278 | for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) | ||
279 | if (strcmp(name, secstrs + s->sh_name) == 0) | ||
280 | return s; | ||
281 | |||
282 | return NULL; | ||
283 | } | ||
284 | |||
285 | extern void fixup_smp(const void *, unsigned long); | ||
286 | |||
271 | int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, | 287 | int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, |
272 | struct module *mod) | 288 | struct module *mod) |
273 | { | 289 | { |
290 | const Elf_Shdr * __maybe_unused s = NULL; | ||
274 | #ifdef CONFIG_ARM_UNWIND | 291 | #ifdef CONFIG_ARM_UNWIND |
275 | const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; | 292 | const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; |
276 | const Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum; | 293 | const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum; |
277 | struct mod_unwind_map maps[ARM_SEC_MAX]; | 294 | struct mod_unwind_map maps[ARM_SEC_MAX]; |
278 | int i; | 295 | int i; |
279 | 296 | ||
@@ -315,6 +332,9 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, | |||
315 | maps[i].txt_sec->sh_addr, | 332 | maps[i].txt_sec->sh_addr, |
316 | maps[i].txt_sec->sh_size); | 333 | maps[i].txt_sec->sh_size); |
317 | #endif | 334 | #endif |
335 | s = find_mod_section(hdr, sechdrs, ".alt.smp.init"); | ||
336 | if (s && !is_smp()) | ||
337 | fixup_smp((void *)s->sh_addr, s->sh_size); | ||
318 | return 0; | 338 | return 0; |
319 | } | 339 | } |
320 | 340 | ||
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 5efa2647a2fb..d150ad1ccb5d 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c | |||
@@ -700,7 +700,7 @@ user_backtrace(struct frame_tail __user *tail, | |||
700 | * Frame pointers should strictly progress back up the stack | 700 | * Frame pointers should strictly progress back up the stack |
701 | * (towards higher addresses). | 701 | * (towards higher addresses). |
702 | */ | 702 | */ |
703 | if (tail >= buftail.fp) | 703 | if (tail + 1 >= buftail.fp) |
704 | return NULL; | 704 | return NULL; |
705 | 705 | ||
706 | return buftail.fp - 1; | 706 | return buftail.fp - 1; |
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c index b8af96ea62e6..2c79eec19262 100644 --- a/arch/arm/kernel/pmu.c +++ b/arch/arm/kernel/pmu.c | |||
@@ -97,28 +97,34 @@ set_irq_affinity(int irq, | |||
97 | irq, cpu); | 97 | irq, cpu); |
98 | return err; | 98 | return err; |
99 | #else | 99 | #else |
100 | return 0; | 100 | return -EINVAL; |
101 | #endif | 101 | #endif |
102 | } | 102 | } |
103 | 103 | ||
104 | static int | 104 | static int |
105 | init_cpu_pmu(void) | 105 | init_cpu_pmu(void) |
106 | { | 106 | { |
107 | int i, err = 0; | 107 | int i, irqs, err = 0; |
108 | struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU]; | 108 | struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU]; |
109 | 109 | ||
110 | if (!pdev) { | 110 | if (!pdev) |
111 | err = -ENODEV; | 111 | return -ENODEV; |
112 | goto out; | 112 | |
113 | } | 113 | irqs = pdev->num_resources; |
114 | |||
115 | /* | ||
116 | * If we have a single PMU interrupt that we can't shift, assume that | ||
117 | * we're running on a uniprocessor machine and continue. | ||
118 | */ | ||
119 | if (irqs == 1 && !irq_can_set_affinity(platform_get_irq(pdev, 0))) | ||
120 | return 0; | ||
114 | 121 | ||
115 | for (i = 0; i < pdev->num_resources; ++i) { | 122 | for (i = 0; i < irqs; ++i) { |
116 | err = set_irq_affinity(platform_get_irq(pdev, i), i); | 123 | err = set_irq_affinity(platform_get_irq(pdev, i), i); |
117 | if (err) | 124 | if (err) |
118 | break; | 125 | break; |
119 | } | 126 | } |
120 | 127 | ||
121 | out: | ||
122 | return err; | 128 | return err; |
123 | } | 129 | } |
124 | 130 | ||
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 420b8d6485d6..5ea4fb718b97 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c | |||
@@ -226,8 +226,8 @@ int cpu_architecture(void) | |||
226 | * Register 0 and check for VMSAv7 or PMSAv7 */ | 226 | * Register 0 and check for VMSAv7 or PMSAv7 */ |
227 | asm("mrc p15, 0, %0, c0, c1, 4" | 227 | asm("mrc p15, 0, %0, c0, c1, 4" |
228 | : "=r" (mmfr0)); | 228 | : "=r" (mmfr0)); |
229 | if ((mmfr0 & 0x0000000f) == 0x00000003 || | 229 | if ((mmfr0 & 0x0000000f) >= 0x00000003 || |
230 | (mmfr0 & 0x000000f0) == 0x00000030) | 230 | (mmfr0 & 0x000000f0) >= 0x00000030) |
231 | cpu_arch = CPU_ARCH_ARMv7; | 231 | cpu_arch = CPU_ARCH_ARMv7; |
232 | else if ((mmfr0 & 0x0000000f) == 0x00000002 || | 232 | else if ((mmfr0 & 0x0000000f) == 0x00000002 || |
233 | (mmfr0 & 0x000000f0) == 0x00000020) | 233 | (mmfr0 & 0x000000f0) == 0x00000020) |
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 907d5a620bca..abaf8445ce25 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c | |||
@@ -474,7 +474,9 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka, | |||
474 | unsigned long handler = (unsigned long)ka->sa.sa_handler; | 474 | unsigned long handler = (unsigned long)ka->sa.sa_handler; |
475 | unsigned long retcode; | 475 | unsigned long retcode; |
476 | int thumb = 0; | 476 | int thumb = 0; |
477 | unsigned long cpsr = regs->ARM_cpsr & ~PSR_f; | 477 | unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT); |
478 | |||
479 | cpsr |= PSR_ENDSTATE; | ||
478 | 480 | ||
479 | /* | 481 | /* |
480 | * Maybe we need to deliver a 32-bit signal to a 26-bit task. | 482 | * Maybe we need to deliver a 32-bit signal to a 26-bit task. |
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 86b66f3f2031..61462790757f 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S | |||
@@ -21,6 +21,12 @@ | |||
21 | #define ARM_CPU_KEEP(x) | 21 | #define ARM_CPU_KEEP(x) |
22 | #endif | 22 | #endif |
23 | 23 | ||
24 | #if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK) | ||
25 | #define ARM_EXIT_KEEP(x) x | ||
26 | #else | ||
27 | #define ARM_EXIT_KEEP(x) | ||
28 | #endif | ||
29 | |||
24 | OUTPUT_ARCH(arm) | 30 | OUTPUT_ARCH(arm) |
25 | ENTRY(stext) | 31 | ENTRY(stext) |
26 | 32 | ||
@@ -43,6 +49,7 @@ SECTIONS | |||
43 | _sinittext = .; | 49 | _sinittext = .; |
44 | HEAD_TEXT | 50 | HEAD_TEXT |
45 | INIT_TEXT | 51 | INIT_TEXT |
52 | ARM_EXIT_KEEP(EXIT_TEXT) | ||
46 | _einittext = .; | 53 | _einittext = .; |
47 | ARM_CPU_DISCARD(PROC_INFO) | 54 | ARM_CPU_DISCARD(PROC_INFO) |
48 | __arch_info_begin = .; | 55 | __arch_info_begin = .; |
@@ -67,6 +74,7 @@ SECTIONS | |||
67 | #ifndef CONFIG_XIP_KERNEL | 74 | #ifndef CONFIG_XIP_KERNEL |
68 | __init_begin = _stext; | 75 | __init_begin = _stext; |
69 | INIT_DATA | 76 | INIT_DATA |
77 | ARM_EXIT_KEEP(EXIT_DATA) | ||
70 | #endif | 78 | #endif |
71 | } | 79 | } |
72 | 80 | ||
@@ -162,6 +170,7 @@ SECTIONS | |||
162 | . = ALIGN(PAGE_SIZE); | 170 | . = ALIGN(PAGE_SIZE); |
163 | __init_begin = .; | 171 | __init_begin = .; |
164 | INIT_DATA | 172 | INIT_DATA |
173 | ARM_EXIT_KEEP(EXIT_DATA) | ||
165 | . = ALIGN(PAGE_SIZE); | 174 | . = ALIGN(PAGE_SIZE); |
166 | __init_end = .; | 175 | __init_end = .; |
167 | #endif | 176 | #endif |
@@ -247,6 +256,8 @@ SECTIONS | |||
247 | } | 256 | } |
248 | #endif | 257 | #endif |
249 | 258 | ||
259 | NOTES | ||
260 | |||
250 | BSS_SECTION(0, 0, 0) | 261 | BSS_SECTION(0, 0, 0) |
251 | _end = .; | 262 | _end = .; |
252 | 263 | ||
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c index ffdf87be2958..82079545adc4 100644 --- a/arch/arm/mach-ep93xx/core.c +++ b/arch/arm/mach-ep93xx/core.c | |||
@@ -838,7 +838,7 @@ EXPORT_SYMBOL(ep93xx_i2s_release); | |||
838 | static struct resource ep93xx_ac97_resources[] = { | 838 | static struct resource ep93xx_ac97_resources[] = { |
839 | { | 839 | { |
840 | .start = EP93XX_AAC_PHYS_BASE, | 840 | .start = EP93XX_AAC_PHYS_BASE, |
841 | .end = EP93XX_AAC_PHYS_BASE + 0xb0 - 1, | 841 | .end = EP93XX_AAC_PHYS_BASE + 0xac - 1, |
842 | .flags = IORESOURCE_MEM, | 842 | .flags = IORESOURCE_MEM, |
843 | }, | 843 | }, |
844 | { | 844 | { |
diff --git a/arch/arm/mach-imx/mach-mx25_3ds.c b/arch/arm/mach-imx/mach-mx25_3ds.c index aa76cfd9f348..8382e7902078 100644 --- a/arch/arm/mach-imx/mach-mx25_3ds.c +++ b/arch/arm/mach-imx/mach-mx25_3ds.c | |||
@@ -180,7 +180,7 @@ static const uint32_t mx25pdk_keymap[] = { | |||
180 | KEY(3, 3, KEY_POWER), | 180 | KEY(3, 3, KEY_POWER), |
181 | }; | 181 | }; |
182 | 182 | ||
183 | static const struct matrix_keymap_data mx25pdk_keymap_data __initdata = { | 183 | static const struct matrix_keymap_data mx25pdk_keymap_data __initconst = { |
184 | .keymap = mx25pdk_keymap, | 184 | .keymap = mx25pdk_keymap, |
185 | .keymap_size = ARRAY_SIZE(mx25pdk_keymap), | 185 | .keymap_size = ARRAY_SIZE(mx25pdk_keymap), |
186 | }; | 186 | }; |
diff --git a/arch/arm/mach-mxs/clock-mx23.c b/arch/arm/mach-mxs/clock-mx23.c index b1a362ebfded..ca72a05ed9c1 100644 --- a/arch/arm/mach-mxs/clock-mx23.c +++ b/arch/arm/mach-mxs/clock-mx23.c | |||
@@ -304,7 +304,7 @@ static int name##_set_rate(struct clk *clk, unsigned long rate) \ | |||
304 | reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \ | 304 | reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \ |
305 | reg &= ~BM_CLKCTRL_##dr##_DIV; \ | 305 | reg &= ~BM_CLKCTRL_##dr##_DIV; \ |
306 | reg |= div << BP_CLKCTRL_##dr##_DIV; \ | 306 | reg |= div << BP_CLKCTRL_##dr##_DIV; \ |
307 | if (reg | (1 << clk->enable_shift)) { \ | 307 | if (reg & (1 << clk->enable_shift)) { \ |
308 | pr_err("%s: clock is gated\n", __func__); \ | 308 | pr_err("%s: clock is gated\n", __func__); \ |
309 | return -EINVAL; \ | 309 | return -EINVAL; \ |
310 | } \ | 310 | } \ |
@@ -347,7 +347,7 @@ static int name##_set_parent(struct clk *clk, struct clk *parent) \ | |||
347 | { \ | 347 | { \ |
348 | if (parent != clk->parent) { \ | 348 | if (parent != clk->parent) { \ |
349 | __raw_writel(BM_CLKCTRL_CLKSEQ_BYPASS_##bit, \ | 349 | __raw_writel(BM_CLKCTRL_CLKSEQ_BYPASS_##bit, \ |
350 | HW_CLKCTRL_CLKSEQ_TOG); \ | 350 | CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ_TOG); \ |
351 | clk->parent = parent; \ | 351 | clk->parent = parent; \ |
352 | } \ | 352 | } \ |
353 | \ | 353 | \ |
diff --git a/arch/arm/mach-mxs/clock-mx28.c b/arch/arm/mach-mxs/clock-mx28.c index 56312c092a9e..fd1c4c54b8e5 100644 --- a/arch/arm/mach-mxs/clock-mx28.c +++ b/arch/arm/mach-mxs/clock-mx28.c | |||
@@ -355,12 +355,12 @@ static int name##_set_rate(struct clk *clk, unsigned long rate) \ | |||
355 | } else { \ | 355 | } else { \ |
356 | reg &= ~BM_CLKCTRL_##dr##_DIV; \ | 356 | reg &= ~BM_CLKCTRL_##dr##_DIV; \ |
357 | reg |= div << BP_CLKCTRL_##dr##_DIV; \ | 357 | reg |= div << BP_CLKCTRL_##dr##_DIV; \ |
358 | if (reg | (1 << clk->enable_shift)) { \ | 358 | if (reg & (1 << clk->enable_shift)) { \ |
359 | pr_err("%s: clock is gated\n", __func__); \ | 359 | pr_err("%s: clock is gated\n", __func__); \ |
360 | return -EINVAL; \ | 360 | return -EINVAL; \ |
361 | } \ | 361 | } \ |
362 | } \ | 362 | } \ |
363 | __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU); \ | 363 | __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \ |
364 | \ | 364 | \ |
365 | for (i = 10000; i; i--) \ | 365 | for (i = 10000; i; i--) \ |
366 | if (!(__raw_readl(CLKCTRL_BASE_ADDR + \ | 366 | if (!(__raw_readl(CLKCTRL_BASE_ADDR + \ |
@@ -483,7 +483,7 @@ static int name##_set_parent(struct clk *clk, struct clk *parent) \ | |||
483 | { \ | 483 | { \ |
484 | if (parent != clk->parent) { \ | 484 | if (parent != clk->parent) { \ |
485 | __raw_writel(BM_CLKCTRL_CLKSEQ_BYPASS_##bit, \ | 485 | __raw_writel(BM_CLKCTRL_CLKSEQ_BYPASS_##bit, \ |
486 | HW_CLKCTRL_CLKSEQ_TOG); \ | 486 | CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ_TOG); \ |
487 | clk->parent = parent; \ | 487 | clk->parent = parent; \ |
488 | } \ | 488 | } \ |
489 | \ | 489 | \ |
@@ -609,7 +609,6 @@ static struct clk_lookup lookups[] = { | |||
609 | _REGISTER_CLOCK("duart", NULL, uart_clk) | 609 | _REGISTER_CLOCK("duart", NULL, uart_clk) |
610 | _REGISTER_CLOCK("imx28-fec.0", NULL, fec_clk) | 610 | _REGISTER_CLOCK("imx28-fec.0", NULL, fec_clk) |
611 | _REGISTER_CLOCK("imx28-fec.1", NULL, fec_clk) | 611 | _REGISTER_CLOCK("imx28-fec.1", NULL, fec_clk) |
612 | _REGISTER_CLOCK("fec.0", NULL, fec_clk) | ||
613 | _REGISTER_CLOCK("rtc", NULL, rtc_clk) | 612 | _REGISTER_CLOCK("rtc", NULL, rtc_clk) |
614 | _REGISTER_CLOCK("pll2", NULL, pll2_clk) | 613 | _REGISTER_CLOCK("pll2", NULL, pll2_clk) |
615 | _REGISTER_CLOCK(NULL, "hclk", hbus_clk) | 614 | _REGISTER_CLOCK(NULL, "hclk", hbus_clk) |
diff --git a/arch/arm/mach-mxs/clock.c b/arch/arm/mach-mxs/clock.c index e7d2269cf70e..a7093c88e6a6 100644 --- a/arch/arm/mach-mxs/clock.c +++ b/arch/arm/mach-mxs/clock.c | |||
@@ -57,7 +57,6 @@ static void __clk_disable(struct clk *clk) | |||
57 | if (clk->disable) | 57 | if (clk->disable) |
58 | clk->disable(clk); | 58 | clk->disable(clk); |
59 | __clk_disable(clk->parent); | 59 | __clk_disable(clk->parent); |
60 | __clk_disable(clk->secondary); | ||
61 | } | 60 | } |
62 | } | 61 | } |
63 | 62 | ||
@@ -68,7 +67,6 @@ static int __clk_enable(struct clk *clk) | |||
68 | 67 | ||
69 | if (clk->usecount++ == 0) { | 68 | if (clk->usecount++ == 0) { |
70 | __clk_enable(clk->parent); | 69 | __clk_enable(clk->parent); |
71 | __clk_enable(clk->secondary); | ||
72 | 70 | ||
73 | if (clk->enable) | 71 | if (clk->enable) |
74 | clk->enable(clk); | 72 | clk->enable(clk); |
diff --git a/arch/arm/mach-mxs/gpio.c b/arch/arm/mach-mxs/gpio.c index d7ad7a61366d..cb0c0e83a527 100644 --- a/arch/arm/mach-mxs/gpio.c +++ b/arch/arm/mach-mxs/gpio.c | |||
@@ -139,6 +139,8 @@ static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc) | |||
139 | struct mxs_gpio_port *port = (struct mxs_gpio_port *)get_irq_data(irq); | 139 | struct mxs_gpio_port *port = (struct mxs_gpio_port *)get_irq_data(irq); |
140 | u32 gpio_irq_no_base = port->virtual_irq_start; | 140 | u32 gpio_irq_no_base = port->virtual_irq_start; |
141 | 141 | ||
142 | desc->irq_data.chip->irq_ack(&desc->irq_data); | ||
143 | |||
142 | irq_stat = __raw_readl(port->base + PINCTRL_IRQSTAT(port->id)) & | 144 | irq_stat = __raw_readl(port->base + PINCTRL_IRQSTAT(port->id)) & |
143 | __raw_readl(port->base + PINCTRL_IRQEN(port->id)); | 145 | __raw_readl(port->base + PINCTRL_IRQEN(port->id)); |
144 | 146 | ||
diff --git a/arch/arm/mach-mxs/include/mach/clock.h b/arch/arm/mach-mxs/include/mach/clock.h index 041e276d8a32..592c9ab5d760 100644 --- a/arch/arm/mach-mxs/include/mach/clock.h +++ b/arch/arm/mach-mxs/include/mach/clock.h | |||
@@ -29,8 +29,6 @@ struct clk { | |||
29 | int id; | 29 | int id; |
30 | /* Source clock this clk depends on */ | 30 | /* Source clock this clk depends on */ |
31 | struct clk *parent; | 31 | struct clk *parent; |
32 | /* Secondary clock to enable/disable with this clock */ | ||
33 | struct clk *secondary; | ||
34 | /* Reference count of clock enable/disable */ | 32 | /* Reference count of clock enable/disable */ |
35 | __s8 usecount; | 33 | __s8 usecount; |
36 | /* Register bit position for clock's enable/disable control. */ | 34 | /* Register bit position for clock's enable/disable control. */ |
diff --git a/arch/arm/mach-omap1/lcd_dma.c b/arch/arm/mach-omap1/lcd_dma.c index c9088d85da04..453809359ba6 100644 --- a/arch/arm/mach-omap1/lcd_dma.c +++ b/arch/arm/mach-omap1/lcd_dma.c | |||
@@ -37,7 +37,7 @@ int omap_lcd_dma_running(void) | |||
37 | * On OMAP1510, internal LCD controller will start the transfer | 37 | * On OMAP1510, internal LCD controller will start the transfer |
38 | * when it gets enabled, so assume DMA running if LCD enabled. | 38 | * when it gets enabled, so assume DMA running if LCD enabled. |
39 | */ | 39 | */ |
40 | if (cpu_is_omap1510()) | 40 | if (cpu_is_omap15xx()) |
41 | if (omap_readw(OMAP_LCDC_CONTROL) & OMAP_LCDC_CTRL_LCD_EN) | 41 | if (omap_readw(OMAP_LCDC_CONTROL) & OMAP_LCDC_CTRL_LCD_EN) |
42 | return 1; | 42 | return 1; |
43 | 43 | ||
@@ -95,7 +95,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_single_transfer); | |||
95 | 95 | ||
96 | void omap_set_lcd_dma_b1_rotation(int rotate) | 96 | void omap_set_lcd_dma_b1_rotation(int rotate) |
97 | { | 97 | { |
98 | if (cpu_is_omap1510()) { | 98 | if (cpu_is_omap15xx()) { |
99 | printk(KERN_ERR "DMA rotation is not supported in 1510 mode\n"); | 99 | printk(KERN_ERR "DMA rotation is not supported in 1510 mode\n"); |
100 | BUG(); | 100 | BUG(); |
101 | return; | 101 | return; |
@@ -106,7 +106,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_b1_rotation); | |||
106 | 106 | ||
107 | void omap_set_lcd_dma_b1_mirror(int mirror) | 107 | void omap_set_lcd_dma_b1_mirror(int mirror) |
108 | { | 108 | { |
109 | if (cpu_is_omap1510()) { | 109 | if (cpu_is_omap15xx()) { |
110 | printk(KERN_ERR "DMA mirror is not supported in 1510 mode\n"); | 110 | printk(KERN_ERR "DMA mirror is not supported in 1510 mode\n"); |
111 | BUG(); | 111 | BUG(); |
112 | } | 112 | } |
@@ -116,7 +116,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_b1_mirror); | |||
116 | 116 | ||
117 | void omap_set_lcd_dma_b1_vxres(unsigned long vxres) | 117 | void omap_set_lcd_dma_b1_vxres(unsigned long vxres) |
118 | { | 118 | { |
119 | if (cpu_is_omap1510()) { | 119 | if (cpu_is_omap15xx()) { |
120 | printk(KERN_ERR "DMA virtual resulotion is not supported " | 120 | printk(KERN_ERR "DMA virtual resulotion is not supported " |
121 | "in 1510 mode\n"); | 121 | "in 1510 mode\n"); |
122 | BUG(); | 122 | BUG(); |
@@ -127,7 +127,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_b1_vxres); | |||
127 | 127 | ||
128 | void omap_set_lcd_dma_b1_scale(unsigned int xscale, unsigned int yscale) | 128 | void omap_set_lcd_dma_b1_scale(unsigned int xscale, unsigned int yscale) |
129 | { | 129 | { |
130 | if (cpu_is_omap1510()) { | 130 | if (cpu_is_omap15xx()) { |
131 | printk(KERN_ERR "DMA scale is not supported in 1510 mode\n"); | 131 | printk(KERN_ERR "DMA scale is not supported in 1510 mode\n"); |
132 | BUG(); | 132 | BUG(); |
133 | } | 133 | } |
@@ -177,7 +177,7 @@ static void set_b1_regs(void) | |||
177 | bottom = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1); | 177 | bottom = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1); |
178 | /* 1510 DMA requires the bottom address to be 2 more | 178 | /* 1510 DMA requires the bottom address to be 2 more |
179 | * than the actual last memory access location. */ | 179 | * than the actual last memory access location. */ |
180 | if (cpu_is_omap1510() && | 180 | if (cpu_is_omap15xx() && |
181 | lcd_dma.data_type == OMAP_DMA_DATA_TYPE_S32) | 181 | lcd_dma.data_type == OMAP_DMA_DATA_TYPE_S32) |
182 | bottom += 2; | 182 | bottom += 2; |
183 | ei = PIXSTEP(0, 0, 1, 0); | 183 | ei = PIXSTEP(0, 0, 1, 0); |
@@ -241,7 +241,7 @@ static void set_b1_regs(void) | |||
241 | return; /* Suppress warning about uninitialized vars */ | 241 | return; /* Suppress warning about uninitialized vars */ |
242 | } | 242 | } |
243 | 243 | ||
244 | if (cpu_is_omap1510()) { | 244 | if (cpu_is_omap15xx()) { |
245 | omap_writew(top >> 16, OMAP1510_DMA_LCD_TOP_F1_U); | 245 | omap_writew(top >> 16, OMAP1510_DMA_LCD_TOP_F1_U); |
246 | omap_writew(top, OMAP1510_DMA_LCD_TOP_F1_L); | 246 | omap_writew(top, OMAP1510_DMA_LCD_TOP_F1_L); |
247 | omap_writew(bottom >> 16, OMAP1510_DMA_LCD_BOT_F1_U); | 247 | omap_writew(bottom >> 16, OMAP1510_DMA_LCD_BOT_F1_U); |
@@ -343,7 +343,7 @@ void omap_free_lcd_dma(void) | |||
343 | BUG(); | 343 | BUG(); |
344 | return; | 344 | return; |
345 | } | 345 | } |
346 | if (!cpu_is_omap1510()) | 346 | if (!cpu_is_omap15xx()) |
347 | omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1, | 347 | omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1, |
348 | OMAP1610_DMA_LCD_CCR); | 348 | OMAP1610_DMA_LCD_CCR); |
349 | lcd_dma.reserved = 0; | 349 | lcd_dma.reserved = 0; |
@@ -360,7 +360,7 @@ void omap_enable_lcd_dma(void) | |||
360 | * connected. Otherwise the OMAP internal controller will | 360 | * connected. Otherwise the OMAP internal controller will |
361 | * start the transfer when it gets enabled. | 361 | * start the transfer when it gets enabled. |
362 | */ | 362 | */ |
363 | if (cpu_is_omap1510() || !lcd_dma.ext_ctrl) | 363 | if (cpu_is_omap15xx() || !lcd_dma.ext_ctrl) |
364 | return; | 364 | return; |
365 | 365 | ||
366 | w = omap_readw(OMAP1610_DMA_LCD_CTRL); | 366 | w = omap_readw(OMAP1610_DMA_LCD_CTRL); |
@@ -378,14 +378,14 @@ EXPORT_SYMBOL(omap_enable_lcd_dma); | |||
378 | void omap_setup_lcd_dma(void) | 378 | void omap_setup_lcd_dma(void) |
379 | { | 379 | { |
380 | BUG_ON(lcd_dma.active); | 380 | BUG_ON(lcd_dma.active); |
381 | if (!cpu_is_omap1510()) { | 381 | if (!cpu_is_omap15xx()) { |
382 | /* Set some reasonable defaults */ | 382 | /* Set some reasonable defaults */ |
383 | omap_writew(0x5440, OMAP1610_DMA_LCD_CCR); | 383 | omap_writew(0x5440, OMAP1610_DMA_LCD_CCR); |
384 | omap_writew(0x9102, OMAP1610_DMA_LCD_CSDP); | 384 | omap_writew(0x9102, OMAP1610_DMA_LCD_CSDP); |
385 | omap_writew(0x0004, OMAP1610_DMA_LCD_LCH_CTRL); | 385 | omap_writew(0x0004, OMAP1610_DMA_LCD_LCH_CTRL); |
386 | } | 386 | } |
387 | set_b1_regs(); | 387 | set_b1_regs(); |
388 | if (!cpu_is_omap1510()) { | 388 | if (!cpu_is_omap15xx()) { |
389 | u16 w; | 389 | u16 w; |
390 | 390 | ||
391 | w = omap_readw(OMAP1610_DMA_LCD_CCR); | 391 | w = omap_readw(OMAP1610_DMA_LCD_CCR); |
@@ -407,7 +407,7 @@ void omap_stop_lcd_dma(void) | |||
407 | u16 w; | 407 | u16 w; |
408 | 408 | ||
409 | lcd_dma.active = 0; | 409 | lcd_dma.active = 0; |
410 | if (cpu_is_omap1510() || !lcd_dma.ext_ctrl) | 410 | if (cpu_is_omap15xx() || !lcd_dma.ext_ctrl) |
411 | return; | 411 | return; |
412 | 412 | ||
413 | w = omap_readw(OMAP1610_DMA_LCD_CCR); | 413 | w = omap_readw(OMAP1610_DMA_LCD_CCR); |
diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c index f83fc335c613..6885d2fac183 100644 --- a/arch/arm/mach-omap1/time.c +++ b/arch/arm/mach-omap1/time.c | |||
@@ -44,7 +44,6 @@ | |||
44 | #include <linux/clocksource.h> | 44 | #include <linux/clocksource.h> |
45 | #include <linux/clockchips.h> | 45 | #include <linux/clockchips.h> |
46 | #include <linux/io.h> | 46 | #include <linux/io.h> |
47 | #include <linux/sched.h> | ||
48 | 47 | ||
49 | #include <asm/system.h> | 48 | #include <asm/system.h> |
50 | #include <mach/hardware.h> | 49 | #include <mach/hardware.h> |
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c index e906e05bb41b..9a2a31e011ce 100644 --- a/arch/arm/mach-omap2/board-devkit8000.c +++ b/arch/arm/mach-omap2/board-devkit8000.c | |||
@@ -115,9 +115,6 @@ static struct omap2_hsmmc_info mmc[] = { | |||
115 | 115 | ||
116 | static int devkit8000_panel_enable_lcd(struct omap_dss_device *dssdev) | 116 | static int devkit8000_panel_enable_lcd(struct omap_dss_device *dssdev) |
117 | { | 117 | { |
118 | twl_i2c_write_u8(TWL4030_MODULE_GPIO, 0x80, REG_GPIODATADIR1); | ||
119 | twl_i2c_write_u8(TWL4030_MODULE_LED, 0x0, 0x0); | ||
120 | |||
121 | if (gpio_is_valid(dssdev->reset_gpio)) | 118 | if (gpio_is_valid(dssdev->reset_gpio)) |
122 | gpio_set_value_cansleep(dssdev->reset_gpio, 1); | 119 | gpio_set_value_cansleep(dssdev->reset_gpio, 1); |
123 | return 0; | 120 | return 0; |
@@ -247,6 +244,8 @@ static struct gpio_led gpio_leds[]; | |||
247 | static int devkit8000_twl_gpio_setup(struct device *dev, | 244 | static int devkit8000_twl_gpio_setup(struct device *dev, |
248 | unsigned gpio, unsigned ngpio) | 245 | unsigned gpio, unsigned ngpio) |
249 | { | 246 | { |
247 | int ret; | ||
248 | |||
250 | omap_mux_init_gpio(29, OMAP_PIN_INPUT); | 249 | omap_mux_init_gpio(29, OMAP_PIN_INPUT); |
251 | /* gpio + 0 is "mmc0_cd" (input/IRQ) */ | 250 | /* gpio + 0 is "mmc0_cd" (input/IRQ) */ |
252 | mmc[0].gpio_cd = gpio + 0; | 251 | mmc[0].gpio_cd = gpio + 0; |
@@ -255,17 +254,23 @@ static int devkit8000_twl_gpio_setup(struct device *dev, | |||
255 | /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */ | 254 | /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */ |
256 | gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1; | 255 | gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1; |
257 | 256 | ||
258 | /* gpio + 1 is "LCD_PWREN" (out, active high) */ | 257 | /* TWL4030_GPIO_MAX + 0 is "LCD_PWREN" (out, active high) */ |
259 | devkit8000_lcd_device.reset_gpio = gpio + 1; | 258 | devkit8000_lcd_device.reset_gpio = gpio + TWL4030_GPIO_MAX + 0; |
260 | gpio_request(devkit8000_lcd_device.reset_gpio, "LCD_PWREN"); | 259 | ret = gpio_request_one(devkit8000_lcd_device.reset_gpio, |
261 | /* Disable until needed */ | 260 | GPIOF_DIR_OUT | GPIOF_INIT_LOW, "LCD_PWREN"); |
262 | gpio_direction_output(devkit8000_lcd_device.reset_gpio, 0); | 261 | if (ret < 0) { |
262 | devkit8000_lcd_device.reset_gpio = -EINVAL; | ||
263 | printk(KERN_ERR "Failed to request GPIO for LCD_PWRN\n"); | ||
264 | } | ||
263 | 265 | ||
264 | /* gpio + 7 is "DVI_PD" (out, active low) */ | 266 | /* gpio + 7 is "DVI_PD" (out, active low) */ |
265 | devkit8000_dvi_device.reset_gpio = gpio + 7; | 267 | devkit8000_dvi_device.reset_gpio = gpio + 7; |
266 | gpio_request(devkit8000_dvi_device.reset_gpio, "DVI PowerDown"); | 268 | ret = gpio_request_one(devkit8000_dvi_device.reset_gpio, |
267 | /* Disable until needed */ | 269 | GPIOF_DIR_OUT | GPIOF_INIT_LOW, "DVI PowerDown"); |
268 | gpio_direction_output(devkit8000_dvi_device.reset_gpio, 0); | 270 | if (ret < 0) { |
271 | devkit8000_dvi_device.reset_gpio = -EINVAL; | ||
272 | printk(KERN_ERR "Failed to request GPIO for DVI PowerDown\n"); | ||
273 | } | ||
269 | 274 | ||
270 | return 0; | 275 | return 0; |
271 | } | 276 | } |
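The devkit8000 hunk above replaces the open-coded gpio_request()/gpio_direction_output() pairs with gpio_request_one(), which claims the line, sets its direction and initial level in one call, and lets the caller report failure. A minimal sketch of that pattern, assuming a placeholder GPIO number rather than the board file's values:

#include <linux/gpio.h>
#include <linux/kernel.h>

static int example_claim_lcd_gpio(unsigned gpio)
{
	int ret;

	/* request the line and drive it low until the panel needs it */
	ret = gpio_request_one(gpio, GPIOF_DIR_OUT | GPIOF_INIT_LOW,
			       "LCD_PWREN");
	if (ret < 0)
		pr_err("Failed to request GPIO %u\n", gpio);

	return ret;
}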
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c index e001a048dc0c..e944025d5ef8 100644 --- a/arch/arm/mach-omap2/board-omap4panda.c +++ b/arch/arm/mach-omap2/board-omap4panda.c | |||
@@ -409,8 +409,6 @@ static void __init omap4_panda_init(void) | |||
409 | platform_add_devices(panda_devices, ARRAY_SIZE(panda_devices)); | 409 | platform_add_devices(panda_devices, ARRAY_SIZE(panda_devices)); |
410 | omap_serial_init(); | 410 | omap_serial_init(); |
411 | omap4_twl6030_hsmmc_init(mmc); | 411 | omap4_twl6030_hsmmc_init(mmc); |
412 | /* OMAP4 Panda uses internal transceiver so register nop transceiver */ | ||
413 | usb_nop_xceiv_register(); | ||
414 | omap4_ehci_init(); | 412 | omap4_ehci_init(); |
415 | usb_musb_init(&musb_board_data); | 413 | usb_musb_init(&musb_board_data); |
416 | } | 414 | } |
diff --git a/arch/arm/mach-omap2/board-rm680.c b/arch/arm/mach-omap2/board-rm680.c index cb77be7ac44f..39a71bb8a308 100644 --- a/arch/arm/mach-omap2/board-rm680.c +++ b/arch/arm/mach-omap2/board-rm680.c | |||
@@ -40,9 +40,6 @@ static struct regulator_consumer_supply rm680_vemmc_consumers[] = { | |||
40 | static struct regulator_init_data rm680_vemmc = { | 40 | static struct regulator_init_data rm680_vemmc = { |
41 | .constraints = { | 41 | .constraints = { |
42 | .name = "rm680_vemmc", | 42 | .name = "rm680_vemmc", |
43 | .min_uV = 2900000, | ||
44 | .max_uV = 2900000, | ||
45 | .apply_uV = 1, | ||
46 | .valid_modes_mask = REGULATOR_MODE_NORMAL | 43 | .valid_modes_mask = REGULATOR_MODE_NORMAL |
47 | | REGULATOR_MODE_STANDBY, | 44 | | REGULATOR_MODE_STANDBY, |
48 | .valid_ops_mask = REGULATOR_CHANGE_STATUS | 45 | .valid_ops_mask = REGULATOR_CHANGE_STATUS |
diff --git a/arch/arm/mach-omap2/clkt_dpll.c b/arch/arm/mach-omap2/clkt_dpll.c index 337392c3f549..acb7ae5b0a25 100644 --- a/arch/arm/mach-omap2/clkt_dpll.c +++ b/arch/arm/mach-omap2/clkt_dpll.c | |||
@@ -77,7 +77,7 @@ static int _dpll_test_fint(struct clk *clk, u8 n) | |||
77 | dd = clk->dpll_data; | 77 | dd = clk->dpll_data; |
78 | 78 | ||
79 | /* DPLL divider must result in a valid jitter correction val */ | 79 | /* DPLL divider must result in a valid jitter correction val */ |
80 | fint = clk->parent->rate / (n + 1); | 80 | fint = clk->parent->rate / n; |
81 | if (fint < DPLL_FINT_BAND1_MIN) { | 81 | if (fint < DPLL_FINT_BAND1_MIN) { |
82 | 82 | ||
83 | pr_debug("rejecting n=%d due to Fint failure, " | 83 | pr_debug("rejecting n=%d due to Fint failure, " |
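The clkt_dpll.c change corrects the DPLL internal-clock calculation: the n handed to _dpll_test_fint() is already the divider value, so Fint is the parent rate divided by n rather than n + 1. As a worked example with illustrative numbers only, a 26 MHz parent with n = 13 now gives Fint = 26,000,000 / 13 = 2,000,000 Hz, where the old expression would have divided by 14 and yielded roughly 1.86 MHz.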
diff --git a/arch/arm/mach-omap2/mailbox.c b/arch/arm/mach-omap2/mailbox.c index 394413dc7deb..0a585dfa9874 100644 --- a/arch/arm/mach-omap2/mailbox.c +++ b/arch/arm/mach-omap2/mailbox.c | |||
@@ -334,7 +334,7 @@ static struct omap_mbox mbox_iva_info = { | |||
334 | .priv = &omap2_mbox_iva_priv, | 334 | .priv = &omap2_mbox_iva_priv, |
335 | }; | 335 | }; |
336 | 336 | ||
337 | struct omap_mbox *omap2_mboxes[] = { &mbox_iva_info, &mbox_dsp_info, NULL }; | 337 | struct omap_mbox *omap2_mboxes[] = { &mbox_dsp_info, &mbox_iva_info, NULL }; |
338 | #endif | 338 | #endif |
339 | 339 | ||
340 | #if defined(CONFIG_ARCH_OMAP4) | 340 | #if defined(CONFIG_ARCH_OMAP4) |
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c index fae49d12bc76..6c84659cf846 100644 --- a/arch/arm/mach-omap2/mux.c +++ b/arch/arm/mach-omap2/mux.c | |||
@@ -605,7 +605,7 @@ static void __init omap_mux_dbg_create_entry( | |||
605 | list_for_each_entry(e, &partition->muxmodes, node) { | 605 | list_for_each_entry(e, &partition->muxmodes, node) { |
606 | struct omap_mux *m = &e->mux; | 606 | struct omap_mux *m = &e->mux; |
607 | 607 | ||
608 | (void)debugfs_create_file(m->muxnames[0], S_IWUGO, mux_dbg_dir, | 608 | (void)debugfs_create_file(m->muxnames[0], S_IWUSR, mux_dbg_dir, |
609 | m, &omap_mux_dbg_signal_fops); | 609 | m, &omap_mux_dbg_signal_fops); |
610 | } | 610 | } |
611 | } | 611 | } |
@@ -1000,6 +1000,7 @@ int __init omap_mux_init(const char *name, u32 flags, | |||
1000 | if (!partition->base) { | 1000 | if (!partition->base) { |
1001 | pr_err("%s: Could not ioremap mux partition at 0x%08x\n", | 1001 | pr_err("%s: Could not ioremap mux partition at 0x%08x\n", |
1002 | __func__, partition->phys); | 1002 | __func__, partition->phys); |
1003 | kfree(partition); | ||
1003 | return -ENODEV; | 1004 | return -ENODEV; |
1004 | } | 1005 | } |
1005 | 1006 | ||
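Besides tightening the debugfs file mode to S_IWUSR, the mux.c hunk plugs a leak: the partition structure allocated earlier must be freed when ioremap() fails. A hedged sketch of that error path, using an illustrative structure rather than the real struct omap_mux_partition:

#include <linux/io.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_partition {
	u32		phys;
	u32		size;
	void __iomem	*base;
};

static struct example_partition *example_partition_init(u32 phys, u32 size)
{
	struct example_partition *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return NULL;

	p->phys = phys;
	p->size = size;
	p->base = ioremap(phys, size);
	if (!p->base) {
		kfree(p);	/* do not leak the allocation on failure */
		return NULL;
	}

	return p;
}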
diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c index 125f56591fb5..a5a83b358ddd 100644 --- a/arch/arm/mach-omap2/pm-debug.c +++ b/arch/arm/mach-omap2/pm-debug.c | |||
@@ -637,14 +637,14 @@ static int __init pm_dbg_init(void) | |||
637 | 637 | ||
638 | } | 638 | } |
639 | 639 | ||
640 | (void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUGO, d, | 640 | (void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUSR, d, |
641 | &enable_off_mode, &pm_dbg_option_fops); | 641 | &enable_off_mode, &pm_dbg_option_fops); |
642 | (void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUGO, d, | 642 | (void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUSR, d, |
643 | &sleep_while_idle, &pm_dbg_option_fops); | 643 | &sleep_while_idle, &pm_dbg_option_fops); |
644 | (void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUGO, d, | 644 | (void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUSR, d, |
645 | &wakeup_timer_seconds, &pm_dbg_option_fops); | 645 | &wakeup_timer_seconds, &pm_dbg_option_fops); |
646 | (void) debugfs_create_file("wakeup_timer_milliseconds", | 646 | (void) debugfs_create_file("wakeup_timer_milliseconds", |
647 | S_IRUGO | S_IWUGO, d, &wakeup_timer_milliseconds, | 647 | S_IRUGO | S_IWUSR, d, &wakeup_timer_milliseconds, |
648 | &pm_dbg_option_fops); | 648 | &pm_dbg_option_fops); |
649 | pm_dbg_init_done = 1; | 649 | pm_dbg_init_done = 1; |
650 | 650 | ||
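These pm-debug.c hunks, like the mux.c and smartreflex.c ones, drop group/other write permission from debugfs entries: S_IWUGO made them world-writable, while S_IRUGO | S_IWUSR keeps them world-readable but writable only by the owner. A small sketch of the resulting idiom, using the debugfs_create_x32() helper that also appears later in this series (the option variable is a placeholder):

#include <linux/debugfs.h>
#include <linux/stat.h>

static u32 example_option;

static void example_pm_dbg_files(struct dentry *d)
{
	/* world-readable, writable only by the owner (root) */
	(void) debugfs_create_x32("enable_off_mode", S_IRUGO | S_IWUSR, d,
				  &example_option);
}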
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c index a4aa1920a75c..2f864e4b085d 100644 --- a/arch/arm/mach-omap2/pm34xx.c +++ b/arch/arm/mach-omap2/pm34xx.c | |||
@@ -168,9 +168,10 @@ static void omap3_core_restore_context(void) | |||
168 | * once during boot sequence, but this works as we are not using secure | 168 | * once during boot sequence, but this works as we are not using secure |
169 | * services. | 169 | * services. |
170 | */ | 170 | */ |
171 | static void omap3_save_secure_ram_context(u32 target_mpu_state) | 171 | static void omap3_save_secure_ram_context(void) |
172 | { | 172 | { |
173 | u32 ret; | 173 | u32 ret; |
174 | int mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm); | ||
174 | 175 | ||
175 | if (omap_type() != OMAP2_DEVICE_TYPE_GP) { | 176 | if (omap_type() != OMAP2_DEVICE_TYPE_GP) { |
176 | /* | 177 | /* |
@@ -181,7 +182,7 @@ static void omap3_save_secure_ram_context(u32 target_mpu_state) | |||
181 | pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON); | 182 | pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON); |
182 | ret = _omap_save_secure_sram((u32 *) | 183 | ret = _omap_save_secure_sram((u32 *) |
183 | __pa(omap3_secure_ram_storage)); | 184 | __pa(omap3_secure_ram_storage)); |
184 | pwrdm_set_next_pwrst(mpu_pwrdm, target_mpu_state); | 185 | pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state); |
185 | /* Following is for error tracking, it should not happen */ | 186 | /* Following is for error tracking, it should not happen */ |
186 | if (ret) { | 187 | if (ret) { |
187 | printk(KERN_ERR "save_secure_sram() returns %08x\n", | 188 | printk(KERN_ERR "save_secure_sram() returns %08x\n", |
@@ -1094,7 +1095,7 @@ static int __init omap3_pm_init(void) | |||
1094 | local_fiq_disable(); | 1095 | local_fiq_disable(); |
1095 | 1096 | ||
1096 | omap_dma_global_context_save(); | 1097 | omap_dma_global_context_save(); |
1097 | omap3_save_secure_ram_context(PWRDM_POWER_ON); | 1098 | omap3_save_secure_ram_context(); |
1098 | omap_dma_global_context_restore(); | 1099 | omap_dma_global_context_restore(); |
1099 | 1100 | ||
1100 | local_irq_enable(); | 1101 | local_irq_enable(); |
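The pm34xx.c change stops passing a fixed target MPU state into omap3_save_secure_ram_context() and instead snapshots and restores whatever next power state is currently programmed. A sketch of that save/restore pattern, assuming the powerdomain helpers as used in the hunk and eliding the secure SRAM call:

static void example_save_secure_ram(struct powerdomain *mpu_pwrdm)
{
	int mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);

	/* force MPU ON while the secure SRAM contents are saved */
	pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);

	/* ... _omap_save_secure_sram() call goes here ... */

	/* put back whatever was programmed before */
	pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state);
}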
diff --git a/arch/arm/mach-omap2/prcm_mpu44xx.h b/arch/arm/mach-omap2/prcm_mpu44xx.h index 729a644ce852..3300ff6e3cfe 100644 --- a/arch/arm/mach-omap2/prcm_mpu44xx.h +++ b/arch/arm/mach-omap2/prcm_mpu44xx.h | |||
@@ -38,8 +38,8 @@ | |||
38 | #define OMAP4430_PRCM_MPU_CPU1_INST 0x0800 | 38 | #define OMAP4430_PRCM_MPU_CPU1_INST 0x0800 |
39 | 39 | ||
40 | /* PRCM_MPU clockdomain register offsets (from instance start) */ | 40 | /* PRCM_MPU clockdomain register offsets (from instance start) */ |
41 | #define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS 0x0000 | 41 | #define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS 0x0018 |
42 | #define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS 0x0000 | 42 | #define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS 0x0018 |
43 | 43 | ||
44 | 44 | ||
45 | /* | 45 | /* |
diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c index 77ecebf3fae2..95ac336fe3f7 100644 --- a/arch/arm/mach-omap2/smartreflex.c +++ b/arch/arm/mach-omap2/smartreflex.c | |||
@@ -780,8 +780,7 @@ static int omap_sr_autocomp_show(void *data, u64 *val) | |||
780 | struct omap_sr *sr_info = (struct omap_sr *) data; | 780 | struct omap_sr *sr_info = (struct omap_sr *) data; |
781 | 781 | ||
782 | if (!sr_info) { | 782 | if (!sr_info) { |
783 | pr_warning("%s: omap_sr struct for sr_%s not found\n", | 783 | pr_warning("%s: omap_sr struct not found\n", __func__); |
784 | __func__, sr_info->voltdm->name); | ||
785 | return -EINVAL; | 784 | return -EINVAL; |
786 | } | 785 | } |
787 | 786 | ||
@@ -795,8 +794,7 @@ static int omap_sr_autocomp_store(void *data, u64 val) | |||
795 | struct omap_sr *sr_info = (struct omap_sr *) data; | 794 | struct omap_sr *sr_info = (struct omap_sr *) data; |
796 | 795 | ||
797 | if (!sr_info) { | 796 | if (!sr_info) { |
798 | pr_warning("%s: omap_sr struct for sr_%s not found\n", | 797 | pr_warning("%s: omap_sr struct not found\n", __func__); |
799 | __func__, sr_info->voltdm->name); | ||
800 | return -EINVAL; | 798 | return -EINVAL; |
801 | } | 799 | } |
802 | 800 | ||
@@ -834,7 +832,8 @@ static int __init omap_sr_probe(struct platform_device *pdev) | |||
834 | 832 | ||
835 | if (!pdata) { | 833 | if (!pdata) { |
836 | dev_err(&pdev->dev, "%s: platform data missing\n", __func__); | 834 | dev_err(&pdev->dev, "%s: platform data missing\n", __func__); |
837 | return -EINVAL; | 835 | ret = -EINVAL; |
836 | goto err_free_devinfo; | ||
838 | } | 837 | } |
839 | 838 | ||
840 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 839 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
@@ -901,7 +900,7 @@ static int __init omap_sr_probe(struct platform_device *pdev) | |||
901 | return PTR_ERR(dbg_dir); | 900 | return PTR_ERR(dbg_dir); |
902 | } | 901 | } |
903 | 902 | ||
904 | (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUGO, dbg_dir, | 903 | (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, dbg_dir, |
905 | (void *)sr_info, &pm_sr_fops); | 904 | (void *)sr_info, &pm_sr_fops); |
906 | (void) debugfs_create_x32("errweight", S_IRUGO, dbg_dir, | 905 | (void) debugfs_create_x32("errweight", S_IRUGO, dbg_dir, |
907 | &sr_info->err_weight); | 906 | &sr_info->err_weight); |
@@ -940,7 +939,7 @@ static int __init omap_sr_probe(struct platform_device *pdev) | |||
940 | strcpy(name, "volt_"); | 939 | strcpy(name, "volt_"); |
941 | sprintf(volt_name, "%d", volt_data[i].volt_nominal); | 940 | sprintf(volt_name, "%d", volt_data[i].volt_nominal); |
942 | strcat(name, volt_name); | 941 | strcat(name, volt_name); |
943 | (void) debugfs_create_x32(name, S_IRUGO | S_IWUGO, nvalue_dir, | 942 | (void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir, |
944 | &(sr_info->nvalue_table[i].nvalue)); | 943 | &(sr_info->nvalue_table[i].nvalue)); |
945 | } | 944 | } |
946 | 945 | ||
@@ -966,7 +965,7 @@ static int __devexit omap_sr_remove(struct platform_device *pdev) | |||
966 | } | 965 | } |
967 | 966 | ||
968 | sr_info = _sr_lookup(pdata->voltdm); | 967 | sr_info = _sr_lookup(pdata->voltdm); |
969 | if (!sr_info) { | 968 | if (IS_ERR(sr_info)) { |
970 | dev_warn(&pdev->dev, "%s: omap_sr struct not found\n", | 969 | dev_warn(&pdev->dev, "%s: omap_sr struct not found\n", |
971 | __func__); | 970 | __func__); |
972 | return -EINVAL; | 971 | return -EINVAL; |
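Two distinct fixes sit in the smartreflex.c hunks: the warning messages no longer dereference sr_info inside the very check that proved it invalid, and the _sr_lookup() result is now tested with IS_ERR(), since the helper evidently reports failure with ERR_PTR() rather than NULL. A minimal sketch of the second point:

#include <linux/err.h>

struct omap_sr;				/* opaque for this sketch */

static int example_check_lookup(struct omap_sr *sr_info)
{
	/* an ERR_PTR() return is non-NULL, so only IS_ERR() catches it */
	if (IS_ERR(sr_info))
		return PTR_ERR(sr_info);

	return 0;			/* safe to dereference sr_info */
}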
diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c index 7b7c2683ae7b..0fc550e7e482 100644 --- a/arch/arm/mach-omap2/timer-gp.c +++ b/arch/arm/mach-omap2/timer-gp.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <asm/mach/time.h> | 39 | #include <asm/mach/time.h> |
40 | #include <plat/dmtimer.h> | 40 | #include <plat/dmtimer.h> |
41 | #include <asm/localtimer.h> | 41 | #include <asm/localtimer.h> |
42 | #include <asm/sched_clock.h> | ||
42 | 43 | ||
43 | #include "timer-gp.h" | 44 | #include "timer-gp.h" |
44 | 45 | ||
@@ -190,6 +191,7 @@ static void __init omap2_gp_clocksource_init(void) | |||
190 | /* | 191 | /* |
191 | * clocksource | 192 | * clocksource |
192 | */ | 193 | */ |
194 | static DEFINE_CLOCK_DATA(cd); | ||
193 | static struct omap_dm_timer *gpt_clocksource; | 195 | static struct omap_dm_timer *gpt_clocksource; |
194 | static cycle_t clocksource_read_cycles(struct clocksource *cs) | 196 | static cycle_t clocksource_read_cycles(struct clocksource *cs) |
195 | { | 197 | { |
@@ -204,6 +206,15 @@ static struct clocksource clocksource_gpt = { | |||
204 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 206 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
205 | }; | 207 | }; |
206 | 208 | ||
209 | static void notrace dmtimer_update_sched_clock(void) | ||
210 | { | ||
211 | u32 cyc; | ||
212 | |||
213 | cyc = omap_dm_timer_read_counter(gpt_clocksource); | ||
214 | |||
215 | update_sched_clock(&cd, cyc, (u32)~0); | ||
216 | } | ||
217 | |||
207 | /* Setup free-running counter for clocksource */ | 218 | /* Setup free-running counter for clocksource */ |
208 | static void __init omap2_gp_clocksource_init(void) | 219 | static void __init omap2_gp_clocksource_init(void) |
209 | { | 220 | { |
@@ -224,6 +235,8 @@ static void __init omap2_gp_clocksource_init(void) | |||
224 | 235 | ||
225 | omap_dm_timer_set_load_start(gpt, 1, 0); | 236 | omap_dm_timer_set_load_start(gpt, 1, 0); |
226 | 237 | ||
238 | init_sched_clock(&cd, dmtimer_update_sched_clock, 32, tick_rate); | ||
239 | |||
227 | if (clocksource_register_hz(&clocksource_gpt, tick_rate)) | 240 | if (clocksource_register_hz(&clocksource_gpt, tick_rate)) |
228 | printk(err2, clocksource_gpt.name); | 241 | printk(err2, clocksource_gpt.name); |
229 | } | 242 | } |
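The timer-gp.c hunk wires the OMAP GP timer into the ARM sched_clock infrastructure of this kernel generation: a DEFINE_CLOCK_DATA() area, a notrace callback that feeds the free-running counter into update_sched_clock(), and a one-time init_sched_clock() call with the counter width and tick rate. A self-contained sketch of that registration, with a stub counter read standing in for omap_dm_timer_read_counter():

#include <linux/init.h>
#include <asm/sched_clock.h>

static DEFINE_CLOCK_DATA(cd);

static u32 example_read_counter(void)
{
	return 0;	/* a real driver reads its free-running hardware counter */
}

static void notrace example_update_sched_clock(void)
{
	update_sched_clock(&cd, example_read_counter(), (u32)~0);
}

static void __init example_sched_clock_init(unsigned long tick_rate)
{
	/* 32-bit counter running at tick_rate Hz */
	init_sched_clock(&cd, example_update_sched_clock, 32, tick_rate);
}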
diff --git a/arch/arm/mach-omap2/voltage.c b/arch/arm/mach-omap2/voltage.c index ed6079c94c57..12be525b8df4 100644 --- a/arch/arm/mach-omap2/voltage.c +++ b/arch/arm/mach-omap2/voltage.c | |||
@@ -471,6 +471,7 @@ static void __init vdd_debugfs_init(struct omap_vdd_info *vdd) | |||
471 | strcat(name, vdd->voltdm.name); | 471 | strcat(name, vdd->voltdm.name); |
472 | 472 | ||
473 | vdd->debug_dir = debugfs_create_dir(name, voltage_dir); | 473 | vdd->debug_dir = debugfs_create_dir(name, voltage_dir); |
474 | kfree(name); | ||
474 | if (IS_ERR(vdd->debug_dir)) { | 475 | if (IS_ERR(vdd->debug_dir)) { |
475 | pr_warning("%s: Unable to create debugfs directory for" | 476 | pr_warning("%s: Unable to create debugfs directory for" |
476 | " vdd_%s\n", __func__, vdd->voltdm.name); | 477 | " vdd_%s\n", __func__, vdd->voltdm.name); |
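The voltage.c fix frees the temporary directory name once debugfs_create_dir() has been called, since debugfs keeps its own copy of the string. A hedged sketch of the same idea, using kasprintf() for brevity instead of the kzalloc()/strcat() sequence in the file:

#include <linux/debugfs.h>
#include <linux/slab.h>

static struct dentry *example_create_vdd_dir(struct dentry *parent,
					     const char *vdd_name)
{
	struct dentry *dir;
	char *name;

	name = kasprintf(GFP_KERNEL, "vdd_%s", vdd_name);
	if (!name)
		return NULL;

	dir = debugfs_create_dir(name, parent);
	kfree(name);		/* debugfs duplicated the name already */

	return dir;
}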
diff --git a/arch/arm/mach-pxa/colibri-evalboard.c b/arch/arm/mach-pxa/colibri-evalboard.c index 6b2c800a1133..28f667e52ef9 100644 --- a/arch/arm/mach-pxa/colibri-evalboard.c +++ b/arch/arm/mach-pxa/colibri-evalboard.c | |||
@@ -50,7 +50,7 @@ static void __init colibri_mmc_init(void) | |||
50 | GPIO0_COLIBRI_PXA270_SD_DETECT; | 50 | GPIO0_COLIBRI_PXA270_SD_DETECT; |
51 | if (machine_is_colibri300()) /* PXA300 Colibri */ | 51 | if (machine_is_colibri300()) /* PXA300 Colibri */ |
52 | colibri_mci_platform_data.gpio_card_detect = | 52 | colibri_mci_platform_data.gpio_card_detect = |
53 | GPIO39_COLIBRI_PXA300_SD_DETECT; | 53 | GPIO13_COLIBRI_PXA300_SD_DETECT; |
54 | else /* PXA320 Colibri */ | 54 | else /* PXA320 Colibri */ |
55 | colibri_mci_platform_data.gpio_card_detect = | 55 | colibri_mci_platform_data.gpio_card_detect = |
56 | GPIO28_COLIBRI_PXA320_SD_DETECT; | 56 | GPIO28_COLIBRI_PXA320_SD_DETECT; |
diff --git a/arch/arm/mach-pxa/colibri-pxa300.c b/arch/arm/mach-pxa/colibri-pxa300.c index fddb16d07eb0..66dd81cbc8a0 100644 --- a/arch/arm/mach-pxa/colibri-pxa300.c +++ b/arch/arm/mach-pxa/colibri-pxa300.c | |||
@@ -41,7 +41,7 @@ static mfp_cfg_t colibri_pxa300_evalboard_pin_config[] __initdata = { | |||
41 | GPIO4_MMC1_DAT1, | 41 | GPIO4_MMC1_DAT1, |
42 | GPIO5_MMC1_DAT2, | 42 | GPIO5_MMC1_DAT2, |
43 | GPIO6_MMC1_DAT3, | 43 | GPIO6_MMC1_DAT3, |
44 | GPIO39_GPIO, /* SD detect */ | 44 | GPIO13_GPIO, /* GPIO13_COLIBRI_PXA300_SD_DETECT */ |
45 | 45 | ||
46 | /* UHC */ | 46 | /* UHC */ |
47 | GPIO0_2_USBH_PEN, | 47 | GPIO0_2_USBH_PEN, |
diff --git a/arch/arm/mach-pxa/include/mach/colibri.h b/arch/arm/mach-pxa/include/mach/colibri.h index 388a96f1ef93..cb4236e98a0f 100644 --- a/arch/arm/mach-pxa/include/mach/colibri.h +++ b/arch/arm/mach-pxa/include/mach/colibri.h | |||
@@ -60,7 +60,7 @@ static inline void colibri_pxa3xx_init_nand(void) {} | |||
60 | #define GPIO113_COLIBRI_PXA270_TS_IRQ 113 | 60 | #define GPIO113_COLIBRI_PXA270_TS_IRQ 113 |
61 | 61 | ||
62 | /* GPIO definitions for Colibri PXA300/310 */ | 62 | /* GPIO definitions for Colibri PXA300/310 */ |
63 | #define GPIO39_COLIBRI_PXA300_SD_DETECT 39 | 63 | #define GPIO13_COLIBRI_PXA300_SD_DETECT 13 |
64 | 64 | ||
65 | /* GPIO definitions for Colibri PXA320 */ | 65 | /* GPIO definitions for Colibri PXA320 */ |
66 | #define GPIO28_COLIBRI_PXA320_SD_DETECT 28 | 66 | #define GPIO28_COLIBRI_PXA320_SD_DETECT 28 |
diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c index 405b92a29793..35572c427fa8 100644 --- a/arch/arm/mach-pxa/palm27x.c +++ b/arch/arm/mach-pxa/palm27x.c | |||
@@ -323,7 +323,7 @@ static struct platform_pwm_backlight_data palm27x_backlight_data = { | |||
323 | .pwm_id = 0, | 323 | .pwm_id = 0, |
324 | .max_brightness = 0xfe, | 324 | .max_brightness = 0xfe, |
325 | .dft_brightness = 0x7e, | 325 | .dft_brightness = 0x7e, |
326 | .pwm_period_ns = 3500, | 326 | .pwm_period_ns = 3500 * 1024, |
327 | .init = palm27x_backlight_init, | 327 | .init = palm27x_backlight_init, |
328 | .notify = palm27x_backlight_notify, | 328 | .notify = palm27x_backlight_notify, |
329 | .exit = palm27x_backlight_exit, | 329 | .exit = palm27x_backlight_exit, |
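The palm27x backlight fix scales the PWM period by 1024: 3500 * 1024 = 3,584,000 ns, i.e. a PWM frequency of roughly 279 Hz, whereas the old 3500 ns period corresponded to about 286 kHz.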
diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c index 978e1b289544..1807c9abdde0 100644 --- a/arch/arm/mach-pxa/pm.c +++ b/arch/arm/mach-pxa/pm.c | |||
@@ -33,7 +33,7 @@ int pxa_pm_enter(suspend_state_t state) | |||
33 | #endif | 33 | #endif |
34 | 34 | ||
35 | /* skip registers saving for standby */ | 35 | /* skip registers saving for standby */ |
36 | if (state != PM_SUSPEND_STANDBY) { | 36 | if (state != PM_SUSPEND_STANDBY && pxa_cpu_pm_fns->save) { |
37 | pxa_cpu_pm_fns->save(sleep_save); | 37 | pxa_cpu_pm_fns->save(sleep_save); |
38 | /* before sleeping, calculate and save a checksum */ | 38 | /* before sleeping, calculate and save a checksum */ |
39 | for (i = 0; i < pxa_cpu_pm_fns->save_count - 1; i++) | 39 | for (i = 0; i < pxa_cpu_pm_fns->save_count - 1; i++) |
@@ -44,7 +44,7 @@ int pxa_pm_enter(suspend_state_t state) | |||
44 | pxa_cpu_pm_fns->enter(state); | 44 | pxa_cpu_pm_fns->enter(state); |
45 | cpu_init(); | 45 | cpu_init(); |
46 | 46 | ||
47 | if (state != PM_SUSPEND_STANDBY) { | 47 | if (state != PM_SUSPEND_STANDBY && pxa_cpu_pm_fns->restore) { |
48 | /* after sleeping, validate the checksum */ | 48 | /* after sleeping, validate the checksum */ |
49 | for (i = 0; i < pxa_cpu_pm_fns->save_count - 1; i++) | 49 | for (i = 0; i < pxa_cpu_pm_fns->save_count - 1; i++) |
50 | checksum += sleep_save[i]; | 50 | checksum += sleep_save[i]; |
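The pxa pm.c change guards the optional save/restore hooks: on platforms that leave them NULL, pxa_pm_enter() must not call through the pointer. A small sketch of that guard with an illustrative ops structure:

#include <linux/types.h>

struct example_pm_ops {
	void (*save)(unsigned long *buf);
	void (*restore)(unsigned long *buf);
};

static void example_pm_save(const struct example_pm_ops *ops,
			    unsigned long *buf, bool standby)
{
	if (!standby && ops->save)	/* skip if the hook is absent */
		ops->save(buf);
}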
diff --git a/arch/arm/mach-s5p6442/include/mach/map.h b/arch/arm/mach-s5p6442/include/mach/map.h index 203dd5a18bd5..058dab4482a1 100644 --- a/arch/arm/mach-s5p6442/include/mach/map.h +++ b/arch/arm/mach-s5p6442/include/mach/map.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* linux/arch/arm/mach-s5p6442/include/mach/map.h | 1 | /* linux/arch/arm/mach-s5p6442/include/mach/map.h |
2 | * | 2 | * |
3 | * Copyright (c) 2010 Samsung Electronics Co., Ltd. | 3 | * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. |
4 | * http://www.samsung.com/ | 4 | * http://www.samsung.com/ |
5 | * | 5 | * |
6 | * S5P6442 - Memory map definitions | 6 | * S5P6442 - Memory map definitions |
@@ -16,56 +16,61 @@ | |||
16 | #include <plat/map-base.h> | 16 | #include <plat/map-base.h> |
17 | #include <plat/map-s5p.h> | 17 | #include <plat/map-s5p.h> |
18 | 18 | ||
19 | #define S5P6442_PA_CHIPID (0xE0000000) | 19 | #define S5P6442_PA_SDRAM 0x20000000 |
20 | #define S5P_PA_CHIPID S5P6442_PA_CHIPID | ||
21 | 20 | ||
22 | #define S5P6442_PA_SYSCON (0xE0100000) | 21 | #define S5P6442_PA_I2S0 0xC0B00000 |
23 | #define S5P_PA_SYSCON S5P6442_PA_SYSCON | 22 | #define S5P6442_PA_I2S1 0xF2200000 |
24 | 23 | ||
25 | #define S5P6442_PA_GPIO (0xE0200000) | 24 | #define S5P6442_PA_CHIPID 0xE0000000 |
26 | 25 | ||
27 | #define S5P6442_PA_VIC0 (0xE4000000) | 26 | #define S5P6442_PA_SYSCON 0xE0100000 |
28 | #define S5P6442_PA_VIC1 (0xE4100000) | ||
29 | #define S5P6442_PA_VIC2 (0xE4200000) | ||
30 | 27 | ||
31 | #define S5P6442_PA_SROMC (0xE7000000) | 28 | #define S5P6442_PA_GPIO 0xE0200000 |
32 | #define S5P_PA_SROMC S5P6442_PA_SROMC | ||
33 | 29 | ||
34 | #define S5P6442_PA_MDMA 0xE8000000 | 30 | #define S5P6442_PA_VIC0 0xE4000000 |
35 | #define S5P6442_PA_PDMA 0xE9000000 | 31 | #define S5P6442_PA_VIC1 0xE4100000 |
32 | #define S5P6442_PA_VIC2 0xE4200000 | ||
36 | 33 | ||
37 | #define S5P6442_PA_TIMER (0xEA000000) | 34 | #define S5P6442_PA_SROMC 0xE7000000 |
38 | #define S5P_PA_TIMER S5P6442_PA_TIMER | ||
39 | 35 | ||
40 | #define S5P6442_PA_SYSTIMER (0xEA100000) | 36 | #define S5P6442_PA_MDMA 0xE8000000 |
37 | #define S5P6442_PA_PDMA 0xE9000000 | ||
41 | 38 | ||
42 | #define S5P6442_PA_WATCHDOG (0xEA200000) | 39 | #define S5P6442_PA_TIMER 0xEA000000 |
43 | 40 | ||
44 | #define S5P6442_PA_UART (0xEC000000) | 41 | #define S5P6442_PA_SYSTIMER 0xEA100000 |
45 | 42 | ||
46 | #define S5P_PA_UART0 (S5P6442_PA_UART + 0x0) | 43 | #define S5P6442_PA_WATCHDOG 0xEA200000 |
47 | #define S5P_PA_UART1 (S5P6442_PA_UART + 0x400) | ||
48 | #define S5P_PA_UART2 (S5P6442_PA_UART + 0x800) | ||
49 | #define S5P_SZ_UART SZ_256 | ||
50 | 44 | ||
51 | #define S5P6442_PA_IIC0 (0xEC100000) | 45 | #define S5P6442_PA_UART 0xEC000000 |
52 | 46 | ||
53 | #define S5P6442_PA_SDRAM (0x20000000) | 47 | #define S5P6442_PA_IIC0 0xEC100000 |
54 | #define S5P_PA_SDRAM S5P6442_PA_SDRAM | ||
55 | 48 | ||
56 | #define S5P6442_PA_SPI 0xEC300000 | 49 | #define S5P6442_PA_SPI 0xEC300000 |
57 | 50 | ||
58 | /* I2S */ | ||
59 | #define S5P6442_PA_I2S0 0xC0B00000 | ||
60 | #define S5P6442_PA_I2S1 0xF2200000 | ||
61 | |||
62 | /* PCM */ | ||
63 | #define S5P6442_PA_PCM0 0xF2400000 | 51 | #define S5P6442_PA_PCM0 0xF2400000 |
64 | #define S5P6442_PA_PCM1 0xF2500000 | 52 | #define S5P6442_PA_PCM1 0xF2500000 |
65 | 53 | ||
66 | /* compatibiltiy defines. */ | 54 | /* Compatibiltiy Defines */ |
55 | |||
56 | #define S3C_PA_IIC S5P6442_PA_IIC0 | ||
67 | #define S3C_PA_WDT S5P6442_PA_WATCHDOG | 57 | #define S3C_PA_WDT S5P6442_PA_WATCHDOG |
58 | |||
59 | #define S5P_PA_CHIPID S5P6442_PA_CHIPID | ||
60 | #define S5P_PA_SDRAM S5P6442_PA_SDRAM | ||
61 | #define S5P_PA_SROMC S5P6442_PA_SROMC | ||
62 | #define S5P_PA_SYSCON S5P6442_PA_SYSCON | ||
63 | #define S5P_PA_TIMER S5P6442_PA_TIMER | ||
64 | |||
65 | /* UART */ | ||
66 | |||
68 | #define S3C_PA_UART S5P6442_PA_UART | 67 | #define S3C_PA_UART S5P6442_PA_UART |
69 | #define S3C_PA_IIC S5P6442_PA_IIC0 | 68 | |
69 | #define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET)) | ||
70 | #define S5P_PA_UART0 S5P_PA_UART(0) | ||
71 | #define S5P_PA_UART1 S5P_PA_UART(1) | ||
72 | #define S5P_PA_UART2 S5P_PA_UART(2) | ||
73 | |||
74 | #define S5P_SZ_UART SZ_256 | ||
70 | 75 | ||
71 | #endif /* __ASM_ARCH_MAP_H */ | 76 | #endif /* __ASM_ARCH_MAP_H */ |
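The map.h rework across these Samsung platforms replaces the hand-written per-port UART addresses with an indexed macro built on a common stride. A sketch of that pattern with illustrative names; S3C_UART_OFFSET is assumed to be the 0x400 per-port stride visible in the old defines:

#define EXAMPLE_PA_UART		0xEC000000
#define EXAMPLE_UART_OFFSET	0x400
#define EXAMPLE_PA_UART_PORT(x)	(EXAMPLE_PA_UART + ((x) * EXAMPLE_UART_OFFSET))

/* EXAMPLE_PA_UART_PORT(2) == 0xEC000800, matching the old S5P_PA_UART2 */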
diff --git a/arch/arm/mach-s5p64x0/include/mach/map.h b/arch/arm/mach-s5p64x0/include/mach/map.h index a9365e5ba614..95c91257c7ca 100644 --- a/arch/arm/mach-s5p64x0/include/mach/map.h +++ b/arch/arm/mach-s5p64x0/include/mach/map.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* linux/arch/arm/mach-s5p64x0/include/mach/map.h | 1 | /* linux/arch/arm/mach-s5p64x0/include/mach/map.h |
2 | * | 2 | * |
3 | * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd. | 3 | * Copyright (c) 2009-2011 Samsung Electronics Co., Ltd. |
4 | * http://www.samsung.com | 4 | * http://www.samsung.com |
5 | * | 5 | * |
6 | * S5P64X0 - Memory map definitions | 6 | * S5P64X0 - Memory map definitions |
@@ -16,64 +16,46 @@ | |||
16 | #include <plat/map-base.h> | 16 | #include <plat/map-base.h> |
17 | #include <plat/map-s5p.h> | 17 | #include <plat/map-s5p.h> |
18 | 18 | ||
19 | #define S5P64X0_PA_SDRAM (0x20000000) | 19 | #define S5P64X0_PA_SDRAM 0x20000000 |
20 | 20 | ||
21 | #define S5P64X0_PA_CHIPID (0xE0000000) | 21 | #define S5P64X0_PA_CHIPID 0xE0000000 |
22 | #define S5P_PA_CHIPID S5P64X0_PA_CHIPID | ||
23 | |||
24 | #define S5P64X0_PA_SYSCON (0xE0100000) | ||
25 | #define S5P_PA_SYSCON S5P64X0_PA_SYSCON | ||
26 | |||
27 | #define S5P64X0_PA_GPIO (0xE0308000) | ||
28 | |||
29 | #define S5P64X0_PA_VIC0 (0xE4000000) | ||
30 | #define S5P64X0_PA_VIC1 (0xE4100000) | ||
31 | 22 | ||
32 | #define S5P64X0_PA_SROMC (0xE7000000) | 23 | #define S5P64X0_PA_SYSCON 0xE0100000 |
33 | #define S5P_PA_SROMC S5P64X0_PA_SROMC | ||
34 | |||
35 | #define S5P64X0_PA_PDMA (0xE9000000) | ||
36 | |||
37 | #define S5P64X0_PA_TIMER (0xEA000000) | ||
38 | #define S5P_PA_TIMER S5P64X0_PA_TIMER | ||
39 | 24 | ||
40 | #define S5P64X0_PA_RTC (0xEA100000) | 25 | #define S5P64X0_PA_GPIO 0xE0308000 |
41 | 26 | ||
42 | #define S5P64X0_PA_WDT (0xEA200000) | 27 | #define S5P64X0_PA_VIC0 0xE4000000 |
28 | #define S5P64X0_PA_VIC1 0xE4100000 | ||
43 | 29 | ||
44 | #define S5P6440_PA_UART(x) (0xEC000000 + ((x) * S3C_UART_OFFSET)) | 30 | #define S5P64X0_PA_SROMC 0xE7000000 |
45 | #define S5P6450_PA_UART(x) ((x < 5) ? (0xEC800000 + ((x) * S3C_UART_OFFSET)) : (0xEC000000)) | ||
46 | 31 | ||
47 | #define S5P_PA_UART0 S5P6450_PA_UART(0) | 32 | #define S5P64X0_PA_PDMA 0xE9000000 |
48 | #define S5P_PA_UART1 S5P6450_PA_UART(1) | ||
49 | #define S5P_PA_UART2 S5P6450_PA_UART(2) | ||
50 | #define S5P_PA_UART3 S5P6450_PA_UART(3) | ||
51 | #define S5P_PA_UART4 S5P6450_PA_UART(4) | ||
52 | #define S5P_PA_UART5 S5P6450_PA_UART(5) | ||
53 | 33 | ||
54 | #define S5P_SZ_UART SZ_256 | 34 | #define S5P64X0_PA_TIMER 0xEA000000 |
35 | #define S5P64X0_PA_RTC 0xEA100000 | ||
36 | #define S5P64X0_PA_WDT 0xEA200000 | ||
55 | 37 | ||
56 | #define S5P6440_PA_IIC0 (0xEC104000) | 38 | #define S5P6440_PA_IIC0 0xEC104000 |
57 | #define S5P6440_PA_IIC1 (0xEC20F000) | 39 | #define S5P6440_PA_IIC1 0xEC20F000 |
58 | #define S5P6450_PA_IIC0 (0xEC100000) | 40 | #define S5P6450_PA_IIC0 0xEC100000 |
59 | #define S5P6450_PA_IIC1 (0xEC200000) | 41 | #define S5P6450_PA_IIC1 0xEC200000 |
60 | 42 | ||
61 | #define S5P64X0_PA_SPI0 (0xEC400000) | 43 | #define S5P64X0_PA_SPI0 0xEC400000 |
62 | #define S5P64X0_PA_SPI1 (0xEC500000) | 44 | #define S5P64X0_PA_SPI1 0xEC500000 |
63 | 45 | ||
64 | #define S5P64X0_PA_HSOTG (0xED100000) | 46 | #define S5P64X0_PA_HSOTG 0xED100000 |
65 | 47 | ||
66 | #define S5P64X0_PA_HSMMC(x) (0xED800000 + ((x) * 0x100000)) | 48 | #define S5P64X0_PA_HSMMC(x) (0xED800000 + ((x) * 0x100000)) |
67 | 49 | ||
68 | #define S5P64X0_PA_I2S (0xF2000000) | 50 | #define S5P64X0_PA_I2S 0xF2000000 |
69 | #define S5P6450_PA_I2S1 0xF2800000 | 51 | #define S5P6450_PA_I2S1 0xF2800000 |
70 | #define S5P6450_PA_I2S2 0xF2900000 | 52 | #define S5P6450_PA_I2S2 0xF2900000 |
71 | 53 | ||
72 | #define S5P64X0_PA_PCM (0xF2100000) | 54 | #define S5P64X0_PA_PCM 0xF2100000 |
73 | 55 | ||
74 | #define S5P64X0_PA_ADC (0xF3000000) | 56 | #define S5P64X0_PA_ADC 0xF3000000 |
75 | 57 | ||
76 | /* compatibiltiy defines. */ | 58 | /* Compatibiltiy Defines */ |
77 | 59 | ||
78 | #define S3C_PA_HSMMC0 S5P64X0_PA_HSMMC(0) | 60 | #define S3C_PA_HSMMC0 S5P64X0_PA_HSMMC(0) |
79 | #define S3C_PA_HSMMC1 S5P64X0_PA_HSMMC(1) | 61 | #define S3C_PA_HSMMC1 S5P64X0_PA_HSMMC(1) |
@@ -83,6 +65,25 @@ | |||
83 | #define S3C_PA_RTC S5P64X0_PA_RTC | 65 | #define S3C_PA_RTC S5P64X0_PA_RTC |
84 | #define S3C_PA_WDT S5P64X0_PA_WDT | 66 | #define S3C_PA_WDT S5P64X0_PA_WDT |
85 | 67 | ||
68 | #define S5P_PA_CHIPID S5P64X0_PA_CHIPID | ||
69 | #define S5P_PA_SROMC S5P64X0_PA_SROMC | ||
70 | #define S5P_PA_SYSCON S5P64X0_PA_SYSCON | ||
71 | #define S5P_PA_TIMER S5P64X0_PA_TIMER | ||
72 | |||
86 | #define SAMSUNG_PA_ADC S5P64X0_PA_ADC | 73 | #define SAMSUNG_PA_ADC S5P64X0_PA_ADC |
87 | 74 | ||
75 | /* UART */ | ||
76 | |||
77 | #define S5P6440_PA_UART(x) (0xEC000000 + ((x) * S3C_UART_OFFSET)) | ||
78 | #define S5P6450_PA_UART(x) ((x < 5) ? (0xEC800000 + ((x) * S3C_UART_OFFSET)) : (0xEC000000)) | ||
79 | |||
80 | #define S5P_PA_UART0 S5P6450_PA_UART(0) | ||
81 | #define S5P_PA_UART1 S5P6450_PA_UART(1) | ||
82 | #define S5P_PA_UART2 S5P6450_PA_UART(2) | ||
83 | #define S5P_PA_UART3 S5P6450_PA_UART(3) | ||
84 | #define S5P_PA_UART4 S5P6450_PA_UART(4) | ||
85 | #define S5P_PA_UART5 S5P6450_PA_UART(5) | ||
86 | |||
87 | #define S5P_SZ_UART SZ_256 | ||
88 | |||
88 | #endif /* __ASM_ARCH_MAP_H */ | 89 | #endif /* __ASM_ARCH_MAP_H */ |
diff --git a/arch/arm/mach-s5pc100/include/mach/map.h b/arch/arm/mach-s5pc100/include/mach/map.h index 328467b346aa..ccbe6b767f7d 100644 --- a/arch/arm/mach-s5pc100/include/mach/map.h +++ b/arch/arm/mach-s5pc100/include/mach/map.h | |||
@@ -1,5 +1,8 @@ | |||
1 | /* linux/arch/arm/mach-s5pc100/include/mach/map.h | 1 | /* linux/arch/arm/mach-s5pc100/include/mach/map.h |
2 | * | 2 | * |
3 | * Copyright (c) 2011 Samsung Electronics Co., Ltd. | ||
4 | * http://www.samsung.com/ | ||
5 | * | ||
3 | * Copyright 2009 Samsung Electronics Co. | 6 | * Copyright 2009 Samsung Electronics Co. |
4 | * Byungho Min <bhmin@samsung.com> | 7 | * Byungho Min <bhmin@samsung.com> |
5 | * | 8 | * |
@@ -16,145 +19,115 @@ | |||
16 | #include <plat/map-base.h> | 19 | #include <plat/map-base.h> |
17 | #include <plat/map-s5p.h> | 20 | #include <plat/map-s5p.h> |
18 | 21 | ||
19 | /* | 22 | #define S5PC100_PA_SDRAM 0x20000000 |
20 | * map-base.h has already defined virtual memory address | 23 | |
21 | * S3C_VA_IRQ S3C_ADDR(0x00000000) irq controller(s) | 24 | #define S5PC100_PA_ONENAND 0xE7100000 |
22 | * S3C_VA_SYS S3C_ADDR(0x00100000) system control | 25 | #define S5PC100_PA_ONENAND_BUF 0xB0000000 |
23 | * S3C_VA_MEM S3C_ADDR(0x00200000) system control (not used) | 26 | |
24 | * S3C_VA_TIMER S3C_ADDR(0x00300000) timer block | 27 | #define S5PC100_PA_CHIPID 0xE0000000 |
25 | * S3C_VA_WATCHDOG S3C_ADDR(0x00400000) watchdog | ||
26 | * S3C_VA_UART S3C_ADDR(0x01000000) UART | ||
27 | * | ||
28 | * S5PC100 specific virtual memory address can be defined here | ||
29 | * S5PC1XX_VA_GPIO S3C_ADDR(0x00500000) GPIO | ||
30 | * | ||
31 | */ | ||
32 | 28 | ||
33 | #define S5PC100_PA_ONENAND_BUF (0xB0000000) | 29 | #define S5PC100_PA_SYSCON 0xE0100000 |
34 | #define S5PC100_SZ_ONENAND_BUF (SZ_256M - SZ_32M) | ||
35 | 30 | ||
36 | /* Chip ID */ | 31 | #define S5PC100_PA_OTHERS 0xE0200000 |
37 | 32 | ||
38 | #define S5PC100_PA_CHIPID (0xE0000000) | 33 | #define S5PC100_PA_GPIO 0xE0300000 |
39 | #define S5P_PA_CHIPID S5PC100_PA_CHIPID | ||
40 | 34 | ||
41 | #define S5PC100_PA_SYSCON (0xE0100000) | 35 | #define S5PC100_PA_VIC0 0xE4000000 |
42 | #define S5P_PA_SYSCON S5PC100_PA_SYSCON | 36 | #define S5PC100_PA_VIC1 0xE4100000 |
37 | #define S5PC100_PA_VIC2 0xE4200000 | ||
43 | 38 | ||
44 | #define S5PC100_PA_OTHERS (0xE0200000) | 39 | #define S5PC100_PA_SROMC 0xE7000000 |
45 | #define S5PC100_VA_OTHERS (S3C_VA_SYS + 0x10000) | ||
46 | 40 | ||
47 | #define S5PC100_PA_GPIO (0xE0300000) | 41 | #define S5PC100_PA_CFCON 0xE7800000 |
48 | #define S5PC1XX_VA_GPIO S3C_ADDR(0x00500000) | ||
49 | 42 | ||
50 | /* Interrupt */ | 43 | #define S5PC100_PA_MDMA 0xE8100000 |
51 | #define S5PC100_PA_VIC0 (0xE4000000) | 44 | #define S5PC100_PA_PDMA0 0xE9000000 |
52 | #define S5PC100_PA_VIC1 (0xE4100000) | 45 | #define S5PC100_PA_PDMA1 0xE9200000 |
53 | #define S5PC100_PA_VIC2 (0xE4200000) | ||
54 | #define S5PC100_VA_VIC S3C_VA_IRQ | ||
55 | #define S5PC100_VA_VIC_OFFSET 0x10000 | ||
56 | #define S5PC1XX_VA_VIC(x) (S5PC100_VA_VIC + ((x) * S5PC100_VA_VIC_OFFSET)) | ||
57 | 46 | ||
58 | #define S5PC100_PA_SROMC (0xE7000000) | 47 | #define S5PC100_PA_TIMER 0xEA000000 |
59 | #define S5P_PA_SROMC S5PC100_PA_SROMC | 48 | #define S5PC100_PA_SYSTIMER 0xEA100000 |
49 | #define S5PC100_PA_WATCHDOG 0xEA200000 | ||
50 | #define S5PC100_PA_RTC 0xEA300000 | ||
60 | 51 | ||
61 | #define S5PC100_PA_ONENAND (0xE7100000) | 52 | #define S5PC100_PA_UART 0xEC000000 |
62 | 53 | ||
63 | #define S5PC100_PA_CFCON (0xE7800000) | 54 | #define S5PC100_PA_IIC0 0xEC100000 |
55 | #define S5PC100_PA_IIC1 0xEC200000 | ||
64 | 56 | ||
65 | /* DMA */ | 57 | #define S5PC100_PA_SPI0 0xEC300000 |
66 | #define S5PC100_PA_MDMA (0xE8100000) | 58 | #define S5PC100_PA_SPI1 0xEC400000 |
67 | #define S5PC100_PA_PDMA0 (0xE9000000) | 59 | #define S5PC100_PA_SPI2 0xEC500000 |
68 | #define S5PC100_PA_PDMA1 (0xE9200000) | ||
69 | 60 | ||
70 | /* Timer */ | 61 | #define S5PC100_PA_USB_HSOTG 0xED200000 |
71 | #define S5PC100_PA_TIMER (0xEA000000) | 62 | #define S5PC100_PA_USB_HSPHY 0xED300000 |
72 | #define S5P_PA_TIMER S5PC100_PA_TIMER | ||
73 | 63 | ||
74 | #define S5PC100_PA_SYSTIMER (0xEA100000) | 64 | #define S5PC100_PA_HSMMC(x) (0xED800000 + ((x) * 0x100000)) |
75 | 65 | ||
76 | #define S5PC100_PA_WATCHDOG (0xEA200000) | 66 | #define S5PC100_PA_FB 0xEE000000 |
77 | #define S5PC100_PA_RTC (0xEA300000) | ||
78 | 67 | ||
79 | #define S5PC100_PA_UART (0xEC000000) | 68 | #define S5PC100_PA_FIMC0 0xEE200000 |
69 | #define S5PC100_PA_FIMC1 0xEE300000 | ||
70 | #define S5PC100_PA_FIMC2 0xEE400000 | ||
80 | 71 | ||
81 | #define S5P_PA_UART0 (S5PC100_PA_UART + 0x0) | 72 | #define S5PC100_PA_I2S0 0xF2000000 |
82 | #define S5P_PA_UART1 (S5PC100_PA_UART + 0x400) | 73 | #define S5PC100_PA_I2S1 0xF2100000 |
83 | #define S5P_PA_UART2 (S5PC100_PA_UART + 0x800) | 74 | #define S5PC100_PA_I2S2 0xF2200000 |
84 | #define S5P_PA_UART3 (S5PC100_PA_UART + 0xC00) | ||
85 | #define S5P_SZ_UART SZ_256 | ||
86 | 75 | ||
87 | #define S5PC100_PA_IIC0 (0xEC100000) | 76 | #define S5PC100_PA_AC97 0xF2300000 |
88 | #define S5PC100_PA_IIC1 (0xEC200000) | ||
89 | 77 | ||
90 | /* SPI */ | 78 | #define S5PC100_PA_PCM0 0xF2400000 |
91 | #define S5PC100_PA_SPI0 0xEC300000 | 79 | #define S5PC100_PA_PCM1 0xF2500000 |
92 | #define S5PC100_PA_SPI1 0xEC400000 | ||
93 | #define S5PC100_PA_SPI2 0xEC500000 | ||
94 | 80 | ||
95 | /* USB HS OTG */ | 81 | #define S5PC100_PA_SPDIF 0xF2600000 |
96 | #define S5PC100_PA_USB_HSOTG (0xED200000) | ||
97 | #define S5PC100_PA_USB_HSPHY (0xED300000) | ||
98 | 82 | ||
99 | #define S5PC100_PA_FB (0xEE000000) | 83 | #define S5PC100_PA_TSADC 0xF3000000 |
100 | 84 | ||
101 | #define S5PC100_PA_FIMC0 (0xEE200000) | 85 | #define S5PC100_PA_KEYPAD 0xF3100000 |
102 | #define S5PC100_PA_FIMC1 (0xEE300000) | ||
103 | #define S5PC100_PA_FIMC2 (0xEE400000) | ||
104 | 86 | ||
105 | #define S5PC100_PA_I2S0 (0xF2000000) | 87 | /* Compatibiltiy Defines */ |
106 | #define S5PC100_PA_I2S1 (0xF2100000) | ||
107 | #define S5PC100_PA_I2S2 (0xF2200000) | ||
108 | 88 | ||
109 | #define S5PC100_PA_AC97 0xF2300000 | 89 | #define S3C_PA_FB S5PC100_PA_FB |
90 | #define S3C_PA_HSMMC0 S5PC100_PA_HSMMC(0) | ||
91 | #define S3C_PA_HSMMC1 S5PC100_PA_HSMMC(1) | ||
92 | #define S3C_PA_HSMMC2 S5PC100_PA_HSMMC(2) | ||
93 | #define S3C_PA_IIC S5PC100_PA_IIC0 | ||
94 | #define S3C_PA_IIC1 S5PC100_PA_IIC1 | ||
95 | #define S3C_PA_KEYPAD S5PC100_PA_KEYPAD | ||
96 | #define S3C_PA_ONENAND S5PC100_PA_ONENAND | ||
97 | #define S3C_PA_ONENAND_BUF S5PC100_PA_ONENAND_BUF | ||
98 | #define S3C_PA_RTC S5PC100_PA_RTC | ||
99 | #define S3C_PA_TSADC S5PC100_PA_TSADC | ||
100 | #define S3C_PA_USB_HSOTG S5PC100_PA_USB_HSOTG | ||
101 | #define S3C_PA_USB_HSPHY S5PC100_PA_USB_HSPHY | ||
102 | #define S3C_PA_WDT S5PC100_PA_WATCHDOG | ||
110 | 103 | ||
111 | /* PCM */ | 104 | #define S5P_PA_CHIPID S5PC100_PA_CHIPID |
112 | #define S5PC100_PA_PCM0 0xF2400000 | 105 | #define S5P_PA_FIMC0 S5PC100_PA_FIMC0 |
113 | #define S5PC100_PA_PCM1 0xF2500000 | 106 | #define S5P_PA_FIMC1 S5PC100_PA_FIMC1 |
107 | #define S5P_PA_FIMC2 S5PC100_PA_FIMC2 | ||
108 | #define S5P_PA_SDRAM S5PC100_PA_SDRAM | ||
109 | #define S5P_PA_SROMC S5PC100_PA_SROMC | ||
110 | #define S5P_PA_SYSCON S5PC100_PA_SYSCON | ||
111 | #define S5P_PA_TIMER S5PC100_PA_TIMER | ||
114 | 112 | ||
115 | #define S5PC100_PA_SPDIF 0xF2600000 | 113 | #define SAMSUNG_PA_ADC S5PC100_PA_TSADC |
114 | #define SAMSUNG_PA_CFCON S5PC100_PA_CFCON | ||
115 | #define SAMSUNG_PA_KEYPAD S5PC100_PA_KEYPAD | ||
116 | 116 | ||
117 | #define S5PC100_PA_TSADC (0xF3000000) | 117 | #define S5PC100_VA_OTHERS (S3C_VA_SYS + 0x10000) |
118 | 118 | ||
119 | /* KEYPAD */ | 119 | #define S3C_SZ_ONENAND_BUF (SZ_256M - SZ_32M) |
120 | #define S5PC100_PA_KEYPAD (0xF3100000) | ||
121 | 120 | ||
122 | #define S5PC100_PA_HSMMC(x) (0xED800000 + ((x) * 0x100000)) | 121 | /* UART */ |
123 | 122 | ||
124 | #define S5PC100_PA_SDRAM (0x20000000) | 123 | #define S3C_PA_UART S5PC100_PA_UART |
125 | #define S5P_PA_SDRAM S5PC100_PA_SDRAM | ||
126 | 124 | ||
127 | /* compatibiltiy defines. */ | 125 | #define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET)) |
128 | #define S3C_PA_UART S5PC100_PA_UART | 126 | #define S5P_PA_UART0 S5P_PA_UART(0) |
129 | #define S3C_PA_IIC S5PC100_PA_IIC0 | 127 | #define S5P_PA_UART1 S5P_PA_UART(1) |
130 | #define S3C_PA_IIC1 S5PC100_PA_IIC1 | 128 | #define S5P_PA_UART2 S5P_PA_UART(2) |
131 | #define S3C_PA_FB S5PC100_PA_FB | 129 | #define S5P_PA_UART3 S5P_PA_UART(3) |
132 | #define S3C_PA_G2D S5PC100_PA_G2D | ||
133 | #define S3C_PA_G3D S5PC100_PA_G3D | ||
134 | #define S3C_PA_JPEG S5PC100_PA_JPEG | ||
135 | #define S3C_PA_ROTATOR S5PC100_PA_ROTATOR | ||
136 | #define S5P_VA_VIC0 S5PC1XX_VA_VIC(0) | ||
137 | #define S5P_VA_VIC1 S5PC1XX_VA_VIC(1) | ||
138 | #define S5P_VA_VIC2 S5PC1XX_VA_VIC(2) | ||
139 | #define S3C_PA_USB_HSOTG S5PC100_PA_USB_HSOTG | ||
140 | #define S3C_PA_USB_HSPHY S5PC100_PA_USB_HSPHY | ||
141 | #define S3C_PA_HSMMC0 S5PC100_PA_HSMMC(0) | ||
142 | #define S3C_PA_HSMMC1 S5PC100_PA_HSMMC(1) | ||
143 | #define S3C_PA_HSMMC2 S5PC100_PA_HSMMC(2) | ||
144 | #define S3C_PA_KEYPAD S5PC100_PA_KEYPAD | ||
145 | #define S3C_PA_WDT S5PC100_PA_WATCHDOG | ||
146 | #define S3C_PA_TSADC S5PC100_PA_TSADC | ||
147 | #define S3C_PA_ONENAND S5PC100_PA_ONENAND | ||
148 | #define S3C_PA_ONENAND_BUF S5PC100_PA_ONENAND_BUF | ||
149 | #define S3C_SZ_ONENAND_BUF S5PC100_SZ_ONENAND_BUF | ||
150 | #define S3C_PA_RTC S5PC100_PA_RTC | ||
151 | |||
152 | #define SAMSUNG_PA_ADC S5PC100_PA_TSADC | ||
153 | #define SAMSUNG_PA_CFCON S5PC100_PA_CFCON | ||
154 | #define SAMSUNG_PA_KEYPAD S5PC100_PA_KEYPAD | ||
155 | 130 | ||
156 | #define S5P_PA_FIMC0 S5PC100_PA_FIMC0 | 131 | #define S5P_SZ_UART SZ_256 |
157 | #define S5P_PA_FIMC1 S5PC100_PA_FIMC1 | ||
158 | #define S5P_PA_FIMC2 S5PC100_PA_FIMC2 | ||
159 | 132 | ||
160 | #endif /* __ASM_ARCH_C100_MAP_H */ | 133 | #endif /* __ASM_ARCH_MAP_H */ |
diff --git a/arch/arm/mach-s5pv210/include/mach/map.h b/arch/arm/mach-s5pv210/include/mach/map.h index 3611492ad681..1dd58836fd4f 100644 --- a/arch/arm/mach-s5pv210/include/mach/map.h +++ b/arch/arm/mach-s5pv210/include/mach/map.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* linux/arch/arm/mach-s5pv210/include/mach/map.h | 1 | /* linux/arch/arm/mach-s5pv210/include/mach/map.h |
2 | * | 2 | * |
3 | * Copyright (c) 2010 Samsung Electronics Co., Ltd. | 3 | * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. |
4 | * http://www.samsung.com/ | 4 | * http://www.samsung.com/ |
5 | * | 5 | * |
6 | * S5PV210 - Memory map definitions | 6 | * S5PV210 - Memory map definitions |
@@ -16,122 +16,120 @@ | |||
16 | #include <plat/map-base.h> | 16 | #include <plat/map-base.h> |
17 | #include <plat/map-s5p.h> | 17 | #include <plat/map-s5p.h> |
18 | 18 | ||
19 | #define S5PV210_PA_SROM_BANK5 (0xA8000000) | 19 | #define S5PV210_PA_SDRAM 0x20000000 |
20 | 20 | ||
21 | #define S5PC110_PA_ONENAND (0xB0000000) | 21 | #define S5PV210_PA_SROM_BANK5 0xA8000000 |
22 | #define S5P_PA_ONENAND S5PC110_PA_ONENAND | ||
23 | 22 | ||
24 | #define S5PC110_PA_ONENAND_DMA (0xB0600000) | 23 | #define S5PC110_PA_ONENAND 0xB0000000 |
25 | #define S5P_PA_ONENAND_DMA S5PC110_PA_ONENAND_DMA | 24 | #define S5PC110_PA_ONENAND_DMA 0xB0600000 |
26 | 25 | ||
27 | #define S5PV210_PA_CHIPID (0xE0000000) | 26 | #define S5PV210_PA_CHIPID 0xE0000000 |
28 | #define S5P_PA_CHIPID S5PV210_PA_CHIPID | ||
29 | 27 | ||
30 | #define S5PV210_PA_SYSCON (0xE0100000) | 28 | #define S5PV210_PA_SYSCON 0xE0100000 |
31 | #define S5P_PA_SYSCON S5PV210_PA_SYSCON | ||
32 | 29 | ||
33 | #define S5PV210_PA_GPIO (0xE0200000) | 30 | #define S5PV210_PA_GPIO 0xE0200000 |
34 | 31 | ||
35 | /* SPI */ | 32 | #define S5PV210_PA_SPDIF 0xE1100000 |
36 | #define S5PV210_PA_SPI0 0xE1300000 | ||
37 | #define S5PV210_PA_SPI1 0xE1400000 | ||
38 | 33 | ||
39 | #define S5PV210_PA_KEYPAD (0xE1600000) | 34 | #define S5PV210_PA_SPI0 0xE1300000 |
35 | #define S5PV210_PA_SPI1 0xE1400000 | ||
40 | 36 | ||
41 | #define S5PV210_PA_IIC0 (0xE1800000) | 37 | #define S5PV210_PA_KEYPAD 0xE1600000 |
42 | #define S5PV210_PA_IIC1 (0xFAB00000) | ||
43 | #define S5PV210_PA_IIC2 (0xE1A00000) | ||
44 | 38 | ||
45 | #define S5PV210_PA_TIMER (0xE2500000) | 39 | #define S5PV210_PA_ADC 0xE1700000 |
46 | #define S5P_PA_TIMER S5PV210_PA_TIMER | ||
47 | 40 | ||
48 | #define S5PV210_PA_SYSTIMER (0xE2600000) | 41 | #define S5PV210_PA_IIC0 0xE1800000 |
42 | #define S5PV210_PA_IIC1 0xFAB00000 | ||
43 | #define S5PV210_PA_IIC2 0xE1A00000 | ||
49 | 44 | ||
50 | #define S5PV210_PA_WATCHDOG (0xE2700000) | 45 | #define S5PV210_PA_AC97 0xE2200000 |
51 | 46 | ||
52 | #define S5PV210_PA_RTC (0xE2800000) | 47 | #define S5PV210_PA_PCM0 0xE2300000 |
53 | #define S5PV210_PA_UART (0xE2900000) | 48 | #define S5PV210_PA_PCM1 0xE1200000 |
49 | #define S5PV210_PA_PCM2 0xE2B00000 | ||
54 | 50 | ||
55 | #define S5P_PA_UART0 (S5PV210_PA_UART + 0x0) | 51 | #define S5PV210_PA_TIMER 0xE2500000 |
56 | #define S5P_PA_UART1 (S5PV210_PA_UART + 0x400) | 52 | #define S5PV210_PA_SYSTIMER 0xE2600000 |
57 | #define S5P_PA_UART2 (S5PV210_PA_UART + 0x800) | 53 | #define S5PV210_PA_WATCHDOG 0xE2700000 |
58 | #define S5P_PA_UART3 (S5PV210_PA_UART + 0xC00) | 54 | #define S5PV210_PA_RTC 0xE2800000 |
59 | 55 | ||
60 | #define S5P_SZ_UART SZ_256 | 56 | #define S5PV210_PA_UART 0xE2900000 |
61 | 57 | ||
62 | #define S3C_VA_UARTx(x) (S3C_VA_UART + ((x) * S3C_UART_OFFSET)) | 58 | #define S5PV210_PA_SROMC 0xE8000000 |
63 | 59 | ||
64 | #define S5PV210_PA_SROMC (0xE8000000) | 60 | #define S5PV210_PA_CFCON 0xE8200000 |
65 | #define S5P_PA_SROMC S5PV210_PA_SROMC | ||
66 | 61 | ||
67 | #define S5PV210_PA_CFCON (0xE8200000) | 62 | #define S5PV210_PA_HSMMC(x) (0xEB000000 + ((x) * 0x100000)) |
68 | 63 | ||
69 | #define S5PV210_PA_MDMA 0xFA200000 | 64 | #define S5PV210_PA_HSOTG 0xEC000000 |
70 | #define S5PV210_PA_PDMA0 0xE0900000 | 65 | #define S5PV210_PA_HSPHY 0xEC100000 |
71 | #define S5PV210_PA_PDMA1 0xE0A00000 | ||
72 | 66 | ||
73 | #define S5PV210_PA_FB (0xF8000000) | 67 | #define S5PV210_PA_IIS0 0xEEE30000 |
68 | #define S5PV210_PA_IIS1 0xE2100000 | ||
69 | #define S5PV210_PA_IIS2 0xE2A00000 | ||
74 | 70 | ||
75 | #define S5PV210_PA_FIMC0 (0xFB200000) | 71 | #define S5PV210_PA_DMC0 0xF0000000 |
76 | #define S5PV210_PA_FIMC1 (0xFB300000) | 72 | #define S5PV210_PA_DMC1 0xF1400000 |
77 | #define S5PV210_PA_FIMC2 (0xFB400000) | ||
78 | 73 | ||
79 | #define S5PV210_PA_HSMMC(x) (0xEB000000 + ((x) * 0x100000)) | 74 | #define S5PV210_PA_VIC0 0xF2000000 |
75 | #define S5PV210_PA_VIC1 0xF2100000 | ||
76 | #define S5PV210_PA_VIC2 0xF2200000 | ||
77 | #define S5PV210_PA_VIC3 0xF2300000 | ||
80 | 78 | ||
81 | #define S5PV210_PA_HSOTG (0xEC000000) | 79 | #define S5PV210_PA_FB 0xF8000000 |
82 | #define S5PV210_PA_HSPHY (0xEC100000) | ||
83 | 80 | ||
84 | #define S5PV210_PA_VIC0 (0xF2000000) | 81 | #define S5PV210_PA_MDMA 0xFA200000 |
85 | #define S5PV210_PA_VIC1 (0xF2100000) | 82 | #define S5PV210_PA_PDMA0 0xE0900000 |
86 | #define S5PV210_PA_VIC2 (0xF2200000) | 83 | #define S5PV210_PA_PDMA1 0xE0A00000 |
87 | #define S5PV210_PA_VIC3 (0xF2300000) | ||
88 | 84 | ||
89 | #define S5PV210_PA_SDRAM (0x20000000) | 85 | #define S5PV210_PA_MIPI_CSIS 0xFA600000 |
90 | #define S5P_PA_SDRAM S5PV210_PA_SDRAM | ||
91 | 86 | ||
92 | /* S/PDIF */ | 87 | #define S5PV210_PA_FIMC0 0xFB200000 |
93 | #define S5PV210_PA_SPDIF 0xE1100000 | 88 | #define S5PV210_PA_FIMC1 0xFB300000 |
89 | #define S5PV210_PA_FIMC2 0xFB400000 | ||
94 | 90 | ||
95 | /* I2S */ | 91 | /* Compatibiltiy Defines */ |
96 | #define S5PV210_PA_IIS0 0xEEE30000 | ||
97 | #define S5PV210_PA_IIS1 0xE2100000 | ||
98 | #define S5PV210_PA_IIS2 0xE2A00000 | ||
99 | 92 | ||
100 | /* PCM */ | 93 | #define S3C_PA_FB S5PV210_PA_FB |
101 | #define S5PV210_PA_PCM0 0xE2300000 | 94 | #define S3C_PA_HSMMC0 S5PV210_PA_HSMMC(0) |
102 | #define S5PV210_PA_PCM1 0xE1200000 | 95 | #define S3C_PA_HSMMC1 S5PV210_PA_HSMMC(1) |
103 | #define S5PV210_PA_PCM2 0xE2B00000 | 96 | #define S3C_PA_HSMMC2 S5PV210_PA_HSMMC(2) |
97 | #define S3C_PA_HSMMC3 S5PV210_PA_HSMMC(3) | ||
98 | #define S3C_PA_IIC S5PV210_PA_IIC0 | ||
99 | #define S3C_PA_IIC1 S5PV210_PA_IIC1 | ||
100 | #define S3C_PA_IIC2 S5PV210_PA_IIC2 | ||
101 | #define S3C_PA_RTC S5PV210_PA_RTC | ||
102 | #define S3C_PA_USB_HSOTG S5PV210_PA_HSOTG | ||
103 | #define S3C_PA_WDT S5PV210_PA_WATCHDOG | ||
104 | 104 | ||
105 | /* AC97 */ | 105 | #define S5P_PA_CHIPID S5PV210_PA_CHIPID |
106 | #define S5PV210_PA_AC97 0xE2200000 | 106 | #define S5P_PA_FIMC0 S5PV210_PA_FIMC0 |
107 | #define S5P_PA_FIMC1 S5PV210_PA_FIMC1 | ||
108 | #define S5P_PA_FIMC2 S5PV210_PA_FIMC2 | ||
109 | #define S5P_PA_MIPI_CSIS0 S5PV210_PA_MIPI_CSIS | ||
110 | #define S5P_PA_ONENAND S5PC110_PA_ONENAND | ||
111 | #define S5P_PA_ONENAND_DMA S5PC110_PA_ONENAND_DMA | ||
112 | #define S5P_PA_SDRAM S5PV210_PA_SDRAM | ||
113 | #define S5P_PA_SROMC S5PV210_PA_SROMC | ||
114 | #define S5P_PA_SYSCON S5PV210_PA_SYSCON | ||
115 | #define S5P_PA_TIMER S5PV210_PA_TIMER | ||
107 | 116 | ||
108 | #define S5PV210_PA_ADC (0xE1700000) | 117 | #define SAMSUNG_PA_ADC S5PV210_PA_ADC |
118 | #define SAMSUNG_PA_CFCON S5PV210_PA_CFCON | ||
119 | #define SAMSUNG_PA_KEYPAD S5PV210_PA_KEYPAD | ||
109 | 120 | ||
110 | #define S5PV210_PA_DMC0 (0xF0000000) | 121 | /* UART */ |
111 | #define S5PV210_PA_DMC1 (0xF1400000) | ||
112 | 122 | ||
113 | #define S5PV210_PA_MIPI_CSIS 0xFA600000 | 123 | #define S3C_VA_UARTx(x) (S3C_VA_UART + ((x) * S3C_UART_OFFSET)) |
114 | 124 | ||
115 | /* compatibiltiy defines. */ | 125 | #define S3C_PA_UART S5PV210_PA_UART |
116 | #define S3C_PA_UART S5PV210_PA_UART | ||
117 | #define S3C_PA_HSMMC0 S5PV210_PA_HSMMC(0) | ||
118 | #define S3C_PA_HSMMC1 S5PV210_PA_HSMMC(1) | ||
119 | #define S3C_PA_HSMMC2 S5PV210_PA_HSMMC(2) | ||
120 | #define S3C_PA_HSMMC3 S5PV210_PA_HSMMC(3) | ||
121 | #define S3C_PA_IIC S5PV210_PA_IIC0 | ||
122 | #define S3C_PA_IIC1 S5PV210_PA_IIC1 | ||
123 | #define S3C_PA_IIC2 S5PV210_PA_IIC2 | ||
124 | #define S3C_PA_FB S5PV210_PA_FB | ||
125 | #define S3C_PA_RTC S5PV210_PA_RTC | ||
126 | #define S3C_PA_WDT S5PV210_PA_WATCHDOG | ||
127 | #define S3C_PA_USB_HSOTG S5PV210_PA_HSOTG | ||
128 | #define S5P_PA_FIMC0 S5PV210_PA_FIMC0 | ||
129 | #define S5P_PA_FIMC1 S5PV210_PA_FIMC1 | ||
130 | #define S5P_PA_FIMC2 S5PV210_PA_FIMC2 | ||
131 | #define S5P_PA_MIPI_CSIS0 S5PV210_PA_MIPI_CSIS | ||
132 | 126 | ||
133 | #define SAMSUNG_PA_ADC S5PV210_PA_ADC | 127 | #define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET)) |
134 | #define SAMSUNG_PA_CFCON S5PV210_PA_CFCON | 128 | #define S5P_PA_UART0 S5P_PA_UART(0) |
135 | #define SAMSUNG_PA_KEYPAD S5PV210_PA_KEYPAD | 129 | #define S5P_PA_UART1 S5P_PA_UART(1) |
130 | #define S5P_PA_UART2 S5P_PA_UART(2) | ||
131 | #define S5P_PA_UART3 S5P_PA_UART(3) | ||
132 | |||
133 | #define S5P_SZ_UART SZ_256 | ||
136 | 134 | ||
137 | #endif /* __ASM_ARCH_MAP_H */ | 135 | #endif /* __ASM_ARCH_MAP_H */ |
diff --git a/arch/arm/mach-s5pv210/mach-aquila.c b/arch/arm/mach-s5pv210/mach-aquila.c index 461aa035afc0..557add4fc56c 100644 --- a/arch/arm/mach-s5pv210/mach-aquila.c +++ b/arch/arm/mach-s5pv210/mach-aquila.c | |||
@@ -149,7 +149,7 @@ static struct regulator_init_data aquila_ldo2_data = { | |||
149 | 149 | ||
150 | static struct regulator_init_data aquila_ldo3_data = { | 150 | static struct regulator_init_data aquila_ldo3_data = { |
151 | .constraints = { | 151 | .constraints = { |
152 | .name = "VUSB/MIPI_1.1V", | 152 | .name = "VUSB+MIPI_1.1V", |
153 | .min_uV = 1100000, | 153 | .min_uV = 1100000, |
154 | .max_uV = 1100000, | 154 | .max_uV = 1100000, |
155 | .apply_uV = 1, | 155 | .apply_uV = 1, |
@@ -197,7 +197,7 @@ static struct regulator_init_data aquila_ldo7_data = { | |||
197 | 197 | ||
198 | static struct regulator_init_data aquila_ldo8_data = { | 198 | static struct regulator_init_data aquila_ldo8_data = { |
199 | .constraints = { | 199 | .constraints = { |
200 | .name = "VUSB/VADC_3.3V", | 200 | .name = "VUSB+VADC_3.3V", |
201 | .min_uV = 3300000, | 201 | .min_uV = 3300000, |
202 | .max_uV = 3300000, | 202 | .max_uV = 3300000, |
203 | .apply_uV = 1, | 203 | .apply_uV = 1, |
@@ -207,7 +207,7 @@ static struct regulator_init_data aquila_ldo8_data = { | |||
207 | 207 | ||
208 | static struct regulator_init_data aquila_ldo9_data = { | 208 | static struct regulator_init_data aquila_ldo9_data = { |
209 | .constraints = { | 209 | .constraints = { |
210 | .name = "VCC/VCAM_2.8V", | 210 | .name = "VCC+VCAM_2.8V", |
211 | .min_uV = 2800000, | 211 | .min_uV = 2800000, |
212 | .max_uV = 2800000, | 212 | .max_uV = 2800000, |
213 | .apply_uV = 1, | 213 | .apply_uV = 1, |
@@ -381,9 +381,12 @@ static struct max8998_platform_data aquila_max8998_pdata = { | |||
381 | .buck1_set1 = S5PV210_GPH0(3), | 381 | .buck1_set1 = S5PV210_GPH0(3), |
382 | .buck1_set2 = S5PV210_GPH0(4), | 382 | .buck1_set2 = S5PV210_GPH0(4), |
383 | .buck2_set3 = S5PV210_GPH0(5), | 383 | .buck2_set3 = S5PV210_GPH0(5), |
384 | .buck1_max_voltage1 = 1200000, | 384 | .buck1_voltage1 = 1200000, |
385 | .buck1_max_voltage2 = 1200000, | 385 | .buck1_voltage2 = 1200000, |
386 | .buck2_max_voltage = 1200000, | 386 | .buck1_voltage3 = 1200000, |
387 | .buck1_voltage4 = 1200000, | ||
388 | .buck2_voltage1 = 1200000, | ||
389 | .buck2_voltage2 = 1200000, | ||
387 | }; | 390 | }; |
388 | #endif | 391 | #endif |
389 | 392 | ||
diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c index e22d5112fd44..056f5c769b0a 100644 --- a/arch/arm/mach-s5pv210/mach-goni.c +++ b/arch/arm/mach-s5pv210/mach-goni.c | |||
@@ -288,7 +288,7 @@ static struct regulator_init_data goni_ldo2_data = { | |||
288 | 288 | ||
289 | static struct regulator_init_data goni_ldo3_data = { | 289 | static struct regulator_init_data goni_ldo3_data = { |
290 | .constraints = { | 290 | .constraints = { |
291 | .name = "VUSB/MIPI_1.1V", | 291 | .name = "VUSB+MIPI_1.1V", |
292 | .min_uV = 1100000, | 292 | .min_uV = 1100000, |
293 | .max_uV = 1100000, | 293 | .max_uV = 1100000, |
294 | .apply_uV = 1, | 294 | .apply_uV = 1, |
@@ -337,7 +337,7 @@ static struct regulator_init_data goni_ldo7_data = { | |||
337 | 337 | ||
338 | static struct regulator_init_data goni_ldo8_data = { | 338 | static struct regulator_init_data goni_ldo8_data = { |
339 | .constraints = { | 339 | .constraints = { |
340 | .name = "VUSB/VADC_3.3V", | 340 | .name = "VUSB+VADC_3.3V", |
341 | .min_uV = 3300000, | 341 | .min_uV = 3300000, |
342 | .max_uV = 3300000, | 342 | .max_uV = 3300000, |
343 | .apply_uV = 1, | 343 | .apply_uV = 1, |
@@ -347,7 +347,7 @@ static struct regulator_init_data goni_ldo8_data = { | |||
347 | 347 | ||
348 | static struct regulator_init_data goni_ldo9_data = { | 348 | static struct regulator_init_data goni_ldo9_data = { |
349 | .constraints = { | 349 | .constraints = { |
350 | .name = "VCC/VCAM_2.8V", | 350 | .name = "VCC+VCAM_2.8V", |
351 | .min_uV = 2800000, | 351 | .min_uV = 2800000, |
352 | .max_uV = 2800000, | 352 | .max_uV = 2800000, |
353 | .apply_uV = 1, | 353 | .apply_uV = 1, |
@@ -521,9 +521,12 @@ static struct max8998_platform_data goni_max8998_pdata = { | |||
521 | .buck1_set1 = S5PV210_GPH0(3), | 521 | .buck1_set1 = S5PV210_GPH0(3), |
522 | .buck1_set2 = S5PV210_GPH0(4), | 522 | .buck1_set2 = S5PV210_GPH0(4), |
523 | .buck2_set3 = S5PV210_GPH0(5), | 523 | .buck2_set3 = S5PV210_GPH0(5), |
524 | .buck1_max_voltage1 = 1200000, | 524 | .buck1_voltage1 = 1200000, |
525 | .buck1_max_voltage2 = 1200000, | 525 | .buck1_voltage2 = 1200000, |
526 | .buck2_max_voltage = 1200000, | 526 | .buck1_voltage3 = 1200000, |
527 | .buck1_voltage4 = 1200000, | ||
528 | .buck2_voltage1 = 1200000, | ||
529 | .buck2_voltage2 = 1200000, | ||
527 | }; | 530 | }; |
528 | #endif | 531 | #endif |
529 | 532 | ||
diff --git a/arch/arm/mach-s5pv310/Kconfig b/arch/arm/mach-s5pv310/Kconfig index 09c4c21b70cc..b2a9acc5185f 100644 --- a/arch/arm/mach-s5pv310/Kconfig +++ b/arch/arm/mach-s5pv310/Kconfig | |||
@@ -122,6 +122,7 @@ config MACH_SMDKV310 | |||
122 | select S3C_DEV_HSMMC2 | 122 | select S3C_DEV_HSMMC2 |
123 | select S3C_DEV_HSMMC3 | 123 | select S3C_DEV_HSMMC3 |
124 | select S5PV310_DEV_PD | 124 | select S5PV310_DEV_PD |
125 | select S5PV310_DEV_SYSMMU | ||
125 | select S5PV310_SETUP_I2C1 | 126 | select S5PV310_SETUP_I2C1 |
126 | select S5PV310_SETUP_SDHCI | 127 | select S5PV310_SETUP_SDHCI |
127 | help | 128 | help |
diff --git a/arch/arm/mach-s5pv310/include/mach/map.h b/arch/arm/mach-s5pv310/include/mach/map.h index 74d400625a23..901657fa7a12 100644 --- a/arch/arm/mach-s5pv310/include/mach/map.h +++ b/arch/arm/mach-s5pv310/include/mach/map.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* linux/arch/arm/mach-s5pv310/include/mach/map.h | 1 | /* linux/arch/arm/mach-s5pv310/include/mach/map.h |
2 | * | 2 | * |
3 | * Copyright (c) 2010 Samsung Electronics Co., Ltd. | 3 | * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. |
4 | * http://www.samsung.com/ | 4 | * http://www.samsung.com/ |
5 | * | 5 | * |
6 | * S5PV310 - Memory map definitions | 6 | * S5PV310 - Memory map definitions |
@@ -23,90 +23,43 @@ | |||
23 | 23 | ||
24 | #include <plat/map-s5p.h> | 24 | #include <plat/map-s5p.h> |
25 | 25 | ||
26 | #define S5PV310_PA_SYSRAM (0x02025000) | 26 | #define S5PV310_PA_SYSRAM 0x02025000 |
27 | 27 | ||
28 | #define S5PV310_PA_SROM_BANK(x) (0x04000000 + ((x) * 0x01000000)) | 28 | #define S5PV310_PA_I2S0 0x03830000 |
29 | 29 | #define S5PV310_PA_I2S1 0xE3100000 | |
30 | #define S5PC210_PA_ONENAND (0x0C000000) | 30 | #define S5PV310_PA_I2S2 0xE2A00000 |
31 | #define S5P_PA_ONENAND S5PC210_PA_ONENAND | ||
32 | |||
33 | #define S5PC210_PA_ONENAND_DMA (0x0C600000) | ||
34 | #define S5P_PA_ONENAND_DMA S5PC210_PA_ONENAND_DMA | ||
35 | |||
36 | #define S5PV310_PA_CHIPID (0x10000000) | ||
37 | #define S5P_PA_CHIPID S5PV310_PA_CHIPID | ||
38 | |||
39 | #define S5PV310_PA_SYSCON (0x10010000) | ||
40 | #define S5P_PA_SYSCON S5PV310_PA_SYSCON | ||
41 | 31 | ||
42 | #define S5PV310_PA_PMU (0x10020000) | 32 | #define S5PV310_PA_PCM0 0x03840000 |
33 | #define S5PV310_PA_PCM1 0x13980000 | ||
34 | #define S5PV310_PA_PCM2 0x13990000 | ||
43 | 35 | ||
44 | #define S5PV310_PA_CMU (0x10030000) | 36 | #define S5PV310_PA_SROM_BANK(x) (0x04000000 + ((x) * 0x01000000)) |
45 | |||
46 | #define S5PV310_PA_WATCHDOG (0x10060000) | ||
47 | #define S5PV310_PA_RTC (0x10070000) | ||
48 | |||
49 | #define S5PV310_PA_DMC0 (0x10400000) | ||
50 | |||
51 | #define S5PV310_PA_COMBINER (0x10448000) | ||
52 | |||
53 | #define S5PV310_PA_COREPERI (0x10500000) | ||
54 | #define S5PV310_PA_GIC_CPU (0x10500100) | ||
55 | #define S5PV310_PA_TWD (0x10500600) | ||
56 | #define S5PV310_PA_GIC_DIST (0x10501000) | ||
57 | #define S5PV310_PA_L2CC (0x10502000) | ||
58 | |||
59 | /* DMA */ | ||
60 | #define S5PV310_PA_MDMA 0x10810000 | ||
61 | #define S5PV310_PA_PDMA0 0x12680000 | ||
62 | #define S5PV310_PA_PDMA1 0x12690000 | ||
63 | |||
64 | #define S5PV310_PA_GPIO1 (0x11400000) | ||
65 | #define S5PV310_PA_GPIO2 (0x11000000) | ||
66 | #define S5PV310_PA_GPIO3 (0x03860000) | ||
67 | |||
68 | #define S5PV310_PA_MIPI_CSIS0 0x11880000 | ||
69 | #define S5PV310_PA_MIPI_CSIS1 0x11890000 | ||
70 | 37 | ||
71 | #define S5PV310_PA_HSMMC(x) (0x12510000 + ((x) * 0x10000)) | 38 | #define S5PC210_PA_ONENAND 0x0C000000 |
39 | #define S5PC210_PA_ONENAND_DMA 0x0C600000 | ||
72 | 40 | ||
73 | #define S5PV310_PA_SROMC (0x12570000) | 41 | #define S5PV310_PA_CHIPID 0x10000000 |
74 | #define S5P_PA_SROMC S5PV310_PA_SROMC | ||
75 | 42 | ||
76 | /* S/PDIF */ | 43 | #define S5PV310_PA_SYSCON 0x10010000 |
77 | #define S5PV310_PA_SPDIF 0xE1100000 | 44 | #define S5PV310_PA_PMU 0x10020000 |
45 | #define S5PV310_PA_CMU 0x10030000 | ||
78 | 46 | ||
79 | /* I2S */ | 47 | #define S5PV310_PA_WATCHDOG 0x10060000 |
80 | #define S5PV310_PA_I2S0 0x03830000 | 48 | #define S5PV310_PA_RTC 0x10070000 |
81 | #define S5PV310_PA_I2S1 0xE3100000 | ||
82 | #define S5PV310_PA_I2S2 0xE2A00000 | ||
83 | 49 | ||
84 | /* PCM */ | 50 | #define S5PV310_PA_DMC0 0x10400000 |
85 | #define S5PV310_PA_PCM0 0x03840000 | ||
86 | #define S5PV310_PA_PCM1 0x13980000 | ||
87 | #define S5PV310_PA_PCM2 0x13990000 | ||
88 | 51 | ||
89 | /* AC97 */ | 52 | #define S5PV310_PA_COMBINER 0x10448000 |
90 | #define S5PV310_PA_AC97 0x139A0000 | ||
91 | 53 | ||
92 | #define S5PV310_PA_UART (0x13800000) | 54 | #define S5PV310_PA_COREPERI 0x10500000 |
55 | #define S5PV310_PA_GIC_CPU 0x10500100 | ||
56 | #define S5PV310_PA_TWD 0x10500600 | ||
57 | #define S5PV310_PA_GIC_DIST 0x10501000 | ||
58 | #define S5PV310_PA_L2CC 0x10502000 | ||
93 | 59 | ||
94 | #define S5P_PA_UART(x) (S5PV310_PA_UART + ((x) * S3C_UART_OFFSET)) | 60 | #define S5PV310_PA_MDMA 0x10810000 |
95 | #define S5P_PA_UART0 S5P_PA_UART(0) | 61 | #define S5PV310_PA_PDMA0 0x12680000 |
96 | #define S5P_PA_UART1 S5P_PA_UART(1) | 62 | #define S5PV310_PA_PDMA1 0x12690000 |
97 | #define S5P_PA_UART2 S5P_PA_UART(2) | ||
98 | #define S5P_PA_UART3 S5P_PA_UART(3) | ||
99 | #define S5P_PA_UART4 S5P_PA_UART(4) | ||
100 | |||
101 | #define S5P_SZ_UART SZ_256 | ||
102 | |||
103 | #define S5PV310_PA_IIC(x) (0x13860000 + ((x) * 0x10000)) | ||
104 | |||
105 | #define S5PV310_PA_TIMER (0x139D0000) | ||
106 | #define S5P_PA_TIMER S5PV310_PA_TIMER | ||
107 | |||
108 | #define S5PV310_PA_SDRAM (0x40000000) | ||
109 | #define S5P_PA_SDRAM S5PV310_PA_SDRAM | ||
110 | 63 | ||
111 | #define S5PV310_PA_SYSMMU_MDMA 0x10A40000 | 64 | #define S5PV310_PA_SYSMMU_MDMA 0x10A40000 |
112 | #define S5PV310_PA_SYSMMU_SSS 0x10A50000 | 65 | #define S5PV310_PA_SYSMMU_SSS 0x10A50000 |
@@ -124,11 +77,32 @@ | |||
124 | #define S5PV310_PA_SYSMMU_TV 0x12E20000 | 77 | #define S5PV310_PA_SYSMMU_TV 0x12E20000 |
125 | #define S5PV310_PA_SYSMMU_MFC_L 0x13620000 | 78 | #define S5PV310_PA_SYSMMU_MFC_L 0x13620000 |
126 | #define S5PV310_PA_SYSMMU_MFC_R 0x13630000 | 79 | #define S5PV310_PA_SYSMMU_MFC_R 0x13630000 |
127 | #define S5PV310_SYSMMU_TOTAL_IPNUM 16 | ||
128 | #define S5P_SYSMMU_TOTAL_IPNUM S5PV310_SYSMMU_TOTAL_IPNUM | ||
129 | 80 | ||
130 | /* compatibiltiy defines. */ | 81 | #define S5PV310_PA_GPIO1 0x11400000 |
131 | #define S3C_PA_UART S5PV310_PA_UART | 82 | #define S5PV310_PA_GPIO2 0x11000000 |
83 | #define S5PV310_PA_GPIO3 0x03860000 | ||
84 | |||
85 | #define S5PV310_PA_MIPI_CSIS0 0x11880000 | ||
86 | #define S5PV310_PA_MIPI_CSIS1 0x11890000 | ||
87 | |||
88 | #define S5PV310_PA_HSMMC(x) (0x12510000 + ((x) * 0x10000)) | ||
89 | |||
90 | #define S5PV310_PA_SROMC 0x12570000 | ||
91 | |||
92 | #define S5PV310_PA_UART 0x13800000 | ||
93 | |||
94 | #define S5PV310_PA_IIC(x) (0x13860000 + ((x) * 0x10000)) | ||
95 | |||
96 | #define S5PV310_PA_AC97 0x139A0000 | ||
97 | |||
98 | #define S5PV310_PA_TIMER 0x139D0000 | ||
99 | |||
100 | #define S5PV310_PA_SDRAM 0x40000000 | ||
101 | |||
102 | #define S5PV310_PA_SPDIF 0xE1100000 | ||
103 | |||
104 | /* Compatibility Defines */ ||
105 | |||
132 | #define S3C_PA_HSMMC0 S5PV310_PA_HSMMC(0) | 106 | #define S3C_PA_HSMMC0 S5PV310_PA_HSMMC(0) |
133 | #define S3C_PA_HSMMC1 S5PV310_PA_HSMMC(1) | 107 | #define S3C_PA_HSMMC1 S5PV310_PA_HSMMC(1) |
134 | #define S3C_PA_HSMMC2 S5PV310_PA_HSMMC(2) | 108 | #define S3C_PA_HSMMC2 S5PV310_PA_HSMMC(2) |
@@ -143,7 +117,28 @@ | |||
143 | #define S3C_PA_IIC7 S5PV310_PA_IIC(7) | 117 | #define S3C_PA_IIC7 S5PV310_PA_IIC(7) |
144 | #define S3C_PA_RTC S5PV310_PA_RTC | 118 | #define S3C_PA_RTC S5PV310_PA_RTC |
145 | #define S3C_PA_WDT S5PV310_PA_WATCHDOG | 119 | #define S3C_PA_WDT S5PV310_PA_WATCHDOG |
120 | |||
121 | #define S5P_PA_CHIPID S5PV310_PA_CHIPID | ||
146 | #define S5P_PA_MIPI_CSIS0 S5PV310_PA_MIPI_CSIS0 | 122 | #define S5P_PA_MIPI_CSIS0 S5PV310_PA_MIPI_CSIS0 |
147 | #define S5P_PA_MIPI_CSIS1 S5PV310_PA_MIPI_CSIS1 | 123 | #define S5P_PA_MIPI_CSIS1 S5PV310_PA_MIPI_CSIS1 |
124 | #define S5P_PA_ONENAND S5PC210_PA_ONENAND | ||
125 | #define S5P_PA_ONENAND_DMA S5PC210_PA_ONENAND_DMA | ||
126 | #define S5P_PA_SDRAM S5PV310_PA_SDRAM | ||
127 | #define S5P_PA_SROMC S5PV310_PA_SROMC | ||
128 | #define S5P_PA_SYSCON S5PV310_PA_SYSCON | ||
129 | #define S5P_PA_TIMER S5PV310_PA_TIMER | ||
130 | |||
131 | /* UART */ | ||
132 | |||
133 | #define S3C_PA_UART S5PV310_PA_UART | ||
134 | |||
135 | #define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET)) | ||
136 | #define S5P_PA_UART0 S5P_PA_UART(0) | ||
137 | #define S5P_PA_UART1 S5P_PA_UART(1) | ||
138 | #define S5P_PA_UART2 S5P_PA_UART(2) | ||
139 | #define S5P_PA_UART3 S5P_PA_UART(3) | ||
140 | #define S5P_PA_UART4 S5P_PA_UART(4) | ||
141 | |||
142 | #define S5P_SZ_UART SZ_256 | ||
148 | 143 | ||
149 | #endif /* __ASM_ARCH_MAP_H */ | 144 | #endif /* __ASM_ARCH_MAP_H */ |
diff --git a/arch/arm/mach-s5pv310/include/mach/sysmmu.h b/arch/arm/mach-s5pv310/include/mach/sysmmu.h index 662fe85ff4d5..598fc5c9211b 100644 --- a/arch/arm/mach-s5pv310/include/mach/sysmmu.h +++ b/arch/arm/mach-s5pv310/include/mach/sysmmu.h | |||
@@ -13,6 +13,9 @@ | |||
13 | #ifndef __ASM_ARM_ARCH_SYSMMU_H | 13 | #ifndef __ASM_ARM_ARCH_SYSMMU_H |
14 | #define __ASM_ARM_ARCH_SYSMMU_H __FILE__ | 14 | #define __ASM_ARM_ARCH_SYSMMU_H __FILE__ |
15 | 15 | ||
16 | #define S5PV310_SYSMMU_TOTAL_IPNUM 16 | ||
17 | #define S5P_SYSMMU_TOTAL_IPNUM S5PV310_SYSMMU_TOTAL_IPNUM | ||
18 | |||
16 | enum s5pv310_sysmmu_ips { | 19 | enum s5pv310_sysmmu_ips { |
17 | SYSMMU_MDMA, | 20 | SYSMMU_MDMA, |
18 | SYSMMU_SSS, | 21 | SYSMMU_SSS, |
@@ -32,7 +35,7 @@ enum s5pv310_sysmmu_ips { | |||
32 | SYSMMU_MFC_R, | 35 | SYSMMU_MFC_R, |
33 | }; | 36 | }; |
34 | 37 | ||
35 | static char *sysmmu_ips_name[S5P_SYSMMU_TOTAL_IPNUM] = { | 38 | static char *sysmmu_ips_name[S5PV310_SYSMMU_TOTAL_IPNUM] = { |
36 | "SYSMMU_MDMA" , | 39 | "SYSMMU_MDMA" , |
37 | "SYSMMU_SSS" , | 40 | "SYSMMU_SSS" , |
38 | "SYSMMU_FIMC0" , | 41 | "SYSMMU_FIMC0" , |
diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c index d43c5ef58eb6..bd3e1bfdd6aa 100644 --- a/arch/arm/mach-sa1100/collie.c +++ b/arch/arm/mach-sa1100/collie.c | |||
@@ -241,6 +241,9 @@ static struct locomo_platform_data locomo_info = { | |||
241 | struct platform_device collie_locomo_device = { | 241 | struct platform_device collie_locomo_device = { |
242 | .name = "locomo", | 242 | .name = "locomo", |
243 | .id = 0, | 243 | .id = 0, |
244 | .dev = { | ||
245 | .platform_data = &locomo_info, | ||
246 | }, | ||
244 | .num_resources = ARRAY_SIZE(locomo_resources), | 247 | .num_resources = ARRAY_SIZE(locomo_resources), |
245 | .resource = locomo_resources, | 248 | .resource = locomo_resources, |
246 | }; | 249 | }; |
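This hunk wires locomo_info into the device through dev.platform_data; without it the locomo driver would presumably find platform_data left NULL at probe time. A minimal sketch of the consuming side, assuming only what the hunk shows (the probe function name and error handling are illustrative, not taken from the locomo driver; the locomo header path is an assumption):

#include <linux/platform_device.h>
#include <linux/errno.h>
#include <asm/hardware/locomo.h>        /* struct locomo_platform_data (assumed path) */

static int example_probe(struct platform_device *pdev)
{
        struct locomo_platform_data *pdata = pdev->dev.platform_data;

        if (!pdata)             /* what a driver hits when the field is never set */
                return -EINVAL;

        /* use pdata->... here */
        return 0;
}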
diff --git a/arch/arm/mach-spear3xx/include/mach/spear320.h b/arch/arm/mach-spear3xx/include/mach/spear320.h index cacf17a958cd..53677e464d4b 100644 --- a/arch/arm/mach-spear3xx/include/mach/spear320.h +++ b/arch/arm/mach-spear3xx/include/mach/spear320.h | |||
@@ -62,7 +62,7 @@ | |||
62 | #define SPEAR320_SMII1_BASE 0xAB000000 | 62 | #define SPEAR320_SMII1_BASE 0xAB000000 |
63 | #define SPEAR320_SMII1_SIZE 0x01000000 | 63 | #define SPEAR320_SMII1_SIZE 0x01000000 |
64 | 64 | ||
65 | #define SPEAR320_SOC_CONFIG_BASE 0xB4000000 | 65 | #define SPEAR320_SOC_CONFIG_BASE 0xB3000000 |
66 | #define SPEAR320_SOC_CONFIG_SIZE 0x00000070 | 66 | #define SPEAR320_SOC_CONFIG_SIZE 0x00000070 |
67 | /* Interrupt registers offsets and masks */ | 67 | /* Interrupt registers offsets and masks */ |
68 | #define INT_STS_MASK_REG 0x04 | 68 | #define INT_STS_MASK_REG 0x04 |
diff --git a/arch/arm/mach-tegra/include/mach/kbc.h b/arch/arm/mach-tegra/include/mach/kbc.h index 66ad2760c621..04c779832c78 100644 --- a/arch/arm/mach-tegra/include/mach/kbc.h +++ b/arch/arm/mach-tegra/include/mach/kbc.h | |||
@@ -57,5 +57,6 @@ struct tegra_kbc_platform_data { | |||
57 | const struct matrix_keymap_data *keymap_data; | 57 | const struct matrix_keymap_data *keymap_data; |
58 | 58 | ||
59 | bool wakeup; | 59 | bool wakeup; |
60 | bool use_fn_map; | ||
60 | }; | 61 | }; |
61 | #endif | 62 | #endif |
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 9d30c6f804b9..e4509bae8fc4 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig | |||
@@ -405,7 +405,7 @@ config CPU_V6 | |||
405 | config CPU_32v6K | 405 | config CPU_32v6K |
406 | bool "Support ARM V6K processor extensions" if !SMP | 406 | bool "Support ARM V6K processor extensions" if !SMP |
407 | depends on CPU_V6 || CPU_V7 | 407 | depends on CPU_V6 || CPU_V7 |
408 | default y if SMP && !(ARCH_MX3 || ARCH_OMAP2) | 408 | default y if SMP |
409 | help | 409 | help |
410 | Say Y here if your ARMv6 processor supports the 'K' extension. | 410 | Say Y here if your ARMv6 processor supports the 'K' extension. |
411 | This enables the kernel to use some instructions not present | 411 | This enables the kernel to use some instructions not present |
@@ -416,7 +416,7 @@ config CPU_32v6K | |||
416 | # ARMv7 | 416 | # ARMv7 |
417 | config CPU_V7 | 417 | config CPU_V7 |
418 | bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX | 418 | bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX |
419 | select CPU_32v6K if !ARCH_OMAP2 | 419 | select CPU_32v6K |
420 | select CPU_32v7 | 420 | select CPU_32v7 |
421 | select CPU_ABRT_EV7 | 421 | select CPU_ABRT_EV7 |
422 | select CPU_PABRT_V7 | 422 | select CPU_PABRT_V7 |
@@ -644,7 +644,7 @@ config ARM_THUMBEE | |||
644 | 644 | ||
645 | config SWP_EMULATE | 645 | config SWP_EMULATE |
646 | bool "Emulate SWP/SWPB instructions" | 646 | bool "Emulate SWP/SWPB instructions" |
647 | depends on CPU_V7 && !CPU_V6 | 647 | depends on !CPU_USE_DOMAINS && CPU_V7 && !CPU_V6 |
648 | select HAVE_PROC_CPU if PROC_FS | 648 | select HAVE_PROC_CPU if PROC_FS |
649 | default y if SMP | 649 | default y if SMP |
650 | help | 650 | help |
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 170c9bb95866..f2ce38e085d2 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c | |||
@@ -49,7 +49,13 @@ static inline void cache_wait(void __iomem *reg, unsigned long mask) | |||
49 | static inline void cache_sync(void) | 49 | static inline void cache_sync(void) |
50 | { | 50 | { |
51 | void __iomem *base = l2x0_base; | 51 | void __iomem *base = l2x0_base; |
52 | |||
53 | #ifdef CONFIG_ARM_ERRATA_753970 | ||
54 | /* write to an unmapped register */ ||

55 | writel_relaxed(0, base + L2X0_DUMMY_REG); | ||
56 | #else | ||
52 | writel_relaxed(0, base + L2X0_CACHE_SYNC); | 57 | writel_relaxed(0, base + L2X0_CACHE_SYNC); |
58 | #endif | ||
53 | cache_wait(base + L2X0_CACHE_SYNC, 1); | 59 | cache_wait(base + L2X0_CACHE_SYNC, 1); |
54 | } | 60 | } |
55 | 61 | ||
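With CONFIG_ARM_ERRATA_753970 enabled, the sync write now goes to a reserved offset (L2X0_DUMMY_REG) instead of the Cache Sync register, which is the workaround for the PL310 erratum the option is named after; the poll on the Cache Sync register stays as before. A sketch of the same logic expressed as a run-time choice, meant as a drop-in fragment for the same file (the flag is invented; the patch itself uses the compile-time #ifdef above):

static int l2x0_use_dummy_sync;         /* invented: set when the erratum applies */

static inline void cache_sync(void)
{
        void __iomem *base = l2x0_base;

        if (l2x0_use_dummy_sync)
                writel_relaxed(0, base + L2X0_DUMMY_REG);
        else
                writel_relaxed(0, base + L2X0_CACHE_SYNC);
        cache_wait(base + L2X0_CACHE_SYNC, 1);
}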
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 0c1172b56b4e..8e3356239136 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
@@ -264,6 +264,12 @@ __v7_setup: | |||
264 | orreq r10, r10, #1 << 6 @ set bit #6 | 264 | orreq r10, r10, #1 << 6 @ set bit #6 |
265 | mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register | 265 | mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register |
266 | #endif | 266 | #endif |
267 | #ifdef CONFIG_ARM_ERRATA_751472 | ||
268 | cmp r6, #0x30 @ present prior to r3p0 | ||
269 | mrclt p15, 0, r10, c15, c0, 1 @ read diagnostic register | ||
270 | orrlt r10, r10, #1 << 11 @ set bit #11 | ||
271 | mcrlt p15, 0, r10, c15, c0, 1 @ write diagnostic register | ||
272 | #endif | ||
267 | 273 | ||
268 | 3: mov r10, #0 | 274 | 3: mov r10, #0 |
269 | #ifdef HARVARD_CACHE | 275 | #ifdef HARVARD_CACHE |
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c index 8aa974491dfc..c074e66ad224 100644 --- a/arch/arm/oprofile/common.c +++ b/arch/arm/oprofile/common.c | |||
@@ -10,8 +10,6 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/cpumask.h> | 12 | #include <linux/cpumask.h> |
13 | #include <linux/err.h> | ||
14 | #include <linux/errno.h> | ||
15 | #include <linux/init.h> | 13 | #include <linux/init.h> |
16 | #include <linux/mutex.h> | 14 | #include <linux/mutex.h> |
17 | #include <linux/oprofile.h> | 15 | #include <linux/oprofile.h> |
@@ -46,6 +44,7 @@ char *op_name_from_perf_id(void) | |||
46 | return NULL; | 44 | return NULL; |
47 | } | 45 | } |
48 | } | 46 | } |
47 | #endif | ||
49 | 48 | ||
50 | static int report_trace(struct stackframe *frame, void *d) | 49 | static int report_trace(struct stackframe *frame, void *d) |
51 | { | 50 | { |
@@ -85,7 +84,7 @@ static struct frame_tail* user_backtrace(struct frame_tail *tail) | |||
85 | 84 | ||
86 | /* frame pointers should strictly progress back up the stack | 85 | /* frame pointers should strictly progress back up the stack |
87 | * (towards higher addresses) */ | 86 | * (towards higher addresses) */ |
88 | if (tail >= buftail[0].fp) | 87 | if (tail + 1 >= buftail[0].fp) |
89 | return NULL; | 88 | return NULL; |
90 | 89 | ||
91 | return buftail[0].fp-1; | 90 | return buftail[0].fp-1; |
@@ -111,6 +110,7 @@ static void arm_backtrace(struct pt_regs * const regs, unsigned int depth) | |||
111 | 110 | ||
112 | int __init oprofile_arch_init(struct oprofile_operations *ops) | 111 | int __init oprofile_arch_init(struct oprofile_operations *ops) |
113 | { | 112 | { |
113 | /* provide backtrace support also in timer mode: */ | ||
114 | ops->backtrace = arm_backtrace; | 114 | ops->backtrace = arm_backtrace; |
115 | 115 | ||
116 | return oprofile_perf_init(ops); | 116 | return oprofile_perf_init(ops); |
@@ -120,11 +120,3 @@ void __exit oprofile_arch_exit(void) | |||
120 | { | 120 | { |
121 | oprofile_perf_exit(); | 121 | oprofile_perf_exit(); |
122 | } | 122 | } |
123 | #else | ||
124 | int __init oprofile_arch_init(struct oprofile_operations *ops) | ||
125 | { | ||
126 | pr_info("oprofile: hardware counters not available\n"); | ||
127 | return -ENODEV; | ||
128 | } | ||
129 | void __exit oprofile_arch_exit(void) {} | ||
130 | #endif /* CONFIG_HW_PERF_EVENTS */ | ||
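Two things happen in this file: oprofile_arch_init() now always installs arm_backtrace(), so timer-mode profiling gets backtraces even without hardware counters (the old #else stub is dropped), and user_backtrace() tightens its progress check. The next record is returned as buftail[0].fp - 1, so requiring buftail[0].fp > tail + 1 guarantees the walk moves strictly up the stack. A stand-alone C illustration of that check, assuming an illustrative frame layout and test frames:

#include <stddef.h>
#include <stdio.h>

struct frame_tail {
        struct frame_tail *fp;
        unsigned long sp;
        unsigned long lr;
};

static struct frame_tail *next_frame(struct frame_tail *tail)
{
        struct frame_tail buf = *tail;  /* the kernel copies this out safely */

        /* the next record (buf.fp - 1) must lie strictly above the current
         * one, otherwise a corrupt stack could make the walk loop forever */
        if (tail + 1 >= buf.fp)
                return NULL;

        return buf.fp - 1;
}

int main(void)
{
        struct frame_tail stack[4] = { { 0 } };

        stack[0].fp = &stack[3];        /* caller's record sits at fp - 1 */
        stack[2].fp = &stack[2];        /* self-reference: walk must stop here */

        printf("%p\n", (void *)next_frame(&stack[0]));  /* &stack[2] */
        printf("%p\n", (void *)next_frame(&stack[2]));  /* (nil): no progress */
        return 0;
}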
diff --git a/arch/arm/plat-mxc/include/mach/uncompress.h b/arch/arm/plat-mxc/include/mach/uncompress.h index 3a70ebf0477f..ff469c4f1d76 100644 --- a/arch/arm/plat-mxc/include/mach/uncompress.h +++ b/arch/arm/plat-mxc/include/mach/uncompress.h | |||
@@ -95,6 +95,7 @@ static __inline__ void __arch_decomp_setup(unsigned long arch_id) | |||
95 | case MACH_TYPE_MX35_3DS: | 95 | case MACH_TYPE_MX35_3DS: |
96 | case MACH_TYPE_PCM043: | 96 | case MACH_TYPE_PCM043: |
97 | case MACH_TYPE_LILLY1131: | 97 | case MACH_TYPE_LILLY1131: |
98 | case MACH_TYPE_VPR200: | ||
98 | uart_base = MX3X_UART1_BASE_ADDR; | 99 | uart_base = MX3X_UART1_BASE_ADDR; |
99 | break; | 100 | break; |
100 | case MACH_TYPE_MAGX_ZN5: | 101 | case MACH_TYPE_MAGX_ZN5: |
@@ -102,6 +103,7 @@ static __inline__ void __arch_decomp_setup(unsigned long arch_id) | |||
102 | break; | 103 | break; |
103 | case MACH_TYPE_MX51_BABBAGE: | 104 | case MACH_TYPE_MX51_BABBAGE: |
104 | case MACH_TYPE_EUKREA_CPUIMX51SD: | 105 | case MACH_TYPE_EUKREA_CPUIMX51SD: |
106 | case MACH_TYPE_MX51_3DS: | ||
105 | uart_base = MX51_UART1_BASE_ADDR; | 107 | uart_base = MX51_UART1_BASE_ADDR; |
106 | break; | 108 | break; |
107 | case MACH_TYPE_MX50_RDP: | 109 | case MACH_TYPE_MX50_RDP: |
diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c index 459b319a9fad..49d3208793e5 100644 --- a/arch/arm/plat-omap/mailbox.c +++ b/arch/arm/plat-omap/mailbox.c | |||
@@ -322,15 +322,18 @@ static void omap_mbox_fini(struct omap_mbox *mbox) | |||
322 | 322 | ||
323 | struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb) | 323 | struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb) |
324 | { | 324 | { |
325 | struct omap_mbox *mbox; | 325 | struct omap_mbox *_mbox, *mbox = NULL; |
326 | int ret; | 326 | int i, ret; |
327 | 327 | ||
328 | if (!mboxes) | 328 | if (!mboxes) |
329 | return ERR_PTR(-EINVAL); | 329 | return ERR_PTR(-EINVAL); |
330 | 330 | ||
331 | for (mbox = *mboxes; mbox; mbox++) | 331 | for (i = 0; (_mbox = mboxes[i]); i++) { |
332 | if (!strcmp(mbox->name, name)) | 332 | if (!strcmp(_mbox->name, name)) { |
333 | mbox = _mbox; | ||
333 | break; | 334 | break; |
335 | } | ||
336 | } | ||
334 | 337 | ||
335 | if (!mbox) | 338 | if (!mbox) |
336 | return ERR_PTR(-ENOENT); | 339 | return ERR_PTR(-ENOENT); |
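The old loop treated mboxes as an array of structures (mbox++ strides over a whole struct omap_mbox), but it is actually a NULL-terminated array of pointers; the fix indexes the array and only assigns mbox on a name match, so the !mbox test below still works. A plain C sketch of the same lookup pattern, with invented names:

#include <stdio.h>
#include <string.h>

struct box { const char *name; };

static struct box *find_box(struct box **boxes, const char *name)
{
        struct box *b, *found = NULL;
        int i;

        for (i = 0; (b = boxes[i]); i++) {      /* stop at the NULL terminator */
                if (!strcmp(b->name, name)) {
                        found = b;
                        break;
                }
        }
        return found;                   /* NULL when nothing matched */
}

int main(void)
{
        struct box a = { "dsp" }, c = { "iva" };
        struct box *all[] = { &a, &c, NULL };

        printf("%p\n", (void *)find_box(all, "iva"));
        return 0;
}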
diff --git a/arch/arm/plat-pxa/mfp.c b/arch/arm/plat-pxa/mfp.c index b77e018d36c1..a9aa5ad3f4eb 100644 --- a/arch/arm/plat-pxa/mfp.c +++ b/arch/arm/plat-pxa/mfp.c | |||
@@ -139,10 +139,11 @@ static const unsigned long mfpr_edge[] = { | |||
139 | #define mfp_configured(p) ((p)->config != -1) | 139 | #define mfp_configured(p) ((p)->config != -1) |
140 | 140 | ||
141 | /* | 141 | /* |
142 | * perform a read-back of any MFPR register to make sure the | 142 | * perform a read-back of any valid MFPR register to make sure the |
143 | * previous writings are finished | 143 | * previous writings are finished |
144 | */ | 144 | */ |
145 | #define mfpr_sync() (void)__raw_readl(mfpr_mmio_base + 0) | 145 | static unsigned long mfpr_off_readback; |
146 | #define mfpr_sync() (void)__raw_readl(mfpr_mmio_base + mfpr_off_readback) | ||
146 | 147 | ||
147 | static inline void __mfp_config_run(struct mfp_pin *p) | 148 | static inline void __mfp_config_run(struct mfp_pin *p) |
148 | { | 149 | { |
@@ -248,6 +249,9 @@ void __init mfp_init_addr(struct mfp_addr_map *map) | |||
248 | 249 | ||
249 | spin_lock_irqsave(&mfp_spin_lock, flags); | 250 | spin_lock_irqsave(&mfp_spin_lock, flags); |
250 | 251 | ||
252 | /* mfp offset for readback */ | ||
253 | mfpr_off_readback = map[0].offset; | ||
254 | |||
251 | for (p = map; p->start != MFP_PIN_INVALID; p++) { | 255 | for (p = map; p->start != MFP_PIN_INVALID; p++) { |
252 | offset = p->offset; | 256 | offset = p->offset; |
253 | i = p->start; | 257 | i = p->start; |
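mfpr_sync() exists to flush posted MMIO writes by reading something back from the same register window; offset 0 is apparently not a valid MFPR register on every part, so the first valid offset from the address map is recorded at init and used for the read-back instead. A sketch of the pattern, with illustrative names:

#include <linux/io.h>

/* write, then read back from a known-valid offset in the same window to
 * force the posted write out to the device */
static void __iomem *regs_base;
static unsigned long readback_off;      /* first valid offset, saved at init */

static inline void regs_sync(void)
{
        (void)__raw_readl(regs_base + readback_off);
}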
diff --git a/arch/arm/plat-s5p/Kconfig b/arch/arm/plat-s5p/Kconfig index deb39951a22e..557f8c507f6d 100644 --- a/arch/arm/plat-s5p/Kconfig +++ b/arch/arm/plat-s5p/Kconfig | |||
@@ -37,6 +37,14 @@ config S5P_GPIO_INT | |||
37 | help | 37 | help |
38 | Common code for the GPIO interrupts (other than external interrupts.) | 38 | Common code for the GPIO interrupts (other than external interrupts.) |
39 | 39 | ||
40 | comment "System MMU" | ||
41 | |||
42 | config S5P_SYSTEM_MMU | ||
43 | bool "S5P SYSTEM MMU" | ||
44 | depends on ARCH_S5PV310 | ||
45 | help | ||
46 | Say Y here if you want to enable System MMU | ||
47 | |||
40 | config S5P_DEV_FIMC0 | 48 | config S5P_DEV_FIMC0 |
41 | bool | 49 | bool |
42 | help | 50 | help |
@@ -66,19 +74,3 @@ config S5P_DEV_CSIS1 | |||
66 | bool | 74 | bool |
67 | help | 75 | help |
68 | Compile in platform device definitions for MIPI-CSIS channel 1 | 76 | Compile in platform device definitions for MIPI-CSIS channel 1 |
69 | |||
70 | menuconfig S5P_SYSMMU | ||
71 | bool "SYSMMU support" | ||
72 | depends on ARCH_S5PV310 | ||
73 | help | ||
74 | This is a System MMU driver for Samsung ARM based Soc. | ||
75 | |||
76 | if S5P_SYSMMU | ||
77 | |||
78 | config S5P_SYSMMU_DEBUG | ||
79 | bool "Enables debug messages" | ||
80 | depends on S5P_SYSMMU | ||
81 | help | ||
82 | This enables SYSMMU driver debug massages. | ||
83 | |||
84 | endif | ||
diff --git a/arch/arm/plat-s5p/Makefile b/arch/arm/plat-s5p/Makefile index 92efe1adcfd6..4bd5cf908977 100644 --- a/arch/arm/plat-s5p/Makefile +++ b/arch/arm/plat-s5p/Makefile | |||
@@ -19,6 +19,7 @@ obj-y += clock.o | |||
19 | obj-y += irq.o | 19 | obj-y += irq.o |
20 | obj-$(CONFIG_S5P_EXT_INT) += irq-eint.o | 20 | obj-$(CONFIG_S5P_EXT_INT) += irq-eint.o |
21 | obj-$(CONFIG_S5P_GPIO_INT) += irq-gpioint.o | 21 | obj-$(CONFIG_S5P_GPIO_INT) += irq-gpioint.o |
22 | obj-$(CONFIG_S5P_SYSTEM_MMU) += sysmmu.o | ||
22 | obj-$(CONFIG_PM) += pm.o | 23 | obj-$(CONFIG_PM) += pm.o |
23 | obj-$(CONFIG_PM) += irq-pm.o | 24 | obj-$(CONFIG_PM) += irq-pm.o |
24 | 25 | ||
@@ -30,4 +31,3 @@ obj-$(CONFIG_S5P_DEV_FIMC2) += dev-fimc2.o | |||
30 | obj-$(CONFIG_S5P_DEV_ONENAND) += dev-onenand.o | 31 | obj-$(CONFIG_S5P_DEV_ONENAND) += dev-onenand.o |
31 | obj-$(CONFIG_S5P_DEV_CSIS0) += dev-csis0.o | 32 | obj-$(CONFIG_S5P_DEV_CSIS0) += dev-csis0.o |
32 | obj-$(CONFIG_S5P_DEV_CSIS1) += dev-csis1.o | 33 | obj-$(CONFIG_S5P_DEV_CSIS1) += dev-csis1.o |
33 | obj-$(CONFIG_S5P_SYSMMU) += sysmmu.o | ||
diff --git a/arch/arm/plat-s5p/dev-uart.c b/arch/arm/plat-s5p/dev-uart.c index 6a7342886171..afaf87fdb93e 100644 --- a/arch/arm/plat-s5p/dev-uart.c +++ b/arch/arm/plat-s5p/dev-uart.c | |||
@@ -28,7 +28,7 @@ | |||
28 | static struct resource s5p_uart0_resource[] = { | 28 | static struct resource s5p_uart0_resource[] = { |
29 | [0] = { | 29 | [0] = { |
30 | .start = S5P_PA_UART0, | 30 | .start = S5P_PA_UART0, |
31 | .end = S5P_PA_UART0 + S5P_SZ_UART, | 31 | .end = S5P_PA_UART0 + S5P_SZ_UART - 1, |
32 | .flags = IORESOURCE_MEM, | 32 | .flags = IORESOURCE_MEM, |
33 | }, | 33 | }, |
34 | [1] = { | 34 | [1] = { |
@@ -51,7 +51,7 @@ static struct resource s5p_uart0_resource[] = { | |||
51 | static struct resource s5p_uart1_resource[] = { | 51 | static struct resource s5p_uart1_resource[] = { |
52 | [0] = { | 52 | [0] = { |
53 | .start = S5P_PA_UART1, | 53 | .start = S5P_PA_UART1, |
54 | .end = S5P_PA_UART1 + S5P_SZ_UART, | 54 | .end = S5P_PA_UART1 + S5P_SZ_UART - 1, |
55 | .flags = IORESOURCE_MEM, | 55 | .flags = IORESOURCE_MEM, |
56 | }, | 56 | }, |
57 | [1] = { | 57 | [1] = { |
@@ -74,7 +74,7 @@ static struct resource s5p_uart1_resource[] = { | |||
74 | static struct resource s5p_uart2_resource[] = { | 74 | static struct resource s5p_uart2_resource[] = { |
75 | [0] = { | 75 | [0] = { |
76 | .start = S5P_PA_UART2, | 76 | .start = S5P_PA_UART2, |
77 | .end = S5P_PA_UART2 + S5P_SZ_UART, | 77 | .end = S5P_PA_UART2 + S5P_SZ_UART - 1, |
78 | .flags = IORESOURCE_MEM, | 78 | .flags = IORESOURCE_MEM, |
79 | }, | 79 | }, |
80 | [1] = { | 80 | [1] = { |
@@ -98,7 +98,7 @@ static struct resource s5p_uart3_resource[] = { | |||
98 | #if CONFIG_SERIAL_SAMSUNG_UARTS > 3 | 98 | #if CONFIG_SERIAL_SAMSUNG_UARTS > 3 |
99 | [0] = { | 99 | [0] = { |
100 | .start = S5P_PA_UART3, | 100 | .start = S5P_PA_UART3, |
101 | .end = S5P_PA_UART3 + S5P_SZ_UART, | 101 | .end = S5P_PA_UART3 + S5P_SZ_UART - 1, |
102 | .flags = IORESOURCE_MEM, | 102 | .flags = IORESOURCE_MEM, |
103 | }, | 103 | }, |
104 | [1] = { | 104 | [1] = { |
@@ -123,7 +123,7 @@ static struct resource s5p_uart4_resource[] = { | |||
123 | #if CONFIG_SERIAL_SAMSUNG_UARTS > 4 | 123 | #if CONFIG_SERIAL_SAMSUNG_UARTS > 4 |
124 | [0] = { | 124 | [0] = { |
125 | .start = S5P_PA_UART4, | 125 | .start = S5P_PA_UART4, |
126 | .end = S5P_PA_UART4 + S5P_SZ_UART, | 126 | .end = S5P_PA_UART4 + S5P_SZ_UART - 1, |
127 | .flags = IORESOURCE_MEM, | 127 | .flags = IORESOURCE_MEM, |
128 | }, | 128 | }, |
129 | [1] = { | 129 | [1] = { |
@@ -148,7 +148,7 @@ static struct resource s5p_uart5_resource[] = { | |||
148 | #if CONFIG_SERIAL_SAMSUNG_UARTS > 5 | 148 | #if CONFIG_SERIAL_SAMSUNG_UARTS > 5 |
149 | [0] = { | 149 | [0] = { |
150 | .start = S5P_PA_UART5, | 150 | .start = S5P_PA_UART5, |
151 | .end = S5P_PA_UART5 + S5P_SZ_UART, | 151 | .end = S5P_PA_UART5 + S5P_SZ_UART - 1, |
152 | .flags = IORESOURCE_MEM, | 152 | .flags = IORESOURCE_MEM, |
153 | }, | 153 | }, |
154 | [1] = { | 154 | [1] = { |
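struct resource ranges are inclusive, so a window of S5P_SZ_UART (SZ_256) bytes must end at start + 256 - 1; the old values spilled one byte into the next window. A quick user-space check of the arithmetic (the start address is taken from the map.h hunk above):

#include <stdio.h>

int main(void)
{
        unsigned long start = 0x13800000UL;     /* S5PV310_PA_UART */
        unsigned long size  = 0x100;            /* SZ_256 */

        printf("end without -1: %#lx (first byte of the next window)\n",
               start + size);
        printf("end with    -1: %#lx (last byte of this window)\n",
               start + size - 1);
        return 0;
}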
diff --git a/arch/arm/plat-s5p/include/plat/sysmmu.h b/arch/arm/plat-s5p/include/plat/sysmmu.h deleted file mode 100644 index db298fc5438a..000000000000 --- a/arch/arm/plat-s5p/include/plat/sysmmu.h +++ /dev/null | |||
@@ -1,23 +0,0 @@ | |||
1 | /* linux/arch/arm/plat-s5p/include/plat/sysmmu.h | ||
2 | * | ||
3 | * Copyright (c) 2010 Samsung Electronics Co., Ltd. | ||
4 | * http://www.samsung.com/ | ||
5 | * | ||
6 | * Samsung sysmmu driver | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #ifndef __ASM_PLAT_S5P_SYSMMU_H | ||
14 | #define __ASM_PLAT_S5P_SYSMMU_H __FILE__ | ||
15 | |||
16 | /* debug macro */ | ||
17 | #ifdef CONFIG_S5P_SYSMMU_DEBUG | ||
18 | #define sysmmu_debug(fmt, arg...) printk(KERN_INFO "[%s] " fmt, __func__, ## arg) | ||
19 | #else | ||
20 | #define sysmmu_debug(fmt, arg...) do { } while (0) | ||
21 | #endif | ||
22 | |||
23 | #endif /* __ASM_PLAT_S5P_SYSMMU_H */ | ||
diff --git a/arch/arm/plat-s5p/sysmmu.c b/arch/arm/plat-s5p/sysmmu.c index d804914dc2e2..ffe8a48bc3c1 100644 --- a/arch/arm/plat-s5p/sysmmu.c +++ b/arch/arm/plat-s5p/sysmmu.c | |||
@@ -16,8 +16,6 @@ | |||
16 | #include <mach/regs-sysmmu.h> | 16 | #include <mach/regs-sysmmu.h> |
17 | #include <mach/sysmmu.h> | 17 | #include <mach/sysmmu.h> |
18 | 18 | ||
19 | #include <plat/sysmmu.h> | ||
20 | |||
21 | struct sysmmu_controller s5p_sysmmu_cntlrs[S5P_SYSMMU_TOTAL_IPNUM]; | 19 | struct sysmmu_controller s5p_sysmmu_cntlrs[S5P_SYSMMU_TOTAL_IPNUM]; |
22 | 20 | ||
23 | void s5p_sysmmu_register(struct sysmmu_controller *sysmmuconp) | 21 | void s5p_sysmmu_register(struct sysmmu_controller *sysmmuconp) |
@@ -123,7 +121,7 @@ static int s5p_sysmmu_set_tablebase(sysmmu_ips ips) | |||
123 | : "=r" (pg) : : "cc"); \ | 121 | : "=r" (pg) : : "cc"); \ |
124 | pg &= ~0x3fff; | 122 | pg &= ~0x3fff; |
125 | 123 | ||
126 | sysmmu_debug("CP15 TTBR0 : 0x%x\n", pg); | 124 | printk(KERN_INFO "%s: CP15 TTBR0 : 0x%x\n", __func__, pg); |
127 | 125 | ||
128 | /* Set sysmmu page table base address */ | 126 | /* Set sysmmu page table base address */ |
129 | __raw_writel(pg, sysmmuconp->regs + S5P_PT_BASE_ADDR); | 127 | __raw_writel(pg, sysmmuconp->regs + S5P_PT_BASE_ADDR); |
diff --git a/arch/arm/plat-samsung/dev-ts.c b/arch/arm/plat-samsung/dev-ts.c index 236ef8427d7d..3e4bd8147bf4 100644 --- a/arch/arm/plat-samsung/dev-ts.c +++ b/arch/arm/plat-samsung/dev-ts.c | |||
@@ -58,4 +58,3 @@ void __init s3c24xx_ts_set_platdata(struct s3c2410_ts_mach_info *pd) | |||
58 | 58 | ||
59 | s3c_device_ts.dev.platform_data = npd; | 59 | s3c_device_ts.dev.platform_data = npd; |
60 | } | 60 | } |
61 | EXPORT_SYMBOL(s3c24xx_ts_set_platdata); | ||
diff --git a/arch/arm/plat-samsung/include/plat/pm.h b/arch/arm/plat-samsung/include/plat/pm.h index d9025e377675..30518cc9a67c 100644 --- a/arch/arm/plat-samsung/include/plat/pm.h +++ b/arch/arm/plat-samsung/include/plat/pm.h | |||
@@ -17,6 +17,8 @@ | |||
17 | 17 | ||
18 | #include <linux/irq.h> | 18 | #include <linux/irq.h> |
19 | 19 | ||
20 | struct sys_device; | ||
21 | |||
20 | #ifdef CONFIG_PM | 22 | #ifdef CONFIG_PM |
21 | 23 | ||
22 | extern __init int s3c_pm_init(void); | 24 | extern __init int s3c_pm_init(void); |
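The added struct sys_device; is a forward declaration, presumably because prototypes later in this header pass a struct sys_device * and only need the type name, not its definition. A minimal plain C illustration, with invented names:

/* widget is never defined here: an incomplete type is enough as long as
 * the header only passes pointers to it around */
struct widget;

int widget_suspend(struct widget *w);
int widget_resume(struct widget *w);
/* the full definition is only required where members are dereferenced */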
diff --git a/arch/arm/plat-spear/include/plat/uncompress.h b/arch/arm/plat-spear/include/plat/uncompress.h index 99ba6789cc97..6dd455bafdfd 100644 --- a/arch/arm/plat-spear/include/plat/uncompress.h +++ b/arch/arm/plat-spear/include/plat/uncompress.h | |||
@@ -24,10 +24,10 @@ static inline void putc(int c) | |||
24 | { | 24 | { |
25 | void __iomem *base = (void __iomem *)SPEAR_DBG_UART_BASE; | 25 | void __iomem *base = (void __iomem *)SPEAR_DBG_UART_BASE; |
26 | 26 | ||
27 | while (readl(base + UART01x_FR) & UART01x_FR_TXFF) | 27 | while (readl_relaxed(base + UART01x_FR) & UART01x_FR_TXFF) |
28 | barrier(); | 28 | barrier(); |
29 | 29 | ||
30 | writel(c, base + UART01x_DR); | 30 | writel_relaxed(c, base + UART01x_DR); |
31 | } | 31 | } |
32 | 32 | ||
33 | static inline void flush(void) | 33 | static inline void flush(void) |
diff --git a/arch/arm/plat-spear/include/plat/vmalloc.h b/arch/arm/plat-spear/include/plat/vmalloc.h index 09e9372aea21..8c8b24d07046 100644 --- a/arch/arm/plat-spear/include/plat/vmalloc.h +++ b/arch/arm/plat-spear/include/plat/vmalloc.h | |||
@@ -14,6 +14,6 @@ | |||
14 | #ifndef __PLAT_VMALLOC_H | 14 | #ifndef __PLAT_VMALLOC_H |
15 | #define __PLAT_VMALLOC_H | 15 | #define __PLAT_VMALLOC_H |
16 | 16 | ||
17 | #define VMALLOC_END 0xF0000000 | 17 | #define VMALLOC_END 0xF0000000UL |
18 | 18 | ||
19 | #endif /* __PLAT_VMALLOC_H */ | 19 | #endif /* __PLAT_VMALLOC_H */ |
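The UL suffix only changes the constant's type: bare 0xF0000000 is an unsigned int, while VMALLOC_END is compared against unsigned long addresses, so the suffix avoids relying on implicit conversions and the warnings they can produce. A user-space check of the type difference:

#include <stdio.h>

#define END_NO_SUFFIX 0xF0000000
#define END_UL        0xF0000000UL

int main(void)
{
        /* on an LP64 build this prints "4 8": the bare constant is an
         * unsigned int, the suffixed one an unsigned long */
        printf("%zu %zu\n", sizeof(END_NO_SUFFIX), sizeof(END_UL));
        return 0;
}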
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types index 2fea897ebeb1..9d6feaabbe7d 100644 --- a/arch/arm/tools/mach-types +++ b/arch/arm/tools/mach-types | |||
@@ -12,7 +12,7 @@ | |||
12 | # | 12 | # |
13 | # http://www.arm.linux.org.uk/developer/machines/?action=new | 13 | # http://www.arm.linux.org.uk/developer/machines/?action=new |
14 | # | 14 | # |
15 | # Last update: Sun Dec 12 23:24:27 2010 | 15 | # Last update: Mon Feb 7 08:59:27 2011 |
16 | # | 16 | # |
17 | # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number | 17 | # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number |
18 | # | 18 | # |
@@ -2240,7 +2240,7 @@ arm_ultimator2 MACH_ARM_ULTIMATOR2 ARM_ULTIMATOR2 2250 | |||
2240 | vs_v210 MACH_VS_V210 VS_V210 2252 | 2240 | vs_v210 MACH_VS_V210 VS_V210 2252 |
2241 | vs_v212 MACH_VS_V212 VS_V212 2253 | 2241 | vs_v212 MACH_VS_V212 VS_V212 2253 |
2242 | hmt MACH_HMT HMT 2254 | 2242 | hmt MACH_HMT HMT 2254 |
2243 | suen3 MACH_SUEN3 SUEN3 2255 | 2243 | km_kirkwood MACH_KM_KIRKWOOD KM_KIRKWOOD 2255 |
2244 | vesper MACH_VESPER VESPER 2256 | 2244 | vesper MACH_VESPER VESPER 2256 |
2245 | str9 MACH_STR9 STR9 2257 | 2245 | str9 MACH_STR9 STR9 2257 |
2246 | omap3_wl_ff MACH_OMAP3_WL_FF OMAP3_WL_FF 2258 | 2246 | omap3_wl_ff MACH_OMAP3_WL_FF OMAP3_WL_FF 2258 |
@@ -2987,7 +2987,7 @@ pxwnas_500_1000 MACH_PXWNAS_500_1000 PXWNAS_500_1000 3001 | |||
2987 | ea20 MACH_EA20 EA20 3002 | 2987 | ea20 MACH_EA20 EA20 3002 |
2988 | awm2 MACH_AWM2 AWM2 3003 | 2988 | awm2 MACH_AWM2 AWM2 3003 |
2989 | ti8148evm MACH_TI8148EVM TI8148EVM 3004 | 2989 | ti8148evm MACH_TI8148EVM TI8148EVM 3004 |
2990 | tegra_seaboard MACH_TEGRA_SEABOARD TEGRA_SEABOARD 3005 | 2990 | seaboard MACH_SEABOARD SEABOARD 3005 |
2991 | linkstation_chlv2 MACH_LINKSTATION_CHLV2 LINKSTATION_CHLV2 3006 | 2991 | linkstation_chlv2 MACH_LINKSTATION_CHLV2 LINKSTATION_CHLV2 3006 |
2992 | tera_pro2_rack MACH_TERA_PRO2_RACK TERA_PRO2_RACK 3007 | 2992 | tera_pro2_rack MACH_TERA_PRO2_RACK TERA_PRO2_RACK 3007 |
2993 | rubys MACH_RUBYS RUBYS 3008 | 2993 | rubys MACH_RUBYS RUBYS 3008 |
@@ -3190,7 +3190,7 @@ synergy MACH_SYNERGY SYNERGY 3205 | |||
3190 | ics_if_voip MACH_ICS_IF_VOIP ICS_IF_VOIP 3206 | 3190 | ics_if_voip MACH_ICS_IF_VOIP ICS_IF_VOIP 3206 |
3191 | wlf_cragg_6410 MACH_WLF_CRAGG_6410 WLF_CRAGG_6410 3207 | 3191 | wlf_cragg_6410 MACH_WLF_CRAGG_6410 WLF_CRAGG_6410 3207 |
3192 | punica MACH_PUNICA PUNICA 3208 | 3192 | punica MACH_PUNICA PUNICA 3208 |
3193 | sbc_nt250 MACH_SBC_NT250 SBC_NT250 3209 | 3193 | trimslice MACH_TRIMSLICE TRIMSLICE 3209 |
3194 | mx27_wmultra MACH_MX27_WMULTRA MX27_WMULTRA 3210 | 3194 | mx27_wmultra MACH_MX27_WMULTRA MX27_WMULTRA 3210 |
3195 | mackerel MACH_MACKEREL MACKEREL 3211 | 3195 | mackerel MACH_MACKEREL MACKEREL 3211 |
3196 | fa9x27 MACH_FA9X27 FA9X27 3213 | 3196 | fa9x27 MACH_FA9X27 FA9X27 3213 |
@@ -3219,3 +3219,100 @@ pivicc MACH_PIVICC PIVICC 3235 | |||
3219 | pcm048 MACH_PCM048 PCM048 3236 | 3219 | pcm048 MACH_PCM048 PCM048 3236 |
3220 | dds MACH_DDS DDS 3237 | 3220 | dds MACH_DDS DDS 3237 |
3221 | chalten_xa1 MACH_CHALTEN_XA1 CHALTEN_XA1 3238 | 3221 | chalten_xa1 MACH_CHALTEN_XA1 CHALTEN_XA1 3238 |
3222 | ts48xx MACH_TS48XX TS48XX 3239 | ||
3223 | tonga2_tfttimer MACH_TONGA2_TFTTIMER TONGA2_TFTTIMER 3240 | ||
3224 | whistler MACH_WHISTLER WHISTLER 3241 | ||
3225 | asl_phoenix MACH_ASL_PHOENIX ASL_PHOENIX 3242 | ||
3226 | at91sam9263otlite MACH_AT91SAM9263OTLITE AT91SAM9263OTLITE 3243 | ||
3227 | ddplug MACH_DDPLUG DDPLUG 3244 | ||
3228 | d2plug MACH_D2PLUG D2PLUG 3245 | ||
3229 | kzm9d MACH_KZM9D KZM9D 3246 | ||
3230 | verdi_lte MACH_VERDI_LTE VERDI_LTE 3247 | ||
3231 | nanozoom MACH_NANOZOOM NANOZOOM 3248 | ||
3232 | dm3730_som_lv MACH_DM3730_SOM_LV DM3730_SOM_LV 3249 | ||
3233 | dm3730_torpedo MACH_DM3730_TORPEDO DM3730_TORPEDO 3250 | ||
3234 | anchovy MACH_ANCHOVY ANCHOVY 3251 | ||
3235 | re2rev20 MACH_RE2REV20 RE2REV20 3253 | ||
3236 | re2rev21 MACH_RE2REV21 RE2REV21 3254 | ||
3237 | cns21xx MACH_CNS21XX CNS21XX 3255 | ||
3238 | rider MACH_RIDER RIDER 3257 | ||
3239 | nsk330 MACH_NSK330 NSK330 3258 | ||
3240 | cns2133evb MACH_CNS2133EVB CNS2133EVB 3259 | ||
3241 | z3_816x_mod MACH_Z3_816X_MOD Z3_816X_MOD 3260 | ||
3242 | z3_814x_mod MACH_Z3_814X_MOD Z3_814X_MOD 3261 | ||
3243 | beect MACH_BEECT BEECT 3262 | ||
3244 | dma_thunderbug MACH_DMA_THUNDERBUG DMA_THUNDERBUG 3263 | ||
3245 | omn_at91sam9g20 MACH_OMN_AT91SAM9G20 OMN_AT91SAM9G20 3264 | ||
3246 | mx25_e2s_uc MACH_MX25_E2S_UC MX25_E2S_UC 3265 | ||
3247 | mione MACH_MIONE MIONE 3266 | ||
3248 | top9000_tcu MACH_TOP9000_TCU TOP9000_TCU 3267 | ||
3249 | top9000_bsl MACH_TOP9000_BSL TOP9000_BSL 3268 | ||
3250 | kingdom MACH_KINGDOM KINGDOM 3269 | ||
3251 | armadillo460 MACH_ARMADILLO460 ARMADILLO460 3270 | ||
3252 | lq2 MACH_LQ2 LQ2 3271 | ||
3253 | sweda_tms2 MACH_SWEDA_TMS2 SWEDA_TMS2 3272 | ||
3254 | mx53_loco MACH_MX53_LOCO MX53_LOCO 3273 | ||
3255 | acer_a8 MACH_ACER_A8 ACER_A8 3275 | ||
3256 | acer_gauguin MACH_ACER_GAUGUIN ACER_GAUGUIN 3276 | ||
3257 | guppy MACH_GUPPY GUPPY 3277 | ||
3258 | mx61_ard MACH_MX61_ARD MX61_ARD 3278 | ||
3259 | tx53 MACH_TX53 TX53 3279 | ||
3260 | omapl138_case_a3 MACH_OMAPL138_CASE_A3 OMAPL138_CASE_A3 3280 | ||
3261 | uemd MACH_UEMD UEMD 3281 | ||
3262 | ccwmx51mut MACH_CCWMX51MUT CCWMX51MUT 3282 | ||
3263 | rockhopper MACH_ROCKHOPPER ROCKHOPPER 3283 | ||
3264 | nookcolor MACH_NOOKCOLOR NOOKCOLOR 3284 | ||
3265 | hkdkc100 MACH_HKDKC100 HKDKC100 3285 | ||
3266 | ts42xx MACH_TS42XX TS42XX 3286 | ||
3267 | aebl MACH_AEBL AEBL 3287 | ||
3268 | wario MACH_WARIO WARIO 3288 | ||
3269 | gfs_spm MACH_GFS_SPM GFS_SPM 3289 | ||
3270 | cm_t3730 MACH_CM_T3730 CM_T3730 3290 | ||
3271 | isc3 MACH_ISC3 ISC3 3291 | ||
3272 | rascal MACH_RASCAL RASCAL 3292 | ||
3273 | hrefv60 MACH_HREFV60 HREFV60 3293 | ||
3274 | tpt_2_0 MACH_TPT_2_0 TPT_2_0 3294 | ||
3275 | pyramid_td MACH_PYRAMID_TD PYRAMID_TD 3295 | ||
3276 | splendor MACH_SPLENDOR SPLENDOR 3296 | ||
3277 | guf_planet MACH_GUF_PLANET GUF_PLANET 3297 | ||
3278 | msm8x60_qt MACH_MSM8X60_QT MSM8X60_QT 3298 | ||
3279 | htc_hd_mini MACH_HTC_HD_MINI HTC_HD_MINI 3299 | ||
3280 | athene MACH_ATHENE ATHENE 3300 | ||
3281 | deep_r_ek_1 MACH_DEEP_R_EK_1 DEEP_R_EK_1 3301 | ||
3282 | vivow_ct MACH_VIVOW_CT VIVOW_CT 3302 | ||
3283 | nery_1000 MACH_NERY_1000 NERY_1000 3303 | ||
3284 | rfl109145_ssrv MACH_RFL109145_SSRV RFL109145_SSRV 3304 | ||
3285 | nmh MACH_NMH NMH 3305 | ||
3286 | wn802t MACH_WN802T WN802T 3306 | ||
3287 | dragonet MACH_DRAGONET DRAGONET 3307 | ||
3288 | geneva_b MACH_GENEVA_B GENEVA_B 3308 | ||
3289 | at91sam9263desk16l MACH_AT91SAM9263DESK16L AT91SAM9263DESK16L 3309 | ||
3290 | bcmhana_sv MACH_BCMHANA_SV BCMHANA_SV 3310 | ||
3291 | bcmhana_tablet MACH_BCMHANA_TABLET BCMHANA_TABLET 3311 | ||
3292 | koi MACH_KOI KOI 3312 | ||
3293 | ts4800 MACH_TS4800 TS4800 3313 | ||
3294 | tqma9263 MACH_TQMA9263 TQMA9263 3314 | ||
3295 | holiday MACH_HOLIDAY HOLIDAY 3315 | ||
3296 | dma_6410 MACH_DMA6410 DMA6410 3316 | ||
3297 | pcats_overlay MACH_PCATS_OVERLAY PCATS_OVERLAY 3317 | ||
3298 | hwgw6410 MACH_HWGW6410 HWGW6410 3318 | ||
3299 | shenzhou MACH_SHENZHOU SHENZHOU 3319 | ||
3300 | cwme9210 MACH_CWME9210 CWME9210 3320 | ||
3301 | cwme9210js MACH_CWME9210JS CWME9210JS 3321 | ||
3302 | pgs_v1 MACH_PGS_SITARA PGS_SITARA 3322 | ||
3303 | colibri_tegra2 MACH_COLIBRI_TEGRA2 COLIBRI_TEGRA2 3323 | ||
3304 | w21 MACH_W21 W21 3324 | ||
3305 | polysat1 MACH_POLYSAT1 POLYSAT1 3325 | ||
3306 | dataway MACH_DATAWAY DATAWAY 3326 | ||
3307 | cobral138 MACH_COBRAL138 COBRAL138 3327 | ||
3308 | roverpcs8 MACH_ROVERPCS8 ROVERPCS8 3328 | ||
3309 | marvelc MACH_MARVELC MARVELC 3329 | ||
3310 | navefihid MACH_NAVEFIHID NAVEFIHID 3330 | ||
3311 | dm365_cv100 MACH_DM365_CV100 DM365_CV100 3331 | ||
3312 | able MACH_ABLE ABLE 3332 | ||
3313 | legacy MACH_LEGACY LEGACY 3333 | ||
3314 | icong MACH_ICONG ICONG 3334 | ||
3315 | rover_g8 MACH_ROVER_G8 ROVER_G8 3335 | ||
3316 | t5388p MACH_T5388P T5388P 3336 | ||
3317 | dingo MACH_DINGO DINGO 3337 | ||
3318 | goflexhome MACH_GOFLEXHOME GOFLEXHOME 3338 | ||
diff --git a/arch/blackfin/include/asm/bfin_serial.h b/arch/blackfin/include/asm/bfin_serial.h index 1ff9f1468c02..7dbc664eab1e 100644 --- a/arch/blackfin/include/asm/bfin_serial.h +++ b/arch/blackfin/include/asm/bfin_serial.h | |||
@@ -10,6 +10,7 @@ | |||
10 | #define __BFIN_ASM_SERIAL_H__ | 10 | #define __BFIN_ASM_SERIAL_H__ |
11 | 11 | ||
12 | #include <linux/serial_core.h> | 12 | #include <linux/serial_core.h> |
13 | #include <linux/spinlock.h> | ||
13 | #include <mach/anomaly.h> | 14 | #include <mach/anomaly.h> |
14 | #include <mach/bfin_serial.h> | 15 | #include <mach/bfin_serial.h> |
15 | 16 | ||
@@ -41,6 +42,7 @@ struct bfin_serial_port { | |||
41 | struct circ_buf rx_dma_buf; | 42 | struct circ_buf rx_dma_buf; |
42 | struct timer_list rx_dma_timer; | 43 | struct timer_list rx_dma_timer; |
43 | int rx_dma_nrows; | 44 | int rx_dma_nrows; |
45 | spinlock_t rx_lock; | ||
44 | unsigned int tx_dma_channel; | 46 | unsigned int tx_dma_channel; |
45 | unsigned int rx_dma_channel; | 47 | unsigned int rx_dma_channel; |
46 | struct work_struct tx_dma_workqueue; | 48 | struct work_struct tx_dma_workqueue; |
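Along with the linux/spinlock.h include it needs, the port structure gains an rx_lock. The usual pattern for such a per-port lock is sketched below; the structure and function names are illustrative, not taken from the Blackfin serial driver:

#include <linux/spinlock.h>

struct example_port {
        spinlock_t rx_lock;             /* protects the receive state */
        /* ... */
};

static void example_port_init(struct example_port *p)
{
        spin_lock_init(&p->rx_lock);
}

static void example_rx_push(struct example_port *p)
{
        unsigned long flags;

        spin_lock_irqsave(&p->rx_lock, flags);
        /* touch the receive buffer and counters here */
        spin_unlock_irqrestore(&p->rx_lock, flags);
}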
diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S index 442218980db0..c49be845f96a 100644 --- a/arch/cris/kernel/vmlinux.lds.S +++ b/arch/cris/kernel/vmlinux.lds.S | |||
@@ -72,11 +72,6 @@ SECTIONS | |||
72 | INIT_TEXT_SECTION(PAGE_SIZE) | 72 | INIT_TEXT_SECTION(PAGE_SIZE) |
73 | .init.data : { INIT_DATA } | 73 | .init.data : { INIT_DATA } |
74 | .init.setup : { INIT_SETUP(16) } | 74 | .init.setup : { INIT_SETUP(16) } |
75 | #ifdef CONFIG_ETRAX_ARCH_V32 | ||
76 | __start___param = .; | ||
77 | __param : { *(__param) } | ||
78 | __stop___param = .; | ||
79 | #endif | ||
80 | .initcall.init : { | 75 | .initcall.init : { |
81 | INIT_CALLS | 76 | INIT_CALLS |
82 | } | 77 | } |
diff --git a/arch/m68k/include/asm/string.h b/arch/m68k/include/asm/string.h index 65b131282837..32198454da70 100644 --- a/arch/m68k/include/asm/string.h +++ b/arch/m68k/include/asm/string.h | |||
@@ -99,14 +99,12 @@ static inline int strcmp(const char *cs, const char *ct) | |||
99 | : "+a" (cs), "+a" (ct), "=d" (res)); | 99 | : "+a" (cs), "+a" (ct), "=d" (res)); |
100 | return res; | 100 | return res; |
101 | } | 101 | } |
102 | #endif /* CONFIG_COLDFIRE */ | ||
102 | 103 | ||
103 | #define __HAVE_ARCH_MEMMOVE | 104 | #define __HAVE_ARCH_MEMMOVE |
104 | extern void *memmove(void *, const void *, __kernel_size_t); | 105 | extern void *memmove(void *, const void *, __kernel_size_t); |
105 | 106 | ||
106 | #define __HAVE_ARCH_MEMCMP | ||
107 | extern int memcmp(const void *, const void *, __kernel_size_t); | ||
108 | #define memcmp(d, s, n) __builtin_memcmp(d, s, n) | 107 | #define memcmp(d, s, n) __builtin_memcmp(d, s, n) |
109 | #endif /* CONFIG_COLDFIRE */ | ||
110 | 108 | ||
111 | #define __HAVE_ARCH_MEMSET | 109 | #define __HAVE_ARCH_MEMSET |
112 | extern void *memset(void *, int, __kernel_size_t); | 110 | extern void *memset(void *, int, __kernel_size_t); |
diff --git a/arch/m68k/lib/string.c b/arch/m68k/lib/string.c index 4253f870e54f..d399c5f25636 100644 --- a/arch/m68k/lib/string.c +++ b/arch/m68k/lib/string.c | |||
@@ -243,14 +243,3 @@ void *memmove(void *dest, const void *src, size_t n) | |||
243 | return xdest; | 243 | return xdest; |
244 | } | 244 | } |
245 | EXPORT_SYMBOL(memmove); | 245 | EXPORT_SYMBOL(memmove); |
246 | |||
247 | int memcmp(const void *cs, const void *ct, size_t count) | ||
248 | { | ||
249 | const unsigned char *su1, *su2; | ||
250 | |||
251 | for (su1 = cs, su2 = ct; count > 0; ++su1, ++su2, count--) | ||
252 | if (*su1 != *su2) | ||
253 | return *su1 < *su2 ? -1 : +1; | ||
254 | return 0; | ||
255 | } | ||
256 | EXPORT_SYMBOL(memcmp); | ||
diff --git a/arch/m68knommu/kernel/vmlinux.lds.S b/arch/m68knommu/kernel/vmlinux.lds.S index ef332136f96d..47e15ebfd893 100644 --- a/arch/m68knommu/kernel/vmlinux.lds.S +++ b/arch/m68knommu/kernel/vmlinux.lds.S | |||
@@ -141,6 +141,12 @@ SECTIONS { | |||
141 | *(__param) | 141 | *(__param) |
142 | __stop___param = .; | 142 | __stop___param = .; |
143 | 143 | ||
144 | /* Built-in module versions */ | ||
145 | . = ALIGN(4) ; | ||
146 | __start___modver = .; | ||
147 | *(__modver) | ||
148 | __stop___modver = .; | ||
149 | |||
144 | . = ALIGN(4) ; | 150 | . = ALIGN(4) ; |
145 | _etext = . ; | 151 | _etext = . ; |
146 | } > TEXT | 152 | } > TEXT |
diff --git a/arch/m68knommu/lib/Makefile b/arch/m68knommu/lib/Makefile index d94d709665aa..32d852e586d7 100644 --- a/arch/m68knommu/lib/Makefile +++ b/arch/m68knommu/lib/Makefile | |||
@@ -4,4 +4,4 @@ | |||
4 | 4 | ||
5 | lib-y := ashldi3.o ashrdi3.o lshrdi3.o \ | 5 | lib-y := ashldi3.o ashrdi3.o lshrdi3.o \ |
6 | muldi3.o mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o \ | 6 | muldi3.o mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o \ |
7 | checksum.o memcpy.o memset.o delay.o | 7 | checksum.o memcpy.o memmove.o memset.o delay.o |
diff --git a/arch/m68knommu/lib/memmove.c b/arch/m68knommu/lib/memmove.c new file mode 100644 index 000000000000..b3dcfe9dab7e --- /dev/null +++ b/arch/m68knommu/lib/memmove.c | |||
@@ -0,0 +1,105 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file COPYING in the main directory of this archive | ||
4 | * for more details. | ||
5 | */ | ||
6 | |||
7 | #define __IN_STRING_C | ||
8 | |||
9 | #include <linux/module.h> | ||
10 | #include <linux/string.h> | ||
11 | |||
12 | void *memmove(void *dest, const void *src, size_t n) | ||
13 | { | ||
14 | void *xdest = dest; | ||
15 | size_t temp; | ||
16 | |||
17 | if (!n) | ||
18 | return xdest; | ||
19 | |||
20 | if (dest < src) { | ||
21 | if ((long)dest & 1) { | ||
22 | char *cdest = dest; | ||
23 | const char *csrc = src; | ||
24 | *cdest++ = *csrc++; | ||
25 | dest = cdest; | ||
26 | src = csrc; | ||
27 | n--; | ||
28 | } | ||
29 | if (n > 2 && (long)dest & 2) { | ||
30 | short *sdest = dest; | ||
31 | const short *ssrc = src; | ||
32 | *sdest++ = *ssrc++; | ||
33 | dest = sdest; | ||
34 | src = ssrc; | ||
35 | n -= 2; | ||
36 | } | ||
37 | temp = n >> 2; | ||
38 | if (temp) { | ||
39 | long *ldest = dest; | ||
40 | const long *lsrc = src; | ||
41 | temp--; | ||
42 | do | ||
43 | *ldest++ = *lsrc++; | ||
44 | while (temp--); | ||
45 | dest = ldest; | ||
46 | src = lsrc; | ||
47 | } | ||
48 | if (n & 2) { | ||
49 | short *sdest = dest; | ||
50 | const short *ssrc = src; | ||
51 | *sdest++ = *ssrc++; | ||
52 | dest = sdest; | ||
53 | src = ssrc; | ||
54 | } | ||
55 | if (n & 1) { | ||
56 | char *cdest = dest; | ||
57 | const char *csrc = src; | ||
58 | *cdest = *csrc; | ||
59 | } | ||
60 | } else { | ||
61 | dest = (char *)dest + n; | ||
62 | src = (const char *)src + n; | ||
63 | if ((long)dest & 1) { | ||
64 | char *cdest = dest; | ||
65 | const char *csrc = src; | ||
66 | *--cdest = *--csrc; | ||
67 | dest = cdest; | ||
68 | src = csrc; | ||
69 | n--; | ||
70 | } | ||
71 | if (n > 2 && (long)dest & 2) { | ||
72 | short *sdest = dest; | ||
73 | const short *ssrc = src; | ||
74 | *--sdest = *--ssrc; | ||
75 | dest = sdest; | ||
76 | src = ssrc; | ||
77 | n -= 2; | ||
78 | } | ||
79 | temp = n >> 2; | ||
80 | if (temp) { | ||
81 | long *ldest = dest; | ||
82 | const long *lsrc = src; | ||
83 | temp--; | ||
84 | do | ||
85 | *--ldest = *--lsrc; | ||
86 | while (temp--); | ||
87 | dest = ldest; | ||
88 | src = lsrc; | ||
89 | } | ||
90 | if (n & 2) { | ||
91 | short *sdest = dest; | ||
92 | const short *ssrc = src; | ||
93 | *--sdest = *--ssrc; | ||
94 | dest = sdest; | ||
95 | src = ssrc; | ||
96 | } | ||
97 | if (n & 1) { | ||
98 | char *cdest = dest; | ||
99 | const char *csrc = src; | ||
100 | *--cdest = *--csrc; | ||
101 | } | ||
102 | } | ||
103 | return xdest; | ||
104 | } | ||
105 | EXPORT_SYMBOL(memmove); | ||
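The new memmove copies forward when the destination is below the source and backward otherwise, aligning to 2- and 4-byte boundaries along the way, so overlapping ranges are handled correctly. A small user-space illustration of why that matters:

#include <stdio.h>
#include <string.h>

int main(void)
{
        char buf[] = "abcdef";

        memmove(buf + 2, buf, 4);       /* destination overlaps the source */
        printf("%s\n", buf);            /* prints "ababcd" */
        return 0;
}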
diff --git a/arch/m68knommu/platform/5249/intc2.c b/arch/m68knommu/platform/5249/intc2.c index d09d9da04537..c5151f846591 100644 --- a/arch/m68knommu/platform/5249/intc2.c +++ b/arch/m68knommu/platform/5249/intc2.c | |||
@@ -50,8 +50,10 @@ static int __init mcf_intc2_init(void) | |||
50 | int irq; | 50 | int irq; |
51 | 51 | ||
52 | /* GPIO interrupt sources */ | 52 | /* GPIO interrupt sources */ |
53 | for (irq = MCFINTC2_GPIOIRQ0; (irq <= MCFINTC2_GPIOIRQ7); irq++) | 53 | for (irq = MCFINTC2_GPIOIRQ0; (irq <= MCFINTC2_GPIOIRQ7); irq++) { |
54 | irq_desc[irq].chip = &intc2_irq_gpio_chip; | 54 | irq_desc[irq].chip = &intc2_irq_gpio_chip; |
55 | set_irq_handler(irq, handle_edge_irq); | ||
56 | } | ||
55 | 57 | ||
56 | return 0; | 58 | return 0; |
57 | } | 59 | } |
diff --git a/arch/m68knommu/platform/68328/entry.S b/arch/m68knommu/platform/68328/entry.S index 240a7a6e25c8..676960cf022a 100644 --- a/arch/m68knommu/platform/68328/entry.S +++ b/arch/m68knommu/platform/68328/entry.S | |||
@@ -108,7 +108,6 @@ Luser_return: | |||
108 | movel %d1,%a2 | 108 | movel %d1,%a2 |
109 | 1: | 109 | 1: |
110 | move %a2@(TI_FLAGS),%d1 /* thread_info->flags */ | 110 | move %a2@(TI_FLAGS),%d1 /* thread_info->flags */ |
111 | andl #_TIF_WORK_MASK,%d1 | ||
112 | jne Lwork_to_do | 111 | jne Lwork_to_do |
113 | RESTORE_ALL | 112 | RESTORE_ALL |
114 | 113 | ||
diff --git a/arch/m68knommu/platform/68360/commproc.c b/arch/m68knommu/platform/68360/commproc.c index f27e688c404e..8e4e10cc0080 100644 --- a/arch/m68knommu/platform/68360/commproc.c +++ b/arch/m68knommu/platform/68360/commproc.c | |||
@@ -210,7 +210,7 @@ void | |||
210 | cpm_install_handler(int vec, void (*handler)(), void *dev_id) | 210 | cpm_install_handler(int vec, void (*handler)(), void *dev_id) |
211 | { | 211 | { |
212 | 212 | ||
213 | request_irq(vec, handler, IRQ_FLG_LOCK, "timer", dev_id); | 213 | request_irq(vec, handler, 0, "timer", dev_id); |
214 | 214 | ||
215 | /* if (cpm_vecs[vec].handler != 0) */ | 215 | /* if (cpm_vecs[vec].handler != 0) */ |
216 | /* printk(KERN_INFO "CPM interrupt %x replacing %x\n", */ | 216 | /* printk(KERN_INFO "CPM interrupt %x replacing %x\n", */ |
diff --git a/arch/m68knommu/platform/68360/config.c b/arch/m68knommu/platform/68360/config.c index ac629fa30099..9dd5bca38749 100644 --- a/arch/m68knommu/platform/68360/config.c +++ b/arch/m68knommu/platform/68360/config.c | |||
@@ -75,7 +75,7 @@ void hw_timer_init(void) | |||
75 | /* Set compare register 32Khz / 32 / 10 = 100 */ | 75 | /* Set compare register 32Khz / 32 / 10 = 100 */ |
76 | TCMP = 10; | 76 | TCMP = 10; |
77 | 77 | ||
78 | request_irq(IRQ_MACHSPEC | 1, timer_routine, IRQ_FLG_LOCK, "timer", NULL); | 78 | request_irq(IRQ_MACHSPEC | 1, timer_routine, 0, "timer", NULL); |
79 | #endif | 79 | #endif |
80 | 80 | ||
81 | /* General purpose quicc timers: MC68360UM p7-20 */ | 81 | /* General purpose quicc timers: MC68360UM p7-20 */ |
diff --git a/arch/m68knommu/platform/68360/entry.S b/arch/m68knommu/platform/68360/entry.S index 8a28788c0eea..46c1b18c9dcb 100644 --- a/arch/m68knommu/platform/68360/entry.S +++ b/arch/m68knommu/platform/68360/entry.S | |||
@@ -104,7 +104,6 @@ Luser_return: | |||
104 | movel %d1,%a2 | 104 | movel %d1,%a2 |
105 | 1: | 105 | 1: |
106 | move %a2@(TI_FLAGS),%d1 /* thread_info->flags */ | 106 | move %a2@(TI_FLAGS),%d1 /* thread_info->flags */ |
107 | andl #_TIF_WORK_MASK,%d1 | ||
108 | jne Lwork_to_do | 107 | jne Lwork_to_do |
109 | RESTORE_ALL | 108 | RESTORE_ALL |
110 | 109 | ||
diff --git a/arch/m68knommu/platform/68360/ints.c b/arch/m68knommu/platform/68360/ints.c index ad96ab1051f0..a29041c1a8a0 100644 --- a/arch/m68knommu/platform/68360/ints.c +++ b/arch/m68knommu/platform/68360/ints.c | |||
@@ -132,8 +132,8 @@ void init_IRQ(void) | |||
132 | pquicc->intr_cimr = 0x00000000; | 132 | pquicc->intr_cimr = 0x00000000; |
133 | 133 | ||
134 | for (i = 0; (i < NR_IRQS); i++) { | 134 | for (i = 0; (i < NR_IRQS); i++) { |
135 | set_irq_chip(irq, &intc_irq_chip); | 135 | set_irq_chip(i, &intc_irq_chip); |
136 | set_irq_handler(irq, handle_level_irq); | 136 | set_irq_handler(i, handle_level_irq); |
137 | } | 137 | } |
138 | } | 138 | } |
139 | 139 | ||
diff --git a/arch/m68knommu/platform/coldfire/entry.S b/arch/m68knommu/platform/coldfire/entry.S index 4ddfc3da70d8..5837cf080b6d 100644 --- a/arch/m68knommu/platform/coldfire/entry.S +++ b/arch/m68knommu/platform/coldfire/entry.S | |||
@@ -138,7 +138,6 @@ Luser_return: | |||
138 | andl #-THREAD_SIZE,%d1 /* at base of kernel stack */ | 138 | andl #-THREAD_SIZE,%d1 /* at base of kernel stack */ |
139 | movel %d1,%a0 | 139 | movel %d1,%a0 |
140 | movel %a0@(TI_FLAGS),%d1 /* get thread_info->flags */ | 140 | movel %a0@(TI_FLAGS),%d1 /* get thread_info->flags */ |
141 | andl #0xefff,%d1 | ||
142 | jne Lwork_to_do /* still work to do */ | 141 | jne Lwork_to_do /* still work to do */ |
143 | 142 | ||
144 | Lreturn: | 143 | Lreturn: |
diff --git a/arch/microblaze/include/asm/irqflags.h b/arch/microblaze/include/asm/irqflags.h index 5fd31905775d..c4532f032b3b 100644 --- a/arch/microblaze/include/asm/irqflags.h +++ b/arch/microblaze/include/asm/irqflags.h | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <asm/registers.h> | 13 | #include <asm/registers.h> |
14 | 14 | ||
15 | #ifdef CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR | 15 | #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR |
16 | 16 | ||
17 | static inline unsigned long arch_local_irq_save(void) | 17 | static inline unsigned long arch_local_irq_save(void) |
18 | { | 18 | { |
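#ifdef only asks whether the macro is defined; CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR looks to be a numeric option that is always defined (setup.c, patched further down, already tests it with #if), so the old guard enabled the MSR-instruction paths even when the value was 0. A plain C illustration of the difference, with USE_MSR_INSTR standing in for the config macro:

#include <stdio.h>

#define USE_MSR_INSTR 0         /* always defined, value comes from the config */

int main(void)
{
#ifdef USE_MSR_INSTR
        puts("#ifdef branch taken (even though the value is 0)");
#endif
#if USE_MSR_INSTR
        puts("#if branch taken");
#else
        puts("#if branch not taken");
#endif
        return 0;
}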
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h index b23f68075879..885574a73f01 100644 --- a/arch/microblaze/include/asm/pgtable.h +++ b/arch/microblaze/include/asm/pgtable.h | |||
@@ -411,20 +411,19 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | |||
411 | static inline unsigned long pte_update(pte_t *p, unsigned long clr, | 411 | static inline unsigned long pte_update(pte_t *p, unsigned long clr, |
412 | unsigned long set) | 412 | unsigned long set) |
413 | { | 413 | { |
414 | unsigned long old, tmp, msr; | 414 | unsigned long flags, old, tmp; |
415 | 415 | ||
416 | __asm__ __volatile__("\ | 416 | raw_local_irq_save(flags); |
417 | msrclr %2, 0x2\n\ | 417 | |
418 | nop\n\ | 418 | __asm__ __volatile__( "lw %0, %2, r0 \n" |
419 | lw %0, %4, r0\n\ | 419 | "andn %1, %0, %3 \n" |
420 | andn %1, %0, %5\n\ | 420 | "or %1, %1, %4 \n" |
421 | or %1, %1, %6\n\ | 421 | "sw %1, %2, r0 \n" |
422 | sw %1, %4, r0\n\ | 422 | : "=&r" (old), "=&r" (tmp) |
423 | mts rmsr, %2\n\ | 423 | : "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set) |
424 | nop" | 424 | : "cc"); |
425 | : "=&r" (old), "=&r" (tmp), "=&r" (msr), "=m" (*p) | 425 | |
426 | : "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set), "m" (*p) | 426 | raw_local_irq_restore(flags); |
427 | : "cc"); | ||
428 | 427 | ||
429 | return old; | 428 | return old; |
430 | } | 429 | } |
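The interrupt masking moves out of the inline assembly (the old msrclr/mts rmsr pair) into raw_local_irq_save()/raw_local_irq_restore(), leaving the asm as a plain load, and-not, or, store of the PTE word. A sketch of the same shape in plain C, with illustrative names (the real function keeps the hand-written asm shown above):

#include <linux/irqflags.h>

static unsigned long word_update(unsigned long *p, unsigned long clr,
                                 unsigned long set)
{
        unsigned long flags, old;

        raw_local_irq_save(flags);
        old = *p;
        *p = (old & ~clr) | set;
        raw_local_irq_restore(flags);

        return old;
}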
diff --git a/arch/microblaze/kernel/cpu/pvr.c b/arch/microblaze/kernel/cpu/pvr.c index e01afa68273e..488c1ed24e38 100644 --- a/arch/microblaze/kernel/cpu/pvr.c +++ b/arch/microblaze/kernel/cpu/pvr.c | |||
@@ -27,7 +27,7 @@ | |||
27 | register unsigned tmp __asm__("r3"); \ | 27 | register unsigned tmp __asm__("r3"); \ |
28 | tmp = 0x0; /* Prevent warning about unused */ \ | 28 | tmp = 0x0; /* Prevent warning about unused */ \ |
29 | __asm__ __volatile__ ( \ | 29 | __asm__ __volatile__ ( \ |
30 | "mfs %0, rpvr" #pvrid ";" \ | 30 | "mfs %0, rpvr" #pvrid ";" \ |
31 | : "=r" (tmp) : : "memory"); \ | 31 | : "=r" (tmp) : : "memory"); \ |
32 | val = tmp; \ | 32 | val = tmp; \ |
33 | } | 33 | } |
@@ -54,7 +54,7 @@ int cpu_has_pvr(void) | |||
54 | if (!(flags & PVR_MSR_BIT)) | 54 | if (!(flags & PVR_MSR_BIT)) |
55 | return 0; | 55 | return 0; |
56 | 56 | ||
57 | get_single_pvr(0x00, pvr0); | 57 | get_single_pvr(0, pvr0); |
58 | pr_debug("%s: pvr0 is 0x%08x\n", __func__, pvr0); | 58 | pr_debug("%s: pvr0 is 0x%08x\n", __func__, pvr0); |
59 | 59 | ||
60 | if (pvr0 & PVR0_PVR_FULL_MASK) | 60 | if (pvr0 & PVR0_PVR_FULL_MASK) |
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S index 0db20b5abb54..778a5ce2e4fc 100644 --- a/arch/microblaze/kernel/head.S +++ b/arch/microblaze/kernel/head.S | |||
@@ -62,15 +62,14 @@ real_start: | |||
62 | andi r1, r1, ~2 | 62 | andi r1, r1, ~2 |
63 | mts rmsr, r1 | 63 | mts rmsr, r1 |
64 | /* | 64 | /* |
65 | * Here is checking mechanism which check if Microblaze has msr instructions | 65 | * According to Xilinx, msrclr instruction behaves like 'mfs rX,rpc' |
66 | * We load msr and compare it with previous r1 value - if is the same, | 66 | * if the msrclr instruction is not enabled. We use this to detect |
67 | * msr instructions works if not - cpu don't have them. | 67 | * if the opcode is available, by issuing msrclr and then testing the result. |
68 | * r8 == 0 - msr instructions are implemented | ||
69 | * r8 != 0 - msr instructions are not implemented | ||
68 | */ | 70 | */ |
69 | /* r8=0 - I have msr instr, 1 - I don't have them */ | 71 | msrclr r8, 0 /* clear nothing - just read msr for test */ |
70 | rsubi r0, r0, 1 /* set the carry bit */ | 72 | cmpu r8, r8, r1 /* r1 must contain msr reg content */ |
71 | msrclr r0, 0x4 /* try to clear it */ | ||
72 | /* read the carry bit, r8 will be '0' if msrclr exists */ | ||
73 | addik r8, r0, 0 | ||
74 | 73 | ||
75 | /* r7 may point to an FDT, or there may be one linked in. | 74 | /* r7 may point to an FDT, or there may be one linked in. |
76 | if it's in r7, we've got to save it away ASAP. | 75 | if it's in r7, we've got to save it away ASAP. |
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c index bb1558e4b283..9312fbb37efd 100644 --- a/arch/microblaze/kernel/setup.c +++ b/arch/microblaze/kernel/setup.c | |||
@@ -161,11 +161,11 @@ void __init machine_early_init(const char *cmdline, unsigned int ram, | |||
161 | #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR | 161 | #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR |
162 | if (msr) | 162 | if (msr) |
163 | eprintk("!!!Your kernel has setup MSR instruction but " | 163 | eprintk("!!!Your kernel has setup MSR instruction but " |
164 | "CPU don't have it %d\n", msr); | 164 | "CPU don't have it %x\n", msr); |
165 | #else | 165 | #else |
166 | if (!msr) | 166 | if (!msr) |
167 | eprintk("!!!Your kernel not setup MSR instruction but " | 167 | eprintk("!!!Your kernel not setup MSR instruction but " |
168 | "CPU have it %d\n", msr); | 168 | "CPU have it %x\n", msr); |
169 | #endif | 169 | #endif |
170 | 170 | ||
171 | for (src = __ivt_start; src < __ivt_end; src++, dst++) | 171 | for (src = __ivt_start; src < __ivt_end; src++, dst++) |
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h index 8eaed81ea642..17194fcd4040 100644 --- a/arch/powerpc/include/asm/mmu-book3e.h +++ b/arch/powerpc/include/asm/mmu-book3e.h | |||
@@ -40,8 +40,8 @@ | |||
40 | 40 | ||
41 | /* MAS registers bit definitions */ | 41 | /* MAS registers bit definitions */ |
42 | 42 | ||
43 | #define MAS0_TLBSEL(x) ((x << 28) & 0x30000000) | 43 | #define MAS0_TLBSEL(x) (((x) << 28) & 0x30000000) |
44 | #define MAS0_ESEL(x) ((x << 16) & 0x0FFF0000) | 44 | #define MAS0_ESEL(x) (((x) << 16) & 0x0FFF0000) |
45 | #define MAS0_NV(x) ((x) & 0x00000FFF) | 45 | #define MAS0_NV(x) ((x) & 0x00000FFF) |
46 | #define MAS0_HES 0x00004000 | 46 | #define MAS0_HES 0x00004000 |
47 | #define MAS0_WQ_ALLWAYS 0x00000000 | 47 | #define MAS0_WQ_ALLWAYS 0x00000000 |
@@ -50,12 +50,12 @@ | |||
50 | 50 | ||
51 | #define MAS1_VALID 0x80000000 | 51 | #define MAS1_VALID 0x80000000 |
52 | #define MAS1_IPROT 0x40000000 | 52 | #define MAS1_IPROT 0x40000000 |
53 | #define MAS1_TID(x) ((x << 16) & 0x3FFF0000) | 53 | #define MAS1_TID(x) (((x) << 16) & 0x3FFF0000) |
54 | #define MAS1_IND 0x00002000 | 54 | #define MAS1_IND 0x00002000 |
55 | #define MAS1_TS 0x00001000 | 55 | #define MAS1_TS 0x00001000 |
56 | #define MAS1_TSIZE_MASK 0x00000f80 | 56 | #define MAS1_TSIZE_MASK 0x00000f80 |
57 | #define MAS1_TSIZE_SHIFT 7 | 57 | #define MAS1_TSIZE_SHIFT 7 |
58 | #define MAS1_TSIZE(x) ((x << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK) | 58 | #define MAS1_TSIZE(x) (((x) << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK) |
59 | 59 | ||
60 | #define MAS2_EPN 0xFFFFF000 | 60 | #define MAS2_EPN 0xFFFFF000 |
61 | #define MAS2_X0 0x00000040 | 61 | #define MAS2_X0 0x00000040 |
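Wrapping each macro argument in parentheses keeps operator precedence from tearing an expression argument apart around the shift. A user-space illustration; TLBSEL_BAD and TLBSEL_GOOD are invented names mirroring MAS0_TLBSEL before and after the change:

#include <stdio.h>

#define TLBSEL_BAD(x)   ((x << 28) & 0x30000000)
#define TLBSEL_GOOD(x)  (((x) << 28) & 0x30000000)

int main(void)
{
        /* "1 | 0" stands in for any argument that is not a single token */
        printf("%#x %#x\n", TLBSEL_BAD(1 | 0), TLBSEL_GOOD(1 | 0));
        /* prints "0 0x10000000" */
        return 0;
}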
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 53b64be40eb2..da4b20008541 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h | |||
@@ -101,7 +101,7 @@ extern phys_addr_t kernstart_addr; | |||
101 | 101 | ||
102 | #ifdef CONFIG_FLATMEM | 102 | #ifdef CONFIG_FLATMEM |
103 | #define ARCH_PFN_OFFSET (MEMORY_START >> PAGE_SHIFT) | 103 | #define ARCH_PFN_OFFSET (MEMORY_START >> PAGE_SHIFT) |
104 | #define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < (ARCH_PFN_OFFSET + max_mapnr)) | 104 | #define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr) |
105 | #endif | 105 | #endif |
106 | 106 | ||
107 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) | 107 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) |
diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S index 55cba4a8a959..f8cd9fba4d35 100644 --- a/arch/powerpc/kernel/cpu_setup_6xx.S +++ b/arch/powerpc/kernel/cpu_setup_6xx.S | |||
@@ -18,7 +18,7 @@ | |||
18 | #include <asm/mmu.h> | 18 | #include <asm/mmu.h> |
19 | 19 | ||
20 | _GLOBAL(__setup_cpu_603) | 20 | _GLOBAL(__setup_cpu_603) |
21 | mflr r4 | 21 | mflr r5 |
22 | BEGIN_MMU_FTR_SECTION | 22 | BEGIN_MMU_FTR_SECTION |
23 | li r10,0 | 23 | li r10,0 |
24 | mtspr SPRN_SPRG_603_LRU,r10 /* init SW LRU tracking */ | 24 | mtspr SPRN_SPRG_603_LRU,r10 /* init SW LRU tracking */ |
@@ -27,60 +27,60 @@ BEGIN_FTR_SECTION | |||
27 | bl __init_fpu_registers | 27 | bl __init_fpu_registers |
28 | END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE) | 28 | END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE) |
29 | bl setup_common_caches | 29 | bl setup_common_caches |
30 | mtlr r4 | 30 | mtlr r5 |
31 | blr | 31 | blr |
32 | _GLOBAL(__setup_cpu_604) | 32 | _GLOBAL(__setup_cpu_604) |
33 | mflr r4 | 33 | mflr r5 |
34 | bl setup_common_caches | 34 | bl setup_common_caches |
35 | bl setup_604_hid0 | 35 | bl setup_604_hid0 |
36 | mtlr r4 | 36 | mtlr r5 |
37 | blr | 37 | blr |
38 | _GLOBAL(__setup_cpu_750) | 38 | _GLOBAL(__setup_cpu_750) |
39 | mflr r4 | 39 | mflr r5 |
40 | bl __init_fpu_registers | 40 | bl __init_fpu_registers |
41 | bl setup_common_caches | 41 | bl setup_common_caches |
42 | bl setup_750_7400_hid0 | 42 | bl setup_750_7400_hid0 |
43 | mtlr r4 | 43 | mtlr r5 |
44 | blr | 44 | blr |
45 | _GLOBAL(__setup_cpu_750cx) | 45 | _GLOBAL(__setup_cpu_750cx) |
46 | mflr r4 | 46 | mflr r5 |
47 | bl __init_fpu_registers | 47 | bl __init_fpu_registers |
48 | bl setup_common_caches | 48 | bl setup_common_caches |
49 | bl setup_750_7400_hid0 | 49 | bl setup_750_7400_hid0 |
50 | bl setup_750cx | 50 | bl setup_750cx |
51 | mtlr r4 | 51 | mtlr r5 |
52 | blr | 52 | blr |
53 | _GLOBAL(__setup_cpu_750fx) | 53 | _GLOBAL(__setup_cpu_750fx) |
54 | mflr r4 | 54 | mflr r5 |
55 | bl __init_fpu_registers | 55 | bl __init_fpu_registers |
56 | bl setup_common_caches | 56 | bl setup_common_caches |
57 | bl setup_750_7400_hid0 | 57 | bl setup_750_7400_hid0 |
58 | bl setup_750fx | 58 | bl setup_750fx |
59 | mtlr r4 | 59 | mtlr r5 |
60 | blr | 60 | blr |
61 | _GLOBAL(__setup_cpu_7400) | 61 | _GLOBAL(__setup_cpu_7400) |
62 | mflr r4 | 62 | mflr r5 |
63 | bl __init_fpu_registers | 63 | bl __init_fpu_registers |
64 | bl setup_7400_workarounds | 64 | bl setup_7400_workarounds |
65 | bl setup_common_caches | 65 | bl setup_common_caches |
66 | bl setup_750_7400_hid0 | 66 | bl setup_750_7400_hid0 |
67 | mtlr r4 | 67 | mtlr r5 |
68 | blr | 68 | blr |
69 | _GLOBAL(__setup_cpu_7410) | 69 | _GLOBAL(__setup_cpu_7410) |
70 | mflr r4 | 70 | mflr r5 |
71 | bl __init_fpu_registers | 71 | bl __init_fpu_registers |
72 | bl setup_7410_workarounds | 72 | bl setup_7410_workarounds |
73 | bl setup_common_caches | 73 | bl setup_common_caches |
74 | bl setup_750_7400_hid0 | 74 | bl setup_750_7400_hid0 |
75 | li r3,0 | 75 | li r3,0 |
76 | mtspr SPRN_L2CR2,r3 | 76 | mtspr SPRN_L2CR2,r3 |
77 | mtlr r4 | 77 | mtlr r5 |
78 | blr | 78 | blr |
79 | _GLOBAL(__setup_cpu_745x) | 79 | _GLOBAL(__setup_cpu_745x) |
80 | mflr r4 | 80 | mflr r5 |
81 | bl setup_common_caches | 81 | bl setup_common_caches |
82 | bl setup_745x_specifics | 82 | bl setup_745x_specifics |
83 | mtlr r4 | 83 | mtlr r5 |
84 | blr | 84 | blr |
85 | 85 | ||
86 | /* Enable caches for 603's, 604, 750 & 7400 */ | 86 | /* Enable caches for 603's, 604, 750 & 7400 */ |
@@ -194,10 +194,10 @@ setup_750cx: | |||
194 | cror 4*cr0+eq,4*cr0+eq,4*cr1+eq | 194 | cror 4*cr0+eq,4*cr0+eq,4*cr1+eq |
195 | cror 4*cr0+eq,4*cr0+eq,4*cr2+eq | 195 | cror 4*cr0+eq,4*cr0+eq,4*cr2+eq |
196 | bnelr | 196 | bnelr |
197 | lwz r6,CPU_SPEC_FEATURES(r5) | 197 | lwz r6,CPU_SPEC_FEATURES(r4) |
198 | li r7,CPU_FTR_CAN_NAP | 198 | li r7,CPU_FTR_CAN_NAP |
199 | andc r6,r6,r7 | 199 | andc r6,r6,r7 |
200 | stw r6,CPU_SPEC_FEATURES(r5) | 200 | stw r6,CPU_SPEC_FEATURES(r4) |
201 | blr | 201 | blr |
202 | 202 | ||
203 | /* 750fx specific | 203 | /* 750fx specific |
@@ -225,12 +225,12 @@ BEGIN_FTR_SECTION | |||
225 | andis. r11,r11,L3CR_L3E@h | 225 | andis. r11,r11,L3CR_L3E@h |
226 | beq 1f | 226 | beq 1f |
227 | END_FTR_SECTION_IFSET(CPU_FTR_L3CR) | 227 | END_FTR_SECTION_IFSET(CPU_FTR_L3CR) |
228 | lwz r6,CPU_SPEC_FEATURES(r5) | 228 | lwz r6,CPU_SPEC_FEATURES(r4) |
229 | andi. r0,r6,CPU_FTR_L3_DISABLE_NAP | 229 | andi. r0,r6,CPU_FTR_L3_DISABLE_NAP |
230 | beq 1f | 230 | beq 1f |
231 | li r7,CPU_FTR_CAN_NAP | 231 | li r7,CPU_FTR_CAN_NAP |
232 | andc r6,r6,r7 | 232 | andc r6,r6,r7 |
233 | stw r6,CPU_SPEC_FEATURES(r5) | 233 | stw r6,CPU_SPEC_FEATURES(r4) |
234 | 1: | 234 | 1: |
235 | mfspr r11,SPRN_HID0 | 235 | mfspr r11,SPRN_HID0 |
236 | 236 | ||
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 8d74a24c5502..e8e915ce3d8d 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c | |||
@@ -2076,8 +2076,8 @@ static void __init setup_cpu_spec(unsigned long offset, struct cpu_spec *s) | |||
2076 | * pointer on ppc64 and booke as we are running at 0 in real mode | 2076 | * pointer on ppc64 and booke as we are running at 0 in real mode |
2077 | * on ppc64 and reloc_offset is always 0 on booke. | 2077 | * on ppc64 and reloc_offset is always 0 on booke. |
2078 | */ | 2078 | */ |
2079 | if (s->cpu_setup) { | 2079 | if (t->cpu_setup) { |
2080 | s->cpu_setup(offset, s); | 2080 | t->cpu_setup(offset, t); |
2081 | } | 2081 | } |
2082 | #endif /* CONFIG_PPC64 || CONFIG_BOOKE */ | 2082 | #endif /* CONFIG_PPC64 || CONFIG_BOOKE */ |
2083 | } | 2083 | } |
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index bf5cb91f07de..fd4812329570 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c | |||
@@ -186,7 +186,7 @@ static void unmap_cpu_from_node(unsigned long cpu) | |||
186 | dbg("removing cpu %lu from node %d\n", cpu, node); | 186 | dbg("removing cpu %lu from node %d\n", cpu, node); |
187 | 187 | ||
188 | if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) { | 188 | if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) { |
189 | cpumask_set_cpu(cpu, node_to_cpumask_map[node]); | 189 | cpumask_clear_cpu(cpu, node_to_cpumask_map[node]); |
190 | } else { | 190 | } else { |
191 | printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n", | 191 | printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n", |
192 | cpu, node); | 192 | cpu, node); |
@@ -1289,10 +1289,9 @@ u64 memory_hotplug_max(void) | |||
1289 | } | 1289 | } |
1290 | #endif /* CONFIG_MEMORY_HOTPLUG */ | 1290 | #endif /* CONFIG_MEMORY_HOTPLUG */ |
1291 | 1291 | ||
1292 | /* Vrtual Processor Home Node (VPHN) support */ | 1292 | /* Virtual Processor Home Node (VPHN) support */ |
1293 | #ifdef CONFIG_PPC_SPLPAR | 1293 | #ifdef CONFIG_PPC_SPLPAR |
1294 | #define VPHN_NR_CHANGE_CTRS (8) | 1294 | static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS]; |
1295 | static u8 vphn_cpu_change_counts[NR_CPUS][VPHN_NR_CHANGE_CTRS]; | ||
1296 | static cpumask_t cpu_associativity_changes_mask; | 1295 | static cpumask_t cpu_associativity_changes_mask; |
1297 | static int vphn_enabled; | 1296 | static int vphn_enabled; |
1298 | static void set_topology_timer(void); | 1297 | static void set_topology_timer(void); |
@@ -1303,16 +1302,18 @@ static void set_topology_timer(void); | |||
1303 | */ | 1302 | */ |
1304 | static void setup_cpu_associativity_change_counters(void) | 1303 | static void setup_cpu_associativity_change_counters(void) |
1305 | { | 1304 | { |
1306 | int cpu = 0; | 1305 | int cpu; |
1306 | |||
1307 | /* The VPHN feature supports a maximum of 8 reference points */ | ||
1308 | BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8); | ||
1307 | 1309 | ||
1308 | for_each_possible_cpu(cpu) { | 1310 | for_each_possible_cpu(cpu) { |
1309 | int i = 0; | 1311 | int i; |
1310 | u8 *counts = vphn_cpu_change_counts[cpu]; | 1312 | u8 *counts = vphn_cpu_change_counts[cpu]; |
1311 | volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts; | 1313 | volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts; |
1312 | 1314 | ||
1313 | for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++) { | 1315 | for (i = 0; i < distance_ref_points_depth; i++) |
1314 | counts[i] = hypervisor_counts[i]; | 1316 | counts[i] = hypervisor_counts[i]; |
1315 | } | ||
1316 | } | 1317 | } |
1317 | } | 1318 | } |
1318 | 1319 | ||
@@ -1329,7 +1330,7 @@ static void setup_cpu_associativity_change_counters(void) | |||
1329 | */ | 1330 | */ |
1330 | static int update_cpu_associativity_changes_mask(void) | 1331 | static int update_cpu_associativity_changes_mask(void) |
1331 | { | 1332 | { |
1332 | int cpu = 0, nr_cpus = 0; | 1333 | int cpu, nr_cpus = 0; |
1333 | cpumask_t *changes = &cpu_associativity_changes_mask; | 1334 | cpumask_t *changes = &cpu_associativity_changes_mask; |
1334 | 1335 | ||
1335 | cpumask_clear(changes); | 1336 | cpumask_clear(changes); |
@@ -1339,8 +1340,8 @@ static int update_cpu_associativity_changes_mask(void) | |||
1339 | u8 *counts = vphn_cpu_change_counts[cpu]; | 1340 | u8 *counts = vphn_cpu_change_counts[cpu]; |
1340 | volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts; | 1341 | volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts; |
1341 | 1342 | ||
1342 | for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++) { | 1343 | for (i = 0; i < distance_ref_points_depth; i++) { |
1343 | if (hypervisor_counts[i] > counts[i]) { | 1344 | if (hypervisor_counts[i] != counts[i]) { |
1344 | counts[i] = hypervisor_counts[i]; | 1345 | counts[i] = hypervisor_counts[i]; |
1345 | changed = 1; | 1346 | changed = 1; |
1346 | } | 1347 | } |
@@ -1354,8 +1355,11 @@ static int update_cpu_associativity_changes_mask(void) | |||
1354 | return nr_cpus; | 1355 | return nr_cpus; |
1355 | } | 1356 | } |
1356 | 1357 | ||
1357 | /* 6 64-bit registers unpacked into 12 32-bit associativity values */ | 1358 | /* |
1358 | #define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32)) | 1359 | * 6 64-bit registers unpacked into 12 32-bit associativity values. To form |
1360 | * the complete property we have to add the length in the first cell. | ||
1361 | */ | ||
1362 | #define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1) | ||
1359 | 1363 | ||
1360 | /* | 1364 | /* |
1361 | * Convert the associativity domain numbers returned from the hypervisor | 1365 | * Convert the associativity domain numbers returned from the hypervisor |
@@ -1363,15 +1367,14 @@ static int update_cpu_associativity_changes_mask(void) | |||
1363 | */ | 1367 | */ |
1364 | static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked) | 1368 | static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked) |
1365 | { | 1369 | { |
1366 | int i = 0; | 1370 | int i, nr_assoc_doms = 0; |
1367 | int nr_assoc_doms = 0; | ||
1368 | const u16 *field = (const u16*) packed; | 1371 | const u16 *field = (const u16*) packed; |
1369 | 1372 | ||
1370 | #define VPHN_FIELD_UNUSED (0xffff) | 1373 | #define VPHN_FIELD_UNUSED (0xffff) |
1371 | #define VPHN_FIELD_MSB (0x8000) | 1374 | #define VPHN_FIELD_MSB (0x8000) |
1372 | #define VPHN_FIELD_MASK (~VPHN_FIELD_MSB) | 1375 | #define VPHN_FIELD_MASK (~VPHN_FIELD_MSB) |
1373 | 1376 | ||
1374 | for (i = 0; i < VPHN_ASSOC_BUFSIZE; i++) { | 1377 | for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) { |
1375 | if (*field == VPHN_FIELD_UNUSED) { | 1378 | if (*field == VPHN_FIELD_UNUSED) { |
1376 | /* All significant fields processed, and remaining | 1379 | /* All significant fields processed, and remaining |
1377 | * fields contain the reserved value of all 1's. | 1380 | * fields contain the reserved value of all 1's. |
@@ -1379,14 +1382,12 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked) | |||
1379 | */ | 1382 | */ |
1380 | unpacked[i] = *((u32*)field); | 1383 | unpacked[i] = *((u32*)field); |
1381 | field += 2; | 1384 | field += 2; |
1382 | } | 1385 | } else if (*field & VPHN_FIELD_MSB) { |
1383 | else if (*field & VPHN_FIELD_MSB) { | ||
1384 | /* Data is in the lower 15 bits of this field */ | 1386 | /* Data is in the lower 15 bits of this field */ |
1385 | unpacked[i] = *field & VPHN_FIELD_MASK; | 1387 | unpacked[i] = *field & VPHN_FIELD_MASK; |
1386 | field++; | 1388 | field++; |
1387 | nr_assoc_doms++; | 1389 | nr_assoc_doms++; |
1388 | } | 1390 | } else { |
1389 | else { | ||
1390 | /* Data is in the lower 15 bits of this field | 1391 | /* Data is in the lower 15 bits of this field |
1391 | * concatenated with the next 16 bit field | 1392 | * concatenated with the next 16 bit field |
1392 | */ | 1393 | */ |
@@ -1396,6 +1397,9 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked) | |||
1396 | } | 1397 | } |
1397 | } | 1398 | } |
1398 | 1399 | ||
1400 | /* The first cell contains the length of the property */ | ||
1401 | unpacked[0] = nr_assoc_doms; | ||
1402 | |||
1399 | return nr_assoc_doms; | 1403 | return nr_assoc_doms; |
1400 | } | 1404 | } |
1401 | 1405 | ||
@@ -1405,7 +1409,7 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked) | |||
1405 | */ | 1409 | */ |
1406 | static long hcall_vphn(unsigned long cpu, unsigned int *associativity) | 1410 | static long hcall_vphn(unsigned long cpu, unsigned int *associativity) |
1407 | { | 1411 | { |
1408 | long rc = 0; | 1412 | long rc; |
1409 | long retbuf[PLPAR_HCALL9_BUFSIZE] = {0}; | 1413 | long retbuf[PLPAR_HCALL9_BUFSIZE] = {0}; |
1410 | u64 flags = 1; | 1414 | u64 flags = 1; |
1411 | int hwcpu = get_hard_smp_processor_id(cpu); | 1415 | int hwcpu = get_hard_smp_processor_id(cpu); |
@@ -1419,7 +1423,7 @@ static long hcall_vphn(unsigned long cpu, unsigned int *associativity) | |||
1419 | static long vphn_get_associativity(unsigned long cpu, | 1423 | static long vphn_get_associativity(unsigned long cpu, |
1420 | unsigned int *associativity) | 1424 | unsigned int *associativity) |
1421 | { | 1425 | { |
1422 | long rc = 0; | 1426 | long rc; |
1423 | 1427 | ||
1424 | rc = hcall_vphn(cpu, associativity); | 1428 | rc = hcall_vphn(cpu, associativity); |
1425 | 1429 | ||
@@ -1445,9 +1449,9 @@ static long vphn_get_associativity(unsigned long cpu, | |||
1445 | */ | 1449 | */ |
1446 | int arch_update_cpu_topology(void) | 1450 | int arch_update_cpu_topology(void) |
1447 | { | 1451 | { |
1448 | int cpu = 0, nid = 0, old_nid = 0; | 1452 | int cpu, nid, old_nid; |
1449 | unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0}; | 1453 | unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0}; |
1450 | struct sys_device *sysdev = NULL; | 1454 | struct sys_device *sysdev; |
1451 | 1455 | ||
1452 | for_each_cpu_mask(cpu, cpu_associativity_changes_mask) { | 1456 | for_each_cpu_mask(cpu, cpu_associativity_changes_mask) { |
1453 | vphn_get_associativity(cpu, associativity); | 1457 | vphn_get_associativity(cpu, associativity); |
@@ -1512,7 +1516,8 @@ int start_topology_update(void) | |||
1512 | { | 1516 | { |
1513 | int rc = 0; | 1517 | int rc = 0; |
1514 | 1518 | ||
1515 | if (firmware_has_feature(FW_FEATURE_VPHN)) { | 1519 | if (firmware_has_feature(FW_FEATURE_VPHN) && |
1520 | get_lppaca()->shared_proc) { | ||
1516 | vphn_enabled = 1; | 1521 | vphn_enabled = 1; |
1517 | setup_cpu_associativity_change_counters(); | 1522 | setup_cpu_associativity_change_counters(); |
1518 | init_timer_deferrable(&topology_timer); | 1523 | init_timer_deferrable(&topology_timer); |
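The vphn_unpack_associativity() change above makes the unpacked buffer look like a
device-tree associativity property, with the domain count stored in cell 0. A
self-contained userspace sketch of that unpacking, assuming the big-endian 16-bit
field layout described in the comments (the constant names mirror the kernel's, but
vphn_unpack() itself is illustrative, not the kernel function):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define VPHN_FIELD_UNUSED	0xffff
#define VPHN_FIELD_MSB		0x8000
#define VPHN_FIELD_MASK		(~VPHN_FIELD_MSB)
#define VPHN_ASSOC_BUFSIZE	(6 * sizeof(uint64_t) / sizeof(uint32_t) + 1)

static int vphn_unpack(const uint16_t *field, uint32_t *unpacked)
{
	int i, nr_assoc_doms = 0;

	for (i = 1; i < (int)VPHN_ASSOC_BUFSIZE; i++) {
		if (*field == VPHN_FIELD_UNUSED) {
			/* Remaining fields hold the reserved all-ones pattern. */
			memcpy(&unpacked[i], field, sizeof(uint32_t));
			field += 2;
		} else if (*field & VPHN_FIELD_MSB) {
			/* Domain number is in the lower 15 bits of one field. */
			unpacked[i] = *field & VPHN_FIELD_MASK;
			field++;
			nr_assoc_doms++;
		} else {
			/* Domain number spans this field and the next one. */
			unpacked[i] = ((uint32_t)field[0] << 16) | field[1];
			field += 2;
			nr_assoc_doms++;
		}
	}

	/* Cell 0 carries the domain count, like a property length cell. */
	unpacked[0] = nr_assoc_doms;
	return nr_assoc_doms;
}

int main(void)
{
	uint16_t packed[24];
	uint32_t unpacked[VPHN_ASSOC_BUFSIZE];
	int n;

	memset(packed, 0xff, sizeof(packed));	/* everything "unused"... */
	packed[0] = 0x8002;			/* ...except three domains */
	packed[1] = 0x8004;
	packed[2] = 0x8006;

	n = vphn_unpack(packed, unpacked);
	printf("%d domains, first=%u\n", n, unpacked[1]);	/* 3 domains, first=2 */
	return 0;
}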
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 5d3ea9f60dd7..ca5d5898d320 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c | |||
@@ -713,6 +713,13 @@ EXPORT_SYMBOL(arch_free_page); | |||
713 | /* NB: reg/unreg are called while guarded with the tracepoints_mutex */ | 713 | /* NB: reg/unreg are called while guarded with the tracepoints_mutex */ |
714 | extern long hcall_tracepoint_refcount; | 714 | extern long hcall_tracepoint_refcount; |
715 | 715 | ||
716 | /* | ||
717 | * Since the tracing code might execute hcalls we need to guard against | ||
718 | * recursion. One example of this are spinlocks calling H_YIELD on | ||
719 | * shared processor partitions. | ||
720 | */ | ||
721 | static DEFINE_PER_CPU(unsigned int, hcall_trace_depth); | ||
722 | |||
716 | void hcall_tracepoint_regfunc(void) | 723 | void hcall_tracepoint_regfunc(void) |
717 | { | 724 | { |
718 | hcall_tracepoint_refcount++; | 725 | hcall_tracepoint_refcount++; |
@@ -725,12 +732,42 @@ void hcall_tracepoint_unregfunc(void) | |||
725 | 732 | ||
726 | void __trace_hcall_entry(unsigned long opcode, unsigned long *args) | 733 | void __trace_hcall_entry(unsigned long opcode, unsigned long *args) |
727 | { | 734 | { |
735 | unsigned long flags; | ||
736 | unsigned int *depth; | ||
737 | |||
738 | local_irq_save(flags); | ||
739 | |||
740 | depth = &__get_cpu_var(hcall_trace_depth); | ||
741 | |||
742 | if (*depth) | ||
743 | goto out; | ||
744 | |||
745 | (*depth)++; | ||
728 | trace_hcall_entry(opcode, args); | 746 | trace_hcall_entry(opcode, args); |
747 | (*depth)--; | ||
748 | |||
749 | out: | ||
750 | local_irq_restore(flags); | ||
729 | } | 751 | } |
730 | 752 | ||
731 | void __trace_hcall_exit(long opcode, unsigned long retval, | 753 | void __trace_hcall_exit(long opcode, unsigned long retval, |
732 | unsigned long *retbuf) | 754 | unsigned long *retbuf) |
733 | { | 755 | { |
756 | unsigned long flags; | ||
757 | unsigned int *depth; | ||
758 | |||
759 | local_irq_save(flags); | ||
760 | |||
761 | depth = &__get_cpu_var(hcall_trace_depth); | ||
762 | |||
763 | if (*depth) | ||
764 | goto out; | ||
765 | |||
766 | (*depth)++; | ||
734 | trace_hcall_exit(opcode, retval, retbuf); | 767 | trace_hcall_exit(opcode, retval, retbuf); |
768 | (*depth)--; | ||
769 | |||
770 | out: | ||
771 | local_irq_restore(flags); | ||
735 | } | 772 | } |
736 | #endif | 773 | #endif |
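The hcall_trace_depth guard above is a per-CPU recursion counter; interrupts are
disabled around it so the depth cannot be perturbed on the same CPU. A minimal
userspace analogue of the same pattern, with a per-thread counter standing in for
the per-CPU variable and do_trace() standing in for trace_hcall_entry()/exit():

#include <stdio.h>

static __thread unsigned int trace_depth;

static void do_trace(const char *what)
{
	if (trace_depth)		/* already inside the tracer: bail out */
		return;

	trace_depth++;
	/* Emit the event; anything in here that re-enters do_trace()
	 * (the way spinlocks may issue H_YIELD) returns immediately
	 * instead of recursing. */
	printf("trace: %s\n", what);
	trace_depth--;
}

int main(void)
{
	do_trace("hcall entry");
	do_trace("hcall exit");
	return 0;
}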
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c index 0851eb1e919e..2751b3a8a66f 100644 --- a/arch/s390/boot/compressed/misc.c +++ b/arch/s390/boot/compressed/misc.c | |||
@@ -133,11 +133,12 @@ unsigned long decompress_kernel(void) | |||
133 | unsigned long output_addr; | 133 | unsigned long output_addr; |
134 | unsigned char *output; | 134 | unsigned char *output; |
135 | 135 | ||
136 | check_ipl_parmblock((void *) 0, (unsigned long) output + SZ__bss_start); | 136 | output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL; |
137 | check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start); | ||
137 | memset(&_bss, 0, &_ebss - &_bss); | 138 | memset(&_bss, 0, &_ebss - &_bss); |
138 | free_mem_ptr = (unsigned long)&_end; | 139 | free_mem_ptr = (unsigned long)&_end; |
139 | free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; | 140 | free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; |
140 | output = (unsigned char *) ((free_mem_end_ptr + 4095UL) & -4096UL); | 141 | output = (unsigned char *) output_addr; |
141 | 142 | ||
142 | #ifdef CONFIG_BLK_DEV_INITRD | 143 | #ifdef CONFIG_BLK_DEV_INITRD |
143 | /* | 144 | /* |
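The output_addr computation above rounds the end of the decompressor heap up to a
4 KiB boundary; "& -4096UL" is the same mask as "& ~4095UL". A small worked example
of the align-up arithmetic (page_align_up() is an illustrative helper, not a kernel
function):

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long page_align_up(unsigned long addr)
{
	/* Works for any power-of-two PAGE_SIZE. */
	return (addr + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}

int main(void)
{
	printf("%lu\n", page_align_up(8192));	/* already aligned: 8192 */
	printf("%lu\n", page_align_up(8193));	/* rounds up to 12288 */
	return 0;
}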
diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c index f42dbabc0d30..48884f89ab92 100644 --- a/arch/s390/crypto/sha_common.c +++ b/arch/s390/crypto/sha_common.c | |||
@@ -38,6 +38,7 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len) | |||
38 | BUG_ON(ret != bsize); | 38 | BUG_ON(ret != bsize); |
39 | data += bsize - index; | 39 | data += bsize - index; |
40 | len -= bsize - index; | 40 | len -= bsize - index; |
41 | index = 0; | ||
41 | } | 42 | } |
42 | 43 | ||
43 | /* process as many blocks as possible */ | 44 | /* process as many blocks as possible */ |
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index 76daea117181..5c5ba10384c2 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h | |||
@@ -36,14 +36,19 @@ | |||
36 | 36 | ||
37 | static inline int atomic_read(const atomic_t *v) | 37 | static inline int atomic_read(const atomic_t *v) |
38 | { | 38 | { |
39 | barrier(); | 39 | int c; |
40 | return v->counter; | 40 | |
41 | asm volatile( | ||
42 | " l %0,%1\n" | ||
43 | : "=d" (c) : "Q" (v->counter)); | ||
44 | return c; | ||
41 | } | 45 | } |
42 | 46 | ||
43 | static inline void atomic_set(atomic_t *v, int i) | 47 | static inline void atomic_set(atomic_t *v, int i) |
44 | { | 48 | { |
45 | v->counter = i; | 49 | asm volatile( |
46 | barrier(); | 50 | " st %1,%0\n" |
51 | : "=Q" (v->counter) : "d" (i)); | ||
47 | } | 52 | } |
48 | 53 | ||
49 | static inline int atomic_add_return(int i, atomic_t *v) | 54 | static inline int atomic_add_return(int i, atomic_t *v) |
@@ -128,14 +133,19 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) | |||
128 | 133 | ||
129 | static inline long long atomic64_read(const atomic64_t *v) | 134 | static inline long long atomic64_read(const atomic64_t *v) |
130 | { | 135 | { |
131 | barrier(); | 136 | long long c; |
132 | return v->counter; | 137 | |
138 | asm volatile( | ||
139 | " lg %0,%1\n" | ||
140 | : "=d" (c) : "Q" (v->counter)); | ||
141 | return c; | ||
133 | } | 142 | } |
134 | 143 | ||
135 | static inline void atomic64_set(atomic64_t *v, long long i) | 144 | static inline void atomic64_set(atomic64_t *v, long long i) |
136 | { | 145 | { |
137 | v->counter = i; | 146 | asm volatile( |
138 | barrier(); | 147 | " stg %1,%0\n" |
148 | : "=Q" (v->counter) : "d" (i)); | ||
139 | } | 149 | } |
140 | 150 | ||
141 | static inline long long atomic64_add_return(long long i, atomic64_t *v) | 151 | static inline long long atomic64_add_return(long long i, atomic64_t *v) |
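The atomic_read()/atomic_set() change above uses inline assembly so the compiler is
forced to emit exactly one load or store of the counter, instead of relying on
barrier() around a plain C access. A rough portable approximation of that intent is
a volatile access, the same idea as the kernel's ACCESS_ONCE() pattern; this
userspace sketch is illustrative and is not the s390 implementation:

#include <stdio.h>

typedef struct { int counter; } atomic_t;

static inline int atomic_read(const atomic_t *v)
{
	/* Volatile access: the compiler must perform this load exactly once. */
	return *(const volatile int *)&v->counter;
}

static inline void atomic_set(atomic_t *v, int i)
{
	/* Likewise a single, non-elidable store. */
	*(volatile int *)&v->counter = i;
}

int main(void)
{
	atomic_t a;

	atomic_set(&a, 42);
	printf("%d\n", atomic_read(&a));
	return 0;
}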
diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h index 24aafa68b643..2a30d5ac0667 100644 --- a/arch/s390/include/asm/cache.h +++ b/arch/s390/include/asm/cache.h | |||
@@ -13,6 +13,7 @@ | |||
13 | 13 | ||
14 | #define L1_CACHE_BYTES 256 | 14 | #define L1_CACHE_BYTES 256 |
15 | #define L1_CACHE_SHIFT 8 | 15 | #define L1_CACHE_SHIFT 8 |
16 | #define NET_SKB_PAD 32 | ||
16 | 17 | ||
17 | #define __read_mostly __attribute__((__section__(".data..read_mostly"))) | 18 | #define __read_mostly __attribute__((__section__(".data..read_mostly"))) |
18 | 19 | ||
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index bf3de04170a7..2c79b6416271 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h | |||
@@ -148,11 +148,6 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); | |||
148 | */ | 148 | */ |
149 | extern unsigned long thread_saved_pc(struct task_struct *t); | 149 | extern unsigned long thread_saved_pc(struct task_struct *t); |
150 | 150 | ||
151 | /* | ||
152 | * Print register of task into buffer. Used in fs/proc/array.c. | ||
153 | */ | ||
154 | extern void task_show_regs(struct seq_file *m, struct task_struct *task); | ||
155 | |||
156 | extern void show_code(struct pt_regs *regs); | 151 | extern void show_code(struct pt_regs *regs); |
157 | 152 | ||
158 | unsigned long get_wchan(struct task_struct *p); | 153 | unsigned long get_wchan(struct task_struct *p); |
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 5eb78dd584ce..b5a4a739b477 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c | |||
@@ -237,43 +237,6 @@ void show_regs(struct pt_regs *regs) | |||
237 | show_last_breaking_event(regs); | 237 | show_last_breaking_event(regs); |
238 | } | 238 | } |
239 | 239 | ||
240 | /* This is called from fs/proc/array.c */ | ||
241 | void task_show_regs(struct seq_file *m, struct task_struct *task) | ||
242 | { | ||
243 | struct pt_regs *regs; | ||
244 | |||
245 | regs = task_pt_regs(task); | ||
246 | seq_printf(m, "task: %p, ksp: %p\n", | ||
247 | task, (void *)task->thread.ksp); | ||
248 | seq_printf(m, "User PSW : %p %p\n", | ||
249 | (void *) regs->psw.mask, (void *)regs->psw.addr); | ||
250 | |||
251 | seq_printf(m, "User GPRS: " FOURLONG, | ||
252 | regs->gprs[0], regs->gprs[1], | ||
253 | regs->gprs[2], regs->gprs[3]); | ||
254 | seq_printf(m, " " FOURLONG, | ||
255 | regs->gprs[4], regs->gprs[5], | ||
256 | regs->gprs[6], regs->gprs[7]); | ||
257 | seq_printf(m, " " FOURLONG, | ||
258 | regs->gprs[8], regs->gprs[9], | ||
259 | regs->gprs[10], regs->gprs[11]); | ||
260 | seq_printf(m, " " FOURLONG, | ||
261 | regs->gprs[12], regs->gprs[13], | ||
262 | regs->gprs[14], regs->gprs[15]); | ||
263 | seq_printf(m, "User ACRS: %08x %08x %08x %08x\n", | ||
264 | task->thread.acrs[0], task->thread.acrs[1], | ||
265 | task->thread.acrs[2], task->thread.acrs[3]); | ||
266 | seq_printf(m, " %08x %08x %08x %08x\n", | ||
267 | task->thread.acrs[4], task->thread.acrs[5], | ||
268 | task->thread.acrs[6], task->thread.acrs[7]); | ||
269 | seq_printf(m, " %08x %08x %08x %08x\n", | ||
270 | task->thread.acrs[8], task->thread.acrs[9], | ||
271 | task->thread.acrs[10], task->thread.acrs[11]); | ||
272 | seq_printf(m, " %08x %08x %08x %08x\n", | ||
273 | task->thread.acrs[12], task->thread.acrs[13], | ||
274 | task->thread.acrs[14], task->thread.acrs[15]); | ||
275 | } | ||
276 | |||
277 | static DEFINE_SPINLOCK(die_lock); | 240 | static DEFINE_SPINLOCK(die_lock); |
278 | 241 | ||
279 | void die(const char * str, struct pt_regs * regs, long err) | 242 | void die(const char * str, struct pt_regs * regs, long err) |
diff --git a/arch/sparc/include/asm/pcr.h b/arch/sparc/include/asm/pcr.h index a2f5c61f924e..843e4faf6a50 100644 --- a/arch/sparc/include/asm/pcr.h +++ b/arch/sparc/include/asm/pcr.h | |||
@@ -43,4 +43,6 @@ static inline u64 picl_value(unsigned int nmi_hz) | |||
43 | 43 | ||
44 | extern u64 pcr_enable; | 44 | extern u64 pcr_enable; |
45 | 45 | ||
46 | extern int pcr_arch_init(void); | ||
47 | |||
46 | #endif /* __PCR_H */ | 48 | #endif /* __PCR_H */ |
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 47977a77f6c6..72509d0e34be 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c | |||
@@ -255,10 +255,9 @@ static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu, | |||
255 | static int iommu_alloc_ctx(struct iommu *iommu) | 255 | static int iommu_alloc_ctx(struct iommu *iommu) |
256 | { | 256 | { |
257 | int lowest = iommu->ctx_lowest_free; | 257 | int lowest = iommu->ctx_lowest_free; |
258 | int sz = IOMMU_NUM_CTXS - lowest; | 258 | int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest); |
259 | int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest); | ||
260 | 259 | ||
261 | if (unlikely(n == sz)) { | 260 | if (unlikely(n == IOMMU_NUM_CTXS)) { |
262 | n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1); | 261 | n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1); |
263 | if (unlikely(n == lowest)) { | 262 | if (unlikely(n == lowest)) { |
264 | printk(KERN_WARNING "IOMMU: Ran out of contexts.\n"); | 263 | printk(KERN_WARNING "IOMMU: Ran out of contexts.\n"); |
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c index ae96cf52a955..7c2ced612b8f 100644 --- a/arch/sparc/kernel/pcr.c +++ b/arch/sparc/kernel/pcr.c | |||
@@ -167,5 +167,3 @@ out_unregister: | |||
167 | unregister_perf_hsvc(); | 167 | unregister_perf_hsvc(); |
168 | return err; | 168 | return err; |
169 | } | 169 | } |
170 | |||
171 | early_initcall(pcr_arch_init); | ||
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index b6a2b8f47040..555a76d1f4a1 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #include <asm/mdesc.h> | 49 | #include <asm/mdesc.h> |
50 | #include <asm/ldc.h> | 50 | #include <asm/ldc.h> |
51 | #include <asm/hypervisor.h> | 51 | #include <asm/hypervisor.h> |
52 | #include <asm/pcr.h> | ||
52 | 53 | ||
53 | #include "cpumap.h" | 54 | #include "cpumap.h" |
54 | 55 | ||
@@ -1358,6 +1359,7 @@ void __cpu_die(unsigned int cpu) | |||
1358 | 1359 | ||
1359 | void __init smp_cpus_done(unsigned int max_cpus) | 1360 | void __init smp_cpus_done(unsigned int max_cpus) |
1360 | { | 1361 | { |
1362 | pcr_arch_init(); | ||
1361 | } | 1363 | } |
1362 | 1364 | ||
1363 | void smp_send_reschedule(int cpu) | 1365 | void smp_send_reschedule(int cpu) |
diff --git a/arch/sparc/kernel/una_asm_32.S b/arch/sparc/kernel/una_asm_32.S index 8cc03458eb7e..8f096e84a937 100644 --- a/arch/sparc/kernel/una_asm_32.S +++ b/arch/sparc/kernel/una_asm_32.S | |||
@@ -24,9 +24,9 @@ retl_efault: | |||
24 | .globl __do_int_store | 24 | .globl __do_int_store |
25 | __do_int_store: | 25 | __do_int_store: |
26 | ld [%o2], %g1 | 26 | ld [%o2], %g1 |
27 | cmp %1, 2 | 27 | cmp %o1, 2 |
28 | be 2f | 28 | be 2f |
29 | cmp %1, 4 | 29 | cmp %o1, 4 |
30 | be 1f | 30 | be 1f |
31 | srl %g1, 24, %g2 | 31 | srl %g1, 24, %g2 |
32 | srl %g1, 16, %g7 | 32 | srl %g1, 16, %g7 |
diff --git a/arch/sparc/lib/bitext.c b/arch/sparc/lib/bitext.c index 764b3eb7b604..48d00e72ce15 100644 --- a/arch/sparc/lib/bitext.c +++ b/arch/sparc/lib/bitext.c | |||
@@ -10,7 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/string.h> | 12 | #include <linux/string.h> |
13 | #include <linux/bitops.h> | 13 | #include <linux/bitmap.h> |
14 | 14 | ||
15 | #include <asm/bitext.h> | 15 | #include <asm/bitext.h> |
16 | 16 | ||
@@ -80,8 +80,7 @@ int bit_map_string_get(struct bit_map *t, int len, int align) | |||
80 | while (test_bit(offset + i, t->map) == 0) { | 80 | while (test_bit(offset + i, t->map) == 0) { |
81 | i++; | 81 | i++; |
82 | if (i == len) { | 82 | if (i == len) { |
83 | for (i = 0; i < len; i++) | 83 | bitmap_set(t->map, offset, len); |
84 | __set_bit(offset + i, t->map); | ||
85 | if (offset == t->first_free) | 84 | if (offset == t->first_free) |
86 | t->first_free = find_next_zero_bit | 85 | t->first_free = find_next_zero_bit |
87 | (t->map, t->size, | 86 | (t->map, t->size, |
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 211ca3f7fd16..4ea15ca89b2b 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h | |||
@@ -88,6 +88,7 @@ extern int acpi_disabled; | |||
88 | extern int acpi_pci_disabled; | 88 | extern int acpi_pci_disabled; |
89 | extern int acpi_skip_timer_override; | 89 | extern int acpi_skip_timer_override; |
90 | extern int acpi_use_timer_override; | 90 | extern int acpi_use_timer_override; |
91 | extern int acpi_fix_pin2_polarity; | ||
91 | 92 | ||
92 | extern u8 acpi_sci_flags; | 93 | extern u8 acpi_sci_flags; |
93 | extern int acpi_sci_override_gsi; | 94 | extern int acpi_sci_override_gsi; |
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 5e3969c36d7f..3c896946f4cc 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h | |||
@@ -233,6 +233,7 @@ extern void sync_Arb_IDs(void); | |||
233 | extern void init_bsp_APIC(void); | 233 | extern void init_bsp_APIC(void); |
234 | extern void setup_local_APIC(void); | 234 | extern void setup_local_APIC(void); |
235 | extern void end_local_APIC_setup(void); | 235 | extern void end_local_APIC_setup(void); |
236 | extern void bsp_end_local_APIC_setup(void); | ||
236 | extern void init_apic_mappings(void); | 237 | extern void init_apic_mappings(void); |
237 | void register_lapic_address(unsigned long address); | 238 | void register_lapic_address(unsigned long address); |
238 | extern void setup_boot_APIC_clock(void); | 239 | extern void setup_boot_APIC_clock(void); |
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h index 6e6e7558e702..4564c8e28a33 100644 --- a/arch/x86/include/asm/cpu.h +++ b/arch/x86/include/asm/cpu.h | |||
@@ -32,6 +32,6 @@ extern void arch_unregister_cpu(int); | |||
32 | 32 | ||
33 | DECLARE_PER_CPU(int, cpu_state); | 33 | DECLARE_PER_CPU(int, cpu_state); |
34 | 34 | ||
35 | int __cpuinit mwait_usable(const struct cpuinfo_x86 *); | 35 | int mwait_usable(const struct cpuinfo_x86 *); |
36 | 36 | ||
37 | #endif /* _ASM_X86_CPU_H */ | 37 | #endif /* _ASM_X86_CPU_H */ |
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h index e2f6a99f14ab..cc29086e30cd 100644 --- a/arch/x86/include/asm/perf_event_p4.h +++ b/arch/x86/include/asm/perf_event_p4.h | |||
@@ -22,6 +22,7 @@ | |||
22 | 22 | ||
23 | #define ARCH_P4_CNTRVAL_BITS (40) | 23 | #define ARCH_P4_CNTRVAL_BITS (40) |
24 | #define ARCH_P4_CNTRVAL_MASK ((1ULL << ARCH_P4_CNTRVAL_BITS) - 1) | 24 | #define ARCH_P4_CNTRVAL_MASK ((1ULL << ARCH_P4_CNTRVAL_BITS) - 1) |
25 | #define ARCH_P4_UNFLAGGED_BIT ((1ULL) << (ARCH_P4_CNTRVAL_BITS - 1)) | ||
25 | 26 | ||
26 | #define P4_ESCR_EVENT_MASK 0x7e000000U | 27 | #define P4_ESCR_EVENT_MASK 0x7e000000U |
27 | #define P4_ESCR_EVENT_SHIFT 25 | 28 | #define P4_ESCR_EVENT_SHIFT 25 |
diff --git a/arch/x86/include/asm/smpboot_hooks.h b/arch/x86/include/asm/smpboot_hooks.h index 6c22bf353f26..725b77831993 100644 --- a/arch/x86/include/asm/smpboot_hooks.h +++ b/arch/x86/include/asm/smpboot_hooks.h | |||
@@ -34,7 +34,7 @@ static inline void smpboot_restore_warm_reset_vector(void) | |||
34 | */ | 34 | */ |
35 | CMOS_WRITE(0, 0xf); | 35 | CMOS_WRITE(0, 0xf); |
36 | 36 | ||
37 | *((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0; | 37 | *((volatile u32 *)phys_to_virt(apic->trampoline_phys_low)) = 0; |
38 | } | 38 | } |
39 | 39 | ||
40 | static inline void __init smpboot_setup_io_apic(void) | 40 | static inline void __init smpboot_setup_io_apic(void) |
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index b3a71137983a..3e6e2d68f761 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c | |||
@@ -72,6 +72,7 @@ u8 acpi_sci_flags __initdata; | |||
72 | int acpi_sci_override_gsi __initdata; | 72 | int acpi_sci_override_gsi __initdata; |
73 | int acpi_skip_timer_override __initdata; | 73 | int acpi_skip_timer_override __initdata; |
74 | int acpi_use_timer_override __initdata; | 74 | int acpi_use_timer_override __initdata; |
75 | int acpi_fix_pin2_polarity __initdata; | ||
75 | 76 | ||
76 | #ifdef CONFIG_X86_LOCAL_APIC | 77 | #ifdef CONFIG_X86_LOCAL_APIC |
77 | static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; | 78 | static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; |
@@ -415,10 +416,15 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header, | |||
415 | return 0; | 416 | return 0; |
416 | } | 417 | } |
417 | 418 | ||
418 | if (acpi_skip_timer_override && | 419 | if (intsrc->source_irq == 0 && intsrc->global_irq == 2) { |
419 | intsrc->source_irq == 0 && intsrc->global_irq == 2) { | 420 | if (acpi_skip_timer_override) { |
420 | printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n"); | 421 | printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n"); |
421 | return 0; | 422 | return 0; |
423 | } | ||
424 | if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) { | ||
425 | intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK; | ||
426 | printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n"); | ||
427 | } | ||
422 | } | 428 | } |
423 | 429 | ||
424 | mp_override_legacy_irq(intsrc->source_irq, | 430 | mp_override_legacy_irq(intsrc->source_irq, |
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index 4d9ebbab2230..68d1537b8c81 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c | |||
@@ -12,10 +12,8 @@ | |||
12 | #include <linux/cpumask.h> | 12 | #include <linux/cpumask.h> |
13 | #include <asm/segment.h> | 13 | #include <asm/segment.h> |
14 | #include <asm/desc.h> | 14 | #include <asm/desc.h> |
15 | |||
16 | #ifdef CONFIG_X86_32 | ||
17 | #include <asm/pgtable.h> | 15 | #include <asm/pgtable.h> |
18 | #endif | 16 | #include <asm/cacheflush.h> |
19 | 17 | ||
20 | #include "realmode/wakeup.h" | 18 | #include "realmode/wakeup.h" |
21 | #include "sleep.h" | 19 | #include "sleep.h" |
@@ -149,6 +147,15 @@ void __init acpi_reserve_wakeup_memory(void) | |||
149 | memblock_x86_reserve_range(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP"); | 147 | memblock_x86_reserve_range(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP"); |
150 | } | 148 | } |
151 | 149 | ||
150 | int __init acpi_configure_wakeup_memory(void) | ||
151 | { | ||
152 | if (acpi_realmode) | ||
153 | set_memory_x(acpi_realmode, WAKEUP_SIZE >> PAGE_SHIFT); | ||
154 | |||
155 | return 0; | ||
156 | } | ||
157 | arch_initcall(acpi_configure_wakeup_memory); | ||
158 | |||
152 | 159 | ||
153 | static int __init acpi_sleep_setup(char *str) | 160 | static int __init acpi_sleep_setup(char *str) |
154 | { | 161 | { |
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 123608531c8f..7038b95d363f 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c | |||
@@ -671,7 +671,7 @@ void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n) | |||
671 | 671 | ||
672 | atomic_set(&stop_machine_first, 1); | 672 | atomic_set(&stop_machine_first, 1); |
673 | wrote_text = 0; | 673 | wrote_text = 0; |
674 | stop_machine(stop_machine_text_poke, (void *)&tpp, NULL); | 674 | __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL); |
675 | } | 675 | } |
676 | 676 | ||
677 | #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) | 677 | #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) |
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c index 51ef31a89be9..51d4e1663066 100644 --- a/arch/x86/kernel/apb_timer.c +++ b/arch/x86/kernel/apb_timer.c | |||
@@ -284,7 +284,7 @@ static int __init apbt_clockevent_register(void) | |||
284 | memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device)); | 284 | memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device)); |
285 | 285 | ||
286 | if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) { | 286 | if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) { |
287 | apbt_clockevent.rating = APBT_CLOCKEVENT_RATING - 100; | 287 | adev->evt.rating = APBT_CLOCKEVENT_RATING - 100; |
288 | global_clock_event = &adev->evt; | 288 | global_clock_event = &adev->evt; |
289 | printk(KERN_DEBUG "%s clockevent registered as global\n", | 289 | printk(KERN_DEBUG "%s clockevent registered as global\n", |
290 | global_clock_event->name); | 290 | global_clock_event->name); |
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 06c196d7e59c..76b96d74978a 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
@@ -1381,12 +1381,17 @@ void __cpuinit end_local_APIC_setup(void) | |||
1381 | #endif | 1381 | #endif |
1382 | 1382 | ||
1383 | apic_pm_activate(); | 1383 | apic_pm_activate(); |
1384 | } | ||
1385 | |||
1386 | void __init bsp_end_local_APIC_setup(void) | ||
1387 | { | ||
1388 | end_local_APIC_setup(); | ||
1384 | 1389 | ||
1385 | /* | 1390 | /* |
1386 | * Now that local APIC setup is completed for BP, configure the fault | 1391 | * Now that local APIC setup is completed for BP, configure the fault |
1387 | * handling for interrupt remapping. | 1392 | * handling for interrupt remapping. |
1388 | */ | 1393 | */ |
1389 | if (!smp_processor_id() && intr_remapping_enabled) | 1394 | if (intr_remapping_enabled) |
1390 | enable_drhd_fault_handling(); | 1395 | enable_drhd_fault_handling(); |
1391 | 1396 | ||
1392 | } | 1397 | } |
@@ -1756,7 +1761,7 @@ int __init APIC_init_uniprocessor(void) | |||
1756 | enable_IO_APIC(); | 1761 | enable_IO_APIC(); |
1757 | #endif | 1762 | #endif |
1758 | 1763 | ||
1759 | end_local_APIC_setup(); | 1764 | bsp_end_local_APIC_setup(); |
1760 | 1765 | ||
1761 | #ifdef CONFIG_X86_IO_APIC | 1766 | #ifdef CONFIG_X86_IO_APIC |
1762 | if (smp_found_config && !skip_ioapic_setup && nr_ioapics) | 1767 | if (smp_found_config && !skip_ioapic_setup && nr_ioapics) |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 697dc34b7b87..ca9e2a3545a9 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -4002,6 +4002,9 @@ int mp_find_ioapic(u32 gsi) | |||
4002 | { | 4002 | { |
4003 | int i = 0; | 4003 | int i = 0; |
4004 | 4004 | ||
4005 | if (nr_ioapics == 0) | ||
4006 | return -1; | ||
4007 | |||
4005 | /* Find the IOAPIC that manages this GSI. */ | 4008 | /* Find the IOAPIC that manages this GSI. */ |
4006 | for (i = 0; i < nr_ioapics; i++) { | 4009 | for (i = 0; i < nr_ioapics; i++) { |
4007 | if ((gsi >= mp_gsi_routing[i].gsi_base) | 4010 | if ((gsi >= mp_gsi_routing[i].gsi_base) |
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c index f7a0993c1e7c..ff751a9f182b 100644 --- a/arch/x86/kernel/cpu/perf_event_p4.c +++ b/arch/x86/kernel/cpu/perf_event_p4.c | |||
@@ -770,9 +770,14 @@ static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc) | |||
770 | return 1; | 770 | return 1; |
771 | } | 771 | } |
772 | 772 | ||
773 | /* it might be unflagged overflow */ | 773 | /* |
774 | rdmsrl(hwc->event_base + hwc->idx, v); | 774 | * In some circumstances the overflow might issue an NMI but did |
775 | if (!(v & ARCH_P4_CNTRVAL_MASK)) | 775 | * not set P4_CCCR_OVF bit. Because a counter holds a negative value |
776 | * we simply check for high bit being set, if it's cleared it means | ||
777 | * the counter has reached zero value and continued counting before | ||
778 | * real NMI signal was received: | ||
779 | */ | ||
780 | if (!(v & ARCH_P4_UNFLAGGED_BIT)) | ||
776 | return 1; | 781 | return 1; |
777 | 782 | ||
778 | return 0; | 783 | return 0; |
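The ARCH_P4_UNFLAGGED_BIT test above relies on the counter being programmed with a
negative value: while it is still counting toward overflow the top bit of the 40-bit
value stays set, so a clear top bit means the counter already passed zero. A small
standalone check of that logic (counter_overflowed() is an illustrative helper):

#include <stdint.h>
#include <stdio.h>

#define CNTRVAL_BITS	40
#define CNTRVAL_MASK	((1ULL << CNTRVAL_BITS) - 1)
#define UNFLAGGED_BIT	(1ULL << (CNTRVAL_BITS - 1))

static int counter_overflowed(uint64_t raw)
{
	uint64_t v = raw & CNTRVAL_MASK;

	/* Top bit clear => the counter wrapped through zero. */
	return !(v & UNFLAGGED_BIT);
}

int main(void)
{
	/* Still counting: -1000 truncated to 40 bits keeps the top bit set. */
	printf("%d\n", counter_overflowed((uint64_t)-1000 & CNTRVAL_MASK));	/* 0 */
	/* Small positive value: already wrapped, treated as overflow. */
	printf("%d\n", counter_overflowed(5));					/* 1 */
	return 0;
}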
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 76b8cd953dee..9efbdcc56425 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c | |||
@@ -143,15 +143,10 @@ static void __init ati_bugs(int num, int slot, int func) | |||
143 | 143 | ||
144 | static u32 __init ati_sbx00_rev(int num, int slot, int func) | 144 | static u32 __init ati_sbx00_rev(int num, int slot, int func) |
145 | { | 145 | { |
146 | u32 old, d; | 146 | u32 d; |
147 | 147 | ||
148 | d = read_pci_config(num, slot, func, 0x70); | ||
149 | old = d; | ||
150 | d &= ~(1<<8); | ||
151 | write_pci_config(num, slot, func, 0x70, d); | ||
152 | d = read_pci_config(num, slot, func, 0x8); | 148 | d = read_pci_config(num, slot, func, 0x8); |
153 | d &= 0xff; | 149 | d &= 0xff; |
154 | write_pci_config(num, slot, func, 0x70, old); | ||
155 | 150 | ||
156 | return d; | 151 | return d; |
157 | } | 152 | } |
@@ -160,13 +155,16 @@ static void __init ati_bugs_contd(int num, int slot, int func) | |||
160 | { | 155 | { |
161 | u32 d, rev; | 156 | u32 d, rev; |
162 | 157 | ||
163 | if (acpi_use_timer_override) | ||
164 | return; | ||
165 | |||
166 | rev = ati_sbx00_rev(num, slot, func); | 158 | rev = ati_sbx00_rev(num, slot, func); |
159 | if (rev >= 0x40) | ||
160 | acpi_fix_pin2_polarity = 1; | ||
161 | |||
167 | if (rev > 0x13) | 162 | if (rev > 0x13) |
168 | return; | 163 | return; |
169 | 164 | ||
165 | if (acpi_use_timer_override) | ||
166 | return; | ||
167 | |||
170 | /* check for IRQ0 interrupt swap */ | 168 | /* check for IRQ0 interrupt swap */ |
171 | d = read_pci_config(num, slot, func, 0x64); | 169 | d = read_pci_config(num, slot, func, 0x64); |
172 | if (!(d & (1<<14))) | 170 | if (!(d & (1<<14))) |
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 52945da52a94..387b6a0c9e81 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c | |||
@@ -367,7 +367,8 @@ void fixup_irqs(void) | |||
367 | if (irr & (1 << (vector % 32))) { | 367 | if (irr & (1 << (vector % 32))) { |
368 | irq = __this_cpu_read(vector_irq[vector]); | 368 | irq = __this_cpu_read(vector_irq[vector]); |
369 | 369 | ||
370 | data = irq_get_irq_data(irq); | 370 | desc = irq_to_desc(irq); |
371 | data = &desc->irq_data; | ||
371 | raw_spin_lock(&desc->lock); | 372 | raw_spin_lock(&desc->lock); |
372 | if (data->chip->irq_retrigger) | 373 | if (data->chip->irq_retrigger) |
373 | data->chip->irq_retrigger(data); | 374 | data->chip->irq_retrigger(data); |
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index e764fc05d700..ff4554198981 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -92,21 +92,31 @@ void show_regs(struct pt_regs *regs) | |||
92 | 92 | ||
93 | void show_regs_common(void) | 93 | void show_regs_common(void) |
94 | { | 94 | { |
95 | const char *board, *product; | 95 | const char *vendor, *product, *board; |
96 | 96 | ||
97 | board = dmi_get_system_info(DMI_BOARD_NAME); | 97 | vendor = dmi_get_system_info(DMI_SYS_VENDOR); |
98 | if (!board) | 98 | if (!vendor) |
99 | board = ""; | 99 | vendor = ""; |
100 | product = dmi_get_system_info(DMI_PRODUCT_NAME); | 100 | product = dmi_get_system_info(DMI_PRODUCT_NAME); |
101 | if (!product) | 101 | if (!product) |
102 | product = ""; | 102 | product = ""; |
103 | 103 | ||
104 | /* Board Name is optional */ | ||
105 | board = dmi_get_system_info(DMI_BOARD_NAME); | ||
106 | |||
104 | printk(KERN_CONT "\n"); | 107 | printk(KERN_CONT "\n"); |
105 | printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s/%s\n", | 108 | printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s", |
106 | current->pid, current->comm, print_tainted(), | 109 | current->pid, current->comm, print_tainted(), |
107 | init_utsname()->release, | 110 | init_utsname()->release, |
108 | (int)strcspn(init_utsname()->version, " "), | 111 | (int)strcspn(init_utsname()->version, " "), |
109 | init_utsname()->version, board, product); | 112 | init_utsname()->version); |
113 | printk(KERN_CONT " "); | ||
114 | printk(KERN_CONT "%s %s", vendor, product); | ||
115 | if (board) { | ||
116 | printk(KERN_CONT "/"); | ||
117 | printk(KERN_CONT "%s", board); | ||
118 | } | ||
119 | printk(KERN_CONT "\n"); | ||
110 | } | 120 | } |
111 | 121 | ||
112 | void flush_thread(void) | 122 | void flush_thread(void) |
@@ -506,7 +516,7 @@ static void poll_idle(void) | |||
506 | #define MWAIT_ECX_EXTENDED_INFO 0x01 | 516 | #define MWAIT_ECX_EXTENDED_INFO 0x01 |
507 | #define MWAIT_EDX_C1 0xf0 | 517 | #define MWAIT_EDX_C1 0xf0 |
508 | 518 | ||
509 | int __cpuinit mwait_usable(const struct cpuinfo_x86 *c) | 519 | int mwait_usable(const struct cpuinfo_x86 *c) |
510 | { | 520 | { |
511 | u32 eax, ebx, ecx, edx; | 521 | u32 eax, ebx, ecx, edx; |
512 | 522 | ||
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index fc7aae1e2bc7..715037caeb43 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -285,6 +285,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { | |||
285 | DMI_MATCH(DMI_BOARD_NAME, "P4S800"), | 285 | DMI_MATCH(DMI_BOARD_NAME, "P4S800"), |
286 | }, | 286 | }, |
287 | }, | 287 | }, |
288 | { /* Handle problems with rebooting on VersaLogic Menlow boards */ | ||
289 | .callback = set_bios_reboot, | ||
290 | .ident = "VersaLogic Menlow based board", | ||
291 | .matches = { | ||
292 | DMI_MATCH(DMI_BOARD_VENDOR, "VersaLogic Corporation"), | ||
293 | DMI_MATCH(DMI_BOARD_NAME, "VersaLogic Menlow board"), | ||
294 | }, | ||
295 | }, | ||
288 | { } | 296 | { } |
289 | }; | 297 | }; |
290 | 298 | ||
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 03273b6c272c..08776a953487 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -1060,7 +1060,7 @@ static int __init smp_sanity_check(unsigned max_cpus) | |||
1060 | 1060 | ||
1061 | connect_bsp_APIC(); | 1061 | connect_bsp_APIC(); |
1062 | setup_local_APIC(); | 1062 | setup_local_APIC(); |
1063 | end_local_APIC_setup(); | 1063 | bsp_end_local_APIC_setup(); |
1064 | return -1; | 1064 | return -1; |
1065 | } | 1065 | } |
1066 | 1066 | ||
@@ -1137,7 +1137,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) | |||
1137 | if (!skip_ioapic_setup && nr_ioapics) | 1137 | if (!skip_ioapic_setup && nr_ioapics) |
1138 | enable_IO_APIC(); | 1138 | enable_IO_APIC(); |
1139 | 1139 | ||
1140 | end_local_APIC_setup(); | 1140 | bsp_end_local_APIC_setup(); |
1141 | 1141 | ||
1142 | map_cpu_to_logical_apicid(); | 1142 | map_cpu_to_logical_apicid(); |
1143 | 1143 | ||
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 25bd1bc5aad2..63fec1531e89 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -1150,8 +1150,8 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu) | |||
1150 | kvm_load_ldt(svm->host.ldt); | 1150 | kvm_load_ldt(svm->host.ldt); |
1151 | #ifdef CONFIG_X86_64 | 1151 | #ifdef CONFIG_X86_64 |
1152 | loadsegment(fs, svm->host.fs); | 1152 | loadsegment(fs, svm->host.fs); |
1153 | load_gs_index(svm->host.gs); | ||
1154 | wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs); | 1153 | wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs); |
1154 | load_gs_index(svm->host.gs); | ||
1155 | #else | 1155 | #else |
1156 | loadsegment(gs, svm->host.gs); | 1156 | loadsegment(gs, svm->host.gs); |
1157 | #endif | 1157 | #endif |
@@ -2777,6 +2777,8 @@ static int dr_interception(struct vcpu_svm *svm) | |||
2777 | kvm_register_write(&svm->vcpu, reg, val); | 2777 | kvm_register_write(&svm->vcpu, reg, val); |
2778 | } | 2778 | } |
2779 | 2779 | ||
2780 | skip_emulated_instruction(&svm->vcpu); | ||
2781 | |||
2780 | return 1; | 2782 | return 1; |
2781 | } | 2783 | } |
2782 | 2784 | ||
diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 381b09bb562b..a89043a3caa4 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c | |||
@@ -168,7 +168,15 @@ static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td, | |||
168 | * tree of blkg (instead of traversing through hash list all | 168 | * tree of blkg (instead of traversing through hash list all |
169 | * the time. | 169 | * the time. |
170 | */ | 170 | */ |
171 | tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key)); | 171 | |
172 | /* | ||
173 | * This is the common case when there are no blkio cgroups. | ||
174 | * Avoid lookup in this case | ||
175 | */ | ||
176 | if (blkcg == &blkio_root_cgroup) | ||
177 | tg = &td->root_tg; | ||
178 | else | ||
179 | tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key)); | ||
172 | 180 | ||
173 | /* Fill in device details for root group */ | 181 | /* Fill in device details for root group */ |
174 | if (tg && !tg->blkg.dev && bdi->dev && dev_name(bdi->dev)) { | 182 | if (tg && !tg->blkg.dev && bdi->dev && dev_name(bdi->dev)) { |
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 501ffdf0399c..7be4c7959625 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
@@ -599,7 +599,7 @@ cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg) | |||
599 | } | 599 | } |
600 | 600 | ||
601 | static inline unsigned | 601 | static inline unsigned |
602 | cfq_scaled_group_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) | 602 | cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) |
603 | { | 603 | { |
604 | unsigned slice = cfq_prio_to_slice(cfqd, cfqq); | 604 | unsigned slice = cfq_prio_to_slice(cfqd, cfqq); |
605 | if (cfqd->cfq_latency) { | 605 | if (cfqd->cfq_latency) { |
@@ -631,7 +631,7 @@ cfq_scaled_group_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) | |||
631 | static inline void | 631 | static inline void |
632 | cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) | 632 | cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) |
633 | { | 633 | { |
634 | unsigned slice = cfq_scaled_group_slice(cfqd, cfqq); | 634 | unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq); |
635 | 635 | ||
636 | cfqq->slice_start = jiffies; | 636 | cfqq->slice_start = jiffies; |
637 | cfqq->slice_end = jiffies + slice; | 637 | cfqq->slice_end = jiffies + slice; |
@@ -1671,7 +1671,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
1671 | */ | 1671 | */ |
1672 | if (timed_out) { | 1672 | if (timed_out) { |
1673 | if (cfq_cfqq_slice_new(cfqq)) | 1673 | if (cfq_cfqq_slice_new(cfqq)) |
1674 | cfqq->slice_resid = cfq_scaled_group_slice(cfqd, cfqq); | 1674 | cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq); |
1675 | else | 1675 | else |
1676 | cfqq->slice_resid = cfqq->slice_end - jiffies; | 1676 | cfqq->slice_resid = cfqq->slice_end - jiffies; |
1677 | cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid); | 1677 | cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid); |
@@ -3432,6 +3432,10 @@ static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq) | |||
3432 | { | 3432 | { |
3433 | struct cfq_io_context *cic = cfqd->active_cic; | 3433 | struct cfq_io_context *cic = cfqd->active_cic; |
3434 | 3434 | ||
3435 | /* If the queue already has requests, don't wait */ | ||
3436 | if (!RB_EMPTY_ROOT(&cfqq->sort_list)) | ||
3437 | return false; | ||
3438 | |||
3435 | /* If there are other queues in the group, don't wait */ | 3439 | /* If there are other queues in the group, don't wait */ |
3436 | if (cfqq->cfqg->nr_cfqq > 1) | 3440 | if (cfqq->cfqg->nr_cfqq > 1) |
3437 | return false; | 3441 | return false; |
diff --git a/block/genhd.c b/block/genhd.c index 6a5b772aa201..cbf1112a885c 100644 --- a/block/genhd.c +++ b/block/genhd.c | |||
@@ -1355,7 +1355,7 @@ int invalidate_partition(struct gendisk *disk, int partno) | |||
1355 | struct block_device *bdev = bdget_disk(disk, partno); | 1355 | struct block_device *bdev = bdget_disk(disk, partno); |
1356 | if (bdev) { | 1356 | if (bdev) { |
1357 | fsync_bdev(bdev); | 1357 | fsync_bdev(bdev); |
1358 | res = __invalidate_device(bdev); | 1358 | res = __invalidate_device(bdev, true); |
1359 | bdput(bdev); | 1359 | bdput(bdev); |
1360 | } | 1360 | } |
1361 | return res; | 1361 | return res; |
diff --git a/block/ioctl.c b/block/ioctl.c index 9049d460fa89..1124cd297263 100644 --- a/block/ioctl.c +++ b/block/ioctl.c | |||
@@ -294,9 +294,11 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, | |||
294 | return -EINVAL; | 294 | return -EINVAL; |
295 | if (get_user(n, (int __user *) arg)) | 295 | if (get_user(n, (int __user *) arg)) |
296 | return -EFAULT; | 296 | return -EFAULT; |
297 | if (!(mode & FMODE_EXCL) && | 297 | if (!(mode & FMODE_EXCL)) { |
298 | blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0) | 298 | bdgrab(bdev); |
299 | return -EBUSY; | 299 | if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0) |
300 | return -EBUSY; | ||
301 | } | ||
300 | ret = set_blocksize(bdev, n); | 302 | ret = set_blocksize(bdev, n); |
301 | if (!(mode & FMODE_EXCL)) | 303 | if (!(mode & FMODE_EXCL)) |
302 | blkdev_put(bdev, mode | FMODE_EXCL); | 304 | blkdev_put(bdev, mode | FMODE_EXCL); |
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c index e9562a7cb2f9..3b20a3401b64 100644 --- a/drivers/acpi/acpica/evxfgpe.c +++ b/drivers/acpi/acpica/evxfgpe.c | |||
@@ -212,37 +212,40 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device, | |||
212 | return_ACPI_STATUS(AE_BAD_PARAMETER); | 212 | return_ACPI_STATUS(AE_BAD_PARAMETER); |
213 | } | 213 | } |
214 | 214 | ||
215 | /* Validate wake_device is of type Device */ | ||
216 | |||
217 | device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device); | ||
218 | if (device_node->type != ACPI_TYPE_DEVICE) { | ||
219 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
220 | } | ||
221 | |||
222 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | 215 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); |
223 | 216 | ||
224 | /* Ensure that we have a valid GPE number */ | 217 | /* Ensure that we have a valid GPE number */ |
225 | 218 | ||
226 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | 219 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); |
227 | if (gpe_event_info) { | 220 | if (!gpe_event_info) { |
228 | /* | 221 | goto unlock_and_exit; |
229 | * If there is no method or handler for this GPE, then the | 222 | } |
230 | * wake_device will be notified whenever this GPE fires (aka | 223 | |
231 | * "implicit notify") Note: The GPE is assumed to be | 224 | /* |
232 | * level-triggered (for windows compatibility). | 225 | * If there is no method or handler for this GPE, then the |
233 | */ | 226 | * wake_device will be notified whenever this GPE fires (aka |
234 | if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == | 227 | * "implicit notify") Note: The GPE is assumed to be |
235 | ACPI_GPE_DISPATCH_NONE) { | 228 | * level-triggered (for windows compatibility). |
236 | gpe_event_info->flags = | 229 | */ |
237 | (ACPI_GPE_DISPATCH_NOTIFY | | 230 | if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == |
238 | ACPI_GPE_LEVEL_TRIGGERED); | 231 | ACPI_GPE_DISPATCH_NONE) && (wake_device != ACPI_ROOT_OBJECT)) { |
239 | gpe_event_info->dispatch.device_node = device_node; | ||
240 | } | ||
241 | 232 | ||
242 | gpe_event_info->flags |= ACPI_GPE_CAN_WAKE; | 233 | /* Validate wake_device is of type Device */ |
243 | status = AE_OK; | 234 | |
235 | device_node = ACPI_CAST_PTR(struct acpi_namespace_node, | ||
236 | wake_device); | ||
237 | if (device_node->type != ACPI_TYPE_DEVICE) { | ||
238 | goto unlock_and_exit; | ||
239 | } | ||
240 | gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY | | ||
241 | ACPI_GPE_LEVEL_TRIGGERED); | ||
242 | gpe_event_info->dispatch.device_node = device_node; | ||
244 | } | 243 | } |
245 | 244 | ||
245 | gpe_event_info->flags |= ACPI_GPE_CAN_WAKE; | ||
246 | status = AE_OK; | ||
247 | |||
248 | unlock_and_exit: | ||
246 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | 249 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); |
247 | return_ACPI_STATUS(status); | 250 | return_ACPI_STATUS(status); |
248 | } | 251 | } |
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index b0931818cf98..c90c76aa7f8b 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c | |||
@@ -636,17 +636,21 @@ EXPORT_SYMBOL(acpi_os_write_port); | |||
636 | acpi_status | 636 | acpi_status |
637 | acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width) | 637 | acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width) |
638 | { | 638 | { |
639 | u32 dummy; | ||
640 | void __iomem *virt_addr; | 639 | void __iomem *virt_addr; |
641 | int size = width / 8, unmap = 0; | 640 | unsigned int size = width / 8; |
641 | bool unmap = false; | ||
642 | u32 dummy; | ||
642 | 643 | ||
643 | rcu_read_lock(); | 644 | rcu_read_lock(); |
644 | virt_addr = acpi_map_vaddr_lookup(phys_addr, size); | 645 | virt_addr = acpi_map_vaddr_lookup(phys_addr, size); |
645 | rcu_read_unlock(); | ||
646 | if (!virt_addr) { | 646 | if (!virt_addr) { |
647 | rcu_read_unlock(); | ||
647 | virt_addr = acpi_os_ioremap(phys_addr, size); | 648 | virt_addr = acpi_os_ioremap(phys_addr, size); |
648 | unmap = 1; | 649 | if (!virt_addr) |
650 | return AE_BAD_ADDRESS; | ||
651 | unmap = true; | ||
649 | } | 652 | } |
653 | |||
650 | if (!value) | 654 | if (!value) |
651 | value = &dummy; | 655 | value = &dummy; |
652 | 656 | ||
@@ -666,6 +670,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width) | |||
666 | 670 | ||
667 | if (unmap) | 671 | if (unmap) |
668 | iounmap(virt_addr); | 672 | iounmap(virt_addr); |
673 | else | ||
674 | rcu_read_unlock(); | ||
669 | 675 | ||
670 | return AE_OK; | 676 | return AE_OK; |
671 | } | 677 | } |
@@ -674,14 +680,17 @@ acpi_status | |||
674 | acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width) | 680 | acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width) |
675 | { | 681 | { |
676 | void __iomem *virt_addr; | 682 | void __iomem *virt_addr; |
677 | int size = width / 8, unmap = 0; | 683 | unsigned int size = width / 8; |
684 | bool unmap = false; | ||
678 | 685 | ||
679 | rcu_read_lock(); | 686 | rcu_read_lock(); |
680 | virt_addr = acpi_map_vaddr_lookup(phys_addr, size); | 687 | virt_addr = acpi_map_vaddr_lookup(phys_addr, size); |
681 | rcu_read_unlock(); | ||
682 | if (!virt_addr) { | 688 | if (!virt_addr) { |
689 | rcu_read_unlock(); | ||
683 | virt_addr = acpi_os_ioremap(phys_addr, size); | 690 | virt_addr = acpi_os_ioremap(phys_addr, size); |
684 | unmap = 1; | 691 | if (!virt_addr) |
692 | return AE_BAD_ADDRESS; | ||
693 | unmap = true; | ||
685 | } | 694 | } |
686 | 695 | ||
687 | switch (width) { | 696 | switch (width) { |
@@ -700,6 +709,8 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width) | |||
700 | 709 | ||
701 | if (unmap) | 710 | if (unmap) |
702 | iounmap(virt_addr); | 711 | iounmap(virt_addr); |
712 | else | ||
713 | rcu_read_unlock(); | ||
703 | 714 | ||
704 | return AE_OK; | 715 | return AE_OK; |
705 | } | 716 | } |
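Aside: the osl.c hunks above move rcu_read_unlock() so the cached mapping stays protected for the whole access, and drop the lock only on the slow path that ioremaps a fresh mapping. A minimal userspace sketch of that lookup-or-map pattern follows; the pthread read lock, malloc() and every name here are illustrative stand-ins, not the kernel APIs:

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

static pthread_rwlock_t cache_lock = PTHREAD_RWLOCK_INITIALIZER;
static char *cached_buf;                        /* fast path: long-lived mapping */

static char *make_temp_buf(void)                /* slow path: per-call mapping */
{
        return malloc(64);
}

static int read_via_mapping(void)
{
        char *buf;
        bool unmap = false;

        pthread_rwlock_rdlock(&cache_lock);
        buf = cached_buf;
        if (!buf) {
                /* Miss: drop the read lock before the (possibly slow) fallback. */
                pthread_rwlock_unlock(&cache_lock);
                buf = make_temp_buf();
                if (!buf)
                        return -1;
                unmap = true;
        }

        buf[0] = 42;                            /* ... use the mapping ... */

        if (unmap)
                free(buf);                      /* slow path: release the temporary mapping */
        else
                pthread_rwlock_unlock(&cache_lock);     /* fast path: just drop the read lock */
        return 0;
}

int main(void)
{
        return read_via_mapping() ? 1 : 0;
}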
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 42d3d72dae85..5af3479714f6 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c | |||
@@ -82,6 +82,11 @@ long acpi_is_video_device(struct acpi_device *device) | |||
82 | if (!device) | 82 | if (!device) |
83 | return 0; | 83 | return 0; |
84 | 84 | ||
85 | /* Is this device able to support video switching ? */ | ||
86 | if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) || | ||
87 | ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy))) | ||
88 | video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING; | ||
89 | |||
85 | /* Is this device able to retrieve a video ROM ? */ | 90 | /* Is this device able to retrieve a video ROM ? */ |
86 | if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy))) | 91 | if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy))) |
87 | video_caps |= ACPI_VIDEO_ROM_AVAILABLE; | 92 | video_caps |= ACPI_VIDEO_ROM_AVAILABLE; |
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c index ed6501452507..7bfbe40bc43b 100644 --- a/drivers/acpi/wakeup.c +++ b/drivers/acpi/wakeup.c | |||
@@ -86,8 +86,12 @@ int __init acpi_wakeup_device_init(void) | |||
86 | struct acpi_device *dev = container_of(node, | 86 | struct acpi_device *dev = container_of(node, |
87 | struct acpi_device, | 87 | struct acpi_device, |
88 | wakeup_list); | 88 | wakeup_list); |
89 | if (device_can_wakeup(&dev->dev)) | 89 | if (device_can_wakeup(&dev->dev)) { |
90 | /* Button GPEs are supposed to be always enabled. */ | ||
91 | acpi_enable_gpe(dev->wakeup.gpe_device, | ||
92 | dev->wakeup.gpe_number); | ||
90 | device_set_wakeup_enable(&dev->dev, true); | 93 | device_set_wakeup_enable(&dev->dev, true); |
94 | } | ||
91 | } | 95 | } |
92 | mutex_unlock(&acpi_device_lock); | 96 | mutex_unlock(&acpi_device_lock); |
93 | return 0; | 97 | return 0; |
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index 73fb1c4f4cd4..25ef1a4556e6 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c | |||
@@ -866,8 +866,9 @@ static int popen(struct atm_vcc *vcc) | |||
866 | } | 866 | } |
867 | 867 | ||
868 | skb = alloc_skb(sizeof(*header), GFP_ATOMIC); | 868 | skb = alloc_skb(sizeof(*header), GFP_ATOMIC); |
869 | if (!skb && net_ratelimit()) { | 869 | if (!skb) { |
870 | dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n"); | 870 | if (net_ratelimit()) |
871 | dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n"); | ||
871 | return -ENOMEM; | 872 | return -ENOMEM; |
872 | } | 873 | } |
873 | header = (void *)skb_put(skb, sizeof(*header)); | 874 | header = (void *)skb_put(skb, sizeof(*header)); |
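The solos-pci fix above untangles the rate-limit test from the error return: an allocation failure must always yield -ENOMEM, and only the warning may be suppressed. A compilable sketch of the corrected shape, with ratelimited() and popen_like() as stand-ins for net_ratelimit() and popen():

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool ratelimited(void)                   /* stand-in for net_ratelimit() */
{
        return true;
}

static int popen_like(void)
{
        void *skb = malloc(32);                 /* stand-in for alloc_skb() */

        if (!skb) {
                if (ratelimited())              /* gate only the message ... */
                        fprintf(stderr, "failed to allocate sk_buff\n");
                return -ENOMEM;                 /* ... never the error return */
        }
        free(skb);
        return 0;
}

int main(void)
{
        return popen_like() ? 1 : 0;
}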
diff --git a/drivers/block/Makefile b/drivers/block/Makefile index d7f463d6312d..40528ba56d1b 100644 --- a/drivers/block/Makefile +++ b/drivers/block/Makefile | |||
@@ -39,4 +39,4 @@ obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o | |||
39 | obj-$(CONFIG_BLK_DEV_DRBD) += drbd/ | 39 | obj-$(CONFIG_BLK_DEV_DRBD) += drbd/ |
40 | obj-$(CONFIG_BLK_DEV_RBD) += rbd.o | 40 | obj-$(CONFIG_BLK_DEV_RBD) += rbd.o |
41 | 41 | ||
42 | swim_mod-objs := swim.o swim_asm.o | 42 | swim_mod-y := swim.o swim_asm.o |
diff --git a/drivers/block/aoe/Makefile b/drivers/block/aoe/Makefile index e76d997183c6..06ea82cdf27d 100644 --- a/drivers/block/aoe/Makefile +++ b/drivers/block/aoe/Makefile | |||
@@ -3,4 +3,4 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | obj-$(CONFIG_ATA_OVER_ETH) += aoe.o | 5 | obj-$(CONFIG_ATA_OVER_ETH) += aoe.o |
6 | aoe-objs := aoeblk.o aoechr.o aoecmd.o aoedev.o aoemain.o aoenet.o | 6 | aoe-y := aoeblk.o aoechr.o aoecmd.o aoedev.o aoemain.o aoenet.o |
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 516d5bbec2b6..9279272b3732 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c | |||
@@ -2833,7 +2833,7 @@ static int cciss_revalidate(struct gendisk *disk) | |||
2833 | sector_t total_size; | 2833 | sector_t total_size; |
2834 | InquiryData_struct *inq_buff = NULL; | 2834 | InquiryData_struct *inq_buff = NULL; |
2835 | 2835 | ||
2836 | for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) { | 2836 | for (logvol = 0; logvol <= h->highest_lun; logvol++) { |
2837 | if (!h->drv[logvol]) | 2837 | if (!h->drv[logvol]) |
2838 | continue; | 2838 | continue; |
2839 | if (memcmp(h->drv[logvol]->LunID, drv->LunID, | 2839 | if (memcmp(h->drv[logvol]->LunID, drv->LunID, |
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index b9ba04fc2b34..77fc76f8aea9 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c | |||
@@ -3281,7 +3281,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g, | |||
3281 | struct block_device *bdev = opened_bdev[cnt]; | 3281 | struct block_device *bdev = opened_bdev[cnt]; |
3282 | if (!bdev || ITYPE(drive_state[cnt].fd_device) != type) | 3282 | if (!bdev || ITYPE(drive_state[cnt].fd_device) != type) |
3283 | continue; | 3283 | continue; |
3284 | __invalidate_device(bdev); | 3284 | __invalidate_device(bdev, true); |
3285 | } | 3285 | } |
3286 | mutex_unlock(&open_lock); | 3286 | mutex_unlock(&open_lock); |
3287 | } else { | 3287 | } else { |
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 44e18c073c44..49e6a545eb63 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -1641,6 +1641,9 @@ out: | |||
1641 | 1641 | ||
1642 | static void loop_free(struct loop_device *lo) | 1642 | static void loop_free(struct loop_device *lo) |
1643 | { | 1643 | { |
1644 | if (!lo->lo_queue->queue_lock) | ||
1645 | lo->lo_queue->queue_lock = &lo->lo_queue->__queue_lock; | ||
1646 | |||
1644 | blk_cleanup_queue(lo->lo_queue); | 1647 | blk_cleanup_queue(lo->lo_queue); |
1645 | put_disk(lo->lo_disk); | 1648 | put_disk(lo->lo_disk); |
1646 | list_del(&lo->lo_list); | 1649 | list_del(&lo->lo_list); |
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index a32fb41246f8..e6fc716aca45 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
@@ -53,7 +53,6 @@ | |||
53 | #define DBG_BLKDEV 0x0100 | 53 | #define DBG_BLKDEV 0x0100 |
54 | #define DBG_RX 0x0200 | 54 | #define DBG_RX 0x0200 |
55 | #define DBG_TX 0x0400 | 55 | #define DBG_TX 0x0400 |
56 | static DEFINE_MUTEX(nbd_mutex); | ||
57 | static unsigned int debugflags; | 56 | static unsigned int debugflags; |
58 | #endif /* NDEBUG */ | 57 | #endif /* NDEBUG */ |
59 | 58 | ||
@@ -718,11 +717,9 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode, | |||
718 | dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n", | 717 | dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n", |
719 | lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg); | 718 | lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg); |
720 | 719 | ||
721 | mutex_lock(&nbd_mutex); | ||
722 | mutex_lock(&lo->tx_lock); | 720 | mutex_lock(&lo->tx_lock); |
723 | error = __nbd_ioctl(bdev, lo, cmd, arg); | 721 | error = __nbd_ioctl(bdev, lo, cmd, arg); |
724 | mutex_unlock(&lo->tx_lock); | 722 | mutex_unlock(&lo->tx_lock); |
725 | mutex_unlock(&nbd_mutex); | ||
726 | 723 | ||
727 | return error; | 724 | return error; |
728 | } | 725 | } |
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index a126e614601f..6dcd55a74c0a 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c | |||
@@ -39,6 +39,11 @@ static struct usb_device_id ath3k_table[] = { | |||
39 | /* Atheros AR3011 with sflash firmware*/ | 39 | /* Atheros AR3011 with sflash firmware*/ |
40 | { USB_DEVICE(0x0CF3, 0x3002) }, | 40 | { USB_DEVICE(0x0CF3, 0x3002) }, |
41 | 41 | ||
42 | /* Atheros AR9285 Malbec with sflash firmware */ | ||
43 | { USB_DEVICE(0x03F0, 0x311D) }, | ||
44 | |||
45 | /* Atheros AR5BBU12 with sflash firmware */ | ||
46 | { USB_DEVICE(0x0489, 0xE02C) }, | ||
42 | { } /* Terminating entry */ | 47 | { } /* Terminating entry */ |
43 | }; | 48 | }; |
44 | 49 | ||
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 1da773f899a2..700a3840fddc 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c | |||
@@ -102,6 +102,12 @@ static struct usb_device_id blacklist_table[] = { | |||
102 | /* Atheros 3011 with sflash firmware */ | 102 | /* Atheros 3011 with sflash firmware */ |
103 | { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, | 103 | { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, |
104 | 104 | ||
105 | /* Atheros AR9285 Malbec with sflash firmware */ | ||
106 | { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, | ||
107 | |||
108 | /* Atheros AR5BBU12 with sflash firmware */ | ||
109 | { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, | ||
110 | |||
105 | /* Broadcom BCM2035 */ | 111 | /* Broadcom BCM2035 */ |
106 | { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU }, | 112 | { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU }, |
107 | { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU }, | 113 | { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU }, |
@@ -826,7 +832,7 @@ static void btusb_work(struct work_struct *work) | |||
826 | 832 | ||
827 | if (hdev->conn_hash.sco_num > 0) { | 833 | if (hdev->conn_hash.sco_num > 0) { |
828 | if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) { | 834 | if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) { |
829 | err = usb_autopm_get_interface(data->isoc); | 835 | err = usb_autopm_get_interface(data->isoc ? data->isoc : data->intf); |
830 | if (err < 0) { | 836 | if (err < 0) { |
831 | clear_bit(BTUSB_ISOC_RUNNING, &data->flags); | 837 | clear_bit(BTUSB_ISOC_RUNNING, &data->flags); |
832 | usb_kill_anchored_urbs(&data->isoc_anchor); | 838 | usb_kill_anchored_urbs(&data->isoc_anchor); |
@@ -855,7 +861,7 @@ static void btusb_work(struct work_struct *work) | |||
855 | 861 | ||
856 | __set_isoc_interface(hdev, 0); | 862 | __set_isoc_interface(hdev, 0); |
857 | if (test_and_clear_bit(BTUSB_DID_ISO_RESUME, &data->flags)) | 863 | if (test_and_clear_bit(BTUSB_DID_ISO_RESUME, &data->flags)) |
858 | usb_autopm_put_interface(data->isoc); | 864 | usb_autopm_put_interface(data->isoc ? data->isoc : data->intf); |
859 | } | 865 | } |
860 | } | 866 | } |
861 | 867 | ||
@@ -1038,8 +1044,6 @@ static int btusb_probe(struct usb_interface *intf, | |||
1038 | 1044 | ||
1039 | usb_set_intfdata(intf, data); | 1045 | usb_set_intfdata(intf, data); |
1040 | 1046 | ||
1041 | usb_enable_autosuspend(interface_to_usbdev(intf)); | ||
1042 | |||
1043 | return 0; | 1047 | return 0; |
1044 | } | 1048 | } |
1045 | 1049 | ||
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 14033a36bcd0..e2c48a7eccff 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c | |||
@@ -409,7 +409,8 @@ int register_cdrom(struct cdrom_device_info *cdi) | |||
409 | } | 409 | } |
410 | 410 | ||
411 | ENSURE(drive_status, CDC_DRIVE_STATUS ); | 411 | ENSURE(drive_status, CDC_DRIVE_STATUS ); |
412 | ENSURE(media_changed, CDC_MEDIA_CHANGED); | 412 | if (cdo->check_events == NULL && cdo->media_changed == NULL) |
413 | *change_capability = ~(CDC_MEDIA_CHANGED | CDC_SELECT_DISC); | ||
413 | ENSURE(tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY); | 414 | ENSURE(tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY); |
414 | ENSURE(lock_door, CDC_LOCK); | 415 | ENSURE(lock_door, CDC_LOCK); |
415 | ENSURE(select_speed, CDC_SELECT_SPEED); | 416 | ENSURE(select_speed, CDC_SELECT_SPEED); |
diff --git a/drivers/char/Makefile b/drivers/char/Makefile index 5bc765d4c3ca..8238f89f73c9 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile | |||
@@ -30,6 +30,7 @@ obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o | |||
30 | obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o | 30 | obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o |
31 | obj-$(CONFIG_SX) += sx.o generic_serial.o | 31 | obj-$(CONFIG_SX) += sx.o generic_serial.o |
32 | obj-$(CONFIG_RIO) += rio/ generic_serial.o | 32 | obj-$(CONFIG_RIO) += rio/ generic_serial.o |
33 | obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o | ||
33 | obj-$(CONFIG_RAW_DRIVER) += raw.o | 34 | obj-$(CONFIG_RAW_DRIVER) += raw.o |
34 | obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o | 35 | obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o |
35 | obj-$(CONFIG_MSPEC) += mspec.o | 36 | obj-$(CONFIG_MSPEC) += mspec.o |
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index 9252e85706ef..780498d76581 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c | |||
@@ -773,18 +773,23 @@ int __init agp_amd64_init(void) | |||
773 | #else | 773 | #else |
774 | printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n"); | 774 | printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n"); |
775 | #endif | 775 | #endif |
776 | pci_unregister_driver(&agp_amd64_pci_driver); | ||
776 | return -ENODEV; | 777 | return -ENODEV; |
777 | } | 778 | } |
778 | 779 | ||
779 | /* First check that we have at least one AMD64 NB */ | 780 | /* First check that we have at least one AMD64 NB */ |
780 | if (!pci_dev_present(amd_nb_misc_ids)) | 781 | if (!pci_dev_present(amd_nb_misc_ids)) { |
782 | pci_unregister_driver(&agp_amd64_pci_driver); | ||
781 | return -ENODEV; | 783 | return -ENODEV; |
784 | } | ||
782 | 785 | ||
783 | /* Look for any AGP bridge */ | 786 | /* Look for any AGP bridge */ |
784 | agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table; | 787 | agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table; |
785 | err = driver_attach(&agp_amd64_pci_driver.driver); | 788 | err = driver_attach(&agp_amd64_pci_driver.driver); |
786 | if (err == 0 && agp_bridges_found == 0) | 789 | if (err == 0 && agp_bridges_found == 0) { |
790 | pci_unregister_driver(&agp_amd64_pci_driver); | ||
787 | err = -ENODEV; | 791 | err = -ENODEV; |
792 | } | ||
788 | } | 793 | } |
789 | return err; | 794 | return err; |
790 | } | 795 | } |
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h index c195bfeade11..5feebe2800e9 100644 --- a/drivers/char/agp/intel-agp.h +++ b/drivers/char/agp/intel-agp.h | |||
@@ -130,6 +130,7 @@ | |||
130 | #define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) | 130 | #define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) |
131 | 131 | ||
132 | #define I915_IFPADDR 0x60 | 132 | #define I915_IFPADDR 0x60 |
133 | #define I830_HIC 0x70 | ||
133 | 134 | ||
134 | /* Intel 965G registers */ | 135 | /* Intel 965G registers */ |
135 | #define I965_MSAC 0x62 | 136 | #define I965_MSAC 0x62 |
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index fab3d3265adb..0d09b537bb9a 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/pagemap.h> | 22 | #include <linux/pagemap.h> |
23 | #include <linux/agp_backend.h> | 23 | #include <linux/agp_backend.h> |
24 | #include <linux/delay.h> | ||
24 | #include <asm/smp.h> | 25 | #include <asm/smp.h> |
25 | #include "agp.h" | 26 | #include "agp.h" |
26 | #include "intel-agp.h" | 27 | #include "intel-agp.h" |
@@ -70,12 +71,8 @@ static struct _intel_private { | |||
70 | u32 __iomem *gtt; /* I915G */ | 71 | u32 __iomem *gtt; /* I915G */ |
71 | bool clear_fake_agp; /* on first access via agp, fill with scratch */ | 72 | bool clear_fake_agp; /* on first access via agp, fill with scratch */ |
72 | int num_dcache_entries; | 73 | int num_dcache_entries; |
73 | union { | 74 | void __iomem *i9xx_flush_page; |
74 | void __iomem *i9xx_flush_page; | ||
75 | void *i8xx_flush_page; | ||
76 | }; | ||
77 | char *i81x_gtt_table; | 75 | char *i81x_gtt_table; |
78 | struct page *i8xx_page; | ||
79 | struct resource ifp_resource; | 76 | struct resource ifp_resource; |
80 | int resource_valid; | 77 | int resource_valid; |
81 | struct page *scratch_page; | 78 | struct page *scratch_page; |
@@ -722,28 +719,6 @@ static int intel_fake_agp_fetch_size(void) | |||
722 | 719 | ||
723 | static void i830_cleanup(void) | 720 | static void i830_cleanup(void) |
724 | { | 721 | { |
725 | if (intel_private.i8xx_flush_page) { | ||
726 | kunmap(intel_private.i8xx_flush_page); | ||
727 | intel_private.i8xx_flush_page = NULL; | ||
728 | } | ||
729 | |||
730 | __free_page(intel_private.i8xx_page); | ||
731 | intel_private.i8xx_page = NULL; | ||
732 | } | ||
733 | |||
734 | static void intel_i830_setup_flush(void) | ||
735 | { | ||
736 | /* return if we've already set the flush mechanism up */ | ||
737 | if (intel_private.i8xx_page) | ||
738 | return; | ||
739 | |||
740 | intel_private.i8xx_page = alloc_page(GFP_KERNEL); | ||
741 | if (!intel_private.i8xx_page) | ||
742 | return; | ||
743 | |||
744 | intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page); | ||
745 | if (!intel_private.i8xx_flush_page) | ||
746 | i830_cleanup(); | ||
747 | } | 722 | } |
748 | 723 | ||
749 | /* The chipset_flush interface needs to get data that has already been | 724 | /* The chipset_flush interface needs to get data that has already been |
@@ -758,14 +733,27 @@ static void intel_i830_setup_flush(void) | |||
758 | */ | 733 | */ |
759 | static void i830_chipset_flush(void) | 734 | static void i830_chipset_flush(void) |
760 | { | 735 | { |
761 | unsigned int *pg = intel_private.i8xx_flush_page; | 736 | unsigned long timeout = jiffies + msecs_to_jiffies(1000); |
737 | |||
738 | /* Forcibly evict everything from the CPU write buffers. | ||
739 | * clflush appears to be insufficient. | ||
740 | */ | ||
741 | wbinvd_on_all_cpus(); | ||
742 | |||
743 | /* Now we've only seen documents for this magic bit on 855GM, | ||
744 | * we hope it exists for the other gen2 chipsets... | ||
745 | * | ||
746 | * Also works as advertised on my 845G. | ||
747 | */ | ||
748 | writel(readl(intel_private.registers+I830_HIC) | (1<<31), | ||
749 | intel_private.registers+I830_HIC); | ||
762 | 750 | ||
763 | memset(pg, 0, 1024); | 751 | while (readl(intel_private.registers+I830_HIC) & (1<<31)) { |
752 | if (time_after(jiffies, timeout)) | ||
753 | break; | ||
764 | 754 | ||
765 | if (cpu_has_clflush) | 755 | udelay(50); |
766 | clflush_cache_range(pg, 1024); | 756 | } |
767 | else if (wbinvd_on_all_cpus() != 0) | ||
768 | printk(KERN_ERR "Timed out waiting for cache flush.\n"); | ||
769 | } | 757 | } |
770 | 758 | ||
771 | static void i830_write_entry(dma_addr_t addr, unsigned int entry, | 759 | static void i830_write_entry(dma_addr_t addr, unsigned int entry, |
@@ -849,8 +837,6 @@ static int i830_setup(void) | |||
849 | 837 | ||
850 | intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE; | 838 | intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE; |
851 | 839 | ||
852 | intel_i830_setup_flush(); | ||
853 | |||
854 | return 0; | 840 | return 0; |
855 | } | 841 | } |
856 | 842 | ||
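The rewritten i830_chipset_flush() above polls the I830_HIC status bit against a one-second jiffies deadline instead of flushing through a scratch page. A small userspace sketch of the same deadline-polling idiom; hardware_busy(), the CLOCK_MONOTONIC arithmetic and the 1 s budget are stand-ins for the readl() loop, jiffies and msecs_to_jiffies(1000):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static bool hardware_busy(void)                 /* stand-in for the I830_HIC readl() test */
{
        static int polls_left = 3;              /* pretend the flush completes after 3 polls */
        return --polls_left > 0;
}

static long long now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

int main(void)
{
        long long deadline = now_ms() + 1000;   /* 1 s budget, like msecs_to_jiffies(1000) */

        while (hardware_busy()) {
                if (now_ms() > deadline) {
                        fprintf(stderr, "timed out waiting for flush\n");
                        return 1;
                }
                usleep(50);                     /* brief back-off, like udelay(50) */
        }
        return 0;
}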
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index b6ae6e9a9c5f..7855f9f45b8e 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c | |||
@@ -320,6 +320,7 @@ static int unload_when_empty = 1; | |||
320 | static int add_smi(struct smi_info *smi); | 320 | static int add_smi(struct smi_info *smi); |
321 | static int try_smi_init(struct smi_info *smi); | 321 | static int try_smi_init(struct smi_info *smi); |
322 | static void cleanup_one_si(struct smi_info *to_clean); | 322 | static void cleanup_one_si(struct smi_info *to_clean); |
323 | static void cleanup_ipmi_si(void); | ||
323 | 324 | ||
324 | static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list); | 325 | static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list); |
325 | static int register_xaction_notifier(struct notifier_block *nb) | 326 | static int register_xaction_notifier(struct notifier_block *nb) |
@@ -3450,16 +3451,7 @@ static int __devinit init_ipmi_si(void) | |||
3450 | mutex_lock(&smi_infos_lock); | 3451 | mutex_lock(&smi_infos_lock); |
3451 | if (unload_when_empty && list_empty(&smi_infos)) { | 3452 | if (unload_when_empty && list_empty(&smi_infos)) { |
3452 | mutex_unlock(&smi_infos_lock); | 3453 | mutex_unlock(&smi_infos_lock); |
3453 | #ifdef CONFIG_PCI | 3454 | cleanup_ipmi_si(); |
3454 | if (pci_registered) | ||
3455 | pci_unregister_driver(&ipmi_pci_driver); | ||
3456 | #endif | ||
3457 | |||
3458 | #ifdef CONFIG_PPC_OF | ||
3459 | if (of_registered) | ||
3460 | of_unregister_platform_driver(&ipmi_of_platform_driver); | ||
3461 | #endif | ||
3462 | driver_unregister(&ipmi_driver.driver); | ||
3463 | printk(KERN_WARNING PFX | 3455 | printk(KERN_WARNING PFX |
3464 | "Unable to find any System Interface(s)\n"); | 3456 | "Unable to find any System Interface(s)\n"); |
3465 | return -ENODEV; | 3457 | return -ENODEV; |
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c index 777181a2e603..bcbbc71febb7 100644 --- a/drivers/char/pcmcia/cm4000_cs.c +++ b/drivers/char/pcmcia/cm4000_cs.c | |||
@@ -830,8 +830,7 @@ static void monitor_card(unsigned long p) | |||
830 | test_bit(IS_ANY_T1, &dev->flags))) { | 830 | test_bit(IS_ANY_T1, &dev->flags))) { |
831 | DEBUGP(4, dev, "Perform AUTOPPS\n"); | 831 | DEBUGP(4, dev, "Perform AUTOPPS\n"); |
832 | set_bit(IS_AUTOPPS_ACT, &dev->flags); | 832 | set_bit(IS_AUTOPPS_ACT, &dev->flags); |
833 | ptsreq.protocol = ptsreq.protocol = | 833 | ptsreq.protocol = (0x01 << dev->proto); |
834 | (0x01 << dev->proto); | ||
835 | ptsreq.flags = 0x01; | 834 | ptsreq.flags = 0x01; |
836 | ptsreq.pts1 = 0x00; | 835 | ptsreq.pts1 = 0x00; |
837 | ptsreq.pts2 = 0x00; | 836 | ptsreq.pts2 = 0x00; |
diff --git a/drivers/char/pcmcia/ipwireless/main.c b/drivers/char/pcmcia/ipwireless/main.c index 94b8eb4d691d..444155a305ae 100644 --- a/drivers/char/pcmcia/ipwireless/main.c +++ b/drivers/char/pcmcia/ipwireless/main.c | |||
@@ -78,7 +78,6 @@ static void signalled_reboot_callback(void *callback_data) | |||
78 | static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data) | 78 | static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data) |
79 | { | 79 | { |
80 | struct ipw_dev *ipw = priv_data; | 80 | struct ipw_dev *ipw = priv_data; |
81 | struct resource *io_resource; | ||
82 | int ret; | 81 | int ret; |
83 | 82 | ||
84 | p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; | 83 | p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; |
@@ -92,9 +91,12 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data) | |||
92 | if (ret) | 91 | if (ret) |
93 | return ret; | 92 | return ret; |
94 | 93 | ||
95 | io_resource = request_region(p_dev->resource[0]->start, | 94 | if (!request_region(p_dev->resource[0]->start, |
96 | resource_size(p_dev->resource[0]), | 95 | resource_size(p_dev->resource[0]), |
97 | IPWIRELESS_PCCARD_NAME); | 96 | IPWIRELESS_PCCARD_NAME)) { |
97 | ret = -EBUSY; | ||
98 | goto exit; | ||
99 | } | ||
98 | 100 | ||
99 | p_dev->resource[2]->flags |= | 101 | p_dev->resource[2]->flags |= |
100 | WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE; | 102 | WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE; |
@@ -105,22 +107,25 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data) | |||
105 | 107 | ||
106 | ret = pcmcia_map_mem_page(p_dev, p_dev->resource[2], p_dev->card_addr); | 108 | ret = pcmcia_map_mem_page(p_dev, p_dev->resource[2], p_dev->card_addr); |
107 | if (ret != 0) | 109 | if (ret != 0) |
108 | goto exit2; | 110 | goto exit1; |
109 | 111 | ||
110 | ipw->is_v2_card = resource_size(p_dev->resource[2]) == 0x100; | 112 | ipw->is_v2_card = resource_size(p_dev->resource[2]) == 0x100; |
111 | 113 | ||
112 | ipw->attr_memory = ioremap(p_dev->resource[2]->start, | 114 | ipw->common_memory = ioremap(p_dev->resource[2]->start, |
113 | resource_size(p_dev->resource[2])); | 115 | resource_size(p_dev->resource[2])); |
114 | request_mem_region(p_dev->resource[2]->start, | 116 | if (!request_mem_region(p_dev->resource[2]->start, |
115 | resource_size(p_dev->resource[2]), | 117 | resource_size(p_dev->resource[2]), |
116 | IPWIRELESS_PCCARD_NAME); | 118 | IPWIRELESS_PCCARD_NAME)) { |
119 | ret = -EBUSY; | ||
120 | goto exit2; | ||
121 | } | ||
117 | 122 | ||
118 | p_dev->resource[3]->flags |= WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM | | 123 | p_dev->resource[3]->flags |= WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM | |
119 | WIN_ENABLE; | 124 | WIN_ENABLE; |
120 | p_dev->resource[3]->end = 0; /* this used to be 0x1000 */ | 125 | p_dev->resource[3]->end = 0; /* this used to be 0x1000 */ |
121 | ret = pcmcia_request_window(p_dev, p_dev->resource[3], 0); | 126 | ret = pcmcia_request_window(p_dev, p_dev->resource[3], 0); |
122 | if (ret != 0) | 127 | if (ret != 0) |
123 | goto exit2; | 128 | goto exit3; |
124 | 129 | ||
125 | ret = pcmcia_map_mem_page(p_dev, p_dev->resource[3], 0); | 130 | ret = pcmcia_map_mem_page(p_dev, p_dev->resource[3], 0); |
126 | if (ret != 0) | 131 | if (ret != 0) |
@@ -128,23 +133,28 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data) | |||
128 | 133 | ||
129 | ipw->attr_memory = ioremap(p_dev->resource[3]->start, | 134 | ipw->attr_memory = ioremap(p_dev->resource[3]->start, |
130 | resource_size(p_dev->resource[3])); | 135 | resource_size(p_dev->resource[3])); |
131 | request_mem_region(p_dev->resource[3]->start, | 136 | if (!request_mem_region(p_dev->resource[3]->start, |
132 | resource_size(p_dev->resource[3]), | 137 | resource_size(p_dev->resource[3]), |
133 | IPWIRELESS_PCCARD_NAME); | 138 | IPWIRELESS_PCCARD_NAME)) { |
139 | ret = -EBUSY; | ||
140 | goto exit4; | ||
141 | } | ||
134 | 142 | ||
135 | return 0; | 143 | return 0; |
136 | 144 | ||
145 | exit4: | ||
146 | iounmap(ipw->attr_memory); | ||
137 | exit3: | 147 | exit3: |
148 | release_mem_region(p_dev->resource[2]->start, | ||
149 | resource_size(p_dev->resource[2])); | ||
138 | exit2: | 150 | exit2: |
139 | if (ipw->common_memory) { | 151 | iounmap(ipw->common_memory); |
140 | release_mem_region(p_dev->resource[2]->start, | ||
141 | resource_size(p_dev->resource[2])); | ||
142 | iounmap(ipw->common_memory); | ||
143 | } | ||
144 | exit1: | 152 | exit1: |
145 | release_resource(io_resource); | 153 | release_region(p_dev->resource[0]->start, |
154 | resource_size(p_dev->resource[0])); | ||
155 | exit: | ||
146 | pcmcia_disable_device(p_dev); | 156 | pcmcia_disable_device(p_dev); |
147 | return -1; | 157 | return ret; |
148 | } | 158 | } |
149 | 159 | ||
150 | static int config_ipwireless(struct ipw_dev *ipw) | 160 | static int config_ipwireless(struct ipw_dev *ipw) |
@@ -219,6 +229,8 @@ exit: | |||
219 | 229 | ||
220 | static void release_ipwireless(struct ipw_dev *ipw) | 230 | static void release_ipwireless(struct ipw_dev *ipw) |
221 | { | 231 | { |
232 | release_region(ipw->link->resource[0]->start, | ||
233 | resource_size(ipw->link->resource[0])); | ||
222 | if (ipw->common_memory) { | 234 | if (ipw->common_memory) { |
223 | release_mem_region(ipw->link->resource[2]->start, | 235 | release_mem_region(ipw->link->resource[2]->start, |
224 | resource_size(ipw->link->resource[2])); | 236 | resource_size(ipw->link->resource[2])); |
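The ipwireless probe rework above turns unchecked request_region()/request_mem_region() calls into a goto-based unwind, where each acquisition gets a matching exit label that releases everything taken so far in reverse order. A standalone sketch of that error-handling shape, with malloc() standing in for the three resources:

#include <stdlib.h>

static int probe_like(void)
{
        char *io, *common, *attr;

        io = malloc(16);                        /* resource[0]: the I/O region */
        if (!io)
                goto exit;

        common = malloc(16);                    /* resource[2]: common memory window */
        if (!common)
                goto exit_io;

        attr = malloc(16);                      /* resource[3]: attribute memory window */
        if (!attr)
                goto exit_common;

        /* Success. Freed here only to keep the sketch leak-free; a real probe
         * keeps the resources claimed until the matching release path. */
        free(attr);
        free(common);
        free(io);
        return 0;

exit_common:
        free(common);
exit_io:
        free(io);
exit:
        return -1;
}

int main(void)
{
        return probe_like() ? 1 : 0;
}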
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index 36e0fa161c2b..1f46f1cd9225 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c | |||
@@ -364,14 +364,12 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, | |||
364 | tpm_protected_ordinal_duration[ordinal & | 364 | tpm_protected_ordinal_duration[ordinal & |
365 | TPM_PROTECTED_ORDINAL_MASK]; | 365 | TPM_PROTECTED_ORDINAL_MASK]; |
366 | 366 | ||
367 | if (duration_idx != TPM_UNDEFINED) { | 367 | if (duration_idx != TPM_UNDEFINED) |
368 | duration = chip->vendor.duration[duration_idx]; | 368 | duration = chip->vendor.duration[duration_idx]; |
369 | /* if duration is 0, it's because chip->vendor.duration wasn't */ | 369 | if (duration <= 0) |
370 | /* filled yet, so we set the lowest timeout just to give enough */ | ||
371 | /* time for tpm_get_timeouts() to succeed */ | ||
372 | return (duration <= 0 ? HZ : duration); | ||
373 | } else | ||
374 | return 2 * 60 * HZ; | 370 | return 2 * 60 * HZ; |
371 | else | ||
372 | return duration; | ||
375 | } | 373 | } |
376 | EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration); | 374 | EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration); |
377 | 375 | ||
diff --git a/drivers/tty/hvc/virtio_console.c b/drivers/char/virtio_console.c index 896a2ced1d27..490393186338 100644 --- a/drivers/tty/hvc/virtio_console.c +++ b/drivers/char/virtio_console.c | |||
@@ -1,6 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation | 2 | * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation |
3 | * Copyright (C) 2009, 2010 Red Hat, Inc. | 3 | * Copyright (C) 2009, 2010, 2011 Red Hat, Inc. |
4 | * Copyright (C) 2009, 2010, 2011 Amit Shah <amit.shah@redhat.com> | ||
4 | * | 5 | * |
5 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
@@ -31,7 +32,7 @@ | |||
31 | #include <linux/virtio_console.h> | 32 | #include <linux/virtio_console.h> |
32 | #include <linux/wait.h> | 33 | #include <linux/wait.h> |
33 | #include <linux/workqueue.h> | 34 | #include <linux/workqueue.h> |
34 | #include "hvc_console.h" | 35 | #include "../tty/hvc/hvc_console.h" |
35 | 36 | ||
36 | /* | 37 | /* |
37 | * This is a global struct for storing common data for all the devices | 38 | * This is a global struct for storing common data for all the devices |
@@ -1462,6 +1463,17 @@ static void control_work_handler(struct work_struct *work) | |||
1462 | spin_unlock(&portdev->cvq_lock); | 1463 | spin_unlock(&portdev->cvq_lock); |
1463 | } | 1464 | } |
1464 | 1465 | ||
1466 | static void out_intr(struct virtqueue *vq) | ||
1467 | { | ||
1468 | struct port *port; | ||
1469 | |||
1470 | port = find_port_by_vq(vq->vdev->priv, vq); | ||
1471 | if (!port) | ||
1472 | return; | ||
1473 | |||
1474 | wake_up_interruptible(&port->waitqueue); | ||
1475 | } | ||
1476 | |||
1465 | static void in_intr(struct virtqueue *vq) | 1477 | static void in_intr(struct virtqueue *vq) |
1466 | { | 1478 | { |
1467 | struct port *port; | 1479 | struct port *port; |
@@ -1566,7 +1578,7 @@ static int init_vqs(struct ports_device *portdev) | |||
1566 | */ | 1578 | */ |
1567 | j = 0; | 1579 | j = 0; |
1568 | io_callbacks[j] = in_intr; | 1580 | io_callbacks[j] = in_intr; |
1569 | io_callbacks[j + 1] = NULL; | 1581 | io_callbacks[j + 1] = out_intr; |
1570 | io_names[j] = "input"; | 1582 | io_names[j] = "input"; |
1571 | io_names[j + 1] = "output"; | 1583 | io_names[j + 1] = "output"; |
1572 | j += 2; | 1584 | j += 2; |
@@ -1580,7 +1592,7 @@ static int init_vqs(struct ports_device *portdev) | |||
1580 | for (i = 1; i < nr_ports; i++) { | 1592 | for (i = 1; i < nr_ports; i++) { |
1581 | j += 2; | 1593 | j += 2; |
1582 | io_callbacks[j] = in_intr; | 1594 | io_callbacks[j] = in_intr; |
1583 | io_callbacks[j + 1] = NULL; | 1595 | io_callbacks[j + 1] = out_intr; |
1584 | io_names[j] = "input"; | 1596 | io_names[j] = "input"; |
1585 | io_names[j + 1] = "output"; | 1597 | io_names[j + 1] = "output"; |
1586 | } | 1598 | } |
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 297f48b0cba9..07bca4970e50 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c | |||
@@ -79,6 +79,7 @@ | |||
79 | #include <linux/module.h> | 79 | #include <linux/module.h> |
80 | #include <linux/interrupt.h> | 80 | #include <linux/interrupt.h> |
81 | #include <linux/slab.h> | 81 | #include <linux/slab.h> |
82 | #include <linux/delay.h> | ||
82 | #include <linux/dmapool.h> | 83 | #include <linux/dmapool.h> |
83 | #include <linux/dmaengine.h> | 84 | #include <linux/dmaengine.h> |
84 | #include <linux/amba/bus.h> | 85 | #include <linux/amba/bus.h> |
@@ -235,16 +236,19 @@ static void pl08x_start_txd(struct pl08x_dma_chan *plchan, | |||
235 | } | 236 | } |
236 | 237 | ||
237 | /* | 238 | /* |
238 | * Overall DMAC remains enabled always. | 239 | * Pause the channel by setting the HALT bit. |
239 | * | 240 | * |
240 | * Disabling individual channels could lose data. | 241 | * For M->P transfers, pause the DMAC first and then stop the peripheral - |
242 | * the FIFO can only drain if the peripheral is still requesting data. | ||
243 | * (note: this can still timeout if the DMAC FIFO never drains of data.) | ||
241 | * | 244 | * |
242 | * Disable the peripheral DMA after disabling the DMAC in order to allow | 245 | * For P->M transfers, disable the peripheral first to stop it filling |
243 | * the DMAC FIFO to drain, and hence allow the channel to show inactive | 246 | * the DMAC FIFO, and then pause the DMAC. |
244 | */ | 247 | */ |
245 | static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch) | 248 | static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch) |
246 | { | 249 | { |
247 | u32 val; | 250 | u32 val; |
251 | int timeout; | ||
248 | 252 | ||
249 | /* Set the HALT bit and wait for the FIFO to drain */ | 253 | /* Set the HALT bit and wait for the FIFO to drain */ |
250 | val = readl(ch->base + PL080_CH_CONFIG); | 254 | val = readl(ch->base + PL080_CH_CONFIG); |
@@ -252,8 +256,13 @@ static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch) | |||
252 | writel(val, ch->base + PL080_CH_CONFIG); | 256 | writel(val, ch->base + PL080_CH_CONFIG); |
253 | 257 | ||
254 | /* Wait for channel inactive */ | 258 | /* Wait for channel inactive */ |
255 | while (pl08x_phy_channel_busy(ch)) | 259 | for (timeout = 1000; timeout; timeout--) { |
256 | cpu_relax(); | 260 | if (!pl08x_phy_channel_busy(ch)) |
261 | break; | ||
262 | udelay(1); | ||
263 | } | ||
264 | if (pl08x_phy_channel_busy(ch)) | ||
265 | pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id); | ||
257 | } | 266 | } |
258 | 267 | ||
259 | static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch) | 268 | static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch) |
@@ -267,19 +276,24 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch) | |||
267 | } | 276 | } |
268 | 277 | ||
269 | 278 | ||
270 | /* Stops the channel */ | 279 | /* |
271 | static void pl08x_stop_phy_chan(struct pl08x_phy_chan *ch) | 280 | * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and |
281 | * clears any pending interrupt status. This should not be used for | ||
282 | * an on-going transfer, but as a method of shutting down a channel | ||
283 | * (eg, when it's no longer used) or terminating a transfer. | ||
284 | */ | ||
285 | static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x, | ||
286 | struct pl08x_phy_chan *ch) | ||
272 | { | 287 | { |
273 | u32 val; | 288 | u32 val = readl(ch->base + PL080_CH_CONFIG); |
274 | 289 | ||
275 | pl08x_pause_phy_chan(ch); | 290 | val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK | |
291 | PL080_CONFIG_TC_IRQ_MASK); | ||
276 | 292 | ||
277 | /* Disable channel */ | ||
278 | val = readl(ch->base + PL080_CH_CONFIG); | ||
279 | val &= ~PL080_CONFIG_ENABLE; | ||
280 | val &= ~PL080_CONFIG_ERR_IRQ_MASK; | ||
281 | val &= ~PL080_CONFIG_TC_IRQ_MASK; | ||
282 | writel(val, ch->base + PL080_CH_CONFIG); | 293 | writel(val, ch->base + PL080_CH_CONFIG); |
294 | |||
295 | writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR); | ||
296 | writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR); | ||
283 | } | 297 | } |
284 | 298 | ||
285 | static inline u32 get_bytes_in_cctl(u32 cctl) | 299 | static inline u32 get_bytes_in_cctl(u32 cctl) |
@@ -404,13 +418,12 @@ static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x, | |||
404 | { | 418 | { |
405 | unsigned long flags; | 419 | unsigned long flags; |
406 | 420 | ||
421 | spin_lock_irqsave(&ch->lock, flags); | ||
422 | |||
407 | /* Stop the channel and clear its interrupts */ | 423 | /* Stop the channel and clear its interrupts */ |
408 | pl08x_stop_phy_chan(ch); | 424 | pl08x_terminate_phy_chan(pl08x, ch); |
409 | writel((1 << ch->id), pl08x->base + PL080_ERR_CLEAR); | ||
410 | writel((1 << ch->id), pl08x->base + PL080_TC_CLEAR); | ||
411 | 425 | ||
412 | /* Mark it as free */ | 426 | /* Mark it as free */ |
413 | spin_lock_irqsave(&ch->lock, flags); | ||
414 | ch->serving = NULL; | 427 | ch->serving = NULL; |
415 | spin_unlock_irqrestore(&ch->lock, flags); | 428 | spin_unlock_irqrestore(&ch->lock, flags); |
416 | } | 429 | } |
@@ -1449,7 +1462,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
1449 | plchan->state = PL08X_CHAN_IDLE; | 1462 | plchan->state = PL08X_CHAN_IDLE; |
1450 | 1463 | ||
1451 | if (plchan->phychan) { | 1464 | if (plchan->phychan) { |
1452 | pl08x_stop_phy_chan(plchan->phychan); | 1465 | pl08x_terminate_phy_chan(pl08x, plchan->phychan); |
1453 | 1466 | ||
1454 | /* | 1467 | /* |
1455 | * Mark physical channel as free and free any slave | 1468 | * Mark physical channel as free and free any slave |
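pl08x_pause_phy_chan() above replaces an unbounded cpu_relax() spin with a bounded retry loop plus a final re-check and error report. A userspace sketch of that bounded busy-wait; channel_busy() and usleep(1) stand in for pl08x_phy_channel_busy() and udelay(1):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool channel_busy(void)                  /* stand-in for pl08x_phy_channel_busy() */
{
        static int remaining = 5;               /* pretend the FIFO drains after 5 polls */
        return --remaining > 0;
}

int main(void)
{
        int timeout;

        for (timeout = 1000; timeout; timeout--) {
                if (!channel_busy())
                        break;
                usleep(1);                      /* stand-in for udelay(1) */
        }

        if (channel_busy())                     /* final re-check, as in the patch */
                fprintf(stderr, "timeout waiting for pause\n");

        return 0;
}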
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index e53d438142bb..e18eaabe92b9 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c | |||
@@ -49,6 +49,7 @@ struct imxdma_channel { | |||
49 | 49 | ||
50 | struct imxdma_engine { | 50 | struct imxdma_engine { |
51 | struct device *dev; | 51 | struct device *dev; |
52 | struct device_dma_parameters dma_parms; | ||
52 | struct dma_device dma_device; | 53 | struct dma_device dma_device; |
53 | struct imxdma_channel channel[MAX_DMA_CHANNELS]; | 54 | struct imxdma_channel channel[MAX_DMA_CHANNELS]; |
54 | }; | 55 | }; |
@@ -242,6 +243,21 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( | |||
242 | else | 243 | else |
243 | dmamode = DMA_MODE_WRITE; | 244 | dmamode = DMA_MODE_WRITE; |
244 | 245 | ||
246 | switch (imxdmac->word_size) { | ||
247 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
248 | if (sgl->length & 3 || sgl->dma_address & 3) | ||
249 | return NULL; | ||
250 | break; | ||
251 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
252 | if (sgl->length & 1 || sgl->dma_address & 1) | ||
253 | return NULL; | ||
254 | break; | ||
255 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
256 | break; | ||
257 | default: | ||
258 | return NULL; | ||
259 | } | ||
260 | |||
245 | ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len, | 261 | ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len, |
246 | dma_length, imxdmac->per_address, dmamode); | 262 | dma_length, imxdmac->per_address, dmamode); |
247 | if (ret) | 263 | if (ret) |
@@ -329,6 +345,9 @@ static int __init imxdma_probe(struct platform_device *pdev) | |||
329 | 345 | ||
330 | INIT_LIST_HEAD(&imxdma->dma_device.channels); | 346 | INIT_LIST_HEAD(&imxdma->dma_device.channels); |
331 | 347 | ||
348 | dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask); | ||
349 | dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask); | ||
350 | |||
332 | /* Initialize channel parameters */ | 351 | /* Initialize channel parameters */ |
333 | for (i = 0; i < MAX_DMA_CHANNELS; i++) { | 352 | for (i = 0; i < MAX_DMA_CHANNELS; i++) { |
334 | struct imxdma_channel *imxdmac = &imxdma->channel[i]; | 353 | struct imxdma_channel *imxdmac = &imxdma->channel[i]; |
@@ -346,11 +365,7 @@ static int __init imxdma_probe(struct platform_device *pdev) | |||
346 | imxdmac->imxdma = imxdma; | 365 | imxdmac->imxdma = imxdma; |
347 | spin_lock_init(&imxdmac->lock); | 366 | spin_lock_init(&imxdmac->lock); |
348 | 367 | ||
349 | dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask); | ||
350 | dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask); | ||
351 | |||
352 | imxdmac->chan.device = &imxdma->dma_device; | 368 | imxdmac->chan.device = &imxdma->dma_device; |
353 | imxdmac->chan.chan_id = i; | ||
354 | imxdmac->channel = i; | 369 | imxdmac->channel = i; |
355 | 370 | ||
356 | /* Add the channel to the DMAC list */ | 371 | /* Add the channel to the DMAC list */ |
@@ -370,6 +385,9 @@ static int __init imxdma_probe(struct platform_device *pdev) | |||
370 | 385 | ||
371 | platform_set_drvdata(pdev, imxdma); | 386 | platform_set_drvdata(pdev, imxdma); |
372 | 387 | ||
388 | imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms; | ||
389 | dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff); | ||
390 | |||
373 | ret = dma_async_device_register(&imxdma->dma_device); | 391 | ret = dma_async_device_register(&imxdma->dma_device); |
374 | if (ret) { | 392 | if (ret) { |
375 | dev_err(&pdev->dev, "unable to register\n"); | 393 | dev_err(&pdev->dev, "unable to register\n"); |
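imxdma_prep_slave_sg() above now rejects transfers whose length or DMA address is not aligned to the selected bus width. A compilable sketch of that per-width alignment check, with simplified fields in place of the scatterlist:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Reject a segment whose length or address is not a multiple of the bus width. */
static bool sg_aligned(uint32_t length, uint64_t dma_address, unsigned int bus_width)
{
        switch (bus_width) {
        case 4:
                return !(length & 3) && !(dma_address & 3);
        case 2:
                return !(length & 1) && !(dma_address & 1);
        case 1:
                return true;
        default:
                return false;                   /* unsupported bus width */
        }
}

int main(void)
{
        printf("%d\n", sg_aligned(64, 0x1000, 4));      /* 1: fully aligned */
        printf("%d\n", sg_aligned(63, 0x1000, 4));      /* 0: odd length    */
        printf("%d\n", sg_aligned(64, 0x1001, 2));      /* 0: odd address   */
        return 0;
}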
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index d5a5d4d9c19b..b6d1455fa936 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
@@ -230,7 +230,7 @@ struct sdma_engine; | |||
230 | * struct sdma_channel - housekeeping for a SDMA channel | 230 | * struct sdma_channel - housekeeping for a SDMA channel |
231 | * | 231 | * |
232 | * @sdma pointer to the SDMA engine for this channel | 232 | * @sdma pointer to the SDMA engine for this channel |
233 | * @channel the channel number, matches dmaengine chan_id | 233 | * @channel the channel number, matches dmaengine chan_id + 1 |
234 | * @direction transfer type. Needed for setting SDMA script | 234 | * @direction transfer type. Needed for setting SDMA script |
235 | * @peripheral_type Peripheral type. Needed for setting SDMA script | 235 | * @peripheral_type Peripheral type. Needed for setting SDMA script |
236 | * @event_id0 aka dma request line | 236 | * @event_id0 aka dma request line |
@@ -301,6 +301,7 @@ struct sdma_firmware_header { | |||
301 | 301 | ||
302 | struct sdma_engine { | 302 | struct sdma_engine { |
303 | struct device *dev; | 303 | struct device *dev; |
304 | struct device_dma_parameters dma_parms; | ||
304 | struct sdma_channel channel[MAX_DMA_CHANNELS]; | 305 | struct sdma_channel channel[MAX_DMA_CHANNELS]; |
305 | struct sdma_channel_control *channel_control; | 306 | struct sdma_channel_control *channel_control; |
306 | void __iomem *regs; | 307 | void __iomem *regs; |
@@ -449,7 +450,7 @@ static void sdma_handle_channel_loop(struct sdma_channel *sdmac) | |||
449 | if (bd->mode.status & BD_RROR) | 450 | if (bd->mode.status & BD_RROR) |
450 | sdmac->status = DMA_ERROR; | 451 | sdmac->status = DMA_ERROR; |
451 | else | 452 | else |
452 | sdmac->status = DMA_SUCCESS; | 453 | sdmac->status = DMA_IN_PROGRESS; |
453 | 454 | ||
454 | bd->mode.status |= BD_DONE; | 455 | bd->mode.status |= BD_DONE; |
455 | sdmac->buf_tail++; | 456 | sdmac->buf_tail++; |
@@ -770,15 +771,15 @@ static void sdma_enable_channel(struct sdma_engine *sdma, int channel) | |||
770 | __raw_writel(1 << channel, sdma->regs + SDMA_H_START); | 771 | __raw_writel(1 << channel, sdma->regs + SDMA_H_START); |
771 | } | 772 | } |
772 | 773 | ||
773 | static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdma) | 774 | static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdmac) |
774 | { | 775 | { |
775 | dma_cookie_t cookie = sdma->chan.cookie; | 776 | dma_cookie_t cookie = sdmac->chan.cookie; |
776 | 777 | ||
777 | if (++cookie < 0) | 778 | if (++cookie < 0) |
778 | cookie = 1; | 779 | cookie = 1; |
779 | 780 | ||
780 | sdma->chan.cookie = cookie; | 781 | sdmac->chan.cookie = cookie; |
781 | sdma->desc.cookie = cookie; | 782 | sdmac->desc.cookie = cookie; |
782 | 783 | ||
783 | return cookie; | 784 | return cookie; |
784 | } | 785 | } |
@@ -798,7 +799,7 @@ static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
798 | 799 | ||
799 | cookie = sdma_assign_cookie(sdmac); | 800 | cookie = sdma_assign_cookie(sdmac); |
800 | 801 | ||
801 | sdma_enable_channel(sdma, tx->chan->chan_id); | 802 | sdma_enable_channel(sdma, sdmac->channel); |
802 | 803 | ||
803 | spin_unlock_irq(&sdmac->lock); | 804 | spin_unlock_irq(&sdmac->lock); |
804 | 805 | ||
@@ -811,10 +812,6 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan) | |||
811 | struct imx_dma_data *data = chan->private; | 812 | struct imx_dma_data *data = chan->private; |
812 | int prio, ret; | 813 | int prio, ret; |
813 | 814 | ||
814 | /* No need to execute this for internal channel 0 */ | ||
815 | if (chan->chan_id == 0) | ||
816 | return 0; | ||
817 | |||
818 | if (!data) | 815 | if (!data) |
819 | return -EINVAL; | 816 | return -EINVAL; |
820 | 817 | ||
@@ -879,7 +876,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( | |||
879 | struct sdma_channel *sdmac = to_sdma_chan(chan); | 876 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
880 | struct sdma_engine *sdma = sdmac->sdma; | 877 | struct sdma_engine *sdma = sdmac->sdma; |
881 | int ret, i, count; | 878 | int ret, i, count; |
882 | int channel = chan->chan_id; | 879 | int channel = sdmac->channel; |
883 | struct scatterlist *sg; | 880 | struct scatterlist *sg; |
884 | 881 | ||
885 | if (sdmac->status == DMA_IN_PROGRESS) | 882 | if (sdmac->status == DMA_IN_PROGRESS) |
@@ -924,22 +921,33 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( | |||
924 | ret = -EINVAL; | 921 | ret = -EINVAL; |
925 | goto err_out; | 922 | goto err_out; |
926 | } | 923 | } |
927 | if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES) | 924 | |
925 | switch (sdmac->word_size) { | ||
926 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
928 | bd->mode.command = 0; | 927 | bd->mode.command = 0; |
929 | else | 928 | if (count & 3 || sg->dma_address & 3) |
930 | bd->mode.command = sdmac->word_size; | 929 | return NULL; |
930 | break; | ||
931 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
932 | bd->mode.command = 2; | ||
933 | if (count & 1 || sg->dma_address & 1) | ||
934 | return NULL; | ||
935 | break; | ||
936 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
937 | bd->mode.command = 1; | ||
938 | break; | ||
939 | default: | ||
940 | return NULL; | ||
941 | } | ||
931 | 942 | ||
932 | param = BD_DONE | BD_EXTD | BD_CONT; | 943 | param = BD_DONE | BD_EXTD | BD_CONT; |
933 | 944 | ||
934 | if (sdmac->flags & IMX_DMA_SG_LOOP) { | 945 | if (i + 1 == sg_len) { |
935 | param |= BD_INTR; | 946 | param |= BD_INTR; |
936 | if (i + 1 == sg_len) | 947 | param |= BD_LAST; |
937 | param |= BD_WRAP; | 948 | param &= ~BD_CONT; |
938 | } | 949 | } |
939 | 950 | ||
940 | if (i + 1 == sg_len) | ||
941 | param |= BD_INTR; | ||
942 | |||
943 | dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", | 951 | dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", |
944 | i, count, sg->dma_address, | 952 | i, count, sg->dma_address, |
945 | param & BD_WRAP ? "wrap" : "", | 953 | param & BD_WRAP ? "wrap" : "", |
@@ -953,6 +961,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( | |||
953 | 961 | ||
954 | return &sdmac->desc; | 962 | return &sdmac->desc; |
955 | err_out: | 963 | err_out: |
964 | sdmac->status = DMA_ERROR; | ||
956 | return NULL; | 965 | return NULL; |
957 | } | 966 | } |
958 | 967 | ||
@@ -963,7 +972,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( | |||
963 | struct sdma_channel *sdmac = to_sdma_chan(chan); | 972 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
964 | struct sdma_engine *sdma = sdmac->sdma; | 973 | struct sdma_engine *sdma = sdmac->sdma; |
965 | int num_periods = buf_len / period_len; | 974 | int num_periods = buf_len / period_len; |
966 | int channel = chan->chan_id; | 975 | int channel = sdmac->channel; |
967 | int ret, i = 0, buf = 0; | 976 | int ret, i = 0, buf = 0; |
968 | 977 | ||
969 | dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel); | 978 | dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel); |
@@ -1066,14 +1075,12 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan, | |||
1066 | { | 1075 | { |
1067 | struct sdma_channel *sdmac = to_sdma_chan(chan); | 1076 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
1068 | dma_cookie_t last_used; | 1077 | dma_cookie_t last_used; |
1069 | enum dma_status ret; | ||
1070 | 1078 | ||
1071 | last_used = chan->cookie; | 1079 | last_used = chan->cookie; |
1072 | 1080 | ||
1073 | ret = dma_async_is_complete(cookie, sdmac->last_completed, last_used); | ||
1074 | dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0); | 1081 | dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0); |
1075 | 1082 | ||
1076 | return ret; | 1083 | return sdmac->status; |
1077 | } | 1084 | } |
1078 | 1085 | ||
1079 | static void sdma_issue_pending(struct dma_chan *chan) | 1086 | static void sdma_issue_pending(struct dma_chan *chan) |
@@ -1135,7 +1142,7 @@ static int __init sdma_get_firmware(struct sdma_engine *sdma, | |||
1135 | /* download the RAM image for SDMA */ | 1142 | /* download the RAM image for SDMA */ |
1136 | sdma_load_script(sdma, ram_code, | 1143 | sdma_load_script(sdma, ram_code, |
1137 | header->ram_code_size, | 1144 | header->ram_code_size, |
1138 | sdma->script_addrs->ram_code_start_addr); | 1145 | addr->ram_code_start_addr); |
1139 | clk_disable(sdma->clk); | 1146 | clk_disable(sdma->clk); |
1140 | 1147 | ||
1141 | sdma_add_scripts(sdma, addr); | 1148 | sdma_add_scripts(sdma, addr); |
@@ -1237,7 +1244,6 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1237 | struct resource *iores; | 1244 | struct resource *iores; |
1238 | struct sdma_platform_data *pdata = pdev->dev.platform_data; | 1245 | struct sdma_platform_data *pdata = pdev->dev.platform_data; |
1239 | int i; | 1246 | int i; |
1240 | dma_cap_mask_t mask; | ||
1241 | struct sdma_engine *sdma; | 1247 | struct sdma_engine *sdma; |
1242 | 1248 | ||
1243 | sdma = kzalloc(sizeof(*sdma), GFP_KERNEL); | 1249 | sdma = kzalloc(sizeof(*sdma), GFP_KERNEL); |
@@ -1280,6 +1286,9 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1280 | 1286 | ||
1281 | sdma->version = pdata->sdma_version; | 1287 | sdma->version = pdata->sdma_version; |
1282 | 1288 | ||
1289 | dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask); | ||
1290 | dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask); | ||
1291 | |||
1283 | INIT_LIST_HEAD(&sdma->dma_device.channels); | 1292 | INIT_LIST_HEAD(&sdma->dma_device.channels); |
1284 | /* Initialize channel parameters */ | 1293 | /* Initialize channel parameters */ |
1285 | for (i = 0; i < MAX_DMA_CHANNELS; i++) { | 1294 | for (i = 0; i < MAX_DMA_CHANNELS; i++) { |
@@ -1288,15 +1297,17 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1288 | sdmac->sdma = sdma; | 1297 | sdmac->sdma = sdma; |
1289 | spin_lock_init(&sdmac->lock); | 1298 | spin_lock_init(&sdmac->lock); |
1290 | 1299 | ||
1291 | dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask); | ||
1292 | dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask); | ||
1293 | |||
1294 | sdmac->chan.device = &sdma->dma_device; | 1300 | sdmac->chan.device = &sdma->dma_device; |
1295 | sdmac->chan.chan_id = i; | ||
1296 | sdmac->channel = i; | 1301 | sdmac->channel = i; |
1297 | 1302 | ||
1298 | /* Add the channel to the DMAC list */ | 1303 | /* |
1299 | list_add_tail(&sdmac->chan.device_node, &sdma->dma_device.channels); | 1304 | * Add the channel to the DMAC list. Do not add channel 0 though |
1305 | * because we need it internally in the SDMA driver. This also means | ||
1306 | * that channel 0 in dmaengine counting matches sdma channel 1. | ||
1307 | */ | ||
1308 | if (i) | ||
1309 | list_add_tail(&sdmac->chan.device_node, | ||
1310 | &sdma->dma_device.channels); | ||
1300 | } | 1311 | } |
1301 | 1312 | ||
1302 | ret = sdma_init(sdma); | 1313 | ret = sdma_init(sdma); |
@@ -1317,6 +1328,8 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1317 | sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; | 1328 | sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; |
1318 | sdma->dma_device.device_control = sdma_control; | 1329 | sdma->dma_device.device_control = sdma_control; |
1319 | sdma->dma_device.device_issue_pending = sdma_issue_pending; | 1330 | sdma->dma_device.device_issue_pending = sdma_issue_pending; |
1331 | sdma->dma_device.dev->dma_parms = &sdma->dma_parms; | ||
1332 | dma_set_max_seg_size(sdma->dma_device.dev, 65535); | ||
1320 | 1333 | ||
1321 | ret = dma_async_device_register(&sdma->dma_device); | 1334 | ret = dma_async_device_register(&sdma->dma_device); |
1322 | if (ret) { | 1335 | if (ret) { |
@@ -1324,13 +1337,6 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1324 | goto err_init; | 1337 | goto err_init; |
1325 | } | 1338 | } |
1326 | 1339 | ||
1327 | /* request channel 0. This is an internal control channel | ||
1328 | * to the SDMA engine and not available to clients. | ||
1329 | */ | ||
1330 | dma_cap_zero(mask); | ||
1331 | dma_cap_set(DMA_SLAVE, mask); | ||
1332 | dma_request_channel(mask, NULL, NULL); | ||
1333 | |||
1334 | dev_info(sdma->dev, "initialized\n"); | 1340 | dev_info(sdma->dev, "initialized\n"); |
1335 | 1341 | ||
1336 | return 0; | 1342 | return 0; |
@@ -1348,7 +1354,7 @@ err_clk: | |||
1348 | err_request_region: | 1354 | err_request_region: |
1349 | err_irq: | 1355 | err_irq: |
1350 | kfree(sdma); | 1356 | kfree(sdma); |
1351 | return 0; | 1357 | return ret; |
1352 | } | 1358 | } |
1353 | 1359 | ||
1354 | static int __exit sdma_remove(struct platform_device *pdev) | 1360 | static int __exit sdma_remove(struct platform_device *pdev) |
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index cb26ee9773d6..c1a125e7d1df 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c | |||
@@ -1145,29 +1145,6 @@ static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan, | |||
1145 | reg = idmac_read_icreg(ipu, IDMAC_CHA_EN); | 1145 | reg = idmac_read_icreg(ipu, IDMAC_CHA_EN); |
1146 | idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN); | 1146 | idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN); |
1147 | 1147 | ||
1148 | /* | ||
1149 | * Problem (observed with channel DMAIC_7): after enabling the channel | ||
1150 | * and initialising buffers, there comes an interrupt with current still | ||
1151 | * pointing at buffer 0, whereas it should use buffer 0 first and only | ||
1152 | * generate an interrupt when it is done, then current should already | ||
1153 | * point to buffer 1. This spurious interrupt also comes on channel | ||
1154 | * DMASDC_0. With DMAIC_7 normally, if we just leave the ISR after the | ||
1155 | * first interrupt, there comes the second with current correctly | ||
1156 | * pointing to buffer 1 this time. But sometimes this second interrupt | ||
1157 | * doesn't come and the channel hangs. Clearing BUFx_RDY when disabling | ||
1158 | * the channel seems to prevent the channel from hanging, but it doesn't | ||
1159 | * prevent the spurious interrupt. This might also be unsafe. Think | ||
1160 | * about the IDMAC controller trying to switch to a buffer, when we | ||
1161 | * clear the ready bit, and re-enable it a moment later. | ||
1162 | */ | ||
1163 | reg = idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY); | ||
1164 | idmac_write_ipureg(ipu, 0, IPU_CHA_BUF0_RDY); | ||
1165 | idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF0_RDY); | ||
1166 | |||
1167 | reg = idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY); | ||
1168 | idmac_write_ipureg(ipu, 0, IPU_CHA_BUF1_RDY); | ||
1169 | idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF1_RDY); | ||
1170 | |||
1171 | spin_unlock_irqrestore(&ipu->lock, flags); | 1148 | spin_unlock_irqrestore(&ipu->lock, flags); |
1172 | 1149 | ||
1173 | return 0; | 1150 | return 0; |
@@ -1246,33 +1223,6 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) | |||
1246 | 1223 | ||
1247 | /* Other interrupts do not interfere with this channel */ | 1224 | /* Other interrupts do not interfere with this channel */ |
1248 | spin_lock(&ichan->lock); | 1225 | spin_lock(&ichan->lock); |
1249 | if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 && | ||
1250 | ((curbuf >> chan_id) & 1) == ichan->active_buffer && | ||
1251 | !list_is_last(ichan->queue.next, &ichan->queue))) { | ||
1252 | int i = 100; | ||
1253 | |||
1254 | /* This doesn't help. See comment in ipu_disable_channel() */ | ||
1255 | while (--i) { | ||
1256 | curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF); | ||
1257 | if (((curbuf >> chan_id) & 1) != ichan->active_buffer) | ||
1258 | break; | ||
1259 | cpu_relax(); | ||
1260 | } | ||
1261 | |||
1262 | if (!i) { | ||
1263 | spin_unlock(&ichan->lock); | ||
1264 | dev_dbg(dev, | ||
1265 | "IRQ on active buffer on channel %x, active " | ||
1266 | "%d, ready %x, %x, current %x!\n", chan_id, | ||
1267 | ichan->active_buffer, ready0, ready1, curbuf); | ||
1268 | return IRQ_NONE; | ||
1269 | } else | ||
1270 | dev_dbg(dev, | ||
1271 | "Buffer deactivated on channel %x, active " | ||
1272 | "%d, ready %x, %x, current %x, rest %d!\n", chan_id, | ||
1273 | ichan->active_buffer, ready0, ready1, curbuf, i); | ||
1274 | } | ||
1275 | |||
1276 | if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) || | 1226 | if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) || |
1277 | (!ichan->active_buffer && (ready0 >> chan_id) & 1) | 1227 | (!ichan->active_buffer && (ready0 >> chan_id) & 1) |
1278 | )) { | 1228 | )) { |
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 4a5ecc58025d..23e03554f0d3 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c | |||
@@ -826,8 +826,6 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan) | |||
826 | /* Display and decode various NB registers for debug purposes. */ | 826 | /* Display and decode various NB registers for debug purposes. */ |
827 | static void amd64_dump_misc_regs(struct amd64_pvt *pvt) | 827 | static void amd64_dump_misc_regs(struct amd64_pvt *pvt) |
828 | { | 828 | { |
829 | int ganged; | ||
830 | |||
831 | debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap); | 829 | debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap); |
832 | 830 | ||
833 | debugf1(" NB two channel DRAM capable: %s\n", | 831 | debugf1(" NB two channel DRAM capable: %s\n", |
@@ -851,28 +849,19 @@ static void amd64_dump_misc_regs(struct amd64_pvt *pvt) | |||
851 | debugf1(" DramHoleValid: %s\n", | 849 | debugf1(" DramHoleValid: %s\n", |
852 | (pvt->dhar & DHAR_VALID) ? "yes" : "no"); | 850 | (pvt->dhar & DHAR_VALID) ? "yes" : "no"); |
853 | 851 | ||
852 | amd64_debug_display_dimm_sizes(0, pvt); | ||
853 | |||
854 | /* everything below this point is Fam10h and above */ | 854 | /* everything below this point is Fam10h and above */ |
855 | if (boot_cpu_data.x86 == 0xf) { | 855 | if (boot_cpu_data.x86 == 0xf) |
856 | amd64_debug_display_dimm_sizes(0, pvt); | ||
857 | return; | 856 | return; |
858 | } | 857 | |
858 | amd64_debug_display_dimm_sizes(1, pvt); | ||
859 | 859 | ||
860 | amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4")); | 860 | amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4")); |
861 | 861 | ||
862 | /* Only if NOT ganged does dclr1 have valid info */ | 862 | /* Only if NOT ganged does dclr1 have valid info */ |
863 | if (!dct_ganging_enabled(pvt)) | 863 | if (!dct_ganging_enabled(pvt)) |
864 | amd64_dump_dramcfg_low(pvt->dclr1, 1); | 864 | amd64_dump_dramcfg_low(pvt->dclr1, 1); |
865 | |||
866 | /* | ||
867 | * Determine if ganged and then dump memory sizes for first controller, | ||
868 | * and if NOT ganged dump info for 2nd controller. | ||
869 | */ | ||
870 | ganged = dct_ganging_enabled(pvt); | ||
871 | |||
872 | amd64_debug_display_dimm_sizes(0, pvt); | ||
873 | |||
874 | if (!ganged) | ||
875 | amd64_debug_display_dimm_sizes(1, pvt); | ||
876 | } | 865 | } |
877 | 866 | ||
878 | /* Read in both of DBAM registers */ | 867 | /* Read in both of DBAM registers */ |
@@ -1644,11 +1633,10 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt) | |||
1644 | WARN_ON(ctrl != 0); | 1633 | WARN_ON(ctrl != 0); |
1645 | } | 1634 | } |
1646 | 1635 | ||
1647 | debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", | 1636 | dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0; |
1648 | ctrl, ctrl ? pvt->dbam1 : pvt->dbam0); | 1637 | dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dcsb1 : pvt->dcsb0; |
1649 | 1638 | ||
1650 | dbam = ctrl ? pvt->dbam1 : pvt->dbam0; | 1639 | debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam); |
1651 | dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0; | ||
1652 | 1640 | ||
1653 | edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl); | 1641 | edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl); |
1654 | 1642 | ||
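The dump routine now selects the DCT1 copies of the DBAM/DCSB registers only when a second, unganged controller is actually in use; otherwise it falls back to the DCT0 copies. A hedged sketch of that selection, reusing the field names and the dct_ganging_enabled() helper visible in the hunk:

/* Pick the Bank Address Mapping and Chip Select Base registers for a DCT.
 * When the two DCTs are ganged, DCT1 has no independent copies, so the
 * DCT0 registers are the only valid source in that case.
 */
static void pick_dct_regs(struct amd64_pvt *pvt, int ctrl, u32 *dbam, u32 *dcsb)
{
	bool use_dct1 = ctrl && !dct_ganging_enabled(pvt);

	*dbam = use_dct1 ? pvt->dbam1 : pvt->dbam0;
	*dcsb = use_dct1 ? pvt->dcsb1 : pvt->dcsb0;
}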
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index e28e41668177..bcb1126e3d00 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c | |||
@@ -378,10 +378,17 @@ static void __init print_filtered(const char *info) | |||
378 | 378 | ||
379 | static void __init dmi_dump_ids(void) | 379 | static void __init dmi_dump_ids(void) |
380 | { | 380 | { |
381 | const char *board; /* Board Name is optional */ | ||
382 | |||
381 | printk(KERN_DEBUG "DMI: "); | 383 | printk(KERN_DEBUG "DMI: "); |
382 | print_filtered(dmi_get_system_info(DMI_BOARD_NAME)); | 384 | print_filtered(dmi_get_system_info(DMI_SYS_VENDOR)); |
383 | printk(KERN_CONT "/"); | 385 | printk(KERN_CONT " "); |
384 | print_filtered(dmi_get_system_info(DMI_PRODUCT_NAME)); | 386 | print_filtered(dmi_get_system_info(DMI_PRODUCT_NAME)); |
387 | board = dmi_get_system_info(DMI_BOARD_NAME); | ||
388 | if (board) { | ||
389 | printk(KERN_CONT "/"); | ||
390 | print_filtered(board); | ||
391 | } | ||
385 | printk(KERN_CONT ", BIOS "); | 392 | printk(KERN_CONT ", BIOS "); |
386 | print_filtered(dmi_get_system_info(DMI_BIOS_VERSION)); | 393 | print_filtered(dmi_get_system_info(DMI_BIOS_VERSION)); |
387 | printk(KERN_CONT " "); | 394 | printk(KERN_CONT " "); |
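dmi_dump_ids() now leads with the system vendor, separates it from the product name with a space, and only appends "/<board>" when the optional Board Name string is present. Restated as straight C, free of the diff columns and using the same helpers that appear in the hunk:

static void __init dmi_dump_ids_example(void)
{
	const char *board = dmi_get_system_info(DMI_BOARD_NAME);	/* may be NULL: Board Name is optional */

	printk(KERN_DEBUG "DMI: ");
	print_filtered(dmi_get_system_info(DMI_SYS_VENDOR));
	printk(KERN_CONT " ");
	print_filtered(dmi_get_system_info(DMI_PRODUCT_NAME));
	if (board) {
		printk(KERN_CONT "/");
		print_filtered(board);
	}
	printk(KERN_CONT ", BIOS ");
	print_filtered(dmi_get_system_info(DMI_BIOS_VERSION));
}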
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c index a261972f603d..b473429eee75 100644 --- a/drivers/gpio/pca953x.c +++ b/drivers/gpio/pca953x.c | |||
@@ -60,6 +60,7 @@ struct pca953x_chip { | |||
60 | unsigned gpio_start; | 60 | unsigned gpio_start; |
61 | uint16_t reg_output; | 61 | uint16_t reg_output; |
62 | uint16_t reg_direction; | 62 | uint16_t reg_direction; |
63 | struct mutex i2c_lock; | ||
63 | 64 | ||
64 | #ifdef CONFIG_GPIO_PCA953X_IRQ | 65 | #ifdef CONFIG_GPIO_PCA953X_IRQ |
65 | struct mutex irq_lock; | 66 | struct mutex irq_lock; |
@@ -119,13 +120,17 @@ static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off) | |||
119 | 120 | ||
120 | chip = container_of(gc, struct pca953x_chip, gpio_chip); | 121 | chip = container_of(gc, struct pca953x_chip, gpio_chip); |
121 | 122 | ||
123 | mutex_lock(&chip->i2c_lock); | ||
122 | reg_val = chip->reg_direction | (1u << off); | 124 | reg_val = chip->reg_direction | (1u << off); |
123 | ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val); | 125 | ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val); |
124 | if (ret) | 126 | if (ret) |
125 | return ret; | 127 | goto exit; |
126 | 128 | ||
127 | chip->reg_direction = reg_val; | 129 | chip->reg_direction = reg_val; |
128 | return 0; | 130 | ret = 0; |
131 | exit: | ||
132 | mutex_unlock(&chip->i2c_lock); | ||
133 | return ret; | ||
129 | } | 134 | } |
130 | 135 | ||
131 | static int pca953x_gpio_direction_output(struct gpio_chip *gc, | 136 | static int pca953x_gpio_direction_output(struct gpio_chip *gc, |
@@ -137,6 +142,7 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc, | |||
137 | 142 | ||
138 | chip = container_of(gc, struct pca953x_chip, gpio_chip); | 143 | chip = container_of(gc, struct pca953x_chip, gpio_chip); |
139 | 144 | ||
145 | mutex_lock(&chip->i2c_lock); | ||
140 | /* set output level */ | 146 | /* set output level */ |
141 | if (val) | 147 | if (val) |
142 | reg_val = chip->reg_output | (1u << off); | 148 | reg_val = chip->reg_output | (1u << off); |
@@ -145,7 +151,7 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc, | |||
145 | 151 | ||
146 | ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val); | 152 | ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val); |
147 | if (ret) | 153 | if (ret) |
148 | return ret; | 154 | goto exit; |
149 | 155 | ||
150 | chip->reg_output = reg_val; | 156 | chip->reg_output = reg_val; |
151 | 157 | ||
@@ -153,10 +159,13 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc, | |||
153 | reg_val = chip->reg_direction & ~(1u << off); | 159 | reg_val = chip->reg_direction & ~(1u << off); |
154 | ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val); | 160 | ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val); |
155 | if (ret) | 161 | if (ret) |
156 | return ret; | 162 | goto exit; |
157 | 163 | ||
158 | chip->reg_direction = reg_val; | 164 | chip->reg_direction = reg_val; |
159 | return 0; | 165 | ret = 0; |
166 | exit: | ||
167 | mutex_unlock(&chip->i2c_lock); | ||
168 | return ret; | ||
160 | } | 169 | } |
161 | 170 | ||
162 | static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off) | 171 | static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off) |
@@ -167,7 +176,9 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off) | |||
167 | 176 | ||
168 | chip = container_of(gc, struct pca953x_chip, gpio_chip); | 177 | chip = container_of(gc, struct pca953x_chip, gpio_chip); |
169 | 178 | ||
179 | mutex_lock(&chip->i2c_lock); | ||
170 | ret = pca953x_read_reg(chip, PCA953X_INPUT, ®_val); | 180 | ret = pca953x_read_reg(chip, PCA953X_INPUT, ®_val); |
181 | mutex_unlock(&chip->i2c_lock); | ||
171 | if (ret < 0) { | 182 | if (ret < 0) { |
172 | /* NOTE: diagnostic already emitted; that's all we should | 183 | /* NOTE: diagnostic already emitted; that's all we should |
173 | * do unless gpio_*_value_cansleep() calls become different | 184 | * do unless gpio_*_value_cansleep() calls become different |
@@ -187,6 +198,7 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val) | |||
187 | 198 | ||
188 | chip = container_of(gc, struct pca953x_chip, gpio_chip); | 199 | chip = container_of(gc, struct pca953x_chip, gpio_chip); |
189 | 200 | ||
201 | mutex_lock(&chip->i2c_lock); | ||
190 | if (val) | 202 | if (val) |
191 | reg_val = chip->reg_output | (1u << off); | 203 | reg_val = chip->reg_output | (1u << off); |
192 | else | 204 | else |
@@ -194,9 +206,11 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val) | |||
194 | 206 | ||
195 | ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val); | 207 | ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val); |
196 | if (ret) | 208 | if (ret) |
197 | return; | 209 | goto exit; |
198 | 210 | ||
199 | chip->reg_output = reg_val; | 211 | chip->reg_output = reg_val; |
212 | exit: | ||
213 | mutex_unlock(&chip->i2c_lock); | ||
200 | } | 214 | } |
201 | 215 | ||
202 | static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios) | 216 | static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios) |
@@ -517,6 +531,8 @@ static int __devinit pca953x_probe(struct i2c_client *client, | |||
517 | 531 | ||
518 | chip->names = pdata->names; | 532 | chip->names = pdata->names; |
519 | 533 | ||
534 | mutex_init(&chip->i2c_lock); | ||
535 | |||
520 | /* initialize cached registers from their original values. | 536 | /* initialize cached registers from their original values. |
521 | * we can't share this chip with another i2c master. | 537 | * we can't share this chip with another i2c master. |
522 | */ | 538 | */ |
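Each pca953x GPIO operation is a read-modify-write of a cached register image followed by an I2C write, so two concurrent callers could interleave and lose an update; the new i2c_lock serializes the whole cache-update-plus-write sequence. A sketch of that pattern with illustrative names standing in for the driver's own helpers:

#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct cached_reg {
	struct mutex lock;	/* protects 'shadow' and the device register */
	u16 shadow;		/* last value successfully written to the hardware */
};

/* Set or clear one bit, keeping the shadow copy and the device in sync. */
static int cached_reg_update_bit(struct cached_reg *r, unsigned int bit, int set,
				 int (*write_hw)(u16 val))
{
	u16 val;
	int ret;

	mutex_lock(&r->lock);
	val = set ? (r->shadow | BIT(bit)) : (r->shadow & ~BIT(bit));
	ret = write_hw(val);
	if (!ret)
		r->shadow = val;	/* only commit the cache on success */
	mutex_unlock(&r->lock);
	return ret;
}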
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c index 812aaac4438a..ab1162da70f8 100644 --- a/drivers/gpu/drm/drm_info.c +++ b/drivers/gpu/drm/drm_info.c | |||
@@ -272,17 +272,18 @@ int drm_vma_info(struct seq_file *m, void *data) | |||
272 | #endif | 272 | #endif |
273 | 273 | ||
274 | mutex_lock(&dev->struct_mutex); | 274 | mutex_lock(&dev->struct_mutex); |
275 | seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n", | 275 | seq_printf(m, "vma use count: %d, high_memory = %pK, 0x%pK\n", |
276 | atomic_read(&dev->vma_count), | 276 | atomic_read(&dev->vma_count), |
277 | high_memory, (u64)virt_to_phys(high_memory)); | 277 | high_memory, (void *)virt_to_phys(high_memory)); |
278 | 278 | ||
279 | list_for_each_entry(pt, &dev->vmalist, head) { | 279 | list_for_each_entry(pt, &dev->vmalist, head) { |
280 | vma = pt->vma; | 280 | vma = pt->vma; |
281 | if (!vma) | 281 | if (!vma) |
282 | continue; | 282 | continue; |
283 | seq_printf(m, | 283 | seq_printf(m, |
284 | "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000", | 284 | "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000", |
285 | pt->pid, vma->vm_start, vma->vm_end, | 285 | pt->pid, |
286 | (void *)vma->vm_start, (void *)vma->vm_end, | ||
286 | vma->vm_flags & VM_READ ? 'r' : '-', | 287 | vma->vm_flags & VM_READ ? 'r' : '-', |
287 | vma->vm_flags & VM_WRITE ? 'w' : '-', | 288 | vma->vm_flags & VM_WRITE ? 'w' : '-', |
288 | vma->vm_flags & VM_EXEC ? 'x' : '-', | 289 | vma->vm_flags & VM_EXEC ? 'x' : '-', |
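The seq_printf() conversions above switch raw kernel addresses from %p/%08lx to %pK, the printk format that only reveals the real pointer to privileged readers (unprivileged readers see zeroes, subject to the kptr_restrict sysctl). The values must be passed as pointers for %pK, hence the (void *) casts. A one-line usage sketch in the same seq_file context:

/* With kptr_restrict set, unprivileged readers of the seq_file see
 * zeroed addresses instead of real kernel pointers.
 */
seq_printf(m, "vma 0x%pK-0x%pK\n",
	   (void *)vma->vm_start, (void *)vma->vm_end);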
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index cb49685bde01..a34ef97d3c81 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c | |||
@@ -154,8 +154,10 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) | |||
154 | * available. In that case we can't account for this and just | 154 | * available. In that case we can't account for this and just |
155 | * hope for the best. | 155 | * hope for the best. |
156 | */ | 156 | */ |
157 | if ((vblrc > 0) && (abs(diff_ns) > 1000000)) | 157 | if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) { |
158 | atomic_inc(&dev->_vblank_count[crtc]); | 158 | atomic_inc(&dev->_vblank_count[crtc]); |
159 | smp_mb__after_atomic_inc(); | ||
160 | } | ||
159 | 161 | ||
160 | /* Invalidate all timestamps while vblank irq's are off. */ | 162 | /* Invalidate all timestamps while vblank irq's are off. */ |
161 | clear_vblank_timestamps(dev, crtc); | 163 | clear_vblank_timestamps(dev, crtc); |
@@ -481,6 +483,12 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc) | |||
481 | /* Dot clock in Hz: */ | 483 | /* Dot clock in Hz: */ |
482 | dotclock = (u64) crtc->hwmode.clock * 1000; | 484 | dotclock = (u64) crtc->hwmode.clock * 1000; |
483 | 485 | ||
486 | /* Fields of interlaced scanout modes are only half a frame duration. | ||
487 | * Double the dotclock to get half the frame-/line-/pixel duration. | ||
488 | */ | ||
489 | if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE) | ||
490 | dotclock *= 2; | ||
491 | |||
484 | /* Valid dotclock? */ | 492 | /* Valid dotclock? */ |
485 | if (dotclock > 0) { | 493 | if (dotclock > 0) { |
486 | /* Convert scanline length in pixels and video dot clock to | 494 | /* Convert scanline length in pixels and video dot clock to |
@@ -593,14 +601,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, | |||
593 | return -EAGAIN; | 601 | return -EAGAIN; |
594 | } | 602 | } |
595 | 603 | ||
596 | /* Don't know yet how to handle interlaced or | ||
597 | * double scan modes. Just no-op for now. | ||
598 | */ | ||
599 | if (mode->flags & (DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN)) { | ||
600 | DRM_DEBUG("crtc %d: Noop due to unsupported mode.\n", crtc); | ||
601 | return -ENOTSUPP; | ||
602 | } | ||
603 | |||
604 | /* Get current scanout position with system timestamp. | 604 | /* Get current scanout position with system timestamp. |
605 | * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times | 605 | * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times |
606 | * if single query takes longer than max_error nanoseconds. | 606 | * if single query takes longer than max_error nanoseconds. |
@@ -848,10 +848,11 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc) | |||
848 | if (rc) { | 848 | if (rc) { |
849 | tslot = atomic_read(&dev->_vblank_count[crtc]) + diff; | 849 | tslot = atomic_read(&dev->_vblank_count[crtc]) + diff; |
850 | vblanktimestamp(dev, crtc, tslot) = t_vblank; | 850 | vblanktimestamp(dev, crtc, tslot) = t_vblank; |
851 | smp_wmb(); | ||
852 | } | 851 | } |
853 | 852 | ||
853 | smp_mb__before_atomic_inc(); | ||
854 | atomic_add(diff, &dev->_vblank_count[crtc]); | 854 | atomic_add(diff, &dev->_vblank_count[crtc]); |
855 | smp_mb__after_atomic_inc(); | ||
855 | } | 856 | } |
856 | 857 | ||
857 | /** | 858 | /** |
@@ -1001,7 +1002,8 @@ int drm_modeset_ctl(struct drm_device *dev, void *data, | |||
1001 | struct drm_file *file_priv) | 1002 | struct drm_file *file_priv) |
1002 | { | 1003 | { |
1003 | struct drm_modeset_ctl *modeset = data; | 1004 | struct drm_modeset_ctl *modeset = data; |
1004 | int crtc, ret = 0; | 1005 | int ret = 0; |
1006 | unsigned int crtc; | ||
1005 | 1007 | ||
1006 | /* If drm_vblank_init() hasn't been called yet, just no-op */ | 1008 | /* If drm_vblank_init() hasn't been called yet, just no-op */ |
1007 | if (!dev->num_crtcs) | 1009 | if (!dev->num_crtcs) |
@@ -1283,15 +1285,16 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc) | |||
1283 | * e.g., due to spurious vblank interrupts. We need to | 1285 | * e.g., due to spurious vblank interrupts. We need to |
1284 | * ignore those for accounting. | 1286 | * ignore those for accounting. |
1285 | */ | 1287 | */ |
1286 | if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) { | 1288 | if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) { |
1287 | /* Store new timestamp in ringbuffer. */ | 1289 | /* Store new timestamp in ringbuffer. */ |
1288 | vblanktimestamp(dev, crtc, vblcount + 1) = tvblank; | 1290 | vblanktimestamp(dev, crtc, vblcount + 1) = tvblank; |
1289 | smp_wmb(); | ||
1290 | 1291 | ||
1291 | /* Increment cooked vblank count. This also atomically commits | 1292 | /* Increment cooked vblank count. This also atomically commits |
1292 | * the timestamp computed above. | 1293 | * the timestamp computed above. |
1293 | */ | 1294 | */ |
1295 | smp_mb__before_atomic_inc(); | ||
1294 | atomic_inc(&dev->_vblank_count[crtc]); | 1296 | atomic_inc(&dev->_vblank_count[crtc]); |
1297 | smp_mb__after_atomic_inc(); | ||
1295 | } else { | 1298 | } else { |
1296 | DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n", | 1299 | DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n", |
1297 | crtc, (int) diff_ns); | 1300 | crtc, (int) diff_ns); |
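The vblank accounting above replaces the bare smp_wmb() with an explicit barrier pair around the atomic increment: the timestamp is stored into its ringbuffer slot first, smp_mb__before_atomic_inc() orders that store before the count update, and smp_mb__after_atomic_inc() makes the new count visible before anything that follows, so readers can treat the increment as the publication point for the timestamp. A condensed sketch of the writer side, with hypothetical names for the storage:

#include <linux/atomic.h>
#include <linux/time.h>

/* Writer: fill the slot for the next count value, then bump the counter
 * that readers use to decide which ringbuffer slot is current.
 */
static void publish_sample(atomic_t *count, struct timeval *slot,
			   struct timeval sample)
{
	*slot = sample;			/* stage the timestamp first           */
	smp_mb__before_atomic_inc();	/* order the store before the inc      */
	atomic_inc(count);		/* commit: readers may now use the slot */
	smp_mb__after_atomic_inc();	/* make the inc visible before later accesses */
}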
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 3601466c5502..09e0327fc6ce 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -326,21 +326,21 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) | |||
326 | struct intel_crtc *crtc; | 326 | struct intel_crtc *crtc; |
327 | 327 | ||
328 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { | 328 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { |
329 | const char *pipe = crtc->pipe ? "B" : "A"; | 329 | const char pipe = pipe_name(crtc->pipe); |
330 | const char *plane = crtc->plane ? "B" : "A"; | 330 | const char plane = plane_name(crtc->plane); |
331 | struct intel_unpin_work *work; | 331 | struct intel_unpin_work *work; |
332 | 332 | ||
333 | spin_lock_irqsave(&dev->event_lock, flags); | 333 | spin_lock_irqsave(&dev->event_lock, flags); |
334 | work = crtc->unpin_work; | 334 | work = crtc->unpin_work; |
335 | if (work == NULL) { | 335 | if (work == NULL) { |
336 | seq_printf(m, "No flip due on pipe %s (plane %s)\n", | 336 | seq_printf(m, "No flip due on pipe %c (plane %c)\n", |
337 | pipe, plane); | 337 | pipe, plane); |
338 | } else { | 338 | } else { |
339 | if (!work->pending) { | 339 | if (!work->pending) { |
340 | seq_printf(m, "Flip queued on pipe %s (plane %s)\n", | 340 | seq_printf(m, "Flip queued on pipe %c (plane %c)\n", |
341 | pipe, plane); | 341 | pipe, plane); |
342 | } else { | 342 | } else { |
343 | seq_printf(m, "Flip pending (waiting for vsync) on pipe %s (plane %s)\n", | 343 | seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", |
344 | pipe, plane); | 344 | pipe, plane); |
345 | } | 345 | } |
346 | if (work->enable_stall_check) | 346 | if (work->enable_stall_check) |
@@ -458,7 +458,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) | |||
458 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 458 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
459 | struct drm_device *dev = node->minor->dev; | 459 | struct drm_device *dev = node->minor->dev; |
460 | drm_i915_private_t *dev_priv = dev->dev_private; | 460 | drm_i915_private_t *dev_priv = dev->dev_private; |
461 | int ret, i; | 461 | int ret, i, pipe; |
462 | 462 | ||
463 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 463 | ret = mutex_lock_interruptible(&dev->struct_mutex); |
464 | if (ret) | 464 | if (ret) |
@@ -471,10 +471,10 @@ static int i915_interrupt_info(struct seq_file *m, void *data) | |||
471 | I915_READ(IIR)); | 471 | I915_READ(IIR)); |
472 | seq_printf(m, "Interrupt mask: %08x\n", | 472 | seq_printf(m, "Interrupt mask: %08x\n", |
473 | I915_READ(IMR)); | 473 | I915_READ(IMR)); |
474 | seq_printf(m, "Pipe A stat: %08x\n", | 474 | for_each_pipe(pipe) |
475 | I915_READ(PIPEASTAT)); | 475 | seq_printf(m, "Pipe %c stat: %08x\n", |
476 | seq_printf(m, "Pipe B stat: %08x\n", | 476 | pipe_name(pipe), |
477 | I915_READ(PIPEBSTAT)); | 477 | I915_READ(PIPESTAT(pipe))); |
478 | } else { | 478 | } else { |
479 | seq_printf(m, "North Display Interrupt enable: %08x\n", | 479 | seq_printf(m, "North Display Interrupt enable: %08x\n", |
480 | I915_READ(DEIER)); | 480 | I915_READ(DEIER)); |
@@ -544,11 +544,11 @@ static int i915_hws_info(struct seq_file *m, void *data) | |||
544 | struct drm_device *dev = node->minor->dev; | 544 | struct drm_device *dev = node->minor->dev; |
545 | drm_i915_private_t *dev_priv = dev->dev_private; | 545 | drm_i915_private_t *dev_priv = dev->dev_private; |
546 | struct intel_ring_buffer *ring; | 546 | struct intel_ring_buffer *ring; |
547 | volatile u32 *hws; | 547 | const volatile u32 __iomem *hws; |
548 | int i; | 548 | int i; |
549 | 549 | ||
550 | ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; | 550 | ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; |
551 | hws = (volatile u32 *)ring->status_page.page_addr; | 551 | hws = (volatile u32 __iomem *)ring->status_page.page_addr; |
552 | if (hws == NULL) | 552 | if (hws == NULL) |
553 | return 0; | 553 | return 0; |
554 | 554 | ||
@@ -615,7 +615,7 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data) | |||
615 | if (!ring->obj) { | 615 | if (!ring->obj) { |
616 | seq_printf(m, "No ringbuffer setup\n"); | 616 | seq_printf(m, "No ringbuffer setup\n"); |
617 | } else { | 617 | } else { |
618 | u8 *virt = ring->virtual_start; | 618 | const u8 __iomem *virt = ring->virtual_start; |
619 | uint32_t off; | 619 | uint32_t off; |
620 | 620 | ||
621 | for (off = 0; off < ring->size; off += 4) { | 621 | for (off = 0; off < ring->size; off += 4) { |
@@ -805,15 +805,20 @@ static int i915_error_state(struct seq_file *m, void *unused) | |||
805 | } | 805 | } |
806 | } | 806 | } |
807 | 807 | ||
808 | if (error->ringbuffer) { | 808 | for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) { |
809 | struct drm_i915_error_object *obj = error->ringbuffer; | 809 | if (error->ringbuffer[i]) { |
810 | 810 | struct drm_i915_error_object *obj = error->ringbuffer[i]; | |
811 | seq_printf(m, "--- ringbuffer = 0x%08x\n", obj->gtt_offset); | 811 | seq_printf(m, "%s --- ringbuffer = 0x%08x\n", |
812 | offset = 0; | 812 | dev_priv->ring[i].name, |
813 | for (page = 0; page < obj->page_count; page++) { | 813 | obj->gtt_offset); |
814 | for (elt = 0; elt < PAGE_SIZE/4; elt++) { | 814 | offset = 0; |
815 | seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]); | 815 | for (page = 0; page < obj->page_count; page++) { |
816 | offset += 4; | 816 | for (elt = 0; elt < PAGE_SIZE/4; elt++) { |
817 | seq_printf(m, "%08x : %08x\n", | ||
818 | offset, | ||
819 | obj->pages[page][elt]); | ||
820 | offset += 4; | ||
821 | } | ||
817 | } | 822 | } |
818 | } | 823 | } |
819 | } | 824 | } |
@@ -862,19 +867,44 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) | |||
862 | u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); | 867 | u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); |
863 | u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); | 868 | u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); |
864 | u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); | 869 | u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); |
870 | u32 rpstat; | ||
871 | u32 rpupei, rpcurup, rpprevup; | ||
872 | u32 rpdownei, rpcurdown, rpprevdown; | ||
865 | int max_freq; | 873 | int max_freq; |
866 | 874 | ||
867 | /* RPSTAT1 is in the GT power well */ | 875 | /* RPSTAT1 is in the GT power well */ |
868 | __gen6_force_wake_get(dev_priv); | 876 | __gen6_gt_force_wake_get(dev_priv); |
877 | |||
878 | rpstat = I915_READ(GEN6_RPSTAT1); | ||
879 | rpupei = I915_READ(GEN6_RP_CUR_UP_EI); | ||
880 | rpcurup = I915_READ(GEN6_RP_CUR_UP); | ||
881 | rpprevup = I915_READ(GEN6_RP_PREV_UP); | ||
882 | rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); | ||
883 | rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); | ||
884 | rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); | ||
869 | 885 | ||
870 | seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); | 886 | seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); |
871 | seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1)); | 887 | seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat); |
872 | seq_printf(m, "Render p-state ratio: %d\n", | 888 | seq_printf(m, "Render p-state ratio: %d\n", |
873 | (gt_perf_status & 0xff00) >> 8); | 889 | (gt_perf_status & 0xff00) >> 8); |
874 | seq_printf(m, "Render p-state VID: %d\n", | 890 | seq_printf(m, "Render p-state VID: %d\n", |
875 | gt_perf_status & 0xff); | 891 | gt_perf_status & 0xff); |
876 | seq_printf(m, "Render p-state limit: %d\n", | 892 | seq_printf(m, "Render p-state limit: %d\n", |
877 | rp_state_limits & 0xff); | 893 | rp_state_limits & 0xff); |
894 | seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >> | ||
895 | GEN6_CAGF_SHIFT) * 100); | ||
896 | seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & | ||
897 | GEN6_CURICONT_MASK); | ||
898 | seq_printf(m, "RP CUR UP: %dus\n", rpcurup & | ||
899 | GEN6_CURBSYTAVG_MASK); | ||
900 | seq_printf(m, "RP PREV UP: %dus\n", rpprevup & | ||
901 | GEN6_CURBSYTAVG_MASK); | ||
902 | seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei & | ||
903 | GEN6_CURIAVG_MASK); | ||
904 | seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown & | ||
905 | GEN6_CURBSYTAVG_MASK); | ||
906 | seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown & | ||
907 | GEN6_CURBSYTAVG_MASK); | ||
878 | 908 | ||
879 | max_freq = (rp_state_cap & 0xff0000) >> 16; | 909 | max_freq = (rp_state_cap & 0xff0000) >> 16; |
880 | seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", | 910 | seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", |
@@ -888,7 +918,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) | |||
888 | seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", | 918 | seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", |
889 | max_freq * 100); | 919 | max_freq * 100); |
890 | 920 | ||
891 | __gen6_force_wake_put(dev_priv); | 921 | __gen6_gt_force_wake_put(dev_priv); |
892 | } else { | 922 | } else { |
893 | seq_printf(m, "no P-state info available\n"); | 923 | seq_printf(m, "no P-state info available\n"); |
894 | } | 924 | } |
@@ -1259,7 +1289,7 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor) | |||
1259 | } | 1289 | } |
1260 | 1290 | ||
1261 | static struct drm_info_list i915_debugfs_list[] = { | 1291 | static struct drm_info_list i915_debugfs_list[] = { |
1262 | {"i915_capabilities", i915_capabilities, 0, 0}, | 1292 | {"i915_capabilities", i915_capabilities, 0}, |
1263 | {"i915_gem_objects", i915_gem_object_info, 0}, | 1293 | {"i915_gem_objects", i915_gem_object_info, 0}, |
1264 | {"i915_gem_gtt", i915_gem_gtt_info, 0}, | 1294 | {"i915_gem_gtt", i915_gem_gtt_info, 0}, |
1265 | {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, | 1295 | {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 17bd766f2081..72730377a01b 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -43,6 +43,17 @@ | |||
43 | #include <linux/slab.h> | 43 | #include <linux/slab.h> |
44 | #include <acpi/video.h> | 44 | #include <acpi/video.h> |
45 | 45 | ||
46 | static void i915_write_hws_pga(struct drm_device *dev) | ||
47 | { | ||
48 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
49 | u32 addr; | ||
50 | |||
51 | addr = dev_priv->status_page_dmah->busaddr; | ||
52 | if (INTEL_INFO(dev)->gen >= 4) | ||
53 | addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; | ||
54 | I915_WRITE(HWS_PGA, addr); | ||
55 | } | ||
56 | |||
46 | /** | 57 | /** |
47 | * Sets up the hardware status page for devices that need a physical address | 58 | * Sets up the hardware status page for devices that need a physical address |
48 | * in the register. | 59 | * in the register. |
@@ -60,16 +71,13 @@ static int i915_init_phys_hws(struct drm_device *dev) | |||
60 | DRM_ERROR("Can not allocate hardware status page\n"); | 71 | DRM_ERROR("Can not allocate hardware status page\n"); |
61 | return -ENOMEM; | 72 | return -ENOMEM; |
62 | } | 73 | } |
63 | ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; | 74 | ring->status_page.page_addr = |
64 | dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; | 75 | (void __force __iomem *)dev_priv->status_page_dmah->vaddr; |
65 | 76 | ||
66 | memset(ring->status_page.page_addr, 0, PAGE_SIZE); | 77 | memset_io(ring->status_page.page_addr, 0, PAGE_SIZE); |
67 | 78 | ||
68 | if (INTEL_INFO(dev)->gen >= 4) | 79 | i915_write_hws_pga(dev); |
69 | dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) & | ||
70 | 0xf0; | ||
71 | 80 | ||
72 | I915_WRITE(HWS_PGA, dev_priv->dma_status_page); | ||
73 | DRM_DEBUG_DRIVER("Enabled hardware status page\n"); | 81 | DRM_DEBUG_DRIVER("Enabled hardware status page\n"); |
74 | return 0; | 82 | return 0; |
75 | } | 83 | } |
@@ -216,7 +224,7 @@ static int i915_dma_resume(struct drm_device * dev) | |||
216 | if (ring->status_page.gfx_addr != 0) | 224 | if (ring->status_page.gfx_addr != 0) |
217 | intel_ring_setup_status_page(ring); | 225 | intel_ring_setup_status_page(ring); |
218 | else | 226 | else |
219 | I915_WRITE(HWS_PGA, dev_priv->dma_status_page); | 227 | i915_write_hws_pga(dev); |
220 | 228 | ||
221 | DRM_DEBUG_DRIVER("Enabled hardware status page\n"); | 229 | DRM_DEBUG_DRIVER("Enabled hardware status page\n"); |
222 | 230 | ||
@@ -771,6 +779,9 @@ static int i915_getparam(struct drm_device *dev, void *data, | |||
771 | case I915_PARAM_HAS_EXEC_CONSTANTS: | 779 | case I915_PARAM_HAS_EXEC_CONSTANTS: |
772 | value = INTEL_INFO(dev)->gen >= 4; | 780 | value = INTEL_INFO(dev)->gen >= 4; |
773 | break; | 781 | break; |
782 | case I915_PARAM_HAS_RELAXED_DELTA: | ||
783 | value = 1; | ||
784 | break; | ||
774 | default: | 785 | default: |
775 | DRM_DEBUG_DRIVER("Unknown parameter %d\n", | 786 | DRM_DEBUG_DRIVER("Unknown parameter %d\n", |
776 | param->param); | 787 | param->param); |
@@ -859,8 +870,9 @@ static int i915_set_status_page(struct drm_device *dev, void *data, | |||
859 | " G33 hw status page\n"); | 870 | " G33 hw status page\n"); |
860 | return -ENOMEM; | 871 | return -ENOMEM; |
861 | } | 872 | } |
862 | ring->status_page.page_addr = dev_priv->hws_map.handle; | 873 | ring->status_page.page_addr = |
863 | memset(ring->status_page.page_addr, 0, PAGE_SIZE); | 874 | (void __force __iomem *)dev_priv->hws_map.handle; |
875 | memset_io(ring->status_page.page_addr, 0, PAGE_SIZE); | ||
864 | I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); | 876 | I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); |
865 | 877 | ||
866 | DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", | 878 | DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", |
@@ -1895,6 +1907,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1895 | if (IS_GEN2(dev)) | 1907 | if (IS_GEN2(dev)) |
1896 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); | 1908 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); |
1897 | 1909 | ||
1910 | /* 965GM sometimes incorrectly writes to hardware status page (HWS) | ||
1911 | * using 32bit addressing, overwriting memory if HWS is located | ||
1912 | * above 4GB. | ||
1913 | * | ||
1914 | * The documentation also mentions an issue with undefined | ||
1915 | * behaviour if any general state is accessed within a page above 4GB, | ||
1916 | * which also needs to be handled carefully. | ||
1917 | */ | ||
1918 | if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) | ||
1919 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); | ||
1920 | |||
1898 | mmio_bar = IS_GEN2(dev) ? 1 : 0; | 1921 | mmio_bar = IS_GEN2(dev) ? 1 : 0; |
1899 | dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0); | 1922 | dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0); |
1900 | if (!dev_priv->regs) { | 1923 | if (!dev_priv->regs) { |
@@ -2002,9 +2025,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
2002 | 2025 | ||
2003 | spin_lock_init(&dev_priv->irq_lock); | 2026 | spin_lock_init(&dev_priv->irq_lock); |
2004 | spin_lock_init(&dev_priv->error_lock); | 2027 | spin_lock_init(&dev_priv->error_lock); |
2005 | dev_priv->trace_irq_seqno = 0; | ||
2006 | 2028 | ||
2007 | ret = drm_vblank_init(dev, I915_NUM_PIPE); | 2029 | if (IS_MOBILE(dev) || !IS_GEN2(dev)) |
2030 | dev_priv->num_pipe = 2; | ||
2031 | else | ||
2032 | dev_priv->num_pipe = 1; | ||
2033 | |||
2034 | ret = drm_vblank_init(dev, dev_priv->num_pipe); | ||
2008 | if (ret) | 2035 | if (ret) |
2009 | goto out_gem_unload; | 2036 | goto out_gem_unload; |
2010 | 2037 | ||
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 9ad42d583493..c34a8dd31d02 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -43,16 +43,28 @@ module_param_named(modeset, i915_modeset, int, 0400); | |||
43 | unsigned int i915_fbpercrtc = 0; | 43 | unsigned int i915_fbpercrtc = 0; |
44 | module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); | 44 | module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); |
45 | 45 | ||
46 | int i915_panel_ignore_lid = 0; | ||
47 | module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600); | ||
48 | |||
46 | unsigned int i915_powersave = 1; | 49 | unsigned int i915_powersave = 1; |
47 | module_param_named(powersave, i915_powersave, int, 0600); | 50 | module_param_named(powersave, i915_powersave, int, 0600); |
48 | 51 | ||
52 | unsigned int i915_semaphores = 1; | ||
53 | module_param_named(semaphores, i915_semaphores, int, 0600); | ||
54 | |||
55 | unsigned int i915_enable_rc6 = 0; | ||
56 | module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); | ||
57 | |||
49 | unsigned int i915_lvds_downclock = 0; | 58 | unsigned int i915_lvds_downclock = 0; |
50 | module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); | 59 | module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); |
51 | 60 | ||
52 | unsigned int i915_panel_use_ssc = 1; | 61 | unsigned int i915_panel_use_ssc = 1; |
53 | module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); | 62 | module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); |
54 | 63 | ||
55 | bool i915_try_reset = true; | 64 | int i915_vbt_sdvo_panel_type = -1; |
65 | module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600); | ||
66 | |||
67 | static bool i915_try_reset = true; | ||
56 | module_param_named(reset, i915_try_reset, bool, 0600); | 68 | module_param_named(reset, i915_try_reset, bool, 0600); |
57 | 69 | ||
58 | static struct drm_driver driver; | 70 | static struct drm_driver driver; |
@@ -251,7 +263,7 @@ void intel_detect_pch (struct drm_device *dev) | |||
251 | } | 263 | } |
252 | } | 264 | } |
253 | 265 | ||
254 | void __gen6_force_wake_get(struct drm_i915_private *dev_priv) | 266 | void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) |
255 | { | 267 | { |
256 | int count; | 268 | int count; |
257 | 269 | ||
@@ -267,12 +279,22 @@ void __gen6_force_wake_get(struct drm_i915_private *dev_priv) | |||
267 | udelay(10); | 279 | udelay(10); |
268 | } | 280 | } |
269 | 281 | ||
270 | void __gen6_force_wake_put(struct drm_i915_private *dev_priv) | 282 | void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) |
271 | { | 283 | { |
272 | I915_WRITE_NOTRACE(FORCEWAKE, 0); | 284 | I915_WRITE_NOTRACE(FORCEWAKE, 0); |
273 | POSTING_READ(FORCEWAKE); | 285 | POSTING_READ(FORCEWAKE); |
274 | } | 286 | } |
275 | 287 | ||
288 | void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) | ||
289 | { | ||
290 | int loop = 500; | ||
291 | u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); | ||
292 | while (fifo < 20 && loop--) { | ||
293 | udelay(10); | ||
294 | fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); | ||
295 | } | ||
296 | } | ||
297 | |||
276 | static int i915_drm_freeze(struct drm_device *dev) | 298 | static int i915_drm_freeze(struct drm_device *dev) |
277 | { | 299 | { |
278 | struct drm_i915_private *dev_priv = dev->dev_private; | 300 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -360,7 +382,7 @@ static int i915_drm_thaw(struct drm_device *dev) | |||
360 | /* Resume the modeset for every activated CRTC */ | 382 | /* Resume the modeset for every activated CRTC */ |
361 | drm_helper_resume_force_mode(dev); | 383 | drm_helper_resume_force_mode(dev); |
362 | 384 | ||
363 | if (dev_priv->renderctx && dev_priv->pwrctx) | 385 | if (IS_IRONLAKE_M(dev)) |
364 | ironlake_enable_rc6(dev); | 386 | ironlake_enable_rc6(dev); |
365 | } | 387 | } |
366 | 388 | ||
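__gen6_gt_wait_for_fifo() is a bounded busy-wait: before a GT register write it polls GT_FIFO_FREE_ENTRIES until at least 20 slots are free, giving up after 500 polls of 10 µs rather than hanging forever. The shape of that loop, with the threshold and retry count pulled out as named constants (the constant names are illustrative; the values come from the hunk):

#define FIFO_MIN_FREE	20	/* don't post writes into a nearly full FIFO   */
#define FIFO_RETRIES	500	/* ~5 ms worst case at 10 us per poll          */

static void wait_for_gt_fifo(struct drm_i915_private *dev_priv)
{
	int loop = FIFO_RETRIES;
	u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);

	while (fifo < FIFO_MIN_FREE && loop--) {
		udelay(10);
		fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
	}
	/* Falls through even on timeout: best effort, as in the driver. */
}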
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index a78197d43ce6..449650545bb4 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -49,17 +49,22 @@ | |||
49 | enum pipe { | 49 | enum pipe { |
50 | PIPE_A = 0, | 50 | PIPE_A = 0, |
51 | PIPE_B, | 51 | PIPE_B, |
52 | PIPE_C, | ||
53 | I915_MAX_PIPES | ||
52 | }; | 54 | }; |
55 | #define pipe_name(p) ((p) + 'A') | ||
53 | 56 | ||
54 | enum plane { | 57 | enum plane { |
55 | PLANE_A = 0, | 58 | PLANE_A = 0, |
56 | PLANE_B, | 59 | PLANE_B, |
60 | PLANE_C, | ||
57 | }; | 61 | }; |
58 | 62 | #define plane_name(p) ((p) + 'A') | |
59 | #define I915_NUM_PIPE 2 | ||
60 | 63 | ||
61 | #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) | 64 | #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) |
62 | 65 | ||
66 | #define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++) | ||
67 | |||
63 | /* Interface history: | 68 | /* Interface history: |
64 | * | 69 | * |
65 | * 1.1: Original. | 70 | * 1.1: Original. |
@@ -75,10 +80,7 @@ enum plane { | |||
75 | #define DRIVER_PATCHLEVEL 0 | 80 | #define DRIVER_PATCHLEVEL 0 |
76 | 81 | ||
77 | #define WATCH_COHERENCY 0 | 82 | #define WATCH_COHERENCY 0 |
78 | #define WATCH_EXEC 0 | ||
79 | #define WATCH_RELOC 0 | ||
80 | #define WATCH_LISTS 0 | 83 | #define WATCH_LISTS 0 |
81 | #define WATCH_PWRITE 0 | ||
82 | 84 | ||
83 | #define I915_GEM_PHYS_CURSOR_0 1 | 85 | #define I915_GEM_PHYS_CURSOR_0 1 |
84 | #define I915_GEM_PHYS_CURSOR_1 2 | 86 | #define I915_GEM_PHYS_CURSOR_1 2 |
@@ -111,6 +113,7 @@ struct intel_opregion { | |||
111 | struct opregion_swsci *swsci; | 113 | struct opregion_swsci *swsci; |
112 | struct opregion_asle *asle; | 114 | struct opregion_asle *asle; |
113 | void *vbt; | 115 | void *vbt; |
116 | u32 __iomem *lid_state; | ||
114 | }; | 117 | }; |
115 | #define OPREGION_SIZE (8*1024) | 118 | #define OPREGION_SIZE (8*1024) |
116 | 119 | ||
@@ -144,8 +147,7 @@ struct intel_display_error_state; | |||
144 | struct drm_i915_error_state { | 147 | struct drm_i915_error_state { |
145 | u32 eir; | 148 | u32 eir; |
146 | u32 pgtbl_er; | 149 | u32 pgtbl_er; |
147 | u32 pipeastat; | 150 | u32 pipestat[I915_MAX_PIPES]; |
148 | u32 pipebstat; | ||
149 | u32 ipeir; | 151 | u32 ipeir; |
150 | u32 ipehr; | 152 | u32 ipehr; |
151 | u32 instdone; | 153 | u32 instdone; |
@@ -172,7 +174,7 @@ struct drm_i915_error_state { | |||
172 | int page_count; | 174 | int page_count; |
173 | u32 gtt_offset; | 175 | u32 gtt_offset; |
174 | u32 *pages[0]; | 176 | u32 *pages[0]; |
175 | } *ringbuffer, *batchbuffer[I915_NUM_RINGS]; | 177 | } *ringbuffer[I915_NUM_RINGS], *batchbuffer[I915_NUM_RINGS]; |
176 | struct drm_i915_error_buffer { | 178 | struct drm_i915_error_buffer { |
177 | u32 size; | 179 | u32 size; |
178 | u32 name; | 180 | u32 name; |
@@ -200,9 +202,7 @@ struct drm_i915_display_funcs { | |||
200 | void (*disable_fbc)(struct drm_device *dev); | 202 | void (*disable_fbc)(struct drm_device *dev); |
201 | int (*get_display_clock_speed)(struct drm_device *dev); | 203 | int (*get_display_clock_speed)(struct drm_device *dev); |
202 | int (*get_fifo_size)(struct drm_device *dev, int plane); | 204 | int (*get_fifo_size)(struct drm_device *dev, int plane); |
203 | void (*update_wm)(struct drm_device *dev, int planea_clock, | 205 | void (*update_wm)(struct drm_device *dev); |
204 | int planeb_clock, int sr_hdisplay, int sr_htotal, | ||
205 | int pixel_size); | ||
206 | /* clock updates for mode set */ | 206 | /* clock updates for mode set */ |
207 | /* cursor updates */ | 207 | /* cursor updates */ |
208 | /* render clock increase/decrease */ | 208 | /* render clock increase/decrease */ |
@@ -274,7 +274,6 @@ typedef struct drm_i915_private { | |||
274 | uint32_t next_seqno; | 274 | uint32_t next_seqno; |
275 | 275 | ||
276 | drm_dma_handle_t *status_page_dmah; | 276 | drm_dma_handle_t *status_page_dmah; |
277 | dma_addr_t dma_status_page; | ||
278 | uint32_t counter; | 277 | uint32_t counter; |
279 | drm_local_map_t hws_map; | 278 | drm_local_map_t hws_map; |
280 | struct drm_i915_gem_object *pwrctx; | 279 | struct drm_i915_gem_object *pwrctx; |
@@ -289,7 +288,6 @@ typedef struct drm_i915_private { | |||
289 | int page_flipping; | 288 | int page_flipping; |
290 | 289 | ||
291 | atomic_t irq_received; | 290 | atomic_t irq_received; |
292 | u32 trace_irq_seqno; | ||
293 | 291 | ||
294 | /* protects the irq masks */ | 292 | /* protects the irq masks */ |
295 | spinlock_t irq_lock; | 293 | spinlock_t irq_lock; |
@@ -324,8 +322,6 @@ typedef struct drm_i915_private { | |||
324 | int cfb_plane; | 322 | int cfb_plane; |
325 | int cfb_y; | 323 | int cfb_y; |
326 | 324 | ||
327 | int irq_enabled; | ||
328 | |||
329 | struct intel_opregion opregion; | 325 | struct intel_opregion opregion; |
330 | 326 | ||
331 | /* overlay */ | 327 | /* overlay */ |
@@ -387,7 +383,6 @@ typedef struct drm_i915_private { | |||
387 | u32 saveDSPACNTR; | 383 | u32 saveDSPACNTR; |
388 | u32 saveDSPBCNTR; | 384 | u32 saveDSPBCNTR; |
389 | u32 saveDSPARB; | 385 | u32 saveDSPARB; |
390 | u32 saveHWS; | ||
391 | u32 savePIPEACONF; | 386 | u32 savePIPEACONF; |
392 | u32 savePIPEBCONF; | 387 | u32 savePIPEBCONF; |
393 | u32 savePIPEASRC; | 388 | u32 savePIPEASRC; |
@@ -615,6 +610,12 @@ typedef struct drm_i915_private { | |||
615 | struct delayed_work retire_work; | 610 | struct delayed_work retire_work; |
616 | 611 | ||
617 | /** | 612 | /** |
613 | * Are we in a non-interruptible section of code like | ||
614 | * modesetting? | ||
615 | */ | ||
616 | bool interruptible; | ||
617 | |||
618 | /** | ||
618 | * Flag if the X Server, and thus DRM, is not currently in | 619 | * Flag if the X Server, and thus DRM, is not currently in |
619 | * control of the device. | 620 | * control of the device. |
620 | * | 621 | * |
@@ -652,6 +653,7 @@ typedef struct drm_i915_private { | |||
652 | unsigned int lvds_border_bits; | 653 | unsigned int lvds_border_bits; |
653 | /* Panel fitter placement and size for Ironlake+ */ | 654 | /* Panel fitter placement and size for Ironlake+ */ |
654 | u32 pch_pf_pos, pch_pf_size; | 655 | u32 pch_pf_pos, pch_pf_size; |
656 | int panel_t3, panel_t12; | ||
655 | 657 | ||
656 | struct drm_crtc *plane_to_crtc_mapping[2]; | 658 | struct drm_crtc *plane_to_crtc_mapping[2]; |
657 | struct drm_crtc *pipe_to_crtc_mapping[2]; | 659 | struct drm_crtc *pipe_to_crtc_mapping[2]; |
@@ -698,6 +700,8 @@ typedef struct drm_i915_private { | |||
698 | 700 | ||
699 | /* list of fbdev register on this device */ | 701 | /* list of fbdev register on this device */ |
700 | struct intel_fbdev *fbdev; | 702 | struct intel_fbdev *fbdev; |
703 | |||
704 | struct drm_property *broadcast_rgb_property; | ||
701 | } drm_i915_private_t; | 705 | } drm_i915_private_t; |
702 | 706 | ||
703 | struct drm_i915_gem_object { | 707 | struct drm_i915_gem_object { |
@@ -955,9 +959,13 @@ enum intel_chip_family { | |||
955 | extern struct drm_ioctl_desc i915_ioctls[]; | 959 | extern struct drm_ioctl_desc i915_ioctls[]; |
956 | extern int i915_max_ioctl; | 960 | extern int i915_max_ioctl; |
957 | extern unsigned int i915_fbpercrtc; | 961 | extern unsigned int i915_fbpercrtc; |
962 | extern int i915_panel_ignore_lid; | ||
958 | extern unsigned int i915_powersave; | 963 | extern unsigned int i915_powersave; |
964 | extern unsigned int i915_semaphores; | ||
959 | extern unsigned int i915_lvds_downclock; | 965 | extern unsigned int i915_lvds_downclock; |
960 | extern unsigned int i915_panel_use_ssc; | 966 | extern unsigned int i915_panel_use_ssc; |
967 | extern int i915_vbt_sdvo_panel_type; | ||
968 | extern unsigned int i915_enable_rc6; | ||
961 | 969 | ||
962 | extern int i915_suspend(struct drm_device *dev, pm_message_t state); | 970 | extern int i915_suspend(struct drm_device *dev, pm_message_t state); |
963 | extern int i915_resume(struct drm_device *dev); | 971 | extern int i915_resume(struct drm_device *dev); |
@@ -996,8 +1004,6 @@ extern int i915_irq_emit(struct drm_device *dev, void *data, | |||
996 | struct drm_file *file_priv); | 1004 | struct drm_file *file_priv); |
997 | extern int i915_irq_wait(struct drm_device *dev, void *data, | 1005 | extern int i915_irq_wait(struct drm_device *dev, void *data, |
998 | struct drm_file *file_priv); | 1006 | struct drm_file *file_priv); |
999 | void i915_trace_irq_get(struct drm_device *dev, u32 seqno); | ||
1000 | extern void i915_enable_interrupt (struct drm_device *dev); | ||
1001 | 1007 | ||
1002 | extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); | 1008 | extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); |
1003 | extern void i915_driver_irq_preinstall(struct drm_device * dev); | 1009 | extern void i915_driver_irq_preinstall(struct drm_device * dev); |
@@ -1049,7 +1055,6 @@ extern void i915_mem_takedown(struct mem_block **heap); | |||
1049 | extern void i915_mem_release(struct drm_device * dev, | 1055 | extern void i915_mem_release(struct drm_device * dev, |
1050 | struct drm_file *file_priv, struct mem_block *heap); | 1056 | struct drm_file *file_priv, struct mem_block *heap); |
1051 | /* i915_gem.c */ | 1057 | /* i915_gem.c */ |
1052 | int i915_gem_check_is_wedged(struct drm_device *dev); | ||
1053 | int i915_gem_init_ioctl(struct drm_device *dev, void *data, | 1058 | int i915_gem_init_ioctl(struct drm_device *dev, void *data, |
1054 | struct drm_file *file_priv); | 1059 | struct drm_file *file_priv); |
1055 | int i915_gem_create_ioctl(struct drm_device *dev, void *data, | 1060 | int i915_gem_create_ioctl(struct drm_device *dev, void *data, |
@@ -1092,8 +1097,7 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, | |||
1092 | struct drm_file *file_priv); | 1097 | struct drm_file *file_priv); |
1093 | void i915_gem_load(struct drm_device *dev); | 1098 | void i915_gem_load(struct drm_device *dev); |
1094 | int i915_gem_init_object(struct drm_gem_object *obj); | 1099 | int i915_gem_init_object(struct drm_gem_object *obj); |
1095 | int __must_check i915_gem_flush_ring(struct drm_device *dev, | 1100 | int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring, |
1096 | struct intel_ring_buffer *ring, | ||
1097 | uint32_t invalidate_domains, | 1101 | uint32_t invalidate_domains, |
1098 | uint32_t flush_domains); | 1102 | uint32_t flush_domains); |
1099 | struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, | 1103 | struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, |
@@ -1108,8 +1112,7 @@ void i915_gem_release_mmap(struct drm_i915_gem_object *obj); | |||
1108 | void i915_gem_lastclose(struct drm_device *dev); | 1112 | void i915_gem_lastclose(struct drm_device *dev); |
1109 | 1113 | ||
1110 | int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); | 1114 | int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); |
1111 | int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, | 1115 | int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj); |
1112 | bool interruptible); | ||
1113 | void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, | 1116 | void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, |
1114 | struct intel_ring_buffer *ring, | 1117 | struct intel_ring_buffer *ring, |
1115 | u32 seqno); | 1118 | u32 seqno); |
@@ -1131,16 +1134,14 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2) | |||
1131 | } | 1134 | } |
1132 | 1135 | ||
1133 | static inline u32 | 1136 | static inline u32 |
1134 | i915_gem_next_request_seqno(struct drm_device *dev, | 1137 | i915_gem_next_request_seqno(struct intel_ring_buffer *ring) |
1135 | struct intel_ring_buffer *ring) | ||
1136 | { | 1138 | { |
1137 | drm_i915_private_t *dev_priv = dev->dev_private; | 1139 | drm_i915_private_t *dev_priv = ring->dev->dev_private; |
1138 | return ring->outstanding_lazy_request = dev_priv->next_seqno; | 1140 | return ring->outstanding_lazy_request = dev_priv->next_seqno; |
1139 | } | 1141 | } |
1140 | 1142 | ||
1141 | int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj, | 1143 | int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj, |
1142 | struct intel_ring_buffer *pipelined, | 1144 | struct intel_ring_buffer *pipelined); |
1143 | bool interruptible); | ||
1144 | int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); | 1145 | int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); |
1145 | 1146 | ||
1146 | void i915_gem_retire_requests(struct drm_device *dev); | 1147 | void i915_gem_retire_requests(struct drm_device *dev); |
@@ -1149,8 +1150,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj); | |||
1149 | int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj, | 1150 | int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj, |
1150 | uint32_t read_domains, | 1151 | uint32_t read_domains, |
1151 | uint32_t write_domain); | 1152 | uint32_t write_domain); |
1152 | int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, | 1153 | int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj); |
1153 | bool interruptible); | ||
1154 | int __must_check i915_gem_init_ringbuffer(struct drm_device *dev); | 1154 | int __must_check i915_gem_init_ringbuffer(struct drm_device *dev); |
1155 | void i915_gem_cleanup_ringbuffer(struct drm_device *dev); | 1155 | void i915_gem_cleanup_ringbuffer(struct drm_device *dev); |
1156 | void i915_gem_do_init(struct drm_device *dev, | 1156 | void i915_gem_do_init(struct drm_device *dev, |
@@ -1159,14 +1159,11 @@ void i915_gem_do_init(struct drm_device *dev, | |||
1159 | unsigned long end); | 1159 | unsigned long end); |
1160 | int __must_check i915_gpu_idle(struct drm_device *dev); | 1160 | int __must_check i915_gpu_idle(struct drm_device *dev); |
1161 | int __must_check i915_gem_idle(struct drm_device *dev); | 1161 | int __must_check i915_gem_idle(struct drm_device *dev); |
1162 | int __must_check i915_add_request(struct drm_device *dev, | 1162 | int __must_check i915_add_request(struct intel_ring_buffer *ring, |
1163 | struct drm_file *file_priv, | 1163 | struct drm_file *file, |
1164 | struct drm_i915_gem_request *request, | 1164 | struct drm_i915_gem_request *request); |
1165 | struct intel_ring_buffer *ring); | 1165 | int __must_check i915_wait_request(struct intel_ring_buffer *ring, |
1166 | int __must_check i915_do_wait_request(struct drm_device *dev, | 1166 | uint32_t seqno); |
1167 | uint32_t seqno, | ||
1168 | bool interruptible, | ||
1169 | struct intel_ring_buffer *ring); | ||
1170 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); | 1167 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); |
1171 | int __must_check | 1168 | int __must_check |
1172 | i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, | 1169 | i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, |
@@ -1183,6 +1180,9 @@ void i915_gem_detach_phys_object(struct drm_device *dev, | |||
1183 | void i915_gem_free_all_phys_object(struct drm_device *dev); | 1180 | void i915_gem_free_all_phys_object(struct drm_device *dev); |
1184 | void i915_gem_release(struct drm_device *dev, struct drm_file *file); | 1181 | void i915_gem_release(struct drm_device *dev, struct drm_file *file); |
1185 | 1182 | ||
1183 | uint32_t | ||
1184 | i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj); | ||
1185 | |||
1186 | /* i915_gem_gtt.c */ | 1186 | /* i915_gem_gtt.c */ |
1187 | void i915_gem_restore_gtt_mappings(struct drm_device *dev); | 1187 | void i915_gem_restore_gtt_mappings(struct drm_device *dev); |
1188 | int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); | 1188 | int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); |
@@ -1315,7 +1315,7 @@ extern void intel_display_print_error_state(struct seq_file *m, | |||
1315 | #define __i915_read(x, y) \ | 1315 | #define __i915_read(x, y) \ |
1316 | static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ | 1316 | static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ |
1317 | u##x val = read##y(dev_priv->regs + reg); \ | 1317 | u##x val = read##y(dev_priv->regs + reg); \ |
1318 | trace_i915_reg_rw('R', reg, val, sizeof(val)); \ | 1318 | trace_i915_reg_rw(false, reg, val, sizeof(val)); \ |
1319 | return val; \ | 1319 | return val; \ |
1320 | } | 1320 | } |
1321 | __i915_read(8, b) | 1321 | __i915_read(8, b) |
@@ -1326,7 +1326,7 @@ __i915_read(64, q) | |||
1326 | 1326 | ||
1327 | #define __i915_write(x, y) \ | 1327 | #define __i915_write(x, y) \ |
1328 | static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ | 1328 | static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ |
1329 | trace_i915_reg_rw('W', reg, val, sizeof(val)); \ | 1329 | trace_i915_reg_rw(true, reg, val, sizeof(val)); \ |
1330 | write##y(val, dev_priv->regs + reg); \ | 1330 | write##y(val, dev_priv->regs + reg); \ |
1331 | } | 1331 | } |
1332 | __i915_write(8, b) | 1332 | __i915_write(8, b) |
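For illustration, the accessors generated by these macros are used like any other MMIO helper; the only behavioural change in this hunk is that trace_i915_reg_rw() now takes a boolean direction flag (false for reads, true for writes) instead of a character. A minimal sketch, assuming the 32-bit instantiations (__i915_read(32, l) / __i915_write(32, l)) exist alongside the 8- and 64-bit ones shown, and with EXAMPLE_REG standing in for a real register offset:

    /* Sketch: every access through the generated helpers is traced.
     * The bool passed to trace_i915_reg_rw() encodes the direction:
     * false = read, true = write.
     */
    u32 v;

    v = i915_read32(dev_priv, EXAMPLE_REG);       /* traces (false, EXAMPLE_REG, v, 4) */
    i915_write32(dev_priv, EXAMPLE_REG, v | 0x1); /* traces (true, EXAMPLE_REG, v | 0x1, 4) */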
@@ -1359,62 +1359,29 @@ __i915_write(64, q) | |||
1359 | * must be set to prevent GT core from power down and stale values being | 1359 | * must be set to prevent GT core from power down and stale values being |
1360 | * returned. | 1360 | * returned. |
1361 | */ | 1361 | */ |
1362 | void __gen6_force_wake_get(struct drm_i915_private *dev_priv); | 1362 | void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); |
1363 | void __gen6_force_wake_put (struct drm_i915_private *dev_priv); | 1363 | void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); |
1364 | static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg) | 1364 | void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); |
1365 | |||
1366 | static inline u32 i915_gt_read(struct drm_i915_private *dev_priv, u32 reg) | ||
1365 | { | 1367 | { |
1366 | u32 val; | 1368 | u32 val; |
1367 | 1369 | ||
1368 | if (dev_priv->info->gen >= 6) { | 1370 | if (dev_priv->info->gen >= 6) { |
1369 | __gen6_force_wake_get(dev_priv); | 1371 | __gen6_gt_force_wake_get(dev_priv); |
1370 | val = I915_READ(reg); | 1372 | val = I915_READ(reg); |
1371 | __gen6_force_wake_put(dev_priv); | 1373 | __gen6_gt_force_wake_put(dev_priv); |
1372 | } else | 1374 | } else |
1373 | val = I915_READ(reg); | 1375 | val = I915_READ(reg); |
1374 | 1376 | ||
1375 | return val; | 1377 | return val; |
1376 | } | 1378 | } |
1377 | 1379 | ||
1378 | static inline void | 1380 | static inline void i915_gt_write(struct drm_i915_private *dev_priv, |
1379 | i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len) | 1381 | u32 reg, u32 val) |
1380 | { | 1382 | { |
1381 | /* Trace down the write operation before the real write */ | 1383 | if (dev_priv->info->gen >= 6) |
1382 | trace_i915_reg_rw('W', reg, val, len); | 1384 | __gen6_gt_wait_for_fifo(dev_priv); |
1383 | switch (len) { | 1385 | I915_WRITE(reg, val); |
1384 | case 8: | ||
1385 | writeq(val, dev_priv->regs + reg); | ||
1386 | break; | ||
1387 | case 4: | ||
1388 | writel(val, dev_priv->regs + reg); | ||
1389 | break; | ||
1390 | case 2: | ||
1391 | writew(val, dev_priv->regs + reg); | ||
1392 | break; | ||
1393 | case 1: | ||
1394 | writeb(val, dev_priv->regs + reg); | ||
1395 | break; | ||
1396 | } | ||
1397 | } | 1386 | } |
1398 | |||
1399 | /** | ||
1400 | * Reads a dword out of the status page, which is written to from the command | ||
1401 | * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or | ||
1402 | * MI_STORE_DATA_IMM. | ||
1403 | * | ||
1404 | * The following dwords have a reserved meaning: | ||
1405 | * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes. | ||
1406 | * 0x04: ring 0 head pointer | ||
1407 | * 0x05: ring 1 head pointer (915-class) | ||
1408 | * 0x06: ring 2 head pointer (915-class) | ||
1409 | * 0x10-0x1b: Context status DWords (GM45) | ||
1410 | * 0x1f: Last written status offset. (GM45) | ||
1411 | * | ||
1412 | * The area from dword 0x20 to 0x3ff is available for driver usage. | ||
1413 | */ | ||
1414 | #define READ_HWSP(dev_priv, reg) (((volatile u32 *)\ | ||
1415 | (LP_RING(dev_priv)->status_page.page_addr))[reg]) | ||
1416 | #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) | ||
1417 | #define I915_GEM_HWS_INDEX 0x20 | ||
1418 | #define I915_BREADCRUMB_INDEX 0x21 | ||
1419 | |||
1420 | #endif | 1387 | #endif |
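For context, the renamed forcewake helpers and the i915_gt_read()/i915_gt_write() wrappers give callers a single way to touch registers that live in the GT power well. A minimal usage sketch, assuming a Gen6+ path and with GT_EXAMPLE_REG as a purely illustrative offset (not defined by this patch):

    /* Sketch: GT-domain register access.
     * i915_gt_read() brackets the read with a forcewake get/put so the
     * GT core cannot power down and return stale data mid-access;
     * i915_gt_write() first waits for free FIFO entries via
     * __gen6_gt_wait_for_fifo() before posting the write.
     */
    u32 val;

    val = i915_gt_read(dev_priv, GT_EXAMPLE_REG);
    i915_gt_write(dev_priv, GT_EXAMPLE_REG, val & ~0x1);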
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index bc7f06b8fbca..c4c2855d002d 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -75,8 +75,8 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, | |||
75 | dev_priv->mm.object_memory -= size; | 75 | dev_priv->mm.object_memory -= size; |
76 | } | 76 | } |
77 | 77 | ||
78 | int | 78 | static int |
79 | i915_gem_check_is_wedged(struct drm_device *dev) | 79 | i915_gem_wait_for_error(struct drm_device *dev) |
80 | { | 80 | { |
81 | struct drm_i915_private *dev_priv = dev->dev_private; | 81 | struct drm_i915_private *dev_priv = dev->dev_private; |
82 | struct completion *x = &dev_priv->error_completion; | 82 | struct completion *x = &dev_priv->error_completion; |
@@ -90,27 +90,24 @@ i915_gem_check_is_wedged(struct drm_device *dev) | |||
90 | if (ret) | 90 | if (ret) |
91 | return ret; | 91 | return ret; |
92 | 92 | ||
93 | /* Success, we reset the GPU! */ | 93 | if (atomic_read(&dev_priv->mm.wedged)) { |
94 | if (!atomic_read(&dev_priv->mm.wedged)) | 94 | /* GPU is hung, bump the completion count to account for |
95 | return 0; | 95 | * the token we just consumed so that we never hit zero and |
96 | 96 | * end up waiting upon a subsequent completion event that | |
97 | /* GPU is hung, bump the completion count to account for | 97 | * will never happen. |
98 | * the token we just consumed so that we never hit zero and | 98 | */ |
99 | * end up waiting upon a subsequent completion event that | 99 | spin_lock_irqsave(&x->wait.lock, flags); |
100 | * will never happen. | 100 | x->done++; |
101 | */ | 101 | spin_unlock_irqrestore(&x->wait.lock, flags); |
102 | spin_lock_irqsave(&x->wait.lock, flags); | 102 | } |
103 | x->done++; | 103 | return 0; |
104 | spin_unlock_irqrestore(&x->wait.lock, flags); | ||
105 | return -EIO; | ||
106 | } | 104 | } |
107 | 105 | ||
108 | int i915_mutex_lock_interruptible(struct drm_device *dev) | 106 | int i915_mutex_lock_interruptible(struct drm_device *dev) |
109 | { | 107 | { |
110 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
111 | int ret; | 108 | int ret; |
112 | 109 | ||
113 | ret = i915_gem_check_is_wedged(dev); | 110 | ret = i915_gem_wait_for_error(dev); |
114 | if (ret) | 111 | if (ret) |
115 | return ret; | 112 | return ret; |
116 | 113 | ||
@@ -118,11 +115,6 @@ int i915_mutex_lock_interruptible(struct drm_device *dev) | |||
118 | if (ret) | 115 | if (ret) |
119 | return ret; | 116 | return ret; |
120 | 117 | ||
121 | if (atomic_read(&dev_priv->mm.wedged)) { | ||
122 | mutex_unlock(&dev->struct_mutex); | ||
123 | return -EAGAIN; | ||
124 | } | ||
125 | |||
126 | WARN_ON(i915_verify_lists(dev)); | 118 | WARN_ON(i915_verify_lists(dev)); |
127 | return 0; | 119 | return 0; |
128 | } | 120 | } |
@@ -543,7 +535,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, | |||
543 | return ret; | 535 | return ret; |
544 | 536 | ||
545 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); | 537 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
546 | if (obj == NULL) { | 538 | if (&obj->base == NULL) { |
547 | ret = -ENOENT; | 539 | ret = -ENOENT; |
548 | goto unlock; | 540 | goto unlock; |
549 | } | 541 | } |
@@ -555,6 +547,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, | |||
555 | goto out; | 547 | goto out; |
556 | } | 548 | } |
557 | 549 | ||
550 | trace_i915_gem_object_pread(obj, args->offset, args->size); | ||
551 | |||
558 | ret = i915_gem_object_set_cpu_read_domain_range(obj, | 552 | ret = i915_gem_object_set_cpu_read_domain_range(obj, |
559 | args->offset, | 553 | args->offset, |
560 | args->size); | 554 | args->size); |
@@ -984,7 +978,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
984 | return ret; | 978 | return ret; |
985 | 979 | ||
986 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); | 980 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
987 | if (obj == NULL) { | 981 | if (&obj->base == NULL) { |
988 | ret = -ENOENT; | 982 | ret = -ENOENT; |
989 | goto unlock; | 983 | goto unlock; |
990 | } | 984 | } |
@@ -996,6 +990,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
996 | goto out; | 990 | goto out; |
997 | } | 991 | } |
998 | 992 | ||
993 | trace_i915_gem_object_pwrite(obj, args->offset, args->size); | ||
994 | |||
999 | /* We can only do the GTT pwrite on untiled buffers, as otherwise | 995 | /* We can only do the GTT pwrite on untiled buffers, as otherwise |
1000 | * it would end up going through the fenced access, and we'll get | 996 | * it would end up going through the fenced access, and we'll get |
1001 | * different detiling behavior between reading and writing. | 997 | * different detiling behavior between reading and writing. |
@@ -1078,7 +1074,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
1078 | return ret; | 1074 | return ret; |
1079 | 1075 | ||
1080 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); | 1076 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
1081 | if (obj == NULL) { | 1077 | if (&obj->base == NULL) { |
1082 | ret = -ENOENT; | 1078 | ret = -ENOENT; |
1083 | goto unlock; | 1079 | goto unlock; |
1084 | } | 1080 | } |
@@ -1121,7 +1117,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | |||
1121 | return ret; | 1117 | return ret; |
1122 | 1118 | ||
1123 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); | 1119 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
1124 | if (obj == NULL) { | 1120 | if (&obj->base == NULL) { |
1125 | ret = -ENOENT; | 1121 | ret = -ENOENT; |
1126 | goto unlock; | 1122 | goto unlock; |
1127 | } | 1123 | } |
@@ -1150,7 +1146,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | |||
1150 | struct drm_i915_private *dev_priv = dev->dev_private; | 1146 | struct drm_i915_private *dev_priv = dev->dev_private; |
1151 | struct drm_i915_gem_mmap *args = data; | 1147 | struct drm_i915_gem_mmap *args = data; |
1152 | struct drm_gem_object *obj; | 1148 | struct drm_gem_object *obj; |
1153 | loff_t offset; | ||
1154 | unsigned long addr; | 1149 | unsigned long addr; |
1155 | 1150 | ||
1156 | if (!(dev->driver->driver_features & DRIVER_GEM)) | 1151 | if (!(dev->driver->driver_features & DRIVER_GEM)) |
@@ -1165,8 +1160,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | |||
1165 | return -E2BIG; | 1160 | return -E2BIG; |
1166 | } | 1161 | } |
1167 | 1162 | ||
1168 | offset = args->offset; | ||
1169 | |||
1170 | down_write(&current->mm->mmap_sem); | 1163 | down_write(&current->mm->mmap_sem); |

1171 | addr = do_mmap(obj->filp, 0, args->size, | 1164 | addr = do_mmap(obj->filp, 0, args->size, |
1172 | PROT_READ | PROT_WRITE, MAP_SHARED, | 1165 | PROT_READ | PROT_WRITE, MAP_SHARED, |
@@ -1211,9 +1204,13 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1211 | page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> | 1204 | page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> |
1212 | PAGE_SHIFT; | 1205 | PAGE_SHIFT; |
1213 | 1206 | ||
1214 | /* Now bind it into the GTT if needed */ | 1207 | ret = i915_mutex_lock_interruptible(dev); |
1215 | mutex_lock(&dev->struct_mutex); | 1208 | if (ret) |
1209 | goto out; | ||
1216 | 1210 | ||
1211 | trace_i915_gem_object_fault(obj, page_offset, true, write); | ||
1212 | |||
1213 | /* Now bind it into the GTT if needed */ | ||
1217 | if (!obj->map_and_fenceable) { | 1214 | if (!obj->map_and_fenceable) { |
1218 | ret = i915_gem_object_unbind(obj); | 1215 | ret = i915_gem_object_unbind(obj); |
1219 | if (ret) | 1216 | if (ret) |
@@ -1232,7 +1229,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1232 | if (obj->tiling_mode == I915_TILING_NONE) | 1229 | if (obj->tiling_mode == I915_TILING_NONE) |
1233 | ret = i915_gem_object_put_fence(obj); | 1230 | ret = i915_gem_object_put_fence(obj); |
1234 | else | 1231 | else |
1235 | ret = i915_gem_object_get_fence(obj, NULL, true); | 1232 | ret = i915_gem_object_get_fence(obj, NULL); |
1236 | if (ret) | 1233 | if (ret) |
1237 | goto unlock; | 1234 | goto unlock; |
1238 | 1235 | ||
@@ -1248,12 +1245,21 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1248 | ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); | 1245 | ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); |
1249 | unlock: | 1246 | unlock: |
1250 | mutex_unlock(&dev->struct_mutex); | 1247 | mutex_unlock(&dev->struct_mutex); |
1251 | 1248 | out: | |
1252 | switch (ret) { | 1249 | switch (ret) { |
1250 | case -EIO: | ||
1253 | case -EAGAIN: | 1251 | case -EAGAIN: |
1252 | /* Give the error handler a chance to run and move the | ||
1253 | * objects off the GPU active list. Next time we service the | ||
1254 | * fault, we should be able to transition the page into the | ||
1255 | * GTT without touching the GPU (and so avoid further | ||
1256 | * EIO/EAGAIN). If the GPU is wedged, then there is no issue | ||
1257 | * with coherency, just lost writes. | ||
1258 | */ | ||
1254 | set_need_resched(); | 1259 | set_need_resched(); |
1255 | case 0: | 1260 | case 0: |
1256 | case -ERESTARTSYS: | 1261 | case -ERESTARTSYS: |
1262 | case -EINTR: | ||
1257 | return VM_FAULT_NOPAGE; | 1263 | return VM_FAULT_NOPAGE; |
1258 | case -ENOMEM: | 1264 | case -ENOMEM: |
1259 | return VM_FAULT_OOM; | 1265 | return VM_FAULT_OOM; |
@@ -1427,7 +1433,7 @@ i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj) | |||
1427 | * Return the required GTT alignment for an object, only taking into account | 1433 | * Return the required GTT alignment for an object, only taking into account |
1428 | * unfenced tiled surface requirements. | 1434 | * unfenced tiled surface requirements. |
1429 | */ | 1435 | */ |
1430 | static uint32_t | 1436 | uint32_t |
1431 | i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj) | 1437 | i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj) |
1432 | { | 1438 | { |
1433 | struct drm_device *dev = obj->base.dev; | 1439 | struct drm_device *dev = obj->base.dev; |
@@ -1472,7 +1478,7 @@ i915_gem_mmap_gtt(struct drm_file *file, | |||
1472 | return ret; | 1478 | return ret; |
1473 | 1479 | ||
1474 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); | 1480 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); |
1475 | if (obj == NULL) { | 1481 | if (&obj->base == NULL) { |
1476 | ret = -ENOENT; | 1482 | ret = -ENOENT; |
1477 | goto unlock; | 1483 | goto unlock; |
1478 | } | 1484 | } |
@@ -1712,9 +1718,8 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj) | |||
1712 | } | 1718 | } |
1713 | 1719 | ||
1714 | static void | 1720 | static void |
1715 | i915_gem_process_flushing_list(struct drm_device *dev, | 1721 | i915_gem_process_flushing_list(struct intel_ring_buffer *ring, |
1716 | uint32_t flush_domains, | 1722 | uint32_t flush_domains) |
1717 | struct intel_ring_buffer *ring) | ||
1718 | { | 1723 | { |
1719 | struct drm_i915_gem_object *obj, *next; | 1724 | struct drm_i915_gem_object *obj, *next; |
1720 | 1725 | ||
@@ -1727,7 +1732,7 @@ i915_gem_process_flushing_list(struct drm_device *dev, | |||
1727 | obj->base.write_domain = 0; | 1732 | obj->base.write_domain = 0; |
1728 | list_del_init(&obj->gpu_write_list); | 1733 | list_del_init(&obj->gpu_write_list); |
1729 | i915_gem_object_move_to_active(obj, ring, | 1734 | i915_gem_object_move_to_active(obj, ring, |
1730 | i915_gem_next_request_seqno(dev, ring)); | 1735 | i915_gem_next_request_seqno(ring)); |
1731 | 1736 | ||
1732 | trace_i915_gem_object_change_domain(obj, | 1737 | trace_i915_gem_object_change_domain(obj, |
1733 | obj->base.read_domains, | 1738 | obj->base.read_domains, |
@@ -1737,27 +1742,22 @@ i915_gem_process_flushing_list(struct drm_device *dev, | |||
1737 | } | 1742 | } |
1738 | 1743 | ||
1739 | int | 1744 | int |
1740 | i915_add_request(struct drm_device *dev, | 1745 | i915_add_request(struct intel_ring_buffer *ring, |
1741 | struct drm_file *file, | 1746 | struct drm_file *file, |
1742 | struct drm_i915_gem_request *request, | 1747 | struct drm_i915_gem_request *request) |
1743 | struct intel_ring_buffer *ring) | ||
1744 | { | 1748 | { |
1745 | drm_i915_private_t *dev_priv = dev->dev_private; | 1749 | drm_i915_private_t *dev_priv = ring->dev->dev_private; |
1746 | struct drm_i915_file_private *file_priv = NULL; | ||
1747 | uint32_t seqno; | 1750 | uint32_t seqno; |
1748 | int was_empty; | 1751 | int was_empty; |
1749 | int ret; | 1752 | int ret; |
1750 | 1753 | ||
1751 | BUG_ON(request == NULL); | 1754 | BUG_ON(request == NULL); |
1752 | 1755 | ||
1753 | if (file != NULL) | ||
1754 | file_priv = file->driver_priv; | ||
1755 | |||
1756 | ret = ring->add_request(ring, &seqno); | 1756 | ret = ring->add_request(ring, &seqno); |
1757 | if (ret) | 1757 | if (ret) |
1758 | return ret; | 1758 | return ret; |
1759 | 1759 | ||
1760 | ring->outstanding_lazy_request = false; | 1760 | trace_i915_gem_request_add(ring, seqno); |
1761 | 1761 | ||
1762 | request->seqno = seqno; | 1762 | request->seqno = seqno; |
1763 | request->ring = ring; | 1763 | request->ring = ring; |
@@ -1765,7 +1765,9 @@ i915_add_request(struct drm_device *dev, | |||
1765 | was_empty = list_empty(&ring->request_list); | 1765 | was_empty = list_empty(&ring->request_list); |
1766 | list_add_tail(&request->list, &ring->request_list); | 1766 | list_add_tail(&request->list, &ring->request_list); |
1767 | 1767 | ||
1768 | if (file_priv) { | 1768 | if (file) { |
1769 | struct drm_i915_file_private *file_priv = file->driver_priv; | ||
1770 | |||
1769 | spin_lock(&file_priv->mm.lock); | 1771 | spin_lock(&file_priv->mm.lock); |
1770 | request->file_priv = file_priv; | 1772 | request->file_priv = file_priv; |
1771 | list_add_tail(&request->client_list, | 1773 | list_add_tail(&request->client_list, |
@@ -1773,6 +1775,8 @@ i915_add_request(struct drm_device *dev, | |||
1773 | spin_unlock(&file_priv->mm.lock); | 1775 | spin_unlock(&file_priv->mm.lock); |
1774 | } | 1776 | } |
1775 | 1777 | ||
1778 | ring->outstanding_lazy_request = false; | ||
1779 | |||
1776 | if (!dev_priv->mm.suspended) { | 1780 | if (!dev_priv->mm.suspended) { |
1777 | mod_timer(&dev_priv->hangcheck_timer, | 1781 | mod_timer(&dev_priv->hangcheck_timer, |
1778 | jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); | 1782 | jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); |
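With the ring-centric signature, callers no longer thread a struct drm_device through; the request is queued on the ring that emitted it, and file may be NULL for kernel-internal requests (as the retire work handler below does). A minimal caller sketch under those assumptions:

    /* Sketch: queueing a request after emitting commands on a ring.
     * A NULL file marks the request as kernel-internal, so it is not
     * attached to any client's per-file request list.
     */
    struct drm_i915_gem_request *request;
    int ret;

    request = kzalloc(sizeof(*request), GFP_KERNEL);
    if (request == NULL)
        return -ENOMEM;

    ret = i915_add_request(ring, NULL, request);
    if (ret) {
        kfree(request);
        return ret;
    }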
@@ -1889,18 +1893,15 @@ void i915_gem_reset(struct drm_device *dev) | |||
1889 | * This function clears the request list as sequence numbers are passed. | 1893 | * This function clears the request list as sequence numbers are passed. |
1890 | */ | 1894 | */ |
1891 | static void | 1895 | static void |
1892 | i915_gem_retire_requests_ring(struct drm_device *dev, | 1896 | i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) |
1893 | struct intel_ring_buffer *ring) | ||
1894 | { | 1897 | { |
1895 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1896 | uint32_t seqno; | 1898 | uint32_t seqno; |
1897 | int i; | 1899 | int i; |
1898 | 1900 | ||
1899 | if (!ring->status_page.page_addr || | 1901 | if (list_empty(&ring->request_list)) |
1900 | list_empty(&ring->request_list)) | ||
1901 | return; | 1902 | return; |
1902 | 1903 | ||
1903 | WARN_ON(i915_verify_lists(dev)); | 1904 | WARN_ON(i915_verify_lists(ring->dev)); |
1904 | 1905 | ||
1905 | seqno = ring->get_seqno(ring); | 1906 | seqno = ring->get_seqno(ring); |
1906 | 1907 | ||
@@ -1918,7 +1919,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev, | |||
1918 | if (!i915_seqno_passed(seqno, request->seqno)) | 1919 | if (!i915_seqno_passed(seqno, request->seqno)) |
1919 | break; | 1920 | break; |
1920 | 1921 | ||
1921 | trace_i915_gem_request_retire(dev, request->seqno); | 1922 | trace_i915_gem_request_retire(ring, request->seqno); |
1922 | 1923 | ||
1923 | list_del(&request->list); | 1924 | list_del(&request->list); |
1924 | i915_gem_request_remove_from_client(request); | 1925 | i915_gem_request_remove_from_client(request); |
@@ -1944,13 +1945,13 @@ i915_gem_retire_requests_ring(struct drm_device *dev, | |||
1944 | i915_gem_object_move_to_inactive(obj); | 1945 | i915_gem_object_move_to_inactive(obj); |
1945 | } | 1946 | } |
1946 | 1947 | ||
1947 | if (unlikely (dev_priv->trace_irq_seqno && | 1948 | if (unlikely(ring->trace_irq_seqno && |
1948 | i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) { | 1949 | i915_seqno_passed(seqno, ring->trace_irq_seqno))) { |
1949 | ring->irq_put(ring); | 1950 | ring->irq_put(ring); |
1950 | dev_priv->trace_irq_seqno = 0; | 1951 | ring->trace_irq_seqno = 0; |
1951 | } | 1952 | } |
1952 | 1953 | ||
1953 | WARN_ON(i915_verify_lists(dev)); | 1954 | WARN_ON(i915_verify_lists(ring->dev)); |
1954 | } | 1955 | } |
1955 | 1956 | ||
1956 | void | 1957 | void |
@@ -1974,7 +1975,7 @@ i915_gem_retire_requests(struct drm_device *dev) | |||
1974 | } | 1975 | } |
1975 | 1976 | ||
1976 | for (i = 0; i < I915_NUM_RINGS; i++) | 1977 | for (i = 0; i < I915_NUM_RINGS; i++) |
1977 | i915_gem_retire_requests_ring(dev, &dev_priv->ring[i]); | 1978 | i915_gem_retire_requests_ring(&dev_priv->ring[i]); |
1978 | } | 1979 | } |
1979 | 1980 | ||
1980 | static void | 1981 | static void |
@@ -2008,11 +2009,11 @@ i915_gem_retire_work_handler(struct work_struct *work) | |||
2008 | struct drm_i915_gem_request *request; | 2009 | struct drm_i915_gem_request *request; |
2009 | int ret; | 2010 | int ret; |
2010 | 2011 | ||
2011 | ret = i915_gem_flush_ring(dev, ring, 0, | 2012 | ret = i915_gem_flush_ring(ring, |
2012 | I915_GEM_GPU_DOMAINS); | 2013 | 0, I915_GEM_GPU_DOMAINS); |
2013 | request = kzalloc(sizeof(*request), GFP_KERNEL); | 2014 | request = kzalloc(sizeof(*request), GFP_KERNEL); |
2014 | if (ret || request == NULL || | 2015 | if (ret || request == NULL || |
2015 | i915_add_request(dev, NULL, request, ring)) | 2016 | i915_add_request(ring, NULL, request)) |
2016 | kfree(request); | 2017 | kfree(request); |
2017 | } | 2018 | } |
2018 | 2019 | ||
@@ -2025,18 +2026,32 @@ i915_gem_retire_work_handler(struct work_struct *work) | |||
2025 | mutex_unlock(&dev->struct_mutex); | 2026 | mutex_unlock(&dev->struct_mutex); |
2026 | } | 2027 | } |
2027 | 2028 | ||
2029 | /** | ||
2030 | * Waits for a sequence number to be signaled, and cleans up the | ||
2031 | * request and object lists appropriately for that event. | ||
2032 | */ | ||
2028 | int | 2033 | int |
2029 | i915_do_wait_request(struct drm_device *dev, uint32_t seqno, | 2034 | i915_wait_request(struct intel_ring_buffer *ring, |
2030 | bool interruptible, struct intel_ring_buffer *ring) | 2035 | uint32_t seqno) |
2031 | { | 2036 | { |
2032 | drm_i915_private_t *dev_priv = dev->dev_private; | 2037 | drm_i915_private_t *dev_priv = ring->dev->dev_private; |
2033 | u32 ier; | 2038 | u32 ier; |
2034 | int ret = 0; | 2039 | int ret = 0; |
2035 | 2040 | ||
2036 | BUG_ON(seqno == 0); | 2041 | BUG_ON(seqno == 0); |
2037 | 2042 | ||
2038 | if (atomic_read(&dev_priv->mm.wedged)) | 2043 | if (atomic_read(&dev_priv->mm.wedged)) { |
2039 | return -EAGAIN; | 2044 | struct completion *x = &dev_priv->error_completion; |
2045 | bool recovery_complete; | ||
2046 | unsigned long flags; | ||
2047 | |||
2048 | /* Give the error handler a chance to run. */ | ||
2049 | spin_lock_irqsave(&x->wait.lock, flags); | ||
2050 | recovery_complete = x->done > 0; | ||
2051 | spin_unlock_irqrestore(&x->wait.lock, flags); | ||
2052 | |||
2053 | return recovery_complete ? -EIO : -EAGAIN; | ||
2054 | } | ||
2040 | 2055 | ||
2041 | if (seqno == ring->outstanding_lazy_request) { | 2056 | if (seqno == ring->outstanding_lazy_request) { |
2042 | struct drm_i915_gem_request *request; | 2057 | struct drm_i915_gem_request *request; |
@@ -2045,7 +2060,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, | |||
2045 | if (request == NULL) | 2060 | if (request == NULL) |
2046 | return -ENOMEM; | 2061 | return -ENOMEM; |
2047 | 2062 | ||
2048 | ret = i915_add_request(dev, NULL, request, ring); | 2063 | ret = i915_add_request(ring, NULL, request); |
2049 | if (ret) { | 2064 | if (ret) { |
2050 | kfree(request); | 2065 | kfree(request); |
2051 | return ret; | 2066 | return ret; |
@@ -2055,22 +2070,22 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, | |||
2055 | } | 2070 | } |
2056 | 2071 | ||
2057 | if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) { | 2072 | if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) { |
2058 | if (HAS_PCH_SPLIT(dev)) | 2073 | if (HAS_PCH_SPLIT(ring->dev)) |
2059 | ier = I915_READ(DEIER) | I915_READ(GTIER); | 2074 | ier = I915_READ(DEIER) | I915_READ(GTIER); |
2060 | else | 2075 | else |
2061 | ier = I915_READ(IER); | 2076 | ier = I915_READ(IER); |
2062 | if (!ier) { | 2077 | if (!ier) { |
2063 | DRM_ERROR("something (likely vbetool) disabled " | 2078 | DRM_ERROR("something (likely vbetool) disabled " |
2064 | "interrupts, re-enabling\n"); | 2079 | "interrupts, re-enabling\n"); |
2065 | i915_driver_irq_preinstall(dev); | 2080 | i915_driver_irq_preinstall(ring->dev); |
2066 | i915_driver_irq_postinstall(dev); | 2081 | i915_driver_irq_postinstall(ring->dev); |
2067 | } | 2082 | } |
2068 | 2083 | ||
2069 | trace_i915_gem_request_wait_begin(dev, seqno); | 2084 | trace_i915_gem_request_wait_begin(ring, seqno); |
2070 | 2085 | ||
2071 | ring->waiting_seqno = seqno; | 2086 | ring->waiting_seqno = seqno; |
2072 | if (ring->irq_get(ring)) { | 2087 | if (ring->irq_get(ring)) { |
2073 | if (interruptible) | 2088 | if (dev_priv->mm.interruptible) |
2074 | ret = wait_event_interruptible(ring->irq_queue, | 2089 | ret = wait_event_interruptible(ring->irq_queue, |
2075 | i915_seqno_passed(ring->get_seqno(ring), seqno) | 2090 | i915_seqno_passed(ring->get_seqno(ring), seqno) |
2076 | || atomic_read(&dev_priv->mm.wedged)); | 2091 | || atomic_read(&dev_priv->mm.wedged)); |
@@ -2086,7 +2101,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, | |||
2086 | ret = -EBUSY; | 2101 | ret = -EBUSY; |
2087 | ring->waiting_seqno = 0; | 2102 | ring->waiting_seqno = 0; |
2088 | 2103 | ||
2089 | trace_i915_gem_request_wait_end(dev, seqno); | 2104 | trace_i915_gem_request_wait_end(ring, seqno); |
2090 | } | 2105 | } |
2091 | if (atomic_read(&dev_priv->mm.wedged)) | 2106 | if (atomic_read(&dev_priv->mm.wedged)) |
2092 | ret = -EAGAIN; | 2107 | ret = -EAGAIN; |
@@ -2102,31 +2117,18 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, | |||
2102 | * a separate wait queue to handle that. | 2117 | * a separate wait queue to handle that. |
2103 | */ | 2118 | */ |
2104 | if (ret == 0) | 2119 | if (ret == 0) |
2105 | i915_gem_retire_requests_ring(dev, ring); | 2120 | i915_gem_retire_requests_ring(ring); |
2106 | 2121 | ||
2107 | return ret; | 2122 | return ret; |
2108 | } | 2123 | } |
2109 | 2124 | ||
2110 | /** | 2125 | /** |
2111 | * Waits for a sequence number to be signaled, and cleans up the | ||
2112 | * request and object lists appropriately for that event. | ||
2113 | */ | ||
2114 | static int | ||
2115 | i915_wait_request(struct drm_device *dev, uint32_t seqno, | ||
2116 | struct intel_ring_buffer *ring) | ||
2117 | { | ||
2118 | return i915_do_wait_request(dev, seqno, 1, ring); | ||
2119 | } | ||
2120 | |||
2121 | /** | ||
2122 | * Ensures that all rendering to the object has completed and the object is | 2126 | * Ensures that all rendering to the object has completed and the object is |
2123 | * safe to unbind from the GTT or access from the CPU. | 2127 | * safe to unbind from the GTT or access from the CPU. |
2124 | */ | 2128 | */ |
2125 | int | 2129 | int |
2126 | i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, | 2130 | i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj) |
2127 | bool interruptible) | ||
2128 | { | 2131 | { |
2129 | struct drm_device *dev = obj->base.dev; | ||
2130 | int ret; | 2132 | int ret; |
2131 | 2133 | ||
2132 | /* This function only exists to support waiting for existing rendering, | 2134 | /* This function only exists to support waiting for existing rendering, |
@@ -2138,10 +2140,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, | |||
2138 | * it. | 2140 | * it. |
2139 | */ | 2141 | */ |
2140 | if (obj->active) { | 2142 | if (obj->active) { |
2141 | ret = i915_do_wait_request(dev, | 2143 | ret = i915_wait_request(obj->ring, obj->last_rendering_seqno); |
2142 | obj->last_rendering_seqno, | ||
2143 | interruptible, | ||
2144 | obj->ring); | ||
2145 | if (ret) | 2144 | if (ret) |
2146 | return ret; | 2145 | return ret; |
2147 | } | 2146 | } |
@@ -2191,6 +2190,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) | |||
2191 | if (ret == -ERESTARTSYS) | 2190 | if (ret == -ERESTARTSYS) |
2192 | return ret; | 2191 | return ret; |
2193 | 2192 | ||
2193 | trace_i915_gem_object_unbind(obj); | ||
2194 | |||
2194 | i915_gem_gtt_unbind_object(obj); | 2195 | i915_gem_gtt_unbind_object(obj); |
2195 | i915_gem_object_put_pages_gtt(obj); | 2196 | i915_gem_object_put_pages_gtt(obj); |
2196 | 2197 | ||
@@ -2206,29 +2207,27 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) | |||
2206 | if (i915_gem_object_is_purgeable(obj)) | 2207 | if (i915_gem_object_is_purgeable(obj)) |
2207 | i915_gem_object_truncate(obj); | 2208 | i915_gem_object_truncate(obj); |
2208 | 2209 | ||
2209 | trace_i915_gem_object_unbind(obj); | ||
2210 | |||
2211 | return ret; | 2210 | return ret; |
2212 | } | 2211 | } |
2213 | 2212 | ||
2214 | int | 2213 | int |
2215 | i915_gem_flush_ring(struct drm_device *dev, | 2214 | i915_gem_flush_ring(struct intel_ring_buffer *ring, |
2216 | struct intel_ring_buffer *ring, | ||
2217 | uint32_t invalidate_domains, | 2215 | uint32_t invalidate_domains, |
2218 | uint32_t flush_domains) | 2216 | uint32_t flush_domains) |
2219 | { | 2217 | { |
2220 | int ret; | 2218 | int ret; |
2221 | 2219 | ||
2220 | trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains); | ||
2221 | |||
2222 | ret = ring->flush(ring, invalidate_domains, flush_domains); | 2222 | ret = ring->flush(ring, invalidate_domains, flush_domains); |
2223 | if (ret) | 2223 | if (ret) |
2224 | return ret; | 2224 | return ret; |
2225 | 2225 | ||
2226 | i915_gem_process_flushing_list(dev, flush_domains, ring); | 2226 | i915_gem_process_flushing_list(ring, flush_domains); |
2227 | return 0; | 2227 | return 0; |
2228 | } | 2228 | } |
2229 | 2229 | ||
2230 | static int i915_ring_idle(struct drm_device *dev, | 2230 | static int i915_ring_idle(struct intel_ring_buffer *ring) |
2231 | struct intel_ring_buffer *ring) | ||
2232 | { | 2231 | { |
2233 | int ret; | 2232 | int ret; |
2234 | 2233 | ||
@@ -2236,15 +2235,13 @@ static int i915_ring_idle(struct drm_device *dev, | |||
2236 | return 0; | 2235 | return 0; |
2237 | 2236 | ||
2238 | if (!list_empty(&ring->gpu_write_list)) { | 2237 | if (!list_empty(&ring->gpu_write_list)) { |
2239 | ret = i915_gem_flush_ring(dev, ring, | 2238 | ret = i915_gem_flush_ring(ring, |
2240 | I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); | 2239 | I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); |
2241 | if (ret) | 2240 | if (ret) |
2242 | return ret; | 2241 | return ret; |
2243 | } | 2242 | } |
2244 | 2243 | ||
2245 | return i915_wait_request(dev, | 2244 | return i915_wait_request(ring, i915_gem_next_request_seqno(ring)); |
2246 | i915_gem_next_request_seqno(dev, ring), | ||
2247 | ring); | ||
2248 | } | 2245 | } |
2249 | 2246 | ||
2250 | int | 2247 | int |
@@ -2261,7 +2258,7 @@ i915_gpu_idle(struct drm_device *dev) | |||
2261 | 2258 | ||
2262 | /* Flush everything onto the inactive list. */ | 2259 | /* Flush everything onto the inactive list. */ |
2263 | for (i = 0; i < I915_NUM_RINGS; i++) { | 2260 | for (i = 0; i < I915_NUM_RINGS; i++) { |
2264 | ret = i915_ring_idle(dev, &dev_priv->ring[i]); | 2261 | ret = i915_ring_idle(&dev_priv->ring[i]); |
2265 | if (ret) | 2262 | if (ret) |
2266 | return ret; | 2263 | return ret; |
2267 | } | 2264 | } |
@@ -2445,15 +2442,13 @@ static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno) | |||
2445 | 2442 | ||
2446 | static int | 2443 | static int |
2447 | i915_gem_object_flush_fence(struct drm_i915_gem_object *obj, | 2444 | i915_gem_object_flush_fence(struct drm_i915_gem_object *obj, |
2448 | struct intel_ring_buffer *pipelined, | 2445 | struct intel_ring_buffer *pipelined) |
2449 | bool interruptible) | ||
2450 | { | 2446 | { |
2451 | int ret; | 2447 | int ret; |
2452 | 2448 | ||
2453 | if (obj->fenced_gpu_access) { | 2449 | if (obj->fenced_gpu_access) { |
2454 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { | 2450 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { |
2455 | ret = i915_gem_flush_ring(obj->base.dev, | 2451 | ret = i915_gem_flush_ring(obj->last_fenced_ring, |
2456 | obj->last_fenced_ring, | ||
2457 | 0, obj->base.write_domain); | 2452 | 0, obj->base.write_domain); |
2458 | if (ret) | 2453 | if (ret) |
2459 | return ret; | 2454 | return ret; |
@@ -2465,10 +2460,8 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj, | |||
2465 | if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) { | 2460 | if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) { |
2466 | if (!ring_passed_seqno(obj->last_fenced_ring, | 2461 | if (!ring_passed_seqno(obj->last_fenced_ring, |
2467 | obj->last_fenced_seqno)) { | 2462 | obj->last_fenced_seqno)) { |
2468 | ret = i915_do_wait_request(obj->base.dev, | 2463 | ret = i915_wait_request(obj->last_fenced_ring, |
2469 | obj->last_fenced_seqno, | 2464 | obj->last_fenced_seqno); |
2470 | interruptible, | ||
2471 | obj->last_fenced_ring); | ||
2472 | if (ret) | 2465 | if (ret) |
2473 | return ret; | 2466 | return ret; |
2474 | } | 2467 | } |
@@ -2494,7 +2487,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj) | |||
2494 | if (obj->tiling_mode) | 2487 | if (obj->tiling_mode) |
2495 | i915_gem_release_mmap(obj); | 2488 | i915_gem_release_mmap(obj); |
2496 | 2489 | ||
2497 | ret = i915_gem_object_flush_fence(obj, NULL, true); | 2490 | ret = i915_gem_object_flush_fence(obj, NULL); |
2498 | if (ret) | 2491 | if (ret) |
2499 | return ret; | 2492 | return ret; |
2500 | 2493 | ||
@@ -2571,8 +2564,7 @@ i915_find_fence_reg(struct drm_device *dev, | |||
2571 | */ | 2564 | */ |
2572 | int | 2565 | int |
2573 | i915_gem_object_get_fence(struct drm_i915_gem_object *obj, | 2566 | i915_gem_object_get_fence(struct drm_i915_gem_object *obj, |
2574 | struct intel_ring_buffer *pipelined, | 2567 | struct intel_ring_buffer *pipelined) |
2575 | bool interruptible) | ||
2576 | { | 2568 | { |
2577 | struct drm_device *dev = obj->base.dev; | 2569 | struct drm_device *dev = obj->base.dev; |
2578 | struct drm_i915_private *dev_priv = dev->dev_private; | 2570 | struct drm_i915_private *dev_priv = dev->dev_private; |
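Dropping the interruptible flag leaves i915_gem_object_get_fence() with just the object and the optional pipelined ring. A minimal sketch of the execbuffer-style caller, matching the reserve-path hunk later in this patch (need_fence and the surrounding loop are assumed context):

    /* Sketch: acquiring a fence register for a tiled object before
     * execution, pipelined against the ring that will consume it.
     */
    if (need_fence) {
        ret = i915_gem_object_get_fence(obj, ring);
        if (ret)
            break;
    }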
@@ -2594,10 +2586,8 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj, | |||
2594 | if (reg->setup_seqno) { | 2586 | if (reg->setup_seqno) { |
2595 | if (!ring_passed_seqno(obj->last_fenced_ring, | 2587 | if (!ring_passed_seqno(obj->last_fenced_ring, |
2596 | reg->setup_seqno)) { | 2588 | reg->setup_seqno)) { |
2597 | ret = i915_do_wait_request(obj->base.dev, | 2589 | ret = i915_wait_request(obj->last_fenced_ring, |
2598 | reg->setup_seqno, | 2590 | reg->setup_seqno); |
2599 | interruptible, | ||
2600 | obj->last_fenced_ring); | ||
2601 | if (ret) | 2591 | if (ret) |
2602 | return ret; | 2592 | return ret; |
2603 | } | 2593 | } |
@@ -2606,15 +2596,13 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj, | |||
2606 | } | 2596 | } |
2607 | } else if (obj->last_fenced_ring && | 2597 | } else if (obj->last_fenced_ring && |
2608 | obj->last_fenced_ring != pipelined) { | 2598 | obj->last_fenced_ring != pipelined) { |
2609 | ret = i915_gem_object_flush_fence(obj, | 2599 | ret = i915_gem_object_flush_fence(obj, pipelined); |
2610 | pipelined, | ||
2611 | interruptible); | ||
2612 | if (ret) | 2600 | if (ret) |
2613 | return ret; | 2601 | return ret; |
2614 | } else if (obj->tiling_changed) { | 2602 | } else if (obj->tiling_changed) { |
2615 | if (obj->fenced_gpu_access) { | 2603 | if (obj->fenced_gpu_access) { |
2616 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { | 2604 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { |
2617 | ret = i915_gem_flush_ring(obj->base.dev, obj->ring, | 2605 | ret = i915_gem_flush_ring(obj->ring, |
2618 | 0, obj->base.write_domain); | 2606 | 0, obj->base.write_domain); |
2619 | if (ret) | 2607 | if (ret) |
2620 | return ret; | 2608 | return ret; |
@@ -2631,7 +2619,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj, | |||
2631 | if (obj->tiling_changed) { | 2619 | if (obj->tiling_changed) { |
2632 | if (pipelined) { | 2620 | if (pipelined) { |
2633 | reg->setup_seqno = | 2621 | reg->setup_seqno = |
2634 | i915_gem_next_request_seqno(dev, pipelined); | 2622 | i915_gem_next_request_seqno(pipelined); |
2635 | obj->last_fenced_seqno = reg->setup_seqno; | 2623 | obj->last_fenced_seqno = reg->setup_seqno; |
2636 | obj->last_fenced_ring = pipelined; | 2624 | obj->last_fenced_ring = pipelined; |
2637 | } | 2625 | } |
@@ -2645,7 +2633,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj, | |||
2645 | if (reg == NULL) | 2633 | if (reg == NULL) |
2646 | return -ENOSPC; | 2634 | return -ENOSPC; |
2647 | 2635 | ||
2648 | ret = i915_gem_object_flush_fence(obj, pipelined, interruptible); | 2636 | ret = i915_gem_object_flush_fence(obj, pipelined); |
2649 | if (ret) | 2637 | if (ret) |
2650 | return ret; | 2638 | return ret; |
2651 | 2639 | ||
@@ -2657,9 +2645,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj, | |||
2657 | if (old->tiling_mode) | 2645 | if (old->tiling_mode) |
2658 | i915_gem_release_mmap(old); | 2646 | i915_gem_release_mmap(old); |
2659 | 2647 | ||
2660 | ret = i915_gem_object_flush_fence(old, | 2648 | ret = i915_gem_object_flush_fence(old, pipelined); |
2661 | pipelined, | ||
2662 | interruptible); | ||
2663 | if (ret) { | 2649 | if (ret) { |
2664 | drm_gem_object_unreference(&old->base); | 2650 | drm_gem_object_unreference(&old->base); |
2665 | return ret; | 2651 | return ret; |
@@ -2671,7 +2657,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj, | |||
2671 | old->fence_reg = I915_FENCE_REG_NONE; | 2657 | old->fence_reg = I915_FENCE_REG_NONE; |
2672 | old->last_fenced_ring = pipelined; | 2658 | old->last_fenced_ring = pipelined; |
2673 | old->last_fenced_seqno = | 2659 | old->last_fenced_seqno = |
2674 | pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0; | 2660 | pipelined ? i915_gem_next_request_seqno(pipelined) : 0; |
2675 | 2661 | ||
2676 | drm_gem_object_unreference(&old->base); | 2662 | drm_gem_object_unreference(&old->base); |
2677 | } else if (obj->last_fenced_seqno == 0) | 2663 | } else if (obj->last_fenced_seqno == 0) |
@@ -2683,7 +2669,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj, | |||
2683 | obj->last_fenced_ring = pipelined; | 2669 | obj->last_fenced_ring = pipelined; |
2684 | 2670 | ||
2685 | reg->setup_seqno = | 2671 | reg->setup_seqno = |
2686 | pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0; | 2672 | pipelined ? i915_gem_next_request_seqno(pipelined) : 0; |
2687 | obj->last_fenced_seqno = reg->setup_seqno; | 2673 | obj->last_fenced_seqno = reg->setup_seqno; |
2688 | 2674 | ||
2689 | update: | 2675 | update: |
@@ -2880,7 +2866,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, | |||
2880 | 2866 | ||
2881 | obj->map_and_fenceable = mappable && fenceable; | 2867 | obj->map_and_fenceable = mappable && fenceable; |
2882 | 2868 | ||
2883 | trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable); | 2869 | trace_i915_gem_object_bind(obj, map_and_fenceable); |
2884 | return 0; | 2870 | return 0; |
2885 | } | 2871 | } |
2886 | 2872 | ||
@@ -2903,13 +2889,11 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj) | |||
2903 | static int | 2889 | static int |
2904 | i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj) | 2890 | i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj) |
2905 | { | 2891 | { |
2906 | struct drm_device *dev = obj->base.dev; | ||
2907 | |||
2908 | if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) | 2892 | if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) |
2909 | return 0; | 2893 | return 0; |
2910 | 2894 | ||
2911 | /* Queue the GPU write cache flushing we need. */ | 2895 | /* Queue the GPU write cache flushing we need. */ |
2912 | return i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain); | 2896 | return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain); |
2913 | } | 2897 | } |
2914 | 2898 | ||
2915 | /** Flushes the GTT write domain for the object if it's dirty. */ | 2899 | /** Flushes the GTT write domain for the object if it's dirty. */ |
@@ -2976,12 +2960,15 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) | |||
2976 | if (obj->gtt_space == NULL) | 2960 | if (obj->gtt_space == NULL) |
2977 | return -EINVAL; | 2961 | return -EINVAL; |
2978 | 2962 | ||
2963 | if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) | ||
2964 | return 0; | ||
2965 | |||
2979 | ret = i915_gem_object_flush_gpu_write_domain(obj); | 2966 | ret = i915_gem_object_flush_gpu_write_domain(obj); |
2980 | if (ret) | 2967 | if (ret) |
2981 | return ret; | 2968 | return ret; |
2982 | 2969 | ||
2983 | if (obj->pending_gpu_write || write) { | 2970 | if (obj->pending_gpu_write || write) { |
2984 | ret = i915_gem_object_wait_rendering(obj, true); | 2971 | ret = i915_gem_object_wait_rendering(obj); |
2985 | if (ret) | 2972 | if (ret) |
2986 | return ret; | 2973 | return ret; |
2987 | } | 2974 | } |
@@ -3031,7 +3018,7 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj, | |||
3031 | 3018 | ||
3032 | /* Currently, we are always called from an non-interruptible context. */ | 3019 | /* Currently, we are always called from an non-interruptible context. */ |
3033 | if (pipelined != obj->ring) { | 3020 | if (pipelined != obj->ring) { |
3034 | ret = i915_gem_object_wait_rendering(obj, false); | 3021 | ret = i915_gem_object_wait_rendering(obj); |
3035 | if (ret) | 3022 | if (ret) |
3036 | return ret; | 3023 | return ret; |
3037 | } | 3024 | } |
@@ -3049,8 +3036,7 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj, | |||
3049 | } | 3036 | } |
3050 | 3037 | ||
3051 | int | 3038 | int |
3052 | i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, | 3039 | i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj) |
3053 | bool interruptible) | ||
3054 | { | 3040 | { |
3055 | int ret; | 3041 | int ret; |
3056 | 3042 | ||
@@ -3058,13 +3044,12 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, | |||
3058 | return 0; | 3044 | return 0; |
3059 | 3045 | ||
3060 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { | 3046 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { |
3061 | ret = i915_gem_flush_ring(obj->base.dev, obj->ring, | 3047 | ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain); |
3062 | 0, obj->base.write_domain); | ||
3063 | if (ret) | 3048 | if (ret) |
3064 | return ret; | 3049 | return ret; |
3065 | } | 3050 | } |
3066 | 3051 | ||
3067 | return i915_gem_object_wait_rendering(obj, interruptible); | 3052 | return i915_gem_object_wait_rendering(obj); |
3068 | } | 3053 | } |
3069 | 3054 | ||
3070 | /** | 3055 | /** |
@@ -3079,11 +3064,14 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) | |||
3079 | uint32_t old_write_domain, old_read_domains; | 3064 | uint32_t old_write_domain, old_read_domains; |
3080 | int ret; | 3065 | int ret; |
3081 | 3066 | ||
3067 | if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) | ||
3068 | return 0; | ||
3069 | |||
3082 | ret = i915_gem_object_flush_gpu_write_domain(obj); | 3070 | ret = i915_gem_object_flush_gpu_write_domain(obj); |
3083 | if (ret) | 3071 | if (ret) |
3084 | return ret; | 3072 | return ret; |
3085 | 3073 | ||
3086 | ret = i915_gem_object_wait_rendering(obj, true); | 3074 | ret = i915_gem_object_wait_rendering(obj); |
3087 | if (ret) | 3075 | if (ret) |
3088 | return ret; | 3076 | return ret; |
3089 | 3077 | ||
@@ -3181,7 +3169,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj, | |||
3181 | if (ret) | 3169 | if (ret) |
3182 | return ret; | 3170 | return ret; |
3183 | 3171 | ||
3184 | ret = i915_gem_object_wait_rendering(obj, true); | 3172 | ret = i915_gem_object_wait_rendering(obj); |
3185 | if (ret) | 3173 | if (ret) |
3186 | return ret; | 3174 | return ret; |
3187 | 3175 | ||
@@ -3252,6 +3240,9 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) | |||
3252 | u32 seqno = 0; | 3240 | u32 seqno = 0; |
3253 | int ret; | 3241 | int ret; |
3254 | 3242 | ||
3243 | if (atomic_read(&dev_priv->mm.wedged)) | ||
3244 | return -EIO; | ||
3245 | |||
3255 | spin_lock(&file_priv->mm.lock); | 3246 | spin_lock(&file_priv->mm.lock); |
3256 | list_for_each_entry(request, &file_priv->mm.request_list, client_list) { | 3247 | list_for_each_entry(request, &file_priv->mm.request_list, client_list) { |
3257 | if (time_after_eq(request->emitted_jiffies, recent_enough)) | 3248 | if (time_after_eq(request->emitted_jiffies, recent_enough)) |
@@ -3367,7 +3358,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, | |||
3367 | return ret; | 3358 | return ret; |
3368 | 3359 | ||
3369 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); | 3360 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
3370 | if (obj == NULL) { | 3361 | if (&obj->base == NULL) { |
3371 | ret = -ENOENT; | 3362 | ret = -ENOENT; |
3372 | goto unlock; | 3363 | goto unlock; |
3373 | } | 3364 | } |
@@ -3418,7 +3409,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data, | |||
3418 | return ret; | 3409 | return ret; |
3419 | 3410 | ||
3420 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); | 3411 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
3421 | if (obj == NULL) { | 3412 | if (&obj->base == NULL) { |
3422 | ret = -ENOENT; | 3413 | ret = -ENOENT; |
3423 | goto unlock; | 3414 | goto unlock; |
3424 | } | 3415 | } |
@@ -3455,7 +3446,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
3455 | return ret; | 3446 | return ret; |
3456 | 3447 | ||
3457 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); | 3448 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
3458 | if (obj == NULL) { | 3449 | if (&obj->base == NULL) { |
3459 | ret = -ENOENT; | 3450 | ret = -ENOENT; |
3460 | goto unlock; | 3451 | goto unlock; |
3461 | } | 3452 | } |
@@ -3473,7 +3464,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
3473 | * flush earlier is beneficial. | 3464 | * flush earlier is beneficial. |
3474 | */ | 3465 | */ |
3475 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { | 3466 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { |
3476 | ret = i915_gem_flush_ring(dev, obj->ring, | 3467 | ret = i915_gem_flush_ring(obj->ring, |
3477 | 0, obj->base.write_domain); | 3468 | 0, obj->base.write_domain); |
3478 | } else if (obj->ring->outstanding_lazy_request == | 3469 | } else if (obj->ring->outstanding_lazy_request == |
3479 | obj->last_rendering_seqno) { | 3470 | obj->last_rendering_seqno) { |
@@ -3484,9 +3475,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
3484 | */ | 3475 | */ |
3485 | request = kzalloc(sizeof(*request), GFP_KERNEL); | 3476 | request = kzalloc(sizeof(*request), GFP_KERNEL); |
3486 | if (request) | 3477 | if (request) |
3487 | ret = i915_add_request(dev, | 3478 | ret = i915_add_request(obj->ring, NULL, request); |
3488 | NULL, request, | ||
3489 | obj->ring); | ||
3490 | else | 3479 | else |
3491 | ret = -ENOMEM; | 3480 | ret = -ENOMEM; |
3492 | } | 3481 | } |
@@ -3496,7 +3485,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
3496 | * are actually unmasked, and our working set ends up being | 3485 | * are actually unmasked, and our working set ends up being |
3497 | * larger than required. | 3486 | * larger than required. |
3498 | */ | 3487 | */ |
3499 | i915_gem_retire_requests_ring(dev, obj->ring); | 3488 | i915_gem_retire_requests_ring(obj->ring); |
3500 | 3489 | ||
3501 | args->busy = obj->active; | 3490 | args->busy = obj->active; |
3502 | } | 3491 | } |
@@ -3535,7 +3524,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, | |||
3535 | return ret; | 3524 | return ret; |
3536 | 3525 | ||
3537 | obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle)); | 3526 | obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle)); |
3538 | if (obj == NULL) { | 3527 | if (&obj->base == NULL) { |
3539 | ret = -ENOENT; | 3528 | ret = -ENOENT; |
3540 | goto unlock; | 3529 | goto unlock; |
3541 | } | 3530 | } |
@@ -3626,6 +3615,8 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj) | |||
3626 | kfree(obj->page_cpu_valid); | 3615 | kfree(obj->page_cpu_valid); |
3627 | kfree(obj->bit_17); | 3616 | kfree(obj->bit_17); |
3628 | kfree(obj); | 3617 | kfree(obj); |
3618 | |||
3619 | trace_i915_gem_object_destroy(obj); | ||
3629 | } | 3620 | } |
3630 | 3621 | ||
3631 | void i915_gem_free_object(struct drm_gem_object *gem_obj) | 3622 | void i915_gem_free_object(struct drm_gem_object *gem_obj) |
@@ -3633,8 +3624,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) | |||
3633 | struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); | 3624 | struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); |
3634 | struct drm_device *dev = obj->base.dev; | 3625 | struct drm_device *dev = obj->base.dev; |
3635 | 3626 | ||
3636 | trace_i915_gem_object_destroy(obj); | ||
3637 | |||
3638 | while (obj->pin_count > 0) | 3627 | while (obj->pin_count > 0) |
3639 | i915_gem_object_unpin(obj); | 3628 | i915_gem_object_unpin(obj); |
3640 | 3629 | ||
@@ -3880,6 +3869,8 @@ i915_gem_load(struct drm_device *dev) | |||
3880 | i915_gem_detect_bit_6_swizzle(dev); | 3869 | i915_gem_detect_bit_6_swizzle(dev); |
3881 | init_waitqueue_head(&dev_priv->pending_flip_queue); | 3870 | init_waitqueue_head(&dev_priv->pending_flip_queue); |
3882 | 3871 | ||
3872 | dev_priv->mm.interruptible = true; | ||
3873 | |||
3883 | dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink; | 3874 | dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink; |
3884 | dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS; | 3875 | dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS; |
3885 | register_shrinker(&dev_priv->mm.inactive_shrinker); | 3876 | register_shrinker(&dev_priv->mm.inactive_shrinker); |
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c index 29d014c48ca2..8da1899bd24f 100644 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ b/drivers/gpu/drm/i915/i915_gem_debug.c | |||
@@ -134,51 +134,6 @@ i915_verify_lists(struct drm_device *dev) | |||
134 | } | 134 | } |
135 | #endif /* WATCH_INACTIVE */ | 135 | #endif /* WATCH_INACTIVE */ |
136 | 136 | ||
137 | |||
138 | #if WATCH_EXEC | WATCH_PWRITE | ||
139 | static void | ||
140 | i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end, | ||
141 | uint32_t bias, uint32_t mark) | ||
142 | { | ||
143 | uint32_t *mem = kmap_atomic(page, KM_USER0); | ||
144 | int i; | ||
145 | for (i = start; i < end; i += 4) | ||
146 | DRM_INFO("%08x: %08x%s\n", | ||
147 | (int) (bias + i), mem[i / 4], | ||
148 | (bias + i == mark) ? " ********" : ""); | ||
149 | kunmap_atomic(mem, KM_USER0); | ||
150 | /* give syslog time to catch up */ | ||
151 | msleep(1); | ||
152 | } | ||
153 | |||
154 | void | ||
155 | i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, | ||
156 | const char *where, uint32_t mark) | ||
157 | { | ||
158 | int page; | ||
159 | |||
160 | DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_offset); | ||
161 | for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) { | ||
162 | int page_len, chunk, chunk_len; | ||
163 | |||
164 | page_len = len - page * PAGE_SIZE; | ||
165 | if (page_len > PAGE_SIZE) | ||
166 | page_len = PAGE_SIZE; | ||
167 | |||
168 | for (chunk = 0; chunk < page_len; chunk += 128) { | ||
169 | chunk_len = page_len - chunk; | ||
170 | if (chunk_len > 128) | ||
171 | chunk_len = 128; | ||
172 | i915_gem_dump_page(obj->pages[page], | ||
173 | chunk, chunk + chunk_len, | ||
174 | obj->gtt_offset + | ||
175 | page * PAGE_SIZE, | ||
176 | mark); | ||
177 | } | ||
178 | } | ||
179 | } | ||
180 | #endif | ||
181 | |||
182 | #if WATCH_COHERENCY | 137 | #if WATCH_COHERENCY |
183 | void | 138 | void |
184 | i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle) | 139 | i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle) |
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 3d39005540aa..da05a2692a75 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include "drm.h" | 30 | #include "drm.h" |
31 | #include "i915_drv.h" | 31 | #include "i915_drv.h" |
32 | #include "i915_drm.h" | 32 | #include "i915_drm.h" |
33 | #include "i915_trace.h" | ||
33 | 34 | ||
34 | static bool | 35 | static bool |
35 | mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) | 36 | mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) |
@@ -63,6 +64,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, | |||
63 | return 0; | 64 | return 0; |
64 | } | 65 | } |
65 | 66 | ||
67 | trace_i915_gem_evict(dev, min_size, alignment, mappable); | ||
68 | |||
66 | /* | 69 | /* |
67 | * The goal is to evict objects and amalgamate space in LRU order. | 70 | * The goal is to evict objects and amalgamate space in LRU order. |
68 | * The oldest idle objects reside on the inactive list, which is in | 71 | * The oldest idle objects reside on the inactive list, which is in |
@@ -189,6 +192,8 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only) | |||
189 | if (lists_empty) | 192 | if (lists_empty) |
190 | return -ENOSPC; | 193 | return -ENOSPC; |
191 | 194 | ||
195 | trace_i915_gem_evict_everything(dev, purgeable_only); | ||
196 | |||
192 | /* Flush everything (on to the inactive lists) and evict */ | 197 | /* Flush everything (on to the inactive lists) and evict */ |
193 | ret = i915_gpu_idle(dev); | 198 | ret = i915_gpu_idle(dev); |
194 | if (ret) | 199 | if (ret) |
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index d2f445e825f2..7ff7f933ddf1 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -37,6 +37,7 @@ struct change_domains { | |||
37 | uint32_t invalidate_domains; | 37 | uint32_t invalidate_domains; |
38 | uint32_t flush_domains; | 38 | uint32_t flush_domains; |
39 | uint32_t flush_rings; | 39 | uint32_t flush_rings; |
40 | uint32_t flips; | ||
40 | }; | 41 | }; |
41 | 42 | ||
42 | /* | 43 | /* |
@@ -190,6 +191,9 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj, | |||
190 | if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT) | 191 | if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT) |
191 | i915_gem_release_mmap(obj); | 192 | i915_gem_release_mmap(obj); |
192 | 193 | ||
194 | if (obj->base.pending_write_domain) | ||
195 | cd->flips |= atomic_read(&obj->pending_flip); | ||
196 | |||
193 | /* The actual obj->write_domain will be updated with | 197 | /* The actual obj->write_domain will be updated with |
194 | * pending_write_domain after we emit the accumulated flush for all | 198 | * pending_write_domain after we emit the accumulated flush for all |
195 | * of our domain changes in execbuffers (which clears objects' | 199 | * of our domain changes in execbuffers (which clears objects' |
@@ -282,21 +286,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, | |||
282 | 286 | ||
283 | target_offset = to_intel_bo(target_obj)->gtt_offset; | 287 | target_offset = to_intel_bo(target_obj)->gtt_offset; |
284 | 288 | ||
285 | #if WATCH_RELOC | ||
286 | DRM_INFO("%s: obj %p offset %08x target %d " | ||
287 | "read %08x write %08x gtt %08x " | ||
288 | "presumed %08x delta %08x\n", | ||
289 | __func__, | ||
290 | obj, | ||
291 | (int) reloc->offset, | ||
292 | (int) reloc->target_handle, | ||
293 | (int) reloc->read_domains, | ||
294 | (int) reloc->write_domain, | ||
295 | (int) target_offset, | ||
296 | (int) reloc->presumed_offset, | ||
297 | reloc->delta); | ||
298 | #endif | ||
299 | |||
300 | /* The target buffer should have appeared before us in the | 289 | /* The target buffer should have appeared before us in the |
301 | * exec_object list, so it should have a GTT space bound by now. | 290 | * exec_object list, so it should have a GTT space bound by now. |
302 | */ | 291 | */ |
@@ -365,16 +354,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, | |||
365 | return ret; | 354 | return ret; |
366 | } | 355 | } |
367 | 356 | ||
368 | /* and points to somewhere within the target object. */ | ||
369 | if (unlikely(reloc->delta >= target_obj->size)) { | ||
370 | DRM_ERROR("Relocation beyond target object bounds: " | ||
371 | "obj %p target %d delta %d size %d.\n", | ||
372 | obj, reloc->target_handle, | ||
373 | (int) reloc->delta, | ||
374 | (int) target_obj->size); | ||
375 | return ret; | ||
376 | } | ||
377 | |||
378 | reloc->delta += target_offset; | 357 | reloc->delta += target_offset; |
379 | if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) { | 358 | if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) { |
380 | uint32_t page_offset = reloc->offset & ~PAGE_MASK; | 359 | uint32_t page_offset = reloc->offset & ~PAGE_MASK; |
@@ -575,7 +554,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, | |||
575 | 554 | ||
576 | if (has_fenced_gpu_access) { | 555 | if (has_fenced_gpu_access) { |
577 | if (need_fence) { | 556 | if (need_fence) { |
578 | ret = i915_gem_object_get_fence(obj, ring, 1); | 557 | ret = i915_gem_object_get_fence(obj, ring); |
579 | if (ret) | 558 | if (ret) |
580 | break; | 559 | break; |
581 | } else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE && | 560 | } else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE && |
@@ -690,11 +669,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, | |||
690 | /* reacquire the objects */ | 669 | /* reacquire the objects */ |
691 | eb_reset(eb); | 670 | eb_reset(eb); |
692 | for (i = 0; i < count; i++) { | 671 | for (i = 0; i < count; i++) { |
693 | struct drm_i915_gem_object *obj; | ||
694 | |||
695 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, | 672 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, |
696 | exec[i].handle)); | 673 | exec[i].handle)); |
697 | if (obj == NULL) { | 674 | if (&obj->base == NULL) { |
698 | DRM_ERROR("Invalid object handle %d at index %d\n", | 675 | DRM_ERROR("Invalid object handle %d at index %d\n", |
699 | exec[i].handle, i); | 676 | exec[i].handle, i); |
700 | ret = -ENOENT; | 677 | ret = -ENOENT; |
@@ -749,8 +726,7 @@ i915_gem_execbuffer_flush(struct drm_device *dev, | |||
749 | if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { | 726 | if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { |
750 | for (i = 0; i < I915_NUM_RINGS; i++) | 727 | for (i = 0; i < I915_NUM_RINGS; i++) |
751 | if (flush_rings & (1 << i)) { | 728 | if (flush_rings & (1 << i)) { |
752 | ret = i915_gem_flush_ring(dev, | 729 | ret = i915_gem_flush_ring(&dev_priv->ring[i], |
753 | &dev_priv->ring[i], | ||
754 | invalidate_domains, | 730 | invalidate_domains, |
755 | flush_domains); | 731 | flush_domains); |
756 | if (ret) | 732 | if (ret) |
@@ -772,9 +748,9 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj, | |||
772 | if (from == NULL || to == from) | 748 | if (from == NULL || to == from) |
773 | return 0; | 749 | return 0; |
774 | 750 | ||
775 | /* XXX gpu semaphores are currently causing hard hangs on SNB mobile */ | 751 | /* XXX gpu semaphores are implicated in various hard hangs on SNB */ |
776 | if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev)) | 752 | if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores) |
777 | return i915_gem_object_wait_rendering(obj, true); | 753 | return i915_gem_object_wait_rendering(obj); |
778 | 754 | ||
779 | idx = intel_ring_sync_index(from, to); | 755 | idx = intel_ring_sync_index(from, to); |
780 | 756 | ||
@@ -789,7 +765,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj, | |||
789 | if (request == NULL) | 765 | if (request == NULL) |
790 | return -ENOMEM; | 766 | return -ENOMEM; |
791 | 767 | ||
792 | ret = i915_add_request(obj->base.dev, NULL, request, from); | 768 | ret = i915_add_request(from, NULL, request); |
793 | if (ret) { | 769 | if (ret) { |
794 | kfree(request); | 770 | kfree(request); |
795 | return ret; | 771 | return ret; |
@@ -803,6 +779,39 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj, | |||
803 | } | 779 | } |
804 | 780 | ||
805 | static int | 781 | static int |
782 | i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips) | ||
783 | { | ||
784 | u32 plane, flip_mask; | ||
785 | int ret; | ||
786 | |||
787 | /* Check for any pending flips. As we only maintain a flip queue depth | ||
788 | * of 1, we can simply insert a WAIT for the next display flip prior | ||
789 | * to executing the batch and avoid stalling the CPU. | ||
790 | */ | ||
791 | |||
792 | for (plane = 0; flips >> plane; plane++) { | ||
793 | if (((flips >> plane) & 1) == 0) | ||
794 | continue; | ||
795 | |||
796 | if (plane) | ||
797 | flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; | ||
798 | else | ||
799 | flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; | ||
800 | |||
801 | ret = intel_ring_begin(ring, 2); | ||
802 | if (ret) | ||
803 | return ret; | ||
804 | |||
805 | intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); | ||
806 | intel_ring_emit(ring, MI_NOOP); | ||
807 | intel_ring_advance(ring); | ||
808 | } | ||
809 | |||
810 | return 0; | ||
811 | } | ||
812 | |||
813 | |||
814 | static int | ||
806 | i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, | 815 | i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, |
807 | struct list_head *objects) | 816 | struct list_head *objects) |
808 | { | 817 | { |
@@ -810,19 +819,11 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, | |||
810 | struct change_domains cd; | 819 | struct change_domains cd; |
811 | int ret; | 820 | int ret; |
812 | 821 | ||
813 | cd.invalidate_domains = 0; | 822 | memset(&cd, 0, sizeof(cd)); |
814 | cd.flush_domains = 0; | ||
815 | cd.flush_rings = 0; | ||
816 | list_for_each_entry(obj, objects, exec_list) | 823 | list_for_each_entry(obj, objects, exec_list) |
817 | i915_gem_object_set_to_gpu_domain(obj, ring, &cd); | 824 | i915_gem_object_set_to_gpu_domain(obj, ring, &cd); |
818 | 825 | ||
819 | if (cd.invalidate_domains | cd.flush_domains) { | 826 | if (cd.invalidate_domains | cd.flush_domains) { |
820 | #if WATCH_EXEC | ||
821 | DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", | ||
822 | __func__, | ||
823 | cd.invalidate_domains, | ||
824 | cd.flush_domains); | ||
825 | #endif | ||
826 | ret = i915_gem_execbuffer_flush(ring->dev, | 827 | ret = i915_gem_execbuffer_flush(ring->dev, |
827 | cd.invalidate_domains, | 828 | cd.invalidate_domains, |
828 | cd.flush_domains, | 829 | cd.flush_domains, |
@@ -831,6 +832,12 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, | |||
831 | return ret; | 832 | return ret; |
832 | } | 833 | } |
833 | 834 | ||
835 | if (cd.flips) { | ||
836 | ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips); | ||
837 | if (ret) | ||
838 | return ret; | ||
839 | } | ||
840 | |||
834 | list_for_each_entry(obj, objects, exec_list) { | 841 | list_for_each_entry(obj, objects, exec_list) { |
835 | ret = i915_gem_execbuffer_sync_rings(obj, ring); | 842 | ret = i915_gem_execbuffer_sync_rings(obj, ring); |
836 | if (ret) | 843 | if (ret) |
@@ -877,47 +884,6 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, | |||
877 | return 0; | 884 | return 0; |
878 | } | 885 | } |
879 | 886 | ||
880 | static int | ||
881 | i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, | ||
882 | struct list_head *objects) | ||
883 | { | ||
884 | struct drm_i915_gem_object *obj; | ||
885 | int flips; | ||
886 | |||
887 | /* Check for any pending flips. As we only maintain a flip queue depth | ||
888 | * of 1, we can simply insert a WAIT for the next display flip prior | ||
889 | * to executing the batch and avoid stalling the CPU. | ||
890 | */ | ||
891 | flips = 0; | ||
892 | list_for_each_entry(obj, objects, exec_list) { | ||
893 | if (obj->base.write_domain) | ||
894 | flips |= atomic_read(&obj->pending_flip); | ||
895 | } | ||
896 | if (flips) { | ||
897 | int plane, flip_mask, ret; | ||
898 | |||
899 | for (plane = 0; flips >> plane; plane++) { | ||
900 | if (((flips >> plane) & 1) == 0) | ||
901 | continue; | ||
902 | |||
903 | if (plane) | ||
904 | flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; | ||
905 | else | ||
906 | flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; | ||
907 | |||
908 | ret = intel_ring_begin(ring, 2); | ||
909 | if (ret) | ||
910 | return ret; | ||
911 | |||
912 | intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); | ||
913 | intel_ring_emit(ring, MI_NOOP); | ||
914 | intel_ring_advance(ring); | ||
915 | } | ||
916 | } | ||
917 | |||
918 | return 0; | ||
919 | } | ||
920 | |||
921 | static void | 887 | static void |
922 | i915_gem_execbuffer_move_to_active(struct list_head *objects, | 888 | i915_gem_execbuffer_move_to_active(struct list_head *objects, |
923 | struct intel_ring_buffer *ring, | 889 | struct intel_ring_buffer *ring, |
@@ -926,6 +892,10 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects, | |||
926 | struct drm_i915_gem_object *obj; | 892 | struct drm_i915_gem_object *obj; |
927 | 893 | ||
928 | list_for_each_entry(obj, objects, exec_list) { | 894 | list_for_each_entry(obj, objects, exec_list) { |
895 | u32 old_read = obj->base.read_domains; | ||
896 | u32 old_write = obj->base.write_domain; | ||
897 | |||
898 | |||
929 | obj->base.read_domains = obj->base.pending_read_domains; | 899 | obj->base.read_domains = obj->base.pending_read_domains; |
930 | obj->base.write_domain = obj->base.pending_write_domain; | 900 | obj->base.write_domain = obj->base.pending_write_domain; |
931 | obj->fenced_gpu_access = obj->pending_fenced_gpu_access; | 901 | obj->fenced_gpu_access = obj->pending_fenced_gpu_access; |
@@ -939,9 +909,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects, | |||
939 | intel_mark_busy(ring->dev, obj); | 909 | intel_mark_busy(ring->dev, obj); |
940 | } | 910 | } |
941 | 911 | ||
942 | trace_i915_gem_object_change_domain(obj, | 912 | trace_i915_gem_object_change_domain(obj, old_read, old_write); |
943 | obj->base.read_domains, | ||
944 | obj->base.write_domain); | ||
945 | } | 913 | } |
946 | } | 914 | } |
947 | 915 | ||
@@ -963,14 +931,14 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev, | |||
963 | if (INTEL_INFO(dev)->gen >= 4) | 931 | if (INTEL_INFO(dev)->gen >= 4) |
964 | invalidate |= I915_GEM_DOMAIN_SAMPLER; | 932 | invalidate |= I915_GEM_DOMAIN_SAMPLER; |
965 | if (ring->flush(ring, invalidate, 0)) { | 933 | if (ring->flush(ring, invalidate, 0)) { |
966 | i915_gem_next_request_seqno(dev, ring); | 934 | i915_gem_next_request_seqno(ring); |
967 | return; | 935 | return; |
968 | } | 936 | } |
969 | 937 | ||
970 | /* Add a breadcrumb for the completion of the batch buffer */ | 938 | /* Add a breadcrumb for the completion of the batch buffer */ |
971 | request = kzalloc(sizeof(*request), GFP_KERNEL); | 939 | request = kzalloc(sizeof(*request), GFP_KERNEL); |
972 | if (request == NULL || i915_add_request(dev, file, request, ring)) { | 940 | if (request == NULL || i915_add_request(ring, file, request)) { |
973 | i915_gem_next_request_seqno(dev, ring); | 941 | i915_gem_next_request_seqno(ring); |
974 | kfree(request); | 942 | kfree(request); |
975 | } | 943 | } |
976 | } | 944 | } |
@@ -1000,10 +968,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
1000 | if (ret) | 968 | if (ret) |
1001 | return ret; | 969 | return ret; |
1002 | 970 | ||
1003 | #if WATCH_EXEC | ||
1004 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", | ||
1005 | (int) args->buffers_ptr, args->buffer_count, args->batch_len); | ||
1006 | #endif | ||
1007 | switch (args->flags & I915_EXEC_RING_MASK) { | 971 | switch (args->flags & I915_EXEC_RING_MASK) { |
1008 | case I915_EXEC_DEFAULT: | 972 | case I915_EXEC_DEFAULT: |
1009 | case I915_EXEC_RENDER: | 973 | case I915_EXEC_RENDER: |
@@ -1113,7 +1077,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
1113 | 1077 | ||
1114 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, | 1078 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, |
1115 | exec[i].handle)); | 1079 | exec[i].handle)); |
1116 | if (obj == NULL) { | 1080 | if (&obj->base == NULL) { |
1117 | DRM_ERROR("Invalid object handle %d at index %d\n", | 1081 | DRM_ERROR("Invalid object handle %d at index %d\n", |
1118 | exec[i].handle, i); | 1082 | exec[i].handle, i); |
1119 | /* prevent error path from reading uninitialized data */ | 1083 | /* prevent error path from reading uninitialized data */ |
@@ -1170,11 +1134,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
1170 | if (ret) | 1134 | if (ret) |
1171 | goto err; | 1135 | goto err; |
1172 | 1136 | ||
1173 | ret = i915_gem_execbuffer_wait_for_flips(ring, &objects); | 1137 | seqno = i915_gem_next_request_seqno(ring); |
1174 | if (ret) | ||
1175 | goto err; | ||
1176 | |||
1177 | seqno = i915_gem_next_request_seqno(dev, ring); | ||
1178 | for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) { | 1138 | for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) { |
1179 | if (seqno < ring->sync_seqno[i]) { | 1139 | if (seqno < ring->sync_seqno[i]) { |
1180 | /* The GPU can not handle its semaphore value wrapping, | 1140 | /* The GPU can not handle its semaphore value wrapping, |
@@ -1189,6 +1149,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
1189 | } | 1149 | } |
1190 | } | 1150 | } |
1191 | 1151 | ||
1152 | trace_i915_gem_ring_dispatch(ring, seqno); | ||
1153 | |||
1192 | exec_start = batch_obj->gtt_offset + args->batch_start_offset; | 1154 | exec_start = batch_obj->gtt_offset + args->batch_start_offset; |
1193 | exec_len = args->batch_len; | 1155 | exec_len = args->batch_len; |
1194 | if (cliprects) { | 1156 | if (cliprects) { |
@@ -1245,11 +1207,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
1245 | struct drm_i915_gem_exec_object2 *exec2_list = NULL; | 1207 | struct drm_i915_gem_exec_object2 *exec2_list = NULL; |
1246 | int ret, i; | 1208 | int ret, i; |
1247 | 1209 | ||
1248 | #if WATCH_EXEC | ||
1249 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", | ||
1250 | (int) args->buffers_ptr, args->buffer_count, args->batch_len); | ||
1251 | #endif | ||
1252 | |||
1253 | if (args->buffer_count < 1) { | 1210 | if (args->buffer_count < 1) { |
1254 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); | 1211 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); |
1255 | return -EINVAL; | 1212 | return -EINVAL; |
@@ -1330,17 +1287,16 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, | |||
1330 | struct drm_i915_gem_exec_object2 *exec2_list = NULL; | 1287 | struct drm_i915_gem_exec_object2 *exec2_list = NULL; |
1331 | int ret; | 1288 | int ret; |
1332 | 1289 | ||
1333 | #if WATCH_EXEC | ||
1334 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", | ||
1335 | (int) args->buffers_ptr, args->buffer_count, args->batch_len); | ||
1336 | #endif | ||
1337 | |||
1338 | if (args->buffer_count < 1) { | 1290 | if (args->buffer_count < 1) { |
1339 | DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count); | 1291 | DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count); |
1340 | return -EINVAL; | 1292 | return -EINVAL; |
1341 | } | 1293 | } |
1342 | 1294 | ||
1343 | exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); | 1295 | exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count, |
1296 | GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); | ||
1297 | if (exec2_list == NULL) | ||
1298 | exec2_list = drm_malloc_ab(sizeof(*exec2_list), | ||
1299 | args->buffer_count); | ||
1344 | if (exec2_list == NULL) { | 1300 | if (exec2_list == NULL) { |
1345 | DRM_ERROR("Failed to allocate exec list for %d buffers\n", | 1301 | DRM_ERROR("Failed to allocate exec list for %d buffers\n", |
1346 | args->buffer_count); | 1302 | args->buffer_count); |
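The execbuffer2 hunk above first tries a plain kmalloc() for the exec list (quietly, with __GFP_NOWARN | __GFP_NORETRY) and only falls back to drm_malloc_ab(), which may use vmalloc, for very large buffer counts. The matching release therefore has to cope with either allocator; drm_free_large() does this by checking is_vmalloc_addr(). A self-contained sketch of that idea (free_exec_list is an illustrative name, not a function in this patch):

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Release memory that may have come from either kmalloc() or vmalloc(). */
static void free_exec_list(void *exec_list)
{
	if (is_vmalloc_addr(exec_list))
		vfree(exec_list);
	else
		kfree(exec_list);
}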
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 22a32b9932c5..281ad3d6115d 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
@@ -284,14 +284,10 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, | |||
284 | struct drm_i915_gem_set_tiling *args = data; | 284 | struct drm_i915_gem_set_tiling *args = data; |
285 | drm_i915_private_t *dev_priv = dev->dev_private; | 285 | drm_i915_private_t *dev_priv = dev->dev_private; |
286 | struct drm_i915_gem_object *obj; | 286 | struct drm_i915_gem_object *obj; |
287 | int ret; | 287 | int ret = 0; |
288 | |||
289 | ret = i915_gem_check_is_wedged(dev); | ||
290 | if (ret) | ||
291 | return ret; | ||
292 | 288 | ||
293 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); | 289 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
294 | if (obj == NULL) | 290 | if (&obj->base == NULL) |
295 | return -ENOENT; | 291 | return -ENOENT; |
296 | 292 | ||
297 | if (!i915_tiling_ok(dev, | 293 | if (!i915_tiling_ok(dev, |
@@ -349,14 +345,27 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, | |||
349 | (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end && | 345 | (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end && |
350 | i915_gem_object_fence_ok(obj, args->tiling_mode)); | 346 | i915_gem_object_fence_ok(obj, args->tiling_mode)); |
351 | 347 | ||
352 | obj->tiling_changed = true; | 348 | /* Rebind if we need a change of alignment */ |
353 | obj->tiling_mode = args->tiling_mode; | 349 | if (!obj->map_and_fenceable) { |
354 | obj->stride = args->stride; | 350 | u32 unfenced_alignment = |
351 | i915_gem_get_unfenced_gtt_alignment(obj); | ||
352 | if (obj->gtt_offset & (unfenced_alignment - 1)) | ||
353 | ret = i915_gem_object_unbind(obj); | ||
354 | } | ||
355 | |||
356 | if (ret == 0) { | ||
357 | obj->tiling_changed = true; | ||
358 | obj->tiling_mode = args->tiling_mode; | ||
359 | obj->stride = args->stride; | ||
360 | } | ||
355 | } | 361 | } |
362 | /* we have to maintain this existing ABI... */ | ||
363 | args->stride = obj->stride; | ||
364 | args->tiling_mode = obj->tiling_mode; | ||
356 | drm_gem_object_unreference(&obj->base); | 365 | drm_gem_object_unreference(&obj->base); |
357 | mutex_unlock(&dev->struct_mutex); | 366 | mutex_unlock(&dev->struct_mutex); |
358 | 367 | ||
359 | return 0; | 368 | return ret; |
360 | } | 369 | } |
361 | 370 | ||
362 | /** | 371 | /** |
@@ -371,7 +380,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data, | |||
371 | struct drm_i915_gem_object *obj; | 380 | struct drm_i915_gem_object *obj; |
372 | 381 | ||
373 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); | 382 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
374 | if (obj == NULL) | 383 | if (&obj->base == NULL) |
375 | return -ENOENT; | 384 | return -ENOENT; |
376 | 385 | ||
377 | mutex_lock(&dev->struct_mutex); | 386 | mutex_lock(&dev->struct_mutex); |
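The set_tiling change above forces a rebind when the object's current GTT offset no longer satisfies the unfenced alignment required by the new tiling mode, and it reports the resulting stride and tiling mode back to user space to keep the existing ABI. The misalignment test relies on the alignment being a power of two; a small sketch of the same check (gtt_offset_is_aligned is an illustrative helper, not part of the patch):

#include <linux/types.h>

/* True when offset is a multiple of a power-of-two alignment. */
static inline bool gtt_offset_is_aligned(u32 offset, u32 alignment)
{
	return (offset & (alignment - 1)) == 0;
}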
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 97f946dcc1aa..188b497e5076 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -85,21 +85,11 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) | |||
85 | } | 85 | } |
86 | } | 86 | } |
87 | 87 | ||
88 | static inline u32 | ||
89 | i915_pipestat(int pipe) | ||
90 | { | ||
91 | if (pipe == 0) | ||
92 | return PIPEASTAT; | ||
93 | if (pipe == 1) | ||
94 | return PIPEBSTAT; | ||
95 | BUG(); | ||
96 | } | ||
97 | |||
98 | void | 88 | void |
99 | i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) | 89 | i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) |
100 | { | 90 | { |
101 | if ((dev_priv->pipestat[pipe] & mask) != mask) { | 91 | if ((dev_priv->pipestat[pipe] & mask) != mask) { |
102 | u32 reg = i915_pipestat(pipe); | 92 | u32 reg = PIPESTAT(pipe); |
103 | 93 | ||
104 | dev_priv->pipestat[pipe] |= mask; | 94 | dev_priv->pipestat[pipe] |= mask; |
105 | /* Enable the interrupt, clear any pending status */ | 95 | /* Enable the interrupt, clear any pending status */ |
@@ -112,7 +102,7 @@ void | |||
112 | i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) | 102 | i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) |
113 | { | 103 | { |
114 | if ((dev_priv->pipestat[pipe] & mask) != 0) { | 104 | if ((dev_priv->pipestat[pipe] & mask) != 0) { |
115 | u32 reg = i915_pipestat(pipe); | 105 | u32 reg = PIPESTAT(pipe); |
116 | 106 | ||
117 | dev_priv->pipestat[pipe] &= ~mask; | 107 | dev_priv->pipestat[pipe] &= ~mask; |
118 | I915_WRITE(reg, dev_priv->pipestat[pipe]); | 108 | I915_WRITE(reg, dev_priv->pipestat[pipe]); |
@@ -171,12 +161,12 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) | |||
171 | 161 | ||
172 | if (!i915_pipe_enabled(dev, pipe)) { | 162 | if (!i915_pipe_enabled(dev, pipe)) { |
173 | DRM_DEBUG_DRIVER("trying to get vblank count for disabled " | 163 | DRM_DEBUG_DRIVER("trying to get vblank count for disabled " |
174 | "pipe %d\n", pipe); | 164 | "pipe %c\n", pipe_name(pipe)); |
175 | return 0; | 165 | return 0; |
176 | } | 166 | } |
177 | 167 | ||
178 | high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH; | 168 | high_frame = PIPEFRAME(pipe); |
179 | low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL; | 169 | low_frame = PIPEFRAMEPIXEL(pipe); |
180 | 170 | ||
181 | /* | 171 | /* |
182 | * High & low register fields aren't synchronized, so make sure | 172 | * High & low register fields aren't synchronized, so make sure |
@@ -197,11 +187,11 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) | |||
197 | u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) | 187 | u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) |
198 | { | 188 | { |
199 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 189 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
200 | int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45; | 190 | int reg = PIPE_FRMCOUNT_GM45(pipe); |
201 | 191 | ||
202 | if (!i915_pipe_enabled(dev, pipe)) { | 192 | if (!i915_pipe_enabled(dev, pipe)) { |
203 | DRM_DEBUG_DRIVER("trying to get vblank count for disabled " | 193 | DRM_DEBUG_DRIVER("trying to get vblank count for disabled " |
204 | "pipe %d\n", pipe); | 194 | "pipe %c\n", pipe_name(pipe)); |
205 | return 0; | 195 | return 0; |
206 | } | 196 | } |
207 | 197 | ||
@@ -219,7 +209,7 @@ int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, | |||
219 | 209 | ||
220 | if (!i915_pipe_enabled(dev, pipe)) { | 210 | if (!i915_pipe_enabled(dev, pipe)) { |
221 | DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " | 211 | DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " |
222 | "pipe %d\n", pipe); | 212 | "pipe %c\n", pipe_name(pipe)); |
223 | return 0; | 213 | return 0; |
224 | } | 214 | } |
225 | 215 | ||
@@ -316,6 +306,8 @@ static void i915_hotplug_work_func(struct work_struct *work) | |||
316 | struct drm_mode_config *mode_config = &dev->mode_config; | 306 | struct drm_mode_config *mode_config = &dev->mode_config; |
317 | struct intel_encoder *encoder; | 307 | struct intel_encoder *encoder; |
318 | 308 | ||
309 | DRM_DEBUG_KMS("running encoder hotplug functions\n"); | ||
310 | |||
319 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) | 311 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) |
320 | if (encoder->hot_plug) | 312 | if (encoder->hot_plug) |
321 | encoder->hot_plug(encoder); | 313 | encoder->hot_plug(encoder); |
@@ -365,7 +357,7 @@ static void notify_ring(struct drm_device *dev, | |||
365 | return; | 357 | return; |
366 | 358 | ||
367 | seqno = ring->get_seqno(ring); | 359 | seqno = ring->get_seqno(ring); |
368 | trace_i915_gem_request_complete(dev, seqno); | 360 | trace_i915_gem_request_complete(ring, seqno); |
369 | 361 | ||
370 | ring->irq_seqno = seqno; | 362 | ring->irq_seqno = seqno; |
371 | wake_up_all(&ring->irq_queue); | 363 | wake_up_all(&ring->irq_queue); |
@@ -417,6 +409,7 @@ static void pch_irq_handler(struct drm_device *dev) | |||
417 | { | 409 | { |
418 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 410 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
419 | u32 pch_iir; | 411 | u32 pch_iir; |
412 | int pipe; | ||
420 | 413 | ||
421 | pch_iir = I915_READ(SDEIIR); | 414 | pch_iir = I915_READ(SDEIIR); |
422 | 415 | ||
@@ -437,13 +430,11 @@ static void pch_irq_handler(struct drm_device *dev) | |||
437 | if (pch_iir & SDE_POISON) | 430 | if (pch_iir & SDE_POISON) |
438 | DRM_ERROR("PCH poison interrupt\n"); | 431 | DRM_ERROR("PCH poison interrupt\n"); |
439 | 432 | ||
440 | if (pch_iir & SDE_FDI_MASK) { | 433 | if (pch_iir & SDE_FDI_MASK) |
441 | u32 fdia, fdib; | 434 | for_each_pipe(pipe) |
442 | 435 | DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", | |
443 | fdia = I915_READ(FDI_RXA_IIR); | 436 | pipe_name(pipe), |
444 | fdib = I915_READ(FDI_RXB_IIR); | 437 | I915_READ(FDI_RX_IIR(pipe))); |
445 | DRM_DEBUG_DRIVER("PCH FDI RX interrupt; FDI RXA IIR: 0x%08x, FDI RXB IIR: 0x%08x\n", fdia, fdib); | ||
446 | } | ||
447 | 438 | ||
448 | if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) | 439 | if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) |
449 | DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); | 440 | DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); |
@@ -648,9 +639,14 @@ static void | |||
648 | i915_error_state_free(struct drm_device *dev, | 639 | i915_error_state_free(struct drm_device *dev, |
649 | struct drm_i915_error_state *error) | 640 | struct drm_i915_error_state *error) |
650 | { | 641 | { |
651 | i915_error_object_free(error->batchbuffer[0]); | 642 | int i; |
652 | i915_error_object_free(error->batchbuffer[1]); | 643 | |
653 | i915_error_object_free(error->ringbuffer); | 644 | for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) |
645 | i915_error_object_free(error->batchbuffer[i]); | ||
646 | |||
647 | for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) | ||
648 | i915_error_object_free(error->ringbuffer[i]); | ||
649 | |||
654 | kfree(error->active_bo); | 650 | kfree(error->active_bo); |
655 | kfree(error->overlay); | 651 | kfree(error->overlay); |
656 | kfree(error); | 652 | kfree(error); |
@@ -765,7 +761,7 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
765 | struct drm_i915_gem_object *obj; | 761 | struct drm_i915_gem_object *obj; |
766 | struct drm_i915_error_state *error; | 762 | struct drm_i915_error_state *error; |
767 | unsigned long flags; | 763 | unsigned long flags; |
768 | int i; | 764 | int i, pipe; |
769 | 765 | ||
770 | spin_lock_irqsave(&dev_priv->error_lock, flags); | 766 | spin_lock_irqsave(&dev_priv->error_lock, flags); |
771 | error = dev_priv->first_error; | 767 | error = dev_priv->first_error; |
@@ -773,19 +769,21 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
773 | if (error) | 769 | if (error) |
774 | return; | 770 | return; |
775 | 771 | ||
772 | /* Account for pipe specific data like PIPE*STAT */ | ||
776 | error = kmalloc(sizeof(*error), GFP_ATOMIC); | 773 | error = kmalloc(sizeof(*error), GFP_ATOMIC); |
777 | if (!error) { | 774 | if (!error) { |
778 | DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); | 775 | DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); |
779 | return; | 776 | return; |
780 | } | 777 | } |
781 | 778 | ||
782 | DRM_DEBUG_DRIVER("generating error event\n"); | 779 | DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n", |
780 | dev->primary->index); | ||
783 | 781 | ||
784 | error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]); | 782 | error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]); |
785 | error->eir = I915_READ(EIR); | 783 | error->eir = I915_READ(EIR); |
786 | error->pgtbl_er = I915_READ(PGTBL_ER); | 784 | error->pgtbl_er = I915_READ(PGTBL_ER); |
787 | error->pipeastat = I915_READ(PIPEASTAT); | 785 | for_each_pipe(pipe) |
788 | error->pipebstat = I915_READ(PIPEBSTAT); | 786 | error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); |
789 | error->instpm = I915_READ(INSTPM); | 787 | error->instpm = I915_READ(INSTPM); |
790 | error->error = 0; | 788 | error->error = 0; |
791 | if (INTEL_INFO(dev)->gen >= 6) { | 789 | if (INTEL_INFO(dev)->gen >= 6) { |
@@ -824,15 +822,16 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
824 | } | 822 | } |
825 | i915_gem_record_fences(dev, error); | 823 | i915_gem_record_fences(dev, error); |
826 | 824 | ||
827 | /* Record the active batchbuffers */ | 825 | /* Record the active batch and ring buffers */ |
828 | for (i = 0; i < I915_NUM_RINGS; i++) | 826 | for (i = 0; i < I915_NUM_RINGS; i++) { |
829 | error->batchbuffer[i] = | 827 | error->batchbuffer[i] = |
830 | i915_error_first_batchbuffer(dev_priv, | 828 | i915_error_first_batchbuffer(dev_priv, |
831 | &dev_priv->ring[i]); | 829 | &dev_priv->ring[i]); |
832 | 830 | ||
833 | /* Record the ringbuffer */ | 831 | error->ringbuffer[i] = |
834 | error->ringbuffer = i915_error_object_create(dev_priv, | 832 | i915_error_object_create(dev_priv, |
835 | dev_priv->ring[RCS].obj); | 833 | dev_priv->ring[i].obj); |
834 | } | ||
836 | 835 | ||
837 | /* Record buffers on the active and pinned lists. */ | 836 | /* Record buffers on the active and pinned lists. */ |
838 | error->active_bo = NULL; | 837 | error->active_bo = NULL; |
@@ -905,6 +904,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) | |||
905 | { | 904 | { |
906 | struct drm_i915_private *dev_priv = dev->dev_private; | 905 | struct drm_i915_private *dev_priv = dev->dev_private; |
907 | u32 eir = I915_READ(EIR); | 906 | u32 eir = I915_READ(EIR); |
907 | int pipe; | ||
908 | 908 | ||
909 | if (!eir) | 909 | if (!eir) |
910 | return; | 910 | return; |
@@ -953,14 +953,10 @@ static void i915_report_and_clear_eir(struct drm_device *dev) | |||
953 | } | 953 | } |
954 | 954 | ||
955 | if (eir & I915_ERROR_MEMORY_REFRESH) { | 955 | if (eir & I915_ERROR_MEMORY_REFRESH) { |
956 | u32 pipea_stats = I915_READ(PIPEASTAT); | 956 | printk(KERN_ERR "memory refresh error:\n"); |
957 | u32 pipeb_stats = I915_READ(PIPEBSTAT); | 957 | for_each_pipe(pipe) |
958 | 958 | printk(KERN_ERR "pipe %c stat: 0x%08x\n", | |
959 | printk(KERN_ERR "memory refresh error\n"); | 959 | pipe_name(pipe), I915_READ(PIPESTAT(pipe))); |
960 | printk(KERN_ERR "PIPEASTAT: 0x%08x\n", | ||
961 | pipea_stats); | ||
962 | printk(KERN_ERR "PIPEBSTAT: 0x%08x\n", | ||
963 | pipeb_stats); | ||
964 | /* pipestat has already been acked */ | 960 | /* pipestat has already been acked */ |
965 | } | 961 | } |
966 | if (eir & I915_ERROR_INSTRUCTION) { | 962 | if (eir & I915_ERROR_INSTRUCTION) { |
@@ -1074,10 +1070,10 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) | |||
1074 | /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ | 1070 | /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ |
1075 | obj = work->pending_flip_obj; | 1071 | obj = work->pending_flip_obj; |
1076 | if (INTEL_INFO(dev)->gen >= 4) { | 1072 | if (INTEL_INFO(dev)->gen >= 4) { |
1077 | int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF; | 1073 | int dspsurf = DSPSURF(intel_crtc->plane); |
1078 | stall_detected = I915_READ(dspsurf) == obj->gtt_offset; | 1074 | stall_detected = I915_READ(dspsurf) == obj->gtt_offset; |
1079 | } else { | 1075 | } else { |
1080 | int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR; | 1076 | int dspaddr = DSPADDR(intel_crtc->plane); |
1081 | stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + | 1077 | stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + |
1082 | crtc->y * crtc->fb->pitch + | 1078 | crtc->y * crtc->fb->pitch + |
1083 | crtc->x * crtc->fb->bits_per_pixel/8); | 1079 | crtc->x * crtc->fb->bits_per_pixel/8); |
@@ -1097,12 +1093,13 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
1097 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1093 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
1098 | struct drm_i915_master_private *master_priv; | 1094 | struct drm_i915_master_private *master_priv; |
1099 | u32 iir, new_iir; | 1095 | u32 iir, new_iir; |
1100 | u32 pipea_stats, pipeb_stats; | 1096 | u32 pipe_stats[I915_MAX_PIPES]; |
1101 | u32 vblank_status; | 1097 | u32 vblank_status; |
1102 | int vblank = 0; | 1098 | int vblank = 0; |
1103 | unsigned long irqflags; | 1099 | unsigned long irqflags; |
1104 | int irq_received; | 1100 | int irq_received; |
1105 | int ret = IRQ_NONE; | 1101 | int ret = IRQ_NONE, pipe; |
1102 | bool blc_event = false; | ||
1106 | 1103 | ||
1107 | atomic_inc(&dev_priv->irq_received); | 1104 | atomic_inc(&dev_priv->irq_received); |
1108 | 1105 | ||
@@ -1125,27 +1122,23 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
1125 | * interrupts (for non-MSI). | 1122 | * interrupts (for non-MSI). |
1126 | */ | 1123 | */ |
1127 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 1124 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
1128 | pipea_stats = I915_READ(PIPEASTAT); | ||
1129 | pipeb_stats = I915_READ(PIPEBSTAT); | ||
1130 | |||
1131 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) | 1125 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) |
1132 | i915_handle_error(dev, false); | 1126 | i915_handle_error(dev, false); |
1133 | 1127 | ||
1134 | /* | 1128 | for_each_pipe(pipe) { |
1135 | * Clear the PIPE(A|B)STAT regs before the IIR | 1129 | int reg = PIPESTAT(pipe); |
1136 | */ | 1130 | pipe_stats[pipe] = I915_READ(reg); |
1137 | if (pipea_stats & 0x8000ffff) { | 1131 | |
1138 | if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS) | 1132 | /* |
1139 | DRM_DEBUG_DRIVER("pipe a underrun\n"); | 1133 | * Clear the PIPE*STAT regs before the IIR |
1140 | I915_WRITE(PIPEASTAT, pipea_stats); | 1134 | */ |
1141 | irq_received = 1; | 1135 | if (pipe_stats[pipe] & 0x8000ffff) { |
1142 | } | 1136 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) |
1143 | 1137 | DRM_DEBUG_DRIVER("pipe %c underrun\n", | |
1144 | if (pipeb_stats & 0x8000ffff) { | 1138 | pipe_name(pipe)); |
1145 | if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS) | 1139 | I915_WRITE(reg, pipe_stats[pipe]); |
1146 | DRM_DEBUG_DRIVER("pipe b underrun\n"); | 1140 | irq_received = 1; |
1147 | I915_WRITE(PIPEBSTAT, pipeb_stats); | 1141 | } |
1148 | irq_received = 1; | ||
1149 | } | 1142 | } |
1150 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 1143 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
1151 | 1144 | ||
@@ -1196,27 +1189,22 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
1196 | intel_finish_page_flip_plane(dev, 1); | 1189 | intel_finish_page_flip_plane(dev, 1); |
1197 | } | 1190 | } |
1198 | 1191 | ||
1199 | if (pipea_stats & vblank_status && | 1192 | for_each_pipe(pipe) { |
1200 | drm_handle_vblank(dev, 0)) { | 1193 | if (pipe_stats[pipe] & vblank_status && |
1201 | vblank++; | 1194 | drm_handle_vblank(dev, pipe)) { |
1202 | if (!dev_priv->flip_pending_is_done) { | 1195 | vblank++; |
1203 | i915_pageflip_stall_check(dev, 0); | 1196 | if (!dev_priv->flip_pending_is_done) { |
1204 | intel_finish_page_flip(dev, 0); | 1197 | i915_pageflip_stall_check(dev, pipe); |
1198 | intel_finish_page_flip(dev, pipe); | ||
1199 | } | ||
1205 | } | 1200 | } |
1206 | } | ||
1207 | 1201 | ||
1208 | if (pipeb_stats & vblank_status && | 1202 | if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) |
1209 | drm_handle_vblank(dev, 1)) { | 1203 | blc_event = true; |
1210 | vblank++; | ||
1211 | if (!dev_priv->flip_pending_is_done) { | ||
1212 | i915_pageflip_stall_check(dev, 1); | ||
1213 | intel_finish_page_flip(dev, 1); | ||
1214 | } | ||
1215 | } | 1204 | } |
1216 | 1205 | ||
1217 | if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || | 1206 | |
1218 | (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || | 1207 | if (blc_event || (iir & I915_ASLE_INTERRUPT)) |
1219 | (iir & I915_ASLE_INTERRUPT)) | ||
1220 | intel_opregion_asle_intr(dev); | 1208 | intel_opregion_asle_intr(dev); |
1221 | 1209 | ||
1222 | /* With MSI, interrupts are only generated when iir | 1210 | /* With MSI, interrupts are only generated when iir |
@@ -1266,16 +1254,6 @@ static int i915_emit_irq(struct drm_device * dev) | |||
1266 | return dev_priv->counter; | 1254 | return dev_priv->counter; |
1267 | } | 1255 | } |
1268 | 1256 | ||
1269 | void i915_trace_irq_get(struct drm_device *dev, u32 seqno) | ||
1270 | { | ||
1271 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
1272 | struct intel_ring_buffer *ring = LP_RING(dev_priv); | ||
1273 | |||
1274 | if (dev_priv->trace_irq_seqno == 0 && | ||
1275 | ring->irq_get(ring)) | ||
1276 | dev_priv->trace_irq_seqno = seqno; | ||
1277 | } | ||
1278 | |||
1279 | static int i915_wait_irq(struct drm_device * dev, int irq_nr) | 1257 | static int i915_wait_irq(struct drm_device * dev, int irq_nr) |
1280 | { | 1258 | { |
1281 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1259 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
@@ -1375,7 +1353,12 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) | |||
1375 | else | 1353 | else |
1376 | i915_enable_pipestat(dev_priv, pipe, | 1354 | i915_enable_pipestat(dev_priv, pipe, |
1377 | PIPE_VBLANK_INTERRUPT_ENABLE); | 1355 | PIPE_VBLANK_INTERRUPT_ENABLE); |
1356 | |||
1357 | /* maintain vblank delivery even in deep C-states */ | ||
1358 | if (dev_priv->info->gen == 3) | ||
1359 | I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16); | ||
1378 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 1360 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
1361 | |||
1379 | return 0; | 1362 | return 0; |
1380 | } | 1363 | } |
1381 | 1364 | ||
@@ -1388,6 +1371,10 @@ void i915_disable_vblank(struct drm_device *dev, int pipe) | |||
1388 | unsigned long irqflags; | 1371 | unsigned long irqflags; |
1389 | 1372 | ||
1390 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 1373 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
1374 | if (dev_priv->info->gen == 3) | ||
1375 | I915_WRITE(INSTPM, | ||
1376 | INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS); | ||
1377 | |||
1391 | if (HAS_PCH_SPLIT(dev)) | 1378 | if (HAS_PCH_SPLIT(dev)) |
1392 | ironlake_disable_display_irq(dev_priv, (pipe == 0) ? | 1379 | ironlake_disable_display_irq(dev_priv, (pipe == 0) ? |
1393 | DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); | 1380 | DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); |
@@ -1398,16 +1385,6 @@ void i915_disable_vblank(struct drm_device *dev, int pipe) | |||
1398 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 1385 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
1399 | } | 1386 | } |
1400 | 1387 | ||
1401 | void i915_enable_interrupt (struct drm_device *dev) | ||
1402 | { | ||
1403 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1404 | |||
1405 | if (!HAS_PCH_SPLIT(dev)) | ||
1406 | intel_opregion_enable_asle(dev); | ||
1407 | dev_priv->irq_enabled = 1; | ||
1408 | } | ||
1409 | |||
1410 | |||
1411 | /* Set the vblank monitor pipe | 1388 | /* Set the vblank monitor pipe |
1412 | */ | 1389 | */ |
1413 | int i915_vblank_pipe_set(struct drm_device *dev, void *data, | 1390 | int i915_vblank_pipe_set(struct drm_device *dev, void *data, |
@@ -1644,14 +1621,16 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
1644 | POSTING_READ(GTIER); | 1621 | POSTING_READ(GTIER); |
1645 | 1622 | ||
1646 | if (HAS_PCH_CPT(dev)) { | 1623 | if (HAS_PCH_CPT(dev)) { |
1647 | hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT | | 1624 | hotplug_mask = (SDE_CRT_HOTPLUG_CPT | |
1648 | SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT ; | 1625 | SDE_PORTB_HOTPLUG_CPT | |
1626 | SDE_PORTC_HOTPLUG_CPT | | ||
1627 | SDE_PORTD_HOTPLUG_CPT); | ||
1649 | } else { | 1628 | } else { |
1650 | hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | | 1629 | hotplug_mask = (SDE_CRT_HOTPLUG | |
1651 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; | 1630 | SDE_PORTB_HOTPLUG | |
1652 | hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK; | 1631 | SDE_PORTC_HOTPLUG | |
1653 | I915_WRITE(FDI_RXA_IMR, 0); | 1632 | SDE_PORTD_HOTPLUG | |
1654 | I915_WRITE(FDI_RXB_IMR, 0); | 1633 | SDE_AUX_MASK); |
1655 | } | 1634 | } |
1656 | 1635 | ||
1657 | dev_priv->pch_irq_mask = ~hotplug_mask; | 1636 | dev_priv->pch_irq_mask = ~hotplug_mask; |
@@ -1674,6 +1653,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
1674 | void i915_driver_irq_preinstall(struct drm_device * dev) | 1653 | void i915_driver_irq_preinstall(struct drm_device * dev) |
1675 | { | 1654 | { |
1676 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1655 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
1656 | int pipe; | ||
1677 | 1657 | ||
1678 | atomic_set(&dev_priv->irq_received, 0); | 1658 | atomic_set(&dev_priv->irq_received, 0); |
1679 | 1659 | ||
@@ -1691,8 +1671,8 @@ void i915_driver_irq_preinstall(struct drm_device * dev) | |||
1691 | } | 1671 | } |
1692 | 1672 | ||
1693 | I915_WRITE(HWSTAM, 0xeffe); | 1673 | I915_WRITE(HWSTAM, 0xeffe); |
1694 | I915_WRITE(PIPEASTAT, 0); | 1674 | for_each_pipe(pipe) |
1695 | I915_WRITE(PIPEBSTAT, 0); | 1675 | I915_WRITE(PIPESTAT(pipe), 0); |
1696 | I915_WRITE(IMR, 0xffffffff); | 1676 | I915_WRITE(IMR, 0xffffffff); |
1697 | I915_WRITE(IER, 0x0); | 1677 | I915_WRITE(IER, 0x0); |
1698 | POSTING_READ(IER); | 1678 | POSTING_READ(IER); |
@@ -1804,6 +1784,7 @@ static void ironlake_irq_uninstall(struct drm_device *dev) | |||
1804 | void i915_driver_irq_uninstall(struct drm_device * dev) | 1784 | void i915_driver_irq_uninstall(struct drm_device * dev) |
1805 | { | 1785 | { |
1806 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1786 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
1787 | int pipe; | ||
1807 | 1788 | ||
1808 | if (!dev_priv) | 1789 | if (!dev_priv) |
1809 | return; | 1790 | return; |
@@ -1821,12 +1802,13 @@ void i915_driver_irq_uninstall(struct drm_device * dev) | |||
1821 | } | 1802 | } |
1822 | 1803 | ||
1823 | I915_WRITE(HWSTAM, 0xffffffff); | 1804 | I915_WRITE(HWSTAM, 0xffffffff); |
1824 | I915_WRITE(PIPEASTAT, 0); | 1805 | for_each_pipe(pipe) |
1825 | I915_WRITE(PIPEBSTAT, 0); | 1806 | I915_WRITE(PIPESTAT(pipe), 0); |
1826 | I915_WRITE(IMR, 0xffffffff); | 1807 | I915_WRITE(IMR, 0xffffffff); |
1827 | I915_WRITE(IER, 0x0); | 1808 | I915_WRITE(IER, 0x0); |
1828 | 1809 | ||
1829 | I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff); | 1810 | for_each_pipe(pipe) |
1830 | I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff); | 1811 | I915_WRITE(PIPESTAT(pipe), |
1812 | I915_READ(PIPESTAT(pipe)) & 0x8000ffff); | ||
1831 | I915_WRITE(IIR, I915_READ(IIR)); | 1813 | I915_WRITE(IIR, I915_READ(IIR)); |
1832 | } | 1814 | } |
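The interrupt changes above replace the duplicated pipe A/pipe B code paths with for_each_pipe() loops over parameterized registers such as PIPESTAT(pipe), PIPEFRAME(pipe) and FDI_RX_IIR(pipe). Those accessors are built on the _PIPE() helper from i915_reg.h; a sketch with illustrative names, assuming _PIPE() linearly interpolates between the pipe A and pipe B addresses (the pipe B offset below is likewise an assumption):

#define EX_PIPE(pipe, a, b)	((a) + (pipe) * ((b) - (a)))
#define EX_PIPEASTAT		0x70024		/* matches _PIPEASTAT in the i915_reg.h hunk below */
#define EX_PIPEBSTAT		0x71024		/* assumed pipe B address */
#define EX_PIPESTAT(pipe)	EX_PIPE(pipe, EX_PIPEASTAT, EX_PIPEBSTAT)
/* EX_PIPESTAT(0) == 0x70024, EX_PIPESTAT(1) == 0x71024 */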
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 5cfc68940f17..363f66ca5d33 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -174,7 +174,9 @@ | |||
174 | * address/value pairs. Don't overdo it, though, x <= 2^4 must hold! | 174 | * address/value pairs. Don't overdo it, though, x <= 2^4 must hold! |
175 | */ | 175 | */ |
176 | #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) | 176 | #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) |
177 | #define MI_FLUSH_DW MI_INSTR(0x26, 2) /* for GEN6 */ | 177 | #define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ |
178 | #define MI_INVALIDATE_TLB (1<<18) | ||
179 | #define MI_INVALIDATE_BSD (1<<7) | ||
178 | #define MI_BATCH_BUFFER MI_INSTR(0x30, 1) | 180 | #define MI_BATCH_BUFFER MI_INSTR(0x30, 1) |
179 | #define MI_BATCH_NON_SECURE (1) | 181 | #define MI_BATCH_NON_SECURE (1) |
180 | #define MI_BATCH_NON_SECURE_I965 (1<<8) | 182 | #define MI_BATCH_NON_SECURE_I965 (1<<8) |
@@ -403,9 +405,12 @@ | |||
403 | #define I915_ERROR_INSTRUCTION (1<<0) | 405 | #define I915_ERROR_INSTRUCTION (1<<0) |
404 | #define INSTPM 0x020c0 | 406 | #define INSTPM 0x020c0 |
405 | #define INSTPM_SELF_EN (1<<12) /* 915GM only */ | 407 | #define INSTPM_SELF_EN (1<<12) /* 915GM only */ |
408 | #define INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts | ||
409 | will not assert AGPBUSY# and will only | ||
410 | be delivered when out of C3. */ | ||
406 | #define ACTHD 0x020c8 | 411 | #define ACTHD 0x020c8 |
407 | #define FW_BLC 0x020d8 | 412 | #define FW_BLC 0x020d8 |
408 | #define FW_BLC2 0x020dc | 413 | #define FW_BLC2 0x020dc |
409 | #define FW_BLC_SELF 0x020e0 /* 915+ only */ | 414 | #define FW_BLC_SELF 0x020e0 /* 915+ only */ |
410 | #define FW_BLC_SELF_EN_MASK (1<<31) | 415 | #define FW_BLC_SELF_EN_MASK (1<<31) |
411 | #define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */ | 416 | #define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */ |
@@ -704,9 +709,9 @@ | |||
704 | #define VGA1_PD_P1_DIV_2 (1 << 13) | 709 | #define VGA1_PD_P1_DIV_2 (1 << 13) |
705 | #define VGA1_PD_P1_SHIFT 8 | 710 | #define VGA1_PD_P1_SHIFT 8 |
706 | #define VGA1_PD_P1_MASK (0x1f << 8) | 711 | #define VGA1_PD_P1_MASK (0x1f << 8) |
707 | #define DPLL_A 0x06014 | 712 | #define _DPLL_A 0x06014 |
708 | #define DPLL_B 0x06018 | 713 | #define _DPLL_B 0x06018 |
709 | #define DPLL(pipe) _PIPE(pipe, DPLL_A, DPLL_B) | 714 | #define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B) |
710 | #define DPLL_VCO_ENABLE (1 << 31) | 715 | #define DPLL_VCO_ENABLE (1 << 31) |
711 | #define DPLL_DVO_HIGH_SPEED (1 << 30) | 716 | #define DPLL_DVO_HIGH_SPEED (1 << 30) |
712 | #define DPLL_SYNCLOCK_ENABLE (1 << 29) | 717 | #define DPLL_SYNCLOCK_ENABLE (1 << 29) |
@@ -777,7 +782,7 @@ | |||
777 | #define SDVO_MULTIPLIER_MASK 0x000000ff | 782 | #define SDVO_MULTIPLIER_MASK 0x000000ff |
778 | #define SDVO_MULTIPLIER_SHIFT_HIRES 4 | 783 | #define SDVO_MULTIPLIER_SHIFT_HIRES 4 |
779 | #define SDVO_MULTIPLIER_SHIFT_VGA 0 | 784 | #define SDVO_MULTIPLIER_SHIFT_VGA 0 |
780 | #define DPLL_A_MD 0x0601c /* 965+ only */ | 785 | #define _DPLL_A_MD 0x0601c /* 965+ only */ |
781 | /* | 786 | /* |
782 | * UDI pixel divider, controlling how many pixels are stuffed into a packet. | 787 | * UDI pixel divider, controlling how many pixels are stuffed into a packet. |
783 | * | 788 | * |
@@ -814,14 +819,14 @@ | |||
814 | */ | 819 | */ |
815 | #define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f | 820 | #define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f |
816 | #define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 | 821 | #define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 |
817 | #define DPLL_B_MD 0x06020 /* 965+ only */ | 822 | #define _DPLL_B_MD 0x06020 /* 965+ only */ |
818 | #define DPLL_MD(pipe) _PIPE(pipe, DPLL_A_MD, DPLL_B_MD) | 823 | #define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD) |
819 | #define FPA0 0x06040 | 824 | #define _FPA0 0x06040 |
820 | #define FPA1 0x06044 | 825 | #define _FPA1 0x06044 |
821 | #define FPB0 0x06048 | 826 | #define _FPB0 0x06048 |
822 | #define FPB1 0x0604c | 827 | #define _FPB1 0x0604c |
823 | #define FP0(pipe) _PIPE(pipe, FPA0, FPB0) | 828 | #define FP0(pipe) _PIPE(pipe, _FPA0, _FPB0) |
824 | #define FP1(pipe) _PIPE(pipe, FPA1, FPB1) | 829 | #define FP1(pipe) _PIPE(pipe, _FPA1, _FPB1) |
825 | #define FP_N_DIV_MASK 0x003f0000 | 830 | #define FP_N_DIV_MASK 0x003f0000 |
826 | #define FP_N_PINEVIEW_DIV_MASK 0x00ff0000 | 831 | #define FP_N_PINEVIEW_DIV_MASK 0x00ff0000 |
827 | #define FP_N_DIV_SHIFT 16 | 832 | #define FP_N_DIV_SHIFT 16 |
@@ -960,8 +965,9 @@ | |||
960 | * Palette regs | 965 | * Palette regs |
961 | */ | 966 | */ |
962 | 967 | ||
963 | #define PALETTE_A 0x0a000 | 968 | #define _PALETTE_A 0x0a000 |
964 | #define PALETTE_B 0x0a800 | 969 | #define _PALETTE_B 0x0a800 |
970 | #define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B) | ||
965 | 971 | ||
966 | /* MCH MMIO space */ | 972 | /* MCH MMIO space */ |
967 | 973 | ||
@@ -1265,32 +1271,32 @@ | |||
1265 | */ | 1271 | */ |
1266 | 1272 | ||
1267 | /* Pipe A timing regs */ | 1273 | /* Pipe A timing regs */ |
1268 | #define HTOTAL_A 0x60000 | 1274 | #define _HTOTAL_A 0x60000 |
1269 | #define HBLANK_A 0x60004 | 1275 | #define _HBLANK_A 0x60004 |
1270 | #define HSYNC_A 0x60008 | 1276 | #define _HSYNC_A 0x60008 |
1271 | #define VTOTAL_A 0x6000c | 1277 | #define _VTOTAL_A 0x6000c |
1272 | #define VBLANK_A 0x60010 | 1278 | #define _VBLANK_A 0x60010 |
1273 | #define VSYNC_A 0x60014 | 1279 | #define _VSYNC_A 0x60014 |
1274 | #define PIPEASRC 0x6001c | 1280 | #define _PIPEASRC 0x6001c |
1275 | #define BCLRPAT_A 0x60020 | 1281 | #define _BCLRPAT_A 0x60020 |
1276 | 1282 | ||
1277 | /* Pipe B timing regs */ | 1283 | /* Pipe B timing regs */ |
1278 | #define HTOTAL_B 0x61000 | 1284 | #define _HTOTAL_B 0x61000 |
1279 | #define HBLANK_B 0x61004 | 1285 | #define _HBLANK_B 0x61004 |
1280 | #define HSYNC_B 0x61008 | 1286 | #define _HSYNC_B 0x61008 |
1281 | #define VTOTAL_B 0x6100c | 1287 | #define _VTOTAL_B 0x6100c |
1282 | #define VBLANK_B 0x61010 | 1288 | #define _VBLANK_B 0x61010 |
1283 | #define VSYNC_B 0x61014 | 1289 | #define _VSYNC_B 0x61014 |
1284 | #define PIPEBSRC 0x6101c | 1290 | #define _PIPEBSRC 0x6101c |
1285 | #define BCLRPAT_B 0x61020 | 1291 | #define _BCLRPAT_B 0x61020 |
1286 | 1292 | ||
1287 | #define HTOTAL(pipe) _PIPE(pipe, HTOTAL_A, HTOTAL_B) | 1293 | #define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B) |
1288 | #define HBLANK(pipe) _PIPE(pipe, HBLANK_A, HBLANK_B) | 1294 | #define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B) |
1289 | #define HSYNC(pipe) _PIPE(pipe, HSYNC_A, HSYNC_B) | 1295 | #define HSYNC(pipe) _PIPE(pipe, _HSYNC_A, _HSYNC_B) |
1290 | #define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B) | 1296 | #define VTOTAL(pipe) _PIPE(pipe, _VTOTAL_A, _VTOTAL_B) |
1291 | #define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B) | 1297 | #define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B) |
1292 | #define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B) | 1298 | #define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B) |
1293 | #define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B) | 1299 | #define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B) |
1294 | 1300 | ||
1295 | /* VGA port control */ | 1301 | /* VGA port control */ |
1296 | #define ADPA 0x61100 | 1302 | #define ADPA 0x61100 |
@@ -1384,6 +1390,7 @@ | |||
1384 | #define SDVO_ENCODING_HDMI (0x2 << 10) | 1390 | #define SDVO_ENCODING_HDMI (0x2 << 10) |
1385 | /** Required for HDMI operation */ | 1391 | /** Required for HDMI operation */ |
1386 | #define SDVO_NULL_PACKETS_DURING_VSYNC (1 << 9) | 1392 | #define SDVO_NULL_PACKETS_DURING_VSYNC (1 << 9) |
1393 | #define SDVO_COLOR_RANGE_16_235 (1 << 8) | ||
1387 | #define SDVO_BORDER_ENABLE (1 << 7) | 1394 | #define SDVO_BORDER_ENABLE (1 << 7) |
1388 | #define SDVO_AUDIO_ENABLE (1 << 6) | 1395 | #define SDVO_AUDIO_ENABLE (1 << 6) |
1389 | /** New with 965, default is to be set */ | 1396 | /** New with 965, default is to be set */ |
@@ -1439,8 +1446,13 @@ | |||
1439 | #define LVDS_PORT_EN (1 << 31) | 1446 | #define LVDS_PORT_EN (1 << 31) |
1440 | /* Selects pipe B for LVDS data. Must be set on pre-965. */ | 1447 | /* Selects pipe B for LVDS data. Must be set on pre-965. */ |
1441 | #define LVDS_PIPEB_SELECT (1 << 30) | 1448 | #define LVDS_PIPEB_SELECT (1 << 30) |
1449 | #define LVDS_PIPE_MASK (1 << 30) | ||
1442 | /* LVDS dithering flag on 965/g4x platform */ | 1450 | /* LVDS dithering flag on 965/g4x platform */ |
1443 | #define LVDS_ENABLE_DITHER (1 << 25) | 1451 | #define LVDS_ENABLE_DITHER (1 << 25) |
1452 | /* LVDS sync polarity flags. Set to invert (i.e. negative) */ | ||
1453 | #define LVDS_VSYNC_POLARITY (1 << 21) | ||
1454 | #define LVDS_HSYNC_POLARITY (1 << 20) | ||
1455 | |||
1444 | /* Enable border for unscaled (or aspect-scaled) display */ | 1456 | /* Enable border for unscaled (or aspect-scaled) display */ |
1445 | #define LVDS_BORDER_ENABLE (1 << 15) | 1457 | #define LVDS_BORDER_ENABLE (1 << 15) |
1446 | /* | 1458 | /* |
@@ -1474,6 +1486,9 @@ | |||
1474 | #define LVDS_B0B3_POWER_DOWN (0 << 2) | 1486 | #define LVDS_B0B3_POWER_DOWN (0 << 2) |
1475 | #define LVDS_B0B3_POWER_UP (3 << 2) | 1487 | #define LVDS_B0B3_POWER_UP (3 << 2) |
1476 | 1488 | ||
1489 | #define LVDS_PIPE_ENABLED(V, P) \ | ||
1490 | (((V) & (LVDS_PIPE_MASK | LVDS_PORT_EN)) == ((P) << 30 | LVDS_PORT_EN)) | ||
1491 | |||
1477 | /* Video Data Island Packet control */ | 1492 | /* Video Data Island Packet control */ |
1478 | #define VIDEO_DIP_DATA 0x61178 | 1493 | #define VIDEO_DIP_DATA 0x61178 |
1479 | #define VIDEO_DIP_CTL 0x61170 | 1494 | #define VIDEO_DIP_CTL 0x61170 |
@@ -1551,17 +1566,7 @@ | |||
1551 | 1566 | ||
1552 | /* Backlight control */ | 1567 | /* Backlight control */ |
1553 | #define BLC_PWM_CTL 0x61254 | 1568 | #define BLC_PWM_CTL 0x61254 |
1554 | #define BACKLIGHT_MODULATION_FREQ_SHIFT (17) | ||
1555 | #define BLC_PWM_CTL2 0x61250 /* 965+ only */ | 1569 | #define BLC_PWM_CTL2 0x61250 /* 965+ only */ |
1556 | #define BLM_COMBINATION_MODE (1 << 30) | ||
1557 | /* | ||
1558 | * This is the most significant 15 bits of the number of backlight cycles in a | ||
1559 | * complete cycle of the modulated backlight control. | ||
1560 | * | ||
1561 | * The actual value is this field multiplied by two. | ||
1562 | */ | ||
1563 | #define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17) | ||
1564 | #define BLM_LEGACY_MODE (1 << 16) | ||
1565 | /* | 1570 | /* |
1566 | * This is the number of cycles out of the backlight modulation cycle for which | 1571 | * This is the number of cycles out of the backlight modulation cycle for which |
1567 | * the backlight is on. | 1572 | * the backlight is on. |
@@ -2062,6 +2067,10 @@ | |||
2062 | 2067 | ||
2063 | #define DP_PORT_EN (1 << 31) | 2068 | #define DP_PORT_EN (1 << 31) |
2064 | #define DP_PIPEB_SELECT (1 << 30) | 2069 | #define DP_PIPEB_SELECT (1 << 30) |
2070 | #define DP_PIPE_MASK (1 << 30) | ||
2071 | |||
2072 | #define DP_PIPE_ENABLED(V, P) \ | ||
2073 | (((V) & (DP_PIPE_MASK | DP_PORT_EN)) == ((P) << 30 | DP_PORT_EN)) | ||
2065 | 2074 | ||
2066 | /* Link training mode - select a suitable mode for each stage */ | 2075 | /* Link training mode - select a suitable mode for each stage */ |
2067 | #define DP_LINK_TRAIN_PAT_1 (0 << 28) | 2076 | #define DP_LINK_TRAIN_PAT_1 (0 << 28) |
@@ -2204,8 +2213,8 @@ | |||
2204 | * which is after the LUTs, so we want the bytes for our color format. | 2213 | * which is after the LUTs, so we want the bytes for our color format. |
2205 | * For our current usage, this is always 3, one byte for R, G and B. | 2214 | * For our current usage, this is always 3, one byte for R, G and B. |
2206 | */ | 2215 | */ |
2207 | #define PIPEA_GMCH_DATA_M 0x70050 | 2216 | #define _PIPEA_GMCH_DATA_M 0x70050 |
2208 | #define PIPEB_GMCH_DATA_M 0x71050 | 2217 | #define _PIPEB_GMCH_DATA_M 0x71050 |
2209 | 2218 | ||
2210 | /* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */ | 2219 | /* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */ |
2211 | #define PIPE_GMCH_DATA_M_TU_SIZE_MASK (0x3f << 25) | 2220 | #define PIPE_GMCH_DATA_M_TU_SIZE_MASK (0x3f << 25) |
@@ -2213,8 +2222,8 @@ | |||
2213 | 2222 | ||
2214 | #define PIPE_GMCH_DATA_M_MASK (0xffffff) | 2223 | #define PIPE_GMCH_DATA_M_MASK (0xffffff) |
2215 | 2224 | ||
2216 | #define PIPEA_GMCH_DATA_N 0x70054 | 2225 | #define _PIPEA_GMCH_DATA_N 0x70054 |
2217 | #define PIPEB_GMCH_DATA_N 0x71054 | 2226 | #define _PIPEB_GMCH_DATA_N 0x71054 |
2218 | #define PIPE_GMCH_DATA_N_MASK (0xffffff) | 2227 | #define PIPE_GMCH_DATA_N_MASK (0xffffff) |
2219 | 2228 | ||
2220 | /* | 2229 | /* |
@@ -2228,20 +2237,25 @@ | |||
2228 | * Attributes and VB-ID. | 2237 | * Attributes and VB-ID. |
2229 | */ | 2238 | */ |
2230 | 2239 | ||
2231 | #define PIPEA_DP_LINK_M 0x70060 | 2240 | #define _PIPEA_DP_LINK_M 0x70060 |
2232 | #define PIPEB_DP_LINK_M 0x71060 | 2241 | #define _PIPEB_DP_LINK_M 0x71060 |
2233 | #define PIPEA_DP_LINK_M_MASK (0xffffff) | 2242 | #define PIPEA_DP_LINK_M_MASK (0xffffff) |
2234 | 2243 | ||
2235 | #define PIPEA_DP_LINK_N 0x70064 | 2244 | #define _PIPEA_DP_LINK_N 0x70064 |
2236 | #define PIPEB_DP_LINK_N 0x71064 | 2245 | #define _PIPEB_DP_LINK_N 0x71064 |
2237 | #define PIPEA_DP_LINK_N_MASK (0xffffff) | 2246 | #define PIPEA_DP_LINK_N_MASK (0xffffff) |
2238 | 2247 | ||
2248 | #define PIPE_GMCH_DATA_M(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_M, _PIPEB_GMCH_DATA_M) | ||
2249 | #define PIPE_GMCH_DATA_N(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_N, _PIPEB_GMCH_DATA_N) | ||
2250 | #define PIPE_DP_LINK_M(pipe) _PIPE(pipe, _PIPEA_DP_LINK_M, _PIPEB_DP_LINK_M) | ||
2251 | #define PIPE_DP_LINK_N(pipe) _PIPE(pipe, _PIPEA_DP_LINK_N, _PIPEB_DP_LINK_N) | ||
2252 | |||
2239 | /* Display & cursor control */ | 2253 | /* Display & cursor control */ |
2240 | 2254 | ||
2241 | /* Pipe A */ | 2255 | /* Pipe A */ |
2242 | #define PIPEADSL 0x70000 | 2256 | #define _PIPEADSL 0x70000 |
2243 | #define DSL_LINEMASK 0x00000fff | 2257 | #define DSL_LINEMASK 0x00000fff |
2244 | #define PIPEACONF 0x70008 | 2258 | #define _PIPEACONF 0x70008 |
2245 | #define PIPECONF_ENABLE (1<<31) | 2259 | #define PIPECONF_ENABLE (1<<31) |
2246 | #define PIPECONF_DISABLE 0 | 2260 | #define PIPECONF_DISABLE 0 |
2247 | #define PIPECONF_DOUBLE_WIDE (1<<30) | 2261 | #define PIPECONF_DOUBLE_WIDE (1<<30) |
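The per-pipe macros introduced throughout this patch (PIPE_GMCH_DATA_M(), PIPECONF(), CURCNTR() and friends) all lean on the existing _PIPE() helper. Its assumed shape, shown below as a sketch rather than quoted from this hunk, interpolates between the pipe A and pipe B offsets:

	/* Assumed definition of the _PIPE() helper: pipe 0 yields the A
	 * offset, pipe 1 adds the A-to-B stride, so e.g. PIPECONF(1)
	 * resolves to _PIPEBCONF. */
	#define _PIPE(pipe, a, b) ((a) + (pipe) * ((b) - (a)))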
@@ -2267,7 +2281,7 @@ | |||
2267 | #define PIPECONF_DITHER_TYPE_ST1 (1<<2) | 2281 | #define PIPECONF_DITHER_TYPE_ST1 (1<<2) |
2268 | #define PIPECONF_DITHER_TYPE_ST2 (2<<2) | 2282 | #define PIPECONF_DITHER_TYPE_ST2 (2<<2) |
2269 | #define PIPECONF_DITHER_TYPE_TEMP (3<<2) | 2283 | #define PIPECONF_DITHER_TYPE_TEMP (3<<2) |
2270 | #define PIPEASTAT 0x70024 | 2284 | #define _PIPEASTAT 0x70024 |
2271 | #define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) | 2285 | #define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) |
2272 | #define PIPE_CRC_ERROR_ENABLE (1UL<<29) | 2286 | #define PIPE_CRC_ERROR_ENABLE (1UL<<29) |
2273 | #define PIPE_CRC_DONE_ENABLE (1UL<<28) | 2287 | #define PIPE_CRC_DONE_ENABLE (1UL<<28) |
@@ -2303,10 +2317,12 @@ | |||
2303 | #define PIPE_6BPC (2 << 5) | 2317 | #define PIPE_6BPC (2 << 5) |
2304 | #define PIPE_12BPC (3 << 5) | 2318 | #define PIPE_12BPC (3 << 5) |
2305 | 2319 | ||
2306 | #define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC) | 2320 | #define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC) |
2307 | #define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF) | 2321 | #define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF) |
2308 | #define PIPEDSL(pipe) _PIPE(pipe, PIPEADSL, PIPEBDSL) | 2322 | #define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL) |
2309 | #define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, PIPEAFRAMEPIXEL, PIPEBFRAMEPIXEL) | 2323 | #define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH) |
2324 | #define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL) | ||
2325 | #define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT) | ||
2310 | 2326 | ||
2311 | #define DSPARB 0x70030 | 2327 | #define DSPARB 0x70030 |
2312 | #define DSPARB_CSTART_MASK (0x7f << 7) | 2328 | #define DSPARB_CSTART_MASK (0x7f << 7) |
@@ -2468,20 +2484,21 @@ | |||
2468 | * } while (high1 != high2); | 2484 | * } while (high1 != high2); |
2469 | * frame = (high1 << 8) | low1; | 2485 | * frame = (high1 << 8) | low1; |
2470 | */ | 2486 | */ |
2471 | #define PIPEAFRAMEHIGH 0x70040 | 2487 | #define _PIPEAFRAMEHIGH 0x70040 |
2472 | #define PIPE_FRAME_HIGH_MASK 0x0000ffff | 2488 | #define PIPE_FRAME_HIGH_MASK 0x0000ffff |
2473 | #define PIPE_FRAME_HIGH_SHIFT 0 | 2489 | #define PIPE_FRAME_HIGH_SHIFT 0 |
2474 | #define PIPEAFRAMEPIXEL 0x70044 | 2490 | #define _PIPEAFRAMEPIXEL 0x70044 |
2475 | #define PIPE_FRAME_LOW_MASK 0xff000000 | 2491 | #define PIPE_FRAME_LOW_MASK 0xff000000 |
2476 | #define PIPE_FRAME_LOW_SHIFT 24 | 2492 | #define PIPE_FRAME_LOW_SHIFT 24 |
2477 | #define PIPE_PIXEL_MASK 0x00ffffff | 2493 | #define PIPE_PIXEL_MASK 0x00ffffff |
2478 | #define PIPE_PIXEL_SHIFT 0 | 2494 | #define PIPE_PIXEL_SHIFT 0 |
2479 | /* GM45+ just has to be different */ | 2495 | /* GM45+ just has to be different */ |
2480 | #define PIPEA_FRMCOUNT_GM45 0x70040 | 2496 | #define _PIPEA_FRMCOUNT_GM45 0x70040 |
2481 | #define PIPEA_FLIPCOUNT_GM45 0x70044 | 2497 | #define _PIPEA_FLIPCOUNT_GM45 0x70044 |
2498 | #define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45) | ||
2482 | 2499 | ||
2483 | /* Cursor A & B regs */ | 2500 | /* Cursor A & B regs */ |
2484 | #define CURACNTR 0x70080 | 2501 | #define _CURACNTR 0x70080 |
2485 | /* Old style CUR*CNTR flags (desktop 8xx) */ | 2502 | /* Old style CUR*CNTR flags (desktop 8xx) */ |
2486 | #define CURSOR_ENABLE 0x80000000 | 2503 | #define CURSOR_ENABLE 0x80000000 |
2487 | #define CURSOR_GAMMA_ENABLE 0x40000000 | 2504 | #define CURSOR_GAMMA_ENABLE 0x40000000 |
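The high1/high2 loop described in the comment above is the pre-GM45 frame counter read; a sketch of that loop using the masks and per-pipe macros defined in this hunk (the function name and dev_priv plumbing are assumptions):

	/* Read the split frame counter: the high word and the low word live
	 * in different registers, so re-read the high word until it is
	 * stable across the low-word read, then combine as the comment says. */
	static u32 read_pipe_frame_count(struct drm_i915_private *dev_priv,
					 enum pipe pipe)
	{
		u32 high1, high2, low;

		do {
			high1 = I915_READ(PIPEFRAME(pipe)) & PIPE_FRAME_HIGH_MASK;
			low = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_FRAME_LOW_MASK)
				>> PIPE_FRAME_LOW_SHIFT;
			high2 = I915_READ(PIPEFRAME(pipe)) & PIPE_FRAME_HIGH_MASK;
		} while (high1 != high2);

		return (high1 << 8) | low;
	}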
@@ -2502,23 +2519,23 @@ | |||
2502 | #define MCURSOR_PIPE_A 0x00 | 2519 | #define MCURSOR_PIPE_A 0x00 |
2503 | #define MCURSOR_PIPE_B (1 << 28) | 2520 | #define MCURSOR_PIPE_B (1 << 28) |
2504 | #define MCURSOR_GAMMA_ENABLE (1 << 26) | 2521 | #define MCURSOR_GAMMA_ENABLE (1 << 26) |
2505 | #define CURABASE 0x70084 | 2522 | #define _CURABASE 0x70084 |
2506 | #define CURAPOS 0x70088 | 2523 | #define _CURAPOS 0x70088 |
2507 | #define CURSOR_POS_MASK 0x007FF | 2524 | #define CURSOR_POS_MASK 0x007FF |
2508 | #define CURSOR_POS_SIGN 0x8000 | 2525 | #define CURSOR_POS_SIGN 0x8000 |
2509 | #define CURSOR_X_SHIFT 0 | 2526 | #define CURSOR_X_SHIFT 0 |
2510 | #define CURSOR_Y_SHIFT 16 | 2527 | #define CURSOR_Y_SHIFT 16 |
2511 | #define CURSIZE 0x700a0 | 2528 | #define CURSIZE 0x700a0 |
2512 | #define CURBCNTR 0x700c0 | 2529 | #define _CURBCNTR 0x700c0 |
2513 | #define CURBBASE 0x700c4 | 2530 | #define _CURBBASE 0x700c4 |
2514 | #define CURBPOS 0x700c8 | 2531 | #define _CURBPOS 0x700c8 |
2515 | 2532 | ||
2516 | #define CURCNTR(pipe) _PIPE(pipe, CURACNTR, CURBCNTR) | 2533 | #define CURCNTR(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR) |
2517 | #define CURBASE(pipe) _PIPE(pipe, CURABASE, CURBBASE) | 2534 | #define CURBASE(pipe) _PIPE(pipe, _CURABASE, _CURBBASE) |
2518 | #define CURPOS(pipe) _PIPE(pipe, CURAPOS, CURBPOS) | 2535 | #define CURPOS(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS) |
2519 | 2536 | ||
2520 | /* Display A control */ | 2537 | /* Display A control */ |
2521 | #define DSPACNTR 0x70180 | 2538 | #define _DSPACNTR 0x70180 |
2522 | #define DISPLAY_PLANE_ENABLE (1<<31) | 2539 | #define DISPLAY_PLANE_ENABLE (1<<31) |
2523 | #define DISPLAY_PLANE_DISABLE 0 | 2540 | #define DISPLAY_PLANE_DISABLE 0 |
2524 | #define DISPPLANE_GAMMA_ENABLE (1<<30) | 2541 | #define DISPPLANE_GAMMA_ENABLE (1<<30) |
@@ -2532,9 +2549,10 @@ | |||
2532 | #define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26) | 2549 | #define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26) |
2533 | #define DISPPLANE_STEREO_ENABLE (1<<25) | 2550 | #define DISPPLANE_STEREO_ENABLE (1<<25) |
2534 | #define DISPPLANE_STEREO_DISABLE 0 | 2551 | #define DISPPLANE_STEREO_DISABLE 0 |
2535 | #define DISPPLANE_SEL_PIPE_MASK (1<<24) | 2552 | #define DISPPLANE_SEL_PIPE_SHIFT 24 |
2553 | #define DISPPLANE_SEL_PIPE_MASK (3<<DISPPLANE_SEL_PIPE_SHIFT) | ||
2536 | #define DISPPLANE_SEL_PIPE_A 0 | 2554 | #define DISPPLANE_SEL_PIPE_A 0 |
2537 | #define DISPPLANE_SEL_PIPE_B (1<<24) | 2555 | #define DISPPLANE_SEL_PIPE_B (1<<DISPPLANE_SEL_PIPE_SHIFT) |
2538 | #define DISPPLANE_SRC_KEY_ENABLE (1<<22) | 2556 | #define DISPPLANE_SRC_KEY_ENABLE (1<<22) |
2539 | #define DISPPLANE_SRC_KEY_DISABLE 0 | 2557 | #define DISPPLANE_SRC_KEY_DISABLE 0 |
2540 | #define DISPPLANE_LINE_DOUBLE (1<<20) | 2558 | #define DISPPLANE_LINE_DOUBLE (1<<20) |
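With the pipe-select field widened above from a single bit to a two-bit field, the pipe a plane is routed to can be recovered with the new shift/mask pair; a small sketch (hypothetical helper, not part of the patch):

	/* Which pipe is this display plane routed to?  Uses the widened
	 * two-bit select field from the plane control register value. */
	static enum pipe dspcntr_to_pipe(u32 dspcntr)
	{
		return (dspcntr & DISPPLANE_SEL_PIPE_MASK) >> DISPPLANE_SEL_PIPE_SHIFT;
	}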
@@ -2543,20 +2561,20 @@ | |||
2543 | #define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) | 2561 | #define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) |
2544 | #define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */ | 2562 | #define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */ |
2545 | #define DISPPLANE_TILED (1<<10) | 2563 | #define DISPPLANE_TILED (1<<10) |
2546 | #define DSPAADDR 0x70184 | 2564 | #define _DSPAADDR 0x70184 |
2547 | #define DSPASTRIDE 0x70188 | 2565 | #define _DSPASTRIDE 0x70188 |
2548 | #define DSPAPOS 0x7018C /* reserved */ | 2566 | #define _DSPAPOS 0x7018C /* reserved */ |
2549 | #define DSPASIZE 0x70190 | 2567 | #define _DSPASIZE 0x70190 |
2550 | #define DSPASURF 0x7019C /* 965+ only */ | 2568 | #define _DSPASURF 0x7019C /* 965+ only */ |
2551 | #define DSPATILEOFF 0x701A4 /* 965+ only */ | 2569 | #define _DSPATILEOFF 0x701A4 /* 965+ only */ |
2552 | 2570 | ||
2553 | #define DSPCNTR(plane) _PIPE(plane, DSPACNTR, DSPBCNTR) | 2571 | #define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR) |
2554 | #define DSPADDR(plane) _PIPE(plane, DSPAADDR, DSPBADDR) | 2572 | #define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR) |
2555 | #define DSPSTRIDE(plane) _PIPE(plane, DSPASTRIDE, DSPBSTRIDE) | 2573 | #define DSPSTRIDE(plane) _PIPE(plane, _DSPASTRIDE, _DSPBSTRIDE) |
2556 | #define DSPPOS(plane) _PIPE(plane, DSPAPOS, DSPBPOS) | 2574 | #define DSPPOS(plane) _PIPE(plane, _DSPAPOS, _DSPBPOS) |
2557 | #define DSPSIZE(plane) _PIPE(plane, DSPASIZE, DSPBSIZE) | 2575 | #define DSPSIZE(plane) _PIPE(plane, _DSPASIZE, _DSPBSIZE) |
2558 | #define DSPSURF(plane) _PIPE(plane, DSPASURF, DSPBSURF) | 2576 | #define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF) |
2559 | #define DSPTILEOFF(plane) _PIPE(plane, DSPATILEOFF, DSPBTILEOFF) | 2577 | #define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF) |
2560 | 2578 | ||
2561 | /* VBIOS flags */ | 2579 | /* VBIOS flags */ |
2562 | #define SWF00 0x71410 | 2580 | #define SWF00 0x71410 |
@@ -2574,27 +2592,27 @@ | |||
2574 | #define SWF32 0x7241c | 2592 | #define SWF32 0x7241c |
2575 | 2593 | ||
2576 | /* Pipe B */ | 2594 | /* Pipe B */ |
2577 | #define PIPEBDSL 0x71000 | 2595 | #define _PIPEBDSL 0x71000 |
2578 | #define PIPEBCONF 0x71008 | 2596 | #define _PIPEBCONF 0x71008 |
2579 | #define PIPEBSTAT 0x71024 | 2597 | #define _PIPEBSTAT 0x71024 |
2580 | #define PIPEBFRAMEHIGH 0x71040 | 2598 | #define _PIPEBFRAMEHIGH 0x71040 |
2581 | #define PIPEBFRAMEPIXEL 0x71044 | 2599 | #define _PIPEBFRAMEPIXEL 0x71044 |
2582 | #define PIPEB_FRMCOUNT_GM45 0x71040 | 2600 | #define _PIPEB_FRMCOUNT_GM45 0x71040 |
2583 | #define PIPEB_FLIPCOUNT_GM45 0x71044 | 2601 | #define _PIPEB_FLIPCOUNT_GM45 0x71044 |
2584 | 2602 | ||
2585 | 2603 | ||
2586 | /* Display B control */ | 2604 | /* Display B control */ |
2587 | #define DSPBCNTR 0x71180 | 2605 | #define _DSPBCNTR 0x71180 |
2588 | #define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15) | 2606 | #define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15) |
2589 | #define DISPPLANE_ALPHA_TRANS_DISABLE 0 | 2607 | #define DISPPLANE_ALPHA_TRANS_DISABLE 0 |
2590 | #define DISPPLANE_SPRITE_ABOVE_DISPLAY 0 | 2608 | #define DISPPLANE_SPRITE_ABOVE_DISPLAY 0 |
2591 | #define DISPPLANE_SPRITE_ABOVE_OVERLAY (1) | 2609 | #define DISPPLANE_SPRITE_ABOVE_OVERLAY (1) |
2592 | #define DSPBADDR 0x71184 | 2610 | #define _DSPBADDR 0x71184 |
2593 | #define DSPBSTRIDE 0x71188 | 2611 | #define _DSPBSTRIDE 0x71188 |
2594 | #define DSPBPOS 0x7118C | 2612 | #define _DSPBPOS 0x7118C |
2595 | #define DSPBSIZE 0x71190 | 2613 | #define _DSPBSIZE 0x71190 |
2596 | #define DSPBSURF 0x7119C | 2614 | #define _DSPBSURF 0x7119C |
2597 | #define DSPBTILEOFF 0x711A4 | 2615 | #define _DSPBTILEOFF 0x711A4 |
2598 | 2616 | ||
2599 | /* VBIOS regs */ | 2617 | /* VBIOS regs */ |
2600 | #define VGACNTRL 0x71400 | 2618 | #define VGACNTRL 0x71400 |
@@ -2648,68 +2666,80 @@ | |||
2648 | #define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff | 2666 | #define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff |
2649 | 2667 | ||
2650 | 2668 | ||
2651 | #define PIPEA_DATA_M1 0x60030 | 2669 | #define _PIPEA_DATA_M1 0x60030 |
2652 | #define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */ | 2670 | #define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */ |
2653 | #define TU_SIZE_MASK 0x7e000000 | 2671 | #define TU_SIZE_MASK 0x7e000000 |
2654 | #define PIPE_DATA_M1_OFFSET 0 | 2672 | #define PIPE_DATA_M1_OFFSET 0 |
2655 | #define PIPEA_DATA_N1 0x60034 | 2673 | #define _PIPEA_DATA_N1 0x60034 |
2656 | #define PIPE_DATA_N1_OFFSET 0 | 2674 | #define PIPE_DATA_N1_OFFSET 0 |
2657 | 2675 | ||
2658 | #define PIPEA_DATA_M2 0x60038 | 2676 | #define _PIPEA_DATA_M2 0x60038 |
2659 | #define PIPE_DATA_M2_OFFSET 0 | 2677 | #define PIPE_DATA_M2_OFFSET 0 |
2660 | #define PIPEA_DATA_N2 0x6003c | 2678 | #define _PIPEA_DATA_N2 0x6003c |
2661 | #define PIPE_DATA_N2_OFFSET 0 | 2679 | #define PIPE_DATA_N2_OFFSET 0 |
2662 | 2680 | ||
2663 | #define PIPEA_LINK_M1 0x60040 | 2681 | #define _PIPEA_LINK_M1 0x60040 |
2664 | #define PIPE_LINK_M1_OFFSET 0 | 2682 | #define PIPE_LINK_M1_OFFSET 0 |
2665 | #define PIPEA_LINK_N1 0x60044 | 2683 | #define _PIPEA_LINK_N1 0x60044 |
2666 | #define PIPE_LINK_N1_OFFSET 0 | 2684 | #define PIPE_LINK_N1_OFFSET 0 |
2667 | 2685 | ||
2668 | #define PIPEA_LINK_M2 0x60048 | 2686 | #define _PIPEA_LINK_M2 0x60048 |
2669 | #define PIPE_LINK_M2_OFFSET 0 | 2687 | #define PIPE_LINK_M2_OFFSET 0 |
2670 | #define PIPEA_LINK_N2 0x6004c | 2688 | #define _PIPEA_LINK_N2 0x6004c |
2671 | #define PIPE_LINK_N2_OFFSET 0 | 2689 | #define PIPE_LINK_N2_OFFSET 0 |
2672 | 2690 | ||
2673 | /* PIPEB timing regs are the same, starting from 0x61000 */ | 2691 | /* PIPEB timing regs are the same, starting from 0x61000 */ |

2674 | 2692 | ||
2675 | #define PIPEB_DATA_M1 0x61030 | 2693 | #define _PIPEB_DATA_M1 0x61030 |
2676 | #define PIPEB_DATA_N1 0x61034 | 2694 | #define _PIPEB_DATA_N1 0x61034 |
2677 | 2695 | ||
2678 | #define PIPEB_DATA_M2 0x61038 | 2696 | #define _PIPEB_DATA_M2 0x61038 |
2679 | #define PIPEB_DATA_N2 0x6103c | 2697 | #define _PIPEB_DATA_N2 0x6103c |
2680 | 2698 | ||
2681 | #define PIPEB_LINK_M1 0x61040 | 2699 | #define _PIPEB_LINK_M1 0x61040 |
2682 | #define PIPEB_LINK_N1 0x61044 | 2700 | #define _PIPEB_LINK_N1 0x61044 |
2683 | 2701 | ||
2684 | #define PIPEB_LINK_M2 0x61048 | 2702 | #define _PIPEB_LINK_M2 0x61048 |
2685 | #define PIPEB_LINK_N2 0x6104c | 2703 | #define _PIPEB_LINK_N2 0x6104c |
2686 | 2704 | ||
2687 | #define PIPE_DATA_M1(pipe) _PIPE(pipe, PIPEA_DATA_M1, PIPEB_DATA_M1) | 2705 | #define PIPE_DATA_M1(pipe) _PIPE(pipe, _PIPEA_DATA_M1, _PIPEB_DATA_M1) |
2688 | #define PIPE_DATA_N1(pipe) _PIPE(pipe, PIPEA_DATA_N1, PIPEB_DATA_N1) | 2706 | #define PIPE_DATA_N1(pipe) _PIPE(pipe, _PIPEA_DATA_N1, _PIPEB_DATA_N1) |
2689 | #define PIPE_DATA_M2(pipe) _PIPE(pipe, PIPEA_DATA_M2, PIPEB_DATA_M2) | 2707 | #define PIPE_DATA_M2(pipe) _PIPE(pipe, _PIPEA_DATA_M2, _PIPEB_DATA_M2) |
2690 | #define PIPE_DATA_N2(pipe) _PIPE(pipe, PIPEA_DATA_N2, PIPEB_DATA_N2) | 2708 | #define PIPE_DATA_N2(pipe) _PIPE(pipe, _PIPEA_DATA_N2, _PIPEB_DATA_N2) |
2691 | #define PIPE_LINK_M1(pipe) _PIPE(pipe, PIPEA_LINK_M1, PIPEB_LINK_M1) | 2709 | #define PIPE_LINK_M1(pipe) _PIPE(pipe, _PIPEA_LINK_M1, _PIPEB_LINK_M1) |
2692 | #define PIPE_LINK_N1(pipe) _PIPE(pipe, PIPEA_LINK_N1, PIPEB_LINK_N1) | 2710 | #define PIPE_LINK_N1(pipe) _PIPE(pipe, _PIPEA_LINK_N1, _PIPEB_LINK_N1) |
2693 | #define PIPE_LINK_M2(pipe) _PIPE(pipe, PIPEA_LINK_M2, PIPEB_LINK_M2) | 2711 | #define PIPE_LINK_M2(pipe) _PIPE(pipe, _PIPEA_LINK_M2, _PIPEB_LINK_M2) |
2694 | #define PIPE_LINK_N2(pipe) _PIPE(pipe, PIPEA_LINK_N2, PIPEB_LINK_N2) | 2712 | #define PIPE_LINK_N2(pipe) _PIPE(pipe, _PIPEA_LINK_N2, _PIPEB_LINK_N2) |
2695 | 2713 | ||
2696 | /* CPU panel fitter */ | 2714 | /* CPU panel fitter */ |
2697 | #define PFA_CTL_1 0x68080 | 2715 | /* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */ |
2698 | #define PFB_CTL_1 0x68880 | 2716 | #define _PFA_CTL_1 0x68080 |
2717 | #define _PFB_CTL_1 0x68880 | ||
2699 | #define PF_ENABLE (1<<31) | 2718 | #define PF_ENABLE (1<<31) |
2700 | #define PF_FILTER_MASK (3<<23) | 2719 | #define PF_FILTER_MASK (3<<23) |
2701 | #define PF_FILTER_PROGRAMMED (0<<23) | 2720 | #define PF_FILTER_PROGRAMMED (0<<23) |
2702 | #define PF_FILTER_MED_3x3 (1<<23) | 2721 | #define PF_FILTER_MED_3x3 (1<<23) |
2703 | #define PF_FILTER_EDGE_ENHANCE (2<<23) | 2722 | #define PF_FILTER_EDGE_ENHANCE (2<<23) |
2704 | #define PF_FILTER_EDGE_SOFTEN (3<<23) | 2723 | #define PF_FILTER_EDGE_SOFTEN (3<<23) |
2705 | #define PFA_WIN_SZ 0x68074 | 2724 | #define _PFA_WIN_SZ 0x68074 |
2706 | #define PFB_WIN_SZ 0x68874 | 2725 | #define _PFB_WIN_SZ 0x68874 |
2707 | #define PFA_WIN_POS 0x68070 | 2726 | #define _PFA_WIN_POS 0x68070 |
2708 | #define PFB_WIN_POS 0x68870 | 2727 | #define _PFB_WIN_POS 0x68870 |
2728 | #define _PFA_VSCALE 0x68084 | ||
2729 | #define _PFB_VSCALE 0x68884 | ||
2730 | #define _PFA_HSCALE 0x68090 | ||
2731 | #define _PFB_HSCALE 0x68890 | ||
2732 | |||
2733 | #define PF_CTL(pipe) _PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1) | ||
2734 | #define PF_WIN_SZ(pipe) _PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ) | ||
2735 | #define PF_WIN_POS(pipe) _PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS) | ||
2736 | #define PF_VSCALE(pipe) _PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE) | ||
2737 | #define PF_HSCALE(pipe) _PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE) | ||
2709 | 2738 | ||
2710 | /* legacy palette */ | 2739 | /* legacy palette */ |
2711 | #define LGC_PALETTE_A 0x4a000 | 2740 | #define _LGC_PALETTE_A 0x4a000 |
2712 | #define LGC_PALETTE_B 0x4a800 | 2741 | #define _LGC_PALETTE_B 0x4a800 |
2742 | #define LGC_PALETTE(pipe) _PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) | ||
2713 | 2743 | ||
2714 | /* interrupts */ | 2744 | /* interrupts */ |
2715 | #define DE_MASTER_IRQ_CONTROL (1 << 31) | 2745 | #define DE_MASTER_IRQ_CONTROL (1 << 31) |
@@ -2875,17 +2905,17 @@ | |||
2875 | #define PCH_GMBUS4 0xc5110 | 2905 | #define PCH_GMBUS4 0xc5110 |
2876 | #define PCH_GMBUS5 0xc5120 | 2906 | #define PCH_GMBUS5 0xc5120 |
2877 | 2907 | ||
2878 | #define PCH_DPLL_A 0xc6014 | 2908 | #define _PCH_DPLL_A 0xc6014 |
2879 | #define PCH_DPLL_B 0xc6018 | 2909 | #define _PCH_DPLL_B 0xc6018 |
2880 | #define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B) | 2910 | #define PCH_DPLL(pipe) _PIPE(pipe, _PCH_DPLL_A, _PCH_DPLL_B) |
2881 | 2911 | ||
2882 | #define PCH_FPA0 0xc6040 | 2912 | #define _PCH_FPA0 0xc6040 |
2883 | #define FP_CB_TUNE (0x3<<22) | 2913 | #define FP_CB_TUNE (0x3<<22) |
2884 | #define PCH_FPA1 0xc6044 | 2914 | #define _PCH_FPA1 0xc6044 |
2885 | #define PCH_FPB0 0xc6048 | 2915 | #define _PCH_FPB0 0xc6048 |
2886 | #define PCH_FPB1 0xc604c | 2916 | #define _PCH_FPB1 0xc604c |
2887 | #define PCH_FP0(pipe) _PIPE(pipe, PCH_FPA0, PCH_FPB0) | 2917 | #define PCH_FP0(pipe) _PIPE(pipe, _PCH_FPA0, _PCH_FPB0) |
2888 | #define PCH_FP1(pipe) _PIPE(pipe, PCH_FPA1, PCH_FPB1) | 2918 | #define PCH_FP1(pipe) _PIPE(pipe, _PCH_FPA1, _PCH_FPB1) |
2889 | 2919 | ||
2890 | #define PCH_DPLL_TEST 0xc606c | 2920 | #define PCH_DPLL_TEST 0xc606c |
2891 | 2921 | ||
@@ -2904,6 +2934,7 @@ | |||
2904 | #define DREF_NONSPREAD_SOURCE_MASK (3<<9) | 2934 | #define DREF_NONSPREAD_SOURCE_MASK (3<<9) |
2905 | #define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7) | 2935 | #define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7) |
2906 | #define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7) | 2936 | #define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7) |
2937 | #define DREF_SUPERSPREAD_SOURCE_MASK (3<<7) | ||
2907 | #define DREF_SSC4_DOWNSPREAD (0<<6) | 2938 | #define DREF_SSC4_DOWNSPREAD (0<<6) |
2908 | #define DREF_SSC4_CENTERSPREAD (1<<6) | 2939 | #define DREF_SSC4_CENTERSPREAD (1<<6) |
2909 | #define DREF_SSC1_DISABLE (0<<1) | 2940 | #define DREF_SSC1_DISABLE (0<<1) |
@@ -2936,60 +2967,69 @@ | |||
2936 | 2967 | ||
2937 | /* transcoder */ | 2968 | /* transcoder */ |
2938 | 2969 | ||
2939 | #define TRANS_HTOTAL_A 0xe0000 | 2970 | #define _TRANS_HTOTAL_A 0xe0000 |
2940 | #define TRANS_HTOTAL_SHIFT 16 | 2971 | #define TRANS_HTOTAL_SHIFT 16 |
2941 | #define TRANS_HACTIVE_SHIFT 0 | 2972 | #define TRANS_HACTIVE_SHIFT 0 |
2942 | #define TRANS_HBLANK_A 0xe0004 | 2973 | #define _TRANS_HBLANK_A 0xe0004 |
2943 | #define TRANS_HBLANK_END_SHIFT 16 | 2974 | #define TRANS_HBLANK_END_SHIFT 16 |
2944 | #define TRANS_HBLANK_START_SHIFT 0 | 2975 | #define TRANS_HBLANK_START_SHIFT 0 |
2945 | #define TRANS_HSYNC_A 0xe0008 | 2976 | #define _TRANS_HSYNC_A 0xe0008 |
2946 | #define TRANS_HSYNC_END_SHIFT 16 | 2977 | #define TRANS_HSYNC_END_SHIFT 16 |
2947 | #define TRANS_HSYNC_START_SHIFT 0 | 2978 | #define TRANS_HSYNC_START_SHIFT 0 |
2948 | #define TRANS_VTOTAL_A 0xe000c | 2979 | #define _TRANS_VTOTAL_A 0xe000c |
2949 | #define TRANS_VTOTAL_SHIFT 16 | 2980 | #define TRANS_VTOTAL_SHIFT 16 |
2950 | #define TRANS_VACTIVE_SHIFT 0 | 2981 | #define TRANS_VACTIVE_SHIFT 0 |
2951 | #define TRANS_VBLANK_A 0xe0010 | 2982 | #define _TRANS_VBLANK_A 0xe0010 |
2952 | #define TRANS_VBLANK_END_SHIFT 16 | 2983 | #define TRANS_VBLANK_END_SHIFT 16 |
2953 | #define TRANS_VBLANK_START_SHIFT 0 | 2984 | #define TRANS_VBLANK_START_SHIFT 0 |
2954 | #define TRANS_VSYNC_A 0xe0014 | 2985 | #define _TRANS_VSYNC_A 0xe0014 |
2955 | #define TRANS_VSYNC_END_SHIFT 16 | 2986 | #define TRANS_VSYNC_END_SHIFT 16 |
2956 | #define TRANS_VSYNC_START_SHIFT 0 | 2987 | #define TRANS_VSYNC_START_SHIFT 0 |
2957 | 2988 | ||
2958 | #define TRANSA_DATA_M1 0xe0030 | 2989 | #define _TRANSA_DATA_M1 0xe0030 |
2959 | #define TRANSA_DATA_N1 0xe0034 | 2990 | #define _TRANSA_DATA_N1 0xe0034 |
2960 | #define TRANSA_DATA_M2 0xe0038 | 2991 | #define _TRANSA_DATA_M2 0xe0038 |
2961 | #define TRANSA_DATA_N2 0xe003c | 2992 | #define _TRANSA_DATA_N2 0xe003c |
2962 | #define TRANSA_DP_LINK_M1 0xe0040 | 2993 | #define _TRANSA_DP_LINK_M1 0xe0040 |
2963 | #define TRANSA_DP_LINK_N1 0xe0044 | 2994 | #define _TRANSA_DP_LINK_N1 0xe0044 |
2964 | #define TRANSA_DP_LINK_M2 0xe0048 | 2995 | #define _TRANSA_DP_LINK_M2 0xe0048 |
2965 | #define TRANSA_DP_LINK_N2 0xe004c | 2996 | #define _TRANSA_DP_LINK_N2 0xe004c |
2966 | 2997 | ||
2967 | #define TRANS_HTOTAL_B 0xe1000 | 2998 | #define _TRANS_HTOTAL_B 0xe1000 |
2968 | #define TRANS_HBLANK_B 0xe1004 | 2999 | #define _TRANS_HBLANK_B 0xe1004 |
2969 | #define TRANS_HSYNC_B 0xe1008 | 3000 | #define _TRANS_HSYNC_B 0xe1008 |
2970 | #define TRANS_VTOTAL_B 0xe100c | 3001 | #define _TRANS_VTOTAL_B 0xe100c |
2971 | #define TRANS_VBLANK_B 0xe1010 | 3002 | #define _TRANS_VBLANK_B 0xe1010 |
2972 | #define TRANS_VSYNC_B 0xe1014 | 3003 | #define _TRANS_VSYNC_B 0xe1014 |
2973 | 3004 | ||
2974 | #define TRANS_HTOTAL(pipe) _PIPE(pipe, TRANS_HTOTAL_A, TRANS_HTOTAL_B) | 3005 | #define TRANS_HTOTAL(pipe) _PIPE(pipe, _TRANS_HTOTAL_A, _TRANS_HTOTAL_B) |
2975 | #define TRANS_HBLANK(pipe) _PIPE(pipe, TRANS_HBLANK_A, TRANS_HBLANK_B) | 3006 | #define TRANS_HBLANK(pipe) _PIPE(pipe, _TRANS_HBLANK_A, _TRANS_HBLANK_B) |
2976 | #define TRANS_HSYNC(pipe) _PIPE(pipe, TRANS_HSYNC_A, TRANS_HSYNC_B) | 3007 | #define TRANS_HSYNC(pipe) _PIPE(pipe, _TRANS_HSYNC_A, _TRANS_HSYNC_B) |
2977 | #define TRANS_VTOTAL(pipe) _PIPE(pipe, TRANS_VTOTAL_A, TRANS_VTOTAL_B) | 3008 | #define TRANS_VTOTAL(pipe) _PIPE(pipe, _TRANS_VTOTAL_A, _TRANS_VTOTAL_B) |
2978 | #define TRANS_VBLANK(pipe) _PIPE(pipe, TRANS_VBLANK_A, TRANS_VBLANK_B) | 3009 | #define TRANS_VBLANK(pipe) _PIPE(pipe, _TRANS_VBLANK_A, _TRANS_VBLANK_B) |
2979 | #define TRANS_VSYNC(pipe) _PIPE(pipe, TRANS_VSYNC_A, TRANS_VSYNC_B) | 3010 | #define TRANS_VSYNC(pipe) _PIPE(pipe, _TRANS_VSYNC_A, _TRANS_VSYNC_B) |
2980 | 3011 | ||
2981 | #define TRANSB_DATA_M1 0xe1030 | 3012 | #define _TRANSB_DATA_M1 0xe1030 |
2982 | #define TRANSB_DATA_N1 0xe1034 | 3013 | #define _TRANSB_DATA_N1 0xe1034 |
2983 | #define TRANSB_DATA_M2 0xe1038 | 3014 | #define _TRANSB_DATA_M2 0xe1038 |
2984 | #define TRANSB_DATA_N2 0xe103c | 3015 | #define _TRANSB_DATA_N2 0xe103c |
2985 | #define TRANSB_DP_LINK_M1 0xe1040 | 3016 | #define _TRANSB_DP_LINK_M1 0xe1040 |
2986 | #define TRANSB_DP_LINK_N1 0xe1044 | 3017 | #define _TRANSB_DP_LINK_N1 0xe1044 |
2987 | #define TRANSB_DP_LINK_M2 0xe1048 | 3018 | #define _TRANSB_DP_LINK_M2 0xe1048 |
2988 | #define TRANSB_DP_LINK_N2 0xe104c | 3019 | #define _TRANSB_DP_LINK_N2 0xe104c |
2989 | 3020 | ||
2990 | #define TRANSACONF 0xf0008 | 3021 | #define TRANSDATA_M1(pipe) _PIPE(pipe, _TRANSA_DATA_M1, _TRANSB_DATA_M1) |
2991 | #define TRANSBCONF 0xf1008 | 3022 | #define TRANSDATA_N1(pipe) _PIPE(pipe, _TRANSA_DATA_N1, _TRANSB_DATA_N1) |
2992 | #define TRANSCONF(plane) _PIPE(plane, TRANSACONF, TRANSBCONF) | 3023 | #define TRANSDATA_M2(pipe) _PIPE(pipe, _TRANSA_DATA_M2, _TRANSB_DATA_M2) |
3024 | #define TRANSDATA_N2(pipe) _PIPE(pipe, _TRANSA_DATA_N2, _TRANSB_DATA_N2) | ||
3025 | #define TRANSDPLINK_M1(pipe) _PIPE(pipe, _TRANSA_DP_LINK_M1, _TRANSB_DP_LINK_M1) | ||
3026 | #define TRANSDPLINK_N1(pipe) _PIPE(pipe, _TRANSA_DP_LINK_N1, _TRANSB_DP_LINK_N1) | ||
3027 | #define TRANSDPLINK_M2(pipe) _PIPE(pipe, _TRANSA_DP_LINK_M2, _TRANSB_DP_LINK_M2) | ||
3028 | #define TRANSDPLINK_N2(pipe) _PIPE(pipe, _TRANSA_DP_LINK_N2, _TRANSB_DP_LINK_N2) | ||
3029 | |||
3030 | #define _TRANSACONF 0xf0008 | ||
3031 | #define _TRANSBCONF 0xf1008 | ||
3032 | #define TRANSCONF(plane) _PIPE(plane, _TRANSACONF, _TRANSBCONF) | ||
2993 | #define TRANS_DISABLE (0<<31) | 3033 | #define TRANS_DISABLE (0<<31) |
2994 | #define TRANS_ENABLE (1<<31) | 3034 | #define TRANS_ENABLE (1<<31) |
2995 | #define TRANS_STATE_MASK (1<<30) | 3035 | #define TRANS_STATE_MASK (1<<30) |
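A hedged sketch of pairing the per-pipe TRANSCONF() macro above with the enable bit; the wrapper itself is hypothetical:

	/* True when the PCH transcoder feeding 'pipe' is switched on. */
	static bool transcoder_enabled(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
	{
		return (I915_READ(TRANSCONF(pipe)) & TRANS_ENABLE) != 0;
	}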
@@ -3007,18 +3047,19 @@ | |||
3007 | #define TRANS_6BPC (2<<5) | 3047 | #define TRANS_6BPC (2<<5) |
3008 | #define TRANS_12BPC (3<<5) | 3048 | #define TRANS_12BPC (3<<5) |
3009 | 3049 | ||
3010 | #define FDI_RXA_CHICKEN 0xc200c | 3050 | #define _FDI_RXA_CHICKEN 0xc200c |
3011 | #define FDI_RXB_CHICKEN 0xc2010 | 3051 | #define _FDI_RXB_CHICKEN 0xc2010 |
3012 | #define FDI_RX_PHASE_SYNC_POINTER_ENABLE (1) | 3052 | #define FDI_RX_PHASE_SYNC_POINTER_OVR (1<<1) |
3013 | #define FDI_RX_CHICKEN(pipe) _PIPE(pipe, FDI_RXA_CHICKEN, FDI_RXB_CHICKEN) | 3053 | #define FDI_RX_PHASE_SYNC_POINTER_EN (1<<0) |
3054 | #define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN) | ||
3014 | 3055 | ||
3015 | #define SOUTH_DSPCLK_GATE_D 0xc2020 | 3056 | #define SOUTH_DSPCLK_GATE_D 0xc2020 |
3016 | #define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29) | 3057 | #define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29) |
3017 | 3058 | ||
3018 | /* CPU: FDI_TX */ | 3059 | /* CPU: FDI_TX */ |
3019 | #define FDI_TXA_CTL 0x60100 | 3060 | #define _FDI_TXA_CTL 0x60100 |
3020 | #define FDI_TXB_CTL 0x61100 | 3061 | #define _FDI_TXB_CTL 0x61100 |
3021 | #define FDI_TX_CTL(pipe) _PIPE(pipe, FDI_TXA_CTL, FDI_TXB_CTL) | 3062 | #define FDI_TX_CTL(pipe) _PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL) |
3022 | #define FDI_TX_DISABLE (0<<31) | 3063 | #define FDI_TX_DISABLE (0<<31) |
3023 | #define FDI_TX_ENABLE (1<<31) | 3064 | #define FDI_TX_ENABLE (1<<31) |
3024 | #define FDI_LINK_TRAIN_PATTERN_1 (0<<28) | 3065 | #define FDI_LINK_TRAIN_PATTERN_1 (0<<28) |
@@ -3058,9 +3099,9 @@ | |||
3058 | #define FDI_SCRAMBLING_DISABLE (1<<7) | 3099 | #define FDI_SCRAMBLING_DISABLE (1<<7) |
3059 | 3100 | ||
3060 | /* FDI_RX, FDI_X is hard-wired to Transcoder_X */ | 3101 | /* FDI_RX, FDI_X is hard-wired to Transcoder_X */ |
3061 | #define FDI_RXA_CTL 0xf000c | 3102 | #define _FDI_RXA_CTL 0xf000c |
3062 | #define FDI_RXB_CTL 0xf100c | 3103 | #define _FDI_RXB_CTL 0xf100c |
3063 | #define FDI_RX_CTL(pipe) _PIPE(pipe, FDI_RXA_CTL, FDI_RXB_CTL) | 3104 | #define FDI_RX_CTL(pipe) _PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL) |
3064 | #define FDI_RX_ENABLE (1<<31) | 3105 | #define FDI_RX_ENABLE (1<<31) |
3065 | /* train, dp width same as FDI_TX */ | 3106 | /* train, dp width same as FDI_TX */ |
3066 | #define FDI_DP_PORT_WIDTH_X8 (7<<19) | 3107 | #define FDI_DP_PORT_WIDTH_X8 (7<<19) |
@@ -3085,15 +3126,15 @@ | |||
3085 | #define FDI_LINK_TRAIN_NORMAL_CPT (3<<8) | 3126 | #define FDI_LINK_TRAIN_NORMAL_CPT (3<<8) |
3086 | #define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8) | 3127 | #define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8) |
3087 | 3128 | ||
3088 | #define FDI_RXA_MISC 0xf0010 | 3129 | #define _FDI_RXA_MISC 0xf0010 |
3089 | #define FDI_RXB_MISC 0xf1010 | 3130 | #define _FDI_RXB_MISC 0xf1010 |
3090 | #define FDI_RXA_TUSIZE1 0xf0030 | 3131 | #define _FDI_RXA_TUSIZE1 0xf0030 |
3091 | #define FDI_RXA_TUSIZE2 0xf0038 | 3132 | #define _FDI_RXA_TUSIZE2 0xf0038 |
3092 | #define FDI_RXB_TUSIZE1 0xf1030 | 3133 | #define _FDI_RXB_TUSIZE1 0xf1030 |
3093 | #define FDI_RXB_TUSIZE2 0xf1038 | 3134 | #define _FDI_RXB_TUSIZE2 0xf1038 |
3094 | #define FDI_RX_MISC(pipe) _PIPE(pipe, FDI_RXA_MISC, FDI_RXB_MISC) | 3135 | #define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC) |
3095 | #define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, FDI_RXA_TUSIZE1, FDI_RXB_TUSIZE1) | 3136 | #define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1) |
3096 | #define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, FDI_RXA_TUSIZE2, FDI_RXB_TUSIZE2) | 3137 | #define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2) |
3097 | 3138 | ||
3098 | /* FDI_RX interrupt register format */ | 3139 | /* FDI_RX interrupt register format */ |
3099 | #define FDI_RX_INTER_LANE_ALIGN (1<<10) | 3140 | #define FDI_RX_INTER_LANE_ALIGN (1<<10) |
@@ -3108,12 +3149,12 @@ | |||
3108 | #define FDI_RX_CROSS_CLOCK_OVERFLOW (1<<1) | 3149 | #define FDI_RX_CROSS_CLOCK_OVERFLOW (1<<1) |
3109 | #define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1<<0) | 3150 | #define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1<<0) |
3110 | 3151 | ||
3111 | #define FDI_RXA_IIR 0xf0014 | 3152 | #define _FDI_RXA_IIR 0xf0014 |
3112 | #define FDI_RXA_IMR 0xf0018 | 3153 | #define _FDI_RXA_IMR 0xf0018 |
3113 | #define FDI_RXB_IIR 0xf1014 | 3154 | #define _FDI_RXB_IIR 0xf1014 |
3114 | #define FDI_RXB_IMR 0xf1018 | 3155 | #define _FDI_RXB_IMR 0xf1018 |
3115 | #define FDI_RX_IIR(pipe) _PIPE(pipe, FDI_RXA_IIR, FDI_RXB_IIR) | 3156 | #define FDI_RX_IIR(pipe) _PIPE(pipe, _FDI_RXA_IIR, _FDI_RXB_IIR) |
3116 | #define FDI_RX_IMR(pipe) _PIPE(pipe, FDI_RXA_IMR, FDI_RXB_IMR) | 3157 | #define FDI_RX_IMR(pipe) _PIPE(pipe, _FDI_RXA_IMR, _FDI_RXB_IMR) |
3117 | 3158 | ||
3118 | #define FDI_PLL_CTL_1 0xfe000 | 3159 | #define FDI_PLL_CTL_1 0xfe000 |
3119 | #define FDI_PLL_CTL_2 0xfe004 | 3160 | #define FDI_PLL_CTL_2 0xfe004 |
@@ -3143,11 +3184,15 @@ | |||
3143 | #define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17) | 3184 | #define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17) |
3144 | #define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) | 3185 | #define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) |
3145 | 3186 | ||
3187 | #define ADPA_PIPE_ENABLED(V, P) \ | ||
3188 | (((V) & (ADPA_TRANS_SELECT_MASK | ADPA_DAC_ENABLE)) == ((P) << 30 | ADPA_DAC_ENABLE)) | ||
3189 | |||
3146 | /* or SDVOB */ | 3190 | /* or SDVOB */ |
3147 | #define HDMIB 0xe1140 | 3191 | #define HDMIB 0xe1140 |
3148 | #define PORT_ENABLE (1 << 31) | 3192 | #define PORT_ENABLE (1 << 31) |
3149 | #define TRANSCODER_A (0) | 3193 | #define TRANSCODER_A (0) |
3150 | #define TRANSCODER_B (1 << 30) | 3194 | #define TRANSCODER_B (1 << 30) |
3195 | #define TRANSCODER_MASK (1 << 30) | ||
3151 | #define COLOR_FORMAT_8bpc (0) | 3196 | #define COLOR_FORMAT_8bpc (0) |
3152 | #define COLOR_FORMAT_12bpc (3 << 26) | 3197 | #define COLOR_FORMAT_12bpc (3 << 26) |
3153 | #define SDVOB_HOTPLUG_ENABLE (1 << 23) | 3198 | #define SDVOB_HOTPLUG_ENABLE (1 << 23) |
@@ -3163,6 +3208,9 @@ | |||
3163 | #define HSYNC_ACTIVE_HIGH (1 << 3) | 3208 | #define HSYNC_ACTIVE_HIGH (1 << 3) |
3164 | #define PORT_DETECTED (1 << 2) | 3209 | #define PORT_DETECTED (1 << 2) |
3165 | 3210 | ||
3211 | #define HDMI_PIPE_ENABLED(V, P) \ | ||
3212 | (((V) & (TRANSCODER_MASK | PORT_ENABLE)) == ((P) << 30 | PORT_ENABLE)) | ||
3213 | |||
3166 | /* PCH SDVOB multiplex with HDMIB */ | 3214 | /* PCH SDVOB multiplex with HDMIB */ |
3167 | #define PCH_SDVOB HDMIB | 3215 | #define PCH_SDVOB HDMIB |
3168 | 3216 | ||
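ADPA_PIPE_ENABLED() and HDMI_PIPE_ENABLED() added above follow the same mask-and-compare pattern as the DP variant; a usage sketch with HDMIB (defined in this file), where the wrapper itself is hypothetical:

	/* True when the HDMIB/SDVOB port is enabled and its transcoder
	 * select matches 'pipe'; both bits live in the same register word. */
	static bool hdmib_enabled_on_pipe(struct drm_i915_private *dev_priv,
					  enum pipe pipe)
	{
		u32 val = I915_READ(HDMIB);

		return HDMI_PIPE_ENABLED(val, pipe);
	}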
@@ -3238,6 +3286,7 @@ | |||
3238 | #define TRANS_DP_PORT_SEL_B (0<<29) | 3286 | #define TRANS_DP_PORT_SEL_B (0<<29) |
3239 | #define TRANS_DP_PORT_SEL_C (1<<29) | 3287 | #define TRANS_DP_PORT_SEL_C (1<<29) |
3240 | #define TRANS_DP_PORT_SEL_D (2<<29) | 3288 | #define TRANS_DP_PORT_SEL_D (2<<29) |
3289 | #define TRANS_DP_PORT_SEL_NONE (3<<29) | ||
3241 | #define TRANS_DP_PORT_SEL_MASK (3<<29) | 3290 | #define TRANS_DP_PORT_SEL_MASK (3<<29) |
3242 | #define TRANS_DP_AUDIO_ONLY (1<<26) | 3291 | #define TRANS_DP_AUDIO_ONLY (1<<26) |
3243 | #define TRANS_DP_ENH_FRAMING (1<<18) | 3292 | #define TRANS_DP_ENH_FRAMING (1<<18) |
@@ -3269,6 +3318,8 @@ | |||
3269 | #define FORCEWAKE 0xA18C | 3318 | #define FORCEWAKE 0xA18C |
3270 | #define FORCEWAKE_ACK 0x130090 | 3319 | #define FORCEWAKE_ACK 0x130090 |
3271 | 3320 | ||
3321 | #define GT_FIFO_FREE_ENTRIES 0x120008 | ||
3322 | |||
3272 | #define GEN6_RPNSWREQ 0xA008 | 3323 | #define GEN6_RPNSWREQ 0xA008 |
3273 | #define GEN6_TURBO_DISABLE (1<<31) | 3324 | #define GEN6_TURBO_DISABLE (1<<31) |
3274 | #define GEN6_FREQUENCY(x) ((x)<<25) | 3325 | #define GEN6_FREQUENCY(x) ((x)<<25) |
@@ -3286,15 +3337,28 @@ | |||
3286 | #define GEN6_RP_DOWN_TIMEOUT 0xA010 | 3337 | #define GEN6_RP_DOWN_TIMEOUT 0xA010 |
3287 | #define GEN6_RP_INTERRUPT_LIMITS 0xA014 | 3338 | #define GEN6_RP_INTERRUPT_LIMITS 0xA014 |
3288 | #define GEN6_RPSTAT1 0xA01C | 3339 | #define GEN6_RPSTAT1 0xA01C |
3340 | #define GEN6_CAGF_SHIFT 8 | ||
3341 | #define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT) | ||
3289 | #define GEN6_RP_CONTROL 0xA024 | 3342 | #define GEN6_RP_CONTROL 0xA024 |
3290 | #define GEN6_RP_MEDIA_TURBO (1<<11) | 3343 | #define GEN6_RP_MEDIA_TURBO (1<<11) |
3291 | #define GEN6_RP_USE_NORMAL_FREQ (1<<9) | 3344 | #define GEN6_RP_USE_NORMAL_FREQ (1<<9) |
3292 | #define GEN6_RP_MEDIA_IS_GFX (1<<8) | 3345 | #define GEN6_RP_MEDIA_IS_GFX (1<<8) |
3293 | #define GEN6_RP_ENABLE (1<<7) | 3346 | #define GEN6_RP_ENABLE (1<<7) |
3294 | #define GEN6_RP_UP_BUSY_MAX (0x2<<3) | 3347 | #define GEN6_RP_UP_IDLE_MIN (0x1<<3) |
3295 | #define GEN6_RP_DOWN_BUSY_MIN (0x2<<0) | 3348 | #define GEN6_RP_UP_BUSY_AVG (0x2<<3) |
3349 | #define GEN6_RP_UP_BUSY_CONT (0x4<<3) | ||
3350 | #define GEN6_RP_DOWN_IDLE_CONT (0x1<<0) | ||
3296 | #define GEN6_RP_UP_THRESHOLD 0xA02C | 3351 | #define GEN6_RP_UP_THRESHOLD 0xA02C |
3297 | #define GEN6_RP_DOWN_THRESHOLD 0xA030 | 3352 | #define GEN6_RP_DOWN_THRESHOLD 0xA030 |
3353 | #define GEN6_RP_CUR_UP_EI 0xA050 | ||
3354 | #define GEN6_CURICONT_MASK 0xffffff | ||
3355 | #define GEN6_RP_CUR_UP 0xA054 | ||
3356 | #define GEN6_CURBSYTAVG_MASK 0xffffff | ||
3357 | #define GEN6_RP_PREV_UP 0xA058 | ||
3358 | #define GEN6_RP_CUR_DOWN_EI 0xA05C | ||
3359 | #define GEN6_CURIAVG_MASK 0xffffff | ||
3360 | #define GEN6_RP_CUR_DOWN 0xA060 | ||
3361 | #define GEN6_RP_PREV_DOWN 0xA064 | ||
3298 | #define GEN6_RP_UP_EI 0xA068 | 3362 | #define GEN6_RP_UP_EI 0xA068 |
3299 | #define GEN6_RP_DOWN_EI 0xA06C | 3363 | #define GEN6_RP_DOWN_EI 0xA06C |
3300 | #define GEN6_RP_IDLE_HYSTERSIS 0xA070 | 3364 | #define GEN6_RP_IDLE_HYSTERSIS 0xA070 |
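The CAGF field added to GEN6_RPSTAT1 above reports the current actual graphics frequency; a minimal sketch of extracting it (the conversion to MHz is platform specific and deliberately left out, and the helper name is an assumption):

	/* Pull the current actual graphics frequency field out of RPSTAT1.
	 * The value is in hardware frequency units, not MHz. */
	static u32 gen6_current_cagf(struct drm_i915_private *dev_priv)
	{
		u32 rpstat = I915_READ(GEN6_RPSTAT1);

		return (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
	}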
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 0521ecf26017..7e992a8e9098 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -34,11 +34,10 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) | |||
34 | struct drm_i915_private *dev_priv = dev->dev_private; | 34 | struct drm_i915_private *dev_priv = dev->dev_private; |
35 | u32 dpll_reg; | 35 | u32 dpll_reg; |
36 | 36 | ||
37 | if (HAS_PCH_SPLIT(dev)) { | 37 | if (HAS_PCH_SPLIT(dev)) |
38 | dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B; | 38 | dpll_reg = (pipe == PIPE_A) ? _PCH_DPLL_A : _PCH_DPLL_B; |
39 | } else { | 39 | else |
40 | dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B; | 40 | dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B; |
41 | } | ||
42 | 41 | ||
43 | return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE); | 42 | return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE); |
44 | } | 43 | } |
@@ -46,7 +45,7 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) | |||
46 | static void i915_save_palette(struct drm_device *dev, enum pipe pipe) | 45 | static void i915_save_palette(struct drm_device *dev, enum pipe pipe) |
47 | { | 46 | { |
48 | struct drm_i915_private *dev_priv = dev->dev_private; | 47 | struct drm_i915_private *dev_priv = dev->dev_private; |
49 | unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B); | 48 | unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B); |
50 | u32 *array; | 49 | u32 *array; |
51 | int i; | 50 | int i; |
52 | 51 | ||
@@ -54,7 +53,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe) | |||
54 | return; | 53 | return; |
55 | 54 | ||
56 | if (HAS_PCH_SPLIT(dev)) | 55 | if (HAS_PCH_SPLIT(dev)) |
57 | reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B; | 56 | reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B; |
58 | 57 | ||
59 | if (pipe == PIPE_A) | 58 | if (pipe == PIPE_A) |
60 | array = dev_priv->save_palette_a; | 59 | array = dev_priv->save_palette_a; |
@@ -68,7 +67,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe) | |||
68 | static void i915_restore_palette(struct drm_device *dev, enum pipe pipe) | 67 | static void i915_restore_palette(struct drm_device *dev, enum pipe pipe) |
69 | { | 68 | { |
70 | struct drm_i915_private *dev_priv = dev->dev_private; | 69 | struct drm_i915_private *dev_priv = dev->dev_private; |
71 | unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B); | 70 | unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B); |
72 | u32 *array; | 71 | u32 *array; |
73 | int i; | 72 | int i; |
74 | 73 | ||
@@ -76,7 +75,7 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe) | |||
76 | return; | 75 | return; |
77 | 76 | ||
78 | if (HAS_PCH_SPLIT(dev)) | 77 | if (HAS_PCH_SPLIT(dev)) |
79 | reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B; | 78 | reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B; |
80 | 79 | ||
81 | if (pipe == PIPE_A) | 80 | if (pipe == PIPE_A) |
82 | array = dev_priv->save_palette_a; | 81 | array = dev_priv->save_palette_a; |
@@ -241,12 +240,12 @@ static void i915_save_modeset_reg(struct drm_device *dev) | |||
241 | return; | 240 | return; |
242 | 241 | ||
243 | /* Cursor state */ | 242 | /* Cursor state */ |
244 | dev_priv->saveCURACNTR = I915_READ(CURACNTR); | 243 | dev_priv->saveCURACNTR = I915_READ(_CURACNTR); |
245 | dev_priv->saveCURAPOS = I915_READ(CURAPOS); | 244 | dev_priv->saveCURAPOS = I915_READ(_CURAPOS); |
246 | dev_priv->saveCURABASE = I915_READ(CURABASE); | 245 | dev_priv->saveCURABASE = I915_READ(_CURABASE); |
247 | dev_priv->saveCURBCNTR = I915_READ(CURBCNTR); | 246 | dev_priv->saveCURBCNTR = I915_READ(_CURBCNTR); |
248 | dev_priv->saveCURBPOS = I915_READ(CURBPOS); | 247 | dev_priv->saveCURBPOS = I915_READ(_CURBPOS); |
249 | dev_priv->saveCURBBASE = I915_READ(CURBBASE); | 248 | dev_priv->saveCURBBASE = I915_READ(_CURBBASE); |
250 | if (IS_GEN2(dev)) | 249 | if (IS_GEN2(dev)) |
251 | dev_priv->saveCURSIZE = I915_READ(CURSIZE); | 250 | dev_priv->saveCURSIZE = I915_READ(CURSIZE); |
252 | 251 | ||
@@ -256,118 +255,118 @@ static void i915_save_modeset_reg(struct drm_device *dev) | |||
256 | } | 255 | } |
257 | 256 | ||
258 | /* Pipe & plane A info */ | 257 | /* Pipe & plane A info */ |
259 | dev_priv->savePIPEACONF = I915_READ(PIPEACONF); | 258 | dev_priv->savePIPEACONF = I915_READ(_PIPEACONF); |
260 | dev_priv->savePIPEASRC = I915_READ(PIPEASRC); | 259 | dev_priv->savePIPEASRC = I915_READ(_PIPEASRC); |
261 | if (HAS_PCH_SPLIT(dev)) { | 260 | if (HAS_PCH_SPLIT(dev)) { |
262 | dev_priv->saveFPA0 = I915_READ(PCH_FPA0); | 261 | dev_priv->saveFPA0 = I915_READ(_PCH_FPA0); |
263 | dev_priv->saveFPA1 = I915_READ(PCH_FPA1); | 262 | dev_priv->saveFPA1 = I915_READ(_PCH_FPA1); |
264 | dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A); | 263 | dev_priv->saveDPLL_A = I915_READ(_PCH_DPLL_A); |
265 | } else { | 264 | } else { |
266 | dev_priv->saveFPA0 = I915_READ(FPA0); | 265 | dev_priv->saveFPA0 = I915_READ(_FPA0); |
267 | dev_priv->saveFPA1 = I915_READ(FPA1); | 266 | dev_priv->saveFPA1 = I915_READ(_FPA1); |
268 | dev_priv->saveDPLL_A = I915_READ(DPLL_A); | 267 | dev_priv->saveDPLL_A = I915_READ(_DPLL_A); |
269 | } | 268 | } |
270 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) | 269 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) |
271 | dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD); | 270 | dev_priv->saveDPLL_A_MD = I915_READ(_DPLL_A_MD); |
272 | dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A); | 271 | dev_priv->saveHTOTAL_A = I915_READ(_HTOTAL_A); |
273 | dev_priv->saveHBLANK_A = I915_READ(HBLANK_A); | 272 | dev_priv->saveHBLANK_A = I915_READ(_HBLANK_A); |
274 | dev_priv->saveHSYNC_A = I915_READ(HSYNC_A); | 273 | dev_priv->saveHSYNC_A = I915_READ(_HSYNC_A); |
275 | dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A); | 274 | dev_priv->saveVTOTAL_A = I915_READ(_VTOTAL_A); |
276 | dev_priv->saveVBLANK_A = I915_READ(VBLANK_A); | 275 | dev_priv->saveVBLANK_A = I915_READ(_VBLANK_A); |
277 | dev_priv->saveVSYNC_A = I915_READ(VSYNC_A); | 276 | dev_priv->saveVSYNC_A = I915_READ(_VSYNC_A); |
278 | if (!HAS_PCH_SPLIT(dev)) | 277 | if (!HAS_PCH_SPLIT(dev)) |
279 | dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); | 278 | dev_priv->saveBCLRPAT_A = I915_READ(_BCLRPAT_A); |
280 | 279 | ||
281 | if (HAS_PCH_SPLIT(dev)) { | 280 | if (HAS_PCH_SPLIT(dev)) { |
282 | dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1); | 281 | dev_priv->savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1); |
283 | dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1); | 282 | dev_priv->savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1); |
284 | dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1); | 283 | dev_priv->savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1); |
285 | dev_priv->savePIPEA_LINK_N1 = I915_READ(PIPEA_LINK_N1); | 284 | dev_priv->savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1); |
286 | 285 | ||
287 | dev_priv->saveFDI_TXA_CTL = I915_READ(FDI_TXA_CTL); | 286 | dev_priv->saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL); |
288 | dev_priv->saveFDI_RXA_CTL = I915_READ(FDI_RXA_CTL); | 287 | dev_priv->saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL); |
289 | 288 | ||
290 | dev_priv->savePFA_CTL_1 = I915_READ(PFA_CTL_1); | 289 | dev_priv->savePFA_CTL_1 = I915_READ(_PFA_CTL_1); |
291 | dev_priv->savePFA_WIN_SZ = I915_READ(PFA_WIN_SZ); | 290 | dev_priv->savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ); |
292 | dev_priv->savePFA_WIN_POS = I915_READ(PFA_WIN_POS); | 291 | dev_priv->savePFA_WIN_POS = I915_READ(_PFA_WIN_POS); |
293 | 292 | ||
294 | dev_priv->saveTRANSACONF = I915_READ(TRANSACONF); | 293 | dev_priv->saveTRANSACONF = I915_READ(_TRANSACONF); |
295 | dev_priv->saveTRANS_HTOTAL_A = I915_READ(TRANS_HTOTAL_A); | 294 | dev_priv->saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A); |
296 | dev_priv->saveTRANS_HBLANK_A = I915_READ(TRANS_HBLANK_A); | 295 | dev_priv->saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A); |
297 | dev_priv->saveTRANS_HSYNC_A = I915_READ(TRANS_HSYNC_A); | 296 | dev_priv->saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A); |
298 | dev_priv->saveTRANS_VTOTAL_A = I915_READ(TRANS_VTOTAL_A); | 297 | dev_priv->saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A); |
299 | dev_priv->saveTRANS_VBLANK_A = I915_READ(TRANS_VBLANK_A); | 298 | dev_priv->saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A); |
300 | dev_priv->saveTRANS_VSYNC_A = I915_READ(TRANS_VSYNC_A); | 299 | dev_priv->saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A); |
301 | } | 300 | } |
302 | 301 | ||
303 | dev_priv->saveDSPACNTR = I915_READ(DSPACNTR); | 302 | dev_priv->saveDSPACNTR = I915_READ(_DSPACNTR); |
304 | dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE); | 303 | dev_priv->saveDSPASTRIDE = I915_READ(_DSPASTRIDE); |
305 | dev_priv->saveDSPASIZE = I915_READ(DSPASIZE); | 304 | dev_priv->saveDSPASIZE = I915_READ(_DSPASIZE); |
306 | dev_priv->saveDSPAPOS = I915_READ(DSPAPOS); | 305 | dev_priv->saveDSPAPOS = I915_READ(_DSPAPOS); |
307 | dev_priv->saveDSPAADDR = I915_READ(DSPAADDR); | 306 | dev_priv->saveDSPAADDR = I915_READ(_DSPAADDR); |
308 | if (INTEL_INFO(dev)->gen >= 4) { | 307 | if (INTEL_INFO(dev)->gen >= 4) { |
309 | dev_priv->saveDSPASURF = I915_READ(DSPASURF); | 308 | dev_priv->saveDSPASURF = I915_READ(_DSPASURF); |
310 | dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF); | 309 | dev_priv->saveDSPATILEOFF = I915_READ(_DSPATILEOFF); |
311 | } | 310 | } |
312 | i915_save_palette(dev, PIPE_A); | 311 | i915_save_palette(dev, PIPE_A); |
313 | dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT); | 312 | dev_priv->savePIPEASTAT = I915_READ(_PIPEASTAT); |
314 | 313 | ||
315 | /* Pipe & plane B info */ | 314 | /* Pipe & plane B info */ |
316 | dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF); | 315 | dev_priv->savePIPEBCONF = I915_READ(_PIPEBCONF); |
317 | dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC); | 316 | dev_priv->savePIPEBSRC = I915_READ(_PIPEBSRC); |
318 | if (HAS_PCH_SPLIT(dev)) { | 317 | if (HAS_PCH_SPLIT(dev)) { |
319 | dev_priv->saveFPB0 = I915_READ(PCH_FPB0); | 318 | dev_priv->saveFPB0 = I915_READ(_PCH_FPB0); |
320 | dev_priv->saveFPB1 = I915_READ(PCH_FPB1); | 319 | dev_priv->saveFPB1 = I915_READ(_PCH_FPB1); |
321 | dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B); | 320 | dev_priv->saveDPLL_B = I915_READ(_PCH_DPLL_B); |
322 | } else { | 321 | } else { |
323 | dev_priv->saveFPB0 = I915_READ(FPB0); | 322 | dev_priv->saveFPB0 = I915_READ(_FPB0); |
324 | dev_priv->saveFPB1 = I915_READ(FPB1); | 323 | dev_priv->saveFPB1 = I915_READ(_FPB1); |
325 | dev_priv->saveDPLL_B = I915_READ(DPLL_B); | 324 | dev_priv->saveDPLL_B = I915_READ(_DPLL_B); |
326 | } | 325 | } |
327 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) | 326 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) |
328 | dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD); | 327 | dev_priv->saveDPLL_B_MD = I915_READ(_DPLL_B_MD); |
329 | dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B); | 328 | dev_priv->saveHTOTAL_B = I915_READ(_HTOTAL_B); |
330 | dev_priv->saveHBLANK_B = I915_READ(HBLANK_B); | 329 | dev_priv->saveHBLANK_B = I915_READ(_HBLANK_B); |
331 | dev_priv->saveHSYNC_B = I915_READ(HSYNC_B); | 330 | dev_priv->saveHSYNC_B = I915_READ(_HSYNC_B); |
332 | dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B); | 331 | dev_priv->saveVTOTAL_B = I915_READ(_VTOTAL_B); |
333 | dev_priv->saveVBLANK_B = I915_READ(VBLANK_B); | 332 | dev_priv->saveVBLANK_B = I915_READ(_VBLANK_B); |
334 | dev_priv->saveVSYNC_B = I915_READ(VSYNC_B); | 333 | dev_priv->saveVSYNC_B = I915_READ(_VSYNC_B); |
335 | if (!HAS_PCH_SPLIT(dev)) | 334 | if (!HAS_PCH_SPLIT(dev)) |
336 | dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B); | 335 | dev_priv->saveBCLRPAT_B = I915_READ(_BCLRPAT_B); |
337 | 336 | ||
338 | if (HAS_PCH_SPLIT(dev)) { | 337 | if (HAS_PCH_SPLIT(dev)) { |
339 | dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1); | 338 | dev_priv->savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1); |
340 | dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1); | 339 | dev_priv->savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1); |
341 | dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1); | 340 | dev_priv->savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1); |
342 | dev_priv->savePIPEB_LINK_N1 = I915_READ(PIPEB_LINK_N1); | 341 | dev_priv->savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1); |
343 | 342 | ||
344 | dev_priv->saveFDI_TXB_CTL = I915_READ(FDI_TXB_CTL); | 343 | dev_priv->saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL); |
345 | dev_priv->saveFDI_RXB_CTL = I915_READ(FDI_RXB_CTL); | 344 | dev_priv->saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL); |
346 | 345 | ||
347 | dev_priv->savePFB_CTL_1 = I915_READ(PFB_CTL_1); | 346 | dev_priv->savePFB_CTL_1 = I915_READ(_PFB_CTL_1); |
348 | dev_priv->savePFB_WIN_SZ = I915_READ(PFB_WIN_SZ); | 347 | dev_priv->savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ); |
349 | dev_priv->savePFB_WIN_POS = I915_READ(PFB_WIN_POS); | 348 | dev_priv->savePFB_WIN_POS = I915_READ(_PFB_WIN_POS); |
350 | 349 | ||
351 | dev_priv->saveTRANSBCONF = I915_READ(TRANSBCONF); | 350 | dev_priv->saveTRANSBCONF = I915_READ(_TRANSBCONF); |
352 | dev_priv->saveTRANS_HTOTAL_B = I915_READ(TRANS_HTOTAL_B); | 351 | dev_priv->saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B); |
353 | dev_priv->saveTRANS_HBLANK_B = I915_READ(TRANS_HBLANK_B); | 352 | dev_priv->saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B); |
354 | dev_priv->saveTRANS_HSYNC_B = I915_READ(TRANS_HSYNC_B); | 353 | dev_priv->saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B); |
355 | dev_priv->saveTRANS_VTOTAL_B = I915_READ(TRANS_VTOTAL_B); | 354 | dev_priv->saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B); |
356 | dev_priv->saveTRANS_VBLANK_B = I915_READ(TRANS_VBLANK_B); | 355 | dev_priv->saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B); |
357 | dev_priv->saveTRANS_VSYNC_B = I915_READ(TRANS_VSYNC_B); | 356 | dev_priv->saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B); |
358 | } | 357 | } |
359 | 358 | ||
360 | dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR); | 359 | dev_priv->saveDSPBCNTR = I915_READ(_DSPBCNTR); |
361 | dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE); | 360 | dev_priv->saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE); |
362 | dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE); | 361 | dev_priv->saveDSPBSIZE = I915_READ(_DSPBSIZE); |
363 | dev_priv->saveDSPBPOS = I915_READ(DSPBPOS); | 362 | dev_priv->saveDSPBPOS = I915_READ(_DSPBPOS); |
364 | dev_priv->saveDSPBADDR = I915_READ(DSPBADDR); | 363 | dev_priv->saveDSPBADDR = I915_READ(_DSPBADDR); |
365 | if (INTEL_INFO(dev)->gen >= 4) { | 364 | if (INTEL_INFO(dev)->gen >= 4) { |
366 | dev_priv->saveDSPBSURF = I915_READ(DSPBSURF); | 365 | dev_priv->saveDSPBSURF = I915_READ(_DSPBSURF); |
367 | dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF); | 366 | dev_priv->saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF); |
368 | } | 367 | } |
369 | i915_save_palette(dev, PIPE_B); | 368 | i915_save_palette(dev, PIPE_B); |
370 | dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT); | 369 | dev_priv->savePIPEBSTAT = I915_READ(_PIPEBSTAT); |
371 | 370 | ||
372 | /* Fences */ | 371 | /* Fences */ |
373 | switch (INTEL_INFO(dev)->gen) { | 372 | switch (INTEL_INFO(dev)->gen) { |
@@ -426,19 +425,19 @@ static void i915_restore_modeset_reg(struct drm_device *dev) | |||
426 | 425 | ||
427 | 426 | ||
428 | if (HAS_PCH_SPLIT(dev)) { | 427 | if (HAS_PCH_SPLIT(dev)) { |
429 | dpll_a_reg = PCH_DPLL_A; | 428 | dpll_a_reg = _PCH_DPLL_A; |
430 | dpll_b_reg = PCH_DPLL_B; | 429 | dpll_b_reg = _PCH_DPLL_B; |
431 | fpa0_reg = PCH_FPA0; | 430 | fpa0_reg = _PCH_FPA0; |
432 | fpb0_reg = PCH_FPB0; | 431 | fpb0_reg = _PCH_FPB0; |
433 | fpa1_reg = PCH_FPA1; | 432 | fpa1_reg = _PCH_FPA1; |
434 | fpb1_reg = PCH_FPB1; | 433 | fpb1_reg = _PCH_FPB1; |
435 | } else { | 434 | } else { |
436 | dpll_a_reg = DPLL_A; | 435 | dpll_a_reg = _DPLL_A; |
437 | dpll_b_reg = DPLL_B; | 436 | dpll_b_reg = _DPLL_B; |
438 | fpa0_reg = FPA0; | 437 | fpa0_reg = _FPA0; |
439 | fpb0_reg = FPB0; | 438 | fpb0_reg = _FPB0; |
440 | fpa1_reg = FPA1; | 439 | fpa1_reg = _FPA1; |
441 | fpb1_reg = FPB1; | 440 | fpb1_reg = _FPB1; |
442 | } | 441 | } |
443 | 442 | ||
444 | if (HAS_PCH_SPLIT(dev)) { | 443 | if (HAS_PCH_SPLIT(dev)) { |
@@ -461,60 +460,60 @@ static void i915_restore_modeset_reg(struct drm_device *dev) | |||
461 | POSTING_READ(dpll_a_reg); | 460 | POSTING_READ(dpll_a_reg); |
462 | udelay(150); | 461 | udelay(150); |
463 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { | 462 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { |
464 | I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); | 463 | I915_WRITE(_DPLL_A_MD, dev_priv->saveDPLL_A_MD); |
465 | POSTING_READ(DPLL_A_MD); | 464 | POSTING_READ(_DPLL_A_MD); |
466 | } | 465 | } |
467 | udelay(150); | 466 | udelay(150); |
468 | 467 | ||
469 | /* Restore mode */ | 468 | /* Restore mode */ |
470 | I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A); | 469 | I915_WRITE(_HTOTAL_A, dev_priv->saveHTOTAL_A); |
471 | I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A); | 470 | I915_WRITE(_HBLANK_A, dev_priv->saveHBLANK_A); |
472 | I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A); | 471 | I915_WRITE(_HSYNC_A, dev_priv->saveHSYNC_A); |
473 | I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A); | 472 | I915_WRITE(_VTOTAL_A, dev_priv->saveVTOTAL_A); |
474 | I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A); | 473 | I915_WRITE(_VBLANK_A, dev_priv->saveVBLANK_A); |
475 | I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A); | 474 | I915_WRITE(_VSYNC_A, dev_priv->saveVSYNC_A); |
476 | if (!HAS_PCH_SPLIT(dev)) | 475 | if (!HAS_PCH_SPLIT(dev)) |
477 | I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); | 476 | I915_WRITE(_BCLRPAT_A, dev_priv->saveBCLRPAT_A); |
478 | 477 | ||
479 | if (HAS_PCH_SPLIT(dev)) { | 478 | if (HAS_PCH_SPLIT(dev)) { |
480 | I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1); | 479 | I915_WRITE(_PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1); |
481 | I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1); | 480 | I915_WRITE(_PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1); |
482 | I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1); | 481 | I915_WRITE(_PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1); |
483 | I915_WRITE(PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1); | 482 | I915_WRITE(_PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1); |
484 | 483 | ||
485 | I915_WRITE(FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL); | 484 | I915_WRITE(_FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL); |
486 | I915_WRITE(FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL); | 485 | I915_WRITE(_FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL); |
487 | 486 | ||
488 | I915_WRITE(PFA_CTL_1, dev_priv->savePFA_CTL_1); | 487 | I915_WRITE(_PFA_CTL_1, dev_priv->savePFA_CTL_1); |
489 | I915_WRITE(PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ); | 488 | I915_WRITE(_PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ); |
490 | I915_WRITE(PFA_WIN_POS, dev_priv->savePFA_WIN_POS); | 489 | I915_WRITE(_PFA_WIN_POS, dev_priv->savePFA_WIN_POS); |
491 | 490 | ||
492 | I915_WRITE(TRANSACONF, dev_priv->saveTRANSACONF); | 491 | I915_WRITE(_TRANSACONF, dev_priv->saveTRANSACONF); |
493 | I915_WRITE(TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A); | 492 | I915_WRITE(_TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A); |
494 | I915_WRITE(TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A); | 493 | I915_WRITE(_TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A); |
495 | I915_WRITE(TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A); | 494 | I915_WRITE(_TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A); |
496 | I915_WRITE(TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A); | 495 | I915_WRITE(_TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A); |
497 | I915_WRITE(TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A); | 496 | I915_WRITE(_TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A); |
498 | I915_WRITE(TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A); | 497 | I915_WRITE(_TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A); |
499 | } | 498 | } |
500 | 499 | ||
501 | /* Restore plane info */ | 500 | /* Restore plane info */ |
502 | I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE); | 501 | I915_WRITE(_DSPASIZE, dev_priv->saveDSPASIZE); |
503 | I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS); | 502 | I915_WRITE(_DSPAPOS, dev_priv->saveDSPAPOS); |
504 | I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC); | 503 | I915_WRITE(_PIPEASRC, dev_priv->savePIPEASRC); |
505 | I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR); | 504 | I915_WRITE(_DSPAADDR, dev_priv->saveDSPAADDR); |
506 | I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE); | 505 | I915_WRITE(_DSPASTRIDE, dev_priv->saveDSPASTRIDE); |
507 | if (INTEL_INFO(dev)->gen >= 4) { | 506 | if (INTEL_INFO(dev)->gen >= 4) { |
508 | I915_WRITE(DSPASURF, dev_priv->saveDSPASURF); | 507 | I915_WRITE(_DSPASURF, dev_priv->saveDSPASURF); |
509 | I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF); | 508 | I915_WRITE(_DSPATILEOFF, dev_priv->saveDSPATILEOFF); |
510 | } | 509 | } |
511 | 510 | ||
512 | I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF); | 511 | I915_WRITE(_PIPEACONF, dev_priv->savePIPEACONF); |
513 | 512 | ||
514 | i915_restore_palette(dev, PIPE_A); | 513 | i915_restore_palette(dev, PIPE_A); |
515 | /* Enable the plane */ | 514 | /* Enable the plane */ |
516 | I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR); | 515 | I915_WRITE(_DSPACNTR, dev_priv->saveDSPACNTR); |
517 | I915_WRITE(DSPAADDR, I915_READ(DSPAADDR)); | 516 | I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR)); |
518 | 517 | ||
519 | /* Pipe & plane B info */ | 518 | /* Pipe & plane B info */ |
520 | if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { | 519 | if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { |
@@ -530,68 +529,68 @@ static void i915_restore_modeset_reg(struct drm_device *dev) | |||
530 | POSTING_READ(dpll_b_reg); | 529 | POSTING_READ(dpll_b_reg); |
531 | udelay(150); | 530 | udelay(150); |
532 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { | 531 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { |
533 | I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); | 532 | I915_WRITE(_DPLL_B_MD, dev_priv->saveDPLL_B_MD); |
534 | POSTING_READ(DPLL_B_MD); | 533 | POSTING_READ(_DPLL_B_MD); |
535 | } | 534 | } |
536 | udelay(150); | 535 | udelay(150); |
537 | 536 | ||
538 | /* Restore mode */ | 537 | /* Restore mode */ |
539 | I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B); | 538 | I915_WRITE(_HTOTAL_B, dev_priv->saveHTOTAL_B); |
540 | I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B); | 539 | I915_WRITE(_HBLANK_B, dev_priv->saveHBLANK_B); |
541 | I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B); | 540 | I915_WRITE(_HSYNC_B, dev_priv->saveHSYNC_B); |
542 | I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B); | 541 | I915_WRITE(_VTOTAL_B, dev_priv->saveVTOTAL_B); |
543 | I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B); | 542 | I915_WRITE(_VBLANK_B, dev_priv->saveVBLANK_B); |
544 | I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B); | 543 | I915_WRITE(_VSYNC_B, dev_priv->saveVSYNC_B); |
545 | if (!HAS_PCH_SPLIT(dev)) | 544 | if (!HAS_PCH_SPLIT(dev)) |
546 | I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); | 545 | I915_WRITE(_BCLRPAT_B, dev_priv->saveBCLRPAT_B); |
547 | 546 | ||
548 | if (HAS_PCH_SPLIT(dev)) { | 547 | if (HAS_PCH_SPLIT(dev)) { |
549 | I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1); | 548 | I915_WRITE(_PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1); |
550 | I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1); | 549 | I915_WRITE(_PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1); |
551 | I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1); | 550 | I915_WRITE(_PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1); |
552 | I915_WRITE(PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1); | 551 | I915_WRITE(_PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1); |
553 | 552 | ||
554 | I915_WRITE(FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL); | 553 | I915_WRITE(_FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL); |
555 | I915_WRITE(FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL); | 554 | I915_WRITE(_FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL); |
556 | 555 | ||
557 | I915_WRITE(PFB_CTL_1, dev_priv->savePFB_CTL_1); | 556 | I915_WRITE(_PFB_CTL_1, dev_priv->savePFB_CTL_1); |
558 | I915_WRITE(PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ); | 557 | I915_WRITE(_PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ); |
559 | I915_WRITE(PFB_WIN_POS, dev_priv->savePFB_WIN_POS); | 558 | I915_WRITE(_PFB_WIN_POS, dev_priv->savePFB_WIN_POS); |
560 | 559 | ||
561 | I915_WRITE(TRANSBCONF, dev_priv->saveTRANSBCONF); | 560 | I915_WRITE(_TRANSBCONF, dev_priv->saveTRANSBCONF); |
562 | I915_WRITE(TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B); | 561 | I915_WRITE(_TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B); |
563 | I915_WRITE(TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B); | 562 | I915_WRITE(_TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B); |
564 | I915_WRITE(TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B); | 563 | I915_WRITE(_TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B); |
565 | I915_WRITE(TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B); | 564 | I915_WRITE(_TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B); |
566 | I915_WRITE(TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B); | 565 | I915_WRITE(_TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B); |
567 | I915_WRITE(TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B); | 566 | I915_WRITE(_TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B); |
568 | } | 567 | } |
569 | 568 | ||
570 | /* Restore plane info */ | 569 | /* Restore plane info */ |
571 | I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE); | 570 | I915_WRITE(_DSPBSIZE, dev_priv->saveDSPBSIZE); |
572 | I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS); | 571 | I915_WRITE(_DSPBPOS, dev_priv->saveDSPBPOS); |
573 | I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC); | 572 | I915_WRITE(_PIPEBSRC, dev_priv->savePIPEBSRC); |
574 | I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR); | 573 | I915_WRITE(_DSPBADDR, dev_priv->saveDSPBADDR); |
575 | I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); | 574 | I915_WRITE(_DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); |
576 | if (INTEL_INFO(dev)->gen >= 4) { | 575 | if (INTEL_INFO(dev)->gen >= 4) { |
577 | I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF); | 576 | I915_WRITE(_DSPBSURF, dev_priv->saveDSPBSURF); |
578 | I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); | 577 | I915_WRITE(_DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); |
579 | } | 578 | } |
580 | 579 | ||
581 | I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF); | 580 | I915_WRITE(_PIPEBCONF, dev_priv->savePIPEBCONF); |
582 | 581 | ||
583 | i915_restore_palette(dev, PIPE_B); | 582 | i915_restore_palette(dev, PIPE_B); |
584 | /* Enable the plane */ | 583 | /* Enable the plane */ |
585 | I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR); | 584 | I915_WRITE(_DSPBCNTR, dev_priv->saveDSPBCNTR); |
586 | I915_WRITE(DSPBADDR, I915_READ(DSPBADDR)); | 585 | I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR)); |
587 | 586 | ||
588 | /* Cursor state */ | 587 | /* Cursor state */ |
589 | I915_WRITE(CURAPOS, dev_priv->saveCURAPOS); | 588 | I915_WRITE(_CURAPOS, dev_priv->saveCURAPOS); |
590 | I915_WRITE(CURACNTR, dev_priv->saveCURACNTR); | 589 | I915_WRITE(_CURACNTR, dev_priv->saveCURACNTR); |
591 | I915_WRITE(CURABASE, dev_priv->saveCURABASE); | 590 | I915_WRITE(_CURABASE, dev_priv->saveCURABASE); |
592 | I915_WRITE(CURBPOS, dev_priv->saveCURBPOS); | 591 | I915_WRITE(_CURBPOS, dev_priv->saveCURBPOS); |
593 | I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR); | 592 | I915_WRITE(_CURBCNTR, dev_priv->saveCURBCNTR); |
594 | I915_WRITE(CURBBASE, dev_priv->saveCURBBASE); | 593 | I915_WRITE(_CURBBASE, dev_priv->saveCURBBASE); |
595 | if (IS_GEN2(dev)) | 594 | if (IS_GEN2(dev)) |
596 | I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); | 595 | I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); |
597 | 596 | ||
@@ -653,14 +652,14 @@ void i915_save_display(struct drm_device *dev) | |||
653 | dev_priv->saveDP_B = I915_READ(DP_B); | 652 | dev_priv->saveDP_B = I915_READ(DP_B); |
654 | dev_priv->saveDP_C = I915_READ(DP_C); | 653 | dev_priv->saveDP_C = I915_READ(DP_C); |
655 | dev_priv->saveDP_D = I915_READ(DP_D); | 654 | dev_priv->saveDP_D = I915_READ(DP_D); |
656 | dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(PIPEA_GMCH_DATA_M); | 655 | dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M); |
657 | dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(PIPEB_GMCH_DATA_M); | 656 | dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M); |
658 | dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(PIPEA_GMCH_DATA_N); | 657 | dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N); |
659 | dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(PIPEB_GMCH_DATA_N); | 658 | dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N); |
660 | dev_priv->savePIPEA_DP_LINK_M = I915_READ(PIPEA_DP_LINK_M); | 659 | dev_priv->savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M); |
661 | dev_priv->savePIPEB_DP_LINK_M = I915_READ(PIPEB_DP_LINK_M); | 660 | dev_priv->savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M); |
662 | dev_priv->savePIPEA_DP_LINK_N = I915_READ(PIPEA_DP_LINK_N); | 661 | dev_priv->savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N); |
663 | dev_priv->savePIPEB_DP_LINK_N = I915_READ(PIPEB_DP_LINK_N); | 662 | dev_priv->savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N); |
664 | } | 663 | } |
665 | /* FIXME: save TV & SDVO state */ | 664 | /* FIXME: save TV & SDVO state */ |
666 | 665 | ||
@@ -699,14 +698,14 @@ void i915_restore_display(struct drm_device *dev) | |||
699 | 698 | ||
700 | /* Display port ratios (must be done before clock is set) */ | 699 | /* Display port ratios (must be done before clock is set) */ |
701 | if (SUPPORTS_INTEGRATED_DP(dev)) { | 700 | if (SUPPORTS_INTEGRATED_DP(dev)) { |
702 | I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M); | 701 | I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M); |
703 | I915_WRITE(PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M); | 702 | I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M); |
704 | I915_WRITE(PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N); | 703 | I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N); |
705 | I915_WRITE(PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N); | 704 | I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N); |
706 | I915_WRITE(PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M); | 705 | I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M); |
707 | I915_WRITE(PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M); | 706 | I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M); |
708 | I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N); | 707 | I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N); |
709 | I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N); | 708 | I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N); |
710 | } | 709 | } |
711 | 710 | ||
712 | /* This is only meaningful in non-KMS mode */ | 711 | /* This is only meaningful in non-KMS mode */ |
@@ -797,9 +796,6 @@ int i915_save_state(struct drm_device *dev) | |||
797 | 796 | ||
798 | pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); | 797 | pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); |
799 | 798 | ||
800 | /* Hardware status page */ | ||
801 | dev_priv->saveHWS = I915_READ(HWS_PGA); | ||
802 | |||
803 | i915_save_display(dev); | 799 | i915_save_display(dev); |
804 | 800 | ||
805 | /* Interrupt state */ | 801 | /* Interrupt state */ |
@@ -808,8 +804,8 @@ int i915_save_state(struct drm_device *dev) | |||
808 | dev_priv->saveDEIMR = I915_READ(DEIMR); | 804 | dev_priv->saveDEIMR = I915_READ(DEIMR); |
809 | dev_priv->saveGTIER = I915_READ(GTIER); | 805 | dev_priv->saveGTIER = I915_READ(GTIER); |
810 | dev_priv->saveGTIMR = I915_READ(GTIMR); | 806 | dev_priv->saveGTIMR = I915_READ(GTIMR); |
811 | dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR); | 807 | dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR); |
812 | dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR); | 808 | dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR); |
813 | dev_priv->saveMCHBAR_RENDER_STANDBY = | 809 | dev_priv->saveMCHBAR_RENDER_STANDBY = |
814 | I915_READ(RSTDBYCTL); | 810 | I915_READ(RSTDBYCTL); |
815 | } else { | 811 | } else { |
@@ -846,9 +842,6 @@ int i915_restore_state(struct drm_device *dev) | |||
846 | 842 | ||
847 | pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); | 843 | pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); |
848 | 844 | ||
849 | /* Hardware status page */ | ||
850 | I915_WRITE(HWS_PGA, dev_priv->saveHWS); | ||
851 | |||
852 | i915_restore_display(dev); | 845 | i915_restore_display(dev); |
853 | 846 | ||
854 | /* Interrupt state */ | 847 | /* Interrupt state */ |
@@ -857,11 +850,11 @@ int i915_restore_state(struct drm_device *dev) | |||
857 | I915_WRITE(DEIMR, dev_priv->saveDEIMR); | 850 | I915_WRITE(DEIMR, dev_priv->saveDEIMR); |
858 | I915_WRITE(GTIER, dev_priv->saveGTIER); | 851 | I915_WRITE(GTIER, dev_priv->saveGTIER); |
859 | I915_WRITE(GTIMR, dev_priv->saveGTIMR); | 852 | I915_WRITE(GTIMR, dev_priv->saveGTIMR); |
860 | I915_WRITE(FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR); | 853 | I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR); |
861 | I915_WRITE(FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR); | 854 | I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR); |
862 | } else { | 855 | } else { |
863 | I915_WRITE (IER, dev_priv->saveIER); | 856 | I915_WRITE(IER, dev_priv->saveIER); |
864 | I915_WRITE (IMR, dev_priv->saveIMR); | 857 | I915_WRITE(IMR, dev_priv->saveIMR); |
865 | } | 858 | } |
866 | 859 | ||
867 | /* Clock gating state */ | 860 | /* Clock gating state */ |
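The suspend/resume hunks above follow a register renaming scheme: pipe-specific register names gain a leading underscore (for example _HTOTAL_B, _PIPEACONF) so that pipe-generic code can go through parameterized lookups while fixed-pipe paths such as this save/restore code keep the raw names. A minimal sketch of how such lookups are commonly built, assuming a _PIPE() helper; the authoritative definitions live in i915_reg.h and may differ:

/* Illustrative only: compute a per-pipe register from the pipe A/B
 * offsets so callers can index by pipe instead of open-coding both. */
#define _PIPE(pipe, a, b)	((a) + (pipe) * ((b) - (a)))

#define HTOTAL(pipe)		_PIPE(pipe, _HTOTAL_A, _HTOTAL_B)
#define PIPECONF(pipe)		_PIPE(pipe, _PIPEACONF, _PIPEBCONF)
#define DSPCNTR(plane)		_PIPE(plane, _DSPACNTR, _DSPBCNTR)

/* Pipe-generic usage, e.g. in mode-setting code: */
I915_WRITE(PIPECONF(pipe), I915_READ(PIPECONF(pipe)) | PIPECONF_ENABLE);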
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 7f0fc3ed61aa..d623fefbfaca 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h | |||
@@ -7,6 +7,7 @@ | |||
7 | 7 | ||
8 | #include <drm/drmP.h> | 8 | #include <drm/drmP.h> |
9 | #include "i915_drv.h" | 9 | #include "i915_drv.h" |
10 | #include "intel_ringbuffer.h" | ||
10 | 11 | ||
11 | #undef TRACE_SYSTEM | 12 | #undef TRACE_SYSTEM |
12 | #define TRACE_SYSTEM i915 | 13 | #define TRACE_SYSTEM i915 |
@@ -16,9 +17,7 @@ | |||
16 | /* object tracking */ | 17 | /* object tracking */ |
17 | 18 | ||
18 | TRACE_EVENT(i915_gem_object_create, | 19 | TRACE_EVENT(i915_gem_object_create, |
19 | |||
20 | TP_PROTO(struct drm_i915_gem_object *obj), | 20 | TP_PROTO(struct drm_i915_gem_object *obj), |
21 | |||
22 | TP_ARGS(obj), | 21 | TP_ARGS(obj), |
23 | 22 | ||
24 | TP_STRUCT__entry( | 23 | TP_STRUCT__entry( |
@@ -35,33 +34,51 @@ TRACE_EVENT(i915_gem_object_create, | |||
35 | ); | 34 | ); |
36 | 35 | ||
37 | TRACE_EVENT(i915_gem_object_bind, | 36 | TRACE_EVENT(i915_gem_object_bind, |
38 | 37 | TP_PROTO(struct drm_i915_gem_object *obj, bool mappable), | |
39 | TP_PROTO(struct drm_i915_gem_object *obj, u32 gtt_offset, bool mappable), | 38 | TP_ARGS(obj, mappable), |
40 | |||
41 | TP_ARGS(obj, gtt_offset, mappable), | ||
42 | 39 | ||
43 | TP_STRUCT__entry( | 40 | TP_STRUCT__entry( |
44 | __field(struct drm_i915_gem_object *, obj) | 41 | __field(struct drm_i915_gem_object *, obj) |
45 | __field(u32, gtt_offset) | 42 | __field(u32, offset) |
43 | __field(u32, size) | ||
46 | __field(bool, mappable) | 44 | __field(bool, mappable) |
47 | ), | 45 | ), |
48 | 46 | ||
49 | TP_fast_assign( | 47 | TP_fast_assign( |
50 | __entry->obj = obj; | 48 | __entry->obj = obj; |
51 | __entry->gtt_offset = gtt_offset; | 49 | __entry->offset = obj->gtt_space->start; |
50 | __entry->size = obj->gtt_space->size; | ||
52 | __entry->mappable = mappable; | 51 | __entry->mappable = mappable; |
53 | ), | 52 | ), |
54 | 53 | ||
55 | TP_printk("obj=%p, gtt_offset=%08x%s", | 54 | TP_printk("obj=%p, offset=%08x size=%x%s", |
56 | __entry->obj, __entry->gtt_offset, | 55 | __entry->obj, __entry->offset, __entry->size, |
57 | __entry->mappable ? ", mappable" : "") | 56 | __entry->mappable ? ", mappable" : "") |
58 | ); | 57 | ); |
59 | 58 | ||
60 | TRACE_EVENT(i915_gem_object_change_domain, | 59 | TRACE_EVENT(i915_gem_object_unbind, |
60 | TP_PROTO(struct drm_i915_gem_object *obj), | ||
61 | TP_ARGS(obj), | ||
62 | |||
63 | TP_STRUCT__entry( | ||
64 | __field(struct drm_i915_gem_object *, obj) | ||
65 | __field(u32, offset) | ||
66 | __field(u32, size) | ||
67 | ), | ||
61 | 68 | ||
62 | TP_PROTO(struct drm_i915_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain), | 69 | TP_fast_assign( |
70 | __entry->obj = obj; | ||
71 | __entry->offset = obj->gtt_space->start; | ||
72 | __entry->size = obj->gtt_space->size; | ||
73 | ), | ||
63 | 74 | ||
64 | TP_ARGS(obj, old_read_domains, old_write_domain), | 75 | TP_printk("obj=%p, offset=%08x size=%x", |
76 | __entry->obj, __entry->offset, __entry->size) | ||
77 | ); | ||
78 | |||
79 | TRACE_EVENT(i915_gem_object_change_domain, | ||
80 | TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write), | ||
81 | TP_ARGS(obj, old_read, old_write), | ||
65 | 82 | ||
66 | TP_STRUCT__entry( | 83 | TP_STRUCT__entry( |
67 | __field(struct drm_i915_gem_object *, obj) | 84 | __field(struct drm_i915_gem_object *, obj) |
@@ -71,177 +88,264 @@ TRACE_EVENT(i915_gem_object_change_domain, | |||
71 | 88 | ||
72 | TP_fast_assign( | 89 | TP_fast_assign( |
73 | __entry->obj = obj; | 90 | __entry->obj = obj; |
74 | __entry->read_domains = obj->base.read_domains | (old_read_domains << 16); | 91 | __entry->read_domains = obj->base.read_domains | (old_read << 16); |
75 | __entry->write_domain = obj->base.write_domain | (old_write_domain << 16); | 92 | __entry->write_domain = obj->base.write_domain | (old_write << 16); |
76 | ), | 93 | ), |
77 | 94 | ||
78 | TP_printk("obj=%p, read=%04x, write=%04x", | 95 | TP_printk("obj=%p, read=%02x=>%02x, write=%02x=>%02x", |
79 | __entry->obj, | 96 | __entry->obj, |
80 | __entry->read_domains, __entry->write_domain) | 97 | __entry->read_domains >> 16, |
98 | __entry->read_domains & 0xffff, | ||
99 | __entry->write_domain >> 16, | ||
100 | __entry->write_domain & 0xffff) | ||
81 | ); | 101 | ); |
82 | 102 | ||
83 | DECLARE_EVENT_CLASS(i915_gem_object, | 103 | TRACE_EVENT(i915_gem_object_pwrite, |
104 | TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len), | ||
105 | TP_ARGS(obj, offset, len), | ||
84 | 106 | ||
85 | TP_PROTO(struct drm_i915_gem_object *obj), | 107 | TP_STRUCT__entry( |
108 | __field(struct drm_i915_gem_object *, obj) | ||
109 | __field(u32, offset) | ||
110 | __field(u32, len) | ||
111 | ), | ||
86 | 112 | ||
87 | TP_ARGS(obj), | 113 | TP_fast_assign( |
114 | __entry->obj = obj; | ||
115 | __entry->offset = offset; | ||
116 | __entry->len = len; | ||
117 | ), | ||
118 | |||
119 | TP_printk("obj=%p, offset=%u, len=%u", | ||
120 | __entry->obj, __entry->offset, __entry->len) | ||
121 | ); | ||
122 | |||
123 | TRACE_EVENT(i915_gem_object_pread, | ||
124 | TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len), | ||
125 | TP_ARGS(obj, offset, len), | ||
88 | 126 | ||
89 | TP_STRUCT__entry( | 127 | TP_STRUCT__entry( |
90 | __field(struct drm_i915_gem_object *, obj) | 128 | __field(struct drm_i915_gem_object *, obj) |
129 | __field(u32, offset) | ||
130 | __field(u32, len) | ||
91 | ), | 131 | ), |
92 | 132 | ||
93 | TP_fast_assign( | 133 | TP_fast_assign( |
94 | __entry->obj = obj; | 134 | __entry->obj = obj; |
135 | __entry->offset = offset; | ||
136 | __entry->len = len; | ||
95 | ), | 137 | ), |
96 | 138 | ||
97 | TP_printk("obj=%p", __entry->obj) | 139 | TP_printk("obj=%p, offset=%u, len=%u", |
140 | __entry->obj, __entry->offset, __entry->len) | ||
98 | ); | 141 | ); |
99 | 142 | ||
100 | DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush, | 143 | TRACE_EVENT(i915_gem_object_fault, |
144 | TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write), | ||
145 | TP_ARGS(obj, index, gtt, write), | ||
146 | |||
147 | TP_STRUCT__entry( | ||
148 | __field(struct drm_i915_gem_object *, obj) | ||
149 | __field(u32, index) | ||
150 | __field(bool, gtt) | ||
151 | __field(bool, write) | ||
152 | ), | ||
153 | |||
154 | TP_fast_assign( | ||
155 | __entry->obj = obj; | ||
156 | __entry->index = index; | ||
157 | __entry->gtt = gtt; | ||
158 | __entry->write = write; | ||
159 | ), | ||
101 | 160 | ||
161 | TP_printk("obj=%p, %s index=%u %s", | ||
162 | __entry->obj, | ||
163 | __entry->gtt ? "GTT" : "CPU", | ||
164 | __entry->index, | ||
165 | __entry->write ? ", writable" : "") | ||
166 | ); | ||
167 | |||
168 | DECLARE_EVENT_CLASS(i915_gem_object, | ||
102 | TP_PROTO(struct drm_i915_gem_object *obj), | 169 | TP_PROTO(struct drm_i915_gem_object *obj), |
170 | TP_ARGS(obj), | ||
103 | 171 | ||
104 | TP_ARGS(obj) | 172 | TP_STRUCT__entry( |
173 | __field(struct drm_i915_gem_object *, obj) | ||
174 | ), | ||
175 | |||
176 | TP_fast_assign( | ||
177 | __entry->obj = obj; | ||
178 | ), | ||
179 | |||
180 | TP_printk("obj=%p", __entry->obj) | ||
105 | ); | 181 | ); |
106 | 182 | ||
107 | DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind, | 183 | DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush, |
184 | TP_PROTO(struct drm_i915_gem_object *obj), | ||
185 | TP_ARGS(obj) | ||
186 | ); | ||
108 | 187 | ||
188 | DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy, | ||
109 | TP_PROTO(struct drm_i915_gem_object *obj), | 189 | TP_PROTO(struct drm_i915_gem_object *obj), |
110 | |||
111 | TP_ARGS(obj) | 190 | TP_ARGS(obj) |
112 | ); | 191 | ); |
113 | 192 | ||
114 | DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy, | 193 | TRACE_EVENT(i915_gem_evict, |
194 | TP_PROTO(struct drm_device *dev, u32 size, u32 align, bool mappable), | ||
195 | TP_ARGS(dev, size, align, mappable), | ||
115 | 196 | ||
116 | TP_PROTO(struct drm_i915_gem_object *obj), | 197 | TP_STRUCT__entry( |
198 | __field(u32, dev) | ||
199 | __field(u32, size) | ||
200 | __field(u32, align) | ||
201 | __field(bool, mappable) | ||
202 | ), | ||
117 | 203 | ||
118 | TP_ARGS(obj) | 204 | TP_fast_assign( |
205 | __entry->dev = dev->primary->index; | ||
206 | __entry->size = size; | ||
207 | __entry->align = align; | ||
208 | __entry->mappable = mappable; | ||
209 | ), | ||
210 | |||
211 | TP_printk("dev=%d, size=%d, align=%d %s", | ||
212 | __entry->dev, __entry->size, __entry->align, | ||
213 | __entry->mappable ? ", mappable" : "") | ||
119 | ); | 214 | ); |
120 | 215 | ||
121 | /* batch tracing */ | 216 | TRACE_EVENT(i915_gem_evict_everything, |
217 | TP_PROTO(struct drm_device *dev, bool purgeable), | ||
218 | TP_ARGS(dev, purgeable), | ||
122 | 219 | ||
123 | TRACE_EVENT(i915_gem_request_submit, | 220 | TP_STRUCT__entry( |
221 | __field(u32, dev) | ||
222 | __field(bool, purgeable) | ||
223 | ), | ||
224 | |||
225 | TP_fast_assign( | ||
226 | __entry->dev = dev->primary->index; | ||
227 | __entry->purgeable = purgeable; | ||
228 | ), | ||
124 | 229 | ||
125 | TP_PROTO(struct drm_device *dev, u32 seqno), | 230 | TP_printk("dev=%d%s", |
231 | __entry->dev, | ||
232 | __entry->purgeable ? ", purgeable only" : "") | ||
233 | ); | ||
126 | 234 | ||
127 | TP_ARGS(dev, seqno), | 235 | TRACE_EVENT(i915_gem_ring_dispatch, |
236 | TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), | ||
237 | TP_ARGS(ring, seqno), | ||
128 | 238 | ||
129 | TP_STRUCT__entry( | 239 | TP_STRUCT__entry( |
130 | __field(u32, dev) | 240 | __field(u32, dev) |
241 | __field(u32, ring) | ||
131 | __field(u32, seqno) | 242 | __field(u32, seqno) |
132 | ), | 243 | ), |
133 | 244 | ||
134 | TP_fast_assign( | 245 | TP_fast_assign( |
135 | __entry->dev = dev->primary->index; | 246 | __entry->dev = ring->dev->primary->index; |
247 | __entry->ring = ring->id; | ||
136 | __entry->seqno = seqno; | 248 | __entry->seqno = seqno; |
137 | i915_trace_irq_get(dev, seqno); | 249 | i915_trace_irq_get(ring, seqno); |
138 | ), | 250 | ), |
139 | 251 | ||
140 | TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) | 252 | TP_printk("dev=%u, ring=%u, seqno=%u", |
253 | __entry->dev, __entry->ring, __entry->seqno) | ||
141 | ); | 254 | ); |
142 | 255 | ||
143 | TRACE_EVENT(i915_gem_request_flush, | 256 | TRACE_EVENT(i915_gem_ring_flush, |
144 | 257 | TP_PROTO(struct intel_ring_buffer *ring, u32 invalidate, u32 flush), | |
145 | TP_PROTO(struct drm_device *dev, u32 seqno, | 258 | TP_ARGS(ring, invalidate, flush), |
146 | u32 flush_domains, u32 invalidate_domains), | ||
147 | |||
148 | TP_ARGS(dev, seqno, flush_domains, invalidate_domains), | ||
149 | 259 | ||
150 | TP_STRUCT__entry( | 260 | TP_STRUCT__entry( |
151 | __field(u32, dev) | 261 | __field(u32, dev) |
152 | __field(u32, seqno) | 262 | __field(u32, ring) |
153 | __field(u32, flush_domains) | 263 | __field(u32, invalidate) |
154 | __field(u32, invalidate_domains) | 264 | __field(u32, flush) |
155 | ), | 265 | ), |
156 | 266 | ||
157 | TP_fast_assign( | 267 | TP_fast_assign( |
158 | __entry->dev = dev->primary->index; | 268 | __entry->dev = ring->dev->primary->index; |
159 | __entry->seqno = seqno; | 269 | __entry->ring = ring->id; |
160 | __entry->flush_domains = flush_domains; | 270 | __entry->invalidate = invalidate; |
161 | __entry->invalidate_domains = invalidate_domains; | 271 | __entry->flush = flush; |
162 | ), | 272 | ), |
163 | 273 | ||
164 | TP_printk("dev=%u, seqno=%u, flush=%04x, invalidate=%04x", | 274 | TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x", |
165 | __entry->dev, __entry->seqno, | 275 | __entry->dev, __entry->ring, |
166 | __entry->flush_domains, __entry->invalidate_domains) | 276 | __entry->invalidate, __entry->flush) |
167 | ); | 277 | ); |
168 | 278 | ||
169 | DECLARE_EVENT_CLASS(i915_gem_request, | 279 | DECLARE_EVENT_CLASS(i915_gem_request, |
170 | 280 | TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), | |
171 | TP_PROTO(struct drm_device *dev, u32 seqno), | 281 | TP_ARGS(ring, seqno), |
172 | |||
173 | TP_ARGS(dev, seqno), | ||
174 | 282 | ||
175 | TP_STRUCT__entry( | 283 | TP_STRUCT__entry( |
176 | __field(u32, dev) | 284 | __field(u32, dev) |
285 | __field(u32, ring) | ||
177 | __field(u32, seqno) | 286 | __field(u32, seqno) |
178 | ), | 287 | ), |
179 | 288 | ||
180 | TP_fast_assign( | 289 | TP_fast_assign( |
181 | __entry->dev = dev->primary->index; | 290 | __entry->dev = ring->dev->primary->index; |
291 | __entry->ring = ring->id; | ||
182 | __entry->seqno = seqno; | 292 | __entry->seqno = seqno; |
183 | ), | 293 | ), |
184 | 294 | ||
185 | TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) | 295 | TP_printk("dev=%u, ring=%u, seqno=%u", |
296 | __entry->dev, __entry->ring, __entry->seqno) | ||
186 | ); | 297 | ); |
187 | 298 | ||
188 | DEFINE_EVENT(i915_gem_request, i915_gem_request_complete, | 299 | DEFINE_EVENT(i915_gem_request, i915_gem_request_add, |
189 | 300 | TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), | |
190 | TP_PROTO(struct drm_device *dev, u32 seqno), | 301 | TP_ARGS(ring, seqno) |
302 | ); | ||
191 | 303 | ||
192 | TP_ARGS(dev, seqno) | 304 | DEFINE_EVENT(i915_gem_request, i915_gem_request_complete, |
305 | TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), | ||
306 | TP_ARGS(ring, seqno) | ||
193 | ); | 307 | ); |
194 | 308 | ||
195 | DEFINE_EVENT(i915_gem_request, i915_gem_request_retire, | 309 | DEFINE_EVENT(i915_gem_request, i915_gem_request_retire, |
196 | 310 | TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), | |
197 | TP_PROTO(struct drm_device *dev, u32 seqno), | 311 | TP_ARGS(ring, seqno) |
198 | |||
199 | TP_ARGS(dev, seqno) | ||
200 | ); | 312 | ); |
201 | 313 | ||
202 | DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin, | 314 | DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin, |
203 | 315 | TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), | |
204 | TP_PROTO(struct drm_device *dev, u32 seqno), | 316 | TP_ARGS(ring, seqno) |
205 | |||
206 | TP_ARGS(dev, seqno) | ||
207 | ); | 317 | ); |
208 | 318 | ||
209 | DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end, | 319 | DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end, |
210 | 320 | TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), | |
211 | TP_PROTO(struct drm_device *dev, u32 seqno), | 321 | TP_ARGS(ring, seqno) |
212 | |||
213 | TP_ARGS(dev, seqno) | ||
214 | ); | 322 | ); |
215 | 323 | ||
216 | DECLARE_EVENT_CLASS(i915_ring, | 324 | DECLARE_EVENT_CLASS(i915_ring, |
217 | 325 | TP_PROTO(struct intel_ring_buffer *ring), | |
218 | TP_PROTO(struct drm_device *dev), | 326 | TP_ARGS(ring), |
219 | |||
220 | TP_ARGS(dev), | ||
221 | 327 | ||
222 | TP_STRUCT__entry( | 328 | TP_STRUCT__entry( |
223 | __field(u32, dev) | 329 | __field(u32, dev) |
330 | __field(u32, ring) | ||
224 | ), | 331 | ), |
225 | 332 | ||
226 | TP_fast_assign( | 333 | TP_fast_assign( |
227 | __entry->dev = dev->primary->index; | 334 | __entry->dev = ring->dev->primary->index; |
335 | __entry->ring = ring->id; | ||
228 | ), | 336 | ), |
229 | 337 | ||
230 | TP_printk("dev=%u", __entry->dev) | 338 | TP_printk("dev=%u, ring=%u", __entry->dev, __entry->ring) |
231 | ); | 339 | ); |
232 | 340 | ||
233 | DEFINE_EVENT(i915_ring, i915_ring_wait_begin, | 341 | DEFINE_EVENT(i915_ring, i915_ring_wait_begin, |
234 | 342 | TP_PROTO(struct intel_ring_buffer *ring), | |
235 | TP_PROTO(struct drm_device *dev), | 343 | TP_ARGS(ring) |
236 | |||
237 | TP_ARGS(dev) | ||
238 | ); | 344 | ); |
239 | 345 | ||
240 | DEFINE_EVENT(i915_ring, i915_ring_wait_end, | 346 | DEFINE_EVENT(i915_ring, i915_ring_wait_end, |
241 | 347 | TP_PROTO(struct intel_ring_buffer *ring), | |
242 | TP_PROTO(struct drm_device *dev), | 348 | TP_ARGS(ring) |
243 | |||
244 | TP_ARGS(dev) | ||
245 | ); | 349 | ); |
246 | 350 | ||
247 | TRACE_EVENT(i915_flip_request, | 351 | TRACE_EVENT(i915_flip_request, |
@@ -281,26 +385,29 @@ TRACE_EVENT(i915_flip_complete, | |||
281 | ); | 385 | ); |
282 | 386 | ||
283 | TRACE_EVENT(i915_reg_rw, | 387 | TRACE_EVENT(i915_reg_rw, |
284 | TP_PROTO(int cmd, uint32_t reg, uint64_t val, int len), | 388 | TP_PROTO(bool write, u32 reg, u64 val, int len), |
285 | 389 | ||
286 | TP_ARGS(cmd, reg, val, len), | 390 | TP_ARGS(write, reg, val, len), |
287 | 391 | ||
288 | TP_STRUCT__entry( | 392 | TP_STRUCT__entry( |
289 | __field(int, cmd) | 393 | __field(u64, val) |
290 | __field(uint32_t, reg) | 394 | __field(u32, reg) |
291 | __field(uint64_t, val) | 395 | __field(u16, write) |
292 | __field(int, len) | 396 | __field(u16, len) |
293 | ), | 397 | ), |
294 | 398 | ||
295 | TP_fast_assign( | 399 | TP_fast_assign( |
296 | __entry->cmd = cmd; | 400 | __entry->val = (u64)val; |
297 | __entry->reg = reg; | 401 | __entry->reg = reg; |
298 | __entry->val = (uint64_t)val; | 402 | __entry->write = write; |
299 | __entry->len = len; | 403 | __entry->len = len; |
300 | ), | 404 | ), |
301 | 405 | ||
302 | TP_printk("cmd=%c, reg=0x%x, val=0x%llx, len=%d", | 406 | TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)", |
303 | __entry->cmd, __entry->reg, __entry->val, __entry->len) | 407 | __entry->write ? "write" : "read", |
408 | __entry->reg, __entry->len, | ||
409 | (u32)(__entry->val & 0xffffffff), | ||
410 | (u32)(__entry->val >> 32)) | ||
304 | ); | 411 | ); |
305 | 412 | ||
306 | #endif /* _I915_TRACE_H_ */ | 413 | #endif /* _I915_TRACE_H_ */ |
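The tracepoint rework above moves every GEM event from a per-device to a per-ring key and factors the shared fields into DECLARE_EVENT_CLASS. One payoff is that adding another request-style event no longer needs TP_STRUCT/TP_fast_assign boilerplate; the event name below is hypothetical, shown only to illustrate the pattern, and the trace_<name>() emitter it generates follows the standard tracepoint convention.

/* Hypothetical event reusing the i915_gem_request class declared above. */
DEFINE_EVENT(i915_gem_request, i915_gem_request_example,
	     TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
	     TP_ARGS(ring, seqno)
);

/* Call-site sketch: the generated helper is a near no-op unless the
 * event has been enabled through ftrace or perf. */
trace_i915_gem_request_example(ring, seqno);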
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 0b44956c336b..fb5b4d426ae0 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -226,29 +226,49 @@ static void | |||
226 | parse_sdvo_panel_data(struct drm_i915_private *dev_priv, | 226 | parse_sdvo_panel_data(struct drm_i915_private *dev_priv, |
227 | struct bdb_header *bdb) | 227 | struct bdb_header *bdb) |
228 | { | 228 | { |
229 | struct bdb_sdvo_lvds_options *sdvo_lvds_options; | ||
230 | struct lvds_dvo_timing *dvo_timing; | 229 | struct lvds_dvo_timing *dvo_timing; |
231 | struct drm_display_mode *panel_fixed_mode; | 230 | struct drm_display_mode *panel_fixed_mode; |
231 | int index; | ||
232 | 232 | ||
233 | sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS); | 233 | index = i915_vbt_sdvo_panel_type; |
234 | if (!sdvo_lvds_options) | 234 | if (index == -1) { |
235 | return; | 235 | struct bdb_sdvo_lvds_options *sdvo_lvds_options; |
236 | |||
237 | sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS); | ||
238 | if (!sdvo_lvds_options) | ||
239 | return; | ||
240 | |||
241 | index = sdvo_lvds_options->panel_type; | ||
242 | } | ||
236 | 243 | ||
237 | dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS); | 244 | dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS); |
238 | if (!dvo_timing) | 245 | if (!dvo_timing) |
239 | return; | 246 | return; |
240 | 247 | ||
241 | panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); | 248 | panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); |
242 | |||
243 | if (!panel_fixed_mode) | 249 | if (!panel_fixed_mode) |
244 | return; | 250 | return; |
245 | 251 | ||
246 | fill_detail_timing_data(panel_fixed_mode, | 252 | fill_detail_timing_data(panel_fixed_mode, dvo_timing + index); |
247 | dvo_timing + sdvo_lvds_options->panel_type); | ||
248 | 253 | ||
249 | dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode; | 254 | dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode; |
250 | 255 | ||
251 | return; | 256 | DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n"); |
257 | drm_mode_debug_printmodeline(panel_fixed_mode); | ||
258 | } | ||
259 | |||
260 | static int intel_bios_ssc_frequency(struct drm_device *dev, | ||
261 | bool alternate) | ||
262 | { | ||
263 | switch (INTEL_INFO(dev)->gen) { | ||
264 | case 2: | ||
265 | return alternate ? 66 : 48; | ||
266 | case 3: | ||
267 | case 4: | ||
268 | return alternate ? 100 : 96; | ||
269 | default: | ||
270 | return alternate ? 100 : 120; | ||
271 | } | ||
252 | } | 272 | } |
253 | 273 | ||
254 | static void | 274 | static void |
@@ -263,13 +283,8 @@ parse_general_features(struct drm_i915_private *dev_priv, | |||
263 | dev_priv->int_tv_support = general->int_tv_support; | 283 | dev_priv->int_tv_support = general->int_tv_support; |
264 | dev_priv->int_crt_support = general->int_crt_support; | 284 | dev_priv->int_crt_support = general->int_crt_support; |
265 | dev_priv->lvds_use_ssc = general->enable_ssc; | 285 | dev_priv->lvds_use_ssc = general->enable_ssc; |
266 | 286 | dev_priv->lvds_ssc_freq = | |
267 | if (IS_I85X(dev)) | 287 | intel_bios_ssc_frequency(dev, general->ssc_freq); |
268 | dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48; | ||
269 | else if (IS_GEN5(dev) || IS_GEN6(dev)) | ||
270 | dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 120; | ||
271 | else | ||
272 | dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96; | ||
273 | } | 288 | } |
274 | } | 289 | } |
275 | 290 | ||
@@ -553,6 +568,8 @@ parse_device_mapping(struct drm_i915_private *dev_priv, | |||
553 | static void | 568 | static void |
554 | init_vbt_defaults(struct drm_i915_private *dev_priv) | 569 | init_vbt_defaults(struct drm_i915_private *dev_priv) |
555 | { | 570 | { |
571 | struct drm_device *dev = dev_priv->dev; | ||
572 | |||
556 | dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC; | 573 | dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC; |
557 | 574 | ||
558 | /* LFP panel data */ | 575 | /* LFP panel data */ |
@@ -565,7 +582,11 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) | |||
565 | /* general features */ | 582 | /* general features */ |
566 | dev_priv->int_tv_support = 1; | 583 | dev_priv->int_tv_support = 1; |
567 | dev_priv->int_crt_support = 1; | 584 | dev_priv->int_crt_support = 1; |
568 | dev_priv->lvds_use_ssc = 0; | 585 | |
586 | /* Default to using SSC */ | ||
587 | dev_priv->lvds_use_ssc = 1; | ||
588 | dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1); | ||
589 | DRM_DEBUG("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq); | ||
569 | 590 | ||
570 | /* eDP data */ | 591 | /* eDP data */ |
571 | dev_priv->edp.bpp = 18; | 592 | dev_priv->edp.bpp = 18; |
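parse_sdvo_panel_data() above now honors an i915_vbt_sdvo_panel_type override before falling back to the panel_type stored in the VBT. A sketch of how such a knob is usually declared is below; the exact declaration, permissions and description live in i915_drv.c and may differ.

/* Assumed declaration: -1 keeps the VBT-provided panel type, any other
 * value overrides the index used for the SDVO panel DTD. */
int i915_vbt_sdvo_panel_type = -1;
module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
MODULE_PARM_DESC(vbt_sdvo_panel_type,
		 "Override selection of SDVO panel mode in the VBT (-1 = use VBT)");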
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 8a77ff4a7237..8342259f3160 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -129,10 +129,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, | |||
129 | u32 adpa, dpll_md; | 129 | u32 adpa, dpll_md; |
130 | u32 adpa_reg; | 130 | u32 adpa_reg; |
131 | 131 | ||
132 | if (intel_crtc->pipe == 0) | 132 | dpll_md_reg = DPLL_MD(intel_crtc->pipe); |
133 | dpll_md_reg = DPLL_A_MD; | ||
134 | else | ||
135 | dpll_md_reg = DPLL_B_MD; | ||
136 | 133 | ||
137 | if (HAS_PCH_SPLIT(dev)) | 134 | if (HAS_PCH_SPLIT(dev)) |
138 | adpa_reg = PCH_ADPA; | 135 | adpa_reg = PCH_ADPA; |
@@ -160,17 +157,16 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, | |||
160 | adpa |= PORT_TRANS_A_SEL_CPT; | 157 | adpa |= PORT_TRANS_A_SEL_CPT; |
161 | else | 158 | else |
162 | adpa |= ADPA_PIPE_A_SELECT; | 159 | adpa |= ADPA_PIPE_A_SELECT; |
163 | if (!HAS_PCH_SPLIT(dev)) | ||
164 | I915_WRITE(BCLRPAT_A, 0); | ||
165 | } else { | 160 | } else { |
166 | if (HAS_PCH_CPT(dev)) | 161 | if (HAS_PCH_CPT(dev)) |
167 | adpa |= PORT_TRANS_B_SEL_CPT; | 162 | adpa |= PORT_TRANS_B_SEL_CPT; |
168 | else | 163 | else |
169 | adpa |= ADPA_PIPE_B_SELECT; | 164 | adpa |= ADPA_PIPE_B_SELECT; |
170 | if (!HAS_PCH_SPLIT(dev)) | ||
171 | I915_WRITE(BCLRPAT_B, 0); | ||
172 | } | 165 | } |
173 | 166 | ||
167 | if (!HAS_PCH_SPLIT(dev)) | ||
168 | I915_WRITE(BCLRPAT(intel_crtc->pipe), 0); | ||
169 | |||
174 | I915_WRITE(adpa_reg, adpa); | 170 | I915_WRITE(adpa_reg, adpa); |
175 | } | 171 | } |
176 | 172 | ||
@@ -353,21 +349,12 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_crt *crt) | |||
353 | 349 | ||
354 | DRM_DEBUG_KMS("starting load-detect on CRT\n"); | 350 | DRM_DEBUG_KMS("starting load-detect on CRT\n"); |
355 | 351 | ||
356 | if (pipe == 0) { | 352 | bclrpat_reg = BCLRPAT(pipe); |
357 | bclrpat_reg = BCLRPAT_A; | 353 | vtotal_reg = VTOTAL(pipe); |
358 | vtotal_reg = VTOTAL_A; | 354 | vblank_reg = VBLANK(pipe); |
359 | vblank_reg = VBLANK_A; | 355 | vsync_reg = VSYNC(pipe); |
360 | vsync_reg = VSYNC_A; | 356 | pipeconf_reg = PIPECONF(pipe); |
361 | pipeconf_reg = PIPEACONF; | 357 | pipe_dsl_reg = PIPEDSL(pipe); |
362 | pipe_dsl_reg = PIPEADSL; | ||
363 | } else { | ||
364 | bclrpat_reg = BCLRPAT_B; | ||
365 | vtotal_reg = VTOTAL_B; | ||
366 | vblank_reg = VBLANK_B; | ||
367 | vsync_reg = VSYNC_B; | ||
368 | pipeconf_reg = PIPEBCONF; | ||
369 | pipe_dsl_reg = PIPEBDSL; | ||
370 | } | ||
371 | 358 | ||
372 | save_bclrpat = I915_READ(bclrpat_reg); | 359 | save_bclrpat = I915_READ(bclrpat_reg); |
373 | save_vtotal = I915_READ(vtotal_reg); | 360 | save_vtotal = I915_READ(vtotal_reg); |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 7e42aa586504..3106c0dc8389 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -989,7 +989,7 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
989 | void intel_wait_for_vblank(struct drm_device *dev, int pipe) | 989 | void intel_wait_for_vblank(struct drm_device *dev, int pipe) |
990 | { | 990 | { |
991 | struct drm_i915_private *dev_priv = dev->dev_private; | 991 | struct drm_i915_private *dev_priv = dev->dev_private; |
992 | int pipestat_reg = (pipe == 0 ? PIPEASTAT : PIPEBSTAT); | 992 | int pipestat_reg = PIPESTAT(pipe); |
993 | 993 | ||
994 | /* Clear existing vblank status. Note this will clear any other | 994 | /* Clear existing vblank status. Note this will clear any other |
995 | * sticky status fields as well. | 995 | * sticky status fields as well. |
@@ -1058,6 +1058,612 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) | |||
1058 | } | 1058 | } |
1059 | } | 1059 | } |
1060 | 1060 | ||
1061 | static const char *state_string(bool enabled) | ||
1062 | { | ||
1063 | return enabled ? "on" : "off"; | ||
1064 | } | ||
1065 | |||
1066 | /* Only for pre-ILK configs */ | ||
1067 | static void assert_pll(struct drm_i915_private *dev_priv, | ||
1068 | enum pipe pipe, bool state) | ||
1069 | { | ||
1070 | int reg; | ||
1071 | u32 val; | ||
1072 | bool cur_state; | ||
1073 | |||
1074 | reg = DPLL(pipe); | ||
1075 | val = I915_READ(reg); | ||
1076 | cur_state = !!(val & DPLL_VCO_ENABLE); | ||
1077 | WARN(cur_state != state, | ||
1078 | "PLL state assertion failure (expected %s, current %s)\n", | ||
1079 | state_string(state), state_string(cur_state)); | ||
1080 | } | ||
1081 | #define assert_pll_enabled(d, p) assert_pll(d, p, true) | ||
1082 | #define assert_pll_disabled(d, p) assert_pll(d, p, false) | ||
1083 | |||
1084 | /* For ILK+ */ | ||
1085 | static void assert_pch_pll(struct drm_i915_private *dev_priv, | ||
1086 | enum pipe pipe, bool state) | ||
1087 | { | ||
1088 | int reg; | ||
1089 | u32 val; | ||
1090 | bool cur_state; | ||
1091 | |||
1092 | reg = PCH_DPLL(pipe); | ||
1093 | val = I915_READ(reg); | ||
1094 | cur_state = !!(val & DPLL_VCO_ENABLE); | ||
1095 | WARN(cur_state != state, | ||
1096 | "PCH PLL state assertion failure (expected %s, current %s)\n", | ||
1097 | state_string(state), state_string(cur_state)); | ||
1098 | } | ||
1099 | #define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true) | ||
1100 | #define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false) | ||
1101 | |||
1102 | static void assert_fdi_tx(struct drm_i915_private *dev_priv, | ||
1103 | enum pipe pipe, bool state) | ||
1104 | { | ||
1105 | int reg; | ||
1106 | u32 val; | ||
1107 | bool cur_state; | ||
1108 | |||
1109 | reg = FDI_TX_CTL(pipe); | ||
1110 | val = I915_READ(reg); | ||
1111 | cur_state = !!(val & FDI_TX_ENABLE); | ||
1112 | WARN(cur_state != state, | ||
1113 | "FDI TX state assertion failure (expected %s, current %s)\n", | ||
1114 | state_string(state), state_string(cur_state)); | ||
1115 | } | ||
1116 | #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true) | ||
1117 | #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false) | ||
1118 | |||
1119 | static void assert_fdi_rx(struct drm_i915_private *dev_priv, | ||
1120 | enum pipe pipe, bool state) | ||
1121 | { | ||
1122 | int reg; | ||
1123 | u32 val; | ||
1124 | bool cur_state; | ||
1125 | |||
1126 | reg = FDI_RX_CTL(pipe); | ||
1127 | val = I915_READ(reg); | ||
1128 | cur_state = !!(val & FDI_RX_ENABLE); | ||
1129 | WARN(cur_state != state, | ||
1130 | "FDI RX state assertion failure (expected %s, current %s)\n", | ||
1131 | state_string(state), state_string(cur_state)); | ||
1132 | } | ||
1133 | #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true) | ||
1134 | #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false) | ||
1135 | |||
1136 | static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, | ||
1137 | enum pipe pipe) | ||
1138 | { | ||
1139 | int reg; | ||
1140 | u32 val; | ||
1141 | |||
1142 | /* ILK FDI PLL is always enabled */ | ||
1143 | if (dev_priv->info->gen == 5) | ||
1144 | return; | ||
1145 | |||
1146 | reg = FDI_TX_CTL(pipe); | ||
1147 | val = I915_READ(reg); | ||
1148 | WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); | ||
1149 | } | ||
1150 | |||
1151 | static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv, | ||
1152 | enum pipe pipe) | ||
1153 | { | ||
1154 | int reg; | ||
1155 | u32 val; | ||
1156 | |||
1157 | reg = FDI_RX_CTL(pipe); | ||
1158 | val = I915_READ(reg); | ||
1159 | WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n"); | ||
1160 | } | ||
1161 | |||
1162 | static void assert_panel_unlocked(struct drm_i915_private *dev_priv, | ||
1163 | enum pipe pipe) | ||
1164 | { | ||
1165 | int pp_reg, lvds_reg; | ||
1166 | u32 val; | ||
1167 | enum pipe panel_pipe = PIPE_A; | ||
1168 | bool locked = true; |||
1169 | |||
1170 | if (HAS_PCH_SPLIT(dev_priv->dev)) { | ||
1171 | pp_reg = PCH_PP_CONTROL; | ||
1172 | lvds_reg = PCH_LVDS; | ||
1173 | } else { | ||
1174 | pp_reg = PP_CONTROL; | ||
1175 | lvds_reg = LVDS; | ||
1176 | } | ||
1177 | |||
1178 | val = I915_READ(pp_reg); | ||
1179 | if (!(val & PANEL_POWER_ON) || | ||
1180 | ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)) | ||
1181 | locked = false; | ||
1182 | |||
1183 | if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT) | ||
1184 | panel_pipe = PIPE_B; | ||
1185 | |||
1186 | WARN(panel_pipe == pipe && locked, | ||
1187 | "panel assertion failure, pipe %c regs locked\n", | ||
1188 | pipe_name(pipe)); | ||
1189 | } | ||
1190 | |||
1191 | static void assert_pipe(struct drm_i915_private *dev_priv, | ||
1192 | enum pipe pipe, bool state) | ||
1193 | { | ||
1194 | int reg; | ||
1195 | u32 val; | ||
1196 | bool cur_state; | ||
1197 | |||
1198 | reg = PIPECONF(pipe); | ||
1199 | val = I915_READ(reg); | ||
1200 | cur_state = !!(val & PIPECONF_ENABLE); | ||
1201 | WARN(cur_state != state, | ||
1202 | "pipe %c assertion failure (expected %s, current %s)\n", | ||
1203 | pipe_name(pipe), state_string(state), state_string(cur_state)); | ||
1204 | } | ||
1205 | #define assert_pipe_enabled(d, p) assert_pipe(d, p, true) | ||
1206 | #define assert_pipe_disabled(d, p) assert_pipe(d, p, false) | ||
1207 | |||
1208 | static void assert_plane_enabled(struct drm_i915_private *dev_priv, | ||
1209 | enum plane plane) | ||
1210 | { | ||
1211 | int reg; | ||
1212 | u32 val; | ||
1213 | |||
1214 | reg = DSPCNTR(plane); | ||
1215 | val = I915_READ(reg); | ||
1216 | WARN(!(val & DISPLAY_PLANE_ENABLE), | ||
1217 | "plane %c assertion failure, should be active but is disabled\n", | ||
1218 | plane_name(plane)); | ||
1219 | } | ||
1220 | |||
1221 | static void assert_planes_disabled(struct drm_i915_private *dev_priv, | ||
1222 | enum pipe pipe) | ||
1223 | { | ||
1224 | int reg, i; | ||
1225 | u32 val; | ||
1226 | int cur_pipe; | ||
1227 | |||
1228 | /* Planes are fixed to pipes on ILK+ */ | ||
1229 | if (HAS_PCH_SPLIT(dev_priv->dev)) | ||
1230 | return; | ||
1231 | |||
1232 | /* Need to check both planes against the pipe */ | ||
1233 | for (i = 0; i < 2; i++) { | ||
1234 | reg = DSPCNTR(i); | ||
1235 | val = I915_READ(reg); | ||
1236 | cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> | ||
1237 | DISPPLANE_SEL_PIPE_SHIFT; | ||
1238 | WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe, | ||
1239 | "plane %c assertion failure, should be off on pipe %c but is still active\n", | ||
1240 | plane_name(i), pipe_name(pipe)); | ||
1241 | } | ||
1242 | } | ||
1243 | |||
1244 | static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) | ||
1245 | { | ||
1246 | u32 val; | ||
1247 | bool enabled; | ||
1248 | |||
1249 | val = I915_READ(PCH_DREF_CONTROL); | ||
1250 | enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | | ||
1251 | DREF_SUPERSPREAD_SOURCE_MASK)); | ||
1252 | WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n"); | ||
1253 | } | ||
1254 | |||
1255 | static void assert_transcoder_disabled(struct drm_i915_private *dev_priv, | ||
1256 | enum pipe pipe) | ||
1257 | { | ||
1258 | int reg; | ||
1259 | u32 val; | ||
1260 | bool enabled; | ||
1261 | |||
1262 | reg = TRANSCONF(pipe); | ||
1263 | val = I915_READ(reg); | ||
1264 | enabled = !!(val & TRANS_ENABLE); | ||
1265 | WARN(enabled, | ||
1266 | "transcoder assertion failed, should be off on pipe %c but is still active\n", | ||
1267 | pipe_name(pipe)); | ||
1268 | } | ||
1269 | |||
1270 | static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, | ||
1271 | enum pipe pipe, int reg) | ||
1272 | { | ||
1273 | u32 val = I915_READ(reg); | ||
1274 | WARN(DP_PIPE_ENABLED(val, pipe), | ||
1275 | "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", | ||
1276 | reg, pipe_name(pipe)); | ||
1277 | } | ||
1278 | |||
1279 | static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, | ||
1280 | enum pipe pipe, int reg) | ||
1281 | { | ||
1282 | u32 val = I915_READ(reg); | ||
1283 | WARN(HDMI_PIPE_ENABLED(val, pipe), | ||
1284 | "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", | ||
1285 | reg, pipe_name(pipe)); | ||
1286 | } | ||
1287 | |||
1288 | static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, | ||
1289 | enum pipe pipe) | ||
1290 | { | ||
1291 | int reg; | ||
1292 | u32 val; | ||
1293 | |||
1294 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B); | ||
1295 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C); | ||
1296 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D); | ||
1297 | |||
1298 | reg = PCH_ADPA; | ||
1299 | val = I915_READ(reg); | ||
1300 | WARN(ADPA_PIPE_ENABLED(val, pipe), | ||
1301 | "PCH VGA enabled on transcoder %c, should be disabled\n", | ||
1302 | pipe_name(pipe)); | ||
1303 | |||
1304 | reg = PCH_LVDS; | ||
1305 | val = I915_READ(reg); | ||
1306 | WARN(LVDS_PIPE_ENABLED(val, pipe), | ||
1307 | "PCH LVDS enabled on transcoder %c, should be disabled\n", | ||
1308 | pipe_name(pipe)); | ||
1309 | |||
1310 | assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB); | ||
1311 | assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC); | ||
1312 | assert_pch_hdmi_disabled(dev_priv, pipe, HDMID); | ||
1313 | } | ||
1314 | |||
1315 | /** | ||
1316 | * intel_enable_pll - enable a PLL | ||
1317 | * @dev_priv: i915 private structure | ||
1318 | * @pipe: pipe PLL to enable | ||
1319 | * | ||
1320 | * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to | ||
1321 | * make sure the PLL reg is writable first though, since the panel write | ||
1322 | * protect mechanism may be enabled. | ||
1323 | * | ||
1324 | * Note! This is for pre-ILK only. | ||
1325 | */ | ||
1326 | static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) | ||
1327 | { | ||
1328 | int reg; | ||
1329 | u32 val; | ||
1330 | |||
1331 | /* No really, not for ILK+ */ | ||
1332 | BUG_ON(dev_priv->info->gen >= 5); | ||
1333 | |||
1334 | /* PLL is protected by panel, make sure we can write it */ | ||
1335 | if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev)) | ||
1336 | assert_panel_unlocked(dev_priv, pipe); | ||
1337 | |||
1338 | reg = DPLL(pipe); | ||
1339 | val = I915_READ(reg); | ||
1340 | val |= DPLL_VCO_ENABLE; | ||
1341 | |||
1342 | /* We do this three times for luck */ | ||
1343 | I915_WRITE(reg, val); | ||
1344 | POSTING_READ(reg); | ||
1345 | udelay(150); /* wait for warmup */ | ||
1346 | I915_WRITE(reg, val); | ||
1347 | POSTING_READ(reg); | ||
1348 | udelay(150); /* wait for warmup */ | ||
1349 | I915_WRITE(reg, val); | ||
1350 | POSTING_READ(reg); | ||
1351 | udelay(150); /* wait for warmup */ | ||
1352 | } | ||
1353 | |||
1354 | /** | ||
1355 | * intel_disable_pll - disable a PLL | ||
1356 | * @dev_priv: i915 private structure | ||
1357 | * @pipe: pipe PLL to disable | ||
1358 | * | ||
1359 | * Disable the PLL for @pipe, making sure the pipe is off first. | ||
1360 | * | ||
1361 | * Note! This is for pre-ILK only. | ||
1362 | */ | ||
1363 | static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) | ||
1364 | { | ||
1365 | int reg; | ||
1366 | u32 val; | ||
1367 | |||
1368 | /* Don't disable pipe A or pipe A PLLs if needed */ | ||
1369 | if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) | ||
1370 | return; | ||
1371 | |||
1372 | /* Make sure the pipe isn't still relying on us */ | ||
1373 | assert_pipe_disabled(dev_priv, pipe); | ||
1374 | |||
1375 | reg = DPLL(pipe); | ||
1376 | val = I915_READ(reg); | ||
1377 | val &= ~DPLL_VCO_ENABLE; | ||
1378 | I915_WRITE(reg, val); | ||
1379 | POSTING_READ(reg); | ||
1380 | } | ||
1381 | |||
1382 | /** | ||
1383 | * intel_enable_pch_pll - enable PCH PLL | ||
1384 | * @dev_priv: i915 private structure | ||
1385 | * @pipe: pipe PLL to enable | ||
1386 | * | ||
1387 | * The PCH PLL needs to be enabled before the PCH transcoder, since it | ||
1388 | * drives the transcoder clock. | ||
1389 | */ | ||
1390 | static void intel_enable_pch_pll(struct drm_i915_private *dev_priv, | ||
1391 | enum pipe pipe) | ||
1392 | { | ||
1393 | int reg; | ||
1394 | u32 val; | ||
1395 | |||
1396 | /* PCH only available on ILK+ */ | ||
1397 | BUG_ON(dev_priv->info->gen < 5); | ||
1398 | |||
1399 | /* PCH refclock must be enabled first */ | ||
1400 | assert_pch_refclk_enabled(dev_priv); | ||
1401 | |||
1402 | reg = PCH_DPLL(pipe); | ||
1403 | val = I915_READ(reg); | ||
1404 | val |= DPLL_VCO_ENABLE; | ||
1405 | I915_WRITE(reg, val); | ||
1406 | POSTING_READ(reg); | ||
1407 | udelay(200); | ||
1408 | } | ||
1409 | |||
1410 | static void intel_disable_pch_pll(struct drm_i915_private *dev_priv, | ||
1411 | enum pipe pipe) | ||
1412 | { | ||
1413 | int reg; | ||
1414 | u32 val; | ||
1415 | |||
1416 | /* PCH only available on ILK+ */ | ||
1417 | BUG_ON(dev_priv->info->gen < 5); | ||
1418 | |||
1419 | /* Make sure transcoder isn't still depending on us */ | ||
1420 | assert_transcoder_disabled(dev_priv, pipe); | ||
1421 | |||
1422 | reg = PCH_DPLL(pipe); | ||
1423 | val = I915_READ(reg); | ||
1424 | val &= ~DPLL_VCO_ENABLE; | ||
1425 | I915_WRITE(reg, val); | ||
1426 | POSTING_READ(reg); | ||
1427 | udelay(200); | ||
1428 | } | ||
1429 | |||
1430 | static void intel_enable_transcoder(struct drm_i915_private *dev_priv, | ||
1431 | enum pipe pipe) | ||
1432 | { | ||
1433 | int reg; | ||
1434 | u32 val; | ||
1435 | |||
1436 | /* PCH only available on ILK+ */ | ||
1437 | BUG_ON(dev_priv->info->gen < 5); | ||
1438 | |||
1439 | /* Make sure PCH DPLL is enabled */ | ||
1440 | assert_pch_pll_enabled(dev_priv, pipe); | ||
1441 | |||
1442 | /* FDI must be feeding us bits for PCH ports */ | ||
1443 | assert_fdi_tx_enabled(dev_priv, pipe); | ||
1444 | assert_fdi_rx_enabled(dev_priv, pipe); | ||
1445 | |||
1446 | reg = TRANSCONF(pipe); | ||
1447 | val = I915_READ(reg); | ||
1448 | /* | ||
1449 | * make the BPC in transcoder be consistent with | ||
1450 | * that in pipeconf reg. | ||
1451 | */ | ||
1452 | val &= ~PIPE_BPC_MASK; | ||
1453 | val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK; | ||
1454 | I915_WRITE(reg, val | TRANS_ENABLE); | ||
1455 | if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) | ||
1456 | DRM_ERROR("failed to enable transcoder %d\n", pipe); | ||
1457 | } | ||
1458 | |||
1459 | static void intel_disable_transcoder(struct drm_i915_private *dev_priv, | ||
1460 | enum pipe pipe) | ||
1461 | { | ||
1462 | int reg; | ||
1463 | u32 val; | ||
1464 | |||
1465 | /* FDI relies on the transcoder */ | ||
1466 | assert_fdi_tx_disabled(dev_priv, pipe); | ||
1467 | assert_fdi_rx_disabled(dev_priv, pipe); | ||
1468 | |||
1469 | /* Ports must be off as well */ | ||
1470 | assert_pch_ports_disabled(dev_priv, pipe); | ||
1471 | |||
1472 | reg = TRANSCONF(pipe); | ||
1473 | val = I915_READ(reg); | ||
1474 | val &= ~TRANS_ENABLE; | ||
1475 | I915_WRITE(reg, val); | ||
1476 | /* wait for PCH transcoder off, transcoder state */ | ||
1477 | if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) | ||
1478 | DRM_ERROR("failed to disable transcoder\n"); | ||
1479 | } | ||
1480 | |||
1481 | /** | ||
1482 | * intel_enable_pipe - enable a pipe, asserting requirements | ||
1483 | * @dev_priv: i915 private structure | ||
1484 | * @pipe: pipe to enable | ||
1485 | * @pch_port: on ILK+, is this pipe driving a PCH port or not | ||
1486 | * | ||
1487 | * Enable @pipe, making sure that various hardware specific requirements | ||
1488 | * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc. | ||
1489 | * | ||
1490 | * @pipe should be %PIPE_A or %PIPE_B. | ||
1491 | * | ||
1492 | * Will wait until the pipe is actually running (i.e. first vblank) before | ||
1493 | * returning. | ||
1494 | */ | ||
1495 | static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, | ||
1496 | bool pch_port) | ||
1497 | { | ||
1498 | int reg; | ||
1499 | u32 val; | ||
1500 | |||
1501 | /* | ||
1502 | * A pipe without a PLL won't actually be able to drive bits from | ||
1503 | * a plane. On ILK+ the pipe PLLs are integrated, so we don't | ||
1504 | * need the check. | ||
1505 | */ | ||
1506 | if (!HAS_PCH_SPLIT(dev_priv->dev)) | ||
1507 | assert_pll_enabled(dev_priv, pipe); | ||
1508 | else { | ||
1509 | if (pch_port) { | ||
1510 | /* if driving the PCH, we need FDI enabled */ | ||
1511 | assert_fdi_rx_pll_enabled(dev_priv, pipe); | ||
1512 | assert_fdi_tx_pll_enabled(dev_priv, pipe); | ||
1513 | } | ||
1514 | /* FIXME: assert CPU port conditions for SNB+ */ | ||
1515 | } | ||
1516 | |||
1517 | reg = PIPECONF(pipe); | ||
1518 | val = I915_READ(reg); | ||
1519 | val |= PIPECONF_ENABLE; | ||
1520 | I915_WRITE(reg, val); | ||
1521 | POSTING_READ(reg); | ||
1522 | intel_wait_for_vblank(dev_priv->dev, pipe); | ||
1523 | } | ||
1524 | |||
1525 | /** | ||
1526 | * intel_disable_pipe - disable a pipe, asserting requirements | ||
1527 | * @dev_priv: i915 private structure | ||
1528 | * @pipe: pipe to disable | ||
1529 | * | ||
1530 | * Disable @pipe, making sure that various hardware specific requirements | ||
1531 | * are met, if applicable, e.g. plane disabled, panel fitter off, etc. | ||
1532 | * | ||
1533 | * @pipe should be %PIPE_A or %PIPE_B. | ||
1534 | * | ||
1535 | * Will wait until the pipe has shut down before returning. | ||
1536 | */ | ||
1537 | static void intel_disable_pipe(struct drm_i915_private *dev_priv, | ||
1538 | enum pipe pipe) | ||
1539 | { | ||
1540 | int reg; | ||
1541 | u32 val; | ||
1542 | |||
1543 | /* | ||
1544 | * Make sure planes won't keep trying to pump pixels to us, | ||
1545 | * or we might hang the display. | ||
1546 | */ | ||
1547 | assert_planes_disabled(dev_priv, pipe); | ||
1548 | |||
1549 | /* Don't disable pipe A or pipe A PLLs if needed */ | ||
1550 | if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) | ||
1551 | return; | ||
1552 | |||
1553 | reg = PIPECONF(pipe); | ||
1554 | val = I915_READ(reg); | ||
1555 | val &= ~PIPECONF_ENABLE; | ||
1556 | I915_WRITE(reg, val); | ||
1557 | POSTING_READ(reg); | ||
1558 | intel_wait_for_pipe_off(dev_priv->dev, pipe); | ||
1559 | } | ||
1560 | |||
1561 | /** | ||
1562 | * intel_enable_plane - enable a display plane on a given pipe | ||
1563 | * @dev_priv: i915 private structure | ||
1564 | * @plane: plane to enable | ||
1565 | * @pipe: pipe being fed | ||
1566 | * | ||
1567 | * Enable @plane on @pipe, making sure that @pipe is running first. | ||
1568 | */ | ||
1569 | static void intel_enable_plane(struct drm_i915_private *dev_priv, | ||
1570 | enum plane plane, enum pipe pipe) | ||
1571 | { | ||
1572 | int reg; | ||
1573 | u32 val; | ||
1574 | |||
1575 | /* If the pipe isn't enabled, we can't pump pixels and may hang */ | ||
1576 | assert_pipe_enabled(dev_priv, pipe); | ||
1577 | |||
1578 | reg = DSPCNTR(plane); | ||
1579 | val = I915_READ(reg); | ||
1580 | val |= DISPLAY_PLANE_ENABLE; | ||
1581 | I915_WRITE(reg, val); | ||
1582 | POSTING_READ(reg); | ||
1583 | intel_wait_for_vblank(dev_priv->dev, pipe); | ||
1584 | } | ||
1585 | |||
1586 | /* | ||
1587 | * Plane regs are double buffered, going from enabled->disabled needs a | ||
1588 | * trigger in order to latch. The display address reg provides this. | ||
1589 | */ | ||
1590 | static void intel_flush_display_plane(struct drm_i915_private *dev_priv, | ||
1591 | enum plane plane) | ||
1592 | { | ||
1593 | u32 reg = DSPADDR(plane); | ||
1594 | I915_WRITE(reg, I915_READ(reg)); | ||
1595 | } | ||
1596 | |||
1597 | /** | ||
1598 | * intel_disable_plane - disable a display plane | ||
1599 | * @dev_priv: i915 private structure | ||
1600 | * @plane: plane to disable | ||
1601 | * @pipe: pipe consuming the data | ||
1602 | * | ||
1603 | * Disable @plane; should be an independent operation. | ||
1604 | */ | ||
1605 | static void intel_disable_plane(struct drm_i915_private *dev_priv, | ||
1606 | enum plane plane, enum pipe pipe) | ||
1607 | { | ||
1608 | int reg; | ||
1609 | u32 val; | ||
1610 | |||
1611 | reg = DSPCNTR(plane); | ||
1612 | val = I915_READ(reg); | ||
1613 | val &= ~DISPLAY_PLANE_ENABLE; | ||
1614 | I915_WRITE(reg, val); | ||
1615 | POSTING_READ(reg); | ||
1616 | intel_flush_display_plane(dev_priv, plane); | ||
1617 | intel_wait_for_vblank(dev_priv->dev, pipe); | ||
1618 | } | ||
1619 | |||
1620 | static void disable_pch_dp(struct drm_i915_private *dev_priv, | ||
1621 | enum pipe pipe, int reg) | ||
1622 | { | ||
1623 | u32 val = I915_READ(reg); | ||
1624 | if (DP_PIPE_ENABLED(val, pipe)) | ||
1625 | I915_WRITE(reg, val & ~DP_PORT_EN); | ||
1626 | } | ||
1627 | |||
1628 | static void disable_pch_hdmi(struct drm_i915_private *dev_priv, | ||
1629 | enum pipe pipe, int reg) | ||
1630 | { | ||
1631 | u32 val = I915_READ(reg); | ||
1632 | if (HDMI_PIPE_ENABLED(val, pipe)) | ||
1633 | I915_WRITE(reg, val & ~PORT_ENABLE); | ||
1634 | } | ||
1635 | |||
1636 | /* Disable any ports connected to this transcoder */ | ||
1637 | static void intel_disable_pch_ports(struct drm_i915_private *dev_priv, | ||
1638 | enum pipe pipe) | ||
1639 | { | ||
1640 | u32 reg, val; | ||
1641 | |||
1642 | val = I915_READ(PCH_PP_CONTROL); | ||
1643 | I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS); | ||
1644 | |||
1645 | disable_pch_dp(dev_priv, pipe, PCH_DP_B); | ||
1646 | disable_pch_dp(dev_priv, pipe, PCH_DP_C); | ||
1647 | disable_pch_dp(dev_priv, pipe, PCH_DP_D); | ||
1648 | |||
1649 | reg = PCH_ADPA; | ||
1650 | val = I915_READ(reg); | ||
1651 | if (ADPA_PIPE_ENABLED(val, pipe)) | ||
1652 | I915_WRITE(reg, val & ~ADPA_DAC_ENABLE); | ||
1653 | |||
1654 | reg = PCH_LVDS; | ||
1655 | val = I915_READ(reg); | ||
1656 | if (LVDS_PIPE_ENABLED(val, pipe)) { | ||
1657 | I915_WRITE(reg, val & ~LVDS_PORT_EN); | ||
1658 | POSTING_READ(reg); | ||
1659 | udelay(100); | ||
1660 | } | ||
1661 | |||
1662 | disable_pch_hdmi(dev_priv, pipe, HDMIB); | ||
1663 | disable_pch_hdmi(dev_priv, pipe, HDMIC); | ||
1664 | disable_pch_hdmi(dev_priv, pipe, HDMID); | ||
1665 | } | ||
1666 | |||
1061 | static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | 1667 | static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) |
1062 | { | 1668 | { |
1063 | struct drm_device *dev = crtc->dev; | 1669 | struct drm_device *dev = crtc->dev; |
@@ -1219,7 +1825,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev) | |||
1219 | u32 blt_ecoskpd; | 1825 | u32 blt_ecoskpd; |
1220 | 1826 | ||
1221 | /* Make sure blitter notifies FBC of writes */ | 1827 | /* Make sure blitter notifies FBC of writes */ |
1222 | __gen6_force_wake_get(dev_priv); | 1828 | __gen6_gt_force_wake_get(dev_priv); |
1223 | blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); | 1829 | blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); |
1224 | blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << | 1830 | blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << |
1225 | GEN6_BLITTER_LOCK_SHIFT; | 1831 | GEN6_BLITTER_LOCK_SHIFT; |
@@ -1230,7 +1836,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev) | |||
1230 | GEN6_BLITTER_LOCK_SHIFT); | 1836 | GEN6_BLITTER_LOCK_SHIFT); |
1231 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); | 1837 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); |
1232 | POSTING_READ(GEN6_BLITTER_ECOSKPD); | 1838 | POSTING_READ(GEN6_BLITTER_ECOSKPD); |
1233 | __gen6_force_wake_put(dev_priv); | 1839 | __gen6_gt_force_wake_put(dev_priv); |
1234 | } | 1840 | } |
1235 | 1841 | ||
1236 | static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | 1842 | static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) |
@@ -1390,7 +1996,7 @@ static void intel_update_fbc(struct drm_device *dev) | |||
1390 | * - going to an unsupported config (interlace, pixel multiply, etc.) | 1996 | * - going to an unsupported config (interlace, pixel multiply, etc.) |
1391 | */ | 1997 | */ |
1392 | list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { | 1998 | list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { |
1393 | if (tmp_crtc->enabled) { | 1999 | if (tmp_crtc->enabled && tmp_crtc->fb) { |
1394 | if (crtc) { | 2000 | if (crtc) { |
1395 | DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); | 2001 | DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); |
1396 | dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; | 2002 | dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; |
@@ -1461,6 +2067,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, | |||
1461 | struct drm_i915_gem_object *obj, | 2067 | struct drm_i915_gem_object *obj, |
1462 | struct intel_ring_buffer *pipelined) | 2068 | struct intel_ring_buffer *pipelined) |
1463 | { | 2069 | { |
2070 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1464 | u32 alignment; | 2071 | u32 alignment; |
1465 | int ret; | 2072 | int ret; |
1466 | 2073 | ||
@@ -1485,9 +2092,10 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, | |||
1485 | BUG(); | 2092 | BUG(); |
1486 | } | 2093 | } |
1487 | 2094 | ||
2095 | dev_priv->mm.interruptible = false; | ||
1488 | ret = i915_gem_object_pin(obj, alignment, true); | 2096 | ret = i915_gem_object_pin(obj, alignment, true); |
1489 | if (ret) | 2097 | if (ret) |
1490 | return ret; | 2098 | goto err_interruptible; |
1491 | 2099 | ||
1492 | ret = i915_gem_object_set_to_display_plane(obj, pipelined); | 2100 | ret = i915_gem_object_set_to_display_plane(obj, pipelined); |
1493 | if (ret) | 2101 | if (ret) |
@@ -1499,15 +2107,18 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, | |||
1499 | * a fence as the cost is not that onerous. | 2107 | * a fence as the cost is not that onerous. |
1500 | */ | 2108 | */ |
1501 | if (obj->tiling_mode != I915_TILING_NONE) { | 2109 | if (obj->tiling_mode != I915_TILING_NONE) { |
1502 | ret = i915_gem_object_get_fence(obj, pipelined, false); | 2110 | ret = i915_gem_object_get_fence(obj, pipelined); |
1503 | if (ret) | 2111 | if (ret) |
1504 | goto err_unpin; | 2112 | goto err_unpin; |
1505 | } | 2113 | } |
1506 | 2114 | ||
2115 | dev_priv->mm.interruptible = true; | ||
1507 | return 0; | 2116 | return 0; |
1508 | 2117 | ||
1509 | err_unpin: | 2118 | err_unpin: |
1510 | i915_gem_object_unpin(obj); | 2119 | i915_gem_object_unpin(obj); |
2120 | err_interruptible: | ||
2121 | dev_priv->mm.interruptible = true; | ||
1511 | return ret; | 2122 | return ret; |
1512 | } | 2123 | } |
1513 | 2124 | ||
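The hunk above switches the pin-and-fence path to run with interruptible GPU waits turned off (dev_priv->mm.interruptible = false) and to restore the flag on every exit, including the new err_interruptible label. A minimal stand-alone sketch of that save/restore-and-unwind shape, using invented helper names rather than the real i915 functions, might look like this:

#include <stdbool.h>

struct ctx {
	bool interruptible;
};

/* Hypothetical stand-ins for the pin / set-domain / fence steps above. */
static int pin_buffer(struct ctx *c)    { (void)c; return 0; }
static int fence_buffer(struct ctx *c)  { (void)c; return 0; }
static void unpin_buffer(struct ctx *c) { (void)c; }

static int pin_and_fence(struct ctx *c)
{
	int ret;

	/* Waits issued from here on must not be interrupted by signals. */
	c->interruptible = false;

	ret = pin_buffer(c);
	if (ret)
		goto err_interruptible;

	ret = fence_buffer(c);
	if (ret)
		goto err_unpin;

	c->interruptible = true;	/* success path restores the flag */
	return 0;

err_unpin:
	unpin_buffer(c);
err_interruptible:
	c->interruptible = true;	/* ...and so does every error path */
	return ret;
}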
@@ -1630,19 +2241,19 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1630 | struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; | 2241 | struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; |
1631 | 2242 | ||
1632 | wait_event(dev_priv->pending_flip_queue, | 2243 | wait_event(dev_priv->pending_flip_queue, |
2244 | atomic_read(&dev_priv->mm.wedged) || | ||
1633 | atomic_read(&obj->pending_flip) == 0); | 2245 | atomic_read(&obj->pending_flip) == 0); |
1634 | 2246 | ||
1635 | /* Big Hammer, we also need to ensure that any pending | 2247 | /* Big Hammer, we also need to ensure that any pending |
1636 | * MI_WAIT_FOR_EVENT inside a user batch buffer on the | 2248 | * MI_WAIT_FOR_EVENT inside a user batch buffer on the |
1637 | * current scanout is retired before unpinning the old | 2249 | * current scanout is retired before unpinning the old |
1638 | * framebuffer. | 2250 | * framebuffer. |
2251 | * | ||
2252 | * This should only fail upon a hung GPU, in which case we | ||
2253 | * can safely continue. | ||
1639 | */ | 2254 | */ |
1640 | ret = i915_gem_object_flush_gpu(obj, false); | 2255 | ret = i915_gem_object_flush_gpu(obj); |
1641 | if (ret) { | 2256 | (void) ret; |
1642 | i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); | ||
1643 | mutex_unlock(&dev->struct_mutex); | ||
1644 | return ret; | ||
1645 | } | ||
1646 | } | 2257 | } |
1647 | 2258 | ||
1648 | ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, | 2259 | ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, |
@@ -1753,8 +2364,13 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) | |||
1753 | struct drm_i915_private *dev_priv = dev->dev_private; | 2364 | struct drm_i915_private *dev_priv = dev->dev_private; |
1754 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 2365 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1755 | int pipe = intel_crtc->pipe; | 2366 | int pipe = intel_crtc->pipe; |
2367 | int plane = intel_crtc->plane; | ||
1756 | u32 reg, temp, tries; | 2368 | u32 reg, temp, tries; |
1757 | 2369 | ||
2370 | /* FDI needs bits from pipe & plane first */ | ||
2371 | assert_pipe_enabled(dev_priv, pipe); | ||
2372 | assert_plane_enabled(dev_priv, plane); | ||
2373 | |||
1758 | /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit | 2374 | /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit |
1759 | for train result */ | 2375 | for train result */ |
1760 | reg = FDI_RX_IMR(pipe); | 2376 | reg = FDI_RX_IMR(pipe); |
@@ -1784,7 +2400,11 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) | |||
1784 | udelay(150); | 2400 | udelay(150); |
1785 | 2401 | ||
1786 | /* Ironlake workaround, enable clock pointer after FDI enable*/ | 2402 | /* Ironlake workaround, enable clock pointer after FDI enable*/ |
1787 | I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_ENABLE); | 2403 | if (HAS_PCH_IBX(dev)) { |
2404 | I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); | ||
2405 | I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | | ||
2406 | FDI_RX_PHASE_SYNC_POINTER_EN); | ||
2407 | } | ||
1788 | 2408 | ||
1789 | reg = FDI_RX_IIR(pipe); | 2409 | reg = FDI_RX_IIR(pipe); |
1790 | for (tries = 0; tries < 5; tries++) { | 2410 | for (tries = 0; tries < 5; tries++) { |
@@ -1834,7 +2454,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) | |||
1834 | 2454 | ||
1835 | } | 2455 | } |
1836 | 2456 | ||
1837 | static const int const snb_b_fdi_train_param [] = { | 2457 | static const int snb_b_fdi_train_param [] = { |
1838 | FDI_LINK_TRAIN_400MV_0DB_SNB_B, | 2458 | FDI_LINK_TRAIN_400MV_0DB_SNB_B, |
1839 | FDI_LINK_TRAIN_400MV_6DB_SNB_B, | 2459 | FDI_LINK_TRAIN_400MV_6DB_SNB_B, |
1840 | FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, | 2460 | FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, |
@@ -2003,12 +2623,60 @@ static void ironlake_fdi_enable(struct drm_crtc *crtc) | |||
2003 | } | 2623 | } |
2004 | } | 2624 | } |
2005 | 2625 | ||
2006 | static void intel_flush_display_plane(struct drm_device *dev, | 2626 | static void ironlake_fdi_disable(struct drm_crtc *crtc) |
2007 | int plane) | ||
2008 | { | 2627 | { |
2628 | struct drm_device *dev = crtc->dev; | ||
2009 | struct drm_i915_private *dev_priv = dev->dev_private; | 2629 | struct drm_i915_private *dev_priv = dev->dev_private; |
2010 | u32 reg = DSPADDR(plane); | 2630 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2011 | I915_WRITE(reg, I915_READ(reg)); | 2631 | int pipe = intel_crtc->pipe; |
2632 | u32 reg, temp; | ||
2633 | |||
2634 | /* disable CPU FDI tx and PCH FDI rx */ | ||
2635 | reg = FDI_TX_CTL(pipe); | ||
2636 | temp = I915_READ(reg); | ||
2637 | I915_WRITE(reg, temp & ~FDI_TX_ENABLE); | ||
2638 | POSTING_READ(reg); | ||
2639 | |||
2640 | reg = FDI_RX_CTL(pipe); | ||
2641 | temp = I915_READ(reg); | ||
2642 | temp &= ~(0x7 << 16); | ||
2643 | temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; | ||
2644 | I915_WRITE(reg, temp & ~FDI_RX_ENABLE); | ||
2645 | |||
2646 | POSTING_READ(reg); | ||
2647 | udelay(100); | ||
2648 | |||
2649 | /* Ironlake workaround, disable clock pointer after downing FDI */ | ||
2650 | if (HAS_PCH_IBX(dev)) { | ||
2651 | I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); | ||
2652 | I915_WRITE(FDI_RX_CHICKEN(pipe), | ||
2653 | I915_READ(FDI_RX_CHICKEN(pipe) & | ||
2654 | ~FDI_RX_PHASE_SYNC_POINTER_EN)); | ||
2655 | } | ||
2656 | |||
2657 | /* still set train pattern 1 */ | ||
2658 | reg = FDI_TX_CTL(pipe); | ||
2659 | temp = I915_READ(reg); | ||
2660 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
2661 | temp |= FDI_LINK_TRAIN_PATTERN_1; | ||
2662 | I915_WRITE(reg, temp); | ||
2663 | |||
2664 | reg = FDI_RX_CTL(pipe); | ||
2665 | temp = I915_READ(reg); | ||
2666 | if (HAS_PCH_CPT(dev)) { | ||
2667 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | ||
2668 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; | ||
2669 | } else { | ||
2670 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
2671 | temp |= FDI_LINK_TRAIN_PATTERN_1; | ||
2672 | } | ||
2673 | /* BPC in FDI rx is consistent with that in PIPECONF */ | ||
2674 | temp &= ~(0x07 << 16); | ||
2675 | temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; | ||
2676 | I915_WRITE(reg, temp); | ||
2677 | |||
2678 | POSTING_READ(reg); | ||
2679 | udelay(100); | ||
2012 | } | 2680 | } |
2013 | 2681 | ||
2014 | /* | 2682 | /* |
@@ -2045,60 +2713,46 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) | |||
2045 | atomic_read(&obj->pending_flip) == 0); | 2713 | atomic_read(&obj->pending_flip) == 0); |
2046 | } | 2714 | } |
2047 | 2715 | ||
2048 | static void ironlake_crtc_enable(struct drm_crtc *crtc) | 2716 | static bool intel_crtc_driving_pch(struct drm_crtc *crtc) |
2049 | { | 2717 | { |
2050 | struct drm_device *dev = crtc->dev; | 2718 | struct drm_device *dev = crtc->dev; |
2051 | struct drm_i915_private *dev_priv = dev->dev_private; | 2719 | struct drm_mode_config *mode_config = &dev->mode_config; |
2052 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 2720 | struct intel_encoder *encoder; |
2053 | int pipe = intel_crtc->pipe; | ||
2054 | int plane = intel_crtc->plane; | ||
2055 | u32 reg, temp; | ||
2056 | |||
2057 | if (intel_crtc->active) | ||
2058 | return; | ||
2059 | |||
2060 | intel_crtc->active = true; | ||
2061 | intel_update_watermarks(dev); | ||
2062 | |||
2063 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | ||
2064 | temp = I915_READ(PCH_LVDS); | ||
2065 | if ((temp & LVDS_PORT_EN) == 0) | ||
2066 | I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); | ||
2067 | } | ||
2068 | 2721 | ||
2069 | ironlake_fdi_enable(crtc); | 2722 | /* |
2723 | * If there's a non-PCH eDP on this crtc, it must be DP_A, and that | ||
2724 | * must be driven by its own crtc; no sharing is possible. | ||
2725 | */ | ||
2726 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { | ||
2727 | if (encoder->base.crtc != crtc) | ||
2728 | continue; | ||
2070 | 2729 | ||
2071 | /* Enable panel fitting for LVDS */ | 2730 | switch (encoder->type) { |
2072 | if (dev_priv->pch_pf_size && | 2731 | case INTEL_OUTPUT_EDP: |
2073 | (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) { | 2732 | if (!intel_encoder_is_pch_edp(&encoder->base)) |
2074 | /* Force use of hard-coded filter coefficients | 2733 | return false; |
2075 | * as some pre-programmed values are broken, | 2734 | continue; |
2076 | * e.g. x201. | 2735 | } |
2077 | */ | ||
2078 | I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, | ||
2079 | PF_ENABLE | PF_FILTER_MED_3x3); | ||
2080 | I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS, | ||
2081 | dev_priv->pch_pf_pos); | ||
2082 | I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, | ||
2083 | dev_priv->pch_pf_size); | ||
2084 | } | 2736 | } |
2085 | 2737 | ||
2086 | /* Enable CPU pipe */ | 2738 | return true; |
2087 | reg = PIPECONF(pipe); | 2739 | } |
2088 | temp = I915_READ(reg); | ||
2089 | if ((temp & PIPECONF_ENABLE) == 0) { | ||
2090 | I915_WRITE(reg, temp | PIPECONF_ENABLE); | ||
2091 | POSTING_READ(reg); | ||
2092 | intel_wait_for_vblank(dev, intel_crtc->pipe); | ||
2093 | } | ||
2094 | 2740 | ||
2095 | /* configure and enable CPU plane */ | 2741 | /* |
2096 | reg = DSPCNTR(plane); | 2742 | * Enable PCH resources required for PCH ports: |
2097 | temp = I915_READ(reg); | 2743 | * - PCH PLLs |
2098 | if ((temp & DISPLAY_PLANE_ENABLE) == 0) { | 2744 | * - FDI training & RX/TX |
2099 | I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE); | 2745 | * - update transcoder timings |
2100 | intel_flush_display_plane(dev, plane); | 2746 | * - DP transcoding bits |
2101 | } | 2747 | * - transcoder |
2748 | */ | ||
2749 | static void ironlake_pch_enable(struct drm_crtc *crtc) | ||
2750 | { | ||
2751 | struct drm_device *dev = crtc->dev; | ||
2752 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2753 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
2754 | int pipe = intel_crtc->pipe; | ||
2755 | u32 reg, temp; | ||
2102 | 2756 | ||
2103 | /* For PCH output, training FDI link */ | 2757 | /* For PCH output, training FDI link */ |
2104 | if (IS_GEN6(dev)) | 2758 | if (IS_GEN6(dev)) |
@@ -2106,14 +2760,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
2106 | else | 2760 | else |
2107 | ironlake_fdi_link_train(crtc); | 2761 | ironlake_fdi_link_train(crtc); |
2108 | 2762 | ||
2109 | /* enable PCH DPLL */ | 2763 | intel_enable_pch_pll(dev_priv, pipe); |
2110 | reg = PCH_DPLL(pipe); | ||
2111 | temp = I915_READ(reg); | ||
2112 | if ((temp & DPLL_VCO_ENABLE) == 0) { | ||
2113 | I915_WRITE(reg, temp | DPLL_VCO_ENABLE); | ||
2114 | POSTING_READ(reg); | ||
2115 | udelay(200); | ||
2116 | } | ||
2117 | 2764 | ||
2118 | if (HAS_PCH_CPT(dev)) { | 2765 | if (HAS_PCH_CPT(dev)) { |
2119 | /* Be sure PCH DPLL SEL is set */ | 2766 | /* Be sure PCH DPLL SEL is set */ |
@@ -2125,7 +2772,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
2125 | I915_WRITE(PCH_DPLL_SEL, temp); | 2772 | I915_WRITE(PCH_DPLL_SEL, temp); |
2126 | } | 2773 | } |
2127 | 2774 | ||
2128 | /* set transcoder timing */ | 2775 | /* set transcoder timing, panel must allow it */ |
2776 | assert_panel_unlocked(dev_priv, pipe); | ||
2129 | I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe))); | 2777 | I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe))); |
2130 | I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe))); | 2778 | I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe))); |
2131 | I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe))); | 2779 | I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe))); |
@@ -2172,18 +2820,55 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
2172 | I915_WRITE(reg, temp); | 2820 | I915_WRITE(reg, temp); |
2173 | } | 2821 | } |
2174 | 2822 | ||
2175 | /* enable PCH transcoder */ | 2823 | intel_enable_transcoder(dev_priv, pipe); |
2176 | reg = TRANSCONF(pipe); | 2824 | } |
2177 | temp = I915_READ(reg); | 2825 | |
2178 | /* | 2826 | static void ironlake_crtc_enable(struct drm_crtc *crtc) |
2179 | * make the BPC in transcoder be consistent with | 2827 | { |
2180 | * that in pipeconf reg. | 2828 | struct drm_device *dev = crtc->dev; |
2181 | */ | 2829 | struct drm_i915_private *dev_priv = dev->dev_private; |
2182 | temp &= ~PIPE_BPC_MASK; | 2830 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2183 | temp |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK; | 2831 | int pipe = intel_crtc->pipe; |
2184 | I915_WRITE(reg, temp | TRANS_ENABLE); | 2832 | int plane = intel_crtc->plane; |
2185 | if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) | 2833 | u32 temp; |
2186 | DRM_ERROR("failed to enable transcoder %d\n", pipe); | 2834 | bool is_pch_port; |
2835 | |||
2836 | if (intel_crtc->active) | ||
2837 | return; | ||
2838 | |||
2839 | intel_crtc->active = true; | ||
2840 | intel_update_watermarks(dev); | ||
2841 | |||
2842 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | ||
2843 | temp = I915_READ(PCH_LVDS); | ||
2844 | if ((temp & LVDS_PORT_EN) == 0) | ||
2845 | I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); | ||
2846 | } | ||
2847 | |||
2848 | is_pch_port = intel_crtc_driving_pch(crtc); | ||
2849 | |||
2850 | if (is_pch_port) | ||
2851 | ironlake_fdi_enable(crtc); | ||
2852 | else | ||
2853 | ironlake_fdi_disable(crtc); | ||
2854 | |||
2855 | /* Enable panel fitting for LVDS */ | ||
2856 | if (dev_priv->pch_pf_size && | ||
2857 | (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) { | ||
2858 | /* Force use of hard-coded filter coefficients | ||
2859 | * as some pre-programmed values are broken, | ||
2860 | * e.g. x201. | ||
2861 | */ | ||
2862 | I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); | ||
2863 | I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos); | ||
2864 | I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); | ||
2865 | } | ||
2866 | |||
2867 | intel_enable_pipe(dev_priv, pipe, is_pch_port); | ||
2868 | intel_enable_plane(dev_priv, plane, pipe); | ||
2869 | |||
2870 | if (is_pch_port) | ||
2871 | ironlake_pch_enable(crtc); | ||
2187 | 2872 | ||
2188 | intel_crtc_load_lut(crtc); | 2873 | intel_crtc_load_lut(crtc); |
2189 | intel_update_fbc(dev); | 2874 | intel_update_fbc(dev); |
@@ -2206,116 +2891,58 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) | |||
2206 | drm_vblank_off(dev, pipe); | 2891 | drm_vblank_off(dev, pipe); |
2207 | intel_crtc_update_cursor(crtc, false); | 2892 | intel_crtc_update_cursor(crtc, false); |
2208 | 2893 | ||
2209 | /* Disable display plane */ | 2894 | intel_disable_plane(dev_priv, plane, pipe); |
2210 | reg = DSPCNTR(plane); | ||
2211 | temp = I915_READ(reg); | ||
2212 | if (temp & DISPLAY_PLANE_ENABLE) { | ||
2213 | I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE); | ||
2214 | intel_flush_display_plane(dev, plane); | ||
2215 | } | ||
2216 | 2895 | ||
2217 | if (dev_priv->cfb_plane == plane && | 2896 | if (dev_priv->cfb_plane == plane && |
2218 | dev_priv->display.disable_fbc) | 2897 | dev_priv->display.disable_fbc) |
2219 | dev_priv->display.disable_fbc(dev); | 2898 | dev_priv->display.disable_fbc(dev); |
2220 | 2899 | ||
2221 | /* disable cpu pipe, disable after all planes disabled */ | 2900 | intel_disable_pipe(dev_priv, pipe); |
2222 | reg = PIPECONF(pipe); | ||
2223 | temp = I915_READ(reg); | ||
2224 | if (temp & PIPECONF_ENABLE) { | ||
2225 | I915_WRITE(reg, temp & ~PIPECONF_ENABLE); | ||
2226 | POSTING_READ(reg); | ||
2227 | /* wait for cpu pipe off, pipe state */ | ||
2228 | intel_wait_for_pipe_off(dev, intel_crtc->pipe); | ||
2229 | } | ||
2230 | 2901 | ||
2231 | /* Disable PF */ | 2902 | /* Disable PF */ |
2232 | I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0); | 2903 | I915_WRITE(PF_CTL(pipe), 0); |
2233 | I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0); | 2904 | I915_WRITE(PF_WIN_SZ(pipe), 0); |
2234 | |||
2235 | /* disable CPU FDI tx and PCH FDI rx */ | ||
2236 | reg = FDI_TX_CTL(pipe); | ||
2237 | temp = I915_READ(reg); | ||
2238 | I915_WRITE(reg, temp & ~FDI_TX_ENABLE); | ||
2239 | POSTING_READ(reg); | ||
2240 | |||
2241 | reg = FDI_RX_CTL(pipe); | ||
2242 | temp = I915_READ(reg); | ||
2243 | temp &= ~(0x7 << 16); | ||
2244 | temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; | ||
2245 | I915_WRITE(reg, temp & ~FDI_RX_ENABLE); | ||
2246 | |||
2247 | POSTING_READ(reg); | ||
2248 | udelay(100); | ||
2249 | |||
2250 | /* Ironlake workaround, disable clock pointer after downing FDI */ | ||
2251 | if (HAS_PCH_IBX(dev)) | ||
2252 | I915_WRITE(FDI_RX_CHICKEN(pipe), | ||
2253 | I915_READ(FDI_RX_CHICKEN(pipe) & | ||
2254 | ~FDI_RX_PHASE_SYNC_POINTER_ENABLE)); | ||
2255 | |||
2256 | /* still set train pattern 1 */ | ||
2257 | reg = FDI_TX_CTL(pipe); | ||
2258 | temp = I915_READ(reg); | ||
2259 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
2260 | temp |= FDI_LINK_TRAIN_PATTERN_1; | ||
2261 | I915_WRITE(reg, temp); | ||
2262 | |||
2263 | reg = FDI_RX_CTL(pipe); | ||
2264 | temp = I915_READ(reg); | ||
2265 | if (HAS_PCH_CPT(dev)) { | ||
2266 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | ||
2267 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; | ||
2268 | } else { | ||
2269 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
2270 | temp |= FDI_LINK_TRAIN_PATTERN_1; | ||
2271 | } | ||
2272 | /* BPC in FDI rx is consistent with that in PIPECONF */ | ||
2273 | temp &= ~(0x07 << 16); | ||
2274 | temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; | ||
2275 | I915_WRITE(reg, temp); | ||
2276 | 2905 | ||
2277 | POSTING_READ(reg); | 2906 | ironlake_fdi_disable(crtc); |
2278 | udelay(100); | ||
2279 | 2907 | ||
2280 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | 2908 | /* This is a horrible layering violation; we should be doing this in |
2281 | temp = I915_READ(PCH_LVDS); | 2909 | * the connector/encoder ->prepare instead, but we don't always have |
2282 | if (temp & LVDS_PORT_EN) { | 2910 | * enough information there about the config to know whether it will |
2283 | I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN); | 2911 | * actually be necessary or just cause undesired flicker. |
2284 | POSTING_READ(PCH_LVDS); | 2912 | */ |
2285 | udelay(100); | 2913 | intel_disable_pch_ports(dev_priv, pipe); |
2286 | } | ||
2287 | } | ||
2288 | 2914 | ||
2289 | /* disable PCH transcoder */ | 2915 | intel_disable_transcoder(dev_priv, pipe); |
2290 | reg = TRANSCONF(plane); | ||
2291 | temp = I915_READ(reg); | ||
2292 | if (temp & TRANS_ENABLE) { | ||
2293 | I915_WRITE(reg, temp & ~TRANS_ENABLE); | ||
2294 | /* wait for PCH transcoder off, transcoder state */ | ||
2295 | if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) | ||
2296 | DRM_ERROR("failed to disable transcoder\n"); | ||
2297 | } | ||
2298 | 2916 | ||
2299 | if (HAS_PCH_CPT(dev)) { | 2917 | if (HAS_PCH_CPT(dev)) { |
2300 | /* disable TRANS_DP_CTL */ | 2918 | /* disable TRANS_DP_CTL */ |
2301 | reg = TRANS_DP_CTL(pipe); | 2919 | reg = TRANS_DP_CTL(pipe); |
2302 | temp = I915_READ(reg); | 2920 | temp = I915_READ(reg); |
2303 | temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); | 2921 | temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); |
2922 | temp |= TRANS_DP_PORT_SEL_NONE; | ||
2304 | I915_WRITE(reg, temp); | 2923 | I915_WRITE(reg, temp); |
2305 | 2924 | ||
2306 | /* disable DPLL_SEL */ | 2925 | /* disable DPLL_SEL */ |
2307 | temp = I915_READ(PCH_DPLL_SEL); | 2926 | temp = I915_READ(PCH_DPLL_SEL); |
2308 | if (pipe == 0) | 2927 | switch (pipe) { |
2309 | temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL); | 2928 | case 0: |
2310 | else | 2929 | temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); |
2930 | break; | ||
2931 | case 1: | ||
2311 | temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); | 2932 | temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); |
2933 | break; | ||
2934 | case 2: | ||
2935 | /* FIXME: manage transcoder PLLs? */ | ||
2936 | temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL); | ||
2937 | break; | ||
2938 | default: | ||
2939 | BUG(); /* wtf */ | ||
2940 | } | ||
2312 | I915_WRITE(PCH_DPLL_SEL, temp); | 2941 | I915_WRITE(PCH_DPLL_SEL, temp); |
2313 | } | 2942 | } |
2314 | 2943 | ||
2315 | /* disable PCH DPLL */ | 2944 | /* disable PCH DPLL */ |
2316 | reg = PCH_DPLL(pipe); | 2945 | intel_disable_pch_pll(dev_priv, pipe); |
2317 | temp = I915_READ(reg); | ||
2318 | I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE); | ||
2319 | 2946 | ||
2320 | /* Switch from PCDclk to Rawclk */ | 2947 | /* Switch from PCDclk to Rawclk */ |
2321 | reg = FDI_RX_CTL(pipe); | 2948 | reg = FDI_RX_CTL(pipe); |
@@ -2372,9 +2999,12 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) | |||
2372 | { | 2999 | { |
2373 | if (!enable && intel_crtc->overlay) { | 3000 | if (!enable && intel_crtc->overlay) { |
2374 | struct drm_device *dev = intel_crtc->base.dev; | 3001 | struct drm_device *dev = intel_crtc->base.dev; |
3002 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2375 | 3003 | ||
2376 | mutex_lock(&dev->struct_mutex); | 3004 | mutex_lock(&dev->struct_mutex); |
2377 | (void) intel_overlay_switch_off(intel_crtc->overlay, false); | 3005 | dev_priv->mm.interruptible = false; |
3006 | (void) intel_overlay_switch_off(intel_crtc->overlay); | ||
3007 | dev_priv->mm.interruptible = true; | ||
2378 | mutex_unlock(&dev->struct_mutex); | 3008 | mutex_unlock(&dev->struct_mutex); |
2379 | } | 3009 | } |
2380 | 3010 | ||
@@ -2390,7 +3020,6 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) | |||
2390 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 3020 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2391 | int pipe = intel_crtc->pipe; | 3021 | int pipe = intel_crtc->pipe; |
2392 | int plane = intel_crtc->plane; | 3022 | int plane = intel_crtc->plane; |
2393 | u32 reg, temp; | ||
2394 | 3023 | ||
2395 | if (intel_crtc->active) | 3024 | if (intel_crtc->active) |
2396 | return; | 3025 | return; |
@@ -2398,42 +3027,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) | |||
2398 | intel_crtc->active = true; | 3027 | intel_crtc->active = true; |
2399 | intel_update_watermarks(dev); | 3028 | intel_update_watermarks(dev); |
2400 | 3029 | ||
2401 | /* Enable the DPLL */ | 3030 | intel_enable_pll(dev_priv, pipe); |
2402 | reg = DPLL(pipe); | 3031 | intel_enable_pipe(dev_priv, pipe, false); |
2403 | temp = I915_READ(reg); | 3032 | intel_enable_plane(dev_priv, plane, pipe); |
2404 | if ((temp & DPLL_VCO_ENABLE) == 0) { | ||
2405 | I915_WRITE(reg, temp); | ||
2406 | |||
2407 | /* Wait for the clocks to stabilize. */ | ||
2408 | POSTING_READ(reg); | ||
2409 | udelay(150); | ||
2410 | |||
2411 | I915_WRITE(reg, temp | DPLL_VCO_ENABLE); | ||
2412 | |||
2413 | /* Wait for the clocks to stabilize. */ | ||
2414 | POSTING_READ(reg); | ||
2415 | udelay(150); | ||
2416 | |||
2417 | I915_WRITE(reg, temp | DPLL_VCO_ENABLE); | ||
2418 | |||
2419 | /* Wait for the clocks to stabilize. */ | ||
2420 | POSTING_READ(reg); | ||
2421 | udelay(150); | ||
2422 | } | ||
2423 | |||
2424 | /* Enable the pipe */ | ||
2425 | reg = PIPECONF(pipe); | ||
2426 | temp = I915_READ(reg); | ||
2427 | if ((temp & PIPECONF_ENABLE) == 0) | ||
2428 | I915_WRITE(reg, temp | PIPECONF_ENABLE); | ||
2429 | |||
2430 | /* Enable the plane */ | ||
2431 | reg = DSPCNTR(plane); | ||
2432 | temp = I915_READ(reg); | ||
2433 | if ((temp & DISPLAY_PLANE_ENABLE) == 0) { | ||
2434 | I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE); | ||
2435 | intel_flush_display_plane(dev, plane); | ||
2436 | } | ||
2437 | 3033 | ||
2438 | intel_crtc_load_lut(crtc); | 3034 | intel_crtc_load_lut(crtc); |
2439 | intel_update_fbc(dev); | 3035 | intel_update_fbc(dev); |
@@ -2450,7 +3046,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) | |||
2450 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 3046 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2451 | int pipe = intel_crtc->pipe; | 3047 | int pipe = intel_crtc->pipe; |
2452 | int plane = intel_crtc->plane; | 3048 | int plane = intel_crtc->plane; |
2453 | u32 reg, temp; | ||
2454 | 3049 | ||
2455 | if (!intel_crtc->active) | 3050 | if (!intel_crtc->active) |
2456 | return; | 3051 | return; |
@@ -2465,45 +3060,10 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) | |||
2465 | dev_priv->display.disable_fbc) | 3060 | dev_priv->display.disable_fbc) |
2466 | dev_priv->display.disable_fbc(dev); | 3061 | dev_priv->display.disable_fbc(dev); |
2467 | 3062 | ||
2468 | /* Disable display plane */ | 3063 | intel_disable_plane(dev_priv, plane, pipe); |
2469 | reg = DSPCNTR(plane); | 3064 | intel_disable_pipe(dev_priv, pipe); |
2470 | temp = I915_READ(reg); | 3065 | intel_disable_pll(dev_priv, pipe); |
2471 | if (temp & DISPLAY_PLANE_ENABLE) { | ||
2472 | I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE); | ||
2473 | /* Flush the plane changes */ | ||
2474 | intel_flush_display_plane(dev, plane); | ||
2475 | |||
2476 | /* Wait for vblank for the disable to take effect */ | ||
2477 | if (IS_GEN2(dev)) | ||
2478 | intel_wait_for_vblank(dev, pipe); | ||
2479 | } | ||
2480 | |||
2481 | /* Don't disable pipe A or pipe A PLLs if needed */ | ||
2482 | if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) | ||
2483 | goto done; | ||
2484 | |||
2485 | /* Next, disable display pipes */ | ||
2486 | reg = PIPECONF(pipe); | ||
2487 | temp = I915_READ(reg); | ||
2488 | if (temp & PIPECONF_ENABLE) { | ||
2489 | I915_WRITE(reg, temp & ~PIPECONF_ENABLE); | ||
2490 | |||
2491 | /* Wait for the pipe to turn off */ | ||
2492 | POSTING_READ(reg); | ||
2493 | intel_wait_for_pipe_off(dev, pipe); | ||
2494 | } | ||
2495 | |||
2496 | reg = DPLL(pipe); | ||
2497 | temp = I915_READ(reg); | ||
2498 | if (temp & DPLL_VCO_ENABLE) { | ||
2499 | I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE); | ||
2500 | |||
2501 | /* Wait for the clocks to turn off. */ | ||
2502 | POSTING_READ(reg); | ||
2503 | udelay(150); | ||
2504 | } | ||
2505 | 3066 | ||
2506 | done: | ||
2507 | intel_crtc->active = false; | 3067 | intel_crtc->active = false; |
2508 | intel_update_fbc(dev); | 3068 | intel_update_fbc(dev); |
2509 | intel_update_watermarks(dev); | 3069 | intel_update_watermarks(dev); |
@@ -2565,7 +3125,7 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2565 | master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0; | 3125 | master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0; |
2566 | break; | 3126 | break; |
2567 | default: | 3127 | default: |
2568 | DRM_ERROR("Can't update pipe %d in SAREA\n", pipe); | 3128 | DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe)); |
2569 | break; | 3129 | break; |
2570 | } | 3130 | } |
2571 | } | 3131 | } |
@@ -2762,77 +3322,77 @@ struct intel_watermark_params { | |||
2762 | }; | 3322 | }; |
2763 | 3323 | ||
2764 | /* Pineview has different values for various configs */ | 3324 | /* Pineview has different values for various configs */ |
2765 | static struct intel_watermark_params pineview_display_wm = { | 3325 | static const struct intel_watermark_params pineview_display_wm = { |
2766 | PINEVIEW_DISPLAY_FIFO, | 3326 | PINEVIEW_DISPLAY_FIFO, |
2767 | PINEVIEW_MAX_WM, | 3327 | PINEVIEW_MAX_WM, |
2768 | PINEVIEW_DFT_WM, | 3328 | PINEVIEW_DFT_WM, |
2769 | PINEVIEW_GUARD_WM, | 3329 | PINEVIEW_GUARD_WM, |
2770 | PINEVIEW_FIFO_LINE_SIZE | 3330 | PINEVIEW_FIFO_LINE_SIZE |
2771 | }; | 3331 | }; |
2772 | static struct intel_watermark_params pineview_display_hplloff_wm = { | 3332 | static const struct intel_watermark_params pineview_display_hplloff_wm = { |
2773 | PINEVIEW_DISPLAY_FIFO, | 3333 | PINEVIEW_DISPLAY_FIFO, |
2774 | PINEVIEW_MAX_WM, | 3334 | PINEVIEW_MAX_WM, |
2775 | PINEVIEW_DFT_HPLLOFF_WM, | 3335 | PINEVIEW_DFT_HPLLOFF_WM, |
2776 | PINEVIEW_GUARD_WM, | 3336 | PINEVIEW_GUARD_WM, |
2777 | PINEVIEW_FIFO_LINE_SIZE | 3337 | PINEVIEW_FIFO_LINE_SIZE |
2778 | }; | 3338 | }; |
2779 | static struct intel_watermark_params pineview_cursor_wm = { | 3339 | static const struct intel_watermark_params pineview_cursor_wm = { |
2780 | PINEVIEW_CURSOR_FIFO, | 3340 | PINEVIEW_CURSOR_FIFO, |
2781 | PINEVIEW_CURSOR_MAX_WM, | 3341 | PINEVIEW_CURSOR_MAX_WM, |
2782 | PINEVIEW_CURSOR_DFT_WM, | 3342 | PINEVIEW_CURSOR_DFT_WM, |
2783 | PINEVIEW_CURSOR_GUARD_WM, | 3343 | PINEVIEW_CURSOR_GUARD_WM, |
2784 | PINEVIEW_FIFO_LINE_SIZE, | 3344 | PINEVIEW_FIFO_LINE_SIZE, |
2785 | }; | 3345 | }; |
2786 | static struct intel_watermark_params pineview_cursor_hplloff_wm = { | 3346 | static const struct intel_watermark_params pineview_cursor_hplloff_wm = { |
2787 | PINEVIEW_CURSOR_FIFO, | 3347 | PINEVIEW_CURSOR_FIFO, |
2788 | PINEVIEW_CURSOR_MAX_WM, | 3348 | PINEVIEW_CURSOR_MAX_WM, |
2789 | PINEVIEW_CURSOR_DFT_WM, | 3349 | PINEVIEW_CURSOR_DFT_WM, |
2790 | PINEVIEW_CURSOR_GUARD_WM, | 3350 | PINEVIEW_CURSOR_GUARD_WM, |
2791 | PINEVIEW_FIFO_LINE_SIZE | 3351 | PINEVIEW_FIFO_LINE_SIZE |
2792 | }; | 3352 | }; |
2793 | static struct intel_watermark_params g4x_wm_info = { | 3353 | static const struct intel_watermark_params g4x_wm_info = { |
2794 | G4X_FIFO_SIZE, | 3354 | G4X_FIFO_SIZE, |
2795 | G4X_MAX_WM, | 3355 | G4X_MAX_WM, |
2796 | G4X_MAX_WM, | 3356 | G4X_MAX_WM, |
2797 | 2, | 3357 | 2, |
2798 | G4X_FIFO_LINE_SIZE, | 3358 | G4X_FIFO_LINE_SIZE, |
2799 | }; | 3359 | }; |
2800 | static struct intel_watermark_params g4x_cursor_wm_info = { | 3360 | static const struct intel_watermark_params g4x_cursor_wm_info = { |
2801 | I965_CURSOR_FIFO, | 3361 | I965_CURSOR_FIFO, |
2802 | I965_CURSOR_MAX_WM, | 3362 | I965_CURSOR_MAX_WM, |
2803 | I965_CURSOR_DFT_WM, | 3363 | I965_CURSOR_DFT_WM, |
2804 | 2, | 3364 | 2, |
2805 | G4X_FIFO_LINE_SIZE, | 3365 | G4X_FIFO_LINE_SIZE, |
2806 | }; | 3366 | }; |
2807 | static struct intel_watermark_params i965_cursor_wm_info = { | 3367 | static const struct intel_watermark_params i965_cursor_wm_info = { |
2808 | I965_CURSOR_FIFO, | 3368 | I965_CURSOR_FIFO, |
2809 | I965_CURSOR_MAX_WM, | 3369 | I965_CURSOR_MAX_WM, |
2810 | I965_CURSOR_DFT_WM, | 3370 | I965_CURSOR_DFT_WM, |
2811 | 2, | 3371 | 2, |
2812 | I915_FIFO_LINE_SIZE, | 3372 | I915_FIFO_LINE_SIZE, |
2813 | }; | 3373 | }; |
2814 | static struct intel_watermark_params i945_wm_info = { | 3374 | static const struct intel_watermark_params i945_wm_info = { |
2815 | I945_FIFO_SIZE, | 3375 | I945_FIFO_SIZE, |
2816 | I915_MAX_WM, | 3376 | I915_MAX_WM, |
2817 | 1, | 3377 | 1, |
2818 | 2, | 3378 | 2, |
2819 | I915_FIFO_LINE_SIZE | 3379 | I915_FIFO_LINE_SIZE |
2820 | }; | 3380 | }; |
2821 | static struct intel_watermark_params i915_wm_info = { | 3381 | static const struct intel_watermark_params i915_wm_info = { |
2822 | I915_FIFO_SIZE, | 3382 | I915_FIFO_SIZE, |
2823 | I915_MAX_WM, | 3383 | I915_MAX_WM, |
2824 | 1, | 3384 | 1, |
2825 | 2, | 3385 | 2, |
2826 | I915_FIFO_LINE_SIZE | 3386 | I915_FIFO_LINE_SIZE |
2827 | }; | 3387 | }; |
2828 | static struct intel_watermark_params i855_wm_info = { | 3388 | static const struct intel_watermark_params i855_wm_info = { |
2829 | I855GM_FIFO_SIZE, | 3389 | I855GM_FIFO_SIZE, |
2830 | I915_MAX_WM, | 3390 | I915_MAX_WM, |
2831 | 1, | 3391 | 1, |
2832 | 2, | 3392 | 2, |
2833 | I830_FIFO_LINE_SIZE | 3393 | I830_FIFO_LINE_SIZE |
2834 | }; | 3394 | }; |
2835 | static struct intel_watermark_params i830_wm_info = { | 3395 | static const struct intel_watermark_params i830_wm_info = { |
2836 | I830_FIFO_SIZE, | 3396 | I830_FIFO_SIZE, |
2837 | I915_MAX_WM, | 3397 | I915_MAX_WM, |
2838 | 1, | 3398 | 1, |
@@ -2840,31 +3400,28 @@ static struct intel_watermark_params i830_wm_info = { | |||
2840 | I830_FIFO_LINE_SIZE | 3400 | I830_FIFO_LINE_SIZE |
2841 | }; | 3401 | }; |
2842 | 3402 | ||
2843 | static struct intel_watermark_params ironlake_display_wm_info = { | 3403 | static const struct intel_watermark_params ironlake_display_wm_info = { |
2844 | ILK_DISPLAY_FIFO, | 3404 | ILK_DISPLAY_FIFO, |
2845 | ILK_DISPLAY_MAXWM, | 3405 | ILK_DISPLAY_MAXWM, |
2846 | ILK_DISPLAY_DFTWM, | 3406 | ILK_DISPLAY_DFTWM, |
2847 | 2, | 3407 | 2, |
2848 | ILK_FIFO_LINE_SIZE | 3408 | ILK_FIFO_LINE_SIZE |
2849 | }; | 3409 | }; |
2850 | 3410 | static const struct intel_watermark_params ironlake_cursor_wm_info = { | |
2851 | static struct intel_watermark_params ironlake_cursor_wm_info = { | ||
2852 | ILK_CURSOR_FIFO, | 3411 | ILK_CURSOR_FIFO, |
2853 | ILK_CURSOR_MAXWM, | 3412 | ILK_CURSOR_MAXWM, |
2854 | ILK_CURSOR_DFTWM, | 3413 | ILK_CURSOR_DFTWM, |
2855 | 2, | 3414 | 2, |
2856 | ILK_FIFO_LINE_SIZE | 3415 | ILK_FIFO_LINE_SIZE |
2857 | }; | 3416 | }; |
2858 | 3417 | static const struct intel_watermark_params ironlake_display_srwm_info = { | |
2859 | static struct intel_watermark_params ironlake_display_srwm_info = { | ||
2860 | ILK_DISPLAY_SR_FIFO, | 3418 | ILK_DISPLAY_SR_FIFO, |
2861 | ILK_DISPLAY_MAX_SRWM, | 3419 | ILK_DISPLAY_MAX_SRWM, |
2862 | ILK_DISPLAY_DFT_SRWM, | 3420 | ILK_DISPLAY_DFT_SRWM, |
2863 | 2, | 3421 | 2, |
2864 | ILK_FIFO_LINE_SIZE | 3422 | ILK_FIFO_LINE_SIZE |
2865 | }; | 3423 | }; |
2866 | 3424 | static const struct intel_watermark_params ironlake_cursor_srwm_info = { | |
2867 | static struct intel_watermark_params ironlake_cursor_srwm_info = { | ||
2868 | ILK_CURSOR_SR_FIFO, | 3425 | ILK_CURSOR_SR_FIFO, |
2869 | ILK_CURSOR_MAX_SRWM, | 3426 | ILK_CURSOR_MAX_SRWM, |
2870 | ILK_CURSOR_DFT_SRWM, | 3427 | ILK_CURSOR_DFT_SRWM, |
@@ -2872,31 +3429,28 @@ static struct intel_watermark_params ironlake_cursor_srwm_info = { | |||
2872 | ILK_FIFO_LINE_SIZE | 3429 | ILK_FIFO_LINE_SIZE |
2873 | }; | 3430 | }; |
2874 | 3431 | ||
2875 | static struct intel_watermark_params sandybridge_display_wm_info = { | 3432 | static const struct intel_watermark_params sandybridge_display_wm_info = { |
2876 | SNB_DISPLAY_FIFO, | 3433 | SNB_DISPLAY_FIFO, |
2877 | SNB_DISPLAY_MAXWM, | 3434 | SNB_DISPLAY_MAXWM, |
2878 | SNB_DISPLAY_DFTWM, | 3435 | SNB_DISPLAY_DFTWM, |
2879 | 2, | 3436 | 2, |
2880 | SNB_FIFO_LINE_SIZE | 3437 | SNB_FIFO_LINE_SIZE |
2881 | }; | 3438 | }; |
2882 | 3439 | static const struct intel_watermark_params sandybridge_cursor_wm_info = { | |
2883 | static struct intel_watermark_params sandybridge_cursor_wm_info = { | ||
2884 | SNB_CURSOR_FIFO, | 3440 | SNB_CURSOR_FIFO, |
2885 | SNB_CURSOR_MAXWM, | 3441 | SNB_CURSOR_MAXWM, |
2886 | SNB_CURSOR_DFTWM, | 3442 | SNB_CURSOR_DFTWM, |
2887 | 2, | 3443 | 2, |
2888 | SNB_FIFO_LINE_SIZE | 3444 | SNB_FIFO_LINE_SIZE |
2889 | }; | 3445 | }; |
2890 | 3446 | static const struct intel_watermark_params sandybridge_display_srwm_info = { | |
2891 | static struct intel_watermark_params sandybridge_display_srwm_info = { | ||
2892 | SNB_DISPLAY_SR_FIFO, | 3447 | SNB_DISPLAY_SR_FIFO, |
2893 | SNB_DISPLAY_MAX_SRWM, | 3448 | SNB_DISPLAY_MAX_SRWM, |
2894 | SNB_DISPLAY_DFT_SRWM, | 3449 | SNB_DISPLAY_DFT_SRWM, |
2895 | 2, | 3450 | 2, |
2896 | SNB_FIFO_LINE_SIZE | 3451 | SNB_FIFO_LINE_SIZE |
2897 | }; | 3452 | }; |
2898 | 3453 | static const struct intel_watermark_params sandybridge_cursor_srwm_info = { | |
2899 | static struct intel_watermark_params sandybridge_cursor_srwm_info = { | ||
2900 | SNB_CURSOR_SR_FIFO, | 3454 | SNB_CURSOR_SR_FIFO, |
2901 | SNB_CURSOR_MAX_SRWM, | 3455 | SNB_CURSOR_MAX_SRWM, |
2902 | SNB_CURSOR_DFT_SRWM, | 3456 | SNB_CURSOR_DFT_SRWM, |
@@ -2924,7 +3478,8 @@ static struct intel_watermark_params sandybridge_cursor_srwm_info = { | |||
2924 | * will occur, and a display engine hang could result. | 3478 | * will occur, and a display engine hang could result. |
2925 | */ | 3479 | */ |
2926 | static unsigned long intel_calculate_wm(unsigned long clock_in_khz, | 3480 | static unsigned long intel_calculate_wm(unsigned long clock_in_khz, |
2927 | struct intel_watermark_params *wm, | 3481 | const struct intel_watermark_params *wm, |
3482 | int fifo_size, | ||
2928 | int pixel_size, | 3483 | int pixel_size, |
2929 | unsigned long latency_ns) | 3484 | unsigned long latency_ns) |
2930 | { | 3485 | { |
@@ -2942,7 +3497,7 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz, | |||
2942 | 3497 | ||
2943 | DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required); | 3498 | DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required); |
2944 | 3499 | ||
2945 | wm_size = wm->fifo_size - (entries_required + wm->guard_size); | 3500 | wm_size = fifo_size - (entries_required + wm->guard_size); |
2946 | 3501 | ||
2947 | DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size); | 3502 | DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size); |
2948 | 3503 | ||
@@ -3115,15 +3670,28 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane) | |||
3115 | return size; | 3670 | return size; |
3116 | } | 3671 | } |
3117 | 3672 | ||
3118 | static void pineview_update_wm(struct drm_device *dev, int planea_clock, | 3673 | static struct drm_crtc *single_enabled_crtc(struct drm_device *dev) |
3119 | int planeb_clock, int sr_hdisplay, int unused, | 3674 | { |
3120 | int pixel_size) | 3675 | struct drm_crtc *crtc, *enabled = NULL; |
3676 | |||
3677 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
3678 | if (crtc->enabled && crtc->fb) { | ||
3679 | if (enabled) | ||
3680 | return NULL; | ||
3681 | enabled = crtc; | ||
3682 | } | ||
3683 | } | ||
3684 | |||
3685 | return enabled; | ||
3686 | } | ||
3687 | |||
3688 | static void pineview_update_wm(struct drm_device *dev) | ||
3121 | { | 3689 | { |
3122 | struct drm_i915_private *dev_priv = dev->dev_private; | 3690 | struct drm_i915_private *dev_priv = dev->dev_private; |
3691 | struct drm_crtc *crtc; | ||
3123 | const struct cxsr_latency *latency; | 3692 | const struct cxsr_latency *latency; |
3124 | u32 reg; | 3693 | u32 reg; |
3125 | unsigned long wm; | 3694 | unsigned long wm; |
3126 | int sr_clock; | ||
3127 | 3695 | ||
3128 | latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, | 3696 | latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, |
3129 | dev_priv->fsb_freq, dev_priv->mem_freq); | 3697 | dev_priv->fsb_freq, dev_priv->mem_freq); |
@@ -3133,11 +3701,14 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock, | |||
3133 | return; | 3701 | return; |
3134 | } | 3702 | } |
3135 | 3703 | ||
3136 | if (!planea_clock || !planeb_clock) { | 3704 | crtc = single_enabled_crtc(dev); |
3137 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 3705 | if (crtc) { |
3706 | int clock = crtc->mode.clock; | ||
3707 | int pixel_size = crtc->fb->bits_per_pixel / 8; | ||
3138 | 3708 | ||
3139 | /* Display SR */ | 3709 | /* Display SR */ |
3140 | wm = intel_calculate_wm(sr_clock, &pineview_display_wm, | 3710 | wm = intel_calculate_wm(clock, &pineview_display_wm, |
3711 | pineview_display_wm.fifo_size, | ||
3141 | pixel_size, latency->display_sr); | 3712 | pixel_size, latency->display_sr); |
3142 | reg = I915_READ(DSPFW1); | 3713 | reg = I915_READ(DSPFW1); |
3143 | reg &= ~DSPFW_SR_MASK; | 3714 | reg &= ~DSPFW_SR_MASK; |
@@ -3146,7 +3717,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock, | |||
3146 | DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); | 3717 | DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); |
3147 | 3718 | ||
3148 | /* cursor SR */ | 3719 | /* cursor SR */ |
3149 | wm = intel_calculate_wm(sr_clock, &pineview_cursor_wm, | 3720 | wm = intel_calculate_wm(clock, &pineview_cursor_wm, |
3721 | pineview_display_wm.fifo_size, | ||
3150 | pixel_size, latency->cursor_sr); | 3722 | pixel_size, latency->cursor_sr); |
3151 | reg = I915_READ(DSPFW3); | 3723 | reg = I915_READ(DSPFW3); |
3152 | reg &= ~DSPFW_CURSOR_SR_MASK; | 3724 | reg &= ~DSPFW_CURSOR_SR_MASK; |
@@ -3154,7 +3726,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock, | |||
3154 | I915_WRITE(DSPFW3, reg); | 3726 | I915_WRITE(DSPFW3, reg); |
3155 | 3727 | ||
3156 | /* Display HPLL off SR */ | 3728 | /* Display HPLL off SR */ |
3157 | wm = intel_calculate_wm(sr_clock, &pineview_display_hplloff_wm, | 3729 | wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm, |
3730 | pineview_display_hplloff_wm.fifo_size, | ||
3158 | pixel_size, latency->display_hpll_disable); | 3731 | pixel_size, latency->display_hpll_disable); |
3159 | reg = I915_READ(DSPFW3); | 3732 | reg = I915_READ(DSPFW3); |
3160 | reg &= ~DSPFW_HPLL_SR_MASK; | 3733 | reg &= ~DSPFW_HPLL_SR_MASK; |
@@ -3162,7 +3735,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock, | |||
3162 | I915_WRITE(DSPFW3, reg); | 3735 | I915_WRITE(DSPFW3, reg); |
3163 | 3736 | ||
3164 | /* cursor HPLL off SR */ | 3737 | /* cursor HPLL off SR */ |
3165 | wm = intel_calculate_wm(sr_clock, &pineview_cursor_hplloff_wm, | 3738 | wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, |
3739 | pineview_display_hplloff_wm.fifo_size, | ||
3166 | pixel_size, latency->cursor_hpll_disable); | 3740 | pixel_size, latency->cursor_hpll_disable); |
3167 | reg = I915_READ(DSPFW3); | 3741 | reg = I915_READ(DSPFW3); |
3168 | reg &= ~DSPFW_HPLL_CURSOR_MASK; | 3742 | reg &= ~DSPFW_HPLL_CURSOR_MASK; |
@@ -3180,125 +3754,229 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock, | |||
3180 | } | 3754 | } |
3181 | } | 3755 | } |
3182 | 3756 | ||
3183 | static void g4x_update_wm(struct drm_device *dev, int planea_clock, | 3757 | static bool g4x_compute_wm0(struct drm_device *dev, |
3184 | int planeb_clock, int sr_hdisplay, int sr_htotal, | 3758 | int plane, |
3185 | int pixel_size) | 3759 | const struct intel_watermark_params *display, |
3760 | int display_latency_ns, | ||
3761 | const struct intel_watermark_params *cursor, | ||
3762 | int cursor_latency_ns, | ||
3763 | int *plane_wm, | ||
3764 | int *cursor_wm) | ||
3186 | { | 3765 | { |
3187 | struct drm_i915_private *dev_priv = dev->dev_private; | 3766 | struct drm_crtc *crtc; |
3188 | int total_size, cacheline_size; | 3767 | int htotal, hdisplay, clock, pixel_size; |
3189 | int planea_wm, planeb_wm, cursora_wm, cursorb_wm, cursor_sr; | 3768 | int line_time_us, line_count; |
3190 | struct intel_watermark_params planea_params, planeb_params; | 3769 | int entries, tlb_miss; |
3191 | unsigned long line_time_us; | ||
3192 | int sr_clock, sr_entries = 0, entries_required; | ||
3193 | 3770 | ||
3194 | /* Create copies of the base settings for each pipe */ | 3771 | crtc = intel_get_crtc_for_plane(dev, plane); |
3195 | planea_params = planeb_params = g4x_wm_info; | 3772 | if (crtc->fb == NULL || !crtc->enabled) |
3773 | return false; | ||
3196 | 3774 | ||
3197 | /* Grab a couple of global values before we overwrite them */ | 3775 | htotal = crtc->mode.htotal; |
3198 | total_size = planea_params.fifo_size; | 3776 | hdisplay = crtc->mode.hdisplay; |
3199 | cacheline_size = planea_params.cacheline_size; | 3777 | clock = crtc->mode.clock; |
3778 | pixel_size = crtc->fb->bits_per_pixel / 8; | ||
3200 | 3779 | ||
3201 | /* | 3780 | /* Use the small buffer method to calculate plane watermark */ |
3202 | * Note: we need to make sure we don't overflow for various clock & | 3781 | entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; |
3203 | * latency values. | 3782 | tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8; |
3204 | * clocks go from a few thousand to several hundred thousand. | 3783 | if (tlb_miss > 0) |
3205 | * latency is usually a few thousand | 3784 | entries += tlb_miss; |
3206 | */ | 3785 | entries = DIV_ROUND_UP(entries, display->cacheline_size); |
3207 | entries_required = ((planea_clock / 1000) * pixel_size * latency_ns) / | 3786 | *plane_wm = entries + display->guard_size; |
3208 | 1000; | 3787 | if (*plane_wm > (int)display->max_wm) |
3209 | entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE); | 3788 | *plane_wm = display->max_wm; |
3210 | planea_wm = entries_required + planea_params.guard_size; | ||
3211 | 3789 | ||
3212 | entries_required = ((planeb_clock / 1000) * pixel_size * latency_ns) / | 3790 | /* Use the large buffer method to calculate cursor watermark */ |
3213 | 1000; | 3791 | line_time_us = ((htotal * 1000) / clock); |
3214 | entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE); | 3792 | line_count = (cursor_latency_ns / line_time_us + 1000) / 1000; |
3215 | planeb_wm = entries_required + planeb_params.guard_size; | 3793 | entries = line_count * 64 * pixel_size; |
3794 | tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8; | ||
3795 | if (tlb_miss > 0) | ||
3796 | entries += tlb_miss; | ||
3797 | entries = DIV_ROUND_UP(entries, cursor->cacheline_size); | ||
3798 | *cursor_wm = entries + cursor->guard_size; | ||
3799 | if (*cursor_wm > (int)cursor->max_wm) | ||
3800 | *cursor_wm = (int)cursor->max_wm; | ||
3216 | 3801 | ||
3217 | cursora_wm = cursorb_wm = 16; | 3802 | return true; |
3218 | cursor_sr = 32; | 3803 | } |
3219 | 3804 | ||
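To get a feel for the small- and large-buffer arithmetic in g4x_compute_wm0() above, the throwaway user-space calculation below runs the same expressions on invented numbers; the latency, cacheline size and guard size are assumptions for illustration only, not values taken from the i915 headers:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Invented mode/hardware numbers, for arithmetic only. */
	int clock = 108000;	/* pixel clock in kHz			*/
	int htotal = 1688;	/* total horizontal timing, in pixels	*/
	int pixel_size = 4;	/* bytes per pixel			*/
	int latency_ns = 400;	/* assumed latency			*/
	int cacheline = 64;	/* assumed FIFO line size, in bytes	*/
	int guard = 2;		/* guard entries, as in g4x_wm_info	*/

	/* Small buffer method: bytes fetched during the latency window. */
	int entries = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	int plane_wm = DIV_ROUND_UP(entries, cacheline) + guard;

	/* Line count used by the large buffer method (ns vs us handled
	 * the same way as in the driver code above). */
	int line_time_us = (htotal * 1000) / clock;
	int line_count = (latency_ns / line_time_us + 1000) / 1000;

	printf("entries=%d plane_wm=%d line_time_us=%d line_count=%d\n",
	       entries, plane_wm, line_time_us, line_count);
	return 0;
}

With these made-up inputs it prints entries=172 plane_wm=5 line_time_us=15 line_count=1.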
3220 | DRM_DEBUG("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); | 3805 | /* |
3806 | * Check the wm result. | ||
3807 | * | ||
3808 | * If any calculated watermark value is larger than the maximum value that | ||
3809 | * can be programmed into the associated watermark register, that watermark | ||
3810 | * must be disabled. | ||
3811 | */ | ||
3812 | static bool g4x_check_srwm(struct drm_device *dev, | ||
3813 | int display_wm, int cursor_wm, | ||
3814 | const struct intel_watermark_params *display, | ||
3815 | const struct intel_watermark_params *cursor) | ||
3816 | { | ||
3817 | DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n", | ||
3818 | display_wm, cursor_wm); | ||
3221 | 3819 | ||
3222 | /* Calc sr entries for one plane configs */ | 3820 | if (display_wm > display->max_wm) { |
3223 | if (sr_hdisplay && (!planea_clock || !planeb_clock)) { | 3821 | DRM_DEBUG_KMS("display watermark is too large(%d), disabling\n", |
3224 | /* self-refresh has much higher latency */ | 3822 | display_wm, display->max_wm); |
3225 | static const int sr_latency_ns = 12000; | 3823 | return false; |
3824 | } | ||
3226 | 3825 | ||
3227 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 3826 | if (cursor_wm > cursor->max_wm) { |
3228 | line_time_us = ((sr_htotal * 1000) / sr_clock); | 3827 | DRM_DEBUG_KMS("cursor watermark is too large(%d), disabling\n", |
3828 | cursor_wm, cursor->max_wm); | ||
3829 | return false; | ||
3830 | } | ||
3229 | 3831 | ||
3230 | /* Use ns/us then divide to preserve precision */ | 3832 | if (!(display_wm || cursor_wm)) { |
3231 | sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * | 3833 | DRM_DEBUG_KMS("SR latency is 0, disabling\n"); |
3232 | pixel_size * sr_hdisplay; | 3834 | return false; |
3233 | sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size); | 3835 | } |
3234 | |||
3235 | entries_required = (((sr_latency_ns / line_time_us) + | ||
3236 | 1000) / 1000) * pixel_size * 64; | ||
3237 | entries_required = DIV_ROUND_UP(entries_required, | ||
3238 | g4x_cursor_wm_info.cacheline_size); | ||
3239 | cursor_sr = entries_required + g4x_cursor_wm_info.guard_size; | ||
3240 | |||
3241 | if (cursor_sr > g4x_cursor_wm_info.max_wm) | ||
3242 | cursor_sr = g4x_cursor_wm_info.max_wm; | ||
3243 | DRM_DEBUG_KMS("self-refresh watermark: display plane %d " | ||
3244 | "cursor %d\n", sr_entries, cursor_sr); | ||
3245 | 3836 | ||
3246 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); | 3837 | return true; |
3247 | } else { | 3838 | } |
3248 | /* Turn off self refresh if both pipes are enabled */ | 3839 | |
3249 | I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) | 3840 | static bool g4x_compute_srwm(struct drm_device *dev, |
3250 | & ~FW_BLC_SELF_EN); | 3841 | int plane, |
3842 | int latency_ns, | ||
3843 | const struct intel_watermark_params *display, | ||
3844 | const struct intel_watermark_params *cursor, | ||
3845 | int *display_wm, int *cursor_wm) | ||
3846 | { | ||
3847 | struct drm_crtc *crtc; | ||
3848 | int hdisplay, htotal, pixel_size, clock; | ||
3849 | unsigned long line_time_us; | ||
3850 | int line_count, line_size; | ||
3851 | int small, large; | ||
3852 | int entries; | ||
3853 | |||
3854 | if (!latency_ns) { | ||
3855 | *display_wm = *cursor_wm = 0; | ||
3856 | return false; | ||
3251 | } | 3857 | } |
3252 | 3858 | ||
3253 | DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n", | 3859 | crtc = intel_get_crtc_for_plane(dev, plane); |
3254 | planea_wm, planeb_wm, sr_entries); | 3860 | hdisplay = crtc->mode.hdisplay; |
3861 | htotal = crtc->mode.htotal; | ||
3862 | clock = crtc->mode.clock; | ||
3863 | pixel_size = crtc->fb->bits_per_pixel / 8; | ||
3864 | |||
3865 | line_time_us = (htotal * 1000) / clock; | ||
3866 | line_count = (latency_ns / line_time_us + 1000) / 1000; | ||
3867 | line_size = hdisplay * pixel_size; | ||
3868 | |||
3869 | /* Use the minimum of the small and large buffer method for primary */ | ||
3870 | small = ((clock * pixel_size / 1000) * latency_ns) / 1000; | ||
3871 | large = line_count * line_size; | ||
3872 | |||
3873 | entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); | ||
3874 | *display_wm = entries + display->guard_size; | ||
3875 | |||
3876 | /* calculate the self-refresh watermark for display cursor */ | ||
3877 | entries = line_count * pixel_size * 64; | ||
3878 | entries = DIV_ROUND_UP(entries, cursor->cacheline_size); | ||
3879 | *cursor_wm = entries + cursor->guard_size; | ||
3880 | |||
3881 | return g4x_check_srwm(dev, | ||
3882 | *display_wm, *cursor_wm, | ||
3883 | display, cursor); | ||
3884 | } | ||
3885 | |||
3886 | static inline bool single_plane_enabled(unsigned int mask) | ||
3887 | { | ||
3888 | return mask && (mask & -mask) == 0; | ||
3889 | } | ||
3890 | |||
3891 | static void g4x_update_wm(struct drm_device *dev) | ||
3892 | { | ||
3893 | static const int sr_latency_ns = 12000; | ||
3894 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3895 | int planea_wm, planeb_wm, cursora_wm, cursorb_wm; | ||
3896 | int plane_sr, cursor_sr; | ||
3897 | unsigned int enabled = 0; | ||
3898 | |||
3899 | if (g4x_compute_wm0(dev, 0, | ||
3900 | &g4x_wm_info, latency_ns, | ||
3901 | &g4x_cursor_wm_info, latency_ns, | ||
3902 | &planea_wm, &cursora_wm)) | ||
3903 | enabled |= 1; | ||
3904 | |||
3905 | if (g4x_compute_wm0(dev, 1, | ||
3906 | &g4x_wm_info, latency_ns, | ||
3907 | &g4x_cursor_wm_info, latency_ns, | ||
3908 | &planeb_wm, &cursorb_wm)) | ||
3909 | enabled |= 2; | ||
3910 | |||
3911 | plane_sr = cursor_sr = 0; | ||
3912 | if (single_plane_enabled(enabled) && | ||
3913 | g4x_compute_srwm(dev, ffs(enabled) - 1, | ||
3914 | sr_latency_ns, | ||
3915 | &g4x_wm_info, | ||
3916 | &g4x_cursor_wm_info, | ||
3917 | &plane_sr, &cursor_sr)) | ||
3918 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); | ||
3919 | else | ||
3920 | I915_WRITE(FW_BLC_SELF, | ||
3921 | I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN); | ||
3255 | 3922 | ||
3256 | planea_wm &= 0x3f; | 3923 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", |
3257 | planeb_wm &= 0x3f; | 3924 | planea_wm, cursora_wm, |
3925 | planeb_wm, cursorb_wm, | ||
3926 | plane_sr, cursor_sr); | ||
3258 | 3927 | ||
3259 | I915_WRITE(DSPFW1, (sr_entries << DSPFW_SR_SHIFT) | | 3928 | I915_WRITE(DSPFW1, |
3929 | (plane_sr << DSPFW_SR_SHIFT) | | ||
3260 | (cursorb_wm << DSPFW_CURSORB_SHIFT) | | 3930 | (cursorb_wm << DSPFW_CURSORB_SHIFT) | |
3261 | (planeb_wm << DSPFW_PLANEB_SHIFT) | planea_wm); | 3931 | (planeb_wm << DSPFW_PLANEB_SHIFT) | |
3262 | I915_WRITE(DSPFW2, (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) | | 3932 | planea_wm); |
3933 | I915_WRITE(DSPFW2, | ||
3934 | (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) | | ||
3263 | (cursora_wm << DSPFW_CURSORA_SHIFT)); | 3935 | (cursora_wm << DSPFW_CURSORA_SHIFT)); |
3264 | /* HPLL off in SR has some issues on G4x... disable it */ | 3936 | /* HPLL off in SR has some issues on G4x... disable it */ |
3265 | I915_WRITE(DSPFW3, (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) | | 3937 | I915_WRITE(DSPFW3, |
3938 | (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) | | ||
3266 | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); | 3939 | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); |
3267 | } | 3940 | } |
3268 | 3941 | ||
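g4x_update_wm() collects a bitmask of planes whose WM0 computation succeeded and only programs the self-refresh watermark when exactly one plane is active, recovering the plane index with ffs(). A standalone sketch of that pattern; the single-bit test below uses the conventional is_power_of_2()-style check for illustration rather than being copied from the hunk, and the plane numbering is assumed:

    #include <stdio.h>
    #include <stdbool.h>
    #include <strings.h>        /* ffs() */

    /* Conventional "exactly one bit set" test: non-zero and a power of two. */
    static bool single_plane_enabled(unsigned int mask)
    {
        return mask && (mask & (mask - 1)) == 0;
    }

    int main(void)
    {
        unsigned int enabled = 0;

        enabled |= 1;           /* plane A produced a valid WM0 */
        /* enabled |= 2; */     /* plane B deliberately left off in this example */

        if (single_plane_enabled(enabled))
            printf("program SR watermarks for plane %d\n", ffs(enabled) - 1);
        else
            printf("zero or multiple planes enabled, self-refresh stays off\n");
        return 0;
    }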
3269 | static void i965_update_wm(struct drm_device *dev, int planea_clock, | 3942 | static void i965_update_wm(struct drm_device *dev) |
3270 | int planeb_clock, int sr_hdisplay, int sr_htotal, | ||
3271 | int pixel_size) | ||
3272 | { | 3943 | { |
3273 | struct drm_i915_private *dev_priv = dev->dev_private; | 3944 | struct drm_i915_private *dev_priv = dev->dev_private; |
3274 | unsigned long line_time_us; | 3945 | struct drm_crtc *crtc; |
3275 | int sr_clock, sr_entries, srwm = 1; | 3946 | int srwm = 1; |
3276 | int cursor_sr = 16; | 3947 | int cursor_sr = 16; |
3277 | 3948 | ||
3278 | /* Calc sr entries for one plane configs */ | 3949 | /* Calc sr entries for one plane configs */ |
3279 | if (sr_hdisplay && (!planea_clock || !planeb_clock)) { | 3950 | crtc = single_enabled_crtc(dev); |
3951 | if (crtc) { | ||
3280 | /* self-refresh has much higher latency */ | 3952 | /* self-refresh has much higher latency */ |
3281 | static const int sr_latency_ns = 12000; | 3953 | static const int sr_latency_ns = 12000; |
3954 | int clock = crtc->mode.clock; | ||
3955 | int htotal = crtc->mode.htotal; | ||
3956 | int hdisplay = crtc->mode.hdisplay; | ||
3957 | int pixel_size = crtc->fb->bits_per_pixel / 8; | ||
3958 | unsigned long line_time_us; | ||
3959 | int entries; | ||
3282 | 3960 | ||
3283 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 3961 | line_time_us = ((htotal * 1000) / clock); |
3284 | line_time_us = ((sr_htotal * 1000) / sr_clock); | ||
3285 | 3962 | ||
3286 | /* Use ns/us then divide to preserve precision */ | 3963 | /* Use ns/us then divide to preserve precision */ |
3287 | sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * | 3964 | entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * |
3288 | pixel_size * sr_hdisplay; | 3965 | pixel_size * hdisplay; |
3289 | sr_entries = DIV_ROUND_UP(sr_entries, I915_FIFO_LINE_SIZE); | 3966 | entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE); |
3290 | DRM_DEBUG("self-refresh entries: %d\n", sr_entries); | 3967 | srwm = I965_FIFO_SIZE - entries; |
3291 | srwm = I965_FIFO_SIZE - sr_entries; | ||
3292 | if (srwm < 0) | 3968 | if (srwm < 0) |
3293 | srwm = 1; | 3969 | srwm = 1; |
3294 | srwm &= 0x1ff; | 3970 | srwm &= 0x1ff; |
3971 | DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n", | ||
3972 | entries, srwm); | ||
3295 | 3973 | ||
3296 | sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * | 3974 | entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * |
3297 | pixel_size * 64; | 3975 | pixel_size * 64; |
3298 | sr_entries = DIV_ROUND_UP(sr_entries, | 3976 | entries = DIV_ROUND_UP(entries, |
3299 | i965_cursor_wm_info.cacheline_size); | 3977 | i965_cursor_wm_info.cacheline_size); |
3300 | cursor_sr = i965_cursor_wm_info.fifo_size - | 3978 | cursor_sr = i965_cursor_wm_info.fifo_size - |
3301 | (sr_entries + i965_cursor_wm_info.guard_size); | 3979 | (entries + i965_cursor_wm_info.guard_size); |
3302 | 3980 | ||
3303 | if (cursor_sr > i965_cursor_wm_info.max_wm) | 3981 | if (cursor_sr > i965_cursor_wm_info.max_wm) |
3304 | cursor_sr = i965_cursor_wm_info.max_wm; | 3982 | cursor_sr = i965_cursor_wm_info.max_wm; |
@@ -3319,46 +3997,56 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, | |||
3319 | srwm); | 3997 | srwm); |
3320 | 3998 | ||
3321 | /* 965 has limitations... */ | 3999 | /* 965 has limitations... */ |
3322 | I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) | | 4000 | I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | |
3323 | (8 << 0)); | 4001 | (8 << 16) | (8 << 8) | (8 << 0)); |
3324 | I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); | 4002 | I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); |
3325 | /* update cursor SR watermark */ | 4003 | /* update cursor SR watermark */ |
3326 | I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); | 4004 | I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); |
3327 | } | 4005 | } |
3328 | 4006 | ||
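Both i965_update_wm() and i830_update_wm() lean on single_enabled_crtc(), which this patch adds elsewhere in the file and which is not visible in these hunks. A self-contained analogue of what such a helper presumably does, returning the lone active CRTC (enabled and with a framebuffer attached) or NULL when zero or several qualify; the struct below is a stand-in, not the driver's drm_crtc:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct fake_crtc {
        bool enabled;
        bool has_fb;
    };

    /* Walk all CRTCs; return the single qualifying one, or NULL otherwise. */
    static struct fake_crtc *single_enabled_crtc(struct fake_crtc *crtcs, int n)
    {
        struct fake_crtc *enabled = NULL;
        int i;

        for (i = 0; i < n; i++) {
            if (crtcs[i].enabled && crtcs[i].has_fb) {
                if (enabled)
                    return NULL;    /* more than one active: no self-refresh */
                enabled = &crtcs[i];
            }
        }
        return enabled;
    }

    int main(void)
    {
        struct fake_crtc crtcs[2] = { { true, true }, { false, false } };

        printf("%s\n", single_enabled_crtc(crtcs, 2) ?
               "exactly one CRTC active" : "not exactly one CRTC active");
        return 0;
    }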
3329 | static void i9xx_update_wm(struct drm_device *dev, int planea_clock, | 4007 | static void i9xx_update_wm(struct drm_device *dev) |
3330 | int planeb_clock, int sr_hdisplay, int sr_htotal, | ||
3331 | int pixel_size) | ||
3332 | { | 4008 | { |
3333 | struct drm_i915_private *dev_priv = dev->dev_private; | 4009 | struct drm_i915_private *dev_priv = dev->dev_private; |
4010 | const struct intel_watermark_params *wm_info; | ||
3334 | uint32_t fwater_lo; | 4011 | uint32_t fwater_lo; |
3335 | uint32_t fwater_hi; | 4012 | uint32_t fwater_hi; |
3336 | int total_size, cacheline_size, cwm, srwm = 1; | 4013 | int cwm, srwm = 1; |
4014 | int fifo_size; | ||
3337 | int planea_wm, planeb_wm; | 4015 | int planea_wm, planeb_wm; |
3338 | struct intel_watermark_params planea_params, planeb_params; | 4016 | struct drm_crtc *crtc, *enabled = NULL; |
3339 | unsigned long line_time_us; | ||
3340 | int sr_clock, sr_entries = 0; | ||
3341 | 4017 | ||
3342 | /* Create copies of the base settings for each pipe */ | 4018 | if (IS_I945GM(dev)) |
3343 | if (IS_CRESTLINE(dev) || IS_I945GM(dev)) | 4019 | wm_info = &i945_wm_info; |
3344 | planea_params = planeb_params = i945_wm_info; | ||
3345 | else if (!IS_GEN2(dev)) | 4020 | else if (!IS_GEN2(dev)) |
3346 | planea_params = planeb_params = i915_wm_info; | 4021 | wm_info = &i915_wm_info; |
3347 | else | 4022 | else |
3348 | planea_params = planeb_params = i855_wm_info; | 4023 | wm_info = &i855_wm_info; |
3349 | 4024 | ||
3350 | /* Grab a couple of global values before we overwrite them */ | 4025 | fifo_size = dev_priv->display.get_fifo_size(dev, 0); |
3351 | total_size = planea_params.fifo_size; | 4026 | crtc = intel_get_crtc_for_plane(dev, 0); |
3352 | cacheline_size = planea_params.cacheline_size; | 4027 | if (crtc->enabled && crtc->fb) { |
3353 | 4028 | planea_wm = intel_calculate_wm(crtc->mode.clock, | |
3354 | /* Update per-plane FIFO sizes */ | 4029 | wm_info, fifo_size, |
3355 | planea_params.fifo_size = dev_priv->display.get_fifo_size(dev, 0); | 4030 | crtc->fb->bits_per_pixel / 8, |
3356 | planeb_params.fifo_size = dev_priv->display.get_fifo_size(dev, 1); | 4031 | latency_ns); |
4032 | enabled = crtc; | ||
4033 | } else | ||
4034 | planea_wm = fifo_size - wm_info->guard_size; | ||
4035 | |||
4036 | fifo_size = dev_priv->display.get_fifo_size(dev, 1); | ||
4037 | crtc = intel_get_crtc_for_plane(dev, 1); | ||
4038 | if (crtc->enabled && crtc->fb) { | ||
4039 | planeb_wm = intel_calculate_wm(crtc->mode.clock, | ||
4040 | wm_info, fifo_size, | ||
4041 | crtc->fb->bits_per_pixel / 8, | ||
4042 | latency_ns); | ||
4043 | if (enabled == NULL) | ||
4044 | enabled = crtc; | ||
4045 | else | ||
4046 | enabled = NULL; | ||
4047 | } else | ||
4048 | planeb_wm = fifo_size - wm_info->guard_size; | ||
3357 | 4049 | ||
3358 | planea_wm = intel_calculate_wm(planea_clock, &planea_params, | ||
3359 | pixel_size, latency_ns); | ||
3360 | planeb_wm = intel_calculate_wm(planeb_clock, &planeb_params, | ||
3361 | pixel_size, latency_ns); | ||
3362 | DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); | 4050 | DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); |
3363 | 4051 | ||
3364 | /* | 4052 | /* |
@@ -3366,39 +4054,39 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, | |||
3366 | */ | 4054 | */ |
3367 | cwm = 2; | 4055 | cwm = 2; |
3368 | 4056 | ||
4057 | /* Play safe and disable self-refresh before adjusting watermarks. */ | ||
4058 | if (IS_I945G(dev) || IS_I945GM(dev)) | ||
4059 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0); | ||
4060 | else if (IS_I915GM(dev)) | ||
4061 | I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN); | ||
4062 | |||
3369 | /* Calc sr entries for one plane configs */ | 4063 | /* Calc sr entries for one plane configs */ |
3370 | if (HAS_FW_BLC(dev) && sr_hdisplay && | 4064 | if (HAS_FW_BLC(dev) && enabled) { |
3371 | (!planea_clock || !planeb_clock)) { | ||
3372 | /* self-refresh has much higher latency */ | 4065 | /* self-refresh has much higher latency */ |
3373 | static const int sr_latency_ns = 6000; | 4066 | static const int sr_latency_ns = 6000; |
4067 | int clock = enabled->mode.clock; | ||
4068 | int htotal = enabled->mode.htotal; | ||
4069 | int hdisplay = enabled->mode.hdisplay; | ||
4070 | int pixel_size = enabled->fb->bits_per_pixel / 8; | ||
4071 | unsigned long line_time_us; | ||
4072 | int entries; | ||
3374 | 4073 | ||
3375 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 4074 | line_time_us = (htotal * 1000) / clock; |
3376 | line_time_us = ((sr_htotal * 1000) / sr_clock); | ||
3377 | 4075 | ||
3378 | /* Use ns/us then divide to preserve precision */ | 4076 | /* Use ns/us then divide to preserve precision */ |
3379 | sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * | 4077 | entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * |
3380 | pixel_size * sr_hdisplay; | 4078 | pixel_size * hdisplay; |
3381 | sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size); | 4079 | entries = DIV_ROUND_UP(entries, wm_info->cacheline_size); |
3382 | DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries); | 4080 | DRM_DEBUG_KMS("self-refresh entries: %d\n", entries); |
3383 | srwm = total_size - sr_entries; | 4081 | srwm = wm_info->fifo_size - entries; |
3384 | if (srwm < 0) | 4082 | if (srwm < 0) |
3385 | srwm = 1; | 4083 | srwm = 1; |
3386 | 4084 | ||
3387 | if (IS_I945G(dev) || IS_I945GM(dev)) | 4085 | if (IS_I945G(dev) || IS_I945GM(dev)) |
3388 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); | 4086 | I915_WRITE(FW_BLC_SELF, |
3389 | else if (IS_I915GM(dev)) { | 4087 | FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); |
3390 | /* 915M has a smaller SRWM field */ | 4088 | else if (IS_I915GM(dev)) |
3391 | I915_WRITE(FW_BLC_SELF, srwm & 0x3f); | 4089 | I915_WRITE(FW_BLC_SELF, srwm & 0x3f); |
3392 | I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN); | ||
3393 | } | ||
3394 | } else { | ||
3395 | /* Turn off self refresh if both pipes are enabled */ | ||
3396 | if (IS_I945G(dev) || IS_I945GM(dev)) { | ||
3397 | I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) | ||
3398 | & ~FW_BLC_SELF_EN); | ||
3399 | } else if (IS_I915GM(dev)) { | ||
3400 | I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN); | ||
3401 | } | ||
3402 | } | 4090 | } |
3403 | 4091 | ||
3404 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", | 4092 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", |
@@ -3413,19 +4101,36 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, | |||
3413 | 4101 | ||
3414 | I915_WRITE(FW_BLC, fwater_lo); | 4102 | I915_WRITE(FW_BLC, fwater_lo); |
3415 | I915_WRITE(FW_BLC2, fwater_hi); | 4103 | I915_WRITE(FW_BLC2, fwater_hi); |
4104 | |||
4105 | if (HAS_FW_BLC(dev)) { | ||
4106 | if (enabled) { | ||
4107 | if (IS_I945G(dev) || IS_I945GM(dev)) | ||
4108 | I915_WRITE(FW_BLC_SELF, | ||
4109 | FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); | ||
4110 | else if (IS_I915GM(dev)) | ||
4111 | I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN); | ||
4112 | DRM_DEBUG_KMS("memory self refresh enabled\n"); | ||
4113 | } else | ||
4114 | DRM_DEBUG_KMS("memory self refresh disabled\n"); | ||
4115 | } | ||
3416 | } | 4116 | } |
3417 | 4117 | ||
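The i9xx self-refresh path above expresses the latency in whole scanlines, converts that into FIFO entries and subtracts from the FIFO size. A quick worked example of the arithmetic with assumed numbers; the mode, cacheline and FIFO values are illustrative, not taken from i915_wm_info:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        int clock = 65000;              /* kHz, roughly a 1024x768@60 mode (assumed) */
        int htotal = 1344, hdisplay = 1024;
        int pixel_size = 4;             /* 32bpp */
        int sr_latency_ns = 6000;       /* as in the hunk above */
        int cacheline_size = 64, fifo_size = 96;    /* assumed wm_info values */

        int line_time_us = (htotal * 1000) / clock;     /* 20 us here */
        /* latency rounded up to whole lines, times bytes per line */
        int entries = ((sr_latency_ns / line_time_us) + 1000) / 1000 *
                      pixel_size * hdisplay;
        int srwm;

        entries = DIV_ROUND_UP(entries, cacheline_size);
        srwm = fifo_size - entries;
        if (srwm < 0)
            srwm = 1;

        printf("self-refresh entries=%d srwm=%d\n", entries, srwm);
        return 0;
    }

With these assumptions one scanline (4096 bytes) costs 64 cachelines, leaving an SRWM of 32 out of a 96-entry FIFO.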
3418 | static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, | 4118 | static void i830_update_wm(struct drm_device *dev) |
3419 | int unused2, int unused3, int pixel_size) | ||
3420 | { | 4119 | { |
3421 | struct drm_i915_private *dev_priv = dev->dev_private; | 4120 | struct drm_i915_private *dev_priv = dev->dev_private; |
3422 | uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff; | 4121 | struct drm_crtc *crtc; |
4122 | uint32_t fwater_lo; | ||
3423 | int planea_wm; | 4123 | int planea_wm; |
3424 | 4124 | ||
3425 | i830_wm_info.fifo_size = dev_priv->display.get_fifo_size(dev, 0); | 4125 | crtc = single_enabled_crtc(dev); |
4126 | if (crtc == NULL) | ||
4127 | return; | ||
3426 | 4128 | ||
3427 | planea_wm = intel_calculate_wm(planea_clock, &i830_wm_info, | 4129 | planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info, |
3428 | pixel_size, latency_ns); | 4130 | dev_priv->display.get_fifo_size(dev, 0), |
4131 | crtc->fb->bits_per_pixel / 8, | ||
4132 | latency_ns); | ||
4133 | fwater_lo = I915_READ(FW_BLC) & ~0xfff; | ||
3429 | fwater_lo |= (3<<8) | planea_wm; | 4134 | fwater_lo |= (3<<8) | planea_wm; |
3430 | 4135 | ||
3431 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm); | 4136 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm); |
@@ -3534,15 +4239,15 @@ static bool ironlake_check_srwm(struct drm_device *dev, int level, | |||
3534 | /* | 4239 | /* |
3535 | * Compute watermark values of WM[1-3], | 4240 | * Compute watermark values of WM[1-3], |
3536 | */ | 4241 | */ |
3537 | static bool ironlake_compute_srwm(struct drm_device *dev, int level, | 4242 | static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane, |
3538 | int hdisplay, int htotal, | 4243 | int latency_ns, |
3539 | int pixel_size, int clock, int latency_ns, | ||
3540 | const struct intel_watermark_params *display, | 4244 | const struct intel_watermark_params *display, |
3541 | const struct intel_watermark_params *cursor, | 4245 | const struct intel_watermark_params *cursor, |
3542 | int *fbc_wm, int *display_wm, int *cursor_wm) | 4246 | int *fbc_wm, int *display_wm, int *cursor_wm) |
3543 | { | 4247 | { |
3544 | 4248 | struct drm_crtc *crtc; | |
3545 | unsigned long line_time_us; | 4249 | unsigned long line_time_us; |
4250 | int hdisplay, htotal, pixel_size, clock; | ||
3546 | int line_count, line_size; | 4251 | int line_count, line_size; |
3547 | int small, large; | 4252 | int small, large; |
3548 | int entries; | 4253 | int entries; |
@@ -3552,6 +4257,12 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, | |||
3552 | return false; | 4257 | return false; |
3553 | } | 4258 | } |
3554 | 4259 | ||
4260 | crtc = intel_get_crtc_for_plane(dev, plane); | ||
4261 | hdisplay = crtc->mode.hdisplay; | ||
4262 | htotal = crtc->mode.htotal; | ||
4263 | clock = crtc->mode.clock; | ||
4264 | pixel_size = crtc->fb->bits_per_pixel / 8; | ||
4265 | |||
3555 | line_time_us = (htotal * 1000) / clock; | 4266 | line_time_us = (htotal * 1000) / clock; |
3556 | line_count = (latency_ns / line_time_us + 1000) / 1000; | 4267 | line_count = (latency_ns / line_time_us + 1000) / 1000; |
3557 | line_size = hdisplay * pixel_size; | 4268 | line_size = hdisplay * pixel_size; |
@@ -3579,14 +4290,11 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, | |||
3579 | display, cursor); | 4290 | display, cursor); |
3580 | } | 4291 | } |
3581 | 4292 | ||
3582 | static void ironlake_update_wm(struct drm_device *dev, | 4293 | static void ironlake_update_wm(struct drm_device *dev) |
3583 | int planea_clock, int planeb_clock, | ||
3584 | int hdisplay, int htotal, | ||
3585 | int pixel_size) | ||
3586 | { | 4294 | { |
3587 | struct drm_i915_private *dev_priv = dev->dev_private; | 4295 | struct drm_i915_private *dev_priv = dev->dev_private; |
3588 | int fbc_wm, plane_wm, cursor_wm, enabled; | 4296 | int fbc_wm, plane_wm, cursor_wm; |
3589 | int clock; | 4297 | unsigned int enabled; |
3590 | 4298 | ||
3591 | enabled = 0; | 4299 | enabled = 0; |
3592 | if (ironlake_compute_wm0(dev, 0, | 4300 | if (ironlake_compute_wm0(dev, 0, |
@@ -3600,7 +4308,7 @@ static void ironlake_update_wm(struct drm_device *dev, | |||
3600 | DRM_DEBUG_KMS("FIFO watermarks For pipe A -" | 4308 | DRM_DEBUG_KMS("FIFO watermarks For pipe A -" |
3601 | " plane %d, " "cursor: %d\n", | 4309 | " plane %d, " "cursor: %d\n", |
3602 | plane_wm, cursor_wm); | 4310 | plane_wm, cursor_wm); |
3603 | enabled++; | 4311 | enabled |= 1; |
3604 | } | 4312 | } |
3605 | 4313 | ||
3606 | if (ironlake_compute_wm0(dev, 1, | 4314 | if (ironlake_compute_wm0(dev, 1, |
@@ -3614,7 +4322,7 @@ static void ironlake_update_wm(struct drm_device *dev, | |||
3614 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" | 4322 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" |
3615 | " plane %d, cursor: %d\n", | 4323 | " plane %d, cursor: %d\n", |
3616 | plane_wm, cursor_wm); | 4324 | plane_wm, cursor_wm); |
3617 | enabled++; | 4325 | enabled |= 2; |
3618 | } | 4326 | } |
3619 | 4327 | ||
3620 | /* | 4328 | /* |
@@ -3625,14 +4333,13 @@ static void ironlake_update_wm(struct drm_device *dev, | |||
3625 | I915_WRITE(WM2_LP_ILK, 0); | 4333 | I915_WRITE(WM2_LP_ILK, 0); |
3626 | I915_WRITE(WM1_LP_ILK, 0); | 4334 | I915_WRITE(WM1_LP_ILK, 0); |
3627 | 4335 | ||
3628 | if (enabled != 1) | 4336 | if (!single_plane_enabled(enabled)) |
3629 | return; | 4337 | return; |
3630 | 4338 | enabled = ffs(enabled) - 1; | |
3631 | clock = planea_clock ? planea_clock : planeb_clock; | ||
3632 | 4339 | ||
3633 | /* WM1 */ | 4340 | /* WM1 */ |
3634 | if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size, | 4341 | if (!ironlake_compute_srwm(dev, 1, enabled, |
3635 | clock, ILK_READ_WM1_LATENCY() * 500, | 4342 | ILK_READ_WM1_LATENCY() * 500, |
3636 | &ironlake_display_srwm_info, | 4343 | &ironlake_display_srwm_info, |
3637 | &ironlake_cursor_srwm_info, | 4344 | &ironlake_cursor_srwm_info, |
3638 | &fbc_wm, &plane_wm, &cursor_wm)) | 4345 | &fbc_wm, &plane_wm, &cursor_wm)) |
@@ -3646,8 +4353,8 @@ static void ironlake_update_wm(struct drm_device *dev, | |||
3646 | cursor_wm); | 4353 | cursor_wm); |
3647 | 4354 | ||
3648 | /* WM2 */ | 4355 | /* WM2 */ |
3649 | if (!ironlake_compute_srwm(dev, 2, hdisplay, htotal, pixel_size, | 4356 | if (!ironlake_compute_srwm(dev, 2, enabled, |
3650 | clock, ILK_READ_WM2_LATENCY() * 500, | 4357 | ILK_READ_WM2_LATENCY() * 500, |
3651 | &ironlake_display_srwm_info, | 4358 | &ironlake_display_srwm_info, |
3652 | &ironlake_cursor_srwm_info, | 4359 | &ironlake_cursor_srwm_info, |
3653 | &fbc_wm, &plane_wm, &cursor_wm)) | 4360 | &fbc_wm, &plane_wm, &cursor_wm)) |
@@ -3666,15 +4373,12 @@ static void ironlake_update_wm(struct drm_device *dev, | |||
3666 | */ | 4373 | */ |
3667 | } | 4374 | } |
3668 | 4375 | ||
3669 | static void sandybridge_update_wm(struct drm_device *dev, | 4376 | static void sandybridge_update_wm(struct drm_device *dev) |
3670 | int planea_clock, int planeb_clock, | ||
3671 | int hdisplay, int htotal, | ||
3672 | int pixel_size) | ||
3673 | { | 4377 | { |
3674 | struct drm_i915_private *dev_priv = dev->dev_private; | 4378 | struct drm_i915_private *dev_priv = dev->dev_private; |
3675 | int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ | 4379 | int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ |
3676 | int fbc_wm, plane_wm, cursor_wm, enabled; | 4380 | int fbc_wm, plane_wm, cursor_wm; |
3677 | int clock; | 4381 | unsigned int enabled; |
3678 | 4382 | ||
3679 | enabled = 0; | 4383 | enabled = 0; |
3680 | if (ironlake_compute_wm0(dev, 0, | 4384 | if (ironlake_compute_wm0(dev, 0, |
@@ -3686,7 +4390,7 @@ static void sandybridge_update_wm(struct drm_device *dev, | |||
3686 | DRM_DEBUG_KMS("FIFO watermarks For pipe A -" | 4390 | DRM_DEBUG_KMS("FIFO watermarks For pipe A -" |
3687 | " plane %d, " "cursor: %d\n", | 4391 | " plane %d, " "cursor: %d\n", |
3688 | plane_wm, cursor_wm); | 4392 | plane_wm, cursor_wm); |
3689 | enabled++; | 4393 | enabled |= 1; |
3690 | } | 4394 | } |
3691 | 4395 | ||
3692 | if (ironlake_compute_wm0(dev, 1, | 4396 | if (ironlake_compute_wm0(dev, 1, |
@@ -3698,7 +4402,7 @@ static void sandybridge_update_wm(struct drm_device *dev, | |||
3698 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" | 4402 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" |
3699 | " plane %d, cursor: %d\n", | 4403 | " plane %d, cursor: %d\n", |
3700 | plane_wm, cursor_wm); | 4404 | plane_wm, cursor_wm); |
3701 | enabled++; | 4405 | enabled |= 2; |
3702 | } | 4406 | } |
3703 | 4407 | ||
3704 | /* | 4408 | /* |
@@ -3715,14 +4419,13 @@ static void sandybridge_update_wm(struct drm_device *dev, | |||
3715 | I915_WRITE(WM2_LP_ILK, 0); | 4419 | I915_WRITE(WM2_LP_ILK, 0); |
3716 | I915_WRITE(WM1_LP_ILK, 0); | 4420 | I915_WRITE(WM1_LP_ILK, 0); |
3717 | 4421 | ||
3718 | if (enabled != 1) | 4422 | if (!single_plane_enabled(enabled)) |
3719 | return; | 4423 | return; |
3720 | 4424 | enabled = ffs(enabled) - 1; | |
3721 | clock = planea_clock ? planea_clock : planeb_clock; | ||
3722 | 4425 | ||
3723 | /* WM1 */ | 4426 | /* WM1 */ |
3724 | if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size, | 4427 | if (!ironlake_compute_srwm(dev, 1, enabled, |
3725 | clock, SNB_READ_WM1_LATENCY() * 500, | 4428 | SNB_READ_WM1_LATENCY() * 500, |
3726 | &sandybridge_display_srwm_info, | 4429 | &sandybridge_display_srwm_info, |
3727 | &sandybridge_cursor_srwm_info, | 4430 | &sandybridge_cursor_srwm_info, |
3728 | &fbc_wm, &plane_wm, &cursor_wm)) | 4431 | &fbc_wm, &plane_wm, &cursor_wm)) |
@@ -3736,9 +4439,8 @@ static void sandybridge_update_wm(struct drm_device *dev, | |||
3736 | cursor_wm); | 4439 | cursor_wm); |
3737 | 4440 | ||
3738 | /* WM2 */ | 4441 | /* WM2 */ |
3739 | if (!ironlake_compute_srwm(dev, 2, | 4442 | if (!ironlake_compute_srwm(dev, 2, enabled, |
3740 | hdisplay, htotal, pixel_size, | 4443 | SNB_READ_WM2_LATENCY() * 500, |
3741 | clock, SNB_READ_WM2_LATENCY() * 500, | ||
3742 | &sandybridge_display_srwm_info, | 4444 | &sandybridge_display_srwm_info, |
3743 | &sandybridge_cursor_srwm_info, | 4445 | &sandybridge_cursor_srwm_info, |
3744 | &fbc_wm, &plane_wm, &cursor_wm)) | 4446 | &fbc_wm, &plane_wm, &cursor_wm)) |
@@ -3752,9 +4454,8 @@ static void sandybridge_update_wm(struct drm_device *dev, | |||
3752 | cursor_wm); | 4454 | cursor_wm); |
3753 | 4455 | ||
3754 | /* WM3 */ | 4456 | /* WM3 */ |
3755 | if (!ironlake_compute_srwm(dev, 3, | 4457 | if (!ironlake_compute_srwm(dev, 3, enabled, |
3756 | hdisplay, htotal, pixel_size, | 4458 | SNB_READ_WM3_LATENCY() * 500, |
3757 | clock, SNB_READ_WM3_LATENCY() * 500, | ||
3758 | &sandybridge_display_srwm_info, | 4459 | &sandybridge_display_srwm_info, |
3759 | &sandybridge_cursor_srwm_info, | 4460 | &sandybridge_cursor_srwm_info, |
3760 | &fbc_wm, &plane_wm, &cursor_wm)) | 4461 | &fbc_wm, &plane_wm, &cursor_wm)) |
@@ -3803,44 +4504,9 @@ static void sandybridge_update_wm(struct drm_device *dev, | |||
3803 | static void intel_update_watermarks(struct drm_device *dev) | 4504 | static void intel_update_watermarks(struct drm_device *dev) |
3804 | { | 4505 | { |
3805 | struct drm_i915_private *dev_priv = dev->dev_private; | 4506 | struct drm_i915_private *dev_priv = dev->dev_private; |
3806 | struct drm_crtc *crtc; | ||
3807 | int sr_hdisplay = 0; | ||
3808 | unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0; | ||
3809 | int enabled = 0, pixel_size = 0; | ||
3810 | int sr_htotal = 0; | ||
3811 | |||
3812 | if (!dev_priv->display.update_wm) | ||
3813 | return; | ||
3814 | |||
3815 | /* Get the clock config from both planes */ | ||
3816 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
3817 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
3818 | if (intel_crtc->active) { | ||
3819 | enabled++; | ||
3820 | if (intel_crtc->plane == 0) { | ||
3821 | DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n", | ||
3822 | intel_crtc->pipe, crtc->mode.clock); | ||
3823 | planea_clock = crtc->mode.clock; | ||
3824 | } else { | ||
3825 | DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n", | ||
3826 | intel_crtc->pipe, crtc->mode.clock); | ||
3827 | planeb_clock = crtc->mode.clock; | ||
3828 | } | ||
3829 | sr_hdisplay = crtc->mode.hdisplay; | ||
3830 | sr_clock = crtc->mode.clock; | ||
3831 | sr_htotal = crtc->mode.htotal; | ||
3832 | if (crtc->fb) | ||
3833 | pixel_size = crtc->fb->bits_per_pixel / 8; | ||
3834 | else | ||
3835 | pixel_size = 4; /* by default */ | ||
3836 | } | ||
3837 | } | ||
3838 | |||
3839 | if (enabled <= 0) | ||
3840 | return; | ||
3841 | 4507 | ||
3842 | dev_priv->display.update_wm(dev, planea_clock, planeb_clock, | 4508 | if (dev_priv->display.update_wm) |
3843 | sr_hdisplay, sr_htotal, pixel_size); | 4509 | dev_priv->display.update_wm(dev); |
3844 | } | 4510 | } |
3845 | 4511 | ||
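After this change intel_update_watermarks() is a thin dispatch through dev_priv->display.update_wm, and each per-generation implementation reads the CRTC state it needs on its own. A small sketch of that function-pointer arrangement; the wiring below is illustrative, the real assignments live in intel_init_display() (the "Set up chip specific display functions" routine touched near the end of this file):

    #include <stdio.h>
    #include <stddef.h>

    struct drm_device;      /* opaque for the purpose of this sketch */

    struct display_funcs {
        void (*update_wm)(struct drm_device *dev);
    };

    static void g4x_update_wm(struct drm_device *dev)  { (void)dev; printf("g4x path\n"); }
    static void i965_update_wm(struct drm_device *dev) { (void)dev; printf("i965 path\n"); }

    int main(void)
    {
        struct display_funcs display = { .update_wm = g4x_update_wm };

        if (display.update_wm)
            display.update_wm(NULL);    /* intel_update_watermarks() equivalent */

        display.update_wm = i965_update_wm; /* a different generation would pick this */
        display.update_wm(NULL);
        return 0;
    }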
3846 | static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) | 4512 | static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) |
@@ -3872,6 +4538,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3872 | int ret; | 4538 | int ret; |
3873 | struct fdi_m_n m_n = {0}; | 4539 | struct fdi_m_n m_n = {0}; |
3874 | u32 reg, temp; | 4540 | u32 reg, temp; |
4541 | u32 lvds_sync = 0; | ||
3875 | int target_clock; | 4542 | int target_clock; |
3876 | 4543 | ||
3877 | drm_vblank_pre_modeset(dev, pipe); | 4544 | drm_vblank_pre_modeset(dev, pipe); |
@@ -4243,9 +4910,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
4243 | pipeconf &= ~PIPECONF_DOUBLE_WIDE; | 4910 | pipeconf &= ~PIPECONF_DOUBLE_WIDE; |
4244 | } | 4911 | } |
4245 | 4912 | ||
4246 | dspcntr |= DISPLAY_PLANE_ENABLE; | 4913 | if (!HAS_PCH_SPLIT(dev)) |
4247 | pipeconf |= PIPECONF_ENABLE; | 4914 | dpll |= DPLL_VCO_ENABLE; |
4248 | dpll |= DPLL_VCO_ENABLE; | ||
4249 | 4915 | ||
4250 | DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); | 4916 | DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); |
4251 | drm_mode_debug_printmodeline(mode); | 4917 | drm_mode_debug_printmodeline(mode); |
@@ -4271,10 +4937,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
4271 | /* enable transcoder DPLL */ | 4937 | /* enable transcoder DPLL */ |
4272 | if (HAS_PCH_CPT(dev)) { | 4938 | if (HAS_PCH_CPT(dev)) { |
4273 | temp = I915_READ(PCH_DPLL_SEL); | 4939 | temp = I915_READ(PCH_DPLL_SEL); |
4274 | if (pipe == 0) | 4940 | switch (pipe) { |
4941 | case 0: | ||
4275 | temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL; | 4942 | temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL; |
4276 | else | 4943 | break; |
4944 | case 1: | ||
4277 | temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL; | 4945 | temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL; |
4946 | break; | ||
4947 | case 2: | ||
4948 | /* FIXME: manage transcoder PLLs? */ | ||
4949 | temp |= TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL; | ||
4950 | break; | ||
4951 | default: | ||
4952 | BUG(); | ||
4953 | } | ||
4278 | I915_WRITE(PCH_DPLL_SEL, temp); | 4954 | I915_WRITE(PCH_DPLL_SEL, temp); |
4279 | 4955 | ||
4280 | POSTING_READ(PCH_DPLL_SEL); | 4956 | POSTING_READ(PCH_DPLL_SEL); |
@@ -4324,6 +5000,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
4324 | else | 5000 | else |
4325 | temp &= ~LVDS_ENABLE_DITHER; | 5001 | temp &= ~LVDS_ENABLE_DITHER; |
4326 | } | 5002 | } |
5003 | if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) | ||
5004 | lvds_sync |= LVDS_HSYNC_POLARITY; | ||
5005 | if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) | ||
5006 | lvds_sync |= LVDS_VSYNC_POLARITY; | ||
5007 | if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY)) | ||
5008 | != lvds_sync) { | ||
5009 | char flags[2] = "-+"; | ||
5010 | DRM_INFO("Changing LVDS panel from " | ||
5011 | "(%chsync, %cvsync) to (%chsync, %cvsync)\n", | ||
5012 | flags[!(temp & LVDS_HSYNC_POLARITY)], | ||
5013 | flags[!(temp & LVDS_VSYNC_POLARITY)], | ||
5014 | flags[!(lvds_sync & LVDS_HSYNC_POLARITY)], | ||
5015 | flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]); | ||
5016 | temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); | ||
5017 | temp |= lvds_sync; | ||
5018 | } | ||
4327 | I915_WRITE(reg, temp); | 5019 | I915_WRITE(reg, temp); |
4328 | } | 5020 | } |
4329 | 5021 | ||
@@ -4341,17 +5033,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
4341 | intel_dp_set_m_n(crtc, mode, adjusted_mode); | 5033 | intel_dp_set_m_n(crtc, mode, adjusted_mode); |
4342 | } else if (HAS_PCH_SPLIT(dev)) { | 5034 | } else if (HAS_PCH_SPLIT(dev)) { |
4343 | /* For non-DP output, clear any trans DP clock recovery setting.*/ | 5035 | /* For non-DP output, clear any trans DP clock recovery setting.*/ |
4344 | if (pipe == 0) { | 5036 | I915_WRITE(TRANSDATA_M1(pipe), 0); |
4345 | I915_WRITE(TRANSA_DATA_M1, 0); | 5037 | I915_WRITE(TRANSDATA_N1(pipe), 0); |
4346 | I915_WRITE(TRANSA_DATA_N1, 0); | 5038 | I915_WRITE(TRANSDPLINK_M1(pipe), 0); |
4347 | I915_WRITE(TRANSA_DP_LINK_M1, 0); | 5039 | I915_WRITE(TRANSDPLINK_N1(pipe), 0); |
4348 | I915_WRITE(TRANSA_DP_LINK_N1, 0); | ||
4349 | } else { | ||
4350 | I915_WRITE(TRANSB_DATA_M1, 0); | ||
4351 | I915_WRITE(TRANSB_DATA_N1, 0); | ||
4352 | I915_WRITE(TRANSB_DP_LINK_M1, 0); | ||
4353 | I915_WRITE(TRANSB_DP_LINK_N1, 0); | ||
4354 | } | ||
4355 | } | 5040 | } |
4356 | 5041 | ||
4357 | if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { | 5042 | if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
@@ -4454,6 +5139,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
4454 | 5139 | ||
4455 | I915_WRITE(PIPECONF(pipe), pipeconf); | 5140 | I915_WRITE(PIPECONF(pipe), pipeconf); |
4456 | POSTING_READ(PIPECONF(pipe)); | 5141 | POSTING_READ(PIPECONF(pipe)); |
5142 | if (!HAS_PCH_SPLIT(dev)) | ||
5143 | intel_enable_pipe(dev_priv, pipe, false); | ||
4457 | 5144 | ||
4458 | intel_wait_for_vblank(dev, pipe); | 5145 | intel_wait_for_vblank(dev, pipe); |
4459 | 5146 | ||
@@ -4464,6 +5151,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
4464 | } | 5151 | } |
4465 | 5152 | ||
4466 | I915_WRITE(DSPCNTR(plane), dspcntr); | 5153 | I915_WRITE(DSPCNTR(plane), dspcntr); |
5154 | POSTING_READ(DSPCNTR(plane)); | ||
5155 | if (!HAS_PCH_SPLIT(dev)) | ||
5156 | intel_enable_plane(dev_priv, plane, pipe); | ||
4467 | 5157 | ||
4468 | ret = intel_pipe_set_base(crtc, x, y, old_fb); | 5158 | ret = intel_pipe_set_base(crtc, x, y, old_fb); |
4469 | 5159 | ||
@@ -4480,7 +5170,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc) | |||
4480 | struct drm_device *dev = crtc->dev; | 5170 | struct drm_device *dev = crtc->dev; |
4481 | struct drm_i915_private *dev_priv = dev->dev_private; | 5171 | struct drm_i915_private *dev_priv = dev->dev_private; |
4482 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5172 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4483 | int palreg = (intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B; | 5173 | int palreg = PALETTE(intel_crtc->pipe); |
4484 | int i; | 5174 | int i; |
4485 | 5175 | ||
4486 | /* The clocks have to be on to load the palette. */ | 5176 | /* The clocks have to be on to load the palette. */ |
@@ -4489,8 +5179,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc) | |||
4489 | 5179 | ||
4490 | /* use legacy palette for Ironlake */ | 5180 | /* use legacy palette for Ironlake */ |
4491 | if (HAS_PCH_SPLIT(dev)) | 5181 | if (HAS_PCH_SPLIT(dev)) |
4492 | palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A : | 5182 | palreg = LGC_PALETTE(intel_crtc->pipe); |
4493 | LGC_PALETTE_B; | ||
4494 | 5183 | ||
4495 | for (i = 0; i < 256; i++) { | 5184 | for (i = 0; i < 256; i++) { |
4496 | I915_WRITE(palreg + 4 * i, | 5185 | I915_WRITE(palreg + 4 * i, |
@@ -4511,12 +5200,12 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base) | |||
4511 | if (intel_crtc->cursor_visible == visible) | 5200 | if (intel_crtc->cursor_visible == visible) |
4512 | return; | 5201 | return; |
4513 | 5202 | ||
4514 | cntl = I915_READ(CURACNTR); | 5203 | cntl = I915_READ(_CURACNTR); |
4515 | if (visible) { | 5204 | if (visible) { |
4516 | /* On these chipsets we can only modify the base whilst | 5205 | /* On these chipsets we can only modify the base whilst |
4517 | * the cursor is disabled. | 5206 | * the cursor is disabled. |
4518 | */ | 5207 | */ |
4519 | I915_WRITE(CURABASE, base); | 5208 | I915_WRITE(_CURABASE, base); |
4520 | 5209 | ||
4521 | cntl &= ~(CURSOR_FORMAT_MASK); | 5210 | cntl &= ~(CURSOR_FORMAT_MASK); |
4522 | /* XXX width must be 64, stride 256 => 0x00 << 28 */ | 5211 | /* XXX width must be 64, stride 256 => 0x00 << 28 */ |
@@ -4525,7 +5214,7 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base) | |||
4525 | CURSOR_FORMAT_ARGB; | 5214 | CURSOR_FORMAT_ARGB; |
4526 | } else | 5215 | } else |
4527 | cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE); | 5216 | cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE); |
4528 | I915_WRITE(CURACNTR, cntl); | 5217 | I915_WRITE(_CURACNTR, cntl); |
4529 | 5218 | ||
4530 | intel_crtc->cursor_visible = visible; | 5219 | intel_crtc->cursor_visible = visible; |
4531 | } | 5220 | } |
@@ -4539,7 +5228,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) | |||
4539 | bool visible = base != 0; | 5228 | bool visible = base != 0; |
4540 | 5229 | ||
4541 | if (intel_crtc->cursor_visible != visible) { | 5230 | if (intel_crtc->cursor_visible != visible) { |
4542 | uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR); | 5231 | uint32_t cntl = I915_READ(CURCNTR(pipe)); |
4543 | if (base) { | 5232 | if (base) { |
4544 | cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); | 5233 | cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); |
4545 | cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; | 5234 | cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; |
@@ -4548,12 +5237,12 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) | |||
4548 | cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); | 5237 | cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); |
4549 | cntl |= CURSOR_MODE_DISABLE; | 5238 | cntl |= CURSOR_MODE_DISABLE; |
4550 | } | 5239 | } |
4551 | I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl); | 5240 | I915_WRITE(CURCNTR(pipe), cntl); |
4552 | 5241 | ||
4553 | intel_crtc->cursor_visible = visible; | 5242 | intel_crtc->cursor_visible = visible; |
4554 | } | 5243 | } |
4555 | /* and commit changes on next vblank */ | 5244 | /* and commit changes on next vblank */ |
4556 | I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base); | 5245 | I915_WRITE(CURBASE(pipe), base); |
4557 | } | 5246 | } |
4558 | 5247 | ||
4559 | /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */ | 5248 | /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */ |
@@ -4603,7 +5292,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, | |||
4603 | if (!visible && !intel_crtc->cursor_visible) | 5292 | if (!visible && !intel_crtc->cursor_visible) |
4604 | return; | 5293 | return; |
4605 | 5294 | ||
4606 | I915_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos); | 5295 | I915_WRITE(CURPOS(pipe), pos); |
4607 | if (IS_845G(dev) || IS_I865G(dev)) | 5296 | if (IS_845G(dev) || IS_I865G(dev)) |
4608 | i845_update_cursor(crtc, base); | 5297 | i845_update_cursor(crtc, base); |
4609 | else | 5298 | else |
@@ -4643,7 +5332,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
4643 | } | 5332 | } |
4644 | 5333 | ||
4645 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); | 5334 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); |
4646 | if (!obj) | 5335 | if (&obj->base == NULL) |
4647 | return -ENOENT; | 5336 | return -ENOENT; |
4648 | 5337 | ||
4649 | if (obj->base.size < width * height * 4) { | 5338 | if (obj->base.size < width * height * 4) { |
@@ -4909,14 +5598,14 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) | |||
4909 | struct drm_i915_private *dev_priv = dev->dev_private; | 5598 | struct drm_i915_private *dev_priv = dev->dev_private; |
4910 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5599 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4911 | int pipe = intel_crtc->pipe; | 5600 | int pipe = intel_crtc->pipe; |
4912 | u32 dpll = I915_READ((pipe == 0) ? DPLL_A : DPLL_B); | 5601 | u32 dpll = I915_READ(DPLL(pipe)); |
4913 | u32 fp; | 5602 | u32 fp; |
4914 | intel_clock_t clock; | 5603 | intel_clock_t clock; |
4915 | 5604 | ||
4916 | if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) | 5605 | if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) |
4917 | fp = I915_READ((pipe == 0) ? FPA0 : FPB0); | 5606 | fp = FP0(pipe); |
4918 | else | 5607 | else |
4919 | fp = I915_READ((pipe == 0) ? FPA1 : FPB1); | 5608 | fp = FP1(pipe); |
4920 | 5609 | ||
4921 | clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; | 5610 | clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; |
4922 | if (IS_PINEVIEW(dev)) { | 5611 | if (IS_PINEVIEW(dev)) { |
@@ -4998,10 +5687,10 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, | |||
4998 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5687 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4999 | int pipe = intel_crtc->pipe; | 5688 | int pipe = intel_crtc->pipe; |
5000 | struct drm_display_mode *mode; | 5689 | struct drm_display_mode *mode; |
5001 | int htot = I915_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B); | 5690 | int htot = I915_READ(HTOTAL(pipe)); |
5002 | int hsync = I915_READ((pipe == 0) ? HSYNC_A : HSYNC_B); | 5691 | int hsync = I915_READ(HSYNC(pipe)); |
5003 | int vtot = I915_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B); | 5692 | int vtot = I915_READ(VTOTAL(pipe)); |
5004 | int vsync = I915_READ((pipe == 0) ? VSYNC_A : VSYNC_B); | 5693 | int vsync = I915_READ(VSYNC(pipe)); |
5005 | 5694 | ||
5006 | mode = kzalloc(sizeof(*mode), GFP_KERNEL); | 5695 | mode = kzalloc(sizeof(*mode), GFP_KERNEL); |
5007 | if (!mode) | 5696 | if (!mode) |
@@ -5110,7 +5799,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc) | |||
5110 | drm_i915_private_t *dev_priv = dev->dev_private; | 5799 | drm_i915_private_t *dev_priv = dev->dev_private; |
5111 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5800 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
5112 | int pipe = intel_crtc->pipe; | 5801 | int pipe = intel_crtc->pipe; |
5113 | int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; | 5802 | int dpll_reg = DPLL(pipe); |
5114 | int dpll = I915_READ(dpll_reg); | 5803 | int dpll = I915_READ(dpll_reg); |
5115 | 5804 | ||
5116 | if (HAS_PCH_SPLIT(dev)) | 5805 | if (HAS_PCH_SPLIT(dev)) |
@@ -5158,7 +5847,6 @@ static void intel_idle_update(struct work_struct *work) | |||
5158 | struct drm_device *dev = dev_priv->dev; | 5847 | struct drm_device *dev = dev_priv->dev; |
5159 | struct drm_crtc *crtc; | 5848 | struct drm_crtc *crtc; |
5160 | struct intel_crtc *intel_crtc; | 5849 | struct intel_crtc *intel_crtc; |
5161 | int enabled = 0; | ||
5162 | 5850 | ||
5163 | if (!i915_powersave) | 5851 | if (!i915_powersave) |
5164 | return; | 5852 | return; |
@@ -5172,16 +5860,11 @@ static void intel_idle_update(struct work_struct *work) | |||
5172 | if (!crtc->fb) | 5860 | if (!crtc->fb) |
5173 | continue; | 5861 | continue; |
5174 | 5862 | ||
5175 | enabled++; | ||
5176 | intel_crtc = to_intel_crtc(crtc); | 5863 | intel_crtc = to_intel_crtc(crtc); |
5177 | if (!intel_crtc->busy) | 5864 | if (!intel_crtc->busy) |
5178 | intel_decrease_pllclock(crtc); | 5865 | intel_decrease_pllclock(crtc); |
5179 | } | 5866 | } |
5180 | 5867 | ||
5181 | if ((enabled == 1) && (IS_I945G(dev) || IS_I945GM(dev))) { | ||
5182 | DRM_DEBUG_DRIVER("enable memory self refresh on 945\n"); | ||
5183 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); | ||
5184 | } | ||
5185 | 5868 | ||
5186 | mutex_unlock(&dev->struct_mutex); | 5869 | mutex_unlock(&dev->struct_mutex); |
5187 | } | 5870 | } |
@@ -5206,17 +5889,9 @@ void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj) | |||
5206 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 5889 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
5207 | return; | 5890 | return; |
5208 | 5891 | ||
5209 | if (!dev_priv->busy) { | 5892 | if (!dev_priv->busy) |
5210 | if (IS_I945G(dev) || IS_I945GM(dev)) { | ||
5211 | u32 fw_blc_self; | ||
5212 | |||
5213 | DRM_DEBUG_DRIVER("disable memory self refresh on 945\n"); | ||
5214 | fw_blc_self = I915_READ(FW_BLC_SELF); | ||
5215 | fw_blc_self &= ~FW_BLC_SELF_EN; | ||
5216 | I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK); | ||
5217 | } | ||
5218 | dev_priv->busy = true; | 5893 | dev_priv->busy = true; |
5219 | } else | 5894 | else |
5220 | mod_timer(&dev_priv->idle_timer, jiffies + | 5895 | mod_timer(&dev_priv->idle_timer, jiffies + |
5221 | msecs_to_jiffies(GPU_IDLE_TIMEOUT)); | 5896 | msecs_to_jiffies(GPU_IDLE_TIMEOUT)); |
5222 | 5897 | ||
@@ -5228,14 +5903,6 @@ void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj) | |||
5228 | intel_fb = to_intel_framebuffer(crtc->fb); | 5903 | intel_fb = to_intel_framebuffer(crtc->fb); |
5229 | if (intel_fb->obj == obj) { | 5904 | if (intel_fb->obj == obj) { |
5230 | if (!intel_crtc->busy) { | 5905 | if (!intel_crtc->busy) { |
5231 | if (IS_I945G(dev) || IS_I945GM(dev)) { | ||
5232 | u32 fw_blc_self; | ||
5233 | |||
5234 | DRM_DEBUG_DRIVER("disable memory self refresh on 945\n"); | ||
5235 | fw_blc_self = I915_READ(FW_BLC_SELF); | ||
5236 | fw_blc_self &= ~FW_BLC_SELF_EN; | ||
5237 | I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK); | ||
5238 | } | ||
5239 | /* Non-busy -> busy, upclock */ | 5906 | /* Non-busy -> busy, upclock */ |
5240 | intel_increase_pllclock(crtc); | 5907 | intel_increase_pllclock(crtc); |
5241 | intel_crtc->busy = true; | 5908 | intel_crtc->busy = true; |
@@ -5513,7 +6180,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5513 | * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; | 6180 | * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; |
5514 | */ | 6181 | */ |
5515 | pf = 0; | 6182 | pf = 0; |
5516 | pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff; | 6183 | pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff; |
5517 | OUT_RING(pf | pipesrc); | 6184 | OUT_RING(pf | pipesrc); |
5518 | break; | 6185 | break; |
5519 | 6186 | ||
@@ -5523,8 +6190,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5523 | OUT_RING(fb->pitch | obj->tiling_mode); | 6190 | OUT_RING(fb->pitch | obj->tiling_mode); |
5524 | OUT_RING(obj->gtt_offset); | 6191 | OUT_RING(obj->gtt_offset); |
5525 | 6192 | ||
5526 | pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; | 6193 | pf = I915_READ(PF_CTL(pipe)) & PF_ENABLE; |
5527 | pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff; | 6194 | pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff; |
5528 | OUT_RING(pf | pipesrc); | 6195 | OUT_RING(pf | pipesrc); |
5529 | break; | 6196 | break; |
5530 | } | 6197 | } |
@@ -5558,9 +6225,7 @@ static void intel_crtc_reset(struct drm_crtc *crtc) | |||
5558 | /* Reset flags back to the 'unknown' status so that they | 6225 | /* Reset flags back to the 'unknown' status so that they |
5559 | * will be correctly set on the initial modeset. | 6226 | * will be correctly set on the initial modeset. |
5560 | */ | 6227 | */ |
5561 | intel_crtc->cursor_addr = 0; | ||
5562 | intel_crtc->dpms_mode = -1; | 6228 | intel_crtc->dpms_mode = -1; |
5563 | intel_crtc->active = true; /* force the pipe off on setup_init_config */ | ||
5564 | } | 6229 | } |
5565 | 6230 | ||
5566 | static struct drm_crtc_helper_funcs intel_helper_funcs = { | 6231 | static struct drm_crtc_helper_funcs intel_helper_funcs = { |
@@ -5615,22 +6280,8 @@ static void intel_sanitize_modesetting(struct drm_device *dev, | |||
5615 | pipe = !pipe; | 6280 | pipe = !pipe; |
5616 | 6281 | ||
5617 | /* Disable the plane and wait for it to stop reading from the pipe. */ | 6282 | /* Disable the plane and wait for it to stop reading from the pipe. */ |
5618 | I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE); | 6283 | intel_disable_plane(dev_priv, plane, pipe); |
5619 | intel_flush_display_plane(dev, plane); | 6284 | intel_disable_pipe(dev_priv, pipe); |
5620 | |||
5621 | if (IS_GEN2(dev)) | ||
5622 | intel_wait_for_vblank(dev, pipe); | ||
5623 | |||
5624 | if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) | ||
5625 | return; | ||
5626 | |||
5627 | /* Switch off the pipe. */ | ||
5628 | reg = PIPECONF(pipe); | ||
5629 | val = I915_READ(reg); | ||
5630 | if (val & PIPECONF_ENABLE) { | ||
5631 | I915_WRITE(reg, val & ~PIPECONF_ENABLE); | ||
5632 | intel_wait_for_pipe_off(dev, pipe); | ||
5633 | } | ||
5634 | } | 6285 | } |
5635 | 6286 | ||
5636 | static void intel_crtc_init(struct drm_device *dev, int pipe) | 6287 | static void intel_crtc_init(struct drm_device *dev, int pipe) |
@@ -5666,6 +6317,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
5666 | dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; | 6317 | dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; |
5667 | 6318 | ||
5668 | intel_crtc_reset(&intel_crtc->base); | 6319 | intel_crtc_reset(&intel_crtc->base); |
6320 | intel_crtc->active = true; /* force the pipe off on setup_init_config */ | ||
5669 | 6321 | ||
5670 | if (HAS_PCH_SPLIT(dev)) { | 6322 | if (HAS_PCH_SPLIT(dev)) { |
5671 | intel_helper_funcs.prepare = ironlake_crtc_prepare; | 6323 | intel_helper_funcs.prepare = ironlake_crtc_prepare; |
@@ -5919,7 +6571,7 @@ intel_user_framebuffer_create(struct drm_device *dev, | |||
5919 | int ret; | 6571 | int ret; |
5920 | 6572 | ||
5921 | obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle)); | 6573 | obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle)); |
5922 | if (!obj) | 6574 | if (&obj->base == NULL) |
5923 | return ERR_PTR(-ENOENT); | 6575 | return ERR_PTR(-ENOENT); |
5924 | 6576 | ||
5925 | intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); | 6577 | intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); |
@@ -6204,7 +6856,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) | |||
6204 | * userspace... | 6856 | * userspace... |
6205 | */ | 6857 | */ |
6206 | I915_WRITE(GEN6_RC_STATE, 0); | 6858 | I915_WRITE(GEN6_RC_STATE, 0); |
6207 | __gen6_force_wake_get(dev_priv); | 6859 | __gen6_gt_force_wake_get(dev_priv); |
6208 | 6860 | ||
6209 | /* disable the counters and set deterministic thresholds */ | 6861 | /* disable the counters and set deterministic thresholds */ |
6210 | I915_WRITE(GEN6_RC_CONTROL, 0); | 6862 | I915_WRITE(GEN6_RC_CONTROL, 0); |
@@ -6241,18 +6893,18 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) | |||
6241 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, | 6893 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, |
6242 | 18 << 24 | | 6894 | 18 << 24 | |
6243 | 6 << 16); | 6895 | 6 << 16); |
6244 | I915_WRITE(GEN6_RP_UP_THRESHOLD, 90000); | 6896 | I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000); |
6245 | I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 100000); | 6897 | I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000); |
6246 | I915_WRITE(GEN6_RP_UP_EI, 100000); | 6898 | I915_WRITE(GEN6_RP_UP_EI, 100000); |
6247 | I915_WRITE(GEN6_RP_DOWN_EI, 300000); | 6899 | I915_WRITE(GEN6_RP_DOWN_EI, 5000000); |
6248 | I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); | 6900 | I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); |
6249 | I915_WRITE(GEN6_RP_CONTROL, | 6901 | I915_WRITE(GEN6_RP_CONTROL, |
6250 | GEN6_RP_MEDIA_TURBO | | 6902 | GEN6_RP_MEDIA_TURBO | |
6251 | GEN6_RP_USE_NORMAL_FREQ | | 6903 | GEN6_RP_USE_NORMAL_FREQ | |
6252 | GEN6_RP_MEDIA_IS_GFX | | 6904 | GEN6_RP_MEDIA_IS_GFX | |
6253 | GEN6_RP_ENABLE | | 6905 | GEN6_RP_ENABLE | |
6254 | GEN6_RP_UP_BUSY_MAX | | 6906 | GEN6_RP_UP_BUSY_AVG | |
6255 | GEN6_RP_DOWN_BUSY_MIN); | 6907 | GEN6_RP_DOWN_IDLE_CONT); |
6256 | 6908 | ||
6257 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | 6909 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, |
6258 | 500)) | 6910 | 500)) |
@@ -6302,12 +6954,13 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) | |||
6302 | /* enable all PM interrupts */ | 6954 | /* enable all PM interrupts */ |
6303 | I915_WRITE(GEN6_PMINTRMSK, 0); | 6955 | I915_WRITE(GEN6_PMINTRMSK, 0); |
6304 | 6956 | ||
6305 | __gen6_force_wake_put(dev_priv); | 6957 | __gen6_gt_force_wake_put(dev_priv); |
6306 | } | 6958 | } |
6307 | 6959 | ||
6308 | void intel_enable_clock_gating(struct drm_device *dev) | 6960 | void intel_enable_clock_gating(struct drm_device *dev) |
6309 | { | 6961 | { |
6310 | struct drm_i915_private *dev_priv = dev->dev_private; | 6962 | struct drm_i915_private *dev_priv = dev->dev_private; |
6963 | int pipe; | ||
6311 | 6964 | ||
6312 | /* | 6965 | /* |
6313 | * Disable clock gating reported to work incorrectly according to the | 6966 | * Disable clock gating reported to work incorrectly according to the |
@@ -6417,12 +7070,10 @@ void intel_enable_clock_gating(struct drm_device *dev) | |||
6417 | ILK_DPARB_CLK_GATE | | 7070 | ILK_DPARB_CLK_GATE | |
6418 | ILK_DPFD_CLK_GATE); | 7071 | ILK_DPFD_CLK_GATE); |
6419 | 7072 | ||
6420 | I915_WRITE(DSPACNTR, | 7073 | for_each_pipe(pipe) |
6421 | I915_READ(DSPACNTR) | | 7074 | I915_WRITE(DSPCNTR(pipe), |
6422 | DISPPLANE_TRICKLE_FEED_DISABLE); | 7075 | I915_READ(DSPCNTR(pipe)) | |
6423 | I915_WRITE(DSPBCNTR, | 7076 | DISPPLANE_TRICKLE_FEED_DISABLE); |
6424 | I915_READ(DSPBCNTR) | | ||
6425 | DISPPLANE_TRICKLE_FEED_DISABLE); | ||
6426 | } | 7077 | } |
6427 | } else if (IS_G4X(dev)) { | 7078 | } else if (IS_G4X(dev)) { |
6428 | uint32_t dspclk_gate; | 7079 | uint32_t dspclk_gate; |
@@ -6463,52 +7114,60 @@ void intel_enable_clock_gating(struct drm_device *dev) | |||
6463 | } | 7114 | } |
6464 | } | 7115 | } |
6465 | 7116 | ||
6466 | void intel_disable_clock_gating(struct drm_device *dev) | 7117 | static void ironlake_teardown_rc6(struct drm_device *dev) |
6467 | { | 7118 | { |
6468 | struct drm_i915_private *dev_priv = dev->dev_private; | 7119 | struct drm_i915_private *dev_priv = dev->dev_private; |
6469 | 7120 | ||
6470 | if (dev_priv->renderctx) { | 7121 | if (dev_priv->renderctx) { |
6471 | struct drm_i915_gem_object *obj = dev_priv->renderctx; | 7122 | i915_gem_object_unpin(dev_priv->renderctx); |
6472 | 7123 | drm_gem_object_unreference(&dev_priv->renderctx->base); | |
6473 | I915_WRITE(CCID, 0); | ||
6474 | POSTING_READ(CCID); | ||
6475 | |||
6476 | i915_gem_object_unpin(obj); | ||
6477 | drm_gem_object_unreference(&obj->base); | ||
6478 | dev_priv->renderctx = NULL; | 7124 | dev_priv->renderctx = NULL; |
6479 | } | 7125 | } |
6480 | 7126 | ||
6481 | if (dev_priv->pwrctx) { | 7127 | if (dev_priv->pwrctx) { |
6482 | struct drm_i915_gem_object *obj = dev_priv->pwrctx; | 7128 | i915_gem_object_unpin(dev_priv->pwrctx); |
7129 | drm_gem_object_unreference(&dev_priv->pwrctx->base); | ||
7130 | dev_priv->pwrctx = NULL; | ||
7131 | } | ||
7132 | } | ||
7133 | |||
7134 | static void ironlake_disable_rc6(struct drm_device *dev) | ||
7135 | { | ||
7136 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
7137 | |||
7138 | if (I915_READ(PWRCTXA)) { | ||
7139 | /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */ | ||
7140 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT); | ||
7141 | wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON), | ||
7142 | 50); | ||
6483 | 7143 | ||
6484 | I915_WRITE(PWRCTXA, 0); | 7144 | I915_WRITE(PWRCTXA, 0); |
6485 | POSTING_READ(PWRCTXA); | 7145 | POSTING_READ(PWRCTXA); |
6486 | 7146 | ||
6487 | i915_gem_object_unpin(obj); | 7147 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); |
6488 | drm_gem_object_unreference(&obj->base); | 7148 | POSTING_READ(RSTDBYCTL); |
6489 | dev_priv->pwrctx = NULL; | ||
6490 | } | 7149 | } |
7150 | |||
7151 | ironlake_teardown_rc6(dev); | ||
6491 | } | 7152 | } |
6492 | 7153 | ||
6493 | static void ironlake_disable_rc6(struct drm_device *dev) | 7154 | static int ironlake_setup_rc6(struct drm_device *dev) |
6494 | { | 7155 | { |
6495 | struct drm_i915_private *dev_priv = dev->dev_private; | 7156 | struct drm_i915_private *dev_priv = dev->dev_private; |
6496 | 7157 | ||
6497 | /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */ | 7158 | if (dev_priv->renderctx == NULL) |
6498 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT); | 7159 | dev_priv->renderctx = intel_alloc_context_page(dev); |
6499 | wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON), | 7160 | if (!dev_priv->renderctx) |
6500 | 10); | 7161 | return -ENOMEM; |
6501 | POSTING_READ(CCID); | 7162 | |
6502 | I915_WRITE(PWRCTXA, 0); | 7163 | if (dev_priv->pwrctx == NULL) |
6503 | POSTING_READ(PWRCTXA); | 7164 | dev_priv->pwrctx = intel_alloc_context_page(dev); |
6504 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); | 7165 | if (!dev_priv->pwrctx) { |
6505 | POSTING_READ(RSTDBYCTL); | 7166 | ironlake_teardown_rc6(dev); |
6506 | i915_gem_object_unpin(dev_priv->renderctx); | 7167 | return -ENOMEM; |
6507 | drm_gem_object_unreference(&dev_priv->renderctx->base); | 7168 | } |
6508 | dev_priv->renderctx = NULL; | 7169 | |
6509 | i915_gem_object_unpin(dev_priv->pwrctx); | 7170 | return 0; |
6510 | drm_gem_object_unreference(&dev_priv->pwrctx->base); | ||
6511 | dev_priv->pwrctx = NULL; | ||
6512 | } | 7171 | } |
6513 | 7172 | ||
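The new ironlake_setup_rc6()/ironlake_teardown_rc6() pair above follows the usual allocate-then-unwind pattern: get the render context, get the power context, and if the second allocation fails release whatever already succeeded. A generic standalone sketch of the same pattern; malloc() stands in for intel_alloc_context_page() and the struct is illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    struct ctx_pages {
        void *renderctx;
        void *pwrctx;
    };

    /* Safe to call with either (or both) pointers unset. */
    static void teardown(struct ctx_pages *c)
    {
        free(c->renderctx);
        c->renderctx = NULL;
        free(c->pwrctx);
        c->pwrctx = NULL;
    }

    static int setup(struct ctx_pages *c)
    {
        if (!c->renderctx)
            c->renderctx = malloc(4096);    /* stand-in for intel_alloc_context_page() */
        if (!c->renderctx)
            return -1;

        if (!c->pwrctx)
            c->pwrctx = malloc(4096);
        if (!c->pwrctx) {
            teardown(c);                    /* unwind the partial setup */
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct ctx_pages c = { NULL, NULL };

        printf("setup %s\n", setup(&c) == 0 ? "ok" : "failed");
        teardown(&c);
        return 0;
    }

ironlake_enable_rc6() then only has to bail out cleanly: if the ring submission fails after a successful setup, it calls the teardown helper instead of duplicating the unreference sequence.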
6514 | void ironlake_enable_rc6(struct drm_device *dev) | 7173 | void ironlake_enable_rc6(struct drm_device *dev) |
@@ -6516,15 +7175,26 @@ void ironlake_enable_rc6(struct drm_device *dev) | |||
6516 | struct drm_i915_private *dev_priv = dev->dev_private; | 7175 | struct drm_i915_private *dev_priv = dev->dev_private; |
6517 | int ret; | 7176 | int ret; |
6518 | 7177 | ||
7178 | /* rc6 disabled by default due to repeated reports of hanging during | ||
7179 | * boot and resume. | ||
7180 | */ | ||
7181 | if (!i915_enable_rc6) | ||
7182 | return; | ||
7183 | |||
7184 | ret = ironlake_setup_rc6(dev); | ||
7185 | if (ret) | ||
7186 | return; | ||
7187 | |||
6519 | /* | 7188 | /* |
6520 | * GPU can automatically power down the render unit if given a page | 7189 | * GPU can automatically power down the render unit if given a page |
6521 | * to save state. | 7190 | * to save state. |
6522 | */ | 7191 | */ |
6523 | ret = BEGIN_LP_RING(6); | 7192 | ret = BEGIN_LP_RING(6); |
6524 | if (ret) { | 7193 | if (ret) { |
6525 | ironlake_disable_rc6(dev); | 7194 | ironlake_teardown_rc6(dev); |
6526 | return; | 7195 | return; |
6527 | } | 7196 | } |
7197 | |||
6528 | OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); | 7198 | OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); |
6529 | OUT_RING(MI_SET_CONTEXT); | 7199 | OUT_RING(MI_SET_CONTEXT); |
6530 | OUT_RING(dev_priv->renderctx->gtt_offset | | 7200 | OUT_RING(dev_priv->renderctx->gtt_offset | |
@@ -6541,6 +7211,7 @@ void ironlake_enable_rc6(struct drm_device *dev) | |||
6541 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); | 7211 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); |
6542 | } | 7212 | } |
6543 | 7213 | ||
7214 | |||
6544 | /* Set up chip specific display functions */ | 7215 | /* Set up chip specific display functions */ |
6545 | static void intel_init_display(struct drm_device *dev) | 7216 | static void intel_init_display(struct drm_device *dev) |
6546 | { | 7217 | { |
@@ -6757,10 +7428,6 @@ void intel_modeset_init(struct drm_device *dev) | |||
6757 | } | 7428 | } |
6758 | dev->mode_config.fb_base = dev->agp->base; | 7429 | dev->mode_config.fb_base = dev->agp->base; |
6759 | 7430 | ||
6760 | if (IS_MOBILE(dev) || !IS_GEN2(dev)) | ||
6761 | dev_priv->num_pipe = 2; | ||
6762 | else | ||
6763 | dev_priv->num_pipe = 1; | ||
6764 | DRM_DEBUG_KMS("%d display pipe%s available.\n", | 7431 | DRM_DEBUG_KMS("%d display pipe%s available.\n", |
6765 | dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : ""); | 7432 | dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : ""); |
6766 | 7433 | ||
@@ -6783,21 +7450,9 @@ void intel_modeset_init(struct drm_device *dev) | |||
6783 | if (IS_GEN6(dev)) | 7450 | if (IS_GEN6(dev)) |
6784 | gen6_enable_rps(dev_priv); | 7451 | gen6_enable_rps(dev_priv); |
6785 | 7452 | ||
6786 | if (IS_IRONLAKE_M(dev)) { | 7453 | if (IS_IRONLAKE_M(dev)) |
6787 | dev_priv->renderctx = intel_alloc_context_page(dev); | ||
6788 | if (!dev_priv->renderctx) | ||
6789 | goto skip_rc6; | ||
6790 | dev_priv->pwrctx = intel_alloc_context_page(dev); | ||
6791 | if (!dev_priv->pwrctx) { | ||
6792 | i915_gem_object_unpin(dev_priv->renderctx); | ||
6793 | drm_gem_object_unreference(&dev_priv->renderctx->base); | ||
6794 | dev_priv->renderctx = NULL; | ||
6795 | goto skip_rc6; | ||
6796 | } | ||
6797 | ironlake_enable_rc6(dev); | 7454 | ironlake_enable_rc6(dev); |
6798 | } | ||
6799 | 7455 | ||
6800 | skip_rc6: | ||
6801 | INIT_WORK(&dev_priv->idle_work, intel_idle_update); | 7456 | INIT_WORK(&dev_priv->idle_work, intel_idle_update); |
6802 | setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, | 7457 | setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, |
6803 | (unsigned long)dev); | 7458 | (unsigned long)dev); |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 1f4242b682c8..d29e33f815d7 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -49,6 +49,7 @@ struct intel_dp { | |||
49 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; | 49 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; |
50 | bool has_audio; | 50 | bool has_audio; |
51 | int force_audio; | 51 | int force_audio; |
52 | uint32_t color_range; | ||
52 | int dpms_mode; | 53 | int dpms_mode; |
53 | uint8_t link_bw; | 54 | uint8_t link_bw; |
54 | uint8_t lane_count; | 55 | uint8_t lane_count; |
@@ -685,6 +686,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
685 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 686 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
686 | int lane_count = 4, bpp = 24; | 687 | int lane_count = 4, bpp = 24; |
687 | struct intel_dp_m_n m_n; | 688 | struct intel_dp_m_n m_n; |
689 | int pipe = intel_crtc->pipe; | ||
688 | 690 | ||
689 | /* | 691 | /* |
690 | * Find the lane count in the intel_encoder private | 692 | * Find the lane count in the intel_encoder private |
@@ -715,39 +717,19 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
715 | mode->clock, adjusted_mode->clock, &m_n); | 717 | mode->clock, adjusted_mode->clock, &m_n); |
716 | 718 | ||
717 | if (HAS_PCH_SPLIT(dev)) { | 719 | if (HAS_PCH_SPLIT(dev)) { |
718 | if (intel_crtc->pipe == 0) { | 720 | I915_WRITE(TRANSDATA_M1(pipe), |
719 | I915_WRITE(TRANSA_DATA_M1, | 721 | ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | |
720 | ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | | 722 | m_n.gmch_m); |
721 | m_n.gmch_m); | 723 | I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n); |
722 | I915_WRITE(TRANSA_DATA_N1, m_n.gmch_n); | 724 | I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m); |
723 | I915_WRITE(TRANSA_DP_LINK_M1, m_n.link_m); | 725 | I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n); |
724 | I915_WRITE(TRANSA_DP_LINK_N1, m_n.link_n); | ||
725 | } else { | ||
726 | I915_WRITE(TRANSB_DATA_M1, | ||
727 | ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | | ||
728 | m_n.gmch_m); | ||
729 | I915_WRITE(TRANSB_DATA_N1, m_n.gmch_n); | ||
730 | I915_WRITE(TRANSB_DP_LINK_M1, m_n.link_m); | ||
731 | I915_WRITE(TRANSB_DP_LINK_N1, m_n.link_n); | ||
732 | } | ||
733 | } else { | 726 | } else { |
734 | if (intel_crtc->pipe == 0) { | 727 | I915_WRITE(PIPE_GMCH_DATA_M(pipe), |
735 | I915_WRITE(PIPEA_GMCH_DATA_M, | 728 | ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | |
736 | ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | | 729 | m_n.gmch_m); |
737 | m_n.gmch_m); | 730 | I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n); |
738 | I915_WRITE(PIPEA_GMCH_DATA_N, | 731 | I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m); |
739 | m_n.gmch_n); | 732 | I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n); |
740 | I915_WRITE(PIPEA_DP_LINK_M, m_n.link_m); | ||
741 | I915_WRITE(PIPEA_DP_LINK_N, m_n.link_n); | ||
742 | } else { | ||
743 | I915_WRITE(PIPEB_GMCH_DATA_M, | ||
744 | ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | | ||
745 | m_n.gmch_m); | ||
746 | I915_WRITE(PIPEB_GMCH_DATA_N, | ||
747 | m_n.gmch_n); | ||
748 | I915_WRITE(PIPEB_DP_LINK_M, m_n.link_m); | ||
749 | I915_WRITE(PIPEB_DP_LINK_N, m_n.link_n); | ||
750 | } | ||
751 | } | 733 | } |
752 | } | 734 | } |
753 | 735 | ||
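The intel_dp_set_m_n hunk above collapses the per-pipe if/else register writes into macros indexed by the pipe number (TRANSDATA_M1(pipe), PIPE_GMCH_DATA_M(pipe), and friends). A toy sketch of the same idea against a simulated register file; the offsets and stride here are made up, not the real MMIO map:

#include <stdio.h>

/* Hypothetical layout: pipe B's registers sit a fixed stride above
 * pipe A's, so one macro covers both pipes. */
#define REG_STRIDE	0x1000
#define DATA_M1(pipe)	(0x0030 + (pipe) * REG_STRIDE)
#define DATA_N1(pipe)	(0x0034 + (pipe) * REG_STRIDE)

static unsigned int regs[0x2000 / 4];	/* simulated register file */

static void write_reg(unsigned int reg, unsigned int val)
{
	regs[reg / 4] = val;
}

static void set_m_n(int pipe, unsigned int gmch_m, unsigned int gmch_n)
{
	/* One code path instead of an if (pipe == 0) ... else ... pair. */
	write_reg(DATA_M1(pipe), gmch_m);
	write_reg(DATA_N1(pipe), gmch_n);
}

int main(void)
{
	set_m_n(0, 0x1234, 0x5678);
	set_m_n(1, 0x9abc, 0xdef0);
	printf("pipe0 M=%#x pipe1 M=%#x\n",
	       regs[DATA_M1(0) / 4], regs[DATA_M1(1) / 4]);
	return 0;
}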
@@ -760,8 +742,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
760 | struct drm_crtc *crtc = intel_dp->base.base.crtc; | 742 | struct drm_crtc *crtc = intel_dp->base.base.crtc; |
761 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 743 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
762 | 744 | ||
763 | intel_dp->DP = (DP_VOLTAGE_0_4 | | 745 | intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; |
764 | DP_PRE_EMPHASIS_0); | 746 | intel_dp->DP |= intel_dp->color_range; |
765 | 747 | ||
766 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) | 748 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
767 | intel_dp->DP |= DP_SYNC_HS_HIGH; | 749 | intel_dp->DP |= DP_SYNC_HS_HIGH; |
@@ -813,6 +795,40 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
813 | } | 795 | } |
814 | } | 796 | } |
815 | 797 | ||
798 | static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) | ||
799 | { | ||
800 | struct drm_device *dev = intel_dp->base.base.dev; | ||
801 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
802 | u32 pp; | ||
803 | |||
804 | /* | ||
805 | * If the panel wasn't on, make sure there's not a currently | ||
806 | * active PP sequence before enabling AUX VDD. | ||
807 | */ | ||
808 | if (!(I915_READ(PCH_PP_STATUS) & PP_ON)) | ||
809 | msleep(dev_priv->panel_t3); | ||
810 | |||
811 | pp = I915_READ(PCH_PP_CONTROL); | ||
812 | pp |= EDP_FORCE_VDD; | ||
813 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
814 | POSTING_READ(PCH_PP_CONTROL); | ||
815 | } | ||
816 | |||
817 | static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp) | ||
818 | { | ||
819 | struct drm_device *dev = intel_dp->base.base.dev; | ||
820 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
821 | u32 pp; | ||
822 | |||
823 | pp = I915_READ(PCH_PP_CONTROL); | ||
824 | pp &= ~EDP_FORCE_VDD; | ||
825 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
826 | POSTING_READ(PCH_PP_CONTROL); | ||
827 | |||
828 | /* Make sure sequencer is idle before allowing subsequent activity */ | ||
829 | msleep(dev_priv->panel_t12); | ||
830 | } | ||
831 | |||
816 | /* Returns true if the panel was already on when called */ | 832 | /* Returns true if the panel was already on when called */ |
817 | static bool ironlake_edp_panel_on (struct intel_dp *intel_dp) | 833 | static bool ironlake_edp_panel_on (struct intel_dp *intel_dp) |
818 | { | 834 | { |
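The new ironlake_edp_panel_vdd_on/off helpers force AUX VDD by toggling EDP_FORCE_VDD in the panel-power control register, waiting the panel's T3 power-up delay first if the panel is off, and waiting the T12 power-cycle delay after dropping VDD. A self-contained sketch of that sequencing; the bit positions, delay values, and msleep_sim stand-in are illustrative only:

#include <stdio.h>

#define PP_ON		(1u << 31)	/* illustrative "panel is on" status bit */
#define EDP_FORCE_VDD	(1u << 3)	/* illustrative "force AUX VDD" control bit */

static unsigned int pp_status, pp_control;
static unsigned int panel_t3_ms = 200, panel_t12_ms = 500;

static void msleep_sim(unsigned int ms) { printf("sleep %u ms\n", ms); }

static void edp_panel_vdd_on(void)
{
	/* If the panel is off, wait out T3 so no power sequence is in flight. */
	if (!(pp_status & PP_ON))
		msleep_sim(panel_t3_ms);
	pp_control |= EDP_FORCE_VDD;
}

static void edp_panel_vdd_off(void)
{
	pp_control &= ~EDP_FORCE_VDD;
	/* Let the sequencer idle (T12) before any further panel activity. */
	msleep_sim(panel_t12_ms);
}

int main(void)
{
	edp_panel_vdd_on();
	printf("VDD forced: %s\n", (pp_control & EDP_FORCE_VDD) ? "yes" : "no");
	edp_panel_vdd_off();
	return 0;
}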
@@ -834,11 +850,6 @@ static bool ironlake_edp_panel_on (struct intel_dp *intel_dp) | |||
834 | I915_WRITE(PCH_PP_CONTROL, pp); | 850 | I915_WRITE(PCH_PP_CONTROL, pp); |
835 | POSTING_READ(PCH_PP_CONTROL); | 851 | POSTING_READ(PCH_PP_CONTROL); |
836 | 852 | ||
837 | /* Ouch. We need to wait here for some panels, like Dell e6510 | ||
838 | * https://bugs.freedesktop.org/show_bug.cgi?id=29278i | ||
839 | */ | ||
840 | msleep(300); | ||
841 | |||
842 | if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask, | 853 | if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask, |
843 | 5000)) | 854 | 5000)) |
844 | DRM_ERROR("panel on wait timed out: 0x%08x\n", | 855 | DRM_ERROR("panel on wait timed out: 0x%08x\n", |
@@ -875,11 +886,6 @@ static void ironlake_edp_panel_off (struct drm_device *dev) | |||
875 | pp |= PANEL_POWER_RESET; /* restore panel reset bit */ | 886 | pp |= PANEL_POWER_RESET; /* restore panel reset bit */ |
876 | I915_WRITE(PCH_PP_CONTROL, pp); | 887 | I915_WRITE(PCH_PP_CONTROL, pp); |
877 | POSTING_READ(PCH_PP_CONTROL); | 888 | POSTING_READ(PCH_PP_CONTROL); |
878 | |||
879 | /* Ouch. We need to wait here for some panels, like Dell e6510 | ||
880 | * https://bugs.freedesktop.org/show_bug.cgi?id=29278i | ||
881 | */ | ||
882 | msleep(300); | ||
883 | } | 889 | } |
884 | 890 | ||
885 | static void ironlake_edp_backlight_on (struct drm_device *dev) | 891 | static void ironlake_edp_backlight_on (struct drm_device *dev) |
@@ -945,7 +951,7 @@ static void intel_dp_prepare(struct drm_encoder *encoder) | |||
945 | 951 | ||
946 | if (is_edp(intel_dp)) { | 952 | if (is_edp(intel_dp)) { |
947 | ironlake_edp_backlight_off(dev); | 953 | ironlake_edp_backlight_off(dev); |
948 | ironlake_edp_panel_on(intel_dp); | 954 | ironlake_edp_panel_off(dev); |
949 | if (!is_pch_edp(intel_dp)) | 955 | if (!is_pch_edp(intel_dp)) |
950 | ironlake_edp_pll_on(encoder); | 956 | ironlake_edp_pll_on(encoder); |
951 | else | 957 | else |
@@ -959,10 +965,15 @@ static void intel_dp_commit(struct drm_encoder *encoder) | |||
959 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 965 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
960 | struct drm_device *dev = encoder->dev; | 966 | struct drm_device *dev = encoder->dev; |
961 | 967 | ||
968 | if (is_edp(intel_dp)) | ||
969 | ironlake_edp_panel_vdd_on(intel_dp); | ||
970 | |||
962 | intel_dp_start_link_train(intel_dp); | 971 | intel_dp_start_link_train(intel_dp); |
963 | 972 | ||
964 | if (is_edp(intel_dp)) | 973 | if (is_edp(intel_dp)) { |
965 | ironlake_edp_panel_on(intel_dp); | 974 | ironlake_edp_panel_on(intel_dp); |
975 | ironlake_edp_panel_vdd_off(intel_dp); | ||
976 | } | ||
966 | 977 | ||
967 | intel_dp_complete_link_train(intel_dp); | 978 | intel_dp_complete_link_train(intel_dp); |
968 | 979 | ||
@@ -988,9 +999,13 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) | |||
988 | ironlake_edp_pll_off(encoder); | 999 | ironlake_edp_pll_off(encoder); |
989 | } else { | 1000 | } else { |
990 | if (is_edp(intel_dp)) | 1001 | if (is_edp(intel_dp)) |
991 | ironlake_edp_panel_on(intel_dp); | 1002 | ironlake_edp_panel_vdd_on(intel_dp); |
992 | if (!(dp_reg & DP_PORT_EN)) { | 1003 | if (!(dp_reg & DP_PORT_EN)) { |
993 | intel_dp_start_link_train(intel_dp); | 1004 | intel_dp_start_link_train(intel_dp); |
1005 | if (is_edp(intel_dp)) { | ||
1006 | ironlake_edp_panel_on(intel_dp); | ||
1007 | ironlake_edp_panel_vdd_off(intel_dp); | ||
1008 | } | ||
994 | intel_dp_complete_link_train(intel_dp); | 1009 | intel_dp_complete_link_train(intel_dp); |
995 | } | 1010 | } |
996 | if (is_edp(intel_dp)) | 1011 | if (is_edp(intel_dp)) |
@@ -1508,9 +1523,13 @@ ironlake_dp_detect(struct intel_dp *intel_dp) | |||
1508 | { | 1523 | { |
1509 | enum drm_connector_status status; | 1524 | enum drm_connector_status status; |
1510 | 1525 | ||
1511 | /* Can't disconnect eDP */ | 1526 | /* Can't disconnect eDP, but you can close the lid... */ |
1512 | if (is_edp(intel_dp)) | 1527 | if (is_edp(intel_dp)) { |
1513 | return connector_status_connected; | 1528 | status = intel_panel_detect(intel_dp->base.base.dev); |
1529 | if (status == connector_status_unknown) | ||
1530 | status = connector_status_connected; | ||
1531 | return status; | ||
1532 | } | ||
1514 | 1533 | ||
1515 | status = connector_status_disconnected; | 1534 | status = connector_status_disconnected; |
1516 | if (intel_dp_aux_native_read(intel_dp, | 1535 | if (intel_dp_aux_native_read(intel_dp, |
@@ -1639,11 +1658,30 @@ static int intel_dp_get_modes(struct drm_connector *connector) | |||
1639 | return 0; | 1658 | return 0; |
1640 | } | 1659 | } |
1641 | 1660 | ||
1661 | static bool | ||
1662 | intel_dp_detect_audio(struct drm_connector *connector) | ||
1663 | { | ||
1664 | struct intel_dp *intel_dp = intel_attached_dp(connector); | ||
1665 | struct edid *edid; | ||
1666 | bool has_audio = false; | ||
1667 | |||
1668 | edid = drm_get_edid(connector, &intel_dp->adapter); | ||
1669 | if (edid) { | ||
1670 | has_audio = drm_detect_monitor_audio(edid); | ||
1671 | |||
1672 | connector->display_info.raw_edid = NULL; | ||
1673 | kfree(edid); | ||
1674 | } | ||
1675 | |||
1676 | return has_audio; | ||
1677 | } | ||
1678 | |||
1642 | static int | 1679 | static int |
1643 | intel_dp_set_property(struct drm_connector *connector, | 1680 | intel_dp_set_property(struct drm_connector *connector, |
1644 | struct drm_property *property, | 1681 | struct drm_property *property, |
1645 | uint64_t val) | 1682 | uint64_t val) |
1646 | { | 1683 | { |
1684 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | ||
1647 | struct intel_dp *intel_dp = intel_attached_dp(connector); | 1685 | struct intel_dp *intel_dp = intel_attached_dp(connector); |
1648 | int ret; | 1686 | int ret; |
1649 | 1687 | ||
@@ -1652,17 +1690,31 @@ intel_dp_set_property(struct drm_connector *connector, | |||
1652 | return ret; | 1690 | return ret; |
1653 | 1691 | ||
1654 | if (property == intel_dp->force_audio_property) { | 1692 | if (property == intel_dp->force_audio_property) { |
1655 | if (val == intel_dp->force_audio) | 1693 | int i = val; |
1694 | bool has_audio; | ||
1695 | |||
1696 | if (i == intel_dp->force_audio) | ||
1656 | return 0; | 1697 | return 0; |
1657 | 1698 | ||
1658 | intel_dp->force_audio = val; | 1699 | intel_dp->force_audio = i; |
1659 | 1700 | ||
1660 | if (val > 0 && intel_dp->has_audio) | 1701 | if (i == 0) |
1702 | has_audio = intel_dp_detect_audio(connector); | ||
1703 | else | ||
1704 | has_audio = i > 0; | ||
1705 | |||
1706 | if (has_audio == intel_dp->has_audio) | ||
1661 | return 0; | 1707 | return 0; |
1662 | if (val < 0 && !intel_dp->has_audio) | 1708 | |
1709 | intel_dp->has_audio = has_audio; | ||
1710 | goto done; | ||
1711 | } | ||
1712 | |||
1713 | if (property == dev_priv->broadcast_rgb_property) { | ||
1714 | if (val == !!intel_dp->color_range) | ||
1663 | return 0; | 1715 | return 0; |
1664 | 1716 | ||
1665 | intel_dp->has_audio = val > 0; | 1717 | intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0; |
1666 | goto done; | 1718 | goto done; |
1667 | } | 1719 | } |
1668 | 1720 | ||
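The property handler above treats force_audio as a tri-state (negative forces audio off, positive forces it on, zero falls back to probing the sink's EDID) and maps the "Broadcast RGB" property value onto the encoder's color_range bit. A small sketch of both decisions, with a stubbed EDID probe standing in for intel_dp_detect_audio and an illustrative range bit:

#include <stdbool.h>
#include <stdio.h>

#define COLOR_RANGE_16_235	(1u << 8)	/* illustrative limited-range bit */

/* Stand-in for reading the sink's EDID and checking audio capability. */
static bool detect_audio_from_edid(void)
{
	return true;
}

/* force: <0 force off, 0 auto-detect from EDID, >0 force on. */
static bool resolve_has_audio(int force)
{
	if (force == 0)
		return detect_audio_from_edid();
	return force > 0;
}

/* Property value 0 selects full range, 1 selects limited 16:235. */
static unsigned int resolve_color_range(unsigned long val)
{
	return val ? COLOR_RANGE_16_235 : 0;
}

int main(void)
{
	printf("force=-1 -> audio %d\n", resolve_has_audio(-1));
	printf("force=0  -> audio %d\n", resolve_has_audio(0));
	printf("broadcast_rgb=1 -> range bits %#x\n", resolve_color_range(1));
	return 0;
}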
@@ -1785,6 +1837,8 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect | |||
1785 | intel_dp->force_audio_property->values[1] = 1; | 1837 | intel_dp->force_audio_property->values[1] = 1; |
1786 | drm_connector_attach_property(connector, intel_dp->force_audio_property, 0); | 1838 | drm_connector_attach_property(connector, intel_dp->force_audio_property, 0); |
1787 | } | 1839 | } |
1840 | |||
1841 | intel_attach_broadcast_rgb_property(connector); | ||
1788 | } | 1842 | } |
1789 | 1843 | ||
1790 | void | 1844 | void |
@@ -1802,6 +1856,9 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1802 | if (!intel_dp) | 1856 | if (!intel_dp) |
1803 | return; | 1857 | return; |
1804 | 1858 | ||
1859 | intel_dp->output_reg = output_reg; | ||
1860 | intel_dp->dpms_mode = -1; | ||
1861 | |||
1805 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); | 1862 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); |
1806 | if (!intel_connector) { | 1863 | if (!intel_connector) { |
1807 | kfree(intel_dp); | 1864 | kfree(intel_dp); |
@@ -1841,10 +1898,6 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1841 | connector->interlace_allowed = true; | 1898 | connector->interlace_allowed = true; |
1842 | connector->doublescan_allowed = 0; | 1899 | connector->doublescan_allowed = 0; |
1843 | 1900 | ||
1844 | intel_dp->output_reg = output_reg; | ||
1845 | intel_dp->has_audio = false; | ||
1846 | intel_dp->dpms_mode = DRM_MODE_DPMS_ON; | ||
1847 | |||
1848 | drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, | 1901 | drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, |
1849 | DRM_MODE_ENCODER_TMDS); | 1902 | DRM_MODE_ENCODER_TMDS); |
1850 | drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs); | 1903 | drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs); |
@@ -1882,21 +1935,33 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1882 | /* Cache some DPCD data in the eDP case */ | 1935 | /* Cache some DPCD data in the eDP case */ |
1883 | if (is_edp(intel_dp)) { | 1936 | if (is_edp(intel_dp)) { |
1884 | int ret; | 1937 | int ret; |
1885 | bool was_on; | 1938 | u32 pp_on, pp_div; |
1939 | |||
1940 | pp_on = I915_READ(PCH_PP_ON_DELAYS); | ||
1941 | pp_div = I915_READ(PCH_PP_DIVISOR); | ||
1886 | 1942 | ||
1887 | was_on = ironlake_edp_panel_on(intel_dp); | 1943 | /* Get T3 & T12 values (note: VESA not bspec terminology) */ |
1944 | dev_priv->panel_t3 = (pp_on & 0x1fff0000) >> 16; | ||
1945 | dev_priv->panel_t3 /= 10; /* t3 in 100us units */ | ||
1946 | dev_priv->panel_t12 = pp_div & 0xf; | ||
1947 | dev_priv->panel_t12 *= 100; /* t12 in 100ms units */ | ||
1948 | |||
1949 | ironlake_edp_panel_vdd_on(intel_dp); | ||
1888 | ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV, | 1950 | ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV, |
1889 | intel_dp->dpcd, | 1951 | intel_dp->dpcd, |
1890 | sizeof(intel_dp->dpcd)); | 1952 | sizeof(intel_dp->dpcd)); |
1953 | ironlake_edp_panel_vdd_off(intel_dp); | ||
1891 | if (ret == sizeof(intel_dp->dpcd)) { | 1954 | if (ret == sizeof(intel_dp->dpcd)) { |
1892 | if (intel_dp->dpcd[0] >= 0x11) | 1955 | if (intel_dp->dpcd[0] >= 0x11) |
1893 | dev_priv->no_aux_handshake = intel_dp->dpcd[3] & | 1956 | dev_priv->no_aux_handshake = intel_dp->dpcd[3] & |
1894 | DP_NO_AUX_HANDSHAKE_LINK_TRAINING; | 1957 | DP_NO_AUX_HANDSHAKE_LINK_TRAINING; |
1895 | } else { | 1958 | } else { |
1959 | /* if this fails, presume the device is a ghost */ | ||
1896 | DRM_ERROR("failed to retrieve link info\n"); | 1960 | DRM_ERROR("failed to retrieve link info\n"); |
1961 | intel_dp_destroy(&intel_connector->base); | ||
1962 | intel_dp_encoder_destroy(&intel_dp->base.base); | ||
1963 | return; | ||
1897 | } | 1964 | } |
1898 | if (!was_on) | ||
1899 | ironlake_edp_panel_off(dev); | ||
1900 | } | 1965 | } |
1901 | 1966 | ||
1902 | intel_encoder->hot_plug = intel_dp_hot_plug; | 1967 | intel_encoder->hot_plug = intel_dp_hot_plug; |
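During eDP init the driver now caches the VESA T3 and T12 delays in milliseconds: per the hunk above, T3 sits in bits 28:16 of the power-on-delays register in 100 µs units, and T12 in the low nibble of the divisor register in 100 ms units. A sketch of just that field extraction and unit conversion (the sample register values are hypothetical):

#include <stdio.h>

/* Convert the raw power-sequencer fields to milliseconds, mirroring the
 * hunk above: T3 is stored in 100 us units, T12 in 100 ms units. */
static unsigned int panel_t3_ms(unsigned int pp_on)
{
	return ((pp_on & 0x1fff0000) >> 16) / 10;
}

static unsigned int panel_t12_ms(unsigned int pp_div)
{
	return (pp_div & 0xf) * 100;
}

int main(void)
{
	unsigned int pp_on = 0x07d00000;	/* hypothetical: T3 field = 2000 -> 200 ms */
	unsigned int pp_div = 0x00000005;	/* hypothetical: T12 field = 5 -> 500 ms */

	printf("T3 = %u ms, T12 = %u ms\n", panel_t3_ms(pp_on), panel_t12_ms(pp_div));
	return 0;
}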
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 74db2557d644..5daa991cb287 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -217,6 +217,13 @@ intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) | |||
217 | return dev_priv->pipe_to_crtc_mapping[pipe]; | 217 | return dev_priv->pipe_to_crtc_mapping[pipe]; |
218 | } | 218 | } |
219 | 219 | ||
220 | static inline struct drm_crtc * | ||
221 | intel_get_crtc_for_plane(struct drm_device *dev, int plane) | ||
222 | { | ||
223 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
224 | return dev_priv->plane_to_crtc_mapping[plane]; | ||
225 | } | ||
226 | |||
220 | struct intel_unpin_work { | 227 | struct intel_unpin_work { |
221 | struct work_struct work; | 228 | struct work_struct work; |
222 | struct drm_device *dev; | 229 | struct drm_device *dev; |
@@ -230,6 +237,8 @@ struct intel_unpin_work { | |||
230 | int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); | 237 | int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); |
231 | extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus); | 238 | extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus); |
232 | 239 | ||
240 | extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector); | ||
241 | |||
233 | extern void intel_crt_init(struct drm_device *dev); | 242 | extern void intel_crt_init(struct drm_device *dev); |
234 | extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); | 243 | extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); |
235 | void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); | 244 | void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); |
@@ -260,6 +269,7 @@ extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); | |||
260 | extern void intel_panel_setup_backlight(struct drm_device *dev); | 269 | extern void intel_panel_setup_backlight(struct drm_device *dev); |
261 | extern void intel_panel_enable_backlight(struct drm_device *dev); | 270 | extern void intel_panel_enable_backlight(struct drm_device *dev); |
262 | extern void intel_panel_disable_backlight(struct drm_device *dev); | 271 | extern void intel_panel_disable_backlight(struct drm_device *dev); |
272 | extern enum drm_connector_status intel_panel_detect(struct drm_device *dev); | ||
263 | 273 | ||
264 | extern void intel_crtc_load_lut(struct drm_crtc *crtc); | 274 | extern void intel_crtc_load_lut(struct drm_crtc *crtc); |
265 | extern void intel_encoder_prepare (struct drm_encoder *encoder); | 275 | extern void intel_encoder_prepare (struct drm_encoder *encoder); |
@@ -298,7 +308,6 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | |||
298 | extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, | 308 | extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, |
299 | u16 *blue, int regno); | 309 | u16 *blue, int regno); |
300 | extern void intel_enable_clock_gating(struct drm_device *dev); | 310 | extern void intel_enable_clock_gating(struct drm_device *dev); |
301 | extern void intel_disable_clock_gating(struct drm_device *dev); | ||
302 | extern void ironlake_enable_drps(struct drm_device *dev); | 311 | extern void ironlake_enable_drps(struct drm_device *dev); |
303 | extern void ironlake_disable_drps(struct drm_device *dev); | 312 | extern void ironlake_disable_drps(struct drm_device *dev); |
304 | extern void gen6_enable_rps(struct drm_i915_private *dev_priv); | 313 | extern void gen6_enable_rps(struct drm_i915_private *dev_priv); |
@@ -322,8 +331,7 @@ extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane); | |||
322 | 331 | ||
323 | extern void intel_setup_overlay(struct drm_device *dev); | 332 | extern void intel_setup_overlay(struct drm_device *dev); |
324 | extern void intel_cleanup_overlay(struct drm_device *dev); | 333 | extern void intel_cleanup_overlay(struct drm_device *dev); |
325 | extern int intel_overlay_switch_off(struct intel_overlay *overlay, | 334 | extern int intel_overlay_switch_off(struct intel_overlay *overlay); |
326 | bool interruptible); | ||
327 | extern int intel_overlay_put_image(struct drm_device *dev, void *data, | 335 | extern int intel_overlay_put_image(struct drm_device *dev, void *data, |
328 | struct drm_file *file_priv); | 336 | struct drm_file *file_priv); |
329 | extern int intel_overlay_attrs(struct drm_device *dev, void *data, | 337 | extern int intel_overlay_attrs(struct drm_device *dev, void *data, |
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index ea373283c93b..6eda1b51c636 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c | |||
@@ -178,7 +178,7 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, | |||
178 | int pipe = intel_crtc->pipe; | 178 | int pipe = intel_crtc->pipe; |
179 | u32 dvo_val; | 179 | u32 dvo_val; |
180 | u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg; | 180 | u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg; |
181 | int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; | 181 | int dpll_reg = DPLL(pipe); |
182 | 182 | ||
183 | switch (dvo_reg) { | 183 | switch (dvo_reg) { |
184 | case DVOA: | 184 | case DVOA: |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 0d0273e7b029..f289b8642976 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -41,6 +41,7 @@ struct intel_hdmi { | |||
41 | struct intel_encoder base; | 41 | struct intel_encoder base; |
42 | u32 sdvox_reg; | 42 | u32 sdvox_reg; |
43 | int ddc_bus; | 43 | int ddc_bus; |
44 | uint32_t color_range; | ||
44 | bool has_hdmi_sink; | 45 | bool has_hdmi_sink; |
45 | bool has_audio; | 46 | bool has_audio; |
46 | int force_audio; | 47 | int force_audio; |
@@ -124,6 +125,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, | |||
124 | u32 sdvox; | 125 | u32 sdvox; |
125 | 126 | ||
126 | sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE; | 127 | sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE; |
128 | sdvox |= intel_hdmi->color_range; | ||
127 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) | 129 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
128 | sdvox |= SDVO_VSYNC_ACTIVE_HIGH; | 130 | sdvox |= SDVO_VSYNC_ACTIVE_HIGH; |
129 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) | 131 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
@@ -251,12 +253,34 @@ static int intel_hdmi_get_modes(struct drm_connector *connector) | |||
251 | &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); | 253 | &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); |
252 | } | 254 | } |
253 | 255 | ||
256 | static bool | ||
257 | intel_hdmi_detect_audio(struct drm_connector *connector) | ||
258 | { | ||
259 | struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); | ||
260 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | ||
261 | struct edid *edid; | ||
262 | bool has_audio = false; | ||
263 | |||
264 | edid = drm_get_edid(connector, | ||
265 | &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); | ||
266 | if (edid) { | ||
267 | if (edid->input & DRM_EDID_INPUT_DIGITAL) | ||
268 | has_audio = drm_detect_monitor_audio(edid); | ||
269 | |||
270 | connector->display_info.raw_edid = NULL; | ||
271 | kfree(edid); | ||
272 | } | ||
273 | |||
274 | return has_audio; | ||
275 | } | ||
276 | |||
254 | static int | 277 | static int |
255 | intel_hdmi_set_property(struct drm_connector *connector, | 278 | intel_hdmi_set_property(struct drm_connector *connector, |
256 | struct drm_property *property, | 279 | struct drm_property *property, |
257 | uint64_t val) | 280 | uint64_t val) |
258 | { | 281 | { |
259 | struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); | 282 | struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); |
283 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | ||
260 | int ret; | 284 | int ret; |
261 | 285 | ||
262 | ret = drm_connector_property_set_value(connector, property, val); | 286 | ret = drm_connector_property_set_value(connector, property, val); |
@@ -264,17 +288,31 @@ intel_hdmi_set_property(struct drm_connector *connector, | |||
264 | return ret; | 288 | return ret; |
265 | 289 | ||
266 | if (property == intel_hdmi->force_audio_property) { | 290 | if (property == intel_hdmi->force_audio_property) { |
267 | if (val == intel_hdmi->force_audio) | 291 | int i = val; |
292 | bool has_audio; | ||
293 | |||
294 | if (i == intel_hdmi->force_audio) | ||
268 | return 0; | 295 | return 0; |
269 | 296 | ||
270 | intel_hdmi->force_audio = val; | 297 | intel_hdmi->force_audio = i; |
298 | |||
299 | if (i == 0) | ||
300 | has_audio = intel_hdmi_detect_audio(connector); | ||
301 | else | ||
302 | has_audio = i > 0; | ||
271 | 303 | ||
272 | if (val > 0 && intel_hdmi->has_audio) | 304 | if (has_audio == intel_hdmi->has_audio) |
273 | return 0; | 305 | return 0; |
274 | if (val < 0 && !intel_hdmi->has_audio) | 306 | |
307 | intel_hdmi->has_audio = has_audio; | ||
308 | goto done; | ||
309 | } | ||
310 | |||
311 | if (property == dev_priv->broadcast_rgb_property) { | ||
312 | if (val == !!intel_hdmi->color_range) | ||
275 | return 0; | 313 | return 0; |
276 | 314 | ||
277 | intel_hdmi->has_audio = val > 0; | 315 | intel_hdmi->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0; |
278 | goto done; | 316 | goto done; |
279 | } | 317 | } |
280 | 318 | ||
@@ -336,6 +374,8 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c | |||
336 | intel_hdmi->force_audio_property->values[1] = 1; | 374 | intel_hdmi->force_audio_property->values[1] = 1; |
337 | drm_connector_attach_property(connector, intel_hdmi->force_audio_property, 0); | 375 | drm_connector_attach_property(connector, intel_hdmi->force_audio_property, 0); |
338 | } | 376 | } |
377 | |||
378 | intel_attach_broadcast_rgb_property(connector); | ||
339 | } | 379 | } |
340 | 380 | ||
341 | void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | 381 | void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) |
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 58040f68ed7a..82d04c5899d2 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c | |||
@@ -384,7 +384,8 @@ int intel_setup_gmbus(struct drm_device *dev) | |||
384 | bus->reg0 = i | GMBUS_RATE_100KHZ; | 384 | bus->reg0 = i | GMBUS_RATE_100KHZ; |
385 | 385 | ||
386 | /* XXX force bit banging until GMBUS is fully debugged */ | 386 | /* XXX force bit banging until GMBUS is fully debugged */ |
387 | bus->force_bit = intel_gpio_create(dev_priv, i); | 387 | if (IS_GEN2(dev)) |
388 | bus->force_bit = intel_gpio_create(dev_priv, i); | ||
388 | } | 389 | } |
389 | 390 | ||
390 | intel_i2c_reset(dev_priv->dev); | 391 | intel_i2c_reset(dev_priv->dev); |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index ace8d5d30dd2..1a311ad01116 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -231,6 +231,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
231 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); | 231 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); |
232 | struct drm_encoder *tmp_encoder; | 232 | struct drm_encoder *tmp_encoder; |
233 | u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; | 233 | u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; |
234 | int pipe; | ||
234 | 235 | ||
235 | /* Should never happen!! */ | 236 | /* Should never happen!! */ |
236 | if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) { | 237 | if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) { |
@@ -261,12 +262,6 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
261 | return true; | 262 | return true; |
262 | } | 263 | } |
263 | 264 | ||
264 | /* Make sure pre-965s set dither correctly */ | ||
265 | if (INTEL_INFO(dev)->gen < 4) { | ||
266 | if (dev_priv->lvds_dither) | ||
267 | pfit_control |= PANEL_8TO6_DITHER_ENABLE; | ||
268 | } | ||
269 | |||
270 | /* Native modes don't need fitting */ | 265 | /* Native modes don't need fitting */ |
271 | if (adjusted_mode->hdisplay == mode->hdisplay && | 266 | if (adjusted_mode->hdisplay == mode->hdisplay && |
272 | adjusted_mode->vdisplay == mode->vdisplay) | 267 | adjusted_mode->vdisplay == mode->vdisplay) |
@@ -283,8 +278,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
283 | * to register description and PRM. | 278 | * to register description and PRM. |
284 | * Change the value here to see the borders for debugging | 279 | * Change the value here to see the borders for debugging |
285 | */ | 280 | */ |
286 | I915_WRITE(BCLRPAT_A, 0); | 281 | for_each_pipe(pipe) |
287 | I915_WRITE(BCLRPAT_B, 0); | 282 | I915_WRITE(BCLRPAT(pipe), 0); |
288 | 283 | ||
289 | switch (intel_lvds->fitting_mode) { | 284 | switch (intel_lvds->fitting_mode) { |
290 | case DRM_MODE_SCALE_CENTER: | 285 | case DRM_MODE_SCALE_CENTER: |
@@ -374,10 +369,16 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
374 | } | 369 | } |
375 | 370 | ||
376 | out: | 371 | out: |
372 | /* If not enabling scaling, be consistent and always use 0. */ | ||
377 | if ((pfit_control & PFIT_ENABLE) == 0) { | 373 | if ((pfit_control & PFIT_ENABLE) == 0) { |
378 | pfit_control = 0; | 374 | pfit_control = 0; |
379 | pfit_pgm_ratios = 0; | 375 | pfit_pgm_ratios = 0; |
380 | } | 376 | } |
377 | |||
378 | /* Make sure pre-965 set dither correctly */ | ||
379 | if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither) | ||
380 | pfit_control |= PANEL_8TO6_DITHER_ENABLE; | ||
381 | |||
381 | if (pfit_control != intel_lvds->pfit_control || | 382 | if (pfit_control != intel_lvds->pfit_control || |
382 | pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) { | 383 | pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) { |
383 | intel_lvds->pfit_control = pfit_control; | 384 | intel_lvds->pfit_control = pfit_control; |
@@ -474,6 +475,10 @@ intel_lvds_detect(struct drm_connector *connector, bool force) | |||
474 | struct drm_device *dev = connector->dev; | 475 | struct drm_device *dev = connector->dev; |
475 | enum drm_connector_status status = connector_status_connected; | 476 | enum drm_connector_status status = connector_status_connected; |
476 | 477 | ||
478 | status = intel_panel_detect(dev); | ||
479 | if (status != connector_status_unknown) | ||
480 | return status; | ||
481 | |||
477 | /* ACPI lid methods were generally unreliable in this generation, so | 482 | /* ACPI lid methods were generally unreliable in this generation, so |
478 | * don't even bother. | 483 | * don't even bother. |
479 | */ | 484 | */ |
@@ -496,7 +501,7 @@ static int intel_lvds_get_modes(struct drm_connector *connector) | |||
496 | return drm_add_edid_modes(connector, intel_lvds->edid); | 501 | return drm_add_edid_modes(connector, intel_lvds->edid); |
497 | 502 | ||
498 | mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode); | 503 | mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode); |
499 | if (mode == 0) | 504 | if (mode == NULL) |
500 | return 0; | 505 | return 0; |
501 | 506 | ||
502 | drm_mode_probed_add(connector, mode); | 507 | drm_mode_probed_add(connector, mode); |
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index f70b7cf32bff..9034dd8f33c7 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c | |||
@@ -80,3 +80,33 @@ int intel_ddc_get_modes(struct drm_connector *connector, | |||
80 | 80 | ||
81 | return ret; | 81 | return ret; |
82 | } | 82 | } |
83 | |||
84 | static const char *broadcast_rgb_names[] = { | ||
85 | "Full", | ||
86 | "Limited 16:235", | ||
87 | }; | ||
88 | |||
89 | void | ||
90 | intel_attach_broadcast_rgb_property(struct drm_connector *connector) | ||
91 | { | ||
92 | struct drm_device *dev = connector->dev; | ||
93 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
94 | struct drm_property *prop; | ||
95 | int i; | ||
96 | |||
97 | prop = dev_priv->broadcast_rgb_property; | ||
98 | if (prop == NULL) { | ||
99 | prop = drm_property_create(dev, DRM_MODE_PROP_ENUM, | ||
100 | "Broadcast RGB", | ||
101 | ARRAY_SIZE(broadcast_rgb_names)); | ||
102 | if (prop == NULL) | ||
103 | return; | ||
104 | |||
105 | for (i = 0; i < ARRAY_SIZE(broadcast_rgb_names); i++) | ||
106 | drm_property_add_enum(prop, i, i, broadcast_rgb_names[i]); | ||
107 | |||
108 | dev_priv->broadcast_rgb_property = prop; | ||
109 | } | ||
110 | |||
111 | drm_connector_attach_property(connector, prop, 0); | ||
112 | } | ||
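intel_attach_broadcast_rgb_property creates the "Broadcast RGB" enum property once, caches it in dev_priv, and attaches the same object to every connector that asks for it. A sketch of that create-once-then-share pattern using plain C structures in place of the DRM property API (all type names here are invented for the example):

#include <stdio.h>
#include <stdlib.h>

static const char *broadcast_rgb_names[] = { "Full", "Limited 16:235" };

struct prop {
	const char *name;
	const char **enum_names;
	int n_enums;
};

struct fake_dev_priv { struct prop *broadcast_rgb_property; };
struct fake_connector { struct fake_dev_priv *dev_priv; struct prop *attached; };

static void attach_broadcast_rgb_property(struct fake_connector *connector)
{
	struct fake_dev_priv *dev_priv = connector->dev_priv;
	struct prop *prop = dev_priv->broadcast_rgb_property;

	if (prop == NULL) {
		/* First caller creates the property; later connectors reuse it. */
		prop = calloc(1, sizeof(*prop));
		if (prop == NULL)
			return;
		prop->name = "Broadcast RGB";
		prop->enum_names = broadcast_rgb_names;
		prop->n_enums = 2;
		dev_priv->broadcast_rgb_property = prop;
	}

	connector->attached = prop;
}

int main(void)
{
	struct fake_dev_priv priv = { 0 };
	struct fake_connector hdmi = { &priv, NULL }, dp = { &priv, NULL };

	attach_broadcast_rgb_property(&hdmi);
	attach_broadcast_rgb_property(&dp);
	printf("shared property: %s\n", hdmi.attached == dp.attached ? "yes" : "no");
	return 0;
}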
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 64fd64443ca6..d2c710422908 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c | |||
@@ -39,6 +39,8 @@ | |||
39 | 39 | ||
40 | #define OPREGION_HEADER_OFFSET 0 | 40 | #define OPREGION_HEADER_OFFSET 0 |
41 | #define OPREGION_ACPI_OFFSET 0x100 | 41 | #define OPREGION_ACPI_OFFSET 0x100 |
42 | #define ACPI_CLID 0x01ac /* current lid state indicator */ | ||
43 | #define ACPI_CDCK 0x01b0 /* current docking state indicator */ | ||
42 | #define OPREGION_SWSCI_OFFSET 0x200 | 44 | #define OPREGION_SWSCI_OFFSET 0x200 |
43 | #define OPREGION_ASLE_OFFSET 0x300 | 45 | #define OPREGION_ASLE_OFFSET 0x300 |
44 | #define OPREGION_VBT_OFFSET 0x400 | 46 | #define OPREGION_VBT_OFFSET 0x400 |
@@ -489,6 +491,8 @@ int intel_opregion_setup(struct drm_device *dev) | |||
489 | opregion->header = base; | 491 | opregion->header = base; |
490 | opregion->vbt = base + OPREGION_VBT_OFFSET; | 492 | opregion->vbt = base + OPREGION_VBT_OFFSET; |
491 | 493 | ||
494 | opregion->lid_state = base + ACPI_CLID; | ||
495 | |||
492 | mboxes = opregion->header->mboxes; | 496 | mboxes = opregion->header->mboxes; |
493 | if (mboxes & MBOX_ACPI) { | 497 | if (mboxes & MBOX_ACPI) { |
494 | DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); | 498 | DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); |
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 3fbb98b948d6..a670c006982e 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
@@ -213,7 +213,6 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay, | |||
213 | 213 | ||
214 | static int intel_overlay_do_wait_request(struct intel_overlay *overlay, | 214 | static int intel_overlay_do_wait_request(struct intel_overlay *overlay, |
215 | struct drm_i915_gem_request *request, | 215 | struct drm_i915_gem_request *request, |
216 | bool interruptible, | ||
217 | void (*tail)(struct intel_overlay *)) | 216 | void (*tail)(struct intel_overlay *)) |
218 | { | 217 | { |
219 | struct drm_device *dev = overlay->dev; | 218 | struct drm_device *dev = overlay->dev; |
@@ -221,16 +220,14 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, | |||
221 | int ret; | 220 | int ret; |
222 | 221 | ||
223 | BUG_ON(overlay->last_flip_req); | 222 | BUG_ON(overlay->last_flip_req); |
224 | ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv)); | 223 | ret = i915_add_request(LP_RING(dev_priv), NULL, request); |
225 | if (ret) { | 224 | if (ret) { |
226 | kfree(request); | 225 | kfree(request); |
227 | return ret; | 226 | return ret; |
228 | } | 227 | } |
229 | overlay->last_flip_req = request->seqno; | 228 | overlay->last_flip_req = request->seqno; |
230 | overlay->flip_tail = tail; | 229 | overlay->flip_tail = tail; |
231 | ret = i915_do_wait_request(dev, | 230 | ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req); |
232 | overlay->last_flip_req, true, | ||
233 | LP_RING(dev_priv)); | ||
234 | if (ret) | 231 | if (ret) |
235 | return ret; | 232 | return ret; |
236 | 233 | ||
@@ -256,7 +253,7 @@ i830_activate_pipe_a(struct drm_device *dev) | |||
256 | return 0; | 253 | return 0; |
257 | 254 | ||
258 | /* most i8xx have pipe a forced on, so don't trust dpms mode */ | 255 | /* most i8xx have pipe a forced on, so don't trust dpms mode */ |
259 | if (I915_READ(PIPEACONF) & PIPECONF_ENABLE) | 256 | if (I915_READ(_PIPEACONF) & PIPECONF_ENABLE) |
260 | return 0; | 257 | return 0; |
261 | 258 | ||
262 | crtc_funcs = crtc->base.helper_private; | 259 | crtc_funcs = crtc->base.helper_private; |
@@ -322,7 +319,7 @@ static int intel_overlay_on(struct intel_overlay *overlay) | |||
322 | OUT_RING(MI_NOOP); | 319 | OUT_RING(MI_NOOP); |
323 | ADVANCE_LP_RING(); | 320 | ADVANCE_LP_RING(); |
324 | 321 | ||
325 | ret = intel_overlay_do_wait_request(overlay, request, true, NULL); | 322 | ret = intel_overlay_do_wait_request(overlay, request, NULL); |
326 | out: | 323 | out: |
327 | if (pipe_a_quirk) | 324 | if (pipe_a_quirk) |
328 | i830_deactivate_pipe_a(dev); | 325 | i830_deactivate_pipe_a(dev); |
@@ -364,7 +361,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay, | |||
364 | OUT_RING(flip_addr); | 361 | OUT_RING(flip_addr); |
365 | ADVANCE_LP_RING(); | 362 | ADVANCE_LP_RING(); |
366 | 363 | ||
367 | ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv)); | 364 | ret = i915_add_request(LP_RING(dev_priv), NULL, request); |
368 | if (ret) { | 365 | if (ret) { |
369 | kfree(request); | 366 | kfree(request); |
370 | return ret; | 367 | return ret; |
@@ -401,8 +398,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay) | |||
401 | } | 398 | } |
402 | 399 | ||
403 | /* overlay needs to be disabled in OCMD reg */ | 400 | /* overlay needs to be disabled in OCMD reg */ |
404 | static int intel_overlay_off(struct intel_overlay *overlay, | 401 | static int intel_overlay_off(struct intel_overlay *overlay) |
405 | bool interruptible) | ||
406 | { | 402 | { |
407 | struct drm_device *dev = overlay->dev; | 403 | struct drm_device *dev = overlay->dev; |
408 | struct drm_i915_private *dev_priv = dev->dev_private; | 404 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -437,14 +433,13 @@ static int intel_overlay_off(struct intel_overlay *overlay, | |||
437 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); | 433 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); |
438 | ADVANCE_LP_RING(); | 434 | ADVANCE_LP_RING(); |
439 | 435 | ||
440 | return intel_overlay_do_wait_request(overlay, request, interruptible, | 436 | return intel_overlay_do_wait_request(overlay, request, |
441 | intel_overlay_off_tail); | 437 | intel_overlay_off_tail); |
442 | } | 438 | } |
443 | 439 | ||
444 | /* recover from an interruption due to a signal | 440 | /* recover from an interruption due to a signal |
445 | * We have to be careful not to repeat work forever and make forward progress. */ | 441 | * We have to be careful not to repeat work forever and make forward progress. */

446 | static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, | 442 | static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay) |
447 | bool interruptible) | ||
448 | { | 443 | { |
449 | struct drm_device *dev = overlay->dev; | 444 | struct drm_device *dev = overlay->dev; |
450 | drm_i915_private_t *dev_priv = dev->dev_private; | 445 | drm_i915_private_t *dev_priv = dev->dev_private; |
@@ -453,8 +448,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, | |||
453 | if (overlay->last_flip_req == 0) | 448 | if (overlay->last_flip_req == 0) |
454 | return 0; | 449 | return 0; |
455 | 450 | ||
456 | ret = i915_do_wait_request(dev, overlay->last_flip_req, | 451 | ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req); |
457 | interruptible, LP_RING(dev_priv)); | ||
458 | if (ret) | 452 | if (ret) |
459 | return ret; | 453 | return ret; |
460 | 454 | ||
@@ -499,7 +493,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) | |||
499 | OUT_RING(MI_NOOP); | 493 | OUT_RING(MI_NOOP); |
500 | ADVANCE_LP_RING(); | 494 | ADVANCE_LP_RING(); |
501 | 495 | ||
502 | ret = intel_overlay_do_wait_request(overlay, request, true, | 496 | ret = intel_overlay_do_wait_request(overlay, request, |
503 | intel_overlay_release_old_vid_tail); | 497 | intel_overlay_release_old_vid_tail); |
504 | if (ret) | 498 | if (ret) |
505 | return ret; | 499 | return ret; |
@@ -868,8 +862,7 @@ out_unpin: | |||
868 | return ret; | 862 | return ret; |
869 | } | 863 | } |
870 | 864 | ||
871 | int intel_overlay_switch_off(struct intel_overlay *overlay, | 865 | int intel_overlay_switch_off(struct intel_overlay *overlay) |
872 | bool interruptible) | ||
873 | { | 866 | { |
874 | struct overlay_registers *regs; | 867 | struct overlay_registers *regs; |
875 | struct drm_device *dev = overlay->dev; | 868 | struct drm_device *dev = overlay->dev; |
@@ -878,7 +871,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay, | |||
878 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); | 871 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); |
879 | BUG_ON(!mutex_is_locked(&dev->mode_config.mutex)); | 872 | BUG_ON(!mutex_is_locked(&dev->mode_config.mutex)); |
880 | 873 | ||
881 | ret = intel_overlay_recover_from_interrupt(overlay, interruptible); | 874 | ret = intel_overlay_recover_from_interrupt(overlay); |
882 | if (ret != 0) | 875 | if (ret != 0) |
883 | return ret; | 876 | return ret; |
884 | 877 | ||
@@ -893,7 +886,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay, | |||
893 | regs->OCMD = 0; | 886 | regs->OCMD = 0; |
894 | intel_overlay_unmap_regs(overlay, regs); | 887 | intel_overlay_unmap_regs(overlay, regs); |
895 | 888 | ||
896 | ret = intel_overlay_off(overlay, interruptible); | 889 | ret = intel_overlay_off(overlay); |
897 | if (ret != 0) | 890 | if (ret != 0) |
898 | return ret; | 891 | return ret; |
899 | 892 | ||
@@ -1135,7 +1128,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, | |||
1135 | mutex_lock(&dev->mode_config.mutex); | 1128 | mutex_lock(&dev->mode_config.mutex); |
1136 | mutex_lock(&dev->struct_mutex); | 1129 | mutex_lock(&dev->struct_mutex); |
1137 | 1130 | ||
1138 | ret = intel_overlay_switch_off(overlay, true); | 1131 | ret = intel_overlay_switch_off(overlay); |
1139 | 1132 | ||
1140 | mutex_unlock(&dev->struct_mutex); | 1133 | mutex_unlock(&dev->struct_mutex); |
1141 | mutex_unlock(&dev->mode_config.mutex); | 1134 | mutex_unlock(&dev->mode_config.mutex); |
@@ -1157,7 +1150,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, | |||
1157 | 1150 | ||
1158 | new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv, | 1151 | new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv, |
1159 | put_image_rec->bo_handle)); | 1152 | put_image_rec->bo_handle)); |
1160 | if (!new_bo) { | 1153 | if (&new_bo->base == NULL) { |
1161 | ret = -ENOENT; | 1154 | ret = -ENOENT; |
1162 | goto out_free; | 1155 | goto out_free; |
1163 | } | 1156 | } |
@@ -1171,13 +1164,13 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, | |||
1171 | goto out_unlock; | 1164 | goto out_unlock; |
1172 | } | 1165 | } |
1173 | 1166 | ||
1174 | ret = intel_overlay_recover_from_interrupt(overlay, true); | 1167 | ret = intel_overlay_recover_from_interrupt(overlay); |
1175 | if (ret != 0) | 1168 | if (ret != 0) |
1176 | goto out_unlock; | 1169 | goto out_unlock; |
1177 | 1170 | ||
1178 | if (overlay->crtc != crtc) { | 1171 | if (overlay->crtc != crtc) { |
1179 | struct drm_display_mode *mode = &crtc->base.mode; | 1172 | struct drm_display_mode *mode = &crtc->base.mode; |
1180 | ret = intel_overlay_switch_off(overlay, true); | 1173 | ret = intel_overlay_switch_off(overlay); |
1181 | if (ret != 0) | 1174 | if (ret != 0) |
1182 | goto out_unlock; | 1175 | goto out_unlock; |
1183 | 1176 | ||
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index c65992df458d..18391b3ec2c1 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -30,8 +30,6 @@ | |||
30 | 30 | ||
31 | #include "intel_drv.h" | 31 | #include "intel_drv.h" |
32 | 32 | ||
33 | #define PCI_LBPC 0xf4 /* legacy/combination backlight modes */ | ||
34 | |||
35 | void | 33 | void |
36 | intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, | 34 | intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, |
37 | struct drm_display_mode *adjusted_mode) | 35 | struct drm_display_mode *adjusted_mode) |
@@ -112,19 +110,6 @@ done: | |||
112 | dev_priv->pch_pf_size = (width << 16) | height; | 110 | dev_priv->pch_pf_size = (width << 16) | height; |
113 | } | 111 | } |
114 | 112 | ||
115 | static int is_backlight_combination_mode(struct drm_device *dev) | ||
116 | { | ||
117 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
118 | |||
119 | if (INTEL_INFO(dev)->gen >= 4) | ||
120 | return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE; | ||
121 | |||
122 | if (IS_GEN2(dev)) | ||
123 | return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE; | ||
124 | |||
125 | return 0; | ||
126 | } | ||
127 | |||
128 | static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) | 113 | static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) |
129 | { | 114 | { |
130 | u32 val; | 115 | u32 val; |
@@ -181,9 +166,6 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev) | |||
181 | if (INTEL_INFO(dev)->gen < 4) | 166 | if (INTEL_INFO(dev)->gen < 4) |
182 | max &= ~1; | 167 | max &= ~1; |
183 | } | 168 | } |
184 | |||
185 | if (is_backlight_combination_mode(dev)) | ||
186 | max *= 0xff; | ||
187 | } | 169 | } |
188 | 170 | ||
189 | DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max); | 171 | DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max); |
@@ -201,15 +183,6 @@ u32 intel_panel_get_backlight(struct drm_device *dev) | |||
201 | val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; | 183 | val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; |
202 | if (IS_PINEVIEW(dev)) | 184 | if (IS_PINEVIEW(dev)) |
203 | val >>= 1; | 185 | val >>= 1; |
204 | |||
205 | if (is_backlight_combination_mode(dev)){ | ||
206 | u8 lbpc; | ||
207 | |||
208 | val &= ~1; | ||
209 | pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc); | ||
210 | val *= lbpc; | ||
211 | val >>= 1; | ||
212 | } | ||
213 | } | 186 | } |
214 | 187 | ||
215 | DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); | 188 | DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); |
@@ -232,16 +205,6 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level) | |||
232 | 205 | ||
233 | if (HAS_PCH_SPLIT(dev)) | 206 | if (HAS_PCH_SPLIT(dev)) |
234 | return intel_pch_panel_set_backlight(dev, level); | 207 | return intel_pch_panel_set_backlight(dev, level); |
235 | |||
236 | if (is_backlight_combination_mode(dev)){ | ||
237 | u32 max = intel_panel_get_max_backlight(dev); | ||
238 | u8 lpbc; | ||
239 | |||
240 | lpbc = level * 0xfe / max + 1; | ||
241 | level /= lpbc; | ||
242 | pci_write_config_byte(dev->pdev, PCI_LBPC, lpbc); | ||
243 | } | ||
244 | |||
245 | tmp = I915_READ(BLC_PWM_CTL); | 208 | tmp = I915_READ(BLC_PWM_CTL); |
246 | if (IS_PINEVIEW(dev)) { | 209 | if (IS_PINEVIEW(dev)) { |
247 | tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1); | 210 | tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1); |
@@ -281,3 +244,22 @@ void intel_panel_setup_backlight(struct drm_device *dev) | |||
281 | dev_priv->backlight_level = intel_panel_get_backlight(dev); | 244 | dev_priv->backlight_level = intel_panel_get_backlight(dev); |
282 | dev_priv->backlight_enabled = dev_priv->backlight_level != 0; | 245 | dev_priv->backlight_enabled = dev_priv->backlight_level != 0; |
283 | } | 246 | } |
247 | |||
248 | enum drm_connector_status | ||
249 | intel_panel_detect(struct drm_device *dev) | ||
250 | { | ||
251 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
252 | |||
253 | if (i915_panel_ignore_lid) | ||
254 | return i915_panel_ignore_lid > 0 ? | ||
255 | connector_status_connected : | ||
256 | connector_status_disconnected; | ||
257 | |||
258 | /* Assume that the BIOS does not lie through the OpRegion... */ | ||
259 | if (dev_priv->opregion.lid_state) | ||
260 | return ioread32(dev_priv->opregion.lid_state) & 0x1 ? | ||
261 | connector_status_connected : | ||
262 | connector_status_disconnected; | ||
263 | |||
264 | return connector_status_unknown; | ||
265 | } | ||
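The new intel_panel_detect helper resolves panel presence in a fixed order: the i915_panel_ignore_lid module parameter overrides everything, then the OpRegion lid-state word is consulted (bit 0 set maps to connected), and otherwise the status stays unknown so callers such as ironlake_dp_detect can fall back to "connected" for eDP. A sketch of that decision chain with the kernel types replaced by plain ints:

#include <stdio.h>

enum status { DISCONNECTED, CONNECTED, UNKNOWN };

/* ignore_lid: >0 force connected, <0 force disconnected, 0 use the lid state.
 * have_lid_state/lid_state model the optional OpRegion CLID word. */
static enum status panel_detect(int ignore_lid, int have_lid_state, unsigned int lid_state)
{
	if (ignore_lid)
		return ignore_lid > 0 ? CONNECTED : DISCONNECTED;

	if (have_lid_state)
		return (lid_state & 0x1) ? CONNECTED : DISCONNECTED;

	return UNKNOWN;
}

/* eDP detect: the panel cannot be unplugged, so unknown collapses to connected. */
static enum status edp_detect(int ignore_lid, int have_lid_state, unsigned int lid_state)
{
	enum status s = panel_detect(ignore_lid, have_lid_state, lid_state);
	return s == UNKNOWN ? CONNECTED : s;
}

int main(void)
{
	printf("lid closed -> %d\n", panel_detect(0, 1, 0x0));
	printf("no opregion -> %d (unknown)\n", panel_detect(0, 0, 0));
	printf("eDP, no opregion -> %d (connected)\n", edp_detect(0, 0, 0));
	return 0;
}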
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 6218fa97aa1e..789c47801ba8 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -62,18 +62,9 @@ render_ring_flush(struct intel_ring_buffer *ring, | |||
62 | u32 flush_domains) | 62 | u32 flush_domains) |
63 | { | 63 | { |
64 | struct drm_device *dev = ring->dev; | 64 | struct drm_device *dev = ring->dev; |
65 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
66 | u32 cmd; | 65 | u32 cmd; |
67 | int ret; | 66 | int ret; |
68 | 67 | ||
69 | #if WATCH_EXEC | ||
70 | DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, | ||
71 | invalidate_domains, flush_domains); | ||
72 | #endif | ||
73 | |||
74 | trace_i915_gem_request_flush(dev, dev_priv->next_seqno, | ||
75 | invalidate_domains, flush_domains); | ||
76 | |||
77 | if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) { | 68 | if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) { |
78 | /* | 69 | /* |
79 | * read/write caches: | 70 | * read/write caches: |
@@ -122,9 +113,6 @@ render_ring_flush(struct intel_ring_buffer *ring, | |||
122 | (IS_G4X(dev) || IS_GEN5(dev))) | 113 | (IS_G4X(dev) || IS_GEN5(dev))) |
123 | cmd |= MI_INVALIDATE_ISP; | 114 | cmd |= MI_INVALIDATE_ISP; |
124 | 115 | ||
125 | #if WATCH_EXEC | ||
126 | DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); | ||
127 | #endif | ||
128 | ret = intel_ring_begin(ring, 2); | 116 | ret = intel_ring_begin(ring, 2); |
129 | if (ret) | 117 | if (ret) |
130 | return ret; | 118 | return ret; |
@@ -612,7 +600,6 @@ ring_add_request(struct intel_ring_buffer *ring, | |||
612 | intel_ring_emit(ring, MI_USER_INTERRUPT); | 600 | intel_ring_emit(ring, MI_USER_INTERRUPT); |
613 | intel_ring_advance(ring); | 601 | intel_ring_advance(ring); |
614 | 602 | ||
615 | DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno); | ||
616 | *result = seqno; | 603 | *result = seqno; |
617 | return 0; | 604 | return 0; |
618 | } | 605 | } |
@@ -715,11 +702,8 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, | |||
715 | u32 offset, u32 len) | 702 | u32 offset, u32 len) |
716 | { | 703 | { |
717 | struct drm_device *dev = ring->dev; | 704 | struct drm_device *dev = ring->dev; |
718 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
719 | int ret; | 705 | int ret; |
720 | 706 | ||
721 | trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1); | ||
722 | |||
723 | if (IS_I830(dev) || IS_845G(dev)) { | 707 | if (IS_I830(dev) || IS_845G(dev)) { |
724 | ret = intel_ring_begin(ring, 4); | 708 | ret = intel_ring_begin(ring, 4); |
725 | if (ret) | 709 | if (ret) |
@@ -894,6 +878,10 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) | |||
894 | /* Disable the ring buffer. The ring must be idle at this point */ | 878 | /* Disable the ring buffer. The ring must be idle at this point */ |
895 | dev_priv = ring->dev->dev_private; | 879 | dev_priv = ring->dev->dev_private; |
896 | ret = intel_wait_ring_buffer(ring, ring->size - 8); | 880 | ret = intel_wait_ring_buffer(ring, ring->size - 8); |
881 | if (ret) | ||
882 | DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", | ||
883 | ring->name, ret); | ||
884 | |||
897 | I915_WRITE_CTL(ring, 0); | 885 | I915_WRITE_CTL(ring, 0); |
898 | 886 | ||
899 | drm_core_ioremapfree(&ring->map, ring->dev); | 887 | drm_core_ioremapfree(&ring->map, ring->dev); |
@@ -950,13 +938,13 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) | |||
950 | return 0; | 938 | return 0; |
951 | } | 939 | } |
952 | 940 | ||
953 | trace_i915_ring_wait_begin (dev); | 941 | trace_i915_ring_wait_begin(ring); |
954 | end = jiffies + 3 * HZ; | 942 | end = jiffies + 3 * HZ; |
955 | do { | 943 | do { |
956 | ring->head = I915_READ_HEAD(ring); | 944 | ring->head = I915_READ_HEAD(ring); |
957 | ring->space = ring_space(ring); | 945 | ring->space = ring_space(ring); |
958 | if (ring->space >= n) { | 946 | if (ring->space >= n) { |
959 | trace_i915_ring_wait_end(dev); | 947 | trace_i915_ring_wait_end(ring); |
960 | return 0; | 948 | return 0; |
961 | } | 949 | } |
962 | 950 | ||
@@ -970,16 +958,20 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) | |||
970 | if (atomic_read(&dev_priv->mm.wedged)) | 958 | if (atomic_read(&dev_priv->mm.wedged)) |
971 | return -EAGAIN; | 959 | return -EAGAIN; |
972 | } while (!time_after(jiffies, end)); | 960 | } while (!time_after(jiffies, end)); |
973 | trace_i915_ring_wait_end (dev); | 961 | trace_i915_ring_wait_end(ring); |
974 | return -EBUSY; | 962 | return -EBUSY; |
975 | } | 963 | } |
976 | 964 | ||
977 | int intel_ring_begin(struct intel_ring_buffer *ring, | 965 | int intel_ring_begin(struct intel_ring_buffer *ring, |
978 | int num_dwords) | 966 | int num_dwords) |
979 | { | 967 | { |
968 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | ||
980 | int n = 4*num_dwords; | 969 | int n = 4*num_dwords; |
981 | int ret; | 970 | int ret; |
982 | 971 | ||
972 | if (unlikely(atomic_read(&dev_priv->mm.wedged))) | ||
973 | return -EIO; | ||
974 | |||
983 | if (unlikely(ring->tail + n > ring->effective_size)) { | 975 | if (unlikely(ring->tail + n > ring->effective_size)) { |
984 | ret = intel_wrap_ring_buffer(ring); | 976 | ret = intel_wrap_ring_buffer(ring); |
985 | if (unlikely(ret)) | 977 | if (unlikely(ret)) |
@@ -1059,22 +1051,25 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, | |||
1059 | } | 1051 | } |
1060 | 1052 | ||
1061 | static int gen6_ring_flush(struct intel_ring_buffer *ring, | 1053 | static int gen6_ring_flush(struct intel_ring_buffer *ring, |
1062 | u32 invalidate_domains, | 1054 | u32 invalidate, u32 flush) |
1063 | u32 flush_domains) | ||
1064 | { | 1055 | { |
1056 | uint32_t cmd; | ||
1065 | int ret; | 1057 | int ret; |
1066 | 1058 | ||
1067 | if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) | 1059 | if (((invalidate | flush) & I915_GEM_GPU_DOMAINS) == 0) |
1068 | return 0; | 1060 | return 0; |
1069 | 1061 | ||
1070 | ret = intel_ring_begin(ring, 4); | 1062 | ret = intel_ring_begin(ring, 4); |
1071 | if (ret) | 1063 | if (ret) |
1072 | return ret; | 1064 | return ret; |
1073 | 1065 | ||
1074 | intel_ring_emit(ring, MI_FLUSH_DW); | 1066 | cmd = MI_FLUSH_DW; |
1075 | intel_ring_emit(ring, 0); | 1067 | if (invalidate & I915_GEM_GPU_DOMAINS) |
1068 | cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD; | ||
1069 | intel_ring_emit(ring, cmd); | ||
1076 | intel_ring_emit(ring, 0); | 1070 | intel_ring_emit(ring, 0); |
1077 | intel_ring_emit(ring, 0); | 1071 | intel_ring_emit(ring, 0); |
1072 | intel_ring_emit(ring, MI_NOOP); | ||
1078 | intel_ring_advance(ring); | 1073 | intel_ring_advance(ring); |
1079 | return 0; | 1074 | return 0; |
1080 | } | 1075 | } |
@@ -1230,22 +1225,25 @@ static int blt_ring_begin(struct intel_ring_buffer *ring, | |||
1230 | } | 1225 | } |
1231 | 1226 | ||
1232 | static int blt_ring_flush(struct intel_ring_buffer *ring, | 1227 | static int blt_ring_flush(struct intel_ring_buffer *ring, |
1233 | u32 invalidate_domains, | 1228 | u32 invalidate, u32 flush) |
1234 | u32 flush_domains) | ||
1235 | { | 1229 | { |
1230 | uint32_t cmd; | ||
1236 | int ret; | 1231 | int ret; |
1237 | 1232 | ||
1238 | if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) | 1233 | if (((invalidate | flush) & I915_GEM_DOMAIN_RENDER) == 0) |
1239 | return 0; | 1234 | return 0; |
1240 | 1235 | ||
1241 | ret = blt_ring_begin(ring, 4); | 1236 | ret = blt_ring_begin(ring, 4); |
1242 | if (ret) | 1237 | if (ret) |
1243 | return ret; | 1238 | return ret; |
1244 | 1239 | ||
1245 | intel_ring_emit(ring, MI_FLUSH_DW); | 1240 | cmd = MI_FLUSH_DW; |
1246 | intel_ring_emit(ring, 0); | 1241 | if (invalidate & I915_GEM_DOMAIN_RENDER) |
1242 | cmd |= MI_INVALIDATE_TLB; | ||
1243 | intel_ring_emit(ring, cmd); | ||
1247 | intel_ring_emit(ring, 0); | 1244 | intel_ring_emit(ring, 0); |
1248 | intel_ring_emit(ring, 0); | 1245 | intel_ring_emit(ring, 0); |
1246 | intel_ring_emit(ring, MI_NOOP); | ||
1249 | intel_ring_advance(ring); | 1247 | intel_ring_advance(ring); |
1250 | return 0; | 1248 | return 0; |
1251 | } | 1249 | } |
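
Editorial note: both flush hooks above now emit a four-dword MI_FLUSH_DW sequence, set the invalidate bit in the command word only when the caller actually asked for an invalidate, and pad with MI_NOOP so the four dwords reserved by intel_ring_begin()/blt_ring_begin() are fully consumed. A minimal sketch of that construction follows; the opcode and bit values are placeholders, not the real definitions from i915_reg.h.

    /* Sketch of the conditional flush-command construction used above.
     * MI_FLUSH_DW, MI_INVALIDATE_TLB and GPU_DOMAINS are illustrative
     * placeholder encodings, not the i915 register definitions. */
    #include <stdint.h>

    #define MI_FLUSH_DW        (0x26u << 23)  /* placeholder opcode */
    #define MI_INVALIDATE_TLB  (1u << 18)     /* placeholder bit */
    #define MI_NOOP            0u
    #define GPU_DOMAINS        0x3fu          /* placeholder domain mask */

    static int emit_flush(uint32_t out[4], uint32_t invalidate, uint32_t flush)
    {
    	uint32_t cmd = MI_FLUSH_DW;

    	/* Nothing to emit when neither mask touches a GPU domain. */
    	if (((invalidate | flush) & GPU_DOMAINS) == 0)
    		return 0;

    	/* Request a TLB invalidate only when the caller asked for one. */
    	if (invalidate & GPU_DOMAINS)
    		cmd |= MI_INVALIDATE_TLB;

    	out[0] = cmd;
    	out[1] = 0;		/* address/immediate dwords unused in this sketch */
    	out[2] = 0;
    	out[3] = MI_NOOP;	/* pad out the four reserved dwords */
    	return 4;
    }
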
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 6d6fde85a636..f23cc5f037a6 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -14,22 +14,23 @@ struct intel_hw_status_page { | |||
14 | struct drm_i915_gem_object *obj; | 14 | struct drm_i915_gem_object *obj; |
15 | }; | 15 | }; |
16 | 16 | ||
17 | #define I915_RING_READ(reg) i915_safe_read(dev_priv, reg) | 17 | #define I915_RING_READ(reg) i915_gt_read(dev_priv, reg) |
18 | #define I915_RING_WRITE(reg, val) i915_gt_write(dev_priv, reg, val) | ||
18 | 19 | ||
19 | #define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base)) | 20 | #define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base)) |
20 | #define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val) | 21 | #define I915_WRITE_TAIL(ring, val) I915_RING_WRITE(RING_TAIL((ring)->mmio_base), val) |
21 | 22 | ||
22 | #define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base)) | 23 | #define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base)) |
23 | #define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val) | 24 | #define I915_WRITE_START(ring, val) I915_RING_WRITE(RING_START((ring)->mmio_base), val) |
24 | 25 | ||
25 | #define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base)) | 26 | #define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base)) |
26 | #define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val) | 27 | #define I915_WRITE_HEAD(ring, val) I915_RING_WRITE(RING_HEAD((ring)->mmio_base), val) |
27 | 28 | ||
28 | #define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base)) | 29 | #define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base)) |
29 | #define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val) | 30 | #define I915_WRITE_CTL(ring, val) I915_RING_WRITE(RING_CTL((ring)->mmio_base), val) |
30 | 31 | ||
31 | #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) | ||
32 | #define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base)) | 32 | #define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base)) |
33 | #define I915_WRITE_IMR(ring, val) I915_RING_WRITE(RING_IMR((ring)->mmio_base), val) | ||
33 | 34 | ||
34 | #define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base)) | 35 | #define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base)) |
35 | #define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base)) | 36 | #define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base)) |
@@ -43,7 +44,7 @@ struct intel_ring_buffer { | |||
43 | RING_BLT = 0x4, | 44 | RING_BLT = 0x4, |
44 | } id; | 45 | } id; |
45 | u32 mmio_base; | 46 | u32 mmio_base; |
46 | void *virtual_start; | 47 | void __iomem *virtual_start; |
47 | struct drm_device *dev; | 48 | struct drm_device *dev; |
48 | struct drm_i915_gem_object *obj; | 49 | struct drm_i915_gem_object *obj; |
49 | 50 | ||
@@ -58,6 +59,7 @@ struct intel_ring_buffer { | |||
58 | u32 irq_refcount; | 59 | u32 irq_refcount; |
59 | u32 irq_mask; | 60 | u32 irq_mask; |
60 | u32 irq_seqno; /* last seq seen at irq time */ | 61 | u32 irq_seqno; /* last seq seen at irq time */ |
62 | u32 trace_irq_seqno; | ||
61 | u32 waiting_seqno; | 63 | u32 waiting_seqno; |
62 | u32 sync_seqno[I915_NUM_RINGS-1]; | 64 | u32 sync_seqno[I915_NUM_RINGS-1]; |
63 | bool __must_check (*irq_get)(struct intel_ring_buffer *ring); | 65 | bool __must_check (*irq_get)(struct intel_ring_buffer *ring); |
@@ -141,6 +143,26 @@ intel_read_status_page(struct intel_ring_buffer *ring, | |||
141 | return ioread32(ring->status_page.page_addr + reg); | 143 | return ioread32(ring->status_page.page_addr + reg); |
142 | } | 144 | } |
143 | 145 | ||
146 | /** | ||
147 | * Reads a dword out of the status page, which is written to from the command | ||
148 | * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or | ||
149 | * MI_STORE_DATA_IMM. | ||
150 | * | ||
151 | * The following dwords have a reserved meaning: | ||
152 | * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes. | ||
153 | * 0x04: ring 0 head pointer | ||
154 | * 0x05: ring 1 head pointer (915-class) | ||
155 | * 0x06: ring 2 head pointer (915-class) | ||
156 | * 0x10-0x1b: Context status DWords (GM45) | ||
157 | * 0x1f: Last written status offset. (GM45) | ||
158 | * | ||
159 | * The area from dword 0x20 to 0x3ff is available for driver usage. | ||
160 | */ | ||
161 | #define READ_HWSP(dev_priv, reg) intel_read_status_page(LP_RING(dev_priv), reg) | ||
162 | #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) | ||
163 | #define I915_GEM_HWS_INDEX 0x20 | ||
164 | #define I915_BREADCRUMB_INDEX 0x21 | ||
165 | |||
144 | void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); | 166 | void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); |
145 | int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n); | 167 | int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n); |
146 | int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); | 168 | int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); |
@@ -166,6 +188,12 @@ int intel_init_blt_ring_buffer(struct drm_device *dev); | |||
166 | u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); | 188 | u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); |
167 | void intel_ring_setup_status_page(struct intel_ring_buffer *ring); | 189 | void intel_ring_setup_status_page(struct intel_ring_buffer *ring); |
168 | 190 | ||
191 | static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno) | ||
192 | { | ||
193 | if (ring->trace_irq_seqno == 0 && ring->irq_get(ring)) | ||
194 | ring->trace_irq_seqno = seqno; | ||
195 | } | ||
196 | |||
169 | /* DRI warts */ | 197 | /* DRI warts */ |
170 | int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size); | 198 | int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size); |
171 | 199 | ||
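
Editorial note: the status-page comment added above documents a shared page that the GPU writes and the CPU reads back with ioread32(), with dwords 0x00-0x1f reserved for hardware and 0x20-0x3ff free for the driver (the breadcrumb lives at 0x21). A rough sketch of reading a driver-owned slot, assuming a plain mapped pointer instead of the real io mapping helpers:

    /* Hypothetical sketch of reading a driver-owned status-page dword.
     * page_addr stands in for the ioremapped status page; the 0x21
     * breadcrumb index mirrors the layout documented above but the
     * access method is simplified for illustration. */
    #include <stdint.h>

    #define BREADCRUMB_INDEX 0x21

    static inline uint32_t read_status_dword(const volatile uint32_t *page_addr,
    					 unsigned int reg)
    {
    	/* Each slot is one 32-bit dword; 0x00-0x1f are hardware-reserved,
    	 * 0x20-0x3ff are available to the driver. */
    	return page_addr[reg];
    }

    static uint32_t read_breadcrumb(const volatile uint32_t *page_addr)
    {
    	return read_status_dword(page_addr, BREADCRUMB_INDEX);
    }
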
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 318f398e6b2e..4324f33212d6 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -46,6 +46,7 @@ | |||
46 | SDVO_TV_MASK) | 46 | SDVO_TV_MASK) |
47 | 47 | ||
48 | #define IS_TV(c) (c->output_flag & SDVO_TV_MASK) | 48 | #define IS_TV(c) (c->output_flag & SDVO_TV_MASK) |
49 | #define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK) | ||
49 | #define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK) | 50 | #define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK) |
50 | #define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK)) | 51 | #define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK)) |
51 | 52 | ||
@@ -92,6 +93,12 @@ struct intel_sdvo { | |||
92 | uint16_t attached_output; | 93 | uint16_t attached_output; |
93 | 94 | ||
94 | /** | 95 | /** |
96 | * This is used to select the color range of RGB outputs in HDMI mode. | ||
97 | * It is only valid when using TMDS encoding and 8-bit-per-color mode. | ||
98 | */ | ||
99 | uint32_t color_range; | ||
100 | |||
101 | /** | ||
95 | * This is set if we're going to treat the device as TV-out. | 102 | * This is set if we're going to treat the device as TV-out. |
96 | * | 103 | * |
97 | * While we have these nice friendly flags for output types that ought | 104 | * While we have these nice friendly flags for output types that ought |
@@ -584,6 +591,7 @@ static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo, bool *i | |||
584 | { | 591 | { |
585 | struct intel_sdvo_get_trained_inputs_response response; | 592 | struct intel_sdvo_get_trained_inputs_response response; |
586 | 593 | ||
594 | BUILD_BUG_ON(sizeof(response) != 1); | ||
587 | if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS, | 595 | if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS, |
588 | &response, sizeof(response))) | 596 | &response, sizeof(response))) |
589 | return false; | 597 | return false; |
@@ -631,6 +639,7 @@ static bool intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *intel_sdvo | |||
631 | { | 639 | { |
632 | struct intel_sdvo_pixel_clock_range clocks; | 640 | struct intel_sdvo_pixel_clock_range clocks; |
633 | 641 | ||
642 | BUILD_BUG_ON(sizeof(clocks) != 4); | ||
634 | if (!intel_sdvo_get_value(intel_sdvo, | 643 | if (!intel_sdvo_get_value(intel_sdvo, |
635 | SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, | 644 | SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, |
636 | &clocks, sizeof(clocks))) | 645 | &clocks, sizeof(clocks))) |
@@ -698,6 +707,8 @@ intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo, | |||
698 | static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo, | 707 | static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo, |
699 | struct intel_sdvo_dtd *dtd) | 708 | struct intel_sdvo_dtd *dtd) |
700 | { | 709 | { |
710 | BUILD_BUG_ON(sizeof(dtd->part1) != 8); | ||
711 | BUILD_BUG_ON(sizeof(dtd->part2) != 8); | ||
701 | return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, | 712 | return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, |
702 | &dtd->part1, sizeof(dtd->part1)) && | 713 | &dtd->part1, sizeof(dtd->part1)) && |
703 | intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2, | 714 | intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2, |
@@ -795,6 +806,7 @@ static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo) | |||
795 | { | 806 | { |
796 | struct intel_sdvo_encode encode; | 807 | struct intel_sdvo_encode encode; |
797 | 808 | ||
809 | BUILD_BUG_ON(sizeof(encode) != 2); | ||
798 | return intel_sdvo_get_value(intel_sdvo, | 810 | return intel_sdvo_get_value(intel_sdvo, |
799 | SDVO_CMD_GET_SUPP_ENCODE, | 811 | SDVO_CMD_GET_SUPP_ENCODE, |
800 | &encode, sizeof(encode)); | 812 | &encode, sizeof(encode)); |
@@ -1050,6 +1062,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1050 | /* Set the SDVO control regs. */ | 1062 | /* Set the SDVO control regs. */ |
1051 | if (INTEL_INFO(dev)->gen >= 4) { | 1063 | if (INTEL_INFO(dev)->gen >= 4) { |
1052 | sdvox = 0; | 1064 | sdvox = 0; |
1065 | if (intel_sdvo->is_hdmi) | ||
1066 | sdvox |= intel_sdvo->color_range; | ||
1053 | if (INTEL_INFO(dev)->gen < 5) | 1067 | if (INTEL_INFO(dev)->gen < 5) |
1054 | sdvox |= SDVO_BORDER_ENABLE; | 1068 | sdvox |= SDVO_BORDER_ENABLE; |
1055 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) | 1069 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
@@ -1161,6 +1175,7 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector, | |||
1161 | 1175 | ||
1162 | static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps) | 1176 | static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps) |
1163 | { | 1177 | { |
1178 | BUILD_BUG_ON(sizeof(*caps) != 8); | ||
1164 | if (!intel_sdvo_get_value(intel_sdvo, | 1179 | if (!intel_sdvo_get_value(intel_sdvo, |
1165 | SDVO_CMD_GET_DEVICE_CAPS, | 1180 | SDVO_CMD_GET_DEVICE_CAPS, |
1166 | caps, sizeof(*caps))) | 1181 | caps, sizeof(*caps))) |
@@ -1267,33 +1282,9 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) | |||
1267 | static bool | 1282 | static bool |
1268 | intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) | 1283 | intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) |
1269 | { | 1284 | { |
1270 | int caps = 0; | 1285 | /* Is there more than one type of output? */ |
1271 | 1286 | int caps = intel_sdvo->caps.output_flags & 0xf; | |
1272 | if (intel_sdvo->caps.output_flags & | 1287 | return caps & -caps; |
1273 | (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) | ||
1274 | caps++; | ||
1275 | if (intel_sdvo->caps.output_flags & | ||
1276 | (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)) | ||
1277 | caps++; | ||
1278 | if (intel_sdvo->caps.output_flags & | ||
1279 | (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID1)) | ||
1280 | caps++; | ||
1281 | if (intel_sdvo->caps.output_flags & | ||
1282 | (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_CVBS1)) | ||
1283 | caps++; | ||
1284 | if (intel_sdvo->caps.output_flags & | ||
1285 | (SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_YPRPB1)) | ||
1286 | caps++; | ||
1287 | |||
1288 | if (intel_sdvo->caps.output_flags & | ||
1289 | (SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1)) | ||
1290 | caps++; | ||
1291 | |||
1292 | if (intel_sdvo->caps.output_flags & | ||
1293 | (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)) | ||
1294 | caps++; | ||
1295 | |||
1296 | return (caps > 1); | ||
1297 | } | 1288 | } |
1298 | 1289 | ||
1299 | static struct edid * | 1290 | static struct edid * |
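
Editorial note on the rewritten intel_sdvo_multifunc_encoder() above: the comment asks whether more than one output type is present, but caps & -caps isolates the lowest set bit and is therefore non-zero whenever any output bit is set at all. The usual idiom for a strict "more than one bit set" test is caps & (caps - 1). A small self-contained illustration of the difference, with arbitrary example values rather than real SDVO output flags:

    /* Demonstrates the two bit tests discussed above. */
    #include <stdio.h>

    static int lowest_set_bit(int caps)    { return caps & -caps; }
    static int more_than_one_bit(int caps) { return caps & (caps - 1); }

    int main(void)
    {
    	int single = 0x4;	/* one output type set */
    	int multi  = 0x5;	/* two output types set */

    	printf("single: lowest=%d  more-than-one=%d\n",
    	       lowest_set_bit(single), more_than_one_bit(single));
    	printf("multi:  lowest=%d  more-than-one=%d\n",
    	       lowest_set_bit(multi), more_than_one_bit(multi));
    	return 0;
    }
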
@@ -1359,7 +1350,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) | |||
1359 | intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid); | 1350 | intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid); |
1360 | intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid); | 1351 | intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid); |
1361 | } | 1352 | } |
1362 | } | 1353 | } else |
1354 | status = connector_status_disconnected; | ||
1363 | connector->display_info.raw_edid = NULL; | 1355 | connector->display_info.raw_edid = NULL; |
1364 | kfree(edid); | 1356 | kfree(edid); |
1365 | } | 1357 | } |
@@ -1407,10 +1399,25 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) | |||
1407 | 1399 | ||
1408 | if ((intel_sdvo_connector->output_flag & response) == 0) | 1400 | if ((intel_sdvo_connector->output_flag & response) == 0) |
1409 | ret = connector_status_disconnected; | 1401 | ret = connector_status_disconnected; |
1410 | else if (response & SDVO_TMDS_MASK) | 1402 | else if (IS_TMDS(intel_sdvo_connector)) |
1411 | ret = intel_sdvo_hdmi_sink_detect(connector); | 1403 | ret = intel_sdvo_hdmi_sink_detect(connector); |
1412 | else | 1404 | else { |
1413 | ret = connector_status_connected; | 1405 | struct edid *edid; |
1406 | |||
1407 | /* if we have an edid, check that it matches the connection */ | ||
1408 | edid = intel_sdvo_get_edid(connector); | ||
1409 | if (edid == NULL) | ||
1410 | edid = intel_sdvo_get_analog_edid(connector); | ||
1411 | if (edid != NULL) { | ||
1412 | if (edid->input & DRM_EDID_INPUT_DIGITAL) | ||
1413 | ret = connector_status_disconnected; | ||
1414 | else | ||
1415 | ret = connector_status_connected; | ||
1416 | connector->display_info.raw_edid = NULL; | ||
1417 | kfree(edid); | ||
1418 | } else | ||
1419 | ret = connector_status_connected; | ||
1420 | } | ||
1414 | 1421 | ||
1415 | /* May update encoder flag for like clock for SDVO TV, etc.*/ | 1422 | /* May update encoder flag for like clock for SDVO TV, etc.*/ |
1416 | if (ret == connector_status_connected) { | 1423 | if (ret == connector_status_connected) { |
@@ -1446,10 +1453,15 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) | |||
1446 | edid = intel_sdvo_get_analog_edid(connector); | 1453 | edid = intel_sdvo_get_analog_edid(connector); |
1447 | 1454 | ||
1448 | if (edid != NULL) { | 1455 | if (edid != NULL) { |
1449 | if (edid->input & DRM_EDID_INPUT_DIGITAL) { | 1456 | struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); |
1457 | bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL); | ||
1458 | bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector); | ||
1459 | |||
1460 | if (connector_is_digital == monitor_is_digital) { | ||
1450 | drm_mode_connector_update_edid_property(connector, edid); | 1461 | drm_mode_connector_update_edid_property(connector, edid); |
1451 | drm_add_edid_modes(connector, edid); | 1462 | drm_add_edid_modes(connector, edid); |
1452 | } | 1463 | } |
1464 | |||
1453 | connector->display_info.raw_edid = NULL; | 1465 | connector->display_info.raw_edid = NULL; |
1454 | kfree(edid); | 1466 | kfree(edid); |
1455 | } | 1467 | } |
@@ -1668,6 +1680,22 @@ static void intel_sdvo_destroy(struct drm_connector *connector) | |||
1668 | kfree(connector); | 1680 | kfree(connector); |
1669 | } | 1681 | } |
1670 | 1682 | ||
1683 | static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector) | ||
1684 | { | ||
1685 | struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); | ||
1686 | struct edid *edid; | ||
1687 | bool has_audio = false; | ||
1688 | |||
1689 | if (!intel_sdvo->is_hdmi) | ||
1690 | return false; | ||
1691 | |||
1692 | edid = intel_sdvo_get_edid(connector); | ||
1693 | if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL) | ||
1694 | has_audio = drm_detect_monitor_audio(edid); | ||
1695 | |||
1696 | return has_audio; | ||
1697 | } | ||
1698 | |||
1671 | static int | 1699 | static int |
1672 | intel_sdvo_set_property(struct drm_connector *connector, | 1700 | intel_sdvo_set_property(struct drm_connector *connector, |
1673 | struct drm_property *property, | 1701 | struct drm_property *property, |
@@ -1675,6 +1703,7 @@ intel_sdvo_set_property(struct drm_connector *connector, | |||
1675 | { | 1703 | { |
1676 | struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); | 1704 | struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); |
1677 | struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); | 1705 | struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); |
1706 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | ||
1678 | uint16_t temp_value; | 1707 | uint16_t temp_value; |
1679 | uint8_t cmd; | 1708 | uint8_t cmd; |
1680 | int ret; | 1709 | int ret; |
@@ -1684,17 +1713,31 @@ intel_sdvo_set_property(struct drm_connector *connector, | |||
1684 | return ret; | 1713 | return ret; |
1685 | 1714 | ||
1686 | if (property == intel_sdvo_connector->force_audio_property) { | 1715 | if (property == intel_sdvo_connector->force_audio_property) { |
1687 | if (val == intel_sdvo_connector->force_audio) | 1716 | int i = val; |
1717 | bool has_audio; | ||
1718 | |||
1719 | if (i == intel_sdvo_connector->force_audio) | ||
1688 | return 0; | 1720 | return 0; |
1689 | 1721 | ||
1690 | intel_sdvo_connector->force_audio = val; | 1722 | intel_sdvo_connector->force_audio = i; |
1723 | |||
1724 | if (i == 0) | ||
1725 | has_audio = intel_sdvo_detect_hdmi_audio(connector); | ||
1726 | else | ||
1727 | has_audio = i > 0; | ||
1691 | 1728 | ||
1692 | if (val > 0 && intel_sdvo->has_hdmi_audio) | 1729 | if (has_audio == intel_sdvo->has_hdmi_audio) |
1693 | return 0; | 1730 | return 0; |
1694 | if (val < 0 && !intel_sdvo->has_hdmi_audio) | 1731 | |
1732 | intel_sdvo->has_hdmi_audio = has_audio; | ||
1733 | goto done; | ||
1734 | } | ||
1735 | |||
1736 | if (property == dev_priv->broadcast_rgb_property) { | ||
1737 | if (val == !!intel_sdvo->color_range) | ||
1695 | return 0; | 1738 | return 0; |
1696 | 1739 | ||
1697 | intel_sdvo->has_hdmi_audio = val > 0; | 1740 | intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0; |
1698 | goto done; | 1741 | goto done; |
1699 | } | 1742 | } |
1700 | 1743 | ||
@@ -2002,6 +2045,9 @@ intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector) | |||
2002 | drm_connector_attach_property(&connector->base.base, | 2045 | drm_connector_attach_property(&connector->base.base, |
2003 | connector->force_audio_property, 0); | 2046 | connector->force_audio_property, 0); |
2004 | } | 2047 | } |
2048 | |||
2049 | if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) | ||
2050 | intel_attach_broadcast_rgb_property(&connector->base.base); | ||
2005 | } | 2051 | } |
2006 | 2052 | ||
2007 | static bool | 2053 | static bool |
@@ -2224,6 +2270,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, | |||
2224 | if (!intel_sdvo_set_target_output(intel_sdvo, type)) | 2270 | if (!intel_sdvo_set_target_output(intel_sdvo, type)) |
2225 | return false; | 2271 | return false; |
2226 | 2272 | ||
2273 | BUILD_BUG_ON(sizeof(format) != 6); | ||
2227 | if (!intel_sdvo_get_value(intel_sdvo, | 2274 | if (!intel_sdvo_get_value(intel_sdvo, |
2228 | SDVO_CMD_GET_SUPPORTED_TV_FORMATS, | 2275 | SDVO_CMD_GET_SUPPORTED_TV_FORMATS, |
2229 | &format, sizeof(format))) | 2276 | &format, sizeof(format))) |
@@ -2430,6 +2477,8 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, | |||
2430 | uint16_t response; | 2477 | uint16_t response; |
2431 | } enhancements; | 2478 | } enhancements; |
2432 | 2479 | ||
2480 | BUILD_BUG_ON(sizeof(enhancements) != 2); | ||
2481 | |||
2433 | enhancements.response = 0; | 2482 | enhancements.response = 0; |
2434 | intel_sdvo_get_value(intel_sdvo, | 2483 | intel_sdvo_get_value(intel_sdvo, |
2435 | SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, | 2484 | SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, |
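
Editorial note: the BUILD_BUG_ON(sizeof(...) != N) checks added throughout the SDVO code above turn a wire-format assumption into a compile-time failure: each SDVO command transfers a fixed number of bytes, so the struct used to marshal it must be exactly that size. A generic sketch of the idea, using a simplified assertion macro and a made-up stand-in struct rather than a real SDVO response:

    /* Minimal sketch of a compile-time size assertion for a wire-format
     * struct; hypothetical_response is illustrative, not a real SDVO
     * structure, and the macro is a simplified stand-in for the kernel's
     * BUILD_BUG_ON. */
    #include <stdint.h>

    /* A negative array length makes the build fail when cond is true. */
    #define BUILD_BUG_ON_SKETCH(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

    struct hypothetical_response {
    	uint8_t trained_inputs;		/* device reports exactly one byte */
    };

    static void check_wire_format(void)
    {
    	/* Fails to compile if the struct ever stops matching the 1-byte
    	 * payload it is unpacked from. */
    	BUILD_BUG_ON_SKETCH(sizeof(struct hypothetical_response) != 1);
    }
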
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 93206e4eaa6f..4256b8ef3947 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -1006,6 +1006,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
1006 | const struct video_levels *video_levels; | 1006 | const struct video_levels *video_levels; |
1007 | const struct color_conversion *color_conversion; | 1007 | const struct color_conversion *color_conversion; |
1008 | bool burst_ena; | 1008 | bool burst_ena; |
1009 | int pipe = intel_crtc->pipe; | ||
1009 | 1010 | ||
1010 | if (!tv_mode) | 1011 | if (!tv_mode) |
1011 | return; /* can't happen (mode_prepare prevents this) */ | 1012 | return; /* can't happen (mode_prepare prevents this) */ |
@@ -1149,14 +1150,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
1149 | ((video_levels->black << TV_BLACK_LEVEL_SHIFT) | | 1150 | ((video_levels->black << TV_BLACK_LEVEL_SHIFT) | |
1150 | (video_levels->blank << TV_BLANK_LEVEL_SHIFT))); | 1151 | (video_levels->blank << TV_BLANK_LEVEL_SHIFT))); |
1151 | { | 1152 | { |
1152 | int pipeconf_reg = (intel_crtc->pipe == 0) ? | 1153 | int pipeconf_reg = PIPECONF(pipe); |
1153 | PIPEACONF : PIPEBCONF; | 1154 | int dspcntr_reg = DSPCNTR(pipe); |
1154 | int dspcntr_reg = (intel_crtc->plane == 0) ? | ||
1155 | DSPACNTR : DSPBCNTR; | ||
1156 | int pipeconf = I915_READ(pipeconf_reg); | 1155 | int pipeconf = I915_READ(pipeconf_reg); |
1157 | int dspcntr = I915_READ(dspcntr_reg); | 1156 | int dspcntr = I915_READ(dspcntr_reg); |
1158 | int dspbase_reg = (intel_crtc->plane == 0) ? | 1157 | int dspbase_reg = DSPADDR(pipe); |
1159 | DSPAADDR : DSPBADDR; | ||
1160 | int xpos = 0x0, ypos = 0x0; | 1158 | int xpos = 0x0, ypos = 0x0; |
1161 | unsigned int xsize, ysize; | 1159 | unsigned int xsize, ysize; |
1162 | /* Pipe must be off here */ | 1160 | /* Pipe must be off here */ |
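
Editorial note: the hunk above replaces per-pipe ternaries (PIPEACONF vs PIPEBCONF, DSPACNTR vs DSPBCNTR) with indexed macros such as PIPECONF(pipe) and DSPCNTR(pipe). Such macros are typically built from a base offset plus a per-pipe stride; the sketch below uses invented offsets purely for illustration, the real values live in i915_reg.h.

    /* Illustrative only: base addresses and stride are made up. */
    #include <stdint.h>

    #define EXAMPLE_PIPE_STRIDE	0x1000
    #define EXAMPLE_PIPEACONF	0x70008
    #define EXAMPLE_DSPACNTR	0x70180

    #define EXAMPLE_PIPECONF(pipe)	(EXAMPLE_PIPEACONF + (pipe) * EXAMPLE_PIPE_STRIDE)
    #define EXAMPLE_DSPCNTR(pipe)	(EXAMPLE_DSPACNTR  + (pipe) * EXAMPLE_PIPE_STRIDE)

    static uint32_t pipeconf_reg_for(int pipe)
    {
    	/* pipe 0 -> 0x70008, pipe 1 -> 0x71008, ... */
    	return EXAMPLE_PIPECONF(pipe);
    }
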
@@ -1234,7 +1232,8 @@ static const struct drm_display_mode reported_modes[] = { | |||
1234 | * \return false if TV is disconnected. | 1232 | * \return false if TV is disconnected. |
1235 | */ | 1233 | */ |
1236 | static int | 1234 | static int |
1237 | intel_tv_detect_type (struct intel_tv *intel_tv) | 1235 | intel_tv_detect_type (struct intel_tv *intel_tv, |
1236 | struct drm_connector *connector) | ||
1238 | { | 1237 | { |
1239 | struct drm_encoder *encoder = &intel_tv->base.base; | 1238 | struct drm_encoder *encoder = &intel_tv->base.base; |
1240 | struct drm_device *dev = encoder->dev; | 1239 | struct drm_device *dev = encoder->dev; |
@@ -1245,11 +1244,13 @@ intel_tv_detect_type (struct intel_tv *intel_tv) | |||
1245 | int type; | 1244 | int type; |
1246 | 1245 | ||
1247 | /* Disable TV interrupts around load detect or we'll recurse */ | 1246 | /* Disable TV interrupts around load detect or we'll recurse */ |
1248 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 1247 | if (connector->polled & DRM_CONNECTOR_POLL_HPD) { |
1249 | i915_disable_pipestat(dev_priv, 0, | 1248 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
1250 | PIPE_HOTPLUG_INTERRUPT_ENABLE | | 1249 | i915_disable_pipestat(dev_priv, 0, |
1251 | PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); | 1250 | PIPE_HOTPLUG_INTERRUPT_ENABLE | |
1252 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 1251 | PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); |
1252 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
1253 | } | ||
1253 | 1254 | ||
1254 | save_tv_dac = tv_dac = I915_READ(TV_DAC); | 1255 | save_tv_dac = tv_dac = I915_READ(TV_DAC); |
1255 | save_tv_ctl = tv_ctl = I915_READ(TV_CTL); | 1256 | save_tv_ctl = tv_ctl = I915_READ(TV_CTL); |
@@ -1302,11 +1303,13 @@ intel_tv_detect_type (struct intel_tv *intel_tv) | |||
1302 | I915_WRITE(TV_CTL, save_tv_ctl); | 1303 | I915_WRITE(TV_CTL, save_tv_ctl); |
1303 | 1304 | ||
1304 | /* Restore interrupt config */ | 1305 | /* Restore interrupt config */ |
1305 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 1306 | if (connector->polled & DRM_CONNECTOR_POLL_HPD) { |
1306 | i915_enable_pipestat(dev_priv, 0, | 1307 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
1307 | PIPE_HOTPLUG_INTERRUPT_ENABLE | | 1308 | i915_enable_pipestat(dev_priv, 0, |
1308 | PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); | 1309 | PIPE_HOTPLUG_INTERRUPT_ENABLE | |
1309 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 1310 | PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); |
1311 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
1312 | } | ||
1310 | 1313 | ||
1311 | return type; | 1314 | return type; |
1312 | } | 1315 | } |
@@ -1356,7 +1359,7 @@ intel_tv_detect(struct drm_connector *connector, bool force) | |||
1356 | drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); | 1359 | drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); |
1357 | 1360 | ||
1358 | if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) { | 1361 | if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) { |
1359 | type = intel_tv_detect_type(intel_tv); | 1362 | type = intel_tv_detect_type(intel_tv, connector); |
1360 | } else if (force) { | 1363 | } else if (force) { |
1361 | struct drm_crtc *crtc; | 1364 | struct drm_crtc *crtc; |
1362 | int dpms_mode; | 1365 | int dpms_mode; |
@@ -1364,7 +1367,7 @@ intel_tv_detect(struct drm_connector *connector, bool force) | |||
1364 | crtc = intel_get_load_detect_pipe(&intel_tv->base, connector, | 1367 | crtc = intel_get_load_detect_pipe(&intel_tv->base, connector, |
1365 | &mode, &dpms_mode); | 1368 | &mode, &dpms_mode); |
1366 | if (crtc) { | 1369 | if (crtc) { |
1367 | type = intel_tv_detect_type(intel_tv); | 1370 | type = intel_tv_detect_type(intel_tv, connector); |
1368 | intel_release_load_detect_pipe(&intel_tv->base, connector, | 1371 | intel_release_load_detect_pipe(&intel_tv->base, connector, |
1369 | dpms_mode); | 1372 | dpms_mode); |
1370 | } else | 1373 | } else |
@@ -1658,6 +1661,18 @@ intel_tv_init(struct drm_device *dev) | |||
1658 | intel_encoder = &intel_tv->base; | 1661 | intel_encoder = &intel_tv->base; |
1659 | connector = &intel_connector->base; | 1662 | connector = &intel_connector->base; |
1660 | 1663 | ||
1664 | /* The documentation, for the older chipsets at least, recommends | ||
1665 | * using a polling method rather than hotplug detection for TVs. | ||
1666 | * This is because in order to perform the hotplug detection, the PLLs | ||
1667 | * for the TV must be kept alive, increasing power drain and starving | ||
1668 | * bandwidth from other encoders. Notably, it causes | ||
1669 | * pipe underruns on Crestline when this encoder is supposedly idle. | ||
1670 | * | ||
1671 | * More recent chipsets favour HDMI rather than integrated S-Video. | ||
1672 | */ | ||
1673 | connector->polled = | ||
1674 | DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; | ||
1675 | |||
1661 | drm_connector_init(dev, connector, &intel_tv_connector_funcs, | 1676 | drm_connector_init(dev, connector, &intel_tv_connector_funcs, |
1662 | DRM_MODE_CONNECTOR_SVIDEO); | 1677 | DRM_MODE_CONNECTOR_SVIDEO); |
1663 | 1678 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 3fcffcf75e35..2ad49cbf7c8b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
@@ -49,7 +49,10 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) | |||
49 | DRM_ERROR("bo %p still attached to GEM object\n", bo); | 49 | DRM_ERROR("bo %p still attached to GEM object\n", bo); |
50 | 50 | ||
51 | nv10_mem_put_tile_region(dev, nvbo->tile, NULL); | 51 | nv10_mem_put_tile_region(dev, nvbo->tile, NULL); |
52 | nouveau_vm_put(&nvbo->vma); | 52 | if (nvbo->vma.node) { |
53 | nouveau_vm_unmap(&nvbo->vma); | ||
54 | nouveau_vm_put(&nvbo->vma); | ||
55 | } | ||
53 | kfree(nvbo); | 56 | kfree(nvbo); |
54 | } | 57 | } |
55 | 58 | ||
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index d56f08d3cbdc..a2199fe9fa9b 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -48,29 +48,29 @@ static void atombios_overscan_setup(struct drm_crtc *crtc, | |||
48 | 48 | ||
49 | switch (radeon_crtc->rmx_type) { | 49 | switch (radeon_crtc->rmx_type) { |
50 | case RMX_CENTER: | 50 | case RMX_CENTER: |
51 | args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; | 51 | args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2); |
52 | args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; | 52 | args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2); |
53 | args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; | 53 | args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2); |
54 | args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; | 54 | args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2); |
55 | break; | 55 | break; |
56 | case RMX_ASPECT: | 56 | case RMX_ASPECT: |
57 | a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay; | 57 | a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay; |
58 | a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay; | 58 | a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay; |
59 | 59 | ||
60 | if (a1 > a2) { | 60 | if (a1 > a2) { |
61 | args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; | 61 | args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2); |
62 | args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; | 62 | args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2); |
63 | } else if (a2 > a1) { | 63 | } else if (a2 > a1) { |
64 | args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; | 64 | args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2); |
65 | args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; | 65 | args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2); |
66 | } | 66 | } |
67 | break; | 67 | break; |
68 | case RMX_FULL: | 68 | case RMX_FULL: |
69 | default: | 69 | default: |
70 | args.usOverscanRight = radeon_crtc->h_border; | 70 | args.usOverscanRight = cpu_to_le16(radeon_crtc->h_border); |
71 | args.usOverscanLeft = radeon_crtc->h_border; | 71 | args.usOverscanLeft = cpu_to_le16(radeon_crtc->h_border); |
72 | args.usOverscanBottom = radeon_crtc->v_border; | 72 | args.usOverscanBottom = cpu_to_le16(radeon_crtc->v_border); |
73 | args.usOverscanTop = radeon_crtc->v_border; | 73 | args.usOverscanTop = cpu_to_le16(radeon_crtc->v_border); |
74 | break; | 74 | break; |
75 | } | 75 | } |
76 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 76 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
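
Editorial note: the overscan hunk above wraps every 16-bit field written into the ATOM BIOS argument structure in cpu_to_le16(), because the AtomBIOS tables expect little-endian data regardless of host byte order; the conversion is a no-op on little-endian hosts and a byte swap on big-endian ones. A stand-alone sketch of the same idea, with a simplified argument struct and a local fallback helper instead of the kernel's byteorder API:

    /* Sketch of marshalling host values into a little-endian firmware
     * table; the struct and the local cpu_to_le16 fallback are
     * illustrative, the kernel provides the real helpers. */
    #include <stdint.h>

    static inline uint16_t example_cpu_to_le16(uint16_t v)
    {
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    	return (uint16_t)((v >> 8) | (v << 8));	/* swap on big-endian hosts */
    #else
    	return v;				/* no-op on little-endian hosts */
    #endif
    }

    struct example_overscan_args {
    	uint16_t overscan_top;			/* stored little-endian */
    	uint16_t overscan_bottom;
    };

    static void fill_overscan(struct example_overscan_args *args,
    			  uint16_t top, uint16_t bottom)
    {
    	args->overscan_top = example_cpu_to_le16(top);
    	args->overscan_bottom = example_cpu_to_le16(bottom);
    }
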
@@ -419,23 +419,23 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc, | |||
419 | memset(&args, 0, sizeof(args)); | 419 | memset(&args, 0, sizeof(args)); |
420 | 420 | ||
421 | if (ASIC_IS_DCE5(rdev)) { | 421 | if (ASIC_IS_DCE5(rdev)) { |
422 | args.v3.usSpreadSpectrumAmountFrac = 0; | 422 | args.v3.usSpreadSpectrumAmountFrac = cpu_to_le16(0); |
423 | args.v3.ucSpreadSpectrumType = ss->type; | 423 | args.v3.ucSpreadSpectrumType = ss->type; |
424 | switch (pll_id) { | 424 | switch (pll_id) { |
425 | case ATOM_PPLL1: | 425 | case ATOM_PPLL1: |
426 | args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL; | 426 | args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL; |
427 | args.v3.usSpreadSpectrumAmount = ss->amount; | 427 | args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); |
428 | args.v3.usSpreadSpectrumStep = ss->step; | 428 | args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step); |
429 | break; | 429 | break; |
430 | case ATOM_PPLL2: | 430 | case ATOM_PPLL2: |
431 | args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL; | 431 | args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL; |
432 | args.v3.usSpreadSpectrumAmount = ss->amount; | 432 | args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); |
433 | args.v3.usSpreadSpectrumStep = ss->step; | 433 | args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step); |
434 | break; | 434 | break; |
435 | case ATOM_DCPLL: | 435 | case ATOM_DCPLL: |
436 | args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL; | 436 | args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL; |
437 | args.v3.usSpreadSpectrumAmount = 0; | 437 | args.v3.usSpreadSpectrumAmount = cpu_to_le16(0); |
438 | args.v3.usSpreadSpectrumStep = 0; | 438 | args.v3.usSpreadSpectrumStep = cpu_to_le16(0); |
439 | break; | 439 | break; |
440 | case ATOM_PPLL_INVALID: | 440 | case ATOM_PPLL_INVALID: |
441 | return; | 441 | return; |
@@ -447,18 +447,18 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc, | |||
447 | switch (pll_id) { | 447 | switch (pll_id) { |
448 | case ATOM_PPLL1: | 448 | case ATOM_PPLL1: |
449 | args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL; | 449 | args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL; |
450 | args.v2.usSpreadSpectrumAmount = ss->amount; | 450 | args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); |
451 | args.v2.usSpreadSpectrumStep = ss->step; | 451 | args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step); |
452 | break; | 452 | break; |
453 | case ATOM_PPLL2: | 453 | case ATOM_PPLL2: |
454 | args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL; | 454 | args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL; |
455 | args.v2.usSpreadSpectrumAmount = ss->amount; | 455 | args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); |
456 | args.v2.usSpreadSpectrumStep = ss->step; | 456 | args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step); |
457 | break; | 457 | break; |
458 | case ATOM_DCPLL: | 458 | case ATOM_DCPLL: |
459 | args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL; | 459 | args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL; |
460 | args.v2.usSpreadSpectrumAmount = 0; | 460 | args.v2.usSpreadSpectrumAmount = cpu_to_le16(0); |
461 | args.v2.usSpreadSpectrumStep = 0; | 461 | args.v2.usSpreadSpectrumStep = cpu_to_le16(0); |
462 | break; | 462 | break; |
463 | case ATOM_PPLL_INVALID: | 463 | case ATOM_PPLL_INVALID: |
464 | return; | 464 | return; |
@@ -538,7 +538,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
538 | pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; | 538 | pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; |
539 | else | 539 | else |
540 | pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; | 540 | pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; |
541 | |||
542 | } | 541 | } |
543 | 542 | ||
544 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 543 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
@@ -555,29 +554,28 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
555 | dp_clock = dig_connector->dp_clock; | 554 | dp_clock = dig_connector->dp_clock; |
556 | } | 555 | } |
557 | } | 556 | } |
558 | /* this might work properly with the new pll algo */ | 557 | |
559 | #if 0 /* doesn't work properly on some laptops */ | ||
560 | /* use recommended ref_div for ss */ | 558 | /* use recommended ref_div for ss */ |
561 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | 559 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
562 | if (ss_enabled) { | 560 | if (ss_enabled) { |
563 | if (ss->refdiv) { | 561 | if (ss->refdiv) { |
562 | pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; | ||
564 | pll->flags |= RADEON_PLL_USE_REF_DIV; | 563 | pll->flags |= RADEON_PLL_USE_REF_DIV; |
565 | pll->reference_div = ss->refdiv; | 564 | pll->reference_div = ss->refdiv; |
565 | if (ASIC_IS_AVIVO(rdev)) | ||
566 | pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; | ||
566 | } | 567 | } |
567 | } | 568 | } |
568 | } | 569 | } |
569 | #endif | 570 | |
570 | if (ASIC_IS_AVIVO(rdev)) { | 571 | if (ASIC_IS_AVIVO(rdev)) { |
571 | /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ | 572 | /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ |
572 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) | 573 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) |
573 | adjusted_clock = mode->clock * 2; | 574 | adjusted_clock = mode->clock * 2; |
574 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) | 575 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) |
575 | pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; | 576 | pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; |
576 | /* rv515 needs more testing with this option */ | 577 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) |
577 | if (rdev->family != CHIP_RV515) { | 578 | pll->flags |= RADEON_PLL_IS_LCD; |
578 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) | ||
579 | pll->flags |= RADEON_PLL_IS_LCD; | ||
580 | } | ||
581 | } else { | 579 | } else { |
582 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) | 580 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) |
583 | pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; | 581 | pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; |
@@ -664,10 +662,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
664 | index, (uint32_t *)&args); | 662 | index, (uint32_t *)&args); |
665 | adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10; | 663 | adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10; |
666 | if (args.v3.sOutput.ucRefDiv) { | 664 | if (args.v3.sOutput.ucRefDiv) { |
665 | pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; | ||
667 | pll->flags |= RADEON_PLL_USE_REF_DIV; | 666 | pll->flags |= RADEON_PLL_USE_REF_DIV; |
668 | pll->reference_div = args.v3.sOutput.ucRefDiv; | 667 | pll->reference_div = args.v3.sOutput.ucRefDiv; |
669 | } | 668 | } |
670 | if (args.v3.sOutput.ucPostDiv) { | 669 | if (args.v3.sOutput.ucPostDiv) { |
670 | pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; | ||
671 | pll->flags |= RADEON_PLL_USE_POST_DIV; | 671 | pll->flags |= RADEON_PLL_USE_POST_DIV; |
672 | pll->post_div = args.v3.sOutput.ucPostDiv; | 672 | pll->post_div = args.v3.sOutput.ucPostDiv; |
673 | } | 673 | } |
@@ -721,14 +721,14 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc, | |||
721 | * SetPixelClock provides the dividers | 721 | * SetPixelClock provides the dividers |
722 | */ | 722 | */ |
723 | args.v5.ucCRTC = ATOM_CRTC_INVALID; | 723 | args.v5.ucCRTC = ATOM_CRTC_INVALID; |
724 | args.v5.usPixelClock = dispclk; | 724 | args.v5.usPixelClock = cpu_to_le16(dispclk); |
725 | args.v5.ucPpll = ATOM_DCPLL; | 725 | args.v5.ucPpll = ATOM_DCPLL; |
726 | break; | 726 | break; |
727 | case 6: | 727 | case 6: |
728 | /* if the default dcpll clock is specified, | 728 | /* if the default dcpll clock is specified, |
729 | * SetPixelClock provides the dividers | 729 | * SetPixelClock provides the dividers |
730 | */ | 730 | */ |
731 | args.v6.ulDispEngClkFreq = dispclk; | 731 | args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk); |
732 | args.v6.ucPpll = ATOM_DCPLL; | 732 | args.v6.ucPpll = ATOM_DCPLL; |
733 | break; | 733 | break; |
734 | default: | 734 | default: |
@@ -957,11 +957,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode | |||
957 | /* adjust pixel clock as needed */ | 957 | /* adjust pixel clock as needed */ |
958 | adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss); | 958 | adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss); |
959 | 959 | ||
960 | /* rv515 seems happier with the old algo */ | 960 | if (ASIC_IS_AVIVO(rdev)) |
961 | if (rdev->family == CHIP_RV515) | ||
962 | radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, | ||
963 | &ref_div, &post_div); | ||
964 | else if (ASIC_IS_AVIVO(rdev)) | ||
965 | radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, | 961 | radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, |
966 | &ref_div, &post_div); | 962 | &ref_div, &post_div); |
967 | else | 963 | else |
@@ -995,9 +991,9 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode | |||
995 | } | 991 | } |
996 | } | 992 | } |
997 | 993 | ||
998 | static int evergreen_crtc_do_set_base(struct drm_crtc *crtc, | 994 | static int dce4_crtc_do_set_base(struct drm_crtc *crtc, |
999 | struct drm_framebuffer *fb, | 995 | struct drm_framebuffer *fb, |
1000 | int x, int y, int atomic) | 996 | int x, int y, int atomic) |
1001 | { | 997 | { |
1002 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 998 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
1003 | struct drm_device *dev = crtc->dev; | 999 | struct drm_device *dev = crtc->dev; |
@@ -1137,12 +1133,6 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc, | |||
1137 | WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, | 1133 | WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, |
1138 | (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay); | 1134 | (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay); |
1139 | 1135 | ||
1140 | if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) | ||
1141 | WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, | ||
1142 | EVERGREEN_INTERLEAVE_EN); | ||
1143 | else | ||
1144 | WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0); | ||
1145 | |||
1146 | if (!atomic && fb && fb != crtc->fb) { | 1136 | if (!atomic && fb && fb != crtc->fb) { |
1147 | radeon_fb = to_radeon_framebuffer(fb); | 1137 | radeon_fb = to_radeon_framebuffer(fb); |
1148 | rbo = gem_to_radeon_bo(radeon_fb->obj); | 1138 | rbo = gem_to_radeon_bo(radeon_fb->obj); |
@@ -1300,12 +1290,6 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, | |||
1300 | WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset, | 1290 | WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset, |
1301 | (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay); | 1291 | (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay); |
1302 | 1292 | ||
1303 | if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) | ||
1304 | WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, | ||
1305 | AVIVO_D1MODE_INTERLEAVE_EN); | ||
1306 | else | ||
1307 | WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0); | ||
1308 | |||
1309 | if (!atomic && fb && fb != crtc->fb) { | 1293 | if (!atomic && fb && fb != crtc->fb) { |
1310 | radeon_fb = to_radeon_framebuffer(fb); | 1294 | radeon_fb = to_radeon_framebuffer(fb); |
1311 | rbo = gem_to_radeon_bo(radeon_fb->obj); | 1295 | rbo = gem_to_radeon_bo(radeon_fb->obj); |
@@ -1329,7 +1313,7 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
1329 | struct radeon_device *rdev = dev->dev_private; | 1313 | struct radeon_device *rdev = dev->dev_private; |
1330 | 1314 | ||
1331 | if (ASIC_IS_DCE4(rdev)) | 1315 | if (ASIC_IS_DCE4(rdev)) |
1332 | return evergreen_crtc_do_set_base(crtc, old_fb, x, y, 0); | 1316 | return dce4_crtc_do_set_base(crtc, old_fb, x, y, 0); |
1333 | else if (ASIC_IS_AVIVO(rdev)) | 1317 | else if (ASIC_IS_AVIVO(rdev)) |
1334 | return avivo_crtc_do_set_base(crtc, old_fb, x, y, 0); | 1318 | return avivo_crtc_do_set_base(crtc, old_fb, x, y, 0); |
1335 | else | 1319 | else |
@@ -1344,7 +1328,7 @@ int atombios_crtc_set_base_atomic(struct drm_crtc *crtc, | |||
1344 | struct radeon_device *rdev = dev->dev_private; | 1328 | struct radeon_device *rdev = dev->dev_private; |
1345 | 1329 | ||
1346 | if (ASIC_IS_DCE4(rdev)) | 1330 | if (ASIC_IS_DCE4(rdev)) |
1347 | return evergreen_crtc_do_set_base(crtc, fb, x, y, 1); | 1331 | return dce4_crtc_do_set_base(crtc, fb, x, y, 1); |
1348 | else if (ASIC_IS_AVIVO(rdev)) | 1332 | else if (ASIC_IS_AVIVO(rdev)) |
1349 | return avivo_crtc_do_set_base(crtc, fb, x, y, 1); | 1333 | return avivo_crtc_do_set_base(crtc, fb, x, y, 1); |
1350 | else | 1334 | else |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index d4045223d0ff..789441ed9837 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -1192,7 +1192,11 @@ void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) | |||
1192 | radeon_ring_write(rdev, 1); | 1192 | radeon_ring_write(rdev, 1); |
1193 | /* FIXME: implement */ | 1193 | /* FIXME: implement */ |
1194 | radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); | 1194 | radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); |
1195 | radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC); | 1195 | radeon_ring_write(rdev, |
1196 | #ifdef __BIG_ENDIAN | ||
1197 | (2 << 0) | | ||
1198 | #endif | ||
1199 | (ib->gpu_addr & 0xFFFFFFFC)); | ||
1196 | radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF); | 1200 | radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF); |
1197 | radeon_ring_write(rdev, ib->length_dw); | 1201 | radeon_ring_write(rdev, ib->length_dw); |
1198 | } | 1202 | } |
@@ -1207,7 +1211,11 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev) | |||
1207 | return -EINVAL; | 1211 | return -EINVAL; |
1208 | 1212 | ||
1209 | r700_cp_stop(rdev); | 1213 | r700_cp_stop(rdev); |
1210 | WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0)); | 1214 | WREG32(CP_RB_CNTL, |
1215 | #ifdef __BIG_ENDIAN | ||
1216 | BUF_SWAP_32BIT | | ||
1217 | #endif | ||
1218 | RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3)); | ||
1211 | 1219 | ||
1212 | fw_data = (const __be32 *)rdev->pfp_fw->data; | 1220 | fw_data = (const __be32 *)rdev->pfp_fw->data; |
1213 | WREG32(CP_PFP_UCODE_ADDR, 0); | 1221 | WREG32(CP_PFP_UCODE_ADDR, 0); |
@@ -1326,7 +1334,11 @@ int evergreen_cp_resume(struct radeon_device *rdev) | |||
1326 | WREG32(CP_RB_WPTR, 0); | 1334 | WREG32(CP_RB_WPTR, 0); |
1327 | 1335 | ||
1328 | /* set the wb address whether it's enabled or not */ | 1336 | /* set the wb address whether it's enabled or not */ |
1329 | WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); | 1337 | WREG32(CP_RB_RPTR_ADDR, |
1338 | #ifdef __BIG_ENDIAN | ||
1339 | RB_RPTR_SWAP(2) | | ||
1340 | #endif | ||
1341 | ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC)); | ||
1330 | WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); | 1342 | WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); |
1331 | WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); | 1343 | WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); |
1332 | 1344 | ||
@@ -2627,8 +2639,8 @@ restart_ih: | |||
2627 | while (rptr != wptr) { | 2639 | while (rptr != wptr) { |
2628 | /* wptr/rptr are in bytes! */ | 2640 | /* wptr/rptr are in bytes! */ |
2629 | ring_index = rptr / 4; | 2641 | ring_index = rptr / 4; |
2630 | src_id = rdev->ih.ring[ring_index] & 0xff; | 2642 | src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff; |
2631 | src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff; | 2643 | src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff; |
2632 | 2644 | ||
2633 | switch (src_id) { | 2645 | switch (src_id) { |
2634 | case 1: /* D1 vblank/vline */ | 2646 | case 1: /* D1 vblank/vline */ |
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c index 2ed930e02f3a..3218287f4c51 100644 --- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c +++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c | |||
@@ -55,7 +55,7 @@ set_render_target(struct radeon_device *rdev, int format, | |||
55 | if (h < 8) | 55 | if (h < 8) |
56 | h = 8; | 56 | h = 8; |
57 | 57 | ||
58 | cb_color_info = ((format << 2) | (1 << 24)); | 58 | cb_color_info = ((format << 2) | (1 << 24) | (1 << 8)); |
59 | pitch = (w / 8) - 1; | 59 | pitch = (w / 8) - 1; |
60 | slice = ((w * h) / 64) - 1; | 60 | slice = ((w * h) / 64) - 1; |
61 | 61 | ||
@@ -133,6 +133,9 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) | |||
133 | 133 | ||
134 | /* high addr, stride */ | 134 | /* high addr, stride */ |
135 | sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8)); | 135 | sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8)); |
136 | #ifdef __BIG_ENDIAN | ||
137 | sq_vtx_constant_word2 |= (2 << 30); | ||
138 | #endif | ||
136 | /* xyzw swizzles */ | 139 | /* xyzw swizzles */ |
137 | sq_vtx_constant_word3 = (0 << 3) | (1 << 6) | (2 << 9) | (3 << 12); | 140 | sq_vtx_constant_word3 = (0 << 3) | (1 << 6) | (2 << 9) | (3 << 12); |
138 | 141 | ||
@@ -173,7 +176,7 @@ set_tex_resource(struct radeon_device *rdev, | |||
173 | sq_tex_resource_word0 = (1 << 0); /* 2D */ | 176 | sq_tex_resource_word0 = (1 << 0); /* 2D */ |
174 | sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) | | 177 | sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) | |
175 | ((w - 1) << 18)); | 178 | ((w - 1) << 18)); |
176 | sq_tex_resource_word1 = ((h - 1) << 0); | 179 | sq_tex_resource_word1 = ((h - 1) << 0) | (1 << 28); |
177 | /* xyzw swizzles */ | 180 | /* xyzw swizzles */ |
178 | sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25); | 181 | sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25); |
179 | 182 | ||
@@ -221,7 +224,11 @@ draw_auto(struct radeon_device *rdev) | |||
221 | radeon_ring_write(rdev, DI_PT_RECTLIST); | 224 | radeon_ring_write(rdev, DI_PT_RECTLIST); |
222 | 225 | ||
223 | radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0)); | 226 | radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0)); |
224 | radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT); | 227 | radeon_ring_write(rdev, |
228 | #ifdef __BIG_ENDIAN | ||
229 | (2 << 2) | | ||
230 | #endif | ||
231 | DI_INDEX_SIZE_16_BIT); | ||
225 | 232 | ||
226 | radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0)); | 233 | radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0)); |
227 | radeon_ring_write(rdev, 1); | 234 | radeon_ring_write(rdev, 1); |
@@ -541,7 +548,7 @@ static inline uint32_t i2f(uint32_t input) | |||
541 | int evergreen_blit_init(struct radeon_device *rdev) | 548 | int evergreen_blit_init(struct radeon_device *rdev) |
542 | { | 549 | { |
543 | u32 obj_size; | 550 | u32 obj_size; |
544 | int r, dwords; | 551 | int i, r, dwords; |
545 | void *ptr; | 552 | void *ptr; |
546 | u32 packet2s[16]; | 553 | u32 packet2s[16]; |
547 | int num_packet2s = 0; | 554 | int num_packet2s = 0; |
@@ -557,7 +564,7 @@ int evergreen_blit_init(struct radeon_device *rdev) | |||
557 | 564 | ||
558 | dwords = rdev->r600_blit.state_len; | 565 | dwords = rdev->r600_blit.state_len; |
559 | while (dwords & 0xf) { | 566 | while (dwords & 0xf) { |
560 | packet2s[num_packet2s++] = PACKET2(0); | 567 | packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0)); |
561 | dwords++; | 568 | dwords++; |
562 | } | 569 | } |
563 | 570 | ||
@@ -598,8 +605,10 @@ int evergreen_blit_init(struct radeon_device *rdev) | |||
598 | if (num_packet2s) | 605 | if (num_packet2s) |
599 | memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), | 606 | memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), |
600 | packet2s, num_packet2s * 4); | 607 | packet2s, num_packet2s * 4); |
601 | memcpy(ptr + rdev->r600_blit.vs_offset, evergreen_vs, evergreen_vs_size * 4); | 608 | for (i = 0; i < evergreen_vs_size; i++) |
602 | memcpy(ptr + rdev->r600_blit.ps_offset, evergreen_ps, evergreen_ps_size * 4); | 609 | *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]); |
610 | for (i = 0; i < evergreen_ps_size; i++) | ||
611 | *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]); | ||
603 | radeon_bo_kunmap(rdev->r600_blit.shader_obj); | 612 | radeon_bo_kunmap(rdev->r600_blit.shader_obj); |
604 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | 613 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); |
605 | 614 | ||
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c index ef1d28c07fbf..3a10399e0066 100644 --- a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c +++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c | |||
@@ -311,11 +311,19 @@ const u32 evergreen_vs[] = | |||
311 | 0x00000000, | 311 | 0x00000000, |
312 | 0x3c000000, | 312 | 0x3c000000, |
313 | 0x67961001, | 313 | 0x67961001, |
314 | #ifdef __BIG_ENDIAN | ||
315 | 0x000a0000, | ||
316 | #else | ||
314 | 0x00080000, | 317 | 0x00080000, |
318 | #endif | ||
315 | 0x00000000, | 319 | 0x00000000, |
316 | 0x1c000000, | 320 | 0x1c000000, |
317 | 0x67961000, | 321 | 0x67961000, |
322 | #ifdef __BIG_ENDIAN | ||
323 | 0x00020008, | ||
324 | #else | ||
318 | 0x00000008, | 325 | 0x00000008, |
326 | #endif | ||
319 | 0x00000000, | 327 | 0x00000000, |
320 | }; | 328 | }; |
321 | 329 | ||
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 21e839bd20e7..9aaa3f0c9372 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h | |||
@@ -98,6 +98,7 @@ | |||
98 | #define BUF_SWAP_32BIT (2 << 16) | 98 | #define BUF_SWAP_32BIT (2 << 16) |
99 | #define CP_RB_RPTR 0x8700 | 99 | #define CP_RB_RPTR 0x8700 |
100 | #define CP_RB_RPTR_ADDR 0xC10C | 100 | #define CP_RB_RPTR_ADDR 0xC10C |
101 | #define RB_RPTR_SWAP(x) ((x) << 0) | ||
101 | #define CP_RB_RPTR_ADDR_HI 0xC110 | 102 | #define CP_RB_RPTR_ADDR_HI 0xC110 |
102 | #define CP_RB_RPTR_WR 0xC108 | 103 | #define CP_RB_RPTR_WR 0xC108 |
103 | #define CP_RB_WPTR 0xC114 | 104 | #define CP_RB_WPTR 0xC114 |
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c index 607241c6a8a9..5a82b6b75849 100644 --- a/drivers/gpu/drm/radeon/mkregtable.c +++ b/drivers/gpu/drm/radeon/mkregtable.c | |||
@@ -673,8 +673,10 @@ static int parser_auth(struct table *t, const char *filename) | |||
673 | last_reg = strtol(last_reg_s, NULL, 16); | 673 | last_reg = strtol(last_reg_s, NULL, 16); |
674 | 674 | ||
675 | do { | 675 | do { |
676 | if (fgets(buf, 1024, file) == NULL) | 676 | if (fgets(buf, 1024, file) == NULL) { |
677 | fclose(file); | ||
677 | return -1; | 678 | return -1; |
679 | } | ||
678 | len = strlen(buf); | 680 | len = strlen(buf); |
679 | if (ftell(file) == end) | 681 | if (ftell(file) == end) |
680 | done = 1; | 682 | done = 1; |
@@ -685,6 +687,7 @@ static int parser_auth(struct table *t, const char *filename) | |||
685 | fprintf(stderr, | 687 | fprintf(stderr, |
686 | "Error matching regular expression %d in %s\n", | 688 | "Error matching regular expression %d in %s\n", |
687 | r, filename); | 689 | r, filename); |
690 | fclose(file); | ||
688 | return -1; | 691 | return -1; |
689 | } else { | 692 | } else { |
690 | buf[match[0].rm_eo] = 0; | 693 | buf[match[0].rm_eo] = 0; |
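
Editorial note: the two fclose() calls added above close a file-handle leak on the early-return paths of parser_auth(). In a userspace tool like mkregtable the same result is often achieved with a single cleanup label so that every exit path releases the handle; a hedged sketch of that alternative structure, with parse_line() as a hypothetical helper:

    /* Sketch of the goto-cleanup pattern as an alternative to closing
     * the file on each early return; parse_line() is hypothetical. */
    #include <stdio.h>

    static int parse_line(const char *line)
    {
    	/* hypothetical: accept any non-empty line */
    	return (line && line[0] != '\0') ? 0 : -1;
    }

    static int parse_file(const char *filename)
    {
    	char buf[1024];
    	int ret = -1;
    	FILE *file = fopen(filename, "r");

    	if (!file)
    		return -1;

    	while (fgets(buf, sizeof(buf), file)) {
    		if (parse_line(buf) < 0)
    			goto out;	/* error: fall through to fclose */
    	}
    	ret = 0;
    out:
    	fclose(file);			/* single close for every path */
    	return ret;
    }
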
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 5f15820efe12..93fa735c8c1a 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -1427,6 +1427,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1427 | } | 1427 | } |
1428 | track->zb.robj = reloc->robj; | 1428 | track->zb.robj = reloc->robj; |
1429 | track->zb.offset = idx_value; | 1429 | track->zb.offset = idx_value; |
1430 | track->zb_dirty = true; | ||
1430 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 1431 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1431 | break; | 1432 | break; |
1432 | case RADEON_RB3D_COLOROFFSET: | 1433 | case RADEON_RB3D_COLOROFFSET: |
@@ -1439,6 +1440,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1439 | } | 1440 | } |
1440 | track->cb[0].robj = reloc->robj; | 1441 | track->cb[0].robj = reloc->robj; |
1441 | track->cb[0].offset = idx_value; | 1442 | track->cb[0].offset = idx_value; |
1443 | track->cb_dirty = true; | ||
1442 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 1444 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1443 | break; | 1445 | break; |
1444 | case RADEON_PP_TXOFFSET_0: | 1446 | case RADEON_PP_TXOFFSET_0: |
@@ -1454,6 +1456,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1454 | } | 1456 | } |
1455 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 1457 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1456 | track->textures[i].robj = reloc->robj; | 1458 | track->textures[i].robj = reloc->robj; |
1459 | track->tex_dirty = true; | ||
1457 | break; | 1460 | break; |
1458 | case RADEON_PP_CUBIC_OFFSET_T0_0: | 1461 | case RADEON_PP_CUBIC_OFFSET_T0_0: |
1459 | case RADEON_PP_CUBIC_OFFSET_T0_1: | 1462 | case RADEON_PP_CUBIC_OFFSET_T0_1: |
@@ -1471,6 +1474,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1471 | track->textures[0].cube_info[i].offset = idx_value; | 1474 | track->textures[0].cube_info[i].offset = idx_value; |
1472 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 1475 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1473 | track->textures[0].cube_info[i].robj = reloc->robj; | 1476 | track->textures[0].cube_info[i].robj = reloc->robj; |
1477 | track->tex_dirty = true; | ||
1474 | break; | 1478 | break; |
1475 | case RADEON_PP_CUBIC_OFFSET_T1_0: | 1479 | case RADEON_PP_CUBIC_OFFSET_T1_0: |
1476 | case RADEON_PP_CUBIC_OFFSET_T1_1: | 1480 | case RADEON_PP_CUBIC_OFFSET_T1_1: |
@@ -1488,6 +1492,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1488 | track->textures[1].cube_info[i].offset = idx_value; | 1492 | track->textures[1].cube_info[i].offset = idx_value; |
1489 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 1493 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1490 | track->textures[1].cube_info[i].robj = reloc->robj; | 1494 | track->textures[1].cube_info[i].robj = reloc->robj; |
1495 | track->tex_dirty = true; | ||
1491 | break; | 1496 | break; |
1492 | case RADEON_PP_CUBIC_OFFSET_T2_0: | 1497 | case RADEON_PP_CUBIC_OFFSET_T2_0: |
1493 | case RADEON_PP_CUBIC_OFFSET_T2_1: | 1498 | case RADEON_PP_CUBIC_OFFSET_T2_1: |
@@ -1505,9 +1510,12 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1505 | track->textures[2].cube_info[i].offset = idx_value; | 1510 | track->textures[2].cube_info[i].offset = idx_value; |
1506 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 1511 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
1507 | track->textures[2].cube_info[i].robj = reloc->robj; | 1512 | track->textures[2].cube_info[i].robj = reloc->robj; |
1513 | track->tex_dirty = true; | ||
1508 | break; | 1514 | break; |
1509 | case RADEON_RE_WIDTH_HEIGHT: | 1515 | case RADEON_RE_WIDTH_HEIGHT: |
1510 | track->maxy = ((idx_value >> 16) & 0x7FF); | 1516 | track->maxy = ((idx_value >> 16) & 0x7FF); |
1517 | track->cb_dirty = true; | ||
1518 | track->zb_dirty = true; | ||
1511 | break; | 1519 | break; |
1512 | case RADEON_RB3D_COLORPITCH: | 1520 | case RADEON_RB3D_COLORPITCH: |
1513 | r = r100_cs_packet_next_reloc(p, &reloc); | 1521 | r = r100_cs_packet_next_reloc(p, &reloc); |
@@ -1528,9 +1536,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1528 | ib[idx] = tmp; | 1536 | ib[idx] = tmp; |
1529 | 1537 | ||
1530 | track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; | 1538 | track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; |
1539 | track->cb_dirty = true; | ||
1531 | break; | 1540 | break; |
1532 | case RADEON_RB3D_DEPTHPITCH: | 1541 | case RADEON_RB3D_DEPTHPITCH: |
1533 | track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK; | 1542 | track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK; |
1543 | track->zb_dirty = true; | ||
1534 | break; | 1544 | break; |
1535 | case RADEON_RB3D_CNTL: | 1545 | case RADEON_RB3D_CNTL: |
1536 | switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { | 1546 | switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { |
@@ -1555,6 +1565,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1555 | return -EINVAL; | 1565 | return -EINVAL; |
1556 | } | 1566 | } |
1557 | track->z_enabled = !!(idx_value & RADEON_Z_ENABLE); | 1567 | track->z_enabled = !!(idx_value & RADEON_Z_ENABLE); |
1568 | track->cb_dirty = true; | ||
1569 | track->zb_dirty = true; | ||
1558 | break; | 1570 | break; |
1559 | case RADEON_RB3D_ZSTENCILCNTL: | 1571 | case RADEON_RB3D_ZSTENCILCNTL: |
1560 | switch (idx_value & 0xf) { | 1572 | switch (idx_value & 0xf) { |
@@ -1572,6 +1584,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1572 | default: | 1584 | default: |
1573 | break; | 1585 | break; |
1574 | } | 1586 | } |
1587 | track->zb_dirty = true; | ||
1575 | break; | 1588 | break; |
1576 | case RADEON_RB3D_ZPASS_ADDR: | 1589 | case RADEON_RB3D_ZPASS_ADDR: |
1577 | r = r100_cs_packet_next_reloc(p, &reloc); | 1590 | r = r100_cs_packet_next_reloc(p, &reloc); |
@@ -1588,6 +1601,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1588 | uint32_t temp = idx_value >> 4; | 1601 | uint32_t temp = idx_value >> 4; |
1589 | for (i = 0; i < track->num_texture; i++) | 1602 | for (i = 0; i < track->num_texture; i++) |
1590 | track->textures[i].enabled = !!(temp & (1 << i)); | 1603 | track->textures[i].enabled = !!(temp & (1 << i)); |
1604 | track->tex_dirty = true; | ||
1591 | } | 1605 | } |
1592 | break; | 1606 | break; |
1593 | case RADEON_SE_VF_CNTL: | 1607 | case RADEON_SE_VF_CNTL: |
@@ -1602,12 +1616,14 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1602 | i = (reg - RADEON_PP_TEX_SIZE_0) / 8; | 1616 | i = (reg - RADEON_PP_TEX_SIZE_0) / 8; |
1603 | track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1; | 1617 | track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1; |
1604 | track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; | 1618 | track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; |
1619 | track->tex_dirty = true; | ||
1605 | break; | 1620 | break; |
1606 | case RADEON_PP_TEX_PITCH_0: | 1621 | case RADEON_PP_TEX_PITCH_0: |
1607 | case RADEON_PP_TEX_PITCH_1: | 1622 | case RADEON_PP_TEX_PITCH_1: |
1608 | case RADEON_PP_TEX_PITCH_2: | 1623 | case RADEON_PP_TEX_PITCH_2: |
1609 | i = (reg - RADEON_PP_TEX_PITCH_0) / 8; | 1624 | i = (reg - RADEON_PP_TEX_PITCH_0) / 8; |
1610 | track->textures[i].pitch = idx_value + 32; | 1625 | track->textures[i].pitch = idx_value + 32; |
1626 | track->tex_dirty = true; | ||
1611 | break; | 1627 | break; |
1612 | case RADEON_PP_TXFILTER_0: | 1628 | case RADEON_PP_TXFILTER_0: |
1613 | case RADEON_PP_TXFILTER_1: | 1629 | case RADEON_PP_TXFILTER_1: |
@@ -1621,6 +1637,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1621 | tmp = (idx_value >> 27) & 0x7; | 1637 | tmp = (idx_value >> 27) & 0x7; |
1622 | if (tmp == 2 || tmp == 6) | 1638 | if (tmp == 2 || tmp == 6) |
1623 | track->textures[i].roundup_h = false; | 1639 | track->textures[i].roundup_h = false; |
1640 | track->tex_dirty = true; | ||
1624 | break; | 1641 | break; |
1625 | case RADEON_PP_TXFORMAT_0: | 1642 | case RADEON_PP_TXFORMAT_0: |
1626 | case RADEON_PP_TXFORMAT_1: | 1643 | case RADEON_PP_TXFORMAT_1: |
@@ -1673,6 +1690,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1673 | } | 1690 | } |
1674 | track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); | 1691 | track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); |
1675 | track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); | 1692 | track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); |
1693 | track->tex_dirty = true; | ||
1676 | break; | 1694 | break; |
1677 | case RADEON_PP_CUBIC_FACES_0: | 1695 | case RADEON_PP_CUBIC_FACES_0: |
1678 | case RADEON_PP_CUBIC_FACES_1: | 1696 | case RADEON_PP_CUBIC_FACES_1: |
@@ -1683,6 +1701,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1683 | track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); | 1701 | track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); |
1684 | track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf); | 1702 | track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf); |
1685 | } | 1703 | } |
1704 | track->tex_dirty = true; | ||
1686 | break; | 1705 | break; |
1687 | default: | 1706 | default: |
1688 | printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", | 1707 | printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", |
@@ -3318,9 +3337,9 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) | |||
3318 | unsigned long size; | 3337 | unsigned long size; |
3319 | unsigned prim_walk; | 3338 | unsigned prim_walk; |
3320 | unsigned nverts; | 3339 | unsigned nverts; |
3321 | unsigned num_cb = track->num_cb; | 3340 | unsigned num_cb = track->cb_dirty ? track->num_cb : 0; |
3322 | 3341 | ||
3323 | if (!track->zb_cb_clear && !track->color_channel_mask && | 3342 | if (num_cb && !track->zb_cb_clear && !track->color_channel_mask && |
3324 | !track->blend_read_enable) | 3343 | !track->blend_read_enable) |
3325 | num_cb = 0; | 3344 | num_cb = 0; |
3326 | 3345 | ||
@@ -3341,7 +3360,9 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) | |||
3341 | return -EINVAL; | 3360 | return -EINVAL; |
3342 | } | 3361 | } |
3343 | } | 3362 | } |
3344 | if (track->z_enabled) { | 3363 | track->cb_dirty = false; |
3364 | |||
3365 | if (track->zb_dirty && track->z_enabled) { | ||
3345 | if (track->zb.robj == NULL) { | 3366 | if (track->zb.robj == NULL) { |
3346 | DRM_ERROR("[drm] No buffer for z buffer !\n"); | 3367 | DRM_ERROR("[drm] No buffer for z buffer !\n"); |
3347 | return -EINVAL; | 3368 | return -EINVAL; |
@@ -3358,6 +3379,28 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) | |||
3358 | return -EINVAL; | 3379 | return -EINVAL; |
3359 | } | 3380 | } |
3360 | } | 3381 | } |
3382 | track->zb_dirty = false; | ||
3383 | |||
3384 | if (track->aa_dirty && track->aaresolve) { | ||
3385 | if (track->aa.robj == NULL) { | ||
3386 | DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i); | ||
3387 | return -EINVAL; | ||
3388 | } | ||
3389 | /* I believe the format comes from colorbuffer0. */ | ||
3390 | size = track->aa.pitch * track->cb[0].cpp * track->maxy; | ||
3391 | size += track->aa.offset; | ||
3392 | if (size > radeon_bo_size(track->aa.robj)) { | ||
3393 | DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d " | ||
3394 | "(need %lu have %lu) !\n", i, size, | ||
3395 | radeon_bo_size(track->aa.robj)); | ||
3396 | DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n", | ||
3397 | i, track->aa.pitch, track->cb[0].cpp, | ||
3398 | track->aa.offset, track->maxy); | ||
3399 | return -EINVAL; | ||
3400 | } | ||
3401 | } | ||
3402 | track->aa_dirty = false; | ||
3403 | |||
3361 | prim_walk = (track->vap_vf_cntl >> 4) & 0x3; | 3404 | prim_walk = (track->vap_vf_cntl >> 4) & 0x3; |
3362 | if (track->vap_vf_cntl & (1 << 14)) { | 3405 | if (track->vap_vf_cntl & (1 << 14)) { |
3363 | nverts = track->vap_alt_nverts; | 3406 | nverts = track->vap_alt_nverts; |
@@ -3417,13 +3460,23 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) | |||
3417 | prim_walk); | 3460 | prim_walk); |
3418 | return -EINVAL; | 3461 | return -EINVAL; |
3419 | } | 3462 | } |
3420 | return r100_cs_track_texture_check(rdev, track); | 3463 | |
3464 | if (track->tex_dirty) { | ||
3465 | track->tex_dirty = false; | ||
3466 | return r100_cs_track_texture_check(rdev, track); | ||
3467 | } | ||
3468 | return 0; | ||
3421 | } | 3469 | } |
3422 | 3470 | ||
3423 | void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track) | 3471 | void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track) |
3424 | { | 3472 | { |
3425 | unsigned i, face; | 3473 | unsigned i, face; |
3426 | 3474 | ||
3475 | track->cb_dirty = true; | ||
3476 | track->zb_dirty = true; | ||
3477 | track->tex_dirty = true; | ||
3478 | track->aa_dirty = true; | ||
3479 | |||
3427 | if (rdev->family < CHIP_R300) { | 3480 | if (rdev->family < CHIP_R300) { |
3428 | track->num_cb = 1; | 3481 | track->num_cb = 1; |
3429 | if (rdev->family <= CHIP_RS200) | 3482 | if (rdev->family <= CHIP_RS200) |
@@ -3437,6 +3490,8 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track | |||
3437 | track->num_texture = 16; | 3490 | track->num_texture = 16; |
3438 | track->maxy = 4096; | 3491 | track->maxy = 4096; |
3439 | track->separate_cube = 0; | 3492 | track->separate_cube = 0; |
3493 | track->aaresolve = false; | ||
3494 | track->aa.robj = NULL; | ||
3440 | } | 3495 | } |
3441 | 3496 | ||
3442 | for (i = 0; i < track->num_cb; i++) { | 3497 | for (i = 0; i < track->num_cb; i++) { |
@@ -3746,8 +3801,6 @@ static int r100_startup(struct radeon_device *rdev) | |||
3746 | r100_mc_program(rdev); | 3801 | r100_mc_program(rdev); |
3747 | /* Resume clock */ | 3802 | /* Resume clock */ |
3748 | r100_clock_startup(rdev); | 3803 | r100_clock_startup(rdev); |
3749 | /* Initialize GPU configuration (# pipes, ...) */ | ||
3750 | // r100_gpu_init(rdev); | ||
3751 | /* Initialize GART (initialize after TTM so we can allocate | 3804 | /* Initialize GART (initialize after TTM so we can allocate |
3752 | * memory through TTM but finalize after TTM) */ | 3805 | * memory through TTM but finalize after TTM) */ |
3753 | r100_enable_bm(rdev); | 3806 | r100_enable_bm(rdev); |
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h index af65600e6564..2fef9de7f363 100644 --- a/drivers/gpu/drm/radeon/r100_track.h +++ b/drivers/gpu/drm/radeon/r100_track.h | |||
@@ -52,14 +52,7 @@ struct r100_cs_track_texture { | |||
52 | unsigned compress_format; | 52 | unsigned compress_format; |
53 | }; | 53 | }; |
54 | 54 | ||
55 | struct r100_cs_track_limits { | ||
56 | unsigned num_cb; | ||
57 | unsigned num_texture; | ||
58 | unsigned max_levels; | ||
59 | }; | ||
60 | |||
61 | struct r100_cs_track { | 55 | struct r100_cs_track { |
62 | struct radeon_device *rdev; | ||
63 | unsigned num_cb; | 56 | unsigned num_cb; |
64 | unsigned num_texture; | 57 | unsigned num_texture; |
65 | unsigned maxy; | 58 | unsigned maxy; |
@@ -73,11 +66,17 @@ struct r100_cs_track { | |||
73 | struct r100_cs_track_array arrays[11]; | 66 | struct r100_cs_track_array arrays[11]; |
74 | struct r100_cs_track_cb cb[R300_MAX_CB]; | 67 | struct r100_cs_track_cb cb[R300_MAX_CB]; |
75 | struct r100_cs_track_cb zb; | 68 | struct r100_cs_track_cb zb; |
69 | struct r100_cs_track_cb aa; | ||
76 | struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE]; | 70 | struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE]; |
77 | bool z_enabled; | 71 | bool z_enabled; |
78 | bool separate_cube; | 72 | bool separate_cube; |
79 | bool zb_cb_clear; | 73 | bool zb_cb_clear; |
80 | bool blend_read_enable; | 74 | bool blend_read_enable; |
75 | bool cb_dirty; | ||
76 | bool zb_dirty; | ||
77 | bool tex_dirty; | ||
78 | bool aa_dirty; | ||
79 | bool aaresolve; | ||
81 | }; | 80 | }; |
82 | 81 | ||
83 | int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track); | 82 | int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track); |
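The new cb_dirty/zb_dirty/tex_dirty/aa_dirty fields added to struct r100_cs_track let the r100/r200/r300 checkers skip re-validating state that no register write touched since the last call: packet parsing sets the matching flag when it changes color-buffer, depth-buffer, texture or AA state, and r100_cs_track_check() only validates the flagged state and then clears the flag. A rough standalone sketch of that idea, with simplified types rather than the real r100_cs_track layout:

	#include <stdbool.h>

	struct cs_track {
		bool cb_dirty;   /* color buffer state changed since last check */
		bool zb_dirty;   /* depth buffer state changed since last check */
		bool tex_dirty;  /* texture state changed since last check */
	};

	/* Parser side: mark only the state a register write actually touches. */
	static void track_colorpitch(struct cs_track *t, unsigned pitch)
	{
		(void)pitch;
		t->cb_dirty = true;
	}

	/* Check side: validate only dirty state, then clear the flag. */
	static int track_check(struct cs_track *t)
	{
		if (t->cb_dirty) {
			/* ... validate color buffer size/pitch here ... */
			t->cb_dirty = false;
		}
		if (t->zb_dirty) {
			/* ... validate depth buffer here ... */
			t->zb_dirty = false;
		}
		if (t->tex_dirty) {
			/* ... validate textures here ... */
			t->tex_dirty = false;
		}
		return 0;
	}

	int main(void)
	{
		struct cs_track t = { .cb_dirty = true, .zb_dirty = true, .tex_dirty = true };

		track_colorpitch(&t, 256);
		return track_check(&t);
	}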
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index d2408c395619..f24058300413 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c | |||
@@ -184,6 +184,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
184 | } | 184 | } |
185 | track->zb.robj = reloc->robj; | 185 | track->zb.robj = reloc->robj; |
186 | track->zb.offset = idx_value; | 186 | track->zb.offset = idx_value; |
187 | track->zb_dirty = true; | ||
187 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 188 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
188 | break; | 189 | break; |
189 | case RADEON_RB3D_COLOROFFSET: | 190 | case RADEON_RB3D_COLOROFFSET: |
@@ -196,6 +197,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
196 | } | 197 | } |
197 | track->cb[0].robj = reloc->robj; | 198 | track->cb[0].robj = reloc->robj; |
198 | track->cb[0].offset = idx_value; | 199 | track->cb[0].offset = idx_value; |
200 | track->cb_dirty = true; | ||
199 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 201 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
200 | break; | 202 | break; |
201 | case R200_PP_TXOFFSET_0: | 203 | case R200_PP_TXOFFSET_0: |
@@ -214,6 +216,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
214 | } | 216 | } |
215 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 217 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
216 | track->textures[i].robj = reloc->robj; | 218 | track->textures[i].robj = reloc->robj; |
219 | track->tex_dirty = true; | ||
217 | break; | 220 | break; |
218 | case R200_PP_CUBIC_OFFSET_F1_0: | 221 | case R200_PP_CUBIC_OFFSET_F1_0: |
219 | case R200_PP_CUBIC_OFFSET_F2_0: | 222 | case R200_PP_CUBIC_OFFSET_F2_0: |
@@ -257,9 +260,12 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
257 | track->textures[i].cube_info[face - 1].offset = idx_value; | 260 | track->textures[i].cube_info[face - 1].offset = idx_value; |
258 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 261 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
259 | track->textures[i].cube_info[face - 1].robj = reloc->robj; | 262 | track->textures[i].cube_info[face - 1].robj = reloc->robj; |
263 | track->tex_dirty = true; | ||
260 | break; | 264 | break; |
261 | case RADEON_RE_WIDTH_HEIGHT: | 265 | case RADEON_RE_WIDTH_HEIGHT: |
262 | track->maxy = ((idx_value >> 16) & 0x7FF); | 266 | track->maxy = ((idx_value >> 16) & 0x7FF); |
267 | track->cb_dirty = true; | ||
268 | track->zb_dirty = true; | ||
263 | break; | 269 | break; |
264 | case RADEON_RB3D_COLORPITCH: | 270 | case RADEON_RB3D_COLORPITCH: |
265 | r = r100_cs_packet_next_reloc(p, &reloc); | 271 | r = r100_cs_packet_next_reloc(p, &reloc); |
@@ -280,9 +286,11 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
280 | ib[idx] = tmp; | 286 | ib[idx] = tmp; |
281 | 287 | ||
282 | track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; | 288 | track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; |
289 | track->cb_dirty = true; | ||
283 | break; | 290 | break; |
284 | case RADEON_RB3D_DEPTHPITCH: | 291 | case RADEON_RB3D_DEPTHPITCH: |
285 | track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK; | 292 | track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK; |
293 | track->zb_dirty = true; | ||
286 | break; | 294 | break; |
287 | case RADEON_RB3D_CNTL: | 295 | case RADEON_RB3D_CNTL: |
288 | switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { | 296 | switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { |
@@ -312,6 +320,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
312 | } | 320 | } |
313 | 321 | ||
314 | track->z_enabled = !!(idx_value & RADEON_Z_ENABLE); | 322 | track->z_enabled = !!(idx_value & RADEON_Z_ENABLE); |
323 | track->cb_dirty = true; | ||
324 | track->zb_dirty = true; | ||
315 | break; | 325 | break; |
316 | case RADEON_RB3D_ZSTENCILCNTL: | 326 | case RADEON_RB3D_ZSTENCILCNTL: |
317 | switch (idx_value & 0xf) { | 327 | switch (idx_value & 0xf) { |
@@ -329,6 +339,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
329 | default: | 339 | default: |
330 | break; | 340 | break; |
331 | } | 341 | } |
342 | track->zb_dirty = true; | ||
332 | break; | 343 | break; |
333 | case RADEON_RB3D_ZPASS_ADDR: | 344 | case RADEON_RB3D_ZPASS_ADDR: |
334 | r = r100_cs_packet_next_reloc(p, &reloc); | 345 | r = r100_cs_packet_next_reloc(p, &reloc); |
@@ -345,6 +356,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
345 | uint32_t temp = idx_value >> 4; | 356 | uint32_t temp = idx_value >> 4; |
346 | for (i = 0; i < track->num_texture; i++) | 357 | for (i = 0; i < track->num_texture; i++) |
347 | track->textures[i].enabled = !!(temp & (1 << i)); | 358 | track->textures[i].enabled = !!(temp & (1 << i)); |
359 | track->tex_dirty = true; | ||
348 | } | 360 | } |
349 | break; | 361 | break; |
350 | case RADEON_SE_VF_CNTL: | 362 | case RADEON_SE_VF_CNTL: |
@@ -369,6 +381,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
369 | i = (reg - R200_PP_TXSIZE_0) / 32; | 381 | i = (reg - R200_PP_TXSIZE_0) / 32; |
370 | track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1; | 382 | track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1; |
371 | track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; | 383 | track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; |
384 | track->tex_dirty = true; | ||
372 | break; | 385 | break; |
373 | case R200_PP_TXPITCH_0: | 386 | case R200_PP_TXPITCH_0: |
374 | case R200_PP_TXPITCH_1: | 387 | case R200_PP_TXPITCH_1: |
@@ -378,6 +391,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
378 | case R200_PP_TXPITCH_5: | 391 | case R200_PP_TXPITCH_5: |
379 | i = (reg - R200_PP_TXPITCH_0) / 32; | 392 | i = (reg - R200_PP_TXPITCH_0) / 32; |
380 | track->textures[i].pitch = idx_value + 32; | 393 | track->textures[i].pitch = idx_value + 32; |
394 | track->tex_dirty = true; | ||
381 | break; | 395 | break; |
382 | case R200_PP_TXFILTER_0: | 396 | case R200_PP_TXFILTER_0: |
383 | case R200_PP_TXFILTER_1: | 397 | case R200_PP_TXFILTER_1: |
@@ -394,6 +408,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
394 | tmp = (idx_value >> 27) & 0x7; | 408 | tmp = (idx_value >> 27) & 0x7; |
395 | if (tmp == 2 || tmp == 6) | 409 | if (tmp == 2 || tmp == 6) |
396 | track->textures[i].roundup_h = false; | 410 | track->textures[i].roundup_h = false; |
411 | track->tex_dirty = true; | ||
397 | break; | 412 | break; |
398 | case R200_PP_TXMULTI_CTL_0: | 413 | case R200_PP_TXMULTI_CTL_0: |
399 | case R200_PP_TXMULTI_CTL_1: | 414 | case R200_PP_TXMULTI_CTL_1: |
@@ -432,6 +447,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
432 | track->textures[i].tex_coord_type = 1; | 447 | track->textures[i].tex_coord_type = 1; |
433 | break; | 448 | break; |
434 | } | 449 | } |
450 | track->tex_dirty = true; | ||
435 | break; | 451 | break; |
436 | case R200_PP_TXFORMAT_0: | 452 | case R200_PP_TXFORMAT_0: |
437 | case R200_PP_TXFORMAT_1: | 453 | case R200_PP_TXFORMAT_1: |
@@ -488,6 +504,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
488 | } | 504 | } |
489 | track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); | 505 | track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); |
490 | track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); | 506 | track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); |
507 | track->tex_dirty = true; | ||
491 | break; | 508 | break; |
492 | case R200_PP_CUBIC_FACES_0: | 509 | case R200_PP_CUBIC_FACES_0: |
493 | case R200_PP_CUBIC_FACES_1: | 510 | case R200_PP_CUBIC_FACES_1: |
@@ -501,6 +518,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
501 | track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); | 518 | track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); |
502 | track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf); | 519 | track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf); |
503 | } | 520 | } |
521 | track->tex_dirty = true; | ||
504 | break; | 522 | break; |
505 | default: | 523 | default: |
506 | printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", | 524 | printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 55fe5ba7def3..069efa8c8ecf 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -667,6 +667,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
667 | } | 667 | } |
668 | track->cb[i].robj = reloc->robj; | 668 | track->cb[i].robj = reloc->robj; |
669 | track->cb[i].offset = idx_value; | 669 | track->cb[i].offset = idx_value; |
670 | track->cb_dirty = true; | ||
670 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 671 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
671 | break; | 672 | break; |
672 | case R300_ZB_DEPTHOFFSET: | 673 | case R300_ZB_DEPTHOFFSET: |
@@ -679,6 +680,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
679 | } | 680 | } |
680 | track->zb.robj = reloc->robj; | 681 | track->zb.robj = reloc->robj; |
681 | track->zb.offset = idx_value; | 682 | track->zb.offset = idx_value; |
683 | track->zb_dirty = true; | ||
682 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 684 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
683 | break; | 685 | break; |
684 | case R300_TX_OFFSET_0: | 686 | case R300_TX_OFFSET_0: |
@@ -717,6 +719,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
717 | tmp |= tile_flags; | 719 | tmp |= tile_flags; |
718 | ib[idx] = tmp; | 720 | ib[idx] = tmp; |
719 | track->textures[i].robj = reloc->robj; | 721 | track->textures[i].robj = reloc->robj; |
722 | track->tex_dirty = true; | ||
720 | break; | 723 | break; |
721 | /* Tracked registers */ | 724 | /* Tracked registers */ |
722 | case 0x2084: | 725 | case 0x2084: |
@@ -743,6 +746,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
743 | if (p->rdev->family < CHIP_RV515) { | 746 | if (p->rdev->family < CHIP_RV515) { |
744 | track->maxy -= 1440; | 747 | track->maxy -= 1440; |
745 | } | 748 | } |
749 | track->cb_dirty = true; | ||
750 | track->zb_dirty = true; | ||
746 | break; | 751 | break; |
747 | case 0x4E00: | 752 | case 0x4E00: |
748 | /* RB3D_CCTL */ | 753 | /* RB3D_CCTL */ |
@@ -752,6 +757,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
752 | return -EINVAL; | 757 | return -EINVAL; |
753 | } | 758 | } |
754 | track->num_cb = ((idx_value >> 5) & 0x3) + 1; | 759 | track->num_cb = ((idx_value >> 5) & 0x3) + 1; |
760 | track->cb_dirty = true; | ||
755 | break; | 761 | break; |
756 | case 0x4E38: | 762 | case 0x4E38: |
757 | case 0x4E3C: | 763 | case 0x4E3C: |
@@ -814,6 +820,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
814 | ((idx_value >> 21) & 0xF)); | 820 | ((idx_value >> 21) & 0xF)); |
815 | return -EINVAL; | 821 | return -EINVAL; |
816 | } | 822 | } |
823 | track->cb_dirty = true; | ||
817 | break; | 824 | break; |
818 | case 0x4F00: | 825 | case 0x4F00: |
819 | /* ZB_CNTL */ | 826 | /* ZB_CNTL */ |
@@ -822,6 +829,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
822 | } else { | 829 | } else { |
823 | track->z_enabled = false; | 830 | track->z_enabled = false; |
824 | } | 831 | } |
832 | track->zb_dirty = true; | ||
825 | break; | 833 | break; |
826 | case 0x4F10: | 834 | case 0x4F10: |
827 | /* ZB_FORMAT */ | 835 | /* ZB_FORMAT */ |
@@ -838,6 +846,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
838 | (idx_value & 0xF)); | 846 | (idx_value & 0xF)); |
839 | return -EINVAL; | 847 | return -EINVAL; |
840 | } | 848 | } |
849 | track->zb_dirty = true; | ||
841 | break; | 850 | break; |
842 | case 0x4F24: | 851 | case 0x4F24: |
843 | /* ZB_DEPTHPITCH */ | 852 | /* ZB_DEPTHPITCH */ |
@@ -861,14 +870,17 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
861 | ib[idx] = tmp; | 870 | ib[idx] = tmp; |
862 | 871 | ||
863 | track->zb.pitch = idx_value & 0x3FFC; | 872 | track->zb.pitch = idx_value & 0x3FFC; |
873 | track->zb_dirty = true; | ||
864 | break; | 874 | break; |
865 | case 0x4104: | 875 | case 0x4104: |
876 | /* TX_ENABLE */ | ||
866 | for (i = 0; i < 16; i++) { | 877 | for (i = 0; i < 16; i++) { |
867 | bool enabled; | 878 | bool enabled; |
868 | 879 | ||
869 | enabled = !!(idx_value & (1 << i)); | 880 | enabled = !!(idx_value & (1 << i)); |
870 | track->textures[i].enabled = enabled; | 881 | track->textures[i].enabled = enabled; |
871 | } | 882 | } |
883 | track->tex_dirty = true; | ||
872 | break; | 884 | break; |
873 | case 0x44C0: | 885 | case 0x44C0: |
874 | case 0x44C4: | 886 | case 0x44C4: |
@@ -898,6 +910,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
898 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | 910 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; |
899 | break; | 911 | break; |
900 | case R300_TX_FORMAT_X16: | 912 | case R300_TX_FORMAT_X16: |
913 | case R300_TX_FORMAT_FL_I16: | ||
901 | case R300_TX_FORMAT_Y8X8: | 914 | case R300_TX_FORMAT_Y8X8: |
902 | case R300_TX_FORMAT_Z5Y6X5: | 915 | case R300_TX_FORMAT_Z5Y6X5: |
903 | case R300_TX_FORMAT_Z6Y5X5: | 916 | case R300_TX_FORMAT_Z6Y5X5: |
@@ -910,6 +923,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
910 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | 923 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; |
911 | break; | 924 | break; |
912 | case R300_TX_FORMAT_Y16X16: | 925 | case R300_TX_FORMAT_Y16X16: |
926 | case R300_TX_FORMAT_FL_I16A16: | ||
913 | case R300_TX_FORMAT_Z11Y11X10: | 927 | case R300_TX_FORMAT_Z11Y11X10: |
914 | case R300_TX_FORMAT_Z10Y11X11: | 928 | case R300_TX_FORMAT_Z10Y11X11: |
915 | case R300_TX_FORMAT_W8Z8Y8X8: | 929 | case R300_TX_FORMAT_W8Z8Y8X8: |
@@ -951,8 +965,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
951 | DRM_ERROR("Invalid texture format %u\n", | 965 | DRM_ERROR("Invalid texture format %u\n", |
952 | (idx_value & 0x1F)); | 966 | (idx_value & 0x1F)); |
953 | return -EINVAL; | 967 | return -EINVAL; |
954 | break; | ||
955 | } | 968 | } |
969 | track->tex_dirty = true; | ||
956 | break; | 970 | break; |
957 | case 0x4400: | 971 | case 0x4400: |
958 | case 0x4404: | 972 | case 0x4404: |
@@ -980,6 +994,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
980 | if (tmp == 2 || tmp == 4 || tmp == 6) { | 994 | if (tmp == 2 || tmp == 4 || tmp == 6) { |
981 | track->textures[i].roundup_h = false; | 995 | track->textures[i].roundup_h = false; |
982 | } | 996 | } |
997 | track->tex_dirty = true; | ||
983 | break; | 998 | break; |
984 | case 0x4500: | 999 | case 0x4500: |
985 | case 0x4504: | 1000 | case 0x4504: |
@@ -1017,6 +1032,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1017 | DRM_ERROR("Forbidden bit TXFORMAT_MSB\n"); | 1032 | DRM_ERROR("Forbidden bit TXFORMAT_MSB\n"); |
1018 | return -EINVAL; | 1033 | return -EINVAL; |
1019 | } | 1034 | } |
1035 | track->tex_dirty = true; | ||
1020 | break; | 1036 | break; |
1021 | case 0x4480: | 1037 | case 0x4480: |
1022 | case 0x4484: | 1038 | case 0x4484: |
@@ -1046,6 +1062,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1046 | track->textures[i].use_pitch = !!tmp; | 1062 | track->textures[i].use_pitch = !!tmp; |
1047 | tmp = (idx_value >> 22) & 0xF; | 1063 | tmp = (idx_value >> 22) & 0xF; |
1048 | track->textures[i].txdepth = tmp; | 1064 | track->textures[i].txdepth = tmp; |
1065 | track->tex_dirty = true; | ||
1049 | break; | 1066 | break; |
1050 | case R300_ZB_ZPASS_ADDR: | 1067 | case R300_ZB_ZPASS_ADDR: |
1051 | r = r100_cs_packet_next_reloc(p, &reloc); | 1068 | r = r100_cs_packet_next_reloc(p, &reloc); |
@@ -1060,6 +1077,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1060 | case 0x4e0c: | 1077 | case 0x4e0c: |
1061 | /* RB3D_COLOR_CHANNEL_MASK */ | 1078 | /* RB3D_COLOR_CHANNEL_MASK */ |
1062 | track->color_channel_mask = idx_value; | 1079 | track->color_channel_mask = idx_value; |
1080 | track->cb_dirty = true; | ||
1063 | break; | 1081 | break; |
1064 | case 0x43a4: | 1082 | case 0x43a4: |
1065 | /* SC_HYPERZ_EN */ | 1083 | /* SC_HYPERZ_EN */ |
@@ -1073,6 +1091,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1073 | case 0x4f1c: | 1091 | case 0x4f1c: |
1074 | /* ZB_BW_CNTL */ | 1092 | /* ZB_BW_CNTL */ |
1075 | track->zb_cb_clear = !!(idx_value & (1 << 5)); | 1093 | track->zb_cb_clear = !!(idx_value & (1 << 5)); |
1094 | track->cb_dirty = true; | ||
1095 | track->zb_dirty = true; | ||
1076 | if (p->rdev->hyperz_filp != p->filp) { | 1096 | if (p->rdev->hyperz_filp != p->filp) { |
1077 | if (idx_value & (R300_HIZ_ENABLE | | 1097 | if (idx_value & (R300_HIZ_ENABLE | |
1078 | R300_RD_COMP_ENABLE | | 1098 | R300_RD_COMP_ENABLE | |
@@ -1084,8 +1104,28 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1084 | case 0x4e04: | 1104 | case 0x4e04: |
1085 | /* RB3D_BLENDCNTL */ | 1105 | /* RB3D_BLENDCNTL */ |
1086 | track->blend_read_enable = !!(idx_value & (1 << 2)); | 1106 | track->blend_read_enable = !!(idx_value & (1 << 2)); |
1107 | track->cb_dirty = true; | ||
1108 | break; | ||
1109 | case R300_RB3D_AARESOLVE_OFFSET: | ||
1110 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
1111 | if (r) { | ||
1112 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", | ||
1113 | idx, reg); | ||
1114 | r100_cs_dump_packet(p, pkt); | ||
1115 | return r; | ||
1116 | } | ||
1117 | track->aa.robj = reloc->robj; | ||
1118 | track->aa.offset = idx_value; | ||
1119 | track->aa_dirty = true; | ||
1120 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | ||
1121 | break; | ||
1122 | case R300_RB3D_AARESOLVE_PITCH: | ||
1123 | track->aa.pitch = idx_value & 0x3FFE; | ||
1124 | track->aa_dirty = true; | ||
1087 | break; | 1125 | break; |
1088 | case 0x4f28: /* ZB_DEPTHCLEARVALUE */ | 1126 | case R300_RB3D_AARESOLVE_CTL: |
1127 | track->aaresolve = idx_value & 0x1; | ||
1128 | track->aa_dirty = true; | ||
1089 | break; | 1129 | break; |
1090 | case 0x4f30: /* ZB_MASK_OFFSET */ | 1130 | case 0x4f30: /* ZB_MASK_OFFSET */ |
1091 | case 0x4f34: /* ZB_ZMASK_PITCH */ | 1131 | case 0x4f34: /* ZB_ZMASK_PITCH */ |
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h index 1a0d5362cd79..f0bce399c9f3 100644 --- a/drivers/gpu/drm/radeon/r300_reg.h +++ b/drivers/gpu/drm/radeon/r300_reg.h | |||
@@ -1371,6 +1371,8 @@ | |||
1371 | #define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */ | 1371 | #define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */ |
1372 | #define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */ | 1372 | #define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */ |
1373 | 1373 | ||
1374 | #define R300_RB3D_AARESOLVE_OFFSET 0x4E80 | ||
1375 | #define R300_RB3D_AARESOLVE_PITCH 0x4E84 | ||
1374 | #define R300_RB3D_AARESOLVE_CTL 0x4E88 | 1376 | #define R300_RB3D_AARESOLVE_CTL 0x4E88 |
1375 | /* gap */ | 1377 | /* gap */ |
1376 | 1378 | ||
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 1cd56dc8c8ab..b409b24207a1 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -2106,7 +2106,11 @@ static int r600_cp_load_microcode(struct radeon_device *rdev) | |||
2106 | 2106 | ||
2107 | r600_cp_stop(rdev); | 2107 | r600_cp_stop(rdev); |
2108 | 2108 | ||
2109 | WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3)); | 2109 | WREG32(CP_RB_CNTL, |
2110 | #ifdef __BIG_ENDIAN | ||
2111 | BUF_SWAP_32BIT | | ||
2112 | #endif | ||
2113 | RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3)); | ||
2110 | 2114 | ||
2111 | /* Reset cp */ | 2115 | /* Reset cp */ |
2112 | WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); | 2116 | WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); |
@@ -2193,7 +2197,11 @@ int r600_cp_resume(struct radeon_device *rdev) | |||
2193 | WREG32(CP_RB_WPTR, 0); | 2197 | WREG32(CP_RB_WPTR, 0); |
2194 | 2198 | ||
2195 | /* set the wb address whether it's enabled or not */ | 2199 | /* set the wb address whether it's enabled or not */ |
2196 | WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); | 2200 | WREG32(CP_RB_RPTR_ADDR, |
2201 | #ifdef __BIG_ENDIAN | ||
2202 | RB_RPTR_SWAP(2) | | ||
2203 | #endif | ||
2204 | ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC)); | ||
2197 | WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); | 2205 | WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); |
2198 | WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); | 2206 | WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); |
2199 | 2207 | ||
@@ -2629,7 +2637,11 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) | |||
2629 | { | 2637 | { |
2630 | /* FIXME: implement */ | 2638 | /* FIXME: implement */ |
2631 | radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); | 2639 | radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); |
2632 | radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC); | 2640 | radeon_ring_write(rdev, |
2641 | #ifdef __BIG_ENDIAN | ||
2642 | (2 << 0) | | ||
2643 | #endif | ||
2644 | (ib->gpu_addr & 0xFFFFFFFC)); | ||
2633 | radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF); | 2645 | radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF); |
2634 | radeon_ring_write(rdev, ib->length_dw); | 2646 | radeon_ring_write(rdev, ib->length_dw); |
2635 | } | 2647 | } |
@@ -3305,8 +3317,8 @@ restart_ih: | |||
3305 | while (rptr != wptr) { | 3317 | while (rptr != wptr) { |
3306 | /* wptr/rptr are in bytes! */ | 3318 | /* wptr/rptr are in bytes! */ |
3307 | ring_index = rptr / 4; | 3319 | ring_index = rptr / 4; |
3308 | src_id = rdev->ih.ring[ring_index] & 0xff; | 3320 | src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff; |
3309 | src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff; | 3321 | src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff; |
3310 | 3322 | ||
3311 | switch (src_id) { | 3323 | switch (src_id) { |
3312 | case 1: /* D1 vblank/vline */ | 3324 | case 1: /* D1 vblank/vline */ |
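Several of the r600.c hunks above are big-endian fixes: CP_RB_CNTL and CP_RB_RPTR_ADDR gain BUF_SWAP_32BIT / RB_RPTR_SWAP(2) bits under #ifdef __BIG_ENDIAN, and the interrupt handler now passes IH ring dwords through le32_to_cpu() because the GPU writes them in little-endian order. A small userspace sketch of that read-side conversion, assuming a stand-in for the kernel's le32_to_cpu():

	#include <stdint.h>
	#include <stdio.h>

	/* Userspace stand-in for le32_to_cpu(): the GPU stores little-endian
	 * dwords, so a big-endian CPU must byte-swap them on read. */
	static uint32_t le32_to_cpu_sketch(uint32_t v)
	{
		const uint8_t *b = (const uint8_t *)&v;

		return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
		       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
	}

	int main(void)
	{
		/* Pretend this dword was DMA'd into the IH ring by the GPU. */
		uint32_t ring_entry = 0;

		((uint8_t *)&ring_entry)[0] = 0x01;	/* src_id 1 = D1 vblank */

		uint32_t src_id = le32_to_cpu_sketch(ring_entry) & 0xff;

		printf("src_id = %u\n", src_id);
		return 0;
	}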
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c index ca5c29f70779..7f1043448d25 100644 --- a/drivers/gpu/drm/radeon/r600_blit.c +++ b/drivers/gpu/drm/radeon/r600_blit.c | |||
@@ -137,9 +137,9 @@ set_shaders(struct drm_device *dev) | |||
137 | ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256); | 137 | ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256); |
138 | 138 | ||
139 | for (i = 0; i < r6xx_vs_size; i++) | 139 | for (i = 0; i < r6xx_vs_size; i++) |
140 | vs[i] = r6xx_vs[i]; | 140 | vs[i] = cpu_to_le32(r6xx_vs[i]); |
141 | for (i = 0; i < r6xx_ps_size; i++) | 141 | for (i = 0; i < r6xx_ps_size; i++) |
142 | ps[i] = r6xx_ps[i]; | 142 | ps[i] = cpu_to_le32(r6xx_ps[i]); |
143 | 143 | ||
144 | dev_priv->blit_vb->used = 512; | 144 | dev_priv->blit_vb->used = 512; |
145 | 145 | ||
@@ -192,6 +192,9 @@ set_vtx_resource(drm_radeon_private_t *dev_priv, u64 gpu_addr) | |||
192 | DRM_DEBUG("\n"); | 192 | DRM_DEBUG("\n"); |
193 | 193 | ||
194 | sq_vtx_constant_word2 = (((gpu_addr >> 32) & 0xff) | (16 << 8)); | 194 | sq_vtx_constant_word2 = (((gpu_addr >> 32) & 0xff) | (16 << 8)); |
195 | #ifdef __BIG_ENDIAN | ||
196 | sq_vtx_constant_word2 |= (2 << 30); | ||
197 | #endif | ||
195 | 198 | ||
196 | BEGIN_RING(9); | 199 | BEGIN_RING(9); |
197 | OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7)); | 200 | OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7)); |
@@ -291,7 +294,11 @@ draw_auto(drm_radeon_private_t *dev_priv) | |||
291 | OUT_RING(DI_PT_RECTLIST); | 294 | OUT_RING(DI_PT_RECTLIST); |
292 | 295 | ||
293 | OUT_RING(CP_PACKET3(R600_IT_INDEX_TYPE, 0)); | 296 | OUT_RING(CP_PACKET3(R600_IT_INDEX_TYPE, 0)); |
297 | #ifdef __BIG_ENDIAN | ||
298 | OUT_RING((2 << 2) | DI_INDEX_SIZE_16_BIT); | ||
299 | #else | ||
294 | OUT_RING(DI_INDEX_SIZE_16_BIT); | 300 | OUT_RING(DI_INDEX_SIZE_16_BIT); |
301 | #endif | ||
295 | 302 | ||
296 | OUT_RING(CP_PACKET3(R600_IT_NUM_INSTANCES, 0)); | 303 | OUT_RING(CP_PACKET3(R600_IT_NUM_INSTANCES, 0)); |
297 | OUT_RING(1); | 304 | OUT_RING(1); |
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index 16e211a614d7..2fed91750126 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c | |||
@@ -54,7 +54,7 @@ set_render_target(struct radeon_device *rdev, int format, | |||
54 | if (h < 8) | 54 | if (h < 8) |
55 | h = 8; | 55 | h = 8; |
56 | 56 | ||
57 | cb_color_info = ((format << 2) | (1 << 27)); | 57 | cb_color_info = ((format << 2) | (1 << 27) | (1 << 8)); |
58 | pitch = (w / 8) - 1; | 58 | pitch = (w / 8) - 1; |
59 | slice = ((w * h) / 64) - 1; | 59 | slice = ((w * h) / 64) - 1; |
60 | 60 | ||
@@ -165,6 +165,9 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) | |||
165 | u32 sq_vtx_constant_word2; | 165 | u32 sq_vtx_constant_word2; |
166 | 166 | ||
167 | sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8)); | 167 | sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8)); |
168 | #ifdef __BIG_ENDIAN | ||
169 | sq_vtx_constant_word2 |= (2 << 30); | ||
170 | #endif | ||
168 | 171 | ||
169 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7)); | 172 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7)); |
170 | radeon_ring_write(rdev, 0x460); | 173 | radeon_ring_write(rdev, 0x460); |
@@ -199,7 +202,7 @@ set_tex_resource(struct radeon_device *rdev, | |||
199 | if (h < 1) | 202 | if (h < 1) |
200 | h = 1; | 203 | h = 1; |
201 | 204 | ||
202 | sq_tex_resource_word0 = (1 << 0); | 205 | sq_tex_resource_word0 = (1 << 0) | (1 << 3); |
203 | sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) | | 206 | sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) | |
204 | ((w - 1) << 19)); | 207 | ((w - 1) << 19)); |
205 | 208 | ||
@@ -253,7 +256,11 @@ draw_auto(struct radeon_device *rdev) | |||
253 | radeon_ring_write(rdev, DI_PT_RECTLIST); | 256 | radeon_ring_write(rdev, DI_PT_RECTLIST); |
254 | 257 | ||
255 | radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0)); | 258 | radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0)); |
256 | radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT); | 259 | radeon_ring_write(rdev, |
260 | #ifdef __BIG_ENDIAN | ||
261 | (2 << 2) | | ||
262 | #endif | ||
263 | DI_INDEX_SIZE_16_BIT); | ||
257 | 264 | ||
258 | radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0)); | 265 | radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0)); |
259 | radeon_ring_write(rdev, 1); | 266 | radeon_ring_write(rdev, 1); |
@@ -424,7 +431,11 @@ set_default_state(struct radeon_device *rdev) | |||
424 | dwords = ALIGN(rdev->r600_blit.state_len, 0x10); | 431 | dwords = ALIGN(rdev->r600_blit.state_len, 0x10); |
425 | gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset; | 432 | gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset; |
426 | radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); | 433 | radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); |
427 | radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC); | 434 | radeon_ring_write(rdev, |
435 | #ifdef __BIG_ENDIAN | ||
436 | (2 << 0) | | ||
437 | #endif | ||
438 | (gpu_addr & 0xFFFFFFFC)); | ||
428 | radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF); | 439 | radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF); |
429 | radeon_ring_write(rdev, dwords); | 440 | radeon_ring_write(rdev, dwords); |
430 | 441 | ||
@@ -467,7 +478,7 @@ static inline uint32_t i2f(uint32_t input) | |||
467 | int r600_blit_init(struct radeon_device *rdev) | 478 | int r600_blit_init(struct radeon_device *rdev) |
468 | { | 479 | { |
469 | u32 obj_size; | 480 | u32 obj_size; |
470 | int r, dwords; | 481 | int i, r, dwords; |
471 | void *ptr; | 482 | void *ptr; |
472 | u32 packet2s[16]; | 483 | u32 packet2s[16]; |
473 | int num_packet2s = 0; | 484 | int num_packet2s = 0; |
@@ -486,7 +497,7 @@ int r600_blit_init(struct radeon_device *rdev) | |||
486 | 497 | ||
487 | dwords = rdev->r600_blit.state_len; | 498 | dwords = rdev->r600_blit.state_len; |
488 | while (dwords & 0xf) { | 499 | while (dwords & 0xf) { |
489 | packet2s[num_packet2s++] = PACKET2(0); | 500 | packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0)); |
490 | dwords++; | 501 | dwords++; |
491 | } | 502 | } |
492 | 503 | ||
@@ -529,8 +540,10 @@ int r600_blit_init(struct radeon_device *rdev) | |||
529 | if (num_packet2s) | 540 | if (num_packet2s) |
530 | memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), | 541 | memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), |
531 | packet2s, num_packet2s * 4); | 542 | packet2s, num_packet2s * 4); |
532 | memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4); | 543 | for (i = 0; i < r6xx_vs_size; i++) |
533 | memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4); | 544 | *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(r6xx_vs[i]); |
545 | for (i = 0; i < r6xx_ps_size; i++) | ||
546 | *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(r6xx_ps[i]); | ||
534 | radeon_bo_kunmap(rdev->r600_blit.shader_obj); | 547 | radeon_bo_kunmap(rdev->r600_blit.shader_obj); |
535 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | 548 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); |
536 | 549 | ||
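The r600_blit_kms.c change above stops memcpy()ing the blit shaders into the buffer object and instead stores each dword with cpu_to_le32(), since the GPU fetches little-endian dwords regardless of host byte order. A userspace sketch of that per-dword store, using a hypothetical store_le32() helper in place of the kernel macro:

	#include <stdint.h>
	#include <stddef.h>

	/* Store a dword in the byte order the (little-endian) GPU expects,
	 * regardless of host endianness. */
	static void store_le32(uint8_t *dst, uint32_t v)
	{
		dst[0] = v & 0xff;
		dst[1] = (v >> 8) & 0xff;
		dst[2] = (v >> 16) & 0xff;
		dst[3] = (v >> 24) & 0xff;
	}

	/* Copy a shader dword by dword instead of a raw memcpy(). */
	static void copy_shader_le(uint8_t *vram, const uint32_t *shader, size_t ndwords)
	{
		for (size_t i = 0; i < ndwords; i++)
			store_le32(vram + i * 4, shader[i]);
	}

	int main(void)
	{
		const uint32_t shader[] = { 0x00000000, 0x3c000000, 0x68cd1000 };
		uint8_t vram[sizeof(shader)];

		copy_shader_le(vram, shader, sizeof(shader) / sizeof(shader[0]));
		return 0;
	}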
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c index e8151c1d55b2..2d1f6c5ee2a7 100644 --- a/drivers/gpu/drm/radeon/r600_blit_shaders.c +++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c | |||
@@ -684,7 +684,11 @@ const u32 r6xx_vs[] = | |||
684 | 0x00000000, | 684 | 0x00000000, |
685 | 0x3c000000, | 685 | 0x3c000000, |
686 | 0x68cd1000, | 686 | 0x68cd1000, |
687 | #ifdef __BIG_ENDIAN | ||
688 | 0x000a0000, | ||
689 | #else | ||
687 | 0x00080000, | 690 | 0x00080000, |
691 | #endif | ||
688 | 0x00000000, | 692 | 0x00000000, |
689 | }; | 693 | }; |
690 | 694 | ||
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c index 4f4cd8b286d5..c3ab959bdc7c 100644 --- a/drivers/gpu/drm/radeon/r600_cp.c +++ b/drivers/gpu/drm/radeon/r600_cp.c | |||
@@ -396,6 +396,9 @@ static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv) | |||
396 | r600_do_cp_stop(dev_priv); | 396 | r600_do_cp_stop(dev_priv); |
397 | 397 | ||
398 | RADEON_WRITE(R600_CP_RB_CNTL, | 398 | RADEON_WRITE(R600_CP_RB_CNTL, |
399 | #ifdef __BIG_ENDIAN | ||
400 | R600_BUF_SWAP_32BIT | | ||
401 | #endif | ||
399 | R600_RB_NO_UPDATE | | 402 | R600_RB_NO_UPDATE | |
400 | R600_RB_BLKSZ(15) | | 403 | R600_RB_BLKSZ(15) | |
401 | R600_RB_BUFSZ(3)); | 404 | R600_RB_BUFSZ(3)); |
@@ -486,9 +489,12 @@ static void r700_cp_load_microcode(drm_radeon_private_t *dev_priv) | |||
486 | r600_do_cp_stop(dev_priv); | 489 | r600_do_cp_stop(dev_priv); |
487 | 490 | ||
488 | RADEON_WRITE(R600_CP_RB_CNTL, | 491 | RADEON_WRITE(R600_CP_RB_CNTL, |
492 | #ifdef __BIG_ENDIAN | ||
493 | R600_BUF_SWAP_32BIT | | ||
494 | #endif | ||
489 | R600_RB_NO_UPDATE | | 495 | R600_RB_NO_UPDATE | |
490 | (15 << 8) | | 496 | R600_RB_BLKSZ(15) | |
491 | (3 << 0)); | 497 | R600_RB_BUFSZ(3)); |
492 | 498 | ||
493 | RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP); | 499 | RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP); |
494 | RADEON_READ(R600_GRBM_SOFT_RESET); | 500 | RADEON_READ(R600_GRBM_SOFT_RESET); |
@@ -550,8 +556,12 @@ static void r600_test_writeback(drm_radeon_private_t *dev_priv) | |||
550 | 556 | ||
551 | if (!dev_priv->writeback_works) { | 557 | if (!dev_priv->writeback_works) { |
552 | /* Disable writeback to avoid unnecessary bus master transfer */ | 558 | /* Disable writeback to avoid unnecessary bus master transfer */ |
553 | RADEON_WRITE(R600_CP_RB_CNTL, RADEON_READ(R600_CP_RB_CNTL) | | 559 | RADEON_WRITE(R600_CP_RB_CNTL, |
554 | RADEON_RB_NO_UPDATE); | 560 | #ifdef __BIG_ENDIAN |
561 | R600_BUF_SWAP_32BIT | | ||
562 | #endif | ||
563 | RADEON_READ(R600_CP_RB_CNTL) | | ||
564 | R600_RB_NO_UPDATE); | ||
555 | RADEON_WRITE(R600_SCRATCH_UMSK, 0); | 565 | RADEON_WRITE(R600_SCRATCH_UMSK, 0); |
556 | } | 566 | } |
557 | } | 567 | } |
@@ -575,7 +585,11 @@ int r600_do_engine_reset(struct drm_device *dev) | |||
575 | 585 | ||
576 | RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0); | 586 | RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0); |
577 | cp_rb_cntl = RADEON_READ(R600_CP_RB_CNTL); | 587 | cp_rb_cntl = RADEON_READ(R600_CP_RB_CNTL); |
578 | RADEON_WRITE(R600_CP_RB_CNTL, R600_RB_RPTR_WR_ENA); | 588 | RADEON_WRITE(R600_CP_RB_CNTL, |
589 | #ifdef __BIG_ENDIAN | ||
590 | R600_BUF_SWAP_32BIT | | ||
591 | #endif | ||
592 | R600_RB_RPTR_WR_ENA); | ||
579 | 593 | ||
580 | RADEON_WRITE(R600_CP_RB_RPTR_WR, cp_ptr); | 594 | RADEON_WRITE(R600_CP_RB_RPTR_WR, cp_ptr); |
581 | RADEON_WRITE(R600_CP_RB_WPTR, cp_ptr); | 595 | RADEON_WRITE(R600_CP_RB_WPTR, cp_ptr); |
@@ -1838,7 +1852,10 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev, | |||
1838 | + dev_priv->gart_vm_start; | 1852 | + dev_priv->gart_vm_start; |
1839 | } | 1853 | } |
1840 | RADEON_WRITE(R600_CP_RB_RPTR_ADDR, | 1854 | RADEON_WRITE(R600_CP_RB_RPTR_ADDR, |
1841 | rptr_addr & 0xffffffff); | 1855 | #ifdef __BIG_ENDIAN |
1856 | (2 << 0) | | ||
1857 | #endif | ||
1858 | (rptr_addr & 0xfffffffc)); | ||
1842 | RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI, | 1859 | RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI, |
1843 | upper_32_bits(rptr_addr)); | 1860 | upper_32_bits(rptr_addr)); |
1844 | 1861 | ||
@@ -1889,7 +1906,7 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev, | |||
1889 | { | 1906 | { |
1890 | u64 scratch_addr; | 1907 | u64 scratch_addr; |
1891 | 1908 | ||
1892 | scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR); | 1909 | scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR) & 0xFFFFFFFC; |
1893 | scratch_addr |= ((u64)RADEON_READ(R600_CP_RB_RPTR_ADDR_HI)) << 32; | 1910 | scratch_addr |= ((u64)RADEON_READ(R600_CP_RB_RPTR_ADDR_HI)) << 32; |
1894 | scratch_addr += R600_SCRATCH_REG_OFFSET; | 1911 | scratch_addr += R600_SCRATCH_REG_OFFSET; |
1895 | scratch_addr >>= 8; | 1912 | scratch_addr >>= 8; |
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index fe0c8eb76010..0a0848f0346d 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
@@ -388,17 +388,18 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) | |||
388 | } | 388 | } |
389 | 389 | ||
390 | if (!IS_ALIGNED(pitch, pitch_align)) { | 390 | if (!IS_ALIGNED(pitch, pitch_align)) { |
391 | dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", | 391 | dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n", |
392 | __func__, __LINE__, pitch); | 392 | __func__, __LINE__, pitch, pitch_align, array_mode); |
393 | return -EINVAL; | 393 | return -EINVAL; |
394 | } | 394 | } |
395 | if (!IS_ALIGNED(height, height_align)) { | 395 | if (!IS_ALIGNED(height, height_align)) { |
396 | dev_warn(p->dev, "%s:%d cb height (%d) invalid\n", | 396 | dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n", |
397 | __func__, __LINE__, height); | 397 | __func__, __LINE__, height, height_align, array_mode); |
398 | return -EINVAL; | 398 | return -EINVAL; |
399 | } | 399 | } |
400 | if (!IS_ALIGNED(base_offset, base_align)) { | 400 | if (!IS_ALIGNED(base_offset, base_align)) { |
401 | dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset); | 401 | dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i, |
402 | base_offset, base_align, array_mode); | ||
402 | return -EINVAL; | 403 | return -EINVAL; |
403 | } | 404 | } |
404 | 405 | ||
@@ -413,7 +414,10 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) | |||
413 | * broken userspace. | 414 | * broken userspace. |
414 | */ | 415 | */ |
415 | } else { | 416 | } else { |
416 | dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i])); | 417 | dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big\n", __func__, i, |
418 | array_mode, | ||
419 | track->cb_color_bo_offset[i], tmp, | ||
420 | radeon_bo_size(track->cb_color_bo[i])); | ||
417 | return -EINVAL; | 421 | return -EINVAL; |
418 | } | 422 | } |
419 | } | 423 | } |
@@ -548,17 +552,18 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) | |||
548 | } | 552 | } |
549 | 553 | ||
550 | if (!IS_ALIGNED(pitch, pitch_align)) { | 554 | if (!IS_ALIGNED(pitch, pitch_align)) { |
551 | dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n", | 555 | dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n", |
552 | __func__, __LINE__, pitch); | 556 | __func__, __LINE__, pitch, pitch_align, array_mode); |
553 | return -EINVAL; | 557 | return -EINVAL; |
554 | } | 558 | } |
555 | if (!IS_ALIGNED(height, height_align)) { | 559 | if (!IS_ALIGNED(height, height_align)) { |
556 | dev_warn(p->dev, "%s:%d db height (%d) invalid\n", | 560 | dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n", |
557 | __func__, __LINE__, height); | 561 | __func__, __LINE__, height, height_align, array_mode); |
558 | return -EINVAL; | 562 | return -EINVAL; |
559 | } | 563 | } |
560 | if (!IS_ALIGNED(base_offset, base_align)) { | 564 | if (!IS_ALIGNED(base_offset, base_align)) { |
561 | dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset); | 565 | dev_warn(p->dev, "%s offset[%d] 0x%llx, 0x%llx, %d not aligned\n", __func__, i, |
566 | base_offset, base_align, array_mode); | ||
562 | return -EINVAL; | 567 | return -EINVAL; |
563 | } | 568 | } |
564 | 569 | ||
@@ -566,9 +571,10 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) | |||
566 | nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; | 571 | nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; |
567 | tmp = ntiles * bpe * 64 * nviews; | 572 | tmp = ntiles * bpe * 64 * nviews; |
568 | if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { | 573 | if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { |
569 | dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %u have %lu)\n", | 574 | dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n", |
570 | track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, | 575 | array_mode, |
571 | radeon_bo_size(track->db_bo)); | 576 | track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, |
577 | radeon_bo_size(track->db_bo)); | ||
572 | return -EINVAL; | 578 | return -EINVAL; |
573 | } | 579 | } |
574 | } | 580 | } |
@@ -1350,18 +1356,18 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i | |||
1350 | /* XXX check height as well... */ | 1356 | /* XXX check height as well... */ |
1351 | 1357 | ||
1352 | if (!IS_ALIGNED(pitch, pitch_align)) { | 1358 | if (!IS_ALIGNED(pitch, pitch_align)) { |
1353 | dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", | 1359 | dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n", |
1354 | __func__, __LINE__, pitch); | 1360 | __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0)); |
1355 | return -EINVAL; | 1361 | return -EINVAL; |
1356 | } | 1362 | } |
1357 | if (!IS_ALIGNED(base_offset, base_align)) { | 1363 | if (!IS_ALIGNED(base_offset, base_align)) { |
1358 | dev_warn(p->dev, "%s:%d tex base offset (0x%llx) invalid\n", | 1364 | dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n", |
1359 | __func__, __LINE__, base_offset); | 1365 | __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0)); |
1360 | return -EINVAL; | 1366 | return -EINVAL; |
1361 | } | 1367 | } |
1362 | if (!IS_ALIGNED(mip_offset, base_align)) { | 1368 | if (!IS_ALIGNED(mip_offset, base_align)) { |
1363 | dev_warn(p->dev, "%s:%d tex mip offset (0x%llx) invalid\n", | 1369 | dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n", |
1364 | __func__, __LINE__, mip_offset); | 1370 | __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0)); |
1365 | return -EINVAL; | 1371 | return -EINVAL; |
1366 | } | 1372 | } |
1367 | 1373 | ||
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index d1f598663da7..b2b944bcd05a 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h | |||
@@ -154,13 +154,14 @@ | |||
154 | #define ROQ_IB2_START(x) ((x) << 8) | 154 | #define ROQ_IB2_START(x) ((x) << 8) |
155 | #define CP_RB_BASE 0xC100 | 155 | #define CP_RB_BASE 0xC100 |
156 | #define CP_RB_CNTL 0xC104 | 156 | #define CP_RB_CNTL 0xC104 |
157 | #define RB_BUFSZ(x) ((x)<<0) | 157 | #define RB_BUFSZ(x) ((x) << 0) |
158 | #define RB_BLKSZ(x) ((x)<<8) | 158 | #define RB_BLKSZ(x) ((x) << 8) |
159 | #define RB_NO_UPDATE (1<<27) | 159 | #define RB_NO_UPDATE (1 << 27) |
160 | #define RB_RPTR_WR_ENA (1<<31) | 160 | #define RB_RPTR_WR_ENA (1 << 31) |
161 | #define BUF_SWAP_32BIT (2 << 16) | 161 | #define BUF_SWAP_32BIT (2 << 16) |
162 | #define CP_RB_RPTR 0x8700 | 162 | #define CP_RB_RPTR 0x8700 |
163 | #define CP_RB_RPTR_ADDR 0xC10C | 163 | #define CP_RB_RPTR_ADDR 0xC10C |
164 | #define RB_RPTR_SWAP(x) ((x) << 0) | ||
164 | #define CP_RB_RPTR_ADDR_HI 0xC110 | 165 | #define CP_RB_RPTR_ADDR_HI 0xC110 |
165 | #define CP_RB_RPTR_WR 0xC108 | 166 | #define CP_RB_RPTR_WR 0xC108 |
166 | #define CP_RB_WPTR 0xC114 | 167 | #define CP_RB_WPTR 0xC114 |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 5c1cc7ad9a15..02d5c415f499 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -88,7 +88,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev | |||
88 | /* some evergreen boards have bad data for this entry */ | 88 | /* some evergreen boards have bad data for this entry */ |
89 | if (ASIC_IS_DCE4(rdev)) { | 89 | if (ASIC_IS_DCE4(rdev)) { |
90 | if ((i == 7) && | 90 | if ((i == 7) && |
91 | (gpio->usClkMaskRegisterIndex == 0x1936) && | 91 | (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) && |
92 | (gpio->sucI2cId.ucAccess == 0)) { | 92 | (gpio->sucI2cId.ucAccess == 0)) { |
93 | gpio->sucI2cId.ucAccess = 0x97; | 93 | gpio->sucI2cId.ucAccess = 0x97; |
94 | gpio->ucDataMaskShift = 8; | 94 | gpio->ucDataMaskShift = 8; |
@@ -101,7 +101,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev | |||
101 | /* some DCE3 boards have bad data for this entry */ | 101 | /* some DCE3 boards have bad data for this entry */ |
102 | if (ASIC_IS_DCE3(rdev)) { | 102 | if (ASIC_IS_DCE3(rdev)) { |
103 | if ((i == 4) && | 103 | if ((i == 4) && |
104 | (gpio->usClkMaskRegisterIndex == 0x1fda) && | 104 | (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) && |
105 | (gpio->sucI2cId.ucAccess == 0x94)) | 105 | (gpio->sucI2cId.ucAccess == 0x94)) |
106 | gpio->sucI2cId.ucAccess = 0x14; | 106 | gpio->sucI2cId.ucAccess = 0x14; |
107 | } | 107 | } |
@@ -172,7 +172,7 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev) | |||
172 | /* some evergreen boards have bad data for this entry */ | 172 | /* some evergreen boards have bad data for this entry */ |
173 | if (ASIC_IS_DCE4(rdev)) { | 173 | if (ASIC_IS_DCE4(rdev)) { |
174 | if ((i == 7) && | 174 | if ((i == 7) && |
175 | (gpio->usClkMaskRegisterIndex == 0x1936) && | 175 | (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) && |
176 | (gpio->sucI2cId.ucAccess == 0)) { | 176 | (gpio->sucI2cId.ucAccess == 0)) { |
177 | gpio->sucI2cId.ucAccess = 0x97; | 177 | gpio->sucI2cId.ucAccess = 0x97; |
178 | gpio->ucDataMaskShift = 8; | 178 | gpio->ucDataMaskShift = 8; |
@@ -185,7 +185,7 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev) | |||
185 | /* some DCE3 boards have bad data for this entry */ | 185 | /* some DCE3 boards have bad data for this entry */ |
186 | if (ASIC_IS_DCE3(rdev)) { | 186 | if (ASIC_IS_DCE3(rdev)) { |
187 | if ((i == 4) && | 187 | if ((i == 4) && |
188 | (gpio->usClkMaskRegisterIndex == 0x1fda) && | 188 | (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) && |
189 | (gpio->sucI2cId.ucAccess == 0x94)) | 189 | (gpio->sucI2cId.ucAccess == 0x94)) |
190 | gpio->sucI2cId.ucAccess = 0x14; | 190 | gpio->sucI2cId.ucAccess = 0x14; |
191 | } | 191 | } |
@@ -252,7 +252,7 @@ static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rd | |||
252 | pin = &gpio_info->asGPIO_Pin[i]; | 252 | pin = &gpio_info->asGPIO_Pin[i]; |
253 | if (id == pin->ucGPIO_ID) { | 253 | if (id == pin->ucGPIO_ID) { |
254 | gpio.id = pin->ucGPIO_ID; | 254 | gpio.id = pin->ucGPIO_ID; |
255 | gpio.reg = pin->usGpioPin_AIndex * 4; | 255 | gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4; |
256 | gpio.mask = (1 << pin->ucGpioPinBitShift); | 256 | gpio.mask = (1 << pin->ucGpioPinBitShift); |
257 | gpio.valid = true; | 257 | gpio.valid = true; |
258 | break; | 258 | break; |
@@ -1274,11 +1274,11 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev) | |||
1274 | data_offset); | 1274 | data_offset); |
1275 | switch (crev) { | 1275 | switch (crev) { |
1276 | case 1: | 1276 | case 1: |
1277 | if (igp_info->info.ulBootUpMemoryClock) | 1277 | if (le32_to_cpu(igp_info->info.ulBootUpMemoryClock)) |
1278 | return true; | 1278 | return true; |
1279 | break; | 1279 | break; |
1280 | case 2: | 1280 | case 2: |
1281 | if (igp_info->info_2.ulBootUpSidePortClock) | 1281 | if (le32_to_cpu(igp_info->info_2.ulBootUpSidePortClock)) |
1282 | return true; | 1282 | return true; |
1283 | break; | 1283 | break; |
1284 | default: | 1284 | default: |
@@ -1442,7 +1442,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, | |||
1442 | 1442 | ||
1443 | for (i = 0; i < num_indices; i++) { | 1443 | for (i = 0; i < num_indices; i++) { |
1444 | if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) && | 1444 | if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) && |
1445 | (clock <= ss_info->info.asSpreadSpectrum[i].ulTargetClockRange)) { | 1445 | (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) { |
1446 | ss->percentage = | 1446 | ss->percentage = |
1447 | le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage); | 1447 | le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage); |
1448 | ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode; | 1448 | ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode; |
@@ -1456,7 +1456,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, | |||
1456 | sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2); | 1456 | sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2); |
1457 | for (i = 0; i < num_indices; i++) { | 1457 | for (i = 0; i < num_indices; i++) { |
1458 | if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) && | 1458 | if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) && |
1459 | (clock <= ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange)) { | 1459 | (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) { |
1460 | ss->percentage = | 1460 | ss->percentage = |
1461 | le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage); | 1461 | le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage); |
1462 | ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode; | 1462 | ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode; |
@@ -1470,7 +1470,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, | |||
1470 | sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3); | 1470 | sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3); |
1471 | for (i = 0; i < num_indices; i++) { | 1471 | for (i = 0; i < num_indices; i++) { |
1472 | if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) && | 1472 | if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) && |
1473 | (clock <= ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange)) { | 1473 | (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) { |
1474 | ss->percentage = | 1474 | ss->percentage = |
1475 | le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage); | 1475 | le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage); |
1476 | ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode; | 1476 | ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode; |
@@ -1553,8 +1553,8 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct | |||
1553 | if (misc & ATOM_DOUBLE_CLOCK_MODE) | 1553 | if (misc & ATOM_DOUBLE_CLOCK_MODE) |
1554 | lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN; | 1554 | lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN; |
1555 | 1555 | ||
1556 | lvds->native_mode.width_mm = lvds_info->info.sLCDTiming.usImageHSize; | 1556 | lvds->native_mode.width_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageHSize); |
1557 | lvds->native_mode.height_mm = lvds_info->info.sLCDTiming.usImageVSize; | 1557 | lvds->native_mode.height_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageVSize); |
1558 | 1558 | ||
1559 | /* set crtc values */ | 1559 | /* set crtc values */ |
1560 | drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V); | 1560 | drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V); |
@@ -1569,13 +1569,13 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct | |||
1569 | lvds->linkb = false; | 1569 | lvds->linkb = false; |
1570 | 1570 | ||
1571 | /* parse the lcd record table */ | 1571 | /* parse the lcd record table */ |
1572 | if (lvds_info->info.usModePatchTableOffset) { | 1572 | if (le16_to_cpu(lvds_info->info.usModePatchTableOffset)) { |
1573 | ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record; | 1573 | ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record; |
1574 | ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record; | 1574 | ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record; |
1575 | bool bad_record = false; | 1575 | bool bad_record = false; |
1576 | u8 *record = (u8 *)(mode_info->atom_context->bios + | 1576 | u8 *record = (u8 *)(mode_info->atom_context->bios + |
1577 | data_offset + | 1577 | data_offset + |
1578 | lvds_info->info.usModePatchTableOffset); | 1578 | le16_to_cpu(lvds_info->info.usModePatchTableOffset)); |
1579 | while (*record != ATOM_RECORD_END_TYPE) { | 1579 | while (*record != ATOM_RECORD_END_TYPE) { |
1580 | switch (*record) { | 1580 | switch (*record) { |
1581 | case LCD_MODE_PATCH_RECORD_MODE_TYPE: | 1581 | case LCD_MODE_PATCH_RECORD_MODE_TYPE: |
@@ -2189,7 +2189,7 @@ static u16 radeon_atombios_get_default_vddc(struct radeon_device *rdev) | |||
2189 | firmware_info = | 2189 | firmware_info = |
2190 | (union firmware_info *)(mode_info->atom_context->bios + | 2190 | (union firmware_info *)(mode_info->atom_context->bios + |
2191 | data_offset); | 2191 | data_offset); |
2192 | vddc = firmware_info->info_14.usBootUpVDDCVoltage; | 2192 | vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage); |
2193 | } | 2193 | } |
2194 | 2194 | ||
2195 | return vddc; | 2195 | return vddc; |
@@ -2284,7 +2284,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev, | |||
2284 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = | 2284 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = |
2285 | VOLTAGE_SW; | 2285 | VOLTAGE_SW; |
2286 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = | 2286 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = |
2287 | clock_info->evergreen.usVDDC; | 2287 | le16_to_cpu(clock_info->evergreen.usVDDC); |
2288 | } else { | 2288 | } else { |
2289 | sclk = le16_to_cpu(clock_info->r600.usEngineClockLow); | 2289 | sclk = le16_to_cpu(clock_info->r600.usEngineClockLow); |
2290 | sclk |= clock_info->r600.ucEngineClockHigh << 16; | 2290 | sclk |= clock_info->r600.ucEngineClockHigh << 16; |
@@ -2295,7 +2295,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev, | |||
2295 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = | 2295 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = |
2296 | VOLTAGE_SW; | 2296 | VOLTAGE_SW; |
2297 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = | 2297 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = |
2298 | clock_info->r600.usVDDC; | 2298 | le16_to_cpu(clock_info->r600.usVDDC); |
2299 | } | 2299 | } |
2300 | 2300 | ||
2301 | if (rdev->flags & RADEON_IS_IGP) { | 2301 | if (rdev->flags & RADEON_IS_IGP) { |
@@ -2408,13 +2408,13 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev) | |||
2408 | radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); | 2408 | radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); |
2409 | state_array = (struct StateArray *) | 2409 | state_array = (struct StateArray *) |
2410 | (mode_info->atom_context->bios + data_offset + | 2410 | (mode_info->atom_context->bios + data_offset + |
2411 | power_info->pplib.usStateArrayOffset); | 2411 | le16_to_cpu(power_info->pplib.usStateArrayOffset)); |
2412 | clock_info_array = (struct ClockInfoArray *) | 2412 | clock_info_array = (struct ClockInfoArray *) |
2413 | (mode_info->atom_context->bios + data_offset + | 2413 | (mode_info->atom_context->bios + data_offset + |
2414 | power_info->pplib.usClockInfoArrayOffset); | 2414 | le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); |
2415 | non_clock_info_array = (struct NonClockInfoArray *) | 2415 | non_clock_info_array = (struct NonClockInfoArray *) |
2416 | (mode_info->atom_context->bios + data_offset + | 2416 | (mode_info->atom_context->bios + data_offset + |
2417 | power_info->pplib.usNonClockInfoArrayOffset); | 2417 | le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); |
2418 | rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * | 2418 | rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * |
2419 | state_array->ucNumEntries, GFP_KERNEL); | 2419 | state_array->ucNumEntries, GFP_KERNEL); |
2420 | if (!rdev->pm.power_state) | 2420 | if (!rdev->pm.power_state) |
@@ -2533,7 +2533,7 @@ uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev) | |||
2533 | int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock); | 2533 | int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock); |
2534 | 2534 | ||
2535 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 2535 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
2536 | return args.ulReturnEngineClock; | 2536 | return le32_to_cpu(args.ulReturnEngineClock); |
2537 | } | 2537 | } |
2538 | 2538 | ||
2539 | uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev) | 2539 | uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev) |
@@ -2542,7 +2542,7 @@ uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev) | |||
2542 | int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock); | 2542 | int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock); |
2543 | 2543 | ||
2544 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 2544 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
2545 | return args.ulReturnMemoryClock; | 2545 | return le32_to_cpu(args.ulReturnMemoryClock); |
2546 | } | 2546 | } |
2547 | 2547 | ||
2548 | void radeon_atom_set_engine_clock(struct radeon_device *rdev, | 2548 | void radeon_atom_set_engine_clock(struct radeon_device *rdev, |
@@ -2551,7 +2551,7 @@ void radeon_atom_set_engine_clock(struct radeon_device *rdev, | |||
2551 | SET_ENGINE_CLOCK_PS_ALLOCATION args; | 2551 | SET_ENGINE_CLOCK_PS_ALLOCATION args; |
2552 | int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock); | 2552 | int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock); |
2553 | 2553 | ||
2554 | args.ulTargetEngineClock = eng_clock; /* 10 khz */ | 2554 | args.ulTargetEngineClock = cpu_to_le32(eng_clock); /* 10 khz */ |
2555 | 2555 | ||
2556 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 2556 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
2557 | } | 2557 | } |
@@ -2565,7 +2565,7 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev, | |||
2565 | if (rdev->flags & RADEON_IS_IGP) | 2565 | if (rdev->flags & RADEON_IS_IGP) |
2566 | return; | 2566 | return; |
2567 | 2567 | ||
2568 | args.ulTargetMemoryClock = mem_clock; /* 10 khz */ | 2568 | args.ulTargetMemoryClock = cpu_to_le32(mem_clock); /* 10 khz */ |
2569 | 2569 | ||
2570 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 2570 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
2571 | } | 2571 | } |
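The radeon_atombios.c changes above wrap every multi-byte AtomBIOS table field in le16_to_cpu()/le32_to_cpu() before it is used or compared, because those tables are stored little-endian regardless of the host's byte order; the raw values only happened to work on little-endian machines. A minimal userspace sketch of the same conversion pattern, using a hypothetical table fragment (the helpers below are stand-ins for the kernel's conversion macros, not the kernel API):

#include <stdint.h>
#include <stdio.h>

/* Read little-endian fields from a byte buffer, independent of host endianness. */
static uint16_t get_le16(const uint8_t *p)
{
    return (uint16_t)(p[0] | (p[1] << 8));
}

static uint32_t get_le32(const uint8_t *p)
{
    return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
           ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
    /* Hypothetical table fragment: a 16-bit offset followed by a 32-bit clock. */
    const uint8_t table[6] = { 0x34, 0x12, 0x40, 0x42, 0x0f, 0x00 };

    uint16_t offset = get_le16(&table[0]);   /* 0x1234 */
    uint32_t clock  = get_le32(&table[2]);   /* 0x000f4240 = 1000000 */

    printf("offset = 0x%04x, clock = %u\n", offset, clock);
    return 0;
}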
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index d27ef74590cd..cf7c8d5b4ec2 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
@@ -1504,6 +1504,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1504 | (rdev->pdev->subsystem_device == 0x4a48)) { | 1504 | (rdev->pdev->subsystem_device == 0x4a48)) { |
1505 | /* Mac X800 */ | 1505 | /* Mac X800 */ |
1506 | rdev->mode_info.connector_table = CT_MAC_X800; | 1506 | rdev->mode_info.connector_table = CT_MAC_X800; |
1507 | } else if ((rdev->pdev->device == 0x4150) && | ||
1508 | (rdev->pdev->subsystem_vendor == 0x1002) && | ||
1509 | (rdev->pdev->subsystem_device == 0x4150)) { | ||
1510 | /* Mac G5 9600 */ | ||
1511 | rdev->mode_info.connector_table = CT_MAC_G5_9600; | ||
1507 | } else | 1512 | } else |
1508 | #endif /* CONFIG_PPC_PMAC */ | 1513 | #endif /* CONFIG_PPC_PMAC */ |
1509 | #ifdef CONFIG_PPC64 | 1514 | #ifdef CONFIG_PPC64 |
@@ -2022,6 +2027,48 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
2022 | CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, | 2027 | CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, |
2023 | &hpd); | 2028 | &hpd); |
2024 | break; | 2029 | break; |
2030 | case CT_MAC_G5_9600: | ||
2031 | DRM_INFO("Connector Table: %d (mac g5 9600)\n", | ||
2032 | rdev->mode_info.connector_table); | ||
2033 | /* DVI - tv dac, dvo */ | ||
2034 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); | ||
2035 | hpd.hpd = RADEON_HPD_1; /* ??? */ | ||
2036 | radeon_add_legacy_encoder(dev, | ||
2037 | radeon_get_encoder_enum(dev, | ||
2038 | ATOM_DEVICE_DFP2_SUPPORT, | ||
2039 | 0), | ||
2040 | ATOM_DEVICE_DFP2_SUPPORT); | ||
2041 | radeon_add_legacy_encoder(dev, | ||
2042 | radeon_get_encoder_enum(dev, | ||
2043 | ATOM_DEVICE_CRT2_SUPPORT, | ||
2044 | 2), | ||
2045 | ATOM_DEVICE_CRT2_SUPPORT); | ||
2046 | radeon_add_legacy_connector(dev, 0, | ||
2047 | ATOM_DEVICE_DFP2_SUPPORT | | ||
2048 | ATOM_DEVICE_CRT2_SUPPORT, | ||
2049 | DRM_MODE_CONNECTOR_DVII, &ddc_i2c, | ||
2050 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I, | ||
2051 | &hpd); | ||
2052 | /* ADC - primary dac, internal tmds */ | ||
2053 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); | ||
2054 | hpd.hpd = RADEON_HPD_2; /* ??? */ | ||
2055 | radeon_add_legacy_encoder(dev, | ||
2056 | radeon_get_encoder_enum(dev, | ||
2057 | ATOM_DEVICE_DFP1_SUPPORT, | ||
2058 | 0), | ||
2059 | ATOM_DEVICE_DFP1_SUPPORT); | ||
2060 | radeon_add_legacy_encoder(dev, | ||
2061 | radeon_get_encoder_enum(dev, | ||
2062 | ATOM_DEVICE_CRT1_SUPPORT, | ||
2063 | 1), | ||
2064 | ATOM_DEVICE_CRT1_SUPPORT); | ||
2065 | radeon_add_legacy_connector(dev, 1, | ||
2066 | ATOM_DEVICE_DFP1_SUPPORT | | ||
2067 | ATOM_DEVICE_CRT1_SUPPORT, | ||
2068 | DRM_MODE_CONNECTOR_DVII, &ddc_i2c, | ||
2069 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I, | ||
2070 | &hpd); | ||
2071 | break; | ||
2025 | default: | 2072 | default: |
2026 | DRM_INFO("Connector table: %d (invalid)\n", | 2073 | DRM_INFO("Connector table: %d (invalid)\n", |
2027 | rdev->mode_info.connector_table); | 2074 | rdev->mode_info.connector_table); |
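The new CT_MAC_G5_9600 case above selects a board-specific connector table by matching the PCI device and subsystem IDs. The same kind of board matching is often written as a small quirk table rather than a chain of if/else tests; a sketch under assumed types, with only the G5 9600 IDs taken from the hunk above and everything else illustrative:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct connector_quirk {
    uint16_t device;
    uint16_t subsystem_vendor;
    uint16_t subsystem_device;
    const char *table;              /* stands in for the connector_table enum */
};

static const struct connector_quirk quirks[] = {
    { 0x4150, 0x1002, 0x4150, "CT_MAC_G5_9600" },   /* IDs from the hunk above */
};

static const char *lookup_connector_table(uint16_t dev, uint16_t sv, uint16_t sd)
{
    for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
        if (quirks[i].device == dev &&
            quirks[i].subsystem_vendor == sv &&
            quirks[i].subsystem_device == sd)
            return quirks[i].table;
    }
    return "CT_GENERIC";            /* hypothetical fallback */
}

int main(void)
{
    printf("%s\n", lookup_connector_table(0x4150, 0x1002, 0x4150));
    return 0;
}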
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 0ca5eb217929..f0209be7a34b 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -937,8 +937,11 @@ int radeon_resume_kms(struct drm_device *dev) | |||
937 | int radeon_gpu_reset(struct radeon_device *rdev) | 937 | int radeon_gpu_reset(struct radeon_device *rdev) |
938 | { | 938 | { |
939 | int r; | 939 | int r; |
940 | int resched; | ||
940 | 941 | ||
941 | radeon_save_bios_scratch_regs(rdev); | 942 | radeon_save_bios_scratch_regs(rdev); |
943 | /* block TTM */ | ||
944 | resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); | ||
942 | radeon_suspend(rdev); | 945 | radeon_suspend(rdev); |
943 | 946 | ||
944 | r = radeon_asic_reset(rdev); | 947 | r = radeon_asic_reset(rdev); |
@@ -947,6 +950,7 @@ int radeon_gpu_reset(struct radeon_device *rdev) | |||
947 | radeon_resume(rdev); | 950 | radeon_resume(rdev); |
948 | radeon_restore_bios_scratch_regs(rdev); | 951 | radeon_restore_bios_scratch_regs(rdev); |
949 | drm_helper_resume_force_mode(rdev->ddev); | 952 | drm_helper_resume_force_mode(rdev->ddev); |
953 | ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); | ||
950 | return 0; | 954 | return 0; |
951 | } | 955 | } |
952 | /* bad news, how to tell it to userspace ? */ | 956 | /* bad news, how to tell it to userspace ? */ |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 4409975a363c..4be58793dc17 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -793,6 +793,11 @@ static void avivo_get_fb_div(struct radeon_pll *pll, | |||
793 | tmp *= target_clock; | 793 | tmp *= target_clock; |
794 | *fb_div = tmp / pll->reference_freq; | 794 | *fb_div = tmp / pll->reference_freq; |
795 | *frac_fb_div = tmp % pll->reference_freq; | 795 | *frac_fb_div = tmp % pll->reference_freq; |
796 | |||
797 | if (*fb_div > pll->max_feedback_div) | ||
798 | *fb_div = pll->max_feedback_div; | ||
799 | else if (*fb_div < pll->min_feedback_div) | ||
800 | *fb_div = pll->min_feedback_div; | ||
796 | } | 801 | } |
797 | 802 | ||
798 | static u32 avivo_get_post_div(struct radeon_pll *pll, | 803 | static u32 avivo_get_post_div(struct radeon_pll *pll, |
@@ -826,6 +831,11 @@ static u32 avivo_get_post_div(struct radeon_pll *pll, | |||
826 | post_div--; | 831 | post_div--; |
827 | } | 832 | } |
828 | 833 | ||
834 | if (post_div > pll->max_post_div) | ||
835 | post_div = pll->max_post_div; | ||
836 | else if (post_div < pll->min_post_div) | ||
837 | post_div = pll->min_post_div; | ||
838 | |||
829 | return post_div; | 839 | return post_div; |
830 | } | 840 | } |
831 | 841 | ||
@@ -961,7 +971,7 @@ void radeon_compute_pll_legacy(struct radeon_pll *pll, | |||
961 | max_fractional_feed_div = pll->max_frac_feedback_div; | 971 | max_fractional_feed_div = pll->max_frac_feedback_div; |
962 | } | 972 | } |
963 | 973 | ||
964 | for (post_div = min_post_div; post_div <= max_post_div; ++post_div) { | 974 | for (post_div = max_post_div; post_div >= min_post_div; --post_div) { |
965 | uint32_t ref_div; | 975 | uint32_t ref_div; |
966 | 976 | ||
967 | if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) | 977 | if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) |
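The avivo_get_fb_div()/avivo_get_post_div() changes above clamp the computed dividers into the PLL's advertised minimum and maximum, and the legacy PLL search now walks post dividers from high to low. The clamping itself is the ordinary bounds check; a tiny standalone sketch with made-up limits:

#include <stdint.h>
#include <stdio.h>

/* Clamp a computed divider into an inclusive [min, max] range. */
static uint32_t clamp_div(uint32_t val, uint32_t min, uint32_t max)
{
    if (val > max)
        return max;
    if (val < min)
        return min;
    return val;
}

int main(void)
{
    uint32_t min_fb_div = 4, max_fb_div = 255;   /* hypothetical PLL limits */

    printf("%u\n", clamp_div(300, min_fb_div, max_fb_div)); /* 255 */
    printf("%u\n", clamp_div(2,   min_fb_div, max_fb_div)); /* 4   */
    printf("%u\n", clamp_div(100, min_fb_div, max_fb_div)); /* 100 */
    return 0;
}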
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index 448eba89d1e6..5cba46b9779a 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h | |||
@@ -1524,6 +1524,7 @@ extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index); | |||
1524 | #define R600_CP_RB_CNTL 0xc104 | 1524 | #define R600_CP_RB_CNTL 0xc104 |
1525 | # define R600_RB_BUFSZ(x) ((x) << 0) | 1525 | # define R600_RB_BUFSZ(x) ((x) << 0) |
1526 | # define R600_RB_BLKSZ(x) ((x) << 8) | 1526 | # define R600_RB_BLKSZ(x) ((x) << 8) |
1527 | # define R600_BUF_SWAP_32BIT (2 << 16) | ||
1527 | # define R600_RB_NO_UPDATE (1 << 27) | 1528 | # define R600_RB_NO_UPDATE (1 << 27) |
1528 | # define R600_RB_RPTR_WR_ENA (1 << 31) | 1529 | # define R600_RB_RPTR_WR_ENA (1 << 31) |
1529 | #define R600_CP_RB_RPTR_WR 0xc108 | 1530 | #define R600_CP_RB_RPTR_WR 0xc108 |
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index d4a542247618..b4274883227f 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
@@ -910,7 +910,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
910 | 910 | ||
911 | args.v1.ucAction = action; | 911 | args.v1.ucAction = action; |
912 | if (action == ATOM_TRANSMITTER_ACTION_INIT) { | 912 | if (action == ATOM_TRANSMITTER_ACTION_INIT) { |
913 | args.v1.usInitInfo = connector_object_id; | 913 | args.v1.usInitInfo = cpu_to_le16(connector_object_id); |
914 | } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { | 914 | } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { |
915 | args.v1.asMode.ucLaneSel = lane_num; | 915 | args.v1.asMode.ucLaneSel = lane_num; |
916 | args.v1.asMode.ucLaneSet = lane_set; | 916 | args.v1.asMode.ucLaneSet = lane_set; |
@@ -1140,7 +1140,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder, | |||
1140 | case 3: | 1140 | case 3: |
1141 | args.v3.sExtEncoder.ucAction = action; | 1141 | args.v3.sExtEncoder.ucAction = action; |
1142 | if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT) | 1142 | if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT) |
1143 | args.v3.sExtEncoder.usConnectorId = connector_object_id; | 1143 | args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id); |
1144 | else | 1144 | else |
1145 | args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | 1145 | args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); |
1146 | args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder); | 1146 | args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder); |
@@ -1570,11 +1570,21 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder, | |||
1570 | } | 1570 | } |
1571 | 1571 | ||
1572 | /* set scaler clears this on some chips */ | 1572 | /* set scaler clears this on some chips */ |
1573 | /* XXX check DCE4 */ | 1573 | if (ASIC_IS_AVIVO(rdev) && |
1574 | if (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))) { | 1574 | (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) { |
1575 | if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE)) | 1575 | if (ASIC_IS_DCE4(rdev)) { |
1576 | WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, | 1576 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) |
1577 | AVIVO_D1MODE_INTERLEAVE_EN); | 1577 | WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, |
1578 | EVERGREEN_INTERLEAVE_EN); | ||
1579 | else | ||
1580 | WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0); | ||
1581 | } else { | ||
1582 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) | ||
1583 | WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, | ||
1584 | AVIVO_D1MODE_INTERLEAVE_EN); | ||
1585 | else | ||
1586 | WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0); | ||
1587 | } | ||
1578 | } | 1588 | } |
1579 | } | 1589 | } |
1580 | 1590 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 28431e78ab56..0b7b486c97e8 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c | |||
@@ -113,11 +113,14 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev, | |||
113 | u32 tiling_flags = 0; | 113 | u32 tiling_flags = 0; |
114 | int ret; | 114 | int ret; |
115 | int aligned_size, size; | 115 | int aligned_size, size; |
116 | int height = mode_cmd->height; | ||
116 | 117 | ||
117 | /* need to align pitch with crtc limits */ | 118 | /* need to align pitch with crtc limits */ |
118 | mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8); | 119 | mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8); |
119 | 120 | ||
120 | size = mode_cmd->pitch * mode_cmd->height; | 121 | if (rdev->family >= CHIP_R600) |
122 | height = ALIGN(mode_cmd->height, 8); | ||
123 | size = mode_cmd->pitch * height; | ||
121 | aligned_size = ALIGN(size, PAGE_SIZE); | 124 | aligned_size = ALIGN(size, PAGE_SIZE); |
122 | ret = radeon_gem_object_create(rdev, aligned_size, 0, | 125 | ret = radeon_gem_object_create(rdev, aligned_size, 0, |
123 | RADEON_GEM_DOMAIN_VRAM, | 126 | RADEON_GEM_DOMAIN_VRAM, |
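The radeonfb_create_pinned_object() change above rounds the framebuffer height up to a multiple of 8 on R600-family and newer chips before computing the allocation size, which is then rounded up to a whole page. Both steps are the round-up-to-a-power-of-two idiom behind the kernel's ALIGN(); a small standalone sketch with hypothetical mode numbers (the macro name here is local, not the kernel's):

#include <stdint.h>
#include <stdio.h>

/* Round x up to the next multiple of a, where a is a power of two. */
#define ALIGN_POW2(x, a)  (((x) + ((a) - 1)) & ~((uint64_t)(a) - 1))

int main(void)
{
    uint32_t height = 1050;           /* hypothetical mode height */
    uint32_t pitch  = 1680 * 4;       /* hypothetical pitch in bytes */
    uint64_t page   = 4096;

    uint64_t aligned_height = ALIGN_POW2(height, 8);          /* 1056 */
    uint64_t size           = (uint64_t)pitch * aligned_height;
    uint64_t aligned_size   = ALIGN_POW2(size, page);

    printf("height %llu size %llu aligned %llu\n",
           (unsigned long long)aligned_height,
           (unsigned long long)size,
           (unsigned long long)aligned_size);
    return 0;
}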
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index c3f23f6ff60e..5067d18d0009 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -209,6 +209,7 @@ enum radeon_connector_table { | |||
209 | CT_EMAC, | 209 | CT_EMAC, |
210 | CT_RN50_POWER, | 210 | CT_RN50_POWER, |
211 | CT_MAC_X800, | 211 | CT_MAC_X800, |
212 | CT_MAC_G5_9600, | ||
212 | }; | 213 | }; |
213 | 214 | ||
214 | enum radeon_dvo_chip { | 215 | enum radeon_dvo_chip { |
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index df5734d0c4af..e446979e0e0a 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -791,9 +791,9 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev) | |||
791 | radeon_mem_types_list[i].show = &radeon_mm_dump_table; | 791 | radeon_mem_types_list[i].show = &radeon_mm_dump_table; |
792 | radeon_mem_types_list[i].driver_features = 0; | 792 | radeon_mem_types_list[i].driver_features = 0; |
793 | if (i == 0) | 793 | if (i == 0) |
794 | radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].priv; | 794 | radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv; |
795 | else | 795 | else |
796 | radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].priv; | 796 | radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv; |
797 | 797 | ||
798 | } | 798 | } |
799 | /* Add ttm page pool to debugfs */ | 799 | /* Add ttm page pool to debugfs */ |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r300 b/drivers/gpu/drm/radeon/reg_srcs/r300 index b506ec1cab4b..e8a1786b6426 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r300 +++ b/drivers/gpu/drm/radeon/reg_srcs/r300 | |||
@@ -683,9 +683,7 @@ r300 0x4f60 | |||
683 | 0x4DF4 US_ALU_CONST_G_31 | 683 | 0x4DF4 US_ALU_CONST_G_31 |
684 | 0x4DF8 US_ALU_CONST_B_31 | 684 | 0x4DF8 US_ALU_CONST_B_31 |
685 | 0x4DFC US_ALU_CONST_A_31 | 685 | 0x4DFC US_ALU_CONST_A_31 |
686 | 0x4E04 RB3D_BLENDCNTL_R3 | ||
687 | 0x4E08 RB3D_ABLENDCNTL_R3 | 686 | 0x4E08 RB3D_ABLENDCNTL_R3 |
688 | 0x4E0C RB3D_COLOR_CHANNEL_MASK | ||
689 | 0x4E10 RB3D_CONSTANT_COLOR | 687 | 0x4E10 RB3D_CONSTANT_COLOR |
690 | 0x4E14 RB3D_COLOR_CLEAR_VALUE | 688 | 0x4E14 RB3D_COLOR_CLEAR_VALUE |
691 | 0x4E18 RB3D_ROPCNTL_R3 | 689 | 0x4E18 RB3D_ROPCNTL_R3 |
@@ -706,13 +704,11 @@ r300 0x4f60 | |||
706 | 0x4E74 RB3D_CMASK_WRINDEX | 704 | 0x4E74 RB3D_CMASK_WRINDEX |
707 | 0x4E78 RB3D_CMASK_DWORD | 705 | 0x4E78 RB3D_CMASK_DWORD |
708 | 0x4E7C RB3D_CMASK_RDINDEX | 706 | 0x4E7C RB3D_CMASK_RDINDEX |
709 | 0x4E80 RB3D_AARESOLVE_OFFSET | ||
710 | 0x4E84 RB3D_AARESOLVE_PITCH | ||
711 | 0x4E88 RB3D_AARESOLVE_CTL | ||
712 | 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD | 707 | 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD |
713 | 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD | 708 | 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD |
714 | 0x4F04 ZB_ZSTENCILCNTL | 709 | 0x4F04 ZB_ZSTENCILCNTL |
715 | 0x4F08 ZB_STENCILREFMASK | 710 | 0x4F08 ZB_STENCILREFMASK |
716 | 0x4F14 ZB_ZTOP | 711 | 0x4F14 ZB_ZTOP |
717 | 0x4F18 ZB_ZCACHE_CTLSTAT | 712 | 0x4F18 ZB_ZCACHE_CTLSTAT |
713 | 0x4F28 ZB_DEPTHCLEARVALUE | ||
718 | 0x4F58 ZB_ZPASS_DATA | 714 | 0x4F58 ZB_ZPASS_DATA |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420 index 8c1214c2390f..722074e21e2f 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r420 +++ b/drivers/gpu/drm/radeon/reg_srcs/r420 | |||
@@ -130,7 +130,6 @@ r420 0x4f60 | |||
130 | 0x401C GB_SELECT | 130 | 0x401C GB_SELECT |
131 | 0x4020 GB_AA_CONFIG | 131 | 0x4020 GB_AA_CONFIG |
132 | 0x4024 GB_FIFO_SIZE | 132 | 0x4024 GB_FIFO_SIZE |
133 | 0x4028 GB_Z_PEQ_CONFIG | ||
134 | 0x4100 TX_INVALTAGS | 133 | 0x4100 TX_INVALTAGS |
135 | 0x4200 GA_POINT_S0 | 134 | 0x4200 GA_POINT_S0 |
136 | 0x4204 GA_POINT_T0 | 135 | 0x4204 GA_POINT_T0 |
@@ -750,9 +749,7 @@ r420 0x4f60 | |||
750 | 0x4DF4 US_ALU_CONST_G_31 | 749 | 0x4DF4 US_ALU_CONST_G_31 |
751 | 0x4DF8 US_ALU_CONST_B_31 | 750 | 0x4DF8 US_ALU_CONST_B_31 |
752 | 0x4DFC US_ALU_CONST_A_31 | 751 | 0x4DFC US_ALU_CONST_A_31 |
753 | 0x4E04 RB3D_BLENDCNTL_R3 | ||
754 | 0x4E08 RB3D_ABLENDCNTL_R3 | 752 | 0x4E08 RB3D_ABLENDCNTL_R3 |
755 | 0x4E0C RB3D_COLOR_CHANNEL_MASK | ||
756 | 0x4E10 RB3D_CONSTANT_COLOR | 753 | 0x4E10 RB3D_CONSTANT_COLOR |
757 | 0x4E14 RB3D_COLOR_CLEAR_VALUE | 754 | 0x4E14 RB3D_COLOR_CLEAR_VALUE |
758 | 0x4E18 RB3D_ROPCNTL_R3 | 755 | 0x4E18 RB3D_ROPCNTL_R3 |
@@ -773,13 +770,11 @@ r420 0x4f60 | |||
773 | 0x4E74 RB3D_CMASK_WRINDEX | 770 | 0x4E74 RB3D_CMASK_WRINDEX |
774 | 0x4E78 RB3D_CMASK_DWORD | 771 | 0x4E78 RB3D_CMASK_DWORD |
775 | 0x4E7C RB3D_CMASK_RDINDEX | 772 | 0x4E7C RB3D_CMASK_RDINDEX |
776 | 0x4E80 RB3D_AARESOLVE_OFFSET | ||
777 | 0x4E84 RB3D_AARESOLVE_PITCH | ||
778 | 0x4E88 RB3D_AARESOLVE_CTL | ||
779 | 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD | 773 | 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD |
780 | 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD | 774 | 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD |
781 | 0x4F04 ZB_ZSTENCILCNTL | 775 | 0x4F04 ZB_ZSTENCILCNTL |
782 | 0x4F08 ZB_STENCILREFMASK | 776 | 0x4F08 ZB_STENCILREFMASK |
783 | 0x4F14 ZB_ZTOP | 777 | 0x4F14 ZB_ZTOP |
784 | 0x4F18 ZB_ZCACHE_CTLSTAT | 778 | 0x4F18 ZB_ZCACHE_CTLSTAT |
779 | 0x4F28 ZB_DEPTHCLEARVALUE | ||
785 | 0x4F58 ZB_ZPASS_DATA | 780 | 0x4F58 ZB_ZPASS_DATA |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600 index 0828d80396f2..d9f62866bbc1 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/rs600 +++ b/drivers/gpu/drm/radeon/reg_srcs/rs600 | |||
@@ -749,9 +749,7 @@ rs600 0x6d40 | |||
749 | 0x4DF4 US_ALU_CONST_G_31 | 749 | 0x4DF4 US_ALU_CONST_G_31 |
750 | 0x4DF8 US_ALU_CONST_B_31 | 750 | 0x4DF8 US_ALU_CONST_B_31 |
751 | 0x4DFC US_ALU_CONST_A_31 | 751 | 0x4DFC US_ALU_CONST_A_31 |
752 | 0x4E04 RB3D_BLENDCNTL_R3 | ||
753 | 0x4E08 RB3D_ABLENDCNTL_R3 | 752 | 0x4E08 RB3D_ABLENDCNTL_R3 |
754 | 0x4E0C RB3D_COLOR_CHANNEL_MASK | ||
755 | 0x4E10 RB3D_CONSTANT_COLOR | 753 | 0x4E10 RB3D_CONSTANT_COLOR |
756 | 0x4E14 RB3D_COLOR_CLEAR_VALUE | 754 | 0x4E14 RB3D_COLOR_CLEAR_VALUE |
757 | 0x4E18 RB3D_ROPCNTL_R3 | 755 | 0x4E18 RB3D_ROPCNTL_R3 |
@@ -772,13 +770,11 @@ rs600 0x6d40 | |||
772 | 0x4E74 RB3D_CMASK_WRINDEX | 770 | 0x4E74 RB3D_CMASK_WRINDEX |
773 | 0x4E78 RB3D_CMASK_DWORD | 771 | 0x4E78 RB3D_CMASK_DWORD |
774 | 0x4E7C RB3D_CMASK_RDINDEX | 772 | 0x4E7C RB3D_CMASK_RDINDEX |
775 | 0x4E80 RB3D_AARESOLVE_OFFSET | ||
776 | 0x4E84 RB3D_AARESOLVE_PITCH | ||
777 | 0x4E88 RB3D_AARESOLVE_CTL | ||
778 | 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD | 773 | 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD |
779 | 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD | 774 | 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD |
780 | 0x4F04 ZB_ZSTENCILCNTL | 775 | 0x4F04 ZB_ZSTENCILCNTL |
781 | 0x4F08 ZB_STENCILREFMASK | 776 | 0x4F08 ZB_STENCILREFMASK |
782 | 0x4F14 ZB_ZTOP | 777 | 0x4F14 ZB_ZTOP |
783 | 0x4F18 ZB_ZCACHE_CTLSTAT | 778 | 0x4F18 ZB_ZCACHE_CTLSTAT |
779 | 0x4F28 ZB_DEPTHCLEARVALUE | ||
784 | 0x4F58 ZB_ZPASS_DATA | 780 | 0x4F58 ZB_ZPASS_DATA |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515 index ef422bbacfc1..911a8fbd32bb 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/rv515 +++ b/drivers/gpu/drm/radeon/reg_srcs/rv515 | |||
@@ -164,7 +164,6 @@ rv515 0x6d40 | |||
164 | 0x401C GB_SELECT | 164 | 0x401C GB_SELECT |
165 | 0x4020 GB_AA_CONFIG | 165 | 0x4020 GB_AA_CONFIG |
166 | 0x4024 GB_FIFO_SIZE | 166 | 0x4024 GB_FIFO_SIZE |
167 | 0x4028 GB_Z_PEQ_CONFIG | ||
168 | 0x4100 TX_INVALTAGS | 167 | 0x4100 TX_INVALTAGS |
169 | 0x4114 SU_TEX_WRAP_PS3 | 168 | 0x4114 SU_TEX_WRAP_PS3 |
170 | 0x4118 PS3_ENABLE | 169 | 0x4118 PS3_ENABLE |
@@ -461,9 +460,7 @@ rv515 0x6d40 | |||
461 | 0x4DF4 US_ALU_CONST_G_31 | 460 | 0x4DF4 US_ALU_CONST_G_31 |
462 | 0x4DF8 US_ALU_CONST_B_31 | 461 | 0x4DF8 US_ALU_CONST_B_31 |
463 | 0x4DFC US_ALU_CONST_A_31 | 462 | 0x4DFC US_ALU_CONST_A_31 |
464 | 0x4E04 RB3D_BLENDCNTL_R3 | ||
465 | 0x4E08 RB3D_ABLENDCNTL_R3 | 463 | 0x4E08 RB3D_ABLENDCNTL_R3 |
466 | 0x4E0C RB3D_COLOR_CHANNEL_MASK | ||
467 | 0x4E10 RB3D_CONSTANT_COLOR | 464 | 0x4E10 RB3D_CONSTANT_COLOR |
468 | 0x4E14 RB3D_COLOR_CLEAR_VALUE | 465 | 0x4E14 RB3D_COLOR_CLEAR_VALUE |
469 | 0x4E18 RB3D_ROPCNTL_R3 | 466 | 0x4E18 RB3D_ROPCNTL_R3 |
@@ -484,9 +481,6 @@ rv515 0x6d40 | |||
484 | 0x4E74 RB3D_CMASK_WRINDEX | 481 | 0x4E74 RB3D_CMASK_WRINDEX |
485 | 0x4E78 RB3D_CMASK_DWORD | 482 | 0x4E78 RB3D_CMASK_DWORD |
486 | 0x4E7C RB3D_CMASK_RDINDEX | 483 | 0x4E7C RB3D_CMASK_RDINDEX |
487 | 0x4E80 RB3D_AARESOLVE_OFFSET | ||
488 | 0x4E84 RB3D_AARESOLVE_PITCH | ||
489 | 0x4E88 RB3D_AARESOLVE_CTL | ||
490 | 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD | 484 | 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD |
491 | 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD | 485 | 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD |
492 | 0x4EF8 RB3D_CONSTANT_COLOR_AR | 486 | 0x4EF8 RB3D_CONSTANT_COLOR_AR |
@@ -496,4 +490,5 @@ rv515 0x6d40 | |||
496 | 0x4F14 ZB_ZTOP | 490 | 0x4F14 ZB_ZTOP |
497 | 0x4F18 ZB_ZCACHE_CTLSTAT | 491 | 0x4F18 ZB_ZCACHE_CTLSTAT |
498 | 0x4F58 ZB_ZPASS_DATA | 492 | 0x4F58 ZB_ZPASS_DATA |
493 | 0x4F28 ZB_DEPTHCLEARVALUE | ||
499 | 0x4FD4 ZB_STENCILREFMASK_BF | 494 | 0x4FD4 ZB_STENCILREFMASK_BF |
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 0137d3e3728d..6638c8e4c81b 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
@@ -77,9 +77,9 @@ void rs690_pm_info(struct radeon_device *rdev) | |||
77 | switch (crev) { | 77 | switch (crev) { |
78 | case 1: | 78 | case 1: |
79 | tmp.full = dfixed_const(100); | 79 | tmp.full = dfixed_const(100); |
80 | rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock); | 80 | rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info.ulBootUpMemoryClock)); |
81 | rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); | 81 | rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); |
82 | if (info->info.usK8MemoryClock) | 82 | if (le16_to_cpu(info->info.usK8MemoryClock)) |
83 | rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); | 83 | rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); |
84 | else if (rdev->clock.default_mclk) { | 84 | else if (rdev->clock.default_mclk) { |
85 | rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); | 85 | rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); |
@@ -91,16 +91,16 @@ void rs690_pm_info(struct radeon_device *rdev) | |||
91 | break; | 91 | break; |
92 | case 2: | 92 | case 2: |
93 | tmp.full = dfixed_const(100); | 93 | tmp.full = dfixed_const(100); |
94 | rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock); | 94 | rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpSidePortClock)); |
95 | rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); | 95 | rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); |
96 | if (info->info_v2.ulBootUpUMAClock) | 96 | if (le32_to_cpu(info->info_v2.ulBootUpUMAClock)) |
97 | rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock); | 97 | rdev->pm.igp_system_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpUMAClock)); |
98 | else if (rdev->clock.default_mclk) | 98 | else if (rdev->clock.default_mclk) |
99 | rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); | 99 | rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); |
100 | else | 100 | else |
101 | rdev->pm.igp_system_mclk.full = dfixed_const(66700); | 101 | rdev->pm.igp_system_mclk.full = dfixed_const(66700); |
102 | rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); | 102 | rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); |
103 | rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq); | 103 | rdev->pm.igp_ht_link_clk.full = dfixed_const(le32_to_cpu(info->info_v2.ulHTLinkFreq)); |
104 | rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp); | 104 | rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp); |
105 | rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth)); | 105 | rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth)); |
106 | break; | 106 | break; |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 3a95999d2fef..ee5541c6a623 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -321,7 +321,11 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev) | |||
321 | return -EINVAL; | 321 | return -EINVAL; |
322 | 322 | ||
323 | r700_cp_stop(rdev); | 323 | r700_cp_stop(rdev); |
324 | WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0)); | 324 | WREG32(CP_RB_CNTL, |
325 | #ifdef __BIG_ENDIAN | ||
326 | BUF_SWAP_32BIT | | ||
327 | #endif | ||
328 | RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3)); | ||
325 | 329 | ||
326 | /* Reset cp */ | 330 | /* Reset cp */ |
327 | WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); | 331 | WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); |
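The rv770_cp_load_microcode() hunk above replaces the magic constants with the named field macros and ORs in BUF_SWAP_32BIT only on big-endian builds, so the ring buffer is byte-swapped correctly for the GPU. A host-side sketch of how those shift macros (copied from the r600d.h/rv770d.h definitions shown in this series) compose into one 32-bit register value; __BIG_ENDIAN is the kernel build convention and is kept here only for shape:

#include <stdint.h>
#include <stdio.h>

/* Field macros as defined in r600d.h/rv770d.h above. */
#define RB_BUFSZ(x)      ((x) << 0)
#define RB_BLKSZ(x)      ((x) << 8)
#define RB_NO_UPDATE     (1 << 27)
#define BUF_SWAP_32BIT   (2 << 16)

int main(void)
{
    uint32_t cntl = RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3);

#ifdef __BIG_ENDIAN
    cntl |= BUF_SWAP_32BIT;   /* in the kernel this macro is defined on big-endian builds */
#endif

    printf("CP_RB_CNTL = 0x%08x\n", cntl);   /* 0x08000f03 on a little-endian build */
    return 0;
}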
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index abc8cf5a3672..79fa588e9ed5 100644 --- a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h | |||
@@ -76,10 +76,10 @@ | |||
76 | #define ROQ_IB1_START(x) ((x) << 0) | 76 | #define ROQ_IB1_START(x) ((x) << 0) |
77 | #define ROQ_IB2_START(x) ((x) << 8) | 77 | #define ROQ_IB2_START(x) ((x) << 8) |
78 | #define CP_RB_CNTL 0xC104 | 78 | #define CP_RB_CNTL 0xC104 |
79 | #define RB_BUFSZ(x) ((x)<<0) | 79 | #define RB_BUFSZ(x) ((x) << 0) |
80 | #define RB_BLKSZ(x) ((x)<<8) | 80 | #define RB_BLKSZ(x) ((x) << 8) |
81 | #define RB_NO_UPDATE (1<<27) | 81 | #define RB_NO_UPDATE (1 << 27) |
82 | #define RB_RPTR_WR_ENA (1<<31) | 82 | #define RB_RPTR_WR_ENA (1 << 31) |
83 | #define BUF_SWAP_32BIT (2 << 16) | 83 | #define BUF_SWAP_32BIT (2 << 16) |
84 | #define CP_RB_RPTR 0x8700 | 84 | #define CP_RB_RPTR 0x8700 |
85 | #define CP_RB_RPTR_ADDR 0xC10C | 85 | #define CP_RB_RPTR_ADDR 0xC10C |
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 773e484f1646..297bc9a7d6e6 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig | |||
@@ -238,13 +238,13 @@ config SENSORS_K8TEMP | |||
238 | will be called k8temp. | 238 | will be called k8temp. |
239 | 239 | ||
240 | config SENSORS_K10TEMP | 240 | config SENSORS_K10TEMP |
241 | tristate "AMD Phenom/Sempron/Turion/Opteron temperature sensor" | 241 | tristate "AMD Family 10h/11h/12h/14h temperature sensor" |
242 | depends on X86 && PCI | 242 | depends on X86 && PCI |
243 | help | 243 | help |
244 | If you say yes here you get support for the temperature | 244 | If you say yes here you get support for the temperature |
245 | sensor(s) inside your CPU. Supported are later revisions of | 245 | sensor(s) inside your CPU. Supported are later revisions of |
246 | the AMD Family 10h and all revisions of the AMD Family 11h | 246 | the AMD Family 10h and all revisions of the AMD Family 11h, |
247 | microarchitectures. | 247 | 12h (Llano), and 14h (Brazos) microarchitectures. |
248 | 248 | ||
249 | This driver can also be built as a module. If so, the module | 249 | This driver can also be built as a module. If so, the module |
250 | will be called k10temp. | 250 | will be called k10temp. |
@@ -455,13 +455,14 @@ config SENSORS_JZ4740 | |||
455 | called jz4740-hwmon. | 455 | called jz4740-hwmon. |
456 | 456 | ||
457 | config SENSORS_JC42 | 457 | config SENSORS_JC42 |
458 | tristate "JEDEC JC42.4 compliant temperature sensors" | 458 | tristate "JEDEC JC42.4 compliant memory module temperature sensors" |
459 | depends on I2C | 459 | depends on I2C |
460 | help | 460 | help |
461 | If you say yes here you get support for Jedec JC42.4 compliant | 461 | If you say yes here, you get support for JEDEC JC42.4 compliant |
462 | temperature sensors. Support will include, but not be limited to, | 462 | temperature sensors, which are used on many DDR3 memory modules for |
463 | ADT7408, CAT34TS02,, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243, | 463 | mobile devices and servers. Support will include, but not be limited |
464 | MCP9843, SE97, SE98, STTS424, TSE2002B3, and TS3000B3. | 464 | to, ADT7408, CAT34TS02, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243, |
465 | MCP9843, SE97, SE98, STTS424(E), TSE2002B3, and TS3000B3. | ||
465 | 466 | ||
466 | This driver can also be built as a module. If so, the module | 467 | This driver can also be built as a module. If so, the module |
467 | will be called jc42. | 468 | will be called jc42. |
@@ -574,7 +575,7 @@ config SENSORS_LM85 | |||
574 | help | 575 | help |
575 | If you say yes here you get support for National Semiconductor LM85 | 576 | If you say yes here you get support for National Semiconductor LM85 |
576 | sensor chips and clones: ADM1027, ADT7463, ADT7468, EMC6D100, | 577 | sensor chips and clones: ADM1027, ADT7463, ADT7468, EMC6D100, |
577 | EMC6D101 and EMC6D102. | 578 | EMC6D101, EMC6D102, and EMC6D103. |
578 | 579 | ||
579 | This driver can also be built as a module. If so, the module | 580 | This driver can also be built as a module. If so, the module |
580 | will be called lm85. | 581 | will be called lm85. |
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c index 86d822aa9bbf..d46c0c758ddf 100644 --- a/drivers/hwmon/ad7414.c +++ b/drivers/hwmon/ad7414.c | |||
@@ -242,6 +242,7 @@ static const struct i2c_device_id ad7414_id[] = { | |||
242 | { "ad7414", 0 }, | 242 | { "ad7414", 0 }, |
243 | {} | 243 | {} |
244 | }; | 244 | }; |
245 | MODULE_DEVICE_TABLE(i2c, ad7414_id); | ||
245 | 246 | ||
246 | static struct i2c_driver ad7414_driver = { | 247 | static struct i2c_driver ad7414_driver = { |
247 | .driver = { | 248 | .driver = { |
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c index f13c843a2964..5cc3e3784b42 100644 --- a/drivers/hwmon/adt7411.c +++ b/drivers/hwmon/adt7411.c | |||
@@ -334,6 +334,7 @@ static const struct i2c_device_id adt7411_id[] = { | |||
334 | { "adt7411", 0 }, | 334 | { "adt7411", 0 }, |
335 | { } | 335 | { } |
336 | }; | 336 | }; |
337 | MODULE_DEVICE_TABLE(i2c, adt7411_id); | ||
337 | 338 | ||
338 | static struct i2c_driver adt7411_driver = { | 339 | static struct i2c_driver adt7411_driver = { |
339 | .driver = { | 340 | .driver = { |
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c index 5dea9faa1656..cd2a6e437aec 100644 --- a/drivers/hwmon/emc1403.c +++ b/drivers/hwmon/emc1403.c | |||
@@ -344,7 +344,7 @@ static int emc1403_remove(struct i2c_client *client) | |||
344 | } | 344 | } |
345 | 345 | ||
346 | static const unsigned short emc1403_address_list[] = { | 346 | static const unsigned short emc1403_address_list[] = { |
347 | 0x18, 0x2a, 0x4c, 0x4d, I2C_CLIENT_END | 347 | 0x18, 0x29, 0x4c, 0x4d, I2C_CLIENT_END |
348 | }; | 348 | }; |
349 | 349 | ||
350 | static const struct i2c_device_id emc1403_idtable[] = { | 350 | static const struct i2c_device_id emc1403_idtable[] = { |
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c index 340fc78c8dde..934991237061 100644 --- a/drivers/hwmon/jc42.c +++ b/drivers/hwmon/jc42.c | |||
@@ -53,6 +53,8 @@ static const unsigned short normal_i2c[] = { | |||
53 | 53 | ||
54 | /* Configuration register defines */ | 54 | /* Configuration register defines */ |
55 | #define JC42_CFG_CRIT_ONLY (1 << 2) | 55 | #define JC42_CFG_CRIT_ONLY (1 << 2) |
56 | #define JC42_CFG_TCRIT_LOCK (1 << 6) | ||
57 | #define JC42_CFG_EVENT_LOCK (1 << 7) | ||
56 | #define JC42_CFG_SHUTDOWN (1 << 8) | 58 | #define JC42_CFG_SHUTDOWN (1 << 8) |
57 | #define JC42_CFG_HYST_SHIFT 9 | 59 | #define JC42_CFG_HYST_SHIFT 9 |
58 | #define JC42_CFG_HYST_MASK 0x03 | 60 | #define JC42_CFG_HYST_MASK 0x03 |
@@ -332,7 +334,7 @@ static ssize_t set_temp_crit_hyst(struct device *dev, | |||
332 | { | 334 | { |
333 | struct i2c_client *client = to_i2c_client(dev); | 335 | struct i2c_client *client = to_i2c_client(dev); |
334 | struct jc42_data *data = i2c_get_clientdata(client); | 336 | struct jc42_data *data = i2c_get_clientdata(client); |
335 | long val; | 337 | unsigned long val; |
336 | int diff, hyst; | 338 | int diff, hyst; |
337 | int err; | 339 | int err; |
338 | int ret = count; | 340 | int ret = count; |
@@ -380,14 +382,14 @@ static ssize_t show_alarm(struct device *dev, | |||
380 | 382 | ||
381 | static DEVICE_ATTR(temp1_input, S_IRUGO, | 383 | static DEVICE_ATTR(temp1_input, S_IRUGO, |
382 | show_temp_input, NULL); | 384 | show_temp_input, NULL); |
383 | static DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, | 385 | static DEVICE_ATTR(temp1_crit, S_IRUGO, |
384 | show_temp_crit, set_temp_crit); | 386 | show_temp_crit, set_temp_crit); |
385 | static DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, | 387 | static DEVICE_ATTR(temp1_min, S_IRUGO, |
386 | show_temp_min, set_temp_min); | 388 | show_temp_min, set_temp_min); |
387 | static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, | 389 | static DEVICE_ATTR(temp1_max, S_IRUGO, |
388 | show_temp_max, set_temp_max); | 390 | show_temp_max, set_temp_max); |
389 | 391 | ||
390 | static DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, | 392 | static DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, |
391 | show_temp_crit_hyst, set_temp_crit_hyst); | 393 | show_temp_crit_hyst, set_temp_crit_hyst); |
392 | static DEVICE_ATTR(temp1_max_hyst, S_IRUGO, | 394 | static DEVICE_ATTR(temp1_max_hyst, S_IRUGO, |
393 | show_temp_max_hyst, NULL); | 395 | show_temp_max_hyst, NULL); |
@@ -412,8 +414,31 @@ static struct attribute *jc42_attributes[] = { | |||
412 | NULL | 414 | NULL |
413 | }; | 415 | }; |
414 | 416 | ||
417 | static mode_t jc42_attribute_mode(struct kobject *kobj, | ||
418 | struct attribute *attr, int index) | ||
419 | { | ||
420 | struct device *dev = container_of(kobj, struct device, kobj); | ||
421 | struct i2c_client *client = to_i2c_client(dev); | ||
422 | struct jc42_data *data = i2c_get_clientdata(client); | ||
423 | unsigned int config = data->config; | ||
424 | bool readonly; | ||
425 | |||
426 | if (attr == &dev_attr_temp1_crit.attr) | ||
427 | readonly = config & JC42_CFG_TCRIT_LOCK; | ||
428 | else if (attr == &dev_attr_temp1_min.attr || | ||
429 | attr == &dev_attr_temp1_max.attr) | ||
430 | readonly = config & JC42_CFG_EVENT_LOCK; | ||
431 | else if (attr == &dev_attr_temp1_crit_hyst.attr) | ||
432 | readonly = config & (JC42_CFG_EVENT_LOCK | JC42_CFG_TCRIT_LOCK); | ||
433 | else | ||
434 | readonly = true; | ||
435 | |||
436 | return S_IRUGO | (readonly ? 0 : S_IWUSR); | ||
437 | } | ||
438 | |||
415 | static const struct attribute_group jc42_group = { | 439 | static const struct attribute_group jc42_group = { |
416 | .attrs = jc42_attributes, | 440 | .attrs = jc42_attributes, |
441 | .is_visible = jc42_attribute_mode, | ||
417 | }; | 442 | }; |
418 | 443 | ||
419 | /* Return 0 if detection is successful, -ENODEV otherwise */ | 444 | /* Return 0 if detection is successful, -ENODEV otherwise */ |
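The jc42.c changes above make the limit attributes read-only in their static definitions and add an is_visible callback (jc42_attribute_mode) that grants write permission only when the corresponding TCRIT/EVENT lock bits are clear in the configuration register. The heart of that callback is a handful of bit tests; a standalone sketch of the same decision, reusing the lock-bit definitions from the patch and plain octal mode constants (the enum is illustrative, not the driver's):

#include <stdio.h>

/* Configuration bits as defined in the jc42.c hunk above. */
#define JC42_CFG_TCRIT_LOCK  (1 << 6)
#define JC42_CFG_EVENT_LOCK  (1 << 7)

/* sysfs-style permission bits. */
#define S_IRUGO  0444
#define S_IWUSR  0200

enum jc42_attr { ATTR_CRIT, ATTR_MIN, ATTR_MAX, ATTR_CRIT_HYST, ATTR_OTHER };

static unsigned int attribute_mode(enum jc42_attr attr, unsigned int config)
{
    int readonly;

    switch (attr) {
    case ATTR_CRIT:
        readonly = config & JC42_CFG_TCRIT_LOCK;
        break;
    case ATTR_MIN:
    case ATTR_MAX:
        readonly = config & JC42_CFG_EVENT_LOCK;
        break;
    case ATTR_CRIT_HYST:
        readonly = config & (JC42_CFG_EVENT_LOCK | JC42_CFG_TCRIT_LOCK);
        break;
    default:
        readonly = 1;
        break;
    }

    return S_IRUGO | (readonly ? 0 : S_IWUSR);
}

int main(void)
{
    unsigned int config = JC42_CFG_TCRIT_LOCK;   /* hypothetical: critical limit locked */

    printf("temp1_crit mode: %o\n", attribute_mode(ATTR_CRIT, config)); /* 444 */
    printf("temp1_max  mode: %o\n", attribute_mode(ATTR_MAX, config));  /* 644 */
    return 0;
}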
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index da5a2404cd3e..82bf65aa2968 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * k10temp.c - AMD Family 10h/11h processor hardware monitoring | 2 | * k10temp.c - AMD Family 10h/11h/12h/14h processor hardware monitoring |
3 | * | 3 | * |
4 | * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de> | 4 | * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de> |
5 | * | 5 | * |
@@ -25,7 +25,7 @@ | |||
25 | #include <linux/pci.h> | 25 | #include <linux/pci.h> |
26 | #include <asm/processor.h> | 26 | #include <asm/processor.h> |
27 | 27 | ||
28 | MODULE_DESCRIPTION("AMD Family 10h/11h CPU core temperature monitor"); | 28 | MODULE_DESCRIPTION("AMD Family 10h/11h/12h/14h CPU core temperature monitor"); |
29 | MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>"); | 29 | MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>"); |
30 | MODULE_LICENSE("GPL"); | 30 | MODULE_LICENSE("GPL"); |
31 | 31 | ||
@@ -208,6 +208,7 @@ static void __devexit k10temp_remove(struct pci_dev *pdev) | |||
208 | static const struct pci_device_id k10temp_id_table[] = { | 208 | static const struct pci_device_id k10temp_id_table[] = { |
209 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, | 209 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, |
210 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) }, | 210 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) }, |
211 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) }, | ||
211 | {} | 212 | {} |
212 | }; | 213 | }; |
213 | MODULE_DEVICE_TABLE(pci, k10temp_id_table); | 214 | MODULE_DEVICE_TABLE(pci, k10temp_id_table); |
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c index 776aeb3019d2..508cb291f71b 100644 --- a/drivers/hwmon/lm63.c +++ b/drivers/hwmon/lm63.c | |||
@@ -98,6 +98,9 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END }; | |||
98 | * value, it uses signed 8-bit values with LSB = 1 degree Celsius. | 98 | * value, it uses signed 8-bit values with LSB = 1 degree Celsius. |
99 | * For remote temperature, low and high limits, it uses signed 11-bit values | 99 | * For remote temperature, low and high limits, it uses signed 11-bit values |
100 | * with LSB = 0.125 degree Celsius, left-justified in 16-bit registers. | 100 | * with LSB = 0.125 degree Celsius, left-justified in 16-bit registers. |
101 | * For LM64 the actual remote diode temperature is 16 degree Celsius higher | ||
102 | * than the register reading. Remote temperature setpoints have to be | ||
103 | * adapted accordingly. | ||
101 | */ | 104 | */ |
102 | 105 | ||
103 | #define FAN_FROM_REG(reg) ((reg) == 0xFFFC || (reg) == 0 ? 0 : \ | 106 | #define FAN_FROM_REG(reg) ((reg) == 0xFFFC || (reg) == 0 ? 0 : \ |
@@ -165,6 +168,8 @@ struct lm63_data { | |||
165 | struct mutex update_lock; | 168 | struct mutex update_lock; |
166 | char valid; /* zero until following fields are valid */ | 169 | char valid; /* zero until following fields are valid */ |
167 | unsigned long last_updated; /* in jiffies */ | 170 | unsigned long last_updated; /* in jiffies */ |
171 | int kind; | ||
172 | int temp2_offset; | ||
168 | 173 | ||
169 | /* registers values */ | 174 | /* registers values */ |
170 | u8 config, config_fan; | 175 | u8 config, config_fan; |
@@ -247,16 +252,34 @@ static ssize_t show_pwm1_enable(struct device *dev, struct device_attribute *dum | |||
247 | return sprintf(buf, "%d\n", data->config_fan & 0x20 ? 1 : 2); | 252 | return sprintf(buf, "%d\n", data->config_fan & 0x20 ? 1 : 2); |
248 | } | 253 | } |
249 | 254 | ||
250 | static ssize_t show_temp8(struct device *dev, struct device_attribute *devattr, | 255 | /* |
251 | char *buf) | 256 | * There are 8bit registers for both local(temp1) and remote(temp2) sensor. |
257 | * For remote sensor registers temp2_offset has to be considered, | ||
258 | * for local sensor it must not. | ||
259 | * So we need separate 8bit accessors for local and remote sensor. | ||
260 | */ | ||
261 | static ssize_t show_local_temp8(struct device *dev, | ||
262 | struct device_attribute *devattr, | ||
263 | char *buf) | ||
252 | { | 264 | { |
253 | struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); | 265 | struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); |
254 | struct lm63_data *data = lm63_update_device(dev); | 266 | struct lm63_data *data = lm63_update_device(dev); |
255 | return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[attr->index])); | 267 | return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[attr->index])); |
256 | } | 268 | } |
257 | 269 | ||
258 | static ssize_t set_temp8(struct device *dev, struct device_attribute *dummy, | 270 | static ssize_t show_remote_temp8(struct device *dev, |
259 | const char *buf, size_t count) | 271 | struct device_attribute *devattr, |
272 | char *buf) | ||
273 | { | ||
274 | struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); | ||
275 | struct lm63_data *data = lm63_update_device(dev); | ||
276 | return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[attr->index]) | ||
277 | + data->temp2_offset); | ||
278 | } | ||
279 | |||
280 | static ssize_t set_local_temp8(struct device *dev, | ||
281 | struct device_attribute *dummy, | ||
282 | const char *buf, size_t count) | ||
260 | { | 283 | { |
261 | struct i2c_client *client = to_i2c_client(dev); | 284 | struct i2c_client *client = to_i2c_client(dev); |
262 | struct lm63_data *data = i2c_get_clientdata(client); | 285 | struct lm63_data *data = i2c_get_clientdata(client); |
@@ -274,7 +297,8 @@ static ssize_t show_temp11(struct device *dev, struct device_attribute *devattr, | |||
274 | { | 297 | { |
275 | struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); | 298 | struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); |
276 | struct lm63_data *data = lm63_update_device(dev); | 299 | struct lm63_data *data = lm63_update_device(dev); |
277 | return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[attr->index])); | 300 | return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[attr->index]) |
301 | + data->temp2_offset); | ||
278 | } | 302 | } |
279 | 303 | ||
280 | static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr, | 304 | static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr, |
@@ -294,7 +318,7 @@ static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr, | |||
294 | int nr = attr->index; | 318 | int nr = attr->index; |
295 | 319 | ||
296 | mutex_lock(&data->update_lock); | 320 | mutex_lock(&data->update_lock); |
297 | data->temp11[nr] = TEMP11_TO_REG(val); | 321 | data->temp11[nr] = TEMP11_TO_REG(val - data->temp2_offset); |
298 | i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2], | 322 | i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2], |
299 | data->temp11[nr] >> 8); | 323 | data->temp11[nr] >> 8); |
300 | i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2 + 1], | 324 | i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2 + 1], |
@@ -310,6 +334,7 @@ static ssize_t show_temp2_crit_hyst(struct device *dev, struct device_attribute | |||
310 | { | 334 | { |
311 | struct lm63_data *data = lm63_update_device(dev); | 335 | struct lm63_data *data = lm63_update_device(dev); |
312 | return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[2]) | 336 | return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[2]) |
337 | + data->temp2_offset | ||
313 | - TEMP8_FROM_REG(data->temp2_crit_hyst)); | 338 | - TEMP8_FROM_REG(data->temp2_crit_hyst)); |
314 | } | 339 | } |
315 | 340 | ||
@@ -324,7 +349,7 @@ static ssize_t set_temp2_crit_hyst(struct device *dev, struct device_attribute * | |||
324 | long hyst; | 349 | long hyst; |
325 | 350 | ||
326 | mutex_lock(&data->update_lock); | 351 | mutex_lock(&data->update_lock); |
327 | hyst = TEMP8_FROM_REG(data->temp8[2]) - val; | 352 | hyst = TEMP8_FROM_REG(data->temp8[2]) + data->temp2_offset - val; |
328 | i2c_smbus_write_byte_data(client, LM63_REG_REMOTE_TCRIT_HYST, | 353 | i2c_smbus_write_byte_data(client, LM63_REG_REMOTE_TCRIT_HYST, |
329 | HYST_TO_REG(hyst)); | 354 | HYST_TO_REG(hyst)); |
330 | mutex_unlock(&data->update_lock); | 355 | mutex_unlock(&data->update_lock); |
@@ -355,16 +380,21 @@ static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan, | |||
355 | static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm1, set_pwm1); | 380 | static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm1, set_pwm1); |
356 | static DEVICE_ATTR(pwm1_enable, S_IRUGO, show_pwm1_enable, NULL); | 381 | static DEVICE_ATTR(pwm1_enable, S_IRUGO, show_pwm1_enable, NULL); |
357 | 382 | ||
358 | static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp8, NULL, 0); | 383 | static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_local_temp8, NULL, 0); |
359 | static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp8, | 384 | static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_local_temp8, |
360 | set_temp8, 1); | 385 | set_local_temp8, 1); |
361 | 386 | ||
362 | static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp11, NULL, 0); | 387 | static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp11, NULL, 0); |
363 | static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp11, | 388 | static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp11, |
364 | set_temp11, 1); | 389 | set_temp11, 1); |
365 | static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp11, | 390 | static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp11, |
366 | set_temp11, 2); | 391 | set_temp11, 2); |
367 | static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_temp8, NULL, 2); | 392 | /* |
393 | * On LM63, temp2_crit can be set only once, which should be job | ||
394 | * of the bootloader. | ||
395 | */ | ||
396 | static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_remote_temp8, | ||
397 | NULL, 2); | ||
368 | static DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_temp2_crit_hyst, | 398 | static DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_temp2_crit_hyst, |
369 | set_temp2_crit_hyst); | 399 | set_temp2_crit_hyst); |
370 | 400 | ||
@@ -479,7 +509,12 @@ static int lm63_probe(struct i2c_client *new_client, | |||
479 | data->valid = 0; | 509 | data->valid = 0; |
480 | mutex_init(&data->update_lock); | 510 | mutex_init(&data->update_lock); |
481 | 511 | ||
482 | /* Initialize the LM63 chip */ | 512 | /* Set the device type */ |
513 | data->kind = id->driver_data; | ||
514 | if (data->kind == lm64) | ||
515 | data->temp2_offset = 16000; | ||
516 | |||
517 | /* Initialize chip */ | ||
483 | lm63_init_client(new_client); | 518 | lm63_init_client(new_client); |
484 | 519 | ||
485 | /* Register sysfs hooks */ | 520 | /* Register sysfs hooks */ |
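The lm63 hunks above report temperatures in millidegrees Celsius, so the LM64's 16 degree diode offset becomes temp2_offset = 16000: it is added on every remote-sensor read and subtracted before every remote-limit write. A minimal sketch of that arithmetic, assuming the usual 1 degC/LSB conversion for the 8-bit registers (the helper names below are illustrative, not the driver's):

/* Illustrative sketch only; values are millidegrees Celsius, matching sysfs. */
#define TEMP8_FROM_REG(reg)	((reg) * 1000)	/* signed 8-bit, 1 degC per LSB */
#define TEMP8_TO_REG(val)	((val) / 1000)

static int remote_temp_mdeg(int reg, int temp2_offset)
{
	/* reg is the sign-extended register value; LM64 reads 16 degC low,
	 * so temp2_offset (16000) is added back before reporting. */
	return TEMP8_FROM_REG(reg) + temp2_offset;
}

static int remote_limit_to_reg(long val, int temp2_offset)
{
	/* Setpoints written by userspace are shifted the other way. */
	return TEMP8_TO_REG(val - temp2_offset);
}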
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c index 1e229847f37a..d2cc28660816 100644 --- a/drivers/hwmon/lm85.c +++ b/drivers/hwmon/lm85.c | |||
@@ -41,7 +41,7 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; | |||
41 | enum chips { | 41 | enum chips { |
42 | any_chip, lm85b, lm85c, | 42 | any_chip, lm85b, lm85c, |
43 | adm1027, adt7463, adt7468, | 43 | adm1027, adt7463, adt7468, |
44 | emc6d100, emc6d102 | 44 | emc6d100, emc6d102, emc6d103 |
45 | }; | 45 | }; |
46 | 46 | ||
47 | /* The LM85 registers */ | 47 | /* The LM85 registers */ |
@@ -90,6 +90,9 @@ enum chips { | |||
90 | #define LM85_VERSTEP_EMC6D100_A0 0x60 | 90 | #define LM85_VERSTEP_EMC6D100_A0 0x60 |
91 | #define LM85_VERSTEP_EMC6D100_A1 0x61 | 91 | #define LM85_VERSTEP_EMC6D100_A1 0x61 |
92 | #define LM85_VERSTEP_EMC6D102 0x65 | 92 | #define LM85_VERSTEP_EMC6D102 0x65 |
93 | #define LM85_VERSTEP_EMC6D103_A0 0x68 | ||
94 | #define LM85_VERSTEP_EMC6D103_A1 0x69 | ||
95 | #define LM85_VERSTEP_EMC6D103S 0x6A /* Also known as EMC6D103:A2 */ | ||
93 | 96 | ||
94 | #define LM85_REG_CONFIG 0x40 | 97 | #define LM85_REG_CONFIG 0x40 |
95 | 98 | ||
@@ -348,6 +351,7 @@ static const struct i2c_device_id lm85_id[] = { | |||
348 | { "emc6d100", emc6d100 }, | 351 | { "emc6d100", emc6d100 }, |
349 | { "emc6d101", emc6d100 }, | 352 | { "emc6d101", emc6d100 }, |
350 | { "emc6d102", emc6d102 }, | 353 | { "emc6d102", emc6d102 }, |
354 | { "emc6d103", emc6d103 }, | ||
351 | { } | 355 | { } |
352 | }; | 356 | }; |
353 | MODULE_DEVICE_TABLE(i2c, lm85_id); | 357 | MODULE_DEVICE_TABLE(i2c, lm85_id); |
@@ -1250,6 +1254,20 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info) | |||
1250 | case LM85_VERSTEP_EMC6D102: | 1254 | case LM85_VERSTEP_EMC6D102: |
1251 | type_name = "emc6d102"; | 1255 | type_name = "emc6d102"; |
1252 | break; | 1256 | break; |
1257 | case LM85_VERSTEP_EMC6D103_A0: | ||
1258 | case LM85_VERSTEP_EMC6D103_A1: | ||
1259 | type_name = "emc6d103"; | ||
1260 | break; | ||
1261 | /* | ||
1262 | * Registers apparently missing in EMC6D103S/EMC6D103:A2 | ||
1263 | * compared to EMC6D103:A0, EMC6D103:A1, and EMC6D102 | ||
1264 | * (according to the data sheets), but used unconditionally | ||
1265 | * in the driver: 62[5:7], 6D[0:7], and 6E[0:7]. | ||
1266 | * So skip EMC6D103S for now. | ||
1267 | case LM85_VERSTEP_EMC6D103S: | ||
1268 | type_name = "emc6d103s"; | ||
1269 | break; | ||
1270 | */ | ||
1253 | } | 1271 | } |
1254 | } else { | 1272 | } else { |
1255 | dev_dbg(&adapter->dev, | 1273 | dev_dbg(&adapter->dev, |
@@ -1283,6 +1301,7 @@ static int lm85_probe(struct i2c_client *client, | |||
1283 | case adt7468: | 1301 | case adt7468: |
1284 | case emc6d100: | 1302 | case emc6d100: |
1285 | case emc6d102: | 1303 | case emc6d102: |
1304 | case emc6d103: | ||
1286 | data->freq_map = adm1027_freq_map; | 1305 | data->freq_map = adm1027_freq_map; |
1287 | break; | 1306 | break; |
1288 | default: | 1307 | default: |
@@ -1468,7 +1487,7 @@ static struct lm85_data *lm85_update_device(struct device *dev) | |||
1468 | /* More alarm bits */ | 1487 | /* More alarm bits */ |
1469 | data->alarms |= lm85_read_value(client, | 1488 | data->alarms |= lm85_read_value(client, |
1470 | EMC6D100_REG_ALARM3) << 16; | 1489 | EMC6D100_REG_ALARM3) << 16; |
1471 | } else if (data->type == emc6d102) { | 1490 | } else if (data->type == emc6d102 || data->type == emc6d103) { |
1472 | /* Have to read LSB bits after the MSB ones because | 1491 | /* Have to read LSB bits after the MSB ones because |
1473 | the reading of the MSB bits has frozen the | 1492 | the reading of the MSB bits has frozen the |
1474 | LSBs (backward from the ADM1027). | 1493 | LSBs (backward from the ADM1027). |
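A condensed view of the detection logic the lm85 hunks above extend, as a sketch (constant names taken from the hunks; the EMC6D103S case stays unhandled because the driver unconditionally touches registers 62[5:7], 6D and 6E that this stepping lacks):

/* Sketch: map the SMSC verstep register value to an i2c device name. */
static const char *emc_name_from_verstep(unsigned char verstep)
{
	switch (verstep) {
	case LM85_VERSTEP_EMC6D102:
		return "emc6d102";
	case LM85_VERSTEP_EMC6D103_A0:	/* 0x68 */
	case LM85_VERSTEP_EMC6D103_A1:	/* 0x69 */
		return "emc6d103";
	default:
		return NULL;		/* includes EMC6D103S (0x6A) for now */
	}
}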
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index b605ff3a1fa0..829a2a1029f7 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c | |||
@@ -847,11 +847,15 @@ complete: | |||
847 | dev_err(dev->dev, "Arbitration lost\n"); | 847 | dev_err(dev->dev, "Arbitration lost\n"); |
848 | err |= OMAP_I2C_STAT_AL; | 848 | err |= OMAP_I2C_STAT_AL; |
849 | } | 849 | } |
850 | /* | ||
851 | * ProDB0017052: Clear ARDY bit twice | ||
852 | */ | ||
850 | if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK | | 853 | if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK | |
851 | OMAP_I2C_STAT_AL)) { | 854 | OMAP_I2C_STAT_AL)) { |
852 | omap_i2c_ack_stat(dev, stat & | 855 | omap_i2c_ack_stat(dev, stat & |
853 | (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR | | 856 | (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR | |
854 | OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR)); | 857 | OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR | |
858 | OMAP_I2C_STAT_ARDY)); | ||
855 | omap_i2c_complete_cmd(dev, err); | 859 | omap_i2c_complete_cmd(dev, err); |
856 | return IRQ_HANDLED; | 860 | return IRQ_HANDLED; |
857 | } | 861 | } |
@@ -1137,12 +1141,41 @@ omap_i2c_remove(struct platform_device *pdev) | |||
1137 | return 0; | 1141 | return 0; |
1138 | } | 1142 | } |
1139 | 1143 | ||
1144 | #ifdef CONFIG_SUSPEND | ||
1145 | static int omap_i2c_suspend(struct device *dev) | ||
1146 | { | ||
1147 | if (!pm_runtime_suspended(dev)) | ||
1148 | if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) | ||
1149 | dev->bus->pm->runtime_suspend(dev); | ||
1150 | |||
1151 | return 0; | ||
1152 | } | ||
1153 | |||
1154 | static int omap_i2c_resume(struct device *dev) | ||
1155 | { | ||
1156 | if (!pm_runtime_suspended(dev)) | ||
1157 | if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) | ||
1158 | dev->bus->pm->runtime_resume(dev); | ||
1159 | |||
1160 | return 0; | ||
1161 | } | ||
1162 | |||
1163 | static struct dev_pm_ops omap_i2c_pm_ops = { | ||
1164 | .suspend = omap_i2c_suspend, | ||
1165 | .resume = omap_i2c_resume, | ||
1166 | }; | ||
1167 | #define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops) | ||
1168 | #else | ||
1169 | #define OMAP_I2C_PM_OPS NULL | ||
1170 | #endif | ||
1171 | |||
1140 | static struct platform_driver omap_i2c_driver = { | 1172 | static struct platform_driver omap_i2c_driver = { |
1141 | .probe = omap_i2c_probe, | 1173 | .probe = omap_i2c_probe, |
1142 | .remove = omap_i2c_remove, | 1174 | .remove = omap_i2c_remove, |
1143 | .driver = { | 1175 | .driver = { |
1144 | .name = "omap_i2c", | 1176 | .name = "omap_i2c", |
1145 | .owner = THIS_MODULE, | 1177 | .owner = THIS_MODULE, |
1178 | .pm = OMAP_I2C_PM_OPS, | ||
1146 | }, | 1179 | }, |
1147 | }; | 1180 | }; |
1148 | 1181 | ||
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c index 495be451d326..266135ddf7fa 100644 --- a/drivers/i2c/busses/i2c-stu300.c +++ b/drivers/i2c/busses/i2c-stu300.c | |||
@@ -942,7 +942,7 @@ stu300_probe(struct platform_device *pdev) | |||
942 | adap->owner = THIS_MODULE; | 942 | adap->owner = THIS_MODULE; |
943 | /* DDC class but actually often used for more generic I2C */ | 943 | /* DDC class but actually often used for more generic I2C */ |
944 | adap->class = I2C_CLASS_DDC; | 944 | adap->class = I2C_CLASS_DDC; |
945 | strncpy(adap->name, "ST Microelectronics DDC I2C adapter", | 945 | strlcpy(adap->name, "ST Microelectronics DDC I2C adapter", |
946 | sizeof(adap->name)); | 946 | sizeof(adap->name)); |
947 | adap->nr = bus_nr; | 947 | adap->nr = bus_nr; |
948 | adap->algo = &stu300_algo; | 948 | adap->algo = &stu300_algo; |
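The stu300 change swaps strncpy() for strlcpy() when filling the adapter name. The practical difference: strncpy() does not NUL-terminate when the source is at least as long as the destination, while strlcpy() always terminates, truncating if needed. A standalone illustration, not driver code:

char name[8];

strncpy(name, "0123456789", sizeof(name)); /* name holds "01234567", no '\0'  */
strlcpy(name, "0123456789", sizeof(name)); /* name holds "0123456" plus '\0'  */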
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 8b606fd64022..08c194861af5 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c | |||
@@ -2610,9 +2610,11 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) | |||
2610 | netif_carrier_on(nesvnic->netdev); | 2610 | netif_carrier_on(nesvnic->netdev); |
2611 | 2611 | ||
2612 | spin_lock(&nesvnic->port_ibevent_lock); | 2612 | spin_lock(&nesvnic->port_ibevent_lock); |
2613 | if (nesdev->iw_status == 0) { | 2613 | if (nesvnic->of_device_registered) { |
2614 | nesdev->iw_status = 1; | 2614 | if (nesdev->iw_status == 0) { |
2615 | nes_port_ibevent(nesvnic); | 2615 | nesdev->iw_status = 1; |
2616 | nes_port_ibevent(nesvnic); | ||
2617 | } | ||
2616 | } | 2618 | } |
2617 | spin_unlock(&nesvnic->port_ibevent_lock); | 2619 | spin_unlock(&nesvnic->port_ibevent_lock); |
2618 | } | 2620 | } |
@@ -2642,9 +2644,11 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) | |||
2642 | netif_carrier_off(nesvnic->netdev); | 2644 | netif_carrier_off(nesvnic->netdev); |
2643 | 2645 | ||
2644 | spin_lock(&nesvnic->port_ibevent_lock); | 2646 | spin_lock(&nesvnic->port_ibevent_lock); |
2645 | if (nesdev->iw_status == 1) { | 2647 | if (nesvnic->of_device_registered) { |
2646 | nesdev->iw_status = 0; | 2648 | if (nesdev->iw_status == 1) { |
2647 | nes_port_ibevent(nesvnic); | 2649 | nesdev->iw_status = 0; |
2650 | nes_port_ibevent(nesvnic); | ||
2651 | } | ||
2648 | } | 2652 | } |
2649 | spin_unlock(&nesvnic->port_ibevent_lock); | 2653 | spin_unlock(&nesvnic->port_ibevent_lock); |
2650 | } | 2654 | } |
@@ -2703,9 +2707,11 @@ void nes_recheck_link_status(struct work_struct *work) | |||
2703 | netif_carrier_on(nesvnic->netdev); | 2707 | netif_carrier_on(nesvnic->netdev); |
2704 | 2708 | ||
2705 | spin_lock(&nesvnic->port_ibevent_lock); | 2709 | spin_lock(&nesvnic->port_ibevent_lock); |
2706 | if (nesdev->iw_status == 0) { | 2710 | if (nesvnic->of_device_registered) { |
2707 | nesdev->iw_status = 1; | 2711 | if (nesdev->iw_status == 0) { |
2708 | nes_port_ibevent(nesvnic); | 2712 | nesdev->iw_status = 1; |
2713 | nes_port_ibevent(nesvnic); | ||
2714 | } | ||
2709 | } | 2715 | } |
2710 | spin_unlock(&nesvnic->port_ibevent_lock); | 2716 | spin_unlock(&nesvnic->port_ibevent_lock); |
2711 | } | 2717 | } |
@@ -2723,9 +2729,11 @@ void nes_recheck_link_status(struct work_struct *work) | |||
2723 | netif_carrier_off(nesvnic->netdev); | 2729 | netif_carrier_off(nesvnic->netdev); |
2724 | 2730 | ||
2725 | spin_lock(&nesvnic->port_ibevent_lock); | 2731 | spin_lock(&nesvnic->port_ibevent_lock); |
2726 | if (nesdev->iw_status == 1) { | 2732 | if (nesvnic->of_device_registered) { |
2727 | nesdev->iw_status = 0; | 2733 | if (nesdev->iw_status == 1) { |
2728 | nes_port_ibevent(nesvnic); | 2734 | nesdev->iw_status = 0; |
2735 | nes_port_ibevent(nesvnic); | ||
2736 | } | ||
2729 | } | 2737 | } |
2730 | spin_unlock(&nesvnic->port_ibevent_lock); | 2738 | spin_unlock(&nesvnic->port_ibevent_lock); |
2731 | } | 2739 | } |
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index 8245237b67ce..eca0c41f1226 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c | |||
@@ -1005,7 +1005,8 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr) | |||
1005 | * there are still requests that haven't been acked. | 1005 | * there are still requests that haven't been acked. |
1006 | */ | 1006 | */ |
1007 | if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail && | 1007 | if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail && |
1008 | !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN))) | 1008 | !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)) && |
1009 | (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) | ||
1009 | start_timer(qp); | 1010 | start_timer(qp); |
1010 | 1011 | ||
1011 | while (qp->s_last != qp->s_acked) { | 1012 | while (qp->s_last != qp->s_acked) { |
@@ -1439,6 +1440,8 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp, | |||
1439 | } | 1440 | } |
1440 | 1441 | ||
1441 | spin_lock_irqsave(&qp->s_lock, flags); | 1442 | spin_lock_irqsave(&qp->s_lock, flags); |
1443 | if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) | ||
1444 | goto ack_done; | ||
1442 | 1445 | ||
1443 | /* Ignore invalid responses. */ | 1446 | /* Ignore invalid responses. */ |
1444 | if (qib_cmp24(psn, qp->s_next_psn) >= 0) | 1447 | if (qib_cmp24(psn, qp->s_next_psn) >= 0) |
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c index 23cf8fc933ec..5b8f59d6c3e8 100644 --- a/drivers/input/gameport/gameport.c +++ b/drivers/input/gameport/gameport.c | |||
@@ -360,7 +360,7 @@ static int gameport_queue_event(void *object, struct module *owner, | |||
360 | event->owner = owner; | 360 | event->owner = owner; |
361 | 361 | ||
362 | list_add_tail(&event->node, &gameport_event_list); | 362 | list_add_tail(&event->node, &gameport_event_list); |
363 | schedule_work(&gameport_event_work); | 363 | queue_work(system_long_wq, &gameport_event_work); |
364 | 364 | ||
365 | out: | 365 | out: |
366 | spin_unlock_irqrestore(&gameport_event_lock, flags); | 366 | spin_unlock_irqrestore(&gameport_event_lock, flags); |
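Both gameport here and serio further down stop using schedule_work(), which targets the default system workqueue, and instead queue their rescan/connect events on system_long_wq, the shared queue intended for work items that may run for a long time. Only the call site changes; the work item itself stays the same:

/* Before: default queue.  After: the long-running shared workqueue. */
schedule_work(&gameport_event_work);
queue_work(system_long_wq, &gameport_event_work);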
diff --git a/drivers/input/input.c b/drivers/input/input.c index 7985114beac7..11905b6a3023 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c | |||
@@ -75,7 +75,6 @@ static int input_defuzz_abs_event(int value, int old_val, int fuzz) | |||
75 | * dev->event_lock held and interrupts disabled. | 75 | * dev->event_lock held and interrupts disabled. |
76 | */ | 76 | */ |
77 | static void input_pass_event(struct input_dev *dev, | 77 | static void input_pass_event(struct input_dev *dev, |
78 | struct input_handler *src_handler, | ||
79 | unsigned int type, unsigned int code, int value) | 78 | unsigned int type, unsigned int code, int value) |
80 | { | 79 | { |
81 | struct input_handler *handler; | 80 | struct input_handler *handler; |
@@ -94,15 +93,6 @@ static void input_pass_event(struct input_dev *dev, | |||
94 | continue; | 93 | continue; |
95 | 94 | ||
96 | handler = handle->handler; | 95 | handler = handle->handler; |
97 | |||
98 | /* | ||
99 | * If this is the handler that injected this | ||
100 | * particular event we want to skip it to avoid | ||
101 | * filters firing again and again. | ||
102 | */ | ||
103 | if (handler == src_handler) | ||
104 | continue; | ||
105 | |||
106 | if (!handler->filter) { | 96 | if (!handler->filter) { |
107 | if (filtered) | 97 | if (filtered) |
108 | break; | 98 | break; |
@@ -132,7 +122,7 @@ static void input_repeat_key(unsigned long data) | |||
132 | if (test_bit(dev->repeat_key, dev->key) && | 122 | if (test_bit(dev->repeat_key, dev->key) && |
133 | is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) { | 123 | is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) { |
134 | 124 | ||
135 | input_pass_event(dev, NULL, EV_KEY, dev->repeat_key, 2); | 125 | input_pass_event(dev, EV_KEY, dev->repeat_key, 2); |
136 | 126 | ||
137 | if (dev->sync) { | 127 | if (dev->sync) { |
138 | /* | 128 | /* |
@@ -141,7 +131,7 @@ static void input_repeat_key(unsigned long data) | |||
141 | * Otherwise assume that the driver will send | 131 | * Otherwise assume that the driver will send |
142 | * SYN_REPORT once it's done. | 132 | * SYN_REPORT once it's done. |
143 | */ | 133 | */ |
144 | input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1); | 134 | input_pass_event(dev, EV_SYN, SYN_REPORT, 1); |
145 | } | 135 | } |
146 | 136 | ||
147 | if (dev->rep[REP_PERIOD]) | 137 | if (dev->rep[REP_PERIOD]) |
@@ -174,7 +164,6 @@ static void input_stop_autorepeat(struct input_dev *dev) | |||
174 | #define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE) | 164 | #define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE) |
175 | 165 | ||
176 | static int input_handle_abs_event(struct input_dev *dev, | 166 | static int input_handle_abs_event(struct input_dev *dev, |
177 | struct input_handler *src_handler, | ||
178 | unsigned int code, int *pval) | 167 | unsigned int code, int *pval) |
179 | { | 168 | { |
180 | bool is_mt_event; | 169 | bool is_mt_event; |
@@ -218,15 +207,13 @@ static int input_handle_abs_event(struct input_dev *dev, | |||
218 | /* Flush pending "slot" event */ | 207 | /* Flush pending "slot" event */ |
219 | if (is_mt_event && dev->slot != input_abs_get_val(dev, ABS_MT_SLOT)) { | 208 | if (is_mt_event && dev->slot != input_abs_get_val(dev, ABS_MT_SLOT)) { |
220 | input_abs_set_val(dev, ABS_MT_SLOT, dev->slot); | 209 | input_abs_set_val(dev, ABS_MT_SLOT, dev->slot); |
221 | input_pass_event(dev, src_handler, | 210 | input_pass_event(dev, EV_ABS, ABS_MT_SLOT, dev->slot); |
222 | EV_ABS, ABS_MT_SLOT, dev->slot); | ||
223 | } | 211 | } |
224 | 212 | ||
225 | return INPUT_PASS_TO_HANDLERS; | 213 | return INPUT_PASS_TO_HANDLERS; |
226 | } | 214 | } |
227 | 215 | ||
228 | static void input_handle_event(struct input_dev *dev, | 216 | static void input_handle_event(struct input_dev *dev, |
229 | struct input_handler *src_handler, | ||
230 | unsigned int type, unsigned int code, int value) | 217 | unsigned int type, unsigned int code, int value) |
231 | { | 218 | { |
232 | int disposition = INPUT_IGNORE_EVENT; | 219 | int disposition = INPUT_IGNORE_EVENT; |
@@ -279,8 +266,7 @@ static void input_handle_event(struct input_dev *dev, | |||
279 | 266 | ||
280 | case EV_ABS: | 267 | case EV_ABS: |
281 | if (is_event_supported(code, dev->absbit, ABS_MAX)) | 268 | if (is_event_supported(code, dev->absbit, ABS_MAX)) |
282 | disposition = input_handle_abs_event(dev, src_handler, | 269 | disposition = input_handle_abs_event(dev, code, &value); |
283 | code, &value); | ||
284 | 270 | ||
285 | break; | 271 | break; |
286 | 272 | ||
@@ -338,7 +324,7 @@ static void input_handle_event(struct input_dev *dev, | |||
338 | dev->event(dev, type, code, value); | 324 | dev->event(dev, type, code, value); |
339 | 325 | ||
340 | if (disposition & INPUT_PASS_TO_HANDLERS) | 326 | if (disposition & INPUT_PASS_TO_HANDLERS) |
341 | input_pass_event(dev, src_handler, type, code, value); | 327 | input_pass_event(dev, type, code, value); |
342 | } | 328 | } |
343 | 329 | ||
344 | /** | 330 | /** |
@@ -367,7 +353,7 @@ void input_event(struct input_dev *dev, | |||
367 | 353 | ||
368 | spin_lock_irqsave(&dev->event_lock, flags); | 354 | spin_lock_irqsave(&dev->event_lock, flags); |
369 | add_input_randomness(type, code, value); | 355 | add_input_randomness(type, code, value); |
370 | input_handle_event(dev, NULL, type, code, value); | 356 | input_handle_event(dev, type, code, value); |
371 | spin_unlock_irqrestore(&dev->event_lock, flags); | 357 | spin_unlock_irqrestore(&dev->event_lock, flags); |
372 | } | 358 | } |
373 | } | 359 | } |
@@ -397,8 +383,7 @@ void input_inject_event(struct input_handle *handle, | |||
397 | rcu_read_lock(); | 383 | rcu_read_lock(); |
398 | grab = rcu_dereference(dev->grab); | 384 | grab = rcu_dereference(dev->grab); |
399 | if (!grab || grab == handle) | 385 | if (!grab || grab == handle) |
400 | input_handle_event(dev, handle->handler, | 386 | input_handle_event(dev, type, code, value); |
401 | type, code, value); | ||
402 | rcu_read_unlock(); | 387 | rcu_read_unlock(); |
403 | 388 | ||
404 | spin_unlock_irqrestore(&dev->event_lock, flags); | 389 | spin_unlock_irqrestore(&dev->event_lock, flags); |
@@ -611,10 +596,10 @@ static void input_dev_release_keys(struct input_dev *dev) | |||
611 | for (code = 0; code <= KEY_MAX; code++) { | 596 | for (code = 0; code <= KEY_MAX; code++) { |
612 | if (is_event_supported(code, dev->keybit, KEY_MAX) && | 597 | if (is_event_supported(code, dev->keybit, KEY_MAX) && |
613 | __test_and_clear_bit(code, dev->key)) { | 598 | __test_and_clear_bit(code, dev->key)) { |
614 | input_pass_event(dev, NULL, EV_KEY, code, 0); | 599 | input_pass_event(dev, EV_KEY, code, 0); |
615 | } | 600 | } |
616 | } | 601 | } |
617 | input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1); | 602 | input_pass_event(dev, EV_SYN, SYN_REPORT, 1); |
618 | } | 603 | } |
619 | } | 604 | } |
620 | 605 | ||
@@ -889,9 +874,9 @@ int input_set_keycode(struct input_dev *dev, | |||
889 | !is_event_supported(old_keycode, dev->keybit, KEY_MAX) && | 874 | !is_event_supported(old_keycode, dev->keybit, KEY_MAX) && |
890 | __test_and_clear_bit(old_keycode, dev->key)) { | 875 | __test_and_clear_bit(old_keycode, dev->key)) { |
891 | 876 | ||
892 | input_pass_event(dev, NULL, EV_KEY, old_keycode, 0); | 877 | input_pass_event(dev, EV_KEY, old_keycode, 0); |
893 | if (dev->sync) | 878 | if (dev->sync) |
894 | input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1); | 879 | input_pass_event(dev, EV_SYN, SYN_REPORT, 1); |
895 | } | 880 | } |
896 | 881 | ||
897 | out: | 882 | out: |
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c index ac471b77c18e..99ce9032d08c 100644 --- a/drivers/input/keyboard/tegra-kbc.c +++ b/drivers/input/keyboard/tegra-kbc.c | |||
@@ -71,8 +71,9 @@ struct tegra_kbc { | |||
71 | spinlock_t lock; | 71 | spinlock_t lock; |
72 | unsigned int repoll_dly; | 72 | unsigned int repoll_dly; |
73 | unsigned long cp_dly_jiffies; | 73 | unsigned long cp_dly_jiffies; |
74 | bool use_fn_map; | ||
74 | const struct tegra_kbc_platform_data *pdata; | 75 | const struct tegra_kbc_platform_data *pdata; |
75 | unsigned short keycode[KBC_MAX_KEY]; | 76 | unsigned short keycode[KBC_MAX_KEY * 2]; |
76 | unsigned short current_keys[KBC_MAX_KPENT]; | 77 | unsigned short current_keys[KBC_MAX_KPENT]; |
77 | unsigned int num_pressed_keys; | 78 | unsigned int num_pressed_keys; |
78 | struct timer_list timer; | 79 | struct timer_list timer; |
@@ -178,6 +179,40 @@ static const u32 tegra_kbc_default_keymap[] = { | |||
178 | KEY(15, 5, KEY_F2), | 179 | KEY(15, 5, KEY_F2), |
179 | KEY(15, 6, KEY_CAPSLOCK), | 180 | KEY(15, 6, KEY_CAPSLOCK), |
180 | KEY(15, 7, KEY_F6), | 181 | KEY(15, 7, KEY_F6), |
182 | |||
183 | /* Software Handled Function Keys */ | ||
184 | KEY(20, 0, KEY_KP7), | ||
185 | |||
186 | KEY(21, 0, KEY_KP9), | ||
187 | KEY(21, 1, KEY_KP8), | ||
188 | KEY(21, 2, KEY_KP4), | ||
189 | KEY(21, 4, KEY_KP1), | ||
190 | |||
191 | KEY(22, 1, KEY_KPSLASH), | ||
192 | KEY(22, 2, KEY_KP6), | ||
193 | KEY(22, 3, KEY_KP5), | ||
194 | KEY(22, 4, KEY_KP3), | ||
195 | KEY(22, 5, KEY_KP2), | ||
196 | KEY(22, 7, KEY_KP0), | ||
197 | |||
198 | KEY(27, 1, KEY_KPASTERISK), | ||
199 | KEY(27, 3, KEY_KPMINUS), | ||
200 | KEY(27, 4, KEY_KPPLUS), | ||
201 | KEY(27, 5, KEY_KPDOT), | ||
202 | |||
203 | KEY(28, 5, KEY_VOLUMEUP), | ||
204 | |||
205 | KEY(29, 3, KEY_HOME), | ||
206 | KEY(29, 4, KEY_END), | ||
207 | KEY(29, 5, KEY_BRIGHTNESSDOWN), | ||
208 | KEY(29, 6, KEY_VOLUMEDOWN), | ||
209 | KEY(29, 7, KEY_BRIGHTNESSUP), | ||
210 | |||
211 | KEY(30, 0, KEY_NUMLOCK), | ||
212 | KEY(30, 1, KEY_SCROLLLOCK), | ||
213 | KEY(30, 2, KEY_MUTE), | ||
214 | |||
215 | KEY(31, 4, KEY_HELP), | ||
181 | }; | 216 | }; |
182 | 217 | ||
183 | static const struct matrix_keymap_data tegra_kbc_default_keymap_data = { | 218 | static const struct matrix_keymap_data tegra_kbc_default_keymap_data = { |
@@ -224,6 +259,7 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc) | |||
224 | unsigned int i; | 259 | unsigned int i; |
225 | unsigned int num_down = 0; | 260 | unsigned int num_down = 0; |
226 | unsigned long flags; | 261 | unsigned long flags; |
262 | bool fn_keypress = false; | ||
227 | 263 | ||
228 | spin_lock_irqsave(&kbc->lock, flags); | 264 | spin_lock_irqsave(&kbc->lock, flags); |
229 | for (i = 0; i < KBC_MAX_KPENT; i++) { | 265 | for (i = 0; i < KBC_MAX_KPENT; i++) { |
@@ -237,11 +273,28 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc) | |||
237 | MATRIX_SCAN_CODE(row, col, KBC_ROW_SHIFT); | 273 | MATRIX_SCAN_CODE(row, col, KBC_ROW_SHIFT); |
238 | 274 | ||
239 | scancodes[num_down] = scancode; | 275 | scancodes[num_down] = scancode; |
240 | keycodes[num_down++] = kbc->keycode[scancode]; | 276 | keycodes[num_down] = kbc->keycode[scancode]; |
277 | /* If the driver uses the Fn map, do not report the Fn key. */
278 | if ((keycodes[num_down] == KEY_FN) && kbc->use_fn_map) | ||
279 | fn_keypress = true; | ||
280 | else | ||
281 | num_down++; | ||
241 | } | 282 | } |
242 | 283 | ||
243 | val >>= 8; | 284 | val >>= 8; |
244 | } | 285 | } |
286 | |||
287 | /* | ||
288 | * If the platform uses Fn keymaps, translate keys on a Fn keypress. | ||
289 | * Function keycodes are KBC_MAX_KEY apart from the plain keycodes. | ||
290 | */ | ||
291 | if (fn_keypress) { | ||
292 | for (i = 0; i < num_down; i++) { | ||
293 | scancodes[i] += KBC_MAX_KEY; | ||
294 | keycodes[i] = kbc->keycode[scancodes[i]]; | ||
295 | } | ||
296 | } | ||
297 | |||
245 | spin_unlock_irqrestore(&kbc->lock, flags); | 298 | spin_unlock_irqrestore(&kbc->lock, flags); |
246 | 299 | ||
247 | tegra_kbc_report_released_keys(kbc->idev, | 300 | tegra_kbc_report_released_keys(kbc->idev, |
@@ -594,8 +647,11 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev) | |||
594 | 647 | ||
595 | input_dev->keycode = kbc->keycode; | 648 | input_dev->keycode = kbc->keycode; |
596 | input_dev->keycodesize = sizeof(kbc->keycode[0]); | 649 | input_dev->keycodesize = sizeof(kbc->keycode[0]); |
597 | input_dev->keycodemax = ARRAY_SIZE(kbc->keycode); | 650 | input_dev->keycodemax = KBC_MAX_KEY; |
651 | if (pdata->use_fn_map) | ||
652 | input_dev->keycodemax *= 2; | ||
598 | 653 | ||
654 | kbc->use_fn_map = pdata->use_fn_map; | ||
599 | keymap_data = pdata->keymap_data ?: &tegra_kbc_default_keymap_data; | 655 | keymap_data = pdata->keymap_data ?: &tegra_kbc_default_keymap_data; |
600 | matrix_keypad_build_keymap(keymap_data, KBC_ROW_SHIFT, | 656 | matrix_keypad_build_keymap(keymap_data, KBC_ROW_SHIFT, |
601 | input_dev->keycode, input_dev->keybit); | 657 | input_dev->keycode, input_dev->keybit); |
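The tegra-kbc keymap handling above relies on kbc->keycode[] being sized KBC_MAX_KEY * 2 when pdata->use_fn_map is set: the first bank holds the plain layer, the second holds the Fn layer at scancode + KBC_MAX_KEY. A sketch of the translation step, assuming that layout (the helper name is hypothetical):

/* Sketch: re-resolve already collected keys through the Fn bank. */
static void kbc_apply_fn_layer(const unsigned short *keycode,
			       unsigned int *scancodes,
			       unsigned short *keycodes,
			       unsigned int num_down)
{
	unsigned int i;

	for (i = 0; i < num_down; i++) {
		scancodes[i] += KBC_MAX_KEY;		/* jump to the Fn bank    */
		keycodes[i] = keycode[scancodes[i]];	/* look up the Fn keycode */
	}
}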
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c index 1f8e0108962e..7e64d01da2be 100644 --- a/drivers/input/misc/rotary_encoder.c +++ b/drivers/input/misc/rotary_encoder.c | |||
@@ -176,7 +176,7 @@ static int __devinit rotary_encoder_probe(struct platform_device *pdev) | |||
176 | 176 | ||
177 | /* request the IRQs */ | 177 | /* request the IRQs */ |
178 | err = request_irq(encoder->irq_a, &rotary_encoder_irq, | 178 | err = request_irq(encoder->irq_a, &rotary_encoder_irq, |
179 | IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE, | 179 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, |
180 | DRV_NAME, encoder); | 180 | DRV_NAME, encoder); |
181 | if (err) { | 181 | if (err) { |
182 | dev_err(&pdev->dev, "unable to request IRQ %d\n", | 182 | dev_err(&pdev->dev, "unable to request IRQ %d\n", |
@@ -185,7 +185,7 @@ static int __devinit rotary_encoder_probe(struct platform_device *pdev) | |||
185 | } | 185 | } |
186 | 186 | ||
187 | err = request_irq(encoder->irq_b, &rotary_encoder_irq, | 187 | err = request_irq(encoder->irq_b, &rotary_encoder_irq, |
188 | IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE, | 188 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, |
189 | DRV_NAME, encoder); | 189 | DRV_NAME, encoder); |
190 | if (err) { | 190 | if (err) { |
191 | dev_err(&pdev->dev, "unable to request IRQ %d\n", | 191 | dev_err(&pdev->dev, "unable to request IRQ %d\n", |
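The rotary_encoder fix replaces IORESOURCE_IRQ_HIGHEDGE/LOWEDGE, which describe platform resources, with IRQF_TRIGGER_RISING/FALLING, which are what request_irq() is defined to accept; even where the numeric values happen to line up, only the IRQF_* namespace is part of the request_irq() contract. Typical correct usage, shown as a fragment of the call from the hunk above:

#include <linux/interrupt.h>

/* Sketch: request an interrupt on both signal edges of an encoder line. */
err = request_irq(encoder->irq_a, rotary_encoder_irq,
		  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
		  DRV_NAME, encoder);
if (err)
	dev_err(&pdev->dev, "unable to request IRQ %d\n", encoder->irq_a);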
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h index 25e5d042a72c..7453938bf5ef 100644 --- a/drivers/input/mouse/synaptics.h +++ b/drivers/input/mouse/synaptics.h | |||
@@ -51,6 +51,29 @@ | |||
51 | #define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20) | 51 | #define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20) |
52 | #define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12) | 52 | #define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12) |
53 | #define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16) | 53 | #define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16) |
54 | |||
55 | /* | ||
56 | * The following describes response for the 0x0c query. | ||
57 | * | ||
58 | * byte mask name meaning | ||
59 | * ---- ---- ------- ------------ | ||
60 | * 1 0x01 adjustable threshold capacitive button sensitivity | ||
61 | * can be adjusted | ||
62 | * 1 0x02 report max query 0x0d gives max coord reported | ||
63 | * 1 0x04 clearpad sensor is ClearPad product | ||
64 | * 1 0x08 advanced gesture not particularly meaningful | ||
65 | * 1 0x10 clickpad bit 0 1-button ClickPad | ||
66 | * 1 0x60 multifinger mode identifies firmware finger counting | ||
67 | * (not reporting!) algorithm. | ||
68 | * Not particularly meaningful | ||
69 | * 1 0x80 covered pad W clipped to 14, 15 == pad mostly covered | ||
70 | * 2 0x01 clickpad bit 1 2-button ClickPad | ||
71 | * 2 0x02 deluxe LED controls touchpad support LED commands | ||
72 | * a la multimedia control bar
73 | * 2 0x04 reduced filtering firmware does less filtering on | ||
74 | * position data, driver should watch | ||
75 | * for noise. | ||
76 | */ | ||
54 | #define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */ | 77 | #define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */ |
55 | #define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */ | 78 | #define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */ |
56 | #define SYN_CAP_MAX_DIMENSIONS(ex0c) ((ex0c) & 0x020000) | 79 | #define SYN_CAP_MAX_DIMENSIONS(ex0c) ((ex0c) & 0x020000) |
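The table added above documents the extended-capability response to query 0x0c; the SYN_CAP_* masks below it suggest the three response bytes are packed as byte1 << 16 | byte2 << 8 | byte3, which is why the byte-1 clickpad bit (0x10) is tested at 0x100000 and the byte-2 bit (0x01) at 0x000100. A small usage sketch (resp[] and is_clickpad are hypothetical names, not driver fields):

/* Sketch: assemble the 0x0c response and test the ClickPad bits. */
unsigned long ex0c = (resp[0] << 16) | (resp[1] << 8) | resp[2];

if (SYN_CAP_CLICKPAD(ex0c) || SYN_CAP_CLICKPAD2BTN(ex0c))
	priv->is_clickpad = true;	/* one- or two-button ClickPad */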
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c index db5b0bca1a1a..ba70058e2be3 100644 --- a/drivers/input/serio/serio.c +++ b/drivers/input/serio/serio.c | |||
@@ -188,7 +188,8 @@ static void serio_free_event(struct serio_event *event) | |||
188 | kfree(event); | 188 | kfree(event); |
189 | } | 189 | } |
190 | 190 | ||
191 | static void serio_remove_duplicate_events(struct serio_event *event) | 191 | static void serio_remove_duplicate_events(void *object, |
192 | enum serio_event_type type) | ||
192 | { | 193 | { |
193 | struct serio_event *e, *next; | 194 | struct serio_event *e, *next; |
194 | unsigned long flags; | 195 | unsigned long flags; |
@@ -196,13 +197,13 @@ static void serio_remove_duplicate_events(struct serio_event *event) | |||
196 | spin_lock_irqsave(&serio_event_lock, flags); | 197 | spin_lock_irqsave(&serio_event_lock, flags); |
197 | 198 | ||
198 | list_for_each_entry_safe(e, next, &serio_event_list, node) { | 199 | list_for_each_entry_safe(e, next, &serio_event_list, node) { |
199 | if (event->object == e->object) { | 200 | if (object == e->object) { |
200 | /* | 201 | /* |
201 | * If this event is of different type we should not | 202 | * If this event is of different type we should not |
202 | * look further - we only suppress duplicate events | 203 | * look further - we only suppress duplicate events |
203 | * that were sent back-to-back. | 204 | * that were sent back-to-back. |
204 | */ | 205 | */ |
205 | if (event->type != e->type) | 206 | if (type != e->type) |
206 | break; | 207 | break; |
207 | 208 | ||
208 | list_del_init(&e->node); | 209 | list_del_init(&e->node); |
@@ -245,7 +246,7 @@ static void serio_handle_event(struct work_struct *work) | |||
245 | break; | 246 | break; |
246 | } | 247 | } |
247 | 248 | ||
248 | serio_remove_duplicate_events(event); | 249 | serio_remove_duplicate_events(event->object, event->type); |
249 | serio_free_event(event); | 250 | serio_free_event(event); |
250 | } | 251 | } |
251 | 252 | ||
@@ -298,7 +299,7 @@ static int serio_queue_event(void *object, struct module *owner, | |||
298 | event->owner = owner; | 299 | event->owner = owner; |
299 | 300 | ||
300 | list_add_tail(&event->node, &serio_event_list); | 301 | list_add_tail(&event->node, &serio_event_list); |
301 | schedule_work(&serio_event_work); | 302 | queue_work(system_long_wq, &serio_event_work); |
302 | 303 | ||
303 | out: | 304 | out: |
304 | spin_unlock_irqrestore(&serio_event_lock, flags); | 305 | spin_unlock_irqrestore(&serio_event_lock, flags); |
@@ -436,10 +437,12 @@ static ssize_t serio_rebind_driver(struct device *dev, struct device_attribute * | |||
436 | } else if (!strncmp(buf, "rescan", count)) { | 437 | } else if (!strncmp(buf, "rescan", count)) { |
437 | serio_disconnect_port(serio); | 438 | serio_disconnect_port(serio); |
438 | serio_find_driver(serio); | 439 | serio_find_driver(serio); |
440 | serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT); | ||
439 | } else if ((drv = driver_find(buf, &serio_bus)) != NULL) { | 441 | } else if ((drv = driver_find(buf, &serio_bus)) != NULL) { |
440 | serio_disconnect_port(serio); | 442 | serio_disconnect_port(serio); |
441 | error = serio_bind_driver(serio, to_serio_driver(drv)); | 443 | error = serio_bind_driver(serio, to_serio_driver(drv)); |
442 | put_driver(drv); | 444 | put_driver(drv); |
445 | serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT); | ||
443 | } else { | 446 | } else { |
444 | error = -EINVAL; | 447 | error = -EINVAL; |
445 | } | 448 | } |
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c index fc381498b798..cf8fb9f5d4a8 100644 --- a/drivers/input/tablet/wacom_sys.c +++ b/drivers/input/tablet/wacom_sys.c | |||
@@ -519,7 +519,7 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i | |||
519 | /* Retrieve the physical and logical size for OEM devices */ | 519 | /* Retrieve the physical and logical size for OEM devices */ |
520 | error = wacom_retrieve_hid_descriptor(intf, features); | 520 | error = wacom_retrieve_hid_descriptor(intf, features); |
521 | if (error) | 521 | if (error) |
522 | goto fail2; | 522 | goto fail3; |
523 | 523 | ||
524 | wacom_setup_device_quirks(features); | 524 | wacom_setup_device_quirks(features); |
525 | 525 | ||
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index 14ea54b78e46..4bf2316e3284 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c | |||
@@ -941,28 +941,29 @@ static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct ads784 | |||
941 | struct ads7846_platform_data *pdata = spi->dev.platform_data; | 941 | struct ads7846_platform_data *pdata = spi->dev.platform_data; |
942 | int err; | 942 | int err; |
943 | 943 | ||
944 | /* REVISIT when the irq can be triggered active-low, or if for some | 944 | /* |
945 | * REVISIT when the irq can be triggered active-low, or if for some | ||
945 | * reason the touchscreen isn't hooked up, we don't need to access | 946 | * reason the touchscreen isn't hooked up, we don't need to access |
946 | * the pendown state. | 947 | * the pendown state. |
947 | */ | 948 | */ |
948 | if (!pdata->get_pendown_state && !gpio_is_valid(pdata->gpio_pendown)) { | ||
949 | dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n"); | ||
950 | return -EINVAL; | ||
951 | } | ||
952 | 949 | ||
953 | if (pdata->get_pendown_state) { | 950 | if (pdata->get_pendown_state) { |
954 | ts->get_pendown_state = pdata->get_pendown_state; | 951 | ts->get_pendown_state = pdata->get_pendown_state; |
955 | return 0; | 952 | } else if (gpio_is_valid(pdata->gpio_pendown)) { |
956 | } | ||
957 | 953 | ||
958 | err = gpio_request(pdata->gpio_pendown, "ads7846_pendown"); | 954 | err = gpio_request(pdata->gpio_pendown, "ads7846_pendown"); |
959 | if (err) { | 955 | if (err) { |
960 | dev_err(&spi->dev, "failed to request pendown GPIO%d\n", | 956 | dev_err(&spi->dev, "failed to request pendown GPIO%d\n", |
961 | pdata->gpio_pendown); | 957 | pdata->gpio_pendown); |
962 | return err; | 958 | return err; |
963 | } | 959 | } |
964 | 960 | ||
965 | ts->gpio_pendown = pdata->gpio_pendown; | 961 | ts->gpio_pendown = pdata->gpio_pendown; |
962 | |||
963 | } else { | ||
964 | dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n"); | ||
965 | return -EINVAL; | ||
966 | } | ||
966 | 967 | ||
967 | return 0; | 968 | return 0; |
968 | } | 969 | } |
@@ -1353,7 +1354,7 @@ static int __devinit ads7846_probe(struct spi_device *spi) | |||
1353 | err_put_regulator: | 1354 | err_put_regulator: |
1354 | regulator_put(ts->reg); | 1355 | regulator_put(ts->reg); |
1355 | err_free_gpio: | 1356 | err_free_gpio: |
1356 | if (ts->gpio_pendown != -1) | 1357 | if (!ts->get_pendown_state) |
1357 | gpio_free(ts->gpio_pendown); | 1358 | gpio_free(ts->gpio_pendown); |
1358 | err_cleanup_filter: | 1359 | err_cleanup_filter: |
1359 | if (ts->filter_cleanup) | 1360 | if (ts->filter_cleanup) |
@@ -1383,8 +1384,13 @@ static int __devexit ads7846_remove(struct spi_device *spi) | |||
1383 | regulator_disable(ts->reg); | 1384 | regulator_disable(ts->reg); |
1384 | regulator_put(ts->reg); | 1385 | regulator_put(ts->reg); |
1385 | 1386 | ||
1386 | if (ts->gpio_pendown != -1) | 1387 | if (!ts->get_pendown_state) { |
1388 | /* | ||
1389 | * If we are not using a specialized pendown method we must
1390 | * have been relying on a GPIO we set up ourselves.
1391 | */ | ||
1387 | gpio_free(ts->gpio_pendown); | 1392 | gpio_free(ts->gpio_pendown); |
1393 | } | ||
1388 | 1394 | ||
1389 | if (ts->filter_cleanup) | 1395 | if (ts->filter_cleanup) |
1390 | ts->filter_cleanup(ts->filter_data); | 1396 | ts->filter_cleanup(ts->filter_data); |
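After the restructuring above, the presence of a board-supplied get_pendown_state() callback also decides ownership of the pendown GPIO: the driver requests and frees gpio_pendown only when no callback exists. Reading the pen state follows the same precedence; roughly, as a sketch that assumes an active-low pendown line:

/* Sketch: sense whether the pen is currently down. */
static bool pen_is_down(struct ads7846 *ts)
{
	if (ts->get_pendown_state)
		return ts->get_pendown_state();		/* board callback   */

	return !gpio_get_value(ts->gpio_pendown);	/* active-low GPIO  */
}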
diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c index 5cb8449c909d..c14412ef4648 100644 --- a/drivers/input/touchscreen/wacom_w8001.c +++ b/drivers/input/touchscreen/wacom_w8001.c | |||
@@ -51,6 +51,10 @@ MODULE_LICENSE("GPL"); | |||
51 | #define W8001_PKTLEN_TPCCTL 11 /* control packet */ | 51 | #define W8001_PKTLEN_TPCCTL 11 /* control packet */ |
52 | #define W8001_PKTLEN_TOUCH2FG 13 | 52 | #define W8001_PKTLEN_TOUCH2FG 13 |
53 | 53 | ||
54 | /* resolution in points/mm */ | ||
55 | #define W8001_PEN_RESOLUTION 100 | ||
56 | #define W8001_TOUCH_RESOLUTION 10 | ||
57 | |||
54 | struct w8001_coord { | 58 | struct w8001_coord { |
55 | u8 rdy; | 59 | u8 rdy; |
56 | u8 tsw; | 60 | u8 tsw; |
@@ -198,7 +202,7 @@ static void parse_touchquery(u8 *data, struct w8001_touch_query *query) | |||
198 | query->y = 1024; | 202 | query->y = 1024; |
199 | if (query->panel_res) | 203 | if (query->panel_res) |
200 | query->x = query->y = (1 << query->panel_res); | 204 | query->x = query->y = (1 << query->panel_res); |
201 | query->panel_res = 10; | 205 | query->panel_res = W8001_TOUCH_RESOLUTION; |
202 | } | 206 | } |
203 | } | 207 | } |
204 | 208 | ||
@@ -394,6 +398,8 @@ static int w8001_setup(struct w8001 *w8001) | |||
394 | 398 | ||
395 | input_set_abs_params(dev, ABS_X, 0, coord.x, 0, 0); | 399 | input_set_abs_params(dev, ABS_X, 0, coord.x, 0, 0); |
396 | input_set_abs_params(dev, ABS_Y, 0, coord.y, 0, 0); | 400 | input_set_abs_params(dev, ABS_Y, 0, coord.y, 0, 0); |
401 | input_abs_set_res(dev, ABS_X, W8001_PEN_RESOLUTION); | ||
402 | input_abs_set_res(dev, ABS_Y, W8001_PEN_RESOLUTION); | ||
397 | input_set_abs_params(dev, ABS_PRESSURE, 0, coord.pen_pressure, 0, 0); | 403 | input_set_abs_params(dev, ABS_PRESSURE, 0, coord.pen_pressure, 0, 0); |
398 | if (coord.tilt_x && coord.tilt_y) { | 404 | if (coord.tilt_x && coord.tilt_y) { |
399 | input_set_abs_params(dev, ABS_TILT_X, 0, coord.tilt_x, 0, 0); | 405 | input_set_abs_params(dev, ABS_TILT_X, 0, coord.tilt_x, 0, 0); |
@@ -418,14 +424,17 @@ static int w8001_setup(struct w8001 *w8001) | |||
418 | w8001->max_touch_x = touch.x; | 424 | w8001->max_touch_x = touch.x; |
419 | w8001->max_touch_y = touch.y; | 425 | w8001->max_touch_y = touch.y; |
420 | 426 | ||
421 | /* scale to pen maximum */ | ||
422 | if (w8001->max_pen_x && w8001->max_pen_y) { | 427 | if (w8001->max_pen_x && w8001->max_pen_y) { |
428 | /* if the pen is supported, scale to the pen maximum */
423 | touch.x = w8001->max_pen_x; | 429 | touch.x = w8001->max_pen_x; |
424 | touch.y = w8001->max_pen_y; | 430 | touch.y = w8001->max_pen_y; |
431 | touch.panel_res = W8001_PEN_RESOLUTION; | ||
425 | } | 432 | } |
426 | 433 | ||
427 | input_set_abs_params(dev, ABS_X, 0, touch.x, 0, 0); | 434 | input_set_abs_params(dev, ABS_X, 0, touch.x, 0, 0); |
428 | input_set_abs_params(dev, ABS_Y, 0, touch.y, 0, 0); | 435 | input_set_abs_params(dev, ABS_Y, 0, touch.y, 0, 0); |
436 | input_abs_set_res(dev, ABS_X, touch.panel_res); | ||
437 | input_abs_set_res(dev, ABS_Y, touch.panel_res); | ||
429 | 438 | ||
430 | switch (touch.sensor_id) { | 439 | switch (touch.sensor_id) { |
431 | case 0: | 440 | case 0: |
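input_abs_set_res() advertises axis resolution to userspace in units per millimetre, which is why the defines above record 100 points/mm for the pen and 10 points/mm for the touch sensor, and why the pen resolution is reported once touch coordinates are rescaled to the pen range. Range and resolution are set together, as in this fragment:

/* Sketch: publish an axis range plus its physical resolution. */
input_set_abs_params(dev, ABS_X, 0, coord.x, 0, 0);	/* 0..max, no fuzz/flat */
input_abs_set_res(dev, ABS_X, W8001_PEN_RESOLUTION);	/* 100 points per mm    */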
diff --git a/drivers/isdn/hisax/isdnl2.c b/drivers/isdn/hisax/isdnl2.c index 0858791978d8..cfff0c41d298 100644 --- a/drivers/isdn/hisax/isdnl2.c +++ b/drivers/isdn/hisax/isdnl2.c | |||
@@ -1247,10 +1247,10 @@ static void | |||
1247 | l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) | 1247 | l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) |
1248 | { | 1248 | { |
1249 | struct PStack *st = fi->userdata; | 1249 | struct PStack *st = fi->userdata; |
1250 | struct sk_buff *skb, *oskb; | 1250 | struct sk_buff *skb; |
1251 | struct Layer2 *l2 = &st->l2; | 1251 | struct Layer2 *l2 = &st->l2; |
1252 | u_char header[MAX_HEADER_LEN]; | 1252 | u_char header[MAX_HEADER_LEN]; |
1253 | int i; | 1253 | int i, hdr_space_needed; |
1254 | int unsigned p1; | 1254 | int unsigned p1; |
1255 | u_long flags; | 1255 | u_long flags; |
1256 | 1256 | ||
@@ -1261,6 +1261,16 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) | |||
1261 | if (!skb) | 1261 | if (!skb) |
1262 | return; | 1262 | return; |
1263 | 1263 | ||
1264 | hdr_space_needed = l2headersize(l2, 0); | ||
1265 | if (hdr_space_needed > skb_headroom(skb)) { | ||
1266 | struct sk_buff *orig_skb = skb; | ||
1267 | |||
1268 | skb = skb_realloc_headroom(skb, hdr_space_needed); | ||
1269 | if (!skb) { | ||
1270 | dev_kfree_skb(orig_skb); | ||
1271 | return; | ||
1272 | } | ||
1273 | } | ||
1264 | spin_lock_irqsave(&l2->lock, flags); | 1274 | spin_lock_irqsave(&l2->lock, flags); |
1265 | if(test_bit(FLG_MOD128, &l2->flag)) | 1275 | if(test_bit(FLG_MOD128, &l2->flag)) |
1266 | p1 = (l2->vs - l2->va) % 128; | 1276 | p1 = (l2->vs - l2->va) % 128; |
@@ -1285,19 +1295,7 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) | |||
1285 | l2->vs = (l2->vs + 1) % 8; | 1295 | l2->vs = (l2->vs + 1) % 8; |
1286 | } | 1296 | } |
1287 | spin_unlock_irqrestore(&l2->lock, flags); | 1297 | spin_unlock_irqrestore(&l2->lock, flags); |
1288 | p1 = skb->data - skb->head; | 1298 | memcpy(skb_push(skb, i), header, i); |
1289 | if (p1 >= i) | ||
1290 | memcpy(skb_push(skb, i), header, i); | ||
1291 | else { | ||
1292 | printk(KERN_WARNING | ||
1293 | "isdl2 pull_iqueue skb header(%d/%d) too short\n", i, p1); | ||
1294 | oskb = skb; | ||
1295 | skb = alloc_skb(oskb->len + i, GFP_ATOMIC); | ||
1296 | memcpy(skb_put(skb, i), header, i); | ||
1297 | skb_copy_from_linear_data(oskb, | ||
1298 | skb_put(skb, oskb->len), oskb->len); | ||
1299 | dev_kfree_skb(oskb); | ||
1300 | } | ||
1301 | st->l2.l2l1(st, PH_PULL | INDICATION, skb); | 1299 | st->l2.l2l1(st, PH_PULL | INDICATION, skb); |
1302 | test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag); | 1300 | test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag); |
1303 | if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) { | 1301 | if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) { |
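The isdnl2 change replaces the old push-then-copy fallback with an up-front headroom check, so skb_push() for the L2 header can no longer underrun the buffer. Note that skb_realloc_headroom() returns a copy and never frees the original, so in the general pattern the caller still owns both; a hedged sketch of that pattern, not the hunk verbatim:

/* Sketch: guarantee hdr_len bytes of headroom before pushing a header. */
if (hdr_len > skb_headroom(skb)) {
	struct sk_buff *nskb = skb_realloc_headroom(skb, hdr_len);

	if (!nskb) {
		dev_kfree_skb(skb);	/* drop the frame on allocation failure */
		return;
	}
	dev_kfree_skb(skb);		/* the copy replaces the original */
	skb = nskb;
}
memcpy(skb_push(skb, hdr_len), header, hdr_len);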
diff --git a/drivers/isdn/hysdn/hysdn_defs.h b/drivers/isdn/hysdn/hysdn_defs.h index 729df4089385..18b801ad97a4 100644 --- a/drivers/isdn/hysdn/hysdn_defs.h +++ b/drivers/isdn/hysdn/hysdn_defs.h | |||
@@ -227,7 +227,6 @@ extern hysdn_card *card_root; /* pointer to first card */ | |||
227 | /*************************/ | 227 | /*************************/ |
228 | /* im/exported functions */ | 228 | /* im/exported functions */ |
229 | /*************************/ | 229 | /*************************/ |
230 | extern char *hysdn_getrev(const char *); | ||
231 | 230 | ||
232 | /* hysdn_procconf.c */ | 231 | /* hysdn_procconf.c */ |
233 | extern int hysdn_procconf_init(void); /* init proc config filesys */ | 232 | extern int hysdn_procconf_init(void); /* init proc config filesys */ |
@@ -259,7 +258,6 @@ extern int hysdn_tx_cfgline(hysdn_card *, unsigned char *, | |||
259 | 258 | ||
260 | /* hysdn_net.c */ | 259 | /* hysdn_net.c */ |
261 | extern unsigned int hynet_enable; | 260 | extern unsigned int hynet_enable; |
262 | extern char *hysdn_net_revision; | ||
263 | extern int hysdn_net_create(hysdn_card *); /* create a new net device */ | 261 | extern int hysdn_net_create(hysdn_card *); /* create a new net device */ |
264 | extern int hysdn_net_release(hysdn_card *); /* delete the device */ | 262 | extern int hysdn_net_release(hysdn_card *); /* delete the device */ |
265 | extern char *hysdn_net_getname(hysdn_card *); /* get name of net interface */ | 263 | extern char *hysdn_net_getname(hysdn_card *); /* get name of net interface */ |
diff --git a/drivers/isdn/hysdn/hysdn_init.c b/drivers/isdn/hysdn/hysdn_init.c index b7cc5c2f08c6..0ab42ace1692 100644 --- a/drivers/isdn/hysdn/hysdn_init.c +++ b/drivers/isdn/hysdn/hysdn_init.c | |||
@@ -36,7 +36,6 @@ MODULE_DESCRIPTION("ISDN4Linux: Driver for HYSDN cards"); | |||
36 | MODULE_AUTHOR("Werner Cornelius"); | 36 | MODULE_AUTHOR("Werner Cornelius"); |
37 | MODULE_LICENSE("GPL"); | 37 | MODULE_LICENSE("GPL"); |
38 | 38 | ||
39 | static char *hysdn_init_revision = "$Revision: 1.6.6.6 $"; | ||
40 | static int cardmax; /* number of found cards */ | 39 | static int cardmax; /* number of found cards */ |
41 | hysdn_card *card_root = NULL; /* pointer to first card */ | 40 | hysdn_card *card_root = NULL; /* pointer to first card */ |
42 | static hysdn_card *card_last = NULL; /* pointer to first card */ | 41 | static hysdn_card *card_last = NULL; /* pointer to first card */ |
@@ -49,25 +48,6 @@ static hysdn_card *card_last = NULL; /* pointer to first card */ | |||
49 | /* Additionally newer versions may be activated without rebooting. */ | 48 | /* Additionally newer versions may be activated without rebooting. */ |
50 | /****************************************************************************/ | 49 | /****************************************************************************/ |
51 | 50 | ||
52 | /******************************************************/ | ||
53 | /* extract revision number from string for log output */ | ||
54 | /******************************************************/ | ||
55 | char * | ||
56 | hysdn_getrev(const char *revision) | ||
57 | { | ||
58 | char *rev; | ||
59 | char *p; | ||
60 | |||
61 | if ((p = strchr(revision, ':'))) { | ||
62 | rev = p + 2; | ||
63 | p = strchr(rev, '$'); | ||
64 | *--p = 0; | ||
65 | } else | ||
66 | rev = "???"; | ||
67 | return rev; | ||
68 | } | ||
69 | |||
70 | |||
71 | /****************************************************************************/ | 51 | /****************************************************************************/ |
72 | /* init_module is called once when the module is loaded to do all necessary */ | 52 | /* init_module is called once when the module is loaded to do all necessary */ |
73 | /* things like autodetect... */ | 53 | /* things like autodetect... */ |
@@ -175,13 +155,9 @@ static int hysdn_have_procfs; | |||
175 | static int __init | 155 | static int __init |
176 | hysdn_init(void) | 156 | hysdn_init(void) |
177 | { | 157 | { |
178 | char tmp[50]; | ||
179 | int rc; | 158 | int rc; |
180 | 159 | ||
181 | strcpy(tmp, hysdn_init_revision); | 160 | printk(KERN_NOTICE "HYSDN: module loaded\n"); |
182 | printk(KERN_NOTICE "HYSDN: module Rev: %s loaded\n", hysdn_getrev(tmp)); | ||
183 | strcpy(tmp, hysdn_net_revision); | ||
184 | printk(KERN_NOTICE "HYSDN: network interface Rev: %s \n", hysdn_getrev(tmp)); | ||
185 | 161 | ||
186 | rc = pci_register_driver(&hysdn_pci_driver); | 162 | rc = pci_register_driver(&hysdn_pci_driver); |
187 | if (rc) | 163 | if (rc) |
diff --git a/drivers/isdn/hysdn/hysdn_net.c b/drivers/isdn/hysdn/hysdn_net.c index feec8d89d719..11f2cce26005 100644 --- a/drivers/isdn/hysdn/hysdn_net.c +++ b/drivers/isdn/hysdn/hysdn_net.c | |||
@@ -26,9 +26,6 @@ | |||
26 | unsigned int hynet_enable = 0xffffffff; | 26 | unsigned int hynet_enable = 0xffffffff; |
27 | module_param(hynet_enable, uint, 0); | 27 | module_param(hynet_enable, uint, 0); |
28 | 28 | ||
29 | /* store the actual version for log reporting */ | ||
30 | char *hysdn_net_revision = "$Revision: 1.8.6.4 $"; | ||
31 | |||
32 | #define MAX_SKB_BUFFERS 20 /* number of buffers for keeping TX-data */ | 29 | #define MAX_SKB_BUFFERS 20 /* number of buffers for keeping TX-data */ |
33 | 30 | ||
34 | /****************************************************************************/ | 31 | /****************************************************************************/ |
diff --git a/drivers/isdn/hysdn/hysdn_procconf.c b/drivers/isdn/hysdn/hysdn_procconf.c index 96b3e39c3356..5fe83bd42061 100644 --- a/drivers/isdn/hysdn/hysdn_procconf.c +++ b/drivers/isdn/hysdn/hysdn_procconf.c | |||
@@ -23,7 +23,6 @@ | |||
23 | #include "hysdn_defs.h" | 23 | #include "hysdn_defs.h" |
24 | 24 | ||
25 | static DEFINE_MUTEX(hysdn_conf_mutex); | 25 | static DEFINE_MUTEX(hysdn_conf_mutex); |
26 | static char *hysdn_procconf_revision = "$Revision: 1.8.6.4 $"; | ||
27 | 26 | ||
28 | #define INFO_OUT_LEN 80 /* length of info line including lf */ | 27 | #define INFO_OUT_LEN 80 /* length of info line including lf */ |
29 | 28 | ||
@@ -404,7 +403,7 @@ hysdn_procconf_init(void) | |||
404 | card = card->next; /* next entry */ | 403 | card = card->next; /* next entry */ |
405 | } | 404 | } |
406 | 405 | ||
407 | printk(KERN_NOTICE "HYSDN: procfs Rev. %s initialised\n", hysdn_getrev(hysdn_procconf_revision)); | 406 | printk(KERN_NOTICE "HYSDN: procfs initialised\n"); |
408 | return (0); | 407 | return (0); |
409 | } /* hysdn_procconf_init */ | 408 | } /* hysdn_procconf_init */ |
410 | 409 | ||
diff --git a/drivers/md/linear.c b/drivers/md/linear.c index 8a2f767f26d8..0ed7f6bc2a7f 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c | |||
@@ -216,7 +216,6 @@ static int linear_run (mddev_t *mddev) | |||
216 | 216 | ||
217 | if (md_check_no_bitmap(mddev)) | 217 | if (md_check_no_bitmap(mddev)) |
218 | return -EINVAL; | 218 | return -EINVAL; |
219 | mddev->queue->queue_lock = &mddev->queue->__queue_lock; | ||
220 | conf = linear_conf(mddev, mddev->raid_disks); | 219 | conf = linear_conf(mddev, mddev->raid_disks); |
221 | 220 | ||
222 | if (!conf) | 221 | if (!conf) |
diff --git a/drivers/md/md.c b/drivers/md/md.c index b76cfc89e1b5..818313e277e7 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -287,6 +287,7 @@ static int md_make_request(struct request_queue *q, struct bio *bio) | |||
287 | mddev_t *mddev = q->queuedata; | 287 | mddev_t *mddev = q->queuedata; |
288 | int rv; | 288 | int rv; |
289 | int cpu; | 289 | int cpu; |
290 | unsigned int sectors; | ||
290 | 291 | ||
291 | if (mddev == NULL || mddev->pers == NULL | 292 | if (mddev == NULL || mddev->pers == NULL |
292 | || !mddev->ready) { | 293 | || !mddev->ready) { |
@@ -311,12 +312,16 @@ static int md_make_request(struct request_queue *q, struct bio *bio) | |||
311 | atomic_inc(&mddev->active_io); | 312 | atomic_inc(&mddev->active_io); |
312 | rcu_read_unlock(); | 313 | rcu_read_unlock(); |
313 | 314 | ||
315 | /* | ||
316 | * save the sectors now since our bio can | ||
317 | * go away inside make_request | ||
318 | */ | ||
319 | sectors = bio_sectors(bio); | ||
314 | rv = mddev->pers->make_request(mddev, bio); | 320 | rv = mddev->pers->make_request(mddev, bio); |
315 | 321 | ||
316 | cpu = part_stat_lock(); | 322 | cpu = part_stat_lock(); |
317 | part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); | 323 | part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); |
318 | part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], | 324 | part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors); |
319 | bio_sectors(bio)); | ||
320 | part_stat_unlock(); | 325 | part_stat_unlock(); |
321 | 326 | ||
322 | if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) | 327 | if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) |
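The ordering in the md_make_request() hunk is the whole point of the fix: the personality's make_request() may split, complete, or free the bio before returning, so the sector count has to be sampled beforehand and the saved value used for the statistics afterwards:

/* Sketch of the accounting order fixed above. */
unsigned int sectors = bio_sectors(bio);	/* sample while the bio is still ours */

rv = mddev->pers->make_request(mddev, bio);	/* bio may be gone after this call    */

cpu = part_stat_lock();
part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
part_stat_unlock();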
@@ -548,6 +553,9 @@ static mddev_t * mddev_find(dev_t unit) | |||
548 | { | 553 | { |
549 | mddev_t *mddev, *new = NULL; | 554 | mddev_t *mddev, *new = NULL; |
550 | 555 | ||
556 | if (unit && MAJOR(unit) != MD_MAJOR) | ||
557 | unit &= ~((1<<MdpMinorShift)-1); | ||
558 | |||
551 | retry: | 559 | retry: |
552 | spin_lock(&all_mddevs_lock); | 560 | spin_lock(&all_mddevs_lock); |
553 | 561 | ||
@@ -1947,8 +1955,6 @@ static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared) | |||
1947 | __bdevname(dev, b)); | 1955 | __bdevname(dev, b)); |
1948 | return PTR_ERR(bdev); | 1956 | return PTR_ERR(bdev); |
1949 | } | 1957 | } |
1950 | if (!shared) | ||
1951 | set_bit(AllReserved, &rdev->flags); | ||
1952 | rdev->bdev = bdev; | 1958 | rdev->bdev = bdev; |
1953 | return err; | 1959 | return err; |
1954 | } | 1960 | } |
@@ -2465,6 +2471,9 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) | |||
2465 | if (rdev->raid_disk != -1) | 2471 | if (rdev->raid_disk != -1) |
2466 | return -EBUSY; | 2472 | return -EBUSY; |
2467 | 2473 | ||
2474 | if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery)) | ||
2475 | return -EBUSY; | ||
2476 | |||
2468 | if (rdev->mddev->pers->hot_add_disk == NULL) | 2477 | if (rdev->mddev->pers->hot_add_disk == NULL) |
2469 | return -EINVAL; | 2478 | return -EINVAL; |
2470 | 2479 | ||
@@ -2610,12 +2619,11 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) | |||
2610 | 2619 | ||
2611 | mddev_lock(mddev); | 2620 | mddev_lock(mddev); |
2612 | list_for_each_entry(rdev2, &mddev->disks, same_set) | 2621 | list_for_each_entry(rdev2, &mddev->disks, same_set) |
2613 | if (test_bit(AllReserved, &rdev2->flags) || | 2622 | if (rdev->bdev == rdev2->bdev && |
2614 | (rdev->bdev == rdev2->bdev && | 2623 | rdev != rdev2 && |
2615 | rdev != rdev2 && | 2624 | overlaps(rdev->data_offset, rdev->sectors, |
2616 | overlaps(rdev->data_offset, rdev->sectors, | 2625 | rdev2->data_offset, |
2617 | rdev2->data_offset, | 2626 | rdev2->sectors)) { |
2618 | rdev2->sectors))) { | ||
2619 | overlap = 1; | 2627 | overlap = 1; |
2620 | break; | 2628 | break; |
2621 | } | 2629 | } |
@@ -4133,10 +4141,10 @@ array_size_store(mddev_t *mddev, const char *buf, size_t len) | |||
4133 | } | 4141 | } |
4134 | 4142 | ||
4135 | mddev->array_sectors = sectors; | 4143 | mddev->array_sectors = sectors; |
4136 | set_capacity(mddev->gendisk, mddev->array_sectors); | 4144 | if (mddev->pers) { |
4137 | if (mddev->pers) | 4145 | set_capacity(mddev->gendisk, mddev->array_sectors); |
4138 | revalidate_disk(mddev->gendisk); | 4146 | revalidate_disk(mddev->gendisk); |
4139 | 4147 | } | |
4140 | return len; | 4148 | return len; |
4141 | } | 4149 | } |
4142 | 4150 | ||
@@ -4619,6 +4627,7 @@ static int do_md_run(mddev_t *mddev) | |||
4619 | } | 4627 | } |
4620 | set_capacity(mddev->gendisk, mddev->array_sectors); | 4628 | set_capacity(mddev->gendisk, mddev->array_sectors); |
4621 | revalidate_disk(mddev->gendisk); | 4629 | revalidate_disk(mddev->gendisk); |
4630 | mddev->changed = 1; | ||
4622 | kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); | 4631 | kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); |
4623 | out: | 4632 | out: |
4624 | return err; | 4633 | return err; |
@@ -4707,6 +4716,7 @@ static void md_clean(mddev_t *mddev) | |||
4707 | mddev->sync_speed_min = mddev->sync_speed_max = 0; | 4716 | mddev->sync_speed_min = mddev->sync_speed_max = 0; |
4708 | mddev->recovery = 0; | 4717 | mddev->recovery = 0; |
4709 | mddev->in_sync = 0; | 4718 | mddev->in_sync = 0; |
4719 | mddev->changed = 0; | ||
4710 | mddev->degraded = 0; | 4720 | mddev->degraded = 0; |
4711 | mddev->safemode = 0; | 4721 | mddev->safemode = 0; |
4712 | mddev->bitmap_info.offset = 0; | 4722 | mddev->bitmap_info.offset = 0; |
@@ -4822,6 +4832,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) | |||
4822 | 4832 | ||
4823 | set_capacity(disk, 0); | 4833 | set_capacity(disk, 0); |
4824 | mutex_unlock(&mddev->open_mutex); | 4834 | mutex_unlock(&mddev->open_mutex); |
4835 | mddev->changed = 1; | ||
4825 | revalidate_disk(disk); | 4836 | revalidate_disk(disk); |
4826 | 4837 | ||
4827 | if (mddev->ro) | 4838 | if (mddev->ro) |
@@ -5578,6 +5589,8 @@ static int update_raid_disks(mddev_t *mddev, int raid_disks) | |||
5578 | mddev->delta_disks = raid_disks - mddev->raid_disks; | 5589 | mddev->delta_disks = raid_disks - mddev->raid_disks; |
5579 | 5590 | ||
5580 | rv = mddev->pers->check_reshape(mddev); | 5591 | rv = mddev->pers->check_reshape(mddev); |
5592 | if (rv < 0) | ||
5593 | mddev->delta_disks = 0; | ||
5581 | return rv; | 5594 | return rv; |
5582 | } | 5595 | } |
5583 | 5596 | ||
@@ -6004,7 +6017,7 @@ static int md_open(struct block_device *bdev, fmode_t mode) | |||
6004 | atomic_inc(&mddev->openers); | 6017 | atomic_inc(&mddev->openers); |
6005 | mutex_unlock(&mddev->open_mutex); | 6018 | mutex_unlock(&mddev->open_mutex); |
6006 | 6019 | ||
6007 | check_disk_size_change(mddev->gendisk, bdev); | 6020 | check_disk_change(bdev); |
6008 | out: | 6021 | out: |
6009 | return err; | 6022 | return err; |
6010 | } | 6023 | } |
@@ -6019,6 +6032,21 @@ static int md_release(struct gendisk *disk, fmode_t mode) | |||
6019 | 6032 | ||
6020 | return 0; | 6033 | return 0; |
6021 | } | 6034 | } |
6035 | |||
6036 | static int md_media_changed(struct gendisk *disk) | ||
6037 | { | ||
6038 | mddev_t *mddev = disk->private_data; | ||
6039 | |||
6040 | return mddev->changed; | ||
6041 | } | ||
6042 | |||
6043 | static int md_revalidate(struct gendisk *disk) | ||
6044 | { | ||
6045 | mddev_t *mddev = disk->private_data; | ||
6046 | |||
6047 | mddev->changed = 0; | ||
6048 | return 0; | ||
6049 | } | ||
6022 | static const struct block_device_operations md_fops = | 6050 | static const struct block_device_operations md_fops = |
6023 | { | 6051 | { |
6024 | .owner = THIS_MODULE, | 6052 | .owner = THIS_MODULE, |
@@ -6029,6 +6057,8 @@ static const struct block_device_operations md_fops = | |||
6029 | .compat_ioctl = md_compat_ioctl, | 6057 | .compat_ioctl = md_compat_ioctl, |
6030 | #endif | 6058 | #endif |
6031 | .getgeo = md_getgeo, | 6059 | .getgeo = md_getgeo, |
6060 | .media_changed = md_media_changed, | ||
6061 | .revalidate_disk= md_revalidate, | ||
6032 | }; | 6062 | }; |
6033 | 6063 | ||
6034 | static int md_thread(void * arg) | 6064 | static int md_thread(void * arg) |
@@ -6985,9 +7015,6 @@ void md_do_sync(mddev_t *mddev) | |||
6985 | } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) | 7015 | } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) |
6986 | mddev->resync_min = mddev->curr_resync_completed; | 7016 | mddev->resync_min = mddev->curr_resync_completed; |
6987 | mddev->curr_resync = 0; | 7017 | mddev->curr_resync = 0; |
6988 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) | ||
6989 | mddev->curr_resync_completed = 0; | ||
6990 | sysfs_notify(&mddev->kobj, NULL, "sync_completed"); | ||
6991 | wake_up(&resync_wait); | 7018 | wake_up(&resync_wait); |
6992 | set_bit(MD_RECOVERY_DONE, &mddev->recovery); | 7019 | set_bit(MD_RECOVERY_DONE, &mddev->recovery); |
6993 | md_wakeup_thread(mddev->thread); | 7020 | md_wakeup_thread(mddev->thread); |
@@ -7028,7 +7055,7 @@ static int remove_and_add_spares(mddev_t *mddev) | |||
7028 | } | 7055 | } |
7029 | } | 7056 | } |
7030 | 7057 | ||
7031 | if (mddev->degraded && ! mddev->ro && !mddev->recovery_disabled) { | 7058 | if (mddev->degraded && !mddev->recovery_disabled) { |
7032 | list_for_each_entry(rdev, &mddev->disks, same_set) { | 7059 | list_for_each_entry(rdev, &mddev->disks, same_set) { |
7033 | if (rdev->raid_disk >= 0 && | 7060 | if (rdev->raid_disk >= 0 && |
7034 | !test_bit(In_sync, &rdev->flags) && | 7061 | !test_bit(In_sync, &rdev->flags) && |
@@ -7151,7 +7178,20 @@ void md_check_recovery(mddev_t *mddev) | |||
7151 | /* Only thing we do on a ro array is remove | 7178 | /* Only thing we do on a ro array is remove |
7152 | * failed devices. | 7179 | * failed devices. |
7153 | */ | 7180 | */ |
7154 | remove_and_add_spares(mddev); | 7181 | mdk_rdev_t *rdev; |
7182 | list_for_each_entry(rdev, &mddev->disks, same_set) | ||
7183 | if (rdev->raid_disk >= 0 && | ||
7184 | !test_bit(Blocked, &rdev->flags) && | ||
7185 | test_bit(Faulty, &rdev->flags) && | ||
7186 | atomic_read(&rdev->nr_pending)==0) { | ||
7187 | if (mddev->pers->hot_remove_disk( | ||
7188 | mddev, rdev->raid_disk)==0) { | ||
7189 | char nm[20]; | ||
7190 | sprintf(nm,"rd%d", rdev->raid_disk); | ||
7191 | sysfs_remove_link(&mddev->kobj, nm); | ||
7192 | rdev->raid_disk = -1; | ||
7193 | } | ||
7194 | } | ||
7155 | clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); | 7195 | clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); |
7156 | goto unlock; | 7196 | goto unlock; |
7157 | } | 7197 | } |
diff --git a/drivers/md/md.h b/drivers/md/md.h index eec517ced31a..12215d437fcc 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h | |||
@@ -93,8 +93,6 @@ struct mdk_rdev_s | |||
93 | #define Faulty 1 /* device is known to have a fault */ | 93 | #define Faulty 1 /* device is known to have a fault */ |
94 | #define In_sync 2 /* device is in_sync with rest of array */ | 94 | #define In_sync 2 /* device is in_sync with rest of array */ |
95 | #define WriteMostly 4 /* Avoid reading if at all possible */ | 95 | #define WriteMostly 4 /* Avoid reading if at all possible */ |
96 | #define AllReserved 6 /* If whole device is reserved for | ||
97 | * one array */ | ||
98 | #define AutoDetected 7 /* added by auto-detect */ | 96 | #define AutoDetected 7 /* added by auto-detect */ |
99 | #define Blocked 8 /* An error occurred on an externally | 97 | #define Blocked 8 /* An error occurred on an externally |
100 | * managed array, don't allow writes | 98 | * managed array, don't allow writes |
@@ -276,6 +274,8 @@ struct mddev_s | |||
276 | atomic_t active; /* general refcount */ | 274 | atomic_t active; /* general refcount */ |
277 | atomic_t openers; /* number of active opens */ | 275 | atomic_t openers; /* number of active opens */ |
278 | 276 | ||
277 | int changed; /* True if we might need to | ||
278 | * reread partition info */ | ||
279 | int degraded; /* whether md should consider | 279 | int degraded; /* whether md should consider |
280 | * adding a spare | 280 | * adding a spare |
281 | */ | 281 | */ |
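The md.c and md.h hunks above thread the new mddev->changed flag through the standard removable-media callbacks so that partition information is reread after an array is started or stopped. Below is a minimal sketch of the same pattern for a hypothetical block driver; the foo_* names are illustrative, not from the patch.

struct foo_dev {
	int changed;			/* true if partition info may be stale */
};

static int foo_media_changed(struct gendisk *disk)
{
	struct foo_dev *dev = disk->private_data;

	return dev->changed;		/* non-zero: block layer should revalidate */
}

static int foo_revalidate(struct gendisk *disk)
{
	struct foo_dev *dev = disk->private_data;

	dev->changed = 0;		/* change acknowledged */
	return 0;
}

static const struct block_device_operations foo_fops = {
	.owner           = THIS_MODULE,
	.media_changed   = foo_media_changed,
	.revalidate_disk = foo_revalidate,
};

On open, check_disk_change() (which md_open() now calls in place of check_disk_size_change()) consults .media_changed and, when it returns true, invalidates cached state and invokes .revalidate_disk before the open completes.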
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 6d7ddf32ef2e..3a62d440e27b 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c | |||
@@ -435,7 +435,6 @@ static int multipath_run (mddev_t *mddev) | |||
435 | * bookkeeping area. [whatever we allocate in multipath_run(), | 435 | * bookkeeping area. [whatever we allocate in multipath_run(), |
436 | * should be freed in multipath_stop()] | 436 | * should be freed in multipath_stop()] |
437 | */ | 437 | */ |
438 | mddev->queue->queue_lock = &mddev->queue->__queue_lock; | ||
439 | 438 | ||
440 | conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL); | 439 | conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL); |
441 | mddev->private = conf; | 440 | mddev->private = conf; |
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index a39f4c355e55..c0ac457f1218 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c | |||
@@ -179,6 +179,14 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf) | |||
179 | rdev1->new_raid_disk = j; | 179 | rdev1->new_raid_disk = j; |
180 | } | 180 | } |
181 | 181 | ||
182 | if (mddev->level == 1) { | ||
183 | /* taking over a raid1 array - | ||
184 | * we have only one active disk | ||
185 | */ | ||
186 | j = 0; | ||
187 | rdev1->new_raid_disk = j; | ||
188 | } | ||
189 | |||
182 | if (j < 0 || j >= mddev->raid_disks) { | 190 | if (j < 0 || j >= mddev->raid_disks) { |
183 | printk(KERN_ERR "md/raid0:%s: bad disk number %d - " | 191 | printk(KERN_ERR "md/raid0:%s: bad disk number %d - " |
184 | "aborting!\n", mdname(mddev), j); | 192 | "aborting!\n", mdname(mddev), j); |
@@ -353,7 +361,6 @@ static int raid0_run(mddev_t *mddev) | |||
353 | if (md_check_no_bitmap(mddev)) | 361 | if (md_check_no_bitmap(mddev)) |
354 | return -EINVAL; | 362 | return -EINVAL; |
355 | blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); | 363 | blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); |
356 | mddev->queue->queue_lock = &mddev->queue->__queue_lock; | ||
357 | 364 | ||
358 | /* if private is not null, we are here after takeover */ | 365 | /* if private is not null, we are here after takeover */ |
359 | if (mddev->private == NULL) { | 366 | if (mddev->private == NULL) { |
@@ -644,12 +651,39 @@ static void *raid0_takeover_raid10(mddev_t *mddev) | |||
644 | return priv_conf; | 651 | return priv_conf; |
645 | } | 652 | } |
646 | 653 | ||
654 | static void *raid0_takeover_raid1(mddev_t *mddev) | ||
655 | { | ||
656 | raid0_conf_t *priv_conf; | ||
657 | |||
658 | /* Check layout: | ||
659 | * - (N - 1) mirror drives must be already faulty | ||
660 | */ | ||
661 | if ((mddev->raid_disks - 1) != mddev->degraded) { | ||
662 | printk(KERN_ERR "md/raid0:%s: (N - 1) mirror drives must already be faulty!\n", | ||
663 | mdname(mddev)); | ||
664 | return ERR_PTR(-EINVAL); | ||
665 | } | ||
666 | |||
667 | /* Set new parameters */ | ||
668 | mddev->new_level = 0; | ||
669 | mddev->new_layout = 0; | ||
670 | mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */ | ||
671 | mddev->delta_disks = 1 - mddev->raid_disks; | ||
672 | mddev->raid_disks = 1; | ||
673 | /* make sure it will not be marked as dirty */ | ||
674 | mddev->recovery_cp = MaxSector; | ||
675 | |||
676 | create_strip_zones(mddev, &priv_conf); | ||
677 | return priv_conf; | ||
678 | } | ||
679 | |||
647 | static void *raid0_takeover(mddev_t *mddev) | 680 | static void *raid0_takeover(mddev_t *mddev) |
648 | { | 681 | { |
649 | /* raid0 can take over: | 682 | /* raid0 can take over: |
650 | * raid4 - if all data disks are active. | 683 | * raid4 - if all data disks are active. |
651 | * raid5 - providing it is Raid4 layout and one disk is faulty | 684 | * raid5 - providing it is Raid4 layout and one disk is faulty |
652 | * raid10 - assuming we have all necessary active disks | 685 | * raid10 - assuming we have all necessary active disks |
686 | * raid1 - with (N - 1) mirror drives faulty | ||
653 | */ | 687 | */ |
654 | if (mddev->level == 4) | 688 | if (mddev->level == 4) |
655 | return raid0_takeover_raid45(mddev); | 689 | return raid0_takeover_raid45(mddev); |
@@ -665,6 +699,12 @@ static void *raid0_takeover(mddev_t *mddev) | |||
665 | if (mddev->level == 10) | 699 | if (mddev->level == 10) |
666 | return raid0_takeover_raid10(mddev); | 700 | return raid0_takeover_raid10(mddev); |
667 | 701 | ||
702 | if (mddev->level == 1) | ||
703 | return raid0_takeover_raid1(mddev); | ||
704 | |||
705 | printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n", | ||
706 | mddev->level); | ||
707 | |||
668 | return ERR_PTR(-EINVAL); | 708 | return ERR_PTR(-EINVAL); |
669 | } | 709 | } |
670 | 710 | ||
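As a quick check on the default chosen in raid0_takeover_raid1() above: new_chunk_sectors = 128 sectors at 512 bytes per sector gives 128 * 512 = 65536 bytes, i.e. the 64k chunk size noted in the comment.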
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index a23ffa397ba9..06cd712807d0 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -593,7 +593,10 @@ static int flush_pending_writes(conf_t *conf) | |||
593 | if (conf->pending_bio_list.head) { | 593 | if (conf->pending_bio_list.head) { |
594 | struct bio *bio; | 594 | struct bio *bio; |
595 | bio = bio_list_get(&conf->pending_bio_list); | 595 | bio = bio_list_get(&conf->pending_bio_list); |
596 | /* Only take the spinlock to quiet a warning */ | ||
597 | spin_lock(conf->mddev->queue->queue_lock); | ||
596 | blk_remove_plug(conf->mddev->queue); | 598 | blk_remove_plug(conf->mddev->queue); |
599 | spin_unlock(conf->mddev->queue->queue_lock); | ||
597 | spin_unlock_irq(&conf->device_lock); | 600 | spin_unlock_irq(&conf->device_lock); |
598 | /* flush any pending bitmap writes to | 601 | /* flush any pending bitmap writes to |
599 | * disk before proceeding w/ I/O */ | 602 | * disk before proceeding w/ I/O */ |
@@ -959,7 +962,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) | |||
959 | atomic_inc(&r1_bio->remaining); | 962 | atomic_inc(&r1_bio->remaining); |
960 | spin_lock_irqsave(&conf->device_lock, flags); | 963 | spin_lock_irqsave(&conf->device_lock, flags); |
961 | bio_list_add(&conf->pending_bio_list, mbio); | 964 | bio_list_add(&conf->pending_bio_list, mbio); |
962 | blk_plug_device(mddev->queue); | 965 | blk_plug_device_unlocked(mddev->queue); |
963 | spin_unlock_irqrestore(&conf->device_lock, flags); | 966 | spin_unlock_irqrestore(&conf->device_lock, flags); |
964 | } | 967 | } |
965 | r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL); | 968 | r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL); |
@@ -2021,7 +2024,6 @@ static int run(mddev_t *mddev) | |||
2021 | if (IS_ERR(conf)) | 2024 | if (IS_ERR(conf)) |
2022 | return PTR_ERR(conf); | 2025 | return PTR_ERR(conf); |
2023 | 2026 | ||
2024 | mddev->queue->queue_lock = &conf->device_lock; | ||
2025 | list_for_each_entry(rdev, &mddev->disks, same_set) { | 2027 | list_for_each_entry(rdev, &mddev->disks, same_set) { |
2026 | disk_stack_limits(mddev->gendisk, rdev->bdev, | 2028 | disk_stack_limits(mddev->gendisk, rdev->bdev, |
2027 | rdev->data_offset << 9); | 2029 | rdev->data_offset << 9); |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 69b659544390..747d061d8e05 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -662,7 +662,10 @@ static int flush_pending_writes(conf_t *conf) | |||
662 | if (conf->pending_bio_list.head) { | 662 | if (conf->pending_bio_list.head) { |
663 | struct bio *bio; | 663 | struct bio *bio; |
664 | bio = bio_list_get(&conf->pending_bio_list); | 664 | bio = bio_list_get(&conf->pending_bio_list); |
665 | /* Spinlock only taken to quiet a warning */ | ||
666 | spin_lock(conf->mddev->queue->queue_lock); | ||
665 | blk_remove_plug(conf->mddev->queue); | 667 | blk_remove_plug(conf->mddev->queue); |
668 | spin_unlock(conf->mddev->queue->queue_lock); | ||
666 | spin_unlock_irq(&conf->device_lock); | 669 | spin_unlock_irq(&conf->device_lock); |
667 | /* flush any pending bitmap writes to disk | 670 | /* flush any pending bitmap writes to disk |
668 | * before proceeding w/ I/O */ | 671 | * before proceeding w/ I/O */ |
@@ -971,7 +974,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) | |||
971 | atomic_inc(&r10_bio->remaining); | 974 | atomic_inc(&r10_bio->remaining); |
972 | spin_lock_irqsave(&conf->device_lock, flags); | 975 | spin_lock_irqsave(&conf->device_lock, flags); |
973 | bio_list_add(&conf->pending_bio_list, mbio); | 976 | bio_list_add(&conf->pending_bio_list, mbio); |
974 | blk_plug_device(mddev->queue); | 977 | blk_plug_device_unlocked(mddev->queue); |
975 | spin_unlock_irqrestore(&conf->device_lock, flags); | 978 | spin_unlock_irqrestore(&conf->device_lock, flags); |
976 | } | 979 | } |
977 | 980 | ||
@@ -2304,8 +2307,6 @@ static int run(mddev_t *mddev) | |||
2304 | if (!conf) | 2307 | if (!conf) |
2305 | goto out; | 2308 | goto out; |
2306 | 2309 | ||
2307 | mddev->queue->queue_lock = &conf->device_lock; | ||
2308 | |||
2309 | mddev->thread = conf->thread; | 2310 | mddev->thread = conf->thread; |
2310 | conf->thread = NULL; | 2311 | conf->thread = NULL; |
2311 | 2312 | ||
@@ -2463,11 +2464,13 @@ static void *raid10_takeover_raid0(mddev_t *mddev) | |||
2463 | mddev->recovery_cp = MaxSector; | 2464 | mddev->recovery_cp = MaxSector; |
2464 | 2465 | ||
2465 | conf = setup_conf(mddev); | 2466 | conf = setup_conf(mddev); |
2466 | if (!IS_ERR(conf)) | 2467 | if (!IS_ERR(conf)) { |
2467 | list_for_each_entry(rdev, &mddev->disks, same_set) | 2468 | list_for_each_entry(rdev, &mddev->disks, same_set) |
2468 | if (rdev->raid_disk >= 0) | 2469 | if (rdev->raid_disk >= 0) |
2469 | rdev->new_raid_disk = rdev->raid_disk * 2; | 2470 | rdev->new_raid_disk = rdev->raid_disk * 2; |
2470 | 2471 | conf->barrier = 1; | |
2472 | } | ||
2473 | |||
2471 | return conf; | 2474 | return conf; |
2472 | } | 2475 | } |
2473 | 2476 | ||
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 5044babfcda0..78536fdbd87f 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -5204,7 +5204,6 @@ static int run(mddev_t *mddev) | |||
5204 | 5204 | ||
5205 | mddev->queue->backing_dev_info.congested_data = mddev; | 5205 | mddev->queue->backing_dev_info.congested_data = mddev; |
5206 | mddev->queue->backing_dev_info.congested_fn = raid5_congested; | 5206 | mddev->queue->backing_dev_info.congested_fn = raid5_congested; |
5207 | mddev->queue->queue_lock = &conf->device_lock; | ||
5208 | mddev->queue->unplug_fn = raid5_unplug_queue; | 5207 | mddev->queue->unplug_fn = raid5_unplug_queue; |
5209 | 5208 | ||
5210 | chunk_size = mddev->chunk_sectors << 9; | 5209 | chunk_size = mddev->chunk_sectors << 9; |
@@ -5517,7 +5516,6 @@ static int raid5_start_reshape(mddev_t *mddev) | |||
5517 | raid5_conf_t *conf = mddev->private; | 5516 | raid5_conf_t *conf = mddev->private; |
5518 | mdk_rdev_t *rdev; | 5517 | mdk_rdev_t *rdev; |
5519 | int spares = 0; | 5518 | int spares = 0; |
5520 | int added_devices = 0; | ||
5521 | unsigned long flags; | 5519 | unsigned long flags; |
5522 | 5520 | ||
5523 | if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) | 5521 | if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) |
@@ -5527,8 +5525,8 @@ static int raid5_start_reshape(mddev_t *mddev) | |||
5527 | return -ENOSPC; | 5525 | return -ENOSPC; |
5528 | 5526 | ||
5529 | list_for_each_entry(rdev, &mddev->disks, same_set) | 5527 | list_for_each_entry(rdev, &mddev->disks, same_set) |
5530 | if ((rdev->raid_disk < 0 || rdev->raid_disk >= conf->raid_disks) | 5528 | if (!test_bit(In_sync, &rdev->flags) |
5531 | && !test_bit(Faulty, &rdev->flags)) | 5529 | && !test_bit(Faulty, &rdev->flags)) |
5532 | spares++; | 5530 | spares++; |
5533 | 5531 | ||
5534 | if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) | 5532 | if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) |
@@ -5571,34 +5569,35 @@ static int raid5_start_reshape(mddev_t *mddev) | |||
5571 | * to correctly record the "partially reconstructed" state of | 5569 | * to correctly record the "partially reconstructed" state of |
5572 | * such devices during the reshape and confusion could result. | 5570 | * such devices during the reshape and confusion could result. |
5573 | */ | 5571 | */ |
5574 | if (mddev->delta_disks >= 0) | 5572 | if (mddev->delta_disks >= 0) { |
5575 | list_for_each_entry(rdev, &mddev->disks, same_set) | 5573 | int added_devices = 0; |
5576 | if (rdev->raid_disk < 0 && | 5574 | list_for_each_entry(rdev, &mddev->disks, same_set) |
5577 | !test_bit(Faulty, &rdev->flags)) { | 5575 | if (rdev->raid_disk < 0 && |
5578 | if (raid5_add_disk(mddev, rdev) == 0) { | 5576 | !test_bit(Faulty, &rdev->flags)) { |
5579 | char nm[20]; | 5577 | if (raid5_add_disk(mddev, rdev) == 0) { |
5580 | if (rdev->raid_disk >= conf->previous_raid_disks) { | 5578 | char nm[20]; |
5581 | set_bit(In_sync, &rdev->flags); | 5579 | if (rdev->raid_disk |
5582 | added_devices++; | 5580 | >= conf->previous_raid_disks) { |
5583 | } else | 5581 | set_bit(In_sync, &rdev->flags); |
5584 | rdev->recovery_offset = 0; | 5582 | added_devices++; |
5585 | sprintf(nm, "rd%d", rdev->raid_disk); | 5583 | } else |
5586 | if (sysfs_create_link(&mddev->kobj, | 5584 | rdev->recovery_offset = 0; |
5587 | &rdev->kobj, nm)) | 5585 | sprintf(nm, "rd%d", rdev->raid_disk); |
5588 | /* Failure here is OK */; | 5586 | if (sysfs_create_link(&mddev->kobj, |
5589 | } else | 5587 | &rdev->kobj, nm)) |
5590 | break; | 5588 | /* Failure here is OK */; |
5591 | } else if (rdev->raid_disk >= conf->previous_raid_disks | 5589 | } |
5592 | && !test_bit(Faulty, &rdev->flags)) { | 5590 | } else if (rdev->raid_disk >= conf->previous_raid_disks |
5593 | /* This is a spare that was manually added */ | 5591 | && !test_bit(Faulty, &rdev->flags)) { |
5594 | set_bit(In_sync, &rdev->flags); | 5592 | /* This is a spare that was manually added */ |
5595 | added_devices++; | 5593 | set_bit(In_sync, &rdev->flags); |
5596 | } | 5594 | added_devices++; |
5595 | } | ||
5597 | 5596 | ||
5598 | /* When a reshape changes the number of devices, ->degraded | 5597 | /* When a reshape changes the number of devices, |
5599 | * is measured against the larger of the pre and post number of | 5598 | * ->degraded is measured against the larger of the |
5600 | * devices.*/ | 5599 | * pre and post number of devices. |
5601 | if (mddev->delta_disks > 0) { | 5600 | */ |
5602 | spin_lock_irqsave(&conf->device_lock, flags); | 5601 | spin_lock_irqsave(&conf->device_lock, flags); |
5603 | mddev->degraded += (conf->raid_disks - conf->previous_raid_disks) | 5602 | mddev->degraded += (conf->raid_disks - conf->previous_raid_disks) |
5604 | - added_devices; | 5603 | - added_devices; |
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c index e9a3eab7b0cf..8c1d85e27be4 100644 --- a/drivers/memstick/core/memstick.c +++ b/drivers/memstick/core/memstick.c | |||
@@ -621,7 +621,7 @@ static int __init memstick_init(void) | |||
621 | { | 621 | { |
622 | int rc; | 622 | int rc; |
623 | 623 | ||
624 | workqueue = create_freezeable_workqueue("kmemstick"); | 624 | workqueue = create_freezable_workqueue("kmemstick"); |
625 | if (!workqueue) | 625 | if (!workqueue) |
626 | return -ENOMEM; | 626 | return -ENOMEM; |
627 | 627 | ||
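This is the first of several identical one-line renames in this series (memstick here, then tifm, vmw_balloon, r852, sm_ftl and mcp251x below): create_freezeable_workqueue() becomes create_freezable_workqueue(). A minimal, self-contained usage sketch of the corrected helper, with illustrative names:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;	/* illustrative name */

static int __init example_init(void)
{
	/* Freezable: the workqueue is frozen during suspend, so no work
	 * items run while the system is going down for suspend/hibernate. */
	example_wq = create_freezable_workqueue("example");
	if (!example_wq)
		return -ENOMEM;
	return 0;
}

static void __exit example_exit(void)
{
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);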
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h index f71f22948477..1735c84ff757 100644 --- a/drivers/message/fusion/mptbase.h +++ b/drivers/message/fusion/mptbase.h | |||
@@ -76,8 +76,8 @@ | |||
76 | #define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR | 76 | #define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR |
77 | #endif | 77 | #endif |
78 | 78 | ||
79 | #define MPT_LINUX_VERSION_COMMON "3.04.17" | 79 | #define MPT_LINUX_VERSION_COMMON "3.04.18" |
80 | #define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.17" | 80 | #define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.18" |
81 | #define WHAT_MAGIC_STRING "@" "(" "#" ")" | 81 | #define WHAT_MAGIC_STRING "@" "(" "#" ")" |
82 | 82 | ||
83 | #define show_mptmod_ver(s,ver) \ | 83 | #define show_mptmod_ver(s,ver) \ |
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index a3856ed90aef..e8deb8ed0499 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c | |||
@@ -597,6 +597,13 @@ mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) | |||
597 | } | 597 | } |
598 | 598 | ||
599 | static int | 599 | static int |
600 | mptctl_release(struct inode *inode, struct file *filep) | ||
601 | { | ||
602 | fasync_helper(-1, filep, 0, &async_queue); | ||
603 | return 0; | ||
604 | } | ||
605 | |||
606 | static int | ||
600 | mptctl_fasync(int fd, struct file *filep, int mode) | 607 | mptctl_fasync(int fd, struct file *filep, int mode) |
601 | { | 608 | { |
602 | MPT_ADAPTER *ioc; | 609 | MPT_ADAPTER *ioc; |
@@ -2815,6 +2822,7 @@ static const struct file_operations mptctl_fops = { | |||
2815 | .llseek = no_llseek, | 2822 | .llseek = no_llseek, |
2816 | .fasync = mptctl_fasync, | 2823 | .fasync = mptctl_fasync, |
2817 | .unlocked_ioctl = mptctl_ioctl, | 2824 | .unlocked_ioctl = mptctl_ioctl, |
2825 | .release = mptctl_release, | ||
2818 | #ifdef CONFIG_COMPAT | 2826 | #ifdef CONFIG_COMPAT |
2819 | .compat_ioctl = compat_mpctl_ioctl, | 2827 | .compat_ioctl = compat_mpctl_ioctl, |
2820 | #endif | 2828 | #endif |
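The new mptctl_release() above uses the usual fasync teardown idiom: calling fasync_helper() with fd = -1 and mode = 0 drops the file from the async notification list so no further SIGIO is delivered after close. A self-contained sketch of the pattern for a hypothetical character device (the example_* names are illustrative):

#include <linux/fs.h>
#include <linux/module.h>

static struct fasync_struct *example_async_queue;

static int example_fasync(int fd, struct file *filp, int mode)
{
	/* add to or remove from the async notification list */
	return fasync_helper(fd, filp, mode, &example_async_queue);
}

static int example_release(struct inode *inode, struct file *filp)
{
	/* drop this file so no SIGIO is sent to it after close */
	fasync_helper(-1, filp, 0, &example_async_queue);
	return 0;
}

static const struct file_operations example_fops = {
	.owner   = THIS_MODULE,
	.fasync  = example_fasync,
	.release = example_release,
};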
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index 59b8f53d1ece..0d9b82a44540 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c | |||
@@ -1873,8 +1873,9 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) | |||
1873 | } | 1873 | } |
1874 | 1874 | ||
1875 | out: | 1875 | out: |
1876 | printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n", | 1876 | printk(MYIOC_s_INFO_FMT "task abort: %s (rv=%04x) (sc=%p) (sn=%ld)\n", |
1877 | ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), SCpnt); | 1877 | ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), retval, |
1878 | SCpnt, SCpnt->serial_number); | ||
1878 | 1879 | ||
1879 | return retval; | 1880 | return retval; |
1880 | } | 1881 | } |
@@ -1911,7 +1912,7 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt) | |||
1911 | 1912 | ||
1912 | vdevice = SCpnt->device->hostdata; | 1913 | vdevice = SCpnt->device->hostdata; |
1913 | if (!vdevice || !vdevice->vtarget) { | 1914 | if (!vdevice || !vdevice->vtarget) { |
1914 | retval = SUCCESS; | 1915 | retval = 0; |
1915 | goto out; | 1916 | goto out; |
1916 | } | 1917 | } |
1917 | 1918 | ||
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c index 5f6852dff40b..44d4475a09dd 100644 --- a/drivers/misc/tifm_core.c +++ b/drivers/misc/tifm_core.c | |||
@@ -329,7 +329,7 @@ static int __init tifm_init(void) | |||
329 | { | 329 | { |
330 | int rc; | 330 | int rc; |
331 | 331 | ||
332 | workqueue = create_freezeable_workqueue("tifm"); | 332 | workqueue = create_freezable_workqueue("tifm"); |
333 | if (!workqueue) | 333 | if (!workqueue) |
334 | return -ENOMEM; | 334 | return -ENOMEM; |
335 | 335 | ||
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c index 4d2ea8e80140..6df5a55da110 100644 --- a/drivers/misc/vmw_balloon.c +++ b/drivers/misc/vmw_balloon.c | |||
@@ -785,7 +785,7 @@ static int __init vmballoon_init(void) | |||
785 | if (x86_hyper != &x86_hyper_vmware) | 785 | if (x86_hyper != &x86_hyper_vmware) |
786 | return -ENODEV; | 786 | return -ENODEV; |
787 | 787 | ||
788 | vmballoon_wq = create_freezeable_workqueue("vmmemctl"); | 788 | vmballoon_wq = create_freezable_workqueue("vmmemctl"); |
789 | if (!vmballoon_wq) { | 789 | if (!vmballoon_wq) { |
790 | pr_err("failed to create workqueue\n"); | 790 | pr_err("failed to create workqueue\n"); |
791 | return -ENOMEM; | 791 | return -ENOMEM; |
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c index d9d7efbc77cc..6322d1fb5d62 100644 --- a/drivers/mtd/nand/r852.c +++ b/drivers/mtd/nand/r852.c | |||
@@ -930,7 +930,7 @@ int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) | |||
930 | 930 | ||
931 | init_completion(&dev->dma_done); | 931 | init_completion(&dev->dma_done); |
932 | 932 | ||
933 | dev->card_workqueue = create_freezeable_workqueue(DRV_NAME); | 933 | dev->card_workqueue = create_freezable_workqueue(DRV_NAME); |
934 | 934 | ||
935 | if (!dev->card_workqueue) | 935 | if (!dev->card_workqueue) |
936 | goto error9; | 936 | goto error9; |
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c index 67822cf6c025..ac0d6a8613b5 100644 --- a/drivers/mtd/sm_ftl.c +++ b/drivers/mtd/sm_ftl.c | |||
@@ -1258,7 +1258,7 @@ static struct mtd_blktrans_ops sm_ftl_ops = { | |||
1258 | static __init int sm_module_init(void) | 1258 | static __init int sm_module_init(void) |
1259 | { | 1259 | { |
1260 | int error = 0; | 1260 | int error = 0; |
1261 | cache_flush_workqueue = create_freezeable_workqueue("smflush"); | 1261 | cache_flush_workqueue = create_freezable_workqueue("smflush"); |
1262 | 1262 | ||
1263 | if (IS_ERR(cache_flush_workqueue)) | 1263 | if (IS_ERR(cache_flush_workqueue)) |
1264 | return PTR_ERR(cache_flush_workqueue); | 1264 | return PTR_ERR(cache_flush_workqueue); |
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index 0c7811faf72c..a179cc6d79f2 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c | |||
@@ -1786,6 +1786,10 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter, | |||
1786 | spin_lock_bh(&adapter->mcc_lock); | 1786 | spin_lock_bh(&adapter->mcc_lock); |
1787 | 1787 | ||
1788 | wrb = wrb_from_mccq(adapter); | 1788 | wrb = wrb_from_mccq(adapter); |
1789 | if (!wrb) { | ||
1790 | status = -EBUSY; | ||
1791 | goto err; | ||
1792 | } | ||
1789 | req = nonemb_cmd->va; | 1793 | req = nonemb_cmd->va; |
1790 | sge = nonembedded_sgl(wrb); | 1794 | sge = nonembedded_sgl(wrb); |
1791 | 1795 | ||
@@ -1801,6 +1805,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter, | |||
1801 | 1805 | ||
1802 | status = be_mcc_notify_wait(adapter); | 1806 | status = be_mcc_notify_wait(adapter); |
1803 | 1807 | ||
1808 | err: | ||
1804 | spin_unlock_bh(&adapter->mcc_lock); | 1809 | spin_unlock_bh(&adapter->mcc_lock); |
1805 | return status; | 1810 | return status; |
1806 | } | 1811 | } |
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index f40740e68ea5..d584d32c747d 100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c | |||
@@ -4276,9 +4276,12 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp) | |||
4276 | def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST | | 4276 | def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST | |
4277 | BNX2X_ACCEPT_MULTICAST; | 4277 | BNX2X_ACCEPT_MULTICAST; |
4278 | #ifdef BCM_CNIC | 4278 | #ifdef BCM_CNIC |
4279 | cl_id = bnx2x_fcoe(bp, cl_id); | 4279 | if (!NO_FCOE(bp)) { |
4280 | bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST | | 4280 | cl_id = bnx2x_fcoe(bp, cl_id); |
4281 | BNX2X_ACCEPT_MULTICAST); | 4281 | bnx2x_rxq_set_mac_filters(bp, cl_id, |
4282 | BNX2X_ACCEPT_UNICAST | | ||
4283 | BNX2X_ACCEPT_MULTICAST); | ||
4284 | } | ||
4282 | #endif | 4285 | #endif |
4283 | break; | 4286 | break; |
4284 | 4287 | ||
@@ -4286,18 +4289,29 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp) | |||
4286 | def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST | | 4289 | def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST | |
4287 | BNX2X_ACCEPT_ALL_MULTICAST; | 4290 | BNX2X_ACCEPT_ALL_MULTICAST; |
4288 | #ifdef BCM_CNIC | 4291 | #ifdef BCM_CNIC |
4289 | cl_id = bnx2x_fcoe(bp, cl_id); | 4292 | /* |
4290 | bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST | | 4293 | * Prevent duplication of multicast packets by configuring FCoE |
4291 | BNX2X_ACCEPT_MULTICAST); | 4294 | * L2 Client to receive only matched unicast frames. |
4295 | */ | ||
4296 | if (!NO_FCOE(bp)) { | ||
4297 | cl_id = bnx2x_fcoe(bp, cl_id); | ||
4298 | bnx2x_rxq_set_mac_filters(bp, cl_id, | ||
4299 | BNX2X_ACCEPT_UNICAST); | ||
4300 | } | ||
4292 | #endif | 4301 | #endif |
4293 | break; | 4302 | break; |
4294 | 4303 | ||
4295 | case BNX2X_RX_MODE_PROMISC: | 4304 | case BNX2X_RX_MODE_PROMISC: |
4296 | def_q_filters |= BNX2X_PROMISCUOUS_MODE; | 4305 | def_q_filters |= BNX2X_PROMISCUOUS_MODE; |
4297 | #ifdef BCM_CNIC | 4306 | #ifdef BCM_CNIC |
4298 | cl_id = bnx2x_fcoe(bp, cl_id); | 4307 | /* |
4299 | bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST | | 4308 | * Prevent packet duplication by configuring DROP_ALL for FCoE |
4300 | BNX2X_ACCEPT_MULTICAST); | 4309 | * L2 Client. |
4310 | */ | ||
4311 | if (!NO_FCOE(bp)) { | ||
4312 | cl_id = bnx2x_fcoe(bp, cl_id); | ||
4313 | bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE); | ||
4314 | } | ||
4301 | #endif | 4315 | #endif |
4302 | /* pass management unicast packets as well */ | 4316 | /* pass management unicast packets as well */ |
4303 | llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST; | 4317 | llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST; |
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c index 7ab534aee452..7513c4523ac4 100644 --- a/drivers/net/can/mcp251x.c +++ b/drivers/net/can/mcp251x.c | |||
@@ -940,7 +940,7 @@ static int mcp251x_open(struct net_device *net) | |||
940 | goto open_unlock; | 940 | goto open_unlock; |
941 | } | 941 | } |
942 | 942 | ||
943 | priv->wq = create_freezeable_workqueue("mcp251x_wq"); | 943 | priv->wq = create_freezable_workqueue("mcp251x_wq"); |
944 | INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler); | 944 | INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler); |
945 | INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler); | 945 | INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler); |
946 | 946 | ||
diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig index 27d1d398e25e..d38706958af6 100644 --- a/drivers/net/can/mscan/Kconfig +++ b/drivers/net/can/mscan/Kconfig | |||
@@ -1,5 +1,5 @@ | |||
1 | config CAN_MSCAN | 1 | config CAN_MSCAN |
2 | depends on CAN_DEV && (PPC || M68K || M68KNOMMU) | 2 | depends on CAN_DEV && (PPC || M68K) |
3 | tristate "Support for Freescale MSCAN based chips" | 3 | tristate "Support for Freescale MSCAN based chips" |
4 | ---help--- | 4 | ---help--- |
5 | The Motorola Scalable Controller Area Network (MSCAN) definition | 5 | The Motorola Scalable Controller Area Network (MSCAN) definition |
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c index c42e97268248..e54712b22c27 100644 --- a/drivers/net/can/pch_can.c +++ b/drivers/net/can/pch_can.c | |||
@@ -185,7 +185,7 @@ struct pch_can_priv { | |||
185 | 185 | ||
186 | static struct can_bittiming_const pch_can_bittiming_const = { | 186 | static struct can_bittiming_const pch_can_bittiming_const = { |
187 | .name = KBUILD_MODNAME, | 187 | .name = KBUILD_MODNAME, |
188 | .tseg1_min = 1, | 188 | .tseg1_min = 2, |
189 | .tseg1_max = 16, | 189 | .tseg1_max = 16, |
190 | .tseg2_min = 1, | 190 | .tseg2_min = 1, |
191 | .tseg2_max = 8, | 191 | .tseg2_max = 8, |
@@ -959,13 +959,13 @@ static void __devexit pch_can_remove(struct pci_dev *pdev) | |||
959 | struct pch_can_priv *priv = netdev_priv(ndev); | 959 | struct pch_can_priv *priv = netdev_priv(ndev); |
960 | 960 | ||
961 | unregister_candev(priv->ndev); | 961 | unregister_candev(priv->ndev); |
962 | pci_iounmap(pdev, priv->regs); | ||
963 | if (priv->use_msi) | 962 | if (priv->use_msi) |
964 | pci_disable_msi(priv->dev); | 963 | pci_disable_msi(priv->dev); |
965 | pci_release_regions(pdev); | 964 | pci_release_regions(pdev); |
966 | pci_disable_device(pdev); | 965 | pci_disable_device(pdev); |
967 | pci_set_drvdata(pdev, NULL); | 966 | pci_set_drvdata(pdev, NULL); |
968 | pch_can_reset(priv); | 967 | pch_can_reset(priv); |
968 | pci_iounmap(pdev, priv->regs); | ||
969 | free_candev(priv->ndev); | 969 | free_candev(priv->ndev); |
970 | } | 970 | } |
971 | 971 | ||
@@ -1238,6 +1238,7 @@ static int __devinit pch_can_probe(struct pci_dev *pdev, | |||
1238 | priv->use_msi = 0; | 1238 | priv->use_msi = 0; |
1239 | } else { | 1239 | } else { |
1240 | netdev_err(ndev, "PCH CAN opened with MSI\n"); | 1240 | netdev_err(ndev, "PCH CAN opened with MSI\n"); |
1241 | pci_set_master(pdev); | ||
1241 | priv->use_msi = 1; | 1242 | priv->use_msi = 1; |
1242 | } | 1243 | } |
1243 | 1244 | ||
diff --git a/drivers/net/can/softing/Kconfig b/drivers/net/can/softing/Kconfig index 8ba81b3ddd90..5de46a9a77bb 100644 --- a/drivers/net/can/softing/Kconfig +++ b/drivers/net/can/softing/Kconfig | |||
@@ -18,7 +18,7 @@ config CAN_SOFTING | |||
18 | config CAN_SOFTING_CS | 18 | config CAN_SOFTING_CS |
19 | tristate "Softing Gmbh CAN pcmcia cards" | 19 | tristate "Softing Gmbh CAN pcmcia cards" |
20 | depends on PCMCIA | 20 | depends on PCMCIA |
21 | select CAN_SOFTING | 21 | depends on CAN_SOFTING |
22 | ---help--- | 22 | ---help--- |
23 | Support for PCMCIA cards from Softing Gmbh & some cards | 23 | Support for PCMCIA cards from Softing Gmbh & some cards |
24 | from Vector Gmbh. | 24 | from Vector Gmbh. |
diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c index 300fe75dd1a7..c11bb4de8630 100644 --- a/drivers/net/can/softing/softing_cs.c +++ b/drivers/net/can/softing/softing_cs.c | |||
@@ -19,6 +19,7 @@ | |||
19 | 19 | ||
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/slab.h> | ||
22 | 23 | ||
23 | #include <pcmcia/cistpl.h> | 24 | #include <pcmcia/cistpl.h> |
24 | #include <pcmcia/ds.h> | 25 | #include <pcmcia/ds.h> |
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c index 56166ae2059f..6aad64df4dcb 100644 --- a/drivers/net/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/cxgb4vf/cxgb4vf_main.c | |||
@@ -2040,7 +2040,7 @@ static int __devinit setup_debugfs(struct adapter *adapter) | |||
2040 | { | 2040 | { |
2041 | int i; | 2041 | int i; |
2042 | 2042 | ||
2043 | BUG_ON(adapter->debugfs_root == NULL); | 2043 | BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root)); |
2044 | 2044 | ||
2045 | /* | 2045 | /* |
2046 | * Debugfs support is best effort. | 2046 | * Debugfs support is best effort. |
@@ -2061,7 +2061,7 @@ static int __devinit setup_debugfs(struct adapter *adapter) | |||
2061 | */ | 2061 | */ |
2062 | static void cleanup_debugfs(struct adapter *adapter) | 2062 | static void cleanup_debugfs(struct adapter *adapter) |
2063 | { | 2063 | { |
2064 | BUG_ON(adapter->debugfs_root == NULL); | 2064 | BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root)); |
2065 | 2065 | ||
2066 | /* | 2066 | /* |
2067 | * Unlike our sister routine cleanup_proc(), we don't need to remove | 2067 | * Unlike our sister routine cleanup_proc(), we don't need to remove |
@@ -2489,17 +2489,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, | |||
2489 | struct net_device *netdev; | 2489 | struct net_device *netdev; |
2490 | 2490 | ||
2491 | /* | 2491 | /* |
2492 | * Vet our module parameters. | ||
2493 | */ | ||
2494 | if (msi != MSI_MSIX && msi != MSI_MSI) { | ||
2495 | dev_err(&pdev->dev, "bad module parameter msi=%d; must be %d" | ||
2496 | " (MSI-X or MSI) or %d (MSI)\n", msi, MSI_MSIX, | ||
2497 | MSI_MSI); | ||
2498 | err = -EINVAL; | ||
2499 | goto err_out; | ||
2500 | } | ||
2501 | |||
2502 | /* | ||
2503 | * Print our driver banner the first time we're called to initialize a | 2492 | * Print our driver banner the first time we're called to initialize a |
2504 | * device. | 2493 | * device. |
2505 | */ | 2494 | */ |
@@ -2711,11 +2700,11 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, | |||
2711 | /* | 2700 | /* |
2712 | * Set up our debugfs entries. | 2701 | * Set up our debugfs entries. |
2713 | */ | 2702 | */ |
2714 | if (cxgb4vf_debugfs_root) { | 2703 | if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) { |
2715 | adapter->debugfs_root = | 2704 | adapter->debugfs_root = |
2716 | debugfs_create_dir(pci_name(pdev), | 2705 | debugfs_create_dir(pci_name(pdev), |
2717 | cxgb4vf_debugfs_root); | 2706 | cxgb4vf_debugfs_root); |
2718 | if (adapter->debugfs_root == NULL) | 2707 | if (IS_ERR_OR_NULL(adapter->debugfs_root)) |
2719 | dev_warn(&pdev->dev, "could not create debugfs" | 2708 | dev_warn(&pdev->dev, "could not create debugfs" |
2720 | " directory"); | 2709 | " directory"); |
2721 | else | 2710 | else |
@@ -2770,7 +2759,7 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, | |||
2770 | */ | 2759 | */ |
2771 | 2760 | ||
2772 | err_free_debugfs: | 2761 | err_free_debugfs: |
2773 | if (adapter->debugfs_root) { | 2762 | if (!IS_ERR_OR_NULL(adapter->debugfs_root)) { |
2774 | cleanup_debugfs(adapter); | 2763 | cleanup_debugfs(adapter); |
2775 | debugfs_remove_recursive(adapter->debugfs_root); | 2764 | debugfs_remove_recursive(adapter->debugfs_root); |
2776 | } | 2765 | } |
@@ -2802,7 +2791,6 @@ err_release_regions: | |||
2802 | err_disable_device: | 2791 | err_disable_device: |
2803 | pci_disable_device(pdev); | 2792 | pci_disable_device(pdev); |
2804 | 2793 | ||
2805 | err_out: | ||
2806 | return err; | 2794 | return err; |
2807 | } | 2795 | } |
2808 | 2796 | ||
@@ -2840,7 +2828,7 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev) | |||
2840 | /* | 2828 | /* |
2841 | * Tear down our debugfs entries. | 2829 | * Tear down our debugfs entries. |
2842 | */ | 2830 | */ |
2843 | if (adapter->debugfs_root) { | 2831 | if (!IS_ERR_OR_NULL(adapter->debugfs_root)) { |
2844 | cleanup_debugfs(adapter); | 2832 | cleanup_debugfs(adapter); |
2845 | debugfs_remove_recursive(adapter->debugfs_root); | 2833 | debugfs_remove_recursive(adapter->debugfs_root); |
2846 | } | 2834 | } |
@@ -2874,6 +2862,46 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev) | |||
2874 | } | 2862 | } |
2875 | 2863 | ||
2876 | /* | 2864 | /* |
2865 | * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt | ||
2866 | * delivery. | ||
2867 | */ | ||
2868 | static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev) | ||
2869 | { | ||
2870 | struct adapter *adapter; | ||
2871 | int pidx; | ||
2872 | |||
2873 | adapter = pci_get_drvdata(pdev); | ||
2874 | if (!adapter) | ||
2875 | return; | ||
2876 | |||
2877 | /* | ||
2878 | * Disable all Virtual Interfaces. This will shut down the | ||
2879 | * delivery of all ingress packets into the chip for these | ||
2880 | * Virtual Interfaces. | ||
2881 | */ | ||
2882 | for_each_port(adapter, pidx) { | ||
2883 | struct net_device *netdev; | ||
2884 | struct port_info *pi; | ||
2885 | |||
2886 | if (!test_bit(pidx, &adapter->registered_device_map)) | ||
2887 | continue; | ||
2888 | |||
2889 | netdev = adapter->port[pidx]; | ||
2890 | if (!netdev) | ||
2891 | continue; | ||
2892 | |||
2893 | pi = netdev_priv(netdev); | ||
2894 | t4vf_enable_vi(adapter, pi->viid, false, false); | ||
2895 | } | ||
2896 | |||
2897 | /* | ||
2898 | * Free up all Queues which will prevent further DMA and | ||
2899 | * Interrupts allowing various internal pathways to drain. | ||
2900 | */ | ||
2901 | t4vf_free_sge_resources(adapter); | ||
2902 | } | ||
2903 | |||
2904 | /* | ||
2877 | * PCI Device registration data structures. | 2905 | * PCI Device registration data structures. |
2878 | */ | 2906 | */ |
2879 | #define CH_DEVICE(devid, idx) \ | 2907 | #define CH_DEVICE(devid, idx) \ |
@@ -2906,6 +2934,7 @@ static struct pci_driver cxgb4vf_driver = { | |||
2906 | .id_table = cxgb4vf_pci_tbl, | 2934 | .id_table = cxgb4vf_pci_tbl, |
2907 | .probe = cxgb4vf_pci_probe, | 2935 | .probe = cxgb4vf_pci_probe, |
2908 | .remove = __devexit_p(cxgb4vf_pci_remove), | 2936 | .remove = __devexit_p(cxgb4vf_pci_remove), |
2937 | .shutdown = __devexit_p(cxgb4vf_pci_shutdown), | ||
2909 | }; | 2938 | }; |
2910 | 2939 | ||
2911 | /* | 2940 | /* |
@@ -2915,14 +2944,25 @@ static int __init cxgb4vf_module_init(void) | |||
2915 | { | 2944 | { |
2916 | int ret; | 2945 | int ret; |
2917 | 2946 | ||
2947 | /* | ||
2948 | * Vet our module parameters. | ||
2949 | */ | ||
2950 | if (msi != MSI_MSIX && msi != MSI_MSI) { | ||
2951 | printk(KERN_WARNING KBUILD_MODNAME | ||
2952 | ": bad module parameter msi=%d; must be %d" | ||
2953 | " (MSI-X or MSI) or %d (MSI)\n", | ||
2954 | msi, MSI_MSIX, MSI_MSI); | ||
2955 | return -EINVAL; | ||
2956 | } | ||
2957 | |||
2918 | /* Debugfs support is optional, just warn if this fails */ | 2958 | /* Debugfs support is optional, just warn if this fails */ |
2919 | cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); | 2959 | cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); |
2920 | if (!cxgb4vf_debugfs_root) | 2960 | if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) |
2921 | printk(KERN_WARNING KBUILD_MODNAME ": could not create" | 2961 | printk(KERN_WARNING KBUILD_MODNAME ": could not create" |
2922 | " debugfs entry, continuing\n"); | 2962 | " debugfs entry, continuing\n"); |
2923 | 2963 | ||
2924 | ret = pci_register_driver(&cxgb4vf_driver); | 2964 | ret = pci_register_driver(&cxgb4vf_driver); |
2925 | if (ret < 0) | 2965 | if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) |
2926 | debugfs_remove(cxgb4vf_debugfs_root); | 2966 | debugfs_remove(cxgb4vf_debugfs_root); |
2927 | return ret; | 2967 | return ret; |
2928 | } | 2968 | } |
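The cxgb4vf hunks above switch every debugfs check to IS_ERR_OR_NULL(), since debugfs_create_dir() can return either NULL (creation failed) or an ERR_PTR value such as -ENODEV (debugfs not compiled in), and a plain NULL test mishandles the latter. A minimal sketch of the guarded setup and teardown, with illustrative names:

#include <linux/debugfs.h>
#include <linux/err.h>

static struct dentry *example_debugfs_root;	/* illustrative */

static void example_debugfs_setup(void)
{
	example_debugfs_root = debugfs_create_dir("example", NULL);
	if (IS_ERR_OR_NULL(example_debugfs_root)) {
		/* debugfs unavailable or creation failed: continue without it */
		example_debugfs_root = NULL;
		return;
	}
	/* debugfs_create_file() calls for individual entries go here */
}

static void example_debugfs_teardown(void)
{
	if (example_debugfs_root)
		debugfs_remove_recursive(example_debugfs_root);
}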
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c index 0f51c80475ce..192db226ec7f 100644 --- a/drivers/net/cxgb4vf/t4vf_hw.c +++ b/drivers/net/cxgb4vf/t4vf_hw.c | |||
@@ -171,7 +171,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, | |||
171 | delay_idx = 0; | 171 | delay_idx = 0; |
172 | ms = delay[0]; | 172 | ms = delay[0]; |
173 | 173 | ||
174 | for (i = 0; i < 500; i += ms) { | 174 | for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) { |
175 | if (sleep_ok) { | 175 | if (sleep_ok) { |
176 | ms = delay[delay_idx]; | 176 | ms = delay[delay_idx]; |
177 | if (delay_idx < ARRAY_SIZE(delay) - 1) | 177 | if (delay_idx < ARRAY_SIZE(delay) - 1) |
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index 2d4c4fc1d900..461dd6f905f7 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c | |||
@@ -802,10 +802,7 @@ dm9000_init_dm9000(struct net_device *dev) | |||
802 | /* Checksum mode */ | 802 | /* Checksum mode */ |
803 | dm9000_set_rx_csum_unlocked(dev, db->rx_csum); | 803 | dm9000_set_rx_csum_unlocked(dev, db->rx_csum); |
804 | 804 | ||
805 | /* GPIO0 on pre-activate PHY */ | ||
806 | iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ | ||
807 | iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ | 805 | iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ |
808 | iow(db, DM9000_GPR, 0); /* Enable PHY */ | ||
809 | 806 | ||
810 | ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0; | 807 | ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0; |
811 | 808 | ||
@@ -852,8 +849,8 @@ static void dm9000_timeout(struct net_device *dev) | |||
852 | unsigned long flags; | 849 | unsigned long flags; |
853 | 850 | ||
854 | /* Save previous register address */ | 851 | /* Save previous register address */ |
855 | reg_save = readb(db->io_addr); | ||
856 | spin_lock_irqsave(&db->lock, flags); | 852 | spin_lock_irqsave(&db->lock, flags); |
853 | reg_save = readb(db->io_addr); | ||
857 | 854 | ||
858 | netif_stop_queue(dev); | 855 | netif_stop_queue(dev); |
859 | dm9000_reset(db); | 856 | dm9000_reset(db); |
@@ -1194,6 +1191,10 @@ dm9000_open(struct net_device *dev) | |||
1194 | if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev)) | 1191 | if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev)) |
1195 | return -EAGAIN; | 1192 | return -EAGAIN; |
1196 | 1193 | ||
1194 | /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */ | ||
1195 | iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ | ||
1196 | mdelay(1); /* delay needed by DM9000B */ | ||
1197 | |||
1197 | /* Initialize DM9000 board */ | 1198 | /* Initialize DM9000 board */ |
1198 | dm9000_reset(db); | 1199 | dm9000_reset(db); |
1199 | dm9000_init_dm9000(dev); | 1200 | dm9000_init_dm9000(dev); |
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c index aed223b1b897..7501d977d992 100644 --- a/drivers/net/e1000/e1000_hw.c +++ b/drivers/net/e1000/e1000_hw.c | |||
@@ -124,6 +124,7 @@ static s32 e1000_set_phy_type(struct e1000_hw *hw) | |||
124 | case M88E1000_I_PHY_ID: | 124 | case M88E1000_I_PHY_ID: |
125 | case M88E1011_I_PHY_ID: | 125 | case M88E1011_I_PHY_ID: |
126 | case M88E1111_I_PHY_ID: | 126 | case M88E1111_I_PHY_ID: |
127 | case M88E1118_E_PHY_ID: | ||
127 | hw->phy_type = e1000_phy_m88; | 128 | hw->phy_type = e1000_phy_m88; |
128 | break; | 129 | break; |
129 | case IGP01E1000_I_PHY_ID: | 130 | case IGP01E1000_I_PHY_ID: |
@@ -3222,7 +3223,8 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw) | |||
3222 | break; | 3223 | break; |
3223 | case e1000_ce4100: | 3224 | case e1000_ce4100: |
3224 | if ((hw->phy_id == RTL8211B_PHY_ID) || | 3225 | if ((hw->phy_id == RTL8211B_PHY_ID) || |
3225 | (hw->phy_id == RTL8201N_PHY_ID)) | 3226 | (hw->phy_id == RTL8201N_PHY_ID) || |
3227 | (hw->phy_id == M88E1118_E_PHY_ID)) | ||
3226 | match = true; | 3228 | match = true; |
3227 | break; | 3229 | break; |
3228 | case e1000_82541: | 3230 | case e1000_82541: |
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h index 196eeda2dd6c..c70b23d52284 100644 --- a/drivers/net/e1000/e1000_hw.h +++ b/drivers/net/e1000/e1000_hw.h | |||
@@ -2917,6 +2917,7 @@ struct e1000_host_command_info { | |||
2917 | #define M88E1000_14_PHY_ID M88E1000_E_PHY_ID | 2917 | #define M88E1000_14_PHY_ID M88E1000_E_PHY_ID |
2918 | #define M88E1011_I_REV_4 0x04 | 2918 | #define M88E1011_I_REV_4 0x04 |
2919 | #define M88E1111_I_PHY_ID 0x01410CC0 | 2919 | #define M88E1111_I_PHY_ID 0x01410CC0 |
2920 | #define M88E1118_E_PHY_ID 0x01410E40 | ||
2920 | #define L1LXT971A_PHY_ID 0x001378E0 | 2921 | #define L1LXT971A_PHY_ID 0x001378E0 |
2921 | 2922 | ||
2922 | #define RTL8211B_PHY_ID 0x001CC910 | 2923 | #define RTL8211B_PHY_ID 0x001CC910 |
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 1c18f26b0812..3fa110ddb041 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -937,6 +937,9 @@ static void e1000_print_hw_hang(struct work_struct *work) | |||
937 | u16 phy_status, phy_1000t_status, phy_ext_status; | 937 | u16 phy_status, phy_1000t_status, phy_ext_status; |
938 | u16 pci_status; | 938 | u16 pci_status; |
939 | 939 | ||
940 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
941 | return; | ||
942 | |||
940 | e1e_rphy(hw, PHY_STATUS, &phy_status); | 943 | e1e_rphy(hw, PHY_STATUS, &phy_status); |
941 | e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); | 944 | e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); |
942 | e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); | 945 | e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); |
@@ -1506,6 +1509,9 @@ static void e1000e_downshift_workaround(struct work_struct *work) | |||
1506 | struct e1000_adapter *adapter = container_of(work, | 1509 | struct e1000_adapter *adapter = container_of(work, |
1507 | struct e1000_adapter, downshift_task); | 1510 | struct e1000_adapter, downshift_task); |
1508 | 1511 | ||
1512 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
1513 | return; | ||
1514 | |||
1509 | e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); | 1515 | e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); |
1510 | } | 1516 | } |
1511 | 1517 | ||
@@ -3338,6 +3344,21 @@ int e1000e_up(struct e1000_adapter *adapter) | |||
3338 | return 0; | 3344 | return 0; |
3339 | } | 3345 | } |
3340 | 3346 | ||
3347 | static void e1000e_flush_descriptors(struct e1000_adapter *adapter) | ||
3348 | { | ||
3349 | struct e1000_hw *hw = &adapter->hw; | ||
3350 | |||
3351 | if (!(adapter->flags2 & FLAG2_DMA_BURST)) | ||
3352 | return; | ||
3353 | |||
3354 | /* flush pending descriptor writebacks to memory */ | ||
3355 | ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); | ||
3356 | ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); | ||
3357 | |||
3358 | /* execute the writes immediately */ | ||
3359 | e1e_flush(); | ||
3360 | } | ||
3361 | |||
3341 | void e1000e_down(struct e1000_adapter *adapter) | 3362 | void e1000e_down(struct e1000_adapter *adapter) |
3342 | { | 3363 | { |
3343 | struct net_device *netdev = adapter->netdev; | 3364 | struct net_device *netdev = adapter->netdev; |
@@ -3377,6 +3398,9 @@ void e1000e_down(struct e1000_adapter *adapter) | |||
3377 | 3398 | ||
3378 | if (!pci_channel_offline(adapter->pdev)) | 3399 | if (!pci_channel_offline(adapter->pdev)) |
3379 | e1000e_reset(adapter); | 3400 | e1000e_reset(adapter); |
3401 | |||
3402 | e1000e_flush_descriptors(adapter); | ||
3403 | |||
3380 | e1000_clean_tx_ring(adapter); | 3404 | e1000_clean_tx_ring(adapter); |
3381 | e1000_clean_rx_ring(adapter); | 3405 | e1000_clean_rx_ring(adapter); |
3382 | 3406 | ||
@@ -3765,6 +3789,10 @@ static void e1000e_update_phy_task(struct work_struct *work) | |||
3765 | { | 3789 | { |
3766 | struct e1000_adapter *adapter = container_of(work, | 3790 | struct e1000_adapter *adapter = container_of(work, |
3767 | struct e1000_adapter, update_phy_task); | 3791 | struct e1000_adapter, update_phy_task); |
3792 | |||
3793 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
3794 | return; | ||
3795 | |||
3768 | e1000_get_phy_info(&adapter->hw); | 3796 | e1000_get_phy_info(&adapter->hw); |
3769 | } | 3797 | } |
3770 | 3798 | ||
@@ -3775,6 +3803,10 @@ static void e1000e_update_phy_task(struct work_struct *work) | |||
3775 | static void e1000_update_phy_info(unsigned long data) | 3803 | static void e1000_update_phy_info(unsigned long data) |
3776 | { | 3804 | { |
3777 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; | 3805 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; |
3806 | |||
3807 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
3808 | return; | ||
3809 | |||
3778 | schedule_work(&adapter->update_phy_task); | 3810 | schedule_work(&adapter->update_phy_task); |
3779 | } | 3811 | } |
3780 | 3812 | ||
@@ -4149,6 +4181,9 @@ static void e1000_watchdog_task(struct work_struct *work) | |||
4149 | u32 link, tctl; | 4181 | u32 link, tctl; |
4150 | int tx_pending = 0; | 4182 | int tx_pending = 0; |
4151 | 4183 | ||
4184 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
4185 | return; | ||
4186 | |||
4152 | link = e1000e_has_link(adapter); | 4187 | link = e1000e_has_link(adapter); |
4153 | if ((netif_carrier_ok(netdev)) && link) { | 4188 | if ((netif_carrier_ok(netdev)) && link) { |
4154 | /* Cancel scheduled suspend requests. */ | 4189 | /* Cancel scheduled suspend requests. */ |
@@ -4309,7 +4344,6 @@ link_up: | |||
4309 | * to get done, so reset controller to flush Tx. | 4344 | * to get done, so reset controller to flush Tx. |
4310 | * (Do the reset outside of interrupt context). | 4345 | * (Do the reset outside of interrupt context). |
4311 | */ | 4346 | */ |
4312 | adapter->tx_timeout_count++; | ||
4313 | schedule_work(&adapter->reset_task); | 4347 | schedule_work(&adapter->reset_task); |
4314 | /* return immediately since reset is imminent */ | 4348 | /* return immediately since reset is imminent */ |
4315 | return; | 4349 | return; |
@@ -4338,19 +4372,12 @@ link_up: | |||
4338 | else | 4372 | else |
4339 | ew32(ICS, E1000_ICS_RXDMT0); | 4373 | ew32(ICS, E1000_ICS_RXDMT0); |
4340 | 4374 | ||
4375 | /* flush pending descriptors to memory before detecting Tx hang */ | ||
4376 | e1000e_flush_descriptors(adapter); | ||
4377 | |||
4341 | /* Force detection of hung controller every watchdog period */ | 4378 | /* Force detection of hung controller every watchdog period */ |
4342 | adapter->detect_tx_hung = 1; | 4379 | adapter->detect_tx_hung = 1; |
4343 | 4380 | ||
4344 | /* flush partial descriptors to memory before detecting Tx hang */ | ||
4345 | if (adapter->flags2 & FLAG2_DMA_BURST) { | ||
4346 | ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); | ||
4347 | ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); | ||
4348 | /* | ||
4349 | * no need to flush the writes because the timeout code does | ||
4350 | * an er32 first thing | ||
4351 | */ | ||
4352 | } | ||
4353 | |||
4354 | /* | 4381 | /* |
4355 | * With 82571 controllers, LAA may be overwritten due to controller | 4382 | * With 82571 controllers, LAA may be overwritten due to controller |
4356 | * reset from the other port. Set the appropriate LAA in RAR[0] | 4383 | * reset from the other port. Set the appropriate LAA in RAR[0] |
@@ -4888,6 +4915,10 @@ static void e1000_reset_task(struct work_struct *work) | |||
4888 | struct e1000_adapter *adapter; | 4915 | struct e1000_adapter *adapter; |
4889 | adapter = container_of(work, struct e1000_adapter, reset_task); | 4916 | adapter = container_of(work, struct e1000_adapter, reset_task); |
4890 | 4917 | ||
4918 | /* don't run the task if already down */ | ||
4919 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
4920 | return; | ||
4921 | |||
4891 | if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) && | 4922 | if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) && |
4892 | (adapter->flags & FLAG_RX_RESTART_NOW))) { | 4923 | (adapter->flags & FLAG_RX_RESTART_NOW))) { |
4893 | e1000e_dump(adapter); | 4924 | e1000e_dump(adapter); |
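
The e1000e hunks above factor the DMA-burst descriptor flush into e1000e_flush_descriptors() and make every deferred task (PHY update, watchdog, reset work) bail out once the adapter has been marked down. A minimal userspace C sketch of that guard pattern, assuming a single state word and an illustrative DOWN bit; the names here are hypothetical stand-ins, not the driver's symbols.

#include <stdio.h>

/* Illustrative model of "check DOWN before doing deferred work";
 * plain C, not kernel code. */
#define ADAPTER_DOWN_BIT 0

static int state_has_bit(unsigned long state, int bit)
{
	return (int)((state >> bit) & 1UL);
}

static void update_phy_task(unsigned long state)
{
	if (state_has_bit(state, ADAPTER_DOWN_BIT))
		return;	/* interface is going down: skip the late work */
	printf("refreshing PHY info\n");
}

int main(void)
{
	unsigned long state = 0;

	update_phy_task(state);			/* runs */
	state |= 1UL << ADAPTER_DOWN_BIT;	/* teardown sets the bit */
	update_phy_task(state);			/* returns immediately */
	return 0;
}
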
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index af09296ef0dd..9c0b1bac6af6 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c | |||
@@ -5645,6 +5645,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
5645 | goto out_error; | 5645 | goto out_error; |
5646 | } | 5646 | } |
5647 | 5647 | ||
5648 | netif_carrier_off(dev); | ||
5649 | |||
5648 | dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n", | 5650 | dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n", |
5649 | dev->name, np->phy_oui, np->phyaddr, dev->dev_addr); | 5651 | dev->name, np->phy_oui, np->phyaddr, dev->dev_addr); |
5650 | 5652 | ||
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index d5ede2df3e42..ebbda7d15254 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c | |||
@@ -1370,6 +1370,9 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) | |||
1370 | hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr); | 1370 | hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr); |
1371 | 1371 | ||
1372 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); | 1372 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); |
1373 | |||
1374 | /* clear VMDq pool/queue selection for RAR 0 */ | ||
1375 | hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL); | ||
1373 | } | 1376 | } |
1374 | hw->addr_ctrl.overflow_promisc = 0; | 1377 | hw->addr_ctrl.overflow_promisc = 0; |
1375 | 1378 | ||
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c index 6342d4859790..c54a88274d51 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ixgbe/ixgbe_fcoe.c | |||
@@ -159,13 +159,13 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, | |||
159 | struct scatterlist *sg; | 159 | struct scatterlist *sg; |
160 | unsigned int i, j, dmacount; | 160 | unsigned int i, j, dmacount; |
161 | unsigned int len; | 161 | unsigned int len; |
162 | static const unsigned int bufflen = 4096; | 162 | static const unsigned int bufflen = IXGBE_FCBUFF_MIN; |
163 | unsigned int firstoff = 0; | 163 | unsigned int firstoff = 0; |
164 | unsigned int lastsize; | 164 | unsigned int lastsize; |
165 | unsigned int thisoff = 0; | 165 | unsigned int thisoff = 0; |
166 | unsigned int thislen = 0; | 166 | unsigned int thislen = 0; |
167 | u32 fcbuff, fcdmarw, fcfltrw; | 167 | u32 fcbuff, fcdmarw, fcfltrw; |
168 | dma_addr_t addr; | 168 | dma_addr_t addr = 0; |
169 | 169 | ||
170 | if (!netdev || !sgl) | 170 | if (!netdev || !sgl) |
171 | return 0; | 171 | return 0; |
@@ -254,6 +254,24 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, | |||
254 | /* only the last buffer may have non-full bufflen */ | 254 | /* only the last buffer may have non-full bufflen */ |
255 | lastsize = thisoff + thislen; | 255 | lastsize = thisoff + thislen; |
256 | 256 | ||
257 | /* | ||
258 | * lastsize cannot be equal to bufflen. | ||
259 | * If it is, add one more buffer with lastsize = 1. | ||
260 | */ | ||
261 | if (lastsize == bufflen) { | ||
262 | if (j >= IXGBE_BUFFCNT_MAX) { | ||
263 | e_err(drv, "xid=%x:%d,%d,%d:addr=%llx " | ||
264 | "not enough user buffers. We need an extra " | ||
265 | "buffer because lastsize is bufflen.\n", | ||
266 | xid, i, j, dmacount, (u64)addr); | ||
267 | goto out_noddp_free; | ||
268 | } | ||
269 | |||
270 | ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma); | ||
271 | j++; | ||
272 | lastsize = 1; | ||
273 | } | ||
274 | |||
257 | fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); | 275 | fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); |
258 | fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); | 276 | fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); |
259 | fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); | 277 | fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); |
@@ -532,6 +550,24 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) | |||
532 | e_err(drv, "failed to allocate FCoE DDP pool\n"); | 550 | e_err(drv, "failed to allocate FCoE DDP pool\n"); |
533 | 551 | ||
534 | spin_lock_init(&fcoe->lock); | 552 | spin_lock_init(&fcoe->lock); |
553 | |||
554 | /* Extra buffer shared by all DDPs for a HW workaround */ | ||
555 | fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); | ||
556 | if (fcoe->extra_ddp_buffer == NULL) { | ||
557 | e_err(drv, "failed to allocate extra DDP buffer\n"); | ||
558 | goto out_extra_ddp_buffer_alloc; | ||
559 | } | ||
560 | |||
561 | fcoe->extra_ddp_buffer_dma = | ||
562 | dma_map_single(&adapter->pdev->dev, | ||
563 | fcoe->extra_ddp_buffer, | ||
564 | IXGBE_FCBUFF_MIN, | ||
565 | DMA_FROM_DEVICE); | ||
566 | if (dma_mapping_error(&adapter->pdev->dev, | ||
567 | fcoe->extra_ddp_buffer_dma)) { | ||
568 | e_err(drv, "failed to map extra DDP buffer\n"); | ||
569 | goto out_extra_ddp_buffer_dma; | ||
570 | } | ||
535 | } | 571 | } |
536 | 572 | ||
537 | /* Enable L2 eth type filter for FCoE */ | 573 | /* Enable L2 eth type filter for FCoE */ |
@@ -581,6 +617,14 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) | |||
581 | } | 617 | } |
582 | } | 618 | } |
583 | #endif | 619 | #endif |
620 | |||
621 | return; | ||
622 | |||
623 | out_extra_ddp_buffer_dma: | ||
624 | kfree(fcoe->extra_ddp_buffer); | ||
625 | out_extra_ddp_buffer_alloc: | ||
626 | pci_pool_destroy(fcoe->pool); | ||
627 | fcoe->pool = NULL; | ||
584 | } | 628 | } |
585 | 629 | ||
586 | /** | 630 | /** |
@@ -600,6 +644,11 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter) | |||
600 | if (fcoe->pool) { | 644 | if (fcoe->pool) { |
601 | for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++) | 645 | for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++) |
602 | ixgbe_fcoe_ddp_put(adapter->netdev, i); | 646 | ixgbe_fcoe_ddp_put(adapter->netdev, i); |
647 | dma_unmap_single(&adapter->pdev->dev, | ||
648 | fcoe->extra_ddp_buffer_dma, | ||
649 | IXGBE_FCBUFF_MIN, | ||
650 | DMA_FROM_DEVICE); | ||
651 | kfree(fcoe->extra_ddp_buffer); | ||
603 | pci_pool_destroy(fcoe->pool); | 652 | pci_pool_destroy(fcoe->pool); |
604 | fcoe->pool = NULL; | 653 | fcoe->pool = NULL; |
605 | } | 654 | } |
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h index 4bc2c551c8db..65cc8fb14fe7 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.h +++ b/drivers/net/ixgbe/ixgbe_fcoe.h | |||
@@ -70,6 +70,8 @@ struct ixgbe_fcoe { | |||
70 | spinlock_t lock; | 70 | spinlock_t lock; |
71 | struct pci_pool *pool; | 71 | struct pci_pool *pool; |
72 | struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; | 72 | struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; |
73 | unsigned char *extra_ddp_buffer; | ||
74 | dma_addr_t extra_ddp_buffer_dma; | ||
73 | }; | 75 | }; |
74 | 76 | ||
75 | #endif /* _IXGBE_FCOE_H */ | 77 | #endif /* _IXGBE_FCOE_H */ |
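
The FCoE hunks above add one shared "extra DDP buffer" that is allocated once at configure time, DMA-mapped, appended whenever the last scatter entry would exactly fill a buffer, and torn down again in ixgbe_cleanup_fcoe(). A rough userspace sketch of that allocate-then-map lifecycle with goto-based unwinding; map_buffer()/unmap_buffer() are hypothetical stand-ins for the DMA mapping helpers, not real API.

#include <stdlib.h>

#define BUFF_MIN 4096

/* Stand-ins for dma_map_single()/dma_unmap_single(); in this sketch
 * mapping always succeeds and simply hands the pointer back. */
static void *map_buffer(void *buf, size_t len) { (void)len; return buf; }
static void unmap_buffer(void *handle, size_t len) { (void)handle; (void)len; }

struct fcoe_ctx {
	unsigned char *extra_ddp_buffer;
	void *extra_ddp_buffer_dma;
};

static int fcoe_setup(struct fcoe_ctx *fcoe)
{
	fcoe->extra_ddp_buffer = malloc(BUFF_MIN);
	if (!fcoe->extra_ddp_buffer)
		goto out_alloc;

	fcoe->extra_ddp_buffer_dma = map_buffer(fcoe->extra_ddp_buffer, BUFF_MIN);
	if (!fcoe->extra_ddp_buffer_dma)
		goto out_map;

	return 0;

out_map:
	free(fcoe->extra_ddp_buffer);	/* undo in reverse order */
out_alloc:
	return -1;
}

static void fcoe_cleanup(struct fcoe_ctx *fcoe)
{
	unmap_buffer(fcoe->extra_ddp_buffer_dma, BUFF_MIN);
	free(fcoe->extra_ddp_buffer);
}

int main(void)
{
	struct fcoe_ctx fcoe = { 0 };

	if (fcoe_setup(&fcoe) == 0)
		fcoe_cleanup(&fcoe);
	return 0;
}
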
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 602078b84892..30f9ccfb4f87 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -52,7 +52,7 @@ char ixgbe_driver_name[] = "ixgbe"; | |||
52 | static const char ixgbe_driver_string[] = | 52 | static const char ixgbe_driver_string[] = |
53 | "Intel(R) 10 Gigabit PCI Express Network Driver"; | 53 | "Intel(R) 10 Gigabit PCI Express Network Driver"; |
54 | 54 | ||
55 | #define DRV_VERSION "3.0.12-k2" | 55 | #define DRV_VERSION "3.2.9-k2" |
56 | const char ixgbe_driver_version[] = DRV_VERSION; | 56 | const char ixgbe_driver_version[] = DRV_VERSION; |
57 | static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; | 57 | static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; |
58 | 58 | ||
@@ -3176,9 +3176,16 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) | |||
3176 | u32 mhadd, hlreg0; | 3176 | u32 mhadd, hlreg0; |
3177 | 3177 | ||
3178 | /* Decide whether to use packet split mode or not */ | 3178 | /* Decide whether to use packet split mode or not */ |
3179 | /* On by default */ | ||
3180 | adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; | ||
3181 | |||
3179 | /* Do not use packet split if we're in SR-IOV Mode */ | 3182 | /* Do not use packet split if we're in SR-IOV Mode */ |
3180 | if (!adapter->num_vfs) | 3183 | if (adapter->num_vfs) |
3181 | adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; | 3184 | adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; |
3185 | |||
3186 | /* Disable packet split due to 82599 erratum #45 */ | ||
3187 | if (hw->mac.type == ixgbe_mac_82599EB) | ||
3188 | adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; | ||
3182 | 3189 | ||
3183 | /* Set the RX buffer length according to the mode */ | 3190 | /* Set the RX buffer length according to the mode */ |
3184 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { | 3191 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { |
@@ -3721,7 +3728,8 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) | |||
3721 | * We need to try and force an autonegotiation | 3728 | * We need to try and force an autonegotiation |
3722 | * session, then bring up link. | 3729 | * session, then bring up link. |
3723 | */ | 3730 | */ |
3724 | hw->mac.ops.setup_sfp(hw); | 3731 | if (hw->mac.ops.setup_sfp) |
3732 | hw->mac.ops.setup_sfp(hw); | ||
3725 | if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) | 3733 | if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) |
3726 | schedule_work(&adapter->multispeed_fiber_task); | 3734 | schedule_work(&adapter->multispeed_fiber_task); |
3727 | } else { | 3735 | } else { |
@@ -4863,16 +4871,13 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) | |||
4863 | { | 4871 | { |
4864 | int q_idx, num_q_vectors; | 4872 | int q_idx, num_q_vectors; |
4865 | struct ixgbe_q_vector *q_vector; | 4873 | struct ixgbe_q_vector *q_vector; |
4866 | int napi_vectors; | ||
4867 | int (*poll)(struct napi_struct *, int); | 4874 | int (*poll)(struct napi_struct *, int); |
4868 | 4875 | ||
4869 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | 4876 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { |
4870 | num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | 4877 | num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; |
4871 | napi_vectors = adapter->num_rx_queues; | ||
4872 | poll = &ixgbe_clean_rxtx_many; | 4878 | poll = &ixgbe_clean_rxtx_many; |
4873 | } else { | 4879 | } else { |
4874 | num_q_vectors = 1; | 4880 | num_q_vectors = 1; |
4875 | napi_vectors = 1; | ||
4876 | poll = &ixgbe_poll; | 4881 | poll = &ixgbe_poll; |
4877 | } | 4882 | } |
4878 | 4883 | ||
@@ -5964,7 +5969,8 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work) | |||
5964 | unregister_netdev(adapter->netdev); | 5969 | unregister_netdev(adapter->netdev); |
5965 | return; | 5970 | return; |
5966 | } | 5971 | } |
5967 | hw->mac.ops.setup_sfp(hw); | 5972 | if (hw->mac.ops.setup_sfp) |
5973 | hw->mac.ops.setup_sfp(hw); | ||
5968 | 5974 | ||
5969 | if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) | 5975 | if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) |
5970 | /* This will also work for DA Twinax connections */ | 5976 | /* This will also work for DA Twinax connections */ |
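
Two of the ixgbe_main hunks above guard hw->mac.ops.setup_sfp with a NULL test before calling it, since not every MAC type supplies that hook. A tiny C sketch of the optional-callback pattern; the struct and names are illustrative only.

#include <stdio.h>

/* Optional function pointer in an ops table: call it only if present. */
struct mac_ops {
	void (*setup_sfp)(void);
};

static void real_setup_sfp(void) { printf("configuring SFP+ module\n"); }

static void sfp_link_config(const struct mac_ops *ops)
{
	if (ops->setup_sfp)	/* some MACs have no SFP setup hook */
		ops->setup_sfp();
}

int main(void)
{
	struct mac_ops with_hook = { .setup_sfp = real_setup_sfp };
	struct mac_ops without_hook = { 0 };

	sfp_link_config(&with_hook);	/* calls the hook */
	sfp_link_config(&without_hook);	/* safely does nothing */
	return 0;
}
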
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c index 47b15738b009..187b3a16ec1f 100644 --- a/drivers/net/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ixgbe/ixgbe_sriov.c | |||
@@ -110,12 +110,10 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, | |||
110 | return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); | 110 | return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); |
111 | } | 111 | } |
112 | 112 | ||
113 | |||
114 | static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) | 113 | static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) |
115 | { | 114 | { |
116 | u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); | 115 | u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); |
117 | vmolr |= (IXGBE_VMOLR_ROMPE | | 116 | vmolr |= (IXGBE_VMOLR_ROMPE | |
118 | IXGBE_VMOLR_ROPE | | ||
119 | IXGBE_VMOLR_BAM); | 117 | IXGBE_VMOLR_BAM); |
120 | if (aupe) | 118 | if (aupe) |
121 | vmolr |= IXGBE_VMOLR_AUPE; | 119 | vmolr |= IXGBE_VMOLR_AUPE; |
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c index 3a8923993ce3..f2518b01067d 100644 --- a/drivers/net/ixgbe/ixgbe_x540.c +++ b/drivers/net/ixgbe/ixgbe_x540.c | |||
@@ -133,17 +133,17 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) | |||
133 | } | 133 | } |
134 | 134 | ||
135 | ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); | 135 | ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); |
136 | IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST)); | 136 | IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | reset_bit)); |
137 | IXGBE_WRITE_FLUSH(hw); | 137 | IXGBE_WRITE_FLUSH(hw); |
138 | 138 | ||
139 | /* Poll for reset bit to self-clear indicating reset is complete */ | 139 | /* Poll for reset bit to self-clear indicating reset is complete */ |
140 | for (i = 0; i < 10; i++) { | 140 | for (i = 0; i < 10; i++) { |
141 | udelay(1); | 141 | udelay(1); |
142 | ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); | 142 | ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); |
143 | if (!(ctrl & IXGBE_CTRL_RST)) | 143 | if (!(ctrl & reset_bit)) |
144 | break; | 144 | break; |
145 | } | 145 | } |
146 | if (ctrl & IXGBE_CTRL_RST) { | 146 | if (ctrl & reset_bit) { |
147 | status = IXGBE_ERR_RESET_FAILED; | 147 | status = IXGBE_ERR_RESET_FAILED; |
148 | hw_dbg(hw, "Reset polling failed to complete.\n"); | 148 | hw_dbg(hw, "Reset polling failed to complete.\n"); |
149 | } | 149 | } |
diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h index a0c26a99520f..e1e33c80fb25 100644 --- a/drivers/net/pch_gbe/pch_gbe.h +++ b/drivers/net/pch_gbe/pch_gbe.h | |||
@@ -73,7 +73,7 @@ struct pch_gbe_regs { | |||
73 | struct pch_gbe_regs_mac_adr mac_adr[16]; | 73 | struct pch_gbe_regs_mac_adr mac_adr[16]; |
74 | u32 ADDR_MASK; | 74 | u32 ADDR_MASK; |
75 | u32 MIIM; | 75 | u32 MIIM; |
76 | u32 reserve2; | 76 | u32 MAC_ADDR_LOAD; |
77 | u32 RGMII_ST; | 77 | u32 RGMII_ST; |
78 | u32 RGMII_CTRL; | 78 | u32 RGMII_CTRL; |
79 | u32 reserve3[3]; | 79 | u32 reserve3[3]; |
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c index 1bf12339441b..b99e90aca37d 100644 --- a/drivers/net/pch_gbe/pch_gbe_main.c +++ b/drivers/net/pch_gbe/pch_gbe_main.c | |||
@@ -29,6 +29,7 @@ const char pch_driver_version[] = DRV_VERSION; | |||
29 | #define PCH_GBE_SHORT_PKT 64 | 29 | #define PCH_GBE_SHORT_PKT 64 |
30 | #define DSC_INIT16 0xC000 | 30 | #define DSC_INIT16 0xC000 |
31 | #define PCH_GBE_DMA_ALIGN 0 | 31 | #define PCH_GBE_DMA_ALIGN 0 |
32 | #define PCH_GBE_DMA_PADDING 2 | ||
32 | #define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */ | 33 | #define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */ |
33 | #define PCH_GBE_COPYBREAK_DEFAULT 256 | 34 | #define PCH_GBE_COPYBREAK_DEFAULT 256 |
34 | #define PCH_GBE_PCI_BAR 1 | 35 | #define PCH_GBE_PCI_BAR 1 |
@@ -88,6 +89,12 @@ static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; | |||
88 | static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg); | 89 | static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg); |
89 | static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg, | 90 | static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg, |
90 | int data); | 91 | int data); |
92 | |||
93 | inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw) | ||
94 | { | ||
95 | iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD); | ||
96 | } | ||
97 | |||
91 | /** | 98 | /** |
92 | * pch_gbe_mac_read_mac_addr - Read MAC address | 99 | * pch_gbe_mac_read_mac_addr - Read MAC address |
93 | * @hw: Pointer to the HW structure | 100 | * @hw: Pointer to the HW structure |
@@ -519,7 +526,9 @@ static void pch_gbe_reset_task(struct work_struct *work) | |||
519 | struct pch_gbe_adapter *adapter; | 526 | struct pch_gbe_adapter *adapter; |
520 | adapter = container_of(work, struct pch_gbe_adapter, reset_task); | 527 | adapter = container_of(work, struct pch_gbe_adapter, reset_task); |
521 | 528 | ||
529 | rtnl_lock(); | ||
522 | pch_gbe_reinit_locked(adapter); | 530 | pch_gbe_reinit_locked(adapter); |
531 | rtnl_unlock(); | ||
523 | } | 532 | } |
524 | 533 | ||
525 | /** | 534 | /** |
@@ -528,14 +537,8 @@ static void pch_gbe_reset_task(struct work_struct *work) | |||
528 | */ | 537 | */ |
529 | void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter) | 538 | void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter) |
530 | { | 539 | { |
531 | struct net_device *netdev = adapter->netdev; | 540 | pch_gbe_down(adapter); |
532 | 541 | pch_gbe_up(adapter); | |
533 | rtnl_lock(); | ||
534 | if (netif_running(netdev)) { | ||
535 | pch_gbe_down(adapter); | ||
536 | pch_gbe_up(adapter); | ||
537 | } | ||
538 | rtnl_unlock(); | ||
539 | } | 542 | } |
540 | 543 | ||
541 | /** | 544 | /** |
@@ -1369,16 +1372,13 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, | |||
1369 | struct pch_gbe_buffer *buffer_info; | 1372 | struct pch_gbe_buffer *buffer_info; |
1370 | struct pch_gbe_rx_desc *rx_desc; | 1373 | struct pch_gbe_rx_desc *rx_desc; |
1371 | u32 length; | 1374 | u32 length; |
1372 | unsigned char tmp_packet[ETH_HLEN]; | ||
1373 | unsigned int i; | 1375 | unsigned int i; |
1374 | unsigned int cleaned_count = 0; | 1376 | unsigned int cleaned_count = 0; |
1375 | bool cleaned = false; | 1377 | bool cleaned = false; |
1376 | struct sk_buff *skb; | 1378 | struct sk_buff *skb, *new_skb; |
1377 | u8 dma_status; | 1379 | u8 dma_status; |
1378 | u16 gbec_status; | 1380 | u16 gbec_status; |
1379 | u32 tcp_ip_status; | 1381 | u32 tcp_ip_status; |
1380 | u8 skb_copy_flag = 0; | ||
1381 | u8 skb_padding_flag = 0; | ||
1382 | 1382 | ||
1383 | i = rx_ring->next_to_clean; | 1383 | i = rx_ring->next_to_clean; |
1384 | 1384 | ||
@@ -1422,55 +1422,70 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, | |||
1422 | pr_err("Receive CRC Error\n"); | 1422 | pr_err("Receive CRC Error\n"); |
1423 | } else { | 1423 | } else { |
1424 | /* get receive length */ | 1424 | /* get receive length */ |
1425 | /* length convert[-3], padding[-2] */ | 1425 | /* length convert[-3] */ |
1426 | length = (rx_desc->rx_words_eob) - 3 - 2; | 1426 | length = (rx_desc->rx_words_eob) - 3; |
1427 | 1427 | ||
1428 | /* Decide the data conversion method */ | 1428 | /* Decide the data conversion method */ |
1429 | if (!adapter->rx_csum) { | 1429 | if (!adapter->rx_csum) { |
1430 | /* [Header:14][payload] */ | 1430 | /* [Header:14][payload] */ |
1431 | skb_padding_flag = 0; | 1431 | if (NET_IP_ALIGN) { |
1432 | skb_copy_flag = 1; | 1432 | /* Because alignment differs, |
1433 | * the new_skb is newly allocated, | ||
1434 | * and data is copied to new_skb.*/ | ||
1435 | new_skb = netdev_alloc_skb(netdev, | ||
1436 | length + NET_IP_ALIGN); | ||
1437 | if (!new_skb) { | ||
1438 | /* dorrop error */ | ||
1439 | pr_err("New skb allocation " | ||
1440 | "Error\n"); | ||
1441 | goto dorrop; | ||
1442 | } | ||
1443 | skb_reserve(new_skb, NET_IP_ALIGN); | ||
1444 | memcpy(new_skb->data, skb->data, | ||
1445 | length); | ||
1446 | skb = new_skb; | ||
1447 | } else { | ||
1448 | /* DMA buffer is used as SKB as it is.*/ | ||
1449 | buffer_info->skb = NULL; | ||
1450 | } | ||
1433 | } else { | 1451 | } else { |
1434 | /* [Header:14][padding:2][payload] */ | 1452 | /* [Header:14][padding:2][payload] */ |
1435 | skb_padding_flag = 1; | 1453 | /* The length includes padding length */ |
1436 | if (length < copybreak) | 1454 | length = length - PCH_GBE_DMA_PADDING; |
1437 | skb_copy_flag = 1; | 1455 | if ((length < copybreak) || |
1438 | else | 1456 | (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) { |
1439 | skb_copy_flag = 0; | 1457 | /* Because alignment differs, |
1440 | } | 1458 | * the new_skb is newly allocated, |
1441 | 1459 | * and data is copied to new_skb. | |
1442 | /* Data conversion */ | 1460 | * Padding data is deleted |
1443 | if (skb_copy_flag) { /* recycle skb */ | 1461 | * at the time of a copy.*/ |
1444 | struct sk_buff *new_skb; | 1462 | new_skb = netdev_alloc_skb(netdev, |
1445 | new_skb = | 1463 | length + NET_IP_ALIGN); |
1446 | netdev_alloc_skb(netdev, | 1464 | if (!new_skb) { |
1447 | length + NET_IP_ALIGN); | 1465 | /* dorrop error */ |
1448 | if (new_skb) { | 1466 | pr_err("New skb allocation " |
1449 | if (!skb_padding_flag) { | 1467 | "Error\n"); |
1450 | skb_reserve(new_skb, | 1468 | goto dorrop; |
1451 | NET_IP_ALIGN); | ||
1452 | } | 1469 | } |
1470 | skb_reserve(new_skb, NET_IP_ALIGN); | ||
1453 | memcpy(new_skb->data, skb->data, | 1471 | memcpy(new_skb->data, skb->data, |
1454 | length); | 1472 | ETH_HLEN); |
1455 | /* save the skb | 1473 | memcpy(&new_skb->data[ETH_HLEN], |
1456 | * in buffer_info as good */ | 1474 | &skb->data[ETH_HLEN + |
1475 | PCH_GBE_DMA_PADDING], | ||
1476 | length - ETH_HLEN); | ||
1457 | skb = new_skb; | 1477 | skb = new_skb; |
1458 | } else if (!skb_padding_flag) { | 1478 | } else { |
1459 | /* dorrop error */ | 1479 | /* Padding data is deleted |
1460 | pr_err("New skb allocation Error\n"); | 1480 | * by moving header data.*/ |
1461 | goto dorrop; | 1481 | memmove(&skb->data[PCH_GBE_DMA_PADDING], |
1482 | &skb->data[0], ETH_HLEN); | ||
1483 | skb_reserve(skb, NET_IP_ALIGN); | ||
1484 | buffer_info->skb = NULL; | ||
1462 | } | 1485 | } |
1463 | } else { | ||
1464 | buffer_info->skb = NULL; | ||
1465 | } | 1486 | } |
1466 | if (skb_padding_flag) { | 1487 | /* The length includes FCS length */ |
1467 | memcpy(&tmp_packet[0], &skb->data[0], ETH_HLEN); | 1488 | length = length - ETH_FCS_LEN; |
1468 | memcpy(&skb->data[NET_IP_ALIGN], &tmp_packet[0], | ||
1469 | ETH_HLEN); | ||
1470 | skb_reserve(skb, NET_IP_ALIGN); | ||
1471 | |||
1472 | } | ||
1473 | |||
1474 | /* update status of driver */ | 1489 | /* update status of driver */ |
1475 | adapter->stats.rx_bytes += length; | 1490 | adapter->stats.rx_bytes += length; |
1476 | adapter->stats.rx_packets++; | 1491 | adapter->stats.rx_packets++; |
@@ -2322,6 +2337,7 @@ static int pch_gbe_probe(struct pci_dev *pdev, | |||
2322 | netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO; | 2337 | netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO; |
2323 | pch_gbe_set_ethtool_ops(netdev); | 2338 | pch_gbe_set_ethtool_ops(netdev); |
2324 | 2339 | ||
2340 | pch_gbe_mac_load_mac_addr(&adapter->hw); | ||
2325 | pch_gbe_mac_reset_hw(&adapter->hw); | 2341 | pch_gbe_mac_reset_hw(&adapter->hw); |
2326 | 2342 | ||
2327 | /* setup the private structure */ | 2343 | /* setup the private structure */ |
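
The pch_gbe receive rework above strips the two DMA padding bytes inserted after the 14-byte Ethernet header either by copying into a freshly aligned skb or, when the alignment already matches, by sliding the header forward with memmove(). A userspace sketch of the in-place variant; the buffer layout and constants mirror the idea, not the driver's exact code.

#include <stdio.h>
#include <string.h>

#define ETH_HLEN    14
#define DMA_PADDING 2

/* Remove DMA_PADDING bytes located right after the Ethernet header by
 * moving the header forward, then treat buf + DMA_PADDING as the frame. */
static unsigned char *strip_rx_padding(unsigned char *buf, size_t *len)
{
	memmove(buf + DMA_PADDING, buf, ETH_HLEN);
	*len -= DMA_PADDING;
	return buf + DMA_PADDING;
}

int main(void)
{
	/* [header:14][padding:2][payload:4] */
	unsigned char buf[] = "HHHHHHHHHHHHHHppDATA";
	size_t len = ETH_HLEN + DMA_PADDING + 4;
	unsigned char *frame = strip_rx_padding(buf, &len);

	printf("frame length %zu, payload starts with %.4s\n",
	       len, (char *)(frame + ETH_HLEN));
	return 0;
}
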
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 59ccf0c5c610..ef2133b16f8c 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -617,8 +617,9 @@ static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data) | |||
617 | } | 617 | } |
618 | } | 618 | } |
619 | 619 | ||
620 | static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd) | 620 | static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd) |
621 | { | 621 | { |
622 | void __iomem *ioaddr = tp->mmio_addr; | ||
622 | int i; | 623 | int i; |
623 | 624 | ||
624 | RTL_W8(ERIDR, cmd); | 625 | RTL_W8(ERIDR, cmd); |
@@ -630,7 +631,7 @@ static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd) | |||
630 | break; | 631 | break; |
631 | } | 632 | } |
632 | 633 | ||
633 | ocp_write(ioaddr, 0x1, 0x30, 0x00000001); | 634 | ocp_write(tp, 0x1, 0x30, 0x00000001); |
634 | } | 635 | } |
635 | 636 | ||
636 | #define OOB_CMD_RESET 0x00 | 637 | #define OOB_CMD_RESET 0x00 |
@@ -2868,8 +2869,11 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) | |||
2868 | { | 2869 | { |
2869 | void __iomem *ioaddr = tp->mmio_addr; | 2870 | void __iomem *ioaddr = tp->mmio_addr; |
2870 | 2871 | ||
2871 | if (tp->mac_version == RTL_GIGA_MAC_VER_27) | 2872 | if (((tp->mac_version == RTL_GIGA_MAC_VER_27) || |
2873 | (tp->mac_version == RTL_GIGA_MAC_VER_28)) && | ||
2874 | (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) { | ||
2872 | return; | 2875 | return; |
2876 | } | ||
2873 | 2877 | ||
2874 | if (((tp->mac_version == RTL_GIGA_MAC_VER_23) || | 2878 | if (((tp->mac_version == RTL_GIGA_MAC_VER_23) || |
2875 | (tp->mac_version == RTL_GIGA_MAC_VER_24)) && | 2879 | (tp->mac_version == RTL_GIGA_MAC_VER_24)) && |
@@ -2891,6 +2895,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) | |||
2891 | switch (tp->mac_version) { | 2895 | switch (tp->mac_version) { |
2892 | case RTL_GIGA_MAC_VER_25: | 2896 | case RTL_GIGA_MAC_VER_25: |
2893 | case RTL_GIGA_MAC_VER_26: | 2897 | case RTL_GIGA_MAC_VER_26: |
2898 | case RTL_GIGA_MAC_VER_27: | ||
2899 | case RTL_GIGA_MAC_VER_28: | ||
2894 | RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); | 2900 | RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); |
2895 | break; | 2901 | break; |
2896 | } | 2902 | } |
@@ -2900,12 +2906,17 @@ static void r8168_pll_power_up(struct rtl8169_private *tp) | |||
2900 | { | 2906 | { |
2901 | void __iomem *ioaddr = tp->mmio_addr; | 2907 | void __iomem *ioaddr = tp->mmio_addr; |
2902 | 2908 | ||
2903 | if (tp->mac_version == RTL_GIGA_MAC_VER_27) | 2909 | if (((tp->mac_version == RTL_GIGA_MAC_VER_27) || |
2910 | (tp->mac_version == RTL_GIGA_MAC_VER_28)) && | ||
2911 | (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) { | ||
2904 | return; | 2912 | return; |
2913 | } | ||
2905 | 2914 | ||
2906 | switch (tp->mac_version) { | 2915 | switch (tp->mac_version) { |
2907 | case RTL_GIGA_MAC_VER_25: | 2916 | case RTL_GIGA_MAC_VER_25: |
2908 | case RTL_GIGA_MAC_VER_26: | 2917 | case RTL_GIGA_MAC_VER_26: |
2918 | case RTL_GIGA_MAC_VER_27: | ||
2919 | case RTL_GIGA_MAC_VER_28: | ||
2909 | RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); | 2920 | RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); |
2910 | break; | 2921 | break; |
2911 | } | 2922 | } |
@@ -3042,7 +3053,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3042 | goto err_out_mwi_2; | 3053 | goto err_out_mwi_2; |
3043 | } | 3054 | } |
3044 | 3055 | ||
3045 | tp->cp_cmd = PCIMulRW | RxChkSum; | 3056 | tp->cp_cmd = RxChkSum; |
3046 | 3057 | ||
3047 | if ((sizeof(dma_addr_t) > 4) && | 3058 | if ((sizeof(dma_addr_t) > 4) && |
3048 | !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { | 3059 | !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { |
@@ -3190,6 +3201,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3190 | if (pci_dev_run_wake(pdev)) | 3201 | if (pci_dev_run_wake(pdev)) |
3191 | pm_runtime_put_noidle(&pdev->dev); | 3202 | pm_runtime_put_noidle(&pdev->dev); |
3192 | 3203 | ||
3204 | netif_carrier_off(dev); | ||
3205 | |||
3193 | out: | 3206 | out: |
3194 | return rc; | 3207 | return rc; |
3195 | 3208 | ||
@@ -3316,7 +3329,8 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp) | |||
3316 | /* Disable interrupts */ | 3329 | /* Disable interrupts */ |
3317 | rtl8169_irq_mask_and_ack(ioaddr); | 3330 | rtl8169_irq_mask_and_ack(ioaddr); |
3318 | 3331 | ||
3319 | if (tp->mac_version == RTL_GIGA_MAC_VER_28) { | 3332 | if (tp->mac_version == RTL_GIGA_MAC_VER_27 || |
3333 | tp->mac_version == RTL_GIGA_MAC_VER_28) { | ||
3320 | while (RTL_R8(TxPoll) & NPQ) | 3334 | while (RTL_R8(TxPoll) & NPQ) |
3321 | udelay(20); | 3335 | udelay(20); |
3322 | 3336 | ||
@@ -3845,8 +3859,7 @@ static void rtl_hw_start_8168(struct net_device *dev) | |||
3845 | Cxpl_dbg_sel | \ | 3859 | Cxpl_dbg_sel | \ |
3846 | ASF | \ | 3860 | ASF | \ |
3847 | PktCntrDisable | \ | 3861 | PktCntrDisable | \ |
3848 | PCIDAC | \ | 3862 | Mac_dbgo_sel) |
3849 | PCIMulRW) | ||
3850 | 3863 | ||
3851 | static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) | 3864 | static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) |
3852 | { | 3865 | { |
@@ -3876,8 +3889,6 @@ static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) | |||
3876 | if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) | 3889 | if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) |
3877 | RTL_W8(Config1, cfg1 & ~LEDS0); | 3890 | RTL_W8(Config1, cfg1 & ~LEDS0); |
3878 | 3891 | ||
3879 | RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK); | ||
3880 | |||
3881 | rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1)); | 3892 | rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1)); |
3882 | } | 3893 | } |
3883 | 3894 | ||
@@ -3889,8 +3900,6 @@ static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev) | |||
3889 | 3900 | ||
3890 | RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable); | 3901 | RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable); |
3891 | RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); | 3902 | RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); |
3892 | |||
3893 | RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK); | ||
3894 | } | 3903 | } |
3895 | 3904 | ||
3896 | static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev) | 3905 | static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev) |
@@ -3916,6 +3925,8 @@ static void rtl_hw_start_8101(struct net_device *dev) | |||
3916 | } | 3925 | } |
3917 | } | 3926 | } |
3918 | 3927 | ||
3928 | RTL_W8(Cfg9346, Cfg9346_Unlock); | ||
3929 | |||
3919 | switch (tp->mac_version) { | 3930 | switch (tp->mac_version) { |
3920 | case RTL_GIGA_MAC_VER_07: | 3931 | case RTL_GIGA_MAC_VER_07: |
3921 | rtl_hw_start_8102e_1(ioaddr, pdev); | 3932 | rtl_hw_start_8102e_1(ioaddr, pdev); |
@@ -3930,14 +3941,13 @@ static void rtl_hw_start_8101(struct net_device *dev) | |||
3930 | break; | 3941 | break; |
3931 | } | 3942 | } |
3932 | 3943 | ||
3933 | RTL_W8(Cfg9346, Cfg9346_Unlock); | 3944 | RTL_W8(Cfg9346, Cfg9346_Lock); |
3934 | 3945 | ||
3935 | RTL_W8(MaxTxPacketSize, TxPacketMax); | 3946 | RTL_W8(MaxTxPacketSize, TxPacketMax); |
3936 | 3947 | ||
3937 | rtl_set_rx_max_size(ioaddr, rx_buf_sz); | 3948 | rtl_set_rx_max_size(ioaddr, rx_buf_sz); |
3938 | 3949 | ||
3939 | tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW; | 3950 | tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK; |
3940 | |||
3941 | RTL_W16(CPlusCmd, tp->cp_cmd); | 3951 | RTL_W16(CPlusCmd, tp->cp_cmd); |
3942 | 3952 | ||
3943 | RTL_W16(IntrMitigate, 0x0000); | 3953 | RTL_W16(IntrMitigate, 0x0000); |
@@ -3947,14 +3957,10 @@ static void rtl_hw_start_8101(struct net_device *dev) | |||
3947 | RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); | 3957 | RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); |
3948 | rtl_set_rx_tx_config_registers(tp); | 3958 | rtl_set_rx_tx_config_registers(tp); |
3949 | 3959 | ||
3950 | RTL_W8(Cfg9346, Cfg9346_Lock); | ||
3951 | |||
3952 | RTL_R8(IntrMask); | 3960 | RTL_R8(IntrMask); |
3953 | 3961 | ||
3954 | rtl_set_rx_mode(dev); | 3962 | rtl_set_rx_mode(dev); |
3955 | 3963 | ||
3956 | RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); | ||
3957 | |||
3958 | RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); | 3964 | RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); |
3959 | 3965 | ||
3960 | RTL_W16(IntrMask, tp->intr_event); | 3966 | RTL_W16(IntrMask, tp->intr_event); |
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c index 0e8bb19ed60d..ca886d98bdc7 100644 --- a/drivers/net/sfc/ethtool.c +++ b/drivers/net/sfc/ethtool.c | |||
@@ -569,9 +569,14 @@ static void efx_ethtool_self_test(struct net_device *net_dev, | |||
569 | struct ethtool_test *test, u64 *data) | 569 | struct ethtool_test *test, u64 *data) |
570 | { | 570 | { |
571 | struct efx_nic *efx = netdev_priv(net_dev); | 571 | struct efx_nic *efx = netdev_priv(net_dev); |
572 | struct efx_self_tests efx_tests; | 572 | struct efx_self_tests *efx_tests; |
573 | int already_up; | 573 | int already_up; |
574 | int rc; | 574 | int rc = -ENOMEM; |
575 | |||
576 | efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL); | ||
577 | if (!efx_tests) | ||
578 | goto fail; | ||
579 | |||
575 | 580 | ||
576 | ASSERT_RTNL(); | 581 | ASSERT_RTNL(); |
577 | if (efx->state != STATE_RUNNING) { | 582 | if (efx->state != STATE_RUNNING) { |
@@ -589,13 +594,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev, | |||
589 | if (rc) { | 594 | if (rc) { |
590 | netif_err(efx, drv, efx->net_dev, | 595 | netif_err(efx, drv, efx->net_dev, |
591 | "failed opening device.\n"); | 596 | "failed opening device.\n"); |
592 | goto fail2; | 597 | goto fail1; |
593 | } | 598 | } |
594 | } | 599 | } |
595 | 600 | ||
596 | memset(&efx_tests, 0, sizeof(efx_tests)); | 601 | rc = efx_selftest(efx, efx_tests, test->flags); |
597 | |||
598 | rc = efx_selftest(efx, &efx_tests, test->flags); | ||
599 | 602 | ||
600 | if (!already_up) | 603 | if (!already_up) |
601 | dev_close(efx->net_dev); | 604 | dev_close(efx->net_dev); |
@@ -604,10 +607,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev, | |||
604 | rc == 0 ? "passed" : "failed", | 607 | rc == 0 ? "passed" : "failed", |
605 | (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); | 608 | (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); |
606 | 609 | ||
607 | fail2: | 610 | fail1: |
608 | fail1: | ||
609 | /* Fill ethtool results structures */ | 611 | /* Fill ethtool results structures */ |
610 | efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data); | 612 | efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data); |
613 | kfree(efx_tests); | ||
614 | fail: | ||
611 | if (rc) | 615 | if (rc) |
612 | test->flags |= ETH_TEST_FL_FAILED; | 616 | test->flags |= ETH_TEST_FL_FAILED; |
613 | } | 617 | } |
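
The sfc hunk above moves struct efx_self_tests off the kernel stack: the structure is large enough that a zeroed heap allocation is the safer choice, with a leading fail label so the ethtool results are still filled from a NULL-checked pointer. A hedged userspace analogue using calloc()/free(); the struct contents are placeholders.

#include <stdio.h>
#include <stdlib.h>

/* Placeholder for a results structure too big to keep on the stack. */
struct self_tests {
	long results[512];
};

static int run_self_test(void)
{
	struct self_tests *tests;
	int rc = -1;

	tests = calloc(1, sizeof(*tests));	/* zeroed, like kzalloc() */
	if (!tests)
		goto fail;

	tests->results[0] = 1;			/* pretend one subtest passed */
	rc = 0;

	free(tests);
fail:
	return rc;
}

int main(void)
{
	printf("self test rc=%d\n", run_self_test());
	return 0;
}
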
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c index 5976d1d51df1..640e368ebeee 100644 --- a/drivers/net/sis900.c +++ b/drivers/net/sis900.c | |||
@@ -1777,6 +1777,7 @@ static int sis900_rx(struct net_device *net_dev) | |||
1777 | "cur_rx:%4.4d, dirty_rx:%4.4d\n", | 1777 | "cur_rx:%4.4d, dirty_rx:%4.4d\n", |
1778 | net_dev->name, sis_priv->cur_rx, | 1778 | net_dev->name, sis_priv->cur_rx, |
1779 | sis_priv->dirty_rx); | 1779 | sis_priv->dirty_rx); |
1780 | dev_kfree_skb(skb); | ||
1780 | break; | 1781 | break; |
1781 | } | 1782 | } |
1782 | 1783 | ||
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c index 34a0af3837f9..0e5f03135b50 100644 --- a/drivers/net/stmmac/stmmac_main.c +++ b/drivers/net/stmmac/stmmac_main.c | |||
@@ -1560,8 +1560,10 @@ static int stmmac_mac_device_setup(struct net_device *dev) | |||
1560 | 1560 | ||
1561 | priv->hw = device; | 1561 | priv->hw = device; |
1562 | 1562 | ||
1563 | if (device_can_wakeup(priv->device)) | 1563 | if (device_can_wakeup(priv->device)) { |
1564 | priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ | 1564 | priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ |
1565 | enable_irq_wake(dev->irq); | ||
1566 | } | ||
1565 | 1567 | ||
1566 | return 0; | 1568 | return 0; |
1567 | } | 1569 | } |
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 93b32d366611..06c0e5033656 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -11158,7 +11158,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
11158 | if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) | 11158 | if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) |
11159 | break; /* We have no PHY */ | 11159 | break; /* We have no PHY */ |
11160 | 11160 | ||
11161 | if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) | 11161 | if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) || |
11162 | ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && | ||
11163 | !netif_running(dev))) | ||
11162 | return -EAGAIN; | 11164 | return -EAGAIN; |
11163 | 11165 | ||
11164 | spin_lock_bh(&tp->lock); | 11166 | spin_lock_bh(&tp->lock); |
@@ -11174,7 +11176,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
11174 | if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) | 11176 | if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) |
11175 | break; /* We have no PHY */ | 11177 | break; /* We have no PHY */ |
11176 | 11178 | ||
11177 | if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) | 11179 | if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) || |
11180 | ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && | ||
11181 | !netif_running(dev))) | ||
11178 | return -EAGAIN; | 11182 | return -EAGAIN; |
11179 | 11183 | ||
11180 | spin_lock_bh(&tp->lock); | 11184 | spin_lock_bh(&tp->lock); |
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 04e8ce14a1d0..7113168473cf 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * cdc_ncm.c | 2 | * cdc_ncm.c |
3 | * | 3 | * |
4 | * Copyright (C) ST-Ericsson 2010 | 4 | * Copyright (C) ST-Ericsson 2010-2011 |
5 | * Contact: Alexey Orishko <alexey.orishko@stericsson.com> | 5 | * Contact: Alexey Orishko <alexey.orishko@stericsson.com> |
6 | * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com> | 6 | * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com> |
7 | * | 7 | * |
@@ -54,7 +54,7 @@ | |||
54 | #include <linux/usb/usbnet.h> | 54 | #include <linux/usb/usbnet.h> |
55 | #include <linux/usb/cdc.h> | 55 | #include <linux/usb/cdc.h> |
56 | 56 | ||
57 | #define DRIVER_VERSION "17-Jan-2011" | 57 | #define DRIVER_VERSION "7-Feb-2011" |
58 | 58 | ||
59 | /* CDC NCM subclass 3.2.1 */ | 59 | /* CDC NCM subclass 3.2.1 */ |
60 | #define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10 | 60 | #define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10 |
@@ -77,6 +77,9 @@ | |||
77 | */ | 77 | */ |
78 | #define CDC_NCM_DPT_DATAGRAMS_MAX 32 | 78 | #define CDC_NCM_DPT_DATAGRAMS_MAX 32 |
79 | 79 | ||
80 | /* Maximum amount of IN datagrams in NTB */ | ||
81 | #define CDC_NCM_DPT_DATAGRAMS_IN_MAX 0 /* unlimited */ | ||
82 | |||
80 | /* Restart the timer, if amount of datagrams is less than given value */ | 83 | /* Restart the timer, if amount of datagrams is less than given value */ |
81 | #define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3 | 84 | #define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3 |
82 | 85 | ||
@@ -85,11 +88,6 @@ | |||
85 | (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \ | 88 | (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \ |
86 | (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16)) | 89 | (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16)) |
87 | 90 | ||
88 | struct connection_speed_change { | ||
89 | __le32 USBitRate; /* holds 3GPP downlink value, bits per second */ | ||
90 | __le32 DSBitRate; /* holds 3GPP uplink value, bits per second */ | ||
91 | } __attribute__ ((packed)); | ||
92 | |||
93 | struct cdc_ncm_data { | 91 | struct cdc_ncm_data { |
94 | struct usb_cdc_ncm_nth16 nth16; | 92 | struct usb_cdc_ncm_nth16 nth16; |
95 | struct usb_cdc_ncm_ndp16 ndp16; | 93 | struct usb_cdc_ncm_ndp16 ndp16; |
@@ -198,10 +196,10 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) | |||
198 | { | 196 | { |
199 | struct usb_cdc_notification req; | 197 | struct usb_cdc_notification req; |
200 | u32 val; | 198 | u32 val; |
201 | __le16 max_datagram_size; | ||
202 | u8 flags; | 199 | u8 flags; |
203 | u8 iface_no; | 200 | u8 iface_no; |
204 | int err; | 201 | int err; |
202 | u16 ntb_fmt_supported; | ||
205 | 203 | ||
206 | iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber; | 204 | iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber; |
207 | 205 | ||
@@ -223,6 +221,9 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) | |||
223 | ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder); | 221 | ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder); |
224 | ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor); | 222 | ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor); |
225 | ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment); | 223 | ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment); |
224 | /* devices prior to NCM Errata shall set this field to zero */ | ||
225 | ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams); | ||
226 | ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported); | ||
226 | 227 | ||
227 | if (ctx->func_desc != NULL) | 228 | if (ctx->func_desc != NULL) |
228 | flags = ctx->func_desc->bmNetworkCapabilities; | 229 | flags = ctx->func_desc->bmNetworkCapabilities; |
@@ -231,22 +232,58 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) | |||
231 | 232 | ||
232 | pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u " | 233 | pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u " |
233 | "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u " | 234 | "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u " |
234 | "wNdpOutAlignment=%u flags=0x%x\n", | 235 | "wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n", |
235 | ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus, | 236 | ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus, |
236 | ctx->tx_ndp_modulus, flags); | 237 | ctx->tx_ndp_modulus, ctx->tx_max_datagrams, flags); |
237 | 238 | ||
238 | /* max count of tx datagrams without terminating NULL entry */ | 239 | /* max count of tx datagrams */ |
239 | ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX; | 240 | if ((ctx->tx_max_datagrams == 0) || |
241 | (ctx->tx_max_datagrams > CDC_NCM_DPT_DATAGRAMS_MAX)) | ||
242 | ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX; | ||
240 | 243 | ||
241 | /* verify maximum size of received NTB in bytes */ | 244 | /* verify maximum size of received NTB in bytes */ |
242 | if ((ctx->rx_max < | 245 | if (ctx->rx_max < USB_CDC_NCM_NTB_MIN_IN_SIZE) { |
243 | (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) || | 246 | pr_debug("Using min receive length=%d\n", |
244 | (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX)) { | 247 | USB_CDC_NCM_NTB_MIN_IN_SIZE); |
248 | ctx->rx_max = USB_CDC_NCM_NTB_MIN_IN_SIZE; | ||
249 | } | ||
250 | |||
251 | if (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX) { | ||
245 | pr_debug("Using default maximum receive length=%d\n", | 252 | pr_debug("Using default maximum receive length=%d\n", |
246 | CDC_NCM_NTB_MAX_SIZE_RX); | 253 | CDC_NCM_NTB_MAX_SIZE_RX); |
247 | ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX; | 254 | ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX; |
248 | } | 255 | } |
249 | 256 | ||
257 | /* inform device about NTB input size changes */ | ||
258 | if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) { | ||
259 | req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | | ||
260 | USB_RECIP_INTERFACE; | ||
261 | req.bNotificationType = USB_CDC_SET_NTB_INPUT_SIZE; | ||
262 | req.wValue = 0; | ||
263 | req.wIndex = cpu_to_le16(iface_no); | ||
264 | |||
265 | if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) { | ||
266 | struct usb_cdc_ncm_ndp_input_size ndp_in_sz; | ||
267 | |||
268 | req.wLength = 8; | ||
269 | ndp_in_sz.dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); | ||
270 | ndp_in_sz.wNtbInMaxDatagrams = | ||
271 | cpu_to_le16(CDC_NCM_DPT_DATAGRAMS_MAX); | ||
272 | ndp_in_sz.wReserved = 0; | ||
273 | err = cdc_ncm_do_request(ctx, &req, &ndp_in_sz, 0, NULL, | ||
274 | 1000); | ||
275 | } else { | ||
276 | __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); | ||
277 | |||
278 | req.wLength = 4; | ||
279 | err = cdc_ncm_do_request(ctx, &req, &dwNtbInMaxSize, 0, | ||
280 | NULL, 1000); | ||
281 | } | ||
282 | |||
283 | if (err) | ||
284 | pr_debug("Setting NTB Input Size failed\n"); | ||
285 | } | ||
286 | |||
250 | /* verify maximum size of transmitted NTB in bytes */ | 287 | /* verify maximum size of transmitted NTB in bytes */ |
251 | if ((ctx->tx_max < | 288 | if ((ctx->tx_max < |
252 | (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) || | 289 | (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) || |
@@ -297,47 +334,84 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) | |||
297 | /* additional configuration */ | 334 | /* additional configuration */ |
298 | 335 | ||
299 | /* set CRC Mode */ | 336 | /* set CRC Mode */ |
300 | req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE; | 337 | if (flags & USB_CDC_NCM_NCAP_CRC_MODE) { |
301 | req.bNotificationType = USB_CDC_SET_CRC_MODE; | 338 | req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | |
302 | req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED); | 339 | USB_RECIP_INTERFACE; |
303 | req.wIndex = cpu_to_le16(iface_no); | 340 | req.bNotificationType = USB_CDC_SET_CRC_MODE; |
304 | req.wLength = 0; | 341 | req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED); |
305 | 342 | req.wIndex = cpu_to_le16(iface_no); | |
306 | err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); | 343 | req.wLength = 0; |
307 | if (err) | 344 | |
308 | pr_debug("Setting CRC mode off failed\n"); | 345 | err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); |
346 | if (err) | ||
347 | pr_debug("Setting CRC mode off failed\n"); | ||
348 | } | ||
309 | 349 | ||
310 | /* set NTB format */ | 350 | /* set NTB format, if both formats are supported */ |
311 | req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE; | 351 | if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) { |
312 | req.bNotificationType = USB_CDC_SET_NTB_FORMAT; | 352 | req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | |
313 | req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT); | 353 | USB_RECIP_INTERFACE; |
314 | req.wIndex = cpu_to_le16(iface_no); | 354 | req.bNotificationType = USB_CDC_SET_NTB_FORMAT; |
315 | req.wLength = 0; | 355 | req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT); |
356 | req.wIndex = cpu_to_le16(iface_no); | ||
357 | req.wLength = 0; | ||
358 | |||
359 | err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); | ||
360 | if (err) | ||
361 | pr_debug("Setting NTB format to 16-bit failed\n"); | ||
362 | } | ||
316 | 363 | ||
317 | err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); | 364 | ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE; |
318 | if (err) | ||
319 | pr_debug("Setting NTB format to 16-bit failed\n"); | ||
320 | 365 | ||
321 | /* set Max Datagram Size (MTU) */ | 366 | /* set Max Datagram Size (MTU) */ |
322 | req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE; | 367 | if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) { |
323 | req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE; | 368 | __le16 max_datagram_size; |
324 | req.wValue = 0; | 369 | u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); |
325 | req.wIndex = cpu_to_le16(iface_no); | 370 | |
326 | req.wLength = cpu_to_le16(2); | 371 | req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | |
372 | USB_RECIP_INTERFACE; | ||
373 | req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE; | ||
374 | req.wValue = 0; | ||
375 | req.wIndex = cpu_to_le16(iface_no); | ||
376 | req.wLength = cpu_to_le16(2); | ||
377 | |||
378 | err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL, | ||
379 | 1000); | ||
380 | if (err) { | ||
381 | pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n", | ||
382 | CDC_NCM_MIN_DATAGRAM_SIZE); | ||
383 | } else { | ||
384 | ctx->max_datagram_size = le16_to_cpu(max_datagram_size); | ||
385 | /* Check Eth descriptor value */ | ||
386 | if (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) { | ||
387 | if (ctx->max_datagram_size > eth_max_sz) | ||
388 | ctx->max_datagram_size = eth_max_sz; | ||
389 | } else { | ||
390 | if (ctx->max_datagram_size > | ||
391 | CDC_NCM_MAX_DATAGRAM_SIZE) | ||
392 | ctx->max_datagram_size = | ||
393 | CDC_NCM_MAX_DATAGRAM_SIZE; | ||
394 | } | ||
327 | 395 | ||
328 | err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL, 1000); | 396 | if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE) |
329 | if (err) { | 397 | ctx->max_datagram_size = |
330 | pr_debug(" GET_MAX_DATAGRAM_SIZE failed, using size=%u\n", | 398 | CDC_NCM_MIN_DATAGRAM_SIZE; |
331 | CDC_NCM_MIN_DATAGRAM_SIZE); | 399 | |
332 | /* use default */ | 400 | /* if value changed, update device */ |
333 | ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE; | 401 | req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | |
334 | } else { | 402 | USB_RECIP_INTERFACE; |
335 | ctx->max_datagram_size = le16_to_cpu(max_datagram_size); | 403 | req.bNotificationType = USB_CDC_SET_MAX_DATAGRAM_SIZE; |
404 | req.wValue = 0; | ||
405 | req.wIndex = cpu_to_le16(iface_no); | ||
406 | req.wLength = 2; | ||
407 | max_datagram_size = cpu_to_le16(ctx->max_datagram_size); | ||
408 | |||
409 | err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, | ||
410 | 0, NULL, 1000); | ||
411 | if (err) | ||
412 | pr_debug("SET_MAX_DATAGRAM_SIZE failed\n"); | ||
413 | } | ||
336 | 414 | ||
337 | if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE) | ||
338 | ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE; | ||
339 | else if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE) | ||
340 | ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE; | ||
341 | } | 415 | } |
342 | 416 | ||
343 | if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN)) | 417 | if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN)) |
@@ -466,19 +540,13 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) | |||
466 | 540 | ||
467 | ctx->ether_desc = | 541 | ctx->ether_desc = |
468 | (const struct usb_cdc_ether_desc *)buf; | 542 | (const struct usb_cdc_ether_desc *)buf; |
469 | |||
470 | dev->hard_mtu = | 543 | dev->hard_mtu = |
471 | le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); | 544 | le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); |
472 | 545 | ||
473 | if (dev->hard_mtu < | 546 | if (dev->hard_mtu < CDC_NCM_MIN_DATAGRAM_SIZE) |
474 | (CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN)) | 547 | dev->hard_mtu = CDC_NCM_MIN_DATAGRAM_SIZE; |
475 | dev->hard_mtu = | 548 | else if (dev->hard_mtu > CDC_NCM_MAX_DATAGRAM_SIZE) |
476 | CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN; | 549 | dev->hard_mtu = CDC_NCM_MAX_DATAGRAM_SIZE; |
477 | |||
478 | else if (dev->hard_mtu > | ||
479 | (CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN)) | ||
480 | dev->hard_mtu = | ||
481 | CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN; | ||
482 | break; | 550 | break; |
483 | 551 | ||
484 | case USB_CDC_NCM_TYPE: | 552 | case USB_CDC_NCM_TYPE: |
@@ -628,13 +696,13 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) | |||
628 | u32 offset; | 696 | u32 offset; |
629 | u32 last_offset; | 697 | u32 last_offset; |
630 | u16 n = 0; | 698 | u16 n = 0; |
631 | u8 timeout = 0; | 699 | u8 ready2send = 0; |
632 | 700 | ||
633 | /* if there is a remaining skb, it gets priority */ | 701 | /* if there is a remaining skb, it gets priority */ |
634 | if (skb != NULL) | 702 | if (skb != NULL) |
635 | swap(skb, ctx->tx_rem_skb); | 703 | swap(skb, ctx->tx_rem_skb); |
636 | else | 704 | else |
637 | timeout = 1; | 705 | ready2send = 1; |
638 | 706 | ||
639 | /* | 707 | /* |
640 | * +----------------+ | 708 | * +----------------+ |
@@ -682,9 +750,10 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) | |||
682 | 750 | ||
683 | for (; n < ctx->tx_max_datagrams; n++) { | 751 | for (; n < ctx->tx_max_datagrams; n++) { |
684 | /* check if end of transmit buffer is reached */ | 752 | /* check if end of transmit buffer is reached */ |
685 | if (offset >= ctx->tx_max) | 753 | if (offset >= ctx->tx_max) { |
754 | ready2send = 1; | ||
686 | break; | 755 | break; |
687 | 756 | } | |
688 | /* compute maximum buffer size */ | 757 | /* compute maximum buffer size */ |
689 | rem = ctx->tx_max - offset; | 758 | rem = ctx->tx_max - offset; |
690 | 759 | ||
@@ -711,9 +780,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) | |||
711 | } | 780 | } |
712 | ctx->tx_rem_skb = skb; | 781 | ctx->tx_rem_skb = skb; |
713 | skb = NULL; | 782 | skb = NULL; |
714 | 783 | ready2send = 1; | |
715 | /* loop one more time */ | ||
716 | timeout = 1; | ||
717 | } | 784 | } |
718 | break; | 785 | break; |
719 | } | 786 | } |
@@ -756,7 +823,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) | |||
756 | ctx->tx_curr_last_offset = last_offset; | 823 | ctx->tx_curr_last_offset = last_offset; |
757 | goto exit_no_skb; | 824 | goto exit_no_skb; |
758 | 825 | ||
759 | } else if ((n < ctx->tx_max_datagrams) && (timeout == 0)) { | 826 | } else if ((n < ctx->tx_max_datagrams) && (ready2send == 0)) { |
760 | /* wait for more frames */ | 827 | /* wait for more frames */ |
761 | /* push variables */ | 828 | /* push variables */ |
762 | ctx->tx_curr_skb = skb_out; | 829 | ctx->tx_curr_skb = skb_out; |
@@ -813,7 +880,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) | |||
813 | cpu_to_le16(sizeof(ctx->tx_ncm.nth16)); | 880 | cpu_to_le16(sizeof(ctx->tx_ncm.nth16)); |
814 | ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq); | 881 | ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq); |
815 | ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset); | 882 | ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset); |
816 | ctx->tx_ncm.nth16.wFpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16), | 883 | ctx->tx_ncm.nth16.wNdpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16), |
817 | ctx->tx_ndp_modulus); | 884 | ctx->tx_ndp_modulus); |
818 | 885 | ||
819 | memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16)); | 886 | memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16)); |
@@ -825,13 +892,13 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) | |||
825 | rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) * | 892 | rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) * |
826 | sizeof(struct usb_cdc_ncm_dpe16)); | 893 | sizeof(struct usb_cdc_ncm_dpe16)); |
827 | ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem); | 894 | ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem); |
828 | ctx->tx_ncm.ndp16.wNextFpIndex = 0; /* reserved */ | 895 | ctx->tx_ncm.ndp16.wNextNdpIndex = 0; /* reserved */ |
829 | 896 | ||
830 | memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex, | 897 | memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex, |
831 | &(ctx->tx_ncm.ndp16), | 898 | &(ctx->tx_ncm.ndp16), |
832 | sizeof(ctx->tx_ncm.ndp16)); | 899 | sizeof(ctx->tx_ncm.ndp16)); |
833 | 900 | ||
834 | memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex + | 901 | memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex + |
835 | sizeof(ctx->tx_ncm.ndp16), | 902 | sizeof(ctx->tx_ncm.ndp16), |
836 | &(ctx->tx_ncm.dpe16), | 903 | &(ctx->tx_ncm.dpe16), |
837 | (ctx->tx_curr_frame_num + 1) * | 904 | (ctx->tx_curr_frame_num + 1) * |
@@ -961,7 +1028,7 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in) | |||
961 | goto error; | 1028 | goto error; |
962 | } | 1029 | } |
963 | 1030 | ||
964 | temp = le16_to_cpu(ctx->rx_ncm.nth16.wFpIndex); | 1031 | temp = le16_to_cpu(ctx->rx_ncm.nth16.wNdpIndex); |
965 | if ((temp + sizeof(ctx->rx_ncm.ndp16)) > actlen) { | 1032 | if ((temp + sizeof(ctx->rx_ncm.ndp16)) > actlen) { |
966 | pr_debug("invalid DPT16 index\n"); | 1033 | pr_debug("invalid DPT16 index\n"); |
967 | goto error; | 1034 | goto error; |
@@ -1048,10 +1115,10 @@ error: | |||
1048 | 1115 | ||
1049 | static void | 1116 | static void |
1050 | cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx, | 1117 | cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx, |
1051 | struct connection_speed_change *data) | 1118 | struct usb_cdc_speed_change *data) |
1052 | { | 1119 | { |
1053 | uint32_t rx_speed = le32_to_cpu(data->USBitRate); | 1120 | uint32_t rx_speed = le32_to_cpu(data->DLBitRRate); |
1054 | uint32_t tx_speed = le32_to_cpu(data->DSBitRate); | 1121 | uint32_t tx_speed = le32_to_cpu(data->ULBitRate); |
1055 | 1122 | ||
1056 | /* | 1123 | /* |
1057 | * Currently the USB-NET API does not support reporting the actual | 1124 | * Currently the USB-NET API does not support reporting the actual |
@@ -1092,7 +1159,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb) | |||
1092 | /* test for split data in 8-byte chunks */ | 1159 | /* test for split data in 8-byte chunks */ |
1093 | if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) { | 1160 | if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) { |
1094 | cdc_ncm_speed_change(ctx, | 1161 | cdc_ncm_speed_change(ctx, |
1095 | (struct connection_speed_change *)urb->transfer_buffer); | 1162 | (struct usb_cdc_speed_change *)urb->transfer_buffer); |
1096 | return; | 1163 | return; |
1097 | } | 1164 | } |
1098 | 1165 | ||
@@ -1120,12 +1187,12 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb) | |||
1120 | break; | 1187 | break; |
1121 | 1188 | ||
1122 | case USB_CDC_NOTIFY_SPEED_CHANGE: | 1189 | case USB_CDC_NOTIFY_SPEED_CHANGE: |
1123 | if (urb->actual_length < | 1190 | if (urb->actual_length < (sizeof(*event) + |
1124 | (sizeof(*event) + sizeof(struct connection_speed_change))) | 1191 | sizeof(struct usb_cdc_speed_change))) |
1125 | set_bit(EVENT_STS_SPLIT, &dev->flags); | 1192 | set_bit(EVENT_STS_SPLIT, &dev->flags); |
1126 | else | 1193 | else |
1127 | cdc_ncm_speed_change(ctx, | 1194 | cdc_ncm_speed_change(ctx, |
1128 | (struct connection_speed_change *) &event[1]); | 1195 | (struct usb_cdc_speed_change *) &event[1]); |
1129 | break; | 1196 | break; |
1130 | 1197 | ||
1131 | default: | 1198 | default: |
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 02b622e3b9fb..5002f5be47be 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c | |||
@@ -651,6 +651,10 @@ static const struct usb_device_id products[] = { | |||
651 | .driver_info = (unsigned long)&dm9601_info, | 651 | .driver_info = (unsigned long)&dm9601_info, |
652 | }, | 652 | }, |
653 | { | 653 | { |
654 | USB_DEVICE(0x0fe6, 0x9700), /* DM9601 USB to Fast Ethernet Adapter */ | ||
655 | .driver_info = (unsigned long)&dm9601_info, | ||
656 | }, | ||
657 | { | ||
654 | USB_DEVICE(0x0a46, 0x9000), /* DM9000E */ | 658 | USB_DEVICE(0x0a46, 0x9000), /* DM9000E */ |
655 | .driver_info = (unsigned long)&dm9601_info, | 659 | .driver_info = (unsigned long)&dm9601_info, |
656 | }, | 660 | }, |
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index bed8fcedff49..6d83812603b6 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c | |||
@@ -2628,15 +2628,15 @@ exit: | |||
2628 | 2628 | ||
2629 | static void hso_free_tiomget(struct hso_serial *serial) | 2629 | static void hso_free_tiomget(struct hso_serial *serial) |
2630 | { | 2630 | { |
2631 | struct hso_tiocmget *tiocmget = serial->tiocmget; | 2631 | struct hso_tiocmget *tiocmget; |
2632 | if (!serial) | ||
2633 | return; | ||
2634 | tiocmget = serial->tiocmget; | ||
2632 | if (tiocmget) { | 2635 | if (tiocmget) { |
2633 | if (tiocmget->urb) { | 2636 | usb_free_urb(tiocmget->urb); |
2634 | usb_free_urb(tiocmget->urb); | 2637 | tiocmget->urb = NULL; |
2635 | tiocmget->urb = NULL; | ||
2636 | } | ||
2637 | serial->tiocmget = NULL; | 2638 | serial->tiocmget = NULL; |
2638 | kfree(tiocmget); | 2639 | kfree(tiocmget); |
2639 | |||
2640 | } | 2640 | } |
2641 | } | 2641 | } |
2642 | 2642 | ||
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index ed9a41643ff4..95c41d56631c 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c | |||
@@ -931,8 +931,10 @@ fail_halt: | |||
931 | if (urb != NULL) { | 931 | if (urb != NULL) { |
932 | clear_bit (EVENT_RX_MEMORY, &dev->flags); | 932 | clear_bit (EVENT_RX_MEMORY, &dev->flags); |
933 | status = usb_autopm_get_interface(dev->intf); | 933 | status = usb_autopm_get_interface(dev->intf); |
934 | if (status < 0) | 934 | if (status < 0) { |
935 | usb_free_urb(urb); | ||
935 | goto fail_lowmem; | 936 | goto fail_lowmem; |
937 | } | ||
936 | if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK) | 938 | if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK) |
937 | resched = 0; | 939 | resched = 0; |
938 | usb_autopm_put_interface(dev->intf); | 940 | usb_autopm_put_interface(dev->intf); |
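The usbnet hunk above plugs a memory leak on the error path: when usb_autopm_get_interface() fails, the URB allocated just before is now freed before jumping to fail_lowmem instead of being abandoned. A minimal userspace sketch of the same release-before-goto rule, with invented names rather than the driver's own code:

/*
 * Model of the error path: anything acquired before a failing step
 * must be released before jumping to the common exit label, otherwise
 * it leaks.  All names here are illustrative.
 */
#include <stdio.h>
#include <stdlib.h>

static int acquire_interface(void) { return -1; /* simulate failure */ }

static int submit(char *urb)
{
	int status = acquire_interface();

	if (status < 0) {
		free(urb);		/* release what we already hold */
		goto fail_lowmem;
	}
	/* ... submit the urb, then drop the interface reference ... */
	free(urb);
	return 0;

fail_lowmem:
	return status;
}

int main(void)
{
	char *urb = malloc(64);

	if (!urb)
		return 1;
	printf("submit() = %d\n", submit(urb));
	return 0;
}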
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 90a23e410d1b..82dba5aaf423 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -446,6 +446,20 @@ static void skb_recv_done(struct virtqueue *rvq) | |||
446 | } | 446 | } |
447 | } | 447 | } |
448 | 448 | ||
449 | static void virtnet_napi_enable(struct virtnet_info *vi) | ||
450 | { | ||
451 | napi_enable(&vi->napi); | ||
452 | |||
453 | /* If all buffers were filled by other side before we napi_enabled, we | ||
454 | * won't get another interrupt, so process any outstanding packets | ||
455 | * now. virtnet_poll wants re-enable the queue, so we disable here. | ||
456 | * We synchronize against interrupts via NAPI_STATE_SCHED */ | ||
457 | if (napi_schedule_prep(&vi->napi)) { | ||
458 | virtqueue_disable_cb(vi->rvq); | ||
459 | __napi_schedule(&vi->napi); | ||
460 | } | ||
461 | } | ||
462 | |||
449 | static void refill_work(struct work_struct *work) | 463 | static void refill_work(struct work_struct *work) |
450 | { | 464 | { |
451 | struct virtnet_info *vi; | 465 | struct virtnet_info *vi; |
@@ -454,7 +468,7 @@ static void refill_work(struct work_struct *work) | |||
454 | vi = container_of(work, struct virtnet_info, refill.work); | 468 | vi = container_of(work, struct virtnet_info, refill.work); |
455 | napi_disable(&vi->napi); | 469 | napi_disable(&vi->napi); |
456 | still_empty = !try_fill_recv(vi, GFP_KERNEL); | 470 | still_empty = !try_fill_recv(vi, GFP_KERNEL); |
457 | napi_enable(&vi->napi); | 471 | virtnet_napi_enable(vi); |
458 | 472 | ||
459 | /* In theory, this can happen: if we don't get any buffers in | 473 | /* In theory, this can happen: if we don't get any buffers in |
460 | * we will *never* try to fill again. */ | 474 | * we will *never* try to fill again. */ |
@@ -638,16 +652,7 @@ static int virtnet_open(struct net_device *dev) | |||
638 | { | 652 | { |
639 | struct virtnet_info *vi = netdev_priv(dev); | 653 | struct virtnet_info *vi = netdev_priv(dev); |
640 | 654 | ||
641 | napi_enable(&vi->napi); | 655 | virtnet_napi_enable(vi); |
642 | |||
643 | /* If all buffers were filled by other side before we napi_enabled, we | ||
644 | * won't get another interrupt, so process any outstanding packets | ||
645 | * now. virtnet_poll wants re-enable the queue, so we disable here. | ||
646 | * We synchronize against interrupts via NAPI_STATE_SCHED */ | ||
647 | if (napi_schedule_prep(&vi->napi)) { | ||
648 | virtqueue_disable_cb(vi->rvq); | ||
649 | __napi_schedule(&vi->napi); | ||
650 | } | ||
651 | return 0; | 656 | return 0; |
652 | } | 657 | } |
653 | 658 | ||
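virtnet_napi_enable() above exists because the open path and the refill worker share the same race: if the other side filled every buffer while NAPI was off, no further interrupt arrives, so outstanding work has to be picked up by hand right after polling is re-enabled. A toy model of that catch-up step, with illustrative names and no virtio or NAPI API:

/*
 * Simplified model of the race the helper closes: the producer ran
 * while polling was disabled, so the consumer must check for pending
 * work itself after re-enabling or it waits forever.
 */
#include <stdbool.h>
#include <stdio.h>

struct ring { int pending; bool poll_enabled; };

static void poll_ring(struct ring *r)
{
	while (r->pending)
		r->pending--;	/* consume outstanding buffers */
}

static void enable_and_catch_up(struct ring *r)
{
	r->poll_enabled = true;
	if (r->pending)		/* filled while we were disabled */
		poll_ring(r);	/* no interrupt will come for these */
}

int main(void)
{
	struct ring r = { .pending = 3, .poll_enabled = false };

	enable_and_catch_up(&r);
	printf("pending after enable: %d\n", r.pending);
	return 0;
}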
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c index 78c26fdccad1..62ce2f4e8605 100644 --- a/drivers/net/wireless/ath/ath5k/phy.c +++ b/drivers/net/wireless/ath/ath5k/phy.c | |||
@@ -282,6 +282,34 @@ int ath5k_hw_phy_disable(struct ath5k_hw *ah) | |||
282 | return 0; | 282 | return 0; |
283 | } | 283 | } |
284 | 284 | ||
285 | /* | ||
286 | * Wait for synth to settle | ||
287 | */ | ||
288 | static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah, | ||
289 | struct ieee80211_channel *channel) | ||
290 | { | ||
291 | /* | ||
292 | * On 5211+ read activation -> rx delay | ||
293 | * and use it (100ns steps). | ||
294 | */ | ||
295 | if (ah->ah_version != AR5K_AR5210) { | ||
296 | u32 delay; | ||
297 | delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) & | ||
298 | AR5K_PHY_RX_DELAY_M; | ||
299 | delay = (channel->hw_value & CHANNEL_CCK) ? | ||
300 | ((delay << 2) / 22) : (delay / 10); | ||
301 | if (ah->ah_bwmode == AR5K_BWMODE_10MHZ) | ||
302 | delay = delay << 1; | ||
303 | if (ah->ah_bwmode == AR5K_BWMODE_5MHZ) | ||
304 | delay = delay << 2; | ||
305 | /* XXX: /2 on turbo ? Let's be safe | ||
306 | * for now */ | ||
307 | udelay(100 + delay); | ||
308 | } else { | ||
309 | mdelay(1); | ||
310 | } | ||
311 | } | ||
312 | |||
285 | 313 | ||
286 | /**********************\ | 314 | /**********************\ |
287 | * RF Gain optimization * | 315 | * RF Gain optimization * |
@@ -1253,6 +1281,7 @@ static int ath5k_hw_channel(struct ath5k_hw *ah, | |||
1253 | case AR5K_RF5111: | 1281 | case AR5K_RF5111: |
1254 | ret = ath5k_hw_rf5111_channel(ah, channel); | 1282 | ret = ath5k_hw_rf5111_channel(ah, channel); |
1255 | break; | 1283 | break; |
1284 | case AR5K_RF2317: | ||
1256 | case AR5K_RF2425: | 1285 | case AR5K_RF2425: |
1257 | ret = ath5k_hw_rf2425_channel(ah, channel); | 1286 | ret = ath5k_hw_rf2425_channel(ah, channel); |
1258 | break; | 1287 | break; |
@@ -3237,6 +3266,13 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, | |||
3237 | /* Failed */ | 3266 | /* Failed */ |
3238 | if (i >= 100) | 3267 | if (i >= 100) |
3239 | return -EIO; | 3268 | return -EIO; |
3269 | |||
3270 | /* Set channel and wait for synth */ | ||
3271 | ret = ath5k_hw_channel(ah, channel); | ||
3272 | if (ret) | ||
3273 | return ret; | ||
3274 | |||
3275 | ath5k_hw_wait_for_synth(ah, channel); | ||
3240 | } | 3276 | } |
3241 | 3277 | ||
3242 | /* | 3278 | /* |
@@ -3251,13 +3287,53 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, | |||
3251 | if (ret) | 3287 | if (ret) |
3252 | return ret; | 3288 | return ret; |
3253 | 3289 | ||
3290 | /* Write OFDM timings on 5212*/ | ||
3291 | if (ah->ah_version == AR5K_AR5212 && | ||
3292 | channel->hw_value & CHANNEL_OFDM) { | ||
3293 | |||
3294 | ret = ath5k_hw_write_ofdm_timings(ah, channel); | ||
3295 | if (ret) | ||
3296 | return ret; | ||
3297 | |||
3298 | /* Spur info is available only from EEPROM versions | ||
3299 | * greater than 5.3, but the EEPROM routines will use | ||
3300 | * static values for older versions */ | ||
3301 | if (ah->ah_mac_srev >= AR5K_SREV_AR5424) | ||
3302 | ath5k_hw_set_spur_mitigation_filter(ah, | ||
3303 | channel); | ||
3304 | } | ||
3305 | |||
3306 | /* If we used fast channel switching | ||
3307 | * we are done, release RF bus and | ||
3308 | * fire up NF calibration. | ||
3309 | * | ||
3310 | * Note: Only NF calibration due to | ||
3311 | * channel change, not AGC calibration | ||
3312 | * since AGC is still running ! | ||
3313 | */ | ||
3314 | if (fast) { | ||
3315 | /* | ||
3316 | * Release RF Bus grant | ||
3317 | */ | ||
3318 | AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ, | ||
3319 | AR5K_PHY_RFBUS_REQ_REQUEST); | ||
3320 | |||
3321 | /* | ||
3322 | * Start NF calibration | ||
3323 | */ | ||
3324 | AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL, | ||
3325 | AR5K_PHY_AGCCTL_NF); | ||
3326 | |||
3327 | return ret; | ||
3328 | } | ||
3329 | |||
3254 | /* | 3330 | /* |
3255 | * For 5210 we do all initialization using | 3331 | * For 5210 we do all initialization using |
3256 | * initvals, so we don't have to modify | 3332 | * initvals, so we don't have to modify |
3257 | * any settings (5210 also only supports | 3333 | * any settings (5210 also only supports |
3258 | * a/aturbo modes) | 3334 | * a/aturbo modes) |
3259 | */ | 3335 | */ |
3260 | if ((ah->ah_version != AR5K_AR5210) && !fast) { | 3336 | if (ah->ah_version != AR5K_AR5210) { |
3261 | 3337 | ||
3262 | /* | 3338 | /* |
3263 | * Write initial RF gain settings | 3339 | * Write initial RF gain settings |
@@ -3276,22 +3352,6 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, | |||
3276 | if (ret) | 3352 | if (ret) |
3277 | return ret; | 3353 | return ret; |
3278 | 3354 | ||
3279 | /* Write OFDM timings on 5212*/ | ||
3280 | if (ah->ah_version == AR5K_AR5212 && | ||
3281 | channel->hw_value & CHANNEL_OFDM) { | ||
3282 | |||
3283 | ret = ath5k_hw_write_ofdm_timings(ah, channel); | ||
3284 | if (ret) | ||
3285 | return ret; | ||
3286 | |||
3287 | /* Spur info is available only from EEPROM versions | ||
3288 | * greater than 5.3, but the EEPROM routines will use | ||
3289 | * static values for older versions */ | ||
3290 | if (ah->ah_mac_srev >= AR5K_SREV_AR5424) | ||
3291 | ath5k_hw_set_spur_mitigation_filter(ah, | ||
3292 | channel); | ||
3293 | } | ||
3294 | |||
3295 | /*Enable/disable 802.11b mode on 5111 | 3355 | /*Enable/disable 802.11b mode on 5111 |
3296 | (enable 2111 frequency converter + CCK)*/ | 3356 | (enable 2111 frequency converter + CCK)*/ |
3297 | if (ah->ah_radio == AR5K_RF5111) { | 3357 | if (ah->ah_radio == AR5K_RF5111) { |
@@ -3322,47 +3382,20 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, | |||
3322 | */ | 3382 | */ |
3323 | ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT); | 3383 | ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT); |
3324 | 3384 | ||
3385 | ath5k_hw_wait_for_synth(ah, channel); | ||
3386 | |||
3325 | /* | 3387 | /* |
3326 | * On 5211+ read activation -> rx delay | 3388 | * Perform ADC test to see if baseband is ready |
3327 | * and use it. | 3389 | * Set tx hold and check adc test register |
3328 | */ | 3390 | */ |
3329 | if (ah->ah_version != AR5K_AR5210) { | 3391 | phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1); |
3330 | u32 delay; | 3392 | ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1); |
3331 | delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) & | 3393 | for (i = 0; i <= 20; i++) { |
3332 | AR5K_PHY_RX_DELAY_M; | 3394 | if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10)) |
3333 | delay = (channel->hw_value & CHANNEL_CCK) ? | 3395 | break; |
3334 | ((delay << 2) / 22) : (delay / 10); | 3396 | udelay(200); |
3335 | if (ah->ah_bwmode == AR5K_BWMODE_10MHZ) | ||
3336 | delay = delay << 1; | ||
3337 | if (ah->ah_bwmode == AR5K_BWMODE_5MHZ) | ||
3338 | delay = delay << 2; | ||
3339 | /* XXX: /2 on turbo ? Let's be safe | ||
3340 | * for now */ | ||
3341 | udelay(100 + delay); | ||
3342 | } else { | ||
3343 | mdelay(1); | ||
3344 | } | ||
3345 | |||
3346 | if (fast) | ||
3347 | /* | ||
3348 | * Release RF Bus grant | ||
3349 | */ | ||
3350 | AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ, | ||
3351 | AR5K_PHY_RFBUS_REQ_REQUEST); | ||
3352 | else { | ||
3353 | /* | ||
3354 | * Perform ADC test to see if baseband is ready | ||
3355 | * Set tx hold and check adc test register | ||
3356 | */ | ||
3357 | phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1); | ||
3358 | ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1); | ||
3359 | for (i = 0; i <= 20; i++) { | ||
3360 | if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10)) | ||
3361 | break; | ||
3362 | udelay(200); | ||
3363 | } | ||
3364 | ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1); | ||
3365 | } | 3397 | } |
3398 | ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1); | ||
3366 | 3399 | ||
3367 | /* | 3400 | /* |
3368 | * Start automatic gain control calibration | 3401 | * Start automatic gain control calibration |
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 3681caf54282..1a7fa6ea4cf5 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h | |||
@@ -21,7 +21,6 @@ | |||
21 | #include <linux/device.h> | 21 | #include <linux/device.h> |
22 | #include <linux/leds.h> | 22 | #include <linux/leds.h> |
23 | #include <linux/completion.h> | 23 | #include <linux/completion.h> |
24 | #include <linux/pm_qos_params.h> | ||
25 | 24 | ||
26 | #include "debug.h" | 25 | #include "debug.h" |
27 | #include "common.h" | 26 | #include "common.h" |
@@ -57,8 +56,6 @@ struct ath_node; | |||
57 | 56 | ||
58 | #define A_MAX(a, b) ((a) > (b) ? (a) : (b)) | 57 | #define A_MAX(a, b) ((a) > (b) ? (a) : (b)) |
59 | 58 | ||
60 | #define ATH9K_PM_QOS_DEFAULT_VALUE 55 | ||
61 | |||
62 | #define TSF_TO_TU(_h,_l) \ | 59 | #define TSF_TO_TU(_h,_l) \ |
63 | ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10)) | 60 | ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10)) |
64 | 61 | ||
@@ -218,6 +215,7 @@ struct ath_frame_info { | |||
218 | struct ath_buf_state { | 215 | struct ath_buf_state { |
219 | u8 bf_type; | 216 | u8 bf_type; |
220 | u8 bfs_paprd; | 217 | u8 bfs_paprd; |
218 | unsigned long bfs_paprd_timestamp; | ||
221 | enum ath9k_internal_frame_type bfs_ftype; | 219 | enum ath9k_internal_frame_type bfs_ftype; |
222 | }; | 220 | }; |
223 | 221 | ||
@@ -593,7 +591,6 @@ struct ath_softc { | |||
593 | struct work_struct paprd_work; | 591 | struct work_struct paprd_work; |
594 | struct work_struct hw_check_work; | 592 | struct work_struct hw_check_work; |
595 | struct completion paprd_complete; | 593 | struct completion paprd_complete; |
596 | bool paprd_pending; | ||
597 | 594 | ||
598 | u32 intrstatus; | 595 | u32 intrstatus; |
599 | u32 sc_flags; /* SC_OP_* */ | 596 | u32 sc_flags; /* SC_OP_* */ |
@@ -633,8 +630,6 @@ struct ath_softc { | |||
633 | struct ath_descdma txsdma; | 630 | struct ath_descdma txsdma; |
634 | 631 | ||
635 | struct ath_ant_comb ant_comb; | 632 | struct ath_ant_comb ant_comb; |
636 | |||
637 | struct pm_qos_request_list pm_qos_req; | ||
638 | }; | 633 | }; |
639 | 634 | ||
640 | struct ath_wiphy { | 635 | struct ath_wiphy { |
@@ -666,7 +661,6 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz) | |||
666 | extern struct ieee80211_ops ath9k_ops; | 661 | extern struct ieee80211_ops ath9k_ops; |
667 | extern int ath9k_modparam_nohwcrypt; | 662 | extern int ath9k_modparam_nohwcrypt; |
668 | extern int led_blink; | 663 | extern int led_blink; |
669 | extern int ath9k_pm_qos_value; | ||
670 | extern bool is_ath9k_unloaded; | 664 | extern bool is_ath9k_unloaded; |
671 | 665 | ||
672 | irqreturn_t ath_isr(int irq, void *dev); | 666 | irqreturn_t ath_isr(int irq, void *dev); |
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 087a6a95edd5..a033d01bf8a0 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c | |||
@@ -41,10 +41,6 @@ static int ath9k_btcoex_enable; | |||
41 | module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444); | 41 | module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444); |
42 | MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence"); | 42 | MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence"); |
43 | 43 | ||
44 | int ath9k_pm_qos_value = ATH9K_PM_QOS_DEFAULT_VALUE; | ||
45 | module_param_named(pmqos, ath9k_pm_qos_value, int, S_IRUSR | S_IRGRP | S_IROTH); | ||
46 | MODULE_PARM_DESC(pmqos, "User specified PM-QOS value"); | ||
47 | |||
48 | bool is_ath9k_unloaded; | 44 | bool is_ath9k_unloaded; |
49 | /* We use the hw_value as an index into our private channel structure */ | 45 | /* We use the hw_value as an index into our private channel structure */ |
50 | 46 | ||
@@ -762,9 +758,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid, | |||
762 | ath_init_leds(sc); | 758 | ath_init_leds(sc); |
763 | ath_start_rfkill_poll(sc); | 759 | ath_start_rfkill_poll(sc); |
764 | 760 | ||
765 | pm_qos_add_request(&sc->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, | ||
766 | PM_QOS_DEFAULT_VALUE); | ||
767 | |||
768 | return 0; | 761 | return 0; |
769 | 762 | ||
770 | error_world: | 763 | error_world: |
@@ -831,7 +824,6 @@ void ath9k_deinit_device(struct ath_softc *sc) | |||
831 | } | 824 | } |
832 | 825 | ||
833 | ieee80211_unregister_hw(hw); | 826 | ieee80211_unregister_hw(hw); |
834 | pm_qos_remove_request(&sc->pm_qos_req); | ||
835 | ath_rx_cleanup(sc); | 827 | ath_rx_cleanup(sc); |
836 | ath_tx_cleanup(sc); | 828 | ath_tx_cleanup(sc); |
837 | ath9k_deinit_softc(sc); | 829 | ath9k_deinit_softc(sc); |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 9040c2ff1909..a09d15f7aa6e 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -342,7 +342,6 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int | |||
342 | tx_info->control.rates[1].idx = -1; | 342 | tx_info->control.rates[1].idx = -1; |
343 | 343 | ||
344 | init_completion(&sc->paprd_complete); | 344 | init_completion(&sc->paprd_complete); |
345 | sc->paprd_pending = true; | ||
346 | txctl.paprd = BIT(chain); | 345 | txctl.paprd = BIT(chain); |
347 | 346 | ||
348 | if (ath_tx_start(hw, skb, &txctl) != 0) { | 347 | if (ath_tx_start(hw, skb, &txctl) != 0) { |
@@ -353,7 +352,6 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int | |||
353 | 352 | ||
354 | time_left = wait_for_completion_timeout(&sc->paprd_complete, | 353 | time_left = wait_for_completion_timeout(&sc->paprd_complete, |
355 | msecs_to_jiffies(ATH_PAPRD_TIMEOUT)); | 354 | msecs_to_jiffies(ATH_PAPRD_TIMEOUT)); |
356 | sc->paprd_pending = false; | ||
357 | 355 | ||
358 | if (!time_left) | 356 | if (!time_left) |
359 | ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CALIBRATE, | 357 | ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CALIBRATE, |
@@ -1175,12 +1173,6 @@ static int ath9k_start(struct ieee80211_hw *hw) | |||
1175 | ath9k_btcoex_timer_resume(sc); | 1173 | ath9k_btcoex_timer_resume(sc); |
1176 | } | 1174 | } |
1177 | 1175 | ||
1178 | /* User has the option to provide pm-qos value as a module | ||
1179 | * parameter rather than using the default value of | ||
1180 | * 'ATH9K_PM_QOS_DEFAULT_VALUE'. | ||
1181 | */ | ||
1182 | pm_qos_update_request(&sc->pm_qos_req, ath9k_pm_qos_value); | ||
1183 | |||
1184 | if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en) | 1176 | if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en) |
1185 | common->bus_ops->extn_synch_en(common); | 1177 | common->bus_ops->extn_synch_en(common); |
1186 | 1178 | ||
@@ -1347,8 +1339,6 @@ static void ath9k_stop(struct ieee80211_hw *hw) | |||
1347 | 1339 | ||
1348 | sc->sc_flags |= SC_OP_INVALID; | 1340 | sc->sc_flags |= SC_OP_INVALID; |
1349 | 1341 | ||
1350 | pm_qos_update_request(&sc->pm_qos_req, PM_QOS_DEFAULT_VALUE); | ||
1351 | |||
1352 | mutex_unlock(&sc->mutex); | 1342 | mutex_unlock(&sc->mutex); |
1353 | 1343 | ||
1354 | ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n"); | 1344 | ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n"); |
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 33a37edbaf79..07b7804aec5b 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c | |||
@@ -1725,6 +1725,9 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf, | |||
1725 | ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc, | 1725 | ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc, |
1726 | bf->bf_state.bfs_paprd); | 1726 | bf->bf_state.bfs_paprd); |
1727 | 1727 | ||
1728 | if (txctl->paprd) | ||
1729 | bf->bf_state.bfs_paprd_timestamp = jiffies; | ||
1730 | |||
1728 | ath_tx_send_normal(sc, txctl->txq, tid, &bf_head); | 1731 | ath_tx_send_normal(sc, txctl->txq, tid, &bf_head); |
1729 | } | 1732 | } |
1730 | 1733 | ||
@@ -1886,7 +1889,9 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, | |||
1886 | bf->bf_buf_addr = 0; | 1889 | bf->bf_buf_addr = 0; |
1887 | 1890 | ||
1888 | if (bf->bf_state.bfs_paprd) { | 1891 | if (bf->bf_state.bfs_paprd) { |
1889 | if (!sc->paprd_pending) | 1892 | if (time_after(jiffies, |
1893 | bf->bf_state.bfs_paprd_timestamp + | ||
1894 | msecs_to_jiffies(ATH_PAPRD_TIMEOUT))) | ||
1890 | dev_kfree_skb_any(skb); | 1895 | dev_kfree_skb_any(skb); |
1891 | else | 1896 | else |
1892 | complete(&sc->paprd_complete); | 1897 | complete(&sc->paprd_complete); |
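The ath9k change swaps the paprd_pending flag for a per-buffer timestamp plus a time_after() check against ATH_PAPRD_TIMEOUT, so a late tx completion can decide on its own whether the calibration wait already gave up. A self-contained model of the wraparound-safe comparison that idiom relies on; the tick source and names are invented, not the kernel macro itself:

/*
 * Store a timestamp when the frame is queued and later test whether
 * the window expired.  The signed subtraction keeps the comparison
 * correct even when the counter wraps.
 */
#include <stdio.h>

typedef unsigned long tick_t;

static int time_after_model(tick_t a, tick_t b)
{
	return (long)(b - a) < 0;	/* true if a is later than b */
}

int main(void)
{
	tick_t timeout_ticks = 100;
	tick_t queued_at = (tick_t)-50;		/* just before wraparound */
	tick_t now = 80;			/* counter has wrapped */

	if (time_after_model(now, queued_at + timeout_ticks))
		printf("calibration frame timed out, drop it\n");
	else
		printf("still inside the window, complete normally\n");
	return 0;
}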
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c index 939a0e96ed1f..84866a4b8350 100644 --- a/drivers/net/wireless/ath/carl9170/rx.c +++ b/drivers/net/wireless/ath/carl9170/rx.c | |||
@@ -564,7 +564,7 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len) | |||
564 | cam = ieee80211_check_tim(tim_ie, tim_len, ar->common.curaid); | 564 | cam = ieee80211_check_tim(tim_ie, tim_len, ar->common.curaid); |
565 | 565 | ||
566 | /* 2. Maybe the AP wants to send multicast/broadcast data? */ | 566 | /* 2. Maybe the AP wants to send multicast/broadcast data? */ |
567 | cam = !!(tim_ie->bitmap_ctrl & 0x01); | 567 | cam |= !!(tim_ie->bitmap_ctrl & 0x01); |
568 | 568 | ||
569 | if (!cam) { | 569 | if (!cam) { |
570 | /* back to low-power land. */ | 570 | /* back to low-power land. */ |
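The carl9170 fix is a single operator: with plain assignment the multicast test clobbered the result of ieee80211_check_tim(), so unicast traffic flagged in the TIM could no longer keep the device awake; the OR-assignment lets either condition do it. A tiny standalone illustration with made-up values:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	bool unicast_pending = true;	  /* the TIM check said yes */
	unsigned char bitmap_ctrl = 0x00; /* no multicast queued */

	bool cam_buggy = unicast_pending;
	cam_buggy = !!(bitmap_ctrl & 0x01);	/* overwrites: stays asleep */

	bool cam_fixed = unicast_pending;
	cam_fixed |= !!(bitmap_ctrl & 0x01);	/* accumulates: stays awake */

	printf("buggy=%d fixed=%d\n", cam_buggy, cam_fixed);
	return 0;
}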
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c index a9b852be4509..39b6f16c87fa 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c | |||
@@ -402,72 +402,6 @@ static void iwl3945_accumulative_statistics(struct iwl_priv *priv, | |||
402 | } | 402 | } |
403 | #endif | 403 | #endif |
404 | 404 | ||
405 | /** | ||
406 | * iwl3945_good_plcp_health - checks for plcp error. | ||
407 | * | ||
408 | * When the plcp error is exceeding the thresholds, reset the radio | ||
409 | * to improve the throughput. | ||
410 | */ | ||
411 | static bool iwl3945_good_plcp_health(struct iwl_priv *priv, | ||
412 | struct iwl_rx_packet *pkt) | ||
413 | { | ||
414 | bool rc = true; | ||
415 | struct iwl3945_notif_statistics current_stat; | ||
416 | int combined_plcp_delta; | ||
417 | unsigned int plcp_msec; | ||
418 | unsigned long plcp_received_jiffies; | ||
419 | |||
420 | if (priv->cfg->base_params->plcp_delta_threshold == | ||
421 | IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) { | ||
422 | IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n"); | ||
423 | return rc; | ||
424 | } | ||
425 | memcpy(¤t_stat, pkt->u.raw, sizeof(struct | ||
426 | iwl3945_notif_statistics)); | ||
427 | /* | ||
428 | * check for plcp_err and trigger radio reset if it exceeds | ||
429 | * the plcp error threshold plcp_delta. | ||
430 | */ | ||
431 | plcp_received_jiffies = jiffies; | ||
432 | plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies - | ||
433 | (long) priv->plcp_jiffies); | ||
434 | priv->plcp_jiffies = plcp_received_jiffies; | ||
435 | /* | ||
436 | * check to make sure plcp_msec is not 0 to prevent division | ||
437 | * by zero. | ||
438 | */ | ||
439 | if (plcp_msec) { | ||
440 | combined_plcp_delta = | ||
441 | (le32_to_cpu(current_stat.rx.ofdm.plcp_err) - | ||
442 | le32_to_cpu(priv->_3945.statistics.rx.ofdm.plcp_err)); | ||
443 | |||
444 | if ((combined_plcp_delta > 0) && | ||
445 | ((combined_plcp_delta * 100) / plcp_msec) > | ||
446 | priv->cfg->base_params->plcp_delta_threshold) { | ||
447 | /* | ||
448 | * if plcp_err exceed the threshold, the following | ||
449 | * data is printed in csv format: | ||
450 | * Text: plcp_err exceeded %d, | ||
451 | * Received ofdm.plcp_err, | ||
452 | * Current ofdm.plcp_err, | ||
453 | * combined_plcp_delta, | ||
454 | * plcp_msec | ||
455 | */ | ||
456 | IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, " | ||
457 | "%u, %d, %u mSecs\n", | ||
458 | priv->cfg->base_params->plcp_delta_threshold, | ||
459 | le32_to_cpu(current_stat.rx.ofdm.plcp_err), | ||
460 | combined_plcp_delta, plcp_msec); | ||
461 | /* | ||
462 | * Reset the RF radio due to the high plcp | ||
463 | * error rate | ||
464 | */ | ||
465 | rc = false; | ||
466 | } | ||
467 | } | ||
468 | return rc; | ||
469 | } | ||
470 | |||
471 | void iwl3945_hw_rx_statistics(struct iwl_priv *priv, | 405 | void iwl3945_hw_rx_statistics(struct iwl_priv *priv, |
472 | struct iwl_rx_mem_buffer *rxb) | 406 | struct iwl_rx_mem_buffer *rxb) |
473 | { | 407 | { |
@@ -2734,7 +2668,6 @@ static struct iwl_lib_ops iwl3945_lib = { | |||
2734 | .isr_ops = { | 2668 | .isr_ops = { |
2735 | .isr = iwl_isr_legacy, | 2669 | .isr = iwl_isr_legacy, |
2736 | }, | 2670 | }, |
2737 | .check_plcp_health = iwl3945_good_plcp_health, | ||
2738 | 2671 | ||
2739 | .debugfs_ops = { | 2672 | .debugfs_ops = { |
2740 | .rx_stats_read = iwl3945_ucode_rx_stats_read, | 2673 | .rx_stats_read = iwl3945_ucode_rx_stats_read, |
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c index af505bcd7ae0..ef36aff1bb43 100644 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c | |||
@@ -681,6 +681,8 @@ struct iwl_cfg iwl6000i_2bg_cfg = { | |||
681 | .fw_name_pre = IWL6050_FW_PRE, \ | 681 | .fw_name_pre = IWL6050_FW_PRE, \ |
682 | .ucode_api_max = IWL6050_UCODE_API_MAX, \ | 682 | .ucode_api_max = IWL6050_UCODE_API_MAX, \ |
683 | .ucode_api_min = IWL6050_UCODE_API_MIN, \ | 683 | .ucode_api_min = IWL6050_UCODE_API_MIN, \ |
684 | .valid_tx_ant = ANT_AB, /* .cfg overwrite */ \ | ||
685 | .valid_rx_ant = ANT_AB, /* .cfg overwrite */ \ | ||
684 | .ops = &iwl6050_ops, \ | 686 | .ops = &iwl6050_ops, \ |
685 | .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \ | 687 | .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \ |
686 | .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \ | 688 | .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \ |
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index 36335b1b54d4..c1cfd9952e52 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c | |||
@@ -1157,6 +1157,9 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv) | |||
1157 | /* only Re-enable if disabled by irq */ | 1157 | /* only Re-enable if disabled by irq */ |
1158 | if (test_bit(STATUS_INT_ENABLED, &priv->status)) | 1158 | if (test_bit(STATUS_INT_ENABLED, &priv->status)) |
1159 | iwl_enable_interrupts(priv); | 1159 | iwl_enable_interrupts(priv); |
1160 | /* Re-enable RF_KILL if it occurred */ | ||
1161 | else if (handled & CSR_INT_BIT_RF_KILL) | ||
1162 | iwl_enable_rfkill_int(priv); | ||
1160 | 1163 | ||
1161 | #ifdef CONFIG_IWLWIFI_DEBUG | 1164 | #ifdef CONFIG_IWLWIFI_DEBUG |
1162 | if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { | 1165 | if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { |
@@ -1371,6 +1374,9 @@ static void iwl_irq_tasklet(struct iwl_priv *priv) | |||
1371 | /* only Re-enable if disabled by irq */ | 1374 | /* only Re-enable if disabled by irq */ |
1372 | if (test_bit(STATUS_INT_ENABLED, &priv->status)) | 1375 | if (test_bit(STATUS_INT_ENABLED, &priv->status)) |
1373 | iwl_enable_interrupts(priv); | 1376 | iwl_enable_interrupts(priv); |
1377 | /* Re-enable RF_KILL if it occurred */ | ||
1378 | else if (handled & CSR_INT_BIT_RF_KILL) | ||
1379 | iwl_enable_rfkill_int(priv); | ||
1374 | } | 1380 | } |
1375 | 1381 | ||
1376 | /* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */ | 1382 | /* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */ |
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c index 1eacba4daa5b..0494d7b102d4 100644 --- a/drivers/net/wireless/p54/p54pci.c +++ b/drivers/net/wireless/p54/p54pci.c | |||
@@ -199,6 +199,7 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index, | |||
199 | while (i != idx) { | 199 | while (i != idx) { |
200 | u16 len; | 200 | u16 len; |
201 | struct sk_buff *skb; | 201 | struct sk_buff *skb; |
202 | dma_addr_t dma_addr; | ||
202 | desc = &ring[i]; | 203 | desc = &ring[i]; |
203 | len = le16_to_cpu(desc->len); | 204 | len = le16_to_cpu(desc->len); |
204 | skb = rx_buf[i]; | 205 | skb = rx_buf[i]; |
@@ -216,17 +217,20 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index, | |||
216 | 217 | ||
217 | len = priv->common.rx_mtu; | 218 | len = priv->common.rx_mtu; |
218 | } | 219 | } |
220 | dma_addr = le32_to_cpu(desc->host_addr); | ||
221 | pci_dma_sync_single_for_cpu(priv->pdev, dma_addr, | ||
222 | priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); | ||
219 | skb_put(skb, len); | 223 | skb_put(skb, len); |
220 | 224 | ||
221 | if (p54_rx(dev, skb)) { | 225 | if (p54_rx(dev, skb)) { |
222 | pci_unmap_single(priv->pdev, | 226 | pci_unmap_single(priv->pdev, dma_addr, |
223 | le32_to_cpu(desc->host_addr), | 227 | priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); |
224 | priv->common.rx_mtu + 32, | ||
225 | PCI_DMA_FROMDEVICE); | ||
226 | rx_buf[i] = NULL; | 228 | rx_buf[i] = NULL; |
227 | desc->host_addr = 0; | 229 | desc->host_addr = cpu_to_le32(0); |
228 | } else { | 230 | } else { |
229 | skb_trim(skb, 0); | 231 | skb_trim(skb, 0); |
232 | pci_dma_sync_single_for_device(priv->pdev, dma_addr, | ||
233 | priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); | ||
230 | desc->len = cpu_to_le16(priv->common.rx_mtu + 32); | 234 | desc->len = cpu_to_le16(priv->common.rx_mtu + 32); |
231 | } | 235 | } |
232 | 236 | ||
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c index aa97971a38af..3b3f1e45ab3e 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/rt2x00/rt2800pci.c | |||
@@ -652,6 +652,12 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry, | |||
652 | */ | 652 | */ |
653 | rxdesc->flags |= RX_FLAG_IV_STRIPPED; | 653 | rxdesc->flags |= RX_FLAG_IV_STRIPPED; |
654 | 654 | ||
655 | /* | ||
656 | * The hardware has already checked the Michael Mic and has | ||
657 | * stripped it from the frame. Signal this to mac80211. | ||
658 | */ | ||
659 | rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; | ||
660 | |||
655 | if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) | 661 | if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) |
656 | rxdesc->flags |= RX_FLAG_DECRYPTED; | 662 | rxdesc->flags |= RX_FLAG_DECRYPTED; |
657 | else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) | 663 | else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) |
@@ -1065,6 +1071,8 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = { | |||
1065 | { PCI_DEVICE(0x1814, 0x3390), PCI_DEVICE_DATA(&rt2800pci_ops) }, | 1071 | { PCI_DEVICE(0x1814, 0x3390), PCI_DEVICE_DATA(&rt2800pci_ops) }, |
1066 | #endif | 1072 | #endif |
1067 | #ifdef CONFIG_RT2800PCI_RT35XX | 1073 | #ifdef CONFIG_RT2800PCI_RT35XX |
1074 | { PCI_DEVICE(0x1432, 0x7711), PCI_DEVICE_DATA(&rt2800pci_ops) }, | ||
1075 | { PCI_DEVICE(0x1432, 0x7722), PCI_DEVICE_DATA(&rt2800pci_ops) }, | ||
1068 | { PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) }, | 1076 | { PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) }, |
1069 | { PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) }, | 1077 | { PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) }, |
1070 | { PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) }, | 1078 | { PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) }, |
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index b97a4a54ff4c..197a36c05fda 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c | |||
@@ -486,6 +486,12 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry, | |||
486 | */ | 486 | */ |
487 | rxdesc->flags |= RX_FLAG_IV_STRIPPED; | 487 | rxdesc->flags |= RX_FLAG_IV_STRIPPED; |
488 | 488 | ||
489 | /* | ||
490 | * The hardware has already checked the Michael Mic and has | ||
491 | * stripped it from the frame. Signal this to mac80211. | ||
492 | */ | ||
493 | rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; | ||
494 | |||
489 | if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) | 495 | if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) |
490 | rxdesc->flags |= RX_FLAG_DECRYPTED; | 496 | rxdesc->flags |= RX_FLAG_DECRYPTED; |
491 | else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) | 497 | else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) |
diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/wl1251/main.c index 012e1a4016fe..40372bac9482 100644 --- a/drivers/net/wireless/wl1251/main.c +++ b/drivers/net/wireless/wl1251/main.c | |||
@@ -1039,6 +1039,9 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw, | |||
1039 | 1039 | ||
1040 | if (changed & BSS_CHANGED_BEACON) { | 1040 | if (changed & BSS_CHANGED_BEACON) { |
1041 | beacon = ieee80211_beacon_get(hw, vif); | 1041 | beacon = ieee80211_beacon_get(hw, vif); |
1042 | if (!beacon) | ||
1043 | goto out_sleep; | ||
1044 | |||
1042 | ret = wl1251_cmd_template_set(wl, CMD_BEACON, beacon->data, | 1045 | ret = wl1251_cmd_template_set(wl, CMD_BEACON, beacon->data, |
1043 | beacon->len); | 1046 | beacon->len); |
1044 | 1047 | ||
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig index ffedfd492754..ea1580085347 100644 --- a/drivers/nfc/Kconfig +++ b/drivers/nfc/Kconfig | |||
@@ -3,7 +3,7 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | menuconfig NFC_DEVICES | 5 | menuconfig NFC_DEVICES |
6 | bool "NFC devices" | 6 | bool "Near Field Communication (NFC) devices" |
7 | default n | 7 | default n |
8 | ---help--- | 8 | ---help--- |
9 | You'll have to say Y if your computer contains an NFC device that | 9 | You'll have to say Y if your computer contains an NFC device that |
diff --git a/drivers/nfc/pn544.c b/drivers/nfc/pn544.c index bae647264dd6..724f65d8f9e4 100644 --- a/drivers/nfc/pn544.c +++ b/drivers/nfc/pn544.c | |||
@@ -60,7 +60,7 @@ enum pn544_irq { | |||
60 | struct pn544_info { | 60 | struct pn544_info { |
61 | struct miscdevice miscdev; | 61 | struct miscdevice miscdev; |
62 | struct i2c_client *i2c_dev; | 62 | struct i2c_client *i2c_dev; |
63 | struct regulator_bulk_data regs[2]; | 63 | struct regulator_bulk_data regs[3]; |
64 | 64 | ||
65 | enum pn544_state state; | 65 | enum pn544_state state; |
66 | wait_queue_head_t read_wait; | 66 | wait_queue_head_t read_wait; |
@@ -74,6 +74,7 @@ struct pn544_info { | |||
74 | 74 | ||
75 | static const char reg_vdd_io[] = "Vdd_IO"; | 75 | static const char reg_vdd_io[] = "Vdd_IO"; |
76 | static const char reg_vbat[] = "VBat"; | 76 | static const char reg_vbat[] = "VBat"; |
77 | static const char reg_vsim[] = "VSim"; | ||
77 | 78 | ||
78 | /* sysfs interface */ | 79 | /* sysfs interface */ |
79 | static ssize_t pn544_test(struct device *dev, | 80 | static ssize_t pn544_test(struct device *dev, |
@@ -740,6 +741,7 @@ static int __devinit pn544_probe(struct i2c_client *client, | |||
740 | 741 | ||
741 | info->regs[0].supply = reg_vdd_io; | 742 | info->regs[0].supply = reg_vdd_io; |
742 | info->regs[1].supply = reg_vbat; | 743 | info->regs[1].supply = reg_vbat; |
744 | info->regs[2].supply = reg_vsim; | ||
743 | r = regulator_bulk_get(&client->dev, ARRAY_SIZE(info->regs), | 745 | r = regulator_bulk_get(&client->dev, ARRAY_SIZE(info->regs), |
744 | info->regs); | 746 | info->regs); |
745 | if (r < 0) | 747 | if (r < 0) |
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 8ecaac983923..ea25e5bfcf23 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/mm.h> | 23 | #include <linux/mm.h> |
24 | #include <linux/fs.h> | 24 | #include <linux/fs.h> |
25 | #include <linux/capability.h> | 25 | #include <linux/capability.h> |
26 | #include <linux/security.h> | ||
26 | #include <linux/pci-aspm.h> | 27 | #include <linux/pci-aspm.h> |
27 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
28 | #include "pci.h" | 29 | #include "pci.h" |
@@ -368,7 +369,7 @@ pci_read_config(struct file *filp, struct kobject *kobj, | |||
368 | u8 *data = (u8*) buf; | 369 | u8 *data = (u8*) buf; |
369 | 370 | ||
370 | /* Several chips lock up trying to read undefined config space */ | 371 | /* Several chips lock up trying to read undefined config space */ |
371 | if (cap_raised(filp->f_cred->cap_effective, CAP_SYS_ADMIN)) { | 372 | if (security_capable(filp->f_cred, CAP_SYS_ADMIN) == 0) { |
372 | size = dev->cfg_size; | 373 | size = dev->cfg_size; |
373 | } else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) { | 374 | } else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) { |
374 | size = 128; | 375 | size = 128; |
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c index 0bdda5b3ed55..42fbf1a75576 100644 --- a/drivers/pcmcia/pcmcia_resource.c +++ b/drivers/pcmcia/pcmcia_resource.c | |||
@@ -518,6 +518,8 @@ int pcmcia_enable_device(struct pcmcia_device *p_dev) | |||
518 | flags |= CONF_ENABLE_IOCARD; | 518 | flags |= CONF_ENABLE_IOCARD; |
519 | if (flags & CONF_ENABLE_IOCARD) | 519 | if (flags & CONF_ENABLE_IOCARD) |
520 | s->socket.flags |= SS_IOCARD; | 520 | s->socket.flags |= SS_IOCARD; |
521 | if (flags & CONF_ENABLE_ZVCARD) | ||
522 | s->socket.flags |= SS_ZVCARD | SS_IOCARD; | ||
521 | if (flags & CONF_ENABLE_SPKR) { | 523 | if (flags & CONF_ENABLE_SPKR) { |
522 | s->socket.flags |= SS_SPKR_ENA; | 524 | s->socket.flags |= SS_SPKR_ENA; |
523 | status = CCSR_AUDIO_ENA; | 525 | status = CCSR_AUDIO_ENA; |
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c index 3755e7c8c715..2c540542b5af 100644 --- a/drivers/pcmcia/pxa2xx_base.c +++ b/drivers/pcmcia/pxa2xx_base.c | |||
@@ -215,7 +215,7 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt, | |||
215 | } | 215 | } |
216 | #endif | 216 | #endif |
217 | 217 | ||
218 | static void pxa2xx_configure_sockets(struct device *dev) | 218 | void pxa2xx_configure_sockets(struct device *dev) |
219 | { | 219 | { |
220 | struct pcmcia_low_level *ops = dev->platform_data; | 220 | struct pcmcia_low_level *ops = dev->platform_data; |
221 | /* | 221 | /* |
diff --git a/drivers/pcmcia/pxa2xx_base.h b/drivers/pcmcia/pxa2xx_base.h index bb62ea87b8f9..b609b45469ed 100644 --- a/drivers/pcmcia/pxa2xx_base.h +++ b/drivers/pcmcia/pxa2xx_base.h | |||
@@ -1,3 +1,4 @@ | |||
1 | int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt); | 1 | int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt); |
2 | void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops); | 2 | void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops); |
3 | void pxa2xx_configure_sockets(struct device *dev); | ||
3 | 4 | ||
diff --git a/drivers/pcmcia/pxa2xx_lubbock.c b/drivers/pcmcia/pxa2xx_lubbock.c index b9f8c8fb42bd..25afe637c657 100644 --- a/drivers/pcmcia/pxa2xx_lubbock.c +++ b/drivers/pcmcia/pxa2xx_lubbock.c | |||
@@ -226,6 +226,7 @@ int pcmcia_lubbock_init(struct sa1111_dev *sadev) | |||
226 | lubbock_set_misc_wr((1 << 15) | (1 << 14), 0); | 226 | lubbock_set_misc_wr((1 << 15) | (1 << 14), 0); |
227 | 227 | ||
228 | pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops); | 228 | pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops); |
229 | pxa2xx_configure_sockets(&sadev->dev); | ||
229 | ret = sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops, | 230 | ret = sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops, |
230 | pxa2xx_drv_pcmcia_add_one); | 231 | pxa2xx_drv_pcmcia_add_one); |
231 | } | 232 | } |
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index d163bc2e2b9e..a59af5b24f0a 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig | |||
@@ -227,7 +227,7 @@ config SONYPI_COMPAT | |||
227 | config IDEAPAD_LAPTOP | 227 | config IDEAPAD_LAPTOP |
228 | tristate "Lenovo IdeaPad Laptop Extras" | 228 | tristate "Lenovo IdeaPad Laptop Extras" |
229 | depends on ACPI | 229 | depends on ACPI |
230 | depends on RFKILL | 230 | depends on RFKILL && INPUT |
231 | select INPUT_SPARSEKMAP | 231 | select INPUT_SPARSEKMAP |
232 | help | 232 | help |
233 | This is a driver for the rfkill switches on Lenovo IdeaPad netbooks. | 233 | This is a driver for the rfkill switches on Lenovo IdeaPad netbooks. |
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index c5c4b8c32eb8..38b34a73866a 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c | |||
@@ -84,7 +84,7 @@ MODULE_LICENSE("GPL"); | |||
84 | */ | 84 | */ |
85 | #define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB" | 85 | #define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB" |
86 | #define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C" | 86 | #define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C" |
87 | #define WMID_GUID1 "6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3" | 87 | #define WMID_GUID1 "6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3" |
88 | #define WMID_GUID2 "95764E09-FB56-4e83-B31A-37761F60994A" | 88 | #define WMID_GUID2 "95764E09-FB56-4e83-B31A-37761F60994A" |
89 | #define WMID_GUID3 "61EF69EA-865C-4BC3-A502-A0DEBA0CB531" | 89 | #define WMID_GUID3 "61EF69EA-865C-4BC3-A502-A0DEBA0CB531" |
90 | 90 | ||
@@ -1280,7 +1280,7 @@ static ssize_t set_bool_threeg(struct device *dev, | |||
1280 | return -EINVAL; | 1280 | return -EINVAL; |
1281 | return count; | 1281 | return count; |
1282 | } | 1282 | } |
1283 | static DEVICE_ATTR(threeg, S_IWUGO | S_IRUGO | S_IWUSR, show_bool_threeg, | 1283 | static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg, |
1284 | set_bool_threeg); | 1284 | set_bool_threeg); |
1285 | 1285 | ||
1286 | static ssize_t show_interface(struct device *dev, struct device_attribute *attr, | 1286 | static ssize_t show_interface(struct device *dev, struct device_attribute *attr, |
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c index 4633fd8532cc..fe495939c307 100644 --- a/drivers/platform/x86/asus_acpi.c +++ b/drivers/platform/x86/asus_acpi.c | |||
@@ -1081,14 +1081,8 @@ static int asus_hotk_add_fs(struct acpi_device *device) | |||
1081 | struct proc_dir_entry *proc; | 1081 | struct proc_dir_entry *proc; |
1082 | mode_t mode; | 1082 | mode_t mode; |
1083 | 1083 | ||
1084 | /* | ||
1085 | * If parameter uid or gid is not changed, keep the default setting for | ||
1086 | * our proc entries (-rw-rw-rw-) else, it means we care about security, | ||
1087 | * and then set to -rw-rw---- | ||
1088 | */ | ||
1089 | |||
1090 | if ((asus_uid == 0) && (asus_gid == 0)) { | 1084 | if ((asus_uid == 0) && (asus_gid == 0)) { |
1091 | mode = S_IFREG | S_IRUGO | S_IWUGO; | 1085 | mode = S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP; |
1092 | } else { | 1086 | } else { |
1093 | mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP; | 1087 | mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP; |
1094 | printk(KERN_WARNING " asus_uid and asus_gid parameters are " | 1088 | printk(KERN_WARNING " asus_uid and asus_gid parameters are " |
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index 34657f96b5a5..ad24ef36f9f7 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c | |||
@@ -290,9 +290,12 @@ static int dell_rfkill_set(void *data, bool blocked) | |||
290 | dell_send_request(buffer, 17, 11); | 290 | dell_send_request(buffer, 17, 11); |
291 | 291 | ||
292 | /* If the hardware switch controls this radio, and the hardware | 292 | /* If the hardware switch controls this radio, and the hardware |
293 | switch is disabled, don't allow changing the software state */ | 293 | switch is disabled, don't allow changing the software state. |
294 | If the hardware switch is reported as not supported, always | ||
295 | fire the SMI to toggle the killswitch. */ | ||
294 | if ((hwswitch_state & BIT(hwswitch_bit)) && | 296 | if ((hwswitch_state & BIT(hwswitch_bit)) && |
295 | !(buffer->output[1] & BIT(16))) { | 297 | !(buffer->output[1] & BIT(16)) && |
298 | (buffer->output[1] & BIT(0))) { | ||
296 | ret = -EINVAL; | 299 | ret = -EINVAL; |
297 | goto out; | 300 | goto out; |
298 | } | 301 | } |
@@ -398,6 +401,23 @@ static const struct file_operations dell_debugfs_fops = { | |||
398 | 401 | ||
399 | static void dell_update_rfkill(struct work_struct *ignored) | 402 | static void dell_update_rfkill(struct work_struct *ignored) |
400 | { | 403 | { |
404 | int status; | ||
405 | |||
406 | get_buffer(); | ||
407 | dell_send_request(buffer, 17, 11); | ||
408 | status = buffer->output[1]; | ||
409 | release_buffer(); | ||
410 | |||
411 | /* if hardware rfkill is not supported, set it explicitly */ | ||
412 | if (!(status & BIT(0))) { | ||
413 | if (wifi_rfkill) | ||
414 | dell_rfkill_set((void *)1, !((status & BIT(17)) >> 17)); | ||
415 | if (bluetooth_rfkill) | ||
416 | dell_rfkill_set((void *)2, !((status & BIT(18)) >> 18)); | ||
417 | if (wwan_rfkill) | ||
418 | dell_rfkill_set((void *)3, !((status & BIT(19)) >> 19)); | ||
419 | } | ||
420 | |||
401 | if (wifi_rfkill) | 421 | if (wifi_rfkill) |
402 | dell_rfkill_query(wifi_rfkill, (void *)1); | 422 | dell_rfkill_query(wifi_rfkill, (void *)1); |
403 | if (bluetooth_rfkill) | 423 | if (bluetooth_rfkill) |
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c index 930e62762365..61433d492862 100644 --- a/drivers/platform/x86/intel_pmic_gpio.c +++ b/drivers/platform/x86/intel_pmic_gpio.c | |||
@@ -60,69 +60,20 @@ enum pmic_gpio_register { | |||
60 | #define GPOSW_DOU 0x08 | 60 | #define GPOSW_DOU 0x08 |
61 | #define GPOSW_RDRV 0x30 | 61 | #define GPOSW_RDRV 0x30 |
62 | 62 | ||
63 | #define GPIO_UPDATE_TYPE 0x80000000 | ||
63 | 64 | ||
64 | #define NUM_GPIO 24 | 65 | #define NUM_GPIO 24 |
65 | 66 | ||
66 | struct pmic_gpio_irq { | ||
67 | spinlock_t lock; | ||
68 | u32 trigger[NUM_GPIO]; | ||
69 | u32 dirty; | ||
70 | struct work_struct work; | ||
71 | }; | ||
72 | |||
73 | |||
74 | struct pmic_gpio { | 67 | struct pmic_gpio { |
68 | struct mutex buslock; | ||
75 | struct gpio_chip chip; | 69 | struct gpio_chip chip; |
76 | struct pmic_gpio_irq irqtypes; | ||
77 | void *gpiointr; | 70 | void *gpiointr; |
78 | int irq; | 71 | int irq; |
79 | unsigned irq_base; | 72 | unsigned irq_base; |
73 | unsigned int update_type; | ||
74 | u32 trigger_type; | ||
80 | }; | 75 | }; |
81 | 76 | ||
82 | static void pmic_program_irqtype(int gpio, int type) | ||
83 | { | ||
84 | if (type & IRQ_TYPE_EDGE_RISING) | ||
85 | intel_scu_ipc_update_register(GPIO0 + gpio, 0x20, 0x20); | ||
86 | else | ||
87 | intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x20); | ||
88 | |||
89 | if (type & IRQ_TYPE_EDGE_FALLING) | ||
90 | intel_scu_ipc_update_register(GPIO0 + gpio, 0x10, 0x10); | ||
91 | else | ||
92 | intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x10); | ||
93 | }; | ||
94 | |||
95 | static void pmic_irqtype_work(struct work_struct *work) | ||
96 | { | ||
97 | struct pmic_gpio_irq *t = | ||
98 | container_of(work, struct pmic_gpio_irq, work); | ||
99 | unsigned long flags; | ||
100 | int i; | ||
101 | u16 type; | ||
102 | |||
103 | spin_lock_irqsave(&t->lock, flags); | ||
104 | /* As we drop the lock, we may need multiple scans if we race the | ||
105 | pmic_irq_type function */ | ||
106 | while (t->dirty) { | ||
107 | /* | ||
108 | * For each pin that has the dirty bit set send an IPC | ||
109 | * message to configure the hardware via the PMIC | ||
110 | */ | ||
111 | for (i = 0; i < NUM_GPIO; i++) { | ||
112 | if (!(t->dirty & (1 << i))) | ||
113 | continue; | ||
114 | t->dirty &= ~(1 << i); | ||
115 | /* We can't trust the array entry or dirty | ||
116 | once the lock is dropped */ | ||
117 | type = t->trigger[i]; | ||
118 | spin_unlock_irqrestore(&t->lock, flags); | ||
119 | pmic_program_irqtype(i, type); | ||
120 | spin_lock_irqsave(&t->lock, flags); | ||
121 | } | ||
122 | } | ||
123 | spin_unlock_irqrestore(&t->lock, flags); | ||
124 | } | ||
125 | |||
126 | static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset) | 77 | static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset) |
127 | { | 78 | { |
128 | if (offset > 8) { | 79 | if (offset > 8) { |
@@ -190,25 +141,24 @@ static void pmic_gpio_set(struct gpio_chip *chip, unsigned offset, int value) | |||
190 | 1 << (offset - 16)); | 141 | 1 << (offset - 16)); |
191 | } | 142 | } |
192 | 143 | ||
193 | static int pmic_irq_type(unsigned irq, unsigned type) | 144 | /* |
145 | * This is called from genirq with pg->buslock locked and | ||
146 | * irq_desc->lock held. We can not access the scu bus here, so we | ||
147 | * store the change and update in the bus_sync_unlock() function below | ||
148 | */ | ||
149 | static int pmic_irq_type(struct irq_data *data, unsigned type) | ||
194 | { | 150 | { |
195 | struct pmic_gpio *pg = get_irq_chip_data(irq); | 151 | struct pmic_gpio *pg = irq_data_get_irq_chip_data(data); |
196 | u32 gpio = irq - pg->irq_base; | 152 | u32 gpio = data->irq - pg->irq_base; |
197 | unsigned long flags; | ||
198 | 153 | ||
199 | if (gpio >= pg->chip.ngpio) | 154 | if (gpio >= pg->chip.ngpio) |
200 | return -EINVAL; | 155 | return -EINVAL; |
201 | 156 | ||
202 | spin_lock_irqsave(&pg->irqtypes.lock, flags); | 157 | pg->trigger_type = type; |
203 | pg->irqtypes.trigger[gpio] = type; | 158 | pg->update_type = gpio | GPIO_UPDATE_TYPE; |
204 | pg->irqtypes.dirty |= (1 << gpio); | ||
205 | spin_unlock_irqrestore(&pg->irqtypes.lock, flags); | ||
206 | schedule_work(&pg->irqtypes.work); | ||
207 | return 0; | 159 | return 0; |
208 | } | 160 | } |
209 | 161 | ||
210 | |||
211 | |||
212 | static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset) | 162 | static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset) |
213 | { | 163 | { |
214 | struct pmic_gpio *pg = container_of(chip, struct pmic_gpio, chip); | 164 | struct pmic_gpio *pg = container_of(chip, struct pmic_gpio, chip); |
@@ -217,38 +167,32 @@ static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset) | |||
217 | } | 167 | } |
218 | 168 | ||
219 | /* the gpiointr register is read-clear, so just do nothing. */ | 169 | /* the gpiointr register is read-clear, so just do nothing. */ |
220 | static void pmic_irq_unmask(unsigned irq) | 170 | static void pmic_irq_unmask(struct irq_data *data) { } |
221 | { | ||
222 | }; | ||
223 | 171 | ||
224 | static void pmic_irq_mask(unsigned irq) | 172 | static void pmic_irq_mask(struct irq_data *data) { } |
225 | { | ||
226 | }; | ||
227 | 173 | ||
228 | static struct irq_chip pmic_irqchip = { | 174 | static struct irq_chip pmic_irqchip = { |
229 | .name = "PMIC-GPIO", | 175 | .name = "PMIC-GPIO", |
230 | .mask = pmic_irq_mask, | 176 | .irq_mask = pmic_irq_mask, |
231 | .unmask = pmic_irq_unmask, | 177 | .irq_unmask = pmic_irq_unmask, |
232 | .set_type = pmic_irq_type, | 178 | .irq_set_type = pmic_irq_type, |
233 | }; | 179 | }; |
234 | 180 | ||
235 | static void pmic_irq_handler(unsigned irq, struct irq_desc *desc) | 181 | static irqreturn_t pmic_irq_handler(int irq, void *data) |
236 | { | 182 | { |
237 | struct pmic_gpio *pg = (struct pmic_gpio *)get_irq_data(irq); | 183 | struct pmic_gpio *pg = data; |
238 | u8 intsts = *((u8 *)pg->gpiointr + 4); | 184 | u8 intsts = *((u8 *)pg->gpiointr + 4); |
239 | int gpio; | 185 | int gpio; |
186 | irqreturn_t ret = IRQ_NONE; | ||
240 | 187 | ||
241 | for (gpio = 0; gpio < 8; gpio++) { | 188 | for (gpio = 0; gpio < 8; gpio++) { |
242 | if (intsts & (1 << gpio)) { | 189 | if (intsts & (1 << gpio)) { |
243 | pr_debug("pmic pin %d triggered\n", gpio); | 190 | pr_debug("pmic pin %d triggered\n", gpio); |
244 | generic_handle_irq(pg->irq_base + gpio); | 191 | generic_handle_irq(pg->irq_base + gpio); |
192 | ret = IRQ_HANDLED; | ||
245 | } | 193 | } |
246 | } | 194 | } |
247 | 195 | return ret; | |
248 | if (desc->chip->irq_eoi) | ||
249 | desc->chip->irq_eoi(irq_get_irq_data(irq)); | ||
250 | else | ||
251 | dev_warn(pg->chip.dev, "missing EOI handler for irq %d\n", irq); | ||
252 | } | 196 | } |
253 | 197 | ||
254 | static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev) | 198 | static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev) |
@@ -297,8 +241,7 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev) | |||
297 | pg->chip.can_sleep = 1; | 241 | pg->chip.can_sleep = 1; |
298 | pg->chip.dev = dev; | 242 | pg->chip.dev = dev; |
299 | 243 | ||
300 | INIT_WORK(&pg->irqtypes.work, pmic_irqtype_work); | 244 | mutex_init(&pg->buslock); |
301 | spin_lock_init(&pg->irqtypes.lock); | ||
302 | 245 | ||
303 | pg->chip.dev = dev; | 246 | pg->chip.dev = dev; |
304 | retval = gpiochip_add(&pg->chip); | 247 | retval = gpiochip_add(&pg->chip); |
@@ -306,8 +249,13 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev) | |||
306 | printk(KERN_ERR "%s: Can not add pmic gpio chip.\n", __func__); | 249 | printk(KERN_ERR "%s: Can not add pmic gpio chip.\n", __func__); |
307 | goto err; | 250 | goto err; |
308 | } | 251 | } |
309 | set_irq_data(pg->irq, pg); | 252 | |
310 | set_irq_chained_handler(pg->irq, pmic_irq_handler); | 253 | retval = request_irq(pg->irq, pmic_irq_handler, 0, "pmic", pg); |
254 | if (retval) { | ||
255 | printk(KERN_WARNING "pmic: Interrupt request failed\n"); | ||
256 | goto err; | ||
257 | } | ||
258 | |||
311 | for (i = 0; i < 8; i++) { | 259 | for (i = 0; i < 8; i++) { |
312 | set_irq_chip_and_handler_name(i + pg->irq_base, &pmic_irqchip, | 260 | set_irq_chip_and_handler_name(i + pg->irq_base, &pmic_irqchip, |
313 | handle_simple_irq, "demux"); | 261 | handle_simple_irq, "demux"); |
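The hunk above drops the chained flow handler in favour of request_irq() with an ordinary handler, and moves the irq_chip callbacks to the struct irq_data based variants. A minimal sketch of that pattern for a hypothetical 8-line demultiplexer (the foo_* names and the status register are placeholders, not from this patch):

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>

struct foo_demux {
	unsigned int irq_base;	/* first of the 8 child interrupts */
	void __iomem *status;	/* hypothetical read-to-clear status register */
};

/* the status register is read-to-clear, so mask/unmask have nothing to do */
static void foo_irq_mask(struct irq_data *data) { }
static void foo_irq_unmask(struct irq_data *data) { }

static struct irq_chip foo_irqchip = {
	.name		= "foo-demux",
	.irq_mask	= foo_irq_mask,
	.irq_unmask	= foo_irq_unmask,
};

static irqreturn_t foo_irq_handler(int irq, void *dev_id)
{
	struct foo_demux *foo = dev_id;
	u8 pending = readb(foo->status);
	irqreturn_t ret = IRQ_NONE;
	int bit;

	for (bit = 0; bit < 8; bit++) {
		if (pending & (1 << bit)) {
			generic_handle_irq(foo->irq_base + bit);
			ret = IRQ_HANDLED;
		}
	}
	return ret;
}

/* in probe(), as in the hunk above:
 *	set_irq_chip_and_handler_name(child, &foo_irqchip, handle_simple_irq, "demux");
 *	err = request_irq(parent_irq, foo_irq_handler, 0, "foo-demux", foo);
 */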
diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c index 1fe0f1feff71..865ef78d6f1a 100644 --- a/drivers/platform/x86/tc1100-wmi.c +++ b/drivers/platform/x86/tc1100-wmi.c | |||
@@ -162,7 +162,7 @@ set_bool_##value(struct device *dev, struct device_attribute *attr, \ | |||
162 | return -EINVAL; \ | 162 | return -EINVAL; \ |
163 | return count; \ | 163 | return count; \ |
164 | } \ | 164 | } \ |
165 | static DEVICE_ATTR(value, S_IWUGO | S_IRUGO | S_IWUSR, \ | 165 | static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, \ |
166 | show_bool_##value, set_bool_##value); | 166 | show_bool_##value, set_bool_##value); |
167 | 167 | ||
168 | show_set_bool(wireless, TC1100_INSTANCE_WIRELESS); | 168 | show_set_bool(wireless, TC1100_INSTANCE_WIRELESS); |
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index dd599585c6a9..eb9922385ef8 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c | |||
@@ -2275,16 +2275,12 @@ static void tpacpi_input_send_key(const unsigned int scancode) | |||
2275 | if (keycode != KEY_RESERVED) { | 2275 | if (keycode != KEY_RESERVED) { |
2276 | mutex_lock(&tpacpi_inputdev_send_mutex); | 2276 | mutex_lock(&tpacpi_inputdev_send_mutex); |
2277 | 2277 | ||
2278 | input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, scancode); | ||
2278 | input_report_key(tpacpi_inputdev, keycode, 1); | 2279 | input_report_key(tpacpi_inputdev, keycode, 1); |
2279 | if (keycode == KEY_UNKNOWN) | ||
2280 | input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, | ||
2281 | scancode); | ||
2282 | input_sync(tpacpi_inputdev); | 2280 | input_sync(tpacpi_inputdev); |
2283 | 2281 | ||
2282 | input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, scancode); | ||
2284 | input_report_key(tpacpi_inputdev, keycode, 0); | 2283 | input_report_key(tpacpi_inputdev, keycode, 0); |
2285 | if (keycode == KEY_UNKNOWN) | ||
2286 | input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, | ||
2287 | scancode); | ||
2288 | input_sync(tpacpi_inputdev); | 2284 | input_sync(tpacpi_inputdev); |
2289 | 2285 | ||
2290 | mutex_unlock(&tpacpi_inputdev_send_mutex); | 2286 | mutex_unlock(&tpacpi_inputdev_send_mutex); |
diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c index cba1b43f7519..a4e8eb9fece6 100644 --- a/drivers/pps/kapi.c +++ b/drivers/pps/kapi.c | |||
@@ -168,7 +168,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event, | |||
168 | { | 168 | { |
169 | unsigned long flags; | 169 | unsigned long flags; |
170 | int captured = 0; | 170 | int captured = 0; |
171 | struct pps_ktime ts_real; | 171 | struct pps_ktime ts_real = { .sec = 0, .nsec = 0, .flags = 0 }; |
172 | 172 | ||
173 | /* check event type */ | 173 | /* check event type */ |
174 | BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0); | 174 | BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0); |
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c index 76b41853a877..1269fbd2deca 100644 --- a/drivers/rapidio/rio-sysfs.c +++ b/drivers/rapidio/rio-sysfs.c | |||
@@ -77,9 +77,9 @@ rio_read_config(struct file *filp, struct kobject *kobj, | |||
77 | 77 | ||
78 | /* Several chips lock up trying to read undefined config space */ | 78 | /* Several chips lock up trying to read undefined config space */ |
79 | if (capable(CAP_SYS_ADMIN)) | 79 | if (capable(CAP_SYS_ADMIN)) |
80 | size = 0x200000; | 80 | size = RIO_MAINT_SPACE_SZ; |
81 | 81 | ||
82 | if (off > size) | 82 | if (off >= size) |
83 | return 0; | 83 | return 0; |
84 | if (off + count > size) { | 84 | if (off + count > size) { |
85 | size -= off; | 85 | size -= off; |
@@ -147,10 +147,10 @@ rio_write_config(struct file *filp, struct kobject *kobj, | |||
147 | loff_t init_off = off; | 147 | loff_t init_off = off; |
148 | u8 *data = (u8 *) buf; | 148 | u8 *data = (u8 *) buf; |
149 | 149 | ||
150 | if (off > 0x200000) | 150 | if (off >= RIO_MAINT_SPACE_SZ) |
151 | return 0; | 151 | return 0; |
152 | if (off + count > 0x200000) { | 152 | if (off + count > RIO_MAINT_SPACE_SZ) { |
153 | size = 0x200000 - off; | 153 | size = RIO_MAINT_SPACE_SZ - off; |
154 | count = size; | 154 | count = size; |
155 | } | 155 | } |
156 | 156 | ||
@@ -200,7 +200,7 @@ static struct bin_attribute rio_config_attr = { | |||
200 | .name = "config", | 200 | .name = "config", |
201 | .mode = S_IRUGO | S_IWUSR, | 201 | .mode = S_IRUGO | S_IWUSR, |
202 | }, | 202 | }, |
203 | .size = 0x200000, | 203 | .size = RIO_MAINT_SPACE_SZ, |
204 | .read = rio_read_config, | 204 | .read = rio_read_config, |
205 | .write = rio_write_config, | 205 | .write = rio_write_config, |
206 | }; | 206 | }; |
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c index f53d31b950d4..2bb5de1f2421 100644 --- a/drivers/regulator/mc13xxx-regulator-core.c +++ b/drivers/regulator/mc13xxx-regulator-core.c | |||
@@ -174,7 +174,7 @@ static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev) | |||
174 | 174 | ||
175 | dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val); | 175 | dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val); |
176 | 176 | ||
177 | BUG_ON(val < 0 || val > mc13xxx_regulators[id].desc.n_voltages); | 177 | BUG_ON(val > mc13xxx_regulators[id].desc.n_voltages); |
178 | 178 | ||
179 | return mc13xxx_regulators[id].voltages[val]; | 179 | return mc13xxx_regulators[id].voltages[val]; |
180 | } | 180 | } |
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c index 8b0d2c4bde91..06df898842c0 100644 --- a/drivers/regulator/wm831x-dcdc.c +++ b/drivers/regulator/wm831x-dcdc.c | |||
@@ -120,6 +120,7 @@ static unsigned int wm831x_dcdc_get_mode(struct regulator_dev *rdev) | |||
120 | return REGULATOR_MODE_IDLE; | 120 | return REGULATOR_MODE_IDLE; |
121 | default: | 121 | default: |
122 | BUG(); | 122 | BUG(); |
123 | return -EINVAL; | ||
123 | } | 124 | } |
124 | } | 125 | } |
125 | 126 | ||
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index cdd97192dc69..4941cade319f 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig | |||
@@ -97,6 +97,18 @@ config RTC_INTF_DEV | |||
97 | 97 | ||
98 | If unsure, say Y. | 98 | If unsure, say Y. |
99 | 99 | ||
100 | config RTC_INTF_DEV_UIE_EMUL | ||
101 | bool "RTC UIE emulation on dev interface" | ||
102 | depends on RTC_INTF_DEV | ||
103 | help | ||
104 | Provides an emulation for RTC_UIE if the underlying rtc chip | ||
105 | driver does not expose RTC_UIE ioctls. Those requests generate | ||
106 | once-per-second update interrupts, used for synchronization. | ||
107 | |||
108 | The emulation code will read the time from the hardware | ||
109 | clock several times per second, please enable this option | ||
110 | only if you know that you really need it. | ||
111 | |||
100 | config RTC_DRV_TEST | 112 | config RTC_DRV_TEST |
101 | tristate "Test driver/device" | 113 | tristate "Test driver/device" |
102 | help | 114 | help |
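From user space the emulated update interrupt behaves exactly like a hardware one; a minimal sketch of a consumer, assuming the usual /dev/rtc0 node:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/rtc.h>

int main(void)
{
	unsigned long data;
	int fd = open("/dev/rtc0", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, RTC_UIE_ON, 0) < 0)	/* emulated when the chip lacks hardware UIE */
		return 1;
	/* read() blocks until the next once-per-second update interrupt */
	if (read(fd, &data, sizeof(data)) > 0)
		printf("updates since last read: %lu\n", data >> 8);
	ioctl(fd, RTC_UIE_OFF, 0);
	close(fd);
	return 0;
}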
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index a0c01967244d..cb2f0728fd70 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c | |||
@@ -209,9 +209,8 @@ int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled) | |||
209 | } | 209 | } |
210 | 210 | ||
211 | if (err) | 211 | if (err) |
212 | return err; | 212 | /* nothing */; |
213 | 213 | else if (!rtc->ops) | |
214 | if (!rtc->ops) | ||
215 | err = -ENODEV; | 214 | err = -ENODEV; |
216 | else if (!rtc->ops->alarm_irq_enable) | 215 | else if (!rtc->ops->alarm_irq_enable) |
217 | err = -EINVAL; | 216 | err = -EINVAL; |
@@ -229,6 +228,12 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled) | |||
229 | if (err) | 228 | if (err) |
230 | return err; | 229 | return err; |
231 | 230 | ||
231 | #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL | ||
232 | if (enabled == 0 && rtc->uie_irq_active) { | ||
233 | mutex_unlock(&rtc->ops_lock); | ||
234 | return rtc_dev_update_irq_enable_emul(rtc, 0); | ||
235 | } | ||
236 | #endif | ||
232 | /* make sure we're changing state */ | 237 | /* make sure we're changing state */ |
233 | if (rtc->uie_rtctimer.enabled == enabled) | 238 | if (rtc->uie_rtctimer.enabled == enabled) |
234 | goto out; | 239 | goto out; |
@@ -248,6 +253,16 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled) | |||
248 | 253 | ||
249 | out: | 254 | out: |
250 | mutex_unlock(&rtc->ops_lock); | 255 | mutex_unlock(&rtc->ops_lock); |
256 | #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL | ||
257 | /* | ||
258 | * Enable emulation if the driver did not provide | ||
259 | * the update_irq_enable function pointer or if returned | ||
260 | * -EINVAL to signal that it has been configured without | ||
261 | * interrupts or that are not available at the moment. | ||
262 | */ | ||
263 | if (err == -EINVAL) | ||
264 | err = rtc_dev_update_irq_enable_emul(rtc, enabled); | ||
265 | #endif | ||
251 | return err; | 266 | return err; |
252 | 267 | ||
253 | } | 268 | } |
@@ -263,7 +278,7 @@ EXPORT_SYMBOL_GPL(rtc_update_irq_enable); | |||
263 | * | 278 | * |
264 | * Triggers the registered irq_task function callback. | 279 | * Triggers the registered irq_task function callback. |
265 | */ | 280 | */ |
266 | static void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode) | 281 | void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode) |
267 | { | 282 | { |
268 | unsigned long flags; | 283 | unsigned long flags; |
269 | 284 | ||
diff --git a/drivers/rtc/rtc-at32ap700x.c b/drivers/rtc/rtc-at32ap700x.c index b2752b6e7a2f..e725d51e773d 100644 --- a/drivers/rtc/rtc-at32ap700x.c +++ b/drivers/rtc/rtc-at32ap700x.c | |||
@@ -134,36 +134,29 @@ static int at32_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) | |||
134 | return ret; | 134 | return ret; |
135 | } | 135 | } |
136 | 136 | ||
137 | static int at32_rtc_ioctl(struct device *dev, unsigned int cmd, | 137 | static int at32_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) |
138 | unsigned long arg) | ||
139 | { | 138 | { |
140 | struct rtc_at32ap700x *rtc = dev_get_drvdata(dev); | 139 | struct rtc_at32ap700x *rtc = dev_get_drvdata(dev); |
141 | int ret = 0; | 140 | int ret = 0; |
142 | 141 | ||
143 | spin_lock_irq(&rtc->lock); | 142 | spin_lock_irq(&rtc->lock); |
144 | 143 | ||
145 | switch (cmd) { | 144 | if (enabled) { |
146 | case RTC_AIE_ON: | ||
147 | if (rtc_readl(rtc, VAL) > rtc->alarm_time) { | 145 | if (rtc_readl(rtc, VAL) > rtc->alarm_time) { |
148 | ret = -EINVAL; | 146 | ret = -EINVAL; |
149 | break; | 147 | goto out; |
150 | } | 148 | } |
151 | rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL) | 149 | rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL) |
152 | | RTC_BIT(CTRL_TOPEN)); | 150 | | RTC_BIT(CTRL_TOPEN)); |
153 | rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI)); | 151 | rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI)); |
154 | rtc_writel(rtc, IER, RTC_BIT(IER_TOPI)); | 152 | rtc_writel(rtc, IER, RTC_BIT(IER_TOPI)); |
155 | break; | 153 | } else { |
156 | case RTC_AIE_OFF: | ||
157 | rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL) | 154 | rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL) |
158 | & ~RTC_BIT(CTRL_TOPEN)); | 155 | & ~RTC_BIT(CTRL_TOPEN)); |
159 | rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI)); | 156 | rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI)); |
160 | rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI)); | 157 | rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI)); |
161 | break; | ||
162 | default: | ||
163 | ret = -ENOIOCTLCMD; | ||
164 | break; | ||
165 | } | 158 | } |
166 | 159 | out: | |
167 | spin_unlock_irq(&rtc->lock); | 160 | spin_unlock_irq(&rtc->lock); |
168 | 161 | ||
169 | return ret; | 162 | return ret; |
@@ -195,11 +188,11 @@ static irqreturn_t at32_rtc_interrupt(int irq, void *dev_id) | |||
195 | } | 188 | } |
196 | 189 | ||
197 | static struct rtc_class_ops at32_rtc_ops = { | 190 | static struct rtc_class_ops at32_rtc_ops = { |
198 | .ioctl = at32_rtc_ioctl, | ||
199 | .read_time = at32_rtc_readtime, | 191 | .read_time = at32_rtc_readtime, |
200 | .set_time = at32_rtc_settime, | 192 | .set_time = at32_rtc_settime, |
201 | .read_alarm = at32_rtc_readalarm, | 193 | .read_alarm = at32_rtc_readalarm, |
202 | .set_alarm = at32_rtc_setalarm, | 194 | .set_alarm = at32_rtc_setalarm, |
195 | .alarm_irq_enable = at32_rtc_alarm_irq_enable, | ||
203 | }; | 196 | }; |
204 | 197 | ||
205 | static int __init at32_rtc_probe(struct platform_device *pdev) | 198 | static int __init at32_rtc_probe(struct platform_device *pdev) |
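The at32ap700x conversion above is the template repeated by the driver patches that follow: the RTC_AIE_ON/RTC_AIE_OFF ioctl cases become an .alarm_irq_enable callback in rtc_class_ops. A bare skeleton of that shape, with hypothetical foo_* names and a placeholder register offset standing in for a real device:

#include <linux/device.h>
#include <linux/io.h>
#include <linux/rtc.h>
#include <linux/spinlock.h>

#define FOO_ALARM_IRQ_MASK_OFF	0x10	/* placeholder register offset */

struct foo_rtc {
	void __iomem *ioaddr;
	spinlock_t lock;
};

static int foo_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
	struct foo_rtc *rtc = dev_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&rtc->lock, flags);
	/* unmask the alarm interrupt when enabled, mask it otherwise */
	writel(enabled ? 1 : 0, rtc->ioaddr + FOO_ALARM_IRQ_MASK_OFF);
	spin_unlock_irqrestore(&rtc->lock, flags);

	return 0;
}

static const struct rtc_class_ops foo_rtc_ops = {
	/* .read_time/.set_time/.read_alarm/.set_alarm stay as they were */
	.alarm_irq_enable = foo_rtc_alarm_irq_enable,
};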
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c index bc8bbca9a2e2..26d1cf5d19ae 100644 --- a/drivers/rtc/rtc-at91rm9200.c +++ b/drivers/rtc/rtc-at91rm9200.c | |||
@@ -195,13 +195,6 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd, | |||
195 | 195 | ||
196 | /* important: scrub old status before enabling IRQs */ | 196 | /* important: scrub old status before enabling IRQs */ |
197 | switch (cmd) { | 197 | switch (cmd) { |
198 | case RTC_AIE_OFF: /* alarm off */ | ||
199 | at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM); | ||
200 | break; | ||
201 | case RTC_AIE_ON: /* alarm on */ | ||
202 | at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM); | ||
203 | at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM); | ||
204 | break; | ||
205 | case RTC_UIE_OFF: /* update off */ | 198 | case RTC_UIE_OFF: /* update off */ |
206 | at91_sys_write(AT91_RTC_IDR, AT91_RTC_SECEV); | 199 | at91_sys_write(AT91_RTC_IDR, AT91_RTC_SECEV); |
207 | break; | 200 | break; |
@@ -217,6 +210,18 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd, | |||
217 | return ret; | 210 | return ret; |
218 | } | 211 | } |
219 | 212 | ||
213 | static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) | ||
214 | { | ||
215 | pr_debug("%s(): enabled=%08x\n", __func__, enabled); | ||
216 | |||
217 | if (enabled) { | ||
218 | at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM); | ||
219 | at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM); | ||
220 | } else | ||
221 | at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM); | ||
222 | |||
223 | return 0; | ||
224 | } | ||
220 | /* | 225 | /* |
221 | * Provide additional RTC information in /proc/driver/rtc | 226 | * Provide additional RTC information in /proc/driver/rtc |
222 | */ | 227 | */ |
@@ -270,6 +275,7 @@ static const struct rtc_class_ops at91_rtc_ops = { | |||
270 | .read_alarm = at91_rtc_readalarm, | 275 | .read_alarm = at91_rtc_readalarm, |
271 | .set_alarm = at91_rtc_setalarm, | 276 | .set_alarm = at91_rtc_setalarm, |
272 | .proc = at91_rtc_proc, | 277 | .proc = at91_rtc_proc, |
278 | .alarm_irq_enable = at91_rtc_alarm_irq_enable, | ||
273 | }; | 279 | }; |
274 | 280 | ||
275 | /* | 281 | /* |
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c index f677e0710ca1..5469c52cba3d 100644 --- a/drivers/rtc/rtc-at91sam9.c +++ b/drivers/rtc/rtc-at91sam9.c | |||
@@ -229,12 +229,6 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd, | |||
229 | dev_dbg(dev, "ioctl: cmd=%08x, arg=%08lx, mr %08x\n", cmd, arg, mr); | 229 | dev_dbg(dev, "ioctl: cmd=%08x, arg=%08lx, mr %08x\n", cmd, arg, mr); |
230 | 230 | ||
231 | switch (cmd) { | 231 | switch (cmd) { |
232 | case RTC_AIE_OFF: /* alarm off */ | ||
233 | rtt_writel(rtc, MR, mr & ~AT91_RTT_ALMIEN); | ||
234 | break; | ||
235 | case RTC_AIE_ON: /* alarm on */ | ||
236 | rtt_writel(rtc, MR, mr | AT91_RTT_ALMIEN); | ||
237 | break; | ||
238 | case RTC_UIE_OFF: /* update off */ | 232 | case RTC_UIE_OFF: /* update off */ |
239 | rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN); | 233 | rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN); |
240 | break; | 234 | break; |
@@ -249,6 +243,19 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd, | |||
249 | return ret; | 243 | return ret; |
250 | } | 244 | } |
251 | 245 | ||
246 | static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) | ||
247 | { | ||
248 | struct sam9_rtc *rtc = dev_get_drvdata(dev); | ||
249 | u32 mr = rtt_readl(rtc, MR); | ||
250 | |||
251 | dev_dbg(dev, "alarm_irq_enable: enabled=%08x, mr %08x\n", enabled, mr); | ||
252 | if (enabled) | ||
253 | rtt_writel(rtc, MR, mr | AT91_RTT_ALMIEN); | ||
254 | else | ||
255 | rtt_writel(rtc, MR, mr & ~AT91_RTT_ALMIEN); | ||
256 | return 0; | ||
257 | } | ||
258 | |||
252 | /* | 259 | /* |
253 | * Provide additional RTC information in /proc/driver/rtc | 260 | * Provide additional RTC information in /proc/driver/rtc |
254 | */ | 261 | */ |
@@ -302,6 +309,7 @@ static const struct rtc_class_ops at91_rtc_ops = { | |||
302 | .read_alarm = at91_rtc_readalarm, | 309 | .read_alarm = at91_rtc_readalarm, |
303 | .set_alarm = at91_rtc_setalarm, | 310 | .set_alarm = at91_rtc_setalarm, |
304 | .proc = at91_rtc_proc, | 311 | .proc = at91_rtc_proc, |
312 | .alarm_irq_enable = at91_rtc_alarm_irq_enable, | ||
305 | }; | 313 | }; |
306 | 314 | ||
307 | /* | 315 | /* |
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c index b4b6087f2234..17971d93354d 100644 --- a/drivers/rtc/rtc-bfin.c +++ b/drivers/rtc/rtc-bfin.c | |||
@@ -259,15 +259,6 @@ static int bfin_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long ar | |||
259 | bfin_rtc_int_clear(~RTC_ISTAT_SEC); | 259 | bfin_rtc_int_clear(~RTC_ISTAT_SEC); |
260 | break; | 260 | break; |
261 | 261 | ||
262 | case RTC_AIE_ON: | ||
263 | dev_dbg_stamp(dev); | ||
264 | bfin_rtc_int_set_alarm(rtc); | ||
265 | break; | ||
266 | case RTC_AIE_OFF: | ||
267 | dev_dbg_stamp(dev); | ||
268 | bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY)); | ||
269 | break; | ||
270 | |||
271 | default: | 262 | default: |
272 | dev_dbg_stamp(dev); | 263 | dev_dbg_stamp(dev); |
273 | ret = -ENOIOCTLCMD; | 264 | ret = -ENOIOCTLCMD; |
@@ -276,6 +267,17 @@ static int bfin_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long ar | |||
276 | return ret; | 267 | return ret; |
277 | } | 268 | } |
278 | 269 | ||
270 | static int bfin_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) | ||
271 | { | ||
272 | struct bfin_rtc *rtc = dev_get_drvdata(dev); | ||
273 | |||
274 | dev_dbg_stamp(dev); | ||
275 | if (enabled) | ||
276 | bfin_rtc_int_set_alarm(rtc); | ||
277 | else | ||
278 | bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY)); | ||
279 | } | ||
280 | |||
279 | static int bfin_rtc_read_time(struct device *dev, struct rtc_time *tm) | 281 | static int bfin_rtc_read_time(struct device *dev, struct rtc_time *tm) |
280 | { | 282 | { |
281 | struct bfin_rtc *rtc = dev_get_drvdata(dev); | 283 | struct bfin_rtc *rtc = dev_get_drvdata(dev); |
@@ -362,6 +364,7 @@ static struct rtc_class_ops bfin_rtc_ops = { | |||
362 | .read_alarm = bfin_rtc_read_alarm, | 364 | .read_alarm = bfin_rtc_read_alarm, |
363 | .set_alarm = bfin_rtc_set_alarm, | 365 | .set_alarm = bfin_rtc_set_alarm, |
364 | .proc = bfin_rtc_proc, | 366 | .proc = bfin_rtc_proc, |
367 | .alarm_irq_enable = bfin_rtc_alarm_irq_enable, | ||
365 | }; | 368 | }; |
366 | 369 | ||
367 | static int __devinit bfin_rtc_probe(struct platform_device *pdev) | 370 | static int __devinit bfin_rtc_probe(struct platform_device *pdev) |
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c index 212b16edafc0..d0e06edb14c5 100644 --- a/drivers/rtc/rtc-dev.c +++ b/drivers/rtc/rtc-dev.c | |||
@@ -46,6 +46,105 @@ static int rtc_dev_open(struct inode *inode, struct file *file) | |||
46 | return err; | 46 | return err; |
47 | } | 47 | } |
48 | 48 | ||
49 | #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL | ||
50 | /* | ||
51 | * Poll the RTC seconds field for changes as often as possible; | ||
52 | * after the first RTC_UIE event, use a timer to reduce polling | ||
53 | */ | ||
54 | static void rtc_uie_task(struct work_struct *work) | ||
55 | { | ||
56 | struct rtc_device *rtc = | ||
57 | container_of(work, struct rtc_device, uie_task); | ||
58 | struct rtc_time tm; | ||
59 | int num = 0; | ||
60 | int err; | ||
61 | |||
62 | err = rtc_read_time(rtc, &tm); | ||
63 | |||
64 | spin_lock_irq(&rtc->irq_lock); | ||
65 | if (rtc->stop_uie_polling || err) { | ||
66 | rtc->uie_task_active = 0; | ||
67 | } else if (rtc->oldsecs != tm.tm_sec) { | ||
68 | num = (tm.tm_sec + 60 - rtc->oldsecs) % 60; | ||
69 | rtc->oldsecs = tm.tm_sec; | ||
70 | rtc->uie_timer.expires = jiffies + HZ - (HZ/10); | ||
71 | rtc->uie_timer_active = 1; | ||
72 | rtc->uie_task_active = 0; | ||
73 | add_timer(&rtc->uie_timer); | ||
74 | } else if (schedule_work(&rtc->uie_task) == 0) { | ||
75 | rtc->uie_task_active = 0; | ||
76 | } | ||
77 | spin_unlock_irq(&rtc->irq_lock); | ||
78 | if (num) | ||
79 | rtc_handle_legacy_irq(rtc, num, RTC_UF); | ||
80 | } | ||
81 | static void rtc_uie_timer(unsigned long data) | ||
82 | { | ||
83 | struct rtc_device *rtc = (struct rtc_device *)data; | ||
84 | unsigned long flags; | ||
85 | |||
86 | spin_lock_irqsave(&rtc->irq_lock, flags); | ||
87 | rtc->uie_timer_active = 0; | ||
88 | rtc->uie_task_active = 1; | ||
89 | if ((schedule_work(&rtc->uie_task) == 0)) | ||
90 | rtc->uie_task_active = 0; | ||
91 | spin_unlock_irqrestore(&rtc->irq_lock, flags); | ||
92 | } | ||
93 | |||
94 | static int clear_uie(struct rtc_device *rtc) | ||
95 | { | ||
96 | spin_lock_irq(&rtc->irq_lock); | ||
97 | if (rtc->uie_irq_active) { | ||
98 | rtc->stop_uie_polling = 1; | ||
99 | if (rtc->uie_timer_active) { | ||
100 | spin_unlock_irq(&rtc->irq_lock); | ||
101 | del_timer_sync(&rtc->uie_timer); | ||
102 | spin_lock_irq(&rtc->irq_lock); | ||
103 | rtc->uie_timer_active = 0; | ||
104 | } | ||
105 | if (rtc->uie_task_active) { | ||
106 | spin_unlock_irq(&rtc->irq_lock); | ||
107 | flush_scheduled_work(); | ||
108 | spin_lock_irq(&rtc->irq_lock); | ||
109 | } | ||
110 | rtc->uie_irq_active = 0; | ||
111 | } | ||
112 | spin_unlock_irq(&rtc->irq_lock); | ||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | static int set_uie(struct rtc_device *rtc) | ||
117 | { | ||
118 | struct rtc_time tm; | ||
119 | int err; | ||
120 | |||
121 | err = rtc_read_time(rtc, &tm); | ||
122 | if (err) | ||
123 | return err; | ||
124 | spin_lock_irq(&rtc->irq_lock); | ||
125 | if (!rtc->uie_irq_active) { | ||
126 | rtc->uie_irq_active = 1; | ||
127 | rtc->stop_uie_polling = 0; | ||
128 | rtc->oldsecs = tm.tm_sec; | ||
129 | rtc->uie_task_active = 1; | ||
130 | if (schedule_work(&rtc->uie_task) == 0) | ||
131 | rtc->uie_task_active = 0; | ||
132 | } | ||
133 | rtc->irq_data = 0; | ||
134 | spin_unlock_irq(&rtc->irq_lock); | ||
135 | return 0; | ||
136 | } | ||
137 | |||
138 | int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, unsigned int enabled) | ||
139 | { | ||
140 | if (enabled) | ||
141 | return set_uie(rtc); | ||
142 | else | ||
143 | return clear_uie(rtc); | ||
144 | } | ||
145 | EXPORT_SYMBOL(rtc_dev_update_irq_enable_emul); | ||
146 | |||
147 | #endif /* CONFIG_RTC_INTF_DEV_UIE_EMUL */ | ||
49 | 148 | ||
50 | static ssize_t | 149 | static ssize_t |
51 | rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) | 150 | rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) |
@@ -154,19 +253,7 @@ static long rtc_dev_ioctl(struct file *file, | |||
154 | if (err) | 253 | if (err) |
155 | goto done; | 254 | goto done; |
156 | 255 | ||
157 | /* try the driver's ioctl interface */ | 256 | /* |
158 | if (ops->ioctl) { | ||
159 | err = ops->ioctl(rtc->dev.parent, cmd, arg); | ||
160 | if (err != -ENOIOCTLCMD) { | ||
161 | mutex_unlock(&rtc->ops_lock); | ||
162 | return err; | ||
163 | } | ||
164 | } | ||
165 | |||
166 | /* if the driver does not provide the ioctl interface | ||
167 | * or if that particular ioctl was not implemented | ||
168 | * (-ENOIOCTLCMD), we will try to emulate here. | ||
169 | * | ||
170 | * Drivers *SHOULD NOT* provide ioctl implementations | 257 | * Drivers *SHOULD NOT* provide ioctl implementations |
171 | * for these requests. Instead, provide methods to | 258 | * for these requests. Instead, provide methods to |
172 | * support the following code, so that the RTC's main | 259 | * support the following code, so that the RTC's main |
@@ -329,7 +416,12 @@ static long rtc_dev_ioctl(struct file *file, | |||
329 | return err; | 416 | return err; |
330 | 417 | ||
331 | default: | 418 | default: |
332 | err = -ENOTTY; | 419 | /* Finally try the driver's ioctl interface */ |
420 | if (ops->ioctl) { | ||
421 | err = ops->ioctl(rtc->dev.parent, cmd, arg); | ||
422 | if (err == -ENOIOCTLCMD) | ||
423 | err = -ENOTTY; | ||
424 | } | ||
333 | break; | 425 | break; |
334 | } | 426 | } |
335 | 427 | ||
@@ -394,6 +486,11 @@ void rtc_dev_prepare(struct rtc_device *rtc) | |||
394 | 486 | ||
395 | rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id); | 487 | rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id); |
396 | 488 | ||
489 | #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL | ||
490 | INIT_WORK(&rtc->uie_task, rtc_uie_task); | ||
491 | setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc); | ||
492 | #endif | ||
493 | |||
397 | cdev_init(&rtc->char_dev, &rtc_dev_fops); | 494 | cdev_init(&rtc->char_dev, &rtc_dev_fops); |
398 | rtc->char_dev.owner = rtc->owner; | 495 | rtc->char_dev.owner = rtc->owner; |
399 | } | 496 | } |
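In rtc_uie_task() above, the number of update events reported per poll is the wrap-around difference of the seconds field; a small worked sketch of just that computation (not part of the patch):

static int uie_events_since_last_poll(int oldsecs, int newsecs)
{
	/* e.g. oldsecs == 58, newsecs == 1: (1 + 60 - 58) % 60 == 3 events */
	return (newsecs + 60 - oldsecs) % 60;
}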
diff --git a/drivers/rtc/rtc-ds1286.c b/drivers/rtc/rtc-ds1286.c index bf430f9091ed..60ce69600828 100644 --- a/drivers/rtc/rtc-ds1286.c +++ b/drivers/rtc/rtc-ds1286.c | |||
@@ -40,6 +40,26 @@ static inline void ds1286_rtc_write(struct ds1286_priv *priv, u8 data, int reg) | |||
40 | __raw_writel(data, &priv->rtcregs[reg]); | 40 | __raw_writel(data, &priv->rtcregs[reg]); |
41 | } | 41 | } |
42 | 42 | ||
43 | |||
44 | static int ds1286_alarm_irq_enable(struct device *dev, unsigned int enabled) | ||
45 | { | ||
46 | struct ds1286_priv *priv = dev_get_drvdata(dev); | ||
47 | unsigned long flags; | ||
48 | unsigned char val; | ||
49 | |||
50 | /* Allow or mask alarm interrupts */ | ||
51 | spin_lock_irqsave(&priv->lock, flags); | ||
52 | val = ds1286_rtc_read(priv, RTC_CMD); | ||
53 | if (enabled) | ||
54 | val &= ~RTC_TDM; | ||
55 | else | ||
56 | val |= RTC_TDM; | ||
57 | ds1286_rtc_write(priv, val, RTC_CMD); | ||
58 | spin_unlock_irqrestore(&priv->lock, flags); | ||
59 | |||
60 | return 0; | ||
61 | } | ||
62 | |||
43 | #ifdef CONFIG_RTC_INTF_DEV | 63 | #ifdef CONFIG_RTC_INTF_DEV |
44 | 64 | ||
45 | static int ds1286_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | 65 | static int ds1286_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) |
@@ -49,22 +69,6 @@ static int ds1286_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | |||
49 | unsigned char val; | 69 | unsigned char val; |
50 | 70 | ||
51 | switch (cmd) { | 71 | switch (cmd) { |
52 | case RTC_AIE_OFF: | ||
53 | /* Mask alarm int. enab. bit */ | ||
54 | spin_lock_irqsave(&priv->lock, flags); | ||
55 | val = ds1286_rtc_read(priv, RTC_CMD); | ||
56 | val |= RTC_TDM; | ||
57 | ds1286_rtc_write(priv, val, RTC_CMD); | ||
58 | spin_unlock_irqrestore(&priv->lock, flags); | ||
59 | break; | ||
60 | case RTC_AIE_ON: | ||
61 | /* Allow alarm interrupts. */ | ||
62 | spin_lock_irqsave(&priv->lock, flags); | ||
63 | val = ds1286_rtc_read(priv, RTC_CMD); | ||
64 | val &= ~RTC_TDM; | ||
65 | ds1286_rtc_write(priv, val, RTC_CMD); | ||
66 | spin_unlock_irqrestore(&priv->lock, flags); | ||
67 | break; | ||
68 | case RTC_WIE_OFF: | 72 | case RTC_WIE_OFF: |
69 | /* Mask watchdog int. enab. bit */ | 73 | /* Mask watchdog int. enab. bit */ |
70 | spin_lock_irqsave(&priv->lock, flags); | 74 | spin_lock_irqsave(&priv->lock, flags); |
@@ -316,12 +320,13 @@ static int ds1286_set_alarm(struct device *dev, struct rtc_wkalrm *alm) | |||
316 | } | 320 | } |
317 | 321 | ||
318 | static const struct rtc_class_ops ds1286_ops = { | 322 | static const struct rtc_class_ops ds1286_ops = { |
319 | .ioctl = ds1286_ioctl, | 323 | .ioctl = ds1286_ioctl, |
320 | .proc = ds1286_proc, | 324 | .proc = ds1286_proc, |
321 | .read_time = ds1286_read_time, | 325 | .read_time = ds1286_read_time, |
322 | .set_time = ds1286_set_time, | 326 | .set_time = ds1286_set_time, |
323 | .read_alarm = ds1286_read_alarm, | 327 | .read_alarm = ds1286_read_alarm, |
324 | .set_alarm = ds1286_set_alarm, | 328 | .set_alarm = ds1286_set_alarm, |
329 | .alarm_irq_enable = ds1286_alarm_irq_enable, | ||
325 | }; | 330 | }; |
326 | 331 | ||
327 | static int __devinit ds1286_probe(struct platform_device *pdev) | 332 | static int __devinit ds1286_probe(struct platform_device *pdev) |
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c index 077af1d7b9e4..57fbcc149ba7 100644 --- a/drivers/rtc/rtc-ds1305.c +++ b/drivers/rtc/rtc-ds1305.c | |||
@@ -139,49 +139,32 @@ static u8 hour2bcd(bool hr12, int hour) | |||
139 | * Interface to RTC framework | 139 | * Interface to RTC framework |
140 | */ | 140 | */ |
141 | 141 | ||
142 | #ifdef CONFIG_RTC_INTF_DEV | 142 | static int ds1305_alarm_irq_enable(struct device *dev, unsigned int enabled) |
143 | |||
144 | /* | ||
145 | * Context: caller holds rtc->ops_lock (to protect ds1305->ctrl) | ||
146 | */ | ||
147 | static int ds1305_ioctl(struct device *dev, unsigned cmd, unsigned long arg) | ||
148 | { | 143 | { |
149 | struct ds1305 *ds1305 = dev_get_drvdata(dev); | 144 | struct ds1305 *ds1305 = dev_get_drvdata(dev); |
150 | u8 buf[2]; | 145 | u8 buf[2]; |
151 | int status = -ENOIOCTLCMD; | 146 | long err = -EINVAL; |
152 | 147 | ||
153 | buf[0] = DS1305_WRITE | DS1305_CONTROL; | 148 | buf[0] = DS1305_WRITE | DS1305_CONTROL; |
154 | buf[1] = ds1305->ctrl[0]; | 149 | buf[1] = ds1305->ctrl[0]; |
155 | 150 | ||
156 | switch (cmd) { | 151 | if (enabled) { |
157 | case RTC_AIE_OFF: | ||
158 | status = 0; | ||
159 | if (!(buf[1] & DS1305_AEI0)) | ||
160 | goto done; | ||
161 | buf[1] &= ~DS1305_AEI0; | ||
162 | break; | ||
163 | |||
164 | case RTC_AIE_ON: | ||
165 | status = 0; | ||
166 | if (ds1305->ctrl[0] & DS1305_AEI0) | 152 | if (ds1305->ctrl[0] & DS1305_AEI0) |
167 | goto done; | 153 | goto done; |
168 | buf[1] |= DS1305_AEI0; | 154 | buf[1] |= DS1305_AEI0; |
169 | break; | 155 | } else { |
170 | } | 156 | if (!(buf[1] & DS1305_AEI0)) |
171 | if (status == 0) { | 157 | goto done; |
172 | status = spi_write_then_read(ds1305->spi, buf, sizeof buf, | 158 | buf[1] &= ~DS1305_AEI0; |
173 | NULL, 0); | ||
174 | if (status >= 0) | ||
175 | ds1305->ctrl[0] = buf[1]; | ||
176 | } | 159 | } |
177 | 160 | err = spi_write_then_read(ds1305->spi, buf, sizeof buf, NULL, 0); | |
161 | if (err >= 0) | ||
162 | ds1305->ctrl[0] = buf[1]; | ||
178 | done: | 163 | done: |
179 | return status; | 164 | return err; |
165 | |||
180 | } | 166 | } |
181 | 167 | ||
182 | #else | ||
183 | #define ds1305_ioctl NULL | ||
184 | #endif | ||
185 | 168 | ||
186 | /* | 169 | /* |
187 | * Get/set of date and time is pretty normal. | 170 | * Get/set of date and time is pretty normal. |
@@ -460,12 +443,12 @@ done: | |||
460 | #endif | 443 | #endif |
461 | 444 | ||
462 | static const struct rtc_class_ops ds1305_ops = { | 445 | static const struct rtc_class_ops ds1305_ops = { |
463 | .ioctl = ds1305_ioctl, | ||
464 | .read_time = ds1305_get_time, | 446 | .read_time = ds1305_get_time, |
465 | .set_time = ds1305_set_time, | 447 | .set_time = ds1305_set_time, |
466 | .read_alarm = ds1305_get_alarm, | 448 | .read_alarm = ds1305_get_alarm, |
467 | .set_alarm = ds1305_set_alarm, | 449 | .set_alarm = ds1305_set_alarm, |
468 | .proc = ds1305_proc, | 450 | .proc = ds1305_proc, |
451 | .alarm_irq_enable = ds1305_alarm_irq_enable, | ||
469 | }; | 452 | }; |
470 | 453 | ||
471 | static void ds1305_work(struct work_struct *work) | 454 | static void ds1305_work(struct work_struct *work) |
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index 0d559b6416dd..4724ba3acf1a 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c | |||
@@ -495,50 +495,27 @@ static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t) | |||
495 | return 0; | 495 | return 0; |
496 | } | 496 | } |
497 | 497 | ||
498 | static int ds1307_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | 498 | static int ds1307_alarm_irq_enable(struct device *dev, unsigned int enabled) |
499 | { | 499 | { |
500 | struct i2c_client *client = to_i2c_client(dev); | 500 | struct i2c_client *client = to_i2c_client(dev); |
501 | struct ds1307 *ds1307 = i2c_get_clientdata(client); | 501 | struct ds1307 *ds1307 = i2c_get_clientdata(client); |
502 | int ret; | 502 | int ret; |
503 | 503 | ||
504 | switch (cmd) { | 504 | if (!test_bit(HAS_ALARM, &ds1307->flags)) |
505 | case RTC_AIE_OFF: | 505 | return -ENOTTY; |
506 | if (!test_bit(HAS_ALARM, &ds1307->flags)) | ||
507 | return -ENOTTY; | ||
508 | |||
509 | ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL); | ||
510 | if (ret < 0) | ||
511 | return ret; | ||
512 | |||
513 | ret &= ~DS1337_BIT_A1IE; | ||
514 | |||
515 | ret = i2c_smbus_write_byte_data(client, | ||
516 | DS1337_REG_CONTROL, ret); | ||
517 | if (ret < 0) | ||
518 | return ret; | ||
519 | |||
520 | break; | ||
521 | |||
522 | case RTC_AIE_ON: | ||
523 | if (!test_bit(HAS_ALARM, &ds1307->flags)) | ||
524 | return -ENOTTY; | ||
525 | 506 | ||
526 | ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL); | 507 | ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL); |
527 | if (ret < 0) | 508 | if (ret < 0) |
528 | return ret; | 509 | return ret; |
529 | 510 | ||
511 | if (enabled) | ||
530 | ret |= DS1337_BIT_A1IE; | 512 | ret |= DS1337_BIT_A1IE; |
513 | else | ||
514 | ret &= ~DS1337_BIT_A1IE; | ||
531 | 515 | ||
532 | ret = i2c_smbus_write_byte_data(client, | 516 | ret = i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL, ret); |
533 | DS1337_REG_CONTROL, ret); | 517 | if (ret < 0) |
534 | if (ret < 0) | 518 | return ret; |
535 | return ret; | ||
536 | |||
537 | break; | ||
538 | |||
539 | default: | ||
540 | return -ENOIOCTLCMD; | ||
541 | } | ||
542 | 519 | ||
543 | return 0; | 520 | return 0; |
544 | } | 521 | } |
@@ -548,7 +525,7 @@ static const struct rtc_class_ops ds13xx_rtc_ops = { | |||
548 | .set_time = ds1307_set_time, | 525 | .set_time = ds1307_set_time, |
549 | .read_alarm = ds1337_read_alarm, | 526 | .read_alarm = ds1337_read_alarm, |
550 | .set_alarm = ds1337_set_alarm, | 527 | .set_alarm = ds1337_set_alarm, |
551 | .ioctl = ds1307_ioctl, | 528 | .alarm_irq_enable = ds1307_alarm_irq_enable, |
552 | }; | 529 | }; |
553 | 530 | ||
554 | /*----------------------------------------------------------------------*/ | 531 | /*----------------------------------------------------------------------*/ |
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c index 47fb6357c346..d834a63ec4b0 100644 --- a/drivers/rtc/rtc-ds1374.c +++ b/drivers/rtc/rtc-ds1374.c | |||
@@ -307,42 +307,25 @@ unlock: | |||
307 | mutex_unlock(&ds1374->mutex); | 307 | mutex_unlock(&ds1374->mutex); |
308 | } | 308 | } |
309 | 309 | ||
310 | static int ds1374_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | 310 | static int ds1374_alarm_irq_enable(struct device *dev, unsigned int enabled) |
311 | { | 311 | { |
312 | struct i2c_client *client = to_i2c_client(dev); | 312 | struct i2c_client *client = to_i2c_client(dev); |
313 | struct ds1374 *ds1374 = i2c_get_clientdata(client); | 313 | struct ds1374 *ds1374 = i2c_get_clientdata(client); |
314 | int ret = -ENOIOCTLCMD; | 314 | int ret; |
315 | 315 | ||
316 | mutex_lock(&ds1374->mutex); | 316 | mutex_lock(&ds1374->mutex); |
317 | 317 | ||
318 | switch (cmd) { | 318 | ret = i2c_smbus_read_byte_data(client, DS1374_REG_CR); |
319 | case RTC_AIE_OFF: | 319 | if (ret < 0) |
320 | ret = i2c_smbus_read_byte_data(client, DS1374_REG_CR); | 320 | goto out; |
321 | if (ret < 0) | ||
322 | goto out; | ||
323 | |||
324 | ret &= ~DS1374_REG_CR_WACE; | ||
325 | |||
326 | ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, ret); | ||
327 | if (ret < 0) | ||
328 | goto out; | ||
329 | |||
330 | break; | ||
331 | |||
332 | case RTC_AIE_ON: | ||
333 | ret = i2c_smbus_read_byte_data(client, DS1374_REG_CR); | ||
334 | if (ret < 0) | ||
335 | goto out; | ||
336 | 321 | ||
322 | if (enabled) { | ||
337 | ret |= DS1374_REG_CR_WACE | DS1374_REG_CR_AIE; | 323 | ret |= DS1374_REG_CR_WACE | DS1374_REG_CR_AIE; |
338 | ret &= ~DS1374_REG_CR_WDALM; | 324 | ret &= ~DS1374_REG_CR_WDALM; |
339 | 325 | } else { | |
340 | ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, ret); | 326 | ret &= ~DS1374_REG_CR_WACE; |
341 | if (ret < 0) | ||
342 | goto out; | ||
343 | |||
344 | break; | ||
345 | } | 327 | } |
328 | ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, ret); | ||
346 | 329 | ||
347 | out: | 330 | out: |
348 | mutex_unlock(&ds1374->mutex); | 331 | mutex_unlock(&ds1374->mutex); |
@@ -354,7 +337,7 @@ static const struct rtc_class_ops ds1374_rtc_ops = { | |||
354 | .set_time = ds1374_set_time, | 337 | .set_time = ds1374_set_time, |
355 | .read_alarm = ds1374_read_alarm, | 338 | .read_alarm = ds1374_read_alarm, |
356 | .set_alarm = ds1374_set_alarm, | 339 | .set_alarm = ds1374_set_alarm, |
357 | .ioctl = ds1374_ioctl, | 340 | .alarm_irq_enable = ds1374_alarm_irq_enable, |
358 | }; | 341 | }; |
359 | 342 | ||
360 | static int ds1374_probe(struct i2c_client *client, | 343 | static int ds1374_probe(struct i2c_client *client, |
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c index 23a9ee19764c..950735415a7c 100644 --- a/drivers/rtc/rtc-ds3232.c +++ b/drivers/rtc/rtc-ds3232.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C | 2 | * RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C |
3 | * | 3 | * |
4 | * Copyright (C) 2009-2010 Freescale Semiconductor. | 4 | * Copyright (C) 2009-2011 Freescale Semiconductor. |
5 | * Author: Jack Lan <jack.lan@freescale.com> | 5 | * Author: Jack Lan <jack.lan@freescale.com> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify it | 7 | * This program is free software; you can redistribute it and/or modify it |
@@ -141,9 +141,11 @@ static int ds3232_read_time(struct device *dev, struct rtc_time *time) | |||
141 | time->tm_hour = bcd2bin(hour); | 141 | time->tm_hour = bcd2bin(hour); |
142 | } | 142 | } |
143 | 143 | ||
144 | time->tm_wday = bcd2bin(week); | 144 | /* Day of the week in linux range is 0~6 while 1~7 in RTC chip */ |
145 | time->tm_wday = bcd2bin(week) - 1; | ||
145 | time->tm_mday = bcd2bin(day); | 146 | time->tm_mday = bcd2bin(day); |
146 | time->tm_mon = bcd2bin(month & 0x7F); | 147 | /* linux tm_mon range:0~11, while month range is 1~12 in RTC chip */ |
148 | time->tm_mon = bcd2bin(month & 0x7F) - 1; | ||
147 | if (century) | 149 | if (century) |
148 | add_century = 100; | 150 | add_century = 100; |
149 | 151 | ||
@@ -162,9 +164,11 @@ static int ds3232_set_time(struct device *dev, struct rtc_time *time) | |||
162 | buf[0] = bin2bcd(time->tm_sec); | 164 | buf[0] = bin2bcd(time->tm_sec); |
163 | buf[1] = bin2bcd(time->tm_min); | 165 | buf[1] = bin2bcd(time->tm_min); |
164 | buf[2] = bin2bcd(time->tm_hour); | 166 | buf[2] = bin2bcd(time->tm_hour); |
165 | buf[3] = bin2bcd(time->tm_wday); /* Day of the week */ | 167 | /* Day of the week in linux range is 0~6 while 1~7 in RTC chip */ |
168 | buf[3] = bin2bcd(time->tm_wday + 1); | ||
166 | buf[4] = bin2bcd(time->tm_mday); /* Date */ | 169 | buf[4] = bin2bcd(time->tm_mday); /* Date */ |
167 | buf[5] = bin2bcd(time->tm_mon); | 170 | /* linux tm_mon range:0~11, while month range is 1~12 in RTC chip */ |
171 | buf[5] = bin2bcd(time->tm_mon + 1); | ||
168 | if (time->tm_year >= 100) { | 172 | if (time->tm_year >= 100) { |
169 | buf[5] |= 0x80; | 173 | buf[5] |= 0x80; |
170 | buf[6] = bin2bcd(time->tm_year - 100); | 174 | buf[6] = bin2bcd(time->tm_year - 100); |
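The tm_wday/tm_mon hunks above fix an off-by-one between the kernel encodings (0-based) and the chip's BCD registers (1-based). A worked sketch of the round trip using bin2bcd()/bcd2bin() from <linux/bcd.h>; the buf[] indices mirror the ds3232 read/set paths above, and the function itself is illustration only:

#include <linux/bcd.h>
#include <linux/rtc.h>
#include <linux/types.h>

static void ds3232_encoding_example(struct rtc_time *time, u8 *buf)
{
	/* March is tm_mon == 2 in struct rtc_time but month 3 (BCD 0x03) on the chip */
	buf[5] = bin2bcd(time->tm_mon + 1);		/* write: 2 -> 0x03 */
	time->tm_mon = bcd2bin(buf[5] & 0x7F) - 1;	/* read back: 0x03 -> 2 */

	/* Sunday is tm_wday == 0 in struct rtc_time but day-of-week 1 on the chip */
	buf[3] = bin2bcd(time->tm_wday + 1);		/* write: 0 -> 0x01 */
	time->tm_wday = bcd2bin(buf[3]) - 1;		/* read back: 0x01 -> 0 */
}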
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index 5a8daa358066..69fe664a2228 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c | |||
@@ -213,41 +213,27 @@ static int m41t80_rtc_set_time(struct device *dev, struct rtc_time *tm) | |||
213 | return m41t80_set_datetime(to_i2c_client(dev), tm); | 213 | return m41t80_set_datetime(to_i2c_client(dev), tm); |
214 | } | 214 | } |
215 | 215 | ||
216 | #if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE) | 216 | static int m41t80_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) |
217 | static int | ||
218 | m41t80_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | ||
219 | { | 217 | { |
220 | struct i2c_client *client = to_i2c_client(dev); | 218 | struct i2c_client *client = to_i2c_client(dev); |
221 | int rc; | 219 | int rc; |
222 | 220 | ||
223 | switch (cmd) { | ||
224 | case RTC_AIE_OFF: | ||
225 | case RTC_AIE_ON: | ||
226 | break; | ||
227 | default: | ||
228 | return -ENOIOCTLCMD; | ||
229 | } | ||
230 | |||
231 | rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON); | 221 | rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON); |
232 | if (rc < 0) | 222 | if (rc < 0) |
233 | goto err; | 223 | goto err; |
234 | switch (cmd) { | 224 | |
235 | case RTC_AIE_OFF: | 225 | if (enabled) |
236 | rc &= ~M41T80_ALMON_AFE; | ||
237 | break; | ||
238 | case RTC_AIE_ON: | ||
239 | rc |= M41T80_ALMON_AFE; | 226 | rc |= M41T80_ALMON_AFE; |
240 | break; | 227 | else |
241 | } | 228 | rc &= ~M41T80_ALMON_AFE; |
229 | |||
242 | if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, rc) < 0) | 230 | if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, rc) < 0) |
243 | goto err; | 231 | goto err; |
232 | |||
244 | return 0; | 233 | return 0; |
245 | err: | 234 | err: |
246 | return -EIO; | 235 | return -EIO; |
247 | } | 236 | } |
248 | #else | ||
249 | #define m41t80_rtc_ioctl NULL | ||
250 | #endif | ||
251 | 237 | ||
252 | static int m41t80_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *t) | 238 | static int m41t80_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *t) |
253 | { | 239 | { |
@@ -374,7 +360,7 @@ static struct rtc_class_ops m41t80_rtc_ops = { | |||
374 | .read_alarm = m41t80_rtc_read_alarm, | 360 | .read_alarm = m41t80_rtc_read_alarm, |
375 | .set_alarm = m41t80_rtc_set_alarm, | 361 | .set_alarm = m41t80_rtc_set_alarm, |
376 | .proc = m41t80_rtc_proc, | 362 | .proc = m41t80_rtc_proc, |
377 | .ioctl = m41t80_rtc_ioctl, | 363 | .alarm_irq_enable = m41t80_rtc_alarm_irq_enable, |
378 | }; | 364 | }; |
379 | 365 | ||
380 | #if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE) | 366 | #if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE) |
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c index a99a0b554eb8..3978f4caf724 100644 --- a/drivers/rtc/rtc-m48t59.c +++ b/drivers/rtc/rtc-m48t59.c | |||
@@ -263,30 +263,21 @@ static int m48t59_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) | |||
263 | /* | 263 | /* |
264 | * Handle commands from user-space | 264 | * Handle commands from user-space |
265 | */ | 265 | */ |
266 | static int m48t59_rtc_ioctl(struct device *dev, unsigned int cmd, | 266 | static int m48t59_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) |
267 | unsigned long arg) | ||
268 | { | 267 | { |
269 | struct platform_device *pdev = to_platform_device(dev); | 268 | struct platform_device *pdev = to_platform_device(dev); |
270 | struct m48t59_plat_data *pdata = pdev->dev.platform_data; | 269 | struct m48t59_plat_data *pdata = pdev->dev.platform_data; |
271 | struct m48t59_private *m48t59 = platform_get_drvdata(pdev); | 270 | struct m48t59_private *m48t59 = platform_get_drvdata(pdev); |
272 | unsigned long flags; | 271 | unsigned long flags; |
273 | int ret = 0; | ||
274 | 272 | ||
275 | spin_lock_irqsave(&m48t59->lock, flags); | 273 | spin_lock_irqsave(&m48t59->lock, flags); |
276 | switch (cmd) { | 274 | if (enabled) |
277 | case RTC_AIE_OFF: /* alarm interrupt off */ | ||
278 | M48T59_WRITE(0x00, M48T59_INTR); | ||
279 | break; | ||
280 | case RTC_AIE_ON: /* alarm interrupt on */ | ||
281 | M48T59_WRITE(M48T59_INTR_AFE, M48T59_INTR); | 275 | M48T59_WRITE(M48T59_INTR_AFE, M48T59_INTR); |
282 | break; | 276 | else |
283 | default: | 277 | M48T59_WRITE(0x00, M48T59_INTR); |
284 | ret = -ENOIOCTLCMD; | ||
285 | break; | ||
286 | } | ||
287 | spin_unlock_irqrestore(&m48t59->lock, flags); | 278 | spin_unlock_irqrestore(&m48t59->lock, flags); |
288 | 279 | ||
289 | return ret; | 280 | return 0; |
290 | } | 281 | } |
291 | 282 | ||
292 | static int m48t59_rtc_proc(struct device *dev, struct seq_file *seq) | 283 | static int m48t59_rtc_proc(struct device *dev, struct seq_file *seq) |
@@ -330,12 +321,12 @@ static irqreturn_t m48t59_rtc_interrupt(int irq, void *dev_id) | |||
330 | } | 321 | } |
331 | 322 | ||
332 | static const struct rtc_class_ops m48t59_rtc_ops = { | 323 | static const struct rtc_class_ops m48t59_rtc_ops = { |
333 | .ioctl = m48t59_rtc_ioctl, | ||
334 | .read_time = m48t59_rtc_read_time, | 324 | .read_time = m48t59_rtc_read_time, |
335 | .set_time = m48t59_rtc_set_time, | 325 | .set_time = m48t59_rtc_set_time, |
336 | .read_alarm = m48t59_rtc_readalarm, | 326 | .read_alarm = m48t59_rtc_readalarm, |
337 | .set_alarm = m48t59_rtc_setalarm, | 327 | .set_alarm = m48t59_rtc_setalarm, |
338 | .proc = m48t59_rtc_proc, | 328 | .proc = m48t59_rtc_proc, |
329 | .alarm_irq_enable = m48t59_rtc_alarm_irq_enable, | ||
339 | }; | 330 | }; |
340 | 331 | ||
341 | static const struct rtc_class_ops m48t02_rtc_ops = { | 332 | static const struct rtc_class_ops m48t02_rtc_ops = { |
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c index bcd0cf63eb16..1db62db8469d 100644 --- a/drivers/rtc/rtc-mrst.c +++ b/drivers/rtc/rtc-mrst.c | |||
@@ -255,42 +255,21 @@ static int mrst_irq_set_state(struct device *dev, int enabled) | |||
255 | return 0; | 255 | return 0; |
256 | } | 256 | } |
257 | 257 | ||
258 | #if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE) | ||
259 | |||
260 | /* Currently, the vRTC doesn't support UIE ON/OFF */ | 258 | /* Currently, the vRTC doesn't support UIE ON/OFF */ |
261 | static int | 259 | static int mrst_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) |
262 | mrst_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | ||
263 | { | 260 | { |
264 | struct mrst_rtc *mrst = dev_get_drvdata(dev); | 261 | struct mrst_rtc *mrst = dev_get_drvdata(dev); |
265 | unsigned long flags; | 262 | unsigned long flags; |
266 | 263 | ||
267 | switch (cmd) { | ||
268 | case RTC_AIE_OFF: | ||
269 | case RTC_AIE_ON: | ||
270 | if (!mrst->irq) | ||
271 | return -EINVAL; | ||
272 | break; | ||
273 | default: | ||
274 | /* PIE ON/OFF is handled by mrst_irq_set_state() */ | ||
275 | return -ENOIOCTLCMD; | ||
276 | } | ||
277 | |||
278 | spin_lock_irqsave(&rtc_lock, flags); | 264 | spin_lock_irqsave(&rtc_lock, flags); |
279 | switch (cmd) { | 265 | if (enabled) |
280 | case RTC_AIE_OFF: /* alarm off */ | ||
281 | mrst_irq_disable(mrst, RTC_AIE); | ||
282 | break; | ||
283 | case RTC_AIE_ON: /* alarm on */ | ||
284 | mrst_irq_enable(mrst, RTC_AIE); | 266 | mrst_irq_enable(mrst, RTC_AIE); |
285 | break; | 267 | else |
286 | } | 268 | mrst_irq_disable(mrst, RTC_AIE); |
287 | spin_unlock_irqrestore(&rtc_lock, flags); | 269 | spin_unlock_irqrestore(&rtc_lock, flags); |
288 | return 0; | 270 | return 0; |
289 | } | 271 | } |
290 | 272 | ||
291 | #else | ||
292 | #define mrst_rtc_ioctl NULL | ||
293 | #endif | ||
294 | 273 | ||
295 | #if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE) | 274 | #if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE) |
296 | 275 | ||
@@ -317,13 +296,13 @@ static int mrst_procfs(struct device *dev, struct seq_file *seq) | |||
317 | #endif | 296 | #endif |
318 | 297 | ||
319 | static const struct rtc_class_ops mrst_rtc_ops = { | 298 | static const struct rtc_class_ops mrst_rtc_ops = { |
320 | .ioctl = mrst_rtc_ioctl, | ||
321 | .read_time = mrst_read_time, | 299 | .read_time = mrst_read_time, |
322 | .set_time = mrst_set_time, | 300 | .set_time = mrst_set_time, |
323 | .read_alarm = mrst_read_alarm, | 301 | .read_alarm = mrst_read_alarm, |
324 | .set_alarm = mrst_set_alarm, | 302 | .set_alarm = mrst_set_alarm, |
325 | .proc = mrst_procfs, | 303 | .proc = mrst_procfs, |
326 | .irq_set_state = mrst_irq_set_state, | 304 | .irq_set_state = mrst_irq_set_state, |
305 | .alarm_irq_enable = mrst_rtc_alarm_irq_enable, | ||
327 | }; | 306 | }; |
328 | 307 | ||
329 | static struct mrst_rtc mrst_rtc; | 308 | static struct mrst_rtc mrst_rtc; |
diff --git a/drivers/rtc/rtc-msm6242.c b/drivers/rtc/rtc-msm6242.c index b2fff0ca49f8..67820626e18f 100644 --- a/drivers/rtc/rtc-msm6242.c +++ b/drivers/rtc/rtc-msm6242.c | |||
@@ -82,7 +82,7 @@ static inline unsigned int msm6242_read(struct msm6242_priv *priv, | |||
82 | static inline void msm6242_write(struct msm6242_priv *priv, unsigned int val, | 82 | static inline void msm6242_write(struct msm6242_priv *priv, unsigned int val, |
83 | unsigned int reg) | 83 | unsigned int reg) |
84 | { | 84 | { |
85 | return __raw_writel(val, &priv->regs[reg]); | 85 | __raw_writel(val, &priv->regs[reg]); |
86 | } | 86 | } |
87 | 87 | ||
88 | static inline void msm6242_set(struct msm6242_priv *priv, unsigned int val, | 88 | static inline void msm6242_set(struct msm6242_priv *priv, unsigned int val, |
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c index bcca47298554..60627a764514 100644 --- a/drivers/rtc/rtc-mv.c +++ b/drivers/rtc/rtc-mv.c | |||
@@ -169,25 +169,19 @@ static int mv_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) | |||
169 | return 0; | 169 | return 0; |
170 | } | 170 | } |
171 | 171 | ||
172 | static int mv_rtc_ioctl(struct device *dev, unsigned int cmd, | 172 | static int mv_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) |
173 | unsigned long arg) | ||
174 | { | 173 | { |
175 | struct platform_device *pdev = to_platform_device(dev); | 174 | struct platform_device *pdev = to_platform_device(dev); |
176 | struct rtc_plat_data *pdata = platform_get_drvdata(pdev); | 175 | struct rtc_plat_data *pdata = platform_get_drvdata(pdev); |
177 | void __iomem *ioaddr = pdata->ioaddr; | 176 | void __iomem *ioaddr = pdata->ioaddr; |
178 | 177 | ||
179 | if (pdata->irq < 0) | 178 | if (pdata->irq < 0) |
180 | return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */ | 179 | return -EINVAL; /* fall back into rtc-dev's emulation */ |
181 | switch (cmd) { | 180 | |
182 | case RTC_AIE_OFF: | 181 | if (enabled) |
183 | writel(0, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS); | ||
184 | break; | ||
185 | case RTC_AIE_ON: | ||
186 | writel(1, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS); | 182 | writel(1, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS); |
187 | break; | 183 | else |
188 | default: | 184 | writel(0, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS); |
189 | return -ENOIOCTLCMD; | ||
190 | } | ||
191 | return 0; | 185 | return 0; |
192 | } | 186 | } |
193 | 187 | ||
@@ -216,7 +210,7 @@ static const struct rtc_class_ops mv_rtc_alarm_ops = { | |||
216 | .set_time = mv_rtc_set_time, | 210 | .set_time = mv_rtc_set_time, |
217 | .read_alarm = mv_rtc_read_alarm, | 211 | .read_alarm = mv_rtc_read_alarm, |
218 | .set_alarm = mv_rtc_set_alarm, | 212 | .set_alarm = mv_rtc_set_alarm, |
219 | .ioctl = mv_rtc_ioctl, | 213 | .alarm_irq_enable = mv_rtc_alarm_irq_enable, |
220 | }; | 214 | }; |
221 | 215 | ||
222 | static int __devinit mv_rtc_probe(struct platform_device *pdev) | 216 | static int __devinit mv_rtc_probe(struct platform_device *pdev) |
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c index e72b523c79a5..b4dbf3a319b3 100644 --- a/drivers/rtc/rtc-omap.c +++ b/drivers/rtc/rtc-omap.c | |||
@@ -143,8 +143,6 @@ omap_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | |||
143 | u8 reg; | 143 | u8 reg; |
144 | 144 | ||
145 | switch (cmd) { | 145 | switch (cmd) { |
146 | case RTC_AIE_OFF: | ||
147 | case RTC_AIE_ON: | ||
148 | case RTC_UIE_OFF: | 146 | case RTC_UIE_OFF: |
149 | case RTC_UIE_ON: | 147 | case RTC_UIE_ON: |
150 | break; | 148 | break; |
@@ -156,13 +154,6 @@ omap_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | |||
156 | rtc_wait_not_busy(); | 154 | rtc_wait_not_busy(); |
157 | reg = rtc_read(OMAP_RTC_INTERRUPTS_REG); | 155 | reg = rtc_read(OMAP_RTC_INTERRUPTS_REG); |
158 | switch (cmd) { | 156 | switch (cmd) { |
159 | /* AIE = Alarm Interrupt Enable */ | ||
160 | case RTC_AIE_OFF: | ||
161 | reg &= ~OMAP_RTC_INTERRUPTS_IT_ALARM; | ||
162 | break; | ||
163 | case RTC_AIE_ON: | ||
164 | reg |= OMAP_RTC_INTERRUPTS_IT_ALARM; | ||
165 | break; | ||
166 | /* UIE = Update Interrupt Enable (1/second) */ | 157 | /* UIE = Update Interrupt Enable (1/second) */ |
167 | case RTC_UIE_OFF: | 158 | case RTC_UIE_OFF: |
168 | reg &= ~OMAP_RTC_INTERRUPTS_IT_TIMER; | 159 | reg &= ~OMAP_RTC_INTERRUPTS_IT_TIMER; |
@@ -182,6 +173,24 @@ omap_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | |||
182 | #define omap_rtc_ioctl NULL | 173 | #define omap_rtc_ioctl NULL |
183 | #endif | 174 | #endif |
184 | 175 | ||
176 | static int omap_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) | ||
177 | { | ||
178 | u8 reg; | ||
179 | |||
180 | local_irq_disable(); | ||
181 | rtc_wait_not_busy(); | ||
182 | reg = rtc_read(OMAP_RTC_INTERRUPTS_REG); | ||
183 | if (enabled) | ||
184 | reg |= OMAP_RTC_INTERRUPTS_IT_ALARM; | ||
185 | else | ||
186 | reg &= ~OMAP_RTC_INTERRUPTS_IT_ALARM; | ||
187 | rtc_wait_not_busy(); | ||
188 | rtc_write(reg, OMAP_RTC_INTERRUPTS_REG); | ||
189 | local_irq_enable(); | ||
190 | |||
191 | return 0; | ||
192 | } | ||
193 | |||
185 | /* this hardware doesn't support "don't care" alarm fields */ | 194 | /* this hardware doesn't support "don't care" alarm fields */ |
186 | static int tm2bcd(struct rtc_time *tm) | 195 | static int tm2bcd(struct rtc_time *tm) |
187 | { | 196 | { |
@@ -309,6 +318,7 @@ static struct rtc_class_ops omap_rtc_ops = { | |||
309 | .set_time = omap_rtc_set_time, | 318 | .set_time = omap_rtc_set_time, |
310 | .read_alarm = omap_rtc_read_alarm, | 319 | .read_alarm = omap_rtc_read_alarm, |
311 | .set_alarm = omap_rtc_set_alarm, | 320 | .set_alarm = omap_rtc_set_alarm, |
321 | .alarm_irq_enable = omap_rtc_alarm_irq_enable, | ||
312 | }; | 322 | }; |
313 | 323 | ||
314 | static int omap_rtc_alarm; | 324 | static int omap_rtc_alarm; |
diff --git a/drivers/rtc/rtc-proc.c b/drivers/rtc/rtc-proc.c index c086fc30a84c..242bbf86c74a 100644 --- a/drivers/rtc/rtc-proc.c +++ b/drivers/rtc/rtc-proc.c | |||
@@ -81,12 +81,16 @@ static int rtc_proc_show(struct seq_file *seq, void *offset) | |||
81 | 81 | ||
82 | static int rtc_proc_open(struct inode *inode, struct file *file) | 82 | static int rtc_proc_open(struct inode *inode, struct file *file) |
83 | { | 83 | { |
84 | int ret; | ||
84 | struct rtc_device *rtc = PDE(inode)->data; | 85 | struct rtc_device *rtc = PDE(inode)->data; |
85 | 86 | ||
86 | if (!try_module_get(THIS_MODULE)) | 87 | if (!try_module_get(THIS_MODULE)) |
87 | return -ENODEV; | 88 | return -ENODEV; |
88 | 89 | ||
89 | return single_open(file, rtc_proc_show, rtc); | 90 | ret = single_open(file, rtc_proc_show, rtc); |
91 | if (ret) | ||
92 | module_put(THIS_MODULE); | ||
93 | return ret; | ||
90 | } | 94 | } |
91 | 95 | ||
92 | static int rtc_proc_release(struct inode *inode, struct file *file) | 96 | static int rtc_proc_release(struct inode *inode, struct file *file) |
diff --git a/drivers/rtc/rtc-rp5c01.c b/drivers/rtc/rtc-rp5c01.c index 36eb66184461..694da39b6dd2 100644 --- a/drivers/rtc/rtc-rp5c01.c +++ b/drivers/rtc/rtc-rp5c01.c | |||
@@ -76,7 +76,7 @@ static inline unsigned int rp5c01_read(struct rp5c01_priv *priv, | |||
76 | static inline void rp5c01_write(struct rp5c01_priv *priv, unsigned int val, | 76 | static inline void rp5c01_write(struct rp5c01_priv *priv, unsigned int val, |
77 | unsigned int reg) | 77 | unsigned int reg) |
78 | { | 78 | { |
79 | return __raw_writel(val, &priv->regs[reg]); | 79 | __raw_writel(val, &priv->regs[reg]); |
80 | } | 80 | } |
81 | 81 | ||
82 | static void rp5c01_lock(struct rp5c01_priv *priv) | 82 | static void rp5c01_lock(struct rp5c01_priv *priv) |
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c index dd14e202c2c8..6aaa1550e3b1 100644 --- a/drivers/rtc/rtc-rs5c372.c +++ b/drivers/rtc/rtc-rs5c372.c | |||
@@ -299,14 +299,6 @@ rs5c_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | |||
299 | if (rs5c->type == rtc_rs5c372a | 299 | if (rs5c->type == rtc_rs5c372a |
300 | && (buf & RS5C372A_CTRL1_SL1)) | 300 | && (buf & RS5C372A_CTRL1_SL1)) |
301 | return -ENOIOCTLCMD; | 301 | return -ENOIOCTLCMD; |
302 | case RTC_AIE_OFF: | ||
303 | case RTC_AIE_ON: | ||
304 | /* these irq management calls only make sense for chips | ||
305 | * which are wired up to an IRQ. | ||
306 | */ | ||
307 | if (!rs5c->has_irq) | ||
308 | return -ENOIOCTLCMD; | ||
309 | break; | ||
310 | default: | 302 | default: |
311 | return -ENOIOCTLCMD; | 303 | return -ENOIOCTLCMD; |
312 | } | 304 | } |
@@ -317,12 +309,6 @@ rs5c_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | |||
317 | 309 | ||
318 | addr = RS5C_ADDR(RS5C_REG_CTRL1); | 310 | addr = RS5C_ADDR(RS5C_REG_CTRL1); |
319 | switch (cmd) { | 311 | switch (cmd) { |
320 | case RTC_AIE_OFF: /* alarm off */ | ||
321 | buf &= ~RS5C_CTRL1_AALE; | ||
322 | break; | ||
323 | case RTC_AIE_ON: /* alarm on */ | ||
324 | buf |= RS5C_CTRL1_AALE; | ||
325 | break; | ||
326 | case RTC_UIE_OFF: /* update off */ | 312 | case RTC_UIE_OFF: /* update off */ |
327 | buf &= ~RS5C_CTRL1_CT_MASK; | 313 | buf &= ~RS5C_CTRL1_CT_MASK; |
328 | break; | 314 | break; |
@@ -347,6 +333,39 @@ rs5c_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | |||
347 | #endif | 333 | #endif |
348 | 334 | ||
349 | 335 | ||
336 | static int rs5c_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) | ||
337 | { | ||
338 | struct i2c_client *client = to_i2c_client(dev); | ||
339 | struct rs5c372 *rs5c = i2c_get_clientdata(client); | ||
340 | unsigned char buf; | ||
341 | int status, addr; | ||
342 | |||
343 | buf = rs5c->regs[RS5C_REG_CTRL1]; | ||
344 | |||
345 | if (!rs5c->has_irq) | ||
346 | return -EINVAL; | ||
347 | |||
348 | status = rs5c_get_regs(rs5c); | ||
349 | if (status < 0) | ||
350 | return status; | ||
351 | |||
352 | addr = RS5C_ADDR(RS5C_REG_CTRL1); | ||
353 | if (enabled) | ||
354 | buf |= RS5C_CTRL1_AALE; | ||
355 | else | ||
356 | buf &= ~RS5C_CTRL1_AALE; | ||
357 | |||
358 | if (i2c_smbus_write_byte_data(client, addr, buf) < 0) { | ||
359 | printk(KERN_WARNING "%s: can't update alarm\n", | ||
360 | rs5c->rtc->name); | ||
361 | status = -EIO; | ||
362 | } else | ||
363 | rs5c->regs[RS5C_REG_CTRL1] = buf; | ||
364 | |||
365 | return status; | ||
366 | } | ||
367 | |||
368 | |||
350 | /* NOTE: Since RTC_WKALM_{RD,SET} were originally defined for EFI, | 369 | /* NOTE: Since RTC_WKALM_{RD,SET} were originally defined for EFI, |
351 | * which only exposes a polled programming interface; and since | 370 | * which only exposes a polled programming interface; and since |
352 | * these calls map directly to those EFI requests; we don't demand | 371 | * these calls map directly to those EFI requests; we don't demand |
@@ -466,6 +485,7 @@ static const struct rtc_class_ops rs5c372_rtc_ops = { | |||
466 | .set_time = rs5c372_rtc_set_time, | 485 | .set_time = rs5c372_rtc_set_time, |
467 | .read_alarm = rs5c_read_alarm, | 486 | .read_alarm = rs5c_read_alarm, |
468 | .set_alarm = rs5c_set_alarm, | 487 | .set_alarm = rs5c_set_alarm, |
488 | .alarm_irq_enable = rs5c_rtc_alarm_irq_enable, | ||
469 | }; | 489 | }; |
470 | 490 | ||
471 | #if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE) | 491 | #if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE) |
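Nothing changes for userspace here: RTC_AIE_ON and RTC_AIE_OFF are still issued against the /dev/rtcN character device, the RTC core simply routes them to the driver's .alarm_irq_enable callback instead of its ioctl handler. A small user-side sketch, assuming an rtc0 device exists:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/rtc.h>

    int main(void)
    {
            int fd = open("/dev/rtc0", O_RDONLY);

            if (fd < 0) {
                    perror("open /dev/rtc0");
                    return 1;
            }
            /* the kernel ends up in ops->alarm_irq_enable(dev, 1) */
            if (ioctl(fd, RTC_AIE_ON, 0) < 0)
                    perror("RTC_AIE_ON");
            /* ... program the alarm with RTC_WKALM_SET and wait ... */
            if (ioctl(fd, RTC_AIE_OFF, 0) < 0)
                    perror("RTC_AIE_OFF");
            close(fd);
            return 0;
    }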
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c index 88ea52b8647a..5dfe5ffcb0d3 100644 --- a/drivers/rtc/rtc-sa1100.c +++ b/drivers/rtc/rtc-sa1100.c | |||
@@ -314,16 +314,6 @@ static int sa1100_rtc_ioctl(struct device *dev, unsigned int cmd, | |||
314 | unsigned long arg) | 314 | unsigned long arg) |
315 | { | 315 | { |
316 | switch (cmd) { | 316 | switch (cmd) { |
317 | case RTC_AIE_OFF: | ||
318 | spin_lock_irq(&sa1100_rtc_lock); | ||
319 | RTSR &= ~RTSR_ALE; | ||
320 | spin_unlock_irq(&sa1100_rtc_lock); | ||
321 | return 0; | ||
322 | case RTC_AIE_ON: | ||
323 | spin_lock_irq(&sa1100_rtc_lock); | ||
324 | RTSR |= RTSR_ALE; | ||
325 | spin_unlock_irq(&sa1100_rtc_lock); | ||
326 | return 0; | ||
327 | case RTC_UIE_OFF: | 317 | case RTC_UIE_OFF: |
328 | spin_lock_irq(&sa1100_rtc_lock); | 318 | spin_lock_irq(&sa1100_rtc_lock); |
329 | RTSR &= ~RTSR_HZE; | 319 | RTSR &= ~RTSR_HZE; |
@@ -338,6 +328,17 @@ static int sa1100_rtc_ioctl(struct device *dev, unsigned int cmd, | |||
338 | return -ENOIOCTLCMD; | 328 | return -ENOIOCTLCMD; |
339 | } | 329 | } |
340 | 330 | ||
331 | static int sa1100_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) | ||
332 | { | ||
333 | spin_lock_irq(&sa1100_rtc_lock); | ||
334 | if (enabled) | ||
335 | RTSR |= RTSR_ALE; | ||
336 | else | ||
337 | RTSR &= ~RTSR_ALE; | ||
338 | spin_unlock_irq(&sa1100_rtc_lock); | ||
339 | return 0; | ||
340 | } | ||
341 | |||
341 | static int sa1100_rtc_read_time(struct device *dev, struct rtc_time *tm) | 342 | static int sa1100_rtc_read_time(struct device *dev, struct rtc_time *tm) |
342 | { | 343 | { |
343 | rtc_time_to_tm(RCNR, tm); | 344 | rtc_time_to_tm(RCNR, tm); |
@@ -410,6 +411,7 @@ static const struct rtc_class_ops sa1100_rtc_ops = { | |||
410 | .proc = sa1100_rtc_proc, | 411 | .proc = sa1100_rtc_proc, |
411 | .irq_set_freq = sa1100_irq_set_freq, | 412 | .irq_set_freq = sa1100_irq_set_freq, |
412 | .irq_set_state = sa1100_irq_set_state, | 413 | .irq_set_state = sa1100_irq_set_state, |
414 | .alarm_irq_enable = sa1100_rtc_alarm_irq_enable, | ||
413 | }; | 415 | }; |
414 | 416 | ||
415 | static int sa1100_rtc_probe(struct platform_device *pdev) | 417 | static int sa1100_rtc_probe(struct platform_device *pdev) |
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c index 06e41ed93230..93314a9e7fa9 100644 --- a/drivers/rtc/rtc-sh.c +++ b/drivers/rtc/rtc-sh.c | |||
@@ -350,10 +350,6 @@ static int sh_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | |||
350 | unsigned int ret = 0; | 350 | unsigned int ret = 0; |
351 | 351 | ||
352 | switch (cmd) { | 352 | switch (cmd) { |
353 | case RTC_AIE_OFF: | ||
354 | case RTC_AIE_ON: | ||
355 | sh_rtc_setaie(dev, cmd == RTC_AIE_ON); | ||
356 | break; | ||
357 | case RTC_UIE_OFF: | 353 | case RTC_UIE_OFF: |
358 | rtc->periodic_freq &= ~PF_OXS; | 354 | rtc->periodic_freq &= ~PF_OXS; |
359 | sh_rtc_setcie(dev, 0); | 355 | sh_rtc_setcie(dev, 0); |
@@ -369,6 +365,12 @@ static int sh_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | |||
369 | return ret; | 365 | return ret; |
370 | } | 366 | } |
371 | 367 | ||
368 | static int sh_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) | ||
369 | { | ||
370 | sh_rtc_setaie(dev, enabled); | ||
371 | return 0; | ||
372 | } | ||
373 | |||
372 | static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm) | 374 | static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm) |
373 | { | 375 | { |
374 | struct platform_device *pdev = to_platform_device(dev); | 376 | struct platform_device *pdev = to_platform_device(dev); |
@@ -604,6 +606,7 @@ static struct rtc_class_ops sh_rtc_ops = { | |||
604 | .irq_set_state = sh_rtc_irq_set_state, | 606 | .irq_set_state = sh_rtc_irq_set_state, |
605 | .irq_set_freq = sh_rtc_irq_set_freq, | 607 | .irq_set_freq = sh_rtc_irq_set_freq, |
606 | .proc = sh_rtc_proc, | 608 | .proc = sh_rtc_proc, |
609 | .alarm_irq_enable = sh_rtc_alarm_irq_enable, | ||
607 | }; | 610 | }; |
608 | 611 | ||
609 | static int __init sh_rtc_probe(struct platform_device *pdev) | 612 | static int __init sh_rtc_probe(struct platform_device *pdev) |
diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c index 51725f7755b0..a82d6fe97076 100644 --- a/drivers/rtc/rtc-test.c +++ b/drivers/rtc/rtc-test.c | |||
@@ -50,24 +50,9 @@ static int test_rtc_proc(struct device *dev, struct seq_file *seq) | |||
50 | return 0; | 50 | return 0; |
51 | } | 51 | } |
52 | 52 | ||
53 | static int test_rtc_ioctl(struct device *dev, unsigned int cmd, | 53 | static int test_rtc_alarm_irq_enable(struct device *dev, unsigned int enable) |
54 | unsigned long arg) | ||
55 | { | 54 | { |
56 | /* We do support interrupts, they're generated | 55 | return 0; |
57 | * using the sysfs interface. | ||
58 | */ | ||
59 | switch (cmd) { | ||
60 | case RTC_PIE_ON: | ||
61 | case RTC_PIE_OFF: | ||
62 | case RTC_UIE_ON: | ||
63 | case RTC_UIE_OFF: | ||
64 | case RTC_AIE_ON: | ||
65 | case RTC_AIE_OFF: | ||
66 | return 0; | ||
67 | |||
68 | default: | ||
69 | return -ENOIOCTLCMD; | ||
70 | } | ||
71 | } | 56 | } |
72 | 57 | ||
73 | static const struct rtc_class_ops test_rtc_ops = { | 58 | static const struct rtc_class_ops test_rtc_ops = { |
@@ -76,7 +61,7 @@ static const struct rtc_class_ops test_rtc_ops = { | |||
76 | .read_alarm = test_rtc_read_alarm, | 61 | .read_alarm = test_rtc_read_alarm, |
77 | .set_alarm = test_rtc_set_alarm, | 62 | .set_alarm = test_rtc_set_alarm, |
78 | .set_mmss = test_rtc_set_mmss, | 63 | .set_mmss = test_rtc_set_mmss, |
79 | .ioctl = test_rtc_ioctl, | 64 | .alarm_irq_enable = test_rtc_alarm_irq_enable, |
80 | }; | 65 | }; |
81 | 66 | ||
82 | static ssize_t test_irq_show(struct device *dev, | 67 | static ssize_t test_irq_show(struct device *dev, |
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c index c3244244e8cf..769190ac6d11 100644 --- a/drivers/rtc/rtc-vr41xx.c +++ b/drivers/rtc/rtc-vr41xx.c | |||
@@ -240,26 +240,6 @@ static int vr41xx_rtc_irq_set_state(struct device *dev, int enabled) | |||
240 | static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | 240 | static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) |
241 | { | 241 | { |
242 | switch (cmd) { | 242 | switch (cmd) { |
243 | case RTC_AIE_ON: | ||
244 | spin_lock_irq(&rtc_lock); | ||
245 | |||
246 | if (!alarm_enabled) { | ||
247 | enable_irq(aie_irq); | ||
248 | alarm_enabled = 1; | ||
249 | } | ||
250 | |||
251 | spin_unlock_irq(&rtc_lock); | ||
252 | break; | ||
253 | case RTC_AIE_OFF: | ||
254 | spin_lock_irq(&rtc_lock); | ||
255 | |||
256 | if (alarm_enabled) { | ||
257 | disable_irq(aie_irq); | ||
258 | alarm_enabled = 0; | ||
259 | } | ||
260 | |||
261 | spin_unlock_irq(&rtc_lock); | ||
262 | break; | ||
263 | case RTC_EPOCH_READ: | 243 | case RTC_EPOCH_READ: |
264 | return put_user(epoch, (unsigned long __user *)arg); | 244 | return put_user(epoch, (unsigned long __user *)arg); |
265 | case RTC_EPOCH_SET: | 245 | case RTC_EPOCH_SET: |
@@ -275,6 +255,24 @@ static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long | |||
275 | return 0; | 255 | return 0; |
276 | } | 256 | } |
277 | 257 | ||
258 | static int vr41xx_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) | ||
259 | { | ||
260 | spin_lock_irq(&rtc_lock); | ||
261 | if (enabled) { | ||
262 | if (!alarm_enabled) { | ||
263 | enable_irq(aie_irq); | ||
264 | alarm_enabled = 1; | ||
265 | } | ||
266 | } else { | ||
267 | if (alarm_enabled) { | ||
268 | disable_irq(aie_irq); | ||
269 | alarm_enabled = 0; | ||
270 | } | ||
271 | } | ||
272 | spin_unlock_irq(&rtc_lock); | ||
273 | return 0; | ||
274 | } | ||
275 | |||
278 | static irqreturn_t elapsedtime_interrupt(int irq, void *dev_id) | 276 | static irqreturn_t elapsedtime_interrupt(int irq, void *dev_id) |
279 | { | 277 | { |
280 | struct platform_device *pdev = (struct platform_device *)dev_id; | 278 | struct platform_device *pdev = (struct platform_device *)dev_id; |
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 318672d05563..a9fe23d5bd0f 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c | |||
@@ -72,7 +72,7 @@ static struct dasd_discipline dasd_eckd_discipline; | |||
72 | static struct ccw_device_id dasd_eckd_ids[] = { | 72 | static struct ccw_device_id dasd_eckd_ids[] = { |
73 | { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1}, | 73 | { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1}, |
74 | { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2}, | 74 | { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2}, |
75 | { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3}, | 75 | { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3}, |
76 | { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4}, | 76 | { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4}, |
77 | { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5}, | 77 | { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5}, |
78 | { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6}, | 78 | { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6}, |
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 44578b56ad0a..d3e58d763b43 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c | |||
@@ -1561,6 +1561,7 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport) | |||
1561 | { | 1561 | { |
1562 | struct Scsi_Host *host = rport_to_shost(rport); | 1562 | struct Scsi_Host *host = rport_to_shost(rport); |
1563 | fc_port_t *fcport = *(fc_port_t **)rport->dd_data; | 1563 | fc_port_t *fcport = *(fc_port_t **)rport->dd_data; |
1564 | unsigned long flags; | ||
1564 | 1565 | ||
1565 | if (!fcport) | 1566 | if (!fcport) |
1566 | return; | 1567 | return; |
@@ -1573,10 +1574,10 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport) | |||
1573 | * Transport has effectively 'deleted' the rport, clear | 1574 | * Transport has effectively 'deleted' the rport, clear |
1574 | * all local references. | 1575 | * all local references. |
1575 | */ | 1576 | */ |
1576 | spin_lock_irq(host->host_lock); | 1577 | spin_lock_irqsave(host->host_lock, flags); |
1577 | fcport->rport = fcport->drport = NULL; | 1578 | fcport->rport = fcport->drport = NULL; |
1578 | *((fc_port_t **)rport->dd_data) = NULL; | 1579 | *((fc_port_t **)rport->dd_data) = NULL; |
1579 | spin_unlock_irq(host->host_lock); | 1580 | spin_unlock_irqrestore(host->host_lock, flags); |
1580 | 1581 | ||
1581 | if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags)) | 1582 | if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags)) |
1582 | return; | 1583 | return; |
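The qla2xxx hunks in this and the next two files swap spin_lock_irq()/spin_unlock_irq() for the irqsave variants. The difference matters when the caller may itself run with interrupts disabled: spin_unlock_irq() unconditionally re-enables interrupts, whereas the irqsave/irqrestore pair puts the IRQ state back exactly as it was. Generic shape of the pattern (lock and counter are placeholders):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(foo_lock);
    static unsigned long foo_events;

    static void foo_note_event(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&foo_lock, flags);       /* saves the current IRQ state */
            foo_events++;                              /* placeholder critical section */
            spin_unlock_irqrestore(&foo_lock, flags);  /* restores it instead of forcing IRQs on */
    }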
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index f948e1a73aec..d9479c3fe5f8 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -2505,11 +2505,12 @@ qla2x00_rport_del(void *data) | |||
2505 | { | 2505 | { |
2506 | fc_port_t *fcport = data; | 2506 | fc_port_t *fcport = data; |
2507 | struct fc_rport *rport; | 2507 | struct fc_rport *rport; |
2508 | unsigned long flags; | ||
2508 | 2509 | ||
2509 | spin_lock_irq(fcport->vha->host->host_lock); | 2510 | spin_lock_irqsave(fcport->vha->host->host_lock, flags); |
2510 | rport = fcport->drport ? fcport->drport: fcport->rport; | 2511 | rport = fcport->drport ? fcport->drport: fcport->rport; |
2511 | fcport->drport = NULL; | 2512 | fcport->drport = NULL; |
2512 | spin_unlock_irq(fcport->vha->host->host_lock); | 2513 | spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); |
2513 | if (rport) | 2514 | if (rport) |
2514 | fc_remote_port_delete(rport); | 2515 | fc_remote_port_delete(rport); |
2515 | } | 2516 | } |
@@ -2879,6 +2880,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) | |||
2879 | struct fc_rport_identifiers rport_ids; | 2880 | struct fc_rport_identifiers rport_ids; |
2880 | struct fc_rport *rport; | 2881 | struct fc_rport *rport; |
2881 | struct qla_hw_data *ha = vha->hw; | 2882 | struct qla_hw_data *ha = vha->hw; |
2883 | unsigned long flags; | ||
2882 | 2884 | ||
2883 | qla2x00_rport_del(fcport); | 2885 | qla2x00_rport_del(fcport); |
2884 | 2886 | ||
@@ -2893,9 +2895,9 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) | |||
2893 | "Unable to allocate fc remote port!\n"); | 2895 | "Unable to allocate fc remote port!\n"); |
2894 | return; | 2896 | return; |
2895 | } | 2897 | } |
2896 | spin_lock_irq(fcport->vha->host->host_lock); | 2898 | spin_lock_irqsave(fcport->vha->host->host_lock, flags); |
2897 | *((fc_port_t **)rport->dd_data) = fcport; | 2899 | *((fc_port_t **)rport->dd_data) = fcport; |
2898 | spin_unlock_irq(fcport->vha->host->host_lock); | 2900 | spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); |
2899 | 2901 | ||
2900 | rport->supported_classes = fcport->supported_classes; | 2902 | rport->supported_classes = fcport->supported_classes; |
2901 | 2903 | ||
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index c194c23ca1fb..f27724d76cf6 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -562,7 +562,6 @@ qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *) | |||
562 | } | 562 | } |
563 | if (atomic_read(&fcport->state) != FCS_ONLINE) { | 563 | if (atomic_read(&fcport->state) != FCS_ONLINE) { |
564 | if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || | 564 | if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || |
565 | atomic_read(&fcport->state) == FCS_DEVICE_LOST || | ||
566 | atomic_read(&base_vha->loop_state) == LOOP_DEAD) { | 565 | atomic_read(&base_vha->loop_state) == LOOP_DEAD) { |
567 | cmd->result = DID_NO_CONNECT << 16; | 566 | cmd->result = DID_NO_CONNECT << 16; |
568 | goto qc24_fail_command; | 567 | goto qc24_fail_command; |
@@ -2513,6 +2512,7 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport, | |||
2513 | { | 2512 | { |
2514 | struct fc_rport *rport; | 2513 | struct fc_rport *rport; |
2515 | scsi_qla_host_t *base_vha; | 2514 | scsi_qla_host_t *base_vha; |
2515 | unsigned long flags; | ||
2516 | 2516 | ||
2517 | if (!fcport->rport) | 2517 | if (!fcport->rport) |
2518 | return; | 2518 | return; |
@@ -2520,9 +2520,9 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport, | |||
2520 | rport = fcport->rport; | 2520 | rport = fcport->rport; |
2521 | if (defer) { | 2521 | if (defer) { |
2522 | base_vha = pci_get_drvdata(vha->hw->pdev); | 2522 | base_vha = pci_get_drvdata(vha->hw->pdev); |
2523 | spin_lock_irq(vha->host->host_lock); | 2523 | spin_lock_irqsave(vha->host->host_lock, flags); |
2524 | fcport->drport = rport; | 2524 | fcport->drport = rport; |
2525 | spin_unlock_irq(vha->host->host_lock); | 2525 | spin_unlock_irqrestore(vha->host->host_lock, flags); |
2526 | set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); | 2526 | set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); |
2527 | qla2xxx_wake_dpc(base_vha); | 2527 | qla2xxx_wake_dpc(base_vha); |
2528 | } else | 2528 | } else |
@@ -3282,10 +3282,10 @@ qla2x00_do_dpc(void *data) | |||
3282 | 3282 | ||
3283 | set_user_nice(current, -20); | 3283 | set_user_nice(current, -20); |
3284 | 3284 | ||
3285 | set_current_state(TASK_INTERRUPTIBLE); | ||
3285 | while (!kthread_should_stop()) { | 3286 | while (!kthread_should_stop()) { |
3286 | DEBUG3(printk("qla2x00: DPC handler sleeping\n")); | 3287 | DEBUG3(printk("qla2x00: DPC handler sleeping\n")); |
3287 | 3288 | ||
3288 | set_current_state(TASK_INTERRUPTIBLE); | ||
3289 | schedule(); | 3289 | schedule(); |
3290 | __set_current_state(TASK_RUNNING); | 3290 | __set_current_state(TASK_RUNNING); |
3291 | 3291 | ||
@@ -3454,7 +3454,9 @@ qla2x00_do_dpc(void *data) | |||
3454 | qla2x00_do_dpc_all_vps(base_vha); | 3454 | qla2x00_do_dpc_all_vps(base_vha); |
3455 | 3455 | ||
3456 | ha->dpc_active = 0; | 3456 | ha->dpc_active = 0; |
3457 | set_current_state(TASK_INTERRUPTIBLE); | ||
3457 | } /* End of while(1) */ | 3458 | } /* End of while(1) */ |
3459 | __set_current_state(TASK_RUNNING); | ||
3458 | 3460 | ||
3459 | DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no)); | 3461 | DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no)); |
3460 | 3462 | ||
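The DPC-thread change moves set_current_state(TASK_INTERRUPTIBLE) in front of the kthread_should_stop() check. Done the other way round there is a window where a wakeup (or kthread_stop()) arrives after the check but before the state change, and the thread still goes to sleep. The resulting loop shape, shown here as a generic worker rather than the qla2xxx code:

    #include <linux/kthread.h>
    #include <linux/sched.h>

    static int foo_thread(void *data)
    {
            set_current_state(TASK_INTERRUPTIBLE);
            while (!kthread_should_stop()) {
                    schedule();                            /* sleep until woken or told to stop */
                    __set_current_state(TASK_RUNNING);

                    /* ... one round of deferred work ... */

                    set_current_state(TASK_INTERRUPTIBLE); /* re-arm before re-checking */
            }
            __set_current_state(TASK_RUNNING);
            return 0;
    }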
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 7b310934efed..a6b2d72022fc 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c | |||
@@ -1671,7 +1671,7 @@ static int do_device_access(struct scsi_cmnd *scmd, | |||
1671 | unsigned long long lba, unsigned int num, int write) | 1671 | unsigned long long lba, unsigned int num, int write) |
1672 | { | 1672 | { |
1673 | int ret; | 1673 | int ret; |
1674 | unsigned int block, rest = 0; | 1674 | unsigned long long block, rest = 0; |
1675 | int (*func)(struct scsi_cmnd *, unsigned char *, int); | 1675 | int (*func)(struct scsi_cmnd *, unsigned char *, int); |
1676 | 1676 | ||
1677 | func = write ? fetch_to_dev_buffer : fill_from_dev_buffer; | 1677 | func = write ? fetch_to_dev_buffer : fill_from_dev_buffer; |
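scsi_debug derives block and rest from a 64-bit LBA; keeping them as unsigned int silently truncates once a virtual device exceeds 2^32 sectors. A tiny stand-alone illustration of that truncation (ordinary C, nothing scsi_debug-specific):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long lba = 0x100000001ULL;  /* an LBA above 2^32 */
            unsigned int block32 = lba;               /* truncated to 0x1 */
            unsigned long long block64 = lba;         /* keeps 0x100000001 */

            printf("%x %llx\n", block32, block64);    /* prints: 1 100000001 */
            return 0;
    }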
diff --git a/drivers/spi/pxa2xx_spi_pci.c b/drivers/spi/pxa2xx_spi_pci.c index 351d8a375b57..19752b09e155 100644 --- a/drivers/spi/pxa2xx_spi_pci.c +++ b/drivers/spi/pxa2xx_spi_pci.c | |||
@@ -7,10 +7,9 @@ | |||
7 | #include <linux/of_device.h> | 7 | #include <linux/of_device.h> |
8 | #include <linux/spi/pxa2xx_spi.h> | 8 | #include <linux/spi/pxa2xx_spi.h> |
9 | 9 | ||
10 | struct awesome_struct { | 10 | struct ce4100_info { |
11 | struct ssp_device ssp; | 11 | struct ssp_device ssp; |
12 | struct platform_device spi_pdev; | 12 | struct platform_device *spi_pdev; |
13 | struct pxa2xx_spi_master spi_pdata; | ||
14 | }; | 13 | }; |
15 | 14 | ||
16 | static DEFINE_MUTEX(ssp_lock); | 15 | static DEFINE_MUTEX(ssp_lock); |
@@ -51,23 +50,15 @@ void pxa_ssp_free(struct ssp_device *ssp) | |||
51 | } | 50 | } |
52 | EXPORT_SYMBOL_GPL(pxa_ssp_free); | 51 | EXPORT_SYMBOL_GPL(pxa_ssp_free); |
53 | 52 | ||
54 | static void plat_dev_release(struct device *dev) | ||
55 | { | ||
56 | struct awesome_struct *as = container_of(dev, | ||
57 | struct awesome_struct, spi_pdev.dev); | ||
58 | |||
59 | of_device_node_put(&as->spi_pdev.dev); | ||
60 | } | ||
61 | |||
62 | static int __devinit ce4100_spi_probe(struct pci_dev *dev, | 53 | static int __devinit ce4100_spi_probe(struct pci_dev *dev, |
63 | const struct pci_device_id *ent) | 54 | const struct pci_device_id *ent) |
64 | { | 55 | { |
65 | int ret; | 56 | int ret; |
66 | resource_size_t phys_beg; | 57 | resource_size_t phys_beg; |
67 | resource_size_t phys_len; | 58 | resource_size_t phys_len; |
68 | struct awesome_struct *spi_info; | 59 | struct ce4100_info *spi_info; |
69 | struct platform_device *pdev; | 60 | struct platform_device *pdev; |
70 | struct pxa2xx_spi_master *spi_pdata; | 61 | struct pxa2xx_spi_master spi_pdata; |
71 | struct ssp_device *ssp; | 62 | struct ssp_device *ssp; |
72 | 63 | ||
73 | ret = pci_enable_device(dev); | 64 | ret = pci_enable_device(dev); |
@@ -84,33 +75,30 @@ static int __devinit ce4100_spi_probe(struct pci_dev *dev, | |||
84 | return ret; | 75 | return ret; |
85 | } | 76 | } |
86 | 77 | ||
78 | pdev = platform_device_alloc("pxa2xx-spi", dev->devfn); | ||
87 | spi_info = kzalloc(sizeof(*spi_info), GFP_KERNEL); | 79 | spi_info = kzalloc(sizeof(*spi_info), GFP_KERNEL); |
88 | if (!spi_info) { | 80 | if (!pdev || !spi_info ) { |
89 | ret = -ENOMEM; | 81 | ret = -ENOMEM; |
90 | goto err_kz; | 82 | goto err_nomem; |
91 | } | 83 | } |
92 | ssp = &spi_info->ssp; | 84 | memset(&spi_pdata, 0, sizeof(spi_pdata)); |
93 | pdev = &spi_info->spi_pdev; | 85 | spi_pdata.num_chipselect = dev->devfn; |
94 | spi_pdata = &spi_info->spi_pdata; | ||
95 | 86 | ||
96 | pdev->name = "pxa2xx-spi"; | 87 | ret = platform_device_add_data(pdev, &spi_pdata, sizeof(spi_pdata)); |
97 | pdev->id = dev->devfn; | 88 | if (ret) |
98 | pdev->dev.parent = &dev->dev; | 89 | goto err_nomem; |
99 | pdev->dev.platform_data = &spi_info->spi_pdata; | ||
100 | 90 | ||
91 | pdev->dev.parent = &dev->dev; | ||
101 | #ifdef CONFIG_OF | 92 | #ifdef CONFIG_OF |
102 | pdev->dev.of_node = dev->dev.of_node; | 93 | pdev->dev.of_node = dev->dev.of_node; |
103 | #endif | 94 | #endif |
104 | pdev->dev.release = plat_dev_release; | 95 | ssp = &spi_info->ssp; |
105 | |||
106 | spi_pdata->num_chipselect = dev->devfn; | ||
107 | |||
108 | ssp->phys_base = pci_resource_start(dev, 0); | 96 | ssp->phys_base = pci_resource_start(dev, 0); |
109 | ssp->mmio_base = ioremap(phys_beg, phys_len); | 97 | ssp->mmio_base = ioremap(phys_beg, phys_len); |
110 | if (!ssp->mmio_base) { | 98 | if (!ssp->mmio_base) { |
111 | dev_err(&pdev->dev, "failed to ioremap() registers\n"); | 99 | dev_err(&pdev->dev, "failed to ioremap() registers\n"); |
112 | ret = -EIO; | 100 | ret = -EIO; |
113 | goto err_remap; | 101 | goto err_nomem; |
114 | } | 102 | } |
115 | ssp->irq = dev->irq; | 103 | ssp->irq = dev->irq; |
116 | ssp->port_id = pdev->id; | 104 | ssp->port_id = pdev->id; |
@@ -122,7 +110,7 @@ static int __devinit ce4100_spi_probe(struct pci_dev *dev, | |||
122 | 110 | ||
123 | pci_set_drvdata(dev, spi_info); | 111 | pci_set_drvdata(dev, spi_info); |
124 | 112 | ||
125 | ret = platform_device_register(pdev); | 113 | ret = platform_device_add(pdev); |
126 | if (ret) | 114 | if (ret) |
127 | goto err_dev_add; | 115 | goto err_dev_add; |
128 | 116 | ||
@@ -135,27 +123,21 @@ err_dev_add: | |||
135 | mutex_unlock(&ssp_lock); | 123 | mutex_unlock(&ssp_lock); |
136 | iounmap(ssp->mmio_base); | 124 | iounmap(ssp->mmio_base); |
137 | 125 | ||
138 | err_remap: | 126 | err_nomem: |
139 | kfree(spi_info); | ||
140 | |||
141 | err_kz: | ||
142 | release_mem_region(phys_beg, phys_len); | 127 | release_mem_region(phys_beg, phys_len); |
143 | 128 | platform_device_put(pdev); | |
129 | kfree(spi_info); | ||
144 | return ret; | 130 | return ret; |
145 | } | 131 | } |
146 | 132 | ||
147 | static void __devexit ce4100_spi_remove(struct pci_dev *dev) | 133 | static void __devexit ce4100_spi_remove(struct pci_dev *dev) |
148 | { | 134 | { |
149 | struct awesome_struct *spi_info; | 135 | struct ce4100_info *spi_info; |
150 | struct platform_device *pdev; | ||
151 | struct ssp_device *ssp; | 136 | struct ssp_device *ssp; |
152 | 137 | ||
153 | spi_info = pci_get_drvdata(dev); | 138 | spi_info = pci_get_drvdata(dev); |
154 | |||
155 | ssp = &spi_info->ssp; | 139 | ssp = &spi_info->ssp; |
156 | pdev = &spi_info->spi_pdev; | 140 | platform_device_unregister(spi_info->spi_pdev); |
157 | |||
158 | platform_device_unregister(pdev); | ||
159 | 141 | ||
160 | iounmap(ssp->mmio_base); | 142 | iounmap(ssp->mmio_base); |
161 | release_mem_region(pci_resource_start(dev, 0), | 143 | release_mem_region(pci_resource_start(dev, 0), |
@@ -171,7 +153,6 @@ static void __devexit ce4100_spi_remove(struct pci_dev *dev) | |||
171 | } | 153 | } |
172 | 154 | ||
173 | static struct pci_device_id ce4100_spi_devices[] __devinitdata = { | 155 | static struct pci_device_id ce4100_spi_devices[] __devinitdata = { |
174 | |||
175 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) }, | 156 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) }, |
176 | { }, | 157 | { }, |
177 | }; | 158 | }; |
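The pxa2xx_spi_pci rework stops embedding a struct platform_device inside a driver-private structure (whose lifetime cannot follow the device's refcount) and uses the platform core's allocation helpers instead, with the chip-select count passed as copied platform data. The usual call sequence, as a rough sketch with placeholder names and compressed error handling:

    struct foo_pdata pdata = { .num_chipselect = 1 };   /* placeholder platform data */
    struct platform_device *pdev;
    int ret;

    pdev = platform_device_alloc("foo-device", 0);      /* refcounted allocation */
    if (!pdev)
            return -ENOMEM;

    ret = platform_device_add_data(pdev, &pdata, sizeof(pdata));  /* data is copied */
    if (!ret)
            ret = platform_device_add(pdev);             /* registers the device */
    if (ret) {
            platform_device_put(pdev);                   /* drop the reference on failure */
            return ret;
    }

A device registered this way is later torn down with platform_device_unregister(), which is exactly what the reworked ce4100_spi_remove() does.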
diff --git a/drivers/spi/spi_sh_msiof.c b/drivers/spi/spi_sh_msiof.c index 56f60c8ea0ab..2c665fceaac7 100644 --- a/drivers/spi/spi_sh_msiof.c +++ b/drivers/spi/spi_sh_msiof.c | |||
@@ -509,9 +509,11 @@ static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t) | |||
509 | bytes_done = 0; | 509 | bytes_done = 0; |
510 | 510 | ||
511 | while (bytes_done < t->len) { | 511 | while (bytes_done < t->len) { |
512 | void *rx_buf = t->rx_buf ? t->rx_buf + bytes_done : NULL; | ||
513 | const void *tx_buf = t->tx_buf ? t->tx_buf + bytes_done : NULL; | ||
512 | n = sh_msiof_spi_txrx_once(p, tx_fifo, rx_fifo, | 514 | n = sh_msiof_spi_txrx_once(p, tx_fifo, rx_fifo, |
513 | t->tx_buf + bytes_done, | 515 | tx_buf, |
514 | t->rx_buf + bytes_done, | 516 | rx_buf, |
515 | words, bits); | 517 | words, bits); |
516 | if (n < 0) | 518 | if (n < 0) |
517 | break; | 519 | break; |
diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c index c7345dbf43fa..f8533795ee7f 100644 --- a/drivers/ssb/pcmcia.c +++ b/drivers/ssb/pcmcia.c | |||
@@ -733,7 +733,7 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus, | |||
733 | 733 | ||
734 | /* Fetch the vendor specific tuples. */ | 734 | /* Fetch the vendor specific tuples. */ |
735 | res = pcmcia_loop_tuple(bus->host_pcmcia, SSB_PCMCIA_CIS, | 735 | res = pcmcia_loop_tuple(bus->host_pcmcia, SSB_PCMCIA_CIS, |
736 | ssb_pcmcia_do_get_invariants, sprom); | 736 | ssb_pcmcia_do_get_invariants, iv); |
737 | if ((res == 0) || (res == -ENOSPC)) | 737 | if ((res == 0) || (res == -ENOSPC)) |
738 | return 0; | 738 | return 0; |
739 | 739 | ||
diff --git a/drivers/staging/brcm80211/sys/wl_mac80211.c b/drivers/staging/brcm80211/sys/wl_mac80211.c index f1235884cc5d..cd8392badff0 100644 --- a/drivers/staging/brcm80211/sys/wl_mac80211.c +++ b/drivers/staging/brcm80211/sys/wl_mac80211.c | |||
@@ -263,9 +263,7 @@ ieee_set_channel(struct ieee80211_hw *hw, struct ieee80211_channel *chan, | |||
263 | switch (type) { | 263 | switch (type) { |
264 | case NL80211_CHAN_HT20: | 264 | case NL80211_CHAN_HT20: |
265 | case NL80211_CHAN_NO_HT: | 265 | case NL80211_CHAN_NO_HT: |
266 | WL_LOCK(wl); | ||
267 | err = wlc_set(wl->wlc, WLC_SET_CHANNEL, chan->hw_value); | 266 | err = wlc_set(wl->wlc, WLC_SET_CHANNEL, chan->hw_value); |
268 | WL_UNLOCK(wl); | ||
269 | break; | 267 | break; |
270 | case NL80211_CHAN_HT40MINUS: | 268 | case NL80211_CHAN_HT40MINUS: |
271 | case NL80211_CHAN_HT40PLUS: | 269 | case NL80211_CHAN_HT40PLUS: |
@@ -285,6 +283,7 @@ static int wl_ops_config(struct ieee80211_hw *hw, u32 changed) | |||
285 | int err = 0; | 283 | int err = 0; |
286 | int new_int; | 284 | int new_int; |
287 | 285 | ||
286 | WL_LOCK(wl); | ||
288 | if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) { | 287 | if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) { |
289 | WL_NONE("%s: Setting listen interval to %d\n", | 288 | WL_NONE("%s: Setting listen interval to %d\n", |
290 | __func__, conf->listen_interval); | 289 | __func__, conf->listen_interval); |
@@ -341,6 +340,7 @@ static int wl_ops_config(struct ieee80211_hw *hw, u32 changed) | |||
341 | } | 340 | } |
342 | 341 | ||
343 | config_out: | 342 | config_out: |
343 | WL_UNLOCK(wl); | ||
344 | return err; | 344 | return err; |
345 | } | 345 | } |
346 | 346 | ||
@@ -459,13 +459,21 @@ wl_ops_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set) | |||
459 | 459 | ||
460 | static void wl_ops_sw_scan_start(struct ieee80211_hw *hw) | 460 | static void wl_ops_sw_scan_start(struct ieee80211_hw *hw) |
461 | { | 461 | { |
462 | struct wl_info *wl = hw->priv; | ||
462 | WL_NONE("Scan Start\n"); | 463 | WL_NONE("Scan Start\n"); |
464 | WL_LOCK(wl); | ||
465 | wlc_scan_start(wl->wlc); | ||
466 | WL_UNLOCK(wl); | ||
463 | return; | 467 | return; |
464 | } | 468 | } |
465 | 469 | ||
466 | static void wl_ops_sw_scan_complete(struct ieee80211_hw *hw) | 470 | static void wl_ops_sw_scan_complete(struct ieee80211_hw *hw) |
467 | { | 471 | { |
472 | struct wl_info *wl = hw->priv; | ||
468 | WL_NONE("Scan Complete\n"); | 473 | WL_NONE("Scan Complete\n"); |
474 | WL_LOCK(wl); | ||
475 | wlc_scan_stop(wl->wlc); | ||
476 | WL_UNLOCK(wl); | ||
469 | return; | 477 | return; |
470 | } | 478 | } |
471 | 479 | ||
diff --git a/drivers/staging/brcm80211/sys/wlc_mac80211.c b/drivers/staging/brcm80211/sys/wlc_mac80211.c index a1303863686c..e37e8058e2b8 100644 --- a/drivers/staging/brcm80211/sys/wlc_mac80211.c +++ b/drivers/staging/brcm80211/sys/wlc_mac80211.c | |||
@@ -8461,3 +8461,16 @@ static void wlc_txq_free(struct wlc_info *wlc, struct osl_info *osh, | |||
8461 | 8461 | ||
8462 | kfree(qi); | 8462 | kfree(qi); |
8463 | } | 8463 | } |
8464 | |||
8465 | /* | ||
8466 | * Flag 'scan in progress' to withold dynamic phy calibration | ||
8467 | */ | ||
8468 | void wlc_scan_start(struct wlc_info *wlc) | ||
8469 | { | ||
8470 | wlc_phy_hold_upd(wlc->band->pi, PHY_HOLD_FOR_SCAN, true); | ||
8471 | } | ||
8472 | |||
8473 | void wlc_scan_stop(struct wlc_info *wlc) | ||
8474 | { | ||
8475 | wlc_phy_hold_upd(wlc->band->pi, PHY_HOLD_FOR_SCAN, false); | ||
8476 | } | ||
diff --git a/drivers/staging/brcm80211/sys/wlc_pub.h b/drivers/staging/brcm80211/sys/wlc_pub.h index 146a6904a39b..aff413001b70 100644 --- a/drivers/staging/brcm80211/sys/wlc_pub.h +++ b/drivers/staging/brcm80211/sys/wlc_pub.h | |||
@@ -570,6 +570,8 @@ extern void wlc_enable_mac(struct wlc_info *wlc); | |||
570 | extern u16 wlc_rate_shm_offset(struct wlc_info *wlc, u8 rate); | 570 | extern u16 wlc_rate_shm_offset(struct wlc_info *wlc, u8 rate); |
571 | extern u32 wlc_get_rspec_history(struct wlc_bsscfg *cfg); | 571 | extern u32 wlc_get_rspec_history(struct wlc_bsscfg *cfg); |
572 | extern u32 wlc_get_current_highest_rate(struct wlc_bsscfg *cfg); | 572 | extern u32 wlc_get_current_highest_rate(struct wlc_bsscfg *cfg); |
573 | extern void wlc_scan_start(struct wlc_info *wlc); | ||
574 | extern void wlc_scan_stop(struct wlc_info *wlc); | ||
573 | 575 | ||
574 | static inline int wlc_iovar_getuint(struct wlc_info *wlc, const char *name, | 576 | static inline int wlc_iovar_getuint(struct wlc_info *wlc, const char *name, |
575 | uint *arg) | 577 | uint *arg) |
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig index aad47326d6dc..1502d80f6f78 100644 --- a/drivers/staging/comedi/Kconfig +++ b/drivers/staging/comedi/Kconfig | |||
@@ -439,6 +439,7 @@ config COMEDI_NI_AT_AO | |||
439 | config COMEDI_NI_ATMIO | 439 | config COMEDI_NI_ATMIO |
440 | tristate "NI AT-MIO E series ISA-PNP card support" | 440 | tristate "NI AT-MIO E series ISA-PNP card support" |
441 | depends on ISAPNP && COMEDI_NI_TIO && COMEDI_NI_COMMON | 441 | depends on ISAPNP && COMEDI_NI_TIO && COMEDI_NI_COMMON |
442 | select COMEDI_8255 | ||
442 | default N | 443 | default N |
443 | ---help--- | 444 | ---help--- |
444 | Enable support for National Instruments AT-MIO E series cards | 445 | Enable support for National Instruments AT-MIO E series cards |
@@ -1040,6 +1041,8 @@ config COMEDI_NI_PCIDIO | |||
1040 | config COMEDI_NI_PCIMIO | 1041 | config COMEDI_NI_PCIMIO |
1041 | tristate "NI PCI-MIO-E series and M series support" | 1042 | tristate "NI PCI-MIO-E series and M series support" |
1042 | depends on COMEDI_NI_TIO && COMEDI_NI_COMMON | 1043 | depends on COMEDI_NI_TIO && COMEDI_NI_COMMON |
1044 | select COMEDI_8255 | ||
1045 | select COMEDI_FC | ||
1043 | default N | 1046 | default N |
1044 | ---help--- | 1047 | ---help--- |
1045 | Enable support for National Instruments PCI-MIO-E series and M series | 1048 | Enable support for National Instruments PCI-MIO-E series and M series |
@@ -1164,6 +1167,7 @@ config COMEDI_NI_LABPC_CS | |||
1164 | config COMEDI_NI_MIO_CS | 1167 | config COMEDI_NI_MIO_CS |
1165 | tristate "NI DAQCard E series PCMCIA support" | 1168 | tristate "NI DAQCard E series PCMCIA support" |
1166 | depends on COMEDI_NI_TIO && COMEDI_NI_COMMON | 1169 | depends on COMEDI_NI_TIO && COMEDI_NI_COMMON |
1170 | select COMEDI_8255 | ||
1167 | select COMEDI_FC | 1171 | select COMEDI_FC |
1168 | default N | 1172 | default N |
1169 | ---help--- | 1173 | ---help--- |
@@ -1268,7 +1272,6 @@ config COMEDI_MITE | |||
1268 | config COMEDI_NI_TIO | 1272 | config COMEDI_NI_TIO |
1269 | tristate "NI general purpose counter support" | 1273 | tristate "NI general purpose counter support" |
1270 | depends on COMEDI_MITE | 1274 | depends on COMEDI_MITE |
1271 | select COMEDI_8255 | ||
1272 | default N | 1275 | default N |
1273 | ---help--- | 1276 | ---help--- |
1274 | Enable support for National Instruments general purpose counters. | 1277 | Enable support for National Instruments general purpose counters. |
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c index cd25b241cc1f..fd274e9c7b78 100644 --- a/drivers/staging/comedi/drivers/mite.c +++ b/drivers/staging/comedi/drivers/mite.c | |||
@@ -61,8 +61,6 @@ | |||
61 | #define PCI_DAQ_SIZE 4096 | 61 | #define PCI_DAQ_SIZE 4096 |
62 | #define PCI_DAQ_SIZE_660X 8192 | 62 | #define PCI_DAQ_SIZE_660X 8192 |
63 | 63 | ||
64 | MODULE_LICENSE("GPL"); | ||
65 | |||
66 | struct mite_struct *mite_devices; | 64 | struct mite_struct *mite_devices; |
67 | EXPORT_SYMBOL(mite_devices); | 65 | EXPORT_SYMBOL(mite_devices); |
68 | 66 | ||
diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c index 14e716e99a5c..54741c9e1af5 100644 --- a/drivers/staging/comedi/drivers/ni_6527.c +++ b/drivers/staging/comedi/drivers/ni_6527.c | |||
@@ -527,3 +527,7 @@ static void __exit driver_ni6527_cleanup_module(void) | |||
527 | 527 | ||
528 | module_init(driver_ni6527_init_module); | 528 | module_init(driver_ni6527_init_module); |
529 | module_exit(driver_ni6527_cleanup_module); | 529 | module_exit(driver_ni6527_cleanup_module); |
530 | |||
531 | MODULE_AUTHOR("Comedi http://www.comedi.org"); | ||
532 | MODULE_DESCRIPTION("Comedi low-level driver"); | ||
533 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c index 8b8e2aaf77fb..403fc0997d37 100644 --- a/drivers/staging/comedi/drivers/ni_65xx.c +++ b/drivers/staging/comedi/drivers/ni_65xx.c | |||
@@ -871,3 +871,7 @@ static void __exit driver_ni_65xx_cleanup_module(void) | |||
871 | 871 | ||
872 | module_init(driver_ni_65xx_init_module); | 872 | module_init(driver_ni_65xx_init_module); |
873 | module_exit(driver_ni_65xx_cleanup_module); | 873 | module_exit(driver_ni_65xx_cleanup_module); |
874 | |||
875 | MODULE_AUTHOR("Comedi http://www.comedi.org"); | ||
876 | MODULE_DESCRIPTION("Comedi low-level driver"); | ||
877 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c index 6612b085c4ef..ca2aeaa9449c 100644 --- a/drivers/staging/comedi/drivers/ni_660x.c +++ b/drivers/staging/comedi/drivers/ni_660x.c | |||
@@ -1421,3 +1421,7 @@ static int ni_660x_dio_insn_config(struct comedi_device *dev, | |||
1421 | }; | 1421 | }; |
1422 | return 0; | 1422 | return 0; |
1423 | } | 1423 | } |
1424 | |||
1425 | MODULE_AUTHOR("Comedi http://www.comedi.org"); | ||
1426 | MODULE_DESCRIPTION("Comedi low-level driver"); | ||
1427 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/staging/comedi/drivers/ni_670x.c b/drivers/staging/comedi/drivers/ni_670x.c index e9f034efdc6f..d8d91f90060e 100644 --- a/drivers/staging/comedi/drivers/ni_670x.c +++ b/drivers/staging/comedi/drivers/ni_670x.c | |||
@@ -384,3 +384,7 @@ static int ni_670x_find_device(struct comedi_device *dev, int bus, int slot) | |||
384 | mite_list_devices(); | 384 | mite_list_devices(); |
385 | return -EIO; | 385 | return -EIO; |
386 | } | 386 | } |
387 | |||
388 | MODULE_AUTHOR("Comedi http://www.comedi.org"); | ||
389 | MODULE_DESCRIPTION("Comedi low-level driver"); | ||
390 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c index 84a15c34e484..005d2fe86ee4 100644 --- a/drivers/staging/comedi/drivers/ni_pcidio.c +++ b/drivers/staging/comedi/drivers/ni_pcidio.c | |||
@@ -1354,3 +1354,7 @@ static void __exit driver_pcidio_cleanup_module(void) | |||
1354 | 1354 | ||
1355 | module_init(driver_pcidio_init_module); | 1355 | module_init(driver_pcidio_init_module); |
1356 | module_exit(driver_pcidio_cleanup_module); | 1356 | module_exit(driver_pcidio_cleanup_module); |
1357 | |||
1358 | MODULE_AUTHOR("Comedi http://www.comedi.org"); | ||
1359 | MODULE_DESCRIPTION("Comedi low-level driver"); | ||
1360 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c index 23a381247285..9148abdad074 100644 --- a/drivers/staging/comedi/drivers/ni_pcimio.c +++ b/drivers/staging/comedi/drivers/ni_pcimio.c | |||
@@ -1853,3 +1853,7 @@ static int pcimio_dio_change(struct comedi_device *dev, | |||
1853 | 1853 | ||
1854 | return 0; | 1854 | return 0; |
1855 | } | 1855 | } |
1856 | |||
1857 | MODULE_AUTHOR("Comedi http://www.comedi.org"); | ||
1858 | MODULE_DESCRIPTION("Comedi low-level driver"); | ||
1859 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c index 54706a16dc0a..b41c9640b72d 100644 --- a/drivers/staging/hv/netvsc_drv.c +++ b/drivers/staging/hv/netvsc_drv.c | |||
@@ -236,6 +236,7 @@ static void netvsc_linkstatus_callback(struct hv_device *device_obj, | |||
236 | if (status == 1) { | 236 | if (status == 1) { |
237 | netif_carrier_on(net); | 237 | netif_carrier_on(net); |
238 | netif_wake_queue(net); | 238 | netif_wake_queue(net); |
239 | netif_notify_peers(net); | ||
239 | } else { | 240 | } else { |
240 | netif_carrier_off(net); | 241 | netif_carrier_off(net); |
241 | netif_stop_queue(net); | 242 | netif_stop_queue(net); |
diff --git a/drivers/staging/intel_sst/intelmid_v2_control.c b/drivers/staging/intel_sst/intelmid_v2_control.c index e38e89df6e84..e2f6d6a3c850 100644 --- a/drivers/staging/intel_sst/intelmid_v2_control.c +++ b/drivers/staging/intel_sst/intelmid_v2_control.c | |||
@@ -874,7 +874,10 @@ static int nc_set_selected_input_dev(u8 value) | |||
874 | sc_access[3].reg_addr = 0x109; | 874 | sc_access[3].reg_addr = 0x109; |
875 | sc_access[3].mask = MASK6; | 875 | sc_access[3].mask = MASK6; |
876 | sc_access[3].value = 0x00; | 876 | sc_access[3].value = 0x00; |
877 | num_val = 4; | 877 | sc_access[4].reg_addr = 0x104; |
878 | sc_access[4].value = 0x3C; | ||
879 | sc_access[4].mask = 0xff; | ||
880 | num_val = 5; | ||
878 | break; | 881 | break; |
879 | default: | 882 | default: |
880 | return -EINVAL; | 883 | return -EINVAL; |
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c index 5415712f01f8..4bd8cbdaee76 100644 --- a/drivers/staging/zram/zram_drv.c +++ b/drivers/staging/zram/zram_drv.c | |||
@@ -227,6 +227,7 @@ static int zram_read(struct zram *zram, struct bio *bio) | |||
227 | 227 | ||
228 | if (zram_test_flag(zram, index, ZRAM_ZERO)) { | 228 | if (zram_test_flag(zram, index, ZRAM_ZERO)) { |
229 | handle_zero_page(page); | 229 | handle_zero_page(page); |
230 | index++; | ||
230 | continue; | 231 | continue; |
231 | } | 232 | } |
232 | 233 | ||
@@ -235,12 +236,14 @@ static int zram_read(struct zram *zram, struct bio *bio) | |||
235 | pr_debug("Read before write: sector=%lu, size=%u", | 236 | pr_debug("Read before write: sector=%lu, size=%u", |
236 | (ulong)(bio->bi_sector), bio->bi_size); | 237 | (ulong)(bio->bi_sector), bio->bi_size); |
237 | /* Do nothing */ | 238 | /* Do nothing */ |
239 | index++; | ||
238 | continue; | 240 | continue; |
239 | } | 241 | } |
240 | 242 | ||
241 | /* Page is stored uncompressed since it's incompressible */ | 243 | /* Page is stored uncompressed since it's incompressible */ |
242 | if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) { | 244 | if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) { |
243 | handle_uncompressed_page(zram, page, index); | 245 | handle_uncompressed_page(zram, page, index); |
246 | index++; | ||
244 | continue; | 247 | continue; |
245 | } | 248 | } |
246 | 249 | ||
@@ -320,6 +323,7 @@ static int zram_write(struct zram *zram, struct bio *bio) | |||
320 | mutex_unlock(&zram->lock); | 323 | mutex_unlock(&zram->lock); |
321 | zram_stat_inc(&zram->stats.pages_zero); | 324 | zram_stat_inc(&zram->stats.pages_zero); |
322 | zram_set_flag(zram, index, ZRAM_ZERO); | 325 | zram_set_flag(zram, index, ZRAM_ZERO); |
326 | index++; | ||
323 | continue; | 327 | continue; |
324 | } | 328 | } |
325 | 329 | ||
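All three zram hunks fix the same slip: the early continue paths skipped the index++ at the bottom of the per-segment loop, so every page after a zero-filled or uncompressed one was handled with a stale index. One way to make that class of bug impossible is to hoist the bookkeeping into the loop statement so it runs on every path; a generic sketch (helper names are placeholders, this is not the zram loop):

    u32 index = start_index;
    unsigned int i;

    for (i = 0; i < nr_pages; i++, index++) {   /* index advances on every iteration */
            if (page_is_zero(index))
                    continue;                   /* no separate increment needed */
            if (page_is_uncompressed(index))
                    continue;
            /* ... decompress page 'index' into the bio segment ... */
    }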
diff --git a/drivers/target/Makefile b/drivers/target/Makefile index 5cfd70819f08..973bb190ef57 100644 --- a/drivers/target/Makefile +++ b/drivers/target/Makefile | |||
@@ -13,8 +13,7 @@ target_core_mod-y := target_core_configfs.o \ | |||
13 | target_core_transport.o \ | 13 | target_core_transport.o \ |
14 | target_core_cdb.o \ | 14 | target_core_cdb.o \ |
15 | target_core_ua.o \ | 15 | target_core_ua.o \ |
16 | target_core_rd.o \ | 16 | target_core_rd.o |
17 | target_core_mib.o | ||
18 | 17 | ||
19 | obj-$(CONFIG_TARGET_CORE) += target_core_mod.o | 18 | obj-$(CONFIG_TARGET_CORE) += target_core_mod.o |
20 | 19 | ||
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 2764510798b0..caf8dc18ee0a 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c | |||
@@ -37,7 +37,6 @@ | |||
37 | #include <linux/parser.h> | 37 | #include <linux/parser.h> |
38 | #include <linux/syscalls.h> | 38 | #include <linux/syscalls.h> |
39 | #include <linux/configfs.h> | 39 | #include <linux/configfs.h> |
40 | #include <linux/proc_fs.h> | ||
41 | 40 | ||
42 | #include <target/target_core_base.h> | 41 | #include <target/target_core_base.h> |
43 | #include <target/target_core_device.h> | 42 | #include <target/target_core_device.h> |
@@ -1971,13 +1970,35 @@ static void target_core_dev_release(struct config_item *item) | |||
1971 | { | 1970 | { |
1972 | struct se_subsystem_dev *se_dev = container_of(to_config_group(item), | 1971 | struct se_subsystem_dev *se_dev = container_of(to_config_group(item), |
1973 | struct se_subsystem_dev, se_dev_group); | 1972 | struct se_subsystem_dev, se_dev_group); |
1974 | struct config_group *dev_cg; | 1973 | struct se_hba *hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item); |
1975 | 1974 | struct se_subsystem_api *t = hba->transport; | |
1976 | if (!(se_dev)) | 1975 | struct config_group *dev_cg = &se_dev->se_dev_group; |
1977 | return; | ||
1978 | 1976 | ||
1979 | dev_cg = &se_dev->se_dev_group; | ||
1980 | kfree(dev_cg->default_groups); | 1977 | kfree(dev_cg->default_groups); |
1978 | /* | ||
1979 | * This pointer will be set when the storage is enabled with: | ||
1980 | *`echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable` | ||
1981 | */ | ||
1982 | if (se_dev->se_dev_ptr) { | ||
1983 | printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_" | ||
1984 | "virtual_device() for se_dev_ptr: %p\n", | ||
1985 | se_dev->se_dev_ptr); | ||
1986 | |||
1987 | se_free_virtual_device(se_dev->se_dev_ptr, hba); | ||
1988 | } else { | ||
1989 | /* | ||
1990 | * Release struct se_subsystem_dev->se_dev_su_ptr.. | ||
1991 | */ | ||
1992 | printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_" | ||
1993 | "device() for se_dev_su_ptr: %p\n", | ||
1994 | se_dev->se_dev_su_ptr); | ||
1995 | |||
1996 | t->free_device(se_dev->se_dev_su_ptr); | ||
1997 | } | ||
1998 | |||
1999 | printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem" | ||
2000 | "_dev_t: %p\n", se_dev); | ||
2001 | kfree(se_dev); | ||
1981 | } | 2002 | } |
1982 | 2003 | ||
1983 | static ssize_t target_core_dev_show(struct config_item *item, | 2004 | static ssize_t target_core_dev_show(struct config_item *item, |
@@ -2140,7 +2161,16 @@ static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = { | |||
2140 | NULL, | 2161 | NULL, |
2141 | }; | 2162 | }; |
2142 | 2163 | ||
2164 | static void target_core_alua_lu_gp_release(struct config_item *item) | ||
2165 | { | ||
2166 | struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item), | ||
2167 | struct t10_alua_lu_gp, lu_gp_group); | ||
2168 | |||
2169 | core_alua_free_lu_gp(lu_gp); | ||
2170 | } | ||
2171 | |||
2143 | static struct configfs_item_operations target_core_alua_lu_gp_ops = { | 2172 | static struct configfs_item_operations target_core_alua_lu_gp_ops = { |
2173 | .release = target_core_alua_lu_gp_release, | ||
2144 | .show_attribute = target_core_alua_lu_gp_attr_show, | 2174 | .show_attribute = target_core_alua_lu_gp_attr_show, |
2145 | .store_attribute = target_core_alua_lu_gp_attr_store, | 2175 | .store_attribute = target_core_alua_lu_gp_attr_store, |
2146 | }; | 2176 | }; |
@@ -2191,9 +2221,11 @@ static void target_core_alua_drop_lu_gp( | |||
2191 | printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit" | 2221 | printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit" |
2192 | " Group: core/alua/lu_gps/%s, ID: %hu\n", | 2222 | " Group: core/alua/lu_gps/%s, ID: %hu\n", |
2193 | config_item_name(item), lu_gp->lu_gp_id); | 2223 | config_item_name(item), lu_gp->lu_gp_id); |
2194 | 2224 | /* | |
2225 | * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release() | ||
2226 | * -> target_core_alua_lu_gp_release() | ||
2227 | */ | ||
2195 | config_item_put(item); | 2228 | config_item_put(item); |
2196 | core_alua_free_lu_gp(lu_gp); | ||
2197 | } | 2229 | } |
2198 | 2230 | ||
2199 | static struct configfs_group_operations target_core_alua_lu_gps_group_ops = { | 2231 | static struct configfs_group_operations target_core_alua_lu_gps_group_ops = { |
@@ -2549,7 +2581,16 @@ static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = { | |||
2549 | NULL, | 2581 | NULL, |
2550 | }; | 2582 | }; |
2551 | 2583 | ||
2584 | static void target_core_alua_tg_pt_gp_release(struct config_item *item) | ||
2585 | { | ||
2586 | struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item), | ||
2587 | struct t10_alua_tg_pt_gp, tg_pt_gp_group); | ||
2588 | |||
2589 | core_alua_free_tg_pt_gp(tg_pt_gp); | ||
2590 | } | ||
2591 | |||
2552 | static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = { | 2592 | static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = { |
2593 | .release = target_core_alua_tg_pt_gp_release, | ||
2553 | .show_attribute = target_core_alua_tg_pt_gp_attr_show, | 2594 | .show_attribute = target_core_alua_tg_pt_gp_attr_show, |
2554 | .store_attribute = target_core_alua_tg_pt_gp_attr_store, | 2595 | .store_attribute = target_core_alua_tg_pt_gp_attr_store, |
2555 | }; | 2596 | }; |
@@ -2602,9 +2643,11 @@ static void target_core_alua_drop_tg_pt_gp( | |||
2602 | printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port" | 2643 | printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port" |
2603 | " Group: alua/tg_pt_gps/%s, ID: %hu\n", | 2644 | " Group: alua/tg_pt_gps/%s, ID: %hu\n", |
2604 | config_item_name(item), tg_pt_gp->tg_pt_gp_id); | 2645 | config_item_name(item), tg_pt_gp->tg_pt_gp_id); |
2605 | 2646 | /* | |
2647 | * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release() | ||
2648 | * -> target_core_alua_tg_pt_gp_release(). | ||
2649 | */ | ||
2606 | config_item_put(item); | 2650 | config_item_put(item); |
2607 | core_alua_free_tg_pt_gp(tg_pt_gp); | ||
2608 | } | 2651 | } |
2609 | 2652 | ||
2610 | static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = { | 2653 | static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = { |
@@ -2771,13 +2814,11 @@ static void target_core_drop_subdev( | |||
2771 | struct se_subsystem_api *t; | 2814 | struct se_subsystem_api *t; |
2772 | struct config_item *df_item; | 2815 | struct config_item *df_item; |
2773 | struct config_group *dev_cg, *tg_pt_gp_cg; | 2816 | struct config_group *dev_cg, *tg_pt_gp_cg; |
2774 | int i, ret; | 2817 | int i; |
2775 | 2818 | ||
2776 | hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item); | 2819 | hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item); |
2777 | 2820 | ||
2778 | if (mutex_lock_interruptible(&hba->hba_access_mutex)) | 2821 | mutex_lock(&hba->hba_access_mutex); |
2779 | goto out; | ||
2780 | |||
2781 | t = hba->transport; | 2822 | t = hba->transport; |
2782 | 2823 | ||
2783 | spin_lock(&se_global->g_device_lock); | 2824 | spin_lock(&se_global->g_device_lock); |
@@ -2791,7 +2832,10 @@ static void target_core_drop_subdev( | |||
2791 | config_item_put(df_item); | 2832 | config_item_put(df_item); |
2792 | } | 2833 | } |
2793 | kfree(tg_pt_gp_cg->default_groups); | 2834 | kfree(tg_pt_gp_cg->default_groups); |
2794 | core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp); | 2835 | /* |
2836 | * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp | ||
2837 | * directly from target_core_alua_tg_pt_gp_release(). | ||
2838 | */ | ||
2795 | T10_ALUA(se_dev)->default_tg_pt_gp = NULL; | 2839 | T10_ALUA(se_dev)->default_tg_pt_gp = NULL; |
2796 | 2840 | ||
2797 | dev_cg = &se_dev->se_dev_group; | 2841 | dev_cg = &se_dev->se_dev_group; |
@@ -2800,38 +2844,12 @@ static void target_core_drop_subdev( | |||
2800 | dev_cg->default_groups[i] = NULL; | 2844 | dev_cg->default_groups[i] = NULL; |
2801 | config_item_put(df_item); | 2845 | config_item_put(df_item); |
2802 | } | 2846 | } |
2803 | |||
2804 | config_item_put(item); | ||
2805 | /* | 2847 | /* |
2806 | * This pointer will set when the storage is enabled with: | 2848 | * The releasing of se_dev and associated se_dev->se_dev_ptr is done |
2807 | * `echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable` | 2849 | * from target_core_dev_item_ops->release() ->target_core_dev_release(). |
2808 | */ | 2850 | */ |
2809 | if (se_dev->se_dev_ptr) { | 2851 | config_item_put(item); |
2810 | printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_" | ||
2811 | "virtual_device() for se_dev_ptr: %p\n", | ||
2812 | se_dev->se_dev_ptr); | ||
2813 | |||
2814 | ret = se_free_virtual_device(se_dev->se_dev_ptr, hba); | ||
2815 | if (ret < 0) | ||
2816 | goto hba_out; | ||
2817 | } else { | ||
2818 | /* | ||
2819 | * Release struct se_subsystem_dev->se_dev_su_ptr.. | ||
2820 | */ | ||
2821 | printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_" | ||
2822 | "device() for se_dev_su_ptr: %p\n", | ||
2823 | se_dev->se_dev_su_ptr); | ||
2824 | |||
2825 | t->free_device(se_dev->se_dev_su_ptr); | ||
2826 | } | ||
2827 | |||
2828 | printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem" | ||
2829 | "_dev_t: %p\n", se_dev); | ||
2830 | |||
2831 | hba_out: | ||
2832 | mutex_unlock(&hba->hba_access_mutex); | 2852 | mutex_unlock(&hba->hba_access_mutex); |
2833 | out: | ||
2834 | kfree(se_dev); | ||
2835 | } | 2853 | } |
2836 | 2854 | ||
2837 | static struct configfs_group_operations target_core_hba_group_ops = { | 2855 | static struct configfs_group_operations target_core_hba_group_ops = { |
@@ -2914,6 +2932,13 @@ SE_HBA_ATTR(hba_mode, S_IRUGO | S_IWUSR); | |||
2914 | 2932 | ||
2915 | CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group); | 2933 | CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group); |
2916 | 2934 | ||
2935 | static void target_core_hba_release(struct config_item *item) | ||
2936 | { | ||
2937 | struct se_hba *hba = container_of(to_config_group(item), | ||
2938 | struct se_hba, hba_group); | ||
2939 | core_delete_hba(hba); | ||
2940 | } | ||
2941 | |||
2917 | static struct configfs_attribute *target_core_hba_attrs[] = { | 2942 | static struct configfs_attribute *target_core_hba_attrs[] = { |
2918 | &target_core_hba_hba_info.attr, | 2943 | &target_core_hba_hba_info.attr, |
2919 | &target_core_hba_hba_mode.attr, | 2944 | &target_core_hba_hba_mode.attr, |
@@ -2921,6 +2946,7 @@ static struct configfs_attribute *target_core_hba_attrs[] = { | |||
2921 | }; | 2946 | }; |
2922 | 2947 | ||
2923 | static struct configfs_item_operations target_core_hba_item_ops = { | 2948 | static struct configfs_item_operations target_core_hba_item_ops = { |
2949 | .release = target_core_hba_release, | ||
2924 | .show_attribute = target_core_hba_attr_show, | 2950 | .show_attribute = target_core_hba_attr_show, |
2925 | .store_attribute = target_core_hba_attr_store, | 2951 | .store_attribute = target_core_hba_attr_store, |
2926 | }; | 2952 | }; |
@@ -2997,10 +3023,11 @@ static void target_core_call_delhbafromtarget( | |||
2997 | struct config_group *group, | 3023 | struct config_group *group, |
2998 | struct config_item *item) | 3024 | struct config_item *item) |
2999 | { | 3025 | { |
3000 | struct se_hba *hba = item_to_hba(item); | 3026 | /* |
3001 | 3027 | * core_delete_hba() is called from target_core_hba_item_ops->release() | |
3028 | * -> target_core_hba_release() | ||
3029 | */ | ||
3002 | config_item_put(item); | 3030 | config_item_put(item); |
3003 | core_delete_hba(hba); | ||
3004 | } | 3031 | } |
3005 | 3032 | ||
3006 | static struct configfs_group_operations target_core_group_ops = { | 3033 | static struct configfs_group_operations target_core_group_ops = { |
@@ -3022,7 +3049,6 @@ static int target_core_init_configfs(void) | |||
3022 | struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL; | 3049 | struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL; |
3023 | struct config_group *lu_gp_cg = NULL; | 3050 | struct config_group *lu_gp_cg = NULL; |
3024 | struct configfs_subsystem *subsys; | 3051 | struct configfs_subsystem *subsys; |
3025 | struct proc_dir_entry *scsi_target_proc = NULL; | ||
3026 | struct t10_alua_lu_gp *lu_gp; | 3052 | struct t10_alua_lu_gp *lu_gp; |
3027 | int ret; | 3053 | int ret; |
3028 | 3054 | ||
@@ -3128,21 +3154,10 @@ static int target_core_init_configfs(void) | |||
3128 | if (core_dev_setup_virtual_lun0() < 0) | 3154 | if (core_dev_setup_virtual_lun0() < 0) |
3129 | goto out; | 3155 | goto out; |
3130 | 3156 | ||
3131 | scsi_target_proc = proc_mkdir("scsi_target", 0); | ||
3132 | if (!(scsi_target_proc)) { | ||
3133 | printk(KERN_ERR "proc_mkdir(scsi_target, 0) failed\n"); | ||
3134 | goto out; | ||
3135 | } | ||
3136 | ret = init_scsi_target_mib(); | ||
3137 | if (ret < 0) | ||
3138 | goto out; | ||
3139 | |||
3140 | return 0; | 3157 | return 0; |
3141 | 3158 | ||
3142 | out: | 3159 | out: |
3143 | configfs_unregister_subsystem(subsys); | 3160 | configfs_unregister_subsystem(subsys); |
3144 | if (scsi_target_proc) | ||
3145 | remove_proc_entry("scsi_target", 0); | ||
3146 | core_dev_release_virtual_lun0(); | 3161 | core_dev_release_virtual_lun0(); |
3147 | rd_module_exit(); | 3162 | rd_module_exit(); |
3148 | out_global: | 3163 | out_global: |
@@ -3178,8 +3193,7 @@ static void target_core_exit_configfs(void) | |||
3178 | config_item_put(item); | 3193 | config_item_put(item); |
3179 | } | 3194 | } |
3180 | kfree(lu_gp_cg->default_groups); | 3195 | kfree(lu_gp_cg->default_groups); |
3181 | core_alua_free_lu_gp(se_global->default_lu_gp); | 3196 | lu_gp_cg->default_groups = NULL; |
3182 | se_global->default_lu_gp = NULL; | ||
3183 | 3197 | ||
3184 | alua_cg = &se_global->alua_group; | 3198 | alua_cg = &se_global->alua_group; |
3185 | for (i = 0; alua_cg->default_groups[i]; i++) { | 3199 | for (i = 0; alua_cg->default_groups[i]; i++) { |
@@ -3188,6 +3202,7 @@ static void target_core_exit_configfs(void) | |||
3188 | config_item_put(item); | 3202 | config_item_put(item); |
3189 | } | 3203 | } |
3190 | kfree(alua_cg->default_groups); | 3204 | kfree(alua_cg->default_groups); |
3205 | alua_cg->default_groups = NULL; | ||
3191 | 3206 | ||
3192 | hba_cg = &se_global->target_core_hbagroup; | 3207 | hba_cg = &se_global->target_core_hbagroup; |
3193 | for (i = 0; hba_cg->default_groups[i]; i++) { | 3208 | for (i = 0; hba_cg->default_groups[i]; i++) { |
@@ -3196,20 +3211,20 @@ static void target_core_exit_configfs(void) | |||
3196 | config_item_put(item); | 3211 | config_item_put(item); |
3197 | } | 3212 | } |
3198 | kfree(hba_cg->default_groups); | 3213 | kfree(hba_cg->default_groups); |
3199 | 3214 | hba_cg->default_groups = NULL; | |
3200 | for (i = 0; subsys->su_group.default_groups[i]; i++) { | 3215 | /* |
3201 | item = &subsys->su_group.default_groups[i]->cg_item; | 3216 | * We expect subsys->su_group.default_groups to be released |
3202 | subsys->su_group.default_groups[i] = NULL; | 3217 | * by configfs subsystem provider logic.. |
3203 | config_item_put(item); | 3218 | */ |
3204 | } | 3219 | configfs_unregister_subsystem(subsys); |
3205 | kfree(subsys->su_group.default_groups); | 3220 | kfree(subsys->su_group.default_groups); |
3206 | 3221 | ||
3207 | configfs_unregister_subsystem(subsys); | 3222 | core_alua_free_lu_gp(se_global->default_lu_gp); |
3223 | se_global->default_lu_gp = NULL; | ||
3224 | |||
3208 | printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric" | 3225 | printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric" |
3209 | " Infrastructure\n"); | 3226 | " Infrastructure\n"); |
3210 | 3227 | ||
3211 | remove_scsi_target_mib(); | ||
3212 | remove_proc_entry("scsi_target", 0); | ||
3213 | core_dev_release_virtual_lun0(); | 3228 | core_dev_release_virtual_lun0(); |
3214 | rd_module_exit(); | 3229 | rd_module_exit(); |
3215 | release_se_global(); | 3230 | release_se_global(); |
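[Editor's note] The recurring change in target_core_configfs.c is that object teardown moves out of the configfs group's ->drop_item() callback and into the item's ->release() callback, so the backing structure is freed only when the last config_item reference is dropped, not at rmdir time. Below is a minimal, self-contained sketch of that pattern; the my_hba names and the trivial kfree() body are illustrative stand-ins, not code from the driver.

#include <linux/configfs.h>
#include <linux/module.h>
#include <linux/slab.h>

struct my_hba {
        struct config_group group;      /* embedded configfs group */
        int id;
};

/*
 * Called by configfs once the final reference to the item is dropped,
 * i.e. after rmdir(2) and after every config_item_put() has completed.
 * All teardown and the kfree() belong here, not in ->drop_item().
 */
static void my_hba_release(struct config_item *item)
{
        struct my_hba *hba = container_of(to_config_group(item),
                                          struct my_hba, group);
        kfree(hba);
}

static struct configfs_item_operations my_hba_item_ops = {
        .release        = my_hba_release,
};

static struct config_item_type my_hba_type = {
        .ct_item_ops    = &my_hba_item_ops,
        .ct_owner       = THIS_MODULE,
};

/*
 * ->drop_item() now only drops the reference the group holds; it must
 * not free the object, since other reference holders may still exist.
 */
static void my_drop_hba(struct config_group *group, struct config_item *item)
{
        config_item_put(item);
}

The reordering in target_core_exit_configfs() follows from the same model: configfs_unregister_subsystem() now runs before core_alua_free_lu_gp(), matching the added comment that the subsystem's default groups are expected to be released by the configfs provider logic itself.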
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 317ce58d426d..5da051a07fa3 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c | |||
@@ -373,11 +373,11 @@ int core_update_device_list_for_node( | |||
373 | /* | 373 | /* |
374 | * deve->se_lun_acl will be NULL for demo-mode created LUNs | 374 | * deve->se_lun_acl will be NULL for demo-mode created LUNs |
375 | * that have not been explictly concerted to MappedLUNs -> | 375 | * that have not been explictly concerted to MappedLUNs -> |
376 | * struct se_lun_acl. | 376 | * struct se_lun_acl, but we remove deve->alua_port_list from |
377 | * port->sep_alua_list. This also means that active UAs and | ||
378 | * NodeACL context specific PR metadata for demo-mode | ||
379 | * MappedLUN *deve will be released below.. | ||
377 | */ | 380 | */ |
378 | if (!(deve->se_lun_acl)) | ||
379 | return 0; | ||
380 | |||
381 | spin_lock_bh(&port->sep_alua_lock); | 381 | spin_lock_bh(&port->sep_alua_lock); |
382 | list_del(&deve->alua_port_list); | 382 | list_del(&deve->alua_port_list); |
383 | spin_unlock_bh(&port->sep_alua_lock); | 383 | spin_unlock_bh(&port->sep_alua_lock); |
@@ -395,12 +395,14 @@ int core_update_device_list_for_node( | |||
395 | printk(KERN_ERR "struct se_dev_entry->se_lun_acl" | 395 | printk(KERN_ERR "struct se_dev_entry->se_lun_acl" |
396 | " already set for demo mode -> explict" | 396 | " already set for demo mode -> explict" |
397 | " LUN ACL transition\n"); | 397 | " LUN ACL transition\n"); |
398 | spin_unlock_irq(&nacl->device_list_lock); | ||
398 | return -1; | 399 | return -1; |
399 | } | 400 | } |
400 | if (deve->se_lun != lun) { | 401 | if (deve->se_lun != lun) { |
401 | printk(KERN_ERR "struct se_dev_entry->se_lun does" | 402 | printk(KERN_ERR "struct se_dev_entry->se_lun does" |
402 | " match passed struct se_lun for demo mode" | 403 | " match passed struct se_lun for demo mode" |
403 | " -> explict LUN ACL transition\n"); | 404 | " -> explict LUN ACL transition\n"); |
405 | spin_unlock_irq(&nacl->device_list_lock); | ||
404 | return -1; | 406 | return -1; |
405 | } | 407 | } |
406 | deve->se_lun_acl = lun_acl; | 408 | deve->se_lun_acl = lun_acl; |
@@ -865,9 +867,6 @@ static void se_dev_stop(struct se_device *dev) | |||
865 | } | 867 | } |
866 | } | 868 | } |
867 | spin_unlock(&hba->device_lock); | 869 | spin_unlock(&hba->device_lock); |
868 | |||
869 | while (atomic_read(&hba->dev_mib_access_count)) | ||
870 | cpu_relax(); | ||
871 | } | 870 | } |
872 | 871 | ||
873 | int se_dev_check_online(struct se_device *dev) | 872 | int se_dev_check_online(struct se_device *dev) |
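[Editor's note] The two added spin_unlock_irq() calls above fix early-return paths that previously left nacl->device_list_lock held. A hedged sketch of the usual way to keep such error paths balanced, using a single unlock label, follows; the function and condition names are invented for illustration.

#include <linux/spinlock.h>
#include <linux/errno.h>

static int update_entry(spinlock_t *lock, int cond_a, int cond_b)
{
        int ret = 0;

        spin_lock_irq(lock);
        if (cond_a) {           /* e.g. ACL already set */
                ret = -EINVAL;
                goto out_unlock;
        }
        if (cond_b) {           /* e.g. passed LUN does not match */
                ret = -EINVAL;
                goto out_unlock;
        }
        /* ... perform the update under the lock ... */
out_unlock:
        spin_unlock_irq(lock);
        return ret;
}

Every exit from the critical section goes through the same unlock site, so a new early return cannot reintroduce the lock leak.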
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 32b148d7e261..b65d1c8e7740 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c | |||
@@ -214,12 +214,22 @@ TCM_MAPPEDLUN_ATTR(write_protect, S_IRUGO | S_IWUSR); | |||
214 | 214 | ||
215 | CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group); | 215 | CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group); |
216 | 216 | ||
217 | static void target_fabric_mappedlun_release(struct config_item *item) | ||
218 | { | ||
219 | struct se_lun_acl *lacl = container_of(to_config_group(item), | ||
220 | struct se_lun_acl, se_lun_group); | ||
221 | struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg; | ||
222 | |||
223 | core_dev_free_initiator_node_lun_acl(se_tpg, lacl); | ||
224 | } | ||
225 | |||
217 | static struct configfs_attribute *target_fabric_mappedlun_attrs[] = { | 226 | static struct configfs_attribute *target_fabric_mappedlun_attrs[] = { |
218 | &target_fabric_mappedlun_write_protect.attr, | 227 | &target_fabric_mappedlun_write_protect.attr, |
219 | NULL, | 228 | NULL, |
220 | }; | 229 | }; |
221 | 230 | ||
222 | static struct configfs_item_operations target_fabric_mappedlun_item_ops = { | 231 | static struct configfs_item_operations target_fabric_mappedlun_item_ops = { |
232 | .release = target_fabric_mappedlun_release, | ||
223 | .show_attribute = target_fabric_mappedlun_attr_show, | 233 | .show_attribute = target_fabric_mappedlun_attr_show, |
224 | .store_attribute = target_fabric_mappedlun_attr_store, | 234 | .store_attribute = target_fabric_mappedlun_attr_store, |
225 | .allow_link = target_fabric_mappedlun_link, | 235 | .allow_link = target_fabric_mappedlun_link, |
@@ -337,15 +347,21 @@ static void target_fabric_drop_mappedlun( | |||
337 | struct config_group *group, | 347 | struct config_group *group, |
338 | struct config_item *item) | 348 | struct config_item *item) |
339 | { | 349 | { |
340 | struct se_lun_acl *lacl = container_of(to_config_group(item), | ||
341 | struct se_lun_acl, se_lun_group); | ||
342 | struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg; | ||
343 | |||
344 | config_item_put(item); | 350 | config_item_put(item); |
345 | core_dev_free_initiator_node_lun_acl(se_tpg, lacl); | 351 | } |
352 | |||
353 | static void target_fabric_nacl_base_release(struct config_item *item) | ||
354 | { | ||
355 | struct se_node_acl *se_nacl = container_of(to_config_group(item), | ||
356 | struct se_node_acl, acl_group); | ||
357 | struct se_portal_group *se_tpg = se_nacl->se_tpg; | ||
358 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; | ||
359 | |||
360 | tf->tf_ops.fabric_drop_nodeacl(se_nacl); | ||
346 | } | 361 | } |
347 | 362 | ||
348 | static struct configfs_item_operations target_fabric_nacl_base_item_ops = { | 363 | static struct configfs_item_operations target_fabric_nacl_base_item_ops = { |
364 | .release = target_fabric_nacl_base_release, | ||
349 | .show_attribute = target_fabric_nacl_base_attr_show, | 365 | .show_attribute = target_fabric_nacl_base_attr_show, |
350 | .store_attribute = target_fabric_nacl_base_attr_store, | 366 | .store_attribute = target_fabric_nacl_base_attr_store, |
351 | }; | 367 | }; |
@@ -404,9 +420,6 @@ static void target_fabric_drop_nodeacl( | |||
404 | struct config_group *group, | 420 | struct config_group *group, |
405 | struct config_item *item) | 421 | struct config_item *item) |
406 | { | 422 | { |
407 | struct se_portal_group *se_tpg = container_of(group, | ||
408 | struct se_portal_group, tpg_acl_group); | ||
409 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; | ||
410 | struct se_node_acl *se_nacl = container_of(to_config_group(item), | 423 | struct se_node_acl *se_nacl = container_of(to_config_group(item), |
411 | struct se_node_acl, acl_group); | 424 | struct se_node_acl, acl_group); |
412 | struct config_item *df_item; | 425 | struct config_item *df_item; |
@@ -419,9 +432,10 @@ static void target_fabric_drop_nodeacl( | |||
419 | nacl_cg->default_groups[i] = NULL; | 432 | nacl_cg->default_groups[i] = NULL; |
420 | config_item_put(df_item); | 433 | config_item_put(df_item); |
421 | } | 434 | } |
422 | 435 | /* | |
436 | * struct se_node_acl free is done in target_fabric_nacl_base_release() | ||
437 | */ | ||
423 | config_item_put(item); | 438 | config_item_put(item); |
424 | tf->tf_ops.fabric_drop_nodeacl(se_nacl); | ||
425 | } | 439 | } |
426 | 440 | ||
427 | static struct configfs_group_operations target_fabric_nacl_group_ops = { | 441 | static struct configfs_group_operations target_fabric_nacl_group_ops = { |
@@ -437,7 +451,18 @@ TF_CIT_SETUP(tpg_nacl, NULL, &target_fabric_nacl_group_ops, NULL); | |||
437 | 451 | ||
438 | CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group); | 452 | CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group); |
439 | 453 | ||
454 | static void target_fabric_np_base_release(struct config_item *item) | ||
455 | { | ||
456 | struct se_tpg_np *se_tpg_np = container_of(to_config_group(item), | ||
457 | struct se_tpg_np, tpg_np_group); | ||
458 | struct se_portal_group *se_tpg = se_tpg_np->tpg_np_parent; | ||
459 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; | ||
460 | |||
461 | tf->tf_ops.fabric_drop_np(se_tpg_np); | ||
462 | } | ||
463 | |||
440 | static struct configfs_item_operations target_fabric_np_base_item_ops = { | 464 | static struct configfs_item_operations target_fabric_np_base_item_ops = { |
465 | .release = target_fabric_np_base_release, | ||
441 | .show_attribute = target_fabric_np_base_attr_show, | 466 | .show_attribute = target_fabric_np_base_attr_show, |
442 | .store_attribute = target_fabric_np_base_attr_store, | 467 | .store_attribute = target_fabric_np_base_attr_store, |
443 | }; | 468 | }; |
@@ -466,6 +491,7 @@ static struct config_group *target_fabric_make_np( | |||
466 | if (!(se_tpg_np) || IS_ERR(se_tpg_np)) | 491 | if (!(se_tpg_np) || IS_ERR(se_tpg_np)) |
467 | return ERR_PTR(-EINVAL); | 492 | return ERR_PTR(-EINVAL); |
468 | 493 | ||
494 | se_tpg_np->tpg_np_parent = se_tpg; | ||
469 | config_group_init_type_name(&se_tpg_np->tpg_np_group, name, | 495 | config_group_init_type_name(&se_tpg_np->tpg_np_group, name, |
470 | &TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit); | 496 | &TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit); |
471 | 497 | ||
@@ -476,14 +502,10 @@ static void target_fabric_drop_np( | |||
476 | struct config_group *group, | 502 | struct config_group *group, |
477 | struct config_item *item) | 503 | struct config_item *item) |
478 | { | 504 | { |
479 | struct se_portal_group *se_tpg = container_of(group, | 505 | /* |
480 | struct se_portal_group, tpg_np_group); | 506 | * struct se_tpg_np is released via target_fabric_np_base_release() |
481 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; | 507 | */ |
482 | struct se_tpg_np *se_tpg_np = container_of(to_config_group(item), | ||
483 | struct se_tpg_np, tpg_np_group); | ||
484 | |||
485 | config_item_put(item); | 508 | config_item_put(item); |
486 | tf->tf_ops.fabric_drop_np(se_tpg_np); | ||
487 | } | 509 | } |
488 | 510 | ||
489 | static struct configfs_group_operations target_fabric_np_group_ops = { | 511 | static struct configfs_group_operations target_fabric_np_group_ops = { |
@@ -814,7 +836,18 @@ TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL); | |||
814 | */ | 836 | */ |
815 | CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group); | 837 | CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group); |
816 | 838 | ||
839 | static void target_fabric_tpg_release(struct config_item *item) | ||
840 | { | ||
841 | struct se_portal_group *se_tpg = container_of(to_config_group(item), | ||
842 | struct se_portal_group, tpg_group); | ||
843 | struct se_wwn *wwn = se_tpg->se_tpg_wwn; | ||
844 | struct target_fabric_configfs *tf = wwn->wwn_tf; | ||
845 | |||
846 | tf->tf_ops.fabric_drop_tpg(se_tpg); | ||
847 | } | ||
848 | |||
817 | static struct configfs_item_operations target_fabric_tpg_base_item_ops = { | 849 | static struct configfs_item_operations target_fabric_tpg_base_item_ops = { |
850 | .release = target_fabric_tpg_release, | ||
818 | .show_attribute = target_fabric_tpg_attr_show, | 851 | .show_attribute = target_fabric_tpg_attr_show, |
819 | .store_attribute = target_fabric_tpg_attr_store, | 852 | .store_attribute = target_fabric_tpg_attr_store, |
820 | }; | 853 | }; |
@@ -872,8 +905,6 @@ static void target_fabric_drop_tpg( | |||
872 | struct config_group *group, | 905 | struct config_group *group, |
873 | struct config_item *item) | 906 | struct config_item *item) |
874 | { | 907 | { |
875 | struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group); | ||
876 | struct target_fabric_configfs *tf = wwn->wwn_tf; | ||
877 | struct se_portal_group *se_tpg = container_of(to_config_group(item), | 908 | struct se_portal_group *se_tpg = container_of(to_config_group(item), |
878 | struct se_portal_group, tpg_group); | 909 | struct se_portal_group, tpg_group); |
879 | struct config_group *tpg_cg = &se_tpg->tpg_group; | 910 | struct config_group *tpg_cg = &se_tpg->tpg_group; |
@@ -890,15 +921,28 @@ static void target_fabric_drop_tpg( | |||
890 | } | 921 | } |
891 | 922 | ||
892 | config_item_put(item); | 923 | config_item_put(item); |
893 | tf->tf_ops.fabric_drop_tpg(se_tpg); | ||
894 | } | 924 | } |
895 | 925 | ||
926 | static void target_fabric_release_wwn(struct config_item *item) | ||
927 | { | ||
928 | struct se_wwn *wwn = container_of(to_config_group(item), | ||
929 | struct se_wwn, wwn_group); | ||
930 | struct target_fabric_configfs *tf = wwn->wwn_tf; | ||
931 | |||
932 | tf->tf_ops.fabric_drop_wwn(wwn); | ||
933 | } | ||
934 | |||
935 | static struct configfs_item_operations target_fabric_tpg_item_ops = { | ||
936 | .release = target_fabric_release_wwn, | ||
937 | }; | ||
938 | |||
896 | static struct configfs_group_operations target_fabric_tpg_group_ops = { | 939 | static struct configfs_group_operations target_fabric_tpg_group_ops = { |
897 | .make_group = target_fabric_make_tpg, | 940 | .make_group = target_fabric_make_tpg, |
898 | .drop_item = target_fabric_drop_tpg, | 941 | .drop_item = target_fabric_drop_tpg, |
899 | }; | 942 | }; |
900 | 943 | ||
901 | TF_CIT_SETUP(tpg, NULL, &target_fabric_tpg_group_ops, NULL); | 944 | TF_CIT_SETUP(tpg, &target_fabric_tpg_item_ops, &target_fabric_tpg_group_ops, |
945 | NULL); | ||
902 | 946 | ||
903 | /* End of tfc_tpg_cit */ | 947 | /* End of tfc_tpg_cit */ |
904 | 948 | ||
@@ -932,13 +976,7 @@ static void target_fabric_drop_wwn( | |||
932 | struct config_group *group, | 976 | struct config_group *group, |
933 | struct config_item *item) | 977 | struct config_item *item) |
934 | { | 978 | { |
935 | struct target_fabric_configfs *tf = container_of(group, | ||
936 | struct target_fabric_configfs, tf_group); | ||
937 | struct se_wwn *wwn = container_of(to_config_group(item), | ||
938 | struct se_wwn, wwn_group); | ||
939 | |||
940 | config_item_put(item); | 979 | config_item_put(item); |
941 | tf->tf_ops.fabric_drop_wwn(wwn); | ||
942 | } | 980 | } |
943 | 981 | ||
944 | static struct configfs_group_operations target_fabric_wwn_group_ops = { | 982 | static struct configfs_group_operations target_fabric_wwn_group_ops = { |
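[Editor's note] Several of the new ->release() callbacks above need to reach fabric-specific drop functions, but unlike ->drop_item() they are not handed the parent group; that is why the hunk also stores se_tpg_np->tpg_np_parent at creation time. The sketch below shows the same back-pointer idea with made-up my_* types and a stub drop function; it is not the driver's code.

#include <linux/configfs.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>

struct my_tpg {
        struct config_group np_group;
};

struct my_np {
        struct config_group group;
        struct my_tpg *parent;          /* back-pointer set at creation */
};

static void my_fabric_drop_np(struct my_tpg *tpg, struct my_np *np)
{
        kfree(np);                      /* stand-in for fabric teardown */
}

/*
 * ->release() only receives the config_item; unlike ->drop_item() it has
 * no parent-group argument, so anything the final-put path needs must be
 * reachable from the object itself -- hence the back-pointer.
 */
static void my_np_release(struct config_item *item)
{
        struct my_np *np = container_of(to_config_group(item),
                                        struct my_np, group);

        my_fabric_drop_np(np->parent, np);
}

static struct configfs_item_operations my_np_item_ops = {
        .release        = my_np_release,
};

static struct config_item_type my_np_type = {
        .ct_item_ops    = &my_np_item_ops,
        .ct_owner       = THIS_MODULE,
};

static struct config_group *my_make_np(struct config_group *group,
                                       const char *name)
{
        struct my_tpg *tpg = container_of(group, struct my_tpg, np_group);
        struct my_np *np = kzalloc(sizeof(*np), GFP_KERNEL);

        if (!np)
                return ERR_PTR(-ENOMEM);
        np->parent = tpg;               /* mirrors tpg_np_parent above */
        config_group_init_type_name(&np->group, name, &my_np_type);
        return &np->group;
}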
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index c6e0d757e76e..67f0c09983c8 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c | |||
@@ -154,7 +154,7 @@ static struct se_device *iblock_create_virtdevice( | |||
154 | 154 | ||
155 | bd = blkdev_get_by_path(ib_dev->ibd_udev_path, | 155 | bd = blkdev_get_by_path(ib_dev->ibd_udev_path, |
156 | FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev); | 156 | FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev); |
157 | if (!(bd)) | 157 | if (IS_ERR(bd)) |
158 | goto failed; | 158 | goto failed; |
159 | /* | 159 | /* |
160 | * Setup the local scope queue_limits from struct request_queue->limits | 160 | * Setup the local scope queue_limits from struct request_queue->limits |
@@ -220,8 +220,10 @@ static void iblock_free_device(void *p) | |||
220 | { | 220 | { |
221 | struct iblock_dev *ib_dev = p; | 221 | struct iblock_dev *ib_dev = p; |
222 | 222 | ||
223 | blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); | 223 | if (ib_dev->ibd_bd != NULL) |
224 | bioset_free(ib_dev->ibd_bio_set); | 224 | blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); |
225 | if (ib_dev->ibd_bio_set != NULL) | ||
226 | bioset_free(ib_dev->ibd_bio_set); | ||
225 | kfree(ib_dev); | 227 | kfree(ib_dev); |
226 | } | 228 | } |
227 | 229 | ||
diff --git a/drivers/target/target_core_mib.c b/drivers/target/target_core_mib.c deleted file mode 100644 index d5a48aa0d2d1..000000000000 --- a/drivers/target/target_core_mib.c +++ /dev/null | |||
@@ -1,1078 +0,0 @@ | |||
1 | /******************************************************************************* | ||
2 | * Filename: target_core_mib.c | ||
3 | * | ||
4 | * Copyright (c) 2006-2007 SBE, Inc. All Rights Reserved. | ||
5 | * Copyright (c) 2007-2010 Rising Tide Systems | ||
6 | * Copyright (c) 2008-2010 Linux-iSCSI.org | ||
7 | * | ||
8 | * Nicholas A. Bellinger <nab@linux-iscsi.org> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2 of the License, or | ||
13 | * (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
18 | * GNU General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License | ||
21 | * along with this program; if not, write to the Free Software | ||
22 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
23 | * | ||
24 | ******************************************************************************/ | ||
25 | |||
26 | |||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/module.h> | ||
29 | #include <linux/delay.h> | ||
30 | #include <linux/timer.h> | ||
31 | #include <linux/string.h> | ||
32 | #include <linux/version.h> | ||
33 | #include <generated/utsrelease.h> | ||
34 | #include <linux/utsname.h> | ||
35 | #include <linux/proc_fs.h> | ||
36 | #include <linux/seq_file.h> | ||
37 | #include <linux/blkdev.h> | ||
38 | #include <scsi/scsi.h> | ||
39 | #include <scsi/scsi_device.h> | ||
40 | #include <scsi/scsi_host.h> | ||
41 | |||
42 | #include <target/target_core_base.h> | ||
43 | #include <target/target_core_transport.h> | ||
44 | #include <target/target_core_fabric_ops.h> | ||
45 | #include <target/target_core_configfs.h> | ||
46 | |||
47 | #include "target_core_hba.h" | ||
48 | #include "target_core_mib.h" | ||
49 | |||
50 | /* SCSI mib table index */ | ||
51 | static struct scsi_index_table scsi_index_table; | ||
52 | |||
53 | #ifndef INITIAL_JIFFIES | ||
54 | #define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) | ||
55 | #endif | ||
56 | |||
57 | /* SCSI Instance Table */ | ||
58 | #define SCSI_INST_SW_INDEX 1 | ||
59 | #define SCSI_TRANSPORT_INDEX 1 | ||
60 | |||
61 | #define NONE "None" | ||
62 | #define ISPRINT(a) ((a >= ' ') && (a <= '~')) | ||
63 | |||
64 | static inline int list_is_first(const struct list_head *list, | ||
65 | const struct list_head *head) | ||
66 | { | ||
67 | return list->prev == head; | ||
68 | } | ||
69 | |||
70 | static void *locate_hba_start( | ||
71 | struct seq_file *seq, | ||
72 | loff_t *pos) | ||
73 | { | ||
74 | spin_lock(&se_global->g_device_lock); | ||
75 | return seq_list_start(&se_global->g_se_dev_list, *pos); | ||
76 | } | ||
77 | |||
78 | static void *locate_hba_next( | ||
79 | struct seq_file *seq, | ||
80 | void *v, | ||
81 | loff_t *pos) | ||
82 | { | ||
83 | return seq_list_next(v, &se_global->g_se_dev_list, pos); | ||
84 | } | ||
85 | |||
86 | static void locate_hba_stop(struct seq_file *seq, void *v) | ||
87 | { | ||
88 | spin_unlock(&se_global->g_device_lock); | ||
89 | } | ||
90 | |||
91 | /**************************************************************************** | ||
92 | * SCSI MIB Tables | ||
93 | ****************************************************************************/ | ||
94 | |||
95 | /* | ||
96 | * SCSI Instance Table | ||
97 | */ | ||
98 | static void *scsi_inst_seq_start( | ||
99 | struct seq_file *seq, | ||
100 | loff_t *pos) | ||
101 | { | ||
102 | spin_lock(&se_global->hba_lock); | ||
103 | return seq_list_start(&se_global->g_hba_list, *pos); | ||
104 | } | ||
105 | |||
106 | static void *scsi_inst_seq_next( | ||
107 | struct seq_file *seq, | ||
108 | void *v, | ||
109 | loff_t *pos) | ||
110 | { | ||
111 | return seq_list_next(v, &se_global->g_hba_list, pos); | ||
112 | } | ||
113 | |||
114 | static void scsi_inst_seq_stop(struct seq_file *seq, void *v) | ||
115 | { | ||
116 | spin_unlock(&se_global->hba_lock); | ||
117 | } | ||
118 | |||
119 | static int scsi_inst_seq_show(struct seq_file *seq, void *v) | ||
120 | { | ||
121 | struct se_hba *hba = list_entry(v, struct se_hba, hba_list); | ||
122 | |||
123 | if (list_is_first(&hba->hba_list, &se_global->g_hba_list)) | ||
124 | seq_puts(seq, "inst sw_indx\n"); | ||
125 | |||
126 | seq_printf(seq, "%u %u\n", hba->hba_index, SCSI_INST_SW_INDEX); | ||
127 | seq_printf(seq, "plugin: %s version: %s\n", | ||
128 | hba->transport->name, TARGET_CORE_VERSION); | ||
129 | |||
130 | return 0; | ||
131 | } | ||
132 | |||
133 | static const struct seq_operations scsi_inst_seq_ops = { | ||
134 | .start = scsi_inst_seq_start, | ||
135 | .next = scsi_inst_seq_next, | ||
136 | .stop = scsi_inst_seq_stop, | ||
137 | .show = scsi_inst_seq_show | ||
138 | }; | ||
139 | |||
140 | static int scsi_inst_seq_open(struct inode *inode, struct file *file) | ||
141 | { | ||
142 | return seq_open(file, &scsi_inst_seq_ops); | ||
143 | } | ||
144 | |||
145 | static const struct file_operations scsi_inst_seq_fops = { | ||
146 | .owner = THIS_MODULE, | ||
147 | .open = scsi_inst_seq_open, | ||
148 | .read = seq_read, | ||
149 | .llseek = seq_lseek, | ||
150 | .release = seq_release, | ||
151 | }; | ||
152 | |||
153 | /* | ||
154 | * SCSI Device Table | ||
155 | */ | ||
156 | static void *scsi_dev_seq_start(struct seq_file *seq, loff_t *pos) | ||
157 | { | ||
158 | return locate_hba_start(seq, pos); | ||
159 | } | ||
160 | |||
161 | static void *scsi_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
162 | { | ||
163 | return locate_hba_next(seq, v, pos); | ||
164 | } | ||
165 | |||
166 | static void scsi_dev_seq_stop(struct seq_file *seq, void *v) | ||
167 | { | ||
168 | locate_hba_stop(seq, v); | ||
169 | } | ||
170 | |||
171 | static int scsi_dev_seq_show(struct seq_file *seq, void *v) | ||
172 | { | ||
173 | struct se_hba *hba; | ||
174 | struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, | ||
175 | g_se_dev_list); | ||
176 | struct se_device *dev = se_dev->se_dev_ptr; | ||
177 | char str[28]; | ||
178 | int k; | ||
179 | |||
180 | if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) | ||
181 | seq_puts(seq, "inst indx role ports\n"); | ||
182 | |||
183 | if (!(dev)) | ||
184 | return 0; | ||
185 | |||
186 | hba = dev->se_hba; | ||
187 | if (!(hba)) { | ||
188 | /* Log error ? */ | ||
189 | return 0; | ||
190 | } | ||
191 | |||
192 | seq_printf(seq, "%u %u %s %u\n", hba->hba_index, | ||
193 | dev->dev_index, "Target", dev->dev_port_count); | ||
194 | |||
195 | memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); | ||
196 | |||
197 | /* vendor */ | ||
198 | for (k = 0; k < 8; k++) | ||
199 | str[k] = ISPRINT(DEV_T10_WWN(dev)->vendor[k]) ? | ||
200 | DEV_T10_WWN(dev)->vendor[k] : 0x20; | ||
201 | str[k] = 0x20; | ||
202 | |||
203 | /* model */ | ||
204 | for (k = 0; k < 16; k++) | ||
205 | str[k+9] = ISPRINT(DEV_T10_WWN(dev)->model[k]) ? | ||
206 | DEV_T10_WWN(dev)->model[k] : 0x20; | ||
207 | str[k + 9] = 0; | ||
208 | |||
209 | seq_printf(seq, "dev_alias: %s\n", str); | ||
210 | |||
211 | return 0; | ||
212 | } | ||
213 | |||
214 | static const struct seq_operations scsi_dev_seq_ops = { | ||
215 | .start = scsi_dev_seq_start, | ||
216 | .next = scsi_dev_seq_next, | ||
217 | .stop = scsi_dev_seq_stop, | ||
218 | .show = scsi_dev_seq_show | ||
219 | }; | ||
220 | |||
221 | static int scsi_dev_seq_open(struct inode *inode, struct file *file) | ||
222 | { | ||
223 | return seq_open(file, &scsi_dev_seq_ops); | ||
224 | } | ||
225 | |||
226 | static const struct file_operations scsi_dev_seq_fops = { | ||
227 | .owner = THIS_MODULE, | ||
228 | .open = scsi_dev_seq_open, | ||
229 | .read = seq_read, | ||
230 | .llseek = seq_lseek, | ||
231 | .release = seq_release, | ||
232 | }; | ||
233 | |||
234 | /* | ||
235 | * SCSI Port Table | ||
236 | */ | ||
237 | static void *scsi_port_seq_start(struct seq_file *seq, loff_t *pos) | ||
238 | { | ||
239 | return locate_hba_start(seq, pos); | ||
240 | } | ||
241 | |||
242 | static void *scsi_port_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
243 | { | ||
244 | return locate_hba_next(seq, v, pos); | ||
245 | } | ||
246 | |||
247 | static void scsi_port_seq_stop(struct seq_file *seq, void *v) | ||
248 | { | ||
249 | locate_hba_stop(seq, v); | ||
250 | } | ||
251 | |||
252 | static int scsi_port_seq_show(struct seq_file *seq, void *v) | ||
253 | { | ||
254 | struct se_hba *hba; | ||
255 | struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, | ||
256 | g_se_dev_list); | ||
257 | struct se_device *dev = se_dev->se_dev_ptr; | ||
258 | struct se_port *sep, *sep_tmp; | ||
259 | |||
260 | if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) | ||
261 | seq_puts(seq, "inst device indx role busy_count\n"); | ||
262 | |||
263 | if (!(dev)) | ||
264 | return 0; | ||
265 | |||
266 | hba = dev->se_hba; | ||
267 | if (!(hba)) { | ||
268 | /* Log error ? */ | ||
269 | return 0; | ||
270 | } | ||
271 | |||
272 | /* FIXME: scsiPortBusyStatuses count */ | ||
273 | spin_lock(&dev->se_port_lock); | ||
274 | list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) { | ||
275 | seq_printf(seq, "%u %u %u %s%u %u\n", hba->hba_index, | ||
276 | dev->dev_index, sep->sep_index, "Device", | ||
277 | dev->dev_index, 0); | ||
278 | } | ||
279 | spin_unlock(&dev->se_port_lock); | ||
280 | |||
281 | return 0; | ||
282 | } | ||
283 | |||
284 | static const struct seq_operations scsi_port_seq_ops = { | ||
285 | .start = scsi_port_seq_start, | ||
286 | .next = scsi_port_seq_next, | ||
287 | .stop = scsi_port_seq_stop, | ||
288 | .show = scsi_port_seq_show | ||
289 | }; | ||
290 | |||
291 | static int scsi_port_seq_open(struct inode *inode, struct file *file) | ||
292 | { | ||
293 | return seq_open(file, &scsi_port_seq_ops); | ||
294 | } | ||
295 | |||
296 | static const struct file_operations scsi_port_seq_fops = { | ||
297 | .owner = THIS_MODULE, | ||
298 | .open = scsi_port_seq_open, | ||
299 | .read = seq_read, | ||
300 | .llseek = seq_lseek, | ||
301 | .release = seq_release, | ||
302 | }; | ||
303 | |||
304 | /* | ||
305 | * SCSI Transport Table | ||
306 | */ | ||
307 | static void *scsi_transport_seq_start(struct seq_file *seq, loff_t *pos) | ||
308 | { | ||
309 | return locate_hba_start(seq, pos); | ||
310 | } | ||
311 | |||
312 | static void *scsi_transport_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
313 | { | ||
314 | return locate_hba_next(seq, v, pos); | ||
315 | } | ||
316 | |||
317 | static void scsi_transport_seq_stop(struct seq_file *seq, void *v) | ||
318 | { | ||
319 | locate_hba_stop(seq, v); | ||
320 | } | ||
321 | |||
322 | static int scsi_transport_seq_show(struct seq_file *seq, void *v) | ||
323 | { | ||
324 | struct se_hba *hba; | ||
325 | struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, | ||
326 | g_se_dev_list); | ||
327 | struct se_device *dev = se_dev->se_dev_ptr; | ||
328 | struct se_port *se, *se_tmp; | ||
329 | struct se_portal_group *tpg; | ||
330 | struct t10_wwn *wwn; | ||
331 | char buf[64]; | ||
332 | |||
333 | if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) | ||
334 | seq_puts(seq, "inst device indx dev_name\n"); | ||
335 | |||
336 | if (!(dev)) | ||
337 | return 0; | ||
338 | |||
339 | hba = dev->se_hba; | ||
340 | if (!(hba)) { | ||
341 | /* Log error ? */ | ||
342 | return 0; | ||
343 | } | ||
344 | |||
345 | wwn = DEV_T10_WWN(dev); | ||
346 | |||
347 | spin_lock(&dev->se_port_lock); | ||
348 | list_for_each_entry_safe(se, se_tmp, &dev->dev_sep_list, sep_list) { | ||
349 | tpg = se->sep_tpg; | ||
350 | sprintf(buf, "scsiTransport%s", | ||
351 | TPG_TFO(tpg)->get_fabric_name()); | ||
352 | |||
353 | seq_printf(seq, "%u %s %u %s+%s\n", | ||
354 | hba->hba_index, /* scsiTransportIndex */ | ||
355 | buf, /* scsiTransportType */ | ||
356 | (TPG_TFO(tpg)->tpg_get_inst_index != NULL) ? | ||
357 | TPG_TFO(tpg)->tpg_get_inst_index(tpg) : | ||
358 | 0, | ||
359 | TPG_TFO(tpg)->tpg_get_wwn(tpg), | ||
360 | (strlen(wwn->unit_serial)) ? | ||
361 | /* scsiTransportDevName */ | ||
362 | wwn->unit_serial : wwn->vendor); | ||
363 | } | ||
364 | spin_unlock(&dev->se_port_lock); | ||
365 | |||
366 | return 0; | ||
367 | } | ||
368 | |||
369 | static const struct seq_operations scsi_transport_seq_ops = { | ||
370 | .start = scsi_transport_seq_start, | ||
371 | .next = scsi_transport_seq_next, | ||
372 | .stop = scsi_transport_seq_stop, | ||
373 | .show = scsi_transport_seq_show | ||
374 | }; | ||
375 | |||
376 | static int scsi_transport_seq_open(struct inode *inode, struct file *file) | ||
377 | { | ||
378 | return seq_open(file, &scsi_transport_seq_ops); | ||
379 | } | ||
380 | |||
381 | static const struct file_operations scsi_transport_seq_fops = { | ||
382 | .owner = THIS_MODULE, | ||
383 | .open = scsi_transport_seq_open, | ||
384 | .read = seq_read, | ||
385 | .llseek = seq_lseek, | ||
386 | .release = seq_release, | ||
387 | }; | ||
388 | |||
389 | /* | ||
390 | * SCSI Target Device Table | ||
391 | */ | ||
392 | static void *scsi_tgt_dev_seq_start(struct seq_file *seq, loff_t *pos) | ||
393 | { | ||
394 | return locate_hba_start(seq, pos); | ||
395 | } | ||
396 | |||
397 | static void *scsi_tgt_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
398 | { | ||
399 | return locate_hba_next(seq, v, pos); | ||
400 | } | ||
401 | |||
402 | static void scsi_tgt_dev_seq_stop(struct seq_file *seq, void *v) | ||
403 | { | ||
404 | locate_hba_stop(seq, v); | ||
405 | } | ||
406 | |||
407 | |||
408 | #define LU_COUNT 1 /* for now */ | ||
409 | static int scsi_tgt_dev_seq_show(struct seq_file *seq, void *v) | ||
410 | { | ||
411 | struct se_hba *hba; | ||
412 | struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, | ||
413 | g_se_dev_list); | ||
414 | struct se_device *dev = se_dev->se_dev_ptr; | ||
415 | int non_accessible_lus = 0; | ||
416 | char status[16]; | ||
417 | |||
418 | if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) | ||
419 | seq_puts(seq, "inst indx num_LUs status non_access_LUs" | ||
420 | " resets\n"); | ||
421 | |||
422 | if (!(dev)) | ||
423 | return 0; | ||
424 | |||
425 | hba = dev->se_hba; | ||
426 | if (!(hba)) { | ||
427 | /* Log error ? */ | ||
428 | return 0; | ||
429 | } | ||
430 | |||
431 | switch (dev->dev_status) { | ||
432 | case TRANSPORT_DEVICE_ACTIVATED: | ||
433 | strcpy(status, "activated"); | ||
434 | break; | ||
435 | case TRANSPORT_DEVICE_DEACTIVATED: | ||
436 | strcpy(status, "deactivated"); | ||
437 | non_accessible_lus = 1; | ||
438 | break; | ||
439 | case TRANSPORT_DEVICE_SHUTDOWN: | ||
440 | strcpy(status, "shutdown"); | ||
441 | non_accessible_lus = 1; | ||
442 | break; | ||
443 | case TRANSPORT_DEVICE_OFFLINE_ACTIVATED: | ||
444 | case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED: | ||
445 | strcpy(status, "offline"); | ||
446 | non_accessible_lus = 1; | ||
447 | break; | ||
448 | default: | ||
449 | sprintf(status, "unknown(%d)", dev->dev_status); | ||
450 | non_accessible_lus = 1; | ||
451 | } | ||
452 | |||
453 | seq_printf(seq, "%u %u %u %s %u %u\n", | ||
454 | hba->hba_index, dev->dev_index, LU_COUNT, | ||
455 | status, non_accessible_lus, dev->num_resets); | ||
456 | |||
457 | return 0; | ||
458 | } | ||
459 | |||
460 | static const struct seq_operations scsi_tgt_dev_seq_ops = { | ||
461 | .start = scsi_tgt_dev_seq_start, | ||
462 | .next = scsi_tgt_dev_seq_next, | ||
463 | .stop = scsi_tgt_dev_seq_stop, | ||
464 | .show = scsi_tgt_dev_seq_show | ||
465 | }; | ||
466 | |||
467 | static int scsi_tgt_dev_seq_open(struct inode *inode, struct file *file) | ||
468 | { | ||
469 | return seq_open(file, &scsi_tgt_dev_seq_ops); | ||
470 | } | ||
471 | |||
472 | static const struct file_operations scsi_tgt_dev_seq_fops = { | ||
473 | .owner = THIS_MODULE, | ||
474 | .open = scsi_tgt_dev_seq_open, | ||
475 | .read = seq_read, | ||
476 | .llseek = seq_lseek, | ||
477 | .release = seq_release, | ||
478 | }; | ||
479 | |||
480 | /* | ||
481 | * SCSI Target Port Table | ||
482 | */ | ||
483 | static void *scsi_tgt_port_seq_start(struct seq_file *seq, loff_t *pos) | ||
484 | { | ||
485 | return locate_hba_start(seq, pos); | ||
486 | } | ||
487 | |||
488 | static void *scsi_tgt_port_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
489 | { | ||
490 | return locate_hba_next(seq, v, pos); | ||
491 | } | ||
492 | |||
493 | static void scsi_tgt_port_seq_stop(struct seq_file *seq, void *v) | ||
494 | { | ||
495 | locate_hba_stop(seq, v); | ||
496 | } | ||
497 | |||
498 | static int scsi_tgt_port_seq_show(struct seq_file *seq, void *v) | ||
499 | { | ||
500 | struct se_hba *hba; | ||
501 | struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, | ||
502 | g_se_dev_list); | ||
503 | struct se_device *dev = se_dev->se_dev_ptr; | ||
504 | struct se_port *sep, *sep_tmp; | ||
505 | struct se_portal_group *tpg; | ||
506 | u32 rx_mbytes, tx_mbytes; | ||
507 | unsigned long long num_cmds; | ||
508 | char buf[64]; | ||
509 | |||
510 | if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) | ||
511 | seq_puts(seq, "inst device indx name port_index in_cmds" | ||
512 | " write_mbytes read_mbytes hs_in_cmds\n"); | ||
513 | |||
514 | if (!(dev)) | ||
515 | return 0; | ||
516 | |||
517 | hba = dev->se_hba; | ||
518 | if (!(hba)) { | ||
519 | /* Log error ? */ | ||
520 | return 0; | ||
521 | } | ||
522 | |||
523 | spin_lock(&dev->se_port_lock); | ||
524 | list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) { | ||
525 | tpg = sep->sep_tpg; | ||
526 | sprintf(buf, "%sPort#", | ||
527 | TPG_TFO(tpg)->get_fabric_name()); | ||
528 | |||
529 | seq_printf(seq, "%u %u %u %s%d %s%s%d ", | ||
530 | hba->hba_index, | ||
531 | dev->dev_index, | ||
532 | sep->sep_index, | ||
533 | buf, sep->sep_index, | ||
534 | TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+", | ||
535 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | ||
536 | |||
537 | spin_lock(&sep->sep_lun->lun_sep_lock); | ||
538 | num_cmds = sep->sep_stats.cmd_pdus; | ||
539 | rx_mbytes = (sep->sep_stats.rx_data_octets >> 20); | ||
540 | tx_mbytes = (sep->sep_stats.tx_data_octets >> 20); | ||
541 | spin_unlock(&sep->sep_lun->lun_sep_lock); | ||
542 | |||
543 | seq_printf(seq, "%llu %u %u %u\n", num_cmds, | ||
544 | rx_mbytes, tx_mbytes, 0); | ||
545 | } | ||
546 | spin_unlock(&dev->se_port_lock); | ||
547 | |||
548 | return 0; | ||
549 | } | ||
550 | |||
551 | static const struct seq_operations scsi_tgt_port_seq_ops = { | ||
552 | .start = scsi_tgt_port_seq_start, | ||
553 | .next = scsi_tgt_port_seq_next, | ||
554 | .stop = scsi_tgt_port_seq_stop, | ||
555 | .show = scsi_tgt_port_seq_show | ||
556 | }; | ||
557 | |||
558 | static int scsi_tgt_port_seq_open(struct inode *inode, struct file *file) | ||
559 | { | ||
560 | return seq_open(file, &scsi_tgt_port_seq_ops); | ||
561 | } | ||
562 | |||
563 | static const struct file_operations scsi_tgt_port_seq_fops = { | ||
564 | .owner = THIS_MODULE, | ||
565 | .open = scsi_tgt_port_seq_open, | ||
566 | .read = seq_read, | ||
567 | .llseek = seq_lseek, | ||
568 | .release = seq_release, | ||
569 | }; | ||
570 | |||
571 | /* | ||
572 | * SCSI Authorized Initiator Table: | ||
573 | * It contains the SCSI Initiators authorized to be attached to one of the | ||
574 | * local Target ports. | ||
575 | * Iterates through all active TPGs and extracts the info from the ACLs | ||
576 | */ | ||
577 | static void *scsi_auth_intr_seq_start(struct seq_file *seq, loff_t *pos) | ||
578 | { | ||
579 | spin_lock_bh(&se_global->se_tpg_lock); | ||
580 | return seq_list_start(&se_global->g_se_tpg_list, *pos); | ||
581 | } | ||
582 | |||
583 | static void *scsi_auth_intr_seq_next(struct seq_file *seq, void *v, | ||
584 | loff_t *pos) | ||
585 | { | ||
586 | return seq_list_next(v, &se_global->g_se_tpg_list, pos); | ||
587 | } | ||
588 | |||
589 | static void scsi_auth_intr_seq_stop(struct seq_file *seq, void *v) | ||
590 | { | ||
591 | spin_unlock_bh(&se_global->se_tpg_lock); | ||
592 | } | ||
593 | |||
594 | static int scsi_auth_intr_seq_show(struct seq_file *seq, void *v) | ||
595 | { | ||
596 | struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group, | ||
597 | se_tpg_list); | ||
598 | struct se_dev_entry *deve; | ||
599 | struct se_lun *lun; | ||
600 | struct se_node_acl *se_nacl; | ||
601 | int j; | ||
602 | |||
603 | if (list_is_first(&se_tpg->se_tpg_list, | ||
604 | &se_global->g_se_tpg_list)) | ||
605 | seq_puts(seq, "inst dev port indx dev_or_port intr_name " | ||
606 | "map_indx att_count num_cmds read_mbytes " | ||
607 | "write_mbytes hs_num_cmds creation_time row_status\n"); | ||
608 | |||
609 | if (!(se_tpg)) | ||
610 | return 0; | ||
611 | |||
612 | spin_lock(&se_tpg->acl_node_lock); | ||
613 | list_for_each_entry(se_nacl, &se_tpg->acl_node_list, acl_list) { | ||
614 | |||
615 | atomic_inc(&se_nacl->mib_ref_count); | ||
616 | smp_mb__after_atomic_inc(); | ||
617 | spin_unlock(&se_tpg->acl_node_lock); | ||
618 | |||
619 | spin_lock_irq(&se_nacl->device_list_lock); | ||
620 | for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) { | ||
621 | deve = &se_nacl->device_list[j]; | ||
622 | if (!(deve->lun_flags & | ||
623 | TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) || | ||
624 | (!deve->se_lun)) | ||
625 | continue; | ||
626 | lun = deve->se_lun; | ||
627 | if (!lun->lun_se_dev) | ||
628 | continue; | ||
629 | |||
630 | seq_printf(seq, "%u %u %u %u %u %s %u %u %u %u %u %u" | ||
631 | " %u %s\n", | ||
632 | /* scsiInstIndex */ | ||
633 | (TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ? | ||
634 | TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) : | ||
635 | 0, | ||
636 | /* scsiDeviceIndex */ | ||
637 | lun->lun_se_dev->dev_index, | ||
638 | /* scsiAuthIntrTgtPortIndex */ | ||
639 | TPG_TFO(se_tpg)->tpg_get_tag(se_tpg), | ||
640 | /* scsiAuthIntrIndex */ | ||
641 | se_nacl->acl_index, | ||
642 | /* scsiAuthIntrDevOrPort */ | ||
643 | 1, | ||
644 | /* scsiAuthIntrName */ | ||
645 | se_nacl->initiatorname[0] ? | ||
646 | se_nacl->initiatorname : NONE, | ||
647 | /* FIXME: scsiAuthIntrLunMapIndex */ | ||
648 | 0, | ||
649 | /* scsiAuthIntrAttachedTimes */ | ||
650 | deve->attach_count, | ||
651 | /* scsiAuthIntrOutCommands */ | ||
652 | deve->total_cmds, | ||
653 | /* scsiAuthIntrReadMegaBytes */ | ||
654 | (u32)(deve->read_bytes >> 20), | ||
655 | /* scsiAuthIntrWrittenMegaBytes */ | ||
656 | (u32)(deve->write_bytes >> 20), | ||
657 | /* FIXME: scsiAuthIntrHSOutCommands */ | ||
658 | 0, | ||
659 | /* scsiAuthIntrLastCreation */ | ||
660 | (u32)(((u32)deve->creation_time - | ||
661 | INITIAL_JIFFIES) * 100 / HZ), | ||
662 | /* FIXME: scsiAuthIntrRowStatus */ | ||
663 | "Ready"); | ||
664 | } | ||
665 | spin_unlock_irq(&se_nacl->device_list_lock); | ||
666 | |||
667 | spin_lock(&se_tpg->acl_node_lock); | ||
668 | atomic_dec(&se_nacl->mib_ref_count); | ||
669 | smp_mb__after_atomic_dec(); | ||
670 | } | ||
671 | spin_unlock(&se_tpg->acl_node_lock); | ||
672 | |||
673 | return 0; | ||
674 | } | ||
675 | |||
676 | static const struct seq_operations scsi_auth_intr_seq_ops = { | ||
677 | .start = scsi_auth_intr_seq_start, | ||
678 | .next = scsi_auth_intr_seq_next, | ||
679 | .stop = scsi_auth_intr_seq_stop, | ||
680 | .show = scsi_auth_intr_seq_show | ||
681 | }; | ||
682 | |||
683 | static int scsi_auth_intr_seq_open(struct inode *inode, struct file *file) | ||
684 | { | ||
685 | return seq_open(file, &scsi_auth_intr_seq_ops); | ||
686 | } | ||
687 | |||
688 | static const struct file_operations scsi_auth_intr_seq_fops = { | ||
689 | .owner = THIS_MODULE, | ||
690 | .open = scsi_auth_intr_seq_open, | ||
691 | .read = seq_read, | ||
692 | .llseek = seq_lseek, | ||
693 | .release = seq_release, | ||
694 | }; | ||
695 | |||
696 | /* | ||
697 | * SCSI Attached Initiator Port Table: | ||
698 | * It lists the SCSI Initiators attached to one of the local Target ports. | ||
699 | * Iterates through all active TPGs and use active sessions from each TPG | ||
700 | * to list the info fo this table. | ||
701 | */ | ||
702 | static void *scsi_att_intr_port_seq_start(struct seq_file *seq, loff_t *pos) | ||
703 | { | ||
704 | spin_lock_bh(&se_global->se_tpg_lock); | ||
705 | return seq_list_start(&se_global->g_se_tpg_list, *pos); | ||
706 | } | ||
707 | |||
708 | static void *scsi_att_intr_port_seq_next(struct seq_file *seq, void *v, | ||
709 | loff_t *pos) | ||
710 | { | ||
711 | return seq_list_next(v, &se_global->g_se_tpg_list, pos); | ||
712 | } | ||
713 | |||
714 | static void scsi_att_intr_port_seq_stop(struct seq_file *seq, void *v) | ||
715 | { | ||
716 | spin_unlock_bh(&se_global->se_tpg_lock); | ||
717 | } | ||
718 | |||
719 | static int scsi_att_intr_port_seq_show(struct seq_file *seq, void *v) | ||
720 | { | ||
721 | struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group, | ||
722 | se_tpg_list); | ||
723 | struct se_dev_entry *deve; | ||
724 | struct se_lun *lun; | ||
725 | struct se_node_acl *se_nacl; | ||
726 | struct se_session *se_sess; | ||
727 | unsigned char buf[64]; | ||
728 | int j; | ||
729 | |||
730 | if (list_is_first(&se_tpg->se_tpg_list, | ||
731 | &se_global->g_se_tpg_list)) | ||
732 | seq_puts(seq, "inst dev port indx port_auth_indx port_name" | ||
733 | " port_ident\n"); | ||
734 | |||
735 | if (!(se_tpg)) | ||
736 | return 0; | ||
737 | |||
738 | spin_lock(&se_tpg->session_lock); | ||
739 | list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) { | ||
740 | if ((TPG_TFO(se_tpg)->sess_logged_in(se_sess)) || | ||
741 | (!se_sess->se_node_acl) || | ||
742 | (!se_sess->se_node_acl->device_list)) | ||
743 | continue; | ||
744 | |||
745 | atomic_inc(&se_sess->mib_ref_count); | ||
746 | smp_mb__after_atomic_inc(); | ||
747 | se_nacl = se_sess->se_node_acl; | ||
748 | atomic_inc(&se_nacl->mib_ref_count); | ||
749 | smp_mb__after_atomic_inc(); | ||
750 | spin_unlock(&se_tpg->session_lock); | ||
751 | |||
752 | spin_lock_irq(&se_nacl->device_list_lock); | ||
753 | for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) { | ||
754 | deve = &se_nacl->device_list[j]; | ||
755 | if (!(deve->lun_flags & | ||
756 | TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) || | ||
757 | (!deve->se_lun)) | ||
758 | continue; | ||
759 | |||
760 | lun = deve->se_lun; | ||
761 | if (!lun->lun_se_dev) | ||
762 | continue; | ||
763 | |||
764 | memset(buf, 0, 64); | ||
765 | if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) | ||
766 | TPG_TFO(se_tpg)->sess_get_initiator_sid( | ||
767 | se_sess, (unsigned char *)&buf[0], 64); | ||
768 | |||
769 | seq_printf(seq, "%u %u %u %u %u %s+i+%s\n", | ||
770 | /* scsiInstIndex */ | ||
771 | (TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ? | ||
772 | TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) : | ||
773 | 0, | ||
774 | /* scsiDeviceIndex */ | ||
775 | lun->lun_se_dev->dev_index, | ||
776 | /* scsiPortIndex */ | ||
777 | TPG_TFO(se_tpg)->tpg_get_tag(se_tpg), | ||
778 | /* scsiAttIntrPortIndex */ | ||
779 | (TPG_TFO(se_tpg)->sess_get_index != NULL) ? | ||
780 | TPG_TFO(se_tpg)->sess_get_index(se_sess) : | ||
781 | 0, | ||
782 | /* scsiAttIntrPortAuthIntrIdx */ | ||
783 | se_nacl->acl_index, | ||
784 | /* scsiAttIntrPortName */ | ||
785 | se_nacl->initiatorname[0] ? | ||
786 | se_nacl->initiatorname : NONE, | ||
787 | /* scsiAttIntrPortIdentifier */ | ||
788 | buf); | ||
789 | } | ||
790 | spin_unlock_irq(&se_nacl->device_list_lock); | ||
791 | |||
792 | spin_lock(&se_tpg->session_lock); | ||
793 | atomic_dec(&se_nacl->mib_ref_count); | ||
794 | smp_mb__after_atomic_dec(); | ||
795 | atomic_dec(&se_sess->mib_ref_count); | ||
796 | smp_mb__after_atomic_dec(); | ||
797 | } | ||
798 | spin_unlock(&se_tpg->session_lock); | ||
799 | |||
800 | return 0; | ||
801 | } | ||
802 | |||
803 | static const struct seq_operations scsi_att_intr_port_seq_ops = { | ||
804 | .start = scsi_att_intr_port_seq_start, | ||
805 | .next = scsi_att_intr_port_seq_next, | ||
806 | .stop = scsi_att_intr_port_seq_stop, | ||
807 | .show = scsi_att_intr_port_seq_show | ||
808 | }; | ||
809 | |||
810 | static int scsi_att_intr_port_seq_open(struct inode *inode, struct file *file) | ||
811 | { | ||
812 | return seq_open(file, &scsi_att_intr_port_seq_ops); | ||
813 | } | ||
814 | |||
815 | static const struct file_operations scsi_att_intr_port_seq_fops = { | ||
816 | .owner = THIS_MODULE, | ||
817 | .open = scsi_att_intr_port_seq_open, | ||
818 | .read = seq_read, | ||
819 | .llseek = seq_lseek, | ||
820 | .release = seq_release, | ||
821 | }; | ||
822 | |||
823 | /* | ||
824 | * SCSI Logical Unit Table | ||
825 | */ | ||
826 | static void *scsi_lu_seq_start(struct seq_file *seq, loff_t *pos) | ||
827 | { | ||
828 | return locate_hba_start(seq, pos); | ||
829 | } | ||
830 | |||
831 | static void *scsi_lu_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
832 | { | ||
833 | return locate_hba_next(seq, v, pos); | ||
834 | } | ||
835 | |||
836 | static void scsi_lu_seq_stop(struct seq_file *seq, void *v) | ||
837 | { | ||
838 | locate_hba_stop(seq, v); | ||
839 | } | ||
840 | |||
841 | #define SCSI_LU_INDEX 1 | ||
842 | static int scsi_lu_seq_show(struct seq_file *seq, void *v) | ||
843 | { | ||
844 | struct se_hba *hba; | ||
845 | struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, | ||
846 | g_se_dev_list); | ||
847 | struct se_device *dev = se_dev->se_dev_ptr; | ||
848 | int j; | ||
849 | char str[28]; | ||
850 | |||
851 | if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) | ||
852 | seq_puts(seq, "inst dev indx LUN lu_name vend prod rev" | ||
853 | " dev_type status state-bit num_cmds read_mbytes" | ||
854 | " write_mbytes resets full_stat hs_num_cmds creation_time\n"); | ||
855 | |||
856 | if (!(dev)) | ||
857 | return 0; | ||
858 | |||
859 | hba = dev->se_hba; | ||
860 | if (!(hba)) { | ||
861 | /* Log error ? */ | ||
862 | return 0; | ||
863 | } | ||
864 | |||
865 | /* Fix LU state, if we can read it from the device */ | ||
866 | seq_printf(seq, "%u %u %u %llu %s", hba->hba_index, | ||
867 | dev->dev_index, SCSI_LU_INDEX, | ||
868 | (unsigned long long)0, /* FIXME: scsiLuDefaultLun */ | ||
869 | (strlen(DEV_T10_WWN(dev)->unit_serial)) ? | ||
870 | /* scsiLuWwnName */ | ||
871 | (char *)&DEV_T10_WWN(dev)->unit_serial[0] : | ||
872 | "None"); | ||
873 | |||
874 | memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); | ||
875 | /* scsiLuVendorId */ | ||
876 | for (j = 0; j < 8; j++) | ||
877 | str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ? | ||
878 | DEV_T10_WWN(dev)->vendor[j] : 0x20; | ||
879 | str[8] = 0; | ||
880 | seq_printf(seq, " %s", str); | ||
881 | |||
882 | /* scsiLuProductId */ | ||
883 | for (j = 0; j < 16; j++) | ||
884 | str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ? | ||
885 | DEV_T10_WWN(dev)->model[j] : 0x20; | ||
886 | str[16] = 0; | ||
887 | seq_printf(seq, " %s", str); | ||
888 | |||
889 | /* scsiLuRevisionId */ | ||
890 | for (j = 0; j < 4; j++) | ||
891 | str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ? | ||
892 | DEV_T10_WWN(dev)->revision[j] : 0x20; | ||
893 | str[4] = 0; | ||
894 | seq_printf(seq, " %s", str); | ||
895 | |||
896 | seq_printf(seq, " %u %s %s %llu %u %u %u %u %u %u\n", | ||
897 | /* scsiLuPeripheralType */ | ||
898 | TRANSPORT(dev)->get_device_type(dev), | ||
899 | (dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ? | ||
900 | "available" : "notavailable", /* scsiLuStatus */ | ||
901 | "exposed", /* scsiLuState */ | ||
902 | (unsigned long long)dev->num_cmds, | ||
903 | /* scsiLuReadMegaBytes */ | ||
904 | (u32)(dev->read_bytes >> 20), | ||
905 | /* scsiLuWrittenMegaBytes */ | ||
906 | (u32)(dev->write_bytes >> 20), | ||
907 | dev->num_resets, /* scsiLuInResets */ | ||
908 | 0, /* scsiLuOutTaskSetFullStatus */ | ||
909 | 0, /* scsiLuHSInCommands */ | ||
910 | (u32)(((u32)dev->creation_time - INITIAL_JIFFIES) * | ||
911 | 100 / HZ)); | ||
912 | |||
913 | return 0; | ||
914 | } | ||
915 | |||
916 | static const struct seq_operations scsi_lu_seq_ops = { | ||
917 | .start = scsi_lu_seq_start, | ||
918 | .next = scsi_lu_seq_next, | ||
919 | .stop = scsi_lu_seq_stop, | ||
920 | .show = scsi_lu_seq_show | ||
921 | }; | ||
922 | |||
923 | static int scsi_lu_seq_open(struct inode *inode, struct file *file) | ||
924 | { | ||
925 | return seq_open(file, &scsi_lu_seq_ops); | ||
926 | } | ||
927 | |||
928 | static const struct file_operations scsi_lu_seq_fops = { | ||
929 | .owner = THIS_MODULE, | ||
930 | .open = scsi_lu_seq_open, | ||
931 | .read = seq_read, | ||
932 | .llseek = seq_lseek, | ||
933 | .release = seq_release, | ||
934 | }; | ||
935 | |||
936 | /****************************************************************************/ | ||
937 | |||
938 | /* | ||
939 | * Remove proc fs entries | ||
940 | */ | ||
941 | void remove_scsi_target_mib(void) | ||
942 | { | ||
943 | remove_proc_entry("scsi_target/mib/scsi_inst", NULL); | ||
944 | remove_proc_entry("scsi_target/mib/scsi_dev", NULL); | ||
945 | remove_proc_entry("scsi_target/mib/scsi_port", NULL); | ||
946 | remove_proc_entry("scsi_target/mib/scsi_transport", NULL); | ||
947 | remove_proc_entry("scsi_target/mib/scsi_tgt_dev", NULL); | ||
948 | remove_proc_entry("scsi_target/mib/scsi_tgt_port", NULL); | ||
949 | remove_proc_entry("scsi_target/mib/scsi_auth_intr", NULL); | ||
950 | remove_proc_entry("scsi_target/mib/scsi_att_intr_port", NULL); | ||
951 | remove_proc_entry("scsi_target/mib/scsi_lu", NULL); | ||
952 | remove_proc_entry("scsi_target/mib", NULL); | ||
953 | } | ||
954 | |||
955 | /* | ||
956 | * Create proc fs entries for the mib tables | ||
957 | */ | ||
958 | int init_scsi_target_mib(void) | ||
959 | { | ||
960 | struct proc_dir_entry *dir_entry; | ||
961 | struct proc_dir_entry *scsi_inst_entry; | ||
962 | struct proc_dir_entry *scsi_dev_entry; | ||
963 | struct proc_dir_entry *scsi_port_entry; | ||
964 | struct proc_dir_entry *scsi_transport_entry; | ||
965 | struct proc_dir_entry *scsi_tgt_dev_entry; | ||
966 | struct proc_dir_entry *scsi_tgt_port_entry; | ||
967 | struct proc_dir_entry *scsi_auth_intr_entry; | ||
968 | struct proc_dir_entry *scsi_att_intr_port_entry; | ||
969 | struct proc_dir_entry *scsi_lu_entry; | ||
970 | |||
971 | dir_entry = proc_mkdir("scsi_target/mib", NULL); | ||
972 | if (!(dir_entry)) { | ||
973 | printk(KERN_ERR "proc_mkdir() failed.\n"); | ||
974 | return -1; | ||
975 | } | ||
976 | |||
977 | scsi_inst_entry = | ||
978 | create_proc_entry("scsi_target/mib/scsi_inst", 0, NULL); | ||
979 | if (scsi_inst_entry) | ||
980 | scsi_inst_entry->proc_fops = &scsi_inst_seq_fops; | ||
981 | else | ||
982 | goto error; | ||
983 | |||
984 | scsi_dev_entry = | ||
985 | create_proc_entry("scsi_target/mib/scsi_dev", 0, NULL); | ||
986 | if (scsi_dev_entry) | ||
987 | scsi_dev_entry->proc_fops = &scsi_dev_seq_fops; | ||
988 | else | ||
989 | goto error; | ||
990 | |||
991 | scsi_port_entry = | ||
992 | create_proc_entry("scsi_target/mib/scsi_port", 0, NULL); | ||
993 | if (scsi_port_entry) | ||
994 | scsi_port_entry->proc_fops = &scsi_port_seq_fops; | ||
995 | else | ||
996 | goto error; | ||
997 | |||
998 | scsi_transport_entry = | ||
999 | create_proc_entry("scsi_target/mib/scsi_transport", 0, NULL); | ||
1000 | if (scsi_transport_entry) | ||
1001 | scsi_transport_entry->proc_fops = &scsi_transport_seq_fops; | ||
1002 | else | ||
1003 | goto error; | ||
1004 | |||
1005 | scsi_tgt_dev_entry = | ||
1006 | create_proc_entry("scsi_target/mib/scsi_tgt_dev", 0, NULL); | ||
1007 | if (scsi_tgt_dev_entry) | ||
1008 | scsi_tgt_dev_entry->proc_fops = &scsi_tgt_dev_seq_fops; | ||
1009 | else | ||
1010 | goto error; | ||
1011 | |||
1012 | scsi_tgt_port_entry = | ||
1013 | create_proc_entry("scsi_target/mib/scsi_tgt_port", 0, NULL); | ||
1014 | if (scsi_tgt_port_entry) | ||
1015 | scsi_tgt_port_entry->proc_fops = &scsi_tgt_port_seq_fops; | ||
1016 | else | ||
1017 | goto error; | ||
1018 | |||
1019 | scsi_auth_intr_entry = | ||
1020 | create_proc_entry("scsi_target/mib/scsi_auth_intr", 0, NULL); | ||
1021 | if (scsi_auth_intr_entry) | ||
1022 | scsi_auth_intr_entry->proc_fops = &scsi_auth_intr_seq_fops; | ||
1023 | else | ||
1024 | goto error; | ||
1025 | |||
1026 | scsi_att_intr_port_entry = | ||
1027 | create_proc_entry("scsi_target/mib/scsi_att_intr_port", 0, NULL); | ||
1028 | if (scsi_att_intr_port_entry) | ||
1029 | scsi_att_intr_port_entry->proc_fops = | ||
1030 | &scsi_att_intr_port_seq_fops; | ||
1031 | else | ||
1032 | goto error; | ||
1033 | |||
1034 | scsi_lu_entry = create_proc_entry("scsi_target/mib/scsi_lu", 0, NULL); | ||
1035 | if (scsi_lu_entry) | ||
1036 | scsi_lu_entry->proc_fops = &scsi_lu_seq_fops; | ||
1037 | else | ||
1038 | goto error; | ||
1039 | |||
1040 | return 0; | ||
1041 | |||
1042 | error: | ||
1043 | printk(KERN_ERR "create_proc_entry() failed.\n"); | ||
1044 | remove_scsi_target_mib(); | ||
1045 | return -1; | ||
1046 | } | ||
1047 | |||
1048 | /* | ||
1049 | * Initialize the index table for allocating unique row indexes to various mib | ||
1050 | * tables | ||
1051 | */ | ||
1052 | void init_scsi_index_table(void) | ||
1053 | { | ||
1054 | memset(&scsi_index_table, 0, sizeof(struct scsi_index_table)); | ||
1055 | spin_lock_init(&scsi_index_table.lock); | ||
1056 | } | ||
1057 | |||
1058 | /* | ||
1059 | * Allocate a new row index for the entry type specified | ||
1060 | */ | ||
1061 | u32 scsi_get_new_index(scsi_index_t type) | ||
1062 | { | ||
1063 | u32 new_index; | ||
1064 | |||
1065 | if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) { | ||
1066 | printk(KERN_ERR "Invalid index type %d\n", type); | ||
1067 | return -1; | ||
1068 | } | ||
1069 | |||
1070 | spin_lock(&scsi_index_table.lock); | ||
1071 | new_index = ++scsi_index_table.scsi_mib_index[type]; | ||
1072 | if (new_index == 0) | ||
1073 | new_index = ++scsi_index_table.scsi_mib_index[type]; | ||
1074 | spin_unlock(&scsi_index_table.lock); | ||
1075 | |||
1076 | return new_index; | ||
1077 | } | ||
1078 | EXPORT_SYMBOL(scsi_get_new_index); | ||
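[Editor's note] Most of the deleted file is /proc seq_file plumbing for the SCSI MIB tables, but the exported index allocator at the end (scsi_get_new_index()) is the part other code calls into, and this hunk alone does not show where an equivalent lives afterwards. For reference, its logic reduces to the sketch below: a locked per-type counter that skips 0 on wrap so that zero can still mean "no index assigned". The my_* names are placeholders, not the driver's identifiers.

#include <linux/spinlock.h>
#include <linux/types.h>

enum my_index_type {
        MY_INST_INDEX,
        MY_DEVICE_INDEX,
        MY_AUTH_INTR_INDEX,
        MY_INDEX_TYPE_MAX
};

static struct {
        spinlock_t lock;
        u32 counter[MY_INDEX_TYPE_MAX];
} my_index_table = {
        .lock = __SPIN_LOCK_UNLOCKED(my_index_table.lock),
};

/* Monotonic per-type index; 0 is never handed out, even after wrap. */
static u32 my_get_new_index(enum my_index_type type)
{
        u32 new_index;

        if (type >= MY_INDEX_TYPE_MAX)
                return 0;

        spin_lock(&my_index_table.lock);
        new_index = ++my_index_table.counter[type];
        if (new_index == 0)
                new_index = ++my_index_table.counter[type];
        spin_unlock(&my_index_table.lock);

        return new_index;
}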
diff --git a/drivers/target/target_core_mib.h b/drivers/target/target_core_mib.h deleted file mode 100644 index 277204633850..000000000000 --- a/drivers/target/target_core_mib.h +++ /dev/null | |||
@@ -1,28 +0,0 @@ | |||
1 | #ifndef TARGET_CORE_MIB_H | ||
2 | #define TARGET_CORE_MIB_H | ||
3 | |||
4 | typedef enum { | ||
5 | SCSI_INST_INDEX, | ||
6 | SCSI_DEVICE_INDEX, | ||
7 | SCSI_AUTH_INTR_INDEX, | ||
8 | SCSI_INDEX_TYPE_MAX | ||
9 | } scsi_index_t; | ||
10 | |||
11 | struct scsi_index_table { | ||
12 | spinlock_t lock; | ||
13 | u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; | ||
14 | } ____cacheline_aligned; | ||
15 | |||
16 | /* SCSI Port stats */ | ||
17 | struct scsi_port_stats { | ||
18 | u64 cmd_pdus; | ||
19 | u64 tx_data_octets; | ||
20 | u64 rx_data_octets; | ||
21 | } ____cacheline_aligned; | ||
22 | |||
23 | extern int init_scsi_target_mib(void); | ||
24 | extern void remove_scsi_target_mib(void); | ||
25 | extern void init_scsi_index_table(void); | ||
26 | extern u32 scsi_get_new_index(scsi_index_t); | ||
27 | |||
28 | #endif /*** TARGET_CORE_MIB_H ***/ | ||
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 742d24609a9b..f2a08477a68c 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c | |||
@@ -462,8 +462,8 @@ static struct se_device *pscsi_create_type_disk( | |||
462 | */ | 462 | */ |
463 | bd = blkdev_get_by_path(se_dev->se_dev_udev_path, | 463 | bd = blkdev_get_by_path(se_dev->se_dev_udev_path, |
464 | FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv); | 464 | FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv); |
465 | if (!(bd)) { | 465 | if (IS_ERR(bd)) { |
466 | printk("pSCSI: blkdev_get_by_path() failed\n"); | 466 | printk(KERN_ERR "pSCSI: blkdev_get_by_path() failed\n"); |
467 | scsi_device_put(sd); | 467 | scsi_device_put(sd); |
468 | return NULL; | 468 | return NULL; |
469 | } | 469 | } |
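Editor's note on the pscsi hunk above: blkdev_get_by_path() reports failure as an ERR_PTR-encoded pointer, never NULL, so the old !(bd) test could not fire. A minimal sketch of the usual pattern for such APIs; the wrapper name and signature here are illustrative, not kernel code:

    #include <linux/blkdev.h>
    #include <linux/err.h>
    #include <linux/fs.h>

    /* Hypothetical helper: open a backing block device by path. */
    static int open_backing_dev(const char *path, void *holder,
                                struct block_device **out)
    {
        struct block_device *bd;

        bd = blkdev_get_by_path(path,
                                FMODE_WRITE | FMODE_READ | FMODE_EXCL,
                                holder);
        if (IS_ERR(bd))                 /* ERR_PTR(-errno), never NULL */
            return PTR_ERR(bd);         /* propagate the real error code */

        *out = bd;
        return 0;
    }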
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index abfa81a57115..c26f67467623 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c | |||
@@ -275,7 +275,6 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( | |||
275 | spin_lock_init(&acl->device_list_lock); | 275 | spin_lock_init(&acl->device_list_lock); |
276 | spin_lock_init(&acl->nacl_sess_lock); | 276 | spin_lock_init(&acl->nacl_sess_lock); |
277 | atomic_set(&acl->acl_pr_ref_count, 0); | 277 | atomic_set(&acl->acl_pr_ref_count, 0); |
278 | atomic_set(&acl->mib_ref_count, 0); | ||
279 | acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg); | 278 | acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg); |
280 | snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); | 279 | snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); |
281 | acl->se_tpg = tpg; | 280 | acl->se_tpg = tpg; |
@@ -318,12 +317,6 @@ void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl) | |||
318 | cpu_relax(); | 317 | cpu_relax(); |
319 | } | 318 | } |
320 | 319 | ||
321 | void core_tpg_wait_for_mib_ref(struct se_node_acl *nacl) | ||
322 | { | ||
323 | while (atomic_read(&nacl->mib_ref_count) != 0) | ||
324 | cpu_relax(); | ||
325 | } | ||
326 | |||
327 | void core_tpg_clear_object_luns(struct se_portal_group *tpg) | 320 | void core_tpg_clear_object_luns(struct se_portal_group *tpg) |
328 | { | 321 | { |
329 | int i, ret; | 322 | int i, ret; |
@@ -480,7 +473,6 @@ int core_tpg_del_initiator_node_acl( | |||
480 | spin_unlock_bh(&tpg->session_lock); | 473 | spin_unlock_bh(&tpg->session_lock); |
481 | 474 | ||
482 | core_tpg_wait_for_nacl_pr_ref(acl); | 475 | core_tpg_wait_for_nacl_pr_ref(acl); |
483 | core_tpg_wait_for_mib_ref(acl); | ||
484 | core_clear_initiator_node_from_tpg(acl, tpg); | 476 | core_clear_initiator_node_from_tpg(acl, tpg); |
485 | core_free_device_list_for_node(acl, tpg); | 477 | core_free_device_list_for_node(acl, tpg); |
486 | 478 | ||
@@ -701,6 +693,8 @@ EXPORT_SYMBOL(core_tpg_register); | |||
701 | 693 | ||
702 | int core_tpg_deregister(struct se_portal_group *se_tpg) | 694 | int core_tpg_deregister(struct se_portal_group *se_tpg) |
703 | { | 695 | { |
696 | struct se_node_acl *nacl, *nacl_tmp; | ||
697 | |||
704 | printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group" | 698 | printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group" |
705 | " for endpoint: %s Portal Tag %u\n", | 699 | " for endpoint: %s Portal Tag %u\n", |
706 | (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? | 700 | (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? |
@@ -714,6 +708,25 @@ int core_tpg_deregister(struct se_portal_group *se_tpg) | |||
714 | 708 | ||
715 | while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0) | 709 | while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0) |
716 | cpu_relax(); | 710 | cpu_relax(); |
711 | /* | ||
712 | * Release any remaining demo-mode generated se_node_acl that have | ||
713 | * not been released because of TFO->tpg_check_demo_mode_cache() == 1 | ||
714 | * in transport_deregister_session(). | ||
715 | */ | ||
716 | spin_lock_bh(&se_tpg->acl_node_lock); | ||
717 | list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list, | ||
718 | acl_list) { | ||
719 | list_del(&nacl->acl_list); | ||
720 | se_tpg->num_node_acls--; | ||
721 | spin_unlock_bh(&se_tpg->acl_node_lock); | ||
722 | |||
723 | core_tpg_wait_for_nacl_pr_ref(nacl); | ||
724 | core_free_device_list_for_node(nacl, se_tpg); | ||
725 | TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, nacl); | ||
726 | |||
727 | spin_lock_bh(&se_tpg->acl_node_lock); | ||
728 | } | ||
729 | spin_unlock_bh(&se_tpg->acl_node_lock); | ||
717 | 730 | ||
718 | if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) | 731 | if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) |
719 | core_tpg_release_virtual_lun0(se_tpg); | 732 | core_tpg_release_virtual_lun0(se_tpg); |
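Editor's note on the core_tpg_deregister() hunk above: the new loop walks acl_node_list with list_for_each_entry_safe(), unlinks each entry while acl_node_lock is held, then drops the lock for the per-ACL teardown (which may wait or sleep) and retakes it before the next iteration. A stripped-down sketch of that walk-unlink-unlock-work-relock shape, with placeholder names; it assumes, as the patch does, that nothing else mutates the list during teardown:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct node {
        struct list_head link;
    };

    static LIST_HEAD(node_list);
    static DEFINE_SPINLOCK(node_lock);

    static void release_node(struct node *n)
    {
        /* teardown that must not run under node_lock (may wait/sleep) */
        kfree(n);
    }

    static void release_all_nodes(void)
    {
        struct node *n, *tmp;

        spin_lock(&node_lock);
        list_for_each_entry_safe(n, tmp, &node_list, link) {
            list_del(&n->link);         /* unlink while locked */
            spin_unlock(&node_lock);

            release_node(n);            /* heavy work with the lock dropped */

            spin_lock(&node_lock);      /* relock before the next entry */
        }
        spin_unlock(&node_lock);
    }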
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 28b6292ff298..236e22d8cfae 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -379,6 +379,40 @@ void release_se_global(void) | |||
379 | se_global = NULL; | 379 | se_global = NULL; |
380 | } | 380 | } |
381 | 381 | ||
382 | /* SCSI statistics table index */ | ||
383 | static struct scsi_index_table scsi_index_table; | ||
384 | |||
385 | /* | ||
386 | * Initialize the index table for allocating unique row indexes to various mib | ||
387 | * tables. | ||
388 | */ | ||
389 | void init_scsi_index_table(void) | ||
390 | { | ||
391 | memset(&scsi_index_table, 0, sizeof(struct scsi_index_table)); | ||
392 | spin_lock_init(&scsi_index_table.lock); | ||
393 | } | ||
394 | |||
395 | /* | ||
396 | * Allocate a new row index for the entry type specified | ||
397 | */ | ||
398 | u32 scsi_get_new_index(scsi_index_t type) | ||
399 | { | ||
400 | u32 new_index; | ||
401 | |||
402 | if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) { | ||
403 | printk(KERN_ERR "Invalid index type %d\n", type); | ||
404 | return -EINVAL; | ||
405 | } | ||
406 | |||
407 | spin_lock(&scsi_index_table.lock); | ||
408 | new_index = ++scsi_index_table.scsi_mib_index[type]; | ||
409 | if (new_index == 0) | ||
410 | new_index = ++scsi_index_table.scsi_mib_index[type]; | ||
411 | spin_unlock(&scsi_index_table.lock); | ||
412 | |||
413 | return new_index; | ||
414 | } | ||
415 | |||
382 | void transport_init_queue_obj(struct se_queue_obj *qobj) | 416 | void transport_init_queue_obj(struct se_queue_obj *qobj) |
383 | { | 417 | { |
384 | atomic_set(&qobj->queue_cnt, 0); | 418 | atomic_set(&qobj->queue_cnt, 0); |
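Editor's note on the scsi_get_new_index() code moved into target_core_transport.c above: the counter is bumped under a spinlock, and if it wraps to 0 it is bumped once more, so 0 stays reserved as "no index assigned". A standalone sketch of that wrap-avoiding counter; the names are illustrative:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    static DEFINE_SPINLOCK(idx_lock);
    static u32 idx_counter;

    /* Return a non-zero, monotonically increasing index. */
    static u32 get_new_index(void)
    {
        u32 idx;

        spin_lock(&idx_lock);
        idx = ++idx_counter;
        if (idx == 0)                   /* skip 0 on 32-bit wrap-around */
            idx = ++idx_counter;
        spin_unlock(&idx_lock);

        return idx;
    }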
@@ -437,7 +471,6 @@ struct se_session *transport_init_session(void) | |||
437 | } | 471 | } |
438 | INIT_LIST_HEAD(&se_sess->sess_list); | 472 | INIT_LIST_HEAD(&se_sess->sess_list); |
439 | INIT_LIST_HEAD(&se_sess->sess_acl_list); | 473 | INIT_LIST_HEAD(&se_sess->sess_acl_list); |
440 | atomic_set(&se_sess->mib_ref_count, 0); | ||
441 | 474 | ||
442 | return se_sess; | 475 | return se_sess; |
443 | } | 476 | } |
@@ -546,12 +579,6 @@ void transport_deregister_session(struct se_session *se_sess) | |||
546 | transport_free_session(se_sess); | 579 | transport_free_session(se_sess); |
547 | return; | 580 | return; |
548 | } | 581 | } |
549 | /* | ||
550 | * Wait for possible reference in drivers/target/target_core_mib.c: | ||
551 | * scsi_att_intr_port_seq_show() | ||
552 | */ | ||
553 | while (atomic_read(&se_sess->mib_ref_count) != 0) | ||
554 | cpu_relax(); | ||
555 | 582 | ||
556 | spin_lock_bh(&se_tpg->session_lock); | 583 | spin_lock_bh(&se_tpg->session_lock); |
557 | list_del(&se_sess->sess_list); | 584 | list_del(&se_sess->sess_list); |
@@ -574,7 +601,6 @@ void transport_deregister_session(struct se_session *se_sess) | |||
574 | spin_unlock_bh(&se_tpg->acl_node_lock); | 601 | spin_unlock_bh(&se_tpg->acl_node_lock); |
575 | 602 | ||
576 | core_tpg_wait_for_nacl_pr_ref(se_nacl); | 603 | core_tpg_wait_for_nacl_pr_ref(se_nacl); |
577 | core_tpg_wait_for_mib_ref(se_nacl); | ||
578 | core_free_device_list_for_node(se_nacl, se_tpg); | 604 | core_free_device_list_for_node(se_nacl, se_tpg); |
579 | TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, | 605 | TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, |
580 | se_nacl); | 606 | se_nacl); |
@@ -4827,6 +4853,8 @@ static int transport_do_se_mem_map( | |||
4827 | 4853 | ||
4828 | return ret; | 4854 | return ret; |
4829 | } | 4855 | } |
4856 | |||
4857 | BUG_ON(list_empty(se_mem_list)); | ||
4830 | /* | 4858 | /* |
4831 | * This is the normal path for all normal non BIDI and BIDI-COMMAND | 4859 | * This is the normal path for all normal non BIDI and BIDI-COMMAND |
4832 | * WRITE payloads.. If we need to do BIDI READ passthrough for | 4860 | * WRITE payloads.. If we need to do BIDI READ passthrough for |
@@ -5008,7 +5036,9 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd) | |||
5008 | struct se_mem *se_mem = NULL, *se_mem_lout = NULL; | 5036 | struct se_mem *se_mem = NULL, *se_mem_lout = NULL; |
5009 | u32 se_mem_cnt = 0, task_offset = 0; | 5037 | u32 se_mem_cnt = 0, task_offset = 0; |
5010 | 5038 | ||
5011 | BUG_ON(list_empty(cmd->t_task->t_mem_list)); | 5039 | if (!list_empty(T_TASK(cmd)->t_mem_list)) |
5040 | se_mem = list_entry(T_TASK(cmd)->t_mem_list->next, | ||
5041 | struct se_mem, se_list); | ||
5012 | 5042 | ||
5013 | ret = transport_do_se_mem_map(dev, task, | 5043 | ret = transport_do_se_mem_map(dev, task, |
5014 | cmd->t_task->t_mem_list, NULL, se_mem, | 5044 | cmd->t_task->t_mem_list, NULL, se_mem, |
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index f7a5dba3ca23..bf7c687519ef 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig | |||
@@ -4,7 +4,6 @@ | |||
4 | 4 | ||
5 | menuconfig THERMAL | 5 | menuconfig THERMAL |
6 | tristate "Generic Thermal sysfs driver" | 6 | tristate "Generic Thermal sysfs driver" |
7 | depends on NET | ||
8 | help | 7 | help |
9 | Generic Thermal Sysfs driver offers a generic mechanism for | 8 | Generic Thermal Sysfs driver offers a generic mechanism for |
10 | thermal management. Usually it's made up of one or more thermal | 9 | thermal management. Usually it's made up of one or more thermal |
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c index 7d0e63c79280..713b7ea4a607 100644 --- a/drivers/thermal/thermal_sys.c +++ b/drivers/thermal/thermal_sys.c | |||
@@ -62,20 +62,6 @@ static DEFINE_MUTEX(thermal_list_lock); | |||
62 | 62 | ||
63 | static unsigned int thermal_event_seqnum; | 63 | static unsigned int thermal_event_seqnum; |
64 | 64 | ||
65 | static struct genl_family thermal_event_genl_family = { | ||
66 | .id = GENL_ID_GENERATE, | ||
67 | .name = THERMAL_GENL_FAMILY_NAME, | ||
68 | .version = THERMAL_GENL_VERSION, | ||
69 | .maxattr = THERMAL_GENL_ATTR_MAX, | ||
70 | }; | ||
71 | |||
72 | static struct genl_multicast_group thermal_event_mcgrp = { | ||
73 | .name = THERMAL_GENL_MCAST_GROUP_NAME, | ||
74 | }; | ||
75 | |||
76 | static int genetlink_init(void); | ||
77 | static void genetlink_exit(void); | ||
78 | |||
79 | static int get_idr(struct idr *idr, struct mutex *lock, int *id) | 65 | static int get_idr(struct idr *idr, struct mutex *lock, int *id) |
80 | { | 66 | { |
81 | int err; | 67 | int err; |
@@ -1225,6 +1211,18 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz) | |||
1225 | 1211 | ||
1226 | EXPORT_SYMBOL(thermal_zone_device_unregister); | 1212 | EXPORT_SYMBOL(thermal_zone_device_unregister); |
1227 | 1213 | ||
1214 | #ifdef CONFIG_NET | ||
1215 | static struct genl_family thermal_event_genl_family = { | ||
1216 | .id = GENL_ID_GENERATE, | ||
1217 | .name = THERMAL_GENL_FAMILY_NAME, | ||
1218 | .version = THERMAL_GENL_VERSION, | ||
1219 | .maxattr = THERMAL_GENL_ATTR_MAX, | ||
1220 | }; | ||
1221 | |||
1222 | static struct genl_multicast_group thermal_event_mcgrp = { | ||
1223 | .name = THERMAL_GENL_MCAST_GROUP_NAME, | ||
1224 | }; | ||
1225 | |||
1228 | int generate_netlink_event(u32 orig, enum events event) | 1226 | int generate_netlink_event(u32 orig, enum events event) |
1229 | { | 1227 | { |
1230 | struct sk_buff *skb; | 1228 | struct sk_buff *skb; |
@@ -1301,6 +1299,15 @@ static int genetlink_init(void) | |||
1301 | return result; | 1299 | return result; |
1302 | } | 1300 | } |
1303 | 1301 | ||
1302 | static void genetlink_exit(void) | ||
1303 | { | ||
1304 | genl_unregister_family(&thermal_event_genl_family); | ||
1305 | } | ||
1306 | #else /* !CONFIG_NET */ | ||
1307 | static inline int genetlink_init(void) { return 0; } | ||
1308 | static inline void genetlink_exit(void) {} | ||
1309 | #endif /* !CONFIG_NET */ | ||
1310 | |||
1304 | static int __init thermal_init(void) | 1311 | static int __init thermal_init(void) |
1305 | { | 1312 | { |
1306 | int result = 0; | 1313 | int result = 0; |
@@ -1316,11 +1323,6 @@ static int __init thermal_init(void) | |||
1316 | return result; | 1323 | return result; |
1317 | } | 1324 | } |
1318 | 1325 | ||
1319 | static void genetlink_exit(void) | ||
1320 | { | ||
1321 | genl_unregister_family(&thermal_event_genl_family); | ||
1322 | } | ||
1323 | |||
1324 | static void __exit thermal_exit(void) | 1326 | static void __exit thermal_exit(void) |
1325 | { | 1327 | { |
1326 | class_unregister(&thermal_class); | 1328 | class_unregister(&thermal_class); |
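Editor's note on the thermal changes above: instead of making the whole subsystem depend on NET, the generic netlink plumbing is compiled only under CONFIG_NET, with inline no-op stubs otherwise, so thermal_init()/thermal_exit() can call the same functions unconditionally. A generic header-style sketch of that stub pattern; the Kconfig symbol and function names are made up for illustration:

    #ifdef CONFIG_FOO_NETLINK
    int foo_netlink_init(void);
    void foo_netlink_exit(void);
    #else /* !CONFIG_FOO_NETLINK */
    static inline int foo_netlink_init(void) { return 0; }  /* nothing to do */
    static inline void foo_netlink_exit(void) { }
    #endif /* !CONFIG_FOO_NETLINK */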
diff --git a/drivers/tty/hvc/Makefile b/drivers/tty/hvc/Makefile index e6bed5f177ff..d79e7e9bf9d2 100644 --- a/drivers/tty/hvc/Makefile +++ b/drivers/tty/hvc/Makefile | |||
@@ -10,4 +10,3 @@ obj-$(CONFIG_HVC_XEN) += hvc_xen.o | |||
10 | obj-$(CONFIG_HVC_IUCV) += hvc_iucv.o | 10 | obj-$(CONFIG_HVC_IUCV) += hvc_iucv.o |
11 | obj-$(CONFIG_HVC_UDBG) += hvc_udbg.o | 11 | obj-$(CONFIG_HVC_UDBG) += hvc_udbg.o |
12 | obj-$(CONFIG_HVCS) += hvcs.o | 12 | obj-$(CONFIG_HVCS) += hvcs.o |
13 | obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o | ||
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 44b8412a04e8..aa2e5d3eb01a 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c | |||
@@ -2414,6 +2414,7 @@ static int gsmld_config(struct tty_struct *tty, struct gsm_mux *gsm, | |||
2414 | 2414 | ||
2415 | gsm->initiator = c->initiator; | 2415 | gsm->initiator = c->initiator; |
2416 | gsm->mru = c->mru; | 2416 | gsm->mru = c->mru; |
2417 | gsm->mtu = c->mtu; | ||
2417 | gsm->encoding = c->encapsulation; | 2418 | gsm->encoding = c->encapsulation; |
2418 | gsm->adaption = c->adaption; | 2419 | gsm->adaption = c->adaption; |
2419 | gsm->n2 = c->n2; | 2420 | gsm->n2 = c->n2; |
diff --git a/drivers/tty/serial/68328serial.c b/drivers/tty/serial/68328serial.c index be0ebce36e54..de0160e3f8c4 100644 --- a/drivers/tty/serial/68328serial.c +++ b/drivers/tty/serial/68328serial.c | |||
@@ -262,7 +262,7 @@ static void status_handle(struct m68k_serial *info, unsigned short status) | |||
262 | 262 | ||
263 | static void receive_chars(struct m68k_serial *info, unsigned short rx) | 263 | static void receive_chars(struct m68k_serial *info, unsigned short rx) |
264 | { | 264 | { |
265 | struct tty_struct *tty = info->port.tty; | 265 | struct tty_struct *tty = info->tty; |
266 | m68328_uart *uart = &uart_addr[info->line]; | 266 | m68328_uart *uart = &uart_addr[info->line]; |
267 | unsigned char ch, flag; | 267 | unsigned char ch, flag; |
268 | 268 | ||
@@ -329,7 +329,7 @@ static void transmit_chars(struct m68k_serial *info) | |||
329 | goto clear_and_return; | 329 | goto clear_and_return; |
330 | } | 330 | } |
331 | 331 | ||
332 | if((info->xmit_cnt <= 0) || info->port.tty->stopped) { | 332 | if((info->xmit_cnt <= 0) || info->tty->stopped) { |
333 | /* That's peculiar... TX ints off */ | 333 | /* That's peculiar... TX ints off */ |
334 | uart->ustcnt &= ~USTCNT_TX_INTR_MASK; | 334 | uart->ustcnt &= ~USTCNT_TX_INTR_MASK; |
335 | goto clear_and_return; | 335 | goto clear_and_return; |
@@ -383,7 +383,7 @@ static void do_softint(struct work_struct *work) | |||
383 | struct m68k_serial *info = container_of(work, struct m68k_serial, tqueue); | 383 | struct m68k_serial *info = container_of(work, struct m68k_serial, tqueue); |
384 | struct tty_struct *tty; | 384 | struct tty_struct *tty; |
385 | 385 | ||
386 | tty = info->port.tty; | 386 | tty = info->tty; |
387 | if (!tty) | 387 | if (!tty) |
388 | return; | 388 | return; |
389 | #if 0 | 389 | #if 0 |
@@ -407,7 +407,7 @@ static void do_serial_hangup(struct work_struct *work) | |||
407 | struct m68k_serial *info = container_of(work, struct m68k_serial, tqueue_hangup); | 407 | struct m68k_serial *info = container_of(work, struct m68k_serial, tqueue_hangup); |
408 | struct tty_struct *tty; | 408 | struct tty_struct *tty; |
409 | 409 | ||
410 | tty = info->port.tty; | 410 | tty = info->tty; |
411 | if (!tty) | 411 | if (!tty) |
412 | return; | 412 | return; |
413 | 413 | ||
@@ -451,8 +451,8 @@ static int startup(struct m68k_serial * info) | |||
451 | uart->ustcnt = USTCNT_UEN | USTCNT_RXEN | USTCNT_RX_INTR_MASK; | 451 | uart->ustcnt = USTCNT_UEN | USTCNT_RXEN | USTCNT_RX_INTR_MASK; |
452 | #endif | 452 | #endif |
453 | 453 | ||
454 | if (info->port.tty) | 454 | if (info->tty) |
455 | clear_bit(TTY_IO_ERROR, &info->port.tty->flags); | 455 | clear_bit(TTY_IO_ERROR, &info->tty->flags); |
456 | info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; | 456 | info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; |
457 | 457 | ||
458 | /* | 458 | /* |
@@ -486,8 +486,8 @@ static void shutdown(struct m68k_serial * info) | |||
486 | info->xmit_buf = 0; | 486 | info->xmit_buf = 0; |
487 | } | 487 | } |
488 | 488 | ||
489 | if (info->port.tty) | 489 | if (info->tty) |
490 | set_bit(TTY_IO_ERROR, &info->port.tty->flags); | 490 | set_bit(TTY_IO_ERROR, &info->tty->flags); |
491 | 491 | ||
492 | info->flags &= ~S_INITIALIZED; | 492 | info->flags &= ~S_INITIALIZED; |
493 | local_irq_restore(flags); | 493 | local_irq_restore(flags); |
@@ -553,9 +553,9 @@ static void change_speed(struct m68k_serial *info) | |||
553 | unsigned cflag; | 553 | unsigned cflag; |
554 | int i; | 554 | int i; |
555 | 555 | ||
556 | if (!info->port.tty || !info->port.tty->termios) | 556 | if (!info->tty || !info->tty->termios) |
557 | return; | 557 | return; |
558 | cflag = info->port.tty->termios->c_cflag; | 558 | cflag = info->tty->termios->c_cflag; |
559 | if (!(port = info->port)) | 559 | if (!(port = info->port)) |
560 | return; | 560 | return; |
561 | 561 | ||
@@ -970,7 +970,6 @@ static void send_break(struct m68k_serial * info, unsigned int duration) | |||
970 | static int rs_ioctl(struct tty_struct *tty, struct file * file, | 970 | static int rs_ioctl(struct tty_struct *tty, struct file * file, |
971 | unsigned int cmd, unsigned long arg) | 971 | unsigned int cmd, unsigned long arg) |
972 | { | 972 | { |
973 | int error; | ||
974 | struct m68k_serial * info = (struct m68k_serial *)tty->driver_data; | 973 | struct m68k_serial * info = (struct m68k_serial *)tty->driver_data; |
975 | int retval; | 974 | int retval; |
976 | 975 | ||
@@ -1104,7 +1103,7 @@ static void rs_close(struct tty_struct *tty, struct file * filp) | |||
1104 | tty_ldisc_flush(tty); | 1103 | tty_ldisc_flush(tty); |
1105 | tty->closing = 0; | 1104 | tty->closing = 0; |
1106 | info->event = 0; | 1105 | info->event = 0; |
1107 | info->port.tty = NULL; | 1106 | info->tty = NULL; |
1108 | #warning "This is not and has never been valid so fix it" | 1107 | #warning "This is not and has never been valid so fix it" |
1109 | #if 0 | 1108 | #if 0 |
1110 | if (tty->ldisc.num != ldiscs[N_TTY].num) { | 1109 | if (tty->ldisc.num != ldiscs[N_TTY].num) { |
@@ -1142,7 +1141,7 @@ void rs_hangup(struct tty_struct *tty) | |||
1142 | info->event = 0; | 1141 | info->event = 0; |
1143 | info->count = 0; | 1142 | info->count = 0; |
1144 | info->flags &= ~S_NORMAL_ACTIVE; | 1143 | info->flags &= ~S_NORMAL_ACTIVE; |
1145 | info->port.tty = NULL; | 1144 | info->tty = NULL; |
1146 | wake_up_interruptible(&info->open_wait); | 1145 | wake_up_interruptible(&info->open_wait); |
1147 | } | 1146 | } |
1148 | 1147 | ||
@@ -1261,7 +1260,7 @@ int rs_open(struct tty_struct *tty, struct file * filp) | |||
1261 | 1260 | ||
1262 | info->count++; | 1261 | info->count++; |
1263 | tty->driver_data = info; | 1262 | tty->driver_data = info; |
1264 | info->port.tty = tty; | 1263 | info->tty = tty; |
1265 | 1264 | ||
1266 | /* | 1265 | /* |
1267 | * Start up serial port | 1266 | * Start up serial port |
@@ -1338,7 +1337,7 @@ rs68328_init(void) | |||
1338 | info = &m68k_soft[i]; | 1337 | info = &m68k_soft[i]; |
1339 | info->magic = SERIAL_MAGIC; | 1338 | info->magic = SERIAL_MAGIC; |
1340 | info->port = (int) &uart_addr[i]; | 1339 | info->port = (int) &uart_addr[i]; |
1341 | info->port.tty = NULL; | 1340 | info->tty = NULL; |
1342 | info->irq = uart_irqs[i]; | 1341 | info->irq = uart_irqs[i]; |
1343 | info->custom_divisor = 16; | 1342 | info->custom_divisor = 16; |
1344 | info->close_delay = 50; | 1343 | info->close_delay = 50; |
diff --git a/drivers/tty/serial/68360serial.c b/drivers/tty/serial/68360serial.c index 88b13356ec10..bc21eeae8fde 100644 --- a/drivers/tty/serial/68360serial.c +++ b/drivers/tty/serial/68360serial.c | |||
@@ -2428,6 +2428,7 @@ static const struct tty_operations rs_360_ops = { | |||
2428 | /* .read_proc = rs_360_read_proc, */ | 2428 | /* .read_proc = rs_360_read_proc, */ |
2429 | .tiocmget = rs_360_tiocmget, | 2429 | .tiocmget = rs_360_tiocmget, |
2430 | .tiocmset = rs_360_tiocmset, | 2430 | .tiocmset = rs_360_tiocmset, |
2431 | .get_icount = rs_360_get_icount, | ||
2431 | }; | 2432 | }; |
2432 | 2433 | ||
2433 | static int __init rs_360_init(void) | 2434 | static int __init rs_360_init(void) |
diff --git a/drivers/tty/serial/bfin_5xx.c b/drivers/tty/serial/bfin_5xx.c index e381b895b04d..9b1ff2b6bb37 100644 --- a/drivers/tty/serial/bfin_5xx.c +++ b/drivers/tty/serial/bfin_5xx.c | |||
@@ -370,10 +370,8 @@ static irqreturn_t bfin_serial_rx_int(int irq, void *dev_id) | |||
370 | { | 370 | { |
371 | struct bfin_serial_port *uart = dev_id; | 371 | struct bfin_serial_port *uart = dev_id; |
372 | 372 | ||
373 | spin_lock(&uart->port.lock); | ||
374 | while (UART_GET_LSR(uart) & DR) | 373 | while (UART_GET_LSR(uart) & DR) |
375 | bfin_serial_rx_chars(uart); | 374 | bfin_serial_rx_chars(uart); |
376 | spin_unlock(&uart->port.lock); | ||
377 | 375 | ||
378 | return IRQ_HANDLED; | 376 | return IRQ_HANDLED; |
379 | } | 377 | } |
@@ -490,9 +488,8 @@ void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart) | |||
490 | { | 488 | { |
491 | int x_pos, pos; | 489 | int x_pos, pos; |
492 | 490 | ||
493 | dma_disable_irq(uart->tx_dma_channel); | 491 | dma_disable_irq_nosync(uart->rx_dma_channel); |
494 | dma_disable_irq(uart->rx_dma_channel); | 492 | spin_lock_bh(&uart->rx_lock); |
495 | spin_lock_bh(&uart->port.lock); | ||
496 | 493 | ||
497 | /* 2D DMA RX buffer ring is used. Because curr_y_count and | 494 | /* 2D DMA RX buffer ring is used. Because curr_y_count and |
498 | * curr_x_count can't be read as an atomic operation, | 495 | * curr_x_count can't be read as an atomic operation, |
@@ -523,8 +520,7 @@ void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart) | |||
523 | uart->rx_dma_buf.tail = uart->rx_dma_buf.head; | 520 | uart->rx_dma_buf.tail = uart->rx_dma_buf.head; |
524 | } | 521 | } |
525 | 522 | ||
526 | spin_unlock_bh(&uart->port.lock); | 523 | spin_unlock_bh(&uart->rx_lock); |
527 | dma_enable_irq(uart->tx_dma_channel); | ||
528 | dma_enable_irq(uart->rx_dma_channel); | 524 | dma_enable_irq(uart->rx_dma_channel); |
529 | 525 | ||
530 | mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES); | 526 | mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES); |
@@ -571,7 +567,7 @@ static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id) | |||
571 | unsigned short irqstat; | 567 | unsigned short irqstat; |
572 | int x_pos, pos; | 568 | int x_pos, pos; |
573 | 569 | ||
574 | spin_lock(&uart->port.lock); | 570 | spin_lock(&uart->rx_lock); |
575 | irqstat = get_dma_curr_irqstat(uart->rx_dma_channel); | 571 | irqstat = get_dma_curr_irqstat(uart->rx_dma_channel); |
576 | clear_dma_irqstat(uart->rx_dma_channel); | 572 | clear_dma_irqstat(uart->rx_dma_channel); |
577 | 573 | ||
@@ -589,7 +585,7 @@ static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id) | |||
589 | uart->rx_dma_buf.tail = uart->rx_dma_buf.head; | 585 | uart->rx_dma_buf.tail = uart->rx_dma_buf.head; |
590 | } | 586 | } |
591 | 587 | ||
592 | spin_unlock(&uart->port.lock); | 588 | spin_unlock(&uart->rx_lock); |
593 | 589 | ||
594 | return IRQ_HANDLED; | 590 | return IRQ_HANDLED; |
595 | } | 591 | } |
@@ -1332,6 +1328,7 @@ static int bfin_serial_probe(struct platform_device *pdev) | |||
1332 | } | 1328 | } |
1333 | 1329 | ||
1334 | #ifdef CONFIG_SERIAL_BFIN_DMA | 1330 | #ifdef CONFIG_SERIAL_BFIN_DMA |
1331 | spin_lock_init(&uart->rx_lock); | ||
1335 | uart->tx_done = 1; | 1332 | uart->tx_done = 1; |
1336 | uart->tx_count = 0; | 1333 | uart->tx_count = 0; |
1337 | 1334 | ||
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c index beb1afa27d8d..7b951adac54b 100644 --- a/drivers/tty/serial/max3100.c +++ b/drivers/tty/serial/max3100.c | |||
@@ -601,7 +601,7 @@ static int max3100_startup(struct uart_port *port) | |||
601 | s->rts = 0; | 601 | s->rts = 0; |
602 | 602 | ||
603 | sprintf(b, "max3100-%d", s->minor); | 603 | sprintf(b, "max3100-%d", s->minor); |
604 | s->workqueue = create_freezeable_workqueue(b); | 604 | s->workqueue = create_freezable_workqueue(b); |
605 | if (!s->workqueue) { | 605 | if (!s->workqueue) { |
606 | dev_warn(&s->spi->dev, "cannot create workqueue\n"); | 606 | dev_warn(&s->spi->dev, "cannot create workqueue\n"); |
607 | return -EBUSY; | 607 | return -EBUSY; |
diff --git a/drivers/tty/serial/max3107.c b/drivers/tty/serial/max3107.c index 910870edf708..750b4f627315 100644 --- a/drivers/tty/serial/max3107.c +++ b/drivers/tty/serial/max3107.c | |||
@@ -833,7 +833,7 @@ static int max3107_startup(struct uart_port *port) | |||
833 | struct max3107_port *s = container_of(port, struct max3107_port, port); | 833 | struct max3107_port *s = container_of(port, struct max3107_port, port); |
834 | 834 | ||
835 | /* Initialize work queue */ | 835 | /* Initialize work queue */ |
836 | s->workqueue = create_freezeable_workqueue("max3107"); | 836 | s->workqueue = create_freezable_workqueue("max3107"); |
837 | if (!s->workqueue) { | 837 | if (!s->workqueue) { |
838 | dev_err(&s->spi->dev, "Workqueue creation failed\n"); | 838 | dev_err(&s->spi->dev, "Workqueue creation failed\n"); |
839 | return -EBUSY; | 839 | return -EBUSY; |
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index 8e0dd254eb11..81f13958e751 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c | |||
@@ -571,6 +571,7 @@ struct sysrq_state { | |||
571 | unsigned int alt_use; | 571 | unsigned int alt_use; |
572 | bool active; | 572 | bool active; |
573 | bool need_reinject; | 573 | bool need_reinject; |
574 | bool reinjecting; | ||
574 | }; | 575 | }; |
575 | 576 | ||
576 | static void sysrq_reinject_alt_sysrq(struct work_struct *work) | 577 | static void sysrq_reinject_alt_sysrq(struct work_struct *work) |
@@ -581,6 +582,10 @@ static void sysrq_reinject_alt_sysrq(struct work_struct *work) | |||
581 | unsigned int alt_code = sysrq->alt_use; | 582 | unsigned int alt_code = sysrq->alt_use; |
582 | 583 | ||
583 | if (sysrq->need_reinject) { | 584 | if (sysrq->need_reinject) { |
585 | /* we do not want the assignment to be reordered */ | ||
586 | sysrq->reinjecting = true; | ||
587 | mb(); | ||
588 | |||
584 | /* Simulate press and release of Alt + SysRq */ | 589 | /* Simulate press and release of Alt + SysRq */ |
585 | input_inject_event(handle, EV_KEY, alt_code, 1); | 590 | input_inject_event(handle, EV_KEY, alt_code, 1); |
586 | input_inject_event(handle, EV_KEY, KEY_SYSRQ, 1); | 591 | input_inject_event(handle, EV_KEY, KEY_SYSRQ, 1); |
@@ -589,6 +594,9 @@ static void sysrq_reinject_alt_sysrq(struct work_struct *work) | |||
589 | input_inject_event(handle, EV_KEY, KEY_SYSRQ, 0); | 594 | input_inject_event(handle, EV_KEY, KEY_SYSRQ, 0); |
590 | input_inject_event(handle, EV_KEY, alt_code, 0); | 595 | input_inject_event(handle, EV_KEY, alt_code, 0); |
591 | input_inject_event(handle, EV_SYN, SYN_REPORT, 1); | 596 | input_inject_event(handle, EV_SYN, SYN_REPORT, 1); |
597 | |||
598 | mb(); | ||
599 | sysrq->reinjecting = false; | ||
592 | } | 600 | } |
593 | } | 601 | } |
594 | 602 | ||
@@ -599,6 +607,13 @@ static bool sysrq_filter(struct input_handle *handle, | |||
599 | bool was_active = sysrq->active; | 607 | bool was_active = sysrq->active; |
600 | bool suppress; | 608 | bool suppress; |
601 | 609 | ||
610 | /* | ||
611 | * Do not filter anything if we are in the process of re-injecting | ||
612 | * Alt+SysRq combination. | ||
613 | */ | ||
614 | if (sysrq->reinjecting) | ||
615 | return false; | ||
616 | |||
602 | switch (type) { | 617 | switch (type) { |
603 | 618 | ||
604 | case EV_SYN: | 619 | case EV_SYN: |
@@ -629,7 +644,7 @@ static bool sysrq_filter(struct input_handle *handle, | |||
629 | sysrq->alt_use = sysrq->alt; | 644 | sysrq->alt_use = sysrq->alt; |
630 | /* | 645 | /* |
631 | * If nothing else will be pressed we'll need | 646 | * If nothing else will be pressed we'll need |
632 | * to * re-inject Alt-SysRq keysroke. | 647 | * to re-inject Alt-SysRq keysroke. |
633 | */ | 648 | */ |
634 | sysrq->need_reinject = true; | 649 | sysrq->need_reinject = true; |
635 | } | 650 | } |
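Editor's note on the sysrq hunks above: the handler must not filter the Alt+SysRq events it re-injects itself, so a reinjecting flag is set before input_inject_event() and cleared afterwards, with mb() barriers so the filter path sees the flag before the injected events arrive. A rough sketch of that handshake with hypothetical names; mb() comes from the arch barrier header pulled in by linux/kernel.h:

    #include <linux/kernel.h>
    #include <linux/types.h>

    struct reinject_state {
        bool reinjecting;
    };

    static void reinject_events(struct reinject_state *st)
    {
        st->reinjecting = true;
        mb();                   /* publish the flag before injecting */

        /* ... inject the synthetic key press/release events here ... */

        mb();                   /* injection finished before clearing */
        st->reinjecting = false;
    }

    /* Return false to pass the event through unfiltered. */
    static bool filter_event(struct reinject_state *st)
    {
        if (st->reinjecting)
            return false;       /* our own injected events: do not filter */

        /* ... normal filtering decisions ... */
        return true;
    }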
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index d6ede989ff22..4ab49d4eebf4 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
@@ -1607,6 +1607,7 @@ static const struct usb_device_id acm_ids[] = { | |||
1607 | { NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */ | 1607 | { NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */ |
1608 | { NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */ | 1608 | { NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */ |
1609 | { NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */ | 1609 | { NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */ |
1610 | { NOKIA_PCSUITE_ACM_INFO(0x0302), }, /* Nokia N8 */ | ||
1610 | { SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */ | 1611 | { SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */ |
1611 | 1612 | ||
1612 | /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */ | 1613 | /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */ |
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 6a95017fa62b..e935f71d7a34 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c | |||
@@ -1955,7 +1955,6 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg) | |||
1955 | 1955 | ||
1956 | dev_dbg(&rhdev->dev, "usb %s%s\n", | 1956 | dev_dbg(&rhdev->dev, "usb %s%s\n", |
1957 | (msg.event & PM_EVENT_AUTO ? "auto-" : ""), "resume"); | 1957 | (msg.event & PM_EVENT_AUTO ? "auto-" : ""), "resume"); |
1958 | clear_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags); | ||
1959 | if (!hcd->driver->bus_resume) | 1958 | if (!hcd->driver->bus_resume) |
1960 | return -ENOENT; | 1959 | return -ENOENT; |
1961 | if (hcd->state == HC_STATE_RUNNING) | 1960 | if (hcd->state == HC_STATE_RUNNING) |
@@ -1963,6 +1962,7 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg) | |||
1963 | 1962 | ||
1964 | hcd->state = HC_STATE_RESUMING; | 1963 | hcd->state = HC_STATE_RESUMING; |
1965 | status = hcd->driver->bus_resume(hcd); | 1964 | status = hcd->driver->bus_resume(hcd); |
1965 | clear_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags); | ||
1966 | if (status == 0) { | 1966 | if (status == 0) { |
1967 | /* TRSMRCY = 10 msec */ | 1967 | /* TRSMRCY = 10 msec */ |
1968 | msleep(10); | 1968 | msleep(10); |
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 4310cc4b1cb5..0f299b7aad60 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
@@ -2681,17 +2681,13 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, | |||
2681 | 2681 | ||
2682 | mutex_lock(&usb_address0_mutex); | 2682 | mutex_lock(&usb_address0_mutex); |
2683 | 2683 | ||
2684 | if (!udev->config && oldspeed == USB_SPEED_SUPER) { | 2684 | /* Reset the device; full speed may morph to high speed */ |
2685 | /* Don't reset USB 3.0 devices during an initial setup */ | 2685 | /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */ |
2686 | usb_set_device_state(udev, USB_STATE_DEFAULT); | 2686 | retval = hub_port_reset(hub, port1, udev, delay); |
2687 | } else { | 2687 | if (retval < 0) /* error or disconnect */ |
2688 | /* Reset the device; full speed may morph to high speed */ | 2688 | goto fail; |
2689 | /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */ | 2689 | /* success, speed is known */ |
2690 | retval = hub_port_reset(hub, port1, udev, delay); | 2690 | |
2691 | if (retval < 0) /* error or disconnect */ | ||
2692 | goto fail; | ||
2693 | /* success, speed is known */ | ||
2694 | } | ||
2695 | retval = -ENODEV; | 2691 | retval = -ENODEV; |
2696 | 2692 | ||
2697 | if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) { | 2693 | if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) { |
@@ -2753,6 +2749,11 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, | |||
2753 | udev->ttport = hdev->ttport; | 2749 | udev->ttport = hdev->ttport; |
2754 | } else if (udev->speed != USB_SPEED_HIGH | 2750 | } else if (udev->speed != USB_SPEED_HIGH |
2755 | && hdev->speed == USB_SPEED_HIGH) { | 2751 | && hdev->speed == USB_SPEED_HIGH) { |
2752 | if (!hub->tt.hub) { | ||
2753 | dev_err(&udev->dev, "parent hub has no TT\n"); | ||
2754 | retval = -EINVAL; | ||
2755 | goto fail; | ||
2756 | } | ||
2756 | udev->tt = &hub->tt; | 2757 | udev->tt = &hub->tt; |
2757 | udev->ttport = port1; | 2758 | udev->ttport = port1; |
2758 | } | 2759 | } |
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 44c595432d6f..81ce6a8e1d94 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
@@ -48,6 +48,10 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
48 | { USB_DEVICE(0x04b4, 0x0526), .driver_info = | 48 | { USB_DEVICE(0x04b4, 0x0526), .driver_info = |
49 | USB_QUIRK_CONFIG_INTF_STRINGS }, | 49 | USB_QUIRK_CONFIG_INTF_STRINGS }, |
50 | 50 | ||
51 | /* Samsung Android phone modem - ID conflict with SPH-I500 */ | ||
52 | { USB_DEVICE(0x04e8, 0x6601), .driver_info = | ||
53 | USB_QUIRK_CONFIG_INTF_STRINGS }, | ||
54 | |||
51 | /* Roland SC-8820 */ | 55 | /* Roland SC-8820 */ |
52 | { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME }, | 56 | { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME }, |
53 | 57 | ||
@@ -68,6 +72,10 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
68 | /* M-Systems Flash Disk Pioneers */ | 72 | /* M-Systems Flash Disk Pioneers */ |
69 | { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, | 73 | { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, |
70 | 74 | ||
75 | /* Keytouch QWERTY Panel keyboard */ | ||
76 | { USB_DEVICE(0x0926, 0x3333), .driver_info = | ||
77 | USB_QUIRK_CONFIG_INTF_STRINGS }, | ||
78 | |||
71 | /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */ | 79 | /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */ |
72 | { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF }, | 80 | { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF }, |
73 | 81 | ||
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 06bb9d4587e9..d50099675f28 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig | |||
@@ -546,6 +546,8 @@ config USB_GADGET_CI13XXX_MSM | |||
546 | ci13xxx_udc core. | 546 | ci13xxx_udc core. |
547 | This driver depends on OTG driver for PHY initialization, | 547 | This driver depends on OTG driver for PHY initialization, |
548 | clock management, powering up VBUS, and power management. | 548 | clock management, powering up VBUS, and power management. |
549 | This driver is not supported on boards like trout which | ||
550 | has an external PHY. | ||
549 | 551 | ||
550 | Say "y" to link the driver statically, or "m" to build a | 552 | Say "y" to link the driver statically, or "m" to build a |
551 | dynamically linked module called "ci13xxx_msm" and force all | 553 | dynamically linked module called "ci13xxx_msm" and force all |
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c index b5dbb2308f56..6d8e533949eb 100644 --- a/drivers/usb/gadget/f_mass_storage.c +++ b/drivers/usb/gadget/f_mass_storage.c | |||
@@ -293,6 +293,7 @@ | |||
293 | 293 | ||
294 | #include <linux/usb/ch9.h> | 294 | #include <linux/usb/ch9.h> |
295 | #include <linux/usb/gadget.h> | 295 | #include <linux/usb/gadget.h> |
296 | #include <linux/usb/composite.h> | ||
296 | 297 | ||
297 | #include "gadget_chips.h" | 298 | #include "gadget_chips.h" |
298 | 299 | ||
@@ -2763,7 +2764,7 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common, | |||
2763 | return ERR_PTR(-ENOMEM); | 2764 | return ERR_PTR(-ENOMEM); |
2764 | common->free_storage_on_release = 1; | 2765 | common->free_storage_on_release = 1; |
2765 | } else { | 2766 | } else { |
2766 | memset(common, 0, sizeof common); | 2767 | memset(common, 0, sizeof *common); |
2767 | common->free_storage_on_release = 0; | 2768 | common->free_storage_on_release = 0; |
2768 | } | 2769 | } |
2769 | 2770 | ||
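Editor's note on the one-character f_mass_storage fix above: sizeof common is the size of the pointer (4 or 8 bytes), while sizeof *common is the size of the structure being cleared, so the original memset left most of the struct untouched. A tiny standalone illustration with a made-up struct:

    #include <stdio.h>
    #include <string.h>

    struct common {
        int free_storage_on_release;
        char buf[120];
    };

    int main(void)
    {
        struct common c;
        struct common *common = &c;

        /* BUG: clears only sizeof(struct common *) bytes of the struct. */
        memset(common, 0, sizeof common);

        /* Correct: clears the whole pointed-to object. */
        memset(common, 0, sizeof *common);

        printf("%zu vs %zu bytes cleared\n", sizeof common, sizeof *common);
        return 0;
    }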
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c index 20d43da319ae..015118535f77 100644 --- a/drivers/usb/gadget/r8a66597-udc.c +++ b/drivers/usb/gadget/r8a66597-udc.c | |||
@@ -258,7 +258,7 @@ static int pipe_buffer_setting(struct r8a66597 *r8a66597, | |||
258 | break; | 258 | break; |
259 | case R8A66597_BULK: | 259 | case R8A66597_BULK: |
260 | /* isochronous pipes may be used as bulk pipes */ | 260 | /* isochronous pipes may be used as bulk pipes */ |
261 | if (info->pipe > R8A66597_BASE_PIPENUM_BULK) | 261 | if (info->pipe >= R8A66597_BASE_PIPENUM_BULK) |
262 | bufnum = info->pipe - R8A66597_BASE_PIPENUM_BULK; | 262 | bufnum = info->pipe - R8A66597_BASE_PIPENUM_BULK; |
263 | else | 263 | else |
264 | bufnum = info->pipe - R8A66597_BASE_PIPENUM_ISOC; | 264 | bufnum = info->pipe - R8A66597_BASE_PIPENUM_ISOC; |
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index 24046c0f5878..0e6afa260ed8 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig | |||
@@ -151,6 +151,8 @@ config USB_EHCI_MSM | |||
151 | Qualcomm chipsets. Root Hub has inbuilt TT. | 151 | Qualcomm chipsets. Root Hub has inbuilt TT. |
152 | This driver depends on OTG driver for PHY initialization, | 152 | This driver depends on OTG driver for PHY initialization, |
153 | clock management, powering up VBUS, and power management. | 153 | clock management, powering up VBUS, and power management. |
154 | This driver is not supported on boards like trout which | ||
155 | has an external PHY. | ||
154 | 156 | ||
155 | config USB_EHCI_HCD_PPC_OF | 157 | config USB_EHCI_HCD_PPC_OF |
156 | bool "EHCI support for PPC USB controller on OF platform bus" | 158 | bool "EHCI support for PPC USB controller on OF platform bus" |
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c index 2baf8a849086..a869e3c103d3 100644 --- a/drivers/usb/host/ehci-au1xxx.c +++ b/drivers/usb/host/ehci-au1xxx.c | |||
@@ -227,8 +227,8 @@ static int ehci_hcd_au1xxx_drv_suspend(struct device *dev) | |||
227 | * mark HW unaccessible. The PM and USB cores make sure that | 227 | * mark HW unaccessible. The PM and USB cores make sure that |
228 | * the root hub is either suspended or stopped. | 228 | * the root hub is either suspended or stopped. |
229 | */ | 229 | */ |
230 | spin_lock_irqsave(&ehci->lock, flags); | ||
231 | ehci_prepare_ports_for_controller_suspend(ehci, device_may_wakeup(dev)); | 230 | ehci_prepare_ports_for_controller_suspend(ehci, device_may_wakeup(dev)); |
231 | spin_lock_irqsave(&ehci->lock, flags); | ||
232 | ehci_writel(ehci, 0, &ehci->regs->intr_enable); | 232 | ehci_writel(ehci, 0, &ehci->regs->intr_enable); |
233 | (void)ehci_readl(ehci, &ehci->regs->intr_enable); | 233 | (void)ehci_readl(ehci, &ehci->regs->intr_enable); |
234 | 234 | ||
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index 796ea0c8900f..8a515f0d5988 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c | |||
@@ -111,6 +111,7 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci, | |||
111 | { | 111 | { |
112 | int port; | 112 | int port; |
113 | u32 temp; | 113 | u32 temp; |
114 | unsigned long flags; | ||
114 | 115 | ||
115 | /* If remote wakeup is enabled for the root hub but disabled | 116 | /* If remote wakeup is enabled for the root hub but disabled |
116 | * for the controller, we must adjust all the port wakeup flags | 117 | * for the controller, we must adjust all the port wakeup flags |
@@ -120,6 +121,8 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci, | |||
120 | if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup || do_wakeup) | 121 | if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup || do_wakeup) |
121 | return; | 122 | return; |
122 | 123 | ||
124 | spin_lock_irqsave(&ehci->lock, flags); | ||
125 | |||
123 | /* clear phy low-power mode before changing wakeup flags */ | 126 | /* clear phy low-power mode before changing wakeup flags */ |
124 | if (ehci->has_hostpc) { | 127 | if (ehci->has_hostpc) { |
125 | port = HCS_N_PORTS(ehci->hcs_params); | 128 | port = HCS_N_PORTS(ehci->hcs_params); |
@@ -131,7 +134,9 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci, | |||
131 | temp = ehci_readl(ehci, hostpc_reg); | 134 | temp = ehci_readl(ehci, hostpc_reg); |
132 | ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg); | 135 | ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg); |
133 | } | 136 | } |
137 | spin_unlock_irqrestore(&ehci->lock, flags); | ||
134 | msleep(5); | 138 | msleep(5); |
139 | spin_lock_irqsave(&ehci->lock, flags); | ||
135 | } | 140 | } |
136 | 141 | ||
137 | port = HCS_N_PORTS(ehci->hcs_params); | 142 | port = HCS_N_PORTS(ehci->hcs_params); |
@@ -170,6 +175,8 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci, | |||
170 | /* Does the root hub have a port wakeup pending? */ | 175 | /* Does the root hub have a port wakeup pending? */ |
171 | if (!suspending && (ehci_readl(ehci, &ehci->regs->status) & STS_PCD)) | 176 | if (!suspending && (ehci_readl(ehci, &ehci->regs->status) & STS_PCD)) |
172 | usb_hcd_resume_root_hub(ehci_to_hcd(ehci)); | 177 | usb_hcd_resume_root_hub(ehci_to_hcd(ehci)); |
178 | |||
179 | spin_unlock_irqrestore(&ehci->lock, flags); | ||
173 | } | 180 | } |
174 | 181 | ||
175 | static int ehci_bus_suspend (struct usb_hcd *hcd) | 182 | static int ehci_bus_suspend (struct usb_hcd *hcd) |
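Editor's note on the ehci-hub.c hunk above: the wakeup-flag walk is now done under ehci->lock, but the lock has to be dropped and retaken around msleep(5), since sleeping with a spinlock held (and interrupts disabled) is not allowed. A stripped-down sketch of that unlock-sleep-relock shape, with placeholder names:

    #include <linux/delay.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(hw_lock);

    static void adjust_wakeup_flags(void)
    {
        unsigned long flags;

        spin_lock_irqsave(&hw_lock, flags);

        /* ... register updates that need the lock ... */

        spin_unlock_irqrestore(&hw_lock, flags);
        msleep(5);              /* may sleep: the lock must not be held */
        spin_lock_irqsave(&hw_lock, flags);

        /* ... finish the register walk ... */

        spin_unlock_irqrestore(&hw_lock, flags);
    }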
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c index 680f2ef4e59f..f784ceb862a3 100644 --- a/drivers/usb/host/ehci-omap.c +++ b/drivers/usb/host/ehci-omap.c | |||
@@ -796,7 +796,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) | |||
796 | hcd = usb_create_hcd(&ehci_omap_hc_driver, &pdev->dev, | 796 | hcd = usb_create_hcd(&ehci_omap_hc_driver, &pdev->dev, |
797 | dev_name(&pdev->dev)); | 797 | dev_name(&pdev->dev)); |
798 | if (!hcd) { | 798 | if (!hcd) { |
799 | dev_dbg(&pdev->dev, "failed to create hcd with err %d\n", ret); | 799 | dev_err(&pdev->dev, "failed to create hcd with err %d\n", ret); |
800 | ret = -ENOMEM; | 800 | ret = -ENOMEM; |
801 | goto err_create_hcd; | 801 | goto err_create_hcd; |
802 | } | 802 | } |
@@ -864,7 +864,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) | |||
864 | 864 | ||
865 | ret = omap_start_ehc(omap, hcd); | 865 | ret = omap_start_ehc(omap, hcd); |
866 | if (ret) { | 866 | if (ret) { |
867 | dev_dbg(&pdev->dev, "failed to start ehci\n"); | 867 | dev_err(&pdev->dev, "failed to start ehci with err %d\n", ret); |
868 | goto err_start; | 868 | goto err_start; |
869 | } | 869 | } |
870 | 870 | ||
@@ -879,7 +879,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) | |||
879 | 879 | ||
880 | ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED); | 880 | ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED); |
881 | if (ret) { | 881 | if (ret) { |
882 | dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret); | 882 | dev_err(&pdev->dev, "failed to add hcd with err %d\n", ret); |
883 | goto err_add_hcd; | 883 | goto err_add_hcd; |
884 | } | 884 | } |
885 | 885 | ||
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index bed07d4aab06..07bb982e59f6 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c | |||
@@ -367,8 +367,8 @@ static int ehci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) | |||
367 | * mark HW unaccessible. The PM and USB cores make sure that | 367 | * mark HW unaccessible. The PM and USB cores make sure that |
368 | * the root hub is either suspended or stopped. | 368 | * the root hub is either suspended or stopped. |
369 | */ | 369 | */ |
370 | spin_lock_irqsave (&ehci->lock, flags); | ||
371 | ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup); | 370 | ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup); |
371 | spin_lock_irqsave (&ehci->lock, flags); | ||
372 | ehci_writel(ehci, 0, &ehci->regs->intr_enable); | 372 | ehci_writel(ehci, 0, &ehci->regs->intr_enable); |
373 | (void)ehci_readl(ehci, &ehci->regs->intr_enable); | 373 | (void)ehci_readl(ehci, &ehci->regs->intr_enable); |
374 | 374 | ||
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c index 990f06b89eaa..2e9602a10e9b 100644 --- a/drivers/usb/host/sl811-hcd.c +++ b/drivers/usb/host/sl811-hcd.c | |||
@@ -861,6 +861,7 @@ static int sl811h_urb_enqueue( | |||
861 | DBG("dev %d ep%d maxpacket %d\n", | 861 | DBG("dev %d ep%d maxpacket %d\n", |
862 | udev->devnum, epnum, ep->maxpacket); | 862 | udev->devnum, epnum, ep->maxpacket); |
863 | retval = -EINVAL; | 863 | retval = -EINVAL; |
864 | kfree(ep); | ||
864 | goto fail; | 865 | goto fail; |
865 | } | 866 | } |
866 | 867 | ||
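Editor's note on the sl811-hcd hunk above: the freshly allocated per-endpoint data must be freed on the early -EINVAL exit, because the shared fail path does not know about it, so the original code leaked it. A small sketch of that allocate-validate-free-on-error shape; the names and limit are placeholders:

    #include <linux/errno.h>
    #include <linux/slab.h>

    struct ep_priv {
        int maxpacket;
    };

    static int enqueue(int maxpacket)
    {
        struct ep_priv *ep;
        int retval;

        ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
        if (!ep)
            return -ENOMEM;

        if (maxpacket > 64) {   /* unsupported request: undo the allocation */
            retval = -EINVAL;
            kfree(ep);
            goto fail;
        }

        ep->maxpacket = maxpacket;
        /* ... link ep into the controller state ... */
        return 0;

    fail:
        /* shared failure bookkeeping that never touches ep */
        return retval;
    }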
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c index fcbf4abbf381..0231814a97a5 100644 --- a/drivers/usb/host/xhci-dbg.c +++ b/drivers/usb/host/xhci-dbg.c | |||
@@ -169,9 +169,10 @@ static void xhci_print_ports(struct xhci_hcd *xhci) | |||
169 | } | 169 | } |
170 | } | 170 | } |
171 | 171 | ||
172 | void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num) | 172 | void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num) |
173 | { | 173 | { |
174 | void *addr; | 174 | struct xhci_intr_reg __iomem *ir_set = &xhci->run_regs->ir_set[set_num]; |
175 | void __iomem *addr; | ||
175 | u32 temp; | 176 | u32 temp; |
176 | u64 temp_64; | 177 | u64 temp_64; |
177 | 178 | ||
@@ -449,7 +450,7 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci, | |||
449 | } | 450 | } |
450 | } | 451 | } |
451 | 452 | ||
452 | void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) | 453 | static void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) |
453 | { | 454 | { |
454 | /* Fields are 32 bits wide, DMA addresses are in bytes */ | 455 | /* Fields are 32 bits wide, DMA addresses are in bytes */ |
455 | int field_size = 32 / 8; | 456 | int field_size = 32 / 8; |
@@ -488,7 +489,7 @@ void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) | |||
488 | dbg_rsvd64(xhci, (u64 *)slot_ctx, dma); | 489 | dbg_rsvd64(xhci, (u64 *)slot_ctx, dma); |
489 | } | 490 | } |
490 | 491 | ||
491 | void xhci_dbg_ep_ctx(struct xhci_hcd *xhci, | 492 | static void xhci_dbg_ep_ctx(struct xhci_hcd *xhci, |
492 | struct xhci_container_ctx *ctx, | 493 | struct xhci_container_ctx *ctx, |
493 | unsigned int last_ep) | 494 | unsigned int last_ep) |
494 | { | 495 | { |
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 1d0f45f0e7a6..a9534396e85b 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c | |||
@@ -307,7 +307,7 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, | |||
307 | 307 | ||
308 | /***************** Streams structures manipulation *************************/ | 308 | /***************** Streams structures manipulation *************************/ |
309 | 309 | ||
310 | void xhci_free_stream_ctx(struct xhci_hcd *xhci, | 310 | static void xhci_free_stream_ctx(struct xhci_hcd *xhci, |
311 | unsigned int num_stream_ctxs, | 311 | unsigned int num_stream_ctxs, |
312 | struct xhci_stream_ctx *stream_ctx, dma_addr_t dma) | 312 | struct xhci_stream_ctx *stream_ctx, dma_addr_t dma) |
313 | { | 313 | { |
@@ -335,7 +335,7 @@ void xhci_free_stream_ctx(struct xhci_hcd *xhci, | |||
335 | * The stream context array must be a power of 2, and can be as small as | 335 | * The stream context array must be a power of 2, and can be as small as |
336 | * 64 bytes or as large as 1MB. | 336 | * 64 bytes or as large as 1MB. |
337 | */ | 337 | */ |
338 | struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci, | 338 | static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci, |
339 | unsigned int num_stream_ctxs, dma_addr_t *dma, | 339 | unsigned int num_stream_ctxs, dma_addr_t *dma, |
340 | gfp_t mem_flags) | 340 | gfp_t mem_flags) |
341 | { | 341 | { |
@@ -1900,11 +1900,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
1900 | val &= DBOFF_MASK; | 1900 | val &= DBOFF_MASK; |
1901 | xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x" | 1901 | xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x" |
1902 | " from cap regs base addr\n", val); | 1902 | " from cap regs base addr\n", val); |
1903 | xhci->dba = (void *) xhci->cap_regs + val; | 1903 | xhci->dba = (void __iomem *) xhci->cap_regs + val; |
1904 | xhci_dbg_regs(xhci); | 1904 | xhci_dbg_regs(xhci); |
1905 | xhci_print_run_regs(xhci); | 1905 | xhci_print_run_regs(xhci); |
1906 | /* Set ir_set to interrupt register set 0 */ | 1906 | /* Set ir_set to interrupt register set 0 */ |
1907 | xhci->ir_set = (void *) xhci->run_regs->ir_set; | 1907 | xhci->ir_set = &xhci->run_regs->ir_set[0]; |
1908 | 1908 | ||
1909 | /* | 1909 | /* |
1910 | * Event ring setup: Allocate a normal ring, but also setup | 1910 | * Event ring setup: Allocate a normal ring, but also setup |
@@ -1961,7 +1961,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
1961 | /* Set the event ring dequeue address */ | 1961 | /* Set the event ring dequeue address */ |
1962 | xhci_set_hc_event_deq(xhci); | 1962 | xhci_set_hc_event_deq(xhci); |
1963 | xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n"); | 1963 | xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n"); |
1964 | xhci_print_ir_set(xhci, xhci->ir_set, 0); | 1964 | xhci_print_ir_set(xhci, 0); |
1965 | 1965 | ||
1966 | /* | 1966 | /* |
1967 | * XXX: Might need to set the Interrupter Moderation Register to | 1967 | * XXX: Might need to set the Interrupter Moderation Register to |
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 3e8211c1ce5a..3289bf4832c9 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
@@ -474,8 +474,11 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, | |||
474 | state->new_deq_seg = find_trb_seg(cur_td->start_seg, | 474 | state->new_deq_seg = find_trb_seg(cur_td->start_seg, |
475 | dev->eps[ep_index].stopped_trb, | 475 | dev->eps[ep_index].stopped_trb, |
476 | &state->new_cycle_state); | 476 | &state->new_cycle_state); |
477 | if (!state->new_deq_seg) | 477 | if (!state->new_deq_seg) { |
478 | BUG(); | 478 | WARN_ON(1); |
479 | return; | ||
480 | } | ||
481 | |||
479 | /* Dig out the cycle state saved by the xHC during the stop ep cmd */ | 482 | /* Dig out the cycle state saved by the xHC during the stop ep cmd */ |
480 | xhci_dbg(xhci, "Finding endpoint context\n"); | 483 | xhci_dbg(xhci, "Finding endpoint context\n"); |
481 | ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); | 484 | ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); |
@@ -486,8 +489,10 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, | |||
486 | state->new_deq_seg = find_trb_seg(state->new_deq_seg, | 489 | state->new_deq_seg = find_trb_seg(state->new_deq_seg, |
487 | state->new_deq_ptr, | 490 | state->new_deq_ptr, |
488 | &state->new_cycle_state); | 491 | &state->new_cycle_state); |
489 | if (!state->new_deq_seg) | 492 | if (!state->new_deq_seg) { |
490 | BUG(); | 493 | WARN_ON(1); |
494 | return; | ||
495 | } | ||
491 | 496 | ||
492 | trb = &state->new_deq_ptr->generic; | 497 | trb = &state->new_deq_ptr->generic; |
493 | if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) && | 498 | if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) && |
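Editor's note on the two xhci-ring.c hunks above: a hard BUG() is replaced by WARN_ON(1) plus an early return, so a missing TRB segment logs a backtrace and degrades gracefully instead of halting the machine. A compact, self-contained sketch of that idiom; the lookup helper is a stand-in for the real ring-segment search:

    #include <linux/kernel.h>   /* WARN_ON */

    struct seg {
        int id;
    };

    /* Stand-in: the real code walks the ring segments for a TRB. */
    static struct seg *find_seg(void *cursor)
    {
        return NULL;
    }

    static void set_dequeue_state(void *cursor, struct seg **out)
    {
        struct seg *s = find_seg(cursor);

        if (!s) {
            WARN_ON(1);         /* log once with a backtrace ... */
            return;             /* ... but keep the system running */
        }
        *out = s;
    }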
@@ -2363,12 +2368,13 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb) | |||
2363 | 2368 | ||
2364 | /* Scatter gather list entries may cross 64KB boundaries */ | 2369 | /* Scatter gather list entries may cross 64KB boundaries */ |
2365 | running_total = TRB_MAX_BUFF_SIZE - | 2370 | running_total = TRB_MAX_BUFF_SIZE - |
2366 | (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); | 2371 | (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1)); |
2372 | running_total &= TRB_MAX_BUFF_SIZE - 1; | ||
2367 | if (running_total != 0) | 2373 | if (running_total != 0) |
2368 | num_trbs++; | 2374 | num_trbs++; |
2369 | 2375 | ||
2370 | /* How many more 64KB chunks to transfer, how many more TRBs? */ | 2376 | /* How many more 64KB chunks to transfer, how many more TRBs? */ |
2371 | while (running_total < sg_dma_len(sg)) { | 2377 | while (running_total < sg_dma_len(sg) && running_total < temp) { |
2372 | num_trbs++; | 2378 | num_trbs++; |
2373 | running_total += TRB_MAX_BUFF_SIZE; | 2379 | running_total += TRB_MAX_BUFF_SIZE; |
2374 | } | 2380 | } |
@@ -2394,11 +2400,11 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb) | |||
2394 | static void check_trb_math(struct urb *urb, int num_trbs, int running_total) | 2400 | static void check_trb_math(struct urb *urb, int num_trbs, int running_total) |
2395 | { | 2401 | { |
2396 | if (num_trbs != 0) | 2402 | if (num_trbs != 0) |
2397 | dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of " | 2403 | dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of " |
2398 | "TRBs, %d left\n", __func__, | 2404 | "TRBs, %d left\n", __func__, |
2399 | urb->ep->desc.bEndpointAddress, num_trbs); | 2405 | urb->ep->desc.bEndpointAddress, num_trbs); |
2400 | if (running_total != urb->transfer_buffer_length) | 2406 | if (running_total != urb->transfer_buffer_length) |
2401 | dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, " | 2407 | dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, " |
2402 | "queued %#x (%d), asked for %#x (%d)\n", | 2408 | "queued %#x (%d), asked for %#x (%d)\n", |
2403 | __func__, | 2409 | __func__, |
2404 | urb->ep->desc.bEndpointAddress, | 2410 | urb->ep->desc.bEndpointAddress, |
@@ -2533,8 +2539,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
2533 | sg = urb->sg; | 2539 | sg = urb->sg; |
2534 | addr = (u64) sg_dma_address(sg); | 2540 | addr = (u64) sg_dma_address(sg); |
2535 | this_sg_len = sg_dma_len(sg); | 2541 | this_sg_len = sg_dma_len(sg); |
2536 | trb_buff_len = TRB_MAX_BUFF_SIZE - | 2542 | trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1)); |
2537 | (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); | ||
2538 | trb_buff_len = min_t(int, trb_buff_len, this_sg_len); | 2543 | trb_buff_len = min_t(int, trb_buff_len, this_sg_len); |
2539 | if (trb_buff_len > urb->transfer_buffer_length) | 2544 | if (trb_buff_len > urb->transfer_buffer_length) |
2540 | trb_buff_len = urb->transfer_buffer_length; | 2545 | trb_buff_len = urb->transfer_buffer_length; |
@@ -2572,7 +2577,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
2572 | (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), | 2577 | (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), |
2573 | (unsigned int) addr + trb_buff_len); | 2578 | (unsigned int) addr + trb_buff_len); |
2574 | if (TRB_MAX_BUFF_SIZE - | 2579 | if (TRB_MAX_BUFF_SIZE - |
2575 | (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) { | 2580 | (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) { |
2576 | xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n"); | 2581 | xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n"); |
2577 | xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n", | 2582 | xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n", |
2578 | (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), | 2583 | (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), |
@@ -2616,7 +2621,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
2616 | } | 2621 | } |
2617 | 2622 | ||
2618 | trb_buff_len = TRB_MAX_BUFF_SIZE - | 2623 | trb_buff_len = TRB_MAX_BUFF_SIZE - |
2619 | (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); | 2624 | (addr & (TRB_MAX_BUFF_SIZE - 1)); |
2620 | trb_buff_len = min_t(int, trb_buff_len, this_sg_len); | 2625 | trb_buff_len = min_t(int, trb_buff_len, this_sg_len); |
2621 | if (running_total + trb_buff_len > urb->transfer_buffer_length) | 2626 | if (running_total + trb_buff_len > urb->transfer_buffer_length) |
2622 | trb_buff_len = | 2627 | trb_buff_len = |
@@ -2656,7 +2661,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
2656 | num_trbs = 0; | 2661 | num_trbs = 0; |
2657 | /* How much data is (potentially) left before the 64KB boundary? */ | 2662 | /* How much data is (potentially) left before the 64KB boundary? */ |
2658 | running_total = TRB_MAX_BUFF_SIZE - | 2663 | running_total = TRB_MAX_BUFF_SIZE - |
2659 | (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); | 2664 | (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1)); |
2665 | running_total &= TRB_MAX_BUFF_SIZE - 1; | ||
2660 | 2666 | ||
2661 | /* If there's some data on this 64KB chunk, or we have to send a | 2667 | /* If there's some data on this 64KB chunk, or we have to send a |
2662 | * zero-length transfer, we need at least one TRB | 2668 | * zero-length transfer, we need at least one TRB |
@@ -2700,8 +2706,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
2700 | /* How much data is in the first TRB? */ | 2706 | /* How much data is in the first TRB? */ |
2701 | addr = (u64) urb->transfer_dma; | 2707 | addr = (u64) urb->transfer_dma; |
2702 | trb_buff_len = TRB_MAX_BUFF_SIZE - | 2708 | trb_buff_len = TRB_MAX_BUFF_SIZE - |
2703 | (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); | 2709 | (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1)); |
2704 | if (urb->transfer_buffer_length < trb_buff_len) | 2710 | if (trb_buff_len > urb->transfer_buffer_length) |
2705 | trb_buff_len = urb->transfer_buffer_length; | 2711 | trb_buff_len = urb->transfer_buffer_length; |
2706 | 2712 | ||
2707 | first_trb = true; | 2713 | first_trb = true; |
@@ -2879,8 +2885,8 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci, | |||
2879 | addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset); | 2885 | addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset); |
2880 | td_len = urb->iso_frame_desc[i].length; | 2886 | td_len = urb->iso_frame_desc[i].length; |
2881 | 2887 | ||
2882 | running_total = TRB_MAX_BUFF_SIZE - | 2888 | running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1)); |
2883 | (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); | 2889 | running_total &= TRB_MAX_BUFF_SIZE - 1; |
2884 | if (running_total != 0) | 2890 | if (running_total != 0) |
2885 | num_trbs++; | 2891 | num_trbs++; |
2886 | 2892 | ||
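The arithmetic in these hunks computes how many bytes fit in the first TRB before the next 64KB boundary; the added "running_total &= TRB_MAX_BUFF_SIZE - 1" line makes a 64KB-aligned start address yield 0 rather than a full 64KB for that partial first chunk. A minimal user-space sketch of the same calculation, assuming only that TRB_MAX_BUFF_SIZE is 64KB (1 << TRB_MAX_BUFF_SHIFT) and ignoring the extra cap on the total transfer length that count_sg_trbs_needed() also applies:

#include <stdio.h>
#include <stdint.h>

#define TRB_MAX_BUFF_SIZE (1 << 16)	/* 64KB, as in xhci.h */

/* Count the TRBs needed for one contiguous buffer, mirroring the fixed
 * calculation: the first TRB ends at the next 64KB boundary, and an
 * already aligned start address contributes no partial first chunk. */
static unsigned int count_trbs(uint64_t addr, unsigned int len)
{
	unsigned int num_trbs = 0;
	unsigned int running_total;

	running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
	running_total &= TRB_MAX_BUFF_SIZE - 1;	/* aligned addr -> 0, not 64KB */
	if (running_total != 0 || len == 0)
		num_trbs++;
	while (running_total < len) {
		num_trbs++;
		running_total += TRB_MAX_BUFF_SIZE;
	}
	return num_trbs;
}

int main(void)
{
	printf("%u\n", count_trbs(0x20000, 0x20000));	/* aligned 128KB buffer: 2 TRBs */
	printf("%u\n", count_trbs(0x1f000, 0x2000));	/* crosses one boundary: 2 TRBs */
	return 0;
}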
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 34cf4e165877..2083fc2179b2 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
@@ -109,7 +109,7 @@ int xhci_halt(struct xhci_hcd *xhci) | |||
109 | /* | 109 | /* |
110 | * Set the run bit and wait for the host to be running. | 110 | * Set the run bit and wait for the host to be running. |
111 | */ | 111 | */ |
112 | int xhci_start(struct xhci_hcd *xhci) | 112 | static int xhci_start(struct xhci_hcd *xhci) |
113 | { | 113 | { |
114 | u32 temp; | 114 | u32 temp; |
115 | int ret; | 115 | int ret; |
@@ -329,7 +329,7 @@ int xhci_init(struct usb_hcd *hcd) | |||
329 | 329 | ||
330 | 330 | ||
331 | #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING | 331 | #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING |
332 | void xhci_event_ring_work(unsigned long arg) | 332 | static void xhci_event_ring_work(unsigned long arg) |
333 | { | 333 | { |
334 | unsigned long flags; | 334 | unsigned long flags; |
335 | int temp; | 335 | int temp; |
@@ -473,7 +473,7 @@ int xhci_run(struct usb_hcd *hcd) | |||
473 | xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp)); | 473 | xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp)); |
474 | xhci_writel(xhci, ER_IRQ_ENABLE(temp), | 474 | xhci_writel(xhci, ER_IRQ_ENABLE(temp), |
475 | &xhci->ir_set->irq_pending); | 475 | &xhci->ir_set->irq_pending); |
476 | xhci_print_ir_set(xhci, xhci->ir_set, 0); | 476 | xhci_print_ir_set(xhci, 0); |
477 | 477 | ||
478 | if (NUM_TEST_NOOPS > 0) | 478 | if (NUM_TEST_NOOPS > 0) |
479 | doorbell = xhci_setup_one_noop(xhci); | 479 | doorbell = xhci_setup_one_noop(xhci); |
@@ -528,7 +528,7 @@ void xhci_stop(struct usb_hcd *hcd) | |||
528 | temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); | 528 | temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); |
529 | xhci_writel(xhci, ER_IRQ_DISABLE(temp), | 529 | xhci_writel(xhci, ER_IRQ_DISABLE(temp), |
530 | &xhci->ir_set->irq_pending); | 530 | &xhci->ir_set->irq_pending); |
531 | xhci_print_ir_set(xhci, xhci->ir_set, 0); | 531 | xhci_print_ir_set(xhci, 0); |
532 | 532 | ||
533 | xhci_dbg(xhci, "cleaning up memory\n"); | 533 | xhci_dbg(xhci, "cleaning up memory\n"); |
534 | xhci_mem_cleanup(xhci); | 534 | xhci_mem_cleanup(xhci); |
@@ -755,7 +755,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) | |||
755 | temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); | 755 | temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); |
756 | xhci_writel(xhci, ER_IRQ_DISABLE(temp), | 756 | xhci_writel(xhci, ER_IRQ_DISABLE(temp), |
757 | &xhci->ir_set->irq_pending); | 757 | &xhci->ir_set->irq_pending); |
758 | xhci_print_ir_set(xhci, xhci->ir_set, 0); | 758 | xhci_print_ir_set(xhci, 0); |
759 | 759 | ||
760 | xhci_dbg(xhci, "cleaning up memory\n"); | 760 | xhci_dbg(xhci, "cleaning up memory\n"); |
761 | xhci_mem_cleanup(xhci); | 761 | xhci_mem_cleanup(xhci); |
@@ -857,7 +857,7 @@ unsigned int xhci_last_valid_endpoint(u32 added_ctxs) | |||
857 | /* Returns 1 if the arguments are OK; | 857 | /* Returns 1 if the arguments are OK; |
858 | * returns 0 if this is a root hub; returns -EINVAL for NULL pointers. | 858 | * returns 0 if this is a root hub; returns -EINVAL for NULL pointers. |
859 | */ | 859 | */ |
860 | int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev, | 860 | static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev, |
861 | struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev, | 861 | struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev, |
862 | const char *func) { | 862 | const char *func) { |
863 | struct xhci_hcd *xhci; | 863 | struct xhci_hcd *xhci; |
@@ -1693,7 +1693,7 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, | |||
1693 | xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags)); | 1693 | xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags)); |
1694 | } | 1694 | } |
1695 | 1695 | ||
1696 | void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, | 1696 | static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, |
1697 | unsigned int slot_id, unsigned int ep_index, | 1697 | unsigned int slot_id, unsigned int ep_index, |
1698 | struct xhci_dequeue_state *deq_state) | 1698 | struct xhci_dequeue_state *deq_state) |
1699 | { | 1699 | { |
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 7f236fd22015..7f127df6dd55 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h | |||
@@ -1348,7 +1348,7 @@ static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci) | |||
1348 | } | 1348 | } |
1349 | 1349 | ||
1350 | /* xHCI debugging */ | 1350 | /* xHCI debugging */ |
1351 | void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num); | 1351 | void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num); |
1352 | void xhci_print_registers(struct xhci_hcd *xhci); | 1352 | void xhci_print_registers(struct xhci_hcd *xhci); |
1353 | void xhci_dbg_regs(struct xhci_hcd *xhci); | 1353 | void xhci_dbg_regs(struct xhci_hcd *xhci); |
1354 | void xhci_print_run_regs(struct xhci_hcd *xhci); | 1354 | void xhci_print_run_regs(struct xhci_hcd *xhci); |
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c index eeba228eb2af..9d49d1cd7ce2 100644 --- a/drivers/usb/musb/blackfin.c +++ b/drivers/usb/musb/blackfin.c | |||
@@ -404,6 +404,7 @@ static int bfin_musb_init(struct musb *musb) | |||
404 | musb->xceiv->set_power = bfin_musb_set_power; | 404 | musb->xceiv->set_power = bfin_musb_set_power; |
405 | 405 | ||
406 | musb->isr = blackfin_interrupt; | 406 | musb->isr = blackfin_interrupt; |
407 | musb->double_buffer_not_ok = true; | ||
407 | 408 | ||
408 | return 0; | 409 | return 0; |
409 | } | 410 | } |
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 07cf394e491b..c292d5c499e7 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c | |||
@@ -128,12 +128,7 @@ MODULE_ALIAS("platform:" MUSB_DRIVER_NAME); | |||
128 | 128 | ||
129 | static inline struct musb *dev_to_musb(struct device *dev) | 129 | static inline struct musb *dev_to_musb(struct device *dev) |
130 | { | 130 | { |
131 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
132 | /* usbcore insists dev->driver_data is a "struct hcd *" */ | ||
133 | return hcd_to_musb(dev_get_drvdata(dev)); | ||
134 | #else | ||
135 | return dev_get_drvdata(dev); | 131 | return dev_get_drvdata(dev); |
136 | #endif | ||
137 | } | 132 | } |
138 | 133 | ||
139 | /*-------------------------------------------------------------------------*/ | 134 | /*-------------------------------------------------------------------------*/ |
@@ -1869,6 +1864,7 @@ allocate_instance(struct device *dev, | |||
1869 | INIT_LIST_HEAD(&musb->out_bulk); | 1864 | INIT_LIST_HEAD(&musb->out_bulk); |
1870 | 1865 | ||
1871 | hcd->uses_new_polling = 1; | 1866 | hcd->uses_new_polling = 1; |
1867 | hcd->has_tt = 1; | ||
1872 | 1868 | ||
1873 | musb->vbuserr_retry = VBUSERR_RETRY_COUNT; | 1869 | musb->vbuserr_retry = VBUSERR_RETRY_COUNT; |
1874 | musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON; | 1870 | musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON; |
@@ -1876,10 +1872,9 @@ allocate_instance(struct device *dev, | |||
1876 | musb = kzalloc(sizeof *musb, GFP_KERNEL); | 1872 | musb = kzalloc(sizeof *musb, GFP_KERNEL); |
1877 | if (!musb) | 1873 | if (!musb) |
1878 | return NULL; | 1874 | return NULL; |
1879 | dev_set_drvdata(dev, musb); | ||
1880 | 1875 | ||
1881 | #endif | 1876 | #endif |
1882 | 1877 | dev_set_drvdata(dev, musb); | |
1883 | musb->mregs = mbase; | 1878 | musb->mregs = mbase; |
1884 | musb->ctrl_base = mbase; | 1879 | musb->ctrl_base = mbase; |
1885 | musb->nIrq = -ENODEV; | 1880 | musb->nIrq = -ENODEV; |
@@ -2191,7 +2186,7 @@ static int __init musb_probe(struct platform_device *pdev) | |||
2191 | void __iomem *base; | 2186 | void __iomem *base; |
2192 | 2187 | ||
2193 | iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 2188 | iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
2194 | if (!iomem || irq == 0) | 2189 | if (!iomem || irq <= 0) |
2195 | return -ENODEV; | 2190 | return -ENODEV; |
2196 | 2191 | ||
2197 | base = ioremap(iomem->start, resource_size(iomem)); | 2192 | base = ioremap(iomem->start, resource_size(iomem)); |
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index d0c236f8e191..e6400be8a0f8 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h | |||
@@ -497,6 +497,19 @@ struct musb { | |||
497 | struct usb_gadget_driver *gadget_driver; /* its driver */ | 497 | struct usb_gadget_driver *gadget_driver; /* its driver */ |
498 | #endif | 498 | #endif |
499 | 499 | ||
500 | /* | ||
501 | * FIXME: Remove this flag. | ||
502 | * | ||
503 | * This is only added to allow Blackfin to work | ||
504 | * with current driver. For some unknown reason | ||
505 | * Blackfin doesn't work with double buffering | ||
506 | * and that's enabled by default. | ||
507 | * | ||
508 | * We added this flag to forcefully disable double | ||
509 | * buffering until we get it working. | ||
510 | */ | ||
511 | unsigned double_buffer_not_ok:1 __deprecated; | ||
512 | |||
500 | struct musb_hdrc_config *config; | 513 | struct musb_hdrc_config *config; |
501 | 514 | ||
502 | #ifdef MUSB_CONFIG_PROC_FS | 515 | #ifdef MUSB_CONFIG_PROC_FS |
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h index 916065ba9e70..3a97c4e2d4f5 100644 --- a/drivers/usb/musb/musb_dma.h +++ b/drivers/usb/musb/musb_dma.h | |||
@@ -169,6 +169,9 @@ struct dma_controller { | |||
169 | dma_addr_t dma_addr, | 169 | dma_addr_t dma_addr, |
170 | u32 length); | 170 | u32 length); |
171 | int (*channel_abort)(struct dma_channel *); | 171 | int (*channel_abort)(struct dma_channel *); |
172 | int (*is_compatible)(struct dma_channel *channel, | ||
173 | u16 maxpacket, | ||
174 | void *buf, u32 length); | ||
172 | }; | 175 | }; |
173 | 176 | ||
174 | /* called after channel_program(), may indicate a fault */ | 177 | /* called after channel_program(), may indicate a fault */ |
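The new is_compatible() hook lets a DMA controller veto a request before the gadget core maps the buffer; when it returns false, map_dma_buffer() leaves the request UN_MAPPED and the transfer falls back to PIO. A hypothetical implementation, assuming a controller whose engine only handles 32-bit-aligned buffers of at least one full packet (the names are illustrative, not taken from an existing driver):

/* Hypothetical policy: reject unaligned or sub-packet buffers. */
static int example_dma_is_compatible(struct dma_channel *channel,
		u16 maxpacket, void *buf, u32 length)
{
	if (((unsigned long) buf & 0x3) != 0)
		return 0;	/* engine needs 32-bit alignment */
	if (length < maxpacket)
		return 0;	/* short transfers go via PIO */
	return 1;
}

/* Installed when the controller object is created, e.g.:
 *	controller->is_compatible = example_dma_is_compatible;
 */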
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index ed58c6c8f15c..2fe304611dcf 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c | |||
@@ -92,11 +92,33 @@ | |||
92 | 92 | ||
93 | /* ----------------------------------------------------------------------- */ | 93 | /* ----------------------------------------------------------------------- */ |
94 | 94 | ||
95 | #define is_buffer_mapped(req) (is_dma_capable() && \ | ||
96 | (req->map_state != UN_MAPPED)) | ||
97 | |||
95 | /* Maps the buffer to dma */ | 98 | /* Maps the buffer to dma */ |
96 | 99 | ||
97 | static inline void map_dma_buffer(struct musb_request *request, | 100 | static inline void map_dma_buffer(struct musb_request *request, |
98 | struct musb *musb) | 101 | struct musb *musb, struct musb_ep *musb_ep) |
99 | { | 102 | { |
103 | int compatible = true; | ||
104 | struct dma_controller *dma = musb->dma_controller; | ||
105 | |||
106 | request->map_state = UN_MAPPED; | ||
107 | |||
108 | if (!is_dma_capable() || !musb_ep->dma) | ||
109 | return; | ||
110 | |||
111 | /* Check if DMA engine can handle this request. | ||
112 | * DMA code must reject the USB request explicitly. | ||
113 | * Default behaviour is to map the request. | ||
114 | */ | ||
115 | if (dma->is_compatible) | ||
116 | compatible = dma->is_compatible(musb_ep->dma, | ||
117 | musb_ep->packet_sz, request->request.buf, | ||
118 | request->request.length); | ||
119 | if (!compatible) | ||
120 | return; | ||
121 | |||
100 | if (request->request.dma == DMA_ADDR_INVALID) { | 122 | if (request->request.dma == DMA_ADDR_INVALID) { |
101 | request->request.dma = dma_map_single( | 123 | request->request.dma = dma_map_single( |
102 | musb->controller, | 124 | musb->controller, |
@@ -105,7 +127,7 @@ static inline void map_dma_buffer(struct musb_request *request, | |||
105 | request->tx | 127 | request->tx |
106 | ? DMA_TO_DEVICE | 128 | ? DMA_TO_DEVICE |
107 | : DMA_FROM_DEVICE); | 129 | : DMA_FROM_DEVICE); |
108 | request->mapped = 1; | 130 | request->map_state = MUSB_MAPPED; |
109 | } else { | 131 | } else { |
110 | dma_sync_single_for_device(musb->controller, | 132 | dma_sync_single_for_device(musb->controller, |
111 | request->request.dma, | 133 | request->request.dma, |
@@ -113,7 +135,7 @@ static inline void map_dma_buffer(struct musb_request *request, | |||
113 | request->tx | 135 | request->tx |
114 | ? DMA_TO_DEVICE | 136 | ? DMA_TO_DEVICE |
115 | : DMA_FROM_DEVICE); | 137 | : DMA_FROM_DEVICE); |
116 | request->mapped = 0; | 138 | request->map_state = PRE_MAPPED; |
117 | } | 139 | } |
118 | } | 140 | } |
119 | 141 | ||
@@ -121,11 +143,14 @@ static inline void map_dma_buffer(struct musb_request *request, | |||
121 | static inline void unmap_dma_buffer(struct musb_request *request, | 143 | static inline void unmap_dma_buffer(struct musb_request *request, |
122 | struct musb *musb) | 144 | struct musb *musb) |
123 | { | 145 | { |
146 | if (!is_buffer_mapped(request)) | ||
147 | return; | ||
148 | |||
124 | if (request->request.dma == DMA_ADDR_INVALID) { | 149 | if (request->request.dma == DMA_ADDR_INVALID) { |
125 | DBG(20, "not unmapping a never mapped buffer\n"); | 150 | DBG(20, "not unmapping a never mapped buffer\n"); |
126 | return; | 151 | return; |
127 | } | 152 | } |
128 | if (request->mapped) { | 153 | if (request->map_state == MUSB_MAPPED) { |
129 | dma_unmap_single(musb->controller, | 154 | dma_unmap_single(musb->controller, |
130 | request->request.dma, | 155 | request->request.dma, |
131 | request->request.length, | 156 | request->request.length, |
@@ -133,16 +158,15 @@ static inline void unmap_dma_buffer(struct musb_request *request, | |||
133 | ? DMA_TO_DEVICE | 158 | ? DMA_TO_DEVICE |
134 | : DMA_FROM_DEVICE); | 159 | : DMA_FROM_DEVICE); |
135 | request->request.dma = DMA_ADDR_INVALID; | 160 | request->request.dma = DMA_ADDR_INVALID; |
136 | request->mapped = 0; | 161 | } else { /* PRE_MAPPED */ |
137 | } else { | ||
138 | dma_sync_single_for_cpu(musb->controller, | 162 | dma_sync_single_for_cpu(musb->controller, |
139 | request->request.dma, | 163 | request->request.dma, |
140 | request->request.length, | 164 | request->request.length, |
141 | request->tx | 165 | request->tx |
142 | ? DMA_TO_DEVICE | 166 | ? DMA_TO_DEVICE |
143 | : DMA_FROM_DEVICE); | 167 | : DMA_FROM_DEVICE); |
144 | |||
145 | } | 168 | } |
169 | request->map_state = UN_MAPPED; | ||
146 | } | 170 | } |
147 | 171 | ||
148 | /* | 172 | /* |
@@ -172,8 +196,7 @@ __acquires(ep->musb->lock) | |||
172 | 196 | ||
173 | ep->busy = 1; | 197 | ep->busy = 1; |
174 | spin_unlock(&musb->lock); | 198 | spin_unlock(&musb->lock); |
175 | if (is_dma_capable() && ep->dma) | 199 | unmap_dma_buffer(req, musb); |
176 | unmap_dma_buffer(req, musb); | ||
177 | if (request->status == 0) | 200 | if (request->status == 0) |
178 | DBG(5, "%s done request %p, %d/%d\n", | 201 | DBG(5, "%s done request %p, %d/%d\n", |
179 | ep->end_point.name, request, | 202 | ep->end_point.name, request, |
@@ -335,7 +358,7 @@ static void txstate(struct musb *musb, struct musb_request *req) | |||
335 | csr); | 358 | csr); |
336 | 359 | ||
337 | #ifndef CONFIG_MUSB_PIO_ONLY | 360 | #ifndef CONFIG_MUSB_PIO_ONLY |
338 | if (is_dma_capable() && musb_ep->dma) { | 361 | if (is_buffer_mapped(req)) { |
339 | struct dma_controller *c = musb->dma_controller; | 362 | struct dma_controller *c = musb->dma_controller; |
340 | size_t request_size; | 363 | size_t request_size; |
341 | 364 | ||
@@ -436,8 +459,7 @@ static void txstate(struct musb *musb, struct musb_request *req) | |||
436 | * Unmap the dma buffer back to cpu if dma channel | 459 | * Unmap the dma buffer back to cpu if dma channel |
437 | * programming fails | 460 | * programming fails |
438 | */ | 461 | */ |
439 | if (is_dma_capable() && musb_ep->dma) | 462 | unmap_dma_buffer(req, musb); |
440 | unmap_dma_buffer(req, musb); | ||
441 | 463 | ||
442 | musb_write_fifo(musb_ep->hw_ep, fifo_count, | 464 | musb_write_fifo(musb_ep->hw_ep, fifo_count, |
443 | (u8 *) (request->buf + request->actual)); | 465 | (u8 *) (request->buf + request->actual)); |
@@ -627,7 +649,7 @@ static void rxstate(struct musb *musb, struct musb_request *req) | |||
627 | return; | 649 | return; |
628 | } | 650 | } |
629 | 651 | ||
630 | if (is_cppi_enabled() && musb_ep->dma) { | 652 | if (is_cppi_enabled() && is_buffer_mapped(req)) { |
631 | struct dma_controller *c = musb->dma_controller; | 653 | struct dma_controller *c = musb->dma_controller; |
632 | struct dma_channel *channel = musb_ep->dma; | 654 | struct dma_channel *channel = musb_ep->dma; |
633 | 655 | ||
@@ -658,7 +680,7 @@ static void rxstate(struct musb *musb, struct musb_request *req) | |||
658 | len = musb_readw(epio, MUSB_RXCOUNT); | 680 | len = musb_readw(epio, MUSB_RXCOUNT); |
659 | if (request->actual < request->length) { | 681 | if (request->actual < request->length) { |
660 | #ifdef CONFIG_USB_INVENTRA_DMA | 682 | #ifdef CONFIG_USB_INVENTRA_DMA |
661 | if (is_dma_capable() && musb_ep->dma) { | 683 | if (is_buffer_mapped(req)) { |
662 | struct dma_controller *c; | 684 | struct dma_controller *c; |
663 | struct dma_channel *channel; | 685 | struct dma_channel *channel; |
664 | int use_dma = 0; | 686 | int use_dma = 0; |
@@ -742,7 +764,7 @@ static void rxstate(struct musb *musb, struct musb_request *req) | |||
742 | fifo_count = min_t(unsigned, len, fifo_count); | 764 | fifo_count = min_t(unsigned, len, fifo_count); |
743 | 765 | ||
744 | #ifdef CONFIG_USB_TUSB_OMAP_DMA | 766 | #ifdef CONFIG_USB_TUSB_OMAP_DMA |
745 | if (tusb_dma_omap() && musb_ep->dma) { | 767 | if (tusb_dma_omap() && is_buffer_mapped(req)) { |
746 | struct dma_controller *c = musb->dma_controller; | 768 | struct dma_controller *c = musb->dma_controller; |
747 | struct dma_channel *channel = musb_ep->dma; | 769 | struct dma_channel *channel = musb_ep->dma; |
748 | u32 dma_addr = request->dma + request->actual; | 770 | u32 dma_addr = request->dma + request->actual; |
@@ -762,7 +784,7 @@ static void rxstate(struct musb *musb, struct musb_request *req) | |||
762 | * programming fails. This buffer is mapped if the | 784 | * programming fails. This buffer is mapped if the |
763 | * channel allocation is successful | 785 | * channel allocation is successful |
764 | */ | 786 | */ |
765 | if (is_dma_capable() && musb_ep->dma) { | 787 | if (is_buffer_mapped(req)) { |
766 | unmap_dma_buffer(req, musb); | 788 | unmap_dma_buffer(req, musb); |
767 | 789 | ||
768 | /* | 790 | /* |
@@ -989,7 +1011,11 @@ static int musb_gadget_enable(struct usb_ep *ep, | |||
989 | /* Set TXMAXP with the FIFO size of the endpoint | 1011 | /* Set TXMAXP with the FIFO size of the endpoint |
990 | * to disable double buffering mode. | 1012 | * to disable double buffering mode. |
991 | */ | 1013 | */ |
992 | musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11)); | 1014 | if (musb->double_buffer_not_ok) |
1015 | musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx); | ||
1016 | else | ||
1017 | musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | ||
1018 | | (musb_ep->hb_mult << 11)); | ||
993 | 1019 | ||
994 | csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; | 1020 | csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; |
995 | if (musb_readw(regs, MUSB_TXCSR) | 1021 | if (musb_readw(regs, MUSB_TXCSR) |
@@ -1025,7 +1051,11 @@ static int musb_gadget_enable(struct usb_ep *ep, | |||
1025 | /* Set RXMAXP with the FIFO size of the endpoint | 1051 | /* Set RXMAXP with the FIFO size of the endpoint |
1026 | * to disable double buffering mode. | 1052 | * to disable double buffering mode. |
1027 | */ | 1053 | */ |
1028 | musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11)); | 1054 | if (musb->double_buffer_not_ok) |
1055 | musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx); | ||
1056 | else | ||
1057 | musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | ||
1058 | | (musb_ep->hb_mult << 11)); | ||
1029 | 1059 | ||
1030 | /* force shared fifo to OUT-only mode */ | 1060 | /* force shared fifo to OUT-only mode */ |
1031 | if (hw_ep->is_shared_fifo) { | 1061 | if (hw_ep->is_shared_fifo) { |
@@ -1214,10 +1244,7 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, | |||
1214 | request->epnum = musb_ep->current_epnum; | 1244 | request->epnum = musb_ep->current_epnum; |
1215 | request->tx = musb_ep->is_in; | 1245 | request->tx = musb_ep->is_in; |
1216 | 1246 | ||
1217 | if (is_dma_capable() && musb_ep->dma) | 1247 | map_dma_buffer(request, musb, musb_ep); |
1218 | map_dma_buffer(request, musb); | ||
1219 | else | ||
1220 | request->mapped = 0; | ||
1221 | 1248 | ||
1222 | spin_lock_irqsave(&musb->lock, lockflags); | 1249 | spin_lock_irqsave(&musb->lock, lockflags); |
1223 | 1250 | ||
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h index dec8dc008191..a55354fbccf5 100644 --- a/drivers/usb/musb/musb_gadget.h +++ b/drivers/usb/musb/musb_gadget.h | |||
@@ -35,13 +35,19 @@ | |||
35 | #ifndef __MUSB_GADGET_H | 35 | #ifndef __MUSB_GADGET_H |
36 | #define __MUSB_GADGET_H | 36 | #define __MUSB_GADGET_H |
37 | 37 | ||
38 | enum buffer_map_state { | ||
39 | UN_MAPPED = 0, | ||
40 | PRE_MAPPED, | ||
41 | MUSB_MAPPED | ||
42 | }; | ||
43 | |||
38 | struct musb_request { | 44 | struct musb_request { |
39 | struct usb_request request; | 45 | struct usb_request request; |
40 | struct musb_ep *ep; | 46 | struct musb_ep *ep; |
41 | struct musb *musb; | 47 | struct musb *musb; |
42 | u8 tx; /* endpoint direction */ | 48 | u8 tx; /* endpoint direction */ |
43 | u8 epnum; | 49 | u8 epnum; |
44 | u8 mapped; | 50 | enum buffer_map_state map_state; |
45 | }; | 51 | }; |
46 | 52 | ||
47 | static inline struct musb_request *to_musb_request(struct usb_request *req) | 53 | static inline struct musb_request *to_musb_request(struct usb_request *req) |
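The three-state enum replaces the old one-bit "mapped" flag so the gadget code can tell apart buffers it never mapped, buffers the gadget driver pre-mapped, and buffers musb mapped itself. What unmap_dma_buffer() now does for each state, restated as a sketch (not the driver code verbatim):

switch (request->map_state) {
case UN_MAPPED:
	/* never handed to the DMA API (PIO path, or is_compatible()
	 * rejected it): nothing to undo */
	break;
case PRE_MAPPED:
	/* gadget driver supplied a DMA address: just sync it back */
	dma_sync_single_for_cpu(musb->controller, request->request.dma,
			request->request.length,
			request->tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	break;
case MUSB_MAPPED:
	/* mapped in map_dma_buffer(): unmap and invalidate the handle */
	dma_unmap_single(musb->controller, request->request.dma,
			request->request.length,
			request->tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	request->request.dma = DMA_ADDR_INVALID;
	break;
}
request->map_state = UN_MAPPED;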
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 4d5bcb4e14d2..0f523d7db57b 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c | |||
@@ -609,7 +609,7 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep) | |||
609 | /* Set RXMAXP with the FIFO size of the endpoint | 609 | /* Set RXMAXP with the FIFO size of the endpoint |
610 | * to disable double buffer mode. | 610 | * to disable double buffer mode. |
611 | */ | 611 | */ |
612 | if (musb->hwvers < MUSB_HWVERS_2000) | 612 | if (musb->double_buffer_not_ok) |
613 | musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx); | 613 | musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx); |
614 | else | 614 | else |
615 | musb_writew(ep->regs, MUSB_RXMAXP, | 615 | musb_writew(ep->regs, MUSB_RXMAXP, |
@@ -784,14 +784,13 @@ static void musb_ep_program(struct musb *musb, u8 epnum, | |||
784 | /* protocol/endpoint/interval/NAKlimit */ | 784 | /* protocol/endpoint/interval/NAKlimit */ |
785 | if (epnum) { | 785 | if (epnum) { |
786 | musb_writeb(epio, MUSB_TXTYPE, qh->type_reg); | 786 | musb_writeb(epio, MUSB_TXTYPE, qh->type_reg); |
787 | if (can_bulk_split(musb, qh->type)) | 787 | if (musb->double_buffer_not_ok) |
788 | musb_writew(epio, MUSB_TXMAXP, | 788 | musb_writew(epio, MUSB_TXMAXP, |
789 | packet_sz | 789 | hw_ep->max_packet_sz_tx); |
790 | | ((hw_ep->max_packet_sz_tx / | ||
791 | packet_sz) - 1) << 11); | ||
792 | else | 790 | else |
793 | musb_writew(epio, MUSB_TXMAXP, | 791 | musb_writew(epio, MUSB_TXMAXP, |
794 | packet_sz); | 792 | qh->maxpacket | |
793 | ((qh->hb_mult - 1) << 11)); | ||
795 | musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg); | 794 | musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg); |
796 | } else { | 795 | } else { |
797 | musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg); | 796 | musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg); |
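The host-side hunk above programs TXMAXP with the endpoint's packet size in the low bits and the high-bandwidth transaction count in bits 11 and up, unless double_buffer_not_ok forces the raw FIFO size instead. A small stand-alone illustration of the packing as written here (the exact register layout is inferred from these writes, not restated from the datasheet):

#include <stdio.h>
#include <stdint.h>

/* MAXP value = maxpacket | ((transactions_per_microframe - 1) << 11) */
static uint16_t musb_maxp(uint16_t packet_sz, unsigned int hb_mult)
{
	return packet_sz | (uint16_t)((hb_mult - 1) << 11);
}

int main(void)
{
	/* 1024-byte high-bandwidth endpoint, 3 transactions per microframe */
	printf("0x%04x\n", musb_maxp(1024, 3));		/* prints 0x1400 */
	return 0;
}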
diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h index f763d62f151c..21056c924c74 100644 --- a/drivers/usb/musb/musbhsdma.h +++ b/drivers/usb/musb/musbhsdma.h | |||
@@ -94,24 +94,33 @@ static inline void musb_write_hsdma_addr(void __iomem *mbase, | |||
94 | { | 94 | { |
95 | musb_writew(mbase, | 95 | musb_writew(mbase, |
96 | MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_LOW), | 96 | MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_LOW), |
97 | ((u16)((u32) dma_addr & 0xFFFF))); | 97 | dma_addr); |
98 | musb_writew(mbase, | 98 | musb_writew(mbase, |
99 | MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_HIGH), | 99 | MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_HIGH), |
100 | ((u16)(((u32) dma_addr >> 16) & 0xFFFF))); | 100 | (dma_addr >> 16)); |
101 | } | 101 | } |
102 | 102 | ||
103 | static inline u32 musb_read_hsdma_count(void __iomem *mbase, u8 bchannel) | 103 | static inline u32 musb_read_hsdma_count(void __iomem *mbase, u8 bchannel) |
104 | { | 104 | { |
105 | return musb_readl(mbase, | 105 | u32 count = musb_readw(mbase, |
106 | MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH)); | 106 | MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH)); |
107 | |||
108 | count = count << 16; | ||
109 | |||
110 | count |= musb_readw(mbase, | ||
111 | MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW)); | ||
112 | |||
113 | return count; | ||
107 | } | 114 | } |
108 | 115 | ||
109 | static inline void musb_write_hsdma_count(void __iomem *mbase, | 116 | static inline void musb_write_hsdma_count(void __iomem *mbase, |
110 | u8 bchannel, u32 len) | 117 | u8 bchannel, u32 len) |
111 | { | 118 | { |
112 | musb_writel(mbase, | 119 | musb_writew(mbase, |
120 | MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW),len); | ||
121 | musb_writew(mbase, | ||
113 | MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH), | 122 | MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH), |
114 | len); | 123 | (len >> 16)); |
115 | } | 124 | } |
116 | 125 | ||
117 | #endif /* CONFIG_BLACKFIN */ | 126 | #endif /* CONFIG_BLACKFIN */ |
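The musbhsdma.h change replaces the 32-bit musb_readl()/musb_writel() accesses with paired 16-bit ones, since in this (Blackfin) configuration the DMA address and count registers are only 16 bits wide. The recomposition can be checked in isolation:

#include <stdio.h>
#include <stdint.h>

/* Splitting a 32-bit DMA count into two 16-bit register writes and
 * recombining it on read, as the new helpers above do. The two
 * variables stand in for the COUNT_LOW/COUNT_HIGH registers. */
static uint16_t reg_low, reg_high;

static void write_count(uint32_t len)
{
	reg_low  = (uint16_t)(len & 0xFFFF);
	reg_high = (uint16_t)(len >> 16);
}

static uint32_t read_count(void)
{
	uint32_t count = reg_high;

	count <<= 16;
	count |= reg_low;
	return count;
}

int main(void)
{
	write_count(0x0001F400);		/* 128000 bytes */
	printf("0x%08x\n", read_count());	/* prints 0x0001f400 */
	return 0;
}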
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index a3f12333fc41..bc8badd16897 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c | |||
@@ -362,6 +362,7 @@ static int omap2430_musb_init(struct musb *musb) | |||
362 | 362 | ||
363 | static int omap2430_musb_exit(struct musb *musb) | 363 | static int omap2430_musb_exit(struct musb *musb) |
364 | { | 364 | { |
365 | del_timer_sync(&musb_idle_timer); | ||
365 | 366 | ||
366 | omap2430_low_level_exit(musb); | 367 | omap2430_low_level_exit(musb); |
367 | otg_put_transceiver(musb->xceiv); | 368 | otg_put_transceiver(musb->xceiv); |
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig index 9fb875d5f09c..9ffc8237fb4b 100644 --- a/drivers/usb/otg/Kconfig +++ b/drivers/usb/otg/Kconfig | |||
@@ -103,6 +103,8 @@ config USB_MSM_OTG_72K | |||
103 | required after resetting the hardware and power management. | 103 | required after resetting the hardware and power management. |
104 | This driver is required even for peripheral only or host only | 104 | This driver is required even for peripheral only or host only |
105 | mode configurations. | 105 | mode configurations. |
106 | This driver is not supported on boards like trout which | ||
107 | have an external PHY. | ||
106 | 108 | ||
107 | config AB8500_USB | 109 | config AB8500_USB |
108 | tristate "AB8500 USB Transceiver Driver" | 110 | tristate "AB8500 USB Transceiver Driver" |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 4787c0cd063f..f349a3629d00 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -100,6 +100,7 @@ struct ftdi_sio_quirk { | |||
100 | static int ftdi_jtag_probe(struct usb_serial *serial); | 100 | static int ftdi_jtag_probe(struct usb_serial *serial); |
101 | static int ftdi_mtxorb_hack_setup(struct usb_serial *serial); | 101 | static int ftdi_mtxorb_hack_setup(struct usb_serial *serial); |
102 | static int ftdi_NDI_device_setup(struct usb_serial *serial); | 102 | static int ftdi_NDI_device_setup(struct usb_serial *serial); |
103 | static int ftdi_stmclite_probe(struct usb_serial *serial); | ||
103 | static void ftdi_USB_UIRT_setup(struct ftdi_private *priv); | 104 | static void ftdi_USB_UIRT_setup(struct ftdi_private *priv); |
104 | static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv); | 105 | static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv); |
105 | 106 | ||
@@ -123,6 +124,10 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = { | |||
123 | .port_probe = ftdi_HE_TIRA1_setup, | 124 | .port_probe = ftdi_HE_TIRA1_setup, |
124 | }; | 125 | }; |
125 | 126 | ||
127 | static struct ftdi_sio_quirk ftdi_stmclite_quirk = { | ||
128 | .probe = ftdi_stmclite_probe, | ||
129 | }; | ||
130 | |||
126 | /* | 131 | /* |
127 | * The 8U232AM has the same API as the sio except for: | 132 | * The 8U232AM has the same API as the sio except for: |
128 | * - it can support MUCH higher baudrates; up to: | 133 | * - it can support MUCH higher baudrates; up to: |
@@ -616,6 +621,7 @@ static struct usb_device_id id_table_combined [] = { | |||
616 | { USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) }, | 621 | { USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) }, |
617 | { USB_DEVICE(TTI_VID, TTI_QL355P_PID) }, | 622 | { USB_DEVICE(TTI_VID, TTI_QL355P_PID) }, |
618 | { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) }, | 623 | { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) }, |
624 | { USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) }, | ||
619 | { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) }, | 625 | { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) }, |
620 | { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) }, | 626 | { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) }, |
621 | { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) }, | 627 | { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) }, |
@@ -810,6 +816,8 @@ static struct usb_device_id id_table_combined [] = { | |||
810 | { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) }, | 816 | { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) }, |
811 | { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID), | 817 | { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID), |
812 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | 818 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
819 | { USB_DEVICE(ST_VID, ST_STMCLT1030_PID), | ||
820 | .driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk }, | ||
813 | { }, /* Optional parameter entry */ | 821 | { }, /* Optional parameter entry */ |
814 | { } /* Terminating entry */ | 822 | { } /* Terminating entry */ |
815 | }; | 823 | }; |
@@ -1709,6 +1717,25 @@ static int ftdi_jtag_probe(struct usb_serial *serial) | |||
1709 | } | 1717 | } |
1710 | 1718 | ||
1711 | /* | 1719 | /* |
1720 | * The first and second ports on STMCLite adaptors are reserved for the JTAG | ||
1721 | * interface and the fourth port for PIO | ||
1722 | */ | ||
1723 | static int ftdi_stmclite_probe(struct usb_serial *serial) | ||
1724 | { | ||
1725 | struct usb_device *udev = serial->dev; | ||
1726 | struct usb_interface *interface = serial->interface; | ||
1727 | |||
1728 | dbg("%s", __func__); | ||
1729 | |||
1730 | if (interface == udev->actconfig->interface[2]) | ||
1731 | return 0; | ||
1732 | |||
1733 | dev_info(&udev->dev, "Ignoring serial port reserved for JTAG\n"); | ||
1734 | |||
1735 | return -ENODEV; | ||
1736 | } | ||
1737 | |||
1738 | /* | ||
1712 | * The Matrix Orbital VK204-25-USB has an invalid IN endpoint. | 1739 | * The Matrix Orbital VK204-25-USB has an invalid IN endpoint. |
1713 | * We have to correct it if we want to read from it. | 1740 | * We have to correct it if we want to read from it. |
1714 | */ | 1741 | */ |
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index ed160def8584..117e8e6f93c6 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h | |||
@@ -518,6 +518,12 @@ | |||
518 | #define RATOC_PRODUCT_ID_USB60F 0xb020 | 518 | #define RATOC_PRODUCT_ID_USB60F 0xb020 |
519 | 519 | ||
520 | /* | 520 | /* |
521 | * Acton Research Corp. | ||
522 | */ | ||
523 | #define ACTON_VID 0x0647 /* Vendor ID */ | ||
524 | #define ACTON_SPECTRAPRO_PID 0x0100 | ||
525 | |||
526 | /* | ||
521 | * Contec products (http://www.contec.com) | 527 | * Contec products (http://www.contec.com) |
522 | * Submitted by Daniel Sangorrin | 528 | * Submitted by Daniel Sangorrin |
523 | */ | 529 | */ |
@@ -1034,6 +1040,12 @@ | |||
1034 | #define WHT_PID 0x0004 /* Wireless Handheld Terminal */ | 1040 | #define WHT_PID 0x0004 /* Wireless Handheld Terminal */ |
1035 | 1041 | ||
1036 | /* | 1042 | /* |
1043 | * STMicroelectronics | ||
1044 | */ | ||
1045 | #define ST_VID 0x0483 | ||
1046 | #define ST_STMCLT1030_PID 0x3747 /* ST Micro Connect Lite STMCLT1030 */ | ||
1047 | |||
1048 | /* | ||
1037 | * Papouch products (http://www.papouch.com/) | 1049 | * Papouch products (http://www.papouch.com/) |
1038 | * Submitted by Folkert van Heusden | 1050 | * Submitted by Folkert van Heusden |
1039 | */ | 1051 | */ |
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index cd769ef24f8a..3b246d93cf22 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c | |||
@@ -2889,8 +2889,8 @@ static void load_application_firmware(struct edgeport_serial *edge_serial) | |||
2889 | 2889 | ||
2890 | dbg("%s %d.%d.%d", fw_info, rec->data[0], rec->data[1], build); | 2890 | dbg("%s %d.%d.%d", fw_info, rec->data[0], rec->data[1], build); |
2891 | 2891 | ||
2892 | edge_serial->product_info.FirmwareMajorVersion = fw->data[0]; | 2892 | edge_serial->product_info.FirmwareMajorVersion = rec->data[0]; |
2893 | edge_serial->product_info.FirmwareMinorVersion = fw->data[1]; | 2893 | edge_serial->product_info.FirmwareMinorVersion = rec->data[1]; |
2894 | edge_serial->product_info.FirmwareBuildNumber = cpu_to_le16(build); | 2894 | edge_serial->product_info.FirmwareBuildNumber = cpu_to_le16(build); |
2895 | 2895 | ||
2896 | for (rec = ihex_next_binrec(rec); rec; | 2896 | for (rec = ihex_next_binrec(rec); rec; |
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index 7481ff8a49e4..0457813eebee 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c | |||
@@ -301,6 +301,9 @@ static const struct usb_device_id id_table[] = { | |||
301 | { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ | 301 | { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ |
302 | .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist | 302 | .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist |
303 | }, | 303 | }, |
304 | { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */ | ||
305 | .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist | ||
306 | }, | ||
304 | { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */ | 307 | { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */ |
305 | 308 | ||
306 | { } | 309 | { } |
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index b2902f307b47..a910004f4079 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c | |||
@@ -369,9 +369,9 @@ failed_1port: | |||
369 | 369 | ||
370 | static void __exit ti_exit(void) | 370 | static void __exit ti_exit(void) |
371 | { | 371 | { |
372 | usb_deregister(&ti_usb_driver); | ||
372 | usb_serial_deregister(&ti_1port_device); | 373 | usb_serial_deregister(&ti_1port_device); |
373 | usb_serial_deregister(&ti_2port_device); | 374 | usb_serial_deregister(&ti_2port_device); |
374 | usb_deregister(&ti_usb_driver); | ||
375 | } | 375 | } |
376 | 376 | ||
377 | 377 | ||
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c index b004b2a485c3..9c014e2ecd68 100644 --- a/drivers/usb/serial/usb_wwan.c +++ b/drivers/usb/serial/usb_wwan.c | |||
@@ -295,12 +295,15 @@ static void usb_wwan_indat_callback(struct urb *urb) | |||
295 | __func__, status, endpoint); | 295 | __func__, status, endpoint); |
296 | } else { | 296 | } else { |
297 | tty = tty_port_tty_get(&port->port); | 297 | tty = tty_port_tty_get(&port->port); |
298 | if (urb->actual_length) { | 298 | if (tty) { |
299 | tty_insert_flip_string(tty, data, urb->actual_length); | 299 | if (urb->actual_length) { |
300 | tty_flip_buffer_push(tty); | 300 | tty_insert_flip_string(tty, data, |
301 | } else | 301 | urb->actual_length); |
302 | dbg("%s: empty read urb received", __func__); | 302 | tty_flip_buffer_push(tty); |
303 | tty_kref_put(tty); | 303 | } else |
304 | dbg("%s: empty read urb received", __func__); | ||
305 | tty_kref_put(tty); | ||
306 | } | ||
304 | 307 | ||
305 | /* Resubmit urb so we continue receiving */ | 308 | /* Resubmit urb so we continue receiving */ |
306 | if (status != -ESHUTDOWN) { | 309 | if (status != -ESHUTDOWN) { |
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c index 15a5d89b7f39..1c11959a7d58 100644 --- a/drivers/usb/serial/visor.c +++ b/drivers/usb/serial/visor.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/uaccess.h> | 27 | #include <linux/uaccess.h> |
28 | #include <linux/usb.h> | 28 | #include <linux/usb.h> |
29 | #include <linux/usb/serial.h> | 29 | #include <linux/usb/serial.h> |
30 | #include <linux/usb/cdc.h> | ||
30 | #include "visor.h" | 31 | #include "visor.h" |
31 | 32 | ||
32 | /* | 33 | /* |
@@ -479,6 +480,17 @@ static int visor_probe(struct usb_serial *serial, | |||
479 | 480 | ||
480 | dbg("%s", __func__); | 481 | dbg("%s", __func__); |
481 | 482 | ||
483 | /* | ||
484 | * some Samsung Android phones in modem mode have the same ID | ||
485 | * as SPH-I500, but they are ACM devices, so don't bind to them | ||
486 | */ | ||
487 | if (id->idVendor == SAMSUNG_VENDOR_ID && | ||
488 | id->idProduct == SAMSUNG_SPH_I500_ID && | ||
489 | serial->dev->descriptor.bDeviceClass == USB_CLASS_COMM && | ||
490 | serial->dev->descriptor.bDeviceSubClass == | ||
491 | USB_CDC_SUBCLASS_ACM) | ||
492 | return -ENODEV; | ||
493 | |||
482 | if (serial->dev->actconfig->desc.bConfigurationValue != 1) { | 494 | if (serial->dev->actconfig->desc.bConfigurationValue != 1) { |
483 | dev_err(&serial->dev->dev, "active config #%d != 1 ??\n", | 495 | dev_err(&serial->dev->dev, "active config #%d != 1 ??\n", |
484 | serial->dev->actconfig->desc.bConfigurationValue); | 496 | serial->dev->actconfig->desc.bConfigurationValue); |
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 24bd5d7c3deb..c1602b8c5594 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
@@ -1397,6 +1397,13 @@ UNUSUAL_DEV( 0x0f19, 0x0105, 0x0100, 0x0100, | |||
1397 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | 1397 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
1398 | US_FL_IGNORE_RESIDUE ), | 1398 | US_FL_IGNORE_RESIDUE ), |
1399 | 1399 | ||
1400 | /* Submitted by Nick Holloway */ | ||
1401 | UNUSUAL_DEV( 0x0f88, 0x042e, 0x0100, 0x0100, | ||
1402 | "VTech", | ||
1403 | "Kidizoom", | ||
1404 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
1405 | US_FL_FIX_CAPACITY ), | ||
1406 | |||
1400 | /* Reported by Michael Stattmann <michael@stattmann.com> */ | 1407 | /* Reported by Michael Stattmann <michael@stattmann.com> */ |
1401 | UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000, | 1408 | UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000, |
1402 | "Sony Ericsson", | 1409 | "Sony Ericsson", |
@@ -1890,6 +1897,13 @@ UNUSUAL_DEV( 0x1e68, 0x001b, 0x0000, 0x0000, | |||
1890 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | 1897 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
1891 | US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ), | 1898 | US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ), |
1892 | 1899 | ||
1900 | /* Reported by Jasper Mackenzie <scarletpimpernal@hotmail.com> */ | ||
1901 | UNUSUAL_DEV( 0x1e74, 0x4621, 0x0000, 0x0000, | ||
1902 | "Coby Electronics", | ||
1903 | "MP3 Player", | ||
1904 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
1905 | US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ), | ||
1906 | |||
1893 | UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001, | 1907 | UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001, |
1894 | "ST", | 1908 | "ST", |
1895 | "2A", | 1909 | "2A", |
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c index 3a7e9ff8a746..38e96ab90945 100644 --- a/drivers/w1/masters/omap_hdq.c +++ b/drivers/w1/masters/omap_hdq.c | |||
@@ -593,19 +593,17 @@ static int __devinit omap_hdq_probe(struct platform_device *pdev) | |||
593 | 593 | ||
594 | /* get interface & functional clock objects */ | 594 | /* get interface & functional clock objects */ |
595 | hdq_data->hdq_ick = clk_get(&pdev->dev, "ick"); | 595 | hdq_data->hdq_ick = clk_get(&pdev->dev, "ick"); |
596 | hdq_data->hdq_fck = clk_get(&pdev->dev, "fck"); | 596 | if (IS_ERR(hdq_data->hdq_ick)) { |
597 | dev_dbg(&pdev->dev, "Can't get HDQ ick clock object\n"); | ||
598 | ret = PTR_ERR(hdq_data->hdq_ick); | ||
599 | goto err_ick; | ||
600 | } | ||
597 | 601 | ||
598 | if (IS_ERR(hdq_data->hdq_ick) || IS_ERR(hdq_data->hdq_fck)) { | 602 | hdq_data->hdq_fck = clk_get(&pdev->dev, "fck"); |
599 | dev_dbg(&pdev->dev, "Can't get HDQ clock objects\n"); | 603 | if (IS_ERR(hdq_data->hdq_fck)) { |
600 | if (IS_ERR(hdq_data->hdq_ick)) { | 604 | dev_dbg(&pdev->dev, "Can't get HDQ fck clock object\n"); |
601 | ret = PTR_ERR(hdq_data->hdq_ick); | 605 | ret = PTR_ERR(hdq_data->hdq_fck); |
602 | goto err_clk; | 606 | goto err_fck; |
603 | } | ||
604 | if (IS_ERR(hdq_data->hdq_fck)) { | ||
605 | ret = PTR_ERR(hdq_data->hdq_fck); | ||
606 | clk_put(hdq_data->hdq_ick); | ||
607 | goto err_clk; | ||
608 | } | ||
609 | } | 607 | } |
610 | 608 | ||
611 | hdq_data->hdq_usecount = 0; | 609 | hdq_data->hdq_usecount = 0; |
@@ -665,10 +663,12 @@ err_fnclk: | |||
665 | clk_disable(hdq_data->hdq_ick); | 663 | clk_disable(hdq_data->hdq_ick); |
666 | 664 | ||
667 | err_intfclk: | 665 | err_intfclk: |
668 | clk_put(hdq_data->hdq_ick); | ||
669 | clk_put(hdq_data->hdq_fck); | 666 | clk_put(hdq_data->hdq_fck); |
670 | 667 | ||
671 | err_clk: | 668 | err_fck: |
669 | clk_put(hdq_data->hdq_ick); | ||
670 | |||
671 | err_ick: | ||
672 | iounmap(hdq_data->hdq_base); | 672 | iounmap(hdq_data->hdq_base); |
673 | 673 | ||
674 | err_ioremap: | 674 | err_ioremap: |
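The omap_hdq probe now acquires each clock separately and unwinds with one label per acquired resource, so a failed fck lookup releases only the ick clock and the earlier mapping. The general shape of that ladder, as a sketch (driver-data bookkeeping and the remaining probe steps are omitted):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;
	struct clk *ick, *fck;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	base = ioremap(res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	ick = clk_get(&pdev->dev, "ick");
	if (IS_ERR(ick)) {
		ret = PTR_ERR(ick);
		goto err_ick;		/* only the mapping needs undoing */
	}

	fck = clk_get(&pdev->dev, "fck");
	if (IS_ERR(fck)) {
		ret = PTR_ERR(fck);
		goto err_fck;		/* release ick first, then the mapping */
	}

	/* ... rest of probe; a real driver stashes base/ick/fck in drvdata ... */
	return 0;

err_fck:
	clk_put(ick);
err_ick:
	iounmap(base);
	return ret;
}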
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 2e2400e7322e..31649b7b672f 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
@@ -862,12 +862,12 @@ config SBC_EPX_C3_WATCHDOG | |||
862 | 862 | ||
863 | # M68K Architecture | 863 | # M68K Architecture |
864 | 864 | ||
865 | config M548x_WATCHDOG | 865 | config M54xx_WATCHDOG |
866 | tristate "MCF548x watchdog support" | 866 | tristate "MCF54xx watchdog support" |
867 | depends on M548x | 867 | depends on M548x |
868 | help | 868 | help |
869 | To compile this driver as a module, choose M here: the | 869 | To compile this driver as a module, choose M here: the |
870 | module will be called m548x_wdt. | 870 | module will be called m54xx_wdt. |
871 | 871 | ||
872 | # MIPS Architecture | 872 | # MIPS Architecture |
873 | 873 | ||
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index dd776651917c..20e44c4782b3 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile | |||
@@ -106,7 +106,7 @@ obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o | |||
106 | # M32R Architecture | 106 | # M32R Architecture |
107 | 107 | ||
108 | # M68K Architecture | 108 | # M68K Architecture |
109 | obj-$(CONFIG_M548x_WATCHDOG) += m548x_wdt.o | 109 | obj-$(CONFIG_M54xx_WATCHDOG) += m54xx_wdt.o |
110 | 110 | ||
111 | # MIPS Architecture | 111 | # MIPS Architecture |
112 | obj-$(CONFIG_ATH79_WDT) += ath79_wdt.o | 112 | obj-$(CONFIG_ATH79_WDT) += ath79_wdt.o |
diff --git a/drivers/watchdog/m548x_wdt.c b/drivers/watchdog/m54xx_wdt.c index cabbcfe1c847..4d43286074aa 100644 --- a/drivers/watchdog/m548x_wdt.c +++ b/drivers/watchdog/m54xx_wdt.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/watchdog/m548x_wdt.c | 2 | * drivers/watchdog/m54xx_wdt.c |
3 | * | 3 | * |
4 | * Watchdog driver for ColdFire MCF548x processors | 4 | * Watchdog driver for ColdFire MCF547x & MCF548x processors |
5 | * Copyright 2010 (c) Philippe De Muyter <phdm@macqel.be> | 5 | * Copyright 2010 (c) Philippe De Muyter <phdm@macqel.be> |
6 | * | 6 | * |
7 | * Adapted from the IXP4xx watchdog driver, which carries these notices: | 7 | * Adapted from the IXP4xx watchdog driver, which carries these notices: |
@@ -29,8 +29,8 @@ | |||
29 | #include <linux/uaccess.h> | 29 | #include <linux/uaccess.h> |
30 | 30 | ||
31 | #include <asm/coldfire.h> | 31 | #include <asm/coldfire.h> |
32 | #include <asm/m548xsim.h> | 32 | #include <asm/m54xxsim.h> |
33 | #include <asm/m548xgpt.h> | 33 | #include <asm/m54xxgpt.h> |
34 | 34 | ||
35 | static int nowayout = WATCHDOG_NOWAYOUT; | 35 | static int nowayout = WATCHDOG_NOWAYOUT; |
36 | static unsigned int heartbeat = 30; /* (secs) Default is 0.5 minute */ | 36 | static unsigned int heartbeat = 30; /* (secs) Default is 0.5 minute */ |
@@ -76,7 +76,7 @@ static void wdt_keepalive(void) | |||
76 | __raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0); | 76 | __raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0); |
77 | } | 77 | } |
78 | 78 | ||
79 | static int m548x_wdt_open(struct inode *inode, struct file *file) | 79 | static int m54xx_wdt_open(struct inode *inode, struct file *file) |
80 | { | 80 | { |
81 | if (test_and_set_bit(WDT_IN_USE, &wdt_status)) | 81 | if (test_and_set_bit(WDT_IN_USE, &wdt_status)) |
82 | return -EBUSY; | 82 | return -EBUSY; |
@@ -86,7 +86,7 @@ static int m548x_wdt_open(struct inode *inode, struct file *file) | |||
86 | return nonseekable_open(inode, file); | 86 | return nonseekable_open(inode, file); |
87 | } | 87 | } |
88 | 88 | ||
89 | static ssize_t m548x_wdt_write(struct file *file, const char *data, | 89 | static ssize_t m54xx_wdt_write(struct file *file, const char *data, |
90 | size_t len, loff_t *ppos) | 90 | size_t len, loff_t *ppos) |
91 | { | 91 | { |
92 | if (len) { | 92 | if (len) { |
@@ -112,10 +112,10 @@ static ssize_t m548x_wdt_write(struct file *file, const char *data, | |||
112 | static const struct watchdog_info ident = { | 112 | static const struct watchdog_info ident = { |
113 | .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | | 113 | .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | |
114 | WDIOF_KEEPALIVEPING, | 114 | WDIOF_KEEPALIVEPING, |
115 | .identity = "Coldfire M548x Watchdog", | 115 | .identity = "Coldfire M54xx Watchdog", |
116 | }; | 116 | }; |
117 | 117 | ||
118 | static long m548x_wdt_ioctl(struct file *file, unsigned int cmd, | 118 | static long m54xx_wdt_ioctl(struct file *file, unsigned int cmd, |
119 | unsigned long arg) | 119 | unsigned long arg) |
120 | { | 120 | { |
121 | int ret = -ENOTTY; | 121 | int ret = -ENOTTY; |
@@ -161,7 +161,7 @@ static long m548x_wdt_ioctl(struct file *file, unsigned int cmd, | |||
161 | return ret; | 161 | return ret; |
162 | } | 162 | } |
163 | 163 | ||
164 | static int m548x_wdt_release(struct inode *inode, struct file *file) | 164 | static int m54xx_wdt_release(struct inode *inode, struct file *file) |
165 | { | 165 | { |
166 | if (test_bit(WDT_OK_TO_CLOSE, &wdt_status)) | 166 | if (test_bit(WDT_OK_TO_CLOSE, &wdt_status)) |
167 | wdt_disable(); | 167 | wdt_disable(); |
@@ -177,45 +177,45 @@ static int m548x_wdt_release(struct inode *inode, struct file *file) | |||
177 | } | 177 | } |
178 | 178 | ||
179 | 179 | ||
180 | static const struct file_operations m548x_wdt_fops = { | 180 | static const struct file_operations m54xx_wdt_fops = { |
181 | .owner = THIS_MODULE, | 181 | .owner = THIS_MODULE, |
182 | .llseek = no_llseek, | 182 | .llseek = no_llseek, |
183 | .write = m548x_wdt_write, | 183 | .write = m54xx_wdt_write, |
184 | .unlocked_ioctl = m548x_wdt_ioctl, | 184 | .unlocked_ioctl = m54xx_wdt_ioctl, |
185 | .open = m548x_wdt_open, | 185 | .open = m54xx_wdt_open, |
186 | .release = m548x_wdt_release, | 186 | .release = m54xx_wdt_release, |
187 | }; | 187 | }; |
188 | 188 | ||
189 | static struct miscdevice m548x_wdt_miscdev = { | 189 | static struct miscdevice m54xx_wdt_miscdev = { |
190 | .minor = WATCHDOG_MINOR, | 190 | .minor = WATCHDOG_MINOR, |
191 | .name = "watchdog", | 191 | .name = "watchdog", |
192 | .fops = &m548x_wdt_fops, | 192 | .fops = &m54xx_wdt_fops, |
193 | }; | 193 | }; |
194 | 194 | ||
195 | static int __init m548x_wdt_init(void) | 195 | static int __init m54xx_wdt_init(void) |
196 | { | 196 | { |
197 | if (!request_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4, | 197 | if (!request_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4, |
198 | "Coldfire M548x Watchdog")) { | 198 | "Coldfire M54xx Watchdog")) { |
199 | printk(KERN_WARNING | 199 | printk(KERN_WARNING |
200 | "Coldfire M548x Watchdog : I/O region busy\n"); | 200 | "Coldfire M54xx Watchdog : I/O region busy\n"); |
201 | return -EBUSY; | 201 | return -EBUSY; |
202 | } | 202 | } |
203 | printk(KERN_INFO "ColdFire watchdog driver is loaded.\n"); | 203 | printk(KERN_INFO "ColdFire watchdog driver is loaded.\n"); |
204 | 204 | ||
205 | return misc_register(&m548x_wdt_miscdev); | 205 | return misc_register(&m54xx_wdt_miscdev); |
206 | } | 206 | } |
207 | 207 | ||
208 | static void __exit m548x_wdt_exit(void) | 208 | static void __exit m54xx_wdt_exit(void) |
209 | { | 209 | { |
210 | misc_deregister(&m548x_wdt_miscdev); | 210 | misc_deregister(&m54xx_wdt_miscdev); |
211 | release_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4); | 211 | release_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4); |
212 | } | 212 | } |
213 | 213 | ||
214 | module_init(m548x_wdt_init); | 214 | module_init(m54xx_wdt_init); |
215 | module_exit(m548x_wdt_exit); | 215 | module_exit(m54xx_wdt_exit); |
216 | 216 | ||
217 | MODULE_AUTHOR("Philippe De Muyter <phdm@macqel.be>"); | 217 | MODULE_AUTHOR("Philippe De Muyter <phdm@macqel.be>"); |
218 | MODULE_DESCRIPTION("Coldfire M548x Watchdog"); | 218 | MODULE_DESCRIPTION("Coldfire M54xx Watchdog"); |
219 | 219 | ||
220 | module_param(heartbeat, int, 0); | 220 | module_param(heartbeat, int, 0); |
221 | MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds (default 30s)"); | 221 | MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds (default 30s)"); |
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index db8c4c4ac880..24177272bcb8 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c | |||
@@ -37,11 +37,19 @@ static enum shutdown_state shutting_down = SHUTDOWN_INVALID; | |||
37 | #ifdef CONFIG_PM_SLEEP | 37 | #ifdef CONFIG_PM_SLEEP |
38 | static int xen_hvm_suspend(void *data) | 38 | static int xen_hvm_suspend(void *data) |
39 | { | 39 | { |
40 | int err; | ||
40 | struct sched_shutdown r = { .reason = SHUTDOWN_suspend }; | 41 | struct sched_shutdown r = { .reason = SHUTDOWN_suspend }; |
41 | int *cancelled = data; | 42 | int *cancelled = data; |
42 | 43 | ||
43 | BUG_ON(!irqs_disabled()); | 44 | BUG_ON(!irqs_disabled()); |
44 | 45 | ||
46 | err = sysdev_suspend(PMSG_SUSPEND); | ||
47 | if (err) { | ||
48 | printk(KERN_ERR "xen_hvm_suspend: sysdev_suspend failed: %d\n", | ||
49 | err); | ||
50 | return err; | ||
51 | } | ||
52 | |||
45 | *cancelled = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r); | 53 | *cancelled = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r); |
46 | 54 | ||
47 | xen_hvm_post_suspend(*cancelled); | 55 | xen_hvm_post_suspend(*cancelled); |
@@ -53,6 +61,8 @@ static int xen_hvm_suspend(void *data) | |||
53 | xen_timer_resume(); | 61 | xen_timer_resume(); |
54 | } | 62 | } |
55 | 63 | ||
64 | sysdev_resume(); | ||
65 | |||
56 | return 0; | 66 | return 0; |
57 | } | 67 | } |
58 | 68 | ||
diff --git a/fs/afs/write.c b/fs/afs/write.c index 15690bb1d3b5..789b3afb3423 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c | |||
@@ -140,6 +140,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping, | |||
140 | candidate->first = candidate->last = index; | 140 | candidate->first = candidate->last = index; |
141 | candidate->offset_first = from; | 141 | candidate->offset_first = from; |
142 | candidate->to_last = to; | 142 | candidate->to_last = to; |
143 | INIT_LIST_HEAD(&candidate->link); | ||
143 | candidate->usage = 1; | 144 | candidate->usage = 1; |
144 | candidate->state = AFS_WBACK_PENDING; | 145 | candidate->state = AFS_WBACK_PENDING; |
145 | init_waitqueue_head(&candidate->waitq); | 146 | init_waitqueue_head(&candidate->waitq); |
@@ -239,15 +239,23 @@ static void __put_ioctx(struct kioctx *ctx) | |||
239 | call_rcu(&ctx->rcu_head, ctx_rcu_free); | 239 | call_rcu(&ctx->rcu_head, ctx_rcu_free); |
240 | } | 240 | } |
241 | 241 | ||
242 | #define get_ioctx(kioctx) do { \ | 242 | static inline void get_ioctx(struct kioctx *kioctx) |
243 | BUG_ON(atomic_read(&(kioctx)->users) <= 0); \ | 243 | { |
244 | atomic_inc(&(kioctx)->users); \ | 244 | BUG_ON(atomic_read(&kioctx->users) <= 0); |
245 | } while (0) | 245 | atomic_inc(&kioctx->users); |
246 | #define put_ioctx(kioctx) do { \ | 246 | } |
247 | BUG_ON(atomic_read(&(kioctx)->users) <= 0); \ | 247 | |
248 | if (unlikely(atomic_dec_and_test(&(kioctx)->users))) \ | 248 | static inline int try_get_ioctx(struct kioctx *kioctx) |
249 | __put_ioctx(kioctx); \ | 249 | { |
250 | } while (0) | 250 | return atomic_inc_not_zero(&kioctx->users); |
251 | } | ||
252 | |||
253 | static inline void put_ioctx(struct kioctx *kioctx) | ||
254 | { | ||
255 | BUG_ON(atomic_read(&kioctx->users) <= 0); | ||
256 | if (unlikely(atomic_dec_and_test(&kioctx->users))) | ||
257 | __put_ioctx(kioctx); | ||
258 | } | ||
251 | 259 | ||
252 | /* ioctx_alloc | 260 | /* ioctx_alloc |
253 | * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed. | 261 | * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed. |
@@ -601,8 +609,13 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id) | |||
601 | rcu_read_lock(); | 609 | rcu_read_lock(); |
602 | 610 | ||
603 | hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) { | 611 | hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) { |
604 | if (ctx->user_id == ctx_id && !ctx->dead) { | 612 | /* |
605 | get_ioctx(ctx); | 613 | * RCU protects us against accessing freed memory but |
614 | * we have to be careful not to get a reference when the | ||
615 | * reference count already dropped to 0 (ctx->dead test | ||
616 | * is unreliable because of races). | ||
617 | */ | ||
618 | if (ctx->user_id == ctx_id && !ctx->dead && try_get_ioctx(ctx)){ | ||
606 | ret = ctx; | 619 | ret = ctx; |
607 | break; | 620 | break; |
608 | } | 621 | } |
@@ -1629,6 +1642,23 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, | |||
1629 | goto out_put_req; | 1642 | goto out_put_req; |
1630 | 1643 | ||
1631 | spin_lock_irq(&ctx->ctx_lock); | 1644 | spin_lock_irq(&ctx->ctx_lock); |
1645 | /* | ||
1646 | * We could have raced with io_destroy() and are currently holding a | ||
1647 | * reference to ctx which should be destroyed. We cannot submit IO | ||
1648 | * since ctx gets freed as soon as io_submit() puts its reference. The | ||
1649 | * check here is reliable: io_destroy() sets ctx->dead before waiting | ||
1650 | * for outstanding IO and the barrier between these two is realized by | ||
1651 | * unlock of mm->ioctx_lock and lock of ctx->ctx_lock. Analogously we | ||
1652 | * increment ctx->reqs_active before checking for ctx->dead and the | ||
1653 | * barrier is realized by unlock and lock of ctx->ctx_lock. Thus if we | ||
1654 | * don't see ctx->dead set here, io_destroy() waits for our IO to | ||
1655 | * finish. | ||
1656 | */ | ||
1657 | if (ctx->dead) { | ||
1658 | spin_unlock_irq(&ctx->ctx_lock); | ||
1659 | ret = -EINVAL; | ||
1660 | goto out_put_req; | ||
1661 | } | ||
1632 | aio_run_iocb(req); | 1662 | aio_run_iocb(req); |
1633 | if (!list_empty(&ctx->run_list)) { | 1663 | if (!list_empty(&ctx->run_list)) { |
1634 | /* drain the run list */ | 1664 | /* drain the run list */ |
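
Editor's sketch (not part of the patch): the lookup_ioctx() change only takes a reference when the count has not already reached zero, which is what makes the RCU lookup safe. A self-contained userspace analogue of that try-get pattern using C11 atomics (an analogue of atomic_inc_not_zero(), not the kernel implementation):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct obj {
        atomic_int users;
    };

    /* increment the refcount unless it has already dropped to zero;
     * returns true when a reference was successfully taken */
    static bool try_get(struct obj *o)
    {
        int v = atomic_load(&o->users);
        while (v != 0) {
            if (atomic_compare_exchange_weak(&o->users, &v, v + 1))
                return true;
            /* v was reloaded by the failed CAS; loop and retry */
        }
        return false;
    }

    static void put(struct obj *o)
    {
        if (atomic_fetch_sub(&o->users, 1) == 1)
            printf("last reference dropped, free the object here\n");
    }

    int main(void)
    {
        struct obj o = { .users = 1 };

        if (try_get(&o))        /* succeeds: count goes 1 -> 2 */
            put(&o);
        put(&o);                /* drops the initial reference, count hits 0 */
        if (!try_get(&o))       /* now fails: a dying object is never revived */
            printf("object is dying, lookup must not take a reference\n");
        return 0;
    }
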
diff --git a/fs/block_dev.c b/fs/block_dev.c index 333a7bb4cb9c..889287019599 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -873,6 +873,11 @@ int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk) | |||
873 | ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj); | 873 | ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj); |
874 | if (ret) | 874 | if (ret) |
875 | goto out_del; | 875 | goto out_del; |
876 | /* | ||
877 | * bdev could be deleted beneath us which would implicitly destroy | ||
878 | * the holder directory. Hold on to it. | ||
879 | */ | ||
880 | kobject_get(bdev->bd_part->holder_dir); | ||
876 | 881 | ||
877 | list_add(&holder->list, &bdev->bd_holder_disks); | 882 | list_add(&holder->list, &bdev->bd_holder_disks); |
878 | goto out_unlock; | 883 | goto out_unlock; |
@@ -909,6 +914,7 @@ void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk) | |||
909 | del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); | 914 | del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); |
910 | del_symlink(bdev->bd_part->holder_dir, | 915 | del_symlink(bdev->bd_part->holder_dir, |
911 | &disk_to_dev(disk)->kobj); | 916 | &disk_to_dev(disk)->kobj); |
917 | kobject_put(bdev->bd_part->holder_dir); | ||
912 | list_del_init(&holder->list); | 918 | list_del_init(&holder->list); |
913 | kfree(holder); | 919 | kfree(holder); |
914 | } | 920 | } |
@@ -922,14 +928,15 @@ EXPORT_SYMBOL_GPL(bd_unlink_disk_holder); | |||
922 | * flush_disk - invalidates all buffer-cache entries on a disk | 928 | * flush_disk - invalidates all buffer-cache entries on a disk |
923 | * | 929 | * |
924 | * @bdev: struct block device to be flushed | 930 | * @bdev: struct block device to be flushed |
931 | * @kill_dirty: flag to guide handling of dirty inodes | ||
925 | * | 932 | * |
926 | * Invalidates all buffer-cache entries on a disk. It should be called | 933 | * Invalidates all buffer-cache entries on a disk. It should be called |
927 | * when a disk has been changed -- either by a media change or online | 934 | * when a disk has been changed -- either by a media change or online |
928 | * resize. | 935 | * resize. |
929 | */ | 936 | */ |
930 | static void flush_disk(struct block_device *bdev) | 937 | static void flush_disk(struct block_device *bdev, bool kill_dirty) |
931 | { | 938 | { |
932 | if (__invalidate_device(bdev)) { | 939 | if (__invalidate_device(bdev, kill_dirty)) { |
933 | char name[BDEVNAME_SIZE] = ""; | 940 | char name[BDEVNAME_SIZE] = ""; |
934 | 941 | ||
935 | if (bdev->bd_disk) | 942 | if (bdev->bd_disk) |
@@ -966,7 +973,7 @@ void check_disk_size_change(struct gendisk *disk, struct block_device *bdev) | |||
966 | "%s: detected capacity change from %lld to %lld\n", | 973 | "%s: detected capacity change from %lld to %lld\n", |
967 | name, bdev_size, disk_size); | 974 | name, bdev_size, disk_size); |
968 | i_size_write(bdev->bd_inode, disk_size); | 975 | i_size_write(bdev->bd_inode, disk_size); |
969 | flush_disk(bdev); | 976 | flush_disk(bdev, false); |
970 | } | 977 | } |
971 | } | 978 | } |
972 | EXPORT_SYMBOL(check_disk_size_change); | 979 | EXPORT_SYMBOL(check_disk_size_change); |
@@ -1019,7 +1026,7 @@ int check_disk_change(struct block_device *bdev) | |||
1019 | if (!(events & DISK_EVENT_MEDIA_CHANGE)) | 1026 | if (!(events & DISK_EVENT_MEDIA_CHANGE)) |
1020 | return 0; | 1027 | return 0; |
1021 | 1028 | ||
1022 | flush_disk(bdev); | 1029 | flush_disk(bdev, true); |
1023 | if (bdops->revalidate_disk) | 1030 | if (bdops->revalidate_disk) |
1024 | bdops->revalidate_disk(bdev->bd_disk); | 1031 | bdops->revalidate_disk(bdev->bd_disk); |
1025 | return 1; | 1032 | return 1; |
@@ -1215,12 +1222,6 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) | |||
1215 | 1222 | ||
1216 | res = __blkdev_get(bdev, mode, 0); | 1223 | res = __blkdev_get(bdev, mode, 0); |
1217 | 1224 | ||
1218 | /* __blkdev_get() may alter read only status, check it afterwards */ | ||
1219 | if (!res && (mode & FMODE_WRITE) && bdev_read_only(bdev)) { | ||
1220 | __blkdev_put(bdev, mode, 0); | ||
1221 | res = -EACCES; | ||
1222 | } | ||
1223 | |||
1224 | if (whole) { | 1225 | if (whole) { |
1225 | /* finish claiming */ | 1226 | /* finish claiming */ |
1226 | mutex_lock(&bdev->bd_mutex); | 1227 | mutex_lock(&bdev->bd_mutex); |
@@ -1298,6 +1299,11 @@ struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, | |||
1298 | if (err) | 1299 | if (err) |
1299 | return ERR_PTR(err); | 1300 | return ERR_PTR(err); |
1300 | 1301 | ||
1302 | if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) { | ||
1303 | blkdev_put(bdev, mode); | ||
1304 | return ERR_PTR(-EACCES); | ||
1305 | } | ||
1306 | |||
1301 | return bdev; | 1307 | return bdev; |
1302 | } | 1308 | } |
1303 | EXPORT_SYMBOL(blkdev_get_by_path); | 1309 | EXPORT_SYMBOL(blkdev_get_by_path); |
@@ -1601,7 +1607,7 @@ fail: | |||
1601 | } | 1607 | } |
1602 | EXPORT_SYMBOL(lookup_bdev); | 1608 | EXPORT_SYMBOL(lookup_bdev); |
1603 | 1609 | ||
1604 | int __invalidate_device(struct block_device *bdev) | 1610 | int __invalidate_device(struct block_device *bdev, bool kill_dirty) |
1605 | { | 1611 | { |
1606 | struct super_block *sb = get_super(bdev); | 1612 | struct super_block *sb = get_super(bdev); |
1607 | int res = 0; | 1613 | int res = 0; |
@@ -1614,7 +1620,7 @@ int __invalidate_device(struct block_device *bdev) | |||
1614 | * hold). | 1620 | * hold). |
1615 | */ | 1621 | */ |
1616 | shrink_dcache_sb(sb); | 1622 | shrink_dcache_sb(sb); |
1617 | res = invalidate_inodes(sb); | 1623 | res = invalidate_inodes(sb, kill_dirty); |
1618 | drop_super(sb); | 1624 | drop_super(sb); |
1619 | } | 1625 | } |
1620 | invalidate_bdev(bdev); | 1626 | invalidate_bdev(bdev); |
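
Editor's sketch (not part of the patch): bd_link_disk_holder() pins the holder directory with kobject_get() so it cannot disappear while the symlink still points at it, and bd_unlink_disk_holder() drops that pin. A hedged, plain-refcount analogue of that "hold a reference for the lifetime of the link" rule (not the real kobject API):

    #include <stdio.h>

    struct dir {
        int refs;               /* stands in for the kobject refcount */
    };

    static void dir_get(struct dir *d) { d->refs++; }
    static void dir_put(struct dir *d)
    {
        if (--d->refs == 0)
            printf("directory released\n");
    }

    /* creating the link pins the directory; removing it drops the pin */
    static void link_holder(struct dir *holder_dir)
    {
        /* ...create the symlink... */
        dir_get(holder_dir);    /* the dir could otherwise vanish beneath us */
    }

    static void unlink_holder(struct dir *holder_dir)
    {
        /* ...delete the symlink... */
        dir_put(holder_dir);
    }

    int main(void)
    {
        struct dir holder = { .refs = 1 };
        link_holder(&holder);
        unlink_holder(&holder);
        dir_put(&holder);       /* the owner drops the original reference last */
        return 0;
    }
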
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index 15b5ca2a2606..9c949348510b 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c | |||
@@ -37,6 +37,9 @@ static struct posix_acl *btrfs_get_acl(struct inode *inode, int type) | |||
37 | char *value = NULL; | 37 | char *value = NULL; |
38 | struct posix_acl *acl; | 38 | struct posix_acl *acl; |
39 | 39 | ||
40 | if (!IS_POSIXACL(inode)) | ||
41 | return NULL; | ||
42 | |||
40 | acl = get_cached_acl(inode, type); | 43 | acl = get_cached_acl(inode, type); |
41 | if (acl != ACL_NOT_CACHED) | 44 | if (acl != ACL_NOT_CACHED) |
42 | return acl; | 45 | return acl; |
@@ -84,6 +87,9 @@ static int btrfs_xattr_acl_get(struct dentry *dentry, const char *name, | |||
84 | struct posix_acl *acl; | 87 | struct posix_acl *acl; |
85 | int ret = 0; | 88 | int ret = 0; |
86 | 89 | ||
90 | if (!IS_POSIXACL(dentry->d_inode)) | ||
91 | return -EOPNOTSUPP; | ||
92 | |||
87 | acl = btrfs_get_acl(dentry->d_inode, type); | 93 | acl = btrfs_get_acl(dentry->d_inode, type); |
88 | 94 | ||
89 | if (IS_ERR(acl)) | 95 | if (IS_ERR(acl)) |
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index f745287fbf2e..4d2110eafe29 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c | |||
@@ -562,7 +562,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | |||
562 | u64 em_len; | 562 | u64 em_len; |
563 | u64 em_start; | 563 | u64 em_start; |
564 | struct extent_map *em; | 564 | struct extent_map *em; |
565 | int ret; | 565 | int ret = -ENOMEM; |
566 | u32 *sums; | 566 | u32 *sums; |
567 | 567 | ||
568 | tree = &BTRFS_I(inode)->io_tree; | 568 | tree = &BTRFS_I(inode)->io_tree; |
@@ -577,6 +577,9 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | |||
577 | 577 | ||
578 | compressed_len = em->block_len; | 578 | compressed_len = em->block_len; |
579 | cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS); | 579 | cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS); |
580 | if (!cb) | ||
581 | goto out; | ||
582 | |||
580 | atomic_set(&cb->pending_bios, 0); | 583 | atomic_set(&cb->pending_bios, 0); |
581 | cb->errors = 0; | 584 | cb->errors = 0; |
582 | cb->inode = inode; | 585 | cb->inode = inode; |
@@ -597,13 +600,18 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | |||
597 | 600 | ||
598 | nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) / | 601 | nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) / |
599 | PAGE_CACHE_SIZE; | 602 | PAGE_CACHE_SIZE; |
600 | cb->compressed_pages = kmalloc(sizeof(struct page *) * nr_pages, | 603 | cb->compressed_pages = kzalloc(sizeof(struct page *) * nr_pages, |
601 | GFP_NOFS); | 604 | GFP_NOFS); |
605 | if (!cb->compressed_pages) | ||
606 | goto fail1; | ||
607 | |||
602 | bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; | 608 | bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; |
603 | 609 | ||
604 | for (page_index = 0; page_index < nr_pages; page_index++) { | 610 | for (page_index = 0; page_index < nr_pages; page_index++) { |
605 | cb->compressed_pages[page_index] = alloc_page(GFP_NOFS | | 611 | cb->compressed_pages[page_index] = alloc_page(GFP_NOFS | |
606 | __GFP_HIGHMEM); | 612 | __GFP_HIGHMEM); |
613 | if (!cb->compressed_pages[page_index]) | ||
614 | goto fail2; | ||
607 | } | 615 | } |
608 | cb->nr_pages = nr_pages; | 616 | cb->nr_pages = nr_pages; |
609 | 617 | ||
@@ -614,6 +622,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | |||
614 | cb->len = uncompressed_len; | 622 | cb->len = uncompressed_len; |
615 | 623 | ||
616 | comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS); | 624 | comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS); |
625 | if (!comp_bio) | ||
626 | goto fail2; | ||
617 | comp_bio->bi_private = cb; | 627 | comp_bio->bi_private = cb; |
618 | comp_bio->bi_end_io = end_compressed_bio_read; | 628 | comp_bio->bi_end_io = end_compressed_bio_read; |
619 | atomic_inc(&cb->pending_bios); | 629 | atomic_inc(&cb->pending_bios); |
@@ -681,6 +691,17 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | |||
681 | 691 | ||
682 | bio_put(comp_bio); | 692 | bio_put(comp_bio); |
683 | return 0; | 693 | return 0; |
694 | |||
695 | fail2: | ||
696 | for (page_index = 0; page_index < nr_pages; page_index++) | ||
697 | free_page((unsigned long)cb->compressed_pages[page_index]); | ||
698 | |||
699 | kfree(cb->compressed_pages); | ||
700 | fail1: | ||
701 | kfree(cb); | ||
702 | out: | ||
703 | free_extent_map(em); | ||
704 | return ret; | ||
684 | } | 705 | } |
685 | 706 | ||
686 | static struct list_head comp_idle_workspace[BTRFS_COMPRESS_TYPES]; | 707 | static struct list_head comp_idle_workspace[BTRFS_COMPRESS_TYPES]; |
@@ -900,7 +921,7 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page, | |||
900 | return ret; | 921 | return ret; |
901 | } | 922 | } |
902 | 923 | ||
903 | void __exit btrfs_exit_compress(void) | 924 | void btrfs_exit_compress(void) |
904 | { | 925 | { |
905 | free_workspaces(); | 926 | free_workspaces(); |
906 | } | 927 | } |
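
Editor's sketch (not part of the patch): the compression.c hunks replace silently-ignored allocation failures with a goto-based unwind (fail2/fail1/out) that frees exactly what was allocated so far. A minimal standalone example of that staged-cleanup shape, with placeholder allocations instead of the btrfs structures:

    #include <stdlib.h>

    static int build(void)
    {
        int ret = -1;           /* pessimistic default, like ret = -ENOMEM */
        char *ctx, **pages;
        int i;

        ctx = malloc(64);
        if (!ctx)
            goto out;

        pages = calloc(8, sizeof(*pages));  /* zeroed, so partial unwind is safe */
        if (!pages)
            goto fail1;

        for (i = 0; i < 8; i++) {
            pages[i] = malloc(4096);
            if (!pages[i])
                goto fail2;
        }

        /* ...hand everything off on success; here we just release it again... */
        for (i = 0; i < 8; i++)
            free(pages[i]);
        free(pages);
        free(ctx);
        return 0;

    fail2:
        for (i = 0; i < 8; i++)
            free(pages[i]);     /* free(NULL) is a no-op for slots never filled */
        free(pages);
    fail1:
        free(ctx);
    out:
        return ret;
    }

    int main(void) { return build() ? 1 : 0; }
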
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 2c98b3af6052..6f820fa23df4 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
@@ -1254,6 +1254,7 @@ struct btrfs_root { | |||
1254 | #define BTRFS_MOUNT_SPACE_CACHE (1 << 12) | 1254 | #define BTRFS_MOUNT_SPACE_CACHE (1 << 12) |
1255 | #define BTRFS_MOUNT_CLEAR_CACHE (1 << 13) | 1255 | #define BTRFS_MOUNT_CLEAR_CACHE (1 << 13) |
1256 | #define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14) | 1256 | #define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14) |
1257 | #define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15) | ||
1257 | 1258 | ||
1258 | #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) | 1259 | #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) |
1259 | #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) | 1260 | #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) |
@@ -2218,6 +2219,8 @@ int btrfs_error_unpin_extent_range(struct btrfs_root *root, | |||
2218 | u64 start, u64 end); | 2219 | u64 start, u64 end); |
2219 | int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr, | 2220 | int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr, |
2220 | u64 num_bytes); | 2221 | u64 num_bytes); |
2222 | int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, | ||
2223 | struct btrfs_root *root, u64 type); | ||
2221 | 2224 | ||
2222 | /* ctree.c */ | 2225 | /* ctree.c */ |
2223 | int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, | 2226 | int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, |
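
Editor's sketch (not part of the patch): the new BTRFS_MOUNT_ENOSPC_DEBUG bit plugs into the existing btrfs_set_opt()/btrfs_test_opt() macros shown in the hunk. A small, standalone illustration of how such single-bit option flags behave (the macros below mimic the shape above but are not the btrfs definitions):

    #include <stdio.h>

    /* option bits, one per flag, mirroring the BTRFS_MOUNT_* style */
    #define MOUNT_SPACE_CACHE   (1 << 12)
    #define MOUNT_ENOSPC_DEBUG  (1 << 15)

    #define set_opt(o, opt)    ((o) |= MOUNT_##opt)
    #define clear_opt(o, opt)  ((o) &= ~MOUNT_##opt)
    #define test_opt(o, opt)   ((o) & MOUNT_##opt)

    int main(void)
    {
        unsigned long opts = 0;

        set_opt(opts, ENOSPC_DEBUG);
        printf("enospc_debug=%d space_cache=%d\n",
               test_opt(opts, ENOSPC_DEBUG) ? 1 : 0,
               test_opt(opts, SPACE_CACHE) ? 1 : 0);

        clear_opt(opts, ENOSPC_DEBUG);
        printf("after clear: enospc_debug=%d\n",
               test_opt(opts, ENOSPC_DEBUG) ? 1 : 0);
        return 0;
    }
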
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index b531c36455d8..e1aa8d607bc7 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -359,10 +359,14 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page) | |||
359 | 359 | ||
360 | tree = &BTRFS_I(page->mapping->host)->io_tree; | 360 | tree = &BTRFS_I(page->mapping->host)->io_tree; |
361 | 361 | ||
362 | if (page->private == EXTENT_PAGE_PRIVATE) | 362 | if (page->private == EXTENT_PAGE_PRIVATE) { |
363 | WARN_ON(1); | ||
363 | goto out; | 364 | goto out; |
364 | if (!page->private) | 365 | } |
366 | if (!page->private) { | ||
367 | WARN_ON(1); | ||
365 | goto out; | 368 | goto out; |
369 | } | ||
366 | len = page->private >> 2; | 370 | len = page->private >> 2; |
367 | WARN_ON(len == 0); | 371 | WARN_ON(len == 0); |
368 | 372 | ||
@@ -1550,6 +1554,7 @@ static int transaction_kthread(void *arg) | |||
1550 | spin_unlock(&root->fs_info->new_trans_lock); | 1554 | spin_unlock(&root->fs_info->new_trans_lock); |
1551 | 1555 | ||
1552 | trans = btrfs_join_transaction(root, 1); | 1556 | trans = btrfs_join_transaction(root, 1); |
1557 | BUG_ON(IS_ERR(trans)); | ||
1553 | if (transid == trans->transid) { | 1558 | if (transid == trans->transid) { |
1554 | ret = btrfs_commit_transaction(trans, root); | 1559 | ret = btrfs_commit_transaction(trans, root); |
1555 | BUG_ON(ret); | 1560 | BUG_ON(ret); |
@@ -2453,10 +2458,14 @@ int btrfs_commit_super(struct btrfs_root *root) | |||
2453 | up_write(&root->fs_info->cleanup_work_sem); | 2458 | up_write(&root->fs_info->cleanup_work_sem); |
2454 | 2459 | ||
2455 | trans = btrfs_join_transaction(root, 1); | 2460 | trans = btrfs_join_transaction(root, 1); |
2461 | if (IS_ERR(trans)) | ||
2462 | return PTR_ERR(trans); | ||
2456 | ret = btrfs_commit_transaction(trans, root); | 2463 | ret = btrfs_commit_transaction(trans, root); |
2457 | BUG_ON(ret); | 2464 | BUG_ON(ret); |
2458 | /* run commit again to drop the original snapshot */ | 2465 | /* run commit again to drop the original snapshot */ |
2459 | trans = btrfs_join_transaction(root, 1); | 2466 | trans = btrfs_join_transaction(root, 1); |
2467 | if (IS_ERR(trans)) | ||
2468 | return PTR_ERR(trans); | ||
2460 | btrfs_commit_transaction(trans, root); | 2469 | btrfs_commit_transaction(trans, root); |
2461 | ret = btrfs_write_and_wait_transaction(NULL, root); | 2470 | ret = btrfs_write_and_wait_transaction(NULL, root); |
2462 | BUG_ON(ret); | 2471 | BUG_ON(ret); |
@@ -2554,6 +2563,8 @@ int close_ctree(struct btrfs_root *root) | |||
2554 | kfree(fs_info->chunk_root); | 2563 | kfree(fs_info->chunk_root); |
2555 | kfree(fs_info->dev_root); | 2564 | kfree(fs_info->dev_root); |
2556 | kfree(fs_info->csum_root); | 2565 | kfree(fs_info->csum_root); |
2566 | kfree(fs_info); | ||
2567 | |||
2557 | return 0; | 2568 | return 0; |
2558 | } | 2569 | } |
2559 | 2570 | ||
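
Editor's sketch (not part of the patch): several call sites above stop assuming btrfs_join_transaction() returns a usable pointer and check IS_ERR() instead. A simplified, self-contained model of the error-pointer convention those checks rely on (userspace stand-ins for ERR_PTR/IS_ERR/PTR_ERR, not the kernel macros):

    #include <stdio.h>
    #include <stdint.h>

    /* small negative errno values are folded into the pointer itself */
    #define MAX_ERRNO 4095
    static inline void *err_ptr(long err)      { return (void *)err; }
    static inline int   is_err(const void *p)  { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }
    static inline long  ptr_err(const void *p) { return (long)p; }

    struct trans { int id; };

    static struct trans *join_transaction(int fail)
    {
        static struct trans t = { .id = 1 };
        if (fail)
            return err_ptr(-12);    /* -ENOMEM */
        return &t;
    }

    static int commit(int fail)
    {
        struct trans *trans = join_transaction(fail);

        if (is_err(trans))          /* must be checked before dereferencing */
            return (int)ptr_err(trans);

        printf("committing transaction %d\n", trans->id);
        return 0;
    }

    int main(void)
    {
        int ok = (commit(0) == 0) && (commit(1) == -12);
        return ok ? 0 : 1;
    }
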
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index 9786963b07e5..ff27d7a477b2 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c | |||
@@ -171,6 +171,8 @@ static struct dentry *btrfs_get_parent(struct dentry *child) | |||
171 | int ret; | 171 | int ret; |
172 | 172 | ||
173 | path = btrfs_alloc_path(); | 173 | path = btrfs_alloc_path(); |
174 | if (!path) | ||
175 | return ERR_PTR(-ENOMEM); | ||
174 | 176 | ||
175 | if (dir->i_ino == BTRFS_FIRST_FREE_OBJECTID) { | 177 | if (dir->i_ino == BTRFS_FIRST_FREE_OBJECTID) { |
176 | key.objectid = root->root_key.objectid; | 178 | key.objectid = root->root_key.objectid; |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index b55269340cec..588ff9849873 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
@@ -320,11 +320,6 @@ static int caching_kthread(void *data) | |||
320 | if (!path) | 320 | if (!path) |
321 | return -ENOMEM; | 321 | return -ENOMEM; |
322 | 322 | ||
323 | exclude_super_stripes(extent_root, block_group); | ||
324 | spin_lock(&block_group->space_info->lock); | ||
325 | block_group->space_info->bytes_readonly += block_group->bytes_super; | ||
326 | spin_unlock(&block_group->space_info->lock); | ||
327 | |||
328 | last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); | 323 | last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); |
329 | 324 | ||
330 | /* | 325 | /* |
@@ -467,8 +462,10 @@ static int cache_block_group(struct btrfs_block_group_cache *cache, | |||
467 | cache->cached = BTRFS_CACHE_NO; | 462 | cache->cached = BTRFS_CACHE_NO; |
468 | } | 463 | } |
469 | spin_unlock(&cache->lock); | 464 | spin_unlock(&cache->lock); |
470 | if (ret == 1) | 465 | if (ret == 1) { |
466 | free_excluded_extents(fs_info->extent_root, cache); | ||
471 | return 0; | 467 | return 0; |
468 | } | ||
472 | } | 469 | } |
473 | 470 | ||
474 | if (load_cache_only) | 471 | if (load_cache_only) |
@@ -3344,8 +3341,10 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, | |||
3344 | u64 reserved; | 3341 | u64 reserved; |
3345 | u64 max_reclaim; | 3342 | u64 max_reclaim; |
3346 | u64 reclaimed = 0; | 3343 | u64 reclaimed = 0; |
3344 | long time_left; | ||
3347 | int pause = 1; | 3345 | int pause = 1; |
3348 | int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT; | 3346 | int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT; |
3347 | int loops = 0; | ||
3349 | 3348 | ||
3350 | block_rsv = &root->fs_info->delalloc_block_rsv; | 3349 | block_rsv = &root->fs_info->delalloc_block_rsv; |
3351 | space_info = block_rsv->space_info; | 3350 | space_info = block_rsv->space_info; |
@@ -3358,7 +3357,7 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, | |||
3358 | 3357 | ||
3359 | max_reclaim = min(reserved, to_reclaim); | 3358 | max_reclaim = min(reserved, to_reclaim); |
3360 | 3359 | ||
3361 | while (1) { | 3360 | while (loops < 1024) { |
3362 | /* have the flusher threads jump in and do some IO */ | 3361 | /* have the flusher threads jump in and do some IO */ |
3363 | smp_mb(); | 3362 | smp_mb(); |
3364 | nr_pages = min_t(unsigned long, nr_pages, | 3363 | nr_pages = min_t(unsigned long, nr_pages, |
@@ -3366,8 +3365,12 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, | |||
3366 | writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages); | 3365 | writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages); |
3367 | 3366 | ||
3368 | spin_lock(&space_info->lock); | 3367 | spin_lock(&space_info->lock); |
3369 | if (reserved > space_info->bytes_reserved) | 3368 | if (reserved > space_info->bytes_reserved) { |
3369 | loops = 0; | ||
3370 | reclaimed += reserved - space_info->bytes_reserved; | 3370 | reclaimed += reserved - space_info->bytes_reserved; |
3371 | } else { | ||
3372 | loops++; | ||
3373 | } | ||
3371 | reserved = space_info->bytes_reserved; | 3374 | reserved = space_info->bytes_reserved; |
3372 | spin_unlock(&space_info->lock); | 3375 | spin_unlock(&space_info->lock); |
3373 | 3376 | ||
@@ -3378,7 +3381,12 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, | |||
3378 | return -EAGAIN; | 3381 | return -EAGAIN; |
3379 | 3382 | ||
3380 | __set_current_state(TASK_INTERRUPTIBLE); | 3383 | __set_current_state(TASK_INTERRUPTIBLE); |
3381 | schedule_timeout(pause); | 3384 | time_left = schedule_timeout(pause); |
3385 | |||
3386 | /* We were interrupted, exit */ | ||
3387 | if (time_left) | ||
3388 | break; | ||
3389 | |||
3382 | pause <<= 1; | 3390 | pause <<= 1; |
3383 | if (pause > HZ / 10) | 3391 | if (pause > HZ / 10) |
3384 | pause = HZ / 10; | 3392 | pause = HZ / 10; |
@@ -3588,8 +3596,20 @@ void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv, | |||
3588 | 3596 | ||
3589 | if (num_bytes > 0) { | 3597 | if (num_bytes > 0) { |
3590 | if (dest) { | 3598 | if (dest) { |
3591 | block_rsv_add_bytes(dest, num_bytes, 0); | 3599 | spin_lock(&dest->lock); |
3592 | } else { | 3600 | if (!dest->full) { |
3601 | u64 bytes_to_add; | ||
3602 | |||
3603 | bytes_to_add = dest->size - dest->reserved; | ||
3604 | bytes_to_add = min(num_bytes, bytes_to_add); | ||
3605 | dest->reserved += bytes_to_add; | ||
3606 | if (dest->reserved >= dest->size) | ||
3607 | dest->full = 1; | ||
3608 | num_bytes -= bytes_to_add; | ||
3609 | } | ||
3610 | spin_unlock(&dest->lock); | ||
3611 | } | ||
3612 | if (num_bytes) { | ||
3593 | spin_lock(&space_info->lock); | 3613 | spin_lock(&space_info->lock); |
3594 | space_info->bytes_reserved -= num_bytes; | 3614 | space_info->bytes_reserved -= num_bytes; |
3595 | spin_unlock(&space_info->lock); | 3615 | spin_unlock(&space_info->lock); |
@@ -4012,6 +4032,7 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes) | |||
4012 | 4032 | ||
4013 | num_bytes = ALIGN(num_bytes, root->sectorsize); | 4033 | num_bytes = ALIGN(num_bytes, root->sectorsize); |
4014 | atomic_dec(&BTRFS_I(inode)->outstanding_extents); | 4034 | atomic_dec(&BTRFS_I(inode)->outstanding_extents); |
4035 | WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0); | ||
4015 | 4036 | ||
4016 | spin_lock(&BTRFS_I(inode)->accounting_lock); | 4037 | spin_lock(&BTRFS_I(inode)->accounting_lock); |
4017 | nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents); | 4038 | nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents); |
@@ -5355,7 +5376,7 @@ again: | |||
5355 | num_bytes, data, 1); | 5376 | num_bytes, data, 1); |
5356 | goto again; | 5377 | goto again; |
5357 | } | 5378 | } |
5358 | if (ret == -ENOSPC) { | 5379 | if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) { |
5359 | struct btrfs_space_info *sinfo; | 5380 | struct btrfs_space_info *sinfo; |
5360 | 5381 | ||
5361 | sinfo = __find_space_info(root->fs_info, data); | 5382 | sinfo = __find_space_info(root->fs_info, data); |
@@ -5633,6 +5654,7 @@ use_block_rsv(struct btrfs_trans_handle *trans, | |||
5633 | struct btrfs_root *root, u32 blocksize) | 5654 | struct btrfs_root *root, u32 blocksize) |
5634 | { | 5655 | { |
5635 | struct btrfs_block_rsv *block_rsv; | 5656 | struct btrfs_block_rsv *block_rsv; |
5657 | struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv; | ||
5636 | int ret; | 5658 | int ret; |
5637 | 5659 | ||
5638 | block_rsv = get_block_rsv(trans, root); | 5660 | block_rsv = get_block_rsv(trans, root); |
@@ -5640,14 +5662,39 @@ use_block_rsv(struct btrfs_trans_handle *trans, | |||
5640 | if (block_rsv->size == 0) { | 5662 | if (block_rsv->size == 0) { |
5641 | ret = reserve_metadata_bytes(trans, root, block_rsv, | 5663 | ret = reserve_metadata_bytes(trans, root, block_rsv, |
5642 | blocksize, 0); | 5664 | blocksize, 0); |
5643 | if (ret) | 5665 | /* |
5666 | * If we couldn't reserve metadata bytes try and use some from | ||
5667 | * the global reserve. | ||
5668 | */ | ||
5669 | if (ret && block_rsv != global_rsv) { | ||
5670 | ret = block_rsv_use_bytes(global_rsv, blocksize); | ||
5671 | if (!ret) | ||
5672 | return global_rsv; | ||
5673 | return ERR_PTR(ret); | ||
5674 | } else if (ret) { | ||
5644 | return ERR_PTR(ret); | 5675 | return ERR_PTR(ret); |
5676 | } | ||
5645 | return block_rsv; | 5677 | return block_rsv; |
5646 | } | 5678 | } |
5647 | 5679 | ||
5648 | ret = block_rsv_use_bytes(block_rsv, blocksize); | 5680 | ret = block_rsv_use_bytes(block_rsv, blocksize); |
5649 | if (!ret) | 5681 | if (!ret) |
5650 | return block_rsv; | 5682 | return block_rsv; |
5683 | if (ret) { | ||
5684 | WARN_ON(1); | ||
5685 | ret = reserve_metadata_bytes(trans, root, block_rsv, blocksize, | ||
5686 | 0); | ||
5687 | if (!ret) { | ||
5688 | spin_lock(&block_rsv->lock); | ||
5689 | block_rsv->size += blocksize; | ||
5690 | spin_unlock(&block_rsv->lock); | ||
5691 | return block_rsv; | ||
5692 | } else if (ret && block_rsv != global_rsv) { | ||
5693 | ret = block_rsv_use_bytes(global_rsv, blocksize); | ||
5694 | if (!ret) | ||
5695 | return global_rsv; | ||
5696 | } | ||
5697 | } | ||
5651 | 5698 | ||
5652 | return ERR_PTR(-ENOSPC); | 5699 | return ERR_PTR(-ENOSPC); |
5653 | } | 5700 | } |
@@ -6221,6 +6268,8 @@ int btrfs_drop_snapshot(struct btrfs_root *root, | |||
6221 | BUG_ON(!wc); | 6268 | BUG_ON(!wc); |
6222 | 6269 | ||
6223 | trans = btrfs_start_transaction(tree_root, 0); | 6270 | trans = btrfs_start_transaction(tree_root, 0); |
6271 | BUG_ON(IS_ERR(trans)); | ||
6272 | |||
6224 | if (block_rsv) | 6273 | if (block_rsv) |
6225 | trans->block_rsv = block_rsv; | 6274 | trans->block_rsv = block_rsv; |
6226 | 6275 | ||
@@ -6318,6 +6367,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, | |||
6318 | 6367 | ||
6319 | btrfs_end_transaction_throttle(trans, tree_root); | 6368 | btrfs_end_transaction_throttle(trans, tree_root); |
6320 | trans = btrfs_start_transaction(tree_root, 0); | 6369 | trans = btrfs_start_transaction(tree_root, 0); |
6370 | BUG_ON(IS_ERR(trans)); | ||
6321 | if (block_rsv) | 6371 | if (block_rsv) |
6322 | trans->block_rsv = block_rsv; | 6372 | trans->block_rsv = block_rsv; |
6323 | } | 6373 | } |
@@ -6446,6 +6496,8 @@ static noinline int relocate_inode_pages(struct inode *inode, u64 start, | |||
6446 | int ret = 0; | 6496 | int ret = 0; |
6447 | 6497 | ||
6448 | ra = kzalloc(sizeof(*ra), GFP_NOFS); | 6498 | ra = kzalloc(sizeof(*ra), GFP_NOFS); |
6499 | if (!ra) | ||
6500 | return -ENOMEM; | ||
6449 | 6501 | ||
6450 | mutex_lock(&inode->i_mutex); | 6502 | mutex_lock(&inode->i_mutex); |
6451 | first_index = start >> PAGE_CACHE_SHIFT; | 6503 | first_index = start >> PAGE_CACHE_SHIFT; |
@@ -6531,7 +6583,7 @@ static noinline int relocate_data_extent(struct inode *reloc_inode, | |||
6531 | u64 end = start + extent_key->offset - 1; | 6583 | u64 end = start + extent_key->offset - 1; |
6532 | 6584 | ||
6533 | em = alloc_extent_map(GFP_NOFS); | 6585 | em = alloc_extent_map(GFP_NOFS); |
6534 | BUG_ON(!em || IS_ERR(em)); | 6586 | BUG_ON(!em); |
6535 | 6587 | ||
6536 | em->start = start; | 6588 | em->start = start; |
6537 | em->len = extent_key->offset; | 6589 | em->len = extent_key->offset; |
@@ -7477,7 +7529,7 @@ int btrfs_drop_dead_reloc_roots(struct btrfs_root *root) | |||
7477 | BUG_ON(reloc_root->commit_root != NULL); | 7529 | BUG_ON(reloc_root->commit_root != NULL); |
7478 | while (1) { | 7530 | while (1) { |
7479 | trans = btrfs_join_transaction(root, 1); | 7531 | trans = btrfs_join_transaction(root, 1); |
7480 | BUG_ON(!trans); | 7532 | BUG_ON(IS_ERR(trans)); |
7481 | 7533 | ||
7482 | mutex_lock(&root->fs_info->drop_mutex); | 7534 | mutex_lock(&root->fs_info->drop_mutex); |
7483 | ret = btrfs_drop_snapshot(trans, reloc_root); | 7535 | ret = btrfs_drop_snapshot(trans, reloc_root); |
@@ -7535,7 +7587,7 @@ int btrfs_cleanup_reloc_trees(struct btrfs_root *root) | |||
7535 | 7587 | ||
7536 | if (found) { | 7588 | if (found) { |
7537 | trans = btrfs_start_transaction(root, 1); | 7589 | trans = btrfs_start_transaction(root, 1); |
7538 | BUG_ON(!trans); | 7590 | BUG_ON(IS_ERR(trans)); |
7539 | ret = btrfs_commit_transaction(trans, root); | 7591 | ret = btrfs_commit_transaction(trans, root); |
7540 | BUG_ON(ret); | 7592 | BUG_ON(ret); |
7541 | } | 7593 | } |
@@ -7779,7 +7831,7 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root, | |||
7779 | 7831 | ||
7780 | 7832 | ||
7781 | trans = btrfs_start_transaction(extent_root, 1); | 7833 | trans = btrfs_start_transaction(extent_root, 1); |
7782 | BUG_ON(!trans); | 7834 | BUG_ON(IS_ERR(trans)); |
7783 | 7835 | ||
7784 | if (extent_key->objectid == 0) { | 7836 | if (extent_key->objectid == 0) { |
7785 | ret = del_extent_zero(trans, extent_root, path, extent_key); | 7837 | ret = del_extent_zero(trans, extent_root, path, extent_key); |
@@ -8013,6 +8065,13 @@ out: | |||
8013 | return ret; | 8065 | return ret; |
8014 | } | 8066 | } |
8015 | 8067 | ||
8068 | int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, | ||
8069 | struct btrfs_root *root, u64 type) | ||
8070 | { | ||
8071 | u64 alloc_flags = get_alloc_profile(root, type); | ||
8072 | return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1); | ||
8073 | } | ||
8074 | |||
8016 | /* | 8075 | /* |
8017 | * helper to account the unused space of all the readonly block group in the | 8076 | * helper to account the unused space of all the readonly block group in the |
8018 | * list. takes mirrors into account. | 8077 | * list. takes mirrors into account. |
@@ -8270,6 +8329,13 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) | |||
8270 | if (block_group->cached == BTRFS_CACHE_STARTED) | 8329 | if (block_group->cached == BTRFS_CACHE_STARTED) |
8271 | wait_block_group_cache_done(block_group); | 8330 | wait_block_group_cache_done(block_group); |
8272 | 8331 | ||
8332 | /* | ||
8333 | * We haven't cached this block group, which means we could | ||
8334 | * possibly have excluded extents on this block group. | ||
8335 | */ | ||
8336 | if (block_group->cached == BTRFS_CACHE_NO) | ||
8337 | free_excluded_extents(info->extent_root, block_group); | ||
8338 | |||
8273 | btrfs_remove_free_space_cache(block_group); | 8339 | btrfs_remove_free_space_cache(block_group); |
8274 | btrfs_put_block_group(block_group); | 8340 | btrfs_put_block_group(block_group); |
8275 | 8341 | ||
@@ -8385,6 +8451,13 @@ int btrfs_read_block_groups(struct btrfs_root *root) | |||
8385 | cache->sectorsize = root->sectorsize; | 8451 | cache->sectorsize = root->sectorsize; |
8386 | 8452 | ||
8387 | /* | 8453 | /* |
8454 | * We need to exclude the super stripes now so that the space | ||
8455 | * info has super bytes accounted for, otherwise we'll think | ||
8456 | * we have more space than we actually do. | ||
8457 | */ | ||
8458 | exclude_super_stripes(root, cache); | ||
8459 | |||
8460 | /* | ||
8388 | * check for two cases, either we are full, and therefore | 8461 | * check for two cases, either we are full, and therefore |
8389 | * don't need to bother with the caching work since we won't | 8462 | * don't need to bother with the caching work since we won't |
8390 | * find any space, or we are empty, and we can just add all | 8463 | * find any space, or we are empty, and we can just add all |
@@ -8392,12 +8465,10 @@ int btrfs_read_block_groups(struct btrfs_root *root) | |||
8392 | * time, particularly in the full case. | 8465 | * time, particularly in the full case. |
8393 | */ | 8466 | */ |
8394 | if (found_key.offset == btrfs_block_group_used(&cache->item)) { | 8467 | if (found_key.offset == btrfs_block_group_used(&cache->item)) { |
8395 | exclude_super_stripes(root, cache); | ||
8396 | cache->last_byte_to_unpin = (u64)-1; | 8468 | cache->last_byte_to_unpin = (u64)-1; |
8397 | cache->cached = BTRFS_CACHE_FINISHED; | 8469 | cache->cached = BTRFS_CACHE_FINISHED; |
8398 | free_excluded_extents(root, cache); | 8470 | free_excluded_extents(root, cache); |
8399 | } else if (btrfs_block_group_used(&cache->item) == 0) { | 8471 | } else if (btrfs_block_group_used(&cache->item) == 0) { |
8400 | exclude_super_stripes(root, cache); | ||
8401 | cache->last_byte_to_unpin = (u64)-1; | 8472 | cache->last_byte_to_unpin = (u64)-1; |
8402 | cache->cached = BTRFS_CACHE_FINISHED; | 8473 | cache->cached = BTRFS_CACHE_FINISHED; |
8403 | add_new_free_space(cache, root->fs_info, | 8474 | add_new_free_space(cache, root->fs_info, |
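
Editor's sketch (not part of the patch): use_block_rsv() now falls back to the global reserve when the per-root reservation cannot be satisfied. A compact model of that two-tier reservation using plain counters rather than the btrfs structures:

    #include <stdio.h>

    struct rsv {
        long long size;
        long long reserved;
    };

    /* take bytes from a reserve if it has them; mirrors block_rsv_use_bytes() */
    static int rsv_use(struct rsv *r, long long bytes)
    {
        if (r->reserved < bytes)
            return -1;              /* -ENOSPC in the kernel */
        r->reserved -= bytes;
        return 0;
    }

    /* try the local reserve first, then fall back to the global one */
    static struct rsv *reserve_block(struct rsv *local, struct rsv *global,
                                     long long blocksize)
    {
        if (rsv_use(local, blocksize) == 0)
            return local;
        if (local != global && rsv_use(global, blocksize) == 0)
            return global;
        return NULL;                /* ERR_PTR(-ENOSPC) in the kernel */
    }

    int main(void)
    {
        struct rsv local  = { .size = 4096,  .reserved = 0 };
        struct rsv global = { .size = 65536, .reserved = 65536 };
        struct rsv *used = reserve_block(&local, &global, 4096);

        printf("allocation served from %s reserve\n",
               used == &global ? "the global" : used ? "the local" : "no");
        return used ? 0 : 1;
    }
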
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 2e993cf1766e..fd3f172e94e6 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
@@ -1433,12 +1433,13 @@ int extent_clear_unlock_delalloc(struct inode *inode, | |||
1433 | */ | 1433 | */ |
1434 | u64 count_range_bits(struct extent_io_tree *tree, | 1434 | u64 count_range_bits(struct extent_io_tree *tree, |
1435 | u64 *start, u64 search_end, u64 max_bytes, | 1435 | u64 *start, u64 search_end, u64 max_bytes, |
1436 | unsigned long bits) | 1436 | unsigned long bits, int contig) |
1437 | { | 1437 | { |
1438 | struct rb_node *node; | 1438 | struct rb_node *node; |
1439 | struct extent_state *state; | 1439 | struct extent_state *state; |
1440 | u64 cur_start = *start; | 1440 | u64 cur_start = *start; |
1441 | u64 total_bytes = 0; | 1441 | u64 total_bytes = 0; |
1442 | u64 last = 0; | ||
1442 | int found = 0; | 1443 | int found = 0; |
1443 | 1444 | ||
1444 | if (search_end <= cur_start) { | 1445 | if (search_end <= cur_start) { |
@@ -1463,7 +1464,9 @@ u64 count_range_bits(struct extent_io_tree *tree, | |||
1463 | state = rb_entry(node, struct extent_state, rb_node); | 1464 | state = rb_entry(node, struct extent_state, rb_node); |
1464 | if (state->start > search_end) | 1465 | if (state->start > search_end) |
1465 | break; | 1466 | break; |
1466 | if (state->end >= cur_start && (state->state & bits)) { | 1467 | if (contig && found && state->start > last + 1) |
1468 | break; | ||
1469 | if (state->end >= cur_start && (state->state & bits) == bits) { | ||
1467 | total_bytes += min(search_end, state->end) + 1 - | 1470 | total_bytes += min(search_end, state->end) + 1 - |
1468 | max(cur_start, state->start); | 1471 | max(cur_start, state->start); |
1469 | if (total_bytes >= max_bytes) | 1472 | if (total_bytes >= max_bytes) |
@@ -1472,6 +1475,9 @@ u64 count_range_bits(struct extent_io_tree *tree, | |||
1472 | *start = state->start; | 1475 | *start = state->start; |
1473 | found = 1; | 1476 | found = 1; |
1474 | } | 1477 | } |
1478 | last = state->end; | ||
1479 | } else if (contig && found) { | ||
1480 | break; | ||
1475 | } | 1481 | } |
1476 | node = rb_next(node); | 1482 | node = rb_next(node); |
1477 | if (!node) | 1483 | if (!node) |
@@ -1865,7 +1871,7 @@ static int submit_one_bio(int rw, struct bio *bio, int mirror_num, | |||
1865 | bio_get(bio); | 1871 | bio_get(bio); |
1866 | 1872 | ||
1867 | if (tree->ops && tree->ops->submit_bio_hook) | 1873 | if (tree->ops && tree->ops->submit_bio_hook) |
1868 | tree->ops->submit_bio_hook(page->mapping->host, rw, bio, | 1874 | ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio, |
1869 | mirror_num, bio_flags, start); | 1875 | mirror_num, bio_flags, start); |
1870 | else | 1876 | else |
1871 | submit_bio(rw, bio); | 1877 | submit_bio(rw, bio); |
@@ -1920,6 +1926,8 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, | |||
1920 | nr = bio_get_nr_vecs(bdev); | 1926 | nr = bio_get_nr_vecs(bdev); |
1921 | 1927 | ||
1922 | bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH); | 1928 | bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH); |
1929 | if (!bio) | ||
1930 | return -ENOMEM; | ||
1923 | 1931 | ||
1924 | bio_add_page(bio, page, page_size, offset); | 1932 | bio_add_page(bio, page, page_size, offset); |
1925 | bio->bi_end_io = end_io_func; | 1933 | bio->bi_end_io = end_io_func; |
@@ -1944,6 +1952,7 @@ void set_page_extent_mapped(struct page *page) | |||
1944 | 1952 | ||
1945 | static void set_page_extent_head(struct page *page, unsigned long len) | 1953 | static void set_page_extent_head(struct page *page, unsigned long len) |
1946 | { | 1954 | { |
1955 | WARN_ON(!PagePrivate(page)); | ||
1947 | set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2); | 1956 | set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2); |
1948 | } | 1957 | } |
1949 | 1958 | ||
@@ -2126,7 +2135,7 @@ int extent_read_full_page(struct extent_io_tree *tree, struct page *page, | |||
2126 | ret = __extent_read_full_page(tree, page, get_extent, &bio, 0, | 2135 | ret = __extent_read_full_page(tree, page, get_extent, &bio, 0, |
2127 | &bio_flags); | 2136 | &bio_flags); |
2128 | if (bio) | 2137 | if (bio) |
2129 | submit_one_bio(READ, bio, 0, bio_flags); | 2138 | ret = submit_one_bio(READ, bio, 0, bio_flags); |
2130 | return ret; | 2139 | return ret; |
2131 | } | 2140 | } |
2132 | 2141 | ||
@@ -2819,9 +2828,17 @@ int try_release_extent_state(struct extent_map_tree *map, | |||
2819 | * at this point we can safely clear everything except the | 2828 | * at this point we can safely clear everything except the |
2820 | * locked bit and the nodatasum bit | 2829 | * locked bit and the nodatasum bit |
2821 | */ | 2830 | */ |
2822 | clear_extent_bit(tree, start, end, | 2831 | ret = clear_extent_bit(tree, start, end, |
2823 | ~(EXTENT_LOCKED | EXTENT_NODATASUM), | 2832 | ~(EXTENT_LOCKED | EXTENT_NODATASUM), |
2824 | 0, 0, NULL, mask); | 2833 | 0, 0, NULL, mask); |
2834 | |||
2835 | /* if clear_extent_bit failed for enomem reasons, | ||
2836 | * we can't allow the release to continue. | ||
2837 | */ | ||
2838 | if (ret < 0) | ||
2839 | ret = 0; | ||
2840 | else | ||
2841 | ret = 1; | ||
2825 | } | 2842 | } |
2826 | return ret; | 2843 | return ret; |
2827 | } | 2844 | } |
@@ -2901,6 +2918,46 @@ out: | |||
2901 | return sector; | 2918 | return sector; |
2902 | } | 2919 | } |
2903 | 2920 | ||
2921 | /* | ||
2922 | * helper function for fiemap, which doesn't want to see any holes. | ||
2923 | * This maps until we find something past 'last' | ||
2924 | */ | ||
2925 | static struct extent_map *get_extent_skip_holes(struct inode *inode, | ||
2926 | u64 offset, | ||
2927 | u64 last, | ||
2928 | get_extent_t *get_extent) | ||
2929 | { | ||
2930 | u64 sectorsize = BTRFS_I(inode)->root->sectorsize; | ||
2931 | struct extent_map *em; | ||
2932 | u64 len; | ||
2933 | |||
2934 | if (offset >= last) | ||
2935 | return NULL; | ||
2936 | |||
2937 | while(1) { | ||
2938 | len = last - offset; | ||
2939 | if (len == 0) | ||
2940 | break; | ||
2941 | len = (len + sectorsize - 1) & ~(sectorsize - 1); | ||
2942 | em = get_extent(inode, NULL, 0, offset, len, 0); | ||
2943 | if (!em || IS_ERR(em)) | ||
2944 | return em; | ||
2945 | |||
2946 | /* if this isn't a hole return it */ | ||
2947 | if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) && | ||
2948 | em->block_start != EXTENT_MAP_HOLE) { | ||
2949 | return em; | ||
2950 | } | ||
2951 | |||
2952 | /* this is a hole, advance to the next extent */ | ||
2953 | offset = extent_map_end(em); | ||
2954 | free_extent_map(em); | ||
2955 | if (offset >= last) | ||
2956 | break; | ||
2957 | } | ||
2958 | return NULL; | ||
2959 | } | ||
2960 | |||
2904 | int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | 2961 | int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, |
2905 | __u64 start, __u64 len, get_extent_t *get_extent) | 2962 | __u64 start, __u64 len, get_extent_t *get_extent) |
2906 | { | 2963 | { |
@@ -2910,16 +2967,19 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
2910 | u32 flags = 0; | 2967 | u32 flags = 0; |
2911 | u32 found_type; | 2968 | u32 found_type; |
2912 | u64 last; | 2969 | u64 last; |
2970 | u64 last_for_get_extent = 0; | ||
2913 | u64 disko = 0; | 2971 | u64 disko = 0; |
2972 | u64 isize = i_size_read(inode); | ||
2914 | struct btrfs_key found_key; | 2973 | struct btrfs_key found_key; |
2915 | struct extent_map *em = NULL; | 2974 | struct extent_map *em = NULL; |
2916 | struct extent_state *cached_state = NULL; | 2975 | struct extent_state *cached_state = NULL; |
2917 | struct btrfs_path *path; | 2976 | struct btrfs_path *path; |
2918 | struct btrfs_file_extent_item *item; | 2977 | struct btrfs_file_extent_item *item; |
2919 | int end = 0; | 2978 | int end = 0; |
2920 | u64 em_start = 0, em_len = 0; | 2979 | u64 em_start = 0; |
2980 | u64 em_len = 0; | ||
2981 | u64 em_end = 0; | ||
2921 | unsigned long emflags; | 2982 | unsigned long emflags; |
2922 | int hole = 0; | ||
2923 | 2983 | ||
2924 | if (len == 0) | 2984 | if (len == 0) |
2925 | return -EINVAL; | 2985 | return -EINVAL; |
@@ -2929,6 +2989,10 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
2929 | return -ENOMEM; | 2989 | return -ENOMEM; |
2930 | path->leave_spinning = 1; | 2990 | path->leave_spinning = 1; |
2931 | 2991 | ||
2992 | /* | ||
2993 | * lookup the last file extent. We're not using i_size here | ||
2994 | * because there might be preallocation past i_size | ||
2995 | */ | ||
2932 | ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root, | 2996 | ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root, |
2933 | path, inode->i_ino, -1, 0); | 2997 | path, inode->i_ino, -1, 0); |
2934 | if (ret < 0) { | 2998 | if (ret < 0) { |
@@ -2942,18 +3006,38 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
2942 | btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); | 3006 | btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); |
2943 | found_type = btrfs_key_type(&found_key); | 3007 | found_type = btrfs_key_type(&found_key); |
2944 | 3008 | ||
2945 | /* No extents, just return */ | 3009 | /* No extents, but there might be delalloc bits */ |
2946 | if (found_key.objectid != inode->i_ino || | 3010 | if (found_key.objectid != inode->i_ino || |
2947 | found_type != BTRFS_EXTENT_DATA_KEY) { | 3011 | found_type != BTRFS_EXTENT_DATA_KEY) { |
2948 | btrfs_free_path(path); | 3012 | /* have to trust i_size as the end */ |
2949 | return 0; | 3013 | last = (u64)-1; |
3014 | last_for_get_extent = isize; | ||
3015 | } else { | ||
3016 | /* | ||
3017 | * remember the start of the last extent. There are a | ||
3018 | * bunch of different factors that go into the length of the | ||
3019 | * extent, so it's much less complex to remember where it started | ||
3020 | */ | ||
3021 | last = found_key.offset; | ||
3022 | last_for_get_extent = last + 1; | ||
2950 | } | 3023 | } |
2951 | last = found_key.offset; | ||
2952 | btrfs_free_path(path); | 3024 | btrfs_free_path(path); |
2953 | 3025 | ||
3026 | /* | ||
3027 | * we might have some extents allocated but more delalloc past those | ||
3028 | * extents. So, we trust isize unless the start of the last extent is | ||
3029 | * beyond isize | ||
3030 | */ | ||
3031 | if (last < isize) { | ||
3032 | last = (u64)-1; | ||
3033 | last_for_get_extent = isize; | ||
3034 | } | ||
3035 | |||
2954 | lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, | 3036 | lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, |
2955 | &cached_state, GFP_NOFS); | 3037 | &cached_state, GFP_NOFS); |
2956 | em = get_extent(inode, NULL, 0, off, max - off, 0); | 3038 | |
3039 | em = get_extent_skip_holes(inode, off, last_for_get_extent, | ||
3040 | get_extent); | ||
2957 | if (!em) | 3041 | if (!em) |
2958 | goto out; | 3042 | goto out; |
2959 | if (IS_ERR(em)) { | 3043 | if (IS_ERR(em)) { |
@@ -2962,19 +3046,14 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
2962 | } | 3046 | } |
2963 | 3047 | ||
2964 | while (!end) { | 3048 | while (!end) { |
2965 | hole = 0; | 3049 | off = extent_map_end(em); |
2966 | off = em->start + em->len; | ||
2967 | if (off >= max) | 3050 | if (off >= max) |
2968 | end = 1; | 3051 | end = 1; |
2969 | 3052 | ||
2970 | if (em->block_start == EXTENT_MAP_HOLE) { | ||
2971 | hole = 1; | ||
2972 | goto next; | ||
2973 | } | ||
2974 | |||
2975 | em_start = em->start; | 3053 | em_start = em->start; |
2976 | em_len = em->len; | 3054 | em_len = em->len; |
2977 | 3055 | em_end = extent_map_end(em); | |
3056 | emflags = em->flags; | ||
2978 | disko = 0; | 3057 | disko = 0; |
2979 | flags = 0; | 3058 | flags = 0; |
2980 | 3059 | ||
@@ -2993,37 +3072,29 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
2993 | if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) | 3072 | if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) |
2994 | flags |= FIEMAP_EXTENT_ENCODED; | 3073 | flags |= FIEMAP_EXTENT_ENCODED; |
2995 | 3074 | ||
2996 | next: | ||
2997 | emflags = em->flags; | ||
2998 | free_extent_map(em); | 3075 | free_extent_map(em); |
2999 | em = NULL; | 3076 | em = NULL; |
3000 | if (!end) { | 3077 | if ((em_start >= last) || em_len == (u64)-1 || |
3001 | em = get_extent(inode, NULL, 0, off, max - off, 0); | 3078 | (last == (u64)-1 && isize <= em_end)) { |
3002 | if (!em) | ||
3003 | goto out; | ||
3004 | if (IS_ERR(em)) { | ||
3005 | ret = PTR_ERR(em); | ||
3006 | goto out; | ||
3007 | } | ||
3008 | emflags = em->flags; | ||
3009 | } | ||
3010 | |||
3011 | if (test_bit(EXTENT_FLAG_VACANCY, &emflags)) { | ||
3012 | flags |= FIEMAP_EXTENT_LAST; | 3079 | flags |= FIEMAP_EXTENT_LAST; |
3013 | end = 1; | 3080 | end = 1; |
3014 | } | 3081 | } |
3015 | 3082 | ||
3016 | if (em_start == last) { | 3083 | /* now scan forward to see if this is really the last extent. */ |
3084 | em = get_extent_skip_holes(inode, off, last_for_get_extent, | ||
3085 | get_extent); | ||
3086 | if (IS_ERR(em)) { | ||
3087 | ret = PTR_ERR(em); | ||
3088 | goto out; | ||
3089 | } | ||
3090 | if (!em) { | ||
3017 | flags |= FIEMAP_EXTENT_LAST; | 3091 | flags |= FIEMAP_EXTENT_LAST; |
3018 | end = 1; | 3092 | end = 1; |
3019 | } | 3093 | } |
3020 | 3094 | ret = fiemap_fill_next_extent(fieinfo, em_start, disko, | |
3021 | if (!hole) { | 3095 | em_len, flags); |
3022 | ret = fiemap_fill_next_extent(fieinfo, em_start, disko, | 3096 | if (ret) |
3023 | em_len, flags); | 3097 | goto out_free; |
3024 | if (ret) | ||
3025 | goto out_free; | ||
3026 | } | ||
3027 | } | 3098 | } |
3028 | out_free: | 3099 | out_free: |
3029 | free_extent_map(em); | 3100 | free_extent_map(em); |
@@ -3192,7 +3263,13 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, | |||
3192 | } | 3263 | } |
3193 | if (!PageUptodate(p)) | 3264 | if (!PageUptodate(p)) |
3194 | uptodate = 0; | 3265 | uptodate = 0; |
3195 | unlock_page(p); | 3266 | |
3267 | /* | ||
3268 | * see below about how we avoid a nasty race with release page | ||
3269 | * and why we unlock later | ||
3270 | */ | ||
3271 | if (i != 0) | ||
3272 | unlock_page(p); | ||
3196 | } | 3273 | } |
3197 | if (uptodate) | 3274 | if (uptodate) |
3198 | set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); | 3275 | set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); |
@@ -3216,9 +3293,26 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, | |||
3216 | atomic_inc(&eb->refs); | 3293 | atomic_inc(&eb->refs); |
3217 | spin_unlock(&tree->buffer_lock); | 3294 | spin_unlock(&tree->buffer_lock); |
3218 | radix_tree_preload_end(); | 3295 | radix_tree_preload_end(); |
3296 | |||
3297 | /* | ||
3298 | * there is a race where release page may have | ||
3299 | * tried to find this extent buffer in the radix | ||
3300 | * but failed. It will tell the VM it is safe to | ||
3301 | * reclaim the page, and it will clear the page private bit. | ||
3302 | * We must make sure to set the page private bit properly | ||
3303 | * after the extent buffer is in the radix tree so | ||
3304 | * it doesn't get lost | ||
3305 | */ | ||
3306 | set_page_extent_mapped(eb->first_page); | ||
3307 | set_page_extent_head(eb->first_page, eb->len); | ||
3308 | if (!page0) | ||
3309 | unlock_page(eb->first_page); | ||
3219 | return eb; | 3310 | return eb; |
3220 | 3311 | ||
3221 | free_eb: | 3312 | free_eb: |
3313 | if (eb->first_page && !page0) | ||
3314 | unlock_page(eb->first_page); | ||
3315 | |||
3222 | if (!atomic_dec_and_test(&eb->refs)) | 3316 | if (!atomic_dec_and_test(&eb->refs)) |
3223 | return exists; | 3317 | return exists; |
3224 | btrfs_release_extent_buffer(eb); | 3318 | btrfs_release_extent_buffer(eb); |
@@ -3269,10 +3363,11 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree, | |||
3269 | continue; | 3363 | continue; |
3270 | 3364 | ||
3271 | lock_page(page); | 3365 | lock_page(page); |
3366 | WARN_ON(!PagePrivate(page)); | ||
3367 | |||
3368 | set_page_extent_mapped(page); | ||
3272 | if (i == 0) | 3369 | if (i == 0) |
3273 | set_page_extent_head(page, eb->len); | 3370 | set_page_extent_head(page, eb->len); |
3274 | else | ||
3275 | set_page_private(page, EXTENT_PAGE_PRIVATE); | ||
3276 | 3371 | ||
3277 | clear_page_dirty_for_io(page); | 3372 | clear_page_dirty_for_io(page); |
3278 | spin_lock_irq(&page->mapping->tree_lock); | 3373 | spin_lock_irq(&page->mapping->tree_lock); |
@@ -3462,6 +3557,13 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, | |||
3462 | 3557 | ||
3463 | for (i = start_i; i < num_pages; i++) { | 3558 | for (i = start_i; i < num_pages; i++) { |
3464 | page = extent_buffer_page(eb, i); | 3559 | page = extent_buffer_page(eb, i); |
3560 | |||
3561 | WARN_ON(!PagePrivate(page)); | ||
3562 | |||
3563 | set_page_extent_mapped(page); | ||
3564 | if (i == 0) | ||
3565 | set_page_extent_head(page, eb->len); | ||
3566 | |||
3465 | if (inc_all_pages) | 3567 | if (inc_all_pages) |
3466 | page_cache_get(page); | 3568 | page_cache_get(page); |
3467 | if (!PageUptodate(page)) { | 3569 | if (!PageUptodate(page)) { |
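
Editor's sketch (not part of the patch): get_extent_skip_holes() keeps asking for the next mapping and steps over holes so fiemap only ever reports real extents. A self-contained sketch of that skip-the-holes iteration over a toy mapping table (the table and lookup helper are invented for illustration):

    #include <stdio.h>

    struct mapping {
        unsigned long long start, len;
        int hole;                   /* 1 = hole, 0 = real extent */
    };

    /* toy extent table standing in for get_extent(); sorted, covers the file */
    static const struct mapping table[] = {
        { 0,     4096, 0 },
        { 4096,  8192, 1 },         /* a hole to be skipped */
        { 12288, 4096, 0 },
    };

    static const struct mapping *lookup(unsigned long long offset)
    {
        for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++)
            if (offset >= table[i].start &&
                offset < table[i].start + table[i].len)
                return &table[i];
        return NULL;
    }

    /* return the first non-hole mapping at or after offset, but before last */
    static const struct mapping *skip_holes(unsigned long long offset,
                                            unsigned long long last)
    {
        while (offset < last) {
            const struct mapping *m = lookup(offset);
            if (!m)
                return NULL;
            if (!m->hole)
                return m;                    /* found a real extent */
            offset = m->start + m->len;      /* advance past the hole */
        }
        return NULL;
    }

    int main(void)
    {
        const struct mapping *m = skip_holes(4096, 16384);
        if (m)
            printf("next real extent starts at %llu\n", m->start);
        return 0;
    }
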
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 7083cfafd061..9318dfefd59c 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h | |||
@@ -191,7 +191,7 @@ void extent_io_exit(void); | |||
191 | 191 | ||
192 | u64 count_range_bits(struct extent_io_tree *tree, | 192 | u64 count_range_bits(struct extent_io_tree *tree, |
193 | u64 *start, u64 search_end, | 193 | u64 *start, u64 search_end, |
194 | u64 max_bytes, unsigned long bits); | 194 | u64 max_bytes, unsigned long bits, int contig); |
195 | 195 | ||
196 | void free_extent_state(struct extent_state *state); | 196 | void free_extent_state(struct extent_state *state); |
197 | int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, | 197 | int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, |
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index b0e1fce12530..2b6c12e983b3 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c | |||
@@ -51,8 +51,8 @@ struct extent_map *alloc_extent_map(gfp_t mask) | |||
51 | { | 51 | { |
52 | struct extent_map *em; | 52 | struct extent_map *em; |
53 | em = kmem_cache_alloc(extent_map_cache, mask); | 53 | em = kmem_cache_alloc(extent_map_cache, mask); |
54 | if (!em || IS_ERR(em)) | 54 | if (!em) |
55 | return em; | 55 | return NULL; |
56 | em->in_tree = 0; | 56 | em->in_tree = 0; |
57 | em->flags = 0; | 57 | em->flags = 0; |
58 | em->compress_type = BTRFS_COMPRESS_NONE; | 58 | em->compress_type = BTRFS_COMPRESS_NONE; |
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index a562a250ae77..4f19a3e1bf32 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c | |||
@@ -536,6 +536,8 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, | |||
536 | root = root->fs_info->csum_root; | 536 | root = root->fs_info->csum_root; |
537 | 537 | ||
538 | path = btrfs_alloc_path(); | 538 | path = btrfs_alloc_path(); |
539 | if (!path) | ||
540 | return -ENOMEM; | ||
539 | 541 | ||
540 | while (1) { | 542 | while (1) { |
541 | key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; | 543 | key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; |
@@ -548,7 +550,10 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, | |||
548 | if (path->slots[0] == 0) | 550 | if (path->slots[0] == 0) |
549 | goto out; | 551 | goto out; |
550 | path->slots[0]--; | 552 | path->slots[0]--; |
553 | } else if (ret < 0) { | ||
554 | goto out; | ||
551 | } | 555 | } |
556 | |||
552 | leaf = path->nodes[0]; | 557 | leaf = path->nodes[0]; |
553 | btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); | 558 | btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); |
554 | 559 | ||
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index c800d58f3013..7084140d5940 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c | |||
@@ -186,6 +186,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, | |||
186 | split = alloc_extent_map(GFP_NOFS); | 186 | split = alloc_extent_map(GFP_NOFS); |
187 | if (!split2) | 187 | if (!split2) |
188 | split2 = alloc_extent_map(GFP_NOFS); | 188 | split2 = alloc_extent_map(GFP_NOFS); |
189 | BUG_ON(!split || !split2); | ||
189 | 190 | ||
190 | write_lock(&em_tree->lock); | 191 | write_lock(&em_tree->lock); |
191 | em = lookup_extent_mapping(em_tree, start, len); | 192 | em = lookup_extent_mapping(em_tree, start, len); |
@@ -793,8 +794,12 @@ again: | |||
793 | for (i = 0; i < num_pages; i++) { | 794 | for (i = 0; i < num_pages; i++) { |
794 | pages[i] = grab_cache_page(inode->i_mapping, index + i); | 795 | pages[i] = grab_cache_page(inode->i_mapping, index + i); |
795 | if (!pages[i]) { | 796 | if (!pages[i]) { |
796 | err = -ENOMEM; | 797 | int c; |
797 | BUG_ON(1); | 798 | for (c = i - 1; c >= 0; c--) { |
799 | unlock_page(pages[c]); | ||
800 | page_cache_release(pages[c]); | ||
801 | } | ||
802 | return -ENOMEM; | ||
798 | } | 803 | } |
799 | wait_on_page_writeback(pages[i]); | 804 | wait_on_page_writeback(pages[i]); |
800 | } | 805 | } |
@@ -946,6 +951,10 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, | |||
946 | PAGE_CACHE_SIZE, PAGE_CACHE_SIZE / | 951 | PAGE_CACHE_SIZE, PAGE_CACHE_SIZE / |
947 | (sizeof(struct page *))); | 952 | (sizeof(struct page *))); |
948 | pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL); | 953 | pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL); |
954 | if (!pages) { | ||
955 | ret = -ENOMEM; | ||
956 | goto out; | ||
957 | } | ||
949 | 958 | ||
950 | /* generic_write_checks can change our pos */ | 959 | /* generic_write_checks can change our pos */ |
951 | start_pos = pos; | 960 | start_pos = pos; |
@@ -984,8 +993,8 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, | |||
984 | size_t write_bytes = min(iov_iter_count(&i), | 993 | size_t write_bytes = min(iov_iter_count(&i), |
985 | nrptrs * (size_t)PAGE_CACHE_SIZE - | 994 | nrptrs * (size_t)PAGE_CACHE_SIZE - |
986 | offset); | 995 | offset); |
987 | size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >> | 996 | size_t num_pages = (write_bytes + offset + |
988 | PAGE_CACHE_SHIFT; | 997 | PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; |
989 | 998 | ||
990 | WARN_ON(num_pages > nrptrs); | 999 | WARN_ON(num_pages > nrptrs); |
991 | memset(pages, 0, sizeof(struct page *) * nrptrs); | 1000 | memset(pages, 0, sizeof(struct page *) * nrptrs); |
@@ -1015,8 +1024,8 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, | |||
1015 | 1024 | ||
1016 | copied = btrfs_copy_from_user(pos, num_pages, | 1025 | copied = btrfs_copy_from_user(pos, num_pages, |
1017 | write_bytes, pages, &i); | 1026 | write_bytes, pages, &i); |
1018 | dirty_pages = (copied + PAGE_CACHE_SIZE - 1) >> | 1027 | dirty_pages = (copied + offset + PAGE_CACHE_SIZE - 1) >> |
1019 | PAGE_CACHE_SHIFT; | 1028 | PAGE_CACHE_SHIFT; |
1020 | 1029 | ||
1021 | if (num_pages > dirty_pages) { | 1030 | if (num_pages > dirty_pages) { |
1022 | if (copied > 0) | 1031 | if (copied > 0) |
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 60d684266959..a0390657451b 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c | |||
@@ -987,11 +987,18 @@ tree_search_offset(struct btrfs_block_group_cache *block_group, | |||
987 | return entry; | 987 | return entry; |
988 | } | 988 | } |
989 | 989 | ||
990 | static void unlink_free_space(struct btrfs_block_group_cache *block_group, | 990 | static inline void |
991 | struct btrfs_free_space *info) | 991 | __unlink_free_space(struct btrfs_block_group_cache *block_group, |
992 | struct btrfs_free_space *info) | ||
992 | { | 993 | { |
993 | rb_erase(&info->offset_index, &block_group->free_space_offset); | 994 | rb_erase(&info->offset_index, &block_group->free_space_offset); |
994 | block_group->free_extents--; | 995 | block_group->free_extents--; |
996 | } | ||
997 | |||
998 | static void unlink_free_space(struct btrfs_block_group_cache *block_group, | ||
999 | struct btrfs_free_space *info) | ||
1000 | { | ||
1001 | __unlink_free_space(block_group, info); | ||
995 | block_group->free_space -= info->bytes; | 1002 | block_group->free_space -= info->bytes; |
996 | } | 1003 | } |
997 | 1004 | ||
@@ -1016,14 +1023,18 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group) | |||
1016 | u64 max_bytes; | 1023 | u64 max_bytes; |
1017 | u64 bitmap_bytes; | 1024 | u64 bitmap_bytes; |
1018 | u64 extent_bytes; | 1025 | u64 extent_bytes; |
1026 | u64 size = block_group->key.offset; | ||
1019 | 1027 | ||
1020 | /* | 1028 | /* |
1021 | * The goal is to keep the total amount of memory used per 1gb of space | 1029 | * The goal is to keep the total amount of memory used per 1gb of space |
1022 | * at or below 32k, so we need to adjust how much memory we allow to be | 1030 | * at or below 32k, so we need to adjust how much memory we allow to be |
1023 | * used by extent based free space tracking | 1031 | * used by extent based free space tracking |
1024 | */ | 1032 | */ |
1025 | max_bytes = MAX_CACHE_BYTES_PER_GIG * | 1033 | if (size < 1024 * 1024 * 1024) |
1026 | (div64_u64(block_group->key.offset, 1024 * 1024 * 1024)); | 1034 | max_bytes = MAX_CACHE_BYTES_PER_GIG; |
1035 | else | ||
1036 | max_bytes = MAX_CACHE_BYTES_PER_GIG * | ||
1037 | div64_u64(size, 1024 * 1024 * 1024); | ||
1027 | 1038 | ||
1028 | /* | 1039 | /* |
1029 | * we want to account for 1 more bitmap than what we have so we can make | 1040 | * we want to account for 1 more bitmap than what we have so we can make |
@@ -1171,6 +1182,16 @@ static void add_new_bitmap(struct btrfs_block_group_cache *block_group, | |||
1171 | recalculate_thresholds(block_group); | 1182 | recalculate_thresholds(block_group); |
1172 | } | 1183 | } |
1173 | 1184 | ||
1185 | static void free_bitmap(struct btrfs_block_group_cache *block_group, | ||
1186 | struct btrfs_free_space *bitmap_info) | ||
1187 | { | ||
1188 | unlink_free_space(block_group, bitmap_info); | ||
1189 | kfree(bitmap_info->bitmap); | ||
1190 | kfree(bitmap_info); | ||
1191 | block_group->total_bitmaps--; | ||
1192 | recalculate_thresholds(block_group); | ||
1193 | } | ||
1194 | |||
1174 | static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group, | 1195 | static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group, |
1175 | struct btrfs_free_space *bitmap_info, | 1196 | struct btrfs_free_space *bitmap_info, |
1176 | u64 *offset, u64 *bytes) | 1197 | u64 *offset, u64 *bytes) |
@@ -1195,6 +1216,7 @@ again: | |||
1195 | */ | 1216 | */ |
1196 | search_start = *offset; | 1217 | search_start = *offset; |
1197 | search_bytes = *bytes; | 1218 | search_bytes = *bytes; |
1219 | search_bytes = min(search_bytes, end - search_start + 1); | ||
1198 | ret = search_bitmap(block_group, bitmap_info, &search_start, | 1220 | ret = search_bitmap(block_group, bitmap_info, &search_start, |
1199 | &search_bytes); | 1221 | &search_bytes); |
1200 | BUG_ON(ret < 0 || search_start != *offset); | 1222 | BUG_ON(ret < 0 || search_start != *offset); |
@@ -1211,13 +1233,8 @@ again: | |||
1211 | 1233 | ||
1212 | if (*bytes) { | 1234 | if (*bytes) { |
1213 | struct rb_node *next = rb_next(&bitmap_info->offset_index); | 1235 | struct rb_node *next = rb_next(&bitmap_info->offset_index); |
1214 | if (!bitmap_info->bytes) { | 1236 | if (!bitmap_info->bytes) |
1215 | unlink_free_space(block_group, bitmap_info); | 1237 | free_bitmap(block_group, bitmap_info); |
1216 | kfree(bitmap_info->bitmap); | ||
1217 | kfree(bitmap_info); | ||
1218 | block_group->total_bitmaps--; | ||
1219 | recalculate_thresholds(block_group); | ||
1220 | } | ||
1221 | 1238 | ||
1222 | /* | 1239 | /* |
1223 | * no entry after this bitmap, but we still have bytes to | 1240 | * no entry after this bitmap, but we still have bytes to |
@@ -1250,13 +1267,8 @@ again: | |||
1250 | return -EAGAIN; | 1267 | return -EAGAIN; |
1251 | 1268 | ||
1252 | goto again; | 1269 | goto again; |
1253 | } else if (!bitmap_info->bytes) { | 1270 | } else if (!bitmap_info->bytes) |
1254 | unlink_free_space(block_group, bitmap_info); | 1271 | free_bitmap(block_group, bitmap_info); |
1255 | kfree(bitmap_info->bitmap); | ||
1256 | kfree(bitmap_info); | ||
1257 | block_group->total_bitmaps--; | ||
1258 | recalculate_thresholds(block_group); | ||
1259 | } | ||
1260 | 1272 | ||
1261 | return 0; | 1273 | return 0; |
1262 | } | 1274 | } |
@@ -1359,22 +1371,14 @@ out: | |||
1359 | return ret; | 1371 | return ret; |
1360 | } | 1372 | } |
1361 | 1373 | ||
1362 | int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, | 1374 | bool try_merge_free_space(struct btrfs_block_group_cache *block_group, |
1363 | u64 offset, u64 bytes) | 1375 | struct btrfs_free_space *info, bool update_stat) |
1364 | { | 1376 | { |
1365 | struct btrfs_free_space *right_info = NULL; | 1377 | struct btrfs_free_space *left_info; |
1366 | struct btrfs_free_space *left_info = NULL; | 1378 | struct btrfs_free_space *right_info; |
1367 | struct btrfs_free_space *info = NULL; | 1379 | bool merged = false; |
1368 | int ret = 0; | 1380 | u64 offset = info->offset; |
1369 | 1381 | u64 bytes = info->bytes; | |
1370 | info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS); | ||
1371 | if (!info) | ||
1372 | return -ENOMEM; | ||
1373 | |||
1374 | info->offset = offset; | ||
1375 | info->bytes = bytes; | ||
1376 | |||
1377 | spin_lock(&block_group->tree_lock); | ||
1378 | 1382 | ||
1379 | /* | 1383 | /* |
1380 | * first we want to see if there is free space adjacent to the range we | 1384 | * first we want to see if there is free space adjacent to the range we |
@@ -1388,37 +1392,62 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, | |||
1388 | else | 1392 | else |
1389 | left_info = tree_search_offset(block_group, offset - 1, 0, 0); | 1393 | left_info = tree_search_offset(block_group, offset - 1, 0, 0); |
1390 | 1394 | ||
1391 | /* | ||
1392 | * If there was no extent directly to the left or right of this new | ||
1393 | * extent then we know we're going to have to allocate a new extent, so | ||
1394 | * before we do that see if we need to drop this into a bitmap | ||
1395 | */ | ||
1396 | if ((!left_info || left_info->bitmap) && | ||
1397 | (!right_info || right_info->bitmap)) { | ||
1398 | ret = insert_into_bitmap(block_group, info); | ||
1399 | |||
1400 | if (ret < 0) { | ||
1401 | goto out; | ||
1402 | } else if (ret) { | ||
1403 | ret = 0; | ||
1404 | goto out; | ||
1405 | } | ||
1406 | } | ||
1407 | |||
1408 | if (right_info && !right_info->bitmap) { | 1395 | if (right_info && !right_info->bitmap) { |
1409 | unlink_free_space(block_group, right_info); | 1396 | if (update_stat) |
1397 | unlink_free_space(block_group, right_info); | ||
1398 | else | ||
1399 | __unlink_free_space(block_group, right_info); | ||
1410 | info->bytes += right_info->bytes; | 1400 | info->bytes += right_info->bytes; |
1411 | kfree(right_info); | 1401 | kfree(right_info); |
1402 | merged = true; | ||
1412 | } | 1403 | } |
1413 | 1404 | ||
1414 | if (left_info && !left_info->bitmap && | 1405 | if (left_info && !left_info->bitmap && |
1415 | left_info->offset + left_info->bytes == offset) { | 1406 | left_info->offset + left_info->bytes == offset) { |
1416 | unlink_free_space(block_group, left_info); | 1407 | if (update_stat) |
1408 | unlink_free_space(block_group, left_info); | ||
1409 | else | ||
1410 | __unlink_free_space(block_group, left_info); | ||
1417 | info->offset = left_info->offset; | 1411 | info->offset = left_info->offset; |
1418 | info->bytes += left_info->bytes; | 1412 | info->bytes += left_info->bytes; |
1419 | kfree(left_info); | 1413 | kfree(left_info); |
1414 | merged = true; | ||
1420 | } | 1415 | } |
1421 | 1416 | ||
1417 | return merged; | ||
1418 | } | ||
1419 | |||
1420 | int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, | ||
1421 | u64 offset, u64 bytes) | ||
1422 | { | ||
1423 | struct btrfs_free_space *info; | ||
1424 | int ret = 0; | ||
1425 | |||
1426 | info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS); | ||
1427 | if (!info) | ||
1428 | return -ENOMEM; | ||
1429 | |||
1430 | info->offset = offset; | ||
1431 | info->bytes = bytes; | ||
1432 | |||
1433 | spin_lock(&block_group->tree_lock); | ||
1434 | |||
1435 | if (try_merge_free_space(block_group, info, true)) | ||
1436 | goto link; | ||
1437 | |||
1438 | /* | ||
1439 | * If there was no extent directly to the left or right of this new | ||
1440 | * extent, then we know we're going to have to allocate a new extent, so | ||
1441 | * before we do that, see if we need to drop this into a bitmap | ||
1442 | */ | ||
1443 | ret = insert_into_bitmap(block_group, info); | ||
1444 | if (ret < 0) { | ||
1445 | goto out; | ||
1446 | } else if (ret) { | ||
1447 | ret = 0; | ||
1448 | goto out; | ||
1449 | } | ||
1450 | link: | ||
1422 | ret = link_free_space(block_group, info); | 1451 | ret = link_free_space(block_group, info); |
1423 | if (ret) | 1452 | if (ret) |
1424 | kfree(info); | 1453 | kfree(info); |
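The new try_merge_free_space() above coalesces a freshly freed extent with non-bitmap neighbours before it is linked back in; __btrfs_return_cluster_to_free_space() reuses it with update_stat false so the free_space counter is left untouched. A rough user-space sketch of the merge rule itself, with invented types:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct range { uint64_t offset; uint64_t bytes; };

	/* absorb a neighbour that starts exactly where `info` ends, and/or one
	 * that ends exactly where `info` starts; returns true if anything merged */
	static bool try_merge(struct range *info, struct range *left, struct range *right)
	{
		bool merged = false;

		if (right && info->offset + info->bytes == right->offset) {
			info->bytes += right->bytes;
			merged = true;
		}
		if (left && left->offset + left->bytes == info->offset) {
			info->offset = left->offset;
			info->bytes += left->bytes;
			merged = true;
		}
		return merged;
	}

	int main(void)
	{
		struct range left = { 0, 4096 }, info = { 4096, 4096 }, right = { 8192, 4096 };

		try_merge(&info, &left, &right);
		/* prints offset=0 bytes=12288: one contiguous free range */
		printf("merged: offset=%llu bytes=%llu\n",
		       (unsigned long long)info.offset, (unsigned long long)info.bytes);
		return 0;
	}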
@@ -1621,6 +1650,7 @@ __btrfs_return_cluster_to_free_space( | |||
1621 | node = rb_next(&entry->offset_index); | 1650 | node = rb_next(&entry->offset_index); |
1622 | rb_erase(&entry->offset_index, &cluster->root); | 1651 | rb_erase(&entry->offset_index, &cluster->root); |
1623 | BUG_ON(entry->bitmap); | 1652 | BUG_ON(entry->bitmap); |
1653 | try_merge_free_space(block_group, entry, false); | ||
1624 | tree_insert_offset(&block_group->free_space_offset, | 1654 | tree_insert_offset(&block_group->free_space_offset, |
1625 | entry->offset, &entry->offset_index, 0); | 1655 | entry->offset, &entry->offset_index, 0); |
1626 | } | 1656 | } |
@@ -1685,13 +1715,8 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, | |||
1685 | ret = offset; | 1715 | ret = offset; |
1686 | if (entry->bitmap) { | 1716 | if (entry->bitmap) { |
1687 | bitmap_clear_bits(block_group, entry, offset, bytes); | 1717 | bitmap_clear_bits(block_group, entry, offset, bytes); |
1688 | if (!entry->bytes) { | 1718 | if (!entry->bytes) |
1689 | unlink_free_space(block_group, entry); | 1719 | free_bitmap(block_group, entry); |
1690 | kfree(entry->bitmap); | ||
1691 | kfree(entry); | ||
1692 | block_group->total_bitmaps--; | ||
1693 | recalculate_thresholds(block_group); | ||
1694 | } | ||
1695 | } else { | 1720 | } else { |
1696 | unlink_free_space(block_group, entry); | 1721 | unlink_free_space(block_group, entry); |
1697 | entry->offset += bytes; | 1722 | entry->offset += bytes; |
@@ -1789,6 +1814,8 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, | |||
1789 | 1814 | ||
1790 | ret = search_start; | 1815 | ret = search_start; |
1791 | bitmap_clear_bits(block_group, entry, ret, bytes); | 1816 | bitmap_clear_bits(block_group, entry, ret, bytes); |
1817 | if (entry->bytes == 0) | ||
1818 | free_bitmap(block_group, entry); | ||
1792 | out: | 1819 | out: |
1793 | spin_unlock(&cluster->lock); | 1820 | spin_unlock(&cluster->lock); |
1794 | spin_unlock(&block_group->tree_lock); | 1821 | spin_unlock(&block_group->tree_lock); |
@@ -1842,15 +1869,26 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, | |||
1842 | entry->offset += bytes; | 1869 | entry->offset += bytes; |
1843 | entry->bytes -= bytes; | 1870 | entry->bytes -= bytes; |
1844 | 1871 | ||
1845 | if (entry->bytes == 0) { | 1872 | if (entry->bytes == 0) |
1846 | rb_erase(&entry->offset_index, &cluster->root); | 1873 | rb_erase(&entry->offset_index, &cluster->root); |
1847 | kfree(entry); | ||
1848 | } | ||
1849 | break; | 1874 | break; |
1850 | } | 1875 | } |
1851 | out: | 1876 | out: |
1852 | spin_unlock(&cluster->lock); | 1877 | spin_unlock(&cluster->lock); |
1853 | 1878 | ||
1879 | if (!ret) | ||
1880 | return 0; | ||
1881 | |||
1882 | spin_lock(&block_group->tree_lock); | ||
1883 | |||
1884 | block_group->free_space -= bytes; | ||
1885 | if (entry->bytes == 0) { | ||
1886 | block_group->free_extents--; | ||
1887 | kfree(entry); | ||
1888 | } | ||
1889 | |||
1890 | spin_unlock(&block_group->tree_lock); | ||
1891 | |||
1854 | return ret; | 1892 | return ret; |
1855 | } | 1893 | } |
1856 | 1894 | ||
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 160b55b3e132..0efdb65953c5 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -416,7 +416,7 @@ again: | |||
416 | } | 416 | } |
417 | if (start == 0) { | 417 | if (start == 0) { |
418 | trans = btrfs_join_transaction(root, 1); | 418 | trans = btrfs_join_transaction(root, 1); |
419 | BUG_ON(!trans); | 419 | BUG_ON(IS_ERR(trans)); |
420 | btrfs_set_trans_block_group(trans, inode); | 420 | btrfs_set_trans_block_group(trans, inode); |
421 | trans->block_rsv = &root->fs_info->delalloc_block_rsv; | 421 | trans->block_rsv = &root->fs_info->delalloc_block_rsv; |
422 | 422 | ||
@@ -612,6 +612,7 @@ retry: | |||
612 | GFP_NOFS); | 612 | GFP_NOFS); |
613 | 613 | ||
614 | trans = btrfs_join_transaction(root, 1); | 614 | trans = btrfs_join_transaction(root, 1); |
615 | BUG_ON(IS_ERR(trans)); | ||
615 | ret = btrfs_reserve_extent(trans, root, | 616 | ret = btrfs_reserve_extent(trans, root, |
616 | async_extent->compressed_size, | 617 | async_extent->compressed_size, |
617 | async_extent->compressed_size, | 618 | async_extent->compressed_size, |
@@ -643,6 +644,7 @@ retry: | |||
643 | async_extent->ram_size - 1, 0); | 644 | async_extent->ram_size - 1, 0); |
644 | 645 | ||
645 | em = alloc_extent_map(GFP_NOFS); | 646 | em = alloc_extent_map(GFP_NOFS); |
647 | BUG_ON(!em); | ||
646 | em->start = async_extent->start; | 648 | em->start = async_extent->start; |
647 | em->len = async_extent->ram_size; | 649 | em->len = async_extent->ram_size; |
648 | em->orig_start = em->start; | 650 | em->orig_start = em->start; |
@@ -771,7 +773,7 @@ static noinline int cow_file_range(struct inode *inode, | |||
771 | 773 | ||
772 | BUG_ON(root == root->fs_info->tree_root); | 774 | BUG_ON(root == root->fs_info->tree_root); |
773 | trans = btrfs_join_transaction(root, 1); | 775 | trans = btrfs_join_transaction(root, 1); |
774 | BUG_ON(!trans); | 776 | BUG_ON(IS_ERR(trans)); |
775 | btrfs_set_trans_block_group(trans, inode); | 777 | btrfs_set_trans_block_group(trans, inode); |
776 | trans->block_rsv = &root->fs_info->delalloc_block_rsv; | 778 | trans->block_rsv = &root->fs_info->delalloc_block_rsv; |
777 | 779 | ||
@@ -819,6 +821,7 @@ static noinline int cow_file_range(struct inode *inode, | |||
819 | BUG_ON(ret); | 821 | BUG_ON(ret); |
820 | 822 | ||
821 | em = alloc_extent_map(GFP_NOFS); | 823 | em = alloc_extent_map(GFP_NOFS); |
824 | BUG_ON(!em); | ||
822 | em->start = start; | 825 | em->start = start; |
823 | em->orig_start = em->start; | 826 | em->orig_start = em->start; |
824 | ram_size = ins.offset; | 827 | ram_size = ins.offset; |
@@ -1049,7 +1052,7 @@ static noinline int run_delalloc_nocow(struct inode *inode, | |||
1049 | } else { | 1052 | } else { |
1050 | trans = btrfs_join_transaction(root, 1); | 1053 | trans = btrfs_join_transaction(root, 1); |
1051 | } | 1054 | } |
1052 | BUG_ON(!trans); | 1055 | BUG_ON(IS_ERR(trans)); |
1053 | 1056 | ||
1054 | cow_start = (u64)-1; | 1057 | cow_start = (u64)-1; |
1055 | cur_offset = start; | 1058 | cur_offset = start; |
@@ -1168,6 +1171,7 @@ out_check: | |||
1168 | struct extent_map_tree *em_tree; | 1171 | struct extent_map_tree *em_tree; |
1169 | em_tree = &BTRFS_I(inode)->extent_tree; | 1172 | em_tree = &BTRFS_I(inode)->extent_tree; |
1170 | em = alloc_extent_map(GFP_NOFS); | 1173 | em = alloc_extent_map(GFP_NOFS); |
1174 | BUG_ON(!em); | ||
1171 | em->start = cur_offset; | 1175 | em->start = cur_offset; |
1172 | em->orig_start = em->start; | 1176 | em->orig_start = em->start; |
1173 | em->len = num_bytes; | 1177 | em->len = num_bytes; |
@@ -1557,6 +1561,7 @@ out: | |||
1557 | out_page: | 1561 | out_page: |
1558 | unlock_page(page); | 1562 | unlock_page(page); |
1559 | page_cache_release(page); | 1563 | page_cache_release(page); |
1564 | kfree(fixup); | ||
1560 | } | 1565 | } |
1561 | 1566 | ||
1562 | /* | 1567 | /* |
@@ -1703,7 +1708,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) | |||
1703 | trans = btrfs_join_transaction_nolock(root, 1); | 1708 | trans = btrfs_join_transaction_nolock(root, 1); |
1704 | else | 1709 | else |
1705 | trans = btrfs_join_transaction(root, 1); | 1710 | trans = btrfs_join_transaction(root, 1); |
1706 | BUG_ON(!trans); | 1711 | BUG_ON(IS_ERR(trans)); |
1707 | btrfs_set_trans_block_group(trans, inode); | 1712 | btrfs_set_trans_block_group(trans, inode); |
1708 | trans->block_rsv = &root->fs_info->delalloc_block_rsv; | 1713 | trans->block_rsv = &root->fs_info->delalloc_block_rsv; |
1709 | ret = btrfs_update_inode(trans, root, inode); | 1714 | ret = btrfs_update_inode(trans, root, inode); |
@@ -1720,6 +1725,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) | |||
1720 | trans = btrfs_join_transaction_nolock(root, 1); | 1725 | trans = btrfs_join_transaction_nolock(root, 1); |
1721 | else | 1726 | else |
1722 | trans = btrfs_join_transaction(root, 1); | 1727 | trans = btrfs_join_transaction(root, 1); |
1728 | BUG_ON(IS_ERR(trans)); | ||
1723 | btrfs_set_trans_block_group(trans, inode); | 1729 | btrfs_set_trans_block_group(trans, inode); |
1724 | trans->block_rsv = &root->fs_info->delalloc_block_rsv; | 1730 | trans->block_rsv = &root->fs_info->delalloc_block_rsv; |
1725 | 1731 | ||
@@ -1907,7 +1913,7 @@ static int btrfs_clean_io_failures(struct inode *inode, u64 start) | |||
1907 | 1913 | ||
1908 | private = 0; | 1914 | private = 0; |
1909 | if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private, | 1915 | if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private, |
1910 | (u64)-1, 1, EXTENT_DIRTY)) { | 1916 | (u64)-1, 1, EXTENT_DIRTY, 0)) { |
1911 | ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, | 1917 | ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, |
1912 | start, &private_failure); | 1918 | start, &private_failure); |
1913 | if (ret == 0) { | 1919 | if (ret == 0) { |
@@ -2354,6 +2360,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) | |||
2354 | */ | 2360 | */ |
2355 | if (is_bad_inode(inode)) { | 2361 | if (is_bad_inode(inode)) { |
2356 | trans = btrfs_start_transaction(root, 0); | 2362 | trans = btrfs_start_transaction(root, 0); |
2363 | BUG_ON(IS_ERR(trans)); | ||
2357 | btrfs_orphan_del(trans, inode); | 2364 | btrfs_orphan_del(trans, inode); |
2358 | btrfs_end_transaction(trans, root); | 2365 | btrfs_end_transaction(trans, root); |
2359 | iput(inode); | 2366 | iput(inode); |
@@ -2381,6 +2388,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) | |||
2381 | 2388 | ||
2382 | if (root->orphan_block_rsv || root->orphan_item_inserted) { | 2389 | if (root->orphan_block_rsv || root->orphan_item_inserted) { |
2383 | trans = btrfs_join_transaction(root, 1); | 2390 | trans = btrfs_join_transaction(root, 1); |
2391 | BUG_ON(IS_ERR(trans)); | ||
2384 | btrfs_end_transaction(trans, root); | 2392 | btrfs_end_transaction(trans, root); |
2385 | } | 2393 | } |
2386 | 2394 | ||
@@ -2641,7 +2649,7 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans, | |||
2641 | path = btrfs_alloc_path(); | 2649 | path = btrfs_alloc_path(); |
2642 | if (!path) { | 2650 | if (!path) { |
2643 | ret = -ENOMEM; | 2651 | ret = -ENOMEM; |
2644 | goto err; | 2652 | goto out; |
2645 | } | 2653 | } |
2646 | 2654 | ||
2647 | path->leave_spinning = 1; | 2655 | path->leave_spinning = 1; |
@@ -2714,9 +2722,10 @@ static int check_path_shared(struct btrfs_root *root, | |||
2714 | struct extent_buffer *eb; | 2722 | struct extent_buffer *eb; |
2715 | int level; | 2723 | int level; |
2716 | u64 refs = 1; | 2724 | u64 refs = 1; |
2717 | int uninitialized_var(ret); | ||
2718 | 2725 | ||
2719 | for (level = 0; level < BTRFS_MAX_LEVEL; level++) { | 2726 | for (level = 0; level < BTRFS_MAX_LEVEL; level++) { |
2727 | int ret; | ||
2728 | |||
2720 | if (!path->nodes[level]) | 2729 | if (!path->nodes[level]) |
2721 | break; | 2730 | break; |
2722 | eb = path->nodes[level]; | 2731 | eb = path->nodes[level]; |
@@ -2727,7 +2736,7 @@ static int check_path_shared(struct btrfs_root *root, | |||
2727 | if (refs > 1) | 2736 | if (refs > 1) |
2728 | return 1; | 2737 | return 1; |
2729 | } | 2738 | } |
2730 | return ret; /* XXX callers? */ | 2739 | return 0; |
2731 | } | 2740 | } |
2732 | 2741 | ||
2733 | /* | 2742 | /* |
@@ -4134,7 +4143,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) | |||
4134 | } | 4143 | } |
4135 | srcu_read_unlock(&root->fs_info->subvol_srcu, index); | 4144 | srcu_read_unlock(&root->fs_info->subvol_srcu, index); |
4136 | 4145 | ||
4137 | if (root != sub_root) { | 4146 | if (!IS_ERR(inode) && root != sub_root) { |
4138 | down_read(&root->fs_info->cleanup_work_sem); | 4147 | down_read(&root->fs_info->cleanup_work_sem); |
4139 | if (!(inode->i_sb->s_flags & MS_RDONLY)) | 4148 | if (!(inode->i_sb->s_flags & MS_RDONLY)) |
4140 | btrfs_orphan_cleanup(sub_root); | 4149 | btrfs_orphan_cleanup(sub_root); |
@@ -4347,6 +4356,8 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) | |||
4347 | trans = btrfs_join_transaction_nolock(root, 1); | 4356 | trans = btrfs_join_transaction_nolock(root, 1); |
4348 | else | 4357 | else |
4349 | trans = btrfs_join_transaction(root, 1); | 4358 | trans = btrfs_join_transaction(root, 1); |
4359 | if (IS_ERR(trans)) | ||
4360 | return PTR_ERR(trans); | ||
4350 | btrfs_set_trans_block_group(trans, inode); | 4361 | btrfs_set_trans_block_group(trans, inode); |
4351 | if (nolock) | 4362 | if (nolock) |
4352 | ret = btrfs_end_transaction_nolock(trans, root); | 4363 | ret = btrfs_end_transaction_nolock(trans, root); |
@@ -4372,6 +4383,7 @@ void btrfs_dirty_inode(struct inode *inode) | |||
4372 | return; | 4383 | return; |
4373 | 4384 | ||
4374 | trans = btrfs_join_transaction(root, 1); | 4385 | trans = btrfs_join_transaction(root, 1); |
4386 | BUG_ON(IS_ERR(trans)); | ||
4375 | btrfs_set_trans_block_group(trans, inode); | 4387 | btrfs_set_trans_block_group(trans, inode); |
4376 | 4388 | ||
4377 | ret = btrfs_update_inode(trans, root, inode); | 4389 | ret = btrfs_update_inode(trans, root, inode); |
@@ -5176,6 +5188,8 @@ again: | |||
5176 | em = NULL; | 5188 | em = NULL; |
5177 | btrfs_release_path(root, path); | 5189 | btrfs_release_path(root, path); |
5178 | trans = btrfs_join_transaction(root, 1); | 5190 | trans = btrfs_join_transaction(root, 1); |
5191 | if (IS_ERR(trans)) | ||
5192 | return ERR_CAST(trans); | ||
5179 | goto again; | 5193 | goto again; |
5180 | } | 5194 | } |
5181 | map = kmap(page); | 5195 | map = kmap(page); |
@@ -5266,6 +5280,128 @@ out: | |||
5266 | return em; | 5280 | return em; |
5267 | } | 5281 | } |
5268 | 5282 | ||
5283 | struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page, | ||
5284 | size_t pg_offset, u64 start, u64 len, | ||
5285 | int create) | ||
5286 | { | ||
5287 | struct extent_map *em; | ||
5288 | struct extent_map *hole_em = NULL; | ||
5289 | u64 range_start = start; | ||
5290 | u64 end; | ||
5291 | u64 found; | ||
5292 | u64 found_end; | ||
5293 | int err = 0; | ||
5294 | |||
5295 | em = btrfs_get_extent(inode, page, pg_offset, start, len, create); | ||
5296 | if (IS_ERR(em)) | ||
5297 | return em; | ||
5298 | if (em) { | ||
5299 | /* | ||
5300 | * if our em maps to a hole, there might | ||
5301 | * actually be delalloc bytes behind it | ||
5302 | */ | ||
5303 | if (em->block_start != EXTENT_MAP_HOLE) | ||
5304 | return em; | ||
5305 | else | ||
5306 | hole_em = em; | ||
5307 | } | ||
5308 | |||
5309 | /* check to see if we've wrapped (len == -1 or similar) */ | ||
5310 | end = start + len; | ||
5311 | if (end < start) | ||
5312 | end = (u64)-1; | ||
5313 | else | ||
5314 | end -= 1; | ||
5315 | |||
5316 | em = NULL; | ||
5317 | |||
5318 | /* ok, we didn't find anything, let's look for delalloc */ | ||
5319 | found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start, | ||
5320 | end, len, EXTENT_DELALLOC, 1); | ||
5321 | found_end = range_start + found; | ||
5322 | if (found_end < range_start) | ||
5323 | found_end = (u64)-1; | ||
5324 | |||
5325 | /* | ||
5326 | * we didn't find anything useful, return | ||
5327 | * the original results from get_extent() | ||
5328 | */ | ||
5329 | if (range_start > end || found_end <= start) { | ||
5330 | em = hole_em; | ||
5331 | hole_em = NULL; | ||
5332 | goto out; | ||
5333 | } | ||
5334 | |||
5335 | /* adjust the range_start to make sure it doesn't | ||
5336 | * go backwards from the start they passed in | ||
5337 | */ | ||
5338 | range_start = max(start,range_start); | ||
5339 | found = found_end - range_start; | ||
5340 | |||
5341 | if (found > 0) { | ||
5342 | u64 hole_start = start; | ||
5343 | u64 hole_len = len; | ||
5344 | |||
5345 | em = alloc_extent_map(GFP_NOFS); | ||
5346 | if (!em) { | ||
5347 | err = -ENOMEM; | ||
5348 | goto out; | ||
5349 | } | ||
5350 | /* | ||
5351 | * when btrfs_get_extent can't find anything it | ||
5352 | * returns one huge hole | ||
5353 | * | ||
5354 | * make sure what it found really fits our range, and | ||
5355 | * adjust to make sure it is based on the start from | ||
5356 | * the caller | ||
5357 | */ | ||
5358 | if (hole_em) { | ||
5359 | u64 calc_end = extent_map_end(hole_em); | ||
5360 | |||
5361 | if (calc_end <= start || (hole_em->start > end)) { | ||
5362 | free_extent_map(hole_em); | ||
5363 | hole_em = NULL; | ||
5364 | } else { | ||
5365 | hole_start = max(hole_em->start, start); | ||
5366 | hole_len = calc_end - hole_start; | ||
5367 | } | ||
5368 | } | ||
5369 | em->bdev = NULL; | ||
5370 | if (hole_em && range_start > hole_start) { | ||
5371 | /* our hole starts before our delalloc, so we | ||
5372 | * have to return just the parts of the hole | ||
5373 | * that go until the delalloc starts | ||
5374 | */ | ||
5375 | em->len = min(hole_len, | ||
5376 | range_start - hole_start); | ||
5377 | em->start = hole_start; | ||
5378 | em->orig_start = hole_start; | ||
5379 | /* | ||
5380 | * don't adjust block start at all, | ||
5381 | * it is fixed at EXTENT_MAP_HOLE | ||
5382 | */ | ||
5383 | em->block_start = hole_em->block_start; | ||
5384 | em->block_len = hole_len; | ||
5385 | } else { | ||
5386 | em->start = range_start; | ||
5387 | em->len = found; | ||
5388 | em->orig_start = range_start; | ||
5389 | em->block_start = EXTENT_MAP_DELALLOC; | ||
5390 | em->block_len = found; | ||
5391 | } | ||
5392 | } else if (hole_em) { | ||
5393 | return hole_em; | ||
5394 | } | ||
5395 | out: | ||
5396 | |||
5397 | free_extent_map(hole_em); | ||
5398 | if (err) { | ||
5399 | free_extent_map(em); | ||
5400 | return ERR_PTR(err); | ||
5401 | } | ||
5402 | return em; | ||
5403 | } | ||
5404 | |||
5269 | static struct extent_map *btrfs_new_extent_direct(struct inode *inode, | 5405 | static struct extent_map *btrfs_new_extent_direct(struct inode *inode, |
5270 | u64 start, u64 len) | 5406 | u64 start, u64 len) |
5271 | { | 5407 | { |
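The new btrfs_get_extent_fiemap() above only kicks in when the mapping comes back as a hole: it then asks count_range_bits() for delalloc in the window and clips the result so fiemap can report writes that are not allocated yet. A plain-C sketch of the clipping arithmetic, with invented names, showing the same wrap-around precautions:

	#include <stdint.h>
	#include <stdio.h>

	struct win { uint64_t start; uint64_t len; };

	static int clamp_to_window(uint64_t start, uint64_t len,
				   uint64_t range_start, uint64_t found,
				   struct win *out)
	{
		uint64_t end = start + len;
		uint64_t found_end = range_start + found;

		if (end < start)                /* len was huge (e.g. (u64)-1): saturate */
			end = UINT64_MAX;
		else
			end -= 1;
		if (found_end < range_start)    /* found range wrapped: saturate */
			found_end = UINT64_MAX;

		if (range_start > end || found_end <= start)
			return 0;               /* no overlap with the caller's window */

		out->start = range_start > start ? range_start : start;
		out->len = found_end - out->start;
		return 1;
	}

	int main(void)
	{
		struct win w;

		/* delalloc found at [1000, 9000) against a window starting at 4096 */
		if (clamp_to_window(4096, UINT64_MAX, 1000, 8000, &w))
			printf("start=%llu len=%llu\n",
			       (unsigned long long)w.start, (unsigned long long)w.len);
		return 0;
	}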
@@ -5280,8 +5416,8 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode, | |||
5280 | btrfs_drop_extent_cache(inode, start, start + len - 1, 0); | 5416 | btrfs_drop_extent_cache(inode, start, start + len - 1, 0); |
5281 | 5417 | ||
5282 | trans = btrfs_join_transaction(root, 0); | 5418 | trans = btrfs_join_transaction(root, 0); |
5283 | if (!trans) | 5419 | if (IS_ERR(trans)) |
5284 | return ERR_PTR(-ENOMEM); | 5420 | return ERR_CAST(trans); |
5285 | 5421 | ||
5286 | trans->block_rsv = &root->fs_info->delalloc_block_rsv; | 5422 | trans->block_rsv = &root->fs_info->delalloc_block_rsv; |
5287 | 5423 | ||
@@ -5505,7 +5641,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, | |||
5505 | * while we look for nocow cross refs | 5641 | * while we look for nocow cross refs |
5506 | */ | 5642 | */ |
5507 | trans = btrfs_join_transaction(root, 0); | 5643 | trans = btrfs_join_transaction(root, 0); |
5508 | if (!trans) | 5644 | if (IS_ERR(trans)) |
5509 | goto must_cow; | 5645 | goto must_cow; |
5510 | 5646 | ||
5511 | if (can_nocow_odirect(trans, inode, start, len) == 1) { | 5647 | if (can_nocow_odirect(trans, inode, start, len) == 1) { |
@@ -5640,7 +5776,7 @@ again: | |||
5640 | BUG_ON(!ordered); | 5776 | BUG_ON(!ordered); |
5641 | 5777 | ||
5642 | trans = btrfs_join_transaction(root, 1); | 5778 | trans = btrfs_join_transaction(root, 1); |
5643 | if (!trans) { | 5779 | if (IS_ERR(trans)) { |
5644 | err = -ENOMEM; | 5780 | err = -ENOMEM; |
5645 | goto out; | 5781 | goto out; |
5646 | } | 5782 | } |
@@ -6088,7 +6224,7 @@ out: | |||
6088 | static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | 6224 | static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, |
6089 | __u64 start, __u64 len) | 6225 | __u64 start, __u64 len) |
6090 | { | 6226 | { |
6091 | return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent); | 6227 | return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap); |
6092 | } | 6228 | } |
6093 | 6229 | ||
6094 | int btrfs_readpage(struct file *file, struct page *page) | 6230 | int btrfs_readpage(struct file *file, struct page *page) |
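Most of the remaining inode.c churn above (and the same pattern in ioctl.c and relocation.c below) converts `BUG_ON(!trans)` and `if (!trans)` into IS_ERR()/PTR_ERR() checks: a failing btrfs_start_transaction()/btrfs_join_transaction() hands back an error pointer, never NULL, so the NULL tests could never fire. A small user-space sketch of the convention, using simplified re-creations of the kernel macros for illustration only:

	#include <stdio.h>
	#include <errno.h>

	#define MAX_ERRNO	4095
	#define ERR_PTR(err)	((void *)(long)(err))
	#define PTR_ERR(ptr)	((long)(ptr))
	#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

	static void *start_transaction(int fail)
	{
		return fail ? ERR_PTR(-ENOMEM) : (void *)0x1000; /* fake handle */
	}

	int main(void)
	{
		void *trans = start_transaction(1);

		if (!trans)
			puts("NULL check: never reached");               /* the old, wrong test */
		if (IS_ERR(trans))
			printf("IS_ERR check: error %ld\n", PTR_ERR(trans)); /* -12 */
		return 0;
	}

With the pointer carrying the errno, callers can propagate PTR_ERR(trans) instead of inventing -ENOMEM, which several of the hunks above also switch to.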
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index a506a22b522a..5fdb2abc4fa7 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
@@ -203,7 +203,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) | |||
203 | 203 | ||
204 | 204 | ||
205 | trans = btrfs_join_transaction(root, 1); | 205 | trans = btrfs_join_transaction(root, 1); |
206 | BUG_ON(!trans); | 206 | BUG_ON(IS_ERR(trans)); |
207 | 207 | ||
208 | ret = btrfs_update_inode(trans, root, inode); | 208 | ret = btrfs_update_inode(trans, root, inode); |
209 | BUG_ON(ret); | 209 | BUG_ON(ret); |
@@ -907,6 +907,10 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root, | |||
907 | 907 | ||
908 | if (new_size > old_size) { | 908 | if (new_size > old_size) { |
909 | trans = btrfs_start_transaction(root, 0); | 909 | trans = btrfs_start_transaction(root, 0); |
910 | if (IS_ERR(trans)) { | ||
911 | ret = PTR_ERR(trans); | ||
912 | goto out_unlock; | ||
913 | } | ||
910 | ret = btrfs_grow_device(trans, device, new_size); | 914 | ret = btrfs_grow_device(trans, device, new_size); |
911 | btrfs_commit_transaction(trans, root); | 915 | btrfs_commit_transaction(trans, root); |
912 | } else { | 916 | } else { |
@@ -1067,12 +1071,15 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file, | |||
1067 | if (copy_from_user(&flags, arg, sizeof(flags))) | 1071 | if (copy_from_user(&flags, arg, sizeof(flags))) |
1068 | return -EFAULT; | 1072 | return -EFAULT; |
1069 | 1073 | ||
1070 | if (flags & ~BTRFS_SUBVOL_CREATE_ASYNC) | 1074 | if (flags & BTRFS_SUBVOL_CREATE_ASYNC) |
1071 | return -EINVAL; | 1075 | return -EINVAL; |
1072 | 1076 | ||
1073 | if (flags & ~BTRFS_SUBVOL_RDONLY) | 1077 | if (flags & ~BTRFS_SUBVOL_RDONLY) |
1074 | return -EOPNOTSUPP; | 1078 | return -EOPNOTSUPP; |
1075 | 1079 | ||
1080 | if (!is_owner_or_cap(inode)) | ||
1081 | return -EACCES; | ||
1082 | |||
1076 | down_write(&root->fs_info->subvol_sem); | 1083 | down_write(&root->fs_info->subvol_sem); |
1077 | 1084 | ||
1078 | /* nothing to do */ | 1085 | /* nothing to do */ |
@@ -1093,7 +1100,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file, | |||
1093 | goto out_reset; | 1100 | goto out_reset; |
1094 | } | 1101 | } |
1095 | 1102 | ||
1096 | ret = btrfs_update_root(trans, root, | 1103 | ret = btrfs_update_root(trans, root->fs_info->tree_root, |
1097 | &root->root_key, &root->root_item); | 1104 | &root->root_key, &root->root_item); |
1098 | 1105 | ||
1099 | btrfs_commit_transaction(trans, root); | 1106 | btrfs_commit_transaction(trans, root); |
@@ -1898,7 +1905,10 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, | |||
1898 | 1905 | ||
1899 | memcpy(&new_key, &key, sizeof(new_key)); | 1906 | memcpy(&new_key, &key, sizeof(new_key)); |
1900 | new_key.objectid = inode->i_ino; | 1907 | new_key.objectid = inode->i_ino; |
1901 | new_key.offset = key.offset + destoff - off; | 1908 | if (off <= key.offset) |
1909 | new_key.offset = key.offset + destoff - off; | ||
1910 | else | ||
1911 | new_key.offset = destoff; | ||
1902 | 1912 | ||
1903 | trans = btrfs_start_transaction(root, 1); | 1913 | trans = btrfs_start_transaction(root, 1); |
1904 | if (IS_ERR(trans)) { | 1914 | if (IS_ERR(trans)) { |
@@ -2082,7 +2092,7 @@ static long btrfs_ioctl_trans_start(struct file *file) | |||
2082 | 2092 | ||
2083 | ret = -ENOMEM; | 2093 | ret = -ENOMEM; |
2084 | trans = btrfs_start_ioctl_transaction(root, 0); | 2094 | trans = btrfs_start_ioctl_transaction(root, 0); |
2085 | if (!trans) | 2095 | if (IS_ERR(trans)) |
2086 | goto out_drop; | 2096 | goto out_drop; |
2087 | 2097 | ||
2088 | file->private_data = trans; | 2098 | file->private_data = trans; |
@@ -2138,9 +2148,9 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp) | |||
2138 | path->leave_spinning = 1; | 2148 | path->leave_spinning = 1; |
2139 | 2149 | ||
2140 | trans = btrfs_start_transaction(root, 1); | 2150 | trans = btrfs_start_transaction(root, 1); |
2141 | if (!trans) { | 2151 | if (IS_ERR(trans)) { |
2142 | btrfs_free_path(path); | 2152 | btrfs_free_path(path); |
2143 | return -ENOMEM; | 2153 | return PTR_ERR(trans); |
2144 | } | 2154 | } |
2145 | 2155 | ||
2146 | dir_id = btrfs_super_root_dir(&root->fs_info->super_copy); | 2156 | dir_id = btrfs_super_root_dir(&root->fs_info->super_copy); |
@@ -2201,7 +2211,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) | |||
2201 | int num_types = 4; | 2211 | int num_types = 4; |
2202 | int alloc_size; | 2212 | int alloc_size; |
2203 | int ret = 0; | 2213 | int ret = 0; |
2204 | int slot_count = 0; | 2214 | u64 slot_count = 0; |
2205 | int i, c; | 2215 | int i, c; |
2206 | 2216 | ||
2207 | if (copy_from_user(&space_args, | 2217 | if (copy_from_user(&space_args, |
@@ -2240,7 +2250,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) | |||
2240 | goto out; | 2250 | goto out; |
2241 | } | 2251 | } |
2242 | 2252 | ||
2243 | slot_count = min_t(int, space_args.space_slots, slot_count); | 2253 | slot_count = min_t(u64, space_args.space_slots, slot_count); |
2244 | 2254 | ||
2245 | alloc_size = sizeof(*dest) * slot_count; | 2255 | alloc_size = sizeof(*dest) * slot_count; |
2246 | 2256 | ||
@@ -2260,6 +2270,9 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) | |||
2260 | for (i = 0; i < num_types; i++) { | 2270 | for (i = 0; i < num_types; i++) { |
2261 | struct btrfs_space_info *tmp; | 2271 | struct btrfs_space_info *tmp; |
2262 | 2272 | ||
2273 | if (!slot_count) | ||
2274 | break; | ||
2275 | |||
2263 | info = NULL; | 2276 | info = NULL; |
2264 | rcu_read_lock(); | 2277 | rcu_read_lock(); |
2265 | list_for_each_entry_rcu(tmp, &root->fs_info->space_info, | 2278 | list_for_each_entry_rcu(tmp, &root->fs_info->space_info, |
@@ -2281,7 +2294,10 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) | |||
2281 | memcpy(dest, &space, sizeof(space)); | 2294 | memcpy(dest, &space, sizeof(space)); |
2282 | dest++; | 2295 | dest++; |
2283 | space_args.total_spaces++; | 2296 | space_args.total_spaces++; |
2297 | slot_count--; | ||
2284 | } | 2298 | } |
2299 | if (!slot_count) | ||
2300 | break; | ||
2285 | } | 2301 | } |
2286 | up_read(&info->groups_sem); | 2302 | up_read(&info->groups_sem); |
2287 | } | 2303 | } |
@@ -2334,6 +2350,8 @@ static noinline long btrfs_ioctl_start_sync(struct file *file, void __user *argp | |||
2334 | u64 transid; | 2350 | u64 transid; |
2335 | 2351 | ||
2336 | trans = btrfs_start_transaction(root, 0); | 2352 | trans = btrfs_start_transaction(root, 0); |
2353 | if (IS_ERR(trans)) | ||
2354 | return PTR_ERR(trans); | ||
2337 | transid = trans->transid; | 2355 | transid = trans->transid; |
2338 | btrfs_commit_transaction_async(trans, root, 0); | 2356 | btrfs_commit_transaction_async(trans, root, 0); |
2339 | 2357 | ||
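Two of the ioctl.c changes above are about unsigned arithmetic: slot_count becomes u64 so a large user-supplied space_slots is no longer truncated through min_t(int, ...), and the clone path only computes key.offset + destoff - off when off <= key.offset, avoiding a subtraction that can wrap below zero. A tiny sketch of the latter, with made-up values:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t key_offset = 0, destoff = 4096, off = 8192;
		uint64_t wrong = key_offset + destoff - off;            /* wraps */
		uint64_t right = off <= key_offset ? key_offset + destoff - off
						   : destoff;

		printf("unchecked: %llu\n", (unsigned long long)wrong); /* enormous */
		printf("guarded:   %llu\n", (unsigned long long)right); /* 4096 */
		return 0;
	}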
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index cc9b450399df..a178f5ebea78 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c | |||
@@ -280,6 +280,7 @@ static int lzo_decompress_biovec(struct list_head *ws, | |||
280 | unsigned long tot_out; | 280 | unsigned long tot_out; |
281 | unsigned long tot_len; | 281 | unsigned long tot_len; |
282 | char *buf; | 282 | char *buf; |
283 | bool may_late_unmap, need_unmap; | ||
283 | 284 | ||
284 | data_in = kmap(pages_in[0]); | 285 | data_in = kmap(pages_in[0]); |
285 | tot_len = read_compress_length(data_in); | 286 | tot_len = read_compress_length(data_in); |
@@ -300,11 +301,13 @@ static int lzo_decompress_biovec(struct list_head *ws, | |||
300 | 301 | ||
301 | tot_in += in_len; | 302 | tot_in += in_len; |
302 | working_bytes = in_len; | 303 | working_bytes = in_len; |
304 | may_late_unmap = need_unmap = false; | ||
303 | 305 | ||
304 | /* fast path: avoid using the working buffer */ | 306 | /* fast path: avoid using the working buffer */ |
305 | if (in_page_bytes_left >= in_len) { | 307 | if (in_page_bytes_left >= in_len) { |
306 | buf = data_in + in_offset; | 308 | buf = data_in + in_offset; |
307 | bytes = in_len; | 309 | bytes = in_len; |
310 | may_late_unmap = true; | ||
308 | goto cont; | 311 | goto cont; |
309 | } | 312 | } |
310 | 313 | ||
@@ -329,14 +332,17 @@ cont: | |||
329 | if (working_bytes == 0 && tot_in >= tot_len) | 332 | if (working_bytes == 0 && tot_in >= tot_len) |
330 | break; | 333 | break; |
331 | 334 | ||
332 | kunmap(pages_in[page_in_index]); | 335 | if (page_in_index + 1 >= total_pages_in) { |
333 | page_in_index++; | ||
334 | if (page_in_index >= total_pages_in) { | ||
335 | ret = -1; | 336 | ret = -1; |
336 | data_in = NULL; | ||
337 | goto done; | 337 | goto done; |
338 | } | 338 | } |
339 | data_in = kmap(pages_in[page_in_index]); | 339 | |
340 | if (may_late_unmap) | ||
341 | need_unmap = true; | ||
342 | else | ||
343 | kunmap(pages_in[page_in_index]); | ||
344 | |||
345 | data_in = kmap(pages_in[++page_in_index]); | ||
340 | 346 | ||
341 | in_page_bytes_left = PAGE_CACHE_SIZE; | 347 | in_page_bytes_left = PAGE_CACHE_SIZE; |
342 | in_offset = 0; | 348 | in_offset = 0; |
@@ -346,6 +352,8 @@ cont: | |||
346 | out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE); | 352 | out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE); |
347 | ret = lzo1x_decompress_safe(buf, in_len, workspace->buf, | 353 | ret = lzo1x_decompress_safe(buf, in_len, workspace->buf, |
348 | &out_len); | 354 | &out_len); |
355 | if (need_unmap) | ||
356 | kunmap(pages_in[page_in_index - 1]); | ||
349 | if (ret != LZO_E_OK) { | 357 | if (ret != LZO_E_OK) { |
350 | printk(KERN_WARNING "btrfs decompress failed\n"); | 358 | printk(KERN_WARNING "btrfs decompress failed\n"); |
351 | ret = -1; | 359 | ret = -1; |
@@ -363,8 +371,7 @@ cont: | |||
363 | break; | 371 | break; |
364 | } | 372 | } |
365 | done: | 373 | done: |
366 | if (data_in) | 374 | kunmap(pages_in[page_in_index]); |
367 | kunmap(pages_in[page_in_index]); | ||
368 | return ret; | 375 | return ret; |
369 | } | 376 | } |
370 | 377 | ||
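The lzo.c rework above is an ordering fix: on the fast path `buf` points straight into the previous input page's kmap(), so that page must not be kunmap()ed until lzo1x_decompress_safe() has read from it; the new may_late_unmap/need_unmap flags defer the unmap accordingly. A deliberately tiny user-space sketch of the hazard, with free() standing in for kunmap():

	#include <stdlib.h>
	#include <string.h>

	static size_t decompress(const char *in, size_t len, char *out)
	{
		memcpy(out, in, len);   /* stand-in for lzo1x_decompress_safe() */
		return len;
	}

	int main(void)
	{
		char *page = malloc(64);
		char out[64];
		const char *buf;

		memset(page, 'x', 64);
		buf = page;                    /* fast path: borrow the mapping */

		/* wrong order: free(page); decompress(buf, 64, out);  -- use after free */
		decompress(buf, 64, out);      /* consume first ... */
		free(page);                    /* ... then release the mapping */
		return 0;
	}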
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 2b61e1ddcd99..083a55477375 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c | |||
@@ -141,7 +141,7 @@ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree, | |||
141 | u64 file_offset) | 141 | u64 file_offset) |
142 | { | 142 | { |
143 | struct rb_root *root = &tree->tree; | 143 | struct rb_root *root = &tree->tree; |
144 | struct rb_node *prev; | 144 | struct rb_node *prev = NULL; |
145 | struct rb_node *ret; | 145 | struct rb_node *ret; |
146 | struct btrfs_ordered_extent *entry; | 146 | struct btrfs_ordered_extent *entry; |
147 | 147 | ||
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c index 0d126be22b63..fb2605d998e9 100644 --- a/fs/btrfs/print-tree.c +++ b/fs/btrfs/print-tree.c | |||
@@ -260,6 +260,7 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l) | |||
260 | #else | 260 | #else |
261 | BUG(); | 261 | BUG(); |
262 | #endif | 262 | #endif |
263 | break; | ||
263 | case BTRFS_BLOCK_GROUP_ITEM_KEY: | 264 | case BTRFS_BLOCK_GROUP_ITEM_KEY: |
264 | bi = btrfs_item_ptr(l, i, | 265 | bi = btrfs_item_ptr(l, i, |
265 | struct btrfs_block_group_item); | 266 | struct btrfs_block_group_item); |
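The one-line print-tree.c change adds a `break` after the #ifdef'd case, which otherwise fell straight into the BTRFS_BLOCK_GROUP_ITEM_KEY branch. A trivial illustration of the fall-through pattern:

	#include <stdio.h>

	static void print_item(int type)
	{
		switch (type) {
		case 1:
			puts("first item type");
			break;          /* without this break, case 1 also prints below */
		case 2:
			puts("block group item");
			break;
		}
	}

	int main(void)
	{
		print_item(1);          /* prints only "first item type" now */
		return 0;
	}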
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 045c9c2b2d7e..31ade5802ae8 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c | |||
@@ -1157,6 +1157,7 @@ static int clone_backref_node(struct btrfs_trans_handle *trans, | |||
1157 | new_node->bytenr = dest->node->start; | 1157 | new_node->bytenr = dest->node->start; |
1158 | new_node->level = node->level; | 1158 | new_node->level = node->level; |
1159 | new_node->lowest = node->lowest; | 1159 | new_node->lowest = node->lowest; |
1160 | new_node->checked = 1; | ||
1160 | new_node->root = dest; | 1161 | new_node->root = dest; |
1161 | 1162 | ||
1162 | if (!node->lowest) { | 1163 | if (!node->lowest) { |
@@ -2028,6 +2029,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, | |||
2028 | 2029 | ||
2029 | while (1) { | 2030 | while (1) { |
2030 | trans = btrfs_start_transaction(root, 0); | 2031 | trans = btrfs_start_transaction(root, 0); |
2032 | BUG_ON(IS_ERR(trans)); | ||
2031 | trans->block_rsv = rc->block_rsv; | 2033 | trans->block_rsv = rc->block_rsv; |
2032 | 2034 | ||
2033 | ret = btrfs_block_rsv_check(trans, root, rc->block_rsv, | 2035 | ret = btrfs_block_rsv_check(trans, root, rc->block_rsv, |
@@ -2147,6 +2149,12 @@ again: | |||
2147 | } | 2149 | } |
2148 | 2150 | ||
2149 | trans = btrfs_join_transaction(rc->extent_root, 1); | 2151 | trans = btrfs_join_transaction(rc->extent_root, 1); |
2152 | if (IS_ERR(trans)) { | ||
2153 | if (!err) | ||
2154 | btrfs_block_rsv_release(rc->extent_root, | ||
2155 | rc->block_rsv, num_bytes); | ||
2156 | return PTR_ERR(trans); | ||
2157 | } | ||
2150 | 2158 | ||
2151 | if (!err) { | 2159 | if (!err) { |
2152 | if (num_bytes != rc->merging_rsv_size) { | 2160 | if (num_bytes != rc->merging_rsv_size) { |
@@ -3222,6 +3230,7 @@ truncate: | |||
3222 | trans = btrfs_join_transaction(root, 0); | 3230 | trans = btrfs_join_transaction(root, 0); |
3223 | if (IS_ERR(trans)) { | 3231 | if (IS_ERR(trans)) { |
3224 | btrfs_free_path(path); | 3232 | btrfs_free_path(path); |
3233 | ret = PTR_ERR(trans); | ||
3225 | goto out; | 3234 | goto out; |
3226 | } | 3235 | } |
3227 | 3236 | ||
@@ -3628,6 +3637,7 @@ int prepare_to_relocate(struct reloc_control *rc) | |||
3628 | set_reloc_control(rc); | 3637 | set_reloc_control(rc); |
3629 | 3638 | ||
3630 | trans = btrfs_join_transaction(rc->extent_root, 1); | 3639 | trans = btrfs_join_transaction(rc->extent_root, 1); |
3640 | BUG_ON(IS_ERR(trans)); | ||
3631 | btrfs_commit_transaction(trans, rc->extent_root); | 3641 | btrfs_commit_transaction(trans, rc->extent_root); |
3632 | return 0; | 3642 | return 0; |
3633 | } | 3643 | } |
@@ -3644,6 +3654,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) | |||
3644 | u32 item_size; | 3654 | u32 item_size; |
3645 | int ret; | 3655 | int ret; |
3646 | int err = 0; | 3656 | int err = 0; |
3657 | int progress = 0; | ||
3647 | 3658 | ||
3648 | path = btrfs_alloc_path(); | 3659 | path = btrfs_alloc_path(); |
3649 | if (!path) | 3660 | if (!path) |
@@ -3656,8 +3667,10 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) | |||
3656 | } | 3667 | } |
3657 | 3668 | ||
3658 | while (1) { | 3669 | while (1) { |
3670 | progress++; | ||
3659 | trans = btrfs_start_transaction(rc->extent_root, 0); | 3671 | trans = btrfs_start_transaction(rc->extent_root, 0); |
3660 | 3672 | BUG_ON(IS_ERR(trans)); | |
3673 | restart: | ||
3661 | if (update_backref_cache(trans, &rc->backref_cache)) { | 3674 | if (update_backref_cache(trans, &rc->backref_cache)) { |
3662 | btrfs_end_transaction(trans, rc->extent_root); | 3675 | btrfs_end_transaction(trans, rc->extent_root); |
3663 | continue; | 3676 | continue; |
@@ -3770,6 +3783,15 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) | |||
3770 | } | 3783 | } |
3771 | } | 3784 | } |
3772 | } | 3785 | } |
3786 | if (trans && progress && err == -ENOSPC) { | ||
3787 | ret = btrfs_force_chunk_alloc(trans, rc->extent_root, | ||
3788 | rc->block_group->flags); | ||
3789 | if (ret == 0) { | ||
3790 | err = 0; | ||
3791 | progress = 0; | ||
3792 | goto restart; | ||
3793 | } | ||
3794 | } | ||
3773 | 3795 | ||
3774 | btrfs_release_path(rc->extent_root, path); | 3796 | btrfs_release_path(rc->extent_root, path); |
3775 | clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, | 3797 | clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, |
@@ -3804,7 +3826,10 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) | |||
3804 | 3826 | ||
3805 | /* get rid of pinned extents */ | 3827 | /* get rid of pinned extents */ |
3806 | trans = btrfs_join_transaction(rc->extent_root, 1); | 3828 | trans = btrfs_join_transaction(rc->extent_root, 1); |
3807 | btrfs_commit_transaction(trans, rc->extent_root); | 3829 | if (IS_ERR(trans)) |
3830 | err = PTR_ERR(trans); | ||
3831 | else | ||
3832 | btrfs_commit_transaction(trans, rc->extent_root); | ||
3808 | out_free: | 3833 | out_free: |
3809 | btrfs_free_block_rsv(rc->extent_root, rc->block_rsv); | 3834 | btrfs_free_block_rsv(rc->extent_root, rc->block_rsv); |
3810 | btrfs_free_path(path); | 3835 | btrfs_free_path(path); |
@@ -4022,6 +4047,7 @@ static noinline_for_stack int mark_garbage_root(struct btrfs_root *root) | |||
4022 | int ret; | 4047 | int ret; |
4023 | 4048 | ||
4024 | trans = btrfs_start_transaction(root->fs_info->tree_root, 0); | 4049 | trans = btrfs_start_transaction(root->fs_info->tree_root, 0); |
4050 | BUG_ON(IS_ERR(trans)); | ||
4025 | 4051 | ||
4026 | memset(&root->root_item.drop_progress, 0, | 4052 | memset(&root->root_item.drop_progress, 0, |
4027 | sizeof(root->root_item.drop_progress)); | 4053 | sizeof(root->root_item.drop_progress)); |
@@ -4125,6 +4151,11 @@ int btrfs_recover_relocation(struct btrfs_root *root) | |||
4125 | set_reloc_control(rc); | 4151 | set_reloc_control(rc); |
4126 | 4152 | ||
4127 | trans = btrfs_join_transaction(rc->extent_root, 1); | 4153 | trans = btrfs_join_transaction(rc->extent_root, 1); |
4154 | if (IS_ERR(trans)) { | ||
4155 | unset_reloc_control(rc); | ||
4156 | err = PTR_ERR(trans); | ||
4157 | goto out_free; | ||
4158 | } | ||
4128 | 4159 | ||
4129 | rc->merge_reloc_tree = 1; | 4160 | rc->merge_reloc_tree = 1; |
4130 | 4161 | ||
@@ -4154,9 +4185,13 @@ int btrfs_recover_relocation(struct btrfs_root *root) | |||
4154 | unset_reloc_control(rc); | 4185 | unset_reloc_control(rc); |
4155 | 4186 | ||
4156 | trans = btrfs_join_transaction(rc->extent_root, 1); | 4187 | trans = btrfs_join_transaction(rc->extent_root, 1); |
4157 | btrfs_commit_transaction(trans, rc->extent_root); | 4188 | if (IS_ERR(trans)) |
4158 | out: | 4189 | err = PTR_ERR(trans); |
4190 | else | ||
4191 | btrfs_commit_transaction(trans, rc->extent_root); | ||
4192 | out_free: | ||
4159 | kfree(rc); | 4193 | kfree(rc); |
4194 | out: | ||
4160 | while (!list_empty(&reloc_roots)) { | 4195 | while (!list_empty(&reloc_roots)) { |
4161 | reloc_root = list_entry(reloc_roots.next, | 4196 | reloc_root = list_entry(reloc_roots.next, |
4162 | struct btrfs_root, root_list); | 4197 | struct btrfs_root, root_list); |
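Besides the IS_ERR() conversions, relocation.c gains a retry: if a relocation pass fails with -ENOSPC after making progress, it forces a chunk allocation for the block group's profile and jumps back to `restart` instead of bailing out. A very rough user-space sketch of that shape, with invented stubs (the real code additionally gates the retry on the `progress` counter and an open transaction):

	#include <errno.h>
	#include <stdio.h>

	static int passes;
	static int do_pass(void)           { return passes++ == 0 ? -ENOSPC : 0; }
	static int force_chunk_alloc(void) { return 0; }   /* pretend it succeeds */

	static int relocate(void)
	{
		int err;

	restart:
		err = do_pass();
		if (err == -ENOSPC && force_chunk_alloc() == 0) {
			err = 0;
			goto restart;   /* one more pass now that space exists */
		}
		return err;
	}

	int main(void)
	{
		printf("relocate: %d\n", relocate());   /* 0: second pass succeeded */
		return 0;
	}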
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index b2130c46fdb5..d39a9895d932 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c | |||
@@ -155,7 +155,8 @@ enum { | |||
155 | Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress, | 155 | Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress, |
156 | Opt_compress_type, Opt_compress_force, Opt_compress_force_type, | 156 | Opt_compress_type, Opt_compress_force, Opt_compress_force_type, |
157 | Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, | 157 | Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, |
158 | Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, Opt_err, | 158 | Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, |
159 | Opt_enospc_debug, Opt_err, | ||
159 | }; | 160 | }; |
160 | 161 | ||
161 | static match_table_t tokens = { | 162 | static match_table_t tokens = { |
@@ -184,6 +185,7 @@ static match_table_t tokens = { | |||
184 | {Opt_space_cache, "space_cache"}, | 185 | {Opt_space_cache, "space_cache"}, |
185 | {Opt_clear_cache, "clear_cache"}, | 186 | {Opt_clear_cache, "clear_cache"}, |
186 | {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"}, | 187 | {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"}, |
188 | {Opt_enospc_debug, "enospc_debug"}, | ||
187 | {Opt_err, NULL}, | 189 | {Opt_err, NULL}, |
188 | }; | 190 | }; |
189 | 191 | ||
@@ -358,6 +360,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) | |||
358 | case Opt_user_subvol_rm_allowed: | 360 | case Opt_user_subvol_rm_allowed: |
359 | btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED); | 361 | btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED); |
360 | break; | 362 | break; |
363 | case Opt_enospc_debug: | ||
364 | btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG); | ||
365 | break; | ||
361 | case Opt_err: | 366 | case Opt_err: |
362 | printk(KERN_INFO "btrfs: unrecognized mount option " | 367 | printk(KERN_INFO "btrfs: unrecognized mount option " |
363 | "'%s'\n", p); | 368 | "'%s'\n", p); |
@@ -383,7 +388,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags, | |||
383 | struct btrfs_fs_devices **fs_devices) | 388 | struct btrfs_fs_devices **fs_devices) |
384 | { | 389 | { |
385 | substring_t args[MAX_OPT_ARGS]; | 390 | substring_t args[MAX_OPT_ARGS]; |
386 | char *opts, *p; | 391 | char *opts, *orig, *p; |
387 | int error = 0; | 392 | int error = 0; |
388 | int intarg; | 393 | int intarg; |
389 | 394 | ||
@@ -397,6 +402,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags, | |||
397 | opts = kstrdup(options, GFP_KERNEL); | 402 | opts = kstrdup(options, GFP_KERNEL); |
398 | if (!opts) | 403 | if (!opts) |
399 | return -ENOMEM; | 404 | return -ENOMEM; |
405 | orig = opts; | ||
400 | 406 | ||
401 | while ((p = strsep(&opts, ",")) != NULL) { | 407 | while ((p = strsep(&opts, ",")) != NULL) { |
402 | int token; | 408 | int token; |
@@ -432,7 +438,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags, | |||
432 | } | 438 | } |
433 | 439 | ||
434 | out_free_opts: | 440 | out_free_opts: |
435 | kfree(opts); | 441 | kfree(orig); |
436 | out: | 442 | out: |
437 | /* | 443 | /* |
438 | * If no subvolume name is specified we use the default one. Allocate | 444 | * If no subvolume name is specified we use the default one. Allocate |
@@ -623,6 +629,8 @@ int btrfs_sync_fs(struct super_block *sb, int wait) | |||
623 | btrfs_wait_ordered_extents(root, 0, 0); | 629 | btrfs_wait_ordered_extents(root, 0, 0); |
624 | 630 | ||
625 | trans = btrfs_start_transaction(root, 0); | 631 | trans = btrfs_start_transaction(root, 0); |
632 | if (IS_ERR(trans)) | ||
633 | return PTR_ERR(trans); | ||
626 | ret = btrfs_commit_transaction(trans, root); | 634 | ret = btrfs_commit_transaction(trans, root); |
627 | return ret; | 635 | return ret; |
628 | } | 636 | } |
@@ -761,6 +769,8 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, | |||
761 | } | 769 | } |
762 | 770 | ||
763 | btrfs_close_devices(fs_devices); | 771 | btrfs_close_devices(fs_devices); |
772 | kfree(fs_info); | ||
773 | kfree(tree_root); | ||
764 | } else { | 774 | } else { |
765 | char b[BDEVNAME_SIZE]; | 775 | char b[BDEVNAME_SIZE]; |
766 | 776 | ||
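The super.c option-parsing fix is a classic strsep() pitfall: strsep() advances the pointer it is given, so the eventual kfree() must use the address kstrdup() originally returned (saved in `orig`), not the walked pointer. A user-space sketch with free() standing in for kfree():

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		char *opts = strdup("subvol=a,device=/dev/sdb");
		char *orig = opts;              /* remember the allocation */
		char *p;

		while ((p = strsep(&opts, ",")) != NULL)
			printf("option: %s\n", p);

		/* free(opts) here would pass NULL (strsep exhausted the string)
		 * and the original buffer would leak; mid-parse exits would hand
		 * free() a pointer into the middle of the buffer. */
		free(orig);
		return 0;
	}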
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index bae5c7b8bbe2..3d73c8d93bbb 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
@@ -1161,6 +1161,11 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, | |||
1161 | INIT_DELAYED_WORK(&ac->work, do_async_commit); | 1161 | INIT_DELAYED_WORK(&ac->work, do_async_commit); |
1162 | ac->root = root; | 1162 | ac->root = root; |
1163 | ac->newtrans = btrfs_join_transaction(root, 0); | 1163 | ac->newtrans = btrfs_join_transaction(root, 0); |
1164 | if (IS_ERR(ac->newtrans)) { | ||
1165 | int err = PTR_ERR(ac->newtrans); | ||
1166 | kfree(ac); | ||
1167 | return err; | ||
1168 | } | ||
1164 | 1169 | ||
1165 | /* take transaction reference */ | 1170 | /* take transaction reference */ |
1166 | mutex_lock(&root->fs_info->trans_mutex); | 1171 | mutex_lock(&root->fs_info->trans_mutex); |
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 054744ac5719..a4bbb854dfd2 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c | |||
@@ -338,6 +338,12 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans, | |||
338 | } | 338 | } |
339 | dst_copy = kmalloc(item_size, GFP_NOFS); | 339 | dst_copy = kmalloc(item_size, GFP_NOFS); |
340 | src_copy = kmalloc(item_size, GFP_NOFS); | 340 | src_copy = kmalloc(item_size, GFP_NOFS); |
341 | if (!dst_copy || !src_copy) { | ||
342 | btrfs_release_path(root, path); | ||
343 | kfree(dst_copy); | ||
344 | kfree(src_copy); | ||
345 | return -ENOMEM; | ||
346 | } | ||
341 | 347 | ||
342 | read_extent_buffer(eb, src_copy, src_ptr, item_size); | 348 | read_extent_buffer(eb, src_copy, src_ptr, item_size); |
343 | 349 | ||
@@ -665,6 +671,9 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans, | |||
665 | btrfs_dir_item_key_to_cpu(leaf, di, &location); | 671 | btrfs_dir_item_key_to_cpu(leaf, di, &location); |
666 | name_len = btrfs_dir_name_len(leaf, di); | 672 | name_len = btrfs_dir_name_len(leaf, di); |
667 | name = kmalloc(name_len, GFP_NOFS); | 673 | name = kmalloc(name_len, GFP_NOFS); |
674 | if (!name) | ||
675 | return -ENOMEM; | ||
676 | |||
668 | read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len); | 677 | read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len); |
669 | btrfs_release_path(root, path); | 678 | btrfs_release_path(root, path); |
670 | 679 | ||
@@ -744,6 +753,9 @@ static noinline int backref_in_log(struct btrfs_root *log, | |||
744 | int match = 0; | 753 | int match = 0; |
745 | 754 | ||
746 | path = btrfs_alloc_path(); | 755 | path = btrfs_alloc_path(); |
756 | if (!path) | ||
757 | return -ENOMEM; | ||
758 | |||
747 | ret = btrfs_search_slot(NULL, log, key, path, 0, 0); | 759 | ret = btrfs_search_slot(NULL, log, key, path, 0, 0); |
748 | if (ret != 0) | 760 | if (ret != 0) |
749 | goto out; | 761 | goto out; |
@@ -967,6 +979,8 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, | |||
967 | key.offset = (u64)-1; | 979 | key.offset = (u64)-1; |
968 | 980 | ||
969 | path = btrfs_alloc_path(); | 981 | path = btrfs_alloc_path(); |
982 | if (!path) | ||
983 | return -ENOMEM; | ||
970 | 984 | ||
971 | while (1) { | 985 | while (1) { |
972 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | 986 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
@@ -1178,6 +1192,9 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, | |||
1178 | 1192 | ||
1179 | name_len = btrfs_dir_name_len(eb, di); | 1193 | name_len = btrfs_dir_name_len(eb, di); |
1180 | name = kmalloc(name_len, GFP_NOFS); | 1194 | name = kmalloc(name_len, GFP_NOFS); |
1195 | if (!name) | ||
1196 | return -ENOMEM; | ||
1197 | |||
1181 | log_type = btrfs_dir_type(eb, di); | 1198 | log_type = btrfs_dir_type(eb, di); |
1182 | read_extent_buffer(eb, name, (unsigned long)(di + 1), | 1199 | read_extent_buffer(eb, name, (unsigned long)(di + 1), |
1183 | name_len); | 1200 | name_len); |
@@ -1692,6 +1709,8 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, | |||
1692 | root_owner = btrfs_header_owner(parent); | 1709 | root_owner = btrfs_header_owner(parent); |
1693 | 1710 | ||
1694 | next = btrfs_find_create_tree_block(root, bytenr, blocksize); | 1711 | next = btrfs_find_create_tree_block(root, bytenr, blocksize); |
1712 | if (!next) | ||
1713 | return -ENOMEM; | ||
1695 | 1714 | ||
1696 | if (*level == 1) { | 1715 | if (*level == 1) { |
1697 | wc->process_func(root, next, wc, ptr_gen); | 1716 | wc->process_func(root, next, wc, ptr_gen); |
@@ -2032,6 +2051,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, | |||
2032 | wait_log_commit(trans, log_root_tree, | 2051 | wait_log_commit(trans, log_root_tree, |
2033 | log_root_tree->log_transid); | 2052 | log_root_tree->log_transid); |
2034 | mutex_unlock(&log_root_tree->log_mutex); | 2053 | mutex_unlock(&log_root_tree->log_mutex); |
2054 | ret = 0; | ||
2035 | goto out; | 2055 | goto out; |
2036 | } | 2056 | } |
2037 | atomic_set(&log_root_tree->log_commit[index2], 1); | 2057 | atomic_set(&log_root_tree->log_commit[index2], 1); |
@@ -2096,7 +2116,7 @@ out: | |||
2096 | smp_mb(); | 2116 | smp_mb(); |
2097 | if (waitqueue_active(&root->log_commit_wait[index1])) | 2117 | if (waitqueue_active(&root->log_commit_wait[index1])) |
2098 | wake_up(&root->log_commit_wait[index1]); | 2118 | wake_up(&root->log_commit_wait[index1]); |
2099 | return 0; | 2119 | return ret; |
2100 | } | 2120 | } |
2101 | 2121 | ||
2102 | static void free_log_tree(struct btrfs_trans_handle *trans, | 2122 | static void free_log_tree(struct btrfs_trans_handle *trans, |
@@ -2194,6 +2214,9 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, | |||
2194 | 2214 | ||
2195 | log = root->log_root; | 2215 | log = root->log_root; |
2196 | path = btrfs_alloc_path(); | 2216 | path = btrfs_alloc_path(); |
2217 | if (!path) | ||
2218 | return -ENOMEM; | ||
2219 | |||
2197 | di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino, | 2220 | di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino, |
2198 | name, name_len, -1); | 2221 | name, name_len, -1); |
2199 | if (IS_ERR(di)) { | 2222 | if (IS_ERR(di)) { |
@@ -2594,6 +2617,9 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, | |||
2594 | 2617 | ||
2595 | ins_data = kmalloc(nr * sizeof(struct btrfs_key) + | 2618 | ins_data = kmalloc(nr * sizeof(struct btrfs_key) + |
2596 | nr * sizeof(u32), GFP_NOFS); | 2619 | nr * sizeof(u32), GFP_NOFS); |
2620 | if (!ins_data) | ||
2621 | return -ENOMEM; | ||
2622 | |||
2597 | ins_sizes = (u32 *)ins_data; | 2623 | ins_sizes = (u32 *)ins_data; |
2598 | ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32)); | 2624 | ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32)); |
2599 | 2625 | ||
@@ -2725,7 +2751,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, | |||
2725 | log = root->log_root; | 2751 | log = root->log_root; |
2726 | 2752 | ||
2727 | path = btrfs_alloc_path(); | 2753 | path = btrfs_alloc_path(); |
2754 | if (!path) | ||
2755 | return -ENOMEM; | ||
2728 | dst_path = btrfs_alloc_path(); | 2756 | dst_path = btrfs_alloc_path(); |
2757 | if (!dst_path) { | ||
2758 | btrfs_free_path(path); | ||
2759 | return -ENOMEM; | ||
2760 | } | ||
2729 | 2761 | ||
2730 | min_key.objectid = inode->i_ino; | 2762 | min_key.objectid = inode->i_ino; |
2731 | min_key.type = BTRFS_INODE_ITEM_KEY; | 2763 | min_key.type = BTRFS_INODE_ITEM_KEY; |
@@ -3080,6 +3112,7 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree) | |||
3080 | BUG_ON(!path); | 3112 | BUG_ON(!path); |
3081 | 3113 | ||
3082 | trans = btrfs_start_transaction(fs_info->tree_root, 0); | 3114 | trans = btrfs_start_transaction(fs_info->tree_root, 0); |
3115 | BUG_ON(IS_ERR(trans)); | ||
3083 | 3116 | ||
3084 | wc.trans = trans; | 3117 | wc.trans = trans; |
3085 | wc.pin = 1; | 3118 | wc.pin = 1; |
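The tree-log hunks above all add the same defensive pattern: every kmalloc(), btrfs_alloc_path() and btrfs_find_create_tree_block() result is now checked, and on failure anything already acquired (the search path, the second buffer) is released before returning -ENOMEM. A minimal user-space sketch of the two-buffer variant, with malloc()/free() standing in for the kernel allocators and the function name purely illustrative:

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    /* Allocate two scratch buffers; if either allocation fails, release
     * both (free(NULL) is a no-op) and report -ENOMEM, mirroring the
     * overwrite_item() change above. */
    static int copy_item(const void *src, size_t item_size, void **out)
    {
            void *dst_copy = malloc(item_size);
            void *src_copy = malloc(item_size);

            if (!dst_copy || !src_copy) {
                    free(dst_copy);
                    free(src_copy);
                    return -ENOMEM;
            }

            memcpy(src_copy, src, item_size);
            memcpy(dst_copy, src_copy, item_size);
            free(src_copy);
            *out = dst_copy;
            return 0;
    }

On success the caller owns *out and frees it when done; on failure nothing leaks and nothing stays held.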
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index d158530233b7..dd13eb81ee40 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
@@ -1213,6 +1213,10 @@ static int btrfs_rm_dev_item(struct btrfs_root *root, | |||
1213 | return -ENOMEM; | 1213 | return -ENOMEM; |
1214 | 1214 | ||
1215 | trans = btrfs_start_transaction(root, 0); | 1215 | trans = btrfs_start_transaction(root, 0); |
1216 | if (IS_ERR(trans)) { | ||
1217 | btrfs_free_path(path); | ||
1218 | return PTR_ERR(trans); | ||
1219 | } | ||
1216 | key.objectid = BTRFS_DEV_ITEMS_OBJECTID; | 1220 | key.objectid = BTRFS_DEV_ITEMS_OBJECTID; |
1217 | key.type = BTRFS_DEV_ITEM_KEY; | 1221 | key.type = BTRFS_DEV_ITEM_KEY; |
1218 | key.offset = device->devid; | 1222 | key.offset = device->devid; |
@@ -1334,11 +1338,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) | |||
1334 | 1338 | ||
1335 | ret = btrfs_shrink_device(device, 0); | 1339 | ret = btrfs_shrink_device(device, 0); |
1336 | if (ret) | 1340 | if (ret) |
1337 | goto error_brelse; | 1341 | goto error_undo; |
1338 | 1342 | ||
1339 | ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device); | 1343 | ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device); |
1340 | if (ret) | 1344 | if (ret) |
1341 | goto error_brelse; | 1345 | goto error_undo; |
1342 | 1346 | ||
1343 | device->in_fs_metadata = 0; | 1347 | device->in_fs_metadata = 0; |
1344 | 1348 | ||
@@ -1412,6 +1416,13 @@ out: | |||
1412 | mutex_unlock(&root->fs_info->volume_mutex); | 1416 | mutex_unlock(&root->fs_info->volume_mutex); |
1413 | mutex_unlock(&uuid_mutex); | 1417 | mutex_unlock(&uuid_mutex); |
1414 | return ret; | 1418 | return ret; |
1419 | error_undo: | ||
1420 | if (device->writeable) { | ||
1421 | list_add(&device->dev_alloc_list, | ||
1422 | &root->fs_info->fs_devices->alloc_list); | ||
1423 | root->fs_info->fs_devices->rw_devices++; | ||
1424 | } | ||
1425 | goto error_brelse; | ||
1415 | } | 1426 | } |
1416 | 1427 | ||
1417 | /* | 1428 | /* |
@@ -1601,11 +1612,19 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) | |||
1601 | 1612 | ||
1602 | ret = find_next_devid(root, &device->devid); | 1613 | ret = find_next_devid(root, &device->devid); |
1603 | if (ret) { | 1614 | if (ret) { |
1615 | kfree(device->name); | ||
1604 | kfree(device); | 1616 | kfree(device); |
1605 | goto error; | 1617 | goto error; |
1606 | } | 1618 | } |
1607 | 1619 | ||
1608 | trans = btrfs_start_transaction(root, 0); | 1620 | trans = btrfs_start_transaction(root, 0); |
1621 | if (IS_ERR(trans)) { | ||
1622 | kfree(device->name); | ||
1623 | kfree(device); | ||
1624 | ret = PTR_ERR(trans); | ||
1625 | goto error; | ||
1626 | } | ||
1627 | |||
1609 | lock_chunks(root); | 1628 | lock_chunks(root); |
1610 | 1629 | ||
1611 | device->writeable = 1; | 1630 | device->writeable = 1; |
@@ -1621,7 +1640,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) | |||
1621 | device->dev_root = root->fs_info->dev_root; | 1640 | device->dev_root = root->fs_info->dev_root; |
1622 | device->bdev = bdev; | 1641 | device->bdev = bdev; |
1623 | device->in_fs_metadata = 1; | 1642 | device->in_fs_metadata = 1; |
1624 | device->mode = 0; | 1643 | device->mode = FMODE_EXCL; |
1625 | set_blocksize(device->bdev, 4096); | 1644 | set_blocksize(device->bdev, 4096); |
1626 | 1645 | ||
1627 | if (seeding_dev) { | 1646 | if (seeding_dev) { |
@@ -1873,7 +1892,7 @@ static int btrfs_relocate_chunk(struct btrfs_root *root, | |||
1873 | return ret; | 1892 | return ret; |
1874 | 1893 | ||
1875 | trans = btrfs_start_transaction(root, 0); | 1894 | trans = btrfs_start_transaction(root, 0); |
1876 | BUG_ON(!trans); | 1895 | BUG_ON(IS_ERR(trans)); |
1877 | 1896 | ||
1878 | lock_chunks(root); | 1897 | lock_chunks(root); |
1879 | 1898 | ||
@@ -2047,7 +2066,7 @@ int btrfs_balance(struct btrfs_root *dev_root) | |||
2047 | BUG_ON(ret); | 2066 | BUG_ON(ret); |
2048 | 2067 | ||
2049 | trans = btrfs_start_transaction(dev_root, 0); | 2068 | trans = btrfs_start_transaction(dev_root, 0); |
2050 | BUG_ON(!trans); | 2069 | BUG_ON(IS_ERR(trans)); |
2051 | 2070 | ||
2052 | ret = btrfs_grow_device(trans, device, old_size); | 2071 | ret = btrfs_grow_device(trans, device, old_size); |
2053 | BUG_ON(ret); | 2072 | BUG_ON(ret); |
@@ -2213,6 +2232,11 @@ again: | |||
2213 | 2232 | ||
2214 | /* Shrinking succeeded, else we would be at "done". */ | 2233 | /* Shrinking succeeded, else we would be at "done". */ |
2215 | trans = btrfs_start_transaction(root, 0); | 2234 | trans = btrfs_start_transaction(root, 0); |
2235 | if (IS_ERR(trans)) { | ||
2236 | ret = PTR_ERR(trans); | ||
2237 | goto done; | ||
2238 | } | ||
2239 | |||
2216 | lock_chunks(root); | 2240 | lock_chunks(root); |
2217 | 2241 | ||
2218 | device->disk_total_bytes = new_size; | 2242 | device->disk_total_bytes = new_size; |
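Several of the volumes.c hunks switch callers of btrfs_start_transaction() from assuming a usable handle to treating the return value as an error pointer: check IS_ERR(), undo whatever was done so far (free the path, free the device name, or re-add the device to the allocation list via the new error_undo label), then propagate PTR_ERR(). A sketch of the error-pointer idiom with local stand-ins for the <linux/err.h> helpers; start_transaction() here is hypothetical:

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    /* User-space stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR. */
    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    struct trans_handle { int id; };

    /* Hypothetical allocator: returns a handle or an encoded errno. */
    static struct trans_handle *start_transaction(int fail)
    {
            static struct trans_handle t = { 1 };
            return fail ? ERR_PTR(-ENOMEM) : &t;
    }

    static int rm_dev_item(int fail)
    {
            struct trans_handle *trans = start_transaction(fail);

            if (IS_ERR(trans)) {
                    /* release anything acquired earlier, then propagate */
                    return (int)PTR_ERR(trans);
            }
            /* ... use the transaction ... */
            return 0;
    }

    int main(void)
    {
            printf("%d %d\n", rm_dev_item(0), rm_dev_item(1));
            return 0;
    }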
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 0bc68de8edd7..f0aef787a102 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c | |||
@@ -60,6 +60,7 @@ int ceph_init_dentry(struct dentry *dentry) | |||
60 | } | 60 | } |
61 | di->dentry = dentry; | 61 | di->dentry = dentry; |
62 | di->lease_session = NULL; | 62 | di->lease_session = NULL; |
63 | di->parent_inode = igrab(dentry->d_parent->d_inode); | ||
63 | dentry->d_fsdata = di; | 64 | dentry->d_fsdata = di; |
64 | dentry->d_time = jiffies; | 65 | dentry->d_time = jiffies; |
65 | ceph_dentry_lru_add(dentry); | 66 | ceph_dentry_lru_add(dentry); |
@@ -1033,7 +1034,7 @@ static void ceph_dentry_release(struct dentry *dentry) | |||
1033 | u64 snapid = CEPH_NOSNAP; | 1034 | u64 snapid = CEPH_NOSNAP; |
1034 | 1035 | ||
1035 | if (!IS_ROOT(dentry)) { | 1036 | if (!IS_ROOT(dentry)) { |
1036 | parent_inode = dentry->d_parent->d_inode; | 1037 | parent_inode = di->parent_inode; |
1037 | if (parent_inode) | 1038 | if (parent_inode) |
1038 | snapid = ceph_snap(parent_inode); | 1039 | snapid = ceph_snap(parent_inode); |
1039 | } | 1040 | } |
@@ -1058,6 +1059,8 @@ static void ceph_dentry_release(struct dentry *dentry) | |||
1058 | kmem_cache_free(ceph_dentry_cachep, di); | 1059 | kmem_cache_free(ceph_dentry_cachep, di); |
1059 | dentry->d_fsdata = NULL; | 1060 | dentry->d_fsdata = NULL; |
1060 | } | 1061 | } |
1062 | if (parent_inode) | ||
1063 | iput(parent_inode); | ||
1061 | } | 1064 | } |
1062 | 1065 | ||
1063 | static int ceph_snapdir_d_revalidate(struct dentry *dentry, | 1066 | static int ceph_snapdir_d_revalidate(struct dentry *dentry, |
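The ceph change pins the parent inode with igrab() when the per-dentry info is set up and drops it with iput() when the dentry is released, so ceph_dentry_release() no longer has to trust dentry->d_parent at teardown time. A minimal sketch of the pin-at-init / drop-at-release ownership pattern; the refcounted stubs below are illustrative, not the real VFS objects:

    #include <stdlib.h>

    struct inode_stub { int refcount; };

    static struct inode_stub *igrab_stub(struct inode_stub *in)
    {
            if (in)
                    in->refcount++;        /* pin while we keep the pointer */
            return in;
    }

    static void iput_stub(struct inode_stub *in)
    {
            if (in && --in->refcount == 0)
                    free(in);
    }

    struct dentry_info {
            struct inode_stub *parent_inode;        /* pinned at init time */
    };

    static struct dentry_info *dentry_info_init(struct inode_stub *parent)
    {
            struct dentry_info *di = calloc(1, sizeof(*di));

            if (!di)
                    return NULL;
            di->parent_inode = igrab_stub(parent);
            return di;
    }

    static void dentry_info_release(struct dentry_info *di)
    {
            /* safe even if the original parent pointer was rewired meanwhile */
            iput_stub(di->parent_inode);
            free(di);
    }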
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index 39c243acd062..f40b9139e437 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c | |||
@@ -584,10 +584,14 @@ static void queue_realm_cap_snaps(struct ceph_snap_realm *realm) | |||
584 | if (lastinode) | 584 | if (lastinode) |
585 | iput(lastinode); | 585 | iput(lastinode); |
586 | 586 | ||
587 | dout("queue_realm_cap_snaps %p %llx children\n", realm, realm->ino); | 587 | list_for_each_entry(child, &realm->children, child_item) { |
588 | list_for_each_entry(child, &realm->children, child_item) | 588 | dout("queue_realm_cap_snaps %p %llx queue child %p %llx\n", |
589 | queue_realm_cap_snaps(child); | 589 | realm, realm->ino, child, child->ino); |
590 | list_del_init(&child->dirty_item); | ||
591 | list_add(&child->dirty_item, &realm->dirty_item); | ||
592 | } | ||
590 | 593 | ||
594 | list_del_init(&realm->dirty_item); | ||
591 | dout("queue_realm_cap_snaps %p %llx done\n", realm, realm->ino); | 595 | dout("queue_realm_cap_snaps %p %llx done\n", realm, realm->ino); |
592 | } | 596 | } |
593 | 597 | ||
@@ -683,7 +687,9 @@ more: | |||
683 | * queue cap snaps _after_ we've built the new snap contexts, | 687 | * queue cap snaps _after_ we've built the new snap contexts, |
684 | * so that i_head_snapc can be set appropriately. | 688 | * so that i_head_snapc can be set appropriately. |
685 | */ | 689 | */ |
686 | list_for_each_entry(realm, &dirty_realms, dirty_item) { | 690 | while (!list_empty(&dirty_realms)) { |
691 | realm = list_first_entry(&dirty_realms, struct ceph_snap_realm, | ||
692 | dirty_item); | ||
687 | queue_realm_cap_snaps(realm); | 693 | queue_realm_cap_snaps(realm); |
688 | } | 694 | } |
689 | 695 | ||
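queue_realm_cap_snaps() now splices each child onto the realm's dirty list instead of recursing, and the caller drains that list with a while (!list_empty()) / list_first_entry() loop rather than list_for_each_entry(), because entries are unlinked (and new ones added) while the list is being walked. A user-space sketch of the same drain pattern on a simple singly linked list (the kernel version uses the <linux/list.h> helpers):

    #include <stdio.h>
    #include <stdlib.h>

    struct realm_stub {
            int ino;
            struct realm_stub *next;        /* link on the dirty list */
    };

    /* Always take the current head; safe even when processing an entry
     * adds more entries to the list or removes others. */
    static void drain_dirty(struct realm_stub **head)
    {
            while (*head) {
                    struct realm_stub *r = *head;   /* "list_first_entry" */

                    *head = r->next;                /* unlink before processing */
                    printf("queue cap snaps for realm %d\n", r->ino);
                    free(r);
            }
    }

Taking the head each iteration means children spliced onto the list mid-walk are still visited, which a fixed list_for_each_entry() cursor could miss once its current entry is moved.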
diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 20b907d76ae2..88fcaa21b801 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h | |||
@@ -207,6 +207,7 @@ struct ceph_dentry_info { | |||
207 | struct dentry *dentry; | 207 | struct dentry *dentry; |
208 | u64 time; | 208 | u64 time; |
209 | u64 offset; | 209 | u64 offset; |
210 | struct inode *parent_inode; | ||
210 | }; | 211 | }; |
211 | 212 | ||
212 | struct ceph_inode_xattrs_info { | 213 | struct ceph_inode_xattrs_info { |
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c index 1e7636b145a8..beeebf194234 100644 --- a/fs/cifs/cifsacl.c +++ b/fs/cifs/cifsacl.c | |||
@@ -372,6 +372,10 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl, | |||
372 | 372 | ||
373 | ppace = kmalloc(num_aces * sizeof(struct cifs_ace *), | 373 | ppace = kmalloc(num_aces * sizeof(struct cifs_ace *), |
374 | GFP_KERNEL); | 374 | GFP_KERNEL); |
375 | if (!ppace) { | ||
376 | cERROR(1, "DACL memory allocation error"); | ||
377 | return; | ||
378 | } | ||
375 | 379 | ||
376 | for (i = 0; i < num_aces; ++i) { | 380 | for (i = 0; i < num_aces; ++i) { |
377 | ppace[i] = (struct cifs_ace *) (acl_base + acl_size); | 381 | ppace[i] = (struct cifs_ace *) (acl_base + acl_size); |
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 4a3330235d55..a9371b6578c0 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h | |||
@@ -127,5 +127,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); | |||
127 | extern const struct export_operations cifs_export_ops; | 127 | extern const struct export_operations cifs_export_ops; |
128 | #endif /* EXPERIMENTAL */ | 128 | #endif /* EXPERIMENTAL */ |
129 | 129 | ||
130 | #define CIFS_VERSION "1.70" | 130 | #define CIFS_VERSION "1.71" |
131 | #endif /* _CIFSFS_H */ | 131 | #endif /* _CIFSFS_H */ |
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index edd5b29b53c9..17afb0fbcaed 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
@@ -188,6 +188,8 @@ struct TCP_Server_Info { | |||
188 | /* multiplexed reads or writes */ | 188 | /* multiplexed reads or writes */ |
189 | unsigned int maxBuf; /* maxBuf specifies the maximum */ | 189 | unsigned int maxBuf; /* maxBuf specifies the maximum */ |
190 | /* message size the server can send or receive for non-raw SMBs */ | 190 | /* message size the server can send or receive for non-raw SMBs */ |
191 | /* maxBuf is returned by SMB NegotiateProtocol so maxBuf is only 0 */ | ||
192 | /* when socket is setup (and during reconnect) before NegProt sent */ | ||
191 | unsigned int max_rw; /* maxRw specifies the maximum */ | 193 | unsigned int max_rw; /* maxRw specifies the maximum */ |
192 | /* message size the server can send or receive for */ | 194 | /* message size the server can send or receive for */ |
193 | /* SMB_COM_WRITE_RAW or SMB_COM_READ_RAW. */ | 195 | /* SMB_COM_WRITE_RAW or SMB_COM_READ_RAW. */ |
@@ -652,7 +654,7 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param, | |||
652 | #define MID_REQUEST_SUBMITTED 2 | 654 | #define MID_REQUEST_SUBMITTED 2 |
653 | #define MID_RESPONSE_RECEIVED 4 | 655 | #define MID_RESPONSE_RECEIVED 4 |
654 | #define MID_RETRY_NEEDED 8 /* session closed while this request out */ | 656 | #define MID_RETRY_NEEDED 8 /* session closed while this request out */ |
655 | #define MID_NO_RESP_NEEDED 0x10 | 657 | #define MID_RESPONSE_MALFORMED 0x10 |
656 | 658 | ||
657 | /* Types of response buffer returned from SendReceive2 */ | 659 | /* Types of response buffer returned from SendReceive2 */ |
658 | #define CIFS_NO_BUFFER 0 /* Response buffer not returned */ | 660 | #define CIFS_NO_BUFFER 0 /* Response buffer not returned */ |
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 46c66ed01af4..904aa47e3515 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c | |||
@@ -136,9 +136,6 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command) | |||
136 | } | 136 | } |
137 | } | 137 | } |
138 | 138 | ||
139 | if (ses->status == CifsExiting) | ||
140 | return -EIO; | ||
141 | |||
142 | /* | 139 | /* |
143 | * Give demultiplex thread up to 10 seconds to reconnect, should be | 140 | * Give demultiplex thread up to 10 seconds to reconnect, should be |
144 | * greater than cifs socket timeout which is 7 seconds | 141 | * greater than cifs socket timeout which is 7 seconds |
@@ -156,7 +153,7 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command) | |||
156 | * retrying until process is killed or server comes | 153 | * retrying until process is killed or server comes |
157 | * back on-line | 154 | * back on-line |
158 | */ | 155 | */ |
159 | if (!tcon->retry || ses->status == CifsExiting) { | 156 | if (!tcon->retry) { |
160 | cFYI(1, "gave up waiting on reconnect in smb_init"); | 157 | cFYI(1, "gave up waiting on reconnect in smb_init"); |
161 | return -EHOSTDOWN; | 158 | return -EHOSTDOWN; |
162 | } | 159 | } |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 47d8ff623683..8d6c17ab593d 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -337,8 +337,13 @@ cifs_echo_request(struct work_struct *work) | |||
337 | struct TCP_Server_Info *server = container_of(work, | 337 | struct TCP_Server_Info *server = container_of(work, |
338 | struct TCP_Server_Info, echo.work); | 338 | struct TCP_Server_Info, echo.work); |
339 | 339 | ||
340 | /* no need to ping if we got a response recently */ | 340 | /* |
341 | if (time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ)) | 341 | * We cannot send an echo until the NEGOTIATE_PROTOCOL request is |
342 | * done, which is indicated by maxBuf != 0. Also, no need to ping if | ||
343 | * we got a response recently | ||
344 | */ | ||
345 | if (server->maxBuf == 0 || | ||
346 | time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ)) | ||
342 | goto requeue_echo; | 347 | goto requeue_echo; |
343 | 348 | ||
344 | rc = CIFSSMBEcho(server); | 349 | rc = CIFSSMBEcho(server); |
@@ -578,14 +583,23 @@ incomplete_rcv: | |||
578 | else if (reconnect == 1) | 583 | else if (reconnect == 1) |
579 | continue; | 584 | continue; |
580 | 585 | ||
581 | length += 4; /* account for rfc1002 hdr */ | 586 | total_read += 4; /* account for rfc1002 hdr */ |
582 | 587 | ||
588 | dump_smb(smb_buffer, total_read); | ||
583 | 589 | ||
584 | dump_smb(smb_buffer, length); | 590 | /* |
585 | if (checkSMB(smb_buffer, smb_buffer->Mid, total_read+4)) { | 591 | * We know that we received enough to get to the MID as we |
586 | cifs_dump_mem("Bad SMB: ", smb_buffer, 48); | 592 | * checked the pdu_length earlier. Now check to see |
587 | continue; | 593 | * if the rest of the header is OK. We borrow the length |
588 | } | 594 | * var for the rest of the loop to avoid a new stack var. |
595 | * | ||
596 | * 48 bytes is enough to display the header and a little bit | ||
597 | * into the payload for debugging purposes. | ||
598 | */ | ||
599 | length = checkSMB(smb_buffer, smb_buffer->Mid, total_read); | ||
600 | if (length != 0) | ||
601 | cifs_dump_mem("Bad SMB: ", smb_buffer, | ||
602 | min_t(unsigned int, total_read, 48)); | ||
589 | 603 | ||
590 | mid_entry = NULL; | 604 | mid_entry = NULL; |
591 | server->lstrp = jiffies; | 605 | server->lstrp = jiffies; |
@@ -597,7 +611,8 @@ incomplete_rcv: | |||
597 | if ((mid_entry->mid == smb_buffer->Mid) && | 611 | if ((mid_entry->mid == smb_buffer->Mid) && |
598 | (mid_entry->midState == MID_REQUEST_SUBMITTED) && | 612 | (mid_entry->midState == MID_REQUEST_SUBMITTED) && |
599 | (mid_entry->command == smb_buffer->Command)) { | 613 | (mid_entry->command == smb_buffer->Command)) { |
600 | if (check2ndT2(smb_buffer,server->maxBuf) > 0) { | 614 | if (length == 0 && |
615 | check2ndT2(smb_buffer, server->maxBuf) > 0) { | ||
601 | /* We have a multipart transact2 resp */ | 616 | /* We have a multipart transact2 resp */ |
602 | isMultiRsp = true; | 617 | isMultiRsp = true; |
603 | if (mid_entry->resp_buf) { | 618 | if (mid_entry->resp_buf) { |
@@ -632,12 +647,17 @@ incomplete_rcv: | |||
632 | mid_entry->resp_buf = smb_buffer; | 647 | mid_entry->resp_buf = smb_buffer; |
633 | mid_entry->largeBuf = isLargeBuf; | 648 | mid_entry->largeBuf = isLargeBuf; |
634 | multi_t2_fnd: | 649 | multi_t2_fnd: |
635 | mid_entry->midState = MID_RESPONSE_RECEIVED; | 650 | if (length == 0) |
636 | list_del_init(&mid_entry->qhead); | 651 | mid_entry->midState = |
637 | mid_entry->callback(mid_entry); | 652 | MID_RESPONSE_RECEIVED; |
653 | else | ||
654 | mid_entry->midState = | ||
655 | MID_RESPONSE_MALFORMED; | ||
638 | #ifdef CONFIG_CIFS_STATS2 | 656 | #ifdef CONFIG_CIFS_STATS2 |
639 | mid_entry->when_received = jiffies; | 657 | mid_entry->when_received = jiffies; |
640 | #endif | 658 | #endif |
659 | list_del_init(&mid_entry->qhead); | ||
660 | mid_entry->callback(mid_entry); | ||
641 | break; | 661 | break; |
642 | } | 662 | } |
643 | mid_entry = NULL; | 663 | mid_entry = NULL; |
@@ -653,6 +673,9 @@ multi_t2_fnd: | |||
653 | else | 673 | else |
654 | smallbuf = NULL; | 674 | smallbuf = NULL; |
655 | } | 675 | } |
676 | } else if (length != 0) { | ||
677 | /* response sanity checks failed */ | ||
678 | continue; | ||
656 | } else if (!is_valid_oplock_break(smb_buffer, server) && | 679 | } else if (!is_valid_oplock_break(smb_buffer, server) && |
657 | !isMultiRsp) { | 680 | !isMultiRsp) { |
658 | cERROR(1, "No task to wake, unknown frame received! " | 681 | cERROR(1, "No task to wake, unknown frame received! " |
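Two behavioural changes sit in the connect.c hunks: the echo worker does nothing until server->maxBuf is non-zero (i.e. until NEGOTIATE_PROTOCOL has completed), and the demultiplex loop now runs checkSMB() first, marking a matched request MID_RESPONSE_MALFORMED when the header fails its sanity checks so the waiter is woken with an error instead of the frame being dropped silently. A small sketch of that validate-then-classify flow; the structures and the header test are hypothetical:

    #include <stddef.h>

    enum mid_state {
            MID_REQUEST_SUBMITTED,
            MID_RESPONSE_RECEIVED,
            MID_RESPONSE_MALFORMED,
    };

    struct mid_entry {
            unsigned int mid;
            enum mid_state state;
    };

    /* Hypothetical header check: 0 means the frame passed sanity checks. */
    static int check_header(const unsigned char *buf, size_t len)
    {
            return (len >= 32 && buf[0] == 0xff) ? 0 : -1;
    }

    static void demultiplex_one(struct mid_entry *waiter,
                                const unsigned char *buf, size_t len)
    {
            int bad = check_header(buf, len);

            if (waiter && waiter->state == MID_REQUEST_SUBMITTED)
                    /* wake the waiter either way, but tell it what arrived */
                    waiter->state = bad ? MID_RESPONSE_MALFORMED
                                        : MID_RESPONSE_RECEIVED;
            /* with no matching waiter and a bad header, just drop the frame */
    }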
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 74c0a282d012..e964b1cd5dd0 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -1662,10 +1662,10 @@ static ssize_t | |||
1662 | cifs_iovec_write(struct file *file, const struct iovec *iov, | 1662 | cifs_iovec_write(struct file *file, const struct iovec *iov, |
1663 | unsigned long nr_segs, loff_t *poffset) | 1663 | unsigned long nr_segs, loff_t *poffset) |
1664 | { | 1664 | { |
1665 | size_t total_written = 0; | 1665 | unsigned int written; |
1666 | unsigned int written = 0; | 1666 | unsigned long num_pages, npages, i; |
1667 | unsigned long num_pages, npages; | 1667 | size_t copied, len, cur_len; |
1668 | size_t copied, len, cur_len, i; | 1668 | ssize_t total_written = 0; |
1669 | struct kvec *to_send; | 1669 | struct kvec *to_send; |
1670 | struct page **pages; | 1670 | struct page **pages; |
1671 | struct iov_iter it; | 1671 | struct iov_iter it; |
@@ -1821,7 +1821,8 @@ cifs_iovec_read(struct file *file, const struct iovec *iov, | |||
1821 | { | 1821 | { |
1822 | int rc; | 1822 | int rc; |
1823 | int xid; | 1823 | int xid; |
1824 | unsigned int total_read, bytes_read = 0; | 1824 | ssize_t total_read; |
1825 | unsigned int bytes_read = 0; | ||
1825 | size_t len, cur_len; | 1826 | size_t len, cur_len; |
1826 | int iov_offset = 0; | 1827 | int iov_offset = 0; |
1827 | struct cifs_sb_info *cifs_sb; | 1828 | struct cifs_sb_info *cifs_sb; |
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c index 8d9189f64477..79f641eeda30 100644 --- a/fs/cifs/netmisc.c +++ b/fs/cifs/netmisc.c | |||
@@ -170,7 +170,7 @@ cifs_convert_address(struct sockaddr *dst, const char *src, int len) | |||
170 | { | 170 | { |
171 | int rc, alen, slen; | 171 | int rc, alen, slen; |
172 | const char *pct; | 172 | const char *pct; |
173 | char *endp, scope_id[13]; | 173 | char scope_id[13]; |
174 | struct sockaddr_in *s4 = (struct sockaddr_in *) dst; | 174 | struct sockaddr_in *s4 = (struct sockaddr_in *) dst; |
175 | struct sockaddr_in6 *s6 = (struct sockaddr_in6 *) dst; | 175 | struct sockaddr_in6 *s6 = (struct sockaddr_in6 *) dst; |
176 | 176 | ||
@@ -197,9 +197,9 @@ cifs_convert_address(struct sockaddr *dst, const char *src, int len) | |||
197 | memcpy(scope_id, pct + 1, slen); | 197 | memcpy(scope_id, pct + 1, slen); |
198 | scope_id[slen] = '\0'; | 198 | scope_id[slen] = '\0'; |
199 | 199 | ||
200 | s6->sin6_scope_id = (u32) simple_strtoul(pct, &endp, 0); | 200 | rc = strict_strtoul(scope_id, 0, |
201 | if (endp != scope_id + slen) | 201 | (unsigned long *)&s6->sin6_scope_id); |
202 | return 0; | 202 | rc = (rc == 0) ? 1 : 0; |
203 | } | 203 | } |
204 | 204 | ||
205 | return rc; | 205 | return rc; |
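The netmisc.c fix replaces simple_strtoul() (which was being run over the wrong pointer) with strict_strtoul() on the NUL-terminated scope_id copy, and folds the result back into the function's 1-on-success / 0-on-failure convention. strict_strtoul() is kernel-only; a user-space sketch of the same strict parse with strtoul() and an explicit end-pointer check:

    #include <errno.h>
    #include <stdlib.h>

    /* Returns 1 when the whole string is a valid number, 0 otherwise,
     * mirroring the cifs_convert_address() convention. */
    static int parse_scope_id(const char *scope_id, unsigned long *out)
    {
            char *end = NULL;
            unsigned long val;

            if (*scope_id == '\0')
                    return 0;
            errno = 0;
            val = strtoul(scope_id, &end, 0);
            if (errno != 0 || end == NULL || *end != '\0')
                    return 0;       /* overflow or trailing junk: reject */
            *out = val;
            return 1;
    }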
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 1adc9625a344..16765703131b 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c | |||
@@ -656,13 +656,13 @@ ssetup_ntlmssp_authenticate: | |||
656 | 656 | ||
657 | if (type == LANMAN) { | 657 | if (type == LANMAN) { |
658 | #ifdef CONFIG_CIFS_WEAK_PW_HASH | 658 | #ifdef CONFIG_CIFS_WEAK_PW_HASH |
659 | char lnm_session_key[CIFS_SESS_KEY_SIZE]; | 659 | char lnm_session_key[CIFS_AUTH_RESP_SIZE]; |
660 | 660 | ||
661 | pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE; | 661 | pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE; |
662 | 662 | ||
663 | /* no capabilities flags in old lanman negotiation */ | 663 | /* no capabilities flags in old lanman negotiation */ |
664 | 664 | ||
665 | pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE); | 665 | pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE); |
666 | 666 | ||
667 | /* Calculate hash with password and copy into bcc_ptr. | 667 | /* Calculate hash with password and copy into bcc_ptr. |
668 | * Encryption Key (stored as in cryptkey) gets used if the | 668 | * Encryption Key (stored as in cryptkey) gets used if the |
@@ -675,8 +675,8 @@ ssetup_ntlmssp_authenticate: | |||
675 | true : false, lnm_session_key); | 675 | true : false, lnm_session_key); |
676 | 676 | ||
677 | ses->flags |= CIFS_SES_LANMAN; | 677 | ses->flags |= CIFS_SES_LANMAN; |
678 | memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_SESS_KEY_SIZE); | 678 | memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE); |
679 | bcc_ptr += CIFS_SESS_KEY_SIZE; | 679 | bcc_ptr += CIFS_AUTH_RESP_SIZE; |
680 | 680 | ||
681 | /* can not sign if LANMAN negotiated so no need | 681 | /* can not sign if LANMAN negotiated so no need |
682 | to calculate signing key? but what if server | 682 | to calculate signing key? but what if server |
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index b8c5e2eb43d0..46d8756f2b24 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c | |||
@@ -359,6 +359,10 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_hdr *in_buf, | |||
359 | if (rc) | 359 | if (rc) |
360 | return rc; | 360 | return rc; |
361 | 361 | ||
362 | /* enable signing if server requires it */ | ||
363 | if (server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) | ||
364 | in_buf->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; | ||
365 | |||
362 | mutex_lock(&server->srv_mutex); | 366 | mutex_lock(&server->srv_mutex); |
363 | mid = AllocMidQEntry(in_buf, server); | 367 | mid = AllocMidQEntry(in_buf, server); |
364 | if (mid == NULL) { | 368 | if (mid == NULL) { |
@@ -453,6 +457,9 @@ sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server) | |||
453 | case MID_RETRY_NEEDED: | 457 | case MID_RETRY_NEEDED: |
454 | rc = -EAGAIN; | 458 | rc = -EAGAIN; |
455 | break; | 459 | break; |
460 | case MID_RESPONSE_MALFORMED: | ||
461 | rc = -EIO; | ||
462 | break; | ||
456 | default: | 463 | default: |
457 | cERROR(1, "%s: invalid mid state mid=%d state=%d", __func__, | 464 | cERROR(1, "%s: invalid mid state mid=%d state=%d", __func__, |
458 | mid->mid, mid->midState); | 465 | mid->mid, mid->midState); |
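With the new MID_RESPONSE_MALFORMED state, sync_mid_result() needs one more case in its state-to-errno mapping: a malformed response becomes -EIO, while a dropped session still becomes -EAGAIN. A trivial sketch of that mapping (state names follow the patch; the function shape is illustrative):

    #include <errno.h>

    enum mid_result {
            RESULT_RESPONSE_RECEIVED,
            RESULT_RETRY_NEEDED,
            RESULT_RESPONSE_MALFORMED,
    };

    static int mid_result_to_errno(enum mid_result state)
    {
            switch (state) {
            case RESULT_RESPONSE_RECEIVED:
                    return 0;
            case RESULT_RETRY_NEEDED:
                    return -EAGAIN;         /* session dropped; safe to retry */
            case RESULT_RESPONSE_MALFORMED:
                    return -EIO;            /* header failed sanity checks */
            default:
                    return -EIO;
            }
    }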
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 9c64ae9e4c1a..2d8c87b951c2 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c | |||
@@ -1468,15 +1468,13 @@ static void work_stop(void) | |||
1468 | 1468 | ||
1469 | static int work_start(void) | 1469 | static int work_start(void) |
1470 | { | 1470 | { |
1471 | recv_workqueue = alloc_workqueue("dlm_recv", WQ_MEM_RECLAIM | | 1471 | recv_workqueue = create_singlethread_workqueue("dlm_recv"); |
1472 | WQ_HIGHPRI | WQ_FREEZEABLE, 0); | ||
1473 | if (!recv_workqueue) { | 1472 | if (!recv_workqueue) { |
1474 | log_print("can't start dlm_recv"); | 1473 | log_print("can't start dlm_recv"); |
1475 | return -ENOMEM; | 1474 | return -ENOMEM; |
1476 | } | 1475 | } |
1477 | 1476 | ||
1478 | send_workqueue = alloc_workqueue("dlm_send", WQ_MEM_RECLAIM | | 1477 | send_workqueue = create_singlethread_workqueue("dlm_send"); |
1479 | WQ_HIGHPRI | WQ_FREEZEABLE, 0); | ||
1480 | if (!send_workqueue) { | 1478 | if (!send_workqueue) { |
1481 | log_print("can't start dlm_send"); | 1479 | log_print("can't start dlm_send"); |
1482 | destroy_workqueue(recv_workqueue); | 1480 | destroy_workqueue(recv_workqueue); |
diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c index 6fc4f319b550..534c1d46e69e 100644 --- a/fs/ecryptfs/dentry.c +++ b/fs/ecryptfs/dentry.c | |||
@@ -46,24 +46,28 @@ static int ecryptfs_d_revalidate(struct dentry *dentry, struct nameidata *nd) | |||
46 | { | 46 | { |
47 | struct dentry *lower_dentry; | 47 | struct dentry *lower_dentry; |
48 | struct vfsmount *lower_mnt; | 48 | struct vfsmount *lower_mnt; |
49 | struct dentry *dentry_save; | 49 | struct dentry *dentry_save = NULL; |
50 | struct vfsmount *vfsmount_save; | 50 | struct vfsmount *vfsmount_save = NULL; |
51 | int rc = 1; | 51 | int rc = 1; |
52 | 52 | ||
53 | if (nd->flags & LOOKUP_RCU) | 53 | if (nd && nd->flags & LOOKUP_RCU) |
54 | return -ECHILD; | 54 | return -ECHILD; |
55 | 55 | ||
56 | lower_dentry = ecryptfs_dentry_to_lower(dentry); | 56 | lower_dentry = ecryptfs_dentry_to_lower(dentry); |
57 | lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry); | 57 | lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry); |
58 | if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate) | 58 | if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate) |
59 | goto out; | 59 | goto out; |
60 | dentry_save = nd->path.dentry; | 60 | if (nd) { |
61 | vfsmount_save = nd->path.mnt; | 61 | dentry_save = nd->path.dentry; |
62 | nd->path.dentry = lower_dentry; | 62 | vfsmount_save = nd->path.mnt; |
63 | nd->path.mnt = lower_mnt; | 63 | nd->path.dentry = lower_dentry; |
64 | nd->path.mnt = lower_mnt; | ||
65 | } | ||
64 | rc = lower_dentry->d_op->d_revalidate(lower_dentry, nd); | 66 | rc = lower_dentry->d_op->d_revalidate(lower_dentry, nd); |
65 | nd->path.dentry = dentry_save; | 67 | if (nd) { |
66 | nd->path.mnt = vfsmount_save; | 68 | nd->path.dentry = dentry_save; |
69 | nd->path.mnt = vfsmount_save; | ||
70 | } | ||
67 | if (dentry->d_inode) { | 71 | if (dentry->d_inode) { |
68 | struct inode *lower_inode = | 72 | struct inode *lower_inode = |
69 | ecryptfs_inode_to_lower(dentry->d_inode); | 73 | ecryptfs_inode_to_lower(dentry->d_inode); |
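The ecryptfs_d_revalidate() change is about ->d_revalidate() now being callable with a NULL nameidata: the RCU-walk bail-out and the save/override/restore of nd->path are all guarded by if (nd). A minimal sketch of that optional-context pattern; the stub types stand in for the real dentry/vfsmount/nameidata:

    #include <stddef.h>

    struct path_stub { void *dentry; void *mnt; };
    struct nd_stub { struct path_stub path; };

    /* Point nd->path at the lower objects for the duration of the lower
     * call, but only when a nameidata was supplied at all. */
    static int revalidate_lower(struct nd_stub *nd, void *lower_dentry,
                                void *lower_mnt,
                                int (*lower_revalidate)(struct nd_stub *))
    {
            struct path_stub saved = { NULL, NULL };
            int rc;

            if (nd) {
                    saved = nd->path;
                    nd->path.dentry = lower_dentry;
                    nd->path.mnt = lower_mnt;
            }
            rc = lower_revalidate(nd);
            if (nd)
                    nd->path = saved;
            return rc;
    }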
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index dbc84ed96336..e00753496e3e 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h | |||
@@ -632,8 +632,7 @@ int ecryptfs_interpose(struct dentry *hidden_dentry, | |||
632 | u32 flags); | 632 | u32 flags); |
633 | int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, | 633 | int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, |
634 | struct dentry *lower_dentry, | 634 | struct dentry *lower_dentry, |
635 | struct inode *ecryptfs_dir_inode, | 635 | struct inode *ecryptfs_dir_inode); |
636 | struct nameidata *ecryptfs_nd); | ||
637 | int ecryptfs_decode_and_decrypt_filename(char **decrypted_name, | 636 | int ecryptfs_decode_and_decrypt_filename(char **decrypted_name, |
638 | size_t *decrypted_name_size, | 637 | size_t *decrypted_name_size, |
639 | struct dentry *ecryptfs_dentry, | 638 | struct dentry *ecryptfs_dentry, |
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index 81e10e6a9443..7d1050e254f9 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c | |||
@@ -317,6 +317,7 @@ ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
317 | 317 | ||
318 | const struct file_operations ecryptfs_dir_fops = { | 318 | const struct file_operations ecryptfs_dir_fops = { |
319 | .readdir = ecryptfs_readdir, | 319 | .readdir = ecryptfs_readdir, |
320 | .read = generic_read_dir, | ||
320 | .unlocked_ioctl = ecryptfs_unlocked_ioctl, | 321 | .unlocked_ioctl = ecryptfs_unlocked_ioctl, |
321 | #ifdef CONFIG_COMPAT | 322 | #ifdef CONFIG_COMPAT |
322 | .compat_ioctl = ecryptfs_compat_ioctl, | 323 | .compat_ioctl = ecryptfs_compat_ioctl, |
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index bd33f87a1907..b592938a84bc 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c | |||
@@ -74,16 +74,20 @@ ecryptfs_create_underlying_file(struct inode *lower_dir_inode, | |||
74 | unsigned int flags_save; | 74 | unsigned int flags_save; |
75 | int rc; | 75 | int rc; |
76 | 76 | ||
77 | dentry_save = nd->path.dentry; | 77 | if (nd) { |
78 | vfsmount_save = nd->path.mnt; | 78 | dentry_save = nd->path.dentry; |
79 | flags_save = nd->flags; | 79 | vfsmount_save = nd->path.mnt; |
80 | nd->path.dentry = lower_dentry; | 80 | flags_save = nd->flags; |
81 | nd->path.mnt = lower_mnt; | 81 | nd->path.dentry = lower_dentry; |
82 | nd->flags &= ~LOOKUP_OPEN; | 82 | nd->path.mnt = lower_mnt; |
83 | nd->flags &= ~LOOKUP_OPEN; | ||
84 | } | ||
83 | rc = vfs_create(lower_dir_inode, lower_dentry, mode, nd); | 85 | rc = vfs_create(lower_dir_inode, lower_dentry, mode, nd); |
84 | nd->path.dentry = dentry_save; | 86 | if (nd) { |
85 | nd->path.mnt = vfsmount_save; | 87 | nd->path.dentry = dentry_save; |
86 | nd->flags = flags_save; | 88 | nd->path.mnt = vfsmount_save; |
89 | nd->flags = flags_save; | ||
90 | } | ||
87 | return rc; | 91 | return rc; |
88 | } | 92 | } |
89 | 93 | ||
@@ -241,8 +245,7 @@ out: | |||
241 | */ | 245 | */ |
242 | int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, | 246 | int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, |
243 | struct dentry *lower_dentry, | 247 | struct dentry *lower_dentry, |
244 | struct inode *ecryptfs_dir_inode, | 248 | struct inode *ecryptfs_dir_inode) |
245 | struct nameidata *ecryptfs_nd) | ||
246 | { | 249 | { |
247 | struct dentry *lower_dir_dentry; | 250 | struct dentry *lower_dir_dentry; |
248 | struct vfsmount *lower_mnt; | 251 | struct vfsmount *lower_mnt; |
@@ -290,8 +293,6 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, | |||
290 | goto out; | 293 | goto out; |
291 | if (special_file(lower_inode->i_mode)) | 294 | if (special_file(lower_inode->i_mode)) |
292 | goto out; | 295 | goto out; |
293 | if (!ecryptfs_nd) | ||
294 | goto out; | ||
295 | /* Released in this function */ | 296 | /* Released in this function */ |
296 | page_virt = kmem_cache_zalloc(ecryptfs_header_cache_2, GFP_USER); | 297 | page_virt = kmem_cache_zalloc(ecryptfs_header_cache_2, GFP_USER); |
297 | if (!page_virt) { | 298 | if (!page_virt) { |
@@ -349,75 +350,6 @@ out: | |||
349 | } | 350 | } |
350 | 351 | ||
351 | /** | 352 | /** |
352 | * ecryptfs_new_lower_dentry | ||
353 | * @name: The name of the new dentry. | ||
354 | * @lower_dir_dentry: Parent directory of the new dentry. | ||
355 | * @nd: nameidata from last lookup. | ||
356 | * | ||
357 | * Create a new dentry or get it from lower parent dir. | ||
358 | */ | ||
359 | static struct dentry * | ||
360 | ecryptfs_new_lower_dentry(struct qstr *name, struct dentry *lower_dir_dentry, | ||
361 | struct nameidata *nd) | ||
362 | { | ||
363 | struct dentry *new_dentry; | ||
364 | struct dentry *tmp; | ||
365 | struct inode *lower_dir_inode; | ||
366 | |||
367 | lower_dir_inode = lower_dir_dentry->d_inode; | ||
368 | |||
369 | tmp = d_alloc(lower_dir_dentry, name); | ||
370 | if (!tmp) | ||
371 | return ERR_PTR(-ENOMEM); | ||
372 | |||
373 | mutex_lock(&lower_dir_inode->i_mutex); | ||
374 | new_dentry = lower_dir_inode->i_op->lookup(lower_dir_inode, tmp, nd); | ||
375 | mutex_unlock(&lower_dir_inode->i_mutex); | ||
376 | |||
377 | if (!new_dentry) | ||
378 | new_dentry = tmp; | ||
379 | else | ||
380 | dput(tmp); | ||
381 | |||
382 | return new_dentry; | ||
383 | } | ||
384 | |||
385 | |||
386 | /** | ||
387 | * ecryptfs_lookup_one_lower | ||
388 | * @ecryptfs_dentry: The eCryptfs dentry that we are looking up | ||
389 | * @lower_dir_dentry: lower parent directory | ||
390 | * @name: lower file name | ||
391 | * | ||
392 | * Get the lower dentry from vfs. If lower dentry does not exist yet, | ||
393 | * create it. | ||
394 | */ | ||
395 | static struct dentry * | ||
396 | ecryptfs_lookup_one_lower(struct dentry *ecryptfs_dentry, | ||
397 | struct dentry *lower_dir_dentry, struct qstr *name) | ||
398 | { | ||
399 | struct nameidata nd; | ||
400 | struct vfsmount *lower_mnt; | ||
401 | int err; | ||
402 | |||
403 | lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt( | ||
404 | ecryptfs_dentry->d_parent)); | ||
405 | err = vfs_path_lookup(lower_dir_dentry, lower_mnt, name->name , 0, &nd); | ||
406 | mntput(lower_mnt); | ||
407 | |||
408 | if (!err) { | ||
409 | /* we dont need the mount */ | ||
410 | mntput(nd.path.mnt); | ||
411 | return nd.path.dentry; | ||
412 | } | ||
413 | if (err != -ENOENT) | ||
414 | return ERR_PTR(err); | ||
415 | |||
416 | /* create a new lower dentry */ | ||
417 | return ecryptfs_new_lower_dentry(name, lower_dir_dentry, &nd); | ||
418 | } | ||
419 | |||
420 | /** | ||
421 | * ecryptfs_lookup | 353 | * ecryptfs_lookup |
422 | * @ecryptfs_dir_inode: The eCryptfs directory inode | 354 | * @ecryptfs_dir_inode: The eCryptfs directory inode |
423 | * @ecryptfs_dentry: The eCryptfs dentry that we are looking up | 355 | * @ecryptfs_dentry: The eCryptfs dentry that we are looking up |
@@ -434,7 +366,6 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, | |||
434 | size_t encrypted_and_encoded_name_size; | 366 | size_t encrypted_and_encoded_name_size; |
435 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat = NULL; | 367 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat = NULL; |
436 | struct dentry *lower_dir_dentry, *lower_dentry; | 368 | struct dentry *lower_dir_dentry, *lower_dentry; |
437 | struct qstr lower_name; | ||
438 | int rc = 0; | 369 | int rc = 0; |
439 | 370 | ||
440 | if ((ecryptfs_dentry->d_name.len == 1 | 371 | if ((ecryptfs_dentry->d_name.len == 1 |
@@ -444,20 +375,14 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, | |||
444 | goto out_d_drop; | 375 | goto out_d_drop; |
445 | } | 376 | } |
446 | lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent); | 377 | lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent); |
447 | lower_name.name = ecryptfs_dentry->d_name.name; | 378 | mutex_lock(&lower_dir_dentry->d_inode->i_mutex); |
448 | lower_name.len = ecryptfs_dentry->d_name.len; | 379 | lower_dentry = lookup_one_len(ecryptfs_dentry->d_name.name, |
449 | lower_name.hash = ecryptfs_dentry->d_name.hash; | 380 | lower_dir_dentry, |
450 | if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) { | 381 | ecryptfs_dentry->d_name.len); |
451 | rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry, | 382 | mutex_unlock(&lower_dir_dentry->d_inode->i_mutex); |
452 | lower_dir_dentry->d_inode, &lower_name); | ||
453 | if (rc < 0) | ||
454 | goto out_d_drop; | ||
455 | } | ||
456 | lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry, | ||
457 | lower_dir_dentry, &lower_name); | ||
458 | if (IS_ERR(lower_dentry)) { | 383 | if (IS_ERR(lower_dentry)) { |
459 | rc = PTR_ERR(lower_dentry); | 384 | rc = PTR_ERR(lower_dentry); |
460 | ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned " | 385 | ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned " |
461 | "[%d] on lower_dentry = [%s]\n", __func__, rc, | 386 | "[%d] on lower_dentry = [%s]\n", __func__, rc, |
462 | encrypted_and_encoded_name); | 387 | encrypted_and_encoded_name); |
463 | goto out_d_drop; | 388 | goto out_d_drop; |
@@ -479,28 +404,21 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, | |||
479 | "filename; rc = [%d]\n", __func__, rc); | 404 | "filename; rc = [%d]\n", __func__, rc); |
480 | goto out_d_drop; | 405 | goto out_d_drop; |
481 | } | 406 | } |
482 | lower_name.name = encrypted_and_encoded_name; | 407 | mutex_lock(&lower_dir_dentry->d_inode->i_mutex); |
483 | lower_name.len = encrypted_and_encoded_name_size; | 408 | lower_dentry = lookup_one_len(encrypted_and_encoded_name, |
484 | lower_name.hash = full_name_hash(lower_name.name, lower_name.len); | 409 | lower_dir_dentry, |
485 | if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) { | 410 | encrypted_and_encoded_name_size); |
486 | rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry, | 411 | mutex_unlock(&lower_dir_dentry->d_inode->i_mutex); |
487 | lower_dir_dentry->d_inode, &lower_name); | ||
488 | if (rc < 0) | ||
489 | goto out_d_drop; | ||
490 | } | ||
491 | lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry, | ||
492 | lower_dir_dentry, &lower_name); | ||
493 | if (IS_ERR(lower_dentry)) { | 412 | if (IS_ERR(lower_dentry)) { |
494 | rc = PTR_ERR(lower_dentry); | 413 | rc = PTR_ERR(lower_dentry); |
495 | ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned " | 414 | ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned " |
496 | "[%d] on lower_dentry = [%s]\n", __func__, rc, | 415 | "[%d] on lower_dentry = [%s]\n", __func__, rc, |
497 | encrypted_and_encoded_name); | 416 | encrypted_and_encoded_name); |
498 | goto out_d_drop; | 417 | goto out_d_drop; |
499 | } | 418 | } |
500 | lookup_and_interpose: | 419 | lookup_and_interpose: |
501 | rc = ecryptfs_lookup_and_interpose_lower(ecryptfs_dentry, lower_dentry, | 420 | rc = ecryptfs_lookup_and_interpose_lower(ecryptfs_dentry, lower_dentry, |
502 | ecryptfs_dir_inode, | 421 | ecryptfs_dir_inode); |
503 | ecryptfs_nd); | ||
504 | goto out; | 422 | goto out; |
505 | out_d_drop: | 423 | out_d_drop: |
506 | d_drop(ecryptfs_dentry); | 424 | d_drop(ecryptfs_dentry); |
@@ -1092,6 +1010,8 @@ int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry, | |||
1092 | rc = vfs_getattr(ecryptfs_dentry_to_lower_mnt(dentry), | 1010 | rc = vfs_getattr(ecryptfs_dentry_to_lower_mnt(dentry), |
1093 | ecryptfs_dentry_to_lower(dentry), &lower_stat); | 1011 | ecryptfs_dentry_to_lower(dentry), &lower_stat); |
1094 | if (!rc) { | 1012 | if (!rc) { |
1013 | fsstack_copy_attr_all(dentry->d_inode, | ||
1014 | ecryptfs_inode_to_lower(dentry->d_inode)); | ||
1095 | generic_fillattr(dentry->d_inode, stat); | 1015 | generic_fillattr(dentry->d_inode, stat); |
1096 | stat->blocks = lower_stat.blocks; | 1016 | stat->blocks = lower_stat.blocks; |
1097 | } | 1017 | } |
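The lookup rewrite drops the home-grown ecryptfs_new_lower_dentry()/ecryptfs_lookup_one_lower() helpers in favour of lookup_one_len() called with the lower parent's i_mutex held, which already handles the parent's ->d_hash() and allocates a negative dentry when the name is absent. The key point is doing the lookup under the parent's lock so the answer cannot change underneath the caller; a loose user-space analogy using a pthread mutex and a toy name table (everything here is illustrative):

    #include <pthread.h>
    #include <string.h>

    struct dir_stub {
            pthread_mutex_t i_mutex;
            const char **names;
            int count;
    };

    /* Look the name up while holding the parent's lock, so the result
     * cannot change underneath us - the same reason ecryptfs_lookup()
     * wraps lookup_one_len() in mutex_lock(...->i_mutex). */
    static int lookup_one(struct dir_stub *dir, const char *name, size_t len)
    {
            int i, found = -1;

            pthread_mutex_lock(&dir->i_mutex);
            for (i = 0; i < dir->count; i++) {
                    if (strlen(dir->names[i]) == len &&
                        memcmp(dir->names[i], name, len) == 0) {
                            found = i;
                            break;
                    }
            }
            pthread_mutex_unlock(&dir->i_mutex);
            return found;           /* index on success, -1 if absent */
    }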
diff --git a/fs/eventfd.c b/fs/eventfd.c index e0194b3e14d6..d9a591773919 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c | |||
@@ -99,7 +99,7 @@ EXPORT_SYMBOL_GPL(eventfd_ctx_get); | |||
99 | * @ctx: [in] Pointer to eventfd context. | 99 | * @ctx: [in] Pointer to eventfd context. |
100 | * | 100 | * |
101 | * The eventfd context reference must have been previously acquired either | 101 | * The eventfd context reference must have been previously acquired either |
102 | * with eventfd_ctx_get() or eventfd_ctx_fdget()). | 102 | * with eventfd_ctx_get() or eventfd_ctx_fdget(). |
103 | */ | 103 | */ |
104 | void eventfd_ctx_put(struct eventfd_ctx *ctx) | 104 | void eventfd_ctx_put(struct eventfd_ctx *ctx) |
105 | { | 105 | { |
@@ -146,9 +146,9 @@ static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt) | |||
146 | * eventfd_ctx_remove_wait_queue - Read the current counter and removes wait queue. | 146 | * eventfd_ctx_remove_wait_queue - Read the current counter and removes wait queue. |
147 | * @ctx: [in] Pointer to eventfd context. | 147 | * @ctx: [in] Pointer to eventfd context. |
148 | * @wait: [in] Wait queue to be removed. | 148 | * @wait: [in] Wait queue to be removed. |
149 | * @cnt: [out] Pointer to the 64bit conter value. | 149 | * @cnt: [out] Pointer to the 64-bit counter value. |
150 | * | 150 | * |
151 | * Returns zero if successful, or the following error codes: | 151 | * Returns %0 if successful, or the following error codes: |
152 | * | 152 | * |
153 | * -EAGAIN : The operation would have blocked. | 153 | * -EAGAIN : The operation would have blocked. |
154 | * | 154 | * |
@@ -175,11 +175,11 @@ EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue); | |||
175 | * eventfd_ctx_read - Reads the eventfd counter or wait if it is zero. | 175 | * eventfd_ctx_read - Reads the eventfd counter or wait if it is zero. |
176 | * @ctx: [in] Pointer to eventfd context. | 176 | * @ctx: [in] Pointer to eventfd context. |
177 | * @no_wait: [in] Different from zero if the operation should not block. | 177 | * @no_wait: [in] Different from zero if the operation should not block. |
178 | * @cnt: [out] Pointer to the 64bit conter value. | 178 | * @cnt: [out] Pointer to the 64-bit counter value. |
179 | * | 179 | * |
180 | * Returns zero if successful, or the following error codes: | 180 | * Returns %0 if successful, or the following error codes: |
181 | * | 181 | * |
182 | * -EAGAIN : The operation would have blocked but @no_wait was nonzero. | 182 | * -EAGAIN : The operation would have blocked but @no_wait was non-zero. |
183 | * -ERESTARTSYS : A signal interrupted the wait operation. | 183 | * -ERESTARTSYS : A signal interrupted the wait operation. |
184 | * | 184 | * |
185 | * If @no_wait is zero, the function might sleep until the eventfd internal | 185 | * If @no_wait is zero, the function might sleep until the eventfd internal |
diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 267d0ada4541..4a09af9e9a63 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c | |||
@@ -63,6 +63,13 @@ | |||
63 | * cleanup path and it is also acquired by eventpoll_release_file() | 63 | * cleanup path and it is also acquired by eventpoll_release_file() |
64 | * if a file has been pushed inside an epoll set and it is then | 64 | * if a file has been pushed inside an epoll set and it is then |
65 | * close()d without a previous call toepoll_ctl(EPOLL_CTL_DEL). | 65 | * close()d without a previous call toepoll_ctl(EPOLL_CTL_DEL). |
66 | * It is also acquired when inserting an epoll fd onto another epoll | ||
67 | * fd. We do this so that we walk the epoll tree and ensure that this | ||
68 | * insertion does not create a cycle of epoll file descriptors, which | ||
69 | * could lead to deadlock. We need a global mutex to prevent two | ||
70 | * simultaneous inserts (A into B and B into A) from racing and | ||
71 | * constructing a cycle without either insert observing that it is | ||
72 | * going to. | ||
66 | * It is possible to drop the "ep->mtx" and to use the global | 73 | * It is possible to drop the "ep->mtx" and to use the global |
67 | * mutex "epmutex" (together with "ep->lock") to have it working, | 74 | * mutex "epmutex" (together with "ep->lock") to have it working, |
68 | * but having "ep->mtx" will make the interface more scalable. | 75 | * but having "ep->mtx" will make the interface more scalable. |
@@ -224,6 +231,9 @@ static long max_user_watches __read_mostly; | |||
224 | */ | 231 | */ |
225 | static DEFINE_MUTEX(epmutex); | 232 | static DEFINE_MUTEX(epmutex); |
226 | 233 | ||
234 | /* Used to check for epoll file descriptor inclusion loops */ | ||
235 | static struct nested_calls poll_loop_ncalls; | ||
236 | |||
227 | /* Used for safe wake up implementation */ | 237 | /* Used for safe wake up implementation */ |
228 | static struct nested_calls poll_safewake_ncalls; | 238 | static struct nested_calls poll_safewake_ncalls; |
229 | 239 | ||
@@ -1198,6 +1208,62 @@ retry: | |||
1198 | return res; | 1208 | return res; |
1199 | } | 1209 | } |
1200 | 1210 | ||
1211 | /** | ||
1212 | * ep_loop_check_proc - Callback function to be passed to the @ep_call_nested() | ||
1213 | * API, to verify that adding an epoll file inside another | ||
1214 | * epoll structure, does not violate the constraints, in | ||
1215 | * terms of closed loops, or too deep chains (which can | ||
1216 | * result in excessive stack usage). | ||
1217 | * | ||
1218 | * @priv: Pointer to the epoll file to be currently checked. | ||
1219 | * @cookie: Original cookie for this call. This is the top-of-the-chain epoll | ||
1220 | * data structure pointer. | ||
1221 | * @call_nests: Current dept of the @ep_call_nested() call stack. | ||
1222 | * | ||
1223 | * Returns: Returns zero if adding the epoll @file inside current epoll | ||
1224 | * structure @ep does not violate the constraints, or -1 otherwise. | ||
1225 | */ | ||
1226 | static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) | ||
1227 | { | ||
1228 | int error = 0; | ||
1229 | struct file *file = priv; | ||
1230 | struct eventpoll *ep = file->private_data; | ||
1231 | struct rb_node *rbp; | ||
1232 | struct epitem *epi; | ||
1233 | |||
1234 | mutex_lock(&ep->mtx); | ||
1235 | for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) { | ||
1236 | epi = rb_entry(rbp, struct epitem, rbn); | ||
1237 | if (unlikely(is_file_epoll(epi->ffd.file))) { | ||
1238 | error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, | ||
1239 | ep_loop_check_proc, epi->ffd.file, | ||
1240 | epi->ffd.file->private_data, current); | ||
1241 | if (error != 0) | ||
1242 | break; | ||
1243 | } | ||
1244 | } | ||
1245 | mutex_unlock(&ep->mtx); | ||
1246 | |||
1247 | return error; | ||
1248 | } | ||
1249 | |||
1250 | /** | ||
1251 | * ep_loop_check - Performs a check to verify that adding an epoll file (@file) | ||
1252 | * inside another epoll file (represented by @ep) does not create | ||
1253 | * closed loops or too deep chains. | ||
1254 | * | ||
1255 | * @ep: Pointer to the epoll private data structure. | ||
1256 | * @file: Pointer to the epoll file to be checked. | ||
1257 | * | ||
1258 | * Returns: Returns zero if adding the epoll @file inside current epoll | ||
1259 | * structure @ep does not violate the constraints, or -1 otherwise. | ||
1260 | */ | ||
1261 | static int ep_loop_check(struct eventpoll *ep, struct file *file) | ||
1262 | { | ||
1263 | return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, | ||
1264 | ep_loop_check_proc, file, ep, current); | ||
1265 | } | ||
1266 | |||
1201 | /* | 1267 | /* |
1202 | * Open an eventpoll file descriptor. | 1268 | * Open an eventpoll file descriptor. |
1203 | */ | 1269 | */ |
@@ -1246,6 +1312,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, | |||
1246 | struct epoll_event __user *, event) | 1312 | struct epoll_event __user *, event) |
1247 | { | 1313 | { |
1248 | int error; | 1314 | int error; |
1315 | int did_lock_epmutex = 0; | ||
1249 | struct file *file, *tfile; | 1316 | struct file *file, *tfile; |
1250 | struct eventpoll *ep; | 1317 | struct eventpoll *ep; |
1251 | struct epitem *epi; | 1318 | struct epitem *epi; |
@@ -1287,6 +1354,25 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, | |||
1287 | */ | 1354 | */ |
1288 | ep = file->private_data; | 1355 | ep = file->private_data; |
1289 | 1356 | ||
1357 | /* | ||
1358 | * When we insert an epoll file descriptor, inside another epoll file | ||
1358 | * descriptor, there is the chance of creating closed loops, which are | ||
1359 | * better handled here than in more critical paths. | ||
1361 | * | ||
1362 | * We hold epmutex across the loop check and the insert in this case, in | ||
1363 | * order to prevent two separate inserts from racing and each doing the | ||
1364 | * insert "at the same time" such that ep_loop_check passes on both | ||
1365 | * before either one does the insert, thereby creating a cycle. | ||
1366 | */ | ||
1367 | if (unlikely(is_file_epoll(tfile) && op == EPOLL_CTL_ADD)) { | ||
1368 | mutex_lock(&epmutex); | ||
1369 | did_lock_epmutex = 1; | ||
1370 | error = -ELOOP; | ||
1371 | if (ep_loop_check(ep, tfile) != 0) | ||
1372 | goto error_tgt_fput; | ||
1373 | } | ||
1374 | |||
1375 | |||
1290 | mutex_lock(&ep->mtx); | 1376 | mutex_lock(&ep->mtx); |
1291 | 1377 | ||
1292 | /* | 1378 | /* |
@@ -1322,6 +1408,9 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, | |||
1322 | mutex_unlock(&ep->mtx); | 1408 | mutex_unlock(&ep->mtx); |
1323 | 1409 | ||
1324 | error_tgt_fput: | 1410 | error_tgt_fput: |
1411 | if (unlikely(did_lock_epmutex)) | ||
1412 | mutex_unlock(&epmutex); | ||
1413 | |||
1325 | fput(tfile); | 1414 | fput(tfile); |
1326 | error_fput: | 1415 | error_fput: |
1327 | fput(file); | 1416 | fput(file); |
@@ -1441,6 +1530,12 @@ static int __init eventpoll_init(void) | |||
1441 | EP_ITEM_COST; | 1530 | EP_ITEM_COST; |
1442 | BUG_ON(max_user_watches < 0); | 1531 | BUG_ON(max_user_watches < 0); |
1443 | 1532 | ||
1533 | /* | ||
1534 | * Initialize the structure used to perform epoll file descriptor | ||
1535 | * inclusion loops checks. | ||
1536 | */ | ||
1537 | ep_nested_calls_init(&poll_loop_ncalls); | ||
1538 | |||
1444 | /* Initialize the structure used to perform safe poll wait head wake ups */ | 1539 | /* Initialize the structure used to perform safe poll wait head wake ups */ |
1445 | ep_nested_calls_init(&poll_safewake_ncalls); | 1540 | ep_nested_calls_init(&poll_safewake_ncalls); |
1446 | 1541 | ||
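The user-visible effect of ep_loop_check() is that EPOLL_CTL_ADD of one epoll descriptor into another fails with ELOOP when it would close a cycle, with epmutex held across the check and the insert so two racing adds cannot each pass the check first. A short user-space sketch that provokes the check on kernels carrying this change (error handling trimmed):

    #include <errno.h>
    #include <stdio.h>
    #include <sys/epoll.h>

    int main(void)
    {
            int a = epoll_create1(0);
            int b = epoll_create1(0);
            struct epoll_event ev = { .events = EPOLLIN };

            /* a watches b: fine */
            ev.data.fd = b;
            if (epoll_ctl(a, EPOLL_CTL_ADD, b, &ev) != 0)
                    perror("add b to a");

            /* b watches a: would create a cycle, rejected with ELOOP
             * on kernels that carry this check */
            ev.data.fd = a;
            if (epoll_ctl(b, EPOLL_CTL_ADD, a, &ev) != 0 && errno == ELOOP)
                    printf("cycle rejected with ELOOP\n");

            return 0;
    }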
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 0c8d97b56f34..3aa0b72b3b94 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h | |||
@@ -848,6 +848,7 @@ struct ext4_inode_info { | |||
848 | atomic_t i_ioend_count; /* Number of outstanding io_end structs */ | 848 | atomic_t i_ioend_count; /* Number of outstanding io_end structs */ |
849 | /* current io_end structure for async DIO write*/ | 849 | /* current io_end structure for async DIO write*/ |
850 | ext4_io_end_t *cur_aio_dio; | 850 | ext4_io_end_t *cur_aio_dio; |
851 | atomic_t i_aiodio_unwritten; /* Nr. of inflight conversions pending */ | ||
851 | 852 | ||
852 | spinlock_t i_block_reservation_lock; | 853 | spinlock_t i_block_reservation_lock; |
853 | 854 | ||
@@ -2119,6 +2120,15 @@ static inline void set_bitmap_uptodate(struct buffer_head *bh) | |||
2119 | 2120 | ||
2120 | #define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1) | 2121 | #define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1) |
2121 | 2122 | ||
2123 | /* For ioend & aio unwritten conversion wait queues */ | ||
2124 | #define EXT4_WQ_HASH_SZ 37 | ||
2125 | #define ext4_ioend_wq(v) (&ext4__ioend_wq[((unsigned long)(v)) %\ | ||
2126 | EXT4_WQ_HASH_SZ]) | ||
2127 | #define ext4_aio_mutex(v) (&ext4__aio_mutex[((unsigned long)(v)) %\ | ||
2128 | EXT4_WQ_HASH_SZ]) | ||
2129 | extern wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ]; | ||
2130 | extern struct mutex ext4__aio_mutex[EXT4_WQ_HASH_SZ]; | ||
2131 | |||
2122 | #endif /* __KERNEL__ */ | 2132 | #endif /* __KERNEL__ */ |
2123 | 2133 | ||
2124 | #endif /* _EXT4_H */ | 2134 | #endif /* _EXT4_H */ |
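Rather than a wait queue and a mutex per inode, ext4 keeps EXT4_WQ_HASH_SZ (37) shared ones and hashes the inode pointer into a bucket; unrelated inodes may land in the same bucket, which only costs extra wakeups or serialization, never correctness. A user-space sketch of the same pointer-hash bucketing with pthread mutexes (bucket count taken from the patch, everything else illustrative):

    #include <pthread.h>

    #define WQ_HASH_SZ 37

    static pthread_mutex_t aio_mutex[WQ_HASH_SZ];

    /* Call once at startup. */
    static void aio_mutex_init(void)
    {
            int i;

            for (i = 0; i < WQ_HASH_SZ; i++)
                    pthread_mutex_init(&aio_mutex[i], NULL);
    }

    /* Map an object pointer to one of a fixed set of mutexes; distinct
     * objects may collide, which over-serializes them slightly but never
     * misses a needed exclusion. */
    static pthread_mutex_t *aio_mutex_for(const void *obj)
    {
            return &aio_mutex[(unsigned long)obj % WQ_HASH_SZ];
    }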
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 63a75810b7c3..ccce8a7e94ed 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c | |||
@@ -3174,9 +3174,10 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, | |||
3174 | * that this IO needs to convertion to written when IO is | 3174 | * that this IO needs to convertion to written when IO is |
3175 | * completed | 3175 | * completed |
3176 | */ | 3176 | */ |
3177 | if (io) | 3177 | if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) { |
3178 | io->flag = EXT4_IO_END_UNWRITTEN; | 3178 | io->flag = EXT4_IO_END_UNWRITTEN; |
3179 | else | 3179 | atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten); |
3180 | } else | ||
3180 | ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); | 3181 | ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); |
3181 | if (ext4_should_dioread_nolock(inode)) | 3182 | if (ext4_should_dioread_nolock(inode)) |
3182 | map->m_flags |= EXT4_MAP_UNINIT; | 3183 | map->m_flags |= EXT4_MAP_UNINIT; |
@@ -3463,9 +3464,10 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, | |||
3463 | * that we need to perform convertion when IO is done. | 3464 | * that we need to perform convertion when IO is done. |
3464 | */ | 3465 | */ |
3465 | if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { | 3466 | if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { |
3466 | if (io) | 3467 | if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) { |
3467 | io->flag = EXT4_IO_END_UNWRITTEN; | 3468 | io->flag = EXT4_IO_END_UNWRITTEN; |
3468 | else | 3469 | atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten); |
3470 | } else | ||
3469 | ext4_set_inode_state(inode, | 3471 | ext4_set_inode_state(inode, |
3470 | EXT4_STATE_DIO_UNWRITTEN); | 3472 | EXT4_STATE_DIO_UNWRITTEN); |
3471 | } | 3473 | } |
diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 2e8322c8aa88..7b80d543b89e 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c | |||
@@ -55,11 +55,47 @@ static int ext4_release_file(struct inode *inode, struct file *filp) | |||
55 | return 0; | 55 | return 0; |
56 | } | 56 | } |
57 | 57 | ||
58 | static void ext4_aiodio_wait(struct inode *inode) | ||
59 | { | ||
60 | wait_queue_head_t *wq = ext4_ioend_wq(inode); | ||
61 | |||
62 | wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_aiodio_unwritten) == 0)); | ||
63 | } | ||
64 | |||
65 | /* | ||
66 | * This tests whether the IO in question is block-aligned or not. | ||
67 | * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they | ||
68 | * are converted to written only after the IO is complete. Until they are | ||
69 | * mapped, these blocks appear as holes, so dio_zero_block() will assume that | ||
70 | * it needs to zero out portions of the start and/or end block. If 2 AIO | ||
71 | * threads are at work on the same unwritten block, they must be synchronized | ||
72 | * or one thread will zero the other's data, causing corruption. | ||
73 | */ | ||
74 | static int | ||
75 | ext4_unaligned_aio(struct inode *inode, const struct iovec *iov, | ||
76 | unsigned long nr_segs, loff_t pos) | ||
77 | { | ||
78 | struct super_block *sb = inode->i_sb; | ||
79 | int blockmask = sb->s_blocksize - 1; | ||
80 | size_t count = iov_length(iov, nr_segs); | ||
81 | loff_t final_size = pos + count; | ||
82 | |||
83 | if (pos >= inode->i_size) | ||
84 | return 0; | ||
85 | |||
86 | if ((pos & blockmask) || (final_size & blockmask)) | ||
87 | return 1; | ||
88 | |||
89 | return 0; | ||
90 | } | ||
91 | |||
58 | static ssize_t | 92 | static ssize_t |
59 | ext4_file_write(struct kiocb *iocb, const struct iovec *iov, | 93 | ext4_file_write(struct kiocb *iocb, const struct iovec *iov, |
60 | unsigned long nr_segs, loff_t pos) | 94 | unsigned long nr_segs, loff_t pos) |
61 | { | 95 | { |
62 | struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode; | 96 | struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode; |
97 | int unaligned_aio = 0; | ||
98 | int ret; | ||
63 | 99 | ||
64 | /* | 100 | /* |
65 | * If we have encountered a bitmap-format file, the size limit | 101 | * If we have encountered a bitmap-format file, the size limit |
@@ -78,9 +114,31 @@ ext4_file_write(struct kiocb *iocb, const struct iovec *iov, | |||
78 | nr_segs = iov_shorten((struct iovec *)iov, nr_segs, | 114 | nr_segs = iov_shorten((struct iovec *)iov, nr_segs, |
79 | sbi->s_bitmap_maxbytes - pos); | 115 | sbi->s_bitmap_maxbytes - pos); |
80 | } | 116 | } |
117 | } else if (unlikely((iocb->ki_filp->f_flags & O_DIRECT) && | ||
118 | !is_sync_kiocb(iocb))) { | ||
119 | unaligned_aio = ext4_unaligned_aio(inode, iov, nr_segs, pos); | ||
81 | } | 120 | } |
82 | 121 | ||
83 | return generic_file_aio_write(iocb, iov, nr_segs, pos); | 122 | /* Unaligned direct AIO must be serialized; see comment above */ |
123 | if (unaligned_aio) { | ||
124 | static unsigned long unaligned_warn_time; | ||
125 | |||
126 | /* Warn about this once per day */ | ||
127 | if (printk_timed_ratelimit(&unaligned_warn_time, 60*60*24*HZ)) | ||
128 | ext4_msg(inode->i_sb, KERN_WARNING, | ||
129 | "Unaligned AIO/DIO on inode %ld by %s; " | ||
130 | "performance will be poor.", | ||
131 | inode->i_ino, current->comm); | ||
132 | mutex_lock(ext4_aio_mutex(inode)); | ||
133 | ext4_aiodio_wait(inode); | ||
134 | } | ||
135 | |||
136 | ret = generic_file_aio_write(iocb, iov, nr_segs, pos); | ||
137 | |||
138 | if (unaligned_aio) | ||
139 | mutex_unlock(ext4_aio_mutex(inode)); | ||
140 | |||
141 | return ret; | ||
84 | } | 142 | } |
85 | 143 | ||
86 | static const struct vm_operations_struct ext4_file_vm_ops = { | 144 | static const struct vm_operations_struct ext4_file_vm_ops = { |
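ext4_unaligned_aio() above treats an I/O as unaligned when either its start or its end offset falls inside a filesystem block. A small stand-alone sketch of that mask test, with made-up values:

#include <stdio.h>

/* An I/O is "unaligned" if its start or its end falls inside a block. */
static int is_unaligned(long long pos, long long count, unsigned int blocksize)
{
    unsigned int blockmask = blocksize - 1;     /* blocksize is a power of two */
    long long final_size = pos + count;

    return (pos & blockmask) || (final_size & blockmask);
}

int main(void)
{
    printf("%d\n", is_unaligned(4096, 4096, 4096)); /* 0: block aligned  */
    printf("%d\n", is_unaligned(4096,  512, 4096)); /* 1: ends mid-block */
    return 0;
}

Only such unaligned direct AIO takes the hashed mutex and waits for pending conversions, which is why the patch can warn that it will be slow without penalizing aligned I/O.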
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 851f49b2f9d2..d1fe09aea73d 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c | |||
@@ -342,10 +342,15 @@ static struct kmem_cache *ext4_free_ext_cachep; | |||
342 | /* We create slab caches for groupinfo data structures based on the | 342 | /* We create slab caches for groupinfo data structures based on the |
343 | * superblock block size. There will be one per mounted filesystem for | 343 | * superblock block size. There will be one per mounted filesystem for |
344 | * each unique s_blocksize_bits */ | 344 | * each unique s_blocksize_bits */ |
345 | #define NR_GRPINFO_CACHES \ | 345 | #define NR_GRPINFO_CACHES 8 |
346 | (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE + 1) | ||
347 | static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES]; | 346 | static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES]; |
348 | 347 | ||
348 | static const char *ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = { | ||
349 | "ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k", | ||
350 | "ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k", | ||
351 | "ext4_groupinfo_64k", "ext4_groupinfo_128k" | ||
352 | }; | ||
353 | |||
349 | static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, | 354 | static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, |
350 | ext4_group_t group); | 355 | ext4_group_t group); |
351 | static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, | 356 | static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, |
@@ -2414,6 +2419,55 @@ err_freesgi: | |||
2414 | return -ENOMEM; | 2419 | return -ENOMEM; |
2415 | } | 2420 | } |
2416 | 2421 | ||
2422 | static void ext4_groupinfo_destroy_slabs(void) | ||
2423 | { | ||
2424 | int i; | ||
2425 | |||
2426 | for (i = 0; i < NR_GRPINFO_CACHES; i++) { | ||
2427 | if (ext4_groupinfo_caches[i]) | ||
2428 | kmem_cache_destroy(ext4_groupinfo_caches[i]); | ||
2429 | ext4_groupinfo_caches[i] = NULL; | ||
2430 | } | ||
2431 | } | ||
2432 | |||
2433 | static int ext4_groupinfo_create_slab(size_t size) | ||
2434 | { | ||
2435 | static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex); | ||
2436 | int slab_size; | ||
2437 | int blocksize_bits = order_base_2(size); | ||
2438 | int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; | ||
2439 | struct kmem_cache *cachep; | ||
2440 | |||
2441 | if (cache_index >= NR_GRPINFO_CACHES) | ||
2442 | return -EINVAL; | ||
2443 | |||
2444 | if (unlikely(cache_index < 0)) | ||
2445 | cache_index = 0; | ||
2446 | |||
2447 | mutex_lock(&ext4_grpinfo_slab_create_mutex); | ||
2448 | if (ext4_groupinfo_caches[cache_index]) { | ||
2449 | mutex_unlock(&ext4_grpinfo_slab_create_mutex); | ||
2450 | return 0; /* Already created */ | ||
2451 | } | ||
2452 | |||
2453 | slab_size = offsetof(struct ext4_group_info, | ||
2454 | bb_counters[blocksize_bits + 2]); | ||
2455 | |||
2456 | cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index], | ||
2457 | slab_size, 0, SLAB_RECLAIM_ACCOUNT, | ||
2458 | NULL); | ||
2459 | |||
2460 | mutex_unlock(&ext4_grpinfo_slab_create_mutex); | ||
2461 | if (!cachep) { | ||
2462 | printk(KERN_EMERG "EXT4: no memory for groupinfo slab cache\n"); | ||
2463 | return -ENOMEM; | ||
2464 | } | ||
2465 | |||
2466 | ext4_groupinfo_caches[cache_index] = cachep; | ||
2467 | |||
2468 | return 0; | ||
2469 | } | ||
2470 | |||
2417 | int ext4_mb_init(struct super_block *sb, int needs_recovery) | 2471 | int ext4_mb_init(struct super_block *sb, int needs_recovery) |
2418 | { | 2472 | { |
2419 | struct ext4_sb_info *sbi = EXT4_SB(sb); | 2473 | struct ext4_sb_info *sbi = EXT4_SB(sb); |
@@ -2421,9 +2475,6 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery) | |||
2421 | unsigned offset; | 2475 | unsigned offset; |
2422 | unsigned max; | 2476 | unsigned max; |
2423 | int ret; | 2477 | int ret; |
2424 | int cache_index; | ||
2425 | struct kmem_cache *cachep; | ||
2426 | char *namep = NULL; | ||
2427 | 2478 | ||
2428 | i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets); | 2479 | i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets); |
2429 | 2480 | ||
@@ -2440,30 +2491,9 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery) | |||
2440 | goto out; | 2491 | goto out; |
2441 | } | 2492 | } |
2442 | 2493 | ||
2443 | cache_index = sb->s_blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; | 2494 | ret = ext4_groupinfo_create_slab(sb->s_blocksize); |
2444 | cachep = ext4_groupinfo_caches[cache_index]; | 2495 | if (ret < 0) |
2445 | if (!cachep) { | 2496 | goto out; |
2446 | char name[32]; | ||
2447 | int len = offsetof(struct ext4_group_info, | ||
2448 | bb_counters[sb->s_blocksize_bits + 2]); | ||
2449 | |||
2450 | sprintf(name, "ext4_groupinfo_%d", sb->s_blocksize_bits); | ||
2451 | namep = kstrdup(name, GFP_KERNEL); | ||
2452 | if (!namep) { | ||
2453 | ret = -ENOMEM; | ||
2454 | goto out; | ||
2455 | } | ||
2456 | |||
2457 | /* Need to free the kmem_cache_name() when we | ||
2458 | * destroy the slab */ | ||
2459 | cachep = kmem_cache_create(namep, len, 0, | ||
2460 | SLAB_RECLAIM_ACCOUNT, NULL); | ||
2461 | if (!cachep) { | ||
2462 | ret = -ENOMEM; | ||
2463 | goto out; | ||
2464 | } | ||
2465 | ext4_groupinfo_caches[cache_index] = cachep; | ||
2466 | } | ||
2467 | 2497 | ||
2468 | /* order 0 is regular bitmap */ | 2498 | /* order 0 is regular bitmap */ |
2469 | sbi->s_mb_maxs[0] = sb->s_blocksize << 3; | 2499 | sbi->s_mb_maxs[0] = sb->s_blocksize << 3; |
@@ -2520,7 +2550,6 @@ out: | |||
2520 | if (ret) { | 2550 | if (ret) { |
2521 | kfree(sbi->s_mb_offsets); | 2551 | kfree(sbi->s_mb_offsets); |
2522 | kfree(sbi->s_mb_maxs); | 2552 | kfree(sbi->s_mb_maxs); |
2523 | kfree(namep); | ||
2524 | } | 2553 | } |
2525 | return ret; | 2554 | return ret; |
2526 | } | 2555 | } |
@@ -2734,7 +2763,6 @@ int __init ext4_init_mballoc(void) | |||
2734 | 2763 | ||
2735 | void ext4_exit_mballoc(void) | 2764 | void ext4_exit_mballoc(void) |
2736 | { | 2765 | { |
2737 | int i; | ||
2738 | /* | 2766 | /* |
2739 | * Wait for completion of call_rcu()'s on ext4_pspace_cachep | 2767 | * Wait for completion of call_rcu()'s on ext4_pspace_cachep |
2740 | * before destroying the slab cache. | 2768 | * before destroying the slab cache. |
@@ -2743,15 +2771,7 @@ void ext4_exit_mballoc(void) | |||
2743 | kmem_cache_destroy(ext4_pspace_cachep); | 2771 | kmem_cache_destroy(ext4_pspace_cachep); |
2744 | kmem_cache_destroy(ext4_ac_cachep); | 2772 | kmem_cache_destroy(ext4_ac_cachep); |
2745 | kmem_cache_destroy(ext4_free_ext_cachep); | 2773 | kmem_cache_destroy(ext4_free_ext_cachep); |
2746 | 2774 | ext4_groupinfo_destroy_slabs(); | |
2747 | for (i = 0; i < NR_GRPINFO_CACHES; i++) { | ||
2748 | struct kmem_cache *cachep = ext4_groupinfo_caches[i]; | ||
2749 | if (cachep) { | ||
2750 | char *name = (char *)kmem_cache_name(cachep); | ||
2751 | kmem_cache_destroy(cachep); | ||
2752 | kfree(name); | ||
2753 | } | ||
2754 | } | ||
2755 | ext4_remove_debugfs_entry(); | 2775 | ext4_remove_debugfs_entry(); |
2756 | } | 2776 | } |
2757 | 2777 | ||
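The mballoc.c change above replaces ad-hoc, name-allocating cache creation with a fixed table of per-block-size caches created lazily under a mutex. A userspace sketch of that shape, with illustrative names and malloc() standing in for kmem_cache_create():

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define MIN_LOG_SIZE 10                 /* smallest supported size: 1 KiB */
#define NR_CACHES    8                  /* 1 KiB .. 128 KiB, one slot each */

static void *caches[NR_CACHES];
static pthread_mutex_t create_mutex = PTHREAD_MUTEX_INITIALIZER;

static int log2_size(size_t size)
{
    int bits = 0;

    while ((1UL << bits) < size)
        bits++;
    return bits;
}

/* Create the per-size cache on first use; later callers reuse the slot. */
static int cache_create(size_t size)
{
    int idx = log2_size(size) - MIN_LOG_SIZE;

    if (idx < 0 || idx >= NR_CACHES)
        return -1;

    pthread_mutex_lock(&create_mutex);
    if (!caches[idx])                   /* lost a race? then it already exists */
        caches[idx] = malloc(size);     /* stand-in for kmem_cache_create() */
    pthread_mutex_unlock(&create_mutex);

    return caches[idx] ? 0 : -1;
}

int main(void)
{
    printf("create 4k cache: %d\n", cache_create(4096));
    printf("create 4k cache again: %d\n", cache_create(4096));  /* reuses it */
    return 0;
}

Static names in a table also remove the kstrdup()/kfree() bookkeeping that the old code needed for cache names.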
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 7270dcfca92a..955cc309142f 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c | |||
@@ -32,14 +32,8 @@ | |||
32 | 32 | ||
33 | static struct kmem_cache *io_page_cachep, *io_end_cachep; | 33 | static struct kmem_cache *io_page_cachep, *io_end_cachep; |
34 | 34 | ||
35 | #define WQ_HASH_SZ 37 | ||
36 | #define to_ioend_wq(v) (&ioend_wq[((unsigned long)v) % WQ_HASH_SZ]) | ||
37 | static wait_queue_head_t ioend_wq[WQ_HASH_SZ]; | ||
38 | |||
39 | int __init ext4_init_pageio(void) | 35 | int __init ext4_init_pageio(void) |
40 | { | 36 | { |
41 | int i; | ||
42 | |||
43 | io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT); | 37 | io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT); |
44 | if (io_page_cachep == NULL) | 38 | if (io_page_cachep == NULL) |
45 | return -ENOMEM; | 39 | return -ENOMEM; |
@@ -48,9 +42,6 @@ int __init ext4_init_pageio(void) | |||
48 | kmem_cache_destroy(io_page_cachep); | 42 | kmem_cache_destroy(io_page_cachep); |
49 | return -ENOMEM; | 43 | return -ENOMEM; |
50 | } | 44 | } |
51 | for (i = 0; i < WQ_HASH_SZ; i++) | ||
52 | init_waitqueue_head(&ioend_wq[i]); | ||
53 | |||
54 | return 0; | 45 | return 0; |
55 | } | 46 | } |
56 | 47 | ||
@@ -62,7 +53,7 @@ void ext4_exit_pageio(void) | |||
62 | 53 | ||
63 | void ext4_ioend_wait(struct inode *inode) | 54 | void ext4_ioend_wait(struct inode *inode) |
64 | { | 55 | { |
65 | wait_queue_head_t *wq = to_ioend_wq(inode); | 56 | wait_queue_head_t *wq = ext4_ioend_wq(inode); |
66 | 57 | ||
67 | wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0)); | 58 | wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0)); |
68 | } | 59 | } |
@@ -87,7 +78,7 @@ void ext4_free_io_end(ext4_io_end_t *io) | |||
87 | for (i = 0; i < io->num_io_pages; i++) | 78 | for (i = 0; i < io->num_io_pages; i++) |
88 | put_io_page(io->pages[i]); | 79 | put_io_page(io->pages[i]); |
89 | io->num_io_pages = 0; | 80 | io->num_io_pages = 0; |
90 | wq = to_ioend_wq(io->inode); | 81 | wq = ext4_ioend_wq(io->inode); |
91 | if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count) && | 82 | if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count) && |
92 | waitqueue_active(wq)) | 83 | waitqueue_active(wq)) |
93 | wake_up_all(wq); | 84 | wake_up_all(wq); |
@@ -102,6 +93,7 @@ int ext4_end_io_nolock(ext4_io_end_t *io) | |||
102 | struct inode *inode = io->inode; | 93 | struct inode *inode = io->inode; |
103 | loff_t offset = io->offset; | 94 | loff_t offset = io->offset; |
104 | ssize_t size = io->size; | 95 | ssize_t size = io->size; |
96 | wait_queue_head_t *wq; | ||
105 | int ret = 0; | 97 | int ret = 0; |
106 | 98 | ||
107 | ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p," | 99 | ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p," |
@@ -126,7 +118,16 @@ int ext4_end_io_nolock(ext4_io_end_t *io) | |||
126 | if (io->iocb) | 118 | if (io->iocb) |
127 | aio_complete(io->iocb, io->result, 0); | 119 | aio_complete(io->iocb, io->result, 0); |
128 | /* clear the DIO AIO unwritten flag */ | 120 | /* clear the DIO AIO unwritten flag */ |
129 | io->flag &= ~EXT4_IO_END_UNWRITTEN; | 121 | if (io->flag & EXT4_IO_END_UNWRITTEN) { |
122 | io->flag &= ~EXT4_IO_END_UNWRITTEN; | ||
123 | /* Wake up anyone waiting on unwritten extent conversion */ | ||
124 | wq = ext4_ioend_wq(io->inode); | ||
125 | if (atomic_dec_and_test(&EXT4_I(inode)->i_aiodio_unwritten) && | ||
126 | waitqueue_active(wq)) { | ||
127 | wake_up_all(wq); | ||
128 | } | ||
129 | } | ||
130 | |||
130 | return ret; | 131 | return ret; |
131 | } | 132 | } |
132 | 133 | ||
@@ -190,6 +191,7 @@ static void ext4_end_bio(struct bio *bio, int error) | |||
190 | struct inode *inode; | 191 | struct inode *inode; |
191 | unsigned long flags; | 192 | unsigned long flags; |
192 | int i; | 193 | int i; |
194 | sector_t bi_sector = bio->bi_sector; | ||
193 | 195 | ||
194 | BUG_ON(!io_end); | 196 | BUG_ON(!io_end); |
195 | bio->bi_private = NULL; | 197 | bio->bi_private = NULL; |
@@ -207,9 +209,7 @@ static void ext4_end_bio(struct bio *bio, int error) | |||
207 | if (error) | 209 | if (error) |
208 | SetPageError(page); | 210 | SetPageError(page); |
209 | BUG_ON(!head); | 211 | BUG_ON(!head); |
210 | if (head->b_size == PAGE_CACHE_SIZE) | 212 | if (head->b_size != PAGE_CACHE_SIZE) { |
211 | clear_buffer_dirty(head); | ||
212 | else { | ||
213 | loff_t offset; | 213 | loff_t offset; |
214 | loff_t io_end_offset = io_end->offset + io_end->size; | 214 | loff_t io_end_offset = io_end->offset + io_end->size; |
215 | 215 | ||
@@ -221,7 +221,6 @@ static void ext4_end_bio(struct bio *bio, int error) | |||
221 | if (error) | 221 | if (error) |
222 | buffer_io_error(bh); | 222 | buffer_io_error(bh); |
223 | 223 | ||
224 | clear_buffer_dirty(bh); | ||
225 | } | 224 | } |
226 | if (buffer_delay(bh)) | 225 | if (buffer_delay(bh)) |
227 | partial_write = 1; | 226 | partial_write = 1; |
@@ -257,7 +256,7 @@ static void ext4_end_bio(struct bio *bio, int error) | |||
257 | (unsigned long long) io_end->offset, | 256 | (unsigned long long) io_end->offset, |
258 | (long) io_end->size, | 257 | (long) io_end->size, |
259 | (unsigned long long) | 258 | (unsigned long long) |
260 | bio->bi_sector >> (inode->i_blkbits - 9)); | 259 | bi_sector >> (inode->i_blkbits - 9)); |
261 | } | 260 | } |
262 | 261 | ||
263 | /* Add the io_end to per-inode completed io list*/ | 262 | /* Add the io_end to per-inode completed io list*/ |
@@ -380,6 +379,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io, | |||
380 | 379 | ||
381 | blocksize = 1 << inode->i_blkbits; | 380 | blocksize = 1 << inode->i_blkbits; |
382 | 381 | ||
382 | BUG_ON(!PageLocked(page)); | ||
383 | BUG_ON(PageWriteback(page)); | 383 | BUG_ON(PageWriteback(page)); |
384 | set_page_writeback(page); | 384 | set_page_writeback(page); |
385 | ClearPageError(page); | 385 | ClearPageError(page); |
@@ -397,12 +397,14 @@ int ext4_bio_write_page(struct ext4_io_submit *io, | |||
397 | for (bh = head = page_buffers(page), block_start = 0; | 397 | for (bh = head = page_buffers(page), block_start = 0; |
398 | bh != head || !block_start; | 398 | bh != head || !block_start; |
399 | block_start = block_end, bh = bh->b_this_page) { | 399 | block_start = block_end, bh = bh->b_this_page) { |
400 | |||
400 | block_end = block_start + blocksize; | 401 | block_end = block_start + blocksize; |
401 | if (block_start >= len) { | 402 | if (block_start >= len) { |
402 | clear_buffer_dirty(bh); | 403 | clear_buffer_dirty(bh); |
403 | set_buffer_uptodate(bh); | 404 | set_buffer_uptodate(bh); |
404 | continue; | 405 | continue; |
405 | } | 406 | } |
407 | clear_buffer_dirty(bh); | ||
406 | ret = io_submit_add_bh(io, io_page, inode, wbc, bh); | 408 | ret = io_submit_add_bh(io, io_page, inode, wbc, bh); |
407 | if (ret) { | 409 | if (ret) { |
408 | /* | 410 | /* |
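The page-io.c hunks above rely on a counting wait pattern: completions decrement an in-flight counter and wake waiters only when it hits zero, while waiters re-check the condition after every wakeup. A userspace analog using a pthread condition variable in place of the kernel wait queue; all names are illustrative.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  ioend_wq = PTHREAD_COND_INITIALIZER;
static int ioend_count;

static void io_submit_one(void)
{
    pthread_mutex_lock(&lock);
    ioend_count++;
    pthread_mutex_unlock(&lock);
}

static void io_complete_one(void)
{
    pthread_mutex_lock(&lock);
    if (--ioend_count == 0)             /* the last completion wakes waiters */
        pthread_cond_broadcast(&ioend_wq);
    pthread_mutex_unlock(&lock);
}

static void ioend_wait(void)
{
    pthread_mutex_lock(&lock);
    while (ioend_count != 0)            /* like wait_event(): recheck on wakeup */
        pthread_cond_wait(&ioend_wq, &lock);
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    io_submit_one();
    io_complete_one();
    ioend_wait();                       /* returns at once, count is zero */
    printf("all I/O drained\n");
    return 0;
}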
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 48ce561fafac..f6a318f836b2 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
@@ -77,6 +77,7 @@ static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags, | |||
77 | const char *dev_name, void *data); | 77 | const char *dev_name, void *data); |
78 | static void ext4_destroy_lazyinit_thread(void); | 78 | static void ext4_destroy_lazyinit_thread(void); |
79 | static void ext4_unregister_li_request(struct super_block *sb); | 79 | static void ext4_unregister_li_request(struct super_block *sb); |
80 | static void ext4_clear_request_list(void); | ||
80 | 81 | ||
81 | #if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) | 82 | #if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) |
82 | static struct file_system_type ext3_fs_type = { | 83 | static struct file_system_type ext3_fs_type = { |
@@ -832,6 +833,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) | |||
832 | ei->i_sync_tid = 0; | 833 | ei->i_sync_tid = 0; |
833 | ei->i_datasync_tid = 0; | 834 | ei->i_datasync_tid = 0; |
834 | atomic_set(&ei->i_ioend_count, 0); | 835 | atomic_set(&ei->i_ioend_count, 0); |
836 | atomic_set(&ei->i_aiodio_unwritten, 0); | ||
835 | 837 | ||
836 | return &ei->vfs_inode; | 838 | return &ei->vfs_inode; |
837 | } | 839 | } |
@@ -2716,6 +2718,8 @@ static void ext4_unregister_li_request(struct super_block *sb) | |||
2716 | mutex_unlock(&ext4_li_info->li_list_mtx); | 2718 | mutex_unlock(&ext4_li_info->li_list_mtx); |
2717 | } | 2719 | } |
2718 | 2720 | ||
2721 | static struct task_struct *ext4_lazyinit_task; | ||
2722 | |||
2719 | /* | 2723 | /* |
2720 | * This is the function where ext4lazyinit thread lives. It walks | 2724 | * This is the function where ext4lazyinit thread lives. It walks |
2721 | * through the request list searching for next scheduled filesystem. | 2725 | * through the request list searching for next scheduled filesystem. |
@@ -2784,6 +2788,10 @@ cont_thread: | |||
2784 | if (time_before(jiffies, next_wakeup)) | 2788 | if (time_before(jiffies, next_wakeup)) |
2785 | schedule(); | 2789 | schedule(); |
2786 | finish_wait(&eli->li_wait_daemon, &wait); | 2790 | finish_wait(&eli->li_wait_daemon, &wait); |
2791 | if (kthread_should_stop()) { | ||
2792 | ext4_clear_request_list(); | ||
2793 | goto exit_thread; | ||
2794 | } | ||
2787 | } | 2795 | } |
2788 | 2796 | ||
2789 | exit_thread: | 2797 | exit_thread: |
@@ -2808,6 +2816,7 @@ exit_thread: | |||
2808 | wake_up(&eli->li_wait_task); | 2816 | wake_up(&eli->li_wait_task); |
2809 | 2817 | ||
2810 | kfree(ext4_li_info); | 2818 | kfree(ext4_li_info); |
2819 | ext4_lazyinit_task = NULL; | ||
2811 | ext4_li_info = NULL; | 2820 | ext4_li_info = NULL; |
2812 | mutex_unlock(&ext4_li_mtx); | 2821 | mutex_unlock(&ext4_li_mtx); |
2813 | 2822 | ||
@@ -2830,11 +2839,10 @@ static void ext4_clear_request_list(void) | |||
2830 | 2839 | ||
2831 | static int ext4_run_lazyinit_thread(void) | 2840 | static int ext4_run_lazyinit_thread(void) |
2832 | { | 2841 | { |
2833 | struct task_struct *t; | 2842 | ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread, |
2834 | 2843 | ext4_li_info, "ext4lazyinit"); | |
2835 | t = kthread_run(ext4_lazyinit_thread, ext4_li_info, "ext4lazyinit"); | 2844 | if (IS_ERR(ext4_lazyinit_task)) { |
2836 | if (IS_ERR(t)) { | 2845 | int err = PTR_ERR(ext4_lazyinit_task); |
2837 | int err = PTR_ERR(t); | ||
2838 | ext4_clear_request_list(); | 2846 | ext4_clear_request_list(); |
2839 | del_timer_sync(&ext4_li_info->li_timer); | 2847 | del_timer_sync(&ext4_li_info->li_timer); |
2840 | kfree(ext4_li_info); | 2848 | kfree(ext4_li_info); |
@@ -2985,16 +2993,10 @@ static void ext4_destroy_lazyinit_thread(void) | |||
2985 | * If thread exited earlier | 2993 | * If thread exited earlier |
2986 | * there's nothing to be done. | 2994 | * there's nothing to be done. |
2987 | */ | 2995 | */ |
2988 | if (!ext4_li_info) | 2996 | if (!ext4_li_info || !ext4_lazyinit_task) |
2989 | return; | 2997 | return; |
2990 | 2998 | ||
2991 | ext4_clear_request_list(); | 2999 | kthread_stop(ext4_lazyinit_task); |
2992 | |||
2993 | while (ext4_li_info->li_task) { | ||
2994 | wake_up(&ext4_li_info->li_wait_daemon); | ||
2995 | wait_event(ext4_li_info->li_wait_task, | ||
2996 | ext4_li_info->li_task == NULL); | ||
2997 | } | ||
2998 | } | 3000 | } |
2999 | 3001 | ||
3000 | static int ext4_fill_super(struct super_block *sb, void *data, int silent) | 3002 | static int ext4_fill_super(struct super_block *sb, void *data, int silent) |
@@ -4768,7 +4770,7 @@ static struct file_system_type ext4_fs_type = { | |||
4768 | .fs_flags = FS_REQUIRES_DEV, | 4770 | .fs_flags = FS_REQUIRES_DEV, |
4769 | }; | 4771 | }; |
4770 | 4772 | ||
4771 | int __init ext4_init_feat_adverts(void) | 4773 | static int __init ext4_init_feat_adverts(void) |
4772 | { | 4774 | { |
4773 | struct ext4_features *ef; | 4775 | struct ext4_features *ef; |
4774 | int ret = -ENOMEM; | 4776 | int ret = -ENOMEM; |
@@ -4792,23 +4794,44 @@ out: | |||
4792 | return ret; | 4794 | return ret; |
4793 | } | 4795 | } |
4794 | 4796 | ||
4797 | static void ext4_exit_feat_adverts(void) | ||
4798 | { | ||
4799 | kobject_put(&ext4_feat->f_kobj); | ||
4800 | wait_for_completion(&ext4_feat->f_kobj_unregister); | ||
4801 | kfree(ext4_feat); | ||
4802 | } | ||
4803 | |||
4804 | /* Shared across all ext4 file systems */ | ||
4805 | wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ]; | ||
4806 | struct mutex ext4__aio_mutex[EXT4_WQ_HASH_SZ]; | ||
4807 | |||
4795 | static int __init ext4_init_fs(void) | 4808 | static int __init ext4_init_fs(void) |
4796 | { | 4809 | { |
4797 | int err; | 4810 | int i, err; |
4798 | 4811 | ||
4799 | ext4_check_flag_values(); | 4812 | ext4_check_flag_values(); |
4813 | |||
4814 | for (i = 0; i < EXT4_WQ_HASH_SZ; i++) { | ||
4815 | mutex_init(&ext4__aio_mutex[i]); | ||
4816 | init_waitqueue_head(&ext4__ioend_wq[i]); | ||
4817 | } | ||
4818 | |||
4800 | err = ext4_init_pageio(); | 4819 | err = ext4_init_pageio(); |
4801 | if (err) | 4820 | if (err) |
4802 | return err; | 4821 | return err; |
4803 | err = ext4_init_system_zone(); | 4822 | err = ext4_init_system_zone(); |
4804 | if (err) | 4823 | if (err) |
4805 | goto out5; | 4824 | goto out7; |
4806 | ext4_kset = kset_create_and_add("ext4", NULL, fs_kobj); | 4825 | ext4_kset = kset_create_and_add("ext4", NULL, fs_kobj); |
4807 | if (!ext4_kset) | 4826 | if (!ext4_kset) |
4808 | goto out4; | 4827 | goto out6; |
4809 | ext4_proc_root = proc_mkdir("fs/ext4", NULL); | 4828 | ext4_proc_root = proc_mkdir("fs/ext4", NULL); |
4829 | if (!ext4_proc_root) | ||
4830 | goto out5; | ||
4810 | 4831 | ||
4811 | err = ext4_init_feat_adverts(); | 4832 | err = ext4_init_feat_adverts(); |
4833 | if (err) | ||
4834 | goto out4; | ||
4812 | 4835 | ||
4813 | err = ext4_init_mballoc(); | 4836 | err = ext4_init_mballoc(); |
4814 | if (err) | 4837 | if (err) |
@@ -4838,12 +4861,14 @@ out1: | |||
4838 | out2: | 4861 | out2: |
4839 | ext4_exit_mballoc(); | 4862 | ext4_exit_mballoc(); |
4840 | out3: | 4863 | out3: |
4841 | kfree(ext4_feat); | 4864 | ext4_exit_feat_adverts(); |
4865 | out4: | ||
4842 | remove_proc_entry("fs/ext4", NULL); | 4866 | remove_proc_entry("fs/ext4", NULL); |
4867 | out5: | ||
4843 | kset_unregister(ext4_kset); | 4868 | kset_unregister(ext4_kset); |
4844 | out4: | 4869 | out6: |
4845 | ext4_exit_system_zone(); | 4870 | ext4_exit_system_zone(); |
4846 | out5: | 4871 | out7: |
4847 | ext4_exit_pageio(); | 4872 | ext4_exit_pageio(); |
4848 | return err; | 4873 | return err; |
4849 | } | 4874 | } |
@@ -4857,6 +4882,7 @@ static void __exit ext4_exit_fs(void) | |||
4857 | destroy_inodecache(); | 4882 | destroy_inodecache(); |
4858 | ext4_exit_xattr(); | 4883 | ext4_exit_xattr(); |
4859 | ext4_exit_mballoc(); | 4884 | ext4_exit_mballoc(); |
4885 | ext4_exit_feat_adverts(); | ||
4860 | remove_proc_entry("fs/ext4", NULL); | 4886 | remove_proc_entry("fs/ext4", NULL); |
4861 | kset_unregister(ext4_kset); | 4887 | kset_unregister(ext4_kset); |
4862 | ext4_exit_system_zone(); | 4888 | ext4_exit_system_zone(); |
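Among the super.c changes above, the lazyinit thread now keeps a task handle and is shut down with kthread_stop() instead of ad-hoc wait queues. A hedged pthread sketch of that lifecycle, with a stop flag plus join standing in for kthread_run()/kthread_stop():

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_t lazyinit_task;
static atomic_bool should_stop;

static void *lazyinit_thread(void *arg)
{
    (void)arg;
    while (!atomic_load(&should_stop)) {
        /* ... walk the request list and do background initialization ... */
        usleep(1000);
    }
    /* a stop request was seen: drop queued work and exit */
    return NULL;
}

static void run_lazyinit_thread(void)
{
    pthread_create(&lazyinit_task, NULL, lazyinit_thread, NULL);
}

static void destroy_lazyinit_thread(void)
{
    atomic_store(&should_stop, true);   /* analog of kthread_stop() ... */
    pthread_join(lazyinit_task, NULL);  /* ... which also waits for exit */
}

int main(void)
{
    run_lazyinit_thread();
    destroy_lazyinit_thread();
    printf("lazyinit worker stopped\n");
    return 0;
}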
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index bfed8447ed80..83543b5ff941 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c | |||
@@ -1283,8 +1283,11 @@ static int fuse_do_setattr(struct dentry *entry, struct iattr *attr, | |||
1283 | if (err) | 1283 | if (err) |
1284 | return err; | 1284 | return err; |
1285 | 1285 | ||
1286 | if ((attr->ia_valid & ATTR_OPEN) && fc->atomic_o_trunc) | 1286 | if (attr->ia_valid & ATTR_OPEN) { |
1287 | return 0; | 1287 | if (fc->atomic_o_trunc) |
1288 | return 0; | ||
1289 | file = NULL; | ||
1290 | } | ||
1288 | 1291 | ||
1289 | if (attr->ia_valid & ATTR_SIZE) | 1292 | if (attr->ia_valid & ATTR_SIZE) |
1290 | is_truncate = true; | 1293 | is_truncate = true; |
diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 95da1bc1c826..9e0832dbb1e3 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c | |||
@@ -86,18 +86,52 @@ struct fuse_file *fuse_file_get(struct fuse_file *ff) | |||
86 | return ff; | 86 | return ff; |
87 | } | 87 | } |
88 | 88 | ||
89 | static void fuse_release_async(struct work_struct *work) | ||
90 | { | ||
91 | struct fuse_req *req; | ||
92 | struct fuse_conn *fc; | ||
93 | struct path path; | ||
94 | |||
95 | req = container_of(work, struct fuse_req, misc.release.work); | ||
96 | path = req->misc.release.path; | ||
97 | fc = get_fuse_conn(path.dentry->d_inode); | ||
98 | |||
99 | fuse_put_request(fc, req); | ||
100 | path_put(&path); | ||
101 | } | ||
102 | |||
89 | static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req) | 103 | static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req) |
90 | { | 104 | { |
91 | path_put(&req->misc.release.path); | 105 | if (fc->destroy_req) { |
106 | /* | ||
107 | * If this is a fuseblk mount, then it's possible that | ||
108 | * releasing the path will result in releasing the | ||
109 | * super block and sending the DESTROY request. If | ||
110 | * the server is single threaded, this would hang. | ||
111 | * For this reason do the path_put() in a separate | ||
112 | * thread. | ||
113 | */ | ||
114 | atomic_inc(&req->count); | ||
115 | INIT_WORK(&req->misc.release.work, fuse_release_async); | ||
116 | schedule_work(&req->misc.release.work); | ||
117 | } else { | ||
118 | path_put(&req->misc.release.path); | ||
119 | } | ||
92 | } | 120 | } |
93 | 121 | ||
94 | static void fuse_file_put(struct fuse_file *ff) | 122 | static void fuse_file_put(struct fuse_file *ff, bool sync) |
95 | { | 123 | { |
96 | if (atomic_dec_and_test(&ff->count)) { | 124 | if (atomic_dec_and_test(&ff->count)) { |
97 | struct fuse_req *req = ff->reserved_req; | 125 | struct fuse_req *req = ff->reserved_req; |
98 | 126 | ||
99 | req->end = fuse_release_end; | 127 | if (sync) { |
100 | fuse_request_send_background(ff->fc, req); | 128 | fuse_request_send(ff->fc, req); |
129 | path_put(&req->misc.release.path); | ||
130 | fuse_put_request(ff->fc, req); | ||
131 | } else { | ||
132 | req->end = fuse_release_end; | ||
133 | fuse_request_send_background(ff->fc, req); | ||
134 | } | ||
101 | kfree(ff); | 135 | kfree(ff); |
102 | } | 136 | } |
103 | } | 137 | } |
@@ -219,8 +253,12 @@ void fuse_release_common(struct file *file, int opcode) | |||
219 | * Normally this will send the RELEASE request, however if | 253 | * Normally this will send the RELEASE request, however if |
220 | * some asynchronous READ or WRITE requests are outstanding, | 254 | * some asynchronous READ or WRITE requests are outstanding, |
221 | * the sending will be delayed. | 255 | * the sending will be delayed. |
256 | * | ||
257 | * Make the release synchronous if this is a fuseblk mount, | ||
258 | * synchronous RELEASE is allowed (and desirable) in this case | ||
259 | * because the server can be trusted not to screw up. | ||
222 | */ | 260 | */ |
223 | fuse_file_put(ff); | 261 | fuse_file_put(ff, ff->fc->destroy_req != NULL); |
224 | } | 262 | } |
225 | 263 | ||
226 | static int fuse_open(struct inode *inode, struct file *file) | 264 | static int fuse_open(struct inode *inode, struct file *file) |
@@ -558,7 +596,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req) | |||
558 | page_cache_release(page); | 596 | page_cache_release(page); |
559 | } | 597 | } |
560 | if (req->ff) | 598 | if (req->ff) |
561 | fuse_file_put(req->ff); | 599 | fuse_file_put(req->ff, false); |
562 | } | 600 | } |
563 | 601 | ||
564 | static void fuse_send_readpages(struct fuse_req *req, struct file *file) | 602 | static void fuse_send_readpages(struct fuse_req *req, struct file *file) |
@@ -1137,7 +1175,7 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf, | |||
1137 | static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req) | 1175 | static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req) |
1138 | { | 1176 | { |
1139 | __free_page(req->pages[0]); | 1177 | __free_page(req->pages[0]); |
1140 | fuse_file_put(req->ff); | 1178 | fuse_file_put(req->ff, false); |
1141 | } | 1179 | } |
1142 | 1180 | ||
1143 | static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req) | 1181 | static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req) |
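The fuse/file.c change above decides, on the final put, whether the release can be done inline or must be deferred to another context so the caller cannot wait on itself. A minimal sketch of that sync-vs-deferred decision; a spawned thread and a global handle are illustrative stand-ins for the kernel workqueue, and the field names are hypothetical.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct file_ref {
    atomic_int count;
    int sync_release;           /* fuseblk-style: safe to release inline */
};

static pthread_t release_work;  /* stand-in for the shared workqueue */

static void do_release(struct file_ref *f)
{
    printf("release done (%s)\n", f->sync_release ? "sync" : "deferred");
}

static void *release_worker(void *arg)
{
    do_release(arg);            /* runs outside the caller's context */
    return NULL;
}

/* Drop a reference; on the last put either release inline or defer the
 * release so the caller cannot end up waiting on itself. */
static void file_put(struct file_ref *f)
{
    if (atomic_fetch_sub(&f->count, 1) != 1)
        return;                 /* not the last reference */

    if (f->sync_release)
        do_release(f);
    else
        pthread_create(&release_work, NULL, release_worker, f);
}

int main(void)
{
    struct file_ref f = { .count = 1, .sync_release = 0 };

    file_put(&f);
    pthread_join(release_work, NULL);   /* "flush the workqueue" before exit */
    return 0;
}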
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index ae5744a2f9e9..d4286947bc2c 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/rwsem.h> | 21 | #include <linux/rwsem.h> |
22 | #include <linux/rbtree.h> | 22 | #include <linux/rbtree.h> |
23 | #include <linux/poll.h> | 23 | #include <linux/poll.h> |
24 | #include <linux/workqueue.h> | ||
24 | 25 | ||
25 | /** Max number of pages that can be used in a single read request */ | 26 | /** Max number of pages that can be used in a single read request */ |
26 | #define FUSE_MAX_PAGES_PER_REQ 32 | 27 | #define FUSE_MAX_PAGES_PER_REQ 32 |
@@ -262,7 +263,10 @@ struct fuse_req { | |||
262 | /** Data for asynchronous requests */ | 263 | /** Data for asynchronous requests */ |
263 | union { | 264 | union { |
264 | struct { | 265 | struct { |
265 | struct fuse_release_in in; | 266 | union { |
267 | struct fuse_release_in in; | ||
268 | struct work_struct work; | ||
269 | }; | ||
266 | struct path path; | 270 | struct path path; |
267 | } release; | 271 | } release; |
268 | struct fuse_init_in init_in; | 272 | struct fuse_init_in init_in; |
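The fuse_i.h change above overlays the release arguments and the deferred-work handle in an anonymous union, which is safe because the two are used in disjoint phases of the request's life. A tiny illustrative example of that trick, with userspace types rather than the kernel structures:

#include <stdio.h>

struct release_args { int flags; };
struct work_item    { void (*fn)(void *arg); };

struct request {
    union {                     /* phase 1: arguments; phase 2: deferred work */
        struct release_args in;
        struct work_item    work;
    };
    char path[32];
};

int main(void)
{
    struct request req = { .in = { .flags = 1 } };

    printf("in and work share storage: %d\n",
           (void *)&req.in == (void *)&req.work);   /* prints 1 */
    return 0;
}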
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 08a8beb152e6..7cd9a5a68d59 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c | |||
@@ -1779,11 +1779,11 @@ int __init gfs2_glock_init(void) | |||
1779 | #endif | 1779 | #endif |
1780 | 1780 | ||
1781 | glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM | | 1781 | glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM | |
1782 | WQ_HIGHPRI | WQ_FREEZEABLE, 0); | 1782 | WQ_HIGHPRI | WQ_FREEZABLE, 0); |
1783 | if (IS_ERR(glock_workqueue)) | 1783 | if (IS_ERR(glock_workqueue)) |
1784 | return PTR_ERR(glock_workqueue); | 1784 | return PTR_ERR(glock_workqueue); |
1785 | gfs2_delete_workqueue = alloc_workqueue("delete_workqueue", | 1785 | gfs2_delete_workqueue = alloc_workqueue("delete_workqueue", |
1786 | WQ_MEM_RECLAIM | WQ_FREEZEABLE, | 1786 | WQ_MEM_RECLAIM | WQ_FREEZABLE, |
1787 | 0); | 1787 | 0); |
1788 | if (IS_ERR(gfs2_delete_workqueue)) { | 1788 | if (IS_ERR(gfs2_delete_workqueue)) { |
1789 | destroy_workqueue(glock_workqueue); | 1789 | destroy_workqueue(glock_workqueue); |
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index ebef7ab6e17e..72c31a315d96 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c | |||
@@ -59,14 +59,7 @@ static void gfs2_init_gl_aspace_once(void *foo) | |||
59 | struct address_space *mapping = (struct address_space *)(gl + 1); | 59 | struct address_space *mapping = (struct address_space *)(gl + 1); |
60 | 60 | ||
61 | gfs2_init_glock_once(gl); | 61 | gfs2_init_glock_once(gl); |
62 | memset(mapping, 0, sizeof(*mapping)); | 62 | address_space_init_once(mapping); |
63 | INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC); | ||
64 | spin_lock_init(&mapping->tree_lock); | ||
65 | spin_lock_init(&mapping->i_mmap_lock); | ||
66 | INIT_LIST_HEAD(&mapping->private_list); | ||
67 | spin_lock_init(&mapping->private_lock); | ||
68 | INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap); | ||
69 | INIT_LIST_HEAD(&mapping->i_mmap_nonlinear); | ||
70 | } | 63 | } |
71 | 64 | ||
72 | /** | 65 | /** |
@@ -144,7 +137,7 @@ static int __init init_gfs2_fs(void) | |||
144 | 137 | ||
145 | error = -ENOMEM; | 138 | error = -ENOMEM; |
146 | gfs_recovery_wq = alloc_workqueue("gfs_recovery", | 139 | gfs_recovery_wq = alloc_workqueue("gfs_recovery", |
147 | WQ_MEM_RECLAIM | WQ_FREEZEABLE, 0); | 140 | WQ_MEM_RECLAIM | WQ_FREEZABLE, 0); |
148 | if (!gfs_recovery_wq) | 141 | if (!gfs_recovery_wq) |
149 | goto fail_wq; | 142 | goto fail_wq; |
150 | 143 | ||
diff --git a/fs/inode.c b/fs/inode.c index da85e56378f3..0647d80accf6 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
@@ -295,6 +295,20 @@ static void destroy_inode(struct inode *inode) | |||
295 | call_rcu(&inode->i_rcu, i_callback); | 295 | call_rcu(&inode->i_rcu, i_callback); |
296 | } | 296 | } |
297 | 297 | ||
298 | void address_space_init_once(struct address_space *mapping) | ||
299 | { | ||
300 | memset(mapping, 0, sizeof(*mapping)); | ||
301 | INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC); | ||
302 | spin_lock_init(&mapping->tree_lock); | ||
303 | spin_lock_init(&mapping->i_mmap_lock); | ||
304 | INIT_LIST_HEAD(&mapping->private_list); | ||
305 | spin_lock_init(&mapping->private_lock); | ||
306 | INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap); | ||
307 | INIT_LIST_HEAD(&mapping->i_mmap_nonlinear); | ||
308 | mutex_init(&mapping->unmap_mutex); | ||
309 | } | ||
310 | EXPORT_SYMBOL(address_space_init_once); | ||
311 | |||
298 | /* | 312 | /* |
299 | * These are initializations that only need to be done | 313 | * These are initializations that only need to be done |
300 | * once, because the fields are idempotent across use | 314 | * once, because the fields are idempotent across use |
@@ -308,13 +322,7 @@ void inode_init_once(struct inode *inode) | |||
308 | INIT_LIST_HEAD(&inode->i_devices); | 322 | INIT_LIST_HEAD(&inode->i_devices); |
309 | INIT_LIST_HEAD(&inode->i_wb_list); | 323 | INIT_LIST_HEAD(&inode->i_wb_list); |
310 | INIT_LIST_HEAD(&inode->i_lru); | 324 | INIT_LIST_HEAD(&inode->i_lru); |
311 | INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC); | 325 | address_space_init_once(&inode->i_data); |
312 | spin_lock_init(&inode->i_data.tree_lock); | ||
313 | spin_lock_init(&inode->i_data.i_mmap_lock); | ||
314 | INIT_LIST_HEAD(&inode->i_data.private_list); | ||
315 | spin_lock_init(&inode->i_data.private_lock); | ||
316 | INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap); | ||
317 | INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear); | ||
318 | i_size_ordered_init(inode); | 326 | i_size_ordered_init(inode); |
319 | #ifdef CONFIG_FSNOTIFY | 327 | #ifdef CONFIG_FSNOTIFY |
320 | INIT_HLIST_HEAD(&inode->i_fsnotify_marks); | 328 | INIT_HLIST_HEAD(&inode->i_fsnotify_marks); |
@@ -540,11 +548,14 @@ void evict_inodes(struct super_block *sb) | |||
540 | /** | 548 | /** |
541 | * invalidate_inodes - attempt to free all inodes on a superblock | 549 | * invalidate_inodes - attempt to free all inodes on a superblock |
542 | * @sb: superblock to operate on | 550 | * @sb: superblock to operate on |
551 | * @kill_dirty: flag to guide handling of dirty inodes | ||
543 | * | 552 | * |
544 | * Attempts to free all inodes for a given superblock. If there were any | 553 | * Attempts to free all inodes for a given superblock. If there were any |
545 | * busy inodes return a non-zero value, else zero. | 554 | * busy inodes return a non-zero value, else zero. |
555 | * If @kill_dirty is set, discard dirty inodes too, otherwise treat | ||
556 | * them as busy. | ||
546 | */ | 557 | */ |
547 | int invalidate_inodes(struct super_block *sb) | 558 | int invalidate_inodes(struct super_block *sb, bool kill_dirty) |
548 | { | 559 | { |
549 | int busy = 0; | 560 | int busy = 0; |
550 | struct inode *inode, *next; | 561 | struct inode *inode, *next; |
@@ -556,6 +567,10 @@ int invalidate_inodes(struct super_block *sb) | |||
556 | list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) { | 567 | list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) { |
557 | if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) | 568 | if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) |
558 | continue; | 569 | continue; |
570 | if (inode->i_state & I_DIRTY && !kill_dirty) { | ||
571 | busy = 1; | ||
572 | continue; | ||
573 | } | ||
559 | if (atomic_read(&inode->i_count)) { | 574 | if (atomic_read(&inode->i_count)) { |
560 | busy = 1; | 575 | busy = 1; |
561 | continue; | 576 | continue; |
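The invalidate_inodes() change above adds a kill_dirty switch: dirty inodes either count as busy and are kept, or are discarded like everything else, at the caller's choice. A simplified, purely illustrative loop over an array showing those semantics:

#include <stdbool.h>
#include <stdio.h>

#define I_DIRTY   0x1
#define I_FREEING 0x2

struct inode { unsigned int state; int count; };

static int invalidate_inodes(struct inode *inodes, int n, bool kill_dirty)
{
    int busy = 0;

    for (int i = 0; i < n; i++) {
        struct inode *inode = &inodes[i];

        if ((inode->state & I_DIRTY) && !kill_dirty) {
            busy = 1;                   /* leave dirty inodes alone */
            continue;
        }
        if (inode->count) {
            busy = 1;                   /* still referenced */
            continue;
        }
        inode->state |= I_FREEING;      /* stand-in for the real eviction */
    }
    return busy;
}

int main(void)
{
    struct inode set[2] = { { I_DIRTY, 0 }, { 0, 0 } };

    printf("busy=%d\n", invalidate_inodes(set, 2, false));  /* 1: dirty kept */
    printf("busy=%d\n", invalidate_inodes(set, 2, true));   /* 0: dirty discarded */
    return 0;
}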
diff --git a/fs/internal.h b/fs/internal.h index 0663568b1247..9b976b57d7fe 100644 --- a/fs/internal.h +++ b/fs/internal.h | |||
@@ -112,4 +112,4 @@ extern void release_open_intent(struct nameidata *); | |||
112 | */ | 112 | */ |
113 | extern int get_nr_dirty_inodes(void); | 113 | extern int get_nr_dirty_inodes(void); |
114 | extern void evict_inodes(struct super_block *); | 114 | extern void evict_inodes(struct super_block *); |
115 | extern int invalidate_inodes(struct super_block *); | 115 | extern int invalidate_inodes(struct super_block *, bool); |
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 9e4686900f18..97e73469b2c4 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c | |||
@@ -473,7 +473,8 @@ int __jbd2_log_space_left(journal_t *journal) | |||
473 | } | 473 | } |
474 | 474 | ||
475 | /* | 475 | /* |
476 | * Called under j_state_lock. Returns true if a transaction commit was started. | 476 | * Called with j_state_lock locked for writing. |
477 | * Returns true if a transaction commit was started. | ||
477 | */ | 478 | */ |
478 | int __jbd2_log_start_commit(journal_t *journal, tid_t target) | 479 | int __jbd2_log_start_commit(journal_t *journal, tid_t target) |
479 | { | 480 | { |
@@ -520,11 +521,13 @@ int jbd2_journal_force_commit_nested(journal_t *journal) | |||
520 | { | 521 | { |
521 | transaction_t *transaction = NULL; | 522 | transaction_t *transaction = NULL; |
522 | tid_t tid; | 523 | tid_t tid; |
524 | int need_to_start = 0; | ||
523 | 525 | ||
524 | read_lock(&journal->j_state_lock); | 526 | read_lock(&journal->j_state_lock); |
525 | if (journal->j_running_transaction && !current->journal_info) { | 527 | if (journal->j_running_transaction && !current->journal_info) { |
526 | transaction = journal->j_running_transaction; | 528 | transaction = journal->j_running_transaction; |
527 | __jbd2_log_start_commit(journal, transaction->t_tid); | 529 | if (!tid_geq(journal->j_commit_request, transaction->t_tid)) |
530 | need_to_start = 1; | ||
528 | } else if (journal->j_committing_transaction) | 531 | } else if (journal->j_committing_transaction) |
529 | transaction = journal->j_committing_transaction; | 532 | transaction = journal->j_committing_transaction; |
530 | 533 | ||
@@ -535,6 +538,8 @@ int jbd2_journal_force_commit_nested(journal_t *journal) | |||
535 | 538 | ||
536 | tid = transaction->t_tid; | 539 | tid = transaction->t_tid; |
537 | read_unlock(&journal->j_state_lock); | 540 | read_unlock(&journal->j_state_lock); |
541 | if (need_to_start) | ||
542 | jbd2_log_start_commit(journal, tid); | ||
538 | jbd2_log_wait_commit(journal, tid); | 543 | jbd2_log_wait_commit(journal, tid); |
539 | return 1; | 544 | return 1; |
540 | } | 545 | } |
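The jbd2 changes above follow one pattern: __jbd2_log_start_commit() now requires the state lock held for writing, so callers that only hold it for reading record whether a commit is needed, drop the read lock, and then call the write-locking helper. A hedged pthread rwlock sketch of that check-then-act split, with illustrative names:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t state_lock = PTHREAD_RWLOCK_INITIALIZER;
static unsigned int commit_request, running_tid = 42;

static void log_start_commit(unsigned int tid)
{
    pthread_rwlock_wrlock(&state_lock);         /* writer: may modify state */
    if (commit_request < tid)
        commit_request = tid;
    pthread_rwlock_unlock(&state_lock);
}

static void force_commit(void)
{
    unsigned int tid;
    int need_to_start;

    pthread_rwlock_rdlock(&state_lock);         /* reader: only inspect state */
    tid = running_tid;
    need_to_start = commit_request < tid;
    pthread_rwlock_unlock(&state_lock);

    if (need_to_start)                          /* act after dropping the read lock */
        log_start_commit(tid);
}

int main(void)
{
    force_commit();
    printf("commit_request=%u\n", commit_request);   /* 42 */
    return 0;
}

The state may change between the check and the act, but starting a commit that someone else already requested is harmless, so the race is benign.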
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index faad2bd787c7..1d1191050f99 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c | |||
@@ -117,10 +117,10 @@ static inline void update_t_max_wait(transaction_t *transaction) | |||
117 | static int start_this_handle(journal_t *journal, handle_t *handle, | 117 | static int start_this_handle(journal_t *journal, handle_t *handle, |
118 | int gfp_mask) | 118 | int gfp_mask) |
119 | { | 119 | { |
120 | transaction_t *transaction; | 120 | transaction_t *transaction, *new_transaction = NULL; |
121 | int needed; | 121 | tid_t tid; |
122 | int nblocks = handle->h_buffer_credits; | 122 | int needed, need_to_start; |
123 | transaction_t *new_transaction = NULL; | 123 | int nblocks = handle->h_buffer_credits; |
124 | 124 | ||
125 | if (nblocks > journal->j_max_transaction_buffers) { | 125 | if (nblocks > journal->j_max_transaction_buffers) { |
126 | printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n", | 126 | printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n", |
@@ -222,8 +222,11 @@ repeat: | |||
222 | atomic_sub(nblocks, &transaction->t_outstanding_credits); | 222 | atomic_sub(nblocks, &transaction->t_outstanding_credits); |
223 | prepare_to_wait(&journal->j_wait_transaction_locked, &wait, | 223 | prepare_to_wait(&journal->j_wait_transaction_locked, &wait, |
224 | TASK_UNINTERRUPTIBLE); | 224 | TASK_UNINTERRUPTIBLE); |
225 | __jbd2_log_start_commit(journal, transaction->t_tid); | 225 | tid = transaction->t_tid; |
226 | need_to_start = !tid_geq(journal->j_commit_request, tid); | ||
226 | read_unlock(&journal->j_state_lock); | 227 | read_unlock(&journal->j_state_lock); |
228 | if (need_to_start) | ||
229 | jbd2_log_start_commit(journal, tid); | ||
227 | schedule(); | 230 | schedule(); |
228 | finish_wait(&journal->j_wait_transaction_locked, &wait); | 231 | finish_wait(&journal->j_wait_transaction_locked, &wait); |
229 | goto repeat; | 232 | goto repeat; |
@@ -442,7 +445,8 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, int gfp_mask) | |||
442 | { | 445 | { |
443 | transaction_t *transaction = handle->h_transaction; | 446 | transaction_t *transaction = handle->h_transaction; |
444 | journal_t *journal = transaction->t_journal; | 447 | journal_t *journal = transaction->t_journal; |
445 | int ret; | 448 | tid_t tid; |
449 | int need_to_start, ret; | ||
446 | 450 | ||
447 | /* If we've had an abort of any type, don't even think about | 451 | /* If we've had an abort of any type, don't even think about |
448 | * actually doing the restart! */ | 452 | * actually doing the restart! */ |
@@ -465,8 +469,11 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, int gfp_mask) | |||
465 | spin_unlock(&transaction->t_handle_lock); | 469 | spin_unlock(&transaction->t_handle_lock); |
466 | 470 | ||
467 | jbd_debug(2, "restarting handle %p\n", handle); | 471 | jbd_debug(2, "restarting handle %p\n", handle); |
468 | __jbd2_log_start_commit(journal, transaction->t_tid); | 472 | tid = transaction->t_tid; |
473 | need_to_start = !tid_geq(journal->j_commit_request, tid); | ||
469 | read_unlock(&journal->j_state_lock); | 474 | read_unlock(&journal->j_state_lock); |
475 | if (need_to_start) | ||
476 | jbd2_log_start_commit(journal, tid); | ||
470 | 477 | ||
471 | lock_map_release(&handle->h_lockdep_map); | 478 | lock_map_release(&handle->h_lockdep_map); |
472 | handle->h_buffer_credits = nblocks; | 479 | handle->h_buffer_credits = nblocks; |
diff --git a/fs/namei.c b/fs/namei.c index 7d77f24d32a9..0087cf9c2c6b 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -455,14 +455,6 @@ static int nameidata_dentry_drop_rcu(struct nameidata *nd, struct dentry *dentry | |||
455 | struct fs_struct *fs = current->fs; | 455 | struct fs_struct *fs = current->fs; |
456 | struct dentry *parent = nd->path.dentry; | 456 | struct dentry *parent = nd->path.dentry; |
457 | 457 | ||
458 | /* | ||
459 | * It can be possible to revalidate the dentry that we started | ||
460 | * the path walk with. force_reval_path may also revalidate the | ||
461 | * dentry already committed to the nameidata. | ||
462 | */ | ||
463 | if (unlikely(parent == dentry)) | ||
464 | return nameidata_drop_rcu(nd); | ||
465 | |||
466 | BUG_ON(!(nd->flags & LOOKUP_RCU)); | 458 | BUG_ON(!(nd->flags & LOOKUP_RCU)); |
467 | if (nd->root.mnt) { | 459 | if (nd->root.mnt) { |
468 | spin_lock(&fs->lock); | 460 | spin_lock(&fs->lock); |
@@ -561,39 +553,25 @@ static inline int nameidata_drop_rcu_last_maybe(struct nameidata *nd) | |||
561 | */ | 553 | */ |
562 | void release_open_intent(struct nameidata *nd) | 554 | void release_open_intent(struct nameidata *nd) |
563 | { | 555 | { |
564 | if (nd->intent.open.file->f_path.dentry == NULL) | 556 | struct file *file = nd->intent.open.file; |
565 | put_filp(nd->intent.open.file); | ||
566 | else | ||
567 | fput(nd->intent.open.file); | ||
568 | } | ||
569 | |||
570 | /* | ||
571 | * Call d_revalidate and handle filesystems that request rcu-walk | ||
572 | * to be dropped. This may be called and return in rcu-walk mode, | ||
573 | * regardless of success or error. If -ECHILD is returned, the caller | ||
574 | * must return -ECHILD back up the path walk stack so path walk may | ||
575 | * be restarted in ref-walk mode. | ||
576 | */ | ||
577 | static int d_revalidate(struct dentry *dentry, struct nameidata *nd) | ||
578 | { | ||
579 | int status; | ||
580 | 557 | ||
581 | status = dentry->d_op->d_revalidate(dentry, nd); | 558 | if (file && !IS_ERR(file)) { |
582 | if (status == -ECHILD) { | 559 | if (file->f_path.dentry == NULL) |
583 | if (nameidata_dentry_drop_rcu(nd, dentry)) | 560 | put_filp(file); |
584 | return status; | 561 | else |
585 | status = dentry->d_op->d_revalidate(dentry, nd); | 562 | fput(file); |
586 | } | 563 | } |
564 | } | ||
587 | 565 | ||
588 | return status; | 566 | static inline int d_revalidate(struct dentry *dentry, struct nameidata *nd) |
567 | { | ||
568 | return dentry->d_op->d_revalidate(dentry, nd); | ||
589 | } | 569 | } |
590 | 570 | ||
591 | static inline struct dentry * | 571 | static struct dentry * |
592 | do_revalidate(struct dentry *dentry, struct nameidata *nd) | 572 | do_revalidate(struct dentry *dentry, struct nameidata *nd) |
593 | { | 573 | { |
594 | int status; | 574 | int status = d_revalidate(dentry, nd); |
595 | |||
596 | status = d_revalidate(dentry, nd); | ||
597 | if (unlikely(status <= 0)) { | 575 | if (unlikely(status <= 0)) { |
598 | /* | 576 | /* |
599 | * The dentry failed validation. | 577 | * The dentry failed validation. |
@@ -602,24 +580,39 @@ do_revalidate(struct dentry *dentry, struct nameidata *nd) | |||
602 | * to return a fail status. | 580 | * to return a fail status. |
603 | */ | 581 | */ |
604 | if (status < 0) { | 582 | if (status < 0) { |
605 | /* If we're in rcu-walk, we don't have a ref */ | 583 | dput(dentry); |
606 | if (!(nd->flags & LOOKUP_RCU)) | ||
607 | dput(dentry); | ||
608 | dentry = ERR_PTR(status); | 584 | dentry = ERR_PTR(status); |
609 | 585 | } else if (!d_invalidate(dentry)) { | |
610 | } else { | 586 | dput(dentry); |
611 | /* Don't d_invalidate in rcu-walk mode */ | 587 | dentry = NULL; |
612 | if (nameidata_dentry_drop_rcu_maybe(nd, dentry)) | ||
613 | return ERR_PTR(-ECHILD); | ||
614 | if (!d_invalidate(dentry)) { | ||
615 | dput(dentry); | ||
616 | dentry = NULL; | ||
617 | } | ||
618 | } | 588 | } |
619 | } | 589 | } |
620 | return dentry; | 590 | return dentry; |
621 | } | 591 | } |
622 | 592 | ||
593 | static inline struct dentry * | ||
594 | do_revalidate_rcu(struct dentry *dentry, struct nameidata *nd) | ||
595 | { | ||
596 | int status = d_revalidate(dentry, nd); | ||
597 | if (likely(status > 0)) | ||
598 | return dentry; | ||
599 | if (status == -ECHILD) { | ||
600 | if (nameidata_dentry_drop_rcu(nd, dentry)) | ||
601 | return ERR_PTR(-ECHILD); | ||
602 | return do_revalidate(dentry, nd); | ||
603 | } | ||
604 | if (status < 0) | ||
605 | return ERR_PTR(status); | ||
606 | /* Don't d_invalidate in rcu-walk mode */ | ||
607 | if (nameidata_dentry_drop_rcu(nd, dentry)) | ||
608 | return ERR_PTR(-ECHILD); | ||
609 | if (!d_invalidate(dentry)) { | ||
610 | dput(dentry); | ||
611 | dentry = NULL; | ||
612 | } | ||
613 | return dentry; | ||
614 | } | ||
615 | |||
623 | static inline int need_reval_dot(struct dentry *dentry) | 616 | static inline int need_reval_dot(struct dentry *dentry) |
624 | { | 617 | { |
625 | if (likely(!(dentry->d_flags & DCACHE_OP_REVALIDATE))) | 618 | if (likely(!(dentry->d_flags & DCACHE_OP_REVALIDATE))) |
@@ -664,9 +657,6 @@ force_reval_path(struct path *path, struct nameidata *nd) | |||
664 | return 0; | 657 | return 0; |
665 | 658 | ||
666 | if (!status) { | 659 | if (!status) { |
667 | /* Don't d_invalidate in rcu-walk mode */ | ||
668 | if (nameidata_drop_rcu(nd)) | ||
669 | return -ECHILD; | ||
670 | d_invalidate(dentry); | 660 | d_invalidate(dentry); |
671 | status = -ESTALE; | 661 | status = -ESTALE; |
672 | } | 662 | } |
@@ -773,6 +763,8 @@ __do_follow_link(const struct path *link, struct nameidata *nd, void **p) | |||
773 | int error; | 763 | int error; |
774 | struct dentry *dentry = link->dentry; | 764 | struct dentry *dentry = link->dentry; |
775 | 765 | ||
766 | BUG_ON(nd->flags & LOOKUP_RCU); | ||
767 | |||
776 | touch_atime(link->mnt, dentry); | 768 | touch_atime(link->mnt, dentry); |
777 | nd_set_link(nd, NULL); | 769 | nd_set_link(nd, NULL); |
778 | 770 | ||
@@ -803,10 +795,16 @@ __do_follow_link(const struct path *link, struct nameidata *nd, void **p) | |||
803 | * Without that kind of total limit, nasty chains of consecutive | 795 | * Without that kind of total limit, nasty chains of consecutive |
804 | * symlinks can cause almost arbitrarily long lookups. | 796 | * symlinks can cause almost arbitrarily long lookups. |
805 | */ | 797 | */ |
806 | static inline int do_follow_link(struct path *path, struct nameidata *nd) | 798 | static inline int do_follow_link(struct inode *inode, struct path *path, struct nameidata *nd) |
807 | { | 799 | { |
808 | void *cookie; | 800 | void *cookie; |
809 | int err = -ELOOP; | 801 | int err = -ELOOP; |
802 | |||
803 | /* We drop rcu-walk here */ | ||
804 | if (nameidata_dentry_drop_rcu_maybe(nd, path->dentry)) | ||
805 | return -ECHILD; | ||
806 | BUG_ON(inode != path->dentry->d_inode); | ||
807 | |||
810 | if (current->link_count >= MAX_NESTED_LINKS) | 808 | if (current->link_count >= MAX_NESTED_LINKS) |
811 | goto loop; | 809 | goto loop; |
812 | if (current->total_link_count >= 40) | 810 | if (current->total_link_count >= 40) |
@@ -1251,9 +1249,15 @@ static int do_lookup(struct nameidata *nd, struct qstr *name, | |||
1251 | return -ECHILD; | 1249 | return -ECHILD; |
1252 | 1250 | ||
1253 | nd->seq = seq; | 1251 | nd->seq = seq; |
1254 | if (dentry->d_flags & DCACHE_OP_REVALIDATE) | 1252 | if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) { |
1255 | goto need_revalidate; | 1253 | dentry = do_revalidate_rcu(dentry, nd); |
1256 | done2: | 1254 | if (!dentry) |
1255 | goto need_lookup; | ||
1256 | if (IS_ERR(dentry)) | ||
1257 | goto fail; | ||
1258 | if (!(nd->flags & LOOKUP_RCU)) | ||
1259 | goto done; | ||
1260 | } | ||
1257 | path->mnt = mnt; | 1261 | path->mnt = mnt; |
1258 | path->dentry = dentry; | 1262 | path->dentry = dentry; |
1259 | if (likely(__follow_mount_rcu(nd, path, inode, false))) | 1263 | if (likely(__follow_mount_rcu(nd, path, inode, false))) |
@@ -1266,8 +1270,13 @@ done2: | |||
1266 | if (!dentry) | 1270 | if (!dentry) |
1267 | goto need_lookup; | 1271 | goto need_lookup; |
1268 | found: | 1272 | found: |
1269 | if (dentry->d_flags & DCACHE_OP_REVALIDATE) | 1273 | if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) { |
1270 | goto need_revalidate; | 1274 | dentry = do_revalidate(dentry, nd); |
1275 | if (!dentry) | ||
1276 | goto need_lookup; | ||
1277 | if (IS_ERR(dentry)) | ||
1278 | goto fail; | ||
1279 | } | ||
1271 | done: | 1280 | done: |
1272 | path->mnt = mnt; | 1281 | path->mnt = mnt; |
1273 | path->dentry = dentry; | 1282 | path->dentry = dentry; |
@@ -1309,16 +1318,6 @@ need_lookup: | |||
1309 | mutex_unlock(&dir->i_mutex); | 1318 | mutex_unlock(&dir->i_mutex); |
1310 | goto found; | 1319 | goto found; |
1311 | 1320 | ||
1312 | need_revalidate: | ||
1313 | dentry = do_revalidate(dentry, nd); | ||
1314 | if (!dentry) | ||
1315 | goto need_lookup; | ||
1316 | if (IS_ERR(dentry)) | ||
1317 | goto fail; | ||
1318 | if (nd->flags & LOOKUP_RCU) | ||
1319 | goto done2; | ||
1320 | goto done; | ||
1321 | |||
1322 | fail: | 1321 | fail: |
1323 | return PTR_ERR(dentry); | 1322 | return PTR_ERR(dentry); |
1324 | } | 1323 | } |
@@ -1415,11 +1414,7 @@ exec_again: | |||
1415 | goto out_dput; | 1414 | goto out_dput; |
1416 | 1415 | ||
1417 | if (inode->i_op->follow_link) { | 1416 | if (inode->i_op->follow_link) { |
1418 | /* We commonly drop rcu-walk here */ | 1417 | err = do_follow_link(inode, &next, nd); |
1419 | if (nameidata_dentry_drop_rcu_maybe(nd, next.dentry)) | ||
1420 | return -ECHILD; | ||
1421 | BUG_ON(inode != next.dentry->d_inode); | ||
1422 | err = do_follow_link(&next, nd); | ||
1423 | if (err) | 1418 | if (err) |
1424 | goto return_err; | 1419 | goto return_err; |
1425 | nd->inode = nd->path.dentry->d_inode; | 1420 | nd->inode = nd->path.dentry->d_inode; |
@@ -1463,10 +1458,7 @@ last_component: | |||
1463 | break; | 1458 | break; |
1464 | if (inode && unlikely(inode->i_op->follow_link) && | 1459 | if (inode && unlikely(inode->i_op->follow_link) && |
1465 | (lookup_flags & LOOKUP_FOLLOW)) { | 1460 | (lookup_flags & LOOKUP_FOLLOW)) { |
1466 | if (nameidata_dentry_drop_rcu_maybe(nd, next.dentry)) | 1461 | err = do_follow_link(inode, &next, nd); |
1467 | return -ECHILD; | ||
1468 | BUG_ON(inode != next.dentry->d_inode); | ||
1469 | err = do_follow_link(&next, nd); | ||
1470 | if (err) | 1462 | if (err) |
1471 | goto return_err; | 1463 | goto return_err; |
1472 | nd->inode = nd->path.dentry->d_inode; | 1464 | nd->inode = nd->path.dentry->d_inode; |
@@ -1500,12 +1492,15 @@ return_reval: | |||
1500 | * We may need to check the cached dentry for staleness. | 1492 | * We may need to check the cached dentry for staleness. |
1501 | */ | 1493 | */ |
1502 | if (need_reval_dot(nd->path.dentry)) { | 1494 | if (need_reval_dot(nd->path.dentry)) { |
1495 | if (nameidata_drop_rcu_last_maybe(nd)) | ||
1496 | return -ECHILD; | ||
1503 | /* Note: we do not d_invalidate() */ | 1497 | /* Note: we do not d_invalidate() */ |
1504 | err = d_revalidate(nd->path.dentry, nd); | 1498 | err = d_revalidate(nd->path.dentry, nd); |
1505 | if (!err) | 1499 | if (!err) |
1506 | err = -ESTALE; | 1500 | err = -ESTALE; |
1507 | if (err < 0) | 1501 | if (err < 0) |
1508 | break; | 1502 | break; |
1503 | return 0; | ||
1509 | } | 1504 | } |
1510 | return_base: | 1505 | return_base: |
1511 | if (nameidata_drop_rcu_last_maybe(nd)) | 1506 | if (nameidata_drop_rcu_last_maybe(nd)) |
@@ -2265,8 +2260,6 @@ static struct file *finish_open(struct nameidata *nd, | |||
2265 | return filp; | 2260 | return filp; |
2266 | 2261 | ||
2267 | exit: | 2262 | exit: |
2268 | if (!IS_ERR(nd->intent.open.file)) | ||
2269 | release_open_intent(nd); | ||
2270 | path_put(&nd->path); | 2263 | path_put(&nd->path); |
2271 | return ERR_PTR(error); | 2264 | return ERR_PTR(error); |
2272 | } | 2265 | } |
@@ -2389,8 +2382,6 @@ exit_mutex_unlock: | |||
2389 | exit_dput: | 2382 | exit_dput: |
2390 | path_put_conditional(path, nd); | 2383 | path_put_conditional(path, nd); |
2391 | exit: | 2384 | exit: |
2392 | if (!IS_ERR(nd->intent.open.file)) | ||
2393 | release_open_intent(nd); | ||
2394 | path_put(&nd->path); | 2385 | path_put(&nd->path); |
2395 | return ERR_PTR(error); | 2386 | return ERR_PTR(error); |
2396 | } | 2387 | } |
@@ -2477,6 +2468,7 @@ struct file *do_filp_open(int dfd, const char *pathname, | |||
2477 | } | 2468 | } |
2478 | audit_inode(pathname, nd.path.dentry); | 2469 | audit_inode(pathname, nd.path.dentry); |
2479 | filp = finish_open(&nd, open_flag, acc_mode); | 2470 | filp = finish_open(&nd, open_flag, acc_mode); |
2471 | release_open_intent(&nd); | ||
2480 | return filp; | 2472 | return filp; |
2481 | 2473 | ||
2482 | creat: | 2474 | creat: |
@@ -2553,6 +2545,7 @@ out: | |||
2553 | path_put(&nd.root); | 2545 | path_put(&nd.root); |
2554 | if (filp == ERR_PTR(-ESTALE) && !(flags & LOOKUP_REVAL)) | 2546 | if (filp == ERR_PTR(-ESTALE) && !(flags & LOOKUP_REVAL)) |
2555 | goto reval; | 2547 | goto reval; |
2548 | release_open_intent(&nd); | ||
2556 | return filp; | 2549 | return filp; |
2557 | 2550 | ||
2558 | exit_dput: | 2551 | exit_dput: |
@@ -2560,8 +2553,6 @@ exit_dput: | |||
2560 | out_path: | 2553 | out_path: |
2561 | path_put(&nd.path); | 2554 | path_put(&nd.path); |
2562 | out_filp: | 2555 | out_filp: |
2563 | if (!IS_ERR(nd.intent.open.file)) | ||
2564 | release_open_intent(&nd); | ||
2565 | filp = ERR_PTR(error); | 2556 | filp = ERR_PTR(error); |
2566 | goto out; | 2557 | goto out; |
2567 | } | 2558 | } |
diff --git a/fs/namespace.c b/fs/namespace.c index 7b0b95371696..d1edf26025dc 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -1244,7 +1244,7 @@ static int do_umount(struct vfsmount *mnt, int flags) | |||
1244 | */ | 1244 | */ |
1245 | br_write_lock(vfsmount_lock); | 1245 | br_write_lock(vfsmount_lock); |
1246 | if (mnt_get_count(mnt) != 2) { | 1246 | if (mnt_get_count(mnt) != 2) { |
1247 | br_write_lock(vfsmount_lock); | 1247 | br_write_unlock(vfsmount_lock); |
1248 | return -EBUSY; | 1248 | return -EBUSY; |
1249 | } | 1249 | } |
1250 | br_write_unlock(vfsmount_lock); | 1250 | br_write_unlock(vfsmount_lock); |
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 3be975e18919..cde36cb0f348 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c | |||
@@ -484,7 +484,7 @@ static int decode_cb_sequence4res(struct xdr_stream *xdr, | |||
484 | out: | 484 | out: |
485 | return status; | 485 | return status; |
486 | out_default: | 486 | out_default: |
487 | return nfs_cb_stat_to_errno(status); | 487 | return nfs_cb_stat_to_errno(nfserr); |
488 | } | 488 | } |
489 | 489 | ||
490 | /* | 490 | /* |
@@ -564,11 +564,9 @@ static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, | |||
564 | if (unlikely(status)) | 564 | if (unlikely(status)) |
565 | goto out; | 565 | goto out; |
566 | if (unlikely(nfserr != NFS4_OK)) | 566 | if (unlikely(nfserr != NFS4_OK)) |
567 | goto out_default; | 567 | status = nfs_cb_stat_to_errno(nfserr); |
568 | out: | 568 | out: |
569 | return status; | 569 | return status; |
570 | out_default: | ||
571 | return nfs_cb_stat_to_errno(status); | ||
572 | } | 570 | } |
573 | 571 | ||
574 | /* | 572 | /* |
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index d98d0213285d..54b60bfceb8d 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
@@ -230,9 +230,6 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f | |||
230 | dp->dl_client = clp; | 230 | dp->dl_client = clp; |
231 | get_nfs4_file(fp); | 231 | get_nfs4_file(fp); |
232 | dp->dl_file = fp; | 232 | dp->dl_file = fp; |
233 | dp->dl_vfs_file = find_readable_file(fp); | ||
234 | get_file(dp->dl_vfs_file); | ||
235 | dp->dl_flock = NULL; | ||
236 | dp->dl_type = type; | 233 | dp->dl_type = type; |
237 | dp->dl_stateid.si_boot = boot_time; | 234 | dp->dl_stateid.si_boot = boot_time; |
238 | dp->dl_stateid.si_stateownerid = current_delegid++; | 235 | dp->dl_stateid.si_stateownerid = current_delegid++; |
@@ -241,8 +238,6 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f | |||
241 | fh_copy_shallow(&dp->dl_fh, ¤t_fh->fh_handle); | 238 | fh_copy_shallow(&dp->dl_fh, ¤t_fh->fh_handle); |
242 | dp->dl_time = 0; | 239 | dp->dl_time = 0; |
243 | atomic_set(&dp->dl_count, 1); | 240 | atomic_set(&dp->dl_count, 1); |
244 | list_add(&dp->dl_perfile, &fp->fi_delegations); | ||
245 | list_add(&dp->dl_perclnt, &clp->cl_delegations); | ||
246 | INIT_WORK(&dp->dl_recall.cb_work, nfsd4_do_callback_rpc); | 241 | INIT_WORK(&dp->dl_recall.cb_work, nfsd4_do_callback_rpc); |
247 | return dp; | 242 | return dp; |
248 | } | 243 | } |
@@ -253,36 +248,30 @@ nfs4_put_delegation(struct nfs4_delegation *dp) | |||
253 | if (atomic_dec_and_test(&dp->dl_count)) { | 248 | if (atomic_dec_and_test(&dp->dl_count)) { |
254 | dprintk("NFSD: freeing dp %p\n",dp); | 249 | dprintk("NFSD: freeing dp %p\n",dp); |
255 | put_nfs4_file(dp->dl_file); | 250 | put_nfs4_file(dp->dl_file); |
256 | fput(dp->dl_vfs_file); | ||
257 | kmem_cache_free(deleg_slab, dp); | 251 | kmem_cache_free(deleg_slab, dp); |
258 | num_delegations--; | 252 | num_delegations--; |
259 | } | 253 | } |
260 | } | 254 | } |
261 | 255 | ||
262 | /* Remove the associated file_lock first, then remove the delegation. | 256 | static void nfs4_put_deleg_lease(struct nfs4_file *fp) |
263 | * lease_modify() is called to remove the FS_LEASE file_lock from | ||
264 | * the i_flock list, eventually calling nfsd's lock_manager | ||
265 | * fl_release_callback. | ||
266 | */ | ||
267 | static void | ||
268 | nfs4_close_delegation(struct nfs4_delegation *dp) | ||
269 | { | 257 | { |
270 | dprintk("NFSD: close_delegation dp %p\n",dp); | 258 | if (atomic_dec_and_test(&fp->fi_delegees)) { |
271 | /* XXX: do we even need this check?: */ | 259 | vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease); |
272 | if (dp->dl_flock) | 260 | fp->fi_lease = NULL; |
273 | vfs_setlease(dp->dl_vfs_file, F_UNLCK, &dp->dl_flock); | 261 | fp->fi_deleg_file = NULL; |
262 | } | ||
274 | } | 263 | } |
275 | 264 | ||
276 | /* Called under the state lock. */ | 265 | /* Called under the state lock. */ |
277 | static void | 266 | static void |
278 | unhash_delegation(struct nfs4_delegation *dp) | 267 | unhash_delegation(struct nfs4_delegation *dp) |
279 | { | 268 | { |
280 | list_del_init(&dp->dl_perfile); | ||
281 | list_del_init(&dp->dl_perclnt); | 269 | list_del_init(&dp->dl_perclnt); |
282 | spin_lock(&recall_lock); | 270 | spin_lock(&recall_lock); |
271 | list_del_init(&dp->dl_perfile); | ||
283 | list_del_init(&dp->dl_recall_lru); | 272 | list_del_init(&dp->dl_recall_lru); |
284 | spin_unlock(&recall_lock); | 273 | spin_unlock(&recall_lock); |
285 | nfs4_close_delegation(dp); | 274 | nfs4_put_deleg_lease(dp->dl_file); |
286 | nfs4_put_delegation(dp); | 275 | nfs4_put_delegation(dp); |
287 | } | 276 | } |
288 | 277 | ||
@@ -958,8 +947,6 @@ expire_client(struct nfs4_client *clp) | |||
958 | spin_lock(&recall_lock); | 947 | spin_lock(&recall_lock); |
959 | while (!list_empty(&clp->cl_delegations)) { | 948 | while (!list_empty(&clp->cl_delegations)) { |
960 | dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt); | 949 | dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt); |
961 | dprintk("NFSD: expire client. dp %p, fp %p\n", dp, | ||
962 | dp->dl_flock); | ||
963 | list_del_init(&dp->dl_perclnt); | 950 | list_del_init(&dp->dl_perclnt); |
964 | list_move(&dp->dl_recall_lru, &reaplist); | 951 | list_move(&dp->dl_recall_lru, &reaplist); |
965 | } | 952 | } |
@@ -2078,6 +2065,7 @@ alloc_init_file(struct inode *ino) | |||
2078 | fp->fi_inode = igrab(ino); | 2065 | fp->fi_inode = igrab(ino); |
2079 | fp->fi_id = current_fileid++; | 2066 | fp->fi_id = current_fileid++; |
2080 | fp->fi_had_conflict = false; | 2067 | fp->fi_had_conflict = false; |
2068 | fp->fi_lease = NULL; | ||
2081 | memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); | 2069 | memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); |
2082 | memset(fp->fi_access, 0, sizeof(fp->fi_access)); | 2070 | memset(fp->fi_access, 0, sizeof(fp->fi_access)); |
2083 | spin_lock(&recall_lock); | 2071 | spin_lock(&recall_lock); |
@@ -2329,23 +2317,8 @@ nfs4_file_downgrade(struct nfs4_file *fp, unsigned int share_access) | |||
2329 | nfs4_file_put_access(fp, O_RDONLY); | 2317 | nfs4_file_put_access(fp, O_RDONLY); |
2330 | } | 2318 | } |
2331 | 2319 | ||
2332 | /* | 2320 | static void nfsd_break_one_deleg(struct nfs4_delegation *dp) |
2333 | * Spawn a thread to perform a recall on the delegation represented | ||
2334 | * by the lease (file_lock) | ||
2335 | * | ||
2336 | * Called from break_lease() with lock_flocks() held. | ||
2337 | * Note: we assume break_lease will only call this *once* for any given | ||
2338 | * lease. | ||
2339 | */ | ||
2340 | static | ||
2341 | void nfsd_break_deleg_cb(struct file_lock *fl) | ||
2342 | { | 2321 | { |
2343 | struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner; | ||
2344 | |||
2345 | dprintk("NFSD nfsd_break_deleg_cb: dp %p fl %p\n",dp,fl); | ||
2346 | if (!dp) | ||
2347 | return; | ||
2348 | |||
2349 | /* We're assuming the state code never drops its reference | 2322 | /* We're assuming the state code never drops its reference |
2350 | * without first removing the lease. Since we're in this lease | 2323 | * without first removing the lease. Since we're in this lease |
2351 | * callback (and since the lease code is serialized by the kernel | 2324 | * callback (and since the lease code is serialized by the kernel |
@@ -2353,22 +2326,35 @@ void nfsd_break_deleg_cb(struct file_lock *fl) | |||
2353 | * it's safe to take a reference: */ | 2326 | * it's safe to take a reference: */ |
2354 | atomic_inc(&dp->dl_count); | 2327 | atomic_inc(&dp->dl_count); |
2355 | 2328 | ||
2356 | spin_lock(&recall_lock); | ||
2357 | list_add_tail(&dp->dl_recall_lru, &del_recall_lru); | 2329 | list_add_tail(&dp->dl_recall_lru, &del_recall_lru); |
2358 | spin_unlock(&recall_lock); | ||
2359 | 2330 | ||
2360 | /* only place dl_time is set. protected by lock_flocks*/ | 2331 | /* only place dl_time is set. protected by lock_flocks*/ |
2361 | dp->dl_time = get_seconds(); | 2332 | dp->dl_time = get_seconds(); |
2362 | 2333 | ||
2334 | nfsd4_cb_recall(dp); | ||
2335 | } | ||
2336 | |||
2337 | /* Called from break_lease() with lock_flocks() held. */ | ||
2338 | static void nfsd_break_deleg_cb(struct file_lock *fl) | ||
2339 | { | ||
2340 | struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner; | ||
2341 | struct nfs4_delegation *dp; | ||
2342 | |||
2343 | BUG_ON(!fp); | ||
2344 | /* We assume break_lease is only called once per lease: */ | ||
2345 | BUG_ON(fp->fi_had_conflict); | ||
2363 | /* | 2346 | /* |
2364 | * We don't want the locks code to timeout the lease for us; | 2347 | * We don't want the locks code to timeout the lease for us; |
2365 | * we'll remove it ourself if the delegation isn't returned | 2348 | * we'll remove it ourself if a delegation isn't returned |
2366 | * in time. | 2349 | * in time: |
2367 | */ | 2350 | */ |
2368 | fl->fl_break_time = 0; | 2351 | fl->fl_break_time = 0; |
2369 | 2352 | ||
2370 | dp->dl_file->fi_had_conflict = true; | 2353 | spin_lock(&recall_lock); |
2371 | nfsd4_cb_recall(dp); | 2354 | fp->fi_had_conflict = true; |
2355 | list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) | ||
2356 | nfsd_break_one_deleg(dp); | ||
2357 | spin_unlock(&recall_lock); | ||
2372 | } | 2358 | } |
2373 | 2359 | ||
2374 | static | 2360 | static |
@@ -2459,13 +2445,15 @@ nfs4_check_delegmode(struct nfs4_delegation *dp, int flags) | |||
2459 | static struct nfs4_delegation * | 2445 | static struct nfs4_delegation * |
2460 | find_delegation_file(struct nfs4_file *fp, stateid_t *stid) | 2446 | find_delegation_file(struct nfs4_file *fp, stateid_t *stid) |
2461 | { | 2447 | { |
2462 | struct nfs4_delegation *dp; | 2448 | struct nfs4_delegation *dp = NULL; |
2463 | 2449 | ||
2450 | spin_lock(&recall_lock); | ||
2464 | list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) { | 2451 | list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) { |
2465 | if (dp->dl_stateid.si_stateownerid == stid->si_stateownerid) | 2452 | if (dp->dl_stateid.si_stateownerid == stid->si_stateownerid) |
2466 | return dp; | 2453 | break; |
2467 | } | 2454 | } |
2468 | return NULL; | 2455 | spin_unlock(&recall_lock); |
2456 | return dp; | ||
2469 | } | 2457 | } |
2470 | 2458 | ||
2471 | int share_access_to_flags(u32 share_access) | 2459 | int share_access_to_flags(u32 share_access) |
@@ -2641,6 +2629,66 @@ static bool nfsd4_cb_channel_good(struct nfs4_client *clp) | |||
2641 | return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN; | 2629 | return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN; |
2642 | } | 2630 | } |
2643 | 2631 | ||
2632 | static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int flag) | ||
2633 | { | ||
2634 | struct file_lock *fl; | ||
2635 | |||
2636 | fl = locks_alloc_lock(); | ||
2637 | if (!fl) | ||
2638 | return NULL; | ||
2639 | locks_init_lock(fl); | ||
2640 | fl->fl_lmops = &nfsd_lease_mng_ops; | ||
2641 | fl->fl_flags = FL_LEASE; | ||
2642 | fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK; | ||
2643 | fl->fl_end = OFFSET_MAX; | ||
2644 | fl->fl_owner = (fl_owner_t)(dp->dl_file); | ||
2645 | fl->fl_pid = current->tgid; | ||
2646 | return fl; | ||
2647 | } | ||
2648 | |||
2649 | static int nfs4_setlease(struct nfs4_delegation *dp, int flag) | ||
2650 | { | ||
2651 | struct nfs4_file *fp = dp->dl_file; | ||
2652 | struct file_lock *fl; | ||
2653 | int status; | ||
2654 | |||
2655 | fl = nfs4_alloc_init_lease(dp, flag); | ||
2656 | if (!fl) | ||
2657 | return -ENOMEM; | ||
2658 | fl->fl_file = find_readable_file(fp); | ||
2659 | list_add(&dp->dl_perclnt, &dp->dl_client->cl_delegations); | ||
2660 | status = vfs_setlease(fl->fl_file, fl->fl_type, &fl); | ||
2661 | if (status) { | ||
2662 | list_del_init(&dp->dl_perclnt); | ||
2663 | locks_free_lock(fl); | ||
2664 | return -ENOMEM; | ||
2665 | } | ||
2666 | fp->fi_lease = fl; | ||
2667 | fp->fi_deleg_file = fl->fl_file; | ||
2668 | get_file(fp->fi_deleg_file); | ||
2669 | atomic_set(&fp->fi_delegees, 1); | ||
2670 | list_add(&dp->dl_perfile, &fp->fi_delegations); | ||
2671 | return 0; | ||
2672 | } | ||
2673 | |||
2674 | static int nfs4_set_delegation(struct nfs4_delegation *dp, int flag) | ||
2675 | { | ||
2676 | struct nfs4_file *fp = dp->dl_file; | ||
2677 | |||
2678 | if (!fp->fi_lease) | ||
2679 | return nfs4_setlease(dp, flag); | ||
2680 | spin_lock(&recall_lock); | ||
2681 | if (fp->fi_had_conflict) { | ||
2682 | spin_unlock(&recall_lock); | ||
2683 | return -EAGAIN; | ||
2684 | } | ||
2685 | atomic_inc(&fp->fi_delegees); | ||
2686 | list_add(&dp->dl_perfile, &fp->fi_delegations); | ||
2687 | spin_unlock(&recall_lock); | ||
2688 | list_add(&dp->dl_perclnt, &dp->dl_client->cl_delegations); | ||
2689 | return 0; | ||
2690 | } | ||
2691 | |||
2644 | /* | 2692 | /* |
2645 | * Attempt to hand out a delegation. | 2693 | * Attempt to hand out a delegation. |
2646 | */ | 2694 | */ |
@@ -2650,7 +2698,6 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta | |||
2650 | struct nfs4_delegation *dp; | 2698 | struct nfs4_delegation *dp; |
2651 | struct nfs4_stateowner *sop = stp->st_stateowner; | 2699 | struct nfs4_stateowner *sop = stp->st_stateowner; |
2652 | int cb_up; | 2700 | int cb_up; |
2653 | struct file_lock *fl; | ||
2654 | int status, flag = 0; | 2701 | int status, flag = 0; |
2655 | 2702 | ||
2656 | cb_up = nfsd4_cb_channel_good(sop->so_client); | 2703 | cb_up = nfsd4_cb_channel_good(sop->so_client); |
@@ -2681,36 +2728,11 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta | |||
2681 | } | 2728 | } |
2682 | 2729 | ||
2683 | dp = alloc_init_deleg(sop->so_client, stp, fh, flag); | 2730 | dp = alloc_init_deleg(sop->so_client, stp, fh, flag); |
2684 | if (dp == NULL) { | 2731 | if (dp == NULL) |
2685 | flag = NFS4_OPEN_DELEGATE_NONE; | 2732 | goto out_no_deleg; |
2686 | goto out; | 2733 | status = nfs4_set_delegation(dp, flag); |
2687 | } | 2734 | if (status) |
2688 | status = -ENOMEM; | 2735 | goto out_free; |
2689 | fl = locks_alloc_lock(); | ||
2690 | if (!fl) | ||
2691 | goto out; | ||
2692 | locks_init_lock(fl); | ||
2693 | fl->fl_lmops = &nfsd_lease_mng_ops; | ||
2694 | fl->fl_flags = FL_LEASE; | ||
2695 | fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK; | ||
2696 | fl->fl_end = OFFSET_MAX; | ||
2697 | fl->fl_owner = (fl_owner_t)dp; | ||
2698 | fl->fl_file = find_readable_file(stp->st_file); | ||
2699 | BUG_ON(!fl->fl_file); | ||
2700 | fl->fl_pid = current->tgid; | ||
2701 | dp->dl_flock = fl; | ||
2702 | |||
2703 | /* vfs_setlease checks to see if delegation should be handed out. | ||
2704 | * the lock_manager callback fl_change is used | ||
2705 | */ | ||
2706 | if ((status = vfs_setlease(fl->fl_file, fl->fl_type, &fl))) { | ||
2707 | dprintk("NFSD: setlease failed [%d], no delegation\n", status); | ||
2708 | dp->dl_flock = NULL; | ||
2709 | locks_free_lock(fl); | ||
2710 | unhash_delegation(dp); | ||
2711 | flag = NFS4_OPEN_DELEGATE_NONE; | ||
2712 | goto out; | ||
2713 | } | ||
2714 | 2736 | ||
2715 | memcpy(&open->op_delegate_stateid, &dp->dl_stateid, sizeof(dp->dl_stateid)); | 2737 | memcpy(&open->op_delegate_stateid, &dp->dl_stateid, sizeof(dp->dl_stateid)); |
2716 | 2738 | ||
@@ -2722,6 +2744,12 @@ out: | |||
2722 | && open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) | 2744 | && open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) |
2723 | dprintk("NFSD: WARNING: refusing delegation reclaim\n"); | 2745 | dprintk("NFSD: WARNING: refusing delegation reclaim\n"); |
2724 | open->op_delegate_type = flag; | 2746 | open->op_delegate_type = flag; |
2747 | return; | ||
2748 | out_free: | ||
2749 | nfs4_put_delegation(dp); | ||
2750 | out_no_deleg: | ||
2751 | flag = NFS4_OPEN_DELEGATE_NONE; | ||
2752 | goto out; | ||
2725 | } | 2753 | } |
2726 | 2754 | ||
2727 | /* | 2755 | /* |
@@ -2916,8 +2944,6 @@ nfs4_laundromat(void) | |||
2916 | test_val = u; | 2944 | test_val = u; |
2917 | break; | 2945 | break; |
2918 | } | 2946 | } |
2919 | dprintk("NFSD: purging unused delegation dp %p, fp %p\n", | ||
2920 | dp, dp->dl_flock); | ||
2921 | list_move(&dp->dl_recall_lru, &reaplist); | 2947 | list_move(&dp->dl_recall_lru, &reaplist); |
2922 | } | 2948 | } |
2923 | spin_unlock(&recall_lock); | 2949 | spin_unlock(&recall_lock); |
@@ -3128,7 +3154,7 @@ nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate, | |||
3128 | goto out; | 3154 | goto out; |
3129 | renew_client(dp->dl_client); | 3155 | renew_client(dp->dl_client); |
3130 | if (filpp) { | 3156 | if (filpp) { |
3131 | *filpp = find_readable_file(dp->dl_file); | 3157 | *filpp = dp->dl_file->fi_deleg_file; |
3132 | BUG_ON(!*filpp); | 3158 | BUG_ON(!*filpp); |
3133 | } | 3159 | } |
3134 | } else { /* open or lock stateid */ | 3160 | } else { /* open or lock stateid */ |
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 956629b9cdc9..1275b8655070 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c | |||
@@ -317,8 +317,8 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, | |||
317 | READ_BUF(dummy32); | 317 | READ_BUF(dummy32); |
318 | len += (XDR_QUADLEN(dummy32) << 2); | 318 | len += (XDR_QUADLEN(dummy32) << 2); |
319 | READMEM(buf, dummy32); | 319 | READMEM(buf, dummy32); |
320 | if ((host_err = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid))) | 320 | if ((status = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid))) |
321 | goto out_nfserr; | 321 | return status; |
322 | iattr->ia_valid |= ATTR_UID; | 322 | iattr->ia_valid |= ATTR_UID; |
323 | } | 323 | } |
324 | if (bmval[1] & FATTR4_WORD1_OWNER_GROUP) { | 324 | if (bmval[1] & FATTR4_WORD1_OWNER_GROUP) { |
@@ -328,8 +328,8 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, | |||
328 | READ_BUF(dummy32); | 328 | READ_BUF(dummy32); |
329 | len += (XDR_QUADLEN(dummy32) << 2); | 329 | len += (XDR_QUADLEN(dummy32) << 2); |
330 | READMEM(buf, dummy32); | 330 | READMEM(buf, dummy32); |
331 | if ((host_err = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid))) | 331 | if ((status = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid))) |
332 | goto out_nfserr; | 332 | return status; |
333 | iattr->ia_valid |= ATTR_GID; | 333 | iattr->ia_valid |= ATTR_GID; |
334 | } | 334 | } |
335 | if (bmval[1] & FATTR4_WORD1_TIME_ACCESS_SET) { | 335 | if (bmval[1] & FATTR4_WORD1_TIME_ACCESS_SET) { |
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index 3074656ba7bf..2d31224b07bf 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h | |||
@@ -83,8 +83,6 @@ struct nfs4_delegation { | |||
83 | atomic_t dl_count; /* ref count */ | 83 | atomic_t dl_count; /* ref count */ |
84 | struct nfs4_client *dl_client; | 84 | struct nfs4_client *dl_client; |
85 | struct nfs4_file *dl_file; | 85 | struct nfs4_file *dl_file; |
86 | struct file *dl_vfs_file; | ||
87 | struct file_lock *dl_flock; | ||
88 | u32 dl_type; | 86 | u32 dl_type; |
89 | time_t dl_time; | 87 | time_t dl_time; |
90 | /* For recall: */ | 88 | /* For recall: */ |
@@ -379,6 +377,9 @@ struct nfs4_file { | |||
379 | */ | 377 | */ |
380 | atomic_t fi_readers; | 378 | atomic_t fi_readers; |
381 | atomic_t fi_writers; | 379 | atomic_t fi_writers; |
380 | struct file *fi_deleg_file; | ||
381 | struct file_lock *fi_lease; | ||
382 | atomic_t fi_delegees; | ||
382 | struct inode *fi_inode; | 383 | struct inode *fi_inode; |
383 | u32 fi_id; /* used with stateowner->so_id | 384 | u32 fi_id; /* used with stateowner->so_id |
384 | * for stateid_hashtbl hash */ | 385 | * for stateid_hashtbl hash */ |
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 641117f2188d..da1d9701f8e4 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c | |||
@@ -808,7 +808,7 @@ nfsd_get_raparms(dev_t dev, ino_t ino) | |||
808 | if (ra->p_count == 0) | 808 | if (ra->p_count == 0) |
809 | frap = rap; | 809 | frap = rap; |
810 | } | 810 | } |
811 | depth = nfsdstats.ra_size*11/10; | 811 | depth = nfsdstats.ra_size; |
812 | if (!frap) { | 812 | if (!frap) { |
813 | spin_unlock(&rab->pb_lock); | 813 | spin_unlock(&rab->pb_lock); |
814 | return NULL; | 814 | return NULL; |
@@ -1744,6 +1744,13 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen, | |||
1744 | host_err = nfsd_break_lease(odentry->d_inode); | 1744 | host_err = nfsd_break_lease(odentry->d_inode); |
1745 | if (host_err) | 1745 | if (host_err) |
1746 | goto out_drop_write; | 1746 | goto out_drop_write; |
1747 | if (ndentry->d_inode) { | ||
1748 | host_err = nfsd_break_lease(ndentry->d_inode); | ||
1749 | if (host_err) | ||
1750 | goto out_drop_write; | ||
1751 | } | ||
1752 | if (host_err) | ||
1753 | goto out_drop_write; | ||
1747 | host_err = vfs_rename(fdir, odentry, tdir, ndentry); | 1754 | host_err = vfs_rename(fdir, odentry, tdir, ndentry); |
1748 | if (!host_err) { | 1755 | if (!host_err) { |
1749 | host_err = commit_metadata(tfhp); | 1756 | host_err = commit_metadata(tfhp); |
@@ -1812,22 +1819,22 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, | |||
1812 | 1819 | ||
1813 | host_err = mnt_want_write(fhp->fh_export->ex_path.mnt); | 1820 | host_err = mnt_want_write(fhp->fh_export->ex_path.mnt); |
1814 | if (host_err) | 1821 | if (host_err) |
1815 | goto out_nfserr; | 1822 | goto out_put; |
1816 | 1823 | ||
1817 | host_err = nfsd_break_lease(rdentry->d_inode); | 1824 | host_err = nfsd_break_lease(rdentry->d_inode); |
1818 | if (host_err) | 1825 | if (host_err) |
1819 | goto out_put; | 1826 | goto out_drop_write; |
1820 | if (type != S_IFDIR) | 1827 | if (type != S_IFDIR) |
1821 | host_err = vfs_unlink(dirp, rdentry); | 1828 | host_err = vfs_unlink(dirp, rdentry); |
1822 | else | 1829 | else |
1823 | host_err = vfs_rmdir(dirp, rdentry); | 1830 | host_err = vfs_rmdir(dirp, rdentry); |
1824 | out_put: | ||
1825 | dput(rdentry); | ||
1826 | |||
1827 | if (!host_err) | 1831 | if (!host_err) |
1828 | host_err = commit_metadata(fhp); | 1832 | host_err = commit_metadata(fhp); |
1829 | 1833 | out_drop_write: | |
1830 | mnt_drop_write(fhp->fh_export->ex_path.mnt); | 1834 | mnt_drop_write(fhp->fh_export->ex_path.mnt); |
1835 | out_put: | ||
1836 | dput(rdentry); | ||
1837 | |||
1831 | out_nfserr: | 1838 | out_nfserr: |
1832 | err = nfserrno(host_err); | 1839 | err = nfserrno(host_err); |
1833 | out: | 1840 | out: |
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c index 388e9e8f5286..85f7baa15f5d 100644 --- a/fs/nilfs2/btnode.c +++ b/fs/nilfs2/btnode.c | |||
@@ -35,11 +35,6 @@ | |||
35 | #include "btnode.h" | 35 | #include "btnode.h" |
36 | 36 | ||
37 | 37 | ||
38 | void nilfs_btnode_cache_init_once(struct address_space *btnc) | ||
39 | { | ||
40 | nilfs_mapping_init_once(btnc); | ||
41 | } | ||
42 | |||
43 | static const struct address_space_operations def_btnode_aops = { | 38 | static const struct address_space_operations def_btnode_aops = { |
44 | .sync_page = block_sync_page, | 39 | .sync_page = block_sync_page, |
45 | }; | 40 | }; |
diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h index 79037494f1e0..1b8ebd888c28 100644 --- a/fs/nilfs2/btnode.h +++ b/fs/nilfs2/btnode.h | |||
@@ -37,7 +37,6 @@ struct nilfs_btnode_chkey_ctxt { | |||
37 | struct buffer_head *newbh; | 37 | struct buffer_head *newbh; |
38 | }; | 38 | }; |
39 | 39 | ||
40 | void nilfs_btnode_cache_init_once(struct address_space *); | ||
41 | void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *); | 40 | void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *); |
42 | void nilfs_btnode_cache_clear(struct address_space *); | 41 | void nilfs_btnode_cache_clear(struct address_space *); |
43 | struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc, | 42 | struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc, |
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index 6a0e2a189f60..a0babd2bff6a 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c | |||
@@ -454,9 +454,9 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode, | |||
454 | struct backing_dev_info *bdi = inode->i_sb->s_bdi; | 454 | struct backing_dev_info *bdi = inode->i_sb->s_bdi; |
455 | 455 | ||
456 | INIT_LIST_HEAD(&shadow->frozen_buffers); | 456 | INIT_LIST_HEAD(&shadow->frozen_buffers); |
457 | nilfs_mapping_init_once(&shadow->frozen_data); | 457 | address_space_init_once(&shadow->frozen_data); |
458 | nilfs_mapping_init(&shadow->frozen_data, bdi, &shadow_map_aops); | 458 | nilfs_mapping_init(&shadow->frozen_data, bdi, &shadow_map_aops); |
459 | nilfs_mapping_init_once(&shadow->frozen_btnodes); | 459 | address_space_init_once(&shadow->frozen_btnodes); |
460 | nilfs_mapping_init(&shadow->frozen_btnodes, bdi, &shadow_map_aops); | 460 | nilfs_mapping_init(&shadow->frozen_btnodes, bdi, &shadow_map_aops); |
461 | mi->mi_shadow = shadow; | 461 | mi->mi_shadow = shadow; |
462 | return 0; | 462 | return 0; |
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c index 0c432416cfef..a585b35fd6bc 100644 --- a/fs/nilfs2/page.c +++ b/fs/nilfs2/page.c | |||
@@ -492,19 +492,6 @@ unsigned nilfs_page_count_clean_buffers(struct page *page, | |||
492 | return nc; | 492 | return nc; |
493 | } | 493 | } |
494 | 494 | ||
495 | void nilfs_mapping_init_once(struct address_space *mapping) | ||
496 | { | ||
497 | memset(mapping, 0, sizeof(*mapping)); | ||
498 | INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC); | ||
499 | spin_lock_init(&mapping->tree_lock); | ||
500 | INIT_LIST_HEAD(&mapping->private_list); | ||
501 | spin_lock_init(&mapping->private_lock); | ||
502 | |||
503 | spin_lock_init(&mapping->i_mmap_lock); | ||
504 | INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap); | ||
505 | INIT_LIST_HEAD(&mapping->i_mmap_nonlinear); | ||
506 | } | ||
507 | |||
508 | void nilfs_mapping_init(struct address_space *mapping, | 495 | void nilfs_mapping_init(struct address_space *mapping, |
509 | struct backing_dev_info *bdi, | 496 | struct backing_dev_info *bdi, |
510 | const struct address_space_operations *aops) | 497 | const struct address_space_operations *aops) |
diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h index 622df27cd891..2a00953ebd5f 100644 --- a/fs/nilfs2/page.h +++ b/fs/nilfs2/page.h | |||
@@ -61,7 +61,6 @@ void nilfs_free_private_page(struct page *); | |||
61 | int nilfs_copy_dirty_pages(struct address_space *, struct address_space *); | 61 | int nilfs_copy_dirty_pages(struct address_space *, struct address_space *); |
62 | void nilfs_copy_back_pages(struct address_space *, struct address_space *); | 62 | void nilfs_copy_back_pages(struct address_space *, struct address_space *); |
63 | void nilfs_clear_dirty_pages(struct address_space *); | 63 | void nilfs_clear_dirty_pages(struct address_space *); |
64 | void nilfs_mapping_init_once(struct address_space *mapping); | ||
65 | void nilfs_mapping_init(struct address_space *mapping, | 64 | void nilfs_mapping_init(struct address_space *mapping, |
66 | struct backing_dev_info *bdi, | 65 | struct backing_dev_info *bdi, |
67 | const struct address_space_operations *aops); | 66 | const struct address_space_operations *aops); |
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 58fd707174e1..1673b3d99842 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c | |||
@@ -1279,7 +1279,7 @@ static void nilfs_inode_init_once(void *obj) | |||
1279 | #ifdef CONFIG_NILFS_XATTR | 1279 | #ifdef CONFIG_NILFS_XATTR |
1280 | init_rwsem(&ii->xattr_sem); | 1280 | init_rwsem(&ii->xattr_sem); |
1281 | #endif | 1281 | #endif |
1282 | nilfs_btnode_cache_init_once(&ii->i_btnode_cache); | 1282 | address_space_init_once(&ii->i_btnode_cache); |
1283 | ii->i_bmap = &ii->i_bmap_data; | 1283 | ii->i_bmap = &ii->i_bmap_data; |
1284 | inode_init_once(&ii->vfs_inode); | 1284 | inode_init_once(&ii->vfs_inode); |
1285 | } | 1285 | } |
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h index 43e56b97f9c0..6180da1e37e6 100644 --- a/fs/ocfs2/journal.h +++ b/fs/ocfs2/journal.h | |||
@@ -405,9 +405,9 @@ static inline int ocfs2_remove_extent_credits(struct super_block *sb) | |||
405 | ocfs2_quota_trans_credits(sb); | 405 | ocfs2_quota_trans_credits(sb); |
406 | } | 406 | } |
407 | 407 | ||
408 | /* data block for new dir/symlink, 2 for bitmap updates (bitmap fe + | 408 | /* data block for new dir/symlink, allocation of directory block, dx_root |
409 | * bitmap block for the new bit) dx_root update for free list */ | 409 | * update for free list */ |
410 | #define OCFS2_DIR_LINK_ADDITIONAL_CREDITS (1 + 2 + 1) | 410 | #define OCFS2_DIR_LINK_ADDITIONAL_CREDITS (1 + OCFS2_SUBALLOC_ALLOC + 1) |
411 | 411 | ||
412 | static inline int ocfs2_add_dir_index_credits(struct super_block *sb) | 412 | static inline int ocfs2_add_dir_index_credits(struct super_block *sb) |
413 | { | 413 | { |
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index b5f9160e93e9..19ebc5aad391 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c | |||
@@ -3228,7 +3228,7 @@ static int ocfs2_make_clusters_writable(struct super_block *sb, | |||
3228 | u32 num_clusters, unsigned int e_flags) | 3228 | u32 num_clusters, unsigned int e_flags) |
3229 | { | 3229 | { |
3230 | int ret, delete, index, credits = 0; | 3230 | int ret, delete, index, credits = 0; |
3231 | u32 new_bit, new_len; | 3231 | u32 new_bit, new_len, orig_num_clusters; |
3232 | unsigned int set_len; | 3232 | unsigned int set_len; |
3233 | struct ocfs2_super *osb = OCFS2_SB(sb); | 3233 | struct ocfs2_super *osb = OCFS2_SB(sb); |
3234 | handle_t *handle; | 3234 | handle_t *handle; |
@@ -3261,6 +3261,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb, | |||
3261 | goto out; | 3261 | goto out; |
3262 | } | 3262 | } |
3263 | 3263 | ||
3264 | orig_num_clusters = num_clusters; | ||
3265 | |||
3264 | while (num_clusters) { | 3266 | while (num_clusters) { |
3265 | ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh, | 3267 | ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh, |
3266 | p_cluster, num_clusters, | 3268 | p_cluster, num_clusters, |
@@ -3348,7 +3350,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb, | |||
3348 | * in write-back mode. | 3350 | * in write-back mode. |
3349 | */ | 3351 | */ |
3350 | if (context->get_clusters == ocfs2_di_get_clusters) { | 3352 | if (context->get_clusters == ocfs2_di_get_clusters) { |
3351 | ret = ocfs2_cow_sync_writeback(sb, context, cpos, num_clusters); | 3353 | ret = ocfs2_cow_sync_writeback(sb, context, cpos, |
3354 | orig_num_clusters); | ||
3352 | if (ret) | 3355 | if (ret) |
3353 | mlog_errno(ret); | 3356 | mlog_errno(ret); |
3354 | } | 3357 | } |
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 38f986d2447e..36c423fb0635 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c | |||
@@ -1316,7 +1316,7 @@ static int ocfs2_parse_options(struct super_block *sb, | |||
1316 | struct mount_options *mopt, | 1316 | struct mount_options *mopt, |
1317 | int is_remount) | 1317 | int is_remount) |
1318 | { | 1318 | { |
1319 | int status; | 1319 | int status, user_stack = 0; |
1320 | char *p; | 1320 | char *p; |
1321 | u32 tmp; | 1321 | u32 tmp; |
1322 | 1322 | ||
@@ -1459,6 +1459,15 @@ static int ocfs2_parse_options(struct super_block *sb, | |||
1459 | memcpy(mopt->cluster_stack, args[0].from, | 1459 | memcpy(mopt->cluster_stack, args[0].from, |
1460 | OCFS2_STACK_LABEL_LEN); | 1460 | OCFS2_STACK_LABEL_LEN); |
1461 | mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0'; | 1461 | mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0'; |
1462 | /* | ||
1463 | * Open code the memcmp here as we don't have | ||
1464 | * an osb to pass to | ||
1465 | * ocfs2_userspace_stack(). | ||
1466 | */ | ||
1467 | if (memcmp(mopt->cluster_stack, | ||
1468 | OCFS2_CLASSIC_CLUSTER_STACK, | ||
1469 | OCFS2_STACK_LABEL_LEN)) | ||
1470 | user_stack = 1; | ||
1462 | break; | 1471 | break; |
1463 | case Opt_inode64: | 1472 | case Opt_inode64: |
1464 | mopt->mount_opt |= OCFS2_MOUNT_INODE64; | 1473 | mopt->mount_opt |= OCFS2_MOUNT_INODE64; |
@@ -1514,13 +1523,16 @@ static int ocfs2_parse_options(struct super_block *sb, | |||
1514 | } | 1523 | } |
1515 | } | 1524 | } |
1516 | 1525 | ||
1517 | /* Ensure only one heartbeat mode */ | 1526 | if (user_stack == 0) { |
1518 | tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL | | 1527 | /* Ensure only one heartbeat mode */ |
1519 | OCFS2_MOUNT_HB_NONE); | 1528 | tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL | |
1520 | if (hweight32(tmp) != 1) { | 1529 | OCFS2_MOUNT_HB_GLOBAL | |
1521 | mlog(ML_ERROR, "Invalid heartbeat mount options\n"); | 1530 | OCFS2_MOUNT_HB_NONE); |
1522 | status = 0; | 1531 | if (hweight32(tmp) != 1) { |
1523 | goto bail; | 1532 | mlog(ML_ERROR, "Invalid heartbeat mount options\n"); |
1533 | status = 0; | ||
1534 | goto bail; | ||
1535 | } | ||
1524 | } | 1536 | } |
1525 | 1537 | ||
1526 | status = 1; | 1538 | status = 1; |
diff --git a/fs/open.c b/fs/open.c --- a/fs/open.c +++ b/fs/open.c | |||
@@ -790,6 +790,8 @@ struct file *nameidata_to_filp(struct nameidata *nd) | |||
790 | 790 | ||
791 | /* Pick up the filp from the open intent */ | 791 | /* Pick up the filp from the open intent */ |
792 | filp = nd->intent.open.file; | 792 | filp = nd->intent.open.file; |
793 | nd->intent.open.file = NULL; | ||
794 | |||
793 | /* Has the filesystem initialised the file for us? */ | 795 | /* Has the filesystem initialised the file for us? */ |
794 | if (filp->f_path.dentry == NULL) { | 796 | if (filp->f_path.dentry == NULL) { |
795 | path_get(&nd->path); | 797 | path_get(&nd->path); |
diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c index 789c625c7aa5..b10e3540d5b7 100644 --- a/fs/partitions/ldm.c +++ b/fs/partitions/ldm.c | |||
@@ -251,6 +251,11 @@ static bool ldm_parse_vmdb (const u8 *data, struct vmdb *vm) | |||
251 | } | 251 | } |
252 | 252 | ||
253 | vm->vblk_size = get_unaligned_be32(data + 0x08); | 253 | vm->vblk_size = get_unaligned_be32(data + 0x08); |
254 | if (vm->vblk_size == 0) { | ||
255 | ldm_error ("Illegal VBLK size"); | ||
256 | return false; | ||
257 | } | ||
258 | |||
254 | vm->vblk_offset = get_unaligned_be32(data + 0x0C); | 259 | vm->vblk_offset = get_unaligned_be32(data + 0x0C); |
255 | vm->last_vblk_seq = get_unaligned_be32(data + 0x04); | 260 | vm->last_vblk_seq = get_unaligned_be32(data + 0x04); |
256 | 261 | ||
diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c index 68d6a216ee79..11f688bd76c5 100644 --- a/fs/partitions/mac.c +++ b/fs/partitions/mac.c | |||
@@ -29,10 +29,9 @@ static inline void mac_fix_string(char *stg, int len) | |||
29 | 29 | ||
30 | int mac_partition(struct parsed_partitions *state) | 30 | int mac_partition(struct parsed_partitions *state) |
31 | { | 31 | { |
32 | int slot = 1; | ||
33 | Sector sect; | 32 | Sector sect; |
34 | unsigned char *data; | 33 | unsigned char *data; |
35 | int blk, blocks_in_map; | 34 | int slot, blocks_in_map; |
36 | unsigned secsize; | 35 | unsigned secsize; |
37 | #ifdef CONFIG_PPC_PMAC | 36 | #ifdef CONFIG_PPC_PMAC |
38 | int found_root = 0; | 37 | int found_root = 0; |
@@ -59,10 +58,14 @@ int mac_partition(struct parsed_partitions *state) | |||
59 | put_dev_sector(sect); | 58 | put_dev_sector(sect); |
60 | return 0; /* not a MacOS disk */ | 59 | return 0; /* not a MacOS disk */ |
61 | } | 60 | } |
62 | strlcat(state->pp_buf, " [mac]", PAGE_SIZE); | ||
63 | blocks_in_map = be32_to_cpu(part->map_count); | 61 | blocks_in_map = be32_to_cpu(part->map_count); |
64 | for (blk = 1; blk <= blocks_in_map; ++blk) { | 62 | if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) { |
65 | int pos = blk * secsize; | 63 | put_dev_sector(sect); |
64 | return 0; | ||
65 | } | ||
66 | strlcat(state->pp_buf, " [mac]", PAGE_SIZE); | ||
67 | for (slot = 1; slot <= blocks_in_map; ++slot) { | ||
68 | int pos = slot * secsize; | ||
66 | put_dev_sector(sect); | 69 | put_dev_sector(sect); |
67 | data = read_part_sector(state, pos/512, §); | 70 | data = read_part_sector(state, pos/512, §); |
68 | if (!data) | 71 | if (!data) |
@@ -113,13 +116,11 @@ int mac_partition(struct parsed_partitions *state) | |||
113 | } | 116 | } |
114 | 117 | ||
115 | if (goodness > found_root_goodness) { | 118 | if (goodness > found_root_goodness) { |
116 | found_root = blk; | 119 | found_root = slot; |
117 | found_root_goodness = goodness; | 120 | found_root_goodness = goodness; |
118 | } | 121 | } |
119 | } | 122 | } |
120 | #endif /* CONFIG_PPC_PMAC */ | 123 | #endif /* CONFIG_PPC_PMAC */ |
121 | |||
122 | ++slot; | ||
123 | } | 124 | } |
124 | #ifdef CONFIG_PPC_PMAC | 125 | #ifdef CONFIG_PPC_PMAC |
125 | if (found_root_goodness) | 126 | if (found_root_goodness) |
diff --git a/fs/proc/array.c b/fs/proc/array.c index df2b703b9d0f..7c99c1cf7e5c 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c | |||
@@ -353,9 +353,6 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, | |||
353 | task_cap(m, task); | 353 | task_cap(m, task); |
354 | task_cpus_allowed(m, task); | 354 | task_cpus_allowed(m, task); |
355 | cpuset_task_status_allowed(m, task); | 355 | cpuset_task_status_allowed(m, task); |
356 | #if defined(CONFIG_S390) | ||
357 | task_show_regs(m, task); | ||
358 | #endif | ||
359 | task_context_switch_counts(m, task); | 356 | task_context_switch_counts(m, task); |
360 | return 0; | 357 | return 0; |
361 | } | 358 | } |
diff --git a/fs/super.c b/fs/super.c index 74e149efed81..7e9dd4cc2c01 100644 --- a/fs/super.c +++ b/fs/super.c | |||
@@ -177,6 +177,11 @@ void deactivate_locked_super(struct super_block *s) | |||
177 | struct file_system_type *fs = s->s_type; | 177 | struct file_system_type *fs = s->s_type; |
178 | if (atomic_dec_and_test(&s->s_active)) { | 178 | if (atomic_dec_and_test(&s->s_active)) { |
179 | fs->kill_sb(s); | 179 | fs->kill_sb(s); |
180 | /* | ||
181 | * We need to call rcu_barrier so all the delayed rcu free | ||
182 | * inodes are flushed before we release the fs module. | ||
183 | */ | ||
184 | rcu_barrier(); | ||
180 | put_filesystem(fs); | 185 | put_filesystem(fs); |
181 | put_super(s); | 186 | put_super(s); |
182 | } else { | 187 | } else { |
diff --git a/fs/xfs/linux-2.6/xfs_discard.c b/fs/xfs/linux-2.6/xfs_discard.c index 05201ae719e5..d61611c88012 100644 --- a/fs/xfs/linux-2.6/xfs_discard.c +++ b/fs/xfs/linux-2.6/xfs_discard.c | |||
@@ -152,6 +152,8 @@ xfs_ioc_trim( | |||
152 | 152 | ||
153 | if (!capable(CAP_SYS_ADMIN)) | 153 | if (!capable(CAP_SYS_ADMIN)) |
154 | return -XFS_ERROR(EPERM); | 154 | return -XFS_ERROR(EPERM); |
155 | if (!blk_queue_discard(q)) | ||
156 | return -XFS_ERROR(EOPNOTSUPP); | ||
155 | if (copy_from_user(&range, urange, sizeof(range))) | 157 | if (copy_from_user(&range, urange, sizeof(range))) |
156 | return -XFS_ERROR(EFAULT); | 158 | return -XFS_ERROR(EFAULT); |
157 | 159 | ||
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index cec89dd5d7d2..85668efb3e3e 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c | |||
@@ -53,6 +53,9 @@ xfs_fs_geometry( | |||
53 | xfs_fsop_geom_t *geo, | 53 | xfs_fsop_geom_t *geo, |
54 | int new_version) | 54 | int new_version) |
55 | { | 55 | { |
56 | |||
57 | memset(geo, 0, sizeof(*geo)); | ||
58 | |||
56 | geo->blocksize = mp->m_sb.sb_blocksize; | 59 | geo->blocksize = mp->m_sb.sb_blocksize; |
57 | geo->rtextsize = mp->m_sb.sb_rextsize; | 60 | geo->rtextsize = mp->m_sb.sb_rextsize; |
58 | geo->agblocks = mp->m_sb.sb_agblocks; | 61 | geo->agblocks = mp->m_sb.sb_agblocks; |
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 31b6188df221..b4bfe338ea0e 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h | |||
@@ -4,6 +4,8 @@ | |||
4 | #ifndef __ASSEMBLY__ | 4 | #ifndef __ASSEMBLY__ |
5 | #ifdef CONFIG_MMU | 5 | #ifdef CONFIG_MMU |
6 | 6 | ||
7 | #include <linux/mm_types.h> | ||
8 | |||
7 | #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS | 9 | #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS |
8 | extern int ptep_set_access_flags(struct vm_area_struct *vma, | 10 | extern int ptep_set_access_flags(struct vm_area_struct *vma, |
9 | unsigned long address, pte_t *ptep, | 11 | unsigned long address, pte_t *ptep, |
diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 835214761d4f..ad5770f2315c 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h | |||
@@ -1134,7 +1134,7 @@ struct drm_device { | |||
1134 | struct usb_device *usbdev; | 1134 | struct usb_device *usbdev; |
1135 | 1135 | ||
1136 | struct drm_sg_mem *sg; /**< Scatter gather memory */ | 1136 | struct drm_sg_mem *sg; /**< Scatter gather memory */ |
1137 | int num_crtcs; /**< Number of CRTCs on this device */ | 1137 | unsigned int num_crtcs; /**< Number of CRTCs on this device */ |
1138 | void *dev_private; /**< device private data */ | 1138 | void *dev_private; /**< device private data */ |
1139 | void *mm_private; | 1139 | void *mm_private; |
1140 | struct address_space *dev_mapping; | 1140 | struct address_space *dev_mapping; |
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index 0039f1f97ad8..c4d6dbfa3ff4 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h | |||
@@ -290,6 +290,7 @@ typedef struct drm_i915_irq_wait { | |||
290 | #define I915_PARAM_HAS_RELAXED_FENCING 12 | 290 | #define I915_PARAM_HAS_RELAXED_FENCING 12 |
291 | #define I915_PARAM_HAS_COHERENT_RINGS 13 | 291 | #define I915_PARAM_HAS_COHERENT_RINGS 13 |
292 | #define I915_PARAM_HAS_EXEC_CONSTANTS 14 | 292 | #define I915_PARAM_HAS_EXEC_CONSTANTS 14 |
293 | #define I915_PARAM_HAS_RELAXED_DELTA 15 | ||
293 | 294 | ||
294 | typedef struct drm_i915_getparam { | 295 | typedef struct drm_i915_getparam { |
295 | int param; | 296 | int param; |
diff --git a/include/linux/dcbnl.h b/include/linux/dcbnl.h index 68cd248f6d3e..66900e3c6eb1 100644 --- a/include/linux/dcbnl.h +++ b/include/linux/dcbnl.h | |||
@@ -101,8 +101,8 @@ struct ieee_pfc { | |||
101 | */ | 101 | */ |
102 | struct dcb_app { | 102 | struct dcb_app { |
103 | __u8 selector; | 103 | __u8 selector; |
104 | __u32 protocol; | ||
105 | __u8 priority; | 104 | __u8 priority; |
105 | __u16 protocol; | ||
106 | }; | 106 | }; |
107 | 107 | ||
108 | struct dcbmsg { | 108 | struct dcbmsg { |
diff --git a/include/linux/freezer.h b/include/linux/freezer.h index da7e52b099f3..1effc8b56b4e 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h | |||
@@ -109,7 +109,7 @@ static inline void freezer_count(void) | |||
109 | } | 109 | } |
110 | 110 | ||
111 | /* | 111 | /* |
112 | * Check if the task should be counted as freezeable by the freezer | 112 | * Check if the task should be counted as freezable by the freezer |
113 | */ | 113 | */ |
114 | static inline int freezer_should_skip(struct task_struct *p) | 114 | static inline int freezer_should_skip(struct task_struct *p) |
115 | { | 115 | { |
diff --git a/include/linux/fs.h b/include/linux/fs.h index bd3215940c37..e38b50a4b9d2 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -649,6 +649,7 @@ struct address_space { | |||
649 | spinlock_t private_lock; /* for use by the address_space */ | 649 | spinlock_t private_lock; /* for use by the address_space */ |
650 | struct list_head private_list; /* ditto */ | 650 | struct list_head private_list; /* ditto */ |
651 | struct address_space *assoc_mapping; /* ditto */ | 651 | struct address_space *assoc_mapping; /* ditto */ |
652 | struct mutex unmap_mutex; /* to protect unmapping */ | ||
652 | } __attribute__((aligned(sizeof(long)))); | 653 | } __attribute__((aligned(sizeof(long)))); |
653 | /* | 654 | /* |
654 | * On most architectures that alignment is already the case; but | 655 | * On most architectures that alignment is already the case; but |
@@ -2139,7 +2140,7 @@ extern void check_disk_size_change(struct gendisk *disk, | |||
2139 | struct block_device *bdev); | 2140 | struct block_device *bdev); |
2140 | extern int revalidate_disk(struct gendisk *); | 2141 | extern int revalidate_disk(struct gendisk *); |
2141 | extern int check_disk_change(struct block_device *); | 2142 | extern int check_disk_change(struct block_device *); |
2142 | extern int __invalidate_device(struct block_device *); | 2143 | extern int __invalidate_device(struct block_device *, bool); |
2143 | extern int invalidate_partition(struct gendisk *, int); | 2144 | extern int invalidate_partition(struct gendisk *, int); |
2144 | #endif | 2145 | #endif |
2145 | unsigned long invalidate_mapping_pages(struct address_space *mapping, | 2146 | unsigned long invalidate_mapping_pages(struct address_space *mapping, |
@@ -2225,6 +2226,7 @@ extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin); | |||
2225 | 2226 | ||
2226 | extern int inode_init_always(struct super_block *, struct inode *); | 2227 | extern int inode_init_always(struct super_block *, struct inode *); |
2227 | extern void inode_init_once(struct inode *); | 2228 | extern void inode_init_once(struct inode *); |
2229 | extern void address_space_init_once(struct address_space *mapping); | ||
2228 | extern void ihold(struct inode * inode); | 2230 | extern void ihold(struct inode * inode); |
2229 | extern void iput(struct inode *); | 2231 | extern void iput(struct inode *); |
2230 | extern struct inode * igrab(struct inode *); | 2232 | extern struct inode * igrab(struct inode *); |
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 8e6c8c42bc3c..df29c8fde36b 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h | |||
@@ -57,7 +57,8 @@ extern pmd_t *page_check_address_pmd(struct page *page, | |||
57 | (transparent_hugepage_flags & \ | 57 | (transparent_hugepage_flags & \ |
58 | (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) && \ | 58 | (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) && \ |
59 | ((__vma)->vm_flags & VM_HUGEPAGE))) && \ | 59 | ((__vma)->vm_flags & VM_HUGEPAGE))) && \ |
60 | !((__vma)->vm_flags & VM_NOHUGEPAGE)) | 60 | !((__vma)->vm_flags & VM_NOHUGEPAGE) && \ |
61 | !is_vma_temporary_stack(__vma)) | ||
61 | #define transparent_hugepage_defrag(__vma) \ | 62 | #define transparent_hugepage_defrag(__vma) \ |
62 | ((transparent_hugepage_flags & \ | 63 | ((transparent_hugepage_flags & \ |
63 | (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) || \ | 64 | (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) || \ |
diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h index 697474691749..fe7c4b9ae270 100644 --- a/include/linux/input/matrix_keypad.h +++ b/include/linux/input/matrix_keypad.h | |||
@@ -4,8 +4,8 @@ | |||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <linux/input.h> | 5 | #include <linux/input.h> |
6 | 6 | ||
7 | #define MATRIX_MAX_ROWS 16 | 7 | #define MATRIX_MAX_ROWS 32 |
8 | #define MATRIX_MAX_COLS 16 | 8 | #define MATRIX_MAX_COLS 32 |
9 | 9 | ||
10 | #define KEY(row, col, val) ((((row) & (MATRIX_MAX_ROWS - 1)) << 24) |\ | 10 | #define KEY(row, col, val) ((((row) & (MATRIX_MAX_ROWS - 1)) << 24) |\ |
11 | (((col) & (MATRIX_MAX_COLS - 1)) << 16) |\ | 11 | (((col) & (MATRIX_MAX_COLS - 1)) << 16) |\ |
diff --git a/include/linux/klist.h b/include/linux/klist.h index e91a4e59b771..a370ce57cf1d 100644 --- a/include/linux/klist.h +++ b/include/linux/klist.h | |||
@@ -22,7 +22,7 @@ struct klist { | |||
22 | struct list_head k_list; | 22 | struct list_head k_list; |
23 | void (*get)(struct klist_node *); | 23 | void (*get)(struct klist_node *); |
24 | void (*put)(struct klist_node *); | 24 | void (*put)(struct klist_node *); |
25 | } __attribute__ ((aligned (4))); | 25 | } __attribute__ ((aligned (sizeof(void *)))); |
26 | 26 | ||
27 | #define KLIST_INIT(_name, _get, _put) \ | 27 | #define KLIST_INIT(_name, _get, _put) \ |
28 | { .k_lock = __SPIN_LOCK_UNLOCKED(_name.k_lock), \ | 28 | { .k_lock = __SPIN_LOCK_UNLOCKED(_name.k_lock), \ |
diff --git a/include/linux/list.h b/include/linux/list.h index 9a5f8a71810c..3a54266a1e85 100644 --- a/include/linux/list.h +++ b/include/linux/list.h | |||
@@ -96,6 +96,11 @@ static inline void __list_del(struct list_head * prev, struct list_head * next) | |||
96 | * in an undefined state. | 96 | * in an undefined state. |
97 | */ | 97 | */ |
98 | #ifndef CONFIG_DEBUG_LIST | 98 | #ifndef CONFIG_DEBUG_LIST |
99 | static inline void __list_del_entry(struct list_head *entry) | ||
100 | { | ||
101 | __list_del(entry->prev, entry->next); | ||
102 | } | ||
103 | |||
99 | static inline void list_del(struct list_head *entry) | 104 | static inline void list_del(struct list_head *entry) |
100 | { | 105 | { |
101 | __list_del(entry->prev, entry->next); | 106 | __list_del(entry->prev, entry->next); |
@@ -103,6 +108,7 @@ static inline void list_del(struct list_head *entry) | |||
103 | entry->prev = LIST_POISON2; | 108 | entry->prev = LIST_POISON2; |
104 | } | 109 | } |
105 | #else | 110 | #else |
111 | extern void __list_del_entry(struct list_head *entry); | ||
106 | extern void list_del(struct list_head *entry); | 112 | extern void list_del(struct list_head *entry); |
107 | #endif | 113 | #endif |
108 | 114 | ||
@@ -135,7 +141,7 @@ static inline void list_replace_init(struct list_head *old, | |||
135 | */ | 141 | */ |
136 | static inline void list_del_init(struct list_head *entry) | 142 | static inline void list_del_init(struct list_head *entry) |
137 | { | 143 | { |
138 | __list_del(entry->prev, entry->next); | 144 | __list_del_entry(entry); |
139 | INIT_LIST_HEAD(entry); | 145 | INIT_LIST_HEAD(entry); |
140 | } | 146 | } |
141 | 147 | ||
@@ -146,7 +152,7 @@ static inline void list_del_init(struct list_head *entry) | |||
146 | */ | 152 | */ |
147 | static inline void list_move(struct list_head *list, struct list_head *head) | 153 | static inline void list_move(struct list_head *list, struct list_head *head) |
148 | { | 154 | { |
149 | __list_del(list->prev, list->next); | 155 | __list_del_entry(list); |
150 | list_add(list, head); | 156 | list_add(list, head); |
151 | } | 157 | } |
152 | 158 | ||
@@ -158,7 +164,7 @@ static inline void list_move(struct list_head *list, struct list_head *head) | |||
158 | static inline void list_move_tail(struct list_head *list, | 164 | static inline void list_move_tail(struct list_head *list, |
159 | struct list_head *head) | 165 | struct list_head *head) |
160 | { | 166 | { |
161 | __list_del(list->prev, list->next); | 167 | __list_del_entry(list); |
162 | list_add_tail(list, head); | 168 | list_add_tail(list, head); |
163 | } | 169 | } |
164 | 170 | ||
diff --git a/include/linux/module.h b/include/linux/module.h index 9bdf27c7615b..5de42043dff0 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
@@ -62,7 +62,7 @@ struct module_version_attribute { | |||
62 | struct module_attribute mattr; | 62 | struct module_attribute mattr; |
63 | const char *module_name; | 63 | const char *module_name; |
64 | const char *version; | 64 | const char *version; |
65 | }; | 65 | } __attribute__ ((__aligned__(sizeof(void *)))); |
66 | 66 | ||
67 | struct module_kobject | 67 | struct module_kobject |
68 | { | 68 | { |
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h index 32fb81212fd1..1ca64113efe8 100644 --- a/include/linux/oprofile.h +++ b/include/linux/oprofile.h | |||
@@ -16,6 +16,8 @@ | |||
16 | #include <linux/types.h> | 16 | #include <linux/types.h> |
17 | #include <linux/spinlock.h> | 17 | #include <linux/spinlock.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/errno.h> | ||
20 | #include <linux/printk.h> | ||
19 | #include <asm/atomic.h> | 21 | #include <asm/atomic.h> |
20 | 22 | ||
21 | /* Each escaped entry is prefixed by ESCAPE_CODE | 23 | /* Each escaped entry is prefixed by ESCAPE_CODE |
@@ -186,10 +188,17 @@ int oprofile_add_data(struct op_entry *entry, unsigned long val); | |||
186 | int oprofile_add_data64(struct op_entry *entry, u64 val); | 188 | int oprofile_add_data64(struct op_entry *entry, u64 val); |
187 | int oprofile_write_commit(struct op_entry *entry); | 189 | int oprofile_write_commit(struct op_entry *entry); |
188 | 190 | ||
189 | #ifdef CONFIG_PERF_EVENTS | 191 | #ifdef CONFIG_HW_PERF_EVENTS |
190 | int __init oprofile_perf_init(struct oprofile_operations *ops); | 192 | int __init oprofile_perf_init(struct oprofile_operations *ops); |
191 | void oprofile_perf_exit(void); | 193 | void oprofile_perf_exit(void); |
192 | char *op_name_from_perf_id(void); | 194 | char *op_name_from_perf_id(void); |
193 | #endif /* CONFIG_PERF_EVENTS */ | 195 | #else |
196 | static inline int __init oprofile_perf_init(struct oprofile_operations *ops) | ||
197 | { | ||
198 | pr_info("oprofile: hardware counters not available\n"); | ||
199 | return -ENODEV; | ||
200 | } | ||
201 | static inline void oprofile_perf_exit(void) { } | ||
202 | #endif /* CONFIG_HW_PERF_EVENTS */ | ||
194 | 203 | ||
195 | #endif /* OPROFILE_H */ | 204 | #endif /* OPROFILE_H */ |
diff --git a/include/linux/pm.h b/include/linux/pm.h index dd9c7ab38270..21415cc91cbb 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h | |||
@@ -431,6 +431,8 @@ struct dev_pm_info { | |||
431 | struct list_head entry; | 431 | struct list_head entry; |
432 | struct completion completion; | 432 | struct completion completion; |
433 | struct wakeup_source *wakeup; | 433 | struct wakeup_source *wakeup; |
434 | #else | ||
435 | unsigned int should_wakeup:1; | ||
434 | #endif | 436 | #endif |
435 | #ifdef CONFIG_PM_RUNTIME | 437 | #ifdef CONFIG_PM_RUNTIME |
436 | struct timer_list suspend_timer; | 438 | struct timer_list suspend_timer; |
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index 9cff00dd6b63..03a67db03d01 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h | |||
@@ -109,11 +109,6 @@ static inline bool device_can_wakeup(struct device *dev) | |||
109 | return dev->power.can_wakeup; | 109 | return dev->power.can_wakeup; |
110 | } | 110 | } |
111 | 111 | ||
112 | static inline bool device_may_wakeup(struct device *dev) | ||
113 | { | ||
114 | return false; | ||
115 | } | ||
116 | |||
117 | static inline struct wakeup_source *wakeup_source_create(const char *name) | 112 | static inline struct wakeup_source *wakeup_source_create(const char *name) |
118 | { | 113 | { |
119 | return NULL; | 114 | return NULL; |
@@ -134,24 +129,32 @@ static inline void wakeup_source_unregister(struct wakeup_source *ws) {} | |||
134 | 129 | ||
135 | static inline int device_wakeup_enable(struct device *dev) | 130 | static inline int device_wakeup_enable(struct device *dev) |
136 | { | 131 | { |
137 | return -EINVAL; | 132 | dev->power.should_wakeup = true; |
133 | return 0; | ||
138 | } | 134 | } |
139 | 135 | ||
140 | static inline int device_wakeup_disable(struct device *dev) | 136 | static inline int device_wakeup_disable(struct device *dev) |
141 | { | 137 | { |
138 | dev->power.should_wakeup = false; | ||
142 | return 0; | 139 | return 0; |
143 | } | 140 | } |
144 | 141 | ||
145 | static inline int device_init_wakeup(struct device *dev, bool val) | 142 | static inline int device_set_wakeup_enable(struct device *dev, bool enable) |
146 | { | 143 | { |
147 | dev->power.can_wakeup = val; | 144 | dev->power.should_wakeup = enable; |
148 | return val ? -EINVAL : 0; | 145 | return 0; |
149 | } | 146 | } |
150 | 147 | ||
148 | static inline int device_init_wakeup(struct device *dev, bool val) | ||
149 | { | ||
150 | device_set_wakeup_capable(dev, val); | ||
151 | device_set_wakeup_enable(dev, val); | ||
152 | return 0; | ||
153 | } | ||
151 | 154 | ||
152 | static inline int device_set_wakeup_enable(struct device *dev, bool enable) | 155 | static inline bool device_may_wakeup(struct device *dev) |
153 | { | 156 | { |
154 | return -EINVAL; | 157 | return dev->power.can_wakeup && dev->power.should_wakeup; |
155 | } | 158 | } |
156 | 159 | ||
157 | static inline void __pm_stay_awake(struct wakeup_source *ws) {} | 160 | static inline void __pm_stay_awake(struct wakeup_source *ws) {} |
diff --git a/include/linux/rio_regs.h b/include/linux/rio_regs.h index d63dcbaea169..9026b30238f3 100644 --- a/include/linux/rio_regs.h +++ b/include/linux/rio_regs.h | |||
@@ -14,10 +14,12 @@ | |||
14 | #define LINUX_RIO_REGS_H | 14 | #define LINUX_RIO_REGS_H |
15 | 15 | ||
16 | /* | 16 | /* |
17 | * In RapidIO, each device has a 2MB configuration space that is | 17 | * In RapidIO, each device has a 16MB configuration space that is |
18 | * accessed via maintenance transactions. Portions of configuration | 18 | * accessed via maintenance transactions. Portions of configuration |
19 | * space are standardized and/or reserved. | 19 | * space are standardized and/or reserved. |
20 | */ | 20 | */ |
21 | #define RIO_MAINT_SPACE_SZ 0x1000000 /* 16MB of RapidIO maintenance space */ ||
22 | |||
21 | #define RIO_DEV_ID_CAR 0x00 /* [I] Device Identity CAR */ | 23 | #define RIO_DEV_ID_CAR 0x00 /* [I] Device Identity CAR */ |
22 | #define RIO_DEV_INFO_CAR 0x04 /* [I] Device Information CAR */ | 24 | #define RIO_DEV_INFO_CAR 0x04 /* [I] Device Information CAR */ |
23 | #define RIO_ASM_ID_CAR 0x08 /* [I] Assembly Identity CAR */ | 25 | #define RIO_ASM_ID_CAR 0x08 /* [I] Assembly Identity CAR */ |
diff --git a/include/linux/rtc.h b/include/linux/rtc.h index a0b639f8e805..89c3e5182991 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h | |||
@@ -203,6 +203,18 @@ struct rtc_device | |||
203 | struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */ | 203 | struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */ |
204 | int pie_enabled; | 204 | int pie_enabled; |
205 | struct work_struct irqwork; | 205 | struct work_struct irqwork; |
206 | |||
207 | |||
208 | #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL | ||
209 | struct work_struct uie_task; | ||
210 | struct timer_list uie_timer; | ||
211 | /* Those fields are protected by rtc->irq_lock */ | ||
212 | unsigned int oldsecs; | ||
213 | unsigned int uie_irq_active:1; | ||
214 | unsigned int stop_uie_polling:1; | ||
215 | unsigned int uie_task_active:1; | ||
216 | unsigned int uie_timer_active:1; | ||
217 | #endif | ||
206 | }; | 218 | }; |
207 | #define to_rtc_device(d) container_of(d, struct rtc_device, dev) | 219 | #define to_rtc_device(d) container_of(d, struct rtc_device, dev) |
208 | 220 | ||
@@ -235,7 +247,10 @@ extern int rtc_irq_set_freq(struct rtc_device *rtc, | |||
235 | struct rtc_task *task, int freq); | 247 | struct rtc_task *task, int freq); |
236 | extern int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled); | 248 | extern int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled); |
237 | extern int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled); | 249 | extern int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled); |
250 | extern int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, | ||
251 | unsigned int enabled); | ||
238 | 252 | ||
253 | void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode); | ||
239 | void rtc_aie_update_irq(void *private); | 254 | void rtc_aie_update_irq(void *private); |
240 | void rtc_uie_update_irq(void *private); | 255 | void rtc_uie_update_irq(void *private); |
241 | enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer); | 256 | enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer); |
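The new rtc_dev_update_irq_enable_emul() entry point, together with the CONFIG_RTC_INTF_DEV_UIE_EMUL fields folded into struct rtc_device above, lets update interrupts be emulated by polling when the hardware cannot deliver them. A rough sketch of a fallback call site; the hw_update_irq_supported() check is hypothetical, only the emulation helper and rtc_update_irq_enable() come from this header:

#include <linux/rtc.h>

static int enable_update_irq(struct rtc_device *rtc, unsigned int enabled)
{
	/* hypothetical capability check standing in for the real one */
	if (!hw_update_irq_supported(rtc))
		return rtc_dev_update_irq_enable_emul(rtc, enabled);

	return rtc_update_irq_enable(rtc, enabled);
}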
diff --git a/include/linux/sched.h b/include/linux/sched.h index d747f948b34e..777d8a5ed06b 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -1744,7 +1744,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * | |||
1744 | #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ | 1744 | #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ |
1745 | #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ | 1745 | #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ |
1746 | #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ | 1746 | #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ |
1747 | #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */ | 1747 | #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ |
1748 | #define PF_FREEZER_NOSIG 0x80000000 /* Freezer won't send signals to it */ | 1748 | #define PF_FREEZER_NOSIG 0x80000000 /* Freezer won't send signals to it */ |
1749 | 1749 | ||
1750 | /* | 1750 | /* |
diff --git a/include/linux/security.h b/include/linux/security.h index c642bb8b8f5a..b2b7f9749f5e 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
@@ -1662,7 +1662,7 @@ int security_capset(struct cred *new, const struct cred *old, | |||
1662 | const kernel_cap_t *effective, | 1662 | const kernel_cap_t *effective, |
1663 | const kernel_cap_t *inheritable, | 1663 | const kernel_cap_t *inheritable, |
1664 | const kernel_cap_t *permitted); | 1664 | const kernel_cap_t *permitted); |
1665 | int security_capable(int cap); | 1665 | int security_capable(const struct cred *cred, int cap); |
1666 | int security_real_capable(struct task_struct *tsk, int cap); | 1666 | int security_real_capable(struct task_struct *tsk, int cap); |
1667 | int security_real_capable_noaudit(struct task_struct *tsk, int cap); | 1667 | int security_real_capable_noaudit(struct task_struct *tsk, int cap); |
1668 | int security_sysctl(struct ctl_table *table, int op); | 1668 | int security_sysctl(struct ctl_table *table, int op); |
@@ -1856,9 +1856,9 @@ static inline int security_capset(struct cred *new, | |||
1856 | return cap_capset(new, old, effective, inheritable, permitted); | 1856 | return cap_capset(new, old, effective, inheritable, permitted); |
1857 | } | 1857 | } |
1858 | 1858 | ||
1859 | static inline int security_capable(int cap) | 1859 | static inline int security_capable(const struct cred *cred, int cap) |
1860 | { | 1860 | { |
1861 | return cap_capable(current, current_cred(), cap, SECURITY_CAP_AUDIT); | 1861 | return cap_capable(current, cred, cap, SECURITY_CAP_AUDIT); |
1862 | } | 1862 | } |
1863 | 1863 | ||
1864 | static inline int security_real_capable(struct task_struct *tsk, int cap) | 1864 | static inline int security_real_capable(struct task_struct *tsk, int cap) |
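security_capable() now takes the credentials to check explicitly instead of implying current's; callers pass current_cred() (the kernel/capability.c hunk later in this diff does exactly that). A minimal sketch of a call site, with CAP_NET_ADMIN chosen purely for illustration:

#include <linux/capability.h>
#include <linux/cred.h>
#include <linux/security.h>
#include <linux/types.h>

static bool may_admin_net(void)
{
	/* 0 means the security module granted the capability for the
	 * current task's subjective credentials */
	return security_capable(current_cred(), CAP_NET_ADMIN) == 0;
}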
diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 8651556dbd52..d3ec89fb4122 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h | |||
@@ -172,6 +172,14 @@ void thermal_zone_device_update(struct thermal_zone_device *); | |||
172 | struct thermal_cooling_device *thermal_cooling_device_register(char *, void *, | 172 | struct thermal_cooling_device *thermal_cooling_device_register(char *, void *, |
173 | const struct thermal_cooling_device_ops *); | 173 | const struct thermal_cooling_device_ops *); |
174 | void thermal_cooling_device_unregister(struct thermal_cooling_device *); | 174 | void thermal_cooling_device_unregister(struct thermal_cooling_device *); |
175 | |||
176 | #ifdef CONFIG_NET | ||
175 | extern int generate_netlink_event(u32 orig, enum events event); | 177 | extern int generate_netlink_event(u32 orig, enum events event); |
178 | #else | ||
179 | static inline int generate_netlink_event(u32 orig, enum events event) | ||
180 | { | ||
181 | return 0; | ||
182 | } | ||
183 | #endif | ||
176 | 184 | ||
177 | #endif /* __THERMAL_H__ */ | 185 | #endif /* __THERMAL_H__ */ |
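Since !CONFIG_NET now gets an inline stub that simply returns 0, callers of generate_netlink_event() no longer need an #ifdef of their own. A short sketch, assuming the THERMAL_CRITICAL value from the enum events in this same header:

#include <linux/thermal.h>
#include <linux/types.h>

static void report_critical_trip(u32 instance)
{
	/* resolves to the stub above when networking is not built in */
	generate_netlink_event(instance, THERMAL_CRITICAL);
}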
diff --git a/include/linux/usb/cdc.h b/include/linux/usb/cdc.h index 5e86dc771da4..81a927930bfd 100644 --- a/include/linux/usb/cdc.h +++ b/include/linux/usb/cdc.h | |||
@@ -89,7 +89,7 @@ struct usb_cdc_acm_descriptor { | |||
89 | 89 | ||
90 | #define USB_CDC_COMM_FEATURE 0x01 | 90 | #define USB_CDC_COMM_FEATURE 0x01 |
91 | #define USB_CDC_CAP_LINE 0x02 | 91 | #define USB_CDC_CAP_LINE 0x02 |
92 | #define USB_CDC_CAP_BRK 0x04 | 92 | #define USB_CDC_CAP_BRK 0x04 |
93 | #define USB_CDC_CAP_NOTIFY 0x08 | 93 | #define USB_CDC_CAP_NOTIFY 0x08 |
94 | 94 | ||
95 | /* "Union Functional Descriptor" from CDC spec 5.2.3.8 */ | 95 | /* "Union Functional Descriptor" from CDC spec 5.2.3.8 */ |
@@ -271,6 +271,11 @@ struct usb_cdc_notification { | |||
271 | __le16 wLength; | 271 | __le16 wLength; |
272 | } __attribute__ ((packed)); | 272 | } __attribute__ ((packed)); |
273 | 273 | ||
274 | struct usb_cdc_speed_change { | ||
275 | __le32 DLBitRRate; /* contains the downlink bit rate (IN pipe) */ | ||
276 | __le32 ULBitRate; /* contains the uplink bit rate (OUT pipe) */ | ||
277 | } __attribute__ ((packed)); | ||
278 | |||
274 | /*-------------------------------------------------------------------------*/ | 279 | /*-------------------------------------------------------------------------*/ |
275 | 280 | ||
276 | /* | 281 | /* |
@@ -292,7 +297,7 @@ struct usb_cdc_ncm_ntb_parameters { | |||
292 | __le16 wNdpOutDivisor; | 297 | __le16 wNdpOutDivisor; |
293 | __le16 wNdpOutPayloadRemainder; | 298 | __le16 wNdpOutPayloadRemainder; |
294 | __le16 wNdpOutAlignment; | 299 | __le16 wNdpOutAlignment; |
295 | __le16 wPadding2; | 300 | __le16 wNtbOutMaxDatagrams; |
296 | } __attribute__ ((packed)); | 301 | } __attribute__ ((packed)); |
297 | 302 | ||
298 | /* | 303 | /* |
@@ -307,7 +312,7 @@ struct usb_cdc_ncm_nth16 { | |||
307 | __le16 wHeaderLength; | 312 | __le16 wHeaderLength; |
308 | __le16 wSequence; | 313 | __le16 wSequence; |
309 | __le16 wBlockLength; | 314 | __le16 wBlockLength; |
310 | __le16 wFpIndex; | 315 | __le16 wNdpIndex; |
311 | } __attribute__ ((packed)); | 316 | } __attribute__ ((packed)); |
312 | 317 | ||
313 | struct usb_cdc_ncm_nth32 { | 318 | struct usb_cdc_ncm_nth32 { |
@@ -315,7 +320,7 @@ struct usb_cdc_ncm_nth32 { | |||
315 | __le16 wHeaderLength; | 320 | __le16 wHeaderLength; |
316 | __le16 wSequence; | 321 | __le16 wSequence; |
317 | __le32 dwBlockLength; | 322 | __le32 dwBlockLength; |
318 | __le32 dwFpIndex; | 323 | __le32 dwNdpIndex; |
319 | } __attribute__ ((packed)); | 324 | } __attribute__ ((packed)); |
320 | 325 | ||
321 | /* | 326 | /* |
@@ -337,7 +342,7 @@ struct usb_cdc_ncm_dpe16 { | |||
337 | struct usb_cdc_ncm_ndp16 { | 342 | struct usb_cdc_ncm_ndp16 { |
338 | __le32 dwSignature; | 343 | __le32 dwSignature; |
339 | __le16 wLength; | 344 | __le16 wLength; |
340 | __le16 wNextFpIndex; | 345 | __le16 wNextNdpIndex; |
341 | struct usb_cdc_ncm_dpe16 dpe16[0]; | 346 | struct usb_cdc_ncm_dpe16 dpe16[0]; |
342 | } __attribute__ ((packed)); | 347 | } __attribute__ ((packed)); |
343 | 348 | ||
@@ -375,6 +380,7 @@ struct usb_cdc_ncm_ndp32 { | |||
375 | #define USB_CDC_NCM_NCAP_ENCAP_COMMAND (1 << 2) | 380 | #define USB_CDC_NCM_NCAP_ENCAP_COMMAND (1 << 2) |
376 | #define USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE (1 << 3) | 381 | #define USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE (1 << 3) |
377 | #define USB_CDC_NCM_NCAP_CRC_MODE (1 << 4) | 382 | #define USB_CDC_NCM_NCAP_CRC_MODE (1 << 4) |
383 | #define USB_CDC_NCM_NCAP_NTB_INPUT_SIZE (1 << 5) | ||
378 | 384 | ||
379 | /* CDC NCM subclass Table 6-3: NTB Parameter Structure */ | 385 | /* CDC NCM subclass Table 6-3: NTB Parameter Structure */ |
380 | #define USB_CDC_NCM_NTB16_SUPPORTED (1 << 0) | 386 | #define USB_CDC_NCM_NTB16_SUPPORTED (1 << 0) |
@@ -392,6 +398,13 @@ struct usb_cdc_ncm_ndp32 { | |||
392 | #define USB_CDC_NCM_NTB_MIN_IN_SIZE 2048 | 398 | #define USB_CDC_NCM_NTB_MIN_IN_SIZE 2048 |
393 | #define USB_CDC_NCM_NTB_MIN_OUT_SIZE 2048 | 399 | #define USB_CDC_NCM_NTB_MIN_OUT_SIZE 2048 |
394 | 400 | ||
401 | /* NTB Input Size Structure */ | ||
402 | struct usb_cdc_ncm_ndp_input_size { | ||
403 | __le32 dwNtbInMaxSize; | ||
404 | __le16 wNtbInMaxDatagrams; | ||
405 | __le16 wReserved; | ||
406 | } __attribute__ ((packed)); | ||
407 | |||
395 | /* CDC NCM subclass 6.2.11 SetCrcMode */ | 408 | /* CDC NCM subclass 6.2.11 SetCrcMode */ |
396 | #define USB_CDC_NCM_CRC_NOT_APPENDED 0x00 | 409 | #define USB_CDC_NCM_CRC_NOT_APPENDED 0x00 |
397 | #define USB_CDC_NCM_CRC_APPENDED 0x01 | 410 | #define USB_CDC_NCM_CRC_APPENDED 0x01 |
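The wFpIndex/wNextFpIndex members are renamed to the NDP-based names used by the final NCM 1.0 specification; parsers follow the same chain under the new spellings. A rough sketch of locating the first NDP inside a 16-bit NTB (length and signature validation omitted, not taken from any in-tree driver):

#include <linux/kernel.h>
#include <linux/usb/cdc.h>

static struct usb_cdc_ncm_ndp16 *first_ndp16(void *ntb)
{
	struct usb_cdc_ncm_nth16 *nth = ntb;

	/* wNdpIndex (formerly wFpIndex) is the byte offset of the first
	 * NDP from the start of the NTB */
	return (struct usb_cdc_ncm_ndp16 *)((u8 *)ntb +
					    le16_to_cpu(nth->wNdpIndex));
}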
diff --git a/include/linux/usb/msm_hsusb_hw.h b/include/linux/usb/msm_hsusb_hw.h index b92e17349c7b..7d1babbff071 100644 --- a/include/linux/usb/msm_hsusb_hw.h +++ b/include/linux/usb/msm_hsusb_hw.h | |||
@@ -16,12 +16,8 @@ | |||
16 | #ifndef __LINUX_USB_GADGET_MSM72K_UDC_H__ | 16 | #ifndef __LINUX_USB_GADGET_MSM72K_UDC_H__ |
17 | #define __LINUX_USB_GADGET_MSM72K_UDC_H__ | 17 | #define __LINUX_USB_GADGET_MSM72K_UDC_H__ |
18 | 18 | ||
19 | #ifdef CONFIG_ARCH_MSM7X00A | ||
20 | #define USB_SBUSCFG (MSM_USB_BASE + 0x0090) | ||
21 | #else | ||
22 | #define USB_AHBBURST (MSM_USB_BASE + 0x0090) | 19 | #define USB_AHBBURST (MSM_USB_BASE + 0x0090) |
23 | #define USB_AHBMODE (MSM_USB_BASE + 0x0098) | 20 | #define USB_AHBMODE (MSM_USB_BASE + 0x0098) |
24 | #endif | ||
25 | #define USB_CAPLENGTH (MSM_USB_BASE + 0x0100) /* 8 bit */ | 21 | #define USB_CAPLENGTH (MSM_USB_BASE + 0x0100) /* 8 bit */ |
26 | 22 | ||
27 | #define USB_USBCMD (MSM_USB_BASE + 0x0140) | 23 | #define USB_USBCMD (MSM_USB_BASE + 0x0140) |
diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h index a85064db8f94..e4d333543a33 100644 --- a/include/linux/virtio_console.h +++ b/include/linux/virtio_console.h | |||
@@ -7,7 +7,8 @@ | |||
7 | * This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so | 7 | * This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so |
8 | * anyone can use the definitions to implement compatible drivers/servers. | 8 | * anyone can use the definitions to implement compatible drivers/servers. |
9 | * | 9 | * |
10 | * Copyright (C) Red Hat, Inc., 2009, 2010 | 10 | * Copyright (C) Red Hat, Inc., 2009, 2010, 2011 |
11 | * Copyright (C) Amit Shah <amit.shah@redhat.com>, 2009, 2010, 2011 | ||
11 | */ | 12 | */ |
12 | 13 | ||
13 | /* Feature bits */ | 14 | /* Feature bits */ |
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 1ac11586a2f5..f7998a3bf020 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
@@ -250,7 +250,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } | |||
250 | enum { | 250 | enum { |
251 | WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */ | 251 | WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */ |
252 | WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ | 252 | WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ |
253 | WQ_FREEZEABLE = 1 << 2, /* freeze during suspend */ | 253 | WQ_FREEZABLE = 1 << 2, /* freeze during suspend */ |
254 | WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */ | 254 | WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */ |
255 | WQ_HIGHPRI = 1 << 4, /* high priority */ | 255 | WQ_HIGHPRI = 1 << 4, /* high priority */ |
256 | WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */ | 256 | WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */ |
@@ -318,7 +318,7 @@ __alloc_workqueue_key(const char *name, unsigned int flags, int max_active, | |||
318 | /** | 318 | /** |
319 | * alloc_ordered_workqueue - allocate an ordered workqueue | 319 | * alloc_ordered_workqueue - allocate an ordered workqueue |
320 | * @name: name of the workqueue | 320 | * @name: name of the workqueue |
321 | * @flags: WQ_* flags (only WQ_FREEZEABLE and WQ_MEM_RECLAIM are meaningful) | 321 | * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful) |
322 | * | 322 | * |
323 | * Allocate an ordered workqueue. An ordered workqueue executes at | 323 | * Allocate an ordered workqueue. An ordered workqueue executes at |
324 | * most one work item at any given time in the queued order. They are | 324 | * most one work item at any given time in the queued order. They are |
@@ -335,8 +335,8 @@ alloc_ordered_workqueue(const char *name, unsigned int flags) | |||
335 | 335 | ||
336 | #define create_workqueue(name) \ | 336 | #define create_workqueue(name) \ |
337 | alloc_workqueue((name), WQ_MEM_RECLAIM, 1) | 337 | alloc_workqueue((name), WQ_MEM_RECLAIM, 1) |
338 | #define create_freezeable_workqueue(name) \ | 338 | #define create_freezable_workqueue(name) \ |
339 | alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1) | 339 | alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1) |
340 | #define create_singlethread_workqueue(name) \ | 340 | #define create_singlethread_workqueue(name) \ |
341 | alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1) | 341 | alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1) |
342 | 342 | ||
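After the rename, users move from create_freezeable_workqueue() and WQ_FREEZEABLE to the corrected spellings; behaviour is unchanged. A minimal usage sketch, with the "mydrv" queue name illustrative:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *mydrv_wq;

static int __init mydrv_init(void)
{
	/* unbound, max_active 1, frozen across suspend, usable for
	 * memory reclaim, exactly as the old spelling was */
	mydrv_wq = create_freezable_workqueue("mydrv");
	return mydrv_wq ? 0 : -ENOMEM;
}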
diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 4a3cd2cd2f5e..96e50e0ce3ca 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h | |||
@@ -89,6 +89,18 @@ | |||
89 | #define IPV6_ADDR_SCOPE_GLOBAL 0x0e | 89 | #define IPV6_ADDR_SCOPE_GLOBAL 0x0e |
90 | 90 | ||
91 | /* | 91 | /* |
92 | * Addr flags | ||
93 | */ | ||
94 | #ifdef __KERNEL__ | ||
95 | #define IPV6_ADDR_MC_FLAG_TRANSIENT(a) \ | ||
96 | ((a)->s6_addr[1] & 0x10) | ||
97 | #define IPV6_ADDR_MC_FLAG_PREFIX(a) \ | ||
98 | ((a)->s6_addr[1] & 0x20) | ||
99 | #define IPV6_ADDR_MC_FLAG_RENDEZVOUS(a) \ | ||
100 | ((a)->s6_addr[1] & 0x40) | ||
101 | #endif | ||
102 | |||
103 | /* | ||
92 | * fragmentation header | 104 | * fragmentation header |
93 | */ | 105 | */ |
94 | 106 | ||
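The new macros test the flag bits in the second byte of an IPv6 multicast address (the T, P and R flags of RFC 4291, RFC 3306 and RFC 3956 respectively). A small sketch:

#include <linux/types.h>
#include <net/ipv6.h>

static bool mcast_group_is_transient(const struct in6_addr *addr)
{
	/* the T flag (0x10 in s6_addr[1]) marks a dynamically assigned,
	 * non-permanent multicast group */
	return IPV6_ADDR_MC_FLAG_TRANSIENT(addr) != 0;
}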
diff --git a/include/net/netfilter/nf_tproxy_core.h b/include/net/netfilter/nf_tproxy_core.h index cd85b3bc8327..e505358d8999 100644 --- a/include/net/netfilter/nf_tproxy_core.h +++ b/include/net/netfilter/nf_tproxy_core.h | |||
@@ -201,18 +201,8 @@ nf_tproxy_get_sock_v6(struct net *net, const u8 protocol, | |||
201 | } | 201 | } |
202 | #endif | 202 | #endif |
203 | 203 | ||
204 | static inline void | ||
205 | nf_tproxy_put_sock(struct sock *sk) | ||
206 | { | ||
207 | /* TIME_WAIT inet sockets have to be handled differently */ | ||
208 | if ((sk->sk_protocol == IPPROTO_TCP) && (sk->sk_state == TCP_TIME_WAIT)) | ||
209 | inet_twsk_put(inet_twsk(sk)); | ||
210 | else | ||
211 | sock_put(sk); | ||
212 | } | ||
213 | |||
214 | /* assign a socket to the skb -- consumes sk */ | 204 | /* assign a socket to the skb -- consumes sk */ |
215 | int | 205 | void |
216 | nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk); | 206 | nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk); |
217 | 207 | ||
218 | #endif | 208 | #endif |
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 160a407c1963..04f8556313d5 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h | |||
@@ -199,7 +199,7 @@ struct tcf_proto { | |||
199 | 199 | ||
200 | struct qdisc_skb_cb { | 200 | struct qdisc_skb_cb { |
201 | unsigned int pkt_len; | 201 | unsigned int pkt_len; |
202 | char data[]; | 202 | long data[]; |
203 | }; | 203 | }; |
204 | 204 | ||
205 | static inline int qdisc_qlen(struct Qdisc *q) | 205 | static inline int qdisc_qlen(struct Qdisc *q) |
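Declaring data[] as long rather than char guarantees that the per-qdisc private area after pkt_len is naturally aligned, so schedulers that overlay their own control block on it avoid unaligned accesses on strict-alignment architectures. A hedged sketch of that overlay pattern; struct my_cb is hypothetical, qdisc_skb_cb() is the existing accessor in this header:

#include <linux/skbuff.h>
#include <net/sch_generic.h>

struct my_cb {
	u64 enqueue_time;	/* wants 8-byte alignment on some arches */
};

static inline struct my_cb *my_cb(const struct sk_buff *skb)
{
	/* data[] now starts on a long boundary, so the cast is safe */
	return (struct my_cb *)qdisc_skb_cb(skb)->data;
}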
diff --git a/include/pcmcia/ds.h b/include/pcmcia/ds.h index 8479b66c067b..3fd5064dd43a 100644 --- a/include/pcmcia/ds.h +++ b/include/pcmcia/ds.h | |||
@@ -261,6 +261,7 @@ void pcmcia_disable_device(struct pcmcia_device *p_dev); | |||
261 | #define CONF_ENABLE_ESR 0x0008 | 261 | #define CONF_ENABLE_ESR 0x0008 |
262 | #define CONF_ENABLE_IOCARD 0x0010 /* auto-enabled if IO resources or IRQ | 262 | #define CONF_ENABLE_IOCARD 0x0010 /* auto-enabled if IO resources or IRQ |
263 | * (CONF_ENABLE_IRQ) in use */ | 263 | * (CONF_ENABLE_IRQ) in use */ |
264 | #define CONF_ENABLE_ZVCARD 0x0020 | ||
264 | 265 | ||
265 | /* flags used by pcmcia_loop_config() autoconfiguration */ | 266 | /* flags used by pcmcia_loop_config() autoconfiguration */ |
266 | #define CONF_AUTO_CHECK_VCC 0x0100 /* check for matching Vcc? */ | 267 | #define CONF_AUTO_CHECK_VCC 0x0100 /* check for matching Vcc? */ |
diff --git a/include/sound/wm8903.h b/include/sound/wm8903.h index b4a0db2307ef..1eeebd534f7e 100644 --- a/include/sound/wm8903.h +++ b/include/sound/wm8903.h | |||
@@ -17,13 +17,9 @@ | |||
17 | /* | 17 | /* |
18 | * R6 (0x06) - Mic Bias Control 0 | 18 | * R6 (0x06) - Mic Bias Control 0 |
19 | */ | 19 | */ |
20 | #define WM8903_MICDET_HYST_ENA 0x0080 /* MICDET_HYST_ENA */ | 20 | #define WM8903_MICDET_THR_MASK 0x0030 /* MICDET_THR - [5:4] */ |
21 | #define WM8903_MICDET_HYST_ENA_MASK 0x0080 /* MICDET_HYST_ENA */ | 21 | #define WM8903_MICDET_THR_SHIFT 4 /* MICDET_THR - [5:4] */ |
22 | #define WM8903_MICDET_HYST_ENA_SHIFT 7 /* MICDET_HYST_ENA */ | 22 | #define WM8903_MICDET_THR_WIDTH 2 /* MICDET_THR - [5:4] */ |
23 | #define WM8903_MICDET_HYST_ENA_WIDTH 1 /* MICDET_HYST_ENA */ | ||
24 | #define WM8903_MICDET_THR_MASK 0x0070 /* MICDET_THR - [6:4] */ | ||
25 | #define WM8903_MICDET_THR_SHIFT 4 /* MICDET_THR - [6:4] */ | ||
26 | #define WM8903_MICDET_THR_WIDTH 3 /* MICDET_THR - [6:4] */ | ||
27 | #define WM8903_MICSHORT_THR_MASK 0x000C /* MICSHORT_THR - [3:2] */ | 23 | #define WM8903_MICSHORT_THR_MASK 0x000C /* MICSHORT_THR - [3:2] */ |
28 | #define WM8903_MICSHORT_THR_SHIFT 2 /* MICSHORT_THR - [3:2] */ | 24 | #define WM8903_MICSHORT_THR_SHIFT 2 /* MICSHORT_THR - [3:2] */ |
29 | #define WM8903_MICSHORT_THR_WIDTH 2 /* MICSHORT_THR - [3:2] */ | 25 | #define WM8903_MICSHORT_THR_WIDTH 2 /* MICSHORT_THR - [3:2] */ |
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 07fdfb6b9a9a..0828b6c8610a 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h | |||
@@ -8,7 +8,6 @@ | |||
8 | #include <scsi/scsi_cmnd.h> | 8 | #include <scsi/scsi_cmnd.h> |
9 | #include <net/sock.h> | 9 | #include <net/sock.h> |
10 | #include <net/tcp.h> | 10 | #include <net/tcp.h> |
11 | #include "target_core_mib.h" | ||
12 | 11 | ||
13 | #define TARGET_CORE_MOD_VERSION "v4.0.0-rc6" | 12 | #define TARGET_CORE_MOD_VERSION "v4.0.0-rc6" |
14 | #define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT)) | 13 | #define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT)) |
@@ -195,6 +194,21 @@ typedef enum { | |||
195 | SAM_TASK_ATTR_EMULATED | 194 | SAM_TASK_ATTR_EMULATED |
196 | } t10_task_attr_index_t; | 195 | } t10_task_attr_index_t; |
197 | 196 | ||
197 | /* | ||
198 | * Used for target SCSI statistics | ||
199 | */ | ||
200 | typedef enum { | ||
201 | SCSI_INST_INDEX, | ||
202 | SCSI_DEVICE_INDEX, | ||
203 | SCSI_AUTH_INTR_INDEX, | ||
204 | SCSI_INDEX_TYPE_MAX | ||
205 | } scsi_index_t; | ||
206 | |||
207 | struct scsi_index_table { | ||
208 | spinlock_t lock; | ||
209 | u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; | ||
210 | } ____cacheline_aligned; | ||
211 | |||
198 | struct se_cmd; | 212 | struct se_cmd; |
199 | 213 | ||
200 | struct t10_alua { | 214 | struct t10_alua { |
@@ -578,8 +592,6 @@ struct se_node_acl { | |||
578 | spinlock_t stats_lock; | 592 | spinlock_t stats_lock; |
579 | /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ | 593 | /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ |
580 | atomic_t acl_pr_ref_count; | 594 | atomic_t acl_pr_ref_count; |
581 | /* Used for MIB access */ | ||
582 | atomic_t mib_ref_count; | ||
583 | struct se_dev_entry *device_list; | 595 | struct se_dev_entry *device_list; |
584 | struct se_session *nacl_sess; | 596 | struct se_session *nacl_sess; |
585 | struct se_portal_group *se_tpg; | 597 | struct se_portal_group *se_tpg; |
@@ -595,8 +607,6 @@ struct se_node_acl { | |||
595 | } ____cacheline_aligned; | 607 | } ____cacheline_aligned; |
596 | 608 | ||
597 | struct se_session { | 609 | struct se_session { |
598 | /* Used for MIB access */ | ||
599 | atomic_t mib_ref_count; | ||
600 | u64 sess_bin_isid; | 610 | u64 sess_bin_isid; |
601 | struct se_node_acl *se_node_acl; | 611 | struct se_node_acl *se_node_acl; |
602 | struct se_portal_group *se_tpg; | 612 | struct se_portal_group *se_tpg; |
@@ -806,7 +816,6 @@ struct se_hba { | |||
806 | /* Virtual iSCSI devices attached. */ | 816 | /* Virtual iSCSI devices attached. */ |
807 | u32 dev_count; | 817 | u32 dev_count; |
808 | u32 hba_index; | 818 | u32 hba_index; |
809 | atomic_t dev_mib_access_count; | ||
810 | atomic_t load_balance_queue; | 819 | atomic_t load_balance_queue; |
811 | atomic_t left_queue_depth; | 820 | atomic_t left_queue_depth; |
812 | /* Maximum queue depth the HBA can handle. */ | 821 | /* Maximum queue depth the HBA can handle. */ |
@@ -845,6 +854,12 @@ struct se_lun { | |||
845 | 854 | ||
846 | #define SE_LUN(c) ((struct se_lun *)(c)->se_lun) | 855 | #define SE_LUN(c) ((struct se_lun *)(c)->se_lun) |
847 | 856 | ||
857 | struct scsi_port_stats { | ||
858 | u64 cmd_pdus; | ||
859 | u64 tx_data_octets; | ||
860 | u64 rx_data_octets; | ||
861 | } ____cacheline_aligned; | ||
862 | |||
848 | struct se_port { | 863 | struct se_port { |
849 | /* RELATIVE TARGET PORT IDENTIFER */ | 864 | /* RELATIVE TARGET PORT IDENTIFER */ |
850 | u16 sep_rtpi; | 865 | u16 sep_rtpi; |
@@ -867,6 +882,7 @@ struct se_port { | |||
867 | } ____cacheline_aligned; | 882 | } ____cacheline_aligned; |
868 | 883 | ||
869 | struct se_tpg_np { | 884 | struct se_tpg_np { |
885 | struct se_portal_group *tpg_np_parent; | ||
870 | struct config_group tpg_np_group; | 886 | struct config_group tpg_np_group; |
871 | } ____cacheline_aligned; | 887 | } ____cacheline_aligned; |
872 | 888 | ||
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index 66f44e56eb80..246940511579 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h | |||
@@ -111,6 +111,8 @@ struct se_subsystem_api; | |||
111 | 111 | ||
112 | extern int init_se_global(void); | 112 | extern int init_se_global(void); |
113 | extern void release_se_global(void); | 113 | extern void release_se_global(void); |
114 | extern void init_scsi_index_table(void); | ||
115 | extern u32 scsi_get_new_index(scsi_index_t); | ||
114 | extern void transport_init_queue_obj(struct se_queue_obj *); | 116 | extern void transport_init_queue_obj(struct se_queue_obj *); |
115 | extern int transport_subsystem_check_init(void); | 117 | extern int transport_subsystem_check_init(void); |
116 | extern int transport_subsystem_register(struct se_subsystem_api *); | 118 | extern int transport_subsystem_register(struct se_subsystem_api *); |
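scsi_get_new_index() hands out unique per-type indices from the scsi_index_table introduced in target_core_base.h, replacing the removed per-object MIB reference counters. A minimal sketch of a fabric module requesting a statistics instance index; the wrapper function is illustrative:

#include <target/target_core_base.h>
#include <target/target_core_transport.h>

static u32 my_fabric_new_instance_index(void)
{
	/* one counter per scsi_index_t value, serialized by the table lock */
	return scsi_get_new_index(SCSI_INST_INDEX);
}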
diff --git a/init/calibrate.c b/init/calibrate.c index 6eb48e53d61c..24fe022c55f9 100644 --- a/init/calibrate.c +++ b/init/calibrate.c | |||
@@ -66,7 +66,7 @@ static unsigned long __cpuinit calibrate_delay_direct(void) | |||
66 | pre_start = 0; | 66 | pre_start = 0; |
67 | read_current_timer(&start); | 67 | read_current_timer(&start); |
68 | start_jiffies = jiffies; | 68 | start_jiffies = jiffies; |
69 | while (jiffies <= (start_jiffies + 1)) { | 69 | while (time_before_eq(jiffies, start_jiffies + 1)) { |
70 | pre_start = start; | 70 | pre_start = start; |
71 | read_current_timer(&start); | 71 | read_current_timer(&start); |
72 | } | 72 | } |
@@ -74,8 +74,8 @@ static unsigned long __cpuinit calibrate_delay_direct(void) | |||
74 | 74 | ||
75 | pre_end = 0; | 75 | pre_end = 0; |
76 | end = post_start; | 76 | end = post_start; |
77 | while (jiffies <= | 77 | while (time_before_eq(jiffies, start_jiffies + 1 + |
78 | (start_jiffies + 1 + DELAY_CALIBRATION_TICKS)) { | 78 | DELAY_CALIBRATION_TICKS)) { |
79 | pre_end = end; | 79 | pre_end = end; |
80 | read_current_timer(&end); | 80 | read_current_timer(&end); |
81 | } | 81 | } |
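A raw `jiffies <= start_jiffies + 1` test misbehaves if jiffies wraps between the two reads; time_before_eq() compares via a signed difference (see include/linux/jiffies.h) and stays correct across the wrap. A small standalone illustration using a simplified form of the macro:

#include <limits.h>
#include <stdio.h>

/* simplified equivalent of the kernel's time_before_eq(a, b) */
#define my_time_before_eq(a, b)	((long)((a) - (b)) <= 0)

int main(void)
{
	unsigned long start = ULONG_MAX - 1;	/* just before a wrap */
	unsigned long deadline = start + 3;	/* wraps around to 1 */

	/* the raw comparison is fooled by the wrap ... */
	printf("raw:  %d\n", start <= deadline);			/* 0 */
	/* ... the signed-difference form is not */
	printf("safe: %d\n", my_time_before_eq(start, deadline));	/* 1 */
	return 0;
}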
diff --git a/kernel/capability.c b/kernel/capability.c index 2f05303715a5..9e9385f132c8 100644 --- a/kernel/capability.c +++ b/kernel/capability.c | |||
@@ -306,7 +306,7 @@ int capable(int cap) | |||
306 | BUG(); | 306 | BUG(); |
307 | } | 307 | } |
308 | 308 | ||
309 | if (security_capable(cap) == 0) { | 309 | if (security_capable(current_cred(), cap) == 0) { |
310 | current->flags |= PF_SUPERPRIV; | 310 | current->flags |= PF_SUPERPRIV; |
311 | return 1; | 311 | return 1; |
312 | } | 312 | } |
diff --git a/kernel/cred.c b/kernel/cred.c index 6a1aa004e376..3a9d6dd53a6c 100644 --- a/kernel/cred.c +++ b/kernel/cred.c | |||
@@ -252,13 +252,13 @@ struct cred *cred_alloc_blank(void) | |||
252 | #endif | 252 | #endif |
253 | 253 | ||
254 | atomic_set(&new->usage, 1); | 254 | atomic_set(&new->usage, 1); |
255 | #ifdef CONFIG_DEBUG_CREDENTIALS | ||
256 | new->magic = CRED_MAGIC; | ||
257 | #endif | ||
255 | 258 | ||
256 | if (security_cred_alloc_blank(new, GFP_KERNEL) < 0) | 259 | if (security_cred_alloc_blank(new, GFP_KERNEL) < 0) |
257 | goto error; | 260 | goto error; |
258 | 261 | ||
259 | #ifdef CONFIG_DEBUG_CREDENTIALS | ||
260 | new->magic = CRED_MAGIC; | ||
261 | #endif | ||
262 | return new; | 262 | return new; |
263 | 263 | ||
264 | error: | 264 | error: |
@@ -657,6 +657,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon) | |||
657 | validate_creds(old); | 657 | validate_creds(old); |
658 | 658 | ||
659 | *new = *old; | 659 | *new = *old; |
660 | atomic_set(&new->usage, 1); | ||
661 | set_cred_subscribers(new, 0); | ||
660 | get_uid(new->user); | 662 | get_uid(new->user); |
661 | get_group_info(new->group_info); | 663 | get_group_info(new->group_info); |
662 | 664 | ||
@@ -674,8 +676,6 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon) | |||
674 | if (security_prepare_creds(new, old, GFP_KERNEL) < 0) | 676 | if (security_prepare_creds(new, old, GFP_KERNEL) < 0) |
675 | goto error; | 677 | goto error; |
676 | 678 | ||
677 | atomic_set(&new->usage, 1); | ||
678 | set_cred_subscribers(new, 0); | ||
679 | put_cred(old); | 679 | put_cred(old); |
680 | validate_creds(new); | 680 | validate_creds(new); |
681 | return new; | 681 | return new; |
@@ -748,7 +748,11 @@ bool creds_are_invalid(const struct cred *cred) | |||
748 | if (cred->magic != CRED_MAGIC) | 748 | if (cred->magic != CRED_MAGIC) |
749 | return true; | 749 | return true; |
750 | #ifdef CONFIG_SECURITY_SELINUX | 750 | #ifdef CONFIG_SECURITY_SELINUX |
751 | if (selinux_is_enabled()) { | 751 | /* |
752 | * cred->security == NULL if security_cred_alloc_blank() or | ||
753 | * security_prepare_creds() returned an error. | ||
754 | */ | ||
755 | if (selinux_is_enabled() && cred->security) { | ||
752 | if ((unsigned long) cred->security < PAGE_SIZE) | 756 | if ((unsigned long) cred->security < PAGE_SIZE) |
753 | return true; | 757 | return true; |
754 | if ((*(u32 *)cred->security & 0xffffff00) == | 758 | if ((*(u32 *)cred->security & 0xffffff00) == |
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 4571ae7e085a..99c3bc8a6fb4 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h | |||
@@ -3,6 +3,12 @@ | |||
3 | */ | 3 | */ |
4 | #include <linux/irqdesc.h> | 4 | #include <linux/irqdesc.h> |
5 | 5 | ||
6 | #ifdef CONFIG_SPARSE_IRQ | ||
7 | # define IRQ_BITMAP_BITS (NR_IRQS + 8196) | ||
8 | #else | ||
9 | # define IRQ_BITMAP_BITS NR_IRQS | ||
10 | #endif | ||
11 | |||
6 | extern int noirqdebug; | 12 | extern int noirqdebug; |
7 | 13 | ||
8 | #define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data) | 14 | #define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data) |
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 282f20230e67..2039bea31bdf 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c | |||
@@ -94,7 +94,7 @@ int nr_irqs = NR_IRQS; | |||
94 | EXPORT_SYMBOL_GPL(nr_irqs); | 94 | EXPORT_SYMBOL_GPL(nr_irqs); |
95 | 95 | ||
96 | static DEFINE_MUTEX(sparse_irq_lock); | 96 | static DEFINE_MUTEX(sparse_irq_lock); |
97 | static DECLARE_BITMAP(allocated_irqs, NR_IRQS); | 97 | static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS); |
98 | 98 | ||
99 | #ifdef CONFIG_SPARSE_IRQ | 99 | #ifdef CONFIG_SPARSE_IRQ |
100 | 100 | ||
@@ -217,6 +217,15 @@ int __init early_irq_init(void) | |||
217 | initcnt = arch_probe_nr_irqs(); | 217 | initcnt = arch_probe_nr_irqs(); |
218 | printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt); | 218 | printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt); |
219 | 219 | ||
220 | if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS)) | ||
221 | nr_irqs = IRQ_BITMAP_BITS; | ||
222 | |||
223 | if (WARN_ON(initcnt > IRQ_BITMAP_BITS)) | ||
224 | initcnt = IRQ_BITMAP_BITS; | ||
225 | |||
226 | if (initcnt > nr_irqs) | ||
227 | nr_irqs = initcnt; | ||
228 | |||
220 | for (i = 0; i < initcnt; i++) { | 229 | for (i = 0; i < initcnt; i++) { |
221 | desc = alloc_desc(i, node); | 230 | desc = alloc_desc(i, node); |
222 | set_bit(i, allocated_irqs); | 231 | set_bit(i, allocated_irqs); |
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 0caa59f747dd..9033c1c70828 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -1100,7 +1100,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler, | |||
1100 | if (retval) | 1100 | if (retval) |
1101 | kfree(action); | 1101 | kfree(action); |
1102 | 1102 | ||
1103 | #ifdef CONFIG_DEBUG_SHIRQ | 1103 | #ifdef CONFIG_DEBUG_SHIRQ_FIXME |
1104 | if (!retval && (irqflags & IRQF_SHARED)) { | 1104 | if (!retval && (irqflags & IRQF_SHARED)) { |
1105 | /* | 1105 | /* |
1106 | * It's a shared IRQ -- the driver ought to be prepared for it | 1106 | * It's a shared IRQ -- the driver ought to be prepared for it |
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c index 891115a929aa..dc49358b73fa 100644 --- a/kernel/irq/resend.c +++ b/kernel/irq/resend.c | |||
@@ -23,7 +23,7 @@ | |||
23 | #ifdef CONFIG_HARDIRQS_SW_RESEND | 23 | #ifdef CONFIG_HARDIRQS_SW_RESEND |
24 | 24 | ||
25 | /* Bitmap to handle software resend of interrupts: */ | 25 | /* Bitmap to handle software resend of interrupts: */ |
26 | static DECLARE_BITMAP(irqs_resend, NR_IRQS); | 26 | static DECLARE_BITMAP(irqs_resend, IRQ_BITMAP_BITS); |
27 | 27 | ||
28 | /* | 28 | /* |
29 | * Run software resends of IRQ's | 29 | * Run software resends of IRQ's |
diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 999835b6112b..656222fcf767 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c | |||
@@ -782,6 +782,10 @@ retry: | |||
782 | raw_spin_unlock_irq(&ctx->lock); | 782 | raw_spin_unlock_irq(&ctx->lock); |
783 | } | 783 | } |
784 | 784 | ||
785 | #define MAX_INTERRUPTS (~0ULL) | ||
786 | |||
787 | static void perf_log_throttle(struct perf_event *event, int enable); | ||
788 | |||
785 | static int | 789 | static int |
786 | event_sched_in(struct perf_event *event, | 790 | event_sched_in(struct perf_event *event, |
787 | struct perf_cpu_context *cpuctx, | 791 | struct perf_cpu_context *cpuctx, |
@@ -794,6 +798,17 @@ event_sched_in(struct perf_event *event, | |||
794 | 798 | ||
795 | event->state = PERF_EVENT_STATE_ACTIVE; | 799 | event->state = PERF_EVENT_STATE_ACTIVE; |
796 | event->oncpu = smp_processor_id(); | 800 | event->oncpu = smp_processor_id(); |
801 | |||
802 | /* | ||
803 | * Unthrottle events, since we scheduled we might have missed several | ||
804 | * ticks already, also for a heavily scheduling task there is little | ||
805 | * guarantee it'll get a tick in a timely manner. | ||
806 | */ | ||
807 | if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { | ||
808 | perf_log_throttle(event, 1); | ||
809 | event->hw.interrupts = 0; | ||
810 | } | ||
811 | |||
797 | /* | 812 | /* |
798 | * The new state must be visible before we turn it on in the hardware: | 813 | * The new state must be visible before we turn it on in the hardware: |
799 | */ | 814 | */ |
@@ -1596,10 +1611,6 @@ void __perf_event_task_sched_in(struct task_struct *task) | |||
1596 | } | 1611 | } |
1597 | } | 1612 | } |
1598 | 1613 | ||
1599 | #define MAX_INTERRUPTS (~0ULL) | ||
1600 | |||
1601 | static void perf_log_throttle(struct perf_event *event, int enable); | ||
1602 | |||
1603 | static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) | 1614 | static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) |
1604 | { | 1615 | { |
1605 | u64 frequency = event->attr.sample_freq; | 1616 | u64 frequency = event->attr.sample_freq; |
diff --git a/kernel/power/main.c b/kernel/power/main.c index 7b5db6a8561e..701853042c28 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c | |||
@@ -326,7 +326,7 @@ EXPORT_SYMBOL_GPL(pm_wq); | |||
326 | 326 | ||
327 | static int __init pm_start_workqueue(void) | 327 | static int __init pm_start_workqueue(void) |
328 | { | 328 | { |
329 | pm_wq = alloc_workqueue("pm", WQ_FREEZEABLE, 0); | 329 | pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0); |
330 | 330 | ||
331 | return pm_wq ? 0 : -ENOMEM; | 331 | return pm_wq ? 0 : -ENOMEM; |
332 | } | 332 | } |
diff --git a/kernel/power/process.c b/kernel/power/process.c index d6d2a10320e0..0cf3a27a6c9d 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c | |||
@@ -22,7 +22,7 @@ | |||
22 | */ | 22 | */ |
23 | #define TIMEOUT (20 * HZ) | 23 | #define TIMEOUT (20 * HZ) |
24 | 24 | ||
25 | static inline int freezeable(struct task_struct * p) | 25 | static inline int freezable(struct task_struct * p) |
26 | { | 26 | { |
27 | if ((p == current) || | 27 | if ((p == current) || |
28 | (p->flags & PF_NOFREEZE) || | 28 | (p->flags & PF_NOFREEZE) || |
@@ -53,7 +53,7 @@ static int try_to_freeze_tasks(bool sig_only) | |||
53 | todo = 0; | 53 | todo = 0; |
54 | read_lock(&tasklist_lock); | 54 | read_lock(&tasklist_lock); |
55 | do_each_thread(g, p) { | 55 | do_each_thread(g, p) { |
56 | if (frozen(p) || !freezeable(p)) | 56 | if (frozen(p) || !freezable(p)) |
57 | continue; | 57 | continue; |
58 | 58 | ||
59 | if (!freeze_task(p, sig_only)) | 59 | if (!freeze_task(p, sig_only)) |
@@ -167,7 +167,7 @@ static void thaw_tasks(bool nosig_only) | |||
167 | 167 | ||
168 | read_lock(&tasklist_lock); | 168 | read_lock(&tasklist_lock); |
169 | do_each_thread(g, p) { | 169 | do_each_thread(g, p) { |
170 | if (!freezeable(p)) | 170 | if (!freezable(p)) |
171 | continue; | 171 | continue; |
172 | 172 | ||
173 | if (nosig_only && should_send_signal(p)) | 173 | if (nosig_only && should_send_signal(p)) |
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 0dac75ea4456..64db648ff911 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c | |||
@@ -1519,11 +1519,8 @@ static int | |||
1519 | swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm, | 1519 | swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm, |
1520 | unsigned int nr_pages, unsigned int nr_highmem) | 1520 | unsigned int nr_pages, unsigned int nr_highmem) |
1521 | { | 1521 | { |
1522 | int error = 0; | ||
1523 | |||
1524 | if (nr_highmem > 0) { | 1522 | if (nr_highmem > 0) { |
1525 | error = get_highmem_buffer(PG_ANY); | 1523 | if (get_highmem_buffer(PG_ANY)) |
1526 | if (error) | ||
1527 | goto err_out; | 1524 | goto err_out; |
1528 | if (nr_highmem > alloc_highmem) { | 1525 | if (nr_highmem > alloc_highmem) { |
1529 | nr_highmem -= alloc_highmem; | 1526 | nr_highmem -= alloc_highmem; |
@@ -1546,7 +1543,7 @@ swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm, | |||
1546 | 1543 | ||
1547 | err_out: | 1544 | err_out: |
1548 | swsusp_free(); | 1545 | swsusp_free(); |
1549 | return error; | 1546 | return -ENOMEM; |
1550 | } | 1547 | } |
1551 | 1548 | ||
1552 | asmlinkage int swsusp_save(void) | 1549 | asmlinkage int swsusp_save(void) |
diff --git a/kernel/printk.c b/kernel/printk.c index 2ddbdc73aade..36231525e22f 100644 --- a/kernel/printk.c +++ b/kernel/printk.c | |||
@@ -262,25 +262,47 @@ int dmesg_restrict = 1; | |||
262 | int dmesg_restrict; | 262 | int dmesg_restrict; |
263 | #endif | 263 | #endif |
264 | 264 | ||
265 | static int syslog_action_restricted(int type) | ||
266 | { | ||
267 | if (dmesg_restrict) | ||
268 | return 1; | ||
269 | /* Unless restricted, we allow "read all" and "get buffer size" for everybody */ | ||
270 | return type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER; | ||
271 | } | ||
272 | |||
273 | static int check_syslog_permissions(int type, bool from_file) | ||
274 | { | ||
275 | /* | ||
276 | * If this is from /proc/kmsg and we've already opened it, then we've | ||
277 | * already done the capabilities checks at open time. | ||
278 | */ | ||
279 | if (from_file && type != SYSLOG_ACTION_OPEN) | ||
280 | return 0; | ||
281 | |||
282 | if (syslog_action_restricted(type)) { | ||
283 | if (capable(CAP_SYSLOG)) | ||
284 | return 0; | ||
285 | /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */ | ||
286 | if (capable(CAP_SYS_ADMIN)) { | ||
287 | WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN " | ||
288 | "but no CAP_SYSLOG (deprecated).\n"); | ||
289 | return 0; | ||
290 | } | ||
291 | return -EPERM; | ||
292 | } | ||
293 | return 0; | ||
294 | } | ||
295 | |||
265 | int do_syslog(int type, char __user *buf, int len, bool from_file) | 296 | int do_syslog(int type, char __user *buf, int len, bool from_file) |
266 | { | 297 | { |
267 | unsigned i, j, limit, count; | 298 | unsigned i, j, limit, count; |
268 | int do_clear = 0; | 299 | int do_clear = 0; |
269 | char c; | 300 | char c; |
270 | int error = 0; | 301 | int error; |
271 | 302 | ||
272 | /* | 303 | error = check_syslog_permissions(type, from_file); |
273 | * If this is from /proc/kmsg we only do the capabilities checks | 304 | if (error) |
274 | * at open time. | 305 | goto out; |
275 | */ | ||
276 | if (type == SYSLOG_ACTION_OPEN || !from_file) { | ||
277 | if (dmesg_restrict && !capable(CAP_SYSLOG)) | ||
278 | goto warn; /* switch to return -EPERM after 2.6.39 */ | ||
279 | if ((type != SYSLOG_ACTION_READ_ALL && | ||
280 | type != SYSLOG_ACTION_SIZE_BUFFER) && | ||
281 | !capable(CAP_SYSLOG)) | ||
282 | goto warn; /* switch to return -EPERM after 2.6.39 */ | ||
283 | } | ||
284 | 306 | ||
285 | error = security_syslog(type); | 307 | error = security_syslog(type); |
286 | if (error) | 308 | if (error) |
@@ -423,12 +445,6 @@ int do_syslog(int type, char __user *buf, int len, bool from_file) | |||
423 | } | 445 | } |
424 | out: | 446 | out: |
425 | return error; | 447 | return error; |
426 | warn: | ||
427 | /* remove after 2.6.39 */ | ||
428 | if (capable(CAP_SYS_ADMIN)) | ||
429 | WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN " | ||
430 | "but no CAP_SYSLOG (deprecated and denied).\n"); | ||
431 | return -EPERM; | ||
432 | } | 448 | } |
433 | 449 | ||
434 | SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len) | 450 | SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len) |
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 99bbaa3e5b0d..1708b1e2972d 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c | |||
@@ -313,7 +313,7 @@ int ptrace_detach(struct task_struct *child, unsigned int data) | |||
313 | child->exit_code = data; | 313 | child->exit_code = data; |
314 | dead = __ptrace_detach(current, child); | 314 | dead = __ptrace_detach(current, child); |
315 | if (!child->exit_state) | 315 | if (!child->exit_state) |
316 | wake_up_process(child); | 316 | wake_up_state(child, TASK_TRACED | TASK_STOPPED); |
317 | } | 317 | } |
318 | write_unlock_irq(&tasklist_lock); | 318 | write_unlock_irq(&tasklist_lock); |
319 | 319 | ||
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 48b2761b5668..a3b5aff62606 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
@@ -600,4 +600,14 @@ int tick_broadcast_oneshot_active(void) | |||
600 | return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT; | 600 | return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT; |
601 | } | 601 | } |
602 | 602 | ||
603 | /* | ||
604 | * Check whether the broadcast device supports oneshot. | ||
605 | */ | ||
606 | bool tick_broadcast_oneshot_available(void) | ||
607 | { | ||
608 | struct clock_event_device *bc = tick_broadcast_device.evtdev; | ||
609 | |||
610 | return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false; | ||
611 | } | ||
612 | |||
603 | #endif | 613 | #endif |
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 051bc80a0c43..ed228ef6f6b8 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c | |||
@@ -51,7 +51,11 @@ int tick_is_oneshot_available(void) | |||
51 | { | 51 | { |
52 | struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); | 52 | struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); |
53 | 53 | ||
54 | return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT); | 54 | if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT)) |
55 | return 0; | ||
56 | if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) | ||
57 | return 1; | ||
58 | return tick_broadcast_oneshot_available(); | ||
55 | } | 59 | } |
56 | 60 | ||
57 | /* | 61 | /* |
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index 290eefbc1f60..f65d3a723a64 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h | |||
@@ -36,6 +36,7 @@ extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup); | |||
36 | extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc); | 36 | extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc); |
37 | extern int tick_broadcast_oneshot_active(void); | 37 | extern int tick_broadcast_oneshot_active(void); |
38 | extern void tick_check_oneshot_broadcast(int cpu); | 38 | extern void tick_check_oneshot_broadcast(int cpu); |
39 | bool tick_broadcast_oneshot_available(void); | ||
39 | # else /* BROADCAST */ | 40 | # else /* BROADCAST */ |
40 | static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) | 41 | static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) |
41 | { | 42 | { |
@@ -46,6 +47,7 @@ static inline void tick_broadcast_switch_to_oneshot(void) { } | |||
46 | static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { } | 47 | static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { } |
47 | static inline int tick_broadcast_oneshot_active(void) { return 0; } | 48 | static inline int tick_broadcast_oneshot_active(void) { return 0; } |
48 | static inline void tick_check_oneshot_broadcast(int cpu) { } | 49 | static inline void tick_check_oneshot_broadcast(int cpu) { } |
50 | static inline bool tick_broadcast_oneshot_available(void) { return true; } | ||
49 | # endif /* !BROADCAST */ | 51 | # endif /* !BROADCAST */ |
50 | 52 | ||
51 | #else /* !ONESHOT */ | 53 | #else /* !ONESHOT */ |
@@ -76,6 +78,7 @@ static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc) | |||
76 | return 0; | 78 | return 0; |
77 | } | 79 | } |
78 | static inline int tick_broadcast_oneshot_active(void) { return 0; } | 80 | static inline int tick_broadcast_oneshot_active(void) { return 0; } |
81 | static inline bool tick_broadcast_oneshot_available(void) { return false; } | ||
79 | #endif /* !TICK_ONESHOT */ | 82 | #endif /* !TICK_ONESHOT */ |
80 | 83 | ||
81 | /* | 84 | /* |
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c index 32a19f9397fc..3258455549f4 100644 --- a/kernel/time/timer_list.c +++ b/kernel/time/timer_list.c | |||
@@ -41,7 +41,7 @@ static void print_name_offset(struct seq_file *m, void *sym) | |||
41 | char symname[KSYM_NAME_LEN]; | 41 | char symname[KSYM_NAME_LEN]; |
42 | 42 | ||
43 | if (lookup_symbol_name((unsigned long)sym, symname) < 0) | 43 | if (lookup_symbol_name((unsigned long)sym, symname) < 0) |
44 | SEQ_printf(m, "<%p>", sym); | 44 | SEQ_printf(m, "<%pK>", sym); |
45 | else | 45 | else |
46 | SEQ_printf(m, "%s", symname); | 46 | SEQ_printf(m, "%s", symname); |
47 | } | 47 | } |
@@ -112,7 +112,7 @@ next_one: | |||
112 | static void | 112 | static void |
113 | print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now) | 113 | print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now) |
114 | { | 114 | { |
115 | SEQ_printf(m, " .base: %p\n", base); | 115 | SEQ_printf(m, " .base: %pK\n", base); |
116 | SEQ_printf(m, " .index: %d\n", | 116 | SEQ_printf(m, " .index: %d\n", |
117 | base->index); | 117 | base->index); |
118 | SEQ_printf(m, " .resolution: %Lu nsecs\n", | 118 | SEQ_printf(m, " .resolution: %Lu nsecs\n", |
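%pK is the restricted-pointer specifier introduced alongside the kernel.kptr_restrict sysctl: depending on its setting the address is printed normally, censored for unprivileged readers, or always censored, which keeps kernel addresses out of world-readable debug files. A brief sketch of the same pattern in another seq_file user; the function itself is illustrative:

#include <linux/seq_file.h>

static void show_handler(struct seq_file *m, const char *name, void *fn)
{
	/* privileged readers may see the real address, others a censored
	 * value, as governed by kernel.kptr_restrict */
	seq_printf(m, "%s: %pK\n", name, fn);
}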
diff --git a/kernel/timer.c b/kernel/timer.c index d53ce66daea0..d6459923d245 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
@@ -959,7 +959,7 @@ EXPORT_SYMBOL(try_to_del_timer_sync); | |||
959 | * | 959 | * |
960 | * Synchronization rules: Callers must prevent restarting of the timer, | 960 | * Synchronization rules: Callers must prevent restarting of the timer, |
961 | * otherwise this function is meaningless. It must not be called from | 961 | * otherwise this function is meaningless. It must not be called from |
962 | * hardirq contexts. The caller must not hold locks which would prevent | 962 | * interrupt contexts. The caller must not hold locks which would prevent |
963 | * completion of the timer's handler. The timer's handler must not call | 963 | * completion of the timer's handler. The timer's handler must not call |
964 | * add_timer_on(). Upon exit the timer is not queued and the handler is | 964 | * add_timer_on(). Upon exit the timer is not queued and the handler is |
965 | * not running on any CPU. | 965 | * not running on any CPU. |
@@ -971,12 +971,10 @@ int del_timer_sync(struct timer_list *timer) | |||
971 | #ifdef CONFIG_LOCKDEP | 971 | #ifdef CONFIG_LOCKDEP |
972 | unsigned long flags; | 972 | unsigned long flags; |
973 | 973 | ||
974 | raw_local_irq_save(flags); | 974 | local_irq_save(flags); |
975 | local_bh_disable(); | ||
976 | lock_map_acquire(&timer->lockdep_map); | 975 | lock_map_acquire(&timer->lockdep_map); |
977 | lock_map_release(&timer->lockdep_map); | 976 | lock_map_release(&timer->lockdep_map); |
978 | _local_bh_enable(); | 977 | local_irq_restore(flags); |
979 | raw_local_irq_restore(flags); | ||
980 | #endif | 978 | #endif |
981 | /* | 979 | /* |
982 | * don't use it in hardirq context, because it | 980 | * don't use it in hardirq context, because it |
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 153562d0b93c..d95721f33702 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c | |||
@@ -138,6 +138,13 @@ void __trace_note_message(struct blk_trace *bt, const char *fmt, ...) | |||
138 | !blk_tracer_enabled)) | 138 | !blk_tracer_enabled)) |
139 | return; | 139 | return; |
140 | 140 | ||
141 | /* | ||
142 | * If the BLK_TC_NOTIFY action mask isn't set, don't send any note | ||
143 | * message to the trace. | ||
144 | */ | ||
145 | if (!(bt->act_mask & BLK_TC_NOTIFY)) | ||
146 | return; | ||
147 | |||
141 | local_irq_save(flags); | 148 | local_irq_save(flags); |
142 | buf = per_cpu_ptr(bt->msg_data, smp_processor_id()); | 149 | buf = per_cpu_ptr(bt->msg_data, smp_processor_id()); |
143 | va_start(args, fmt); | 150 | va_start(args, fmt); |
diff --git a/kernel/watchdog.c b/kernel/watchdog.c index f37f974aa81b..18bb15776c57 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c | |||
@@ -363,8 +363,14 @@ static int watchdog_nmi_enable(int cpu) | |||
363 | goto out_save; | 363 | goto out_save; |
364 | } | 364 | } |
365 | 365 | ||
366 | printk(KERN_ERR "NMI watchdog disabled for cpu%i: unable to create perf event: %ld\n", | 366 | |
367 | cpu, PTR_ERR(event)); | 367 | /* vary the KERN level based on the returned errno */ |
368 | if (PTR_ERR(event) == -EOPNOTSUPP) | ||
369 | printk(KERN_INFO "NMI watchdog disabled (cpu%i): not supported (no LAPIC?)\n", cpu); | ||
370 | else if (PTR_ERR(event) == -ENOENT) | ||
371 | printk(KERN_WARNING "NMI watchdog disabled (cpu%i): hardware events not enabled\n", cpu); | ||
372 | else | ||
373 | printk(KERN_ERR "NMI watchdog disabled (cpu%i): unable to create perf event: %ld\n", cpu, PTR_ERR(event)); | ||
368 | return PTR_ERR(event); | 374 | return PTR_ERR(event); |
369 | 375 | ||
370 | /* success path */ | 376 | /* success path */ |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 11869faa6819..ee6578b578ad 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -79,7 +79,9 @@ enum { | |||
79 | MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */ | 79 | MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */ |
80 | IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */ | 80 | IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */ |
81 | 81 | ||
82 | MAYDAY_INITIAL_TIMEOUT = HZ / 100, /* call for help after 10ms */ | 82 | MAYDAY_INITIAL_TIMEOUT = HZ / 100 >= 2 ? HZ / 100 : 2, |
83 | /* call for help after 10ms | ||
84 | (min two ticks) */ | ||
83 | MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */ | 85 | MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */ |
84 | CREATE_COOLDOWN = HZ, /* time to breath after fail */ | 86 | CREATE_COOLDOWN = HZ, /* time to breath after fail */ |
85 | TRUSTEE_COOLDOWN = HZ / 10, /* for trustee draining */ | 87 | TRUSTEE_COOLDOWN = HZ / 10, /* for trustee draining */ |
@@ -2047,6 +2049,15 @@ repeat: | |||
2047 | move_linked_works(work, scheduled, &n); | 2049 | move_linked_works(work, scheduled, &n); |
2048 | 2050 | ||
2049 | process_scheduled_works(rescuer); | 2051 | process_scheduled_works(rescuer); |
2052 | |||
2053 | /* | ||
2054 | * Leave this gcwq. If keep_working() is %true, notify a | ||
2055 | * regular worker; otherwise, we end up with 0 concurrency | ||
2056 | * and stalling the execution. | ||
2057 | */ | ||
2058 | if (keep_working(gcwq)) | ||
2059 | wake_up_worker(gcwq); | ||
2060 | |||
2050 | spin_unlock_irq(&gcwq->lock); | 2061 | spin_unlock_irq(&gcwq->lock); |
2051 | } | 2062 | } |
2052 | 2063 | ||
@@ -2956,7 +2967,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name, | |||
2956 | */ | 2967 | */ |
2957 | spin_lock(&workqueue_lock); | 2968 | spin_lock(&workqueue_lock); |
2958 | 2969 | ||
2959 | if (workqueue_freezing && wq->flags & WQ_FREEZEABLE) | 2970 | if (workqueue_freezing && wq->flags & WQ_FREEZABLE) |
2960 | for_each_cwq_cpu(cpu, wq) | 2971 | for_each_cwq_cpu(cpu, wq) |
2961 | get_cwq(cpu, wq)->max_active = 0; | 2972 | get_cwq(cpu, wq)->max_active = 0; |
2962 | 2973 | ||
@@ -3068,7 +3079,7 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) | |||
3068 | 3079 | ||
3069 | spin_lock_irq(&gcwq->lock); | 3080 | spin_lock_irq(&gcwq->lock); |
3070 | 3081 | ||
3071 | if (!(wq->flags & WQ_FREEZEABLE) || | 3082 | if (!(wq->flags & WQ_FREEZABLE) || |
3072 | !(gcwq->flags & GCWQ_FREEZING)) | 3083 | !(gcwq->flags & GCWQ_FREEZING)) |
3073 | get_cwq(gcwq->cpu, wq)->max_active = max_active; | 3084 | get_cwq(gcwq->cpu, wq)->max_active = max_active; |
3074 | 3085 | ||
@@ -3318,7 +3329,7 @@ static int __cpuinit trustee_thread(void *__gcwq) | |||
3318 | * want to get it over with ASAP - spam rescuers, wake up as | 3329 | * want to get it over with ASAP - spam rescuers, wake up as |
3319 | * many idlers as necessary and create new ones till the | 3330 | * many idlers as necessary and create new ones till the |
3320 | * worklist is empty. Note that if the gcwq is frozen, there | 3331 | * worklist is empty. Note that if the gcwq is frozen, there |
3321 | * may be frozen works in freezeable cwqs. Don't declare | 3332 | * may be frozen works in freezable cwqs. Don't declare |
3322 | * completion while frozen. | 3333 | * completion while frozen. |
3323 | */ | 3334 | */ |
3324 | while (gcwq->nr_workers != gcwq->nr_idle || | 3335 | while (gcwq->nr_workers != gcwq->nr_idle || |
@@ -3576,9 +3587,9 @@ EXPORT_SYMBOL_GPL(work_on_cpu); | |||
3576 | /** | 3587 | /** |
3577 | * freeze_workqueues_begin - begin freezing workqueues | 3588 | * freeze_workqueues_begin - begin freezing workqueues |
3578 | * | 3589 | * |
3579 | * Start freezing workqueues. After this function returns, all | 3590 | * Start freezing workqueues. After this function returns, all freezable |
3580 | * freezeable workqueues will queue new works to their frozen_works | 3591 | * workqueues will queue new works to their frozen_works list instead of |
3581 | * list instead of gcwq->worklist. | 3592 | * gcwq->worklist. |
3582 | * | 3593 | * |
3583 | * CONTEXT: | 3594 | * CONTEXT: |
3584 | * Grabs and releases workqueue_lock and gcwq->lock's. | 3595 | * Grabs and releases workqueue_lock and gcwq->lock's. |
@@ -3604,7 +3615,7 @@ void freeze_workqueues_begin(void) | |||
3604 | list_for_each_entry(wq, &workqueues, list) { | 3615 | list_for_each_entry(wq, &workqueues, list) { |
3605 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 3616 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); |
3606 | 3617 | ||
3607 | if (cwq && wq->flags & WQ_FREEZEABLE) | 3618 | if (cwq && wq->flags & WQ_FREEZABLE) |
3608 | cwq->max_active = 0; | 3619 | cwq->max_active = 0; |
3609 | } | 3620 | } |
3610 | 3621 | ||
@@ -3615,7 +3626,7 @@ void freeze_workqueues_begin(void) | |||
3615 | } | 3626 | } |
3616 | 3627 | ||
3617 | /** | 3628 | /** |
3618 | * freeze_workqueues_busy - are freezeable workqueues still busy? | 3629 | * freeze_workqueues_busy - are freezable workqueues still busy? |
3619 | * | 3630 | * |
3620 | * Check whether freezing is complete. This function must be called | 3631 | * Check whether freezing is complete. This function must be called |
3621 | * between freeze_workqueues_begin() and thaw_workqueues(). | 3632 | * between freeze_workqueues_begin() and thaw_workqueues(). |
@@ -3624,8 +3635,8 @@ void freeze_workqueues_begin(void) | |||
3624 | * Grabs and releases workqueue_lock. | 3635 | * Grabs and releases workqueue_lock. |
3625 | * | 3636 | * |
3626 | * RETURNS: | 3637 | * RETURNS: |
3627 | * %true if some freezeable workqueues are still busy. %false if | 3638 | * %true if some freezable workqueues are still busy. %false if freezing |
3628 | * freezing is complete. | 3639 | * is complete. |
3629 | */ | 3640 | */ |
3630 | bool freeze_workqueues_busy(void) | 3641 | bool freeze_workqueues_busy(void) |
3631 | { | 3642 | { |
@@ -3645,7 +3656,7 @@ bool freeze_workqueues_busy(void) | |||
3645 | list_for_each_entry(wq, &workqueues, list) { | 3656 | list_for_each_entry(wq, &workqueues, list) { |
3646 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 3657 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); |
3647 | 3658 | ||
3648 | if (!cwq || !(wq->flags & WQ_FREEZEABLE)) | 3659 | if (!cwq || !(wq->flags & WQ_FREEZABLE)) |
3649 | continue; | 3660 | continue; |
3650 | 3661 | ||
3651 | BUG_ON(cwq->nr_active < 0); | 3662 | BUG_ON(cwq->nr_active < 0); |
@@ -3690,7 +3701,7 @@ void thaw_workqueues(void) | |||
3690 | list_for_each_entry(wq, &workqueues, list) { | 3701 | list_for_each_entry(wq, &workqueues, list) { |
3691 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 3702 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); |
3692 | 3703 | ||
3693 | if (!cwq || !(wq->flags & WQ_FREEZEABLE)) | 3704 | if (!cwq || !(wq->flags & WQ_FREEZABLE)) |
3694 | continue; | 3705 | continue; |
3695 | 3706 | ||
3696 | /* restore max_active and repopulate worklist */ | 3707 | /* restore max_active and repopulate worklist */ |
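The kernel/workqueue.c hunks above are a mechanical rename of WQ_FREEZEABLE to WQ_FREEZABLE (and of "freezeable" to "freezable" in comments and kerneldoc); the freeze protocol itself is unchanged: freeze_workqueues_begin() clamps max_active to 0 on every freezable cwq, and freeze_workqueues_busy() reports busy while any of them still has active work. A minimal standalone C model of that bookkeeping, with invented struct and field names rather than the kernel's real types:

/* Standalone model of the freeze/busy bookkeeping; illustrative names only. */
#include <stdbool.h>
#include <stdio.h>

#define WQ_FREEZABLE 0x1

struct cwq { int max_active; int nr_active; };
struct wq  { unsigned flags; struct cwq cwq; };

static void freeze_begin(struct wq *wqs, int n)
{
	for (int i = 0; i < n; i++)
		if (wqs[i].flags & WQ_FREEZABLE)
			wqs[i].cwq.max_active = 0;   /* stop issuing new work */
}

static bool freeze_busy(struct wq *wqs, int n)
{
	for (int i = 0; i < n; i++) {
		if (!(wqs[i].flags & WQ_FREEZABLE))
			continue;                    /* non-freezable wqs keep running */
		if (wqs[i].cwq.nr_active > 0)
			return true;                 /* still draining in-flight work */
	}
	return false;
}

int main(void)
{
	struct wq wqs[2] = {
		{ .flags = WQ_FREEZABLE, .cwq = { .max_active = 16, .nr_active = 2 } },
		{ .flags = 0,            .cwq = { .max_active = 16, .nr_active = 5 } },
	};

	freeze_begin(wqs, 2);
	printf("busy after freeze_begin: %d\n", freeze_busy(wqs, 2)); /* 1 */
	wqs[0].cwq.nr_active = 0;                                     /* work drains */
	printf("busy after drain: %d\n", freeze_busy(wqs, 2));        /* 0 */
	return 0;
}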
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 3967c2356e37..2b97418c67e2 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -805,7 +805,7 @@ config ARCH_WANT_FRAME_POINTERS | |||
805 | config FRAME_POINTER | 805 | config FRAME_POINTER |
806 | bool "Compile the kernel with frame pointers" | 806 | bool "Compile the kernel with frame pointers" |
807 | depends on DEBUG_KERNEL && \ | 807 | depends on DEBUG_KERNEL && \ |
808 | (CRIS || M68K || M68KNOMMU || FRV || UML || \ | 808 | (CRIS || M68K || FRV || UML || \ |
809 | AVR32 || SUPERH || BLACKFIN || MN10300) || \ | 809 | AVR32 || SUPERH || BLACKFIN || MN10300) || \ |
810 | ARCH_WANT_FRAME_POINTERS | 810 | ARCH_WANT_FRAME_POINTERS |
811 | default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS | 811 | default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS |
diff --git a/lib/list_debug.c b/lib/list_debug.c index 344c710d16ca..b8029a5583ff 100644 --- a/lib/list_debug.c +++ b/lib/list_debug.c | |||
@@ -35,6 +35,31 @@ void __list_add(struct list_head *new, | |||
35 | } | 35 | } |
36 | EXPORT_SYMBOL(__list_add); | 36 | EXPORT_SYMBOL(__list_add); |
37 | 37 | ||
38 | void __list_del_entry(struct list_head *entry) | ||
39 | { | ||
40 | struct list_head *prev, *next; | ||
41 | |||
42 | prev = entry->prev; | ||
43 | next = entry->next; | ||
44 | |||
45 | if (WARN(next == LIST_POISON1, | ||
46 | "list_del corruption, %p->next is LIST_POISON1 (%p)\n", | ||
47 | entry, LIST_POISON1) || | ||
48 | WARN(prev == LIST_POISON2, | ||
49 | "list_del corruption, %p->prev is LIST_POISON2 (%p)\n", | ||
50 | entry, LIST_POISON2) || | ||
51 | WARN(prev->next != entry, | ||
52 | "list_del corruption. prev->next should be %p, " | ||
53 | "but was %p\n", entry, prev->next) || | ||
54 | WARN(next->prev != entry, | ||
55 | "list_del corruption. next->prev should be %p, " | ||
56 | "but was %p\n", entry, next->prev)) | ||
57 | return; | ||
58 | |||
59 | __list_del(prev, next); | ||
60 | } | ||
61 | EXPORT_SYMBOL(__list_del_entry); | ||
62 | |||
38 | /** | 63 | /** |
39 | * list_del - deletes entry from list. | 64 | * list_del - deletes entry from list. |
40 | * @entry: the element to delete from the list. | 65 | * @entry: the element to delete from the list. |
@@ -43,19 +68,7 @@ EXPORT_SYMBOL(__list_add); | |||
43 | */ | 68 | */ |
44 | void list_del(struct list_head *entry) | 69 | void list_del(struct list_head *entry) |
45 | { | 70 | { |
46 | WARN(entry->next == LIST_POISON1, | 71 | __list_del_entry(entry); |
47 | "list_del corruption, next is LIST_POISON1 (%p)\n", | ||
48 | LIST_POISON1); | ||
49 | WARN(entry->next != LIST_POISON1 && entry->prev == LIST_POISON2, | ||
50 | "list_del corruption, prev is LIST_POISON2 (%p)\n", | ||
51 | LIST_POISON2); | ||
52 | WARN(entry->prev->next != entry, | ||
53 | "list_del corruption. prev->next should be %p, " | ||
54 | "but was %p\n", entry, entry->prev->next); | ||
55 | WARN(entry->next->prev != entry, | ||
56 | "list_del corruption. next->prev should be %p, " | ||
57 | "but was %p\n", entry, entry->next->prev); | ||
58 | __list_del(entry->prev, entry->next); | ||
59 | entry->next = LIST_POISON1; | 72 | entry->next = LIST_POISON1; |
60 | entry->prev = LIST_POISON2; | 73 | entry->prev = LIST_POISON2; |
61 | } | 74 | } |
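The lib/list_debug.c change factors the corruption checks out of list_del() into a new __list_del_entry() helper so other deletion paths can share them, and makes the checks short-circuit: if any WARN fires, the unlink is skipped entirely instead of proceeding on an already-corrupt list. A standalone sketch of the same validate-then-unlink-then-poison pattern, using a toy node type and made-up poison values rather than the kernel list API:

#include <stdbool.h>
#include <stdio.h>

#define POISON1 ((struct node *)0x100)   /* illustrative poison values */
#define POISON2 ((struct node *)0x200)

struct node { struct node *prev, *next; };

static bool del_entry(struct node *e)
{
	struct node *prev = e->prev, *next = e->next;

	/* refuse to touch an entry that is already deleted or inconsistent */
	if (next == POISON1 || prev == POISON2 ||
	    prev->next != e || next->prev != e) {
		fprintf(stderr, "list corruption at %p, not unlinking\n", (void *)e);
		return false;
	}
	prev->next = next;
	next->prev = prev;
	e->next = POISON1;   /* poison so a double delete is caught above */
	e->prev = POISON2;
	return true;
}

int main(void)
{
	struct node head, a;
	head.next = &a; head.prev = &a;
	a.next = &head; a.prev = &head;

	printf("first delete: %d\n", del_entry(&a));   /* 1 */
	printf("double delete: %d\n", del_entry(&a));  /* 0, caught by poison check */
	return 0;
}

The bail-out-on-corruption behaviour is the part the refactor adds: the old list_del() warned and then unlinked anyway.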
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index c47bbe11b804..93ca08b8a451 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c | |||
@@ -686,8 +686,10 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, | |||
686 | /* | 686 | /* |
687 | * Ensure that the address returned is DMA'ble | 687 | * Ensure that the address returned is DMA'ble |
688 | */ | 688 | */ |
689 | if (!dma_capable(dev, dev_addr, size)) | 689 | if (!dma_capable(dev, dev_addr, size)) { |
690 | panic("map_single: bounce buffer is not DMA'ble"); | 690 | swiotlb_tbl_unmap_single(dev, map, size, dir); |
691 | dev_addr = swiotlb_virt_to_bus(dev, io_tlb_overflow_buffer); | ||
692 | } | ||
691 | 693 | ||
692 | return dev_addr; | 694 | return dev_addr; |
693 | } | 695 | } |
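The lib/swiotlb.c hunk replaces a hard panic with recovery: when the freshly mapped bounce buffer turns out not to be addressable by the device, it is unmapped again and the overflow buffer's address is returned so the caller can fail the DMA mapping gracefully. The shape of the change is "undo the partial work, hand back a sentinel, let the caller cope". A small self-contained sketch of that error-path shape, with invented names (map_bounce, addr_ok) standing in for the swiotlb internals:

#include <stdio.h>
#include <stdbool.h>

struct dev { unsigned long dma_limit, overflow_addr; };

static bool addr_ok(struct dev *d, unsigned long addr, size_t len)
{
	return addr + len <= d->dma_limit;
}

static unsigned long map_bounce(struct dev *d, unsigned long slot, size_t len)
{
	if (!addr_ok(d, slot, len)) {
		/* undo the partial work (the kernel calls swiotlb_tbl_unmap_single
		 * here) and return a sentinel the caller can detect, not panic */
		printf("releasing unusable slot 0x%lx\n", slot);
		return d->overflow_addr;
	}
	return slot;
}

int main(void)
{
	struct dev d = { .dma_limit = 0x1000000, .overflow_addr = 0xdead000 };
	printf("0x%lx\n", map_bounce(&d, 0x800000, 4096));   /* usable slot */
	printf("0x%lx\n", map_bounce(&d, 0x2000000, 4096));  /* falls back to overflow */
	return 0;
}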
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index b6c1ce3c53b5..3e29781ee762 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -1811,6 +1811,8 @@ static void collapse_huge_page(struct mm_struct *mm, | |||
1811 | /* VM_PFNMAP vmas may have vm_ops null but vm_file set */ | 1811 | /* VM_PFNMAP vmas may have vm_ops null but vm_file set */ |
1812 | if (!vma->anon_vma || vma->vm_ops || vma->vm_file) | 1812 | if (!vma->anon_vma || vma->vm_ops || vma->vm_file) |
1813 | goto out; | 1813 | goto out; |
1814 | if (is_vma_temporary_stack(vma)) | ||
1815 | goto out; | ||
1814 | VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); | 1816 | VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); |
1815 | 1817 | ||
1816 | pgd = pgd_offset(mm, address); | 1818 | pgd = pgd_offset(mm, address); |
@@ -1852,7 +1854,6 @@ static void collapse_huge_page(struct mm_struct *mm, | |||
1852 | set_pmd_at(mm, address, pmd, _pmd); | 1854 | set_pmd_at(mm, address, pmd, _pmd); |
1853 | spin_unlock(&mm->page_table_lock); | 1855 | spin_unlock(&mm->page_table_lock); |
1854 | anon_vma_unlock(vma->anon_vma); | 1856 | anon_vma_unlock(vma->anon_vma); |
1855 | mem_cgroup_uncharge_page(new_page); | ||
1856 | goto out; | 1857 | goto out; |
1857 | } | 1858 | } |
1858 | 1859 | ||
@@ -1898,6 +1899,7 @@ out_up_write: | |||
1898 | return; | 1899 | return; |
1899 | 1900 | ||
1900 | out: | 1901 | out: |
1902 | mem_cgroup_uncharge_page(new_page); | ||
1901 | #ifdef CONFIG_NUMA | 1903 | #ifdef CONFIG_NUMA |
1902 | put_page(new_page); | 1904 | put_page(new_page); |
1903 | #endif | 1905 | #endif |
@@ -2032,32 +2034,27 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, | |||
2032 | if ((!(vma->vm_flags & VM_HUGEPAGE) && | 2034 | if ((!(vma->vm_flags & VM_HUGEPAGE) && |
2033 | !khugepaged_always()) || | 2035 | !khugepaged_always()) || |
2034 | (vma->vm_flags & VM_NOHUGEPAGE)) { | 2036 | (vma->vm_flags & VM_NOHUGEPAGE)) { |
2037 | skip: | ||
2035 | progress++; | 2038 | progress++; |
2036 | continue; | 2039 | continue; |
2037 | } | 2040 | } |
2038 | |||
2039 | /* VM_PFNMAP vmas may have vm_ops null but vm_file set */ | 2041 | /* VM_PFNMAP vmas may have vm_ops null but vm_file set */ |
2040 | if (!vma->anon_vma || vma->vm_ops || vma->vm_file) { | 2042 | if (!vma->anon_vma || vma->vm_ops || vma->vm_file) |
2041 | khugepaged_scan.address = vma->vm_end; | 2043 | goto skip; |
2042 | progress++; | 2044 | if (is_vma_temporary_stack(vma)) |
2043 | continue; | 2045 | goto skip; |
2044 | } | 2046 | |
2045 | VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); | 2047 | VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); |
2046 | 2048 | ||
2047 | hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; | 2049 | hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; |
2048 | hend = vma->vm_end & HPAGE_PMD_MASK; | 2050 | hend = vma->vm_end & HPAGE_PMD_MASK; |
2049 | if (hstart >= hend) { | 2051 | if (hstart >= hend) |
2050 | progress++; | 2052 | goto skip; |
2051 | continue; | 2053 | if (khugepaged_scan.address > hend) |
2052 | } | 2054 | goto skip; |
2053 | if (khugepaged_scan.address < hstart) | 2055 | if (khugepaged_scan.address < hstart) |
2054 | khugepaged_scan.address = hstart; | 2056 | khugepaged_scan.address = hstart; |
2055 | if (khugepaged_scan.address > hend) { | 2057 | VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); |
2056 | khugepaged_scan.address = hend + HPAGE_PMD_SIZE; | ||
2057 | progress++; | ||
2058 | continue; | ||
2059 | } | ||
2060 | BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); | ||
2061 | 2058 | ||
2062 | while (khugepaged_scan.address < hend) { | 2059 | while (khugepaged_scan.address < hend) { |
2063 | int ret; | 2060 | int ret; |
@@ -2086,7 +2083,7 @@ breakouterloop: | |||
2086 | breakouterloop_mmap_sem: | 2083 | breakouterloop_mmap_sem: |
2087 | 2084 | ||
2088 | spin_lock(&khugepaged_mm_lock); | 2085 | spin_lock(&khugepaged_mm_lock); |
2089 | BUG_ON(khugepaged_scan.mm_slot != mm_slot); | 2086 | VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot); |
2090 | /* | 2087 | /* |
2091 | * Release the current mm_slot if this mm is about to die, or | 2088 | * Release the current mm_slot if this mm is about to die, or |
2092 | * if we scanned all vmas of this mm. | 2089 | * if we scanned all vmas of this mm. |
@@ -2241,9 +2238,9 @@ static int khugepaged(void *none) | |||
2241 | 2238 | ||
2242 | for (;;) { | 2239 | for (;;) { |
2243 | mutex_unlock(&khugepaged_mutex); | 2240 | mutex_unlock(&khugepaged_mutex); |
2244 | BUG_ON(khugepaged_thread != current); | 2241 | VM_BUG_ON(khugepaged_thread != current); |
2245 | khugepaged_loop(); | 2242 | khugepaged_loop(); |
2246 | BUG_ON(khugepaged_thread != current); | 2243 | VM_BUG_ON(khugepaged_thread != current); |
2247 | 2244 | ||
2248 | mutex_lock(&khugepaged_mutex); | 2245 | mutex_lock(&khugepaged_mutex); |
2249 | if (!khugepaged_enabled()) | 2246 | if (!khugepaged_enabled()) |
diff --git a/mm/memblock.c b/mm/memblock.c index bdba245d8afd..4618fda975a0 100644 --- a/mm/memblock.c +++ b/mm/memblock.c | |||
@@ -137,8 +137,6 @@ static phys_addr_t __init_memblock memblock_find_base(phys_addr_t size, | |||
137 | 137 | ||
138 | BUG_ON(0 == size); | 138 | BUG_ON(0 == size); |
139 | 139 | ||
140 | size = memblock_align_up(size, align); | ||
141 | |||
142 | /* Pump up max_addr */ | 140 | /* Pump up max_addr */ |
143 | if (end == MEMBLOCK_ALLOC_ACCESSIBLE) | 141 | if (end == MEMBLOCK_ALLOC_ACCESSIBLE) |
144 | end = memblock.current_limit; | 142 | end = memblock.current_limit; |
diff --git a/mm/memory.c b/mm/memory.c index 31250faff390..5823698c2b71 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -2219,7 +2219,6 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2219 | &ptl); | 2219 | &ptl); |
2220 | if (!pte_same(*page_table, orig_pte)) { | 2220 | if (!pte_same(*page_table, orig_pte)) { |
2221 | unlock_page(old_page); | 2221 | unlock_page(old_page); |
2222 | page_cache_release(old_page); | ||
2223 | goto unlock; | 2222 | goto unlock; |
2224 | } | 2223 | } |
2225 | page_cache_release(old_page); | 2224 | page_cache_release(old_page); |
@@ -2289,7 +2288,6 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2289 | &ptl); | 2288 | &ptl); |
2290 | if (!pte_same(*page_table, orig_pte)) { | 2289 | if (!pte_same(*page_table, orig_pte)) { |
2291 | unlock_page(old_page); | 2290 | unlock_page(old_page); |
2292 | page_cache_release(old_page); | ||
2293 | goto unlock; | 2291 | goto unlock; |
2294 | } | 2292 | } |
2295 | 2293 | ||
@@ -2367,16 +2365,6 @@ gotten: | |||
2367 | } | 2365 | } |
2368 | __SetPageUptodate(new_page); | 2366 | __SetPageUptodate(new_page); |
2369 | 2367 | ||
2370 | /* | ||
2371 | * Don't let another task, with possibly unlocked vma, | ||
2372 | * keep the mlocked page. | ||
2373 | */ | ||
2374 | if ((vma->vm_flags & VM_LOCKED) && old_page) { | ||
2375 | lock_page(old_page); /* for LRU manipulation */ | ||
2376 | clear_page_mlock(old_page); | ||
2377 | unlock_page(old_page); | ||
2378 | } | ||
2379 | |||
2380 | if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)) | 2368 | if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)) |
2381 | goto oom_free_new; | 2369 | goto oom_free_new; |
2382 | 2370 | ||
@@ -2444,10 +2432,20 @@ gotten: | |||
2444 | 2432 | ||
2445 | if (new_page) | 2433 | if (new_page) |
2446 | page_cache_release(new_page); | 2434 | page_cache_release(new_page); |
2447 | if (old_page) | ||
2448 | page_cache_release(old_page); | ||
2449 | unlock: | 2435 | unlock: |
2450 | pte_unmap_unlock(page_table, ptl); | 2436 | pte_unmap_unlock(page_table, ptl); |
2437 | if (old_page) { | ||
2438 | /* | ||
2439 | * Don't let another task, with possibly unlocked vma, | ||
2440 | * keep the mlocked page. | ||
2441 | */ | ||
2442 | if ((ret & VM_FAULT_WRITE) && (vma->vm_flags & VM_LOCKED)) { | ||
2443 | lock_page(old_page); /* LRU manipulation */ | ||
2444 | munlock_vma_page(old_page); | ||
2445 | unlock_page(old_page); | ||
2446 | } | ||
2447 | page_cache_release(old_page); | ||
2448 | } | ||
2451 | return ret; | 2449 | return ret; |
2452 | oom_free_new: | 2450 | oom_free_new: |
2453 | page_cache_release(new_page); | 2451 | page_cache_release(new_page); |
@@ -2650,6 +2648,7 @@ void unmap_mapping_range(struct address_space *mapping, | |||
2650 | details.last_index = ULONG_MAX; | 2648 | details.last_index = ULONG_MAX; |
2651 | details.i_mmap_lock = &mapping->i_mmap_lock; | 2649 | details.i_mmap_lock = &mapping->i_mmap_lock; |
2652 | 2650 | ||
2651 | mutex_lock(&mapping->unmap_mutex); | ||
2653 | spin_lock(&mapping->i_mmap_lock); | 2652 | spin_lock(&mapping->i_mmap_lock); |
2654 | 2653 | ||
2655 | /* Protect against endless unmapping loops */ | 2654 | /* Protect against endless unmapping loops */ |
@@ -2666,6 +2665,7 @@ void unmap_mapping_range(struct address_space *mapping, | |||
2666 | if (unlikely(!list_empty(&mapping->i_mmap_nonlinear))) | 2665 | if (unlikely(!list_empty(&mapping->i_mmap_nonlinear))) |
2667 | unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details); | 2666 | unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details); |
2668 | spin_unlock(&mapping->i_mmap_lock); | 2667 | spin_unlock(&mapping->i_mmap_lock); |
2668 | mutex_unlock(&mapping->unmap_mutex); | ||
2669 | } | 2669 | } |
2670 | EXPORT_SYMBOL(unmap_mapping_range); | 2670 | EXPORT_SYMBOL(unmap_mapping_range); |
2671 | 2671 | ||
@@ -3053,12 +3053,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
3053 | goto out; | 3053 | goto out; |
3054 | } | 3054 | } |
3055 | charged = 1; | 3055 | charged = 1; |
3056 | /* | ||
3057 | * Don't let another task, with possibly unlocked vma, | ||
3058 | * keep the mlocked page. | ||
3059 | */ | ||
3060 | if (vma->vm_flags & VM_LOCKED) | ||
3061 | clear_page_mlock(vmf.page); | ||
3062 | copy_user_highpage(page, vmf.page, address, vma); | 3056 | copy_user_highpage(page, vmf.page, address, vma); |
3063 | __SetPageUptodate(page); | 3057 | __SetPageUptodate(page); |
3064 | } else { | 3058 | } else { |
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 368fc9d23610..49355a970be2 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
@@ -1830,7 +1830,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, | |||
1830 | if (unlikely(pol->mode == MPOL_INTERLEAVE)) { | 1830 | if (unlikely(pol->mode == MPOL_INTERLEAVE)) { |
1831 | unsigned nid; | 1831 | unsigned nid; |
1832 | 1832 | ||
1833 | nid = interleave_nid(pol, vma, addr, PAGE_SHIFT); | 1833 | nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); |
1834 | mpol_cond_put(pol); | 1834 | mpol_cond_put(pol); |
1835 | page = alloc_page_interleave(gfp, order, nid); | 1835 | page = alloc_page_interleave(gfp, order, nid); |
1836 | put_mems_allowed(); | 1836 | put_mems_allowed(); |
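The one-line mm/mempolicy.c fix matters for huge pages: interleave_nid() was still computing the interleave offset at small-page granularity even for huge-page orders, so consecutive huge pages could all hash to the same node (for instance with 2, 4 or 8 nodes, since 512 small pages fit in a 2 MiB page), defeating the interleave policy. Passing PAGE_SHIFT + order keys the choice off the huge-page-aligned address instead. The arithmetic below uses a simplified modulo interleave (the kernel's hash differs) just to show how the extra shift changes the spread:

#include <stdio.h>

#define PAGE_SHIFT 12
#define NR_NODES   4

/* simplified interleave: node = (address >> shift) % NR_NODES */
static int interleave_nid(unsigned long addr, int order)
{
	return (int)((addr >> (PAGE_SHIFT + order)) % NR_NODES);
}

int main(void)
{
	unsigned long base = 0x40000000UL;   /* arbitrary 2 MiB-aligned start */
	int order = 9;                       /* 2 MiB huge page on x86 */

	for (int i = 0; i < 4; i++) {
		unsigned long addr = base + ((unsigned long)i << (PAGE_SHIFT + order));
		printf("huge page %d: old nid %d, new nid %d\n", i,
		       (int)((addr >> PAGE_SHIFT) % NR_NODES),   /* old: 0,0,0,0 */
		       interleave_nid(addr, order));             /* new: 0,1,2,3 */
	}
	return 0;
}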
diff --git a/mm/migrate.c b/mm/migrate.c index 766115253807..352de555626c 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
@@ -1287,14 +1287,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages, | |||
1287 | return -EPERM; | 1287 | return -EPERM; |
1288 | 1288 | ||
1289 | /* Find the mm_struct */ | 1289 | /* Find the mm_struct */ |
1290 | read_lock(&tasklist_lock); | 1290 | rcu_read_lock(); |
1291 | task = pid ? find_task_by_vpid(pid) : current; | 1291 | task = pid ? find_task_by_vpid(pid) : current; |
1292 | if (!task) { | 1292 | if (!task) { |
1293 | read_unlock(&tasklist_lock); | 1293 | rcu_read_unlock(); |
1294 | return -ESRCH; | 1294 | return -ESRCH; |
1295 | } | 1295 | } |
1296 | mm = get_task_mm(task); | 1296 | mm = get_task_mm(task); |
1297 | read_unlock(&tasklist_lock); | 1297 | rcu_read_unlock(); |
1298 | 1298 | ||
1299 | if (!mm) | 1299 | if (!mm) |
1300 | return -EINVAL; | 1300 | return -EINVAL; |
diff --git a/mm/mremap.c b/mm/mremap.c index 9925b6391b80..1de98d492ddc 100644 --- a/mm/mremap.c +++ b/mm/mremap.c | |||
@@ -94,9 +94,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, | |||
94 | */ | 94 | */ |
95 | mapping = vma->vm_file->f_mapping; | 95 | mapping = vma->vm_file->f_mapping; |
96 | spin_lock(&mapping->i_mmap_lock); | 96 | spin_lock(&mapping->i_mmap_lock); |
97 | if (new_vma->vm_truncate_count && | 97 | new_vma->vm_truncate_count = 0; |
98 | new_vma->vm_truncate_count != vma->vm_truncate_count) | ||
99 | new_vma->vm_truncate_count = 0; | ||
100 | } | 98 | } |
101 | 99 | ||
102 | /* | 100 | /* |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index a873e61e312e..cdef1d4b4e47 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -5376,10 +5376,9 @@ __count_immobile_pages(struct zone *zone, struct page *page, int count) | |||
5376 | for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) { | 5376 | for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) { |
5377 | unsigned long check = pfn + iter; | 5377 | unsigned long check = pfn + iter; |
5378 | 5378 | ||
5379 | if (!pfn_valid_within(check)) { | 5379 | if (!pfn_valid_within(check)) |
5380 | iter++; | ||
5381 | continue; | 5380 | continue; |
5382 | } | 5381 | |
5383 | page = pfn_to_page(check); | 5382 | page = pfn_to_page(check); |
5384 | if (!page_count(page)) { | 5383 | if (!page_count(page)) { |
5385 | if (PageBuddy(page)) | 5384 | if (PageBuddy(page)) |
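The mm/page_alloc.c hunk fixes a classic double increment: the loop body did "iter++; continue;" inside a for loop whose header already increments iter, so every invalid pfn also skipped the pfn after it. The fix simply lets the loop header do the advancing. A minimal reproduction of the bug next to the fix (the valid() predicate is invented and merely plays the role of pfn_valid_within()):

#include <stdio.h>
#include <stdbool.h>

static bool valid(int i) { return i % 3 != 0; }   /* pretend every 3rd pfn is a hole */

static int count_buggy(int n)
{
	int count = 0;
	for (int iter = 0; iter < n; iter++) {
		if (!valid(iter)) {
			iter++;        /* bug: loop header increments again, skipping one */
			continue;
		}
		count++;
	}
	return count;
}

static int count_fixed(int n)
{
	int count = 0;
	for (int iter = 0; iter < n; iter++) {
		if (!valid(iter))
			continue;      /* fix: only the for loop advances iter */
		count++;
	}
	return count;
}

int main(void)
{
	printf("buggy: %d, fixed: %d (expected %d)\n",
	       count_buggy(12), count_fixed(12), 8);
	return 0;
}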
diff --git a/mm/swapfile.c b/mm/swapfile.c index 07a458d72fa8..0341c5700e34 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
@@ -1940,7 +1940,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) | |||
1940 | 1940 | ||
1941 | error = -EINVAL; | 1941 | error = -EINVAL; |
1942 | if (S_ISBLK(inode->i_mode)) { | 1942 | if (S_ISBLK(inode->i_mode)) { |
1943 | bdev = I_BDEV(inode); | 1943 | bdev = bdgrab(I_BDEV(inode)); |
1944 | error = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, | 1944 | error = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, |
1945 | sys_swapon); | 1945 | sys_swapon); |
1946 | if (error < 0) { | 1946 | if (error < 0) { |
diff --git a/mm/truncate.c b/mm/truncate.c index 49feb46e77b8..d64296be00d3 100644 --- a/mm/truncate.c +++ b/mm/truncate.c | |||
@@ -225,6 +225,7 @@ void truncate_inode_pages_range(struct address_space *mapping, | |||
225 | next = start; | 225 | next = start; |
226 | while (next <= end && | 226 | while (next <= end && |
227 | pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { | 227 | pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { |
228 | mem_cgroup_uncharge_start(); | ||
228 | for (i = 0; i < pagevec_count(&pvec); i++) { | 229 | for (i = 0; i < pagevec_count(&pvec); i++) { |
229 | struct page *page = pvec.pages[i]; | 230 | struct page *page = pvec.pages[i]; |
230 | pgoff_t page_index = page->index; | 231 | pgoff_t page_index = page->index; |
@@ -247,6 +248,7 @@ void truncate_inode_pages_range(struct address_space *mapping, | |||
247 | unlock_page(page); | 248 | unlock_page(page); |
248 | } | 249 | } |
249 | pagevec_release(&pvec); | 250 | pagevec_release(&pvec); |
251 | mem_cgroup_uncharge_end(); | ||
250 | cond_resched(); | 252 | cond_resched(); |
251 | } | 253 | } |
252 | 254 | ||
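The mm/truncate.c hunk brackets each pagevec's worth of page releases with mem_cgroup_uncharge_start()/mem_cgroup_uncharge_end(), letting the memcg code coalesce the per-page uncharges done inside the loop into fewer updates of the shared counters. A hedged standalone model of that begin/end batching idea (the counters and function names below are invented, and the real memcg batching is per-task and more involved):

#include <stdio.h>

static long shared_counter;      /* expensive to touch (atomic in the kernel) */
static long batch;
static int  batching;

static void uncharge_start(void) { batching = 1; batch = 0; }

static void uncharge_one(void)
{
	if (batching)
		batch++;             /* cheap local accumulation */
	else
		shared_counter--;    /* one shared update per page */
}

static void uncharge_end(void)
{
	shared_counter -= batch;     /* single shared update per pagevec */
	batching = 0;
}

int main(void)
{
	shared_counter = 100;
	uncharge_start();
	for (int i = 0; i < 14; i++)     /* one pagevec worth of pages */
		uncharge_one();
	uncharge_end();
	printf("counter after batch: %ld\n", shared_counter);   /* 86 */
	return 0;
}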
diff --git a/mm/vmscan.c b/mm/vmscan.c index 148c6e630df2..6771ea70bfe7 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -1841,16 +1841,28 @@ static inline bool should_continue_reclaim(struct zone *zone, | |||
1841 | if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION)) | 1841 | if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION)) |
1842 | return false; | 1842 | return false; |
1843 | 1843 | ||
1844 | /* | 1844 | /* Consider stopping depending on scan and reclaim activity */ |
1845 | * If we failed to reclaim and have scanned the full list, stop. | 1845 | if (sc->gfp_mask & __GFP_REPEAT) { |
1846 | * NOTE: Checking just nr_reclaimed would exit reclaim/compaction far | 1846 | /* |
1847 | * faster but obviously would be less likely to succeed | 1847 | * For __GFP_REPEAT allocations, stop reclaiming if the |
1848 | * allocation. If this is desirable, use GFP_REPEAT to decide | 1848 | * full LRU list has been scanned and we are still failing |
1849 | * if both reclaimed and scanned should be checked or just | 1849 | * to reclaim pages. This full LRU scan is potentially |
1850 | * reclaimed | 1850 | * expensive but a __GFP_REPEAT caller really wants to succeed |
1851 | */ | 1851 | */ |
1852 | if (!nr_reclaimed && !nr_scanned) | 1852 | if (!nr_reclaimed && !nr_scanned) |
1853 | return false; | 1853 | return false; |
1854 | } else { | ||
1855 | /* | ||
1856 | * For non-__GFP_REPEAT allocations which can presumably | ||
1857 | * fail without consequence, stop if we failed to reclaim | ||
1858 | * any pages from the last SWAP_CLUSTER_MAX number of | ||
1859 | * pages that were scanned. This will return to the | ||
1860 | * caller faster at the risk reclaim/compaction and | ||
1861 | * the resulting allocation attempt fails | ||
1862 | */ | ||
1863 | if (!nr_reclaimed) | ||
1864 | return false; | ||
1865 | } | ||
1854 | 1866 | ||
1855 | /* | 1867 | /* |
1856 | * If we have not reclaimed enough pages for compaction and the | 1868 | * If we have not reclaimed enough pages for compaction and the |
@@ -1882,12 +1894,12 @@ static void shrink_zone(int priority, struct zone *zone, | |||
1882 | unsigned long nr[NR_LRU_LISTS]; | 1894 | unsigned long nr[NR_LRU_LISTS]; |
1883 | unsigned long nr_to_scan; | 1895 | unsigned long nr_to_scan; |
1884 | enum lru_list l; | 1896 | enum lru_list l; |
1885 | unsigned long nr_reclaimed; | 1897 | unsigned long nr_reclaimed, nr_scanned; |
1886 | unsigned long nr_to_reclaim = sc->nr_to_reclaim; | 1898 | unsigned long nr_to_reclaim = sc->nr_to_reclaim; |
1887 | unsigned long nr_scanned = sc->nr_scanned; | ||
1888 | 1899 | ||
1889 | restart: | 1900 | restart: |
1890 | nr_reclaimed = 0; | 1901 | nr_reclaimed = 0; |
1902 | nr_scanned = sc->nr_scanned; | ||
1891 | get_scan_count(zone, sc, nr, priority); | 1903 | get_scan_count(zone, sc, nr, priority); |
1892 | 1904 | ||
1893 | while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || | 1905 | while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || |
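The mm/vmscan.c change splits the early-exit test in should_continue_reclaim() by __GFP_REPEAT: a caller that insists on the allocation keeps reclaiming until a pass scans nothing and reclaims nothing, while an ordinary caller gives up as soon as a scan batch reclaims nothing; the shrink_zone() hunk resets nr_scanned at each restart so the test sees only the current pass. The sketch below models just that early-exit logic (the real function goes on to check whether enough pages exist for compaction):

#include <stdio.h>
#include <stdbool.h>

/* Models only the early-exit half of the new test: true means "keep going". */
static bool keep_reclaiming(bool gfp_repeat,
			    unsigned long nr_reclaimed, unsigned long nr_scanned)
{
	if (gfp_repeat)
		/* insistent caller: stop only once a pass neither scanned
		 * nor reclaimed anything (the LRU is exhausted) */
		return !(nr_reclaimed == 0 && nr_scanned == 0);
	/* ordinary caller: stop as soon as a scan batch reclaims nothing */
	return nr_reclaimed != 0;
}

int main(void)
{
	printf("%d\n", keep_reclaiming(false, 0, 32));  /* 0: cheap caller gives up */
	printf("%d\n", keep_reclaiming(true,  0, 32));  /* 1: __GFP_REPEAT keeps trying */
	printf("%d\n", keep_reclaiming(true,  0, 0));   /* 0: nothing left to scan */
	return 0;
}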
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c index ee41fef04b21..d1a611322549 100644 --- a/net/batman-adv/unicast.c +++ b/net/batman-adv/unicast.c | |||
@@ -50,12 +50,12 @@ static struct sk_buff *frag_merge_packet(struct list_head *head, | |||
50 | skb = tfp->skb; | 50 | skb = tfp->skb; |
51 | } | 51 | } |
52 | 52 | ||
53 | if (skb_linearize(skb) < 0 || skb_linearize(tmp_skb) < 0) | ||
54 | goto err; | ||
55 | |||
53 | skb_pull(tmp_skb, sizeof(struct unicast_frag_packet)); | 56 | skb_pull(tmp_skb, sizeof(struct unicast_frag_packet)); |
54 | if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0) { | 57 | if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0) |
55 | /* free buffered skb, skb will be freed later */ | 58 | goto err; |
56 | kfree_skb(tfp->skb); | ||
57 | return NULL; | ||
58 | } | ||
59 | 59 | ||
60 | /* move free entry to end */ | 60 | /* move free entry to end */ |
61 | tfp->skb = NULL; | 61 | tfp->skb = NULL; |
@@ -70,6 +70,11 @@ static struct sk_buff *frag_merge_packet(struct list_head *head, | |||
70 | unicast_packet->packet_type = BAT_UNICAST; | 70 | unicast_packet->packet_type = BAT_UNICAST; |
71 | 71 | ||
72 | return skb; | 72 | return skb; |
73 | |||
74 | err: | ||
75 | /* free buffered skb, skb will be freed later */ | ||
76 | kfree_skb(tfp->skb); | ||
77 | return NULL; | ||
73 | } | 78 | } |
74 | 79 | ||
75 | static void frag_create_entry(struct list_head *head, struct sk_buff *skb) | 80 | static void frag_create_entry(struct list_head *head, struct sk_buff *skb) |
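The net/batman-adv/unicast.c change linearizes both skbs before merging the fragments and folds the two failure cases into one err: label that frees the buffered skb, so frag_merge_packet() has a single cleanup path. A generic, self-contained illustration of that consolidated-cleanup idiom (plain C strings stand in for skbs; nothing here is the batman-adv API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *merge(char *a, const char *b)
{
	char *out;

	if (!a || !b)
		goto err;                       /* first failure case */
	out = malloc(strlen(a) + strlen(b) + 1);
	if (!out)
		goto err;                       /* second failure case, same cleanup */
	strcpy(out, a);
	strcat(out, b);
	free(a);                                /* buffered fragment consumed */
	return out;

err:
	free(a);                                /* single place that drops the buffer */
	return NULL;
}

int main(void)
{
	char *a = strdup("frag1-");
	char *merged = merge(a, "frag2");

	printf("%s\n", merged ? merged : "(failed)");
	free(merged);
	return 0;
}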
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index 7550abb0c96a..675614e38e14 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c | |||
@@ -859,6 +859,7 @@ static void __l2cap_sock_close(struct sock *sk, int reason) | |||
859 | result = L2CAP_CR_SEC_BLOCK; | 859 | result = L2CAP_CR_SEC_BLOCK; |
860 | else | 860 | else |
861 | result = L2CAP_CR_BAD_PSM; | 861 | result = L2CAP_CR_BAD_PSM; |
862 | sk->sk_state = BT_DISCONN; | ||
862 | 863 | ||
863 | rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); | 864 | rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); |
864 | rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); | 865 | rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); |
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index 2575c2db6404..d7b9af4703d0 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c | |||
@@ -727,7 +727,9 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp) | |||
727 | break; | 727 | break; |
728 | } | 728 | } |
729 | 729 | ||
730 | tty_unlock(); | ||
730 | schedule(); | 731 | schedule(); |
732 | tty_lock(); | ||
731 | } | 733 | } |
732 | set_current_state(TASK_RUNNING); | 734 | set_current_state(TASK_RUNNING); |
733 | remove_wait_queue(&dev->wait, &wait); | 735 | remove_wait_queue(&dev->wait, &wait); |
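The net/bluetooth/rfcomm/tty.c fix releases the tty lock around schedule() and retakes it afterwards, so other tty work can proceed (and the opener cannot deadlock) while this thread sleeps waiting for the carrier. The same drop-the-lock-while-blocking discipline is what pthread_cond_wait() performs atomically, which makes it a convenient way to sketch the idea outside the kernel (illustrative only; the rfcomm code uses a waitqueue, not a condition variable):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* pthread_cond_wait() unlocks the mutex while sleeping and relocks it before
 * returning, the unlock -> sleep -> relock dance the patch spells out by hand
 * around schedule(). */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  carrier = PTHREAD_COND_INITIALIZER;
static int carrier_up;

static void *opener(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!carrier_up)
		pthread_cond_wait(&carrier, &lock);   /* lock released while asleep */
	printf("carrier up, open can proceed\n");
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, opener, NULL);
	sleep(1);                    /* opener is blocked without holding the lock */
	pthread_mutex_lock(&lock);   /* possible only because the waiter dropped it */
	carrier_up = 1;
	pthread_cond_signal(&carrier);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}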
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 6f6d8e1b776f..88e4aa9cb1f9 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c | |||
@@ -80,7 +80,7 @@ int br_handle_frame_finish(struct sk_buff *skb) | |||
80 | if (is_multicast_ether_addr(dest)) { | 80 | if (is_multicast_ether_addr(dest)) { |
81 | mdst = br_mdb_get(br, skb); | 81 | mdst = br_mdb_get(br, skb); |
82 | if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) { | 82 | if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) { |
83 | if ((mdst && !hlist_unhashed(&mdst->mglist)) || | 83 | if ((mdst && mdst->mglist) || |
84 | br_multicast_is_router(br)) | 84 | br_multicast_is_router(br)) |
85 | skb2 = skb; | 85 | skb2 = skb; |
86 | br_multicast_forward(mdst, skb, skb2); | 86 | br_multicast_forward(mdst, skb, skb2); |
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index f701a21acb34..030a002ff8ee 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c | |||
@@ -37,10 +37,9 @@ | |||
37 | rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock)) | 37 | rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock)) |
38 | 38 | ||
39 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 39 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
40 | static inline int ipv6_is_local_multicast(const struct in6_addr *addr) | 40 | static inline int ipv6_is_transient_multicast(const struct in6_addr *addr) |
41 | { | 41 | { |
42 | if (ipv6_addr_is_multicast(addr) && | 42 | if (ipv6_addr_is_multicast(addr) && IPV6_ADDR_MC_FLAG_TRANSIENT(addr)) |
43 | IPV6_ADDR_MC_SCOPE(addr) <= IPV6_ADDR_SCOPE_LINKLOCAL) | ||
44 | return 1; | 43 | return 1; |
45 | return 0; | 44 | return 0; |
46 | } | 45 | } |
@@ -232,8 +231,7 @@ static void br_multicast_group_expired(unsigned long data) | |||
232 | if (!netif_running(br->dev) || timer_pending(&mp->timer)) | 231 | if (!netif_running(br->dev) || timer_pending(&mp->timer)) |
233 | goto out; | 232 | goto out; |
234 | 233 | ||
235 | if (!hlist_unhashed(&mp->mglist)) | 234 | mp->mglist = false; |
236 | hlist_del_init(&mp->mglist); | ||
237 | 235 | ||
238 | if (mp->ports) | 236 | if (mp->ports) |
239 | goto out; | 237 | goto out; |
@@ -276,7 +274,7 @@ static void br_multicast_del_pg(struct net_bridge *br, | |||
276 | del_timer(&p->query_timer); | 274 | del_timer(&p->query_timer); |
277 | call_rcu_bh(&p->rcu, br_multicast_free_pg); | 275 | call_rcu_bh(&p->rcu, br_multicast_free_pg); |
278 | 276 | ||
279 | if (!mp->ports && hlist_unhashed(&mp->mglist) && | 277 | if (!mp->ports && !mp->mglist && |
280 | netif_running(br->dev)) | 278 | netif_running(br->dev)) |
281 | mod_timer(&mp->timer, jiffies); | 279 | mod_timer(&mp->timer, jiffies); |
282 | 280 | ||
@@ -436,7 +434,6 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, | |||
436 | eth = eth_hdr(skb); | 434 | eth = eth_hdr(skb); |
437 | 435 | ||
438 | memcpy(eth->h_source, br->dev->dev_addr, 6); | 436 | memcpy(eth->h_source, br->dev->dev_addr, 6); |
439 | ipv6_eth_mc_map(group, eth->h_dest); | ||
440 | eth->h_proto = htons(ETH_P_IPV6); | 437 | eth->h_proto = htons(ETH_P_IPV6); |
441 | skb_put(skb, sizeof(*eth)); | 438 | skb_put(skb, sizeof(*eth)); |
442 | 439 | ||
@@ -448,8 +445,10 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, | |||
448 | ip6h->payload_len = htons(8 + sizeof(*mldq)); | 445 | ip6h->payload_len = htons(8 + sizeof(*mldq)); |
449 | ip6h->nexthdr = IPPROTO_HOPOPTS; | 446 | ip6h->nexthdr = IPPROTO_HOPOPTS; |
450 | ip6h->hop_limit = 1; | 447 | ip6h->hop_limit = 1; |
451 | ipv6_addr_set(&ip6h->saddr, 0, 0, 0, 0); | 448 | ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0, |
449 | &ip6h->saddr); | ||
452 | ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1)); | 450 | ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1)); |
451 | ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest); | ||
453 | 452 | ||
454 | hopopt = (u8 *)(ip6h + 1); | 453 | hopopt = (u8 *)(ip6h + 1); |
455 | hopopt[0] = IPPROTO_ICMPV6; /* next hdr */ | 454 | hopopt[0] = IPPROTO_ICMPV6; /* next hdr */ |
@@ -528,7 +527,7 @@ static void br_multicast_group_query_expired(unsigned long data) | |||
528 | struct net_bridge *br = mp->br; | 527 | struct net_bridge *br = mp->br; |
529 | 528 | ||
530 | spin_lock(&br->multicast_lock); | 529 | spin_lock(&br->multicast_lock); |
531 | if (!netif_running(br->dev) || hlist_unhashed(&mp->mglist) || | 530 | if (!netif_running(br->dev) || !mp->mglist || |
532 | mp->queries_sent >= br->multicast_last_member_count) | 531 | mp->queries_sent >= br->multicast_last_member_count) |
533 | goto out; | 532 | goto out; |
534 | 533 | ||
@@ -719,7 +718,7 @@ static int br_multicast_add_group(struct net_bridge *br, | |||
719 | goto err; | 718 | goto err; |
720 | 719 | ||
721 | if (!port) { | 720 | if (!port) { |
722 | hlist_add_head(&mp->mglist, &br->mglist); | 721 | mp->mglist = true; |
723 | mod_timer(&mp->timer, now + br->multicast_membership_interval); | 722 | mod_timer(&mp->timer, now + br->multicast_membership_interval); |
724 | goto out; | 723 | goto out; |
725 | } | 724 | } |
@@ -781,11 +780,11 @@ static int br_ip6_multicast_add_group(struct net_bridge *br, | |||
781 | { | 780 | { |
782 | struct br_ip br_group; | 781 | struct br_ip br_group; |
783 | 782 | ||
784 | if (ipv6_is_local_multicast(group)) | 783 | if (!ipv6_is_transient_multicast(group)) |
785 | return 0; | 784 | return 0; |
786 | 785 | ||
787 | ipv6_addr_copy(&br_group.u.ip6, group); | 786 | ipv6_addr_copy(&br_group.u.ip6, group); |
788 | br_group.proto = htons(ETH_P_IP); | 787 | br_group.proto = htons(ETH_P_IPV6); |
789 | 788 | ||
790 | return br_multicast_add_group(br, port, &br_group); | 789 | return br_multicast_add_group(br, port, &br_group); |
791 | } | 790 | } |
@@ -1014,18 +1013,19 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br, | |||
1014 | 1013 | ||
1015 | nsrcs = skb_header_pointer(skb, | 1014 | nsrcs = skb_header_pointer(skb, |
1016 | len + offsetof(struct mld2_grec, | 1015 | len + offsetof(struct mld2_grec, |
1017 | grec_mca), | 1016 | grec_nsrcs), |
1018 | sizeof(_nsrcs), &_nsrcs); | 1017 | sizeof(_nsrcs), &_nsrcs); |
1019 | if (!nsrcs) | 1018 | if (!nsrcs) |
1020 | return -EINVAL; | 1019 | return -EINVAL; |
1021 | 1020 | ||
1022 | if (!pskb_may_pull(skb, | 1021 | if (!pskb_may_pull(skb, |
1023 | len + sizeof(*grec) + | 1022 | len + sizeof(*grec) + |
1024 | sizeof(struct in6_addr) * (*nsrcs))) | 1023 | sizeof(struct in6_addr) * ntohs(*nsrcs))) |
1025 | return -EINVAL; | 1024 | return -EINVAL; |
1026 | 1025 | ||
1027 | grec = (struct mld2_grec *)(skb->data + len); | 1026 | grec = (struct mld2_grec *)(skb->data + len); |
1028 | len += sizeof(*grec) + sizeof(struct in6_addr) * (*nsrcs); | 1027 | len += sizeof(*grec) + |
1028 | sizeof(struct in6_addr) * ntohs(*nsrcs); | ||
1029 | 1029 | ||
1030 | /* We treat these as MLDv1 reports for now. */ | 1030 | /* We treat these as MLDv1 reports for now. */ |
1031 | switch (grec->grec_type) { | 1031 | switch (grec->grec_type) { |
@@ -1165,7 +1165,7 @@ static int br_ip4_multicast_query(struct net_bridge *br, | |||
1165 | 1165 | ||
1166 | max_delay *= br->multicast_last_member_count; | 1166 | max_delay *= br->multicast_last_member_count; |
1167 | 1167 | ||
1168 | if (!hlist_unhashed(&mp->mglist) && | 1168 | if (mp->mglist && |
1169 | (timer_pending(&mp->timer) ? | 1169 | (timer_pending(&mp->timer) ? |
1170 | time_after(mp->timer.expires, now + max_delay) : | 1170 | time_after(mp->timer.expires, now + max_delay) : |
1171 | try_to_del_timer_sync(&mp->timer) >= 0)) | 1171 | try_to_del_timer_sync(&mp->timer) >= 0)) |
@@ -1177,7 +1177,7 @@ static int br_ip4_multicast_query(struct net_bridge *br, | |||
1177 | if (timer_pending(&p->timer) ? | 1177 | if (timer_pending(&p->timer) ? |
1178 | time_after(p->timer.expires, now + max_delay) : | 1178 | time_after(p->timer.expires, now + max_delay) : |
1179 | try_to_del_timer_sync(&p->timer) >= 0) | 1179 | try_to_del_timer_sync(&p->timer) >= 0) |
1180 | mod_timer(&mp->timer, now + max_delay); | 1180 | mod_timer(&p->timer, now + max_delay); |
1181 | } | 1181 | } |
1182 | 1182 | ||
1183 | out: | 1183 | out: |
@@ -1236,7 +1236,7 @@ static int br_ip6_multicast_query(struct net_bridge *br, | |||
1236 | goto out; | 1236 | goto out; |
1237 | 1237 | ||
1238 | max_delay *= br->multicast_last_member_count; | 1238 | max_delay *= br->multicast_last_member_count; |
1239 | if (!hlist_unhashed(&mp->mglist) && | 1239 | if (mp->mglist && |
1240 | (timer_pending(&mp->timer) ? | 1240 | (timer_pending(&mp->timer) ? |
1241 | time_after(mp->timer.expires, now + max_delay) : | 1241 | time_after(mp->timer.expires, now + max_delay) : |
1242 | try_to_del_timer_sync(&mp->timer) >= 0)) | 1242 | try_to_del_timer_sync(&mp->timer) >= 0)) |
@@ -1248,7 +1248,7 @@ static int br_ip6_multicast_query(struct net_bridge *br, | |||
1248 | if (timer_pending(&p->timer) ? | 1248 | if (timer_pending(&p->timer) ? |
1249 | time_after(p->timer.expires, now + max_delay) : | 1249 | time_after(p->timer.expires, now + max_delay) : |
1250 | try_to_del_timer_sync(&p->timer) >= 0) | 1250 | try_to_del_timer_sync(&p->timer) >= 0) |
1251 | mod_timer(&mp->timer, now + max_delay); | 1251 | mod_timer(&p->timer, now + max_delay); |
1252 | } | 1252 | } |
1253 | 1253 | ||
1254 | out: | 1254 | out: |
@@ -1283,7 +1283,7 @@ static void br_multicast_leave_group(struct net_bridge *br, | |||
1283 | br->multicast_last_member_interval; | 1283 | br->multicast_last_member_interval; |
1284 | 1284 | ||
1285 | if (!port) { | 1285 | if (!port) { |
1286 | if (!hlist_unhashed(&mp->mglist) && | 1286 | if (mp->mglist && |
1287 | (timer_pending(&mp->timer) ? | 1287 | (timer_pending(&mp->timer) ? |
1288 | time_after(mp->timer.expires, time) : | 1288 | time_after(mp->timer.expires, time) : |
1289 | try_to_del_timer_sync(&mp->timer) >= 0)) { | 1289 | try_to_del_timer_sync(&mp->timer) >= 0)) { |
@@ -1341,7 +1341,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br, | |||
1341 | { | 1341 | { |
1342 | struct br_ip br_group; | 1342 | struct br_ip br_group; |
1343 | 1343 | ||
1344 | if (ipv6_is_local_multicast(group)) | 1344 | if (!ipv6_is_transient_multicast(group)) |
1345 | return; | 1345 | return; |
1346 | 1346 | ||
1347 | ipv6_addr_copy(&br_group.u.ip6, group); | 1347 | ipv6_addr_copy(&br_group.u.ip6, group); |
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 84aac7734bfc..4e1b620b6be6 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
@@ -84,13 +84,13 @@ struct net_bridge_port_group { | |||
84 | struct net_bridge_mdb_entry | 84 | struct net_bridge_mdb_entry |
85 | { | 85 | { |
86 | struct hlist_node hlist[2]; | 86 | struct hlist_node hlist[2]; |
87 | struct hlist_node mglist; | ||
88 | struct net_bridge *br; | 87 | struct net_bridge *br; |
89 | struct net_bridge_port_group __rcu *ports; | 88 | struct net_bridge_port_group __rcu *ports; |
90 | struct rcu_head rcu; | 89 | struct rcu_head rcu; |
91 | struct timer_list timer; | 90 | struct timer_list timer; |
92 | struct timer_list query_timer; | 91 | struct timer_list query_timer; |
93 | struct br_ip addr; | 92 | struct br_ip addr; |
93 | bool mglist; | ||
94 | u32 queries_sent; | 94 | u32 queries_sent; |
95 | }; | 95 | }; |
96 | 96 | ||
@@ -238,7 +238,6 @@ struct net_bridge | |||
238 | spinlock_t multicast_lock; | 238 | spinlock_t multicast_lock; |
239 | struct net_bridge_mdb_htable __rcu *mdb; | 239 | struct net_bridge_mdb_htable __rcu *mdb; |
240 | struct hlist_head router_list; | 240 | struct hlist_head router_list; |
241 | struct hlist_head mglist; | ||
242 | 241 | ||
243 | struct timer_list multicast_router_timer; | 242 | struct timer_list multicast_router_timer; |
244 | struct timer_list multicast_querier_timer; | 243 | struct timer_list multicast_querier_timer; |
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c index fa9dab372b68..6008d6dc18a0 100644 --- a/net/caif/chnl_net.c +++ b/net/caif/chnl_net.c | |||
@@ -394,9 +394,7 @@ static void ipcaif_net_setup(struct net_device *dev) | |||
394 | priv->conn_req.sockaddr.u.dgm.connection_id = -1; | 394 | priv->conn_req.sockaddr.u.dgm.connection_id = -1; |
395 | priv->flowenabled = false; | 395 | priv->flowenabled = false; |
396 | 396 | ||
397 | ASSERT_RTNL(); | ||
398 | init_waitqueue_head(&priv->netmgmt_wq); | 397 | init_waitqueue_head(&priv->netmgmt_wq); |
399 | list_add(&priv->list_field, &chnl_net_list); | ||
400 | } | 398 | } |
401 | 399 | ||
402 | 400 | ||
@@ -453,6 +451,8 @@ static int ipcaif_newlink(struct net *src_net, struct net_device *dev, | |||
453 | ret = register_netdevice(dev); | 451 | ret = register_netdevice(dev); |
454 | if (ret) | 452 | if (ret) |
455 | pr_warn("device rtml registration failed\n"); | 453 | pr_warn("device rtml registration failed\n"); |
454 | else | ||
455 | list_add(&caifdev->list_field, &chnl_net_list); | ||
456 | return ret; | 456 | return ret; |
457 | } | 457 | } |
458 | 458 | ||
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index dff633d62e5b..35b36b86d762 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c | |||
@@ -252,8 +252,12 @@ static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len) | |||
252 | { | 252 | { |
253 | struct kvec iov = {buf, len}; | 253 | struct kvec iov = {buf, len}; |
254 | struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; | 254 | struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; |
255 | int r; | ||
255 | 256 | ||
256 | return kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags); | 257 | r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags); |
258 | if (r == -EAGAIN) | ||
259 | r = 0; | ||
260 | return r; | ||
257 | } | 261 | } |
258 | 262 | ||
259 | /* | 263 | /* |
@@ -264,13 +268,17 @@ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov, | |||
264 | size_t kvlen, size_t len, int more) | 268 | size_t kvlen, size_t len, int more) |
265 | { | 269 | { |
266 | struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; | 270 | struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; |
271 | int r; | ||
267 | 272 | ||
268 | if (more) | 273 | if (more) |
269 | msg.msg_flags |= MSG_MORE; | 274 | msg.msg_flags |= MSG_MORE; |
270 | else | 275 | else |
271 | msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */ | 276 | msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */ |
272 | 277 | ||
273 | return kernel_sendmsg(sock, &msg, iov, kvlen, len); | 278 | r = kernel_sendmsg(sock, &msg, iov, kvlen, len); |
279 | if (r == -EAGAIN) | ||
280 | r = 0; | ||
281 | return r; | ||
274 | } | 282 | } |
275 | 283 | ||
276 | 284 | ||
@@ -847,6 +855,8 @@ static int write_partial_msg_pages(struct ceph_connection *con) | |||
847 | (msg->pages || msg->pagelist || msg->bio || in_trail)) | 855 | (msg->pages || msg->pagelist || msg->bio || in_trail)) |
848 | kunmap(page); | 856 | kunmap(page); |
849 | 857 | ||
858 | if (ret == -EAGAIN) | ||
859 | ret = 0; | ||
850 | if (ret <= 0) | 860 | if (ret <= 0) |
851 | goto out; | 861 | goto out; |
852 | 862 | ||
@@ -1737,16 +1747,12 @@ more_kvec: | |||
1737 | if (con->out_skip) { | 1747 | if (con->out_skip) { |
1738 | ret = write_partial_skip(con); | 1748 | ret = write_partial_skip(con); |
1739 | if (ret <= 0) | 1749 | if (ret <= 0) |
1740 | goto done; | 1750 | goto out; |
1741 | if (ret < 0) { | ||
1742 | dout("try_write write_partial_skip err %d\n", ret); | ||
1743 | goto done; | ||
1744 | } | ||
1745 | } | 1751 | } |
1746 | if (con->out_kvec_left) { | 1752 | if (con->out_kvec_left) { |
1747 | ret = write_partial_kvec(con); | 1753 | ret = write_partial_kvec(con); |
1748 | if (ret <= 0) | 1754 | if (ret <= 0) |
1749 | goto done; | 1755 | goto out; |
1750 | } | 1756 | } |
1751 | 1757 | ||
1752 | /* msg pages? */ | 1758 | /* msg pages? */ |
@@ -1761,11 +1767,11 @@ more_kvec: | |||
1761 | if (ret == 1) | 1767 | if (ret == 1) |
1762 | goto more_kvec; /* we need to send the footer, too! */ | 1768 | goto more_kvec; /* we need to send the footer, too! */ |
1763 | if (ret == 0) | 1769 | if (ret == 0) |
1764 | goto done; | 1770 | goto out; |
1765 | if (ret < 0) { | 1771 | if (ret < 0) { |
1766 | dout("try_write write_partial_msg_pages err %d\n", | 1772 | dout("try_write write_partial_msg_pages err %d\n", |
1767 | ret); | 1773 | ret); |
1768 | goto done; | 1774 | goto out; |
1769 | } | 1775 | } |
1770 | } | 1776 | } |
1771 | 1777 | ||
@@ -1789,10 +1795,9 @@ do_next: | |||
1789 | /* Nothing to do! */ | 1795 | /* Nothing to do! */ |
1790 | clear_bit(WRITE_PENDING, &con->state); | 1796 | clear_bit(WRITE_PENDING, &con->state); |
1791 | dout("try_write nothing else to write.\n"); | 1797 | dout("try_write nothing else to write.\n"); |
1792 | done: | ||
1793 | ret = 0; | 1798 | ret = 0; |
1794 | out: | 1799 | out: |
1795 | dout("try_write done on %p\n", con); | 1800 | dout("try_write done on %p ret %d\n", con, ret); |
1796 | return ret; | 1801 | return ret; |
1797 | } | 1802 | } |
1798 | 1803 | ||
@@ -1821,19 +1826,17 @@ more: | |||
1821 | dout("try_read connecting\n"); | 1826 | dout("try_read connecting\n"); |
1822 | ret = read_partial_banner(con); | 1827 | ret = read_partial_banner(con); |
1823 | if (ret <= 0) | 1828 | if (ret <= 0) |
1824 | goto done; | ||
1825 | if (process_banner(con) < 0) { | ||
1826 | ret = -1; | ||
1827 | goto out; | 1829 | goto out; |
1828 | } | 1830 | ret = process_banner(con); |
1831 | if (ret < 0) | ||
1832 | goto out; | ||
1829 | } | 1833 | } |
1830 | ret = read_partial_connect(con); | 1834 | ret = read_partial_connect(con); |
1831 | if (ret <= 0) | 1835 | if (ret <= 0) |
1832 | goto done; | ||
1833 | if (process_connect(con) < 0) { | ||
1834 | ret = -1; | ||
1835 | goto out; | 1836 | goto out; |
1836 | } | 1837 | ret = process_connect(con); |
1838 | if (ret < 0) | ||
1839 | goto out; | ||
1837 | goto more; | 1840 | goto more; |
1838 | } | 1841 | } |
1839 | 1842 | ||
@@ -1848,7 +1851,7 @@ more: | |||
1848 | dout("skipping %d / %d bytes\n", skip, -con->in_base_pos); | 1851 | dout("skipping %d / %d bytes\n", skip, -con->in_base_pos); |
1849 | ret = ceph_tcp_recvmsg(con->sock, buf, skip); | 1852 | ret = ceph_tcp_recvmsg(con->sock, buf, skip); |
1850 | if (ret <= 0) | 1853 | if (ret <= 0) |
1851 | goto done; | 1854 | goto out; |
1852 | con->in_base_pos += ret; | 1855 | con->in_base_pos += ret; |
1853 | if (con->in_base_pos) | 1856 | if (con->in_base_pos) |
1854 | goto more; | 1857 | goto more; |
@@ -1859,7 +1862,7 @@ more: | |||
1859 | */ | 1862 | */ |
1860 | ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1); | 1863 | ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1); |
1861 | if (ret <= 0) | 1864 | if (ret <= 0) |
1862 | goto done; | 1865 | goto out; |
1863 | dout("try_read got tag %d\n", (int)con->in_tag); | 1866 | dout("try_read got tag %d\n", (int)con->in_tag); |
1864 | switch (con->in_tag) { | 1867 | switch (con->in_tag) { |
1865 | case CEPH_MSGR_TAG_MSG: | 1868 | case CEPH_MSGR_TAG_MSG: |
@@ -1870,7 +1873,7 @@ more: | |||
1870 | break; | 1873 | break; |
1871 | case CEPH_MSGR_TAG_CLOSE: | 1874 | case CEPH_MSGR_TAG_CLOSE: |
1872 | set_bit(CLOSED, &con->state); /* fixme */ | 1875 | set_bit(CLOSED, &con->state); /* fixme */ |
1873 | goto done; | 1876 | goto out; |
1874 | default: | 1877 | default: |
1875 | goto bad_tag; | 1878 | goto bad_tag; |
1876 | } | 1879 | } |
@@ -1882,13 +1885,12 @@ more: | |||
1882 | case -EBADMSG: | 1885 | case -EBADMSG: |
1883 | con->error_msg = "bad crc"; | 1886 | con->error_msg = "bad crc"; |
1884 | ret = -EIO; | 1887 | ret = -EIO; |
1885 | goto out; | 1888 | break; |
1886 | case -EIO: | 1889 | case -EIO: |
1887 | con->error_msg = "io error"; | 1890 | con->error_msg = "io error"; |
1888 | goto out; | 1891 | break; |
1889 | default: | ||
1890 | goto done; | ||
1891 | } | 1892 | } |
1893 | goto out; | ||
1892 | } | 1894 | } |
1893 | if (con->in_tag == CEPH_MSGR_TAG_READY) | 1895 | if (con->in_tag == CEPH_MSGR_TAG_READY) |
1894 | goto more; | 1896 | goto more; |
@@ -1898,15 +1900,13 @@ more: | |||
1898 | if (con->in_tag == CEPH_MSGR_TAG_ACK) { | 1900 | if (con->in_tag == CEPH_MSGR_TAG_ACK) { |
1899 | ret = read_partial_ack(con); | 1901 | ret = read_partial_ack(con); |
1900 | if (ret <= 0) | 1902 | if (ret <= 0) |
1901 | goto done; | 1903 | goto out; |
1902 | process_ack(con); | 1904 | process_ack(con); |
1903 | goto more; | 1905 | goto more; |
1904 | } | 1906 | } |
1905 | 1907 | ||
1906 | done: | ||
1907 | ret = 0; | ||
1908 | out: | 1908 | out: |
1909 | dout("try_read done on %p\n", con); | 1909 | dout("try_read done on %p ret %d\n", con, ret); |
1910 | return ret; | 1910 | return ret; |
1911 | 1911 | ||
1912 | bad_tag: | 1912 | bad_tag: |
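The net/ceph/messenger.c changes make the socket wrappers translate -EAGAIN into 0, so try_read() and try_write() can treat every non-positive return as "stop for now" and collapse the separate done:/out: exits into a single out: label (with the final ret logged). A standalone model of that wrapper convention; the scripted raw_io() below is invented and merely stands in for kernel_recvmsg()/kernel_sendmsg():

#include <stdio.h>
#include <errno.h>

/* Pretend transport: returns bytes moved, or -errno. */
static int raw_io(int step)
{
	static const int script[] = { 64, -EAGAIN, 32, -EIO };
	return script[step];
}

/* Wrapper convention from the patch: -EAGAIN becomes 0 so callers only need
 * to distinguish "made progress" (>0), "try later" (0) and "real error" (<0). */
static int io_step(int step)
{
	int r = raw_io(step);
	if (r == -EAGAIN)
		r = 0;
	return r;
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		int r = io_step(i);
		if (r > 0)
			printf("step %d: moved %d bytes\n", i, r);
		else if (r == 0)
			printf("step %d: no progress, will retry\n", i);
		else
			printf("step %d: error %d, closing\n", i, r);
	}
	return 0;
}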
diff --git a/net/core/dev.c b/net/core/dev.c index b6d0bf875a8e..8ae6631abcc2 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -1280,10 +1280,13 @@ static int __dev_close_many(struct list_head *head) | |||
1280 | 1280 | ||
1281 | static int __dev_close(struct net_device *dev) | 1281 | static int __dev_close(struct net_device *dev) |
1282 | { | 1282 | { |
1283 | int retval; | ||
1283 | LIST_HEAD(single); | 1284 | LIST_HEAD(single); |
1284 | 1285 | ||
1285 | list_add(&dev->unreg_list, &single); | 1286 | list_add(&dev->unreg_list, &single); |
1286 | return __dev_close_many(&single); | 1287 | retval = __dev_close_many(&single); |
1288 | list_del(&single); | ||
1289 | return retval; | ||
1287 | } | 1290 | } |
1288 | 1291 | ||
1289 | int dev_close_many(struct list_head *head) | 1292 | int dev_close_many(struct list_head *head) |
@@ -1325,7 +1328,7 @@ int dev_close(struct net_device *dev) | |||
1325 | 1328 | ||
1326 | list_add(&dev->unreg_list, &single); | 1329 | list_add(&dev->unreg_list, &single); |
1327 | dev_close_many(&single); | 1330 | dev_close_many(&single); |
1328 | 1331 | list_del(&single); | |
1329 | return 0; | 1332 | return 0; |
1330 | } | 1333 | } |
1331 | EXPORT_SYMBOL(dev_close); | 1334 | EXPORT_SYMBOL(dev_close); |
@@ -5063,6 +5066,7 @@ static void rollback_registered(struct net_device *dev) | |||
5063 | 5066 | ||
5064 | list_add(&dev->unreg_list, &single); | 5067 | list_add(&dev->unreg_list, &single); |
5065 | rollback_registered_many(&single); | 5068 | rollback_registered_many(&single); |
5069 | list_del(&single); | ||
5066 | } | 5070 | } |
5067 | 5071 | ||
5068 | unsigned long netdev_fix_features(unsigned long features, const char *name) | 5072 | unsigned long netdev_fix_features(unsigned long features, const char *name) |
@@ -5660,30 +5664,35 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, | |||
5660 | 5664 | ||
5661 | dev_net_set(dev, &init_net); | 5665 | dev_net_set(dev, &init_net); |
5662 | 5666 | ||
5667 | dev->gso_max_size = GSO_MAX_SIZE; | ||
5668 | |||
5669 | INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list); | ||
5670 | dev->ethtool_ntuple_list.count = 0; | ||
5671 | INIT_LIST_HEAD(&dev->napi_list); | ||
5672 | INIT_LIST_HEAD(&dev->unreg_list); | ||
5673 | INIT_LIST_HEAD(&dev->link_watch_list); | ||
5674 | dev->priv_flags = IFF_XMIT_DST_RELEASE; | ||
5675 | setup(dev); | ||
5676 | |||
5663 | dev->num_tx_queues = txqs; | 5677 | dev->num_tx_queues = txqs; |
5664 | dev->real_num_tx_queues = txqs; | 5678 | dev->real_num_tx_queues = txqs; |
5665 | if (netif_alloc_netdev_queues(dev)) | 5679 | if (netif_alloc_netdev_queues(dev)) |
5666 | goto free_pcpu; | 5680 | goto free_all; |
5667 | 5681 | ||
5668 | #ifdef CONFIG_RPS | 5682 | #ifdef CONFIG_RPS |
5669 | dev->num_rx_queues = rxqs; | 5683 | dev->num_rx_queues = rxqs; |
5670 | dev->real_num_rx_queues = rxqs; | 5684 | dev->real_num_rx_queues = rxqs; |
5671 | if (netif_alloc_rx_queues(dev)) | 5685 | if (netif_alloc_rx_queues(dev)) |
5672 | goto free_pcpu; | 5686 | goto free_all; |
5673 | #endif | 5687 | #endif |
5674 | 5688 | ||
5675 | dev->gso_max_size = GSO_MAX_SIZE; | ||
5676 | |||
5677 | INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list); | ||
5678 | dev->ethtool_ntuple_list.count = 0; | ||
5679 | INIT_LIST_HEAD(&dev->napi_list); | ||
5680 | INIT_LIST_HEAD(&dev->unreg_list); | ||
5681 | INIT_LIST_HEAD(&dev->link_watch_list); | ||
5682 | dev->priv_flags = IFF_XMIT_DST_RELEASE; | ||
5683 | setup(dev); | ||
5684 | strcpy(dev->name, name); | 5689 | strcpy(dev->name, name); |
5685 | return dev; | 5690 | return dev; |
5686 | 5691 | ||
5692 | free_all: | ||
5693 | free_netdev(dev); | ||
5694 | return NULL; | ||
5695 | |||
5687 | free_pcpu: | 5696 | free_pcpu: |
5688 | free_percpu(dev->pcpu_refcnt); | 5697 | free_percpu(dev->pcpu_refcnt); |
5689 | kfree(dev->_tx); | 5698 | kfree(dev->_tx); |
@@ -6211,6 +6220,7 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list) | |||
6211 | } | 6220 | } |
6212 | } | 6221 | } |
6213 | unregister_netdevice_many(&dev_kill_list); | 6222 | unregister_netdevice_many(&dev_kill_list); |
6223 | list_del(&dev_kill_list); | ||
6214 | rtnl_unlock(); | 6224 | rtnl_unlock(); |
6215 | } | 6225 | } |
6216 | 6226 | ||
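The net/core/dev.c hunks add a list_del() after every place that chains a device onto a stack-allocated list head (single, dev_kill_list) and hands it to a *_many() helper: without it, dev->unreg_list keeps pointing into a stack frame that is about to disappear, and the next use of that node corrupts memory. A toy standalone reproduction of why the detach matters (the two-pointer node and helpers below are illustrative, not <linux/list.h>):

#include <stdio.h>

struct node { struct node *prev, *next; };

static void list_add(struct node *n, struct node *head)
{
	n->next = head->next;  n->prev = head;
	head->next->prev = n;  head->next = n;
}

static void list_del(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->next = n;  n->prev = n;
}

static struct node dev_node = { &dev_node, &dev_node };   /* like dev->unreg_list */

static void close_dev(void)
{
	struct node single = { &single, &single };   /* stack list head */

	list_add(&dev_node, &single);
	/* ... the _many() helper walks &single here ... */
	list_del(&single);   /* the added line: detach before the frame dies */
}

int main(void)
{
	close_dev();
	/* with the detach, the long-lived node no longer references dead stack memory */
	printf("dev_node self-contained: %d\n",
	       dev_node.next == &dev_node && dev_node.prev == &dev_node);
	return 0;
}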
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index 6b03f561caec..d5074a567289 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c | |||
@@ -626,6 +626,9 @@ static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb, | |||
626 | dcb->cmd = DCB_CMD_GAPP; | 626 | dcb->cmd = DCB_CMD_GAPP; |
627 | 627 | ||
628 | app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP); | 628 | app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP); |
629 | if (!app_nest) | ||
630 | goto out_cancel; | ||
631 | |||
629 | ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype); | 632 | ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype); |
630 | if (ret) | 633 | if (ret) |
631 | goto out_cancel; | 634 | goto out_cancel; |
@@ -1613,6 +1616,10 @@ EXPORT_SYMBOL(dcb_getapp); | |||
1613 | u8 dcb_setapp(struct net_device *dev, struct dcb_app *new) | 1616 | u8 dcb_setapp(struct net_device *dev, struct dcb_app *new) |
1614 | { | 1617 | { |
1615 | struct dcb_app_type *itr; | 1618 | struct dcb_app_type *itr; |
1619 | struct dcb_app_type event; | ||
1620 | |||
1621 | memcpy(&event.name, dev->name, sizeof(event.name)); | ||
1622 | memcpy(&event.app, new, sizeof(event.app)); | ||
1616 | 1623 | ||
1617 | spin_lock(&dcb_lock); | 1624 | spin_lock(&dcb_lock); |
1618 | /* Search for existing match and replace */ | 1625 | /* Search for existing match and replace */ |
@@ -1644,7 +1651,7 @@ u8 dcb_setapp(struct net_device *dev, struct dcb_app *new) | |||
1644 | } | 1651 | } |
1645 | out: | 1652 | out: |
1646 | spin_unlock(&dcb_lock); | 1653 | spin_unlock(&dcb_lock); |
1647 | call_dcbevent_notifiers(DCB_APP_EVENT, new); | 1654 | call_dcbevent_notifiers(DCB_APP_EVENT, &event); |
1648 | return 0; | 1655 | return 0; |
1649 | } | 1656 | } |
1650 | EXPORT_SYMBOL(dcb_setapp); | 1657 | EXPORT_SYMBOL(dcb_setapp); |
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 748cb5b337bd..df4616fce929 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
@@ -1030,6 +1030,21 @@ static inline bool inetdev_valid_mtu(unsigned mtu) | |||
1030 | return mtu >= 68; | 1030 | return mtu >= 68; |
1031 | } | 1031 | } |
1032 | 1032 | ||
1033 | static void inetdev_send_gratuitous_arp(struct net_device *dev, | ||
1034 | struct in_device *in_dev) | ||
1035 | |||
1036 | { | ||
1037 | struct in_ifaddr *ifa = in_dev->ifa_list; | ||
1038 | |||
1039 | if (!ifa) | ||
1040 | return; | ||
1041 | |||
1042 | arp_send(ARPOP_REQUEST, ETH_P_ARP, | ||
1043 | ifa->ifa_address, dev, | ||
1044 | ifa->ifa_address, NULL, | ||
1045 | dev->dev_addr, NULL); | ||
1046 | } | ||
1047 | |||
1033 | /* Called only under RTNL semaphore */ | 1048 | /* Called only under RTNL semaphore */ |
1034 | 1049 | ||
1035 | static int inetdev_event(struct notifier_block *this, unsigned long event, | 1050 | static int inetdev_event(struct notifier_block *this, unsigned long event, |
@@ -1082,18 +1097,13 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, | |||
1082 | } | 1097 | } |
1083 | ip_mc_up(in_dev); | 1098 | ip_mc_up(in_dev); |
1084 | /* fall through */ | 1099 | /* fall through */ |
1085 | case NETDEV_NOTIFY_PEERS: | ||
1086 | case NETDEV_CHANGEADDR: | 1100 | case NETDEV_CHANGEADDR: |
1101 | if (!IN_DEV_ARP_NOTIFY(in_dev)) | ||
1102 | break; | ||
1103 | /* fall through */ | ||
1104 | case NETDEV_NOTIFY_PEERS: | ||
1087 | /* Send gratuitous ARP to notify of link change */ | 1105 | /* Send gratuitous ARP to notify of link change */ |
1088 | if (IN_DEV_ARP_NOTIFY(in_dev)) { | 1106 | inetdev_send_gratuitous_arp(dev, in_dev); |
1089 | struct in_ifaddr *ifa = in_dev->ifa_list; | ||
1090 | |||
1091 | if (ifa) | ||
1092 | arp_send(ARPOP_REQUEST, ETH_P_ARP, | ||
1093 | ifa->ifa_address, dev, | ||
1094 | ifa->ifa_address, NULL, | ||
1095 | dev->dev_addr, NULL); | ||
1096 | } | ||
1097 | break; | 1107 | break; |
1098 | case NETDEV_DOWN: | 1108 | case NETDEV_DOWN: |
1099 | ip_mc_down(in_dev); | 1109 | ip_mc_down(in_dev); |
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index c5af909cf701..3c8dfa16614d 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c | |||
@@ -505,7 +505,9 @@ restart: | |||
505 | } | 505 | } |
506 | 506 | ||
507 | rcu_read_unlock(); | 507 | rcu_read_unlock(); |
508 | local_bh_disable(); | ||
508 | inet_twsk_deschedule(tw, twdr); | 509 | inet_twsk_deschedule(tw, twdr); |
510 | local_bh_enable(); | ||
509 | inet_twsk_put(tw); | 511 | inet_twsk_put(tw); |
510 | goto restart_rcu; | 512 | goto restart_rcu; |
511 | } | 513 | } |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index eb68a0e34e49..6613edfac28c 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -775,6 +775,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev | |||
775 | .fl4_dst = dst, | 775 | .fl4_dst = dst, |
776 | .fl4_src = tiph->saddr, | 776 | .fl4_src = tiph->saddr, |
777 | .fl4_tos = RT_TOS(tos), | 777 | .fl4_tos = RT_TOS(tos), |
778 | .proto = IPPROTO_GRE, | ||
778 | .fl_gre_key = tunnel->parms.o_key | 779 | .fl_gre_key = tunnel->parms.o_key |
779 | }; | 780 | }; |
780 | if (ip_route_output_key(dev_net(dev), &rt, &fl)) { | 781 | if (ip_route_output_key(dev_net(dev), &rt, &fl)) { |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 788a3e74834e..6ed6603c2f6d 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -2722,6 +2722,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = { | |||
2722 | .destroy = ipv4_dst_destroy, | 2722 | .destroy = ipv4_dst_destroy, |
2723 | .check = ipv4_blackhole_dst_check, | 2723 | .check = ipv4_blackhole_dst_check, |
2724 | .default_mtu = ipv4_blackhole_default_mtu, | 2724 | .default_mtu = ipv4_blackhole_default_mtu, |
2725 | .default_advmss = ipv4_default_advmss, | ||
2725 | .update_pmtu = ipv4_rt_blackhole_update_pmtu, | 2726 | .update_pmtu = ipv4_rt_blackhole_update_pmtu, |
2726 | }; | 2727 | }; |
2727 | 2728 | ||
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index eb7f82ebf4a3..65f6c0406245 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -1222,7 +1222,7 @@ static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb, | |||
1222 | } | 1222 | } |
1223 | 1223 | ||
1224 | /* D-SACK for already forgotten data... Do dumb counting. */ | 1224 | /* D-SACK for already forgotten data... Do dumb counting. */ |
1225 | if (dup_sack && | 1225 | if (dup_sack && tp->undo_marker && tp->undo_retrans && |
1226 | !after(end_seq_0, prior_snd_una) && | 1226 | !after(end_seq_0, prior_snd_una) && |
1227 | after(end_seq_0, tp->undo_marker)) | 1227 | after(end_seq_0, tp->undo_marker)) |
1228 | tp->undo_retrans--; | 1228 | tp->undo_retrans--; |
@@ -1299,7 +1299,8 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk, | |||
1299 | 1299 | ||
1300 | /* Account D-SACK for retransmitted packet. */ | 1300 | /* Account D-SACK for retransmitted packet. */ |
1301 | if (dup_sack && (sacked & TCPCB_RETRANS)) { | 1301 | if (dup_sack && (sacked & TCPCB_RETRANS)) { |
1302 | if (after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker)) | 1302 | if (tp->undo_marker && tp->undo_retrans && |
1303 | after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker)) | ||
1303 | tp->undo_retrans--; | 1304 | tp->undo_retrans--; |
1304 | if (sacked & TCPCB_SACKED_ACKED) | 1305 | if (sacked & TCPCB_SACKED_ACKED) |
1305 | state->reord = min(fack_count, state->reord); | 1306 | state->reord = min(fack_count, state->reord); |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 406f320336e6..dfa5beb0c1c8 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -2162,7 +2162,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) | |||
2162 | if (!tp->retrans_stamp) | 2162 | if (!tp->retrans_stamp) |
2163 | tp->retrans_stamp = TCP_SKB_CB(skb)->when; | 2163 | tp->retrans_stamp = TCP_SKB_CB(skb)->when; |
2164 | 2164 | ||
2165 | tp->undo_retrans++; | 2165 | tp->undo_retrans += tcp_skb_pcount(skb); |
2166 | 2166 | ||
2167 | /* snd_nxt is stored to detect loss of retransmitted segment, | 2167 | /* snd_nxt is stored to detect loss of retransmitted segment, |
2168 | * see tcp_input.c tcp_sacktag_write_queue(). | 2168 | * see tcp_input.c tcp_sacktag_write_queue(). |
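Taken together, the tcp_input.c and tcp_output.c hunks make the D-SACK undo accounting consistent: undo_retrans is only decremented while an undo episode is open (undo_marker and undo_retrans set), and retransmissions are counted in segments via tcp_skb_pcount() rather than one per skb. A small standalone sketch of that bookkeeping, using a toy skb with a pcount field (names are illustrative, not the kernel structures):

#include <stdio.h>

struct toy_skb { unsigned int pcount; };  /* segments covered by one skb (GSO) */

struct toy_tp {
        unsigned int undo_marker;   /* nonzero while an undo episode is open */
        int undo_retrans;           /* retransmitted segments not yet D-SACKed */
};

static void on_retransmit(struct toy_tp *tp, const struct toy_skb *skb)
{
        tp->undo_retrans += skb->pcount;    /* count segments, not skbs */
}

static void on_dsack(struct toy_tp *tp)
{
        /* only meaningful while an undo is in progress */
        if (tp->undo_marker && tp->undo_retrans)
                tp->undo_retrans--;
}

int main(void)
{
        struct toy_tp tp = { .undo_marker = 1, .undo_retrans = 0 };
        struct toy_skb skb = { .pcount = 3 };

        on_retransmit(&tp, &skb);
        on_dsack(&tp);
        printf("undo_retrans = %d\n", tp.undo_retrans);   /* 2 */
        return 0;
}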
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c index 09c88891a753..de338037a736 100644 --- a/net/ipv6/netfilter/ip6t_LOG.c +++ b/net/ipv6/netfilter/ip6t_LOG.c | |||
@@ -410,7 +410,7 @@ fallback: | |||
410 | if (p != NULL) { | 410 | if (p != NULL) { |
411 | sb_add(m, "%02x", *p++); | 411 | sb_add(m, "%02x", *p++); |
412 | for (i = 1; i < len; i++) | 412 | for (i = 1; i < len; i++) |
413 | sb_add(m, ":%02x", p[i]); | 413 | sb_add(m, ":%02x", *p++); |
414 | } | 414 | } |
415 | sb_add(m, " "); | 415 | sb_add(m, " "); |
416 | 416 | ||
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 1c29f95695de..a998db6e7895 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -128,6 +128,7 @@ static struct dst_ops ip6_dst_blackhole_ops = { | |||
128 | .destroy = ip6_dst_destroy, | 128 | .destroy = ip6_dst_destroy, |
129 | .check = ip6_dst_check, | 129 | .check = ip6_dst_check, |
130 | .default_mtu = ip6_blackhole_default_mtu, | 130 | .default_mtu = ip6_blackhole_default_mtu, |
131 | .default_advmss = ip6_default_advmss, | ||
131 | .update_pmtu = ip6_rt_blackhole_update_pmtu, | 132 | .update_pmtu = ip6_rt_blackhole_update_pmtu, |
132 | }; | 133 | }; |
133 | 134 | ||
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 4bc8a9250cfd..9cd73b11506e 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -1822,6 +1822,7 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev, | |||
1822 | *cookie ^= 2; | 1822 | *cookie ^= 2; |
1823 | IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN; | 1823 | IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN; |
1824 | local->hw_roc_skb = skb; | 1824 | local->hw_roc_skb = skb; |
1825 | local->hw_roc_skb_for_status = skb; | ||
1825 | mutex_unlock(&local->mtx); | 1826 | mutex_unlock(&local->mtx); |
1826 | 1827 | ||
1827 | return 0; | 1828 | return 0; |
@@ -1875,6 +1876,7 @@ static int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy, | |||
1875 | if (ret == 0) { | 1876 | if (ret == 0) { |
1876 | kfree_skb(local->hw_roc_skb); | 1877 | kfree_skb(local->hw_roc_skb); |
1877 | local->hw_roc_skb = NULL; | 1878 | local->hw_roc_skb = NULL; |
1879 | local->hw_roc_skb_for_status = NULL; | ||
1878 | } | 1880 | } |
1879 | 1881 | ||
1880 | mutex_unlock(&local->mtx); | 1882 | mutex_unlock(&local->mtx); |
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index c47d7c0e48a4..533fd32f49ff 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -953,7 +953,7 @@ struct ieee80211_local { | |||
953 | 953 | ||
954 | struct ieee80211_channel *hw_roc_channel; | 954 | struct ieee80211_channel *hw_roc_channel; |
955 | struct net_device *hw_roc_dev; | 955 | struct net_device *hw_roc_dev; |
956 | struct sk_buff *hw_roc_skb; | 956 | struct sk_buff *hw_roc_skb, *hw_roc_skb_for_status; |
957 | struct work_struct hw_roc_start, hw_roc_done; | 957 | struct work_struct hw_roc_start, hw_roc_done; |
958 | enum nl80211_channel_type hw_roc_channel_type; | 958 | enum nl80211_channel_type hw_roc_channel_type; |
959 | unsigned int hw_roc_duration; | 959 | unsigned int hw_roc_duration; |
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 8acba456744e..7a10a8d1b2d0 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -1229,6 +1229,7 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local) | |||
1229 | } | 1229 | } |
1230 | mutex_unlock(&local->iflist_mtx); | 1230 | mutex_unlock(&local->iflist_mtx); |
1231 | unregister_netdevice_many(&unreg_list); | 1231 | unregister_netdevice_many(&unreg_list); |
1232 | list_del(&unreg_list); | ||
1232 | } | 1233 | } |
1233 | 1234 | ||
1234 | static u32 ieee80211_idle_off(struct ieee80211_local *local, | 1235 | static u32 ieee80211_idle_off(struct ieee80211_local *local, |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 45fbb9e33746..c9ceb4d57ab0 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -1033,6 +1033,12 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, | |||
1033 | if (is_multicast_ether_addr(hdr->addr1)) | 1033 | if (is_multicast_ether_addr(hdr->addr1)) |
1034 | return; | 1034 | return; |
1035 | 1035 | ||
1036 | /* | ||
1037 | * In case we receive frames after disassociation. | ||
1038 | */ | ||
1039 | if (!sdata->u.mgd.associated) | ||
1040 | return; | ||
1041 | |||
1036 | ieee80211_sta_reset_conn_monitor(sdata); | 1042 | ieee80211_sta_reset_conn_monitor(sdata); |
1037 | } | 1043 | } |
1038 | 1044 | ||
diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 38a797217a91..071ac95c4aa0 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c | |||
@@ -323,6 +323,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
323 | 323 | ||
324 | if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) { | 324 | if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) { |
325 | struct ieee80211_work *wk; | 325 | struct ieee80211_work *wk; |
326 | u64 cookie = (unsigned long)skb; | ||
326 | 327 | ||
327 | rcu_read_lock(); | 328 | rcu_read_lock(); |
328 | list_for_each_entry_rcu(wk, &local->work_list, list) { | 329 | list_for_each_entry_rcu(wk, &local->work_list, list) { |
@@ -334,8 +335,12 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
334 | break; | 335 | break; |
335 | } | 336 | } |
336 | rcu_read_unlock(); | 337 | rcu_read_unlock(); |
338 | if (local->hw_roc_skb_for_status == skb) { | ||
339 | cookie = local->hw_roc_cookie ^ 2; | ||
340 | local->hw_roc_skb_for_status = NULL; | ||
341 | } | ||
337 | cfg80211_mgmt_tx_status( | 342 | cfg80211_mgmt_tx_status( |
338 | skb->dev, (unsigned long) skb, skb->data, skb->len, | 343 | skb->dev, cookie, skb->data, skb->len, |
339 | !!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC); | 344 | !!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC); |
340 | } | 345 | } |
341 | 346 | ||
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index b64b42bc774b..b0beaa58246b 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -1547,7 +1547,7 @@ static int ieee80211_skb_resize(struct ieee80211_local *local, | |||
1547 | skb_orphan(skb); | 1547 | skb_orphan(skb); |
1548 | } | 1548 | } |
1549 | 1549 | ||
1550 | if (skb_header_cloned(skb)) | 1550 | if (skb_cloned(skb)) |
1551 | I802_DEBUG_INC(local->tx_expand_skb_head_cloned); | 1551 | I802_DEBUG_INC(local->tx_expand_skb_head_cloned); |
1552 | else if (head_need || tail_need) | 1552 | else if (head_need || tail_need) |
1553 | I802_DEBUG_INC(local->tx_expand_skb_head); | 1553 | I802_DEBUG_INC(local->tx_expand_skb_head); |
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index cf68700abffa..d036597aabbe 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -1210,7 +1210,9 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
1210 | switch (sdata->vif.type) { | 1210 | switch (sdata->vif.type) { |
1211 | case NL80211_IFTYPE_STATION: | 1211 | case NL80211_IFTYPE_STATION: |
1212 | changed |= BSS_CHANGED_ASSOC; | 1212 | changed |= BSS_CHANGED_ASSOC; |
1213 | mutex_lock(&sdata->u.mgd.mtx); | ||
1213 | ieee80211_bss_info_change_notify(sdata, changed); | 1214 | ieee80211_bss_info_change_notify(sdata, changed); |
1215 | mutex_unlock(&sdata->u.mgd.mtx); | ||
1214 | break; | 1216 | break; |
1215 | case NL80211_IFTYPE_ADHOC: | 1217 | case NL80211_IFTYPE_ADHOC: |
1216 | changed |= BSS_CHANGED_IBSS; | 1218 | changed |= BSS_CHANGED_IBSS; |
diff --git a/net/netfilter/core.c b/net/netfilter/core.c index 32fcbe290c04..4aa614b8a96a 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c | |||
@@ -133,6 +133,7 @@ unsigned int nf_iterate(struct list_head *head, | |||
133 | 133 | ||
134 | /* Optimization: we don't need to hold module | 134 | /* Optimization: we don't need to hold module |
135 | reference here, since function can't sleep. --RR */ | 135 | reference here, since function can't sleep. --RR */ |
136 | repeat: | ||
136 | verdict = elem->hook(hook, skb, indev, outdev, okfn); | 137 | verdict = elem->hook(hook, skb, indev, outdev, okfn); |
137 | if (verdict != NF_ACCEPT) { | 138 | if (verdict != NF_ACCEPT) { |
138 | #ifdef CONFIG_NETFILTER_DEBUG | 139 | #ifdef CONFIG_NETFILTER_DEBUG |
@@ -145,7 +146,7 @@ unsigned int nf_iterate(struct list_head *head, | |||
145 | #endif | 146 | #endif |
146 | if (verdict != NF_REPEAT) | 147 | if (verdict != NF_REPEAT) |
147 | return verdict; | 148 | return verdict; |
148 | *i = (*i)->prev; | 149 | goto repeat; |
149 | } | 150 | } |
150 | } | 151 | } |
151 | return NF_ACCEPT; | 152 | return NF_ACCEPT; |
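The nf_iterate() hunk replaces the iterator rewind (*i = (*i)->prev) with a local label, so an NF_REPEAT verdict simply re-invokes the same hook instead of relying on the caller's loop stepping back onto it. A standalone sketch of that control flow with function-pointer hooks; the verdict values and hook list are stand-ins for illustration:

#include <stdio.h>

enum verdict { V_ACCEPT, V_DROP, V_REPEAT };

typedef enum verdict (*hook_fn)(int *state);

static enum verdict flaky_hook(int *state)
{
        /* asks to be re-run once, then accepts */
        return (*state)++ < 1 ? V_REPEAT : V_ACCEPT;
}

static enum verdict run_hooks(hook_fn *hooks, int n, int *state)
{
        for (int i = 0; i < n; i++) {
                enum verdict v;
repeat:
                v = hooks[i](state);
                if (v != V_ACCEPT) {
                        if (v != V_REPEAT)
                                return v;     /* e.g. DROP: stop immediately */
                        goto repeat;          /* re-run the same hook */
                }
        }
        return V_ACCEPT;
}

int main(void)
{
        hook_fn hooks[] = { flaky_hook };
        int state = 0;
        printf("verdict = %d\n", run_hooks(hooks, 1, &state)); /* 0 = accept */
        return 0;
}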
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index e61511929c66..84f4fcc5884b 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
@@ -942,8 +942,15 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, | |||
942 | if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status)) | 942 | if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status)) |
943 | nf_conntrack_event_cache(IPCT_REPLY, ct); | 943 | nf_conntrack_event_cache(IPCT_REPLY, ct); |
944 | out: | 944 | out: |
945 | if (tmpl) | 945 | if (tmpl) { |
946 | nf_ct_put(tmpl); | 946 | /* Special case: we have to repeat this hook, assign the |
947 | * template again to this packet. We assume that this packet | ||
948 | * has no conntrack assigned. This is used by nf_ct_tcp. */ | ||
949 | if (ret == NF_REPEAT) | ||
950 | skb->nfct = (struct nf_conntrack *)tmpl; | ||
951 | else | ||
952 | nf_ct_put(tmpl); | ||
953 | } | ||
947 | 954 | ||
948 | return ret; | 955 | return ret; |
949 | } | 956 | } |
diff --git a/net/netfilter/nf_tproxy_core.c b/net/netfilter/nf_tproxy_core.c index 4d87befb04c0..474d621cbc2e 100644 --- a/net/netfilter/nf_tproxy_core.c +++ b/net/netfilter/nf_tproxy_core.c | |||
@@ -28,26 +28,23 @@ nf_tproxy_destructor(struct sk_buff *skb) | |||
28 | skb->destructor = NULL; | 28 | skb->destructor = NULL; |
29 | 29 | ||
30 | if (sk) | 30 | if (sk) |
31 | nf_tproxy_put_sock(sk); | 31 | sock_put(sk); |
32 | } | 32 | } |
33 | 33 | ||
34 | /* consumes sk */ | 34 | /* consumes sk */ |
35 | int | 35 | void |
36 | nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk) | 36 | nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk) |
37 | { | 37 | { |
38 | bool transparent = (sk->sk_state == TCP_TIME_WAIT) ? | 38 | /* assigning tw sockets complicates things; most |
39 | inet_twsk(sk)->tw_transparent : | 39 | * skb->sk->X checks would have to test sk->sk_state first */ |
40 | inet_sk(sk)->transparent; | 40 | if (sk->sk_state == TCP_TIME_WAIT) { |
41 | 41 | inet_twsk_put(inet_twsk(sk)); | |
42 | if (transparent) { | 42 | return; |
43 | skb_orphan(skb); | 43 | } |
44 | skb->sk = sk; | 44 | |
45 | skb->destructor = nf_tproxy_destructor; | 45 | skb_orphan(skb); |
46 | return 1; | 46 | skb->sk = sk; |
47 | } else | 47 | skb->destructor = nf_tproxy_destructor; |
48 | nf_tproxy_put_sock(sk); | ||
49 | |||
50 | return 0; | ||
51 | } | 48 | } |
52 | EXPORT_SYMBOL_GPL(nf_tproxy_assign_sock); | 49 | EXPORT_SYMBOL_GPL(nf_tproxy_assign_sock); |
53 | 50 | ||
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c index 640678f47a2a..dcfd57eb9d02 100644 --- a/net/netfilter/xt_TPROXY.c +++ b/net/netfilter/xt_TPROXY.c | |||
@@ -33,6 +33,20 @@ | |||
33 | #include <net/netfilter/nf_tproxy_core.h> | 33 | #include <net/netfilter/nf_tproxy_core.h> |
34 | #include <linux/netfilter/xt_TPROXY.h> | 34 | #include <linux/netfilter/xt_TPROXY.h> |
35 | 35 | ||
36 | static bool tproxy_sk_is_transparent(struct sock *sk) | ||
37 | { | ||
38 | if (sk->sk_state != TCP_TIME_WAIT) { | ||
39 | if (inet_sk(sk)->transparent) | ||
40 | return true; | ||
41 | sock_put(sk); | ||
42 | } else { | ||
43 | if (inet_twsk(sk)->tw_transparent) | ||
44 | return true; | ||
45 | inet_twsk_put(inet_twsk(sk)); | ||
46 | } | ||
47 | return false; | ||
48 | } | ||
49 | |||
36 | static inline __be32 | 50 | static inline __be32 |
37 | tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr) | 51 | tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr) |
38 | { | 52 | { |
@@ -141,7 +155,7 @@ tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport, | |||
141 | skb->dev, NFT_LOOKUP_LISTENER); | 155 | skb->dev, NFT_LOOKUP_LISTENER); |
142 | 156 | ||
143 | /* NOTE: assign_sock consumes our sk reference */ | 157 | /* NOTE: assign_sock consumes our sk reference */ |
144 | if (sk && nf_tproxy_assign_sock(skb, sk)) { | 158 | if (sk && tproxy_sk_is_transparent(sk)) { |
145 | /* This should be in a separate target, but we don't do multiple | 159 | /* This should be in a separate target, but we don't do multiple |
146 | targets on the same rule yet */ | 160 | targets on the same rule yet */ |
147 | skb->mark = (skb->mark & ~mark_mask) ^ mark_value; | 161 | skb->mark = (skb->mark & ~mark_mask) ^ mark_value; |
@@ -149,6 +163,8 @@ tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport, | |||
149 | pr_debug("redirecting: proto %hhu %pI4:%hu -> %pI4:%hu, mark: %x\n", | 163 | pr_debug("redirecting: proto %hhu %pI4:%hu -> %pI4:%hu, mark: %x\n", |
150 | iph->protocol, &iph->daddr, ntohs(hp->dest), | 164 | iph->protocol, &iph->daddr, ntohs(hp->dest), |
151 | &laddr, ntohs(lport), skb->mark); | 165 | &laddr, ntohs(lport), skb->mark); |
166 | |||
167 | nf_tproxy_assign_sock(skb, sk); | ||
152 | return NF_ACCEPT; | 168 | return NF_ACCEPT; |
153 | } | 169 | } |
154 | 170 | ||
@@ -306,7 +322,7 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par) | |||
306 | par->in, NFT_LOOKUP_LISTENER); | 322 | par->in, NFT_LOOKUP_LISTENER); |
307 | 323 | ||
308 | /* NOTE: assign_sock consumes our sk reference */ | 324 | /* NOTE: assign_sock consumes our sk reference */ |
309 | if (sk && nf_tproxy_assign_sock(skb, sk)) { | 325 | if (sk && tproxy_sk_is_transparent(sk)) { |
310 | /* This should be in a separate target, but we don't do multiple | 326 | /* This should be in a separate target, but we don't do multiple |
311 | targets on the same rule yet */ | 327 | targets on the same rule yet */ |
312 | skb->mark = (skb->mark & ~tgi->mark_mask) ^ tgi->mark_value; | 328 | skb->mark = (skb->mark & ~tgi->mark_mask) ^ tgi->mark_value; |
@@ -314,6 +330,8 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par) | |||
314 | pr_debug("redirecting: proto %hhu %pI6:%hu -> %pI6:%hu, mark: %x\n", | 330 | pr_debug("redirecting: proto %hhu %pI6:%hu -> %pI6:%hu, mark: %x\n", |
315 | tproto, &iph->saddr, ntohs(hp->source), | 331 | tproto, &iph->saddr, ntohs(hp->source), |
316 | laddr, ntohs(lport), skb->mark); | 332 | laddr, ntohs(lport), skb->mark); |
333 | |||
334 | nf_tproxy_assign_sock(skb, sk); | ||
317 | return NF_ACCEPT; | 335 | return NF_ACCEPT; |
318 | } | 336 | } |
319 | 337 | ||
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index 00d6ae838303..9cc46356b577 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c | |||
@@ -35,6 +35,15 @@ | |||
35 | #include <net/netfilter/nf_conntrack.h> | 35 | #include <net/netfilter/nf_conntrack.h> |
36 | #endif | 36 | #endif |
37 | 37 | ||
38 | static void | ||
39 | xt_socket_put_sk(struct sock *sk) | ||
40 | { | ||
41 | if (sk->sk_state == TCP_TIME_WAIT) | ||
42 | inet_twsk_put(inet_twsk(sk)); | ||
43 | else | ||
44 | sock_put(sk); | ||
45 | } | ||
46 | |||
38 | static int | 47 | static int |
39 | extract_icmp4_fields(const struct sk_buff *skb, | 48 | extract_icmp4_fields(const struct sk_buff *skb, |
40 | u8 *protocol, | 49 | u8 *protocol, |
@@ -164,7 +173,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par, | |||
164 | (sk->sk_state == TCP_TIME_WAIT && | 173 | (sk->sk_state == TCP_TIME_WAIT && |
165 | inet_twsk(sk)->tw_transparent)); | 174 | inet_twsk(sk)->tw_transparent)); |
166 | 175 | ||
167 | nf_tproxy_put_sock(sk); | 176 | xt_socket_put_sk(sk); |
168 | 177 | ||
169 | if (wildcard || !transparent) | 178 | if (wildcard || !transparent) |
170 | sk = NULL; | 179 | sk = NULL; |
@@ -298,7 +307,7 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par) | |||
298 | (sk->sk_state == TCP_TIME_WAIT && | 307 | (sk->sk_state == TCP_TIME_WAIT && |
299 | inet_twsk(sk)->tw_transparent)); | 308 | inet_twsk(sk)->tw_transparent)); |
300 | 309 | ||
301 | nf_tproxy_put_sock(sk); | 310 | xt_socket_put_sk(sk); |
302 | 311 | ||
303 | if (wildcard || !transparent) | 312 | if (wildcard || !transparent) |
304 | sk = NULL; | 313 | sk = NULL; |
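With nf_tproxy_put_sock() removed, each caller now releases the reference according to the socket's state: time-wait minisockets go through inet_twsk_put() and full sockets through sock_put(), which is what the new xt_socket_put_sk() and tproxy_sk_is_transparent() helpers do above. A toy sketch of the dispatch, using a tagged struct in place of the real socket types (all names illustrative):

#include <stdio.h>

enum sk_kind { SK_FULL, SK_TIMEWAIT };

struct toy_sock {
        enum sk_kind kind;
        int refcnt;
};

static void full_put(struct toy_sock *sk)     { sk->refcnt--; }
static void timewait_put(struct toy_sock *sk) { sk->refcnt--; }

/* mirrors xt_socket_put_sk(): pick the right release based on state */
static void put_sk(struct toy_sock *sk)
{
        if (sk->kind == SK_TIMEWAIT)
                timewait_put(sk);
        else
                full_put(sk);
}

int main(void)
{
        struct toy_sock tw = { SK_TIMEWAIT, 1 }, est = { SK_FULL, 1 };

        put_sk(&tw);
        put_sk(&est);
        printf("refcnts: %d %d\n", tw.refcnt, est.refcnt);  /* 0 0 */
        return 0;
}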
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c index 5ee16f0353fe..d763793d39de 100644 --- a/net/rxrpc/ar-key.c +++ b/net/rxrpc/ar-key.c | |||
@@ -89,11 +89,11 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr, | |||
89 | return ret; | 89 | return ret; |
90 | 90 | ||
91 | plen -= sizeof(*token); | 91 | plen -= sizeof(*token); |
92 | token = kmalloc(sizeof(*token), GFP_KERNEL); | 92 | token = kzalloc(sizeof(*token), GFP_KERNEL); |
93 | if (!token) | 93 | if (!token) |
94 | return -ENOMEM; | 94 | return -ENOMEM; |
95 | 95 | ||
96 | token->kad = kmalloc(plen, GFP_KERNEL); | 96 | token->kad = kzalloc(plen, GFP_KERNEL); |
97 | if (!token->kad) { | 97 | if (!token->kad) { |
98 | kfree(token); | 98 | kfree(token); |
99 | return -ENOMEM; | 99 | return -ENOMEM; |
@@ -731,10 +731,10 @@ static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen) | |||
731 | goto error; | 731 | goto error; |
732 | 732 | ||
733 | ret = -ENOMEM; | 733 | ret = -ENOMEM; |
734 | token = kmalloc(sizeof(*token), GFP_KERNEL); | 734 | token = kzalloc(sizeof(*token), GFP_KERNEL); |
735 | if (!token) | 735 | if (!token) |
736 | goto error; | 736 | goto error; |
737 | token->kad = kmalloc(plen, GFP_KERNEL); | 737 | token->kad = kzalloc(plen, GFP_KERNEL); |
738 | if (!token->kad) | 738 | if (!token->kad) |
739 | goto error_free; | 739 | goto error_free; |
740 | 740 | ||
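Switching the rxrpc token allocations from kmalloc() to kzalloc() guarantees that fields the XDR parser never writes read back as zero rather than stale heap contents. A userspace analogue using calloc(), purely for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct token {
        unsigned int kvno;
        char ticket[16];   /* may only be partially filled by a parser */
};

int main(void)
{
        /* calloc (like kzalloc) guarantees untouched bytes read as zero */
        struct token *t = calloc(1, sizeof(*t));
        if (!t)
                return 1;
        memcpy(t->ticket, "abc", 3);      /* partial fill */
        printf("kvno=%u last=%d\n", t->kvno, t->ticket[15]); /* 0 0 */
        free(t);
        return 0;
}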
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 34dc598440a2..1bc698039ae2 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -839,6 +839,7 @@ void dev_deactivate(struct net_device *dev) | |||
839 | 839 | ||
840 | list_add(&dev->unreg_list, &single); | 840 | list_add(&dev->unreg_list, &single); |
841 | dev_deactivate_many(&single); | 841 | dev_deactivate_many(&single); |
842 | list_del(&single); | ||
842 | } | 843 | } |
843 | 844 | ||
844 | static void dev_init_scheduler_queue(struct net_device *dev, | 845 | static void dev_init_scheduler_queue(struct net_device *dev, |
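This hunk and the mac80211 iface.c hunk earlier fix the same pattern: after a stack-allocated list head is handed to a *_many() helper, the queued entries still point back at that head, so it must be unlinked before the stack frame disappears. A self-contained sketch with a minimal doubly linked list (not the kernel's list.h) showing why the final list_del matters:

#include <stddef.h>
#include <stdio.h>

struct node { struct node *prev, *next; };

static void list_init(struct node *h) { h->prev = h->next = h; }

static void list_add(struct node *n, struct node *h)
{
        n->next = h->next; n->prev = h;
        h->next->prev = n; h->next = n;
}

static void list_del(struct node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        n->prev = n->next = n;
}

struct device { struct node unreg; int deactivated; };

static void deactivate_many(struct node *head)
{
        for (struct node *p = head->next; p != head; p = p->next) {
                struct device *d =
                        (struct device *)((char *)p - offsetof(struct device, unreg));
                d->deactivated = 1;
        }
}

int main(void)
{
        struct device dev = { .deactivated = 0 };
        struct node single;                    /* the on-stack "single" list head */

        list_init(&single);
        list_init(&dev.unreg);
        list_add(&dev.unreg, &single);
        deactivate_many(&single);
        list_del(&single);                     /* unlink before the stack head dies */

        printf("deactivated=%d points_at_stack_head=%d\n",
               dev.deactivated, dev.unreg.next == &single);    /* 1 0 */
        return 0;
}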
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 2cc46f0962ca..b23428f3c0dd 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -2029,11 +2029,11 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc, | |||
2029 | *errp = sctp_make_op_error_fixed(asoc, chunk); | 2029 | *errp = sctp_make_op_error_fixed(asoc, chunk); |
2030 | 2030 | ||
2031 | if (*errp) { | 2031 | if (*errp) { |
2032 | sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM, | 2032 | if (!sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM, |
2033 | WORD_ROUND(ntohs(param.p->length))); | 2033 | WORD_ROUND(ntohs(param.p->length)))) |
2034 | sctp_addto_chunk_fixed(*errp, | 2034 | sctp_addto_chunk_fixed(*errp, |
2035 | WORD_ROUND(ntohs(param.p->length)), | 2035 | WORD_ROUND(ntohs(param.p->length)), |
2036 | param.v); | 2036 | param.v); |
2037 | } else { | 2037 | } else { |
2038 | /* If there is no memory for generating the ERROR | 2038 | /* If there is no memory for generating the ERROR |
2039 | * report as specified, an ABORT will be triggered | 2039 | * report as specified, an ABORT will be triggered |
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index 3e5dbd4e4cd5..d112f038edf0 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c | |||
@@ -802,11 +802,11 @@ int cfg80211_wext_siwfreq(struct net_device *dev, | |||
802 | return freq; | 802 | return freq; |
803 | if (freq == 0) | 803 | if (freq == 0) |
804 | return -EINVAL; | 804 | return -EINVAL; |
805 | wdev_lock(wdev); | ||
806 | mutex_lock(&rdev->devlist_mtx); | 805 | mutex_lock(&rdev->devlist_mtx); |
806 | wdev_lock(wdev); | ||
807 | err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT); | 807 | err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT); |
808 | mutex_unlock(&rdev->devlist_mtx); | ||
809 | wdev_unlock(wdev); | 808 | wdev_unlock(wdev); |
809 | mutex_unlock(&rdev->devlist_mtx); | ||
810 | return err; | 810 | return err; |
811 | default: | 811 | default: |
812 | return -EOPNOTSUPP; | 812 | return -EOPNOTSUPP; |
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c index 55187c8f6420..406207515b5e 100644 --- a/net/x25/x25_facilities.c +++ b/net/x25/x25_facilities.c | |||
@@ -27,9 +27,19 @@ | |||
27 | #include <net/sock.h> | 27 | #include <net/sock.h> |
28 | #include <net/x25.h> | 28 | #include <net/x25.h> |
29 | 29 | ||
30 | /* | 30 | /** |
31 | * Parse a set of facilities into the facilities structures. Unrecognised | 31 | * x25_parse_facilities - Parse facilities from skb into the facilities structs |
32 | * facilities are written to the debug log file. | 32 | * |
33 | * @skb: sk_buff to parse | ||
34 | * @facilities: Regular facilities, updated as facilities are found | ||
35 | * @dte_facs: ITU DTE facilities, updated as DTE facilities are found | ||
36 | * @vc_fac_mask: mask is updated with all facilities found | ||
37 | * | ||
38 | * Return codes: | ||
39 | * -1 - Parsing error, caller should drop call and clean up | ||
40 | * 0 - Parse OK, this skb has no facilities | ||
41 | * >0 - Parse OK, returns the length of the facilities header | ||
42 | * | ||
33 | */ | 43 | */ |
34 | int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, | 44 | int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, |
35 | struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask) | 45 | struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask) |
@@ -62,7 +72,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, | |||
62 | switch (*p & X25_FAC_CLASS_MASK) { | 72 | switch (*p & X25_FAC_CLASS_MASK) { |
63 | case X25_FAC_CLASS_A: | 73 | case X25_FAC_CLASS_A: |
64 | if (len < 2) | 74 | if (len < 2) |
65 | return 0; | 75 | return -1; |
66 | switch (*p) { | 76 | switch (*p) { |
67 | case X25_FAC_REVERSE: | 77 | case X25_FAC_REVERSE: |
68 | if((p[1] & 0x81) == 0x81) { | 78 | if((p[1] & 0x81) == 0x81) { |
@@ -107,7 +117,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, | |||
107 | break; | 117 | break; |
108 | case X25_FAC_CLASS_B: | 118 | case X25_FAC_CLASS_B: |
109 | if (len < 3) | 119 | if (len < 3) |
110 | return 0; | 120 | return -1; |
111 | switch (*p) { | 121 | switch (*p) { |
112 | case X25_FAC_PACKET_SIZE: | 122 | case X25_FAC_PACKET_SIZE: |
113 | facilities->pacsize_in = p[1]; | 123 | facilities->pacsize_in = p[1]; |
@@ -130,7 +140,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, | |||
130 | break; | 140 | break; |
131 | case X25_FAC_CLASS_C: | 141 | case X25_FAC_CLASS_C: |
132 | if (len < 4) | 142 | if (len < 4) |
133 | return 0; | 143 | return -1; |
134 | printk(KERN_DEBUG "X.25: unknown facility %02X, " | 144 | printk(KERN_DEBUG "X.25: unknown facility %02X, " |
135 | "values %02X, %02X, %02X\n", | 145 | "values %02X, %02X, %02X\n", |
136 | p[0], p[1], p[2], p[3]); | 146 | p[0], p[1], p[2], p[3]); |
@@ -139,18 +149,18 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, | |||
139 | break; | 149 | break; |
140 | case X25_FAC_CLASS_D: | 150 | case X25_FAC_CLASS_D: |
141 | if (len < p[1] + 2) | 151 | if (len < p[1] + 2) |
142 | return 0; | 152 | return -1; |
143 | switch (*p) { | 153 | switch (*p) { |
144 | case X25_FAC_CALLING_AE: | 154 | case X25_FAC_CALLING_AE: |
145 | if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1) | 155 | if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1) |
146 | return 0; | 156 | return -1; |
147 | dte_facs->calling_len = p[2]; | 157 | dte_facs->calling_len = p[2]; |
148 | memcpy(dte_facs->calling_ae, &p[3], p[1] - 1); | 158 | memcpy(dte_facs->calling_ae, &p[3], p[1] - 1); |
149 | *vc_fac_mask |= X25_MASK_CALLING_AE; | 159 | *vc_fac_mask |= X25_MASK_CALLING_AE; |
150 | break; | 160 | break; |
151 | case X25_FAC_CALLED_AE: | 161 | case X25_FAC_CALLED_AE: |
152 | if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1) | 162 | if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1) |
153 | return 0; | 163 | return -1; |
154 | dte_facs->called_len = p[2]; | 164 | dte_facs->called_len = p[2]; |
155 | memcpy(dte_facs->called_ae, &p[3], p[1] - 1); | 165 | memcpy(dte_facs->called_ae, &p[3], p[1] - 1); |
156 | *vc_fac_mask |= X25_MASK_CALLED_AE; | 166 | *vc_fac_mask |= X25_MASK_CALLED_AE; |
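The parser now separates a malformed or truncated facilities block (-1) from a legitimately empty one (0), and the x25_in.c hunk below makes the caller clear the call on a negative return instead of treating it like success. A hedged standalone sketch of a length-checked parser using that three-way return convention; the buffer layout is simplified and is not the real X.25 facilities encoding:

#include <stdio.h>

/* returns -1 on malformed input, 0 if no facilities, >0 bytes consumed */
static int parse_facilities(const unsigned char *buf, int buflen)
{
        if (buflen < 1)
                return -1;

        int fac_len = buf[0];             /* leading length octet */
        if (fac_len == 0)
                return 0;                 /* valid: nothing to parse */
        if (fac_len + 1 > buflen)
                return -1;                /* claims more bytes than we have */

        /* ... decode fac_len bytes of facility codes here ... */
        return fac_len + 1;               /* header + payload consumed */
}

int main(void)
{
        const unsigned char ok[]    = { 2, 0x42, 0x07 };
        const unsigned char empty[] = { 0 };
        const unsigned char bad[]   = { 9, 0x42 };        /* truncated */

        printf("%d %d %d\n",
               parse_facilities(ok, sizeof(ok)),          /* 3 */
               parse_facilities(empty, sizeof(empty)),    /* 0 */
               parse_facilities(bad, sizeof(bad)));       /* -1 */
        return 0;
}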
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c index f729f022be69..15de65f04719 100644 --- a/net/x25/x25_in.c +++ b/net/x25/x25_in.c | |||
@@ -91,10 +91,10 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp | |||
91 | { | 91 | { |
92 | struct x25_address source_addr, dest_addr; | 92 | struct x25_address source_addr, dest_addr; |
93 | int len; | 93 | int len; |
94 | struct x25_sock *x25 = x25_sk(sk); | ||
94 | 95 | ||
95 | switch (frametype) { | 96 | switch (frametype) { |
96 | case X25_CALL_ACCEPTED: { | 97 | case X25_CALL_ACCEPTED: { |
97 | struct x25_sock *x25 = x25_sk(sk); | ||
98 | 98 | ||
99 | x25_stop_timer(sk); | 99 | x25_stop_timer(sk); |
100 | x25->condition = 0x00; | 100 | x25->condition = 0x00; |
@@ -113,14 +113,16 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp | |||
113 | &dest_addr); | 113 | &dest_addr); |
114 | if (len > 0) | 114 | if (len > 0) |
115 | skb_pull(skb, len); | 115 | skb_pull(skb, len); |
116 | else if (len < 0) | ||
117 | goto out_clear; | ||
116 | 118 | ||
117 | len = x25_parse_facilities(skb, &x25->facilities, | 119 | len = x25_parse_facilities(skb, &x25->facilities, |
118 | &x25->dte_facilities, | 120 | &x25->dte_facilities, |
119 | &x25->vc_facil_mask); | 121 | &x25->vc_facil_mask); |
120 | if (len > 0) | 122 | if (len > 0) |
121 | skb_pull(skb, len); | 123 | skb_pull(skb, len); |
122 | else | 124 | else if (len < 0) |
123 | return -1; | 125 | goto out_clear; |
124 | /* | 126 | /* |
125 | * Copy any Call User Data. | 127 | * Copy any Call User Data. |
126 | */ | 128 | */ |
@@ -144,6 +146,12 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp | |||
144 | } | 146 | } |
145 | 147 | ||
146 | return 0; | 148 | return 0; |
149 | |||
150 | out_clear: | ||
151 | x25_write_internal(sk, X25_CLEAR_REQUEST); | ||
152 | x25->state = X25_STATE_2; | ||
153 | x25_start_t23timer(sk); | ||
154 | return 0; | ||
147 | } | 155 | } |
148 | 156 | ||
149 | /* | 157 | /* |
diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c index 4cbc942f762a..21306928d47f 100644 --- a/net/x25/x25_link.c +++ b/net/x25/x25_link.c | |||
@@ -396,9 +396,12 @@ void __exit x25_link_free(void) | |||
396 | write_lock_bh(&x25_neigh_list_lock); | 396 | write_lock_bh(&x25_neigh_list_lock); |
397 | 397 | ||
398 | list_for_each_safe(entry, tmp, &x25_neigh_list) { | 398 | list_for_each_safe(entry, tmp, &x25_neigh_list) { |
399 | struct net_device *dev; | ||
400 | |||
399 | nb = list_entry(entry, struct x25_neigh, node); | 401 | nb = list_entry(entry, struct x25_neigh, node); |
402 | dev = nb->dev; | ||
400 | __x25_remove_neigh(nb); | 403 | __x25_remove_neigh(nb); |
401 | dev_put(nb->dev); | 404 | dev_put(dev); |
402 | } | 405 | } |
403 | write_unlock_bh(&x25_neigh_list_lock); | 406 | write_unlock_bh(&x25_neigh_list_lock); |
404 | } | 407 | } |
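__x25_remove_neigh() can release the neighbour, so reading nb->dev afterwards is a use-after-free; the fix captures the device pointer before the removal. A minimal userspace reproduction of the corrected ordering (toy types only):

#include <stdio.h>
#include <stdlib.h>

struct net_device { int refcnt; };
struct neigh { struct net_device *dev; };

static void remove_neigh(struct neigh *nb) { free(nb); }     /* may free nb */
static void dev_put(struct net_device *dev) { dev->refcnt--; }

int main(void)
{
        struct net_device eth0 = { .refcnt = 1 };
        struct neigh *nb = malloc(sizeof(*nb));
        if (!nb)
                return 1;
        nb->dev = &eth0;

        /* fixed ordering: take the pointer before the neighbour can be freed */
        struct net_device *dev = nb->dev;
        remove_neigh(nb);          /* touching nb->dev after this would be UAF */
        dev_put(dev);

        printf("refcnt=%d\n", eth0.refcnt);   /* 0 */
        return 0;
}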
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 8b3ef404c794..6459588befc3 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -1340,10 +1340,13 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) | |||
1340 | default: | 1340 | default: |
1341 | BUG(); | 1341 | BUG(); |
1342 | } | 1342 | } |
1343 | xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS); | 1343 | xdst = dst_alloc(dst_ops); |
1344 | xfrm_policy_put_afinfo(afinfo); | 1344 | xfrm_policy_put_afinfo(afinfo); |
1345 | 1345 | ||
1346 | xdst->flo.ops = &xfrm_bundle_fc_ops; | 1346 | if (likely(xdst)) |
1347 | xdst->flo.ops = &xfrm_bundle_fc_ops; | ||
1348 | else | ||
1349 | xdst = ERR_PTR(-ENOBUFS); | ||
1347 | 1350 | ||
1348 | return xdst; | 1351 | return xdst; |
1349 | } | 1352 | } |
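The old code assigned dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS) and then unconditionally wrote xdst->flo.ops, dereferencing the error pointer whenever allocation failed; the fix only touches the object on success. A sketch of the corrected flow with a malloc-backed allocator and a toy ERR_PTR stand-in (not the kernel macro):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define ERR_PTR(e)  ((void *)(long)(e))     /* toy stand-in for illustration */

struct flow_ops { int dummy; };
struct xfrm_dst { const struct flow_ops *ops; };

static const struct flow_ops bundle_ops;

static struct xfrm_dst *alloc_xdst(int fail)
{
        struct xfrm_dst *xdst = fail ? NULL : calloc(1, sizeof(*xdst));

        if (xdst)                        /* only dereference on success */
                xdst->ops = &bundle_ops;
        else
                xdst = ERR_PTR(-ENOBUFS);
        return xdst;
}

int main(void)
{
        struct xfrm_dst *ok = alloc_xdst(0);
        struct xfrm_dst *err = alloc_xdst(1);

        printf("ok=%s err_is_enobufs=%d\n",
               ok ? "allocated" : "null", err == ERR_PTR(-ENOBUFS));  /* allocated 1 */
        free(ok);
        return 0;
}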
diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c index c9a16abacab4..6c94c6ce2925 100644 --- a/scripts/basic/fixdep.c +++ b/scripts/basic/fixdep.c | |||
@@ -315,6 +315,7 @@ static void parse_dep_file(void *map, size_t len) | |||
315 | char *end = m + len; | 315 | char *end = m + len; |
316 | char *p; | 316 | char *p; |
317 | char s[PATH_MAX]; | 317 | char s[PATH_MAX]; |
318 | int first; | ||
318 | 319 | ||
319 | p = strchr(m, ':'); | 320 | p = strchr(m, ':'); |
320 | if (!p) { | 321 | if (!p) { |
@@ -327,6 +328,7 @@ static void parse_dep_file(void *map, size_t len) | |||
327 | 328 | ||
328 | clear_config(); | 329 | clear_config(); |
329 | 330 | ||
331 | first = 1; | ||
330 | while (m < end) { | 332 | while (m < end) { |
331 | while (m < end && (*m == ' ' || *m == '\\' || *m == '\n')) | 333 | while (m < end && (*m == ' ' || *m == '\\' || *m == '\n')) |
332 | m++; | 334 | m++; |
@@ -340,9 +342,17 @@ static void parse_dep_file(void *map, size_t len) | |||
340 | if (strrcmp(s, "include/generated/autoconf.h") && | 342 | if (strrcmp(s, "include/generated/autoconf.h") && |
341 | strrcmp(s, "arch/um/include/uml-config.h") && | 343 | strrcmp(s, "arch/um/include/uml-config.h") && |
342 | strrcmp(s, ".ver")) { | 344 | strrcmp(s, ".ver")) { |
343 | printf(" %s \\\n", s); | 345 | /* |
346 | * Do not output the first dependency (the | ||
347 | * source file), so that kbuild is not confused | ||
348 | * if a .c file is rewritten into .S or vice | ||
349 | * versa. | ||
350 | */ | ||
351 | if (!first) | ||
352 | printf(" %s \\\n", s); | ||
344 | do_config_file(s); | 353 | do_config_file(s); |
345 | } | 354 | } |
355 | first = 0; | ||
346 | m = p + 1; | 356 | m = p + 1; |
347 | } | 357 | } |
348 | printf("\n%s: $(deps_%s)\n\n", target, target); | 358 | printf("\n%s: $(deps_%s)\n\n", target, target); |
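The first token of a generated .d file is the object's own source file; emitting it as a dependency confuses kbuild when a .c file is later rewritten into a .S (or vice versa), so parse_dep_file() now skips it. A compact sketch of the same skip-the-first loop over a whitespace-separated dependency string; the config-file filtering done by the real fixdep is omitted:

#include <stdio.h>
#include <string.h>

/* print every dependency except the first (the source file itself) */
static void emit_deps(char *line)
{
        int first = 1;

        for (char *tok = strtok(line, " \t\\\n"); tok;
             tok = strtok(NULL, " \t\\\n")) {
                if (!first)
                        printf(" %s \\\n", tok);
                first = 0;
        }
}

int main(void)
{
        char line[] = "foo.c foo.h include/generated/bar.h";
        emit_deps(line);     /* prints foo.h and bar.h, not foo.c */
        return 0;
}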
diff --git a/scripts/package/builddeb b/scripts/package/builddeb index b0b2357aef42..f6cbc3ddb68b 100644 --- a/scripts/package/builddeb +++ b/scripts/package/builddeb | |||
@@ -238,12 +238,12 @@ EOF | |||
238 | fi | 238 | fi |
239 | 239 | ||
240 | # Build header package | 240 | # Build header package |
241 | find . -name Makefile -o -name Kconfig\* -o -name \*.pl > /tmp/files$$ | 241 | (cd $srctree; find . -name Makefile -o -name Kconfig\* -o -name \*.pl > /tmp/files$$) |
242 | find arch/x86/include include scripts -type f >> /tmp/files$$ | 242 | (cd $srctree; find arch/$SRCARCH/include include scripts -type f >> /tmp/files$$) |
243 | (cd $objtree; find .config Module.symvers include scripts -type f >> /tmp/objfiles$$) | 243 | (cd $objtree; find .config Module.symvers include scripts -type f >> /tmp/objfiles$$) |
244 | destdir=$kernel_headers_dir/usr/src/linux-headers-$version | 244 | destdir=$kernel_headers_dir/usr/src/linux-headers-$version |
245 | mkdir -p "$destdir" | 245 | mkdir -p "$destdir" |
246 | tar -c -f - -T /tmp/files$$ | (cd $destdir; tar -xf -) | 246 | (cd $srctree; tar -c -f - -T /tmp/files$$) | (cd $destdir; tar -xf -) |
247 | (cd $objtree; tar -c -f - -T /tmp/objfiles$$) | (cd $destdir; tar -xf -) | 247 | (cd $objtree; tar -c -f - -T /tmp/objfiles$$) | (cd $destdir; tar -xf -) |
248 | rm -f /tmp/files$$ /tmp/objfiles$$ | 248 | rm -f /tmp/files$$ /tmp/objfiles$$ |
249 | arch=$(dpkg --print-architecture) | 249 | arch=$(dpkg --print-architecture) |
diff --git a/security/security.c b/security/security.c index 739e40362f44..7b7308ace8c5 100644 --- a/security/security.c +++ b/security/security.c | |||
@@ -154,10 +154,9 @@ int security_capset(struct cred *new, const struct cred *old, | |||
154 | effective, inheritable, permitted); | 154 | effective, inheritable, permitted); |
155 | } | 155 | } |
156 | 156 | ||
157 | int security_capable(int cap) | 157 | int security_capable(const struct cred *cred, int cap) |
158 | { | 158 | { |
159 | return security_ops->capable(current, current_cred(), cap, | 159 | return security_ops->capable(current, cred, cap, SECURITY_CAP_AUDIT); |
160 | SECURITY_CAP_AUDIT); | ||
161 | } | 160 | } |
162 | 161 | ||
163 | int security_real_capable(struct task_struct *tsk, int cap) | 162 | int security_real_capable(struct task_struct *tsk, int cap) |
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index e276eb468536..c8d699270687 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c | |||
@@ -3198,7 +3198,11 @@ static void selinux_cred_free(struct cred *cred) | |||
3198 | { | 3198 | { |
3199 | struct task_security_struct *tsec = cred->security; | 3199 | struct task_security_struct *tsec = cred->security; |
3200 | 3200 | ||
3201 | BUG_ON((unsigned long) cred->security < PAGE_SIZE); | 3201 | /* |
3202 | * cred->security == NULL if security_cred_alloc_blank() or | ||
3203 | * security_prepare_creds() returned an error. | ||
3204 | */ | ||
3205 | BUG_ON(cred->security && (unsigned long) cred->security < PAGE_SIZE); | ||
3202 | cred->security = (void *) 0x7UL; | 3206 | cred->security = (void *) 0x7UL; |
3203 | kfree(tsec); | 3207 | kfree(tsec); |
3204 | } | 3208 | } |
diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c index 24d3013c0231..7c1fc64cb53d 100644 --- a/sound/arm/aaci.c +++ b/sound/arm/aaci.c | |||
@@ -50,7 +50,11 @@ static void aaci_ac97_select_codec(struct aaci *aaci, struct snd_ac97 *ac97) | |||
50 | if (v & SLFR_1RXV) | 50 | if (v & SLFR_1RXV) |
51 | readl(aaci->base + AACI_SL1RX); | 51 | readl(aaci->base + AACI_SL1RX); |
52 | 52 | ||
53 | writel(maincr, aaci->base + AACI_MAINCR); | 53 | if (maincr != readl(aaci->base + AACI_MAINCR)) { |
54 | writel(maincr, aaci->base + AACI_MAINCR); | ||
55 | readl(aaci->base + AACI_MAINCR); | ||
56 | udelay(1); | ||
57 | } | ||
54 | } | 58 | } |
55 | 59 | ||
56 | /* | 60 | /* |
@@ -993,6 +997,8 @@ static unsigned int __devinit aaci_size_fifo(struct aaci *aaci) | |||
993 | * disabling the channel doesn't clear the FIFO. | 997 | * disabling the channel doesn't clear the FIFO. |
994 | */ | 998 | */ |
995 | writel(aaci->maincr & ~MAINCR_IE, aaci->base + AACI_MAINCR); | 999 | writel(aaci->maincr & ~MAINCR_IE, aaci->base + AACI_MAINCR); |
1000 | readl(aaci->base + AACI_MAINCR); | ||
1001 | udelay(1); | ||
996 | writel(aaci->maincr, aaci->base + AACI_MAINCR); | 1002 | writel(aaci->maincr, aaci->base + AACI_MAINCR); |
997 | 1003 | ||
998 | /* | 1004 | /* |
diff --git a/sound/core/hrtimer.c b/sound/core/hrtimer.c index 7730575bfadd..b8b31c433d64 100644 --- a/sound/core/hrtimer.c +++ b/sound/core/hrtimer.c | |||
@@ -45,12 +45,13 @@ static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt) | |||
45 | { | 45 | { |
46 | struct snd_hrtimer *stime = container_of(hrt, struct snd_hrtimer, hrt); | 46 | struct snd_hrtimer *stime = container_of(hrt, struct snd_hrtimer, hrt); |
47 | struct snd_timer *t = stime->timer; | 47 | struct snd_timer *t = stime->timer; |
48 | unsigned long oruns; | ||
48 | 49 | ||
49 | if (!atomic_read(&stime->running)) | 50 | if (!atomic_read(&stime->running)) |
50 | return HRTIMER_NORESTART; | 51 | return HRTIMER_NORESTART; |
51 | 52 | ||
52 | hrtimer_forward_now(hrt, ns_to_ktime(t->sticks * resolution)); | 53 | oruns = hrtimer_forward_now(hrt, ns_to_ktime(t->sticks * resolution)); |
53 | snd_timer_interrupt(stime->timer, t->sticks); | 54 | snd_timer_interrupt(stime->timer, t->sticks * oruns); |
54 | 55 | ||
55 | if (!atomic_read(&stime->running)) | 56 | if (!atomic_read(&stime->running)) |
56 | return HRTIMER_NORESTART; | 57 | return HRTIMER_NORESTART; |
@@ -104,7 +105,7 @@ static int snd_hrtimer_stop(struct snd_timer *t) | |||
104 | } | 105 | } |
105 | 106 | ||
106 | static struct snd_timer_hardware hrtimer_hw = { | 107 | static struct snd_timer_hardware hrtimer_hw = { |
107 | .flags = SNDRV_TIMER_HW_AUTO, | 108 | .flags = SNDRV_TIMER_HW_AUTO | SNDRV_TIMER_HW_TASKLET, |
108 | .open = snd_hrtimer_open, | 109 | .open = snd_hrtimer_open, |
109 | .close = snd_hrtimer_close, | 110 | .close = snd_hrtimer_close, |
110 | .start = snd_hrtimer_start, | 111 | .start = snd_hrtimer_start, |
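hrtimer_forward_now() returns the number of periods the expiry had to be pushed forward, and the callback now multiplies the per-period tick count by that overrun so snd_timer_interrupt() does not lose time when the callback fires late. A standalone sketch of the same bookkeeping with plain integers instead of the hrtimer API:

#include <stdio.h>

/* advance expiry past "now" and return how many whole periods were skipped */
static unsigned long forward_now(long long *expires, long long now, long long period)
{
        unsigned long overruns = 0;

        while (*expires <= now) {
                *expires += period;
                overruns++;
        }
        return overruns;
}

int main(void)
{
        long long expires = 1000, period = 1000;
        long long now = 3500;               /* callback arrived 2.5 periods late */
        unsigned long ticks_per_period = 4;

        unsigned long oruns = forward_now(&expires, now, period);
        printf("report %lu ticks (overruns=%lu)\n",
               ticks_per_period * oruns, oruns);   /* 12 ticks, 3 overruns */
        return 0;
}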
diff --git a/sound/core/jack.c b/sound/core/jack.c index 4902ae568730..53b53e97c896 100644 --- a/sound/core/jack.c +++ b/sound/core/jack.c | |||
@@ -141,6 +141,7 @@ int snd_jack_new(struct snd_card *card, const char *id, int type, | |||
141 | 141 | ||
142 | fail_input: | 142 | fail_input: |
143 | input_free_device(jack->input_dev); | 143 | input_free_device(jack->input_dev); |
144 | kfree(jack->id); | ||
144 | kfree(jack); | 145 | kfree(jack); |
145 | return err; | 146 | return err; |
146 | } | 147 | } |
diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c index 23f49f356e0f..16c0bdfbb164 100644 --- a/sound/pci/au88x0/au88x0_core.c +++ b/sound/pci/au88x0/au88x0_core.c | |||
@@ -1252,11 +1252,19 @@ static void vortex_adbdma_resetup(vortex_t *vortex, int adbdma) { | |||
1252 | static int inline vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma) | 1252 | static int inline vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma) |
1253 | { | 1253 | { |
1254 | stream_t *dma = &vortex->dma_adb[adbdma]; | 1254 | stream_t *dma = &vortex->dma_adb[adbdma]; |
1255 | int temp; | 1255 | int temp, page, delta; |
1256 | 1256 | ||
1257 | temp = hwread(vortex->mmio, VORTEX_ADBDMA_STAT + (adbdma << 2)); | 1257 | temp = hwread(vortex->mmio, VORTEX_ADBDMA_STAT + (adbdma << 2)); |
1258 | temp = (dma->period_virt * dma->period_bytes) + (temp & (dma->period_bytes - 1)); | 1258 | page = (temp & ADB_SUBBUF_MASK) >> ADB_SUBBUF_SHIFT; |
1259 | return temp; | 1259 | if (dma->nr_periods >= 4) |
1260 | delta = (page - dma->period_real) & 3; | ||
1261 | else { | ||
1262 | delta = (page - dma->period_real); | ||
1263 | if (delta < 0) | ||
1264 | delta += dma->nr_periods; | ||
1265 | } | ||
1266 | return (dma->period_virt + delta) * dma->period_bytes | ||
1267 | + (temp & (dma->period_bytes - 1)); | ||
1260 | } | 1268 | } |
1261 | 1269 | ||
1262 | static void vortex_adbdma_startfifo(vortex_t * vortex, int adbdma) | 1270 | static void vortex_adbdma_startfifo(vortex_t * vortex, int adbdma) |
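The rewritten vortex_adbdma_getlinearpos() reads which hardware subbuffer (page) is active, computes the wrapped distance from the last period the driver knows about, and adds that delta to the virtual period before the byte offset, instead of assuming hardware and software sit in the same period. A hedged arithmetic sketch of the wrap-around delta; the register layout and field positions below are invented stand-ins, not the Vortex hardware definitions:

#include <stdio.h>

/* byte position = (virt period + wrapped delta) * period_bytes + offset */
static int linear_pos(unsigned int stat, int period_virt, int period_real,
                      int nr_periods, int period_bytes)
{
        int page  = (stat >> 12) & 3;            /* toy SUBBUF field */
        int delta = page - period_real;

        if (nr_periods >= 4)
                delta &= 3;                      /* hardware wraps every 4 pages */
        else if (delta < 0)
                delta += nr_periods;

        return (period_virt + delta) * period_bytes + (stat & (period_bytes - 1));
}

int main(void)
{
        /* hardware is one page ahead (page 2) of the software pointer (1),
         * 0x40 bytes into the current 0x1000-byte period */
        unsigned int stat = (2u << 12) | 0x40;

        printf("pos=0x%x\n", linear_pos(stat, 5, 1, 4, 0x1000));  /* 0x6040 */
        return 0;
}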
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 2e91a991eb15..fcedad9a5fef 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -2308,6 +2308,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = { | |||
2308 | SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB), | 2308 | SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB), |
2309 | SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB), | 2309 | SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB), |
2310 | SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB), | 2310 | SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB), |
2311 | SND_PCI_QUIRK(0x1043, 0x8410, "ASUS", POS_FIX_LPIB), | ||
2311 | SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB), | 2312 | SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB), |
2312 | SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB), | 2313 | SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB), |
2313 | SND_PCI_QUIRK(0x1179, 0xff10, "Toshiba A100-259", POS_FIX_LPIB), | 2314 | SND_PCI_QUIRK(0x1179, 0xff10, "Toshiba A100-259", POS_FIX_LPIB), |
@@ -2703,7 +2704,7 @@ static int __devinit azx_probe(struct pci_dev *pci, | |||
2703 | if (err < 0) | 2704 | if (err < 0) |
2704 | goto out_free; | 2705 | goto out_free; |
2705 | #ifdef CONFIG_SND_HDA_PATCH_LOADER | 2706 | #ifdef CONFIG_SND_HDA_PATCH_LOADER |
2706 | if (patch[dev]) { | 2707 | if (patch[dev] && *patch[dev]) { |
2707 | snd_printk(KERN_ERR SFX "Applying patch firmware '%s'\n", | 2708 | snd_printk(KERN_ERR SFX "Applying patch firmware '%s'\n", |
2708 | patch[dev]); | 2709 | patch[dev]); |
2709 | err = snd_hda_load_patch(chip->bus, patch[dev]); | 2710 | err = snd_hda_load_patch(chip->bus, patch[dev]); |
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index fbe97d32140d..4d5004e693f0 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -3114,6 +3114,8 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = { | |||
3114 | SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO), | 3114 | SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO), |
3115 | SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO), | 3115 | SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO), |
3116 | SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), | 3116 | SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), |
3117 | SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD), | ||
3118 | SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD), | ||
3117 | SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP), | 3119 | SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP), |
3118 | SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_ASUS), | 3120 | SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_ASUS), |
3119 | SND_PCI_QUIRK(0x1043, 0x1643, "Asus K52JU", CXT5066_ASUS), | 3121 | SND_PCI_QUIRK(0x1043, 0x1643, "Asus K52JU", CXT5066_ASUS), |
@@ -3410,7 +3412,7 @@ static void cx_auto_parse_output(struct hda_codec *codec) | |||
3410 | } | 3412 | } |
3411 | } | 3413 | } |
3412 | spec->multiout.dac_nids = spec->private_dac_nids; | 3414 | spec->multiout.dac_nids = spec->private_dac_nids; |
3413 | spec->multiout.max_channels = nums * 2; | 3415 | spec->multiout.max_channels = spec->multiout.num_dacs * 2; |
3414 | 3416 | ||
3415 | if (cfg->hp_outs > 0) | 3417 | if (cfg->hp_outs > 0) |
3416 | spec->auto_mute = 1; | 3418 | spec->auto_mute = 1; |
@@ -3729,9 +3731,9 @@ static int cx_auto_init(struct hda_codec *codec) | |||
3729 | return 0; | 3731 | return 0; |
3730 | } | 3732 | } |
3731 | 3733 | ||
3732 | static int cx_auto_add_volume(struct hda_codec *codec, const char *basename, | 3734 | static int cx_auto_add_volume_idx(struct hda_codec *codec, const char *basename, |
3733 | const char *dir, int cidx, | 3735 | const char *dir, int cidx, |
3734 | hda_nid_t nid, int hda_dir) | 3736 | hda_nid_t nid, int hda_dir, int amp_idx) |
3735 | { | 3737 | { |
3736 | static char name[32]; | 3738 | static char name[32]; |
3737 | static struct snd_kcontrol_new knew[] = { | 3739 | static struct snd_kcontrol_new knew[] = { |
@@ -3743,7 +3745,8 @@ static int cx_auto_add_volume(struct hda_codec *codec, const char *basename, | |||
3743 | 3745 | ||
3744 | for (i = 0; i < 2; i++) { | 3746 | for (i = 0; i < 2; i++) { |
3745 | struct snd_kcontrol *kctl; | 3747 | struct snd_kcontrol *kctl; |
3746 | knew[i].private_value = HDA_COMPOSE_AMP_VAL(nid, 3, 0, hda_dir); | 3748 | knew[i].private_value = HDA_COMPOSE_AMP_VAL(nid, 3, amp_idx, |
3749 | hda_dir); | ||
3747 | knew[i].subdevice = HDA_SUBDEV_AMP_FLAG; | 3750 | knew[i].subdevice = HDA_SUBDEV_AMP_FLAG; |
3748 | knew[i].index = cidx; | 3751 | knew[i].index = cidx; |
3749 | snprintf(name, sizeof(name), "%s%s %s", basename, dir, sfx[i]); | 3752 | snprintf(name, sizeof(name), "%s%s %s", basename, dir, sfx[i]); |
@@ -3759,6 +3762,9 @@ static int cx_auto_add_volume(struct hda_codec *codec, const char *basename, | |||
3759 | return 0; | 3762 | return 0; |
3760 | } | 3763 | } |
3761 | 3764 | ||
3765 | #define cx_auto_add_volume(codec, str, dir, cidx, nid, hda_dir) \ | ||
3766 | cx_auto_add_volume_idx(codec, str, dir, cidx, nid, hda_dir, 0) | ||
3767 | |||
3762 | #define cx_auto_add_pb_volume(codec, nid, str, idx) \ | 3768 | #define cx_auto_add_pb_volume(codec, nid, str, idx) \ |
3763 | cx_auto_add_volume(codec, str, " Playback", idx, nid, HDA_OUTPUT) | 3769 | cx_auto_add_volume(codec, str, " Playback", idx, nid, HDA_OUTPUT) |
3764 | 3770 | ||
@@ -3808,29 +3814,60 @@ static int cx_auto_build_input_controls(struct hda_codec *codec) | |||
3808 | struct conexant_spec *spec = codec->spec; | 3814 | struct conexant_spec *spec = codec->spec; |
3809 | struct auto_pin_cfg *cfg = &spec->autocfg; | 3815 | struct auto_pin_cfg *cfg = &spec->autocfg; |
3810 | static const char *prev_label; | 3816 | static const char *prev_label; |
3811 | int i, err, cidx; | 3817 | int i, err, cidx, conn_len; |
3818 | hda_nid_t conn[HDA_MAX_CONNECTIONS]; | ||
3819 | |||
3820 | int multi_adc_volume = 0; /* If the ADC nid has several input volumes */ | ||
3821 | int adc_nid = spec->adc_nids[0]; | ||
3822 | |||
3823 | conn_len = snd_hda_get_connections(codec, adc_nid, conn, | ||
3824 | HDA_MAX_CONNECTIONS); | ||
3825 | if (conn_len < 0) | ||
3826 | return conn_len; | ||
3827 | |||
3828 | multi_adc_volume = cfg->num_inputs > 1 && conn_len > 1; | ||
3829 | if (!multi_adc_volume) { | ||
3830 | err = cx_auto_add_volume(codec, "Capture", "", 0, adc_nid, | ||
3831 | HDA_INPUT); | ||
3832 | if (err < 0) | ||
3833 | return err; | ||
3834 | } | ||
3812 | 3835 | ||
3813 | err = cx_auto_add_volume(codec, "Capture", "", 0, spec->adc_nids[0], | ||
3814 | HDA_INPUT); | ||
3815 | if (err < 0) | ||
3816 | return err; | ||
3817 | prev_label = NULL; | 3836 | prev_label = NULL; |
3818 | cidx = 0; | 3837 | cidx = 0; |
3819 | for (i = 0; i < cfg->num_inputs; i++) { | 3838 | for (i = 0; i < cfg->num_inputs; i++) { |
3820 | hda_nid_t nid = cfg->inputs[i].pin; | 3839 | hda_nid_t nid = cfg->inputs[i].pin; |
3821 | const char *label; | 3840 | const char *label; |
3822 | if (!(get_wcaps(codec, nid) & AC_WCAP_IN_AMP)) | 3841 | int j; |
3842 | int pin_amp = get_wcaps(codec, nid) & AC_WCAP_IN_AMP; | ||
3843 | if (!pin_amp && !multi_adc_volume) | ||
3823 | continue; | 3844 | continue; |
3845 | |||
3824 | label = hda_get_autocfg_input_label(codec, cfg, i); | 3846 | label = hda_get_autocfg_input_label(codec, cfg, i); |
3825 | if (label == prev_label) | 3847 | if (label == prev_label) |
3826 | cidx++; | 3848 | cidx++; |
3827 | else | 3849 | else |
3828 | cidx = 0; | 3850 | cidx = 0; |
3829 | prev_label = label; | 3851 | prev_label = label; |
3830 | err = cx_auto_add_volume(codec, label, " Capture", cidx, | 3852 | |
3831 | nid, HDA_INPUT); | 3853 | if (pin_amp) { |
3832 | if (err < 0) | 3854 | err = cx_auto_add_volume(codec, label, " Boost", cidx, |
3833 | return err; | 3855 | nid, HDA_INPUT); |
3856 | if (err < 0) | ||
3857 | return err; | ||
3858 | } | ||
3859 | |||
3860 | if (!multi_adc_volume) | ||
3861 | continue; | ||
3862 | for (j = 0; j < conn_len; j++) { | ||
3863 | if (conn[j] == nid) { | ||
3864 | err = cx_auto_add_volume_idx(codec, label, | ||
3865 | " Capture", cidx, adc_nid, HDA_INPUT, j); | ||
3866 | if (err < 0) | ||
3867 | return err; | ||
3868 | break; | ||
3869 | } | ||
3870 | } | ||
3834 | } | 3871 | } |
3835 | return 0; | 3872 | return 0; |
3836 | } | 3873 | } |
@@ -3902,6 +3939,8 @@ static struct hda_codec_preset snd_hda_preset_conexant[] = { | |||
3902 | .patch = patch_cxt5066 }, | 3939 | .patch = patch_cxt5066 }, |
3903 | { .id = 0x14f15069, .name = "CX20585", | 3940 | { .id = 0x14f15069, .name = "CX20585", |
3904 | .patch = patch_cxt5066 }, | 3941 | .patch = patch_cxt5066 }, |
3942 | { .id = 0x14f1506e, .name = "CX20590", | ||
3943 | .patch = patch_cxt5066 }, | ||
3905 | { .id = 0x14f15097, .name = "CX20631", | 3944 | { .id = 0x14f15097, .name = "CX20631", |
3906 | .patch = patch_conexant_auto }, | 3945 | .patch = patch_conexant_auto }, |
3907 | { .id = 0x14f15098, .name = "CX20632", | 3946 | { .id = 0x14f15098, .name = "CX20632", |
@@ -3928,6 +3967,7 @@ MODULE_ALIAS("snd-hda-codec-id:14f15066"); | |||
3928 | MODULE_ALIAS("snd-hda-codec-id:14f15067"); | 3967 | MODULE_ALIAS("snd-hda-codec-id:14f15067"); |
3929 | MODULE_ALIAS("snd-hda-codec-id:14f15068"); | 3968 | MODULE_ALIAS("snd-hda-codec-id:14f15068"); |
3930 | MODULE_ALIAS("snd-hda-codec-id:14f15069"); | 3969 | MODULE_ALIAS("snd-hda-codec-id:14f15069"); |
3970 | MODULE_ALIAS("snd-hda-codec-id:14f1506e"); | ||
3931 | MODULE_ALIAS("snd-hda-codec-id:14f15097"); | 3971 | MODULE_ALIAS("snd-hda-codec-id:14f15097"); |
3932 | MODULE_ALIAS("snd-hda-codec-id:14f15098"); | 3972 | MODULE_ALIAS("snd-hda-codec-id:14f15098"); |
3933 | MODULE_ALIAS("snd-hda-codec-id:14f150a1"); | 3973 | MODULE_ALIAS("snd-hda-codec-id:14f150a1"); |
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 2d5b83fa8d24..a58767736727 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c | |||
@@ -642,6 +642,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec, hda_nid_t nid, | |||
642 | hdmi_ai->ver = 0x01; | 642 | hdmi_ai->ver = 0x01; |
643 | hdmi_ai->len = 0x0a; | 643 | hdmi_ai->len = 0x0a; |
644 | hdmi_ai->CC02_CT47 = channels - 1; | 644 | hdmi_ai->CC02_CT47 = channels - 1; |
645 | hdmi_ai->CA = ca; | ||
645 | hdmi_checksum_audio_infoframe(hdmi_ai); | 646 | hdmi_checksum_audio_infoframe(hdmi_ai); |
646 | } else if (spec->sink_eld[i].conn_type == 1) { /* DisplayPort */ | 647 | } else if (spec->sink_eld[i].conn_type == 1) { /* DisplayPort */ |
647 | struct dp_audio_infoframe *dp_ai; | 648 | struct dp_audio_infoframe *dp_ai; |
@@ -651,6 +652,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec, hda_nid_t nid, | |||
651 | dp_ai->len = 0x1b; | 652 | dp_ai->len = 0x1b; |
652 | dp_ai->ver = 0x11 << 2; | 653 | dp_ai->ver = 0x11 << 2; |
653 | dp_ai->CC02_CT47 = channels - 1; | 654 | dp_ai->CC02_CT47 = channels - 1; |
655 | dp_ai->CA = ca; | ||
654 | } else { | 656 | } else { |
655 | snd_printd("HDMI: unknown connection type at pin %d\n", | 657 | snd_printd("HDMI: unknown connection type at pin %d\n", |
656 | pin_nid); | 658 | pin_nid); |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 2fa9ed99c32f..3328a259a242 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -2290,6 +2290,29 @@ static struct snd_kcontrol_new alc888_base_mixer[] = { | |||
2290 | { } /* end */ | 2290 | { } /* end */ |
2291 | }; | 2291 | }; |
2292 | 2292 | ||
2293 | static struct snd_kcontrol_new alc888_acer_aspire_4930g_mixer[] = { | ||
2294 | HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), | ||
2295 | HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT), | ||
2296 | HDA_CODEC_VOLUME("Surround Playback Volume", 0x0d, 0x0, HDA_OUTPUT), | ||
2297 | HDA_BIND_MUTE("Surround Playback Switch", 0x0d, 2, HDA_INPUT), | ||
2298 | HDA_CODEC_VOLUME_MONO("Center Playback Volume", 0x0f, 2, 0x0, | ||
2299 | HDA_OUTPUT), | ||
2300 | HDA_BIND_MUTE_MONO("Center Playback Switch", 0x0f, 2, 2, HDA_INPUT), | ||
2301 | HDA_CODEC_VOLUME_MONO("LFE Playback Volume", 0x0f, 1, 0x0, HDA_OUTPUT), | ||
2302 | HDA_BIND_MUTE_MONO("LFE Playback Switch", 0x0f, 1, 2, HDA_INPUT), | ||
2303 | HDA_CODEC_VOLUME("Side Playback Volume", 0x0e, 0x0, HDA_OUTPUT), | ||
2304 | HDA_BIND_MUTE("Side Playback Switch", 0x0e, 2, HDA_INPUT), | ||
2305 | HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT), | ||
2306 | HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT), | ||
2307 | HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT), | ||
2308 | HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), | ||
2309 | HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), | ||
2310 | HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), | ||
2311 | HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), | ||
2312 | { } /* end */ | ||
2313 | }; | ||
2314 | |||
2315 | |||
2293 | static struct snd_kcontrol_new alc889_acer_aspire_8930g_mixer[] = { | 2316 | static struct snd_kcontrol_new alc889_acer_aspire_8930g_mixer[] = { |
2294 | HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), | 2317 | HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), |
2295 | HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT), | 2318 | HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT), |
@@ -10359,7 +10382,7 @@ static struct alc_config_preset alc882_presets[] = { | |||
10359 | .init_hook = alc_automute_amp, | 10382 | .init_hook = alc_automute_amp, |
10360 | }, | 10383 | }, |
10361 | [ALC888_ACER_ASPIRE_4930G] = { | 10384 | [ALC888_ACER_ASPIRE_4930G] = { |
10362 | .mixers = { alc888_base_mixer, | 10385 | .mixers = { alc888_acer_aspire_4930g_mixer, |
10363 | alc883_chmode_mixer }, | 10386 | alc883_chmode_mixer }, |
10364 | .init_verbs = { alc883_init_verbs, alc880_gpio1_init_verbs, | 10387 | .init_verbs = { alc883_init_verbs, alc880_gpio1_init_verbs, |
10365 | alc888_acer_aspire_4930g_verbs }, | 10388 | alc888_acer_aspire_4930g_verbs }, |
@@ -18802,6 +18825,7 @@ static struct snd_pci_quirk alc662_cfg_tbl[] = { | |||
18802 | ALC662_3ST_6ch_DIG), | 18825 | ALC662_3ST_6ch_DIG), |
18803 | SND_PCI_QUIRK_MASK(0x1854, 0xf000, 0x2000, "ASUS H13-200x", | 18826 | SND_PCI_QUIRK_MASK(0x1854, 0xf000, 0x2000, "ASUS H13-200x", |
18804 | ALC663_ASUS_H13), | 18827 | ALC663_ASUS_H13), |
18828 | SND_PCI_QUIRK(0x1991, 0x5628, "Ordissimo EVE", ALC662_LENOVO_101E), | ||
18805 | {} | 18829 | {} |
18806 | }; | 18830 | }; |
18807 | 18831 | ||
@@ -19494,6 +19518,7 @@ static const struct alc_fixup alc662_fixups[] = { | |||
19494 | }; | 19518 | }; |
19495 | 19519 | ||
19496 | static struct snd_pci_quirk alc662_fixup_tbl[] = { | 19520 | static struct snd_pci_quirk alc662_fixup_tbl[] = { |
19521 | SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE), | ||
19497 | SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE), | 19522 | SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE), |
19498 | SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD), | 19523 | SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD), |
19499 | SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD), | 19524 | SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD), |
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 9ea48b425d0b..bd7b123f6440 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c | |||
@@ -586,7 +586,12 @@ static hda_nid_t stac92hd83xxx_pin_nids[10] = { | |||
586 | 0x0f, 0x10, 0x11, 0x1f, 0x20, | 586 | 0x0f, 0x10, 0x11, 0x1f, 0x20, |
587 | }; | 587 | }; |
588 | 588 | ||
589 | static hda_nid_t stac92hd88xxx_pin_nids[10] = { | 589 | static hda_nid_t stac92hd87xxx_pin_nids[6] = { |
590 | 0x0a, 0x0b, 0x0c, 0x0d, | ||
591 | 0x0f, 0x11, | ||
592 | }; | ||
593 | |||
594 | static hda_nid_t stac92hd88xxx_pin_nids[8] = { | ||
590 | 0x0a, 0x0b, 0x0c, 0x0d, | 595 | 0x0a, 0x0b, 0x0c, 0x0d, |
591 | 0x0f, 0x11, 0x1f, 0x20, | 596 | 0x0f, 0x11, 0x1f, 0x20, |
592 | }; | 597 | }; |
@@ -5430,12 +5435,13 @@ again: | |||
5430 | switch (codec->vendor_id) { | 5435 | switch (codec->vendor_id) { |
5431 | case 0x111d76d1: | 5436 | case 0x111d76d1: |
5432 | case 0x111d76d9: | 5437 | case 0x111d76d9: |
5438 | case 0x111d76e5: | ||
5433 | spec->dmic_nids = stac92hd87b_dmic_nids; | 5439 | spec->dmic_nids = stac92hd87b_dmic_nids; |
5434 | spec->num_dmics = stac92xx_connected_ports(codec, | 5440 | spec->num_dmics = stac92xx_connected_ports(codec, |
5435 | stac92hd87b_dmic_nids, | 5441 | stac92hd87b_dmic_nids, |
5436 | STAC92HD87B_NUM_DMICS); | 5442 | STAC92HD87B_NUM_DMICS); |
5437 | spec->num_pins = ARRAY_SIZE(stac92hd88xxx_pin_nids); | 5443 | spec->num_pins = ARRAY_SIZE(stac92hd87xxx_pin_nids); |
5438 | spec->pin_nids = stac92hd88xxx_pin_nids; | 5444 | spec->pin_nids = stac92hd87xxx_pin_nids; |
5439 | spec->mono_nid = 0; | 5445 | spec->mono_nid = 0; |
5440 | spec->num_pwrs = 0; | 5446 | spec->num_pwrs = 0; |
5441 | break; | 5447 | break; |
@@ -5443,6 +5449,7 @@ again: | |||
5443 | case 0x111d7667: | 5449 | case 0x111d7667: |
5444 | case 0x111d7668: | 5450 | case 0x111d7668: |
5445 | case 0x111d7669: | 5451 | case 0x111d7669: |
5452 | case 0x111d76e3: | ||
5446 | spec->num_dmics = stac92xx_connected_ports(codec, | 5453 | spec->num_dmics = stac92xx_connected_ports(codec, |
5447 | stac92hd88xxx_dmic_nids, | 5454 | stac92hd88xxx_dmic_nids, |
5448 | STAC92HD88XXX_NUM_DMICS); | 5455 | STAC92HD88XXX_NUM_DMICS); |
@@ -6387,6 +6394,8 @@ static struct hda_codec_preset snd_hda_preset_sigmatel[] = { | |||
6387 | { .id = 0x111d76cd, .name = "92HD89F2", .patch = patch_stac92hd73xx }, | 6394 | { .id = 0x111d76cd, .name = "92HD89F2", .patch = patch_stac92hd73xx }, |
6388 | { .id = 0x111d76ce, .name = "92HD89F1", .patch = patch_stac92hd73xx }, | 6395 | { .id = 0x111d76ce, .name = "92HD89F1", .patch = patch_stac92hd73xx }, |
6389 | { .id = 0x111d76e0, .name = "92HD91BXX", .patch = patch_stac92hd83xxx}, | 6396 | { .id = 0x111d76e0, .name = "92HD91BXX", .patch = patch_stac92hd83xxx}, |
6397 | { .id = 0x111d76e3, .name = "92HD98BXX", .patch = patch_stac92hd83xxx}, | ||
6398 | { .id = 0x111d76e5, .name = "92HD99BXX", .patch = patch_stac92hd83xxx}, | ||
6390 | { .id = 0x111d76e7, .name = "92HD90BXX", .patch = patch_stac92hd83xxx}, | 6399 | { .id = 0x111d76e7, .name = "92HD90BXX", .patch = patch_stac92hd83xxx}, |
6391 | {} /* terminator */ | 6400 | {} /* terminator */ |
6392 | }; | 6401 | }; |
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c index a76c3260d941..63b0054200a8 100644 --- a/sound/pci/hda/patch_via.c +++ b/sound/pci/hda/patch_via.c | |||
@@ -567,7 +567,7 @@ static void via_auto_init_analog_input(struct hda_codec *codec) | |||
567 | hda_nid_t nid = cfg->inputs[i].pin; | 567 | hda_nid_t nid = cfg->inputs[i].pin; |
568 | if (spec->smart51_enabled && is_smart51_pins(spec, nid)) | 568 | if (spec->smart51_enabled && is_smart51_pins(spec, nid)) |
569 | ctl = PIN_OUT; | 569 | ctl = PIN_OUT; |
570 | else if (i == AUTO_PIN_MIC) | 570 | else if (cfg->inputs[i].type == AUTO_PIN_MIC) |
571 | ctl = PIN_VREF50; | 571 | ctl = PIN_VREF50; |
572 | else | 572 | else |
573 | ctl = PIN_IN; | 573 | ctl = PIN_IN; |
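The patch_via.c change above replaces an index comparison with a type comparison, since the parsed inputs[] array is not ordered by pin type. A small self-contained sketch of why selecting the pin control by type matters (illustrative structures only):

```c
/* Sketch of the fix: the pin control must be derived from each input's
 * type, not from its array index, because the mic entry may sit at any
 * position in the inputs[] array. */
#include <stdio.h>

enum { AUTO_PIN_MIC, AUTO_PIN_LINE_IN, AUTO_PIN_CD };
enum { PIN_IN = 0x20, PIN_VREF50 = 0x24, PIN_OUT = 0x40 };

struct auto_pin { int pin; int type; };

int main(void)
{
	/* The mic is deliberately NOT at index AUTO_PIN_MIC. */
	struct auto_pin inputs[] = {
		{ 0x1a, AUTO_PIN_LINE_IN },
		{ 0x1b, AUTO_PIN_MIC },
	};

	for (unsigned i = 0; i < sizeof(inputs) / sizeof(inputs[0]); i++) {
		int ctl = (inputs[i].type == AUTO_PIN_MIC) ? PIN_VREF50 : PIN_IN;
		printf("pin 0x%02x -> ctl 0x%02x\n", inputs[i].pin, ctl);
	}
	return 0;
}
```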
diff --git a/sound/soc/codecs/cx20442.c b/sound/soc/codecs/cx20442.c index bb4bf65b9e7e..0bb424af956f 100644 --- a/sound/soc/codecs/cx20442.c +++ b/sound/soc/codecs/cx20442.c | |||
@@ -367,7 +367,7 @@ static int cx20442_codec_remove(struct snd_soc_codec *codec) | |||
367 | return 0; | 367 | return 0; |
368 | } | 368 | } |
369 | 369 | ||
370 | static const u8 cx20442_reg = CX20442_TELOUT | CX20442_MIC; | 370 | static const u8 cx20442_reg; |
371 | 371 | ||
372 | static struct snd_soc_codec_driver cx20442_codec_dev = { | 372 | static struct snd_soc_codec_driver cx20442_codec_dev = { |
373 | .probe = cx20442_codec_probe, | 373 | .probe = cx20442_codec_probe, |
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c index 987476a5895f..017d99ceb42e 100644 --- a/sound/soc/codecs/wm8903.c +++ b/sound/soc/codecs/wm8903.c | |||
@@ -1482,7 +1482,7 @@ int wm8903_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack, | |||
1482 | WM8903_MICDET_EINT | WM8903_MICSHRT_EINT, | 1482 | WM8903_MICDET_EINT | WM8903_MICSHRT_EINT, |
1483 | irq_mask); | 1483 | irq_mask); |
1484 | 1484 | ||
1485 | if (det && shrt) { | 1485 | if (det || shrt) { |
1486 | /* Enable mic detection, this may not have been set through | 1486 | /* Enable mic detection, this may not have been set through |
1487 | * platform data (eg, if the defaults are OK). */ | 1487 | * platform data (eg, if the defaults are OK). */ |
1488 | snd_soc_update_bits(codec, WM8903_WRITE_SEQUENCER_0, | 1488 | snd_soc_update_bits(codec, WM8903_WRITE_SEQUENCER_0, |
diff --git a/sound/soc/codecs/wm8903.h b/sound/soc/codecs/wm8903.h index e8490f3edd03..e3ec2433b215 100644 --- a/sound/soc/codecs/wm8903.h +++ b/sound/soc/codecs/wm8903.h | |||
@@ -165,7 +165,7 @@ extern int wm8903_mic_detect(struct snd_soc_codec *codec, | |||
165 | 165 | ||
166 | #define WM8903_VMID_RES_50K 2 | 166 | #define WM8903_VMID_RES_50K 2 |
167 | #define WM8903_VMID_RES_250K 3 | 167 | #define WM8903_VMID_RES_250K 3 |
168 | #define WM8903_VMID_RES_5K 4 | 168 | #define WM8903_VMID_RES_5K 6 |
169 | 169 | ||
170 | /* | 170 | /* |
171 | * R8 (0x08) - Analogue DAC 0 | 171 | * R8 (0x08) - Analogue DAC 0 |
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c index 3351f77607b3..ebaee5ca7434 100644 --- a/sound/soc/codecs/wm8994.c +++ b/sound/soc/codecs/wm8994.c | |||
@@ -107,6 +107,9 @@ struct wm8994_priv { | |||
107 | 107 | ||
108 | int revision; | 108 | int revision; |
109 | struct wm8994_pdata *pdata; | 109 | struct wm8994_pdata *pdata; |
110 | |||
111 | unsigned int aif1clk_enable:1; | ||
112 | unsigned int aif2clk_enable:1; | ||
110 | }; | 113 | }; |
111 | 114 | ||
112 | static int wm8994_readable(unsigned int reg) | 115 | static int wm8994_readable(unsigned int reg) |
@@ -1004,6 +1007,93 @@ static void wm8994_update_class_w(struct snd_soc_codec *codec) | |||
1004 | } | 1007 | } |
1005 | } | 1008 | } |
1006 | 1009 | ||
1010 | static int late_enable_ev(struct snd_soc_dapm_widget *w, | ||
1011 | struct snd_kcontrol *kcontrol, int event) | ||
1012 | { | ||
1013 | struct snd_soc_codec *codec = w->codec; | ||
1014 | struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); | ||
1015 | |||
1016 | switch (event) { | ||
1017 | case SND_SOC_DAPM_PRE_PMU: | ||
1018 | if (wm8994->aif1clk_enable) | ||
1019 | snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, | ||
1020 | WM8994_AIF1CLK_ENA_MASK, | ||
1021 | WM8994_AIF1CLK_ENA); | ||
1022 | if (wm8994->aif2clk_enable) | ||
1023 | snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, | ||
1024 | WM8994_AIF2CLK_ENA_MASK, | ||
1025 | WM8994_AIF2CLK_ENA); | ||
1026 | break; | ||
1027 | } | ||
1028 | |||
1029 | return 0; | ||
1030 | } | ||
1031 | |||
1032 | static int late_disable_ev(struct snd_soc_dapm_widget *w, | ||
1033 | struct snd_kcontrol *kcontrol, int event) | ||
1034 | { | ||
1035 | struct snd_soc_codec *codec = w->codec; | ||
1036 | struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); | ||
1037 | |||
1038 | switch (event) { | ||
1039 | case SND_SOC_DAPM_POST_PMD: | ||
1040 | if (wm8994->aif1clk_enable) { | ||
1041 | snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, | ||
1042 | WM8994_AIF1CLK_ENA_MASK, 0); | ||
1043 | wm8994->aif1clk_enable = 0; | ||
1044 | } | ||
1045 | if (wm8994->aif2clk_enable) { | ||
1046 | snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, | ||
1047 | WM8994_AIF2CLK_ENA_MASK, 0); | ||
1048 | wm8994->aif2clk_enable = 0; | ||
1049 | } | ||
1050 | break; | ||
1051 | } | ||
1052 | |||
1053 | return 0; | ||
1054 | } | ||
1055 | |||
1056 | static int aif1clk_ev(struct snd_soc_dapm_widget *w, | ||
1057 | struct snd_kcontrol *kcontrol, int event) | ||
1058 | { | ||
1059 | struct snd_soc_codec *codec = w->codec; | ||
1060 | struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); | ||
1061 | |||
1062 | switch (event) { | ||
1063 | case SND_SOC_DAPM_PRE_PMU: | ||
1064 | wm8994->aif1clk_enable = 1; | ||
1065 | break; | ||
1066 | } | ||
1067 | |||
1068 | return 0; | ||
1069 | } | ||
1070 | |||
1071 | static int aif2clk_ev(struct snd_soc_dapm_widget *w, | ||
1072 | struct snd_kcontrol *kcontrol, int event) | ||
1073 | { | ||
1074 | struct snd_soc_codec *codec = w->codec; | ||
1075 | struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); | ||
1076 | |||
1077 | switch (event) { | ||
1078 | case SND_SOC_DAPM_PRE_PMU: | ||
1079 | wm8994->aif2clk_enable = 1; | ||
1080 | break; | ||
1081 | } | ||
1082 | |||
1083 | return 0; | ||
1084 | } | ||
1085 | |||
1086 | static int dac_ev(struct snd_soc_dapm_widget *w, | ||
1087 | struct snd_kcontrol *kcontrol, int event) | ||
1088 | { | ||
1089 | struct snd_soc_codec *codec = w->codec; | ||
1090 | unsigned int mask = 1 << w->shift; | ||
1091 | |||
1092 | snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, | ||
1093 | mask, mask); | ||
1094 | return 0; | ||
1095 | } | ||
1096 | |||
1007 | static const char *hp_mux_text[] = { | 1097 | static const char *hp_mux_text[] = { |
1008 | "Mixer", | 1098 | "Mixer", |
1009 | "DAC", | 1099 | "DAC", |
@@ -1272,6 +1362,47 @@ static const struct soc_enum aif2dacr_src_enum = | |||
1272 | static const struct snd_kcontrol_new aif2dacr_src_mux = | 1362 | static const struct snd_kcontrol_new aif2dacr_src_mux = |
1273 | SOC_DAPM_ENUM("AIF2DACR Mux", aif2dacr_src_enum); | 1363 | SOC_DAPM_ENUM("AIF2DACR Mux", aif2dacr_src_enum); |
1274 | 1364 | ||
1365 | static const struct snd_soc_dapm_widget wm8994_lateclk_revd_widgets[] = { | ||
1366 | SND_SOC_DAPM_SUPPLY("AIF1CLK", SND_SOC_NOPM, 0, 0, aif1clk_ev, | ||
1367 | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), | ||
1368 | SND_SOC_DAPM_SUPPLY("AIF2CLK", SND_SOC_NOPM, 0, 0, aif2clk_ev, | ||
1369 | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), | ||
1370 | |||
1371 | SND_SOC_DAPM_PGA_E("Late DAC1L Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0, | ||
1372 | late_enable_ev, SND_SOC_DAPM_PRE_PMU), | ||
1373 | SND_SOC_DAPM_PGA_E("Late DAC1R Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0, | ||
1374 | late_enable_ev, SND_SOC_DAPM_PRE_PMU), | ||
1375 | SND_SOC_DAPM_PGA_E("Late DAC2L Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0, | ||
1376 | late_enable_ev, SND_SOC_DAPM_PRE_PMU), | ||
1377 | SND_SOC_DAPM_PGA_E("Late DAC2R Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0, | ||
1378 | late_enable_ev, SND_SOC_DAPM_PRE_PMU), | ||
1379 | |||
1380 | SND_SOC_DAPM_POST("Late Disable PGA", late_disable_ev) | ||
1381 | }; | ||
1382 | |||
1383 | static const struct snd_soc_dapm_widget wm8994_lateclk_widgets[] = { | ||
1384 | SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, NULL, 0), | ||
1385 | SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, NULL, 0) | ||
1386 | }; | ||
1387 | |||
1388 | static const struct snd_soc_dapm_widget wm8994_dac_revd_widgets[] = { | ||
1389 | SND_SOC_DAPM_DAC_E("DAC2L", NULL, SND_SOC_NOPM, 3, 0, | ||
1390 | dac_ev, SND_SOC_DAPM_PRE_PMU), | ||
1391 | SND_SOC_DAPM_DAC_E("DAC2R", NULL, SND_SOC_NOPM, 2, 0, | ||
1392 | dac_ev, SND_SOC_DAPM_PRE_PMU), | ||
1393 | SND_SOC_DAPM_DAC_E("DAC1L", NULL, SND_SOC_NOPM, 1, 0, | ||
1394 | dac_ev, SND_SOC_DAPM_PRE_PMU), | ||
1395 | SND_SOC_DAPM_DAC_E("DAC1R", NULL, SND_SOC_NOPM, 0, 0, | ||
1396 | dac_ev, SND_SOC_DAPM_PRE_PMU), | ||
1397 | }; | ||
1398 | |||
1399 | static const struct snd_soc_dapm_widget wm8994_dac_widgets[] = { | ||
1400 | SND_SOC_DAPM_DAC("DAC2L", NULL, WM8994_POWER_MANAGEMENT_5, 3, 0), | ||
1401 | SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 2, 0), | ||
1402 | SND_SOC_DAPM_DAC("DAC1L", NULL, WM8994_POWER_MANAGEMENT_5, 1, 0), | ||
1403 | SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0), | ||
1404 | }; | ||
1405 | |||
1275 | static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = { | 1406 | static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = { |
1276 | SND_SOC_DAPM_INPUT("DMIC1DAT"), | 1407 | SND_SOC_DAPM_INPUT("DMIC1DAT"), |
1277 | SND_SOC_DAPM_INPUT("DMIC2DAT"), | 1408 | SND_SOC_DAPM_INPUT("DMIC2DAT"), |
@@ -1284,12 +1415,9 @@ SND_SOC_DAPM_SUPPLY("DSP1CLK", WM8994_CLOCKING_1, 3, 0, NULL, 0), | |||
1284 | SND_SOC_DAPM_SUPPLY("DSP2CLK", WM8994_CLOCKING_1, 2, 0, NULL, 0), | 1415 | SND_SOC_DAPM_SUPPLY("DSP2CLK", WM8994_CLOCKING_1, 2, 0, NULL, 0), |
1285 | SND_SOC_DAPM_SUPPLY("DSPINTCLK", WM8994_CLOCKING_1, 1, 0, NULL, 0), | 1416 | SND_SOC_DAPM_SUPPLY("DSPINTCLK", WM8994_CLOCKING_1, 1, 0, NULL, 0), |
1286 | 1417 | ||
1287 | SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, NULL, 0), | 1418 | SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", NULL, |
1288 | SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, NULL, 0), | ||
1289 | |||
1290 | SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", "AIF1 Capture", | ||
1291 | 0, WM8994_POWER_MANAGEMENT_4, 9, 0), | 1419 | 0, WM8994_POWER_MANAGEMENT_4, 9, 0), |
1292 | SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", "AIF1 Capture", | 1420 | SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", NULL, |
1293 | 0, WM8994_POWER_MANAGEMENT_4, 8, 0), | 1421 | 0, WM8994_POWER_MANAGEMENT_4, 8, 0), |
1294 | SND_SOC_DAPM_AIF_IN_E("AIF1DAC1L", NULL, 0, | 1422 | SND_SOC_DAPM_AIF_IN_E("AIF1DAC1L", NULL, 0, |
1295 | WM8994_POWER_MANAGEMENT_5, 9, 0, wm8958_aif_ev, | 1423 | WM8994_POWER_MANAGEMENT_5, 9, 0, wm8958_aif_ev, |
@@ -1298,9 +1426,9 @@ SND_SOC_DAPM_AIF_IN_E("AIF1DAC1R", NULL, 0, | |||
1298 | WM8994_POWER_MANAGEMENT_5, 8, 0, wm8958_aif_ev, | 1426 | WM8994_POWER_MANAGEMENT_5, 8, 0, wm8958_aif_ev, |
1299 | SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), | 1427 | SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), |
1300 | 1428 | ||
1301 | SND_SOC_DAPM_AIF_OUT("AIF1ADC2L", "AIF1 Capture", | 1429 | SND_SOC_DAPM_AIF_OUT("AIF1ADC2L", NULL, |
1302 | 0, WM8994_POWER_MANAGEMENT_4, 11, 0), | 1430 | 0, WM8994_POWER_MANAGEMENT_4, 11, 0), |
1303 | SND_SOC_DAPM_AIF_OUT("AIF1ADC2R", "AIF1 Capture", | 1431 | SND_SOC_DAPM_AIF_OUT("AIF1ADC2R", NULL, |
1304 | 0, WM8994_POWER_MANAGEMENT_4, 10, 0), | 1432 | 0, WM8994_POWER_MANAGEMENT_4, 10, 0), |
1305 | SND_SOC_DAPM_AIF_IN_E("AIF1DAC2L", NULL, 0, | 1433 | SND_SOC_DAPM_AIF_IN_E("AIF1DAC2L", NULL, 0, |
1306 | WM8994_POWER_MANAGEMENT_5, 11, 0, wm8958_aif_ev, | 1434 | WM8994_POWER_MANAGEMENT_5, 11, 0, wm8958_aif_ev, |
@@ -1345,6 +1473,7 @@ SND_SOC_DAPM_AIF_IN_E("AIF2DACR", NULL, 0, | |||
1345 | 1473 | ||
1346 | SND_SOC_DAPM_AIF_IN("AIF1DACDAT", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0), | 1474 | SND_SOC_DAPM_AIF_IN("AIF1DACDAT", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0), |
1347 | SND_SOC_DAPM_AIF_IN("AIF2DACDAT", "AIF2 Playback", 0, SND_SOC_NOPM, 0, 0), | 1475 | SND_SOC_DAPM_AIF_IN("AIF2DACDAT", "AIF2 Playback", 0, SND_SOC_NOPM, 0, 0), |
1476 | SND_SOC_DAPM_AIF_OUT("AIF1ADCDAT", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0), | ||
1348 | SND_SOC_DAPM_AIF_OUT("AIF2ADCDAT", "AIF2 Capture", 0, SND_SOC_NOPM, 0, 0), | 1477 | SND_SOC_DAPM_AIF_OUT("AIF2ADCDAT", "AIF2 Capture", 0, SND_SOC_NOPM, 0, 0), |
1349 | 1478 | ||
1350 | SND_SOC_DAPM_MUX("AIF1DAC Mux", SND_SOC_NOPM, 0, 0, &aif1dac_mux), | 1479 | SND_SOC_DAPM_MUX("AIF1DAC Mux", SND_SOC_NOPM, 0, 0, &aif1dac_mux), |
@@ -1371,11 +1500,6 @@ SND_SOC_DAPM_ADC("ADCR", NULL, SND_SOC_NOPM, 0, 0), | |||
1371 | SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux), | 1500 | SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux), |
1372 | SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux), | 1501 | SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux), |
1373 | 1502 | ||
1374 | SND_SOC_DAPM_DAC("DAC2L", NULL, WM8994_POWER_MANAGEMENT_5, 3, 0), | ||
1375 | SND_SOC_DAPM_DAC("DAC2R", NULL, WM8994_POWER_MANAGEMENT_5, 2, 0), | ||
1376 | SND_SOC_DAPM_DAC("DAC1L", NULL, WM8994_POWER_MANAGEMENT_5, 1, 0), | ||
1377 | SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0), | ||
1378 | |||
1379 | SND_SOC_DAPM_MUX("Left Headphone Mux", SND_SOC_NOPM, 0, 0, &hpl_mux), | 1503 | SND_SOC_DAPM_MUX("Left Headphone Mux", SND_SOC_NOPM, 0, 0, &hpl_mux), |
1380 | SND_SOC_DAPM_MUX("Right Headphone Mux", SND_SOC_NOPM, 0, 0, &hpr_mux), | 1504 | SND_SOC_DAPM_MUX("Right Headphone Mux", SND_SOC_NOPM, 0, 0, &hpr_mux), |
1381 | 1505 | ||
@@ -1515,14 +1639,12 @@ static const struct snd_soc_dapm_route intercon[] = { | |||
1515 | { "AIF2ADC Mux", "AIF3DACDAT", "AIF3ADCDAT" }, | 1639 | { "AIF2ADC Mux", "AIF3DACDAT", "AIF3ADCDAT" }, |
1516 | 1640 | ||
1517 | /* DAC1 inputs */ | 1641 | /* DAC1 inputs */ |
1518 | { "DAC1L", NULL, "DAC1L Mixer" }, | ||
1519 | { "DAC1L Mixer", "AIF2 Switch", "AIF2DACL" }, | 1642 | { "DAC1L Mixer", "AIF2 Switch", "AIF2DACL" }, |
1520 | { "DAC1L Mixer", "AIF1.2 Switch", "AIF1DAC2L" }, | 1643 | { "DAC1L Mixer", "AIF1.2 Switch", "AIF1DAC2L" }, |
1521 | { "DAC1L Mixer", "AIF1.1 Switch", "AIF1DAC1L" }, | 1644 | { "DAC1L Mixer", "AIF1.1 Switch", "AIF1DAC1L" }, |
1522 | { "DAC1L Mixer", "Left Sidetone Switch", "Left Sidetone" }, | 1645 | { "DAC1L Mixer", "Left Sidetone Switch", "Left Sidetone" }, |
1523 | { "DAC1L Mixer", "Right Sidetone Switch", "Right Sidetone" }, | 1646 | { "DAC1L Mixer", "Right Sidetone Switch", "Right Sidetone" }, |
1524 | 1647 | ||
1525 | { "DAC1R", NULL, "DAC1R Mixer" }, | ||
1526 | { "DAC1R Mixer", "AIF2 Switch", "AIF2DACR" }, | 1648 | { "DAC1R Mixer", "AIF2 Switch", "AIF2DACR" }, |
1527 | { "DAC1R Mixer", "AIF1.2 Switch", "AIF1DAC2R" }, | 1649 | { "DAC1R Mixer", "AIF1.2 Switch", "AIF1DAC2R" }, |
1528 | { "DAC1R Mixer", "AIF1.1 Switch", "AIF1DAC1R" }, | 1650 | { "DAC1R Mixer", "AIF1.1 Switch", "AIF1DAC1R" }, |
@@ -1531,7 +1653,6 @@ static const struct snd_soc_dapm_route intercon[] = { | |||
1531 | 1653 | ||
1532 | /* DAC2/AIF2 outputs */ | 1654 | /* DAC2/AIF2 outputs */ |
1533 | { "AIF2ADCL", NULL, "AIF2DAC2L Mixer" }, | 1655 | { "AIF2ADCL", NULL, "AIF2DAC2L Mixer" }, |
1534 | { "DAC2L", NULL, "AIF2DAC2L Mixer" }, | ||
1535 | { "AIF2DAC2L Mixer", "AIF2 Switch", "AIF2DACL" }, | 1656 | { "AIF2DAC2L Mixer", "AIF2 Switch", "AIF2DACL" }, |
1536 | { "AIF2DAC2L Mixer", "AIF1.2 Switch", "AIF1DAC2L" }, | 1657 | { "AIF2DAC2L Mixer", "AIF1.2 Switch", "AIF1DAC2L" }, |
1537 | { "AIF2DAC2L Mixer", "AIF1.1 Switch", "AIF1DAC1L" }, | 1658 | { "AIF2DAC2L Mixer", "AIF1.1 Switch", "AIF1DAC1L" }, |
@@ -1539,13 +1660,17 @@ static const struct snd_soc_dapm_route intercon[] = { | |||
1539 | { "AIF2DAC2L Mixer", "Right Sidetone Switch", "Right Sidetone" }, | 1660 | { "AIF2DAC2L Mixer", "Right Sidetone Switch", "Right Sidetone" }, |
1540 | 1661 | ||
1541 | { "AIF2ADCR", NULL, "AIF2DAC2R Mixer" }, | 1662 | { "AIF2ADCR", NULL, "AIF2DAC2R Mixer" }, |
1542 | { "DAC2R", NULL, "AIF2DAC2R Mixer" }, | ||
1543 | { "AIF2DAC2R Mixer", "AIF2 Switch", "AIF2DACR" }, | 1663 | { "AIF2DAC2R Mixer", "AIF2 Switch", "AIF2DACR" }, |
1544 | { "AIF2DAC2R Mixer", "AIF1.2 Switch", "AIF1DAC2R" }, | 1664 | { "AIF2DAC2R Mixer", "AIF1.2 Switch", "AIF1DAC2R" }, |
1545 | { "AIF2DAC2R Mixer", "AIF1.1 Switch", "AIF1DAC1R" }, | 1665 | { "AIF2DAC2R Mixer", "AIF1.1 Switch", "AIF1DAC1R" }, |
1546 | { "AIF2DAC2R Mixer", "Left Sidetone Switch", "Left Sidetone" }, | 1666 | { "AIF2DAC2R Mixer", "Left Sidetone Switch", "Left Sidetone" }, |
1547 | { "AIF2DAC2R Mixer", "Right Sidetone Switch", "Right Sidetone" }, | 1667 | { "AIF2DAC2R Mixer", "Right Sidetone Switch", "Right Sidetone" }, |
1548 | 1668 | ||
1669 | { "AIF1ADCDAT", NULL, "AIF1ADC1L" }, | ||
1670 | { "AIF1ADCDAT", NULL, "AIF1ADC1R" }, | ||
1671 | { "AIF1ADCDAT", NULL, "AIF1ADC2L" }, | ||
1672 | { "AIF1ADCDAT", NULL, "AIF1ADC2R" }, | ||
1673 | |||
1549 | { "AIF2ADCDAT", NULL, "AIF2ADC Mux" }, | 1674 | { "AIF2ADCDAT", NULL, "AIF2ADC Mux" }, |
1550 | 1675 | ||
1551 | /* AIF3 output */ | 1676 | /* AIF3 output */ |
@@ -1578,6 +1703,31 @@ static const struct snd_soc_dapm_route intercon[] = { | |||
1578 | { "Right Headphone Mux", "DAC", "DAC1R" }, | 1703 | { "Right Headphone Mux", "DAC", "DAC1R" }, |
1579 | }; | 1704 | }; |
1580 | 1705 | ||
1706 | static const struct snd_soc_dapm_route wm8994_lateclk_revd_intercon[] = { | ||
1707 | { "DAC1L", NULL, "Late DAC1L Enable PGA" }, | ||
1708 | { "Late DAC1L Enable PGA", NULL, "DAC1L Mixer" }, | ||
1709 | { "DAC1R", NULL, "Late DAC1R Enable PGA" }, | ||
1710 | { "Late DAC1R Enable PGA", NULL, "DAC1R Mixer" }, | ||
1711 | { "DAC2L", NULL, "Late DAC2L Enable PGA" }, | ||
1712 | { "Late DAC2L Enable PGA", NULL, "AIF2DAC2L Mixer" }, | ||
1713 | { "DAC2R", NULL, "Late DAC2R Enable PGA" }, | ||
1714 | { "Late DAC2R Enable PGA", NULL, "AIF2DAC2R Mixer" } | ||
1715 | }; | ||
1716 | |||
1717 | static const struct snd_soc_dapm_route wm8994_lateclk_intercon[] = { | ||
1718 | { "DAC1L", NULL, "DAC1L Mixer" }, | ||
1719 | { "DAC1R", NULL, "DAC1R Mixer" }, | ||
1720 | { "DAC2L", NULL, "AIF2DAC2L Mixer" }, | ||
1721 | { "DAC2R", NULL, "AIF2DAC2R Mixer" }, | ||
1722 | }; | ||
1723 | |||
1724 | static const struct snd_soc_dapm_route wm8994_revd_intercon[] = { | ||
1725 | { "AIF1DACDAT", NULL, "AIF2DACDAT" }, | ||
1726 | { "AIF2DACDAT", NULL, "AIF1DACDAT" }, | ||
1727 | { "AIF1ADCDAT", NULL, "AIF2ADCDAT" }, | ||
1728 | { "AIF2ADCDAT", NULL, "AIF1ADCDAT" }, | ||
1729 | }; | ||
1730 | |||
1581 | static const struct snd_soc_dapm_route wm8994_intercon[] = { | 1731 | static const struct snd_soc_dapm_route wm8994_intercon[] = { |
1582 | { "AIF2DACL", NULL, "AIF2DAC Mux" }, | 1732 | { "AIF2DACL", NULL, "AIF2DAC Mux" }, |
1583 | { "AIF2DACR", NULL, "AIF2DAC Mux" }, | 1733 | { "AIF2DACR", NULL, "AIF2DAC Mux" }, |
@@ -2501,6 +2651,22 @@ static int wm8994_resume(struct snd_soc_codec *codec) | |||
2501 | { | 2651 | { |
2502 | struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); | 2652 | struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); |
2503 | int i, ret; | 2653 | int i, ret; |
2654 | unsigned int val, mask; | ||
2655 | |||
2656 | if (wm8994->revision < 4) { | ||
2657 | /* force a HW read */ | ||
2658 | val = wm8994_reg_read(codec->control_data, | ||
2659 | WM8994_POWER_MANAGEMENT_5); | ||
2660 | |||
2661 | /* modify the cache only */ | ||
2662 | codec->cache_only = 1; | ||
2663 | mask = WM8994_DAC1R_ENA | WM8994_DAC1L_ENA | | ||
2664 | WM8994_DAC2R_ENA | WM8994_DAC2L_ENA; | ||
2665 | val &= mask; | ||
2666 | snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, | ||
2667 | mask, val); | ||
2668 | codec->cache_only = 0; | ||
2669 | } | ||
2504 | 2670 | ||
2505 | /* Restore the registers */ | 2671 | /* Restore the registers */ |
2506 | ret = snd_soc_cache_sync(codec); | 2672 | ret = snd_soc_cache_sync(codec); |
@@ -2834,11 +3000,10 @@ static void wm8958_default_micdet(u16 status, void *data) | |||
2834 | report |= SND_JACK_BTN_5; | 3000 | report |= SND_JACK_BTN_5; |
2835 | 3001 | ||
2836 | done: | 3002 | done: |
2837 | snd_soc_jack_report(wm8994->micdet[0].jack, | 3003 | snd_soc_jack_report(wm8994->micdet[0].jack, report, |
2838 | SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2 | | 3004 | SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2 | |
2839 | SND_JACK_BTN_3 | SND_JACK_BTN_4 | SND_JACK_BTN_5 | | 3005 | SND_JACK_BTN_3 | SND_JACK_BTN_4 | SND_JACK_BTN_5 | |
2840 | SND_JACK_MICROPHONE | SND_JACK_VIDEOOUT, | 3006 | SND_JACK_MICROPHONE | SND_JACK_VIDEOOUT); |
2841 | report); | ||
2842 | } | 3007 | } |
2843 | 3008 | ||
2844 | /** | 3009 | /** |
@@ -3112,6 +3277,17 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec) | |||
3112 | case WM8994: | 3277 | case WM8994: |
3113 | snd_soc_dapm_new_controls(dapm, wm8994_specific_dapm_widgets, | 3278 | snd_soc_dapm_new_controls(dapm, wm8994_specific_dapm_widgets, |
3114 | ARRAY_SIZE(wm8994_specific_dapm_widgets)); | 3279 | ARRAY_SIZE(wm8994_specific_dapm_widgets)); |
3280 | if (wm8994->revision < 4) { | ||
3281 | snd_soc_dapm_new_controls(dapm, wm8994_lateclk_revd_widgets, | ||
3282 | ARRAY_SIZE(wm8994_lateclk_revd_widgets)); | ||
3283 | snd_soc_dapm_new_controls(dapm, wm8994_dac_revd_widgets, | ||
3284 | ARRAY_SIZE(wm8994_dac_revd_widgets)); | ||
3285 | } else { | ||
3286 | snd_soc_dapm_new_controls(dapm, wm8994_lateclk_widgets, | ||
3287 | ARRAY_SIZE(wm8994_lateclk_widgets)); | ||
3288 | snd_soc_dapm_new_controls(dapm, wm8994_dac_widgets, | ||
3289 | ARRAY_SIZE(wm8994_dac_widgets)); | ||
3290 | } | ||
3115 | break; | 3291 | break; |
3116 | case WM8958: | 3292 | case WM8958: |
3117 | snd_soc_add_controls(codec, wm8958_snd_controls, | 3293 | snd_soc_add_controls(codec, wm8958_snd_controls, |
@@ -3129,6 +3305,16 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec) | |||
3129 | case WM8994: | 3305 | case WM8994: |
3130 | snd_soc_dapm_add_routes(dapm, wm8994_intercon, | 3306 | snd_soc_dapm_add_routes(dapm, wm8994_intercon, |
3131 | ARRAY_SIZE(wm8994_intercon)); | 3307 | ARRAY_SIZE(wm8994_intercon)); |
3308 | |||
3309 | if (wm8994->revision < 4) { | ||
3310 | snd_soc_dapm_add_routes(dapm, wm8994_revd_intercon, | ||
3311 | ARRAY_SIZE(wm8994_revd_intercon)); | ||
3312 | snd_soc_dapm_add_routes(dapm, wm8994_lateclk_revd_intercon, | ||
3313 | ARRAY_SIZE(wm8994_lateclk_revd_intercon)); | ||
3314 | } else { | ||
3315 | snd_soc_dapm_add_routes(dapm, wm8994_lateclk_intercon, | ||
3316 | ARRAY_SIZE(wm8994_lateclk_intercon)); | ||
3317 | } | ||
3132 | break; | 3318 | break; |
3133 | case WM8958: | 3319 | case WM8958: |
3134 | snd_soc_dapm_add_routes(dapm, wm8958_intercon, | 3320 | snd_soc_dapm_add_routes(dapm, wm8958_intercon, |
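The wm8994.c changes above register different DAPM widget and route sets depending on silicon revision: pre-rev-D parts get event-driven "late clock" and DAC widgets, while newer revisions use plain register-backed ones. A rough standalone sketch of that revision gate, with stand-in structures rather than the real ASoC API:

```c
/* Sketch of the revision-dependent widget registration: older silicon is
 * given event-driven supplies, newer silicon register-backed ones.  The
 * structures and registration helper are hypothetical. */
#include <stddef.h>
#include <stdio.h>

struct dapm_widget { const char *name; };

static const struct dapm_widget lateclk_revd_widgets[] = {
	{ "AIF1CLK (event driven)" }, { "AIF2CLK (event driven)" },
};

static const struct dapm_widget lateclk_widgets[] = {
	{ "AIF1CLK (register backed)" }, { "AIF2CLK (register backed)" },
};

static void register_widgets(const struct dapm_widget *w, size_t n)
{
	for (size_t i = 0; i < n; i++)
		printf("registering widget: %s\n", w[i].name);
}

int main(void)
{
	int revision = 3; /* pretend the part reports a pre-rev-D revision */

	if (revision < 4)
		register_widgets(lateclk_revd_widgets, 2);
	else
		register_widgets(lateclk_widgets, 2);
	return 0;
}
```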
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c index 613df5db0b32..516892706063 100644 --- a/sound/soc/codecs/wm_hubs.c +++ b/sound/soc/codecs/wm_hubs.c | |||
@@ -674,6 +674,9 @@ SND_SOC_DAPM_OUTPUT("LINEOUT2N"), | |||
674 | }; | 674 | }; |
675 | 675 | ||
676 | static const struct snd_soc_dapm_route analogue_routes[] = { | 676 | static const struct snd_soc_dapm_route analogue_routes[] = { |
677 | { "MICBIAS1", NULL, "CLK_SYS" }, | ||
678 | { "MICBIAS2", NULL, "CLK_SYS" }, | ||
679 | |||
677 | { "IN1L PGA", "IN1LP Switch", "IN1LP" }, | 680 | { "IN1L PGA", "IN1LP Switch", "IN1LP" }, |
678 | { "IN1L PGA", "IN1LN Switch", "IN1LN" }, | 681 | { "IN1L PGA", "IN1LN Switch", "IN1LN" }, |
679 | 682 | ||
diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c index b36f0b39b090..fe7984221eb9 100644 --- a/sound/soc/davinci/davinci-evm.c +++ b/sound/soc/davinci/davinci-evm.c | |||
@@ -218,7 +218,19 @@ static struct snd_soc_dai_link dm6467_evm_dai[] = { | |||
218 | .ops = &evm_spdif_ops, | 218 | .ops = &evm_spdif_ops, |
219 | }, | 219 | }, |
220 | }; | 220 | }; |
221 | static struct snd_soc_dai_link da8xx_evm_dai = { | 221 | |
222 | static struct snd_soc_dai_link da830_evm_dai = { | ||
223 | .name = "TLV320AIC3X", | ||
224 | .stream_name = "AIC3X", | ||
225 | .cpu_dai_name = "davinci-mcasp.1", | ||
226 | .codec_dai_name = "tlv320aic3x-hifi", | ||
227 | .codec_name = "tlv320aic3x-codec.1-0018", | ||
228 | .platform_name = "davinci-pcm-audio", | ||
229 | .init = evm_aic3x_init, | ||
230 | .ops = &evm_ops, | ||
231 | }; | ||
232 | |||
233 | static struct snd_soc_dai_link da850_evm_dai = { | ||
222 | .name = "TLV320AIC3X", | 234 | .name = "TLV320AIC3X", |
223 | .stream_name = "AIC3X", | 235 | .stream_name = "AIC3X", |
224 | .cpu_dai_name= "davinci-mcasp.0", | 236 | .cpu_dai_name= "davinci-mcasp.0", |
@@ -259,13 +271,13 @@ static struct snd_soc_card dm6467_snd_soc_card_evm = { | |||
259 | 271 | ||
260 | static struct snd_soc_card da830_snd_soc_card = { | 272 | static struct snd_soc_card da830_snd_soc_card = { |
261 | .name = "DA830/OMAP-L137 EVM", | 273 | .name = "DA830/OMAP-L137 EVM", |
262 | .dai_link = &da8xx_evm_dai, | 274 | .dai_link = &da830_evm_dai, |
263 | .num_links = 1, | 275 | .num_links = 1, |
264 | }; | 276 | }; |
265 | 277 | ||
266 | static struct snd_soc_card da850_snd_soc_card = { | 278 | static struct snd_soc_card da850_snd_soc_card = { |
267 | .name = "DA850/OMAP-L138 EVM", | 279 | .name = "DA850/OMAP-L138 EVM", |
268 | .dai_link = &da8xx_evm_dai, | 280 | .dai_link = &da850_evm_dai, |
269 | .num_links = 1, | 281 | .num_links = 1, |
270 | }; | 282 | }; |
271 | 283 | ||
diff --git a/sound/soc/imx/eukrea-tlv320.c b/sound/soc/imx/eukrea-tlv320.c index e20c9e1457c0..1e9bccae4e80 100644 --- a/sound/soc/imx/eukrea-tlv320.c +++ b/sound/soc/imx/eukrea-tlv320.c | |||
@@ -79,7 +79,7 @@ static struct snd_soc_dai_link eukrea_tlv320_dai = { | |||
79 | .name = "tlv320aic23", | 79 | .name = "tlv320aic23", |
80 | .stream_name = "TLV320AIC23", | 80 | .stream_name = "TLV320AIC23", |
81 | .codec_dai_name = "tlv320aic23-hifi", | 81 | .codec_dai_name = "tlv320aic23-hifi", |
82 | .platform_name = "imx-pcm-audio.0", | 82 | .platform_name = "imx-fiq-pcm-audio.0", |
83 | .codec_name = "tlv320aic23-codec.0-001a", | 83 | .codec_name = "tlv320aic23-codec.0-001a", |
84 | .cpu_dai_name = "imx-ssi.0", | 84 | .cpu_dai_name = "imx-ssi.0", |
85 | .ops = &eukrea_tlv320_snd_ops, | 85 | .ops = &eukrea_tlv320_snd_ops, |
diff --git a/sound/soc/pxa/e740_wm9705.c b/sound/soc/pxa/e740_wm9705.c index 28333e7d9c50..dc65650a6fa1 100644 --- a/sound/soc/pxa/e740_wm9705.c +++ b/sound/soc/pxa/e740_wm9705.c | |||
@@ -117,7 +117,7 @@ static struct snd_soc_dai_link e740_dai[] = { | |||
117 | { | 117 | { |
118 | .name = "AC97", | 118 | .name = "AC97", |
119 | .stream_name = "AC97 HiFi", | 119 | .stream_name = "AC97 HiFi", |
120 | .cpu_dai_name = "pxa-ac97.0", | 120 | .cpu_dai_name = "pxa2xx-ac97", |
121 | .codec_dai_name = "wm9705-hifi", | 121 | .codec_dai_name = "wm9705-hifi", |
122 | .platform_name = "pxa-pcm-audio", | 122 | .platform_name = "pxa-pcm-audio", |
123 | .codec_name = "wm9705-codec", | 123 | .codec_name = "wm9705-codec", |
@@ -126,7 +126,7 @@ static struct snd_soc_dai_link e740_dai[] = { | |||
126 | { | 126 | { |
127 | .name = "AC97 Aux", | 127 | .name = "AC97 Aux", |
128 | .stream_name = "AC97 Aux", | 128 | .stream_name = "AC97 Aux", |
129 | .cpu_dai_name = "pxa-ac97.1", | 129 | .cpu_dai_name = "pxa2xx-ac97-aux", |
130 | .codec_dai_name = "wm9705-aux", | 130 | .codec_dai_name = "wm9705-aux", |
131 | .platform_name = "pxa-pcm-audio", | 131 | .platform_name = "pxa-pcm-audio", |
132 | .codec_name = "wm9705-codec", | 132 | .codec_name = "wm9705-codec", |
diff --git a/sound/soc/pxa/e750_wm9705.c b/sound/soc/pxa/e750_wm9705.c index 01bf31675c55..51897fcd911b 100644 --- a/sound/soc/pxa/e750_wm9705.c +++ b/sound/soc/pxa/e750_wm9705.c | |||
@@ -99,7 +99,7 @@ static struct snd_soc_dai_link e750_dai[] = { | |||
99 | { | 99 | { |
100 | .name = "AC97", | 100 | .name = "AC97", |
101 | .stream_name = "AC97 HiFi", | 101 | .stream_name = "AC97 HiFi", |
102 | .cpu_dai_name = "pxa-ac97.0", | 102 | .cpu_dai_name = "pxa2xx-ac97", |
103 | .codec_dai_name = "wm9705-hifi", | 103 | .codec_dai_name = "wm9705-hifi", |
104 | .platform_name = "pxa-pcm-audio", | 104 | .platform_name = "pxa-pcm-audio", |
105 | .codec_name = "wm9705-codec", | 105 | .codec_name = "wm9705-codec", |
@@ -109,7 +109,7 @@ static struct snd_soc_dai_link e750_dai[] = { | |||
109 | { | 109 | { |
110 | .name = "AC97 Aux", | 110 | .name = "AC97 Aux", |
111 | .stream_name = "AC97 Aux", | 111 | .stream_name = "AC97 Aux", |
112 | .cpu_dai_name = "pxa-ac97.1", | 112 | .cpu_dai_name = "pxa2xx-ac97-aux", |
113 | .codec_dai_name ="wm9705-aux", | 113 | .codec_dai_name ="wm9705-aux", |
114 | .platform_name = "pxa-pcm-audio", | 114 | .platform_name = "pxa-pcm-audio", |
115 | .codec_name = "wm9705-codec", | 115 | .codec_name = "wm9705-codec", |
diff --git a/sound/soc/pxa/e800_wm9712.c b/sound/soc/pxa/e800_wm9712.c index c6a37c6ef23b..053ed208e59f 100644 --- a/sound/soc/pxa/e800_wm9712.c +++ b/sound/soc/pxa/e800_wm9712.c | |||
@@ -89,7 +89,7 @@ static struct snd_soc_dai_link e800_dai[] = { | |||
89 | { | 89 | { |
90 | .name = "AC97", | 90 | .name = "AC97", |
91 | .stream_name = "AC97 HiFi", | 91 | .stream_name = "AC97 HiFi", |
92 | .cpu_dai_name = "pxa-ac97.0", | 92 | .cpu_dai_name = "pxa2xx-ac97", |
93 | .codec_dai_name = "wm9712-hifi", | 93 | .codec_dai_name = "wm9712-hifi", |
94 | .platform_name = "pxa-pcm-audio", | 94 | .platform_name = "pxa-pcm-audio", |
95 | .codec_name = "wm9712-codec", | 95 | .codec_name = "wm9712-codec", |
@@ -98,7 +98,7 @@ static struct snd_soc_dai_link e800_dai[] = { | |||
98 | { | 98 | { |
99 | .name = "AC97 Aux", | 99 | .name = "AC97 Aux", |
100 | .stream_name = "AC97 Aux", | 100 | .stream_name = "AC97 Aux", |
101 | .cpu_dai_name = "pxa-ac97.1", | 101 | .cpu_dai_name = "pxa2xx-ac97-aux", |
102 | .codec_dai_name ="wm9712-aux", | 102 | .codec_dai_name ="wm9712-aux", |
103 | .platform_name = "pxa-pcm-audio", | 103 | .platform_name = "pxa-pcm-audio", |
104 | .codec_name = "wm9712-codec", | 104 | .codec_name = "wm9712-codec", |
diff --git a/sound/soc/pxa/em-x270.c b/sound/soc/pxa/em-x270.c index fc22e6eefc98..b13a4252812d 100644 --- a/sound/soc/pxa/em-x270.c +++ b/sound/soc/pxa/em-x270.c | |||
@@ -37,7 +37,7 @@ static struct snd_soc_dai_link em_x270_dai[] = { | |||
37 | { | 37 | { |
38 | .name = "AC97", | 38 | .name = "AC97", |
39 | .stream_name = "AC97 HiFi", | 39 | .stream_name = "AC97 HiFi", |
40 | .cpu_dai_name = "pxa-ac97.0", | 40 | .cpu_dai_name = "pxa2xx-ac97", |
41 | .codec_dai_name = "wm9712-hifi", | 41 | .codec_dai_name = "wm9712-hifi", |
42 | .platform_name = "pxa-pcm-audio", | 42 | .platform_name = "pxa-pcm-audio", |
43 | .codec_name = "wm9712-codec", | 43 | .codec_name = "wm9712-codec", |
@@ -45,7 +45,7 @@ static struct snd_soc_dai_link em_x270_dai[] = { | |||
45 | { | 45 | { |
46 | .name = "AC97 Aux", | 46 | .name = "AC97 Aux", |
47 | .stream_name = "AC97 Aux", | 47 | .stream_name = "AC97 Aux", |
48 | .cpu_dai_name = "pxa-ac97.1", | 48 | .cpu_dai_name = "pxa2xx-ac97-aux", |
49 | .codec_dai_name ="wm9712-aux", | 49 | .codec_dai_name ="wm9712-aux", |
50 | .platform_name = "pxa-pcm-audio", | 50 | .platform_name = "pxa-pcm-audio", |
51 | .codec_name = "wm9712-codec", | 51 | .codec_name = "wm9712-codec", |
diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c index 0d70fc8c12bd..38ca6759907e 100644 --- a/sound/soc/pxa/mioa701_wm9713.c +++ b/sound/soc/pxa/mioa701_wm9713.c | |||
@@ -162,7 +162,7 @@ static struct snd_soc_dai_link mioa701_dai[] = { | |||
162 | { | 162 | { |
163 | .name = "AC97", | 163 | .name = "AC97", |
164 | .stream_name = "AC97 HiFi", | 164 | .stream_name = "AC97 HiFi", |
165 | .cpu_dai_name = "pxa-ac97.0", | 165 | .cpu_dai_name = "pxa2xx-ac97", |
166 | .codec_dai_name = "wm9713-hifi", | 166 | .codec_dai_name = "wm9713-hifi", |
167 | .codec_name = "wm9713-codec", | 167 | .codec_name = "wm9713-codec", |
168 | .init = mioa701_wm9713_init, | 168 | .init = mioa701_wm9713_init, |
@@ -172,7 +172,7 @@ static struct snd_soc_dai_link mioa701_dai[] = { | |||
172 | { | 172 | { |
173 | .name = "AC97 Aux", | 173 | .name = "AC97 Aux", |
174 | .stream_name = "AC97 Aux", | 174 | .stream_name = "AC97 Aux", |
175 | .cpu_dai_name = "pxa-ac97.1", | 175 | .cpu_dai_name = "pxa2xx-ac97-aux", |
176 | .codec_dai_name ="wm9713-aux", | 176 | .codec_dai_name ="wm9713-aux", |
177 | .codec_name = "wm9713-codec", | 177 | .codec_name = "wm9713-codec", |
178 | .platform_name = "pxa-pcm-audio", | 178 | .platform_name = "pxa-pcm-audio", |
diff --git a/sound/soc/pxa/palm27x.c b/sound/soc/pxa/palm27x.c index 857db96d4a4f..504e4004f004 100644 --- a/sound/soc/pxa/palm27x.c +++ b/sound/soc/pxa/palm27x.c | |||
@@ -132,7 +132,7 @@ static struct snd_soc_dai_link palm27x_dai[] = { | |||
132 | { | 132 | { |
133 | .name = "AC97 HiFi", | 133 | .name = "AC97 HiFi", |
134 | .stream_name = "AC97 HiFi", | 134 | .stream_name = "AC97 HiFi", |
135 | .cpu_dai_name = "pxa-ac97.0", | 135 | .cpu_dai_name = "pxa2xx-ac97", |
136 | .codec_dai_name = "wm9712-hifi", | 136 | .codec_dai_name = "wm9712-hifi", |
137 | .codec_name = "wm9712-codec", | 137 | .codec_name = "wm9712-codec", |
138 | .platform_name = "pxa-pcm-audio", | 138 | .platform_name = "pxa-pcm-audio", |
@@ -141,7 +141,7 @@ static struct snd_soc_dai_link palm27x_dai[] = { | |||
141 | { | 141 | { |
142 | .name = "AC97 Aux", | 142 | .name = "AC97 Aux", |
143 | .stream_name = "AC97 Aux", | 143 | .stream_name = "AC97 Aux", |
144 | .cpu_dai_name = "pxa-ac97.1", | 144 | .cpu_dai_name = "pxa2xx-ac97-aux", |
145 | .codec_dai_name = "wm9712-aux", | 145 | .codec_dai_name = "wm9712-aux", |
146 | .codec_name = "wm9712-codec", | 146 | .codec_name = "wm9712-codec", |
147 | .platform_name = "pxa-pcm-audio", | 147 | .platform_name = "pxa-pcm-audio", |
diff --git a/sound/soc/pxa/tosa.c b/sound/soc/pxa/tosa.c index f75804ef0897..4b6e5d608b42 100644 --- a/sound/soc/pxa/tosa.c +++ b/sound/soc/pxa/tosa.c | |||
@@ -219,7 +219,7 @@ static struct snd_soc_dai_link tosa_dai[] = { | |||
219 | { | 219 | { |
220 | .name = "AC97", | 220 | .name = "AC97", |
221 | .stream_name = "AC97 HiFi", | 221 | .stream_name = "AC97 HiFi", |
222 | .cpu_dai_name = "pxa-ac97.0", | 222 | .cpu_dai_name = "pxa2xx-ac97", |
223 | .codec_dai_name = "wm9712-hifi", | 223 | .codec_dai_name = "wm9712-hifi", |
224 | .platform_name = "pxa-pcm-audio", | 224 | .platform_name = "pxa-pcm-audio", |
225 | .codec_name = "wm9712-codec", | 225 | .codec_name = "wm9712-codec", |
@@ -229,7 +229,7 @@ static struct snd_soc_dai_link tosa_dai[] = { | |||
229 | { | 229 | { |
230 | .name = "AC97 Aux", | 230 | .name = "AC97 Aux", |
231 | .stream_name = "AC97 Aux", | 231 | .stream_name = "AC97 Aux", |
232 | .cpu_dai_name = "pxa-ac97.1", | 232 | .cpu_dai_name = "pxa2xx-ac97-aux", |
233 | .codec_dai_name = "wm9712-aux", | 233 | .codec_dai_name = "wm9712-aux", |
234 | .platform_name = "pxa-pcm-audio", | 234 | .platform_name = "pxa-pcm-audio", |
235 | .codec_name = "wm9712-codec", | 235 | .codec_name = "wm9712-codec", |
diff --git a/sound/soc/pxa/zylonite.c b/sound/soc/pxa/zylonite.c index b222a7d72027..25bba108fea3 100644 --- a/sound/soc/pxa/zylonite.c +++ b/sound/soc/pxa/zylonite.c | |||
@@ -166,7 +166,7 @@ static struct snd_soc_dai_link zylonite_dai[] = { | |||
166 | .stream_name = "AC97 HiFi", | 166 | .stream_name = "AC97 HiFi", |
167 | .codec_name = "wm9713-codec", | 167 | .codec_name = "wm9713-codec", |
168 | .platform_name = "pxa-pcm-audio", | 168 | .platform_name = "pxa-pcm-audio", |
169 | .cpu_dai_name = "pxa-ac97.0", | 169 | .cpu_dai_name = "pxa2xx-ac97", |
170 | .codec_name = "wm9713-hifi", | 170 | .codec_name = "wm9713-hifi", |
171 | .init = zylonite_wm9713_init, | 171 | .init = zylonite_wm9713_init, |
172 | }, | 172 | }, |
@@ -175,7 +175,7 @@ static struct snd_soc_dai_link zylonite_dai[] = { | |||
175 | .stream_name = "AC97 Aux", | 175 | .stream_name = "AC97 Aux", |
176 | .codec_name = "wm9713-codec", | 176 | .codec_name = "wm9713-codec", |
177 | .platform_name = "pxa-pcm-audio", | 177 | .platform_name = "pxa-pcm-audio", |
178 | .cpu_dai_name = "pxa-ac97.1", | 178 | .cpu_dai_name = "pxa2xx-ac97-aux", |
179 | .codec_name = "wm9713-aux", | 179 | .codec_name = "wm9713-aux", |
180 | }, | 180 | }, |
181 | { | 181 | { |
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index c4b60610beb0..c3f6f1e72790 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c | |||
@@ -1449,6 +1449,7 @@ static int soc_post_component_init(struct snd_soc_card *card, | |||
1449 | rtd = &card->rtd_aux[num]; | 1449 | rtd = &card->rtd_aux[num]; |
1450 | name = aux_dev->name; | 1450 | name = aux_dev->name; |
1451 | } | 1451 | } |
1452 | rtd->card = card; | ||
1452 | 1453 | ||
1453 | /* machine controls, routes and widgets are not prefixed */ | 1454 | /* machine controls, routes and widgets are not prefixed */ |
1454 | temp = codec->name_prefix; | 1455 | temp = codec->name_prefix; |
@@ -1471,7 +1472,6 @@ static int soc_post_component_init(struct snd_soc_card *card, | |||
1471 | 1472 | ||
1472 | /* register the rtd device */ | 1473 | /* register the rtd device */ |
1473 | rtd->codec = codec; | 1474 | rtd->codec = codec; |
1474 | rtd->card = card; | ||
1475 | rtd->dev.parent = card->dev; | 1475 | rtd->dev.parent = card->dev; |
1476 | rtd->dev.release = rtd_release; | 1476 | rtd->dev.release = rtd_release; |
1477 | rtd->dev.init_name = name; | 1477 | rtd->dev.init_name = name; |
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 8194f150bab7..25e54230cc6a 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c | |||
@@ -712,7 +712,15 @@ static int dapm_supply_check_power(struct snd_soc_dapm_widget *w) | |||
712 | !path->connected(path->source, path->sink)) | 712 | !path->connected(path->source, path->sink)) |
713 | continue; | 713 | continue; |
714 | 714 | ||
715 | if (path->sink && path->sink->power_check && | 715 | if (!path->sink) |
716 | continue; | ||
717 | |||
718 | if (path->sink->force) { | ||
719 | power = 1; | ||
720 | break; | ||
721 | } | ||
722 | |||
723 | if (path->sink->power_check && | ||
716 | path->sink->power_check(path->sink)) { | 724 | path->sink->power_check(path->sink)) { |
717 | power = 1; | 725 | power = 1; |
718 | break; | 726 | break; |
@@ -1627,6 +1635,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_add_routes); | |||
1627 | int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm) | 1635 | int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm) |
1628 | { | 1636 | { |
1629 | struct snd_soc_dapm_widget *w; | 1637 | struct snd_soc_dapm_widget *w; |
1638 | unsigned int val; | ||
1630 | 1639 | ||
1631 | list_for_each_entry(w, &dapm->card->widgets, list) | 1640 | list_for_each_entry(w, &dapm->card->widgets, list) |
1632 | { | 1641 | { |
@@ -1675,6 +1684,18 @@ int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm) | |||
1675 | case snd_soc_dapm_post: | 1684 | case snd_soc_dapm_post: |
1676 | break; | 1685 | break; |
1677 | } | 1686 | } |
1687 | |||
1688 | /* Read the initial power state from the device */ | ||
1689 | if (w->reg >= 0) { | ||
1690 | val = snd_soc_read(w->codec, w->reg); | ||
1691 | val &= 1 << w->shift; | ||
1692 | if (w->invert) | ||
1693 | val = !val; | ||
1694 | |||
1695 | if (val) | ||
1696 | w->power = 1; | ||
1697 | } | ||
1698 | |||
1678 | w->new = 1; | 1699 | w->new = 1; |
1679 | } | 1700 | } |
1680 | 1701 | ||
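The soc-dapm.c hunk above seeds each register-backed widget's power state from the hardware instead of assuming it starts off. A compilable sketch of that read-mask-invert step, with a simulated register read standing in for the real DAPM code:

```c
/* Sketch of the "read the initial power state" step: when a widget has a
 * backing register, its current power bit is read, the invert flag applied,
 * and the result used as the widget's starting state. */
#include <stdio.h>

struct widget {
	int reg;        /* -1 means no backing register */
	int shift;
	int invert;
	int power;
};

static unsigned int fake_codec_read(int reg)
{
	(void)reg;
	return 0x0008; /* pretend bit 3 is already set in hardware */
}

int main(void)
{
	struct widget w = { .reg = 0x05, .shift = 3, .invert = 0, .power = 0 };

	if (w.reg >= 0) {
		unsigned int val = fake_codec_read(w.reg) & (1u << w.shift);
		if (w.invert)
			val = !val;
		if (val)
			w.power = 1;
	}
	printf("initial widget power: %d\n", w.power);
	return 0;
}
```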
diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c index 68b97477577b..66eabafb1c24 100644 --- a/sound/usb/caiaq/audio.c +++ b/sound/usb/caiaq/audio.c | |||
@@ -785,7 +785,7 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *dev) | |||
785 | } | 785 | } |
786 | 786 | ||
787 | dev->pcm->private_data = dev; | 787 | dev->pcm->private_data = dev; |
788 | strcpy(dev->pcm->name, dev->product_name); | 788 | strlcpy(dev->pcm->name, dev->product_name, sizeof(dev->pcm->name)); |
789 | 789 | ||
790 | memset(dev->sub_playback, 0, sizeof(dev->sub_playback)); | 790 | memset(dev->sub_playback, 0, sizeof(dev->sub_playback)); |
791 | memset(dev->sub_capture, 0, sizeof(dev->sub_capture)); | 791 | memset(dev->sub_capture, 0, sizeof(dev->sub_capture)); |
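The caiaq change above swaps an unbounded strcpy for strlcpy because the PCM name is a fixed-size field and the USB product string may be longer. A small userspace sketch of the same bounded-copy idea, emulating strlcpy with snprintf so the example builds outside the kernel:

```c
/* Sketch of the strcpy -> strlcpy fix: the copy into a fixed-size field is
 * bounded and always NUL-terminated, truncating instead of overflowing. */
#include <stdio.h>
#include <string.h>

struct pcm { char name[16]; };

int main(void)
{
	struct pcm pcm;
	const char *product_name = "A very long USB device product string";

	/* bounded copy: truncates rather than overruns pcm.name */
	snprintf(pcm.name, sizeof(pcm.name), "%s", product_name);

	printf("copied %zu of %zu bytes: \"%s\"\n",
	       strlen(pcm.name), strlen(product_name), pcm.name);
	return 0;
}
```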
diff --git a/sound/usb/caiaq/midi.c b/sound/usb/caiaq/midi.c index 2f218c77fff2..a1a47088fd0c 100644 --- a/sound/usb/caiaq/midi.c +++ b/sound/usb/caiaq/midi.c | |||
@@ -136,7 +136,7 @@ int snd_usb_caiaq_midi_init(struct snd_usb_caiaqdev *device) | |||
136 | if (ret < 0) | 136 | if (ret < 0) |
137 | return ret; | 137 | return ret; |
138 | 138 | ||
139 | strcpy(rmidi->name, device->product_name); | 139 | strlcpy(rmidi->name, device->product_name, sizeof(rmidi->name)); |
140 | 140 | ||
141 | rmidi->info_flags = SNDRV_RAWMIDI_INFO_DUPLEX; | 141 | rmidi->info_flags = SNDRV_RAWMIDI_INFO_DUPLEX; |
142 | rmidi->private_data = device; | 142 | rmidi->private_data = device; |
diff --git a/sound/usb/card.c b/sound/usb/card.c index 800f7cb4f251..c0f8270bc199 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c | |||
@@ -323,6 +323,7 @@ static int snd_usb_audio_create(struct usb_device *dev, int idx, | |||
323 | return -ENOMEM; | 323 | return -ENOMEM; |
324 | } | 324 | } |
325 | 325 | ||
326 | mutex_init(&chip->shutdown_mutex); | ||
326 | chip->index = idx; | 327 | chip->index = idx; |
327 | chip->dev = dev; | 328 | chip->dev = dev; |
328 | chip->card = card; | 329 | chip->card = card; |
@@ -531,6 +532,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, void *ptr) | |||
531 | chip = ptr; | 532 | chip = ptr; |
532 | card = chip->card; | 533 | card = chip->card; |
533 | mutex_lock(®ister_mutex); | 534 | mutex_lock(®ister_mutex); |
535 | mutex_lock(&chip->shutdown_mutex); | ||
534 | chip->shutdown = 1; | 536 | chip->shutdown = 1; |
535 | chip->num_interfaces--; | 537 | chip->num_interfaces--; |
536 | if (chip->num_interfaces <= 0) { | 538 | if (chip->num_interfaces <= 0) { |
@@ -548,9 +550,11 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, void *ptr) | |||
548 | snd_usb_mixer_disconnect(p); | 550 | snd_usb_mixer_disconnect(p); |
549 | } | 551 | } |
550 | usb_chip[chip->index] = NULL; | 552 | usb_chip[chip->index] = NULL; |
553 | mutex_unlock(&chip->shutdown_mutex); | ||
551 | mutex_unlock(®ister_mutex); | 554 | mutex_unlock(®ister_mutex); |
552 | snd_card_free_when_closed(card); | 555 | snd_card_free_when_closed(card); |
553 | } else { | 556 | } else { |
557 | mutex_unlock(&chip->shutdown_mutex); | ||
554 | mutex_unlock(®ister_mutex); | 558 | mutex_unlock(®ister_mutex); |
555 | } | 559 | } |
556 | } | 560 | } |
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 7df89b3d7ded..85af6051b52d 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c | |||
@@ -95,7 +95,7 @@ enum { | |||
95 | }; | 95 | }; |
96 | 96 | ||
97 | 97 | ||
98 | /*E-mu 0202(0404) eXtension Unit(XU) control*/ | 98 | /*E-mu 0202/0404/0204 eXtension Unit(XU) control*/ |
99 | enum { | 99 | enum { |
100 | USB_XU_CLOCK_RATE = 0xe301, | 100 | USB_XU_CLOCK_RATE = 0xe301, |
101 | USB_XU_CLOCK_SOURCE = 0xe302, | 101 | USB_XU_CLOCK_SOURCE = 0xe302, |
@@ -1566,7 +1566,7 @@ static int build_audio_procunit(struct mixer_build *state, int unitid, void *raw | |||
1566 | cval->initialized = 1; | 1566 | cval->initialized = 1; |
1567 | } else { | 1567 | } else { |
1568 | if (type == USB_XU_CLOCK_RATE) { | 1568 | if (type == USB_XU_CLOCK_RATE) { |
1569 | /* E-Mu USB 0404/0202/TrackerPre | 1569 | /* E-Mu USB 0404/0202/TrackerPre/0204 |
1570 | * samplerate control quirk | 1570 | * samplerate control quirk |
1571 | */ | 1571 | */ |
1572 | cval->min = 0; | 1572 | cval->min = 0; |
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index 4132522ac90f..e3f680526cb5 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c | |||
@@ -361,6 +361,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream, | |||
361 | } | 361 | } |
362 | 362 | ||
363 | if (changed) { | 363 | if (changed) { |
364 | mutex_lock(&subs->stream->chip->shutdown_mutex); | ||
364 | /* format changed */ | 365 | /* format changed */ |
365 | snd_usb_release_substream_urbs(subs, 0); | 366 | snd_usb_release_substream_urbs(subs, 0); |
366 | /* influenced: period_bytes, channels, rate, format, */ | 367 | /* influenced: period_bytes, channels, rate, format, */ |
@@ -368,6 +369,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream, | |||
368 | params_rate(hw_params), | 369 | params_rate(hw_params), |
369 | snd_pcm_format_physical_width(params_format(hw_params)) * | 370 | snd_pcm_format_physical_width(params_format(hw_params)) * |
370 | params_channels(hw_params)); | 371 | params_channels(hw_params)); |
372 | mutex_unlock(&subs->stream->chip->shutdown_mutex); | ||
371 | } | 373 | } |
372 | 374 | ||
373 | return ret; | 375 | return ret; |
@@ -385,8 +387,9 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream) | |||
385 | subs->cur_audiofmt = NULL; | 387 | subs->cur_audiofmt = NULL; |
386 | subs->cur_rate = 0; | 388 | subs->cur_rate = 0; |
387 | subs->period_bytes = 0; | 389 | subs->period_bytes = 0; |
388 | if (!subs->stream->chip->shutdown) | 390 | mutex_lock(&subs->stream->chip->shutdown_mutex); |
389 | snd_usb_release_substream_urbs(subs, 0); | 391 | snd_usb_release_substream_urbs(subs, 0); |
392 | mutex_unlock(&subs->stream->chip->shutdown_mutex); | ||
390 | return snd_pcm_lib_free_vmalloc_buffer(substream); | 393 | return snd_pcm_lib_free_vmalloc_buffer(substream); |
391 | } | 394 | } |
392 | 395 | ||
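The card.c and pcm.c hunks above introduce a shutdown_mutex so that USB disconnect cannot race with hw_params/hw_free releasing URBs. A pthreads-based sketch of the same locking pattern, with simplified stand-in structures (not the real ALSA USB-audio code):

```c
/* Sketch of the shutdown_mutex pattern: the disconnect path and the stream
 * teardown path take the same mutex, so URB release never overlaps with the
 * device being torn down. */
#include <pthread.h>
#include <stdio.h>

struct chip {
	pthread_mutex_t shutdown_mutex;
	int shutdown;
};

static void release_substream_urbs(struct chip *c)
{
	printf("releasing URBs (shutdown=%d)\n", c->shutdown);
}

static void hw_free(struct chip *c)
{
	pthread_mutex_lock(&c->shutdown_mutex);
	release_substream_urbs(c);      /* safe even around a disconnect */
	pthread_mutex_unlock(&c->shutdown_mutex);
}

static void disconnect(struct chip *c)
{
	pthread_mutex_lock(&c->shutdown_mutex);
	c->shutdown = 1;                /* streams observe a consistent flag */
	pthread_mutex_unlock(&c->shutdown_mutex);
}

int main(void)
{
	struct chip c = { .shutdown = 0 };

	pthread_mutex_init(&c.shutdown_mutex, NULL);
	hw_free(&c);
	disconnect(&c);
	hw_free(&c);
	pthread_mutex_destroy(&c.shutdown_mutex);
	return 0;
}
```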
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 35999874d301..921a86fd9884 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h | |||
@@ -79,6 +79,13 @@ | |||
79 | .idProduct = 0x3f0a, | 79 | .idProduct = 0x3f0a, |
80 | .bInterfaceClass = USB_CLASS_AUDIO, | 80 | .bInterfaceClass = USB_CLASS_AUDIO, |
81 | }, | 81 | }, |
82 | { | ||
83 | /* E-Mu 0204 USB */ | ||
84 | .match_flags = USB_DEVICE_ID_MATCH_DEVICE, | ||
85 | .idVendor = 0x041e, | ||
86 | .idProduct = 0x3f19, | ||
87 | .bInterfaceClass = USB_CLASS_AUDIO, | ||
88 | }, | ||
82 | 89 | ||
83 | /* | 90 | /* |
84 | * Logitech QuickCam: bDeviceClass is vendor-specific, so generic interface | 91 | * Logitech QuickCam: bDeviceClass is vendor-specific, so generic interface |
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index cf8bf088394b..e314cdb85003 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c | |||
@@ -532,7 +532,7 @@ int snd_usb_is_big_endian_format(struct snd_usb_audio *chip, struct audioformat | |||
532 | } | 532 | } |
533 | 533 | ||
534 | /* | 534 | /* |
535 | * For E-Mu 0404USB/0202USB/TrackerPre sample rate should be set for device, | 535 | * For E-Mu 0404USB/0202USB/TrackerPre/0204 sample rate should be set for device, |
536 | * not for interface. | 536 | * not for interface. |
537 | */ | 537 | */ |
538 | 538 | ||
@@ -589,6 +589,7 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs, | |||
589 | case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */ | 589 | case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */ |
590 | case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */ | 590 | case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */ |
591 | case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */ | 591 | case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */ |
592 | case USB_ID(0x041e, 0x3f19): /* E-Mu 0204 USB */ | ||
592 | set_format_emu_quirk(subs, fmt); | 593 | set_format_emu_quirk(subs, fmt); |
593 | break; | 594 | break; |
594 | } | 595 | } |
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h index db3eb21627ee..6e66fffe87f5 100644 --- a/sound/usb/usbaudio.h +++ b/sound/usb/usbaudio.h | |||
@@ -36,6 +36,7 @@ struct snd_usb_audio { | |||
36 | struct snd_card *card; | 36 | struct snd_card *card; |
37 | u32 usb_id; | 37 | u32 usb_id; |
38 | int shutdown; | 38 | int shutdown; |
39 | struct mutex shutdown_mutex; | ||
39 | unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */ | 40 | unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */ |
40 | int num_interfaces; | 41 | int num_interfaces; |
41 | int num_suspended_intf; | 42 | int num_suspended_intf; |
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index b2f729fdb317..60cac6f92e8b 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c | |||
@@ -759,8 +759,8 @@ static int __cmd_record(int argc, const char **argv) | |||
759 | perf_session__process_machines(session, event__synthesize_guest_os); | 759 | perf_session__process_machines(session, event__synthesize_guest_os); |
760 | 760 | ||
761 | if (!system_wide) | 761 | if (!system_wide) |
762 | event__synthesize_thread(target_tid, process_synthesized_event, | 762 | event__synthesize_thread_map(threads, process_synthesized_event, |
763 | session); | 763 | session); |
764 | else | 764 | else |
765 | event__synthesize_threads(process_synthesized_event, session); | 765 | event__synthesize_threads(process_synthesized_event, session); |
766 | 766 | ||
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c index 746cf03cb05d..0ace786e83e0 100644 --- a/tools/perf/builtin-timechart.c +++ b/tools/perf/builtin-timechart.c | |||
@@ -264,9 +264,6 @@ pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end) | |||
264 | c->start_time = start; | 264 | c->start_time = start; |
265 | if (p->start_time == 0 || p->start_time > start) | 265 | if (p->start_time == 0 || p->start_time > start) |
266 | p->start_time = start; | 266 | p->start_time = start; |
267 | |||
268 | if (cpu > numcpus) | ||
269 | numcpus = cpu; | ||
270 | } | 267 | } |
271 | 268 | ||
272 | #define MAX_CPUS 4096 | 269 | #define MAX_CPUS 4096 |
@@ -511,6 +508,9 @@ static int process_sample_event(event_t *event __used, | |||
511 | if (!event_str) | 508 | if (!event_str) |
512 | return 0; | 509 | return 0; |
513 | 510 | ||
511 | if (sample->cpu > numcpus) | ||
512 | numcpus = sample->cpu; | ||
513 | |||
514 | if (strcmp(event_str, "power:cpu_idle") == 0) { | 514 | if (strcmp(event_str, "power:cpu_idle") == 0) { |
515 | struct power_processor_entry *ppe = (void *)te; | 515 | struct power_processor_entry *ppe = (void *)te; |
516 | if (ppe->state == (u32)PWR_EVENT_EXIT) | 516 | if (ppe->state == (u32)PWR_EVENT_EXIT) |
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index b6998e055767..5a29d9cd9486 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c | |||
@@ -1306,7 +1306,7 @@ static int __cmd_top(void) | |||
1306 | return -ENOMEM; | 1306 | return -ENOMEM; |
1307 | 1307 | ||
1308 | if (target_tid != -1) | 1308 | if (target_tid != -1) |
1309 | event__synthesize_thread(target_tid, event__process, session); | 1309 | event__synthesize_thread_map(threads, event__process, session); |
1310 | else | 1310 | else |
1311 | event__synthesize_threads(event__process, session); | 1311 | event__synthesize_threads(event__process, session); |
1312 | 1312 | ||
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 1478ab4ee222..50d0a931497a 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c | |||
@@ -263,11 +263,12 @@ static int __event__synthesize_thread(event_t *comm_event, event_t *mmap_event, | |||
263 | process, session); | 263 | process, session); |
264 | } | 264 | } |
265 | 265 | ||
266 | int event__synthesize_thread(pid_t pid, event__handler_t process, | 266 | int event__synthesize_thread_map(struct thread_map *threads, |
267 | struct perf_session *session) | 267 | event__handler_t process, |
268 | struct perf_session *session) | ||
268 | { | 269 | { |
269 | event_t *comm_event, *mmap_event; | 270 | event_t *comm_event, *mmap_event; |
270 | int err = -1; | 271 | int err = -1, thread; |
271 | 272 | ||
272 | comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size); | 273 | comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size); |
273 | if (comm_event == NULL) | 274 | if (comm_event == NULL) |
@@ -277,8 +278,15 @@ int event__synthesize_thread(pid_t pid, event__handler_t process, | |||
277 | if (mmap_event == NULL) | 278 | if (mmap_event == NULL) |
278 | goto out_free_comm; | 279 | goto out_free_comm; |
279 | 280 | ||
280 | err = __event__synthesize_thread(comm_event, mmap_event, pid, | 281 | err = 0; |
281 | process, session); | 282 | for (thread = 0; thread < threads->nr; ++thread) { |
283 | if (__event__synthesize_thread(comm_event, mmap_event, | ||
284 | threads->map[thread], | ||
285 | process, session)) { | ||
286 | err = -1; | ||
287 | break; | ||
288 | } | ||
289 | } | ||
282 | free(mmap_event); | 290 | free(mmap_event); |
283 | out_free_comm: | 291 | out_free_comm: |
284 | free(comm_event); | 292 | free(comm_event); |
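The util/event.c change above generalizes single-thread synthesis to a whole thread map, stopping at the first failure. A condensed standalone sketch of that loop, with simplified stand-ins for the perf structures:

```c
/* Sketch of the thread-map synthesis loop: every tid in the map gets its
 * comm/mmap events synthesized, and the loop bails out on the first error. */
#include <stdio.h>

struct thread_map {
	int nr;
	int map[8];
};

static int synthesize_one(int tid)
{
	printf("synthesizing comm/mmap events for tid %d\n", tid);
	return 0; /* 0 on success, as in the original helper */
}

static int synthesize_thread_map(const struct thread_map *threads)
{
	int err = 0;

	for (int t = 0; t < threads->nr; t++) {
		if (synthesize_one(threads->map[t])) {
			err = -1;
			break;
		}
	}
	return err;
}

int main(void)
{
	struct thread_map threads = { .nr = 3, .map = { 101, 102, 103 } };

	return synthesize_thread_map(&threads) ? 1 : 0;
}
```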
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 2b7e91902f10..cc7b52f9b492 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h | |||
@@ -135,14 +135,16 @@ typedef union event_union { | |||
135 | void event__print_totals(void); | 135 | void event__print_totals(void); |
136 | 136 | ||
137 | struct perf_session; | 137 | struct perf_session; |
138 | struct thread_map; | ||
138 | 139 | ||
139 | typedef int (*event__handler_synth_t)(event_t *event, | 140 | typedef int (*event__handler_synth_t)(event_t *event, |
140 | struct perf_session *session); | 141 | struct perf_session *session); |
141 | typedef int (*event__handler_t)(event_t *event, struct sample_data *sample, | 142 | typedef int (*event__handler_t)(event_t *event, struct sample_data *sample, |
142 | struct perf_session *session); | 143 | struct perf_session *session); |
143 | 144 | ||
144 | int event__synthesize_thread(pid_t pid, event__handler_t process, | 145 | int event__synthesize_thread_map(struct thread_map *threads, |
145 | struct perf_session *session); | 146 | event__handler_t process, |
147 | struct perf_session *session); | ||
146 | int event__synthesize_threads(event__handler_t process, | 148 | int event__synthesize_threads(event__handler_t process, |
147 | struct perf_session *session); | 149 | struct perf_session *session); |
148 | int event__synthesize_kernel_mmap(event__handler_t process, | 150 | int event__synthesize_kernel_mmap(event__handler_t process, |
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 32f4f1f2f6e4..df51560f16f7 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c | |||
@@ -585,6 +585,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size, | |||
585 | { | 585 | { |
586 | struct sort_entry *se; | 586 | struct sort_entry *se; |
587 | u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us; | 587 | u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us; |
588 | u64 nr_events; | ||
588 | const char *sep = symbol_conf.field_sep; | 589 | const char *sep = symbol_conf.field_sep; |
589 | int ret; | 590 | int ret; |
590 | 591 | ||
@@ -593,6 +594,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size, | |||
593 | 594 | ||
594 | if (pair_hists) { | 595 | if (pair_hists) { |
595 | period = self->pair ? self->pair->period : 0; | 596 | period = self->pair ? self->pair->period : 0; |
597 | nr_events = self->pair ? self->pair->nr_events : 0; | ||
596 | total = pair_hists->stats.total_period; | 598 | total = pair_hists->stats.total_period; |
597 | period_sys = self->pair ? self->pair->period_sys : 0; | 599 | period_sys = self->pair ? self->pair->period_sys : 0; |
598 | period_us = self->pair ? self->pair->period_us : 0; | 600 | period_us = self->pair ? self->pair->period_us : 0; |
@@ -600,6 +602,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size, | |||
600 | period_guest_us = self->pair ? self->pair->period_guest_us : 0; | 602 | period_guest_us = self->pair ? self->pair->period_guest_us : 0; |
601 | } else { | 603 | } else { |
602 | period = self->period; | 604 | period = self->period; |
605 | nr_events = self->nr_events; | ||
603 | total = session_total; | 606 | total = session_total; |
604 | period_sys = self->period_sys; | 607 | period_sys = self->period_sys; |
605 | period_us = self->period_us; | 608 | period_us = self->period_us; |
@@ -640,9 +643,9 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size, | |||
640 | 643 | ||
641 | if (symbol_conf.show_nr_samples) { | 644 | if (symbol_conf.show_nr_samples) { |
642 | if (sep) | 645 | if (sep) |
643 | ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period); | 646 | ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events); |
644 | else | 647 | else |
645 | ret += snprintf(s + ret, size - ret, "%11" PRIu64, period); | 648 | ret += snprintf(s + ret, size - ret, "%11" PRIu64, nr_events); |
646 | } | 649 | } |
647 | 650 | ||
648 | if (pair_hists) { | 651 | if (pair_hists) { |
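The hist.c hunks above fix what the Samples column reports: the show_nr_samples branch used to print the accumulated period (the sum of sample weights) where a plain count of samples was expected, so an nr_events value is now carried alongside period and printed instead. A small sketch of the distinction, with a simplified accumulation step standing in for the hist_entry internals (which are elided here):

#include <stdio.h>

/* Reduced hist entry: period accumulates sample weights, nr_events just
 * counts how many samples hit the entry (what the Samples column wants). */
struct entry {
	unsigned long long period;
	unsigned long long nr_events;
};

static void add_sample(struct entry *e, unsigned long long weight)
{
	e->period += weight;	/* what was printed before this fix */
	e->nr_events++;		/* what show_nr_samples should print */
}

int main(void)
{
	struct entry e = { 0, 0 };

	add_sample(&e, 4000);
	add_sample(&e, 1000);
	printf("period=%llu nr_events=%llu\n", e.period, e.nr_events);
	return 0;
}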
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c index fb737fe9be91..96c866045d60 100644 --- a/tools/perf/util/svghelper.c +++ b/tools/perf/util/svghelper.c | |||
@@ -456,9 +456,9 @@ void svg_legenda(void) | |||
456 | return; | 456 | return; |
457 | 457 | ||
458 | svg_legenda_box(0, "Running", "sample"); | 458 | svg_legenda_box(0, "Running", "sample"); |
459 | svg_legenda_box(100, "Idle","rect.c1"); | 459 | svg_legenda_box(100, "Idle","c1"); |
460 | svg_legenda_box(200, "Deeper Idle", "rect.c3"); | 460 | svg_legenda_box(200, "Deeper Idle", "c3"); |
461 | svg_legenda_box(350, "Deepest Idle", "rect.c6"); | 461 | svg_legenda_box(350, "Deepest Idle", "c6"); |
462 | svg_legenda_box(550, "Sleeping", "process2"); | 462 | svg_legenda_box(550, "Sleeping", "process2"); |
463 | svg_legenda_box(650, "Waiting for cpu", "waiting"); | 463 | svg_legenda_box(650, "Waiting for cpu", "waiting"); |
464 | svg_legenda_box(800, "Blocked on IO", "blocked"); | 464 | svg_legenda_box(800, "Blocked on IO", "blocked"); |
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 4c6983de6fd9..362a0cb448db 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c | |||
@@ -72,7 +72,7 @@ int need_reinitialize; | |||
72 | 72 | ||
73 | int num_cpus; | 73 | int num_cpus; |
74 | 74 | ||
75 | typedef struct per_cpu_counters { | 75 | struct counters { |
76 | unsigned long long tsc; /* per thread */ | 76 | unsigned long long tsc; /* per thread */ |
77 | unsigned long long aperf; /* per thread */ | 77 | unsigned long long aperf; /* per thread */ |
78 | unsigned long long mperf; /* per thread */ | 78 | unsigned long long mperf; /* per thread */ |
@@ -88,13 +88,13 @@ typedef struct per_cpu_counters { | |||
88 | int pkg; | 88 | int pkg; |
89 | int core; | 89 | int core; |
90 | int cpu; | 90 | int cpu; |
91 | struct per_cpu_counters *next; | 91 | struct counters *next; |
92 | } PCC; | 92 | }; |
93 | 93 | ||
94 | PCC *pcc_even; | 94 | struct counters *cnt_even; |
95 | PCC *pcc_odd; | 95 | struct counters *cnt_odd; |
96 | PCC *pcc_delta; | 96 | struct counters *cnt_delta; |
97 | PCC *pcc_average; | 97 | struct counters *cnt_average; |
98 | struct timeval tv_even; | 98 | struct timeval tv_even; |
99 | struct timeval tv_odd; | 99 | struct timeval tv_odd; |
100 | struct timeval tv_delta; | 100 | struct timeval tv_delta; |
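The turbostat hunks above are largely a rename: the PCC typedef goes away in favor of a plain struct counters (kernel style discourages typedef'd structs), and the pcc_* globals become cnt_even/cnt_odd/cnt_delta/cnt_average. A reduced sketch of the resulting data structure and the per-cpu linked-list walk used by get_counters() and print_counters(); the field set is trimmed for brevity, the real struct carries the full set of residency counters shown above:

#include <stdio.h>

/* Reduced stand-in for turbostat's struct counters: one node per cpu,
 * chained through ->next. */
struct counters {
	unsigned long long tsc;
	unsigned long long aperf;
	unsigned long long mperf;
	int pkg;
	int core;
	int cpu;
	struct counters *next;
};

/* Same traversal shape as get_counters()/print_counters(). */
static void walk(struct counters *cnt)
{
	for (; cnt; cnt = cnt->next)
		printf("pkg %d core %d cpu %d tsc %llu\n",
		       cnt->pkg, cnt->core, cnt->cpu, cnt->tsc);
}

int main(void)
{
	struct counters c1 = { .tsc = 100, .pkg = 0, .core = 1, .cpu = 1 };
	struct counters c0 = { .tsc = 200, .pkg = 0, .core = 0, .cpu = 0, .next = &c1 };

	walk(&c0);
	return 0;
}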
@@ -125,7 +125,7 @@ unsigned long long get_msr(int cpu, off_t offset) | |||
125 | return msr; | 125 | return msr; |
126 | } | 126 | } |
127 | 127 | ||
128 | void print_header() | 128 | void print_header(void) |
129 | { | 129 | { |
130 | if (show_pkg) | 130 | if (show_pkg) |
131 | fprintf(stderr, "pkg "); | 131 | fprintf(stderr, "pkg "); |
@@ -160,39 +160,39 @@ void print_header() | |||
160 | putc('\n', stderr); | 160 | putc('\n', stderr); |
161 | } | 161 | } |
162 | 162 | ||
163 | void dump_pcc(PCC *pcc) | 163 | void dump_cnt(struct counters *cnt) |
164 | { | 164 | { |
165 | fprintf(stderr, "package: %d ", pcc->pkg); | 165 | fprintf(stderr, "package: %d ", cnt->pkg); |
166 | fprintf(stderr, "core:: %d ", pcc->core); | 166 | fprintf(stderr, "core:: %d ", cnt->core); |
167 | fprintf(stderr, "CPU: %d ", pcc->cpu); | 167 | fprintf(stderr, "CPU: %d ", cnt->cpu); |
168 | fprintf(stderr, "TSC: %016llX\n", pcc->tsc); | 168 | fprintf(stderr, "TSC: %016llX\n", cnt->tsc); |
169 | fprintf(stderr, "c3: %016llX\n", pcc->c3); | 169 | fprintf(stderr, "c3: %016llX\n", cnt->c3); |
170 | fprintf(stderr, "c6: %016llX\n", pcc->c6); | 170 | fprintf(stderr, "c6: %016llX\n", cnt->c6); |
171 | fprintf(stderr, "c7: %016llX\n", pcc->c7); | 171 | fprintf(stderr, "c7: %016llX\n", cnt->c7); |
172 | fprintf(stderr, "aperf: %016llX\n", pcc->aperf); | 172 | fprintf(stderr, "aperf: %016llX\n", cnt->aperf); |
173 | fprintf(stderr, "pc2: %016llX\n", pcc->pc2); | 173 | fprintf(stderr, "pc2: %016llX\n", cnt->pc2); |
174 | fprintf(stderr, "pc3: %016llX\n", pcc->pc3); | 174 | fprintf(stderr, "pc3: %016llX\n", cnt->pc3); |
175 | fprintf(stderr, "pc6: %016llX\n", pcc->pc6); | 175 | fprintf(stderr, "pc6: %016llX\n", cnt->pc6); |
176 | fprintf(stderr, "pc7: %016llX\n", pcc->pc7); | 176 | fprintf(stderr, "pc7: %016llX\n", cnt->pc7); |
177 | fprintf(stderr, "msr0x%x: %016llX\n", extra_msr_offset, pcc->extra_msr); | 177 | fprintf(stderr, "msr0x%x: %016llX\n", extra_msr_offset, cnt->extra_msr); |
178 | } | 178 | } |
179 | 179 | ||
180 | void dump_list(PCC *pcc) | 180 | void dump_list(struct counters *cnt) |
181 | { | 181 | { |
182 | printf("dump_list 0x%p\n", pcc); | 182 | printf("dump_list 0x%p\n", cnt); |
183 | 183 | ||
184 | for (; pcc; pcc = pcc->next) | 184 | for (; cnt; cnt = cnt->next) |
185 | dump_pcc(pcc); | 185 | dump_cnt(cnt); |
186 | } | 186 | } |
187 | 187 | ||
188 | void print_pcc(PCC *p) | 188 | void print_cnt(struct counters *p) |
189 | { | 189 | { |
190 | double interval_float; | 190 | double interval_float; |
191 | 191 | ||
192 | interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0; | 192 | interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0; |
193 | 193 | ||
194 | /* topology columns, print blanks on 1st (average) line */ | 194 | /* topology columns, print blanks on 1st (average) line */ |
195 | if (p == pcc_average) { | 195 | if (p == cnt_average) { |
196 | if (show_pkg) | 196 | if (show_pkg) |
197 | fprintf(stderr, " "); | 197 | fprintf(stderr, " "); |
198 | if (show_core) | 198 | if (show_core) |
@@ -262,24 +262,24 @@ void print_pcc(PCC *p) | |||
262 | putc('\n', stderr); | 262 | putc('\n', stderr); |
263 | } | 263 | } |
264 | 264 | ||
265 | void print_counters(PCC *cnt) | 265 | void print_counters(struct counters *counters) |
266 | { | 266 | { |
267 | PCC *pcc; | 267 | struct counters *cnt; |
268 | 268 | ||
269 | print_header(); | 269 | print_header(); |
270 | 270 | ||
271 | if (num_cpus > 1) | 271 | if (num_cpus > 1) |
272 | print_pcc(pcc_average); | 272 | print_cnt(cnt_average); |
273 | 273 | ||
274 | for (pcc = cnt; pcc != NULL; pcc = pcc->next) | 274 | for (cnt = counters; cnt != NULL; cnt = cnt->next) |
275 | print_pcc(pcc); | 275 | print_cnt(cnt); |
276 | 276 | ||
277 | } | 277 | } |
278 | 278 | ||
279 | #define SUBTRACT_COUNTER(after, before, delta) (delta = (after - before), (before > after)) | 279 | #define SUBTRACT_COUNTER(after, before, delta) (delta = (after - before), (before > after)) |
280 | 280 | ||
281 | 281 | int compute_delta(struct counters *after, | |
282 | int compute_delta(PCC *after, PCC *before, PCC *delta) | 282 | struct counters *before, struct counters *delta) |
283 | { | 283 | { |
284 | int errors = 0; | 284 | int errors = 0; |
285 | int perf_err = 0; | 285 | int perf_err = 0; |
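compute_delta() is built on the SUBTRACT_COUNTER macro shown above: the comma expression stores after - before into delta and evaluates to nonzero when the counter appears to have gone backwards, which is what drives the error path that dumps both samples. A small self-contained illustration of that idiom:

#include <stdio.h>

/* Same shape as turbostat's macro: assign the delta, return "went backwards". */
#define SUBTRACT_COUNTER(after, before, delta) \
	(delta = (after - before), (before > after))

int main(void)
{
	unsigned long long before = 1000, after = 1500, delta;

	if (SUBTRACT_COUNTER(after, before, delta))
		printf("counter went backwards\n");
	else
		printf("delta = %llu\n", delta);	/* prints 500 */

	/* a counter that goes backwards makes the check fire,
	 * and the unsigned delta wraps */
	before = 2000;
	after = 100;
	if (SUBTRACT_COUNTER(after, before, delta))
		printf("counter went backwards, delta wraps to %llu\n", delta);
	return 0;
}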
@@ -391,20 +391,20 @@ int compute_delta(PCC *after, PCC *before, PCC *delta) | |||
391 | delta->extra_msr = after->extra_msr; | 391 | delta->extra_msr = after->extra_msr; |
392 | if (errors) { | 392 | if (errors) { |
393 | fprintf(stderr, "ERROR cpu%d before:\n", before->cpu); | 393 | fprintf(stderr, "ERROR cpu%d before:\n", before->cpu); |
394 | dump_pcc(before); | 394 | dump_cnt(before); |
395 | fprintf(stderr, "ERROR cpu%d after:\n", before->cpu); | 395 | fprintf(stderr, "ERROR cpu%d after:\n", before->cpu); |
396 | dump_pcc(after); | 396 | dump_cnt(after); |
397 | errors = 0; | 397 | errors = 0; |
398 | } | 398 | } |
399 | } | 399 | } |
400 | return 0; | 400 | return 0; |
401 | } | 401 | } |
402 | 402 | ||
403 | void compute_average(PCC *delta, PCC *avg) | 403 | void compute_average(struct counters *delta, struct counters *avg) |
404 | { | 404 | { |
405 | PCC *sum; | 405 | struct counters *sum; |
406 | 406 | ||
407 | sum = calloc(1, sizeof(PCC)); | 407 | sum = calloc(1, sizeof(struct counters)); |
408 | if (sum == NULL) { | 408 | if (sum == NULL) { |
409 | perror("calloc sum"); | 409 | perror("calloc sum"); |
410 | exit(1); | 410 | exit(1); |
@@ -438,35 +438,34 @@ void compute_average(PCC *delta, PCC *avg) | |||
438 | free(sum); | 438 | free(sum); |
439 | } | 439 | } |
440 | 440 | ||
441 | void get_counters(PCC *pcc) | 441 | void get_counters(struct counters *cnt) |
442 | { | 442 | { |
443 | for ( ; pcc; pcc = pcc->next) { | 443 | for ( ; cnt; cnt = cnt->next) { |
444 | pcc->tsc = get_msr(pcc->cpu, MSR_TSC); | 444 | cnt->tsc = get_msr(cnt->cpu, MSR_TSC); |
445 | if (do_nhm_cstates) | 445 | if (do_nhm_cstates) |
446 | pcc->c3 = get_msr(pcc->cpu, MSR_CORE_C3_RESIDENCY); | 446 | cnt->c3 = get_msr(cnt->cpu, MSR_CORE_C3_RESIDENCY); |
447 | if (do_nhm_cstates) | 447 | if (do_nhm_cstates) |
448 | pcc->c6 = get_msr(pcc->cpu, MSR_CORE_C6_RESIDENCY); | 448 | cnt->c6 = get_msr(cnt->cpu, MSR_CORE_C6_RESIDENCY); |
449 | if (do_snb_cstates) | 449 | if (do_snb_cstates) |
450 | pcc->c7 = get_msr(pcc->cpu, MSR_CORE_C7_RESIDENCY); | 450 | cnt->c7 = get_msr(cnt->cpu, MSR_CORE_C7_RESIDENCY); |
451 | if (has_aperf) | 451 | if (has_aperf) |
452 | pcc->aperf = get_msr(pcc->cpu, MSR_APERF); | 452 | cnt->aperf = get_msr(cnt->cpu, MSR_APERF); |
453 | if (has_aperf) | 453 | if (has_aperf) |
454 | pcc->mperf = get_msr(pcc->cpu, MSR_MPERF); | 454 | cnt->mperf = get_msr(cnt->cpu, MSR_MPERF); |
455 | if (do_snb_cstates) | 455 | if (do_snb_cstates) |
456 | pcc->pc2 = get_msr(pcc->cpu, MSR_PKG_C2_RESIDENCY); | 456 | cnt->pc2 = get_msr(cnt->cpu, MSR_PKG_C2_RESIDENCY); |
457 | if (do_nhm_cstates) | 457 | if (do_nhm_cstates) |
458 | pcc->pc3 = get_msr(pcc->cpu, MSR_PKG_C3_RESIDENCY); | 458 | cnt->pc3 = get_msr(cnt->cpu, MSR_PKG_C3_RESIDENCY); |
459 | if (do_nhm_cstates) | 459 | if (do_nhm_cstates) |
460 | pcc->pc6 = get_msr(pcc->cpu, MSR_PKG_C6_RESIDENCY); | 460 | cnt->pc6 = get_msr(cnt->cpu, MSR_PKG_C6_RESIDENCY); |
461 | if (do_snb_cstates) | 461 | if (do_snb_cstates) |
462 | pcc->pc7 = get_msr(pcc->cpu, MSR_PKG_C7_RESIDENCY); | 462 | cnt->pc7 = get_msr(cnt->cpu, MSR_PKG_C7_RESIDENCY); |
463 | if (extra_msr_offset) | 463 | if (extra_msr_offset) |
464 | pcc->extra_msr = get_msr(pcc->cpu, extra_msr_offset); | 464 | cnt->extra_msr = get_msr(cnt->cpu, extra_msr_offset); |
465 | } | 465 | } |
466 | } | 466 | } |
467 | 467 | ||
468 | 468 | void print_nehalem_info(void) | |
469 | void print_nehalem_info() | ||
470 | { | 469 | { |
471 | unsigned long long msr; | 470 | unsigned long long msr; |
472 | unsigned int ratio; | 471 | unsigned int ratio; |
@@ -514,38 +513,38 @@ void print_nehalem_info() | |||
514 | 513 | ||
515 | } | 514 | } |
516 | 515 | ||
517 | void free_counter_list(PCC *list) | 516 | void free_counter_list(struct counters *list) |
518 | { | 517 | { |
519 | PCC *p; | 518 | struct counters *p; |
520 | 519 | ||
521 | for (p = list; p; ) { | 520 | for (p = list; p; ) { |
522 | PCC *free_me; | 521 | struct counters *free_me; |
523 | 522 | ||
524 | free_me = p; | 523 | free_me = p; |
525 | p = p->next; | 524 | p = p->next; |
526 | free(free_me); | 525 | free(free_me); |
527 | } | 526 | } |
528 | return; | ||
529 | } | 527 | } |
530 | 528 | ||
531 | void free_all_counters(void) | 529 | void free_all_counters(void) |
532 | { | 530 | { |
533 | free_counter_list(pcc_even); | 531 | free_counter_list(cnt_even); |
534 | pcc_even = NULL; | 532 | cnt_even = NULL; |
535 | 533 | ||
536 | free_counter_list(pcc_odd); | 534 | free_counter_list(cnt_odd); |
537 | pcc_odd = NULL; | 535 | cnt_odd = NULL; |
538 | 536 | ||
539 | free_counter_list(pcc_delta); | 537 | free_counter_list(cnt_delta); |
540 | pcc_delta = NULL; | 538 | cnt_delta = NULL; |
541 | 539 | ||
542 | free_counter_list(pcc_average); | 540 | free_counter_list(cnt_average); |
543 | pcc_average = NULL; | 541 | cnt_average = NULL; |
544 | } | 542 | } |
545 | 543 | ||
546 | void insert_cpu_counters(PCC **list, PCC *new) | 544 | void insert_counters(struct counters **list, |
545 | struct counters *new) | ||
547 | { | 546 | { |
548 | PCC *prev; | 547 | struct counters *prev; |
549 | 548 | ||
550 | /* | 549 | /* |
551 | * list was empty | 550 | * list was empty |
@@ -594,18 +593,16 @@ void insert_cpu_counters(PCC **list, PCC *new) | |||
594 | */ | 593 | */ |
595 | new->next = prev->next; | 594 | new->next = prev->next; |
596 | prev->next = new; | 595 | prev->next = new; |
597 | |||
598 | return; | ||
599 | } | 596 | } |
600 | 597 | ||
601 | void alloc_new_cpu_counters(int pkg, int core, int cpu) | 598 | void alloc_new_counters(int pkg, int core, int cpu) |
602 | { | 599 | { |
603 | PCC *new; | 600 | struct counters *new; |
604 | 601 | ||
605 | if (verbose > 1) | 602 | if (verbose > 1) |
606 | printf("pkg%d core%d, cpu%d\n", pkg, core, cpu); | 603 | printf("pkg%d core%d, cpu%d\n", pkg, core, cpu); |
607 | 604 | ||
608 | new = (PCC *)calloc(1, sizeof(PCC)); | 605 | new = (struct counters *)calloc(1, sizeof(struct counters)); |
609 | if (new == NULL) { | 606 | if (new == NULL) { |
610 | perror("calloc"); | 607 | perror("calloc"); |
611 | exit(1); | 608 | exit(1); |
@@ -613,9 +610,10 @@ void alloc_new_cpu_counters(int pkg, int core, int cpu) | |||
613 | new->pkg = pkg; | 610 | new->pkg = pkg; |
614 | new->core = core; | 611 | new->core = core; |
615 | new->cpu = cpu; | 612 | new->cpu = cpu; |
616 | insert_cpu_counters(&pcc_odd, new); | 613 | insert_counters(&cnt_odd, new); |
617 | 614 | ||
618 | new = (PCC *)calloc(1, sizeof(PCC)); | 615 | new = (struct counters *)calloc(1, |
616 | sizeof(struct counters)); | ||
619 | if (new == NULL) { | 617 | if (new == NULL) { |
620 | perror("calloc"); | 618 | perror("calloc"); |
621 | exit(1); | 619 | exit(1); |
@@ -623,9 +621,9 @@ void alloc_new_cpu_counters(int pkg, int core, int cpu) | |||
623 | new->pkg = pkg; | 621 | new->pkg = pkg; |
624 | new->core = core; | 622 | new->core = core; |
625 | new->cpu = cpu; | 623 | new->cpu = cpu; |
626 | insert_cpu_counters(&pcc_even, new); | 624 | insert_counters(&cnt_even, new); |
627 | 625 | ||
628 | new = (PCC *)calloc(1, sizeof(PCC)); | 626 | new = (struct counters *)calloc(1, sizeof(struct counters)); |
629 | if (new == NULL) { | 627 | if (new == NULL) { |
630 | perror("calloc"); | 628 | perror("calloc"); |
631 | exit(1); | 629 | exit(1); |
@@ -633,9 +631,9 @@ void alloc_new_cpu_counters(int pkg, int core, int cpu) | |||
633 | new->pkg = pkg; | 631 | new->pkg = pkg; |
634 | new->core = core; | 632 | new->core = core; |
635 | new->cpu = cpu; | 633 | new->cpu = cpu; |
636 | insert_cpu_counters(&pcc_delta, new); | 634 | insert_counters(&cnt_delta, new); |
637 | 635 | ||
638 | new = (PCC *)calloc(1, sizeof(PCC)); | 636 | new = (struct counters *)calloc(1, sizeof(struct counters)); |
639 | if (new == NULL) { | 637 | if (new == NULL) { |
640 | perror("calloc"); | 638 | perror("calloc"); |
641 | exit(1); | 639 | exit(1); |
@@ -643,7 +641,7 @@ void alloc_new_cpu_counters(int pkg, int core, int cpu) | |||
643 | new->pkg = pkg; | 641 | new->pkg = pkg; |
644 | new->core = core; | 642 | new->core = core; |
645 | new->cpu = cpu; | 643 | new->cpu = cpu; |
646 | pcc_average = new; | 644 | cnt_average = new; |
647 | } | 645 | } |
648 | 646 | ||
649 | int get_physical_package_id(int cpu) | 647 | int get_physical_package_id(int cpu) |
@@ -719,7 +717,7 @@ void re_initialize(void) | |||
719 | { | 717 | { |
720 | printf("turbostat: topology changed, re-initializing.\n"); | 718 | printf("turbostat: topology changed, re-initializing.\n"); |
721 | free_all_counters(); | 719 | free_all_counters(); |
722 | num_cpus = for_all_cpus(alloc_new_cpu_counters); | 720 | num_cpus = for_all_cpus(alloc_new_counters); |
723 | need_reinitialize = 0; | 721 | need_reinitialize = 0; |
724 | printf("num_cpus is now %d\n", num_cpus); | 722 | printf("num_cpus is now %d\n", num_cpus); |
725 | } | 723 | } |
@@ -728,7 +726,7 @@ void dummy(int pkg, int core, int cpu) { return; } | |||
728 | /* | 726 | /* |
729 | * check to see if a cpu came on-line | 727 | * check to see if a cpu came on-line |
730 | */ | 728 | */ |
731 | void verify_num_cpus() | 729 | void verify_num_cpus(void) |
732 | { | 730 | { |
733 | int new_num_cpus; | 731 | int new_num_cpus; |
734 | 732 | ||
@@ -740,14 +738,12 @@ void verify_num_cpus() | |||
740 | num_cpus, new_num_cpus); | 738 | num_cpus, new_num_cpus); |
741 | need_reinitialize = 1; | 739 | need_reinitialize = 1; |
742 | } | 740 | } |
743 | |||
744 | return; | ||
745 | } | 741 | } |
746 | 742 | ||
747 | void turbostat_loop() | 743 | void turbostat_loop() |
748 | { | 744 | { |
749 | restart: | 745 | restart: |
750 | get_counters(pcc_even); | 746 | get_counters(cnt_even); |
751 | gettimeofday(&tv_even, (struct timezone *)NULL); | 747 | gettimeofday(&tv_even, (struct timezone *)NULL); |
752 | 748 | ||
753 | while (1) { | 749 | while (1) { |
@@ -757,24 +753,24 @@ restart: | |||
757 | goto restart; | 753 | goto restart; |
758 | } | 754 | } |
759 | sleep(interval_sec); | 755 | sleep(interval_sec); |
760 | get_counters(pcc_odd); | 756 | get_counters(cnt_odd); |
761 | gettimeofday(&tv_odd, (struct timezone *)NULL); | 757 | gettimeofday(&tv_odd, (struct timezone *)NULL); |
762 | 758 | ||
763 | compute_delta(pcc_odd, pcc_even, pcc_delta); | 759 | compute_delta(cnt_odd, cnt_even, cnt_delta); |
764 | timersub(&tv_odd, &tv_even, &tv_delta); | 760 | timersub(&tv_odd, &tv_even, &tv_delta); |
765 | compute_average(pcc_delta, pcc_average); | 761 | compute_average(cnt_delta, cnt_average); |
766 | print_counters(pcc_delta); | 762 | print_counters(cnt_delta); |
767 | if (need_reinitialize) { | 763 | if (need_reinitialize) { |
768 | re_initialize(); | 764 | re_initialize(); |
769 | goto restart; | 765 | goto restart; |
770 | } | 766 | } |
771 | sleep(interval_sec); | 767 | sleep(interval_sec); |
772 | get_counters(pcc_even); | 768 | get_counters(cnt_even); |
773 | gettimeofday(&tv_even, (struct timezone *)NULL); | 769 | gettimeofday(&tv_even, (struct timezone *)NULL); |
774 | compute_delta(pcc_even, pcc_odd, pcc_delta); | 770 | compute_delta(cnt_even, cnt_odd, cnt_delta); |
775 | timersub(&tv_even, &tv_odd, &tv_delta); | 771 | timersub(&tv_even, &tv_odd, &tv_delta); |
776 | compute_average(pcc_delta, pcc_average); | 772 | compute_average(cnt_delta, cnt_average); |
777 | print_counters(pcc_delta); | 773 | print_counters(cnt_delta); |
778 | } | 774 | } |
779 | } | 775 | } |
780 | 776 | ||
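turbostat_loop() above keeps the even/odd double-buffering scheme: counters are read into cnt_even and cnt_odd on alternating intervals, and each half-iteration subtracts the older snapshot from the newer one into cnt_delta, so no per-interval copying is needed. A compressed sketch of that alternation, with single scalars standing in for the per-cpu counter lists:

#include <stdio.h>

/* Stand-ins for cnt_even/cnt_odd/cnt_delta; the real code walks linked lists. */
static unsigned long long even, odd, delta;

static unsigned long long read_counter(void)
{
	static unsigned long long fake_tsc;

	return fake_tsc += 1000;	/* pretend the counter advanced */
}

int main(void)
{
	int i;

	even = read_counter();			/* prime the first snapshot */
	for (i = 0; i < 3; i++) {
		odd = read_counter();
		delta = odd - even;		/* compute_delta(odd, even, delta) */
		printf("interval %d: delta=%llu\n", 2 * i, delta);

		even = read_counter();
		delta = even - odd;		/* compute_delta(even, odd, delta) */
		printf("interval %d: delta=%llu\n", 2 * i + 1, delta);
	}
	return 0;
}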
@@ -892,7 +888,7 @@ void check_cpuid() | |||
892 | * this check is valid for both Intel and AMD | 888 | * this check is valid for both Intel and AMD |
893 | */ | 889 | */ |
894 | asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000007)); | 890 | asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000007)); |
895 | has_invariant_tsc = edx && (1 << 8); | 891 | has_invariant_tsc = edx & (1 << 8); |
896 | 892 | ||
897 | if (!has_invariant_tsc) { | 893 | if (!has_invariant_tsc) { |
898 | fprintf(stderr, "No invariant TSC\n"); | 894 | fprintf(stderr, "No invariant TSC\n"); |
@@ -905,7 +901,7 @@ void check_cpuid() | |||
905 | */ | 901 | */ |
906 | 902 | ||
907 | asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x6)); | 903 | asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x6)); |
908 | has_aperf = ecx && (1 << 0); | 904 | has_aperf = ecx & (1 << 0); |
909 | if (!has_aperf) { | 905 | if (!has_aperf) { |
910 | fprintf(stderr, "No APERF MSR\n"); | 906 | fprintf(stderr, "No APERF MSR\n"); |
911 | exit(1); | 907 | exit(1); |
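The two cpuid hunks above are real bug fixes rather than renames: `edx && (1 << 8)` is a logical AND, which evaluates to 1 whenever edx is nonzero, so the invariant-TSC and APERF checks could never report a missing feature on any cpu that returned a nonzero register. The bitwise `&` actually tests the feature bit. A tiny demonstration of the difference:

#include <stdio.h>

int main(void)
{
	/* edx with bit 8 clear but other bits set, as a cpu without
	 * an invariant TSC would report */
	unsigned int edx = 0x00000007;

	int wrong = edx && (1 << 8);	/* logical: 1, feature wrongly "present" */
	int right = edx & (1 << 8);	/* bitwise: 0, feature correctly absent  */

	printf("&&: %d   &: %d\n", wrong, right);
	return 0;
}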
@@ -952,7 +948,7 @@ void turbostat_init() | |||
952 | check_dev_msr(); | 948 | check_dev_msr(); |
953 | check_super_user(); | 949 | check_super_user(); |
954 | 950 | ||
955 | num_cpus = for_all_cpus(alloc_new_cpu_counters); | 951 | num_cpus = for_all_cpus(alloc_new_counters); |
956 | 952 | ||
957 | if (verbose) | 953 | if (verbose) |
958 | print_nehalem_info(); | 954 | print_nehalem_info(); |
@@ -962,7 +958,7 @@ int fork_it(char **argv) | |||
962 | { | 958 | { |
963 | int retval; | 959 | int retval; |
964 | pid_t child_pid; | 960 | pid_t child_pid; |
965 | get_counters(pcc_even); | 961 | get_counters(cnt_even); |
966 | gettimeofday(&tv_even, (struct timezone *)NULL); | 962 | gettimeofday(&tv_even, (struct timezone *)NULL); |
967 | 963 | ||
968 | child_pid = fork(); | 964 | child_pid = fork(); |
@@ -985,14 +981,14 @@ int fork_it(char **argv) | |||
985 | exit(1); | 981 | exit(1); |
986 | } | 982 | } |
987 | } | 983 | } |
988 | get_counters(pcc_odd); | 984 | get_counters(cnt_odd); |
989 | gettimeofday(&tv_odd, (struct timezone *)NULL); | 985 | gettimeofday(&tv_odd, (struct timezone *)NULL); |
990 | retval = compute_delta(pcc_odd, pcc_even, pcc_delta); | 986 | retval = compute_delta(cnt_odd, cnt_even, cnt_delta); |
991 | 987 | ||
992 | timersub(&tv_odd, &tv_even, &tv_delta); | 988 | timersub(&tv_odd, &tv_even, &tv_delta); |
993 | compute_average(pcc_delta, pcc_average); | 989 | compute_average(cnt_delta, cnt_average); |
994 | if (!retval) | 990 | if (!retval) |
995 | print_counters(pcc_delta); | 991 | print_counters(cnt_delta); |
996 | 992 | ||
997 | fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);; | 993 | fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);; |
998 | 994 | ||