 34 files changed, 276 insertions(+), 213 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index c6d0e93eff62..99d5b4f43564 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -73,7 +73,8 @@ Descriptions of section entries:
 L:	Mailing list that is relevant to this area
 W:	Web-page with status/info
 Q:	Patchwork web based patch tracking system site
-T:	SCM tree type and location.  Type is one of: git, hg, quilt, stgit, topgit.
+T:	SCM tree type and location.
+	Type is one of: git, hg, quilt, stgit, topgit
 S:	Status, one of the following:
 	Supported:	Someone is actually paid to look after this.
 	Maintained:	Someone actually looks after it.
@@ -1612,11 +1613,11 @@ S:	Maintained
 F:	drivers/net/wireless/atmel*
 
 ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER
 M:	Bradley Grove <linuxdrivers@attotech.com>
 L:	linux-scsi@vger.kernel.org
 W:	http://www.attotech.com
 S:	Supported
 F:	drivers/scsi/esas2r
 
 AUDIT SUBSYSTEM
 M:	Eric Paris <eparis@redhat.com>
@@ -2159,7 +2160,7 @@ F:	Documentation/zh_CN/
 
 CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER
 M:	Peter Chen <Peter.Chen@freescale.com>
-T:	git://github.com/hzpeterchen/linux-usb.git
+T:	git git://github.com/hzpeterchen/linux-usb.git
 L:	linux-usb@vger.kernel.org
 S:	Maintained
 F:	drivers/usb/chipidea/
@@ -2179,9 +2180,9 @@ S:	Supported
 F:	drivers/net/ethernet/cisco/enic/
 
 CISCO VIC LOW LATENCY NIC DRIVER
 M:	Upinder Malhi <umalhi@cisco.com>
 S:	Supported
 F:	drivers/infiniband/hw/usnic
 
 CIRRUS LOGIC EP93XX ETHERNET DRIVER
 M:	Hartley Sweeten <hsweeten@visionengravers.com>
@@ -2378,20 +2379,20 @@ F:	drivers/cpufreq/arm_big_little.c
 F:	drivers/cpufreq/arm_big_little_dt.c
 
 CPUIDLE DRIVER - ARM BIG LITTLE
 M:	Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 M:	Daniel Lezcano <daniel.lezcano@linaro.org>
 L:	linux-pm@vger.kernel.org
 L:	linux-arm-kernel@lists.infradead.org
-T:	git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
 S:	Maintained
 F:	drivers/cpuidle/cpuidle-big_little.c
 
 CPUIDLE DRIVERS
 M:	Rafael J. Wysocki <rjw@rjwysocki.net>
 M:	Daniel Lezcano <daniel.lezcano@linaro.org>
 L:	linux-pm@vger.kernel.org
 S:	Maintained
-T:	git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
 F:	drivers/cpuidle/*
 F:	include/linux/cpuidle.h
 
@@ -2458,9 +2459,9 @@ S:	Maintained
 F:	sound/pci/cs5535audio/
 
 CW1200 WLAN driver
 M:	Solomon Peachy <pizza@shaftnet.org>
 S:	Maintained
 F:	drivers/net/wireless/cw1200/
 
 CX18 VIDEO4LINUX DRIVER
 M:	Andy Walls <awalls@md.metrocast.net>
@@ -3095,6 +3096,8 @@ F:	fs/ecryptfs/
 
 EDAC-CORE
 M:	Doug Thompson <dougthompson@xmission.com>
+M:	Borislav Petkov <bp@alien8.de>
+M:	Mauro Carvalho Chehab <m.chehab@samsung.com>
 L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Supported
@@ -4914,7 +4917,7 @@ F:	drivers/staging/ktap/
 KCONFIG
 M:	"Yann E. MORIN" <yann.morin.1998@free.fr>
 L:	linux-kbuild@vger.kernel.org
-T:	git://gitorious.org/linux-kconfig/linux-kconfig
+T:	git git://gitorious.org/linux-kconfig/linux-kconfig
 S:	Maintained
 F:	Documentation/kbuild/kconfig-language.txt
 F:	scripts/kconfig/
@@ -5471,11 +5474,11 @@ S:	Maintained
 F:	drivers/media/tuners/m88ts2022*
 
 MA901 MASTERKIT USB FM RADIO DRIVER
 M:	Alexey Klimov <klimov.linux@gmail.com>
 L:	linux-media@vger.kernel.org
 T:	git git://linuxtv.org/media_tree.git
 S:	Maintained
 F:	drivers/media/radio/radio-ma901.c
 
 MAC80211
 M:	Johannes Berg <johannes@sipsolutions.net>
@@ -5636,7 +5639,7 @@ F:	drivers/scsi/megaraid/
 
 MELLANOX ETHERNET DRIVER (mlx4_en)
 M:	Amir Vadai <amirv@mellanox.com>
 L:	netdev@vger.kernel.org
 S:	Supported
 W:	http://www.mellanox.com
 Q:	http://patchwork.ozlabs.org/project/netdev/list/
@@ -5677,7 +5680,7 @@ F:	include/linux/mtd/
 F:	include/uapi/mtd/
 
 MEN A21 WATCHDOG DRIVER
 M:	Johannes Thumshirn <johannes.thumshirn@men.de>
 L:	linux-watchdog@vger.kernel.org
 S:	Supported
 F:	drivers/watchdog/mena21_wdt.c
@@ -5733,20 +5736,20 @@ L:	linux-rdma@vger.kernel.org
 W:	http://www.mellanox.com
 Q:	http://patchwork.ozlabs.org/project/netdev/list/
 Q:	http://patchwork.kernel.org/project/linux-rdma/list/
-T:	git://openfabrics.org/~eli/connect-ib.git
+T:	git git://openfabrics.org/~eli/connect-ib.git
 S:	Supported
 F:	drivers/net/ethernet/mellanox/mlx5/core/
 F:	include/linux/mlx5/
 
 Mellanox MLX5 IB driver
 M:	Eli Cohen <eli@mellanox.com>
 L:	linux-rdma@vger.kernel.org
 W:	http://www.mellanox.com
 Q:	http://patchwork.kernel.org/project/linux-rdma/list/
-T:	git://openfabrics.org/~eli/connect-ib.git
+T:	git git://openfabrics.org/~eli/connect-ib.git
 S:	Supported
 F:	include/linux/mlx5/
 F:	drivers/infiniband/hw/mlx5/
 
 MODULE SUPPORT
 M:	Rusty Russell <rusty@rustcorp.com.au>
@@ -8700,17 +8703,17 @@ S:	Maintained
 F:	drivers/media/radio/radio-raremono.c
 
 THERMAL
 M:	Zhang Rui <rui.zhang@intel.com>
 M:	Eduardo Valentin <eduardo.valentin@ti.com>
 L:	linux-pm@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal.git
 Q:	https://patchwork.kernel.org/project/linux-pm/list/
 S:	Supported
 F:	drivers/thermal/
 F:	include/linux/thermal.h
 F:	include/linux/cpu_cooling.h
 F:	Documentation/devicetree/bindings/thermal/
 
 THINGM BLINK(1) USB RGB LED DRIVER
 M:	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
@@ -9812,7 +9815,7 @@ ZR36067 VIDEO FOR LINUX DRIVER
 L:	mjpeg-users@lists.sourceforge.net
 L:	linux-media@vger.kernel.org
 W:	http://mjpeg.sourceforge.net/driver-zoran/
-T:	Mercurial http://linuxtv.org/hg/v4l-dvb
+T:	hg http://linuxtv.org/hg/v4l-dvb
 S:	Odd Fixes
 F:	drivers/media/pci/zoran/
 
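Several of the MAINTAINERS hunks above only add the missing SCM type in front of a tree URL, matching the clarified T: description ("SCM tree type and location", where the type is one of git, hg, quilt, stgit, topgit). A hypothetical entry in the corrected form (illustrative only, not an entry from the tree) would read:

	EXAMPLE PLATFORM DRIVER
	M:	Jane Doe <jane@example.org>
	L:	linux-kernel@vger.kernel.org
	T:	git git://git.example.org/example.git
	S:	Maintained
	F:	drivers/misc/example/
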
diff --git a/arch/sh/include/cpu-sh2/cpu/cache.h b/arch/sh/include/cpu-sh2/cpu/cache.h
index 673515bc4135..aa1b2b9088a7 100644
--- a/arch/sh/include/cpu-sh2/cpu/cache.h
+++ b/arch/sh/include/cpu-sh2/cpu/cache.h
@@ -18,7 +18,7 @@
 #define SH_CACHE_ASSOC		8
 
 #if defined(CONFIG_CPU_SUBTYPE_SH7619)
-#define CCR		0xffffffec
+#define SH_CCR		0xffffffec
 
 #define CCR_CACHE_CE	0x01	/* Cache enable */
 #define CCR_CACHE_WT	0x02	/* CCR[bit1=1,bit2=1] */
diff --git a/arch/sh/include/cpu-sh2a/cpu/cache.h b/arch/sh/include/cpu-sh2a/cpu/cache.h
index defb0baa5a06..b27ce92cb600 100644
--- a/arch/sh/include/cpu-sh2a/cpu/cache.h
+++ b/arch/sh/include/cpu-sh2a/cpu/cache.h
@@ -17,8 +17,8 @@
 #define SH_CACHE_COMBINED	4
 #define SH_CACHE_ASSOC		8
 
-#define CCR		0xfffc1000 /* CCR1 */
-#define CCR2		0xfffc1004
+#define SH_CCR		0xfffc1000 /* CCR1 */
+#define SH_CCR2	0xfffc1004
 
 /*
  * Most of the SH-2A CCR1 definitions resemble the SH-4 ones. All others not
diff --git a/arch/sh/include/cpu-sh3/cpu/cache.h b/arch/sh/include/cpu-sh3/cpu/cache.h
index bee2d81c56bf..29700fd88c75 100644
--- a/arch/sh/include/cpu-sh3/cpu/cache.h
+++ b/arch/sh/include/cpu-sh3/cpu/cache.h
@@ -17,7 +17,7 @@
 #define SH_CACHE_COMBINED	4
 #define SH_CACHE_ASSOC		8
 
-#define CCR		0xffffffec	/* Address of Cache Control Register */
+#define SH_CCR		0xffffffec	/* Address of Cache Control Register */
 
 #define CCR_CACHE_CE	0x01	/* Cache Enable */
 #define CCR_CACHE_WT	0x02	/* Write-Through (for P0,U0,P3) (else writeback) */
diff --git a/arch/sh/include/cpu-sh4/cpu/cache.h b/arch/sh/include/cpu-sh4/cpu/cache.h
index 7bfb9e8b069c..92c4cd119b66 100644
--- a/arch/sh/include/cpu-sh4/cpu/cache.h
+++ b/arch/sh/include/cpu-sh4/cpu/cache.h
@@ -17,7 +17,7 @@
 #define SH_CACHE_COMBINED	4
 #define SH_CACHE_ASSOC		8
 
-#define CCR		0xff00001c	/* Address of Cache Control Register */
+#define SH_CCR		0xff00001c	/* Address of Cache Control Register */
 #define CCR_CACHE_OCE	0x0001	/* Operand Cache Enable */
 #define CCR_CACHE_WT	0x0002	/* Write-Through (for P0,U0,P3) (else writeback)*/
 #define CCR_CACHE_CB	0x0004	/* Copy-Back (for P1) (else writethrough) */
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index ecf83cd158dc..0d7360d549c1 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -112,7 +112,7 @@ static void cache_init(void)
 	unsigned long ccr, flags;
 
 	jump_to_uncached();
-	ccr = __raw_readl(CCR);
+	ccr = __raw_readl(SH_CCR);
 
 	/*
 	 * At this point we don't know whether the cache is enabled or not - a
@@ -189,7 +189,7 @@ static void cache_init(void)
 
 	l2_cache_init();
 
-	__raw_writel(flags, CCR);
+	__raw_writel(flags, SH_CCR);
 	back_to_cached();
 }
 #else
diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c
index 115725198038..777e50f33c00 100644
--- a/arch/sh/mm/cache-debugfs.c
+++ b/arch/sh/mm/cache-debugfs.c
@@ -36,7 +36,7 @@ static int cache_seq_show(struct seq_file *file, void *iter)
 	 */
 	jump_to_uncached();
 
-	ccr = __raw_readl(CCR);
+	ccr = __raw_readl(SH_CCR);
 	if ((ccr & CCR_CACHE_ENABLE) == 0) {
 		back_to_cached();
 
diff --git a/arch/sh/mm/cache-sh2.c b/arch/sh/mm/cache-sh2.c
index defcf719f2e8..a74259f2f981 100644
--- a/arch/sh/mm/cache-sh2.c
+++ b/arch/sh/mm/cache-sh2.c
@@ -63,9 +63,9 @@ static void sh2__flush_invalidate_region(void *start, int size)
 	local_irq_save(flags);
 	jump_to_uncached();
 
-	ccr = __raw_readl(CCR);
+	ccr = __raw_readl(SH_CCR);
 	ccr |= CCR_CACHE_INVALIDATE;
-	__raw_writel(ccr, CCR);
+	__raw_writel(ccr, SH_CCR);
 
 	back_to_cached();
 	local_irq_restore(flags);
diff --git a/arch/sh/mm/cache-sh2a.c b/arch/sh/mm/cache-sh2a.c
index 949e2d3138a0..ee87d081259b 100644
--- a/arch/sh/mm/cache-sh2a.c
+++ b/arch/sh/mm/cache-sh2a.c
@@ -134,7 +134,8 @@ static void sh2a__flush_invalidate_region(void *start, int size)
 
 	/* If there are too many pages then just blow the cache */
 	if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
-		__raw_writel(__raw_readl(CCR) | CCR_OCACHE_INVALIDATE, CCR);
+		__raw_writel(__raw_readl(SH_CCR) | CCR_OCACHE_INVALIDATE,
+			     SH_CCR);
 	} else {
 		for (v = begin; v < end; v += L1_CACHE_BYTES)
 			sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
@@ -167,7 +168,8 @@ static void sh2a_flush_icache_range(void *args)
 	/* I-Cache invalidate */
 	/* If there are too many pages then just blow the cache */
 	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
-		__raw_writel(__raw_readl(CCR) | CCR_ICACHE_INVALIDATE, CCR);
+		__raw_writel(__raw_readl(SH_CCR) | CCR_ICACHE_INVALIDATE,
+			     SH_CCR);
 	} else {
 		for (v = start; v < end; v += L1_CACHE_BYTES)
 			sh2a_invalidate_line(CACHE_IC_ADDRESS_ARRAY, v);
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 0e529285b28d..51d8f7f31d1d 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -133,9 +133,9 @@ static void flush_icache_all(void)
 	jump_to_uncached();
 
 	/* Flush I-cache */
-	ccr = __raw_readl(CCR);
+	ccr = __raw_readl(SH_CCR);
 	ccr |= CCR_CACHE_ICI;
-	__raw_writel(ccr, CCR);
+	__raw_writel(ccr, SH_CCR);
 
 	/*
 	 * back_to_cached() will take care of the barrier for us, don't add
diff --git a/arch/sh/mm/cache-shx3.c b/arch/sh/mm/cache-shx3.c
index c0adbee97b5f..24c58b7dc022 100644
--- a/arch/sh/mm/cache-shx3.c
+++ b/arch/sh/mm/cache-shx3.c
@@ -19,7 +19,7 @@ void __init shx3_cache_init(void)
 {
 	unsigned int ccr;
 
-	ccr = __raw_readl(CCR);
+	ccr = __raw_readl(SH_CCR);
 
 	/*
 	 * If we've got cache aliases, resolve them in hardware.
@@ -40,5 +40,5 @@ void __init shx3_cache_init(void)
 	ccr |= CCR_CACHE_IBE;
 #endif
 
-	writel_uncached(ccr, CCR);
+	writel_uncached(ccr, SH_CCR);
 }
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 616966a96cba..097c2cdd117f 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -285,8 +285,8 @@ void __init cpu_cache_init(void)
 {
 	unsigned int cache_disabled = 0;
 
-#ifdef CCR
-	cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
+#ifdef SH_CCR
+	cache_disabled = !(__raw_readl(SH_CCR) & CCR_CACHE_ENABLE);
 #endif
 
 	compute_alias(&boot_cpu_data.icache);
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 8184451b57c0..422b7d84f686 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -874,7 +874,7 @@ bio_pageinc(struct bio *bio)
 		/* Non-zero page count for non-head members of
 		 * compound pages is no longer allowed by the kernel.
 		 */
-		page = compound_trans_head(bv.bv_page);
+		page = compound_head(bv.bv_page);
 		atomic_inc(&page->_count);
 	}
 }
@@ -887,7 +887,7 @@ bio_pagedec(struct bio *bio)
 	struct bvec_iter iter;
 
 	bio_for_each_segment(bv, bio, iter) {
-		page = compound_trans_head(bv.bv_page);
+		page = compound_head(bv.bv_page);
 		atomic_dec(&page->_count);
 	}
 }
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 011e55d820b1..51c557cfd92b 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -612,6 +612,8 @@ static ssize_t disksize_store(struct device *dev,
 
 	disksize = PAGE_ALIGN(disksize);
 	meta = zram_meta_alloc(disksize);
+	if (!meta)
+		return -ENOMEM;
 	down_write(&zram->init_lock);
 	if (zram->init_done) {
 		up_write(&zram->init_lock);
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index b4b0d83f9ef6..7061ac0ad428 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -678,6 +678,7 @@ struct tsi721_bdma_chan {
 	struct list_head	free_list;
 	dma_cookie_t		completed_cookie;
 	struct tasklet_struct	tasklet;
+	bool			active;
 };
 
 #endif /* CONFIG_RAPIDIO_DMA_ENGINE */
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
index 502663f5f7c6..91245f5dbe81 100644
--- a/drivers/rapidio/devices/tsi721_dma.c
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -206,8 +206,8 @@ void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
 {
 	/* Disable BDMA channel interrupts */
 	iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
-
-	tasklet_schedule(&bdma_chan->tasklet);
+	if (bdma_chan->active)
+		tasklet_schedule(&bdma_chan->tasklet);
 }
 
 #ifdef CONFIG_PCI_MSI
@@ -562,7 +562,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
 	}
 #endif /* CONFIG_PCI_MSI */
 
-	tasklet_enable(&bdma_chan->tasklet);
+	bdma_chan->active = true;
 	tsi721_bdma_interrupt_enable(bdma_chan, 1);
 
 	return bdma_chan->bd_num - 1;
@@ -576,9 +576,7 @@ err_out:
 static void tsi721_free_chan_resources(struct dma_chan *dchan)
 {
 	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
-#ifdef CONFIG_PCI_MSI
 	struct tsi721_device *priv = to_tsi721(dchan->device);
-#endif
 	LIST_HEAD(list);
 
 	dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
@@ -589,14 +587,25 @@ static void tsi721_free_chan_resources(struct dma_chan *dchan)
 	BUG_ON(!list_empty(&bdma_chan->active_list));
 	BUG_ON(!list_empty(&bdma_chan->queue));
 
-	tasklet_disable(&bdma_chan->tasklet);
+	tsi721_bdma_interrupt_enable(bdma_chan, 0);
+	bdma_chan->active = false;
+
+#ifdef CONFIG_PCI_MSI
+	if (priv->flags & TSI721_USING_MSIX) {
+		synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
+					   bdma_chan->id].vector);
+		synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
+					   bdma_chan->id].vector);
+	} else
+#endif
+	synchronize_irq(priv->pdev->irq);
+
+	tasklet_kill(&bdma_chan->tasklet);
 
 	spin_lock_bh(&bdma_chan->lock);
 	list_splice_init(&bdma_chan->free_list, &list);
 	spin_unlock_bh(&bdma_chan->lock);
 
-	tsi721_bdma_interrupt_enable(bdma_chan, 0);
-
 #ifdef CONFIG_PCI_MSI
 	if (priv->flags & TSI721_USING_MSIX) {
 		free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
@@ -790,6 +799,7 @@ int tsi721_register_dma(struct tsi721_device *priv)
 		bdma_chan->dchan.cookie = 1;
 		bdma_chan->dchan.chan_id = i;
 		bdma_chan->id = i;
+		bdma_chan->active = false;
 
 		spin_lock_init(&bdma_chan->lock);
 
@@ -799,7 +809,6 @@ int tsi721_register_dma(struct tsi721_device *priv)
 
 		tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
 			     (unsigned long)bdma_chan);
-		tasklet_disable(&bdma_chan->tasklet);
 		list_add_tail(&bdma_chan->dchan.device_node,
 				&mport->dma.channels);
 	}
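The tsi721 change replaces tasklet_disable()/tasklet_enable() bracketing with an explicit active flag, then drains the interrupt paths before killing the tasklet. A rough userspace analog of that teardown order (a pthreads sketch with made-up names, not the driver's API):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <unistd.h>

	static atomic_bool active;          /* stands in for bdma_chan->active */
	static atomic_int  work_done;

	/* stands in for the interrupt handler + tasklet pair */
	static void *irq_worker(void *arg)
	{
		(void)arg;
		while (atomic_load(&active)) {
			atomic_fetch_add(&work_done, 1);  /* "schedule" some work */
			usleep(1000);
		}
		return NULL;                              /* nothing runs once inactive */
	}

	int main(void)
	{
		pthread_t worker;

		atomic_store(&active, true);              /* alloc_chan_resources() analog */
		pthread_create(&worker, NULL, irq_worker, NULL);
		usleep(10 * 1000);

		atomic_store(&active, false);             /* stop queueing new work */
		pthread_join(worker, NULL);               /* drain in-flight work, like
		                                             synchronize_irq() + tasklet_kill() */
		printf("completed %d work items, teardown clean\n",
		       atomic_load(&work_done));
		return 0;
	}
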
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 7afd373b9595..c4cde9c08f1f 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -580,10 +580,12 @@ static int s3c_rtc_suspend(struct device *dev)
 
 	clk_enable(rtc_clk);
 	/* save TICNT for anyone using periodic interrupts */
-	ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT);
 	if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
 		ticnt_en_save = readw(s3c_rtc_base + S3C2410_RTCCON);
 		ticnt_en_save &= S3C64XX_RTCCON_TICEN;
+		ticnt_save = readl(s3c_rtc_base + S3C2410_TICNT);
+	} else {
+		ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT);
 	}
 	s3c_rtc_enable(pdev, 0);
 
@@ -605,10 +607,15 @@ static int s3c_rtc_resume(struct device *dev)
 
 	clk_enable(rtc_clk);
 	s3c_rtc_enable(pdev, 1);
-	writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
-	if (s3c_rtc_cpu_type == TYPE_S3C64XX && ticnt_en_save) {
-		tmp = readw(s3c_rtc_base + S3C2410_RTCCON);
-		writew(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
+	if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
+		writel(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
+		if (ticnt_en_save) {
+			tmp = readw(s3c_rtc_base + S3C2410_RTCCON);
+			writew(tmp | ticnt_en_save,
+			       s3c_rtc_base + S3C2410_RTCCON);
+		}
+	} else {
+		writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
 	}
 
 	if (device_may_wakeup(dev) && wake_en) {
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 4fb7a8f83c8a..54af4e933695 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -186,12 +186,12 @@ static bool is_invalid_reserved_pfn(unsigned long pfn)
 	if (pfn_valid(pfn)) {
 		bool reserved;
 		struct page *tail = pfn_to_page(pfn);
-		struct page *head = compound_trans_head(tail);
+		struct page *head = compound_head(tail);
 		reserved = !!(PageReserved(head));
 		if (head != tail) {
 			/*
 			 * "head" is not a dangling pointer
-			 * (compound_trans_head takes care of that)
+			 * (compound_head takes care of that)
 			 * but the hugepage may have been split
 			 * from under us (and we may not hold a
 			 * reference count on the head page so it can
diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c
index 968eab5bc1f5..68537e8b7a09 100644
--- a/fs/hfsplus/options.c
+++ b/fs/hfsplus/options.c
@@ -75,7 +75,7 @@ int hfsplus_parse_options_remount(char *input, int *force)
 	int token;
 
 	if (!input)
-		return 0;
+		return 1;
 
 	while ((p = strsep(&input, ",")) != NULL) {
 		if (!*p)
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index aaa50611ec66..d7b5108789e2 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -717,6 +717,12 @@ static int ocfs2_release_dquot(struct dquot *dquot)
 	 */
 	if (status < 0)
 		mlog_errno(status);
+	/*
+	 * Clear dq_off so that we search for the structure in quota file next
+	 * time we acquire it. The structure might be deleted and reallocated
+	 * elsewhere by another node while our dquot structure is on freelist.
+	 */
+	dquot->dq_off = 0;
 	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
 out_trans:
 	ocfs2_commit_trans(osb, handle);
@@ -756,16 +762,17 @@ static int ocfs2_acquire_dquot(struct dquot *dquot)
 	status = ocfs2_lock_global_qf(info, 1);
 	if (status < 0)
 		goto out;
-	if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
-		status = ocfs2_qinfo_lock(info, 0);
-		if (status < 0)
-			goto out_dq;
-		status = qtree_read_dquot(&info->dqi_gi, dquot);
-		ocfs2_qinfo_unlock(info, 0);
-		if (status < 0)
-			goto out_dq;
-	}
-	set_bit(DQ_READ_B, &dquot->dq_flags);
+	status = ocfs2_qinfo_lock(info, 0);
+	if (status < 0)
+		goto out_dq;
+	/*
+	 * We always want to read dquot structure from disk because we don't
+	 * know what happened with it while it was on freelist.
+	 */
+	status = qtree_read_dquot(&info->dqi_gi, dquot);
+	ocfs2_qinfo_unlock(info, 0);
+	if (status < 0)
+		goto out_dq;
 
 	OCFS2_DQUOT(dquot)->dq_use_count++;
 	OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index 2e4344be3b96..2001862bf2b1 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -1303,10 +1303,6 @@ int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot)
 	ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh);
 
 out:
-	/* Clear the read bit so that next time someone uses this
-	 * dquot he reads fresh info from disk and allocates local
-	 * dquot structure */
-	clear_bit(DQ_READ_B, &dquot->dq_flags);
 	return status;
 }
 
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 02174a610315..e647c55275d9 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -121,9 +121,8 @@ u64 stable_page_flags(struct page *page)
 	 * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon
 	 * to make sure a given page is a thp, not a non-huge compound page.
 	 */
-	else if (PageTransCompound(page) &&
-		 (PageLRU(compound_trans_head(page)) ||
-		  PageAnon(compound_trans_head(page))))
+	else if (PageTransCompound(page) && (PageLRU(compound_head(page)) ||
+					     PageAnon(compound_head(page))))
 		u |= 1 << KPF_THP;
 
 	/*
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index db512014e061..b826239bdce0 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -157,46 +157,6 @@ static inline int hpage_nr_pages(struct page *page)
 		return HPAGE_PMD_NR;
 	return 1;
 }
-/*
- * compound_trans_head() should be used instead of compound_head(),
- * whenever the "page" passed as parameter could be the tail of a
- * transparent hugepage that could be undergoing a
- * __split_huge_page_refcount(). The page structure layout often
- * changes across releases and it makes extensive use of unions. So if
- * the page structure layout will change in a way that
- * page->first_page gets clobbered by __split_huge_page_refcount, the
- * implementation making use of smp_rmb() will be required.
- *
- * Currently we define compound_trans_head as compound_head, because
- * page->private is in the same union with page->first_page, and
- * page->private isn't clobbered. However this also means we're
- * currently leaving dirt into the page->private field of anonymous
- * pages resulting from a THP split, instead of setting page->private
- * to zero like for every other page that has PG_private not set. But
- * anonymous pages don't use page->private so this is not a problem.
- */
-#if 0
-/* This will be needed if page->private will be clobbered in split_huge_page */
-static inline struct page *compound_trans_head(struct page *page)
-{
-	if (PageTail(page)) {
-		struct page *head;
-		head = page->first_page;
-		smp_rmb();
-		/*
-		 * head may be a dangling pointer.
-		 * __split_huge_page_refcount clears PageTail before
-		 * overwriting first_page, so if PageTail is still
-		 * there it means the head pointer isn't dangling.
-		 */
-		if (PageTail(page))
-			return head;
-	}
-	return page;
-}
-#else
-#define compound_trans_head(page) compound_head(page)
-#endif
 
 extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long addr, pmd_t pmd, pmd_t *pmdp);
@@ -226,7 +186,6 @@ static inline int split_huge_page(struct page *page)
 	do { } while (0)
 #define split_huge_page_pmd_mm(__mm, __address, __pmd)	\
 	do { } while (0)
-#define compound_trans_head(page) compound_head(page)
 static inline int hugepage_madvise(struct vm_area_struct *vma,
 				   unsigned long *vm_flags, int advice)
 {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f28f46eade6a..c1b7414c7bef 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -175,7 +175,7 @@ extern unsigned int kobjsize(const void *objp);
  * Special vmas that are non-mergable, non-mlock()able.
  * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
  */
-#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP)
+#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
 
 /*
  * mapping from the currently active vm_flags protection bits (the
@@ -399,8 +399,18 @@
 
 static inline struct page *compound_head(struct page *page)
 {
-	if (unlikely(PageTail(page)))
-		return page->first_page;
+	if (unlikely(PageTail(page))) {
+		struct page *head = page->first_page;
+
+		/*
+		 * page->first_page may be a dangling pointer to an old
+		 * compound page, so recheck that it is still a tail
+		 * page before returning.
+		 */
+		smp_rmb();
+		if (likely(PageTail(page)))
+			return head;
+	}
 	return page;
 }
 
@@ -757,7 +767,7 @@ static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
 static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
 {
-	return xchg(&page->_last_cpupid, cpupid);
+	return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
 }
 
 static inline int page_cpupid_last(struct page *page)
@@ -766,7 +776,7 @@ static inline int page_cpupid_last(struct page *page)
 }
 static inline void page_cpupid_reset_last(struct page *page)
 {
-	page->_last_cpupid = -1;
+	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
 }
 #else
 static inline int page_cpupid_last(struct page *page)
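The new compound_head() body above reads the speculative head pointer and then rechecks PageTail() behind a read barrier before trusting it. A standalone sketch of that read-then-recheck pattern, using C11 atomics and made-up names (fake_page, is_tail) rather than the real kernel types:

	#include <stdatomic.h>
	#include <stdio.h>

	struct fake_page {
		_Atomic int is_tail;            /* stands in for PageTail()       */
		struct fake_page *first_page;   /* stands in for page->first_page */
	};

	static struct fake_page *fake_compound_head(struct fake_page *page)
	{
		if (atomic_load(&page->is_tail)) {
			struct fake_page *head = page->first_page;

			/* read barrier, analogous to the kernel's smp_rmb(): only
			 * trust 'head' if the page is still marked as a tail */
			atomic_thread_fence(memory_order_acquire);
			if (atomic_load(&page->is_tail))
				return head;    /* head is still valid */
		}
		return page;                    /* not (or no longer) a tail page */
	}

	int main(void)
	{
		struct fake_page head = { 0, NULL };
		struct fake_page tail = { 1, &head };

		printf("head of tail -> %p (expect %p)\n",
		       (void *)fake_compound_head(&tail), (void *)&head);
		return 0;
	}
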
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 2defd1308b04..98f2d7e91a91 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -424,111 +424,134 @@ void debug_dma_dump_mappings(struct device *dev)
 EXPORT_SYMBOL(debug_dma_dump_mappings);
 
 /*
- * For each page mapped (initial page in the case of
- * dma_alloc_coherent/dma_map_{single|page}, or each page in a
- * scatterlist) insert into this tree using the pfn as the key. At
+ * For each mapping (initial cacheline in the case of
+ * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
+ * scatterlist, or the cacheline specified in dma_map_single) insert
+ * into this tree using the cacheline as the key. At
 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
- * the pfn already exists at insertion time add a tag as a reference
+ * the entry already exists at insertion time add a tag as a reference
 * count for the overlapping mappings. For now, the overlap tracking
- * just ensures that 'unmaps' balance 'maps' before marking the pfn
- * idle, but we should also be flagging overlaps as an API violation.
+ * just ensures that 'unmaps' balance 'maps' before marking the
+ * cacheline idle, but we should also be flagging overlaps as an API
+ * violation.
 *
 * Memory usage is mostly constrained by the maximum number of available
 * dma-debug entries in that we need a free dma_debug_entry before
- * inserting into the tree. In the case of dma_map_{single|page} and
- * dma_alloc_coherent there is only one dma_debug_entry and one pfn to
- * track per event. dma_map_sg(), on the other hand,
- * consumes a single dma_debug_entry, but inserts 'nents' entries into
- * the tree.
+ * inserting into the tree. In the case of dma_map_page and
+ * dma_alloc_coherent there is only one dma_debug_entry and one
+ * dma_active_cacheline entry to track per event. dma_map_sg(), on the
+ * other hand, consumes a single dma_debug_entry, but inserts 'nents'
+ * entries into the tree.
 *
 * At any time debug_dma_assert_idle() can be called to trigger a
- * warning if the given page is in the active set.
+ * warning if any cachelines in the given page are in the active set.
 */
-static RADIX_TREE(dma_active_pfn, GFP_NOWAIT);
+static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
 static DEFINE_SPINLOCK(radix_lock);
-#define ACTIVE_PFN_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
+#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
+#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
+#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
 
-static int active_pfn_read_overlap(unsigned long pfn)
+static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
+{
+	return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
+		(entry->offset >> L1_CACHE_SHIFT);
+}
+
+static int active_cacheline_read_overlap(phys_addr_t cln)
 {
 	int overlap = 0, i;
 
 	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
-		if (radix_tree_tag_get(&dma_active_pfn, pfn, i))
+		if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
 			overlap |= 1 << i;
 	return overlap;
 }
 
-static int active_pfn_set_overlap(unsigned long pfn, int overlap)
+static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
 {
 	int i;
 
-	if (overlap > ACTIVE_PFN_MAX_OVERLAP || overlap < 0)
+	if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
 		return overlap;
 
 	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
 		if (overlap & 1 << i)
-			radix_tree_tag_set(&dma_active_pfn, pfn, i);
+			radix_tree_tag_set(&dma_active_cacheline, cln, i);
 		else
-			radix_tree_tag_clear(&dma_active_pfn, pfn, i);
 
+			radix_tree_tag_clear(&dma_active_cacheline, cln, i);
 	return overlap;
 }
 
-static void active_pfn_inc_overlap(unsigned long pfn)
+static void active_cacheline_inc_overlap(phys_addr_t cln)
 {
-	int overlap = active_pfn_read_overlap(pfn);
+	int overlap = active_cacheline_read_overlap(cln);
 
-	overlap = active_pfn_set_overlap(pfn, ++overlap);
+	overlap = active_cacheline_set_overlap(cln, ++overlap);
 
 	/* If we overflowed the overlap counter then we're potentially
 	 * leaking dma-mappings. Otherwise, if maps and unmaps are
 	 * balanced then this overflow may cause false negatives in
-	 * debug_dma_assert_idle() as the pfn may be marked idle
+	 * debug_dma_assert_idle() as the cacheline may be marked idle
 	 * prematurely.
 	 */
-	WARN_ONCE(overlap > ACTIVE_PFN_MAX_OVERLAP,
-		  "DMA-API: exceeded %d overlapping mappings of pfn %lx\n",
-		  ACTIVE_PFN_MAX_OVERLAP, pfn);
+	WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
		  "DMA-API: exceeded %d overlapping mappings of cacheline %pa\n",
+		  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
 }
 
-static int active_pfn_dec_overlap(unsigned long pfn)
+static int active_cacheline_dec_overlap(phys_addr_t cln)
 {
-	int overlap = active_pfn_read_overlap(pfn);
+	int overlap = active_cacheline_read_overlap(cln);
 
-	return active_pfn_set_overlap(pfn, --overlap);
+	return active_cacheline_set_overlap(cln, --overlap);
 }
 
-static int active_pfn_insert(struct dma_debug_entry *entry)
+static int active_cacheline_insert(struct dma_debug_entry *entry)
 {
+	phys_addr_t cln = to_cacheline_number(entry);
 	unsigned long flags;
 	int rc;
 
+	/* If the device is not writing memory then we don't have any
+	 * concerns about the cpu consuming stale data.  This mitigates
+	 * legitimate usages of overlapping mappings.
+	 */
+	if (entry->direction == DMA_TO_DEVICE)
+		return 0;
+
 	spin_lock_irqsave(&radix_lock, flags);
-	rc = radix_tree_insert(&dma_active_pfn, entry->pfn, entry);
+	rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
 	if (rc == -EEXIST)
-		active_pfn_inc_overlap(entry->pfn);
+		active_cacheline_inc_overlap(cln);
 	spin_unlock_irqrestore(&radix_lock, flags);
 
 	return rc;
 }
 
-static void active_pfn_remove(struct dma_debug_entry *entry)
+static void active_cacheline_remove(struct dma_debug_entry *entry)
 {
+	phys_addr_t cln = to_cacheline_number(entry);
 	unsigned long flags;
 
+	/* ...mirror the insert case */
+	if (entry->direction == DMA_TO_DEVICE)
+		return;
+
 	spin_lock_irqsave(&radix_lock, flags);
 	/* since we are counting overlaps the final put of the
-	 * entry->pfn will occur when the overlap count is 0.
-	 * active_pfn_dec_overlap() returns -1 in that case
+	 * cacheline will occur when the overlap count is 0.
+	 * active_cacheline_dec_overlap() returns -1 in that case
 	 */
-	if (active_pfn_dec_overlap(entry->pfn) < 0)
-		radix_tree_delete(&dma_active_pfn, entry->pfn);
+	if (active_cacheline_dec_overlap(cln) < 0)
+		radix_tree_delete(&dma_active_cacheline, cln);
 	spin_unlock_irqrestore(&radix_lock, flags);
 }
 
 /**
 * debug_dma_assert_idle() - assert that a page is not undergoing dma
- * @page: page to lookup in the dma_active_pfn tree
+ * @page: page to lookup in the dma_active_cacheline tree
 *
 * Place a call to this routine in cases where the cpu touching the page
 * before the dma completes (page is dma_unmapped) will lead to data
@@ -536,22 +559,38 @@ static void active_pfn_remove(struct dma_debug_entry *entry)
 */
 void debug_dma_assert_idle(struct page *page)
 {
+	static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
+	struct dma_debug_entry *entry = NULL;
+	void **results = (void **) &ents;
+	unsigned int nents, i;
 	unsigned long flags;
-	struct dma_debug_entry *entry;
+	phys_addr_t cln;
 
 	if (!page)
 		return;
 
+	cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
 	spin_lock_irqsave(&radix_lock, flags);
-	entry = radix_tree_lookup(&dma_active_pfn, page_to_pfn(page));
+	nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
+				       CACHELINES_PER_PAGE);
+	for (i = 0; i < nents; i++) {
+		phys_addr_t ent_cln = to_cacheline_number(ents[i]);
+
+		if (ent_cln == cln) {
+			entry = ents[i];
+			break;
+		} else if (ent_cln >= cln + CACHELINES_PER_PAGE)
+			break;
+	}
 	spin_unlock_irqrestore(&radix_lock, flags);
 
 	if (!entry)
 		return;
 
+	cln = to_cacheline_number(entry);
 	err_printk(entry->dev, entry,
-		   "DMA-API: cpu touching an active dma mapped page "
-		   "[pfn=0x%lx]\n", entry->pfn);
+		   "DMA-API: cpu touching an active dma mapped cacheline [cln=%pa]\n",
+		   &cln);
 }
 
 /*
@@ -568,9 +607,9 @@ static void add_dma_entry(struct dma_debug_entry *entry)
 	hash_bucket_add(bucket, entry);
 	put_hash_bucket(bucket, &flags);
 
-	rc = active_pfn_insert(entry);
+	rc = active_cacheline_insert(entry);
 	if (rc == -ENOMEM) {
-		pr_err("DMA-API: pfn tracking ENOMEM, dma-debug disabled\n");
+		pr_err("DMA-API: cacheline tracking ENOMEM, dma-debug disabled\n");
 		global_disable = true;
 	}
 
@@ -631,7 +670,7 @@ static void dma_entry_free(struct dma_debug_entry *entry)
 {
 	unsigned long flags;
 
-	active_pfn_remove(entry);
+	active_cacheline_remove(entry);
 
 	/*
 	 * add to beginning of the list - this way the entries are
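The dma-debug rework above keys the radix tree by cacheline number instead of pfn. A small userspace sketch of that keying (PAGE_SHIFT=12 and L1_CACHE_SHIFT=6 are assumed here purely for illustration; the kernel uses its real configuration values) shows why each page owns CACHELINES_PER_PAGE consecutive keys, which is what debug_dma_assert_idle() now gang-looks-up:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT		12
	#define L1_CACHE_SHIFT		6
	#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
	#define CACHELINES_PER_PAGE	(1 << CACHELINE_PER_PAGE_SHIFT)

	static uint64_t to_cacheline_number(uint64_t pfn, uint64_t offset_in_page)
	{
		/* same shape as the kernel helper: the pfn selects a block of
		 * CACHELINES_PER_PAGE keys, the offset picks one line inside it */
		return (pfn << CACHELINE_PER_PAGE_SHIFT) +
		       (offset_in_page >> L1_CACHE_SHIFT);
	}

	int main(void)
	{
		uint64_t pfn = 0x1234;

		printf("page pfn %#llx covers cachelines %llu..%llu\n",
		       (unsigned long long)pfn,
		       (unsigned long long)to_cacheline_number(pfn, 0),
		       (unsigned long long)to_cacheline_number(pfn, (1 << PAGE_SHIFT) - 1));
		return 0;
	}
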
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 7811ed3b4e70..bd4a8dfdf0b8 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
| @@ -1253,8 +1253,10 @@ unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item) | |||
| 1253 | 1253 | ||
| 1254 | node = indirect_to_ptr(node); | 1254 | node = indirect_to_ptr(node); |
| 1255 | max_index = radix_tree_maxindex(node->height); | 1255 | max_index = radix_tree_maxindex(node->height); |
| 1256 | if (cur_index > max_index) | 1256 | if (cur_index > max_index) { |
| 1257 | rcu_read_unlock(); | ||
| 1257 | break; | 1258 | break; |
| 1259 | } | ||
| 1258 | 1260 | ||
| 1259 | cur_index = __locate(node, item, cur_index, &found_index); | 1261 | cur_index = __locate(node, item, cur_index, &found_index); |
| 1260 | rcu_read_unlock(); | 1262 | rcu_read_unlock(); |
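The radix_tree_locate_item() change is a lock-balance fix: the early break out of the lookup loop previously skipped the rcu_read_unlock() that every other exit path performs. A minimal sketch of the pattern with a plain mutex and hypothetical helpers (not the kernel's RCU or radix-tree API), showing why each exit from the critical section needs its own unlock:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Hypothetical stand-ins for the tree walk in the hunk above. */
    static bool past_end(int cur, int max) { return cur > max; }
    static int  step(int cur)              { return cur + 1; }

    static int locate(int start, int max_index)
    {
        int cur = start;

        while (1) {
            pthread_mutex_lock(&lock);
            if (past_end(cur, max_index)) {
                /* Without this unlock the early break would leak the
                 * critical section -- the bug fixed in the hunk above. */
                pthread_mutex_unlock(&lock);
                break;
            }
            cur = step(cur);
            pthread_mutex_unlock(&lock);
        }
        return cur;
    }

    int main(void)
    {
        printf("stopped at %d\n", locate(0, 5));
        return 0;
    }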
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 4df39b1bde91..1546655a2d78 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
| @@ -1961,7 +1961,7 @@ out: | |||
| 1961 | return ret; | 1961 | return ret; |
| 1962 | } | 1962 | } |
| 1963 | 1963 | ||
| 1964 | #define VM_NO_THP (VM_SPECIAL|VM_MIXEDMAP|VM_HUGETLB|VM_SHARED|VM_MAYSHARE) | 1964 | #define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE) |
| 1965 | 1965 | ||
| 1966 | int hugepage_madvise(struct vm_area_struct *vma, | 1966 | int hugepage_madvise(struct vm_area_struct *vma, |
| 1967 | unsigned long *vm_flags, int advice) | 1967 | unsigned long *vm_flags, int advice) |
diff --git a/mm/ksm.c b/mm/ksm.c --- a/mm/ksm.c +++ b/mm/ksm.c | |||
| @@ -444,7 +444,7 @@ static void break_cow(struct rmap_item *rmap_item) | |||
| 444 | static struct page *page_trans_compound_anon(struct page *page) | 444 | static struct page *page_trans_compound_anon(struct page *page) |
| 445 | { | 445 | { |
| 446 | if (PageTransCompound(page)) { | 446 | if (PageTransCompound(page)) { |
| 447 | struct page *head = compound_trans_head(page); | 447 | struct page *head = compound_head(page); |
| 448 | /* | 448 | /* |
| 449 | * head may actually be splitted and freed from under | 449 | * head may actually be splitted and freed from under |
| 450 | * us but it's ok here. | 450 | * us but it's ok here. |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index ce7a8cc7b404..5b6b0039f725 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
| @@ -1127,8 +1127,8 @@ skip_node: | |||
| 1127 | * skipping css reference should be safe. | 1127 | * skipping css reference should be safe. |
| 1128 | */ | 1128 | */ |
| 1129 | if (next_css) { | 1129 | if (next_css) { |
| 1130 | if ((next_css->flags & CSS_ONLINE) && | 1130 | if ((next_css == &root->css) || |
| 1131 | (next_css == &root->css || css_tryget(next_css))) | 1131 | ((next_css->flags & CSS_ONLINE) && css_tryget(next_css))) |
| 1132 | return mem_cgroup_from_css(next_css); | 1132 | return mem_cgroup_from_css(next_css); |
| 1133 | 1133 | ||
| 1134 | prev_css = next_css; | 1134 | prev_css = next_css; |
| @@ -6595,6 +6595,7 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) | |||
| 6595 | { | 6595 | { |
| 6596 | struct mem_cgroup *memcg = mem_cgroup_from_css(css); | 6596 | struct mem_cgroup *memcg = mem_cgroup_from_css(css); |
| 6597 | struct mem_cgroup_event *event, *tmp; | 6597 | struct mem_cgroup_event *event, *tmp; |
| 6598 | struct cgroup_subsys_state *iter; | ||
| 6598 | 6599 | ||
| 6599 | /* | 6600 | /* |
| 6600 | * Unregister events and notify userspace. | 6601 | * Unregister events and notify userspace. |
| @@ -6611,7 +6612,14 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) | |||
| 6611 | kmem_cgroup_css_offline(memcg); | 6612 | kmem_cgroup_css_offline(memcg); |
| 6612 | 6613 | ||
| 6613 | mem_cgroup_invalidate_reclaim_iterators(memcg); | 6614 | mem_cgroup_invalidate_reclaim_iterators(memcg); |
| 6614 | mem_cgroup_reparent_charges(memcg); | 6615 | |
| 6616 | /* | ||
| 6617 | * This requires that offlining is serialized. Right now that is | ||
| 6618 | * guaranteed because css_killed_work_fn() holds the cgroup_mutex. | ||
| 6619 | */ | ||
| 6620 | css_for_each_descendant_post(iter, css) | ||
| 6621 | mem_cgroup_reparent_charges(mem_cgroup_from_css(iter)); | ||
| 6622 | |||
| 6615 | mem_cgroup_destroy_all_caches(memcg); | 6623 | mem_cgroup_destroy_all_caches(memcg); |
| 6616 | vmpressure_cleanup(&memcg->vmpressure); | 6624 | vmpressure_cleanup(&memcg->vmpressure); |
| 6617 | } | 6625 | } |
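Reparenting only the offlined memcg's own charges is not enough when charges may still sit in its children; the hunk above therefore walks the subtree post-order so every descendant is drained before its ancestor, ending with the offlined css itself. A minimal userspace sketch of a post-order walk over a toy hierarchy (hypothetical structures, not the cgroup iterator API):

    #include <stdio.h>

    /* Toy stand-in for a cgroup hierarchy; purely illustrative. */
    struct node {
        const char  *name;
        struct node *children[4];
    };

    /* Post-order: every child is handled before its parent, mirroring
     * css_for_each_descendant_post() in the hunk above. */
    static void reparent_post_order(struct node *n)
    {
        for (int i = 0; n->children[i]; i++)
            reparent_post_order(n->children[i]);
        printf("reparent charges of %s\n", n->name);
    }

    int main(void)
    {
        struct node leaf1 = { .name = "child-a" };
        struct node leaf2 = { .name = "child-b" };
        struct node root  = { .name = "root", .children = { &leaf1, &leaf2 } };

        reparent_post_order(&root);
        return 0;
    }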
diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 2f2f34a4e77d..90002ea43638 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c | |||
| @@ -1651,7 +1651,7 @@ int soft_offline_page(struct page *page, int flags) | |||
| 1651 | { | 1651 | { |
| 1652 | int ret; | 1652 | int ret; |
| 1653 | unsigned long pfn = page_to_pfn(page); | 1653 | unsigned long pfn = page_to_pfn(page); |
| 1654 | struct page *hpage = compound_trans_head(page); | 1654 | struct page *hpage = compound_head(page); |
| 1655 | 1655 | ||
| 1656 | if (PageHWPoison(page)) { | 1656 | if (PageHWPoison(page)) { |
| 1657 | pr_info("soft offline: %#lx page already poisoned\n", pfn); | 1657 | pr_info("soft offline: %#lx page already poisoned\n", pfn); |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e3758a09a009..3bac76ae4b30 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
| @@ -369,9 +369,11 @@ void prep_compound_page(struct page *page, unsigned long order) | |||
| 369 | __SetPageHead(page); | 369 | __SetPageHead(page); |
| 370 | for (i = 1; i < nr_pages; i++) { | 370 | for (i = 1; i < nr_pages; i++) { |
| 371 | struct page *p = page + i; | 371 | struct page *p = page + i; |
| 372 | __SetPageTail(p); | ||
| 373 | set_page_count(p, 0); | 372 | set_page_count(p, 0); |
| 374 | p->first_page = page; | 373 | p->first_page = page; |
| 374 | /* Make sure p->first_page is always valid for PageTail() */ | ||
| 375 | smp_wmb(); | ||
| 376 | __SetPageTail(p); | ||
| 375 | } | 377 | } |
| 376 | } | 378 | } |
| 377 | 379 | ||
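The reordering in prep_compound_page() is a publish pattern: p->first_page must be visible before the tail flag that readers test with PageTail(), hence the smp_wmb() between the two stores. A minimal C11 sketch of the same idea using a release store and an acquire load (illustrative only; the kernel uses its own barrier primitives and a matching read-side ordering rather than C11 atomics):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct fake_page {
        struct fake_page *first_page;   /* payload, like p->first_page */
        atomic_bool       tail;         /* the "PageTail" flag         */
    };

    static struct fake_page head, tail_page;

    static void *writer(void *arg)
    {
        (void)arg;
        tail_page.first_page = &head;                   /* 1: write payload */
        atomic_store_explicit(&tail_page.tail, 1,
                              memory_order_release);    /* 2: then publish  */
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, writer, NULL);

        /* A reader that observes the flag is guaranteed to see the payload. */
        while (!atomic_load_explicit(&tail_page.tail, memory_order_acquire))
            ;
        printf("first_page = %p (expected %p)\n",
               (void *)tail_page.first_page, (void *)&head);

        pthread_join(t, NULL);
        return 0;
    }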
| @@ -1236,6 +1238,15 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) | |||
| 1236 | } | 1238 | } |
| 1237 | local_irq_restore(flags); | 1239 | local_irq_restore(flags); |
| 1238 | } | 1240 | } |
| 1241 | static bool gfp_thisnode_allocation(gfp_t gfp_mask) | ||
| 1242 | { | ||
| 1243 | return (gfp_mask & GFP_THISNODE) == GFP_THISNODE; | ||
| 1244 | } | ||
| 1245 | #else | ||
| 1246 | static bool gfp_thisnode_allocation(gfp_t gfp_mask) | ||
| 1247 | { | ||
| 1248 | return false; | ||
| 1249 | } | ||
| 1239 | #endif | 1250 | #endif |
| 1240 | 1251 | ||
| 1241 | /* | 1252 | /* |
| @@ -1572,7 +1583,13 @@ again: | |||
| 1572 | get_pageblock_migratetype(page)); | 1583 | get_pageblock_migratetype(page)); |
| 1573 | } | 1584 | } |
| 1574 | 1585 | ||
| 1575 | __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); | 1586 | /* |
| 1587 | * NOTE: GFP_THISNODE allocations do not partake in the kswapd | ||
| 1588 | * aging protocol, so they can't be fair. | ||
| 1589 | */ | ||
| 1590 | if (!gfp_thisnode_allocation(gfp_flags)) | ||
| 1591 | __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); | ||
| 1592 | |||
| 1576 | __count_zone_vm_events(PGALLOC, zone, 1 << order); | 1593 | __count_zone_vm_events(PGALLOC, zone, 1 << order); |
| 1577 | zone_statistics(preferred_zone, zone, gfp_flags); | 1594 | zone_statistics(preferred_zone, zone, gfp_flags); |
| 1578 | local_irq_restore(flags); | 1595 | local_irq_restore(flags); |
| @@ -1944,8 +1961,12 @@ zonelist_scan: | |||
| 1944 | * ultimately fall back to remote zones that do not | 1961 | * ultimately fall back to remote zones that do not |
| 1945 | * partake in the fairness round-robin cycle of this | 1962 | * partake in the fairness round-robin cycle of this |
| 1946 | * zonelist. | 1963 | * zonelist. |
| 1964 | * | ||
| 1965 | * NOTE: GFP_THISNODE allocations do not partake in | ||
| 1966 | * the kswapd aging protocol, so they can't be fair. | ||
| 1947 | */ | 1967 | */ |
| 1948 | if (alloc_flags & ALLOC_WMARK_LOW) { | 1968 | if ((alloc_flags & ALLOC_WMARK_LOW) && |
| 1969 | !gfp_thisnode_allocation(gfp_mask)) { | ||
| 1949 | if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0) | 1970 | if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0) |
| 1950 | continue; | 1971 | continue; |
| 1951 | if (!zone_local(preferred_zone, zone)) | 1972 | if (!zone_local(preferred_zone, zone)) |
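gfp_thisnode_allocation() compares against the whole GFP_THISNODE mask rather than testing a single bit, because GFP_THISNODE is a combination of flags and only that exact combination marks the kind of request the hunks above exempt from the fairness batch accounting. A minimal sketch of the distinction, with flag values made up purely for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    /* Made-up flag values purely for illustration. */
    #define __GFP_THISNODE   0x01u
    #define __GFP_NORETRY    0x02u
    #define __GFP_NOWARN     0x04u
    #define GFP_THISNODE     (__GFP_THISNODE | __GFP_NORETRY | __GFP_NOWARN)

    /* Mirrors the helper in the diff: only the exact combination matches. */
    static bool gfp_thisnode_allocation(unsigned int gfp_mask)
    {
        return (gfp_mask & GFP_THISNODE) == GFP_THISNODE;
    }

    int main(void)
    {
        /* Shares the __GFP_THISNODE bit but is not a GFP_THISNODE request. */
        unsigned int partial = __GFP_THISNODE | __GFP_NOWARN;
        unsigned int full    = GFP_THISNODE;

        printf("partial: %d, full: %d\n",
               gfp_thisnode_allocation(partial),
               gfp_thisnode_allocation(full));
        return 0;
    }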
| @@ -2501,8 +2522,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, | |||
| 2501 | * allowed per node queues are empty and that nodes are | 2522 | * allowed per node queues are empty and that nodes are |
| 2502 | * over allocated. | 2523 | * over allocated. |
| 2503 | */ | 2524 | */ |
| 2504 | if (IS_ENABLED(CONFIG_NUMA) && | 2525 | if (gfp_thisnode_allocation(gfp_mask)) |
| 2505 | (gfp_mask & GFP_THISNODE) == GFP_THISNODE) | ||
| 2506 | goto nopage; | 2526 | goto nopage; |
| 2507 | 2527 | ||
| 2508 | restart: | 2528 | restart: |
diff --git a/mm/swap.c b/mm/swap.c --- a/mm/swap.c +++ b/mm/swap.c | |||
| @@ -98,7 +98,7 @@ static void put_compound_page(struct page *page) | |||
| 98 | } | 98 | } |
| 99 | 99 | ||
| 100 | /* __split_huge_page_refcount can run under us */ | 100 | /* __split_huge_page_refcount can run under us */ |
| 101 | page_head = compound_trans_head(page); | 101 | page_head = compound_head(page); |
| 102 | 102 | ||
| 103 | /* | 103 | /* |
| 104 | * THP can not break up slab pages so avoid taking | 104 | * THP can not break up slab pages so avoid taking |
| @@ -253,7 +253,7 @@ bool __get_page_tail(struct page *page) | |||
| 253 | */ | 253 | */ |
| 254 | unsigned long flags; | 254 | unsigned long flags; |
| 255 | bool got; | 255 | bool got; |
| 256 | struct page *page_head = compound_trans_head(page); | 256 | struct page *page_head = compound_head(page); |
| 257 | 257 | ||
| 258 | /* Ref to put_compound_page() comment. */ | 258 | /* Ref to put_compound_page() comment. */ |
| 259 | if (!__compound_tail_refcounted(page_head)) { | 259 | if (!__compound_tail_refcounted(page_head)) { |
diff --git a/scripts/gen_initramfs_list.sh b/scripts/gen_initramfs_list.sh index ef474098d9f1..17fa901418ae 100644 --- a/scripts/gen_initramfs_list.sh +++ b/scripts/gen_initramfs_list.sh | |||
| @@ -257,7 +257,7 @@ case "$arg" in | |||
| 257 | && compr="lzop -9 -f" | 257 | && compr="lzop -9 -f" |
| 258 | echo "$output_file" | grep -q "\.lz4$" \ | 258 | echo "$output_file" | grep -q "\.lz4$" \ |
| 259 | && [ -x "`which lz4 2> /dev/null`" ] \ | 259 | && [ -x "`which lz4 2> /dev/null`" ] \ |
| 260 | && compr="lz4 -9 -f" | 260 | && compr="lz4 -l -9 -f" |
| 261 | echo "$output_file" | grep -q "\.cpio$" && compr="cat" | 261 | echo "$output_file" | grep -q "\.cpio$" && compr="cat" |
| 262 | shift | 262 | shift |
| 263 | ;; | 263 | ;; |
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c index 10085de886fe..276e84b8a8e5 100644 --- a/scripts/kallsyms.c +++ b/scripts/kallsyms.c | |||
| @@ -330,8 +330,7 @@ static void write_src(void) | |||
| 330 | printf("\tPTR\t_text + %#llx\n", | 330 | printf("\tPTR\t_text + %#llx\n", |
| 331 | table[i].addr - _text); | 331 | table[i].addr - _text); |
| 332 | else | 332 | else |
| 333 | printf("\tPTR\t_text - %#llx\n", | 333 | printf("\tPTR\t%#llx\n", table[i].addr); |
| 334 | _text - table[i].addr); | ||
| 335 | } else { | 334 | } else { |
| 336 | printf("\tPTR\t%#llx\n", table[i].addr); | 335 | printf("\tPTR\t%#llx\n", table[i].addr); |
| 337 | } | 336 | } |
