aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/kernel-parameters.txt6
-rw-r--r--Documentation/powerpc/dts-bindings/fsl/esdhc.txt2
-rw-r--r--Documentation/sound/alsa/HD-Audio-Models.txt1
-rw-r--r--MAINTAINERS22
-rw-r--r--arch/alpha/mm/fault.c2
-rw-r--r--arch/arm/mm/fault.c2
-rw-r--r--arch/avr32/mm/fault.c2
-rw-r--r--arch/cris/mm/fault.c2
-rw-r--r--arch/frv/mm/fault.c2
-rw-r--r--arch/ia64/mm/fault.c2
-rw-r--r--arch/m32r/mm/fault.c2
-rw-r--r--arch/m68k/mm/fault.c2
-rw-r--r--arch/microblaze/mm/fault.c2
-rw-r--r--arch/mips/mm/fault.c2
-rw-r--r--arch/mn10300/mm/fault.c2
-rw-r--r--arch/parisc/mm/fault.c2
-rw-r--r--arch/powerpc/mm/fault.c2
-rw-r--r--arch/powerpc/platforms/cell/spu_fault.c2
-rw-r--r--arch/s390/lib/uaccess_pt.c2
-rw-r--r--arch/s390/mm/fault.c2
-rw-r--r--arch/sh/mm/fault_32.c2
-rw-r--r--arch/sh/mm/tlbflush_64.c2
-rw-r--r--arch/sparc/mm/fault_32.c4
-rw-r--r--arch/sparc/mm/fault_64.c2
-rw-r--r--arch/um/kernel/trap.c2
-rw-r--r--arch/x86/crypto/aesni-intel_asm.S5
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c4
-rw-r--r--arch/x86/crypto/fpu.c4
-rw-r--r--arch/x86/include/asm/percpu.h10
-rw-r--r--arch/x86/kernel/setup_percpu.c219
-rw-r--r--arch/x86/mm/fault.c2
-rw-r--r--arch/x86/mm/pageattr.c65
-rw-r--r--arch/xtensa/mm/fault.c2
-rw-r--r--drivers/crypto/padlock-aes.c138
-rw-r--r--drivers/mmc/host/Kconfig36
-rw-r--r--drivers/mmc/host/Makefile2
-rw-r--r--drivers/mmc/host/s3cmci.c2
-rw-r--r--drivers/mmc/host/sdhci-of.c3
-rw-r--r--drivers/mmc/host/sdhci-pci.c20
-rw-r--r--drivers/mmc/host/sdhci-s3c.c428
-rw-r--r--drivers/mmc/host/sdhci.c52
-rw-r--r--drivers/mmc/host/sdhci.h6
-rw-r--r--drivers/mmc/host/via-sdmmc.c1362
-rw-r--r--include/linux/mm.h4
-rw-r--r--ipc/util.h1
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--lib/dma-debug.c149
-rw-r--r--mm/memory.c48
-rw-r--r--mm/percpu.c24
-rw-r--r--sound/pci/hda/hda_codec.c2
-rw-r--r--sound/pci/hda/patch_realtek.c134
-rw-r--r--sound/soc/txx9/txx9aclc.c4
-rw-r--r--sound/usb/caiaq/audio.c5
-rw-r--r--sound/usb/caiaq/device.c2
54 files changed, 2528 insertions, 282 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 08def8deb5f5..ecad946920d1 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1882,6 +1882,12 @@ and is between 256 and 4096 characters. It is defined in the file
1882 Format: { 0 | 1 } 1882 Format: { 0 | 1 }
1883 See arch/parisc/kernel/pdc_chassis.c 1883 See arch/parisc/kernel/pdc_chassis.c
1884 1884
1885 percpu_alloc= [X86] Select which percpu first chunk allocator to use.
1886 Allowed values are one of "lpage", "embed" and "4k".
1887 See comments in arch/x86/kernel/setup_percpu.c for
1888 details on each allocator. This parameter is primarily
1889 for debugging and performance comparison.
1890
1885 pf. [PARIDE] 1891 pf. [PARIDE]
1886 See Documentation/blockdev/paride.txt. 1892 See Documentation/blockdev/paride.txt.
1887 1893
diff --git a/Documentation/powerpc/dts-bindings/fsl/esdhc.txt b/Documentation/powerpc/dts-bindings/fsl/esdhc.txt
index 5093ddf900da..3ed3797b5086 100644
--- a/Documentation/powerpc/dts-bindings/fsl/esdhc.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/esdhc.txt
@@ -10,6 +10,8 @@ Required properties:
10 - interrupts : should contain eSDHC interrupt. 10 - interrupts : should contain eSDHC interrupt.
11 - interrupt-parent : interrupt source phandle. 11 - interrupt-parent : interrupt source phandle.
12 - clock-frequency : specifies eSDHC base clock frequency. 12 - clock-frequency : specifies eSDHC base clock frequency.
13 - sdhci,1-bit-only : (optional) specifies that a controller can
14 only handle 1-bit data transfers.
13 15
14Example: 16Example:
15 17
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index de8e10a94103..0d8d23581c44 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -139,6 +139,7 @@ ALC883/888
139 acer Acer laptops (Travelmate 3012WTMi, Aspire 5600, etc) 139 acer Acer laptops (Travelmate 3012WTMi, Aspire 5600, etc)
140 acer-aspire Acer Aspire 9810 140 acer-aspire Acer Aspire 9810
141 acer-aspire-4930g Acer Aspire 4930G 141 acer-aspire-4930g Acer Aspire 4930G
142 acer-aspire-6530g Acer Aspire 6530G
142 acer-aspire-8930g Acer Aspire 8930G 143 acer-aspire-8930g Acer Aspire 8930G
143 medion Medion Laptops 144 medion Medion Laptops
144 medion-md2 Medion MD2 145 medion-md2 Medion MD2
diff --git a/MAINTAINERS b/MAINTAINERS
index dc226e78612c..1d4704300c1d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1010,6 +1010,13 @@ W: http://www.at91.com/
1010S: Maintained 1010S: Maintained
1011F: drivers/mmc/host/at91_mci.c 1011F: drivers/mmc/host/at91_mci.c
1012 1012
1013ATMEL AT91 / AT32 MCI DRIVER
1014P: Nicolas Ferre
1015M: nicolas.ferre@atmel.com
1016S: Maintained
1017F: drivers/mmc/host/atmel-mci.c
1018F: drivers/mmc/host/atmel-mci-regs.h
1019
1013ATMEL AT91 / AT32 SERIAL DRIVER 1020ATMEL AT91 / AT32 SERIAL DRIVER
1014P: Haavard Skinnemoen 1021P: Haavard Skinnemoen
1015M: hskinnemoen@atmel.com 1022M: hskinnemoen@atmel.com
@@ -5094,6 +5101,13 @@ L: sdhci-devel@lists.ossman.eu
5094S: Maintained 5101S: Maintained
5095F: drivers/mmc/host/sdhci.* 5102F: drivers/mmc/host/sdhci.*
5096 5103
5104SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) SAMSUNG DRIVER
5105P: Ben Dooks
5106M: ben-linux@fluff.org
5107L: sdhci-devel@lists.ossman.eu
5108S: Maintained
5109F: drivers/mmc/host/sdhci-s3c.c
5110
5097SECURITY SUBSYSTEM 5111SECURITY SUBSYSTEM
5098P: James Morris 5112P: James Morris
5099M: jmorris@namei.org 5113M: jmorris@namei.org
@@ -6216,6 +6230,14 @@ S: Maintained
6216F: Documentation/i2c/busses/i2c-viapro 6230F: Documentation/i2c/busses/i2c-viapro
6217F: drivers/i2c/busses/i2c-viapro.c 6231F: drivers/i2c/busses/i2c-viapro.c
6218 6232
6233VIA SD/MMC CARD CONTROLLER DRIVER
6234P: Joseph Chan
6235M: JosephChan@via.com.tw
6236P: Harald Welte
6237M: HaraldWelte@viatech.com
6238S: Maintained
6239F: drivers/mmc/host/via-sdmmc.c
6240
6219VIA UNICHROME(PRO)/CHROME9 FRAMEBUFFER DRIVER 6241VIA UNICHROME(PRO)/CHROME9 FRAMEBUFFER DRIVER
6220P: Joseph Chan 6242P: Joseph Chan
6221M: JosephChan@via.com.tw 6243M: JosephChan@via.com.tw
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 4829f96585b1..00a31deaa96e 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -146,7 +146,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
146 /* If for any reason at all we couldn't handle the fault, 146 /* If for any reason at all we couldn't handle the fault,
147 make sure we exit gracefully rather than endlessly redo 147 make sure we exit gracefully rather than endlessly redo
148 the fault. */ 148 the fault. */
149 fault = handle_mm_fault(mm, vma, address, cause > 0); 149 fault = handle_mm_fault(mm, vma, address, cause > 0 ? FAULT_FLAG_WRITE : 0);
150 up_read(&mm->mmap_sem); 150 up_read(&mm->mmap_sem);
151 if (unlikely(fault & VM_FAULT_ERROR)) { 151 if (unlikely(fault & VM_FAULT_ERROR)) {
152 if (fault & VM_FAULT_OOM) 152 if (fault & VM_FAULT_OOM)
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 0455557a2899..6fdcbb709827 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -208,7 +208,7 @@ good_area:
208 * than endlessly redo the fault. 208 * than endlessly redo the fault.
209 */ 209 */
210survive: 210survive:
211 fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, fsr & (1 << 11)); 211 fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & (1 << 11)) ? FAULT_FLAG_WRITE : 0);
212 if (unlikely(fault & VM_FAULT_ERROR)) { 212 if (unlikely(fault & VM_FAULT_ERROR)) {
213 if (fault & VM_FAULT_OOM) 213 if (fault & VM_FAULT_OOM)
214 goto out_of_memory; 214 goto out_of_memory;
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index 62d4abbaa654..b61d86d3debf 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -133,7 +133,7 @@ good_area:
133 * fault. 133 * fault.
134 */ 134 */
135survive: 135survive:
136 fault = handle_mm_fault(mm, vma, address, writeaccess); 136 fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
137 if (unlikely(fault & VM_FAULT_ERROR)) { 137 if (unlikely(fault & VM_FAULT_ERROR)) {
138 if (fault & VM_FAULT_OOM) 138 if (fault & VM_FAULT_OOM)
139 goto out_of_memory; 139 goto out_of_memory;
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index c4c76db90f9c..f925115e3250 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -163,7 +163,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
163 * the fault. 163 * the fault.
164 */ 164 */
165 165
166 fault = handle_mm_fault(mm, vma, address, writeaccess & 1); 166 fault = handle_mm_fault(mm, vma, address, (writeaccess & 1) ? FAULT_FLAG_WRITE : 0);
167 if (unlikely(fault & VM_FAULT_ERROR)) { 167 if (unlikely(fault & VM_FAULT_ERROR)) {
168 if (fault & VM_FAULT_OOM) 168 if (fault & VM_FAULT_OOM)
169 goto out_of_memory; 169 goto out_of_memory;
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 05093d41d98e..30f5d100a81c 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -163,7 +163,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
163 * make sure we exit gracefully rather than endlessly redo 163 * make sure we exit gracefully rather than endlessly redo
164 * the fault. 164 * the fault.
165 */ 165 */
166 fault = handle_mm_fault(mm, vma, ear0, write); 166 fault = handle_mm_fault(mm, vma, ear0, write ? FAULT_FLAG_WRITE : 0);
167 if (unlikely(fault & VM_FAULT_ERROR)) { 167 if (unlikely(fault & VM_FAULT_ERROR)) {
168 if (fault & VM_FAULT_OOM) 168 if (fault & VM_FAULT_OOM)
169 goto out_of_memory; 169 goto out_of_memory;
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 23088bed111e..19261a99e623 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -154,7 +154,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
154 * sure we exit gracefully rather than endlessly redo the 154 * sure we exit gracefully rather than endlessly redo the
155 * fault. 155 * fault.
156 */ 156 */
157 fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) != 0); 157 fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
158 if (unlikely(fault & VM_FAULT_ERROR)) { 158 if (unlikely(fault & VM_FAULT_ERROR)) {
159 /* 159 /*
160 * We ran out of memory, or some other thing happened 160 * We ran out of memory, or some other thing happened
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index 4a71df4c1b30..7274b47f4c22 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -196,7 +196,7 @@ survive:
196 */ 196 */
197 addr = (address & PAGE_MASK); 197 addr = (address & PAGE_MASK);
198 set_thread_fault_code(error_code); 198 set_thread_fault_code(error_code);
199 fault = handle_mm_fault(mm, vma, addr, write); 199 fault = handle_mm_fault(mm, vma, addr, write ? FAULT_FLAG_WRITE : 0);
200 if (unlikely(fault & VM_FAULT_ERROR)) { 200 if (unlikely(fault & VM_FAULT_ERROR)) {
201 if (fault & VM_FAULT_OOM) 201 if (fault & VM_FAULT_OOM)
202 goto out_of_memory; 202 goto out_of_memory;
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index f493f03231d5..d0e35cf99fc6 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -155,7 +155,7 @@ good_area:
155 */ 155 */
156 156
157 survive: 157 survive:
158 fault = handle_mm_fault(mm, vma, address, write); 158 fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
159#ifdef DEBUG 159#ifdef DEBUG
160 printk("handle_mm_fault returns %d\n",fault); 160 printk("handle_mm_fault returns %d\n",fault);
161#endif 161#endif
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index 5e67cd1fab40..956607a63f4c 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -232,7 +232,7 @@ good_area:
232 * the fault. 232 * the fault.
233 */ 233 */
234survive: 234survive:
235 fault = handle_mm_fault(mm, vma, address, is_write); 235 fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
236 if (unlikely(fault & VM_FAULT_ERROR)) { 236 if (unlikely(fault & VM_FAULT_ERROR)) {
237 if (fault & VM_FAULT_OOM) 237 if (fault & VM_FAULT_OOM)
238 goto out_of_memory; 238 goto out_of_memory;
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 55767ad9f00e..6751ce9ede9e 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -102,7 +102,7 @@ good_area:
102 * make sure we exit gracefully rather than endlessly redo 102 * make sure we exit gracefully rather than endlessly redo
103 * the fault. 103 * the fault.
104 */ 104 */
105 fault = handle_mm_fault(mm, vma, address, write); 105 fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
106 if (unlikely(fault & VM_FAULT_ERROR)) { 106 if (unlikely(fault & VM_FAULT_ERROR)) {
107 if (fault & VM_FAULT_OOM) 107 if (fault & VM_FAULT_OOM)
108 goto out_of_memory; 108 goto out_of_memory;
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
index 33cf25025dac..a62e1e138bc1 100644
--- a/arch/mn10300/mm/fault.c
+++ b/arch/mn10300/mm/fault.c
@@ -258,7 +258,7 @@ good_area:
258 * make sure we exit gracefully rather than endlessly redo 258 * make sure we exit gracefully rather than endlessly redo
259 * the fault. 259 * the fault.
260 */ 260 */
261 fault = handle_mm_fault(mm, vma, address, write); 261 fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
262 if (unlikely(fault & VM_FAULT_ERROR)) { 262 if (unlikely(fault & VM_FAULT_ERROR)) {
263 if (fault & VM_FAULT_OOM) 263 if (fault & VM_FAULT_OOM)
264 goto out_of_memory; 264 goto out_of_memory;
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 92c7fa4ecc3f..bfb6dd6ab380 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -202,7 +202,7 @@ good_area:
202 * fault. 202 * fault.
203 */ 203 */
204 204
205 fault = handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) != 0); 205 fault = handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
206 if (unlikely(fault & VM_FAULT_ERROR)) { 206 if (unlikely(fault & VM_FAULT_ERROR)) {
207 /* 207 /*
208 * We hit a shared mapping outside of the file, or some 208 * We hit a shared mapping outside of the file, or some
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 5beffc8f481e..830bef0a1131 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -302,7 +302,7 @@ good_area:
302 * the fault. 302 * the fault.
303 */ 303 */
304 survive: 304 survive:
305 ret = handle_mm_fault(mm, vma, address, is_write); 305 ret = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
306 if (unlikely(ret & VM_FAULT_ERROR)) { 306 if (unlikely(ret & VM_FAULT_ERROR)) {
307 if (ret & VM_FAULT_OOM) 307 if (ret & VM_FAULT_OOM)
308 goto out_of_memory; 308 goto out_of_memory;
diff --git a/arch/powerpc/platforms/cell/spu_fault.c b/arch/powerpc/platforms/cell/spu_fault.c
index 95d8dadf2d87..d06ba87f1a19 100644
--- a/arch/powerpc/platforms/cell/spu_fault.c
+++ b/arch/powerpc/platforms/cell/spu_fault.c
@@ -70,7 +70,7 @@ int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
70 } 70 }
71 71
72 ret = 0; 72 ret = 0;
73 *flt = handle_mm_fault(mm, vma, ea, is_write); 73 *flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
74 if (unlikely(*flt & VM_FAULT_ERROR)) { 74 if (unlikely(*flt & VM_FAULT_ERROR)) {
75 if (*flt & VM_FAULT_OOM) { 75 if (*flt & VM_FAULT_OOM) {
76 ret = -ENOMEM; 76 ret = -ENOMEM;
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index b0b84c35b0ad..cb5d59eab0ee 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -66,7 +66,7 @@ static int __handle_fault(struct mm_struct *mm, unsigned long address,
66 } 66 }
67 67
68survive: 68survive:
69 fault = handle_mm_fault(mm, vma, address, write_access); 69 fault = handle_mm_fault(mm, vma, address, write_access ? FAULT_FLAG_WRITE : 0);
70 if (unlikely(fault & VM_FAULT_ERROR)) { 70 if (unlikely(fault & VM_FAULT_ERROR)) {
71 if (fault & VM_FAULT_OOM) 71 if (fault & VM_FAULT_OOM)
72 goto out_of_memory; 72 goto out_of_memory;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 220a152c836c..74eb26bf1970 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -352,7 +352,7 @@ good_area:
352 * make sure we exit gracefully rather than endlessly redo 352 * make sure we exit gracefully rather than endlessly redo
353 * the fault. 353 * the fault.
354 */ 354 */
355 fault = handle_mm_fault(mm, vma, address, write); 355 fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
356 if (unlikely(fault & VM_FAULT_ERROR)) { 356 if (unlikely(fault & VM_FAULT_ERROR)) {
357 if (fault & VM_FAULT_OOM) { 357 if (fault & VM_FAULT_OOM) {
358 up_read(&mm->mmap_sem); 358 up_read(&mm->mmap_sem);
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 2c50f80fc332..cc8ddbdf3d7a 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -133,7 +133,7 @@ good_area:
133 * the fault. 133 * the fault.
134 */ 134 */
135survive: 135survive:
136 fault = handle_mm_fault(mm, vma, address, writeaccess); 136 fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
137 if (unlikely(fault & VM_FAULT_ERROR)) { 137 if (unlikely(fault & VM_FAULT_ERROR)) {
138 if (fault & VM_FAULT_OOM) 138 if (fault & VM_FAULT_OOM)
139 goto out_of_memory; 139 goto out_of_memory;
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index 7876997ba19a..fcbb6e135cef 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -187,7 +187,7 @@ good_area:
187 * the fault. 187 * the fault.
188 */ 188 */
189survive: 189survive:
190 fault = handle_mm_fault(mm, vma, address, writeaccess); 190 fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
191 if (unlikely(fault & VM_FAULT_ERROR)) { 191 if (unlikely(fault & VM_FAULT_ERROR)) {
192 if (fault & VM_FAULT_OOM) 192 if (fault & VM_FAULT_OOM)
193 goto out_of_memory; 193 goto out_of_memory;
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 12e447fc8542..a5e30c642ee3 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -241,7 +241,7 @@ good_area:
241 * make sure we exit gracefully rather than endlessly redo 241 * make sure we exit gracefully rather than endlessly redo
242 * the fault. 242 * the fault.
243 */ 243 */
244 fault = handle_mm_fault(mm, vma, address, write); 244 fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
245 if (unlikely(fault & VM_FAULT_ERROR)) { 245 if (unlikely(fault & VM_FAULT_ERROR)) {
246 if (fault & VM_FAULT_OOM) 246 if (fault & VM_FAULT_OOM)
247 goto out_of_memory; 247 goto out_of_memory;
@@ -484,7 +484,7 @@ good_area:
484 if(!(vma->vm_flags & (VM_READ | VM_EXEC))) 484 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
485 goto bad_area; 485 goto bad_area;
486 } 486 }
487 switch (handle_mm_fault(mm, vma, address, write)) { 487 switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
488 case VM_FAULT_SIGBUS: 488 case VM_FAULT_SIGBUS:
489 case VM_FAULT_OOM: 489 case VM_FAULT_OOM:
490 goto do_sigbus; 490 goto do_sigbus;
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 4ab8993b0863..e5620b27c8bf 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -398,7 +398,7 @@ good_area:
398 goto bad_area; 398 goto bad_area;
399 } 399 }
400 400
401 fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE)); 401 fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
402 if (unlikely(fault & VM_FAULT_ERROR)) { 402 if (unlikely(fault & VM_FAULT_ERROR)) {
403 if (fault & VM_FAULT_OOM) 403 if (fault & VM_FAULT_OOM)
404 goto out_of_memory; 404 goto out_of_memory;
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 7384d8accfe7..637c6505dc00 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -65,7 +65,7 @@ good_area:
65 do { 65 do {
66 int fault; 66 int fault;
67 67
68 fault = handle_mm_fault(mm, vma, address, is_write); 68 fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
69 if (unlikely(fault & VM_FAULT_ERROR)) { 69 if (unlikely(fault & VM_FAULT_ERROR)) {
70 if (fault & VM_FAULT_OOM) { 70 if (fault & VM_FAULT_OOM) {
71 goto out_of_memory; 71 goto out_of_memory;
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index caba99601703..eb0566e83319 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -845,7 +845,7 @@ ENTRY(aesni_cbc_enc)
845 */ 845 */
846ENTRY(aesni_cbc_dec) 846ENTRY(aesni_cbc_dec)
847 cmp $16, LEN 847 cmp $16, LEN
848 jb .Lcbc_dec_ret 848 jb .Lcbc_dec_just_ret
849 mov 480(KEYP), KLEN 849 mov 480(KEYP), KLEN
850 add $240, KEYP 850 add $240, KEYP
851 movups (IVP), IV 851 movups (IVP), IV
@@ -891,6 +891,7 @@ ENTRY(aesni_cbc_dec)
891 add $16, OUTP 891 add $16, OUTP
892 cmp $16, LEN 892 cmp $16, LEN
893 jge .Lcbc_dec_loop1 893 jge .Lcbc_dec_loop1
894 movups IV, (IVP)
895.Lcbc_dec_ret: 894.Lcbc_dec_ret:
895 movups IV, (IVP)
896.Lcbc_dec_just_ret:
896 ret 897 ret
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 4e663398f77f..c580c5ec1cad 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -198,6 +198,7 @@ static int ecb_encrypt(struct blkcipher_desc *desc,
198 198
199 blkcipher_walk_init(&walk, dst, src, nbytes); 199 blkcipher_walk_init(&walk, dst, src, nbytes);
200 err = blkcipher_walk_virt(desc, &walk); 200 err = blkcipher_walk_virt(desc, &walk);
201 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
201 202
202 kernel_fpu_begin(); 203 kernel_fpu_begin();
203 while ((nbytes = walk.nbytes)) { 204 while ((nbytes = walk.nbytes)) {
@@ -221,6 +222,7 @@ static int ecb_decrypt(struct blkcipher_desc *desc,
221 222
222 blkcipher_walk_init(&walk, dst, src, nbytes); 223 blkcipher_walk_init(&walk, dst, src, nbytes);
223 err = blkcipher_walk_virt(desc, &walk); 224 err = blkcipher_walk_virt(desc, &walk);
225 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
224 226
225 kernel_fpu_begin(); 227 kernel_fpu_begin();
226 while ((nbytes = walk.nbytes)) { 228 while ((nbytes = walk.nbytes)) {
@@ -266,6 +268,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc,
266 268
267 blkcipher_walk_init(&walk, dst, src, nbytes); 269 blkcipher_walk_init(&walk, dst, src, nbytes);
268 err = blkcipher_walk_virt(desc, &walk); 270 err = blkcipher_walk_virt(desc, &walk);
271 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
269 272
270 kernel_fpu_begin(); 273 kernel_fpu_begin();
271 while ((nbytes = walk.nbytes)) { 274 while ((nbytes = walk.nbytes)) {
@@ -289,6 +292,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc,
289 292
290 blkcipher_walk_init(&walk, dst, src, nbytes); 293 blkcipher_walk_init(&walk, dst, src, nbytes);
291 err = blkcipher_walk_virt(desc, &walk); 294 err = blkcipher_walk_virt(desc, &walk);
295 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
292 296
293 kernel_fpu_begin(); 297 kernel_fpu_begin();
294 while ((nbytes = walk.nbytes)) { 298 while ((nbytes = walk.nbytes)) {
diff --git a/arch/x86/crypto/fpu.c b/arch/x86/crypto/fpu.c
index 5f9781a3815f..daef6cd2b45d 100644
--- a/arch/x86/crypto/fpu.c
+++ b/arch/x86/crypto/fpu.c
@@ -48,7 +48,7 @@ static int crypto_fpu_encrypt(struct blkcipher_desc *desc_in,
48 struct blkcipher_desc desc = { 48 struct blkcipher_desc desc = {
49 .tfm = child, 49 .tfm = child,
50 .info = desc_in->info, 50 .info = desc_in->info,
51 .flags = desc_in->flags, 51 .flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
52 }; 52 };
53 53
54 kernel_fpu_begin(); 54 kernel_fpu_begin();
@@ -67,7 +67,7 @@ static int crypto_fpu_decrypt(struct blkcipher_desc *desc_in,
67 struct blkcipher_desc desc = { 67 struct blkcipher_desc desc = {
68 .tfm = child, 68 .tfm = child,
69 .info = desc_in->info, 69 .info = desc_in->info,
70 .flags = desc_in->flags, 70 .flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
71 }; 71 };
72 72
73 kernel_fpu_begin(); 73 kernel_fpu_begin();
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 02ecb30982a3..103f1ddb0d85 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -42,6 +42,7 @@
42 42
43#else /* ...!ASSEMBLY */ 43#else /* ...!ASSEMBLY */
44 44
45#include <linux/kernel.h>
45#include <linux/stringify.h> 46#include <linux/stringify.h>
46 47
47#ifdef CONFIG_SMP 48#ifdef CONFIG_SMP
@@ -155,6 +156,15 @@ do { \
155/* We can use this directly for local CPU (faster). */ 156/* We can use this directly for local CPU (faster). */
156DECLARE_PER_CPU(unsigned long, this_cpu_off); 157DECLARE_PER_CPU(unsigned long, this_cpu_off);
157 158
159#ifdef CONFIG_NEED_MULTIPLE_NODES
160void *pcpu_lpage_remapped(void *kaddr);
161#else
162static inline void *pcpu_lpage_remapped(void *kaddr)
163{
164 return NULL;
165}
166#endif
167
158#endif /* !__ASSEMBLY__ */ 168#endif /* !__ASSEMBLY__ */
159 169
160#ifdef CONFIG_SMP 170#ifdef CONFIG_SMP
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 9c3f0823e6aa..29a3eef7cf4a 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -124,7 +124,7 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
124} 124}
125 125
126/* 126/*
127 * Remap allocator 127 * Large page remap allocator
128 * 128 *
129 * This allocator uses PMD page as unit. A PMD page is allocated for 129 * This allocator uses PMD page as unit. A PMD page is allocated for
130 * each cpu and each is remapped into vmalloc area using PMD mapping. 130 * each cpu and each is remapped into vmalloc area using PMD mapping.
@@ -137,105 +137,185 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
137 * better than only using 4k mappings while still being NUMA friendly. 137 * better than only using 4k mappings while still being NUMA friendly.
138 */ 138 */
139#ifdef CONFIG_NEED_MULTIPLE_NODES 139#ifdef CONFIG_NEED_MULTIPLE_NODES
140static size_t pcpur_size __initdata; 140struct pcpul_ent {
141static void **pcpur_ptrs __initdata; 141 unsigned int cpu;
142 void *ptr;
143};
144
145static size_t pcpul_size;
146static struct pcpul_ent *pcpul_map;
147static struct vm_struct pcpul_vm;
142 148
143static struct page * __init pcpur_get_page(unsigned int cpu, int pageno) 149static struct page * __init pcpul_get_page(unsigned int cpu, int pageno)
144{ 150{
145 size_t off = (size_t)pageno << PAGE_SHIFT; 151 size_t off = (size_t)pageno << PAGE_SHIFT;
146 152
147 if (off >= pcpur_size) 153 if (off >= pcpul_size)
148 return NULL; 154 return NULL;
149 155
150 return virt_to_page(pcpur_ptrs[cpu] + off); 156 return virt_to_page(pcpul_map[cpu].ptr + off);
151} 157}
152 158
153static ssize_t __init setup_pcpu_remap(size_t static_size) 159static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
154{ 160{
155 static struct vm_struct vm; 161 size_t map_size, dyn_size;
156 size_t ptrs_size, dyn_size;
157 unsigned int cpu; 162 unsigned int cpu;
163 int i, j;
158 ssize_t ret; 164 ssize_t ret;
159 165
160 /* 166 if (!chosen) {
161 * If large page isn't supported, there's no benefit in doing 167 size_t vm_size = VMALLOC_END - VMALLOC_START;
162 * this. Also, on non-NUMA, embedding is better. 168 size_t tot_size = num_possible_cpus() * PMD_SIZE;
163 * 169
164 * NOTE: disabled for now. 170 /* on non-NUMA, embedding is better */
165 */ 171 if (!pcpu_need_numa())
166 if (true || !cpu_has_pse || !pcpu_need_numa()) 172 return -EINVAL;
173
174 /* don't consume more than 20% of vmalloc area */
175 if (tot_size > vm_size / 5) {
176 pr_info("PERCPU: too large chunk size %zuMB for "
177 "large page remap\n", tot_size >> 20);
178 return -EINVAL;
179 }
180 }
181
182 /* need PSE */
183 if (!cpu_has_pse) {
184 pr_warning("PERCPU: lpage allocator requires PSE\n");
167 return -EINVAL; 185 return -EINVAL;
186 }
168 187
169 /* 188 /*
170 * Currently supports only single page. Supporting multiple 189 * Currently supports only single page. Supporting multiple
171 * pages won't be too difficult if it ever becomes necessary. 190 * pages won't be too difficult if it ever becomes necessary.
172 */ 191 */
173 pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE + 192 pcpul_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
174 PERCPU_DYNAMIC_RESERVE); 193 PERCPU_DYNAMIC_RESERVE);
175 if (pcpur_size > PMD_SIZE) { 194 if (pcpul_size > PMD_SIZE) {
176 pr_warning("PERCPU: static data is larger than large page, " 195 pr_warning("PERCPU: static data is larger than large page, "
177 "can't use large page\n"); 196 "can't use large page\n");
178 return -EINVAL; 197 return -EINVAL;
179 } 198 }
180 dyn_size = pcpur_size - static_size - PERCPU_FIRST_CHUNK_RESERVE; 199 dyn_size = pcpul_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
181 200
182 /* allocate pointer array and alloc large pages */ 201 /* allocate pointer array and alloc large pages */
183 ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0])); 202 map_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpul_map[0]));
184 pcpur_ptrs = alloc_bootmem(ptrs_size); 203 pcpul_map = alloc_bootmem(map_size);
185 204
186 for_each_possible_cpu(cpu) { 205 for_each_possible_cpu(cpu) {
187 pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PMD_SIZE, PMD_SIZE); 206 pcpul_map[cpu].cpu = cpu;
188 if (!pcpur_ptrs[cpu]) 207 pcpul_map[cpu].ptr = pcpu_alloc_bootmem(cpu, PMD_SIZE,
208 PMD_SIZE);
209 if (!pcpul_map[cpu].ptr) {
210 pr_warning("PERCPU: failed to allocate large page "
211 "for cpu%u\n", cpu);
189 goto enomem; 212 goto enomem;
213 }
190 214
191 /* 215 /*
192 * Only use pcpur_size bytes and give back the rest. 216 * Only use pcpul_size bytes and give back the rest.
193 * 217 *
194 * Ingo: The 2MB up-rounding bootmem is needed to make 218 * Ingo: The 2MB up-rounding bootmem is needed to make
195 * sure the partial 2MB page is still fully RAM - it's 219 * sure the partial 2MB page is still fully RAM - it's
196 * not well-specified to have a PAT-incompatible area 220 * not well-specified to have a PAT-incompatible area
197 * (unmapped RAM, device memory, etc.) in that hole. 221 * (unmapped RAM, device memory, etc.) in that hole.
198 */ 222 */
199 free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size), 223 free_bootmem(__pa(pcpul_map[cpu].ptr + pcpul_size),
200 PMD_SIZE - pcpur_size); 224 PMD_SIZE - pcpul_size);
201 225
202 memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size); 226 memcpy(pcpul_map[cpu].ptr, __per_cpu_load, static_size);
203 } 227 }
204 228
205 /* allocate address and map */ 229 /* allocate address and map */
206 vm.flags = VM_ALLOC; 230 pcpul_vm.flags = VM_ALLOC;
207 vm.size = num_possible_cpus() * PMD_SIZE; 231 pcpul_vm.size = num_possible_cpus() * PMD_SIZE;
208 vm_area_register_early(&vm, PMD_SIZE); 232 vm_area_register_early(&pcpul_vm, PMD_SIZE);
209 233
210 for_each_possible_cpu(cpu) { 234 for_each_possible_cpu(cpu) {
211 pmd_t *pmd; 235 pmd_t *pmd, pmd_v;
212 236
213 pmd = populate_extra_pmd((unsigned long)vm.addr 237 pmd = populate_extra_pmd((unsigned long)pcpul_vm.addr +
214 + cpu * PMD_SIZE); 238 cpu * PMD_SIZE);
215 set_pmd(pmd, pfn_pmd(page_to_pfn(virt_to_page(pcpur_ptrs[cpu])), 239 pmd_v = pfn_pmd(page_to_pfn(virt_to_page(pcpul_map[cpu].ptr)),
216 PAGE_KERNEL_LARGE)); 240 PAGE_KERNEL_LARGE);
241 set_pmd(pmd, pmd_v);
217 } 242 }
218 243
219 /* we're ready, commit */ 244 /* we're ready, commit */
220 pr_info("PERCPU: Remapped at %p with large pages, static data " 245 pr_info("PERCPU: Remapped at %p with large pages, static data "
221 "%zu bytes\n", vm.addr, static_size); 246 "%zu bytes\n", pcpul_vm.addr, static_size);
222 247
223 ret = pcpu_setup_first_chunk(pcpur_get_page, static_size, 248 ret = pcpu_setup_first_chunk(pcpul_get_page, static_size,
224 PERCPU_FIRST_CHUNK_RESERVE, dyn_size, 249 PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
225 PMD_SIZE, vm.addr, NULL); 250 PMD_SIZE, pcpul_vm.addr, NULL);
226 goto out_free_ar; 251
252 /* sort pcpul_map array for pcpu_lpage_remapped() */
253 for (i = 0; i < num_possible_cpus() - 1; i++)
254 for (j = i + 1; j < num_possible_cpus(); j++)
255 if (pcpul_map[i].ptr > pcpul_map[j].ptr) {
256 struct pcpul_ent tmp = pcpul_map[i];
257 pcpul_map[i] = pcpul_map[j];
258 pcpul_map[j] = tmp;
259 }
260
261 return ret;
227 262
228enomem: 263enomem:
229 for_each_possible_cpu(cpu) 264 for_each_possible_cpu(cpu)
230 if (pcpur_ptrs[cpu]) 265 if (pcpul_map[cpu].ptr)
231 free_bootmem(__pa(pcpur_ptrs[cpu]), PMD_SIZE); 266 free_bootmem(__pa(pcpul_map[cpu].ptr), pcpul_size);
232 ret = -ENOMEM; 267 free_bootmem(__pa(pcpul_map), map_size);
233out_free_ar: 268 return -ENOMEM;
234 free_bootmem(__pa(pcpur_ptrs), ptrs_size); 269}
235 return ret; 270
271/**
272 * pcpu_lpage_remapped - determine whether a kaddr is in pcpul recycled area
273 * @kaddr: the kernel address in question
274 *
275 * Determine whether @kaddr falls in the pcpul recycled area. This is
276 * used by pageattr to detect VM aliases and break up the pcpu PMD
277 * mapping such that the same physical page is not mapped under
278 * different attributes.
279 *
280 * The recycled area is always at the tail of a partially used PMD
281 * page.
282 *
283 * RETURNS:
284 * Address of corresponding remapped pcpu address if match is found;
285 * otherwise, NULL.
286 */
287void *pcpu_lpage_remapped(void *kaddr)
288{
289 void *pmd_addr = (void *)((unsigned long)kaddr & PMD_MASK);
290 unsigned long offset = (unsigned long)kaddr & ~PMD_MASK;
291 int left = 0, right = num_possible_cpus() - 1;
292 int pos;
293
294 /* pcpul in use at all? */
295 if (!pcpul_map)
296 return NULL;
297
298 /* okay, perform binary search */
299 while (left <= right) {
300 pos = (left + right) / 2;
301
302 if (pcpul_map[pos].ptr < pmd_addr)
303 left = pos + 1;
304 else if (pcpul_map[pos].ptr > pmd_addr)
305 right = pos - 1;
306 else {
307 /* it shouldn't be in the area for the first chunk */
308 WARN_ON(offset < pcpul_size);
309
310 return pcpul_vm.addr +
311 pcpul_map[pos].cpu * PMD_SIZE + offset;
312 }
313 }
314
315 return NULL;
236} 316}
237#else 317#else
238static ssize_t __init setup_pcpu_remap(size_t static_size) 318static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
239{ 319{
240 return -EINVAL; 320 return -EINVAL;
241} 321}
@@ -249,7 +329,7 @@ static ssize_t __init setup_pcpu_remap(size_t static_size)
249 * mapping so that it can use PMD mapping without additional TLB 329 * mapping so that it can use PMD mapping without additional TLB
250 * pressure. 330 * pressure.
251 */ 331 */
252static ssize_t __init setup_pcpu_embed(size_t static_size) 332static ssize_t __init setup_pcpu_embed(size_t static_size, bool chosen)
253{ 333{
254 size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE; 334 size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
255 335
@@ -258,7 +338,7 @@ static ssize_t __init setup_pcpu_embed(size_t static_size)
258 * this. Also, embedding allocation doesn't play well with 338 * this. Also, embedding allocation doesn't play well with
259 * NUMA. 339 * NUMA.
260 */ 340 */
261 if (!cpu_has_pse || pcpu_need_numa()) 341 if (!chosen && (!cpu_has_pse || pcpu_need_numa()))
262 return -EINVAL; 342 return -EINVAL;
263 343
264 return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE, 344 return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
@@ -308,8 +388,11 @@ static ssize_t __init setup_pcpu_4k(size_t static_size)
308 void *ptr; 388 void *ptr;
309 389
310 ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE); 390 ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
311 if (!ptr) 391 if (!ptr) {
392 pr_warning("PERCPU: failed to allocate "
393 "4k page for cpu%u\n", cpu);
312 goto enomem; 394 goto enomem;
395 }
313 396
314 memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE); 397 memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
315 pcpu4k_pages[j++] = virt_to_page(ptr); 398 pcpu4k_pages[j++] = virt_to_page(ptr);
@@ -333,6 +416,16 @@ out_free_ar:
333 return ret; 416 return ret;
334} 417}
335 418
419/* for explicit first chunk allocator selection */
420static char pcpu_chosen_alloc[16] __initdata;
421
422static int __init percpu_alloc_setup(char *str)
423{
424 strncpy(pcpu_chosen_alloc, str, sizeof(pcpu_chosen_alloc) - 1);
425 return 0;
426}
427early_param("percpu_alloc", percpu_alloc_setup);
428
336static inline void setup_percpu_segment(int cpu) 429static inline void setup_percpu_segment(int cpu)
337{ 430{
338#ifdef CONFIG_X86_32 431#ifdef CONFIG_X86_32
@@ -346,11 +439,6 @@ static inline void setup_percpu_segment(int cpu)
346#endif 439#endif
347} 440}
348 441
349/*
350 * Great future plan:
351 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
352 * Always point %gs to its beginning
353 */
354void __init setup_per_cpu_areas(void) 442void __init setup_per_cpu_areas(void)
355{ 443{
356 size_t static_size = __per_cpu_end - __per_cpu_start; 444 size_t static_size = __per_cpu_end - __per_cpu_start;
@@ -367,9 +455,26 @@ void __init setup_per_cpu_areas(void)
367 * of large page mappings. Please read comments on top of 455 * of large page mappings. Please read comments on top of
368 * each allocator for details. 456 * each allocator for details.
369 */ 457 */
370 ret = setup_pcpu_remap(static_size); 458 ret = -EINVAL;
371 if (ret < 0) 459 if (strlen(pcpu_chosen_alloc)) {
372 ret = setup_pcpu_embed(static_size); 460 if (strcmp(pcpu_chosen_alloc, "4k")) {
461 if (!strcmp(pcpu_chosen_alloc, "lpage"))
462 ret = setup_pcpu_lpage(static_size, true);
463 else if (!strcmp(pcpu_chosen_alloc, "embed"))
464 ret = setup_pcpu_embed(static_size, true);
465 else
466 pr_warning("PERCPU: unknown allocator %s "
467 "specified\n", pcpu_chosen_alloc);
468 if (ret < 0)
469 pr_warning("PERCPU: %s allocator failed (%zd), "
470 "falling back to 4k\n",
471 pcpu_chosen_alloc, ret);
472 }
473 } else {
474 ret = setup_pcpu_lpage(static_size, false);
475 if (ret < 0)
476 ret = setup_pcpu_embed(static_size, false);
477 }
373 if (ret < 0) 478 if (ret < 0)
374 ret = setup_pcpu_4k(static_size); 479 ret = setup_pcpu_4k(static_size);
375 if (ret < 0) 480 if (ret < 0)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index c403526d5d15..78a5fff857be 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1113,7 +1113,7 @@ good_area:
1113 * make sure we exit gracefully rather than endlessly redo 1113 * make sure we exit gracefully rather than endlessly redo
1114 * the fault: 1114 * the fault:
1115 */ 1115 */
1116 fault = handle_mm_fault(mm, vma, address, write); 1116 fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
1117 1117
1118 if (unlikely(fault & VM_FAULT_ERROR)) { 1118 if (unlikely(fault & VM_FAULT_ERROR)) {
1119 mm_fault_error(regs, error_code, address, fault); 1119 mm_fault_error(regs, error_code, address, fault);
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 3cfe9ced8a4c..1b734d7a8966 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -11,6 +11,7 @@
11#include <linux/interrupt.h> 11#include <linux/interrupt.h>
12#include <linux/seq_file.h> 12#include <linux/seq_file.h>
13#include <linux/debugfs.h> 13#include <linux/debugfs.h>
14#include <linux/pfn.h>
14 15
15#include <asm/e820.h> 16#include <asm/e820.h>
16#include <asm/processor.h> 17#include <asm/processor.h>
@@ -681,8 +682,9 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);
681static int cpa_process_alias(struct cpa_data *cpa) 682static int cpa_process_alias(struct cpa_data *cpa)
682{ 683{
683 struct cpa_data alias_cpa; 684 struct cpa_data alias_cpa;
684 int ret = 0; 685 unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
685 unsigned long temp_cpa_vaddr, vaddr; 686 unsigned long vaddr, remapped;
687 int ret;
686 688
687 if (cpa->pfn >= max_pfn_mapped) 689 if (cpa->pfn >= max_pfn_mapped)
688 return 0; 690 return 0;
@@ -706,42 +708,55 @@ static int cpa_process_alias(struct cpa_data *cpa)
706 PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) { 708 PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {
707 709
708 alias_cpa = *cpa; 710 alias_cpa = *cpa;
709 temp_cpa_vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT); 711 alias_cpa.vaddr = &laddr;
710 alias_cpa.vaddr = &temp_cpa_vaddr;
711 alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY); 712 alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
712 713
713
714 ret = __change_page_attr_set_clr(&alias_cpa, 0); 714 ret = __change_page_attr_set_clr(&alias_cpa, 0);
715 if (ret)
716 return ret;
715 } 717 }
716 718
717#ifdef CONFIG_X86_64 719#ifdef CONFIG_X86_64
718 if (ret)
719 return ret;
720 /* 720 /*
721 * No need to redo, when the primary call touched the high 721 * If the primary call didn't touch the high mapping already
722 * mapping already: 722 * and the physical address is inside the kernel map, we need
723 */
724 if (within(vaddr, (unsigned long) _text, _brk_end))
725 return 0;
726
727 /*
728 * If the physical address is inside the kernel map, we need
729 * to touch the high mapped kernel as well: 723 * to touch the high mapped kernel as well:
730 */ 724 */
731 if (!within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn())) 725 if (!within(vaddr, (unsigned long)_text, _brk_end) &&
732 return 0; 726 within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn())) {
727 unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
728 __START_KERNEL_map - phys_base;
729 alias_cpa = *cpa;
730 alias_cpa.vaddr = &temp_cpa_vaddr;
731 alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
733 732
734 alias_cpa = *cpa; 733 /*
735 temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base; 734 * The high mapping range is imprecise, so ignore the
736 alias_cpa.vaddr = &temp_cpa_vaddr; 735 * return value.
737 alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY); 736 */
737 __change_page_attr_set_clr(&alias_cpa, 0);
738 }
739#endif
738 740
739 /* 741 /*
740 * The high mapping range is imprecise, so ignore the return value. 742 * If the PMD page was partially used for per-cpu remapping,
743 * the recycled area needs to be split and modified. Because
744 * the area is always proper subset of a PMD page
745 * cpa->numpages is guaranteed to be 1 for these areas, so
746 * there's no need to loop over and check for further remaps.
741 */ 747 */
742 __change_page_attr_set_clr(&alias_cpa, 0); 748 remapped = (unsigned long)pcpu_lpage_remapped((void *)laddr);
743#endif 749 if (remapped) {
744 return ret; 750 WARN_ON(cpa->numpages > 1);
751 alias_cpa = *cpa;
752 alias_cpa.vaddr = &remapped;
753 alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
754 ret = __change_page_attr_set_clr(&alias_cpa, 0);
755 if (ret)
756 return ret;
757 }
758
759 return 0;
745} 760}
746 761
747static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias) 762static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index bdd860d93f72..bc0733359a88 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -106,7 +106,7 @@ good_area:
106 * the fault. 106 * the fault.
107 */ 107 */
108survive: 108survive:
109 fault = handle_mm_fault(mm, vma, address, is_write); 109 fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
110 if (unlikely(fault & VM_FAULT_ERROR)) { 110 if (unlikely(fault & VM_FAULT_ERROR)) {
111 if (fault & VM_FAULT_OOM) 111 if (fault & VM_FAULT_OOM)
112 goto out_of_memory; 112 goto out_of_memory;
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 87f92c39b5f0..a9952b1236b0 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -18,9 +18,22 @@
18#include <linux/percpu.h> 18#include <linux/percpu.h>
19#include <linux/smp.h> 19#include <linux/smp.h>
20#include <asm/byteorder.h> 20#include <asm/byteorder.h>
21#include <asm/processor.h>
21#include <asm/i387.h> 22#include <asm/i387.h>
22#include "padlock.h" 23#include "padlock.h"
23 24
25/*
26 * Number of data blocks actually fetched for each xcrypt insn.
27 * Processors with prefetch errata will fetch extra blocks.
28 */
29static unsigned int ecb_fetch_blocks = 2;
30#define MAX_ECB_FETCH_BLOCKS (8)
31#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)
32
33static unsigned int cbc_fetch_blocks = 1;
34#define MAX_CBC_FETCH_BLOCKS (4)
35#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)
36
24/* Control word. */ 37/* Control word. */
25struct cword { 38struct cword {
26 unsigned int __attribute__ ((__packed__)) 39 unsigned int __attribute__ ((__packed__))
@@ -172,73 +185,111 @@ static inline void padlock_store_cword(struct cword *cword)
172 * should be used only inside the irq_ts_save/restore() context 185 * should be used only inside the irq_ts_save/restore() context
173 */ 186 */
174 187
175static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key, 188static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
176 struct cword *control_word) 189 struct cword *control_word, int count)
177{ 190{
178 asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ 191 asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
179 : "+S"(input), "+D"(output) 192 : "+S"(input), "+D"(output)
180 : "d"(control_word), "b"(key), "c"(1)); 193 : "d"(control_word), "b"(key), "c"(count));
194}
195
196static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key,
197 u8 *iv, struct cword *control_word, int count)
198{
199 asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
200 : "+S" (input), "+D" (output), "+a" (iv)
201 : "d" (control_word), "b" (key), "c" (count));
202 return iv;
181} 203}
182 204
183static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword) 205static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key,
206 struct cword *cword, int count)
184{ 207{
185 u8 buf[AES_BLOCK_SIZE * 2 + PADLOCK_ALIGNMENT - 1]; 208 /*
209 * Padlock prefetches extra data so we must provide mapped input buffers.
210 * Assume there are at least 16 bytes of stack already in use.
211 */
212 u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
213 u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
214
215 memcpy(tmp, in, count * AES_BLOCK_SIZE);
216 rep_xcrypt_ecb(tmp, out, key, cword, count);
217}
218
219static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key,
220 u8 *iv, struct cword *cword, int count)
221{
222 /*
223 * Padlock prefetches extra data so we must provide mapped input buffers.
224 * Assume there are at least 16 bytes of stack already in use.
225 */
226 u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
186 u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); 227 u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
187 228
188 memcpy(tmp, in, AES_BLOCK_SIZE); 229 memcpy(tmp, in, count * AES_BLOCK_SIZE);
189 padlock_xcrypt(tmp, out, key, cword); 230 return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
190} 231}
191 232
192static inline void aes_crypt(const u8 *in, u8 *out, u32 *key, 233static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
193 struct cword *cword) 234 struct cword *cword, int count)
194{ 235{
195 /* padlock_xcrypt requires at least two blocks of data. */ 236 /* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
196 if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) & 237 * We could avoid some copying here but it's probably not worth it.
197 (PAGE_SIZE - 1)))) { 238 */
198 aes_crypt_copy(in, out, key, cword); 239 if (unlikely(((unsigned long)in & PAGE_SIZE) + ecb_fetch_bytes > PAGE_SIZE)) {
240 ecb_crypt_copy(in, out, key, cword, count);
199 return; 241 return;
200 } 242 }
201 243
202 padlock_xcrypt(in, out, key, cword); 244 rep_xcrypt_ecb(in, out, key, cword, count);
245}
246
247static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
248 u8 *iv, struct cword *cword, int count)
249{
250 /* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
251 if (unlikely(((unsigned long)in & PAGE_SIZE) + cbc_fetch_bytes > PAGE_SIZE))
252 return cbc_crypt_copy(in, out, key, iv, cword, count);
253
254 return rep_xcrypt_cbc(in, out, key, iv, cword, count);
203} 255}
204 256
205static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key, 257static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
206 void *control_word, u32 count) 258 void *control_word, u32 count)
207{ 259{
208 if (count == 1) { 260 u32 initial = count & (ecb_fetch_blocks - 1);
209 aes_crypt(input, output, key, control_word); 261
262 if (count < ecb_fetch_blocks) {
263 ecb_crypt(input, output, key, control_word, count);
210 return; 264 return;
211 } 265 }
212 266
213 asm volatile ("test $1, %%cl;" 267 if (initial)
214 "je 1f;" 268 asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
215#ifndef CONFIG_X86_64 269 : "+S"(input), "+D"(output)
216 "lea -1(%%ecx), %%eax;" 270 : "d"(control_word), "b"(key), "c"(initial));
217 "mov $1, %%ecx;" 271
218#else 272 asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
219 "lea -1(%%rcx), %%rax;"
220 "mov $1, %%rcx;"
221#endif
222 ".byte 0xf3,0x0f,0xa7,0xc8;" /* rep xcryptecb */
223#ifndef CONFIG_X86_64
224 "mov %%eax, %%ecx;"
225#else
226 "mov %%rax, %%rcx;"
227#endif
228 "1:"
229 ".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
230 : "+S"(input), "+D"(output) 273 : "+S"(input), "+D"(output)
231 : "d"(control_word), "b"(key), "c"(count) 274 : "d"(control_word), "b"(key), "c"(count - initial));
232 : "ax");
233} 275}
234 276
235static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, 277static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
236 u8 *iv, void *control_word, u32 count) 278 u8 *iv, void *control_word, u32 count)
237{ 279{
238 /* rep xcryptcbc */ 280 u32 initial = count & (cbc_fetch_blocks - 1);
239 asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" 281
282 if (count < cbc_fetch_blocks)
283 return cbc_crypt(input, output, key, iv, control_word, count);
284
285 if (initial)
286 asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
287 : "+S" (input), "+D" (output), "+a" (iv)
288 : "d" (control_word), "b" (key), "c" (count));
289
290 asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
240 : "+S" (input), "+D" (output), "+a" (iv) 291 : "+S" (input), "+D" (output), "+a" (iv)
241 : "d" (control_word), "b" (key), "c" (count)); 292 : "d" (control_word), "b" (key), "c" (count-initial));
242 return iv; 293 return iv;
243} 294}
244 295
@@ -249,7 +300,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
249 300
250 padlock_reset_key(&ctx->cword.encrypt); 301 padlock_reset_key(&ctx->cword.encrypt);
251 ts_state = irq_ts_save(); 302 ts_state = irq_ts_save();
252 aes_crypt(in, out, ctx->E, &ctx->cword.encrypt); 303 ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
253 irq_ts_restore(ts_state); 304 irq_ts_restore(ts_state);
254 padlock_store_cword(&ctx->cword.encrypt); 305 padlock_store_cword(&ctx->cword.encrypt);
255} 306}
@@ -261,7 +312,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
261 312
262 padlock_reset_key(&ctx->cword.encrypt); 313 padlock_reset_key(&ctx->cword.encrypt);
263 ts_state = irq_ts_save(); 314 ts_state = irq_ts_save();
264 aes_crypt(in, out, ctx->D, &ctx->cword.decrypt); 315 ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
265 irq_ts_restore(ts_state); 316 irq_ts_restore(ts_state);
266 padlock_store_cword(&ctx->cword.encrypt); 317 padlock_store_cword(&ctx->cword.encrypt);
267} 318}
@@ -454,6 +505,7 @@ static struct crypto_alg cbc_aes_alg = {
454static int __init padlock_init(void) 505static int __init padlock_init(void)
455{ 506{
456 int ret; 507 int ret;
508 struct cpuinfo_x86 *c = &cpu_data(0);
457 509
458 if (!cpu_has_xcrypt) { 510 if (!cpu_has_xcrypt) {
459 printk(KERN_NOTICE PFX "VIA PadLock not detected.\n"); 511 printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
@@ -476,6 +528,12 @@ static int __init padlock_init(void)
476 528
477 printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n"); 529 printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
478 530
531 if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
532 ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
533 cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
534 printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
535 }
536
479out: 537out:
480 return ret; 538 return ret;
481 539
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 40111a6d8d5b..891ef18bd77b 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -94,6 +94,31 @@ config MMC_SDHCI_PLTFM
94 94
95 If unsure, say N. 95 If unsure, say N.
96 96
97config MMC_SDHCI_S3C
98 tristate "SDHCI support on Samsung S3C SoC"
99 depends on MMC_SDHCI && (PLAT_S3C24XX || PLAT_S3C64XX)
100 help
101 This selects the Secure Digital Host Controller Interface (SDHCI)
102 often referrered to as the HSMMC block in some of the Samsung S3C
103 range of SoC.
104
105 Note, due to the problems with DMA, the DMA support is only
106 available with CONFIG_EXPERIMENTAL is selected.
107
108 If you have a controller with this interface, say Y or M here.
109
110 If unsure, say N.
111
112config MMC_SDHCI_S3C_DMA
113 bool "DMA support on S3C SDHCI"
114 depends on MMC_SDHCI_S3C && EXPERIMENTAL
115 help
116 Enable DMA support on the Samsung S3C SDHCI glue. The DMA
117 has proved to be problematic if the controller encounters
118 certain errors, and thus should be treated with care.
119
120 YMMV.
121
97config MMC_OMAP 122config MMC_OMAP
98 tristate "TI OMAP Multimedia Card Interface support" 123 tristate "TI OMAP Multimedia Card Interface support"
99 depends on ARCH_OMAP 124 depends on ARCH_OMAP
@@ -265,3 +290,14 @@ config MMC_CB710
265 This driver can also be built as a module. If so, the module 290 This driver can also be built as a module. If so, the module
266 will be called cb710-mmc. 291 will be called cb710-mmc.
267 292
293config MMC_VIA_SDMMC
294 tristate "VIA SD/MMC Card Reader Driver"
295 depends on PCI
296 help
297 This selects the VIA SD/MMC Card Reader driver, say Y or M here.
298 VIA provides one multi-functional card reader which integrated into
299 some motherboards manufactured by VIA. This card reader supports
300 SD/MMC/SDHC.
301 If you have a controller with this interface, say Y or M here.
302
303 If unsure, say N.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 79da397c5fea..cf153f628457 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
15obj-$(CONFIG_MMC_RICOH_MMC) += ricoh_mmc.o 15obj-$(CONFIG_MMC_RICOH_MMC) += ricoh_mmc.o
16obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o 16obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
17obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o 17obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
18obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
18obj-$(CONFIG_MMC_WBSD) += wbsd.o 19obj-$(CONFIG_MMC_WBSD) += wbsd.o
19obj-$(CONFIG_MMC_AU1X) += au1xmmc.o 20obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
20obj-$(CONFIG_MMC_OMAP) += omap.o 21obj-$(CONFIG_MMC_OMAP) += omap.o
@@ -31,6 +32,7 @@ obj-$(CONFIG_MMC_S3C) += s3cmci.o
31obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o 32obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o
32obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o 33obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
33obj-$(CONFIG_MMC_CB710) += cb710-mmc.o 34obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
35obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
34 36
35ifeq ($(CONFIG_CB710_DEBUG),y) 37ifeq ($(CONFIG_CB710_DEBUG),y)
36 CFLAGS-cb710-mmc += -DDEBUG 38 CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 4eb4f37544ab..8c08cd7efa7f 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -794,7 +794,7 @@ static void s3cmci_dma_setup(struct s3cmci_host *host,
794 host->mem->start + host->sdidata); 794 host->mem->start + host->sdidata);
795 795
796 if (!setup_ok) { 796 if (!setup_ok) {
797 s3c2410_dma_config(host->dma, 4, 0); 797 s3c2410_dma_config(host->dma, 4);
798 s3c2410_dma_set_buffdone_fn(host->dma, 798 s3c2410_dma_set_buffdone_fn(host->dma,
799 s3cmci_dma_done_callback); 799 s3cmci_dma_done_callback);
800 s3c2410_dma_setflags(host->dma, S3C2410_DMAF_AUTOSTART); 800 s3c2410_dma_setflags(host->dma, S3C2410_DMAF_AUTOSTART);
diff --git a/drivers/mmc/host/sdhci-of.c b/drivers/mmc/host/sdhci-of.c
index 128c614d11aa..d79fa55c3b89 100644
--- a/drivers/mmc/host/sdhci-of.c
+++ b/drivers/mmc/host/sdhci-of.c
@@ -250,6 +250,9 @@ static int __devinit sdhci_of_probe(struct of_device *ofdev,
250 host->ops = &sdhci_of_data->ops; 250 host->ops = &sdhci_of_data->ops;
251 } 251 }
252 252
253 if (of_get_property(np, "sdhci,1-bit-only", NULL))
254 host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
255
253 clk = of_get_property(np, "clock-frequency", &size); 256 clk = of_get_property(np, "clock-frequency", &size);
254 if (clk && size == sizeof(*clk) && *clk) 257 if (clk && size == sizeof(*clk) && *clk)
255 of_host->clock = *clk; 258 of_host->clock = *clk;
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 65be27995d5c..2f15cc17d887 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -284,6 +284,18 @@ static const struct sdhci_pci_fixes sdhci_jmicron = {
284 .resume = jmicron_resume, 284 .resume = jmicron_resume,
285}; 285};
286 286
287static int via_probe(struct sdhci_pci_chip *chip)
288{
289 if (chip->pdev->revision == 0x10)
290 chip->quirks |= SDHCI_QUIRK_DELAY_AFTER_POWER;
291
292 return 0;
293}
294
295static const struct sdhci_pci_fixes sdhci_via = {
296 .probe = via_probe,
297};
298
287static const struct pci_device_id pci_ids[] __devinitdata = { 299static const struct pci_device_id pci_ids[] __devinitdata = {
288 { 300 {
289 .vendor = PCI_VENDOR_ID_RICOH, 301 .vendor = PCI_VENDOR_ID_RICOH,
@@ -349,6 +361,14 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
349 .driver_data = (kernel_ulong_t)&sdhci_jmicron, 361 .driver_data = (kernel_ulong_t)&sdhci_jmicron,
350 }, 362 },
351 363
364 {
365 .vendor = PCI_VENDOR_ID_VIA,
366 .device = 0x95d0,
367 .subvendor = PCI_ANY_ID,
368 .subdevice = PCI_ANY_ID,
369 .driver_data = (kernel_ulong_t)&sdhci_via,
370 },
371
352 { /* Generic SD host controller */ 372 { /* Generic SD host controller */
353 PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00) 373 PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
354 }, 374 },
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
new file mode 100644
index 000000000000..50997d2a63e7
--- /dev/null
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -0,0 +1,428 @@
1/* linux/drivers/mmc/host/sdhci-s3c.c
2 *
3 * Copyright 2008 Openmoko Inc.
4 * Copyright 2008 Simtec Electronics
5 * Ben Dooks <ben@simtec.co.uk>
6 * http://armlinux.simtec.co.uk/
7 *
8 * SDHCI (HSMMC) support for Samsung SoC
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#include <linux/delay.h>
16#include <linux/dma-mapping.h>
17#include <linux/platform_device.h>
18#include <linux/clk.h>
19#include <linux/io.h>
20
21#include <linux/mmc/host.h>
22
23#include <plat/sdhci.h>
24#include <plat/regs-sdhci.h>
25
26#include "sdhci.h"
27
28#define MAX_BUS_CLK (4)
29
30/**
31 * struct sdhci_s3c - S3C SDHCI instance
32 * @host: The SDHCI host created
33 * @pdev: The platform device we where created from.
34 * @ioarea: The resource created when we claimed the IO area.
35 * @pdata: The platform data for this controller.
36 * @cur_clk: The index of the current bus clock.
37 * @clk_io: The clock for the internal bus interface.
38 * @clk_bus: The clocks that are available for the SD/MMC bus clock.
39 */
40struct sdhci_s3c {
41 struct sdhci_host *host;
42 struct platform_device *pdev;
43 struct resource *ioarea;
44 struct s3c_sdhci_platdata *pdata;
45 unsigned int cur_clk;
46
47 struct clk *clk_io;
48 struct clk *clk_bus[MAX_BUS_CLK];
49};
50
51static inline struct sdhci_s3c *to_s3c(struct sdhci_host *host)
52{
53 return sdhci_priv(host);
54}
55
56/**
57 * get_curclk - convert ctrl2 register to clock source number
58 * @ctrl2: Control2 register value.
59 */
60static u32 get_curclk(u32 ctrl2)
61{
62 ctrl2 &= S3C_SDHCI_CTRL2_SELBASECLK_MASK;
63 ctrl2 >>= S3C_SDHCI_CTRL2_SELBASECLK_SHIFT;
64
65 return ctrl2;
66}
67
68static void sdhci_s3c_check_sclk(struct sdhci_host *host)
69{
70 struct sdhci_s3c *ourhost = to_s3c(host);
71 u32 tmp = readl(host->ioaddr + S3C_SDHCI_CONTROL2);
72
73 if (get_curclk(tmp) != ourhost->cur_clk) {
74 dev_dbg(&ourhost->pdev->dev, "restored ctrl2 clock setting\n");
75
76 tmp &= ~S3C_SDHCI_CTRL2_SELBASECLK_MASK;
77 tmp |= ourhost->cur_clk << S3C_SDHCI_CTRL2_SELBASECLK_SHIFT;
78 writel(tmp, host->ioaddr + 0x80);
79 }
80}
81
82/**
83 * sdhci_s3c_get_max_clk - callback to get maximum clock frequency.
84 * @host: The SDHCI host instance.
85 *
86 * Callback to return the maximum clock rate acheivable by the controller.
87*/
88static unsigned int sdhci_s3c_get_max_clk(struct sdhci_host *host)
89{
90 struct sdhci_s3c *ourhost = to_s3c(host);
91 struct clk *busclk;
92 unsigned int rate, max;
93 int clk;
94
95 /* note, a reset will reset the clock source */
96
97 sdhci_s3c_check_sclk(host);
98
99 for (max = 0, clk = 0; clk < MAX_BUS_CLK; clk++) {
100 busclk = ourhost->clk_bus[clk];
101 if (!busclk)
102 continue;
103
104 rate = clk_get_rate(busclk);
105 if (rate > max)
106 max = rate;
107 }
108
109 return max;
110}
111
112static unsigned int sdhci_s3c_get_timeout_clk(struct sdhci_host *host)
113{
114 return sdhci_s3c_get_max_clk(host) / 1000000;
115}
116
117/**
118 * sdhci_s3c_consider_clock - consider one the bus clocks for current setting
119 * @ourhost: Our SDHCI instance.
120 * @src: The source clock index.
121 * @wanted: The clock frequency wanted.
122 */
123static unsigned int sdhci_s3c_consider_clock(struct sdhci_s3c *ourhost,
124 unsigned int src,
125 unsigned int wanted)
126{
127 unsigned long rate;
128 struct clk *clksrc = ourhost->clk_bus[src];
129 int div;
130
131 if (!clksrc)
132 return UINT_MAX;
133
134 rate = clk_get_rate(clksrc);
135
136 for (div = 1; div < 256; div *= 2) {
137 if ((rate / div) <= wanted)
138 break;
139 }
140
141 dev_dbg(&ourhost->pdev->dev, "clk %d: rate %ld, want %d, got %ld\n",
142 src, rate, wanted, rate / div);
143
144 return (wanted - (rate / div));
145}
146
147/**
148 * sdhci_s3c_set_clock - callback on clock change
149 * @host: The SDHCI host being changed
150 * @clock: The clock rate being requested.
151 *
152 * When the card's clock is going to be changed, look at the new frequency
153 * and find the best clock source to go with it.
154*/
155static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock)
156{
157 struct sdhci_s3c *ourhost = to_s3c(host);
158 unsigned int best = UINT_MAX;
159 unsigned int delta;
160 int best_src = 0;
161 int src;
162 u32 ctrl;
163
164 /* don't bother if the clock is going off. */
165 if (clock == 0)
166 return;
167
168 for (src = 0; src < MAX_BUS_CLK; src++) {
169 delta = sdhci_s3c_consider_clock(ourhost, src, clock);
170 if (delta < best) {
171 best = delta;
172 best_src = src;
173 }
174 }
175
176 dev_dbg(&ourhost->pdev->dev,
177 "selected source %d, clock %d, delta %d\n",
178 best_src, clock, best);
179
180 /* select the new clock source */
181
182 if (ourhost->cur_clk != best_src) {
183 struct clk *clk = ourhost->clk_bus[best_src];
184
185 /* turn clock off to card before changing clock source */
186 writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL);
187
188 ourhost->cur_clk = best_src;
189 host->max_clk = clk_get_rate(clk);
190 host->timeout_clk = sdhci_s3c_get_timeout_clk(host);
191
192 ctrl = readl(host->ioaddr + S3C_SDHCI_CONTROL2);
193 ctrl &= ~S3C_SDHCI_CTRL2_SELBASECLK_MASK;
194 ctrl |= best_src << S3C_SDHCI_CTRL2_SELBASECLK_SHIFT;
195 writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL2);
196 }
197
198 /* reconfigure the hardware for new clock rate */
199
200 {
201 struct mmc_ios ios;
202
203 ios.clock = clock;
204
205 if (ourhost->pdata->cfg_card)
206 (ourhost->pdata->cfg_card)(ourhost->pdev, host->ioaddr,
207 &ios, NULL);
208 }
209}
210
211static struct sdhci_ops sdhci_s3c_ops = {
212 .get_max_clock = sdhci_s3c_get_max_clk,
213 .get_timeout_clock = sdhci_s3c_get_timeout_clk,
214 .set_clock = sdhci_s3c_set_clock,
215};
216
217static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
218{
219 struct s3c_sdhci_platdata *pdata = pdev->dev.platform_data;
220 struct device *dev = &pdev->dev;
221 struct sdhci_host *host;
222 struct sdhci_s3c *sc;
223 struct resource *res;
224 int ret, irq, ptr, clks;
225
226 if (!pdata) {
227 dev_err(dev, "no device data specified\n");
228 return -ENOENT;
229 }
230
231 irq = platform_get_irq(pdev, 0);
232 if (irq < 0) {
233 dev_err(dev, "no irq specified\n");
234 return irq;
235 }
236
237 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
238 if (!res) {
239 dev_err(dev, "no memory specified\n");
240 return -ENOENT;
241 }
242
243 host = sdhci_alloc_host(dev, sizeof(struct sdhci_s3c));
244 if (IS_ERR(host)) {
245 dev_err(dev, "sdhci_alloc_host() failed\n");
246 return PTR_ERR(host);
247 }
248
249 sc = sdhci_priv(host);
250
251 sc->host = host;
252 sc->pdev = pdev;
253 sc->pdata = pdata;
254
255 platform_set_drvdata(pdev, host);
256
257 sc->clk_io = clk_get(dev, "hsmmc");
258 if (IS_ERR(sc->clk_io)) {
259 dev_err(dev, "failed to get io clock\n");
260 ret = PTR_ERR(sc->clk_io);
261 goto err_io_clk;
262 }
263
264 /* enable the local io clock and keep it running for the moment. */
265 clk_enable(sc->clk_io);
266
267 for (clks = 0, ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
268 struct clk *clk;
269 char *name = pdata->clocks[ptr];
270
271 if (name == NULL)
272 continue;
273
274 clk = clk_get(dev, name);
275 if (IS_ERR(clk)) {
276 dev_err(dev, "failed to get clock %s\n", name);
277 continue;
278 }
279
280 clks++;
281 sc->clk_bus[ptr] = clk;
282 clk_enable(clk);
283
284 dev_info(dev, "clock source %d: %s (%ld Hz)\n",
285 ptr, name, clk_get_rate(clk));
286 }
287
288 if (clks == 0) {
289 dev_err(dev, "failed to find any bus clocks\n");
290 ret = -ENOENT;
291 goto err_no_busclks;
292 }
293
294 sc->ioarea = request_mem_region(res->start, resource_size(res),
295 mmc_hostname(host->mmc));
296 if (!sc->ioarea) {
297 dev_err(dev, "failed to reserve register area\n");
298 ret = -ENXIO;
299 goto err_req_regs;
300 }
301
302 host->ioaddr = ioremap_nocache(res->start, resource_size(res));
303 if (!host->ioaddr) {
304 dev_err(dev, "failed to map registers\n");
305 ret = -ENXIO;
306 goto err_req_regs;
307 }
308
309 /* Ensure we have minimal gpio selected CMD/CLK/Detect */
310 if (pdata->cfg_gpio)
311 pdata->cfg_gpio(pdev, pdata->max_width);
312
313 host->hw_name = "samsung-hsmmc";
314 host->ops = &sdhci_s3c_ops;
315 host->quirks = 0;
316 host->irq = irq;
317
318 /* Setup quirks for the controller */
319
320 /* Currently with ADMA enabled we are getting some length
321 * interrupts that are not being dealt with, do disable
322 * ADMA until this is sorted out. */
323 host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
324 host->quirks |= SDHCI_QUIRK_32BIT_ADMA_SIZE;
325
326#ifndef CONFIG_MMC_SDHCI_S3C_DMA
327
328 /* we currently see overruns on errors, so disable the SDMA
329 * support as well. */
330 host->quirks |= SDHCI_QUIRK_BROKEN_DMA;
331
332 /* PIO currently has problems with multi-block IO */
333 host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK;
334
335#endif /* CONFIG_MMC_SDHCI_S3C_DMA */
336
337 /* It seems we do not get an DATA transfer complete on non-busy
338 * transfers, not sure if this is a problem with this specific
339 * SDHCI block, or a missing configuration that needs to be set. */
340 host->quirks |= SDHCI_QUIRK_NO_BUSY_IRQ;
341
342 host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR |
343 SDHCI_QUIRK_32BIT_DMA_SIZE);
344
345 ret = sdhci_add_host(host);
346 if (ret) {
347 dev_err(dev, "sdhci_add_host() failed\n");
348 goto err_add_host;
349 }
350
351 return 0;
352
353 err_add_host:
354 release_resource(sc->ioarea);
355 kfree(sc->ioarea);
356
357 err_req_regs:
358 for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
359 clk_disable(sc->clk_bus[ptr]);
360 clk_put(sc->clk_bus[ptr]);
361 }
362
363 err_no_busclks:
364 clk_disable(sc->clk_io);
365 clk_put(sc->clk_io);
366
367 err_io_clk:
368 sdhci_free_host(host);
369
370 return ret;
371}
372
373static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
374{
375 return 0;
376}
377
378#ifdef CONFIG_PM
379
380static int sdhci_s3c_suspend(struct platform_device *dev, pm_message_t pm)
381{
382 struct sdhci_host *host = platform_get_drvdata(dev);
383
384 sdhci_suspend_host(host, pm);
385 return 0;
386}
387
388static int sdhci_s3c_resume(struct platform_device *dev)
389{
390 struct sdhci_host *host = platform_get_drvdata(dev);
391
392 sdhci_resume_host(host);
393 return 0;
394}
395
396#else
397#define sdhci_s3c_suspend NULL
398#define sdhci_s3c_resume NULL
399#endif
400
401static struct platform_driver sdhci_s3c_driver = {
402 .probe = sdhci_s3c_probe,
403 .remove = __devexit_p(sdhci_s3c_remove),
404 .suspend = sdhci_s3c_suspend,
405 .resume = sdhci_s3c_resume,
406 .driver = {
407 .owner = THIS_MODULE,
408 .name = "s3c-sdhci",
409 },
410};
411
412static int __init sdhci_s3c_init(void)
413{
414 return platform_driver_register(&sdhci_s3c_driver);
415}
416
417static void __exit sdhci_s3c_exit(void)
418{
419 platform_driver_unregister(&sdhci_s3c_driver);
420}
421
422module_init(sdhci_s3c_init);
423module_exit(sdhci_s3c_exit);
424
425MODULE_DESCRIPTION("Samsung SDHCI (HSMMC) glue");
426MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
427MODULE_LICENSE("GPL v2");
428MODULE_ALIAS("platform:s3c-sdhci");
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 35789c6edc19..6779b4ecab18 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -584,7 +584,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
584 * longer to time out, but that's much better than having a too-short 584 * longer to time out, but that's much better than having a too-short
585 * timeout value. 585 * timeout value.
586 */ 586 */
587 if ((host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)) 587 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
588 return 0xE; 588 return 0xE;
589 589
590 /* timeout in us */ 590 /* timeout in us */
@@ -1051,12 +1051,19 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
1051 * At least the Marvell CaFe chip gets confused if we set the voltage 1051 * At least the Marvell CaFe chip gets confused if we set the voltage
1052 * and set turn on power at the same time, so set the voltage first. 1052 * and set turn on power at the same time, so set the voltage first.
1053 */ 1053 */
1054 if ((host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)) 1054 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
1055 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 1055 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1056 1056
1057 pwr |= SDHCI_POWER_ON; 1057 pwr |= SDHCI_POWER_ON;
1058 1058
1059 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 1059 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1060
1061 /*
 1062 * Some controllers need an extra 10ms delay before they
1063 * can apply clock after applying power
1064 */
1065 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
1066 mdelay(10);
1060} 1067}
1061 1068
1062/*****************************************************************************\ 1069/*****************************************************************************\
@@ -1382,6 +1389,35 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
1382 sdhci_finish_command(host); 1389 sdhci_finish_command(host);
1383} 1390}
1384 1391
1392#ifdef DEBUG
1393static void sdhci_show_adma_error(struct sdhci_host *host)
1394{
1395 const char *name = mmc_hostname(host->mmc);
1396 u8 *desc = host->adma_desc;
1397 __le32 *dma;
1398 __le16 *len;
1399 u8 attr;
1400
1401 sdhci_dumpregs(host);
1402
1403 while (true) {
1404 dma = (__le32 *)(desc + 4);
1405 len = (__le16 *)(desc + 2);
1406 attr = *desc;
1407
1408 DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
1409 name, desc, le32_to_cpu(*dma), le16_to_cpu(*len), attr);
1410
1411 desc += 8;
1412
1413 if (attr & 2)
1414 break;
1415 }
1416}
1417#else
1418static void sdhci_show_adma_error(struct sdhci_host *host) { }
1419#endif
1420
1385static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) 1421static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
1386{ 1422{
1387 BUG_ON(intmask == 0); 1423 BUG_ON(intmask == 0);
@@ -1411,8 +1447,11 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
1411 host->data->error = -ETIMEDOUT; 1447 host->data->error = -ETIMEDOUT;
1412 else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT)) 1448 else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
1413 host->data->error = -EILSEQ; 1449 host->data->error = -EILSEQ;
1414 else if (intmask & SDHCI_INT_ADMA_ERROR) 1450 else if (intmask & SDHCI_INT_ADMA_ERROR) {
1451 printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc));
1452 sdhci_show_adma_error(host);
1415 host->data->error = -EIO; 1453 host->data->error = -EIO;
1454 }
1416 1455
1417 if (host->data->error) 1456 if (host->data->error)
1418 sdhci_finish_data(host); 1457 sdhci_finish_data(host);
@@ -1729,7 +1768,10 @@ int sdhci_add_host(struct sdhci_host *host)
1729 mmc->ops = &sdhci_ops; 1768 mmc->ops = &sdhci_ops;
1730 mmc->f_min = host->max_clk / 256; 1769 mmc->f_min = host->max_clk / 256;
1731 mmc->f_max = host->max_clk; 1770 mmc->f_max = host->max_clk;
1732 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; 1771 mmc->caps = MMC_CAP_SDIO_IRQ;
1772
1773 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
1774 mmc->caps |= MMC_CAP_4_BIT_DATA;
1733 1775
1734 if (caps & SDHCI_CAN_DO_HISPD) 1776 if (caps & SDHCI_CAN_DO_HISPD)
1735 mmc->caps |= MMC_CAP_SD_HIGHSPEED; 1777 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
@@ -1802,7 +1844,7 @@ int sdhci_add_host(struct sdhci_host *host)
1802 /* 1844 /*
1803 * Maximum block count. 1845 * Maximum block count.
1804 */ 1846 */
1805 mmc->max_blk_count = 65535; 1847 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
1806 1848
1807 /* 1849 /*
1808 * Init tasklets. 1850 * Init tasklets.
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 2de08349c3ca..831ddf7dcb49 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -226,6 +226,12 @@ struct sdhci_host {
226#define SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET (1<<19) 226#define SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET (1<<19)
227/* Controller has to be forced to use block size of 2048 bytes */ 227/* Controller has to be forced to use block size of 2048 bytes */
228#define SDHCI_QUIRK_FORCE_BLK_SZ_2048 (1<<20) 228#define SDHCI_QUIRK_FORCE_BLK_SZ_2048 (1<<20)
229/* Controller cannot do multi-block transfers */
230#define SDHCI_QUIRK_NO_MULTIBLOCK (1<<21)
231/* Controller can only handle 1-bit data transfers */
232#define SDHCI_QUIRK_FORCE_1_BIT_DATA (1<<22)
233/* Controller needs 10ms delay between applying power and clock */
234#define SDHCI_QUIRK_DELAY_AFTER_POWER (1<<23)
229 235
230 int irq; /* Device IRQ */ 236 int irq; /* Device IRQ */
231 void __iomem * ioaddr; /* Mapped address */ 237 void __iomem * ioaddr; /* Mapped address */
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
new file mode 100644
index 000000000000..632858a94376
--- /dev/null
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -0,0 +1,1362 @@
1/*
2 * drivers/mmc/host/via-sdmmc.c - VIA SD/MMC Card Reader driver
3 * Copyright (c) 2008, VIA Technologies Inc. All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or (at
8 * your option) any later version.
9 */
10
11#include <linux/pci.h>
12#include <linux/dma-mapping.h>
13#include <linux/highmem.h>
14#include <linux/delay.h>
15
16#include <linux/mmc/host.h>
17
18#define DRV_NAME "via_sdmmc"
19
20#define PCI_DEVICE_ID_VIA_9530 0x9530
21
22#define VIA_CRDR_SDC_OFF 0x200
23#define VIA_CRDR_DDMA_OFF 0x400
24#define VIA_CRDR_PCICTRL_OFF 0x600
25
26#define VIA_CRDR_MIN_CLOCK 375000
27#define VIA_CRDR_MAX_CLOCK 48000000
28
29/*
30 * PCI registers
31 */
32
33#define VIA_CRDR_PCI_WORK_MODE 0x40
34#define VIA_CRDR_PCI_DBG_MODE 0x41
35
36/*
37 * SDC MMIO Registers
38 */
39
40#define VIA_CRDR_SDCTRL 0x0
41#define VIA_CRDR_SDCTRL_START 0x01
42#define VIA_CRDR_SDCTRL_WRITE 0x04
43#define VIA_CRDR_SDCTRL_SINGLE_WR 0x10
44#define VIA_CRDR_SDCTRL_SINGLE_RD 0x20
45#define VIA_CRDR_SDCTRL_MULTI_WR 0x30
46#define VIA_CRDR_SDCTRL_MULTI_RD 0x40
47#define VIA_CRDR_SDCTRL_STOP 0x70
48
49#define VIA_CRDR_SDCTRL_RSP_NONE 0x0
50#define VIA_CRDR_SDCTRL_RSP_R1 0x10000
51#define VIA_CRDR_SDCTRL_RSP_R2 0x20000
52#define VIA_CRDR_SDCTRL_RSP_R3 0x30000
53#define VIA_CRDR_SDCTRL_RSP_R1B 0x90000
54
55#define VIA_CRDR_SDCARG 0x4
56
57#define VIA_CRDR_SDBUSMODE 0x8
58#define VIA_CRDR_SDMODE_4BIT 0x02
59#define VIA_CRDR_SDMODE_CLK_ON 0x40
60
61#define VIA_CRDR_SDBLKLEN 0xc
62/*
63 * Bit 0 -Bit 10 : Block length. So, the maximum block length should be 2048.
64 * Bit 11 - Bit 13 : Reserved.
65 * GPIDET : Select GPI pin to detect card, GPI means CR_CD# in top design.
66 * INTEN : Enable SD host interrupt.
 67 * Bit 16 - Bit 31 : Block count. So, the maximum block count should be 65536.
68 */
69#define VIA_CRDR_SDBLKLEN_GPIDET 0x2000
70#define VIA_CRDR_SDBLKLEN_INTEN 0x8000
71#define VIA_CRDR_MAX_BLOCK_COUNT 65536
72#define VIA_CRDR_MAX_BLOCK_LENGTH 2048
73
74#define VIA_CRDR_SDRESP0 0x10
75#define VIA_CRDR_SDRESP1 0x14
76#define VIA_CRDR_SDRESP2 0x18
77#define VIA_CRDR_SDRESP3 0x1c
78
79#define VIA_CRDR_SDCURBLKCNT 0x20
80
81#define VIA_CRDR_SDINTMASK 0x24
82/*
83 * MBDIE : Multiple Blocks transfer Done Interrupt Enable
84 * BDDIE : Block Data transfer Done Interrupt Enable
85 * CIRIE : Card Insertion or Removal Interrupt Enable
86 * CRDIE : Command-Response transfer Done Interrupt Enable
87 * CRTOIE : Command-Response response TimeOut Interrupt Enable
88 * ASCRDIE : Auto Stop Command-Response transfer Done Interrupt Enable
89 * DTIE : Data access Timeout Interrupt Enable
90 * SCIE : reSponse CRC error Interrupt Enable
91 * RCIE : Read data CRC error Interrupt Enable
92 * WCIE : Write data CRC error Interrupt Enable
93 */
94#define VIA_CRDR_SDINTMASK_MBDIE 0x10
95#define VIA_CRDR_SDINTMASK_BDDIE 0x20
96#define VIA_CRDR_SDINTMASK_CIRIE 0x80
97#define VIA_CRDR_SDINTMASK_CRDIE 0x200
98#define VIA_CRDR_SDINTMASK_CRTOIE 0x400
99#define VIA_CRDR_SDINTMASK_ASCRDIE 0x800
100#define VIA_CRDR_SDINTMASK_DTIE 0x1000
101#define VIA_CRDR_SDINTMASK_SCIE 0x2000
102#define VIA_CRDR_SDINTMASK_RCIE 0x4000
103#define VIA_CRDR_SDINTMASK_WCIE 0x8000
104
105#define VIA_CRDR_SDACTIVE_INTMASK \
106 (VIA_CRDR_SDINTMASK_MBDIE | VIA_CRDR_SDINTMASK_CIRIE \
107 | VIA_CRDR_SDINTMASK_CRDIE | VIA_CRDR_SDINTMASK_CRTOIE \
108 | VIA_CRDR_SDINTMASK_DTIE | VIA_CRDR_SDINTMASK_SCIE \
109 | VIA_CRDR_SDINTMASK_RCIE | VIA_CRDR_SDINTMASK_WCIE)
110
111#define VIA_CRDR_SDSTATUS 0x28
112/*
113 * CECC : Reserved
114 * WP : SD card Write Protect status
115 * SLOTD : Reserved
116 * SLOTG : SD SLOT status(Gpi pin status)
117 * MBD : Multiple Blocks transfer Done interrupt status
118 * BDD : Block Data transfer Done interrupt status
119 * CD : Reserved
120 * CIR : Card Insertion or Removal interrupt detected on GPI pin
121 * IO : Reserved
122 * CRD : Command-Response transfer Done interrupt status
123 * CRTO : Command-Response response TimeOut interrupt status
124 * ASCRDIE : Auto Stop Command-Response transfer Done interrupt status
125 * DT : Data access Timeout interrupt status
126 * SC : reSponse CRC error interrupt status
127 * RC : Read data CRC error interrupt status
128 * WC : Write data CRC error interrupt status
129 */
130#define VIA_CRDR_SDSTS_CECC 0x01
131#define VIA_CRDR_SDSTS_WP 0x02
132#define VIA_CRDR_SDSTS_SLOTD 0x04
133#define VIA_CRDR_SDSTS_SLOTG 0x08
134#define VIA_CRDR_SDSTS_MBD 0x10
135#define VIA_CRDR_SDSTS_BDD 0x20
136#define VIA_CRDR_SDSTS_CD 0x40
137#define VIA_CRDR_SDSTS_CIR 0x80
138#define VIA_CRDR_SDSTS_IO 0x100
139#define VIA_CRDR_SDSTS_CRD 0x200
140#define VIA_CRDR_SDSTS_CRTO 0x400
141#define VIA_CRDR_SDSTS_ASCRDIE 0x800
142#define VIA_CRDR_SDSTS_DT 0x1000
143#define VIA_CRDR_SDSTS_SC 0x2000
144#define VIA_CRDR_SDSTS_RC 0x4000
145#define VIA_CRDR_SDSTS_WC 0x8000
146
147#define VIA_CRDR_SDSTS_IGN_MASK\
148 (VIA_CRDR_SDSTS_BDD | VIA_CRDR_SDSTS_ASCRDIE | VIA_CRDR_SDSTS_IO)
149#define VIA_CRDR_SDSTS_INT_MASK \
150 (VIA_CRDR_SDSTS_MBD | VIA_CRDR_SDSTS_BDD | VIA_CRDR_SDSTS_CD \
151 | VIA_CRDR_SDSTS_CIR | VIA_CRDR_SDSTS_IO | VIA_CRDR_SDSTS_CRD \
152 | VIA_CRDR_SDSTS_CRTO | VIA_CRDR_SDSTS_ASCRDIE | VIA_CRDR_SDSTS_DT \
153 | VIA_CRDR_SDSTS_SC | VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC)
154#define VIA_CRDR_SDSTS_W1C_MASK \
155 (VIA_CRDR_SDSTS_CECC | VIA_CRDR_SDSTS_MBD | VIA_CRDR_SDSTS_BDD \
156 | VIA_CRDR_SDSTS_CD | VIA_CRDR_SDSTS_CIR | VIA_CRDR_SDSTS_CRD \
157 | VIA_CRDR_SDSTS_CRTO | VIA_CRDR_SDSTS_ASCRDIE | VIA_CRDR_SDSTS_DT \
158 | VIA_CRDR_SDSTS_SC | VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC)
159#define VIA_CRDR_SDSTS_CMD_MASK \
160 (VIA_CRDR_SDSTS_CRD | VIA_CRDR_SDSTS_CRTO | VIA_CRDR_SDSTS_SC)
161#define VIA_CRDR_SDSTS_DATA_MASK\
162 (VIA_CRDR_SDSTS_MBD | VIA_CRDR_SDSTS_DT \
163 | VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC)
164
165#define VIA_CRDR_SDSTATUS2 0x2a
166/*
167 * CFE : Enable SD host automatic Clock FReezing
168 */
169#define VIA_CRDR_SDSTS_CFE 0x80
170
171#define VIA_CRDR_SDRSPTMO 0x2C
172
173#define VIA_CRDR_SDCLKSEL 0x30
174
175#define VIA_CRDR_SDEXTCTRL 0x34
176#define VIS_CRDR_SDEXTCTRL_AUTOSTOP_SD 0x01
177#define VIS_CRDR_SDEXTCTRL_SHIFT_9 0x02
178#define VIS_CRDR_SDEXTCTRL_MMC_8BIT 0x04
179#define VIS_CRDR_SDEXTCTRL_RELD_BLK 0x08
180#define VIS_CRDR_SDEXTCTRL_BAD_CMDA 0x10
181#define VIS_CRDR_SDEXTCTRL_BAD_DATA 0x20
182#define VIS_CRDR_SDEXTCTRL_AUTOSTOP_SPI 0x40
183#define VIA_CRDR_SDEXTCTRL_HISPD 0x80
184/* 0x38-0xFF reserved */
185
186/*
187 * Data DMA Control Registers
188 */
189
190#define VIA_CRDR_DMABASEADD 0x0
191#define VIA_CRDR_DMACOUNTER 0x4
192
193#define VIA_CRDR_DMACTRL 0x8
194/*
195 * DIR :Transaction Direction
196 * 0 : From card to memory
197 * 1 : From memory to card
198 */
199#define VIA_CRDR_DMACTRL_DIR 0x100
200#define VIA_CRDR_DMACTRL_ENIRQ 0x10000
201#define VIA_CRDR_DMACTRL_SFTRST 0x1000000
202
203#define VIA_CRDR_DMASTS 0xc
204
205#define VIA_CRDR_DMASTART 0x10
206/*0x14-0xFF reserved*/
207
208/*
209 * PCI Control Registers
210 */
211
212/*0x0 - 0x1 reserved*/
213#define VIA_CRDR_PCICLKGATT 0x2
214/*
215 * SFTRST :
216 * 0 : Soft reset all the controller and it will be de-asserted automatically
217 * 1 : Soft reset is de-asserted
218 */
219#define VIA_CRDR_PCICLKGATT_SFTRST 0x01
220/*
221 * 3V3 : Pad power select
222 * 0 : 1.8V
223 * 1 : 3.3V
 224 * NOTE : No matter what the actual value should be, this bit always
225 * read as 0. This is a hardware bug.
226 */
227#define VIA_CRDR_PCICLKGATT_3V3 0x10
228/*
229 * PAD_PWRON : Pad Power on/off select
230 * 0 : Power off
231 * 1 : Power on
 232 * NOTE : No matter what the actual value should be, this bit always
233 * read as 0. This is a hardware bug.
234 */
235#define VIA_CRDR_PCICLKGATT_PAD_PWRON 0x20
236
237#define VIA_CRDR_PCISDCCLK 0x5
238
239#define VIA_CRDR_PCIDMACLK 0x7
240#define VIA_CRDR_PCIDMACLK_SDC 0x2
241
242#define VIA_CRDR_PCIINTCTRL 0x8
243#define VIA_CRDR_PCIINTCTRL_SDCIRQEN 0x04
244
245#define VIA_CRDR_PCIINTSTATUS 0x9
246#define VIA_CRDR_PCIINTSTATUS_SDC 0x04
247
248#define VIA_CRDR_PCITMOCTRL 0xa
249#define VIA_CRDR_PCITMOCTRL_NO 0x0
250#define VIA_CRDR_PCITMOCTRL_32US 0x1
251#define VIA_CRDR_PCITMOCTRL_256US 0x2
252#define VIA_CRDR_PCITMOCTRL_1024US 0x3
253#define VIA_CRDR_PCITMOCTRL_256MS 0x4
254#define VIA_CRDR_PCITMOCTRL_512MS 0x5
255#define VIA_CRDR_PCITMOCTRL_1024MS 0x6
256
257/*0xB-0xFF reserved*/
258
259enum PCI_HOST_CLK_CONTROL {
260 PCI_CLK_375K = 0x03,
261 PCI_CLK_8M = 0x04,
262 PCI_CLK_12M = 0x00,
263 PCI_CLK_16M = 0x05,
264 PCI_CLK_24M = 0x01,
265 PCI_CLK_33M = 0x06,
266 PCI_CLK_48M = 0x02
267};
268
269struct sdhcreg {
270 u32 sdcontrol_reg;
271 u32 sdcmdarg_reg;
272 u32 sdbusmode_reg;
273 u32 sdblklen_reg;
274 u32 sdresp_reg[4];
275 u32 sdcurblkcnt_reg;
276 u32 sdintmask_reg;
277 u32 sdstatus_reg;
278 u32 sdrsptmo_reg;
279 u32 sdclksel_reg;
280 u32 sdextctrl_reg;
281};
282
283struct pcictrlreg {
284 u8 reserve[2];
285 u8 pciclkgat_reg;
286 u8 pcinfcclk_reg;
287 u8 pcimscclk_reg;
288 u8 pcisdclk_reg;
289 u8 pcicaclk_reg;
290 u8 pcidmaclk_reg;
291 u8 pciintctrl_reg;
292 u8 pciintstatus_reg;
293 u8 pcitmoctrl_reg;
294 u8 Resv;
295};
296
297struct via_crdr_mmc_host {
298 struct mmc_host *mmc;
299 struct mmc_request *mrq;
300 struct mmc_command *cmd;
301 struct mmc_data *data;
302
303 void __iomem *mmiobase;
304 void __iomem *sdhc_mmiobase;
305 void __iomem *ddma_mmiobase;
306 void __iomem *pcictrl_mmiobase;
307
308 struct pcictrlreg pm_pcictrl_reg;
309 struct sdhcreg pm_sdhc_reg;
310
311 struct work_struct carddet_work;
312 struct tasklet_struct finish_tasklet;
313
314 struct timer_list timer;
315 spinlock_t lock;
316 u8 power;
317 int reject;
318 unsigned int quirks;
319};
320
321/* some devices need a very long delay for power to stabilize */
322#define VIA_CRDR_QUIRK_300MS_PWRDELAY 0x0001
323
324static struct pci_device_id via_ids[] = {
325 {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_9530,
326 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,},
327 {0,}
328};
329
330MODULE_DEVICE_TABLE(pci, via_ids);
331
/* Debug helper: dump the SD controller (SDC) MMIO register bank via pr_debug. */
332static void via_print_sdchc(struct via_crdr_mmc_host *host)
333{
334	void __iomem *addrbase = host->sdhc_mmiobase;
335
336	pr_debug("SDC MMIO Registers:\n");
337	pr_debug("SDCONTROL=%08x, SDCMDARG=%08x, SDBUSMODE=%08x\n",
338		readl(addrbase + VIA_CRDR_SDCTRL),
339		readl(addrbase + VIA_CRDR_SDCARG),
340		readl(addrbase + VIA_CRDR_SDBUSMODE));
341	pr_debug("SDBLKLEN=%08x, SDCURBLKCNT=%08x, SDINTMASK=%08x\n",
342		readl(addrbase + VIA_CRDR_SDBLKLEN),
343		readl(addrbase + VIA_CRDR_SDCURBLKCNT),
344		readl(addrbase + VIA_CRDR_SDINTMASK));
345	pr_debug("SDSTATUS=%08x, SDCLKSEL=%08x, SDEXTCTRL=%08x\n",
346		readl(addrbase + VIA_CRDR_SDSTATUS),
347		readl(addrbase + VIA_CRDR_SDCLKSEL),
348		readl(addrbase + VIA_CRDR_SDEXTCTRL));
349}
350
/* Debug helper: dump the PCI-control MMIO register bank via pr_debug. */
351static void via_print_pcictrl(struct via_crdr_mmc_host *host)
352{
353	void __iomem *addrbase = host->pcictrl_mmiobase;
354
355	pr_debug("PCI Control Registers:\n");
356	pr_debug("PCICLKGATT=%02x, PCISDCCLK=%02x, PCIDMACLK=%02x\n",
357		readb(addrbase + VIA_CRDR_PCICLKGATT),
358		readb(addrbase + VIA_CRDR_PCISDCCLK),
359		readb(addrbase + VIA_CRDR_PCIDMACLK));
360	pr_debug("PCIINTCTRL=%02x, PCIINTSTATUS=%02x\n",
361		readb(addrbase + VIA_CRDR_PCIINTCTRL),
362		readb(addrbase + VIA_CRDR_PCIINTSTATUS));
363}
364
/*
 * Snapshot the PCI-control register bank into host->pm_pcictrl_reg
 * (used around controller reset and suspend/resume).
 */
365static void via_save_pcictrlreg(struct via_crdr_mmc_host *host)
366{
367	struct pcictrlreg *pm_pcictrl_reg;
368	void __iomem *addrbase;
369
370	pm_pcictrl_reg = &(host->pm_pcictrl_reg);
371	addrbase = host->pcictrl_mmiobase;
372
373	pm_pcictrl_reg->pciclkgat_reg = readb(addrbase + VIA_CRDR_PCICLKGATT);
	/*
	 * The 3V3 and PAD_PWRON bits always read back as 0 (hardware bug,
	 * see the register definitions above), so force them on in the
	 * saved copy to avoid restoring the pads powered off.
	 */
374	pm_pcictrl_reg->pciclkgat_reg |=
375	    VIA_CRDR_PCICLKGATT_3V3 | VIA_CRDR_PCICLKGATT_PAD_PWRON;
376	pm_pcictrl_reg->pcisdclk_reg = readb(addrbase + VIA_CRDR_PCISDCCLK);
377	pm_pcictrl_reg->pcidmaclk_reg = readb(addrbase + VIA_CRDR_PCIDMACLK);
378	pm_pcictrl_reg->pciintctrl_reg = readb(addrbase + VIA_CRDR_PCIINTCTRL);
379	pm_pcictrl_reg->pciintstatus_reg =
380	    readb(addrbase + VIA_CRDR_PCIINTSTATUS);
381	pm_pcictrl_reg->pcitmoctrl_reg = readb(addrbase + VIA_CRDR_PCITMOCTRL);
382}
383
/* Write the snapshot taken by via_save_pcictrlreg() back to the hardware. */
384static void via_restore_pcictrlreg(struct via_crdr_mmc_host *host)
385{
386	struct pcictrlreg *pm_pcictrl_reg;
387	void __iomem *addrbase;
388
389	pm_pcictrl_reg = &(host->pm_pcictrl_reg);
390	addrbase = host->pcictrl_mmiobase;
391
392	writeb(pm_pcictrl_reg->pciclkgat_reg, addrbase + VIA_CRDR_PCICLKGATT);
393	writeb(pm_pcictrl_reg->pcisdclk_reg, addrbase + VIA_CRDR_PCISDCCLK);
394	writeb(pm_pcictrl_reg->pcidmaclk_reg, addrbase + VIA_CRDR_PCIDMACLK);
395	writeb(pm_pcictrl_reg->pciintctrl_reg, addrbase + VIA_CRDR_PCIINTCTRL);
396	writeb(pm_pcictrl_reg->pciintstatus_reg,
397		addrbase + VIA_CRDR_PCIINTSTATUS);
398	writeb(pm_pcictrl_reg->pcitmoctrl_reg, addrbase + VIA_CRDR_PCITMOCTRL);
399}
400
/* Snapshot the SD controller register bank into host->pm_sdhc_reg. */
401static void via_save_sdcreg(struct via_crdr_mmc_host *host)
402{
403	struct sdhcreg *pm_sdhc_reg;
404	void __iomem *addrbase;
405
406	pm_sdhc_reg = &(host->pm_sdhc_reg);
407	addrbase = host->sdhc_mmiobase;
408
409	pm_sdhc_reg->sdcontrol_reg = readl(addrbase + VIA_CRDR_SDCTRL);
410	pm_sdhc_reg->sdcmdarg_reg = readl(addrbase + VIA_CRDR_SDCARG);
411	pm_sdhc_reg->sdbusmode_reg = readl(addrbase + VIA_CRDR_SDBUSMODE);
412	pm_sdhc_reg->sdblklen_reg = readl(addrbase + VIA_CRDR_SDBLKLEN);
413	pm_sdhc_reg->sdcurblkcnt_reg = readl(addrbase + VIA_CRDR_SDCURBLKCNT);
414	pm_sdhc_reg->sdintmask_reg = readl(addrbase + VIA_CRDR_SDINTMASK);
415	pm_sdhc_reg->sdstatus_reg = readl(addrbase + VIA_CRDR_SDSTATUS);
416	pm_sdhc_reg->sdrsptmo_reg = readl(addrbase + VIA_CRDR_SDRSPTMO);
417	pm_sdhc_reg->sdclksel_reg = readl(addrbase + VIA_CRDR_SDCLKSEL);
418	pm_sdhc_reg->sdextctrl_reg = readl(addrbase + VIA_CRDR_SDEXTCTRL);
419}
420
/* Write the snapshot taken by via_save_sdcreg() back to the hardware. */
421static void via_restore_sdcreg(struct via_crdr_mmc_host *host)
422{
423	struct sdhcreg *pm_sdhc_reg;
424	void __iomem *addrbase;
425
426	pm_sdhc_reg = &(host->pm_sdhc_reg);
427	addrbase = host->sdhc_mmiobase;
428
429	writel(pm_sdhc_reg->sdcontrol_reg, addrbase + VIA_CRDR_SDCTRL);
430	writel(pm_sdhc_reg->sdcmdarg_reg, addrbase + VIA_CRDR_SDCARG);
431	writel(pm_sdhc_reg->sdbusmode_reg, addrbase + VIA_CRDR_SDBUSMODE);
432	writel(pm_sdhc_reg->sdblklen_reg, addrbase + VIA_CRDR_SDBLKLEN);
433	writel(pm_sdhc_reg->sdcurblkcnt_reg, addrbase + VIA_CRDR_SDCURBLKCNT);
434	writel(pm_sdhc_reg->sdintmask_reg, addrbase + VIA_CRDR_SDINTMASK);
435	writel(pm_sdhc_reg->sdstatus_reg, addrbase + VIA_CRDR_SDSTATUS);
436	writel(pm_sdhc_reg->sdrsptmo_reg, addrbase + VIA_CRDR_SDRSPTMO);
437	writel(pm_sdhc_reg->sdclksel_reg, addrbase + VIA_CRDR_SDCLKSEL);
438	writel(pm_sdhc_reg->sdextctrl_reg, addrbase + VIA_CRDR_SDEXTCTRL);
439}
440
/*
 * Sleep after switching pad power on: 300 ms for devices with the
 * slow-power-stabilization quirk, 3 ms otherwise.  Sleeps, so must not
 * be called with a spinlock held.
 */
441static void via_pwron_sleep(struct via_crdr_mmc_host *sdhost)
442{
443	if (sdhost->quirks & VIA_CRDR_QUIRK_300MS_PWRDELAY)
444		msleep(300);
445	else
446		msleep(3);
447}
448
/*
 * Program and start a single data-DMA transaction.
 * @dmaaddr: bus address of the (single) buffer
 * @count:   transfer length in bytes
 * @dir:     non-zero for memory-to-card, zero for card-to-memory
 * @enirq:   non-zero to raise an interrupt on DMA completion
 */
449static void via_set_ddma(struct via_crdr_mmc_host *host,
450			 dma_addr_t dmaaddr, u32 count, int dir, int enirq)
451{
452	void __iomem *addrbase;
453	u32 ctrl_data = 0;
454
455	if (enirq)
456		ctrl_data |= VIA_CRDR_DMACTRL_ENIRQ;
457
458	if (dir)
459		ctrl_data |= VIA_CRDR_DMACTRL_DIR;
460
461	addrbase = host->ddma_mmiobase;
462
463	writel(dmaaddr, addrbase + VIA_CRDR_DMABASEADD);
464	writel(count, addrbase + VIA_CRDR_DMACOUNTER);
465	writel(ctrl_data, addrbase + VIA_CRDR_DMACTRL);
466	writel(0x01, addrbase + VIA_CRDR_DMASTART);
467
468	/* It seems that our DMA can not work normally with 375kHz clock */
469	/* FIXME: don't brute-force 8MHz but use PIO at 375kHz !! */
470	addrbase = host->pcictrl_mmiobase;
471	if (readb(addrbase + VIA_CRDR_PCISDCCLK) == PCI_CLK_375K) {
472		dev_info(host->mmc->parent, "forcing card speed to 8MHz\n");
473		writeb(PCI_CLK_8M, addrbase + VIA_CRDR_PCISDCCLK);
474	}
475}
476
/*
 * Map the request's scatterlist for DMA, start the DMA engine, and
 * program the block-length/block-count register for the transfer.
 */
477static void via_sdc_preparedata(struct via_crdr_mmc_host *host,
478				struct mmc_data *data)
479{
480	void __iomem *addrbase;
481	u32 blk_reg;
482	int count;
483
484	WARN_ON(host->data);
485
486	/* Sanity checks */
487	BUG_ON(data->blksz > host->mmc->max_blk_size);
488	BUG_ON(data->blocks > host->mmc->max_blk_count);
489
490	host->data = data;
491
492	count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
493		((data->flags & MMC_DATA_READ) ?
494		PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE));
	/* The DMA engine takes one base+count pair, so the mapping must
	 * have collapsed to exactly one segment. */
495	BUG_ON(count != 1);
496
497	via_set_ddma(host, sg_dma_address(data->sg), sg_dma_len(data->sg),
498		     (data->flags & MMC_DATA_WRITE) ? 1 : 0, 1);
499
500	addrbase = host->sdhc_mmiobase;
501
	/* Hardware wants blksz-1 in bits 0-10 and block count in bits 16-31. */
502	blk_reg = data->blksz - 1;
503	blk_reg |= VIA_CRDR_SDBLKLEN_GPIDET | VIA_CRDR_SDBLKLEN_INTEN;
504	blk_reg |= (data->blocks) << 16;
505
506	writel(blk_reg, addrbase + VIA_CRDR_SDBLKLEN);
507}
508
/*
 * Unpack the four response registers into cmd->resp[].  The controller
 * stores the response bytes shifted relative to the MMC core's layout,
 * so each word is reassembled byte-by-byte; long (136-bit, MMC_RSP_136)
 * and short responses use different layouts.
 */
509static void via_sdc_get_response(struct via_crdr_mmc_host *host,
510				 struct mmc_command *cmd)
511{
512	void __iomem *addrbase = host->sdhc_mmiobase;
513	u32 dwdata0 = readl(addrbase + VIA_CRDR_SDRESP0);
514	u32 dwdata1 = readl(addrbase + VIA_CRDR_SDRESP1);
515	u32 dwdata2 = readl(addrbase + VIA_CRDR_SDRESP2);
516	u32 dwdata3 = readl(addrbase + VIA_CRDR_SDRESP3);
517
518	if (cmd->flags & MMC_RSP_136) {
519		cmd->resp[0] = ((u8) (dwdata1)) |
520		    (((u8) (dwdata0 >> 24)) << 8) |
521		    (((u8) (dwdata0 >> 16)) << 16) |
522		    (((u8) (dwdata0 >> 8)) << 24);
523
524		cmd->resp[1] = ((u8) (dwdata2)) |
525		    (((u8) (dwdata1 >> 24)) << 8) |
526		    (((u8) (dwdata1 >> 16)) << 16) |
527		    (((u8) (dwdata1 >> 8)) << 24);
528
529		cmd->resp[2] = ((u8) (dwdata3)) |
530		    (((u8) (dwdata2 >> 24)) << 8) |
531		    (((u8) (dwdata2 >> 16)) << 16) |
532		    (((u8) (dwdata2 >> 8)) << 24);
533
534		cmd->resp[3] = 0xff |
535		    ((((u8) (dwdata3 >> 24))) << 8) |
536		    (((u8) (dwdata3 >> 16)) << 16) |
537		    (((u8) (dwdata3 >> 8)) << 24);
538	} else {
539		dwdata0 >>= 8;
540		cmd->resp[0] = ((dwdata0 & 0xff) << 24) |
541		    (((dwdata0 >> 8) & 0xff) << 16) |
542		    (((dwdata0 >> 16) & 0xff) << 8) | (dwdata1 & 0xff);
543
544		dwdata1 >>= 8;
545		cmd->resp[1] = ((dwdata1 & 0xff) << 24) |
546		    (((dwdata1 >> 8) & 0xff) << 16) |
547		    (((dwdata1 >> 16) & 0xff) << 8);
548	}
549}
550
/*
 * Build the SDCTRL command word (opcode, response type, transfer mode),
 * set up data DMA if the command carries data, and kick off the command.
 * Also arms a 1-second watchdog timer; caller holds host->lock.
 */
551static void via_sdc_send_command(struct via_crdr_mmc_host *host,
552				 struct mmc_command *cmd)
553{
554	void __iomem *addrbase;
555	struct mmc_data *data;
556	u32 cmdctrl = 0;
557
558	WARN_ON(host->cmd);
559
560	data = cmd->data;
	/* Watchdog: via_sdc_timeout() fires if no interrupt within 1s. */
561	mod_timer(&host->timer, jiffies + HZ);
562	host->cmd = cmd;
563
564	/*Command index*/
565	cmdctrl = cmd->opcode << 8;
566
567	/*Response type*/
568	switch (mmc_resp_type(cmd)) {
569	case MMC_RSP_NONE:
570		cmdctrl |= VIA_CRDR_SDCTRL_RSP_NONE;
571		break;
572	case MMC_RSP_R1:
573		cmdctrl |= VIA_CRDR_SDCTRL_RSP_R1;
574		break;
575	case MMC_RSP_R1B:
576		cmdctrl |= VIA_CRDR_SDCTRL_RSP_R1B;
577		break;
578	case MMC_RSP_R2:
579		cmdctrl |= VIA_CRDR_SDCTRL_RSP_R2;
580		break;
581	case MMC_RSP_R3:
582		cmdctrl |= VIA_CRDR_SDCTRL_RSP_R3;
583		break;
584	default:
585		pr_err("%s: cmd->flag is not valid\n", mmc_hostname(host->mmc));
586		break;
587	}
588
589	if (!(cmd->data))
590		goto nodata;
591
592	via_sdc_preparedata(host, data);
593
594	/*Command control*/
595	if (data->blocks > 1) {
596		if (data->flags & MMC_DATA_WRITE) {
597			cmdctrl |= VIA_CRDR_SDCTRL_WRITE;
598			cmdctrl |= VIA_CRDR_SDCTRL_MULTI_WR;
599		} else {
600			cmdctrl |= VIA_CRDR_SDCTRL_MULTI_RD;
601		}
602	} else {
603		if (data->flags & MMC_DATA_WRITE) {
604			cmdctrl |= VIA_CRDR_SDCTRL_WRITE;
605			cmdctrl |= VIA_CRDR_SDCTRL_SINGLE_WR;
606		} else {
607			cmdctrl |= VIA_CRDR_SDCTRL_SINGLE_RD;
608		}
609	}
610
611nodata:
612	if (cmd == host->mrq->stop)
613		cmdctrl |= VIA_CRDR_SDCTRL_STOP;
614
615	cmdctrl |= VIA_CRDR_SDCTRL_START;
616
	/* Writing SDCTRL with the START bit launches the command. */
617	addrbase = host->sdhc_mmiobase;
618	writel(cmd->arg, addrbase + VIA_CRDR_SDCARG);
619	writel(cmdctrl, addrbase + VIA_CRDR_SDCTRL);
620}
621
/*
 * Complete the data phase: account transferred bytes (zero on error),
 * unmap the DMA buffer, then either issue the stop command or schedule
 * the request-finishing tasklet.
 */
622static void via_sdc_finish_data(struct via_crdr_mmc_host *host)
623{
624	struct mmc_data *data;
625
626	BUG_ON(!host->data);
627
628	data = host->data;
629	host->data = NULL;
630
631	if (data->error)
632		data->bytes_xfered = 0;
633	else
634		data->bytes_xfered = data->blocks * data->blksz;
635
636	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
637		((data->flags & MMC_DATA_READ) ?
638		PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE));
639
640	if (data->stop)
641		via_sdc_send_command(host, data->stop);
642	else
643		tasklet_schedule(&host->finish_tasklet);
644}
645
/*
 * Complete the command phase: read back the response and, for commands
 * without a data phase, schedule the request-finishing tasklet.
 */
646static void via_sdc_finish_command(struct via_crdr_mmc_host *host)
647{
648	via_sdc_get_response(host, host->cmd);
649
650	host->cmd->error = 0;
651
652	if (!host->cmd->data)
653		tasklet_schedule(&host->finish_tasklet);
654
655	host->cmd = NULL;
656}
657
/*
 * mmc_host_ops.request: entry point for a new MMC request.  Routes the
 * DMA clock to the SD controller, clears stale write-1-to-clear status
 * bits, then starts the command -- or fails the request with -ENOMEDIUM
 * when no card is present (SLOTG clear) or the host is rejecting I/O.
 */
658static void via_sdc_request(struct mmc_host *mmc, struct mmc_request *mrq)
659{
660	void __iomem *addrbase;
661	struct via_crdr_mmc_host *host;
662	unsigned long flags;
663	u16 status;
664
665	host = mmc_priv(mmc);
666
667	spin_lock_irqsave(&host->lock, flags);
668
669	addrbase = host->pcictrl_mmiobase;
670	writeb(VIA_CRDR_PCIDMACLK_SDC, addrbase + VIA_CRDR_PCIDMACLK);
671
	/* Ack any leftover status bits before starting the new request. */
672	status = readw(host->sdhc_mmiobase + VIA_CRDR_SDSTATUS);
673	status &= VIA_CRDR_SDSTS_W1C_MASK;
674	writew(status, host->sdhc_mmiobase + VIA_CRDR_SDSTATUS);
675
676	WARN_ON(host->mrq != NULL);
677	host->mrq = mrq;
678
679	status = readw(host->sdhc_mmiobase + VIA_CRDR_SDSTATUS);
680	if (!(status & VIA_CRDR_SDSTS_SLOTG) || host->reject) {
681		host->mrq->cmd->error = -ENOMEDIUM;
682		tasklet_schedule(&host->finish_tasklet);
683	} else {
684		via_sdc_send_command(host, mrq->cmd);
685	}
686
687	mmiowb();
688	spin_unlock_irqrestore(&host->lock, flags);
689}
690
/*
 * Select pad voltage (1.8V when the requested VDD bit is MMC_VDD_165_195,
 * 3.3V otherwise) and switch pad power on/off, then wait for the rail to
 * stabilize.  Sleeps at the end, so callers must not hold host->lock.
 */
691static void via_sdc_set_power(struct via_crdr_mmc_host *host,
692			      unsigned short power, unsigned int on)
693{
694	unsigned long flags;
695	u8 gatt;
696
697	spin_lock_irqsave(&host->lock, flags);
698
	/* Remember the selected VDD bit for via_reset_pcictrl(). */
699	host->power = (1 << power);
700
701	gatt = readb(host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
702	if (host->power == MMC_VDD_165_195)
703		gatt &= ~VIA_CRDR_PCICLKGATT_3V3;
704	else
705		gatt |= VIA_CRDR_PCICLKGATT_3V3;
706	if (on)
707		gatt |= VIA_CRDR_PCICLKGATT_PAD_PWRON;
708	else
709		gatt &= ~VIA_CRDR_PCICLKGATT_PAD_PWRON;
710	writeb(gatt, host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
711
712	mmiowb();
713	spin_unlock_irqrestore(&host->lock, flags);
714
715	via_pwron_sleep(host);
716}
717
/*
 * mmc_host_ops.set_ios: apply bus width, clock gating, high-speed mode
 * and clock divider, then update pad power.  The clock is quantized down
 * to the nearest supported rate (48/33/24/16/12/8 MHz or 375 kHz).
 */
718static void via_sdc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
719{
720	struct via_crdr_mmc_host *host;
721	unsigned long flags;
722	void __iomem *addrbase;
723	u32 org_data, sdextctrl;
724	u8 clock;
725
726	host = mmc_priv(mmc);
727
728	spin_lock_irqsave(&host->lock, flags);
729
730	addrbase = host->sdhc_mmiobase;
731	org_data = readl(addrbase + VIA_CRDR_SDBUSMODE);
732	sdextctrl = readl(addrbase + VIA_CRDR_SDEXTCTRL);
733
734	if (ios->bus_width == MMC_BUS_WIDTH_1)
735		org_data &= ~VIA_CRDR_SDMODE_4BIT;
736	else
737		org_data |= VIA_CRDR_SDMODE_4BIT;
738
	/* Gate the card clock off whenever the MMC core powers us down. */
739	if (ios->power_mode == MMC_POWER_OFF)
740		org_data &= ~VIA_CRDR_SDMODE_CLK_ON;
741	else
742		org_data |= VIA_CRDR_SDMODE_CLK_ON;
743
744	if (ios->timing == MMC_TIMING_SD_HS)
745		sdextctrl |= VIA_CRDR_SDEXTCTRL_HISPD;
746	else
747		sdextctrl &= ~VIA_CRDR_SDEXTCTRL_HISPD;
748
749	writel(org_data, addrbase + VIA_CRDR_SDBUSMODE);
750	writel(sdextctrl, addrbase + VIA_CRDR_SDEXTCTRL);
751
752	if (ios->clock >= 48000000)
753		clock = PCI_CLK_48M;
754	else if (ios->clock >= 33000000)
755		clock = PCI_CLK_33M;
756	else if (ios->clock >= 24000000)
757		clock = PCI_CLK_24M;
758	else if (ios->clock >= 16000000)
759		clock = PCI_CLK_16M;
760	else if (ios->clock >= 12000000)
761		clock = PCI_CLK_12M;
762	else if (ios->clock >= 8000000)
763		clock = PCI_CLK_8M;
764	else
765		clock = PCI_CLK_375K;
766
767	addrbase = host->pcictrl_mmiobase;
768	if (readb(addrbase + VIA_CRDR_PCISDCCLK) != clock)
769		writeb(clock, addrbase + VIA_CRDR_PCISDCCLK);
770
771	mmiowb();
772	spin_unlock_irqrestore(&host->lock, flags);
773
	/* Must run unlocked: via_sdc_set_power() sleeps. */
774	if (ios->power_mode != MMC_POWER_OFF)
775		via_sdc_set_power(host, ios->vdd, 1);
776	else
777		via_sdc_set_power(host, ios->vdd, 0);
778}
779
/*
 * mmc_host_ops.get_ro: report write-protect state from the SD status
 * register.  NOTE(review): the WP bit is treated as active-low here
 * (read-only is reported when WP reads 0) -- confirm against the
 * controller datasheet.
 */
780static int via_sdc_get_ro(struct mmc_host *mmc)
781{
782	struct via_crdr_mmc_host *host;
783	unsigned long flags;
784	u16 status;
785
786	host = mmc_priv(mmc);
787
788	spin_lock_irqsave(&host->lock, flags);
789
790	status = readw(host->sdhc_mmiobase + VIA_CRDR_SDSTATUS);
791
792	spin_unlock_irqrestore(&host->lock, flags);
793
794	return !(status & VIA_CRDR_SDSTS_WP);
795}
796
/* Host operations exposed to the MMC core. */
797static const struct mmc_host_ops via_sdc_ops = {
798	.request = via_sdc_request,
799	.set_ios = via_sdc_set_ios,
800	.get_ro = via_sdc_get_ro,
801};
802
/*
 * Soft-reset the whole card-reader controller: snapshot the PCI-control
 * and SDC register banks, cycle pad power with the soft-reset bit, then
 * restore both banks.  Sleeps (via_pwron_sleep/msleep), so must not be
 * called in atomic context.
 */
803static void via_reset_pcictrl(struct via_crdr_mmc_host *host)
804{
805	void __iomem *addrbase;
806	unsigned long flags;
807	u8 gatt;
808
809	addrbase = host->pcictrl_mmiobase;
810
811	spin_lock_irqsave(&host->lock, flags);
812
813	via_save_pcictrlreg(host);
814	via_save_sdcreg(host);
815
816	spin_unlock_irqrestore(&host->lock, flags);
817
818	gatt = VIA_CRDR_PCICLKGATT_PAD_PWRON;
819	if (host->power == MMC_VDD_165_195)
		/*
		 * Select 1.8V by CLEARING the 3V3 bit.  The original code
		 * did "gatt &= VIA_CRDR_PCICLKGATT_3V3" (missing the ~),
		 * which also wiped the PAD_PWRON bit just set above and
		 * left the pads powered off; via_sdc_set_power() already
		 * uses the correct "&= ~" form.
		 */
820		gatt &= ~VIA_CRDR_PCICLKGATT_3V3;
821	else
822		gatt |= VIA_CRDR_PCICLKGATT_3V3;
823	writeb(gatt, host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
824	via_pwron_sleep(host);
	/* De-assert soft reset and give the controller time to settle. */
825	gatt |= VIA_CRDR_PCICLKGATT_SFTRST;
826	writeb(gatt, host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
827	msleep(3);
828
829	spin_lock_irqsave(&host->lock, flags);
830
831	via_restore_pcictrlreg(host);
832	via_restore_sdcreg(host);
833
834	mmiowb();
835	spin_unlock_irqrestore(&host->lock, flags);
836}
837
/*
 * Handle command-phase interrupt bits: map response-timeout/CRC errors
 * onto the current command, then either fail the request (tasklet) or
 * complete the command on CRD (command-response done).
 */
838static void via_sdc_cmd_isr(struct via_crdr_mmc_host *host, u16 intmask)
839{
840	BUG_ON(intmask == 0);
841
842	if (!host->cmd) {
843		pr_err("%s: Got command interrupt 0x%x even "
844		       "though no command operation was in progress.\n",
845		       mmc_hostname(host->mmc), intmask);
846		return;
847	}
848
849	if (intmask & VIA_CRDR_SDSTS_CRTO)
850		host->cmd->error = -ETIMEDOUT;
851	else if (intmask & VIA_CRDR_SDSTS_SC)
852		host->cmd->error = -EILSEQ;
853
854	if (host->cmd->error)
855		tasklet_schedule(&host->finish_tasklet);
856	else if (intmask & VIA_CRDR_SDSTS_CRD)
857		via_sdc_finish_command(host);
858}
859
/*
 * Handle data-phase interrupt bits: map data timeout / read- or
 * write-CRC errors onto host->data, then finish the data phase.
 */
860static void via_sdc_data_isr(struct via_crdr_mmc_host *host, u16 intmask)
861{
862	BUG_ON(intmask == 0);
863
864	if (intmask & VIA_CRDR_SDSTS_DT)
865		host->data->error = -ETIMEDOUT;
866	else if (intmask & (VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC))
867		host->data->error = -EILSEQ;
868
869	via_sdc_finish_data(host);
870}
871
/*
 * Top-level interrupt handler.  Checks that the SD controller raised the
 * PCI-level interrupt, masks the SD status down to the bits we handle,
 * acks each group write-1-to-clear, and dispatches: card insert/remove
 * to the carddet workqueue, command bits to via_sdc_cmd_isr(), data bits
 * to via_sdc_data_isr().  Anything left over is logged and cleared.
 */
872static irqreturn_t via_sdc_isr(int irq, void *dev_id)
873{
874	struct via_crdr_mmc_host *sdhost = dev_id;
875	void __iomem *addrbase;
876	u8 pci_status;
877	u16 sd_status;
878	irqreturn_t result;
879
880	if (!sdhost)
881		return IRQ_NONE;
882
883	spin_lock(&sdhost->lock);
884
885	addrbase = sdhost->pcictrl_mmiobase;
886	pci_status = readb(addrbase + VIA_CRDR_PCIINTSTATUS);
887	if (!(pci_status & VIA_CRDR_PCIINTSTATUS_SDC)) {
888		result = IRQ_NONE;
889		goto out;
890	}
891
892	addrbase = sdhost->sdhc_mmiobase;
893	sd_status = readw(addrbase + VIA_CRDR_SDSTATUS);
894	sd_status &= VIA_CRDR_SDSTS_INT_MASK;
895	sd_status &= ~VIA_CRDR_SDSTS_IGN_MASK;
896	if (!sd_status) {
897		result = IRQ_NONE;
898		goto out;
899	}
900
901	if (sd_status & VIA_CRDR_SDSTS_CIR) {
902		writew(sd_status & VIA_CRDR_SDSTS_CIR,
903		       addrbase + VIA_CRDR_SDSTATUS);
904
905		schedule_work(&sdhost->carddet_work);
906	}
907
908	sd_status &= ~VIA_CRDR_SDSTS_CIR;
909	if (sd_status & VIA_CRDR_SDSTS_CMD_MASK) {
910		writew(sd_status & VIA_CRDR_SDSTS_CMD_MASK,
911		       addrbase + VIA_CRDR_SDSTATUS);
912		via_sdc_cmd_isr(sdhost, sd_status & VIA_CRDR_SDSTS_CMD_MASK);
913	}
914	if (sd_status & VIA_CRDR_SDSTS_DATA_MASK) {
915		writew(sd_status & VIA_CRDR_SDSTS_DATA_MASK,
916		       addrbase + VIA_CRDR_SDSTATUS);
917		via_sdc_data_isr(sdhost, sd_status & VIA_CRDR_SDSTS_DATA_MASK);
918	}
919
920	sd_status &= ~(VIA_CRDR_SDSTS_CMD_MASK | VIA_CRDR_SDSTS_DATA_MASK);
921	if (sd_status) {
922		pr_err("%s: Unexpected interrupt 0x%x\n",
923		       mmc_hostname(sdhost->mmc), sd_status);
924		writew(sd_status, addrbase + VIA_CRDR_SDSTATUS);
925	}
926
927	result = IRQ_HANDLED;
928
929	mmiowb();
930out:
931	spin_unlock(&sdhost->lock);
932
933	return result;
934}
935
936static void via_sdc_timeout(unsigned long ulongdata)
937{
938 struct via_crdr_mmc_host *sdhost;
939 unsigned long flags;
940
941 sdhost = (struct via_crdr_mmc_host *)ulongdata;
942
943 spin_lock_irqsave(&sdhost->lock, flags);
944
945 if (sdhost->mrq) {
946 pr_err("%s: Timeout waiting for hardware interrupt."
947 "cmd:0x%x\n", mmc_hostname(sdhost->mmc),
948 sdhost->mrq->cmd->opcode);
949
950 if (sdhost->data) {
951 writel(VIA_CRDR_DMACTRL_SFTRST,
952 sdhost->ddma_mmiobase + VIA_CRDR_DMACTRL);
953 sdhost->data->error = -ETIMEDOUT;
954 via_sdc_finish_data(sdhost);
955 } else {
956 if (sdhost->cmd)
957 sdhost->cmd->error = -ETIMEDOUT;
958 else
959 sdhost->mrq->cmd->error = -ETIMEDOUT;
960 tasklet_schedule(&sdhost->finish_tasklet);
961 }
962 }
963
964 mmiowb();
965 spin_unlock_irqrestore(&sdhost->lock, flags);
966}
967
968static void via_sdc_tasklet_finish(unsigned long param)
969{
970 struct via_crdr_mmc_host *host;
971 unsigned long flags;
972 struct mmc_request *mrq;
973
974 host = (struct via_crdr_mmc_host *)param;
975
976 spin_lock_irqsave(&host->lock, flags);
977
978 del_timer(&host->timer);
979 mrq = host->mrq;
980 host->mrq = NULL;
981 host->cmd = NULL;
982 host->data = NULL;
983
984 spin_unlock_irqrestore(&host->lock, flags);
985
986 mmc_request_done(host->mmc, mrq);
987}
988
/*
 * Workqueue handler for card insert/remove events (scheduled from the
 * ISR on a CIR status bit).
 *
 * Soft-resets the DMA engine, re-enables the SD DMA clock, and if the
 * slot is now empty fails any in-flight request with -ENOMEDIUM and
 * resets the PCI-control block.  Finally asks the MMC core to rescan
 * the slot after a debounce delay.
 */
static void via_sdc_card_detect(struct work_struct *work)
{
	struct via_crdr_mmc_host *host;
	void __iomem *addrbase;
	unsigned long flags;
	u16 status;

	host = container_of(work, struct via_crdr_mmc_host, carddet_work);

	/* Stop any DMA in flight before inspecting the slot state. */
	addrbase = host->ddma_mmiobase;
	writel(VIA_CRDR_DMACTRL_SFTRST, addrbase + VIA_CRDR_DMACTRL);

	spin_lock_irqsave(&host->lock, flags);

	addrbase = host->pcictrl_mmiobase;
	writeb(VIA_CRDR_PCIDMACLK_SDC, addrbase + VIA_CRDR_PCIDMACLK);

	addrbase = host->sdhc_mmiobase;
	status = readw(addrbase + VIA_CRDR_SDSTATUS);
	if (!(status & VIA_CRDR_SDSTS_SLOTG)) {
		/* Slot empty: fail whatever request was in progress. */
		if (host->mrq) {
			pr_err("%s: Card removed during transfer!\n",
			       mmc_hostname(host->mmc));
			host->mrq->cmd->error = -ENOMEDIUM;
			tasklet_schedule(&host->finish_tasklet);
		}

		mmiowb();
		/*
		 * via_reset_pcictrl() sleeps and takes host->lock itself,
		 * so the lock must be dropped around the call.
		 */
		spin_unlock_irqrestore(&host->lock, flags);

		via_reset_pcictrl(host);

		spin_lock_irqsave(&host->lock, flags);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	via_print_pcictrl(host);
	via_print_sdchc(host);

	/* Let the MMC core rescan the slot after a 500 ms debounce. */
	mmc_detect_change(host->mmc, msecs_to_jiffies(500));
}
1032
/*
 * One-time setup of the mmc_host and the SD controller.
 *
 * Fills in the mmc_host limits/capabilities, wires up the timeout
 * timer, card-detect worker and finish tasklet, then programs the
 * hardware: interrupts masked, block-length register set up for GPI
 * card detect, stale write-1-to-clear status acked, clock enabled
 * (CFE), and finally the active interrupt mask installed.
 */
static void via_init_mmc_host(struct via_crdr_mmc_host *host)
{
	struct mmc_host *mmc = host->mmc;
	void __iomem *addrbase;
	u32 lenreg;
	u32 status;

	/* Request timeout timer; via_sdc_timeout() gets host as data. */
	init_timer(&host->timer);
	host->timer.data = (unsigned long)host;
	host->timer.function = via_sdc_timeout;

	spin_lock_init(&host->lock);

	mmc->f_min = VIA_CRDR_MIN_CLOCK;
	mmc->f_max = VIA_CRDR_MAX_CLOCK;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;
	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED;
	mmc->ops = &via_sdc_ops;

	/*Hardware cannot do scatter lists*/
	mmc->max_hw_segs = 1;
	mmc->max_phys_segs = 1;

	mmc->max_blk_size = VIA_CRDR_MAX_BLOCK_LENGTH;
	mmc->max_blk_count = VIA_CRDR_MAX_BLOCK_COUNT;

	/* One segment only, so a segment bounds the whole request. */
	mmc->max_seg_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_req_size = mmc->max_seg_size;

	INIT_WORK(&host->carddet_work, via_sdc_card_detect);

	tasklet_init(&host->finish_tasklet, via_sdc_tasklet_finish,
		     (unsigned long)host);

	/* Mask all SD interrupts while the controller is programmed. */
	addrbase = host->sdhc_mmiobase;
	writel(0x0, addrbase + VIA_CRDR_SDINTMASK);
	msleep(1);

	lenreg = VIA_CRDR_SDBLKLEN_GPIDET | VIA_CRDR_SDBLKLEN_INTEN;
	writel(lenreg, addrbase + VIA_CRDR_SDBLKLEN);

	/* Ack any stale write-1-to-clear status bits. */
	status = readw(addrbase + VIA_CRDR_SDSTATUS);
	status &= VIA_CRDR_SDSTS_W1C_MASK;
	writew(status, addrbase + VIA_CRDR_SDSTATUS);

	status = readw(addrbase + VIA_CRDR_SDSTATUS2);
	status |= VIA_CRDR_SDSTS_CFE;
	writew(status, addrbase + VIA_CRDR_SDSTATUS2);

	writeb(0x0, addrbase + VIA_CRDR_SDEXTCTRL);

	/* Unmask the interrupt sources we actually service. */
	writel(VIA_CRDR_SDACTIVE_INTMASK, addrbase + VIA_CRDR_SDINTMASK);
	msleep(1);
}
1087
/*
 * PCI probe: map BAR0 and carve it into the SD-controller / DDMA /
 * PCI-control register windows, power up the pads, initialise the
 * mmc_host and register the (shared) interrupt handler.
 *
 * Returns 0 on success or a negative errno; the goto ladder unwinds
 * each acquired resource in reverse order on failure.
 */
static int __devinit via_sd_probe(struct pci_dev *pcidev,
				  const struct pci_device_id *id)
{
	struct mmc_host *mmc;
	struct via_crdr_mmc_host *sdhost;
	u32 base, len;
	u8  rev, gatt;
	int ret;

	pci_read_config_byte(pcidev, PCI_CLASS_REVISION, &rev);
	pr_info(DRV_NAME
		": VIA SDMMC controller found at %s [%04x:%04x] (rev %x)\n",
		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
		(int)rev);

	ret = pci_enable_device(pcidev);
	if (ret)
		return ret;

	ret = pci_request_regions(pcidev, DRV_NAME);
	if (ret)
		goto disable;

	/* Put the controller into its normal (non-debug) work mode. */
	pci_write_config_byte(pcidev, VIA_CRDR_PCI_WORK_MODE, 0);
	pci_write_config_byte(pcidev, VIA_CRDR_PCI_DBG_MODE, 0);

	mmc = mmc_alloc_host(sizeof(struct via_crdr_mmc_host), &pcidev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto release;
	}

	sdhost = mmc_priv(mmc);
	sdhost->mmc = mmc;
	dev_set_drvdata(&pcidev->dev, sdhost);

	len = pci_resource_len(pcidev, 0);
	base = pci_resource_start(pcidev, 0);
	sdhost->mmiobase = ioremap_nocache(base, len);
	if (!sdhost->mmiobase) {
		ret = -ENOMEM;
		goto free_mmc_host;
	}

	/* BAR0 holds three register windows at fixed offsets. */
	sdhost->sdhc_mmiobase =
		sdhost->mmiobase + VIA_CRDR_SDC_OFF;
	sdhost->ddma_mmiobase =
		sdhost->mmiobase + VIA_CRDR_DDMA_OFF;
	sdhost->pcictrl_mmiobase =
		sdhost->mmiobase + VIA_CRDR_PCICTRL_OFF;

	/*
	 * NOTE(review): power defaults to 1.8V here while the pads below
	 * are powered with the 3.3V bit set — confirm this is intended.
	 */
	sdhost->power = MMC_VDD_165_195;

	/* Power the pads at 3.3V, then pulse the soft reset. */
	gatt = VIA_CRDR_PCICLKGATT_3V3 | VIA_CRDR_PCICLKGATT_PAD_PWRON;
	writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
	via_pwron_sleep(sdhost);
	gatt |= VIA_CRDR_PCICLKGATT_SFTRST;
	writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
	msleep(3);

	via_init_mmc_host(sdhost);

	ret =
	    request_irq(pcidev->irq, via_sdc_isr, IRQF_SHARED, DRV_NAME,
			sdhost);
	if (ret)
		goto unmap;

	/* Enable the SD controller IRQ and a 1024 ms hardware timeout. */
	writeb(VIA_CRDR_PCIINTCTRL_SDCIRQEN,
	       sdhost->pcictrl_mmiobase + VIA_CRDR_PCIINTCTRL);
	writeb(VIA_CRDR_PCITMOCTRL_1024MS,
	       sdhost->pcictrl_mmiobase + VIA_CRDR_PCITMOCTRL);

	/* device-specific quirks */
	if (pcidev->subsystem_vendor == PCI_VENDOR_ID_LENOVO &&
	    pcidev->subsystem_device == 0x3891)
		sdhost->quirks = VIA_CRDR_QUIRK_300MS_PWRDELAY;

	mmc_add_host(mmc);

	return 0;

unmap:
	iounmap(sdhost->mmiobase);
free_mmc_host:
	dev_set_drvdata(&pcidev->dev, NULL);
	mmc_free_host(mmc);
release:
	pci_release_regions(pcidev);
disable:
	pci_disable_device(pcidev);

	return ret;
}
1182
/*
 * PCI remove: reject further requests, quiesce interrupts and DMA,
 * fail any request still in flight with -ENOMEDIUM, then tear down
 * the mmc_host, IRQ, timer and tasklet before powering the pads off
 * and releasing the PCI resources.
 */
static void __devexit via_sd_remove(struct pci_dev *pcidev)
{
	struct via_crdr_mmc_host *sdhost = pci_get_drvdata(pcidev);
	unsigned long flags;
	u8 gatt;

	spin_lock_irqsave(&sdhost->lock, flags);

	/* Ensure we don't accept more commands from mmc layer */
	sdhost->reject = 1;

	/* Disable generating further interrupts */
	writeb(0x0, sdhost->pcictrl_mmiobase + VIA_CRDR_PCIINTCTRL);
	mmiowb();

	if (sdhost->mrq) {
		printk(KERN_ERR "%s: Controller removed during "
			"transfer\n", mmc_hostname(sdhost->mmc));

		/* make sure all DMA is stopped */
		writel(VIA_CRDR_DMACTRL_SFTRST,
			sdhost->ddma_mmiobase + VIA_CRDR_DMACTRL);
		mmiowb();
		sdhost->mrq->cmd->error = -ENOMEDIUM;
		if (sdhost->mrq->stop)
			sdhost->mrq->stop->error = -ENOMEDIUM;
		tasklet_schedule(&sdhost->finish_tasklet);
	}
	spin_unlock_irqrestore(&sdhost->lock, flags);

	/* Unregister from the core before freeing anything it may use. */
	mmc_remove_host(sdhost->mmc);

	free_irq(pcidev->irq, sdhost);

	/* Timer and tasklet can no longer be re-armed past this point. */
	del_timer_sync(&sdhost->timer);

	tasklet_kill(&sdhost->finish_tasklet);

	/* switch off power */
	gatt = readb(sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
	gatt &= ~VIA_CRDR_PCICLKGATT_PAD_PWRON;
	writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);

	iounmap(sdhost->mmiobase);
	dev_set_drvdata(&pcidev->dev, NULL);
	mmc_free_host(sdhost->mmc);
	pci_release_regions(pcidev);
	pci_disable_device(pcidev);

	pr_info(DRV_NAME
		": VIA SDMMC controller at %s [%04x:%04x] has been removed\n",
		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
}
1236
1237#ifdef CONFIG_PM
1238
/*
 * Resume-time reprogramming of the SD controller: redo the static
 * setup (interrupt mask off, block-length register, status ack, clock
 * enable), then restore the register snapshot taken before suspend by
 * via_save_sdcreg().
 */
static void via_init_sdc_pm(struct via_crdr_mmc_host *host)
{
	struct sdhcreg *pm_sdhcreg;
	void __iomem *addrbase;
	u32 lenreg;
	u16 status;

	pm_sdhcreg = &(host->pm_sdhc_reg);
	addrbase = host->sdhc_mmiobase;

	/* Mask interrupts while the registers are rewritten. */
	writel(0x0, addrbase + VIA_CRDR_SDINTMASK);

	lenreg = VIA_CRDR_SDBLKLEN_GPIDET | VIA_CRDR_SDBLKLEN_INTEN;
	writel(lenreg, addrbase + VIA_CRDR_SDBLKLEN);

	/* Ack any stale write-1-to-clear status bits. */
	status = readw(addrbase + VIA_CRDR_SDSTATUS);
	status &= VIA_CRDR_SDSTS_W1C_MASK;
	writew(status, addrbase + VIA_CRDR_SDSTATUS);

	status = readw(addrbase + VIA_CRDR_SDSTATUS2);
	status |= VIA_CRDR_SDSTS_CFE;
	writew(status, addrbase + VIA_CRDR_SDSTATUS2);

	/* Restore the pre-suspend register snapshot (mask comes back here). */
	writel(pm_sdhcreg->sdcontrol_reg, addrbase + VIA_CRDR_SDCTRL);
	writel(pm_sdhcreg->sdcmdarg_reg, addrbase + VIA_CRDR_SDCARG);
	writel(pm_sdhcreg->sdintmask_reg, addrbase + VIA_CRDR_SDINTMASK);
	writel(pm_sdhcreg->sdrsptmo_reg, addrbase + VIA_CRDR_SDRSPTMO);
	writel(pm_sdhcreg->sdclksel_reg, addrbase + VIA_CRDR_SDCLKSEL);
	writel(pm_sdhcreg->sdextctrl_reg, addrbase + VIA_CRDR_SDEXTCTRL);

	via_print_pcictrl(host);
	via_print_sdchc(host);
}
1272
1273static int via_sd_suspend(struct pci_dev *pcidev, pm_message_t state)
1274{
1275 struct via_crdr_mmc_host *host;
1276 int ret = 0;
1277
1278 host = pci_get_drvdata(pcidev);
1279
1280 via_save_pcictrlreg(host);
1281 via_save_sdcreg(host);
1282
1283 ret = mmc_suspend_host(host->mmc, state);
1284
1285 pci_save_state(pcidev);
1286 pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
1287 pci_disable_device(pcidev);
1288 pci_set_power_state(pcidev, pci_choose_state(pcidev, state));
1289
1290 return ret;
1291}
1292
/*
 * PCI resume: re-power the pads at the previously negotiated voltage
 * (sdhost->power), pulse the soft reset, bring the PCI device back to
 * D0, restore the saved controller state and wake the MMC core.
 */
static int via_sd_resume(struct pci_dev *pcidev)
{
	struct via_crdr_mmc_host *sdhost;
	int ret = 0;
	u8 gatt;

	sdhost = pci_get_drvdata(pcidev);

	/* Clear the 3.3V bit for 1.8V cards, set it otherwise. */
	gatt = VIA_CRDR_PCICLKGATT_PAD_PWRON;
	if (sdhost->power == MMC_VDD_165_195)
		gatt &= ~VIA_CRDR_PCICLKGATT_3V3;
	else
		gatt |= VIA_CRDR_PCICLKGATT_3V3;
	writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
	via_pwron_sleep(sdhost);
	gatt |= VIA_CRDR_PCICLKGATT_SFTRST;
	writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
	msleep(3);

	/* Extra settle time before touching PCI config space. */
	msleep(100);

	pci_set_power_state(pcidev, PCI_D0);
	pci_restore_state(pcidev);
	ret = pci_enable_device(pcidev);
	if (ret)
		return ret;

	via_restore_pcictrlreg(sdhost);
	via_init_sdc_pm(sdhost);

	ret = mmc_resume_host(sdhost->mmc);

	return ret;
}
1327
1328#else /* CONFIG_PM */
1329
1330#define via_sd_suspend NULL
1331#define via_sd_resume NULL
1332
1333#endif /* CONFIG_PM */
1334
/*
 * PCI driver glue.  Without CONFIG_PM the suspend/resume hooks are
 * defined to NULL above.
 */
static struct pci_driver via_sd_driver = {
	.name = DRV_NAME,
	.id_table = via_ids,
	.probe = via_sd_probe,
	.remove = __devexit_p(via_sd_remove),
	.suspend = via_sd_suspend,
	.resume = via_sd_resume,
};
1343
/* Module entry point: announce the driver, register with the PCI core. */
static int __init via_sd_drv_init(void)
{
	pr_info(DRV_NAME ": VIA SD/MMC Card Reader driver "
		"(C) 2008 VIA Technologies, Inc.\n");

	return pci_register_driver(&via_sd_driver);
}
1351
/* Module exit point: unregister from the PCI core. */
static void __exit via_sd_drv_exit(void)
{
	pci_unregister_driver(&via_sd_driver);
}
1356
/* Module registration and metadata. */
module_init(via_sd_drv_init);
module_exit(via_sd_drv_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("VIA Technologies Inc.");
MODULE_DESCRIPTION("VIA SD/MMC Card Interface driver");
diff --git a/include/linux/mm.h b/include/linux/mm.h
index cf260d848eb9..d006e93d5c93 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -810,11 +810,11 @@ extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
810 810
811#ifdef CONFIG_MMU 811#ifdef CONFIG_MMU
812extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, 812extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
813 unsigned long address, int write_access); 813 unsigned long address, unsigned int flags);
814#else 814#else
815static inline int handle_mm_fault(struct mm_struct *mm, 815static inline int handle_mm_fault(struct mm_struct *mm,
816 struct vm_area_struct *vma, unsigned long address, 816 struct vm_area_struct *vma, unsigned long address,
817 int write_access) 817 unsigned int flags)
818{ 818{
819 /* should never happen if there's no MMU */ 819 /* should never happen if there's no MMU */
820 BUG(); 820 BUG();
diff --git a/ipc/util.h b/ipc/util.h
index ab3ebf2621b9..764b51a37a6a 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -10,6 +10,7 @@
10#ifndef _IPC_UTIL_H 10#ifndef _IPC_UTIL_H
11#define _IPC_UTIL_H 11#define _IPC_UTIL_H
12 12
13#include <linux/unistd.h>
13#include <linux/err.h> 14#include <linux/err.h>
14 15
15#define SEQ_MULTIPLIER (IPCMNI) 16#define SEQ_MULTIPLIER (IPCMNI)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 6b0c2d8a2129..23067ab1a73c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -472,7 +472,7 @@ config LOCKDEP
472 bool 472 bool
473 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT 473 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
474 select STACKTRACE 474 select STACKTRACE
475 select FRAME_POINTER if !X86 && !MIPS && !PPC && !ARM_UNWIND && !S390 475 select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390
476 select KALLSYMS 476 select KALLSYMS
477 select KALLSYMS_ALL 477 select KALLSYMS_ALL
478 478
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index ad65fc0317d9..3b93129a968c 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -262,11 +262,12 @@ static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
262 */ 262 */
263 matches += 1; 263 matches += 1;
264 match_lvl = 0; 264 match_lvl = 0;
265 entry->size == ref->size ? ++match_lvl : match_lvl; 265 entry->size == ref->size ? ++match_lvl : 0;
266 entry->type == ref->type ? ++match_lvl : match_lvl; 266 entry->type == ref->type ? ++match_lvl : 0;
267 entry->direction == ref->direction ? ++match_lvl : match_lvl; 267 entry->direction == ref->direction ? ++match_lvl : 0;
268 entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
268 269
269 if (match_lvl == 3) { 270 if (match_lvl == 4) {
270 /* perfect-fit - return the result */ 271 /* perfect-fit - return the result */
271 return entry; 272 return entry;
272 } else if (match_lvl > last_lvl) { 273 } else if (match_lvl > last_lvl) {
@@ -873,72 +874,68 @@ static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
873 "[addr=%p] [size=%llu]\n", addr, size); 874 "[addr=%p] [size=%llu]\n", addr, size);
874} 875}
875 876
876static void check_sync(struct device *dev, dma_addr_t addr, 877static void check_sync(struct device *dev,
877 u64 size, u64 offset, int direction, bool to_cpu) 878 struct dma_debug_entry *ref,
879 bool to_cpu)
878{ 880{
879 struct dma_debug_entry ref = {
880 .dev = dev,
881 .dev_addr = addr,
882 .size = size,
883 .direction = direction,
884 };
885 struct dma_debug_entry *entry; 881 struct dma_debug_entry *entry;
886 struct hash_bucket *bucket; 882 struct hash_bucket *bucket;
887 unsigned long flags; 883 unsigned long flags;
888 884
889 bucket = get_hash_bucket(&ref, &flags); 885 bucket = get_hash_bucket(ref, &flags);
890 886
891 entry = hash_bucket_find(bucket, &ref); 887 entry = hash_bucket_find(bucket, ref);
892 888
893 if (!entry) { 889 if (!entry) {
894 err_printk(dev, NULL, "DMA-API: device driver tries " 890 err_printk(dev, NULL, "DMA-API: device driver tries "
895 "to sync DMA memory it has not allocated " 891 "to sync DMA memory it has not allocated "
896 "[device address=0x%016llx] [size=%llu bytes]\n", 892 "[device address=0x%016llx] [size=%llu bytes]\n",
897 (unsigned long long)addr, size); 893 (unsigned long long)ref->dev_addr, ref->size);
898 goto out; 894 goto out;
899 } 895 }
900 896
901 if ((offset + size) > entry->size) { 897 if (ref->size > entry->size) {
902 err_printk(dev, entry, "DMA-API: device driver syncs" 898 err_printk(dev, entry, "DMA-API: device driver syncs"
903 " DMA memory outside allocated range " 899 " DMA memory outside allocated range "
904 "[device address=0x%016llx] " 900 "[device address=0x%016llx] "
905 "[allocation size=%llu bytes] [sync offset=%llu] " 901 "[allocation size=%llu bytes] "
906 "[sync size=%llu]\n", entry->dev_addr, entry->size, 902 "[sync offset+size=%llu]\n",
907 offset, size); 903 entry->dev_addr, entry->size,
904 ref->size);
908 } 905 }
909 906
910 if (direction != entry->direction) { 907 if (ref->direction != entry->direction) {
911 err_printk(dev, entry, "DMA-API: device driver syncs " 908 err_printk(dev, entry, "DMA-API: device driver syncs "
912 "DMA memory with different direction " 909 "DMA memory with different direction "
913 "[device address=0x%016llx] [size=%llu bytes] " 910 "[device address=0x%016llx] [size=%llu bytes] "
914 "[mapped with %s] [synced with %s]\n", 911 "[mapped with %s] [synced with %s]\n",
915 (unsigned long long)addr, entry->size, 912 (unsigned long long)ref->dev_addr, entry->size,
916 dir2name[entry->direction], 913 dir2name[entry->direction],
917 dir2name[direction]); 914 dir2name[ref->direction]);
918 } 915 }
919 916
920 if (entry->direction == DMA_BIDIRECTIONAL) 917 if (entry->direction == DMA_BIDIRECTIONAL)
921 goto out; 918 goto out;
922 919
923 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) && 920 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
924 !(direction == DMA_TO_DEVICE)) 921 !(ref->direction == DMA_TO_DEVICE))
925 err_printk(dev, entry, "DMA-API: device driver syncs " 922 err_printk(dev, entry, "DMA-API: device driver syncs "
926 "device read-only DMA memory for cpu " 923 "device read-only DMA memory for cpu "
927 "[device address=0x%016llx] [size=%llu bytes] " 924 "[device address=0x%016llx] [size=%llu bytes] "
928 "[mapped with %s] [synced with %s]\n", 925 "[mapped with %s] [synced with %s]\n",
929 (unsigned long long)addr, entry->size, 926 (unsigned long long)ref->dev_addr, entry->size,
930 dir2name[entry->direction], 927 dir2name[entry->direction],
931 dir2name[direction]); 928 dir2name[ref->direction]);
932 929
933 if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) && 930 if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
934 !(direction == DMA_FROM_DEVICE)) 931 !(ref->direction == DMA_FROM_DEVICE))
935 err_printk(dev, entry, "DMA-API: device driver syncs " 932 err_printk(dev, entry, "DMA-API: device driver syncs "
936 "device write-only DMA memory to device " 933 "device write-only DMA memory to device "
937 "[device address=0x%016llx] [size=%llu bytes] " 934 "[device address=0x%016llx] [size=%llu bytes] "
938 "[mapped with %s] [synced with %s]\n", 935 "[mapped with %s] [synced with %s]\n",
939 (unsigned long long)addr, entry->size, 936 (unsigned long long)ref->dev_addr, entry->size,
940 dir2name[entry->direction], 937 dir2name[entry->direction],
941 dir2name[direction]); 938 dir2name[ref->direction]);
942 939
943out: 940out:
944 put_hash_bucket(bucket, &flags); 941 put_hash_bucket(bucket, &flags);
@@ -1036,19 +1033,16 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
1036} 1033}
1037EXPORT_SYMBOL(debug_dma_map_sg); 1034EXPORT_SYMBOL(debug_dma_map_sg);
1038 1035
1039static int get_nr_mapped_entries(struct device *dev, struct scatterlist *s) 1036static int get_nr_mapped_entries(struct device *dev,
1037 struct dma_debug_entry *ref)
1040{ 1038{
1041 struct dma_debug_entry *entry, ref; 1039 struct dma_debug_entry *entry;
1042 struct hash_bucket *bucket; 1040 struct hash_bucket *bucket;
1043 unsigned long flags; 1041 unsigned long flags;
1044 int mapped_ents; 1042 int mapped_ents;
1045 1043
1046 ref.dev = dev; 1044 bucket = get_hash_bucket(ref, &flags);
1047 ref.dev_addr = sg_dma_address(s); 1045 entry = hash_bucket_find(bucket, ref);
1048 ref.size = sg_dma_len(s),
1049
1050 bucket = get_hash_bucket(&ref, &flags);
1051 entry = hash_bucket_find(bucket, &ref);
1052 mapped_ents = 0; 1046 mapped_ents = 0;
1053 1047
1054 if (entry) 1048 if (entry)
@@ -1076,16 +1070,14 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
1076 .dev_addr = sg_dma_address(s), 1070 .dev_addr = sg_dma_address(s),
1077 .size = sg_dma_len(s), 1071 .size = sg_dma_len(s),
1078 .direction = dir, 1072 .direction = dir,
1079 .sg_call_ents = 0, 1073 .sg_call_ents = nelems,
1080 }; 1074 };
1081 1075
1082 if (mapped_ents && i >= mapped_ents) 1076 if (mapped_ents && i >= mapped_ents)
1083 break; 1077 break;
1084 1078
1085 if (!i) { 1079 if (!i)
1086 ref.sg_call_ents = nelems; 1080 mapped_ents = get_nr_mapped_entries(dev, &ref);
1087 mapped_ents = get_nr_mapped_entries(dev, s);
1088 }
1089 1081
1090 check_unmap(&ref); 1082 check_unmap(&ref);
1091 } 1083 }
@@ -1140,10 +1132,19 @@ EXPORT_SYMBOL(debug_dma_free_coherent);
1140void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, 1132void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
1141 size_t size, int direction) 1133 size_t size, int direction)
1142{ 1134{
1135 struct dma_debug_entry ref;
1136
1143 if (unlikely(global_disable)) 1137 if (unlikely(global_disable))
1144 return; 1138 return;
1145 1139
1146 check_sync(dev, dma_handle, size, 0, direction, true); 1140 ref.type = dma_debug_single;
1141 ref.dev = dev;
1142 ref.dev_addr = dma_handle;
1143 ref.size = size;
1144 ref.direction = direction;
1145 ref.sg_call_ents = 0;
1146
1147 check_sync(dev, &ref, true);
1147} 1148}
1148EXPORT_SYMBOL(debug_dma_sync_single_for_cpu); 1149EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
1149 1150
@@ -1151,10 +1152,19 @@ void debug_dma_sync_single_for_device(struct device *dev,
1151 dma_addr_t dma_handle, size_t size, 1152 dma_addr_t dma_handle, size_t size,
1152 int direction) 1153 int direction)
1153{ 1154{
1155 struct dma_debug_entry ref;
1156
1154 if (unlikely(global_disable)) 1157 if (unlikely(global_disable))
1155 return; 1158 return;
1156 1159
1157 check_sync(dev, dma_handle, size, 0, direction, false); 1160 ref.type = dma_debug_single;
1161 ref.dev = dev;
1162 ref.dev_addr = dma_handle;
1163 ref.size = size;
1164 ref.direction = direction;
1165 ref.sg_call_ents = 0;
1166
1167 check_sync(dev, &ref, false);
1158} 1168}
1159EXPORT_SYMBOL(debug_dma_sync_single_for_device); 1169EXPORT_SYMBOL(debug_dma_sync_single_for_device);
1160 1170
@@ -1163,10 +1173,19 @@ void debug_dma_sync_single_range_for_cpu(struct device *dev,
1163 unsigned long offset, size_t size, 1173 unsigned long offset, size_t size,
1164 int direction) 1174 int direction)
1165{ 1175{
1176 struct dma_debug_entry ref;
1177
1166 if (unlikely(global_disable)) 1178 if (unlikely(global_disable))
1167 return; 1179 return;
1168 1180
1169 check_sync(dev, dma_handle, size, offset, direction, true); 1181 ref.type = dma_debug_single;
1182 ref.dev = dev;
1183 ref.dev_addr = dma_handle;
1184 ref.size = offset + size;
1185 ref.direction = direction;
1186 ref.sg_call_ents = 0;
1187
1188 check_sync(dev, &ref, true);
1170} 1189}
1171EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu); 1190EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
1172 1191
@@ -1175,10 +1194,19 @@ void debug_dma_sync_single_range_for_device(struct device *dev,
1175 unsigned long offset, 1194 unsigned long offset,
1176 size_t size, int direction) 1195 size_t size, int direction)
1177{ 1196{
1197 struct dma_debug_entry ref;
1198
1178 if (unlikely(global_disable)) 1199 if (unlikely(global_disable))
1179 return; 1200 return;
1180 1201
1181 check_sync(dev, dma_handle, size, offset, direction, false); 1202 ref.type = dma_debug_single;
1203 ref.dev = dev;
1204 ref.dev_addr = dma_handle;
1205 ref.size = offset + size;
1206 ref.direction = direction;
1207 ref.sg_call_ents = 0;
1208
1209 check_sync(dev, &ref, false);
1182} 1210}
1183EXPORT_SYMBOL(debug_dma_sync_single_range_for_device); 1211EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
1184 1212
@@ -1192,14 +1220,24 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1192 return; 1220 return;
1193 1221
1194 for_each_sg(sg, s, nelems, i) { 1222 for_each_sg(sg, s, nelems, i) {
1223
1224 struct dma_debug_entry ref = {
1225 .type = dma_debug_sg,
1226 .dev = dev,
1227 .paddr = sg_phys(s),
1228 .dev_addr = sg_dma_address(s),
1229 .size = sg_dma_len(s),
1230 .direction = direction,
1231 .sg_call_ents = nelems,
1232 };
1233
1195 if (!i) 1234 if (!i)
1196 mapped_ents = get_nr_mapped_entries(dev, s); 1235 mapped_ents = get_nr_mapped_entries(dev, &ref);
1197 1236
1198 if (i >= mapped_ents) 1237 if (i >= mapped_ents)
1199 break; 1238 break;
1200 1239
1201 check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0, 1240 check_sync(dev, &ref, true);
1202 direction, true);
1203 } 1241 }
1204} 1242}
1205EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu); 1243EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
@@ -1214,14 +1252,23 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1214 return; 1252 return;
1215 1253
1216 for_each_sg(sg, s, nelems, i) { 1254 for_each_sg(sg, s, nelems, i) {
1255
1256 struct dma_debug_entry ref = {
1257 .type = dma_debug_sg,
1258 .dev = dev,
1259 .paddr = sg_phys(s),
1260 .dev_addr = sg_dma_address(s),
1261 .size = sg_dma_len(s),
1262 .direction = direction,
1263 .sg_call_ents = nelems,
1264 };
1217 if (!i) 1265 if (!i)
1218 mapped_ents = get_nr_mapped_entries(dev, s); 1266 mapped_ents = get_nr_mapped_entries(dev, &ref);
1219 1267
1220 if (i >= mapped_ents) 1268 if (i >= mapped_ents)
1221 break; 1269 break;
1222 1270
1223 check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0, 1271 check_sync(dev, &ref, false);
1224 direction, false);
1225 } 1272 }
1226} 1273}
1227EXPORT_SYMBOL(debug_dma_sync_sg_for_device); 1274EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
diff --git a/mm/memory.c b/mm/memory.c
index d5d1653d60a6..98bcb90d5957 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1310,8 +1310,9 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1310 cond_resched(); 1310 cond_resched();
1311 while (!(page = follow_page(vma, start, foll_flags))) { 1311 while (!(page = follow_page(vma, start, foll_flags))) {
1312 int ret; 1312 int ret;
1313 ret = handle_mm_fault(mm, vma, start, 1313
1314 foll_flags & FOLL_WRITE); 1314 /* FOLL_WRITE matches FAULT_FLAG_WRITE! */
1315 ret = handle_mm_fault(mm, vma, start, foll_flags & FOLL_WRITE);
1315 if (ret & VM_FAULT_ERROR) { 1316 if (ret & VM_FAULT_ERROR) {
1316 if (ret & VM_FAULT_OOM) 1317 if (ret & VM_FAULT_OOM)
1317 return i ? i : -ENOMEM; 1318 return i ? i : -ENOMEM;
@@ -2496,7 +2497,7 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
2496 */ 2497 */
2497static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, 2498static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
2498 unsigned long address, pte_t *page_table, pmd_t *pmd, 2499 unsigned long address, pte_t *page_table, pmd_t *pmd,
2499 int write_access, pte_t orig_pte) 2500 unsigned int flags, pte_t orig_pte)
2500{ 2501{
2501 spinlock_t *ptl; 2502 spinlock_t *ptl;
2502 struct page *page; 2503 struct page *page;
@@ -2572,9 +2573,9 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
2572 2573
2573 inc_mm_counter(mm, anon_rss); 2574 inc_mm_counter(mm, anon_rss);
2574 pte = mk_pte(page, vma->vm_page_prot); 2575 pte = mk_pte(page, vma->vm_page_prot);
2575 if (write_access && reuse_swap_page(page)) { 2576 if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
2576 pte = maybe_mkwrite(pte_mkdirty(pte), vma); 2577 pte = maybe_mkwrite(pte_mkdirty(pte), vma);
2577 write_access = 0; 2578 flags &= ~FAULT_FLAG_WRITE;
2578 } 2579 }
2579 flush_icache_page(vma, page); 2580 flush_icache_page(vma, page);
2580 set_pte_at(mm, address, page_table, pte); 2581 set_pte_at(mm, address, page_table, pte);
@@ -2587,7 +2588,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
2587 try_to_free_swap(page); 2588 try_to_free_swap(page);
2588 unlock_page(page); 2589 unlock_page(page);
2589 2590
2590 if (write_access) { 2591 if (flags & FAULT_FLAG_WRITE) {
2591 ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); 2592 ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
2592 if (ret & VM_FAULT_ERROR) 2593 if (ret & VM_FAULT_ERROR)
2593 ret &= VM_FAULT_ERROR; 2594 ret &= VM_FAULT_ERROR;
@@ -2616,7 +2617,7 @@ out_page:
2616 */ 2617 */
2617static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, 2618static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
2618 unsigned long address, pte_t *page_table, pmd_t *pmd, 2619 unsigned long address, pte_t *page_table, pmd_t *pmd,
2619 int write_access) 2620 unsigned int flags)
2620{ 2621{
2621 struct page *page; 2622 struct page *page;
2622 spinlock_t *ptl; 2623 spinlock_t *ptl;
@@ -2776,7 +2777,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2776 * due to the bad i386 page protection. But it's valid 2777 * due to the bad i386 page protection. But it's valid
2777 * for other architectures too. 2778 * for other architectures too.
2778 * 2779 *
2779 * Note that if write_access is true, we either now have 2780 * Note that if FAULT_FLAG_WRITE is set, we either now have
2780 * an exclusive copy of the page, or this is a shared mapping, 2781 * an exclusive copy of the page, or this is a shared mapping,
2781 * so we can make it writable and dirty to avoid having to 2782 * so we can make it writable and dirty to avoid having to
2782 * handle that later. 2783 * handle that later.
@@ -2847,11 +2848,10 @@ unwritable_page:
2847 2848
2848static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma, 2849static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2849 unsigned long address, pte_t *page_table, pmd_t *pmd, 2850 unsigned long address, pte_t *page_table, pmd_t *pmd,
2850 int write_access, pte_t orig_pte) 2851 unsigned int flags, pte_t orig_pte)
2851{ 2852{
2852 pgoff_t pgoff = (((address & PAGE_MASK) 2853 pgoff_t pgoff = (((address & PAGE_MASK)
2853 - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 2854 - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2854 unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0);
2855 2855
2856 pte_unmap(page_table); 2856 pte_unmap(page_table);
2857 return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); 2857 return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
@@ -2868,12 +2868,12 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2868 */ 2868 */
2869static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma, 2869static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2870 unsigned long address, pte_t *page_table, pmd_t *pmd, 2870 unsigned long address, pte_t *page_table, pmd_t *pmd,
2871 int write_access, pte_t orig_pte) 2871 unsigned int flags, pte_t orig_pte)
2872{ 2872{
2873 unsigned int flags = FAULT_FLAG_NONLINEAR |
2874 (write_access ? FAULT_FLAG_WRITE : 0);
2875 pgoff_t pgoff; 2873 pgoff_t pgoff;
2876 2874
2875 flags |= FAULT_FLAG_NONLINEAR;
2876
2877 if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) 2877 if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
2878 return 0; 2878 return 0;
2879 2879
@@ -2904,7 +2904,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2904 */ 2904 */
2905static inline int handle_pte_fault(struct mm_struct *mm, 2905static inline int handle_pte_fault(struct mm_struct *mm,
2906 struct vm_area_struct *vma, unsigned long address, 2906 struct vm_area_struct *vma, unsigned long address,
2907 pte_t *pte, pmd_t *pmd, int write_access) 2907 pte_t *pte, pmd_t *pmd, unsigned int flags)
2908{ 2908{
2909 pte_t entry; 2909 pte_t entry;
2910 spinlock_t *ptl; 2910 spinlock_t *ptl;
@@ -2915,30 +2915,30 @@ static inline int handle_pte_fault(struct mm_struct *mm,
2915 if (vma->vm_ops) { 2915 if (vma->vm_ops) {
2916 if (likely(vma->vm_ops->fault)) 2916 if (likely(vma->vm_ops->fault))
2917 return do_linear_fault(mm, vma, address, 2917 return do_linear_fault(mm, vma, address,
2918 pte, pmd, write_access, entry); 2918 pte, pmd, flags, entry);
2919 } 2919 }
2920 return do_anonymous_page(mm, vma, address, 2920 return do_anonymous_page(mm, vma, address,
2921 pte, pmd, write_access); 2921 pte, pmd, flags);
2922 } 2922 }
2923 if (pte_file(entry)) 2923 if (pte_file(entry))
2924 return do_nonlinear_fault(mm, vma, address, 2924 return do_nonlinear_fault(mm, vma, address,
2925 pte, pmd, write_access, entry); 2925 pte, pmd, flags, entry);
2926 return do_swap_page(mm, vma, address, 2926 return do_swap_page(mm, vma, address,
2927 pte, pmd, write_access, entry); 2927 pte, pmd, flags, entry);
2928 } 2928 }
2929 2929
2930 ptl = pte_lockptr(mm, pmd); 2930 ptl = pte_lockptr(mm, pmd);
2931 spin_lock(ptl); 2931 spin_lock(ptl);
2932 if (unlikely(!pte_same(*pte, entry))) 2932 if (unlikely(!pte_same(*pte, entry)))
2933 goto unlock; 2933 goto unlock;
2934 if (write_access) { 2934 if (flags & FAULT_FLAG_WRITE) {
2935 if (!pte_write(entry)) 2935 if (!pte_write(entry))
2936 return do_wp_page(mm, vma, address, 2936 return do_wp_page(mm, vma, address,
2937 pte, pmd, ptl, entry); 2937 pte, pmd, ptl, entry);
2938 entry = pte_mkdirty(entry); 2938 entry = pte_mkdirty(entry);
2939 } 2939 }
2940 entry = pte_mkyoung(entry); 2940 entry = pte_mkyoung(entry);
2941 if (ptep_set_access_flags(vma, address, pte, entry, write_access)) { 2941 if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
2942 update_mmu_cache(vma, address, entry); 2942 update_mmu_cache(vma, address, entry);
2943 } else { 2943 } else {
2944 /* 2944 /*
@@ -2947,7 +2947,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
2947 * This still avoids useless tlb flushes for .text page faults 2947 * This still avoids useless tlb flushes for .text page faults
2948 * with threads. 2948 * with threads.
2949 */ 2949 */
2950 if (write_access) 2950 if (flags & FAULT_FLAG_WRITE)
2951 flush_tlb_page(vma, address); 2951 flush_tlb_page(vma, address);
2952 } 2952 }
2953unlock: 2953unlock:
@@ -2959,7 +2959,7 @@ unlock:
2959 * By the time we get here, we already hold the mm semaphore 2959 * By the time we get here, we already hold the mm semaphore
2960 */ 2960 */
2961int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, 2961int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2962 unsigned long address, int write_access) 2962 unsigned long address, unsigned int flags)
2963{ 2963{
2964 pgd_t *pgd; 2964 pgd_t *pgd;
2965 pud_t *pud; 2965 pud_t *pud;
@@ -2971,7 +2971,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2971 count_vm_event(PGFAULT); 2971 count_vm_event(PGFAULT);
2972 2972
2973 if (unlikely(is_vm_hugetlb_page(vma))) 2973 if (unlikely(is_vm_hugetlb_page(vma)))
2974 return hugetlb_fault(mm, vma, address, write_access); 2974 return hugetlb_fault(mm, vma, address, flags);
2975 2975
2976 pgd = pgd_offset(mm, address); 2976 pgd = pgd_offset(mm, address);
2977 pud = pud_alloc(mm, pgd, address); 2977 pud = pud_alloc(mm, pgd, address);
@@ -2984,7 +2984,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2984 if (!pte) 2984 if (!pte)
2985 return VM_FAULT_OOM; 2985 return VM_FAULT_OOM;
2986 2986
2987 return handle_pte_fault(mm, vma, address, pte, pmd, write_access); 2987 return handle_pte_fault(mm, vma, address, pte, pmd, flags);
2988} 2988}
2989 2989
2990#ifndef __PAGETABLE_PUD_FOLDED 2990#ifndef __PAGETABLE_PUD_FOLDED
diff --git a/mm/percpu.c b/mm/percpu.c
index c0b2c1a76e81..b70f2acd8853 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -549,14 +549,14 @@ static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
549 * @chunk: chunk of interest 549 * @chunk: chunk of interest
550 * @page_start: page index of the first page to unmap 550 * @page_start: page index of the first page to unmap
551 * @page_end: page index of the last page to unmap + 1 551 * @page_end: page index of the last page to unmap + 1
552 * @flush: whether to flush cache and tlb or not 552 * @flush_tlb: whether to flush tlb or not
553 * 553 *
554 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk. 554 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
555 * If @flush is true, vcache is flushed before unmapping and tlb 555 * If @flush is true, vcache is flushed before unmapping and tlb
556 * after. 556 * after.
557 */ 557 */
558static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end, 558static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
559 bool flush) 559 bool flush_tlb)
560{ 560{
561 unsigned int last = num_possible_cpus() - 1; 561 unsigned int last = num_possible_cpus() - 1;
562 unsigned int cpu; 562 unsigned int cpu;
@@ -569,9 +569,8 @@ static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
569 * the whole region at once rather than doing it for each cpu. 569 * the whole region at once rather than doing it for each cpu.
570 * This could be an overkill but is more scalable. 570 * This could be an overkill but is more scalable.
571 */ 571 */
572 if (flush) 572 flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
573 flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start), 573 pcpu_chunk_addr(chunk, last, page_end));
574 pcpu_chunk_addr(chunk, last, page_end));
575 574
576 for_each_possible_cpu(cpu) 575 for_each_possible_cpu(cpu)
577 unmap_kernel_range_noflush( 576 unmap_kernel_range_noflush(
@@ -579,7 +578,7 @@ static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
579 (page_end - page_start) << PAGE_SHIFT); 578 (page_end - page_start) << PAGE_SHIFT);
580 579
581 /* ditto as flush_cache_vunmap() */ 580 /* ditto as flush_cache_vunmap() */
582 if (flush) 581 if (flush_tlb)
583 flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start), 582 flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start),
584 pcpu_chunk_addr(chunk, last, page_end)); 583 pcpu_chunk_addr(chunk, last, page_end));
585} 584}
@@ -1234,6 +1233,7 @@ static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
1234ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size, 1233ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
1235 ssize_t dyn_size, ssize_t unit_size) 1234 ssize_t dyn_size, ssize_t unit_size)
1236{ 1235{
1236 size_t chunk_size;
1237 unsigned int cpu; 1237 unsigned int cpu;
1238 1238
1239 /* determine parameters and allocate */ 1239 /* determine parameters and allocate */
@@ -1248,11 +1248,15 @@ ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
1248 } else 1248 } else
1249 pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE); 1249 pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE);
1250 1250
1251 pcpue_ptr = __alloc_bootmem_nopanic( 1251 chunk_size = pcpue_unit_size * num_possible_cpus();
1252 num_possible_cpus() * pcpue_unit_size, 1252
1253 PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); 1253 pcpue_ptr = __alloc_bootmem_nopanic(chunk_size, PAGE_SIZE,
1254 if (!pcpue_ptr) 1254 __pa(MAX_DMA_ADDRESS));
1255 if (!pcpue_ptr) {
1256 pr_warning("PERCPU: failed to allocate %zu bytes for "
1257 "embedding\n", chunk_size);
1255 return -ENOMEM; 1258 return -ENOMEM;
1259 }
1256 1260
1257 /* return the leftover and copy */ 1261 /* return the leftover and copy */
1258 for_each_possible_cpu(cpu) { 1262 for_each_possible_cpu(cpu) {
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 562403a23488..462e2cedaa6a 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -972,8 +972,6 @@ int /*__devinit*/ snd_hda_codec_new(struct hda_bus *bus, unsigned int codec_addr
972 snd_hda_codec_read(codec, nid, 0, 972 snd_hda_codec_read(codec, nid, 0,
973 AC_VERB_GET_SUBSYSTEM_ID, 0); 973 AC_VERB_GET_SUBSYSTEM_ID, 0);
974 } 974 }
975 if (bus->modelname)
976 codec->modelname = kstrdup(bus->modelname, GFP_KERNEL);
977 975
978 /* power-up all before initialization */ 976 /* power-up all before initialization */
979 hda_set_power_state(codec, 977 hda_set_power_state(codec,
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index d22b26068014..bf4b78a74a8f 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -224,6 +224,7 @@ enum {
224 ALC883_ACER, 224 ALC883_ACER,
225 ALC883_ACER_ASPIRE, 225 ALC883_ACER_ASPIRE,
226 ALC888_ACER_ASPIRE_4930G, 226 ALC888_ACER_ASPIRE_4930G,
227 ALC888_ACER_ASPIRE_6530G,
227 ALC888_ACER_ASPIRE_8930G, 228 ALC888_ACER_ASPIRE_8930G,
228 ALC883_MEDION, 229 ALC883_MEDION,
229 ALC883_MEDION_MD2, 230 ALC883_MEDION_MD2,
@@ -970,7 +971,7 @@ static void alc_automute_pin(struct hda_codec *codec)
970 } 971 }
971} 972}
972 973
973#if 0 /* it's broken in some acses -- temporarily disabled */ 974#if 0 /* it's broken in some cases -- temporarily disabled */
974static void alc_mic_automute(struct hda_codec *codec) 975static void alc_mic_automute(struct hda_codec *codec)
975{ 976{
976 struct alc_spec *spec = codec->spec; 977 struct alc_spec *spec = codec->spec;
@@ -1170,7 +1171,7 @@ static int alc_subsystem_id(struct hda_codec *codec,
1170 1171
1171 /* invalid SSID, check the special NID pin defcfg instead */ 1172 /* invalid SSID, check the special NID pin defcfg instead */
1172 /* 1173 /*
1173 * 31~30 : port conetcivity 1174 * 31~30 : port connectivity
1174 * 29~21 : reserve 1175 * 29~21 : reserve
1175 * 20 : PCBEEP input 1176 * 20 : PCBEEP input
1176 * 19~16 : Check sum (15:1) 1177 * 19~16 : Check sum (15:1)
@@ -1471,6 +1472,25 @@ static struct hda_verb alc888_acer_aspire_4930g_verbs[] = {
1471}; 1472};
1472 1473
1473/* 1474/*
1475 * ALC888 Acer Aspire 6530G model
1476 */
1477
1478static struct hda_verb alc888_acer_aspire_6530g_verbs[] = {
1479/* Bias voltage on for external mic port */
1480 {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN | PIN_VREF80},
1481/* Enable unsolicited event for HP jack */
1482 {0x15, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
1483/* Enable speaker output */
1484 {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
1485 {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
1486/* Enable headphone output */
1487 {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT | PIN_HP},
1488 {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
1489 {0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
1490 { }
1491};
1492
1493/*
1474 * ALC889 Acer Aspire 8930G model 1494 * ALC889 Acer Aspire 8930G model
1475 */ 1495 */
1476 1496
@@ -1544,6 +1564,25 @@ static struct hda_input_mux alc888_2_capture_sources[2] = {
1544 } 1564 }
1545}; 1565};
1546 1566
1567static struct hda_input_mux alc888_acer_aspire_6530_sources[2] = {
 1568 	/* Internal mic only available on one ADC */
1569 {
1570 .num_items = 3,
1571 .items = {
1572 { "Ext Mic", 0x0 },
1573 { "CD", 0x4 },
1574 { "Int Mic", 0xb },
1575 },
1576 },
1577 {
1578 .num_items = 2,
1579 .items = {
1580 { "Ext Mic", 0x0 },
1581 { "CD", 0x4 },
1582 },
1583 }
1584};
1585
1547static struct hda_input_mux alc889_capture_sources[3] = { 1586static struct hda_input_mux alc889_capture_sources[3] = {
1548 /* Digital mic only available on first "ADC" */ 1587 /* Digital mic only available on first "ADC" */
1549 { 1588 {
@@ -6347,7 +6386,7 @@ static struct hda_channel_mode alc882_sixstack_modes[2] = {
6347}; 6386};
6348 6387
6349/* 6388/*
6350 * macbook pro ALC885 can switch LineIn to LineOut without loosing Mic 6389 * macbook pro ALC885 can switch LineIn to LineOut without losing Mic
6351 */ 6390 */
6352 6391
6353/* 6392/*
@@ -7047,7 +7086,7 @@ static struct hda_verb alc882_auto_init_verbs[] = {
7047#define alc882_loopbacks alc880_loopbacks 7086#define alc882_loopbacks alc880_loopbacks
7048#endif 7087#endif
7049 7088
7050/* pcm configuration: identiacal with ALC880 */ 7089/* pcm configuration: identical with ALC880 */
7051#define alc882_pcm_analog_playback alc880_pcm_analog_playback 7090#define alc882_pcm_analog_playback alc880_pcm_analog_playback
7052#define alc882_pcm_analog_capture alc880_pcm_analog_capture 7091#define alc882_pcm_analog_capture alc880_pcm_analog_capture
7053#define alc882_pcm_digital_playback alc880_pcm_digital_playback 7092#define alc882_pcm_digital_playback alc880_pcm_digital_playback
@@ -8068,7 +8107,7 @@ static struct snd_kcontrol_new alc883_fivestack_mixer[] = {
8068 { } /* end */ 8107 { } /* end */
8069}; 8108};
8070 8109
8071static struct snd_kcontrol_new alc883_tagra_mixer[] = { 8110static struct snd_kcontrol_new alc883_targa_mixer[] = {
8072 HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), 8111 HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
8073 HDA_CODEC_MUTE("Headphone Playback Switch", 0x14, 0x0, HDA_OUTPUT), 8112 HDA_CODEC_MUTE("Headphone Playback Switch", 0x14, 0x0, HDA_OUTPUT),
8074 HDA_CODEC_MUTE("Front Playback Switch", 0x1b, 0x0, HDA_OUTPUT), 8113 HDA_CODEC_MUTE("Front Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
@@ -8088,7 +8127,7 @@ static struct snd_kcontrol_new alc883_tagra_mixer[] = {
8088 { } /* end */ 8127 { } /* end */
8089}; 8128};
8090 8129
8091static struct snd_kcontrol_new alc883_tagra_2ch_mixer[] = { 8130static struct snd_kcontrol_new alc883_targa_2ch_mixer[] = {
8092 HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), 8131 HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
8093 HDA_CODEC_MUTE("Headphone Playback Switch", 0x14, 0x0, HDA_OUTPUT), 8132 HDA_CODEC_MUTE("Headphone Playback Switch", 0x14, 0x0, HDA_OUTPUT),
8094 HDA_CODEC_MUTE("Front Playback Switch", 0x1b, 0x0, HDA_OUTPUT), 8133 HDA_CODEC_MUTE("Front Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
@@ -8153,6 +8192,19 @@ static struct snd_kcontrol_new alc883_acer_aspire_mixer[] = {
8153 { } /* end */ 8192 { } /* end */
8154}; 8193};
8155 8194
8195static struct snd_kcontrol_new alc888_acer_aspire_6530_mixer[] = {
8196 HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
8197 HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
8198 HDA_CODEC_VOLUME("LFE Playback Volume", 0x0f, 0x0, HDA_OUTPUT),
8199 HDA_BIND_MUTE("LFE Playback Switch", 0x0f, 2, HDA_INPUT),
8200 HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
8201 HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
8202 HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
8203 HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
8204 HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
8205 { } /* end */
8206};
8207
8156static struct snd_kcontrol_new alc888_lenovo_sky_mixer[] = { 8208static struct snd_kcontrol_new alc888_lenovo_sky_mixer[] = {
8157 HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), 8209 HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
8158 HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT), 8210 HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
@@ -8417,7 +8469,7 @@ static struct hda_verb alc883_2ch_fujitsu_pi2515_verbs[] = {
8417 { } /* end */ 8469 { } /* end */
8418}; 8470};
8419 8471
8420static struct hda_verb alc883_tagra_verbs[] = { 8472static struct hda_verb alc883_targa_verbs[] = {
8421 {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, 8473 {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
8422 {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)}, 8474 {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
8423 8475
@@ -8626,8 +8678,8 @@ static void alc883_medion_md2_init_hook(struct hda_codec *codec)
8626} 8678}
8627 8679
8628/* toggle speaker-output according to the hp-jack state */ 8680/* toggle speaker-output according to the hp-jack state */
8629#define alc883_tagra_init_hook alc882_targa_init_hook 8681#define alc883_targa_init_hook alc882_targa_init_hook
8630#define alc883_tagra_unsol_event alc882_targa_unsol_event 8682#define alc883_targa_unsol_event alc882_targa_unsol_event
8631 8683
8632static void alc883_clevo_m720_mic_automute(struct hda_codec *codec) 8684static void alc883_clevo_m720_mic_automute(struct hda_codec *codec)
8633{ 8685{
@@ -8957,7 +9009,7 @@ static void alc889A_mb31_unsol_event(struct hda_codec *codec, unsigned int res)
8957#define alc883_loopbacks alc880_loopbacks 9009#define alc883_loopbacks alc880_loopbacks
8958#endif 9010#endif
8959 9011
8960/* pcm configuration: identiacal with ALC880 */ 9012/* pcm configuration: identical with ALC880 */
8961#define alc883_pcm_analog_playback alc880_pcm_analog_playback 9013#define alc883_pcm_analog_playback alc880_pcm_analog_playback
8962#define alc883_pcm_analog_capture alc880_pcm_analog_capture 9014#define alc883_pcm_analog_capture alc880_pcm_analog_capture
8963#define alc883_pcm_analog_alt_capture alc880_pcm_analog_alt_capture 9015#define alc883_pcm_analog_alt_capture alc880_pcm_analog_alt_capture
@@ -8978,6 +9030,7 @@ static const char *alc883_models[ALC883_MODEL_LAST] = {
8978 [ALC883_ACER] = "acer", 9030 [ALC883_ACER] = "acer",
8979 [ALC883_ACER_ASPIRE] = "acer-aspire", 9031 [ALC883_ACER_ASPIRE] = "acer-aspire",
8980 [ALC888_ACER_ASPIRE_4930G] = "acer-aspire-4930g", 9032 [ALC888_ACER_ASPIRE_4930G] = "acer-aspire-4930g",
9033 [ALC888_ACER_ASPIRE_6530G] = "acer-aspire-6530g",
8981 [ALC888_ACER_ASPIRE_8930G] = "acer-aspire-8930g", 9034 [ALC888_ACER_ASPIRE_8930G] = "acer-aspire-8930g",
8982 [ALC883_MEDION] = "medion", 9035 [ALC883_MEDION] = "medion",
8983 [ALC883_MEDION_MD2] = "medion-md2", 9036 [ALC883_MEDION_MD2] = "medion-md2",
@@ -9021,7 +9074,7 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = {
9021 SND_PCI_QUIRK(0x1025, 0x015e, "Acer Aspire 6930G", 9074 SND_PCI_QUIRK(0x1025, 0x015e, "Acer Aspire 6930G",
9022 ALC888_ACER_ASPIRE_4930G), 9075 ALC888_ACER_ASPIRE_4930G),
9023 SND_PCI_QUIRK(0x1025, 0x0166, "Acer Aspire 6530G", 9076 SND_PCI_QUIRK(0x1025, 0x0166, "Acer Aspire 6530G",
9024 ALC888_ACER_ASPIRE_4930G), 9077 ALC888_ACER_ASPIRE_6530G),
9025 /* default Acer -- disabled as it causes more problems. 9078 /* default Acer -- disabled as it causes more problems.
9026 * model=auto should work fine now 9079 * model=auto should work fine now
9027 */ 9080 */
@@ -9069,6 +9122,7 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = {
9069 SND_PCI_QUIRK(0x1462, 0x7267, "MSI", ALC883_3ST_6ch_DIG), 9122 SND_PCI_QUIRK(0x1462, 0x7267, "MSI", ALC883_3ST_6ch_DIG),
9070 SND_PCI_QUIRK(0x1462, 0x7280, "MSI", ALC883_6ST_DIG), 9123 SND_PCI_QUIRK(0x1462, 0x7280, "MSI", ALC883_6ST_DIG),
9071 SND_PCI_QUIRK(0x1462, 0x7327, "MSI", ALC883_6ST_DIG), 9124 SND_PCI_QUIRK(0x1462, 0x7327, "MSI", ALC883_6ST_DIG),
9125 SND_PCI_QUIRK(0x1462, 0x7350, "MSI", ALC883_6ST_DIG),
9072 SND_PCI_QUIRK(0x1462, 0xa422, "MSI", ALC883_TARGA_2ch_DIG), 9126 SND_PCI_QUIRK(0x1462, 0xa422, "MSI", ALC883_TARGA_2ch_DIG),
9073 SND_PCI_QUIRK(0x147b, 0x1083, "Abit IP35-PRO", ALC883_6ST_DIG), 9127 SND_PCI_QUIRK(0x147b, 0x1083, "Abit IP35-PRO", ALC883_6ST_DIG),
9074 SND_PCI_QUIRK(0x1558, 0x0721, "Clevo laptop M720R", ALC883_CLEVO_M720), 9128 SND_PCI_QUIRK(0x1558, 0x0721, "Clevo laptop M720R", ALC883_CLEVO_M720),
@@ -9165,8 +9219,8 @@ static struct alc_config_preset alc883_presets[] = {
9165 .input_mux = &alc883_capture_source, 9219 .input_mux = &alc883_capture_source,
9166 }, 9220 },
9167 [ALC883_TARGA_DIG] = { 9221 [ALC883_TARGA_DIG] = {
9168 .mixers = { alc883_tagra_mixer, alc883_chmode_mixer }, 9222 .mixers = { alc883_targa_mixer, alc883_chmode_mixer },
9169 .init_verbs = { alc883_init_verbs, alc883_tagra_verbs}, 9223 .init_verbs = { alc883_init_verbs, alc883_targa_verbs},
9170 .num_dacs = ARRAY_SIZE(alc883_dac_nids), 9224 .num_dacs = ARRAY_SIZE(alc883_dac_nids),
9171 .dac_nids = alc883_dac_nids, 9225 .dac_nids = alc883_dac_nids,
9172 .dig_out_nid = ALC883_DIGOUT_NID, 9226 .dig_out_nid = ALC883_DIGOUT_NID,
@@ -9174,12 +9228,12 @@ static struct alc_config_preset alc883_presets[] = {
9174 .channel_mode = alc883_3ST_6ch_modes, 9228 .channel_mode = alc883_3ST_6ch_modes,
9175 .need_dac_fix = 1, 9229 .need_dac_fix = 1,
9176 .input_mux = &alc883_capture_source, 9230 .input_mux = &alc883_capture_source,
9177 .unsol_event = alc883_tagra_unsol_event, 9231 .unsol_event = alc883_targa_unsol_event,
9178 .init_hook = alc883_tagra_init_hook, 9232 .init_hook = alc883_targa_init_hook,
9179 }, 9233 },
9180 [ALC883_TARGA_2ch_DIG] = { 9234 [ALC883_TARGA_2ch_DIG] = {
9181 .mixers = { alc883_tagra_2ch_mixer}, 9235 .mixers = { alc883_targa_2ch_mixer},
9182 .init_verbs = { alc883_init_verbs, alc883_tagra_verbs}, 9236 .init_verbs = { alc883_init_verbs, alc883_targa_verbs},
9183 .num_dacs = ARRAY_SIZE(alc883_dac_nids), 9237 .num_dacs = ARRAY_SIZE(alc883_dac_nids),
9184 .dac_nids = alc883_dac_nids, 9238 .dac_nids = alc883_dac_nids,
9185 .adc_nids = alc883_adc_nids_alt, 9239 .adc_nids = alc883_adc_nids_alt,
@@ -9188,13 +9242,13 @@ static struct alc_config_preset alc883_presets[] = {
9188 .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes), 9242 .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes),
9189 .channel_mode = alc883_3ST_2ch_modes, 9243 .channel_mode = alc883_3ST_2ch_modes,
9190 .input_mux = &alc883_capture_source, 9244 .input_mux = &alc883_capture_source,
9191 .unsol_event = alc883_tagra_unsol_event, 9245 .unsol_event = alc883_targa_unsol_event,
9192 .init_hook = alc883_tagra_init_hook, 9246 .init_hook = alc883_targa_init_hook,
9193 }, 9247 },
9194 [ALC883_TARGA_8ch_DIG] = { 9248 [ALC883_TARGA_8ch_DIG] = {
9195 .mixers = { alc883_base_mixer, alc883_chmode_mixer }, 9249 .mixers = { alc883_base_mixer, alc883_chmode_mixer },
9196 .init_verbs = { alc883_init_verbs, alc880_gpio3_init_verbs, 9250 .init_verbs = { alc883_init_verbs, alc880_gpio3_init_verbs,
9197 alc883_tagra_verbs }, 9251 alc883_targa_verbs },
9198 .num_dacs = ARRAY_SIZE(alc883_dac_nids), 9252 .num_dacs = ARRAY_SIZE(alc883_dac_nids),
9199 .dac_nids = alc883_dac_nids, 9253 .dac_nids = alc883_dac_nids,
9200 .num_adc_nids = ARRAY_SIZE(alc883_adc_nids_rev), 9254 .num_adc_nids = ARRAY_SIZE(alc883_adc_nids_rev),
@@ -9206,8 +9260,8 @@ static struct alc_config_preset alc883_presets[] = {
9206 .channel_mode = alc883_4ST_8ch_modes, 9260 .channel_mode = alc883_4ST_8ch_modes,
9207 .need_dac_fix = 1, 9261 .need_dac_fix = 1,
9208 .input_mux = &alc883_capture_source, 9262 .input_mux = &alc883_capture_source,
9209 .unsol_event = alc883_tagra_unsol_event, 9263 .unsol_event = alc883_targa_unsol_event,
9210 .init_hook = alc883_tagra_init_hook, 9264 .init_hook = alc883_targa_init_hook,
9211 }, 9265 },
9212 [ALC883_ACER] = { 9266 [ALC883_ACER] = {
9213 .mixers = { alc883_base_mixer }, 9267 .mixers = { alc883_base_mixer },
@@ -9255,6 +9309,24 @@ static struct alc_config_preset alc883_presets[] = {
9255 .unsol_event = alc_automute_amp_unsol_event, 9309 .unsol_event = alc_automute_amp_unsol_event,
9256 .init_hook = alc888_acer_aspire_4930g_init_hook, 9310 .init_hook = alc888_acer_aspire_4930g_init_hook,
9257 }, 9311 },
9312 [ALC888_ACER_ASPIRE_6530G] = {
9313 .mixers = { alc888_acer_aspire_6530_mixer },
9314 .init_verbs = { alc883_init_verbs, alc880_gpio1_init_verbs,
9315 alc888_acer_aspire_6530g_verbs },
9316 .num_dacs = ARRAY_SIZE(alc883_dac_nids),
9317 .dac_nids = alc883_dac_nids,
9318 .num_adc_nids = ARRAY_SIZE(alc883_adc_nids_rev),
9319 .adc_nids = alc883_adc_nids_rev,
9320 .capsrc_nids = alc883_capsrc_nids_rev,
9321 .dig_out_nid = ALC883_DIGOUT_NID,
9322 .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes),
9323 .channel_mode = alc883_3ST_2ch_modes,
9324 .num_mux_defs =
9325 ARRAY_SIZE(alc888_2_capture_sources),
9326 .input_mux = alc888_acer_aspire_6530_sources,
9327 .unsol_event = alc_automute_amp_unsol_event,
9328 .init_hook = alc888_acer_aspire_4930g_init_hook,
9329 },
9258 [ALC888_ACER_ASPIRE_8930G] = { 9330 [ALC888_ACER_ASPIRE_8930G] = {
9259 .mixers = { alc888_base_mixer, 9331 .mixers = { alc888_base_mixer,
9260 alc883_chmode_mixer }, 9332 alc883_chmode_mixer },
@@ -9361,7 +9433,7 @@ static struct alc_config_preset alc883_presets[] = {
9361 .init_hook = alc888_lenovo_ms7195_front_automute, 9433 .init_hook = alc888_lenovo_ms7195_front_automute,
9362 }, 9434 },
9363 [ALC883_HAIER_W66] = { 9435 [ALC883_HAIER_W66] = {
9364 .mixers = { alc883_tagra_2ch_mixer}, 9436 .mixers = { alc883_targa_2ch_mixer},
9365 .init_verbs = { alc883_init_verbs, alc883_haier_w66_verbs}, 9437 .init_verbs = { alc883_init_verbs, alc883_haier_w66_verbs},
9366 .num_dacs = ARRAY_SIZE(alc883_dac_nids), 9438 .num_dacs = ARRAY_SIZE(alc883_dac_nids),
9367 .dac_nids = alc883_dac_nids, 9439 .dac_nids = alc883_dac_nids,
@@ -11131,7 +11203,7 @@ static struct hda_verb alc262_toshiba_rx1_unsol_verbs[] = {
11131#define alc262_loopbacks alc880_loopbacks 11203#define alc262_loopbacks alc880_loopbacks
11132#endif 11204#endif
11133 11205
11134/* pcm configuration: identiacal with ALC880 */ 11206/* pcm configuration: identical with ALC880 */
11135#define alc262_pcm_analog_playback alc880_pcm_analog_playback 11207#define alc262_pcm_analog_playback alc880_pcm_analog_playback
11136#define alc262_pcm_analog_capture alc880_pcm_analog_capture 11208#define alc262_pcm_analog_capture alc880_pcm_analog_capture
11137#define alc262_pcm_digital_playback alc880_pcm_digital_playback 11209#define alc262_pcm_digital_playback alc880_pcm_digital_playback
@@ -12286,7 +12358,7 @@ static void alc268_auto_init_mono_speaker_out(struct hda_codec *codec)
12286 AC_VERB_SET_AMP_GAIN_MUTE, dac_vol2); 12358 AC_VERB_SET_AMP_GAIN_MUTE, dac_vol2);
12287} 12359}
12288 12360
12289/* pcm configuration: identiacal with ALC880 */ 12361/* pcm configuration: identical with ALC880 */
12290#define alc268_pcm_analog_playback alc880_pcm_analog_playback 12362#define alc268_pcm_analog_playback alc880_pcm_analog_playback
12291#define alc268_pcm_analog_capture alc880_pcm_analog_capture 12363#define alc268_pcm_analog_capture alc880_pcm_analog_capture
12292#define alc268_pcm_analog_alt_capture alc880_pcm_analog_alt_capture 12364#define alc268_pcm_analog_alt_capture alc880_pcm_analog_alt_capture
@@ -13197,7 +13269,7 @@ static int alc269_auto_create_analog_input_ctls(struct alc_spec *spec,
13197#define alc269_loopbacks alc880_loopbacks 13269#define alc269_loopbacks alc880_loopbacks
13198#endif 13270#endif
13199 13271
13200/* pcm configuration: identiacal with ALC880 */ 13272/* pcm configuration: identical with ALC880 */
13201#define alc269_pcm_analog_playback alc880_pcm_analog_playback 13273#define alc269_pcm_analog_playback alc880_pcm_analog_playback
13202#define alc269_pcm_analog_capture alc880_pcm_analog_capture 13274#define alc269_pcm_analog_capture alc880_pcm_analog_capture
13203#define alc269_pcm_digital_playback alc880_pcm_digital_playback 13275#define alc269_pcm_digital_playback alc880_pcm_digital_playback
@@ -14059,7 +14131,7 @@ static void alc861_toshiba_unsol_event(struct hda_codec *codec,
14059 alc861_toshiba_automute(codec); 14131 alc861_toshiba_automute(codec);
14060} 14132}
14061 14133
14062/* pcm configuration: identiacal with ALC880 */ 14134/* pcm configuration: identical with ALC880 */
14063#define alc861_pcm_analog_playback alc880_pcm_analog_playback 14135#define alc861_pcm_analog_playback alc880_pcm_analog_playback
14064#define alc861_pcm_analog_capture alc880_pcm_analog_capture 14136#define alc861_pcm_analog_capture alc880_pcm_analog_capture
14065#define alc861_pcm_digital_playback alc880_pcm_digital_playback 14137#define alc861_pcm_digital_playback alc880_pcm_digital_playback
@@ -14582,7 +14654,7 @@ static hda_nid_t alc861vd_dac_nids[4] = {
14582 14654
14583/* dac_nids for ALC660vd are in a different order - according to 14655/* dac_nids for ALC660vd are in a different order - according to
14584 * Realtek's driver. 14656 * Realtek's driver.
14585 * This should probably tesult in a different mixer for 6stack models 14657 * This should probably result in a different mixer for 6stack models
14586 * of ALC660vd codecs, but for now there is only 3stack mixer 14658 * of ALC660vd codecs, but for now there is only 3stack mixer
14587 * - and it is the same as in 861vd. 14659 * - and it is the same as in 861vd.
14588 * adc_nids in ALC660vd are (is) the same as in 861vd 14660 * adc_nids in ALC660vd are (is) the same as in 861vd
@@ -15027,7 +15099,7 @@ static void alc861vd_dallas_init_hook(struct hda_codec *codec)
15027#define alc861vd_loopbacks alc880_loopbacks 15099#define alc861vd_loopbacks alc880_loopbacks
15028#endif 15100#endif
15029 15101
15030/* pcm configuration: identiacal with ALC880 */ 15102/* pcm configuration: identical with ALC880 */
15031#define alc861vd_pcm_analog_playback alc880_pcm_analog_playback 15103#define alc861vd_pcm_analog_playback alc880_pcm_analog_playback
15032#define alc861vd_pcm_analog_capture alc880_pcm_analog_capture 15104#define alc861vd_pcm_analog_capture alc880_pcm_analog_capture
15033#define alc861vd_pcm_digital_playback alc880_pcm_digital_playback 15105#define alc861vd_pcm_digital_playback alc880_pcm_digital_playback
@@ -15206,7 +15278,7 @@ static void alc861vd_auto_init_hp_out(struct hda_codec *codec)
15206 hda_nid_t pin; 15278 hda_nid_t pin;
15207 15279
15208 pin = spec->autocfg.hp_pins[0]; 15280 pin = spec->autocfg.hp_pins[0];
15209 if (pin) /* connect to front and use dac 0 */ 15281 if (pin) /* connect to front and use dac 0 */
15210 alc861vd_auto_set_output_and_unmute(codec, pin, PIN_HP, 0); 15282 alc861vd_auto_set_output_and_unmute(codec, pin, PIN_HP, 0);
15211 pin = spec->autocfg.speaker_pins[0]; 15283 pin = spec->autocfg.speaker_pins[0];
15212 if (pin) 15284 if (pin)
@@ -16669,7 +16741,7 @@ static struct snd_kcontrol_new alc272_nc10_mixer[] = {
16669#endif 16741#endif
16670 16742
16671 16743
16672/* pcm configuration: identiacal with ALC880 */ 16744/* pcm configuration: identical with ALC880 */
16673#define alc662_pcm_analog_playback alc880_pcm_analog_playback 16745#define alc662_pcm_analog_playback alc880_pcm_analog_playback
16674#define alc662_pcm_analog_capture alc880_pcm_analog_capture 16746#define alc662_pcm_analog_capture alc880_pcm_analog_capture
16675#define alc662_pcm_digital_playback alc880_pcm_digital_playback 16747#define alc662_pcm_digital_playback alc880_pcm_digital_playback
diff --git a/sound/soc/txx9/txx9aclc.c b/sound/soc/txx9/txx9aclc.c
index fa336616152e..938a58a5a244 100644
--- a/sound/soc/txx9/txx9aclc.c
+++ b/sound/soc/txx9/txx9aclc.c
@@ -297,9 +297,9 @@ static int txx9aclc_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
297static bool filter(struct dma_chan *chan, void *param) 297static bool filter(struct dma_chan *chan, void *param)
298{ 298{
299 struct txx9aclc_dmadata *dmadata = param; 299 struct txx9aclc_dmadata *dmadata = param;
300 char devname[BUS_ID_SIZE + 2]; 300 char devname[20 + 2]; /* FIXME: old BUS_ID_SIZE + 2 */
301 301
302 sprintf(devname, "%s.%d", dmadata->dma_res->name, 302 snprintf(devname, sizeof(devname), "%s.%d", dmadata->dma_res->name,
303 (int)dmadata->dma_res->start); 303 (int)dmadata->dma_res->start);
304 if (strcmp(dev_name(chan->device->dev), devname) == 0) { 304 if (strcmp(dev_name(chan->device->dev), devname) == 0) {
305 chan->private = &dmadata->dma_slave; 305 chan->private = &dmadata->dma_slave;
diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c
index b14451342166..8f9b60c5d74c 100644
--- a/sound/usb/caiaq/audio.c
+++ b/sound/usb/caiaq/audio.c
@@ -199,8 +199,9 @@ static int snd_usb_caiaq_pcm_prepare(struct snd_pcm_substream *substream)
199 dev->period_out_count[index] = BYTES_PER_SAMPLE + 1; 199 dev->period_out_count[index] = BYTES_PER_SAMPLE + 1;
200 dev->audio_out_buf_pos[index] = BYTES_PER_SAMPLE + 1; 200 dev->audio_out_buf_pos[index] = BYTES_PER_SAMPLE + 1;
201 } else { 201 } else {
202 dev->period_in_count[index] = BYTES_PER_SAMPLE; 202 int in_pos = (dev->spec.data_alignment == 2) ? 0 : 2;
203 dev->audio_in_buf_pos[index] = BYTES_PER_SAMPLE; 203 dev->period_in_count[index] = BYTES_PER_SAMPLE + in_pos;
204 dev->audio_in_buf_pos[index] = BYTES_PER_SAMPLE + in_pos;
204 } 205 }
205 206
206 if (dev->streaming) 207 if (dev->streaming)
diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c
index 22406245a98b..0e5db719de24 100644
--- a/sound/usb/caiaq/device.c
+++ b/sound/usb/caiaq/device.c
@@ -35,7 +35,7 @@
35#include "input.h" 35#include "input.h"
36 36
37MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>"); 37MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
38MODULE_DESCRIPTION("caiaq USB audio, version 1.3.16"); 38MODULE_DESCRIPTION("caiaq USB audio, version 1.3.17");
39MODULE_LICENSE("GPL"); 39MODULE_LICENSE("GPL");
40MODULE_SUPPORTED_DEVICE("{{Native Instruments, RigKontrol2}," 40MODULE_SUPPORTED_DEVICE("{{Native Instruments, RigKontrol2},"
41 "{Native Instruments, RigKontrol3}," 41 "{Native Instruments, RigKontrol3},"