38 files changed, 367 insertions, 184 deletions
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index de8e10a94103..0d8d23581c44 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -139,6 +139,7 @@ ALC883/888
   acer			Acer laptops (Travelmate 3012WTMi, Aspire 5600, etc)
   acer-aspire		Acer Aspire 9810
   acer-aspire-4930g	Acer Aspire 4930G
+  acer-aspire-6530g	Acer Aspire 6530G
   acer-aspire-8930g	Acer Aspire 8930G
   medion		Medion Laptops
   medion-md2		Medion MD2
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 4829f96585b1..00a31deaa96e 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -146,7 +146,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 	/* If for any reason at all we couldn't handle the fault,
 	   make sure we exit gracefully rather than endlessly redo
 	   the fault.  */
-	fault = handle_mm_fault(mm, vma, address, cause > 0);
+	fault = handle_mm_fault(mm, vma, address, cause > 0 ? FAULT_FLAG_WRITE : 0);
 	up_read(&mm->mmap_sem);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
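The same one-line conversion repeats for every architecture below: the last argument of handle_mm_fault() changes from a 0/1 write_access value to a flags word, so each caller translates its local "was this a write fault?" predicate into FAULT_FLAG_WRITE. A minimal user-space model of the before/after calling convention (the flag value and the scaffolding around it are assumptions for illustration, not the kernel code itself):

	#include <stdio.h>

	#define FAULT_FLAG_WRITE 0x01	/* assumed to mirror include/linux/mm.h */

	/* Old convention: a boolean write_access argument. */
	static int handle_mm_fault_old(unsigned long address, int write_access)
	{
		return write_access ? 1 : 0;	/* stand-in for the real fault handling */
	}

	/* New convention: an extensible flags word. */
	static int handle_mm_fault_new(unsigned long address, unsigned int flags)
	{
		return (flags & FAULT_FLAG_WRITE) ? 1 : 0;
	}

	int main(void)
	{
		unsigned long addr = 0x1000;
		long cause = 1;		/* e.g. alpha: cause > 0 means a write fault */

		int old_ret = handle_mm_fault_old(addr, cause > 0);
		int new_ret = handle_mm_fault_new(addr, cause > 0 ? FAULT_FLAG_WRITE : 0);

		printf("%d %d\n", old_ret, new_ret);
		return 0;
	}

The point of the flags word is that further fault flags can be added later without touching every architecture again.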
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 0455557a2899..6fdcbb709827 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -208,7 +208,7 @@ good_area:
 	 * than endlessly redo the fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, fsr & (1 << 11));
+	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & (1 << 11)) ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index 62d4abbaa654..b61d86d3debf 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -133,7 +133,7 @@ good_area:
 	 * fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, address, writeaccess);
+	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index c4c76db90f9c..f925115e3250 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -163,7 +163,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
 	 * the fault.
 	 */
 
-	fault = handle_mm_fault(mm, vma, address, writeaccess & 1);
+	fault = handle_mm_fault(mm, vma, address, (writeaccess & 1) ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 05093d41d98e..30f5d100a81c 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -163,7 +163,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, ear0, write);
+	fault = handle_mm_fault(mm, vma, ear0, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 23088bed111e..19261a99e623 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -154,7 +154,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	 * sure we exit gracefully rather than endlessly redo the
 	 * fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) != 0);
+	fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		/*
 		 * We ran out of memory, or some other thing happened
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index 4a71df4c1b30..7274b47f4c22 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -196,7 +196,7 @@ survive:
 	 */
 	addr = (address & PAGE_MASK);
 	set_thread_fault_code(error_code);
-	fault = handle_mm_fault(mm, vma, addr, write);
+	fault = handle_mm_fault(mm, vma, addr, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index f493f03231d5..d0e35cf99fc6 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -155,7 +155,7 @@ good_area:
 	 */
 
 survive:
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 #ifdef DEBUG
 	printk("handle_mm_fault returns %d\n",fault);
 #endif
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index 5e67cd1fab40..956607a63f4c 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -232,7 +232,7 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, address, is_write);
+	fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 55767ad9f00e..6751ce9ede9e 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -102,7 +102,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
index 33cf25025dac..a62e1e138bc1 100644
--- a/arch/mn10300/mm/fault.c
+++ b/arch/mn10300/mm/fault.c
@@ -258,7 +258,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 92c7fa4ecc3f..bfb6dd6ab380 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -202,7 +202,7 @@ good_area:
 	 * fault.
 	 */
 
-	fault = handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) != 0);
+	fault = handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		/*
 		 * We hit a shared mapping outside of the file, or some
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 5beffc8f481e..830bef0a1131 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -302,7 +302,7 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	ret = handle_mm_fault(mm, vma, address, is_write);
+	ret = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(ret & VM_FAULT_ERROR)) {
 		if (ret & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/powerpc/platforms/cell/spu_fault.c b/arch/powerpc/platforms/cell/spu_fault.c
index 95d8dadf2d87..d06ba87f1a19 100644
--- a/arch/powerpc/platforms/cell/spu_fault.c
+++ b/arch/powerpc/platforms/cell/spu_fault.c
@@ -70,7 +70,7 @@ int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
 	}
 
 	ret = 0;
-	*flt = handle_mm_fault(mm, vma, ea, is_write);
+	*flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(*flt & VM_FAULT_ERROR)) {
 		if (*flt & VM_FAULT_OOM) {
 			ret = -ENOMEM;
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index b0b84c35b0ad..cb5d59eab0ee 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -66,7 +66,7 @@ static int __handle_fault(struct mm_struct *mm, unsigned long address,
 	}
 
 survive:
-	fault = handle_mm_fault(mm, vma, address, write_access);
+	fault = handle_mm_fault(mm, vma, address, write_access ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 220a152c836c..74eb26bf1970 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -352,7 +352,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM) {
 			up_read(&mm->mmap_sem);
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 2c50f80fc332..cc8ddbdf3d7a 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -133,7 +133,7 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, address, writeaccess);
+	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index 7876997ba19a..fcbb6e135cef 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -187,7 +187,7 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, address, writeaccess);
+	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 12e447fc8542..a5e30c642ee3 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -241,7 +241,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -484,7 +484,7 @@ good_area:
 		if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
 			goto bad_area;
 	}
-	switch (handle_mm_fault(mm, vma, address, write)) {
+	switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
 	case VM_FAULT_SIGBUS:
 	case VM_FAULT_OOM:
 		goto do_sigbus;
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 4ab8993b0863..e5620b27c8bf 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -398,7 +398,7 @@ good_area:
 			goto bad_area;
 	}
 
-	fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE));
+	fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 7384d8accfe7..637c6505dc00 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -65,7 +65,7 @@ good_area:
 	do {
 		int fault;
 
-		fault = handle_mm_fault(mm, vma, address, is_write);
+		fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
 		if (unlikely(fault & VM_FAULT_ERROR)) {
 			if (fault & VM_FAULT_OOM) {
 				goto out_of_memory;
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index caba99601703..eb0566e83319 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -845,7 +845,7 @@ ENTRY(aesni_cbc_enc)
  */
 ENTRY(aesni_cbc_dec)
 	cmp $16, LEN
-	jb .Lcbc_dec_ret
+	jb .Lcbc_dec_just_ret
 	mov 480(KEYP), KLEN
 	add $240, KEYP
 	movups (IVP), IV
@@ -891,6 +891,7 @@ ENTRY(aesni_cbc_dec)
 	add $16, OUTP
 	cmp $16, LEN
 	jge .Lcbc_dec_loop1
-	movups IV, (IVP)
 .Lcbc_dec_ret:
+	movups IV, (IVP)
+.Lcbc_dec_just_ret:
 	ret
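The assembly change above does two things: the IV register is written back to the caller's IV buffer whenever at least one block was actually decrypted, and a new .Lcbc_dec_just_ret label lets the short-input path (LEN < 16) return without storing an IV register that was never loaded. A rough C model of the fixed control flow, with the AES step itself omitted (the real code is x86 assembly; the names here are only illustrative):

	#include <string.h>

	#define AES_BLOCK_SIZE 16

	/* Sketch of aesni_cbc_dec()'s control flow after the fix: the caller's
	 * IV buffer is only updated on the path where blocks were processed. */
	static void cbc_dec_flow(unsigned char *out, const unsigned char *in,
				 size_t len, unsigned char *iv)
	{
		unsigned char cur_iv[AES_BLOCK_SIZE];

		if (len < AES_BLOCK_SIZE)
			return;				/* ".Lcbc_dec_just_ret": iv untouched */

		memcpy(cur_iv, iv, AES_BLOCK_SIZE);
		while (len >= AES_BLOCK_SIZE) {
			/* decrypt one block with cur_iv here (omitted) */
			memcpy(cur_iv, in, AES_BLOCK_SIZE);	/* next IV is this ciphertext block */
			in += AES_BLOCK_SIZE;
			out += AES_BLOCK_SIZE;
			len -= AES_BLOCK_SIZE;
		}
		memcpy(iv, cur_iv, AES_BLOCK_SIZE);	/* ".Lcbc_dec_ret": save IV for chaining */
	}

Saving the IV matters because the blkcipher walk may call the assembly routine several times for one request; the next call has to continue the CBC chain where the previous one stopped.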
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 4e663398f77f..c580c5ec1cad 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -198,6 +198,7 @@ static int ecb_encrypt(struct blkcipher_desc *desc,
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
@@ -221,6 +222,7 @@ static int ecb_decrypt(struct blkcipher_desc *desc,
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
@@ -266,6 +268,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc,
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
@@ -289,6 +292,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc,
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
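Each mode handler now strips CRYPTO_TFM_REQ_MAY_SLEEP before entering the kernel_fpu_begin()/kernel_fpu_end() section: once the FPU region has been entered, the blkcipher walk code must not attempt a sleeping allocation when it advances to the next chunk. A tiny user-space model of why the bit has to be cleared up front (the flag value is an illustrative stand-in, not the real crypto.h constant):

	#include <assert.h>

	#define REQ_MAY_SLEEP (1u << 9)	/* illustrative stand-in for CRYPTO_TFM_REQ_MAY_SLEEP */

	struct walk { unsigned int flags; };

	/* Stand-in for the walk step: it would only use a sleeping (GFP_KERNEL)
	 * allocation when the MAY_SLEEP bit is still set in the flags it was given. */
	static int walk_may_sleep(const struct walk *w)
	{
		return !!(w->flags & REQ_MAY_SLEEP);
	}

	int main(void)
	{
		struct walk w = { .flags = REQ_MAY_SLEEP };

		w.flags &= ~REQ_MAY_SLEEP;	/* the fix: clear the bit before the no-sleep region */
		/* kernel_fpu_begin() would start the region where sleeping is forbidden */
		assert(!walk_may_sleep(&w));
		/* kernel_fpu_end() */
		return 0;
	}

The crypto/fpu.c change below applies the same masking to the template that wraps other cipher implementations in an FPU section.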
diff --git a/arch/x86/crypto/fpu.c b/arch/x86/crypto/fpu.c
index 5f9781a3815f..daef6cd2b45d 100644
--- a/arch/x86/crypto/fpu.c
+++ b/arch/x86/crypto/fpu.c
@@ -48,7 +48,7 @@ static int crypto_fpu_encrypt(struct blkcipher_desc *desc_in,
 	struct blkcipher_desc desc = {
 		.tfm = child,
 		.info = desc_in->info,
-		.flags = desc_in->flags,
+		.flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
 	};
 
 	kernel_fpu_begin();
@@ -67,7 +67,7 @@ static int crypto_fpu_decrypt(struct blkcipher_desc *desc_in,
 	struct blkcipher_desc desc = {
 		.tfm = child,
 		.info = desc_in->info,
-		.flags = desc_in->flags,
+		.flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
 	};
 
 	kernel_fpu_begin();
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index c403526d5d15..78a5fff857be 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1113,7 +1113,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault:
 	 */
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		mm_fault_error(regs, error_code, address, fault);
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index bdd860d93f72..bc0733359a88 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -106,7 +106,7 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, address, is_write);
+	fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 87f92c39b5f0..a9952b1236b0 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -18,9 +18,22 @@
 #include <linux/percpu.h>
 #include <linux/smp.h>
 #include <asm/byteorder.h>
+#include <asm/processor.h>
 #include <asm/i387.h>
 #include "padlock.h"
 
+/*
+ * Number of data blocks actually fetched for each xcrypt insn.
+ * Processors with prefetch errata will fetch extra blocks.
+ */
+static unsigned int ecb_fetch_blocks = 2;
+#define MAX_ECB_FETCH_BLOCKS (8)
+#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)
+
+static unsigned int cbc_fetch_blocks = 1;
+#define MAX_CBC_FETCH_BLOCKS (4)
+#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)
+
 /* Control word. */
 struct cword {
 	unsigned int __attribute__ ((__packed__))
@@ -172,73 +185,111 @@ static inline void padlock_store_cword(struct cword *cword)
  * should be used only inside the irq_ts_save/restore() context
  */
 
-static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
-				  struct cword *control_word)
+static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
+				  struct cword *control_word, int count)
 {
 	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
-		      : "d"(control_word), "b"(key), "c"(1));
+		      : "d"(control_word), "b"(key), "c"(count));
+}
+
+static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key,
+				 u8 *iv, struct cword *control_word, int count)
+{
+	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
+		      : "+S" (input), "+D" (output), "+a" (iv)
+		      : "d" (control_word), "b" (key), "c" (count));
+	return iv;
 }
 
-static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword)
+static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key,
			   struct cword *cword, int count)
 {
-	u8 buf[AES_BLOCK_SIZE * 2 + PADLOCK_ALIGNMENT - 1];
+	/*
+	 * Padlock prefetches extra data so we must provide mapped input buffers.
+	 * Assume there are at least 16 bytes of stack already in use.
+	 */
+	u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
+	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
+
+	memcpy(tmp, in, count * AES_BLOCK_SIZE);
+	rep_xcrypt_ecb(tmp, out, key, cword, count);
+}
+
+static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key,
			  u8 *iv, struct cword *cword, int count)
+{
+	/*
+	 * Padlock prefetches extra data so we must provide mapped input buffers.
+	 * Assume there are at least 16 bytes of stack already in use.
+	 */
+	u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
 	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
 
-	memcpy(tmp, in, AES_BLOCK_SIZE);
-	padlock_xcrypt(tmp, out, key, cword);
+	memcpy(tmp, in, count * AES_BLOCK_SIZE);
+	return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
 }
 
-static inline void aes_crypt(const u8 *in, u8 *out, u32 *key,
-			     struct cword *cword)
+static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
			     struct cword *cword, int count)
 {
-	/* padlock_xcrypt requires at least two blocks of data. */
-	if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) &
-		       (PAGE_SIZE - 1)))) {
-		aes_crypt_copy(in, out, key, cword);
+	/* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
+	 * We could avoid some copying here but it's probably not worth it.
+	 */
+	if (unlikely(((unsigned long)in & PAGE_SIZE) + ecb_fetch_bytes > PAGE_SIZE)) {
+		ecb_crypt_copy(in, out, key, cword, count);
 		return;
 	}
 
-	padlock_xcrypt(in, out, key, cword);
+	rep_xcrypt_ecb(in, out, key, cword, count);
+}
+
+static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
			    u8 *iv, struct cword *cword, int count)
+{
+	/* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
+	if (unlikely(((unsigned long)in & PAGE_SIZE) + cbc_fetch_bytes > PAGE_SIZE))
+		return cbc_crypt_copy(in, out, key, iv, cword, count);
+
+	return rep_xcrypt_cbc(in, out, key, iv, cword, count);
 }
 
 static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				      void *control_word, u32 count)
 {
-	if (count == 1) {
-		aes_crypt(input, output, key, control_word);
+	u32 initial = count & (ecb_fetch_blocks - 1);
+
+	if (count < ecb_fetch_blocks) {
+		ecb_crypt(input, output, key, control_word, count);
 		return;
 	}
 
-	asm volatile ("test $1, %%cl;"
-		      "je 1f;"
-#ifndef CONFIG_X86_64
-		      "lea -1(%%ecx), %%eax;"
-		      "mov $1, %%ecx;"
-#else
-		      "lea -1(%%rcx), %%rax;"
-		      "mov $1, %%rcx;"
-#endif
-		      ".byte 0xf3,0x0f,0xa7,0xc8;"	/* rep xcryptecb */
-#ifndef CONFIG_X86_64
-		      "mov %%eax, %%ecx;"
-#else
-		      "mov %%rax, %%rcx;"
-#endif
-		      "1:"
-		      ".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
+	if (initial)
+		asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
			      : "+S"(input), "+D"(output)
			      : "d"(control_word), "b"(key), "c"(initial));
+
+	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
-		      : "d"(control_word), "b"(key), "c"(count)
-		      : "ax");
+		      : "d"(control_word), "b"(key), "c"(count - initial));
 }
 
 static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				     u8 *iv, void *control_word, u32 count)
 {
-	/* rep xcryptcbc */
-	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"
+	u32 initial = count & (cbc_fetch_blocks - 1);
+
+	if (count < cbc_fetch_blocks)
+		return cbc_crypt(input, output, key, iv, control_word, count);
+
+	if (initial)
+		asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
			      : "+S" (input), "+D" (output), "+a" (iv)
			      : "d" (control_word), "b" (key), "c" (count));
+
+	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
-		      : "d" (control_word), "b" (key), "c" (count));
+		      : "d" (control_word), "b" (key), "c" (count-initial));
 	return iv;
 }
 
@@ -249,7 +300,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 
 	padlock_reset_key(&ctx->cword.encrypt);
 	ts_state = irq_ts_save();
-	aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
+	ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
 	irq_ts_restore(ts_state);
 	padlock_store_cword(&ctx->cword.encrypt);
 }
@@ -261,7 +312,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 
 	padlock_reset_key(&ctx->cword.encrypt);
 	ts_state = irq_ts_save();
-	aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
+	ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
 	irq_ts_restore(ts_state);
 	padlock_store_cword(&ctx->cword.encrypt);
 }
@@ -454,6 +505,7 @@ static struct crypto_alg cbc_aes_alg = {
 static int __init padlock_init(void)
 {
 	int ret;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 
 	if (!cpu_has_xcrypt) {
 		printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
@@ -476,6 +528,12 @@ static int __init padlock_init(void)
 
 	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
 
+	if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
+		ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
+		cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
+		printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
+	}
+
 out:
 	return ret;
 
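The workaround splits each request so the hardware's over-fetch can never run past the end of the source buffer: requests shorter than one fetch unit are bounced through an aligned on-stack buffer, and longer requests are issued as a small leading chunk followed by a remainder that is a multiple of the fetch size. A user-space model of the ECB split arithmetic (fetch sizes as in the patch; everything else is illustrative):

	#include <stdio.h>

	/* ecb_fetch_blocks is 8 on the affected VIA Nano stepping, 2 otherwise. */
	static unsigned int ecb_fetch_blocks = 8;

	static void split_request(unsigned int count)
	{
		/* count & (fetch - 1) works because the fetch size is a power of two. */
		unsigned int initial = count & (ecb_fetch_blocks - 1);

		if (count < ecb_fetch_blocks) {
			printf("%u block(s): copy through the aligned bounce buffer\n", count);
			return;
		}
		if (initial)
			printf("first xcrypt: %u block(s)\n", initial);
		printf("second xcrypt: %u block(s), a multiple of %u\n",
		       count - initial, ecb_fetch_blocks);
	}

	int main(void)
	{
		split_request(3);	/* short request: bounce-buffer path */
		split_request(13);	/* 5 + 8 */
		split_request(16);	/* 0 + 16 */
		return 0;
	}

The cbc_fetch_blocks handling follows the same idea, with the extra twist that the CBC instruction also carries the IV in a register that has to be threaded through both chunks.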
diff --git a/include/linux/mm.h b/include/linux/mm.h
index cf260d848eb9..d006e93d5c93 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -810,11 +810,11 @@ extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
 
 #ifdef CONFIG_MMU
 extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-			unsigned long address, int write_access);
+			unsigned long address, unsigned int flags);
 #else
 static inline int handle_mm_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
-			int write_access)
+			unsigned int flags)
 {
 	/* should never happen if there's no MMU */
 	BUG();
diff --git a/ipc/util.h b/ipc/util.h
index ab3ebf2621b9..764b51a37a6a 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -10,6 +10,7 @@
 #ifndef _IPC_UTIL_H
 #define _IPC_UTIL_H
 
+#include <linux/unistd.h>
 #include <linux/err.h>
 
 #define SEQ_MULTIPLIER	(IPCMNI)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 6b0c2d8a2129..23067ab1a73c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -472,7 +472,7 @@ config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
 	select STACKTRACE
-	select FRAME_POINTER if !X86 && !MIPS && !PPC && !ARM_UNWIND && !S390
+	select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390
 	select KALLSYMS
 	select KALLSYMS_ALL
 
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index ad65fc0317d9..3b93129a968c 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -262,11 +262,12 @@ static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
 		 */
 		matches += 1;
 		match_lvl = 0;
-		entry->size == ref->size ? ++match_lvl : match_lvl;
-		entry->type == ref->type ? ++match_lvl : match_lvl;
-		entry->direction == ref->direction ? ++match_lvl : match_lvl;
+		entry->size == ref->size ? ++match_lvl : 0;
+		entry->type == ref->type ? ++match_lvl : 0;
+		entry->direction == ref->direction ? ++match_lvl : 0;
+		entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
 
-		if (match_lvl == 3) {
+		if (match_lvl == 4) {
 			/* perfect-fit - return the result */
 			return entry;
 		} else if (match_lvl > last_lvl) {
@@ -873,72 +874,68 @@ static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
			"[addr=%p] [size=%llu]\n", addr, size);
 }
 
-static void check_sync(struct device *dev, dma_addr_t addr,
-		       u64 size, u64 offset, int direction, bool to_cpu)
+static void check_sync(struct device *dev,
+		       struct dma_debug_entry *ref,
+		       bool to_cpu)
 {
-	struct dma_debug_entry ref = {
-		.dev            = dev,
-		.dev_addr       = addr,
-		.size           = size,
-		.direction      = direction,
-	};
 	struct dma_debug_entry *entry;
 	struct hash_bucket *bucket;
 	unsigned long flags;
 
-	bucket = get_hash_bucket(&ref, &flags);
+	bucket = get_hash_bucket(ref, &flags);
 
-	entry = hash_bucket_find(bucket, &ref);
+	entry = hash_bucket_find(bucket, ref);
 
 	if (!entry) {
 		err_printk(dev, NULL, "DMA-API: device driver tries "
			   "to sync DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
-			   (unsigned long long)addr, size);
+			   (unsigned long long)ref->dev_addr, ref->size);
 		goto out;
 	}
 
-	if ((offset + size) > entry->size) {
+	if (ref->size > entry->size) {
 		err_printk(dev, entry, "DMA-API: device driver syncs"
			   " DMA memory outside allocated range "
			   "[device address=0x%016llx] "
-			   "[allocation size=%llu bytes] [sync offset=%llu] "
-			   "[sync size=%llu]\n", entry->dev_addr, entry->size,
-			   offset, size);
+			   "[allocation size=%llu bytes] "
+			   "[sync offset+size=%llu]\n",
+			   entry->dev_addr, entry->size,
+			   ref->size);
 	}
 
-	if (direction != entry->direction) {
+	if (ref->direction != entry->direction) {
 		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
-			   (unsigned long long)addr, entry->size,
+			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
-			   dir2name[direction]);
+			   dir2name[ref->direction]);
 	}
 
 	if (entry->direction == DMA_BIDIRECTIONAL)
 		goto out;
 
 	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
-	    !(direction == DMA_TO_DEVICE))
+	    !(ref->direction == DMA_TO_DEVICE))
 		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "device read-only DMA memory for cpu "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
-			   (unsigned long long)addr, entry->size,
+			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
-			   dir2name[direction]);
+			   dir2name[ref->direction]);
 
 	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
-	    !(direction == DMA_FROM_DEVICE))
+	    !(ref->direction == DMA_FROM_DEVICE))
 		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "device write-only DMA memory to device "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
-			   (unsigned long long)addr, entry->size,
+			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
-			   dir2name[direction]);
+			   dir2name[ref->direction]);
 
 out:
 	put_hash_bucket(bucket, &flags);
@@ -1036,19 +1033,16 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
 }
 EXPORT_SYMBOL(debug_dma_map_sg);
 
-static int get_nr_mapped_entries(struct device *dev, struct scatterlist *s)
+static int get_nr_mapped_entries(struct device *dev,
+				 struct dma_debug_entry *ref)
 {
-	struct dma_debug_entry *entry, ref;
+	struct dma_debug_entry *entry;
 	struct hash_bucket *bucket;
 	unsigned long flags;
 	int mapped_ents;
 
-	ref.dev      = dev;
-	ref.dev_addr = sg_dma_address(s);
-	ref.size     = sg_dma_len(s),
-
-	bucket       = get_hash_bucket(&ref, &flags);
-	entry        = hash_bucket_find(bucket, &ref);
+	bucket       = get_hash_bucket(ref, &flags);
+	entry        = hash_bucket_find(bucket, ref);
 	mapped_ents  = 0;
 
 	if (entry)
@@ -1076,16 +1070,14 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			.dev_addr       = sg_dma_address(s),
			.size           = sg_dma_len(s),
			.direction      = dir,
-			.sg_call_ents   = 0,
+			.sg_call_ents   = nelems,
 		};
 
 		if (mapped_ents && i >= mapped_ents)
 			break;
 
-		if (!i) {
-			ref.sg_call_ents = nelems;
-			mapped_ents = get_nr_mapped_entries(dev, s);
-		}
+		if (!i)
+			mapped_ents = get_nr_mapped_entries(dev, &ref);
 
 		check_unmap(&ref);
 	}
@@ -1140,10 +1132,19 @@ EXPORT_SYMBOL(debug_dma_free_coherent);
 void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, 0, direction, true);
+	ref.type         = dma_debug_single;
+	ref.dev          = dev;
+	ref.dev_addr     = dma_handle;
+	ref.size         = size;
+	ref.direction    = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, true);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
 
@@ -1151,10 +1152,19 @@ void debug_dma_sync_single_for_device(struct device *dev,
				      dma_addr_t dma_handle, size_t size,
				      int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, 0, direction, false);
+	ref.type         = dma_debug_single;
+	ref.dev          = dev;
+	ref.dev_addr     = dma_handle;
+	ref.size         = size;
+	ref.direction    = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, false);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_for_device);
 
@@ -1163,10 +1173,19 @@ void debug_dma_sync_single_range_for_cpu(struct device *dev,
					 unsigned long offset, size_t size,
					 int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, offset, direction, true);
+	ref.type         = dma_debug_single;
+	ref.dev          = dev;
+	ref.dev_addr     = dma_handle;
+	ref.size         = offset + size;
+	ref.direction    = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, true);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
 
@@ -1175,10 +1194,19 @@ void debug_dma_sync_single_range_for_device(struct device *dev,
					    unsigned long offset,
					    size_t size, int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, offset, direction, false);
+	ref.type         = dma_debug_single;
+	ref.dev          = dev;
+	ref.dev_addr     = dma_handle;
+	ref.size         = offset + size;
+	ref.direction    = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, false);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
 
@@ -1192,14 +1220,24 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 		return;
 
 	for_each_sg(sg, s, nelems, i) {
+
+		struct dma_debug_entry ref = {
+			.type           = dma_debug_sg,
+			.dev            = dev,
+			.paddr          = sg_phys(s),
+			.dev_addr       = sg_dma_address(s),
+			.size           = sg_dma_len(s),
+			.direction      = direction,
+			.sg_call_ents   = nelems,
+		};
+
 		if (!i)
-			mapped_ents = get_nr_mapped_entries(dev, s);
+			mapped_ents = get_nr_mapped_entries(dev, &ref);
 
 		if (i >= mapped_ents)
 			break;
 
-		check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
-			   direction, true);
+		check_sync(dev, &ref, true);
 	}
 }
 EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
@@ -1214,14 +1252,23 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 		return;
 
 	for_each_sg(sg, s, nelems, i) {
+
+		struct dma_debug_entry ref = {
+			.type           = dma_debug_sg,
+			.dev            = dev,
+			.paddr          = sg_phys(s),
+			.dev_addr       = sg_dma_address(s),
+			.size           = sg_dma_len(s),
+			.direction      = direction,
+			.sg_call_ents   = nelems,
+		};
 		if (!i)
-			mapped_ents = get_nr_mapped_entries(dev, s);
+			mapped_ents = get_nr_mapped_entries(dev, &ref);
 
 		if (i >= mapped_ents)
 			break;
 
-		check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
-			   direction, false);
+		check_sync(dev, &ref, false);
 	}
 }
 EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
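hash_bucket_find() now scores a candidate entry by how many of four fields match the reference (size, type, direction and, newly, sg_call_ents), and only a full score of 4 counts as an exact fit; lower scores are kept as the best approximate match seen so far. The same scoring written out as plain C for clarity (a model of the logic, not the kernel function itself):

	#include <stdio.h>

	struct dma_debug_entry {
		unsigned long long size;
		int type, direction, sg_call_ents;
	};

	/* Count matching fields; 4 means a perfect fit, anything smaller is only
	 * a candidate that may be reported as the closest match. */
	static int match_level(const struct dma_debug_entry *entry,
			       const struct dma_debug_entry *ref)
	{
		int match_lvl = 0;

		if (entry->size == ref->size)                 ++match_lvl;
		if (entry->type == ref->type)                 ++match_lvl;
		if (entry->direction == ref->direction)       ++match_lvl;
		if (entry->sg_call_ents == ref->sg_call_ents) ++match_lvl;

		return match_lvl;
	}

	int main(void)
	{
		struct dma_debug_entry entry = { 64, 1, 1, 4 };
		struct dma_debug_entry ref   = { 64, 1, 1, 0 };

		printf("match level: %d of 4\n", match_level(&entry, &ref));
		return 0;
	}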
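check_sync() now takes a fully populated struct dma_debug_entry built by each caller instead of loose (addr, size, offset, direction) arguments, so the hash lookup and every diagnostic read the same fields that the mapping path recorded, including type and sg_call_ents. A reduced model of the new calling convention (the struct layout and values are simplified for illustration):

	#include <stdbool.h>
	#include <stdio.h>

	/* Cut-down stand-in for the reference record handed to check_sync(). */
	struct dma_debug_entry {
		int                type;	/* dma_debug_single, dma_debug_sg, ... */
		unsigned long long dev_addr;
		unsigned long long size;
		int                direction;
		int                sg_call_ents;
	};

	static void check_sync(const struct dma_debug_entry *ref, bool to_cpu)
	{
		/* lookups and error messages all read ref->dev_addr, ref->size, ... */
		printf("sync %s: addr=0x%llx size=%llu dir=%d\n",
		       to_cpu ? "for cpu" : "for device",
		       ref->dev_addr, ref->size, ref->direction);
	}

	int main(void)
	{
		/* A caller describes the whole sync operation once, in one structure. */
		struct dma_debug_entry ref = {
			.type         = 0,	/* dma_debug_single in the real enum */
			.dev_addr     = 0x1000,
			.size         = 256,
			.direction    = 1,	/* DMA_TO_DEVICE in the real enum */
			.sg_call_ents = 0,
		};

		check_sync(&ref, true);
		return 0;
	}

The callers further down build exactly such a reference, with the range variants folding the offset into ref.size (size = offset + size) so the out-of-range check stays a single comparison against entry->size.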
diff --git a/mm/memory.c b/mm/memory.c
index d5d1653d60a6..98bcb90d5957 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1310,8 +1310,9 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		cond_resched();
 		while (!(page = follow_page(vma, start, foll_flags))) {
 			int ret;
-			ret = handle_mm_fault(mm, vma, start,
-					foll_flags & FOLL_WRITE);
+
+			/* FOLL_WRITE matches FAULT_FLAG_WRITE! */
+			ret = handle_mm_fault(mm, vma, start, foll_flags & FOLL_WRITE);
 			if (ret & VM_FAULT_ERROR) {
 				if (ret & VM_FAULT_OOM)
 					return i ? i : -ENOMEM;
@@ -2496,7 +2497,7 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
  */
 static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
-		int write_access, pte_t orig_pte)
+		unsigned int flags, pte_t orig_pte)
 {
 	spinlock_t *ptl;
 	struct page *page;
@@ -2572,9 +2573,9 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	inc_mm_counter(mm, anon_rss);
 	pte = mk_pte(page, vma->vm_page_prot);
-	if (write_access && reuse_swap_page(page)) {
+	if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
-		write_access = 0;
+		flags &= ~FAULT_FLAG_WRITE;
 	}
 	flush_icache_page(vma, page);
 	set_pte_at(mm, address, page_table, pte);
@@ -2587,7 +2588,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		try_to_free_swap(page);
 	unlock_page(page);
 
-	if (write_access) {
+	if (flags & FAULT_FLAG_WRITE) {
 		ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
 		if (ret & VM_FAULT_ERROR)
 			ret &= VM_FAULT_ERROR;
@@ -2616,7 +2617,7 @@ out_page:
  */
 static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
-		int write_access)
+		unsigned int flags)
 {
 	struct page *page;
 	spinlock_t *ptl;
@@ -2776,7 +2777,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
	 * due to the bad i386 page protection. But it's valid
	 * for other architectures too.
	 *
-	 * Note that if write_access is true, we either now have
+	 * Note that if FAULT_FLAG_WRITE is set, we either now have
	 * an exclusive copy of the page, or this is a shared mapping,
	 * so we can make it writable and dirty to avoid having to
	 * handle that later.
@@ -2847,11 +2848,10 @@ unwritable_page:
 
 static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
-		int write_access, pte_t orig_pte)
+		unsigned int flags, pte_t orig_pte)
 {
 	pgoff_t pgoff = (((address & PAGE_MASK)
			- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
-	unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0);
 
 	pte_unmap(page_table);
 	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
@@ -2868,12 +2868,12 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  */
 static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
-		int write_access, pte_t orig_pte)
+		unsigned int flags, pte_t orig_pte)
 {
-	unsigned int flags = FAULT_FLAG_NONLINEAR |
-				(write_access ? FAULT_FLAG_WRITE : 0);
 	pgoff_t pgoff;
 
+	flags |= FAULT_FLAG_NONLINEAR;
+
 	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
 		return 0;
 
@@ -2904,7 +2904,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  */
 static inline int handle_pte_fault(struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long address,
-		pte_t *pte, pmd_t *pmd, int write_access)
+		pte_t *pte, pmd_t *pmd, unsigned int flags)
 {
 	pte_t entry;
 	spinlock_t *ptl;
@@ -2915,30 +2915,30 @@ static inline int handle_pte_fault(struct mm_struct *mm,
 		if (vma->vm_ops) {
 			if (likely(vma->vm_ops->fault))
 				return do_linear_fault(mm, vma, address,
-						pte, pmd, write_access, entry);
+						pte, pmd, flags, entry);
 		}
 		return do_anonymous_page(mm, vma, address,
-					 pte, pmd, write_access);
+					 pte, pmd, flags);
 	}
 	if (pte_file(entry))
 		return do_nonlinear_fault(mm, vma, address,
-					pte, pmd, write_access, entry);
+					pte, pmd, flags, entry);
 	return do_swap_page(mm, vma, address,
-				pte, pmd, write_access, entry);
+				pte, pmd, flags, entry);
 	}
 
 	ptl = pte_lockptr(mm, pmd);
 	spin_lock(ptl);
 	if (unlikely(!pte_same(*pte, entry)))
 		goto unlock;
-	if (write_access) {
+	if (flags & FAULT_FLAG_WRITE) {
 		if (!pte_write(entry))
 			return do_wp_page(mm, vma, address,
					pte, pmd, ptl, entry);
 		entry = pte_mkdirty(entry);
 	}
 	entry = pte_mkyoung(entry);
-	if (ptep_set_access_flags(vma, address, pte, entry, write_access)) {
+	if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
 		update_mmu_cache(vma, address, entry);
 	} else {
 		/*
@@ -2947,7 +2947,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
		 * This still avoids useless tlb flushes for .text page faults
		 * with threads.
		 */
-		if (write_access)
+		if (flags & FAULT_FLAG_WRITE)
			flush_tlb_page(vma, address);
 	}
 unlock:
@@ -2959,7 +2959,7 @@ unlock:
  * By the time we get here, we already hold the mm semaphore
  */
 int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
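The new comment only holds because FOLL_WRITE and FAULT_FLAG_WRITE happen to have the same bit value, which is what lets foll_flags & FOLL_WRITE be passed straight through as the fault flags. A hedged compile-time check of that dependency (both values here are assumptions that mirror the mm headers of this series, not something defined by this diff):

	#define FOLL_WRITE		0x01	/* assumed; see include/linux/mm.h */
	#define FAULT_FLAG_WRITE	0x01	/* assumed; see include/linux/mm.h */

	/* The pass-through in __get_user_pages() is only valid while these two
	 * masks stay equal; a static assertion documents the dependency. */
	_Static_assert(FOLL_WRITE == FAULT_FLAG_WRITE,
		       "FOLL_WRITE must match FAULT_FLAG_WRITE");

	int main(void)
	{
		unsigned int foll_flags = FOLL_WRITE;
		unsigned int fault_flags = foll_flags & FOLL_WRITE;

		return fault_flags == FAULT_FLAG_WRITE ? 0 : 1;
	}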
| 2962 | unsigned long address, int write_access) | 2962 | unsigned long address, unsigned int flags) |
| 2963 | { | 2963 | { |
| 2964 | pgd_t *pgd; | 2964 | pgd_t *pgd; |
| 2965 | pud_t *pud; | 2965 | pud_t *pud; |
| @@ -2971,7 +2971,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 2971 | count_vm_event(PGFAULT); | 2971 | count_vm_event(PGFAULT); |
| 2972 | 2972 | ||
| 2973 | if (unlikely(is_vm_hugetlb_page(vma))) | 2973 | if (unlikely(is_vm_hugetlb_page(vma))) |
| 2974 | return hugetlb_fault(mm, vma, address, write_access); | 2974 | return hugetlb_fault(mm, vma, address, flags); |
| 2975 | 2975 | ||
| 2976 | pgd = pgd_offset(mm, address); | 2976 | pgd = pgd_offset(mm, address); |
| 2977 | pud = pud_alloc(mm, pgd, address); | 2977 | pud = pud_alloc(mm, pgd, address); |
| @@ -2984,7 +2984,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 2984 | if (!pte) | 2984 | if (!pte) |
| 2985 | return VM_FAULT_OOM; | 2985 | return VM_FAULT_OOM; |
| 2986 | 2986 | ||
| 2987 | return handle_pte_fault(mm, vma, address, pte, pmd, write_access); | 2987 | return handle_pte_fault(mm, vma, address, pte, pmd, flags); |
| 2988 | } | 2988 | } |
| 2989 | 2989 | ||
| 2990 | #ifndef __PAGETABLE_PUD_FOLDED | 2990 | #ifndef __PAGETABLE_PUD_FOLDED |
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 562403a23488..462e2cedaa6a 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c | |||
| @@ -972,8 +972,6 @@ int /*__devinit*/ snd_hda_codec_new(struct hda_bus *bus, unsigned int codec_addr | |||
| 972 | snd_hda_codec_read(codec, nid, 0, | 972 | snd_hda_codec_read(codec, nid, 0, |
| 973 | AC_VERB_GET_SUBSYSTEM_ID, 0); | 973 | AC_VERB_GET_SUBSYSTEM_ID, 0); |
| 974 | } | 974 | } |
| 975 | if (bus->modelname) | ||
| 976 | codec->modelname = kstrdup(bus->modelname, GFP_KERNEL); | ||
| 977 | 975 | ||
| 978 | /* power-up all before initialization */ | 976 | /* power-up all before initialization */ |
| 979 | hda_set_power_state(codec, | 977 | hda_set_power_state(codec, |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index d22b26068014..bf4b78a74a8f 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
| @@ -224,6 +224,7 @@ enum { | |||
| 224 | ALC883_ACER, | 224 | ALC883_ACER, |
| 225 | ALC883_ACER_ASPIRE, | 225 | ALC883_ACER_ASPIRE, |
| 226 | ALC888_ACER_ASPIRE_4930G, | 226 | ALC888_ACER_ASPIRE_4930G, |
| 227 | ALC888_ACER_ASPIRE_6530G, | ||
| 227 | ALC888_ACER_ASPIRE_8930G, | 228 | ALC888_ACER_ASPIRE_8930G, |
| 228 | ALC883_MEDION, | 229 | ALC883_MEDION, |
| 229 | ALC883_MEDION_MD2, | 230 | ALC883_MEDION_MD2, |
| @@ -970,7 +971,7 @@ static void alc_automute_pin(struct hda_codec *codec) | |||
| 970 | } | 971 | } |
| 971 | } | 972 | } |
| 972 | 973 | ||
| 973 | #if 0 /* it's broken in some acses -- temporarily disabled */ | 974 | #if 0 /* it's broken in some cases -- temporarily disabled */ |
| 974 | static void alc_mic_automute(struct hda_codec *codec) | 975 | static void alc_mic_automute(struct hda_codec *codec) |
| 975 | { | 976 | { |
| 976 | struct alc_spec *spec = codec->spec; | 977 | struct alc_spec *spec = codec->spec; |
| @@ -1170,7 +1171,7 @@ static int alc_subsystem_id(struct hda_codec *codec, | |||
| 1170 | 1171 | ||
| 1171 | /* invalid SSID, check the special NID pin defcfg instead */ | 1172 | /* invalid SSID, check the special NID pin defcfg instead */ |
| 1172 | /* | 1173 | /* |
| 1173 | * 31~30 : port conetcivity | 1174 | * 31~30 : port connectivity |
| 1174 | * 29~21 : reserve | 1175 | * 29~21 : reserve |
| 1175 | * 20 : PCBEEP input | 1176 | * 20 : PCBEEP input |
| 1176 | * 19~16 : Check sum (15:1) | 1177 | * 19~16 : Check sum (15:1) |
| @@ -1471,6 +1472,25 @@ static struct hda_verb alc888_acer_aspire_4930g_verbs[] = { | |||
| 1471 | }; | 1472 | }; |
| 1472 | 1473 | ||
| 1473 | /* | 1474 | /* |
| 1475 | * ALC888 Acer Aspire 6530G model | ||
| 1476 | */ | ||
| 1477 | |||
| 1478 | static struct hda_verb alc888_acer_aspire_6530g_verbs[] = { | ||
| 1479 | /* Bias voltage on for external mic port */ | ||
| 1480 | {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN | PIN_VREF80}, | ||
| 1481 | /* Enable unsolicited event for HP jack */ | ||
| 1482 | {0x15, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN}, | ||
| 1483 | /* Enable speaker output */ | ||
| 1484 | {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT}, | ||
| 1485 | {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, | ||
| 1486 | /* Enable headphone output */ | ||
| 1487 | {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT | PIN_HP}, | ||
| 1488 | {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, | ||
| 1489 | {0x15, AC_VERB_SET_CONNECT_SEL, 0x00}, | ||
| 1490 | { } | ||
| 1491 | }; | ||
| 1492 | |||
| 1493 | /* | ||
| 1474 | * ALC889 Acer Aspire 8930G model | 1494 | * ALC889 Acer Aspire 8930G model |
| 1475 | */ | 1495 | */ |
| 1476 | 1496 | ||
| @@ -1544,6 +1564,25 @@ static struct hda_input_mux alc888_2_capture_sources[2] = { | |||
| 1544 | } | 1564 | } |
| 1545 | }; | 1565 | }; |
| 1546 | 1566 | ||
| 1567 | static struct hda_input_mux alc888_acer_aspire_6530_sources[2] = { | ||
| 1568 | /* Internal mic only available on one ADC */ | ||
| 1569 | { | ||
| 1570 | .num_items = 3, | ||
| 1571 | .items = { | ||
| 1572 | { "Ext Mic", 0x0 }, | ||
| 1573 | { "CD", 0x4 }, | ||
| 1574 | { "Int Mic", 0xb }, | ||
| 1575 | }, | ||
| 1576 | }, | ||
| 1577 | { | ||
| 1578 | .num_items = 2, | ||
| 1579 | .items = { | ||
| 1580 | { "Ext Mic", 0x0 }, | ||
| 1581 | { "CD", 0x4 }, | ||
| 1582 | }, | ||
| 1583 | } | ||
| 1584 | }; | ||
| 1585 | |||
| 1547 | static struct hda_input_mux alc889_capture_sources[3] = { | 1586 | static struct hda_input_mux alc889_capture_sources[3] = { |
| 1548 | /* Digital mic only available on first "ADC" */ | 1587 | /* Digital mic only available on first "ADC" */ |
| 1549 | { | 1588 | { |
| @@ -6347,7 +6386,7 @@ static struct hda_channel_mode alc882_sixstack_modes[2] = { | |||
| 6347 | }; | 6386 | }; |
| 6348 | 6387 | ||
| 6349 | /* | 6388 | /* |
| 6350 | * macbook pro ALC885 can switch LineIn to LineOut without loosing Mic | 6389 | * macbook pro ALC885 can switch LineIn to LineOut without losing Mic |
| 6351 | */ | 6390 | */ |
| 6352 | 6391 | ||
| 6353 | /* | 6392 | /* |
| @@ -7047,7 +7086,7 @@ static struct hda_verb alc882_auto_init_verbs[] = { | |||
| 7047 | #define alc882_loopbacks alc880_loopbacks | 7086 | #define alc882_loopbacks alc880_loopbacks |
| 7048 | #endif | 7087 | #endif |
| 7049 | 7088 | ||
| 7050 | /* pcm configuration: identiacal with ALC880 */ | 7089 | /* pcm configuration: identical with ALC880 */ |
| 7051 | #define alc882_pcm_analog_playback alc880_pcm_analog_playback | 7090 | #define alc882_pcm_analog_playback alc880_pcm_analog_playback |
| 7052 | #define alc882_pcm_analog_capture alc880_pcm_analog_capture | 7091 | #define alc882_pcm_analog_capture alc880_pcm_analog_capture |
| 7053 | #define alc882_pcm_digital_playback alc880_pcm_digital_playback | 7092 | #define alc882_pcm_digital_playback alc880_pcm_digital_playback |
| @@ -8068,7 +8107,7 @@ static struct snd_kcontrol_new alc883_fivestack_mixer[] = { | |||
| 8068 | { } /* end */ | 8107 | { } /* end */ |
| 8069 | }; | 8108 | }; |
| 8070 | 8109 | ||
| 8071 | static struct snd_kcontrol_new alc883_tagra_mixer[] = { | 8110 | static struct snd_kcontrol_new alc883_targa_mixer[] = { |
| 8072 | HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), | 8111 | HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), |
| 8073 | HDA_CODEC_MUTE("Headphone Playback Switch", 0x14, 0x0, HDA_OUTPUT), | 8112 | HDA_CODEC_MUTE("Headphone Playback Switch", 0x14, 0x0, HDA_OUTPUT), |
| 8074 | HDA_CODEC_MUTE("Front Playback Switch", 0x1b, 0x0, HDA_OUTPUT), | 8113 | HDA_CODEC_MUTE("Front Playback Switch", 0x1b, 0x0, HDA_OUTPUT), |
| @@ -8088,7 +8127,7 @@ static struct snd_kcontrol_new alc883_tagra_mixer[] = { | |||
| 8088 | { } /* end */ | 8127 | { } /* end */ |
| 8089 | }; | 8128 | }; |
| 8090 | 8129 | ||
| 8091 | static struct snd_kcontrol_new alc883_tagra_2ch_mixer[] = { | 8130 | static struct snd_kcontrol_new alc883_targa_2ch_mixer[] = { |
| 8092 | HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), | 8131 | HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), |
| 8093 | HDA_CODEC_MUTE("Headphone Playback Switch", 0x14, 0x0, HDA_OUTPUT), | 8132 | HDA_CODEC_MUTE("Headphone Playback Switch", 0x14, 0x0, HDA_OUTPUT), |
| 8094 | HDA_CODEC_MUTE("Front Playback Switch", 0x1b, 0x0, HDA_OUTPUT), | 8133 | HDA_CODEC_MUTE("Front Playback Switch", 0x1b, 0x0, HDA_OUTPUT), |
| @@ -8153,6 +8192,19 @@ static struct snd_kcontrol_new alc883_acer_aspire_mixer[] = { | |||
| 8153 | { } /* end */ | 8192 | { } /* end */ |
| 8154 | }; | 8193 | }; |
| 8155 | 8194 | ||
| 8195 | static struct snd_kcontrol_new alc888_acer_aspire_6530_mixer[] = { | ||
| 8196 | HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), | ||
| 8197 | HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT), | ||
| 8198 | HDA_CODEC_VOLUME("LFE Playback Volume", 0x0f, 0x0, HDA_OUTPUT), | ||
| 8199 | HDA_BIND_MUTE("LFE Playback Switch", 0x0f, 2, HDA_INPUT), | ||
| 8200 | HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT), | ||
| 8201 | HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT), | ||
| 8202 | HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), | ||
| 8203 | HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), | ||
| 8204 | HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), | ||
| 8205 | { } /* end */ | ||
| 8206 | }; | ||
| 8207 | |||
| 8156 | static struct snd_kcontrol_new alc888_lenovo_sky_mixer[] = { | 8208 | static struct snd_kcontrol_new alc888_lenovo_sky_mixer[] = { |
| 8157 | HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), | 8209 | HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), |
| 8158 | HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT), | 8210 | HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT), |
| @@ -8417,7 +8469,7 @@ static struct hda_verb alc883_2ch_fujitsu_pi2515_verbs[] = { | |||
| 8417 | { } /* end */ | 8469 | { } /* end */ |
| 8418 | }; | 8470 | }; |
| 8419 | 8471 | ||
| 8420 | static struct hda_verb alc883_tagra_verbs[] = { | 8472 | static struct hda_verb alc883_targa_verbs[] = { |
| 8421 | {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, | 8473 | {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, |
| 8422 | {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)}, | 8474 | {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)}, |
| 8423 | 8475 | ||
| @@ -8626,8 +8678,8 @@ static void alc883_medion_md2_init_hook(struct hda_codec *codec) | |||
| 8626 | } | 8678 | } |
| 8627 | 8679 | ||
| 8628 | /* toggle speaker-output according to the hp-jack state */ | 8680 | /* toggle speaker-output according to the hp-jack state */ |
| 8629 | #define alc883_tagra_init_hook alc882_targa_init_hook | 8681 | #define alc883_targa_init_hook alc882_targa_init_hook |
| 8630 | #define alc883_tagra_unsol_event alc882_targa_unsol_event | 8682 | #define alc883_targa_unsol_event alc882_targa_unsol_event |
| 8631 | 8683 | ||
| 8632 | static void alc883_clevo_m720_mic_automute(struct hda_codec *codec) | 8684 | static void alc883_clevo_m720_mic_automute(struct hda_codec *codec) |
| 8633 | { | 8685 | { |
| @@ -8957,7 +9009,7 @@ static void alc889A_mb31_unsol_event(struct hda_codec *codec, unsigned int res) | |||
| 8957 | #define alc883_loopbacks alc880_loopbacks | 9009 | #define alc883_loopbacks alc880_loopbacks |
| 8958 | #endif | 9010 | #endif |
| 8959 | 9011 | ||
| 8960 | /* pcm configuration: identiacal with ALC880 */ | 9012 | /* pcm configuration: identical with ALC880 */ |
| 8961 | #define alc883_pcm_analog_playback alc880_pcm_analog_playback | 9013 | #define alc883_pcm_analog_playback alc880_pcm_analog_playback |
| 8962 | #define alc883_pcm_analog_capture alc880_pcm_analog_capture | 9014 | #define alc883_pcm_analog_capture alc880_pcm_analog_capture |
| 8963 | #define alc883_pcm_analog_alt_capture alc880_pcm_analog_alt_capture | 9015 | #define alc883_pcm_analog_alt_capture alc880_pcm_analog_alt_capture |
| @@ -8978,6 +9030,7 @@ static const char *alc883_models[ALC883_MODEL_LAST] = { | |||
| 8978 | [ALC883_ACER] = "acer", | 9030 | [ALC883_ACER] = "acer", |
| 8979 | [ALC883_ACER_ASPIRE] = "acer-aspire", | 9031 | [ALC883_ACER_ASPIRE] = "acer-aspire", |
| 8980 | [ALC888_ACER_ASPIRE_4930G] = "acer-aspire-4930g", | 9032 | [ALC888_ACER_ASPIRE_4930G] = "acer-aspire-4930g", |
| 9033 | [ALC888_ACER_ASPIRE_6530G] = "acer-aspire-6530g", | ||
| 8981 | [ALC888_ACER_ASPIRE_8930G] = "acer-aspire-8930g", | 9034 | [ALC888_ACER_ASPIRE_8930G] = "acer-aspire-8930g", |
| 8982 | [ALC883_MEDION] = "medion", | 9035 | [ALC883_MEDION] = "medion", |
| 8983 | [ALC883_MEDION_MD2] = "medion-md2", | 9036 | [ALC883_MEDION_MD2] = "medion-md2", |
| @@ -9021,7 +9074,7 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = { | |||
| 9021 | SND_PCI_QUIRK(0x1025, 0x015e, "Acer Aspire 6930G", | 9074 | SND_PCI_QUIRK(0x1025, 0x015e, "Acer Aspire 6930G", |
| 9022 | ALC888_ACER_ASPIRE_4930G), | 9075 | ALC888_ACER_ASPIRE_4930G), |
| 9023 | SND_PCI_QUIRK(0x1025, 0x0166, "Acer Aspire 6530G", | 9076 | SND_PCI_QUIRK(0x1025, 0x0166, "Acer Aspire 6530G", |
| 9024 | ALC888_ACER_ASPIRE_4930G), | 9077 | ALC888_ACER_ASPIRE_6530G), |
| 9025 | /* default Acer -- disabled as it causes more problems. | 9078 | /* default Acer -- disabled as it causes more problems. |
| 9026 | * model=auto should work fine now | 9079 | * model=auto should work fine now |
| 9027 | */ | 9080 | */ |
| @@ -9069,6 +9122,7 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = { | |||
| 9069 | SND_PCI_QUIRK(0x1462, 0x7267, "MSI", ALC883_3ST_6ch_DIG), | 9122 | SND_PCI_QUIRK(0x1462, 0x7267, "MSI", ALC883_3ST_6ch_DIG), |
| 9070 | SND_PCI_QUIRK(0x1462, 0x7280, "MSI", ALC883_6ST_DIG), | 9123 | SND_PCI_QUIRK(0x1462, 0x7280, "MSI", ALC883_6ST_DIG), |
| 9071 | SND_PCI_QUIRK(0x1462, 0x7327, "MSI", ALC883_6ST_DIG), | 9124 | SND_PCI_QUIRK(0x1462, 0x7327, "MSI", ALC883_6ST_DIG), |
| 9125 | SND_PCI_QUIRK(0x1462, 0x7350, "MSI", ALC883_6ST_DIG), | ||
| 9072 | SND_PCI_QUIRK(0x1462, 0xa422, "MSI", ALC883_TARGA_2ch_DIG), | 9126 | SND_PCI_QUIRK(0x1462, 0xa422, "MSI", ALC883_TARGA_2ch_DIG), |
| 9073 | SND_PCI_QUIRK(0x147b, 0x1083, "Abit IP35-PRO", ALC883_6ST_DIG), | 9127 | SND_PCI_QUIRK(0x147b, 0x1083, "Abit IP35-PRO", ALC883_6ST_DIG), |
| 9074 | SND_PCI_QUIRK(0x1558, 0x0721, "Clevo laptop M720R", ALC883_CLEVO_M720), | 9128 | SND_PCI_QUIRK(0x1558, 0x0721, "Clevo laptop M720R", ALC883_CLEVO_M720), |
| @@ -9165,8 +9219,8 @@ static struct alc_config_preset alc883_presets[] = { | |||
| 9165 | .input_mux = &alc883_capture_source, | 9219 | .input_mux = &alc883_capture_source, |
| 9166 | }, | 9220 | }, |
| 9167 | [ALC883_TARGA_DIG] = { | 9221 | [ALC883_TARGA_DIG] = { |
| 9168 | .mixers = { alc883_tagra_mixer, alc883_chmode_mixer }, | 9222 | .mixers = { alc883_targa_mixer, alc883_chmode_mixer }, |
| 9169 | .init_verbs = { alc883_init_verbs, alc883_tagra_verbs}, | 9223 | .init_verbs = { alc883_init_verbs, alc883_targa_verbs}, |
| 9170 | .num_dacs = ARRAY_SIZE(alc883_dac_nids), | 9224 | .num_dacs = ARRAY_SIZE(alc883_dac_nids), |
| 9171 | .dac_nids = alc883_dac_nids, | 9225 | .dac_nids = alc883_dac_nids, |
| 9172 | .dig_out_nid = ALC883_DIGOUT_NID, | 9226 | .dig_out_nid = ALC883_DIGOUT_NID, |
| @@ -9174,12 +9228,12 @@ static struct alc_config_preset alc883_presets[] = { | |||
| 9174 | .channel_mode = alc883_3ST_6ch_modes, | 9228 | .channel_mode = alc883_3ST_6ch_modes, |
| 9175 | .need_dac_fix = 1, | 9229 | .need_dac_fix = 1, |
| 9176 | .input_mux = &alc883_capture_source, | 9230 | .input_mux = &alc883_capture_source, |
| 9177 | .unsol_event = alc883_tagra_unsol_event, | 9231 | .unsol_event = alc883_targa_unsol_event, |
| 9178 | .init_hook = alc883_tagra_init_hook, | 9232 | .init_hook = alc883_targa_init_hook, |
| 9179 | }, | 9233 | }, |
| 9180 | [ALC883_TARGA_2ch_DIG] = { | 9234 | [ALC883_TARGA_2ch_DIG] = { |
| 9181 | .mixers = { alc883_tagra_2ch_mixer}, | 9235 | .mixers = { alc883_targa_2ch_mixer}, |
| 9182 | .init_verbs = { alc883_init_verbs, alc883_tagra_verbs}, | 9236 | .init_verbs = { alc883_init_verbs, alc883_targa_verbs}, |
| 9183 | .num_dacs = ARRAY_SIZE(alc883_dac_nids), | 9237 | .num_dacs = ARRAY_SIZE(alc883_dac_nids), |
| 9184 | .dac_nids = alc883_dac_nids, | 9238 | .dac_nids = alc883_dac_nids, |
| 9185 | .adc_nids = alc883_adc_nids_alt, | 9239 | .adc_nids = alc883_adc_nids_alt, |
| @@ -9188,13 +9242,13 @@ static struct alc_config_preset alc883_presets[] = { | |||
| 9188 | .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes), | 9242 | .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes), |
| 9189 | .channel_mode = alc883_3ST_2ch_modes, | 9243 | .channel_mode = alc883_3ST_2ch_modes, |
| 9190 | .input_mux = &alc883_capture_source, | 9244 | .input_mux = &alc883_capture_source, |
| 9191 | .unsol_event = alc883_tagra_unsol_event, | 9245 | .unsol_event = alc883_targa_unsol_event, |
| 9192 | .init_hook = alc883_tagra_init_hook, | 9246 | .init_hook = alc883_targa_init_hook, |
| 9193 | }, | 9247 | }, |
| 9194 | [ALC883_TARGA_8ch_DIG] = { | 9248 | [ALC883_TARGA_8ch_DIG] = { |
| 9195 | .mixers = { alc883_base_mixer, alc883_chmode_mixer }, | 9249 | .mixers = { alc883_base_mixer, alc883_chmode_mixer }, |
| 9196 | .init_verbs = { alc883_init_verbs, alc880_gpio3_init_verbs, | 9250 | .init_verbs = { alc883_init_verbs, alc880_gpio3_init_verbs, |
| 9197 | alc883_tagra_verbs }, | 9251 | alc883_targa_verbs }, |
| 9198 | .num_dacs = ARRAY_SIZE(alc883_dac_nids), | 9252 | .num_dacs = ARRAY_SIZE(alc883_dac_nids), |
| 9199 | .dac_nids = alc883_dac_nids, | 9253 | .dac_nids = alc883_dac_nids, |
| 9200 | .num_adc_nids = ARRAY_SIZE(alc883_adc_nids_rev), | 9254 | .num_adc_nids = ARRAY_SIZE(alc883_adc_nids_rev), |
| @@ -9206,8 +9260,8 @@ static struct alc_config_preset alc883_presets[] = { | |||
| 9206 | .channel_mode = alc883_4ST_8ch_modes, | 9260 | .channel_mode = alc883_4ST_8ch_modes, |
| 9207 | .need_dac_fix = 1, | 9261 | .need_dac_fix = 1, |
| 9208 | .input_mux = &alc883_capture_source, | 9262 | .input_mux = &alc883_capture_source, |
| 9209 | .unsol_event = alc883_tagra_unsol_event, | 9263 | .unsol_event = alc883_targa_unsol_event, |
| 9210 | .init_hook = alc883_tagra_init_hook, | 9264 | .init_hook = alc883_targa_init_hook, |
| 9211 | }, | 9265 | }, |
| 9212 | [ALC883_ACER] = { | 9266 | [ALC883_ACER] = { |
| 9213 | .mixers = { alc883_base_mixer }, | 9267 | .mixers = { alc883_base_mixer }, |
| @@ -9255,6 +9309,24 @@ static struct alc_config_preset alc883_presets[] = { | |||
| 9255 | .unsol_event = alc_automute_amp_unsol_event, | 9309 | .unsol_event = alc_automute_amp_unsol_event, |
| 9256 | .init_hook = alc888_acer_aspire_4930g_init_hook, | 9310 | .init_hook = alc888_acer_aspire_4930g_init_hook, |
| 9257 | }, | 9311 | }, |
| 9312 | [ALC888_ACER_ASPIRE_6530G] = { | ||
| 9313 | .mixers = { alc888_acer_aspire_6530_mixer }, | ||
| 9314 | .init_verbs = { alc883_init_verbs, alc880_gpio1_init_verbs, | ||
| 9315 | alc888_acer_aspire_6530g_verbs }, | ||
| 9316 | .num_dacs = ARRAY_SIZE(alc883_dac_nids), | ||
| 9317 | .dac_nids = alc883_dac_nids, | ||
| 9318 | .num_adc_nids = ARRAY_SIZE(alc883_adc_nids_rev), | ||
| 9319 | .adc_nids = alc883_adc_nids_rev, | ||
| 9320 | .capsrc_nids = alc883_capsrc_nids_rev, | ||
| 9321 | .dig_out_nid = ALC883_DIGOUT_NID, | ||
| 9322 | .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes), | ||
| 9323 | .channel_mode = alc883_3ST_2ch_modes, | ||
| 9324 | .num_mux_defs = | ||
| 9325 | ARRAY_SIZE(alc888_2_capture_sources), | ||
| 9326 | .input_mux = alc888_acer_aspire_6530_sources, | ||
| 9327 | .unsol_event = alc_automute_amp_unsol_event, | ||
| 9328 | .init_hook = alc888_acer_aspire_4930g_init_hook, | ||
| 9329 | }, | ||
| 9258 | [ALC888_ACER_ASPIRE_8930G] = { | 9330 | [ALC888_ACER_ASPIRE_8930G] = { |
| 9259 | .mixers = { alc888_base_mixer, | 9331 | .mixers = { alc888_base_mixer, |
| 9260 | alc883_chmode_mixer }, | 9332 | alc883_chmode_mixer }, |
| @@ -9361,7 +9433,7 @@ static struct alc_config_preset alc883_presets[] = { | |||
| 9361 | .init_hook = alc888_lenovo_ms7195_front_automute, | 9433 | .init_hook = alc888_lenovo_ms7195_front_automute, |
| 9362 | }, | 9434 | }, |
| 9363 | [ALC883_HAIER_W66] = { | 9435 | [ALC883_HAIER_W66] = { |
| 9364 | .mixers = { alc883_tagra_2ch_mixer}, | 9436 | .mixers = { alc883_targa_2ch_mixer}, |
| 9365 | .init_verbs = { alc883_init_verbs, alc883_haier_w66_verbs}, | 9437 | .init_verbs = { alc883_init_verbs, alc883_haier_w66_verbs}, |
| 9366 | .num_dacs = ARRAY_SIZE(alc883_dac_nids), | 9438 | .num_dacs = ARRAY_SIZE(alc883_dac_nids), |
| 9367 | .dac_nids = alc883_dac_nids, | 9439 | .dac_nids = alc883_dac_nids, |
| @@ -11131,7 +11203,7 @@ static struct hda_verb alc262_toshiba_rx1_unsol_verbs[] = { | |||
| 11131 | #define alc262_loopbacks alc880_loopbacks | 11203 | #define alc262_loopbacks alc880_loopbacks |
| 11132 | #endif | 11204 | #endif |
| 11133 | 11205 | ||
| 11134 | /* pcm configuration: identiacal with ALC880 */ | 11206 | /* pcm configuration: identical with ALC880 */ |
| 11135 | #define alc262_pcm_analog_playback alc880_pcm_analog_playback | 11207 | #define alc262_pcm_analog_playback alc880_pcm_analog_playback |
| 11136 | #define alc262_pcm_analog_capture alc880_pcm_analog_capture | 11208 | #define alc262_pcm_analog_capture alc880_pcm_analog_capture |
| 11137 | #define alc262_pcm_digital_playback alc880_pcm_digital_playback | 11209 | #define alc262_pcm_digital_playback alc880_pcm_digital_playback |
| @@ -12286,7 +12358,7 @@ static void alc268_auto_init_mono_speaker_out(struct hda_codec *codec) | |||
| 12286 | AC_VERB_SET_AMP_GAIN_MUTE, dac_vol2); | 12358 | AC_VERB_SET_AMP_GAIN_MUTE, dac_vol2); |
| 12287 | } | 12359 | } |
| 12288 | 12360 | ||
| 12289 | /* pcm configuration: identiacal with ALC880 */ | 12361 | /* pcm configuration: identical with ALC880 */ |
| 12290 | #define alc268_pcm_analog_playback alc880_pcm_analog_playback | 12362 | #define alc268_pcm_analog_playback alc880_pcm_analog_playback |
| 12291 | #define alc268_pcm_analog_capture alc880_pcm_analog_capture | 12363 | #define alc268_pcm_analog_capture alc880_pcm_analog_capture |
| 12292 | #define alc268_pcm_analog_alt_capture alc880_pcm_analog_alt_capture | 12364 | #define alc268_pcm_analog_alt_capture alc880_pcm_analog_alt_capture |
| @@ -13197,7 +13269,7 @@ static int alc269_auto_create_analog_input_ctls(struct alc_spec *spec, | |||
| 13197 | #define alc269_loopbacks alc880_loopbacks | 13269 | #define alc269_loopbacks alc880_loopbacks |
| 13198 | #endif | 13270 | #endif |
| 13199 | 13271 | ||
| 13200 | /* pcm configuration: identiacal with ALC880 */ | 13272 | /* pcm configuration: identical with ALC880 */ |
| 13201 | #define alc269_pcm_analog_playback alc880_pcm_analog_playback | 13273 | #define alc269_pcm_analog_playback alc880_pcm_analog_playback |
| 13202 | #define alc269_pcm_analog_capture alc880_pcm_analog_capture | 13274 | #define alc269_pcm_analog_capture alc880_pcm_analog_capture |
| 13203 | #define alc269_pcm_digital_playback alc880_pcm_digital_playback | 13275 | #define alc269_pcm_digital_playback alc880_pcm_digital_playback |
| @@ -14059,7 +14131,7 @@ static void alc861_toshiba_unsol_event(struct hda_codec *codec, | |||
| 14059 | alc861_toshiba_automute(codec); | 14131 | alc861_toshiba_automute(codec); |
| 14060 | } | 14132 | } |
| 14061 | 14133 | ||
| 14062 | /* pcm configuration: identiacal with ALC880 */ | 14134 | /* pcm configuration: identical with ALC880 */ |
| 14063 | #define alc861_pcm_analog_playback alc880_pcm_analog_playback | 14135 | #define alc861_pcm_analog_playback alc880_pcm_analog_playback |
| 14064 | #define alc861_pcm_analog_capture alc880_pcm_analog_capture | 14136 | #define alc861_pcm_analog_capture alc880_pcm_analog_capture |
| 14065 | #define alc861_pcm_digital_playback alc880_pcm_digital_playback | 14137 | #define alc861_pcm_digital_playback alc880_pcm_digital_playback |
| @@ -14582,7 +14654,7 @@ static hda_nid_t alc861vd_dac_nids[4] = { | |||
| 14582 | 14654 | ||
| 14583 | /* dac_nids for ALC660vd are in a different order - according to | 14655 | /* dac_nids for ALC660vd are in a different order - according to |
| 14584 | * Realtek's driver. | 14656 | * Realtek's driver. |
| 14585 | * This should probably tesult in a different mixer for 6stack models | 14657 | * This should probably result in a different mixer for 6stack models |
| 14586 | * of ALC660vd codecs, but for now there is only 3stack mixer | 14658 | * of ALC660vd codecs, but for now there is only 3stack mixer |
| 14587 | * - and it is the same as in 861vd. | 14659 | * - and it is the same as in 861vd. |
| 14588 | * adc_nids in ALC660vd are (is) the same as in 861vd | 14660 | * adc_nids in ALC660vd are (is) the same as in 861vd |
| @@ -15027,7 +15099,7 @@ static void alc861vd_dallas_init_hook(struct hda_codec *codec) | |||
| 15027 | #define alc861vd_loopbacks alc880_loopbacks | 15099 | #define alc861vd_loopbacks alc880_loopbacks |
| 15028 | #endif | 15100 | #endif |
| 15029 | 15101 | ||
| 15030 | /* pcm configuration: identiacal with ALC880 */ | 15102 | /* pcm configuration: identical with ALC880 */ |
| 15031 | #define alc861vd_pcm_analog_playback alc880_pcm_analog_playback | 15103 | #define alc861vd_pcm_analog_playback alc880_pcm_analog_playback |
| 15032 | #define alc861vd_pcm_analog_capture alc880_pcm_analog_capture | 15104 | #define alc861vd_pcm_analog_capture alc880_pcm_analog_capture |
| 15033 | #define alc861vd_pcm_digital_playback alc880_pcm_digital_playback | 15105 | #define alc861vd_pcm_digital_playback alc880_pcm_digital_playback |
| @@ -15206,7 +15278,7 @@ static void alc861vd_auto_init_hp_out(struct hda_codec *codec) | |||
| 15206 | hda_nid_t pin; | 15278 | hda_nid_t pin; |
| 15207 | 15279 | ||
| 15208 | pin = spec->autocfg.hp_pins[0]; | 15280 | pin = spec->autocfg.hp_pins[0]; |
| 15209 | if (pin) /* connect to front and use dac 0 */ | 15281 | if (pin) /* connect to front and use dac 0 */ |
| 15210 | alc861vd_auto_set_output_and_unmute(codec, pin, PIN_HP, 0); | 15282 | alc861vd_auto_set_output_and_unmute(codec, pin, PIN_HP, 0); |
| 15211 | pin = spec->autocfg.speaker_pins[0]; | 15283 | pin = spec->autocfg.speaker_pins[0]; |
| 15212 | if (pin) | 15284 | if (pin) |
| @@ -16669,7 +16741,7 @@ static struct snd_kcontrol_new alc272_nc10_mixer[] = { | |||
| 16669 | #endif | 16741 | #endif |
| 16670 | 16742 | ||
| 16671 | 16743 | ||
| 16672 | /* pcm configuration: identiacal with ALC880 */ | 16744 | /* pcm configuration: identical with ALC880 */ |
| 16673 | #define alc662_pcm_analog_playback alc880_pcm_analog_playback | 16745 | #define alc662_pcm_analog_playback alc880_pcm_analog_playback |
| 16674 | #define alc662_pcm_analog_capture alc880_pcm_analog_capture | 16746 | #define alc662_pcm_analog_capture alc880_pcm_analog_capture |
| 16675 | #define alc662_pcm_digital_playback alc880_pcm_digital_playback | 16747 | #define alc662_pcm_digital_playback alc880_pcm_digital_playback |
diff --git a/sound/soc/txx9/txx9aclc.c b/sound/soc/txx9/txx9aclc.c index fa336616152e..938a58a5a244 100644 --- a/sound/soc/txx9/txx9aclc.c +++ b/sound/soc/txx9/txx9aclc.c | |||
| @@ -297,9 +297,9 @@ static int txx9aclc_pcm_new(struct snd_card *card, struct snd_soc_dai *dai, | |||
| 297 | static bool filter(struct dma_chan *chan, void *param) | 297 | static bool filter(struct dma_chan *chan, void *param) |
| 298 | { | 298 | { |
| 299 | struct txx9aclc_dmadata *dmadata = param; | 299 | struct txx9aclc_dmadata *dmadata = param; |
| 300 | char devname[BUS_ID_SIZE + 2]; | 300 | char devname[20 + 2]; /* FIXME: old BUS_ID_SIZE + 2 */ |
| 301 | 301 | ||
| 302 | sprintf(devname, "%s.%d", dmadata->dma_res->name, | 302 | snprintf(devname, sizeof(devname), "%s.%d", dmadata->dma_res->name, |
| 303 | (int)dmadata->dma_res->start); | 303 | (int)dmadata->dma_res->start); |
| 304 | if (strcmp(dev_name(chan->device->dev), devname) == 0) { | 304 | if (strcmp(dev_name(chan->device->dev), devname) == 0) { |
| 305 | chan->private = &dmadata->dma_slave; | 305 | chan->private = &dmadata->dma_slave; |
diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c index b14451342166..8f9b60c5d74c 100644 --- a/sound/usb/caiaq/audio.c +++ b/sound/usb/caiaq/audio.c | |||
| @@ -199,8 +199,9 @@ static int snd_usb_caiaq_pcm_prepare(struct snd_pcm_substream *substream) | |||
| 199 | dev->period_out_count[index] = BYTES_PER_SAMPLE + 1; | 199 | dev->period_out_count[index] = BYTES_PER_SAMPLE + 1; |
| 200 | dev->audio_out_buf_pos[index] = BYTES_PER_SAMPLE + 1; | 200 | dev->audio_out_buf_pos[index] = BYTES_PER_SAMPLE + 1; |
| 201 | } else { | 201 | } else { |
| 202 | dev->period_in_count[index] = BYTES_PER_SAMPLE; | 202 | int in_pos = (dev->spec.data_alignment == 2) ? 0 : 2; |
| 203 | dev->audio_in_buf_pos[index] = BYTES_PER_SAMPLE; | 203 | dev->period_in_count[index] = BYTES_PER_SAMPLE + in_pos; |
| 204 | dev->audio_in_buf_pos[index] = BYTES_PER_SAMPLE + in_pos; | ||
| 204 | } | 205 | } |
| 205 | 206 | ||
| 206 | if (dev->streaming) | 207 | if (dev->streaming) |
diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c index 22406245a98b..0e5db719de24 100644 --- a/sound/usb/caiaq/device.c +++ b/sound/usb/caiaq/device.c | |||
| @@ -35,7 +35,7 @@ | |||
| 35 | #include "input.h" | 35 | #include "input.h" |
| 36 | 36 | ||
| 37 | MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>"); | 37 | MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>"); |
| 38 | MODULE_DESCRIPTION("caiaq USB audio, version 1.3.16"); | 38 | MODULE_DESCRIPTION("caiaq USB audio, version 1.3.17"); |
| 39 | MODULE_LICENSE("GPL"); | 39 | MODULE_LICENSE("GPL"); |
| 40 | MODULE_SUPPORTED_DEVICE("{{Native Instruments, RigKontrol2}," | 40 | MODULE_SUPPORTED_DEVICE("{{Native Instruments, RigKontrol2}," |
| 41 | "{Native Instruments, RigKontrol3}," | 41 | "{Native Instruments, RigKontrol3}," |
