150 files changed, 1309 insertions, 1065 deletions
diff --git a/Documentation/kobject.txt b/Documentation/kobject.txt
index bf3256e04027..51a8021ee532 100644
--- a/Documentation/kobject.txt
+++ b/Documentation/kobject.txt
@@ -305,7 +305,7 @@ should not be manipulated by any other user.
 
 A kset keeps its children in a standard kernel linked list. Kobjects point
 back to their containing kset via their kset field. In almost all cases,
-the kobjects belonging to a ket have that kset (or, strictly, its embedded
+the kobjects belonging to a kset have that kset (or, strictly, its embedded
 kobject) in their parent.
 
 As a kset contains a kobject within it, it should always be dynamically
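As an aside to the kobject.txt hunk above, here is a minimal sketch of the kset/kobject relationship the corrected sentence describes. It is illustrative only and not part of this series; the names example_kset, example_obj and example_ktype are made up, and it assumes the kset_create_and_add()/kobject_init_and_add() API that kobject.txt documents.

#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/slab.h>

struct example_obj {
        struct kobject kobj;
};

static struct kset *example_kset;

static void example_release(struct kobject *kobj)
{
        kfree(container_of(kobj, struct example_obj, kobj));
}

static struct kobj_type example_ktype = {
        .release = example_release,
};

static int __init example_init(void)
{
        struct example_obj *obj;
        int ret;

        /* The kset's embedded kobject appears under /sys/kernel/. */
        example_kset = kset_create_and_add("example_kset", NULL, kernel_kobj);
        if (!example_kset)
                return -ENOMEM;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj) {
                kset_unregister(example_kset);
                return -ENOMEM;
        }

        /* The child records its containing kset in its kset field ... */
        obj->kobj.kset = example_kset;

        /* ... and with a NULL parent, the kset's embedded kobject becomes
         * the parent, which is what the corrected sentence above says. */
        ret = kobject_init_and_add(&obj->kobj, &example_ktype, NULL, "member");
        if (ret)
                kobject_put(&obj->kobj);
        return ret;
}
module_init(example_init);
MODULE_LICENSE("GPL");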
diff --git a/Documentation/laptops/thinkpad-acpi.txt b/Documentation/laptops/thinkpad-acpi.txt
index 01c6c3d8a7e3..64b3f146e4b0 100644
--- a/Documentation/laptops/thinkpad-acpi.txt
+++ b/Documentation/laptops/thinkpad-acpi.txt
@@ -503,7 +503,7 @@ generate input device EV_KEY events.
 In addition to the EV_KEY events, thinkpad-acpi may also issue EV_SW
 events for switches:
 
-SW_RADIO	T60 and later hardare rfkill rocker switch
+SW_RFKILL_ALL	T60 and later hardare rfkill rocker switch
 SW_TABLET_MODE	Tablet ThinkPads HKEY events 0x5009 and 0x500A
 
 Non hot-key ACPI HKEY event map:
diff --git a/MAINTAINERS b/MAINTAINERS
index cb71eb47c331..9d4304266043 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -228,21 +228,21 @@ ACPI BATTERY DRIVERS
 P: Alexey Starikovskiy
 M: astarikovskiy@suse.de
 L: linux-acpi@vger.kernel.org
-W: http://acpi.sourceforge.net/
+W: http://www.lesswatts.org/projects/acpi/
 S: Supported
 
 ACPI EC DRIVER
 P: Alexey Starikovskiy
 M: astarikovskiy@suse.de
 L: linux-acpi@vger.kernel.org
-W: http://acpi.sourceforge.net/
+W: http://www.lesswatts.org/projects/acpi/
 S: Supported
 
 ACPI FAN DRIVER
 P: Len Brown
 M: len.brown@intel.com
 L: linux-acpi@vger.kernel.org
-W: http://acpi.sourceforge.net/
+W: http://www.lesswatts.org/projects/acpi/
 S: Supported
 
 ACPI PCI HOTPLUG DRIVER
@@ -255,14 +255,14 @@ ACPI THERMAL DRIVER
 P: Len Brown
 M: len.brown@intel.com
 L: linux-acpi@vger.kernel.org
-W: http://acpi.sourceforge.net/
+W: http://www.lesswatts.org/projects/acpi/
 S: Supported
 
 ACPI VIDEO DRIVER
 P: Rui Zhang
 M: rui.zhang@intel.com
 L: linux-acpi@vger.kernel.org
-W: http://acpi.sourceforge.net/
+W: http://www.lesswatts.org/projects/acpi/
 S: Supported
 
 ACPI WMI DRIVER
@@ -3658,13 +3658,6 @@ M: romieu@fr.zoreil.com
 L: netdev@vger.kernel.org
 S: Maintained
 
-SIS 5513 IDE CONTROLLER DRIVER
-P: Lionel Bouton
-M: Lionel.Bouton@inet6.fr
-W: http://inet6.dyn.dhs.org/sponsoring/sis5513/index.html
-W: http://gyver.homeip.net/sis5513/index.html
-S: Maintained
-
 SIS 900/7016 FAST ETHERNET DRIVER
 P: Daniele Venzano
 M: venza@brownhat.org
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 853d1f11be00..43687cc60dfb 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -465,7 +465,6 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
 printk(KERN_ERR
 "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
 len, slit->header.length);
-memset(numa_slit, 10, sizeof(numa_slit));
 return;
 }
 slit_table = slit;
@@ -574,8 +573,14 @@ void __init acpi_numa_arch_fixup(void)
 printk(KERN_INFO "Number of memory chunks in system = %d\n",
 num_node_memblks);
 
-if (!slit_table)
+if (!slit_table) {
+for (i = 0; i < MAX_NUMNODES; i++)
+for (j = 0; j < MAX_NUMNODES; j++)
+node_distance(i, j) = i == j ? LOCAL_DISTANCE :
+REMOTE_DISTANCE;
 return;
+}
+
 memset(numa_slit, -1, sizeof(numa_slit));
 for (i = 0; i < slit_table->locality_count; i++) {
 if (!pxm_bit_test(i))
diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c
index 351bf70da463..7f1a858bc69f 100644
--- a/arch/ia64/kvm/mmio.c
+++ b/arch/ia64/kvm/mmio.c
@@ -159,7 +159,8 @@ static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest,
 
 if (p->u.ioreq.state == STATE_IORESP_READY) {
 if (dir == IOREQ_READ)
-*dest = p->u.ioreq.data;
+/* it's necessary to ensure zero extending */
+*dest = p->u.ioreq.data & (~0UL >> (64-(s*8)));
 } else
 panic_vm(vcpu);
 out:
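A side note on the masking in the new mmio_access() line above (illustration only, not from the patch): ~0UL >> (64 - (s*8)) keeps the low s bytes of the 64-bit I/O result, so a 1-, 2- or 4-byte read is zero-extended instead of carrying stale high bits. A small stand-alone check of the mask values, using ~0ULL so it behaves the same on 32-bit hosts:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* s is the access size in bytes, as in the hunk above. */
        for (int s = 1; s <= 8; s <<= 1) {
                uint64_t mask = ~0ULL >> (64 - s * 8);
                /* prints 0xff, 0xffff, 0xffffffff, 0xffffffffffffffff */
                printf("s=%d mask=0x%llx\n", s, (unsigned long long)mask);
        }
        return 0;
}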
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index f5d7a5eab96e..75dff7cfa814 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -116,8 +116,6 @@ static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
 struct tlbe *stlbe = &vcpu->arch.shadow_tlb[index];
 struct page *page = vcpu->arch.shadow_pages[index];
 
-kunmap(vcpu->arch.shadow_pages[index]);
-
 if (get_tlb_v(stlbe)) {
 if (kvmppc_44x_tlbe_is_writable(stlbe))
 kvm_release_page_dirty(page);
@@ -144,18 +142,19 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
 stlbe = &vcpu->arch.shadow_tlb[victim];
 
 /* Get reference to new page. */
-down_write(&current->mm->mmap_sem);
+down_read(&current->mm->mmap_sem);
 new_page = gfn_to_page(vcpu->kvm, gfn);
 if (is_error_page(new_page)) {
-printk(KERN_ERR "Couldn't get guest page!\n");
+printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn);
 kvm_release_page_clean(new_page);
+up_read(&current->mm->mmap_sem);
 return;
 }
 hpaddr = page_to_phys(new_page);
 
 /* Drop reference to old page. */
 kvmppc_44x_shadow_release(vcpu, victim);
-up_write(&current->mm->mmap_sem);
+up_read(&current->mm->mmap_sem);
 
 vcpu->arch.shadow_pages[victim] = new_page;
 
diff --git a/arch/powerpc/kvm/booke_guest.c b/arch/powerpc/kvm/booke_guest.c
index 712d89a28c46..9c8ad850c6e3 100644
--- a/arch/powerpc/kvm/booke_guest.c
+++ b/arch/powerpc/kvm/booke_guest.c
@@ -227,39 +227,6 @@ void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu)
 }
 }
 
-static int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
-{
-enum emulation_result er;
-int r;
-
-er = kvmppc_emulate_instruction(run, vcpu);
-switch (er) {
-case EMULATE_DONE:
-/* Future optimization: only reload non-volatiles if they were
-* actually modified. */
-r = RESUME_GUEST_NV;
-break;
-case EMULATE_DO_MMIO:
-run->exit_reason = KVM_EXIT_MMIO;
-/* We must reload nonvolatiles because "update" load/store
-* instructions modify register state. */
-/* Future optimization: only reload non-volatiles if they were
-* actually modified. */
-r = RESUME_HOST_NV;
-break;
-case EMULATE_FAIL:
-/* XXX Deliver Program interrupt to guest. */
-printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
-vcpu->arch.last_inst);
-r = RESUME_HOST;
-break;
-default:
-BUG();
-}
-
-return r;
-}
-
 /**
 * kvmppc_handle_exit
 *
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index a03fe0c80698..000097461283 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -246,6 +246,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 case 31:
 switch (get_xop(inst)) {
 
+case 23: /* lwzx */
+rt = get_rt(inst);
+emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
+break;
+
 case 83: /* mfmsr */
 rt = get_rt(inst);
 vcpu->arch.gpr[rt] = vcpu->arch.msr;
@@ -267,6 +272,13 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
 break;
 
+case 151: /* stwx */
+rs = get_rs(inst);
+emulated = kvmppc_handle_store(run, vcpu,
+vcpu->arch.gpr[rs],
+4, 1);
+break;
+
 case 163: /* wrteei */
 vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
 | (inst & MSR_EE);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 93acb3c1859d..107e492cb47e 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -304,6 +304,7 @@ config ARCH_SPARSEMEM_ENABLE
 def_bool y
 select SPARSEMEM_VMEMMAP_ENABLE
 select SPARSEMEM_VMEMMAP
+select SPARSEMEM_STATIC if !64BIT
 
 config ARCH_SPARSEMEM_DEFAULT
 def_bool y
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 42b1d12ebb10..5d4fa4b1c74c 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -711,7 +711,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 memset(sf, 0, sizeof(struct stack_frame));
 sf->gprs[9] = (unsigned long) sf;
 cpu_lowcore->save_area[15] = (unsigned long) sf;
-__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
+__ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
 asm volatile(
 " stam 0,15,0(%0)"
 : : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index f639a152869f..a0775e1f08df 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -20,7 +20,7 @@ static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
 VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
 vcpu->stat.diagnose_44++;
 vcpu_put(vcpu);
-schedule();
+yield();
 vcpu_load(vcpu);
 return 0;
 }
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index fcd1ed8015c1..84a7fed4cd4e 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -339,6 +339,11 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 if (kvm_cpu_has_interrupt(vcpu))
 return 0;
 
+__set_cpu_idle(vcpu);
+spin_lock_bh(&vcpu->arch.local_int.lock);
+vcpu->arch.local_int.timer_due = 0;
+spin_unlock_bh(&vcpu->arch.local_int.lock);
+
 if (psw_interrupts_disabled(vcpu)) {
 VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
 __unset_cpu_idle(vcpu);
@@ -366,8 +371,6 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 no_timer:
 spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
 spin_lock_bh(&vcpu->arch.local_int.lock);
-__set_cpu_idle(vcpu);
-vcpu->arch.local_int.timer_due = 0;
 add_wait_queue(&vcpu->arch.local_int.wq, &wait);
 while (list_empty(&vcpu->arch.local_int.list) &&
 list_empty(&vcpu->arch.local_int.float_int->list) &&
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 0ac36a649eba..6558b09ff579 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -423,6 +423,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 return -EINVAL; /* not implemented yet */
 }
 
+extern void s390_handle_mcck(void);
+
 static void __vcpu_run(struct kvm_vcpu *vcpu)
 {
 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
@@ -430,13 +432,21 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
 if (need_resched())
 schedule();
 
+if (test_thread_flag(TIF_MCCK_PENDING))
+s390_handle_mcck();
+
+kvm_s390_deliver_pending_interrupts(vcpu);
+
 vcpu->arch.sie_block->icptcode = 0;
 local_irq_disable();
 kvm_guest_enter();
 local_irq_enable();
 VCPU_EVENT(vcpu, 6, "entering sie flags %x",
 atomic_read(&vcpu->arch.sie_block->cpuflags));
-sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
+if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
+VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
+kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+}
 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
 vcpu->arch.sie_block->icptcode);
 local_irq_disable();
@@ -475,7 +485,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 might_sleep();
 
 do {
-kvm_s390_deliver_pending_interrupts(vcpu);
 __vcpu_run(vcpu);
 rc = kvm_handle_sie_intercept(vcpu);
 } while (!signal_pending(current) && !rc);
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 5c1aea97cd12..3d98ba82ea67 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -254,36 +254,46 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
 int s390_enable_sie(void)
 {
 struct task_struct *tsk = current;
-struct mm_struct *mm;
-int rc;
+struct mm_struct *mm, *old_mm;
 
-task_lock(tsk);
-
-rc = 0;
+/* Do we have pgstes? if yes, we are done */
 if (tsk->mm->context.pgstes)
-goto unlock;
+return 0;
 
-rc = -EINVAL;
+/* lets check if we are allowed to replace the mm */
+task_lock(tsk);
 if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
-tsk->mm != tsk->active_mm || tsk->mm->ioctx_list)
-goto unlock;
+tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
+task_unlock(tsk);
+return -EINVAL;
+}
+task_unlock(tsk);
 
-tsk->mm->context.pgstes = 1; /* dirty little tricks .. */
+/* we copy the mm with pgstes enabled */
+tsk->mm->context.pgstes = 1;
 mm = dup_mm(tsk);
 tsk->mm->context.pgstes = 0;
-
-rc = -ENOMEM;
 if (!mm)
-goto unlock;
+return -ENOMEM;
-mmput(tsk->mm);
+
+/* Now lets check again if somebody attached ptrace etc */
+task_lock(tsk);
+if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
+tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
+mmput(mm);
+task_unlock(tsk);
+return -EINVAL;
+}
+
+/* ok, we are alone. No ptrace, no threads, etc. */
+old_mm = tsk->mm;
 tsk->mm = tsk->active_mm = mm;
 preempt_disable();
 update_mm(mm, tsk);
 cpu_set(smp_processor_id(), mm->cpu_vm_mask);
 preempt_enable();
-rc = 0;
-unlock:
 task_unlock(tsk);
-return rc;
+mmput(old_mm);
+return 0;
 }
 EXPORT_SYMBOL_GPL(s390_enable_sie);
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index f591188fa2c0..e4868bfc672f 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -236,7 +236,7 @@ static int insert_memory_segment(struct memory_segment *seg)
 {
 struct memory_segment *tmp;
 
-if (seg->start + seg->size >= VMEM_MAX_PHYS ||
+if (seg->start + seg->size > VMEM_MAX_PHYS ||
 seg->start + seg->size < seg->start)
 return -ERANGE;
 
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 7c077a9d9777..f2f5d260874e 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -200,7 +200,6 @@ int __pit_timer_fn(struct kvm_kpit_state *ps)
 
 atomic_inc(&pt->pending);
 smp_mb__after_atomic_inc();
-/* FIXME: handle case where the guest is in guest mode */
 if (vcpu0 && waitqueue_active(&vcpu0->wq)) {
 vcpu0->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 wake_up_interruptible(&vcpu0->wq);
@@ -237,6 +236,19 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
 return HRTIMER_NORESTART;
 }
 
+void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
+{
+struct kvm_pit *pit = vcpu->kvm->arch.vpit;
+struct hrtimer *timer;
+
+if (vcpu->vcpu_id != 0 || !pit)
+return;
+
+timer = &pit->pit_state.pit_timer.timer;
+if (hrtimer_cancel(timer))
+hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS);
+}
+
 static void destroy_pit_timer(struct kvm_kpit_timer *pt)
 {
 pr_debug("pit: execute del timer!\n");
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index ce1f583459b1..76d736b5f664 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -94,3 +94,9 @@ void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
 /* TODO: PIT, RTC etc. */
 }
 EXPORT_SYMBOL_GPL(kvm_timer_intr_post);
+
+void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
+{
+__kvm_migrate_apic_timer(vcpu);
+__kvm_migrate_pit_timer(vcpu);
+}
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index 1802134b836f..2a15be2275c0 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -84,6 +84,8 @@ void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec);
 void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
 void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu);
 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu);
+void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu);
+void __kvm_migrate_timers(struct kvm_vcpu *vcpu);
 
 int pit_has_pending_timer(struct kvm_vcpu *vcpu);
 int apic_has_pending_timer(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7246b60afb96..ee3f53098f0c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -658,7 +658,7 @@ static int is_empty_shadow_page(u64 *spt)
 u64 *end;
 
 for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
-if (*pos != shadow_trap_nonpresent_pte) {
+if (is_shadow_present_pte(*pos)) {
 printk(KERN_ERR "%s: %p %llx\n", __func__,
 pos, *pos);
 return 0;
@@ -1858,6 +1858,7 @@ static void free_mmu_pages(struct kvm_vcpu *vcpu)
 sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
 struct kvm_mmu_page, link);
 kvm_mmu_zap_page(vcpu->kvm, sp);
+cond_resched();
 }
 free_page((unsigned long)vcpu->arch.mmu.pae_root);
 }
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 156fe10288ae..934c7b619396 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -418,7 +418,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 
 /* mmio */
 if (is_error_pfn(pfn)) {
-pgprintk("gfn %x is mmio\n", walker.gfn);
+pgprintk("gfn %lx is mmio\n", walker.gfn);
 kvm_release_pfn_clean(pfn);
 return 1;
 }
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ab22615eee89..6b0d5fa5bab3 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -688,7 +688,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 delta = vcpu->arch.host_tsc - tsc_this;
 svm->vmcb->control.tsc_offset += delta;
 vcpu->cpu = cpu;
-kvm_migrate_apic_timer(vcpu);
+kvm_migrate_timers(vcpu);
 }
 
 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index bfe4db11989c..02efbe75f317 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -608,7 +608,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 if (vcpu->cpu != cpu) {
 vcpu_clear(vmx);
-kvm_migrate_apic_timer(vcpu);
+kvm_migrate_timers(vcpu);
 vpid_sync_vcpu_all(vmx);
 }
 
@@ -1036,6 +1036,7 @@ static void hardware_enable(void *garbage)
 static void hardware_disable(void *garbage)
 {
 asm volatile (ASM_VMX_VMXOFF : : : "cc");
+write_cr4(read_cr4() & ~X86_CR4_VMXE);
 }
 
 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 21338bdb28ff..00acf1301a15 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2758,7 +2758,7 @@ again:
 
 if (vcpu->requests) {
 if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
-__kvm_migrate_apic_timer(vcpu);
+__kvm_migrate_timers(vcpu);
 if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
 &vcpu->requests)) {
 kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c
index 8a96320ab071..932f216d890c 100644
--- a/arch/x86/kvm/x86_emulate.c
+++ b/arch/x86/kvm/x86_emulate.c
@@ -1727,7 +1727,8 @@ twobyte_insn:
 if (rc)
 goto done;
 
-kvm_emulate_hypercall(ctxt->vcpu);
+/* Let the processor re-execute the fixed hypercall */
+c->eip = ctxt->vcpu->arch.rip;
 /* Disable writeback. */
 c->dst.type = OP_NONE;
 break;
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 3890234e5b26..99649dccad28 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -97,36 +97,9 @@ static __init inline int srat_disabled(void)
 return numa_off || acpi_numa < 0;
 }
 
-/*
-* A lot of BIOS fill in 10 (= no distance) everywhere. This messes
-* up the NUMA heuristics which wants the local node to have a smaller
-* distance than the others.
-* Do some quick checks here and only use the SLIT if it passes.
-*/
-static __init int slit_valid(struct acpi_table_slit *slit)
-{
-int i, j;
-int d = slit->locality_count;
-for (i = 0; i < d; i++) {
-for (j = 0; j < d; j++) {
-u8 val = slit->entry[d*i + j];
-if (i == j) {
-if (val != LOCAL_DISTANCE)
-return 0;
-} else if (val <= LOCAL_DISTANCE)
-return 0;
-}
-}
-return 1;
-}
-
 /* Callback for SLIT parsing */
 void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
 {
-if (!slit_valid(slit)) {
-printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n");
-return;
-}
 acpi_slit = slit;
 }
 
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index c39e1a5aa241..52b2e3856980 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -12,6 +12,7 @@
 #include <linux/clocksource.h>
 #include <linux/clockchips.h>
 #include <linux/kernel_stat.h>
+#include <linux/math64.h>
 
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
@@ -150,11 +151,7 @@ static void do_stolen_accounting(void)
 if (stolen < 0)
 stolen = 0;
 
-ticks = 0;
-while (stolen >= NS_PER_TICK) {
-ticks++;
-stolen -= NS_PER_TICK;
-}
+ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
 __get_cpu_var(residual_stolen) = stolen;
 account_steal_time(NULL, ticks);
 
@@ -166,11 +163,7 @@ static void do_stolen_accounting(void)
 if (blocked < 0)
 blocked = 0;
 
-ticks = 0;
-while (blocked >= NS_PER_TICK) {
-ticks++;
-blocked -= NS_PER_TICK;
-}
+ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
 __get_cpu_var(residual_blocked) = blocked;
 account_steal_time(idle_task(smp_processor_id()), ticks);
 }
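For context on the two xen/time.c hunks above: iter_div_u64_rem(), newly included from <linux/math64.h>, performs the same divide-by-repeated-subtraction that the removed while loops did, returning the number of whole ticks and writing the sub-tick remainder back through its third argument. A rough sketch of that contract (not the kernel's implementation; the stand-in deliberately uses a different name):

/* Sketch only -- the real helper lives in include/linux/math64.h. Iterative
 * subtraction is acceptable here because the expected quotient (ticks of
 * stolen/blocked time) is small. */
static inline unsigned int ticks_div_rem_sketch(unsigned long long dividend,
                                                unsigned int divisor,
                                                unsigned long long *remainder)
{
        unsigned int quotient = 0;

        while (dividend >= divisor) {
                dividend -= divisor;
                quotient++;
        }
        *remainder = dividend;  /* kept by the caller as residual time */
        return quotient;        /* whole ticks, fed to account_steal_time() */
}

/* Usage mirroring the new code: ticks = ticks_div_rem_sketch(stolen, NS_PER_TICK, &stolen); */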
diff --git a/block/blktrace.c b/block/blktrace.c
index 7ae87cc4a163..8d3a27780260 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -79,16 +79,17 @@ void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
 {
 int n;
 va_list args;
+unsigned long flags;
 char *buf;
 
-preempt_disable();
+local_irq_save(flags);
 buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
 va_start(args, fmt);
 n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
 va_end(args);
 
 trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
-preempt_enable();
+local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(__trace_note_message);
 
@@ -158,10 +159,7 @@ void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 /*
 * A word about the locking here - we disable interrupts to reserve
 * some space in the relay per-cpu buffer, to prevent an irq
-* from coming in and stepping on our toes. Once reserved, it's
-* enough to get preemption disabled to prevent read of this data
-* before we are through filling it. get_cpu()/put_cpu() does this
-* for us
+* from coming in and stepping on our toes.
 */
 local_irq_save(flags);
 
diff --git a/drivers/acpi/bay.c b/drivers/acpi/bay.c
index d2fc94161848..26038c2a2a71 100644
--- a/drivers/acpi/bay.c
+++ b/drivers/acpi/bay.c
@@ -301,16 +301,20 @@ static int bay_add(acpi_handle handle, int id)
 */
 pdev->dev.uevent_suppress = 0;
 
-if (acpi_bay_add_fs(new_bay)) {
-platform_device_unregister(new_bay->pdev);
-goto bay_add_err;
-}
-
 /* register for events on this device */
 status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
 bay_notify, new_bay);
 if (ACPI_FAILURE(status)) {
-printk(KERN_ERR PREFIX "Error installing bay notify handler\n");
+printk(KERN_INFO PREFIX "Error installing bay notify handler\n");
+platform_device_unregister(new_bay->pdev);
+goto bay_add_err;
+}
+
+if (acpi_bay_add_fs(new_bay)) {
+acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
+bay_notify);
+platform_device_unregister(new_bay->pdev);
+goto bay_add_err;
 }
 
 /* if we are on a dock station, we should register for dock
diff --git a/drivers/acpi/dispatcher/dsfield.c b/drivers/acpi/dispatcher/dsfield.c
index c78078315be9..f988a5e7d2b4 100644
--- a/drivers/acpi/dispatcher/dsfield.c
+++ b/drivers/acpi/dispatcher/dsfield.c
@@ -450,10 +450,6 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
 return_ACPI_STATUS(AE_BAD_PARAMETER);
 }
 
-if (!arg) {
-return_ACPI_STATUS(AE_AML_NO_OPERAND);
-}
-
 /* Creating new namespace node(s), should not already exist */
 
 flags = ACPI_NS_NO_UPSEARCH | ACPI_NS_DONT_OPEN_SCOPE |
@@ -467,6 +463,7 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
 
 /*
 * Walk the list of entries in the field_list
+* Note: field_list can be of zero length. In this case, Arg will be NULL.
 */
 while (arg) {
 /*
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index fa44fb96fc34..96c542f7fded 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -834,7 +834,7 @@ static int dock_add(acpi_handle handle)
 goto dock_add_err;
 }
 
-printk(KERN_INFO PREFIX "%s \n", ACPI_DOCK_DRIVER_DESCRIPTION);
+printk(KERN_INFO PREFIX "%s\n", ACPI_DOCK_DRIVER_DESCRIPTION);
 
 return 0;
 
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 0924992187e8..5622aee996b2 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -194,7 +194,7 @@ static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll)
 while (time_before(jiffies, delay)) {
 if (acpi_ec_check_status(ec, event))
 return 0;
-udelay(ACPI_EC_UDELAY);
+msleep(1);
 }
 }
 pr_err(PREFIX "acpi_ec_wait timeout, status = 0x%2.2x, event = %s\n",
diff --git a/drivers/acpi/executer/exconfig.c b/drivers/acpi/executer/exconfig.c
index 24da921d13e3..39d742190584 100644
--- a/drivers/acpi/executer/exconfig.c
+++ b/drivers/acpi/executer/exconfig.c
@@ -375,9 +375,15 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
 goto cleanup;
 }
 
+/*
+* Add the table to the namespace.
+*
+* Note: We load the table objects relative to the root of the namespace.
+* This appears to go against the ACPI specification, but we do it for
+* compatibility with other ACPI implementations.
+*/
 status =
-acpi_ex_add_table(table_index, walk_state->scope_info->scope.node,
-&ddb_handle);
+acpi_ex_add_table(table_index, acpi_gbl_root_node, &ddb_handle);
 if (ACPI_FAILURE(status)) {
 
 /* On error, table_ptr was deallocated above */
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 06f8634fe58b..2808dc60fd67 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -272,6 +272,12 @@ static u32 rtc_handler(void *context)
 static inline void rtc_wake_setup(void)
 {
 acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL);
+/*
+* After the RTC handler is installed, the Fixed_RTC event should
+* be disabled. Only when the RTC alarm is set will it be enabled.
+*/
+acpi_clear_event(ACPI_EVENT_RTC);
+acpi_disable_event(ACPI_EVENT_RTC, 0);
 }
 
 static void rtc_wake_on(struct device *dev)
diff --git a/drivers/acpi/hardware/hwsleep.c b/drivers/acpi/hardware/hwsleep.c
index d9937e05ec6a..dba3cfbe8cba 100644
--- a/drivers/acpi/hardware/hwsleep.c
+++ b/drivers/acpi/hardware/hwsleep.c
@@ -223,15 +223,17 @@ acpi_status acpi_enter_sleep_state_prep(u8 sleep_state)
 break;
 }
 
-/* Set the system indicators to show the desired sleep state. */
-
+/*
+* Set the system indicators to show the desired sleep state.
+* _SST is an optional method (return no error if not found)
+*/
 status = acpi_evaluate_object(NULL, METHOD_NAME__SST, &arg_list, NULL);
 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
 ACPI_EXCEPTION((AE_INFO, status,
 "While executing method _SST"));
 }
 
-return_ACPI_STATUS(status);
+return_ACPI_STATUS(AE_OK);
 }
 
 ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 5d59cb33b1a5..658e5f3abae0 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -140,19 +140,42 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
 }
 }
 
+/*
+* A lot of BIOS fill in 10 (= no distance) everywhere. This messes
+* up the NUMA heuristics which wants the local node to have a smaller
+* distance than the others.
+* Do some quick checks here and only use the SLIT if it passes.
+*/
+static __init int slit_valid(struct acpi_table_slit *slit)
+{
+int i, j;
+int d = slit->locality_count;
+for (i = 0; i < d; i++) {
+for (j = 0; j < d; j++) {
+u8 val = slit->entry[d*i + j];
+if (i == j) {
+if (val != LOCAL_DISTANCE)
+return 0;
+} else if (val <= LOCAL_DISTANCE)
+return 0;
+}
+}
+return 1;
+}
+
 static int __init acpi_parse_slit(struct acpi_table_header *table)
 {
 struct acpi_table_slit *slit;
-u32 localities;
 
 if (!table)
 return -EINVAL;
 
 slit = (struct acpi_table_slit *)table;
 
-/* downcast just for %llu vs %lu for i386/ia64 */
-localities = (u32) slit->locality_count;
-
+if (!slit_valid(slit)) {
+printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n");
+return -EINVAL;
+}
 acpi_numa_slit_init(slit);
 
 return 0;
diff --git a/drivers/acpi/parser/psargs.c b/drivers/acpi/parser/psargs.c
index f1e8bf65e24e..e94463778845 100644
--- a/drivers/acpi/parser/psargs.c
+++ b/drivers/acpi/parser/psargs.c
@@ -268,7 +268,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
 */
 if (ACPI_SUCCESS(status) &&
 possible_method_call && (node->type == ACPI_TYPE_METHOD)) {
-if (walk_state->op->common.aml_opcode == AML_UNLOAD_OP) {
+if (walk_state->opcode == AML_UNLOAD_OP) {
 /*
 * acpi_ps_get_next_namestring has increased the AML pointer,
 * so we need to restore the saved AML pointer for method call.
@@ -691,7 +691,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
 
 /* To support super_name arg of Unload */
 
-if (walk_state->op->common.aml_opcode == AML_UNLOAD_OP) {
+if (walk_state->opcode == AML_UNLOAD_OP) {
 status =
 acpi_ps_get_next_namepath(walk_state,
 parser_state, arg,
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 386e5aa48834..9dd0fa93b9e1 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -86,7 +86,6 @@ static int acpi_processor_info_open_fs(struct inode *inode, struct file *file);
 static void acpi_processor_notify(acpi_handle handle, u32 event, void *data);
 static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu);
 static int acpi_processor_handle_eject(struct acpi_processor *pr);
-extern int acpi_processor_tstate_has_changed(struct acpi_processor *pr);
 
 
 static const struct acpi_device_id processor_device_ids[] = {
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 2dd2c1f3a01c..556ee1585192 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -1669,6 +1669,7 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 return -EINVAL;
 }
 
+dev->cpu = pr->id;
 for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
 dev->states[i].name[0] = '\0';
 dev->states[i].desc[0] = '\0';
@@ -1738,7 +1739,7 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 
 int acpi_processor_cst_has_changed(struct acpi_processor *pr)
 {
-int ret;
+int ret = 0;
 
 if (boot_option_idle_override)
 return 0;
@@ -1756,8 +1757,10 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
 cpuidle_pause_and_lock();
 cpuidle_disable_device(&pr->power.dev);
 acpi_processor_get_power_info(pr);
-acpi_processor_setup_cpuidle(pr);
-ret = cpuidle_enable_device(&pr->power.dev);
+if (pr->flags.power) {
+acpi_processor_setup_cpuidle(pr);
+ret = cpuidle_enable_device(&pr->power.dev);
+}
 cpuidle_resume_and_unlock();
 
 return ret;
@@ -1813,7 +1816,6 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 if (pr->flags.power) {
 #ifdef CONFIG_CPU_IDLE
 acpi_processor_setup_cpuidle(pr);
-pr->power.dev.cpu = pr->id;
 if (cpuidle_register_device(&pr->power.dev))
 return -EIO;
 #endif
@@ -1850,8 +1852,7 @@ int acpi_processor_power_exit(struct acpi_processor *pr,
 return 0;
 
 #ifdef CONFIG_CPU_IDLE
-if (pr->flags.power)
-cpuidle_unregister_device(&pr->power.dev);
+cpuidle_unregister_device(&pr->power.dev);
 #endif
 pr->flags.power_setup_done = 0;
 
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c
index 8a5fe8710513..224c57c03381 100644
--- a/drivers/acpi/sleep/proc.c
+++ b/drivers/acpi/sleep/proc.c
@@ -495,6 +495,12 @@ static int __init acpi_sleep_proc_init(void)
 acpi_root_dir, &acpi_system_alarm_fops);
 
 acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL);
+/*
+* Disable the RTC event after installing RTC handler.
+* Only when RTC alarm is set will it be enabled.
+*/
+acpi_clear_event(ACPI_EVENT_RTC);
+acpi_disable_event(ACPI_EVENT_RTC, 0);
 #endif /* HAVE_ACPI_LEGACY_ALARM */
 
 /* 'wakeup device' [R/W] */
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index 769f24855eb6..5bd2dec9a7ac 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -77,7 +77,6 @@ static ssize_t acpi_table_show(struct kobject *kobj,
 container_of(bin_attr, struct acpi_table_attr, attr);
 struct acpi_table_header *table_header = NULL;
 acpi_status status;
-ssize_t ret_count = count;
 
 status =
 acpi_get_table(table_attr->name, table_attr->instance,
@@ -85,18 +84,8 @@ static ssize_t acpi_table_show(struct kobject *kobj,
 if (ACPI_FAILURE(status))
 return -ENODEV;
 
-if (offset >= table_header->length) {
-ret_count = 0;
-goto end;
-}
-
-if (offset + ret_count > table_header->length)
-ret_count = table_header->length - offset;
-
-memcpy(buf, ((char *)table_header) + offset, ret_count);
-
-end:
-return ret_count;
+return memory_read_from_buffer(buf, count, &offset,
+table_header, table_header->length);
 }
 
 static void acpi_table_attr_init(struct acpi_table_attr *table_attr,
diff --git a/drivers/acpi/tables/tbinstal.c b/drivers/acpi/tables/tbinstal.c
index 402f93e1ff20..5336ce88f89f 100644
--- a/drivers/acpi/tables/tbinstal.c
+++ b/drivers/acpi/tables/tbinstal.c
@@ -123,24 +123,13 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc,
 }
 }
 
-/* The table must be either an SSDT or a PSDT or an OEMx */
-
-if (!ACPI_COMPARE_NAME(table_desc->pointer->signature, ACPI_SIG_PSDT)&&
-!ACPI_COMPARE_NAME(table_desc->pointer->signature, ACPI_SIG_SSDT)&&
-strncmp(table_desc->pointer->signature, "OEM", 3)) {
-/* Check for a printable name */
-if (acpi_ut_valid_acpi_name(
-*(u32 *) table_desc->pointer->signature)) {
-ACPI_ERROR((AE_INFO, "Table has invalid signature "
-"[%4.4s], must be SSDT or PSDT",
-table_desc->pointer->signature));
-} else {
-ACPI_ERROR((AE_INFO, "Table has invalid signature "
-"(0x%8.8X), must be SSDT or PSDT",
-*(u32 *) table_desc->pointer->signature));
-}
-return_ACPI_STATUS(AE_BAD_SIGNATURE);
-}
+/*
+* Originally, we checked the table signature for "SSDT" or "PSDT" here.
+* Next, we added support for OEMx tables, signature "OEM".
+* Valid tables were encountered with a null signature, so we've just
+* given up on validating the signature, since it seems to be a waste
+* of code. The original code was removed (05/2008).
+*/
 
 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
 
diff --git a/drivers/acpi/tables/tbxface.c b/drivers/acpi/tables/tbxface.c
index fb57b93c2495..0e319604d3e7 100644
--- a/drivers/acpi/tables/tbxface.c
+++ b/drivers/acpi/tables/tbxface.c
@@ -540,7 +540,7 @@ static acpi_status acpi_tb_load_namespace(void)
 acpi_tb_print_table_header(0, table);
 
 if (no_auto_ssdt == 0) {
-printk(KERN_WARNING "ACPI: DSDT override uses original SSDTs unless \"acpi_no_auto_ssdt\"");
+printk(KERN_WARNING "ACPI: DSDT override uses original SSDTs unless \"acpi_no_auto_ssdt\"\n");
 }
 }
 
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 504385b1f211..84c795fb9b1e 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -364,10 +364,17 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
 if (flag & ACPI_TRIPS_CRITICAL) {
 status = acpi_evaluate_integer(tz->device->handle,
 "_CRT", NULL, &tz->trips.critical.temperature);
-if (ACPI_FAILURE(status)) {
+/*
+* Treat freezing temperatures as invalid as well; some
+* BIOSes return really low values and cause reboots at startup.
+* Below zero (Celcius) values clearly aren't right for sure..
+* ... so lets discard those as invalid.
+*/
+if (ACPI_FAILURE(status) ||
+tz->trips.critical.temperature <= 2732) {
 tz->trips.critical.flags.valid = 0;
 ACPI_EXCEPTION((AE_INFO, status,
-"No critical threshold"));
+"No or invalid critical threshold"));
 return -ENODEV;
 } else {
 tz->trips.critical.flags.valid = 1;
diff --git a/drivers/acpi/utilities/utmisc.c b/drivers/acpi/utilities/utmisc.c index e4ba7192cd15..1f057b71db1a 100644 --- a/drivers/acpi/utilities/utmisc.c +++ b/drivers/acpi/utilities/utmisc.c | |||
@@ -1048,6 +1048,7 @@ acpi_ut_exception(char *module_name, | |||
1048 | va_start(args, format); | 1048 | va_start(args, format); |
1049 | acpi_os_vprintf(format, args); | 1049 | acpi_os_vprintf(format, args); |
1050 | acpi_os_printf(" [%X]\n", ACPI_CA_VERSION); | 1050 | acpi_os_printf(" [%X]\n", ACPI_CA_VERSION); |
1051 | va_end(args); | ||
1051 | } | 1052 | } |
1052 | 1053 | ||
1053 | EXPORT_SYMBOL(acpi_ut_exception); | 1054 | EXPORT_SYMBOL(acpi_ut_exception); |
@@ -1063,7 +1064,6 @@ acpi_ut_warning(char *module_name, u32 line_number, char *format, ...) | |||
1063 | acpi_os_vprintf(format, args); | 1064 | acpi_os_vprintf(format, args); |
1064 | acpi_os_printf(" [%X]\n", ACPI_CA_VERSION); | 1065 | acpi_os_printf(" [%X]\n", ACPI_CA_VERSION); |
1065 | va_end(args); | 1066 | va_end(args); |
1066 | va_end(args); | ||
1067 | } | 1067 | } |
1068 | 1068 | ||
1069 | void ACPI_INTERNAL_VAR_XFACE | 1069 | void ACPI_INTERNAL_VAR_XFACE |
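These two hunks fix unbalanced va_start()/va_end() pairs: acpi_ut_exception() was missing its va_end(), while acpi_ut_warning() called it twice. A small user-space sketch of the correct pairing (the function and messages are made up for illustration):

#include <stdarg.h>
#include <stdio.h>

/* Every va_start() must be balanced by exactly one va_end() on every path. */
static void report(const char *prefix, const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        printf("%s: ", prefix);
        vprintf(fmt, args);
        putchar('\n');
        va_end(args);   /* once, and only once */
}

int main(void)
{
        report("ACPI Exception", "could not evaluate %s", "_CRT");
        return 0;
}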
diff --git a/drivers/atm/eni.h b/drivers/atm/eni.h index d04fefb0841f..e4c9525e60b3 100644 --- a/drivers/atm/eni.h +++ b/drivers/atm/eni.h | |||
@@ -18,7 +18,6 @@ | |||
18 | #include "midway.h" | 18 | #include "midway.h" |
19 | 19 | ||
20 | 20 | ||
21 | #define KERNEL_OFFSET 0xC0000000 /* kernel 0x0 is at phys 0xC0000000 */ | ||
22 | #define DEV_LABEL "eni" | 21 | #define DEV_LABEL "eni" |
23 | 22 | ||
24 | #define UBR_BUFFER (128*1024) /* UBR buffer size */ | 23 | #define UBR_BUFFER (128*1024) /* UBR buffer size */ |
diff --git a/drivers/base/core.c b/drivers/base/core.c index 422cfcad486d..ee0a51a3a41d 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c | |||
@@ -762,6 +762,7 @@ static void device_remove_class_symlinks(struct device *dev) | |||
762 | /** | 762 | /** |
763 | * dev_set_name - set a device name | 763 | * dev_set_name - set a device name |
764 | * @dev: device | 764 | * @dev: device |
765 | * @fmt: format string for the device's name | ||
765 | */ | 766 | */ |
766 | int dev_set_name(struct device *dev, const char *fmt, ...) | 767 | int dev_set_name(struct device *dev, const char *fmt, ...) |
767 | { | 768 | { |
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index fc555a90bb21..23554b676d6e 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c | |||
@@ -38,6 +38,8 @@ static void cpuidle_kick_cpus(void) | |||
38 | static void cpuidle_kick_cpus(void) {} | 38 | static void cpuidle_kick_cpus(void) {} |
39 | #endif | 39 | #endif |
40 | 40 | ||
41 | static int __cpuidle_register_device(struct cpuidle_device *dev); | ||
42 | |||
41 | /** | 43 | /** |
42 | * cpuidle_idle_call - the main idle loop | 44 | * cpuidle_idle_call - the main idle loop |
43 | * | 45 | * |
@@ -138,6 +140,12 @@ int cpuidle_enable_device(struct cpuidle_device *dev) | |||
138 | if (!dev->state_count) | 140 | if (!dev->state_count) |
139 | return -EINVAL; | 141 | return -EINVAL; |
140 | 142 | ||
143 | if (dev->registered == 0) { | ||
144 | ret = __cpuidle_register_device(dev); | ||
145 | if (ret) | ||
146 | return ret; | ||
147 | } | ||
148 | |||
141 | if ((ret = cpuidle_add_state_sysfs(dev))) | 149 | if ((ret = cpuidle_add_state_sysfs(dev))) |
142 | return ret; | 150 | return ret; |
143 | 151 | ||
@@ -232,10 +240,13 @@ static void poll_idle_init(struct cpuidle_device *dev) {} | |||
232 | #endif /* CONFIG_ARCH_HAS_CPU_RELAX */ | 240 | #endif /* CONFIG_ARCH_HAS_CPU_RELAX */ |
233 | 241 | ||
234 | /** | 242 | /** |
235 | * cpuidle_register_device - registers a CPU's idle PM feature | 243 | * __cpuidle_register_device - internal register function called before register |
244 | * and enable routines | ||
236 | * @dev: the cpu | 245 | * @dev: the cpu |
246 | * | ||
247 | * cpuidle_lock mutex must be held before this is called | ||
237 | */ | 248 | */ |
238 | int cpuidle_register_device(struct cpuidle_device *dev) | 249 | static int __cpuidle_register_device(struct cpuidle_device *dev) |
239 | { | 250 | { |
240 | int ret; | 251 | int ret; |
241 | struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu); | 252 | struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu); |
@@ -247,18 +258,34 @@ int cpuidle_register_device(struct cpuidle_device *dev) | |||
247 | 258 | ||
248 | init_completion(&dev->kobj_unregister); | 259 | init_completion(&dev->kobj_unregister); |
249 | 260 | ||
250 | mutex_lock(&cpuidle_lock); | ||
251 | |||
252 | poll_idle_init(dev); | 261 | poll_idle_init(dev); |
253 | 262 | ||
254 | per_cpu(cpuidle_devices, dev->cpu) = dev; | 263 | per_cpu(cpuidle_devices, dev->cpu) = dev; |
255 | list_add(&dev->device_list, &cpuidle_detected_devices); | 264 | list_add(&dev->device_list, &cpuidle_detected_devices); |
256 | if ((ret = cpuidle_add_sysfs(sys_dev))) { | 265 | if ((ret = cpuidle_add_sysfs(sys_dev))) { |
257 | mutex_unlock(&cpuidle_lock); | ||
258 | module_put(cpuidle_curr_driver->owner); | 266 | module_put(cpuidle_curr_driver->owner); |
259 | return ret; | 267 | return ret; |
260 | } | 268 | } |
261 | 269 | ||
270 | dev->registered = 1; | ||
271 | return 0; | ||
272 | } | ||
273 | |||
274 | /** | ||
275 | * cpuidle_register_device - registers a CPU's idle PM feature | ||
276 | * @dev: the cpu | ||
277 | */ | ||
278 | int cpuidle_register_device(struct cpuidle_device *dev) | ||
279 | { | ||
280 | int ret; | ||
281 | |||
282 | mutex_lock(&cpuidle_lock); | ||
283 | |||
284 | if ((ret = __cpuidle_register_device(dev))) { | ||
285 | mutex_unlock(&cpuidle_lock); | ||
286 | return ret; | ||
287 | } | ||
288 | |||
262 | cpuidle_enable_device(dev); | 289 | cpuidle_enable_device(dev); |
263 | cpuidle_install_idle_handler(); | 290 | cpuidle_install_idle_handler(); |
264 | 291 | ||
@@ -278,6 +305,9 @@ void cpuidle_unregister_device(struct cpuidle_device *dev) | |||
278 | { | 305 | { |
279 | struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu); | 306 | struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu); |
280 | 307 | ||
308 | if (dev->registered == 0) | ||
309 | return; | ||
310 | |||
281 | cpuidle_pause_and_lock(); | 311 | cpuidle_pause_and_lock(); |
282 | 312 | ||
283 | cpuidle_disable_device(dev); | 313 | cpuidle_disable_device(dev); |
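The refactor splits registration into a lock-free helper, __cpuidle_register_device(), and a public wrapper that takes cpuidle_lock, so cpuidle_enable_device() can lazily register a device that has not been registered yet. A condensed user-space sketch of that lock-then-delegate shape, with a pthread mutex standing in for cpuidle_lock (all names and the bookkeeping are simplified assumptions, not the kernel code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cpuidle_lock = PTHREAD_MUTEX_INITIALIZER;

struct cpuidle_device {
        int cpu;
        int registered;
};

/* Internal helper: caller must already hold cpuidle_lock. */
static int __register_device(struct cpuidle_device *dev)
{
        if (dev->registered)
                return 0;
        /* ... sysfs setup and per-cpu bookkeeping would go here ... */
        dev->registered = 1;
        return 0;
}

/* Enable path: register lazily if nobody did it yet (lock held by caller). */
static int enable_device(struct cpuidle_device *dev)
{
        if (!dev->registered) {
                int ret = __register_device(dev);
                if (ret)
                        return ret;
        }
        printf("cpu%d enabled\n", dev->cpu);
        return 0;
}

/* Public entry point: takes the lock, then delegates to the helper. */
static int register_device(struct cpuidle_device *dev)
{
        int ret;

        pthread_mutex_lock(&cpuidle_lock);
        ret = __register_device(dev);
        if (!ret)
                ret = enable_device(dev);
        pthread_mutex_unlock(&cpuidle_lock);
        return ret;
}

int main(void)
{
        struct cpuidle_device dev = { .cpu = 0, .registered = 0 };

        return register_device(&dev);
}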
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig index b4f3aefa12b6..1607536ff5fb 100644 --- a/drivers/ide/Kconfig +++ b/drivers/ide/Kconfig | |||
@@ -1028,6 +1028,7 @@ endif | |||
1028 | 1028 | ||
1029 | config BLK_DEV_HD_ONLY | 1029 | config BLK_DEV_HD_ONLY |
1030 | bool "Old hard disk (MFM/RLL/IDE) driver" | 1030 | bool "Old hard disk (MFM/RLL/IDE) driver" |
1031 | depends on !ARM || ARCH_RPC || ARCH_SHARK || BROKEN | ||
1031 | help | 1032 | help |
1032 | There are two drivers for MFM/RLL/IDE hard disks. Most people use | 1033 | There are two drivers for MFM/RLL/IDE hard disks. Most people use |
1033 | the newer enhanced driver, but this old one is still around for two | 1034 | the newer enhanced driver, but this old one is still around for two |
diff --git a/drivers/ide/arm/bast-ide.c b/drivers/ide/arm/bast-ide.c index 713cef20622e..8e8c28104b45 100644 --- a/drivers/ide/arm/bast-ide.c +++ b/drivers/ide/arm/bast-ide.c | |||
@@ -42,6 +42,7 @@ static int __init bastide_register(unsigned int base, unsigned int aux, int irq) | |||
42 | 42 | ||
43 | hw.io_ports.ctl_addr = aux + (6 * 0x20); | 43 | hw.io_ports.ctl_addr = aux + (6 * 0x20); |
44 | hw.irq = irq; | 44 | hw.irq = irq; |
45 | hw.chipset = ide_generic; | ||
45 | 46 | ||
46 | hwif = ide_find_port(); | 47 | hwif = ide_find_port(); |
47 | if (hwif == NULL) | 48 | if (hwif == NULL) |
diff --git a/drivers/ide/arm/ide_arm.c b/drivers/ide/arm/ide_arm.c index 4263ffd4ab20..2f311da4c963 100644 --- a/drivers/ide/arm/ide_arm.c +++ b/drivers/ide/arm/ide_arm.c | |||
@@ -49,6 +49,7 @@ static int __init ide_arm_init(void) | |||
49 | memset(&hw, 0, sizeof(hw)); | 49 | memset(&hw, 0, sizeof(hw)); |
50 | ide_std_init_ports(&hw, base, ctl); | 50 | ide_std_init_ports(&hw, base, ctl); |
51 | hw.irq = IDE_ARM_IRQ; | 51 | hw.irq = IDE_ARM_IRQ; |
52 | hw.chipset = ide_generic; | ||
52 | 53 | ||
53 | hwif = ide_find_port(); | 54 | hwif = ide_find_port(); |
54 | if (hwif) { | 55 | if (hwif) { |
diff --git a/drivers/ide/arm/palm_bk3710.c b/drivers/ide/arm/palm_bk3710.c index 96378ebfb31f..d024ac8fad14 100644 --- a/drivers/ide/arm/palm_bk3710.c +++ b/drivers/ide/arm/palm_bk3710.c | |||
@@ -409,9 +409,6 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev) | |||
409 | 409 | ||
410 | ide_device_add(idx, &palm_bk3710_port_info); | 410 | ide_device_add(idx, &palm_bk3710_port_info); |
411 | 411 | ||
412 | if (!hwif->present) | ||
413 | goto out; | ||
414 | |||
415 | return 0; | 412 | return 0; |
416 | out: | 413 | out: |
417 | printk(KERN_WARNING "Palm Chip BK3710 IDE Register Fail\n"); | 414 | printk(KERN_WARNING "Palm Chip BK3710 IDE Register Fail\n"); |
diff --git a/drivers/ide/ide-generic.c b/drivers/ide/ide-generic.c index a6073e248f45..9134488ac043 100644 --- a/drivers/ide/ide-generic.c +++ b/drivers/ide/ide-generic.c | |||
@@ -125,6 +125,7 @@ static int __init ide_generic_init(void) | |||
125 | memset(&hw, 0, sizeof(hw)); | 125 | memset(&hw, 0, sizeof(hw)); |
126 | ide_std_init_ports(&hw, io_addr, io_addr + 0x206); | 126 | ide_std_init_ports(&hw, io_addr, io_addr + 0x206); |
127 | hw.irq = ide_default_irq(io_addr); | 127 | hw.irq = ide_default_irq(io_addr); |
128 | hw.chipset = ide_generic; | ||
128 | ide_init_port_hw(hwif, &hw); | 129 | ide_init_port_hw(hwif, &hw); |
129 | 130 | ||
130 | idx[i] = i; | 131 | idx[i] = i; |
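Several host drivers in this series gain the same one-liner: the hw_regs_t template now names its chipset before the port is registered, instead of leaving the core to guess it later. A user-space sketch of that init pattern (the struct and enum are simplified stand-ins for the kernel's hw_regs_t and chipset type):

#include <string.h>
#include <stdio.h>

/* Simplified stand-ins; ide_unknown is assumed, the other names appear in the series. */
enum chipset { ide_unknown, ide_generic, ide_cmd640, ide_pci };

struct hw_regs {
        unsigned long io_base, ctl;
        int irq;
        enum chipset chipset;
};

static void std_init_ports(struct hw_regs *hw, unsigned long base, unsigned long ctl)
{
        hw->io_base = base;
        hw->ctl = ctl;
}

int main(void)
{
        struct hw_regs hw;

        /* Zero the template, fill ports and irq, and now also name the chipset. */
        memset(&hw, 0, sizeof(hw));
        std_init_ports(&hw, 0x1f0, 0x3f6);
        hw.irq = 14;
        hw.chipset = ide_generic;

        printf("base=%#lx ctl=%#lx irq=%d chipset=%d\n",
               hw.io_base, hw.ctl, hw.irq, hw.chipset);
        return 0;
}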
diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c index 6a8953f68e9f..adbd01784162 100644 --- a/drivers/ide/ide-pnp.c +++ b/drivers/ide/ide-pnp.c | |||
@@ -55,6 +55,7 @@ static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) | |||
55 | memset(&hw, 0, sizeof(hw)); | 55 | memset(&hw, 0, sizeof(hw)); |
56 | ide_std_init_ports(&hw, base, ctl); | 56 | ide_std_init_ports(&hw, base, ctl); |
57 | hw.irq = pnp_irq(dev, 0); | 57 | hw.irq = pnp_irq(dev, 0); |
58 | hw.chipset = ide_generic; | ||
58 | 59 | ||
59 | hwif = ide_find_port(); | 60 | hwif = ide_find_port(); |
60 | if (hwif) { | 61 | if (hwif) { |
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 655ec7ef568a..380fa0c8cc84 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c | |||
@@ -1333,8 +1333,7 @@ static void ide_port_init_devices(ide_hwif_t *hwif) | |||
1333 | static void ide_init_port(ide_hwif_t *hwif, unsigned int port, | 1333 | static void ide_init_port(ide_hwif_t *hwif, unsigned int port, |
1334 | const struct ide_port_info *d) | 1334 | const struct ide_port_info *d) |
1335 | { | 1335 | { |
1336 | if (d->chipset != ide_etrax100) | 1336 | hwif->channel = port; |
1337 | hwif->channel = port; | ||
1338 | 1337 | ||
1339 | if (d->chipset) | 1338 | if (d->chipset) |
1340 | hwif->chipset = d->chipset; | 1339 | hwif->chipset = d->chipset; |
@@ -1519,7 +1518,7 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d) | |||
1519 | continue; | 1518 | continue; |
1520 | } | 1519 | } |
1521 | 1520 | ||
1522 | if (d->chipset != ide_etrax100 && (i & 1) && mate) { | 1521 | if ((i & 1) && mate) { |
1523 | hwif->mate = mate; | 1522 | hwif->mate = mate; |
1524 | mate->mate = hwif; | 1523 | mate->mate = hwif; |
1525 | } | 1524 | } |
@@ -1665,6 +1664,7 @@ static void ide_legacy_init_one(u8 *idx, hw_regs_t *hw, u8 port_no, | |||
1665 | 1664 | ||
1666 | ide_std_init_ports(hw, base, ctl); | 1665 | ide_std_init_ports(hw, base, ctl); |
1667 | hw->irq = irq; | 1666 | hw->irq = irq; |
1667 | hw->chipset = d->chipset; | ||
1668 | 1668 | ||
1669 | hwif = ide_find_port_slot(d); | 1669 | hwif = ide_find_port_slot(d); |
1670 | if (hwif) { | 1670 | if (hwif) { |
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c index 8d6ad812a014..55ec7f798772 100644 --- a/drivers/ide/ide-proc.c +++ b/drivers/ide/ide-proc.c | |||
@@ -63,7 +63,6 @@ static int proc_ide_read_imodel | |||
63 | case ide_pmac: name = "mac-io"; break; | 63 | case ide_pmac: name = "mac-io"; break; |
64 | case ide_au1xxx: name = "au1xxx"; break; | 64 | case ide_au1xxx: name = "au1xxx"; break; |
65 | case ide_palm3710: name = "palm3710"; break; | 65 | case ide_palm3710: name = "palm3710"; break; |
66 | case ide_etrax100: name = "etrax100"; break; | ||
67 | case ide_acorn: name = "acorn"; break; | 66 | case ide_acorn: name = "acorn"; break; |
68 | default: name = "(unknown)"; break; | 67 | default: name = "(unknown)"; break; |
69 | } | 68 | } |
diff --git a/drivers/ide/legacy/buddha.c b/drivers/ide/legacy/buddha.c index 5c730e4dd735..9a1d27ef3f8a 100644 --- a/drivers/ide/legacy/buddha.c +++ b/drivers/ide/legacy/buddha.c | |||
@@ -138,6 +138,8 @@ static void __init buddha_setup_ports(hw_regs_t *hw, unsigned long base, | |||
138 | 138 | ||
139 | hw->irq = IRQ_AMIGA_PORTS; | 139 | hw->irq = IRQ_AMIGA_PORTS; |
140 | hw->ack_intr = ack_intr; | 140 | hw->ack_intr = ack_intr; |
141 | |||
142 | hw->chipset = ide_generic; | ||
141 | } | 143 | } |
142 | 144 | ||
143 | /* | 145 | /* |
diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/legacy/falconide.c index 9e449a0c623f..af11028b4794 100644 --- a/drivers/ide/legacy/falconide.c +++ b/drivers/ide/legacy/falconide.c | |||
@@ -81,6 +81,8 @@ static void __init falconide_setup_ports(hw_regs_t *hw) | |||
81 | 81 | ||
82 | hw->irq = IRQ_MFP_IDE; | 82 | hw->irq = IRQ_MFP_IDE; |
83 | hw->ack_intr = NULL; | 83 | hw->ack_intr = NULL; |
84 | |||
85 | hw->chipset = ide_generic; | ||
84 | } | 86 | } |
85 | 87 | ||
86 | /* | 88 | /* |
diff --git a/drivers/ide/legacy/gayle.c b/drivers/ide/legacy/gayle.c index a9c2593a898c..fed7d812761c 100644 --- a/drivers/ide/legacy/gayle.c +++ b/drivers/ide/legacy/gayle.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/ide.h> | 16 | #include <linux/ide.h> |
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/zorro.h> | 18 | #include <linux/zorro.h> |
19 | #include <linux/module.h> | ||
19 | 20 | ||
20 | #include <asm/setup.h> | 21 | #include <asm/setup.h> |
21 | #include <asm/amigahw.h> | 22 | #include <asm/amigahw.h> |
@@ -62,7 +63,10 @@ | |||
62 | GAYLE_NUM_HWIFS-1) | 63 | GAYLE_NUM_HWIFS-1) |
63 | #define GAYLE_HAS_CONTROL_REG (!ide_doubler) | 64 | #define GAYLE_HAS_CONTROL_REG (!ide_doubler) |
64 | #define GAYLE_IDEREG_SIZE (ide_doubler ? 0x1000 : 0x2000) | 65 | #define GAYLE_IDEREG_SIZE (ide_doubler ? 0x1000 : 0x2000) |
66 | |||
65 | int ide_doubler = 0; /* support IDE doublers? */ | 67 | int ide_doubler = 0; /* support IDE doublers? */ |
68 | EXPORT_SYMBOL_GPL(ide_doubler); | ||
69 | |||
66 | module_param_named(doubler, ide_doubler, bool, 0); | 70 | module_param_named(doubler, ide_doubler, bool, 0); |
67 | MODULE_PARM_DESC(doubler, "enable support for IDE doublers"); | 71 | MODULE_PARM_DESC(doubler, "enable support for IDE doublers"); |
68 | #endif /* CONFIG_BLK_DEV_IDEDOUBLER */ | 72 | #endif /* CONFIG_BLK_DEV_IDEDOUBLER */ |
@@ -112,6 +116,8 @@ static void __init gayle_setup_ports(hw_regs_t *hw, unsigned long base, | |||
112 | 116 | ||
113 | hw->irq = IRQ_AMIGA_PORTS; | 117 | hw->irq = IRQ_AMIGA_PORTS; |
114 | hw->ack_intr = ack_intr; | 118 | hw->ack_intr = ack_intr; |
119 | |||
120 | hw->chipset = ide_generic; | ||
115 | } | 121 | } |
116 | 122 | ||
117 | /* | 123 | /* |
diff --git a/drivers/ide/legacy/macide.c b/drivers/ide/legacy/macide.c index caa2632dd08e..2e84290d0bcc 100644 --- a/drivers/ide/legacy/macide.c +++ b/drivers/ide/legacy/macide.c | |||
@@ -78,6 +78,8 @@ static void __init macide_setup_ports(hw_regs_t *hw, unsigned long base, | |||
78 | 78 | ||
79 | hw->irq = irq; | 79 | hw->irq = irq; |
80 | hw->ack_intr = ack_intr; | 80 | hw->ack_intr = ack_intr; |
81 | |||
82 | hw->chipset = ide_generic; | ||
81 | } | 83 | } |
82 | 84 | ||
83 | static const char *mac_ide_name[] = | 85 | static const char *mac_ide_name[] = |
diff --git a/drivers/ide/legacy/q40ide.c b/drivers/ide/legacy/q40ide.c index 6f535d00e638..8ff6e2d20834 100644 --- a/drivers/ide/legacy/q40ide.c +++ b/drivers/ide/legacy/q40ide.c | |||
@@ -70,6 +70,8 @@ static void q40_ide_setup_ports(hw_regs_t *hw, unsigned long base, | |||
70 | 70 | ||
71 | hw->irq = irq; | 71 | hw->irq = irq; |
72 | hw->ack_intr = ack_intr; | 72 | hw->ack_intr = ack_intr; |
73 | |||
74 | hw->chipset = ide_generic; | ||
73 | } | 75 | } |
74 | 76 | ||
75 | static void q40ide_input_data(ide_drive_t *drive, struct request *rq, | 77 | static void q40ide_input_data(ide_drive_t *drive, struct request *rq, |
diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/pci/cmd640.c index aaf38109eaec..b38a1980dcd5 100644 --- a/drivers/ide/pci/cmd640.c +++ b/drivers/ide/pci/cmd640.c | |||
@@ -747,9 +747,11 @@ static int __init cmd640x_init(void) | |||
747 | 747 | ||
748 | ide_std_init_ports(&hw[0], 0x1f0, 0x3f6); | 748 | ide_std_init_ports(&hw[0], 0x1f0, 0x3f6); |
749 | hw[0].irq = 14; | 749 | hw[0].irq = 14; |
750 | hw[0].chipset = ide_cmd640; | ||
750 | 751 | ||
751 | ide_std_init_ports(&hw[1], 0x170, 0x376); | 752 | ide_std_init_ports(&hw[1], 0x170, 0x376); |
752 | hw[1].irq = 15; | 753 | hw[1].irq = 15; |
754 | hw[1].chipset = ide_cmd640; | ||
753 | 755 | ||
754 | printk(KERN_INFO "cmd640: buggy cmd640%c interface on %s, config=0x%02x" | 756 | printk(KERN_INFO "cmd640: buggy cmd640%c interface on %s, config=0x%02x" |
755 | "\n", 'a' + cmd640_chip_version - 1, bus_type, cfr); | 757 | "\n", 'a' + cmd640_chip_version - 1, bus_type, cfr); |
diff --git a/drivers/ide/pci/delkin_cb.c b/drivers/ide/pci/delkin_cb.c index b9e457996d0e..af0f30051d5a 100644 --- a/drivers/ide/pci/delkin_cb.c +++ b/drivers/ide/pci/delkin_cb.c | |||
@@ -47,13 +47,18 @@ static const struct ide_port_ops delkin_cb_port_ops = { | |||
47 | .quirkproc = ide_undecoded_slave, | 47 | .quirkproc = ide_undecoded_slave, |
48 | }; | 48 | }; |
49 | 49 | ||
50 | static const struct ide_port_info delkin_cb_port_info = { | ||
51 | .port_ops = &delkin_cb_port_ops, | ||
52 | .host_flags = IDE_HFLAG_IO_32BIT | IDE_HFLAG_UNMASK_IRQS | | ||
53 | IDE_HFLAG_NO_DMA, | ||
54 | }; | ||
55 | |||
50 | static int __devinit | 56 | static int __devinit |
51 | delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id) | 57 | delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id) |
52 | { | 58 | { |
53 | unsigned long base; | 59 | unsigned long base; |
54 | hw_regs_t hw; | 60 | hw_regs_t hw; |
55 | ide_hwif_t *hwif = NULL; | 61 | ide_hwif_t *hwif = NULL; |
56 | ide_drive_t *drive; | ||
57 | int i, rc; | 62 | int i, rc; |
58 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | 63 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; |
59 | 64 | ||
@@ -79,6 +84,7 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id) | |||
79 | memset(&hw, 0, sizeof(hw)); | 84 | memset(&hw, 0, sizeof(hw)); |
80 | ide_std_init_ports(&hw, base + 0x10, base + 0x1e); | 85 | ide_std_init_ports(&hw, base + 0x10, base + 0x1e); |
81 | hw.irq = dev->irq; | 86 | hw.irq = dev->irq; |
87 | hw.dev = &dev->dev; | ||
82 | hw.chipset = ide_pci; /* this enables IRQ sharing */ | 88 | hw.chipset = ide_pci; /* this enables IRQ sharing */ |
83 | 89 | ||
84 | hwif = ide_find_port(); | 90 | hwif = ide_find_port(); |
@@ -89,26 +95,16 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id) | |||
89 | 95 | ||
90 | ide_init_port_data(hwif, i); | 96 | ide_init_port_data(hwif, i); |
91 | ide_init_port_hw(hwif, &hw); | 97 | ide_init_port_hw(hwif, &hw); |
92 | hwif->port_ops = &delkin_cb_port_ops; | ||
93 | 98 | ||
94 | idx[0] = i; | 99 | idx[0] = i; |
95 | 100 | ||
96 | ide_device_add(idx, NULL); | 101 | ide_device_add(idx, &delkin_cb_port_info); |
97 | |||
98 | if (!hwif->present) | ||
99 | goto out_disable; | ||
100 | 102 | ||
101 | pci_set_drvdata(dev, hwif); | 103 | pci_set_drvdata(dev, hwif); |
102 | hwif->dev = &dev->dev; | 104 | |
103 | drive = &hwif->drives[0]; | ||
104 | if (drive->present) { | ||
105 | drive->io_32bit = 1; | ||
106 | drive->unmask = 1; | ||
107 | } | ||
108 | return 0; | 105 | return 0; |
109 | 106 | ||
110 | out_disable: | 107 | out_disable: |
111 | printk(KERN_ERR "delkin_cb: no IDE devices found\n"); | ||
112 | pci_release_regions(dev); | 108 | pci_release_regions(dev); |
113 | pci_disable_device(dev); | 109 | pci_disable_device(dev); |
114 | return -ENODEV; | 110 | return -ENODEV; |
@@ -139,14 +135,12 @@ static struct pci_driver driver = { | |||
139 | .remove = delkin_cb_remove, | 135 | .remove = delkin_cb_remove, |
140 | }; | 136 | }; |
141 | 137 | ||
142 | static int | 138 | static int __init delkin_cb_init(void) |
143 | delkin_cb_init (void) | ||
144 | { | 139 | { |
145 | return pci_register_driver(&driver); | 140 | return pci_register_driver(&driver); |
146 | } | 141 | } |
147 | 142 | ||
148 | static void | 143 | static void __exit delkin_cb_exit(void) |
149 | delkin_cb_exit (void) | ||
150 | { | 144 | { |
151 | pci_unregister_driver(&driver); | 145 | pci_unregister_driver(&driver); |
152 | } | 146 | } |
diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c index 4b0b85d8faf5..e127eb25ab63 100644 --- a/drivers/ide/pci/sis5513.c +++ b/drivers/ide/pci/sis5513.c | |||
@@ -569,6 +569,11 @@ static int __devinit sis5513_init_one(struct pci_dev *dev, const struct pci_devi | |||
569 | { | 569 | { |
570 | struct ide_port_info d = sis5513_chipset; | 570 | struct ide_port_info d = sis5513_chipset; |
571 | u8 udma_rates[] = { 0x00, 0x00, 0x07, 0x1f, 0x3f, 0x3f, 0x7f, 0x7f }; | 571 | u8 udma_rates[] = { 0x00, 0x00, 0x07, 0x1f, 0x3f, 0x3f, 0x7f, 0x7f }; |
572 | int rc; | ||
573 | |||
574 | rc = pci_enable_device(dev); | ||
575 | if (rc) | ||
576 | return rc; | ||
572 | 577 | ||
573 | if (sis_find_family(dev) == 0) | 578 | if (sis_find_family(dev) == 0) |
574 | return -ENOTSUPP; | 579 | return -ENOTSUPP; |
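The sis5513 hunk enables the PCI device before anything else in the probe touches it and propagates the error code. A trivial user-space sketch of that enable-first, bail-early shape (enable_device() is a made-up stand-in for pci_enable_device()):

#include <stdio.h>
#include <errno.h>

/* Stand-in for pci_enable_device(): 0 on success, negative errno on failure. */
static int enable_device(int should_fail)
{
        return should_fail ? -ENODEV : 0;
}

static int probe(int should_fail)
{
        int rc;

        /* The pattern the hunk adds: enable the device and propagate the
         * error before touching any config space or BARs. */
        rc = enable_device(should_fail);
        if (rc)
                return rc;

        /* ... chipset detection and port setup would follow ... */
        return 0;
}

int main(void)
{
        printf("probe ok: %d\n", probe(0));
        printf("probe fail: %d\n", probe(1));
        return 0;
}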
diff --git a/drivers/ide/ppc/mpc8xx.c b/drivers/ide/ppc/mpc8xx.c index f0e638dcc3ab..236f9c38e519 100644 --- a/drivers/ide/ppc/mpc8xx.c +++ b/drivers/ide/ppc/mpc8xx.c | |||
@@ -303,6 +303,8 @@ static int __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port) | |||
303 | pcmp->pcmc_per = 0x100000 >> (16 * _slot_); | 303 | pcmp->pcmc_per = 0x100000 >> (16 * _slot_); |
304 | #endif /* CONFIG_IDE_8xx_PCCARD */ | 304 | #endif /* CONFIG_IDE_8xx_PCCARD */ |
305 | 305 | ||
306 | hw->chipset = ide_generic; | ||
307 | |||
306 | return 0; | 308 | return 0; |
307 | } | 309 | } |
308 | #endif /* CONFIG_IDE_8xx_PCCARD || CONFIG_IDE_8xx_DIRECT */ | 310 | #endif /* CONFIG_IDE_8xx_PCCARD || CONFIG_IDE_8xx_DIRECT */ |
@@ -377,6 +379,8 @@ static int __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port) | |||
377 | ((immap_t *) IMAP_ADDR)->im_siu_conf.sc_siel |= | 379 | ((immap_t *) IMAP_ADDR)->im_siu_conf.sc_siel |= |
378 | (0x80000000 >> ioport_dsc[data_port].irq); | 380 | (0x80000000 >> ioport_dsc[data_port].irq); |
379 | 381 | ||
382 | hw->chipset = ide_generic; | ||
383 | |||
380 | return 0; | 384 | return 0; |
381 | } | 385 | } |
382 | #endif /* CONFIG_IDE_8xx_DIRECT */ | 386 | #endif /* CONFIG_IDE_8xx_DIRECT */ |
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c index 5fcbdccd7a53..16a874bb1561 100644 --- a/drivers/isdn/hardware/eicon/divasmain.c +++ b/drivers/isdn/hardware/eicon/divasmain.c | |||
@@ -806,7 +806,6 @@ static int DIVA_INIT_FUNCTION divas_init(void) | |||
806 | 806 | ||
807 | if (!create_divas_proc()) { | 807 | if (!create_divas_proc()) { |
808 | #ifdef MODULE | 808 | #ifdef MODULE |
809 | remove_divas_proc(); | ||
810 | divas_unregister_chrdev(); | 809 | divas_unregister_chrdev(); |
811 | divasfunc_exit(); | 810 | divasfunc_exit(); |
812 | #endif | 811 | #endif |
diff --git a/drivers/isdn/hardware/eicon/divasproc.c b/drivers/isdn/hardware/eicon/divasproc.c index fae895828a17..040827288ec9 100644 --- a/drivers/isdn/hardware/eicon/divasproc.c +++ b/drivers/isdn/hardware/eicon/divasproc.c | |||
@@ -125,8 +125,8 @@ static const struct file_operations divas_fops = { | |||
125 | 125 | ||
126 | int create_divas_proc(void) | 126 | int create_divas_proc(void) |
127 | { | 127 | { |
128 | proc_create(divas_proc_name, S_IFREG | S_IRUGO, proc_net_eicon, | 128 | divas_proc_entry = proc_create(divas_proc_name, S_IFREG | S_IRUGO, |
129 | &divas_fops); | 129 | proc_net_eicon, &divas_fops); |
130 | if (!divas_proc_entry) | 130 | if (!divas_proc_entry) |
131 | return (0); | 131 | return (0); |
132 | 132 | ||
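The divasproc bug was that proc_create()'s return value was thrown away, so divas_proc_entry stayed NULL and the check that follows always reported failure. A user-space analogue of the fix, storing the handle that is about to be tested (the file name and helpers are invented for illustration):

#include <stdio.h>
#include <stdlib.h>

static FILE *log_entry;   /* stands in for divas_proc_entry */

static FILE *create_log(const char *name)
{
        return fopen(name, "w");
}

static int create_divas_log(void)
{
        /* The fix: assign the handle that is about to be checked, instead of
         * calling the constructor and then testing a variable it never set. */
        log_entry = create_log("/tmp/divas.log");
        if (!log_entry)
                return 0;       /* same "0 on failure" convention as the driver */
        return 1;
}

int main(void)
{
        if (!create_divas_log()) {
                fprintf(stderr, "creation failed\n");
                return EXIT_FAILURE;
        }
        fclose(log_entry);
        return EXIT_SUCCESS;
}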
diff --git a/drivers/isdn/hysdn/hysdn_procconf.c b/drivers/isdn/hysdn/hysdn_procconf.c index 15906d005b05..484299b031f8 100644 --- a/drivers/isdn/hysdn/hysdn_procconf.c +++ b/drivers/isdn/hysdn/hysdn_procconf.c | |||
@@ -207,30 +207,17 @@ hysdn_conf_write(struct file *file, const char __user *buf, size_t count, loff_t | |||
207 | /* read conf file -> output card info data */ | 207 | /* read conf file -> output card info data */ |
208 | /*******************************************/ | 208 | /*******************************************/ |
209 | static ssize_t | 209 | static ssize_t |
210 | hysdn_conf_read(struct file *file, char __user *buf, size_t count, loff_t * off) | 210 | hysdn_conf_read(struct file *file, char __user *buf, size_t count, loff_t *off) |
211 | { | 211 | { |
212 | char *cp; | 212 | char *cp; |
213 | int i; | ||
214 | 213 | ||
215 | if (file->f_mode & FMODE_READ) { | 214 | if (!(file->f_mode & FMODE_READ)) |
216 | if (!(cp = file->private_data)) | 215 | return -EPERM; /* no permission to read */ |
217 | return (-EFAULT); /* should never happen */ | 216 | |
218 | i = strlen(cp); /* get total string length */ | 217 | if (!(cp = file->private_data)) |
219 | if (*off < i) { | 218 | return -EFAULT; /* should never happen */ |
220 | /* still bytes to transfer */ | 219 | |
221 | cp += *off; /* point to desired data offset */ | 220 | return simple_read_from_buffer(buf, count, off, cp, strlen(cp)); |
222 | i -= *off; /* remaining length */ | ||
223 | if (i > count) | ||
224 | i = count; /* limit length to transfer */ | ||
225 | if (copy_to_user(buf, cp, i)) | ||
226 | return (-EFAULT); /* copy error */ | ||
227 | *off += i; /* adjust offset */ | ||
228 | } else | ||
229 | return (0); | ||
230 | } else | ||
231 | return (-EPERM); /* no permission to read */ | ||
232 | |||
233 | return (i); | ||
234 | } /* hysdn_conf_read */ | 221 | } /* hysdn_conf_read */ |
235 | 222 | ||
236 | /******************/ | 223 | /******************/ |
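hysdn_conf_read() now delegates the offset/length bookkeeping to simple_read_from_buffer(). A user-space model of what that helper does with the buffer and file offset (the real kernel helper additionally uses copy_to_user() and can return -EFAULT; this sketch only shows the clamping and offset advance):

#include <stdio.h>
#include <string.h>

/* Copy at most `count` bytes from `from + *ppos`, advance *ppos, and return
 * how many bytes were copied (0 once the end of the buffer is reached). */
static long read_from_buffer(void *to, size_t count, long *ppos,
                             const void *from, size_t available)
{
        size_t pos = (size_t)*ppos;

        if (pos >= available)
                return 0;
        if (count > available - pos)
                count = available - pos;
        memcpy(to, (const char *)from + pos, count);
        *ppos = (long)(pos + count);
        return (long)count;
}

int main(void)
{
        const char conf[] = "id=1\nirq=10\n";   /* hypothetical card info text */
        char chunk[5];
        long pos = 0, n;

        while ((n = read_from_buffer(chunk, sizeof(chunk) - 1, &pos,
                                     conf, strlen(conf))) > 0) {
                chunk[n] = '\0';
                printf("read %ld bytes: %s", n, chunk);
        }
        return 0;
}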
diff --git a/drivers/misc/thinkpad_acpi.c b/drivers/misc/thinkpad_acpi.c index a0ce0b2fa03e..b5969298f3d3 100644 --- a/drivers/misc/thinkpad_acpi.c +++ b/drivers/misc/thinkpad_acpi.c | |||
@@ -1293,7 +1293,7 @@ static void tpacpi_input_send_radiosw(void) | |||
1293 | mutex_lock(&tpacpi_inputdev_send_mutex); | 1293 | mutex_lock(&tpacpi_inputdev_send_mutex); |
1294 | 1294 | ||
1295 | input_report_switch(tpacpi_inputdev, | 1295 | input_report_switch(tpacpi_inputdev, |
1296 | SW_RADIO, !!wlsw); | 1296 | SW_RFKILL_ALL, !!wlsw); |
1297 | input_sync(tpacpi_inputdev); | 1297 | input_sync(tpacpi_inputdev); |
1298 | 1298 | ||
1299 | mutex_unlock(&tpacpi_inputdev_send_mutex); | 1299 | mutex_unlock(&tpacpi_inputdev_send_mutex); |
@@ -1921,6 +1921,29 @@ static struct attribute *hotkey_mask_attributes[] __initdata = { | |||
1921 | &dev_attr_hotkey_wakeup_hotunplug_complete.attr, | 1921 | &dev_attr_hotkey_wakeup_hotunplug_complete.attr, |
1922 | }; | 1922 | }; |
1923 | 1923 | ||
1924 | static void hotkey_exit(void) | ||
1925 | { | ||
1926 | #ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL | ||
1927 | hotkey_poll_stop_sync(); | ||
1928 | #endif | ||
1929 | |||
1930 | if (hotkey_dev_attributes) | ||
1931 | delete_attr_set(hotkey_dev_attributes, &tpacpi_pdev->dev.kobj); | ||
1932 | |||
1933 | kfree(hotkey_keycode_map); | ||
1934 | |||
1935 | if (tp_features.hotkey) { | ||
1936 | dbg_printk(TPACPI_DBG_EXIT, | ||
1937 | "restoring original hot key mask\n"); | ||
1938 | /* no short-circuit boolean operator below! */ | ||
1939 | if ((hotkey_mask_set(hotkey_orig_mask) | | ||
1940 | hotkey_status_set(hotkey_orig_status)) != 0) | ||
1941 | printk(TPACPI_ERR | ||
1942 | "failed to restore hot key mask " | ||
1943 | "to BIOS defaults\n"); | ||
1944 | } | ||
1945 | } | ||
1946 | |||
1924 | static int __init hotkey_init(struct ibm_init_struct *iibm) | 1947 | static int __init hotkey_init(struct ibm_init_struct *iibm) |
1925 | { | 1948 | { |
1926 | /* Requirements for changing the default keymaps: | 1949 | /* Requirements for changing the default keymaps: |
@@ -2060,226 +2083,220 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) | |||
2060 | vdbg_printk(TPACPI_DBG_INIT, "hotkeys are %s\n", | 2083 | vdbg_printk(TPACPI_DBG_INIT, "hotkeys are %s\n", |
2061 | str_supported(tp_features.hotkey)); | 2084 | str_supported(tp_features.hotkey)); |
2062 | 2085 | ||
2063 | if (tp_features.hotkey) { | 2086 | if (!tp_features.hotkey) |
2064 | hotkey_dev_attributes = create_attr_set(13, NULL); | 2087 | return 1; |
2065 | if (!hotkey_dev_attributes) | ||
2066 | return -ENOMEM; | ||
2067 | res = add_many_to_attr_set(hotkey_dev_attributes, | ||
2068 | hotkey_attributes, | ||
2069 | ARRAY_SIZE(hotkey_attributes)); | ||
2070 | if (res) | ||
2071 | return res; | ||
2072 | 2088 | ||
2073 | /* mask not supported on 570, 600e/x, 770e, 770x, A21e, A2xm/p, | 2089 | hotkey_dev_attributes = create_attr_set(13, NULL); |
2074 | A30, R30, R31, T20-22, X20-21, X22-24. Detected by checking | 2090 | if (!hotkey_dev_attributes) |
2075 | for HKEY interface version 0x100 */ | 2091 | return -ENOMEM; |
2076 | if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) { | 2092 | res = add_many_to_attr_set(hotkey_dev_attributes, |
2077 | if ((hkeyv >> 8) != 1) { | 2093 | hotkey_attributes, |
2078 | printk(TPACPI_ERR "unknown version of the " | 2094 | ARRAY_SIZE(hotkey_attributes)); |
2079 | "HKEY interface: 0x%x\n", hkeyv); | 2095 | if (res) |
2080 | printk(TPACPI_ERR "please report this to %s\n", | 2096 | goto err_exit; |
2081 | TPACPI_MAIL); | 2097 | |
2082 | } else { | 2098 | /* mask not supported on 570, 600e/x, 770e, 770x, A21e, A2xm/p, |
2083 | /* | 2099 | A30, R30, R31, T20-22, X20-21, X22-24. Detected by checking |
2084 | * MHKV 0x100 in A31, R40, R40e, | 2100 | for HKEY interface version 0x100 */ |
2085 | * T4x, X31, and later | 2101 | if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) { |
2086 | */ | 2102 | if ((hkeyv >> 8) != 1) { |
2087 | tp_features.hotkey_mask = 1; | 2103 | printk(TPACPI_ERR "unknown version of the " |
2088 | } | 2104 | "HKEY interface: 0x%x\n", hkeyv); |
2105 | printk(TPACPI_ERR "please report this to %s\n", | ||
2106 | TPACPI_MAIL); | ||
2107 | } else { | ||
2108 | /* | ||
2109 | * MHKV 0x100 in A31, R40, R40e, | ||
2110 | * T4x, X31, and later | ||
2111 | */ | ||
2112 | tp_features.hotkey_mask = 1; | ||
2089 | } | 2113 | } |
2114 | } | ||
2090 | 2115 | ||
2091 | vdbg_printk(TPACPI_DBG_INIT, "hotkey masks are %s\n", | 2116 | vdbg_printk(TPACPI_DBG_INIT, "hotkey masks are %s\n", |
2092 | str_supported(tp_features.hotkey_mask)); | 2117 | str_supported(tp_features.hotkey_mask)); |
2093 | 2118 | ||
2094 | if (tp_features.hotkey_mask) { | 2119 | if (tp_features.hotkey_mask) { |
2095 | if (!acpi_evalf(hkey_handle, &hotkey_all_mask, | 2120 | if (!acpi_evalf(hkey_handle, &hotkey_all_mask, |
2096 | "MHKA", "qd")) { | 2121 | "MHKA", "qd")) { |
2097 | printk(TPACPI_ERR | 2122 | printk(TPACPI_ERR |
2098 | "missing MHKA handler, " | 2123 | "missing MHKA handler, " |
2099 | "please report this to %s\n", | 2124 | "please report this to %s\n", |
2100 | TPACPI_MAIL); | 2125 | TPACPI_MAIL); |
2101 | /* FN+F12, FN+F4, FN+F3 */ | 2126 | /* FN+F12, FN+F4, FN+F3 */ |
2102 | hotkey_all_mask = 0x080cU; | 2127 | hotkey_all_mask = 0x080cU; |
2103 | } | ||
2104 | } | 2128 | } |
2129 | } | ||
2105 | 2130 | ||
2106 | /* hotkey_source_mask *must* be zero for | 2131 | /* hotkey_source_mask *must* be zero for |
2107 | * the first hotkey_mask_get */ | 2132 | * the first hotkey_mask_get */ |
2108 | res = hotkey_status_get(&hotkey_orig_status); | 2133 | res = hotkey_status_get(&hotkey_orig_status); |
2109 | if (!res && tp_features.hotkey_mask) { | 2134 | if (res) |
2110 | res = hotkey_mask_get(); | 2135 | goto err_exit; |
2111 | hotkey_orig_mask = hotkey_mask; | 2136 | |
2112 | if (!res) { | 2137 | if (tp_features.hotkey_mask) { |
2113 | res = add_many_to_attr_set( | 2138 | res = hotkey_mask_get(); |
2114 | hotkey_dev_attributes, | 2139 | if (res) |
2115 | hotkey_mask_attributes, | 2140 | goto err_exit; |
2116 | ARRAY_SIZE(hotkey_mask_attributes)); | 2141 | |
2117 | } | 2142 | hotkey_orig_mask = hotkey_mask; |
2118 | } | 2143 | res = add_many_to_attr_set( |
2144 | hotkey_dev_attributes, | ||
2145 | hotkey_mask_attributes, | ||
2146 | ARRAY_SIZE(hotkey_mask_attributes)); | ||
2147 | if (res) | ||
2148 | goto err_exit; | ||
2149 | } | ||
2119 | 2150 | ||
2120 | #ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL | 2151 | #ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL |
2121 | if (tp_features.hotkey_mask) { | 2152 | if (tp_features.hotkey_mask) { |
2122 | hotkey_source_mask = TPACPI_HKEY_NVRAM_GOOD_MASK | 2153 | hotkey_source_mask = TPACPI_HKEY_NVRAM_GOOD_MASK |
2123 | & ~hotkey_all_mask; | 2154 | & ~hotkey_all_mask; |
2124 | } else { | 2155 | } else { |
2125 | hotkey_source_mask = TPACPI_HKEY_NVRAM_GOOD_MASK; | 2156 | hotkey_source_mask = TPACPI_HKEY_NVRAM_GOOD_MASK; |
2126 | } | 2157 | } |
2127 | 2158 | ||
2128 | vdbg_printk(TPACPI_DBG_INIT, | 2159 | vdbg_printk(TPACPI_DBG_INIT, |
2129 | "hotkey source mask 0x%08x, polling freq %d\n", | 2160 | "hotkey source mask 0x%08x, polling freq %d\n", |
2130 | hotkey_source_mask, hotkey_poll_freq); | 2161 | hotkey_source_mask, hotkey_poll_freq); |
2131 | #endif | 2162 | #endif |
2132 | 2163 | ||
2133 | /* Not all thinkpads have a hardware radio switch */ | 2164 | /* Not all thinkpads have a hardware radio switch */ |
2134 | if (!res && acpi_evalf(hkey_handle, &status, "WLSW", "qd")) { | 2165 | if (acpi_evalf(hkey_handle, &status, "WLSW", "qd")) { |
2135 | tp_features.hotkey_wlsw = 1; | 2166 | tp_features.hotkey_wlsw = 1; |
2136 | printk(TPACPI_INFO | 2167 | printk(TPACPI_INFO |
2137 | "radio switch found; radios are %s\n", | 2168 | "radio switch found; radios are %s\n", |
2138 | enabled(status, 0)); | 2169 | enabled(status, 0)); |
2139 | res = add_to_attr_set(hotkey_dev_attributes, | 2170 | res = add_to_attr_set(hotkey_dev_attributes, |
2140 | &dev_attr_hotkey_radio_sw.attr); | 2171 | &dev_attr_hotkey_radio_sw.attr); |
2141 | } | 2172 | } |
2142 | 2173 | ||
2143 | /* For X41t, X60t, X61t Tablets... */ | 2174 | /* For X41t, X60t, X61t Tablets... */ |
2144 | if (!res && acpi_evalf(hkey_handle, &status, "MHKG", "qd")) { | 2175 | if (!res && acpi_evalf(hkey_handle, &status, "MHKG", "qd")) { |
2145 | tp_features.hotkey_tablet = 1; | 2176 | tp_features.hotkey_tablet = 1; |
2146 | printk(TPACPI_INFO | 2177 | printk(TPACPI_INFO |
2147 | "possible tablet mode switch found; " | 2178 | "possible tablet mode switch found; " |
2148 | "ThinkPad in %s mode\n", | 2179 | "ThinkPad in %s mode\n", |
2149 | (status & TP_HOTKEY_TABLET_MASK)? | 2180 | (status & TP_HOTKEY_TABLET_MASK)? |
2150 | "tablet" : "laptop"); | 2181 | "tablet" : "laptop"); |
2151 | res = add_to_attr_set(hotkey_dev_attributes, | 2182 | res = add_to_attr_set(hotkey_dev_attributes, |
2152 | &dev_attr_hotkey_tablet_mode.attr); | 2183 | &dev_attr_hotkey_tablet_mode.attr); |
2153 | } | 2184 | } |
2154 | 2185 | ||
2155 | if (!res) | 2186 | if (!res) |
2156 | res = register_attr_set_with_sysfs( | 2187 | res = register_attr_set_with_sysfs( |
2157 | hotkey_dev_attributes, | 2188 | hotkey_dev_attributes, |
2158 | &tpacpi_pdev->dev.kobj); | 2189 | &tpacpi_pdev->dev.kobj); |
2159 | if (res) | 2190 | if (res) |
2160 | return res; | 2191 | goto err_exit; |
2161 | 2192 | ||
2162 | /* Set up key map */ | 2193 | /* Set up key map */ |
2163 | 2194 | ||
2164 | hotkey_keycode_map = kmalloc(TPACPI_HOTKEY_MAP_SIZE, | 2195 | hotkey_keycode_map = kmalloc(TPACPI_HOTKEY_MAP_SIZE, |
2165 | GFP_KERNEL); | 2196 | GFP_KERNEL); |
2166 | if (!hotkey_keycode_map) { | 2197 | if (!hotkey_keycode_map) { |
2167 | printk(TPACPI_ERR | 2198 | printk(TPACPI_ERR |
2168 | "failed to allocate memory for key map\n"); | 2199 | "failed to allocate memory for key map\n"); |
2169 | return -ENOMEM; | 2200 | res = -ENOMEM; |
2170 | } | 2201 | goto err_exit; |
2202 | } | ||
2171 | 2203 | ||
2172 | if (thinkpad_id.vendor == PCI_VENDOR_ID_LENOVO) { | 2204 | if (thinkpad_id.vendor == PCI_VENDOR_ID_LENOVO) { |
2173 | dbg_printk(TPACPI_DBG_INIT, | 2205 | dbg_printk(TPACPI_DBG_INIT, |
2174 | "using Lenovo default hot key map\n"); | 2206 | "using Lenovo default hot key map\n"); |
2175 | memcpy(hotkey_keycode_map, &lenovo_keycode_map, | 2207 | memcpy(hotkey_keycode_map, &lenovo_keycode_map, |
2176 | TPACPI_HOTKEY_MAP_SIZE); | 2208 | TPACPI_HOTKEY_MAP_SIZE); |
2209 | } else { | ||
2210 | dbg_printk(TPACPI_DBG_INIT, | ||
2211 | "using IBM default hot key map\n"); | ||
2212 | memcpy(hotkey_keycode_map, &ibm_keycode_map, | ||
2213 | TPACPI_HOTKEY_MAP_SIZE); | ||
2214 | } | ||
2215 | |||
2216 | set_bit(EV_KEY, tpacpi_inputdev->evbit); | ||
2217 | set_bit(EV_MSC, tpacpi_inputdev->evbit); | ||
2218 | set_bit(MSC_SCAN, tpacpi_inputdev->mscbit); | ||
2219 | tpacpi_inputdev->keycodesize = TPACPI_HOTKEY_MAP_TYPESIZE; | ||
2220 | tpacpi_inputdev->keycodemax = TPACPI_HOTKEY_MAP_LEN; | ||
2221 | tpacpi_inputdev->keycode = hotkey_keycode_map; | ||
2222 | for (i = 0; i < TPACPI_HOTKEY_MAP_LEN; i++) { | ||
2223 | if (hotkey_keycode_map[i] != KEY_RESERVED) { | ||
2224 | set_bit(hotkey_keycode_map[i], | ||
2225 | tpacpi_inputdev->keybit); | ||
2177 | } else { | 2226 | } else { |
2178 | dbg_printk(TPACPI_DBG_INIT, | 2227 | if (i < sizeof(hotkey_reserved_mask)*8) |
2179 | "using IBM default hot key map\n"); | 2228 | hotkey_reserved_mask |= 1 << i; |
2180 | memcpy(hotkey_keycode_map, &ibm_keycode_map, | ||
2181 | TPACPI_HOTKEY_MAP_SIZE); | ||
2182 | } | ||
2183 | |||
2184 | set_bit(EV_KEY, tpacpi_inputdev->evbit); | ||
2185 | set_bit(EV_MSC, tpacpi_inputdev->evbit); | ||
2186 | set_bit(MSC_SCAN, tpacpi_inputdev->mscbit); | ||
2187 | tpacpi_inputdev->keycodesize = TPACPI_HOTKEY_MAP_TYPESIZE; | ||
2188 | tpacpi_inputdev->keycodemax = TPACPI_HOTKEY_MAP_LEN; | ||
2189 | tpacpi_inputdev->keycode = hotkey_keycode_map; | ||
2190 | for (i = 0; i < TPACPI_HOTKEY_MAP_LEN; i++) { | ||
2191 | if (hotkey_keycode_map[i] != KEY_RESERVED) { | ||
2192 | set_bit(hotkey_keycode_map[i], | ||
2193 | tpacpi_inputdev->keybit); | ||
2194 | } else { | ||
2195 | if (i < sizeof(hotkey_reserved_mask)*8) | ||
2196 | hotkey_reserved_mask |= 1 << i; | ||
2197 | } | ||
2198 | } | ||
2199 | |||
2200 | if (tp_features.hotkey_wlsw) { | ||
2201 | set_bit(EV_SW, tpacpi_inputdev->evbit); | ||
2202 | set_bit(SW_RADIO, tpacpi_inputdev->swbit); | ||
2203 | } | ||
2204 | if (tp_features.hotkey_tablet) { | ||
2205 | set_bit(EV_SW, tpacpi_inputdev->evbit); | ||
2206 | set_bit(SW_TABLET_MODE, tpacpi_inputdev->swbit); | ||
2207 | } | 2229 | } |
2230 | } | ||
2208 | 2231 | ||
2209 | /* Do not issue duplicate brightness change events to | 2232 | if (tp_features.hotkey_wlsw) { |
2210 | * userspace */ | 2233 | set_bit(EV_SW, tpacpi_inputdev->evbit); |
2211 | if (!tp_features.bright_acpimode) | 2234 | set_bit(SW_RFKILL_ALL, tpacpi_inputdev->swbit); |
2212 | /* update bright_acpimode... */ | 2235 | } |
2213 | tpacpi_check_std_acpi_brightness_support(); | 2236 | if (tp_features.hotkey_tablet) { |
2214 | 2237 | set_bit(EV_SW, tpacpi_inputdev->evbit); | |
2215 | if (tp_features.bright_acpimode) { | 2238 | set_bit(SW_TABLET_MODE, tpacpi_inputdev->swbit); |
2216 | printk(TPACPI_INFO | 2239 | } |
2217 | "This ThinkPad has standard ACPI backlight " | ||
2218 | "brightness control, supported by the ACPI " | ||
2219 | "video driver\n"); | ||
2220 | printk(TPACPI_NOTICE | ||
2221 | "Disabling thinkpad-acpi brightness events " | ||
2222 | "by default...\n"); | ||
2223 | |||
2224 | /* The hotkey_reserved_mask change below is not | ||
2225 | * necessary while the keys are at KEY_RESERVED in the | ||
2226 | * default map, but better safe than sorry, leave it | ||
2227 | * here as a marker of what we have to do, especially | ||
2228 | * when we finally become able to set this at runtime | ||
2229 | * on response to X.org requests */ | ||
2230 | hotkey_reserved_mask |= | ||
2231 | (1 << TP_ACPI_HOTKEYSCAN_FNHOME) | ||
2232 | | (1 << TP_ACPI_HOTKEYSCAN_FNEND); | ||
2233 | } | ||
2234 | 2240 | ||
2235 | dbg_printk(TPACPI_DBG_INIT, | 2241 | /* Do not issue duplicate brightness change events to |
2236 | "enabling hot key handling\n"); | 2242 | * userspace */ |
2237 | res = hotkey_status_set(1); | 2243 | if (!tp_features.bright_acpimode) |
2238 | if (res) | 2244 | /* update bright_acpimode... */ |
2239 | return res; | 2245 | tpacpi_check_std_acpi_brightness_support(); |
2240 | res = hotkey_mask_set(((hotkey_all_mask | hotkey_source_mask) | ||
2241 | & ~hotkey_reserved_mask) | ||
2242 | | hotkey_orig_mask); | ||
2243 | if (res < 0 && res != -ENXIO) | ||
2244 | return res; | ||
2245 | 2246 | ||
2246 | dbg_printk(TPACPI_DBG_INIT, | 2247 | if (tp_features.bright_acpimode) { |
2247 | "legacy hot key reporting over procfs %s\n", | 2248 | printk(TPACPI_INFO |
2248 | (hotkey_report_mode < 2) ? | 2249 | "This ThinkPad has standard ACPI backlight " |
2249 | "enabled" : "disabled"); | 2250 | "brightness control, supported by the ACPI " |
2251 | "video driver\n"); | ||
2252 | printk(TPACPI_NOTICE | ||
2253 | "Disabling thinkpad-acpi brightness events " | ||
2254 | "by default...\n"); | ||
2255 | |||
2256 | /* The hotkey_reserved_mask change below is not | ||
2257 | * necessary while the keys are at KEY_RESERVED in the | ||
2258 | * default map, but better safe than sorry, leave it | ||
2259 | * here as a marker of what we have to do, especially | ||
2260 | * when we finally become able to set this at runtime | ||
2261 | * on response to X.org requests */ | ||
2262 | hotkey_reserved_mask |= | ||
2263 | (1 << TP_ACPI_HOTKEYSCAN_FNHOME) | ||
2264 | | (1 << TP_ACPI_HOTKEYSCAN_FNEND); | ||
2265 | } | ||
2266 | |||
2267 | dbg_printk(TPACPI_DBG_INIT, "enabling hot key handling\n"); | ||
2268 | res = hotkey_status_set(1); | ||
2269 | if (res) { | ||
2270 | hotkey_exit(); | ||
2271 | return res; | ||
2272 | } | ||
2273 | res = hotkey_mask_set(((hotkey_all_mask | hotkey_source_mask) | ||
2274 | & ~hotkey_reserved_mask) | ||
2275 | | hotkey_orig_mask); | ||
2276 | if (res < 0 && res != -ENXIO) { | ||
2277 | hotkey_exit(); | ||
2278 | return res; | ||
2279 | } | ||
2250 | 2280 | ||
2251 | tpacpi_inputdev->open = &hotkey_inputdev_open; | 2281 | dbg_printk(TPACPI_DBG_INIT, |
2252 | tpacpi_inputdev->close = &hotkey_inputdev_close; | 2282 | "legacy hot key reporting over procfs %s\n", |
2283 | (hotkey_report_mode < 2) ? | ||
2284 | "enabled" : "disabled"); | ||
2253 | 2285 | ||
2254 | hotkey_poll_setup_safe(1); | 2286 | tpacpi_inputdev->open = &hotkey_inputdev_open; |
2255 | tpacpi_input_send_radiosw(); | 2287 | tpacpi_inputdev->close = &hotkey_inputdev_close; |
2256 | tpacpi_input_send_tabletsw(); | ||
2257 | } | ||
2258 | 2288 | ||
2259 | return (tp_features.hotkey)? 0 : 1; | 2289 | hotkey_poll_setup_safe(1); |
2260 | } | 2290 | tpacpi_input_send_radiosw(); |
2291 | tpacpi_input_send_tabletsw(); | ||
2261 | 2292 | ||
2262 | static void hotkey_exit(void) | 2293 | return 0; |
2263 | { | ||
2264 | #ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL | ||
2265 | hotkey_poll_stop_sync(); | ||
2266 | #endif | ||
2267 | 2294 | ||
2268 | if (tp_features.hotkey) { | 2295 | err_exit: |
2269 | dbg_printk(TPACPI_DBG_EXIT, | 2296 | delete_attr_set(hotkey_dev_attributes, &tpacpi_pdev->dev.kobj); |
2270 | "restoring original hot key mask\n"); | 2297 | hotkey_dev_attributes = NULL; |
2271 | /* no short-circuit boolean operator below! */ | ||
2272 | if ((hotkey_mask_set(hotkey_orig_mask) | | ||
2273 | hotkey_status_set(hotkey_orig_status)) != 0) | ||
2274 | printk(TPACPI_ERR | ||
2275 | "failed to restore hot key mask " | ||
2276 | "to BIOS defaults\n"); | ||
2277 | } | ||
2278 | 2298 | ||
2279 | if (hotkey_dev_attributes) { | 2299 | return (res < 0)? res : 1; |
2280 | delete_attr_set(hotkey_dev_attributes, &tpacpi_pdev->dev.kobj); | ||
2281 | hotkey_dev_attributes = NULL; | ||
2282 | } | ||
2283 | } | 2300 | } |
2284 | 2301 | ||
2285 | static void hotkey_notify(struct ibm_struct *ibm, u32 event) | 2302 | static void hotkey_notify(struct ibm_struct *ibm, u32 event) |
@@ -3319,7 +3336,7 @@ static struct tpacpi_led_classdev tpacpi_led_thinklight = { | |||
3319 | 3336 | ||
3320 | static int __init light_init(struct ibm_init_struct *iibm) | 3337 | static int __init light_init(struct ibm_init_struct *iibm) |
3321 | { | 3338 | { |
3322 | int rc = 0; | 3339 | int rc; |
3323 | 3340 | ||
3324 | vdbg_printk(TPACPI_DBG_INIT, "initializing light subdriver\n"); | 3341 | vdbg_printk(TPACPI_DBG_INIT, "initializing light subdriver\n"); |
3325 | 3342 | ||
@@ -3337,20 +3354,23 @@ static int __init light_init(struct ibm_init_struct *iibm) | |||
3337 | tp_features.light_status = | 3354 | tp_features.light_status = |
3338 | acpi_evalf(ec_handle, NULL, "KBLT", "qv"); | 3355 | acpi_evalf(ec_handle, NULL, "KBLT", "qv"); |
3339 | 3356 | ||
3340 | vdbg_printk(TPACPI_DBG_INIT, "light is %s\n", | 3357 | vdbg_printk(TPACPI_DBG_INIT, "light is %s, light status is %s\n", |
3341 | str_supported(tp_features.light)); | 3358 | str_supported(tp_features.light), |
3359 | str_supported(tp_features.light_status)); | ||
3342 | 3360 | ||
3343 | if (tp_features.light) { | 3361 | if (!tp_features.light) |
3344 | rc = led_classdev_register(&tpacpi_pdev->dev, | 3362 | return 1; |
3345 | &tpacpi_led_thinklight.led_classdev); | 3363 | |
3346 | } | 3364 | rc = led_classdev_register(&tpacpi_pdev->dev, |
3365 | &tpacpi_led_thinklight.led_classdev); | ||
3347 | 3366 | ||
3348 | if (rc < 0) { | 3367 | if (rc < 0) { |
3349 | tp_features.light = 0; | 3368 | tp_features.light = 0; |
3350 | tp_features.light_status = 0; | 3369 | tp_features.light_status = 0; |
3351 | } else { | 3370 | } else { |
3352 | rc = (tp_features.light)? 0 : 1; | 3371 | rc = 0; |
3353 | } | 3372 | } |
3373 | |||
3354 | return rc; | 3374 | return rc; |
3355 | } | 3375 | } |
3356 | 3376 | ||
@@ -3833,7 +3853,7 @@ static const char * const tpacpi_led_names[TPACPI_LED_NUMLEDS] = { | |||
3833 | "tpacpi::standby", | 3853 | "tpacpi::standby", |
3834 | }; | 3854 | }; |
3835 | 3855 | ||
3836 | static int led_get_status(unsigned int led) | 3856 | static int led_get_status(const unsigned int led) |
3837 | { | 3857 | { |
3838 | int status; | 3858 | int status; |
3839 | enum led_status_t led_s; | 3859 | enum led_status_t led_s; |
@@ -3857,41 +3877,42 @@ static int led_get_status(unsigned int led) | |||
3857 | /* not reached */ | 3877 | /* not reached */ |
3858 | } | 3878 | } |
3859 | 3879 | ||
3860 | static int led_set_status(unsigned int led, enum led_status_t ledstatus) | 3880 | static int led_set_status(const unsigned int led, |
3881 | const enum led_status_t ledstatus) | ||
3861 | { | 3882 | { |
3862 | /* off, on, blink. Index is led_status_t */ | 3883 | /* off, on, blink. Index is led_status_t */ |
3863 | static const int led_sled_arg1[] = { 0, 1, 3 }; | 3884 | static const unsigned int led_sled_arg1[] = { 0, 1, 3 }; |
3864 | static const int led_exp_hlbl[] = { 0, 0, 1 }; /* led# * */ | 3885 | static const unsigned int led_led_arg1[] = { 0, 0x80, 0xc0 }; |
3865 | static const int led_exp_hlcl[] = { 0, 1, 1 }; /* led# * */ | ||
3866 | static const int led_led_arg1[] = { 0, 0x80, 0xc0 }; | ||
3867 | 3886 | ||
3868 | int rc = 0; | 3887 | int rc = 0; |
3869 | 3888 | ||
3870 | switch (led_supported) { | 3889 | switch (led_supported) { |
3871 | case TPACPI_LED_570: | 3890 | case TPACPI_LED_570: |
3872 | /* 570 */ | 3891 | /* 570 */ |
3873 | led = 1 << led; | 3892 | if (led > 7) |
3874 | if (!acpi_evalf(led_handle, NULL, NULL, "vdd", | 3893 | return -EINVAL; |
3875 | led, led_sled_arg1[ledstatus])) | 3894 | if (!acpi_evalf(led_handle, NULL, NULL, "vdd", |
3876 | rc = -EIO; | 3895 | (1 << led), led_sled_arg1[ledstatus])) |
3877 | break; | 3896 | rc = -EIO; |
3897 | break; | ||
3878 | case TPACPI_LED_OLD: | 3898 | case TPACPI_LED_OLD: |
3879 | /* 600e/x, 770e, 770x, A21e, A2xm/p, T20-22, X20 */ | 3899 | /* 600e/x, 770e, 770x, A21e, A2xm/p, T20-22, X20 */ |
3880 | led = 1 << led; | 3900 | if (led > 7) |
3881 | rc = ec_write(TPACPI_LED_EC_HLMS, led); | 3901 | return -EINVAL; |
3882 | if (rc >= 0) | 3902 | rc = ec_write(TPACPI_LED_EC_HLMS, (1 << led)); |
3883 | rc = ec_write(TPACPI_LED_EC_HLBL, | 3903 | if (rc >= 0) |
3884 | led * led_exp_hlbl[ledstatus]); | 3904 | rc = ec_write(TPACPI_LED_EC_HLBL, |
3885 | if (rc >= 0) | 3905 | (ledstatus == TPACPI_LED_BLINK) << led); |
3886 | rc = ec_write(TPACPI_LED_EC_HLCL, | 3906 | if (rc >= 0) |
3887 | led * led_exp_hlcl[ledstatus]); | 3907 | rc = ec_write(TPACPI_LED_EC_HLCL, |
3888 | break; | 3908 | (ledstatus != TPACPI_LED_OFF) << led); |
3909 | break; | ||
3889 | case TPACPI_LED_NEW: | 3910 | case TPACPI_LED_NEW: |
3890 | /* all others */ | 3911 | /* all others */ |
3891 | if (!acpi_evalf(led_handle, NULL, NULL, "vdd", | 3912 | if (!acpi_evalf(led_handle, NULL, NULL, "vdd", |
3892 | led, led_led_arg1[ledstatus])) | 3913 | led, led_led_arg1[ledstatus])) |
3893 | rc = -EIO; | 3914 | rc = -EIO; |
3894 | break; | 3915 | break; |
3895 | default: | 3916 | default: |
3896 | rc = -ENXIO; | 3917 | rc = -ENXIO; |
3897 | } | 3918 | } |
@@ -3978,7 +3999,6 @@ static void led_exit(void) | |||
3978 | } | 3999 | } |
3979 | 4000 | ||
3980 | kfree(tpacpi_leds); | 4001 | kfree(tpacpi_leds); |
3981 | tpacpi_leds = NULL; | ||
3982 | } | 4002 | } |
3983 | 4003 | ||
3984 | static int __init led_init(struct ibm_init_struct *iibm) | 4004 | static int __init led_init(struct ibm_init_struct *iibm) |
@@ -4802,7 +4822,6 @@ static void brightness_exit(void) | |||
4802 | vdbg_printk(TPACPI_DBG_EXIT, | 4822 | vdbg_printk(TPACPI_DBG_EXIT, |
4803 | "calling backlight_device_unregister()\n"); | 4823 | "calling backlight_device_unregister()\n"); |
4804 | backlight_device_unregister(ibm_backlight_device); | 4824 | backlight_device_unregister(ibm_backlight_device); |
4805 | ibm_backlight_device = NULL; | ||
4806 | } | 4825 | } |
4807 | } | 4826 | } |
4808 | 4827 | ||
@@ -5764,11 +5783,16 @@ static int __init fan_init(struct ibm_init_struct *iibm) | |||
5764 | fan_control_access_mode != TPACPI_FAN_WR_NONE) { | 5783 | fan_control_access_mode != TPACPI_FAN_WR_NONE) { |
5765 | rc = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj, | 5784 | rc = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj, |
5766 | &fan_attr_group); | 5785 | &fan_attr_group); |
5767 | if (!(rc < 0)) | ||
5768 | rc = driver_create_file(&tpacpi_hwmon_pdriver.driver, | ||
5769 | &driver_attr_fan_watchdog); | ||
5770 | if (rc < 0) | 5786 | if (rc < 0) |
5771 | return rc; | 5787 | return rc; |
5788 | |||
5789 | rc = driver_create_file(&tpacpi_hwmon_pdriver.driver, | ||
5790 | &driver_attr_fan_watchdog); | ||
5791 | if (rc < 0) { | ||
5792 | sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj, | ||
5793 | &fan_attr_group); | ||
5794 | return rc; | ||
5795 | } | ||
5772 | return 0; | 5796 | return 0; |
5773 | } else | 5797 | } else |
5774 | return 1; | 5798 | return 1; |
diff --git a/drivers/net/7990.c b/drivers/net/7990.c index 750a46f4bc58..ad6b8a5b6574 100644 --- a/drivers/net/7990.c +++ b/drivers/net/7990.c | |||
@@ -506,6 +506,7 @@ int lance_open (struct net_device *dev) | |||
506 | 506 | ||
507 | return res; | 507 | return res; |
508 | } | 508 | } |
509 | EXPORT_SYMBOL_GPL(lance_open); | ||
509 | 510 | ||
510 | int lance_close (struct net_device *dev) | 511 | int lance_close (struct net_device *dev) |
511 | { | 512 | { |
@@ -521,6 +522,7 @@ int lance_close (struct net_device *dev) | |||
521 | 522 | ||
522 | return 0; | 523 | return 0; |
523 | } | 524 | } |
525 | EXPORT_SYMBOL_GPL(lance_close); | ||
524 | 526 | ||
525 | void lance_tx_timeout(struct net_device *dev) | 527 | void lance_tx_timeout(struct net_device *dev) |
526 | { | 528 | { |
@@ -529,7 +531,7 @@ void lance_tx_timeout(struct net_device *dev) | |||
529 | dev->trans_start = jiffies; | 531 | dev->trans_start = jiffies; |
530 | netif_wake_queue (dev); | 532 | netif_wake_queue (dev); |
531 | } | 533 | } |
532 | 534 | EXPORT_SYMBOL_GPL(lance_tx_timeout); | |
533 | 535 | ||
534 | int lance_start_xmit (struct sk_buff *skb, struct net_device *dev) | 536 | int lance_start_xmit (struct sk_buff *skb, struct net_device *dev) |
535 | { | 537 | { |
@@ -586,6 +588,7 @@ int lance_start_xmit (struct sk_buff *skb, struct net_device *dev) | |||
586 | 588 | ||
587 | return 0; | 589 | return 0; |
588 | } | 590 | } |
591 | EXPORT_SYMBOL_GPL(lance_start_xmit); | ||
589 | 592 | ||
590 | /* taken from the depca driver via a2065.c */ | 593 | /* taken from the depca driver via a2065.c */ |
591 | static void lance_load_multicast (struct net_device *dev) | 594 | static void lance_load_multicast (struct net_device *dev) |
@@ -654,6 +657,7 @@ void lance_set_multicast (struct net_device *dev) | |||
654 | if (!stopped) | 657 | if (!stopped) |
655 | netif_start_queue (dev); | 658 | netif_start_queue (dev); |
656 | } | 659 | } |
660 | EXPORT_SYMBOL_GPL(lance_set_multicast); | ||
657 | 661 | ||
658 | #ifdef CONFIG_NET_POLL_CONTROLLER | 662 | #ifdef CONFIG_NET_POLL_CONTROLLER |
659 | void lance_poll(struct net_device *dev) | 663 | void lance_poll(struct net_device *dev) |
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c index 6ddc911e7d15..99e0b4cdc56f 100644 --- a/drivers/net/atlx/atl1.c +++ b/drivers/net/atlx/atl1.c | |||
@@ -637,22 +637,6 @@ static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw) | |||
637 | } | 637 | } |
638 | 638 | ||
639 | /* | 639 | /* |
640 | * Force the PHY into power saving mode using vendor magic. | ||
641 | */ | ||
642 | #ifdef CONFIG_PM | ||
643 | static void atl1_phy_enter_power_saving(struct atl1_hw *hw) | ||
644 | { | ||
645 | atl1_write_phy_reg(hw, MII_DBG_ADDR, 0); | ||
646 | atl1_write_phy_reg(hw, MII_DBG_DATA, 0x124E); | ||
647 | atl1_write_phy_reg(hw, MII_DBG_ADDR, 2); | ||
648 | atl1_write_phy_reg(hw, MII_DBG_DATA, 0x3000); | ||
649 | atl1_write_phy_reg(hw, MII_DBG_ADDR, 3); | ||
650 | atl1_write_phy_reg(hw, MII_DBG_DATA, 0); | ||
651 | |||
652 | } | ||
653 | #endif | ||
654 | |||
655 | /* | ||
656 | * Resets the PHY and make all config validate | 640 | * Resets the PHY and make all config validate |
657 | * hw - Struct containing variables accessed by shared code | 641 | * hw - Struct containing variables accessed by shared code |
658 | * | 642 | * |
@@ -2860,7 +2844,6 @@ disable_wol: | |||
2860 | ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; | 2844 | ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; |
2861 | iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC); | 2845 | iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC); |
2862 | ioread32(hw->hw_addr + REG_PCIE_PHYMISC); | 2846 | ioread32(hw->hw_addr + REG_PCIE_PHYMISC); |
2863 | atl1_phy_enter_power_saving(hw); | ||
2864 | hw->phy_configured = false; | 2847 | hw->phy_configured = false; |
2865 | pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); | 2848 | pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); |
2866 | exit: | 2849 | exit: |
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 287a61918739..faae01dc1c4b 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c | |||
@@ -1766,16 +1766,20 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa) | |||
1766 | mutex_lock(&ehea_bcmc_regs.lock); | 1766 | mutex_lock(&ehea_bcmc_regs.lock); |
1767 | 1767 | ||
1768 | /* Deregister old MAC in pHYP */ | 1768 | /* Deregister old MAC in pHYP */ |
1769 | ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC); | 1769 | if (port->state == EHEA_PORT_UP) { |
1770 | if (ret) | 1770 | ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC); |
1771 | goto out_upregs; | 1771 | if (ret) |
1772 | goto out_upregs; | ||
1773 | } | ||
1772 | 1774 | ||
1773 | port->mac_addr = cb0->port_mac_addr << 16; | 1775 | port->mac_addr = cb0->port_mac_addr << 16; |
1774 | 1776 | ||
1775 | /* Register new MAC in pHYP */ | 1777 | /* Register new MAC in pHYP */ |
1776 | ret = ehea_broadcast_reg_helper(port, H_REG_BCMC); | 1778 | if (port->state == EHEA_PORT_UP) { |
1777 | if (ret) | 1779 | ret = ehea_broadcast_reg_helper(port, H_REG_BCMC); |
1778 | goto out_upregs; | 1780 | if (ret) |
1781 | goto out_upregs; | ||
1782 | } | ||
1779 | 1783 | ||
1780 | ret = 0; | 1784 | ret = 0; |
1781 | 1785 | ||
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 9eca97fb0a54..2cb244763292 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c | |||
@@ -3273,6 +3273,20 @@ static void nv_link_irq(struct net_device *dev) | |||
3273 | dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name); | 3273 | dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name); |
3274 | } | 3274 | } |
3275 | 3275 | ||
3276 | static void nv_msi_workaround(struct fe_priv *np) | ||
3277 | { | ||
3278 | |||
3279 | /* Need to toggle the msi irq mask within the ethernet device, | ||
3280 | * otherwise, future interrupts will not be detected. | ||
3281 | */ | ||
3282 | if (np->msi_flags & NV_MSI_ENABLED) { | ||
3283 | u8 __iomem *base = np->base; | ||
3284 | |||
3285 | writel(0, base + NvRegMSIIrqMask); | ||
3286 | writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); | ||
3287 | } | ||
3288 | } | ||
3289 | |||
3276 | static irqreturn_t nv_nic_irq(int foo, void *data) | 3290 | static irqreturn_t nv_nic_irq(int foo, void *data) |
3277 | { | 3291 | { |
3278 | struct net_device *dev = (struct net_device *) data; | 3292 | struct net_device *dev = (struct net_device *) data; |
@@ -3295,6 +3309,8 @@ static irqreturn_t nv_nic_irq(int foo, void *data) | |||
3295 | if (!(events & np->irqmask)) | 3309 | if (!(events & np->irqmask)) |
3296 | break; | 3310 | break; |
3297 | 3311 | ||
3312 | nv_msi_workaround(np); | ||
3313 | |||
3298 | spin_lock(&np->lock); | 3314 | spin_lock(&np->lock); |
3299 | nv_tx_done(dev); | 3315 | nv_tx_done(dev); |
3300 | spin_unlock(&np->lock); | 3316 | spin_unlock(&np->lock); |
@@ -3410,6 +3426,8 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data) | |||
3410 | if (!(events & np->irqmask)) | 3426 | if (!(events & np->irqmask)) |
3411 | break; | 3427 | break; |
3412 | 3428 | ||
3429 | nv_msi_workaround(np); | ||
3430 | |||
3413 | spin_lock(&np->lock); | 3431 | spin_lock(&np->lock); |
3414 | nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); | 3432 | nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); |
3415 | spin_unlock(&np->lock); | 3433 | spin_unlock(&np->lock); |
@@ -3750,6 +3768,8 @@ static irqreturn_t nv_nic_irq_test(int foo, void *data) | |||
3750 | if (!(events & NVREG_IRQ_TIMER)) | 3768 | if (!(events & NVREG_IRQ_TIMER)) |
3751 | return IRQ_RETVAL(0); | 3769 | return IRQ_RETVAL(0); |
3752 | 3770 | ||
3771 | nv_msi_workaround(np); | ||
3772 | |||
3753 | spin_lock(&np->lock); | 3773 | spin_lock(&np->lock); |
3754 | np->intr_test = 1; | 3774 | np->intr_test = 1; |
3755 | spin_unlock(&np->lock); | 3775 | spin_unlock(&np->lock); |
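The nv_msi_workaround() added above re-arms MSI delivery by briefly masking and then unmasking the vector inside the NIC each time an interrupt is serviced; without that toggle some forcedeth chips never raise another MSI. A minimal sketch of the same mask-toggle idea follows, written against a hypothetical memory-mapped register layout (the offsets and constants are illustrative, not forcedeth's real register map).

#include <stdint.h>

/* Hypothetical register layout -- illustrative only, not forcedeth's map. */
#define REG_MSI_IRQ_MASK   0x0A0
#define MSI_VECTOR_0_EN    0x01u

/* Re-arm an MSI vector by masking and immediately unmasking it in the
 * device.  'base' points at the memory-mapped I/O region; 'msi_enabled'
 * mirrors the driver's "MSI is in use" flag. */
static void msi_rearm(volatile uint32_t *base, int msi_enabled)
{
        if (!msi_enabled)
                return;
        base[REG_MSI_IRQ_MASK / 4] = 0;               /* mask: clear the vector */
        base[REG_MSI_IRQ_MASK / 4] = MSI_VECTOR_0_EN; /* unmask: re-arm it      */
}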
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig index ce816ba9c40d..e6317557a531 100644 --- a/drivers/net/irda/Kconfig +++ b/drivers/net/irda/Kconfig | |||
@@ -329,6 +329,7 @@ config PXA_FICP | |||
329 | config MCS_FIR | 329 | config MCS_FIR |
330 | tristate "MosChip MCS7780 IrDA-USB dongle" | 330 | tristate "MosChip MCS7780 IrDA-USB dongle" |
331 | depends on IRDA && USB && EXPERIMENTAL | 331 | depends on IRDA && USB && EXPERIMENTAL |
332 | select CRC32 | ||
332 | help | 333 | help |
333 | Say Y or M here if you want to build support for the MosChip | 334 | Say Y or M here if you want to build support for the MosChip |
334 | MCS7780 IrDA-USB bridge device driver. | 335 | MCS7780 IrDA-USB bridge device driver. |
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c index 6321b059ce13..2f38e847e2cd 100644 --- a/drivers/net/ixgbe/ixgbe_82598.c +++ b/drivers/net/ixgbe/ixgbe_82598.c | |||
@@ -58,8 +58,8 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw); | |||
58 | 58 | ||
59 | static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) | 59 | static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) |
60 | { | 60 | { |
61 | hw->mac.num_rx_queues = IXGBE_82598_MAX_TX_QUEUES; | 61 | hw->mac.num_rx_queues = IXGBE_82598_MAX_RX_QUEUES; |
62 | hw->mac.num_tx_queues = IXGBE_82598_MAX_RX_QUEUES; | 62 | hw->mac.num_tx_queues = IXGBE_82598_MAX_TX_QUEUES; |
63 | hw->mac.num_rx_addrs = IXGBE_82598_RAR_ENTRIES; | 63 | hw->mac.num_rx_addrs = IXGBE_82598_RAR_ENTRIES; |
64 | 64 | ||
65 | /* PHY ops are filled in by default properly for Fiber only */ | 65 | /* PHY ops are filled in by default properly for Fiber only */ |
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index 58a26a47af29..bafb69b6f7cb 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c | |||
@@ -341,12 +341,6 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb) | |||
341 | struct pppox_sock *relay_po; | 341 | struct pppox_sock *relay_po; |
342 | 342 | ||
343 | if (sk->sk_state & PPPOX_BOUND) { | 343 | if (sk->sk_state & PPPOX_BOUND) { |
344 | struct pppoe_hdr *ph = pppoe_hdr(skb); | ||
345 | int len = ntohs(ph->length); | ||
346 | skb_pull_rcsum(skb, sizeof(struct pppoe_hdr)); | ||
347 | if (pskb_trim_rcsum(skb, len)) | ||
348 | goto abort_kfree; | ||
349 | |||
350 | ppp_input(&po->chan, skb); | 344 | ppp_input(&po->chan, skb); |
351 | } else if (sk->sk_state & PPPOX_RELAY) { | 345 | } else if (sk->sk_state & PPPOX_RELAY) { |
352 | relay_po = get_item_by_addr(&po->pppoe_relay); | 346 | relay_po = get_item_by_addr(&po->pppoe_relay); |
@@ -357,7 +351,6 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb) | |||
357 | if ((sk_pppox(relay_po)->sk_state & PPPOX_CONNECTED) == 0) | 351 | if ((sk_pppox(relay_po)->sk_state & PPPOX_CONNECTED) == 0) |
358 | goto abort_put; | 352 | goto abort_put; |
359 | 353 | ||
360 | skb_pull(skb, sizeof(struct pppoe_hdr)); | ||
361 | if (!__pppoe_xmit(sk_pppox(relay_po), skb)) | 354 | if (!__pppoe_xmit(sk_pppox(relay_po), skb)) |
362 | goto abort_put; | 355 | goto abort_put; |
363 | } else { | 356 | } else { |
@@ -388,6 +381,7 @@ static int pppoe_rcv(struct sk_buff *skb, | |||
388 | { | 381 | { |
389 | struct pppoe_hdr *ph; | 382 | struct pppoe_hdr *ph; |
390 | struct pppox_sock *po; | 383 | struct pppox_sock *po; |
384 | int len; | ||
391 | 385 | ||
392 | if (!(skb = skb_share_check(skb, GFP_ATOMIC))) | 386 | if (!(skb = skb_share_check(skb, GFP_ATOMIC))) |
393 | goto out; | 387 | goto out; |
@@ -399,10 +393,21 @@ static int pppoe_rcv(struct sk_buff *skb, | |||
399 | goto drop; | 393 | goto drop; |
400 | 394 | ||
401 | ph = pppoe_hdr(skb); | 395 | ph = pppoe_hdr(skb); |
396 | len = ntohs(ph->length); | ||
397 | |||
398 | skb_pull_rcsum(skb, sizeof(*ph)); | ||
399 | if (skb->len < len) | ||
400 | goto drop; | ||
402 | 401 | ||
403 | po = get_item(ph->sid, eth_hdr(skb)->h_source, dev->ifindex); | 402 | po = get_item(ph->sid, eth_hdr(skb)->h_source, dev->ifindex); |
404 | if (po != NULL) | 403 | if (!po) |
405 | return sk_receive_skb(sk_pppox(po), skb, 0); | 404 | goto drop; |
405 | |||
406 | if (pskb_trim_rcsum(skb, len)) | ||
407 | goto drop; | ||
408 | |||
409 | return sk_receive_skb(sk_pppox(po), skb, 0); | ||
410 | |||
406 | drop: | 411 | drop: |
407 | kfree_skb(skb); | 412 | kfree_skb(skb); |
408 | out: | 413 | out: |
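The reworked pppoe_rcv() pulls the PPPoE header once, compares the advertised payload length against what the skb actually contains, and only trims the packet after a bound socket has been found; in other words, the peer-supplied length field is validated before it is trusted. The following self-contained user-space sketch shows that validate-then-trim step with a simplified header struct (it is not the kernel's sk_buff API, and the function name is made up).

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>

struct pppoe_hdr_s {                    /* simplified on-wire PPPoE header */
        uint8_t  ver_type;
        uint8_t  code;
        uint16_t sid;
        uint16_t length;                /* payload length, network order */
} __attribute__((packed));

/* Return the validated payload length, or -1 if the frame lies about its
 * size.  'buf'/'buflen' describe the frame starting at the PPPoE header. */
static int pppoe_payload(const uint8_t *buf, size_t buflen,
                         const uint8_t **payload)
{
        struct pppoe_hdr_s ph;
        size_t len;

        if (buflen < sizeof(ph))        /* enough bytes for a header at all? */
                return -1;
        memcpy(&ph, buf, sizeof(ph));
        len = ntohs(ph.length);
        if (buflen - sizeof(ph) < len)  /* advertised length exceeds the data */
                return -1;
        *payload = buf + sizeof(ph);    /* "trim": hand out only len bytes */
        return (int)len;
}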
@@ -427,12 +432,12 @@ static int pppoe_disc_rcv(struct sk_buff *skb, | |||
427 | if (dev_net(dev) != &init_net) | 432 | if (dev_net(dev) != &init_net) |
428 | goto abort; | 433 | goto abort; |
429 | 434 | ||
430 | if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) | ||
431 | goto abort; | ||
432 | |||
433 | if (!(skb = skb_share_check(skb, GFP_ATOMIC))) | 435 | if (!(skb = skb_share_check(skb, GFP_ATOMIC))) |
434 | goto out; | 436 | goto out; |
435 | 437 | ||
438 | if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) | ||
439 | goto abort; | ||
440 | |||
436 | ph = pppoe_hdr(skb); | 441 | ph = pppoe_hdr(skb); |
437 | if (ph->code != PADT_CODE) | 442 | if (ph->code != PADT_CODE) |
438 | goto abort; | 443 | goto abort; |
@@ -937,12 +942,10 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
937 | m->msg_namelen = 0; | 942 | m->msg_namelen = 0; |
938 | 943 | ||
939 | if (skb) { | 944 | if (skb) { |
940 | struct pppoe_hdr *ph = pppoe_hdr(skb); | 945 | total_len = min(total_len, skb->len); |
941 | const int len = ntohs(ph->length); | 946 | error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len); |
942 | |||
943 | error = memcpy_toiovec(m->msg_iov, (unsigned char *) &ph->tag[0], len); | ||
944 | if (error == 0) | 947 | if (error == 0) |
945 | error = len; | 948 | error = total_len; |
946 | } | 949 | } |
947 | 950 | ||
948 | kfree_skb(skb); | 951 | kfree_skb(skb); |
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c index 70cfdb46aa27..f9298827a76c 100644 --- a/drivers/net/pppol2tp.c +++ b/drivers/net/pppol2tp.c | |||
@@ -783,14 +783,18 @@ static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
783 | err = 0; | 783 | err = 0; |
784 | skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, | 784 | skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, |
785 | flags & MSG_DONTWAIT, &err); | 785 | flags & MSG_DONTWAIT, &err); |
786 | if (skb) { | 786 | if (!skb) |
787 | err = memcpy_toiovec(msg->msg_iov, (unsigned char *) skb->data, | 787 | goto end; |
788 | skb->len); | 788 | |
789 | if (err < 0) | 789 | if (len > skb->len) |
790 | goto do_skb_free; | 790 | len = skb->len; |
791 | err = skb->len; | 791 | else if (len < skb->len) |
792 | } | 792 | msg->msg_flags |= MSG_TRUNC; |
793 | do_skb_free: | 793 | |
794 | err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len); | ||
795 | if (likely(err == 0)) | ||
796 | err = len; | ||
797 | |||
794 | kfree_skb(skb); | 798 | kfree_skb(skb); |
795 | end: | 799 | end: |
796 | return err; | 800 | return err; |
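Both receive paths above move from memcpy_toiovec() with a driver-computed length to skb_copy_datagram_iovec() bounded by the caller's buffer, and pppol2tp now also sets MSG_TRUNC when the datagram does not fit: the standard datagram recvmsg() contract. A small sketch of the clamping logic, with a stand-in flag value and hypothetical names:

#include <stddef.h>
#include <string.h>

#define MSG_TRUNC_FLAG 0x20             /* stand-in for MSG_TRUNC */

/* Copy one received datagram into a user buffer of size 'user_len'.
 * Returns how many bytes were copied and flags truncation when the
 * datagram was larger than the buffer. */
static size_t copy_datagram(void *user_buf, size_t user_len,
                            const void *pkt, size_t pkt_len, int *flags)
{
        size_t n = pkt_len;

        if (n > user_len) {             /* caller's buffer is too small */
                n = user_len;
                *flags |= MSG_TRUNC_FLAG;
        }
        memcpy(user_buf, pkt, n);
        return n;
}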
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c index d3f749c72d41..790db89db345 100644 --- a/drivers/net/sfc/falcon.c +++ b/drivers/net/sfc/falcon.c | |||
@@ -733,8 +733,10 @@ void falcon_fini_rx(struct efx_rx_queue *rx_queue) | |||
733 | continue; | 733 | continue; |
734 | break; | 734 | break; |
735 | } | 735 | } |
736 | if (rc) | 736 | if (rc) { |
737 | EFX_ERR(efx, "failed to flush rx queue %d\n", rx_queue->queue); | 737 | EFX_ERR(efx, "failed to flush rx queue %d\n", rx_queue->queue); |
738 | efx_schedule_reset(efx, RESET_TYPE_INVISIBLE); | ||
739 | } | ||
738 | 740 | ||
739 | /* Remove RX descriptor ring from card */ | 741 | /* Remove RX descriptor ring from card */ |
740 | EFX_ZERO_OWORD(rx_desc_ptr); | 742 | EFX_ZERO_OWORD(rx_desc_ptr); |
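The falcon change escalates a failed RX-queue flush to a scheduled reset instead of merely logging it. The general shape, a bounded poll followed by escalation, sketched with stub helpers (none of these names are the real sfc API):

#include <stdio.h>
#include <stdbool.h>

static bool flush_done(void) { return false; }                  /* stub: poll hardware */
static void schedule_reset(void) { fprintf(stderr, "scheduling reset\n"); }

/* Poll for completion a bounded number of times; if the flush never
 * finishes, escalate rather than carrying on with a wedged queue. */
static void flush_rx_queue(int queue)
{
        int tries;

        for (tries = 0; tries < 1000; tries++)
                if (flush_done())
                        return;

        fprintf(stderr, "failed to flush rx queue %d\n", queue);
        schedule_reset();
}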
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 3bb60530d4d7..62436b3a18c6 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
@@ -4404,7 +4404,9 @@ static int sky2_resume(struct pci_dev *pdev) | |||
4404 | if (err) { | 4404 | if (err) { |
4405 | printk(KERN_ERR PFX "%s: could not up: %d\n", | 4405 | printk(KERN_ERR PFX "%s: could not up: %d\n", |
4406 | dev->name, err); | 4406 | dev->name, err); |
4407 | rtnl_lock(); | ||
4407 | dev_close(dev); | 4408 | dev_close(dev); |
4409 | rtnl_unlock(); | ||
4408 | goto out; | 4410 | goto out; |
4409 | } | 4411 | } |
4410 | } | 4412 | } |
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index 69e97a1cb1c4..8606818653f8 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h | |||
@@ -93,14 +93,14 @@ | |||
93 | #define SMC_insw(a, r, p, l) insw ((unsigned long *)((a) + (r)), p, l) | 93 | #define SMC_insw(a, r, p, l) insw ((unsigned long *)((a) + (r)), p, l) |
94 | # endif | 94 | # endif |
95 | /* check if the mac in reg is valid */ | 95 | /* check if the mac in reg is valid */ |
96 | #define SMC_GET_MAC_ADDR(addr) \ | 96 | #define SMC_GET_MAC_ADDR(lp, addr) \ |
97 | do { \ | 97 | do { \ |
98 | unsigned int __v; \ | 98 | unsigned int __v; \ |
99 | __v = SMC_inw(ioaddr, ADDR0_REG); \ | 99 | __v = SMC_inw(ioaddr, ADDR0_REG(lp)); \ |
100 | addr[0] = __v; addr[1] = __v >> 8; \ | 100 | addr[0] = __v; addr[1] = __v >> 8; \ |
101 | __v = SMC_inw(ioaddr, ADDR1_REG); \ | 101 | __v = SMC_inw(ioaddr, ADDR1_REG(lp)); \ |
102 | addr[2] = __v; addr[3] = __v >> 8; \ | 102 | addr[2] = __v; addr[3] = __v >> 8; \ |
103 | __v = SMC_inw(ioaddr, ADDR2_REG); \ | 103 | __v = SMC_inw(ioaddr, ADDR2_REG(lp)); \ |
104 | addr[4] = __v; addr[5] = __v >> 8; \ | 104 | addr[4] = __v; addr[5] = __v >> 8; \ |
105 | if (*(u32 *)(&addr[0]) == 0xFFFFFFFF) { \ | 105 | if (*(u32 *)(&addr[0]) == 0xFFFFFFFF) { \ |
106 | random_ether_addr(addr); \ | 106 | random_ether_addr(addr); \ |
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 07b3f77e7626..cc4bde852542 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -64,8 +64,8 @@ | |||
64 | 64 | ||
65 | #define DRV_MODULE_NAME "tg3" | 65 | #define DRV_MODULE_NAME "tg3" |
66 | #define PFX DRV_MODULE_NAME ": " | 66 | #define PFX DRV_MODULE_NAME ": " |
67 | #define DRV_MODULE_VERSION "3.92" | 67 | #define DRV_MODULE_VERSION "3.92.1" |
68 | #define DRV_MODULE_RELDATE "May 2, 2008" | 68 | #define DRV_MODULE_RELDATE "June 9, 2008" |
69 | 69 | ||
70 | #define TG3_DEF_MAC_MODE 0 | 70 | #define TG3_DEF_MAC_MODE 0 |
71 | #define TG3_DEF_RX_MODE 0 | 71 | #define TG3_DEF_RX_MODE 0 |
@@ -1295,6 +1295,21 @@ static void tg3_frob_aux_power(struct tg3 *tp) | |||
1295 | GRC_LCLCTRL_GPIO_OUTPUT0 | | 1295 | GRC_LCLCTRL_GPIO_OUTPUT0 | |
1296 | GRC_LCLCTRL_GPIO_OUTPUT1), | 1296 | GRC_LCLCTRL_GPIO_OUTPUT1), |
1297 | 100); | 1297 | 100); |
1298 | } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) { | ||
1299 | /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */ | ||
1300 | u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 | | ||
1301 | GRC_LCLCTRL_GPIO_OE1 | | ||
1302 | GRC_LCLCTRL_GPIO_OE2 | | ||
1303 | GRC_LCLCTRL_GPIO_OUTPUT0 | | ||
1304 | GRC_LCLCTRL_GPIO_OUTPUT1 | | ||
1305 | tp->grc_local_ctrl; | ||
1306 | tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100); | ||
1307 | |||
1308 | grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2; | ||
1309 | tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100); | ||
1310 | |||
1311 | grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0; | ||
1312 | tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100); | ||
1298 | } else { | 1313 | } else { |
1299 | u32 no_gpio2; | 1314 | u32 no_gpio2; |
1300 | u32 grc_local_ctrl = 0; | 1315 | u32 grc_local_ctrl = 0; |
@@ -3168,8 +3183,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) | |||
3168 | err |= tg3_readphy(tp, MII_BMCR, &bmcr); | 3183 | err |= tg3_readphy(tp, MII_BMCR, &bmcr); |
3169 | 3184 | ||
3170 | if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset && | 3185 | if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset && |
3171 | (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) && | 3186 | (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) { |
3172 | tp->link_config.flowctrl == tp->link_config.active_flowctrl) { | ||
3173 | /* do nothing, just check for link up at the end */ | 3187 | /* do nothing, just check for link up at the end */ |
3174 | } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { | 3188 | } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { |
3175 | u32 adv, new_adv; | 3189 | u32 adv, new_adv; |
@@ -8599,7 +8613,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
8599 | (cmd->speed == SPEED_1000)) | 8613 | (cmd->speed == SPEED_1000)) |
8600 | return -EINVAL; | 8614 | return -EINVAL; |
8601 | else if ((cmd->speed == SPEED_1000) && | 8615 | else if ((cmd->speed == SPEED_1000) && |
8602 | (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY)) | 8616 | (tp->tg3_flags & TG3_FLAG_10_100_ONLY)) |
8603 | return -EINVAL; | 8617 | return -EINVAL; |
8604 | 8618 | ||
8605 | tg3_full_lock(tp, 0); | 8619 | tg3_full_lock(tp, 0); |
@@ -11768,6 +11782,15 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
11768 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) | 11782 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) |
11769 | tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; | 11783 | tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; |
11770 | 11784 | ||
11785 | if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) { | ||
11786 | /* Turn off the debug UART. */ | ||
11787 | tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; | ||
11788 | if (tp->tg3_flags2 & TG3_FLG2_IS_NIC) | ||
11789 | /* Keep VMain power. */ | ||
11790 | tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | | ||
11791 | GRC_LCLCTRL_GPIO_OUTPUT0; | ||
11792 | } | ||
11793 | |||
11771 | /* Force the chip into D0. */ | 11794 | /* Force the chip into D0. */ |
11772 | err = tg3_set_power_state(tp, PCI_D0); | 11795 | err = tg3_set_power_state(tp, PCI_D0); |
11773 | if (err) { | 11796 | if (err) { |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 5450eac9e263..4452306d5328 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -44,11 +44,15 @@ struct virtnet_info | |||
44 | /* The skb we couldn't send because buffers were full. */ | 44 | /* The skb we couldn't send because buffers were full. */ |
45 | struct sk_buff *last_xmit_skb; | 45 | struct sk_buff *last_xmit_skb; |
46 | 46 | ||
47 | /* If we need to free in a timer, this is it. */ | ||
48 | struct timer_list xmit_free_timer; | ||
49 | |||
47 | /* Number of input buffers, and max we've ever had. */ | 50 | /* Number of input buffers, and max we've ever had. */ |
48 | unsigned int num, max; | 51 | unsigned int num, max; |
49 | 52 | ||
50 | /* For cleaning up after transmission. */ | 53 | /* For cleaning up after transmission. */ |
51 | struct tasklet_struct tasklet; | 54 | struct tasklet_struct tasklet; |
55 | bool free_in_tasklet; | ||
52 | 56 | ||
53 | /* Receive & send queues. */ | 57 | /* Receive & send queues. */ |
54 | struct sk_buff_head recv; | 58 | struct sk_buff_head recv; |
@@ -72,7 +76,7 @@ static void skb_xmit_done(struct virtqueue *svq) | |||
72 | /* Suppress further interrupts. */ | 76 | /* Suppress further interrupts. */ |
73 | svq->vq_ops->disable_cb(svq); | 77 | svq->vq_ops->disable_cb(svq); |
74 | 78 | ||
75 | /* We were waiting for more output buffers. */ | 79 | /* We were probably waiting for more output buffers. */ |
76 | netif_wake_queue(vi->dev); | 80 | netif_wake_queue(vi->dev); |
77 | 81 | ||
78 | /* Make sure we re-xmit last_xmit_skb: if there are no more packets | 82 | /* Make sure we re-xmit last_xmit_skb: if there are no more packets |
@@ -94,9 +98,7 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb, | |||
94 | BUG_ON(len > MAX_PACKET_LEN); | 98 | BUG_ON(len > MAX_PACKET_LEN); |
95 | 99 | ||
96 | skb_trim(skb, len); | 100 | skb_trim(skb, len); |
97 | skb->protocol = eth_type_trans(skb, dev); | 101 | |
98 | pr_debug("Receiving skb proto 0x%04x len %i type %i\n", | ||
99 | ntohs(skb->protocol), skb->len, skb->pkt_type); | ||
100 | dev->stats.rx_bytes += skb->len; | 102 | dev->stats.rx_bytes += skb->len; |
101 | dev->stats.rx_packets++; | 103 | dev->stats.rx_packets++; |
102 | 104 | ||
@@ -106,6 +108,10 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb, | |||
106 | goto frame_err; | 108 | goto frame_err; |
107 | } | 109 | } |
108 | 110 | ||
111 | skb->protocol = eth_type_trans(skb, dev); | ||
112 | pr_debug("Receiving skb proto 0x%04x len %i type %i\n", | ||
113 | ntohs(skb->protocol), skb->len, skb->pkt_type); | ||
114 | |||
109 | if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { | 115 | if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { |
110 | pr_debug("GSO!\n"); | 116 | pr_debug("GSO!\n"); |
111 | switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { | 117 | switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { |
@@ -238,9 +244,25 @@ static void free_old_xmit_skbs(struct virtnet_info *vi) | |||
238 | } | 244 | } |
239 | } | 245 | } |
240 | 246 | ||
247 | /* If the virtio transport doesn't always notify us when all in-flight packets | ||
248 | * are consumed, we fall back to using this function on a timer to free them. */ | ||
249 | static void xmit_free(unsigned long data) | ||
250 | { | ||
251 | struct virtnet_info *vi = (void *)data; | ||
252 | |||
253 | netif_tx_lock(vi->dev); | ||
254 | |||
255 | free_old_xmit_skbs(vi); | ||
256 | |||
257 | if (!skb_queue_empty(&vi->send)) | ||
258 | mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10)); | ||
259 | |||
260 | netif_tx_unlock(vi->dev); | ||
261 | } | ||
262 | |||
241 | static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb) | 263 | static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb) |
242 | { | 264 | { |
243 | int num; | 265 | int num, err; |
244 | struct scatterlist sg[2+MAX_SKB_FRAGS]; | 266 | struct scatterlist sg[2+MAX_SKB_FRAGS]; |
245 | struct virtio_net_hdr *hdr; | 267 | struct virtio_net_hdr *hdr; |
246 | const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; | 268 | const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; |
@@ -283,7 +305,11 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb) | |||
283 | vnet_hdr_to_sg(sg, skb); | 305 | vnet_hdr_to_sg(sg, skb); |
284 | num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1; | 306 | num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1; |
285 | 307 | ||
286 | return vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb); | 308 | err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb); |
309 | if (!err && !vi->free_in_tasklet) | ||
310 | mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10)); | ||
311 | |||
312 | return err; | ||
287 | } | 313 | } |
288 | 314 | ||
289 | static void xmit_tasklet(unsigned long data) | 315 | static void xmit_tasklet(unsigned long data) |
@@ -295,6 +321,8 @@ static void xmit_tasklet(unsigned long data) | |||
295 | vi->svq->vq_ops->kick(vi->svq); | 321 | vi->svq->vq_ops->kick(vi->svq); |
296 | vi->last_xmit_skb = NULL; | 322 | vi->last_xmit_skb = NULL; |
297 | } | 323 | } |
324 | if (vi->free_in_tasklet) | ||
325 | free_old_xmit_skbs(vi); | ||
298 | netif_tx_unlock_bh(vi->dev); | 326 | netif_tx_unlock_bh(vi->dev); |
299 | } | 327 | } |
300 | 328 | ||
@@ -435,6 +463,10 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
435 | vi->vdev = vdev; | 463 | vi->vdev = vdev; |
436 | vdev->priv = vi; | 464 | vdev->priv = vi; |
437 | 465 | ||
466 | /* If they give us a callback when all buffers are done, we don't need | ||
467 | * the timer. */ | ||
468 | vi->free_in_tasklet = virtio_has_feature(vdev,VIRTIO_F_NOTIFY_ON_EMPTY); | ||
469 | |||
438 | /* We expect two virtqueues, receive then send. */ | 470 | /* We expect two virtqueues, receive then send. */ |
439 | vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done); | 471 | vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done); |
440 | if (IS_ERR(vi->rvq)) { | 472 | if (IS_ERR(vi->rvq)) { |
@@ -454,6 +486,9 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
454 | 486 | ||
455 | tasklet_init(&vi->tasklet, xmit_tasklet, (unsigned long)vi); | 487 | tasklet_init(&vi->tasklet, xmit_tasklet, (unsigned long)vi); |
456 | 488 | ||
489 | if (!vi->free_in_tasklet) | ||
490 | setup_timer(&vi->xmit_free_timer, xmit_free, (unsigned long)vi); | ||
491 | |||
457 | err = register_netdev(dev); | 492 | err = register_netdev(dev); |
458 | if (err) { | 493 | if (err) { |
459 | pr_debug("virtio_net: registering device failed\n"); | 494 | pr_debug("virtio_net: registering device failed\n"); |
@@ -491,6 +526,9 @@ static void virtnet_remove(struct virtio_device *vdev) | |||
491 | /* Stop all the virtqueues. */ | 526 | /* Stop all the virtqueues. */ |
492 | vdev->config->reset(vdev); | 527 | vdev->config->reset(vdev); |
493 | 528 | ||
529 | if (!vi->free_in_tasklet) | ||
530 | del_timer_sync(&vi->xmit_free_timer); | ||
531 | |||
494 | /* Free our skbs in send and recv queues, if any. */ | 532 | /* Free our skbs in send and recv queues, if any. */ |
495 | while ((skb = __skb_dequeue(&vi->recv)) != NULL) { | 533 | while ((skb = __skb_dequeue(&vi->recv)) != NULL) { |
496 | kfree_skb(skb); | 534 | kfree_skb(skb); |
@@ -514,7 +552,7 @@ static struct virtio_device_id id_table[] = { | |||
514 | static unsigned int features[] = { | 552 | static unsigned int features[] = { |
515 | VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, | 553 | VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, |
516 | VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, | 554 | VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, |
517 | VIRTIO_NET_F_HOST_ECN, | 555 | VIRTIO_NET_F_HOST_ECN, VIRTIO_F_NOTIFY_ON_EMPTY, |
518 | }; | 556 | }; |
519 | 557 | ||
520 | static struct virtio_driver virtio_net = { | 558 | static struct virtio_driver virtio_net = { |
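The virtio_net hunks add a fallback for transports that do not offer VIRTIO_F_NOTIFY_ON_EMPTY: when the host will not signal a drained TX ring, a timer fires roughly every HZ/10 to reclaim completed skbs and re-arms itself while packets remain outstanding. A rough user-space sketch of that decision and bookkeeping follows; the feature-bit value, struct and function names are invented for illustration, and the real driver uses the kernel timer API rather than a flag.

#include <stdbool.h>
#include <stdint.h>

#define F_NOTIFY_ON_EMPTY (1u << 24)    /* stand-in for VIRTIO_F_NOTIFY_ON_EMPTY */

struct txq {
        bool     free_in_completion;    /* host signals when the ring drains    */
        bool     timer_armed;           /* else: poll with a periodic timer     */
        unsigned in_flight;             /* transmitted but not yet reclaimed    */
};

static void txq_init(struct txq *q, uint32_t features)
{
        q->free_in_completion = (features & F_NOTIFY_ON_EMPTY) != 0;
        q->timer_armed = false;
        q->in_flight = 0;
}

/* After queueing a packet: without the notify feature something must
 * eventually reclaim the buffer, hence the timer. */
static void txq_after_send(struct txq *q)
{
        q->in_flight++;
        if (!q->free_in_completion)
                q->timer_armed = true;  /* kernel: mod_timer(..., jiffies + HZ/10) */
}

/* Timer body: reclaim what completed, keep re-arming while work remains. */
static void txq_timer_tick(struct txq *q, unsigned reclaimed)
{
        if (reclaimed > q->in_flight)
                reclaimed = q->in_flight;
        q->in_flight -= reclaimed;
        q->timer_armed = (q->in_flight > 0);
}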
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c index 729336774828..6e704608947c 100644 --- a/drivers/net/wireless/ipw2200.c +++ b/drivers/net/wireless/ipw2200.c | |||
@@ -1753,6 +1753,8 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio) | |||
1753 | 1753 | ||
1754 | if (priv->workqueue) { | 1754 | if (priv->workqueue) { |
1755 | cancel_delayed_work(&priv->request_scan); | 1755 | cancel_delayed_work(&priv->request_scan); |
1756 | cancel_delayed_work(&priv->request_direct_scan); | ||
1757 | cancel_delayed_work(&priv->request_passive_scan); | ||
1756 | cancel_delayed_work(&priv->scan_event); | 1758 | cancel_delayed_work(&priv->scan_event); |
1757 | } | 1759 | } |
1758 | queue_work(priv->workqueue, &priv->down); | 1760 | queue_work(priv->workqueue, &priv->down); |
@@ -2005,6 +2007,8 @@ static void ipw_irq_tasklet(struct ipw_priv *priv) | |||
2005 | wake_up_interruptible(&priv->wait_command_queue); | 2007 | wake_up_interruptible(&priv->wait_command_queue); |
2006 | priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING); | 2008 | priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING); |
2007 | cancel_delayed_work(&priv->request_scan); | 2009 | cancel_delayed_work(&priv->request_scan); |
2010 | cancel_delayed_work(&priv->request_direct_scan); | ||
2011 | cancel_delayed_work(&priv->request_passive_scan); | ||
2008 | cancel_delayed_work(&priv->scan_event); | 2012 | cancel_delayed_work(&priv->scan_event); |
2009 | schedule_work(&priv->link_down); | 2013 | schedule_work(&priv->link_down); |
2010 | queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ); | 2014 | queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ); |
@@ -4712,6 +4716,12 @@ static void ipw_rx_notification(struct ipw_priv *priv, | |||
4712 | priv->status &= ~STATUS_SCAN_FORCED; | 4716 | priv->status &= ~STATUS_SCAN_FORCED; |
4713 | #endif /* CONFIG_IPW2200_MONITOR */ | 4717 | #endif /* CONFIG_IPW2200_MONITOR */ |
4714 | 4718 | ||
4719 | /* Do queued direct scans first */ | ||
4720 | if (priv->status & STATUS_DIRECT_SCAN_PENDING) { | ||
4721 | queue_delayed_work(priv->workqueue, | ||
4722 | &priv->request_direct_scan, 0); | ||
4723 | } | ||
4724 | |||
4715 | if (!(priv->status & (STATUS_ASSOCIATED | | 4725 | if (!(priv->status & (STATUS_ASSOCIATED | |
4716 | STATUS_ASSOCIATING | | 4726 | STATUS_ASSOCIATING | |
4717 | STATUS_ROAMING | | 4727 | STATUS_ROAMING | |
@@ -6267,7 +6277,7 @@ static void ipw_add_scan_channels(struct ipw_priv *priv, | |||
6267 | } | 6277 | } |
6268 | } | 6278 | } |
6269 | 6279 | ||
6270 | static int ipw_request_scan_helper(struct ipw_priv *priv, int type) | 6280 | static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct) |
6271 | { | 6281 | { |
6272 | struct ipw_scan_request_ext scan; | 6282 | struct ipw_scan_request_ext scan; |
6273 | int err = 0, scan_type; | 6283 | int err = 0, scan_type; |
@@ -6278,22 +6288,31 @@ static int ipw_request_scan_helper(struct ipw_priv *priv, int type) | |||
6278 | 6288 | ||
6279 | mutex_lock(&priv->mutex); | 6289 | mutex_lock(&priv->mutex); |
6280 | 6290 | ||
6291 | if (direct && (priv->direct_scan_ssid_len == 0)) { | ||
6292 | IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n"); | ||
6293 | priv->status &= ~STATUS_DIRECT_SCAN_PENDING; | ||
6294 | goto done; | ||
6295 | } | ||
6296 | |||
6281 | if (priv->status & STATUS_SCANNING) { | 6297 | if (priv->status & STATUS_SCANNING) { |
6282 | IPW_DEBUG_HC("Concurrent scan requested. Ignoring.\n"); | 6298 | IPW_DEBUG_HC("Concurrent scan requested. Queuing.\n"); |
6283 | priv->status |= STATUS_SCAN_PENDING; | 6299 | priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING : |
6300 | STATUS_SCAN_PENDING; | ||
6284 | goto done; | 6301 | goto done; |
6285 | } | 6302 | } |
6286 | 6303 | ||
6287 | if (!(priv->status & STATUS_SCAN_FORCED) && | 6304 | if (!(priv->status & STATUS_SCAN_FORCED) && |
6288 | priv->status & STATUS_SCAN_ABORTING) { | 6305 | priv->status & STATUS_SCAN_ABORTING) { |
6289 | IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n"); | 6306 | IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n"); |
6290 | priv->status |= STATUS_SCAN_PENDING; | 6307 | priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING : |
6308 | STATUS_SCAN_PENDING; | ||
6291 | goto done; | 6309 | goto done; |
6292 | } | 6310 | } |
6293 | 6311 | ||
6294 | if (priv->status & STATUS_RF_KILL_MASK) { | 6312 | if (priv->status & STATUS_RF_KILL_MASK) { |
6295 | IPW_DEBUG_HC("Aborting scan due to RF Kill activation\n"); | 6313 | IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n"); |
6296 | priv->status |= STATUS_SCAN_PENDING; | 6314 | priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING : |
6315 | STATUS_SCAN_PENDING; | ||
6297 | goto done; | 6316 | goto done; |
6298 | } | 6317 | } |
6299 | 6318 | ||
@@ -6321,6 +6340,7 @@ static int ipw_request_scan_helper(struct ipw_priv *priv, int type) | |||
6321 | cpu_to_le16(20); | 6340 | cpu_to_le16(20); |
6322 | 6341 | ||
6323 | scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120); | 6342 | scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120); |
6343 | scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20); | ||
6324 | 6344 | ||
6325 | #ifdef CONFIG_IPW2200_MONITOR | 6345 | #ifdef CONFIG_IPW2200_MONITOR |
6326 | if (priv->ieee->iw_mode == IW_MODE_MONITOR) { | 6346 | if (priv->ieee->iw_mode == IW_MODE_MONITOR) { |
@@ -6360,13 +6380,23 @@ static int ipw_request_scan_helper(struct ipw_priv *priv, int type) | |||
6360 | cpu_to_le16(2000); | 6380 | cpu_to_le16(2000); |
6361 | } else { | 6381 | } else { |
6362 | #endif /* CONFIG_IPW2200_MONITOR */ | 6382 | #endif /* CONFIG_IPW2200_MONITOR */ |
6363 | /* If we are roaming, then make this a directed scan for the | 6383 | /* Honor direct scans first, otherwise if we are roaming make |
6364 | * current network. Otherwise, ensure that every other scan | 6384 | * this a direct scan for the current network. Finally, |
6365 | * is a fast channel hop scan */ | 6385 | * ensure that every other scan is a fast channel hop scan */ |
6366 | if ((priv->status & STATUS_ROAMING) | 6386 | if (direct) { |
6367 | || (!(priv->status & STATUS_ASSOCIATED) | 6387 | err = ipw_send_ssid(priv, priv->direct_scan_ssid, |
6368 | && (priv->config & CFG_STATIC_ESSID) | 6388 | priv->direct_scan_ssid_len); |
6369 | && (le32_to_cpu(scan.full_scan_index) % 2))) { | 6389 | if (err) { |
6390 | IPW_DEBUG_HC("Attempt to send SSID command " | ||
6391 | "failed\n"); | ||
6392 | goto done; | ||
6393 | } | ||
6394 | |||
6395 | scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN; | ||
6396 | } else if ((priv->status & STATUS_ROAMING) | ||
6397 | || (!(priv->status & STATUS_ASSOCIATED) | ||
6398 | && (priv->config & CFG_STATIC_ESSID) | ||
6399 | && (le32_to_cpu(scan.full_scan_index) % 2))) { | ||
6370 | err = ipw_send_ssid(priv, priv->essid, priv->essid_len); | 6400 | err = ipw_send_ssid(priv, priv->essid, priv->essid_len); |
6371 | if (err) { | 6401 | if (err) { |
6372 | IPW_DEBUG_HC("Attempt to send SSID command " | 6402 | IPW_DEBUG_HC("Attempt to send SSID command " |
@@ -6391,7 +6421,12 @@ send_request: | |||
6391 | } | 6421 | } |
6392 | 6422 | ||
6393 | priv->status |= STATUS_SCANNING; | 6423 | priv->status |= STATUS_SCANNING; |
6394 | priv->status &= ~STATUS_SCAN_PENDING; | 6424 | if (direct) { |
6425 | priv->status &= ~STATUS_DIRECT_SCAN_PENDING; | ||
6426 | priv->direct_scan_ssid_len = 0; | ||
6427 | } else | ||
6428 | priv->status &= ~STATUS_SCAN_PENDING; | ||
6429 | |||
6395 | queue_delayed_work(priv->workqueue, &priv->scan_check, | 6430 | queue_delayed_work(priv->workqueue, &priv->scan_check, |
6396 | IPW_SCAN_CHECK_WATCHDOG); | 6431 | IPW_SCAN_CHECK_WATCHDOG); |
6397 | done: | 6432 | done: |
@@ -6402,15 +6437,22 @@ done: | |||
6402 | static void ipw_request_passive_scan(struct work_struct *work) | 6437 | static void ipw_request_passive_scan(struct work_struct *work) |
6403 | { | 6438 | { |
6404 | struct ipw_priv *priv = | 6439 | struct ipw_priv *priv = |
6405 | container_of(work, struct ipw_priv, request_passive_scan); | 6440 | container_of(work, struct ipw_priv, request_passive_scan.work); |
6406 | ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE); | 6441 | ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0); |
6407 | } | 6442 | } |
6408 | 6443 | ||
6409 | static void ipw_request_scan(struct work_struct *work) | 6444 | static void ipw_request_scan(struct work_struct *work) |
6410 | { | 6445 | { |
6411 | struct ipw_priv *priv = | 6446 | struct ipw_priv *priv = |
6412 | container_of(work, struct ipw_priv, request_scan.work); | 6447 | container_of(work, struct ipw_priv, request_scan.work); |
6413 | ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE); | 6448 | ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0); |
6449 | } | ||
6450 | |||
6451 | static void ipw_request_direct_scan(struct work_struct *work) | ||
6452 | { | ||
6453 | struct ipw_priv *priv = | ||
6454 | container_of(work, struct ipw_priv, request_direct_scan.work); | ||
6455 | ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1); | ||
6414 | } | 6456 | } |
6415 | 6457 | ||
6416 | static void ipw_bg_abort_scan(struct work_struct *work) | 6458 | static void ipw_bg_abort_scan(struct work_struct *work) |
@@ -9477,99 +9519,38 @@ static int ipw_wx_get_retry(struct net_device *dev, | |||
9477 | return 0; | 9519 | return 0; |
9478 | } | 9520 | } |
9479 | 9521 | ||
9480 | static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid, | ||
9481 | int essid_len) | ||
9482 | { | ||
9483 | struct ipw_scan_request_ext scan; | ||
9484 | int err = 0, scan_type; | ||
9485 | |||
9486 | if (!(priv->status & STATUS_INIT) || | ||
9487 | (priv->status & STATUS_EXIT_PENDING)) | ||
9488 | return 0; | ||
9489 | |||
9490 | mutex_lock(&priv->mutex); | ||
9491 | |||
9492 | if (priv->status & STATUS_RF_KILL_MASK) { | ||
9493 | IPW_DEBUG_HC("Aborting scan due to RF kill activation\n"); | ||
9494 | priv->status |= STATUS_SCAN_PENDING; | ||
9495 | goto done; | ||
9496 | } | ||
9497 | |||
9498 | IPW_DEBUG_HC("starting request direct scan!\n"); | ||
9499 | |||
9500 | if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) { | ||
9501 | /* We should not sleep here; otherwise we will block most | ||
9502 | * of the system (for instance, we hold rtnl_lock when we | ||
9503 | * get here). | ||
9504 | */ | ||
9505 | err = -EAGAIN; | ||
9506 | goto done; | ||
9507 | } | ||
9508 | memset(&scan, 0, sizeof(scan)); | ||
9509 | |||
9510 | if (priv->config & CFG_SPEED_SCAN) | ||
9511 | scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = | ||
9512 | cpu_to_le16(30); | ||
9513 | else | ||
9514 | scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = | ||
9515 | cpu_to_le16(20); | ||
9516 | |||
9517 | scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] = | ||
9518 | cpu_to_le16(20); | ||
9519 | scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120); | ||
9520 | scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20); | ||
9521 | |||
9522 | scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee)); | ||
9523 | |||
9524 | err = ipw_send_ssid(priv, essid, essid_len); | ||
9525 | if (err) { | ||
9526 | IPW_DEBUG_HC("Attempt to send SSID command failed\n"); | ||
9527 | goto done; | ||
9528 | } | ||
9529 | scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN; | ||
9530 | |||
9531 | ipw_add_scan_channels(priv, &scan, scan_type); | ||
9532 | |||
9533 | err = ipw_send_scan_request_ext(priv, &scan); | ||
9534 | if (err) { | ||
9535 | IPW_DEBUG_HC("Sending scan command failed: %08X\n", err); | ||
9536 | goto done; | ||
9537 | } | ||
9538 | |||
9539 | priv->status |= STATUS_SCANNING; | ||
9540 | |||
9541 | done: | ||
9542 | mutex_unlock(&priv->mutex); | ||
9543 | return err; | ||
9544 | } | ||
9545 | |||
9546 | static int ipw_wx_set_scan(struct net_device *dev, | 9522 | static int ipw_wx_set_scan(struct net_device *dev, |
9547 | struct iw_request_info *info, | 9523 | struct iw_request_info *info, |
9548 | union iwreq_data *wrqu, char *extra) | 9524 | union iwreq_data *wrqu, char *extra) |
9549 | { | 9525 | { |
9550 | struct ipw_priv *priv = ieee80211_priv(dev); | 9526 | struct ipw_priv *priv = ieee80211_priv(dev); |
9551 | struct iw_scan_req *req = (struct iw_scan_req *)extra; | 9527 | struct iw_scan_req *req = (struct iw_scan_req *)extra; |
9528 | struct delayed_work *work = NULL; | ||
9552 | 9529 | ||
9553 | mutex_lock(&priv->mutex); | 9530 | mutex_lock(&priv->mutex); |
9531 | |||
9554 | priv->user_requested_scan = 1; | 9532 | priv->user_requested_scan = 1; |
9555 | mutex_unlock(&priv->mutex); | ||
9556 | 9533 | ||
9557 | if (wrqu->data.length == sizeof(struct iw_scan_req)) { | 9534 | if (wrqu->data.length == sizeof(struct iw_scan_req)) { |
9558 | if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { | 9535 | if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { |
9559 | ipw_request_direct_scan(priv, req->essid, | 9536 | int len = min((int)req->essid_len, |
9560 | req->essid_len); | 9537 | (int)sizeof(priv->direct_scan_ssid)); |
9561 | return 0; | 9538 | memcpy(priv->direct_scan_ssid, req->essid, len); |
9562 | } | 9539 | priv->direct_scan_ssid_len = len; |
9563 | if (req->scan_type == IW_SCAN_TYPE_PASSIVE) { | 9540 | work = &priv->request_direct_scan; |
9564 | queue_work(priv->workqueue, | 9541 | } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) { |
9565 | &priv->request_passive_scan); | 9542 | work = &priv->request_passive_scan; |
9566 | return 0; | ||
9567 | } | 9543 | } |
9544 | } else { | ||
9545 | /* Normal active broadcast scan */ | ||
9546 | work = &priv->request_scan; | ||
9568 | } | 9547 | } |
9569 | 9548 | ||
9549 | mutex_unlock(&priv->mutex); | ||
9550 | |||
9570 | IPW_DEBUG_WX("Start scan\n"); | 9551 | IPW_DEBUG_WX("Start scan\n"); |
9571 | 9552 | ||
9572 | queue_delayed_work(priv->workqueue, &priv->request_scan, 0); | 9553 | queue_delayed_work(priv->workqueue, work, 0); |
9573 | 9554 | ||
9574 | return 0; | 9555 | return 0; |
9575 | } | 9556 | } |
@@ -10731,6 +10712,8 @@ static void ipw_link_up(struct ipw_priv *priv) | |||
10731 | } | 10712 | } |
10732 | 10713 | ||
10733 | cancel_delayed_work(&priv->request_scan); | 10714 | cancel_delayed_work(&priv->request_scan); |
10715 | cancel_delayed_work(&priv->request_direct_scan); | ||
10716 | cancel_delayed_work(&priv->request_passive_scan); | ||
10734 | cancel_delayed_work(&priv->scan_event); | 10717 | cancel_delayed_work(&priv->scan_event); |
10735 | ipw_reset_stats(priv); | 10718 | ipw_reset_stats(priv); |
10736 | /* Ensure the rate is updated immediately */ | 10719 | /* Ensure the rate is updated immediately */ |
@@ -10761,6 +10744,8 @@ static void ipw_link_down(struct ipw_priv *priv) | |||
10761 | 10744 | ||
10762 | /* Cancel any queued work ... */ | 10745 | /* Cancel any queued work ... */ |
10763 | cancel_delayed_work(&priv->request_scan); | 10746 | cancel_delayed_work(&priv->request_scan); |
10747 | cancel_delayed_work(&priv->request_direct_scan); | ||
10748 | cancel_delayed_work(&priv->request_passive_scan); | ||
10764 | cancel_delayed_work(&priv->adhoc_check); | 10749 | cancel_delayed_work(&priv->adhoc_check); |
10765 | cancel_delayed_work(&priv->gather_stats); | 10750 | cancel_delayed_work(&priv->gather_stats); |
10766 | 10751 | ||
@@ -10800,8 +10785,9 @@ static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv) | |||
10800 | INIT_WORK(&priv->up, ipw_bg_up); | 10785 | INIT_WORK(&priv->up, ipw_bg_up); |
10801 | INIT_WORK(&priv->down, ipw_bg_down); | 10786 | INIT_WORK(&priv->down, ipw_bg_down); |
10802 | INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan); | 10787 | INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan); |
10788 | INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan); | ||
10789 | INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan); | ||
10803 | INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event); | 10790 | INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event); |
10804 | INIT_WORK(&priv->request_passive_scan, ipw_request_passive_scan); | ||
10805 | INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats); | 10791 | INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats); |
10806 | INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan); | 10792 | INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan); |
10807 | INIT_WORK(&priv->roam, ipw_bg_roam); | 10793 | INIT_WORK(&priv->roam, ipw_bg_roam); |
@@ -11835,6 +11821,8 @@ static void __devexit ipw_pci_remove(struct pci_dev *pdev) | |||
11835 | cancel_delayed_work(&priv->adhoc_check); | 11821 | cancel_delayed_work(&priv->adhoc_check); |
11836 | cancel_delayed_work(&priv->gather_stats); | 11822 | cancel_delayed_work(&priv->gather_stats); |
11837 | cancel_delayed_work(&priv->request_scan); | 11823 | cancel_delayed_work(&priv->request_scan); |
11824 | cancel_delayed_work(&priv->request_direct_scan); | ||
11825 | cancel_delayed_work(&priv->request_passive_scan); | ||
11838 | cancel_delayed_work(&priv->scan_event); | 11826 | cancel_delayed_work(&priv->scan_event); |
11839 | cancel_delayed_work(&priv->rf_kill); | 11827 | cancel_delayed_work(&priv->rf_kill); |
11840 | cancel_delayed_work(&priv->scan_check); | 11828 | cancel_delayed_work(&priv->scan_check); |
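The ipw2200 rework turns direct (ESSID-specific) scans into ordinary queued work: the wireless-extensions handler only stores the requested SSID and, if a scan is already running, sets STATUS_DIRECT_SCAN_PENDING; a delayed_work then drives the scan through the shared ipw_request_scan_helper() and retries the pending direct scan once the current one completes. A simplified sketch of that record-then-drain pattern (the struct and field names below are hypothetical, and real code would also need the driver's locking):

#include <string.h>
#include <stdbool.h>

#define SSID_MAX 32

struct scan_state {
        bool scanning;                  /* a scan is currently in flight    */
        bool direct_pending;            /* a direct scan is waiting to run  */
        char direct_ssid[SSID_MAX];
        unsigned char direct_ssid_len;
};

/* Fast path (ioctl handler): never block, just record the request. */
static void request_direct_scan(struct scan_state *s,
                                const char *ssid, unsigned char len)
{
        if (len > SSID_MAX)
                len = SSID_MAX;
        memcpy(s->direct_ssid, ssid, len);
        s->direct_ssid_len = len;
        s->direct_pending = true;       /* the worker will pick it up */
}

/* Worker: one scan at a time; a pending direct scan wins the next slot. */
static bool start_next_scan(struct scan_state *s)
{
        if (s->scanning)
                return false;           /* stays pending, retried later */
        s->scanning = true;
        if (s->direct_pending) {
                s->direct_pending = false;
                /* ... program the firmware with s->direct_ssid ... */
        }
        return true;
}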
diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h index cd3295b66dd6..d4ab28b73b32 100644 --- a/drivers/net/wireless/ipw2200.h +++ b/drivers/net/wireless/ipw2200.h | |||
@@ -1037,6 +1037,7 @@ struct ipw_cmd { /* XXX */ | |||
1037 | #define STATUS_DISASSOC_PENDING (1<<12) | 1037 | #define STATUS_DISASSOC_PENDING (1<<12) |
1038 | #define STATUS_STATE_PENDING (1<<13) | 1038 | #define STATUS_STATE_PENDING (1<<13) |
1039 | 1039 | ||
1040 | #define STATUS_DIRECT_SCAN_PENDING (1<<19) | ||
1040 | #define STATUS_SCAN_PENDING (1<<20) | 1041 | #define STATUS_SCAN_PENDING (1<<20) |
1041 | #define STATUS_SCANNING (1<<21) | 1042 | #define STATUS_SCANNING (1<<21) |
1042 | #define STATUS_SCAN_ABORTING (1<<22) | 1043 | #define STATUS_SCAN_ABORTING (1<<22) |
@@ -1292,6 +1293,8 @@ struct ipw_priv { | |||
1292 | struct iw_public_data wireless_data; | 1293 | struct iw_public_data wireless_data; |
1293 | 1294 | ||
1294 | int user_requested_scan; | 1295 | int user_requested_scan; |
1296 | u8 direct_scan_ssid[IW_ESSID_MAX_SIZE]; | ||
1297 | u8 direct_scan_ssid_len; | ||
1295 | 1298 | ||
1296 | struct workqueue_struct *workqueue; | 1299 | struct workqueue_struct *workqueue; |
1297 | 1300 | ||
@@ -1301,8 +1304,9 @@ struct ipw_priv { | |||
1301 | struct work_struct system_config; | 1304 | struct work_struct system_config; |
1302 | struct work_struct rx_replenish; | 1305 | struct work_struct rx_replenish; |
1303 | struct delayed_work request_scan; | 1306 | struct delayed_work request_scan; |
1307 | struct delayed_work request_direct_scan; | ||
1308 | struct delayed_work request_passive_scan; | ||
1304 | struct delayed_work scan_event; | 1309 | struct delayed_work scan_event; |
1305 | struct work_struct request_passive_scan; | ||
1306 | struct work_struct adapter_restart; | 1310 | struct work_struct adapter_restart; |
1307 | struct delayed_work rf_kill; | 1311 | struct delayed_work rf_kill; |
1308 | struct work_struct up; | 1312 | struct work_struct up; |
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlwifi/iwl-3945-led.c index d200d08fb086..8b1528e52d43 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945-led.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.c | |||
@@ -229,14 +229,15 @@ static int iwl3945_led_register_led(struct iwl3945_priv *priv, | |||
229 | led->led_dev.brightness_set = iwl3945_led_brightness_set; | 229 | led->led_dev.brightness_set = iwl3945_led_brightness_set; |
230 | led->led_dev.default_trigger = trigger; | 230 | led->led_dev.default_trigger = trigger; |
231 | 231 | ||
232 | led->priv = priv; | ||
233 | led->type = type; | ||
234 | |||
232 | ret = led_classdev_register(device, &led->led_dev); | 235 | ret = led_classdev_register(device, &led->led_dev); |
233 | if (ret) { | 236 | if (ret) { |
234 | IWL_ERROR("Error: failed to register led handler.\n"); | 237 | IWL_ERROR("Error: failed to register led handler.\n"); |
235 | return ret; | 238 | return ret; |
236 | } | 239 | } |
237 | 240 | ||
238 | led->priv = priv; | ||
239 | led->type = type; | ||
240 | led->registered = 1; | 241 | led->registered = 1; |
241 | 242 | ||
242 | if (set_led && led->led_on) | 243 | if (set_led && led->led_on) |
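The iwl3945 fix fills in led->priv and led->type before calling led_classdev_register(), because the LED class may invoke the brightness callback during registration, while those fields were still unset. The underlying rule, initialize everything a callback reads before publishing the object, in a small runnable example (the struct and functions are invented for the illustration):

#include <stdio.h>

struct led {
        void *priv;
        void (*set)(struct led *led, int value);
};

/* Registration may call back immediately, just as led_classdev_register()
 * can, so the object must already be fully initialized. */
static void led_register(struct led *led, int initial)
{
        led->set(led, initial);         /* fires right away */
}

static void my_set(struct led *led, int value)
{
        printf("led %p -> %d\n", led->priv, value);     /* reads priv */
}

int main(void)
{
        struct led led;

        led.priv = &led;                /* initialize first ...  */
        led.set = my_set;
        led_register(&led, 1);          /* ... then publish      */
        return 0;
}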
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c index 6328b9593877..8124fd9b1353 100644 --- a/drivers/net/wireless/libertas/cmd.c +++ b/drivers/net/wireless/libertas/cmd.c | |||
@@ -1842,6 +1842,9 @@ static void lbs_send_confirmsleep(struct lbs_private *priv) | |||
1842 | 1842 | ||
1843 | spin_lock_irqsave(&priv->driver_lock, flags); | 1843 | spin_lock_irqsave(&priv->driver_lock, flags); |
1844 | 1844 | ||
1845 | /* We don't get a response on the sleep-confirmation */ | ||
1846 | priv->dnld_sent = DNLD_RES_RECEIVED; | ||
1847 | |||
1845 | /* If nothing to do, go back to sleep (?) */ | 1848 | /* If nothing to do, go back to sleep (?) */ |
1846 | if (!__kfifo_len(priv->event_fifo) && !priv->resp_len[priv->resp_idx]) | 1849 | if (!__kfifo_len(priv->event_fifo) && !priv->resp_len[priv->resp_idx]) |
1847 | priv->psstate = PS_STATE_SLEEP; | 1850 | priv->psstate = PS_STATE_SLEEP; |
@@ -1904,12 +1907,12 @@ void lbs_ps_confirm_sleep(struct lbs_private *priv) | |||
1904 | 1907 | ||
1905 | lbs_deb_enter(LBS_DEB_HOST); | 1908 | lbs_deb_enter(LBS_DEB_HOST); |
1906 | 1909 | ||
1910 | spin_lock_irqsave(&priv->driver_lock, flags); | ||
1907 | if (priv->dnld_sent) { | 1911 | if (priv->dnld_sent) { |
1908 | allowed = 0; | 1912 | allowed = 0; |
1909 | lbs_deb_host("dnld_sent was set\n"); | 1913 | lbs_deb_host("dnld_sent was set\n"); |
1910 | } | 1914 | } |
1911 | 1915 | ||
1912 | spin_lock_irqsave(&priv->driver_lock, flags); | ||
1913 | /* In-progress command? */ | 1916 | /* In-progress command? */ |
1914 | if (priv->cur_cmd) { | 1917 | if (priv->cur_cmd) { |
1915 | allowed = 0; | 1918 | allowed = 0; |
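The libertas change takes the driver spinlock before testing dnld_sent, so that flag and the in-progress-command check are read under one lock as a single consistent snapshot instead of racing with the completion path. The same idea in a user-space sketch with a pthread mutex (field names are stand-ins for the driver's state):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool download_sent;              /* set by the transmit/IRQ path */
static bool command_pending;            /* a command awaits a response  */

/* Decide whether confirming sleep is allowed.  Both flags are sampled
 * under the same lock so the decision cannot interleave with updates. */
static bool sleep_allowed(void)
{
        bool allowed;

        pthread_mutex_lock(&lock);
        allowed = !download_sent && !command_pending;
        pthread_mutex_unlock(&lock);
        return allowed;
}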
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c index e1f066068590..acfc4bfcc262 100644 --- a/drivers/net/wireless/libertas/main.c +++ b/drivers/net/wireless/libertas/main.c | |||
@@ -732,8 +732,8 @@ static int lbs_thread(void *data) | |||
732 | lbs_deb_thread("4: currenttxskb %p, dnld_sent %d\n", | 732 | lbs_deb_thread("4: currenttxskb %p, dnld_sent %d\n", |
733 | priv->currenttxskb, priv->dnld_sent); | 733 | priv->currenttxskb, priv->dnld_sent); |
734 | 734 | ||
735 | spin_lock_irq(&priv->driver_lock); | ||
736 | /* Process any pending command response */ | 735 | /* Process any pending command response */ |
736 | spin_lock_irq(&priv->driver_lock); | ||
737 | resp_idx = priv->resp_idx; | 737 | resp_idx = priv->resp_idx; |
738 | if (priv->resp_len[resp_idx]) { | 738 | if (priv->resp_len[resp_idx]) { |
739 | spin_unlock_irq(&priv->driver_lock); | 739 | spin_unlock_irq(&priv->driver_lock); |
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c index 6424e5a2c83d..418606ac1c3b 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zd1211rw/zd_mac.c | |||
@@ -719,7 +719,7 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length) | |||
719 | fc = le16_to_cpu(*((__le16 *) buffer)); | 719 | fc = le16_to_cpu(*((__le16 *) buffer)); |
720 | 720 | ||
721 | is_qos = ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) && | 721 | is_qos = ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) && |
722 | ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_QOS_DATA); | 722 | (fc & IEEE80211_STYPE_QOS_DATA); |
723 | is_4addr = (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == | 723 | is_4addr = (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == |
724 | (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS); | 724 | (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS); |
725 | need_padding = is_qos ^ is_4addr; | 725 | need_padding = is_qos ^ is_4addr; |
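The zd1211rw fix tests the QoS bit inside the data subtypes rather than requiring the subtype to equal QOS_DATA exactly, because every data subtype with that bit set (QoS Null, the QoS CF-Ack variants, and so on) carries a QoS control field and therefore changes the padding decision. A sketch of the bit test; the macros are defined locally but use the standard 802.11 frame-control values:

#include <stdint.h>
#include <stdbool.h>

#define FCTL_FTYPE      0x000c          /* frame type mask              */
#define FTYPE_DATA      0x0008          /* data frames                  */
#define STYPE_QOS_DATA  0x0080          /* QoS bit of the subtype field */

/* A frame has a QoS control field when it is a data frame whose QoS
 * subtype bit is set; testing the bit catches all QoS data subtypes. */
static bool has_qos_control(uint16_t fc)
{
        return (fc & FCTL_FTYPE) == FTYPE_DATA && (fc & STYPE_QOS_DATA);
}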
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c index 0201c8adfda7..46c791adb894 100644 --- a/drivers/pnp/pnpacpi/rsparser.c +++ b/drivers/pnp/pnpacpi/rsparser.c | |||
@@ -50,15 +50,17 @@ static int irq_flags(int triggering, int polarity, int shareable) | |||
50 | flags = IORESOURCE_IRQ_HIGHEDGE; | 50 | flags = IORESOURCE_IRQ_HIGHEDGE; |
51 | } | 51 | } |
52 | 52 | ||
53 | if (shareable) | 53 | if (shareable == ACPI_SHARED) |
54 | flags |= IORESOURCE_IRQ_SHAREABLE; | 54 | flags |= IORESOURCE_IRQ_SHAREABLE; |
55 | 55 | ||
56 | return flags; | 56 | return flags; |
57 | } | 57 | } |
58 | 58 | ||
59 | static void decode_irq_flags(int flag, int *triggering, int *polarity) | 59 | static void decode_irq_flags(struct pnp_dev *dev, int flags, int *triggering, |
60 | int *polarity, int *shareable) | ||
60 | { | 61 | { |
61 | switch (flag) { | 62 | switch (flags & (IORESOURCE_IRQ_LOWLEVEL | IORESOURCE_IRQ_HIGHLEVEL | |
63 | IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE)) { | ||
62 | case IORESOURCE_IRQ_LOWLEVEL: | 64 | case IORESOURCE_IRQ_LOWLEVEL: |
63 | *triggering = ACPI_LEVEL_SENSITIVE; | 65 | *triggering = ACPI_LEVEL_SENSITIVE; |
64 | *polarity = ACPI_ACTIVE_LOW; | 66 | *polarity = ACPI_ACTIVE_LOW; |
@@ -75,7 +77,18 @@ static void decode_irq_flags(int flag, int *triggering, int *polarity) | |||
75 | *triggering = ACPI_EDGE_SENSITIVE; | 77 | *triggering = ACPI_EDGE_SENSITIVE; |
76 | *polarity = ACPI_ACTIVE_HIGH; | 78 | *polarity = ACPI_ACTIVE_HIGH; |
77 | break; | 79 | break; |
80 | default: | ||
81 | dev_err(&dev->dev, "can't encode invalid IRQ mode %#x\n", | ||
82 | flags); | ||
83 | *triggering = ACPI_EDGE_SENSITIVE; | ||
84 | *polarity = ACPI_ACTIVE_HIGH; | ||
85 | break; | ||
78 | } | 86 | } |
87 | |||
88 | if (flags & IORESOURCE_IRQ_SHAREABLE) | ||
89 | *shareable = ACPI_SHARED; | ||
90 | else | ||
91 | *shareable = ACPI_EXCLUSIVE; | ||
79 | } | 92 | } |
80 | 93 | ||
81 | static void pnpacpi_parse_allocated_irqresource(struct pnp_dev *dev, | 94 | static void pnpacpi_parse_allocated_irqresource(struct pnp_dev *dev, |
@@ -742,6 +755,9 @@ static acpi_status pnpacpi_type_resources(struct acpi_resource *res, void *data) | |||
742 | if (pnpacpi_supported_resource(res)) { | 755 | if (pnpacpi_supported_resource(res)) { |
743 | (*resource)->type = res->type; | 756 | (*resource)->type = res->type; |
744 | (*resource)->length = sizeof(struct acpi_resource); | 757 | (*resource)->length = sizeof(struct acpi_resource); |
758 | if (res->type == ACPI_RESOURCE_TYPE_IRQ) | ||
759 | (*resource)->data.irq.descriptor_length = | ||
760 | res->data.irq.descriptor_length; | ||
745 | (*resource)++; | 761 | (*resource)++; |
746 | } | 762 | } |
747 | 763 | ||
@@ -788,22 +804,21 @@ static void pnpacpi_encode_irq(struct pnp_dev *dev, | |||
788 | struct resource *p) | 804 | struct resource *p) |
789 | { | 805 | { |
790 | struct acpi_resource_irq *irq = &resource->data.irq; | 806 | struct acpi_resource_irq *irq = &resource->data.irq; |
791 | int triggering, polarity; | 807 | int triggering, polarity, shareable; |
792 | 808 | ||
793 | decode_irq_flags(p->flags & IORESOURCE_BITS, &triggering, &polarity); | 809 | decode_irq_flags(dev, p->flags, &triggering, &polarity, &shareable); |
794 | irq->triggering = triggering; | 810 | irq->triggering = triggering; |
795 | irq->polarity = polarity; | 811 | irq->polarity = polarity; |
796 | if (triggering == ACPI_EDGE_SENSITIVE) | 812 | irq->sharable = shareable; |
797 | irq->sharable = ACPI_EXCLUSIVE; | ||
798 | else | ||
799 | irq->sharable = ACPI_SHARED; | ||
800 | irq->interrupt_count = 1; | 813 | irq->interrupt_count = 1; |
801 | irq->interrupts[0] = p->start; | 814 | irq->interrupts[0] = p->start; |
802 | 815 | ||
803 | dev_dbg(&dev->dev, " encode irq %d %s %s %s\n", (int) p->start, | 816 | dev_dbg(&dev->dev, " encode irq %d %s %s %s (%d-byte descriptor)\n", |
817 | (int) p->start, | ||
804 | triggering == ACPI_LEVEL_SENSITIVE ? "level" : "edge", | 818 | triggering == ACPI_LEVEL_SENSITIVE ? "level" : "edge", |
805 | polarity == ACPI_ACTIVE_LOW ? "low" : "high", | 819 | polarity == ACPI_ACTIVE_LOW ? "low" : "high", |
806 | irq->sharable == ACPI_SHARED ? "shared" : "exclusive"); | 820 | irq->sharable == ACPI_SHARED ? "shared" : "exclusive", |
821 | irq->descriptor_length); | ||
807 | } | 822 | } |
808 | 823 | ||
809 | static void pnpacpi_encode_ext_irq(struct pnp_dev *dev, | 824 | static void pnpacpi_encode_ext_irq(struct pnp_dev *dev, |
@@ -811,16 +826,13 @@ static void pnpacpi_encode_ext_irq(struct pnp_dev *dev, | |||
811 | struct resource *p) | 826 | struct resource *p) |
812 | { | 827 | { |
813 | struct acpi_resource_extended_irq *extended_irq = &resource->data.extended_irq; | 828 | struct acpi_resource_extended_irq *extended_irq = &resource->data.extended_irq; |
814 | int triggering, polarity; | 829 | int triggering, polarity, shareable; |
815 | 830 | ||
816 | decode_irq_flags(p->flags & IORESOURCE_BITS, &triggering, &polarity); | 831 | decode_irq_flags(dev, p->flags, &triggering, &polarity, &shareable); |
817 | extended_irq->producer_consumer = ACPI_CONSUMER; | 832 | extended_irq->producer_consumer = ACPI_CONSUMER; |
818 | extended_irq->triggering = triggering; | 833 | extended_irq->triggering = triggering; |
819 | extended_irq->polarity = polarity; | 834 | extended_irq->polarity = polarity; |
820 | if (triggering == ACPI_EDGE_SENSITIVE) | 835 | extended_irq->sharable = shareable; |
821 | extended_irq->sharable = ACPI_EXCLUSIVE; | ||
822 | else | ||
823 | extended_irq->sharable = ACPI_SHARED; | ||
824 | extended_irq->interrupt_count = 1; | 836 | extended_irq->interrupt_count = 1; |
825 | extended_irq->interrupts[0] = p->start; | 837 | extended_irq->interrupts[0] = p->start; |
826 | 838 | ||
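The reworked decode_irq_flags() masks out only the trigger/polarity bits, complains and falls back to a sane default for an invalid combination, and derives shareability from its own flag bit instead of inferring it from the trigger mode. A compact sketch of that decode shape; the flag values and names here are hypothetical rather than the real IORESOURCE_IRQ_* encoding:

#include <stdio.h>

#define IRQ_LOWLEVEL   0x01
#define IRQ_HIGHLEVEL  0x02
#define IRQ_LOWEDGE    0x04
#define IRQ_HIGHEDGE   0x08
#define IRQ_SHAREABLE  0x10

enum trigger  { LEVEL, EDGE };
enum polarity { ACTIVE_LOW, ACTIVE_HIGH };

/* Decode only the mode bits, default sanely on nonsense, and report
 * shareability separately. */
static void decode_irq(int flags, enum trigger *t, enum polarity *p,
                       int *shareable)
{
        switch (flags & (IRQ_LOWLEVEL | IRQ_HIGHLEVEL |
                         IRQ_LOWEDGE | IRQ_HIGHEDGE)) {
        case IRQ_LOWLEVEL:  *t = LEVEL; *p = ACTIVE_LOW;  break;
        case IRQ_HIGHLEVEL: *t = LEVEL; *p = ACTIVE_HIGH; break;
        case IRQ_LOWEDGE:   *t = EDGE;  *p = ACTIVE_LOW;  break;
        case IRQ_HIGHEDGE:  *t = EDGE;  *p = ACTIVE_HIGH; break;
        default:
                fprintf(stderr, "invalid IRQ mode %#x, assuming edge/high\n",
                        flags);
                *t = EDGE;
                *p = ACTIVE_HIGH;
                break;
        }
        *shareable = (flags & IRQ_SHAREABLE) != 0;
}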
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index 62576af36f47..3e577f655b18 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c | |||
@@ -773,6 +773,7 @@ sclp_vt220_con_init(void) | |||
773 | { | 773 | { |
774 | int rc; | 774 | int rc; |
775 | 775 | ||
776 | INIT_LIST_HEAD(&sclp_vt220_register.list); | ||
776 | if (!CONSOLE_IS_SCLP) | 777 | if (!CONSOLE_IS_SCLP) |
777 | return 0; | 778 | return 0; |
778 | rc = __sclp_vt220_init(); | 779 | rc = __sclp_vt220_init(); |
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c index 8246ef3ab095..42ce7915fc5d 100644 --- a/drivers/s390/char/tape_3590.c +++ b/drivers/s390/char/tape_3590.c | |||
@@ -1598,7 +1598,7 @@ tape_3590_setup_device(struct tape_device *device) | |||
1598 | rc = tape_3590_read_dev_chars(device, rdc_data); | 1598 | rc = tape_3590_read_dev_chars(device, rdc_data); |
1599 | if (rc) { | 1599 | if (rc) { |
1600 | DBF_LH(3, "Read device characteristics failed!\n"); | 1600 | DBF_LH(3, "Read device characteristics failed!\n"); |
1601 | goto fail_kmalloc; | 1601 | goto fail_rdc_data; |
1602 | } | 1602 | } |
1603 | rc = tape_std_assign(device); | 1603 | rc = tape_std_assign(device); |
1604 | if (rc) | 1604 | if (rc) |
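The tape_3590 fix jumps to the cleanup label that matches how far setup has progressed, so the most recent allocation is no longer leaked when the characteristics read fails. The goto-unwinding pattern it relies on, as a self-contained example (the allocations and the stub are stand-ins, not the driver's real structures):

#include <stdlib.h>

static int read_dev_chars(void *buf) { (void)buf; return -1; }  /* stub */

/* One cleanup label per acquired resource: each failure jumps to the
 * label that releases exactly what exists so far. */
static int setup_device(void)
{
        char *discipline, *rdc_data;
        int rc = -1;

        discipline = malloc(64);
        if (!discipline)
                goto out;
        rdc_data = malloc(64);
        if (!rdc_data)
                goto out_free_discipline;

        rc = read_dev_chars(rdc_data);
        if (rc)
                goto out_free_rdc;      /* not out_free_discipline */
        return 0;

out_free_rdc:
        free(rdc_data);
out_free_discipline:
        free(discipline);
out:
        return rc;
}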
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index a4a5f2efea48..0bfcbbe375c4 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c | |||
@@ -97,8 +97,8 @@ static int pure_hex(char **cp, unsigned int *val, int min_digit, | |||
97 | return 0; | 97 | return 0; |
98 | } | 98 | } |
99 | 99 | ||
100 | static int parse_busid(char *str, int *cssid, int *ssid, int *devno, | 100 | static int parse_busid(char *str, unsigned int *cssid, unsigned int *ssid, |
101 | int msgtrigger) | 101 | unsigned int *devno, int msgtrigger) |
102 | { | 102 | { |
103 | char *str_work; | 103 | char *str_work; |
104 | int val, rc, ret; | 104 | int val, rc, ret; |
@@ -148,7 +148,7 @@ out: | |||
148 | static int blacklist_parse_parameters(char *str, range_action action, | 148 | static int blacklist_parse_parameters(char *str, range_action action, |
149 | int msgtrigger) | 149 | int msgtrigger) |
150 | { | 150 | { |
151 | int from_cssid, to_cssid, from_ssid, to_ssid, from, to; | 151 | unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to; |
152 | int rc, totalrc; | 152 | int rc, totalrc; |
153 | char *parm; | 153 | char *parm; |
154 | range_action ra; | 154 | range_action ra; |
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 82c6a2d45128..b32d7eb3d81a 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c | |||
@@ -576,12 +576,14 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) | |||
576 | err = -ENODEV; | 576 | err = -ENODEV; |
577 | goto out; | 577 | goto out; |
578 | } | 578 | } |
579 | if (cio_is_console(sch->schid)) | 579 | if (cio_is_console(sch->schid)) { |
580 | sch->opm = 0xff; | 580 | sch->opm = 0xff; |
581 | else | 581 | sch->isc = 1; |
582 | } else { | ||
582 | sch->opm = chp_get_sch_opm(sch); | 583 | sch->opm = chp_get_sch_opm(sch); |
584 | sch->isc = 3; | ||
585 | } | ||
583 | sch->lpm = sch->schib.pmcw.pam & sch->opm; | 586 | sch->lpm = sch->schib.pmcw.pam & sch->opm; |
584 | sch->isc = 3; | ||
585 | 587 | ||
586 | CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X " | 588 | CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X " |
587 | "- PIM = %02X, PAM = %02X, POM = %02X\n", | 589 | "- PIM = %02X, PAM = %02X, POM = %02X\n", |
@@ -704,9 +706,9 @@ void wait_cons_dev(void) | |||
704 | if (!console_subchannel_in_use) | 706 | if (!console_subchannel_in_use) |
705 | return; | 707 | return; |
706 | 708 | ||
707 | /* disable all but isc 7 (console device) */ | 709 | /* disable all but isc 1 (console device) */ |
708 | __ctl_store (save_cr6, 6, 6); | 710 | __ctl_store (save_cr6, 6, 6); |
709 | cr6 = 0x01000000; | 711 | cr6 = 0x40000000; |
710 | __ctl_load (cr6, 6, 6); | 712 | __ctl_load (cr6, 6, 6); |
711 | 713 | ||
712 | do { | 714 | do { |
@@ -788,11 +790,11 @@ cio_probe_console(void) | |||
788 | } | 790 | } |
789 | 791 | ||
790 | /* | 792 | /* |
791 | * enable console I/O-interrupt subclass 7 | 793 | * enable console I/O-interrupt subclass 1 |
792 | */ | 794 | */ |
793 | ctl_set_bit(6, 24); | 795 | ctl_set_bit(6, 30); |
794 | console_subchannel.isc = 7; | 796 | console_subchannel.isc = 1; |
795 | console_subchannel.schib.pmcw.isc = 7; | 797 | console_subchannel.schib.pmcw.isc = 1; |
796 | console_subchannel.schib.pmcw.intparm = | 798 | console_subchannel.schib.pmcw.intparm = |
797 | (u32)(addr_t)&console_subchannel; | 799 | (u32)(addr_t)&console_subchannel; |
798 | ret = cio_modify(&console_subchannel); | 800 | ret = cio_modify(&console_subchannel); |
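Some context for the cio.c constants: on s390 an I/O interruption subclass (ISC) is enabled by setting the matching bit in control register 6, with ISC 0 in the most significant position. Moving the console subchannel from ISC 7 to ISC 1 is therefore what turns ctl_set_bit(6, 24) / 0x01000000 into ctl_set_bit(6, 30) / 0x40000000. A small illustrative helper, not part of the patch, that makes the mapping explicit:

    #include <linux/types.h>

    /* CR6 mask that enables I/O interrupts for one interruption subclass;
     * ISC 0 corresponds to the most significant bit of the register. */
    static inline u32 isc_to_cr6_mask(unsigned int isc)
    {
        return 0x80000000u >> isc;    /* isc 1 -> 0x40000000, isc 7 -> 0x01000000 */
    }
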
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 436bf1f6d4a6..9a71dae223e8 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -290,9 +290,6 @@ int qeth_set_large_send(struct qeth_card *card, | |||
290 | card->dev->features |= NETIF_F_TSO | NETIF_F_SG | | 290 | card->dev->features |= NETIF_F_TSO | NETIF_F_SG | |
291 | NETIF_F_HW_CSUM; | 291 | NETIF_F_HW_CSUM; |
292 | } else { | 292 | } else { |
293 | PRINT_WARN("TSO not supported on %s. " | ||
294 | "large_send set to 'no'.\n", | ||
295 | card->dev->name); | ||
296 | card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG | | 293 | card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG | |
297 | NETIF_F_HW_CSUM); | 294 | NETIF_F_HW_CSUM); |
298 | card->options.large_send = QETH_LARGE_SEND_NO; | 295 | card->options.large_send = QETH_LARGE_SEND_NO; |
@@ -1407,12 +1404,6 @@ static void qeth_init_func_level(struct qeth_card *card) | |||
1407 | } | 1404 | } |
1408 | } | 1405 | } |
1409 | 1406 | ||
1410 | static inline __u16 qeth_raw_devno_from_bus_id(char *id) | ||
1411 | { | ||
1412 | id += (strlen(id) - 4); | ||
1413 | return (__u16) simple_strtoul(id, &id, 16); | ||
1414 | } | ||
1415 | |||
1416 | static int qeth_idx_activate_get_answer(struct qeth_channel *channel, | 1407 | static int qeth_idx_activate_get_answer(struct qeth_channel *channel, |
1417 | void (*idx_reply_cb)(struct qeth_channel *, | 1408 | void (*idx_reply_cb)(struct qeth_channel *, |
1418 | struct qeth_cmd_buffer *)) | 1409 | struct qeth_cmd_buffer *)) |
@@ -1439,7 +1430,7 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel, | |||
1439 | spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); | 1430 | spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); |
1440 | 1431 | ||
1441 | if (rc) { | 1432 | if (rc) { |
1442 | PRINT_ERR("Error2 in activating channel rc=%d\n", rc); | 1433 | QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc); |
1443 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); | 1434 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); |
1444 | atomic_set(&channel->irq_pending, 0); | 1435 | atomic_set(&channel->irq_pending, 0); |
1445 | wake_up(&card->wait_q); | 1436 | wake_up(&card->wait_q); |
@@ -1468,6 +1459,7 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, | |||
1468 | __u16 temp; | 1459 | __u16 temp; |
1469 | __u8 tmp; | 1460 | __u8 tmp; |
1470 | int rc; | 1461 | int rc; |
1462 | struct ccw_dev_id temp_devid; | ||
1471 | 1463 | ||
1472 | card = CARD_FROM_CDEV(channel->ccwdev); | 1464 | card = CARD_FROM_CDEV(channel->ccwdev); |
1473 | 1465 | ||
@@ -1494,8 +1486,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, | |||
1494 | &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH); | 1486 | &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH); |
1495 | memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data), | 1487 | memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data), |
1496 | &card->info.func_level, sizeof(__u16)); | 1488 | &card->info.func_level, sizeof(__u16)); |
1497 | temp = qeth_raw_devno_from_bus_id(CARD_DDEV_ID(card)); | 1489 | ccw_device_get_id(CARD_DDEV(card), &temp_devid); |
1498 | memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp, 2); | 1490 | memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp_devid.devno, 2); |
1499 | temp = (card->info.cula << 8) + card->info.unit_addr2; | 1491 | temp = (card->info.cula << 8) + card->info.unit_addr2; |
1500 | memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2); | 1492 | memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2); |
1501 | 1493 | ||
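The two hunks above replace parsing of the last four hex digits of the bus-id string with a query to the common I/O layer, which already knows the device number. A minimal sketch of the replacement technique (sketch_devno is an invented name; ccw_device_get_id() is the real helper the patch switches to):

    #include <linux/types.h>
    #include <asm/ccwdev.h>

    static u16 sketch_devno(struct ccw_device *cdev)
    {
        struct ccw_dev_id dev_id;

        ccw_device_get_id(cdev, &dev_id);    /* fills cssid, ssid and devno */
        return dev_id.devno;
    }
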
@@ -1508,7 +1500,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, | |||
1508 | spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); | 1500 | spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); |
1509 | 1501 | ||
1510 | if (rc) { | 1502 | if (rc) { |
1511 | PRINT_ERR("Error1 in activating channel. rc=%d\n", rc); | 1503 | QETH_DBF_MESSAGE(2, "Error1 in activating channel. rc=%d\n", |
1504 | rc); | ||
1512 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); | 1505 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
1513 | atomic_set(&channel->irq_pending, 0); | 1506 | atomic_set(&channel->irq_pending, 0); |
1514 | wake_up(&card->wait_q); | 1507 | wake_up(&card->wait_q); |
@@ -1658,7 +1651,6 @@ int qeth_send_control_data(struct qeth_card *card, int len, | |||
1658 | 1651 | ||
1659 | reply = qeth_alloc_reply(card); | 1652 | reply = qeth_alloc_reply(card); |
1660 | if (!reply) { | 1653 | if (!reply) { |
1661 | PRINT_WARN("Could not alloc qeth_reply!\n"); | ||
1662 | return -ENOMEM; | 1654 | return -ENOMEM; |
1663 | } | 1655 | } |
1664 | reply->callback = reply_cb; | 1656 | reply->callback = reply_cb; |
@@ -2612,15 +2604,9 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index) | |||
2612 | if (newcount < count) { | 2604 | if (newcount < count) { |
2613 | /* we are in memory shortage so we switch back to | 2605 | /* we are in memory shortage so we switch back to |
2614 | traditional skb allocation and drop packages */ | 2606 | traditional skb allocation and drop packages */ |
2615 | if (!atomic_read(&card->force_alloc_skb) && | ||
2616 | net_ratelimit()) | ||
2617 | PRINT_WARN("Switch to alloc skb\n"); | ||
2618 | atomic_set(&card->force_alloc_skb, 3); | 2607 | atomic_set(&card->force_alloc_skb, 3); |
2619 | count = newcount; | 2608 | count = newcount; |
2620 | } else { | 2609 | } else { |
2621 | if ((atomic_read(&card->force_alloc_skb) == 1) && | ||
2622 | net_ratelimit()) | ||
2623 | PRINT_WARN("Switch to sg\n"); | ||
2624 | atomic_add_unless(&card->force_alloc_skb, -1, 0); | 2610 | atomic_add_unless(&card->force_alloc_skb, -1, 0); |
2625 | } | 2611 | } |
2626 | 2612 | ||
@@ -3034,7 +3020,7 @@ int qeth_get_elements_no(struct qeth_card *card, void *hdr, | |||
3034 | elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE) | 3020 | elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE) |
3035 | + skb->len) >> PAGE_SHIFT); | 3021 | + skb->len) >> PAGE_SHIFT); |
3036 | if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { | 3022 | if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { |
3037 | PRINT_ERR("Invalid size of IP packet " | 3023 | QETH_DBF_MESSAGE(2, "Invalid size of IP packet " |
3038 | "(Number=%d / Length=%d). Discarded.\n", | 3024 | "(Number=%d / Length=%d). Discarded.\n", |
3039 | (elements_needed+elems), skb->len); | 3025 | (elements_needed+elems), skb->len); |
3040 | return 0; | 3026 | return 0; |
@@ -3247,8 +3233,6 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, | |||
3247 | * free buffers) to handle eddp context */ | 3233 | * free buffers) to handle eddp context */ |
3248 | if (qeth_eddp_check_buffers_for_context(queue, ctx) | 3234 | if (qeth_eddp_check_buffers_for_context(queue, ctx) |
3249 | < 0) { | 3235 | < 0) { |
3250 | if (net_ratelimit()) | ||
3251 | PRINT_WARN("eddp tx_dropped 1\n"); | ||
3252 | rc = -EBUSY; | 3236 | rc = -EBUSY; |
3253 | goto out; | 3237 | goto out; |
3254 | } | 3238 | } |
@@ -3260,7 +3244,6 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, | |||
3260 | tmp = qeth_eddp_fill_buffer(queue, ctx, | 3244 | tmp = qeth_eddp_fill_buffer(queue, ctx, |
3261 | queue->next_buf_to_fill); | 3245 | queue->next_buf_to_fill); |
3262 | if (tmp < 0) { | 3246 | if (tmp < 0) { |
3263 | PRINT_ERR("eddp tx_dropped 2\n"); | ||
3264 | rc = -EBUSY; | 3247 | rc = -EBUSY; |
3265 | goto out; | 3248 | goto out; |
3266 | } | 3249 | } |
@@ -3602,8 +3585,6 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata) | |||
3602 | 3585 | ||
3603 | if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) && | 3586 | if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) && |
3604 | (!card->options.layer2)) { | 3587 | (!card->options.layer2)) { |
3605 | PRINT_WARN("SNMP Query MIBS not supported " | ||
3606 | "on %s!\n", QETH_CARD_IFNAME(card)); | ||
3607 | return -EOPNOTSUPP; | 3588 | return -EOPNOTSUPP; |
3608 | } | 3589 | } |
3609 | /* skip 4 bytes (data_len struct member) to get req_len */ | 3590 | /* skip 4 bytes (data_len struct member) to get req_len */ |
@@ -3634,7 +3615,7 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata) | |||
3634 | rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len, | 3615 | rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len, |
3635 | qeth_snmp_command_cb, (void *)&qinfo); | 3616 | qeth_snmp_command_cb, (void *)&qinfo); |
3636 | if (rc) | 3617 | if (rc) |
3637 | PRINT_WARN("SNMP command failed on %s: (0x%x)\n", | 3618 | QETH_DBF_MESSAGE(2, "SNMP command failed on %s: (0x%x)\n", |
3638 | QETH_CARD_IFNAME(card), rc); | 3619 | QETH_CARD_IFNAME(card), rc); |
3639 | else { | 3620 | else { |
3640 | if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) | 3621 | if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) |
@@ -3807,8 +3788,8 @@ retry: | |||
3807 | if (mpno) | 3788 | if (mpno) |
3808 | mpno = min(mpno - 1, QETH_MAX_PORTNO); | 3789 | mpno = min(mpno - 1, QETH_MAX_PORTNO); |
3809 | if (card->info.portno > mpno) { | 3790 | if (card->info.portno > mpno) { |
3810 | PRINT_ERR("Device %s does not offer port number %d \n.", | 3791 | QETH_DBF_MESSAGE(2, "Device %s does not offer port number %d" |
3811 | CARD_BUS_ID(card), card->info.portno); | 3792 | ".\n", CARD_BUS_ID(card), card->info.portno); |
3812 | rc = -ENODEV; | 3793 | rc = -ENODEV; |
3813 | goto out; | 3794 | goto out; |
3814 | } | 3795 | } |
@@ -3985,8 +3966,6 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, | |||
3985 | return skb; | 3966 | return skb; |
3986 | no_mem: | 3967 | no_mem: |
3987 | if (net_ratelimit()) { | 3968 | if (net_ratelimit()) { |
3988 | PRINT_WARN("No memory for packet received on %s.\n", | ||
3989 | QETH_CARD_IFNAME(card)); | ||
3990 | QETH_DBF_TEXT(TRACE, 2, "noskbmem"); | 3969 | QETH_DBF_TEXT(TRACE, 2, "noskbmem"); |
3991 | QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card)); | 3970 | QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card)); |
3992 | } | 3971 | } |
@@ -4004,15 +3983,17 @@ static void qeth_unregister_dbf_views(void) | |||
4004 | } | 3983 | } |
4005 | } | 3984 | } |
4006 | 3985 | ||
4007 | void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *text, ...) | 3986 | void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *fmt, ...) |
4008 | { | 3987 | { |
4009 | char dbf_txt_buf[32]; | 3988 | char dbf_txt_buf[32]; |
3989 | va_list args; | ||
4010 | 3990 | ||
4011 | if (level > (qeth_dbf[dbf_nix].id)->level) | 3991 | if (level > (qeth_dbf[dbf_nix].id)->level) |
4012 | return; | 3992 | return; |
4013 | snprintf(dbf_txt_buf, sizeof(dbf_txt_buf), text); | 3993 | va_start(args, fmt); |
3994 | vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args); | ||
3995 | va_end(args); | ||
4014 | debug_text_event(qeth_dbf[dbf_nix].id, level, dbf_txt_buf); | 3996 | debug_text_event(qeth_dbf[dbf_nix].id, level, dbf_txt_buf); |
4015 | |||
4016 | } | 3997 | } |
4017 | EXPORT_SYMBOL_GPL(qeth_dbf_longtext); | 3998 | EXPORT_SYMBOL_GPL(qeth_dbf_longtext); |
4018 | 3999 | ||
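The qeth_dbf_longtext() change is the standard varargs repair: the old body handed the caller's format string to snprintf() without the variadic arguments, so conversions were never expanded and a '%' in pre-formatted text could be misinterpreted. The portable pattern, shown as a self-contained userspace sketch with a placeholder sink in place of debug_text_event():

    #include <stdarg.h>
    #include <stdio.h>

    static void log_fmt(const char *fmt, ...)
    {
        char buf[32];
        va_list args;

        va_start(args, fmt);
        vsnprintf(buf, sizeof(buf), fmt, args);    /* expand fmt against args */
        va_end(args);

        fputs(buf, stderr);    /* stand-in for the s390 debug feature */
    }
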
diff --git a/drivers/s390/net/qeth_core_offl.c b/drivers/s390/net/qeth_core_offl.c index 822df8362856..452874e89740 100644 --- a/drivers/s390/net/qeth_core_offl.c +++ b/drivers/s390/net/qeth_core_offl.c | |||
@@ -122,8 +122,8 @@ int qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue, | |||
122 | if (element == 0) | 122 | if (element == 0) |
123 | return -EBUSY; | 123 | return -EBUSY; |
124 | else { | 124 | else { |
125 | PRINT_WARN("could only partially fill eddp " | 125 | QETH_DBF_MESSAGE(2, "could only partially fill" |
126 | "buffer!\n"); | 126 | "eddp buffer!\n"); |
127 | goto out; | 127 | goto out; |
128 | } | 128 | } |
129 | } | 129 | } |
@@ -143,8 +143,6 @@ int qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue, | |||
143 | if (must_refcnt) { | 143 | if (must_refcnt) { |
144 | must_refcnt = 0; | 144 | must_refcnt = 0; |
145 | if (qeth_eddp_buf_ref_context(buf, ctx)) { | 145 | if (qeth_eddp_buf_ref_context(buf, ctx)) { |
146 | PRINT_WARN("no memory to create eddp context " | ||
147 | "reference\n"); | ||
148 | goto out_check; | 146 | goto out_check; |
149 | } | 147 | } |
150 | } | 148 | } |
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c index 08a50f057284..c26e842ad905 100644 --- a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c | |||
@@ -129,7 +129,6 @@ static ssize_t qeth_dev_portno_store(struct device *dev, | |||
129 | 129 | ||
130 | portno = simple_strtoul(buf, &tmp, 16); | 130 | portno = simple_strtoul(buf, &tmp, 16); |
131 | if (portno > QETH_MAX_PORTNO) { | 131 | if (portno > QETH_MAX_PORTNO) { |
132 | PRINT_WARN("portno 0x%X is out of range\n", portno); | ||
133 | return -EINVAL; | 132 | return -EINVAL; |
134 | } | 133 | } |
135 | 134 | ||
@@ -223,8 +222,6 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev, | |||
223 | * if though we have to permit priority queueing | 222 | * if though we have to permit priority queueing |
224 | */ | 223 | */ |
225 | if (card->qdio.no_out_queues == 1) { | 224 | if (card->qdio.no_out_queues == 1) { |
226 | PRINT_WARN("Priority queueing disabled due " | ||
227 | "to hardware limitations!\n"); | ||
228 | card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT; | 225 | card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT; |
229 | return -EPERM; | 226 | return -EPERM; |
230 | } | 227 | } |
@@ -250,7 +247,6 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev, | |||
250 | card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING; | 247 | card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING; |
251 | card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; | 248 | card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; |
252 | } else { | 249 | } else { |
253 | PRINT_WARN("Unknown queueing type '%s'\n", tmp); | ||
254 | return -EINVAL; | 250 | return -EINVAL; |
255 | } | 251 | } |
256 | return count; | 252 | return count; |
@@ -291,9 +287,6 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev, | |||
291 | ((cnt > QETH_IN_BUF_COUNT_MAX) ? QETH_IN_BUF_COUNT_MAX : cnt); | 287 | ((cnt > QETH_IN_BUF_COUNT_MAX) ? QETH_IN_BUF_COUNT_MAX : cnt); |
292 | if (old_cnt != cnt) { | 288 | if (old_cnt != cnt) { |
293 | rc = qeth_realloc_buffer_pool(card, cnt); | 289 | rc = qeth_realloc_buffer_pool(card, cnt); |
294 | if (rc) | ||
295 | PRINT_WARN("Error (%d) while setting " | ||
296 | "buffer count.\n", rc); | ||
297 | } | 290 | } |
298 | return count; | 291 | return count; |
299 | } | 292 | } |
@@ -355,7 +348,6 @@ static ssize_t qeth_dev_performance_stats_store(struct device *dev, | |||
355 | card->perf_stats.initial_rx_packets = card->stats.rx_packets; | 348 | card->perf_stats.initial_rx_packets = card->stats.rx_packets; |
356 | card->perf_stats.initial_tx_packets = card->stats.tx_packets; | 349 | card->perf_stats.initial_tx_packets = card->stats.tx_packets; |
357 | } else { | 350 | } else { |
358 | PRINT_WARN("performance_stats: write 0 or 1 to this file!\n"); | ||
359 | return -EINVAL; | 351 | return -EINVAL; |
360 | } | 352 | } |
361 | return count; | 353 | return count; |
@@ -399,7 +391,6 @@ static ssize_t qeth_dev_layer2_store(struct device *dev, | |||
399 | newdis = QETH_DISCIPLINE_LAYER2; | 391 | newdis = QETH_DISCIPLINE_LAYER2; |
400 | break; | 392 | break; |
401 | default: | 393 | default: |
402 | PRINT_WARN("layer2: write 0 or 1 to this file!\n"); | ||
403 | return -EINVAL; | 394 | return -EINVAL; |
404 | } | 395 | } |
405 | 396 | ||
@@ -463,7 +454,6 @@ static ssize_t qeth_dev_large_send_store(struct device *dev, | |||
463 | } else if (!strcmp(tmp, "TSO")) { | 454 | } else if (!strcmp(tmp, "TSO")) { |
464 | type = QETH_LARGE_SEND_TSO; | 455 | type = QETH_LARGE_SEND_TSO; |
465 | } else { | 456 | } else { |
466 | PRINT_WARN("large_send: invalid mode %s!\n", tmp); | ||
467 | return -EINVAL; | 457 | return -EINVAL; |
468 | } | 458 | } |
469 | if (card->options.large_send == type) | 459 | if (card->options.large_send == type) |
@@ -503,8 +493,6 @@ static ssize_t qeth_dev_blkt_store(struct qeth_card *card, | |||
503 | if (i <= max_value) { | 493 | if (i <= max_value) { |
504 | *value = i; | 494 | *value = i; |
505 | } else { | 495 | } else { |
506 | PRINT_WARN("blkt total time: write values between" | ||
507 | " 0 and %d to this file!\n", max_value); | ||
508 | return -EINVAL; | 496 | return -EINVAL; |
509 | } | 497 | } |
510 | return count; | 498 | return count; |
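The qeth_core_sys.c hunks drop the console warnings and simply reject malformed input: a sysfs store handler is expected to validate, apply, and return either the number of bytes consumed or a negative errno, without logging. A generic sketch of that shape (the attribute and the tunable it sets are made up):

    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/kernel.h>

    static unsigned long sketch_threshold;    /* made-up tunable */

    static ssize_t sketch_store(struct device *dev, struct device_attribute *attr,
                                const char *buf, size_t count)
    {
        char *end;
        unsigned long val = simple_strtoul(buf, &end, 10);

        if (end == buf || val > 1024)
            return -EINVAL;        /* reject quietly, no printk */

        sketch_threshold = val;
        return count;              /* report the whole write as consumed */
    }
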
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 86ec50ddae13..f682f7b14480 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
@@ -101,19 +101,16 @@ static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no) | |||
101 | { | 101 | { |
102 | struct qeth_card *card; | 102 | struct qeth_card *card; |
103 | struct net_device *ndev; | 103 | struct net_device *ndev; |
104 | unsigned char *readno; | 104 | __u16 temp_dev_no; |
105 | __u16 temp_dev_no, card_dev_no; | ||
106 | char *endp; | ||
107 | unsigned long flags; | 105 | unsigned long flags; |
106 | struct ccw_dev_id read_devid; | ||
108 | 107 | ||
109 | ndev = NULL; | 108 | ndev = NULL; |
110 | memcpy(&temp_dev_no, read_dev_no, 2); | 109 | memcpy(&temp_dev_no, read_dev_no, 2); |
111 | read_lock_irqsave(&qeth_core_card_list.rwlock, flags); | 110 | read_lock_irqsave(&qeth_core_card_list.rwlock, flags); |
112 | list_for_each_entry(card, &qeth_core_card_list.list, list) { | 111 | list_for_each_entry(card, &qeth_core_card_list.list, list) { |
113 | readno = CARD_RDEV_ID(card); | 112 | ccw_device_get_id(CARD_RDEV(card), &read_devid); |
114 | readno += (strlen(readno) - 4); | 113 | if (read_devid.devno == temp_dev_no) { |
115 | card_dev_no = simple_strtoul(readno, &endp, 16); | ||
116 | if (card_dev_no == temp_dev_no) { | ||
117 | ndev = card->dev; | 114 | ndev = card->dev; |
118 | break; | 115 | break; |
119 | } | 116 | } |
@@ -134,14 +131,14 @@ static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card, | |||
134 | mac = &cmd->data.setdelmac.mac[0]; | 131 | mac = &cmd->data.setdelmac.mac[0]; |
135 | /* MAC already registered, needed in couple/uncouple case */ | 132 | /* MAC already registered, needed in couple/uncouple case */ |
136 | if (cmd->hdr.return_code == 0x2005) { | 133 | if (cmd->hdr.return_code == 0x2005) { |
137 | PRINT_WARN("Group MAC %02x:%02x:%02x:%02x:%02x:%02x " \ | 134 | QETH_DBF_MESSAGE(2, "Group MAC %02x:%02x:%02x:%02x:%02x:%02x " |
138 | "already existing on %s \n", | 135 | "already existing on %s \n", |
139 | mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], | 136 | mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], |
140 | QETH_CARD_IFNAME(card)); | 137 | QETH_CARD_IFNAME(card)); |
141 | cmd->hdr.return_code = 0; | 138 | cmd->hdr.return_code = 0; |
142 | } | 139 | } |
143 | if (cmd->hdr.return_code) | 140 | if (cmd->hdr.return_code) |
144 | PRINT_ERR("Could not set group MAC " \ | 141 | QETH_DBF_MESSAGE(2, "Could not set group MAC " |
145 | "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n", | 142 | "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n", |
146 | mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], | 143 | mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], |
147 | QETH_CARD_IFNAME(card), cmd->hdr.return_code); | 144 | QETH_CARD_IFNAME(card), cmd->hdr.return_code); |
@@ -166,7 +163,7 @@ static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card, | |||
166 | cmd = (struct qeth_ipa_cmd *) data; | 163 | cmd = (struct qeth_ipa_cmd *) data; |
167 | mac = &cmd->data.setdelmac.mac[0]; | 164 | mac = &cmd->data.setdelmac.mac[0]; |
168 | if (cmd->hdr.return_code) | 165 | if (cmd->hdr.return_code) |
169 | PRINT_ERR("Could not delete group MAC " \ | 166 | QETH_DBF_MESSAGE(2, "Could not delete group MAC " |
170 | "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n", | 167 | "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n", |
171 | mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], | 168 | mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], |
172 | QETH_CARD_IFNAME(card), cmd->hdr.return_code); | 169 | QETH_CARD_IFNAME(card), cmd->hdr.return_code); |
@@ -186,10 +183,8 @@ static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac) | |||
186 | 183 | ||
187 | mc = kmalloc(sizeof(struct qeth_mc_mac), GFP_ATOMIC); | 184 | mc = kmalloc(sizeof(struct qeth_mc_mac), GFP_ATOMIC); |
188 | 185 | ||
189 | if (!mc) { | 186 | if (!mc) |
190 | PRINT_ERR("no mem vor mc mac address\n"); | ||
191 | return; | 187 | return; |
192 | } | ||
193 | 188 | ||
194 | memcpy(mc->mc_addr, mac, OSA_ADDR_LEN); | 189 | memcpy(mc->mc_addr, mac, OSA_ADDR_LEN); |
195 | mc->mc_addrlen = OSA_ADDR_LEN; | 190 | mc->mc_addrlen = OSA_ADDR_LEN; |
@@ -280,7 +275,7 @@ static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card, | |||
280 | QETH_DBF_TEXT(TRACE, 2, "L2sdvcb"); | 275 | QETH_DBF_TEXT(TRACE, 2, "L2sdvcb"); |
281 | cmd = (struct qeth_ipa_cmd *) data; | 276 | cmd = (struct qeth_ipa_cmd *) data; |
282 | if (cmd->hdr.return_code) { | 277 | if (cmd->hdr.return_code) { |
283 | PRINT_ERR("Error in processing VLAN %i on %s: 0x%x. " | 278 | QETH_DBF_MESSAGE(2, "Error in processing VLAN %i on %s: 0x%x. " |
284 | "Continuing\n", cmd->data.setdelvlan.vlan_id, | 279 | "Continuing\n", cmd->data.setdelvlan.vlan_id, |
285 | QETH_CARD_IFNAME(card), cmd->hdr.return_code); | 280 | QETH_CARD_IFNAME(card), cmd->hdr.return_code); |
286 | QETH_DBF_TEXT_(TRACE, 2, "L2VL%4x", cmd->hdr.command); | 281 | QETH_DBF_TEXT_(TRACE, 2, "L2VL%4x", cmd->hdr.command); |
@@ -333,8 +328,6 @@ static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) | |||
333 | spin_lock_bh(&card->vlanlock); | 328 | spin_lock_bh(&card->vlanlock); |
334 | list_add_tail(&id->list, &card->vid_list); | 329 | list_add_tail(&id->list, &card->vid_list); |
335 | spin_unlock_bh(&card->vlanlock); | 330 | spin_unlock_bh(&card->vlanlock); |
336 | } else { | ||
337 | PRINT_ERR("no memory for vid\n"); | ||
338 | } | 331 | } |
339 | } | 332 | } |
340 | 333 | ||
@@ -550,16 +543,15 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card) | |||
550 | 543 | ||
551 | rc = qeth_query_setadapterparms(card); | 544 | rc = qeth_query_setadapterparms(card); |
552 | if (rc) { | 545 | if (rc) { |
553 | PRINT_WARN("could not query adapter parameters on device %s: " | 546 | QETH_DBF_MESSAGE(2, "could not query adapter parameters on " |
554 | "x%x\n", CARD_BUS_ID(card), rc); | 547 | "device %s: x%x\n", CARD_BUS_ID(card), rc); |
555 | } | 548 | } |
556 | 549 | ||
557 | if (card->info.guestlan) { | 550 | if (card->info.guestlan) { |
558 | rc = qeth_setadpparms_change_macaddr(card); | 551 | rc = qeth_setadpparms_change_macaddr(card); |
559 | if (rc) { | 552 | if (rc) { |
560 | PRINT_WARN("couldn't get MAC address on " | 553 | QETH_DBF_MESSAGE(2, "couldn't get MAC address on " |
561 | "device %s: x%x\n", | 554 | "device %s: x%x\n", CARD_BUS_ID(card), rc); |
562 | CARD_BUS_ID(card), rc); | ||
563 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); | 555 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
564 | return rc; | 556 | return rc; |
565 | } | 557 | } |
@@ -585,8 +577,6 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) | |||
585 | } | 577 | } |
586 | 578 | ||
587 | if (card->info.type == QETH_CARD_TYPE_OSN) { | 579 | if (card->info.type == QETH_CARD_TYPE_OSN) { |
588 | PRINT_WARN("Setting MAC address on %s is not supported.\n", | ||
589 | dev->name); | ||
590 | QETH_DBF_TEXT(TRACE, 3, "setmcOSN"); | 580 | QETH_DBF_TEXT(TRACE, 3, "setmcOSN"); |
591 | return -EOPNOTSUPP; | 581 | return -EOPNOTSUPP; |
592 | } | 582 | } |
@@ -666,7 +656,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
666 | ctx = qeth_eddp_create_context(card, new_skb, hdr, | 656 | ctx = qeth_eddp_create_context(card, new_skb, hdr, |
667 | skb->sk->sk_protocol); | 657 | skb->sk->sk_protocol); |
668 | if (ctx == NULL) { | 658 | if (ctx == NULL) { |
669 | PRINT_WARN("could not create eddp context\n"); | 659 | QETH_DBF_MESSAGE(2, "could not create eddp context\n"); |
670 | goto tx_drop; | 660 | goto tx_drop; |
671 | } | 661 | } |
672 | } else { | 662 | } else { |
@@ -731,6 +721,7 @@ tx_drop: | |||
731 | if ((new_skb != skb) && new_skb) | 721 | if ((new_skb != skb) && new_skb) |
732 | dev_kfree_skb_any(new_skb); | 722 | dev_kfree_skb_any(new_skb); |
733 | dev_kfree_skb_any(skb); | 723 | dev_kfree_skb_any(skb); |
724 | netif_wake_queue(dev); | ||
734 | return NETDEV_TX_OK; | 725 | return NETDEV_TX_OK; |
735 | } | 726 | } |
736 | 727 | ||
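Both qeth disciplines gain a netif_wake_queue() on the tx_drop path. The reasoning: earlier in the xmit handler the queue may already have been stopped, and dropping the packet while returning NETDEV_TX_OK without waking it would leave the interface stalled. A compressed sketch of the pattern (sketch_* names are invented):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* stub standing in for the driver's real fill-the-ring logic, which may
     * call netif_stop_queue() when buffers run out */
    static int sketch_queue_to_hw(struct sk_buff *skb, struct net_device *dev)
    {
        return 0;
    }

    static int sketch_xmit(struct sk_buff *skb, struct net_device *dev)
    {
        if (sketch_queue_to_hw(skb, dev) == 0)
            return NETDEV_TX_OK;        /* the hardware owns the skb now */

        /* drop path: free the skb, and wake the queue in case the attempt
         * above stopped it, otherwise transmission never resumes */
        dev_kfree_skb_any(skb);
        netif_wake_queue(dev);
        return NETDEV_TX_OK;
    }
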
@@ -1155,7 +1146,7 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len, | |||
1155 | (addr_t) iob, 0, 0); | 1146 | (addr_t) iob, 0, 0); |
1156 | spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags); | 1147 | spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags); |
1157 | if (rc) { | 1148 | if (rc) { |
1158 | PRINT_WARN("qeth_osn_send_control_data: " | 1149 | QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: " |
1159 | "ccw_device_start rc = %i\n", rc); | 1150 | "ccw_device_start rc = %i\n", rc); |
1160 | QETH_DBF_TEXT_(TRACE, 2, " err%d", rc); | 1151 | QETH_DBF_TEXT_(TRACE, 2, " err%d", rc); |
1161 | qeth_release_buffer(iob->channel, iob); | 1152 | qeth_release_buffer(iob->channel, iob); |
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 94a8ead64ed4..999552c83bbe 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -311,7 +311,6 @@ static struct qeth_ipaddr *qeth_l3_get_addr_buffer( | |||
311 | 311 | ||
312 | addr = kzalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC); | 312 | addr = kzalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC); |
313 | if (addr == NULL) { | 313 | if (addr == NULL) { |
314 | PRINT_WARN("Not enough memory to add address\n"); | ||
315 | return NULL; | 314 | return NULL; |
316 | } | 315 | } |
317 | addr->type = QETH_IP_TYPE_NORMAL; | 316 | addr->type = QETH_IP_TYPE_NORMAL; |
@@ -649,15 +648,6 @@ static void qeth_l3_correct_routing_type(struct qeth_card *card, | |||
649 | } | 648 | } |
650 | } | 649 | } |
651 | out_inval: | 650 | out_inval: |
652 | PRINT_WARN("Routing type '%s' not supported for interface %s.\n" | ||
653 | "Router status set to 'no router'.\n", | ||
654 | ((*type == PRIMARY_ROUTER)? "primary router" : | ||
655 | (*type == SECONDARY_ROUTER)? "secondary router" : | ||
656 | (*type == PRIMARY_CONNECTOR)? "primary connector" : | ||
657 | (*type == SECONDARY_CONNECTOR)? "secondary connector" : | ||
658 | (*type == MULTICAST_ROUTER)? "multicast router" : | ||
659 | "unknown"), | ||
660 | card->dev->name); | ||
661 | *type = NO_ROUTER; | 651 | *type = NO_ROUTER; |
662 | } | 652 | } |
663 | 653 | ||
@@ -674,9 +664,9 @@ int qeth_l3_setrouting_v4(struct qeth_card *card) | |||
674 | QETH_PROT_IPV4); | 664 | QETH_PROT_IPV4); |
675 | if (rc) { | 665 | if (rc) { |
676 | card->options.route4.type = NO_ROUTER; | 666 | card->options.route4.type = NO_ROUTER; |
677 | PRINT_WARN("Error (0x%04x) while setting routing type on %s. " | 667 | QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type" |
678 | "Type set to 'no router'.\n", | 668 | " on %s. Type set to 'no router'.\n", rc, |
679 | rc, QETH_CARD_IFNAME(card)); | 669 | QETH_CARD_IFNAME(card)); |
680 | } | 670 | } |
681 | return rc; | 671 | return rc; |
682 | } | 672 | } |
@@ -697,9 +687,9 @@ int qeth_l3_setrouting_v6(struct qeth_card *card) | |||
697 | QETH_PROT_IPV6); | 687 | QETH_PROT_IPV6); |
698 | if (rc) { | 688 | if (rc) { |
699 | card->options.route6.type = NO_ROUTER; | 689 | card->options.route6.type = NO_ROUTER; |
700 | PRINT_WARN("Error (0x%04x) while setting routing type on %s. " | 690 | QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type" |
701 | "Type set to 'no router'.\n", | 691 | " on %s. Type set to 'no router'.\n", rc, |
702 | rc, QETH_CARD_IFNAME(card)); | 692 | QETH_CARD_IFNAME(card)); |
703 | } | 693 | } |
704 | #endif | 694 | #endif |
705 | return rc; | 695 | return rc; |
@@ -737,7 +727,6 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card, | |||
737 | if (!memcmp(ipatoe->addr, new->addr, | 727 | if (!memcmp(ipatoe->addr, new->addr, |
738 | (ipatoe->proto == QETH_PROT_IPV4)? 4:16) && | 728 | (ipatoe->proto == QETH_PROT_IPV4)? 4:16) && |
739 | (ipatoe->mask_bits == new->mask_bits)) { | 729 | (ipatoe->mask_bits == new->mask_bits)) { |
740 | PRINT_WARN("ipato entry already exists!\n"); | ||
741 | rc = -EEXIST; | 730 | rc = -EEXIST; |
742 | break; | 731 | break; |
743 | } | 732 | } |
@@ -802,7 +791,6 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto, | |||
802 | rc = -EEXIST; | 791 | rc = -EEXIST; |
803 | spin_unlock_irqrestore(&card->ip_lock, flags); | 792 | spin_unlock_irqrestore(&card->ip_lock, flags); |
804 | if (rc) { | 793 | if (rc) { |
805 | PRINT_WARN("Cannot add VIPA. Address already exists!\n"); | ||
806 | return rc; | 794 | return rc; |
807 | } | 795 | } |
808 | if (!qeth_l3_add_ip(card, ipaddr)) | 796 | if (!qeth_l3_add_ip(card, ipaddr)) |
@@ -867,7 +855,6 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto, | |||
867 | rc = -EEXIST; | 855 | rc = -EEXIST; |
868 | spin_unlock_irqrestore(&card->ip_lock, flags); | 856 | spin_unlock_irqrestore(&card->ip_lock, flags); |
869 | if (rc) { | 857 | if (rc) { |
870 | PRINT_WARN("Cannot add RXIP. Address already exists!\n"); | ||
871 | return rc; | 858 | return rc; |
872 | } | 859 | } |
873 | if (!qeth_l3_add_ip(card, ipaddr)) | 860 | if (!qeth_l3_add_ip(card, ipaddr)) |
@@ -1020,23 +1007,23 @@ static int qeth_l3_setadapter_hstr(struct qeth_card *card) | |||
1020 | IPA_SETADP_SET_BROADCAST_MODE, | 1007 | IPA_SETADP_SET_BROADCAST_MODE, |
1021 | card->options.broadcast_mode); | 1008 | card->options.broadcast_mode); |
1022 | if (rc) | 1009 | if (rc) |
1023 | PRINT_WARN("couldn't set broadcast mode on " | 1010 | QETH_DBF_MESSAGE(2, "couldn't set broadcast mode on " |
1024 | "device %s: x%x\n", | 1011 | "device %s: x%x\n", |
1025 | CARD_BUS_ID(card), rc); | 1012 | CARD_BUS_ID(card), rc); |
1026 | rc = qeth_l3_send_setadp_mode(card, | 1013 | rc = qeth_l3_send_setadp_mode(card, |
1027 | IPA_SETADP_ALTER_MAC_ADDRESS, | 1014 | IPA_SETADP_ALTER_MAC_ADDRESS, |
1028 | card->options.macaddr_mode); | 1015 | card->options.macaddr_mode); |
1029 | if (rc) | 1016 | if (rc) |
1030 | PRINT_WARN("couldn't set macaddr mode on " | 1017 | QETH_DBF_MESSAGE(2, "couldn't set macaddr mode on " |
1031 | "device %s: x%x\n", CARD_BUS_ID(card), rc); | 1018 | "device %s: x%x\n", CARD_BUS_ID(card), rc); |
1032 | return rc; | 1019 | return rc; |
1033 | } | 1020 | } |
1034 | if (card->options.broadcast_mode == QETH_TR_BROADCAST_LOCAL) | 1021 | if (card->options.broadcast_mode == QETH_TR_BROADCAST_LOCAL) |
1035 | PRINT_WARN("set adapter parameters not available " | 1022 | QETH_DBF_MESSAGE(2, "set adapter parameters not available " |
1036 | "to set broadcast mode, using ALLRINGS " | 1023 | "to set broadcast mode, using ALLRINGS " |
1037 | "on device %s:\n", CARD_BUS_ID(card)); | 1024 | "on device %s:\n", CARD_BUS_ID(card)); |
1038 | if (card->options.macaddr_mode == QETH_TR_MACADDR_CANONICAL) | 1025 | if (card->options.macaddr_mode == QETH_TR_MACADDR_CANONICAL) |
1039 | PRINT_WARN("set adapter parameters not available " | 1026 | QETH_DBF_MESSAGE(2, "set adapter parameters not available " |
1040 | "to set macaddr mode, using NONCANONICAL " | 1027 | "to set macaddr mode, using NONCANONICAL " |
1041 | "on device %s:\n", CARD_BUS_ID(card)); | 1028 | "on device %s:\n", CARD_BUS_ID(card)); |
1042 | return 0; | 1029 | return 0; |
@@ -2070,7 +2057,7 @@ static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev) | |||
2070 | card = netdev_priv(dev); | 2057 | card = netdev_priv(dev); |
2071 | else if (rc == QETH_VLAN_CARD) | 2058 | else if (rc == QETH_VLAN_CARD) |
2072 | card = netdev_priv(vlan_dev_info(dev)->real_dev); | 2059 | card = netdev_priv(vlan_dev_info(dev)->real_dev); |
2073 | if (card->options.layer2) | 2060 | if (card && card->options.layer2) |
2074 | card = NULL; | 2061 | card = NULL; |
2075 | QETH_DBF_TEXT_(TRACE, 4, "%d", rc); | 2062 | QETH_DBF_TEXT_(TRACE, 4, "%d", rc); |
2076 | return card ; | 2063 | return card ; |
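The one-line change in qeth_l3_get_card_from_dev() is a plain NULL guard: when rc matches neither the real-card nor the VLAN case, card stays NULL, and the old unconditional card->options.layer2 dereference could oops. Reduced to its essence (sketch_* names are invented):

    #include <stddef.h>

    struct sketch_card {
        int layer2;
    };

    /* may return NULL when the device is of neither expected kind */
    static struct sketch_card *sketch_classify(int kind)
    {
        return NULL;    /* stand-in for the netdev/VLAN lookup */
    }

    static struct sketch_card *sketch_get_card(int kind)
    {
        struct sketch_card *card = sketch_classify(kind);

        if (card && card->layer2)    /* test the pointer before using it */
            card = NULL;             /* layer-2 devices are handled elsewhere */
        return card;
    }
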
@@ -2182,8 +2169,6 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries) | |||
2182 | if (card->info.guestlan) | 2169 | if (card->info.guestlan) |
2183 | return -EOPNOTSUPP; | 2170 | return -EOPNOTSUPP; |
2184 | if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { | 2171 | if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { |
2185 | PRINT_WARN("ARP processing not supported " | ||
2186 | "on %s!\n", QETH_CARD_IFNAME(card)); | ||
2187 | return -EOPNOTSUPP; | 2172 | return -EOPNOTSUPP; |
2188 | } | 2173 | } |
2189 | rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING, | 2174 | rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING, |
@@ -2191,8 +2176,8 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries) | |||
2191 | no_entries); | 2176 | no_entries); |
2192 | if (rc) { | 2177 | if (rc) { |
2193 | tmp = rc; | 2178 | tmp = rc; |
2194 | PRINT_WARN("Could not set number of ARP entries on %s: " | 2179 | QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on " |
2195 | "%s (0x%x/%d)\n", QETH_CARD_IFNAME(card), | 2180 | "%s: %s (0x%x/%d)\n", QETH_CARD_IFNAME(card), |
2196 | qeth_l3_arp_get_error_cause(&rc), tmp, tmp); | 2181 | qeth_l3_arp_get_error_cause(&rc), tmp, tmp); |
2197 | } | 2182 | } |
2198 | return rc; | 2183 | return rc; |
@@ -2260,9 +2245,6 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card, | |||
2260 | qdata->no_entries * uentry_size){ | 2245 | qdata->no_entries * uentry_size){ |
2261 | QETH_DBF_TEXT_(TRACE, 4, "qaer3%i", -ENOMEM); | 2246 | QETH_DBF_TEXT_(TRACE, 4, "qaer3%i", -ENOMEM); |
2262 | cmd->hdr.return_code = -ENOMEM; | 2247 | cmd->hdr.return_code = -ENOMEM; |
2263 | PRINT_WARN("query ARP user space buffer is too small for " | ||
2264 | "the returned number of ARP entries. " | ||
2265 | "Aborting query!\n"); | ||
2266 | goto out_error; | 2248 | goto out_error; |
2267 | } | 2249 | } |
2268 | QETH_DBF_TEXT_(TRACE, 4, "anore%i", | 2250 | QETH_DBF_TEXT_(TRACE, 4, "anore%i", |
@@ -2324,8 +2306,6 @@ static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata) | |||
2324 | 2306 | ||
2325 | if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/ | 2307 | if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/ |
2326 | IPA_ARP_PROCESSING)) { | 2308 | IPA_ARP_PROCESSING)) { |
2327 | PRINT_WARN("ARP processing not supported " | ||
2328 | "on %s!\n", QETH_CARD_IFNAME(card)); | ||
2329 | return -EOPNOTSUPP; | 2309 | return -EOPNOTSUPP; |
2330 | } | 2310 | } |
2331 | /* get size of userspace buffer and mask_bits -> 6 bytes */ | 2311 | /* get size of userspace buffer and mask_bits -> 6 bytes */ |
@@ -2344,7 +2324,7 @@ static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata) | |||
2344 | qeth_l3_arp_query_cb, (void *)&qinfo); | 2324 | qeth_l3_arp_query_cb, (void *)&qinfo); |
2345 | if (rc) { | 2325 | if (rc) { |
2346 | tmp = rc; | 2326 | tmp = rc; |
2347 | PRINT_WARN("Error while querying ARP cache on %s: %s " | 2327 | QETH_DBF_MESSAGE(2, "Error while querying ARP cache on %s: %s " |
2348 | "(0x%x/%d)\n", QETH_CARD_IFNAME(card), | 2328 | "(0x%x/%d)\n", QETH_CARD_IFNAME(card), |
2349 | qeth_l3_arp_get_error_cause(&rc), tmp, tmp); | 2329 | qeth_l3_arp_get_error_cause(&rc), tmp, tmp); |
2350 | if (copy_to_user(udata, qinfo.udata, 4)) | 2330 | if (copy_to_user(udata, qinfo.udata, 4)) |
@@ -2375,8 +2355,6 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card, | |||
2375 | if (card->info.guestlan) | 2355 | if (card->info.guestlan) |
2376 | return -EOPNOTSUPP; | 2356 | return -EOPNOTSUPP; |
2377 | if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { | 2357 | if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { |
2378 | PRINT_WARN("ARP processing not supported " | ||
2379 | "on %s!\n", QETH_CARD_IFNAME(card)); | ||
2380 | return -EOPNOTSUPP; | 2358 | return -EOPNOTSUPP; |
2381 | } | 2359 | } |
2382 | 2360 | ||
@@ -2391,10 +2369,9 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card, | |||
2391 | if (rc) { | 2369 | if (rc) { |
2392 | tmp = rc; | 2370 | tmp = rc; |
2393 | qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf); | 2371 | qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf); |
2394 | PRINT_WARN("Could not add ARP entry for address %s on %s: " | 2372 | QETH_DBF_MESSAGE(2, "Could not add ARP entry for address %s " |
2395 | "%s (0x%x/%d)\n", | 2373 | "on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card), |
2396 | buf, QETH_CARD_IFNAME(card), | 2374 | qeth_l3_arp_get_error_cause(&rc), tmp, tmp); |
2397 | qeth_l3_arp_get_error_cause(&rc), tmp, tmp); | ||
2398 | } | 2375 | } |
2399 | return rc; | 2376 | return rc; |
2400 | } | 2377 | } |
@@ -2417,8 +2394,6 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card, | |||
2417 | if (card->info.guestlan) | 2394 | if (card->info.guestlan) |
2418 | return -EOPNOTSUPP; | 2395 | return -EOPNOTSUPP; |
2419 | if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { | 2396 | if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { |
2420 | PRINT_WARN("ARP processing not supported " | ||
2421 | "on %s!\n", QETH_CARD_IFNAME(card)); | ||
2422 | return -EOPNOTSUPP; | 2397 | return -EOPNOTSUPP; |
2423 | } | 2398 | } |
2424 | memcpy(buf, entry, 12); | 2399 | memcpy(buf, entry, 12); |
@@ -2433,10 +2408,9 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card, | |||
2433 | tmp = rc; | 2408 | tmp = rc; |
2434 | memset(buf, 0, 16); | 2409 | memset(buf, 0, 16); |
2435 | qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf); | 2410 | qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf); |
2436 | PRINT_WARN("Could not delete ARP entry for address %s on %s: " | 2411 | QETH_DBF_MESSAGE(2, "Could not delete ARP entry for address %s" |
2437 | "%s (0x%x/%d)\n", | 2412 | " on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card), |
2438 | buf, QETH_CARD_IFNAME(card), | 2413 | qeth_l3_arp_get_error_cause(&rc), tmp, tmp); |
2439 | qeth_l3_arp_get_error_cause(&rc), tmp, tmp); | ||
2440 | } | 2414 | } |
2441 | return rc; | 2415 | return rc; |
2442 | } | 2416 | } |
@@ -2456,16 +2430,14 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card) | |||
2456 | if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD)) | 2430 | if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD)) |
2457 | return -EOPNOTSUPP; | 2431 | return -EOPNOTSUPP; |
2458 | if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { | 2432 | if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { |
2459 | PRINT_WARN("ARP processing not supported " | ||
2460 | "on %s!\n", QETH_CARD_IFNAME(card)); | ||
2461 | return -EOPNOTSUPP; | 2433 | return -EOPNOTSUPP; |
2462 | } | 2434 | } |
2463 | rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING, | 2435 | rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING, |
2464 | IPA_CMD_ASS_ARP_FLUSH_CACHE, 0); | 2436 | IPA_CMD_ASS_ARP_FLUSH_CACHE, 0); |
2465 | if (rc) { | 2437 | if (rc) { |
2466 | tmp = rc; | 2438 | tmp = rc; |
2467 | PRINT_WARN("Could not flush ARP cache on %s: %s (0x%x/%d)\n", | 2439 | QETH_DBF_MESSAGE(2, "Could not flush ARP cache on %s: %s " |
2468 | QETH_CARD_IFNAME(card), | 2440 | "(0x%x/%d)\n", QETH_CARD_IFNAME(card), |
2469 | qeth_l3_arp_get_error_cause(&rc), tmp, tmp); | 2441 | qeth_l3_arp_get_error_cause(&rc), tmp, tmp); |
2470 | } | 2442 | } |
2471 | return rc; | 2443 | return rc; |
@@ -2724,7 +2696,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2724 | ctx = qeth_eddp_create_context(card, new_skb, hdr, | 2696 | ctx = qeth_eddp_create_context(card, new_skb, hdr, |
2725 | skb->sk->sk_protocol); | 2697 | skb->sk->sk_protocol); |
2726 | if (ctx == NULL) { | 2698 | if (ctx == NULL) { |
2727 | PRINT_WARN("could not create eddp context\n"); | 2699 | QETH_DBF_MESSAGE(2, "could not create eddp context\n"); |
2728 | goto tx_drop; | 2700 | goto tx_drop; |
2729 | } | 2701 | } |
2730 | } else { | 2702 | } else { |
@@ -2792,6 +2764,7 @@ tx_drop: | |||
2792 | if ((new_skb != skb) && new_skb) | 2764 | if ((new_skb != skb) && new_skb) |
2793 | dev_kfree_skb_any(new_skb); | 2765 | dev_kfree_skb_any(new_skb); |
2794 | dev_kfree_skb_any(skb); | 2766 | dev_kfree_skb_any(skb); |
2767 | netif_wake_queue(dev); | ||
2795 | return NETDEV_TX_OK; | 2768 | return NETDEV_TX_OK; |
2796 | } | 2769 | } |
2797 | 2770 | ||
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index 08f51fd902c4..ac1993708ae9 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c | |||
@@ -85,7 +85,6 @@ static ssize_t qeth_l3_dev_route_store(struct qeth_card *card, | |||
85 | } else if (!strcmp(tmp, "multicast_router")) { | 85 | } else if (!strcmp(tmp, "multicast_router")) { |
86 | route->type = MULTICAST_ROUTER; | 86 | route->type = MULTICAST_ROUTER; |
87 | } else { | 87 | } else { |
88 | PRINT_WARN("Invalid routing type '%s'.\n", tmp); | ||
89 | return -EINVAL; | 88 | return -EINVAL; |
90 | } | 89 | } |
91 | if (((card->state == CARD_STATE_SOFTSETUP) || | 90 | if (((card->state == CARD_STATE_SOFTSETUP) || |
@@ -137,9 +136,6 @@ static ssize_t qeth_l3_dev_route6_store(struct device *dev, | |||
137 | return -EINVAL; | 136 | return -EINVAL; |
138 | 137 | ||
139 | if (!qeth_is_supported(card, IPA_IPV6)) { | 138 | if (!qeth_is_supported(card, IPA_IPV6)) { |
140 | PRINT_WARN("IPv6 not supported for interface %s.\n" | ||
141 | "Routing status no changed.\n", | ||
142 | QETH_CARD_IFNAME(card)); | ||
143 | return -ENOTSUPP; | 139 | return -ENOTSUPP; |
144 | } | 140 | } |
145 | 141 | ||
@@ -179,7 +175,6 @@ static ssize_t qeth_l3_dev_fake_broadcast_store(struct device *dev, | |||
179 | if ((i == 0) || (i == 1)) | 175 | if ((i == 0) || (i == 1)) |
180 | card->options.fake_broadcast = i; | 176 | card->options.fake_broadcast = i; |
181 | else { | 177 | else { |
182 | PRINT_WARN("fake_broadcast: write 0 or 1 to this file!\n"); | ||
183 | return -EINVAL; | 178 | return -EINVAL; |
184 | } | 179 | } |
185 | return count; | 180 | return count; |
@@ -220,7 +215,6 @@ static ssize_t qeth_l3_dev_broadcast_mode_store(struct device *dev, | |||
220 | 215 | ||
221 | if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) || | 216 | if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) || |
222 | (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) { | 217 | (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) { |
223 | PRINT_WARN("Device is not a tokenring device!\n"); | ||
224 | return -EINVAL; | 218 | return -EINVAL; |
225 | } | 219 | } |
226 | 220 | ||
@@ -233,8 +227,6 @@ static ssize_t qeth_l3_dev_broadcast_mode_store(struct device *dev, | |||
233 | card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS; | 227 | card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS; |
234 | return count; | 228 | return count; |
235 | } else { | 229 | } else { |
236 | PRINT_WARN("broadcast_mode: invalid mode %s!\n", | ||
237 | tmp); | ||
238 | return -EINVAL; | 230 | return -EINVAL; |
239 | } | 231 | } |
240 | return count; | 232 | return count; |
@@ -275,7 +267,6 @@ static ssize_t qeth_l3_dev_canonical_macaddr_store(struct device *dev, | |||
275 | 267 | ||
276 | if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) || | 268 | if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) || |
277 | (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) { | 269 | (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) { |
278 | PRINT_WARN("Device is not a tokenring device!\n"); | ||
279 | return -EINVAL; | 270 | return -EINVAL; |
280 | } | 271 | } |
281 | 272 | ||
@@ -285,7 +276,6 @@ static ssize_t qeth_l3_dev_canonical_macaddr_store(struct device *dev, | |||
285 | QETH_TR_MACADDR_CANONICAL : | 276 | QETH_TR_MACADDR_CANONICAL : |
286 | QETH_TR_MACADDR_NONCANONICAL; | 277 | QETH_TR_MACADDR_NONCANONICAL; |
287 | else { | 278 | else { |
288 | PRINT_WARN("canonical_macaddr: write 0 or 1 to this file!\n"); | ||
289 | return -EINVAL; | 279 | return -EINVAL; |
290 | } | 280 | } |
291 | return count; | 281 | return count; |
@@ -327,7 +317,6 @@ static ssize_t qeth_l3_dev_checksum_store(struct device *dev, | |||
327 | else if (!strcmp(tmp, "no_checksumming")) | 317 | else if (!strcmp(tmp, "no_checksumming")) |
328 | card->options.checksum_type = NO_CHECKSUMMING; | 318 | card->options.checksum_type = NO_CHECKSUMMING; |
329 | else { | 319 | else { |
330 | PRINT_WARN("Unknown checksumming type '%s'\n", tmp); | ||
331 | return -EINVAL; | 320 | return -EINVAL; |
332 | } | 321 | } |
333 | return count; | 322 | return count; |
@@ -382,8 +371,6 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev, | |||
382 | } else if (!strcmp(tmp, "0")) { | 371 | } else if (!strcmp(tmp, "0")) { |
383 | card->ipato.enabled = 0; | 372 | card->ipato.enabled = 0; |
384 | } else { | 373 | } else { |
385 | PRINT_WARN("ipato_enable: write 0, 1 or 'toggle' to " | ||
386 | "this file\n"); | ||
387 | return -EINVAL; | 374 | return -EINVAL; |
388 | } | 375 | } |
389 | return count; | 376 | return count; |
@@ -422,8 +409,6 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev, | |||
422 | } else if (!strcmp(tmp, "0")) { | 409 | } else if (!strcmp(tmp, "0")) { |
423 | card->ipato.invert4 = 0; | 410 | card->ipato.invert4 = 0; |
424 | } else { | 411 | } else { |
425 | PRINT_WARN("ipato_invert4: write 0, 1 or 'toggle' to " | ||
426 | "this file\n"); | ||
427 | return -EINVAL; | 412 | return -EINVAL; |
428 | } | 413 | } |
429 | return count; | 414 | return count; |
@@ -486,13 +471,10 @@ static int qeth_l3_parse_ipatoe(const char *buf, enum qeth_prot_versions proto, | |||
486 | /* get address string */ | 471 | /* get address string */ |
487 | end = strchr(start, '/'); | 472 | end = strchr(start, '/'); |
488 | if (!end || (end - start >= 40)) { | 473 | if (!end || (end - start >= 40)) { |
489 | PRINT_WARN("Invalid format for ipato_addx/delx. " | ||
490 | "Use <ip addr>/<mask bits>\n"); | ||
491 | return -EINVAL; | 474 | return -EINVAL; |
492 | } | 475 | } |
493 | strncpy(buffer, start, end - start); | 476 | strncpy(buffer, start, end - start); |
494 | if (qeth_l3_string_to_ipaddr(buffer, proto, addr)) { | 477 | if (qeth_l3_string_to_ipaddr(buffer, proto, addr)) { |
495 | PRINT_WARN("Invalid IP address format!\n"); | ||
496 | return -EINVAL; | 478 | return -EINVAL; |
497 | } | 479 | } |
498 | start = end + 1; | 480 | start = end + 1; |
@@ -500,7 +482,6 @@ static int qeth_l3_parse_ipatoe(const char *buf, enum qeth_prot_versions proto, | |||
500 | if (!strlen(start) || | 482 | if (!strlen(start) || |
501 | (tmp == start) || | 483 | (tmp == start) || |
502 | (*mask_bits > ((proto == QETH_PROT_IPV4) ? 32 : 128))) { | 484 | (*mask_bits > ((proto == QETH_PROT_IPV4) ? 32 : 128))) { |
503 | PRINT_WARN("Invalid mask bits for ipato_addx/delx !\n"); | ||
504 | return -EINVAL; | 485 | return -EINVAL; |
505 | } | 486 | } |
506 | return 0; | 487 | return 0; |
@@ -520,7 +501,6 @@ static ssize_t qeth_l3_dev_ipato_add_store(const char *buf, size_t count, | |||
520 | 501 | ||
521 | ipatoe = kzalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL); | 502 | ipatoe = kzalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL); |
522 | if (!ipatoe) { | 503 | if (!ipatoe) { |
523 | PRINT_WARN("No memory to allocate ipato entry\n"); | ||
524 | return -ENOMEM; | 504 | return -ENOMEM; |
525 | } | 505 | } |
526 | ipatoe->proto = proto; | 506 | ipatoe->proto = proto; |
@@ -609,8 +589,6 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev, | |||
609 | } else if (!strcmp(tmp, "0")) { | 589 | } else if (!strcmp(tmp, "0")) { |
610 | card->ipato.invert6 = 0; | 590 | card->ipato.invert6 = 0; |
611 | } else { | 591 | } else { |
612 | PRINT_WARN("ipato_invert6: write 0, 1 or 'toggle' to " | ||
613 | "this file\n"); | ||
614 | return -EINVAL; | 592 | return -EINVAL; |
615 | } | 593 | } |
616 | return count; | 594 | return count; |
@@ -724,7 +702,6 @@ static int qeth_l3_parse_vipae(const char *buf, enum qeth_prot_versions proto, | |||
724 | u8 *addr) | 702 | u8 *addr) |
725 | { | 703 | { |
726 | if (qeth_l3_string_to_ipaddr(buf, proto, addr)) { | 704 | if (qeth_l3_string_to_ipaddr(buf, proto, addr)) { |
727 | PRINT_WARN("Invalid IP address format!\n"); | ||
728 | return -EINVAL; | 705 | return -EINVAL; |
729 | } | 706 | } |
730 | return 0; | 707 | return 0; |
@@ -891,7 +868,6 @@ static int qeth_l3_parse_rxipe(const char *buf, enum qeth_prot_versions proto, | |||
891 | u8 *addr) | 868 | u8 *addr) |
892 | { | 869 | { |
893 | if (qeth_l3_string_to_ipaddr(buf, proto, addr)) { | 870 | if (qeth_l3_string_to_ipaddr(buf, proto, addr)) { |
894 | PRINT_WARN("Invalid IP address format!\n"); | ||
895 | return -EINVAL; | 871 | return -EINVAL; |
896 | } | 872 | } |
897 | return 0; | 873 | return 0; |
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c index 5080f343ad74..5bfbe7659830 100644 --- a/drivers/s390/s390mach.c +++ b/drivers/s390/s390mach.c | |||
@@ -207,6 +207,7 @@ s390_handle_mcck(void) | |||
207 | do_exit(SIGSEGV); | 207 | do_exit(SIGSEGV); |
208 | } | 208 | } |
209 | } | 209 | } |
210 | EXPORT_SYMBOL_GPL(s390_handle_mcck); | ||
210 | 211 | ||
211 | /* | 212 | /* |
212 | * returns 0 if all registers could be validated | 213 | * returns 0 if all registers could be validated |
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 8eb4da332f56..94789be54ca3 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
@@ -644,6 +644,48 @@ static void hub_stop(struct usb_hub *hub) | |||
644 | 644 | ||
645 | #ifdef CONFIG_PM | 645 | #ifdef CONFIG_PM |
646 | 646 | ||
647 | /* Try to identify which devices need USB-PERSIST handling */ | ||
648 | static int persistent_device(struct usb_device *udev) | ||
649 | { | ||
650 | int i; | ||
651 | int retval; | ||
652 | struct usb_host_config *actconfig; | ||
653 | |||
654 | /* Explicitly not marked persistent? */ | ||
655 | if (!udev->persist_enabled) | ||
656 | return 0; | ||
657 | |||
658 | /* No active config? */ | ||
659 | actconfig = udev->actconfig; | ||
660 | if (!actconfig) | ||
661 | return 0; | ||
662 | |||
663 | /* FIXME! We should check whether it's open here or not! */ | ||
664 | |||
665 | /* | ||
666 | * Check that all the interface drivers have a | ||
667 | * 'reset_resume' entrypoint | ||
668 | */ | ||
669 | retval = 0; | ||
670 | for (i = 0; i < actconfig->desc.bNumInterfaces; i++) { | ||
671 | struct usb_interface *intf; | ||
672 | struct usb_driver *driver; | ||
673 | |||
674 | intf = actconfig->interface[i]; | ||
675 | if (!intf->dev.driver) | ||
676 | continue; | ||
677 | driver = to_usb_driver(intf->dev.driver); | ||
678 | if (!driver->reset_resume) | ||
679 | return 0; | ||
680 | /* | ||
681 | * We have at least one driver, and that one | ||
682 | * has a reset_resume method. | ||
683 | */ | ||
684 | retval = 1; | ||
685 | } | ||
686 | return retval; | ||
687 | } | ||
688 | |||
647 | static void hub_restart(struct usb_hub *hub, int type) | 689 | static void hub_restart(struct usb_hub *hub, int type) |
648 | { | 690 | { |
649 | struct usb_device *hdev = hub->hdev; | 691 | struct usb_device *hdev = hub->hdev; |
@@ -689,8 +731,8 @@ static void hub_restart(struct usb_hub *hub, int type) | |||
689 | * turn off the various status changes to prevent | 731 | * turn off the various status changes to prevent |
690 | * khubd from disconnecting it later. | 732 | * khubd from disconnecting it later. |
691 | */ | 733 | */ |
692 | if (udev->persist_enabled && status == 0 && | 734 | if (status == 0 && !(portstatus & USB_PORT_STAT_ENABLE) && |
693 | !(portstatus & USB_PORT_STAT_ENABLE)) { | 735 | persistent_device(udev)) { |
694 | if (portchange & USB_PORT_STAT_C_ENABLE) | 736 | if (portchange & USB_PORT_STAT_C_ENABLE) |
695 | clear_port_feature(hub->hdev, port1, | 737 | clear_port_feature(hub->hdev, port1, |
696 | USB_PORT_FEAT_C_ENABLE); | 738 | USB_PORT_FEAT_C_ENABLE); |
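persistent_device() above makes USB-PERSIST conditional on every interface that has a driver bound offering a reset_resume method; an interface whose driver cannot reprogram the device after a port reset defeats the whole scheme. For context, this is the hook being probed for, sketched as a hypothetical driver (none of these names belong to an in-tree driver):

    #include <linux/usb.h>

    static int sketch_probe(struct usb_interface *intf,
                            const struct usb_device_id *id)
    {
        return 0;
    }

    static void sketch_disconnect(struct usb_interface *intf)
    {
    }

    /* called instead of resume() after a USB-PERSIST port reset; the driver
     * must restore the device state from scratch here */
    static int sketch_reset_resume(struct usb_interface *intf)
    {
        return 0;
    }

    static struct usb_driver sketch_driver = {
        .name         = "sketch",
        .probe        = sketch_probe,
        .disconnect   = sketch_disconnect,
        .reset_resume = sketch_reset_resume,
        /* .id_table would list the supported devices */
    };
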
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c index c9cec8738261..65aa5ecf569a 100644 --- a/drivers/usb/host/isp1760-hcd.c +++ b/drivers/usb/host/isp1760-hcd.c | |||
@@ -2207,14 +2207,14 @@ struct usb_hcd *isp1760_register(u64 res_start, u64 res_len, int irq, | |||
2207 | goto err_put; | 2207 | goto err_put; |
2208 | } | 2208 | } |
2209 | 2209 | ||
2210 | ret = usb_add_hcd(hcd, irq, irqflags); | ||
2211 | if (ret) | ||
2212 | goto err_unmap; | ||
2213 | |||
2214 | hcd->irq = irq; | 2210 | hcd->irq = irq; |
2215 | hcd->rsrc_start = res_start; | 2211 | hcd->rsrc_start = res_start; |
2216 | hcd->rsrc_len = res_len; | 2212 | hcd->rsrc_len = res_len; |
2217 | 2213 | ||
2214 | ret = usb_add_hcd(hcd, irq, irqflags); | ||
2215 | if (ret) | ||
2216 | goto err_unmap; | ||
2217 | |||
2218 | return hcd; | 2218 | return hcd; |
2219 | 2219 | ||
2220 | err_unmap: | 2220 | err_unmap: |
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig index eb6c06979f3b..001789c9a11a 100644 --- a/drivers/usb/misc/Kconfig +++ b/drivers/usb/misc/Kconfig | |||
@@ -272,6 +272,7 @@ config USB_TEST | |||
272 | config USB_ISIGHTFW | 272 | config USB_ISIGHTFW |
273 | tristate "iSight firmware loading support" | 273 | tristate "iSight firmware loading support" |
274 | depends on USB | 274 | depends on USB |
275 | select FW_LOADER | ||
275 | help | 276 | help |
276 | This driver loads firmware for USB Apple iSight cameras, allowing | 277 | This driver loads firmware for USB Apple iSight cameras, allowing |
277 | them to be driven by the USB video class driver available at | 278 | them to be driven by the USB video class driver available at |
diff --git a/drivers/usb/misc/isight_firmware.c b/drivers/usb/misc/isight_firmware.c index 390e04885536..9f30aa1f8a5d 100644 --- a/drivers/usb/misc/isight_firmware.c +++ b/drivers/usb/misc/isight_firmware.c | |||
@@ -39,9 +39,12 @@ static int isight_firmware_load(struct usb_interface *intf, | |||
39 | struct usb_device *dev = interface_to_usbdev(intf); | 39 | struct usb_device *dev = interface_to_usbdev(intf); |
40 | int llen, len, req, ret = 0; | 40 | int llen, len, req, ret = 0; |
41 | const struct firmware *firmware; | 41 | const struct firmware *firmware; |
42 | unsigned char *buf; | 42 | unsigned char *buf = kmalloc(50, GFP_KERNEL); |
43 | unsigned char data[4]; | 43 | unsigned char data[4]; |
44 | char *ptr; | 44 | u8 *ptr; |
45 | |||
46 | if (!buf) | ||
47 | return -ENOMEM; | ||
45 | 48 | ||
46 | if (request_firmware(&firmware, "isight.fw", &dev->dev) != 0) { | 49 | if (request_firmware(&firmware, "isight.fw", &dev->dev) != 0) { |
47 | printk(KERN_ERR "Unable to load isight firmware\n"); | 50 | printk(KERN_ERR "Unable to load isight firmware\n"); |
@@ -59,7 +62,7 @@ static int isight_firmware_load(struct usb_interface *intf, | |||
59 | goto out; | 62 | goto out; |
60 | } | 63 | } |
61 | 64 | ||
62 | while (1) { | 65 | while (ptr+4 <= firmware->data+firmware->size) { |
63 | memcpy(data, ptr, 4); | 66 | memcpy(data, ptr, 4); |
64 | len = (data[0] << 8 | data[1]); | 67 | len = (data[0] << 8 | data[1]); |
65 | req = (data[2] << 8 | data[3]); | 68 | req = (data[2] << 8 | data[3]); |
@@ -71,10 +74,14 @@ static int isight_firmware_load(struct usb_interface *intf, | |||
71 | continue; | 74 | continue; |
72 | 75 | ||
73 | for (; len > 0; req += 50) { | 76 | for (; len > 0; req += 50) { |
74 | llen = len > 50 ? 50 : len; | 77 | llen = min(len, 50); |
75 | len -= llen; | 78 | len -= llen; |
76 | 79 | if (ptr+llen > firmware->data+firmware->size) { | |
77 | buf = kmalloc(llen, GFP_KERNEL); | 80 | printk(KERN_ERR |
81 | "Malformed isight firmware"); | ||
82 | ret = -ENODEV; | ||
83 | goto out; | ||
84 | } | ||
78 | memcpy(buf, ptr, llen); | 85 | memcpy(buf, ptr, llen); |
79 | 86 | ||
80 | ptr += llen; | 87 | ptr += llen; |
@@ -89,16 +96,18 @@ static int isight_firmware_load(struct usb_interface *intf, | |||
89 | goto out; | 96 | goto out; |
90 | } | 97 | } |
91 | 98 | ||
92 | kfree(buf); | ||
93 | } | 99 | } |
94 | } | 100 | } |
101 | |||
95 | if (usb_control_msg | 102 | if (usb_control_msg |
96 | (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, "\0", 1, | 103 | (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, "\0", 1, |
97 | 300) != 1) { | 104 | 300) != 1) { |
98 | printk(KERN_ERR "isight firmware loading completion failed\n"); | 105 | printk(KERN_ERR "isight firmware loading completion failed\n"); |
99 | ret = -ENODEV; | 106 | ret = -ENODEV; |
100 | } | 107 | } |
108 | |||
101 | out: | 109 | out: |
110 | kfree(buf); | ||
102 | release_firmware(firmware); | 111 | release_firmware(firmware); |
103 | return ret; | 112 | return ret; |
104 | } | 113 | } |
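The isight_firmware.c hunk above moves the 50-byte chunk buffer to a single allocation and, more importantly, bounds-checks every read against the end of the firmware blob. A minimal userspace sketch of that parsing pattern (illustrative names only, not the driver's API):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define CHUNK 50

/* Walk a blob of 4-byte headers (big-endian length, then a request
 * code) followed by payload, refusing to read past the buffer end. */
static int parse_records(const uint8_t *data, size_t size)
{
    const uint8_t *ptr = data;
    const uint8_t *end = data + size;
    uint8_t chunk[CHUNK];

    while (ptr + 4 <= end) {
        size_t len = (size_t)(ptr[0] << 8 | ptr[1]);
        unsigned req = ptr[2] << 8 | ptr[3];

        ptr += 4;
        while (len > 0) {
            size_t llen = len > CHUNK ? CHUNK : len;

            if (ptr + llen > end) {
                fprintf(stderr, "malformed blob\n");
                return -1;
            }
            memcpy(chunk, ptr, llen);
            /* a real driver would hand 'chunk' to the device here */
            ptr += llen;
            len -= llen;
            req += CHUNK;
        }
        (void)req;
    }
    return 0;
}

int main(void)
{
    /* one record: len=2, req=0x1600, two payload bytes */
    const uint8_t blob[] = { 0x00, 0x02, 0x16, 0x00, 0xaa, 0xbb };

    return parse_records(blob, sizeof(blob)) ? 1 : 0;
}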
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c index b50bb03cb5ab..0a2785361ca3 100644 --- a/drivers/video/fsl-diu-fb.c +++ b/drivers/video/fsl-diu-fb.c | |||
@@ -1320,7 +1320,7 @@ static void free_irq_local(int irq) | |||
1320 | * Power management hooks. Note that we won't be called from IRQ context, | 1320 | * Power management hooks. Note that we won't be called from IRQ context, |
1321 | * unlike the blank functions above, so we may sleep. | 1321 | * unlike the blank functions above, so we may sleep. |
1322 | */ | 1322 | */ |
1323 | static int fsl_diu_suspend(struct of_device *dev, pm_message_t state) | 1323 | static int fsl_diu_suspend(struct of_device *ofdev, pm_message_t state) |
1324 | { | 1324 | { |
1325 | struct fsl_diu_data *machine_data; | 1325 | struct fsl_diu_data *machine_data; |
1326 | 1326 | ||
@@ -1330,7 +1330,7 @@ static int fsl_diu_suspend(struct of_device *dev, pm_message_t state) | |||
1330 | return 0; | 1330 | return 0; |
1331 | } | 1331 | } |
1332 | 1332 | ||
1333 | static int fsl_diu_resume(struct of_device *dev) | 1333 | static int fsl_diu_resume(struct of_device *ofdev) |
1334 | { | 1334 | { |
1335 | struct fsl_diu_data *machine_data; | 1335 | struct fsl_diu_data *machine_data; |
1336 | 1336 | ||
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES index 28e3d5c5fcac..1f3465201fdf 100644 --- a/fs/cifs/CHANGES +++ b/fs/cifs/CHANGES | |||
@@ -2,6 +2,11 @@ Version 1.53 | |||
2 | ------------ | 2 | ------------ |
3 | DFS support added (Microsoft Distributed File System client support needed | 3 | DFS support added (Microsoft Distributed File System client support needed |
4 | for referrals which enable a hierarchical name space among servers). | 4 | for referrals which enable a hierarchical name space among servers). |
5 | Disable temporary caching of mode bits to servers which do not support | ||
6 | storing of mode (e.g. Windows servers, when client mounts without cifsacl | ||
7 | mount option) and add new "dynperm" mount option to enable temporary caching | ||
8 | of mode (enable old behavior). Fix hang on mount caused when server crashes | ||
9 | tcp session during negotiate protocol. | ||
5 | 10 | ||
6 | Version 1.52 | 11 | Version 1.52 |
7 | ------------ | 12 | ------------ |
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 5df93fd6303f..86b4d5f405ae 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c | |||
@@ -97,9 +97,6 @@ cifs_read_super(struct super_block *sb, void *data, | |||
97 | { | 97 | { |
98 | struct inode *inode; | 98 | struct inode *inode; |
99 | struct cifs_sb_info *cifs_sb; | 99 | struct cifs_sb_info *cifs_sb; |
100 | #ifdef CONFIG_CIFS_DFS_UPCALL | ||
101 | int len; | ||
102 | #endif | ||
103 | int rc = 0; | 100 | int rc = 0; |
104 | 101 | ||
105 | /* BB should we make this contingent on mount parm? */ | 102 | /* BB should we make this contingent on mount parm? */ |
@@ -117,15 +114,17 @@ cifs_read_super(struct super_block *sb, void *data, | |||
117 | * complex operation (mount), and in case of fail | 114 | * complex operation (mount), and in case of fail |
118 | * just exit instead of doing mount and attempting | 115 | * just exit instead of doing mount and attempting |
119 | * undo it if this copy fails?*/ | 116 | * undo it if this copy fails?*/ |
120 | len = strlen(data); | 117 | if (data) { |
121 | cifs_sb->mountdata = kzalloc(len + 1, GFP_KERNEL); | 118 | int len = strlen(data); |
122 | if (cifs_sb->mountdata == NULL) { | 119 | cifs_sb->mountdata = kzalloc(len + 1, GFP_KERNEL); |
123 | kfree(sb->s_fs_info); | 120 | if (cifs_sb->mountdata == NULL) { |
124 | sb->s_fs_info = NULL; | 121 | kfree(sb->s_fs_info); |
125 | return -ENOMEM; | 122 | sb->s_fs_info = NULL; |
123 | return -ENOMEM; | ||
124 | } | ||
125 | strncpy(cifs_sb->mountdata, data, len + 1); | ||
126 | cifs_sb->mountdata[len] = '\0'; | ||
126 | } | 127 | } |
127 | strncpy(cifs_sb->mountdata, data, len + 1); | ||
128 | cifs_sb->mountdata[len] = '\0'; | ||
129 | #endif | 128 | #endif |
130 | 129 | ||
131 | rc = cifs_mount(sb, cifs_sb, data, devname); | 130 | rc = cifs_mount(sb, cifs_sb, data, devname); |
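The cifs_read_super() change above only copies the mount options when a non-NULL 'data' pointer was actually passed in, instead of calling strlen(NULL). A hedged userspace sketch of the same guard (not the CIFS code; kzalloc becomes calloc):

#include <stdlib.h>
#include <string.h>

/* Duplicate an optional option string; a NULL input stays NULL. */
static char *dup_mount_options(const char *data)
{
    size_t len;
    char *copy;

    if (!data)
        return NULL;

    len = strlen(data);
    copy = calloc(1, len + 1);   /* zeroed, so already NUL-terminated */
    if (!copy)
        return NULL;

    memcpy(copy, data, len);
    return copy;
}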
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 08914053242b..9cfcf326ead3 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
@@ -333,7 +333,6 @@ struct cifsFileInfo { | |||
333 | bool messageMode:1; /* for pipes: message vs byte mode */ | 333 | bool messageMode:1; /* for pipes: message vs byte mode */ |
334 | atomic_t wrtPending; /* handle in use - defer close */ | 334 | atomic_t wrtPending; /* handle in use - defer close */ |
335 | struct semaphore fh_sem; /* prevents reopen race after dead ses*/ | 335 | struct semaphore fh_sem; /* prevents reopen race after dead ses*/ |
336 | char *search_resume_name; /* BB removeme BB */ | ||
337 | struct cifs_search_info srch_inf; | 336 | struct cifs_search_info srch_inf; |
338 | }; | 337 | }; |
339 | 338 | ||
@@ -626,7 +625,7 @@ GLOBAL_EXTERN atomic_t tcpSesAllocCount; | |||
626 | GLOBAL_EXTERN atomic_t tcpSesReconnectCount; | 625 | GLOBAL_EXTERN atomic_t tcpSesReconnectCount; |
627 | GLOBAL_EXTERN atomic_t tconInfoReconnectCount; | 626 | GLOBAL_EXTERN atomic_t tconInfoReconnectCount; |
628 | 627 | ||
629 | /* Various Debug counters to remove someday (BB) */ | 628 | /* Various Debug counters */ |
630 | GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */ | 629 | GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */ |
631 | #ifdef CONFIG_CIFS_STATS2 | 630 | #ifdef CONFIG_CIFS_STATS2 |
632 | GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */ | 631 | GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */ |
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h index 65d58b4e6a61..0f327c224da3 100644 --- a/fs/cifs/cifspdu.h +++ b/fs/cifs/cifspdu.h | |||
@@ -79,6 +79,19 @@ | |||
79 | #define TRANS2_GET_DFS_REFERRAL 0x10 | 79 | #define TRANS2_GET_DFS_REFERRAL 0x10 |
80 | #define TRANS2_REPORT_DFS_INCOSISTENCY 0x11 | 80 | #define TRANS2_REPORT_DFS_INCOSISTENCY 0x11 |
81 | 81 | ||
82 | /* SMB Transact (Named Pipe) subcommand codes */ | ||
83 | #define TRANS_SET_NMPIPE_STATE 0x0001 | ||
84 | #define TRANS_RAW_READ_NMPIPE 0x0011 | ||
85 | #define TRANS_QUERY_NMPIPE_STATE 0x0021 | ||
86 | #define TRANS_QUERY_NMPIPE_INFO 0x0022 | ||
87 | #define TRANS_PEEK_NMPIPE 0x0023 | ||
88 | #define TRANS_TRANSACT_NMPIPE 0x0026 | ||
89 | #define TRANS_RAW_WRITE_NMPIPE 0x0031 | ||
90 | #define TRANS_READ_NMPIPE 0x0036 | ||
91 | #define TRANS_WRITE_NMPIPE 0x0037 | ||
92 | #define TRANS_WAIT_NMPIPE 0x0053 | ||
93 | #define TRANS_CALL_NMPIPE 0x0054 | ||
94 | |||
82 | /* NT Transact subcommand codes */ | 95 | /* NT Transact subcommand codes */ |
83 | #define NT_TRANSACT_CREATE 0x01 | 96 | #define NT_TRANSACT_CREATE 0x01 |
84 | #define NT_TRANSACT_IOCTL 0x02 | 97 | #define NT_TRANSACT_IOCTL 0x02 |
@@ -328,12 +341,13 @@ | |||
328 | #define CREATE_COMPLETE_IF_OPLK 0x00000100 /* should be zero */ | 341 | #define CREATE_COMPLETE_IF_OPLK 0x00000100 /* should be zero */ |
329 | #define CREATE_NO_EA_KNOWLEDGE 0x00000200 | 342 | #define CREATE_NO_EA_KNOWLEDGE 0x00000200 |
330 | #define CREATE_EIGHT_DOT_THREE 0x00000400 /* doc says this is obsolete | 343 | #define CREATE_EIGHT_DOT_THREE 0x00000400 /* doc says this is obsolete |
331 | open for recovery flag - should | 344 | "open for recovery" flag - should |
332 | be zero */ | 345 | be zero in any case */ |
346 | #define CREATE_OPEN_FOR_RECOVERY 0x00000400 | ||
333 | #define CREATE_RANDOM_ACCESS 0x00000800 | 347 | #define CREATE_RANDOM_ACCESS 0x00000800 |
334 | #define CREATE_DELETE_ON_CLOSE 0x00001000 | 348 | #define CREATE_DELETE_ON_CLOSE 0x00001000 |
335 | #define CREATE_OPEN_BY_ID 0x00002000 | 349 | #define CREATE_OPEN_BY_ID 0x00002000 |
336 | #define CREATE_OPEN_BACKUP_INTN 0x00004000 | 350 | #define CREATE_OPEN_BACKUP_INTENT 0x00004000 |
337 | #define CREATE_NO_COMPRESSION 0x00008000 | 351 | #define CREATE_NO_COMPRESSION 0x00008000 |
338 | #define CREATE_RESERVE_OPFILTER 0x00100000 /* should be zero */ | 352 | #define CREATE_RESERVE_OPFILTER 0x00100000 /* should be zero */ |
339 | #define OPEN_REPARSE_POINT 0x00200000 | 353 | #define OPEN_REPARSE_POINT 0x00200000 |
@@ -722,7 +736,6 @@ typedef struct smb_com_tconx_rsp_ext { | |||
722 | #define SMB_CSC_CACHE_AUTO_REINT 0x0004 | 736 | #define SMB_CSC_CACHE_AUTO_REINT 0x0004 |
723 | #define SMB_CSC_CACHE_VDO 0x0008 | 737 | #define SMB_CSC_CACHE_VDO 0x0008 |
724 | #define SMB_CSC_NO_CACHING 0x000C | 738 | #define SMB_CSC_NO_CACHING 0x000C |
725 | |||
726 | #define SMB_UNIQUE_FILE_NAME 0x0010 | 739 | #define SMB_UNIQUE_FILE_NAME 0x0010 |
727 | #define SMB_EXTENDED_SIGNATURES 0x0020 | 740 | #define SMB_EXTENDED_SIGNATURES 0x0020 |
728 | 741 | ||
@@ -806,7 +819,7 @@ typedef struct smb_com_findclose_req { | |||
806 | #define ICOUNT_MASK 0x00FF | 819 | #define ICOUNT_MASK 0x00FF |
807 | #define PIPE_READ_MODE 0x0100 | 820 | #define PIPE_READ_MODE 0x0100 |
808 | #define NAMED_PIPE_TYPE 0x0400 | 821 | #define NAMED_PIPE_TYPE 0x0400 |
809 | #define PIPE_END_POINT 0x0800 | 822 | #define PIPE_END_POINT 0x4000 |
810 | #define BLOCKING_NAMED_PIPE 0x8000 | 823 | #define BLOCKING_NAMED_PIPE 0x8000 |
811 | 824 | ||
812 | typedef struct smb_com_open_req { /* also handles create */ | 825 | typedef struct smb_com_open_req { /* also handles create */ |
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index fb655b4593c6..4511b708f0f3 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c | |||
@@ -1728,7 +1728,7 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon, | |||
1728 | { | 1728 | { |
1729 | int rc = 0; | 1729 | int rc = 0; |
1730 | LOCK_REQ *pSMB = NULL; | 1730 | LOCK_REQ *pSMB = NULL; |
1731 | LOCK_RSP *pSMBr = NULL; | 1731 | /* LOCK_RSP *pSMBr = NULL; */ /* No response data other than rc to parse */ |
1732 | int bytes_returned; | 1732 | int bytes_returned; |
1733 | int timeout = 0; | 1733 | int timeout = 0; |
1734 | __u16 count; | 1734 | __u16 count; |
@@ -1739,8 +1739,6 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon, | |||
1739 | if (rc) | 1739 | if (rc) |
1740 | return rc; | 1740 | return rc; |
1741 | 1741 | ||
1742 | pSMBr = (LOCK_RSP *)pSMB; /* BB removeme BB */ | ||
1743 | |||
1744 | if (lockType == LOCKING_ANDX_OPLOCK_RELEASE) { | 1742 | if (lockType == LOCKING_ANDX_OPLOCK_RELEASE) { |
1745 | timeout = CIFS_ASYNC_OP; /* no response expected */ | 1743 | timeout = CIFS_ASYNC_OP; /* no response expected */ |
1746 | pSMB->Timeout = 0; | 1744 | pSMB->Timeout = 0; |
@@ -1774,7 +1772,7 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon, | |||
1774 | 1772 | ||
1775 | if (waitFlag) { | 1773 | if (waitFlag) { |
1776 | rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB, | 1774 | rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB, |
1777 | (struct smb_hdr *) pSMBr, &bytes_returned); | 1775 | (struct smb_hdr *) pSMB, &bytes_returned); |
1778 | cifs_small_buf_release(pSMB); | 1776 | cifs_small_buf_release(pSMB); |
1779 | } else { | 1777 | } else { |
1780 | rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *)pSMB, | 1778 | rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *)pSMB, |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 023434f72c15..e8fa46c7cff2 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -653,6 +653,7 @@ multi_t2_fnd: | |||
653 | spin_lock(&GlobalMid_Lock); | 653 | spin_lock(&GlobalMid_Lock); |
654 | server->tcpStatus = CifsExiting; | 654 | server->tcpStatus = CifsExiting; |
655 | spin_unlock(&GlobalMid_Lock); | 655 | spin_unlock(&GlobalMid_Lock); |
656 | wake_up_all(&server->response_q); | ||
656 | 657 | ||
657 | /* don't exit until kthread_stop is called */ | 658 | /* don't exit until kthread_stop is called */ |
658 | set_current_state(TASK_UNINTERRUPTIBLE); | 659 | set_current_state(TASK_UNINTERRUPTIBLE); |
@@ -2120,6 +2121,10 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, | |||
2120 | cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO; | 2121 | cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO; |
2121 | } | 2122 | } |
2122 | 2123 | ||
2124 | if ((volume_info.cifs_acl) && (volume_info.dynperm)) | ||
2125 | cERROR(1, ("mount option dynperm ignored if cifsacl " | ||
2126 | "mount option supported")); | ||
2127 | |||
2123 | tcon = | 2128 | tcon = |
2124 | find_unc(sin_server.sin_addr.s_addr, volume_info.UNC, | 2129 | find_unc(sin_server.sin_addr.s_addr, volume_info.UNC, |
2125 | volume_info.username); | 2130 | volume_info.username); |
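The connect.c hunk adds a wake_up_all() on the response queue once tcpStatus is set to CifsExiting, so threads sleeping for a reply notice the teardown. A rough pthread analogue of that wake-the-waiters pattern (illustrative, not kernel code):

#include <pthread.h>
#include <stdbool.h>

struct server_state {
    pthread_mutex_t lock;
    pthread_cond_t  response_q;
    bool            exiting;
};

static void mark_exiting(struct server_state *s)
{
    pthread_mutex_lock(&s->lock);
    s->exiting = true;                       /* tcpStatus = CifsExiting */
    pthread_mutex_unlock(&s->lock);
    pthread_cond_broadcast(&s->response_q);  /* wake_up_all() analogue */
}

static void wait_for_response(struct server_state *s)
{
    pthread_mutex_lock(&s->lock);
    while (!s->exiting)                      /* re-check after each wakeup */
        pthread_cond_wait(&s->response_q, &s->lock);
    pthread_mutex_unlock(&s->lock);
}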
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index f0b5b5f3dd2e..fb69c1fa85c9 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c | |||
@@ -260,7 +260,9 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, | |||
260 | buf, inode->i_sb, xid, | 260 | buf, inode->i_sb, xid, |
261 | &fileHandle); | 261 | &fileHandle); |
262 | if (newinode) { | 262 | if (newinode) { |
263 | newinode->i_mode = mode; | 263 | if (cifs_sb->mnt_cifs_flags & |
264 | CIFS_MOUNT_DYNPERM) | ||
265 | newinode->i_mode = mode; | ||
264 | if ((oplock & CIFS_CREATE_ACTION) && | 266 | if ((oplock & CIFS_CREATE_ACTION) && |
265 | (cifs_sb->mnt_cifs_flags & | 267 | (cifs_sb->mnt_cifs_flags & |
266 | CIFS_MOUNT_SET_UID)) { | 268 | CIFS_MOUNT_SET_UID)) { |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 8636cec2642c..0aac824371a5 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -546,7 +546,6 @@ int cifs_close(struct inode *inode, struct file *file) | |||
546 | msleep(timeout); | 546 | msleep(timeout); |
547 | timeout *= 8; | 547 | timeout *= 8; |
548 | } | 548 | } |
549 | kfree(pSMBFile->search_resume_name); | ||
550 | kfree(file->private_data); | 549 | kfree(file->private_data); |
551 | file->private_data = NULL; | 550 | file->private_data = NULL; |
552 | } else | 551 | } else |
@@ -605,12 +604,6 @@ int cifs_closedir(struct inode *inode, struct file *file) | |||
605 | else | 604 | else |
606 | cifs_buf_release(ptmp); | 605 | cifs_buf_release(ptmp); |
607 | } | 606 | } |
608 | ptmp = pCFileStruct->search_resume_name; | ||
609 | if (ptmp) { | ||
610 | cFYI(1, ("closedir free resume name")); | ||
611 | pCFileStruct->search_resume_name = NULL; | ||
612 | kfree(ptmp); | ||
613 | } | ||
614 | kfree(file->private_data); | 607 | kfree(file->private_data); |
615 | file->private_data = NULL; | 608 | file->private_data = NULL; |
616 | } | 609 | } |
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 129dbfe4dca7..722be543ceec 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c | |||
@@ -418,6 +418,7 @@ int cifs_get_inode_info(struct inode **pinode, | |||
418 | char *buf = NULL; | 418 | char *buf = NULL; |
419 | bool adjustTZ = false; | 419 | bool adjustTZ = false; |
420 | bool is_dfs_referral = false; | 420 | bool is_dfs_referral = false; |
421 | umode_t default_mode; | ||
421 | 422 | ||
422 | pTcon = cifs_sb->tcon; | 423 | pTcon = cifs_sb->tcon; |
423 | cFYI(1, ("Getting info on %s", full_path)); | 424 | cFYI(1, ("Getting info on %s", full_path)); |
@@ -530,47 +531,42 @@ int cifs_get_inode_info(struct inode **pinode, | |||
530 | inode->i_mtime.tv_sec += pTcon->ses->server->timeAdj; | 531 | inode->i_mtime.tv_sec += pTcon->ses->server->timeAdj; |
531 | } | 532 | } |
532 | 533 | ||
533 | /* set default mode. will override for dirs below */ | 534 | /* get default inode mode */ |
534 | if (atomic_read(&cifsInfo->inUse) == 0) | 535 | if (attr & ATTR_DIRECTORY) |
535 | /* new inode, can safely set these fields */ | 536 | default_mode = cifs_sb->mnt_dir_mode; |
536 | inode->i_mode = cifs_sb->mnt_file_mode; | 537 | else |
537 | else /* since we set the inode type below we need to mask off | 538 | default_mode = cifs_sb->mnt_file_mode; |
538 | to avoid strange results if type changes and both | 539 | |
539 | get orred in */ | 540 | /* set permission bits */ |
540 | inode->i_mode &= ~S_IFMT; | 541 | if (atomic_read(&cifsInfo->inUse) == 0 || |
541 | /* if (attr & ATTR_REPARSE) */ | 542 | (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) == 0) |
542 | /* We no longer handle these as symlinks because we could not | 543 | inode->i_mode = default_mode; |
543 | follow them due to the absolute path with drive letter */ | 544 | else { |
544 | if (attr & ATTR_DIRECTORY) { | 545 | /* just reenable write bits if !ATTR_READONLY */ |
545 | /* override default perms since we do not do byte range locking | 546 | if ((inode->i_mode & S_IWUGO) == 0 && |
546 | on dirs */ | 547 | (attr & ATTR_READONLY) == 0) |
547 | inode->i_mode = cifs_sb->mnt_dir_mode; | 548 | inode->i_mode |= (S_IWUGO & default_mode); |
548 | inode->i_mode |= S_IFDIR; | 549 | inode->i_mode &= ~S_IFMT; |
549 | } else if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) && | 550 | } |
550 | (cifsInfo->cifsAttrs & ATTR_SYSTEM) && | 551 | /* clear write bits if ATTR_READONLY is set */ |
551 | /* No need to le64 convert size of zero */ | 552 | if (attr & ATTR_READONLY) |
552 | (pfindData->EndOfFile == 0)) { | 553 | inode->i_mode &= ~S_IWUGO; |
553 | inode->i_mode = cifs_sb->mnt_file_mode; | 554 | |
554 | inode->i_mode |= S_IFIFO; | 555 | /* set inode type */ |
555 | /* BB Finish for SFU style symlinks and devices */ | 556 | if ((attr & ATTR_SYSTEM) && |
556 | } else if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) && | 557 | (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)) { |
557 | (cifsInfo->cifsAttrs & ATTR_SYSTEM)) { | 558 | /* no need to fix endianness on 0 */ |
558 | if (decode_sfu_inode(inode, le64_to_cpu(pfindData->EndOfFile), | 559 | if (pfindData->EndOfFile == 0) |
559 | full_path, cifs_sb, xid)) | 560 | inode->i_mode |= S_IFIFO; |
560 | cFYI(1, ("Unrecognized sfu inode type")); | 561 | else if (decode_sfu_inode(inode, |
561 | 562 | le64_to_cpu(pfindData->EndOfFile), | |
562 | cFYI(1, ("sfu mode 0%o", inode->i_mode)); | 563 | full_path, cifs_sb, xid)) |
564 | cFYI(1, ("unknown SFU file type\n")); | ||
563 | } else { | 565 | } else { |
564 | inode->i_mode |= S_IFREG; | 566 | if (attr & ATTR_DIRECTORY) |
565 | /* treat dos attribute of read-only as read-only mode eg 555 */ | 567 | inode->i_mode |= S_IFDIR; |
566 | if (cifsInfo->cifsAttrs & ATTR_READONLY) | 568 | else |
567 | inode->i_mode &= ~(S_IWUGO); | 569 | inode->i_mode |= S_IFREG; |
568 | else if ((inode->i_mode & S_IWUGO) == 0) | ||
569 | /* the ATTR_READONLY flag may have been */ | ||
570 | /* changed on server -- set any w bits */ | ||
571 | /* allowed by mnt_file_mode */ | ||
572 | inode->i_mode |= (S_IWUGO & cifs_sb->mnt_file_mode); | ||
573 | /* BB add code to validate if device or weird share or device type? */ | ||
574 | } | 570 | } |
575 | 571 | ||
576 | spin_lock(&inode->i_lock); | 572 | spin_lock(&inode->i_lock); |
@@ -1019,8 +1015,11 @@ mkdir_get_info: | |||
1019 | CIFS_MOUNT_MAP_SPECIAL_CHR); | 1015 | CIFS_MOUNT_MAP_SPECIAL_CHR); |
1020 | } | 1016 | } |
1021 | if (direntry->d_inode) { | 1017 | if (direntry->d_inode) { |
1022 | direntry->d_inode->i_mode = mode; | 1018 | if (cifs_sb->mnt_cifs_flags & |
1023 | direntry->d_inode->i_mode |= S_IFDIR; | 1019 | CIFS_MOUNT_DYNPERM) |
1020 | direntry->d_inode->i_mode = | ||
1021 | (mode | S_IFDIR); | ||
1022 | |||
1024 | if (cifs_sb->mnt_cifs_flags & | 1023 | if (cifs_sb->mnt_cifs_flags & |
1025 | CIFS_MOUNT_SET_UID) { | 1024 | CIFS_MOUNT_SET_UID) { |
1026 | direntry->d_inode->i_uid = | 1025 | direntry->d_inode->i_uid = |
@@ -1547,13 +1546,26 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs) | |||
1547 | } else | 1546 | } else |
1548 | goto cifs_setattr_exit; | 1547 | goto cifs_setattr_exit; |
1549 | } | 1548 | } |
1550 | if (attrs->ia_valid & ATTR_UID) { | 1549 | |
1551 | cFYI(1, ("UID changed to %d", attrs->ia_uid)); | 1550 | /* |
1552 | uid = attrs->ia_uid; | 1551 | * Without unix extensions we can't send ownership changes to the |
1553 | } | 1552 | * server, so silently ignore them. This is consistent with how |
1554 | if (attrs->ia_valid & ATTR_GID) { | 1553 | * local DOS/Windows filesystems behave (VFAT, NTFS, etc). With |
1555 | cFYI(1, ("GID changed to %d", attrs->ia_gid)); | 1554 | * CIFSACL support + proper Windows to Unix idmapping, we may be |
1556 | gid = attrs->ia_gid; | 1555 | * able to support this in the future. |
1556 | */ | ||
1557 | if (!pTcon->unix_ext && | ||
1558 | !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)) { | ||
1559 | attrs->ia_valid &= ~(ATTR_UID | ATTR_GID); | ||
1560 | } else { | ||
1561 | if (attrs->ia_valid & ATTR_UID) { | ||
1562 | cFYI(1, ("UID changed to %d", attrs->ia_uid)); | ||
1563 | uid = attrs->ia_uid; | ||
1564 | } | ||
1565 | if (attrs->ia_valid & ATTR_GID) { | ||
1566 | cFYI(1, ("GID changed to %d", attrs->ia_gid)); | ||
1567 | gid = attrs->ia_gid; | ||
1568 | } | ||
1557 | } | 1569 | } |
1558 | 1570 | ||
1559 | time_buf.Attributes = 0; | 1571 | time_buf.Attributes = 0; |
@@ -1563,7 +1575,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs) | |||
1563 | attrs->ia_valid &= ~ATTR_MODE; | 1575 | attrs->ia_valid &= ~ATTR_MODE; |
1564 | 1576 | ||
1565 | if (attrs->ia_valid & ATTR_MODE) { | 1577 | if (attrs->ia_valid & ATTR_MODE) { |
1566 | cFYI(1, ("Mode changed to 0x%x", attrs->ia_mode)); | 1578 | cFYI(1, ("Mode changed to 0%o", attrs->ia_mode)); |
1567 | mode = attrs->ia_mode; | 1579 | mode = attrs->ia_mode; |
1568 | } | 1580 | } |
1569 | 1581 | ||
@@ -1578,18 +1590,18 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs) | |||
1578 | #ifdef CONFIG_CIFS_EXPERIMENTAL | 1590 | #ifdef CONFIG_CIFS_EXPERIMENTAL |
1579 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) | 1591 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) |
1580 | rc = mode_to_acl(inode, full_path, mode); | 1592 | rc = mode_to_acl(inode, full_path, mode); |
1581 | else if ((mode & S_IWUGO) == 0) { | 1593 | else |
1582 | #else | ||
1583 | if ((mode & S_IWUGO) == 0) { | ||
1584 | #endif | 1594 | #endif |
1585 | /* not writeable */ | 1595 | if (((mode & S_IWUGO) == 0) && |
1586 | if ((cifsInode->cifsAttrs & ATTR_READONLY) == 0) { | 1596 | (cifsInode->cifsAttrs & ATTR_READONLY) == 0) { |
1587 | set_dosattr = true; | 1597 | set_dosattr = true; |
1588 | time_buf.Attributes = | 1598 | time_buf.Attributes = cpu_to_le32(cifsInode->cifsAttrs | |
1589 | cpu_to_le32(cifsInode->cifsAttrs | | 1599 | ATTR_READONLY); |
1590 | ATTR_READONLY); | 1600 | /* fix up mode if we're not using dynperm */ |
1591 | } | 1601 | if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) == 0) |
1592 | } else if (cifsInode->cifsAttrs & ATTR_READONLY) { | 1602 | attrs->ia_mode = inode->i_mode & ~S_IWUGO; |
1603 | } else if ((mode & S_IWUGO) && | ||
1604 | (cifsInode->cifsAttrs & ATTR_READONLY)) { | ||
1593 | /* If file is readonly on server, we would | 1605 | /* If file is readonly on server, we would |
1594 | not be able to write to it - so if any write | 1606 | not be able to write to it - so if any write |
1595 | bit is enabled for user or group or other we | 1607 | bit is enabled for user or group or other we |
@@ -1600,6 +1612,20 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs) | |||
1600 | /* Windows ignores set to zero */ | 1612 | /* Windows ignores set to zero */ |
1601 | if (time_buf.Attributes == 0) | 1613 | if (time_buf.Attributes == 0) |
1602 | time_buf.Attributes |= cpu_to_le32(ATTR_NORMAL); | 1614 | time_buf.Attributes |= cpu_to_le32(ATTR_NORMAL); |
1615 | |||
1616 | /* reset local inode permissions to normal */ | ||
1617 | if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)) { | ||
1618 | attrs->ia_mode &= ~(S_IALLUGO); | ||
1619 | if (S_ISDIR(inode->i_mode)) | ||
1620 | attrs->ia_mode |= | ||
1621 | cifs_sb->mnt_dir_mode; | ||
1622 | else | ||
1623 | attrs->ia_mode |= | ||
1624 | cifs_sb->mnt_file_mode; | ||
1625 | } | ||
1626 | } else if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)) { | ||
1627 | /* ignore mode change - ATTR_READONLY hasn't changed */ | ||
1628 | attrs->ia_valid &= ~ATTR_MODE; | ||
1603 | } | 1629 | } |
1604 | } | 1630 | } |
1605 | 1631 | ||
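The rewritten cifs_get_inode_info() section above derives permissions in stages: pick the mount default for files or directories, optionally keep cached bits under the new "dynperm" option, force the write bits off when the server reports ATTR_READONLY, and only then OR in the type. A condensed userspace model of that flow (flag values illustrative; the SFU/FIFO special case is omitted):

#include <sys/stat.h>
#include <stdbool.h>

#define ATTR_READONLY  0x0001
#define ATTR_DIRECTORY 0x0010
#define S_IWUGO (S_IWUSR | S_IWGRP | S_IWOTH)   /* kernel macro, spelled out */

static mode_t derive_mode(unsigned attr, mode_t cached, bool new_inode,
                          bool dynperm, mode_t file_mode, mode_t dir_mode)
{
    mode_t def = (attr & ATTR_DIRECTORY) ? dir_mode : file_mode;
    mode_t mode;

    if (new_inode || !dynperm) {
        mode = def;                        /* no caching: mount default */
    } else {
        mode = cached;
        if ((mode & S_IWUGO) == 0 && !(attr & ATTR_READONLY))
            mode |= (S_IWUGO & def);       /* server cleared read-only */
        mode &= ~S_IFMT;                   /* type bits re-added below */
    }

    if (attr & ATTR_READONLY)              /* DOS read-only always wins */
        mode &= ~S_IWUGO;

    return mode | ((attr & ATTR_DIRECTORY) ? S_IFDIR : S_IFREG);
}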
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 1d69b8014e0b..4b17f8fe3157 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c | |||
@@ -519,8 +519,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv) | |||
519 | pnotify = (struct file_notify_information *) | 519 | pnotify = (struct file_notify_information *) |
520 | ((char *)&pSMBr->hdr.Protocol + data_offset); | 520 | ((char *)&pSMBr->hdr.Protocol + data_offset); |
521 | cFYI(1, ("dnotify on %s Action: 0x%x", | 521 | cFYI(1, ("dnotify on %s Action: 0x%x", |
522 | pnotify->FileName, | 522 | pnotify->FileName, pnotify->Action)); |
523 | pnotify->Action)); /* BB removeme BB */ | ||
524 | /* cifs_dump_mem("Rcvd notify Data: ",buf, | 523 | /* cifs_dump_mem("Rcvd notify Data: ",buf, |
525 | sizeof(struct smb_hdr)+60); */ | 524 | sizeof(struct smb_hdr)+60); */ |
526 | return true; | 525 | return true; |
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index 713c25110197..83f306954883 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c | |||
@@ -132,6 +132,7 @@ static void fill_in_inode(struct inode *tmp_inode, int new_buf_type, | |||
132 | __u32 attr; | 132 | __u32 attr; |
133 | __u64 allocation_size; | 133 | __u64 allocation_size; |
134 | __u64 end_of_file; | 134 | __u64 end_of_file; |
135 | umode_t default_mode; | ||
135 | 136 | ||
136 | /* save mtime and size */ | 137 | /* save mtime and size */ |
137 | local_mtime = tmp_inode->i_mtime; | 138 | local_mtime = tmp_inode->i_mtime; |
@@ -187,48 +188,54 @@ static void fill_in_inode(struct inode *tmp_inode, int new_buf_type, | |||
187 | if (atomic_read(&cifsInfo->inUse) == 0) { | 188 | if (atomic_read(&cifsInfo->inUse) == 0) { |
188 | tmp_inode->i_uid = cifs_sb->mnt_uid; | 189 | tmp_inode->i_uid = cifs_sb->mnt_uid; |
189 | tmp_inode->i_gid = cifs_sb->mnt_gid; | 190 | tmp_inode->i_gid = cifs_sb->mnt_gid; |
190 | /* set default mode. will override for dirs below */ | 191 | } |
191 | tmp_inode->i_mode = cifs_sb->mnt_file_mode; | 192 | |
192 | } else { | 193 | if (attr & ATTR_DIRECTORY) |
193 | /* mask off the type bits since it gets set | 194 | default_mode = cifs_sb->mnt_dir_mode; |
194 | below and we do not want to get two type | 195 | else |
195 | bits set */ | 196 | default_mode = cifs_sb->mnt_file_mode; |
197 | |||
198 | /* set initial permissions */ | ||
199 | if ((atomic_read(&cifsInfo->inUse) == 0) || | ||
200 | (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) == 0) | ||
201 | tmp_inode->i_mode = default_mode; | ||
202 | else { | ||
203 | /* just reenable write bits if !ATTR_READONLY */ | ||
204 | if ((tmp_inode->i_mode & S_IWUGO) == 0 && | ||
205 | (attr & ATTR_READONLY) == 0) | ||
206 | tmp_inode->i_mode |= (S_IWUGO & default_mode); | ||
207 | |||
196 | tmp_inode->i_mode &= ~S_IFMT; | 208 | tmp_inode->i_mode &= ~S_IFMT; |
197 | } | 209 | } |
198 | 210 | ||
199 | if (attr & ATTR_DIRECTORY) { | 211 | /* clear write bits if ATTR_READONLY is set */ |
200 | *pobject_type = DT_DIR; | 212 | if (attr & ATTR_READONLY) |
201 | /* override default perms since we do not lock dirs */ | 213 | tmp_inode->i_mode &= ~S_IWUGO; |
202 | if (atomic_read(&cifsInfo->inUse) == 0) | 214 | |
203 | tmp_inode->i_mode = cifs_sb->mnt_dir_mode; | 215 | /* set inode type */ |
204 | tmp_inode->i_mode |= S_IFDIR; | 216 | if ((attr & ATTR_SYSTEM) && |
205 | } else if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) && | 217 | (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)) { |
206 | (attr & ATTR_SYSTEM)) { | ||
207 | if (end_of_file == 0) { | 218 | if (end_of_file == 0) { |
208 | *pobject_type = DT_FIFO; | ||
209 | tmp_inode->i_mode |= S_IFIFO; | 219 | tmp_inode->i_mode |= S_IFIFO; |
220 | *pobject_type = DT_FIFO; | ||
210 | } else { | 221 | } else { |
211 | /* rather than get the type here, we mark the | 222 | /* |
212 | inode as needing revalidate and get the real type | 223 | * trying to get the type can be slow, so just call |
213 | (blk vs chr vs. symlink) later ie in lookup */ | 224 | * this a regular file for now, and mark for reval |
214 | *pobject_type = DT_REG; | 225 | */ |
215 | tmp_inode->i_mode |= S_IFREG; | 226 | tmp_inode->i_mode |= S_IFREG; |
227 | *pobject_type = DT_REG; | ||
216 | cifsInfo->time = 0; | 228 | cifsInfo->time = 0; |
217 | } | 229 | } |
218 | /* we no longer mark these because we could not follow them */ | ||
219 | /* } else if (attr & ATTR_REPARSE) { | ||
220 | *pobject_type = DT_LNK; | ||
221 | tmp_inode->i_mode |= S_IFLNK; */ | ||
222 | } else { | 230 | } else { |
223 | *pobject_type = DT_REG; | 231 | if (attr & ATTR_DIRECTORY) { |
224 | tmp_inode->i_mode |= S_IFREG; | 232 | tmp_inode->i_mode |= S_IFDIR; |
225 | if (attr & ATTR_READONLY) | 233 | *pobject_type = DT_DIR; |
226 | tmp_inode->i_mode &= ~(S_IWUGO); | 234 | } else { |
227 | else if ((tmp_inode->i_mode & S_IWUGO) == 0) | 235 | tmp_inode->i_mode |= S_IFREG; |
228 | /* the ATTR_READONLY flag may have been changed on */ | 236 | *pobject_type = DT_REG; |
229 | /* server -- set any w bits allowed by mnt_file_mode */ | 237 | } |
230 | tmp_inode->i_mode |= (S_IWUGO & cifs_sb->mnt_file_mode); | 238 | } |
231 | } /* could add code here - to validate if device or weird share type? */ | ||
232 | 239 | ||
233 | /* can not fill in nlink here as in qpathinfo version and Unx search */ | 240 | /* can not fill in nlink here as in qpathinfo version and Unx search */ |
234 | if (atomic_read(&cifsInfo->inUse) == 0) | 241 | if (atomic_read(&cifsInfo->inUse) == 0) |
@@ -675,8 +682,6 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon, | |||
675 | cifsFile->invalidHandle = true; | 682 | cifsFile->invalidHandle = true; |
676 | CIFSFindClose(xid, pTcon, cifsFile->netfid); | 683 | CIFSFindClose(xid, pTcon, cifsFile->netfid); |
677 | } | 684 | } |
678 | kfree(cifsFile->search_resume_name); | ||
679 | cifsFile->search_resume_name = NULL; | ||
680 | if (cifsFile->srch_inf.ntwrk_buf_start) { | 685 | if (cifsFile->srch_inf.ntwrk_buf_start) { |
681 | cFYI(1, ("freeing SMB ff cache buf on search rewind")); | 686 | cFYI(1, ("freeing SMB ff cache buf on search rewind")); |
682 | if (cifsFile->srch_inf.smallBuf) | 687 | if (cifsFile->srch_inf.smallBuf) |
@@ -1043,9 +1048,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir) | |||
1043 | } /* else { | 1048 | } /* else { |
1044 | cifsFile->invalidHandle = true; | 1049 | cifsFile->invalidHandle = true; |
1045 | CIFSFindClose(xid, pTcon, cifsFile->netfid); | 1050 | CIFSFindClose(xid, pTcon, cifsFile->netfid); |
1046 | } | 1051 | } */ |
1047 | kfree(cifsFile->search_resume_name); | ||
1048 | cifsFile->search_resume_name = NULL; */ | ||
1049 | 1052 | ||
1050 | rc = find_cifs_entry(xid, pTcon, file, | 1053 | rc = find_cifs_entry(xid, pTcon, file, |
1051 | &current_entry, &num_to_fill); | 1054 | &current_entry, &num_to_fill); |
diff --git a/include/acpi/processor.h b/include/acpi/processor.h index 06480bcabfdc..06ebb6ef72aa 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h | |||
@@ -319,6 +319,7 @@ static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr) | |||
319 | #endif /* CONFIG_CPU_FREQ */ | 319 | #endif /* CONFIG_CPU_FREQ */ |
320 | 320 | ||
321 | /* in processor_throttling.c */ | 321 | /* in processor_throttling.c */ |
322 | int acpi_processor_tstate_has_changed(struct acpi_processor *pr); | ||
322 | int acpi_processor_get_throttling_info(struct acpi_processor *pr); | 323 | int acpi_processor_get_throttling_info(struct acpi_processor *pr); |
323 | extern int acpi_processor_set_throttling(struct acpi_processor *pr, int state); | 324 | extern int acpi_processor_set_throttling(struct acpi_processor *pr, int state); |
324 | extern struct file_operations acpi_processor_throttling_fops; | 325 | extern struct file_operations acpi_processor_throttling_fops; |
diff --git a/include/asm-powerpc/kvm_ppc.h b/include/asm-powerpc/kvm_ppc.h index b35a7e3ef978..5a21115228af 100644 --- a/include/asm-powerpc/kvm_ppc.h +++ b/include/asm-powerpc/kvm_ppc.h | |||
@@ -57,6 +57,7 @@ extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
57 | 57 | ||
58 | extern int kvmppc_emulate_instruction(struct kvm_run *run, | 58 | extern int kvmppc_emulate_instruction(struct kvm_run *run, |
59 | struct kvm_vcpu *vcpu); | 59 | struct kvm_vcpu *vcpu); |
60 | extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu); | ||
60 | 61 | ||
61 | extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, | 62 | extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, |
62 | u64 asid, u32 flags); | 63 | u64 asid, u32 flags); |
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h index e0d4500d5f95..819e7d99ca0c 100644 --- a/include/asm-s390/system.h +++ b/include/asm-s390/system.h | |||
@@ -315,14 +315,14 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) | |||
315 | asm volatile( \ | 315 | asm volatile( \ |
316 | " lctlg %1,%2,0(%0)\n" \ | 316 | " lctlg %1,%2,0(%0)\n" \ |
317 | : : "a" (&array), "i" (low), "i" (high), \ | 317 | : : "a" (&array), "i" (low), "i" (high), \ |
318 | "m" (*(addrtype *)(array))); \ | 318 | "m" (*(addrtype *)(&array))); \ |
319 | }) | 319 | }) |
320 | 320 | ||
321 | #define __ctl_store(array, low, high) ({ \ | 321 | #define __ctl_store(array, low, high) ({ \ |
322 | typedef struct { char _[sizeof(array)]; } addrtype; \ | 322 | typedef struct { char _[sizeof(array)]; } addrtype; \ |
323 | asm volatile( \ | 323 | asm volatile( \ |
324 | " stctg %2,%3,0(%1)\n" \ | 324 | " stctg %2,%3,0(%1)\n" \ |
325 | : "=m" (*(addrtype *)(array)) \ | 325 | : "=m" (*(addrtype *)(&array)) \ |
326 | : "a" (&array), "i" (low), "i" (high)); \ | 326 | : "a" (&array), "i" (low), "i" (high)); \ |
327 | }) | 327 | }) |
328 | 328 | ||
@@ -333,14 +333,14 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) | |||
333 | asm volatile( \ | 333 | asm volatile( \ |
334 | " lctl %1,%2,0(%0)\n" \ | 334 | " lctl %1,%2,0(%0)\n" \ |
335 | : : "a" (&array), "i" (low), "i" (high), \ | 335 | : : "a" (&array), "i" (low), "i" (high), \ |
336 | "m" (*(addrtype *)(array))); \ | 336 | "m" (*(addrtype *)(&array))); \ |
337 | }) | 337 | }) |
338 | 338 | ||
339 | #define __ctl_store(array, low, high) ({ \ | 339 | #define __ctl_store(array, low, high) ({ \ |
340 | typedef struct { char _[sizeof(array)]; } addrtype; \ | 340 | typedef struct { char _[sizeof(array)]; } addrtype; \ |
341 | asm volatile( \ | 341 | asm volatile( \ |
342 | " stctl %2,%3,0(%1)\n" \ | 342 | " stctl %2,%3,0(%1)\n" \ |
343 | : "=m" (*(addrtype *)(array)) \ | 343 | : "=m" (*(addrtype *)(&array)) \ |
344 | : "a" (&array), "i" (low), "i" (high)); \ | 344 | : "a" (&array), "i" (low), "i" (high)); \ |
345 | }) | 345 | }) |
346 | 346 | ||
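The s390 __ctl_load/__ctl_store fix above is about taking the address of the macro argument explicitly: for an actual array, (array) and (&array) name the same address, but when the macro is handed a single scalar variable the old form would have treated its value as an address. A small demo of the distinction:

#include <stdio.h>

int main(void)
{
    unsigned long regs[4] = { 0 };
    unsigned long single = 0x1234;

    /* same address for an array, only the pointer types differ */
    printf("array:  %p %p\n", (void *)regs, (void *)&regs);

    /* for a scalar, only &single is an address; casting the value
     * 0x1234 to a pointer would be nonsense */
    printf("scalar: %p\n", (void *)&single);
    return 0;
}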
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 51e6b1e520e6..dcf77fa826b5 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h | |||
@@ -82,6 +82,7 @@ struct cpuidle_state_kobj { | |||
82 | }; | 82 | }; |
83 | 83 | ||
84 | struct cpuidle_device { | 84 | struct cpuidle_device { |
85 | unsigned int registered:1; | ||
85 | unsigned int enabled:1; | 86 | unsigned int enabled:1; |
86 | unsigned int cpu; | 87 | unsigned int cpu; |
87 | 88 | ||
diff --git a/include/linux/ide.h b/include/linux/ide.h index f8f195c20da2..9918772bf274 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h | |||
@@ -153,7 +153,7 @@ enum { ide_unknown, ide_generic, ide_pci, | |||
153 | ide_qd65xx, ide_umc8672, ide_ht6560b, | 153 | ide_qd65xx, ide_umc8672, ide_ht6560b, |
154 | ide_rz1000, ide_trm290, | 154 | ide_rz1000, ide_trm290, |
155 | ide_cmd646, ide_cy82c693, ide_4drives, | 155 | ide_cmd646, ide_cy82c693, ide_4drives, |
156 | ide_pmac, ide_etrax100, ide_acorn, | 156 | ide_pmac, ide_acorn, |
157 | ide_au1xxx, ide_palm3710 | 157 | ide_au1xxx, ide_palm3710 |
158 | }; | 158 | }; |
159 | 159 | ||
diff --git a/include/linux/ioport.h b/include/linux/ioport.h index d5d40a9f7929..c6801bffe76d 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h | |||
@@ -53,14 +53,14 @@ struct resource_list { | |||
53 | #define IORESOURCE_AUTO 0x40000000 | 53 | #define IORESOURCE_AUTO 0x40000000 |
54 | #define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */ | 54 | #define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */ |
55 | 55 | ||
56 | /* ISA PnP IRQ specific bits (IORESOURCE_BITS) */ | 56 | /* PnP IRQ specific bits (IORESOURCE_BITS) */ |
57 | #define IORESOURCE_IRQ_HIGHEDGE (1<<0) | 57 | #define IORESOURCE_IRQ_HIGHEDGE (1<<0) |
58 | #define IORESOURCE_IRQ_LOWEDGE (1<<1) | 58 | #define IORESOURCE_IRQ_LOWEDGE (1<<1) |
59 | #define IORESOURCE_IRQ_HIGHLEVEL (1<<2) | 59 | #define IORESOURCE_IRQ_HIGHLEVEL (1<<2) |
60 | #define IORESOURCE_IRQ_LOWLEVEL (1<<3) | 60 | #define IORESOURCE_IRQ_LOWLEVEL (1<<3) |
61 | #define IORESOURCE_IRQ_SHAREABLE (1<<4) | 61 | #define IORESOURCE_IRQ_SHAREABLE (1<<4) |
62 | 62 | ||
63 | /* ISA PnP DMA specific bits (IORESOURCE_BITS) */ | 63 | /* PnP DMA specific bits (IORESOURCE_BITS) */ |
64 | #define IORESOURCE_DMA_TYPE_MASK (3<<0) | 64 | #define IORESOURCE_DMA_TYPE_MASK (3<<0) |
65 | #define IORESOURCE_DMA_8BIT (0<<0) | 65 | #define IORESOURCE_DMA_8BIT (0<<0) |
66 | #define IORESOURCE_DMA_8AND16BIT (1<<0) | 66 | #define IORESOURCE_DMA_8AND16BIT (1<<0) |
@@ -76,7 +76,7 @@ struct resource_list { | |||
76 | #define IORESOURCE_DMA_TYPEB (2<<6) | 76 | #define IORESOURCE_DMA_TYPEB (2<<6) |
77 | #define IORESOURCE_DMA_TYPEF (3<<6) | 77 | #define IORESOURCE_DMA_TYPEF (3<<6) |
78 | 78 | ||
79 | /* ISA PnP memory I/O specific bits (IORESOURCE_BITS) */ | 79 | /* PnP memory I/O specific bits (IORESOURCE_BITS) */ |
80 | #define IORESOURCE_MEM_WRITEABLE (1<<0) /* dup: IORESOURCE_READONLY */ | 80 | #define IORESOURCE_MEM_WRITEABLE (1<<0) /* dup: IORESOURCE_READONLY */ |
81 | #define IORESOURCE_MEM_CACHEABLE (1<<1) /* dup: IORESOURCE_CACHEABLE */ | 81 | #define IORESOURCE_MEM_CACHEABLE (1<<1) /* dup: IORESOURCE_CACHEABLE */ |
82 | #define IORESOURCE_MEM_RANGELENGTH (1<<2) /* dup: IORESOURCE_RANGELENGTH */ | 82 | #define IORESOURCE_MEM_RANGELENGTH (1<<2) /* dup: IORESOURCE_RANGELENGTH */ |
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 10b666b61add..cde056e08181 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h | |||
@@ -396,8 +396,10 @@ static inline struct request_sock *inet6_reqsk_alloc(struct request_sock_ops *op | |||
396 | { | 396 | { |
397 | struct request_sock *req = reqsk_alloc(ops); | 397 | struct request_sock *req = reqsk_alloc(ops); |
398 | 398 | ||
399 | if (req != NULL) | 399 | if (req != NULL) { |
400 | inet_rsk(req)->inet6_rsk_offset = inet6_rsk_offset(req); | 400 | inet_rsk(req)->inet6_rsk_offset = inet6_rsk_offset(req); |
401 | inet6_rsk(req)->pktopts = NULL; | ||
402 | } | ||
401 | 403 | ||
402 | return req; | 404 | return req; |
403 | } | 405 | } |
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 398978972b7a..092b1b25291d 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
@@ -297,7 +297,7 @@ static inline gpa_t gfn_to_gpa(gfn_t gfn) | |||
297 | return (gpa_t)gfn << PAGE_SHIFT; | 297 | return (gpa_t)gfn << PAGE_SHIFT; |
298 | } | 298 | } |
299 | 299 | ||
300 | static inline void kvm_migrate_apic_timer(struct kvm_vcpu *vcpu) | 300 | static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu) |
301 | { | 301 | { |
302 | set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests); | 302 | set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests); |
303 | } | 303 | } |
diff --git a/include/linux/math64.h b/include/linux/math64.h index c1a5f81501ff..c87f1528703a 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h | |||
@@ -81,4 +81,25 @@ static inline s64 div_s64(s64 dividend, s32 divisor) | |||
81 | } | 81 | } |
82 | #endif | 82 | #endif |
83 | 83 | ||
84 | u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder); | ||
85 | |||
86 | static __always_inline u32 | ||
87 | __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) | ||
88 | { | ||
89 | u32 ret = 0; | ||
90 | |||
91 | while (dividend >= divisor) { | ||
92 | /* The following asm() prevents the compiler from | ||
93 | optimising this loop into a modulo operation. */ | ||
94 | asm("" : "+rm"(dividend)); | ||
95 | |||
96 | dividend -= divisor; | ||
97 | ret++; | ||
98 | } | ||
99 | |||
100 | *remainder = dividend; | ||
101 | |||
102 | return ret; | ||
103 | } | ||
104 | |||
84 | #endif /* _LINUX_MATH64_H */ | 105 | #endif /* _LINUX_MATH64_H */ |
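__iter_div_u64_rem() above trades a hardware divide for repeated subtraction, which is cheap when the quotient is small and, being always-inlined, is usable from places like the x86-64 vDSO that cannot call out to other kernel functions. A quick userspace check of the helper (copied here with stdint types; the empty asm keeps the compiler from folding the loop back into a divide):

#include <stdio.h>
#include <stdint.h>

static uint32_t iter_div_u64_rem(uint64_t dividend, uint32_t divisor,
                                 uint64_t *remainder)
{
    uint32_t ret = 0;

    while (dividend >= divisor) {
        asm("" : "+rm"(dividend));   /* defeat strength reduction */
        dividend -= divisor;
        ret++;
    }
    *remainder = dividend;
    return ret;
}

int main(void)
{
    uint64_t rem;
    uint32_t q = iter_div_u64_rem(2500000000ULL, 1000000000U, &rem);

    /* prints q=2 rem=500000000 */
    printf("q=%u rem=%llu\n", q, (unsigned long long)rem);
    return 0;
}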
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index a2aec2c0cfb5..b358c704d102 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h | |||
@@ -246,6 +246,7 @@ enum rt_class_t | |||
246 | { | 246 | { |
247 | RT_TABLE_UNSPEC=0, | 247 | RT_TABLE_UNSPEC=0, |
248 | /* User defined values */ | 248 | /* User defined values */ |
249 | RT_TABLE_COMPAT=252, | ||
249 | RT_TABLE_DEFAULT=253, | 250 | RT_TABLE_DEFAULT=253, |
250 | RT_TABLE_MAIN=254, | 251 | RT_TABLE_MAIN=254, |
251 | RT_TABLE_LOCAL=255, | 252 | RT_TABLE_LOCAL=255, |
diff --git a/include/linux/sched.h b/include/linux/sched.h index ae0be3c62375..c5d3f847ca8d 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -2026,6 +2026,19 @@ static inline int fatal_signal_pending(struct task_struct *p) | |||
2026 | return signal_pending(p) && __fatal_signal_pending(p); | 2026 | return signal_pending(p) && __fatal_signal_pending(p); |
2027 | } | 2027 | } |
2028 | 2028 | ||
2029 | static inline int signal_pending_state(long state, struct task_struct *p) | ||
2030 | { | ||
2031 | if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL))) | ||
2032 | return 0; | ||
2033 | if (!signal_pending(p)) | ||
2034 | return 0; | ||
2035 | |||
2036 | if (state & (__TASK_STOPPED | __TASK_TRACED)) | ||
2037 | return 0; | ||
2038 | |||
2039 | return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); | ||
2040 | } | ||
2041 | |||
2029 | static inline int need_resched(void) | 2042 | static inline int need_resched(void) |
2030 | { | 2043 | { |
2031 | return unlikely(test_thread_flag(TIF_NEED_RESCHED)); | 2044 | return unlikely(test_thread_flag(TIF_NEED_RESCHED)); |
diff --git a/include/linux/ssb/ssb_driver_gige.h b/include/linux/ssb/ssb_driver_gige.h index 01fbdf5fef22..942e38736901 100644 --- a/include/linux/ssb/ssb_driver_gige.h +++ b/include/linux/ssb/ssb_driver_gige.h | |||
@@ -100,7 +100,7 @@ extern char * nvram_get(const char *name); | |||
100 | /* Get the device MAC address */ | 100 | /* Get the device MAC address */ |
101 | static inline void ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr) | 101 | static inline void ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr) |
102 | { | 102 | { |
103 | #ifdef CONFIG_BCM947XX | 103 | #ifdef CONFIG_BCM47XX |
104 | char *res = nvram_get("et0macaddr"); | 104 | char *res = nvram_get("et0macaddr"); |
105 | if (res) | 105 | if (res) |
106 | memcpy(macaddr, res, 6); | 106 | memcpy(macaddr, res, 6); |
diff --git a/include/linux/time.h b/include/linux/time.h index d32ef0ad4c0a..e15206a7e82e 100644 --- a/include/linux/time.h +++ b/include/linux/time.h | |||
@@ -6,6 +6,7 @@ | |||
6 | #ifdef __KERNEL__ | 6 | #ifdef __KERNEL__ |
7 | # include <linux/cache.h> | 7 | # include <linux/cache.h> |
8 | # include <linux/seqlock.h> | 8 | # include <linux/seqlock.h> |
9 | # include <linux/math64.h> | ||
9 | #endif | 10 | #endif |
10 | 11 | ||
11 | #ifndef _STRUCT_TIMESPEC | 12 | #ifndef _STRUCT_TIMESPEC |
@@ -169,18 +170,13 @@ extern struct timeval ns_to_timeval(const s64 nsec); | |||
169 | * timespec_add_ns - Adds nanoseconds to a timespec | 170 | * timespec_add_ns - Adds nanoseconds to a timespec |
170 | * @a: pointer to timespec to be incremented | 171 | * @a: pointer to timespec to be incremented |
171 | * @ns: unsigned nanoseconds value to be added | 172 | * @ns: unsigned nanoseconds value to be added |
173 | * | ||
174 | * This must always be inlined because its used from the x86-64 vdso, | ||
175 | * which cannot call other kernel functions. | ||
172 | */ | 176 | */ |
173 | static inline void timespec_add_ns(struct timespec *a, u64 ns) | 177 | static __always_inline void timespec_add_ns(struct timespec *a, u64 ns) |
174 | { | 178 | { |
175 | ns += a->tv_nsec; | 179 | a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns); |
176 | while(unlikely(ns >= NSEC_PER_SEC)) { | ||
177 | /* The following asm() prevents the compiler from | ||
178 | * optimising this loop into a modulo operation. */ | ||
179 | asm("" : "+r"(ns)); | ||
180 | |||
181 | ns -= NSEC_PER_SEC; | ||
182 | a->tv_sec++; | ||
183 | } | ||
184 | a->tv_nsec = ns; | 180 | a->tv_nsec = ns; |
185 | } | 181 | } |
186 | #endif /* __KERNEL__ */ | 182 | #endif /* __KERNEL__ */ |
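With that helper in place, the timespec_add_ns() rewrite above collapses the open-coded subtraction loop into one call. A userspace rendering of the new form (it assumes the iter_div_u64_rem() sketch shown after the math64.h hunk; NSEC_PER_SEC spelled out here):

#include <time.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000UL

uint32_t iter_div_u64_rem(uint64_t dividend, uint32_t divisor,
                          uint64_t *remainder);   /* sketched earlier */

static void timespec_add_ns(struct timespec *a, uint64_t ns)
{
    uint64_t rem;

    a->tv_sec += iter_div_u64_rem((uint64_t)a->tv_nsec + ns,
                                  NSEC_PER_SEC, &rem);
    a->tv_nsec = (long)rem;
}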
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index 9405aa6cdf26..38c0571820fb 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h | |||
@@ -38,7 +38,7 @@ struct virtio_net_hdr | |||
38 | #define VIRTIO_NET_HDR_GSO_ECN 0x80 // TCP has ECN set | 38 | #define VIRTIO_NET_HDR_GSO_ECN 0x80 // TCP has ECN set |
39 | __u8 gso_type; | 39 | __u8 gso_type; |
40 | __u16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */ | 40 | __u16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */ |
41 | __u16 gso_size; /* Bytes to append to gso_hdr_len per frame */ | 41 | __u16 gso_size; /* Bytes to append to hdr_len per frame */ |
42 | __u16 csum_start; /* Position to start checksumming from */ | 42 | __u16 csum_start; /* Position to start checksumming from */ |
43 | __u16 csum_offset; /* Offset after that to place checksum */ | 43 | __u16 csum_offset; /* Offset after that to place checksum */ |
44 | }; | 44 | }; |
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index a42cd63d241a..9fabe5b38912 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h | |||
@@ -197,4 +197,14 @@ static inline int inet_iif(const struct sk_buff *skb) | |||
197 | return skb->rtable->rt_iif; | 197 | return skb->rtable->rt_iif; |
198 | } | 198 | } |
199 | 199 | ||
200 | static inline struct request_sock *inet_reqsk_alloc(struct request_sock_ops *ops) | ||
201 | { | ||
202 | struct request_sock *req = reqsk_alloc(ops); | ||
203 | |||
204 | if (req != NULL) | ||
205 | inet_rsk(req)->opt = NULL; | ||
206 | |||
207 | return req; | ||
208 | } | ||
209 | |||
200 | #endif /* _INET_SOCK_H */ | 210 | #endif /* _INET_SOCK_H */ |
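inet_reqsk_alloc() above gives the IPv4 request-sock allocator one place to NULL the options pointer, which is why the later dccp/tcp/syncookies hunks can drop their per-caller "ireq->opt = NULL" lines. The general shape of that refactor, as a hedged sketch with illustrative names:

#include <stdlib.h>

struct request {
    void *opt;     /* per-request options, filled in later */
    int   proto;
};

/* Initialise shared fields once in the allocator, so callers stop
 * open-coding the same assignment. */
static struct request *request_alloc(int proto)
{
    struct request *req = malloc(sizeof(*req));

    if (req) {
        req->opt = NULL;
        req->proto = proto;
    }
    return req;
}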
diff --git a/include/net/tcp.h b/include/net/tcp.h index 633147cb6bbc..d448310c82c1 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
@@ -433,7 +433,6 @@ extern struct sk_buff * tcp_make_synack(struct sock *sk, | |||
433 | 433 | ||
434 | extern int tcp_disconnect(struct sock *sk, int flags); | 434 | extern int tcp_disconnect(struct sock *sk, int flags); |
435 | 435 | ||
436 | extern void tcp_unhash(struct sock *sk); | ||
437 | 436 | ||
438 | /* From syncookies.c */ | 437 | /* From syncookies.c */ |
439 | extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS]; | 438 | extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS]; |
diff --git a/ipc/shm.c b/ipc/shm.c | |||
@@ -894,8 +894,6 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr) | |||
894 | if (!sfd) | 894 | if (!sfd) |
895 | goto out_put_dentry; | 895 | goto out_put_dentry; |
896 | 896 | ||
897 | err = -ENOMEM; | ||
898 | |||
899 | file = alloc_file(path.mnt, path.dentry, f_mode, &shm_file_operations); | 897 | file = alloc_file(path.mnt, path.dentry, f_mode, &shm_file_operations); |
900 | if (!file) | 898 | if (!file) |
901 | goto out_free; | 899 | goto out_free; |
diff --git a/kernel/sched.c b/kernel/sched.c index bfb8ad8ed171..eaf6751e7612 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -312,12 +312,15 @@ static DEFINE_SPINLOCK(task_group_lock); | |||
312 | #endif | 312 | #endif |
313 | 313 | ||
314 | /* | 314 | /* |
315 | * A weight of 0, 1 or ULONG_MAX can cause arithmetics problems. | 315 | * A weight of 0 or 1 can cause arithmetics problems. |
316 | * A weight of a cfs_rq is the sum of weights of which entities | ||
317 | * are queued on this cfs_rq, so a weight of a entity should not be | ||
318 | * too large, so as the shares value of a task group. | ||
316 | * (The default weight is 1024 - so there's no practical | 319 | * (The default weight is 1024 - so there's no practical |
317 | * limitation from this.) | 320 | * limitation from this.) |
318 | */ | 321 | */ |
319 | #define MIN_SHARES 2 | 322 | #define MIN_SHARES 2 |
320 | #define MAX_SHARES (ULONG_MAX - 1) | 323 | #define MAX_SHARES (1UL << 18) |
321 | 324 | ||
322 | static int init_task_group_load = INIT_TASK_GROUP_LOAD; | 325 | static int init_task_group_load = INIT_TASK_GROUP_LOAD; |
323 | #endif | 326 | #endif |
@@ -1337,8 +1340,13 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight, | |||
1337 | { | 1340 | { |
1338 | u64 tmp; | 1341 | u64 tmp; |
1339 | 1342 | ||
1340 | if (!lw->inv_weight) | 1343 | if (!lw->inv_weight) { |
1341 | lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2)/(lw->weight+1); | 1344 | if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST)) |
1345 | lw->inv_weight = 1; | ||
1346 | else | ||
1347 | lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2) | ||
1348 | / (lw->weight+1); | ||
1349 | } | ||
1342 | 1350 | ||
1343 | tmp = (u64)delta_exec * weight; | 1351 | tmp = (u64)delta_exec * weight; |
1344 | /* | 1352 | /* |
@@ -4159,12 +4167,10 @@ need_resched_nonpreemptible: | |||
4159 | clear_tsk_need_resched(prev); | 4167 | clear_tsk_need_resched(prev); |
4160 | 4168 | ||
4161 | if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { | 4169 | if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { |
4162 | if (unlikely((prev->state & TASK_INTERRUPTIBLE) && | 4170 | if (unlikely(signal_pending_state(prev->state, prev))) |
4163 | signal_pending(prev))) { | ||
4164 | prev->state = TASK_RUNNING; | 4171 | prev->state = TASK_RUNNING; |
4165 | } else { | 4172 | else |
4166 | deactivate_task(rq, prev, 1); | 4173 | deactivate_task(rq, prev, 1); |
4167 | } | ||
4168 | switch_count = &prev->nvcsw; | 4174 | switch_count = &prev->nvcsw; |
4169 | } | 4175 | } |
4170 | 4176 | ||
diff --git a/lib/div64.c b/lib/div64.c index bb5bd0c0f030..a111eb8de9cf 100644 --- a/lib/div64.c +++ b/lib/div64.c | |||
@@ -98,3 +98,13 @@ EXPORT_SYMBOL(div64_u64); | |||
98 | #endif | 98 | #endif |
99 | 99 | ||
100 | #endif /* BITS_PER_LONG == 32 */ | 100 | #endif /* BITS_PER_LONG == 32 */ |
101 | |||
102 | /* | ||
103 | * Iterative div/mod for use when dividend is not expected to be much | ||
104 | * bigger than divisor. | ||
105 | */ | ||
106 | u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) | ||
107 | { | ||
108 | return __iter_div_u64_rem(dividend, divisor, remainder); | ||
109 | } | ||
110 | EXPORT_SYMBOL(iter_div_u64_rem); | ||
diff --git a/mm/nommu.c b/mm/nommu.c index 3abd0845bda4..4462b6a3fcb9 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
@@ -104,21 +104,15 @@ EXPORT_SYMBOL(vmtruncate); | |||
104 | unsigned int kobjsize(const void *objp) | 104 | unsigned int kobjsize(const void *objp) |
105 | { | 105 | { |
106 | struct page *page; | 106 | struct page *page; |
107 | int order = 0; | ||
108 | 107 | ||
109 | /* | 108 | /* |
110 | * If the object we have should not have ksize performed on it, | 109 | * If the object we have should not have ksize performed on it, |
111 | * return size of 0 | 110 | * return size of 0 |
112 | */ | 111 | */ |
113 | if (!objp) | 112 | if (!objp || !virt_addr_valid(objp)) |
114 | return 0; | ||
115 | |||
116 | if ((unsigned long)objp >= memory_end) | ||
117 | return 0; | 113 | return 0; |
118 | 114 | ||
119 | page = virt_to_head_page(objp); | 115 | page = virt_to_head_page(objp); |
120 | if (!page) | ||
121 | return 0; | ||
122 | 116 | ||
123 | /* | 117 | /* |
124 | * If the allocator sets PageSlab, we know the pointer came from | 118 | * If the allocator sets PageSlab, we know the pointer came from |
@@ -129,18 +123,9 @@ unsigned int kobjsize(const void *objp) | |||
129 | 123 | ||
130 | /* | 124 | /* |
131 | * The ksize() function is only guaranteed to work for pointers | 125 | * The ksize() function is only guaranteed to work for pointers |
132 | * returned by kmalloc(). So handle arbitrary pointers, that we expect | 126 | * returned by kmalloc(). So handle arbitrary pointers here. |
133 | * always to be compound pages, here. | ||
134 | */ | ||
135 | if (PageCompound(page)) | ||
136 | order = compound_order(page); | ||
137 | |||
138 | /* | ||
139 | * Finally, handle arbitrary pointers that don't set PageSlab. | ||
140 | * Default to 0-order in the case when we're unable to ksize() | ||
141 | * the object. | ||
142 | */ | 127 | */ |
143 | return PAGE_SIZE << order; | 128 | return PAGE_SIZE << compound_order(page); |
144 | } | 129 | } |
145 | 130 | ||
146 | /* | 131 | /* |
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index c22a3780c14e..37d27bcb361f 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c | |||
@@ -589,7 +589,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
589 | if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) | 589 | if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) |
590 | goto drop; | 590 | goto drop; |
591 | 591 | ||
592 | req = reqsk_alloc(&dccp_request_sock_ops); | 592 | req = inet_reqsk_alloc(&dccp_request_sock_ops); |
593 | if (req == NULL) | 593 | if (req == NULL) |
594 | goto drop; | 594 | goto drop; |
595 | 595 | ||
@@ -605,7 +605,6 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
605 | ireq = inet_rsk(req); | 605 | ireq = inet_rsk(req); |
606 | ireq->loc_addr = ip_hdr(skb)->daddr; | 606 | ireq->loc_addr = ip_hdr(skb)->daddr; |
607 | ireq->rmt_addr = ip_hdr(skb)->saddr; | 607 | ireq->rmt_addr = ip_hdr(skb)->saddr; |
608 | ireq->opt = NULL; | ||
609 | 608 | ||
610 | /* | 609 | /* |
611 | * Step 3: Process LISTEN state | 610 | * Step 3: Process LISTEN state |
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 9b1129bb7ece..f7fe2a572d7b 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c | |||
@@ -421,7 +421,6 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
421 | ireq6 = inet6_rsk(req); | 421 | ireq6 = inet6_rsk(req); |
422 | ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr); | 422 | ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr); |
423 | ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr); | 423 | ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr); |
424 | ireq6->pktopts = NULL; | ||
425 | 424 | ||
426 | if (ipv6_opt_accepted(sk, skb) || | 425 | if (ipv6_opt_accepted(sk, skb) || |
427 | np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || | 426 | np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || |
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 3b83c34019fc..0d4d72827e4b 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c | |||
@@ -960,7 +960,10 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, | |||
960 | rtm->rtm_dst_len = dst_len; | 960 | rtm->rtm_dst_len = dst_len; |
961 | rtm->rtm_src_len = 0; | 961 | rtm->rtm_src_len = 0; |
962 | rtm->rtm_tos = tos; | 962 | rtm->rtm_tos = tos; |
963 | rtm->rtm_table = tb_id; | 963 | if (tb_id < 256) |
964 | rtm->rtm_table = tb_id; | ||
965 | else | ||
966 | rtm->rtm_table = RT_TABLE_COMPAT; | ||
964 | NLA_PUT_U32(skb, RTA_TABLE, tb_id); | 967 | NLA_PUT_U32(skb, RTA_TABLE, tb_id); |
965 | rtm->rtm_type = type; | 968 | rtm->rtm_type = type; |
966 | rtm->rtm_flags = fi->fib_flags; | 969 | rtm->rtm_flags = fi->fib_flags; |
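The fib_dump_info() change above copes with table ids that no longer fit the legacy one-byte rtm_table field: anything >= 256 is reported as RT_TABLE_COMPAT while the real id still travels in the RTA_TABLE attribute. The clamping rule, isolated as a sketch:

#include <stdint.h>

#define RT_TABLE_COMPAT 252

/* Value placed in the legacy rtm_table byte; the full 32-bit id is
 * carried separately in the RTA_TABLE netlink attribute. */
static uint8_t rtm_table_byte(uint32_t tb_id)
{
    return tb_id < 256 ? (uint8_t)tb_id : RT_TABLE_COMPAT;
}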
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 73ba98921d64..d182a2a26291 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c | |||
@@ -285,7 +285,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, | |||
285 | cookie_check_timestamp(&tcp_opt); | 285 | cookie_check_timestamp(&tcp_opt); |
286 | 286 | ||
287 | ret = NULL; | 287 | ret = NULL; |
288 | req = reqsk_alloc(&tcp_request_sock_ops); /* for safety */ | 288 | req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */ |
289 | if (!req) | 289 | if (!req) |
290 | goto out; | 290 | goto out; |
291 | 291 | ||
@@ -301,7 +301,6 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, | |||
301 | ireq->rmt_port = th->source; | 301 | ireq->rmt_port = th->source; |
302 | ireq->loc_addr = ip_hdr(skb)->daddr; | 302 | ireq->loc_addr = ip_hdr(skb)->daddr; |
303 | ireq->rmt_addr = ip_hdr(skb)->saddr; | 303 | ireq->rmt_addr = ip_hdr(skb)->saddr; |
304 | ireq->opt = NULL; | ||
305 | ireq->snd_wscale = tcp_opt.snd_wscale; | 304 | ireq->snd_wscale = tcp_opt.snd_wscale; |
306 | ireq->rcv_wscale = tcp_opt.rcv_wscale; | 305 | ireq->rcv_wscale = tcp_opt.rcv_wscale; |
307 | ireq->sack_ok = tcp_opt.sack_ok; | 306 | ireq->sack_ok = tcp_opt.sack_ok; |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index cd601a866c2f..4f8485c67d1a 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -1285,7 +1285,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1285 | if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) | 1285 | if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) |
1286 | goto drop; | 1286 | goto drop; |
1287 | 1287 | ||
1288 | req = reqsk_alloc(&tcp_request_sock_ops); | 1288 | req = inet_reqsk_alloc(&tcp_request_sock_ops); |
1289 | if (!req) | 1289 | if (!req) |
1290 | goto drop; | 1290 | goto drop; |
1291 | 1291 | ||
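The syncookies.c and tcp_ipv4.c hunks replace reqsk_alloc() with inet_reqsk_alloc(), and the same series drops the per-call-site ireq->opt = NULL / ireq6->pktopts = NULL lines in the dccp and IPv6 hunks. The rename suggests the common initialization now happens once in the allocation helper; the sketch below illustrates that wrapper-allocator pattern under that assumption, with hypothetical names mirroring the kernel's.

#include <stdio.h>
#include <stdlib.h>

struct ip_options;                       /* opaque here; only a pointer is stored */

struct inet_request_sock_sketch {
    struct ip_options *opt;
    /* ... other per-connection-request fields ... */
};

/* Base allocator, standing in for reqsk_alloc(): returns raw storage. */
static struct inet_request_sock_sketch *reqsk_alloc_sketch(void)
{
    return malloc(sizeof(struct inet_request_sock_sketch));
}

/* Wrapper standing in for inet_reqsk_alloc(): performs the common field
 * initialization once, so individual call sites cannot forget it. */
static struct inet_request_sock_sketch *inet_reqsk_alloc_sketch(void)
{
    struct inet_request_sock_sketch *req = reqsk_alloc_sketch();

    if (req)
        req->opt = NULL;                 /* was repeated at every call site */
    return req;
}

int main(void)
{
    struct inet_request_sock_sketch *req = inet_reqsk_alloc_sketch();

    printf("opt is %s\n", (req && req->opt == NULL) ? "NULL" : "uninitialized");
    free(req);
    return 0;
}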
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 3c6aafb02183..e84b3fd17fb4 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
@@ -191,7 +191,7 @@ lookup_protocol: | |||
191 | np->mcast_hops = -1; | 191 | np->mcast_hops = -1; |
192 | np->mc_loop = 1; | 192 | np->mc_loop = 1; |
193 | np->pmtudisc = IPV6_PMTUDISC_WANT; | 193 | np->pmtudisc = IPV6_PMTUDISC_WANT; |
194 | np->ipv6only = init_net.ipv6.sysctl.bindv6only; | 194 | np->ipv6only = net->ipv6.sysctl.bindv6only; |
195 | 195 | ||
196 | /* Init the ipv4 part of the socket since we can have sockets | 196 | /* Init the ipv4 part of the socket since we can have sockets |
197 | * using v6 API for ipv4. | 197 | * using v6 API for ipv4. |
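The af_inet6.c one-liner makes a new IPv6 socket read bindv6only from its own network namespace rather than always from init_net, so a per-namespace sysctl setting actually takes effect. A tiny userspace model of that distinction, assuming the structure shown (the real struct net layout differs):

#include <stdio.h>

/* Per-namespace settings: socket init must consult the socket's own
 * namespace, not the initial one. Names loosely mirror the kernel. */
struct net {
    int sysctl_bindv6only;
};

static struct net init_net = { .sysctl_bindv6only = 0 };

static int socket_ipv6only(const struct net *net)
{
    /* Before the fix, the equivalent of this line read init_net's value,
     * silently ignoring the namespace the socket was created in. */
    return net->sysctl_bindv6only;
}

int main(void)
{
    struct net other_ns = { .sysctl_bindv6only = 1 };

    printf("init_net socket: ipv6only=%d\n", socket_ipv6only(&init_net));
    printf("other_ns socket: ipv6only=%d\n", socket_ipv6only(&other_ns));
    return 0;
}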
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 938ce4ecde55..3ecc1157994e 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c | |||
@@ -198,7 +198,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) | |||
198 | ireq = inet_rsk(req); | 198 | ireq = inet_rsk(req); |
199 | ireq6 = inet6_rsk(req); | 199 | ireq6 = inet6_rsk(req); |
200 | treq = tcp_rsk(req); | 200 | treq = tcp_rsk(req); |
201 | ireq6->pktopts = NULL; | ||
202 | 201 | ||
203 | if (security_inet_conn_request(sk, skb, req)) { | 202 | if (security_inet_conn_request(sk, skb, req)) { |
204 | reqsk_free(req); | 203 | reqsk_free(req); |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 715965f0fac0..cb46749d4c32 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -1299,7 +1299,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1299 | treq = inet6_rsk(req); | 1299 | treq = inet6_rsk(req); |
1300 | ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr); | 1300 | ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr); |
1301 | ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr); | 1301 | ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr); |
1302 | treq->pktopts = NULL; | ||
1303 | if (!want_cookie) | 1302 | if (!want_cookie) |
1304 | TCP_ECN_create_request(req, tcp_hdr(skb)); | 1303 | TCP_ECN_create_request(req, tcp_hdr(skb)); |
1305 | 1304 | ||
diff --git a/net/key/af_key.c b/net/key/af_key.c index 9bba7ac5fee0..7470e367272b 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
@@ -3030,6 +3030,9 @@ static int key_notify_sa_expire(struct xfrm_state *x, struct km_event *c) | |||
3030 | 3030 | ||
3031 | static int pfkey_send_notify(struct xfrm_state *x, struct km_event *c) | 3031 | static int pfkey_send_notify(struct xfrm_state *x, struct km_event *c) |
3032 | { | 3032 | { |
3033 | if (atomic_read(&pfkey_socks_nr) == 0) | ||
3034 | return 0; | ||
3035 | |||
3033 | switch (c->event) { | 3036 | switch (c->event) { |
3034 | case XFRM_MSG_EXPIRE: | 3037 | case XFRM_MSG_EXPIRE: |
3035 | return key_notify_sa_expire(x, c); | 3038 | return key_notify_sa_expire(x, c); |
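The af_key.c hunk adds an early return when no PF_KEY sockets are open, so the kernel never builds notifications nobody will receive. The sketch below models that listener-count fast path in plain C11 with an atomic counter; pfkey_socks_nr is the kernel's counter, while everything else here is illustrative.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int listener_count = 0;

static int send_notify(const char *event)
{
    if (atomic_load(&listener_count) == 0)
        return 0;               /* no sockets open: skip the work entirely */

    /* ...expensive message construction and delivery would go here... */
    printf("delivering %s to %d listener(s)\n", event,
           atomic_load(&listener_count));
    return 1;
}

int main(void)
{
    send_notify("XFRM_MSG_EXPIRE");      /* dropped: no listeners */
    atomic_fetch_add(&listener_count, 1);
    send_notify("XFRM_MSG_EXPIRE");      /* delivered */
    return 0;
}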
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index c7314bf4bec2..006486b26726 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -899,7 +899,7 @@ extern const struct iw_handler_def ieee80211_iw_handler_def; | |||
899 | 899 | ||
900 | 900 | ||
901 | /* ieee80211_ioctl.c */ | 901 | /* ieee80211_ioctl.c */ |
902 | int ieee80211_set_freq(struct ieee80211_local *local, int freq); | 902 | int ieee80211_set_freq(struct net_device *dev, int freq); |
903 | /* ieee80211_sta.c */ | 903 | /* ieee80211_sta.c */ |
904 | void ieee80211_sta_timer(unsigned long data); | 904 | void ieee80211_sta_timer(unsigned long data); |
905 | void ieee80211_sta_work(struct work_struct *work); | 905 | void ieee80211_sta_work(struct work_struct *work); |
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 5c876450b14c..98c0b5e56ecc 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -511,6 +511,7 @@ static int ieee80211_stop(struct net_device *dev) | |||
511 | case IEEE80211_IF_TYPE_STA: | 511 | case IEEE80211_IF_TYPE_STA: |
512 | case IEEE80211_IF_TYPE_IBSS: | 512 | case IEEE80211_IF_TYPE_IBSS: |
513 | sdata->u.sta.state = IEEE80211_DISABLED; | 513 | sdata->u.sta.state = IEEE80211_DISABLED; |
514 | memset(sdata->u.sta.bssid, 0, ETH_ALEN); | ||
514 | del_timer_sync(&sdata->u.sta.timer); | 515 | del_timer_sync(&sdata->u.sta.timer); |
515 | /* | 516 | /* |
516 | * When we get here, the interface is marked down. | 517 | * When we get here, the interface is marked down. |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 841278f1df8e..4d2b582dd055 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -44,7 +44,7 @@ | |||
44 | #define IEEE80211_RETRY_AUTH_INTERVAL (1 * HZ) | 44 | #define IEEE80211_RETRY_AUTH_INTERVAL (1 * HZ) |
45 | #define IEEE80211_SCAN_INTERVAL (2 * HZ) | 45 | #define IEEE80211_SCAN_INTERVAL (2 * HZ) |
46 | #define IEEE80211_SCAN_INTERVAL_SLOW (15 * HZ) | 46 | #define IEEE80211_SCAN_INTERVAL_SLOW (15 * HZ) |
47 | #define IEEE80211_IBSS_JOIN_TIMEOUT (20 * HZ) | 47 | #define IEEE80211_IBSS_JOIN_TIMEOUT (7 * HZ) |
48 | 48 | ||
49 | #define IEEE80211_PROBE_DELAY (HZ / 33) | 49 | #define IEEE80211_PROBE_DELAY (HZ / 33) |
50 | #define IEEE80211_CHANNEL_TIME (HZ / 33) | 50 | #define IEEE80211_CHANNEL_TIME (HZ / 33) |
@@ -2336,6 +2336,7 @@ static int ieee80211_sta_join_ibss(struct net_device *dev, | |||
2336 | u8 *pos; | 2336 | u8 *pos; |
2337 | struct ieee80211_sub_if_data *sdata; | 2337 | struct ieee80211_sub_if_data *sdata; |
2338 | struct ieee80211_supported_band *sband; | 2338 | struct ieee80211_supported_band *sband; |
2339 | union iwreq_data wrqu; | ||
2339 | 2340 | ||
2340 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | 2341 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; |
2341 | 2342 | ||
@@ -2358,13 +2359,10 @@ static int ieee80211_sta_join_ibss(struct net_device *dev, | |||
2358 | sdata->drop_unencrypted = bss->capability & | 2359 | sdata->drop_unencrypted = bss->capability & |
2359 | WLAN_CAPABILITY_PRIVACY ? 1 : 0; | 2360 | WLAN_CAPABILITY_PRIVACY ? 1 : 0; |
2360 | 2361 | ||
2361 | res = ieee80211_set_freq(local, bss->freq); | 2362 | res = ieee80211_set_freq(dev, bss->freq); |
2362 | 2363 | ||
2363 | if (local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS) { | 2364 | if (res) |
2364 | printk(KERN_DEBUG "%s: IBSS not allowed on frequency " | 2365 | return res; |
2365 | "%d MHz\n", dev->name, local->oper_channel->center_freq); | ||
2366 | return -1; | ||
2367 | } | ||
2368 | 2366 | ||
2369 | /* Set beacon template */ | 2367 | /* Set beacon template */ |
2370 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); | 2368 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); |
@@ -2479,6 +2477,10 @@ static int ieee80211_sta_join_ibss(struct net_device *dev, | |||
2479 | ifsta->state = IEEE80211_IBSS_JOINED; | 2477 | ifsta->state = IEEE80211_IBSS_JOINED; |
2480 | mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL); | 2478 | mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL); |
2481 | 2479 | ||
2480 | memset(&wrqu, 0, sizeof(wrqu)); | ||
2481 | memcpy(wrqu.ap_addr.sa_data, bss->bssid, ETH_ALEN); | ||
2482 | wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); | ||
2483 | |||
2482 | return res; | 2484 | return res; |
2483 | } | 2485 | } |
2484 | 2486 | ||
@@ -3486,7 +3488,7 @@ static int ieee80211_sta_config_auth(struct net_device *dev, | |||
3486 | spin_unlock_bh(&local->sta_bss_lock); | 3488 | spin_unlock_bh(&local->sta_bss_lock); |
3487 | 3489 | ||
3488 | if (selected) { | 3490 | if (selected) { |
3489 | ieee80211_set_freq(local, selected->freq); | 3491 | ieee80211_set_freq(dev, selected->freq); |
3490 | if (!(ifsta->flags & IEEE80211_STA_SSID_SET)) | 3492 | if (!(ifsta->flags & IEEE80211_STA_SSID_SET)) |
3491 | ieee80211_sta_set_ssid(dev, selected->ssid, | 3493 | ieee80211_sta_set_ssid(dev, selected->ssid, |
3492 | selected->ssid_len); | 3494 | selected->ssid_len); |
diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c index 8311bb24f9f3..a8bb8e31b1ec 100644 --- a/net/mac80211/wext.c +++ b/net/mac80211/wext.c | |||
@@ -290,14 +290,22 @@ static int ieee80211_ioctl_giwmode(struct net_device *dev, | |||
290 | return 0; | 290 | return 0; |
291 | } | 291 | } |
292 | 292 | ||
293 | int ieee80211_set_freq(struct ieee80211_local *local, int freqMHz) | 293 | int ieee80211_set_freq(struct net_device *dev, int freqMHz) |
294 | { | 294 | { |
295 | int ret = -EINVAL; | 295 | int ret = -EINVAL; |
296 | struct ieee80211_channel *chan; | 296 | struct ieee80211_channel *chan; |
297 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
298 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
297 | 299 | ||
298 | chan = ieee80211_get_channel(local->hw.wiphy, freqMHz); | 300 | chan = ieee80211_get_channel(local->hw.wiphy, freqMHz); |
299 | 301 | ||
300 | if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) { | 302 | if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) { |
303 | if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && | ||
304 | chan->flags & IEEE80211_CHAN_NO_IBSS) { | ||
305 | printk(KERN_DEBUG "%s: IBSS not allowed on frequency " | ||
306 | "%d MHz\n", dev->name, chan->center_freq); | ||
307 | return ret; | ||
308 | } | ||
301 | local->oper_channel = chan; | 309 | local->oper_channel = chan; |
302 | 310 | ||
303 | if (local->sta_sw_scanning || local->sta_hw_scanning) | 311 | if (local->sta_sw_scanning || local->sta_hw_scanning) |
@@ -315,7 +323,6 @@ static int ieee80211_ioctl_siwfreq(struct net_device *dev, | |||
315 | struct iw_request_info *info, | 323 | struct iw_request_info *info, |
316 | struct iw_freq *freq, char *extra) | 324 | struct iw_freq *freq, char *extra) |
317 | { | 325 | { |
318 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
319 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 326 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
320 | 327 | ||
321 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA) | 328 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA) |
@@ -329,14 +336,14 @@ static int ieee80211_ioctl_siwfreq(struct net_device *dev, | |||
329 | IEEE80211_STA_AUTO_CHANNEL_SEL; | 336 | IEEE80211_STA_AUTO_CHANNEL_SEL; |
330 | return 0; | 337 | return 0; |
331 | } else | 338 | } else |
332 | return ieee80211_set_freq(local, | 339 | return ieee80211_set_freq(dev, |
333 | ieee80211_channel_to_frequency(freq->m)); | 340 | ieee80211_channel_to_frequency(freq->m)); |
334 | } else { | 341 | } else { |
335 | int i, div = 1000000; | 342 | int i, div = 1000000; |
336 | for (i = 0; i < freq->e; i++) | 343 | for (i = 0; i < freq->e; i++) |
337 | div /= 10; | 344 | div /= 10; |
338 | if (div > 0) | 345 | if (div > 0) |
339 | return ieee80211_set_freq(local, freq->m / div); | 346 | return ieee80211_set_freq(dev, freq->m / div); |
340 | else | 347 | else |
341 | return -EINVAL; | 348 | return -EINVAL; |
342 | } | 349 | } |
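With the wext.c change, ieee80211_set_freq() takes the net_device and can see the interface type, so the "no IBSS on this channel" policy lives in one place instead of being re-implemented by callers such as the mlme.c hunk above. A standalone model of that centralized validation follows; the flag values and type names are sketch constants, not the kernel's.

#include <stdio.h>

#define CHAN_DISABLED   0x1
#define CHAN_NO_IBSS    0x2

enum if_type { IF_TYPE_STA, IF_TYPE_IBSS };

struct channel {
    int freq_mhz;
    unsigned int flags;
};

/* Returns 0 on success, -1 when the frequency is invalid for this
 * interface type; every caller now inherits the NO_IBSS policy. */
static int set_freq(enum if_type type, const struct channel *chan)
{
    if (!chan || (chan->flags & CHAN_DISABLED))
        return -1;
    if (type == IF_TYPE_IBSS && (chan->flags & CHAN_NO_IBSS)) {
        fprintf(stderr, "IBSS not allowed on frequency %d MHz\n",
                chan->freq_mhz);
        return -1;
    }
    /* ...tune the hardware here... */
    return 0;
}

int main(void)
{
    struct channel c = { .freq_mhz = 5180, .flags = CHAN_NO_IBSS };

    printf("STA  on 5180: %d\n", set_freq(IF_TYPE_STA, &c));
    printf("IBSS on 5180: %d\n", set_freq(IF_TYPE_IBSS, &c));
    return 0;
}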
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 508c5895c680..a07f91aac920 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c | |||
@@ -467,6 +467,25 @@ static void parse_elf_finish(struct elf_info *info) | |||
467 | release_file(info->hdr, info->size); | 467 | release_file(info->hdr, info->size); |
468 | } | 468 | } |
469 | 469 | ||
470 | static int ignore_undef_symbol(struct elf_info *info, const char *symname) | ||
471 | { | ||
472 | /* ignore __this_module, it will be resolved shortly */ | ||
473 | if (strcmp(symname, MODULE_SYMBOL_PREFIX "__this_module") == 0) | ||
474 | return 1; | ||
475 | /* ignore global offset table */ | ||
476 | if (strcmp(symname, "_GLOBAL_OFFSET_TABLE_") == 0) | ||
477 | return 1; | ||
478 | if (info->hdr->e_machine == EM_PPC) | ||
479 | /* Special register function linked on all modules during final link of .ko */ | ||
480 | if (strncmp(symname, "_restgpr_", sizeof("_restgpr_") - 1) == 0 || | ||
481 | strncmp(symname, "_savegpr_", sizeof("_savegpr_") - 1) == 0 || | ||
482 | strncmp(symname, "_rest32gpr_", sizeof("_rest32gpr_") - 1) == 0 || | ||
483 | strncmp(symname, "_save32gpr_", sizeof("_save32gpr_") - 1) == 0) | ||
484 | return 1; | ||
485 | /* Do not ignore this symbol */ | ||
486 | return 0; | ||
487 | } | ||
488 | |||
470 | #define CRC_PFX MODULE_SYMBOL_PREFIX "__crc_" | 489 | #define CRC_PFX MODULE_SYMBOL_PREFIX "__crc_" |
471 | #define KSYMTAB_PFX MODULE_SYMBOL_PREFIX "__ksymtab_" | 490 | #define KSYMTAB_PFX MODULE_SYMBOL_PREFIX "__ksymtab_" |
472 | 491 | ||
@@ -493,11 +512,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info, | |||
493 | if (ELF_ST_BIND(sym->st_info) != STB_GLOBAL && | 512 | if (ELF_ST_BIND(sym->st_info) != STB_GLOBAL && |
494 | ELF_ST_BIND(sym->st_info) != STB_WEAK) | 513 | ELF_ST_BIND(sym->st_info) != STB_WEAK) |
495 | break; | 514 | break; |
496 | /* ignore global offset table */ | 515 | if (ignore_undef_symbol(info, symname)) |
497 | if (strcmp(symname, "_GLOBAL_OFFSET_TABLE_") == 0) | ||
498 | break; | ||
499 | /* ignore __this_module, it will be resolved shortly */ | ||
500 | if (strcmp(symname, MODULE_SYMBOL_PREFIX "__this_module") == 0) | ||
501 | break; | 516 | break; |
502 | /* cope with newer glibc (2.3.4 or higher) STT_ definition in elf.h */ | 517 | /* cope with newer glibc (2.3.4 or higher) STT_ definition in elf.h */ |
503 | #if defined(STT_REGISTER) || defined(STT_SPARC_REGISTER) | 518 | #if defined(STT_REGISTER) || defined(STT_SPARC_REGISTER) |
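The modpost.c hunks pull the "which undefined symbols are expected" checks out of handle_modversions() into ignore_undef_symbol(), adding the PPC register save/restore helpers that the final .ko link provides. Since modpost is a host tool, a nearly verbatim standalone version of the predicate is easy to exercise; here MODULE_SYMBOL_PREFIX is taken as empty and the ELF machine test is reduced to a flag, purely to keep the sketch self-contained.

#include <stdio.h>
#include <string.h>

static int starts_with(const char *s, const char *prefix)
{
    return strncmp(s, prefix, strlen(prefix)) == 0;
}

static int ignore_undef_symbol(int is_ppc, const char *symname)
{
    /* __this_module is resolved when the .ko is finally linked */
    if (strcmp(symname, "__this_module") == 0)
        return 1;
    /* the GOT is provided by the loader, not by another module */
    if (strcmp(symname, "_GLOBAL_OFFSET_TABLE_") == 0)
        return 1;
    /* PPC register save/restore helpers are linked into every module */
    if (is_ppc &&
        (starts_with(symname, "_restgpr_") ||
         starts_with(symname, "_savegpr_") ||
         starts_with(symname, "_rest32gpr_") ||
         starts_with(symname, "_save32gpr_")))
        return 1;
    return 0;   /* anything else is a genuine undefined symbol */
}

int main(void)
{
    printf("%d\n", ignore_undef_symbol(1, "_restgpr_31_x"));  /* 1: ignored */
    printf("%d\n", ignore_undef_symbol(0, "_restgpr_31_x"));  /* 0: only on PPC */
    printf("%d\n", ignore_undef_symbol(0, "printk"));         /* 0: real undef */
    return 0;
}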
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c index 4232fd75dd20..98778cb69c6e 100644 --- a/virt/kvm/ioapic.c +++ b/virt/kvm/ioapic.c | |||
@@ -45,7 +45,7 @@ | |||
45 | #else | 45 | #else |
46 | #define ioapic_debug(fmt, arg...) | 46 | #define ioapic_debug(fmt, arg...) |
47 | #endif | 47 | #endif |
48 | static void ioapic_deliver(struct kvm_ioapic *vioapic, int irq); | 48 | static int ioapic_deliver(struct kvm_ioapic *vioapic, int irq); |
49 | 49 | ||
50 | static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic, | 50 | static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic, |
51 | unsigned long addr, | 51 | unsigned long addr, |
@@ -89,8 +89,8 @@ static void ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx) | |||
89 | pent = &ioapic->redirtbl[idx]; | 89 | pent = &ioapic->redirtbl[idx]; |
90 | 90 | ||
91 | if (!pent->fields.mask) { | 91 | if (!pent->fields.mask) { |
92 | ioapic_deliver(ioapic, idx); | 92 | int injected = ioapic_deliver(ioapic, idx); |
93 | if (pent->fields.trig_mode == IOAPIC_LEVEL_TRIG) | 93 | if (injected && pent->fields.trig_mode == IOAPIC_LEVEL_TRIG) |
94 | pent->fields.remote_irr = 1; | 94 | pent->fields.remote_irr = 1; |
95 | } | 95 | } |
96 | if (!pent->fields.trig_mode) | 96 | if (!pent->fields.trig_mode) |
@@ -133,7 +133,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) | |||
133 | } | 133 | } |
134 | } | 134 | } |
135 | 135 | ||
136 | static void ioapic_inj_irq(struct kvm_ioapic *ioapic, | 136 | static int ioapic_inj_irq(struct kvm_ioapic *ioapic, |
137 | struct kvm_vcpu *vcpu, | 137 | struct kvm_vcpu *vcpu, |
138 | u8 vector, u8 trig_mode, u8 delivery_mode) | 138 | u8 vector, u8 trig_mode, u8 delivery_mode) |
139 | { | 139 | { |
@@ -143,7 +143,7 @@ static void ioapic_inj_irq(struct kvm_ioapic *ioapic, | |||
143 | ASSERT((delivery_mode == IOAPIC_FIXED) || | 143 | ASSERT((delivery_mode == IOAPIC_FIXED) || |
144 | (delivery_mode == IOAPIC_LOWEST_PRIORITY)); | 144 | (delivery_mode == IOAPIC_LOWEST_PRIORITY)); |
145 | 145 | ||
146 | kvm_apic_set_irq(vcpu, vector, trig_mode); | 146 | return kvm_apic_set_irq(vcpu, vector, trig_mode); |
147 | } | 147 | } |
148 | 148 | ||
149 | static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest, | 149 | static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest, |
@@ -186,7 +186,7 @@ static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest, | |||
186 | return mask; | 186 | return mask; |
187 | } | 187 | } |
188 | 188 | ||
189 | static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq) | 189 | static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq) |
190 | { | 190 | { |
191 | u8 dest = ioapic->redirtbl[irq].fields.dest_id; | 191 | u8 dest = ioapic->redirtbl[irq].fields.dest_id; |
192 | u8 dest_mode = ioapic->redirtbl[irq].fields.dest_mode; | 192 | u8 dest_mode = ioapic->redirtbl[irq].fields.dest_mode; |
@@ -195,7 +195,7 @@ static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq) | |||
195 | u8 trig_mode = ioapic->redirtbl[irq].fields.trig_mode; | 195 | u8 trig_mode = ioapic->redirtbl[irq].fields.trig_mode; |
196 | u32 deliver_bitmask; | 196 | u32 deliver_bitmask; |
197 | struct kvm_vcpu *vcpu; | 197 | struct kvm_vcpu *vcpu; |
198 | int vcpu_id; | 198 | int vcpu_id, r = 0; |
199 | 199 | ||
200 | ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x " | 200 | ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x " |
201 | "vector=%x trig_mode=%x\n", | 201 | "vector=%x trig_mode=%x\n", |
@@ -204,7 +204,7 @@ static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq) | |||
204 | deliver_bitmask = ioapic_get_delivery_bitmask(ioapic, dest, dest_mode); | 204 | deliver_bitmask = ioapic_get_delivery_bitmask(ioapic, dest, dest_mode); |
205 | if (!deliver_bitmask) { | 205 | if (!deliver_bitmask) { |
206 | ioapic_debug("no target on destination\n"); | 206 | ioapic_debug("no target on destination\n"); |
207 | return; | 207 | return 0; |
208 | } | 208 | } |
209 | 209 | ||
210 | switch (delivery_mode) { | 210 | switch (delivery_mode) { |
@@ -216,7 +216,7 @@ static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq) | |||
216 | vcpu = ioapic->kvm->vcpus[0]; | 216 | vcpu = ioapic->kvm->vcpus[0]; |
217 | #endif | 217 | #endif |
218 | if (vcpu != NULL) | 218 | if (vcpu != NULL) |
219 | ioapic_inj_irq(ioapic, vcpu, vector, | 219 | r = ioapic_inj_irq(ioapic, vcpu, vector, |
220 | trig_mode, delivery_mode); | 220 | trig_mode, delivery_mode); |
221 | else | 221 | else |
222 | ioapic_debug("null lowest prio vcpu: " | 222 | ioapic_debug("null lowest prio vcpu: " |
@@ -234,7 +234,7 @@ static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq) | |||
234 | deliver_bitmask &= ~(1 << vcpu_id); | 234 | deliver_bitmask &= ~(1 << vcpu_id); |
235 | vcpu = ioapic->kvm->vcpus[vcpu_id]; | 235 | vcpu = ioapic->kvm->vcpus[vcpu_id]; |
236 | if (vcpu) { | 236 | if (vcpu) { |
237 | ioapic_inj_irq(ioapic, vcpu, vector, | 237 | r = ioapic_inj_irq(ioapic, vcpu, vector, |
238 | trig_mode, delivery_mode); | 238 | trig_mode, delivery_mode); |
239 | } | 239 | } |
240 | } | 240 | } |
@@ -246,6 +246,7 @@ static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq) | |||
246 | delivery_mode); | 246 | delivery_mode); |
247 | break; | 247 | break; |
248 | } | 248 | } |
249 | return r; | ||
249 | } | 250 | } |
250 | 251 | ||
251 | void kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level) | 252 | void kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level) |
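The ioapic.c hunks change ioapic_deliver()/ioapic_inj_irq() to report whether an interrupt was actually injected, and ioapic_service() sets the level-triggered remote_irr bit only in that case. The userspace model below illustrates that result-propagation pattern; the structures are invented for the sketch and the delivery step is stubbed out.

#include <stdbool.h>
#include <stdio.h>

struct redir_entry {
    bool mask;
    bool level_triggered;
    bool remote_irr;
};

/* Stubbed delivery: pretend odd irqs have no target vcpu, so nothing
 * is injected for them. Returns 1 when injected, 0 otherwise. */
static int deliver(int irq)
{
    return (irq % 2) == 0;
}

static void service(struct redir_entry *pent, int irq)
{
    if (!pent->mask) {
        int injected = deliver(irq);

        /* previously remote_irr was set unconditionally here, even when
         * nothing was injected */
        if (injected && pent->level_triggered)
            pent->remote_irr = true;
    }
}

int main(void)
{
    struct redir_entry e = { .mask = false, .level_triggered = true };

    service(&e, 1);
    printf("irq 1: remote_irr=%d\n", e.remote_irr);   /* stays 0 */
    service(&e, 2);
    printf("irq 2: remote_irr=%d\n", e.remote_irr);   /* now 1 */
    return 0;
}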