author     Linus Torvalds <torvalds@linux-foundation.org>  2008-01-30 08:40:09 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-01-30 08:40:09 -0500
commit     dd430ca20c40ecccd6954a7efd13d4398f507728 (patch)
tree       b65089436d17b2bcc6054ede2e335a821b50007f /drivers
parent     60e233172eabdd1f831bd48631b9626ce2279d9b (diff)
parent     afadcd788f37bfa62d92662e54a720c26c91becf (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86
* git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86: (890 commits)
  x86: fix nodemap_size according to nodeid bits
  x86: fix overlap between pagetable with bss section
  x86: add PCI IDs to k8topology_64.c
  x86: fix early_ioremap pagetable ops
  x86: use the same pgd_list for PAE and 64-bit
  x86: defer cr3 reload when doing pud_clear()
  x86: early boot debugging via FireWire (ohci1394_dma=early)
  x86: don't special-case pmd allocations as much
  x86: shrink some ifdefs in fault.c
  x86: ignore spurious faults
  x86: remove nx_enabled from fault.c
  x86: unify fault_32|64.c
  x86: unify fault_32|64.c with ifdefs
  x86: unify fault_32|64.c by ifdef'd function bodies
  x86: arch/x86/mm/init_32.c printk fixes
  x86: arch/x86/mm/init_32.c cleanup
  x86: arch/x86/mm/init_64.c printk fixes
  x86: unify ioremap
  x86: fixes some bugs about EFI memory map handling
  x86: use reboot_type on EFI 32
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Makefile                      |    2
-rw-r--r--  drivers/acpi/processor_idle.c         |   34
-rw-r--r--  drivers/char/agp/ali-agp.c            |    2
-rw-r--r--  drivers/char/agp/backend.c            |    3
-rw-r--r--  drivers/char/agp/generic.c            |    3
-rw-r--r--  drivers/char/agp/i460-agp.c           |    2
-rw-r--r--  drivers/char/agp/intel-agp.c          |   11
-rw-r--r--  drivers/char/hpet.c                   |  126
-rw-r--r--  drivers/char/rtc.c                    |  253
-rw-r--r--  drivers/cpufreq/cpufreq.c             |    2
-rw-r--r--  drivers/firmware/dmi_scan.c           |   26
-rw-r--r--  drivers/ieee1394/Makefile             |    1
-rw-r--r--  drivers/ieee1394/init_ohci1394_dma.c  |  285
-rw-r--r--  drivers/input/mouse/pc110pad.c        |    7
-rw-r--r--  drivers/kvm/svm.c                     |    2
-rw-r--r--  drivers/kvm/vmx.c                     |    8
-rw-r--r--  drivers/lguest/x86/core.c             |    4
-rw-r--r--  drivers/pnp/pnpbios/bioscalls.c       |    5
-rw-r--r--  drivers/video/vermilion/vermilion.c   |   15
19 files changed, 550 insertions(+), 241 deletions(-)
diff --git a/drivers/Makefile b/drivers/Makefile
index 8cb37e3557d4..d92d4d82d001 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -38,7 +38,7 @@ obj-$(CONFIG_SCSI) += scsi/
38obj-$(CONFIG_ATA) += ata/ 38obj-$(CONFIG_ATA) += ata/
39obj-$(CONFIG_FUSION) += message/ 39obj-$(CONFIG_FUSION) += message/
40obj-$(CONFIG_FIREWIRE) += firewire/ 40obj-$(CONFIG_FIREWIRE) += firewire/
41obj-$(CONFIG_IEEE1394) += ieee1394/ 41obj-y += ieee1394/
42obj-$(CONFIG_UIO) += uio/ 42obj-$(CONFIG_UIO) += uio/
43obj-y += cdrom/ 43obj-y += cdrom/
44obj-y += auxdisplay/ 44obj-y += auxdisplay/
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 2235f4e02d26..eb1f82f79153 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -357,6 +357,26 @@ int acpi_processor_resume(struct acpi_device * device)
357 return 0; 357 return 0;
358} 358}
359 359
360#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
361static int tsc_halts_in_c(int state)
362{
363 switch (boot_cpu_data.x86_vendor) {
364 case X86_VENDOR_AMD:
365 /*
366 * AMD Fam10h TSC will tick in all
367 * C/P/S0/S1 states when this bit is set.
368 */
369 if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
370 return 0;
371 /*FALL THROUGH*/
372 case X86_VENDOR_INTEL:
373 /* Several cases known where TSC halts in C2 too */
374 default:
375 return state > ACPI_STATE_C1;
376 }
377}
378#endif
379
360#ifndef CONFIG_CPU_IDLE 380#ifndef CONFIG_CPU_IDLE
361static void acpi_processor_idle(void) 381static void acpi_processor_idle(void)
362{ 382{
@@ -516,7 +536,8 @@ static void acpi_processor_idle(void)
516 536
517#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC) 537#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
518 /* TSC halts in C2, so notify users */ 538 /* TSC halts in C2, so notify users */
519 mark_tsc_unstable("possible TSC halt in C2"); 539 if (tsc_halts_in_c(ACPI_STATE_C2))
540 mark_tsc_unstable("possible TSC halt in C2");
520#endif 541#endif
521 /* Compute time (ticks) that we were actually asleep */ 542 /* Compute time (ticks) that we were actually asleep */
522 sleep_ticks = ticks_elapsed(t1, t2); 543 sleep_ticks = ticks_elapsed(t1, t2);
@@ -534,6 +555,7 @@ static void acpi_processor_idle(void)
534 break; 555 break;
535 556
536 case ACPI_STATE_C3: 557 case ACPI_STATE_C3:
558 acpi_unlazy_tlb(smp_processor_id());
537 /* 559 /*
538 * Must be done before busmaster disable as we might 560 * Must be done before busmaster disable as we might
539 * need to access HPET ! 561 * need to access HPET !
@@ -579,7 +601,8 @@ static void acpi_processor_idle(void)
579 601
580#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC) 602#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
581 /* TSC halts in C3, so notify users */ 603 /* TSC halts in C3, so notify users */
582 mark_tsc_unstable("TSC halts in C3"); 604 if (tsc_halts_in_c(ACPI_STATE_C3))
605 mark_tsc_unstable("TSC halts in C3");
583#endif 606#endif
584 /* Compute time (ticks) that we were actually asleep */ 607 /* Compute time (ticks) that we were actually asleep */
585 sleep_ticks = ticks_elapsed(t1, t2); 608 sleep_ticks = ticks_elapsed(t1, t2);
@@ -1423,6 +1446,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
1423 return 0; 1446 return 0;
1424 } 1447 }
1425 1448
1449 acpi_unlazy_tlb(smp_processor_id());
1426 /* 1450 /*
1427 * Must be done before busmaster disable as we might need to 1451 * Must be done before busmaster disable as we might need to
1428 * access HPET ! 1452 * access HPET !
@@ -1443,7 +1467,8 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
1443 1467
1444#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC) 1468#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
1445 /* TSC could halt in idle, so notify users */ 1469 /* TSC could halt in idle, so notify users */
1446 mark_tsc_unstable("TSC halts in idle");; 1470 if (tsc_halts_in_c(cx->type))
1471 mark_tsc_unstable("TSC halts in idle");;
1447#endif 1472#endif
1448 sleep_ticks = ticks_elapsed(t1, t2); 1473 sleep_ticks = ticks_elapsed(t1, t2);
1449 1474
@@ -1554,7 +1579,8 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
1554 1579
1555#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC) 1580#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
1556 /* TSC could halt in idle, so notify users */ 1581 /* TSC could halt in idle, so notify users */
1557 mark_tsc_unstable("TSC halts in idle"); 1582 if (tsc_halts_in_c(ACPI_STATE_C3))
1583 mark_tsc_unstable("TSC halts in idle");
1558#endif 1584#endif
1559 sleep_ticks = ticks_elapsed(t1, t2); 1585 sleep_ticks = ticks_elapsed(t1, t2);
1560 /* Tell the scheduler how much we idled: */ 1586 /* Tell the scheduler how much we idled: */
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index aa5ddb716ffb..1ffb381130c3 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -145,7 +145,6 @@ static void *m1541_alloc_page(struct agp_bridge_data *bridge)
145 void *addr = agp_generic_alloc_page(agp_bridge); 145 void *addr = agp_generic_alloc_page(agp_bridge);
146 u32 temp; 146 u32 temp;
147 147
148 global_flush_tlb();
149 if (!addr) 148 if (!addr)
150 return NULL; 149 return NULL;
151 150
@@ -162,7 +161,6 @@ static void ali_destroy_page(void * addr, int flags)
162 if (flags & AGP_PAGE_DESTROY_UNMAP) { 161 if (flags & AGP_PAGE_DESTROY_UNMAP) {
163 global_cache_flush(); /* is this really needed? --hch */ 162 global_cache_flush(); /* is this really needed? --hch */
164 agp_generic_destroy_page(addr, flags); 163 agp_generic_destroy_page(addr, flags);
165 global_flush_tlb();
166 } else 164 } else
167 agp_generic_destroy_page(addr, flags); 165 agp_generic_destroy_page(addr, flags);
168 } 166 }
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 832ded20fe70..2720882e66fe 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -147,7 +147,6 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
147 printk(KERN_ERR PFX "unable to get memory for scratch page.\n"); 147 printk(KERN_ERR PFX "unable to get memory for scratch page.\n");
148 return -ENOMEM; 148 return -ENOMEM;
149 } 149 }
150 flush_agp_mappings();
151 150
152 bridge->scratch_page_real = virt_to_gart(addr); 151 bridge->scratch_page_real = virt_to_gart(addr);
153 bridge->scratch_page = 152 bridge->scratch_page =
@@ -191,7 +190,6 @@ err_out:
191 if (bridge->driver->needs_scratch_page) { 190 if (bridge->driver->needs_scratch_page) {
192 bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real), 191 bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
193 AGP_PAGE_DESTROY_UNMAP); 192 AGP_PAGE_DESTROY_UNMAP);
194 flush_agp_mappings();
195 bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real), 193 bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
196 AGP_PAGE_DESTROY_FREE); 194 AGP_PAGE_DESTROY_FREE);
197 } 195 }
@@ -219,7 +217,6 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge)
219 bridge->driver->needs_scratch_page) { 217 bridge->driver->needs_scratch_page) {
220 bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real), 218 bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
221 AGP_PAGE_DESTROY_UNMAP); 219 AGP_PAGE_DESTROY_UNMAP);
222 flush_agp_mappings();
223 bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real), 220 bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
224 AGP_PAGE_DESTROY_FREE); 221 AGP_PAGE_DESTROY_FREE);
225 } 222 }
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 64b2f6d7059d..1a4674ce0c71 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -197,7 +197,6 @@ void agp_free_memory(struct agp_memory *curr)
197 for (i = 0; i < curr->page_count; i++) { 197 for (i = 0; i < curr->page_count; i++) {
198 curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]), AGP_PAGE_DESTROY_UNMAP); 198 curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]), AGP_PAGE_DESTROY_UNMAP);
199 } 199 }
200 flush_agp_mappings();
201 for (i = 0; i < curr->page_count; i++) { 200 for (i = 0; i < curr->page_count; i++) {
202 curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]), AGP_PAGE_DESTROY_FREE); 201 curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]), AGP_PAGE_DESTROY_FREE);
203 } 202 }
@@ -267,8 +266,6 @@ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
267 } 266 }
268 new->bridge = bridge; 267 new->bridge = bridge;
269 268
270 flush_agp_mappings();
271
272 return new; 269 return new;
273} 270}
274EXPORT_SYMBOL(agp_allocate_memory); 271EXPORT_SYMBOL(agp_allocate_memory);
diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c
index e72a83e2bad5..76f581c85a7d 100644
--- a/drivers/char/agp/i460-agp.c
+++ b/drivers/char/agp/i460-agp.c
@@ -527,7 +527,6 @@ static void *i460_alloc_page (struct agp_bridge_data *bridge)
527 527
528 if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) { 528 if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
529 page = agp_generic_alloc_page(agp_bridge); 529 page = agp_generic_alloc_page(agp_bridge);
530 global_flush_tlb();
531 } else 530 } else
532 /* Returning NULL would cause problems */ 531 /* Returning NULL would cause problems */
533 /* AK: really dubious code. */ 532 /* AK: really dubious code. */
@@ -539,7 +538,6 @@ static void i460_destroy_page (void *page, int flags)
539{ 538{
540 if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) { 539 if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
541 agp_generic_destroy_page(page, flags); 540 agp_generic_destroy_page(page, flags);
542 global_flush_tlb();
543 } 541 }
544} 542}
545 543
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 03eac1eb8e0f..189efb6ef970 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -210,13 +210,11 @@ static void *i8xx_alloc_pages(void)
210 if (page == NULL) 210 if (page == NULL)
211 return NULL; 211 return NULL;
212 212
213 if (change_page_attr(page, 4, PAGE_KERNEL_NOCACHE) < 0) { 213 if (set_pages_uc(page, 4) < 0) {
214 change_page_attr(page, 4, PAGE_KERNEL); 214 set_pages_wb(page, 4);
215 global_flush_tlb();
216 __free_pages(page, 2); 215 __free_pages(page, 2);
217 return NULL; 216 return NULL;
218 } 217 }
219 global_flush_tlb();
220 get_page(page); 218 get_page(page);
221 atomic_inc(&agp_bridge->current_memory_agp); 219 atomic_inc(&agp_bridge->current_memory_agp);
222 return page_address(page); 220 return page_address(page);
@@ -230,8 +228,7 @@ static void i8xx_destroy_pages(void *addr)
230 return; 228 return;
231 229
232 page = virt_to_page(addr); 230 page = virt_to_page(addr);
233 change_page_attr(page, 4, PAGE_KERNEL); 231 set_pages_wb(page, 4);
234 global_flush_tlb();
235 put_page(page); 232 put_page(page);
236 __free_pages(page, 2); 233 __free_pages(page, 2);
237 atomic_dec(&agp_bridge->current_memory_agp); 234 atomic_dec(&agp_bridge->current_memory_agp);
@@ -341,7 +338,6 @@ static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
341 338
342 switch (pg_count) { 339 switch (pg_count) {
343 case 1: addr = agp_bridge->driver->agp_alloc_page(agp_bridge); 340 case 1: addr = agp_bridge->driver->agp_alloc_page(agp_bridge);
344 global_flush_tlb();
345 break; 341 break;
346 case 4: 342 case 4:
347 /* kludge to get 4 physical pages for ARGB cursor */ 343 /* kludge to get 4 physical pages for ARGB cursor */
@@ -404,7 +400,6 @@ static void intel_i810_free_by_type(struct agp_memory *curr)
404 else { 400 else {
405 agp_bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[0]), 401 agp_bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[0]),
406 AGP_PAGE_DESTROY_UNMAP); 402 AGP_PAGE_DESTROY_UNMAP);
407 global_flush_tlb();
408 agp_bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[0]), 403 agp_bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[0]),
409 AGP_PAGE_DESTROY_FREE); 404 AGP_PAGE_DESTROY_FREE);
410 } 405 }
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 4c16778e3f84..465ad35ed38f 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -600,63 +600,6 @@ static int hpet_is_known(struct hpet_data *hdp)
600 return 0; 600 return 0;
601} 601}
602 602
603EXPORT_SYMBOL(hpet_alloc);
604EXPORT_SYMBOL(hpet_register);
605EXPORT_SYMBOL(hpet_unregister);
606EXPORT_SYMBOL(hpet_control);
607
608int hpet_register(struct hpet_task *tp, int periodic)
609{
610 unsigned int i;
611 u64 mask;
612 struct hpet_timer __iomem *timer;
613 struct hpet_dev *devp;
614 struct hpets *hpetp;
615
616 switch (periodic) {
617 case 1:
618 mask = Tn_PER_INT_CAP_MASK;
619 break;
620 case 0:
621 mask = 0;
622 break;
623 default:
624 return -EINVAL;
625 }
626
627 tp->ht_opaque = NULL;
628
629 spin_lock_irq(&hpet_task_lock);
630 spin_lock(&hpet_lock);
631
632 for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next)
633 for (timer = hpetp->hp_hpet->hpet_timers, i = 0;
634 i < hpetp->hp_ntimer; i++, timer++) {
635 if ((readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK)
636 != mask)
637 continue;
638
639 devp = &hpetp->hp_dev[i];
640
641 if (devp->hd_flags & HPET_OPEN || devp->hd_task) {
642 devp = NULL;
643 continue;
644 }
645
646 tp->ht_opaque = devp;
647 devp->hd_task = tp;
648 break;
649 }
650
651 spin_unlock(&hpet_lock);
652 spin_unlock_irq(&hpet_task_lock);
653
654 if (tp->ht_opaque)
655 return 0;
656 else
657 return -EBUSY;
658}
659
660static inline int hpet_tpcheck(struct hpet_task *tp) 603static inline int hpet_tpcheck(struct hpet_task *tp)
661{ 604{
662 struct hpet_dev *devp; 605 struct hpet_dev *devp;
@@ -706,24 +649,6 @@ int hpet_unregister(struct hpet_task *tp)
706 return 0; 649 return 0;
707} 650}
708 651
709int hpet_control(struct hpet_task *tp, unsigned int cmd, unsigned long arg)
710{
711 struct hpet_dev *devp;
712 int err;
713
714 if ((err = hpet_tpcheck(tp)))
715 return err;
716
717 spin_lock_irq(&hpet_lock);
718 devp = tp->ht_opaque;
719 if (devp->hd_task != tp) {
720 spin_unlock_irq(&hpet_lock);
721 return -ENXIO;
722 }
723 spin_unlock_irq(&hpet_lock);
724 return hpet_ioctl_common(devp, cmd, arg, 1);
725}
726
727static ctl_table hpet_table[] = { 652static ctl_table hpet_table[] = {
728 { 653 {
729 .ctl_name = CTL_UNNUMBERED, 654 .ctl_name = CTL_UNNUMBERED,
@@ -806,14 +731,14 @@ static unsigned long hpet_calibrate(struct hpets *hpetp)
806 731
807int hpet_alloc(struct hpet_data *hdp) 732int hpet_alloc(struct hpet_data *hdp)
808{ 733{
809 u64 cap, mcfg; 734 u64 cap, mcfg, hpet_config;
810 struct hpet_dev *devp; 735 struct hpet_dev *devp;
811 u32 i, ntimer; 736 u32 i, ntimer, irq;
812 struct hpets *hpetp; 737 struct hpets *hpetp;
813 size_t siz; 738 size_t siz;
814 struct hpet __iomem *hpet; 739 struct hpet __iomem *hpet;
815 static struct hpets *last = NULL; 740 static struct hpets *last = NULL;
816 unsigned long period; 741 unsigned long period, irq_bitmap;
817 unsigned long long temp; 742 unsigned long long temp;
818 743
819 /* 744 /*
@@ -840,11 +765,47 @@ int hpet_alloc(struct hpet_data *hdp)
840 hpetp->hp_hpet_phys = hdp->hd_phys_address; 765 hpetp->hp_hpet_phys = hdp->hd_phys_address;
841 766
842 hpetp->hp_ntimer = hdp->hd_nirqs; 767 hpetp->hp_ntimer = hdp->hd_nirqs;
768 hpet = hpetp->hp_hpet;
843 769
844 for (i = 0; i < hdp->hd_nirqs; i++) 770 /* Assign IRQs statically for legacy devices */
845 hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i]; 771 hpetp->hp_dev[0].hd_hdwirq = hdp->hd_irq[0];
772 hpetp->hp_dev[1].hd_hdwirq = hdp->hd_irq[1];
846 773
847 hpet = hpetp->hp_hpet; 774 /* Assign IRQs dynamically for the others */
775 for (i = 2, devp = &hpetp->hp_dev[2]; i < hdp->hd_nirqs; i++, devp++) {
776 struct hpet_timer __iomem *timer;
777
778 timer = &hpet->hpet_timers[devp - hpetp->hp_dev];
779
780 /* Check if there's already an IRQ assigned to the timer */
781 if (hdp->hd_irq[i]) {
782 hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i];
783 continue;
784 }
785
786 hpet_config = readq(&timer->hpet_config);
787 irq_bitmap = (hpet_config & Tn_INT_ROUTE_CAP_MASK)
788 >> Tn_INT_ROUTE_CAP_SHIFT;
789 if (!irq_bitmap)
790 irq = 0; /* No valid IRQ Assignable */
791 else {
792 irq = find_first_bit(&irq_bitmap, 32);
793 do {
794 hpet_config |= irq << Tn_INT_ROUTE_CNF_SHIFT;
795 writeq(hpet_config, &timer->hpet_config);
796
797 /*
798 * Verify whether we have written a valid
799 * IRQ number by reading it back again
800 */
801 hpet_config = readq(&timer->hpet_config);
802 if (irq == (hpet_config & Tn_INT_ROUTE_CNF_MASK)
803 >> Tn_INT_ROUTE_CNF_SHIFT)
804 break; /* Success */
805 } while ((irq = (find_next_bit(&irq_bitmap, 32, irq))));
806 }
807 hpetp->hp_dev[i].hd_hdwirq = irq;
808 }
848 809
849 cap = readq(&hpet->hpet_cap); 810 cap = readq(&hpet->hpet_cap);
850 811
@@ -875,7 +836,8 @@ int hpet_alloc(struct hpet_data *hdp)
875 hpetp->hp_which, hdp->hd_phys_address, 836 hpetp->hp_which, hdp->hd_phys_address,
876 hpetp->hp_ntimer > 1 ? "s" : ""); 837 hpetp->hp_ntimer > 1 ? "s" : "");
877 for (i = 0; i < hpetp->hp_ntimer; i++) 838 for (i = 0; i < hpetp->hp_ntimer; i++)
878 printk("%s %d", i > 0 ? "," : "", hdp->hd_irq[i]); 839 printk("%s %d", i > 0 ? "," : "",
840 hpetp->hp_dev[i].hd_hdwirq);
879 printk("\n"); 841 printk("\n");
880 842
881 printk(KERN_INFO "hpet%u: %u %d-bit timers, %Lu Hz\n", 843 printk(KERN_INFO "hpet%u: %u %d-bit timers, %Lu Hz\n",
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index 0c66b802736a..78b151c4d20f 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Real Time Clock interface for Linux 2 * Real Time Clock interface for Linux
3 * 3 *
4 * Copyright (C) 1996 Paul Gortmaker 4 * Copyright (C) 1996 Paul Gortmaker
5 * 5 *
@@ -17,7 +17,7 @@
17 * has been received. If a RTC interrupt has already happened, 17 * has been received. If a RTC interrupt has already happened,
18 * it will output an unsigned long and then block. The output value 18 * it will output an unsigned long and then block. The output value
19 * contains the interrupt status in the low byte and the number of 19 * contains the interrupt status in the low byte and the number of
20 * interrupts since the last read in the remaining high bytes. The 20 * interrupts since the last read in the remaining high bytes. The
21 * /dev/rtc interface can also be used with the select(2) call. 21 * /dev/rtc interface can also be used with the select(2) call.
22 * 22 *
23 * This program is free software; you can redistribute it and/or 23 * This program is free software; you can redistribute it and/or
@@ -104,12 +104,14 @@ static int rtc_has_irq = 1;
104 104
105#ifndef CONFIG_HPET_EMULATE_RTC 105#ifndef CONFIG_HPET_EMULATE_RTC
106#define is_hpet_enabled() 0 106#define is_hpet_enabled() 0
107#define hpet_set_alarm_time(hrs, min, sec) 0 107#define hpet_set_alarm_time(hrs, min, sec) 0
108#define hpet_set_periodic_freq(arg) 0 108#define hpet_set_periodic_freq(arg) 0
109#define hpet_mask_rtc_irq_bit(arg) 0 109#define hpet_mask_rtc_irq_bit(arg) 0
110#define hpet_set_rtc_irq_bit(arg) 0 110#define hpet_set_rtc_irq_bit(arg) 0
111#define hpet_rtc_timer_init() do { } while (0) 111#define hpet_rtc_timer_init() do { } while (0)
112#define hpet_rtc_dropped_irq() 0 112#define hpet_rtc_dropped_irq() 0
113#define hpet_register_irq_handler(h) 0
114#define hpet_unregister_irq_handler(h) 0
113#ifdef RTC_IRQ 115#ifdef RTC_IRQ
114static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) 116static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
115{ 117{
@@ -147,7 +149,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file,
147static unsigned int rtc_poll(struct file *file, poll_table *wait); 149static unsigned int rtc_poll(struct file *file, poll_table *wait);
148#endif 150#endif
149 151
150static void get_rtc_alm_time (struct rtc_time *alm_tm); 152static void get_rtc_alm_time(struct rtc_time *alm_tm);
151#ifdef RTC_IRQ 153#ifdef RTC_IRQ
152static void set_rtc_irq_bit_locked(unsigned char bit); 154static void set_rtc_irq_bit_locked(unsigned char bit);
153static void mask_rtc_irq_bit_locked(unsigned char bit); 155static void mask_rtc_irq_bit_locked(unsigned char bit);
@@ -185,9 +187,9 @@ static int rtc_proc_open(struct inode *inode, struct file *file);
185 * rtc_status but before mod_timer is called, which would then reenable the 187 * rtc_status but before mod_timer is called, which would then reenable the
186 * timer (but you would need to have an awful timing before you'd trip on it) 188 * timer (but you would need to have an awful timing before you'd trip on it)
187 */ 189 */
188static unsigned long rtc_status = 0; /* bitmapped status byte. */ 190static unsigned long rtc_status; /* bitmapped status byte. */
189static unsigned long rtc_freq = 0; /* Current periodic IRQ rate */ 191static unsigned long rtc_freq; /* Current periodic IRQ rate */
190static unsigned long rtc_irq_data = 0; /* our output to the world */ 192static unsigned long rtc_irq_data; /* our output to the world */
191static unsigned long rtc_max_user_freq = 64; /* > this, need CAP_SYS_RESOURCE */ 193static unsigned long rtc_max_user_freq = 64; /* > this, need CAP_SYS_RESOURCE */
192 194
193#ifdef RTC_IRQ 195#ifdef RTC_IRQ
@@ -195,7 +197,7 @@ static unsigned long rtc_max_user_freq = 64; /* > this, need CAP_SYS_RESOURCE */
195 * rtc_task_lock nests inside rtc_lock. 197 * rtc_task_lock nests inside rtc_lock.
196 */ 198 */
197static DEFINE_SPINLOCK(rtc_task_lock); 199static DEFINE_SPINLOCK(rtc_task_lock);
198static rtc_task_t *rtc_callback = NULL; 200static rtc_task_t *rtc_callback;
199#endif 201#endif
200 202
201/* 203/*
@@ -205,7 +207,7 @@ static rtc_task_t *rtc_callback = NULL;
205 207
206static unsigned long epoch = 1900; /* year corresponding to 0x00 */ 208static unsigned long epoch = 1900; /* year corresponding to 0x00 */
207 209
208static const unsigned char days_in_mo[] = 210static const unsigned char days_in_mo[] =
209{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; 211{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
210 212
211/* 213/*
@@ -242,7 +244,7 @@ irqreturn_t rtc_interrupt(int irq, void *dev_id)
242 * the last read in the remainder of rtc_irq_data. 244 * the last read in the remainder of rtc_irq_data.
243 */ 245 */
244 246
245 spin_lock (&rtc_lock); 247 spin_lock(&rtc_lock);
246 rtc_irq_data += 0x100; 248 rtc_irq_data += 0x100;
247 rtc_irq_data &= ~0xff; 249 rtc_irq_data &= ~0xff;
248 if (is_hpet_enabled()) { 250 if (is_hpet_enabled()) {
@@ -259,16 +261,16 @@ irqreturn_t rtc_interrupt(int irq, void *dev_id)
259 if (rtc_status & RTC_TIMER_ON) 261 if (rtc_status & RTC_TIMER_ON)
260 mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 2*HZ/100); 262 mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 2*HZ/100);
261 263
262 spin_unlock (&rtc_lock); 264 spin_unlock(&rtc_lock);
263 265
264 /* Now do the rest of the actions */ 266 /* Now do the rest of the actions */
265 spin_lock(&rtc_task_lock); 267 spin_lock(&rtc_task_lock);
266 if (rtc_callback) 268 if (rtc_callback)
267 rtc_callback->func(rtc_callback->private_data); 269 rtc_callback->func(rtc_callback->private_data);
268 spin_unlock(&rtc_task_lock); 270 spin_unlock(&rtc_task_lock);
269 wake_up_interruptible(&rtc_wait); 271 wake_up_interruptible(&rtc_wait);
270 272
271 kill_fasync (&rtc_async_queue, SIGIO, POLL_IN); 273 kill_fasync(&rtc_async_queue, SIGIO, POLL_IN);
272 274
273 return IRQ_HANDLED; 275 return IRQ_HANDLED;
274} 276}
@@ -335,7 +337,7 @@ static ssize_t rtc_read(struct file *file, char __user *buf,
335 DECLARE_WAITQUEUE(wait, current); 337 DECLARE_WAITQUEUE(wait, current);
336 unsigned long data; 338 unsigned long data;
337 ssize_t retval; 339 ssize_t retval;
338 340
339 if (rtc_has_irq == 0) 341 if (rtc_has_irq == 0)
340 return -EIO; 342 return -EIO;
341 343
@@ -358,11 +360,11 @@ static ssize_t rtc_read(struct file *file, char __user *buf,
358 * confusing. And no, xchg() is not the answer. */ 360 * confusing. And no, xchg() is not the answer. */
359 361
360 __set_current_state(TASK_INTERRUPTIBLE); 362 __set_current_state(TASK_INTERRUPTIBLE);
361 363
362 spin_lock_irq (&rtc_lock); 364 spin_lock_irq(&rtc_lock);
363 data = rtc_irq_data; 365 data = rtc_irq_data;
364 rtc_irq_data = 0; 366 rtc_irq_data = 0;
365 spin_unlock_irq (&rtc_lock); 367 spin_unlock_irq(&rtc_lock);
366 368
367 if (data != 0) 369 if (data != 0)
368 break; 370 break;
@@ -378,10 +380,13 @@ static ssize_t rtc_read(struct file *file, char __user *buf,
378 schedule(); 380 schedule();
379 } while (1); 381 } while (1);
380 382
381 if (count == sizeof(unsigned int)) 383 if (count == sizeof(unsigned int)) {
382 retval = put_user(data, (unsigned int __user *)buf) ?: sizeof(int); 384 retval = put_user(data,
383 else 385 (unsigned int __user *)buf) ?: sizeof(int);
384 retval = put_user(data, (unsigned long __user *)buf) ?: sizeof(long); 386 } else {
387 retval = put_user(data,
388 (unsigned long __user *)buf) ?: sizeof(long);
389 }
385 if (!retval) 390 if (!retval)
386 retval = count; 391 retval = count;
387 out: 392 out:
@@ -394,7 +399,7 @@ static ssize_t rtc_read(struct file *file, char __user *buf,
394 399
395static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel) 400static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
396{ 401{
397 struct rtc_time wtime; 402 struct rtc_time wtime;
398 403
399#ifdef RTC_IRQ 404#ifdef RTC_IRQ
400 if (rtc_has_irq == 0) { 405 if (rtc_has_irq == 0) {
@@ -426,35 +431,41 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
426 } 431 }
427 case RTC_PIE_OFF: /* Mask periodic int. enab. bit */ 432 case RTC_PIE_OFF: /* Mask periodic int. enab. bit */
428 { 433 {
429 unsigned long flags; /* can be called from isr via rtc_control() */ 434 /* can be called from isr via rtc_control() */
430 spin_lock_irqsave (&rtc_lock, flags); 435 unsigned long flags;
436
437 spin_lock_irqsave(&rtc_lock, flags);
431 mask_rtc_irq_bit_locked(RTC_PIE); 438 mask_rtc_irq_bit_locked(RTC_PIE);
432 if (rtc_status & RTC_TIMER_ON) { 439 if (rtc_status & RTC_TIMER_ON) {
433 rtc_status &= ~RTC_TIMER_ON; 440 rtc_status &= ~RTC_TIMER_ON;
434 del_timer(&rtc_irq_timer); 441 del_timer(&rtc_irq_timer);
435 } 442 }
436 spin_unlock_irqrestore (&rtc_lock, flags); 443 spin_unlock_irqrestore(&rtc_lock, flags);
444
437 return 0; 445 return 0;
438 } 446 }
439 case RTC_PIE_ON: /* Allow periodic ints */ 447 case RTC_PIE_ON: /* Allow periodic ints */
440 { 448 {
441 unsigned long flags; /* can be called from isr via rtc_control() */ 449 /* can be called from isr via rtc_control() */
450 unsigned long flags;
451
442 /* 452 /*
443 * We don't really want Joe User enabling more 453 * We don't really want Joe User enabling more
444 * than 64Hz of interrupts on a multi-user machine. 454 * than 64Hz of interrupts on a multi-user machine.
445 */ 455 */
446 if (!kernel && (rtc_freq > rtc_max_user_freq) && 456 if (!kernel && (rtc_freq > rtc_max_user_freq) &&
447 (!capable(CAP_SYS_RESOURCE))) 457 (!capable(CAP_SYS_RESOURCE)))
448 return -EACCES; 458 return -EACCES;
449 459
450 spin_lock_irqsave (&rtc_lock, flags); 460 spin_lock_irqsave(&rtc_lock, flags);
451 if (!(rtc_status & RTC_TIMER_ON)) { 461 if (!(rtc_status & RTC_TIMER_ON)) {
452 mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 462 mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq +
453 2*HZ/100); 463 2*HZ/100);
454 rtc_status |= RTC_TIMER_ON; 464 rtc_status |= RTC_TIMER_ON;
455 } 465 }
456 set_rtc_irq_bit_locked(RTC_PIE); 466 set_rtc_irq_bit_locked(RTC_PIE);
457 spin_unlock_irqrestore (&rtc_lock, flags); 467 spin_unlock_irqrestore(&rtc_lock, flags);
468
458 return 0; 469 return 0;
459 } 470 }
460 case RTC_UIE_OFF: /* Mask ints from RTC updates. */ 471 case RTC_UIE_OFF: /* Mask ints from RTC updates. */
@@ -477,7 +488,7 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
477 */ 488 */
478 memset(&wtime, 0, sizeof(struct rtc_time)); 489 memset(&wtime, 0, sizeof(struct rtc_time));
479 get_rtc_alm_time(&wtime); 490 get_rtc_alm_time(&wtime);
480 break; 491 break;
481 } 492 }
482 case RTC_ALM_SET: /* Store a time into the alarm */ 493 case RTC_ALM_SET: /* Store a time into the alarm */
483 { 494 {
@@ -505,16 +516,21 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
505 */ 516 */
506 } 517 }
507 if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || 518 if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) ||
508 RTC_ALWAYS_BCD) 519 RTC_ALWAYS_BCD) {
509 { 520 if (sec < 60)
510 if (sec < 60) BIN_TO_BCD(sec); 521 BIN_TO_BCD(sec);
511 else sec = 0xff; 522 else
512 523 sec = 0xff;
513 if (min < 60) BIN_TO_BCD(min); 524
514 else min = 0xff; 525 if (min < 60)
515 526 BIN_TO_BCD(min);
516 if (hrs < 24) BIN_TO_BCD(hrs); 527 else
517 else hrs = 0xff; 528 min = 0xff;
529
530 if (hrs < 24)
531 BIN_TO_BCD(hrs);
532 else
533 hrs = 0xff;
518 } 534 }
519 CMOS_WRITE(hrs, RTC_HOURS_ALARM); 535 CMOS_WRITE(hrs, RTC_HOURS_ALARM);
520 CMOS_WRITE(min, RTC_MINUTES_ALARM); 536 CMOS_WRITE(min, RTC_MINUTES_ALARM);
@@ -563,11 +579,12 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
563 579
564 if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr))) 580 if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr)))
565 return -EINVAL; 581 return -EINVAL;
566 582
567 if ((hrs >= 24) || (min >= 60) || (sec >= 60)) 583 if ((hrs >= 24) || (min >= 60) || (sec >= 60))
568 return -EINVAL; 584 return -EINVAL;
569 585
570 if ((yrs -= epoch) > 255) /* They are unsigned */ 586 yrs -= epoch;
587 if (yrs > 255) /* They are unsigned */
571 return -EINVAL; 588 return -EINVAL;
572 589
573 spin_lock_irq(&rtc_lock); 590 spin_lock_irq(&rtc_lock);
@@ -635,9 +652,10 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
635 { 652 {
636 int tmp = 0; 653 int tmp = 0;
637 unsigned char val; 654 unsigned char val;
638 unsigned long flags; /* can be called from isr via rtc_control() */ 655 /* can be called from isr via rtc_control() */
656 unsigned long flags;
639 657
640 /* 658 /*
641 * The max we can do is 8192Hz. 659 * The max we can do is 8192Hz.
642 */ 660 */
643 if ((arg < 2) || (arg > 8192)) 661 if ((arg < 2) || (arg > 8192))
@@ -646,7 +664,8 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
646 * We don't really want Joe User generating more 664 * We don't really want Joe User generating more
647 * than 64Hz of interrupts on a multi-user machine. 665 * than 64Hz of interrupts on a multi-user machine.
648 */ 666 */
649 if (!kernel && (arg > rtc_max_user_freq) && (!capable(CAP_SYS_RESOURCE))) 667 if (!kernel && (arg > rtc_max_user_freq) &&
668 !capable(CAP_SYS_RESOURCE))
650 return -EACCES; 669 return -EACCES;
651 670
652 while (arg > (1<<tmp)) 671 while (arg > (1<<tmp))
@@ -674,11 +693,11 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
674#endif 693#endif
675 case RTC_EPOCH_READ: /* Read the epoch. */ 694 case RTC_EPOCH_READ: /* Read the epoch. */
676 { 695 {
677 return put_user (epoch, (unsigned long __user *)arg); 696 return put_user(epoch, (unsigned long __user *)arg);
678 } 697 }
679 case RTC_EPOCH_SET: /* Set the epoch. */ 698 case RTC_EPOCH_SET: /* Set the epoch. */
680 { 699 {
681 /* 700 /*
682 * There were no RTC clocks before 1900. 701 * There were no RTC clocks before 1900.
683 */ 702 */
684 if (arg < 1900) 703 if (arg < 1900)
@@ -693,7 +712,8 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
693 default: 712 default:
694 return -ENOTTY; 713 return -ENOTTY;
695 } 714 }
696 return copy_to_user((void __user *)arg, &wtime, sizeof wtime) ? -EFAULT : 0; 715 return copy_to_user((void __user *)arg,
716 &wtime, sizeof wtime) ? -EFAULT : 0;
697} 717}
698 718
699static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd, 719static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
@@ -712,26 +732,25 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
712 * needed here. Or anywhere else in this driver. */ 732 * needed here. Or anywhere else in this driver. */
713static int rtc_open(struct inode *inode, struct file *file) 733static int rtc_open(struct inode *inode, struct file *file)
714{ 734{
715 spin_lock_irq (&rtc_lock); 735 spin_lock_irq(&rtc_lock);
716 736
717 if(rtc_status & RTC_IS_OPEN) 737 if (rtc_status & RTC_IS_OPEN)
718 goto out_busy; 738 goto out_busy;
719 739
720 rtc_status |= RTC_IS_OPEN; 740 rtc_status |= RTC_IS_OPEN;
721 741
722 rtc_irq_data = 0; 742 rtc_irq_data = 0;
723 spin_unlock_irq (&rtc_lock); 743 spin_unlock_irq(&rtc_lock);
724 return 0; 744 return 0;
725 745
726out_busy: 746out_busy:
727 spin_unlock_irq (&rtc_lock); 747 spin_unlock_irq(&rtc_lock);
728 return -EBUSY; 748 return -EBUSY;
729} 749}
730 750
731static int rtc_fasync (int fd, struct file *filp, int on) 751static int rtc_fasync(int fd, struct file *filp, int on)
732
733{ 752{
734 return fasync_helper (fd, filp, on, &rtc_async_queue); 753 return fasync_helper(fd, filp, on, &rtc_async_queue);
735} 754}
736 755
737static int rtc_release(struct inode *inode, struct file *file) 756static int rtc_release(struct inode *inode, struct file *file)
@@ -762,16 +781,16 @@ static int rtc_release(struct inode *inode, struct file *file)
762 } 781 }
763 spin_unlock_irq(&rtc_lock); 782 spin_unlock_irq(&rtc_lock);
764 783
765 if (file->f_flags & FASYNC) { 784 if (file->f_flags & FASYNC)
766 rtc_fasync (-1, file, 0); 785 rtc_fasync(-1, file, 0);
767 }
768no_irq: 786no_irq:
769#endif 787#endif
770 788
771 spin_lock_irq (&rtc_lock); 789 spin_lock_irq(&rtc_lock);
772 rtc_irq_data = 0; 790 rtc_irq_data = 0;
773 rtc_status &= ~RTC_IS_OPEN; 791 rtc_status &= ~RTC_IS_OPEN;
774 spin_unlock_irq (&rtc_lock); 792 spin_unlock_irq(&rtc_lock);
793
775 return 0; 794 return 0;
776} 795}
777 796
@@ -786,9 +805,9 @@ static unsigned int rtc_poll(struct file *file, poll_table *wait)
786 805
787 poll_wait(file, &rtc_wait, wait); 806 poll_wait(file, &rtc_wait, wait);
788 807
789 spin_lock_irq (&rtc_lock); 808 spin_lock_irq(&rtc_lock);
790 l = rtc_irq_data; 809 l = rtc_irq_data;
791 spin_unlock_irq (&rtc_lock); 810 spin_unlock_irq(&rtc_lock);
792 811
793 if (l != 0) 812 if (l != 0)
794 return POLLIN | POLLRDNORM; 813 return POLLIN | POLLRDNORM;
@@ -796,14 +815,6 @@ static unsigned int rtc_poll(struct file *file, poll_table *wait)
796} 815}
797#endif 816#endif
798 817
799/*
800 * exported stuffs
801 */
802
803EXPORT_SYMBOL(rtc_register);
804EXPORT_SYMBOL(rtc_unregister);
805EXPORT_SYMBOL(rtc_control);
806
807int rtc_register(rtc_task_t *task) 818int rtc_register(rtc_task_t *task)
808{ 819{
809#ifndef RTC_IRQ 820#ifndef RTC_IRQ
@@ -829,6 +840,7 @@ int rtc_register(rtc_task_t *task)
829 return 0; 840 return 0;
830#endif 841#endif
831} 842}
843EXPORT_SYMBOL(rtc_register);
832 844
833int rtc_unregister(rtc_task_t *task) 845int rtc_unregister(rtc_task_t *task)
834{ 846{
@@ -845,7 +857,7 @@ int rtc_unregister(rtc_task_t *task)
845 return -ENXIO; 857 return -ENXIO;
846 } 858 }
847 rtc_callback = NULL; 859 rtc_callback = NULL;
848 860
849 /* disable controls */ 861 /* disable controls */
850 if (!hpet_mask_rtc_irq_bit(RTC_PIE | RTC_AIE | RTC_UIE)) { 862 if (!hpet_mask_rtc_irq_bit(RTC_PIE | RTC_AIE | RTC_UIE)) {
851 tmp = CMOS_READ(RTC_CONTROL); 863 tmp = CMOS_READ(RTC_CONTROL);
@@ -865,6 +877,7 @@ int rtc_unregister(rtc_task_t *task)
865 return 0; 877 return 0;
866#endif 878#endif
867} 879}
880EXPORT_SYMBOL(rtc_unregister);
868 881
869int rtc_control(rtc_task_t *task, unsigned int cmd, unsigned long arg) 882int rtc_control(rtc_task_t *task, unsigned int cmd, unsigned long arg)
870{ 883{
@@ -883,7 +896,7 @@ int rtc_control(rtc_task_t *task, unsigned int cmd, unsigned long arg)
883 return rtc_do_ioctl(cmd, arg, 1); 896 return rtc_do_ioctl(cmd, arg, 1);
884#endif 897#endif
885} 898}
886 899EXPORT_SYMBOL(rtc_control);
887 900
888/* 901/*
889 * The various file operations we support. 902 * The various file operations we support.
@@ -910,11 +923,11 @@ static struct miscdevice rtc_dev = {
910 923
911#ifdef CONFIG_PROC_FS 924#ifdef CONFIG_PROC_FS
912static const struct file_operations rtc_proc_fops = { 925static const struct file_operations rtc_proc_fops = {
913 .owner = THIS_MODULE, 926 .owner = THIS_MODULE,
914 .open = rtc_proc_open, 927 .open = rtc_proc_open,
915 .read = seq_read, 928 .read = seq_read,
916 .llseek = seq_lseek, 929 .llseek = seq_lseek,
917 .release = single_release, 930 .release = single_release,
918}; 931};
919#endif 932#endif
920 933
@@ -965,7 +978,7 @@ static int __init rtc_init(void)
965#ifdef CONFIG_SPARC32 978#ifdef CONFIG_SPARC32
966 for_each_ebus(ebus) { 979 for_each_ebus(ebus) {
967 for_each_ebusdev(edev, ebus) { 980 for_each_ebusdev(edev, ebus) {
968 if(strcmp(edev->prom_node->name, "rtc") == 0) { 981 if (strcmp(edev->prom_node->name, "rtc") == 0) {
969 rtc_port = edev->resource[0].start; 982 rtc_port = edev->resource[0].start;
970 rtc_irq = edev->irqs[0]; 983 rtc_irq = edev->irqs[0];
971 goto found; 984 goto found;
@@ -986,7 +999,8 @@ found:
986 * XXX Interrupt pin #7 in Espresso is shared between RTC and 999 * XXX Interrupt pin #7 in Espresso is shared between RTC and
987 * PCI Slot 2 INTA# (and some INTx# in Slot 1). 1000 * PCI Slot 2 INTA# (and some INTx# in Slot 1).
988 */ 1001 */
989 if (request_irq(rtc_irq, rtc_interrupt, IRQF_SHARED, "rtc", (void *)&rtc_port)) { 1002 if (request_irq(rtc_irq, rtc_interrupt, IRQF_SHARED, "rtc",
1003 (void *)&rtc_port)) {
990 rtc_has_irq = 0; 1004 rtc_has_irq = 0;
991 printk(KERN_ERR "rtc: cannot register IRQ %d\n", rtc_irq); 1005 printk(KERN_ERR "rtc: cannot register IRQ %d\n", rtc_irq);
992 return -EIO; 1006 return -EIO;
@@ -1015,16 +1029,26 @@ no_irq:
1015 1029
1016#ifdef RTC_IRQ 1030#ifdef RTC_IRQ
1017 if (is_hpet_enabled()) { 1031 if (is_hpet_enabled()) {
1032 int err;
1033
1018 rtc_int_handler_ptr = hpet_rtc_interrupt; 1034 rtc_int_handler_ptr = hpet_rtc_interrupt;
1035 err = hpet_register_irq_handler(rtc_interrupt);
1036 if (err != 0) {
1037 printk(KERN_WARNING "hpet_register_irq_handler failed "
1038 "in rtc_init().");
1039 return err;
1040 }
1019 } else { 1041 } else {
1020 rtc_int_handler_ptr = rtc_interrupt; 1042 rtc_int_handler_ptr = rtc_interrupt;
1021 } 1043 }
1022 1044
1023 if(request_irq(RTC_IRQ, rtc_int_handler_ptr, IRQF_DISABLED, "rtc", NULL)) { 1045 if (request_irq(RTC_IRQ, rtc_int_handler_ptr, IRQF_DISABLED,
1046 "rtc", NULL)) {
1024 /* Yeah right, seeing as irq 8 doesn't even hit the bus. */ 1047 /* Yeah right, seeing as irq 8 doesn't even hit the bus. */
1025 rtc_has_irq = 0; 1048 rtc_has_irq = 0;
1026 printk(KERN_ERR "rtc: IRQ %d is not free.\n", RTC_IRQ); 1049 printk(KERN_ERR "rtc: IRQ %d is not free.\n", RTC_IRQ);
1027 rtc_release_region(); 1050 rtc_release_region();
1051
1028 return -EIO; 1052 return -EIO;
1029 } 1053 }
1030 hpet_rtc_timer_init(); 1054 hpet_rtc_timer_init();
@@ -1036,6 +1060,7 @@ no_irq:
1036 if (misc_register(&rtc_dev)) { 1060 if (misc_register(&rtc_dev)) {
1037#ifdef RTC_IRQ 1061#ifdef RTC_IRQ
1038 free_irq(RTC_IRQ, NULL); 1062 free_irq(RTC_IRQ, NULL);
1063 hpet_unregister_irq_handler(rtc_interrupt);
1039 rtc_has_irq = 0; 1064 rtc_has_irq = 0;
1040#endif 1065#endif
1041 rtc_release_region(); 1066 rtc_release_region();
@@ -1052,21 +1077,21 @@ no_irq:
1052 1077
1053#if defined(__alpha__) || defined(__mips__) 1078#if defined(__alpha__) || defined(__mips__)
1054 rtc_freq = HZ; 1079 rtc_freq = HZ;
1055 1080
1056 /* Each operating system on an Alpha uses its own epoch. 1081 /* Each operating system on an Alpha uses its own epoch.
1057 Let's try to guess which one we are using now. */ 1082 Let's try to guess which one we are using now. */
1058 1083
1059 if (rtc_is_updating() != 0) 1084 if (rtc_is_updating() != 0)
1060 msleep(20); 1085 msleep(20);
1061 1086
1062 spin_lock_irq(&rtc_lock); 1087 spin_lock_irq(&rtc_lock);
1063 year = CMOS_READ(RTC_YEAR); 1088 year = CMOS_READ(RTC_YEAR);
1064 ctrl = CMOS_READ(RTC_CONTROL); 1089 ctrl = CMOS_READ(RTC_CONTROL);
1065 spin_unlock_irq(&rtc_lock); 1090 spin_unlock_irq(&rtc_lock);
1066 1091
1067 if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) 1092 if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
1068 BCD_TO_BIN(year); /* This should never happen... */ 1093 BCD_TO_BIN(year); /* This should never happen... */
1069 1094
1070 if (year < 20) { 1095 if (year < 20) {
1071 epoch = 2000; 1096 epoch = 2000;
1072 guess = "SRM (post-2000)"; 1097 guess = "SRM (post-2000)";
@@ -1087,7 +1112,8 @@ no_irq:
1087#endif 1112#endif
1088 } 1113 }
1089 if (guess) 1114 if (guess)
1090 printk(KERN_INFO "rtc: %s epoch (%lu) detected\n", guess, epoch); 1115 printk(KERN_INFO "rtc: %s epoch (%lu) detected\n",
1116 guess, epoch);
1091#endif 1117#endif
1092#ifdef RTC_IRQ 1118#ifdef RTC_IRQ
1093 if (rtc_has_irq == 0) 1119 if (rtc_has_irq == 0)
@@ -1096,8 +1122,12 @@ no_irq:
1096 spin_lock_irq(&rtc_lock); 1122 spin_lock_irq(&rtc_lock);
1097 rtc_freq = 1024; 1123 rtc_freq = 1024;
1098 if (!hpet_set_periodic_freq(rtc_freq)) { 1124 if (!hpet_set_periodic_freq(rtc_freq)) {
1099 /* Initialize periodic freq. to CMOS reset default, which is 1024Hz */ 1125 /*
1100 CMOS_WRITE(((CMOS_READ(RTC_FREQ_SELECT) & 0xF0) | 0x06), RTC_FREQ_SELECT); 1126 * Initialize periodic frequency to CMOS reset default,
1127 * which is 1024Hz
1128 */
1129 CMOS_WRITE(((CMOS_READ(RTC_FREQ_SELECT) & 0xF0) | 0x06),
1130 RTC_FREQ_SELECT);
1101 } 1131 }
1102 spin_unlock_irq(&rtc_lock); 1132 spin_unlock_irq(&rtc_lock);
1103no_irq2: 1133no_irq2:
@@ -1110,20 +1140,22 @@ no_irq2:
1110 return 0; 1140 return 0;
1111} 1141}
1112 1142
1113static void __exit rtc_exit (void) 1143static void __exit rtc_exit(void)
1114{ 1144{
1115 cleanup_sysctl(); 1145 cleanup_sysctl();
1116 remove_proc_entry ("driver/rtc", NULL); 1146 remove_proc_entry("driver/rtc", NULL);
1117 misc_deregister(&rtc_dev); 1147 misc_deregister(&rtc_dev);
1118 1148
1119#ifdef CONFIG_SPARC32 1149#ifdef CONFIG_SPARC32
1120 if (rtc_has_irq) 1150 if (rtc_has_irq)
1121 free_irq (rtc_irq, &rtc_port); 1151 free_irq(rtc_irq, &rtc_port);
1122#else 1152#else
1123 rtc_release_region(); 1153 rtc_release_region();
1124#ifdef RTC_IRQ 1154#ifdef RTC_IRQ
1125 if (rtc_has_irq) 1155 if (rtc_has_irq) {
1126 free_irq (RTC_IRQ, NULL); 1156 free_irq(RTC_IRQ, NULL);
1157 hpet_unregister_irq_handler(hpet_rtc_interrupt);
1158 }
1127#endif 1159#endif
1128#endif /* CONFIG_SPARC32 */ 1160#endif /* CONFIG_SPARC32 */
1129} 1161}
@@ -1133,14 +1165,14 @@ module_exit(rtc_exit);
1133 1165
1134#ifdef RTC_IRQ 1166#ifdef RTC_IRQ
1135/* 1167/*
1136 * At IRQ rates >= 4096Hz, an interrupt may get lost altogether. 1168 * At IRQ rates >= 4096Hz, an interrupt may get lost altogether.
1137 * (usually during an IDE disk interrupt, with IRQ unmasking off) 1169 * (usually during an IDE disk interrupt, with IRQ unmasking off)
1138 * Since the interrupt handler doesn't get called, the IRQ status 1170 * Since the interrupt handler doesn't get called, the IRQ status
1139 * byte doesn't get read, and the RTC stops generating interrupts. 1171 * byte doesn't get read, and the RTC stops generating interrupts.
1140 * A timer is set, and will call this function if/when that happens. 1172 * A timer is set, and will call this function if/when that happens.
1141 * To get it out of this stalled state, we just read the status. 1173 * To get it out of this stalled state, we just read the status.
1142 * At least a jiffy of interrupts (rtc_freq/HZ) will have been lost. 1174 * At least a jiffy of interrupts (rtc_freq/HZ) will have been lost.
1143 * (You *really* shouldn't be trying to use a non-realtime system 1175 * (You *really* shouldn't be trying to use a non-realtime system
1144 * for something that requires a steady > 1KHz signal anyways.) 1176 * for something that requires a steady > 1KHz signal anyways.)
1145 */ 1177 */
1146 1178
@@ -1148,7 +1180,7 @@ static void rtc_dropped_irq(unsigned long data)
1148{ 1180{
1149 unsigned long freq; 1181 unsigned long freq;
1150 1182
1151 spin_lock_irq (&rtc_lock); 1183 spin_lock_irq(&rtc_lock);
1152 1184
1153 if (hpet_rtc_dropped_irq()) { 1185 if (hpet_rtc_dropped_irq()) {
1154 spin_unlock_irq(&rtc_lock); 1186 spin_unlock_irq(&rtc_lock);
@@ -1167,13 +1199,15 @@ static void rtc_dropped_irq(unsigned long data)
1167 1199
1168 spin_unlock_irq(&rtc_lock); 1200 spin_unlock_irq(&rtc_lock);
1169 1201
1170 if (printk_ratelimit()) 1202 if (printk_ratelimit()) {
1171 printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n", freq); 1203 printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n",
1204 freq);
1205 }
1172 1206
1173 /* Now we have new data */ 1207 /* Now we have new data */
1174 wake_up_interruptible(&rtc_wait); 1208 wake_up_interruptible(&rtc_wait);
1175 1209
1176 kill_fasync (&rtc_async_queue, SIGIO, POLL_IN); 1210 kill_fasync(&rtc_async_queue, SIGIO, POLL_IN);
1177} 1211}
1178#endif 1212#endif
1179 1213
@@ -1277,7 +1311,7 @@ void rtc_get_rtc_time(struct rtc_time *rtc_tm)
1277 * can take just over 2ms. We wait 20ms. There is no need to 1311 * can take just over 2ms. We wait 20ms. There is no need to
1278 * to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP. 1312 * to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
1279 * If you need to know *exactly* when a second has started, enable 1313 * If you need to know *exactly* when a second has started, enable
1280 * periodic update complete interrupts, (via ioctl) and then 1314 * periodic update complete interrupts, (via ioctl) and then
1281 * immediately read /dev/rtc which will block until you get the IRQ. 1315 * immediately read /dev/rtc which will block until you get the IRQ.
1282 * Once the read clears, read the RTC time (again via ioctl). Easy. 1316 * Once the read clears, read the RTC time (again via ioctl). Easy.
1283 */ 1317 */
@@ -1307,8 +1341,7 @@ void rtc_get_rtc_time(struct rtc_time *rtc_tm)
1307 ctrl = CMOS_READ(RTC_CONTROL); 1341 ctrl = CMOS_READ(RTC_CONTROL);
1308 spin_unlock_irqrestore(&rtc_lock, flags); 1342 spin_unlock_irqrestore(&rtc_lock, flags);
1309 1343
1310 if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) 1344 if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
1311 {
1312 BCD_TO_BIN(rtc_tm->tm_sec); 1345 BCD_TO_BIN(rtc_tm->tm_sec);
1313 BCD_TO_BIN(rtc_tm->tm_min); 1346 BCD_TO_BIN(rtc_tm->tm_min);
1314 BCD_TO_BIN(rtc_tm->tm_hour); 1347 BCD_TO_BIN(rtc_tm->tm_hour);
@@ -1326,7 +1359,8 @@ void rtc_get_rtc_time(struct rtc_time *rtc_tm)
1326 * Account for differences between how the RTC uses the values 1359 * Account for differences between how the RTC uses the values
1327 * and how they are defined in a struct rtc_time; 1360 * and how they are defined in a struct rtc_time;
1328 */ 1361 */
1329 if ((rtc_tm->tm_year += (epoch - 1900)) <= 69) 1362 rtc_tm->tm_year += epoch - 1900;
1363 if (rtc_tm->tm_year <= 69)
1330 rtc_tm->tm_year += 100; 1364 rtc_tm->tm_year += 100;
1331 1365
1332 rtc_tm->tm_mon--; 1366 rtc_tm->tm_mon--;
@@ -1347,8 +1381,7 @@ static void get_rtc_alm_time(struct rtc_time *alm_tm)
1347 ctrl = CMOS_READ(RTC_CONTROL); 1381 ctrl = CMOS_READ(RTC_CONTROL);
1348 spin_unlock_irq(&rtc_lock); 1382 spin_unlock_irq(&rtc_lock);
1349 1383
1350 if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) 1384 if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
1351 {
1352 BCD_TO_BIN(alm_tm->tm_sec); 1385 BCD_TO_BIN(alm_tm->tm_sec);
1353 BCD_TO_BIN(alm_tm->tm_min); 1386 BCD_TO_BIN(alm_tm->tm_min);
1354 BCD_TO_BIN(alm_tm->tm_hour); 1387 BCD_TO_BIN(alm_tm->tm_hour);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 5efd5550f4ca..b730d6709529 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1604,7 +1604,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
1604 memcpy(&policy->cpuinfo, &data->cpuinfo, 1604 memcpy(&policy->cpuinfo, &data->cpuinfo,
1605 sizeof(struct cpufreq_cpuinfo)); 1605 sizeof(struct cpufreq_cpuinfo));
1606 1606
1607 if (policy->min > data->min && policy->min > policy->max) { 1607 if (policy->min > data->max || policy->max < data->min) {
1608 ret = -EINVAL; 1608 ret = -EINVAL;
1609 goto error_out; 1609 goto error_out;
1610 } 1610 }
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 5e596a7e3601..9008ed5ef4ce 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -8,6 +8,8 @@
8#include <linux/slab.h> 8#include <linux/slab.h>
9#include <asm/dmi.h> 9#include <asm/dmi.h>
10 10
11static char dmi_empty_string[] = " ";
12
11static char * __init dmi_string(const struct dmi_header *dm, u8 s) 13static char * __init dmi_string(const struct dmi_header *dm, u8 s)
12{ 14{
13 const u8 *bp = ((u8 *) dm) + dm->length; 15 const u8 *bp = ((u8 *) dm) + dm->length;
@@ -21,11 +23,16 @@ static char * __init dmi_string(const struct dmi_header *dm, u8 s)
21 } 23 }
22 24
23 if (*bp != 0) { 25 if (*bp != 0) {
24 str = dmi_alloc(strlen(bp) + 1); 26 size_t len = strlen(bp)+1;
27 size_t cmp_len = len > 8 ? 8 : len;
28
29 if (!memcmp(bp, dmi_empty_string, cmp_len))
30 return dmi_empty_string;
31 str = dmi_alloc(len);
25 if (str != NULL) 32 if (str != NULL)
26 strcpy(str, bp); 33 strcpy(str, bp);
27 else 34 else
28 printk(KERN_ERR "dmi_string: out of memory.\n"); 35 printk(KERN_ERR "dmi_string: cannot allocate %Zu bytes.\n", len);
29 } 36 }
30 } 37 }
31 38
@@ -175,12 +182,23 @@ static void __init dmi_save_devices(const struct dmi_header *dm)
175 } 182 }
176} 183}
177 184
185static struct dmi_device empty_oem_string_dev = {
186 .name = dmi_empty_string,
187};
188
178static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm) 189static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm)
179{ 190{
180 int i, count = *(u8 *)(dm + 1); 191 int i, count = *(u8 *)(dm + 1);
181 struct dmi_device *dev; 192 struct dmi_device *dev;
182 193
183 for (i = 1; i <= count; i++) { 194 for (i = 1; i <= count; i++) {
195 char *devname = dmi_string(dm, i);
196
197 if (!strcmp(devname, dmi_empty_string)) {
198 list_add(&empty_oem_string_dev.list, &dmi_devices);
199 continue;
200 }
201
184 dev = dmi_alloc(sizeof(*dev)); 202 dev = dmi_alloc(sizeof(*dev));
185 if (!dev) { 203 if (!dev) {
186 printk(KERN_ERR 204 printk(KERN_ERR
@@ -189,7 +207,7 @@ static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm)
189 } 207 }
190 208
191 dev->type = DMI_DEV_TYPE_OEM_STRING; 209 dev->type = DMI_DEV_TYPE_OEM_STRING;
192 dev->name = dmi_string(dm, i); 210 dev->name = devname;
193 dev->device_data = NULL; 211 dev->device_data = NULL;
194 212
195 list_add(&dev->list, &dmi_devices); 213 list_add(&dev->list, &dmi_devices);
@@ -331,9 +349,11 @@ void __init dmi_scan_machine(void)
331 rc = dmi_present(q); 349 rc = dmi_present(q);
332 if (!rc) { 350 if (!rc) {
333 dmi_available = 1; 351 dmi_available = 1;
352 dmi_iounmap(p, 0x10000);
334 return; 353 return;
335 } 354 }
336 } 355 }
356 dmi_iounmap(p, 0x10000);
337 } 357 }
338 out: printk(KERN_INFO "DMI not present or invalid.\n"); 358 out: printk(KERN_INFO "DMI not present or invalid.\n");
339} 359}
diff --git a/drivers/ieee1394/Makefile b/drivers/ieee1394/Makefile
index 489c133664d5..1f8153b57503 100644
--- a/drivers/ieee1394/Makefile
+++ b/drivers/ieee1394/Makefile
@@ -15,3 +15,4 @@ obj-$(CONFIG_IEEE1394_SBP2) += sbp2.o
15obj-$(CONFIG_IEEE1394_DV1394) += dv1394.o 15obj-$(CONFIG_IEEE1394_DV1394) += dv1394.o
16obj-$(CONFIG_IEEE1394_ETH1394) += eth1394.o 16obj-$(CONFIG_IEEE1394_ETH1394) += eth1394.o
17 17
18obj-$(CONFIG_PROVIDE_OHCI1394_DMA_INIT) += init_ohci1394_dma.o
diff --git a/drivers/ieee1394/init_ohci1394_dma.c b/drivers/ieee1394/init_ohci1394_dma.c
new file mode 100644
index 000000000000..ddaab6eb8ace
--- /dev/null
+++ b/drivers/ieee1394/init_ohci1394_dma.c
@@ -0,0 +1,285 @@
1/*
2 * init_ohci1394_dma.c - Initializes physical DMA on all OHCI 1394 controllers
3 *
4 * Copyright (C) 2006-2007 Bernhard Kaindl <bk@suse.de>
5 *
6 * Derived from drivers/ieee1394/ohci1394.c and arch/x86/kernel/early-quirks.c
7 * this file has functions to:
8 * - scan the PCI very early on boot for all OHCI 1394-compliant controllers
9 * - reset and initialize them and make them join the IEEE1394 bus and
10 * - enable physical DMA on them to allow remote debugging
11 *
12 * All code and data is marked as __init and __initdata, respective as
13 * during boot, all OHCI1394 controllers may be claimed by the firewire
14 * stack and at this point, this code should not touch them anymore.
15 *
16 * To use physical DMA after the initialization of the firewire stack,
17 * be sure that the stack enables it and (re-)attach after the bus reset
18 * which may be caused by the firewire stack initialization.
19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software Foundation,
32 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
33 */
34
35#include <linux/interrupt.h> /* for ohci1394.h */
36#include <linux/delay.h>
37#include <linux/pci.h> /* for PCI defines */
38#include <linux/init_ohci1394_dma.h>
39#include <asm/pci-direct.h> /* for direct PCI config space access */
40#include <asm/fixmap.h>
41
42#include "ieee1394_types.h"
43#include "ohci1394.h"
44
45int __initdata init_ohci1394_dma_early;
46
47/* Reads a PHY register of an OHCI-1394 controller */
48static inline u8 __init get_phy_reg(struct ti_ohci *ohci, u8 addr)
49{
50 int i;
51 quadlet_t r;
52
53 reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);
54
55 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
56 if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
57 break;
58 mdelay(1);
59 }
60 r = reg_read(ohci, OHCI1394_PhyControl);
61
62 return (r & 0x00ff0000) >> 16;
63}
64
65/* Writes to a PHY register of an OHCI-1394 controller */
66static inline void __init set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
67{
68 int i;
69
70 reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);
71
72 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
73 u32 r = reg_read(ohci, OHCI1394_PhyControl);
74 if (!(r & 0x00004000))
75 break;
76 mdelay(1);
77 }
78}
79
80/* Resets an OHCI-1394 controller (for sane state before initialization) */
81static inline void __init init_ohci1394_soft_reset(struct ti_ohci *ohci) {
82 int i;
83
84 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
85
86 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
87 if (!(reg_read(ohci, OHCI1394_HCControlSet)
88 & OHCI1394_HCControl_softReset))
89 break;
90 mdelay(1);
91 }
92}
93
94/* Basic OHCI-1394 register and port initialization */
95static inline void __init init_ohci1394_initialize(struct ti_ohci *ohci)
96{
97 quadlet_t bus_options;
98 int num_ports, i;
99
100	/* Set some defaults for these undefined bus options */
101 bus_options = reg_read(ohci, OHCI1394_BusOptions);
102 bus_options |= 0x60000000; /* Enable CMC and ISC */
103 bus_options &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
104 bus_options &= ~0x18000000; /* Disable PMC and BMC */
105 reg_write(ohci, OHCI1394_BusOptions, bus_options);
106
107 /* Set the bus number */
108 reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);
109
110 /* Enable posted writes */
111 reg_write(ohci, OHCI1394_HCControlSet,
112 OHCI1394_HCControl_postedWriteEnable);
113
114 /* Clear link control register */
115 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
116
117 /* enable phys */
118 reg_write(ohci, OHCI1394_LinkControlSet,
119 OHCI1394_LinkControl_RcvPhyPkt);
120
121 /* Don't accept phy packets into AR request context */
122 reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);
123
124	/* Clear the isochronous interrupt masks */
125 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
126 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
127 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
128 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
129
130	/* Accept asynchronous transfer requests from all nodes for now */
131	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
132
133	/* Specify asynchronous transfer retries */
134 reg_write(ohci, OHCI1394_ATRetries,
135 OHCI1394_MAX_AT_REQ_RETRIES |
136 (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
137 (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));
138
139 /* We don't want hardware swapping */
140 reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);
141
142 /* Enable link */
143 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);
144
145 /* If anything is connected to a port, make sure it is enabled */
146 num_ports = get_phy_reg(ohci, 2) & 0xf;
147 for (i = 0; i < num_ports; i++) {
148 unsigned int status;
149
150 set_phy_reg(ohci, 7, i);
151 status = get_phy_reg(ohci, 8);
152
153 if (status & 0x20)
154 set_phy_reg(ohci, 8, status & ~1);
155 }
156}
157
158/**
159 * init_ohci1394_wait_for_busresets - wait until bus resets are completed
160 *
161 * OHCI1394 initialization itself and any device going on- or offline
162 * and any cable issue cause an IEEE1394 bus reset. The OHCI1394 spec
163 * specifies that physical DMA is disabled on each bus reset and it
164 * has to be enabled after each bus reset when needed. We resort
165 * to polling here because on early boot, we have no interrupts.
166 */
167static inline void __init init_ohci1394_wait_for_busresets(struct ti_ohci *ohci)
168{
169 int i, events;
170
171	for (i = 0; i < 9; i++) {
172 mdelay(200);
173 events = reg_read(ohci, OHCI1394_IntEventSet);
174 if (events & OHCI1394_busReset)
175 reg_write(ohci, OHCI1394_IntEventClear,
176 OHCI1394_busReset);
177 }
178}
179
180/**
181 * init_ohci1394_enable_physical_dma - Enable physical DMA for remote debugging
182 * This enables remote DMA access over IEEE1394 from every host for the low
183 * 4GB of address space. DMA accesses above 4GB are not available currently.
184 */
185static inline void __init init_ohci1394_enable_physical_dma(struct ti_ohci *hci)
186{
187 reg_write(hci, OHCI1394_PhyReqFilterHiSet, 0xffffffff);
188 reg_write(hci, OHCI1394_PhyReqFilterLoSet, 0xffffffff);
189 reg_write(hci, OHCI1394_PhyUpperBound, 0xffff0000);
190}
191
192/**
193 * init_ohci1394_reset_and_init_dma - init controller and enable DMA
194 * This initializes the given controller and enables its physical DMA engine.
195 */
196static inline void __init init_ohci1394_reset_and_init_dma(struct ti_ohci *ohci)
197{
198 /* Start off with a soft reset, clears everything to a sane state. */
199 init_ohci1394_soft_reset(ohci);
200
201 /* Accessing some registers without LPS enabled may cause lock up */
202 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
203
204 /* Disable and clear interrupts */
205 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
206 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
207
208 mdelay(50); /* Wait 50msec to make sure we have full link enabled */
209
210 init_ohci1394_initialize(ohci);
211 /*
212 * The initialization causes at least one IEEE1394 bus reset. Enabling
213 * physical DMA only works *after* *all* bus resets have calmed down:
214 */
215 init_ohci1394_wait_for_busresets(ohci);
216
217 /* We had to wait and do this now if we want to debug early problems */
218 init_ohci1394_enable_physical_dma(ohci);
219}
220
221/**
222 * init_ohci1394_controller - Map the registers of the controller and init DMA
223 * This maps the registers of the specified controller and initializes it
224 */
225static inline void __init init_ohci1394_controller(int num, int slot, int func)
226{
227 unsigned long ohci_base;
228 struct ti_ohci ohci;
229
230 printk(KERN_INFO "init_ohci1394_dma: initializing OHCI-1394"
231 " at %02x:%02x.%x\n", num, slot, func);
232
233 ohci_base = read_pci_config(num, slot, func, PCI_BASE_ADDRESS_0+(0<<2))
234 & PCI_BASE_ADDRESS_MEM_MASK;
235
236 set_fixmap_nocache(FIX_OHCI1394_BASE, ohci_base);
237
238 ohci.registers = (void *)fix_to_virt(FIX_OHCI1394_BASE);
239
240 init_ohci1394_reset_and_init_dma(&ohci);
241}
242
243/**
244 * init_ohci1394_dma_on_all_controllers - scan for OHCI1394 controllers and init DMA on them
245 * Scans the whole PCI space for OHCI1394 controllers and inits DMA on them
246 */
247void __init init_ohci1394_dma_on_all_controllers(void)
248{
249 int num, slot, func;
250
251 if (!early_pci_allowed())
252 return;
253
254 /* Poor man's PCI discovery, the only thing we can do at early boot */
255 for (num = 0; num < 32; num++) {
256 for (slot = 0; slot < 32; slot++) {
257 for (func = 0; func < 8; func++) {
258				u32 class = read_pci_config(num, slot, func,
259 PCI_CLASS_REVISION);
260				if (class == 0xffffffff)
261 continue; /* No device at this func */
262
263 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
264 continue; /* Not an OHCI-1394 device */
265
266 init_ohci1394_controller(num, slot, func);
267 break; /* Assume one controller per device */
268 }
269 }
270 }
271 printk(KERN_INFO "init_ohci1394_dma: finished initializing OHCI DMA\n");
272}
273
274/**
275 * setup_ohci1394_dma - enables early OHCI1394 DMA initialization
276 */
277static int __init setup_ohci1394_dma(char *opt)
278{
279 if (!strcmp(opt, "early"))
280 init_ohci1394_dma_early = 1;
281 return 0;
282}
283
284/* passing ohci1394_dma=early on boot causes early OHCI1394 DMA initialization */
285early_param("ohci1394_dma", setup_ohci1394_dma);
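For reference, a minimal sketch of how the facility above is expected to be consumed: the early x86 setup path (the actual call site is not part of this hunk, so its placement here is an assumption) checks the flag set by the ohci1394_dma=early parameter and, if set, scans and initializes all controllers before the regular driver stack loads.

	/* Sketch only -- assumed early-boot hook, not shown in this diff. */
	#include <linux/init_ohci1394_dma.h>

	#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
		if (init_ohci1394_dma_early)
			init_ohci1394_dma_on_all_controllers();
	#endif

With CONFIG_PROVIDE_OHCI1394_DMA_INIT=y and ohci1394_dma=early on the kernel command line, every OHCI-1394 controller is then left with physical DMA enabled, so a second machine on the FireWire bus can read and write the low 4GB of memory for remote debugging.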
diff --git a/drivers/input/mouse/pc110pad.c b/drivers/input/mouse/pc110pad.c
index 8991ab0b4fe3..61cff8374e6c 100644
--- a/drivers/input/mouse/pc110pad.c
+++ b/drivers/input/mouse/pc110pad.c
@@ -39,6 +39,7 @@
39#include <linux/init.h> 39#include <linux/init.h>
40#include <linux/interrupt.h> 40#include <linux/interrupt.h>
41#include <linux/pci.h> 41#include <linux/pci.h>
42#include <linux/delay.h>
42 43
43#include <asm/io.h> 44#include <asm/io.h>
44#include <asm/irq.h> 45#include <asm/irq.h>
@@ -62,8 +63,10 @@ static irqreturn_t pc110pad_interrupt(int irq, void *ptr)
62 int value = inb_p(pc110pad_io); 63 int value = inb_p(pc110pad_io);
63 int handshake = inb_p(pc110pad_io + 2); 64 int handshake = inb_p(pc110pad_io + 2);
64 65
65 outb_p(handshake | 1, pc110pad_io + 2); 66 outb(handshake | 1, pc110pad_io + 2);
66 outb_p(handshake & ~1, pc110pad_io + 2); 67 udelay(2);
68 outb(handshake & ~1, pc110pad_io + 2);
69 udelay(2);
67 inb_p(0x64); 70 inb_p(0x64);
68 71
69 pc110pad_data[pc110pad_count++] = value; 72 pc110pad_data[pc110pad_count++] = value;
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index 4e04e49a2f1c..ced4ac1955db 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -290,7 +290,7 @@ static void svm_hardware_enable(void *garbage)
290#ifdef CONFIG_X86_64 290#ifdef CONFIG_X86_64
291 struct desc_ptr gdt_descr; 291 struct desc_ptr gdt_descr;
292#else 292#else
293 struct Xgt_desc_struct gdt_descr; 293 struct desc_ptr gdt_descr;
294#endif 294#endif
295 struct desc_struct *gdt; 295 struct desc_struct *gdt;
296 int me = raw_smp_processor_id(); 296 int me = raw_smp_processor_id();
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index bb56ae3f89b6..5b397b6c9f93 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -524,7 +524,7 @@ static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
524static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) 524static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
525{ 525{
526 if (vcpu->rmode.active) 526 if (vcpu->rmode.active)
527 rflags |= IOPL_MASK | X86_EFLAGS_VM; 527 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
528 vmcs_writel(GUEST_RFLAGS, rflags); 528 vmcs_writel(GUEST_RFLAGS, rflags);
529} 529}
530 530
@@ -1050,7 +1050,7 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
1050 vmcs_write32(GUEST_TR_AR_BYTES, vcpu->rmode.tr.ar); 1050 vmcs_write32(GUEST_TR_AR_BYTES, vcpu->rmode.tr.ar);
1051 1051
1052 flags = vmcs_readl(GUEST_RFLAGS); 1052 flags = vmcs_readl(GUEST_RFLAGS);
1053 flags &= ~(IOPL_MASK | X86_EFLAGS_VM); 1053 flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
1054 flags |= (vcpu->rmode.save_iopl << IOPL_SHIFT); 1054 flags |= (vcpu->rmode.save_iopl << IOPL_SHIFT);
1055 vmcs_writel(GUEST_RFLAGS, flags); 1055 vmcs_writel(GUEST_RFLAGS, flags);
1056 1056
@@ -1107,9 +1107,9 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
1107 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); 1107 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
1108 1108
1109 flags = vmcs_readl(GUEST_RFLAGS); 1109 flags = vmcs_readl(GUEST_RFLAGS);
1110 vcpu->rmode.save_iopl = (flags & IOPL_MASK) >> IOPL_SHIFT; 1110 vcpu->rmode.save_iopl = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1111 1111
1112 flags |= IOPL_MASK | X86_EFLAGS_VM; 1112 flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
1113 1113
1114 vmcs_writel(GUEST_RFLAGS, flags); 1114 vmcs_writel(GUEST_RFLAGS, flags);
1115 vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME); 1115 vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
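The vmx.c hunks above are a rename only: assuming the usual x86 definitions, where X86_EFLAGS_IOPL covers the two-bit I/O privilege level field at EFLAGS bits 13:12 and IOPL_SHIFT is 12, saving and restoring the guest IOPL behaves exactly as before, e.g.:

	/* Illustrative fragment; same effect with either mask name. */
	save_iopl = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;	/* stash the IOPL field   */
	flags    &= ~X86_EFLAGS_IOPL;				/* clear it               */
	flags    |= save_iopl << IOPL_SHIFT;			/* put it back on restore */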
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 96d0fd07c57d..44adb00e1490 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -94,7 +94,7 @@ static void copy_in_guest_info(struct lguest *lg, struct lguest_pages *pages)
94 /* Set up the two "TSS" members which tell the CPU what stack to use 94 /* Set up the two "TSS" members which tell the CPU what stack to use
95 * for traps which do directly into the Guest (ie. traps at privilege 95 * for traps which do directly into the Guest (ie. traps at privilege
96 * level 1). */ 96 * level 1). */
97 pages->state.guest_tss.esp1 = lg->esp1; 97 pages->state.guest_tss.sp1 = lg->esp1;
98 pages->state.guest_tss.ss1 = lg->ss1; 98 pages->state.guest_tss.ss1 = lg->ss1;
99 99
100 /* Copy direct-to-Guest trap entries. */ 100 /* Copy direct-to-Guest trap entries. */
@@ -416,7 +416,7 @@ void __init lguest_arch_host_init(void)
416 /* We know where we want the stack to be when the Guest enters 416 /* We know where we want the stack to be when the Guest enters
417 * the switcher: in pages->regs. The stack grows upwards, so 417 * the switcher: in pages->regs. The stack grows upwards, so
418 * we start it at the end of that structure. */ 418 * we start it at the end of that structure. */
419 state->guest_tss.esp0 = (long)(&pages->regs + 1); 419 state->guest_tss.sp0 = (long)(&pages->regs + 1);
420 /* And this is the GDT entry to use for the stack: we keep a 420 /* And this is the GDT entry to use for the stack: we keep a
421 * couple of special LGUEST entries. */ 421 * couple of special LGUEST entries. */
422 state->guest_tss.ss0 = LGUEST_DS; 422 state->guest_tss.ss0 = LGUEST_DS;
diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
index 5dba68fe33f5..a8364d815222 100644
--- a/drivers/pnp/pnpbios/bioscalls.c
+++ b/drivers/pnp/pnpbios/bioscalls.c
@@ -61,7 +61,7 @@ set_base(gdt[(selname) >> 3], (u32)(address)); \
61set_limit(gdt[(selname) >> 3], size); \ 61set_limit(gdt[(selname) >> 3], size); \
62} while(0) 62} while(0)
63 63
64static struct desc_struct bad_bios_desc = { 0, 0x00409200 }; 64static struct desc_struct bad_bios_desc;
65 65
66/* 66/*
67 * At some point we want to use this stack frame pointer to unwind 67 * At some point we want to use this stack frame pointer to unwind
@@ -477,6 +477,9 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
477 pnp_bios_callpoint.offset = header->fields.pm16offset; 477 pnp_bios_callpoint.offset = header->fields.pm16offset;
478 pnp_bios_callpoint.segment = PNP_CS16; 478 pnp_bios_callpoint.segment = PNP_CS16;
479 479
480 bad_bios_desc.a = 0;
481 bad_bios_desc.b = 0x00409200;
482
480 set_base(bad_bios_desc, __va((unsigned long)0x40 << 4)); 483 set_base(bad_bios_desc, __va((unsigned long)0x40 << 4));
481 _set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4)); 484 _set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4));
482 for (i = 0; i < NR_CPUS; i++) { 485 for (i = 0; i < NR_CPUS; i++) {
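The bioscalls.c change swaps a positional initializer for runtime assignment; presumably the unified struct desc_struct no longer guarantees that { 0, 0x00409200 } lands in the intended words, so the descriptor is built explicitly before use. A rough sketch of the intent, with the .a/.b field names taken from the hunk above (low and high 32 bits of the GDT entry):

	/* bad_bios_desc lives in .bss, so it starts out zeroed. */
	bad_bios_desc.a = 0;		/* base 15:0 = 0, limit 15:0 = 0              */
	bad_bios_desc.b = 0x00409200;	/* present, DPL 0, writable 32-bit data seg.  */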
diff --git a/drivers/video/vermilion/vermilion.c b/drivers/video/vermilion/vermilion.c
index c31f549ebea0..1c656667b937 100644
--- a/drivers/video/vermilion/vermilion.c
+++ b/drivers/video/vermilion/vermilion.c
@@ -88,9 +88,7 @@ static int vmlfb_alloc_vram_area(struct vram_area *va, unsigned max_order,
88{ 88{
89 gfp_t flags; 89 gfp_t flags;
90 unsigned long i; 90 unsigned long i;
91 pgprot_t wc_pageprot;
92 91
93 wc_pageprot = PAGE_KERNEL_NOCACHE;
94 max_order++; 92 max_order++;
95 do { 93 do {
96 /* 94 /*
@@ -126,14 +124,8 @@ static int vmlfb_alloc_vram_area(struct vram_area *va, unsigned max_order,
126 /* 124 /*
127 * Change caching policy of the linear kernel map to avoid 125 * Change caching policy of the linear kernel map to avoid
128 * mapping type conflicts with user-space mappings. 126 * mapping type conflicts with user-space mappings.
129 * The first global_flush_tlb() is really only there to do a global
130 * wbinvd().
131 */ 127 */
132 128 set_pages_uc(virt_to_page(va->logical), va->size >> PAGE_SHIFT);
133 global_flush_tlb();
134 change_page_attr(virt_to_page(va->logical), va->size >> PAGE_SHIFT,
135 wc_pageprot);
136 global_flush_tlb();
137 129
138 printk(KERN_DEBUG MODULE_NAME 130 printk(KERN_DEBUG MODULE_NAME
139 ": Allocated %ld bytes vram area at 0x%08lx\n", 131 ": Allocated %ld bytes vram area at 0x%08lx\n",
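The hunk above and the one below move vermilion off the old change_page_attr()/global_flush_tlb() pair; the assumption reflected here is that the newer helpers perform the required cache and TLB flushing internally, which is why the explicit global_flush_tlb() calls disappear:

	/* alloc path: linear-map pages of the VRAM area become uncached */
	set_pages_uc(virt_to_page(va->logical), va->size >> PAGE_SHIFT);
	/* free path: restore normal write-back caching */
	set_pages_wb(virt_to_page(va->logical), va->size >> PAGE_SHIFT);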
@@ -157,9 +149,8 @@ static void vmlfb_free_vram_area(struct vram_area *va)
157 * Reset the linear kernel map caching policy. 149 * Reset the linear kernel map caching policy.
158 */ 150 */
159 151
160 change_page_attr(virt_to_page(va->logical), 152 set_pages_wb(virt_to_page(va->logical),
161 va->size >> PAGE_SHIFT, PAGE_KERNEL); 153 va->size >> PAGE_SHIFT);
162 global_flush_tlb();
163 154
164 /* 155 /*
165 * Decrease the usage count on the pages we've used 156 * Decrease the usage count on the pages we've used