author    Paul Mackerras <paulus@samba.org>  2008-01-30 19:25:51 -0500
committer Paul Mackerras <paulus@samba.org>  2008-01-30 19:25:51 -0500
commit    bd45ac0c5daae35e7c71138172e63df5cf644cf6 (patch)
tree      5eb5a599bf6a9d7a8a34e802db932aa9e9555de4 /drivers/char
parent    4eece4ccf997c0e6d8fdad3d842e37b16b8d705f (diff)
parent    5bdeae46be6dfe9efa44a548bd622af325f4bdb4 (diff)

Merge branch 'linux-2.6'

Diffstat (limited to 'drivers/char')
-rw-r--r--  drivers/char/Kconfig                 |   10
-rw-r--r--  drivers/char/Makefile                |    1
-rw-r--r--  drivers/char/agp/ali-agp.c           |    2
-rw-r--r--  drivers/char/agp/backend.c           |    3
-rw-r--r--  drivers/char/agp/generic.c           |    3
-rw-r--r--  drivers/char/agp/i460-agp.c          |    2
-rw-r--r--  drivers/char/agp/intel-agp.c         |   22
-rw-r--r--  drivers/char/drm/drm_pciids.h        |    1
-rw-r--r--  drivers/char/hpet.c                  |  126
-rw-r--r--  drivers/char/hvc_console.c           |   80
-rw-r--r--  drivers/char/hvcs.c                  |   78
-rw-r--r--  drivers/char/hw_random/amd-rng.c     |   12
-rw-r--r--  drivers/char/hw_random/core.c        |   24
-rw-r--r--  drivers/char/hw_random/geode-rng.c   |   12
-rw-r--r--  drivers/char/hw_random/intel-rng.c   |   15
-rw-r--r--  drivers/char/hw_random/omap-rng.c    |   13
-rw-r--r--  drivers/char/hw_random/pasemi-rng.c  |   16
-rw-r--r--  drivers/char/hw_random/via-rng.c     |   19
-rw-r--r--  drivers/char/nozomi.c                | 1993
-rw-r--r--  drivers/char/rtc.c                   |  253

20 files changed, 2335 insertions(+), 350 deletions(-)
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 2e3a0d4bc4c2..466629594776 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -373,6 +373,16 @@ config ISTALLION
 	  To compile this driver as a module, choose M here: the
 	  module will be called istallion.
 
+config NOZOMI
+	tristate "HSDPA Broadband Wireless Data Card - Globe Trotter"
+	depends on PCI && EXPERIMENTAL
+	help
+	  If you have a HSDPA driver Broadband Wireless Data Card -
+	  Globe Trotter PCMCIA card, say Y here.
+
+	  To compile this driver as a module, choose M here, the module
+	  will be called nozomi.
+
 config A2232
 	tristate "Commodore A2232 serial support (EXPERIMENTAL)"
 	depends on EXPERIMENTAL && ZORRO && BROKEN_ON_SMP
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 07304d50e0cb..96fc01eddefe 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_SERIAL167) += serial167.o
 obj-$(CONFIG_CYCLADES)		+= cyclades.o
 obj-$(CONFIG_STALLION)		+= stallion.o
 obj-$(CONFIG_ISTALLION)	+= istallion.o
+obj-$(CONFIG_NOZOMI)		+= nozomi.o
 obj-$(CONFIG_DIGIEPCA)		+= epca.o
 obj-$(CONFIG_SPECIALIX)	+= specialix.o
 obj-$(CONFIG_MOXA_INTELLIO)	+= moxa.o
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index aa5ddb716ffb..1ffb381130c3 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -145,7 +145,6 @@ static void *m1541_alloc_page(struct agp_bridge_data *bridge)
 	void *addr = agp_generic_alloc_page(agp_bridge);
 	u32 temp;
 
-	global_flush_tlb();
 	if (!addr)
 		return NULL;
 
@@ -162,7 +161,6 @@ static void ali_destroy_page(void * addr, int flags)
 	if (flags & AGP_PAGE_DESTROY_UNMAP) {
 		global_cache_flush();	/* is this really needed?  --hch */
 		agp_generic_destroy_page(addr, flags);
-		global_flush_tlb();
 	} else
 		agp_generic_destroy_page(addr, flags);
 }
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 832ded20fe70..2720882e66fe 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -147,7 +147,6 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
 		printk(KERN_ERR PFX "unable to get memory for scratch page.\n");
 		return -ENOMEM;
 	}
-	flush_agp_mappings();
 
 	bridge->scratch_page_real = virt_to_gart(addr);
 	bridge->scratch_page =
@@ -191,7 +190,6 @@ err_out:
 	if (bridge->driver->needs_scratch_page) {
 		bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
 				 AGP_PAGE_DESTROY_UNMAP);
-		flush_agp_mappings();
 		bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
 				 AGP_PAGE_DESTROY_FREE);
 	}
@@ -219,7 +217,6 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge)
 	    bridge->driver->needs_scratch_page) {
 		bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
 				 AGP_PAGE_DESTROY_UNMAP);
-		flush_agp_mappings();
 		bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
 				 AGP_PAGE_DESTROY_FREE);
 	}
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 64b2f6d7059d..1a4674ce0c71 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -197,7 +197,6 @@ void agp_free_memory(struct agp_memory *curr)
 	for (i = 0; i < curr->page_count; i++) {
 		curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]), AGP_PAGE_DESTROY_UNMAP);
 	}
-	flush_agp_mappings();
 	for (i = 0; i < curr->page_count; i++) {
 		curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]), AGP_PAGE_DESTROY_FREE);
 	}
@@ -267,8 +266,6 @@ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
 	}
 	new->bridge = bridge;
 
-	flush_agp_mappings();
-
 	return new;
 }
 EXPORT_SYMBOL(agp_allocate_memory);
diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c
index e72a83e2bad5..76f581c85a7d 100644
--- a/drivers/char/agp/i460-agp.c
+++ b/drivers/char/agp/i460-agp.c
@@ -527,7 +527,6 @@ static void *i460_alloc_page (struct agp_bridge_data *bridge)
 
 	if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
 		page = agp_generic_alloc_page(agp_bridge);
-		global_flush_tlb();
 	} else
 		/* Returning NULL would cause problems */
 		/* AK: really dubious code. */
@@ -539,7 +538,6 @@ static void i460_destroy_page (void *page, int flags)
 {
 	if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
 		agp_generic_destroy_page(page, flags);
-		global_flush_tlb();
 	}
 }
 
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index d87961993ccf..189efb6ef970 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -10,6 +10,8 @@
 #include <linux/agp_backend.h>
 #include "agp.h"
 
+#define PCI_DEVICE_ID_INTEL_E7221_HB	0x2588
+#define PCI_DEVICE_ID_INTEL_E7221_IG	0x258a
 #define PCI_DEVICE_ID_INTEL_82946GZ_HB      0x2970
 #define PCI_DEVICE_ID_INTEL_82946GZ_IG      0x2972
 #define PCI_DEVICE_ID_INTEL_82965G_1_HB     0x2980
@@ -208,13 +210,11 @@ static void *i8xx_alloc_pages(void)
 	if (page == NULL)
 		return NULL;
 
-	if (change_page_attr(page, 4, PAGE_KERNEL_NOCACHE) < 0) {
-		change_page_attr(page, 4, PAGE_KERNEL);
-		global_flush_tlb();
+	if (set_pages_uc(page, 4) < 0) {
+		set_pages_wb(page, 4);
 		__free_pages(page, 2);
 		return NULL;
 	}
-	global_flush_tlb();
 	get_page(page);
 	atomic_inc(&agp_bridge->current_memory_agp);
 	return page_address(page);
@@ -228,8 +228,7 @@ static void i8xx_destroy_pages(void *addr)
 		return;
 
 	page = virt_to_page(addr);
-	change_page_attr(page, 4, PAGE_KERNEL);
-	global_flush_tlb();
+	set_pages_wb(page, 4);
 	put_page(page);
 	__free_pages(page, 2);
 	atomic_dec(&agp_bridge->current_memory_agp);
@@ -339,7 +338,6 @@ static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
 
 	switch (pg_count) {
 	case 1: addr = agp_bridge->driver->agp_alloc_page(agp_bridge);
-		global_flush_tlb();
 		break;
 	case 4:
 		/* kludge to get 4 physical pages for ARGB cursor */
@@ -402,7 +400,6 @@ static void intel_i810_free_by_type(struct agp_memory *curr)
 		else {
 			agp_bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[0]),
 					     AGP_PAGE_DESTROY_UNMAP);
-			global_flush_tlb();
 			agp_bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[0]),
 					     AGP_PAGE_DESTROY_FREE);
 		}
@@ -526,7 +523,8 @@ static void intel_i830_init_gtt_entries(void)
 		break;
 	case I915_GMCH_GMS_STOLEN_48M:
 		/* Check it's really I915G */
-		if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB ||
+		if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB ||
+		    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB ||
 		    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB ||
 		    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB ||
 		    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB ||
@@ -538,7 +536,8 @@ static void intel_i830_init_gtt_entries(void)
 		break;
 	case I915_GMCH_GMS_STOLEN_64M:
 		/* Check it's really I915G */
-		if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB ||
+		if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB ||
+		    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB ||
 		    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB ||
 		    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB ||
 		    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB ||
@@ -1854,6 +1853,8 @@ static const struct intel_driver_description {
 	{ PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, 0, "865",
 		&intel_845_driver, &intel_830_driver },
 	{ PCI_DEVICE_ID_INTEL_82875_HB, 0, 0, "i875", &intel_845_driver, NULL },
+	{ PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, 0, "E7221 (i915)",
+		NULL, &intel_915_driver },
 	{ PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, 0, "915G",
 		NULL, &intel_915_driver },
 	{ PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, 0, "915GM",
@@ -2059,6 +2060,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
 	ID(PCI_DEVICE_ID_INTEL_82875_HB),
 	ID(PCI_DEVICE_ID_INTEL_7505_0),
 	ID(PCI_DEVICE_ID_INTEL_7205_0),
+	ID(PCI_DEVICE_ID_INTEL_E7221_HB),
 	ID(PCI_DEVICE_ID_INTEL_82915G_HB),
 	ID(PCI_DEVICE_ID_INTEL_82915GM_HB),
 	ID(PCI_DEVICE_ID_INTEL_82945G_HB),
diff --git a/drivers/char/drm/drm_pciids.h b/drivers/char/drm/drm_pciids.h
index f3593974496c..43d3c42df360 100644
--- a/drivers/char/drm/drm_pciids.h
+++ b/drivers/char/drm/drm_pciids.h
@@ -297,6 +297,7 @@
 	{0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0x8086, 0x2582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+	{0x8086, 0x258a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0x8086, 0x27a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 4c16778e3f84..465ad35ed38f 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -600,63 +600,6 @@ static int hpet_is_known(struct hpet_data *hdp)
 	return 0;
 }
 
-EXPORT_SYMBOL(hpet_alloc);
-EXPORT_SYMBOL(hpet_register);
-EXPORT_SYMBOL(hpet_unregister);
-EXPORT_SYMBOL(hpet_control);
-
-int hpet_register(struct hpet_task *tp, int periodic)
-{
-	unsigned int i;
-	u64 mask;
-	struct hpet_timer __iomem *timer;
-	struct hpet_dev *devp;
-	struct hpets *hpetp;
-
-	switch (periodic) {
-	case 1:
-		mask = Tn_PER_INT_CAP_MASK;
-		break;
-	case 0:
-		mask = 0;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	tp->ht_opaque = NULL;
-
-	spin_lock_irq(&hpet_task_lock);
-	spin_lock(&hpet_lock);
-
-	for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next)
-		for (timer = hpetp->hp_hpet->hpet_timers, i = 0;
-		     i < hpetp->hp_ntimer; i++, timer++) {
-			if ((readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK)
-			    != mask)
-				continue;
-
-			devp = &hpetp->hp_dev[i];
-
-			if (devp->hd_flags & HPET_OPEN || devp->hd_task) {
-				devp = NULL;
-				continue;
-			}
-
-			tp->ht_opaque = devp;
-			devp->hd_task = tp;
-			break;
-		}
-
-	spin_unlock(&hpet_lock);
-	spin_unlock_irq(&hpet_task_lock);
-
-	if (tp->ht_opaque)
-		return 0;
-	else
-		return -EBUSY;
-}
-
 static inline int hpet_tpcheck(struct hpet_task *tp)
 {
 	struct hpet_dev *devp;
@@ -706,24 +649,6 @@ int hpet_unregister(struct hpet_task *tp)
 	return 0;
 }
 
-int hpet_control(struct hpet_task *tp, unsigned int cmd, unsigned long arg)
-{
-	struct hpet_dev *devp;
-	int err;
-
-	if ((err = hpet_tpcheck(tp)))
-		return err;
-
-	spin_lock_irq(&hpet_lock);
-	devp = tp->ht_opaque;
-	if (devp->hd_task != tp) {
-		spin_unlock_irq(&hpet_lock);
-		return -ENXIO;
-	}
-	spin_unlock_irq(&hpet_lock);
-	return hpet_ioctl_common(devp, cmd, arg, 1);
-}
-
 static ctl_table hpet_table[] = {
 	{
 	 .ctl_name = CTL_UNNUMBERED,
@@ -806,14 +731,14 @@ static unsigned long hpet_calibrate(struct hpets *hpetp)
 
 int hpet_alloc(struct hpet_data *hdp)
 {
-	u64 cap, mcfg;
+	u64 cap, mcfg, hpet_config;
 	struct hpet_dev *devp;
-	u32 i, ntimer;
+	u32 i, ntimer, irq;
 	struct hpets *hpetp;
 	size_t siz;
 	struct hpet __iomem *hpet;
 	static struct hpets *last = NULL;
-	unsigned long period;
+	unsigned long period, irq_bitmap;
 	unsigned long long temp;
 
 	/*
@@ -840,11 +765,47 @@ int hpet_alloc(struct hpet_data *hdp)
 	hpetp->hp_hpet_phys = hdp->hd_phys_address;
 
 	hpetp->hp_ntimer = hdp->hd_nirqs;
+	hpet = hpetp->hp_hpet;
 
-	for (i = 0; i < hdp->hd_nirqs; i++)
-		hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i];
+	/* Assign IRQs statically for legacy devices */
+	hpetp->hp_dev[0].hd_hdwirq = hdp->hd_irq[0];
+	hpetp->hp_dev[1].hd_hdwirq = hdp->hd_irq[1];
 
-	hpet = hpetp->hp_hpet;
+	/* Assign IRQs dynamically for the others */
+	for (i = 2, devp = &hpetp->hp_dev[2]; i < hdp->hd_nirqs; i++, devp++) {
+		struct hpet_timer __iomem *timer;
+
+		timer = &hpet->hpet_timers[devp - hpetp->hp_dev];
+
+		/* Check if there's already an IRQ assigned to the timer */
+		if (hdp->hd_irq[i]) {
+			hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i];
+			continue;
+		}
+
+		hpet_config = readq(&timer->hpet_config);
+		irq_bitmap = (hpet_config & Tn_INT_ROUTE_CAP_MASK)
+			>> Tn_INT_ROUTE_CAP_SHIFT;
+		if (!irq_bitmap)
+			irq = 0;	/* No valid IRQ Assignable */
+		else {
+			irq = find_first_bit(&irq_bitmap, 32);
+			do {
+				hpet_config |= irq << Tn_INT_ROUTE_CNF_SHIFT;
+				writeq(hpet_config, &timer->hpet_config);
+
+				/*
+				 * Verify whether we have written a valid
+				 * IRQ number by reading it back again
+				 */
+				hpet_config = readq(&timer->hpet_config);
+				if (irq == (hpet_config & Tn_INT_ROUTE_CNF_MASK)
+						>> Tn_INT_ROUTE_CNF_SHIFT)
+					break;	/* Success */
+			} while ((irq = (find_next_bit(&irq_bitmap, 32, irq))));
+		}
+		hpetp->hp_dev[i].hd_hdwirq = irq;
+	}
 
 	cap = readq(&hpet->hpet_cap);
 
@@ -875,7 +836,8 @@ int hpet_alloc(struct hpet_data *hdp)
 		hpetp->hp_which, hdp->hd_phys_address,
 		hpetp->hp_ntimer > 1 ? "s" : "");
 	for (i = 0; i < hpetp->hp_ntimer; i++)
-		printk("%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
+		printk("%s %d", i > 0 ? "," : "",
+				hpetp->hp_dev[i].hd_hdwirq);
 	printk("\n");
 
 	printk(KERN_INFO "hpet%u: %u %d-bit timers, %Lu Hz\n",
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 8252f8668538..480fae29c9b2 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -27,7 +27,7 @@
 #include <linux/init.h>
 #include <linux/kbd_kern.h>
 #include <linux/kernel.h>
-#include <linux/kobject.h>
+#include <linux/kref.h>
 #include <linux/kthread.h>
 #include <linux/list.h>
 #include <linux/module.h>
@@ -89,7 +89,7 @@ struct hvc_struct {
 	int irq_requested;
 	int irq;
 	struct list_head next;
-	struct kobject kobj; /* ref count & hvc_struct lifetime */
+	struct kref kref; /* ref count & hvc_struct lifetime */
 };
 
 /* dynamic list of hvc_struct instances */
@@ -110,7 +110,7 @@ static int last_hvc = -1;
 
 /*
  * Do not call this function with either the hvc_structs_lock or the hvc_struct
- * lock held. If successful, this function increments the kobject reference
+ * lock held. If successful, this function increments the kref reference
  * count against the target hvc_struct so it should be released when finished.
  */
 static struct hvc_struct *hvc_get_by_index(int index)
@@ -123,7 +123,7 @@ static struct hvc_struct *hvc_get_by_index(int index)
 	list_for_each_entry(hp, &hvc_structs, next) {
 		spin_lock_irqsave(&hp->lock, flags);
 		if (hp->index == index) {
-			kobject_get(&hp->kobj);
+			kref_get(&hp->kref);
 			spin_unlock_irqrestore(&hp->lock, flags);
 			spin_unlock(&hvc_structs_lock);
 			return hp;
@@ -242,6 +242,23 @@ static int __init hvc_console_init(void)
 }
 console_initcall(hvc_console_init);
 
+/* callback when the kboject ref count reaches zero. */
+static void destroy_hvc_struct(struct kref *kref)
+{
+	struct hvc_struct *hp = container_of(kref, struct hvc_struct, kref);
+	unsigned long flags;
+
+	spin_lock(&hvc_structs_lock);
+
+	spin_lock_irqsave(&hp->lock, flags);
+	list_del(&(hp->next));
+	spin_unlock_irqrestore(&hp->lock, flags);
+
+	spin_unlock(&hvc_structs_lock);
+
+	kfree(hp);
+}
+
 /*
  * hvc_instantiate() is an early console discovery method which locates
  * consoles * prior to the vio subsystem discovering them. Hotplugged
@@ -261,7 +278,7 @@ int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
 	/* make sure no no tty has been registered in this index */
 	hp = hvc_get_by_index(index);
 	if (hp) {
-		kobject_put(&hp->kobj);
+		kref_put(&hp->kref, destroy_hvc_struct);
 		return -1;
 	}
 
@@ -318,9 +335,8 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
 	unsigned long flags;
 	int irq = 0;
 	int rc = 0;
-	struct kobject *kobjp;
 
-	/* Auto increments kobject reference if found. */
+	/* Auto increments kref reference if found. */
 	if (!(hp = hvc_get_by_index(tty->index)))
 		return -ENODEV;
 
@@ -341,8 +357,6 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
 	if (irq)
 		hp->irq_requested = 1;
 
-	kobjp = &hp->kobj;
-
 	spin_unlock_irqrestore(&hp->lock, flags);
 	/* check error, fallback to non-irq */
 	if (irq)
@@ -352,7 +366,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
 	 * If the request_irq() fails and we return an error. The tty layer
 	 * will call hvc_close() after a failed open but we don't want to clean
 	 * up there so we'll clean up here and clear out the previously set
-	 * tty fields and return the kobject reference.
+	 * tty fields and return the kref reference.
 	 */
 	if (rc) {
 		spin_lock_irqsave(&hp->lock, flags);
@@ -360,7 +374,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
 		hp->irq_requested = 0;
 		spin_unlock_irqrestore(&hp->lock, flags);
 		tty->driver_data = NULL;
-		kobject_put(kobjp);
+		kref_put(&hp->kref, destroy_hvc_struct);
 		printk(KERN_ERR "hvc_open: request_irq failed with rc %d.\n", rc);
 	}
 	/* Force wakeup of the polling thread */
@@ -372,7 +386,6 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
 static void hvc_close(struct tty_struct *tty, struct file * filp)
 {
 	struct hvc_struct *hp;
-	struct kobject *kobjp;
 	int irq = 0;
 	unsigned long flags;
 
@@ -382,7 +395,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
 	/*
 	 * No driver_data means that this close was issued after a failed
 	 * hvc_open by the tty layer's release_dev() function and we can just
-	 * exit cleanly because the kobject reference wasn't made.
+	 * exit cleanly because the kref reference wasn't made.
 	 */
 	if (!tty->driver_data)
 		return;
@@ -390,7 +403,6 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
 	hp = tty->driver_data;
 	spin_lock_irqsave(&hp->lock, flags);
 
-	kobjp = &hp->kobj;
 	if (--hp->count == 0) {
 		if (hp->irq_requested)
 			irq = hp->irq;
@@ -417,7 +429,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
 		spin_unlock_irqrestore(&hp->lock, flags);
 	}
 
-	kobject_put(kobjp);
+	kref_put(&hp->kref, destroy_hvc_struct);
 }
 
 static void hvc_hangup(struct tty_struct *tty)
@@ -426,7 +438,6 @@ static void hvc_hangup(struct tty_struct *tty)
 	unsigned long flags;
 	int irq = 0;
 	int temp_open_count;
-	struct kobject *kobjp;
 
 	if (!hp)
 		return;
@@ -443,7 +454,6 @@ static void hvc_hangup(struct tty_struct *tty)
 		return;
 	}
 
-	kobjp = &hp->kobj;
 	temp_open_count = hp->count;
 	hp->count = 0;
 	hp->n_outbuf = 0;
@@ -457,7 +467,7 @@ static void hvc_hangup(struct tty_struct *tty)
 		free_irq(irq, hp);
 	while(temp_open_count) {
 		--temp_open_count;
-		kobject_put(kobjp);
+		kref_put(&hp->kref, destroy_hvc_struct);
 	}
 }
 
@@ -729,27 +739,6 @@ static const struct tty_operations hvc_ops = {
 	.chars_in_buffer = hvc_chars_in_buffer,
 };
 
-/* callback when the kboject ref count reaches zero. */
-static void destroy_hvc_struct(struct kobject *kobj)
-{
-	struct hvc_struct *hp = container_of(kobj, struct hvc_struct, kobj);
-	unsigned long flags;
-
-	spin_lock(&hvc_structs_lock);
-
-	spin_lock_irqsave(&hp->lock, flags);
-	list_del(&(hp->next));
-	spin_unlock_irqrestore(&hp->lock, flags);
-
-	spin_unlock(&hvc_structs_lock);
-
-	kfree(hp);
-}
-
-static struct kobj_type hvc_kobj_type = {
-	.release = destroy_hvc_struct,
-};
-
 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int irq,
 					struct hv_ops *ops, int outbuf_size)
 {
@@ -776,8 +765,7 @@ struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int irq,
 	hp->outbuf_size = outbuf_size;
 	hp->outbuf = &((char *)hp)[ALIGN(sizeof(*hp), sizeof(long))];
 
-	kobject_init(&hp->kobj);
-	hp->kobj.ktype = &hvc_kobj_type;
+	kref_init(&hp->kref);
 
 	spin_lock_init(&hp->lock);
 	spin_lock(&hvc_structs_lock);
@@ -806,12 +794,10 @@ struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int irq,
 int __devexit hvc_remove(struct hvc_struct *hp)
 {
 	unsigned long flags;
-	struct kobject *kobjp;
 	struct tty_struct *tty;
 
 	spin_lock_irqsave(&hp->lock, flags);
 	tty = hp->tty;
-	kobjp = &hp->kobj;
 
 	if (hp->index < MAX_NR_HVC_CONSOLES)
 		vtermnos[hp->index] = -1;
@@ -821,12 +807,12 @@ int __devexit hvc_remove(struct hvc_struct *hp)
 	spin_unlock_irqrestore(&hp->lock, flags);
 
 	/*
-	 * We 'put' the instance that was grabbed when the kobject instance
-	 * was initialized using kobject_init(). Let the last holder of this
-	 * kobject cause it to be removed, which will probably be the tty_hangup
+	 * We 'put' the instance that was grabbed when the kref instance
+	 * was initialized using kref_init(). Let the last holder of this
+	 * kref cause it to be removed, which will probably be the tty_hangup
 	 * below.
 	 */
-	kobject_put(kobjp);
+	kref_put(&hp->kref, destroy_hvc_struct);
 
 	/*
 	 * This function call will auto chain call hvc_hangup. The tty should
diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
index 69d8866de783..fd7559084b82 100644
--- a/drivers/char/hvcs.c
+++ b/drivers/char/hvcs.c
@@ -57,11 +57,7 @@
  * rescanning partner information upon a user's request.
  *
  * Each vty-server, prior to being exposed to this driver is reference counted
- * using the 2.6 Linux kernel kobject construct. This kobject is also used by
- * the vio bus to provide a vio device sysfs entry that this driver attaches
- * device specific attributes to, including partner information. The vio bus
- * framework also provides a sysfs entry for each vio driver. The hvcs driver
- * provides driver attributes in this entry.
+ * using the 2.6 Linux kernel kref construct.
  *
  * For direction on installation and usage of this driver please reference
  * Documentation/powerpc/hvcs.txt.
@@ -71,7 +67,7 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
-#include <linux/kobject.h>
+#include <linux/kref.h>
 #include <linux/kthread.h>
 #include <linux/list.h>
 #include <linux/major.h>
@@ -293,12 +289,12 @@ struct hvcs_struct {
 	int chars_in_buffer;
 
 	/*
-	 * Any variable below the kobject is valid before a tty is connected and
+	 * Any variable below the kref is valid before a tty is connected and
 	 * stays valid after the tty is disconnected. These shouldn't be
 	 * whacked until the koject refcount reaches zero though some entries
 	 * may be changed via sysfs initiatives.
 	 */
-	struct kobject kobj; /* ref count & hvcs_struct lifetime */
+	struct kref kref; /* ref count & hvcs_struct lifetime */
 	int connected; /* is the vty-server currently connected to a vty? */
 	uint32_t p_unit_address; /* partner unit address */
 	uint32_t p_partition_ID; /* partner partition ID */
@@ -307,8 +303,8 @@ struct hvcs_struct {
 	struct vio_dev *vdev;
 };
 
-/* Required to back map a kobject to its containing object */
-#define from_kobj(kobj) container_of(kobj, struct hvcs_struct, kobj)
+/* Required to back map a kref to its containing object */
+#define from_kref(k) container_of(k, struct hvcs_struct, kref)
 
 static struct list_head hvcs_structs = LIST_HEAD_INIT(hvcs_structs);
 static DEFINE_SPINLOCK(hvcs_structs_lock);
@@ -334,7 +330,6 @@ static void hvcs_partner_free(struct hvcs_struct *hvcsd);
 static int hvcs_enable_device(struct hvcs_struct *hvcsd,
 		uint32_t unit_address, unsigned int irq, struct vio_dev *dev);
 
-static void destroy_hvcs_struct(struct kobject *kobj);
 static int hvcs_open(struct tty_struct *tty, struct file *filp);
 static void hvcs_close(struct tty_struct *tty, struct file *filp);
 static void hvcs_hangup(struct tty_struct * tty);
@@ -703,10 +698,10 @@ static void hvcs_return_index(int index)
 		hvcs_index_list[index] = -1;
 }
 
-/* callback when the kboject ref count reaches zero */
-static void destroy_hvcs_struct(struct kobject *kobj)
+/* callback when the kref ref count reaches zero */
+static void destroy_hvcs_struct(struct kref *kref)
 {
-	struct hvcs_struct *hvcsd = from_kobj(kobj);
+	struct hvcs_struct *hvcsd = from_kref(kref);
 	struct vio_dev *vdev;
 	unsigned long flags;
 
@@ -743,10 +738,6 @@ static void destroy_hvcs_struct(struct kobject *kobj)
 	kfree(hvcsd);
 }
 
-static struct kobj_type hvcs_kobj_type = {
-	.release = destroy_hvcs_struct,
-};
-
 static int hvcs_get_index(void)
 {
 	int i;
@@ -791,9 +782,7 @@ static int __devinit hvcs_probe(
 
 	spin_lock_init(&hvcsd->lock);
 	/* Automatically incs the refcount the first time */
-	kobject_init(&hvcsd->kobj);
-	/* Set up the callback for terminating the hvcs_struct's life */
-	hvcsd->kobj.ktype = &hvcs_kobj_type;
+	kref_init(&hvcsd->kref);
 
 	hvcsd->vdev = dev;
 	dev->dev.driver_data = hvcsd;
@@ -844,7 +833,6 @@ static int __devexit hvcs_remove(struct vio_dev *dev)
 {
 	struct hvcs_struct *hvcsd = dev->dev.driver_data;
 	unsigned long flags;
-	struct kobject *kobjp;
 	struct tty_struct *tty;
 
 	if (!hvcsd)
@@ -856,15 +844,13 @@ static int __devexit hvcs_remove(struct vio_dev *dev)
 
 	tty = hvcsd->tty;
 
-	kobjp = &hvcsd->kobj;
-
 	spin_unlock_irqrestore(&hvcsd->lock, flags);
 
 	/*
 	 * Let the last holder of this object cause it to be removed, which
 	 * would probably be tty_hangup below.
 	 */
-	kobject_put (kobjp);
+	kref_put(&hvcsd->kref, destroy_hvcs_struct);
 
 	/*
 	 * The hangup is a scheduled function which will auto chain call
@@ -1086,7 +1072,7 @@ static int hvcs_enable_device(struct hvcs_struct *hvcsd, uint32_t unit_address,
 }
 
 /*
- * This always increments the kobject ref count if the call is successful.
+ * This always increments the kref ref count if the call is successful.
  * Please remember to dec when you are done with the instance.
 
 *
 * NOTICE: Do NOT hold either the hvcs_struct.lock or hvcs_structs_lock when
@@ -1103,7 +1089,7 @@ static struct hvcs_struct *hvcs_get_by_index(int index)
 	list_for_each_entry(hvcsd, &hvcs_structs, next) {
 		spin_lock_irqsave(&hvcsd->lock, flags);
 		if (hvcsd->index == index) {
-			kobject_get(&hvcsd->kobj);
+			kref_get(&hvcsd->kref);
 			spin_unlock_irqrestore(&hvcsd->lock, flags);
 			spin_unlock(&hvcs_structs_lock);
 			return hvcsd;
@@ -1129,14 +1115,13 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
 	unsigned int irq;
 	struct vio_dev *vdev;
 	unsigned long unit_address;
-	struct kobject *kobjp;
 
 	if (tty->driver_data)
 		goto fast_open;
 
 	/*
 	 * Is there a vty-server that shares the same index?
-	 * This function increments the kobject index.
+	 * This function increments the kref index.
 	 */
 	if (!(hvcsd = hvcs_get_by_index(tty->index))) {
 		printk(KERN_WARNING "HVCS: open failed, no device associated"
@@ -1181,7 +1166,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
 	 * and will grab the spinlock and free the connection if it fails.
 	 */
 	if (((rc = hvcs_enable_device(hvcsd, unit_address, irq, vdev)))) {
-		kobject_put(&hvcsd->kobj);
+		kref_put(&hvcsd->kref, destroy_hvcs_struct);
 		printk(KERN_WARNING "HVCS: enable device failed.\n");
 		return rc;
 	}
@@ -1192,17 +1177,11 @@ fast_open:
 	hvcsd = tty->driver_data;
 
 	spin_lock_irqsave(&hvcsd->lock, flags);
-	if (!kobject_get(&hvcsd->kobj)) {
-		spin_unlock_irqrestore(&hvcsd->lock, flags);
-		printk(KERN_ERR "HVCS: Kobject of open"
-			" hvcs doesn't exist.\n");
-		return -EFAULT; /* Is this the right return value? */
-	}
-
+	kref_get(&hvcsd->kref);
 	hvcsd->open_count++;
-
 	hvcsd->todo_mask |= HVCS_SCHED_READ;
 	spin_unlock_irqrestore(&hvcsd->lock, flags);
+
 open_success:
 	hvcs_kick();
 
@@ -1212,9 +1191,8 @@ open_success:
 	return 0;
 
 error_release:
-	kobjp = &hvcsd->kobj;
 	spin_unlock_irqrestore(&hvcsd->lock, flags);
-	kobject_put(&hvcsd->kobj);
+	kref_put(&hvcsd->kref, destroy_hvcs_struct);
 
 	printk(KERN_WARNING "HVCS: partner connect failed.\n");
 	return retval;
@@ -1224,7 +1202,6 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
 {
 	struct hvcs_struct *hvcsd;
 	unsigned long flags;
-	struct kobject *kobjp;
 	int irq = NO_IRQ;
 
 	/*
@@ -1245,7 +1222,6 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
 	hvcsd = tty->driver_data;
 
 	spin_lock_irqsave(&hvcsd->lock, flags);
-	kobjp = &hvcsd->kobj;
 	if (--hvcsd->open_count == 0) {
 
 		vio_disable_interrupts(hvcsd->vdev);
@@ -1270,7 +1246,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
 		tty->driver_data = NULL;
 
 		free_irq(irq, hvcsd);
-		kobject_put(kobjp);
+		kref_put(&hvcsd->kref, destroy_hvcs_struct);
 		return;
 	} else if (hvcsd->open_count < 0) {
 		printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
@@ -1279,7 +1255,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
 	}
 
 	spin_unlock_irqrestore(&hvcsd->lock, flags);
-	kobject_put(kobjp);
+	kref_put(&hvcsd->kref, destroy_hvcs_struct);
 }
 
 static void hvcs_hangup(struct tty_struct * tty)
@@ -1287,21 +1263,17 @@ static void hvcs_hangup(struct tty_struct * tty)
 	struct hvcs_struct *hvcsd = tty->driver_data;
 	unsigned long flags;
 	int temp_open_count;
-	struct kobject *kobjp;
 	int irq = NO_IRQ;
 
 	spin_lock_irqsave(&hvcsd->lock, flags);
-	/* Preserve this so that we know how many kobject refs to put */
+	/* Preserve this so that we know how many kref refs to put */
 	temp_open_count = hvcsd->open_count;
 
 	/*
-	 * Don't kobject put inside the spinlock because the destruction
+	 * Don't kref put inside the spinlock because the destruction
 	 * callback may use the spinlock and it may get called before the
-	 * spinlock has been released. Get a pointer to the kobject and
-	 * kobject_put on that after releasing the spinlock.
+	 * spinlock has been released.
 	 */
-	kobjp = &hvcsd->kobj;
-
 	vio_disable_interrupts(hvcsd->vdev);
 
 	hvcsd->todo_mask = 0;
@@ -1324,7 +1296,7 @@ static void hvcs_hangup(struct tty_struct * tty)
 	free_irq(irq, hvcsd);
 
 	/*
-	 * We need to kobject_put() for every open_count we have since the
+	 * We need to kref_put() for every open_count we have since the
 	 * tty_hangup() function doesn't invoke a close per open connection on a
 	 * non-console device.
 	 */
@@ -1335,7 +1307,7 @@ static void hvcs_hangup(struct tty_struct * tty)
 		 * NOTE: If this hangup was signaled from user space then the
 		 * final put will never happen.
 		 */
-		kobject_put(kobjp);
+		kref_put(&hvcsd->kref, destroy_hvcs_struct);
 	}
 }
 
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index 556fd81fa815..c422e870dc52 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -28,6 +28,7 @@
 #include <linux/kernel.h>
 #include <linux/pci.h>
 #include <linux/hw_random.h>
+#include <linux/delay.h>
 #include <asm/io.h>
 
 
@@ -52,11 +53,18 @@ MODULE_DEVICE_TABLE(pci, pci_tbl);
 static struct pci_dev *amd_pdev;
 
 
-static int amd_rng_data_present(struct hwrng *rng)
+static int amd_rng_data_present(struct hwrng *rng, int wait)
 {
 	u32 pmbase = (u32)rng->priv;
+	int data, i;
 
-	return !!(inl(pmbase + 0xF4) & 1);
+	for (i = 0; i < 20; i++) {
+		data = !!(inl(pmbase + 0xF4) & 1);
+		if (data || !wait)
+			break;
+		udelay(10);
+	}
+	return data;
 }
 
 static int amd_rng_data_read(struct hwrng *rng, u32 *data)
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 26a860adcb38..0118b9817a95 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -66,11 +66,11 @@ static inline void hwrng_cleanup(struct hwrng *rng)
 		rng->cleanup(rng);
 }
 
-static inline int hwrng_data_present(struct hwrng *rng)
+static inline int hwrng_data_present(struct hwrng *rng, int wait)
 {
 	if (!rng->data_present)
 		return 1;
-	return rng->data_present(rng);
+	return rng->data_present(rng, wait);
 }
 
 static inline int hwrng_data_read(struct hwrng *rng, u32 *data)
@@ -94,8 +94,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
 {
 	u32 data;
 	ssize_t ret = 0;
-	int i, err = 0;
-	int data_present;
+	int err = 0;
 	int bytes_read;
 
 	while (size) {
@@ -107,21 +106,10 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
 			err = -ENODEV;
 			goto out;
 		}
-		if (filp->f_flags & O_NONBLOCK) {
-			data_present = hwrng_data_present(current_rng);
-		} else {
-			/* Some RNG require some time between data_reads to gather
-			 * new entropy. Poll it.
-			 */
-			for (i = 0; i < 20; i++) {
-				data_present = hwrng_data_present(current_rng);
-				if (data_present)
-					break;
-				udelay(10);
-			}
-		}
+
 		bytes_read = 0;
-		if (data_present)
+		if (hwrng_data_present(current_rng,
+				       !(filp->f_flags & O_NONBLOCK)))
 			bytes_read = hwrng_data_read(current_rng, &data);
 		mutex_unlock(&rng_mutex);
 
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
index 8e8658dcd2e3..fed4ef5569f5 100644
--- a/drivers/char/hw_random/geode-rng.c
+++ b/drivers/char/hw_random/geode-rng.c
@@ -28,6 +28,7 @@
 #include <linux/kernel.h>
 #include <linux/pci.h>
 #include <linux/hw_random.h>
+#include <linux/delay.h>
 #include <asm/io.h>
 
 
@@ -61,11 +62,18 @@ static int geode_rng_data_read(struct hwrng *rng, u32 *data)
 	return 4;
 }
 
-static int geode_rng_data_present(struct hwrng *rng)
+static int geode_rng_data_present(struct hwrng *rng, int wait)
 {
 	void __iomem *mem = (void __iomem *)rng->priv;
+	int data, i;
 
-	return !!(readl(mem + GEODE_RNG_STATUS_REG));
+	for (i = 0; i < 20; i++) {
+		data = !!(readl(mem + GEODE_RNG_STATUS_REG));
+		if (data || !wait)
+			break;
+		udelay(10);
+	}
+	return data;
 }
 
 
diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
index 753f46052b87..5cc651ef75eb 100644
--- a/drivers/char/hw_random/intel-rng.c
+++ b/drivers/char/hw_random/intel-rng.c
@@ -29,6 +29,7 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/stop_machine.h>
+#include <linux/delay.h>
 #include <asm/io.h>
 
 
@@ -162,11 +163,19 @@ static inline u8 hwstatus_set(void __iomem *mem,
 	return hwstatus_get(mem);
 }
 
-static int intel_rng_data_present(struct hwrng *rng)
+static int intel_rng_data_present(struct hwrng *rng, int wait)
 {
 	void __iomem *mem = (void __iomem *)rng->priv;
-
-	return !!(readb(mem + INTEL_RNG_STATUS) & INTEL_RNG_DATA_PRESENT);
+	int data, i;
+
+	for (i = 0; i < 20; i++) {
+		data = !!(readb(mem + INTEL_RNG_STATUS) &
+			  INTEL_RNG_DATA_PRESENT);
+		if (data || !wait)
+			break;
+		udelay(10);
+	}
+	return data;
 }
 
 static int intel_rng_data_read(struct hwrng *rng, u32 *data)
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 3f35a1c562b1..7e319951fa41 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -29,6 +29,7 @@
 #include <linux/err.h>
 #include <linux/platform_device.h>
 #include <linux/hw_random.h>
+#include <linux/delay.h>
 
 #include <asm/io.h>
 
@@ -65,9 +66,17 @@ static void omap_rng_write_reg(int reg, u32 val)
 }
 
 /* REVISIT: Does the status bit really work on 16xx? */
-static int omap_rng_data_present(struct hwrng *rng)
+static int omap_rng_data_present(struct hwrng *rng, int wait)
 {
-	return omap_rng_read_reg(RNG_STAT_REG) ? 0 : 1;
+	int data, i;
+
+	for (i = 0; i < 20; i++) {
+		data = omap_rng_read_reg(RNG_STAT_REG) ? 0 : 1;
+		if (data || !wait)
+			break;
+		udelay(10);
+	}
+	return data;
 }
 
 static int omap_rng_data_read(struct hwrng *rng, u32 *data)
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
index 24ae3073991f..6d50e9bc700b 100644
--- a/drivers/char/hw_random/pasemi-rng.c
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -23,6 +23,7 @@
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/hw_random.h>
+#include <linux/delay.h>
 #include <asm/of_platform.h>
 #include <asm/io.h>
 
@@ -41,12 +42,19 @@
 
 #define MODULE_NAME "pasemi_rng"
 
-static int pasemi_rng_data_present(struct hwrng *rng)
+static int pasemi_rng_data_present(struct hwrng *rng, int wait)
 {
 	void __iomem *rng_regs = (void __iomem *)rng->priv;
-
-	return (in_le32(rng_regs + SDCRNG_CTL_REG)
-		& SDCRNG_CTL_FVLD_M) ? 1 : 0;
+	int data, i;
+
+	for (i = 0; i < 20; i++) {
+		data = (in_le32(rng_regs + SDCRNG_CTL_REG)
+			& SDCRNG_CTL_FVLD_M) ? 1 : 0;
+		if (data || !wait)
+			break;
+		udelay(10);
+	}
+	return data;
 }
 
 static int pasemi_rng_data_read(struct hwrng *rng, u32 *data)
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index ec435cb25c4f..868e39fd42e4 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -27,6 +27,7 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/hw_random.h>
+#include <linux/delay.h>
 #include <asm/io.h>
 #include <asm/msr.h>
 #include <asm/cpufeature.h>
@@ -77,10 +78,11 @@ static inline u32 xstore(u32 *addr, u32 edx_in)
 	return eax_out;
 }
 
-static int via_rng_data_present(struct hwrng *rng)
+static int via_rng_data_present(struct hwrng *rng, int wait)
 {
 	u32 bytes_out;
 	u32 *via_rng_datum = (u32 *)(&rng->priv);
+	int i;
 
 	/* We choose the recommended 1-byte-per-instruction RNG rate,
 	 * for greater randomness at the expense of speed. Larger
@@ -95,12 +97,15 @@ static int via_rng_data_present(struct hwrng *rng)
 	 * completes.
 	 */
 
-	*via_rng_datum = 0; /* paranoia, not really necessary */
-	bytes_out = xstore(via_rng_datum, VIA_RNG_CHUNK_1);
-	bytes_out &= VIA_XSTORE_CNT_MASK;
-	if (bytes_out == 0)
-		return 0;
-	return 1;
+	for (i = 0; i < 20; i++) {
+		*via_rng_datum = 0; /* paranoia, not really necessary */
+		bytes_out = xstore(via_rng_datum, VIA_RNG_CHUNK_1);
+		bytes_out &= VIA_XSTORE_CNT_MASK;
+		if (bytes_out || !wait)
+			break;
+		udelay(10);
+	}
+	return bytes_out ? 1 : 0;
 }
 
 static int via_rng_data_read(struct hwrng *rng, u32 *data)
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
new file mode 100644
index 000000000000..6076e662886a
--- /dev/null
+++ b/drivers/char/nozomi.c
@@ -0,0 +1,1993 @@
+/*
+ * nozomi.c -- HSDPA driver Broadband Wireless Data Card - Globe Trotter
+ *
+ * Written by: Ulf Jakobsson,
+ *             Jan Åkerfeldt,
+ *             Stefan Thomasson,
+ *
8 * Maintained by: Paul Hardwick (p.hardwick@option.com)
9 *
10 * Patches:
11 * Locking code changes for Vodafone by Sphere Systems Ltd,
12 * Andrew Bird (ajb@spheresystems.co.uk )
13 * & Phil Sanderson
14 *
15 * Source has been ported from an implementation made by Filip Aben @ Option
16 *
17 * --------------------------------------------------------------------------
18 *
19 * Copyright (c) 2005,2006 Option Wireless Sweden AB
20 * Copyright (c) 2006 Sphere Systems Ltd
21 * Copyright (c) 2006 Option Wireless n/v
22 * All rights Reserved.
23 *
24 * This program is free software; you can redistribute it and/or modify
25 * it under the terms of the GNU General Public License as published by
26 * the Free Software Foundation; either version 2 of the License, or
27 * (at your option) any later version.
28 *
29 * This program is distributed in the hope that it will be useful,
30 * but WITHOUT ANY WARRANTY; without even the implied warranty of
31 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
32 * GNU General Public License for more details.
33 *
34 * You should have received a copy of the GNU General Public License
35 * along with this program; if not, write to the Free Software
36 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
37 *
38 * --------------------------------------------------------------------------
39 */
40
41/*
42 * CHANGELOG
43 * Version 2.1d
44 * 11-November-2007 Jiri Slaby, Frank Seidel
45 * - Big rework of multicard support by Jiri
46 * - Major cleanups (semaphore to mutex, endianness, no major reservation)
47 * - Optimizations
48 *
49 * Version 2.1c
50 * 30-October-2007 Frank Seidel
51 * - Completed multicard support
52 * - Minor cleanups
53 *
54 * Version 2.1b
55 * 07-August-2007 Frank Seidel
56 * - Minor cleanups
57 * - theoretical multicard support
58 *
59 * Version 2.1
60 * 03-July-2006 Paul Hardwick
61 *
62 * - Stability Improvements. Incorporated spinlock wraps patch.
63 * - Updated for newer 2.6.14+ kernels (tty_buffer_request_room)
64 * - using __devexit macro for tty
65 *
66 *
67 * Version 2.0
68 * 08-feb-2006 15:34:10:Ulf
69 *
70 * -Fixed issue when not waking up line discipline layer, could probably result
71 * in better uplink performance for 2.4.
72 *
73 * -Fixed issue with big endian during initialization, now proper toggle flags
74 * are handled between preloader and maincode.
75 *
76 * -Fixed flow control issue.
77 *
78 * -Added support for setting DTR.
79 *
80 * -For 2.4 kernels, removing temporary buffer that's not needed.
81 *
82 * -Reading CTS only for modem port (only port that supports it).
83 *
84 * -Return 0 in write_room instead of negative value, it's not handled in
85 * upper layer.
86 *
87 * --------------------------------------------------------------------------
88 * Version 1.0
89 *
90 * First version of driver, only tested with card of type F32_2.
91 * Works fine with 2.4 and 2.6 kernels.
92 * Driver also support big endian architecture.
93 */
94
95/* Enable this to have a lot of debug printouts */
96#define DEBUG
97
98#include <linux/kernel.h>
99#include <linux/module.h>
100#include <linux/pci.h>
101#include <linux/ioport.h>
102#include <linux/tty.h>
103#include <linux/tty_driver.h>
104#include <linux/tty_flip.h>
105#include <linux/serial.h>
106#include <linux/interrupt.h>
107#include <linux/kmod.h>
108#include <linux/init.h>
109#include <linux/kfifo.h>
110#include <linux/uaccess.h>
111#include <asm/byteorder.h>
112
113#include <linux/delay.h>
114
115
116#define VERSION_STRING DRIVER_DESC " 2.1d (build date: " \
117 __DATE__ " " __TIME__ ")"
118
119/* Macros definitions */
120
121/* Default debug printout level */
122#define NOZOMI_DEBUG_LEVEL 0x00
123
124#define P_BUF_SIZE 128
125#define NFO(_err_flag_, args...) \
126do { \
127 char tmp[P_BUF_SIZE]; \
128 snprintf(tmp, sizeof(tmp), ##args); \
129 printk(_err_flag_ "[%d] %s(): %s\n", __LINE__, \
130 __FUNCTION__, tmp); \
131} while (0)
132
133#define DBG1(args...) D_(0x01, ##args)
134#define DBG2(args...) D_(0x02, ##args)
135#define DBG3(args...) D_(0x04, ##args)
136#define DBG4(args...) D_(0x08, ##args)
137#define DBG5(args...) D_(0x10, ##args)
138#define DBG6(args...) D_(0x20, ##args)
139#define DBG7(args...) D_(0x40, ##args)
140#define DBG8(args...) D_(0x80, ##args)
141
142#ifdef DEBUG
143/* Do we need this settable at runtime? */
144static int debug = NOZOMI_DEBUG_LEVEL;
145
146#define D(lvl, args...) do {if (lvl & debug) NFO(KERN_DEBUG, ##args); } \
147 while (0)
148#define D_(lvl, args...) D(lvl, ##args)
149
150/* These printouts are always printed */
151
152#else
153static int debug;
154#define D_(lvl, args...)
155#endif
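
A worked example of how the debug bitmask is consumed: D_(lvl, ...) prints only when lvl & debug is non-zero, so loading the module with debug=0x03 (for instance `modprobe nozomi debug=0x03`, using the module_param declared at the end of the file) enables the DBG1 (0x01) and DBG2 (0x02) classes while leaving DBG3..DBG8 silent.
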
156
157/* TODO: rewrite to optimize macros... */
158
159#define TMP_BUF_MAX 256
160
161#define DUMP(buf__,len__) \
162 do { \
163 char tbuf[TMP_BUF_MAX] = {0};\
164 if (len__ > 1) {\
165 snprintf(tbuf, len__ > TMP_BUF_MAX ? TMP_BUF_MAX : len__, "%s", buf__);\
166 if (tbuf[len__-2] == '\r') {\
167 tbuf[len__-2] = 'r';\
168 } \
169 DBG1("SENDING: '%s' (%d+n)", tbuf, len__);\
170 } else {\
171 DBG1("SENDING: '%s' (%d)", tbuf, len__);\
172 } \
173} while (0)
174
175/* Defines */
176#define NOZOMI_NAME "nozomi"
177#define NOZOMI_NAME_TTY "nozomi_tty"
178#define DRIVER_DESC "Nozomi driver"
179
180#define NTTY_TTY_MAXMINORS 256
181#define NTTY_FIFO_BUFFER_SIZE 8192
182
183/* Must be power of 2 */
184#define FIFO_BUFFER_SIZE_UL 8192
185
186/* Size of tmp send buffer to card */
187#define SEND_BUF_MAX 1024
188#define RECEIVE_BUF_MAX 4
189
190
191/* Define all types of vendors and devices to support */
192#define VENDOR1 0x1931 /* Vendor Option */
193#define DEVICE1 0x000c /* HSDPA card */
194
195#define R_IIR 0x0000 /* Interrupt Identity Register */
196#define R_FCR 0x0000 /* Flow Control Register */
197#define R_IER 0x0004 /* Interrupt Enable Register */
198
199#define CONFIG_MAGIC 0xEFEFFEFE
200#define TOGGLE_VALID 0x0000
201
202/* Definition of interrupt tokens */
203#define MDM_DL1 0x0001
204#define MDM_UL1 0x0002
205#define MDM_DL2 0x0004
206#define MDM_UL2 0x0008
207#define DIAG_DL1 0x0010
208#define DIAG_DL2 0x0020
209#define DIAG_UL 0x0040
210#define APP1_DL 0x0080
211#define APP1_UL 0x0100
212#define APP2_DL 0x0200
213#define APP2_UL 0x0400
214#define CTRL_DL 0x0800
215#define CTRL_UL 0x1000
216#define RESET 0x8000
217
218#define MDM_DL (MDM_DL1 | MDM_DL2)
219#define MDM_UL (MDM_UL1 | MDM_UL2)
220#define DIAG_DL (DIAG_DL1 | DIAG_DL2)
221
222/* modem signal definition */
223#define CTRL_DSR 0x0001
224#define CTRL_DCD 0x0002
225#define CTRL_RI 0x0004
226#define CTRL_CTS 0x0008
227
228#define CTRL_DTR 0x0001
229#define CTRL_RTS 0x0002
230
231#define MAX_PORT 4
232#define NOZOMI_MAX_PORTS 5
233#define NOZOMI_MAX_CARDS (NTTY_TTY_MAXMINORS / MAX_PORT)
234
235/* Type definitions */
236
237/*
238 * There are two types of nozomi cards,
239 * one with 2048 bytes of memory and one with 8192 bytes
240 */
241enum card_type {
242 F32_2 = 2048, /* 512 bytes downlink + uplink * 2 -> 2048 */
243 F32_8 = 8192, /* 3072 bytes downl. + 1024 bytes uplink * 2 -> 8192 */
244};
245
246/* Two different toggle channels exist */
247enum channel_type {
248 CH_A = 0,
249 CH_B = 1,
250};
251
252/* Port definition for the card regarding flow control */
253enum ctrl_port_type {
254 CTRL_CMD = 0,
255 CTRL_MDM = 1,
256 CTRL_DIAG = 2,
257 CTRL_APP1 = 3,
258 CTRL_APP2 = 4,
259 CTRL_ERROR = -1,
260};
261
262/* Ports that the nozomi has */
263enum port_type {
264 PORT_MDM = 0,
265 PORT_DIAG = 1,
266 PORT_APP1 = 2,
267 PORT_APP2 = 3,
268 PORT_CTRL = 4,
269 PORT_ERROR = -1,
270};
271
272#ifdef __BIG_ENDIAN
273/* Big endian */
274
275struct toggles {
276 unsigned enabled:5; /*
277 * Toggle fields are valid if enabled is 0,
278 * else A-channels must always be used.
279 */
280 unsigned diag_dl:1;
281 unsigned mdm_dl:1;
282 unsigned mdm_ul:1;
283} __attribute__ ((packed));
284
285/* Configuration table to read at startup of card */
286/* Is for now only needed during initialization phase */
287struct config_table {
288 u32 signature;
289 u16 product_information;
290 u16 version;
291 u8 pad3[3];
292 struct toggles toggle;
293 u8 pad1[4];
294 u16 dl_mdm_len1; /*
295 * If this is 64, it can hold
296 * 60 bytes + 4 that is length field
297 */
298 u16 dl_start;
299
300 u16 dl_diag_len1;
301 u16 dl_mdm_len2; /*
302 * If this is 64, it can hold
303 * 60 bytes + 4 that is length field
304 */
305 u16 dl_app1_len;
306
307 u16 dl_diag_len2;
308 u16 dl_ctrl_len;
309 u16 dl_app2_len;
310 u8 pad2[16];
311 u16 ul_mdm_len1;
312 u16 ul_start;
313 u16 ul_diag_len;
314 u16 ul_mdm_len2;
315 u16 ul_app1_len;
316 u16 ul_app2_len;
317 u16 ul_ctrl_len;
318} __attribute__ ((packed));
319
320/* This stores all control downlink flags */
321struct ctrl_dl {
322 u8 port;
323 unsigned reserved:4;
324 unsigned CTS:1;
325 unsigned RI:1;
326 unsigned DCD:1;
327 unsigned DSR:1;
328} __attribute__ ((packed));
329
330/* This stores all control uplink flags */
331struct ctrl_ul {
332 u8 port;
333 unsigned reserved:6;
334 unsigned RTS:1;
335 unsigned DTR:1;
336} __attribute__ ((packed));
337
338#else
339/* Little endian */
340
341/* This represents the toggle information */
342struct toggles {
343 unsigned mdm_ul:1;
344 unsigned mdm_dl:1;
345 unsigned diag_dl:1;
346 unsigned enabled:5; /*
347 * Toggle fields are valid if enabled is 0,
348 * else A-channels must always be used.
349 */
350} __attribute__ ((packed));
351
352/* Configuration table to read at startup of card */
353struct config_table {
354 u32 signature;
355 u16 version;
356 u16 product_information;
357 struct toggles toggle;
358 u8 pad1[7];
359 u16 dl_start;
360 u16 dl_mdm_len1; /*
361 * If this is 64, it can hold
362 * 60 bytes + 4 that is length field
363 */
364 u16 dl_mdm_len2;
365 u16 dl_diag_len1;
366 u16 dl_diag_len2;
367 u16 dl_app1_len;
368 u16 dl_app2_len;
369 u16 dl_ctrl_len;
370 u8 pad2[16];
371 u16 ul_start;
372 u16 ul_mdm_len2;
373 u16 ul_mdm_len1;
374 u16 ul_diag_len;
375 u16 ul_app1_len;
376 u16 ul_app2_len;
377 u16 ul_ctrl_len;
378} __attribute__ ((packed));
379
380/* This stores all control downlink flags */
381struct ctrl_dl {
382 unsigned DSR:1;
383 unsigned DCD:1;
384 unsigned RI:1;
385 unsigned CTS:1;
386 unsigned reserved:4;
387 u8 port;
388} __attribute__ ((packed));
389
390/* This stores all control uplink flags */
391struct ctrl_ul {
392 unsigned DTR:1;
393 unsigned RTS:1;
394 unsigned reserved:6;
395 u8 port;
396} __attribute__ ((packed));
397#endif
398
399/* This holds all information that is needed regarding a port */
400struct port {
401 u8 update_flow_control;
402 struct ctrl_ul ctrl_ul;
403 struct ctrl_dl ctrl_dl;
404 struct kfifo *fifo_ul;
405 void __iomem *dl_addr[2];
406 u32 dl_size[2];
407 u8 toggle_dl;
408 void __iomem *ul_addr[2];
409 u32 ul_size[2];
410 u8 toggle_ul;
411 u16 token_dl;
412
413 struct tty_struct *tty;
414 int tty_open_count;
415 /* mutex to ensure one access path to this port */
416 struct mutex tty_sem;
417 wait_queue_head_t tty_wait;
418 struct async_icount tty_icount;
419};
420
421/* Private data one for each card in the system */
422struct nozomi {
423 void __iomem *base_addr;
424 unsigned long flip;
425
426 /* Pointers to registers */
427 void __iomem *reg_iir;
428 void __iomem *reg_fcr;
429 void __iomem *reg_ier;
430
431 u16 last_ier;
432 enum card_type card_type;
433 struct config_table config_table; /* Configuration table */
434 struct pci_dev *pdev;
435 struct port port[NOZOMI_MAX_PORTS];
436 u8 *send_buf;
437
438 spinlock_t spin_mutex; /* secures access to registers and tty */
439
440 unsigned int index_start;
441 u32 open_ttys;
442};
443
444/* This is a data packet that is read or written to/from card */
445struct buffer {
446 u32 size; /* size is the length of the data buffer */
447 u8 *data;
448} __attribute__ ((packed));
449
450/* Global variables */
451static struct pci_device_id nozomi_pci_tbl[] = {
452 {PCI_DEVICE(VENDOR1, DEVICE1)},
453 {},
454};
455
456MODULE_DEVICE_TABLE(pci, nozomi_pci_tbl);
457
458static struct nozomi *ndevs[NOZOMI_MAX_CARDS];
459static struct tty_driver *ntty_driver;
460
461/*
462 * find card by tty_index
463 */
464static inline struct nozomi *get_dc_by_tty(const struct tty_struct *tty)
465{
466 return tty ? ndevs[tty->index / MAX_PORT] : NULL;
467}
468
469static inline struct port *get_port_by_tty(const struct tty_struct *tty)
470{
471 struct nozomi *ndev = get_dc_by_tty(tty);
472 return ndev ? &ndev->port[tty->index % MAX_PORT] : NULL;
473}
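
For example, with MAX_PORT = 4 a tty opened as index 6 resolves to card ndevs[6 / 4] = ndevs[1] and port 6 % 4 = 2 (PORT_APP1) on that card.
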
474
475/*
476 * TODO:
477 * -Optimize
478 * -Rewrite cleaner
479 */
480
481static void read_mem32(u32 *buf, const void __iomem *mem_addr_start,
482 u32 size_bytes)
483{
484 u32 i = 0;
485 const u32 *ptr = (__force u32 *) mem_addr_start;
486 u16 *buf16;
487
488 if (unlikely(!ptr || !buf))
489 goto out;
490
491 /* shortcut for extremely often used cases */
492 switch (size_bytes) {
493 case 2: /* 2 bytes */
494 buf16 = (u16 *) buf;
495 *buf16 = __le16_to_cpu(readw((void __iomem *)ptr));
496 goto out;
497 break;
498 case 4: /* 4 bytes */
499 *(buf) = __le32_to_cpu(readl((void __iomem *)ptr));
500 goto out;
501 break;
502 }
503
504 while (i < size_bytes) {
505 if (size_bytes - i == 2) {
506 /* Handle 2 bytes in the end */
507 buf16 = (u16 *) buf;
508 *(buf16) = __le16_to_cpu(readw((void __iomem *)ptr));
509 i += 2;
510 } else {
511 /* Read 4 bytes */
512 *(buf) = __le32_to_cpu(readl((void __iomem *)ptr));
513 i += 4;
514 }
515 buf++;
516 ptr++;
517 }
518out:
519 return;
520}
521
522/*
523 * TODO:
524 * -Optimize
525 * -Rewrite cleaner
526 */
527static u32 write_mem32(void __iomem *mem_addr_start, u32 *buf,
528 u32 size_bytes)
529{
530 u32 i = 0;
531 u32 *ptr = (__force u32 *) mem_addr_start;
532 u16 *buf16;
533
534 if (unlikely(!ptr || !buf))
535 return 0;
536
537 /* shortcut for extremely often used cases */
538 switch (size_bytes) {
539 case 2: /* 2 bytes */
540 buf16 = (u16 *) buf;
541 writew(__cpu_to_le16(*buf16), (void __iomem *)ptr);
542 return 2;
543 break;
544 case 1: /*
545 * also needs to write 4 bytes in this case
546 * so falling through..
547 */
548 case 4: /* 4 bytes */
549 writel(__cpu_to_le32(*buf), (void __iomem *)ptr);
550 return 4;
551 break;
552 }
553
554 while (i < size_bytes) {
555 if (size_bytes - i == 2) {
556 /* 2 bytes */
557 buf16 = (u16 *) buf;
558 writew(__cpu_to_le16(*buf16), (void __iomem *)ptr);
559 i += 2;
560 } else {
561 /* 4 bytes */
562 writel(__cpu_to_le32(*buf), (void __iomem *)ptr);
563 i += 4;
564 }
565 buf++;
566 ptr++;
567 }
568 return i;
569}
570
571/* Setup pointers to different channels and also setup buffer sizes. */
572static void setup_memory(struct nozomi *dc)
573{
574 void __iomem *offset = dc->base_addr + dc->config_table.dl_start;
575 /* The length reported is including the length field of 4 bytes,
576 * hence subtract with 4.
577 */
578 const u16 buff_offset = 4;
579
580 /* Modem port dl configuration */
581 dc->port[PORT_MDM].dl_addr[CH_A] = offset;
582 dc->port[PORT_MDM].dl_addr[CH_B] =
583 (offset += dc->config_table.dl_mdm_len1);
584 dc->port[PORT_MDM].dl_size[CH_A] =
585 dc->config_table.dl_mdm_len1 - buff_offset;
586 dc->port[PORT_MDM].dl_size[CH_B] =
587 dc->config_table.dl_mdm_len2 - buff_offset;
588
589 /* Diag port dl configuration */
590 dc->port[PORT_DIAG].dl_addr[CH_A] =
591 (offset += dc->config_table.dl_mdm_len2);
592 dc->port[PORT_DIAG].dl_size[CH_A] =
593 dc->config_table.dl_diag_len1 - buff_offset;
594 dc->port[PORT_DIAG].dl_addr[CH_B] =
595 (offset += dc->config_table.dl_diag_len1);
596 dc->port[PORT_DIAG].dl_size[CH_B] =
597 dc->config_table.dl_diag_len2 - buff_offset;
598
599 /* App1 port dl configuration */
600 dc->port[PORT_APP1].dl_addr[CH_A] =
601 (offset += dc->config_table.dl_diag_len2);
602 dc->port[PORT_APP1].dl_size[CH_A] =
603 dc->config_table.dl_app1_len - buff_offset;
604
605 /* App2 port dl configuration */
606 dc->port[PORT_APP2].dl_addr[CH_A] =
607 (offset += dc->config_table.dl_app1_len);
608 dc->port[PORT_APP2].dl_size[CH_A] =
609 dc->config_table.dl_app2_len - buff_offset;
610
611 /* Ctrl dl configuration */
612 dc->port[PORT_CTRL].dl_addr[CH_A] =
613 (offset += dc->config_table.dl_app2_len);
614 dc->port[PORT_CTRL].dl_size[CH_A] =
615 dc->config_table.dl_ctrl_len - buff_offset;
616
617 offset = dc->base_addr + dc->config_table.ul_start;
618
619 /* Modem Port ul configuration */
620 dc->port[PORT_MDM].ul_addr[CH_A] = offset;
621 dc->port[PORT_MDM].ul_size[CH_A] =
622 dc->config_table.ul_mdm_len1 - buff_offset;
623 dc->port[PORT_MDM].ul_addr[CH_B] =
624 (offset += dc->config_table.ul_mdm_len1);
625 dc->port[PORT_MDM].ul_size[CH_B] =
626 dc->config_table.ul_mdm_len2 - buff_offset;
627
628 /* Diag port ul configuration */
629 dc->port[PORT_DIAG].ul_addr[CH_A] =
630 (offset += dc->config_table.ul_mdm_len2);
631 dc->port[PORT_DIAG].ul_size[CH_A] =
632 dc->config_table.ul_diag_len - buff_offset;
633
634 /* App1 port ul configuration */
635 dc->port[PORT_APP1].ul_addr[CH_A] =
636 (offset += dc->config_table.ul_diag_len);
637 dc->port[PORT_APP1].ul_size[CH_A] =
638 dc->config_table.ul_app1_len - buff_offset;
639
640 /* App2 port ul configuration */
641 dc->port[PORT_APP2].ul_addr[CH_A] =
642 (offset += dc->config_table.ul_app1_len);
643 dc->port[PORT_APP2].ul_size[CH_A] =
644 dc->config_table.ul_app2_len - buff_offset;
645
646 /* Ctrl ul configuration */
647 dc->port[PORT_CTRL].ul_addr[CH_A] =
648 (offset += dc->config_table.ul_app2_len);
649 dc->port[PORT_CTRL].ul_size[CH_A] =
650 dc->config_table.ul_ctrl_len - buff_offset;
651}
652
653/* Dump config table during the initialization phase */
654#ifdef DEBUG
655static void dump_table(const struct nozomi *dc)
656{
657 DBG3("signature: 0x%08X", dc->config_table.signature);
658 DBG3("version: 0x%04X", dc->config_table.version);
659 DBG3("product_information: 0x%04X", \
660 dc->config_table.product_information);
661 DBG3("toggle enabled: %d", dc->config_table.toggle.enabled);
662 DBG3("toggle up_mdm: %d", dc->config_table.toggle.mdm_ul);
663 DBG3("toggle dl_mdm: %d", dc->config_table.toggle.mdm_dl);
664 DBG3("toggle dl_dbg: %d", dc->config_table.toggle.diag_dl);
665
666 DBG3("dl_start: 0x%04X", dc->config_table.dl_start);
667 DBG3("dl_mdm_len0: 0x%04X, %d", dc->config_table.dl_mdm_len1,
668 dc->config_table.dl_mdm_len1);
669 DBG3("dl_mdm_len1: 0x%04X, %d", dc->config_table.dl_mdm_len2,
670 dc->config_table.dl_mdm_len2);
671 DBG3("dl_diag_len0: 0x%04X, %d", dc->config_table.dl_diag_len1,
672 dc->config_table.dl_diag_len1);
673 DBG3("dl_diag_len1: 0x%04X, %d", dc->config_table.dl_diag_len2,
674 dc->config_table.dl_diag_len2);
675 DBG3("dl_app1_len: 0x%04X, %d", dc->config_table.dl_app1_len,
676 dc->config_table.dl_app1_len);
677 DBG3("dl_app2_len: 0x%04X, %d", dc->config_table.dl_app2_len,
678 dc->config_table.dl_app2_len);
679 DBG3("dl_ctrl_len: 0x%04X, %d", dc->config_table.dl_ctrl_len,
680 dc->config_table.dl_ctrl_len);
681 DBG3("ul_start: 0x%04X, %d", dc->config_table.ul_start,
682 dc->config_table.ul_start);
683 DBG3("ul_mdm_len[0]: 0x%04X, %d", dc->config_table.ul_mdm_len1,
684 dc->config_table.ul_mdm_len1);
685 DBG3("ul_mdm_len[1]: 0x%04X, %d", dc->config_table.ul_mdm_len2,
686 dc->config_table.ul_mdm_len2);
687 DBG3("ul_diag_len: 0x%04X, %d", dc->config_table.ul_diag_len,
688 dc->config_table.ul_diag_len);
689 DBG3("ul_app1_len: 0x%04X, %d", dc->config_table.ul_app1_len,
690 dc->config_table.ul_app1_len);
691 DBG3("ul_app2_len: 0x%04X, %d", dc->config_table.ul_app2_len,
692 dc->config_table.ul_app2_len);
693 DBG3("ul_ctrl_len: 0x%04X, %d", dc->config_table.ul_ctrl_len,
694 dc->config_table.ul_ctrl_len);
695}
696#else
697static __inline__ void dump_table(const struct nozomi *dc) { }
698#endif
699
700/*
701 * Read configuration table from card during the initialization phase
702 * Returns 1 if ok, else 0
703 */
704static int nozomi_read_config_table(struct nozomi *dc)
705{
706 read_mem32((u32 *) &dc->config_table, dc->base_addr + 0,
707 sizeof(struct config_table));
708
709 if (dc->config_table.signature != CONFIG_MAGIC) {
710 dev_err(&dc->pdev->dev, "ConfigTable Bad! 0x%08X != 0x%08X\n",
711 dc->config_table.signature, CONFIG_MAGIC);
712 return 0;
713 }
714
715 if ((dc->config_table.version == 0)
716 || (dc->config_table.toggle.enabled == TOGGLE_VALID)) {
717 int i;
718 DBG1("Second phase, configuring card");
719
720 setup_memory(dc);
721
722 dc->port[PORT_MDM].toggle_ul = dc->config_table.toggle.mdm_ul;
723 dc->port[PORT_MDM].toggle_dl = dc->config_table.toggle.mdm_dl;
724 dc->port[PORT_DIAG].toggle_dl = dc->config_table.toggle.diag_dl;
725 DBG1("toggle ports: MDM UL:%d MDM DL:%d, DIAG DL:%d",
726 dc->port[PORT_MDM].toggle_ul,
727 dc->port[PORT_MDM].toggle_dl, dc->port[PORT_DIAG].toggle_dl);
728
729 dump_table(dc);
730
731 for (i = PORT_MDM; i < MAX_PORT; i++) {
732 dc->port[i].fifo_ul =
733 kfifo_alloc(FIFO_BUFFER_SIZE_UL, GFP_ATOMIC, NULL);
734 memset(&dc->port[i].ctrl_dl, 0, sizeof(struct ctrl_dl));
735 memset(&dc->port[i].ctrl_ul, 0, sizeof(struct ctrl_ul));
736 }
737
738 /* Enable control channel */
739 dc->last_ier = dc->last_ier | CTRL_DL;
740 writew(dc->last_ier, dc->reg_ier);
741
742 dev_info(&dc->pdev->dev, "Initialization OK!\n");
743 return 1;
744 }
745
746 if ((dc->config_table.version > 0)
747 && (dc->config_table.toggle.enabled != TOGGLE_VALID)) {
748 u32 offset = 0;
749 DBG1("First phase: pushing upload buffers, clearing download");
750
751 dev_info(&dc->pdev->dev, "Version of card: %d\n",
752 dc->config_table.version);
753
754 /* Here we should disable all I/O over F32. */
755 setup_memory(dc);
756
757 /*
758 * We should send ALL channel pair tokens back along
759 * with reset token
760 */
761
762 /* push upload modem buffers */
763 write_mem32(dc->port[PORT_MDM].ul_addr[CH_A],
764 (u32 *) &offset, 4);
765 write_mem32(dc->port[PORT_MDM].ul_addr[CH_B],
766 (u32 *) &offset, 4);
767
768 writew(MDM_UL | DIAG_DL | MDM_DL, dc->reg_fcr);
769
770 DBG1("First phase done");
771 }
772
773 return 1;
774}
775
776/* Enable uplink interrupts */
777static void enable_transmit_ul(enum port_type port, struct nozomi *dc)
778{
779 u16 mask[NOZOMI_MAX_PORTS] = \
780 {MDM_UL, DIAG_UL, APP1_UL, APP2_UL, CTRL_UL};
781
782 if (port < NOZOMI_MAX_PORTS) {
783 dc->last_ier |= mask[port];
784 writew(dc->last_ier, dc->reg_ier);
785 } else {
786 dev_err(&dc->pdev->dev, "Called with wrong port?\n");
787 }
788}
789
790/* Disable uplink interrupts */
791static void disable_transmit_ul(enum port_type port, struct nozomi *dc)
792{
793 u16 mask[NOZOMI_MAX_PORTS] = \
794 {~MDM_UL, ~DIAG_UL, ~APP1_UL, ~APP2_UL, ~CTRL_UL};
795
796 if (port < NOZOMI_MAX_PORTS) {
797 dc->last_ier &= mask[port];
798 writew(dc->last_ier, dc->reg_ier);
799 } else {
800 dev_err(&dc->pdev->dev, "Called with wrong port?\n");
801 }
802}
803
804/* Enable downlink interrupts */
805static void enable_transmit_dl(enum port_type port, struct nozomi *dc)
806{
807 u16 mask[NOZOMI_MAX_PORTS] = \
808 {MDM_DL, DIAG_DL, APP1_DL, APP2_DL, CTRL_DL};
809
810 if (port < NOZOMI_MAX_PORTS) {
811 dc->last_ier |= mask[port];
812 writew(dc->last_ier, dc->reg_ier);
813 } else {
814 dev_err(&dc->pdev->dev, "Called with wrong port?\n");
815 }
816}
817
818/* Disable downlink interrupts */
819static void disable_transmit_dl(enum port_type port, struct nozomi *dc)
820{
821 u16 mask[NOZOMI_MAX_PORTS] = \
822 {~MDM_DL, ~DIAG_DL, ~APP1_DL, ~APP2_DL, ~CTRL_DL};
823
824 if (port < NOZOMI_MAX_PORTS) {
825 dc->last_ier &= mask[port];
826 writew(dc->last_ier, dc->reg_ier);
827 } else {
828 dev_err(&dc->pdev->dev, "Called with wrong port?\n");
829 }
830}
831
832/*
833 * Return 1 - send buffer to card and ack.
834 * Return 0 - don't ack, don't send buffer to card.
835 */
836static int send_data(enum port_type index, struct nozomi *dc)
837{
838 u32 size = 0;
839 struct port *port = &dc->port[index];
840 u8 toggle = port->toggle_ul;
841 void __iomem *addr = port->ul_addr[toggle];
842 u32 ul_size = port->ul_size[toggle];
843 struct tty_struct *tty = port->tty;
844
845 /* Get data from tty and place in buf for now */
846 size = __kfifo_get(port->fifo_ul, dc->send_buf,
847 ul_size < SEND_BUF_MAX ? ul_size : SEND_BUF_MAX);
848
849 if (size == 0) {
850 DBG4("No more data to send, disable link:");
851 return 0;
852 }
853
854 /* DUMP(buf, size); */
855
856 /* Write length + data */
857 write_mem32(addr, (u32 *) &size, 4);
858 write_mem32(addr + 4, (u32 *) dc->send_buf, size);
859
860 if (tty)
861 tty_wakeup(tty);
862
863 return 1;
864}
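
send_data() and receive_data() share a simple framing: the first 4 bytes of a channel buffer carry the payload length and the payload starts at offset 4, which is also why setup_memory() subtracts 4 from every configured channel length. A standalone sketch of that layout (hypothetical helper in host byte order; the driver performs the actual writes through write_mem32()):

	#include <stdint.h>
	#include <string.h>

	/* Illustration only: lay out one uplink frame as the card expects it. */
	static size_t noz_frame(uint8_t *dst, const uint8_t *payload, uint32_t len)
	{
		memcpy(dst, &len, 4);		/* length word, as write_mem32(addr, &size, 4) */
		memcpy(dst + 4, payload, len);	/* payload, as write_mem32(addr + 4, send_buf, size) */
		return 4 + len;
	}
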
865
866/* If all data has been read, return 1, else 0 */
867static int receive_data(enum port_type index, struct nozomi *dc)
868{
869 u8 buf[RECEIVE_BUF_MAX] = { 0 };
870 int size;
871 u32 offset = 4;
872 struct port *port = &dc->port[index];
873 void __iomem *addr = port->dl_addr[port->toggle_dl];
874 struct tty_struct *tty = port->tty;
875 int i;
876
877 if (unlikely(!tty)) {
878 DBG1("tty not open for port: %d?", index);
879 return 1;
880 }
881
882 read_mem32((u32 *) &size, addr, 4);
883 /* DBG1( "%d bytes port: %d", size, index); */
884
885 if (test_bit(TTY_THROTTLED, &tty->flags)) {
886 DBG1("No room in tty, don't read data, don't ack interrupt, "
887 "disable interrupt");
888
889 /* disable interrupt in downlink... */
890 disable_transmit_dl(index, dc);
891 return 0;
892 }
893
894 if (unlikely(size == 0)) {
895 dev_err(&dc->pdev->dev, "size == 0?\n");
896 return 1;
897 }
898
899 tty_buffer_request_room(tty, size);
900
901 while (size > 0) {
902 read_mem32((u32 *) buf, addr + offset, RECEIVE_BUF_MAX);
903
904 if (size == 1) {
905 tty_insert_flip_char(tty, buf[0], TTY_NORMAL);
906 size = 0;
907 } else if (size < RECEIVE_BUF_MAX) {
908 size -= tty_insert_flip_string(tty, (char *) buf, size);
909 } else {
910 i = tty_insert_flip_string(tty, \
911 (char *) buf, RECEIVE_BUF_MAX);
912 size -= i;
913 offset += i;
914 }
915 }
916
917 set_bit(index, &dc->flip);
918
919 return 1;
920}
921
922/* Debug for interrupts */
923#ifdef DEBUG
924static char *interrupt2str(u16 interrupt)
925{
926 static char buf[TMP_BUF_MAX];
927 char *p = buf;
928
929 interrupt & MDM_DL1 ? p += snprintf(p, TMP_BUF_MAX, "MDM_DL1 ") : NULL;
930 interrupt & MDM_DL2 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
931 "MDM_DL2 ") : NULL;
932
933 interrupt & MDM_UL1 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
934 "MDM_UL1 ") : NULL;
935 interrupt & MDM_UL2 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
936 "MDM_UL2 ") : NULL;
937
938 interrupt & DIAG_DL1 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
939 "DIAG_DL1 ") : NULL;
940 interrupt & DIAG_DL2 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
941 "DIAG_DL2 ") : NULL;
942
943 interrupt & DIAG_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
944 "DIAG_UL ") : NULL;
945
946 interrupt & APP1_DL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
947 "APP1_DL ") : NULL;
948 interrupt & APP2_DL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
949 "APP2_DL ") : NULL;
950
951 interrupt & APP1_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
952 "APP1_UL ") : NULL;
953 interrupt & APP2_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
954 "APP2_UL ") : NULL;
955
956 interrupt & CTRL_DL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
957 "CTRL_DL ") : NULL;
958 interrupt & CTRL_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
959 "CTRL_UL ") : NULL;
960
961 interrupt & RESET ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
962 "RESET ") : NULL;
963
964 return buf;
965}
966#endif
967
968/*
969 * Receive flow control
970 * Return 1 - If ok, else 0
971 */
972static int receive_flow_control(struct nozomi *dc)
973{
974 enum port_type port = PORT_MDM;
975 struct ctrl_dl ctrl_dl;
976 struct ctrl_dl old_ctrl;
977 u16 enable_ier = 0;
978
979 read_mem32((u32 *) &ctrl_dl, dc->port[PORT_CTRL].dl_addr[CH_A], 2);
980
981 switch (ctrl_dl.port) {
982 case CTRL_CMD:
983 DBG1("The Base Band sends this value as a response to a "
984 "request for IMSI detach sent over the control "
985 "channel uplink (see section 7.6.1).");
986 break;
987 case CTRL_MDM:
988 port = PORT_MDM;
989 enable_ier = MDM_DL;
990 break;
991 case CTRL_DIAG:
992 port = PORT_DIAG;
993 enable_ier = DIAG_DL;
994 break;
995 case CTRL_APP1:
996 port = PORT_APP1;
997 enable_ier = APP1_DL;
998 break;
999 case CTRL_APP2:
1000 port = PORT_APP2;
1001 enable_ier = APP2_DL;
1002 break;
1003 default:
1004 dev_err(&dc->pdev->dev,
1005 "ERROR: flow control received for non-existing port\n");
1006 return 0;
1007 };
1008
1009 DBG1("0x%04X->0x%04X", *((u16 *)&dc->port[port].ctrl_dl),
1010 *((u16 *)&ctrl_dl));
1011
1012 old_ctrl = dc->port[port].ctrl_dl;
1013 dc->port[port].ctrl_dl = ctrl_dl;
1014
1015 if (old_ctrl.CTS == 1 && ctrl_dl.CTS == 0) {
1016 DBG1("Disable interrupt (0x%04X) on port: %d",
1017 enable_ier, port);
1018 disable_transmit_ul(port, dc);
1019
1020 } else if (old_ctrl.CTS == 0 && ctrl_dl.CTS == 1) {
1021
1022 if (__kfifo_len(dc->port[port].fifo_ul)) {
1023 DBG1("Enable interrupt (0x%04X) on port: %d",
1024 enable_ier, port);
1025 DBG1("Data in buffer [%d], enable transmit! ",
1026 __kfifo_len(dc->port[port].fifo_ul));
1027 enable_transmit_ul(port, dc);
1028 } else {
1029 DBG1("No data in buffer...");
1030 }
1031 }
1032
1033 if (*(u16 *)&old_ctrl == *(u16 *)&ctrl_dl) {
1034 DBG1(" No change in mctrl");
1035 return 1;
1036 }
1037 /* Update statistics */
1038 if (old_ctrl.CTS != ctrl_dl.CTS)
1039 dc->port[port].tty_icount.cts++;
1040 if (old_ctrl.DSR != ctrl_dl.DSR)
1041 dc->port[port].tty_icount.dsr++;
1042 if (old_ctrl.RI != ctrl_dl.RI)
1043 dc->port[port].tty_icount.rng++;
1044 if (old_ctrl.DCD != ctrl_dl.DCD)
1045 dc->port[port].tty_icount.dcd++;
1046
1047 wake_up_interruptible(&dc->port[port].tty_wait);
1048
1049 DBG1("port: %d DCD(%d), CTS(%d), RI(%d), DSR(%d)",
1050 port,
1051 dc->port[port].tty_icount.dcd, dc->port[port].tty_icount.cts,
1052 dc->port[port].tty_icount.rng, dc->port[port].tty_icount.dsr);
1053
1054 return 1;
1055}
1056
1057static enum ctrl_port_type port2ctrl(enum port_type port,
1058 const struct nozomi *dc)
1059{
1060 switch (port) {
1061 case PORT_MDM:
1062 return CTRL_MDM;
1063 case PORT_DIAG:
1064 return CTRL_DIAG;
1065 case PORT_APP1:
1066 return CTRL_APP1;
1067 case PORT_APP2:
1068 return CTRL_APP2;
1069 default:
1070 dev_err(&dc->pdev->dev,
1071 "ERROR: send flow control " \
1072 "received for non-existing port\n");
1073 };
1074 return CTRL_ERROR;
1075}
1076
1077/*
1078 * Send flow control, can only update one channel at a time
1079 * Return 0 - If we have updated all flow control
1080 * Return 1 - If we need to update more flow control, ack current enable more
1081 */
1082static int send_flow_control(struct nozomi *dc)
1083{
1084 u32 i, more_flow_control_to_be_updated = 0;
1085 u16 *ctrl;
1086
1087 for (i = PORT_MDM; i < MAX_PORT; i++) {
1088 if (dc->port[i].update_flow_control) {
1089 if (more_flow_control_to_be_updated) {
1090 /* We have more flow control to be updated */
1091 return 1;
1092 }
1093 dc->port[i].ctrl_ul.port = port2ctrl(i, dc);
1094 ctrl = (u16 *)&dc->port[i].ctrl_ul;
1095 write_mem32(dc->port[PORT_CTRL].ul_addr[0], \
1096 (u32 *) ctrl, 2);
1097 dc->port[i].update_flow_control = 0;
1098 more_flow_control_to_be_updated = 1;
1099 }
1100 }
1101 return 0;
1102}
1103
1104/*
1105 * Handle downlink data; ports handled are modem and diagnostics
1106 * Return 1 - ok
1107 * Return 0 - toggle fields are out of sync
1108 */
1109static int handle_data_dl(struct nozomi *dc, enum port_type port, u8 *toggle,
1110 u16 read_iir, u16 mask1, u16 mask2)
1111{
1112 if (*toggle == 0 && read_iir & mask1) {
1113 if (receive_data(port, dc)) {
1114 writew(mask1, dc->reg_fcr);
1115 *toggle = !(*toggle);
1116 }
1117
1118 if (read_iir & mask2) {
1119 if (receive_data(port, dc)) {
1120 writew(mask2, dc->reg_fcr);
1121 *toggle = !(*toggle);
1122 }
1123 }
1124 } else if (*toggle == 1 && read_iir & mask2) {
1125 if (receive_data(port, dc)) {
1126 writew(mask2, dc->reg_fcr);
1127 *toggle = !(*toggle);
1128 }
1129
1130 if (read_iir & mask1) {
1131 if (receive_data(port, dc)) {
1132 writew(mask1, dc->reg_fcr);
1133 *toggle = !(*toggle);
1134 }
1135 }
1136 } else {
1137 dev_err(&dc->pdev->dev, "port out of sync!, toggle:%d\n",
1138 *toggle);
1139 return 0;
1140 }
1141 return 1;
1142}
1143
1144/*
1145 * Handle uplink data, this is currently for the modem port
1146 * Return 1 - ok
1147 * Return 0 - toggle fields are out of sync
1148 */
1149static int handle_data_ul(struct nozomi *dc, enum port_type port, u16 read_iir)
1150{
1151 u8 *toggle = &(dc->port[port].toggle_ul);
1152
1153 if (*toggle == 0 && read_iir & MDM_UL1) {
1154 dc->last_ier &= ~MDM_UL;
1155 writew(dc->last_ier, dc->reg_ier);
1156 if (send_data(port, dc)) {
1157 writew(MDM_UL1, dc->reg_fcr);
1158 dc->last_ier = dc->last_ier | MDM_UL;
1159 writew(dc->last_ier, dc->reg_ier);
1160 *toggle = !*toggle;
1161 }
1162
1163 if (read_iir & MDM_UL2) {
1164 dc->last_ier &= ~MDM_UL;
1165 writew(dc->last_ier, dc->reg_ier);
1166 if (send_data(port, dc)) {
1167 writew(MDM_UL2, dc->reg_fcr);
1168 dc->last_ier = dc->last_ier | MDM_UL;
1169 writew(dc->last_ier, dc->reg_ier);
1170 *toggle = !*toggle;
1171 }
1172 }
1173
1174 } else if (*toggle == 1 && read_iir & MDM_UL2) {
1175 dc->last_ier &= ~MDM_UL;
1176 writew(dc->last_ier, dc->reg_ier);
1177 if (send_data(port, dc)) {
1178 writew(MDM_UL2, dc->reg_fcr);
1179 dc->last_ier = dc->last_ier | MDM_UL;
1180 writew(dc->last_ier, dc->reg_ier);
1181 *toggle = !*toggle;
1182 }
1183
1184 if (read_iir & MDM_UL1) {
1185 dc->last_ier &= ~MDM_UL;
1186 writew(dc->last_ier, dc->reg_ier);
1187 if (send_data(port, dc)) {
1188 writew(MDM_UL1, dc->reg_fcr);
1189 dc->last_ier = dc->last_ier | MDM_UL;
1190 writew(dc->last_ier, dc->reg_ier);
1191 *toggle = !*toggle;
1192 }
1193 }
1194 } else {
1195 writew(read_iir & MDM_UL, dc->reg_fcr);
1196 dev_err(&dc->pdev->dev, "port out of sync!\n");
1197 return 0;
1198 }
1199 return 1;
1200}
1201
1202static irqreturn_t interrupt_handler(int irq, void *dev_id)
1203{
1204 struct nozomi *dc = dev_id;
1205 unsigned int a;
1206 u16 read_iir;
1207
1208 if (!dc)
1209 return IRQ_NONE;
1210
1211 spin_lock(&dc->spin_mutex);
1212 read_iir = readw(dc->reg_iir);
1213
1214 /* Card removed */
1215 if (read_iir == (u16)-1)
1216 goto none;
1217 /*
1218 * Just handle interrupt enabled in IER
1219 * (by masking with dc->last_ier)
1220 */
1221 read_iir &= dc->last_ier;
1222
1223 if (read_iir == 0)
1224 goto none;
1225
1226
1227 DBG4("%s irq:0x%04X, prev:0x%04X", interrupt2str(read_iir), read_iir,
1228 dc->last_ier);
1229
1230 if (read_iir & RESET) {
1231 if (unlikely(!nozomi_read_config_table(dc))) {
1232 dc->last_ier = 0x0;
1233 writew(dc->last_ier, dc->reg_ier);
1234 dev_err(&dc->pdev->dev, "Could not read status from "
1235 "card, we should disable interface\n");
1236 } else {
1237 writew(RESET, dc->reg_fcr);
1238 }
1239 /* No more useful info if this was the reset interrupt. */
1240 goto exit_handler;
1241 }
1242 if (read_iir & CTRL_UL) {
1243 DBG1("CTRL_UL");
1244 dc->last_ier &= ~CTRL_UL;
1245 writew(dc->last_ier, dc->reg_ier);
1246 if (send_flow_control(dc)) {
1247 writew(CTRL_UL, dc->reg_fcr);
1248 dc->last_ier = dc->last_ier | CTRL_UL;
1249 writew(dc->last_ier, dc->reg_ier);
1250 }
1251 }
1252 if (read_iir & CTRL_DL) {
1253 receive_flow_control(dc);
1254 writew(CTRL_DL, dc->reg_fcr);
1255 }
1256 if (read_iir & MDM_DL) {
1257 if (!handle_data_dl(dc, PORT_MDM,
1258 &(dc->port[PORT_MDM].toggle_dl), read_iir,
1259 MDM_DL1, MDM_DL2)) {
1260 dev_err(&dc->pdev->dev, "MDM_DL out of sync!\n");
1261 goto exit_handler;
1262 }
1263 }
1264 if (read_iir & MDM_UL) {
1265 if (!handle_data_ul(dc, PORT_MDM, read_iir)) {
1266 dev_err(&dc->pdev->dev, "MDM_UL out of sync!\n");
1267 goto exit_handler;
1268 }
1269 }
1270 if (read_iir & DIAG_DL) {
1271 if (!handle_data_dl(dc, PORT_DIAG,
1272 &(dc->port[PORT_DIAG].toggle_dl), read_iir,
1273 DIAG_DL1, DIAG_DL2)) {
1274 dev_err(&dc->pdev->dev, "DIAG_DL out of sync!\n");
1275 goto exit_handler;
1276 }
1277 }
1278 if (read_iir & DIAG_UL) {
1279 dc->last_ier &= ~DIAG_UL;
1280 writew(dc->last_ier, dc->reg_ier);
1281 if (send_data(PORT_DIAG, dc)) {
1282 writew(DIAG_UL, dc->reg_fcr);
1283 dc->last_ier = dc->last_ier | DIAG_UL;
1284 writew(dc->last_ier, dc->reg_ier);
1285 }
1286 }
1287 if (read_iir & APP1_DL) {
1288 if (receive_data(PORT_APP1, dc))
1289 writew(APP1_DL, dc->reg_fcr);
1290 }
1291 if (read_iir & APP1_UL) {
1292 dc->last_ier &= ~APP1_UL;
1293 writew(dc->last_ier, dc->reg_ier);
1294 if (send_data(PORT_APP1, dc)) {
1295 writew(APP1_UL, dc->reg_fcr);
1296 dc->last_ier = dc->last_ier | APP1_UL;
1297 writew(dc->last_ier, dc->reg_ier);
1298 }
1299 }
1300 if (read_iir & APP2_DL) {
1301 if (receive_data(PORT_APP2, dc))
1302 writew(APP2_DL, dc->reg_fcr);
1303 }
1304 if (read_iir & APP2_UL) {
1305 dc->last_ier &= ~APP2_UL;
1306 writew(dc->last_ier, dc->reg_ier);
1307 if (send_data(PORT_APP2, dc)) {
1308 writew(APP2_UL, dc->reg_fcr);
1309 dc->last_ier = dc->last_ier | APP2_UL;
1310 writew(dc->last_ier, dc->reg_ier);
1311 }
1312 }
1313
1314exit_handler:
1315 spin_unlock(&dc->spin_mutex);
1316 for (a = 0; a < NOZOMI_MAX_PORTS; a++)
1317 if (test_and_clear_bit(a, &dc->flip))
1318 tty_flip_buffer_push(dc->port[a].tty);
1319 return IRQ_HANDLED;
1320none:
1321 spin_unlock(&dc->spin_mutex);
1322 return IRQ_NONE;
1323}
1324
1325static void nozomi_get_card_type(struct nozomi *dc)
1326{
1327 int i;
1328 u32 size = 0;
1329
1330 for (i = 0; i < 6; i++)
1331 size += pci_resource_len(dc->pdev, i);
1332
1333 /* Assume card type F32_8 if no match */
1334 dc->card_type = size == 2048 ? F32_2 : F32_8;
1335
1336 dev_info(&dc->pdev->dev, "Card type is: %d\n", dc->card_type);
1337}
1338
1339static void nozomi_setup_private_data(struct nozomi *dc)
1340{
1341 void __iomem *offset = dc->base_addr + dc->card_type / 2;
1342 unsigned int i;
1343
1344 dc->reg_fcr = (void __iomem *)(offset + R_FCR);
1345 dc->reg_iir = (void __iomem *)(offset + R_IIR);
1346 dc->reg_ier = (void __iomem *)(offset + R_IER);
1347 dc->last_ier = 0;
1348 dc->flip = 0;
1349
1350 dc->port[PORT_MDM].token_dl = MDM_DL;
1351 dc->port[PORT_DIAG].token_dl = DIAG_DL;
1352 dc->port[PORT_APP1].token_dl = APP1_DL;
1353 dc->port[PORT_APP2].token_dl = APP2_DL;
1354
1355 for (i = 0; i < MAX_PORT; i++)
1356 init_waitqueue_head(&dc->port[i].tty_wait);
1357}
1358
1359static ssize_t card_type_show(struct device *dev, struct device_attribute *attr,
1360 char *buf)
1361{
1362 struct nozomi *dc = pci_get_drvdata(to_pci_dev(dev));
1363
1364 return sprintf(buf, "%d\n", dc->card_type);
1365}
1366static DEVICE_ATTR(card_type, 0444, card_type_show, NULL);
1367
1368static ssize_t open_ttys_show(struct device *dev, struct device_attribute *attr,
1369 char *buf)
1370{
1371 struct nozomi *dc = pci_get_drvdata(to_pci_dev(dev));
1372
1373 return sprintf(buf, "%u\n", dc->open_ttys);
1374}
1375static DEVICE_ATTR(open_ttys, 0444, open_ttys_show, NULL);
1376
1377static void make_sysfs_files(struct nozomi *dc)
1378{
1379 if (device_create_file(&dc->pdev->dev, &dev_attr_card_type))
1380 dev_err(&dc->pdev->dev,
1381 "Could not create sysfs file for card_type\n");
1382 if (device_create_file(&dc->pdev->dev, &dev_attr_open_ttys))
1383 dev_err(&dc->pdev->dev,
1384 "Could not create sysfs file for open_ttys\n");
1385}
1386
1387static void remove_sysfs_files(struct nozomi *dc)
1388{
1389 device_remove_file(&dc->pdev->dev, &dev_attr_card_type);
1390 device_remove_file(&dc->pdev->dev, &dev_attr_open_ttys);
1391}
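
The two attributes are created on the PCI device, so once a card is bound they should be readable from userspace under that device's sysfs directory, e.g. /sys/bus/pci/devices/<domain:bus:dev.fn>/card_type and .../open_ttys (path given as an illustration).
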
1392
1393/* Allocate memory for one device */
1394static int __devinit nozomi_card_init(struct pci_dev *pdev,
1395 const struct pci_device_id *ent)
1396{
1397 resource_size_t start;
1398 int ret;
1399 struct nozomi *dc = NULL;
1400 int ndev_idx;
1401 int i;
1402
1403 dev_dbg(&pdev->dev, "Init, new card found\n");
1404
1405 for (ndev_idx = 0; ndev_idx < ARRAY_SIZE(ndevs); ndev_idx++)
1406 if (!ndevs[ndev_idx])
1407 break;
1408
1409 if (ndev_idx >= ARRAY_SIZE(ndevs)) {
1410 dev_err(&pdev->dev, "no free tty range for this card left\n");
1411 ret = -EIO;
1412 goto err;
1413 }
1414
1415 dc = kzalloc(sizeof(struct nozomi), GFP_KERNEL);
1416 if (unlikely(!dc)) {
1417 dev_err(&pdev->dev, "Could not allocate memory\n");
1418 ret = -ENOMEM;
1419 goto err_free;
1420 }
1421
1422 dc->pdev = pdev;
1423
1424 /* Find out what card type it is */
1425 nozomi_get_card_type(dc);
1426
1427 ret = pci_enable_device(dc->pdev);
1428 if (ret) {
1429 dev_err(&pdev->dev, "Failed to enable PCI Device\n");
1430 goto err_free;
1431 }
1432
1433 start = pci_resource_start(dc->pdev, 0);
1434 if (start == 0) {
1435 dev_err(&pdev->dev, "No I/O address for card detected\n");
1436 ret = -ENODEV;
1437 goto err_disable_device;
1438 }
1439
1440 ret = pci_request_regions(dc->pdev, NOZOMI_NAME);
1441 if (ret) {
1442 dev_err(&pdev->dev, "I/O address 0x%04x already in use\n",
1443 (int) /* nozomi_private.io_addr */ 0);
1444 goto err_disable_device;
1445 }
1446
1447 dc->base_addr = ioremap(start, dc->card_type);
1448 if (!dc->base_addr) {
1449 dev_err(&pdev->dev, "Unable to map card MMIO\n");
1450 ret = -ENODEV;
1451 goto err_rel_regs;
1452 }
1453
1454 dc->send_buf = kmalloc(SEND_BUF_MAX, GFP_KERNEL);
1455 if (!dc->send_buf) {
1456 dev_err(&pdev->dev, "Could not allocate send buffer?\n");
1457 ret = -ENOMEM;
1458 goto err_free_sbuf;
1459 }
1460
1461 spin_lock_init(&dc->spin_mutex);
1462
1463 nozomi_setup_private_data(dc);
1464
1465 /* Disable all interrupts */
1466 dc->last_ier = 0;
1467 writew(dc->last_ier, dc->reg_ier);
1468
1469 ret = request_irq(pdev->irq, &interrupt_handler, IRQF_SHARED,
1470 NOZOMI_NAME, dc);
1471 if (unlikely(ret)) {
1472 dev_err(&pdev->dev, "can't request irq %d\n", pdev->irq);
1473 goto err_free_sbuf;
1474 }
1475
1476 DBG1("base_addr: %p", dc->base_addr);
1477
1478 make_sysfs_files(dc);
1479
1480 dc->index_start = ndev_idx * MAX_PORT;
1481 ndevs[ndev_idx] = dc;
1482
1483 for (i = 0; i < MAX_PORT; i++) {
1484 mutex_init(&dc->port[i].tty_sem);
1485 dc->port[i].tty_open_count = 0;
1486 dc->port[i].tty = NULL;
1487 tty_register_device(ntty_driver, dc->index_start + i,
1488 &pdev->dev);
1489 }
1490
1491 /* Enable RESET interrupt. */
1492 dc->last_ier = RESET;
1493 writew(dc->last_ier, dc->reg_ier);
1494
1495 pci_set_drvdata(pdev, dc);
1496
1497 return 0;
1498
1499err_free_sbuf:
1500 kfree(dc->send_buf);
1501 iounmap(dc->base_addr);
1502err_rel_regs:
1503 pci_release_regions(pdev);
1504err_disable_device:
1505 pci_disable_device(pdev);
1506err_free:
1507 kfree(dc);
1508err:
1509 return ret;
1510}
1511
1512static void __devexit tty_exit(struct nozomi *dc)
1513{
1514 unsigned int i;
1515
1516 DBG1(" ");
1517
1518 flush_scheduled_work();
1519
1520 for (i = 0; i < MAX_PORT; ++i)
1521 if (dc->port[i].tty && \
1522 list_empty(&dc->port[i].tty->hangup_work.entry))
1523 tty_hangup(dc->port[i].tty);
1524
1525 while (dc->open_ttys)
1526 msleep(1);
1527
1528 for (i = dc->index_start; i < dc->index_start + MAX_PORT; ++i)
1529 tty_unregister_device(ntty_driver, i);
1530}
1531
1532/* Deallocate memory for one device */
1533static void __devexit nozomi_card_exit(struct pci_dev *pdev)
1534{
1535 int i;
1536 struct ctrl_ul ctrl;
1537 struct nozomi *dc = pci_get_drvdata(pdev);
1538
1539 /* Disable all interrupts */
1540 dc->last_ier = 0;
1541 writew(dc->last_ier, dc->reg_ier);
1542
1543 tty_exit(dc);
1544
1545 /* Send 0x0001, command card to resend the reset token. */
1546 /* This is to get the reset when the module is reloaded. */
1547 ctrl.port = 0x00;
1548 ctrl.reserved = 0;
1549 ctrl.RTS = 0;
1550 ctrl.DTR = 1;
1551 DBG1("sending flow control 0x%04X", *((u16 *)&ctrl));
1552
1553 /* Setup dc->reg addresses so we can use defines here */
1554 write_mem32(dc->port[PORT_CTRL].ul_addr[0], (u32 *)&ctrl, 2);
1555 writew(CTRL_UL, dc->reg_fcr); /* push the token to the card. */
1556
1557 remove_sysfs_files(dc);
1558
1559 free_irq(pdev->irq, dc);
1560
1561 for (i = 0; i < MAX_PORT; i++)
1562 if (dc->port[i].fifo_ul)
1563 kfifo_free(dc->port[i].fifo_ul);
1564
1565 kfree(dc->send_buf);
1566
1567 iounmap(dc->base_addr);
1568
1569 pci_release_regions(pdev);
1570
1571 pci_disable_device(pdev);
1572
1573 ndevs[dc->index_start / MAX_PORT] = NULL;
1574
1575 kfree(dc);
1576}
1577
1578static void set_rts(const struct tty_struct *tty, int rts)
1579{
1580 struct port *port = get_port_by_tty(tty);
1581
1582 port->ctrl_ul.RTS = rts;
1583 port->update_flow_control = 1;
1584 enable_transmit_ul(PORT_CTRL, get_dc_by_tty(tty));
1585}
1586
1587static void set_dtr(const struct tty_struct *tty, int dtr)
1588{
1589 struct port *port = get_port_by_tty(tty);
1590
1591 DBG1("SETTING DTR index: %d, dtr: %d", tty->index, dtr);
1592
1593 port->ctrl_ul.DTR = dtr;
1594 port->update_flow_control = 1;
1595 enable_transmit_ul(PORT_CTRL, get_dc_by_tty(tty));
1596}
1597
1598/*
1599 * ----------------------------------------------------------------------------
1600 * TTY code
1601 * ----------------------------------------------------------------------------
1602 */
1603
1604/* Called when the userspace process opens the tty, /dev/noz*. */
1605static int ntty_open(struct tty_struct *tty, struct file *file)
1606{
1607 struct port *port = get_port_by_tty(tty);
1608 struct nozomi *dc = get_dc_by_tty(tty);
1609 unsigned long flags;
1610
1611 if (!port || !dc)
1612 return -ENODEV;
1613
1614 if (mutex_lock_interruptible(&port->tty_sem))
1615 return -ERESTARTSYS;
1616
1617 port->tty_open_count++;
1618 dc->open_ttys++;
1619
1620 /* Enable interrupt downlink for channel */
1621 if (port->tty_open_count == 1) {
1622 tty->low_latency = 1;
1623 tty->driver_data = port;
1624 port->tty = tty;
1625 DBG1("open: %d", port->token_dl);
1626 spin_lock_irqsave(&dc->spin_mutex, flags);
1627 dc->last_ier = dc->last_ier | port->token_dl;
1628 writew(dc->last_ier, dc->reg_ier);
1629 spin_unlock_irqrestore(&dc->spin_mutex, flags);
1630 }
1631
1632 mutex_unlock(&port->tty_sem);
1633
1634 return 0;
1635}
1636
1637/* Called when the userspace process closes the tty, /dev/noz*. */
1638static void ntty_close(struct tty_struct *tty, struct file *file)
1639{
1640 struct nozomi *dc = get_dc_by_tty(tty);
1641 struct port *port = tty->driver_data;
1642 unsigned long flags;
1643
1644 if (!dc || !port)
1645 return;
1646
1647 if (mutex_lock_interruptible(&port->tty_sem))
1648 return;
1649
1650 if (!port->tty_open_count)
1651 goto exit;
1652
1653 dc->open_ttys--;
1654 port->tty_open_count--;
1655
1656 if (port->tty_open_count == 0) {
1657 DBG1("close: %d", port->token_dl);
1658 spin_lock_irqsave(&dc->spin_mutex, flags);
1659 dc->last_ier &= ~(port->token_dl);
1660 writew(dc->last_ier, dc->reg_ier);
1661 spin_unlock_irqrestore(&dc->spin_mutex, flags);
1662 }
1663
1664exit:
1665 mutex_unlock(&port->tty_sem);
1666}
1667
1668/*
1669 * Called when the userspace process writes to the tty (/dev/noz*).
1670 * Data is inserted into a fifo, which is then read and transferred to the modem.
1671 */
1672static int ntty_write(struct tty_struct *tty, const unsigned char *buffer,
1673 int count)
1674{
1675 int rval = -EINVAL;
1676 struct nozomi *dc = get_dc_by_tty(tty);
1677 struct port *port = tty->driver_data;
1678 unsigned long flags;
1679
1680 /* DBG1( "WRITEx: %d, index = %d", count, index); */
1681
1682 if (!dc || !port)
1683 return -ENODEV;
1684
1685 if (unlikely(!mutex_trylock(&port->tty_sem))) {
1686 /*
1687 * must test lock as tty layer wraps calls
1688 * to this function with BKL
1689 */
1690 dev_err(&dc->pdev->dev, "Would have deadlocked - "
1691 "return EAGAIN\n");
1692 return -EAGAIN;
1693 }
1694
1695 if (unlikely(!port->tty_open_count)) {
1696 DBG1(" ");
1697 goto exit;
1698 }
1699
1700 rval = __kfifo_put(port->fifo_ul, (unsigned char *)buffer, count);
1701
1702 /* notify card */
1703 if (unlikely(dc == NULL)) {
1704 DBG1("No device context?");
1705 goto exit;
1706 }
1707
1708 spin_lock_irqsave(&dc->spin_mutex, flags);
1709 /* CTS is only valid on the modem channel */
1710 if (port == &(dc->port[PORT_MDM])) {
1711 if (port->ctrl_dl.CTS) {
1712 DBG4("Enable interrupt");
1713 enable_transmit_ul(tty->index % MAX_PORT, dc);
1714 } else {
1715 dev_err(&dc->pdev->dev,
1716 "CTS not active on modem port?\n");
1717 }
1718 } else {
1719 enable_transmit_ul(tty->index % MAX_PORT, dc);
1720 }
1721 spin_unlock_irqrestore(&dc->spin_mutex, flags);
1722
1723exit:
1724 mutex_unlock(&port->tty_sem);
1725 return rval;
1726}
1727
1728/*
1729 * Calculate how much is left in device
1730 * This method is called by the upper tty layer.
1731 * According to the N_TTY.c sources it expects a value >= 0 and
1732 * does not check for negative values.
1733 */
1734static int ntty_write_room(struct tty_struct *tty)
1735{
1736 struct port *port = tty->driver_data;
1737 int room = 0;
1738 struct nozomi *dc = get_dc_by_tty(tty);
1739
1740 if (!dc || !port)
1741 return 0;
1742 if (!mutex_trylock(&port->tty_sem))
1743 return 0;
1744
1745 if (!port->tty_open_count)
1746 goto exit;
1747
1748 room = port->fifo_ul->size - __kfifo_len(port->fifo_ul);
1749
1750exit:
1751 mutex_unlock(&port->tty_sem);
1752 return room;
1753}
1754
1755/* Gets io control parameters */
1756static int ntty_tiocmget(struct tty_struct *tty, struct file *file)
1757{
1758 struct port *port = tty->driver_data;
1759 struct ctrl_dl *ctrl_dl = &port->ctrl_dl;
1760 struct ctrl_ul *ctrl_ul = &port->ctrl_ul;
1761
1762 return (ctrl_ul->RTS ? TIOCM_RTS : 0) |
1763 (ctrl_ul->DTR ? TIOCM_DTR : 0) |
1764 (ctrl_dl->DCD ? TIOCM_CAR : 0) |
1765 (ctrl_dl->RI ? TIOCM_RNG : 0) |
1766 (ctrl_dl->DSR ? TIOCM_DSR : 0) |
1767 (ctrl_dl->CTS ? TIOCM_CTS : 0);
1768}
1769
1770/* Sets io controls parameters */
1771static int ntty_tiocmset(struct tty_struct *tty, struct file *file,
1772 unsigned int set, unsigned int clear)
1773{
1774 if (set & TIOCM_RTS)
1775 set_rts(tty, 1);
1776 else if (clear & TIOCM_RTS)
1777 set_rts(tty, 0);
1778
1779 if (set & TIOCM_DTR)
1780 set_dtr(tty, 1);
1781 else if (clear & TIOCM_DTR)
1782 set_dtr(tty, 0);
1783
1784 return 0;
1785}
1786
1787static int ntty_cflags_changed(struct port *port, unsigned long flags,
1788 struct async_icount *cprev)
1789{
1790 struct async_icount cnow = port->tty_icount;
1791 int ret;
1792
1793 ret = ((flags & TIOCM_RNG) && (cnow.rng != cprev->rng)) ||
1794 ((flags & TIOCM_DSR) && (cnow.dsr != cprev->dsr)) ||
1795 ((flags & TIOCM_CD) && (cnow.dcd != cprev->dcd)) ||
1796 ((flags & TIOCM_CTS) && (cnow.cts != cprev->cts));
1797
1798 *cprev = cnow;
1799
1800 return ret;
1801}
1802
1803static int ntty_ioctl_tiocgicount(struct port *port, void __user *argp)
1804{
1805 struct async_icount cnow = port->tty_icount;
1806 struct serial_icounter_struct icount;
1807
1808 icount.cts = cnow.cts;
1809 icount.dsr = cnow.dsr;
1810 icount.rng = cnow.rng;
1811 icount.dcd = cnow.dcd;
1812 icount.rx = cnow.rx;
1813 icount.tx = cnow.tx;
1814 icount.frame = cnow.frame;
1815 icount.overrun = cnow.overrun;
1816 icount.parity = cnow.parity;
1817 icount.brk = cnow.brk;
1818 icount.buf_overrun = cnow.buf_overrun;
1819
1820 return copy_to_user(argp, &icount, sizeof(icount));
1821}
1822
1823static int ntty_ioctl(struct tty_struct *tty, struct file *file,
1824 unsigned int cmd, unsigned long arg)
1825{
1826 struct port *port = tty->driver_data;
1827 void __user *argp = (void __user *)arg;
1828 int rval = -ENOIOCTLCMD;
1829
1830 DBG1("******** IOCTL, cmd: %d", cmd);
1831
1832 switch (cmd) {
1833 case TIOCMIWAIT: {
1834 struct async_icount cprev = port->tty_icount;
1835
1836 rval = wait_event_interruptible(port->tty_wait,
1837 ntty_cflags_changed(port, arg, &cprev));
1838 break;
1839 } case TIOCGICOUNT:
1840 rval = ntty_ioctl_tiocgicount(port, argp);
1841 break;
1842 default:
1843 DBG1("ERR: 0x%08X, %d", cmd, cmd);
1844 break;
1845 };
1846
1847 return rval;
1848}
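
The TIOCMIWAIT and TIOCGICOUNT branches above are driven from userspace with the standard serial ioctls. A minimal sketch of a user program that waits for a CTS transition on a port and then dumps the counter the driver maintains in tty_icount (the /dev/noz0 path is an assumption):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/serial.h>

	int main(void)
	{
		struct serial_icounter_struct ic;
		int fd = open("/dev/noz0", O_RDWR | O_NOCTTY);

		if (fd < 0)
			return 1;
		/* Blocks in ntty_ioctl() until the CTS count changes. */
		if (ioctl(fd, TIOCMIWAIT, TIOCM_CTS) == 0 &&
		    ioctl(fd, TIOCGICOUNT, &ic) == 0)
			printf("cts changes: %d\n", ic.cts);
		return 0;
	}
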
1849
1850/*
1851 * Called by the upper tty layer when tty buffers are ready
1852 * to receive data again after a call to throttle.
1853 */
1854static void ntty_unthrottle(struct tty_struct *tty)
1855{
1856 struct nozomi *dc = get_dc_by_tty(tty);
1857 unsigned long flags;
1858
1859 DBG1("UNTHROTTLE");
1860 spin_lock_irqsave(&dc->spin_mutex, flags);
1861 enable_transmit_dl(tty->index % MAX_PORT, dc);
1862 set_rts(tty, 1);
1863
1864 spin_unlock_irqrestore(&dc->spin_mutex, flags);
1865}
1866
1867/*
1868 * Called by the upper tty layer when the tty buffers are almost full.
1869 * The driver should stop sending more data.
1870 */
1871static void ntty_throttle(struct tty_struct *tty)
1872{
1873 struct nozomi *dc = get_dc_by_tty(tty);
1874 unsigned long flags;
1875
1876 DBG1("THROTTLE");
1877 spin_lock_irqsave(&dc->spin_mutex, flags);
1878 set_rts(tty, 0);
1879 spin_unlock_irqrestore(&dc->spin_mutex, flags);
1880}
1881
1882/* just to discard single character writes */
1883static void ntty_put_char(struct tty_struct *tty, unsigned char c)
1884{
1885 /* FIXME !!! */
1886 DBG2("PUT CHAR Function: %c", c);
1887}
1888
1889/* Returns number of chars in buffer, called by tty layer */
1890static s32 ntty_chars_in_buffer(struct tty_struct *tty)
1891{
1892 struct port *port = tty->driver_data;
1893 struct nozomi *dc = get_dc_by_tty(tty);
1894 s32 rval;
1895
1896 if (unlikely(!dc || !port)) {
1897 rval = -ENODEV;
1898 goto exit_in_buffer;
1899 }
1900
1901 if (unlikely(!port->tty_open_count)) {
1902 dev_err(&dc->pdev->dev, "No tty open?\n");
1903 rval = -ENODEV;
1904 goto exit_in_buffer;
1905 }
1906
1907 rval = __kfifo_len(port->fifo_ul);
1908
1909exit_in_buffer:
1910 return rval;
1911}
1912
1913static struct tty_operations tty_ops = {
1914 .ioctl = ntty_ioctl,
1915 .open = ntty_open,
1916 .close = ntty_close,
1917 .write = ntty_write,
1918 .write_room = ntty_write_room,
1919 .unthrottle = ntty_unthrottle,
1920 .throttle = ntty_throttle,
1921 .chars_in_buffer = ntty_chars_in_buffer,
1922 .put_char = ntty_put_char,
1923 .tiocmget = ntty_tiocmget,
1924 .tiocmset = ntty_tiocmset,
1925};
1926
1927/* Module initialization */
1928static struct pci_driver nozomi_driver = {
1929 .name = NOZOMI_NAME,
1930 .id_table = nozomi_pci_tbl,
1931 .probe = nozomi_card_init,
1932 .remove = __devexit_p(nozomi_card_exit),
1933};
1934
1935static __init int nozomi_init(void)
1936{
1937 int ret;
1938
1939 printk(KERN_INFO "Initializing %s\n", VERSION_STRING);
1940
1941 ntty_driver = alloc_tty_driver(NTTY_TTY_MAXMINORS);
1942 if (!ntty_driver)
1943 return -ENOMEM;
1944
1945 ntty_driver->owner = THIS_MODULE;
1946 ntty_driver->driver_name = NOZOMI_NAME_TTY;
1947 ntty_driver->name = "noz";
1948 ntty_driver->major = 0;
1949 ntty_driver->type = TTY_DRIVER_TYPE_SERIAL;
1950 ntty_driver->subtype = SERIAL_TYPE_NORMAL;
1951 ntty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
1952 ntty_driver->init_termios = tty_std_termios;
1953 ntty_driver->init_termios.c_cflag = B115200 | CS8 | CREAD | \
1954 HUPCL | CLOCAL;
1955 ntty_driver->init_termios.c_ispeed = 115200;
1956 ntty_driver->init_termios.c_ospeed = 115200;
1957 tty_set_operations(ntty_driver, &tty_ops);
1958
1959 ret = tty_register_driver(ntty_driver);
1960 if (ret) {
1961 printk(KERN_ERR "Nozomi: failed to register ntty driver\n");
1962 goto free_tty;
1963 }
1964
1965 ret = pci_register_driver(&nozomi_driver);
1966 if (ret) {
1967 printk(KERN_ERR "Nozomi: can't register pci driver\n");
1968 goto unr_tty;
1969 }
1970
1971 return 0;
1972unr_tty:
1973 tty_unregister_driver(ntty_driver);
1974free_tty:
1975 put_tty_driver(ntty_driver);
1976 return ret;
1977}
1978
1979static __exit void nozomi_exit(void)
1980{
1981 printk(KERN_INFO "Unloading %s\n", DRIVER_DESC);
1982 pci_unregister_driver(&nozomi_driver);
1983 tty_unregister_driver(ntty_driver);
1984 put_tty_driver(ntty_driver);
1985}
1986
1987module_init(nozomi_init);
1988module_exit(nozomi_exit);
1989
1990module_param(debug, int, S_IRUGO | S_IWUSR);
1991
1992MODULE_LICENSE("Dual BSD/GPL");
1993MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index 0c66b802736a..78b151c4d20f 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Real Time Clock interface for Linux 2 * Real Time Clock interface for Linux
3 * 3 *
4 * Copyright (C) 1996 Paul Gortmaker 4 * Copyright (C) 1996 Paul Gortmaker
5 * 5 *
@@ -17,7 +17,7 @@
17 * has been received. If a RTC interrupt has already happened, 17 * has been received. If a RTC interrupt has already happened,
18 * it will output an unsigned long and then block. The output value 18 * it will output an unsigned long and then block. The output value
19 * contains the interrupt status in the low byte and the number of 19 * contains the interrupt status in the low byte and the number of
20 * interrupts since the last read in the remaining high bytes. The 20 * interrupts since the last read in the remaining high bytes. The
21 * /dev/rtc interface can also be used with the select(2) call. 21 * /dev/rtc interface can also be used with the select(2) call.
22 * 22 *
23 * This program is free software; you can redistribute it and/or 23 * This program is free software; you can redistribute it and/or
@@ -104,12 +104,14 @@ static int rtc_has_irq = 1;
104 104
105#ifndef CONFIG_HPET_EMULATE_RTC 105#ifndef CONFIG_HPET_EMULATE_RTC
106#define is_hpet_enabled() 0 106#define is_hpet_enabled() 0
107#define hpet_set_alarm_time(hrs, min, sec) 0 107#define hpet_set_alarm_time(hrs, min, sec) 0
108#define hpet_set_periodic_freq(arg) 0 108#define hpet_set_periodic_freq(arg) 0
109#define hpet_mask_rtc_irq_bit(arg) 0 109#define hpet_mask_rtc_irq_bit(arg) 0
110#define hpet_set_rtc_irq_bit(arg) 0 110#define hpet_set_rtc_irq_bit(arg) 0
111#define hpet_rtc_timer_init() do { } while (0) 111#define hpet_rtc_timer_init() do { } while (0)
112#define hpet_rtc_dropped_irq() 0 112#define hpet_rtc_dropped_irq() 0
113#define hpet_register_irq_handler(h) 0
114#define hpet_unregister_irq_handler(h) 0
113#ifdef RTC_IRQ 115#ifdef RTC_IRQ
114static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) 116static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
115{ 117{
@@ -147,7 +149,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file,
147static unsigned int rtc_poll(struct file *file, poll_table *wait); 149static unsigned int rtc_poll(struct file *file, poll_table *wait);
148#endif 150#endif
149 151
150static void get_rtc_alm_time (struct rtc_time *alm_tm); 152static void get_rtc_alm_time(struct rtc_time *alm_tm);
151#ifdef RTC_IRQ 153#ifdef RTC_IRQ
152static void set_rtc_irq_bit_locked(unsigned char bit); 154static void set_rtc_irq_bit_locked(unsigned char bit);
153static void mask_rtc_irq_bit_locked(unsigned char bit); 155static void mask_rtc_irq_bit_locked(unsigned char bit);
@@ -185,9 +187,9 @@ static int rtc_proc_open(struct inode *inode, struct file *file);
185 * rtc_status but before mod_timer is called, which would then reenable the 187 * rtc_status but before mod_timer is called, which would then reenable the
186 * timer (but you would need to have an awful timing before you'd trip on it) 188 * timer (but you would need to have an awful timing before you'd trip on it)
187 */ 189 */
188static unsigned long rtc_status = 0; /* bitmapped status byte. */ 190static unsigned long rtc_status; /* bitmapped status byte. */
189static unsigned long rtc_freq = 0; /* Current periodic IRQ rate */ 191static unsigned long rtc_freq; /* Current periodic IRQ rate */
190static unsigned long rtc_irq_data = 0; /* our output to the world */ 192static unsigned long rtc_irq_data; /* our output to the world */
191static unsigned long rtc_max_user_freq = 64; /* > this, need CAP_SYS_RESOURCE */ 193static unsigned long rtc_max_user_freq = 64; /* > this, need CAP_SYS_RESOURCE */
192 194
193#ifdef RTC_IRQ 195#ifdef RTC_IRQ
@@ -195,7 +197,7 @@ static unsigned long rtc_max_user_freq = 64; /* > this, need CAP_SYS_RESOURCE */
195 * rtc_task_lock nests inside rtc_lock. 197 * rtc_task_lock nests inside rtc_lock.
196 */ 198 */
197static DEFINE_SPINLOCK(rtc_task_lock); 199static DEFINE_SPINLOCK(rtc_task_lock);
198static rtc_task_t *rtc_callback = NULL; 200static rtc_task_t *rtc_callback;
199#endif 201#endif
200 202
201/* 203/*
@@ -205,7 +207,7 @@ static rtc_task_t *rtc_callback = NULL;
205 207
206static unsigned long epoch = 1900; /* year corresponding to 0x00 */ 208static unsigned long epoch = 1900; /* year corresponding to 0x00 */
207 209
208static const unsigned char days_in_mo[] = 210static const unsigned char days_in_mo[] =
209{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; 211{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
210 212
211/* 213/*
@@ -242,7 +244,7 @@ irqreturn_t rtc_interrupt(int irq, void *dev_id)
242 * the last read in the remainder of rtc_irq_data. 244 * the last read in the remainder of rtc_irq_data.
243 */ 245 */
244 246
245 spin_lock (&rtc_lock); 247 spin_lock(&rtc_lock);
246 rtc_irq_data += 0x100; 248 rtc_irq_data += 0x100;
247 rtc_irq_data &= ~0xff; 249 rtc_irq_data &= ~0xff;
248 if (is_hpet_enabled()) { 250 if (is_hpet_enabled()) {
@@ -259,16 +261,16 @@ irqreturn_t rtc_interrupt(int irq, void *dev_id)
259 if (rtc_status & RTC_TIMER_ON) 261 if (rtc_status & RTC_TIMER_ON)
260 mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 2*HZ/100); 262 mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 2*HZ/100);
261 263
262 spin_unlock (&rtc_lock); 264 spin_unlock(&rtc_lock);
263 265
264 /* Now do the rest of the actions */ 266 /* Now do the rest of the actions */
265 spin_lock(&rtc_task_lock); 267 spin_lock(&rtc_task_lock);
266 if (rtc_callback) 268 if (rtc_callback)
267 rtc_callback->func(rtc_callback->private_data); 269 rtc_callback->func(rtc_callback->private_data);
268 spin_unlock(&rtc_task_lock); 270 spin_unlock(&rtc_task_lock);
269 wake_up_interruptible(&rtc_wait); 271 wake_up_interruptible(&rtc_wait);
270 272
271 kill_fasync (&rtc_async_queue, SIGIO, POLL_IN); 273 kill_fasync(&rtc_async_queue, SIGIO, POLL_IN);
272 274
273 return IRQ_HANDLED; 275 return IRQ_HANDLED;
274} 276}
@@ -335,7 +337,7 @@ static ssize_t rtc_read(struct file *file, char __user *buf,
335 DECLARE_WAITQUEUE(wait, current); 337 DECLARE_WAITQUEUE(wait, current);
336 unsigned long data; 338 unsigned long data;
337 ssize_t retval; 339 ssize_t retval;
338 340
339 if (rtc_has_irq == 0) 341 if (rtc_has_irq == 0)
340 return -EIO; 342 return -EIO;
341 343
@@ -358,11 +360,11 @@ static ssize_t rtc_read(struct file *file, char __user *buf,
358 * confusing. And no, xchg() is not the answer. */ 360 * confusing. And no, xchg() is not the answer. */
359 361
360 __set_current_state(TASK_INTERRUPTIBLE); 362 __set_current_state(TASK_INTERRUPTIBLE);
361 363
362 spin_lock_irq (&rtc_lock); 364 spin_lock_irq(&rtc_lock);
363 data = rtc_irq_data; 365 data = rtc_irq_data;
364 rtc_irq_data = 0; 366 rtc_irq_data = 0;
365 spin_unlock_irq (&rtc_lock); 367 spin_unlock_irq(&rtc_lock);
366 368
367 if (data != 0) 369 if (data != 0)
368 break; 370 break;
@@ -378,10 +380,13 @@ static ssize_t rtc_read(struct file *file, char __user *buf,
378 schedule(); 380 schedule();
379 } while (1); 381 } while (1);
380 382
381 if (count == sizeof(unsigned int)) 383 if (count == sizeof(unsigned int)) {
382 retval = put_user(data, (unsigned int __user *)buf) ?: sizeof(int); 384 retval = put_user(data,
383 else 385 (unsigned int __user *)buf) ?: sizeof(int);
384 retval = put_user(data, (unsigned long __user *)buf) ?: sizeof(long); 386 } else {
387 retval = put_user(data,
388 (unsigned long __user *)buf) ?: sizeof(long);
389 }
385 if (!retval) 390 if (!retval)
386 retval = count; 391 retval = count;
387 out: 392 out:
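
The hunk above only re-wraps the logic that lets rtc_read() return the latched value as either an unsigned int or an unsigned long, whichever size the caller asked for; the low byte is the RTC interrupt status and the higher bytes count interrupts since the last read. A minimal user-space sketch of consuming it, assuming an interrupt source has been enabled:

/* Sketch: block on /dev/rtc and split the value returned by read(). */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned long data;
	int fd = open("/dev/rtc", O_RDONLY);

	if (fd < 0 || read(fd, &data, sizeof(data)) != sizeof(data))
		return 1;

	printf("status=0x%02lx  interrupts since last read=%lu\n",
	       data & 0xff, data >> 8);
	close(fd);
	return 0;
}
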
@@ -394,7 +399,7 @@ static ssize_t rtc_read(struct file *file, char __user *buf,
394 399
395static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel) 400static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
396{ 401{
397 struct rtc_time wtime; 402 struct rtc_time wtime;
398 403
399#ifdef RTC_IRQ 404#ifdef RTC_IRQ
400 if (rtc_has_irq == 0) { 405 if (rtc_has_irq == 0) {
@@ -426,35 +431,41 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
426 } 431 }
427 case RTC_PIE_OFF: /* Mask periodic int. enab. bit */ 432 case RTC_PIE_OFF: /* Mask periodic int. enab. bit */
428 { 433 {
429 unsigned long flags; /* can be called from isr via rtc_control() */ 434 /* can be called from isr via rtc_control() */
430 spin_lock_irqsave (&rtc_lock, flags); 435 unsigned long flags;
436
437 spin_lock_irqsave(&rtc_lock, flags);
431 mask_rtc_irq_bit_locked(RTC_PIE); 438 mask_rtc_irq_bit_locked(RTC_PIE);
432 if (rtc_status & RTC_TIMER_ON) { 439 if (rtc_status & RTC_TIMER_ON) {
433 rtc_status &= ~RTC_TIMER_ON; 440 rtc_status &= ~RTC_TIMER_ON;
434 del_timer(&rtc_irq_timer); 441 del_timer(&rtc_irq_timer);
435 } 442 }
436 spin_unlock_irqrestore (&rtc_lock, flags); 443 spin_unlock_irqrestore(&rtc_lock, flags);
444
437 return 0; 445 return 0;
438 } 446 }
439 case RTC_PIE_ON: /* Allow periodic ints */ 447 case RTC_PIE_ON: /* Allow periodic ints */
440 { 448 {
441 unsigned long flags; /* can be called from isr via rtc_control() */ 449 /* can be called from isr via rtc_control() */
450 unsigned long flags;
451
442 /* 452 /*
443 * We don't really want Joe User enabling more 453 * We don't really want Joe User enabling more
444 * than 64Hz of interrupts on a multi-user machine. 454 * than 64Hz of interrupts on a multi-user machine.
445 */ 455 */
446 if (!kernel && (rtc_freq > rtc_max_user_freq) && 456 if (!kernel && (rtc_freq > rtc_max_user_freq) &&
447 (!capable(CAP_SYS_RESOURCE))) 457 (!capable(CAP_SYS_RESOURCE)))
448 return -EACCES; 458 return -EACCES;
449 459
450 spin_lock_irqsave (&rtc_lock, flags); 460 spin_lock_irqsave(&rtc_lock, flags);
451 if (!(rtc_status & RTC_TIMER_ON)) { 461 if (!(rtc_status & RTC_TIMER_ON)) {
452 mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 462 mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq +
453 2*HZ/100); 463 2*HZ/100);
454 rtc_status |= RTC_TIMER_ON; 464 rtc_status |= RTC_TIMER_ON;
455 } 465 }
456 set_rtc_irq_bit_locked(RTC_PIE); 466 set_rtc_irq_bit_locked(RTC_PIE);
457 spin_unlock_irqrestore (&rtc_lock, flags); 467 spin_unlock_irqrestore(&rtc_lock, flags);
468
458 return 0; 469 return 0;
459 } 470 }
460 case RTC_UIE_OFF: /* Mask ints from RTC updates. */ 471 case RTC_UIE_OFF: /* Mask ints from RTC updates. */
@@ -477,7 +488,7 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
477 */ 488 */
478 memset(&wtime, 0, sizeof(struct rtc_time)); 489 memset(&wtime, 0, sizeof(struct rtc_time));
479 get_rtc_alm_time(&wtime); 490 get_rtc_alm_time(&wtime);
480 break; 491 break;
481 } 492 }
482 case RTC_ALM_SET: /* Store a time into the alarm */ 493 case RTC_ALM_SET: /* Store a time into the alarm */
483 { 494 {
@@ -505,16 +516,21 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
505 */ 516 */
506 } 517 }
507 if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || 518 if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) ||
508 RTC_ALWAYS_BCD) 519 RTC_ALWAYS_BCD) {
509 { 520 if (sec < 60)
510 if (sec < 60) BIN_TO_BCD(sec); 521 BIN_TO_BCD(sec);
511 else sec = 0xff; 522 else
512 523 sec = 0xff;
513 if (min < 60) BIN_TO_BCD(min); 524
514 else min = 0xff; 525 if (min < 60)
515 526 BIN_TO_BCD(min);
516 if (hrs < 24) BIN_TO_BCD(hrs); 527 else
517 else hrs = 0xff; 528 min = 0xff;
529
530 if (hrs < 24)
531 BIN_TO_BCD(hrs);
532 else
533 hrs = 0xff;
518 } 534 }
519 CMOS_WRITE(hrs, RTC_HOURS_ALARM); 535 CMOS_WRITE(hrs, RTC_HOURS_ALARM);
520 CMOS_WRITE(min, RTC_MINUTES_ALARM); 536 CMOS_WRITE(min, RTC_MINUTES_ALARM);
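
BIN_TO_BCD above converts each alarm field in place to the packed BCD encoding the CMOS alarm registers expect, with 0xff written for out-of-range fields. For reference, the conversion itself is just the following, shown here as a pure helper rather than the in-place macro:

/* Packed BCD as used by the CMOS alarm registers: tens digit in the
 * high nibble, units in the low nibble, e.g. 59 -> 0x59. */
static unsigned char bin_to_bcd(unsigned char val)
{
	return (unsigned char)(((val / 10) << 4) | (val % 10));
}
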
@@ -563,11 +579,12 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
563 579
564 if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr))) 580 if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr)))
565 return -EINVAL; 581 return -EINVAL;
566 582
567 if ((hrs >= 24) || (min >= 60) || (sec >= 60)) 583 if ((hrs >= 24) || (min >= 60) || (sec >= 60))
568 return -EINVAL; 584 return -EINVAL;
569 585
570 if ((yrs -= epoch) > 255) /* They are unsigned */ 586 yrs -= epoch;
587 if (yrs > 255) /* They are unsigned */
571 return -EINVAL; 588 return -EINVAL;
572 589
573 spin_lock_irq(&rtc_lock); 590 spin_lock_irq(&rtc_lock);
@@ -635,9 +652,10 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
635 { 652 {
636 int tmp = 0; 653 int tmp = 0;
637 unsigned char val; 654 unsigned char val;
638 unsigned long flags; /* can be called from isr via rtc_control() */ 655 /* can be called from isr via rtc_control() */
656 unsigned long flags;
639 657
640 /* 658 /*
641 * The max we can do is 8192Hz. 659 * The max we can do is 8192Hz.
642 */ 660 */
643 if ((arg < 2) || (arg > 8192)) 661 if ((arg < 2) || (arg > 8192))
@@ -646,7 +664,8 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
646 * We don't really want Joe User generating more 664 * We don't really want Joe User generating more
647 * than 64Hz of interrupts on a multi-user machine. 665 * than 64Hz of interrupts on a multi-user machine.
648 */ 666 */
649 if (!kernel && (arg > rtc_max_user_freq) && (!capable(CAP_SYS_RESOURCE))) 667 if (!kernel && (arg > rtc_max_user_freq) &&
668 !capable(CAP_SYS_RESOURCE))
650 return -EACCES; 669 return -EACCES;
651 670
652 while (arg > (1<<tmp)) 671 while (arg > (1<<tmp))
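
For RTC_IRQP_SET the driver accepts only power-of-two rates between 2 Hz and 8192 Hz, and anything above rtc_max_user_freq (typically exposed as /proc/sys/dev/rtc/max-user-freq) additionally requires CAP_SYS_RESOURCE. A user-space sketch of setting and reading back the periodic rate:

/* Sketch: request a 64 Hz periodic interrupt rate and read it back. */
#include <fcntl.h>
#include <linux/rtc.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	unsigned long freq = 0;
	int fd = open("/dev/rtc", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, RTC_IRQP_SET, 64) < 0 ||
	    ioctl(fd, RTC_IRQP_READ, &freq) < 0)
		return 1;

	printf("periodic rate: %lu Hz\n", freq);
	close(fd);
	return 0;
}
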
@@ -674,11 +693,11 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
674#endif 693#endif
675 case RTC_EPOCH_READ: /* Read the epoch. */ 694 case RTC_EPOCH_READ: /* Read the epoch. */
676 { 695 {
677 return put_user (epoch, (unsigned long __user *)arg); 696 return put_user(epoch, (unsigned long __user *)arg);
678 } 697 }
679 case RTC_EPOCH_SET: /* Set the epoch. */ 698 case RTC_EPOCH_SET: /* Set the epoch. */
680 { 699 {
681 /* 700 /*
682 * There were no RTC clocks before 1900. 701 * There were no RTC clocks before 1900.
683 */ 702 */
684 if (arg < 1900) 703 if (arg < 1900)
@@ -693,7 +712,8 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
693 default: 712 default:
694 return -ENOTTY; 713 return -ENOTTY;
695 } 714 }
696 return copy_to_user((void __user *)arg, &wtime, sizeof wtime) ? -EFAULT : 0; 715 return copy_to_user((void __user *)arg,
716 &wtime, sizeof wtime) ? -EFAULT : 0;
697} 717}
698 718
699static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd, 719static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
@@ -712,26 +732,25 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
712 * needed here. Or anywhere else in this driver. */ 732 * needed here. Or anywhere else in this driver. */
713static int rtc_open(struct inode *inode, struct file *file) 733static int rtc_open(struct inode *inode, struct file *file)
714{ 734{
715 spin_lock_irq (&rtc_lock); 735 spin_lock_irq(&rtc_lock);
716 736
717 if(rtc_status & RTC_IS_OPEN) 737 if (rtc_status & RTC_IS_OPEN)
718 goto out_busy; 738 goto out_busy;
719 739
720 rtc_status |= RTC_IS_OPEN; 740 rtc_status |= RTC_IS_OPEN;
721 741
722 rtc_irq_data = 0; 742 rtc_irq_data = 0;
723 spin_unlock_irq (&rtc_lock); 743 spin_unlock_irq(&rtc_lock);
724 return 0; 744 return 0;
725 745
726out_busy: 746out_busy:
727 spin_unlock_irq (&rtc_lock); 747 spin_unlock_irq(&rtc_lock);
728 return -EBUSY; 748 return -EBUSY;
729} 749}
730 750
731static int rtc_fasync (int fd, struct file *filp, int on) 751static int rtc_fasync(int fd, struct file *filp, int on)
732
733{ 752{
734 return fasync_helper (fd, filp, on, &rtc_async_queue); 753 return fasync_helper(fd, filp, on, &rtc_async_queue);
735} 754}
736 755
737static int rtc_release(struct inode *inode, struct file *file) 756static int rtc_release(struct inode *inode, struct file *file)
@@ -762,16 +781,16 @@ static int rtc_release(struct inode *inode, struct file *file)
762 } 781 }
763 spin_unlock_irq(&rtc_lock); 782 spin_unlock_irq(&rtc_lock);
764 783
765 if (file->f_flags & FASYNC) { 784 if (file->f_flags & FASYNC)
766 rtc_fasync (-1, file, 0); 785 rtc_fasync(-1, file, 0);
767 }
768no_irq: 786no_irq:
769#endif 787#endif
770 788
771 spin_lock_irq (&rtc_lock); 789 spin_lock_irq(&rtc_lock);
772 rtc_irq_data = 0; 790 rtc_irq_data = 0;
773 rtc_status &= ~RTC_IS_OPEN; 791 rtc_status &= ~RTC_IS_OPEN;
774 spin_unlock_irq (&rtc_lock); 792 spin_unlock_irq(&rtc_lock);
793
775 return 0; 794 return 0;
776} 795}
777 796
@@ -786,9 +805,9 @@ static unsigned int rtc_poll(struct file *file, poll_table *wait)
786 805
787 poll_wait(file, &rtc_wait, wait); 806 poll_wait(file, &rtc_wait, wait);
788 807
789 spin_lock_irq (&rtc_lock); 808 spin_lock_irq(&rtc_lock);
790 l = rtc_irq_data; 809 l = rtc_irq_data;
791 spin_unlock_irq (&rtc_lock); 810 spin_unlock_irq(&rtc_lock);
792 811
793 if (l != 0) 812 if (l != 0)
794 return POLLIN | POLLRDNORM; 813 return POLLIN | POLLRDNORM;
@@ -796,14 +815,6 @@ static unsigned int rtc_poll(struct file *file, poll_table *wait)
796} 815}
797#endif 816#endif
798 817
799/*
800 * exported stuffs
801 */
802
803EXPORT_SYMBOL(rtc_register);
804EXPORT_SYMBOL(rtc_unregister);
805EXPORT_SYMBOL(rtc_control);
806
807int rtc_register(rtc_task_t *task) 818int rtc_register(rtc_task_t *task)
808{ 819{
809#ifndef RTC_IRQ 820#ifndef RTC_IRQ
@@ -829,6 +840,7 @@ int rtc_register(rtc_task_t *task)
829 return 0; 840 return 0;
830#endif 841#endif
831} 842}
843EXPORT_SYMBOL(rtc_register);
832 844
833int rtc_unregister(rtc_task_t *task) 845int rtc_unregister(rtc_task_t *task)
834{ 846{
@@ -845,7 +857,7 @@ int rtc_unregister(rtc_task_t *task)
845 return -ENXIO; 857 return -ENXIO;
846 } 858 }
847 rtc_callback = NULL; 859 rtc_callback = NULL;
848 860
849 /* disable controls */ 861 /* disable controls */
850 if (!hpet_mask_rtc_irq_bit(RTC_PIE | RTC_AIE | RTC_UIE)) { 862 if (!hpet_mask_rtc_irq_bit(RTC_PIE | RTC_AIE | RTC_UIE)) {
851 tmp = CMOS_READ(RTC_CONTROL); 863 tmp = CMOS_READ(RTC_CONTROL);
@@ -865,6 +877,7 @@ int rtc_unregister(rtc_task_t *task)
865 return 0; 877 return 0;
866#endif 878#endif
867} 879}
880EXPORT_SYMBOL(rtc_unregister);
868 881
869int rtc_control(rtc_task_t *task, unsigned int cmd, unsigned long arg) 882int rtc_control(rtc_task_t *task, unsigned int cmd, unsigned long arg)
870{ 883{
@@ -883,7 +896,7 @@ int rtc_control(rtc_task_t *task, unsigned int cmd, unsigned long arg)
883 return rtc_do_ioctl(cmd, arg, 1); 896 return rtc_do_ioctl(cmd, arg, 1);
884#endif 897#endif
885} 898}
886 899EXPORT_SYMBOL(rtc_control);
887 900
888/* 901/*
889 * The various file operations we support. 902 * The various file operations we support.
@@ -910,11 +923,11 @@ static struct miscdevice rtc_dev = {
910 923
911#ifdef CONFIG_PROC_FS 924#ifdef CONFIG_PROC_FS
912static const struct file_operations rtc_proc_fops = { 925static const struct file_operations rtc_proc_fops = {
913 .owner = THIS_MODULE, 926 .owner = THIS_MODULE,
914 .open = rtc_proc_open, 927 .open = rtc_proc_open,
915 .read = seq_read, 928 .read = seq_read,
916 .llseek = seq_lseek, 929 .llseek = seq_lseek,
917 .release = single_release, 930 .release = single_release,
918}; 931};
919#endif 932#endif
920 933
@@ -965,7 +978,7 @@ static int __init rtc_init(void)
965#ifdef CONFIG_SPARC32 978#ifdef CONFIG_SPARC32
966 for_each_ebus(ebus) { 979 for_each_ebus(ebus) {
967 for_each_ebusdev(edev, ebus) { 980 for_each_ebusdev(edev, ebus) {
968 if(strcmp(edev->prom_node->name, "rtc") == 0) { 981 if (strcmp(edev->prom_node->name, "rtc") == 0) {
969 rtc_port = edev->resource[0].start; 982 rtc_port = edev->resource[0].start;
970 rtc_irq = edev->irqs[0]; 983 rtc_irq = edev->irqs[0];
971 goto found; 984 goto found;
@@ -986,7 +999,8 @@ found:
986 * XXX Interrupt pin #7 in Espresso is shared between RTC and 999 * XXX Interrupt pin #7 in Espresso is shared between RTC and
987 * PCI Slot 2 INTA# (and some INTx# in Slot 1). 1000 * PCI Slot 2 INTA# (and some INTx# in Slot 1).
988 */ 1001 */
989 if (request_irq(rtc_irq, rtc_interrupt, IRQF_SHARED, "rtc", (void *)&rtc_port)) { 1002 if (request_irq(rtc_irq, rtc_interrupt, IRQF_SHARED, "rtc",
1003 (void *)&rtc_port)) {
990 rtc_has_irq = 0; 1004 rtc_has_irq = 0;
991 printk(KERN_ERR "rtc: cannot register IRQ %d\n", rtc_irq); 1005 printk(KERN_ERR "rtc: cannot register IRQ %d\n", rtc_irq);
992 return -EIO; 1006 return -EIO;
@@ -1015,16 +1029,26 @@ no_irq:
1015 1029
1016#ifdef RTC_IRQ 1030#ifdef RTC_IRQ
1017 if (is_hpet_enabled()) { 1031 if (is_hpet_enabled()) {
1032 int err;
1033
1018 rtc_int_handler_ptr = hpet_rtc_interrupt; 1034 rtc_int_handler_ptr = hpet_rtc_interrupt;
1035 err = hpet_register_irq_handler(rtc_interrupt);
1036 if (err != 0) {
1037 printk(KERN_WARNING "hpet_register_irq_handler failed "
1038 "in rtc_init().");
1039 return err;
1040 }
1019 } else { 1041 } else {
1020 rtc_int_handler_ptr = rtc_interrupt; 1042 rtc_int_handler_ptr = rtc_interrupt;
1021 } 1043 }
1022 1044
1023 if(request_irq(RTC_IRQ, rtc_int_handler_ptr, IRQF_DISABLED, "rtc", NULL)) { 1045 if (request_irq(RTC_IRQ, rtc_int_handler_ptr, IRQF_DISABLED,
1046 "rtc", NULL)) {
1024 /* Yeah right, seeing as irq 8 doesn't even hit the bus. */ 1047 /* Yeah right, seeing as irq 8 doesn't even hit the bus. */
1025 rtc_has_irq = 0; 1048 rtc_has_irq = 0;
1026 printk(KERN_ERR "rtc: IRQ %d is not free.\n", RTC_IRQ); 1049 printk(KERN_ERR "rtc: IRQ %d is not free.\n", RTC_IRQ);
1027 rtc_release_region(); 1050 rtc_release_region();
1051
1028 return -EIO; 1052 return -EIO;
1029 } 1053 }
1030 hpet_rtc_timer_init(); 1054 hpet_rtc_timer_init();
@@ -1036,6 +1060,7 @@ no_irq:
1036 if (misc_register(&rtc_dev)) { 1060 if (misc_register(&rtc_dev)) {
1037#ifdef RTC_IRQ 1061#ifdef RTC_IRQ
1038 free_irq(RTC_IRQ, NULL); 1062 free_irq(RTC_IRQ, NULL);
1063 hpet_unregister_irq_handler(rtc_interrupt);
1039 rtc_has_irq = 0; 1064 rtc_has_irq = 0;
1040#endif 1065#endif
1041 rtc_release_region(); 1066 rtc_release_region();
@@ -1052,21 +1077,21 @@ no_irq:
1052 1077
1053#if defined(__alpha__) || defined(__mips__) 1078#if defined(__alpha__) || defined(__mips__)
1054 rtc_freq = HZ; 1079 rtc_freq = HZ;
1055 1080
1056 /* Each operating system on an Alpha uses its own epoch. 1081 /* Each operating system on an Alpha uses its own epoch.
1057 Let's try to guess which one we are using now. */ 1082 Let's try to guess which one we are using now. */
1058 1083
1059 if (rtc_is_updating() != 0) 1084 if (rtc_is_updating() != 0)
1060 msleep(20); 1085 msleep(20);
1061 1086
1062 spin_lock_irq(&rtc_lock); 1087 spin_lock_irq(&rtc_lock);
1063 year = CMOS_READ(RTC_YEAR); 1088 year = CMOS_READ(RTC_YEAR);
1064 ctrl = CMOS_READ(RTC_CONTROL); 1089 ctrl = CMOS_READ(RTC_CONTROL);
1065 spin_unlock_irq(&rtc_lock); 1090 spin_unlock_irq(&rtc_lock);
1066 1091
1067 if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) 1092 if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
1068 BCD_TO_BIN(year); /* This should never happen... */ 1093 BCD_TO_BIN(year); /* This should never happen... */
1069 1094
1070 if (year < 20) { 1095 if (year < 20) {
1071 epoch = 2000; 1096 epoch = 2000;
1072 guess = "SRM (post-2000)"; 1097 guess = "SRM (post-2000)";
@@ -1087,7 +1112,8 @@ no_irq:
1087#endif 1112#endif
1088 } 1113 }
1089 if (guess) 1114 if (guess)
1090 printk(KERN_INFO "rtc: %s epoch (%lu) detected\n", guess, epoch); 1115 printk(KERN_INFO "rtc: %s epoch (%lu) detected\n",
1116 guess, epoch);
1091#endif 1117#endif
1092#ifdef RTC_IRQ 1118#ifdef RTC_IRQ
1093 if (rtc_has_irq == 0) 1119 if (rtc_has_irq == 0)
@@ -1096,8 +1122,12 @@ no_irq:
1096 spin_lock_irq(&rtc_lock); 1122 spin_lock_irq(&rtc_lock);
1097 rtc_freq = 1024; 1123 rtc_freq = 1024;
1098 if (!hpet_set_periodic_freq(rtc_freq)) { 1124 if (!hpet_set_periodic_freq(rtc_freq)) {
1099 /* Initialize periodic freq. to CMOS reset default, which is 1024Hz */ 1125 /*
1100 CMOS_WRITE(((CMOS_READ(RTC_FREQ_SELECT) & 0xF0) | 0x06), RTC_FREQ_SELECT); 1126 * Initialize periodic frequency to CMOS reset default,
1127 * which is 1024Hz
1128 */
1129 CMOS_WRITE(((CMOS_READ(RTC_FREQ_SELECT) & 0xF0) | 0x06),
1130 RTC_FREQ_SELECT);
1101 } 1131 }
1102 spin_unlock_irq(&rtc_lock); 1132 spin_unlock_irq(&rtc_lock);
1103no_irq2: 1133no_irq2:
@@ -1110,20 +1140,22 @@ no_irq2:
1110 return 0; 1140 return 0;
1111} 1141}
1112 1142
1113static void __exit rtc_exit (void) 1143static void __exit rtc_exit(void)
1114{ 1144{
1115 cleanup_sysctl(); 1145 cleanup_sysctl();
1116 remove_proc_entry ("driver/rtc", NULL); 1146 remove_proc_entry("driver/rtc", NULL);
1117 misc_deregister(&rtc_dev); 1147 misc_deregister(&rtc_dev);
1118 1148
1119#ifdef CONFIG_SPARC32 1149#ifdef CONFIG_SPARC32
1120 if (rtc_has_irq) 1150 if (rtc_has_irq)
1121 free_irq (rtc_irq, &rtc_port); 1151 free_irq(rtc_irq, &rtc_port);
1122#else 1152#else
1123 rtc_release_region(); 1153 rtc_release_region();
1124#ifdef RTC_IRQ 1154#ifdef RTC_IRQ
1125 if (rtc_has_irq) 1155 if (rtc_has_irq) {
1126 free_irq (RTC_IRQ, NULL); 1156 free_irq(RTC_IRQ, NULL);
1157 hpet_unregister_irq_handler(hpet_rtc_interrupt);
1158 }
1127#endif 1159#endif
1128#endif /* CONFIG_SPARC32 */ 1160#endif /* CONFIG_SPARC32 */
1129} 1161}
@@ -1133,14 +1165,14 @@ module_exit(rtc_exit);
1133 1165
1134#ifdef RTC_IRQ 1166#ifdef RTC_IRQ
1135/* 1167/*
1136 * At IRQ rates >= 4096Hz, an interrupt may get lost altogether. 1168 * At IRQ rates >= 4096Hz, an interrupt may get lost altogether.
1137 * (usually during an IDE disk interrupt, with IRQ unmasking off) 1169 * (usually during an IDE disk interrupt, with IRQ unmasking off)
1138 * Since the interrupt handler doesn't get called, the IRQ status 1170 * Since the interrupt handler doesn't get called, the IRQ status
1139 * byte doesn't get read, and the RTC stops generating interrupts. 1171 * byte doesn't get read, and the RTC stops generating interrupts.
1140 * A timer is set, and will call this function if/when that happens. 1172 * A timer is set, and will call this function if/when that happens.
1141 * To get it out of this stalled state, we just read the status. 1173 * To get it out of this stalled state, we just read the status.
1142 * At least a jiffy of interrupts (rtc_freq/HZ) will have been lost. 1174 * At least a jiffy of interrupts (rtc_freq/HZ) will have been lost.
1143 * (You *really* shouldn't be trying to use a non-realtime system 1175 * (You *really* shouldn't be trying to use a non-realtime system
1144 * for something that requires a steady > 1KHz signal anyways.) 1176 * for something that requires a steady > 1KHz signal anyways.)
1145 */ 1177 */
1146 1178
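
A quick way to observe the effect this comment describes from user space is to count the interrupts actually delivered over one second at a high rate. An illustrative sketch; rates above max-user-freq need CAP_SYS_RESOURCE, and the one-second window is deliberately coarse:

/* Enable a 1024 Hz periodic interrupt and count deliveries for ~1 second. */
#include <fcntl.h>
#include <linux/rtc.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	unsigned long data, seen = 0;
	time_t start;
	int fd = open("/dev/rtc", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, RTC_IRQP_SET, 1024) < 0 || ioctl(fd, RTC_PIE_ON, 0) < 0)
		return 1;

	start = time(NULL);
	while (time(NULL) - start < 1) {
		if (read(fd, &data, sizeof(data)) != sizeof(data))
			break;
		seen += data >> 8;	/* interrupts since the last read */
	}

	ioctl(fd, RTC_PIE_OFF, 0);
	printf("saw %lu interrupts, expected about 1024\n", seen);
	close(fd);
	return 0;
}
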
@@ -1148,7 +1180,7 @@ static void rtc_dropped_irq(unsigned long data)
1148{ 1180{
1149 unsigned long freq; 1181 unsigned long freq;
1150 1182
1151 spin_lock_irq (&rtc_lock); 1183 spin_lock_irq(&rtc_lock);
1152 1184
1153 if (hpet_rtc_dropped_irq()) { 1185 if (hpet_rtc_dropped_irq()) {
1154 spin_unlock_irq(&rtc_lock); 1186 spin_unlock_irq(&rtc_lock);
@@ -1167,13 +1199,15 @@ static void rtc_dropped_irq(unsigned long data)
1167 1199
1168 spin_unlock_irq(&rtc_lock); 1200 spin_unlock_irq(&rtc_lock);
1169 1201
1170 if (printk_ratelimit()) 1202 if (printk_ratelimit()) {
1171 printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n", freq); 1203 printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n",
1204 freq);
1205 }
1172 1206
1173 /* Now we have new data */ 1207 /* Now we have new data */
1174 wake_up_interruptible(&rtc_wait); 1208 wake_up_interruptible(&rtc_wait);
1175 1209
1176 kill_fasync (&rtc_async_queue, SIGIO, POLL_IN); 1210 kill_fasync(&rtc_async_queue, SIGIO, POLL_IN);
1177} 1211}
1178#endif 1212#endif
1179 1213
@@ -1277,7 +1311,7 @@ void rtc_get_rtc_time(struct rtc_time *rtc_tm)
1277 * can take just over 2ms. We wait 20ms. There is no need to 1311 * can take just over 2ms. We wait 20ms. There is no need to
1278 * to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP. 1312 * to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
1279 * If you need to know *exactly* when a second has started, enable 1313 * If you need to know *exactly* when a second has started, enable
1280 * periodic update complete interrupts, (via ioctl) and then 1314 * periodic update complete interrupts, (via ioctl) and then
1281 * immediately read /dev/rtc which will block until you get the IRQ. 1315 * immediately read /dev/rtc which will block until you get the IRQ.
1282 * Once the read clears, read the RTC time (again via ioctl). Easy. 1316 * Once the read clears, read the RTC time (again via ioctl). Easy.
1283 */ 1317 */
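
A user-space sketch of the sequence this comment recommends: enable update-ended interrupts, block in read() until one arrives, then fetch the time that just became valid:

/* Wait for the update-ended interrupt, then read the freshly updated time. */
#include <fcntl.h>
#include <linux/rtc.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct rtc_time tm;
	unsigned long data;
	int fd = open("/dev/rtc", O_RDONLY);

	if (fd < 0 || ioctl(fd, RTC_UIE_ON, 0) < 0)
		return 1;

	read(fd, &data, sizeof(data));	/* blocks until the update IRQ */
	ioctl(fd, RTC_RD_TIME, &tm);	/* time for the second that just began */
	ioctl(fd, RTC_UIE_OFF, 0);

	printf("%04d-%02d-%02d %02d:%02d:%02d\n",
	       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
	       tm.tm_hour, tm.tm_min, tm.tm_sec);
	close(fd);
	return 0;
}
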
@@ -1307,8 +1341,7 @@ void rtc_get_rtc_time(struct rtc_time *rtc_tm)
1307 ctrl = CMOS_READ(RTC_CONTROL); 1341 ctrl = CMOS_READ(RTC_CONTROL);
1308 spin_unlock_irqrestore(&rtc_lock, flags); 1342 spin_unlock_irqrestore(&rtc_lock, flags);
1309 1343
1310 if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) 1344 if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
1311 {
1312 BCD_TO_BIN(rtc_tm->tm_sec); 1345 BCD_TO_BIN(rtc_tm->tm_sec);
1313 BCD_TO_BIN(rtc_tm->tm_min); 1346 BCD_TO_BIN(rtc_tm->tm_min);
1314 BCD_TO_BIN(rtc_tm->tm_hour); 1347 BCD_TO_BIN(rtc_tm->tm_hour);
@@ -1326,7 +1359,8 @@ void rtc_get_rtc_time(struct rtc_time *rtc_tm)
1326 * Account for differences between how the RTC uses the values 1359 * Account for differences between how the RTC uses the values
1327 * and how they are defined in a struct rtc_time; 1360 * and how they are defined in a struct rtc_time;
1328 */ 1361 */
1329 if ((rtc_tm->tm_year += (epoch - 1900)) <= 69) 1362 rtc_tm->tm_year += epoch - 1900;
1363 if (rtc_tm->tm_year <= 69)
1330 rtc_tm->tm_year += 100; 1364 rtc_tm->tm_year += 100;
1331 1365
1332 rtc_tm->tm_mon--; 1366 rtc_tm->tm_mon--;
@@ -1347,8 +1381,7 @@ static void get_rtc_alm_time(struct rtc_time *alm_tm)
1347 ctrl = CMOS_READ(RTC_CONTROL); 1381 ctrl = CMOS_READ(RTC_CONTROL);
1348 spin_unlock_irq(&rtc_lock); 1382 spin_unlock_irq(&rtc_lock);
1349 1383
1350 if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) 1384 if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
1351 {
1352 BCD_TO_BIN(alm_tm->tm_sec); 1385 BCD_TO_BIN(alm_tm->tm_sec);
1353 BCD_TO_BIN(alm_tm->tm_min); 1386 BCD_TO_BIN(alm_tm->tm_min);
1354 BCD_TO_BIN(alm_tm->tm_hour); 1387 BCD_TO_BIN(alm_tm->tm_hour);