Diffstat (limited to 'drivers/char')
-rw-r--r-- | drivers/char/drm/ati_pcigart.c | 11
-rw-r--r-- | drivers/char/drm/drm_scatter.c | 11
-rw-r--r-- | drivers/char/drm/drm_vm.c      | 20
-rw-r--r-- | drivers/char/drm/i915_dma.c    |  3
-rw-r--r-- | drivers/char/drm/i915_drv.h    |  4
-rw-r--r-- | drivers/char/drm/r300_cmdbuf.c | 54
-rw-r--r-- | drivers/char/drm/radeon_drm.h  | 12
-rw-r--r-- | drivers/char/drm/radeon_mem.c  |  8
-rw-r--r-- | drivers/char/hpet.c            | 51
-rw-r--r-- | drivers/char/ip2/i2lib.c       | 12
-rw-r--r-- | drivers/char/n_tty.c           |  2
-rw-r--r-- | drivers/char/nozomi.c          | 20
-rw-r--r-- | drivers/char/rio/riotable.c    |  4
-rw-r--r-- | drivers/char/rio/riotty.c      |  4
14 files changed, 129 insertions, 87 deletions
diff --git a/drivers/char/drm/ati_pcigart.c b/drivers/char/drm/ati_pcigart.c
index e5a0e97cfdda..141f4dfa0a11 100644
--- a/drivers/char/drm/ati_pcigart.c
+++ b/drivers/char/drm/ati_pcigart.c
@@ -122,8 +122,9 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
 	} else {
 		address = gart_info->addr;
 		bus_address = gart_info->bus_addr;
-		DRM_DEBUG("PCI: Gart Table: VRAM %08X mapped at %08lX\n",
-			  bus_address, (unsigned long)address);
+		DRM_DEBUG("PCI: Gart Table: VRAM %08LX mapped at %08lX\n",
+			  (unsigned long long)bus_address,
+			  (unsigned long)address);
 	}
 
 	pci_gart = (u32 *) address;
@@ -167,6 +168,12 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
 		}
 	}
 
+	if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
+		dma_sync_single_for_device(&dev->pdev->dev,
+					   bus_address,
+					   max_pages * sizeof(u32),
+					   PCI_DMA_TODEVICE);
+
 	ret = 1;
 
 #if defined(__i386__) || defined(__x86_64__)
diff --git a/drivers/char/drm/drm_scatter.c b/drivers/char/drm/drm_scatter.c
index 26d8f675ed5d..b2b0f3d41714 100644
--- a/drivers/char/drm/drm_scatter.c
+++ b/drivers/char/drm/drm_scatter.c
@@ -36,6 +36,15 @@
 
 #define DEBUG_SCATTER 0
 
+static inline void *drm_vmalloc_dma(unsigned long size)
+{
+#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
+	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE);
+#else
+	return vmalloc_32(size);
+#endif
+}
+
 void drm_sg_cleanup(struct drm_sg_mem * entry)
 {
 	struct page *page;
@@ -104,7 +113,7 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
 	}
 	memset((void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr));
 
-	entry->virtual = vmalloc_32(pages << PAGE_SHIFT);
+	entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);
 	if (!entry->virtual) {
 		drm_free(entry->busaddr,
 			 entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES);
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index 3d65c4dcd0c6..945df72a51a9 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -54,13 +54,24 @@ static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
 	pgprot_val(tmp) |= _PAGE_NO_CACHE;
 	if (map_type == _DRM_REGISTERS)
 		pgprot_val(tmp) |= _PAGE_GUARDED;
-#endif
-#if defined(__ia64__)
+#elif defined(__ia64__)
 	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
 				    vma->vm_start))
 		tmp = pgprot_writecombine(tmp);
 	else
 		tmp = pgprot_noncached(tmp);
+#elif defined(__sparc__)
+	tmp = pgprot_noncached(tmp);
+#endif
+	return tmp;
+}
+
+static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
+{
+	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
+
+#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
+	tmp |= _PAGE_NO_CACHE;
 #endif
 	return tmp;
 }
@@ -603,9 +614,6 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 		offset = dev->driver->get_reg_ofs(dev);
 		vma->vm_flags |= VM_IO;	/* not in core dump */
 		vma->vm_page_prot = drm_io_prot(map->type, vma);
-#ifdef __sparc__
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-#endif
 		if (io_remap_pfn_range(vma, vma->vm_start,
 				       (map->offset + offset) >> PAGE_SHIFT,
 				       vma->vm_end - vma->vm_start,
@@ -624,6 +632,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 				    page_to_pfn(virt_to_page(map->handle)),
 				    vma->vm_end - vma->vm_start, vma->vm_page_prot))
 			return -EAGAIN;
+		vma->vm_page_prot = drm_dma_prot(map->type, vma);
 	/* fall through to _DRM_SHM */
 	case _DRM_SHM:
 		vma->vm_ops = &drm_vm_shm_ops;
@@ -631,6 +640,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 		/* Don't let this area swap.  Change when
 		   DRM_KERNEL advisory is supported. */
 		vma->vm_flags |= VM_RESERVED;
+		vma->vm_page_prot = drm_dma_prot(map->type, vma);
 		break;
 	case _DRM_SCATTER_GATHER:
 		vma->vm_ops = &drm_vm_sg_ops;
diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c
index e9d6663bec73..a043bb12301a 100644
--- a/drivers/char/drm/i915_dma.c
+++ b/drivers/char/drm/i915_dma.c
@@ -804,6 +804,9 @@ void i915_driver_lastclose(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
+	if (!dev_priv)
+		return;
+
 	if (dev_priv->agp_heap)
 		i915_mem_takedown(&(dev_priv->agp_heap));
 
diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h
index c10d128e34db..675d88bda066 100644
--- a/drivers/char/drm/i915_drv.h
+++ b/drivers/char/drm/i915_drv.h
@@ -1092,8 +1092,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 #define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
 #define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
 #define IS_I945G(dev) ((dev)->pci_device == 0x2772)
-#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2)
-
+#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\
+			(dev)->pci_device == 0x27AE)
 #define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
 		       (dev)->pci_device == 0x2982 || \
 		       (dev)->pci_device == 0x2992 || \
diff --git a/drivers/char/drm/r300_cmdbuf.c b/drivers/char/drm/r300_cmdbuf.c
index 0f4afc44245c..f535812e4057 100644
--- a/drivers/char/drm/r300_cmdbuf.c
+++ b/drivers/char/drm/r300_cmdbuf.c
@@ -729,6 +729,47 @@ static void r300_discard_buffer(struct drm_device * dev, struct drm_buf * buf)
 	buf->used = 0;
 }
 
+static void r300_cmd_wait(drm_radeon_private_t * dev_priv,
+			  drm_r300_cmd_header_t header)
+{
+	u32 wait_until;
+	RING_LOCALS;
+
+	if (!header.wait.flags)
+		return;
+
+	wait_until = 0;
+
+	switch(header.wait.flags) {
+	case R300_WAIT_2D:
+		wait_until = RADEON_WAIT_2D_IDLE;
+		break;
+	case R300_WAIT_3D:
+		wait_until = RADEON_WAIT_3D_IDLE;
+		break;
+	case R300_NEW_WAIT_2D_3D:
+		wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_3D_IDLE;
+		break;
+	case R300_NEW_WAIT_2D_2D_CLEAN:
+		wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
+		break;
+	case R300_NEW_WAIT_3D_3D_CLEAN:
+		wait_until = RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
+		break;
+	case R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN:
+		wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
+		wait_until |= RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
+		break;
+	default:
+		return;
+	}
+
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+	OUT_RING(wait_until);
+	ADVANCE_RING();
+}
+
 static int r300_scratch(drm_radeon_private_t *dev_priv,
 			drm_radeon_kcmd_buffer_t *cmdbuf,
 			drm_r300_cmd_header_t header)
@@ -909,19 +950,8 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
 			break;
 
 		case R300_CMD_WAIT:
-			/* simple enough, we can do it here */
 			DRM_DEBUG("R300_CMD_WAIT\n");
-			if (header.wait.flags == 0)
-				break;	/* nothing to do */
-
-			{
-				RING_LOCALS;
-
-				BEGIN_RING(2);
-				OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
-				OUT_RING((header.wait.flags & 0xf) << 14);
-				ADVANCE_RING();
-			}
+			r300_cmd_wait(dev_priv, header);
 			break;
 
 		case R300_CMD_SCRATCH:
diff --git a/drivers/char/drm/radeon_drm.h b/drivers/char/drm/radeon_drm.h
index 71e5b21fad2c..aab82e121e07 100644
--- a/drivers/char/drm/radeon_drm.h
+++ b/drivers/char/drm/radeon_drm.h
@@ -225,8 +225,20 @@ typedef union {
 #define R300_CMD_WAIT		7
 #	define R300_WAIT_2D		0x1
 #	define R300_WAIT_3D		0x2
+/* these two defines are DOING IT WRONG - however
+ * we have userspace which relies on using these.
+ * The wait interface is backwards compat new
+ * code should use the NEW_WAIT defines below
+ * THESE ARE NOT BIT FIELDS
+ */
 #	define R300_WAIT_2D_CLEAN	0x3
 #	define R300_WAIT_3D_CLEAN	0x4
+
+#	define R300_NEW_WAIT_2D_3D	0x3
+#	define R300_NEW_WAIT_2D_2D_CLEAN	0x4
+#	define R300_NEW_WAIT_3D_3D_CLEAN	0x6
+#	define R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN	0x8
+
 #define R300_CMD_SCRATCH		8
 
 typedef union {
diff --git a/drivers/char/drm/radeon_mem.c b/drivers/char/drm/radeon_mem.c
index 78b34fa7c89a..4af5286a36fb 100644
--- a/drivers/char/drm/radeon_mem.c
+++ b/drivers/char/drm/radeon_mem.c
@@ -88,7 +88,7 @@ static struct mem_block *alloc_block(struct mem_block *heap, int size,
 
 	list_for_each(p, heap) {
 		int start = (p->start + mask) & ~mask;
-		if (p->file_priv == 0 && start + size <= p->start + p->size)
+		if (p->file_priv == NULL && start + size <= p->start + p->size)
 			return split_block(p, start, size, file_priv);
 	}
 
@@ -113,7 +113,7 @@ static void free_block(struct mem_block *p)
 	/* Assumes a single contiguous range.  Needs a special file_priv in
 	 * 'heap' to stop it being subsumed.
 	 */
-	if (p->next->file_priv == 0) {
+	if (p->next->file_priv == NULL) {
 		struct mem_block *q = p->next;
 		p->size += q->size;
 		p->next = q->next;
@@ -121,7 +121,7 @@ static void free_block(struct mem_block *p)
 		drm_free(q, sizeof(*q), DRM_MEM_BUFS);
 	}
 
-	if (p->prev->file_priv == 0) {
+	if (p->prev->file_priv == NULL) {
 		struct mem_block *q = p->prev;
 		q->size += p->size;
 		q->next = p->next;
@@ -174,7 +174,7 @@ void radeon_mem_release(struct drm_file *file_priv, struct mem_block *heap)
 	 * 'heap' to stop it being subsumed.
 	 */
 	list_for_each(p, heap) {
-		while (p->file_priv == 0 && p->next->file_priv == 0) {
+		while (p->file_priv == NULL && p->next->file_priv == NULL) {
 			struct mem_block *q = p->next;
 			p->size += q->size;
 			p->next = q->next;
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 465ad35ed38f..1399971be689 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -731,14 +731,14 @@ static unsigned long hpet_calibrate(struct hpets *hpetp)
 
 int hpet_alloc(struct hpet_data *hdp)
 {
-	u64 cap, mcfg, hpet_config;
+	u64 cap, mcfg;
 	struct hpet_dev *devp;
-	u32 i, ntimer, irq;
+	u32 i, ntimer;
 	struct hpets *hpetp;
 	size_t siz;
 	struct hpet __iomem *hpet;
 	static struct hpets *last = NULL;
-	unsigned long period, irq_bitmap;
+	unsigned long period;
 	unsigned long long temp;
 
 	/*
@@ -765,47 +765,11 @@ int hpet_alloc(struct hpet_data *hdp)
 	hpetp->hp_hpet_phys = hdp->hd_phys_address;
 
 	hpetp->hp_ntimer = hdp->hd_nirqs;
-	hpet = hpetp->hp_hpet;
-
-	/* Assign IRQs statically for legacy devices */
-	hpetp->hp_dev[0].hd_hdwirq = hdp->hd_irq[0];
-	hpetp->hp_dev[1].hd_hdwirq = hdp->hd_irq[1];
-
-	/* Assign IRQs dynamically for the others */
-	for (i = 2, devp = &hpetp->hp_dev[2]; i < hdp->hd_nirqs; i++, devp++) {
-		struct hpet_timer __iomem *timer;
 
-		timer = &hpet->hpet_timers[devp - hpetp->hp_dev];
+	for (i = 0; i < hdp->hd_nirqs; i++)
+		hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i];
 
-		/* Check if there's already an IRQ assigned to the timer */
-		if (hdp->hd_irq[i]) {
-			hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i];
-			continue;
-		}
-
-		hpet_config = readq(&timer->hpet_config);
-		irq_bitmap = (hpet_config & Tn_INT_ROUTE_CAP_MASK)
-			>> Tn_INT_ROUTE_CAP_SHIFT;
-		if (!irq_bitmap)
-			irq = 0;	/* No valid IRQ Assignable */
-		else {
-			irq = find_first_bit(&irq_bitmap, 32);
-			do {
-				hpet_config |= irq << Tn_INT_ROUTE_CNF_SHIFT;
-				writeq(hpet_config, &timer->hpet_config);
-
-				/*
-				 * Verify whether we have written a valid
-				 * IRQ number by reading it back again
-				 */
-				hpet_config = readq(&timer->hpet_config);
-				if (irq == (hpet_config & Tn_INT_ROUTE_CNF_MASK)
-					>> Tn_INT_ROUTE_CNF_SHIFT)
-					break;	/* Success */
-			} while ((irq = (find_next_bit(&irq_bitmap, 32, irq))));
-		}
-		hpetp->hp_dev[i].hd_hdwirq = irq;
-	}
+	hpet = hpetp->hp_hpet;
 
 	cap = readq(&hpet->hpet_cap);
 
@@ -836,8 +800,7 @@ int hpet_alloc(struct hpet_data *hdp)
 		hpetp->hp_which, hdp->hd_phys_address,
 		hpetp->hp_ntimer > 1 ? "s" : "");
 	for (i = 0; i < hpetp->hp_ntimer; i++)
-		printk("%s %d", i > 0 ? "," : "",
-		       hpetp->hp_dev[i].hd_hdwirq);
+		printk("%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
 	printk("\n");
 
 	printk(KERN_INFO "hpet%u: %u %d-bit timers, %Lu Hz\n",
diff --git a/drivers/char/ip2/i2lib.c b/drivers/char/ip2/i2lib.c
index d6567b32fb5c..9c25320121ef 100644
--- a/drivers/char/ip2/i2lib.c
+++ b/drivers/char/ip2/i2lib.c
@@ -644,12 +644,12 @@ i2QueueCommands(int type, i2ChanStrPtr pCh, int timeout, int nCommands,
 			// Normal Expected path - We still hold LOCK
 			break;   /* from for()- Enough room: goto proceed */
 		}
-	}
-
-	ip2trace (CHANN, ITRC_QUEUE, 3, 1, totalsize );
+		ip2trace(CHANN, ITRC_QUEUE, 3, 1, totalsize);
+		WRITE_UNLOCK_IRQRESTORE(lock_var_p, flags);
+	} else
+		ip2trace(CHANN, ITRC_QUEUE, 3, 1, totalsize);
 
-	// Prepare to wait for buffers to empty
-	WRITE_UNLOCK_IRQRESTORE(lock_var_p,flags);
+	/* Prepare to wait for buffers to empty */
 	serviceOutgoingFifo(pB);	// Dump what we got
 
 	if (timeout == 0) {
@@ -1830,6 +1830,8 @@ i2StripFifo(i2eBordStrPtr pB)
 		default: // Neither packet? should be impossible
 			ip2trace (ITRC_NO_PORT, ITRC_SFIFO, 5, 1,
 				PTYPE_OF(pB->i2eLeadoffWord) );
+			WRITE_UNLOCK_IRQRESTORE(&pB->read_fifo_spinlock,
+					bflags);
 
 			break;
 		}  // End of switch on type of packets
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
index 46b2a1cc8b54..0c09409fa45d 100644
--- a/drivers/char/n_tty.c
+++ b/drivers/char/n_tty.c
@@ -1183,7 +1183,7 @@ static int copy_from_read_buf(struct tty_struct *tty,
 	return retval;
 }
 
-extern ssize_t redirected_tty_write(struct file *, const char *,
+extern ssize_t redirected_tty_write(struct file *, const char __user *,
 				    size_t, loff_t *);
 
 /**
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
index 6d0dc5f9b6bb..6a6843a0a674 100644
--- a/drivers/char/nozomi.c
+++ b/drivers/char/nozomi.c
@@ -438,7 +438,7 @@ static void read_mem32(u32 *buf, const void __iomem *mem_addr_start,
 			u32 size_bytes)
 {
 	u32 i = 0;
-	const u32 *ptr = (__force u32 *) mem_addr_start;
+	const u32 __iomem *ptr = mem_addr_start;
 	u16 *buf16;
 
 	if (unlikely(!ptr || !buf))
@@ -448,11 +448,11 @@ static void read_mem32(u32 *buf, const void __iomem *mem_addr_start,
 	switch (size_bytes) {
 	case 2:	/* 2 bytes */
 		buf16 = (u16 *) buf;
-		*buf16 = __le16_to_cpu(readw((void __iomem *)ptr));
+		*buf16 = __le16_to_cpu(readw(ptr));
 		goto out;
 		break;
 	case 4:	/* 4 bytes */
-		*(buf) = __le32_to_cpu(readl((void __iomem *)ptr));
+		*(buf) = __le32_to_cpu(readl(ptr));
 		goto out;
 		break;
 	}
@@ -461,11 +461,11 @@ static void read_mem32(u32 *buf, const void __iomem *mem_addr_start,
 		if (size_bytes - i == 2) {
 			/* Handle 2 bytes in the end */
 			buf16 = (u16 *) buf;
-			*(buf16) = __le16_to_cpu(readw((void __iomem *)ptr));
+			*(buf16) = __le16_to_cpu(readw(ptr));
 			i += 2;
 		} else {
 			/* Read 4 bytes */
-			*(buf) = __le32_to_cpu(readl((void __iomem *)ptr));
+			*(buf) = __le32_to_cpu(readl(ptr));
 			i += 4;
 		}
 		buf++;
@@ -484,7 +484,7 @@ static u32 write_mem32(void __iomem *mem_addr_start, const u32 *buf,
 			u32 size_bytes)
 {
 	u32 i = 0;
-	u32 *ptr = (__force u32 *) mem_addr_start;
+	u32 __iomem *ptr = mem_addr_start;
 	const u16 *buf16;
 
 	if (unlikely(!ptr || !buf))
@@ -494,7 +494,7 @@ static u32 write_mem32(void __iomem *mem_addr_start, const u32 *buf,
 	switch (size_bytes) {
 	case 2:	/* 2 bytes */
 		buf16 = (const u16 *)buf;
-		writew(__cpu_to_le16(*buf16), (void __iomem *)ptr);
+		writew(__cpu_to_le16(*buf16), ptr);
 		return 2;
 		break;
 	case 1: /*
@@ -502,7 +502,7 @@ static u32 write_mem32(void __iomem *mem_addr_start, const u32 *buf,
 		 * so falling through..
 		 */
 	case 4: /* 4 bytes */
-		writel(__cpu_to_le32(*buf), (void __iomem *)ptr);
+		writel(__cpu_to_le32(*buf), ptr);
 		return 4;
 		break;
 	}
@@ -511,11 +511,11 @@ static u32 write_mem32(void __iomem *mem_addr_start, const u32 *buf,
 		if (size_bytes - i == 2) {
 			/* 2 bytes */
 			buf16 = (const u16 *)buf;
-			writew(__cpu_to_le16(*buf16), (void __iomem *)ptr);
+			writew(__cpu_to_le16(*buf16), ptr);
 			i += 2;
 		} else {
 			/* 4 bytes */
-			writel(__cpu_to_le32(*buf), (void __iomem *)ptr);
+			writel(__cpu_to_le32(*buf), ptr);
 			i += 4;
 		}
 		buf++;
diff --git a/drivers/char/rio/riotable.c b/drivers/char/rio/riotable.c
index 991119c9f473..9b52892a501f 100644
--- a/drivers/char/rio/riotable.c
+++ b/drivers/char/rio/riotable.c
@@ -425,8 +425,10 @@ int RIOApel(struct rio_info *p)
 
 		MapP = &p->RIOConnectTable[Next++];
 		MapP->HostUniqueNum = HostP->UniqueNum;
-		if ((HostP->Flags & RUN_STATE) != RC_RUNNING)
+		if ((HostP->Flags & RUN_STATE) != RC_RUNNING) {
+			rio_spin_unlock_irqrestore(&HostP->HostLock, flags);
 			continue;
+		}
 		MapP->RtaUniqueNum = 0;
 		MapP->ID = 0;
 		MapP->Flags = SLOT_IN_USE;
diff --git a/drivers/char/rio/riotty.c b/drivers/char/rio/riotty.c
index a4f0b1e3e7fa..cfa54361473f 100644
--- a/drivers/char/rio/riotty.c
+++ b/drivers/char/rio/riotty.c
@@ -319,6 +319,7 @@ int riotopen(struct tty_struct *tty, struct file *filp)
 				PortP->State |= RIO_WOPEN;
 				rio_spin_unlock_irqrestore(&PortP->portSem, flags);
 				if (RIODelay(PortP, HUNDRED_MS) == RIO_FAIL) {
+					rio_spin_lock_irqsave(&PortP->portSem, flags);
 					/*
 					 ** ACTION: verify that this is a good thing
 					 ** to do here. -- ???
@@ -334,6 +335,7 @@ int riotopen(struct tty_struct *tty, struct file *filp)
 					func_exit();
 					return -EINTR;
 				}
+				rio_spin_lock_irqsave(&PortP->portSem, flags);
 			}
 			PortP->State &= ~RIO_WOPEN;
 		}
@@ -493,6 +495,7 @@ int riotclose(void *ptr)
 
 	if (RIOShortCommand(p, PortP, CLOSE, 1, 0) == RIO_FAIL) {
 		RIOPreemptiveCmd(p, PortP, FCLOSE);
+		rio_spin_lock_irqsave(&PortP->portSem, flags);
 		goto close_end;
 	}
 
@@ -508,6 +511,7 @@ int riotclose(void *ptr)
 
 	if (p->RIOHalted) {
 		RIOClearUp(PortP);
+		rio_spin_lock_irqsave(&PortP->portSem, flags);
 		goto close_end;
 	}
 	if (RIODelay(PortP, HUNDRED_MS) == RIO_FAIL) {